# system call counts
# (c) 2010, Tom Zanussi
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.

import os
import sys

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from Util import syscall_name

usage = "perf script -s syscall-counts.py [comm]\n"

for_comm = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    for_comm = sys.argv[1]

syscalls = autodict()

def trace_begin():
    print "Press control+C to stop and show the summary"

def trace_end():
    print_syscall_totals()

def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, id, args):
    if for_comm is not None:
        if common_comm != for_comm:
            return
    try:
        syscalls[id] += 1
    except TypeError:
        syscalls[id] = 1

def syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    raw_syscalls__sys_enter(**locals())

def print_syscall_totals():
    if for_comm is not None:
        print "\nsyscall events for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall events:\n\n",

    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
        "-----------"),

    for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
            reverse = True):
        print "%-40s %10d\n" % (syscall_name(id), val),
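
# Example usage (not part of the original script; this is only an
# illustrative sketch of the standard perf tracing workflow the script
# is meant to be run under):
#
#   # record system-call entry events system-wide for a few seconds
#   perf record -e raw_syscalls:sys_enter -a -- sleep 5
#
#   # summarize the recorded data with this script
#   perf script -s syscall-counts.py
#
#   # restrict the summary to a single command name (comm)
#   perf script -s syscall-counts.py bash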
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/rep.h55
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_counter.c3
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c6
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_pci.c8
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c24
-rw-r--r--drivers/net/ethernet/marvell/skge.c9
-rw-r--r--drivers/net/ethernet/marvell/sky2.c10
-rw-r--r--drivers/net/ethernet/mediatek/Kconfig9
-rw-r--r--drivers/net/ethernet/mediatek/Makefile1
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_path.c45
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c551
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h101
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_offload.c24
-rw-r--r--drivers/net/ethernet/mediatek/mtk_star_emac.c24
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c70
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.h2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_mcu.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c67
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c123
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c189
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c208
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c63
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dpll.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h84
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c101
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c376
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.c73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c145
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.c120
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.h39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c100
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c294
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c79
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c135
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tir.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tir.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c67
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c297
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c837
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c1155
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.h77
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c201
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.h121
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c101
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c60
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dim.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c405
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c865
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c70
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c490
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c267
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/adj_vport.c202
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c1754
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c318
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h99
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c567
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/events.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c585
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h106
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c320
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c195
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h55
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c173
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c71
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/hwmon.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/hwmon.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c464
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c77
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c746
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c97
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c440
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c116
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c799
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/st.c185
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c185
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h118
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c101
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c227
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rdma.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rdma.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rl.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c103
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c69
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h97
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.c)309
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.h)25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.c467
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.h69
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c)4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.h)6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c)823
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h)64
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c1101
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h88
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.c)135
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.h)24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.c)45
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h)18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.c)117
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.h)8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c)405
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.h)17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c1641
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h120
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c427
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_internal.h)38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c)947
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.h)61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c640
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h151
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c)89
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.h)7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c394
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h132
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_prm.h)42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c)312
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.h)13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c)183
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h)7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c)53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.h)8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.c)2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.h)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_arg.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_arg.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_buddy.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_cmd.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c)30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_definer.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_definer.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c)38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_fw.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_icm_pool.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_matcher.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ptrn.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_rule.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c)66
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c)6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h)23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c)6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c)259
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h240
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c)76
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c263
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_table.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h)1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c)12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h)10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h)40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr_ste_v1.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h)4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c138
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wc.c70
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_linecards.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c306
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h25
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c76
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h48
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/txheader.h63
-rw-r--r--drivers/net/ethernet/meta/Kconfig4
-rw-r--r--drivers/net/ethernet/meta/fbnic/Makefile9
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic.h72
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.c149
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.h327
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c271
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_devlink.c507
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c1843
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.c1384
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.h219
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c123
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw_log.h45
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c574
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h123
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c2
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_irq.c209
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.c372
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.h74
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mdio.c195
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.c288
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.h52
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_pci.c145
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_phylink.c274
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_rpc.c618
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_rpc.h43
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_time.c2
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_tlv.c55
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_tlv.h41
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.c1472
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.h97
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c2
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c8
-rw-r--r--drivers/net/ethernet/microchip/Kconfig1
-rw-r--r--drivers/net/ethernet/microchip/Makefile1
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c70
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c97
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h3
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c56
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.h11
-rw-r--r--drivers/net/ethernet/microchip/lan865x/lan865x.c30
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c18
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c10
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h12
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c3
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_port.c4
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c68
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c1
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c8
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c21
-rw-r--r--drivers/net/ethernet/microchip/lan969x/Kconfig5
-rw-r--r--drivers/net/ethernet/microchip/lan969x/Makefile12
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Kconfig9
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Makefile8
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c (renamed from drivers/net/ethernet/microchip/lan969x/lan969x.c)21
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h (renamed from drivers/net/ethernet/microchip/lan969x/lan969x.h)25
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_calendar.c (renamed from drivers/net/ethernet/microchip/lan969x/lan969x_calendar.c)0
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c406
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_regs.c (renamed from drivers/net/ethernet/microchip/lan969x/lan969x_regs.c)0
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_rgmii.c224
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_ag_api.c3843
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_impl.c85
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c18
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c68
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c86
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.h38
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h145
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c3
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_packet.c15
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c16
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.c59
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.h5
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c1
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c12
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.h2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c48
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h21
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c10
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c17
-rw-r--r--drivers/net/ethernet/microsoft/Kconfig1
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c812
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c56
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_bpf.c48
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c952
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_ethtool.c167
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c8
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c51
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.c266
-rw-r--r--drivers/net/ethernet/mscc/ocelot_stats.c39
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c2
-rw-r--r--drivers/net/ethernet/mucse/Kconfig33
-rw-r--r--drivers/net/ethernet/mucse/Makefile7
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/Makefile11
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h71
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c143
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h17
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c320
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c406
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h20
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c191
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h88
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c8
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c8
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c17
-rw-r--r--drivers/net/ethernet/neterion/s2io.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/cmsg.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/ipsec.c22
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/tls.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/devlink_param.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/dp.c18
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/dp.c18
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_hwmon.c40
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c21
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c15
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c19
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c44
-rw-r--r--drivers/net/ethernet/oa_tc6.c14
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c44
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c4
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c4
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c5
-rw-r--r--drivers/net/ethernet/pensando/Kconfig1
-rw-r--r--drivers/net/ethernet/pensando/ionic/Makefile2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h10
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_api.h131
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_aux.c102
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_aux.h10
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c11
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c281
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h28
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c137
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h159
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c84
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h21
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c10
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_phc.c63
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c46
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h31
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c32
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c21
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_devlink.c12
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h52
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.c11
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.h9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c138
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ooo.c9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ptp.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c8
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c26
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c3
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c5
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c24
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c78
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.h6
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c7
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c15
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c35
-rw-r--r--drivers/net/ethernet/qualcomm/Kconfig16
-rw-r--r--drivers/net/ethernet/qualcomm/Makefile1
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii.c2
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/Makefile7
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe.c239
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe.h39
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_config.c2034
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_config.h317
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c847
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.h16
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_regs.h591
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c26
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.h1
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c9
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c4
-rw-r--r--drivers/net/ethernet/realtek/8139too.c4
-rw-r--r--drivers/net/ethernet/realtek/Kconfig5
-rw-r--r--drivers/net/ethernet/realtek/atp.c4
-rw-r--r--drivers/net/ethernet/realtek/r8169.h9
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c888
-rw-r--r--drivers/net/ethernet/realtek/r8169_phy_config.c243
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase.h28
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase_main.c184
-rw-r--r--drivers/net/ethernet/renesas/Makefile1
-rw-r--r--drivers/net/ethernet/renesas/ravb.h16
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c195
-rw-r--r--drivers/net/ethernet/renesas/ravb_ptp.c12
-rw-r--r--drivers/net/ethernet/renesas/rcar_gen4_ptp.c78
-rw-r--r--drivers/net/ethernet/renesas/rcar_gen4_ptp.h46
-rw-r--r--drivers/net/ethernet/renesas/rswitch.h121
-rw-r--r--drivers/net/ethernet/renesas/rswitch_l2.c316
-rw-r--r--drivers/net/ethernet/renesas/rswitch_l2.h15
-rw-r--r--drivers/net/ethernet/renesas/rswitch_main.c (renamed from drivers/net/ethernet/renesas/rswitch.c)454
-rw-r--r--drivers/net/ethernet/renesas/rtsn.c55
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c38
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c4
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c6
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c45
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c15
-rw-r--r--drivers/net/ethernet/seeq/ether3.c8
-rw-r--r--drivers/net/ethernet/sfc/Kconfig5
-rw-r--r--drivers/net/ethernet/sfc/Makefile2
-rw-r--r--drivers/net/ethernet/sfc/ef10.c11
-rw-r--r--drivers/net/ethernet/sfc/ef100_ethtool.c1
-rw-r--r--drivers/net/ethernet/sfc/ef100_netdev.c7
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.c49
-rw-r--r--drivers/net/ethernet/sfc/ef100_tx.c17
-rw-r--r--drivers/net/ethernet/sfc/efx.c32
-rw-r--r--drivers/net/ethernet/sfc/efx.h1
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c6
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c20
-rw-r--r--drivers/net/ethernet/sfc/efx_common.h1
-rw-r--r--drivers/net/ethernet/sfc/efx_devlink.c13
-rw-r--r--drivers/net/ethernet/sfc/efx_reflash.c522
-rw-r--r--drivers/net/ethernet/sfc/efx_reflash.h20
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c6
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.c145
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.h2
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c16
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.h1
-rw-r--r--drivers/net/ethernet/sfc/falcon/ethtool.c89
-rw-r--r--drivers/net/ethernet/sfc/falcon/falcon.c8
-rw-r--r--drivers/net/ethernet/sfc/falcon/farch.c22
-rw-r--r--drivers/net/ethernet/sfc/falcon/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/falcon/nic.c20
-rw-r--r--drivers/net/ethernet/sfc/falcon/nic.h7
-rw-r--r--drivers/net/ethernet/sfc/falcon/rx.c5
-rw-r--r--drivers/net/ethernet/sfc/falcon/tx.c8
-rw-r--r--drivers/net/ethernet/sfc/falcon/tx.h3
-rw-r--r--drivers/net/ethernet/sfc/fw_formats.h114
-rw-r--r--drivers/net/ethernet/sfc/io.h24
-rw-r--r--drivers/net/ethernet/sfc/mae.c17
-rw-r--r--drivers/net/ethernet/sfc/mae.h1
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c197
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h32
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h13844
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c59
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port_common.c11
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h17
-rw-r--r--drivers/net/ethernet/sfc/nic.c9
-rw-r--r--drivers/net/ethernet/sfc/nic_common.h2
-rw-r--r--drivers/net/ethernet/sfc/ptp.c7
-rw-r--r--drivers/net/ethernet/sfc/ptp.h3
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c16
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_channels.c6
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_common.c3
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool.c4
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool_common.c123
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool_common.h2
-rw-r--r--drivers/net/ethernet/sfc/siena/farch.c2
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi.c6
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi_pcol.h12
-rw-r--r--drivers/net/ethernet/sfc/siena/net_driver.h6
-rw-r--r--drivers/net/ethernet/sfc/siena/nic.c14
-rw-r--r--drivers/net/ethernet/sfc/siena/nic_common.h5
-rw-r--r--drivers/net/ethernet/sfc/siena/ptp.c2
-rw-r--r--drivers/net/ethernet/sfc/siena/ptp.h2
-rw-r--r--drivers/net/ethernet/sfc/siena/rx_common.c16
-rw-r--r--drivers/net/ethernet/sfc/siena/siena.c2
-rw-r--r--drivers/net/ethernet/sfc/tc.c6
-rw-r--r--drivers/net/ethernet/sfc/tc_conntrack.c2
-rw-r--r--drivers/net/ethernet/sfc/tc_encap_actions.c6
-rw-r--r--drivers/net/ethernet/sfc/tx.c8
-rw-r--r--drivers/net/ethernet/sfc/tx.h3
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c10
-rw-r--r--drivers/net/ethernet/sis/sis190.c6
-rw-r--r--drivers/net/ethernet/sis/sis900.c7
-rw-r--r--drivers/net/ethernet/smsc/epic100.c4
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c4
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c17
-rw-r--r--drivers/net/ethernet/socionext/netsec.c7
-rw-r--r--drivers/net/ethernet/spacemit/Kconfig29
-rw-r--r--drivers/net/ethernet/spacemit/Makefile6
-rw-r--r--drivers/net/ethernet/spacemit/k1_emac.c2162
-rw-r--r--drivers/net/ethernet/spacemit/k1_emac.h416
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig81
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h86
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c46
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c228
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-eic7700.c235
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c200
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c198
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c111
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c416
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h30
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c29
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c249
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c99
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c44
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c100
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c36
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c356
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c235
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c1615
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c186
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c281
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c93
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c66
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c167
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c201
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun55i.c159
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c92
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c70
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c286
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c170
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c236
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c35
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h61
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c501
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c36
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h55
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c188
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c66
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c277
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h125
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h79
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_est.c18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_est.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c291
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c177
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c56
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.c48
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.h12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c1635
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c478
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c121
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.c67
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h57
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c289
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c94
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c28
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c375
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.h64
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h1
-rw-r--r--drivers/net/ethernet/sun/cassini.c4
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c6
-rw-r--r--drivers/net/ethernet/sun/niu.c92
-rw-r--r--drivers/net/ethernet/sun/niu.h8
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c4
-rw-r--r--drivers/net/ethernet/sun/sungem.c10
-rw-r--r--drivers/net/ethernet/sun/sunhme.c10
-rw-r--r--drivers/net/ethernet/sun/sunqe.h2
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c2
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.c8
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_driver.c2
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-net.c5
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c2
-rw-r--r--drivers/net/ethernet/tehuti/tn40.c9
-rw-r--r--drivers/net/ethernet/tehuti/tn40.h33
-rw-r--r--drivers/net/ethernet/tehuti/tn40_mdio.c84
-rw-r--r--drivers/net/ethernet/ti/Kconfig14
-rw-r--r--drivers/net/ethernet/ti/Makefile3
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c27
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c1001
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h14
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.c51
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.c63
-rw-r--r--drivers/net/ethernet/ti/cpsw.c37
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c54
-rw-r--r--drivers/net/ethernet/ti/cpsw_ethtool.c12
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c24
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c70
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h7
-rw-r--r--drivers/net/ethernet/ti/cpts.c2
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c21
-rw-r--r--drivers/net/ethernet/ti/icssg/icss_iep.c268
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_common.c875
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.c206
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.h81
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c1203
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.h122
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c91
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_stats.c20
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_stats.h58
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_switch_map.h36
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_prueth.c1746
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_prueth.h262
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_prueth_ptp.h85
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_switch.h257
-rw-r--r--drivers/net/ethernet/ti/netcp.h5
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c68
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c76
-rw-r--r--drivers/net/ethernet/ti/tlan.c8
-rw-r--r--drivers/net/ethernet/toshiba/Kconfig11
-rw-r--r--drivers/net/ethernet/toshiba/Makefile2
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c58
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.h1
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c2556
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.h475
-rw-r--r--drivers/net/ethernet/toshiba/spider_net_ethtool.c174
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c4
-rw-r--r--drivers/net/ethernet/vertexcom/mse102x.c119
-rw-r--r--drivers/net/ethernet/via/via-rhine.c11
-rw-r--r--drivers/net/ethernet/via/via-velocity.c6
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig43
-rw-r--r--drivers/net/ethernet/wangxun/Makefile2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/Makefile3
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.c352
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.h17
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c775
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.h13
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c644
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.h8
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_mbx.c419
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_mbx.h99
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/regs.h1
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c20
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c79
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/efuse.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c94
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c25
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c23
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c21
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c38
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c24
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h12
-rw-r--r--drivers/net/wireless/realtek/rtw88/Kconfig63
-rw-r--r--drivers/net/wireless/realtek/rtw88/Makefile26
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.c8
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.h7
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c61
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.h11
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c59
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c82
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h18
-rw-r--r--drivers/net/wireless/realtek/rtw88/hci.h8
-rw-r--r--drivers/net/wireless/realtek/rtw88/led.c74
-rw-r--r--drivers/net/wireless/realtek/rtw88/led.h25
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c63
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.h6
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c11
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c170
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h113
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c57
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c295
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.h20
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h263
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8703b.c93
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723cs.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.c29
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723de.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723ds.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723du.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723x.c71
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723x.h14
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812a.c1125
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812a.h10
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812a_table.c2812
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812a_table.h26
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812au.c94
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a.c2270
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a.h62
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a_table.c23930
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a_table.h40
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814ae.c31
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814au.c54
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821a.c1226
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821a.h10
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821a_table.c2350
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821a_table.h21
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821au.c78
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c62
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.h33
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821ce.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821cs.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821cu.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c61
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.h21
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822be.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822bs.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822bu.c16
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c58
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.h9
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822ce.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822cs.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822cu.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw88xxa.c1989
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw88xxa.h175
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.c18
-rw-r--r--drivers/net/wireless/realtek/rtw88/sar.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/sdio.c45
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c9
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.h4
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.c336
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.h3
-rw-r--r--drivers/net/wireless/realtek/rtw88/util.c3
-rw-r--r--drivers/net/wireless/realtek/rtw89/Kconfig56
-rw-r--r--drivers/net/wireless/realtek/rtw89/Makefile15
-rw-r--r--drivers/net/wireless/realtek/rtw89/acpi.c1131
-rw-r--r--drivers/net/wireless/realtek/rtw89/acpi.h232
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c269
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.h475
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c1202
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.h115
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c4125
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.h25
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c2236
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h1033
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c2651
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse.c150
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse_be.c52
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c2278
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h533
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c787
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h234
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c812
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac_be.c39
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c633
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h217
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci_be.c21
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c1993
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h95
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy_be.c15
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c271
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.h10
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h181
-rw-r--r--drivers/net/wireless/realtek/rtw89/regd.c827
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.c217
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c323
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk_table.c77
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk_table.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_table.c501
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851be.c9
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851bu.c66
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c162
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c16
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ae.c9
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852au.c79
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c143
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_common.c110
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c96
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h3
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852be.c9
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt.c59
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c96
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h3
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bte.c15
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bu.c81
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c292
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c81
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ce.c9
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852cu.c69
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.c292
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c118
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922ae.c30
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.c755
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.h28
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.c40
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h48
-rw-r--r--drivers/net/wireless/realtek/rtw89/usb.c1071
-rw-r--r--drivers/net/wireless/realtek/rtw89/usb.h77
-rw-r--r--drivers/net/wireless/realtek/rtw89/util.c220
-rw-r--r--drivers/net/wireless/realtek/rtw89/util.h13
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c122
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.h20
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c6
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c17
-rw-r--r--drivers/net/wireless/silabs/wfx/bus.h1
-rw-r--r--drivers/net/wireless/silabs/wfx/bus_sdio.c54
-rw-r--r--drivers/net/wireless/silabs/wfx/bus_spi.c45
-rw-r--r--drivers/net/wireless/silabs/wfx/main.c33
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.c29
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.h7
-rw-r--r--drivers/net/wireless/st/cw1200/bh.c11
-rw-r--r--drivers/net/wireless/st/cw1200/cw1200_spi.c4
-rw-r--r--drivers/net/wireless/st/cw1200/main.c2
-rw-r--r--drivers/net/wireless/st/cw1200/pm.c2
-rw-r--r--drivers/net/wireless/st/cw1200/queue.c4
-rw-r--r--drivers/net/wireless/st/cw1200/sta.c16
-rw-r--r--drivers/net/wireless/st/cw1200/sta.h5
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.c35
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.h1
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.c79
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.h3
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c5
-rw-r--r--drivers/net/wireless/ti/wl1251/reg.h6
-rw-r--r--drivers/net/wireless/ti/wl1251/tx.c4
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c2
-rw-r--r--drivers/net/wireless/ti/wl12xx/reg.h6
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.c3
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c27
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c11
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c61
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/sysfs.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/vendor_cmd.c3
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c362
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.h18
-rw-r--r--drivers/net/wireless/virtual/virt_wifi.c14
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_mac.c10
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c3
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_devlink.c3
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.c24
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mmio.c2
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_pcie.c58
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_trace.c3
-rw-r--r--drivers/net/wwan/mhi_wwan_mbim.c30
-rw-r--r--drivers/net/wwan/qcom_bam_dmux.c2
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_cldma.c5
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_cldma.h2
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c8
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c2
-rw-r--r--drivers/net/wwan/t7xx/t7xx_modem_ops.c1
-rw-r--r--drivers/net/wwan/t7xx/t7xx_netdev.c11
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.c85
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.h1
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port.h3
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_proxy.c51
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_proxy.h1
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_trace.c2
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_wwan.c8
-rw-r--r--drivers/net/wwan/t7xx/t7xx_state_monitor.c26
-rw-r--r--drivers/net/wwan/t7xx/t7xx_state_monitor.h5
-rw-r--r--drivers/net/wwan/wwan_core.c24
-rw-r--r--drivers/net/wwan/wwan_hwsim.c2
-rw-r--r--drivers/net/xen-netback/interface.c2
-rw-r--r--drivers/net/xen-netback/netback.c3
-rw-r--r--drivers/net/xen-netfront.c37
3216 files changed, 374363 insertions, 123024 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 1fd5acdc73c6..ac12eaf11755 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -76,24 +76,11 @@ config WIREGUARD
tristate "WireGuard secure network tunnel"
depends on NET && INET
depends on IPV6 || !IPV6
- depends on !KMSAN # KMSAN doesn't support the crypto configs below
select NET_UDP_TUNNEL
select DST_CACHE
- select CRYPTO
select CRYPTO_LIB_CURVE25519
select CRYPTO_LIB_CHACHA20POLY1305
- select CRYPTO_CHACHA20_X86_64 if X86 && 64BIT
- select CRYPTO_POLY1305_X86_64 if X86 && 64BIT
- select CRYPTO_BLAKE2S_X86 if X86 && 64BIT
- select CRYPTO_CURVE25519_X86 if X86 && 64BIT
- select CRYPTO_CHACHA20_NEON if ARM || (ARM64 && KERNEL_MODE_NEON)
- select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON
- select CRYPTO_POLY1305_ARM if ARM
- select CRYPTO_BLAKE2S_ARM if ARM
- select CRYPTO_CURVE25519_NEON if ARM && KERNEL_MODE_NEON
- select CRYPTO_CHACHA_MIPS if CPU_MIPS32_R2
- select CRYPTO_POLY1305_MIPS if MIPS
- select CRYPTO_CHACHA_S390 if S390
+ select CRYPTO_LIB_UTILS
help
WireGuard is a secure, fast, and easy to use replacement for IPSec
that uses modern cryptography and clever networking tricks. It's
@@ -115,6 +102,21 @@ config WIREGUARD_DEBUG
Say N here unless you know what you're doing.
+config OVPN
+ tristate "OpenVPN data channel offload"
+ depends on NET && INET
+ depends on IPV6 || !IPV6
+ select DST_CACHE
+ select NET_UDP_TUNNEL
+ select CRYPTO
+ select CRYPTO_AES
+ select CRYPTO_GCM
+ select CRYPTO_CHACHA20POLY1305
+ select STREAM_PARSER
+ help
+ This module enhances the performance of the OpenVPN userspace software
+ by offloading the data channel processing to kernelspace.
+
config EQUALIZER
tristate "EQL (serial line load balancing) support"
help
@@ -518,30 +520,6 @@ source "drivers/net/hippi/Kconfig"
source "drivers/net/ipa/Kconfig"
-config NET_SB1000
- tristate "General Instruments Surfboard 1000"
- depends on ISA && PNP
- help
- This is a driver for the General Instrument (also known as
- NextLevel) SURFboard 1000 internal
- cable modem. This is an ISA card which is used by a number of cable
- TV companies to provide cable modem access. It's a one-way
- downstream-only cable modem, meaning that your upstream net link is
- provided by your regular phone modem.
-
- At present this driver only compiles as a module, so say M here if
- you have this card. The module will be called sb1000. Then read
- <file:Documentation/networking/device_drivers/cable/sb1000.rst> for
- information on how to use this module, as it needs special ppp
- scripts for establishing a connection. Further documentation
- and the necessary scripts can be found at:
-
- <http://www.jacksonville.net/~fventuri/>
- <http://home.adelphia.net/~siglercm/sb1000.html>
- <http://linuxpower.cx/~cable/>
-
- If you don't have this card, of course say N.
-
source "drivers/net/phy/Kconfig"
source "drivers/net/pse-pd/Kconfig"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 13743d0e83b5..73bc63ecd65f 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_IPVLAN) += ipvlan/
obj-$(CONFIG_IPVTAP) += ipvlan/
obj-$(CONFIG_DUMMY) += dummy.o
obj-$(CONFIG_WIREGUARD) += wireguard/
+obj-$(CONFIG_OVPN) += ovpn/
obj-$(CONFIG_EQUALIZER) += eql.o
obj-$(CONFIG_IFB) += ifb.o
obj-$(CONFIG_MACSEC) += macsec.o
@@ -69,7 +70,6 @@ obj-$(CONFIG_PPPOL2TP) += ppp/
obj-$(CONFIG_PPTP) += ppp/
obj-$(CONFIG_SLIP) += slip/
obj-$(CONFIG_SLHC) += slip/
-obj-$(CONFIG_NET_SB1000) += sb1000.o
obj-$(CONFIG_SUNGEM_PHY) += sungem_phy.o
obj-$(CONFIG_WAN) += wan/
obj-$(CONFIG_WLAN) += wireless/
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index dc50797a2ed0..c01e2c2f7d6c 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -67,8 +67,7 @@ static int netdev_boot_setup_add(char *name, struct ifmap *map)
s = dev_boot_setup;
for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
- memset(s[i].name, 0, sizeof(s[i].name));
- strscpy(s[i].name, name, IFNAMSIZ);
+ strscpy_pad(s[i].name, name);
memcpy(&s[i].map, map, sizeof(s[i].map));
break;
}
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index 0433a0f36d1b..902c817a0dea 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -11,6 +11,7 @@
#include <linux/net.h>
#include <linux/igmp.h>
#include <linux/workqueue.h>
+#include <net/flow.h>
#include <net/pkt_sched.h>
#include <net/net_namespace.h>
#include <net/ip.h>
@@ -28,6 +29,7 @@
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/inet_common.h>
+#include <net/inet_dscp.h>
#include <net/ip6_checksum.h>
static struct workqueue_struct *amt_wq;
@@ -979,7 +981,7 @@ static void amt_event_send_request(struct amt_dev *amt)
amt->req_cnt++;
out:
exp = min_t(u32, (1 * (1 << amt->req_cnt)), AMT_MAX_REQ_TIMEOUT);
- mod_delayed_work(amt_wq, &amt->req_wq, msecs_to_jiffies(exp * 1000));
+ mod_delayed_work(amt_wq, &amt->req_wq, secs_to_jiffies(exp));
}
static void amt_req_work(struct work_struct *work)
@@ -1018,7 +1020,7 @@ static bool amt_send_membership_update(struct amt_dev *amt,
fl4.flowi4_oif = amt->stream_dev->ifindex;
fl4.daddr = amt->remote_ip;
fl4.saddr = amt->local_ip;
- fl4.flowi4_tos = AMT_TOS;
+ fl4.flowi4_dscp = inet_dsfield_to_dscp(AMT_TOS);
fl4.flowi4_proto = IPPROTO_UDP;
rt = ip_route_output_key(amt->net, &fl4);
if (IS_ERR(rt)) {
@@ -1046,7 +1048,8 @@ static bool amt_send_membership_update(struct amt_dev *amt,
amt->gw_port,
amt->relay_port,
false,
- false);
+ false,
+ 0);
amt_update_gw_status(amt, AMT_STATUS_SENT_UPDATE, true);
return false;
}
@@ -1103,7 +1106,8 @@ static void amt_send_multicast_data(struct amt_dev *amt,
amt->relay_port,
tunnel->source_port,
false,
- false);
+ false,
+ 0);
}
static bool amt_send_membership_query(struct amt_dev *amt,
@@ -1131,7 +1135,7 @@ static bool amt_send_membership_query(struct amt_dev *amt,
fl4.flowi4_oif = amt->stream_dev->ifindex;
fl4.daddr = tunnel->ip4;
fl4.saddr = amt->local_ip;
- fl4.flowi4_tos = AMT_TOS;
+ fl4.flowi4_dscp = inet_dsfield_to_dscp(AMT_TOS);
fl4.flowi4_proto = IPPROTO_UDP;
rt = ip_route_output_key(amt->net, &fl4);
if (IS_ERR(rt)) {
@@ -1161,7 +1165,8 @@ static bool amt_send_membership_query(struct amt_dev *amt,
amt->relay_port,
tunnel->source_port,
false,
- false);
+ false,
+ 0);
amt_update_relay_status(tunnel, AMT_STATUS_SENT_QUERY, true);
return false;
}
@@ -3099,7 +3104,7 @@ static void amt_link_setup(struct net_device *dev)
dev->addr_len = 0;
dev->priv_flags |= IFF_NO_QUEUE;
dev->lltx = true;
- dev->netns_local = true;
+ dev->netns_immutable = true;
dev->features |= NETIF_F_GSO_SOFTWARE;
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
dev->hw_features |= NETIF_F_FRAGLIST | NETIF_F_RXCSUM;
@@ -3161,14 +3166,17 @@ static int amt_validate(struct nlattr *tb[], struct nlattr *data[],
return 0;
}
-static int amt_newlink(struct net *net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int amt_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
struct amt_dev *amt = netdev_priv(dev);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
int err = -EINVAL;
- amt->net = net;
+ amt->net = link_net;
amt->mode = nla_get_u32(data[IFLA_AMT_MODE]);
if (data[IFLA_AMT_MAX_TUNNELS] &&
@@ -3183,7 +3191,7 @@ static int amt_newlink(struct net *net, struct net_device *dev,
amt->hash_buckets = AMT_HSIZE;
amt->nr_tunnels = 0;
get_random_bytes(&amt->hash_seed, sizeof(amt->hash_seed));
- amt->stream_dev = dev_get_by_index(net,
+ amt->stream_dev = dev_get_by_index(link_net,
nla_get_u32(data[IFLA_AMT_LINK]));
if (!amt->stream_dev) {
NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LINK],
@@ -3206,15 +3214,11 @@ static int amt_newlink(struct net *net, struct net_device *dev,
goto err;
}
- if (data[IFLA_AMT_RELAY_PORT])
- amt->relay_port = nla_get_be16(data[IFLA_AMT_RELAY_PORT]);
- else
- amt->relay_port = htons(IANA_AMT_UDP_PORT);
+ amt->relay_port = nla_get_be16_default(data[IFLA_AMT_RELAY_PORT],
+ htons(IANA_AMT_UDP_PORT));
- if (data[IFLA_AMT_GATEWAY_PORT])
- amt->gw_port = nla_get_be16(data[IFLA_AMT_GATEWAY_PORT]);
- else
- amt->gw_port = htons(IANA_AMT_UDP_PORT);
+ amt->gw_port = nla_get_be16_default(data[IFLA_AMT_GATEWAY_PORT],
+ htons(IANA_AMT_UDP_PORT));
if (!amt->relay_port) {
NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 530c15d6a5eb..882972604c82 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -382,7 +382,7 @@ static void arcdev_setup(struct net_device *dev)
static void arcnet_timer(struct timer_list *t)
{
- struct arcnet_local *lp = from_timer(lp, t, timer);
+ struct arcnet_local *lp = timer_container_of(lp, t, timer);
struct net_device *dev = lp->dev;
spin_lock_irq(&lp->lock);
@@ -616,7 +616,7 @@ int arcnet_close(struct net_device *dev)
struct arcnet_local *lp = netdev_priv(dev);
arcnet_led_event(dev, ARCNET_LED_EVENT_STOP);
- del_timer_sync(&lp->timer);
+ timer_delete_sync(&lp->timer);
netif_stop_queue(dev);
netif_carrier_off(dev);
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index c5e571ec94c9..0472bcdff130 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -251,18 +251,33 @@ static int com20020pci_probe(struct pci_dev *pdev,
card->tx_led.default_trigger = devm_kasprintf(&pdev->dev,
GFP_KERNEL, "arc%d-%d-tx",
dev->dev_id, i);
+ if (!card->tx_led.default_trigger) {
+ ret = -ENOMEM;
+ goto err_free_arcdev;
+ }
card->tx_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"pci:green:tx:%d-%d",
dev->dev_id, i);
-
+ if (!card->tx_led.name) {
+ ret = -ENOMEM;
+ goto err_free_arcdev;
+ }
card->tx_led.dev = &dev->dev;
card->recon_led.brightness_set = led_recon_set;
card->recon_led.default_trigger = devm_kasprintf(&pdev->dev,
GFP_KERNEL, "arc%d-%d-recon",
dev->dev_id, i);
+ if (!card->recon_led.default_trigger) {
+ ret = -ENOMEM;
+ goto err_free_arcdev;
+ }
card->recon_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"pci:red:recon:%d-%d",
dev->dev_id, i);
+ if (!card->recon_led.name) {
+ ret = -ENOMEM;
+ goto err_free_arcdev;
+ }
card->recon_led.dev = &dev->dev;
ret = devm_led_classdev_register(&pdev->dev, &card->tx_led);
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index a2abfade82dd..0df3208783ad 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -84,7 +84,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion,
sizeof(ipversion))) {
- dev_core_stats_rx_dropped_inc(bareudp->dev);
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
ipversion >>= 4;
@@ -94,7 +94,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
} else if (ipversion == 6 && bareudp->multi_proto_mode) {
proto = htons(ETH_P_IPV6);
} else {
- dev_core_stats_rx_dropped_inc(bareudp->dev);
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
} else if (bareudp->ethertype == htons(ETH_P_MPLS_UC)) {
@@ -108,7 +108,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
ipv4_is_multicast(tunnel_hdr->daddr)) {
proto = htons(ETH_P_MPLS_MC);
} else {
- dev_core_stats_rx_dropped_inc(bareudp->dev);
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
} else {
@@ -124,7 +124,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
(addr_type & IPV6_ADDR_MULTICAST)) {
proto = htons(ETH_P_MPLS_MC);
} else {
- dev_core_stats_rx_dropped_inc(bareudp->dev);
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
}
@@ -136,7 +136,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
proto,
!net_eq(bareudp->net,
dev_net(bareudp->dev)))) {
- dev_core_stats_rx_dropped_inc(bareudp->dev);
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
@@ -144,7 +144,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
tun_dst = udp_tun_rx_dst(skb, family, key, 0, 0);
if (!tun_dst) {
- dev_core_stats_rx_dropped_inc(bareudp->dev);
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
skb_dst_set(skb, &tun_dst->dst);
@@ -194,7 +194,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
len = skb->len;
err = gro_cells_receive(&bareudp->gro_cells, skb);
if (likely(err == NET_RX_SUCCESS))
- dev_sw_netstats_rx_add(bareudp->dev, len);
+ dev_dstats_rx_add(bareudp->dev, len);
return 0;
drop:
@@ -362,8 +362,8 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
udp_tunnel_xmit_skb(rt, sock->sk, skb, saddr, info->key.u.ipv4.dst,
tos, ttl, df, sport, bareudp->port,
!net_eq(bareudp->net, dev_net(bareudp->dev)),
- !test_bit(IP_TUNNEL_CSUM_BIT,
- info->key.tun_flags));
+ !test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags),
+ 0);
return 0;
free_dst:
@@ -431,7 +431,8 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
&saddr, &daddr, prio, ttl,
info->key.label, sport, bareudp->port,
!test_bit(IP_TUNNEL_CSUM_BIT,
- info->key.tun_flags));
+ info->key.tun_flags),
+ 0);
return 0;
free_dst:
@@ -589,7 +590,7 @@ static void bareudp_setup(struct net_device *dev)
dev->priv_flags |= IFF_NO_QUEUE;
dev->lltx = true;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
- dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
}
static int bareudp_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -698,10 +699,13 @@ static void bareudp_dellink(struct net_device *dev, struct list_head *head)
unregister_netdevice_queue(dev, head);
}
-static int bareudp_newlink(struct net *net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int bareudp_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct bareudp_conf conf;
int err;
@@ -709,7 +713,7 @@ static int bareudp_newlink(struct net *net, struct net_device *dev,
if (err)
return err;
- err = bareudp_configure(net, dev, &conf, extack);
+ err = bareudp_configure(link_net, dev, &conf, extack);
if (err)
return err;
@@ -774,27 +778,19 @@ static __net_init int bareudp_init_net(struct net *net)
return 0;
}
-static void bareudp_destroy_tunnels(struct net *net, struct list_head *head)
+static void __net_exit bareudp_exit_rtnl_net(struct net *net,
+ struct list_head *dev_kill_list)
{
struct bareudp_net *bn = net_generic(net, bareudp_net_id);
struct bareudp_dev *bareudp, *next;
list_for_each_entry_safe(bareudp, next, &bn->bareudp_list, next)
- unregister_netdevice_queue(bareudp->dev, head);
-}
-
-static void __net_exit bareudp_exit_batch_rtnl(struct list_head *net_list,
- struct list_head *dev_kill_list)
-{
- struct net *net;
-
- list_for_each_entry(net, net_list, exit_list)
- bareudp_destroy_tunnels(net, dev_kill_list);
+ bareudp_dellink(bareudp->dev, dev_kill_list);
}
static struct pernet_operations bareudp_net_ops = {
.init = bareudp_init_net,
- .exit_batch_rtnl = bareudp_exit_batch_rtnl,
+ .exit_rtnl = bareudp_exit_rtnl_net,
.id = &bareudp_net_id,
.size = sizeof(struct bareudp_net),
};
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index c6807e473ab7..1a8de2bf8655 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -76,6 +76,7 @@ enum ad_link_speed_type {
AD_LINK_SPEED_200000MBPS,
AD_LINK_SPEED_400000MBPS,
AD_LINK_SPEED_800000MBPS,
+ AD_LINK_SPEED_1600000MBPS,
};
/* compare MAC addresses */
@@ -95,13 +96,13 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker);
static void ad_mux_machine(struct port *port, bool *update_slave_arr);
static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port);
static void ad_tx_machine(struct port *port);
-static void ad_periodic_machine(struct port *port, struct bond_params *bond_params);
+static void ad_periodic_machine(struct port *port);
static void ad_port_selection_logic(struct port *port, bool *update_slave_arr);
static void ad_agg_selection_logic(struct aggregator *aggregator,
bool *update_slave_arr);
static void ad_clear_agg(struct aggregator *aggregator);
static void ad_initialize_agg(struct aggregator *aggregator);
-static void ad_initialize_port(struct port *port, int lacp_fast);
+static void ad_initialize_port(struct port *port, const struct bond_params *bond_params);
static void ad_enable_collecting(struct port *port);
static void ad_disable_distributing(struct port *port,
bool *update_slave_arr);
@@ -300,6 +301,7 @@ static inline int __check_agg_selection_timer(struct port *port)
* %AD_LINK_SPEED_200000MBPS
* %AD_LINK_SPEED_400000MBPS
* %AD_LINK_SPEED_800000MBPS
+ * %AD_LINK_SPEED_1600000MBPS
*/
static u16 __get_link_speed(struct port *port)
{
@@ -379,6 +381,10 @@ static u16 __get_link_speed(struct port *port)
speed = AD_LINK_SPEED_800000MBPS;
break;
+ case SPEED_1600000:
+ speed = AD_LINK_SPEED_1600000MBPS;
+ break;
+
default:
/* unknown speed value from ethtool. shouldn't happen */
if (slave->speed != SPEED_UNKNOWN)
@@ -436,6 +442,7 @@ static void __ad_actor_update_port(struct port *port)
port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr;
port->actor_system_priority = BOND_AD_INFO(bond).system.sys_priority;
+ port->actor_port_priority = SLAVE_AD_INFO(port->slave)->port_priority;
}
/* Conversions */
@@ -746,6 +753,18 @@ static int __agg_active_ports(struct aggregator *agg)
return active;
}
+static unsigned int __agg_ports_priority(const struct aggregator *agg)
+{
+ struct port *port = agg->lag_ports;
+ unsigned int prio = 0;
+
+ for (; port; port = port->next_port_in_aggregator)
+ if (port->is_enabled)
+ prio += port->actor_port_priority;
+
+ return prio;
+}
+
/**
* __get_agg_bandwidth - get the total bandwidth of an aggregator
* @aggregator: the aggregator we're looking at
@@ -809,6 +828,9 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
case AD_LINK_SPEED_800000MBPS:
bandwidth = nports * 800000;
break;
+ case AD_LINK_SPEED_1600000MBPS:
+ bandwidth = nports * 1600000;
+ break;
default:
bandwidth = 0; /* to silence the compiler */
}
@@ -982,6 +1004,17 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker)
return 0;
}
+static void ad_cond_set_peer_notif(struct port *port)
+{
+ struct bonding *bond = port->slave->bond;
+
+ if (bond->params.broadcast_neighbor && rtnl_trylock()) {
+ bond->send_peer_notif = bond->params.num_peer_notif *
+ max(1, bond->params.peer_notif_delay);
+ rtnl_unlock();
+ }
+}
+
/**
* ad_mux_machine - handle a port's mux state machine
* @port: the port we're looking at
@@ -1296,10 +1329,16 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
* case of EXPIRED even if LINK_DOWN didn't arrive for
* the port.
*/
- port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION;
port->sm_vars &= ~AD_PORT_MATCHED;
+ /* Based on IEEE 8021AX-2014, Figure 6-18 - Receive
+ * machine state diagram, the state should be
+ * Partner_Oper_Port_State.Synchronization = FALSE;
+ * Partner_Oper_Port_State.LACP_Timeout = Short Timeout;
+ * start current_while_timer(Short Timeout);
+ * Actor_Oper_Port_State.Expired = TRUE;
+ */
+ port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION;
port->partner_oper.port_state |= LACP_STATE_LACP_TIMEOUT;
- port->partner_oper.port_state |= LACP_STATE_LACP_ACTIVITY;
port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT));
port->actor_oper_port_state |= LACP_STATE_EXPIRED;
port->sm_vars |= AD_PORT_CHURNED;
@@ -1378,7 +1417,7 @@ static void ad_tx_machine(struct port *port)
/* check if tx timer expired, to verify that we do not send more than
* 3 packets per second
*/
- if (port->sm_tx_timer_counter && !(--port->sm_tx_timer_counter)) {
+ if (!port->sm_tx_timer_counter || !(--port->sm_tx_timer_counter)) {
/* check if there is something to send */
if (port->ntt && (port->sm_vars & AD_PORT_LACP_ENABLED)) {
__update_lacpdu_from_port(port);
@@ -1393,23 +1432,23 @@ static void ad_tx_machine(struct port *port)
* again until demanded
*/
port->ntt = false;
+
+ /* restart tx timer(to verify that we will not
+ * exceed AD_MAX_TX_IN_SECOND
+ */
+ port->sm_tx_timer_counter = ad_ticks_per_sec / AD_MAX_TX_IN_SECOND;
}
}
- /* restart tx timer(to verify that we will not exceed
- * AD_MAX_TX_IN_SECOND
- */
- port->sm_tx_timer_counter = ad_ticks_per_sec/AD_MAX_TX_IN_SECOND;
}
}
/**
* ad_periodic_machine - handle a port's periodic state machine
* @port: the port we're looking at
- * @bond_params: bond parameters we will use
*
* Turn ntt flag on periodically to perform periodic transmission of lacpdu's.
*/
-static void ad_periodic_machine(struct port *port, struct bond_params *bond_params)
+static void ad_periodic_machine(struct port *port)
{
periodic_states_t last_state;
@@ -1418,8 +1457,7 @@ static void ad_periodic_machine(struct port *port, struct bond_params *bond_para
/* check if port was reinitialized */
if (((port->sm_vars & AD_PORT_BEGIN) || !(port->sm_vars & AD_PORT_LACP_ENABLED) || !port->is_enabled) ||
- (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY)) ||
- !bond_params->lacp_active) {
+ (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY))) {
port->sm_periodic_state = AD_NO_PERIODIC;
}
/* check if state machine should change state */
@@ -1691,6 +1729,9 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
* 4. Therefore, current and best both have partner replies or
* both do not, so perform selection policy:
*
+ * BOND_AD_PRIO: Select by total priority of ports. If priority
+ * is equal, select by count.
+ *
* BOND_AD_COUNT: Select by count of ports. If count is equal,
* select by bandwidth.
*
@@ -1712,6 +1753,14 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
return best;
switch (__get_agg_selection_mode(curr->lag_ports)) {
+ case BOND_AD_PRIO:
+ if (__agg_ports_priority(curr) > __agg_ports_priority(best))
+ return curr;
+
+ if (__agg_ports_priority(curr) < __agg_ports_priority(best))
+ return best;
+
+ fallthrough;
case BOND_AD_COUNT:
if (__agg_active_ports(curr) > __agg_active_ports(best))
return curr;
@@ -1777,6 +1826,10 @@ static int agg_device_up(const struct aggregator *agg)
* (slaves), and reselect whenever a link state change takes place or the
* set of slaves in the bond changes.
*
+ * BOND_AD_PRIO: select the aggregator with highest total priority of ports
+ * (slaves), and reselect whenever a link state change takes place or the
+ * set of slaves in the bond changes.
+ *
* FIXME: this function MUST be called with the first agg in the bond, or
* __get_active_agg() won't work correctly. This function should be better
* called with the bond itself, and retrieve the first agg from it.
@@ -1943,16 +1996,16 @@ static void ad_initialize_agg(struct aggregator *aggregator)
/**
* ad_initialize_port - initialize a given port's parameters
* @port: the port we're looking at
- * @lacp_fast: boolean. whether fast periodic should be used
+ * @bond_params: bond parameters we will use
*/
-static void ad_initialize_port(struct port *port, int lacp_fast)
+static void ad_initialize_port(struct port *port, const struct bond_params *bond_params)
{
static const struct port_params tmpl = {
.system_priority = 0xffff,
.key = 1,
.port_number = 1,
.port_priority = 0xff,
- .port_state = 1,
+ .port_state = 0,
};
static const struct lacpdu lacpdu = {
.subtype = 0x01,
@@ -1970,12 +2023,14 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
port->actor_port_priority = 0xff;
port->actor_port_aggregator_identifier = 0;
port->ntt = false;
- port->actor_admin_port_state = LACP_STATE_AGGREGATION |
- LACP_STATE_LACP_ACTIVITY;
- port->actor_oper_port_state = LACP_STATE_AGGREGATION |
- LACP_STATE_LACP_ACTIVITY;
+ port->actor_admin_port_state = LACP_STATE_AGGREGATION;
+ port->actor_oper_port_state = LACP_STATE_AGGREGATION;
+ if (bond_params->lacp_active) {
+ port->actor_admin_port_state |= LACP_STATE_LACP_ACTIVITY;
+ port->actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY;
+ }
- if (lacp_fast)
+ if (bond_params->lacp_fast)
port->actor_oper_port_state |= LACP_STATE_LACP_TIMEOUT;
memcpy(&port->partner_admin, &tmpl, sizeof(tmpl));
@@ -2061,6 +2116,8 @@ static void ad_enable_collecting_distributing(struct port *port,
__enable_port(port);
/* Slave array needs update */
*update_slave_arr = true;
+ /* Should notify peers if possible */
+ ad_cond_set_peer_notif(port);
}
}
@@ -2187,7 +2244,10 @@ void bond_3ad_bind_slave(struct slave *slave)
/* port initialization */
port = &(SLAVE_AD_INFO(slave)->port);
- ad_initialize_port(port, bond->params.lacp_fast);
+ ad_initialize_port(port, &bond->params);
+
+ /* Port priority is initialized. Update it to slave's ad info */
+ SLAVE_AD_INFO(slave)->port_priority = port->actor_port_priority;
port->slave = slave;
port->actor_port_number = SLAVE_AD_INFO(slave)->id;
@@ -2499,7 +2559,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
}
ad_rx_machine(NULL, port);
- ad_periodic_machine(port, &bond->params);
+ ad_periodic_machine(port);
ad_port_selection_logic(port, &update_slave_arr);
ad_mux_machine(port, &update_slave_arr);
ad_tx_machine(port);
@@ -2869,6 +2929,31 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
spin_unlock_bh(&bond->mode_lock);
}
+/**
+ * bond_3ad_update_lacp_active - change the lacp active
+ * @bond: bonding struct
+ *
+ * Update actor_oper_port_state when lacp_active is modified.
+ */
+void bond_3ad_update_lacp_active(struct bonding *bond)
+{
+ struct port *port = NULL;
+ struct list_head *iter;
+ struct slave *slave;
+ int lacp_active;
+
+ lacp_active = bond->params.lacp_active;
+ spin_lock_bh(&bond->mode_lock);
+ bond_for_each_slave(bond, slave, iter) {
+ port = &(SLAVE_AD_INFO(slave)->port);
+ if (lacp_active)
+ port->actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY;
+ else
+ port->actor_oper_port_state &= ~LACP_STATE_LACP_ACTIVITY;
+ }
+ spin_unlock_bh(&bond->mode_lock);
+}
+
size_t bond_3ad_stats_size(void)
{
return nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_RX */
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 7edf0fd58c34..2d37b07c8215 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1035,7 +1035,7 @@ static int alb_set_slave_mac_addr(struct slave *slave, const u8 addr[],
*/
memcpy(ss.__data, addr, len);
ss.ss_family = dev->type;
- if (dev_set_mac_address(dev, (struct sockaddr *)&ss, NULL)) {
+ if (dev_set_mac_address(dev, &ss, NULL)) {
slave_err(slave->bond->dev, dev, "dev_set_mac_address on slave failed! ALB mode requires that the base driver support setting the hw address also when the network device's interface is open\n");
return -EOPNOTSUPP;
}
@@ -1273,8 +1273,7 @@ unwind:
break;
bond_hw_addr_copy(tmp_addr, rollback_slave->dev->dev_addr,
rollback_slave->dev->addr_len);
- dev_set_mac_address(rollback_slave->dev,
- (struct sockaddr *)&ss, NULL);
+ dev_set_mac_address(rollback_slave->dev, &ss, NULL);
dev_addr_set(rollback_slave->dev, tmp_addr);
}
@@ -1763,8 +1762,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
bond->dev->addr_len);
ss.ss_family = bond->dev->type;
/* we don't care if it can't change its mac, best effort */
- dev_set_mac_address(new_slave->dev, (struct sockaddr *)&ss,
- NULL);
+ dev_set_mac_address(new_slave->dev, &ss, NULL);
dev_addr_set(new_slave->dev, tmp_addr);
}
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index b19492a7f6ad..8adbec7c5084 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -63,13 +63,8 @@ void bond_debug_unregister(struct bonding *bond)
void bond_debug_reregister(struct bonding *bond)
{
- struct dentry *d;
-
- d = debugfs_rename(bonding_debug_root, bond->debug_dir,
- bonding_debug_root, bond->dev->name);
- if (!IS_ERR(d)) {
- bond->debug_dir = d;
- } else {
+ int err = debugfs_change_name(bond->debug_dir, "%s", bond->dev->name);
+ if (err) {
netdev_warn(bond->dev, "failed to reregister, so just unregister old one\n");
bond_debug_unregister(bond);
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 5812e8eaccf1..3d56339a8a10 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -90,6 +90,7 @@
#include <net/tls.h>
#endif
#include <net/ip6_route.h>
+#include <net/netdev_lock.h>
#include <net/xdp.h>
#include "bonding_priv.h"
@@ -141,8 +142,7 @@ module_param(downdelay, int, 0);
MODULE_PARM_DESC(downdelay, "Delay before considering link down, "
"in milliseconds");
module_param(use_carrier, int, 0);
-MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
- "0 for off, 1 for on (default)");
+MODULE_PARM_DESC(use_carrier, "option obsolete, use_carrier cannot be disabled");
module_param(mode, charp, 0);
MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, "
"1 for active-backup, 2 for balance-xor, "
@@ -211,6 +211,8 @@ atomic_t netpoll_block_tx = ATOMIC_INIT(0);
unsigned int bond_net_id __read_mostly;
+DEFINE_STATIC_KEY_FALSE(bond_bcast_neigh_enabled);
+
static const struct flow_dissector_key flow_keys_bonding_keys[] = {
{
.key_id = FLOW_DISSECTOR_KEY_CONTROL,
@@ -322,9 +324,9 @@ static bool bond_sk_check(struct bonding *bond)
}
}
-static bool bond_xdp_check(struct bonding *bond)
+bool bond_xdp_check(struct bonding *bond, int mode)
{
- switch (BOND_MODE(bond)) {
+ switch (mode) {
case BOND_MODE_ROUNDROBIN:
case BOND_MODE_ACTIVEBACKUP:
return true;
@@ -432,9 +434,6 @@ static struct net_device *bond_ipsec_dev(struct xfrm_state *xs)
struct bonding *bond;
struct slave *slave;
- if (!bond_dev)
- return NULL;
-
bond = netdev_priv(bond_dev);
if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)
return NULL;
@@ -455,13 +454,14 @@ static struct net_device *bond_ipsec_dev(struct xfrm_state *xs)
/**
* bond_ipsec_add_sa - program device with a security association
+ * @bond_dev: pointer to the bond net device
* @xs: pointer to transformer state struct
* @extack: extack point to fill failure reason
**/
-static int bond_ipsec_add_sa(struct xfrm_state *xs,
+static int bond_ipsec_add_sa(struct net_device *bond_dev,
+ struct xfrm_state *xs,
struct netlink_ext_ack *extack)
{
- struct net_device *bond_dev = xs->xso.dev;
struct net_device *real_dev;
netdevice_tracker tracker;
struct bond_ipsec *ipsec;
@@ -497,9 +497,9 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs,
goto out;
}
- xs->xso.real_dev = real_dev;
- err = real_dev->xfrmdev_ops->xdo_dev_state_add(xs, extack);
+ err = real_dev->xfrmdev_ops->xdo_dev_state_add(real_dev, xs, extack);
if (!err) {
+ xs->xso.real_dev = real_dev;
ipsec->xs = xs;
INIT_LIST_HEAD(&ipsec->list);
mutex_lock(&bond->ipsec_lock);
@@ -541,11 +541,25 @@ static void bond_ipsec_add_sa_all(struct bonding *bond)
if (ipsec->xs->xso.real_dev == real_dev)
continue;
- ipsec->xs->xso.real_dev = real_dev;
- if (real_dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs, NULL)) {
+ if (real_dev->xfrmdev_ops->xdo_dev_state_add(real_dev,
+ ipsec->xs, NULL)) {
slave_warn(bond_dev, real_dev, "%s: failed to add SA\n", __func__);
- ipsec->xs->xso.real_dev = NULL;
+ continue;
}
+
+ spin_lock_bh(&ipsec->xs->lock);
+ /* xs might have been killed by the user during the migration
+ * to the new dev, but bond_ipsec_del_sa() should have done
+ * nothing, as xso.real_dev is NULL.
+ * Delete it from the device we just added it to. The pending
+ * bond_ipsec_free_sa() call will do the rest of the cleanup.
+ */
+ if (ipsec->xs->km.state == XFRM_STATE_DEAD &&
+ real_dev->xfrmdev_ops->xdo_dev_state_delete)
+ real_dev->xfrmdev_ops->xdo_dev_state_delete(real_dev,
+ ipsec->xs);
+ ipsec->xs->xso.real_dev = real_dev;
+ spin_unlock_bh(&ipsec->xs->lock);
}
out:
mutex_unlock(&bond->ipsec_lock);
@@ -553,54 +567,27 @@ out:
/**
* bond_ipsec_del_sa - clear out this specific SA
+ * @bond_dev: pointer to the bond net device
* @xs: pointer to transformer state struct
**/
-static void bond_ipsec_del_sa(struct xfrm_state *xs)
+static void bond_ipsec_del_sa(struct net_device *bond_dev,
+ struct xfrm_state *xs)
{
- struct net_device *bond_dev = xs->xso.dev;
struct net_device *real_dev;
- netdevice_tracker tracker;
- struct bond_ipsec *ipsec;
- struct bonding *bond;
- struct slave *slave;
- if (!bond_dev)
+ if (!bond_dev || !xs->xso.real_dev)
return;
- rcu_read_lock();
- bond = netdev_priv(bond_dev);
- slave = rcu_dereference(bond->curr_active_slave);
- real_dev = slave ? slave->dev : NULL;
- netdev_hold(real_dev, &tracker, GFP_ATOMIC);
- rcu_read_unlock();
-
- if (!slave)
- goto out;
-
- if (!xs->xso.real_dev)
- goto out;
-
- WARN_ON(xs->xso.real_dev != real_dev);
+ real_dev = xs->xso.real_dev;
if (!real_dev->xfrmdev_ops ||
!real_dev->xfrmdev_ops->xdo_dev_state_delete ||
netif_is_bond_master(real_dev)) {
slave_warn(bond_dev, real_dev, "%s: no slave xdo_dev_state_delete\n", __func__);
- goto out;
+ return;
}
- real_dev->xfrmdev_ops->xdo_dev_state_delete(xs);
-out:
- netdev_put(real_dev, &tracker);
- mutex_lock(&bond->ipsec_lock);
- list_for_each_entry(ipsec, &bond->ipsec_list, list) {
- if (ipsec->xs == xs) {
- list_del(&ipsec->list);
- kfree(ipsec);
- break;
- }
- }
- mutex_unlock(&bond->ipsec_lock);
+ real_dev->xfrmdev_ops->xdo_dev_state_delete(real_dev, xs);
}
static void bond_ipsec_del_sa_all(struct bonding *bond)
@@ -626,46 +613,55 @@ static void bond_ipsec_del_sa_all(struct bonding *bond)
slave_warn(bond_dev, real_dev,
"%s: no slave xdo_dev_state_delete\n",
__func__);
- } else {
- real_dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
- if (real_dev->xfrmdev_ops->xdo_dev_state_free)
- real_dev->xfrmdev_ops->xdo_dev_state_free(ipsec->xs);
+ continue;
}
+
+ spin_lock_bh(&ipsec->xs->lock);
+ ipsec->xs->xso.real_dev = NULL;
+ /* Don't double delete states killed by the user. */
+ if (ipsec->xs->km.state != XFRM_STATE_DEAD)
+ real_dev->xfrmdev_ops->xdo_dev_state_delete(real_dev,
+ ipsec->xs);
+ spin_unlock_bh(&ipsec->xs->lock);
+
+ if (real_dev->xfrmdev_ops->xdo_dev_state_free)
+ real_dev->xfrmdev_ops->xdo_dev_state_free(real_dev,
+ ipsec->xs);
}
mutex_unlock(&bond->ipsec_lock);
}
-static void bond_ipsec_free_sa(struct xfrm_state *xs)
+static void bond_ipsec_free_sa(struct net_device *bond_dev,
+ struct xfrm_state *xs)
{
- struct net_device *bond_dev = xs->xso.dev;
struct net_device *real_dev;
- netdevice_tracker tracker;
+ struct bond_ipsec *ipsec;
struct bonding *bond;
- struct slave *slave;
if (!bond_dev)
return;
- rcu_read_lock();
bond = netdev_priv(bond_dev);
- slave = rcu_dereference(bond->curr_active_slave);
- real_dev = slave ? slave->dev : NULL;
- netdev_hold(real_dev, &tracker, GFP_ATOMIC);
- rcu_read_unlock();
-
- if (!slave)
- goto out;
+ mutex_lock(&bond->ipsec_lock);
if (!xs->xso.real_dev)
goto out;
- WARN_ON(xs->xso.real_dev != real_dev);
+ real_dev = xs->xso.real_dev;
- if (real_dev && real_dev->xfrmdev_ops &&
+ xs->xso.real_dev = NULL;
+ if (real_dev->xfrmdev_ops &&
real_dev->xfrmdev_ops->xdo_dev_state_free)
- real_dev->xfrmdev_ops->xdo_dev_state_free(xs);
+ real_dev->xfrmdev_ops->xdo_dev_state_free(real_dev, xs);
out:
- netdev_put(real_dev, &tracker);
+ list_for_each_entry(ipsec, &bond->ipsec_list, list) {
+ if (ipsec->xs == xs) {
+ list_del(&ipsec->list);
+ kfree(ipsec);
+ break;
+ }
+ }
+ mutex_unlock(&bond->ipsec_lock);
}
/**
@@ -676,22 +672,16 @@ out:
static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
struct net_device *real_dev;
- bool ok = false;
rcu_read_lock();
real_dev = bond_ipsec_dev(xs);
- if (!real_dev)
- goto out;
-
- if (!real_dev->xfrmdev_ops ||
- !real_dev->xfrmdev_ops->xdo_dev_offload_ok ||
- netif_is_bond_master(real_dev))
- goto out;
+ if (!real_dev || netif_is_bond_master(real_dev)) {
+ rcu_read_unlock();
+ return false;
+ }
- ok = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
-out:
rcu_read_unlock();
- return ok;
+ return true;
}
/**
@@ -839,73 +829,6 @@ const char *bond_slave_link_status(s8 link)
}
}
-/* if <dev> supports MII link status reporting, check its link status.
- *
- * We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(),
- * depending upon the setting of the use_carrier parameter.
- *
- * Return either BMSR_LSTATUS, meaning that the link is up (or we
- * can't tell and just pretend it is), or 0, meaning that the link is
- * down.
- *
- * If reporting is non-zero, instead of faking link up, return -1 if
- * both ETHTOOL and MII ioctls fail (meaning the device does not
- * support them). If use_carrier is set, return whatever it says.
- * It'd be nice if there was a good way to tell if a driver supports
- * netif_carrier, but there really isn't.
- */
-static int bond_check_dev_link(struct bonding *bond,
- struct net_device *slave_dev, int reporting)
-{
- const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
- int (*ioctl)(struct net_device *, struct ifreq *, int);
- struct ifreq ifr;
- struct mii_ioctl_data *mii;
-
- if (!reporting && !netif_running(slave_dev))
- return 0;
-
- if (bond->params.use_carrier)
- return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;
-
- /* Try to get link status using Ethtool first. */
- if (slave_dev->ethtool_ops->get_link)
- return slave_dev->ethtool_ops->get_link(slave_dev) ?
- BMSR_LSTATUS : 0;
-
- /* Ethtool can't be used, fallback to MII ioctls. */
- ioctl = slave_ops->ndo_eth_ioctl;
- if (ioctl) {
- /* TODO: set pointer to correct ioctl on a per team member
- * bases to make this more efficient. that is, once
- * we determine the correct ioctl, we will always
- * call it and not the others for that team
- * member.
- */
-
- /* We cannot assume that SIOCGMIIPHY will also read a
- * register; not all network drivers (e.g., e100)
- * support that.
- */
-
- /* Yes, the mii is overlaid on the ifreq.ifr_ifru */
- strscpy_pad(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
- mii = if_mii(&ifr);
- if (ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
- mii->reg_num = MII_BMSR;
- if (ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0)
- return mii->val_out & BMSR_LSTATUS;
- }
- }
-
- /* If reporting, report that either there's no ndo_eth_ioctl,
- * or both SIOCGMIIREG and get_link failed (meaning that we
- * cannot report link status). If not reporting, pretend
- * we're ok.
- */
- return reporting ? -1 : BMSR_LSTATUS;
-}
-
/*----------------------------- Multicast list ------------------------------*/
/* Push the promiscuity flag down to appropriate slaves */
@@ -1008,6 +931,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_UP)
bond_hw_addr_flush(bond->dev, old_active->dev);
+
+ bond_slave_ns_maddrs_add(bond, old_active);
}
if (new_active) {
@@ -1024,6 +949,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
dev_mc_sync(new_active->dev, bond->dev);
netif_addr_unlock_bh(bond->dev);
}
+
+ bond_slave_ns_maddrs_del(bond, new_active);
}
}
@@ -1041,7 +968,7 @@ static int bond_set_dev_addr(struct net_device *bond_dev,
slave_dbg(bond_dev, slave_dev, "bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n",
bond_dev, slave_dev, slave_dev->addr_len);
- err = dev_pre_changeaddr_notify(bond_dev, slave_dev->dev_addr, NULL);
+ err = netif_pre_changeaddr_notify(bond_dev, slave_dev->dev_addr, NULL);
if (err)
return err;
@@ -1115,8 +1042,7 @@ static void bond_do_fail_over_mac(struct bonding *bond,
ss.ss_family = bond->dev->type;
}
- rv = dev_set_mac_address(new_active->dev,
- (struct sockaddr *)&ss, NULL);
+ rv = dev_set_mac_address(new_active->dev, &ss, NULL);
if (rv) {
slave_err(bond->dev, new_active->dev, "Error %d setting MAC of new active slave\n",
-rv);
@@ -1130,8 +1056,7 @@ static void bond_do_fail_over_mac(struct bonding *bond,
new_active->dev->addr_len);
ss.ss_family = old_active->dev->type;
- rv = dev_set_mac_address(old_active->dev,
- (struct sockaddr *)&ss, NULL);
+ rv = dev_set_mac_address(old_active->dev, &ss, NULL);
if (rv)
slave_err(bond->dev, old_active->dev, "Error %d setting MAC of old active slave\n",
-rv);
@@ -1240,17 +1165,32 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
/* must be called in RCU critical section or with RTNL held */
static bool bond_should_notify_peers(struct bonding *bond)
{
- struct slave *slave = rcu_dereference_rtnl(bond->curr_active_slave);
+ struct bond_up_slave *usable;
+ struct slave *slave = NULL;
- if (!slave || !bond->send_peer_notif ||
+ if (!bond->send_peer_notif ||
bond->send_peer_notif %
max(1, bond->params.peer_notif_delay) != 0 ||
- !netif_carrier_ok(bond->dev) ||
- test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
+ !netif_carrier_ok(bond->dev))
return false;
+ /* The send_peer_notif is set by active-backup or 802.3ad
+ * mode, and cleared in bond_close() when changing mode.
+ * It is safe to only check bond mode here.
+ */
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+ usable = rcu_dereference_rtnl(bond->usable_slaves);
+ if (!usable || !READ_ONCE(usable->count))
+ return false;
+ } else {
+ slave = rcu_dereference_rtnl(bond->curr_active_slave);
+ if (!slave || test_bit(__LINK_STATE_LINKWATCH_PENDING,
+ &slave->dev->state))
+ return false;
+ }
+
netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n",
- slave ? slave->dev->name : "NULL");
+ slave ? slave->dev->name : "all");
return true;
}
@@ -1516,9 +1456,7 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
struct slave *slave;
mask = features;
-
- features &= ~NETIF_F_ONE_FOR_ALL;
- features |= NETIF_F_ALL_FOR_ALL;
+ features = netdev_base_features(features);
bond_for_each_slave(bond, slave, iter) {
features = netdev_increment_features(features,
@@ -1530,86 +1468,6 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
return features;
}
-#define BOND_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
- NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
- NETIF_F_HIGHDMA | NETIF_F_LRO)
-
-#define BOND_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
- NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)
-
-#define BOND_MPLS_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
- NETIF_F_GSO_SOFTWARE)
-
-
-static void bond_compute_features(struct bonding *bond)
-{
- unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
- IFF_XMIT_DST_RELEASE_PERM;
- netdev_features_t vlan_features = BOND_VLAN_FEATURES;
- netdev_features_t enc_features = BOND_ENC_FEATURES;
-#ifdef CONFIG_XFRM_OFFLOAD
- netdev_features_t xfrm_features = BOND_XFRM_FEATURES;
-#endif /* CONFIG_XFRM_OFFLOAD */
- netdev_features_t mpls_features = BOND_MPLS_FEATURES;
- struct net_device *bond_dev = bond->dev;
- struct list_head *iter;
- struct slave *slave;
- unsigned short max_hard_header_len = ETH_HLEN;
- unsigned int tso_max_size = TSO_MAX_SIZE;
- u16 tso_max_segs = TSO_MAX_SEGS;
-
- if (!bond_has_slaves(bond))
- goto done;
- vlan_features &= NETIF_F_ALL_FOR_ALL;
- mpls_features &= NETIF_F_ALL_FOR_ALL;
-
- bond_for_each_slave(bond, slave, iter) {
- vlan_features = netdev_increment_features(vlan_features,
- slave->dev->vlan_features, BOND_VLAN_FEATURES);
-
- enc_features = netdev_increment_features(enc_features,
- slave->dev->hw_enc_features,
- BOND_ENC_FEATURES);
-
-#ifdef CONFIG_XFRM_OFFLOAD
- xfrm_features = netdev_increment_features(xfrm_features,
- slave->dev->hw_enc_features,
- BOND_XFRM_FEATURES);
-#endif /* CONFIG_XFRM_OFFLOAD */
-
- mpls_features = netdev_increment_features(mpls_features,
- slave->dev->mpls_features,
- BOND_MPLS_FEATURES);
-
- dst_release_flag &= slave->dev->priv_flags;
- if (slave->dev->hard_header_len > max_hard_header_len)
- max_hard_header_len = slave->dev->hard_header_len;
-
- tso_max_size = min(tso_max_size, slave->dev->tso_max_size);
- tso_max_segs = min(tso_max_segs, slave->dev->tso_max_segs);
- }
- bond_dev->hard_header_len = max_hard_header_len;
-
-done:
- bond_dev->vlan_features = vlan_features;
- bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_TX;
-#ifdef CONFIG_XFRM_OFFLOAD
- bond_dev->hw_enc_features |= xfrm_features;
-#endif /* CONFIG_XFRM_OFFLOAD */
- bond_dev->mpls_features = mpls_features;
- netif_set_tso_max_segs(bond_dev, tso_max_segs);
- netif_set_tso_max_size(bond_dev, tso_max_size);
-
- bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
- if ((bond_dev->priv_flags & IFF_XMIT_DST_RELEASE_PERM) &&
- dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
- bond_dev->priv_flags |= IFF_XMIT_DST_RELEASE;
-
- netdev_change_features(bond_dev);
-}
-
static void bond_setup_by_slave(struct net_device *bond_dev,
struct net_device *slave_dev)
{
@@ -1924,7 +1782,7 @@ void bond_xdp_set_features(struct net_device *bond_dev)
ASSERT_RTNL();
- if (!bond_xdp_check(bond) || !bond_has_slaves(bond)) {
+ if (!bond_xdp_check(bond, BOND_MODE(bond)) || !bond_has_slaves(bond)) {
xdp_clear_features_flag(bond_dev);
return;
}
@@ -1945,7 +1803,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
struct slave *new_slave = NULL, *prev_slave;
struct sockaddr_storage ss;
- int link_reporting;
int res = 0, i;
if (slave_dev->flags & IFF_MASTER &&
@@ -1955,12 +1812,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
return -EPERM;
}
- if (!bond->params.use_carrier &&
- slave_dev->ethtool_ops->get_link == NULL &&
- slave_ops->ndo_eth_ioctl == NULL) {
- slave_warn(bond_dev, slave_dev, "no link monitoring support\n");
- }
-
/* already in-use? */
if (netdev_is_rx_handler_busy(slave_dev)) {
SLAVE_NL_ERR(bond_dev, slave_dev, extack,
@@ -2109,15 +1960,27 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
* set the master's mac address to that of the first slave
*/
memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
- ss.ss_family = slave_dev->type;
- res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss,
- extack);
- if (res) {
- slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n", res);
- goto err_restore_mtu;
- }
+ } else if (bond->params.fail_over_mac == BOND_FOM_FOLLOW &&
+ BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
+ bond_has_slaves(bond) &&
+ memcmp(slave_dev->dev_addr, bond_dev->dev_addr, bond_dev->addr_len) == 0) {
+ /* Set slave to random address to avoid duplicate mac
+ * address in later fail over.
+ */
+ eth_random_addr(ss.__data);
+ } else {
+ goto skip_mac_set;
+ }
+
+ ss.ss_family = slave_dev->type;
+ res = dev_set_mac_address(slave_dev, &ss, extack);
+ if (res) {
+ slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n", res);
+ goto err_restore_mtu;
}
+skip_mac_set:
+
/* set no_addrconf flag before open to prevent IPv6 addrconf */
slave_dev->priv_flags |= IFF_NO_ADDRCONF;
@@ -2163,29 +2026,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
new_slave->last_tx = new_slave->last_rx;
- if (bond->params.miimon && !bond->params.use_carrier) {
- link_reporting = bond_check_dev_link(bond, slave_dev, 1);
-
- if ((link_reporting == -1) && !bond->params.arp_interval) {
- /* miimon is set but a bonded network driver
- * does not support ETHTOOL/MII and
- * arp_interval is not set. Note: if
- * use_carrier is enabled, we will never go
- * here (because netif_carrier is always
- * supported); thus, we don't need to change
- * the messages for netif_carrier.
- */
- slave_warn(bond_dev, slave_dev, "MII and ETHTOOL support not available for slave, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n");
- } else if (link_reporting == -1) {
- /* unable get link status using mii/ethtool */
- slave_warn(bond_dev, slave_dev, "can't get link status from slave; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n");
- }
- }
-
/* check for initial state */
new_slave->link = BOND_LINK_NOCHANGE;
if (bond->params.miimon) {
- if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
+ if (netif_running(slave_dev) && netif_carrier_ok(slave_dev)) {
if (bond->params.updelay) {
bond_set_slave_link_state(new_slave,
BOND_LINK_BACK,
@@ -2338,19 +2182,25 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
}
bond->slave_cnt++;
- bond_compute_features(bond);
+ netdev_compute_master_upper_features(bond->dev, true);
bond_set_carrier(bond);
+ /* Needs to be called before bond_select_active_slave(), which will
+ * remove the maddrs if the slave is selected as the active slave.
+ */
+ bond_slave_ns_maddrs_add(bond, new_slave);
+
if (bond_uses_primary(bond)) {
block_netpoll_tx();
bond_select_active_slave(bond);
unblock_netpoll_tx();
}
- if (bond_mode_can_use_xmit_hash(bond))
+ /* broadcast mode uses the all_slaves array to loop over the slaves. */
+ if (bond_mode_can_use_xmit_hash(bond) ||
+ BOND_MODE(bond) == BOND_MODE_BROADCAST)
bond_update_slave_arr(bond, NULL);
-
if (!slave_dev->netdev_ops->ndo_bpf ||
!slave_dev->netdev_ops->ndo_xdp_xmit) {
if (bond->xdp_prog) {
@@ -2434,7 +2284,7 @@ err_restore_mac:
bond_hw_addr_copy(ss.__data, new_slave->perm_hwaddr,
new_slave->dev->addr_len);
ss.ss_family = slave_dev->type;
- dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
+ dev_set_mac_address(slave_dev, &ss, NULL);
}
err_restore_mtu:
@@ -2524,7 +2374,8 @@ static int __bond_release_one(struct net_device *bond_dev,
bond_upper_dev_unlink(bond, slave);
- if (bond_mode_can_use_xmit_hash(bond))
+ if (bond_mode_can_use_xmit_hash(bond) ||
+ BOND_MODE(bond) == BOND_MODE_BROADCAST)
bond_update_slave_arr(bond, slave);
slave_info(bond_dev, slave_dev, "Releasing %s interface\n",
@@ -2534,7 +2385,7 @@ static int __bond_release_one(struct net_device *bond_dev,
RCU_INIT_POINTER(bond->current_arp_slave, NULL);
- if (!all && (!bond->params.fail_over_mac ||
+ if (!all && (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
bond_has_slaves(bond))
@@ -2548,6 +2399,12 @@ static int __bond_release_one(struct net_device *bond_dev,
if (oldcurrent == slave)
bond_change_active_slave(bond, NULL);
+ /* Must be called after bond_change_active_slave() as the slave
+ * might change from an active slave to a backup slave. Then it is
+ * necessary to clear the maddrs on the backup slave.
+ */
+ bond_slave_ns_maddrs_del(bond, slave);
+
if (bond_is_lb(bond)) {
/* Must be called only after the slave has been
* detached from the list and the curr_active_slave
@@ -2580,7 +2437,7 @@ static int __bond_release_one(struct net_device *bond_dev,
call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
}
- bond_compute_features(bond);
+ netdev_compute_master_upper_features(bond->dev, true);
if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
(old_features & NETIF_F_VLAN_CHALLENGED))
slave_info(bond_dev, slave_dev, "last VLAN challenged slave left bond - VLAN blocking is removed\n");
@@ -2622,13 +2479,16 @@ static int __bond_release_one(struct net_device *bond_dev,
bond_hw_addr_copy(ss.__data, slave->perm_hwaddr,
slave->dev->addr_len);
ss.ss_family = slave_dev->type;
- dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
+ dev_set_mac_address(slave_dev, &ss, NULL);
}
- if (unregister)
- __dev_set_mtu(slave_dev, slave->original_mtu);
- else
+ if (unregister) {
+ netdev_lock_ops(slave_dev);
+ __netif_set_mtu(slave_dev, slave->original_mtu);
+ netdev_unlock_ops(slave_dev);
+ } else {
dev_set_mtu(slave_dev, slave->original_mtu);
+ }
if (!netif_is_bond_master(slave_dev))
slave_dev->priv_flags &= ~IFF_BONDING;
@@ -2714,7 +2574,8 @@ static int bond_miimon_inspect(struct bonding *bond)
bond_for_each_slave_rcu(bond, slave, iter) {
bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
- link_state = bond_check_dev_link(bond, slave->dev, 0);
+ link_state = netif_running(slave->dev) &&
+ netif_carrier_ok(slave->dev);
switch (slave->link) {
case BOND_LINK_UP:
@@ -2923,7 +2784,7 @@ static void bond_mii_monitor(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
mii_work.work);
- bool should_notify_peers = false;
+ bool should_notify_peers;
bool commit;
unsigned long delay;
struct slave *slave;
@@ -2935,30 +2796,33 @@ static void bond_mii_monitor(struct work_struct *work)
goto re_arm;
rcu_read_lock();
+
should_notify_peers = bond_should_notify_peers(bond);
commit = !!bond_miimon_inspect(bond);
- if (bond->send_peer_notif) {
- rcu_read_unlock();
- if (rtnl_trylock()) {
- bond->send_peer_notif--;
- rtnl_unlock();
- }
- } else {
- rcu_read_unlock();
- }
- if (commit) {
+ rcu_read_unlock();
+
+ if (commit || bond->send_peer_notif) {
/* Race avoidance with bond_close cancel of workqueue */
if (!rtnl_trylock()) {
delay = 1;
- should_notify_peers = false;
goto re_arm;
}
- bond_for_each_slave(bond, slave, iter) {
- bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER);
+ if (commit) {
+ bond_for_each_slave(bond, slave, iter) {
+ bond_commit_link_state(slave,
+ BOND_SLAVE_NOTIFY_LATER);
+ }
+ bond_miimon_commit(bond);
+ }
+
+ if (bond->send_peer_notif) {
+ bond->send_peer_notif--;
+ if (should_notify_peers)
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
+ bond->dev);
}
- bond_miimon_commit(bond);
rtnl_unlock(); /* might sleep, hold no other locks */
}
@@ -2966,13 +2830,6 @@ static void bond_mii_monitor(struct work_struct *work)
re_arm:
if (bond->params.miimon)
queue_delayed_work(bond->wq, &bond->mii_work, delay);
-
- if (should_notify_peers) {
- if (!rtnl_trylock())
- return;
- call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
- rtnl_unlock();
- }
}
static int bond_upper_dev_walk(struct net_device *upper,
@@ -3310,7 +3167,6 @@ static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
/* Find out through which dev should the packet go */
memset(&fl6, 0, sizeof(struct flowi6));
fl6.daddr = targets[i];
- fl6.flowi6_oif = bond->dev->ifindex;
dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6);
if (dst->error) {
@@ -4081,7 +3937,7 @@ static int bond_slave_netdev_event(unsigned long event,
case NETDEV_FEAT_CHANGE:
if (!bond->notifier_ctx) {
bond->notifier_ctx = true;
- bond_compute_features(bond);
+ netdev_compute_master_upper_features(bond->dev, true);
bond->notifier_ctx = false;
}
break;
@@ -4194,7 +4050,7 @@ static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk, const void *
}
if (l34 && *ip_proto >= 0)
- fk->ports.ports = __skb_flow_get_ports(skb, *nhoff, *ip_proto, data, hlen);
+ fk->ports.ports = skb_flow_get_ports(skb, *nhoff, *ip_proto, data, hlen);
return true;
}
@@ -4366,7 +4222,7 @@ void bond_work_init_all(struct bonding *bond)
INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler);
}
-static void bond_work_cancel_all(struct bonding *bond)
+void bond_work_cancel_all(struct bonding *bond)
{
cancel_delayed_work_sync(&bond->mii_work);
cancel_delayed_work_sync(&bond->arp_work);
@@ -4428,6 +4284,9 @@ static int bond_open(struct net_device *bond_dev)
bond_for_each_slave(bond, slave, iter)
dev_mc_add(slave->dev, lacpdu_mcast_addr);
+
+ if (bond->params.broadcast_neighbor)
+ static_branch_inc(&bond_bcast_neigh_enabled);
}
if (bond_mode_can_use_xmit_hash(bond))
@@ -4447,6 +4306,10 @@ static int bond_close(struct net_device *bond_dev)
bond_alb_deinitialize(bond);
bond->recv_probe = NULL;
+ if (BOND_MODE(bond) == BOND_MODE_8023AD &&
+ bond->params.broadcast_neighbor)
+ static_branch_dec(&bond_bcast_neigh_enabled);
+
if (bond_uses_primary(bond)) {
rcu_read_lock();
slave = rcu_dereference(bond->curr_active_slave);
@@ -4906,8 +4769,7 @@ unwind:
if (rollback_slave == slave)
break;
- tmp_res = dev_set_mac_address(rollback_slave->dev,
- (struct sockaddr *)&tmp_ss, NULL);
+ tmp_res = dev_set_mac_address(rollback_slave->dev, &tmp_ss, NULL);
if (tmp_res) {
slave_dbg(bond_dev, rollback_slave->dev, "%s: unwind err %d\n",
__func__, tmp_res);
@@ -5283,6 +5145,37 @@ static struct slave *bond_xdp_xmit_3ad_xor_slave_get(struct bonding *bond,
return slaves->arr[hash % count];
}
+static bool bond_should_broadcast_neighbor(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct bonding *bond = netdev_priv(dev);
+ struct {
+ struct ipv6hdr ip6;
+ struct icmp6hdr icmp6;
+ } *combined, _combined;
+
+ if (!static_branch_unlikely(&bond_bcast_neigh_enabled))
+ return false;
+
+ if (!bond->params.broadcast_neighbor)
+ return false;
+
+ if (skb->protocol == htons(ETH_P_ARP))
+ return true;
+
+ if (skb->protocol == htons(ETH_P_IPV6)) {
+ combined = skb_header_pointer(skb, skb_mac_header_len(skb),
+ sizeof(_combined),
+ &_combined);
+ if (combined && combined->ip6.nexthdr == NEXTHDR_ICMP &&
+ (combined->icmp6.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
+ combined->icmp6.icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT))
+ return true;
+ }
+
+ return false;
+}
+
/* Use this Xmit function for 3AD as well as XOR modes. The current
* usable slave array is formed in the control path. The xmit function
* just calculates hash and sends the packet out.
@@ -5302,17 +5195,27 @@ static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb,
return bond_tx_drop(dev, skb);
}
-/* in broadcast mode, we send everything to all usable interfaces. */
+/* In broadcast mode, we send everything to either all slaves or only the
+ * usable slaves, depending on all_slaves. Called under rcu_read_lock().
+ */
static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
- struct net_device *bond_dev)
+ struct net_device *bond_dev,
+ bool all_slaves)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave = NULL;
- struct list_head *iter;
+ struct bond_up_slave *slaves;
bool xmit_suc = false;
bool skb_used = false;
+ int slaves_count, i;
- bond_for_each_slave_rcu(bond, slave, iter) {
+ if (all_slaves)
+ slaves = rcu_dereference(bond->all_slaves);
+ else
+ slaves = rcu_dereference(bond->usable_slaves);
+
+ slaves_count = slaves ? READ_ONCE(slaves->count) : 0;
+ for (i = 0; i < slaves_count; i++) {
+ struct slave *slave = slaves->arr[i];
struct sk_buff *skb2;
if (!(bond_slave_is_up(slave) && slave->link == BOND_LINK_UP))
@@ -5550,10 +5453,13 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
case BOND_MODE_ACTIVEBACKUP:
return bond_xmit_activebackup(skb, dev);
case BOND_MODE_8023AD:
+ if (bond_should_broadcast_neighbor(skb, dev))
+ return bond_xmit_broadcast(skb, dev, false);
+ fallthrough;
case BOND_MODE_XOR:
return bond_3ad_xor_xmit(skb, dev);
case BOND_MODE_BROADCAST:
- return bond_xmit_broadcast(skb, dev);
+ return bond_xmit_broadcast(skb, dev, true);
case BOND_MODE_ALB:
return bond_alb_xmit(skb, dev);
case BOND_MODE_TLB:
@@ -5676,7 +5582,7 @@ static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog,
ASSERT_RTNL();
- if (!bond_xdp_check(bond)) {
+ if (!bond_xdp_check(bond, BOND_MODE(bond))) {
BOND_NL_ERR(dev, extack,
"No native XDP support for the current bonding mode");
return -EOPNOTSUPP;
@@ -6005,7 +5911,7 @@ void bond_setup(struct net_device *bond_dev)
bond_dev->lltx = true;
/* Don't allow bond devices to change network namespaces. */
- bond_dev->netns_local = true;
+ bond_dev->netns_immutable = true;
/* By default, we declare the bond to be fully
* VLAN hardware accelerated capable. Special
@@ -6014,7 +5920,7 @@ void bond_setup(struct net_device *bond_dev)
* capable
*/
- bond_dev->hw_features = BOND_VLAN_FEATURES |
+ bond_dev->hw_features = MASTER_UPPER_DEV_VLAN_FEATURES |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_STAG_RX |
@@ -6023,6 +5929,7 @@ void bond_setup(struct net_device *bond_dev)
bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
bond_dev->features |= bond_dev->hw_features;
bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
+ bond_dev->features |= NETIF_F_GSO_PARTIAL;
#ifdef CONFIG_XFRM_OFFLOAD
bond_dev->hw_features |= BOND_XFRM_FEATURES;
/* Only enable XFRM features if this is an active-backup config */
@@ -6161,10 +6068,10 @@ static int __init bond_check_params(struct bond_params *params)
downdelay = 0;
}
- if ((use_carrier != 0) && (use_carrier != 1)) {
- pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
- use_carrier);
- use_carrier = 1;
+ if (use_carrier != 1) {
+ pr_err("Error: invalid use_carrier parameter (%d)\n",
+ use_carrier);
+ return -EINVAL;
}
if (num_peer_notif < 0 || num_peer_notif > 255) {
@@ -6411,7 +6318,6 @@ static int __init bond_check_params(struct bond_params *params)
params->updelay = updelay;
params->downdelay = downdelay;
params->peer_notif_delay = 0;
- params->use_carrier = use_carrier;
params->lacp_active = 1;
params->lacp_fast = lacp_fast;
params->primary[0] = 0;
@@ -6428,6 +6334,7 @@ static int __init bond_check_params(struct bond_params *params)
eth_zero_addr(params->ad_actor_system);
params->ad_user_port_key = ad_user_port_key;
params->coupled_control = 1;
+ params->broadcast_neighbor = 0;
if (packets_per_slave > 0) {
params->reciprocal_packets_per_slave =
reciprocal_value(packets_per_slave);
@@ -6540,7 +6447,7 @@ static int __net_init bond_net_init(struct net *net)
/* According to commit 69b0216ac255 ("bonding: fix bonding_masters
* race condition in bond unloading") we need to remove sysfs files
- * before we remove our devices (done later in bond_net_exit_batch_rtnl())
+ * before we remove our devices (done later in bond_net_exit_rtnl())
*/
static void __net_exit bond_net_pre_exit(struct net *net)
{
@@ -6549,25 +6456,20 @@ static void __net_exit bond_net_pre_exit(struct net *net)
bond_destroy_sysfs(bn);
}
-static void __net_exit bond_net_exit_batch_rtnl(struct list_head *net_list,
- struct list_head *dev_kill_list)
+static void __net_exit bond_net_exit_rtnl(struct net *net,
+ struct list_head *dev_kill_list)
{
- struct bond_net *bn;
- struct net *net;
+ struct bond_net *bn = net_generic(net, bond_net_id);
+ struct bonding *bond, *tmp_bond;
/* Kill off any bonds created after unregistering bond rtnl ops */
- list_for_each_entry(net, net_list, exit_list) {
- struct bonding *bond, *tmp_bond;
-
- bn = net_generic(net, bond_net_id);
- list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
- unregister_netdevice_queue(bond->dev, dev_kill_list);
- }
+ list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
+ unregister_netdevice_queue(bond->dev, dev_kill_list);
}
/* According to commit 23fa5c2caae0 ("bonding: destroy proc directory
* only after all bonds are gone") bond_destroy_proc_dir() is called
- * after bond_net_exit_batch_rtnl() has completed.
+ * after bond_net_exit_rtnl() has completed.
*/
static void __net_exit bond_net_exit_batch(struct list_head *net_list)
{
@@ -6583,7 +6485,7 @@ static void __net_exit bond_net_exit_batch(struct list_head *net_list)
static struct pernet_operations bond_net_ops = {
.init = bond_net_init,
.pre_exit = bond_net_pre_exit,
- .exit_batch_rtnl = bond_net_exit_batch_rtnl,
+ .exit_rtnl = bond_net_exit_rtnl,
.exit_batch = bond_net_exit_batch,
.id = &bond_net_id,
.size = sizeof(struct bond_net),
@@ -6651,3 +6553,4 @@ module_exit(bonding_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");
+MODULE_IMPORT_NS("NETDEV_INTERNAL");
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 2a6a424806aa..286f11c517f7 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -28,6 +28,7 @@ static size_t bond_get_slave_size(const struct net_device *bond_dev,
nla_total_size(sizeof(u8)) + /* IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE */
nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE */
nla_total_size(sizeof(s32)) + /* IFLA_BOND_SLAVE_PRIO */
+ nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_ACTOR_PORT_PRIO */
0;
}
@@ -77,6 +78,10 @@ static int bond_fill_slave_info(struct sk_buff *skb,
ad_port->partner_oper.port_state))
goto nla_put_failure;
}
+
+ if (nla_put_u16(skb, IFLA_BOND_SLAVE_ACTOR_PORT_PRIO,
+ SLAVE_AD_INFO(slave)->port_priority))
+ goto nla_put_failure;
}
return 0;
@@ -124,11 +129,13 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_MISSED_MAX] = { .type = NLA_U8 },
[IFLA_BOND_NS_IP6_TARGET] = { .type = NLA_NESTED },
[IFLA_BOND_COUPLED_CONTROL] = { .type = NLA_U8 },
+ [IFLA_BOND_BROADCAST_NEIGH] = { .type = NLA_U8 },
};
static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
[IFLA_BOND_SLAVE_QUEUE_ID] = { .type = NLA_U16 },
[IFLA_BOND_SLAVE_PRIO] = { .type = NLA_S32 },
+ [IFLA_BOND_SLAVE_ACTOR_PORT_PRIO] = { .type = NLA_U16 },
};
static int bond_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -179,6 +186,16 @@ static int bond_slave_changelink(struct net_device *bond_dev,
return err;
}
+ if (data[IFLA_BOND_SLAVE_ACTOR_PORT_PRIO]) {
+ u16 ad_prio = nla_get_u16(data[IFLA_BOND_SLAVE_ACTOR_PORT_PRIO]);
+
+ bond_opt_slave_initval(&newval, &slave_dev, ad_prio);
+ err = __bond_opt_set(bond, BOND_OPT_ACTOR_PORT_PRIO, &newval,
+ data[IFLA_BOND_SLAVE_ACTOR_PORT_PRIO], extack);
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -258,13 +275,11 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
return err;
}
if (data[IFLA_BOND_USE_CARRIER]) {
- int use_carrier = nla_get_u8(data[IFLA_BOND_USE_CARRIER]);
-
- bond_opt_initval(&newval, use_carrier);
- err = __bond_opt_set(bond, BOND_OPT_USE_CARRIER, &newval,
- data[IFLA_BOND_USE_CARRIER], extack);
- if (err)
- return err;
+ if (nla_get_u8(data[IFLA_BOND_USE_CARRIER]) != 1) {
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_BOND_USE_CARRIER],
+ "option obsolete, use_carrier cannot be disabled");
+ return -EINVAL;
+ }
}
if (data[IFLA_BOND_ARP_INTERVAL]) {
int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);
@@ -561,25 +576,39 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
return err;
}
+ if (data[IFLA_BOND_BROADCAST_NEIGH]) {
+ int broadcast_neigh = nla_get_u8(data[IFLA_BOND_BROADCAST_NEIGH]);
+
+ bond_opt_initval(&newval, broadcast_neigh);
+ err = __bond_opt_set(bond, BOND_OPT_BROADCAST_NEIGH, &newval,
+ data[IFLA_BOND_BROADCAST_NEIGH], extack);
+ if (err)
+ return err;
+ }
+
return 0;
}
-static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int bond_newlink(struct net_device *bond_dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
int err;
- err = bond_changelink(bond_dev, tb, data, extack);
- if (err < 0)
+ err = register_netdevice(bond_dev);
+ if (err)
return err;
- err = register_netdevice(bond_dev);
- if (!err) {
- struct bonding *bond = netdev_priv(bond_dev);
+ netif_carrier_off(bond_dev);
+ bond_work_init_all(bond);
- netif_carrier_off(bond_dev);
- bond_work_init_all(bond);
+ err = bond_changelink(bond_dev, tb, data, extack);
+ if (err) {
+ bond_work_cancel_all(bond);
+ unregister_netdevice(bond_dev);
}
return err;
@@ -628,6 +657,7 @@ static size_t bond_get_size(const struct net_device *bond_dev)
nla_total_size(sizeof(struct nlattr)) +
nla_total_size(sizeof(struct in6_addr)) * BOND_MAX_NS_TARGETS +
nla_total_size(sizeof(u8)) + /* IFLA_BOND_COUPLED_CONTROL */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_BROADCAST_NEIGH */
0;
}
@@ -674,7 +704,7 @@ static int bond_fill_info(struct sk_buff *skb,
bond->params.peer_notif_delay * bond->params.miimon))
goto nla_put_failure;
- if (nla_put_u8(skb, IFLA_BOND_USE_CARRIER, bond->params.use_carrier))
+ if (nla_put_u8(skb, IFLA_BOND_USE_CARRIER, 1))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BOND_ARP_INTERVAL, bond->params.arp_interval))
@@ -791,6 +821,10 @@ static int bond_fill_info(struct sk_buff *skb,
bond->params.coupled_control))
goto nla_put_failure;
+ if (nla_put_u8(skb, IFLA_BOND_BROADCAST_NEIGH,
+ bond->params.broadcast_neighbor))
+ goto nla_put_failure;
+
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info info;
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 95d59a18c022..384499c869b8 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -15,6 +15,7 @@
#include <linux/sched/signal.h>
#include <net/bonding.h>
+#include <net/ndisc.h>
static int bond_option_active_slave_set(struct bonding *bond,
const struct bond_opt_value *newval);
@@ -78,6 +79,8 @@ static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_ad_actor_sys_prio_set(struct bonding *bond,
const struct bond_opt_value *newval);
+static int bond_option_actor_port_prio_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
static int bond_option_ad_actor_system_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_ad_user_port_key_set(struct bonding *bond,
@@ -86,6 +89,8 @@ static int bond_option_missed_max_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_coupled_control_set(struct bonding *bond,
const struct bond_opt_value *newval);
+static int bond_option_broadcast_neigh_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
static const struct bond_opt_value bond_mode_tbl[] = {
{ "balance-rr", BOND_MODE_ROUNDROBIN, BOND_VALFLAG_DEFAULT},
@@ -157,10 +162,11 @@ static const struct bond_opt_value bond_lacp_rate_tbl[] = {
};
static const struct bond_opt_value bond_ad_select_tbl[] = {
- { "stable", BOND_AD_STABLE, BOND_VALFLAG_DEFAULT},
- { "bandwidth", BOND_AD_BANDWIDTH, 0},
- { "count", BOND_AD_COUNT, 0},
- { NULL, -1, 0},
+ { "stable", BOND_AD_STABLE, BOND_VALFLAG_DEFAULT},
+ { "bandwidth", BOND_AD_BANDWIDTH, 0},
+ { "count", BOND_AD_COUNT, 0},
+ { "actor_port_prio", BOND_AD_PRIO, 0},
+ { NULL, -1, 0},
};
static const struct bond_opt_value bond_num_peer_notif_tbl[] = {
@@ -184,7 +190,6 @@ static const struct bond_opt_value bond_primary_reselect_tbl[] = {
};
static const struct bond_opt_value bond_use_carrier_tbl[] = {
- { "off", 0, 0},
{ "on", 1, BOND_VALFLAG_DEFAULT},
{ NULL, -1, 0}
};
@@ -239,6 +244,12 @@ static const struct bond_opt_value bond_coupled_control_tbl[] = {
{ NULL, -1, 0},
};
+static const struct bond_opt_value bond_broadcast_neigh_tbl[] = {
+ { "off", 0, BOND_VALFLAG_DEFAULT},
+ { "on", 1, 0},
+ { NULL, -1, 0}
+};
+
static const struct bond_option bond_opts[BOND_OPT_LAST] = {
[BOND_OPT_MODE] = {
.id = BOND_OPT_MODE,
@@ -361,7 +372,7 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
[BOND_OPT_AD_SELECT] = {
.id = BOND_OPT_AD_SELECT,
.name = "ad_select",
- .desc = "803.ad aggregation selection logic",
+ .desc = "802.3ad aggregation selection logic",
.flags = BOND_OPTFLAG_IFDOWN,
.values = bond_ad_select_tbl,
.set = bond_option_ad_select_set
@@ -410,7 +421,7 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
[BOND_OPT_USE_CARRIER] = {
.id = BOND_OPT_USE_CARRIER,
.name = "use_carrier",
- .desc = "Use netif_carrier_ok (vs MII ioctls) in miimon",
+ .desc = "option obsolete, use_carrier cannot be disabled",
.values = bond_use_carrier_tbl,
.set = bond_option_use_carrier_set
},
@@ -475,6 +486,13 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.values = bond_ad_actor_sys_prio_tbl,
.set = bond_option_ad_actor_sys_prio_set,
},
+ [BOND_OPT_ACTOR_PORT_PRIO] = {
+ .id = BOND_OPT_ACTOR_PORT_PRIO,
+ .name = "actor_port_prio",
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
+ .flags = BOND_OPTFLAG_RAWVAL,
+ .set = bond_option_actor_port_prio_set,
+ },
[BOND_OPT_AD_ACTOR_SYSTEM] = {
.id = BOND_OPT_AD_ACTOR_SYSTEM,
.name = "ad_actor_system",
@@ -512,6 +530,14 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.flags = BOND_OPTFLAG_IFDOWN,
.values = bond_coupled_control_tbl,
.set = bond_option_coupled_control_set,
+ },
+ [BOND_OPT_BROADCAST_NEIGH] = {
+ .id = BOND_OPT_BROADCAST_NEIGH,
+ .name = "broadcast_neighbor",
+ .desc = "Broadcast neighbor packets to all active slaves",
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
+ .values = bond_broadcast_neigh_tbl,
+ .set = bond_option_broadcast_neigh_set,
}
};
@@ -867,6 +893,9 @@ static bool bond_set_xfrm_features(struct bonding *bond)
static int bond_option_mode_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
+ if (bond->xdp_prog && !bond_xdp_check(bond, newval->value))
+ return -EOPNOTSUPP;
+
if (!bond_mode_uses_arp(newval->value)) {
if (bond->params.arp_interval) {
netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
@@ -890,6 +919,13 @@ static int bond_option_mode_set(struct bonding *bond,
bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
bond->params.mode = newval->value;
+ /* When the mode is changed the bond device is down, so if
+ * broadcast_neighbor was enabled in 802.3ad mode, bond_bcast_neigh_enabled
+ * has already been decremented in bond_close(). Therefore, only clear
+ * broadcast_neighbor here.
+ */
+ bond->params.broadcast_neighbor = 0;
+
if (bond->dev->reg_state == NETREG_REGISTERED) {
bool update = false;
@@ -1064,10 +1100,6 @@ static int bond_option_peer_notif_delay_set(struct bonding *bond,
static int bond_option_use_carrier_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
- netdev_dbg(bond->dev, "Setting use_carrier to %llu\n",
- newval->value);
- bond->params.use_carrier = newval->value;
-
return 0;
}
@@ -1234,6 +1266,107 @@ static int bond_option_arp_ip_targets_set(struct bonding *bond,
}
#if IS_ENABLED(CONFIG_IPV6)
+static bool slave_can_set_ns_maddr(const struct bonding *bond, struct slave *slave)
+{
+ return BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
+ !bond_is_active_slave(slave) &&
+ slave->dev->flags & IFF_MULTICAST;
+}
+
+/**
+ * slave_set_ns_maddrs - add/del all NS mac addresses for slave
+ * @bond: bond device
+ * @slave: slave device
+ * @add: add or remove all the NS mac addresses
+ *
+ * This function tries to add or delete all the NS mac addresses on the slave.
+ *
+ * Note, the IPv6 NS target address is the unicast address in a Neighbor
+ * Solicitation (NS) message. The destination address of the NS message should
+ * be the solicited-node multicast address of the target, and the destination
+ * MAC of the NS message is derived from that multicast address.
+ *
+ * This function is called when
+ * * arp_validate changes
+ * * enslaving, releasing new slaves
+ */
+static void slave_set_ns_maddrs(struct bonding *bond, struct slave *slave, bool add)
+{
+ struct in6_addr *targets = bond->params.ns_targets;
+ char slot_maddr[MAX_ADDR_LEN];
+ struct in6_addr mcaddr;
+ int i;
+
+ if (!slave_can_set_ns_maddr(bond, slave))
+ return;
+
+ for (i = 0; i < BOND_MAX_NS_TARGETS; i++) {
+ if (ipv6_addr_any(&targets[i]))
+ break;
+
+ addrconf_addr_solict_mult(&targets[i], &mcaddr);
+ if (!ndisc_mc_map(&mcaddr, slot_maddr, slave->dev, 0)) {
+ if (add)
+ dev_mc_add(slave->dev, slot_maddr);
+ else
+ dev_mc_del(slave->dev, slot_maddr);
+ }
+ }
+}
+
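
As a quick illustration of the mapping described in the kernel-doc above (a standalone userspace sketch, not part of this patch; the target address is made up), the low 24 bits of an NS target form its solicited-node multicast address, and the multicast MAC programmed on the slave is 33:33 followed by the low 32 bits of that address. This is what addrconf_addr_solict_mult() and ndisc_mc_map() compute inside the kernel:

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
	struct in6_addr target, snm;
	unsigned char mac[6];
	char buf[INET6_ADDRSTRLEN];

	/* hypothetical NS target, for illustration only */
	inet_pton(AF_INET6, "2001:db8::1:2:3", &target);

	/* solicited-node multicast: ff02::1:ffXX:XXXX, low 24 bits of target */
	memset(&snm, 0, sizeof(snm));
	snm.s6_addr[0] = 0xff;
	snm.s6_addr[1] = 0x02;
	snm.s6_addr[11] = 0x01;
	snm.s6_addr[12] = 0xff;
	memcpy(&snm.s6_addr[13], &target.s6_addr[13], 3);

	/* Ethernet mapping: 33:33 plus the low 32 bits of the IPv6 address */
	mac[0] = 0x33;
	mac[1] = 0x33;
	memcpy(&mac[2], &snm.s6_addr[12], 4);

	inet_ntop(AF_INET6, &snm, buf, sizeof(buf));
	printf("solicited-node: %s\n", buf);	/* ff02::1:ff02:3 */
	printf("mcast mac: %02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);	/* 33:33:ff:02:00:03 */
	return 0;
}

Without that MAC in the slave's multicast filter, a backup slave would not see the NS traffic that arp_validate checks for, which is why enslave, release and arp_validate changes all re-sync these addresses.
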
+void bond_slave_ns_maddrs_add(struct bonding *bond, struct slave *slave)
+{
+ if (!bond->params.arp_validate)
+ return;
+ slave_set_ns_maddrs(bond, slave, true);
+}
+
+void bond_slave_ns_maddrs_del(struct bonding *bond, struct slave *slave)
+{
+ if (!bond->params.arp_validate)
+ return;
+ slave_set_ns_maddrs(bond, slave, false);
+}
+
+/**
+ * slave_set_ns_maddr - set new NS mac address for slave
+ * @bond: bond device
+ * @slave: slave device
+ * @target: the new IPv6 target
+ * @slot: the old IPv6 target in the slot
+ *
+ * This function tries to replace the old MAC address with the new one on
+ * the slave.
+ *
+ * Note, the target/slot IPv6 address is the unicast address in a Neighbor
+ * Solicitation (NS) message. The destination address of the NS message should
+ * be the solicited-node multicast address of the target, and the destination
+ * MAC of the NS message is derived from that multicast address.
+ *
+ * This function is called when
+ * * An IPv6 NS target is added or removed.
+ */
+static void slave_set_ns_maddr(struct bonding *bond, struct slave *slave,
+ struct in6_addr *target, struct in6_addr *slot)
+{
+ char mac_addr[MAX_ADDR_LEN];
+ struct in6_addr mcast_addr;
+
+ if (!bond->params.arp_validate || !slave_can_set_ns_maddr(bond, slave))
+ return;
+
+ /* remove the previous mac addr from slave */
+ addrconf_addr_solict_mult(slot, &mcast_addr);
+ if (!ipv6_addr_any(slot) &&
+ !ndisc_mc_map(&mcast_addr, mac_addr, slave->dev, 0))
+ dev_mc_del(slave->dev, mac_addr);
+
+ /* add new mac addr on slave if target is set */
+ addrconf_addr_solict_mult(target, &mcast_addr);
+ if (!ipv6_addr_any(target) &&
+ !ndisc_mc_map(&mcast_addr, mac_addr, slave->dev, 0))
+ dev_mc_add(slave->dev, mac_addr);
+}
+
static void _bond_options_ns_ip6_target_set(struct bonding *bond, int slot,
struct in6_addr *target,
unsigned long last_rx)
@@ -1243,8 +1376,10 @@ static void _bond_options_ns_ip6_target_set(struct bonding *bond, int slot,
struct slave *slave;
if (slot >= 0 && slot < BOND_MAX_NS_TARGETS) {
- bond_for_each_slave(bond, slave, iter)
+ bond_for_each_slave(bond, slave, iter) {
slave->target_last_arp_rx[slot] = last_rx;
+ slave_set_ns_maddr(bond, slave, target, &targets[slot]);
+ }
targets[slot] = *target;
}
}
@@ -1296,15 +1431,30 @@ static int bond_option_ns_ip6_targets_set(struct bonding *bond,
{
return -EPERM;
}
+
+static void slave_set_ns_maddrs(struct bonding *bond, struct slave *slave, bool add) {}
+
+void bond_slave_ns_maddrs_add(struct bonding *bond, struct slave *slave) {}
+
+void bond_slave_ns_maddrs_del(struct bonding *bond, struct slave *slave) {}
#endif
static int bond_option_arp_validate_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
+ bool changed = !!bond->params.arp_validate != !!newval->value;
+ struct list_head *iter;
+ struct slave *slave;
+
netdev_dbg(bond->dev, "Setting arp_validate to %s (%llu)\n",
newval->string, newval->value);
bond->params.arp_validate = newval->value;
+ if (changed) {
+ bond_for_each_slave(bond, slave, iter)
+ slave_set_ns_maddrs(bond, slave, !!bond->params.arp_validate);
+ }
+
return 0;
}
@@ -1515,6 +1665,7 @@ static int bond_option_lacp_active_set(struct bonding *bond,
netdev_dbg(bond->dev, "Setting LACP active to %s (%llu)\n",
newval->string, newval->value);
bond->params.lacp_active = newval->value;
+ bond_3ad_update_lacp_active(bond);
return 0;
}
@@ -1671,6 +1822,26 @@ static int bond_option_ad_actor_sys_prio_set(struct bonding *bond,
return 0;
}
+static int bond_option_actor_port_prio_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ struct slave *slave;
+
+ slave = bond_slave_get_rtnl(newval->slave_dev);
+ if (!slave) {
+ netdev_dbg(bond->dev, "%s called on NULL slave\n", __func__);
+ return -ENODEV;
+ }
+
+ netdev_dbg(newval->slave_dev, "Setting actor_port_prio to %llu\n",
+ newval->value);
+
+ SLAVE_AD_INFO(slave)->port_priority = newval->value;
+ bond_3ad_update_ad_actor_settings(bond);
+
+ return 0;
+}
+
static int bond_option_ad_actor_system_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
@@ -1718,3 +1889,22 @@ static int bond_option_coupled_control_set(struct bonding *bond,
bond->params.coupled_control = newval->value;
return 0;
}
+
+static int bond_option_broadcast_neigh_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ if (bond->params.broadcast_neighbor == newval->value)
+ return 0;
+
+ bond->params.broadcast_neighbor = newval->value;
+ if (bond->dev->flags & IFF_UP) {
+ if (bond->params.broadcast_neighbor)
+ static_branch_inc(&bond_bcast_neigh_enabled);
+ else
+ static_branch_dec(&bond_bcast_neigh_enabled);
+ }
+
+ netdev_dbg(bond->dev, "Setting broadcast_neighbor to %s (%llu)\n",
+ newval->string, newval->value);
+ return 0;
+}
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 1e13bb170515..9a75ad3181ab 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -467,14 +467,12 @@ static ssize_t bonding_show_primary_reselect(struct device *d,
static DEVICE_ATTR(primary_reselect, 0644,
bonding_show_primary_reselect, bonding_sysfs_store_option);
-/* Show the use_carrier flag. */
+/* use_carrier is obsolete, but print value for compatibility */
static ssize_t bonding_show_carrier(struct device *d,
struct device_attribute *attr,
char *buf)
{
- struct bonding *bond = to_bond(d);
-
- return sysfs_emit(buf, "%d\n", bond->params.use_carrier);
+ return sysfs_emit(buf, "1\n");
}
static DEVICE_ATTR(use_carrier, 0644,
bonding_show_carrier, bonding_sysfs_store_option);
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index ed3a589def6b..c398ac42eae9 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -126,15 +126,6 @@ static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
ser->rx_blob.data = ser->rx_data;
ser->rx_blob.size = size;
}
-
-static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
-{
- if (size > sizeof(ser->tx_data))
- size = sizeof(ser->tx_data);
- memcpy(ser->tx_data, data, size);
- ser->tx_blob.data = ser->tx_data;
- ser->tx_blob.size = size;
-}
#else
static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
{
@@ -151,11 +142,6 @@ static inline void update_tty_status(struct ser_device *ser)
static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
{
}
-
-static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
-{
-}
-
#endif
static void ldisc_receive(struct tty_struct *tty, const u8 *data,
@@ -344,7 +330,7 @@ static int ldisc_open(struct tty_struct *tty)
ser->tty = tty_kref_get(tty);
ser->dev = dev;
debugfs_init(ser, tty);
- tty->receive_room = N_TTY_BUF_SIZE;
+ tty->receive_room = 4096;
tty->disc_data = ser;
set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
rtnl_lock();
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index 7fea00c7ca8a..c60386bf2d1a 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -745,7 +745,7 @@ err:
if (cfv->vr_rx)
vdev->vringh_config->del_vrhs(cfv->vdev);
- if (cfv->vdev)
+ if (cfv->vq_tx)
vdev->config->del_vqs(cfv->vdev);
free_netdev(netdev);
return err;
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index cf989bea9aa3..e15e320db476 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -124,6 +124,23 @@ config CAN_CAN327
If this driver is built as a module, it will be called can327.
+config CAN_DUMMY
+ tristate "Dummy CAN"
+ help
+ A dummy CAN module supporting Classical CAN, CAN FD and CAN XL. It
+ exposes bittiming values which can be configured through the netlink
+ interface.
+
+ The module will simply echo any frame sent to it. If debug messages
+ are activated, it prints all the CAN bittiming information in the
+ kernel log. Aside from that it does nothing.
+
+ This is convenient for testing the CAN netlink interface. Most of the
+ users will never need this. If unsure, say NO.
+
+ To compile this driver as a module, choose M here: the module will be
+ called dummy-can.
+
config CAN_FLEXCAN
tristate "Support for Freescale FLEXCAN based chips"
depends on OF || COLDFIRE || COMPILE_TEST
@@ -154,6 +171,7 @@ config CAN_JANZ_ICAN3
config CAN_KVASER_PCIEFD
depends on PCI
tristate "Kvaser PCIe FD cards"
+ select NET_DEVLINK
help
This is a driver for the Kvaser PCI Express CAN FD family.
@@ -201,7 +219,7 @@ config CAN_SUN4I
be called sun4i_can.
config CAN_TI_HECC
- depends on ARM
+ depends on ARM || COMPILE_TEST
tristate "TI High End CAN Controller"
select CAN_RX_OFFLOAD
help
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index a71db2cfe990..d7bc10a6b8ea 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -21,11 +21,12 @@ obj-$(CONFIG_CAN_CAN327) += can327.o
obj-$(CONFIG_CAN_CC770) += cc770/
obj-$(CONFIG_CAN_C_CAN) += c_can/
obj-$(CONFIG_CAN_CTUCANFD) += ctucanfd/
+obj-$(CONFIG_CAN_DUMMY) += dummy_can.o
obj-$(CONFIG_CAN_FLEXCAN) += flexcan/
obj-$(CONFIG_CAN_GRCAN) += grcan.o
obj-$(CONFIG_CAN_IFI_CANFD) += ifi_canfd/
obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
-obj-$(CONFIG_CAN_KVASER_PCIEFD) += kvaser_pciefd.o
+obj-$(CONFIG_CAN_KVASER_PCIEFD) += kvaser_pciefd/
obj-$(CONFIG_CAN_MSCAN) += mscan/
obj-$(CONFIG_CAN_M_CAN) += m_can/
obj-$(CONFIG_CAN_PEAK_PCIEFD) += peak_canfd/
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 191707d7e3da..c2a3a4eef5b2 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -948,7 +948,6 @@ static const struct net_device_ops at91_netdev_ops = {
.ndo_open = at91_open,
.ndo_stop = at91_close,
.ndo_start_xmit = at91_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops at91_ethtool_ops = {
diff --git a/drivers/net/can/bxcan.c b/drivers/net/can/bxcan.c
index bfc60eb33dc3..baf494d20bef 100644
--- a/drivers/net/can/bxcan.c
+++ b/drivers/net/can/bxcan.c
@@ -227,7 +227,7 @@ static void bxcan_enable_filters(struct bxcan_priv *priv, enum bxcan_cfg cfg)
* mask mode with 32 bits width.
*/
- /* Enter filter initialization mode and assing filters to CAN
+ /* Enter filter initialization mode and assign filters to CAN
* controllers.
*/
regmap_update_bits(priv->gcan, BXCAN_FMR_REG,
@@ -842,7 +842,7 @@ static netdev_tx_t bxcan_start_xmit(struct sk_buff *skb,
u32 id;
int i, j;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
if (bxcan_tx_busy(priv))
@@ -881,7 +881,6 @@ static const struct net_device_ops bxcan_netdev_ops = {
.ndo_open = bxcan_open,
.ndo_stop = bxcan_stop,
.ndo_start_xmit = bxcan_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops bxcan_ethtool_ops = {
diff --git a/drivers/net/can/c_can/c_can_main.c b/drivers/net/can/c_can/c_can_main.c
index c63f7fc1e691..3702cac7fbf0 100644
--- a/drivers/net/can/c_can/c_can_main.c
+++ b/drivers/net/can/c_can/c_can_main.c
@@ -1011,47 +1011,60 @@ static int c_can_handle_bus_err(struct net_device *dev,
/* common for all type of bus errors */
priv->can.can_stats.bus_error++;
- stats->rx_errors++;
/* propagate the error condition to the CAN stack */
skb = alloc_can_err_skb(dev, &cf);
- if (unlikely(!skb))
- return 0;
/* check for 'last error code' which tells us the
* type of the last error to occur on the CAN bus
*/
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ if (likely(skb))
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
switch (lec_type) {
case LEC_STUFF_ERROR:
netdev_dbg(dev, "stuff error\n");
- cf->data[2] |= CAN_ERR_PROT_STUFF;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ stats->rx_errors++;
break;
case LEC_FORM_ERROR:
netdev_dbg(dev, "form error\n");
- cf->data[2] |= CAN_ERR_PROT_FORM;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ stats->rx_errors++;
break;
case LEC_ACK_ERROR:
netdev_dbg(dev, "ack error\n");
- cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ if (likely(skb))
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ stats->tx_errors++;
break;
case LEC_BIT1_ERROR:
netdev_dbg(dev, "bit1 error\n");
- cf->data[2] |= CAN_ERR_PROT_BIT1;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+ stats->tx_errors++;
break;
case LEC_BIT0_ERROR:
netdev_dbg(dev, "bit0 error\n");
- cf->data[2] |= CAN_ERR_PROT_BIT0;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+ stats->tx_errors++;
break;
case LEC_CRC_ERROR:
netdev_dbg(dev, "CRC error\n");
- cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ if (likely(skb))
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ stats->rx_errors++;
break;
default:
break;
}
+ if (unlikely(!skb))
+ return 0;
+
netif_receive_skb(skb);
return 1;
}
@@ -1349,7 +1362,6 @@ static const struct net_device_ops c_can_netdev_ops = {
.ndo_open = c_can_open,
.ndo_stop = c_can_close,
.ndo_start_xmit = c_can_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
int register_c_can_dev(struct net_device *dev)
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 6cba9717a6d8..19c86b94a40e 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -269,30 +269,22 @@ static int c_can_plat_probe(struct platform_device *pdev)
/* get the appropriate clk */
clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- goto exit;
- }
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
/* get the platform data */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = irq;
- goto exit;
- }
+ if (irq < 0)
+ return irq;
addr = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
- if (IS_ERR(addr)) {
- ret = PTR_ERR(addr);
- goto exit;
- }
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
/* allocate the c_can device */
dev = alloc_c_can_dev(drvdata->msg_obj_num);
- if (!dev) {
- ret = -ENOMEM;
- goto exit;
- }
+ if (!dev)
+ return -ENOMEM;
priv = netdev_priv(dev);
switch (drvdata->id) {
@@ -324,33 +316,22 @@ static int c_can_plat_probe(struct platform_device *pdev)
/* Check if we need custom RAMINIT via syscon. Mostly for TI
* platforms. Only supported with DT boot.
*/
- if (np && of_property_read_bool(np, "syscon-raminit")) {
+ if (np && of_property_present(np, "syscon-raminit")) {
+ unsigned int args[2];
u32 id;
struct c_can_raminit *raminit = &priv->raminit_sys;
ret = -EINVAL;
- raminit->syscon = syscon_regmap_lookup_by_phandle(np,
- "syscon-raminit");
+ raminit->syscon = syscon_regmap_lookup_by_phandle_args(np,
+ "syscon-raminit",
+ 2, args);
if (IS_ERR(raminit->syscon)) {
- /* can fail with -EPROBE_DEFER */
ret = PTR_ERR(raminit->syscon);
- free_c_can_dev(dev);
- return ret;
- }
-
- if (of_property_read_u32_index(np, "syscon-raminit", 1,
- &raminit->reg)) {
- dev_err(&pdev->dev,
- "couldn't get the RAMINIT reg. offset!\n");
goto exit_free_device;
}
- if (of_property_read_u32_index(np, "syscon-raminit", 2,
- &id)) {
- dev_err(&pdev->dev,
- "couldn't get the CAN instance ID\n");
- goto exit_free_device;
- }
+ raminit->reg = args[0];
+ id = args[1];
if (id >= drvdata->raminit_num) {
dev_err(&pdev->dev,
@@ -385,18 +366,17 @@ static int c_can_plat_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
KBUILD_MODNAME, ret);
- goto exit_free_device;
+ goto exit_pm_runtime;
}
dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
KBUILD_MODNAME, priv->base, dev->irq);
return 0;
-exit_free_device:
+exit_pm_runtime:
pm_runtime_disable(priv->device);
+exit_free_device:
free_c_can_dev(dev);
-exit:
- dev_err(&pdev->dev, "probe failed\n");
return ret;
}
diff --git a/drivers/net/can/can327.c b/drivers/net/can/can327.c
index 24af63961030..b66fc16aedd2 100644
--- a/drivers/net/can/can327.c
+++ b/drivers/net/can/can327.c
@@ -849,7 +849,6 @@ static const struct net_device_ops can327_netdev_ops = {
.ndo_open = can327_netdev_open,
.ndo_stop = can327_netdev_close,
.ndo_start_xmit = can327_netdev_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops can327_ethtool_ops = {
diff --git a/drivers/net/can/cc770/Kconfig b/drivers/net/can/cc770/Kconfig
index 467ef19de1c1..aae25c2f849e 100644
--- a/drivers/net/can/cc770/Kconfig
+++ b/drivers/net/can/cc770/Kconfig
@@ -7,7 +7,7 @@ if CAN_CC770
config CAN_CC770_ISA
tristate "ISA Bus based legacy CC770 driver"
- depends on ISA
+ depends on HAS_IOPORT
help
This driver adds legacy support for CC770 and AN82527 chips
connected to the ISA bus using I/O port, memory mapped or
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index 30909f3aab57..8d5abd643c06 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -834,7 +834,6 @@ static const struct net_device_ops cc770_netdev_ops = {
.ndo_open = cc770_open,
.ndo_stop = cc770_close,
.ndo_start_xmit = cc770_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops cc770_ethtool_ops = {
diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c
index 64c349fd4600..1e6b9e3dc2fe 100644
--- a/drivers/net/can/ctucanfd/ctucanfd_base.c
+++ b/drivers/net/can/ctucanfd/ctucanfd_base.c
@@ -275,7 +275,7 @@ static int ctucan_set_bittiming(struct net_device *ndev)
static int ctucan_set_data_bittiming(struct net_device *ndev)
{
struct ctucan_priv *priv = netdev_priv(ndev);
- struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
/* Note that dbt may be modified here */
return ctucan_set_btr(ndev, dbt, false);
@@ -290,7 +290,7 @@ static int ctucan_set_data_bittiming(struct net_device *ndev)
static int ctucan_set_secondary_sample_point(struct net_device *ndev)
{
struct ctucan_priv *priv = netdev_priv(ndev);
- struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
int ssp_offset = 0;
u32 ssp_cfg = 0; /* No SSP by default */
@@ -506,11 +506,12 @@ static bool ctucan_is_txt_buf_writable(struct ctucan_priv *priv, u8 buf)
* @buf: TXT Buffer index to which frame is inserted (0-based)
* @isfdf: True - CAN FD Frame, False - CAN 2.0 Frame
*
- * Return: True - Frame inserted successfully
- * False - Frame was not inserted due to one of:
- * 1. TXT Buffer is not writable (it is in wrong state)
- * 2. Invalid TXT buffer index
- * 3. Invalid frame length
+ * Return:
+ * * True - Frame inserted successfully
+ * * False - Frame was not inserted due to one of:
+ * 1. TXT Buffer is not writable (it is in wrong state)
+ * 2. Invalid TXT buffer index
+ * 3. Invalid frame length
*/
static bool ctucan_insert_frame(struct ctucan_priv *priv, const struct canfd_frame *cf, u8 buf,
bool isfdf)
@@ -867,10 +868,12 @@ static void ctucan_err_interrupt(struct net_device *ndev, u32 isr)
}
break;
case CAN_STATE_ERROR_ACTIVE:
- cf->can_id |= CAN_ERR_CNT;
- cf->data[1] = CAN_ERR_CRTL_ACTIVE;
- cf->data[6] = bec.txerr;
- cf->data[7] = bec.rxerr;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[1] = CAN_ERR_CRTL_ACTIVE;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
break;
default:
netdev_warn(ndev, "unhandled error state (%d:%s)!\n",
@@ -1298,7 +1301,6 @@ static const struct net_device_ops ctucan_netdev_ops = {
.ndo_open = ctucan_open,
.ndo_stop = ctucan_close,
.ndo_start_xmit = ctucan_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops ctucan_ethtool_ops = {
@@ -1356,12 +1358,12 @@ int ctucan_probe_common(struct device *dev, void __iomem *addr, int irq, unsigne
priv->ntxbufs = ntxbufs;
priv->dev = dev;
priv->can.bittiming_const = &ctu_can_fd_bit_timing_max;
- priv->can.data_bittiming_const = &ctu_can_fd_bit_timing_data_max;
+ priv->can.fd.data_bittiming_const = &ctu_can_fd_bit_timing_data_max;
priv->can.do_set_mode = ctucan_do_set_mode;
/* Needed for timing adjustment to be performed as soon as possible */
priv->can.do_set_bittiming = ctucan_set_bittiming;
- priv->can.do_set_data_bittiming = ctucan_set_data_bittiming;
+ priv->can.fd.do_set_data_bittiming = ctucan_set_data_bittiming;
priv->can.do_get_berr_counter = ctucan_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK
diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c
index 0b93900b1dfa..8f82418230ce 100644
--- a/drivers/net/can/dev/bittiming.c
+++ b/drivers/net/can/dev/bittiming.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
* Copyright (C) 2006 Andrey Volkov, Varma Electronics
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (c) 2025 Vincent Mailhol <mailhol@kernel.org>
*/
#include <linux/can/dev.h>
@@ -151,3 +152,65 @@ int can_get_bittiming(const struct net_device *dev, struct can_bittiming *bt,
return -EINVAL;
}
+
+int can_validate_pwm_bittiming(const struct net_device *dev,
+ const struct can_pwm *pwm,
+ struct netlink_ext_ack *extack)
+{
+ const struct can_priv *priv = netdev_priv(dev);
+ u32 xl_bit_time_tqmin = can_bit_time_tqmin(&priv->xl.data_bittiming);
+ u32 nom_bit_time_tqmin = can_bit_time_tqmin(&priv->bittiming);
+ u32 pwms_ns = can_tqmin_to_ns(pwm->pwms, priv->clock.freq);
+ u32 pwml_ns = can_tqmin_to_ns(pwm->pwml, priv->clock.freq);
+
+ if (pwms_ns + pwml_ns > CAN_PWM_NS_MAX) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "The PWM symbol duration: %u ns may not exceed %u ns",
+ pwms_ns + pwml_ns, CAN_PWM_NS_MAX);
+ return -EINVAL;
+ }
+
+ if (pwms_ns < CAN_PWM_DECODE_NS) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWMS: %u ns shall be at least %u ns",
+ pwms_ns, CAN_PWM_DECODE_NS);
+ return -EINVAL;
+ }
+
+ if (pwm->pwms >= pwm->pwml) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWMS: %u tqmin shall be smaller than PWML: %u tqmin",
+ pwm->pwms, pwm->pwml);
+ return -EINVAL;
+ }
+
+ if (pwml_ns - pwms_ns < 2 * CAN_PWM_DECODE_NS) {
+ NL_SET_ERR_MSG_FMT(extack,
+						   "At least %u ns shall separate PWMS: %u ns from PWML: %u ns",
+ 2 * CAN_PWM_DECODE_NS, pwms_ns, pwml_ns);
+ return -EINVAL;
+ }
+
+ if (xl_bit_time_tqmin % (pwm->pwms + pwm->pwml) != 0) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWM duration: %u tqmin does not divide XL's bit time: %u tqmin",
+ pwm->pwms + pwm->pwml, xl_bit_time_tqmin);
+ return -EINVAL;
+ }
+
+ if (pwm->pwmo >= pwm->pwms + pwm->pwml) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWMO: %u tqmin can not be greater than PWMS + PWML: %u tqmin",
+ pwm->pwmo, pwm->pwms + pwm->pwml);
+ return -EINVAL;
+ }
+
+ if (nom_bit_time_tqmin % (pwm->pwms + pwm->pwml) != pwm->pwmo) {
+ NL_SET_ERR_MSG_FMT(extack,
+				   "Can not assemble nominal bit time: %u tqmin out of PWMS + PWML and PWMO",
+ nom_bit_time_tqmin);
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/can/dev/calc_bittiming.c b/drivers/net/can/dev/calc_bittiming.c
index 3809c148fb88..cc4022241553 100644
--- a/drivers/net/can/dev/calc_bittiming.c
+++ b/drivers/net/can/dev/calc_bittiming.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
* Copyright (C) 2006 Andrey Volkov, Varma Electronics
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (C) 2021-2025 Vincent Mailhol <mailhol@kernel.org>
*/
#include <linux/units.h>
@@ -9,6 +10,33 @@
#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
+/* CiA recommended sample points for Non Return to Zero encoding. */
+static int can_calc_sample_point_nrz(const struct can_bittiming *bt)
+{
+ if (bt->bitrate > 800 * KILO /* BPS */)
+ return 750;
+
+ if (bt->bitrate > 500 * KILO /* BPS */)
+ return 800;
+
+ return 875;
+}
+
+/* Sample points for Pulse-Width Modulation encoding. */
+static int can_calc_sample_point_pwm(const struct can_bittiming *bt)
+{
+ if (bt->bitrate > 15 * MEGA /* BPS */)
+ return 625;
+
+ if (bt->bitrate > 9 * MEGA /* BPS */)
+ return 600;
+
+ if (bt->bitrate > 4 * MEGA /* BPS */)
+ return 560;
+
+ return 520;
+}
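+
+/* Both helpers return the sample point in tenths of a percent, e.g.
+ * can_calc_sample_point_pwm() yields 600 (i.e. 60.0%) for a 10 Mbit/s
+ * XL data phase.
+ */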
+
/* Bit-timing calculation derived from:
*
* Code based on LinCAN sources and H8S2638 project
@@ -23,7 +51,7 @@
*/
static int
can_update_sample_point(const struct can_bittiming_const *btc,
- const unsigned int sample_point_nominal, const unsigned int tseg,
+ const unsigned int sample_point_reference, const unsigned int tseg,
unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
unsigned int *sample_point_error_ptr)
{
@@ -34,7 +62,7 @@ can_update_sample_point(const struct can_bittiming_const *btc,
for (i = 0; i <= 1; i++) {
tseg2 = tseg + CAN_SYNC_SEG -
- (sample_point_nominal * (tseg + CAN_SYNC_SEG)) /
+ (sample_point_reference * (tseg + CAN_SYNC_SEG)) /
1000 - i;
tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
tseg1 = tseg - tseg2;
@@ -45,9 +73,9 @@ can_update_sample_point(const struct can_bittiming_const *btc,
sample_point = 1000 * (tseg + CAN_SYNC_SEG - tseg2) /
(tseg + CAN_SYNC_SEG);
- sample_point_error = abs(sample_point_nominal - sample_point);
+ sample_point_error = abs(sample_point_reference - sample_point);
- if (sample_point <= sample_point_nominal &&
+ if (sample_point <= sample_point_reference &&
sample_point_error < best_sample_point_error) {
best_sample_point = sample_point;
best_sample_point_error = sample_point_error;
@@ -67,28 +95,24 @@ int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
{
struct can_priv *priv = netdev_priv(dev);
unsigned int bitrate; /* current bitrate */
- unsigned int bitrate_error; /* difference between current and nominal value */
+ unsigned int bitrate_error; /* diff between calculated and reference value */
unsigned int best_bitrate_error = UINT_MAX;
- unsigned int sample_point_error; /* difference between current and nominal value */
+ unsigned int sample_point_error; /* diff between calculated and reference value */
unsigned int best_sample_point_error = UINT_MAX;
- unsigned int sample_point_nominal; /* nominal sample point */
+ unsigned int sample_point_reference; /* reference sample point */
unsigned int best_tseg = 0; /* current best value for tseg */
unsigned int best_brp = 0; /* current best value for brp */
unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
u64 v64;
int err;
- /* Use CiA recommended sample points */
- if (bt->sample_point) {
- sample_point_nominal = bt->sample_point;
- } else {
- if (bt->bitrate > 800 * KILO /* BPS */)
- sample_point_nominal = 750;
- else if (bt->bitrate > 500 * KILO /* BPS */)
- sample_point_nominal = 800;
- else
- sample_point_nominal = 875;
- }
+ if (bt->sample_point)
+ sample_point_reference = bt->sample_point;
+ else if (btc == priv->xl.data_bittiming_const &&
+ (priv->ctrlmode & CAN_CTRLMODE_XL_TMS))
+ sample_point_reference = can_calc_sample_point_pwm(bt);
+ else
+ sample_point_reference = can_calc_sample_point_nrz(bt);
/* tseg even = round down, odd = round up */
for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
@@ -114,7 +138,7 @@ int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
if (bitrate_error < best_bitrate_error)
best_sample_point_error = UINT_MAX;
- can_update_sample_point(btc, sample_point_nominal, tseg / 2,
+ can_update_sample_point(btc, sample_point_reference, tseg / 2,
&tseg1, &tseg2, &sample_point_error);
if (sample_point_error >= best_sample_point_error)
continue;
@@ -129,23 +153,26 @@ int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
}
if (best_bitrate_error) {
- /* Error in one-tenth of a percent */
- v64 = (u64)best_bitrate_error * 1000;
+ /* Error in one-hundredth of a percent */
+ v64 = (u64)best_bitrate_error * 10000;
do_div(v64, bt->bitrate);
bitrate_error = (u32)v64;
+ /* print at least 0.01% if the error is smaller */
+ bitrate_error = max(bitrate_error, 1U);
if (bitrate_error > CAN_CALC_MAX_ERROR) {
NL_SET_ERR_MSG_FMT(extack,
- "bitrate error: %u.%u%% too high",
- bitrate_error / 10, bitrate_error % 10);
+ "bitrate error: %u.%02u%% too high",
+ bitrate_error / 100,
+ bitrate_error % 100);
return -EINVAL;
}
NL_SET_ERR_MSG_FMT(extack,
- "bitrate error: %u.%u%%",
- bitrate_error / 10, bitrate_error % 10);
+ "bitrate error: %u.%02u%%",
+ bitrate_error / 100, bitrate_error % 100);
}
/* real sample point */
- bt->sample_point = can_update_sample_point(btc, sample_point_nominal,
+ bt->sample_point = can_update_sample_point(btc, sample_point_reference,
best_tseg, &tseg1, &tseg2,
NULL);
@@ -173,13 +200,15 @@ int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
const struct can_bittiming *dbt,
- u32 *ctrlmode, u32 ctrlmode_supported)
+ u32 tdc_mask, u32 *ctrlmode, u32 ctrlmode_supported)
{
- if (!tdc_const || !(ctrlmode_supported & CAN_CTRLMODE_TDC_AUTO))
+ u32 tdc_auto = tdc_mask & CAN_CTRLMODE_TDC_AUTO_MASK;
+
+ if (!tdc_const || !(ctrlmode_supported & tdc_auto))
return;
- *ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
+ *ctrlmode &= ~tdc_mask;
/* As specified in ISO 11898-1 section 11.3.3 "Transmitter
* delay compensation" (TDC) is only applicable if data BRP is
@@ -193,6 +222,41 @@ void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
if (sample_point_in_tc < tdc_const->tdco_min)
return;
tdc->tdco = min(sample_point_in_tc, tdc_const->tdco_max);
- *ctrlmode |= CAN_CTRLMODE_TDC_AUTO;
+ *ctrlmode |= tdc_auto;
}
}
+
+int can_calc_pwm(struct net_device *dev, struct netlink_ext_ack *extack)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ const struct can_pwm_const *pwm_const = priv->xl.pwm_const;
+ struct can_pwm *pwm = &priv->xl.pwm;
+ u32 xl_tqmin = can_bit_time_tqmin(&priv->xl.data_bittiming);
+ u32 xl_ns = can_tqmin_to_ns(xl_tqmin, priv->clock.freq);
+ u32 nom_tqmin = can_bit_time_tqmin(&priv->bittiming);
+ int pwm_per_bit_max = xl_tqmin / (pwm_const->pwms_min + pwm_const->pwml_min);
+ int pwm_per_bit;
+ u32 pwm_tqmin;
+
+	/* For a data bitrate of 5 Mbit/s or greater, xl_ns < CAN_PWM_NS_MAX,
+	 * giving a pwm_per_bit of 1, and the loop breaks immediately.
+	 */
+ for (pwm_per_bit = DIV_ROUND_UP(xl_ns, CAN_PWM_NS_MAX);
+ pwm_per_bit <= pwm_per_bit_max; pwm_per_bit++)
+ if (xl_tqmin % pwm_per_bit == 0)
+ break;
+
+ if (pwm_per_bit > pwm_per_bit_max) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "Can not divide the XL data phase's bit time: %u tqmin into multiple PWM symbols",
+ xl_tqmin);
+ return -EINVAL;
+ }
+
+ pwm_tqmin = xl_tqmin / pwm_per_bit;
+ pwm->pwms = DIV_ROUND_UP_POW2(pwm_tqmin, 4);
+ pwm->pwml = pwm_tqmin - pwm->pwms;
+ pwm->pwmo = nom_tqmin % pwm_tqmin;
+
+ return 0;
+}
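+
+/* Example: with an XL bit time of 16 tqmin and pwms_min + pwml_min = 3,
+ * pwm_per_bit_max is 5. Starting from DIV_ROUND_UP(xl_ns, CAN_PWM_NS_MAX),
+ * the loop picks the smallest symbol count dividing 16 so that each PWM
+ * symbol stays within CAN_PWM_NS_MAX, and pwmo is the remainder of the
+ * nominal bit time modulo the resulting symbol length.
+ */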
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index 6792c14fd7eb..091f30e94c61 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -4,17 +4,17 @@
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
*/
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/workqueue.h>
#include <linux/can.h>
#include <linux/can/can-ml.h>
#include <linux/can/dev.h>
#include <linux/can/skb.h>
#include <linux/gpio/consumer.h>
+#include <linux/if_arp.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
static void can_update_state_error_stats(struct net_device *dev,
enum can_state new_state)
@@ -85,11 +85,52 @@ const char *can_get_state_str(const enum can_state state)
default:
return "<unknown>";
}
-
- return "<unknown>";
}
EXPORT_SYMBOL_GPL(can_get_state_str);
+const char *can_get_ctrlmode_str(u32 ctrlmode)
+{
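+	/* Decode only the lowest set bit so that a multi-flag mask still maps
+	 * to a single readable name.
+	 */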
+ switch (ctrlmode & ~(ctrlmode - 1)) {
+ case 0:
+ return "(none)";
+ case CAN_CTRLMODE_LOOPBACK:
+ return "LOOPBACK";
+ case CAN_CTRLMODE_LISTENONLY:
+ return "LISTEN-ONLY";
+ case CAN_CTRLMODE_3_SAMPLES:
+ return "TRIPLE-SAMPLING";
+ case CAN_CTRLMODE_ONE_SHOT:
+ return "ONE-SHOT";
+ case CAN_CTRLMODE_BERR_REPORTING:
+ return "BERR-REPORTING";
+ case CAN_CTRLMODE_FD:
+ return "FD";
+ case CAN_CTRLMODE_PRESUME_ACK:
+ return "PRESUME-ACK";
+ case CAN_CTRLMODE_FD_NON_ISO:
+ return "FD-NON-ISO";
+ case CAN_CTRLMODE_CC_LEN8_DLC:
+ return "CC-LEN8-DLC";
+ case CAN_CTRLMODE_TDC_AUTO:
+ return "TDC-AUTO";
+ case CAN_CTRLMODE_TDC_MANUAL:
+ return "TDC-MANUAL";
+ case CAN_CTRLMODE_RESTRICTED:
+ return "RESTRICTED";
+ case CAN_CTRLMODE_XL:
+ return "XL";
+ case CAN_CTRLMODE_XL_TDC_AUTO:
+ return "XL-TDC-AUTO";
+ case CAN_CTRLMODE_XL_TDC_MANUAL:
+ return "XL-TDC-MANUAL";
+ case CAN_CTRLMODE_XL_TMS:
+ return "TMS";
+ default:
+ return "<unknown>";
+ }
+}
+EXPORT_SYMBOL_GPL(can_get_ctrlmode_str);
+
static enum can_state can_state_err_to_state(u16 err)
{
if (err < CAN_ERROR_WARNING_THRESHOLD)
@@ -147,13 +188,16 @@ void can_change_state(struct net_device *dev, struct can_frame *cf,
EXPORT_SYMBOL_GPL(can_change_state);
/* CAN device restart for bus-off recovery */
-static void can_restart(struct net_device *dev)
+static int can_restart(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
struct sk_buff *skb;
struct can_frame *cf;
int err;
+ if (!priv->do_set_mode)
+ return -EOPNOTSUPP;
+
if (netif_carrier_ok(dev))
netdev_err(dev, "Attempt to restart for bus-off recovery, but carrier is OK?\n");
@@ -175,10 +219,14 @@ static void can_restart(struct net_device *dev)
if (err) {
netdev_err(dev, "Restart failed, error %pe\n", ERR_PTR(err));
netif_carrier_off(dev);
+
+ return err;
} else {
netdev_dbg(dev, "Restarted\n");
priv->can_stats.restarts++;
}
+
+ return 0;
}
static void can_restart_work(struct work_struct *work)
@@ -203,9 +251,8 @@ int can_restart_now(struct net_device *dev)
return -EBUSY;
cancel_delayed_work_sync(&priv->restart_work);
- can_restart(dev);
- return 0;
+ return can_restart(dev);
}
/* CAN bus-off
@@ -236,6 +283,8 @@ void can_setup(struct net_device *dev)
{
dev->type = ARPHRD_CAN;
dev->mtu = CAN_MTU;
+ dev->min_mtu = CAN_MTU;
+ dev->max_mtu = CAN_MTU;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->tx_queue_len = 10;
@@ -305,72 +354,74 @@ void free_candev(struct net_device *dev)
}
EXPORT_SYMBOL_GPL(free_candev);
-/* changing MTU and control mode for CAN/CANFD devices */
-int can_change_mtu(struct net_device *dev, int new_mtu)
+void can_set_default_mtu(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
- u32 ctrlmode_static = can_get_static_ctrlmode(priv);
- /* Do not allow changing the MTU while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
+ if (priv->ctrlmode & CAN_CTRLMODE_XL) {
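+		/* CAN XL allows a range of MTUs: keep the current value if it
+		 * already is a valid CAN XL MTU.
+		 */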
+ if (can_is_canxl_dev_mtu(dev->mtu))
+ return;
+ dev->mtu = CANXL_MTU;
+ dev->min_mtu = CANXL_MIN_MTU;
+ dev->max_mtu = CANXL_MAX_MTU;
+ } else if (priv->ctrlmode & CAN_CTRLMODE_FD) {
+ dev->mtu = CANFD_MTU;
+ dev->min_mtu = CANFD_MTU;
+ dev->max_mtu = CANFD_MTU;
+ } else {
+ dev->mtu = CAN_MTU;
+ dev->min_mtu = CAN_MTU;
+ dev->max_mtu = CAN_MTU;
+ }
+}
- /* allow change of MTU according to the CANFD ability of the device */
- switch (new_mtu) {
- case CAN_MTU:
- /* 'CANFD-only' controllers can not switch to CAN_MTU */
- if (ctrlmode_static & CAN_CTRLMODE_FD)
- return -EINVAL;
+/* helper to define static CAN controller features at device creation time */
+int can_set_static_ctrlmode(struct net_device *dev, u32 static_mode)
+{
+ struct can_priv *priv = netdev_priv(dev);
- priv->ctrlmode &= ~CAN_CTRLMODE_FD;
- break;
+ /* alloc_candev() succeeded => netdev_priv() is valid at this point */
+ if (priv->ctrlmode_supported & static_mode) {
+ netdev_warn(dev,
+ "Controller features can not be supported and static at the same time\n");
+ return -EINVAL;
+ }
+ priv->ctrlmode = static_mode;
- case CANFD_MTU:
- /* check for potential CANFD ability */
- if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) &&
- !(ctrlmode_static & CAN_CTRLMODE_FD))
- return -EINVAL;
+ /* override MTU which was set by default in can_setup()? */
+ can_set_default_mtu(dev);
- priv->ctrlmode |= CAN_CTRLMODE_FD;
- break;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(can_set_static_ctrlmode);
- default:
- return -EINVAL;
- }
+/* generic implementation of netdev_ops::ndo_hwtstamp_get for CAN devices
+ * supporting hardware timestamps
+ */
+int can_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *cfg)
+{
+ cfg->tx_type = HWTSTAMP_TX_ON;
+ cfg->rx_filter = HWTSTAMP_FILTER_ALL;
- WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
-EXPORT_SYMBOL_GPL(can_change_mtu);
+EXPORT_SYMBOL(can_hwtstamp_get);
-/* generic implementation of netdev_ops::ndo_eth_ioctl for CAN devices
+/* generic implementation of netdev_ops::ndo_hwtstamp_set for CAN devices
* supporting hardware timestamps
*/
-int can_eth_ioctl_hwts(struct net_device *netdev, struct ifreq *ifr, int cmd)
+int can_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config hwts_cfg = { 0 };
-
- switch (cmd) {
- case SIOCSHWTSTAMP: /* set */
- if (copy_from_user(&hwts_cfg, ifr->ifr_data, sizeof(hwts_cfg)))
- return -EFAULT;
- if (hwts_cfg.tx_type == HWTSTAMP_TX_ON &&
- hwts_cfg.rx_filter == HWTSTAMP_FILTER_ALL)
- return 0;
- return -ERANGE;
-
- case SIOCGHWTSTAMP: /* get */
- hwts_cfg.tx_type = HWTSTAMP_TX_ON;
- hwts_cfg.rx_filter = HWTSTAMP_FILTER_ALL;
- if (copy_to_user(ifr->ifr_data, &hwts_cfg, sizeof(hwts_cfg)))
- return -EFAULT;
+ if (cfg->tx_type == HWTSTAMP_TX_ON &&
+ cfg->rx_filter == HWTSTAMP_FILTER_ALL)
return 0;
-
- default:
- return -EOPNOTSUPP;
- }
+ NL_SET_ERR_MSG_MOD(extack, "Only TX on and RX all packets filter supported");
+ return -ERANGE;
}
-EXPORT_SYMBOL(can_eth_ioctl_hwts);
+EXPORT_SYMBOL(can_hwtstamp_set);
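+
+/* Drivers exposing hardware timestamps would typically hook the two helpers
+ * above into their net_device_ops, e.g.:
+ *
+ *	.ndo_hwtstamp_get = can_hwtstamp_get,
+ *	.ndo_hwtstamp_set = can_hwtstamp_set,
+ */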
/* generic implementation of ethtool_ops::get_ts_info for CAN devices
* supporting hardware timestamps
@@ -406,8 +457,8 @@ int open_candev(struct net_device *dev)
/* For CAN FD the data bitrate has to be >= the arbitration bitrate */
if ((priv->ctrlmode & CAN_CTRLMODE_FD) &&
- (!priv->data_bittiming.bitrate ||
- priv->data_bittiming.bitrate < priv->bittiming.bitrate)) {
+ (!priv->fd.data_bittiming.bitrate ||
+ priv->fd.data_bittiming.bitrate < priv->bittiming.bitrate)) {
netdev_err(dev, "incorrect/missing data bit-timing\n");
return -EINVAL;
}
@@ -468,7 +519,7 @@ static int can_set_termination(struct net_device *ndev, u16 term)
else
set = 0;
- gpiod_set_value(priv->termination_gpio, set);
+ gpiod_set_value_cansleep(priv->termination_gpio, set);
return 0;
}
@@ -545,16 +596,16 @@ int register_candev(struct net_device *dev)
if (!priv->bitrate_const != !priv->bitrate_const_cnt)
return -EINVAL;
- if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt)
+ if (!priv->fd.data_bitrate_const != !priv->fd.data_bitrate_const_cnt)
return -EINVAL;
/* We only support either fixed bit rates or bit timing const. */
- if ((priv->bitrate_const || priv->data_bitrate_const) &&
- (priv->bittiming_const || priv->data_bittiming_const))
+ if ((priv->bitrate_const || priv->fd.data_bitrate_const) &&
+ (priv->bittiming_const || priv->fd.data_bittiming_const))
return -EINVAL;
if (!can_bittiming_const_valid(priv->bittiming_const) ||
- !can_bittiming_const_valid(priv->data_bittiming_const))
+ !can_bittiming_const_valid(priv->fd.data_bittiming_const))
return -EINVAL;
if (!priv->termination_const) {
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index 01aacdcda260..d6b0e686fb11 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -2,7 +2,7 @@
/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
* Copyright (C) 2006 Andrey Volkov, Varma Electronics
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
- * Copyright (C) 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ * Copyright (C) 2021-2025 Vincent Mailhol <mailhol@kernel.org>
*/
#include <linux/can/dev.h>
@@ -18,10 +18,14 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
[IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) },
[IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) },
[IFLA_CAN_DATA_BITTIMING] = { .len = sizeof(struct can_bittiming) },
- [IFLA_CAN_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) },
+ [IFLA_CAN_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) },
[IFLA_CAN_TERMINATION] = { .type = NLA_U16 },
[IFLA_CAN_TDC] = { .type = NLA_NESTED },
[IFLA_CAN_CTRLMODE_EXT] = { .type = NLA_NESTED },
+ [IFLA_CAN_XL_DATA_BITTIMING] = { .len = sizeof(struct can_bittiming) },
+ [IFLA_CAN_XL_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) },
+ [IFLA_CAN_XL_TDC] = { .type = NLA_NESTED },
+ [IFLA_CAN_XL_PWM] = { .type = NLA_NESTED },
};
static const struct nla_policy can_tdc_policy[IFLA_CAN_TDC_MAX + 1] = {
@@ -36,116 +40,360 @@ static const struct nla_policy can_tdc_policy[IFLA_CAN_TDC_MAX + 1] = {
[IFLA_CAN_TDC_TDCF] = { .type = NLA_U32 },
};
-static int can_validate_bittiming(const struct can_bittiming *bt,
- struct netlink_ext_ack *extack)
+static const struct nla_policy can_pwm_policy[IFLA_CAN_PWM_MAX + 1] = {
+ [IFLA_CAN_PWM_PWMS_MIN] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWMS_MAX] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWML_MIN] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWML_MAX] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWMO_MIN] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWMO_MAX] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWMS] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWML] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWMO] = { .type = NLA_U32 },
+};
+
+static int can_validate_bittiming(struct nlattr *data[],
+ struct netlink_ext_ack *extack,
+ int ifla_can_bittiming)
{
+ struct can_bittiming *bt;
+
+ if (!data[ifla_can_bittiming])
+ return 0;
+
+ static_assert(__alignof__(*bt) <= NLA_ALIGNTO);
+ bt = nla_data(data[ifla_can_bittiming]);
+
/* sample point is in one-tenth of a percent */
if (bt->sample_point >= 1000) {
NL_SET_ERR_MSG(extack, "sample point must be between 0 and 100%");
-
return -EINVAL;
}
return 0;
}
-static int can_validate(struct nlattr *tb[], struct nlattr *data[],
- struct netlink_ext_ack *extack)
+static int can_validate_tdc(struct nlattr *data_tdc,
+ struct netlink_ext_ack *extack, u32 tdc_flags)
{
- bool is_can_fd = false;
+ bool tdc_manual = tdc_flags & CAN_CTRLMODE_TDC_MANUAL_MASK;
+ bool tdc_auto = tdc_flags & CAN_CTRLMODE_TDC_AUTO_MASK;
int err;
- /* Make sure that valid CAN FD configurations always consist of
- * - nominal/arbitration bittiming
- * - data bittiming
- * - control mode with CAN_CTRLMODE_FD set
- * - TDC parameters are coherent (details below)
+ if (tdc_auto && tdc_manual) {
+ NL_SET_ERR_MSG(extack,
+ "TDC manual and auto modes are mutually exclusive");
+ return -EOPNOTSUPP;
+ }
+
+ /* If one of the CAN_CTRLMODE_{,XL}_TDC_* flags is set then TDC
+ * must be set and vice-versa
*/
+ if ((tdc_auto || tdc_manual) && !data_tdc) {
+ NL_SET_ERR_MSG(extack, "TDC parameters are missing");
+ return -EOPNOTSUPP;
+ }
+ if (!(tdc_auto || tdc_manual) && data_tdc) {
+ NL_SET_ERR_MSG(extack, "TDC mode (auto or manual) is missing");
+ return -EOPNOTSUPP;
+ }
- if (!data)
+ /* If providing TDC parameters, at least TDCO is needed. TDCV is
+ * needed if and only if CAN_CTRLMODE_{,XL}_TDC_MANUAL is set
+ */
+ if (data_tdc) {
+ struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1];
+
+ err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX,
+ data_tdc, can_tdc_policy, extack);
+ if (err)
+ return err;
+
+ if (tb_tdc[IFLA_CAN_TDC_TDCV]) {
+ if (tdc_auto) {
+ NL_SET_ERR_MSG(extack,
+ "TDCV is incompatible with TDC auto mode");
+ return -EOPNOTSUPP;
+ }
+ } else {
+ if (tdc_manual) {
+ NL_SET_ERR_MSG(extack,
+ "TDC manual mode requires TDCV");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (!tb_tdc[IFLA_CAN_TDC_TDCO]) {
+ NL_SET_ERR_MSG(extack, "TDCO is missing");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int can_validate_pwm(struct nlattr *data[],
+ struct netlink_ext_ack *extack, u32 flags)
+{
+ struct nlattr *tb_pwm[IFLA_CAN_PWM_MAX + 1];
+ int err;
+
+ if (!data[IFLA_CAN_XL_PWM])
return 0;
- if (data[IFLA_CAN_CTRLMODE]) {
- struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
- u32 tdc_flags = cm->flags & CAN_CTRLMODE_TDC_MASK;
+ if (!(flags & CAN_CTRLMODE_XL_TMS)) {
+ NL_SET_ERR_MSG(extack, "PWM requires TMS");
+ return -EOPNOTSUPP;
+ }
+
+ err = nla_parse_nested(tb_pwm, IFLA_CAN_PWM_MAX, data[IFLA_CAN_XL_PWM],
+ can_pwm_policy, extack);
+ if (err)
+ return err;
+
+ if (!tb_pwm[IFLA_CAN_PWM_PWMS] != !tb_pwm[IFLA_CAN_PWM_PWML]) {
+ NL_SET_ERR_MSG(extack,
+ "Provide either both PWMS and PWML, or none for automatic calculation");
+ return -EOPNOTSUPP;
+ }
+
+ if (tb_pwm[IFLA_CAN_PWM_PWMO] &&
+ (!tb_pwm[IFLA_CAN_PWM_PWMS] || !tb_pwm[IFLA_CAN_PWM_PWML])) {
+ NL_SET_ERR_MSG(extack, "PWMO requires both PWMS and PWML");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int can_validate_databittiming(struct nlattr *data[],
+ struct netlink_ext_ack *extack,
+ int ifla_can_data_bittiming, u32 flags)
+{
+ struct nlattr *data_tdc;
+ const char *type;
+ u32 tdc_flags;
+ bool is_on;
+ int err;
- is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
+ /* Make sure that valid CAN FD/XL configurations always consist of
+ * - nominal/arbitration bittiming
+ * - data bittiming
+ * - control mode with CAN_CTRLMODE_{FD,XL} set
+ * - TDC parameters are coherent (details in can_validate_tdc())
+ */
+
+ if (ifla_can_data_bittiming == IFLA_CAN_DATA_BITTIMING) {
+ data_tdc = data[IFLA_CAN_TDC];
+ tdc_flags = flags & CAN_CTRLMODE_FD_TDC_MASK;
+ is_on = flags & CAN_CTRLMODE_FD;
+ type = "FD";
+ } else {
+ data_tdc = data[IFLA_CAN_XL_TDC];
+ tdc_flags = flags & CAN_CTRLMODE_XL_TDC_MASK;
+ is_on = flags & CAN_CTRLMODE_XL;
+ type = "XL";
+ }
- /* CAN_CTRLMODE_TDC_{AUTO,MANUAL} are mutually exclusive */
- if (tdc_flags == CAN_CTRLMODE_TDC_MASK)
+ if (is_on) {
+ if (!data[IFLA_CAN_BITTIMING] || !data[ifla_can_data_bittiming]) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "Provide both nominal and %s data bittiming",
+ type);
return -EOPNOTSUPP;
- /* If one of the CAN_CTRLMODE_TDC_* flag is set then
- * TDC must be set and vice-versa
- */
- if (!!tdc_flags != !!data[IFLA_CAN_TDC])
+ }
+ } else {
+ if (data[ifla_can_data_bittiming]) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "%s data bittiming requires CAN %s",
+ type, type);
return -EOPNOTSUPP;
- /* If providing TDC parameters, at least TDCO is
- * needed. TDCV is needed if and only if
- * CAN_CTRLMODE_TDC_MANUAL is set
- */
- if (data[IFLA_CAN_TDC]) {
- struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1];
+ }
+ if (data_tdc) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "%s TDC requires CAN %s",
+ type, type);
+ return -EOPNOTSUPP;
+ }
+ }
- err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX,
- data[IFLA_CAN_TDC],
- can_tdc_policy, extack);
- if (err)
- return err;
+ err = can_validate_bittiming(data, extack, ifla_can_data_bittiming);
+ if (err)
+ return err;
- if (tb_tdc[IFLA_CAN_TDC_TDCV]) {
- if (tdc_flags & CAN_CTRLMODE_TDC_AUTO)
- return -EOPNOTSUPP;
- } else {
- if (tdc_flags & CAN_CTRLMODE_TDC_MANUAL)
- return -EOPNOTSUPP;
- }
+ err = can_validate_tdc(data_tdc, extack, tdc_flags);
+ if (err)
+ return err;
+
+ return 0;
+}
- if (!tb_tdc[IFLA_CAN_TDC_TDCO])
+static int can_validate_xl_flags(struct netlink_ext_ack *extack,
+ u32 masked_flags, u32 mask)
+{
+ if (masked_flags & CAN_CTRLMODE_XL) {
+ if (masked_flags & CAN_CTRLMODE_XL_TMS) {
+ const u32 tms_conflicts_mask = CAN_CTRLMODE_FD |
+ CAN_CTRLMODE_XL_TDC_MASK;
+ u32 tms_conflicts = masked_flags & tms_conflicts_mask;
+
+ if (tms_conflicts) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "TMS and %s are mutually exclusive",
+ can_get_ctrlmode_str(tms_conflicts));
return -EOPNOTSUPP;
+ }
+ }
+ } else {
+ if (mask & CAN_CTRLMODE_XL_TMS) {
+ NL_SET_ERR_MSG(extack, "TMS requires CAN XL");
+ return -EOPNOTSUPP;
}
}
- if (data[IFLA_CAN_BITTIMING]) {
- struct can_bittiming bt;
+ return 0;
+}
- memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
- err = can_validate_bittiming(&bt, extack);
+static int can_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ u32 flags = 0;
+ int err;
+
+ if (!data)
+ return 0;
+
+ if (data[IFLA_CAN_CTRLMODE]) {
+ struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+
+ flags = cm->flags & cm->mask;
+
+ if ((flags & CAN_CTRLMODE_LISTENONLY) &&
+ (flags & CAN_CTRLMODE_RESTRICTED)) {
+ NL_SET_ERR_MSG(extack,
+ "LISTEN-ONLY and RESTRICTED modes are mutually exclusive");
+ return -EOPNOTSUPP;
+ }
+
+ err = can_validate_xl_flags(extack, flags, cm->mask);
if (err)
return err;
}
- if (is_can_fd) {
- if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
- return -EOPNOTSUPP;
+ err = can_validate_bittiming(data, extack, IFLA_CAN_BITTIMING);
+ if (err)
+ return err;
+
+ err = can_validate_databittiming(data, extack,
+ IFLA_CAN_DATA_BITTIMING, flags);
+ if (err)
+ return err;
+
+ err = can_validate_databittiming(data, extack,
+ IFLA_CAN_XL_DATA_BITTIMING, flags);
+ if (err)
+ return err;
+
+ err = can_validate_pwm(data, extack, flags);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int can_ctrlmode_changelink(struct net_device *dev,
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ struct can_ctrlmode *cm;
+ u32 ctrlstatic, maskedflags, deactivated, notsupp, ctrlstatic_missing;
+
+ if (!data[IFLA_CAN_CTRLMODE])
+ return 0;
+
+ /* Do not allow changing controller mode while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+ ctrlstatic = can_get_static_ctrlmode(priv);
+ maskedflags = cm->flags & cm->mask;
+ deactivated = ~cm->flags & cm->mask;
+ notsupp = maskedflags & ~(priv->ctrlmode_supported | ctrlstatic);
+
+ if (notsupp) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "requested control mode %s not supported",
+ can_get_ctrlmode_str(notsupp));
+ return -EOPNOTSUPP;
}
- if (data[IFLA_CAN_DATA_BITTIMING] || data[IFLA_CAN_TDC]) {
- if (!is_can_fd)
- return -EOPNOTSUPP;
+ /* do not check for static fd-non-iso if 'fd' is disabled */
+ if (!(maskedflags & CAN_CTRLMODE_FD))
+ ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
+
+	/* make sure static options are provided by configuration */
+	ctrlstatic_missing = (maskedflags & ctrlstatic) ^ ctrlstatic;
+	if (ctrlstatic_missing) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "missing required %s static control mode",
+ can_get_ctrlmode_str(ctrlstatic_missing));
+ return -EOPNOTSUPP;
}
- if (data[IFLA_CAN_DATA_BITTIMING]) {
- struct can_bittiming bt;
+ /* If FD was active and is not turned off, check for XL conflicts */
+ if (priv->ctrlmode & CAN_CTRLMODE_FD & ~deactivated) {
+ if (maskedflags & CAN_CTRLMODE_XL_TMS) {
+ NL_SET_ERR_MSG(extack,
+ "TMS can not be activated while CAN FD is on");
+ return -EOPNOTSUPP;
+ }
+ }
- memcpy(&bt, nla_data(data[IFLA_CAN_DATA_BITTIMING]), sizeof(bt));
- err = can_validate_bittiming(&bt, extack);
- if (err)
- return err;
+ /* If a top dependency flag is provided, reset all its dependencies */
+ if (cm->mask & CAN_CTRLMODE_FD)
+ priv->ctrlmode &= ~CAN_CTRLMODE_FD_TDC_MASK;
+ if (cm->mask & CAN_CTRLMODE_XL)
+ priv->ctrlmode &= ~(CAN_CTRLMODE_XL_TDC_MASK |
+ CAN_CTRLMODE_XL_TMS);
+
+ /* clear bits to be modified and copy the flag values */
+ priv->ctrlmode &= ~cm->mask;
+ priv->ctrlmode |= maskedflags;
+
+ /* Wipe potential leftovers from previous CAN FD/XL config */
+ if (!(priv->ctrlmode & CAN_CTRLMODE_FD)) {
+ memset(&priv->fd.data_bittiming, 0,
+ sizeof(priv->fd.data_bittiming));
+ priv->ctrlmode &= ~CAN_CTRLMODE_FD_TDC_MASK;
+ memset(&priv->fd.tdc, 0, sizeof(priv->fd.tdc));
}
+ if (!(priv->ctrlmode & CAN_CTRLMODE_XL)) {
+ memset(&priv->xl.data_bittiming, 0,
+		       sizeof(priv->xl.data_bittiming));
+ priv->ctrlmode &= ~CAN_CTRLMODE_XL_TDC_MASK;
+ memset(&priv->xl.tdc, 0, sizeof(priv->xl.tdc));
+ memset(&priv->xl.pwm, 0, sizeof(priv->xl.pwm));
+ }
+
+ can_set_default_mtu(dev);
return 0;
}
-static int can_tdc_changelink(struct can_priv *priv, const struct nlattr *nla,
+static int can_tdc_changelink(struct data_bittiming_params *dbt_params,
+ const struct nlattr *nla,
struct netlink_ext_ack *extack)
{
struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1];
struct can_tdc tdc = { 0 };
- const struct can_tdc_const *tdc_const = priv->tdc_const;
+ const struct can_tdc_const *tdc_const = dbt_params->tdc_const;
int err;
- if (!tdc_const || !can_tdc_is_enabled(priv))
+ if (!tdc_const) {
+ NL_SET_ERR_MSG(extack, "The device does not support TDC");
return -EOPNOTSUPP;
+ }
err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX, nla,
can_tdc_policy, extack);
@@ -179,69 +427,181 @@ static int can_tdc_changelink(struct can_priv *priv, const struct nlattr *nla,
tdc.tdcf = tdcf;
}
- priv->tdc = tdc;
+ dbt_params->tdc = tdc;
return 0;
}
-static int can_changelink(struct net_device *dev, struct nlattr *tb[],
- struct nlattr *data[],
- struct netlink_ext_ack *extack)
+static int can_dbt_changelink(struct net_device *dev, struct nlattr *data[],
+ bool fd, struct netlink_ext_ack *extack)
{
+ struct nlattr *data_bittiming, *data_tdc;
struct can_priv *priv = netdev_priv(dev);
- u32 tdc_mask = 0;
+ struct data_bittiming_params *dbt_params;
+ struct can_bittiming dbt;
+ bool need_tdc_calc = false;
+ u32 tdc_mask;
int err;
- /* We need synchronization with dev->stop() */
- ASSERT_RTNL();
+ if (fd) {
+ data_bittiming = data[IFLA_CAN_DATA_BITTIMING];
+ data_tdc = data[IFLA_CAN_TDC];
+ dbt_params = &priv->fd;
+ tdc_mask = CAN_CTRLMODE_FD_TDC_MASK;
+ } else {
+ data_bittiming = data[IFLA_CAN_XL_DATA_BITTIMING];
+ data_tdc = data[IFLA_CAN_XL_TDC];
+ dbt_params = &priv->xl;
+ tdc_mask = CAN_CTRLMODE_XL_TDC_MASK;
+ }
+
+ if (!data_bittiming)
+ return 0;
+
+ /* Do not allow changing bittiming while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ /* Calculate bittiming parameters based on data_bittiming_const
+ * if set, otherwise pass bitrate directly via do_set_bitrate().
+ * Bail out if neither is given.
+ */
+ if (!dbt_params->data_bittiming_const && !dbt_params->do_set_data_bittiming &&
+ !dbt_params->data_bitrate_const)
+ return -EOPNOTSUPP;
+
+ memcpy(&dbt, nla_data(data_bittiming), sizeof(dbt));
+ err = can_get_bittiming(dev, &dbt, dbt_params->data_bittiming_const,
+ dbt_params->data_bitrate_const,
+ dbt_params->data_bitrate_const_cnt, extack);
+ if (err)
+ return err;
+
+ if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "CAN data bitrate %u bps surpasses transceiver capabilities of %u bps",
+ dbt.bitrate, priv->bitrate_max);
+ return -EINVAL;
+ }
+ memset(&dbt_params->tdc, 0, sizeof(dbt_params->tdc));
if (data[IFLA_CAN_CTRLMODE]) {
- struct can_ctrlmode *cm;
- u32 ctrlstatic;
- u32 maskedflags;
+ struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
- /* Do not allow changing controller mode while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
- cm = nla_data(data[IFLA_CAN_CTRLMODE]);
- ctrlstatic = can_get_static_ctrlmode(priv);
- maskedflags = cm->flags & cm->mask;
+ if (fd || !(priv->ctrlmode & CAN_CTRLMODE_XL_TMS))
+ need_tdc_calc = !(cm->mask & tdc_mask);
+ }
+ if (data_tdc) {
+ /* TDC parameters are provided: use them */
+ err = can_tdc_changelink(dbt_params, data_tdc, extack);
+ if (err) {
+ priv->ctrlmode &= ~tdc_mask;
+ return err;
+ }
+ } else if (need_tdc_calc) {
+ /* Neither of TDC parameters nor TDC flags are provided:
+ * do calculation
+ */
+ can_calc_tdco(&dbt_params->tdc, dbt_params->tdc_const, &dbt,
+ tdc_mask, &priv->ctrlmode, priv->ctrlmode_supported);
+ } /* else: both CAN_CTRLMODE_{,XL}_TDC_{AUTO,MANUAL} are explicitly
+ * turned off. TDC is disabled: do nothing
+ */
- /* check whether provided bits are allowed to be passed */
- if (maskedflags & ~(priv->ctrlmode_supported | ctrlstatic))
- return -EOPNOTSUPP;
+ memcpy(&dbt_params->data_bittiming, &dbt, sizeof(dbt));
- /* do not check for static fd-non-iso if 'fd' is disabled */
- if (!(maskedflags & CAN_CTRLMODE_FD))
- ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
+ if (dbt_params->do_set_data_bittiming) {
+ /* Finally, set the bit-timing registers */
+ err = dbt_params->do_set_data_bittiming(dev);
+ if (err)
+ return err;
+ }
- /* make sure static options are provided by configuration */
- if ((maskedflags & ctrlstatic) != ctrlstatic)
- return -EOPNOTSUPP;
+ return 0;
+}
- /* clear bits to be modified and copy the flag values */
- priv->ctrlmode &= ~cm->mask;
- priv->ctrlmode |= maskedflags;
+static int can_pwm_changelink(struct net_device *dev,
+ const struct nlattr *pwm_nla,
+ struct netlink_ext_ack *extack)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ const struct can_pwm_const *pwm_const = priv->xl.pwm_const;
+ struct nlattr *tb_pwm[IFLA_CAN_PWM_MAX + 1];
+ struct can_pwm pwm = { 0 };
+ int err;
- /* CAN_CTRLMODE_FD can only be set when driver supports FD */
- if (priv->ctrlmode & CAN_CTRLMODE_FD) {
- dev->mtu = CANFD_MTU;
- } else {
- dev->mtu = CAN_MTU;
- memset(&priv->data_bittiming, 0,
- sizeof(priv->data_bittiming));
- priv->ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
- memset(&priv->tdc, 0, sizeof(priv->tdc));
+ if (!(priv->ctrlmode & CAN_CTRLMODE_XL_TMS))
+ return 0;
+
+ if (!pwm_const) {
+ NL_SET_ERR_MSG(extack, "The device does not support PWM");
+ return -EOPNOTSUPP;
+ }
+
+ if (!pwm_nla)
+ return can_calc_pwm(dev, extack);
+
+ err = nla_parse_nested(tb_pwm, IFLA_CAN_PWM_MAX, pwm_nla,
+ can_pwm_policy, extack);
+ if (err)
+ return err;
+
+ if (tb_pwm[IFLA_CAN_PWM_PWMS]) {
+ pwm.pwms = nla_get_u32(tb_pwm[IFLA_CAN_PWM_PWMS]);
+ if (pwm.pwms < pwm_const->pwms_min ||
+ pwm.pwms > pwm_const->pwms_max) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWMS: %u tqmin is out of range: %u...%u",
+ pwm.pwms, pwm_const->pwms_min,
+ pwm_const->pwms_max);
+ return -EINVAL;
}
+ }
- tdc_mask = cm->mask & CAN_CTRLMODE_TDC_MASK;
- /* CAN_CTRLMODE_TDC_{AUTO,MANUAL} are mutually
- * exclusive: make sure to turn the other one off
- */
- if (tdc_mask)
- priv->ctrlmode &= cm->flags | ~CAN_CTRLMODE_TDC_MASK;
+ if (tb_pwm[IFLA_CAN_PWM_PWML]) {
+ pwm.pwml = nla_get_u32(tb_pwm[IFLA_CAN_PWM_PWML]);
+ if (pwm.pwml < pwm_const->pwml_min ||
+ pwm.pwml > pwm_const->pwml_max) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWML: %u tqmin is out of range: %u...%u",
+ pwm.pwml, pwm_const->pwml_min,
+ pwm_const->pwml_max);
+ return -EINVAL;
+ }
+ }
+
+ if (tb_pwm[IFLA_CAN_PWM_PWMO]) {
+ pwm.pwmo = nla_get_u32(tb_pwm[IFLA_CAN_PWM_PWMO]);
+ if (pwm.pwmo < pwm_const->pwmo_min ||
+ pwm.pwmo > pwm_const->pwmo_max) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWMO: %u tqmin is out of range: %u...%u",
+ pwm.pwmo, pwm_const->pwmo_min,
+ pwm_const->pwmo_max);
+ return -EINVAL;
+ }
}
+ err = can_validate_pwm_bittiming(dev, &pwm, extack);
+ if (err)
+ return err;
+
+ priv->xl.pwm = pwm;
+ return 0;
+}
+
+static int can_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ int err;
+
+ /* We need synchronization with dev->stop() */
+ ASSERT_RTNL();
+
+	err = can_ctrlmode_changelink(dev, data, extack);
+	if (err)
+		return err;
+
if (data[IFLA_CAN_BITTIMING]) {
struct can_bittiming bt;
@@ -285,13 +645,27 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
}
if (data[IFLA_CAN_RESTART_MS]) {
+ unsigned int restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]);
+
+ if (restart_ms != 0 && !priv->do_set_mode) {
+ NL_SET_ERR_MSG(extack,
+ "Device doesn't support restart from Bus Off");
+ return -EOPNOTSUPP;
+ }
+
/* Do not allow changing restart delay while running */
if (dev->flags & IFF_UP)
return -EBUSY;
- priv->restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]);
+ priv->restart_ms = restart_ms;
}
if (data[IFLA_CAN_RESTART]) {
+ if (!priv->do_set_mode) {
+ NL_SET_ERR_MSG(extack,
+ "Device doesn't support restart from Bus Off");
+ return -EOPNOTSUPP;
+ }
+
/* Do not allow a restart while not running */
if (!(dev->flags & IFF_UP))
return -EINVAL;
@@ -300,75 +674,29 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
return err;
}
- if (data[IFLA_CAN_DATA_BITTIMING]) {
- struct can_bittiming dbt;
-
- /* Do not allow changing bittiming while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
-
- /* Calculate bittiming parameters based on
- * data_bittiming_const if set, otherwise pass bitrate
- * directly via do_set_bitrate(). Bail out if neither
- * is given.
- */
- if (!priv->data_bittiming_const && !priv->do_set_data_bittiming &&
- !priv->data_bitrate_const)
- return -EOPNOTSUPP;
-
- memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
- sizeof(dbt));
- err = can_get_bittiming(dev, &dbt,
- priv->data_bittiming_const,
- priv->data_bitrate_const,
- priv->data_bitrate_const_cnt,
- extack);
- if (err)
- return err;
-
- if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
- NL_SET_ERR_MSG_FMT(extack,
- "CANFD data bitrate %u bps surpasses transceiver capabilities of %u bps",
- dbt.bitrate, priv->bitrate_max);
- return -EINVAL;
- }
+ /* CAN FD */
+ err = can_dbt_changelink(dev, data, true, extack);
+ if (err)
+ return err;
- memset(&priv->tdc, 0, sizeof(priv->tdc));
- if (data[IFLA_CAN_TDC]) {
- /* TDC parameters are provided: use them */
- err = can_tdc_changelink(priv, data[IFLA_CAN_TDC],
- extack);
- if (err) {
- priv->ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
- return err;
- }
- } else if (!tdc_mask) {
- /* Neither of TDC parameters nor TDC flags are
- * provided: do calculation
- */
- can_calc_tdco(&priv->tdc, priv->tdc_const, &dbt,
- &priv->ctrlmode, priv->ctrlmode_supported);
- } /* else: both CAN_CTRLMODE_TDC_{AUTO,MANUAL} are explicitly
- * turned off. TDC is disabled: do nothing
- */
-
- memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
-
- if (priv->do_set_data_bittiming) {
- /* Finally, set the bit-timing registers */
- err = priv->do_set_data_bittiming(dev);
- if (err)
- return err;
- }
- }
+ /* CAN XL */
+ err = can_dbt_changelink(dev, data, false, extack);
+ if (err)
+ return err;
+ err = can_pwm_changelink(dev, data[IFLA_CAN_XL_PWM], extack);
+ if (err)
+ return err;
if (data[IFLA_CAN_TERMINATION]) {
const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]);
const unsigned int num_term = priv->termination_const_cnt;
unsigned int i;
- if (!priv->do_set_termination)
+ if (!priv->do_set_termination) {
+ NL_SET_ERR_MSG(extack,
+ "Termination is not configurable on this device");
return -EOPNOTSUPP;
+ }
/* check whether given value is supported by the interface */
for (i = 0; i < num_term; i++) {
@@ -389,44 +717,85 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
return 0;
}
-static size_t can_tdc_get_size(const struct net_device *dev)
+static size_t can_tdc_get_size(struct data_bittiming_params *dbt_params,
+ u32 tdc_flags)
{
- struct can_priv *priv = netdev_priv(dev);
+ bool tdc_manual = tdc_flags & CAN_CTRLMODE_TDC_MANUAL_MASK;
size_t size;
- if (!priv->tdc_const)
+ if (!dbt_params->tdc_const)
return 0;
size = nla_total_size(0); /* nest IFLA_CAN_TDC */
- if (priv->ctrlmode_supported & CAN_CTRLMODE_TDC_MANUAL) {
+ if (tdc_manual) {
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV_MIN */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV_MAX */
}
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO_MIN */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO_MAX */
- if (priv->tdc_const->tdcf_max) {
+ if (dbt_params->tdc_const->tdcf_max) {
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MIN */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MAX */
}
- if (can_tdc_is_enabled(priv)) {
- if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL ||
- priv->do_get_auto_tdcv)
+ if (tdc_flags) {
+ if (tdc_manual || dbt_params->do_get_auto_tdcv)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO */
- if (priv->tdc_const->tdcf_max)
+ if (dbt_params->tdc_const->tdcf_max)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF */
}
return size;
}
+static size_t can_data_bittiming_get_size(struct data_bittiming_params *dbt_params,
+ u32 tdc_flags)
+{
+ size_t size = 0;
+
+ if (dbt_params->data_bittiming.bitrate) /* IFLA_CAN_{,XL}_DATA_BITTIMING */
+ size += nla_total_size(sizeof(dbt_params->data_bittiming));
+ if (dbt_params->data_bittiming_const) /* IFLA_CAN_{,XL}_DATA_BITTIMING_CONST */
+ size += nla_total_size(sizeof(*dbt_params->data_bittiming_const));
+ if (dbt_params->data_bitrate_const) /* IFLA_CAN_{,XL}_DATA_BITRATE_CONST */
+ size += nla_total_size(sizeof(*dbt_params->data_bitrate_const) *
+ dbt_params->data_bitrate_const_cnt);
+ size += can_tdc_get_size(dbt_params, tdc_flags);/* IFLA_CAN_{,XL}_TDC */
+
+ return size;
+}
+
static size_t can_ctrlmode_ext_get_size(void)
{
return nla_total_size(0) + /* nest IFLA_CAN_CTRLMODE_EXT */
nla_total_size(sizeof(u32)); /* IFLA_CAN_CTRLMODE_SUPPORTED */
}
+static size_t can_pwm_get_size(const struct can_pwm_const *pwm_const,
+ bool pwm_on)
+{
+ size_t size;
+
+ if (!pwm_const || !pwm_on)
+ return 0;
+
+ size = nla_total_size(0); /* nest IFLA_CAN_PWM */
+
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMS_MIN */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMS_MAX */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWML_MIN */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWML_MAX */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMO_MIN */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMO_MAX */
+
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMS */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWML */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMO */
+
+ return size;
+}
+
static size_t can_get_size(const struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
@@ -442,10 +811,6 @@ static size_t can_get_size(const struct net_device *dev)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
size += nla_total_size(sizeof(struct can_berr_counter));
- if (priv->data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */
- size += nla_total_size(sizeof(struct can_bittiming));
- if (priv->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */
- size += nla_total_size(sizeof(struct can_bittiming_const));
if (priv->termination_const) {
size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */
size += nla_total_size(sizeof(*priv->termination_const) * /* IFLA_CAN_TERMINATION_CONST */
@@ -454,31 +819,76 @@ static size_t can_get_size(const struct net_device *dev)
if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */
size += nla_total_size(sizeof(*priv->bitrate_const) *
priv->bitrate_const_cnt);
- if (priv->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */
- size += nla_total_size(sizeof(*priv->data_bitrate_const) *
- priv->data_bitrate_const_cnt);
size += sizeof(priv->bitrate_max); /* IFLA_CAN_BITRATE_MAX */
- size += can_tdc_get_size(dev); /* IFLA_CAN_TDC */
size += can_ctrlmode_ext_get_size(); /* IFLA_CAN_CTRLMODE_EXT */
+ size += can_data_bittiming_get_size(&priv->fd,
+ priv->ctrlmode & CAN_CTRLMODE_FD_TDC_MASK);
+
+ size += can_data_bittiming_get_size(&priv->xl,
+ priv->ctrlmode & CAN_CTRLMODE_XL_TDC_MASK);
+ size += can_pwm_get_size(priv->xl.pwm_const, /* IFLA_CAN_XL_PWM */
+ priv->ctrlmode & CAN_CTRLMODE_XL_TMS);
+
return size;
}
-static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev)
+static int can_bittiming_fill_info(struct sk_buff *skb, int ifla_can_bittiming,
+ struct can_bittiming *bittiming)
+{
+ return bittiming->bitrate != CAN_BITRATE_UNSET &&
+ bittiming->bitrate != CAN_BITRATE_UNKNOWN &&
+ nla_put(skb, ifla_can_bittiming, sizeof(*bittiming), bittiming);
+}
+
+static int can_bittiming_const_fill_info(struct sk_buff *skb,
+ int ifla_can_bittiming_const,
+ const struct can_bittiming_const *bittiming_const)
+{
+ return bittiming_const &&
+ nla_put(skb, ifla_can_bittiming_const,
+ sizeof(*bittiming_const), bittiming_const);
+}
+
+static int can_bitrate_const_fill_info(struct sk_buff *skb,
+ int ifla_can_bitrate_const,
+ const u32 *bitrate_const, unsigned int cnt)
+{
+ return bitrate_const &&
+ nla_put(skb, ifla_can_bitrate_const,
+ sizeof(*bitrate_const) * cnt, bitrate_const);
+}
+
+static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev,
+ int ifla_can_tdc)
{
- struct nlattr *nest;
struct can_priv *priv = netdev_priv(dev);
- struct can_tdc *tdc = &priv->tdc;
- const struct can_tdc_const *tdc_const = priv->tdc_const;
+ struct data_bittiming_params *dbt_params;
+ const struct can_tdc_const *tdc_const;
+ struct can_tdc *tdc;
+ struct nlattr *nest;
+ bool tdc_is_enabled, tdc_manual;
+
+ if (ifla_can_tdc == IFLA_CAN_TDC) {
+ dbt_params = &priv->fd;
+ tdc_is_enabled = can_fd_tdc_is_enabled(priv);
+ tdc_manual = priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL;
+ } else {
+ dbt_params = &priv->xl;
+ tdc_is_enabled = can_xl_tdc_is_enabled(priv);
+ tdc_manual = priv->ctrlmode & CAN_CTRLMODE_XL_TDC_MANUAL;
+ }
+ tdc_const = dbt_params->tdc_const;
+ tdc = &dbt_params->tdc;
if (!tdc_const)
return 0;
- nest = nla_nest_start(skb, IFLA_CAN_TDC);
+ nest = nla_nest_start(skb, ifla_can_tdc);
if (!nest)
return -EMSGSIZE;
- if (priv->ctrlmode_supported & CAN_CTRLMODE_TDC_MANUAL &&
+ if (tdc_manual &&
(nla_put_u32(skb, IFLA_CAN_TDC_TDCV_MIN, tdc_const->tdcv_min) ||
nla_put_u32(skb, IFLA_CAN_TDC_TDCV_MAX, tdc_const->tdcv_max)))
goto err_cancel;
@@ -490,15 +900,15 @@ static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u32(skb, IFLA_CAN_TDC_TDCF_MAX, tdc_const->tdcf_max)))
goto err_cancel;
- if (can_tdc_is_enabled(priv)) {
+ if (tdc_is_enabled) {
u32 tdcv;
int err = -EINVAL;
- if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL) {
+ if (tdc_manual) {
tdcv = tdc->tdcv;
err = 0;
- } else if (priv->do_get_auto_tdcv) {
- err = priv->do_get_auto_tdcv(dev, &tdcv);
+ } else if (dbt_params->do_get_auto_tdcv) {
+ err = dbt_params->do_get_auto_tdcv(dev, &tdcv);
}
if (!err && nla_put_u32(skb, IFLA_CAN_TDC_TDCV, tdcv))
goto err_cancel;
@@ -517,6 +927,42 @@ err_cancel:
return -EMSGSIZE;
}
+static int can_pwm_fill_info(struct sk_buff *skb, const struct can_priv *priv)
+{
+ const struct can_pwm_const *pwm_const = priv->xl.pwm_const;
+ const struct can_pwm *pwm = &priv->xl.pwm;
+ struct nlattr *nest;
+
+ if (!pwm_const)
+ return 0;
+
+ nest = nla_nest_start(skb, IFLA_CAN_XL_PWM);
+ if (!nest)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(skb, IFLA_CAN_PWM_PWMS_MIN, pwm_const->pwms_min) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWMS_MAX, pwm_const->pwms_max) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWML_MIN, pwm_const->pwml_min) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWML_MAX, pwm_const->pwml_max) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWMO_MIN, pwm_const->pwmo_min) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWMO_MAX, pwm_const->pwmo_max))
+ goto err_cancel;
+
+ if (priv->ctrlmode & CAN_CTRLMODE_XL_TMS) {
+ if (nla_put_u32(skb, IFLA_CAN_PWM_PWMS, pwm->pwms) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWML, pwm->pwml) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWMO, pwm->pwmo))
+ goto err_cancel;
+ }
+
+ nla_nest_end(skb, nest);
+ return 0;
+
+err_cancel:
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+}
+
static int can_ctrlmode_ext_fill_info(struct sk_buff *skb,
const struct can_priv *priv)
{
@@ -546,14 +992,11 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (priv->do_get_state)
priv->do_get_state(dev, &state);
- if ((priv->bittiming.bitrate != CAN_BITRATE_UNSET &&
- priv->bittiming.bitrate != CAN_BITRATE_UNKNOWN &&
- nla_put(skb, IFLA_CAN_BITTIMING,
- sizeof(priv->bittiming), &priv->bittiming)) ||
+ if (can_bittiming_fill_info(skb, IFLA_CAN_BITTIMING,
+ &priv->bittiming) ||
- (priv->bittiming_const &&
- nla_put(skb, IFLA_CAN_BITTIMING_CONST,
- sizeof(*priv->bittiming_const), priv->bittiming_const)) ||
+ can_bittiming_const_fill_info(skb, IFLA_CAN_BITTIMING_CONST,
+ priv->bittiming_const) ||
nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock) ||
nla_put_u32(skb, IFLA_CAN_STATE, state) ||
@@ -564,14 +1007,11 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
!priv->do_get_berr_counter(dev, &bec) &&
nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
- (priv->data_bittiming.bitrate &&
- nla_put(skb, IFLA_CAN_DATA_BITTIMING,
- sizeof(priv->data_bittiming), &priv->data_bittiming)) ||
+ can_bittiming_fill_info(skb, IFLA_CAN_DATA_BITTIMING,
+ &priv->fd.data_bittiming) ||
- (priv->data_bittiming_const &&
- nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
- sizeof(*priv->data_bittiming_const),
- priv->data_bittiming_const)) ||
+ can_bittiming_const_fill_info(skb, IFLA_CAN_DATA_BITTIMING_CONST,
+ priv->fd.data_bittiming_const) ||
(priv->termination_const &&
(nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) ||
@@ -580,27 +1020,36 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
priv->termination_const_cnt,
priv->termination_const))) ||
- (priv->bitrate_const &&
- nla_put(skb, IFLA_CAN_BITRATE_CONST,
- sizeof(*priv->bitrate_const) *
- priv->bitrate_const_cnt,
- priv->bitrate_const)) ||
+ can_bitrate_const_fill_info(skb, IFLA_CAN_BITRATE_CONST,
+ priv->bitrate_const,
+ priv->bitrate_const_cnt) ||
- (priv->data_bitrate_const &&
- nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST,
- sizeof(*priv->data_bitrate_const) *
- priv->data_bitrate_const_cnt,
- priv->data_bitrate_const)) ||
+ can_bitrate_const_fill_info(skb, IFLA_CAN_DATA_BITRATE_CONST,
+ priv->fd.data_bitrate_const,
+ priv->fd.data_bitrate_const_cnt) ||
(nla_put(skb, IFLA_CAN_BITRATE_MAX,
sizeof(priv->bitrate_max),
&priv->bitrate_max)) ||
- can_tdc_fill_info(skb, dev) ||
+ can_tdc_fill_info(skb, dev, IFLA_CAN_TDC) ||
- can_ctrlmode_ext_fill_info(skb, priv)
- )
+ can_ctrlmode_ext_fill_info(skb, priv) ||
+ can_bittiming_fill_info(skb, IFLA_CAN_XL_DATA_BITTIMING,
+ &priv->xl.data_bittiming) ||
+
+ can_bittiming_const_fill_info(skb, IFLA_CAN_XL_DATA_BITTIMING_CONST,
+ priv->xl.data_bittiming_const) ||
+
+ can_bitrate_const_fill_info(skb, IFLA_CAN_XL_DATA_BITRATE_CONST,
+ priv->xl.data_bitrate_const,
+ priv->xl.data_bitrate_const_cnt) ||
+
+ can_tdc_fill_info(skb, dev, IFLA_CAN_XL_TDC) ||
+
+ can_pwm_fill_info(skb, priv)
+ )
return -EMSGSIZE;
return 0;
@@ -624,8 +1073,8 @@ nla_put_failure:
return -EMSGSIZE;
}
-static int can_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int can_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
diff --git a/drivers/net/can/dummy_can.c b/drivers/net/can/dummy_can.c
new file mode 100644
index 000000000000..41953655e3d3
--- /dev/null
+++ b/drivers/net/can/dummy_can.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2025 Vincent Mailhol <mailhol@kernel.org> */
+
+#include <linux/array_size.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/string_choices.h>
+#include <linux/units.h>
+
+#include <linux/can.h>
+#include <linux/can/bittiming.h>
+#include <linux/can/dev.h>
+#include <linux/can/skb.h>
+
+struct dummy_can {
+ struct can_priv can;
+ struct net_device *dev;
+};
+
+static struct dummy_can *dummy_can;
+
+static const struct can_bittiming_const dummy_can_bittiming_const = {
+ .name = "dummy_can CC",
+ .tseg1_min = 2,
+ .tseg1_max = 256,
+ .tseg2_min = 2,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 512,
+ .brp_inc = 1
+};
+
+static const struct can_bittiming_const dummy_can_fd_databittiming_const = {
+ .name = "dummy_can FD",
+ .tseg1_min = 2,
+ .tseg1_max = 256,
+ .tseg2_min = 2,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 512,
+ .brp_inc = 1
+};
+
+static const struct can_tdc_const dummy_can_fd_tdc_const = {
+ .tdcv_min = 0,
+ .tdcv_max = 0, /* Manual mode not supported. */
+ .tdco_min = 0,
+ .tdco_max = 127,
+ .tdcf_min = 0,
+ .tdcf_max = 127
+};
+
+static const struct can_bittiming_const dummy_can_xl_databittiming_const = {
+ .name = "dummy_can XL",
+ .tseg1_min = 2,
+ .tseg1_max = 256,
+ .tseg2_min = 2,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 512,
+ .brp_inc = 1
+};
+
+static const struct can_tdc_const dummy_can_xl_tdc_const = {
+ .tdcv_min = 0,
+ .tdcv_max = 0, /* Manual mode not supported. */
+ .tdco_min = 0,
+ .tdco_max = 127,
+ .tdcf_min = 0,
+ .tdcf_max = 127
+};
+
+static const struct can_pwm_const dummy_can_pwm_const = {
+ .pwms_min = 1,
+ .pwms_max = 8,
+ .pwml_min = 2,
+ .pwml_max = 24,
+ .pwmo_min = 0,
+ .pwmo_max = 16,
+};
+
+static void dummy_can_print_bittiming(struct net_device *dev,
+ struct can_bittiming *bt)
+{
+ netdev_dbg(dev, "\tbitrate: %u\n", bt->bitrate);
+ netdev_dbg(dev, "\tsample_point: %u\n", bt->sample_point);
+ netdev_dbg(dev, "\ttq: %u\n", bt->tq);
+ netdev_dbg(dev, "\tprop_seg: %u\n", bt->prop_seg);
+ netdev_dbg(dev, "\tphase_seg1: %u\n", bt->phase_seg1);
+ netdev_dbg(dev, "\tphase_seg2: %u\n", bt->phase_seg2);
+ netdev_dbg(dev, "\tsjw: %u\n", bt->sjw);
+ netdev_dbg(dev, "\tbrp: %u\n", bt->brp);
+}
+
+static void dummy_can_print_tdc(struct net_device *dev, struct can_tdc *tdc)
+{
+ netdev_dbg(dev, "\t\ttdcv: %u\n", tdc->tdcv);
+ netdev_dbg(dev, "\t\ttdco: %u\n", tdc->tdco);
+ netdev_dbg(dev, "\t\ttdcf: %u\n", tdc->tdcf);
+}
+
+static void dummy_can_print_pwm(struct net_device *dev, struct can_pwm *pwm,
+ struct can_bittiming *dbt)
+{
+ netdev_dbg(dev, "\t\tpwms: %u\n", pwm->pwms);
+ netdev_dbg(dev, "\t\tpwml: %u\n", pwm->pwml);
+ netdev_dbg(dev, "\t\tpwmo: %u\n", pwm->pwmo);
+}
+
+static void dummy_can_print_ctrlmode(struct net_device *dev)
+{
+ struct dummy_can *priv = netdev_priv(dev);
+ struct can_priv *can_priv = &priv->can;
+ unsigned long supported = can_priv->ctrlmode_supported;
+ u32 enabled = can_priv->ctrlmode;
+
+ netdev_dbg(dev, "Control modes:\n");
+ netdev_dbg(dev, "\tsupported: 0x%08x\n", (u32)supported);
+ netdev_dbg(dev, "\tenabled: 0x%08x\n", enabled);
+
+ if (supported) {
+ int idx;
+
+		netdev_dbg(dev, "\tlist:\n");
+ for_each_set_bit(idx, &supported, BITS_PER_TYPE(u32))
+ netdev_dbg(dev, "\t\t%s: %s\n",
+ can_get_ctrlmode_str(BIT(idx)),
+ enabled & BIT(idx) ? "on" : "off");
+ }
+}
+
+static void dummy_can_print_bittiming_info(struct net_device *dev)
+{
+ struct dummy_can *priv = netdev_priv(dev);
+ struct can_priv *can_priv = &priv->can;
+
+ netdev_dbg(dev, "Clock frequency: %u\n", can_priv->clock.freq);
+ netdev_dbg(dev, "Maximum bitrate: %u\n", can_priv->bitrate_max);
+ netdev_dbg(dev, "MTU: %u\n", dev->mtu);
+ netdev_dbg(dev, "\n");
+
+ dummy_can_print_ctrlmode(dev);
+ netdev_dbg(dev, "\n");
+
+ netdev_dbg(dev, "Classical CAN nominal bittiming:\n");
+ dummy_can_print_bittiming(dev, &can_priv->bittiming);
+ netdev_dbg(dev, "\n");
+
+ if (can_priv->ctrlmode & CAN_CTRLMODE_FD) {
+ netdev_dbg(dev, "CAN FD databittiming:\n");
+ dummy_can_print_bittiming(dev, &can_priv->fd.data_bittiming);
+ if (can_fd_tdc_is_enabled(can_priv)) {
+ netdev_dbg(dev, "\tCAN FD TDC:\n");
+ dummy_can_print_tdc(dev, &can_priv->fd.tdc);
+ }
+ }
+ netdev_dbg(dev, "\n");
+
+ if (can_priv->ctrlmode & CAN_CTRLMODE_XL) {
+ netdev_dbg(dev, "CAN XL databittiming:\n");
+ dummy_can_print_bittiming(dev, &can_priv->xl.data_bittiming);
+ if (can_xl_tdc_is_enabled(can_priv)) {
+ netdev_dbg(dev, "\tCAN XL TDC:\n");
+ dummy_can_print_tdc(dev, &can_priv->xl.tdc);
+ }
+ if (can_priv->ctrlmode & CAN_CTRLMODE_XL_TMS) {
+ netdev_dbg(dev, "\tCAN XL PWM:\n");
+ dummy_can_print_pwm(dev, &can_priv->xl.pwm,
+ &can_priv->xl.data_bittiming);
+ }
+ }
+ netdev_dbg(dev, "\n");
+}
+
+static int dummy_can_netdev_open(struct net_device *dev)
+{
+ int ret;
+ struct can_priv *priv = netdev_priv(dev);
+
+ dummy_can_print_bittiming_info(dev);
+ netdev_dbg(dev, "error-signalling is %s\n",
+ str_enabled_disabled(!can_dev_in_xl_only_mode(priv)));
+
+ ret = open_candev(dev);
+ if (ret)
+ return ret;
+ netif_start_queue(dev);
+ netdev_dbg(dev, "dummy-can is up\n");
+
+ return 0;
+}
+
+static int dummy_can_netdev_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ close_candev(dev);
+ netdev_dbg(dev, "dummy-can is down\n");
+
+ return 0;
+}
+
+static netdev_tx_t dummy_can_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ if (can_dev_dropped_skb(dev, skb))
+ return NETDEV_TX_OK;
+
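+ /* No physical bus to transmit on: loop the frame straight back through
+ * the echo skb machinery and account it as a successful transmission.
+ */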
+ can_put_echo_skb(skb, dev, 0, 0);
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += can_get_echo_skb(dev, 0, NULL);
+
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops dummy_can_netdev_ops = {
+ .ndo_open = dummy_can_netdev_open,
+ .ndo_stop = dummy_can_netdev_close,
+ .ndo_start_xmit = dummy_can_start_xmit,
+};
+
+static const struct ethtool_ops dummy_can_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+static int __init dummy_can_init(void)
+{
+ struct net_device *dev;
+ struct dummy_can *priv;
+ int ret;
+
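+ /* One echo skb slot is enough: start_xmit() always loops back index 0. */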
+ dev = alloc_candev(sizeof(*priv), 1);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->netdev_ops = &dummy_can_netdev_ops;
+ dev->ethtool_ops = &dummy_can_ethtool_ops;
+ priv = netdev_priv(dev);
+ priv->can.bittiming_const = &dummy_can_bittiming_const;
+ priv->can.bitrate_max = 20 * MEGA /* BPS */;
+ priv->can.clock.freq = 160 * MEGA /* Hz */;
+ priv->can.fd.data_bittiming_const = &dummy_can_fd_databittiming_const;
+ priv->can.fd.tdc_const = &dummy_can_fd_tdc_const;
+ priv->can.xl.data_bittiming_const = &dummy_can_xl_databittiming_const;
+ priv->can.xl.tdc_const = &dummy_can_xl_tdc_const;
+ priv->can.xl.pwm_const = &dummy_can_pwm_const;
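+ /* Advertise a broad set of control modes (FD, XL, TDC, TMS, ...) so that
+ * the full netlink interface can be exercised.
+ */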
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_FD | CAN_CTRLMODE_TDC_AUTO |
+ CAN_CTRLMODE_RESTRICTED | CAN_CTRLMODE_XL |
+ CAN_CTRLMODE_XL_TDC_AUTO | CAN_CTRLMODE_XL_TMS;
+ priv->dev = dev;
+
+ ret = register_candev(priv->dev);
+ if (ret) {
+ free_candev(priv->dev);
+ return ret;
+ }
+
+ dummy_can = priv;
+ netdev_dbg(dev, "dummy-can ready\n");
+
+ return 0;
+}
+
+static void __exit dummy_can_exit(void)
+{
+ struct net_device *dev = dummy_can->dev;
+
+ netdev_dbg(dev, "dummy-can bye bye\n");
+ unregister_candev(dev);
+ free_candev(dev);
+}
+
+module_init(dummy_can_init);
+module_exit(dummy_can_exit);
+
+MODULE_DESCRIPTION("A dummy CAN driver, mainly to test the netlink interface");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vincent Mailhol <mailhol@kernel.org>");
diff --git a/drivers/net/can/esd/esd_402_pci-core.c b/drivers/net/can/esd/esd_402_pci-core.c
index 5d6d2828cd04..c826f00c551b 100644
--- a/drivers/net/can/esd/esd_402_pci-core.c
+++ b/drivers/net/can/esd/esd_402_pci-core.c
@@ -86,8 +86,8 @@ static const struct net_device_ops pci402_acc_netdev_ops = {
.ndo_open = acc_open,
.ndo_stop = acc_close,
.ndo_start_xmit = acc_start_xmit,
- .ndo_change_mtu = can_change_mtu,
- .ndo_eth_ioctl = can_eth_ioctl_hwts,
+ .ndo_hwtstamp_get = can_hwtstamp_get,
+ .ndo_hwtstamp_set = can_hwtstamp_set,
};
static const struct ethtool_ops pci402_acc_ethtool_ops = {
diff --git a/drivers/net/can/esd/esdacc.c b/drivers/net/can/esd/esdacc.c
index c80032bc1a52..73e66f9a3781 100644
--- a/drivers/net/can/esd/esdacc.c
+++ b/drivers/net/can/esd/esdacc.c
@@ -254,7 +254,7 @@ netdev_tx_t acc_start_xmit(struct sk_buff *skb, struct net_device *netdev)
u32 acc_id;
u32 acc_dlc;
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
/* Access core->tx_fifo_tail only once because it may be changed
diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
index ac1a860986df..f5d22c61503f 100644
--- a/drivers/net/can/flexcan/flexcan-core.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@ -26,6 +26,7 @@
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/can/platform/flexcan.h>
+#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/regmap.h>
@@ -386,6 +387,16 @@ static const struct flexcan_devtype_data fsl_lx2160a_r1_devtype_data = {
FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
+static const struct flexcan_devtype_data nxp_s32g2_devtype_data = {
+ .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+ FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+ FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_FD |
+ FLEXCAN_QUIRK_SUPPORT_ECC | FLEXCAN_QUIRK_NR_IRQ_3 |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR |
+ FLEXCAN_QUIRK_SECONDARY_MB_IRQ,
+};
+
static const struct can_bittiming_const flexcan_bittiming_const = {
.name = DRV_NAME,
.tseg1_min = 4,
@@ -634,18 +645,22 @@ static void flexcan_clks_disable(const struct flexcan_priv *priv)
static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
{
- if (!priv->reg_xceiver)
- return 0;
+ if (priv->reg_xceiver)
+ return regulator_enable(priv->reg_xceiver);
+ else if (priv->transceiver)
+ return phy_power_on(priv->transceiver);
- return regulator_enable(priv->reg_xceiver);
+ return 0;
}
static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv)
{
- if (!priv->reg_xceiver)
- return 0;
+ if (priv->reg_xceiver)
+ return regulator_disable(priv->reg_xceiver);
+ else if (priv->transceiver)
+ return phy_power_off(priv->transceiver);
- return regulator_disable(priv->reg_xceiver);
+ return 0;
}
static int flexcan_chip_enable(struct flexcan_priv *priv)
@@ -1211,7 +1226,7 @@ static void flexcan_set_bittiming_cbt(const struct net_device *dev)
{
struct flexcan_priv *priv = netdev_priv(dev);
struct can_bittiming *bt = &priv->can.bittiming;
- struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
struct flexcan_regs __iomem *regs = priv->regs;
u32 reg_cbt, reg_fdctrl;
@@ -1762,14 +1777,25 @@ static int flexcan_open(struct net_device *dev)
goto out_free_irq_boff;
}
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ) {
+ err = request_irq(priv->irq_secondary_mb,
+ flexcan_irq, IRQF_SHARED, dev->name, dev);
+ if (err)
+ goto out_free_irq_err;
+ }
+
flexcan_chip_interrupts_enable(dev);
netif_start_queue(dev);
return 0;
+ out_free_irq_err:
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3)
+ free_irq(priv->irq_err, dev);
out_free_irq_boff:
- free_irq(priv->irq_boff, dev);
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3)
+ free_irq(priv->irq_boff, dev);
out_free_irq:
free_irq(dev->irq, dev);
out_can_rx_offload_disable:
@@ -1794,6 +1820,9 @@ static int flexcan_close(struct net_device *dev)
netif_stop_queue(dev);
flexcan_chip_interrupts_disable(dev);
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ)
+ free_irq(priv->irq_secondary_mb, dev);
+
if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
free_irq(priv->irq_err, dev);
free_irq(priv->irq_boff, dev);
@@ -1838,7 +1867,6 @@ static const struct net_device_ops flexcan_netdev_ops = {
.ndo_open = flexcan_open,
.ndo_stop = flexcan_close,
.ndo_start_xmit = flexcan_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static int register_flexcandev(struct net_device *dev)
@@ -2041,6 +2069,7 @@ static const struct of_device_id flexcan_of_match[] = {
{ .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, },
{ .compatible = "fsl,ls1021ar2-flexcan", .data = &fsl_ls1021a_r2_devtype_data, },
{ .compatible = "fsl,lx2160ar1-flexcan", .data = &fsl_lx2160a_r1_devtype_data, },
+ { .compatible = "nxp,s32g2-flexcan", .data = &nxp_s32g2_devtype_data, },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, flexcan_of_match);
@@ -2061,6 +2090,7 @@ static int flexcan_probe(struct platform_device *pdev)
struct net_device *dev;
struct flexcan_priv *priv;
struct regulator *reg_xceiver;
+ struct phy *transceiver;
struct clk *clk_ipg = NULL, *clk_per = NULL;
struct flexcan_regs __iomem *regs;
struct flexcan_platform_data *pdata;
@@ -2076,6 +2106,11 @@ static int flexcan_probe(struct platform_device *pdev)
else if (IS_ERR(reg_xceiver))
return PTR_ERR(reg_xceiver);
+ transceiver = devm_phy_optional_get(&pdev->dev, NULL);
+ if (IS_ERR(transceiver))
+ return dev_err_probe(&pdev->dev, PTR_ERR(transceiver),
+ "failed to get phy\n");
+
if (pdev->dev.of_node) {
of_property_read_u32(pdev->dev.of_node,
"clock-frequency", &clock_freq);
@@ -2173,6 +2208,10 @@ static int flexcan_probe(struct platform_device *pdev)
priv->clk_per = clk_per;
priv->clk_src = clk_src;
priv->reg_xceiver = reg_xceiver;
+ priv->transceiver = transceiver;
+
+ if (transceiver)
+ priv->can.bitrate_max = transceiver->attrs.max_link_rate;
if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
priv->irq_boff = platform_get_irq(pdev, 1);
@@ -2187,11 +2226,19 @@ static int flexcan_probe(struct platform_device *pdev)
}
}
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ) {
+ priv->irq_secondary_mb = platform_get_irq_byname(pdev, "mb-1");
+ if (priv->irq_secondary_mb < 0) {
+ err = priv->irq_secondary_mb;
+ goto failed_platform_get_irq;
+ }
+ }
+
if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SUPPORT_FD) {
priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
CAN_CTRLMODE_FD_NON_ISO;
priv->can.bittiming_const = &flexcan_fd_bittiming_const;
- priv->can.data_bittiming_const =
+ priv->can.fd.data_bittiming_const =
&flexcan_fd_data_bittiming_const;
} else {
priv->can.bittiming_const = &flexcan_bittiming_const;
@@ -2260,14 +2307,19 @@ static int __maybe_unused flexcan_suspend(struct device *device)
flexcan_chip_interrupts_disable(dev);
+ err = flexcan_transceiver_disable(priv);
+ if (err)
+ return err;
+
err = pinctrl_pm_select_sleep_state(device);
if (err)
return err;
}
netif_stop_queue(dev);
netif_device_detach(dev);
+
+ priv->can.state = CAN_STATE_SLEEPING;
}
- priv->can.state = CAN_STATE_SLEEPING;
return 0;
}
@@ -2278,7 +2330,6 @@ static int __maybe_unused flexcan_resume(struct device *device)
struct flexcan_priv *priv = netdev_priv(dev);
int err;
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
if (netif_running(dev)) {
netif_device_attach(dev);
netif_start_queue(dev);
@@ -2292,12 +2343,20 @@ static int __maybe_unused flexcan_resume(struct device *device)
if (err)
return err;
- err = flexcan_chip_start(dev);
+ err = flexcan_transceiver_enable(priv);
if (err)
return err;
+ err = flexcan_chip_start(dev);
+ if (err) {
+ flexcan_transceiver_disable(priv);
+ return err;
+ }
+
flexcan_chip_interrupts_enable(dev);
}
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
}
return 0;
diff --git a/drivers/net/can/flexcan/flexcan.h b/drivers/net/can/flexcan/flexcan.h
index 4933d8c7439e..16692a2502eb 100644
--- a/drivers/net/can/flexcan/flexcan.h
+++ b/drivers/net/can/flexcan/flexcan.h
@@ -70,6 +70,10 @@
#define FLEXCAN_QUIRK_SUPPORT_RX_FIFO BIT(16)
/* Setup stop mode with ATF SCMI protocol to support wakeup */
#define FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI BIT(17)
+/* Device has two separate interrupt lines for two mailbox ranges, which
+ * both need to have an interrupt handler registered.
+ */
+#define FLEXCAN_QUIRK_SECONDARY_MB_IRQ BIT(18)
struct flexcan_devtype_data {
u32 quirks; /* quirks needed for different IP cores */
@@ -103,10 +107,12 @@ struct flexcan_priv {
struct clk *clk_per;
struct flexcan_devtype_data devtype_data;
struct regulator *reg_xceiver;
+ struct phy *transceiver;
struct flexcan_stop_mode stm;
int irq_boff;
int irq_err;
+ int irq_secondary_mb;
/* IPC handle when setup stop mode by System Controller firmware(scfw) */
struct imx_sc_ipc *sc_ipc_handle;
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index cdf0ec9fa7f3..3b1b09943436 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -778,7 +778,7 @@ static irqreturn_t grcan_interrupt(int irq, void *dev_id)
*/
if (priv->need_txbug_workaround &&
(sources & (GRCAN_IRQ_TX | GRCAN_IRQ_TXLOSS))) {
- del_timer(&priv->hang_timer);
+ timer_delete(&priv->hang_timer);
}
/* Frame(s) received or transmitted */
@@ -806,7 +806,7 @@ static irqreturn_t grcan_interrupt(int irq, void *dev_id)
*/
static void grcan_running_reset(struct timer_list *t)
{
- struct grcan_priv *priv = from_timer(priv, t, rr_timer);
+ struct grcan_priv *priv = timer_container_of(priv, t, rr_timer);
struct net_device *dev = priv->dev;
struct grcan_registers __iomem *regs = priv->regs;
unsigned long flags;
@@ -817,8 +817,8 @@ static void grcan_running_reset(struct timer_list *t)
spin_lock_irqsave(&priv->lock, flags);
priv->resetting = false;
- del_timer(&priv->hang_timer);
- del_timer(&priv->rr_timer);
+ timer_delete(&priv->hang_timer);
+ timer_delete(&priv->rr_timer);
if (!priv->closing) {
/* Save and reset - config register preserved by grcan_reset */
@@ -897,7 +897,7 @@ static inline void grcan_reset_timer(struct timer_list *timer, __u32 bitrate)
/* Disable channels and schedule a running reset */
static void grcan_initiate_running_reset(struct timer_list *t)
{
- struct grcan_priv *priv = from_timer(priv, t, hang_timer);
+ struct grcan_priv *priv = timer_container_of(priv, t, hang_timer);
struct net_device *dev = priv->dev;
struct grcan_registers __iomem *regs = priv->regs;
unsigned long flags;
@@ -1073,9 +1073,10 @@ static int grcan_open(struct net_device *dev)
if (err)
goto exit_close_candev;
+ napi_enable(&priv->napi);
+
spin_lock_irqsave(&priv->lock, flags);
- napi_enable(&priv->napi);
grcan_start(dev);
if (!(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
netif_start_queue(dev);
@@ -1107,8 +1108,8 @@ static int grcan_close(struct net_device *dev)
priv->closing = true;
if (priv->need_txbug_workaround) {
spin_unlock_irqrestore(&priv->lock, flags);
- del_timer_sync(&priv->hang_timer);
- del_timer_sync(&priv->rr_timer);
+ timer_delete_sync(&priv->hang_timer);
+ timer_delete_sync(&priv->rr_timer);
spin_lock_irqsave(&priv->lock, flags);
}
netif_stop_queue(dev);
@@ -1146,7 +1147,7 @@ static void grcan_transmit_catch_up(struct net_device *dev)
* so prevent a running reset while catching up
*/
if (priv->need_txbug_workaround)
- del_timer(&priv->hang_timer);
+ timer_delete(&priv->hang_timer);
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1560,7 +1561,6 @@ static const struct net_device_ops grcan_netdev_ops = {
.ndo_open = grcan_open,
.ndo_stop = grcan_close,
.ndo_start_xmit = grcan_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops grcan_ethtool_ops = {
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index d32b10900d2f..0f83335e4d07 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -390,36 +390,55 @@ static int ifi_canfd_handle_lec_err(struct net_device *ndev)
return 0;
priv->can.can_stats.bus_error++;
- stats->rx_errors++;
/* Propagate the error condition to the CAN stack. */
skb = alloc_can_err_skb(ndev, &cf);
- if (unlikely(!skb))
- return 0;
/* Read the error counter register and check for new errors. */
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ if (likely(skb))
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
- if (errctr & IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST)
- cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
+ if (errctr & IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST) {
+ stats->rx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
+ }
- if (errctr & IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST)
- cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ if (errctr & IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST) {
+ stats->tx_errors++;
+ if (likely(skb))
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ }
- if (errctr & IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST)
- cf->data[2] |= CAN_ERR_PROT_BIT0;
+ if (errctr & IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST) {
+ stats->tx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+ }
- if (errctr & IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST)
- cf->data[2] |= CAN_ERR_PROT_BIT1;
+ if (errctr & IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST) {
+ stats->tx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+ }
- if (errctr & IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST)
- cf->data[2] |= CAN_ERR_PROT_STUFF;
+ if (errctr & IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST) {
+ stats->rx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ }
- if (errctr & IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST)
- cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ if (errctr & IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST) {
+ stats->rx_errors++;
+ if (likely(skb))
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ }
- if (errctr & IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST)
- cf->data[2] |= CAN_ERR_PROT_FORM;
+ if (errctr & IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST) {
+ stats->rx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ }
/* Reset the error counter, ack the IRQ and re-enable the counter. */
writel(IFI_CANFD_ERROR_CTR_ER_RESET, priv->base + IFI_CANFD_ERROR_CTR);
@@ -427,6 +446,9 @@ static int ifi_canfd_handle_lec_err(struct net_device *ndev)
priv->base + IFI_CANFD_INTERRUPT);
writel(IFI_CANFD_ERROR_CTR_ER_ENABLE, priv->base + IFI_CANFD_ERROR_CTR);
+ if (unlikely(!skb))
+ return 0;
+
netif_receive_skb(skb);
return 1;
@@ -647,7 +669,7 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
{
struct ifi_canfd_priv *priv = netdev_priv(ndev);
const struct can_bittiming *bt = &priv->can.bittiming;
- const struct can_bittiming *dbt = &priv->can.data_bittiming;
+ const struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
u16 brp, sjw, tseg1, tseg2, tdc;
/* Configure bit timing */
@@ -922,7 +944,6 @@ static const struct net_device_ops ifi_canfd_netdev_ops = {
.ndo_open = ifi_canfd_open,
.ndo_stop = ifi_canfd_close,
.ndo_start_xmit = ifi_canfd_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops ifi_canfd_ethtool_ops = {
@@ -978,10 +999,10 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
priv->can.clock.freq = readl(addr + IFI_CANFD_CANCLOCK);
- priv->can.bittiming_const = &ifi_canfd_bittiming_const;
- priv->can.data_bittiming_const = &ifi_canfd_bittiming_const;
- priv->can.do_set_mode = ifi_canfd_set_mode;
- priv->can.do_get_berr_counter = ifi_canfd_get_berr_counter;
+ priv->can.bittiming_const = &ifi_canfd_bittiming_const;
+ priv->can.fd.data_bittiming_const = &ifi_canfd_bittiming_const;
+ priv->can.do_set_mode = ifi_canfd_set_mode;
+ priv->can.do_get_berr_counter = ifi_canfd_get_berr_counter;
/* IFI CANFD can do both Bosch FD and ISO FD */
priv->can.ctrlmode = CAN_CTRLMODE_FD;
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 60c7b83b4539..1efdd1fd8caa 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1752,7 +1752,6 @@ static const struct net_device_ops ican3_netdev_ops = {
.ndo_open = ican3_open,
.ndo_stop = ican3_stop,
.ndo_start_xmit = ican3_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops ican3_ethtool_ops = {
@@ -1867,7 +1866,7 @@ static ssize_t fwinfo_show(struct device *dev,
{
struct ican3_dev *mod = netdev_priv(to_net_dev(dev));
- return scnprintf(buf, PAGE_SIZE, "%s\n", mod->fwinfo);
+ return sysfs_emit(buf, "%s\n", mod->fwinfo);
}
static DEVICE_ATTR_RW(termination);
diff --git a/drivers/net/can/kvaser_pciefd/Makefile b/drivers/net/can/kvaser_pciefd/Makefile
new file mode 100644
index 000000000000..8c5b8cdc6b5f
--- /dev/null
+++ b/drivers/net/can/kvaser_pciefd/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CAN_KVASER_PCIEFD) += kvaser_pciefd.o
+kvaser_pciefd-y = kvaser_pciefd_core.o kvaser_pciefd_devlink.o
diff --git a/drivers/net/can/kvaser_pciefd/kvaser_pciefd.h b/drivers/net/can/kvaser_pciefd/kvaser_pciefd.h
new file mode 100644
index 000000000000..08c9ddc1ee85
--- /dev/null
+++ b/drivers/net/can/kvaser_pciefd/kvaser_pciefd.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/* kvaser_pciefd common definitions and declarations
+ *
+ * Copyright (C) 2025 KVASER AB, Sweden. All rights reserved.
+ */
+
+#ifndef _KVASER_PCIEFD_H
+#define _KVASER_PCIEFD_H
+
+#include <linux/can/dev.h>
+#include <linux/completion.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <net/devlink.h>
+
+#define KVASER_PCIEFD_MAX_CAN_CHANNELS 8UL
+#define KVASER_PCIEFD_DMA_COUNT 2U
+#define KVASER_PCIEFD_DMA_SIZE (4U * 1024U)
+#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17U
+
+struct kvaser_pciefd;
+
+struct kvaser_pciefd_address_offset {
+ u32 serdes;
+ u32 pci_ien;
+ u32 pci_irq;
+ u32 sysid;
+ u32 loopback;
+ u32 kcan_srb_fifo;
+ u32 kcan_srb;
+ u32 kcan_ch0;
+ u32 kcan_ch1;
+};
+
+struct kvaser_pciefd_irq_mask {
+ u32 kcan_rx0;
+ u32 kcan_tx[KVASER_PCIEFD_MAX_CAN_CHANNELS];
+ u32 all;
+};
+
+struct kvaser_pciefd_dev_ops {
+ void (*kvaser_pciefd_write_dma_map)(struct kvaser_pciefd *pcie,
+ dma_addr_t addr, int index);
+};
+
+struct kvaser_pciefd_driver_data {
+ const struct kvaser_pciefd_address_offset *address_offset;
+ const struct kvaser_pciefd_irq_mask *irq_mask;
+ const struct kvaser_pciefd_dev_ops *ops;
+};
+
+struct kvaser_pciefd_fw_version {
+ u8 major;
+ u8 minor;
+ u16 build;
+};
+
+struct kvaser_pciefd_can {
+ struct can_priv can;
+ struct devlink_port devlink_port;
+ struct kvaser_pciefd *kv_pcie;
+ void __iomem *reg_base;
+ struct can_berr_counter bec;
+ u32 ioc;
+ u8 cmd_seq;
+ u8 tx_max_count;
+ u8 tx_idx;
+ u8 ack_idx;
+ int err_rep_cnt;
+ unsigned int completed_tx_pkts;
+ unsigned int completed_tx_bytes;
+ spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
+ struct timer_list bec_poll_timer;
+ struct completion start_comp, flush_comp;
+};
+
+struct kvaser_pciefd {
+ struct pci_dev *pci;
+ void __iomem *reg_base;
+ struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS];
+ const struct kvaser_pciefd_driver_data *driver_data;
+ void *dma_data[KVASER_PCIEFD_DMA_COUNT];
+ u8 nr_channels;
+ u32 bus_freq;
+ u32 freq;
+ u32 freq_to_ticks_div;
+ struct kvaser_pciefd_fw_version fw_version;
+};
+
+extern const struct devlink_ops kvaser_pciefd_devlink_ops;
+
+int kvaser_pciefd_devlink_port_register(struct kvaser_pciefd_can *can);
+void kvaser_pciefd_devlink_port_unregister(struct kvaser_pciefd_can *can);
+#endif /* _KVASER_PCIEFD_H */
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd/kvaser_pciefd_core.c
index fee012b57f33..d8c9bfb20230 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd/kvaser_pciefd_core.c
@@ -5,6 +5,8 @@
* - PEAK linux canfd driver
*/
+#include "kvaser_pciefd.h"
+
#include <linux/bitfield.h>
#include <linux/can/dev.h>
#include <linux/device.h>
@@ -16,6 +18,7 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/timer.h>
+#include <net/netdev_queues.h>
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
@@ -26,10 +29,6 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
#define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000)
#define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200))
#define KVASER_PCIEFD_MAX_ERR_REP 256U
-#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17U
-#define KVASER_PCIEFD_MAX_CAN_CHANNELS 8UL
-#define KVASER_PCIEFD_DMA_COUNT 2U
-#define KVASER_PCIEFD_DMA_SIZE (4U * 1024U)
#define KVASER_PCIEFD_VENDOR 0x1a07
@@ -65,6 +64,7 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180
#define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0
#define KVASER_PCIEFD_KCAN_CMD_REG 0x400
+#define KVASER_PCIEFD_KCAN_IOC_REG 0x404
#define KVASER_PCIEFD_KCAN_IEN_REG 0x408
#define KVASER_PCIEFD_KCAN_IRQ_REG 0x410
#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG 0x414
@@ -135,6 +135,9 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
/* Request status packet */
#define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0)
+/* Control CAN LED, active low */
+#define KVASER_PCIEFD_KCAN_IOC_LED BIT(0)
+
/* Transmitter unaligned */
#define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17)
/* Tx FIFO empty */
@@ -291,35 +294,6 @@ static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie,
static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie,
dma_addr_t addr, int index);
-struct kvaser_pciefd_address_offset {
- u32 serdes;
- u32 pci_ien;
- u32 pci_irq;
- u32 sysid;
- u32 loopback;
- u32 kcan_srb_fifo;
- u32 kcan_srb;
- u32 kcan_ch0;
- u32 kcan_ch1;
-};
-
-struct kvaser_pciefd_dev_ops {
- void (*kvaser_pciefd_write_dma_map)(struct kvaser_pciefd *pcie,
- dma_addr_t addr, int index);
-};
-
-struct kvaser_pciefd_irq_mask {
- u32 kcan_rx0;
- u32 kcan_tx[KVASER_PCIEFD_MAX_CAN_CHANNELS];
- u32 all;
-};
-
-struct kvaser_pciefd_driver_data {
- const struct kvaser_pciefd_address_offset *address_offset;
- const struct kvaser_pciefd_irq_mask *irq_mask;
- const struct kvaser_pciefd_dev_ops *ops;
-};
-
static const struct kvaser_pciefd_address_offset kvaser_pciefd_altera_address_offset = {
.serdes = 0x1000,
.pci_ien = 0x50,
@@ -404,32 +378,6 @@ static const struct kvaser_pciefd_driver_data kvaser_pciefd_xilinx_driver_data =
.ops = &kvaser_pciefd_xilinx_dev_ops,
};
-struct kvaser_pciefd_can {
- struct can_priv can;
- struct kvaser_pciefd *kv_pcie;
- void __iomem *reg_base;
- struct can_berr_counter bec;
- u8 cmd_seq;
- int err_rep_cnt;
- int echo_idx;
- spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
- spinlock_t echo_lock; /* Locks the message echo buffer */
- struct timer_list bec_poll_timer;
- struct completion start_comp, flush_comp;
-};
-
-struct kvaser_pciefd {
- struct pci_dev *pci;
- void __iomem *reg_base;
- struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS];
- const struct kvaser_pciefd_driver_data *driver_data;
- void *dma_data[KVASER_PCIEFD_DMA_COUNT];
- u8 nr_channels;
- u32 bus_freq;
- u32 freq;
- u32 freq_to_ticks_div;
-};
-
struct kvaser_pciefd_rx_packet {
u32 header[2];
u64 timestamp;
@@ -524,6 +472,16 @@ static inline void kvaser_pciefd_abort_flush_reset(struct kvaser_pciefd_can *can
kvaser_pciefd_send_kcan_cmd(can, KVASER_PCIEFD_KCAN_CMD_AT);
}
+static inline void kvaser_pciefd_set_led(struct kvaser_pciefd_can *can, bool on)
+{
+ if (on)
+ can->ioc &= ~KVASER_PCIEFD_KCAN_IOC_LED;
+ else
+ can->ioc |= KVASER_PCIEFD_KCAN_IOC_LED;
+
+ iowrite32(can->ioc, can->reg_base + KVASER_PCIEFD_KCAN_IOC_REG);
+}
+
static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can)
{
u32 mode;
@@ -631,7 +589,7 @@ static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
u32 mode;
unsigned long irq;
- del_timer(&can->bec_poll_timer);
+ timer_delete(&can->bec_poll_timer);
if (!completion_done(&can->flush_comp))
kvaser_pciefd_start_controller_flush(can);
@@ -714,6 +672,9 @@ static int kvaser_pciefd_open(struct net_device *netdev)
int ret;
struct kvaser_pciefd_can *can = netdev_priv(netdev);
+ can->tx_idx = 0;
+ can->ack_idx = 0;
+
ret = open_candev(netdev);
if (ret)
return ret;
@@ -742,24 +703,29 @@ static int kvaser_pciefd_stop(struct net_device *netdev)
ret = -ETIMEDOUT;
} else {
iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
- del_timer(&can->bec_poll_timer);
+ timer_delete(&can->bec_poll_timer);
}
can->can.state = CAN_STATE_STOPPED;
+ netdev_reset_queue(netdev);
close_candev(netdev);
return ret;
}
+static unsigned int kvaser_pciefd_tx_avail(const struct kvaser_pciefd_can *can)
+{
+ return can->tx_max_count - (READ_ONCE(can->tx_idx) - READ_ONCE(can->ack_idx));
+}
+
static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
- struct kvaser_pciefd_can *can,
+ struct can_priv *can, u8 seq,
struct sk_buff *skb)
{
struct canfd_frame *cf = (struct canfd_frame *)skb->data;
int packet_size;
- int seq = can->echo_idx;
memset(p, 0, sizeof(*p));
- if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ if (can->ctrlmode & CAN_CTRLMODE_ONE_SHOT)
p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;
if (cf->can_id & CAN_RTR_FLAG)
@@ -782,7 +748,7 @@ static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
} else {
p->header[1] |=
FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK,
- can_get_cc_dlc((struct can_frame *)cf, can->can.ctrlmode));
+ can_get_cc_dlc((struct can_frame *)cf, can->ctrlmode));
}
p->header[1] |= FIELD_PREP(KVASER_PCIEFD_PACKET_SEQ_MASK, seq);
@@ -797,22 +763,24 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct kvaser_pciefd_can *can = netdev_priv(netdev);
- unsigned long irq_flags;
struct kvaser_pciefd_tx_packet packet;
+ unsigned int seq = can->tx_idx & (can->can.echo_skb_max - 1);
+ unsigned int frame_len;
int nr_words;
- u8 count;
if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
+ if (!netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1))
+ return NETDEV_TX_BUSY;
- nr_words = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);
+ nr_words = kvaser_pciefd_prepare_tx_packet(&packet, &can->can, seq, skb);
- spin_lock_irqsave(&can->echo_lock, irq_flags);
/* Prepare and save echo skb in internal slot */
- can_put_echo_skb(skb, netdev, can->echo_idx, 0);
-
- /* Move echo index to the next slot */
- can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;
+ WRITE_ONCE(can->can.echo_skb[seq], NULL);
+ frame_len = can_skb_get_frame_len(skb);
+ can_put_echo_skb(skb, netdev, seq, frame_len);
+ netdev_sent_queue(netdev, frame_len);
+ WRITE_ONCE(can->tx_idx, can->tx_idx + 1);
/* Write header to fifo */
iowrite32(packet.header[0],
@@ -836,14 +804,7 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
}
- count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
- ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
- /* No room for a new message, stop the queue until at least one
- * successful transmit
- */
- if (count >= can->can.echo_skb_max || can->can.echo_skb[can->echo_idx])
- netif_stop_queue(netdev);
- spin_unlock_irqrestore(&can->echo_lock, irq_flags);
+ netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1);
return NETDEV_TX_OK;
}
@@ -856,7 +817,7 @@ static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data)
struct can_bittiming *bt;
if (data)
- bt = &can->can.data_bittiming;
+ bt = &can->can.fd.data_bittiming;
else
bt = &can->can.bittiming;
@@ -930,7 +891,8 @@ static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev,
static void kvaser_pciefd_bec_poll_timer(struct timer_list *data)
{
- struct kvaser_pciefd_can *can = from_timer(can, data, bec_poll_timer);
+ struct kvaser_pciefd_can *can = timer_container_of(can, data,
+ bec_poll_timer);
kvaser_pciefd_enable_err_gen(can);
kvaser_pciefd_request_status(can);
@@ -940,13 +902,37 @@ static void kvaser_pciefd_bec_poll_timer(struct timer_list *data)
static const struct net_device_ops kvaser_pciefd_netdev_ops = {
.ndo_open = kvaser_pciefd_open,
.ndo_stop = kvaser_pciefd_stop,
- .ndo_eth_ioctl = can_eth_ioctl_hwts,
.ndo_start_xmit = kvaser_pciefd_start_xmit,
- .ndo_change_mtu = can_change_mtu,
+ .ndo_hwtstamp_get = can_hwtstamp_get,
+ .ndo_hwtstamp_set = can_hwtstamp_set,
};
+static int kvaser_pciefd_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct kvaser_pciefd_can *can = netdev_priv(netdev);
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return 3; /* 3 On/Off cycles per second */
+
+ case ETHTOOL_ID_ON:
+ kvaser_pciefd_set_led(can, true);
+ return 0;
+
+ case ETHTOOL_ID_OFF:
+ case ETHTOOL_ID_INACTIVE:
+ kvaser_pciefd_set_led(can, false);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct ethtool_ops kvaser_pciefd_ethtool_ops = {
.get_ts_info = can_ethtool_op_get_ts_info_hwts,
+ .set_phys_id = kvaser_pciefd_set_phys_id,
};
static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
@@ -957,9 +943,10 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
struct net_device *netdev;
struct kvaser_pciefd_can *can;
u32 status, tx_nr_packets_max;
+ int ret;
netdev = alloc_candev(sizeof(struct kvaser_pciefd_can),
- KVASER_PCIEFD_CAN_TX_MAX_COUNT);
+ roundup_pow_of_two(KVASER_PCIEFD_CAN_TX_MAX_COUNT));
if (!netdev)
return -ENOMEM;
@@ -970,8 +957,11 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
can->kv_pcie = pcie;
can->cmd_seq = 0;
can->err_rep_cnt = 0;
+ can->completed_tx_pkts = 0;
+ can->completed_tx_bytes = 0;
can->bec.txerr = 0;
can->bec.rxerr = 0;
+ can->can.dev->dev_port = i;
init_completion(&can->start_comp);
init_completion(&can->flush_comp);
@@ -980,26 +970,28 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
/* Disable Bus load reporting */
iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG);
+ can->ioc = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IOC_REG);
+ kvaser_pciefd_set_led(can, false);
+
tx_nr_packets_max =
FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK,
ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
+ can->tx_max_count = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);
can->can.clock.freq = pcie->freq;
- can->can.echo_skb_max = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);
- can->echo_idx = 0;
- spin_lock_init(&can->echo_lock);
spin_lock_init(&can->lock);
can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
- can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const;
+ can->can.fd.data_bittiming_const = &kvaser_pciefd_bittiming_const;
can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming;
- can->can.do_set_data_bittiming = kvaser_pciefd_set_data_bittiming;
+ can->can.fd.do_set_data_bittiming = kvaser_pciefd_set_data_bittiming;
can->can.do_set_mode = kvaser_pciefd_set_mode;
can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter;
can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_FD |
CAN_CTRLMODE_FD_NON_ISO |
- CAN_CTRLMODE_CC_LEN8_DLC;
+ CAN_CTRLMODE_CC_LEN8_DLC |
+ CAN_CTRLMODE_BERR_REPORTING;
status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) {
@@ -1022,6 +1014,11 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
pcie->can[i] = can;
kvaser_pciefd_pwm_start(can);
+ ret = kvaser_pciefd_devlink_port_register(can);
+ if (ret) {
+ dev_err(&pcie->pci->dev, "Failed to register devlink port\n");
+ return ret;
+ }
}
return 0;
@@ -1154,14 +1151,12 @@ static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie)
u32 version, srb_status, build;
version = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_VERSION_REG);
+ build = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_BUILD_REG);
pcie->nr_channels = min(KVASER_PCIEFD_MAX_CAN_CHANNELS,
FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK, version));
-
- build = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_BUILD_REG);
- dev_dbg(&pcie->pci->dev, "Version %lu.%lu.%lu\n",
- FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK, version),
- FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK, version),
- FIELD_GET(KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK, build));
+ pcie->fw_version.major = FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK, version);
+ pcie->fw_version.minor = FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK, version);
+ pcie->fw_version.build = FIELD_GET(KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK, build);
srb_status = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_STAT_REG);
if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) {
@@ -1200,7 +1195,7 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
skb = alloc_canfd_skb(priv->dev, &cf);
if (!skb) {
priv->dev->stats.rx_dropped++;
- return -ENOMEM;
+ return 0;
}
cf->len = can_fd_dlc2len(dlc);
@@ -1212,7 +1207,7 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
if (!skb) {
priv->dev->stats.rx_dropped++;
- return -ENOMEM;
+ return 0;
}
can_frame_set_cc_len((struct can_frame *)cf, dlc, priv->ctrlmode);
}
@@ -1230,15 +1225,21 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
priv->dev->stats.rx_packets++;
kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
- return netif_rx(skb);
+ netif_rx(skb);
+
+ return 0;
}
static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
+ const struct can_berr_counter *bec,
struct can_frame *cf,
enum can_state new_state,
enum can_state tx_state,
enum can_state rx_state)
{
+ enum can_state old_state;
+
+ old_state = can->can.state;
can_change_state(can->can.dev, cf, tx_state, rx_state);
if (new_state == CAN_STATE_BUS_OFF) {
@@ -1254,6 +1255,18 @@ static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
can_bus_off(ndev);
}
}
+ if (old_state == CAN_STATE_BUS_OFF &&
+ new_state == CAN_STATE_ERROR_ACTIVE &&
+ can->can.restart_ms) {
+ can->can.can_stats.restarts++;
+ if (cf)
+ cf->can_id |= CAN_ERR_RESTARTED;
+ }
+ if (cf && new_state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = bec->txerr;
+ cf->data[7] = bec->rxerr;
+ }
}
static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p,
@@ -1288,7 +1301,7 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
struct can_berr_counter bec;
enum can_state old_state, new_state, tx_state, rx_state;
struct net_device *ndev = can->can.dev;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
struct can_frame *cf = NULL;
old_state = can->can.state;
@@ -1297,16 +1310,10 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
bec.rxerr = FIELD_GET(KVASER_PCIEFD_SPACK_RXERR_MASK, p->header[0]);
kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, &rx_state);
- skb = alloc_can_err_skb(ndev, &cf);
+ if (can->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ skb = alloc_can_err_skb(ndev, &cf);
if (new_state != old_state) {
- kvaser_pciefd_change_state(can, cf, new_state, tx_state, rx_state);
- if (old_state == CAN_STATE_BUS_OFF &&
- new_state == CAN_STATE_ERROR_ACTIVE &&
- can->can.restart_ms) {
- can->can.can_stats.restarts++;
- if (skb)
- cf->can_id |= CAN_ERR_RESTARTED;
- }
+ kvaser_pciefd_change_state(can, &bec, cf, new_state, tx_state, rx_state);
}
can->err_rep_cnt++;
@@ -1319,18 +1326,19 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
can->bec.txerr = bec.txerr;
can->bec.rxerr = bec.rxerr;
- if (!skb) {
- ndev->stats.rx_dropped++;
- return -ENOMEM;
+ if (can->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
+ if (!skb) {
+ netdev_warn(ndev, "No memory left for err_skb\n");
+ ndev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+ kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
+ cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ netif_rx(skb);
}
- kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
- cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;
- cf->data[6] = bec.txerr;
- cf->data[7] = bec.rxerr;
-
- netif_rx(skb);
-
return 0;
}
@@ -1359,6 +1367,7 @@ static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
{
struct can_berr_counter bec;
enum can_state old_state, new_state, tx_state, rx_state;
+ int ret = 0;
old_state = can->can.state;
@@ -1372,25 +1381,15 @@ static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
struct can_frame *cf;
skb = alloc_can_err_skb(ndev, &cf);
- if (!skb) {
+ kvaser_pciefd_change_state(can, &bec, cf, new_state, tx_state, rx_state);
+ if (skb) {
+ kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
+ netif_rx(skb);
+ } else {
ndev->stats.rx_dropped++;
- return -ENOMEM;
+ netdev_warn(ndev, "No memory left for err_skb\n");
+ ret = -ENOMEM;
}
-
- kvaser_pciefd_change_state(can, cf, new_state, tx_state, rx_state);
- if (old_state == CAN_STATE_BUS_OFF &&
- new_state == CAN_STATE_ERROR_ACTIVE &&
- can->can.restart_ms) {
- can->can.can_stats.restarts++;
- cf->can_id |= CAN_ERR_RESTARTED;
- }
-
- kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
-
- cf->data[6] = bec.txerr;
- cf->data[7] = bec.rxerr;
-
- netif_rx(skb);
}
can->bec.txerr = bec.txerr;
can->bec.rxerr = bec.rxerr;
@@ -1398,7 +1397,7 @@ static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
if (bec.txerr || bec.rxerr)
mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
- return 0;
+ return ret;
}
static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
@@ -1507,19 +1506,21 @@ static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
netdev_dbg(can->can.dev, "Packet was flushed\n");
} else {
int echo_idx = FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[0]);
- int len;
- u8 count;
+ unsigned int len, frame_len = 0;
struct sk_buff *skb;
+ if (echo_idx != (can->ack_idx & (can->can.echo_skb_max - 1)))
+ return 0;
skb = can->can.echo_skb[echo_idx];
- if (skb)
- kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
- len = can_get_echo_skb(can->can.dev, echo_idx, NULL);
- count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
- ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
+ if (!skb)
+ return 0;
+ kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
+ len = can_get_echo_skb(can->can.dev, echo_idx, &frame_len);
- if (count < can->can.echo_skb_max && netif_queue_stopped(can->can.dev))
- netif_wake_queue(can->can.dev);
+ /* Pairs with barrier in kvaser_pciefd_start_xmit() */
+ smp_store_release(&can->ack_idx, can->ack_idx + 1);
+ can->completed_tx_pkts++;
+ can->completed_tx_bytes += frame_len;
if (!one_shot_fail) {
can->can.dev->stats.tx_bytes += len;
@@ -1635,32 +1636,51 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
{
int pos = 0;
int res = 0;
+ unsigned int i;
do {
res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
} while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);
+ /* Report ACKs in this buffer to BQL en masse for correct periods */
+ for (i = 0; i < pcie->nr_channels; ++i) {
+ struct kvaser_pciefd_can *can = pcie->can[i];
+
+ if (!can->completed_tx_pkts)
+ continue;
+ netif_subqueue_completed_wake(can->can.dev, 0,
+ can->completed_tx_pkts,
+ can->completed_tx_bytes,
+ kvaser_pciefd_tx_avail(can), 1);
+ can->completed_tx_pkts = 0;
+ can->completed_tx_bytes = 0;
+ }
+
return res;
}
-static u32 kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
+static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
{
+ void __iomem *srb_cmd_reg = KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG;
u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
- if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
+ iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
+
+ if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
kvaser_pciefd_read_buffer(pcie, 0);
+ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, srb_cmd_reg); /* Rearm buffer */
+ }
- if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
+ if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
kvaser_pciefd_read_buffer(pcie, 1);
+ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, srb_cmd_reg); /* Rearm buffer */
+ }
if (unlikely(irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
irq & KVASER_PCIEFD_SRB_IRQ_DUF1))
dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);
-
- iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
- return irq;
}
static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
@@ -1688,29 +1708,22 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask;
u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
- u32 srb_irq = 0;
- u32 srb_release = 0;
int i;
if (!(pci_irq & irq_mask->all))
return IRQ_NONE;
+ iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
+
if (pci_irq & irq_mask->kcan_rx0)
- srb_irq = kvaser_pciefd_receive_irq(pcie);
+ kvaser_pciefd_receive_irq(pcie);
for (i = 0; i < pcie->nr_channels; i++) {
if (pci_irq & irq_mask->kcan_tx[i])
kvaser_pciefd_transmit_irq(pcie->can[i]);
}
- if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
- srb_release |= KVASER_PCIEFD_SRB_CMD_RDB0;
-
- if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
- srb_release |= KVASER_PCIEFD_SRB_CMD_RDB1;
-
- if (srb_release)
- iowrite32(srb_release, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
+ iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
return IRQ_HANDLED;
}
@@ -1725,23 +1738,36 @@ static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
if (can) {
iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
kvaser_pciefd_pwm_stop(can);
+ kvaser_pciefd_devlink_port_unregister(can);
free_candev(can->can.dev);
}
}
}
+static void kvaser_pciefd_disable_irq_srcs(struct kvaser_pciefd *pcie)
+{
+ unsigned int i;
+
+ /* Masking PCI_IRQ is insufficient as a running ISR will unmask it */
+ iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG);
+ for (i = 0; i < pcie->nr_channels; ++i)
+ iowrite32(0, pcie->can[i]->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+}
+
static int kvaser_pciefd_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
int ret;
+ struct devlink *devlink;
+ struct device *dev = &pdev->dev;
struct kvaser_pciefd *pcie;
const struct kvaser_pciefd_irq_mask *irq_mask;
- void __iomem *irq_en_base;
- pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
- if (!pcie)
+ devlink = devlink_alloc(&kvaser_pciefd_devlink_ops, sizeof(*pcie), dev);
+ if (!devlink)
return -ENOMEM;
+ pcie = devlink_priv(devlink);
pci_set_drvdata(pdev, pcie);
pcie->pci = pdev;
pcie->driver_data = (const struct kvaser_pciefd_driver_data *)id->driver_data;
@@ -1749,7 +1775,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
ret = pci_enable_device(pdev);
if (ret)
- return ret;
+ goto err_free_devlink;
ret = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
if (ret)
@@ -1777,7 +1803,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
ret = pci_alloc_irq_vectors(pcie->pci, 1, 1, PCI_IRQ_INTX | PCI_IRQ_MSI);
if (ret < 0) {
- dev_err(&pcie->pci->dev, "Failed to allocate IRQ vectors.\n");
+ dev_err(dev, "Failed to allocate IRQ vectors.\n");
goto err_teardown_can_ctrls;
}
@@ -1790,7 +1816,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
ret = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
if (ret) {
- dev_err(&pcie->pci->dev, "Failed to request IRQ %d\n", pcie->pci->irq);
+ dev_err(dev, "Failed to request IRQ %d\n", pcie->pci->irq);
goto err_pci_free_irq_vectors;
}
iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
@@ -1802,8 +1828,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG);
/* Enable PCI interrupts */
- irq_en_base = KVASER_PCIEFD_PCI_IEN_ADDR(pcie);
- iowrite32(irq_mask->all, irq_en_base);
+ iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
/* Ready the DMA buffers */
iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
@@ -1814,11 +1839,12 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
if (ret)
goto err_free_irq;
+ devlink_register(devlink);
+
return 0;
err_free_irq:
- /* Disable PCI interrupts */
- iowrite32(0, irq_en_base);
+ kvaser_pciefd_disable_irq_srcs(pcie);
free_irq(pcie->pci->irq, pcie);
err_pci_free_irq_vectors:
@@ -1838,41 +1864,38 @@ err_release_regions:
err_disable_pci:
pci_disable_device(pdev);
- return ret;
-}
-
-static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
-{
- int i;
+err_free_devlink:
+ devlink_free(devlink);
- for (i = 0; i < pcie->nr_channels; i++) {
- struct kvaser_pciefd_can *can = pcie->can[i];
-
- if (can) {
- iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
- unregister_candev(can->can.dev);
- del_timer(&can->bec_poll_timer);
- kvaser_pciefd_pwm_stop(can);
- free_candev(can->can.dev);
- }
- }
+ return ret;
}
static void kvaser_pciefd_remove(struct pci_dev *pdev)
{
struct kvaser_pciefd *pcie = pci_get_drvdata(pdev);
+ unsigned int i;
- kvaser_pciefd_remove_all_ctrls(pcie);
+ for (i = 0; i < pcie->nr_channels; ++i) {
+ struct kvaser_pciefd_can *can = pcie->can[i];
- /* Disable interrupts */
- iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
- iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
+ unregister_candev(can->can.dev);
+ timer_delete(&can->bec_poll_timer);
+ kvaser_pciefd_pwm_stop(can);
+ kvaser_pciefd_devlink_port_unregister(can);
+ }
+ kvaser_pciefd_disable_irq_srcs(pcie);
free_irq(pcie->pci->irq, pcie);
pci_free_irq_vectors(pcie->pci);
+
+ for (i = 0; i < pcie->nr_channels; ++i)
+ free_candev(pcie->can[i]->can.dev);
+
+ devlink_unregister(priv_to_devlink(pcie));
pci_iounmap(pdev, pcie->reg_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
+ devlink_free(priv_to_devlink(pcie));
}
static struct pci_driver kvaser_pciefd = {
diff --git a/drivers/net/can/kvaser_pciefd/kvaser_pciefd_devlink.c b/drivers/net/can/kvaser_pciefd/kvaser_pciefd_devlink.c
new file mode 100644
index 000000000000..1d61a8b0eeba
--- /dev/null
+++ b/drivers/net/can/kvaser_pciefd/kvaser_pciefd_devlink.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/* kvaser_pciefd devlink functions
+ *
+ * Copyright (C) 2025 KVASER AB, Sweden. All rights reserved.
+ */
+#include "kvaser_pciefd.h"
+
+#include <linux/netdevice.h>
+#include <net/devlink.h>
+
+static int kvaser_pciefd_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct kvaser_pciefd *pcie = devlink_priv(devlink);
+ char buf[] = "xxx.xxx.xxxxx";
+ int ret;
+
+ if (pcie->fw_version.major) {
+ snprintf(buf, sizeof(buf), "%u.%u.%u",
+ pcie->fw_version.major,
+ pcie->fw_version.minor,
+ pcie->fw_version.build);
+ ret = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+const struct devlink_ops kvaser_pciefd_devlink_ops = {
+ .info_get = kvaser_pciefd_devlink_info_get,
+};
+
+int kvaser_pciefd_devlink_port_register(struct kvaser_pciefd_can *can)
+{
+ int ret;
+ struct devlink_port_attrs attrs = {
+ .flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ .phys.port_number = can->can.dev->dev_port,
+ };
+ devlink_port_attrs_set(&can->devlink_port, &attrs);
+
+ ret = devlink_port_register(priv_to_devlink(can->kv_pcie),
+ &can->devlink_port, can->can.dev->dev_port);
+ if (ret)
+ return ret;
+
+ SET_NETDEV_DEVLINK_PORT(can->can.dev, &can->devlink_port);
+
+ return 0;
+}
+
+void kvaser_pciefd_devlink_port_unregister(struct kvaser_pciefd_can *can)
+{
+ devlink_port_unregister(&can->devlink_port);
+}
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index a978b960f1f1..eb856547ae7d 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// CAN bus driver for Bosch M_CAN controller
// Copyright (C) 2014 Freescale Semiconductor, Inc.
-// Dong Aisheng <b29396@freescale.com>
+// Dong Aisheng <aisheng.dong@nxp.com>
// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/
/* Bosch M_CAN user manual can be obtained from:
@@ -23,6 +23,7 @@
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include "m_can.h"
@@ -386,8 +387,8 @@ static int m_can_cccr_update_bits(struct m_can_classdev *cdev, u32 mask, u32 val
size_t tries = 10;
if (!(mask & CCCR_INIT) && !(val_before & CCCR_INIT)) {
- dev_err(cdev->dev,
- "refusing to configure device when in normal mode\n");
+ netdev_err(cdev->net,
+ "refusing to configure device when in normal mode\n");
return -EBUSY;
}
@@ -451,7 +452,7 @@ static void m_can_interrupt_enable(struct m_can_classdev *cdev, u32 interrupts)
{
if (cdev->active_interrupts == interrupts)
return;
- cdev->ops->write_reg(cdev, M_CAN_IE, interrupts);
+ m_can_write(cdev, M_CAN_IE, interrupts);
cdev->active_interrupts = interrupts;
}
@@ -469,7 +470,7 @@ static void m_can_coalescing_disable(struct m_can_classdev *cdev)
static inline void m_can_enable_all_interrupts(struct m_can_classdev *cdev)
{
if (!cdev->net->irq) {
- dev_dbg(cdev->dev, "Start hrtimer\n");
+ netdev_dbg(cdev->net, "Start hrtimer\n");
hrtimer_start(&cdev->hrtimer,
ms_to_ktime(HRTIMER_POLL_INTERVAL_MS),
HRTIMER_MODE_REL_PINNED);
@@ -485,7 +486,7 @@ static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev)
m_can_write(cdev, M_CAN_ILE, 0x0);
if (!cdev->net->irq) {
- dev_dbg(cdev->dev, "Stop hrtimer\n");
+ netdev_dbg(cdev->net, "Stop hrtimer\n");
hrtimer_try_to_cancel(&cdev->hrtimer);
}
}
@@ -665,7 +666,7 @@ static int m_can_handle_lost_msg(struct net_device *dev)
struct can_frame *frame;
u32 timestamp = 0;
- netdev_err(dev, "msg lost in rxf0\n");
+ netdev_dbg(dev, "msg lost in rxf0\n");
stats->rx_errors++;
stats->rx_over_errors++;
@@ -695,47 +696,60 @@ static int m_can_handle_lec_err(struct net_device *dev,
u32 timestamp = 0;
cdev->can.can_stats.bus_error++;
- stats->rx_errors++;
/* propagate the error condition to the CAN stack */
skb = alloc_can_err_skb(dev, &cf);
- if (unlikely(!skb))
- return 0;
/* check for 'last error code' which tells us the
* type of the last error to occur on the CAN bus
*/
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ if (likely(skb))
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
switch (lec_type) {
case LEC_STUFF_ERROR:
netdev_dbg(dev, "stuff error\n");
- cf->data[2] |= CAN_ERR_PROT_STUFF;
+ stats->rx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
break;
case LEC_FORM_ERROR:
netdev_dbg(dev, "form error\n");
- cf->data[2] |= CAN_ERR_PROT_FORM;
+ stats->rx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_FORM;
break;
case LEC_ACK_ERROR:
netdev_dbg(dev, "ack error\n");
- cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ stats->tx_errors++;
+ if (likely(skb))
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
break;
case LEC_BIT1_ERROR:
netdev_dbg(dev, "bit1 error\n");
- cf->data[2] |= CAN_ERR_PROT_BIT1;
+ stats->tx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
break;
case LEC_BIT0_ERROR:
netdev_dbg(dev, "bit0 error\n");
- cf->data[2] |= CAN_ERR_PROT_BIT0;
+ stats->tx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
break;
case LEC_CRC_ERROR:
netdev_dbg(dev, "CRC error\n");
- cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ stats->rx_errors++;
+ if (likely(skb))
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
break;
default:
break;
}
+ if (unlikely(!skb))
+ return 0;
+
if (cdev->is_peripheral)
timestamp = m_can_get_timestamp(cdev);
@@ -777,6 +791,10 @@ static int m_can_get_berr_counter(const struct net_device *dev,
struct m_can_classdev *cdev = netdev_priv(dev);
int err;
+ /* Avoid waking up the controller if the interface is down */
+ if (!(dev->flags & IFF_UP))
+ return 0;
+
err = m_can_clk_start(cdev);
if (err)
return err;
@@ -799,6 +817,9 @@ static int m_can_handle_state_change(struct net_device *dev,
u32 timestamp = 0;
switch (new_state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ cdev->can.state = CAN_STATE_ERROR_ACTIVE;
+ break;
case CAN_STATE_ERROR_WARNING:
/* error warning state */
cdev->can.can_stats.error_warning++;
@@ -828,6 +849,12 @@ static int m_can_handle_state_change(struct net_device *dev,
__m_can_get_berr_counter(dev, &bec);
switch (new_state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
+ cf->data[1] = CAN_ERR_CRTL_ACTIVE;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ break;
case CAN_STATE_ERROR_WARNING:
/* error warning state */
cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
@@ -864,30 +891,33 @@ static int m_can_handle_state_change(struct net_device *dev,
return 1;
}
-static int m_can_handle_state_errors(struct net_device *dev, u32 psr)
+static enum can_state
+m_can_state_get_by_psr(struct m_can_classdev *cdev)
{
- struct m_can_classdev *cdev = netdev_priv(dev);
- int work_done = 0;
+ u32 reg_psr;
- if (psr & PSR_EW && cdev->can.state != CAN_STATE_ERROR_WARNING) {
- netdev_dbg(dev, "entered error warning state\n");
- work_done += m_can_handle_state_change(dev,
- CAN_STATE_ERROR_WARNING);
- }
+ reg_psr = m_can_read(cdev, M_CAN_PSR);
- if (psr & PSR_EP && cdev->can.state != CAN_STATE_ERROR_PASSIVE) {
- netdev_dbg(dev, "entered error passive state\n");
- work_done += m_can_handle_state_change(dev,
- CAN_STATE_ERROR_PASSIVE);
- }
+ if (reg_psr & PSR_BO)
+ return CAN_STATE_BUS_OFF;
+ if (reg_psr & PSR_EP)
+ return CAN_STATE_ERROR_PASSIVE;
+ if (reg_psr & PSR_EW)
+ return CAN_STATE_ERROR_WARNING;
- if (psr & PSR_BO && cdev->can.state != CAN_STATE_BUS_OFF) {
- netdev_dbg(dev, "entered error bus off state\n");
- work_done += m_can_handle_state_change(dev,
- CAN_STATE_BUS_OFF);
- }
+ return CAN_STATE_ERROR_ACTIVE;
+}
- return work_done;
+static int m_can_handle_state_errors(struct net_device *dev)
+{
+ struct m_can_classdev *cdev = netdev_priv(dev);
+ enum can_state new_state;
+
+ new_state = m_can_state_get_by_psr(cdev);
+ if (new_state == cdev->can.state)
+ return 0;
+
+ return m_can_handle_state_change(dev, new_state);
}
static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
@@ -1018,8 +1048,7 @@ static int m_can_rx_handler(struct net_device *dev, int quota, u32 irqstatus)
}
if (irqstatus & IR_ERR_STATE)
- work_done += m_can_handle_state_errors(dev,
- m_can_read(cdev, M_CAN_PSR));
+ work_done += m_can_handle_state_errors(dev);
if (irqstatus & IR_ERR_BUS_30X)
work_done += m_can_handle_bus_errors(dev, irqstatus,
@@ -1207,20 +1236,32 @@ static void m_can_coalescing_update(struct m_can_classdev *cdev, u32 ir)
static int m_can_interrupt_handler(struct m_can_classdev *cdev)
{
struct net_device *dev = cdev->net;
- u32 ir;
+ u32 ir = 0, ir_read;
int ret;
if (pm_runtime_suspended(cdev->dev))
return IRQ_NONE;
- ir = m_can_read(cdev, M_CAN_IR);
+ /* The m_can controller signals its interrupt status as a level, but
+	 * depending on the integration the CPU may interpret the signal as
+ * edge-triggered (for example with m_can_pci). For these
+ * edge-triggered integrations, we must observe that IR is 0 at least
+ * once to be sure that the next interrupt will generate an edge.
+ */
+ while ((ir_read = m_can_read(cdev, M_CAN_IR)) != 0) {
+ ir |= ir_read;
+
+ /* ACK all irqs */
+ m_can_write(cdev, M_CAN_IR, ir);
+
+ if (!cdev->irq_edge_triggered)
+ break;
+ }
+
m_can_coalescing_update(cdev, ir);
if (!ir)
return IRQ_NONE;
- /* ACK all irqs */
- m_can_write(cdev, M_CAN_IR, ir);
-
if (cdev->ops->clear_interrupts)
cdev->ops->clear_interrupts(cdev);
@@ -1343,11 +1384,32 @@ static const struct can_bittiming_const m_can_data_bittiming_const_31X = {
.brp_inc = 1,
};
+static int m_can_init_ram(struct m_can_classdev *cdev)
+{
+ int end, i, start;
+ int err = 0;
+
+ /* initialize the entire Message RAM in use to avoid possible
+ * ECC/parity checksum errors when reading an uninitialized buffer
+ */
+ start = cdev->mcfg[MRAM_SIDF].off;
+ end = cdev->mcfg[MRAM_TXB].off +
+ cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
+
+ for (i = start; i < end; i += 4) {
+ err = m_can_fifo_write_no_off(cdev, i, 0x0);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
static int m_can_set_bittiming(struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
const struct can_bittiming *bt = &cdev->can.bittiming;
- const struct can_bittiming *dbt = &cdev->can.data_bittiming;
+ const struct can_bittiming *dbt = &cdev->can.fd.data_bittiming;
u16 brp, sjw, tseg1, tseg2;
u32 reg_btp;
@@ -1428,7 +1490,7 @@ static int m_can_chip_config(struct net_device *dev)
err = m_can_init_ram(cdev);
if (err) {
- dev_err(cdev->dev, "Message RAM configuration failed\n");
+ netdev_err(dev, "Message RAM configuration failed\n");
return err;
}
@@ -1581,7 +1643,7 @@ static int m_can_start(struct net_device *dev)
netdev_queue_set_dql_min_limit(netdev_get_tx_queue(cdev->net, 0),
cdev->tx_max_coalesced_frames);
- cdev->can.state = CAN_STATE_ERROR_ACTIVE;
+ cdev->can.state = m_can_state_get_by_psr(cdev);
m_can_enable_all_interrupts(cdev);
@@ -1658,7 +1720,7 @@ static int m_can_niso_supported(struct m_can_classdev *cdev)
/* Then clear it again. */
ret = m_can_cccr_update_bits(cdev, CCCR_NISO, 0);
if (ret) {
- dev_err(cdev->dev, "failed to revert the NON-ISO bit in CCCR\n");
+ netdev_err(cdev->net, "failed to revert the NON-ISO bit in CCCR\n");
return ret;
}
@@ -1677,11 +1739,19 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
m_can_version = m_can_check_core_release(cdev);
/* return if unsupported version */
if (!m_can_version) {
- dev_err(cdev->dev, "Unsupported version number: %2d",
- m_can_version);
+ netdev_err(cdev->net, "Unsupported version number: %2d",
+ m_can_version);
return -EINVAL;
}
+ /* Write the INIT bit, in case no hardware reset has happened before
+ * the probe (for example, it was observed that the Intel Elkhart Lake
+ * SoCs do not properly reset the CAN controllers on reboot)
+ */
+ err = m_can_cccr_update_bits(cdev, CCCR_INIT, CCCR_INIT);
+ if (err)
+ return err;
+
if (!cdev->is_peripheral)
netif_napi_add(dev, &cdev->napi, m_can_poll);
@@ -1705,7 +1775,7 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
if (err)
return err;
cdev->can.bittiming_const = &m_can_bittiming_const_30X;
- cdev->can.data_bittiming_const = &m_can_data_bittiming_const_30X;
+ cdev->can.fd.data_bittiming_const = &m_can_data_bittiming_const_30X;
break;
case 31:
/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
@@ -1713,13 +1783,13 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
if (err)
return err;
cdev->can.bittiming_const = &m_can_bittiming_const_31X;
- cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
+ cdev->can.fd.data_bittiming_const = &m_can_data_bittiming_const_31X;
break;
case 32:
case 33:
/* Support both MCAN version v3.2.x and v3.3.0 */
cdev->can.bittiming_const = &m_can_bittiming_const_31X;
- cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
+ cdev->can.fd.data_bittiming_const = &m_can_data_bittiming_const_31X;
niso = m_can_niso_supported(cdev);
if (niso < 0)
@@ -1728,16 +1798,12 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
cdev->can.ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO;
break;
default:
- dev_err(cdev->dev, "Unsupported version number: %2d",
- cdev->version);
+ netdev_err(cdev->net, "Unsupported version number: %2d",
+ cdev->version);
return -EINVAL;
}
- /* Forcing standby mode should be redundant, as the chip should be in
- * standby after a reset. Write the INIT bit anyways, should the chip
- * be configured by previous stage.
- */
- return m_can_cccr_update_bits(cdev, CCCR_INIT, CCCR_INIT);
+ return 0;
}
static void m_can_stop(struct net_device *dev)
@@ -1756,6 +1822,13 @@ static void m_can_stop(struct net_device *dev)
/* set the state as STOPPED */
cdev->can.state = CAN_STATE_STOPPED;
+
+ if (cdev->ops->deinit) {
+ ret = cdev->ops->deinit(cdev);
+ if (ret)
+ netdev_err(dev, "failed to deinitialize: %pe\n",
+ ERR_PTR(ret));
+ }
}
static int m_can_close(struct net_device *dev)
@@ -1765,7 +1838,8 @@ static int m_can_close(struct net_device *dev)
netif_stop_queue(dev);
m_can_stop(dev);
- free_irq(dev->irq, dev);
+ if (dev->irq)
+ free_irq(dev->irq, dev);
m_can_clean(dev);
@@ -1779,6 +1853,7 @@ static int m_can_close(struct net_device *dev)
close_candev(dev);
+ reset_control_assert(cdev->rst);
m_can_clk_stop(cdev);
phy_power_off(cdev->transceiver);
@@ -1902,11 +1977,6 @@ out_fail:
static void m_can_tx_submit(struct m_can_classdev *cdev)
{
- if (cdev->version == 30)
- return;
- if (!cdev->is_peripheral)
- return;
-
m_can_write(cdev, M_CAN_TXBAR, cdev->tx_peripheral_submit);
cdev->tx_peripheral_submit = 0;
}
@@ -1987,7 +2057,7 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
return ret;
}
-static enum hrtimer_restart hrtimer_callback(struct hrtimer *timer)
+static enum hrtimer_restart m_can_polling_timer(struct hrtimer *timer)
{
struct m_can_classdev *cdev = container_of(timer, struct
m_can_classdev, hrtimer);
@@ -2021,11 +2091,15 @@ static int m_can_open(struct net_device *dev)
if (err)
goto out_phy_power_off;
+ err = reset_control_deassert(cdev->rst);
+ if (err)
+ goto exit_disable_clks;
+
/* open the can device */
err = open_candev(dev);
if (err) {
netdev_err(dev, "failed to open can device\n");
- goto exit_disable_clks;
+ goto out_reset_control_assert;
}
if (cdev->is_peripheral)
@@ -2081,6 +2155,8 @@ out_wq_fail:
else
napi_disable(&cdev->napi);
close_candev(dev);
+out_reset_control_assert:
+ reset_control_assert(cdev->rst);
exit_disable_clks:
m_can_clk_stop(cdev);
out_phy_power_off:
@@ -2092,7 +2168,6 @@ static const struct net_device_ops m_can_netdev_ops = {
.ndo_open = m_can_open,
.ndo_stop = m_can_close,
.ndo_start_xmit = m_can_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static int m_can_get_coalesce(struct net_device *dev,
@@ -2176,13 +2251,60 @@ static int m_can_set_coalesce(struct net_device *dev,
cdev->tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
if (cdev->rx_coalesce_usecs_irq)
- cdev->irq_timer_wait =
- ns_to_ktime(cdev->rx_coalesce_usecs_irq * NSEC_PER_USEC);
+ cdev->irq_timer_wait = us_to_ktime(cdev->rx_coalesce_usecs_irq);
else
- cdev->irq_timer_wait =
- ns_to_ktime(cdev->tx_coalesce_usecs_irq * NSEC_PER_USEC);
+ cdev->irq_timer_wait = us_to_ktime(cdev->tx_coalesce_usecs_irq);
+
+ return 0;
+}
+
+static void m_can_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct m_can_classdev *cdev = netdev_priv(dev);
+
+ wol->supported = device_can_wakeup(cdev->dev) ? WAKE_PHY : 0;
+ wol->wolopts = device_may_wakeup(cdev->dev) ? WAKE_PHY : 0;
+}
+
+static int m_can_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct m_can_classdev *cdev = netdev_priv(dev);
+ bool wol_enable = !!(wol->wolopts & WAKE_PHY);
+ int ret;
+
+ if (wol->wolopts & ~WAKE_PHY)
+ return -EINVAL;
+
+ if (wol_enable == device_may_wakeup(cdev->dev))
+ return 0;
+
+ ret = device_set_wakeup_enable(cdev->dev, wol_enable);
+ if (ret) {
+		netdev_err(cdev->net, "Failed to set wakeup enable %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ if (!IS_ERR_OR_NULL(cdev->pinctrl_state_wakeup)) {
+ if (wol_enable)
+ ret = pinctrl_select_state(cdev->pinctrl, cdev->pinctrl_state_wakeup);
+ else
+ ret = pinctrl_pm_select_default_state(cdev->dev);
+
+ if (ret) {
+			netdev_err(cdev->net, "Failed to select pinctrl state %pe\n",
+ ERR_PTR(ret));
+ goto err_wakeup_enable;
+ }
+ }
return 0;
+
+err_wakeup_enable:
+ /* Revert wakeup enable */
+ device_set_wakeup_enable(cdev->dev, !wol_enable);
+
+ return ret;
}
static const struct ethtool_ops m_can_ethtool_ops_coalescing = {
@@ -2194,10 +2316,14 @@ static const struct ethtool_ops m_can_ethtool_ops_coalescing = {
.get_ts_info = ethtool_op_get_ts_info,
.get_coalesce = m_can_get_coalesce,
.set_coalesce = m_can_set_coalesce,
+ .get_wol = m_can_get_wol,
+ .set_wol = m_can_set_wol,
};
static const struct ethtool_ops m_can_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
+ .get_wol = m_can_get_wol,
+ .set_wol = m_can_set_wol,
};
static int register_m_can_dev(struct m_can_classdev *cdev)
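
The two ethtool_ops tables above now publish WAKE_PHY through .get_wol/.set_wol, with m_can_set_wol() additionally switching an optional "wakeup" pinctrl state and rolling the wakeup flag back if that switch fails. As a hedged, minimal sketch (illustrative foo_* names, not the driver's code), the core of such WoL hooks for a wakeup-capable device reduces to the struct device wakeup API:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/pm_wakeup.h>

/* Minimal sketch: report and toggle WAKE_PHY via the device wakeup flags;
 * the pinctrl handling and rollback done by m_can are omitted here.
 */
static void foo_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct device *dev = ndev->dev.parent;

	wol->supported = device_can_wakeup(dev) ? WAKE_PHY : 0;
	wol->wolopts = device_may_wakeup(dev) ? WAKE_PHY : 0;
}

static int foo_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct device *dev = ndev->dev.parent;

	if (wol->wolopts & ~WAKE_PHY)		/* only bus-activity wake-up */
		return -EINVAL;

	return device_set_wakeup_enable(dev, !!(wol->wolopts & WAKE_PHY));
}

From userspace this is driven with the usual ethtool interface, e.g. "ethtool -s can0 wol p" to enable and "ethtool can0" to inspect, assuming the standard WAKE_PHY mapping.
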
@@ -2221,8 +2347,8 @@ int m_can_check_mram_cfg(struct m_can_classdev *cdev, u32 mram_max_size)
total_size = cdev->mcfg[MRAM_TXB].off - cdev->mcfg[MRAM_SIDF].off +
cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
if (total_size > mram_max_size) {
- dev_err(cdev->dev, "Total size of mram config(%u) exceeds mram(%u)\n",
- total_size, mram_max_size);
+ netdev_err(cdev->net, "Total size of mram config(%u) exceeds mram(%u)\n",
+ total_size, mram_max_size);
return -EINVAL;
}
@@ -2257,39 +2383,17 @@ static void m_can_of_parse_mram(struct m_can_classdev *cdev,
cdev->mcfg[MRAM_TXB].num = mram_config_vals[7] &
FIELD_MAX(TXBC_NDTB_MASK);
- dev_dbg(cdev->dev,
- "sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n",
- cdev->mcfg[MRAM_SIDF].off, cdev->mcfg[MRAM_SIDF].num,
- cdev->mcfg[MRAM_XIDF].off, cdev->mcfg[MRAM_XIDF].num,
- cdev->mcfg[MRAM_RXF0].off, cdev->mcfg[MRAM_RXF0].num,
- cdev->mcfg[MRAM_RXF1].off, cdev->mcfg[MRAM_RXF1].num,
- cdev->mcfg[MRAM_RXB].off, cdev->mcfg[MRAM_RXB].num,
- cdev->mcfg[MRAM_TXE].off, cdev->mcfg[MRAM_TXE].num,
- cdev->mcfg[MRAM_TXB].off, cdev->mcfg[MRAM_TXB].num);
+ netdev_dbg(cdev->net,
+ "sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n",
+ cdev->mcfg[MRAM_SIDF].off, cdev->mcfg[MRAM_SIDF].num,
+ cdev->mcfg[MRAM_XIDF].off, cdev->mcfg[MRAM_XIDF].num,
+ cdev->mcfg[MRAM_RXF0].off, cdev->mcfg[MRAM_RXF0].num,
+ cdev->mcfg[MRAM_RXF1].off, cdev->mcfg[MRAM_RXF1].num,
+ cdev->mcfg[MRAM_RXB].off, cdev->mcfg[MRAM_RXB].num,
+ cdev->mcfg[MRAM_TXE].off, cdev->mcfg[MRAM_TXE].num,
+ cdev->mcfg[MRAM_TXB].off, cdev->mcfg[MRAM_TXB].num);
}
-int m_can_init_ram(struct m_can_classdev *cdev)
-{
- int end, i, start;
- int err = 0;
-
- /* initialize the entire Message RAM in use to avoid possible
- * ECC/parity checksum errors when reading an uninitialized buffer
- */
- start = cdev->mcfg[MRAM_SIDF].off;
- end = cdev->mcfg[MRAM_TXB].off +
- cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
-
- for (i = start; i < end; i += 4) {
- err = m_can_fifo_write_no_off(cdev, i, 0x0);
- if (err)
- break;
- }
-
- return err;
-}
-EXPORT_SYMBOL_GPL(m_can_init_ram);
-
int m_can_class_get_clocks(struct m_can_classdev *cdev)
{
int ret = 0;
@@ -2298,7 +2402,7 @@ int m_can_class_get_clocks(struct m_can_classdev *cdev)
cdev->cclk = devm_clk_get(cdev->dev, "cclk");
if (IS_ERR(cdev->hclk) || IS_ERR(cdev->cclk)) {
- dev_err(cdev->dev, "no clock found\n");
+ netdev_err(cdev->net, "no clock found\n");
ret = -ENODEV;
}
@@ -2306,6 +2410,42 @@ int m_can_class_get_clocks(struct m_can_classdev *cdev)
}
EXPORT_SYMBOL_GPL(m_can_class_get_clocks);
+static bool m_can_class_wakeup_pinctrl_enabled(struct m_can_classdev *class_dev)
+{
+ return device_may_wakeup(class_dev->dev) && class_dev->pinctrl_state_wakeup;
+}
+
+static int m_can_class_parse_pinctrl(struct m_can_classdev *class_dev)
+{
+ struct device *dev = class_dev->dev;
+ int ret;
+
+ class_dev->pinctrl = devm_pinctrl_get(dev);
+ if (IS_ERR(class_dev->pinctrl)) {
+ ret = PTR_ERR(class_dev->pinctrl);
+ class_dev->pinctrl = NULL;
+
+ if (ret == -ENODEV)
+ return 0;
+
+ return dev_err_probe(dev, ret, "Failed to get pinctrl\n");
+ }
+
+ class_dev->pinctrl_state_wakeup =
+ pinctrl_lookup_state(class_dev->pinctrl, "wakeup");
+ if (IS_ERR(class_dev->pinctrl_state_wakeup)) {
+ ret = PTR_ERR(class_dev->pinctrl_state_wakeup);
+ class_dev->pinctrl_state_wakeup = NULL;
+
+ if (ret == -ENODEV)
+ return 0;
+
+ return dev_err_probe(dev, ret, "Failed to lookup pinctrl wakeup state\n");
+ }
+
+ return 0;
+}
+
struct m_can_classdev *m_can_class_allocate_dev(struct device *dev,
int sizeof_priv)
{
@@ -2321,9 +2461,12 @@ struct m_can_classdev *m_can_class_allocate_dev(struct device *dev,
sizeof(mram_config_vals) / 4);
if (ret) {
dev_err(dev, "Could not get Message RAM configuration.");
- goto out;
+ return ERR_PTR(ret);
}
+ if (dev->of_node && of_property_read_bool(dev->of_node, "wakeup-source"))
+ device_set_wakeup_capable(dev, true);
+
/* Get TX FIFO size
* Defines the total amount of echo buffers for loopback
*/
@@ -2333,7 +2476,7 @@ struct m_can_classdev *m_can_class_allocate_dev(struct device *dev,
net_dev = alloc_candev(sizeof_priv, tx_fifo_size);
if (!net_dev) {
dev_err(dev, "Failed to allocate CAN device");
- goto out;
+ return ERR_PTR(-ENOMEM);
}
class_dev = netdev_priv(net_dev);
@@ -2342,8 +2485,17 @@ struct m_can_classdev *m_can_class_allocate_dev(struct device *dev,
SET_NETDEV_DEV(net_dev, dev);
m_can_of_parse_mram(class_dev, mram_config_vals);
-out:
+ spin_lock_init(&class_dev->tx_handling_spinlock);
+
+ ret = m_can_class_parse_pinctrl(class_dev);
+ if (ret)
+ goto err_free_candev;
+
return class_dev;
+
+err_free_candev:
+ free_candev(net_dev);
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(m_can_class_allocate_dev);
@@ -2364,31 +2516,37 @@ int m_can_class_register(struct m_can_classdev *cdev)
devm_kzalloc(cdev->dev,
cdev->tx_fifo_size * sizeof(*cdev->tx_ops),
GFP_KERNEL);
- if (!cdev->tx_ops) {
- dev_err(cdev->dev, "Failed to allocate tx_ops for workqueue\n");
+ if (!cdev->tx_ops)
return -ENOMEM;
- }
}
+ cdev->rst = devm_reset_control_get_optional_shared(cdev->dev, NULL);
+ if (IS_ERR(cdev->rst))
+ return dev_err_probe(cdev->dev, PTR_ERR(cdev->rst),
+ "Failed to get reset line\n");
+
ret = m_can_clk_start(cdev);
if (ret)
return ret;
+ ret = reset_control_deassert(cdev->rst);
+ if (ret)
+ goto clk_disable;
+
if (cdev->is_peripheral) {
ret = can_rx_offload_add_manual(cdev->net, &cdev->offload,
NAPI_POLL_WEIGHT);
if (ret)
- goto clk_disable;
+ goto out_reset_control_assert;
}
if (!cdev->net->irq) {
- dev_dbg(cdev->dev, "Polling enabled, initialize hrtimer");
- hrtimer_init(&cdev->hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- cdev->hrtimer.function = &hrtimer_callback;
+		netdev_dbg(cdev->net, "Polling enabled, initializing hrtimer\n");
+ hrtimer_setup(&cdev->hrtimer, m_can_polling_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
} else {
- hrtimer_init(&cdev->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- cdev->hrtimer.function = m_can_coalescing_timer;
+ hrtimer_setup(&cdev->hrtimer, m_can_coalescing_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
ret = m_can_dev_setup(cdev);
@@ -2397,19 +2555,21 @@ int m_can_class_register(struct m_can_classdev *cdev)
ret = register_m_can_dev(cdev);
if (ret) {
- dev_err(cdev->dev, "registering %s failed (err=%d)\n",
- cdev->net->name, ret);
+ netdev_err(cdev->net, "registering %s failed (err=%d)\n",
+ cdev->net->name, ret);
goto rx_offload_del;
}
of_can_transceiver(cdev->net);
- dev_info(cdev->dev, "%s device registered (irq=%d, version=%d)\n",
- KBUILD_MODNAME, cdev->net->irq, cdev->version);
+ netdev_info(cdev->net, "device registered (irq=%d, version=%d)\n",
+ cdev->net->irq, cdev->version);
/* Probe finished
- * Stop clocks. They will be reactivated once the M_CAN device is opened
+ * Assert reset and stop clocks.
+ * They will be reactivated once the M_CAN device is opened
*/
+ reset_control_assert(cdev->rst);
m_can_clk_stop(cdev);
return 0;
@@ -2417,6 +2577,8 @@ int m_can_class_register(struct m_can_classdev *cdev)
rx_offload_del:
if (cdev->is_peripheral)
can_rx_offload_del(&cdev->offload);
+out_reset_control_assert:
+ reset_control_assert(cdev->rst);
clk_disable:
m_can_clk_stop(cdev);
@@ -2426,9 +2588,9 @@ EXPORT_SYMBOL_GPL(m_can_class_register);
void m_can_class_unregister(struct m_can_classdev *cdev)
{
+ unregister_candev(cdev->net);
if (cdev->is_peripheral)
can_rx_offload_del(&cdev->offload);
- unregister_candev(cdev->net);
}
EXPORT_SYMBOL_GPL(m_can_class_unregister);
@@ -2436,6 +2598,7 @@ int m_can_class_suspend(struct device *dev)
{
struct m_can_classdev *cdev = dev_get_drvdata(dev);
struct net_device *ndev = cdev->net;
+ int ret = 0;
if (netif_running(ndev)) {
netif_stop_queue(ndev);
@@ -2448,18 +2611,21 @@ int m_can_class_suspend(struct device *dev)
if (cdev->pm_wake_source) {
hrtimer_cancel(&cdev->hrtimer);
m_can_write(cdev, M_CAN_IE, IR_RF0N);
+
+ if (cdev->ops->deinit)
+ ret = cdev->ops->deinit(cdev);
} else {
m_can_stop(ndev);
}
m_can_clk_stop(cdev);
+ cdev->can.state = CAN_STATE_SLEEPING;
}
- pinctrl_pm_select_sleep_state(dev);
-
- cdev->can.state = CAN_STATE_SLEEPING;
+ if (!m_can_class_wakeup_pinctrl_enabled(cdev))
+ pinctrl_pm_select_sleep_state(dev);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(m_can_class_suspend);
@@ -2467,14 +2633,12 @@ int m_can_class_resume(struct device *dev)
{
struct m_can_classdev *cdev = dev_get_drvdata(dev);
struct net_device *ndev = cdev->net;
+ int ret = 0;
- pinctrl_pm_select_default_state(dev);
-
- cdev->can.state = CAN_STATE_ERROR_ACTIVE;
+ if (!m_can_class_wakeup_pinctrl_enabled(cdev))
+ pinctrl_pm_select_default_state(dev);
if (netif_running(ndev)) {
- int ret;
-
ret = m_can_clk_start(cdev);
if (ret)
return ret;
@@ -2487,6 +2651,12 @@ int m_can_class_resume(struct device *dev)
* again.
*/
cdev->active_interrupts |= IR_RF0N | IR_TEFN;
+
+ if (cdev->ops->init)
+ ret = cdev->ops->init(cdev);
+
+ cdev->can.state = m_can_state_get_by_psr(cdev);
+
m_can_write(cdev, M_CAN_IE, cdev->active_interrupts);
} else {
ret = m_can_start(ndev);
@@ -2500,11 +2670,11 @@ int m_can_class_resume(struct device *dev)
netif_start_queue(ndev);
}
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(m_can_class_resume);
-MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>");
+MODULE_AUTHOR("Dong Aisheng <aisheng.dong@nxp.com>");
MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller");
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
index 92b2bd8628e6..4743342b2fba 100644
--- a/drivers/net/can/m_can/m_can.h
+++ b/drivers/net/can/m_can/m_can.h
@@ -68,6 +68,7 @@ struct m_can_ops {
int (*write_fifo)(struct m_can_classdev *cdev, int addr_offset,
const void *val, size_t val_count);
int (*init)(struct m_can_classdev *cdev);
+ int (*deinit)(struct m_can_classdev *cdev);
};
struct m_can_tx_op {
@@ -85,6 +86,7 @@ struct m_can_classdev {
struct device *dev;
struct clk *hclk;
struct clk *cclk;
+ struct reset_control *rst;
struct workqueue_struct *tx_wq;
struct phy *transceiver;
@@ -99,6 +101,7 @@ struct m_can_classdev {
int pm_clock_support;
int pm_wake_source;
int is_peripheral;
+ bool irq_edge_triggered;
// Cached M_CAN_IE register content
u32 active_interrupts;
@@ -126,6 +129,9 @@ struct m_can_classdev {
struct mram_cfg mcfg[MRAM_CFG_NUM];
struct hrtimer hrtimer;
+
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pinctrl_state_wakeup;
};
struct m_can_classdev *m_can_class_allocate_dev(struct device *dev, int sizeof_priv);
@@ -133,7 +139,6 @@ void m_can_class_free_dev(struct net_device *net);
int m_can_class_register(struct m_can_classdev *cdev);
void m_can_class_unregister(struct m_can_classdev *cdev);
int m_can_class_get_clocks(struct m_can_classdev *cdev);
-int m_can_init_ram(struct m_can_classdev *priv);
int m_can_check_mram_cfg(struct m_can_classdev *cdev, u32 mram_max_size);
int m_can_class_suspend(struct device *dev);
diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c
index d72fe771dfc7..eb31ed1f9644 100644
--- a/drivers/net/can/m_can/m_can_pci.c
+++ b/drivers/net/can/m_can/m_can_pci.c
@@ -111,8 +111,8 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
mcan_class = m_can_class_allocate_dev(&pci->dev,
sizeof(struct m_can_pci_priv));
- if (!mcan_class)
- return -ENOMEM;
+ if (IS_ERR(mcan_class))
+ return PTR_ERR(mcan_class);
priv = cdev_to_priv(mcan_class);
@@ -127,6 +127,7 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
mcan_class->pm_clock_support = 1;
mcan_class->pm_wake_source = 0;
mcan_class->can.clock.freq = id->driver_data;
+ mcan_class->irq_edge_triggered = true;
mcan_class->ops = &m_can_pci_ops;
pci_set_drvdata(pci, mcan_class);
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
index b832566efda0..56da411878af 100644
--- a/drivers/net/can/m_can/m_can_platform.c
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// IOMapped CAN bus driver for Bosch M_CAN controller
// Copyright (C) 2014 Freescale Semiconductor, Inc.
-// Dong Aisheng <b29396@freescale.com>
+// Dong Aisheng <aisheng.dong@nxp.com>
//
// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/
@@ -87,8 +87,8 @@ static int m_can_plat_probe(struct platform_device *pdev)
mcan_class = m_can_class_allocate_dev(&pdev->dev,
sizeof(struct m_can_plat_priv));
- if (!mcan_class)
- return -ENOMEM;
+ if (IS_ERR(mcan_class))
+ return PTR_ERR(mcan_class);
priv = cdev_to_priv(mcan_class);
@@ -180,7 +180,7 @@ static void m_can_plat_remove(struct platform_device *pdev)
struct m_can_classdev *mcan_class = &priv->cdev;
m_can_class_unregister(mcan_class);
-
+ pm_runtime_disable(mcan_class->dev);
m_can_class_free_dev(mcan_class->net);
}
@@ -236,7 +236,7 @@ static struct platform_driver m_can_plat_driver = {
module_platform_driver(m_can_plat_driver);
-MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>");
+MODULE_AUTHOR("Dong Aisheng <aisheng.dong@nxp.com>");
MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("M_CAN driver for IO Mapped Bosch controllers");
diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c
index 2f73bf3abad8..31cc9d0abd45 100644
--- a/drivers/net/can/m_can/tcan4x5x-core.c
+++ b/drivers/net/can/m_can/tcan4x5x-core.c
@@ -92,6 +92,8 @@
#define TCAN4X5X_MODE_STANDBY BIT(6)
#define TCAN4X5X_MODE_NORMAL BIT(7)
+#define TCAN4X5X_NWKRQ_VOLTAGE_VIO BIT(19)
+
#define TCAN4X5X_DISABLE_WAKE_MSK (BIT(31) | BIT(30))
#define TCAN4X5X_DISABLE_INH_MSK BIT(9)
@@ -267,9 +269,24 @@ static int tcan4x5x_init(struct m_can_classdev *cdev)
if (ret)
return ret;
+ if (tcan4x5x->nwkrq_voltage_vio) {
+ ret = regmap_set_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG,
+ TCAN4X5X_NWKRQ_VOLTAGE_VIO);
+ if (ret)
+ return ret;
+ }
+
return ret;
}
+static int tcan4x5x_deinit(struct m_can_classdev *cdev)
+{
+ struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
+
+ return regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG,
+ TCAN4X5X_MODE_SEL_MASK, TCAN4X5X_MODE_STANDBY);
+}
+
static int tcan4x5x_disable_wake(struct m_can_classdev *cdev)
{
struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
@@ -318,21 +335,27 @@ static const struct tcan4x5x_version_info
return &tcan4x5x_versions[TCAN4X5X];
}
-static int tcan4x5x_get_gpios(struct m_can_classdev *cdev,
- const struct tcan4x5x_version_info *version_info)
+static void tcan4x5x_get_dt_data(struct m_can_classdev *cdev)
+{
+ struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
+
+ tcan4x5x->nwkrq_voltage_vio =
+ of_property_read_bool(cdev->dev->of_node, "ti,nwkrq-voltage-vio");
+}
+
+static int tcan4x5x_get_gpios(struct m_can_classdev *cdev)
{
struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
int ret;
- if (version_info->has_wake_pin) {
- tcan4x5x->device_wake_gpio = devm_gpiod_get(cdev->dev, "device-wake",
- GPIOD_OUT_HIGH);
- if (IS_ERR(tcan4x5x->device_wake_gpio)) {
- if (PTR_ERR(tcan4x5x->device_wake_gpio) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ tcan4x5x->device_wake_gpio = devm_gpiod_get_optional(cdev->dev,
+ "device-wake",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(tcan4x5x->device_wake_gpio)) {
+ if (PTR_ERR(tcan4x5x->device_wake_gpio) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
- tcan4x5x_disable_wake(cdev);
- }
+ tcan4x5x->device_wake_gpio = NULL;
}
tcan4x5x->reset_gpio = devm_gpiod_get_optional(cdev->dev, "reset",
@@ -344,14 +367,31 @@ static int tcan4x5x_get_gpios(struct m_can_classdev *cdev,
if (ret)
return ret;
- if (version_info->has_state_pin) {
- tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev,
- "device-state",
- GPIOD_IN);
- if (IS_ERR(tcan4x5x->device_state_gpio)) {
- tcan4x5x->device_state_gpio = NULL;
- tcan4x5x_disable_state(cdev);
- }
+ tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev,
+ "device-state",
+ GPIOD_IN);
+ if (IS_ERR(tcan4x5x->device_state_gpio))
+ tcan4x5x->device_state_gpio = NULL;
+
+ return 0;
+}
+
+static int tcan4x5x_check_gpios(struct m_can_classdev *cdev,
+ const struct tcan4x5x_version_info *version_info)
+{
+ struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
+ int ret;
+
+ if (version_info->has_wake_pin && !tcan4x5x->device_wake_gpio) {
+ ret = tcan4x5x_disable_wake(cdev);
+ if (ret)
+ return ret;
+ }
+
+ if (version_info->has_state_pin && !tcan4x5x->device_state_gpio) {
+ ret = tcan4x5x_disable_state(cdev);
+ if (ret)
+ return ret;
}
return 0;
@@ -359,6 +399,7 @@ static int tcan4x5x_get_gpios(struct m_can_classdev *cdev,
static const struct m_can_ops tcan4x5x_ops = {
.init = tcan4x5x_init,
+ .deinit = tcan4x5x_deinit,
.read_reg = tcan4x5x_read_reg,
.write_reg = tcan4x5x_write_reg,
.write_fifo = tcan4x5x_write_fifo,
@@ -375,8 +416,8 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
mcan_class = m_can_class_allocate_dev(&spi->dev,
sizeof(struct tcan4x5x_priv));
- if (!mcan_class)
- return -ENOMEM;
+ if (IS_ERR(mcan_class))
+ return PTR_ERR(mcan_class);
ret = m_can_check_mram_cfg(mcan_class, TCAN4X5X_MRAM_SIZE);
if (ret)
@@ -385,14 +426,15 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
priv = cdev_to_priv(mcan_class);
priv->power = devm_regulator_get_optional(&spi->dev, "vsup");
- if (PTR_ERR(priv->power) == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
- goto out_m_can_class_free_dev;
- } else {
+ if (IS_ERR(priv->power)) {
+ if (PTR_ERR(priv->power) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto out_m_can_class_free_dev;
+ }
priv->power = NULL;
}
- m_can_class_get_clocks(mcan_class);
+ mcan_class->cclk = devm_clk_get(mcan_class->dev, "cclk");
if (IS_ERR(mcan_class->cclk)) {
dev_err(&spi->dev, "no CAN clock source defined\n");
freq = TCAN4X5X_EXT_CLK_DEF;
@@ -441,18 +483,26 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
goto out_m_can_class_free_dev;
}
+ ret = tcan4x5x_get_gpios(mcan_class);
+ if (ret) {
+ dev_err(&spi->dev, "Getting gpios failed %pe\n", ERR_PTR(ret));
+ goto out_power;
+ }
+
version_info = tcan4x5x_find_version(priv);
if (IS_ERR(version_info)) {
ret = PTR_ERR(version_info);
goto out_power;
}
- ret = tcan4x5x_get_gpios(mcan_class, version_info);
+ ret = tcan4x5x_check_gpios(mcan_class, version_info);
if (ret) {
- dev_err(&spi->dev, "Getting gpios failed %pe\n", ERR_PTR(ret));
+ dev_err(&spi->dev, "Checking gpios failed %pe\n", ERR_PTR(ret));
goto out_power;
}
+ tcan4x5x_get_dt_data(mcan_class);
+
tcan4x5x_check_wake(priv);
ret = tcan4x5x_write_tcan_reg(mcan_class, TCAN4X5X_INT_EN, 0);
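
The tcan4x5x changes above stop gating the GPIO lookups on the chip version: both pins are requested unconditionally with devm_gpiod_get_optional(), and tcan4x5x_check_gpios() later decides whether a missing pin means the wake or state feature has to be disabled in the device. The property being relied on is that the _optional variant returns NULL, not an error, when the pin simply is not described. The patch itself only propagates -EPROBE_DEFER and treats other lookup failures as "pin absent"; the sketch below shows the more common propagate-all-errors variant of the same idiom, with a hypothetical "wake" pin name:

#include <linux/device.h>
#include <linux/gpio/consumer.h>

/* Sketch only: request an optional pin; NULL means "not wired up",
 * an ERR_PTR means a real lookup failure (including -EPROBE_DEFER).
 */
static int foo_get_optional_wake_gpio(struct device *dev,
				      struct gpio_desc **wake_out)
{
	struct gpio_desc *wake;

	wake = devm_gpiod_get_optional(dev, "wake", GPIOD_OUT_HIGH);
	if (IS_ERR(wake))
		return dev_err_probe(dev, PTR_ERR(wake),
				     "failed to get wake GPIO\n");

	*wake_out = wake;	/* may legitimately be NULL */
	return 0;
}
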
diff --git a/drivers/net/can/m_can/tcan4x5x.h b/drivers/net/can/m_can/tcan4x5x.h
index e62c030d3e1e..203399d5e8cc 100644
--- a/drivers/net/can/m_can/tcan4x5x.h
+++ b/drivers/net/can/m_can/tcan4x5x.h
@@ -42,6 +42,8 @@ struct tcan4x5x_priv {
struct tcan4x5x_map_buf map_buf_rx;
struct tcan4x5x_map_buf map_buf_tx;
+
+ bool nwkrq_voltage_vio;
};
static inline void
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 8c2a7bc64d3d..39c7aa2a0b2f 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -607,7 +607,6 @@ static const struct net_device_ops mscan_netdev_ops = {
.ndo_open = mscan_open,
.ndo_stop = mscan_close,
.ndo_start_xmit = mscan_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops mscan_ethtool_ops = {
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index 28f3fd805273..06cb2629f66a 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
- * Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com>
*
- * Copyright (C) 2016 PEAK System-Technik GmbH
+ * Copyright (C) 2016-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#include <linux/can.h>
@@ -624,7 +624,7 @@ static int peak_canfd_set_data_bittiming(struct net_device *ndev)
{
struct peak_canfd_priv *priv = netdev_priv(ndev);
- return pucan_set_timing_fast(priv, &priv->can.data_bittiming);
+ return pucan_set_timing_fast(priv, &priv->can.fd.data_bittiming);
}
static int peak_canfd_close(struct net_device *ndev)
@@ -743,37 +743,33 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-static int peak_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+static int peak_eth_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config hwts_cfg = { 0 };
+ config->tx_type = HWTSTAMP_TX_OFF;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
- switch (cmd) {
- case SIOCSHWTSTAMP: /* set */
- if (copy_from_user(&hwts_cfg, ifr->ifr_data, sizeof(hwts_cfg)))
- return -EFAULT;
- if (hwts_cfg.tx_type == HWTSTAMP_TX_OFF &&
- hwts_cfg.rx_filter == HWTSTAMP_FILTER_ALL)
- return 0;
- return -ERANGE;
+ return 0;
+}
- case SIOCGHWTSTAMP: /* get */
- hwts_cfg.tx_type = HWTSTAMP_TX_OFF;
- hwts_cfg.rx_filter = HWTSTAMP_FILTER_ALL;
- if (copy_to_user(ifr->ifr_data, &hwts_cfg, sizeof(hwts_cfg)))
- return -EFAULT;
+static int peak_eth_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ if (config->tx_type == HWTSTAMP_TX_OFF &&
+ config->rx_filter == HWTSTAMP_FILTER_ALL)
return 0;
- default:
- return -EOPNOTSUPP;
- }
+ NL_SET_ERR_MSG_MOD(extack, "Only RX HWTSTAMP_FILTER_ALL is supported");
+ return -ERANGE;
}
static const struct net_device_ops peak_canfd_netdev_ops = {
.ndo_open = peak_canfd_open,
.ndo_stop = peak_canfd_close,
- .ndo_eth_ioctl = peak_eth_ioctl,
.ndo_start_xmit = peak_canfd_start_xmit,
- .ndo_change_mtu = can_change_mtu,
+ .ndo_hwtstamp_get = peak_eth_hwtstamp_get,
+ .ndo_hwtstamp_set = peak_eth_hwtstamp_set,
};
static int peak_get_ts_info(struct net_device *dev,
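
The peak_canfd conversion above drops the private SIOCSHWTSTAMP/SIOCGHWTSTAMP ioctl handler in favour of .ndo_hwtstamp_get/.ndo_hwtstamp_set, which receive an already-unpacked struct kernel_hwtstamp_config plus an extack for error reporting; the networking core handles the user-space copies. The user-space ABI should be unchanged by this. A hedged sketch of how an application keeps requesting RX timestamps on such an interface (any socket fd works, e.g. a raw CAN socket):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

/* Request "timestamp all received frames, none on transmit" -- the only
 * configuration peak_canfd accepts. Returns 0 on success, -1 with errno set.
 */
static int enable_rx_hwtstamp(int sock, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_OFF,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}
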
@@ -813,12 +809,12 @@ struct net_device *alloc_peak_canfd_dev(int sizeof_priv, int index,
/* complete now socket-can initialization side */
priv->can.state = CAN_STATE_STOPPED;
priv->can.bittiming_const = &peak_canfd_nominal_const;
- priv->can.data_bittiming_const = &peak_canfd_data_const;
+ priv->can.fd.data_bittiming_const = &peak_canfd_data_const;
priv->can.do_set_mode = peak_canfd_set_mode;
priv->can.do_get_berr_counter = peak_canfd_get_berr_counter;
priv->can.do_set_bittiming = peak_canfd_set_bittiming;
- priv->can.do_set_data_bittiming = peak_canfd_set_data_bittiming;
+ priv->can.fd.do_set_data_bittiming = peak_canfd_set_data_bittiming;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_3_SAMPLES |
diff --git a/drivers/net/can/peak_canfd/peak_canfd_user.h b/drivers/net/can/peak_canfd/peak_canfd_user.h
index a72719dc3b74..60c6542028cf 100644
--- a/drivers/net/can/peak_canfd/peak_canfd_user.h
+++ b/drivers/net/can/peak_canfd/peak_canfd_user.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* CAN driver for PEAK System micro-CAN based adapters
*
- * Copyright (C) 2003-2011 PEAK System-Technik GmbH
- * Copyright (C) 2011-2013 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#ifndef PEAK_CANFD_USER_H
#define PEAK_CANFD_USER_H
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index 1df3c4b54f03..93558e33bc02 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -1,10 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
- * Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com>
*
* Derived from the PCAN project file driver/src/pcan_pci.c:
*
- * Copyright (C) 2001-2006 PEAK System-Technik GmbH
+ * Copyright (C) 2001-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#include <linux/kernel.h>
@@ -19,7 +19,7 @@
#include "peak_canfd_user.h"
-MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
+MODULE_AUTHOR("Stéphane Grosjean <stephane.grosjean@hms-networks.com>");
MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe/M.2 FD family cards");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index 2b7dd359f27b..fc3df328e877 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -5,6 +5,8 @@
* Copyright (C) 2013 Renesas Solutions Corp.
*/
+#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -16,6 +18,7 @@
#include <linux/can/dev.h>
#include <linux/clk.h>
#include <linux/of.h>
+#include <linux/pm_runtime.h>
#define RCAR_CAN_DRV_NAME "rcar_can"
@@ -92,7 +95,6 @@ struct rcar_can_priv {
struct net_device *ndev;
struct napi_struct napi;
struct rcar_can_regs __iomem *regs;
- struct clk *clk;
struct clk *can_clk;
u32 tx_head;
u32 tx_tail;
@@ -113,100 +115,102 @@ static const struct can_bittiming_const rcar_can_bittiming_const = {
};
/* Control Register bits */
-#define RCAR_CAN_CTLR_BOM (3 << 11) /* Bus-Off Recovery Mode Bits */
-#define RCAR_CAN_CTLR_BOM_ENT (1 << 11) /* Entry to halt mode */
- /* at bus-off entry */
-#define RCAR_CAN_CTLR_SLPM (1 << 10)
-#define RCAR_CAN_CTLR_CANM (3 << 8) /* Operating Mode Select Bit */
-#define RCAR_CAN_CTLR_CANM_HALT (1 << 9)
-#define RCAR_CAN_CTLR_CANM_RESET (1 << 8)
-#define RCAR_CAN_CTLR_CANM_FORCE_RESET (3 << 8)
-#define RCAR_CAN_CTLR_MLM (1 << 3) /* Message Lost Mode Select */
-#define RCAR_CAN_CTLR_IDFM (3 << 1) /* ID Format Mode Select Bits */
-#define RCAR_CAN_CTLR_IDFM_MIXED (1 << 2) /* Mixed ID mode */
-#define RCAR_CAN_CTLR_MBM (1 << 0) /* Mailbox Mode select */
+#define RCAR_CAN_CTLR_BOM GENMASK(12, 11) /* Bus-Off Recovery Mode Bits */
+#define RCAR_CAN_CTLR_BOM_ENT 1 /* Entry to halt mode */
+ /* at bus-off entry */
+#define RCAR_CAN_CTLR_SLPM BIT(10) /* Sleep Mode */
+#define RCAR_CAN_CTLR_CANM GENMASK(9, 8) /* Operating Mode Select Bit */
+#define RCAR_CAN_CTLR_CANM_OPER 0 /* Operation Mode */
+#define RCAR_CAN_CTLR_CANM_RESET 1 /* Reset Mode */
+#define RCAR_CAN_CTLR_CANM_HALT 2 /* Halt Mode */
+#define RCAR_CAN_CTLR_CANM_FORCE_RESET 3 /* Reset Mode (forcible) */
+#define RCAR_CAN_CTLR_MLM BIT(3) /* Message Lost Mode Select */
+#define RCAR_CAN_CTLR_IDFM GENMASK(2, 1) /* ID Format Mode Select Bits */
+#define RCAR_CAN_CTLR_IDFM_STD 0 /* Standard ID mode */
+#define RCAR_CAN_CTLR_IDFM_EXT 1 /* Extended ID mode */
+#define RCAR_CAN_CTLR_IDFM_MIXED 2 /* Mixed ID mode */
+#define RCAR_CAN_CTLR_MBM BIT(0) /* Mailbox Mode select */
/* Status Register bits */
-#define RCAR_CAN_STR_RSTST (1 << 8) /* Reset Status Bit */
+#define RCAR_CAN_STR_RSTST BIT(8) /* Reset Status Bit */
/* FIFO Received ID Compare Registers 0 and 1 bits */
-#define RCAR_CAN_FIDCR_IDE (1 << 31) /* ID Extension Bit */
-#define RCAR_CAN_FIDCR_RTR (1 << 30) /* Remote Transmission Request Bit */
+#define RCAR_CAN_FIDCR_IDE BIT(31) /* ID Extension Bit */
+#define RCAR_CAN_FIDCR_RTR BIT(30) /* Remote Transmission Request Bit */
/* Receive FIFO Control Register bits */
-#define RCAR_CAN_RFCR_RFEST (1 << 7) /* Receive FIFO Empty Status Flag */
-#define RCAR_CAN_RFCR_RFE (1 << 0) /* Receive FIFO Enable */
+#define RCAR_CAN_RFCR_RFEST BIT(7) /* Receive FIFO Empty Status Flag */
+#define RCAR_CAN_RFCR_RFE BIT(0) /* Receive FIFO Enable */
/* Transmit FIFO Control Register bits */
-#define RCAR_CAN_TFCR_TFUST (7 << 1) /* Transmit FIFO Unsent Message */
- /* Number Status Bits */
-#define RCAR_CAN_TFCR_TFUST_SHIFT 1 /* Offset of Transmit FIFO Unsent */
- /* Message Number Status Bits */
-#define RCAR_CAN_TFCR_TFE (1 << 0) /* Transmit FIFO Enable */
-
-#define RCAR_CAN_N_RX_MKREGS1 2 /* Number of mask registers */
- /* for Rx mailboxes 0-31 */
+#define RCAR_CAN_TFCR_TFUST GENMASK(3, 1) /* Transmit FIFO Unsent Message */
+ /* Number Status Bits */
+#define RCAR_CAN_TFCR_TFE BIT(0) /* Transmit FIFO Enable */
+
+#define RCAR_CAN_N_RX_MKREGS1 2 /* Number of mask registers */
+ /* for Rx mailboxes 0-31 */
#define RCAR_CAN_N_RX_MKREGS2 8
/* Bit Configuration Register settings */
-#define RCAR_CAN_BCR_TSEG1(x) (((x) & 0x0f) << 20)
-#define RCAR_CAN_BCR_BPR(x) (((x) & 0x3ff) << 8)
-#define RCAR_CAN_BCR_SJW(x) (((x) & 0x3) << 4)
-#define RCAR_CAN_BCR_TSEG2(x) ((x) & 0x07)
+#define RCAR_CAN_BCR_TSEG1 GENMASK(23, 20)
+#define RCAR_CAN_BCR_BRP GENMASK(17, 8)
+#define RCAR_CAN_BCR_SJW GENMASK(5, 4)
+#define RCAR_CAN_BCR_TSEG2 GENMASK(2, 0)
/* Mailbox and Mask Registers bits */
-#define RCAR_CAN_IDE (1 << 31)
-#define RCAR_CAN_RTR (1 << 30)
-#define RCAR_CAN_SID_SHIFT 18
+#define RCAR_CAN_IDE BIT(31) /* ID Extension */
+#define RCAR_CAN_RTR BIT(30) /* Remote Transmission Request */
+#define RCAR_CAN_SID GENMASK(28, 18) /* Standard ID */
+#define RCAR_CAN_EID GENMASK(28, 0) /* Extended ID */
/* Mailbox Interrupt Enable Register 1 bits */
-#define RCAR_CAN_MIER1_RXFIE (1 << 28) /* Receive FIFO Interrupt Enable */
-#define RCAR_CAN_MIER1_TXFIE (1 << 24) /* Transmit FIFO Interrupt Enable */
+#define RCAR_CAN_MIER1_RXFIE BIT(28) /* Receive FIFO Interrupt Enable */
+#define RCAR_CAN_MIER1_TXFIE BIT(24) /* Transmit FIFO Interrupt Enable */
/* Interrupt Enable Register bits */
-#define RCAR_CAN_IER_ERSIE (1 << 5) /* Error (ERS) Interrupt Enable Bit */
-#define RCAR_CAN_IER_RXFIE (1 << 4) /* Reception FIFO Interrupt */
- /* Enable Bit */
-#define RCAR_CAN_IER_TXFIE (1 << 3) /* Transmission FIFO Interrupt */
- /* Enable Bit */
+#define RCAR_CAN_IER_ERSIE BIT(5) /* Error (ERS) Interrupt Enable Bit */
+#define RCAR_CAN_IER_RXFIE BIT(4) /* Reception FIFO Interrupt */
+ /* Enable Bit */
+#define RCAR_CAN_IER_TXFIE BIT(3) /* Transmission FIFO Interrupt */
+ /* Enable Bit */
/* Interrupt Status Register bits */
-#define RCAR_CAN_ISR_ERSF (1 << 5) /* Error (ERS) Interrupt Status Bit */
-#define RCAR_CAN_ISR_RXFF (1 << 4) /* Reception FIFO Interrupt */
- /* Status Bit */
-#define RCAR_CAN_ISR_TXFF (1 << 3) /* Transmission FIFO Interrupt */
- /* Status Bit */
+#define RCAR_CAN_ISR_ERSF BIT(5) /* Error (ERS) Interrupt Status Bit */
+#define RCAR_CAN_ISR_RXFF BIT(4) /* Reception FIFO Interrupt */
+ /* Status Bit */
+#define RCAR_CAN_ISR_TXFF BIT(3) /* Transmission FIFO Interrupt */
+ /* Status Bit */
/* Error Interrupt Enable Register bits */
-#define RCAR_CAN_EIER_BLIE (1 << 7) /* Bus Lock Interrupt Enable */
-#define RCAR_CAN_EIER_OLIE (1 << 6) /* Overload Frame Transmit */
- /* Interrupt Enable */
-#define RCAR_CAN_EIER_ORIE (1 << 5) /* Receive Overrun Interrupt Enable */
-#define RCAR_CAN_EIER_BORIE (1 << 4) /* Bus-Off Recovery Interrupt Enable */
-#define RCAR_CAN_EIER_BOEIE (1 << 3) /* Bus-Off Entry Interrupt Enable */
-#define RCAR_CAN_EIER_EPIE (1 << 2) /* Error Passive Interrupt Enable */
-#define RCAR_CAN_EIER_EWIE (1 << 1) /* Error Warning Interrupt Enable */
-#define RCAR_CAN_EIER_BEIE (1 << 0) /* Bus Error Interrupt Enable */
+#define RCAR_CAN_EIER_BLIE BIT(7) /* Bus Lock Interrupt Enable */
+#define RCAR_CAN_EIER_OLIE BIT(6) /* Overload Frame Transmit */
+ /* Interrupt Enable */
+#define RCAR_CAN_EIER_ORIE BIT(5) /* Receive Overrun Interrupt Enable */
+#define RCAR_CAN_EIER_BORIE BIT(4) /* Bus-Off Recovery Interrupt Enable */
+#define RCAR_CAN_EIER_BOEIE BIT(3) /* Bus-Off Entry Interrupt Enable */
+#define RCAR_CAN_EIER_EPIE BIT(2) /* Error Passive Interrupt Enable */
+#define RCAR_CAN_EIER_EWIE BIT(1) /* Error Warning Interrupt Enable */
+#define RCAR_CAN_EIER_BEIE BIT(0) /* Bus Error Interrupt Enable */
/* Error Interrupt Factor Judge Register bits */
-#define RCAR_CAN_EIFR_BLIF (1 << 7) /* Bus Lock Detect Flag */
-#define RCAR_CAN_EIFR_OLIF (1 << 6) /* Overload Frame Transmission */
- /* Detect Flag */
-#define RCAR_CAN_EIFR_ORIF (1 << 5) /* Receive Overrun Detect Flag */
-#define RCAR_CAN_EIFR_BORIF (1 << 4) /* Bus-Off Recovery Detect Flag */
-#define RCAR_CAN_EIFR_BOEIF (1 << 3) /* Bus-Off Entry Detect Flag */
-#define RCAR_CAN_EIFR_EPIF (1 << 2) /* Error Passive Detect Flag */
-#define RCAR_CAN_EIFR_EWIF (1 << 1) /* Error Warning Detect Flag */
-#define RCAR_CAN_EIFR_BEIF (1 << 0) /* Bus Error Detect Flag */
+#define RCAR_CAN_EIFR_BLIF BIT(7) /* Bus Lock Detect Flag */
+#define RCAR_CAN_EIFR_OLIF BIT(6) /* Overload Frame Transmission */
+ /* Detect Flag */
+#define RCAR_CAN_EIFR_ORIF BIT(5) /* Receive Overrun Detect Flag */
+#define RCAR_CAN_EIFR_BORIF BIT(4) /* Bus-Off Recovery Detect Flag */
+#define RCAR_CAN_EIFR_BOEIF BIT(3) /* Bus-Off Entry Detect Flag */
+#define RCAR_CAN_EIFR_EPIF BIT(2) /* Error Passive Detect Flag */
+#define RCAR_CAN_EIFR_EWIF BIT(1) /* Error Warning Detect Flag */
+#define RCAR_CAN_EIFR_BEIF BIT(0) /* Bus Error Detect Flag */
/* Error Code Store Register bits */
-#define RCAR_CAN_ECSR_EDPM (1 << 7) /* Error Display Mode Select Bit */
-#define RCAR_CAN_ECSR_ADEF (1 << 6) /* ACK Delimiter Error Flag */
-#define RCAR_CAN_ECSR_BE0F (1 << 5) /* Bit Error (dominant) Flag */
-#define RCAR_CAN_ECSR_BE1F (1 << 4) /* Bit Error (recessive) Flag */
-#define RCAR_CAN_ECSR_CEF (1 << 3) /* CRC Error Flag */
-#define RCAR_CAN_ECSR_AEF (1 << 2) /* ACK Error Flag */
-#define RCAR_CAN_ECSR_FEF (1 << 1) /* Form Error Flag */
-#define RCAR_CAN_ECSR_SEF (1 << 0) /* Stuff Error Flag */
+#define RCAR_CAN_ECSR_EDPM BIT(7) /* Error Display Mode Select Bit */
+#define RCAR_CAN_ECSR_ADEF BIT(6) /* ACK Delimiter Error Flag */
+#define RCAR_CAN_ECSR_BE0F BIT(5) /* Bit Error (dominant) Flag */
+#define RCAR_CAN_ECSR_BE1F BIT(4) /* Bit Error (recessive) Flag */
+#define RCAR_CAN_ECSR_CEF BIT(3) /* CRC Error Flag */
+#define RCAR_CAN_ECSR_AEF BIT(2) /* ACK Error Flag */
+#define RCAR_CAN_ECSR_FEF BIT(1) /* Form Error Flag */
+#define RCAR_CAN_ECSR_SEF BIT(0) /* Stuff Error Flag */
#define RCAR_CAN_NAPI_WEIGHT 4
#define MAX_STR_READS 0x100
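
The register definitions above replace open-coded "(x & mask) << shift" arithmetic with BIT()/GENMASK() masks consumed by FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>: each field is described once by its bit span, the macros derive the shift from the mask, and composing and extracting a field can no longer drift apart. A small self-contained illustration with a made-up control word (not the real rcar_can layout):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Hypothetical 16-bit control register: bits 9..8 select the mode. */
#define DEMO_CTLR_CANM		GENMASK(9, 8)
#define DEMO_CTLR_CANM_OPER	0	/* operation mode */
#define DEMO_CTLR_CANM_HALT	2	/* halt mode */

static u16 demo_select_mode(u16 ctlr, unsigned int mode)
{
	ctlr &= ~DEMO_CTLR_CANM;			/* clear the field */
	return ctlr | FIELD_PREP(DEMO_CTLR_CANM, mode);	/* place new value */
}

static unsigned int demo_current_mode(u16 ctlr)
{
	return FIELD_GET(DEMO_CTLR_CANM, ctlr);		/* shift back down */
}
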
@@ -248,35 +252,35 @@ static void rcar_can_error(struct net_device *ndev)
if (ecsr & RCAR_CAN_ECSR_ADEF) {
netdev_dbg(priv->ndev, "ACK Delimiter Error\n");
tx_errors++;
- writeb(~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr);
if (skb)
cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL;
}
if (ecsr & RCAR_CAN_ECSR_BE0F) {
netdev_dbg(priv->ndev, "Bit Error (dominant)\n");
tx_errors++;
- writeb(~RCAR_CAN_ECSR_BE0F, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_BE0F, &priv->regs->ecsr);
if (skb)
cf->data[2] |= CAN_ERR_PROT_BIT0;
}
if (ecsr & RCAR_CAN_ECSR_BE1F) {
netdev_dbg(priv->ndev, "Bit Error (recessive)\n");
tx_errors++;
- writeb(~RCAR_CAN_ECSR_BE1F, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_BE1F, &priv->regs->ecsr);
if (skb)
cf->data[2] |= CAN_ERR_PROT_BIT1;
}
if (ecsr & RCAR_CAN_ECSR_CEF) {
netdev_dbg(priv->ndev, "CRC Error\n");
rx_errors++;
- writeb(~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr);
if (skb)
cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
}
if (ecsr & RCAR_CAN_ECSR_AEF) {
netdev_dbg(priv->ndev, "ACK Error\n");
tx_errors++;
- writeb(~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr);
if (skb) {
cf->can_id |= CAN_ERR_ACK;
cf->data[3] = CAN_ERR_PROT_LOC_ACK;
@@ -285,14 +289,14 @@ static void rcar_can_error(struct net_device *ndev)
if (ecsr & RCAR_CAN_ECSR_FEF) {
netdev_dbg(priv->ndev, "Form Error\n");
rx_errors++;
- writeb(~RCAR_CAN_ECSR_FEF, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_FEF, &priv->regs->ecsr);
if (skb)
cf->data[2] |= CAN_ERR_PROT_FORM;
}
if (ecsr & RCAR_CAN_ECSR_SEF) {
netdev_dbg(priv->ndev, "Stuff Error\n");
rx_errors++;
- writeb(~RCAR_CAN_ECSR_SEF, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_SEF, &priv->regs->ecsr);
if (skb)
cf->data[2] |= CAN_ERR_PROT_STUFF;
}
@@ -300,14 +304,14 @@ static void rcar_can_error(struct net_device *ndev)
priv->can.can_stats.bus_error++;
ndev->stats.rx_errors += rx_errors;
ndev->stats.tx_errors += tx_errors;
- writeb(~RCAR_CAN_EIFR_BEIF, &priv->regs->eifr);
+ writeb((u8)~RCAR_CAN_EIFR_BEIF, &priv->regs->eifr);
}
if (eifr & RCAR_CAN_EIFR_EWIF) {
netdev_dbg(priv->ndev, "Error warning interrupt\n");
priv->can.state = CAN_STATE_ERROR_WARNING;
priv->can.can_stats.error_warning++;
/* Clear interrupt condition */
- writeb(~RCAR_CAN_EIFR_EWIF, &priv->regs->eifr);
+ writeb((u8)~RCAR_CAN_EIFR_EWIF, &priv->regs->eifr);
if (skb)
cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
@@ -317,7 +321,7 @@ static void rcar_can_error(struct net_device *ndev)
priv->can.state = CAN_STATE_ERROR_PASSIVE;
priv->can.can_stats.error_passive++;
/* Clear interrupt condition */
- writeb(~RCAR_CAN_EIFR_EPIF, &priv->regs->eifr);
+ writeb((u8)~RCAR_CAN_EIFR_EPIF, &priv->regs->eifr);
if (skb)
cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_PASSIVE :
CAN_ERR_CRTL_RX_PASSIVE;
@@ -329,7 +333,7 @@ static void rcar_can_error(struct net_device *ndev)
writeb(priv->ier, &priv->regs->ier);
priv->can.state = CAN_STATE_BUS_OFF;
/* Clear interrupt condition */
- writeb(~RCAR_CAN_EIFR_BOEIF, &priv->regs->eifr);
+ writeb((u8)~RCAR_CAN_EIFR_BOEIF, &priv->regs->eifr);
priv->can.can_stats.bus_off++;
can_bus_off(ndev);
if (skb)
@@ -343,7 +347,7 @@ static void rcar_can_error(struct net_device *ndev)
netdev_dbg(priv->ndev, "Receive overrun error interrupt\n");
ndev->stats.rx_over_errors++;
ndev->stats.rx_errors++;
- writeb(~RCAR_CAN_EIFR_ORIF, &priv->regs->eifr);
+ writeb((u8)~RCAR_CAN_EIFR_ORIF, &priv->regs->eifr);
if (skb) {
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
@@ -354,7 +358,7 @@ static void rcar_can_error(struct net_device *ndev)
"Overload Frame Transmission error interrupt\n");
ndev->stats.rx_over_errors++;
ndev->stats.rx_errors++;
- writeb(~RCAR_CAN_EIFR_OLIF, &priv->regs->eifr);
+ writeb((u8)~RCAR_CAN_EIFR_OLIF, &priv->regs->eifr);
if (skb) {
cf->can_id |= CAN_ERR_PROT;
cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
@@ -372,10 +376,9 @@ static void rcar_can_tx_done(struct net_device *ndev)
u8 isr;
while (1) {
- u8 unsent = readb(&priv->regs->tfcr);
+ u8 unsent = FIELD_GET(RCAR_CAN_TFCR_TFUST,
+ readb(&priv->regs->tfcr));
- unsent = (unsent & RCAR_CAN_TFCR_TFUST) >>
- RCAR_CAN_TFCR_TFUST_SHIFT;
if (priv->tx_head - priv->tx_tail <= unsent)
break;
stats->tx_packets++;
@@ -420,15 +423,16 @@ static irqreturn_t rcar_can_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void rcar_can_set_bittiming(struct net_device *dev)
+static void rcar_can_set_bittiming(struct net_device *ndev)
{
- struct rcar_can_priv *priv = netdev_priv(dev);
+ struct rcar_can_priv *priv = netdev_priv(ndev);
struct can_bittiming *bt = &priv->can.bittiming;
u32 bcr;
- bcr = RCAR_CAN_BCR_TSEG1(bt->phase_seg1 + bt->prop_seg - 1) |
- RCAR_CAN_BCR_BPR(bt->brp - 1) | RCAR_CAN_BCR_SJW(bt->sjw - 1) |
- RCAR_CAN_BCR_TSEG2(bt->phase_seg2 - 1);
+ bcr = FIELD_PREP(RCAR_CAN_BCR_TSEG1, bt->phase_seg1 + bt->prop_seg - 1) |
+ FIELD_PREP(RCAR_CAN_BCR_BRP, bt->brp - 1) |
+ FIELD_PREP(RCAR_CAN_BCR_SJW, bt->sjw - 1) |
+ FIELD_PREP(RCAR_CAN_BCR_TSEG2, bt->phase_seg2 - 1);
/* Don't overwrite CLKR with 32-bit BCR access; CLKR has 8-bit access.
* All the registers are big-endian but they get byte-swapped on 32-bit
* read/write (but not on 8-bit, contrary to the manuals)...
@@ -452,16 +456,17 @@ static void rcar_can_start(struct net_device *ndev)
ctlr &= ~RCAR_CAN_CTLR_SLPM;
writew(ctlr, &priv->regs->ctlr);
/* Go to reset mode */
- ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET;
+ ctlr |= FIELD_PREP(RCAR_CAN_CTLR_CANM, RCAR_CAN_CTLR_CANM_FORCE_RESET);
writew(ctlr, &priv->regs->ctlr);
for (i = 0; i < MAX_STR_READS; i++) {
if (readw(&priv->regs->str) & RCAR_CAN_STR_RSTST)
break;
}
rcar_can_set_bittiming(ndev);
- ctlr |= RCAR_CAN_CTLR_IDFM_MIXED; /* Select mixed ID mode */
- ctlr |= RCAR_CAN_CTLR_BOM_ENT; /* Entry to halt mode automatically */
- /* at bus-off */
+ /* Select mixed ID mode */
+ ctlr |= FIELD_PREP(RCAR_CAN_CTLR_IDFM, RCAR_CAN_CTLR_IDFM_MIXED);
+ /* Entry to halt mode automatically at bus-off */
+ ctlr |= FIELD_PREP(RCAR_CAN_CTLR_BOM, RCAR_CAN_CTLR_BOM_ENT);
ctlr |= RCAR_CAN_CTLR_MBM; /* Select FIFO mailbox mode */
ctlr |= RCAR_CAN_CTLR_MLM; /* Overrun mode */
writew(ctlr, &priv->regs->ctlr);
@@ -491,7 +496,9 @@ static void rcar_can_start(struct net_device *ndev)
priv->can.state = CAN_STATE_ERROR_ACTIVE;
/* Go to operation mode */
- writew(ctlr & ~RCAR_CAN_CTLR_CANM, &priv->regs->ctlr);
+ ctlr &= ~RCAR_CAN_CTLR_CANM;
+ ctlr |= FIELD_PREP(RCAR_CAN_CTLR_CANM, RCAR_CAN_CTLR_CANM_OPER);
+ writew(ctlr, &priv->regs->ctlr);
for (i = 0; i < MAX_STR_READS; i++) {
if (!(readw(&priv->regs->str) & RCAR_CAN_STR_RSTST))
break;
@@ -506,29 +513,28 @@ static int rcar_can_open(struct net_device *ndev)
struct rcar_can_priv *priv = netdev_priv(ndev);
int err;
- err = clk_prepare_enable(priv->clk);
+ err = pm_runtime_resume_and_get(ndev->dev.parent);
if (err) {
- netdev_err(ndev,
- "failed to enable peripheral clock, error %d\n",
- err);
+ netdev_err(ndev, "pm_runtime_resume_and_get() failed %pe\n",
+ ERR_PTR(err));
goto out;
}
err = clk_prepare_enable(priv->can_clk);
if (err) {
- netdev_err(ndev, "failed to enable CAN clock, error %d\n",
- err);
- goto out_clock;
+ netdev_err(ndev, "failed to enable CAN clock: %pe\n",
+ ERR_PTR(err));
+ goto out_rpm;
}
err = open_candev(ndev);
if (err) {
- netdev_err(ndev, "open_candev() failed, error %d\n", err);
+ netdev_err(ndev, "open_candev() failed %pe\n", ERR_PTR(err));
goto out_can_clock;
}
napi_enable(&priv->napi);
err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev);
if (err) {
- netdev_err(ndev, "request_irq(%d) failed, error %d\n",
- ndev->irq, err);
+ netdev_err(ndev, "request_irq(%d) failed %pe\n", ndev->irq,
+ ERR_PTR(err));
goto out_close;
}
rcar_can_start(ndev);
@@ -539,8 +545,8 @@ out_close:
close_candev(ndev);
out_can_clock:
clk_disable_unprepare(priv->can_clk);
-out_clock:
- clk_disable_unprepare(priv->clk);
+out_rpm:
+ pm_runtime_put(ndev->dev.parent);
out:
return err;
}
@@ -553,7 +559,7 @@ static void rcar_can_stop(struct net_device *ndev)
/* Go to (force) reset mode */
ctlr = readw(&priv->regs->ctlr);
- ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET;
+ ctlr |= FIELD_PREP(RCAR_CAN_CTLR_CANM, RCAR_CAN_CTLR_CANM_FORCE_RESET);
writew(ctlr, &priv->regs->ctlr);
for (i = 0; i < MAX_STR_READS; i++) {
if (readw(&priv->regs->str) & RCAR_CAN_STR_RSTST)
@@ -578,7 +584,7 @@ static int rcar_can_close(struct net_device *ndev)
free_irq(ndev->irq, ndev);
napi_disable(&priv->napi);
clk_disable_unprepare(priv->can_clk);
- clk_disable_unprepare(priv->clk);
+ pm_runtime_put(ndev->dev.parent);
close_candev(ndev);
return 0;
}
@@ -594,9 +600,10 @@ static netdev_tx_t rcar_can_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */
- data = (cf->can_id & CAN_EFF_MASK) | RCAR_CAN_IDE;
+ data = FIELD_PREP(RCAR_CAN_EID, cf->can_id & CAN_EFF_MASK) |
+ RCAR_CAN_IDE;
else /* Standard frame format */
- data = (cf->can_id & CAN_SFF_MASK) << RCAR_CAN_SID_SHIFT;
+ data = FIELD_PREP(RCAR_CAN_SID, cf->can_id & CAN_SFF_MASK);
if (cf->can_id & CAN_RTR_FLAG) { /* Remote transmission request */
data |= RCAR_CAN_RTR;
@@ -628,7 +635,6 @@ static const struct net_device_ops rcar_can_netdev_ops = {
.ndo_open = rcar_can_open,
.ndo_stop = rcar_can_close,
.ndo_start_xmit = rcar_can_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops rcar_can_ethtool_ops = {
@@ -651,9 +657,9 @@ static void rcar_can_rx_pkt(struct rcar_can_priv *priv)
data = readl(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].id);
if (data & RCAR_CAN_IDE)
- cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ cf->can_id = FIELD_GET(RCAR_CAN_EID, data) | CAN_EFF_FLAG;
else
- cf->can_id = (data >> RCAR_CAN_SID_SHIFT) & CAN_SFF_MASK;
+ cf->can_id = FIELD_GET(RCAR_CAN_SID, data);
dlc = readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].dlc);
cf->len = can_cc_dlc2len(dlc);
@@ -715,18 +721,21 @@ static int rcar_can_do_set_mode(struct net_device *ndev, enum can_mode mode)
}
}
-static int rcar_can_get_berr_counter(const struct net_device *dev,
+static int rcar_can_get_berr_counter(const struct net_device *ndev,
struct can_berr_counter *bec)
{
- struct rcar_can_priv *priv = netdev_priv(dev);
+ struct rcar_can_priv *priv = netdev_priv(ndev);
int err;
- err = clk_prepare_enable(priv->clk);
+ err = pm_runtime_resume_and_get(ndev->dev.parent);
if (err)
return err;
+
bec->txerr = readb(&priv->regs->tecr);
bec->rxerr = readb(&priv->regs->recr);
- clk_disable_unprepare(priv->clk);
+
+ pm_runtime_put(ndev->dev.parent);
+
return 0;
}
@@ -738,6 +747,7 @@ static const char * const clock_names[] = {
static int rcar_can_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct rcar_can_priv *priv;
struct net_device *ndev;
void __iomem *addr;
@@ -745,7 +755,7 @@ static int rcar_can_probe(struct platform_device *pdev)
int err = -ENODEV;
int irq;
- of_property_read_u32(pdev->dev.of_node, "renesas,can-clock-select",
+ of_property_read_u32(dev->of_node, "renesas,can-clock-select",
&clock_select);
irq = platform_get_irq(pdev, 0);
@@ -762,30 +772,21 @@ static int rcar_can_probe(struct platform_device *pdev)
ndev = alloc_candev(sizeof(struct rcar_can_priv), RCAR_CAN_FIFO_DEPTH);
if (!ndev) {
- dev_err(&pdev->dev, "alloc_candev() failed\n");
err = -ENOMEM;
goto fail;
}
priv = netdev_priv(ndev);
- priv->clk = devm_clk_get(&pdev->dev, "clkp1");
- if (IS_ERR(priv->clk)) {
- err = PTR_ERR(priv->clk);
- dev_err(&pdev->dev, "cannot get peripheral clock, error %d\n",
- err);
- goto fail_clk;
- }
-
if (!(BIT(clock_select) & RCAR_SUPPORTED_CLOCKS)) {
err = -EINVAL;
- dev_err(&pdev->dev, "invalid CAN clock selected\n");
+ dev_err(dev, "invalid CAN clock selected\n");
goto fail_clk;
}
- priv->can_clk = devm_clk_get(&pdev->dev, clock_names[clock_select]);
+ priv->can_clk = devm_clk_get(dev, clock_names[clock_select]);
if (IS_ERR(priv->can_clk)) {
+ dev_err(dev, "cannot get CAN clock: %pe\n", priv->can_clk);
err = PTR_ERR(priv->can_clk);
- dev_err(&pdev->dev, "cannot get CAN clock, error %d\n", err);
goto fail_clk;
}
@@ -802,21 +803,24 @@ static int rcar_can_probe(struct platform_device *pdev)
priv->can.do_get_berr_counter = rcar_can_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
platform_set_drvdata(pdev, ndev);
- SET_NETDEV_DEV(ndev, &pdev->dev);
+ SET_NETDEV_DEV(ndev, dev);
netif_napi_add_weight(ndev, &priv->napi, rcar_can_rx_poll,
RCAR_CAN_NAPI_WEIGHT);
+
+ pm_runtime_enable(dev);
+
err = register_candev(ndev);
if (err) {
- dev_err(&pdev->dev, "register_candev() failed, error %d\n",
- err);
- goto fail_candev;
+ dev_err(dev, "register_candev() failed %pe\n", ERR_PTR(err));
+ goto fail_rpm;
}
- dev_info(&pdev->dev, "device registered (IRQ%d)\n", ndev->irq);
+ dev_info(dev, "device registered (IRQ%d)\n", ndev->irq);
return 0;
-fail_candev:
+fail_rpm:
+ pm_runtime_disable(dev);
netif_napi_del(&priv->napi);
fail_clk:
free_candev(ndev);
@@ -830,11 +834,12 @@ static void rcar_can_remove(struct platform_device *pdev)
struct rcar_can_priv *priv = netdev_priv(ndev);
unregister_candev(ndev);
+ pm_runtime_disable(&pdev->dev);
netif_napi_del(&priv->napi);
free_candev(ndev);
}
-static int __maybe_unused rcar_can_suspend(struct device *dev)
+static int rcar_can_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct rcar_can_priv *priv = netdev_priv(ndev);
@@ -847,38 +852,32 @@ static int __maybe_unused rcar_can_suspend(struct device *dev)
netif_device_detach(ndev);
ctlr = readw(&priv->regs->ctlr);
- ctlr |= RCAR_CAN_CTLR_CANM_HALT;
+ ctlr |= FIELD_PREP(RCAR_CAN_CTLR_CANM, RCAR_CAN_CTLR_CANM_HALT);
writew(ctlr, &priv->regs->ctlr);
ctlr |= RCAR_CAN_CTLR_SLPM;
writew(ctlr, &priv->regs->ctlr);
priv->can.state = CAN_STATE_SLEEPING;
- clk_disable(priv->clk);
+ pm_runtime_put(dev);
return 0;
}
-static int __maybe_unused rcar_can_resume(struct device *dev)
+static int rcar_can_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
- struct rcar_can_priv *priv = netdev_priv(ndev);
- u16 ctlr;
int err;
if (!netif_running(ndev))
return 0;
- err = clk_enable(priv->clk);
+ err = pm_runtime_resume_and_get(dev);
if (err) {
- netdev_err(ndev, "clk_enable() failed, error %d\n", err);
+ netdev_err(ndev, "pm_runtime_resume_and_get() failed %pe\n",
+ ERR_PTR(err));
return err;
}
- ctlr = readw(&priv->regs->ctlr);
- ctlr &= ~RCAR_CAN_CTLR_SLPM;
- writew(ctlr, &priv->regs->ctlr);
- ctlr &= ~RCAR_CAN_CTLR_CANM;
- writew(ctlr, &priv->regs->ctlr);
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ rcar_can_start(ndev);
netif_device_attach(ndev);
netif_start_queue(ndev);
@@ -886,7 +885,8 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(rcar_can_pm_ops, rcar_can_suspend, rcar_can_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(rcar_can_pm_ops, rcar_can_suspend,
+ rcar_can_resume);
static const struct of_device_id rcar_can_of_table[] __maybe_unused = {
{ .compatible = "renesas,can-r8a7778" },
@@ -904,7 +904,7 @@ static struct platform_driver rcar_can_driver = {
.driver = {
.name = RCAR_CAN_DRV_NAME,
.of_match_table = of_match_ptr(rcar_can_of_table),
- .pm = &rcar_can_pm_ops,
+ .pm = pm_sleep_ptr(&rcar_can_pm_ops),
},
.probe = rcar_can_probe,
.remove = rcar_can_remove,
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index df1a5d0b37b2..7895e1fdea1c 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -21,6 +21,7 @@
* wherever it is modified to a readable name.
*/
+#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/can/dev.h>
@@ -74,33 +75,24 @@
#define RCANFD_GSTS_GNOPM (BIT(0) | BIT(1) | BIT(2) | BIT(3))
/* RSCFDnCFDGERFL / RSCFDnGERFL */
-#define RCANFD_GERFL_EEF0_7 GENMASK(23, 16)
-#define RCANFD_GERFL_EEF(ch) BIT(16 + (ch))
+#define RCANFD_GERFL_EEF GENMASK(23, 16)
#define RCANFD_GERFL_CMPOF BIT(3) /* CAN FD only */
#define RCANFD_GERFL_THLES BIT(2)
#define RCANFD_GERFL_MES BIT(1)
#define RCANFD_GERFL_DEF BIT(0)
#define RCANFD_GERFL_ERR(gpriv, x) \
- ((x) & (reg_gen4(gpriv, RCANFD_GERFL_EEF0_7, \
- RCANFD_GERFL_EEF(0) | RCANFD_GERFL_EEF(1)) | \
- RCANFD_GERFL_MES | \
- ((gpriv)->fdmode ? RCANFD_GERFL_CMPOF : 0)))
+({\
+ typeof(gpriv) (_gpriv) = (gpriv); \
+ ((x) & ((FIELD_PREP(RCANFD_GERFL_EEF, (_gpriv)->channels_mask)) | \
+ RCANFD_GERFL_MES | ((_gpriv)->fdmode ? RCANFD_GERFL_CMPOF : 0))); \
+})
/* AFL Rx rules registers */
-/* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */
-#define RCANFD_GAFLCFG_SETRNC(gpriv, n, x) \
- (((x) & reg_gen4(gpriv, 0x1ff, 0xff)) << \
- (reg_gen4(gpriv, 16, 24) - ((n) & 1) * reg_gen4(gpriv, 16, 8)))
-
-#define RCANFD_GAFLCFG_GETRNC(gpriv, n, x) \
- (((x) >> (reg_gen4(gpriv, 16, 24) - ((n) & 1) * reg_gen4(gpriv, 16, 8))) & \
- reg_gen4(gpriv, 0x1ff, 0xff))
-
/* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */
#define RCANFD_GAFLECTR_AFLDAE BIT(8)
-#define RCANFD_GAFLECTR_AFLPN(gpriv, x) ((x) & reg_gen4(gpriv, 0x7f, 0x1f))
+#define RCANFD_GAFLECTR_AFLPN(gpriv, page_num) ((page_num) & (gpriv)->info->max_aflpn)
/* RSCFDnCFDGAFLIDj / RSCFDnGAFLIDj */
#define RCANFD_GAFLID_GAFLLB BIT(29)
@@ -111,22 +103,13 @@
/* Channel register bits */
/* RSCFDnCmCFG - Classical CAN only */
-#define RCANFD_CFG_SJW(x) (((x) & 0x3) << 24)
-#define RCANFD_CFG_TSEG2(x) (((x) & 0x7) << 20)
-#define RCANFD_CFG_TSEG1(x) (((x) & 0xf) << 16)
-#define RCANFD_CFG_BRP(x) (((x) & 0x3ff) << 0)
+#define RCANFD_CFG_SJW GENMASK(25, 24)
+#define RCANFD_CFG_TSEG2 GENMASK(22, 20)
+#define RCANFD_CFG_TSEG1 GENMASK(19, 16)
+#define RCANFD_CFG_BRP GENMASK(9, 0)
/* RSCFDnCFDCmNCFG - CAN FD only */
-#define RCANFD_NCFG_NTSEG2(gpriv, x) \
- (((x) & reg_gen4(gpriv, 0x7f, 0x1f)) << reg_gen4(gpriv, 25, 24))
-
-#define RCANFD_NCFG_NTSEG1(gpriv, x) \
- (((x) & reg_gen4(gpriv, 0xff, 0x7f)) << reg_gen4(gpriv, 17, 16))
-
-#define RCANFD_NCFG_NSJW(gpriv, x) \
- (((x) & reg_gen4(gpriv, 0x7f, 0x1f)) << reg_gen4(gpriv, 10, 11))
-
-#define RCANFD_NCFG_NBRP(x) (((x) & 0x3ff) << 0)
+#define RCANFD_NCFG_NBRP GENMASK(9, 0)
/* RSCFDnCFDCmCTR / RSCFDnCmCTR */
#define RCANFD_CCTR_CTME BIT(24)
@@ -186,22 +169,24 @@
#define RCANFD_CERFL_ERR(x) ((x) & (0x7fff)) /* above bits 14:0 */
/* RSCFDnCFDCmDCFG */
-#define RCANFD_DCFG_DSJW(gpriv, x) (((x) & reg_gen4(gpriv, 0xf, 0x7)) << 24)
-
-#define RCANFD_DCFG_DTSEG2(gpriv, x) \
- (((x) & reg_gen4(gpriv, 0x0f, 0x7)) << reg_gen4(gpriv, 16, 20))
-
-#define RCANFD_DCFG_DTSEG1(gpriv, x) \
- (((x) & reg_gen4(gpriv, 0x1f, 0xf)) << reg_gen4(gpriv, 8, 16))
-
-#define RCANFD_DCFG_DBRP(x) (((x) & 0xff) << 0)
+#define RCANFD_DCFG_DBRP GENMASK(7, 0)
/* RSCFDnCFDCmFDCFG */
#define RCANFD_GEN4_FDCFG_CLOE BIT(30)
#define RCANFD_GEN4_FDCFG_FDOE BIT(28)
+#define RCANFD_FDCFG_TDCO GENMASK(23, 16)
#define RCANFD_FDCFG_TDCE BIT(9)
#define RCANFD_FDCFG_TDCOC BIT(8)
-#define RCANFD_FDCFG_TDCO(x) (((x) & 0x7f) >> 16)
+
+/* RSCFDnCFDCmFDSTS */
+#define RCANFD_FDSTS_SOC GENMASK(31, 24)
+#define RCANFD_FDSTS_EOC GENMASK(23, 16)
+#define RCANFD_GEN4_FDSTS_TDCVF BIT(15)
+#define RCANFD_GEN4_FDSTS_PNSTS GENMASK(13, 12)
+#define RCANFD_FDSTS_SOCO BIT(9)
+#define RCANFD_FDSTS_EOCO BIT(8)
+#define RCANFD_FDSTS_TDCVF BIT(7)
+#define RCANFD_FDSTS_TDCR GENMASK(7, 0)
/* RSCFDnCFDRFCCx */
#define RCANFD_RFCC_RFIM BIT(12)
@@ -222,8 +207,6 @@
/* RSCFDnCFDRFPTRx */
#define RCANFD_RFPTR_RFDLC(x) (((x) >> 28) & 0xf)
-#define RCANFD_RFPTR_RFPTR(x) (((x) >> 16) & 0xfff)
-#define RCANFD_RFPTR_RFTS(x) (((x) >> 0) & 0xffff)
/* RSCFDnCFDRFFDSTSx */
#define RCANFD_RFFDSTS_RFFDF BIT(2)
@@ -233,11 +216,14 @@
/* Common FIFO bits */
/* RSCFDnCFDCFCCk */
-#define RCANFD_CFCC_CFTML(gpriv, x) \
- (((x) & reg_gen4(gpriv, 0x1f, 0xf)) << reg_gen4(gpriv, 16, 20))
-#define RCANFD_CFCC_CFM(gpriv, x) (((x) & 0x3) << reg_gen4(gpriv, 8, 16))
+#define RCANFD_CFCC_CFTML(gpriv, cftml) \
+({\
+ typeof(gpriv) (_gpriv) = (gpriv); \
+ (((cftml) & (_gpriv)->info->max_cftml) << (_gpriv)->info->sh->cftml); \
+})
+#define RCANFD_CFCC_CFM(gpriv, x) (((x) & 0x3) << (gpriv)->info->sh->cfm)
#define RCANFD_CFCC_CFIM BIT(12)
-#define RCANFD_CFCC_CFDC(gpriv, x) (((x) & 0x7) << reg_gen4(gpriv, 21, 8))
+#define RCANFD_CFCC_CFDC(gpriv, x) (((x) & 0x7) << (gpriv)->info->sh->cfdc)
#define RCANFD_CFCC_CFPLS(x) (((x) & 0x7) << 4)
#define RCANFD_CFCC_CFTXIE BIT(2)
#define RCANFD_CFCC_CFE BIT(0)
@@ -252,12 +238,9 @@
/* RSCFDnCFDCFIDk */
#define RCANFD_CFID_CFIDE BIT(31)
#define RCANFD_CFID_CFRTR BIT(30)
-#define RCANFD_CFID_CFID_MASK(x) ((x) & 0x1fffffff)
/* RSCFDnCFDCFPTRk */
#define RCANFD_CFPTR_CFDLC(x) (((x) & 0xf) << 28)
-#define RCANFD_CFPTR_CFPTR(x) (((x) & 0xfff) << 16)
-#define RCANFD_CFPTR_CFTS(x) (((x) & 0xff) << 0)
/* RSCFDnCFDCFFDCSTSk */
#define RCANFD_CFFDCSTS_CFFDF BIT(2)
@@ -298,14 +281,14 @@
/* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */
#define RCANFD_GAFLECTR (0x0098)
/* RSCFDnCFDGAFLCFG / RSCFDnGAFLCFG */
-#define RCANFD_GAFLCFG(ch) (0x009c + (0x04 * ((ch) / 2)))
+#define RCANFD_GAFLCFG(w) (0x009c + (0x04 * (w)))
/* RSCFDnCFDRMNB / RSCFDnRMNB */
#define RCANFD_RMNB (0x00a4)
/* RSCFDnCFDRMND / RSCFDnRMND */
#define RCANFD_RMND(y) (0x00a8 + (0x04 * (y)))
/* RSCFDnCFDRFCCx / RSCFDnRFCCx */
-#define RCANFD_RFCC(gpriv, x) (reg_gen4(gpriv, 0x00c0, 0x00b8) + (0x04 * (x)))
+#define RCANFD_RFCC(gpriv, x) ((gpriv)->info->regs->rfcc + (0x04 * (x)))
/* RSCFDnCFDRFSTSx / RSCFDnRFSTSx */
#define RCANFD_RFSTS(gpriv, x) (RCANFD_RFCC(gpriv, x) + 0x20)
/* RSCFDnCFDRFPCTRx / RSCFDnRFPCTRx */
@@ -315,67 +298,14 @@
/* RSCFDnCFDCFCCx / RSCFDnCFCCx */
#define RCANFD_CFCC(gpriv, ch, idx) \
- (reg_gen4(gpriv, 0x0120, 0x0118) + (0x0c * (ch)) + (0x04 * (idx)))
+ ((gpriv)->info->regs->cfcc + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDCFSTSx / RSCFDnCFSTSx */
#define RCANFD_CFSTS(gpriv, ch, idx) \
- (reg_gen4(gpriv, 0x01e0, 0x0178) + (0x0c * (ch)) + (0x04 * (idx)))
+ ((gpriv)->info->regs->cfsts + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDCFPCTRx / RSCFDnCFPCTRx */
#define RCANFD_CFPCTR(gpriv, ch, idx) \
- (reg_gen4(gpriv, 0x0240, 0x01d8) + (0x0c * (ch)) + (0x04 * (idx)))
-
-/* RSCFDnCFDFESTS / RSCFDnFESTS */
-#define RCANFD_FESTS (0x0238)
-/* RSCFDnCFDFFSTS / RSCFDnFFSTS */
-#define RCANFD_FFSTS (0x023c)
-/* RSCFDnCFDFMSTS / RSCFDnFMSTS */
-#define RCANFD_FMSTS (0x0240)
-/* RSCFDnCFDRFISTS / RSCFDnRFISTS */
-#define RCANFD_RFISTS (0x0244)
-/* RSCFDnCFDCFRISTS / RSCFDnCFRISTS */
-#define RCANFD_CFRISTS (0x0248)
-/* RSCFDnCFDCFTISTS / RSCFDnCFTISTS */
-#define RCANFD_CFTISTS (0x024c)
-
-/* RSCFDnCFDTMCp / RSCFDnTMCp */
-#define RCANFD_TMC(p) (0x0250 + (0x01 * (p)))
-/* RSCFDnCFDTMSTSp / RSCFDnTMSTSp */
-#define RCANFD_TMSTS(p) (0x02d0 + (0x01 * (p)))
-
-/* RSCFDnCFDTMTRSTSp / RSCFDnTMTRSTSp */
-#define RCANFD_TMTRSTS(y) (0x0350 + (0x04 * (y)))
-/* RSCFDnCFDTMTARSTSp / RSCFDnTMTARSTSp */
-#define RCANFD_TMTARSTS(y) (0x0360 + (0x04 * (y)))
-/* RSCFDnCFDTMTCSTSp / RSCFDnTMTCSTSp */
-#define RCANFD_TMTCSTS(y) (0x0370 + (0x04 * (y)))
-/* RSCFDnCFDTMTASTSp / RSCFDnTMTASTSp */
-#define RCANFD_TMTASTS(y) (0x0380 + (0x04 * (y)))
-/* RSCFDnCFDTMIECy / RSCFDnTMIECy */
-#define RCANFD_TMIEC(y) (0x0390 + (0x04 * (y)))
-
-/* RSCFDnCFDTXQCCm / RSCFDnTXQCCm */
-#define RCANFD_TXQCC(m) (0x03a0 + (0x04 * (m)))
-/* RSCFDnCFDTXQSTSm / RSCFDnTXQSTSm */
-#define RCANFD_TXQSTS(m) (0x03c0 + (0x04 * (m)))
-/* RSCFDnCFDTXQPCTRm / RSCFDnTXQPCTRm */
-#define RCANFD_TXQPCTR(m) (0x03e0 + (0x04 * (m)))
-
-/* RSCFDnCFDTHLCCm / RSCFDnTHLCCm */
-#define RCANFD_THLCC(m) (0x0400 + (0x04 * (m)))
-/* RSCFDnCFDTHLSTSm / RSCFDnTHLSTSm */
-#define RCANFD_THLSTS(m) (0x0420 + (0x04 * (m)))
-/* RSCFDnCFDTHLPCTRm / RSCFDnTHLPCTRm */
-#define RCANFD_THLPCTR(m) (0x0440 + (0x04 * (m)))
-
-/* RSCFDnCFDGTINTSTS0 / RSCFDnGTINTSTS0 */
-#define RCANFD_GTINTSTS0 (0x0460)
-/* RSCFDnCFDGTINTSTS1 / RSCFDnGTINTSTS1 */
-#define RCANFD_GTINTSTS1 (0x0464)
-/* RSCFDnCFDGTSTCFG / RSCFDnGTSTCFG */
-#define RCANFD_GTSTCFG (0x0468)
-/* RSCFDnCFDGTSTCTR / RSCFDnGTSTCTR */
-#define RCANFD_GTSTCTR (0x046c)
-/* RSCFDnCFDGLOCKK / RSCFDnGLOCKK */
-#define RCANFD_GLOCKK (0x047c)
+ ((gpriv)->info->regs->cfpctr + (0x0c * (ch)) + (0x04 * (idx)))
+
/* RSCFDnCFDGRMCFG */
#define RCANFD_GRMCFG (0x04fc)
@@ -393,12 +323,6 @@
/* RSCFDnGAFLXXXj offset */
#define RCANFD_C_GAFL_OFFSET (0x0500)
-/* RSCFDnRMXXXq -> RCANFD_C_RMXXX(q) */
-#define RCANFD_C_RMID(q) (0x0600 + (0x10 * (q)))
-#define RCANFD_C_RMPTR(q) (0x0604 + (0x10 * (q)))
-#define RCANFD_C_RMDF0(q) (0x0608 + (0x10 * (q)))
-#define RCANFD_C_RMDF1(q) (0x060c + (0x10 * (q)))
-
/* RSCFDnRFXXx -> RCANFD_C_RFXX(x) */
#define RCANFD_C_RFOFFSET (0x0e00)
#define RCANFD_C_RFID(x) (RCANFD_C_RFOFFSET + (0x10 * (x)))
@@ -418,42 +342,26 @@
#define RCANFD_C_CFDF(ch, idx, df) \
(RCANFD_C_CFOFFSET + 0x08 + (0x30 * (ch)) + (0x10 * (idx)) + (0x04 * (df)))
-/* RSCFDnTMXXp -> RCANFD_C_TMXX(p) */
-#define RCANFD_C_TMID(p) (0x1000 + (0x10 * (p)))
-#define RCANFD_C_TMPTR(p) (0x1004 + (0x10 * (p)))
-#define RCANFD_C_TMDF0(p) (0x1008 + (0x10 * (p)))
-#define RCANFD_C_TMDF1(p) (0x100c + (0x10 * (p)))
-
-/* RSCFDnTHLACCm */
-#define RCANFD_C_THLACC(m) (0x1800 + (0x04 * (m)))
-/* RSCFDnRPGACCr */
-#define RCANFD_C_RPGACC(r) (0x1900 + (0x04 * (r)))
-
/* R-Car Gen4 Classical and CAN FD mode specific register map */
-#define RCANFD_GEN4_FDCFG(m) (0x1404 + (0x20 * (m)))
-
#define RCANFD_GEN4_GAFL_OFFSET (0x1800)
/* CAN FD mode specific register map */
-/* RSCFDnCFDCmXXX -> RCANFD_F_XXX(m) */
-#define RCANFD_F_DCFG(gpriv, m) (reg_gen4(gpriv, 0x1400, 0x0500) + (0x20 * (m)))
-#define RCANFD_F_CFDCFG(m) (0x0504 + (0x20 * (m)))
-#define RCANFD_F_CFDCTR(m) (0x0508 + (0x20 * (m)))
-#define RCANFD_F_CFDSTS(m) (0x050c + (0x20 * (m)))
-#define RCANFD_F_CFDCRC(m) (0x0510 + (0x20 * (m)))
+/* RSCFDnCFDCmXXX -> gpriv->fcbase[m].xxx */
+struct rcar_canfd_f_c {
+ u32 dcfg;
+ u32 cfdcfg;
+ u32 cfdctr;
+ u32 cfdsts;
+ u32 cfdcrc;
+ u32 pad[3];
+};
/* RSCFDnCFDGAFLXXXj offset */
#define RCANFD_F_GAFL_OFFSET (0x1000)
-/* RSCFDnCFDRMXXXq -> RCANFD_F_RMXXX(q) */
-#define RCANFD_F_RMID(q) (0x2000 + (0x20 * (q)))
-#define RCANFD_F_RMPTR(q) (0x2004 + (0x20 * (q)))
-#define RCANFD_F_RMFDSTS(q) (0x2008 + (0x20 * (q)))
-#define RCANFD_F_RMDF(q, b) (0x200c + (0x04 * (b)) + (0x20 * (q)))
-
/* RSCFDnCFDRFXXx -> RCANFD_F_RFXX(x) */
-#define RCANFD_F_RFOFFSET(gpriv) reg_gen4(gpriv, 0x6000, 0x3000)
+#define RCANFD_F_RFOFFSET(gpriv) ((gpriv)->info->regs->rfoffset)
#define RCANFD_F_RFID(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + (0x80 * (x)))
#define RCANFD_F_RFPTR(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + 0x04 + (0x80 * (x)))
#define RCANFD_F_RFFDSTS(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + 0x08 + (0x80 * (x)))
@@ -461,7 +369,7 @@
(RCANFD_F_RFOFFSET(gpriv) + 0x0c + (0x80 * (x)) + (0x04 * (df)))
/* RSCFDnCFDCFXXk -> RCANFD_F_CFXX(ch, k) */
-#define RCANFD_F_CFOFFSET(gpriv) reg_gen4(gpriv, 0x6400, 0x3400)
+#define RCANFD_F_CFOFFSET(gpriv) ((gpriv)->info->regs->cfoffset)
#define RCANFD_F_CFID(gpriv, ch, idx) \
(RCANFD_F_CFOFFSET(gpriv) + (0x180 * (ch)) + (0x80 * (idx)))
@@ -476,23 +384,11 @@
(RCANFD_F_CFOFFSET(gpriv) + 0x0c + (0x180 * (ch)) + (0x80 * (idx)) + \
(0x04 * (df)))
-/* RSCFDnCFDTMXXp -> RCANFD_F_TMXX(p) */
-#define RCANFD_F_TMID(p) (0x4000 + (0x20 * (p)))
-#define RCANFD_F_TMPTR(p) (0x4004 + (0x20 * (p)))
-#define RCANFD_F_TMFDCTR(p) (0x4008 + (0x20 * (p)))
-#define RCANFD_F_TMDF(p, b) (0x400c + (0x20 * (p)) + (0x04 * (b)))
-
-/* RSCFDnCFDTHLACCm */
-#define RCANFD_F_THLACC(m) (0x6000 + (0x04 * (m)))
-/* RSCFDnCFDRPGACCr */
-#define RCANFD_F_RPGACC(r) (0x6400 + (0x04 * (r)))
-
/* Constants */
#define RCANFD_FIFO_DEPTH 8 /* Tx FIFO depth */
#define RCANFD_NAPI_WEIGHT 8 /* Rx poll quota */
#define RCANFD_NUM_CHANNELS 8 /* Eight channels max */
-#define RCANFD_CHANNELS_MASK BIT((RCANFD_NUM_CHANNELS) - 1)
#define RCANFD_GAFL_PAGENUM(entry) ((entry) / 16)
#define RCANFD_CHANNEL_NUMRULES 1 /* only one rule per channel */
@@ -510,12 +406,44 @@
struct rcar_canfd_global;
+struct rcar_canfd_regs {
+ u16 rfcc; /* RX FIFO Configuration/Control Register */
+ u16 cfcc; /* Common FIFO Configuration/Control Register */
+ u16 cfsts; /* Common FIFO Status Register */
+ u16 cfpctr; /* Common FIFO Pointer Control Register */
+ u16 coffset; /* Channel Data Bitrate Configuration Register */
+ u16 rfoffset; /* Receive FIFO buffer access ID register */
+ u16 cfoffset; /* Transmit/receive FIFO buffer access ID register */
+};
+
+struct rcar_canfd_shift_data {
+ u8 ntseg2; /* Nominal Bit Rate Time Segment 2 Control */
+ u8 ntseg1; /* Nominal Bit Rate Time Segment 1 Control */
+ u8 nsjw; /* Nominal Bit Rate Resynchronization Jump Width Control */
+ u8 dtseg2; /* Data Bit Rate Time Segment 2 Control */
+ u8 dtseg1; /* Data Bit Rate Time Segment 1 Control */
+ u8 cftml; /* Common FIFO TX Message Buffer Link */
+ u8 cfm; /* Common FIFO Mode */
+ u8 cfdc; /* Common FIFO Depth Configuration */
+};
+
struct rcar_canfd_hw_info {
+ const struct can_bittiming_const *nom_bittiming;
+ const struct can_bittiming_const *data_bittiming;
+ const struct can_tdc_const *tdc_const;
+ const struct rcar_canfd_regs *regs;
+ const struct rcar_canfd_shift_data *sh;
+ u8 rnc_field_width;
+ u8 max_aflpn;
+ u8 max_cftml;
u8 max_channels;
u8 postdiv;
/* hardware features */
unsigned shared_global_irqs:1; /* Has shared global irqs */
unsigned multi_channel_irqs:1; /* Has multiple channel irqs */
+ unsigned ch_interface_mode:1; /* Has channel interface mode */
+ unsigned shared_can_regs:1; /* Has shared classical can registers */
+ unsigned external_clk:1; /* Has external clock */
};
/* Channel priv data */
@@ -536,9 +464,11 @@ struct rcar_canfd_channel {
struct rcar_canfd_global {
struct rcar_canfd_channel *ch[RCANFD_NUM_CHANNELS];
void __iomem *base; /* Register base address */
+ struct rcar_canfd_f_c __iomem *fcbase;
struct platform_device *pdev; /* Respective platform device */
struct clk *clkp; /* Peripheral clock */
struct clk *can_clk; /* fCAN clock */
+	struct clk *clk_ram;		/* RAM clock */
unsigned long channels_mask; /* Enabled channels mask */
bool extclk; /* CANFD or Ext clock */
bool fdmode; /* CAN FD or Classical CAN only mode */
@@ -548,7 +478,7 @@ struct rcar_canfd_global {
};
/* CAN FD mode nominal rate constants */
-static const struct can_bittiming_const rcar_canfd_nom_bittiming_const = {
+static const struct can_bittiming_const rcar_canfd_gen3_nom_bittiming_const = {
.name = RCANFD_DRV_NAME,
.tseg1_min = 2,
.tseg1_max = 128,
@@ -560,8 +490,20 @@ static const struct can_bittiming_const rcar_canfd_nom_bittiming_const = {
.brp_inc = 1,
};
+static const struct can_bittiming_const rcar_canfd_gen4_nom_bittiming_const = {
+ .name = RCANFD_DRV_NAME,
+ .tseg1_min = 2,
+ .tseg1_max = 256,
+ .tseg2_min = 2,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
/* CAN FD mode data rate constants */
-static const struct can_bittiming_const rcar_canfd_data_bittiming_const = {
+static const struct can_bittiming_const rcar_canfd_gen3_data_bittiming_const = {
.name = RCANFD_DRV_NAME,
.tseg1_min = 2,
.tseg1_max = 16,
@@ -573,6 +515,18 @@ static const struct can_bittiming_const rcar_canfd_data_bittiming_const = {
.brp_inc = 1,
};
+static const struct can_bittiming_const rcar_canfd_gen4_data_bittiming_const = {
+ .name = RCANFD_DRV_NAME,
+ .tseg1_min = 2,
+ .tseg1_max = 32,
+ .tseg2_min = 2,
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
/* Classical CAN mode bitrate constants */
static const struct can_bittiming_const rcar_canfd_bittiming_const = {
.name = RCANFD_DRV_NAME,
@@ -586,36 +540,136 @@ static const struct can_bittiming_const rcar_canfd_bittiming_const = {
.brp_inc = 1,
};
+/* CAN FD Transmission Delay Compensation constants */
+static const struct can_tdc_const rcar_canfd_gen3_tdc_const = {
+ .tdcv_min = 1,
+ .tdcv_max = 128,
+ .tdco_min = 1,
+ .tdco_max = 128,
+ .tdcf_min = 0, /* Filter window not supported */
+ .tdcf_max = 0,
+};
+
+static const struct can_tdc_const rcar_canfd_gen4_tdc_const = {
+ .tdcv_min = 1,
+ .tdcv_max = 256,
+ .tdco_min = 1,
+ .tdco_max = 256,
+ .tdcf_min = 0, /* Filter window not supported */
+ .tdcf_max = 0,
+};
+
+static const struct rcar_canfd_regs rcar_gen3_regs = {
+ .rfcc = 0x00b8,
+ .cfcc = 0x0118,
+ .cfsts = 0x0178,
+ .cfpctr = 0x01d8,
+ .coffset = 0x0500,
+ .rfoffset = 0x3000,
+ .cfoffset = 0x3400,
+};
+
+static const struct rcar_canfd_regs rcar_gen4_regs = {
+ .rfcc = 0x00c0,
+ .cfcc = 0x0120,
+ .cfsts = 0x01e0,
+ .cfpctr = 0x0240,
+ .coffset = 0x1400,
+ .rfoffset = 0x6000,
+ .cfoffset = 0x6400,
+};
+
+static const struct rcar_canfd_shift_data rcar_gen3_shift_data = {
+ .ntseg2 = 24,
+ .ntseg1 = 16,
+ .nsjw = 11,
+ .dtseg2 = 20,
+ .dtseg1 = 16,
+ .cftml = 20,
+ .cfm = 16,
+ .cfdc = 8,
+};
+
+static const struct rcar_canfd_shift_data rcar_gen4_shift_data = {
+ .ntseg2 = 25,
+ .ntseg1 = 17,
+ .nsjw = 10,
+ .dtseg2 = 16,
+ .dtseg1 = 8,
+ .cftml = 16,
+ .cfm = 8,
+ .cfdc = 21,
+};
+
static const struct rcar_canfd_hw_info rcar_gen3_hw_info = {
+ .nom_bittiming = &rcar_canfd_gen3_nom_bittiming_const,
+ .data_bittiming = &rcar_canfd_gen3_data_bittiming_const,
+ .tdc_const = &rcar_canfd_gen3_tdc_const,
+ .regs = &rcar_gen3_regs,
+ .sh = &rcar_gen3_shift_data,
+ .rnc_field_width = 8,
+ .max_aflpn = 31,
+ .max_cftml = 15,
.max_channels = 2,
.postdiv = 2,
.shared_global_irqs = 1,
+ .ch_interface_mode = 0,
+ .shared_can_regs = 0,
+ .external_clk = 1,
};
static const struct rcar_canfd_hw_info rcar_gen4_hw_info = {
+ .nom_bittiming = &rcar_canfd_gen4_nom_bittiming_const,
+ .data_bittiming = &rcar_canfd_gen4_data_bittiming_const,
+ .tdc_const = &rcar_canfd_gen4_tdc_const,
+ .regs = &rcar_gen4_regs,
+ .sh = &rcar_gen4_shift_data,
+ .rnc_field_width = 16,
+ .max_aflpn = 127,
+ .max_cftml = 31,
.max_channels = 8,
.postdiv = 2,
.shared_global_irqs = 1,
+ .ch_interface_mode = 1,
+ .shared_can_regs = 1,
+ .external_clk = 1,
};
static const struct rcar_canfd_hw_info rzg2l_hw_info = {
+ .nom_bittiming = &rcar_canfd_gen3_nom_bittiming_const,
+ .data_bittiming = &rcar_canfd_gen3_data_bittiming_const,
+ .tdc_const = &rcar_canfd_gen3_tdc_const,
+ .regs = &rcar_gen3_regs,
+ .sh = &rcar_gen3_shift_data,
+ .rnc_field_width = 8,
+ .max_aflpn = 31,
+ .max_cftml = 15,
.max_channels = 2,
.postdiv = 1,
.multi_channel_irqs = 1,
+ .ch_interface_mode = 0,
+ .shared_can_regs = 0,
+ .external_clk = 1,
};
-/* Helper functions */
-static inline bool is_gen4(struct rcar_canfd_global *gpriv)
-{
- return gpriv->info == &rcar_gen4_hw_info;
-}
-
-static inline u32 reg_gen4(struct rcar_canfd_global *gpriv,
- u32 gen4, u32 not_gen4)
-{
- return is_gen4(gpriv) ? gen4 : not_gen4;
-}
+static const struct rcar_canfd_hw_info r9a09g047_hw_info = {
+ .nom_bittiming = &rcar_canfd_gen4_nom_bittiming_const,
+ .data_bittiming = &rcar_canfd_gen4_data_bittiming_const,
+ .tdc_const = &rcar_canfd_gen4_tdc_const,
+ .regs = &rcar_gen4_regs,
+ .sh = &rcar_gen4_shift_data,
+ .rnc_field_width = 16,
+ .max_aflpn = 63,
+ .max_cftml = 31,
+ .max_channels = 6,
+ .postdiv = 1,
+ .multi_channel_irqs = 1,
+ .ch_interface_mode = 1,
+ .shared_can_regs = 1,
+ .external_clk = 0,
+};
+/* Helper functions */
static inline void rcar_canfd_update(u32 mask, u32 val, u32 __iomem *reg)
{
u32 data = readl(reg);
@@ -651,26 +705,41 @@ static void rcar_canfd_update_bit(void __iomem *base, u32 reg,
rcar_canfd_update(mask, val, base + reg);
}
+static void rcar_canfd_set_bit_reg(void __iomem *addr, u32 val)
+{
+ rcar_canfd_update(val, val, addr);
+}
+
+static void rcar_canfd_clear_bit_reg(void __iomem *addr, u32 val)
+{
+ rcar_canfd_update(val, 0, addr);
+}
+
+static void rcar_canfd_update_bit_reg(void __iomem *addr, u32 mask, u32 val)
+{
+ rcar_canfd_update(mask, val, addr);
+}
+
static void rcar_canfd_get_data(struct rcar_canfd_channel *priv,
struct canfd_frame *cf, u32 off)
{
+ u32 *data = (u32 *)cf->data;
u32 i, lwords;
lwords = DIV_ROUND_UP(cf->len, sizeof(u32));
for (i = 0; i < lwords; i++)
- *((u32 *)cf->data + i) =
- rcar_canfd_read(priv->base, off + i * sizeof(u32));
+ data[i] = rcar_canfd_read(priv->base, off + i * sizeof(u32));
}
static void rcar_canfd_put_data(struct rcar_canfd_channel *priv,
struct canfd_frame *cf, u32 off)
{
+ const u32 *data = (u32 *)cf->data;
u32 i, lwords;
lwords = DIV_ROUND_UP(cf->len, sizeof(u32));
for (i = 0; i < lwords; i++)
- rcar_canfd_write(priv->base, off + i * sizeof(u32),
- *((u32 *)cf->data + i));
+ rcar_canfd_write(priv->base, off + i * sizeof(u32), data[i]);
}
static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev)
@@ -681,28 +750,20 @@ static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev)
can_free_echo_skb(ndev, i, NULL);
}
-static void rcar_canfd_set_mode(struct rcar_canfd_global *gpriv)
+static void rcar_canfd_set_rnc(struct rcar_canfd_global *gpriv, unsigned int ch,
+ unsigned int num_rules)
{
- if (is_gen4(gpriv)) {
- u32 ch, val = gpriv->fdmode ? RCANFD_GEN4_FDCFG_FDOE
- : RCANFD_GEN4_FDCFG_CLOE;
-
- for_each_set_bit(ch, &gpriv->channels_mask,
- gpriv->info->max_channels)
- rcar_canfd_set_bit(gpriv->base, RCANFD_GEN4_FDCFG(ch),
- val);
- } else {
- if (gpriv->fdmode)
- rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG,
- RCANFD_GRMCFG_RCMC);
- else
- rcar_canfd_clear_bit(gpriv->base, RCANFD_GRMCFG,
- RCANFD_GRMCFG_RCMC);
- }
+ unsigned int rnc_stride = 32 / gpriv->info->rnc_field_width;
+ unsigned int shift = 32 - (ch % rnc_stride + 1) * gpriv->info->rnc_field_width;
+ unsigned int w = ch / rnc_stride;
+ u32 rnc = num_rules << shift;
+
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG(w), rnc);
}
static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
{
+ struct device *dev = &gpriv->pdev->dev;
u32 sts, ch;
int err;
@@ -712,7 +773,7 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
!(sts & RCANFD_GSTS_GRAMINIT), 2, 500000);
if (err) {
- dev_dbg(&gpriv->pdev->dev, "global raminit failed\n");
+ dev_dbg(dev, "global raminit failed\n");
return err;
}
@@ -725,7 +786,7 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
(sts & RCANFD_GSTS_GRSTSTS), 2, 500000);
if (err) {
- dev_dbg(&gpriv->pdev->dev, "global reset failed\n");
+ dev_dbg(dev, "global reset failed\n");
return err;
}
@@ -733,7 +794,14 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
rcar_canfd_write(gpriv->base, RCANFD_GERFL, 0x0);
/* Set the controller into appropriate mode */
- rcar_canfd_set_mode(gpriv);
+ if (!gpriv->info->ch_interface_mode) {
+ if (gpriv->fdmode)
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG,
+ RCANFD_GRMCFG_RCMC);
+ else
+ rcar_canfd_clear_bit(gpriv->base, RCANFD_GRMCFG,
+ RCANFD_GRMCFG_RCMC);
+ }
/* Transition all Channels to reset mode */
for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
@@ -749,11 +817,27 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
(sts & RCANFD_CSTS_CRSTSTS),
2, 500000);
if (err) {
- dev_dbg(&gpriv->pdev->dev,
- "channel %u reset failed\n", ch);
+ dev_dbg(dev, "channel %u reset failed\n", ch);
return err;
}
+
+ /* Set the controller into appropriate mode */
+ if (gpriv->info->ch_interface_mode) {
+ /* Do not set CLOE and FDOE simultaneously */
+ if (!gpriv->fdmode) {
+ rcar_canfd_clear_bit_reg(&gpriv->fcbase[ch].cfdcfg,
+ RCANFD_GEN4_FDCFG_FDOE);
+ rcar_canfd_set_bit_reg(&gpriv->fcbase[ch].cfdcfg,
+ RCANFD_GEN4_FDCFG_CLOE);
+ } else {
+				rcar_canfd_clear_bit_reg(&gpriv->fcbase[ch].cfdcfg,
+							 RCANFD_GEN4_FDCFG_CLOE);
+				rcar_canfd_set_bit_reg(&gpriv->fcbase[ch].cfdcfg,
+						       RCANFD_GEN4_FDCFG_FDOE);
+ }
+ }
}
+
return 0;
}
@@ -787,30 +871,21 @@ static void rcar_canfd_configure_controller(struct rcar_canfd_global *gpriv)
}
static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
- u32 ch)
+ u32 ch, u32 rule_entry)
{
- u32 cfg;
- int offset, start, page, num_rules = RCANFD_CHANNEL_NUMRULES;
+ unsigned int offset, page, num_rules = RCANFD_CHANNEL_NUMRULES;
+ u32 rule_entry_index = rule_entry % 16;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
- if (ch == 0) {
- start = 0; /* Channel 0 always starts from 0th rule */
- } else {
- /* Get number of Channel 0 rules and adjust */
- cfg = rcar_canfd_read(gpriv->base, RCANFD_GAFLCFG(ch));
- start = RCANFD_GAFLCFG_GETRNC(gpriv, 0, cfg);
- }
-
/* Enable write access to entry */
- page = RCANFD_GAFL_PAGENUM(start);
+ page = RCANFD_GAFL_PAGENUM(rule_entry);
rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLECTR,
(RCANFD_GAFLECTR_AFLPN(gpriv, page) |
RCANFD_GAFLECTR_AFLDAE));
/* Write number of rules for channel */
- rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG(ch),
- RCANFD_GAFLCFG_SETRNC(gpriv, ch, num_rules));
- if (is_gen4(gpriv))
+ rcar_canfd_set_rnc(gpriv, ch, num_rules);
+ if (gpriv->info->shared_can_regs)
offset = RCANFD_GEN4_GAFL_OFFSET;
else if (gpriv->fdmode)
offset = RCANFD_F_GAFL_OFFSET;
@@ -818,13 +893,13 @@ static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
offset = RCANFD_C_GAFL_OFFSET;
/* Accept all IDs */
- rcar_canfd_write(gpriv->base, RCANFD_GAFLID(offset, start), 0);
+ rcar_canfd_write(gpriv->base, RCANFD_GAFLID(offset, rule_entry_index), 0);
/* IDE or RTR is not considered for matching */
- rcar_canfd_write(gpriv->base, RCANFD_GAFLM(offset, start), 0);
+ rcar_canfd_write(gpriv->base, RCANFD_GAFLM(offset, rule_entry_index), 0);
/* Any data length accepted */
- rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, start), 0);
+ rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, rule_entry_index), 0);
/* Place the msg in corresponding Rx FIFO entry */
- rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLP1(offset, start),
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLP1(offset, rule_entry_index),
RCANFD_GAFLP1_GAFLFDP(ridx));
/* Disable write access to page */
@@ -950,7 +1025,7 @@ static void rcar_canfd_global_error(struct net_device *ndev)
u32 ridx = ch + RCANFD_RFFIFO_IDX;
gerfl = rcar_canfd_read(priv->base, RCANFD_GERFL);
- if (gerfl & RCANFD_GERFL_EEF(ch)) {
+ if (gerfl & FIELD_PREP(RCANFD_GERFL_EEF, BIT(ch))) {
netdev_dbg(ndev, "Ch%u: ECC Error flag\n", ch);
stats->tx_dropped++;
}
@@ -1307,14 +1382,52 @@ static irqreturn_t rcar_canfd_channel_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void rcar_canfd_set_bittiming(struct net_device *dev)
+static inline u32 rcar_canfd_compute_nominal_bit_rate_cfg(struct rcar_canfd_channel *priv,
+ u16 tseg1, u16 tseg2, u16 sjw, u16 brp)
+{
+ struct rcar_canfd_global *gpriv = priv->gpriv;
+ const struct rcar_canfd_hw_info *info = gpriv->info;
+ u32 ntseg1, ntseg2, nsjw, nbrp;
+
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || gpriv->info->shared_can_regs) {
+ ntseg1 = (tseg1 & (info->nom_bittiming->tseg1_max - 1)) << info->sh->ntseg1;
+ ntseg2 = (tseg2 & (info->nom_bittiming->tseg2_max - 1)) << info->sh->ntseg2;
+ nsjw = (sjw & (info->nom_bittiming->sjw_max - 1)) << info->sh->nsjw;
+ nbrp = FIELD_PREP(RCANFD_NCFG_NBRP, brp);
+ } else {
+ ntseg1 = FIELD_PREP(RCANFD_CFG_TSEG1, tseg1);
+ ntseg2 = FIELD_PREP(RCANFD_CFG_TSEG2, tseg2);
+ nsjw = FIELD_PREP(RCANFD_CFG_SJW, sjw);
+ nbrp = FIELD_PREP(RCANFD_CFG_BRP, brp);
+ }
+
+ return (ntseg1 | ntseg2 | nsjw | nbrp);
+}
+
+static inline u32 rcar_canfd_compute_data_bit_rate_cfg(const struct rcar_canfd_hw_info *info,
+ u16 tseg1, u16 tseg2, u16 sjw, u16 brp)
{
- struct rcar_canfd_channel *priv = netdev_priv(dev);
+ u32 dtseg1, dtseg2, dsjw, dbrp;
+
+ dtseg1 = (tseg1 & (info->data_bittiming->tseg1_max - 1)) << info->sh->dtseg1;
+ dtseg2 = (tseg2 & (info->data_bittiming->tseg2_max - 1)) << info->sh->dtseg2;
+ dsjw = (sjw & (info->data_bittiming->sjw_max - 1)) << 24;
+ dbrp = FIELD_PREP(RCANFD_DCFG_DBRP, brp);
+
+ return (dtseg1 | dtseg2 | dsjw | dbrp);
+}
+
+static void rcar_canfd_set_bittiming(struct net_device *ndev)
+{
+ u32 mask = RCANFD_FDCFG_TDCO | RCANFD_FDCFG_TDCE | RCANFD_FDCFG_TDCOC;
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
struct rcar_canfd_global *gpriv = priv->gpriv;
const struct can_bittiming *bt = &priv->can.bittiming;
- const struct can_bittiming *dbt = &priv->can.data_bittiming;
+ const struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
+ const struct can_tdc_const *tdc_const = priv->can.fd.tdc_const;
+ const struct can_tdc *tdc = &priv->can.fd.tdc;
+ u32 cfg, tdcmode = 0, tdco = 0;
u16 brp, sjw, tseg1, tseg2;
- u32 cfg;
u32 ch = priv->channel;
/* Nominal bit timing settings */
@@ -1322,47 +1435,33 @@ static void rcar_canfd_set_bittiming(struct net_device *dev)
sjw = bt->sjw - 1;
tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
tseg2 = bt->phase_seg2 - 1;
+ cfg = rcar_canfd_compute_nominal_bit_rate_cfg(priv, tseg1, tseg2, sjw, brp);
+ rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
- if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
- /* CAN FD only mode */
- cfg = (RCANFD_NCFG_NTSEG1(gpriv, tseg1) | RCANFD_NCFG_NBRP(brp) |
- RCANFD_NCFG_NSJW(gpriv, sjw) | RCANFD_NCFG_NTSEG2(gpriv, tseg2));
-
- rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
- netdev_dbg(priv->ndev, "nrate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
- brp, sjw, tseg1, tseg2);
-
- /* Data bit timing settings */
- brp = dbt->brp - 1;
- sjw = dbt->sjw - 1;
- tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
- tseg2 = dbt->phase_seg2 - 1;
-
- cfg = (RCANFD_DCFG_DTSEG1(gpriv, tseg1) | RCANFD_DCFG_DBRP(brp) |
- RCANFD_DCFG_DSJW(gpriv, sjw) | RCANFD_DCFG_DTSEG2(gpriv, tseg2));
-
- rcar_canfd_write(priv->base, RCANFD_F_DCFG(gpriv, ch), cfg);
- netdev_dbg(priv->ndev, "drate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
- brp, sjw, tseg1, tseg2);
- } else {
- /* Classical CAN only mode */
- if (is_gen4(gpriv)) {
- cfg = (RCANFD_NCFG_NTSEG1(gpriv, tseg1) |
- RCANFD_NCFG_NBRP(brp) |
- RCANFD_NCFG_NSJW(gpriv, sjw) |
- RCANFD_NCFG_NTSEG2(gpriv, tseg2));
- } else {
- cfg = (RCANFD_CFG_TSEG1(tseg1) |
- RCANFD_CFG_BRP(brp) |
- RCANFD_CFG_SJW(sjw) |
- RCANFD_CFG_TSEG2(tseg2));
- }
+ if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD))
+ return;
- rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
- netdev_dbg(priv->ndev,
- "rate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
- brp, sjw, tseg1, tseg2);
+ /* Data bit timing settings */
+ brp = dbt->brp - 1;
+ sjw = dbt->sjw - 1;
+ tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
+ tseg2 = dbt->phase_seg2 - 1;
+ cfg = rcar_canfd_compute_data_bit_rate_cfg(gpriv->info, tseg1, tseg2, sjw, brp);
+ writel(cfg, &gpriv->fcbase[ch].dcfg);
+
+ /* Transceiver Delay Compensation */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_AUTO) {
+ /* TDC enabled, measured + offset */
+ tdcmode = RCANFD_FDCFG_TDCE;
+ tdco = tdc->tdco - 1;
+ } else if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_MANUAL) {
+ /* TDC enabled, offset only */
+ tdcmode = RCANFD_FDCFG_TDCE | RCANFD_FDCFG_TDCOC;
+ tdco = min(tdc->tdcv + tdc->tdco, tdc_const->tdco_max) - 1;
}
+
+ rcar_canfd_update_bit_reg(&gpriv->fcbase[ch].cfdcfg, mask,
+ tdcmode | FIELD_PREP(RCANFD_FDCFG_TDCO, tdco));
}
static int rcar_canfd_start(struct net_device *ndev)
@@ -1480,8 +1579,8 @@ static int rcar_canfd_close(struct net_device *ndev)
netif_stop_queue(ndev);
rcar_canfd_stop(ndev);
napi_disable(&priv->napi);
- clk_disable_unprepare(gpriv->can_clk);
close_candev(ndev);
+ clk_disable_unprepare(gpriv->can_clk);
phy_power_off(priv->transceiver);
return 0;
}
@@ -1511,7 +1610,7 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
dlc = RCANFD_CFPTR_CFDLC(can_fd_len2dlc(cf->len));
- if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_gen4(gpriv)) {
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || gpriv->info->shared_can_regs) {
rcar_canfd_write(priv->base,
RCANFD_F_CFID(gpriv, ch, RCANFD_CFFIFO_IDX), id);
rcar_canfd_write(priv->base,
@@ -1562,7 +1661,8 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
{
- struct net_device_stats *stats = &priv->ndev->stats;
+ struct net_device *ndev = priv->ndev;
+ struct net_device_stats *stats = &ndev->stats;
struct rcar_canfd_global *gpriv = priv->gpriv;
struct canfd_frame *cf;
struct sk_buff *skb;
@@ -1570,7 +1670,7 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
u32 ch = priv->channel;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
- if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_gen4(gpriv)) {
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || gpriv->info->shared_can_regs) {
id = rcar_canfd_read(priv->base, RCANFD_F_RFID(gpriv, ridx));
dlc = rcar_canfd_read(priv->base, RCANFD_F_RFPTR(gpriv, ridx));
@@ -1578,14 +1678,13 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) &&
sts & RCANFD_RFFDSTS_RFFDF)
- skb = alloc_canfd_skb(priv->ndev, &cf);
+ skb = alloc_canfd_skb(ndev, &cf);
else
- skb = alloc_can_skb(priv->ndev,
- (struct can_frame **)&cf);
+ skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
} else {
id = rcar_canfd_read(priv->base, RCANFD_C_RFID(ridx));
dlc = rcar_canfd_read(priv->base, RCANFD_C_RFPTR(ridx));
- skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cf);
+ skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
}
if (!skb) {
@@ -1606,7 +1705,7 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
if (sts & RCANFD_RFFDSTS_RFESI) {
cf->flags |= CANFD_ESI;
- netdev_dbg(priv->ndev, "ESI Error\n");
+ netdev_dbg(ndev, "ESI Error\n");
}
if (!(sts & RCANFD_RFFDSTS_RFFDF) && (id & RCANFD_RFID_RFRTR)) {
@@ -1621,7 +1720,7 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
cf->len = can_cc_dlc2len(RCANFD_RFPTR_RFDLC(dlc));
if (id & RCANFD_RFID_RFRTR)
cf->can_id |= CAN_RTR_FLAG;
- else if (is_gen4(gpriv))
+ else if (gpriv->info->shared_can_regs)
rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(gpriv, ridx, 0));
else
rcar_canfd_get_data(priv, cf, RCANFD_C_RFDF(ridx, 0));
@@ -1673,6 +1772,29 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
return num_pkts;
}
+static unsigned int rcar_canfd_get_tdcr(struct rcar_canfd_global *gpriv,
+ unsigned int ch)
+{
+ u32 sts = readl(&gpriv->fcbase[ch].cfdsts);
+ u32 tdcr = FIELD_GET(RCANFD_FDSTS_TDCR, sts);
+
+ return tdcr & (gpriv->info->tdc_const->tdcv_max - 1);
+}
+
+static int rcar_canfd_get_auto_tdcv(const struct net_device *ndev, u32 *tdcv)
+{
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ u32 tdco = priv->can.fd.tdc.tdco;
+ u32 tdcr;
+
+ /* Transceiver Delay Compensation Result */
+ tdcr = rcar_canfd_get_tdcr(priv->gpriv, priv->channel) + 1;
+
+ *tdcv = tdcr < tdco ? 0 : tdcr - tdco;
+
+ return 0;
+}
+
static int rcar_canfd_do_set_mode(struct net_device *ndev, enum can_mode mode)
{
int err;
@@ -1689,10 +1811,10 @@ static int rcar_canfd_do_set_mode(struct net_device *ndev, enum can_mode mode)
}
}
-static int rcar_canfd_get_berr_counter(const struct net_device *dev,
+static int rcar_canfd_get_berr_counter(const struct net_device *ndev,
struct can_berr_counter *bec)
{
- struct rcar_canfd_channel *priv = netdev_priv(dev);
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
u32 val, ch = priv->channel;
/* Peripheral clock is already enabled in probe */
@@ -1706,7 +1828,6 @@ static const struct net_device_ops rcar_canfd_netdev_ops = {
.ndo_open = rcar_canfd_open,
.ndo_stop = rcar_canfd_close,
.ndo_start_xmit = rcar_canfd_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops rcar_canfd_ethtool_ops = {
@@ -1744,16 +1865,19 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
if (info->multi_channel_irqs) {
char *irq_name;
+ char name[10];
int err_irq;
int tx_irq;
- err_irq = platform_get_irq_byname(pdev, ch == 0 ? "ch0_err" : "ch1_err");
+ scnprintf(name, sizeof(name), "ch%u_err", ch);
+ err_irq = platform_get_irq_byname(pdev, name);
if (err_irq < 0) {
err = err_irq;
goto fail;
}
- tx_irq = platform_get_irq_byname(pdev, ch == 0 ? "ch0_trx" : "ch1_trx");
+ scnprintf(name, sizeof(name), "ch%u_trx", ch);
+ tx_irq = platform_get_irq_byname(pdev, name);
if (tx_irq < 0) {
err = tx_irq;
goto fail;
@@ -1790,18 +1914,25 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
}
if (gpriv->fdmode) {
- priv->can.bittiming_const = &rcar_canfd_nom_bittiming_const;
- priv->can.data_bittiming_const =
- &rcar_canfd_data_bittiming_const;
+ priv->can.bittiming_const = gpriv->info->nom_bittiming;
+ priv->can.fd.data_bittiming_const = gpriv->info->data_bittiming;
+ priv->can.fd.tdc_const = gpriv->info->tdc_const;
/* Controller starts in CAN FD only mode */
err = can_set_static_ctrlmode(ndev, CAN_CTRLMODE_FD);
if (err)
goto fail;
- priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
+
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING |
+ CAN_CTRLMODE_TDC_AUTO |
+ CAN_CTRLMODE_TDC_MANUAL;
+ priv->can.fd.do_get_auto_tdcv = rcar_canfd_get_auto_tdcv;
} else {
/* Controller starts in Classical CAN only mode */
- priv->can.bittiming_const = &rcar_canfd_bittiming_const;
+ if (gpriv->info->shared_can_regs)
+ priv->can.bittiming_const = gpriv->info->nom_bittiming;
+ else
+ priv->can.bittiming_const = &rcar_canfd_bittiming_const;
priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
}
@@ -1839,13 +1970,112 @@ static void rcar_canfd_channel_remove(struct rcar_canfd_global *gpriv, u32 ch)
}
}
+static int rcar_canfd_global_init(struct rcar_canfd_global *gpriv)
+{
+ struct device *dev = &gpriv->pdev->dev;
+ u32 rule_entry = 0;
+ u32 ch, sts;
+ int err;
+
+ err = reset_control_reset(gpriv->rstc1);
+ if (err)
+ return err;
+
+ err = reset_control_reset(gpriv->rstc2);
+ if (err)
+ goto fail_reset1;
+
+ /* Enable peripheral clock for register access */
+ err = clk_prepare_enable(gpriv->clkp);
+ if (err) {
+ dev_err(dev, "failed to enable peripheral clock: %pe\n",
+ ERR_PTR(err));
+ goto fail_reset2;
+ }
+
+ /* Enable RAM clock */
+ err = clk_prepare_enable(gpriv->clk_ram);
+ if (err) {
+ dev_err(dev,
+ "failed to enable RAM clock, error %d\n", err);
+ goto fail_clk;
+ }
+
+ err = rcar_canfd_reset_controller(gpriv);
+ if (err) {
+ dev_err(dev, "reset controller failed: %pe\n", ERR_PTR(err));
+ goto fail_ram_clk;
+ }
+
+ /* Controller in Global reset & Channel reset mode */
+ rcar_canfd_configure_controller(gpriv);
+
+ /* Configure per channel attributes */
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
+ /* Configure Channel's Rx fifo */
+ rcar_canfd_configure_rx(gpriv, ch);
+
+ /* Configure Channel's Tx (Common) fifo */
+ rcar_canfd_configure_tx(gpriv, ch);
+
+ /* Configure receive rules */
+ rcar_canfd_configure_afl_rules(gpriv, ch, rule_entry);
+ rule_entry += RCANFD_CHANNEL_NUMRULES;
+ }
+
+ /* Configure common interrupts */
+ rcar_canfd_enable_global_interrupts(gpriv);
+
+ /* Start Global operation mode */
+ rcar_canfd_update_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GMDC_MASK,
+ RCANFD_GCTR_GMDC_GOPM);
+
+ /* Verify mode change */
+ err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
+ !(sts & RCANFD_GSTS_GNOPM), 2, 500000);
+ if (err) {
+ dev_err(dev, "global operational mode failed\n");
+ goto fail_mode;
+ }
+
+ return 0;
+
+fail_mode:
+ rcar_canfd_disable_global_interrupts(gpriv);
+fail_ram_clk:
+ clk_disable_unprepare(gpriv->clk_ram);
+fail_clk:
+ clk_disable_unprepare(gpriv->clkp);
+fail_reset2:
+ reset_control_assert(gpriv->rstc2);
+fail_reset1:
+ reset_control_assert(gpriv->rstc1);
+ return err;
+}
+
+static void rcar_canfd_global_deinit(struct rcar_canfd_global *gpriv, bool full)
+{
+ rcar_canfd_disable_global_interrupts(gpriv);
+
+ if (full) {
+ rcar_canfd_reset_controller(gpriv);
+
+ /* Enter global sleep mode */
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GSLPR);
+ }
+
+ clk_disable_unprepare(gpriv->clk_ram);
+ clk_disable_unprepare(gpriv->clkp);
+ reset_control_assert(gpriv->rstc2);
+ reset_control_assert(gpriv->rstc1);
+}
+
static int rcar_canfd_probe(struct platform_device *pdev)
{
struct phy *transceivers[RCANFD_NUM_CHANNELS] = { NULL, };
const struct rcar_canfd_hw_info *info;
struct device *dev = &pdev->dev;
void __iomem *addr;
- u32 sts, ch, fcan_freq;
struct rcar_canfd_global *gpriv;
struct device_node *of_child;
unsigned long channels_mask = 0;
@@ -1853,6 +2083,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
int g_err_irq, g_recc_irq;
bool fdmode = true; /* CAN FD only mode - default */
char name[9] = "channelX";
+ u32 ch, fcan_freq;
int i;
info = of_device_get_match_data(dev);
@@ -1862,13 +2093,13 @@ static int rcar_canfd_probe(struct platform_device *pdev)
for (i = 0; i < info->max_channels; ++i) {
name[7] = '0' + i;
- of_child = of_get_child_by_name(dev->of_node, name);
- if (of_child && of_device_is_available(of_child)) {
+ of_child = of_get_available_child_by_name(dev->of_node, name);
+ if (of_child) {
channels_mask |= BIT(i);
transceivers[i] = devm_of_phy_optional_get(dev,
of_child, NULL);
+ of_node_put(of_child);
}
- of_node_put(of_child);
if (IS_ERR(transceivers[i]))
return PTR_ERR(transceivers[i]);
}
@@ -1939,15 +2170,21 @@ static int rcar_canfd_probe(struct platform_device *pdev)
fcan_freq = clk_get_rate(gpriv->can_clk) / info->postdiv;
} else {
fcan_freq = clk_get_rate(gpriv->can_clk);
- gpriv->extclk = true;
+ gpriv->extclk = gpriv->info->external_clk;
}
+ gpriv->clk_ram = devm_clk_get_optional(dev, "ram_clk");
+ if (IS_ERR(gpriv->clk_ram))
+ return dev_err_probe(dev, PTR_ERR(gpriv->clk_ram),
+ "cannot get ram clock\n");
+
addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(addr)) {
err = PTR_ERR(addr);
goto fail_dev;
}
gpriv->base = addr;
+ gpriv->fcbase = addr + gpriv->info->regs->coffset;
/* Request IRQ that's common for both channels */
if (info->shared_global_irqs) {
@@ -1988,58 +2225,9 @@ static int rcar_canfd_probe(struct platform_device *pdev)
}
}
- err = reset_control_reset(gpriv->rstc1);
+ err = rcar_canfd_global_init(gpriv);
if (err)
- goto fail_dev;
- err = reset_control_reset(gpriv->rstc2);
- if (err) {
- reset_control_assert(gpriv->rstc1);
- goto fail_dev;
- }
-
- /* Enable peripheral clock for register access */
- err = clk_prepare_enable(gpriv->clkp);
- if (err) {
- dev_err(dev, "failed to enable peripheral clock: %pe\n",
- ERR_PTR(err));
- goto fail_reset;
- }
-
- err = rcar_canfd_reset_controller(gpriv);
- if (err) {
- dev_err(dev, "reset controller failed: %pe\n", ERR_PTR(err));
- goto fail_clk;
- }
-
- /* Controller in Global reset & Channel reset mode */
- rcar_canfd_configure_controller(gpriv);
-
- /* Configure per channel attributes */
- for_each_set_bit(ch, &gpriv->channels_mask, info->max_channels) {
- /* Configure Channel's Rx fifo */
- rcar_canfd_configure_rx(gpriv, ch);
-
- /* Configure Channel's Tx (Common) fifo */
- rcar_canfd_configure_tx(gpriv, ch);
-
- /* Configure receive rules */
- rcar_canfd_configure_afl_rules(gpriv, ch);
- }
-
- /* Configure common interrupts */
- rcar_canfd_enable_global_interrupts(gpriv);
-
- /* Start Global operation mode */
- rcar_canfd_update_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GMDC_MASK,
- RCANFD_GCTR_GMDC_GOPM);
-
- /* Verify mode change */
- err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
- !(sts & RCANFD_GSTS_GNOPM), 2, 500000);
- if (err) {
- dev_err(dev, "global operational mode failed\n");
goto fail_mode;
- }
for_each_set_bit(ch, &gpriv->channels_mask, info->max_channels) {
err = rcar_canfd_channel_probe(gpriv, ch, fcan_freq,
@@ -2058,12 +2246,7 @@ fail_channel:
for_each_set_bit(ch, &gpriv->channels_mask, info->max_channels)
rcar_canfd_channel_remove(gpriv, ch);
fail_mode:
- rcar_canfd_disable_global_interrupts(gpriv);
-fail_clk:
- clk_disable_unprepare(gpriv->clkp);
-fail_reset:
- reset_control_assert(gpriv->rstc1);
- reset_control_assert(gpriv->rstc2);
+ rcar_canfd_global_deinit(gpriv, false);
fail_dev:
return err;
}
@@ -2073,36 +2256,83 @@ static void rcar_canfd_remove(struct platform_device *pdev)
struct rcar_canfd_global *gpriv = platform_get_drvdata(pdev);
u32 ch;
- rcar_canfd_reset_controller(gpriv);
- rcar_canfd_disable_global_interrupts(gpriv);
-
for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
rcar_canfd_disable_channel_interrupts(gpriv->ch[ch]);
rcar_canfd_channel_remove(gpriv, ch);
}
- /* Enter global sleep mode */
- rcar_canfd_set_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GSLPR);
- clk_disable_unprepare(gpriv->clkp);
- reset_control_assert(gpriv->rstc1);
- reset_control_assert(gpriv->rstc2);
+ rcar_canfd_global_deinit(gpriv, true);
}
-static int __maybe_unused rcar_canfd_suspend(struct device *dev)
+static int rcar_canfd_suspend(struct device *dev)
{
+ struct rcar_canfd_global *gpriv = dev_get_drvdata(dev);
+ int err;
+ u32 ch;
+
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
+ struct rcar_canfd_channel *priv = gpriv->ch[ch];
+ struct net_device *ndev = priv->ndev;
+
+ if (!netif_running(ndev))
+ continue;
+
+ netif_device_detach(ndev);
+
+ err = rcar_canfd_close(ndev);
+ if (err) {
+ netdev_err(ndev, "rcar_canfd_close() failed %pe\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ priv->can.state = CAN_STATE_SLEEPING;
+ }
+
+ /* TODO Skip if wake-up (which is not yet supported) is enabled */
+ rcar_canfd_global_deinit(gpriv, false);
+
return 0;
}
-static int __maybe_unused rcar_canfd_resume(struct device *dev)
+static int rcar_canfd_resume(struct device *dev)
{
+ struct rcar_canfd_global *gpriv = dev_get_drvdata(dev);
+ int err;
+ u32 ch;
+
+ err = rcar_canfd_global_init(gpriv);
+ if (err) {
+ dev_err(dev, "rcar_canfd_global_init() failed %pe\n", ERR_PTR(err));
+ return err;
+ }
+
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
+ struct rcar_canfd_channel *priv = gpriv->ch[ch];
+ struct net_device *ndev = priv->ndev;
+
+ if (!netif_running(ndev))
+ continue;
+
+ err = rcar_canfd_open(ndev);
+ if (err) {
+ netdev_err(ndev, "rcar_canfd_open() failed %pe\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ netif_device_attach(ndev);
+ }
+
return 0;
}
-static SIMPLE_DEV_PM_OPS(rcar_canfd_pm_ops, rcar_canfd_suspend,
- rcar_canfd_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(rcar_canfd_pm_ops, rcar_canfd_suspend,
+ rcar_canfd_resume);
static const __maybe_unused struct of_device_id rcar_canfd_of_table[] = {
{ .compatible = "renesas,r8a779a0-canfd", .data = &rcar_gen4_hw_info },
+ { .compatible = "renesas,r9a09g047-canfd", .data = &r9a09g047_hw_info },
{ .compatible = "renesas,rcar-gen3-canfd", .data = &rcar_gen3_hw_info },
{ .compatible = "renesas,rcar-gen4-canfd", .data = &rcar_gen4_hw_info },
{ .compatible = "renesas,rzg2l-canfd", .data = &rzg2l_hw_info },
@@ -2115,7 +2345,7 @@ static struct platform_driver rcar_canfd_driver = {
.driver = {
.name = RCANFD_DRV_NAME,
.of_match_table = of_match_ptr(rcar_canfd_of_table),
- .pm = &rcar_canfd_pm_ops,
+ .pm = pm_sleep_ptr(&rcar_canfd_pm_ops),
},
.probe = rcar_canfd_probe,
.remove = rcar_canfd_remove,
diff --git a/drivers/net/can/rockchip/Kconfig b/drivers/net/can/rockchip/Kconfig
index e029e2a3ca4b..d203c530551f 100644
--- a/drivers/net/can/rockchip/Kconfig
+++ b/drivers/net/can/rockchip/Kconfig
@@ -2,7 +2,8 @@
config CAN_ROCKCHIP_CANFD
tristate "Rockchip CAN-FD controller"
- depends on OF || COMPILE_TEST
+ depends on OF
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
select CAN_RX_OFFLOAD
help
Say Y here if you want to use CAN-FD controller found on
diff --git a/drivers/net/can/rockchip/rockchip_canfd-core.c b/drivers/net/can/rockchip/rockchip_canfd-core.c
index df18c85fc078..29de0c01e4ed 100644
--- a/drivers/net/can/rockchip/rockchip_canfd-core.c
+++ b/drivers/net/can/rockchip/rockchip_canfd-core.c
@@ -118,7 +118,7 @@ static void rkcanfd_chip_set_work_mode(const struct rkcanfd_priv *priv)
static int rkcanfd_set_bittiming(struct rkcanfd_priv *priv)
{
- const struct can_bittiming *dbt = &priv->can.data_bittiming;
+ const struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
const struct can_bittiming *bt = &priv->can.bittiming;
u32 reg_nbt, reg_dbt, reg_tdc;
u32 tdco;
@@ -236,11 +236,6 @@ static void rkcanfd_chip_fifo_setup(struct rkcanfd_priv *priv)
{
u32 reg;
- /* TXE FIFO */
- reg = rkcanfd_read(priv, RKCANFD_REG_RX_FIFO_CTRL);
- reg |= RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_ENABLE;
- rkcanfd_write(priv, RKCANFD_REG_RX_FIFO_CTRL, reg);
-
/* RX FIFO */
reg = rkcanfd_read(priv, RKCANFD_REG_RX_FIFO_CTRL);
reg |= RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_ENABLE;
@@ -622,7 +617,7 @@ rkcanfd_handle_rx_fifo_overflow_int(struct rkcanfd_priv *priv)
netdev_dbg(priv->ndev, "RX-FIFO overflow\n");
skb = rkcanfd_alloc_can_err_skb(priv, &cf, &timestamp);
- if (skb)
+ if (!skb)
return 0;
rkcanfd_get_berr_counter_corrected(priv, &bec);
@@ -766,7 +761,6 @@ static const struct net_device_ops rkcanfd_netdev_ops = {
.ndo_open = rkcanfd_open,
.ndo_stop = rkcanfd_stop,
.ndo_start_xmit = rkcanfd_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static int __maybe_unused rkcanfd_runtime_suspend(struct device *dev)
@@ -904,18 +898,19 @@ static int rkcanfd_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
priv->can.clock.freq = clk_get_rate(priv->clks[0].clk);
priv->can.bittiming_const = &rkcanfd_bittiming_const;
- priv->can.data_bittiming_const = &rkcanfd_data_bittiming_const;
+ priv->can.fd.data_bittiming_const = &rkcanfd_data_bittiming_const;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_BERR_REPORTING;
- if (!(priv->devtype_data.quirks & RKCANFD_QUIRK_CANFD_BROKEN))
- priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
priv->can.do_set_mode = rkcanfd_set_mode;
priv->can.do_get_berr_counter = rkcanfd_get_berr_counter;
priv->ndev = ndev;
match = device_get_match_data(&pdev->dev);
- if (match)
+ if (match) {
priv->devtype_data = *(struct rkcanfd_devtype_data *)match;
+ if (!(priv->devtype_data.quirks & RKCANFD_QUIRK_CANFD_BROKEN))
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
+ }
err = can_rx_offload_add_manual(ndev, &priv->offload,
RKCANFD_NAPI_WEIGHT);
@@ -941,8 +936,8 @@ static void rkcanfd_remove(struct platform_device *pdev)
struct rkcanfd_priv *priv = platform_get_drvdata(pdev);
struct net_device *ndev = priv->ndev;
- can_rx_offload_del(&priv->offload);
rkcanfd_unregister(priv);
+ can_rx_offload_del(&priv->offload);
free_candev(ndev);
}
diff --git a/drivers/net/can/rockchip/rockchip_canfd-timestamp.c b/drivers/net/can/rockchip/rockchip_canfd-timestamp.c
index 43d4b5721812..72774cd2f94b 100644
--- a/drivers/net/can/rockchip/rockchip_canfd-timestamp.c
+++ b/drivers/net/can/rockchip/rockchip_canfd-timestamp.c
@@ -8,7 +8,7 @@
#include "rockchip_canfd.h"
-static u64 rkcanfd_timestamp_read(const struct cyclecounter *cc)
+static u64 rkcanfd_timestamp_read(struct cyclecounter *cc)
{
const struct rkcanfd_priv *priv = container_of(cc, struct rkcanfd_priv, cc);
@@ -39,7 +39,7 @@ static void rkcanfd_timestamp_work(struct work_struct *work)
void rkcanfd_timestamp_init(struct rkcanfd_priv *priv)
{
- const struct can_bittiming *dbt = &priv->can.data_bittiming;
+ const struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
const struct can_bittiming *bt = &priv->can.bittiming;
struct cyclecounter *cc = &priv->cc;
u32 bitrate, div, reg, rate;
diff --git a/drivers/net/can/rockchip/rockchip_canfd-tx.c b/drivers/net/can/rockchip/rockchip_canfd-tx.c
index 865a15e033a9..12200dcfd338 100644
--- a/drivers/net/can/rockchip/rockchip_canfd-tx.c
+++ b/drivers/net/can/rockchip/rockchip_canfd-tx.c
@@ -72,7 +72,7 @@ netdev_tx_t rkcanfd_start_xmit(struct sk_buff *skb, struct net_device *ndev)
int err;
u8 i;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
if (!netif_subqueue_maybe_stop(priv->ndev, 0,
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 01168db4c106..e061e35769bf 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -87,7 +87,7 @@ config CAN_PLX_PCI
config CAN_SJA1000_ISA
tristate "ISA Bus based legacy SJA1000 driver"
- depends on ISA
+ depends on HAS_IOPORT
help
This driver adds legacy support for SJA1000 chips connected to
the ISA bus using I/O port, memory mapped or indirect access.
@@ -105,7 +105,7 @@ config CAN_SJA1000_PLATFORM
config CAN_TSCAN1
tristate "TS-CAN1 PC104 boards"
- depends on ISA
+ depends on (ISA && PC104) || (COMPILE_TEST && HAS_IOPORT)
help
This driver is for Technologic Systems' TSCAN-1 PC104 boards.
https://www.embeddedts.com/products/TS-CAN1
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index da396d641e24..10d88cbda465 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -1,11 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
- * Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com>
*
* Derived from the PCAN project file driver/src/pcan_pci.c:
*
- * Copyright (C) 2001-2006 PEAK System-Technik GmbH
+ * Copyright (C) 2001-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#include <linux/kernel.h>
@@ -22,7 +22,7 @@
#include "sja1000.h"
-MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
+MODULE_AUTHOR("Stéphane Grosjean <stephane.grosjean@hms-networks.com>");
MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCI family cards");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index ebd5941c3f53..e1610b527d13 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -1,10 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2010-2012 Stephane Grosjean <s.grosjean@peak-system.com>
- *
* CAN driver for PEAK-System PCAN-PC Card
* Derived from the PCAN project file driver/src/pcan_pccard.c
- * Copyright (C) 2006-2010 PEAK System-Technik GmbH
+ *
+ * Copyright (C) 2006-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#include <linux/kernel.h>
#include <linux/module.h>
@@ -19,7 +19,7 @@
#include <linux/can/dev.h>
#include "sja1000.h"
-MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
+MODULE_AUTHOR("Stéphane Grosjean <stephane.grosjean@hms-networks.com>");
MODULE_DESCRIPTION("CAN driver for PEAK-System PCAN-PC Cards");
MODULE_LICENSE("GPL v2");
@@ -167,7 +167,7 @@ static void pcan_start_led_timer(struct pcan_pccard *card)
*/
static void pcan_stop_led_timer(struct pcan_pccard *card)
{
- del_timer_sync(&card->led_timer);
+ timer_delete_sync(&card->led_timer);
}
/*
@@ -374,7 +374,7 @@ static inline void pcan_set_can_power(struct pcan_pccard *card, int onoff)
*/
static void pcan_led_timer(struct timer_list *t)
{
- struct pcan_pccard *card = from_timer(card, t, led_timer);
+ struct pcan_pccard *card = timer_container_of(card, t, led_timer);
struct net_device *netdev;
int i, up_count = 0;
u8 ccr;
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index ddb3247948ad..a8fa0d6516b9 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -416,8 +416,6 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
int ret = 0;
skb = alloc_can_err_skb(dev, &cf);
- if (skb == NULL)
- return -ENOMEM;
txerr = priv->read_reg(priv, SJA1000_TXERR);
rxerr = priv->read_reg(priv, SJA1000_RXERR);
@@ -425,8 +423,11 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
if (isrc & IRQ_DOI) {
/* data overrun interrupt */
netdev_dbg(dev, "data overrun interrupt\n");
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ }
+
stats->rx_over_errors++;
stats->rx_errors++;
sja1000_write_cmdreg(priv, CMD_CDO); /* clear bit */
@@ -452,7 +453,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
else
state = CAN_STATE_ERROR_ACTIVE;
}
- if (state != CAN_STATE_BUS_OFF) {
+ if (state != CAN_STATE_BUS_OFF && skb) {
cf->can_id |= CAN_ERR_CNT;
cf->data[6] = txerr;
cf->data[7] = rxerr;
@@ -460,33 +461,38 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
if (isrc & IRQ_BEI) {
/* bus error interrupt */
priv->can.can_stats.bus_error++;
- stats->rx_errors++;
ecc = priv->read_reg(priv, SJA1000_ECC);
+ if (skb) {
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
-
- /* set error type */
- switch (ecc & ECC_MASK) {
- case ECC_BIT:
- cf->data[2] |= CAN_ERR_PROT_BIT;
- break;
- case ECC_FORM:
- cf->data[2] |= CAN_ERR_PROT_FORM;
- break;
- case ECC_STUFF:
- cf->data[2] |= CAN_ERR_PROT_STUFF;
- break;
- default:
- break;
- }
+ /* set error type */
+ switch (ecc & ECC_MASK) {
+ case ECC_BIT:
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ break;
+ case ECC_FORM:
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+ case ECC_STUFF:
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+ default:
+ break;
+ }
- /* set error location */
- cf->data[3] = ecc & ECC_SEG;
+ /* set error location */
+ cf->data[3] = ecc & ECC_SEG;
+ }
/* Error occurred during transmission? */
- if ((ecc & ECC_DIR) == 0)
- cf->data[2] |= CAN_ERR_PROT_TX;
+ if ((ecc & ECC_DIR) == 0) {
+ stats->tx_errors++;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_TX;
+ } else {
+ stats->rx_errors++;
+ }
}
if (isrc & IRQ_EPI) {
/* error passive interrupt */
@@ -502,8 +508,10 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
netdev_dbg(dev, "arbitration lost interrupt\n");
alc = priv->read_reg(priv, SJA1000_ALC);
priv->can.can_stats.arbitration_lost++;
- cf->can_id |= CAN_ERR_LOSTARB;
- cf->data[0] = alc & 0x1f;
+ if (skb) {
+ cf->can_id |= CAN_ERR_LOSTARB;
+ cf->data[0] = alc & 0x1f;
+ }
}
if (state != priv->can.state) {
@@ -516,6 +524,9 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
can_bus_off(dev);
}
+ if (!skb)
+ return -ENOMEM;
+
netif_rx(skb);
return ret;
@@ -537,8 +548,8 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
if (priv->read_reg(priv, SJA1000_IER) == IRQ_OFF)
goto out;
- while ((isrc = priv->read_reg(priv, SJA1000_IR)) &&
- (n < SJA1000_MAX_IRQ)) {
+ while ((n < SJA1000_MAX_IRQ) &&
+ (isrc = priv->read_reg(priv, SJA1000_IR))) {
status = priv->read_reg(priv, SJA1000_SR);
/* check for absent controller due to hw unplug */
@@ -686,7 +697,6 @@ static const struct net_device_ops sja1000_netdev_ops = {
.ndo_open = sja1000_open,
.ndo_stop = sja1000_close,
.ndo_start_xmit = sja1000_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops sja1000_ethtool_ops = {
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index c42ebe9da55a..2d555f854008 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -230,18 +230,9 @@ static int sp_probe(struct platform_device *pdev)
return -ENODEV;
}
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res_mem)
- return -ENODEV;
-
- if (!devm_request_mem_region(&pdev->dev, res_mem->start,
- resource_size(res_mem), DRV_NAME))
- return -EBUSY;
-
- addr = devm_ioremap(&pdev->dev, res_mem->start,
- resource_size(res_mem));
- if (!addr)
- return -ENOMEM;
+ addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res_mem);
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
if (of) {
irq = platform_get_irq(pdev, 0);
diff --git a/drivers/net/can/slcan/slcan-core.c b/drivers/net/can/slcan/slcan-core.c
index 24c6622d36bd..cd789e178d34 100644
--- a/drivers/net/can/slcan/slcan-core.c
+++ b/drivers/net/can/slcan/slcan-core.c
@@ -71,12 +71,21 @@ MODULE_AUTHOR("Dario Binacchi <dario.binacchi@amarulasolutions.com>");
#define SLCAN_CMD_LEN 1
#define SLCAN_SFF_ID_LEN 3
#define SLCAN_EFF_ID_LEN 8
+#define SLCAN_DATA_LENGTH_LEN 1
+#define SLCAN_ERROR_LEN 1
#define SLCAN_STATE_LEN 1
#define SLCAN_STATE_BE_RXCNT_LEN 3
#define SLCAN_STATE_BE_TXCNT_LEN 3
-#define SLCAN_STATE_FRAME_LEN (1 + SLCAN_CMD_LEN + \
- SLCAN_STATE_BE_RXCNT_LEN + \
- SLCAN_STATE_BE_TXCNT_LEN)
+#define SLCAN_STATE_MSG_LEN (SLCAN_CMD_LEN + \
+ SLCAN_STATE_LEN + \
+ SLCAN_STATE_BE_RXCNT_LEN + \
+ SLCAN_STATE_BE_TXCNT_LEN)
+#define SLCAN_ERROR_MSG_LEN_MIN (SLCAN_CMD_LEN + \
+ SLCAN_ERROR_LEN + \
+ SLCAN_DATA_LENGTH_LEN)
+#define SLCAN_FRAME_MSG_LEN_MIN (SLCAN_CMD_LEN + \
+ SLCAN_SFF_ID_LEN + \
+ SLCAN_DATA_LENGTH_LEN)
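The new minimums directly encode the shortest well-formed slcan messages: for a classic data frame that is the command character, a three-digit standard ID and one DLC digit. A standalone sketch of that arithmetic (illustrative only, not part of the patch; the example frame string is an assumption based on the slcan ASCII format):

#include <assert.h>
#include <string.h>

/* Mirror of the length macros added above. */
#define SLCAN_CMD_LEN			1	/* 't' */
#define SLCAN_SFF_ID_LEN		3	/* "123" */
#define SLCAN_DATA_LENGTH_LEN		1	/* "0" */
#define SLCAN_FRAME_MSG_LEN_MIN		(SLCAN_CMD_LEN + SLCAN_SFF_ID_LEN + \
					 SLCAN_DATA_LENGTH_LEN)

int main(void)
{
	/* "t1230": standard-ID frame 0x123 with DLC 0, end-of-message byte stripped */
	const char *msg = "t1230";

	assert(strlen(msg) == SLCAN_FRAME_MSG_LEN_MIN);	/* 1 + 3 + 1 == 5 */
	return 0;
}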
struct slcan {
struct can_priv can;
@@ -176,6 +185,9 @@ static void slcan_bump_frame(struct slcan *sl)
u32 tmpid;
char *cmd = sl->rbuff;
+ if (sl->rcount < SLCAN_FRAME_MSG_LEN_MIN)
+ return;
+
skb = alloc_can_skb(sl->dev, &cf);
if (unlikely(!skb)) {
sl->dev->stats.rx_dropped++;
@@ -281,7 +293,7 @@ static void slcan_bump_state(struct slcan *sl)
return;
}
- if (state == sl->can.state || sl->rcount < SLCAN_STATE_FRAME_LEN)
+ if (state == sl->can.state || sl->rcount != SLCAN_STATE_MSG_LEN)
return;
cmd += SLCAN_STATE_BE_RXCNT_LEN + SLCAN_CMD_LEN + 1;
@@ -328,6 +340,9 @@ static void slcan_bump_err(struct slcan *sl)
bool rx_errors = false, tx_errors = false, rx_over_errors = false;
int i, len;
+ if (sl->rcount < SLCAN_ERROR_MSG_LEN_MIN)
+ return;
+
/* get len from sanitized ASCII value */
len = cmd[1];
if (len >= '0' && len < '9')
@@ -456,8 +471,7 @@ static void slcan_bump(struct slcan *sl)
static void slcan_unesc(struct slcan *sl, unsigned char s)
{
if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */
- if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
- sl->rcount > 4)
+ if (!test_and_clear_bit(SLF_ERROR, &sl->flags))
slcan_bump(sl);
sl->rcount = 0;
@@ -760,7 +774,6 @@ static const struct net_device_ops slcan_netdev_ops = {
.ndo_open = slcan_netdev_open,
.ndo_stop = slcan_netdev_close,
.ndo_start_xmit = slcan_netdev_xmit,
- .ndo_change_mtu = can_change_mtu,
};
/******************************************
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 278ee8722770..79bc64395ac4 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -609,7 +609,6 @@ static const struct net_device_ops softing_netdev_ops = {
.ndo_open = softing_netdev_open,
.ndo_stop = softing_netdev_stop,
.ndo_start_xmit = softing_netdev_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops softing_ethtool_ops = {
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index 148d974ebb21..e00d3dbc4cf4 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -545,8 +545,6 @@ static int hi3110_stop(struct net_device *net)
priv->force_quit = 1;
free_irq(spi->irq, priv);
- destroy_workqueue(priv->wq);
- priv->wq = NULL;
mutex_lock(&priv->hi3110_lock);
@@ -663,27 +661,27 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
u8 rxerr, txerr;
skb = alloc_can_err_skb(net, &cf);
- if (!skb)
- break;
txerr = hi3110_read(spi, HI3110_READ_TEC);
rxerr = hi3110_read(spi, HI3110_READ_REC);
tx_state = txerr >= rxerr ? new_state : 0;
rx_state = txerr <= rxerr ? new_state : 0;
can_change_state(net, cf, tx_state, rx_state);
- netif_rx(skb);
if (new_state == CAN_STATE_BUS_OFF) {
+ if (skb)
+ netif_rx(skb);
can_bus_off(net);
if (priv->can.restart_ms == 0) {
priv->force_quit = 1;
hi3110_hw_sleep(spi);
break;
}
- } else {
+ } else if (skb) {
cf->can_id |= CAN_ERR_CNT;
cf->data[6] = txerr;
cf->data[7] = rxerr;
+ netif_rx(skb);
}
}
@@ -696,27 +694,38 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
/* Check for protocol errors */
if (eflag & HI3110_ERR_PROTOCOL_MASK) {
skb = alloc_can_err_skb(net, &cf);
- if (!skb)
- break;
+ if (skb)
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
priv->can.can_stats.bus_error++;
- priv->net->stats.rx_errors++;
- if (eflag & HI3110_ERR_BITERR)
- cf->data[2] |= CAN_ERR_PROT_BIT;
- else if (eflag & HI3110_ERR_FRMERR)
- cf->data[2] |= CAN_ERR_PROT_FORM;
- else if (eflag & HI3110_ERR_STUFERR)
- cf->data[2] |= CAN_ERR_PROT_STUFF;
- else if (eflag & HI3110_ERR_CRCERR)
- cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
- else if (eflag & HI3110_ERR_ACKERR)
- cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
-
- cf->data[6] = hi3110_read(spi, HI3110_READ_TEC);
- cf->data[7] = hi3110_read(spi, HI3110_READ_REC);
+ if (eflag & HI3110_ERR_BITERR) {
+ priv->net->stats.tx_errors++;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ } else if (eflag & HI3110_ERR_FRMERR) {
+ priv->net->stats.rx_errors++;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ } else if (eflag & HI3110_ERR_STUFERR) {
+ priv->net->stats.rx_errors++;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ } else if (eflag & HI3110_ERR_CRCERR) {
+ priv->net->stats.rx_errors++;
+ if (skb)
+ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+ } else if (eflag & HI3110_ERR_ACKERR) {
+ priv->net->stats.tx_errors++;
+ if (skb)
+ cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
+ }
+
netdev_dbg(priv->net, "Bus Error\n");
- netif_rx(skb);
+ if (skb) {
+ cf->data[6] = hi3110_read(spi, HI3110_READ_TEC);
+ cf->data[7] = hi3110_read(spi, HI3110_READ_REC);
+ netif_rx(skb);
+ }
}
}
@@ -759,34 +768,23 @@ static int hi3110_open(struct net_device *net)
goto out_close;
}
- priv->wq = alloc_workqueue("hi3110_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
- 0);
- if (!priv->wq) {
- ret = -ENOMEM;
- goto out_free_irq;
- }
- INIT_WORK(&priv->tx_work, hi3110_tx_work_handler);
- INIT_WORK(&priv->restart_work, hi3110_restart_work_handler);
-
ret = hi3110_hw_reset(spi);
if (ret)
- goto out_free_wq;
+ goto out_free_irq;
ret = hi3110_setup(net);
if (ret)
- goto out_free_wq;
+ goto out_free_irq;
ret = hi3110_set_normal_mode(spi);
if (ret)
- goto out_free_wq;
+ goto out_free_irq;
netif_wake_queue(net);
mutex_unlock(&priv->hi3110_lock);
return 0;
- out_free_wq:
- destroy_workqueue(priv->wq);
out_free_irq:
free_irq(spi->irq, priv);
hi3110_hw_sleep(spi);
@@ -897,6 +895,16 @@ static int hi3110_can_probe(struct spi_device *spi)
if (ret)
goto out_clk;
+ priv->wq = alloc_workqueue("hi3110_wq",
+ WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
+ 0);
+ if (!priv->wq) {
+ ret = -ENOMEM;
+ goto out_clk;
+ }
+ INIT_WORK(&priv->tx_work, hi3110_tx_work_handler);
+ INIT_WORK(&priv->restart_work, hi3110_restart_work_handler);
+
priv->spi = spi;
mutex_init(&priv->hi3110_lock);
@@ -932,6 +940,8 @@ static int hi3110_can_probe(struct spi_device *spi)
return 0;
error_probe:
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
hi3110_power_enable(priv->power, 0);
out_clk:
@@ -952,6 +962,9 @@ static void hi3110_can_remove(struct spi_device *spi)
hi3110_power_enable(priv->power, 0);
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
+
clk_disable_unprepare(priv->clk);
free_candev(net);
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index ec5c64006a16..fa97adf25b73 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -388,8 +388,8 @@ static void mcp251x_write_2regs(struct spi_device *spi, u8 reg, u8 v1, u8 v2)
mcp251x_spi_write(spi, 4);
}
-static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
- u8 mask, u8 val)
+static int mcp251x_write_bits(struct spi_device *spi, u8 reg,
+ u8 mask, u8 val)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
@@ -398,7 +398,7 @@ static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
priv->spi_tx_buf[2] = mask;
priv->spi_tx_buf[3] = val;
- mcp251x_spi_write(spi, 4);
+ return mcp251x_spi_write(spi, 4);
}
static u8 mcp251x_read_stat(struct spi_device *spi)
@@ -441,6 +441,7 @@ static int mcp251x_gpio_request(struct gpio_chip *chip,
unsigned int offset)
{
struct mcp251x_priv *priv = gpiochip_get_data(chip);
+ int ret;
u8 val;
/* nothing to be done for inputs */
@@ -450,8 +451,10 @@ static int mcp251x_gpio_request(struct gpio_chip *chip,
val = BFPCTRL_BFE(offset - MCP251X_GPIO_RX0BF);
mutex_lock(&priv->mcp_lock);
- mcp251x_write_bits(priv->spi, BFPCTRL, val, val);
+ ret = mcp251x_write_bits(priv->spi, BFPCTRL, val, val);
mutex_unlock(&priv->mcp_lock);
+ if (ret)
+ return ret;
priv->reg_bfpctrl |= val;
@@ -530,29 +533,35 @@ static int mcp251x_gpio_get_multiple(struct gpio_chip *chip,
return 0;
}
-static void mcp251x_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int mcp251x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct mcp251x_priv *priv = gpiochip_get_data(chip);
u8 mask, val;
+ int ret;
mask = BFPCTRL_BFS(offset - MCP251X_GPIO_RX0BF);
val = value ? mask : 0;
mutex_lock(&priv->mcp_lock);
- mcp251x_write_bits(priv->spi, BFPCTRL, mask, val);
+ ret = mcp251x_write_bits(priv->spi, BFPCTRL, mask, val);
mutex_unlock(&priv->mcp_lock);
+ if (ret)
+ return ret;
priv->reg_bfpctrl &= ~mask;
priv->reg_bfpctrl |= val;
+
+ return 0;
}
-static void
+static int
mcp251x_gpio_set_multiple(struct gpio_chip *chip,
unsigned long *maskp, unsigned long *bitsp)
{
struct mcp251x_priv *priv = gpiochip_get_data(chip);
u8 mask, val;
+ int ret;
mask = FIELD_GET(MCP251X_GPIO_OUTPUT_MASK, maskp[0]);
mask = FIELD_PREP(BFPCTRL_BFS_MASK, mask);
@@ -561,14 +570,18 @@ mcp251x_gpio_set_multiple(struct gpio_chip *chip,
val = FIELD_PREP(BFPCTRL_BFS_MASK, val);
if (!mask)
- return;
+ return 0;
mutex_lock(&priv->mcp_lock);
- mcp251x_write_bits(priv->spi, BFPCTRL, mask, val);
+ ret = mcp251x_write_bits(priv->spi, BFPCTRL, mask, val);
mutex_unlock(&priv->mcp_lock);
+ if (ret)
+ return ret;
priv->reg_bfpctrl &= ~mask;
priv->reg_bfpctrl |= val;
+
+ return 0;
}
static void mcp251x_gpio_restore(struct spi_device *spi)
@@ -1257,7 +1270,6 @@ static const struct net_device_ops mcp251x_netdev_ops = {
.ndo_open = mcp251x_open,
.ndo_stop = mcp251x_stop,
.ndo_start_xmit = mcp251x_hard_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops mcp251x_ethtool_ops = {
@@ -1308,7 +1320,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
clk = devm_clk_get_optional(&spi->dev, NULL);
if (IS_ERR(clk))
- return PTR_ERR(clk);
+ return dev_err_probe(&spi->dev, PTR_ERR(clk), "Cannot get clock\n");
freq = clk_get_rate(clk);
if (freq == 0)
@@ -1316,7 +1328,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
/* Sanity check */
if (freq < 1000000 || freq > 25000000)
- return -ERANGE;
+ return dev_err_probe(&spi->dev, -ERANGE, "clock frequency out of range\n");
/* Allocate can/net device */
net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX);
@@ -1324,8 +1336,10 @@ static int mcp251x_can_probe(struct spi_device *spi)
return -ENOMEM;
ret = clk_prepare_enable(clk);
- if (ret)
+ if (ret) {
+ dev_err_probe(&spi->dev, ret, "Cannot enable clock\n");
goto out_free;
+ }
net->netdev_ops = &mcp251x_netdev_ops;
net->ethtool_ops = &mcp251x_ethtool_ops;
@@ -1350,22 +1364,28 @@ static int mcp251x_can_probe(struct spi_device *spi)
else
spi->max_speed_hz = spi->max_speed_hz ? : 10 * 1000 * 1000;
ret = spi_setup(spi);
- if (ret)
+ if (ret) {
+ dev_err_probe(&spi->dev, ret, "Cannot set up spi\n");
goto out_clk;
+ }
priv->power = devm_regulator_get_optional(&spi->dev, "vdd");
priv->transceiver = devm_regulator_get_optional(&spi->dev, "xceiver");
if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
(PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) {
ret = -EPROBE_DEFER;
+ dev_err_probe(&spi->dev, ret, "supply deferred\n");
goto out_clk;
}
ret = mcp251x_power_enable(priv->power, 1);
- if (ret)
+ if (ret) {
+ dev_err_probe(&spi->dev, ret, "Cannot enable power\n");
goto out_clk;
+ }
- priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
+ priv->wq = alloc_workqueue("mcp251x_wq",
+ WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
0);
if (!priv->wq) {
ret = -ENOMEM;
@@ -1396,21 +1416,24 @@ static int mcp251x_can_probe(struct spi_device *spi)
/* Here is OK to not lock the MCP, no one knows about it yet */
ret = mcp251x_hw_probe(spi);
if (ret) {
- if (ret == -ENODEV)
- dev_err(&spi->dev, "Cannot initialize MCP%x. Wrong wiring?\n",
- priv->model);
+ dev_err_probe(&spi->dev, ret, "Cannot initialize MCP%x. Wrong wiring?\n",
+ priv->model);
goto error_probe;
}
mcp251x_hw_sleep(spi);
ret = register_candev(net);
- if (ret)
+ if (ret) {
+ dev_err_probe(&spi->dev, ret, "Cannot register CAN device\n");
goto error_probe;
+ }
ret = mcp251x_gpio_setup(priv);
- if (ret)
+ if (ret) {
+ dev_err_probe(&spi->dev, ret, "Cannot set up gpios\n");
goto out_unregister_candev;
+ }
netdev_info(net, "MCP%x successfully initialized.\n", priv->model);
return 0;
@@ -1429,7 +1452,6 @@ out_clk:
out_free:
free_candev(net);
- dev_err(&spi->dev, "Probe failed, err=%d\n", -ret);
return ret;
}
diff --git a/drivers/net/can/spi/mcp251xfd/Kconfig b/drivers/net/can/spi/mcp251xfd/Kconfig
index 877e4356010d..7c29846e6051 100644
--- a/drivers/net/can/spi/mcp251xfd/Kconfig
+++ b/drivers/net/can/spi/mcp251xfd/Kconfig
@@ -5,6 +5,7 @@ config CAN_MCP251XFD
select CAN_RX_OFFLOAD
select REGMAP
select WANT_DEV_COREDUMP
+ select GPIOLIB
help
Driver for the Microchip MCP251XFD SPI FD-CAN controller
family.
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index 3bc56517fe7a..5134ebb85880 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -75,6 +75,24 @@ static const struct can_bittiming_const mcp251xfd_data_bittiming_const = {
.brp_inc = 1,
};
+/* The datasheet of the mcp2518fd (DS20006027B) specifies a range of
+ * [-64,63] for TDCO, indicating a relative TDCO.
+ *
+ * Manual tests have shown that using a relative TDCO configuration
+ * results in bus off, while an absolute configuration works.
+ *
+ * For TDCO use the max value (63) from the data sheet, but 0 as the
+ * minimum.
+ */
+static const struct can_tdc_const mcp251xfd_tdc_const = {
+ .tdcv_min = 0,
+ .tdcv_max = 63,
+ .tdco_min = 0,
+ .tdco_max = 63,
+ .tdcf_min = 0,
+ .tdcf_max = 0,
+};
+
static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model)
{
switch (model) {
@@ -509,9 +527,8 @@ static int mcp251xfd_chip_timestamp_init(const struct mcp251xfd_priv *priv)
static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
{
const struct can_bittiming *bt = &priv->can.bittiming;
- const struct can_bittiming *dbt = &priv->can.data_bittiming;
- u32 val = 0;
- s8 tdco;
+ const struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
+ u32 tdcmod, val = 0;
int err;
/* CAN Control Register
@@ -575,34 +592,37 @@ static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
return err;
/* Transmitter Delay Compensation */
- tdco = clamp_t(int, dbt->brp * (dbt->prop_seg + dbt->phase_seg1),
- -64, 63);
- val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK,
- MCP251XFD_REG_TDC_TDCMOD_AUTO) |
- FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, tdco);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_AUTO)
+ tdcmod = MCP251XFD_REG_TDC_TDCMOD_AUTO;
+ else if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_MANUAL)
+ tdcmod = MCP251XFD_REG_TDC_TDCMOD_MANUAL;
+ else
+ tdcmod = MCP251XFD_REG_TDC_TDCMOD_DISABLED;
+
+ val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK, tdcmod) |
+ FIELD_PREP(MCP251XFD_REG_TDC_TDCV_MASK, priv->can.fd.tdc.tdcv) |
+ FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, priv->can.fd.tdc.tdco);
return regmap_write(priv->map_reg, MCP251XFD_REG_TDC, val);
}
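Before this change the driver always derived TDCO automatically from the data phase bit timing; the comment next to mcp251xfd_tdc_const explains why the offset is treated as absolute and capped at 63. A standalone sketch of the formula the old code used (illustrative only; the clamp is open-coded and the names are hypothetical):

#include <stdint.h>

/* Absolute SSP offset (TDCO) derived from the data bit timing, clamped to
 * the [0, 63] range given in mcp251xfd_tdc_const.
 */
static uint32_t tdco_from_data_bittiming(uint32_t brp, uint32_t prop_seg,
					 uint32_t phase_seg1)
{
	uint32_t tdco = brp * (prop_seg + phase_seg1);

	return tdco > 63 ? 63 : tdco;	/* lower bound of 0 holds by construction */
}

In the new code the value programmed into MCP251XFD_REG_TDC_TDCO_MASK instead comes from priv->can.fd.tdc.tdco, as set up by the can.fd core.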
static int mcp251xfd_chip_rx_int_enable(const struct mcp251xfd_priv *priv)
{
- u32 val;
+ u32 val, mask;
if (!priv->rx_int)
return 0;
- /* Configure GPIOs:
- * - PIN0: GPIO Input
- * - PIN1: GPIO Input/RX Interrupt
+ /* Configure PIN1 as RX Interrupt:
*
* PIN1 must be Input, otherwise there is a glitch on the
* rx-INT line. It happens between setting the PIN as output
* (in the first byte of the SPI transfer) and configuring the
* PIN as interrupt (in the last byte of the SPI transfer).
*/
- val = MCP251XFD_REG_IOCON_PM0 | MCP251XFD_REG_IOCON_TRIS1 |
- MCP251XFD_REG_IOCON_TRIS0;
- return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val);
+ val = MCP251XFD_REG_IOCON_TRIS(1);
+ mask = MCP251XFD_REG_IOCON_TRIS(1) | MCP251XFD_REG_IOCON_PM(1);
+ return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON, mask, val);
}
static int mcp251xfd_chip_rx_int_disable(const struct mcp251xfd_priv *priv)
@@ -612,13 +632,9 @@ static int mcp251xfd_chip_rx_int_disable(const struct mcp251xfd_priv *priv)
if (!priv->rx_int)
return 0;
- /* Configure GPIOs:
- * - PIN0: GPIO Input
- * - PIN1: GPIO Input
- */
- val = MCP251XFD_REG_IOCON_PM1 | MCP251XFD_REG_IOCON_PM0 |
- MCP251XFD_REG_IOCON_TRIS1 | MCP251XFD_REG_IOCON_TRIS0;
- return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val);
+ /* Configure PIN1 as GPIO Input */
+ val = MCP251XFD_REG_IOCON_PM(1) | MCP251XFD_REG_IOCON_TRIS(1);
+ return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON, val, val);
}
static int mcp251xfd_chip_ecc_init(struct mcp251xfd_priv *priv)
@@ -745,21 +761,13 @@ static void mcp251xfd_chip_stop(struct mcp251xfd_priv *priv,
mcp251xfd_chip_interrupts_disable(priv);
mcp251xfd_chip_rx_int_disable(priv);
mcp251xfd_timestamp_stop(priv);
- mcp251xfd_chip_sleep(priv);
+ mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_CONFIG);
}
static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
{
int err;
- err = mcp251xfd_chip_softreset(priv);
- if (err)
- goto out_chip_stop;
-
- err = mcp251xfd_chip_clock_init(priv);
- if (err)
- goto out_chip_stop;
-
err = mcp251xfd_chip_timestamp_init(priv);
if (err)
goto out_chip_stop;
@@ -1603,8 +1611,11 @@ static int mcp251xfd_open(struct net_device *ndev)
return err;
err = pm_runtime_resume_and_get(ndev->dev.parent);
- if (err)
+ if (err) {
+ if (err == -ETIMEDOUT || err == -ENODEV)
+ pm_runtime_set_suspended(ndev->dev.parent);
goto out_close_candev;
+ }
err = mcp251xfd_ring_alloc(priv);
if (err)
@@ -1692,8 +1703,8 @@ static const struct net_device_ops mcp251xfd_netdev_ops = {
.ndo_open = mcp251xfd_open,
.ndo_stop = mcp251xfd_stop,
.ndo_start_xmit = mcp251xfd_start_xmit,
- .ndo_eth_ioctl = can_eth_ioctl_hwts,
- .ndo_change_mtu = can_change_mtu,
+ .ndo_hwtstamp_get = can_hwtstamp_get,
+ .ndo_hwtstamp_set = can_hwtstamp_set,
};
static void
@@ -1786,6 +1797,160 @@ static int mcp251xfd_register_check_rx_int(struct mcp251xfd_priv *priv)
return 0;
}
+static const char * const mcp251xfd_gpio_names[] = { "GPIO0", "GPIO1" };
+
+static int mcp251xfd_gpio_request(struct gpio_chip *chip, unsigned int offset)
+{
+ struct mcp251xfd_priv *priv = gpiochip_get_data(chip);
+ u32 pin_mask = MCP251XFD_REG_IOCON_PM(offset);
+ int ret;
+
+ if (priv->rx_int && offset == 1) {
+ netdev_err(priv->ndev, "Can't use GPIO 1 with RX-INT!\n");
+ return -EINVAL;
+ }
+
+ ret = pm_runtime_resume_and_get(priv->ndev->dev.parent);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON, pin_mask, pin_mask);
+}
+
+static void mcp251xfd_gpio_free(struct gpio_chip *chip, unsigned int offset)
+{
+ struct mcp251xfd_priv *priv = gpiochip_get_data(chip);
+
+ pm_runtime_put(priv->ndev->dev.parent);
+}
+
+static int mcp251xfd_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct mcp251xfd_priv *priv = gpiochip_get_data(chip);
+ u32 mask = MCP251XFD_REG_IOCON_TRIS(offset);
+ u32 val;
+ int ret;
+
+ ret = regmap_read(priv->map_reg, MCP251XFD_REG_IOCON, &val);
+ if (ret)
+ return ret;
+
+ if (mask & val)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
+static int mcp251xfd_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct mcp251xfd_priv *priv = gpiochip_get_data(chip);
+ u32 mask = MCP251XFD_REG_IOCON_GPIO(offset);
+ u32 val;
+ int ret;
+
+ ret = regmap_read(priv->map_reg, MCP251XFD_REG_IOCON, &val);
+ if (ret)
+ return ret;
+
+ return !!(mask & val);
+}
+
+static int mcp251xfd_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bit)
+{
+ struct mcp251xfd_priv *priv = gpiochip_get_data(chip);
+ u32 val;
+ int ret;
+
+ ret = regmap_read(priv->map_reg, MCP251XFD_REG_IOCON, &val);
+ if (ret)
+ return ret;
+
+ *bit = FIELD_GET(MCP251XFD_REG_IOCON_GPIO_MASK, val) & *mask;
+
+ return 0;
+}
+
+static int mcp251xfd_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct mcp251xfd_priv *priv = gpiochip_get_data(chip);
+ u32 dir_mask = MCP251XFD_REG_IOCON_TRIS(offset);
+ u32 val_mask = MCP251XFD_REG_IOCON_LAT(offset);
+ u32 val;
+
+ if (value)
+ val = val_mask;
+ else
+ val = 0;
+
+ return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON,
+ dir_mask | val_mask, val);
+}
+
+static int mcp251xfd_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct mcp251xfd_priv *priv = gpiochip_get_data(chip);
+ u32 dir_mask = MCP251XFD_REG_IOCON_TRIS(offset);
+
+ return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON, dir_mask, dir_mask);
+}
+
+static int mcp251xfd_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ struct mcp251xfd_priv *priv = gpiochip_get_data(chip);
+ u32 val_mask = MCP251XFD_REG_IOCON_LAT(offset);
+ u32 val;
+
+ if (value)
+ val = val_mask;
+ else
+ val = 0;
+
+ return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON, val_mask, val);
+}
+
+static int mcp251xfd_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct mcp251xfd_priv *priv = gpiochip_get_data(chip);
+ u32 val;
+
+ val = FIELD_PREP(MCP251XFD_REG_IOCON_LAT_MASK, *bits);
+
+ return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON,
+ MCP251XFD_REG_IOCON_LAT_MASK, val);
+}
+
+static int mcp251xfd_gpio_setup(struct mcp251xfd_priv *priv)
+{
+ struct gpio_chip *gc = &priv->gc;
+
+ if (!device_property_present(&priv->spi->dev, "gpio-controller"))
+ return 0;
+
+ gc->label = dev_name(&priv->spi->dev);
+ gc->parent = &priv->spi->dev;
+ gc->owner = THIS_MODULE;
+ gc->request = mcp251xfd_gpio_request;
+ gc->free = mcp251xfd_gpio_free;
+ gc->get_direction = mcp251xfd_gpio_get_direction;
+ gc->direction_output = mcp251xfd_gpio_direction_output;
+ gc->direction_input = mcp251xfd_gpio_direction_input;
+ gc->get = mcp251xfd_gpio_get;
+ gc->get_multiple = mcp251xfd_gpio_get_multiple;
+ gc->set = mcp251xfd_gpio_set;
+ gc->set_multiple = mcp251xfd_gpio_set_multiple;
+ gc->base = -1;
+ gc->can_sleep = true;
+ gc->ngpio = ARRAY_SIZE(mcp251xfd_gpio_names);
+ gc->names = mcp251xfd_gpio_names;
+
+ return devm_gpiochip_add_data(&priv->spi->dev, gc, priv);
+}
+
static int
mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, u32 *dev_id,
u32 *effective_speed_hz_slow,
@@ -1885,53 +2050,59 @@ static int mcp251xfd_register(struct mcp251xfd_priv *priv)
struct net_device *ndev = priv->ndev;
int err;
+ mcp251xfd_register_quirks(priv);
+
err = mcp251xfd_clks_and_vdd_enable(priv);
if (err)
return err;
- pm_runtime_get_noresume(ndev->dev.parent);
- err = pm_runtime_set_active(ndev->dev.parent);
- if (err)
- goto out_runtime_put_noidle;
- pm_runtime_enable(ndev->dev.parent);
-
- mcp251xfd_register_quirks(priv);
-
err = mcp251xfd_chip_softreset(priv);
if (err == -ENODEV)
- goto out_runtime_disable;
+ goto out_clks_and_vdd_disable;
if (err)
goto out_chip_sleep;
err = mcp251xfd_chip_clock_init(priv);
if (err == -ENODEV)
- goto out_runtime_disable;
+ goto out_clks_and_vdd_disable;
if (err)
goto out_chip_sleep;
+ pm_runtime_get_noresume(ndev->dev.parent);
+ err = pm_runtime_set_active(ndev->dev.parent);
+ if (err)
+ goto out_runtime_put_noidle;
+ pm_runtime_enable(ndev->dev.parent);
+
err = mcp251xfd_register_chip_detect(priv);
if (err)
- goto out_chip_sleep;
+ goto out_runtime_disable;
err = mcp251xfd_register_check_rx_int(priv);
if (err)
- goto out_chip_sleep;
+ goto out_runtime_disable;
mcp251xfd_ethtool_init(priv);
+ err = mcp251xfd_gpio_setup(priv);
+ if (err) {
+ dev_err_probe(&priv->spi->dev, err, "Failed to register gpio-controller.\n");
+ goto out_runtime_disable;
+ }
+
err = register_candev(ndev);
if (err)
- goto out_chip_sleep;
+ goto out_runtime_disable;
err = mcp251xfd_register_done(priv);
if (err)
goto out_unregister_candev;
- /* Put controller into sleep mode and let pm_runtime_put()
- * disable the clocks and vdd. If CONFIG_PM is not enabled,
- * the clocks and vdd will stay powered.
+ /* Put the controller into Config mode and let pm_runtime_put()
+ * put it into sleep mode and disable the clocks and vdd. If CONFIG_PM
+ * is not enabled, the clocks and vdd will stay powered.
*/
- err = mcp251xfd_chip_sleep(priv);
+ err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_CONFIG);
if (err)
goto out_unregister_candev;
@@ -1941,12 +2112,13 @@ static int mcp251xfd_register(struct mcp251xfd_priv *priv)
out_unregister_candev:
unregister_candev(ndev);
-out_chip_sleep:
- mcp251xfd_chip_sleep(priv);
out_runtime_disable:
pm_runtime_disable(ndev->dev.parent);
out_runtime_put_noidle:
pm_runtime_put_noidle(ndev->dev.parent);
+out_chip_sleep:
+ mcp251xfd_chip_sleep(priv);
+out_clks_and_vdd_disable:
mcp251xfd_clks_and_vdd_disable(priv);
return err;
@@ -1958,10 +2130,12 @@ static inline void mcp251xfd_unregister(struct mcp251xfd_priv *priv)
unregister_candev(ndev);
- if (pm_runtime_enabled(ndev->dev.parent))
+ if (pm_runtime_enabled(ndev->dev.parent)) {
pm_runtime_disable(ndev->dev.parent);
- else
+ } else {
+ mcp251xfd_chip_sleep(priv);
mcp251xfd_clks_and_vdd_disable(priv);
+ }
}
static const struct of_device_id mcp251xfd_of_match[] = {
@@ -2082,11 +2256,13 @@ static int mcp251xfd_probe(struct spi_device *spi)
priv->can.do_set_mode = mcp251xfd_set_mode;
priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter;
priv->can.bittiming_const = &mcp251xfd_bittiming_const;
- priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const;
+ priv->can.fd.data_bittiming_const = &mcp251xfd_data_bittiming_const;
+ priv->can.fd.tdc_const = &mcp251xfd_tdc_const;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING |
CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
- CAN_CTRLMODE_CC_LEN8_DLC;
+ CAN_CTRLMODE_CC_LEN8_DLC | CAN_CTRLMODE_TDC_AUTO |
+ CAN_CTRLMODE_TDC_MANUAL;
set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
priv->ndev = ndev;
priv->spi = spi;
@@ -2174,24 +2350,49 @@ static void mcp251xfd_remove(struct spi_device *spi)
struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
struct net_device *ndev = priv->ndev;
- can_rx_offload_del(&priv->offload);
mcp251xfd_unregister(priv);
+ can_rx_offload_del(&priv->offload);
spi->max_speed_hz = priv->spi_max_speed_hz_orig;
free_candev(ndev);
}
static int __maybe_unused mcp251xfd_runtime_suspend(struct device *device)
{
- const struct mcp251xfd_priv *priv = dev_get_drvdata(device);
+ struct mcp251xfd_priv *priv = dev_get_drvdata(device);
+ mcp251xfd_chip_sleep(priv);
return mcp251xfd_clks_and_vdd_disable(priv);
}
static int __maybe_unused mcp251xfd_runtime_resume(struct device *device)
{
- const struct mcp251xfd_priv *priv = dev_get_drvdata(device);
+ struct mcp251xfd_priv *priv = dev_get_drvdata(device);
+ int err;
+
+ err = mcp251xfd_clks_and_vdd_enable(priv);
+ if (err)
+ return err;
+
+ err = mcp251xfd_chip_softreset(priv);
+ if (err == -ENODEV)
+ goto out_clks_and_vdd_disable;
+ if (err)
+ goto out_chip_sleep;
+
+ err = mcp251xfd_chip_clock_init(priv);
+ if (err == -ENODEV)
+ goto out_clks_and_vdd_disable;
+ if (err)
+ goto out_chip_sleep;
- return mcp251xfd_clks_and_vdd_enable(priv);
+ return 0;
+
+out_chip_sleep:
+ mcp251xfd_chip_sleep(priv);
+out_clks_and_vdd_disable:
+ mcp251xfd_clks_and_vdd_disable(priv);
+
+ return err;
}
static const struct dev_pm_ops mcp251xfd_pm_ops = {
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
index 8c5be8d1c519..70d5ff0ae7ac 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
@@ -13,17 +13,9 @@
static const struct regmap_config mcp251xfd_regmap_crc;
static int
-mcp251xfd_regmap_nocrc_write(void *context, const void *data, size_t count)
-{
- struct spi_device *spi = context;
-
- return spi_write(spi, data, count);
-}
-
-static int
-mcp251xfd_regmap_nocrc_gather_write(void *context,
- const void *reg, size_t reg_len,
- const void *val, size_t val_len)
+_mcp251xfd_regmap_nocrc_gather_write(void *context,
+ const void *reg, size_t reg_len,
+ const void *val, size_t val_len)
{
struct spi_device *spi = context;
struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
@@ -47,6 +39,54 @@ mcp251xfd_regmap_nocrc_gather_write(void *context,
return spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
}
+static int
+mcp251xfd_regmap_nocrc_gather_write(void *context,
+ const void *reg_p, size_t reg_len,
+ const void *val, size_t val_len)
+{
+ const u16 byte_exclude = MCP251XFD_REG_IOCON +
+ mcp251xfd_first_byte_set(MCP251XFD_REG_IOCON_GPIO_MASK);
+ u16 reg = be16_to_cpu(*(__be16 *)reg_p) & MCP251XFD_SPI_ADDRESS_MASK;
+ int ret;
+
+ /* Never write to bits 16..23 of IOCON register to avoid clearing of LAT0/LAT1
+ *
+ * According to MCP2518FD Errata DS80000789E, item 5, writing the IOCON
+ * register using one SPI write command clears LAT0/LAT1.
+ *
+ * The Errata Fix/Work Around suggests writing registers with single-byte
+ * write instructions. However, it seems that the byte at 0xe06 (IOCON[23:16])
+ * is for read-only access and writing to it causes the clearing of LAT0/LAT1.
+ */
+ if (reg <= byte_exclude && reg + val_len > byte_exclude) {
+ size_t len = byte_exclude - reg;
+
+ /* Write up to 0xe05 */
+ ret = _mcp251xfd_regmap_nocrc_gather_write(context, reg_p, reg_len, val, len);
+ if (ret)
+ return ret;
+
+ /* Write from 0xe07 on */
+ reg += len + 1;
+ reg = (__force unsigned short)cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_WRITE | reg);
+ return _mcp251xfd_regmap_nocrc_gather_write(context, &reg, reg_len,
+ val + len + 1,
+ val_len - len - 1);
+ }
+
+ return _mcp251xfd_regmap_nocrc_gather_write(context, reg_p, reg_len,
+ val, val_len);
+}
+
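Concretely, the comment above pins IOCON[23:16] at 0xe06, so the register itself sits at 0xe04 and, with the GPIO bits at 17:16, byte_exclude lands on 0xe06. A full 32-bit write to IOCON is therefore issued as two transfers, 0xe04..0xe05 and 0xe07. A standalone sketch of that split arithmetic (illustrative only; the constants are assumptions derived from the comment and the header defines):

#include <assert.h>
#include <stddef.h>

#define REG_IOCON	0xe04u			/* assumed: its byte 2 is 0xe06 */
#define IOCON_RO_BYTE	(REG_IOCON + 2)		/* IOCON[23:16], must not be written */

int main(void)
{
	unsigned int reg = REG_IOCON;
	size_t val_len = 4;			/* full 32 bit register write */

	if (reg <= IOCON_RO_BYTE && reg + val_len > IOCON_RO_BYTE) {
		size_t len = IOCON_RO_BYTE - reg;

		assert(len == 2);			/* 1st transfer: 0xe04..0xe05 */
		assert(reg + len + 1 == 0xe07);		/* 2nd transfer starts at 0xe07 */
		assert(val_len - len - 1 == 1);		/* ...and carries one byte */
	}
	return 0;
}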
+static int
+mcp251xfd_regmap_nocrc_write(void *context, const void *data, size_t count)
+{
+ const size_t data_offset = sizeof(__be16);
+
+ return mcp251xfd_regmap_nocrc_gather_write(context, data, data_offset,
+ data + data_offset, count - data_offset);
+}
+
static inline bool
mcp251xfd_update_bits_read_reg(const struct mcp251xfd_priv *priv,
unsigned int reg)
@@ -64,6 +104,7 @@ mcp251xfd_update_bits_read_reg(const struct mcp251xfd_priv *priv,
case MCP251XFD_REG_CON:
case MCP251XFD_REG_OSC:
case MCP251XFD_REG_ECCCON:
+ case MCP251XFD_REG_IOCON:
return true;
default:
mcp251xfd_for_each_rx_ring(priv, ring, n) {
@@ -139,10 +180,9 @@ mcp251xfd_regmap_nocrc_update_bits(void *context, unsigned int reg,
tmp_le32 = orig_le32 & ~mask_le32;
tmp_le32 |= val_le32 & mask_le32;
- mcp251xfd_spi_cmd_write_nocrc(&buf_tx->cmd, reg + first_byte);
- memcpy(buf_tx->data, &tmp_le32, len);
-
- return spi_write(spi, buf_tx, sizeof(buf_tx->cmd) + len);
+ reg += first_byte;
+ mcp251xfd_spi_cmd_write_nocrc(&buf_tx->cmd, reg);
+ return mcp251xfd_regmap_nocrc_gather_write(context, &buf_tx->cmd, 2, &tmp_le32, len);
}
static int
@@ -196,9 +236,9 @@ mcp251xfd_regmap_nocrc_read(void *context,
}
static int
-mcp251xfd_regmap_crc_gather_write(void *context,
- const void *reg_p, size_t reg_len,
- const void *val, size_t val_len)
+_mcp251xfd_regmap_crc_gather_write(void *context,
+ const void *reg_p, size_t reg_len,
+ const void *val, size_t val_len)
{
struct spi_device *spi = context;
struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
@@ -230,6 +270,44 @@ mcp251xfd_regmap_crc_gather_write(void *context,
}
static int
+mcp251xfd_regmap_crc_gather_write(void *context,
+ const void *reg_p, size_t reg_len,
+ const void *val, size_t val_len)
+{
+ const u16 byte_exclude = MCP251XFD_REG_IOCON +
+ mcp251xfd_first_byte_set(MCP251XFD_REG_IOCON_GPIO_MASK);
+ u16 reg = *(u16 *)reg_p;
+ int ret;
+
+ /* Never write to bits 16..23 of IOCON register to avoid clearing of LAT0/LAT1
+ *
+ * According to MCP2518FD Errata DS80000789E, item 5, writing the IOCON
+ * register using one SPI write command clears LAT0/LAT1.
+ *
+ * The Errata Fix/Work Around suggests writing registers with single-byte
+ * write instructions. However, it seems that the byte at 0xe06 (IOCON[23:16])
+ * is for read-only access and writing to it causes the clearing of LAT0/LAT1.
+ */
+ if (reg <= byte_exclude && reg + val_len > byte_exclude) {
+ size_t len = byte_exclude - reg;
+
+ /* Write up to 0xe05 */
+ ret = _mcp251xfd_regmap_crc_gather_write(context, &reg, reg_len, val, len);
+ if (ret)
+ return ret;
+
+ /* Write from 0xe07 on */
+ reg += len + 1;
+ return _mcp251xfd_regmap_crc_gather_write(context, &reg, reg_len,
+ val + len + 1,
+ val_len - len - 1);
+ }
+
+ return _mcp251xfd_regmap_crc_gather_write(context, reg_p, reg_len,
+ val, val_len);
+}
+
+static int
mcp251xfd_regmap_crc_write(void *context,
const void *data, size_t count)
{
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
index e684991fa391..c34f2067a989 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
@@ -2,7 +2,7 @@
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
-// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Copyright (c) 2019, 2020, 2021, 2024 Pengutronix,
// Marc Kleine-Budde <kernel@pengutronix.de>
//
// Based on:
@@ -483,9 +483,11 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
};
const struct ethtool_coalesce ec = {
.rx_coalesce_usecs_irq = priv->rx_coalesce_usecs_irq,
- .rx_max_coalesced_frames_irq = priv->rx_obj_num_coalesce_irq,
+ .rx_max_coalesced_frames_irq = priv->rx_obj_num_coalesce_irq == 0 ?
+ 1 : priv->rx_obj_num_coalesce_irq,
.tx_coalesce_usecs_irq = priv->tx_coalesce_usecs_irq,
- .tx_max_coalesced_frames_irq = priv->tx_obj_num_coalesce_irq,
+ .tx_max_coalesced_frames_irq = priv->tx_obj_num_coalesce_irq == 0 ?
+ 1 : priv->tx_obj_num_coalesce_irq,
};
struct can_ram_layout layout;
@@ -539,11 +541,11 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
}
priv->rx_ring_num = i;
- hrtimer_init(&priv->rx_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- priv->rx_irq_timer.function = mcp251xfd_rx_irq_timer;
+ hrtimer_setup(&priv->rx_irq_timer, mcp251xfd_rx_irq_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
- hrtimer_init(&priv->tx_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- priv->tx_irq_timer.function = mcp251xfd_tx_irq_timer;
+ hrtimer_setup(&priv->tx_irq_timer, mcp251xfd_tx_irq_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
return 0;
}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
index f732556d233a..e94321849fd7 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
@@ -16,9 +16,14 @@
#include "mcp251xfd.h"
-static inline bool mcp251xfd_tx_fifo_sta_full(u32 fifo_sta)
+static inline bool mcp251xfd_tx_fifo_sta_empty(u32 fifo_sta)
{
- return !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF);
+ return fifo_sta & MCP251XFD_REG_FIFOSTA_TFERFFIF;
+}
+
+static inline bool mcp251xfd_tx_fifo_sta_less_than_half_full(u32 fifo_sta)
+{
+ return fifo_sta & MCP251XFD_REG_FIFOSTA_TFHRFHIF;
}
static inline int
@@ -122,7 +127,11 @@ mcp251xfd_get_tef_len(struct mcp251xfd_priv *priv, u8 *len_p)
if (err)
return err;
- if (mcp251xfd_tx_fifo_sta_full(fifo_sta)) {
+ /* If the chip says the TX-FIFO is empty, but there are no TX
+ * buffers free in the ring, we assume all have been sent.
+ */
+ if (mcp251xfd_tx_fifo_sta_empty(fifo_sta) &&
+ mcp251xfd_get_tx_free(tx_ring) == 0) {
*len_p = tx_ring->obj_num;
return 0;
}
@@ -143,7 +152,29 @@ mcp251xfd_get_tef_len(struct mcp251xfd_priv *priv, u8 *len_p)
BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(len));
len = (chip_tx_tail << shift) - (tail << shift);
- *len_p = len >> shift;
+ len >>= shift;
+
+ /* According to mcp2518fd erratum DS80000789E, item 6, the FIFOCI
+ * bits of a FIFOSTA register (here: the TX-FIFO tail index) might
+ * be corrupted.
+ *
+ * However, here it seems that the bit indicating an empty TX-FIFO
+ * (MCP251XFD_REG_FIFOSTA_TFERFFIF) is not correct, while the
+ * TX-FIFO tail index is.
+ *
+ * We assume the TX-FIFO is empty, i.e. all pending CAN frames
+ * have been sent, if:
+ * - Chip's head and tail index are equal (len == 0).
+ * - The TX-FIFO is less than half full.
+ * (The TX-FIFO empty case has already been checked at the
+ * beginning of this function.)
+ * - No free buffers in the TX ring.
+ */
+ if (len == 0 && mcp251xfd_tx_fifo_sta_less_than_half_full(fifo_sta) &&
+ mcp251xfd_get_tx_free(tx_ring) == 0)
+ len = tx_ring->obj_num;
+
+ *len_p = len;
return 0;
}
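The erratum comment in this hunk boils down to a simple decision: a zero head/tail difference only means "nothing to do" when the TX ring still has free buffers or the FIFO is at least half full. A standalone sketch of that decision with the status bits reduced to booleans (illustrative only; names are hypothetical):

#include <stdbool.h>
#include <stdint.h>

/* How many TEF objects to process when the chip reports equal head and
 * tail indices, per the erratum handling above.
 */
static uint8_t tef_len_quirk(uint8_t len, uint8_t obj_num,
			     bool fifo_less_than_half_full, bool tx_ring_has_free)
{
	if (len == 0 && fifo_less_than_half_full && !tx_ring_has_free)
		return obj_num;	/* assume all pending frames have been sent */

	return len;
}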
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
index 202ca0d24d03..413a5cb75c13 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
@@ -11,7 +11,7 @@
#include "mcp251xfd.h"
-static u64 mcp251xfd_timestamp_raw_read(const struct cyclecounter *cc)
+static u64 mcp251xfd_timestamp_raw_read(struct cyclecounter *cc)
{
const struct mcp251xfd_priv *priv;
u32 ts_raw = 0;
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
index dcbbd2b2fae8..085d7101e595 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
@@ -15,6 +15,7 @@
#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>
#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/regmap.h>
@@ -335,13 +336,19 @@
#define MCP251XFD_REG_IOCON_TXCANOD BIT(28)
#define MCP251XFD_REG_IOCON_PM1 BIT(25)
#define MCP251XFD_REG_IOCON_PM0 BIT(24)
+#define MCP251XFD_REG_IOCON_PM(n) (MCP251XFD_REG_IOCON_PM0 << (n))
#define MCP251XFD_REG_IOCON_GPIO1 BIT(17)
#define MCP251XFD_REG_IOCON_GPIO0 BIT(16)
+#define MCP251XFD_REG_IOCON_GPIO(n) (MCP251XFD_REG_IOCON_GPIO0 << (n))
+#define MCP251XFD_REG_IOCON_GPIO_MASK GENMASK(17, 16)
#define MCP251XFD_REG_IOCON_LAT1 BIT(9)
#define MCP251XFD_REG_IOCON_LAT0 BIT(8)
+#define MCP251XFD_REG_IOCON_LAT(n) (MCP251XFD_REG_IOCON_LAT0 << (n))
+#define MCP251XFD_REG_IOCON_LAT_MASK GENMASK(9, 8)
#define MCP251XFD_REG_IOCON_XSTBYEN BIT(6)
#define MCP251XFD_REG_IOCON_TRIS1 BIT(1)
#define MCP251XFD_REG_IOCON_TRIS0 BIT(0)
+#define MCP251XFD_REG_IOCON_TRIS(n) (MCP251XFD_REG_IOCON_TRIS0 << (n))
#define MCP251XFD_REG_CRC 0xe08
#define MCP251XFD_REG_CRC_FERRIE BIT(25)
@@ -670,6 +677,7 @@ struct mcp251xfd_priv {
struct mcp251xfd_devtype_data devtype_data;
struct can_berr_counter bec;
+ struct gpio_chip gc;
};
#define MCP251XFD_IS(_model) \
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 360158c295d3..af52285d5a4e 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -570,7 +570,7 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
else
state = CAN_STATE_ERROR_ACTIVE;
}
- if (skb && state != CAN_STATE_BUS_OFF) {
+ if (likely(skb) && state != CAN_STATE_BUS_OFF) {
cf->can_id |= CAN_ERR_CNT;
cf->data[6] = txerr;
cf->data[7] = rxerr;
@@ -579,11 +579,9 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
/* bus error interrupt */
netdev_dbg(dev, "bus error interrupt\n");
priv->can.can_stats.bus_error++;
- stats->rx_errors++;
+ ecc = readl(priv->base + SUN4I_REG_STA_ADDR);
if (likely(skb)) {
- ecc = readl(priv->base + SUN4I_REG_STA_ADDR);
-
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
switch (ecc & SUN4I_STA_MASK_ERR) {
@@ -601,9 +599,15 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
>> 16;
break;
}
- /* error occurred during transmission? */
- if ((ecc & SUN4I_STA_ERR_DIR) == 0)
+ }
+
+ /* error occurred during transmission? */
+ if ((ecc & SUN4I_STA_ERR_DIR) == 0) {
+ if (likely(skb))
cf->data[2] |= CAN_ERR_PROT_TX;
+ stats->tx_errors++;
+ } else {
+ stats->rx_errors++;
}
}
if (isrc & SUN4I_INT_ERR_PASSIVE) {
@@ -629,10 +633,10 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
tx_state = txerr >= rxerr ? state : 0;
rx_state = txerr <= rxerr ? state : 0;
- if (likely(skb))
- can_change_state(dev, cf, tx_state, rx_state);
- else
- priv->can.state = state;
+ /* The skb allocation might fail, but can_change_state()
+ * handles cf == NULL.
+ */
+ can_change_state(dev, cf, tx_state, rx_state);
if (state == CAN_STATE_BUS_OFF)
can_bus_off(dev);
}
@@ -653,8 +657,8 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id)
u8 isrc, status;
int n = 0;
- while ((isrc = readl(priv->base + SUN4I_REG_INT_ADDR)) &&
- (n < SUN4I_CAN_MAX_IRQ)) {
+ while ((n < SUN4I_CAN_MAX_IRQ) &&
+ (isrc = readl(priv->base + SUN4I_REG_INT_ADDR))) {
n++;
status = readl(priv->base + SUN4I_REG_STA_ADDR);
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 644e8b8eb91e..1d3dbf28b105 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -383,7 +383,7 @@ static void ti_hecc_start(struct net_device *ndev)
* overflows instead of the hardware silently dropping the
* messages.
*/
- mbx_mask = ~BIT(HECC_RX_LAST_MBOX);
+ mbx_mask = ~BIT_U32(HECC_RX_LAST_MBOX);
hecc_write(priv, HECC_CANOPC, mbx_mask);
/* Enable interrupts */
@@ -829,7 +829,6 @@ static const struct net_device_ops ti_hecc_netdev_ops = {
.ndo_open = ti_hecc_open,
.ndo_stop = ti_hecc_close,
.ndo_start_xmit = ti_hecc_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops ti_hecc_ethtool_ops = {
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index 9dae0c71a2e1..cf65a90816b9 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -66,6 +66,7 @@ config CAN_GS_USB
config CAN_KVASER_USB
tristate "Kvaser CAN/USB interface"
+ select NET_DEVLINK
help
This driver adds support for Kvaser CAN/USB devices like Kvaser
Leaf Light, Kvaser USBcan II and Kvaser Memorator Pro 5xHS.
@@ -133,6 +134,17 @@ config CAN_MCBA_USB
This driver supports the CAN BUS Analyzer interface
from Microchip (http://www.microchip.com/development-tools/).
+config CAN_NCT6694
+ tristate "Nuvoton NCT6694 Socket CANfd support"
+ depends on MFD_NCT6694
+ select CAN_RX_OFFLOAD
+ help
+ If you say yes to this option, support will be included for the Nuvoton
+ NCT6694, a USB device that provides a Socket CAN FD controller.
+
+ This driver can also be built as a module. If so, the module will
+ be called nct6694_canfd.
+
config CAN_PEAK_USB
tristate "PEAK PCAN-USB/USB Pro interfaces for CAN 2.0b/CAN-FD"
help
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index 8b11088e9a59..fcafb1ac262e 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -11,5 +11,6 @@ obj-$(CONFIG_CAN_F81604) += f81604.o
obj-$(CONFIG_CAN_GS_USB) += gs_usb.o
obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb/
obj-$(CONFIG_CAN_MCBA_USB) += mcba_usb.o
+obj-$(CONFIG_CAN_NCT6694) += nct6694_canfd.o
obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/
obj-$(CONFIG_CAN_UCAN) += ucan.o
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 050c0b49938a..de8e212a1366 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -335,15 +335,14 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
struct net_device_stats *stats = &dev->netdev->stats;
skb = alloc_can_err_skb(dev->netdev, &cf);
- if (skb == NULL)
- return;
if (msg->type == CPC_MSG_TYPE_CAN_STATE) {
u8 state = msg->msg.can_state;
if (state & SJA1000_SR_BS) {
dev->can.state = CAN_STATE_BUS_OFF;
- cf->can_id |= CAN_ERR_BUSOFF;
+ if (skb)
+ cf->can_id |= CAN_ERR_BUSOFF;
dev->can.can_stats.bus_off++;
can_bus_off(dev->netdev);
@@ -361,44 +360,53 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
/* bus error interrupt */
dev->can.can_stats.bus_error++;
- stats->rx_errors++;
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ if (skb) {
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
- switch (ecc & SJA1000_ECC_MASK) {
- case SJA1000_ECC_BIT:
- cf->data[2] |= CAN_ERR_PROT_BIT;
- break;
- case SJA1000_ECC_FORM:
- cf->data[2] |= CAN_ERR_PROT_FORM;
- break;
- case SJA1000_ECC_STUFF:
- cf->data[2] |= CAN_ERR_PROT_STUFF;
- break;
- default:
- cf->data[3] = ecc & SJA1000_ECC_SEG;
- break;
+ switch (ecc & SJA1000_ECC_MASK) {
+ case SJA1000_ECC_BIT:
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ break;
+ case SJA1000_ECC_FORM:
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+ case SJA1000_ECC_STUFF:
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+ default:
+ cf->data[3] = ecc & SJA1000_ECC_SEG;
+ break;
+ }
}
/* Error occurred during transmission? */
- if ((ecc & SJA1000_ECC_DIR) == 0)
- cf->data[2] |= CAN_ERR_PROT_TX;
+ if ((ecc & SJA1000_ECC_DIR) == 0) {
+ stats->tx_errors++;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_TX;
+ } else {
+ stats->rx_errors++;
+ }
- if (dev->can.state == CAN_STATE_ERROR_WARNING ||
- dev->can.state == CAN_STATE_ERROR_PASSIVE) {
+ if (skb && (dev->can.state == CAN_STATE_ERROR_WARNING ||
+ dev->can.state == CAN_STATE_ERROR_PASSIVE)) {
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = (txerr > rxerr) ?
CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE;
}
} else if (msg->type == CPC_MSG_TYPE_OVERRUN) {
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ }
stats->rx_over_errors++;
stats->rx_errors++;
}
- netif_rx(skb);
+ if (skb)
+ netif_rx(skb);
}
/*
@@ -877,7 +885,6 @@ static const struct net_device_ops ems_usb_netdev_ops = {
.ndo_open = ems_usb_open,
.ndo_stop = ems_usb_close,
.ndo_start_xmit = ems_usb_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops ems_usb_ethtool_ops = {
diff --git a/drivers/net/can/usb/esd_usb.c b/drivers/net/can/usb/esd_usb.c
index 03ad10b01867..08da507faef4 100644
--- a/drivers/net/can/usb/esd_usb.c
+++ b/drivers/net/can/usb/esd_usb.c
@@ -9,6 +9,7 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
+#include <linux/err.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -274,6 +275,7 @@ struct esd_usb {
int net_count;
u32 version;
int rxinitdone;
+ int in_usb_disconnect;
void *rxbuf[ESD_USB_MAX_RX_URBS];
dma_addr_t rxbuf_dma[ESD_USB_MAX_RX_URBS];
};
@@ -480,7 +482,7 @@ static void esd_usb_tx_done_msg(struct esd_usb_net_priv *priv,
static void esd_usb_read_bulk_callback(struct urb *urb)
{
struct esd_usb *dev = urb->context;
- int retval;
+ int err;
int pos = 0;
int i;
@@ -496,7 +498,7 @@ static void esd_usb_read_bulk_callback(struct urb *urb)
default:
dev_info(dev->udev->dev.parent,
- "Rx URB aborted (%d)\n", urb->status);
+ "Rx URB aborted (%pe)\n", ERR_PTR(urb->status));
goto resubmit_urb;
}
@@ -539,15 +541,15 @@ resubmit_urb:
urb->transfer_buffer, ESD_USB_RX_BUFFER_SIZE,
esd_usb_read_bulk_callback, dev);
- retval = usb_submit_urb(urb, GFP_ATOMIC);
- if (retval == -ENODEV) {
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err == -ENODEV) {
for (i = 0; i < dev->net_count; i++) {
if (dev->nets[i])
netif_device_detach(dev->nets[i]->netdev);
}
- } else if (retval) {
+ } else if (err) {
dev_err(dev->udev->dev.parent,
- "failed resubmitting read bulk urb: %d\n", retval);
+ "failed resubmitting read bulk urb: %pe\n", ERR_PTR(err));
}
}
@@ -572,7 +574,7 @@ static void esd_usb_write_bulk_callback(struct urb *urb)
return;
if (urb->status)
- netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status);
+ netdev_info(netdev, "Tx URB aborted (%pe)\n", ERR_PTR(urb->status));
netif_trans_update(netdev);
}
@@ -758,7 +760,7 @@ out:
if (err == -ENODEV)
netif_device_detach(netdev);
if (err)
- netdev_err(netdev, "couldn't start device: %d\n", err);
+ netdev_err(netdev, "couldn't start device: %pe\n", ERR_PTR(err));
kfree(msg);
return err;
@@ -800,7 +802,6 @@ static int esd_usb_open(struct net_device *netdev)
/* finally start device */
err = esd_usb_start(priv);
if (err) {
- netdev_warn(netdev, "couldn't start device: %d\n", err);
close_candev(netdev);
return err;
}
@@ -923,7 +924,7 @@ static netdev_tx_t esd_usb_start_xmit(struct sk_buff *skb,
if (err == -ENODEV)
netif_device_detach(netdev);
else
- netdev_warn(netdev, "failed tx_urb %d\n", err);
+ netdev_warn(netdev, "failed tx_urb %pe\n", ERR_PTR(err));
goto releasebuf;
}
@@ -947,10 +948,11 @@ nourbmem:
return ret;
}
-static int esd_usb_close(struct net_device *netdev)
+/* Stop interface */
+static int esd_usb_stop(struct esd_usb_net_priv *priv)
{
- struct esd_usb_net_priv *priv = netdev_priv(netdev);
union esd_usb_msg *msg;
+ int err;
int i;
msg = kmalloc(sizeof(*msg), GFP_KERNEL);
@@ -964,8 +966,11 @@ static int esd_usb_close(struct net_device *netdev)
msg->filter.option = ESD_USB_ID_ENABLE; /* start with segment 0 */
for (i = 0; i <= ESD_USB_MAX_ID_SEGMENT; i++)
msg->filter.mask[i] = 0;
- if (esd_usb_send_msg(priv->usb, msg) < 0)
- netdev_err(netdev, "sending idadd message failed\n");
+ err = esd_usb_send_msg(priv->usb, msg);
+ if (err < 0) {
+ netdev_err(priv->netdev, "sending idadd message failed: %pe\n", ERR_PTR(err));
+ goto bail;
+ }
/* set CAN controller to reset mode */
msg->hdr.len = sizeof(struct esd_usb_set_baudrate_msg) / sizeof(u32); /* # of 32bit words */
@@ -973,8 +978,25 @@ static int esd_usb_close(struct net_device *netdev)
msg->setbaud.net = priv->index;
msg->setbaud.rsvd = 0;
msg->setbaud.baud = cpu_to_le32(ESD_USB_NO_BAUDRATE);
- if (esd_usb_send_msg(priv->usb, msg) < 0)
- netdev_err(netdev, "sending setbaud message failed\n");
+ err = esd_usb_send_msg(priv->usb, msg);
+ if (err < 0)
+ netdev_err(priv->netdev, "sending setbaud message failed: %pe\n", ERR_PTR(err));
+
+bail:
+ kfree(msg);
+
+ return err;
+}
+
+static int esd_usb_close(struct net_device *netdev)
+{
+ struct esd_usb_net_priv *priv = netdev_priv(netdev);
+ int err = 0;
+
+ if (!priv->usb->in_usb_disconnect) {
+ /* It's moot to try this in usb_disconnect()! */
+ err = esd_usb_stop(priv);
+ }
priv->can.state = CAN_STATE_STOPPED;
@@ -982,16 +1004,13 @@ static int esd_usb_close(struct net_device *netdev)
close_candev(netdev);
- kfree(msg);
-
- return 0;
+ return err;
}
static const struct net_device_ops esd_usb_netdev_ops = {
.ndo_open = esd_usb_open,
.ndo_stop = esd_usb_close,
.ndo_start_xmit = esd_usb_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops esd_usb_ethtool_ops = {
@@ -1098,7 +1117,7 @@ static int esd_usb_3_set_bittiming(struct net_device *netdev)
const struct can_bittiming_const *data_btc = &esd_usb_3_data_bittiming_const;
struct esd_usb_net_priv *priv = netdev_priv(netdev);
struct can_bittiming *nom_bt = &priv->can.bittiming;
- struct can_bittiming *data_bt = &priv->can.data_bittiming;
+ struct can_bittiming *data_bt = &priv->can.fd.data_bittiming;
struct esd_usb_3_set_baudrate_msg_x *baud_x;
union esd_usb_msg *msg;
u16 flags = 0;
@@ -1218,9 +1237,9 @@ static int esd_usb_probe_one_net(struct usb_interface *intf, int index)
priv->can.clock.freq = ESD_USB_3_CAN_CLOCK;
priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
priv->can.bittiming_const = &esd_usb_3_nom_bittiming_const;
- priv->can.data_bittiming_const = &esd_usb_3_data_bittiming_const;
+ priv->can.fd.data_bittiming_const = &esd_usb_3_data_bittiming_const;
priv->can.do_set_bittiming = esd_usb_3_set_bittiming;
- priv->can.do_set_data_bittiming = esd_usb_3_set_bittiming;
+ priv->can.fd.do_set_data_bittiming = esd_usb_3_set_bittiming;
break;
case ESD_USB_CANUSBM_PRODUCT_ID:
@@ -1251,14 +1270,14 @@ static int esd_usb_probe_one_net(struct usb_interface *intf, int index)
err = register_candev(netdev);
if (err) {
- dev_err(&intf->dev, "couldn't register CAN device: %d\n", err);
+ dev_err(&intf->dev, "couldn't register CAN device: %pe\n", ERR_PTR(err));
free_candev(netdev);
err = -ENOMEM;
goto done;
}
dev->nets[index] = priv;
- netdev_info(netdev, "device %s registered\n", netdev->name);
+ netdev_info(netdev, "registered\n");
done:
return err;
@@ -1354,9 +1373,11 @@ static void esd_usb_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
if (dev) {
+ dev->in_usb_disconnect = 1;
for (i = 0; i < dev->net_count; i++) {
if (dev->nets[i]) {
netdev = dev->nets[i]->netdev;
+ netdev_info(netdev, "unregister\n");
unregister_netdev(netdev);
free_candev(netdev);
}
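
The error paths converted above move from printing raw errno values with "%d" to the "%pe" specifier with ERR_PTR(). A minimal illustration of the idiom: with CONFIG_SYMBOLIC_ERRNAME=y the error name is logged, otherwise the numeric value is printed as before.

	int err = -ENODEV;

	/* logs "... failed resubmitting read bulk urb: -ENODEV" rather than "...: -19" */
	dev_err(dev->udev->dev.parent,
		"failed resubmitting read bulk urb: %pe\n", ERR_PTR(err));
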
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c
index 71f24dc0a927..f799233c2b72 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.c
@@ -7,7 +7,7 @@
*
* Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved.
* Copyright (c) 2020 ETAS K.K.. All rights reserved.
- * Copyright (c) 2020-2022 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ * Copyright (c) 2020-2025 Vincent Mailhol <mailhol@kernel.org>
*/
#include <linux/unaligned.h>
@@ -1976,7 +1976,8 @@ static const struct net_device_ops es58x_netdev_ops = {
.ndo_open = es58x_open,
.ndo_stop = es58x_stop,
.ndo_start_xmit = es58x_start_xmit,
- .ndo_eth_ioctl = can_eth_ioctl_hwts,
+ .ndo_hwtstamp_get = can_hwtstamp_get,
+ .ndo_hwtstamp_set = can_hwtstamp_set,
};
static const struct ethtool_ops es58x_ethtool_ops = {
@@ -2059,8 +2060,8 @@ static int es58x_init_priv(struct es58x_device *es58x_dev,
can->bittiming_const = param->bittiming_const;
if (param->ctrlmode_supported & CAN_CTRLMODE_FD) {
- can->data_bittiming_const = param->data_bittiming_const;
- can->tdc_const = param->tdc_const;
+ can->fd.data_bittiming_const = param->data_bittiming_const;
+ can->fd.tdc_const = param->tdc_const;
}
can->bitrate_max = param->bitrate_max;
can->clock = param->clock;
diff --git a/drivers/net/can/usb/etas_es58x/es58x_devlink.c b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
index eee20839d96f..0d155eb1b9e9 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_devlink.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
@@ -248,7 +248,11 @@ static int es58x_devlink_info_get(struct devlink *devlink,
return ret;
}
- return devlink_info_serial_number_put(req, es58x_dev->udev->serial);
+ if (es58x_dev->udev->serial)
+ ret = devlink_info_serial_number_put(req,
+ es58x_dev->udev->serial);
+
+ return ret;
}
const struct devlink_ops es58x_dl_ops = {
diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.c b/drivers/net/can/usb/etas_es58x/es58x_fd.c
index 84ffa1839bac..6476add1c105 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_fd.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_fd.c
@@ -427,12 +427,12 @@ static int es58x_fd_enable_channel(struct es58x_priv *priv)
if (tx_conf_msg.canfd_enabled) {
es58x_fd_convert_bittiming(&tx_conf_msg.data_bittiming,
- &priv->can.data_bittiming);
+ &priv->can.fd.data_bittiming);
- if (can_tdc_is_enabled(&priv->can)) {
+ if (can_fd_tdc_is_enabled(&priv->can)) {
tx_conf_msg.tdc_enabled = 1;
- tx_conf_msg.tdco = cpu_to_le16(priv->can.tdc.tdco);
- tx_conf_msg.tdcf = cpu_to_le16(priv->can.tdc.tdcf);
+ tx_conf_msg.tdco = cpu_to_le16(priv->can.fd.tdc.tdco);
+ tx_conf_msg.tdcf = cpu_to_le16(priv->can.fd.tdc.tdcf);
}
conf_len = ES58X_FD_CANFD_CONF_LEN;
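
This hunk, like the esd_usb changes above and the gs_usb and kvaser_usb changes below, follows the relocation of the CAN FD data-phase fields into a new fd member of struct can_priv. A minimal access sketch using only the member names visible in these diffs; handle_tdc() is a hypothetical placeholder:

	struct can_priv *can = &priv->can;
	struct can_bittiming *nbt = &can->bittiming;		/* nominal phase, unchanged   */
	struct can_bittiming *dbt = &can->fd.data_bittiming;	/* data phase, now under ->fd */

	if (can_fd_tdc_is_enabled(can))				/* was can_tdc_is_enabled()   */
		handle_tdc(can->fd.tdc.tdco, can->fd.tdc.tdcf);
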
diff --git a/drivers/net/can/usb/f81604.c b/drivers/net/can/usb/f81604.c
index bc0c8903fe77..efe61ece79ea 100644
--- a/drivers/net/can/usb/f81604.c
+++ b/drivers/net/can/usb/f81604.c
@@ -526,7 +526,6 @@ static void f81604_handle_can_bus_errors(struct f81604_port_priv *priv,
netdev_dbg(netdev, "bus error interrupt\n");
priv->can.can_stats.bus_error++;
- stats->rx_errors++;
if (skb) {
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -548,10 +547,15 @@ static void f81604_handle_can_bus_errors(struct f81604_port_priv *priv,
/* set error location */
cf->data[3] = data->ecc & F81604_SJA1000_ECC_SEG;
+ }
- /* Error occurred during transmission? */
- if ((data->ecc & F81604_SJA1000_ECC_DIR) == 0)
+ /* Error occurred during transmission? */
+ if ((data->ecc & F81604_SJA1000_ECC_DIR) == 0) {
+ stats->tx_errors++;
+ if (skb)
cf->data[2] |= CAN_ERR_PROT_TX;
+ } else {
+ stats->rx_errors++;
}
set_bit(F81604_CLEAR_ECC, &priv->clear_flags);
@@ -1048,7 +1052,6 @@ static const struct net_device_ops f81604_netdev_ops = {
.ndo_open = f81604_open,
.ndo_stop = f81604_close,
.ndo_start_xmit = f81604_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct can_bittiming_const f81604_bittiming_const = {
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index bc86e9b329fd..e29e85b67fd4 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -43,8 +43,8 @@
#define USB_XYLANTA_SAINT3_VENDOR_ID 0x16d0
#define USB_XYLANTA_SAINT3_PRODUCT_ID 0x0f30
-#define GS_USB_ENDPOINT_IN 1
-#define GS_USB_ENDPOINT_OUT 2
+#define USB_CANNECTIVITY_VENDOR_ID 0x1209
+#define USB_CANNECTIVITY_PRODUCT_ID 0xca01
/* Timestamp 32 bit timer runs at 1 MHz (1 µs tick). Worker accounts
* for timer overflow (will be after ~71 minutes)
@@ -261,14 +261,21 @@ struct canfd_quirk {
u8 quirk;
} __packed;
+/* struct gs_host_frame::echo_id == GS_HOST_FRAME_ECHO_ID_RX indicates
+ * a regular RX'ed CAN frame
+ */
+#define GS_HOST_FRAME_ECHO_ID_RX 0xffffffff
+
struct gs_host_frame {
- u32 echo_id;
- __le32 can_id;
+ struct_group(header,
+ u32 echo_id;
+ __le32 can_id;
- u8 can_dlc;
- u8 channel;
- u8 flags;
- u8 reserved;
+ u8 can_dlc;
+ u8 channel;
+ u8 flags;
+ u8 reserved;
+ );
union {
DECLARE_FLEX_ARRAY(struct classic_can, classic_can);
@@ -289,11 +296,6 @@ struct gs_host_frame {
#define GS_MAX_RX_URBS 30
#define GS_NAPI_WEIGHT 32
-/* Maximum number of interfaces the driver supports per device.
- * Current hardware only supports 3 interfaces. The future may vary.
- */
-#define GS_MAX_INTF 3
-
struct gs_tx_context {
struct gs_can *dev;
unsigned int echo_id;
@@ -324,7 +326,6 @@ struct gs_can {
/* usb interface struct */
struct gs_usb {
- struct gs_can *canch[GS_MAX_INTF];
struct usb_anchor rx_submitted;
struct usb_device *udev;
@@ -336,6 +337,11 @@ struct gs_usb {
unsigned int hf_size_rx;
u8 active_channels;
+ u8 channel_cnt;
+
+ unsigned int pipe_in;
+ unsigned int pipe_out;
+ struct gs_can *canch[] __counted_by(channel_cnt);
};
/* 'allocate' a tx context.
@@ -417,7 +423,7 @@ static inline int gs_usb_get_timestamp(const struct gs_usb *parent,
return 0;
}
-static u64 gs_usb_timestamp_read(const struct cyclecounter *cc) __must_hold(&dev->tc_lock)
+static u64 gs_usb_timestamp_read(struct cyclecounter *cc) __must_hold(&dev->tc_lock)
{
struct gs_usb *parent = container_of(cc, struct gs_usb, cc);
u32 timestamp = 0;
@@ -569,6 +575,37 @@ gs_usb_get_echo_skb(struct gs_can *dev, struct sk_buff *skb,
return len;
}
+static unsigned int
+gs_usb_get_minimum_rx_length(const struct gs_can *dev, const struct gs_host_frame *hf,
+ unsigned int *data_length_p)
+{
+ unsigned int minimum_length, data_length = 0;
+
+ if (hf->flags & GS_CAN_FLAG_FD) {
+ if (hf->echo_id == GS_HOST_FRAME_ECHO_ID_RX)
+ data_length = can_fd_dlc2len(hf->can_dlc);
+
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ /* timestamp follows data field of max size */
+ minimum_length = struct_size(hf, canfd_ts, 1);
+ else
+ minimum_length = sizeof(hf->header) + data_length;
+ } else {
+ if (hf->echo_id == GS_HOST_FRAME_ECHO_ID_RX &&
+ !(hf->can_id & cpu_to_le32(CAN_RTR_FLAG)))
+ data_length = can_cc_dlc2len(hf->can_dlc);
+
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ /* timestamp follows data field of max size */
+ minimum_length = struct_size(hf, classic_can_ts, 1);
+ else
+ minimum_length = sizeof(hf->header) + data_length;
+ }
+
+ *data_length_p = data_length;
+ return minimum_length;
+}
+
static void gs_usb_receive_bulk_callback(struct urb *urb)
{
struct gs_usb *parent = urb->context;
@@ -577,6 +614,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
int rc;
struct net_device_stats *stats;
struct gs_host_frame *hf = urb->transfer_buffer;
+ unsigned int minimum_length, data_length;
struct gs_tx_context *txc;
struct can_frame *cf;
struct canfd_frame *cfd;
@@ -595,8 +633,17 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
return;
}
+ minimum_length = sizeof(hf->header);
+ if (urb->actual_length < minimum_length) {
+ dev_err_ratelimited(&parent->udev->dev,
+ "short read (actual_length=%u, minimum_length=%u)\n",
+ urb->actual_length, minimum_length);
+
+ goto resubmit_urb;
+ }
+
/* device reports out of range channel id */
- if (hf->channel >= GS_MAX_INTF)
+ if (hf->channel >= parent->channel_cnt)
goto device_detach;
dev = parent->canch[hf->channel];
@@ -610,20 +657,33 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
if (!netif_running(netdev))
goto resubmit_urb;
- if (hf->echo_id == -1) { /* normal rx */
+ minimum_length = gs_usb_get_minimum_rx_length(dev, hf, &data_length);
+ if (urb->actual_length < minimum_length) {
+ stats->rx_errors++;
+ stats->rx_length_errors++;
+
+ if (net_ratelimit())
+ netdev_err(netdev,
+ "short read (actual_length=%u, minimum_length=%u)\n",
+ urb->actual_length, minimum_length);
+
+ goto resubmit_urb;
+ }
+
+ if (hf->echo_id == GS_HOST_FRAME_ECHO_ID_RX) { /* normal rx */
if (hf->flags & GS_CAN_FLAG_FD) {
skb = alloc_canfd_skb(netdev, &cfd);
if (!skb)
return;
cfd->can_id = le32_to_cpu(hf->can_id);
- cfd->len = can_fd_dlc2len(hf->can_dlc);
+ cfd->len = data_length;
if (hf->flags & GS_CAN_FLAG_BRS)
cfd->flags |= CANFD_BRS;
if (hf->flags & GS_CAN_FLAG_ESI)
cfd->flags |= CANFD_ESI;
- memcpy(cfd->data, hf->canfd->data, cfd->len);
+ memcpy(cfd->data, hf->canfd->data, data_length);
} else {
skb = alloc_can_skb(netdev, &cf);
if (!skb)
@@ -632,7 +692,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
cf->can_id = le32_to_cpu(hf->can_id);
can_frame_set_cc_len(cf, hf->can_dlc, dev->can.ctrlmode);
- memcpy(cf->data, hf->classic_can->data, 8);
+ memcpy(cf->data, hf->classic_can->data, data_length);
/* ERROR frames tell us information about the controller */
if (le32_to_cpu(hf->can_id) & CAN_ERR_FLAG)
@@ -687,8 +747,8 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
resubmit_urb:
usb_fill_bulk_urb(urb, parent->udev,
- usb_rcvbulkpipe(parent->udev, GS_USB_ENDPOINT_IN),
- hf, dev->parent->hf_size_rx,
+ parent->pipe_in,
+ hf, parent->hf_size_rx,
gs_usb_receive_bulk_callback, parent);
rc = usb_submit_urb(urb, GFP_ATOMIC);
@@ -696,7 +756,7 @@ resubmit_urb:
/* USB failure take down all interfaces */
if (rc == -ENODEV) {
device_detach:
- for (rc = 0; rc < GS_MAX_INTF; rc++) {
+ for (rc = 0; rc < parent->channel_cnt; rc++) {
if (parent->canch[rc])
netif_device_detach(parent->canch[rc]->netdev);
}
@@ -725,7 +785,7 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
static int gs_usb_set_data_bittiming(struct net_device *netdev)
{
struct gs_can *dev = netdev_priv(netdev);
- struct can_bittiming *bt = &dev->can.data_bittiming;
+ struct can_bittiming *bt = &dev->can.fd.data_bittiming;
struct gs_device_bittiming dbt = {
.prop_seg = cpu_to_le32(bt->prop_seg),
.phase_seg1 = cpu_to_le32(bt->phase_seg1),
@@ -751,8 +811,21 @@ static void gs_usb_xmit_callback(struct urb *urb)
struct gs_can *dev = txc->dev;
struct net_device *netdev = dev->netdev;
- if (urb->status)
- netdev_info(netdev, "usb xmit fail %u\n", txc->echo_id);
+ if (!urb->status)
+ return;
+
+ if (urb->status != -ESHUTDOWN && net_ratelimit())
+ netdev_info(netdev, "failed to xmit URB %u: %pe\n",
+ txc->echo_id, ERR_PTR(urb->status));
+
+ netdev->stats.tx_dropped++;
+ netdev->stats.tx_errors++;
+
+ can_free_echo_skb(netdev, txc->echo_id, NULL);
+ gs_free_tx_context(txc);
+ atomic_dec(&dev->active_tx_urbs);
+
+ netif_wake_queue(netdev);
}
static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
@@ -819,7 +892,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
}
usb_fill_bulk_urb(urb, dev->udev,
- usb_sndbulkpipe(dev->udev, GS_USB_ENDPOINT_OUT),
+ dev->parent->pipe_out,
hf, dev->hf_size_tx,
gs_usb_xmit_callback, txc);
@@ -925,8 +998,7 @@ static int gs_can_open(struct net_device *netdev)
/* fill, anchor, and submit rx urb */
usb_fill_bulk_urb(urb,
dev->udev,
- usb_rcvbulkpipe(dev->udev,
- GS_USB_ENDPOINT_IN),
+ dev->parent->pipe_in,
buf,
dev->parent->hf_size_rx,
gs_usb_receive_bulk_callback, parent);
@@ -1089,12 +1161,25 @@ static int gs_can_close(struct net_device *netdev)
return 0;
}
-static int gs_can_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+static int gs_can_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *cfg)
{
const struct gs_can *dev = netdev_priv(netdev);
if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
- return can_eth_ioctl_hwts(netdev, ifr, cmd);
+ return can_hwtstamp_get(netdev, cfg);
+
+ return -EOPNOTSUPP;
+}
+
+static int gs_can_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ const struct gs_can *dev = netdev_priv(netdev);
+
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ return can_hwtstamp_set(netdev, cfg, extack);
return -EOPNOTSUPP;
}
@@ -1103,8 +1188,8 @@ static const struct net_device_ops gs_usb_netdev_ops = {
.ndo_open = gs_can_open,
.ndo_stop = gs_can_close,
.ndo_start_xmit = gs_can_start_xmit,
- .ndo_change_mtu = can_change_mtu,
- .ndo_eth_ioctl = gs_can_eth_ioctl,
+ .ndo_hwtstamp_get = gs_can_hwtstamp_get,
+ .ndo_hwtstamp_set = gs_can_hwtstamp_set,
};
static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
@@ -1247,6 +1332,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
netdev->flags |= IFF_ECHO; /* we support full roundtrip echo */
netdev->dev_id = channel;
+ netdev->dev_port = channel;
/* dev setup */
strcpy(dev->bt_const.name, KBUILD_MODNAME);
@@ -1298,8 +1384,8 @@ static struct gs_can *gs_make_candev(unsigned int channel,
/* The data bit timing will be overwritten, if
* GS_CAN_FEATURE_BT_CONST_EXT is set.
*/
- dev->can.data_bittiming_const = &dev->bt_const;
- dev->can.do_set_data_bittiming = gs_usb_set_data_bittiming;
+ dev->can.fd.data_bittiming_const = &dev->bt_const;
+ dev->can.fd.do_set_data_bittiming = gs_usb_set_data_bittiming;
}
if (feature & GS_CAN_FEATURE_TERMINATION) {
@@ -1379,7 +1465,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
dev->data_bt_const.brp_max = le32_to_cpu(bt_const_extended.dbrp_max);
dev->data_bt_const.brp_inc = le32_to_cpu(bt_const_extended.dbrp_inc);
- dev->can.data_bittiming_const = &dev->data_bt_const;
+ dev->can.fd.data_bittiming_const = &dev->data_bt_const;
}
can_rx_offload_add_manual(netdev, &dev->offload, GS_NAPI_WEIGHT);
@@ -1413,6 +1499,7 @@ static int gs_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(intf);
+ struct usb_endpoint_descriptor *ep_in, *ep_out;
struct gs_host_frame *hf;
struct gs_usb *parent;
struct gs_host_config hconf = {
@@ -1422,6 +1509,13 @@ static int gs_usb_probe(struct usb_interface *intf,
unsigned int icount, i;
int rc;
+ rc = usb_find_common_endpoints(intf->cur_altsetting,
+ &ep_in, &ep_out, NULL, NULL);
+ if (rc) {
+ dev_err(&intf->dev, "Required endpoints not found\n");
+ return rc;
+ }
+
/* send host config */
rc = usb_control_msg_send(udev, 0,
GS_USB_BREQ_HOST_FORMAT,
@@ -1450,22 +1544,28 @@ static int gs_usb_probe(struct usb_interface *intf,
icount = dconf.icount + 1;
dev_info(&intf->dev, "Configuring for %u interfaces\n", icount);
- if (icount > GS_MAX_INTF) {
+ if (icount > type_max(parent->channel_cnt)) {
dev_err(&intf->dev,
"Driver cannot handle more that %u CAN interfaces\n",
- GS_MAX_INTF);
+ type_max(parent->channel_cnt));
return -EINVAL;
}
- parent = kzalloc(sizeof(*parent), GFP_KERNEL);
+ parent = kzalloc(struct_size(parent, canch, icount), GFP_KERNEL);
if (!parent)
return -ENOMEM;
+ parent->channel_cnt = icount;
+
init_usb_anchor(&parent->rx_submitted);
usb_set_intfdata(intf, parent);
parent->udev = udev;
+ /* store the detected endpoints */
+ parent->pipe_in = usb_rcvbulkpipe(parent->udev, ep_in->bEndpointAddress);
+ parent->pipe_out = usb_sndbulkpipe(parent->udev, ep_out->bEndpointAddress);
+
for (i = 0; i < icount; i++) {
unsigned int hf_size_rx = 0;
@@ -1517,7 +1617,7 @@ static void gs_usb_disconnect(struct usb_interface *intf)
return;
}
- for (i = 0; i < GS_MAX_INTF; i++)
+ for (i = 0; i < parent->channel_cnt; i++)
if (parent->canch[i])
gs_destroy_candev(parent->canch[i]);
@@ -1535,6 +1635,8 @@ static const struct usb_device_id gs_usb_table[] = {
USB_ABE_CANDEBUGGER_FD_PRODUCT_ID, 0) },
{ USB_DEVICE_INTERFACE_NUMBER(USB_XYLANTA_SAINT3_VENDOR_ID,
USB_XYLANTA_SAINT3_PRODUCT_ID, 0) },
+ { USB_DEVICE_INTERFACE_NUMBER(USB_CANNECTIVITY_VENDOR_ID,
+ USB_CANNECTIVITY_PRODUCT_ID, 0) },
{} /* Terminating entry */
};
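
gs_usb now sizes its per-device structure from the channel count reported by the firmware instead of the fixed GS_MAX_INTF. The allocation pattern from the probe hunk, shown on its own:

	/* struct_size() computes sizeof(*parent) + icount * sizeof(parent->canch[0]) with
	 * overflow checking; __counted_by(channel_cnt) on the flexible array lets fortify
	 * and UBSAN bounds-check accesses to canch[] against the stored count.
	 */
	parent = kzalloc(struct_size(parent, canch, icount), GFP_KERNEL);
	if (!parent)
		return -ENOMEM;
	parent->channel_cnt = icount;

The struct_group(header, ...) change serves a similar purpose for struct gs_host_frame: sizeof(hf->header) names the fixed 12-byte prefix that the new short-read checks compare against urb->actual_length.
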
diff --git a/drivers/net/can/usb/kvaser_usb/Makefile b/drivers/net/can/usb/kvaser_usb/Makefile
index cf260044f0b9..41b4a11555aa 100644
--- a/drivers/net/can/usb/kvaser_usb/Makefile
+++ b/drivers/net/can/usb/kvaser_usb/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o
-kvaser_usb-y = kvaser_usb_core.o kvaser_usb_leaf.o kvaser_usb_hydra.o
+kvaser_usb-y = kvaser_usb_core.o kvaser_usb_devlink.o kvaser_usb_leaf.o kvaser_usb_hydra.o
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
index 078496d9b7ba..46a1b6907a50 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
@@ -27,6 +27,7 @@
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/usb.h>
+#include <net/devlink.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -47,6 +48,10 @@
#define KVASER_USB_CAP_EXT_CAP 0x02
#define KVASER_USB_HYDRA_CAP_EXT_CMD 0x04
+#define KVASER_USB_SW_VERSION_MAJOR_MASK GENMASK(31, 24)
+#define KVASER_USB_SW_VERSION_MINOR_MASK GENMASK(23, 16)
+#define KVASER_USB_SW_VERSION_BUILD_MASK GENMASK(15, 0)
+
struct kvaser_usb_dev_cfg;
enum kvaser_usb_leaf_family {
@@ -54,6 +59,11 @@ enum kvaser_usb_leaf_family {
KVASER_USBCAN,
};
+enum kvaser_usb_led_state {
+ KVASER_USB_LED_ON = 0,
+ KVASER_USB_LED_OFF = 1,
+};
+
#define KVASER_USB_HYDRA_MAX_CMD_LEN 128
struct kvaser_usb_dev_card_data_hydra {
u8 channel_to_he[KVASER_USB_MAX_NET_DEVICES];
@@ -78,6 +88,12 @@ struct kvaser_usb_tx_urb_context {
u32 echo_index;
};
+struct kvaser_usb_fw_version {
+ u8 major;
+ u8 minor;
+ u16 build;
+};
+
struct kvaser_usb_busparams {
__le32 bitrate;
u8 tseg1;
@@ -96,12 +112,15 @@ struct kvaser_usb {
struct usb_endpoint_descriptor *bulk_in, *bulk_out;
struct usb_anchor rx_submitted;
+ u32 ean[2];
+ u32 serial_number;
+ struct kvaser_usb_fw_version fw_version;
+ u8 hw_revision;
+ unsigned int nchannels;
/* @max_tx_urbs: Firmware-reported maximum number of outstanding,
* not yet ACKed, transmissions on this device. This value is
* also used as a sentinel for marking free tx contexts.
*/
- u32 fw_version;
- unsigned int nchannels;
unsigned int max_tx_urbs;
struct kvaser_usb_dev_card_data card_data;
@@ -112,6 +131,7 @@ struct kvaser_usb {
struct kvaser_usb_net_priv {
struct can_priv can;
+ struct devlink_port devlink_port;
struct can_berr_counter bec;
/* subdriver-specific data */
@@ -137,7 +157,7 @@ struct kvaser_usb_net_priv {
* @dev_set_mode: used for can.do_set_mode
* @dev_set_bittiming: used for can.do_set_bittiming
* @dev_get_busparams: readback arbitration busparams
- * @dev_set_data_bittiming: used for can.do_set_data_bittiming
+ * @dev_set_data_bittiming: used for can.fd.do_set_data_bittiming
* @dev_get_data_busparams: readback data busparams
* @dev_get_berr_counter: used for can.do_get_berr_counter
*
@@ -149,6 +169,7 @@ struct kvaser_usb_net_priv {
* @dev_get_software_details: get software details
* @dev_get_card_info: get card info
* @dev_get_capabilities: discover device capabilities
+ * @dev_set_led: turn on/off device LED
*
* @dev_set_opt_mode: set ctrlmod
* @dev_start_chip: start the CAN controller
@@ -176,6 +197,9 @@ struct kvaser_usb_dev_ops {
int (*dev_get_software_details)(struct kvaser_usb *dev);
int (*dev_get_card_info)(struct kvaser_usb *dev);
int (*dev_get_capabilities)(struct kvaser_usb *dev);
+ int (*dev_set_led)(struct kvaser_usb_net_priv *priv,
+ enum kvaser_usb_led_state state,
+ u16 duration_ms);
int (*dev_set_opt_mode)(const struct kvaser_usb_net_priv *priv);
int (*dev_start_chip)(struct kvaser_usb_net_priv *priv);
int (*dev_stop_chip)(struct kvaser_usb_net_priv *priv);
@@ -204,6 +228,11 @@ struct kvaser_usb_dev_cfg {
extern const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops;
extern const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops;
+extern const struct devlink_ops kvaser_usb_devlink_ops;
+
+int kvaser_usb_devlink_port_register(struct kvaser_usb_net_priv *priv);
+void kvaser_usb_devlink_port_unregister(struct kvaser_usb_net_priv *priv);
+
void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv);
int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len,
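
The firmware version is no longer kept as a packed u32; the GENMASK() definitions added above split it into major/minor/build, extracted with FIELD_GET() from <linux/bitfield.h> in the hydra and leaf code further down. A worked decode with an invented raw value:

	u32 fw_version = 0x01020035;	/* hypothetical value read from the device */
	u8  major = FIELD_GET(KVASER_USB_SW_VERSION_MAJOR_MASK, fw_version);	/* 1           */
	u8  minor = FIELD_GET(KVASER_USB_SW_VERSION_MINOR_MASK, fw_version);	/* 2           */
	u16 build = FIELD_GET(KVASER_USB_SW_VERSION_BUILD_MASK, fw_version);	/* 0x0035 = 53 */
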
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index 7d12776ab63e..62701ec34272 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -364,10 +364,13 @@ resubmit_urb:
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err == -ENODEV) {
for (i = 0; i < dev->nchannels; i++) {
- if (!dev->nets[i])
+ struct kvaser_usb_net_priv *priv;
+
+ priv = dev->nets[i];
+ if (!priv)
continue;
- netif_device_detach(dev->nets[i]->netdev);
+ netif_device_detach(priv->netdev);
}
} else if (err) {
dev_err(&dev->intf->dev,
@@ -592,7 +595,7 @@ static int kvaser_usb_set_data_bittiming(struct net_device *netdev)
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
struct kvaser_usb *dev = priv->dev;
const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
- struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
struct kvaser_usb_busparams busparams;
int tseg1 = dbt->prop_seg + dbt->phase_seg1;
int tseg2 = dbt->phase_seg2;
@@ -753,40 +756,70 @@ freeurb:
return ret;
}
+static int kvaser_usb_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ const struct kvaser_usb_dev_ops *ops = priv->dev->driver_info->ops;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return 3; /* 3 On/Off cycles per second */
+
+ case ETHTOOL_ID_ON:
+ return ops->dev_set_led(priv, KVASER_USB_LED_ON, 1000);
+
+ case ETHTOOL_ID_OFF:
+ return ops->dev_set_led(priv, KVASER_USB_LED_OFF, 1000);
+
+ case ETHTOOL_ID_INACTIVE:
+ /* Turn LED off and restore standard function after 1ms */
+ return ops->dev_set_led(priv, KVASER_USB_LED_OFF, 1);
+
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct net_device_ops kvaser_usb_netdev_ops = {
.ndo_open = kvaser_usb_open,
.ndo_stop = kvaser_usb_close,
- .ndo_eth_ioctl = can_eth_ioctl_hwts,
.ndo_start_xmit = kvaser_usb_start_xmit,
- .ndo_change_mtu = can_change_mtu,
+ .ndo_hwtstamp_get = can_hwtstamp_get,
+ .ndo_hwtstamp_set = can_hwtstamp_set,
};
static const struct ethtool_ops kvaser_usb_ethtool_ops = {
.get_ts_info = can_ethtool_op_get_ts_info_hwts,
+ .set_phys_id = kvaser_usb_set_phys_id,
};
static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
{
const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
int i;
+ struct kvaser_usb_net_priv *priv;
for (i = 0; i < dev->nchannels; i++) {
- if (!dev->nets[i])
+ priv = dev->nets[i];
+ if (!priv)
continue;
- unregister_candev(dev->nets[i]->netdev);
+ unregister_candev(priv->netdev);
}
kvaser_usb_unlink_all_urbs(dev);
for (i = 0; i < dev->nchannels; i++) {
- if (!dev->nets[i])
+ priv = dev->nets[i];
+ if (!priv)
continue;
if (ops->dev_remove_channel)
- ops->dev_remove_channel(dev->nets[i]);
+ ops->dev_remove_channel(priv);
- free_candev(dev->nets[i]->netdev);
+ kvaser_usb_devlink_port_unregister(priv);
+ free_candev(priv->netdev);
}
}
@@ -818,7 +851,8 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
init_completion(&priv->stop_comp);
init_completion(&priv->flush_comp);
init_completion(&priv->get_busparams_comp);
- priv->can.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC |
+ CAN_CTRLMODE_BERR_REPORTING;
priv->dev = dev;
priv->netdev = netdev;
@@ -841,8 +875,8 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
priv->can.ctrlmode_supported |= dev->card_data.ctrlmode_supported;
if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) {
- priv->can.data_bittiming_const = dev->cfg->data_bittiming_const;
- priv->can.do_set_data_bittiming = kvaser_usb_set_data_bittiming;
+ priv->can.fd.data_bittiming_const = dev->cfg->data_bittiming_const;
+ priv->can.fd.do_set_data_bittiming = kvaser_usb_set_data_bittiming;
}
netdev->flags |= IFF_ECHO;
@@ -851,26 +885,35 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
netdev->ethtool_ops = &kvaser_usb_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->intf->dev);
netdev->dev_id = channel;
+ netdev->dev_port = channel;
dev->nets[channel] = priv;
if (ops->dev_init_channel) {
err = ops->dev_init_channel(priv);
if (err)
- goto err;
+ goto candev_free;
+ }
+
+ err = kvaser_usb_devlink_port_register(priv);
+ if (err) {
+ dev_err(&dev->intf->dev, "Failed to register devlink port\n");
+ goto candev_free;
}
err = register_candev(netdev);
if (err) {
dev_err(&dev->intf->dev, "Failed to register CAN device\n");
- goto err;
+ goto unregister_devlink_port;
}
netdev_dbg(netdev, "device registered\n");
return 0;
-err:
+unregister_devlink_port:
+ kvaser_usb_devlink_port_unregister(priv);
+candev_free:
free_candev(netdev);
dev->nets[channel] = NULL;
return err;
@@ -880,6 +923,7 @@ static int kvaser_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct kvaser_usb *dev;
+ struct devlink *devlink;
int err;
int i;
const struct kvaser_usb_driver_info *driver_info;
@@ -889,17 +933,20 @@ static int kvaser_usb_probe(struct usb_interface *intf,
if (!driver_info)
return -ENODEV;
- dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
- if (!dev)
+ devlink = devlink_alloc(&kvaser_usb_devlink_ops, sizeof(*dev), &intf->dev);
+ if (!devlink)
return -ENOMEM;
+ dev = devlink_priv(devlink);
dev->intf = intf;
dev->driver_info = driver_info;
ops = driver_info->ops;
err = ops->dev_setup_endpoints(dev);
- if (err)
- return dev_err_probe(&intf->dev, err, "Cannot get usb endpoint(s)");
+ if (err) {
+ dev_err_probe(&intf->dev, err, "Cannot get usb endpoint(s)");
+ goto free_devlink;
+ }
dev->udev = interface_to_usbdev(intf);
@@ -910,55 +957,66 @@ static int kvaser_usb_probe(struct usb_interface *intf,
dev->card_data.ctrlmode_supported = 0;
dev->card_data.capabilities = 0;
err = ops->dev_init_card(dev);
- if (err)
- return dev_err_probe(&intf->dev, err,
- "Failed to initialize card\n");
+ if (err) {
+ dev_err_probe(&intf->dev, err,
+ "Failed to initialize card\n");
+ goto free_devlink;
+ }
err = ops->dev_get_software_info(dev);
- if (err)
- return dev_err_probe(&intf->dev, err,
- "Cannot get software info\n");
+ if (err) {
+ dev_err_probe(&intf->dev, err,
+ "Cannot get software info\n");
+ goto free_devlink;
+ }
if (ops->dev_get_software_details) {
err = ops->dev_get_software_details(dev);
- if (err)
- return dev_err_probe(&intf->dev, err,
- "Cannot get software details\n");
+ if (err) {
+ dev_err_probe(&intf->dev, err,
+ "Cannot get software details\n");
+ goto free_devlink;
+ }
}
- if (WARN_ON(!dev->cfg))
- return -ENODEV;
-
- dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
- ((dev->fw_version >> 24) & 0xff),
- ((dev->fw_version >> 16) & 0xff),
- (dev->fw_version & 0xffff));
+ if (WARN_ON(!dev->cfg)) {
+ err = -ENODEV;
+ goto free_devlink;
+ }
dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs);
err = ops->dev_get_card_info(dev);
- if (err)
- return dev_err_probe(&intf->dev, err,
- "Cannot get card info\n");
+ if (err) {
+ dev_err_probe(&intf->dev, err,
+ "Cannot get card info\n");
+ goto free_devlink;
+ }
if (ops->dev_get_capabilities) {
err = ops->dev_get_capabilities(dev);
if (err) {
- kvaser_usb_remove_interfaces(dev);
- return dev_err_probe(&intf->dev, err,
- "Cannot get capabilities\n");
+ dev_err_probe(&intf->dev, err,
+ "Cannot get capabilities\n");
+ goto remove_interfaces;
}
}
for (i = 0; i < dev->nchannels; i++) {
err = kvaser_usb_init_one(dev, i);
- if (err) {
- kvaser_usb_remove_interfaces(dev);
- return err;
- }
+ if (err)
+ goto remove_interfaces;
}
+ devlink_register(devlink);
return 0;
+
+remove_interfaces:
+ kvaser_usb_remove_interfaces(dev);
+free_devlink:
+ devlink_free(devlink);
+
+ return err;
}
static void kvaser_usb_disconnect(struct usb_interface *intf)
@@ -971,6 +1029,8 @@ static void kvaser_usb_disconnect(struct usb_interface *intf)
return;
kvaser_usb_remove_interfaces(dev);
+ devlink_unregister(priv_to_devlink(dev));
+ devlink_free(priv_to_devlink(dev));
}
static struct usb_driver kvaser_usb_driver = {
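
The driver state is now embedded in a devlink instance, which changes the allocation and teardown order in probe/disconnect. The lifetime, reduced to the pairing of calls used above:

	/* probe */
	devlink = devlink_alloc(&kvaser_usb_devlink_ops, sizeof(*dev), &intf->dev);
	dev = devlink_priv(devlink);		/* driver data lives inside the devlink object */
	/* ... endpoints, card info, ports and candevs are set up here ... */
	devlink_register(devlink);		/* publish last, once everything is ready */

	/* disconnect */
	devlink_unregister(priv_to_devlink(dev));
	devlink_free(priv_to_devlink(dev));

With .set_phys_id wired into the ethtool ops, ethtool --identify canX exercises the new dev_set_led callbacks to blink the channel LED.
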
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_devlink.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_devlink.c
new file mode 100644
index 000000000000..e838b82298ae
--- /dev/null
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_devlink.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0
+/* kvaser_usb devlink functions
+ *
+ * Copyright (C) 2025 KVASER AB, Sweden. All rights reserved.
+ */
+#include "kvaser_usb.h"
+
+#include <linux/netdevice.h>
+#include <net/devlink.h>
+
+#define KVASER_USB_EAN_MSB 0x00073301
+
+static int kvaser_usb_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct kvaser_usb *dev = devlink_priv(devlink);
+ char buf[] = "73301XXXXXXXXXX";
+ int ret;
+
+ if (dev->serial_number) {
+ snprintf(buf, sizeof(buf), "%u", dev->serial_number);
+ ret = devlink_info_serial_number_put(req, buf);
+ if (ret)
+ return ret;
+ }
+
+ if (dev->fw_version.major) {
+ snprintf(buf, sizeof(buf), "%u.%u.%u",
+ dev->fw_version.major,
+ dev->fw_version.minor,
+ dev->fw_version.build);
+ ret = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
+ if (ret)
+ return ret;
+ }
+
+ if (dev->hw_revision) {
+ snprintf(buf, sizeof(buf), "%u", dev->hw_revision);
+ ret = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_REV,
+ buf);
+ if (ret)
+ return ret;
+ }
+
+ if (dev->ean[1] == KVASER_USB_EAN_MSB) {
+ snprintf(buf, sizeof(buf), "%x%08x", dev->ean[1], dev->ean[0]);
+ ret = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_ID,
+ buf);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+const struct devlink_ops kvaser_usb_devlink_ops = {
+ .info_get = kvaser_usb_devlink_info_get,
+};
+
+int kvaser_usb_devlink_port_register(struct kvaser_usb_net_priv *priv)
+{
+ int ret;
+ struct devlink_port_attrs attrs = {
+ .flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ .phys.port_number = priv->channel,
+ };
+ devlink_port_attrs_set(&priv->devlink_port, &attrs);
+
+ ret = devlink_port_register(priv_to_devlink(priv->dev),
+ &priv->devlink_port, priv->channel);
+ if (ret)
+ return ret;
+
+ SET_NETDEV_DEVLINK_PORT(priv->netdev, &priv->devlink_port);
+
+ return 0;
+}
+
+void kvaser_usb_devlink_port_unregister(struct kvaser_usb_net_priv *priv)
+{
+ devlink_port_unregister(&priv->devlink_port);
+}
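
devlink info exposes the Kvaser EAN as the board id by concatenating the two 32-bit halves with "%x%08x"; the value is stored as packed BCD, so the hex digits are the printed EAN-13 digits. A worked example with a made-up low word:

	u32 ean[2] = { 0x30000671, 0x00073301 };	/* ean[0] is an invented example */
	char buf[] = "73301XXXXXXXXXX";

	/* ean[1] == KVASER_USB_EAN_MSB, so "73301" + "30000671" -> "7330130000671" */
	snprintf(buf, sizeof(buf), "%x%08x", ean[1], ean[0]);
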
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index 3764b263add3..a59f20dad692 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -12,6 +12,7 @@
* distinguish between ERROR_WARNING and ERROR_ACTIVE.
*/
+#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/gfp.h>
@@ -67,6 +68,8 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_rt;
#define CMD_SET_BUSPARAMS_RESP 85
#define CMD_GET_CAPABILITIES_REQ 95
#define CMD_GET_CAPABILITIES_RESP 96
+#define CMD_LED_ACTION_REQ 101
+#define CMD_LED_ACTION_RESP 102
#define CMD_RX_MESSAGE 106
#define CMD_MAP_CHANNEL_REQ 200
#define CMD_MAP_CHANNEL_RESP 201
@@ -111,7 +114,7 @@ struct kvaser_cmd_card_info {
__le32 clock_res;
__le32 mfg_date;
__le32 ean[2];
- u8 hw_version;
+ u8 hw_revision;
u8 usb_mode;
u8 hw_type;
u8 reserved0;
@@ -217,6 +220,22 @@ struct kvaser_cmd_get_busparams_res {
u8 reserved[20];
} __packed;
+/* The device has two LEDs per CAN channel
+ * The LSB of the action field controls the state:
+ * 0 = ON
+ * 1 = OFF
+ * The remaining bits of the action field are the LED index
+ */
+#define KVASER_USB_HYDRA_LED_IDX_MASK GENMASK(31, 1)
+#define KVASER_USB_HYDRA_LED_YELLOW_CH0_IDX 3
+#define KVASER_USB_HYDRA_LEDS_PER_CHANNEL 2
+struct kvaser_cmd_led_action_req {
+ u8 action;
+ u8 padding;
+ __le16 duration_ms;
+ u8 reserved[24];
+} __packed;
+
/* Ctrl modes */
#define KVASER_USB_HYDRA_CTRLMODE_NORMAL 0x01
#define KVASER_USB_HYDRA_CTRLMODE_LISTEN 0x02
@@ -299,6 +318,8 @@ struct kvaser_cmd {
struct kvaser_cmd_get_busparams_req get_busparams_req;
struct kvaser_cmd_get_busparams_res get_busparams_res;
+ struct kvaser_cmd_led_action_req led_action_req;
+
struct kvaser_cmd_chip_state_event chip_state_event;
struct kvaser_cmd_set_ctrlmode set_ctrlmode;
@@ -926,6 +947,42 @@ kvaser_usb_hydra_bus_status_to_can_state(const struct kvaser_usb_net_priv *priv,
}
}
+static void kvaser_usb_hydra_change_state(struct kvaser_usb_net_priv *priv,
+ const struct can_berr_counter *bec,
+ struct can_frame *cf,
+ enum can_state new_state)
+{
+ struct net_device *netdev = priv->netdev;
+ enum can_state old_state = priv->can.state;
+ enum can_state tx_state, rx_state;
+
+ tx_state = (bec->txerr >= bec->rxerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+ rx_state = (bec->txerr <= bec->rxerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+ can_change_state(netdev, cf, tx_state, rx_state);
+
+ if (new_state == CAN_STATE_BUS_OFF && old_state < CAN_STATE_BUS_OFF) {
+ if (priv->can.restart_ms == 0)
+ kvaser_usb_hydra_send_simple_cmd_async(priv, CMD_STOP_CHIP_REQ);
+
+ can_bus_off(netdev);
+ }
+
+ if (priv->can.restart_ms &&
+ old_state >= CAN_STATE_BUS_OFF &&
+ new_state < CAN_STATE_BUS_OFF) {
+ priv->can.can_stats.restarts++;
+ if (cf)
+ cf->can_id |= CAN_ERR_RESTARTED;
+ }
+ if (cf && new_state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = bec->txerr;
+ cf->data[7] = bec->rxerr;
+ }
+}
+
static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv,
u8 bus_status,
const struct can_berr_counter *bec)
@@ -951,41 +1008,11 @@ static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv,
return;
skb = alloc_can_err_skb(netdev, &cf);
- if (skb) {
- enum can_state tx_state, rx_state;
-
- tx_state = (bec->txerr >= bec->rxerr) ?
- new_state : CAN_STATE_ERROR_ACTIVE;
- rx_state = (bec->txerr <= bec->rxerr) ?
- new_state : CAN_STATE_ERROR_ACTIVE;
- can_change_state(netdev, cf, tx_state, rx_state);
- }
-
- if (new_state == CAN_STATE_BUS_OFF && old_state < CAN_STATE_BUS_OFF) {
- if (!priv->can.restart_ms)
- kvaser_usb_hydra_send_simple_cmd_async
- (priv, CMD_STOP_CHIP_REQ);
-
- can_bus_off(netdev);
- }
-
- if (!skb) {
+ kvaser_usb_hydra_change_state(priv, bec, cf, new_state);
+ if (skb)
+ netif_rx(skb);
+ else
netdev_warn(netdev, "No memory left for err_skb\n");
- return;
- }
-
- if (priv->can.restart_ms &&
- old_state >= CAN_STATE_BUS_OFF &&
- new_state < CAN_STATE_BUS_OFF)
- priv->can.can_stats.restarts++;
-
- if (new_state != CAN_STATE_BUS_OFF) {
- cf->can_id |= CAN_ERR_CNT;
- cf->data[6] = bec->txerr;
- cf->data[7] = bec->rxerr;
- }
-
- netif_rx(skb);
}
static void kvaser_usb_hydra_state_event(const struct kvaser_usb *dev,
@@ -1078,9 +1105,8 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
{
struct net_device *netdev = priv->netdev;
struct net_device_stats *stats = &netdev->stats;
- struct can_frame *cf;
- struct sk_buff *skb;
- struct skb_shared_hwtstamps *shhwtstamps;
+ struct can_frame *cf = NULL;
+ struct sk_buff *skb = NULL;
struct can_berr_counter bec;
enum can_state new_state, old_state;
u8 bus_status;
@@ -1096,52 +1122,26 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
kvaser_usb_hydra_bus_status_to_can_state(priv, bus_status, &bec,
&new_state);
- skb = alloc_can_err_skb(netdev, &cf);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ skb = alloc_can_err_skb(netdev, &cf);
+ if (new_state != old_state)
+ kvaser_usb_hydra_change_state(priv, &bec, cf, new_state);
- if (new_state != old_state) {
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
if (skb) {
- enum can_state tx_state, rx_state;
-
- tx_state = (bec.txerr >= bec.rxerr) ?
- new_state : CAN_STATE_ERROR_ACTIVE;
- rx_state = (bec.txerr <= bec.rxerr) ?
- new_state : CAN_STATE_ERROR_ACTIVE;
-
- can_change_state(netdev, cf, tx_state, rx_state);
-
- if (priv->can.restart_ms &&
- old_state >= CAN_STATE_BUS_OFF &&
- new_state < CAN_STATE_BUS_OFF)
- cf->can_id |= CAN_ERR_RESTARTED;
- }
-
- if (new_state == CAN_STATE_BUS_OFF) {
- if (!priv->can.restart_ms)
- kvaser_usb_hydra_send_simple_cmd_async
- (priv, CMD_STOP_CHIP_REQ);
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
- can_bus_off(netdev);
+ shhwtstamps->hwtstamp = hwtstamp;
+ cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ netif_rx(skb);
+ } else {
+ stats->rx_dropped++;
+ netdev_warn(netdev, "No memory left for err_skb\n");
}
}
- if (!skb) {
- stats->rx_dropped++;
- netdev_warn(netdev, "No memory left for err_skb\n");
- return;
- }
-
- shhwtstamps = skb_hwtstamps(skb);
- shhwtstamps->hwtstamp = hwtstamp;
-
- cf->can_id |= CAN_ERR_BUSERROR;
- if (new_state != CAN_STATE_BUS_OFF) {
- cf->can_id |= CAN_ERR_CNT;
- cf->data[6] = bec.txerr;
- cf->data[7] = bec.rxerr;
- }
-
- netif_rx(skb);
-
priv->bec.txerr = bec.txerr;
priv->bec.rxerr = bec.rxerr;
}
@@ -1411,6 +1411,7 @@ static void kvaser_usb_hydra_handle_cmd_std(const struct kvaser_usb *dev,
/* Ignored commands */
case CMD_SET_BUSPARAMS_RESP:
case CMD_SET_BUSPARAMS_FD_RESP:
+ case CMD_LED_ACTION_RESP:
break;
default:
@@ -1838,6 +1839,7 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
size_t cmd_len;
int err;
u32 flags;
+ u32 fw_version;
struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -1862,7 +1864,10 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
if (err)
goto end;
- dev->fw_version = le32_to_cpu(cmd->sw_detail_res.sw_version);
+ fw_version = le32_to_cpu(cmd->sw_detail_res.sw_version);
+ dev->fw_version.major = FIELD_GET(KVASER_USB_SW_VERSION_MAJOR_MASK, fw_version);
+ dev->fw_version.minor = FIELD_GET(KVASER_USB_SW_VERSION_MINOR_MASK, fw_version);
+ dev->fw_version.build = FIELD_GET(KVASER_USB_SW_VERSION_BUILD_MASK, fw_version);
flags = le32_to_cpu(cmd->sw_detail_res.sw_flags);
if (flags & KVASER_USB_HYDRA_SW_FLAG_FW_BAD) {
@@ -1913,6 +1918,10 @@ static int kvaser_usb_hydra_get_card_info(struct kvaser_usb *dev)
err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_CARD_INFO_RESP, &cmd);
if (err)
return err;
+ dev->ean[1] = le32_to_cpu(cmd.card_info.ean[1]);
+ dev->ean[0] = le32_to_cpu(cmd.card_info.ean[0]);
+ dev->serial_number = le32_to_cpu(cmd.card_info.serial_number);
+ dev->hw_revision = cmd.card_info.hw_revision;
dev->nchannels = cmd.card_info.nchannels;
if (dev->nchannels > KVASER_USB_MAX_NET_DEVICES)
@@ -1967,6 +1976,36 @@ static int kvaser_usb_hydra_get_capabilities(struct kvaser_usb *dev)
return 0;
}
+static int kvaser_usb_hydra_set_led(struct kvaser_usb_net_priv *priv,
+ enum kvaser_usb_led_state state,
+ u16 duration_ms)
+{
+ struct kvaser_usb *dev = priv->dev;
+ struct kvaser_cmd *cmd;
+ size_t cmd_len;
+ int ret;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_LED_ACTION_REQ;
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+ kvaser_usb_hydra_set_cmd_dest_he(cmd, dev->card_data.hydra.sysdbg_he);
+ kvaser_usb_hydra_set_cmd_transid(cmd, kvaser_usb_hydra_get_next_transid(dev));
+
+ cmd->led_action_req.duration_ms = cpu_to_le16(duration_ms);
+ cmd->led_action_req.action = state |
+ FIELD_PREP(KVASER_USB_HYDRA_LED_IDX_MASK,
+ KVASER_USB_HYDRA_LED_YELLOW_CH0_IDX +
+ KVASER_USB_HYDRA_LEDS_PER_CHANNEL * priv->channel);
+
+ ret = kvaser_usb_send_cmd(dev, cmd, cmd_len);
+ kfree(cmd);
+
+ return ret;
+}
+
static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
{
struct kvaser_usb *dev = priv->dev;
@@ -2170,6 +2209,7 @@ const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops = {
.dev_get_software_details = kvaser_usb_hydra_get_software_details,
.dev_get_card_info = kvaser_usb_hydra_get_card_info,
.dev_get_capabilities = kvaser_usb_hydra_get_capabilities,
+ .dev_set_led = kvaser_usb_hydra_set_led,
.dev_set_opt_mode = kvaser_usb_hydra_set_opt_mode,
.dev_start_chip = kvaser_usb_hydra_start_chip,
.dev_stop_chip = kvaser_usb_hydra_stop_chip,
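
In the LED command above, the action byte carries the on/off state in bit 0 and the LED index in the remaining bits. A worked value for the yellow LED of channel 1:

	/* idx = 3 (yellow, channel 0) + 2 LEDs per channel * 1 = 5 */
	u8 action = KVASER_USB_LED_ON |
		    FIELD_PREP(KVASER_USB_HYDRA_LED_IDX_MASK,
			       KVASER_USB_HYDRA_LED_YELLOW_CH0_IDX +
			       KVASER_USB_HYDRA_LEDS_PER_CHANNEL * 1);
	/* action == (5 << 1) | 0 == 0x0a */
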
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
index 6b9122ab1464..1167d38344f1 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -10,6 +10,7 @@
* Copyright (C) 2015 Valeo S.A.
*/
+#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/gfp.h>
@@ -81,6 +82,8 @@
#define CMD_FLUSH_QUEUE_REPLY 68
#define CMD_GET_CAPABILITIES_REQ 95
#define CMD_GET_CAPABILITIES_RESP 96
+#define CMD_LED_ACTION_REQ 101
+#define CMD_LED_ACTION_RESP 102
#define CMD_LEAF_LOG_MESSAGE 106
@@ -135,7 +138,7 @@ struct kvaser_cmd_cardinfo {
__le32 padding0;
__le32 clock_resolution;
__le32 mfgdate;
- u8 ean[8];
+ __le32 ean[2];
u8 hw_revision;
union {
struct {
@@ -173,6 +176,21 @@ struct kvaser_cmd_busparams {
struct kvaser_usb_busparams busparams;
} __packed;
+/* The device has one LED per CAN channel
+ * The LSB of the action field controls the state:
+ * 0 = ON
+ * 1 = OFF
+ * The remaining bits of the action field are the LED index
+ */
+#define KVASER_USB_LEAF_LED_IDX_MASK GENMASK(31, 1)
+#define KVASER_USB_LEAF_LED_YELLOW_CH0_IDX 2
+struct kvaser_cmd_led_action_req {
+ u8 tid;
+ u8 action;
+ __le16 duration_ms;
+ u8 padding[24];
+} __packed;
+
struct kvaser_cmd_tx_can {
u8 channel;
u8 tid;
@@ -359,6 +377,8 @@ struct kvaser_cmd {
struct kvaser_cmd_cardinfo cardinfo;
struct kvaser_cmd_busparams busparams;
+ struct kvaser_cmd_led_action_req led_action_req;
+
struct kvaser_cmd_rx_can_header rx_can_header;
struct kvaser_cmd_tx_acknowledge_header tx_acknowledge_header;
@@ -409,6 +429,7 @@ static const u8 kvaser_usb_leaf_cmd_sizes_leaf[] = {
[CMD_ERROR_EVENT] = kvaser_fsize(u.leaf.error_event),
/* ignored events: */
[CMD_FLUSH_QUEUE_REPLY] = CMD_SIZE_ANY,
+ [CMD_LED_ACTION_RESP] = CMD_SIZE_ANY,
};
static const u8 kvaser_usb_leaf_cmd_sizes_usbcan[] = {
@@ -423,6 +444,8 @@ static const u8 kvaser_usb_leaf_cmd_sizes_usbcan[] = {
[CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.can_error_event),
[CMD_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event),
[CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = kvaser_fsize(u.usbcan.clk_overflow_event),
+ /* ignored events: */
+ [CMD_LED_ACTION_RESP] = CMD_SIZE_ANY,
};
/* Summary of a kvaser error event, for a unified Leaf/Usbcan error
@@ -662,7 +685,7 @@ static int kvaser_usb_leaf_wait_cmd(const struct kvaser_usb *dev, u8 id,
* for further details.
*/
if (tmp->len == 0) {
- pos = round_up(pos,
+ pos = round_up(pos + 1,
le16_to_cpu
(dev->bulk_in->wMaxPacketSize));
continue;
@@ -718,9 +741,13 @@ static int kvaser_usb_leaf_send_simple_cmd(const struct kvaser_usb *dev,
static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev,
const struct leaf_cmd_softinfo *softinfo)
{
+ u32 fw_version;
u32 sw_options = le32_to_cpu(softinfo->sw_options);
- dev->fw_version = le32_to_cpu(softinfo->fw_version);
+ fw_version = le32_to_cpu(softinfo->fw_version);
+ dev->fw_version.major = FIELD_GET(KVASER_USB_SW_VERSION_MAJOR_MASK, fw_version);
+ dev->fw_version.minor = FIELD_GET(KVASER_USB_SW_VERSION_MINOR_MASK, fw_version);
+ dev->fw_version.build = FIELD_GET(KVASER_USB_SW_VERSION_BUILD_MASK, fw_version);
dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx);
if (sw_options & KVASER_USB_LEAF_SWOPTION_EXT_CAP)
@@ -761,6 +788,7 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
{
struct kvaser_cmd cmd;
int err;
+ u32 fw_version;
err = kvaser_usb_leaf_send_simple_cmd(dev, CMD_GET_SOFTWARE_INFO, 0);
if (err)
@@ -775,7 +803,13 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
kvaser_usb_leaf_get_software_info_leaf(dev, &cmd.u.leaf.softinfo);
break;
case KVASER_USBCAN:
- dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version);
+ fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version);
+ dev->fw_version.major = FIELD_GET(KVASER_USB_SW_VERSION_MAJOR_MASK,
+ fw_version);
+ dev->fw_version.minor = FIELD_GET(KVASER_USB_SW_VERSION_MINOR_MASK,
+ fw_version);
+ dev->fw_version.build = FIELD_GET(KVASER_USB_SW_VERSION_BUILD_MASK,
+ fw_version);
dev->max_tx_urbs =
le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx);
dev->cfg = &kvaser_usb_leaf_usbcan_dev_cfg;
@@ -820,6 +854,10 @@ static int kvaser_usb_leaf_get_card_info(struct kvaser_usb *dev)
(dev->driver_info->family == KVASER_USBCAN &&
dev->nchannels > MAX_USBCAN_NET_DEVICES))
return -EINVAL;
+ dev->ean[1] = le32_to_cpu(cmd.u.cardinfo.ean[1]);
+ dev->ean[0] = le32_to_cpu(cmd.u.cardinfo.ean[0]);
+ dev->serial_number = le32_to_cpu(cmd.u.cardinfo.serial_number);
+ dev->hw_revision = cmd.u.cardinfo.hw_revision;
return 0;
}
@@ -924,6 +962,34 @@ static int kvaser_usb_leaf_get_capabilities_leaf(struct kvaser_usb *dev)
return 0;
}
+static int kvaser_usb_leaf_set_led(struct kvaser_usb_net_priv *priv,
+ enum kvaser_usb_led_state state,
+ u16 duration_ms)
+{
+ struct kvaser_usb *dev = priv->dev;
+ struct kvaser_cmd *cmd;
+ int ret;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->id = CMD_LED_ACTION_REQ;
+ cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_led_action_req);
+ cmd->u.led_action_req.tid = 0xff;
+
+ cmd->u.led_action_req.duration_ms = cpu_to_le16(duration_ms);
+ cmd->u.led_action_req.action = state |
+ FIELD_PREP(KVASER_USB_LEAF_LED_IDX_MASK,
+ KVASER_USB_LEAF_LED_YELLOW_CH0_IDX +
+ priv->channel);
+
+ ret = kvaser_usb_send_cmd(dev, cmd, cmd->len);
+ kfree(cmd);
+
+ return ret;
+}
+
static int kvaser_usb_leaf_get_capabilities(struct kvaser_usb *dev)
{
int err = 0;
@@ -1120,10 +1186,8 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
const struct kvaser_usb_err_summary *es)
{
- struct can_frame *cf;
- struct can_frame tmp_cf = { .can_id = CAN_ERR_FLAG,
- .len = CAN_ERR_DLC };
- struct sk_buff *skb;
+ struct can_frame *cf = NULL;
+ struct sk_buff *skb = NULL;
struct net_device_stats *stats;
struct kvaser_usb_net_priv *priv;
struct kvaser_usb_net_leaf_priv *leaf;
@@ -1143,18 +1207,10 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
if (!netif_running(priv->netdev))
return;
- /* Update all of the CAN interface's state and error counters before
- * trying any memory allocation that can actually fail with -ENOMEM.
- *
- * We send a temporary stack-allocated error CAN frame to
- * can_change_state() for the very same reason.
- *
- * TODO: Split can_change_state() responsibility between updating the
- * CAN interface's state and counters, and the setting up of CAN error
- * frame ID and data to userspace. Remove stack allocation afterwards.
- */
old_state = priv->can.state;
- kvaser_usb_leaf_rx_error_update_can_state(priv, es, &tmp_cf);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ skb = alloc_can_err_skb(priv->netdev, &cf);
+ kvaser_usb_leaf_rx_error_update_can_state(priv, es, cf);
new_state = priv->can.state;
/* If there are errors, request status updates periodically as we do
@@ -1168,13 +1224,6 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
schedule_delayed_work(&leaf->chip_state_req_work,
msecs_to_jiffies(500));
- skb = alloc_can_err_skb(priv->netdev, &cf);
- if (!skb) {
- stats->rx_dropped++;
- return;
- }
- memcpy(cf, &tmp_cf, sizeof(*cf));
-
if (new_state != old_state) {
if (es->status &
(M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
@@ -1187,11 +1236,20 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
if (priv->can.restart_ms &&
old_state == CAN_STATE_BUS_OFF &&
new_state < CAN_STATE_BUS_OFF) {
- cf->can_id |= CAN_ERR_RESTARTED;
+ if (cf)
+ cf->can_id |= CAN_ERR_RESTARTED;
netif_carrier_on(priv->netdev);
}
}
+ if (!skb) {
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
+ stats->rx_dropped++;
+ netdev_warn(priv->netdev, "No memory left for err_skb\n");
+ }
+ return;
+ }
+
switch (dev->driver_info->family) {
case KVASER_LEAF:
if (es->leaf.error_factor) {
@@ -1646,6 +1704,8 @@ static void kvaser_usb_leaf_handle_command(struct kvaser_usb *dev,
if (dev->driver_info->family != KVASER_LEAF)
goto warn;
break;
+ case CMD_LED_ACTION_RESP:
+ break;
default:
warn: dev_warn(&dev->intf->dev, "Unhandled command (%d)\n", cmd->id);
@@ -1672,7 +1732,7 @@ static void kvaser_usb_leaf_read_bulk_callback(struct kvaser_usb *dev,
* number of events in case of a heavy rx load on the bus.
*/
if (cmd->len == 0) {
- pos = round_up(pos, le16_to_cpu
+ pos = round_up(pos + 1, le16_to_cpu
(dev->bulk_in->wMaxPacketSize));
continue;
}
@@ -1935,6 +1995,7 @@ const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = {
.dev_get_software_details = NULL,
.dev_get_card_info = kvaser_usb_leaf_get_card_info,
.dev_get_capabilities = kvaser_usb_leaf_get_capabilities,
+ .dev_set_led = kvaser_usb_leaf_set_led,
.dev_set_opt_mode = kvaser_usb_leaf_set_opt_mode,
.dev_start_chip = kvaser_usb_leaf_start_chip,
.dev_stop_chip = kvaser_usb_leaf_stop_chip,
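
The two round_up() changes in this file adjust the skip-to-next-USB-packet logic for zero-length commands: round_up(pos, sz) returns pos unchanged when pos is already a multiple of sz, so a zero-length command sitting exactly on a packet boundary would leave the parser stuck at the same offset. Adding 1 before rounding guarantees forward progress:

	/* with wMaxPacketSize == 64 */
	round_up(64, 64);	/* == 64, no progress      */
	round_up(64 + 1, 64);	/* == 128, the next packet */
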
diff --git a/drivers/net/can/usb/nct6694_canfd.c b/drivers/net/can/usb/nct6694_canfd.c
new file mode 100644
index 000000000000..dd6df2ec3742
--- /dev/null
+++ b/drivers/net/can/usb/nct6694_canfd.c
@@ -0,0 +1,831 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Nuvoton NCT6694 SocketCAN (CAN FD) driver based on the USB interface.
+ *
+ * Copyright (C) 2025 Nuvoton Technology Corp.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/can/dev.h>
+#include <linux/can/rx-offload.h>
+#include <linux/ethtool.h>
+#include <linux/idr.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/mfd/nct6694.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+
+#define DEVICE_NAME "nct6694-canfd"
+
+/* USB command module type for NCT6694 CANfd controller.
+ * This defines the module type used for communication with the NCT6694
+ * CANfd controller over the USB interface.
+ */
+#define NCT6694_CANFD_MOD 0x05
+
+/* Command 00h - CAN Setting and Initialization */
+#define NCT6694_CANFD_SETTING 0x00
+#define NCT6694_CANFD_SETTING_ACTIVE_CTRL1 BIT(0)
+#define NCT6694_CANFD_SETTING_ACTIVE_CTRL2 BIT(1)
+#define NCT6694_CANFD_SETTING_ACTIVE_NBTP_DBTP BIT(2)
+#define NCT6694_CANFD_SETTING_CTRL1_MON BIT(0)
+#define NCT6694_CANFD_SETTING_CTRL1_NISO BIT(1)
+#define NCT6694_CANFD_SETTING_CTRL1_LBCK BIT(2)
+#define NCT6694_CANFD_SETTING_NBTP_NTSEG2 GENMASK(6, 0)
+#define NCT6694_CANFD_SETTING_NBTP_NTSEG1 GENMASK(15, 8)
+#define NCT6694_CANFD_SETTING_NBTP_NBRP GENMASK(24, 16)
+#define NCT6694_CANFD_SETTING_NBTP_NSJW GENMASK(31, 25)
+#define NCT6694_CANFD_SETTING_DBTP_DSJW GENMASK(3, 0)
+#define NCT6694_CANFD_SETTING_DBTP_DTSEG2 GENMASK(7, 4)
+#define NCT6694_CANFD_SETTING_DBTP_DTSEG1 GENMASK(12, 8)
+#define NCT6694_CANFD_SETTING_DBTP_DBRP GENMASK(20, 16)
+#define NCT6694_CANFD_SETTING_DBTP_TDC BIT(23)
+
+/* Command 01h - CAN Information */
+#define NCT6694_CANFD_INFORMATION 0x01
+#define NCT6694_CANFD_INFORMATION_SEL 0x00
+
+/* Command 02h - CAN Event */
+#define NCT6694_CANFD_EVENT 0x02
+#define NCT6694_CANFD_EVENT_SEL(idx, mask) \
+ ((idx ? 0x80 : 0x00) | ((mask) & 0x7F))
+
+#define NCT6694_CANFD_EVENT_MASK GENMASK(5, 0)
+#define NCT6694_CANFD_EVT_TX_FIFO_EMPTY BIT(7) /* Read-clear */
+#define NCT6694_CANFD_EVT_RX_DATA_LOST BIT(5) /* Read-clear */
+#define NCT6694_CANFD_EVT_RX_DATA_IN BIT(7) /* Read-clear */
+
+/* Command 10h - CAN Deliver */
+#define NCT6694_CANFD_DELIVER 0x10
+#define NCT6694_CANFD_DELIVER_SEL(buf_cnt) \
+ ((buf_cnt) & 0xFF)
+
+/* Command 11h - CAN Receive */
+#define NCT6694_CANFD_RECEIVE 0x11
+#define NCT6694_CANFD_RECEIVE_SEL(idx, buf_cnt) \
+ ((idx ? 0x80 : 0x00) | ((buf_cnt) & 0x7F))
+
+#define NCT6694_CANFD_FRAME_TAG(idx) (0xC0 | (idx))
+#define NCT6694_CANFD_FRAME_FLAG_EFF BIT(0)
+#define NCT6694_CANFD_FRAME_FLAG_RTR BIT(1)
+#define NCT6694_CANFD_FRAME_FLAG_FD BIT(2)
+#define NCT6694_CANFD_FRAME_FLAG_BRS BIT(3)
+#define NCT6694_CANFD_FRAME_FLAG_ERR BIT(4)
+
+#define NCT6694_NAPI_WEIGHT 32
+
+enum nct6694_event_err {
+ NCT6694_CANFD_EVT_ERR_NO_ERROR = 0,
+ NCT6694_CANFD_EVT_ERR_CRC_ERROR,
+ NCT6694_CANFD_EVT_ERR_STUFF_ERROR,
+ NCT6694_CANFD_EVT_ERR_ACK_ERROR,
+ NCT6694_CANFD_EVT_ERR_FORM_ERROR,
+ NCT6694_CANFD_EVT_ERR_BIT_ERROR,
+ NCT6694_CANFD_EVT_ERR_TIMEOUT_ERROR,
+ NCT6694_CANFD_EVT_ERR_UNKNOWN_ERROR,
+};
+
+enum nct6694_event_status {
+ NCT6694_CANFD_EVT_STS_ERROR_ACTIVE = 0,
+ NCT6694_CANFD_EVT_STS_ERROR_PASSIVE,
+ NCT6694_CANFD_EVT_STS_BUS_OFF,
+ NCT6694_CANFD_EVT_STS_WARNING,
+};
+
+struct __packed nct6694_canfd_setting {
+ __le32 nbr;
+ __le32 dbr;
+ u8 active;
+ u8 reserved[3];
+ __le16 ctrl1;
+ __le16 ctrl2;
+ __le32 nbtp;
+ __le32 dbtp;
+};
+
+struct __packed nct6694_canfd_information {
+ u8 tx_fifo_cnt;
+ u8 rx_fifo_cnt;
+ u8 reserved[2];
+ __le32 can_clk;
+};
+
+struct __packed nct6694_canfd_event {
+ u8 err;
+ u8 status;
+ u8 tx_evt;
+ u8 rx_evt;
+ u8 rec;
+ u8 tec;
+ u8 reserved[2];
+};
+
+struct __packed nct6694_canfd_frame {
+ u8 tag;
+ u8 flag;
+ u8 reserved;
+ u8 length;
+ __le32 id;
+ u8 data[CANFD_MAX_DLEN];
+};
+
+struct nct6694_canfd_priv {
+ struct can_priv can; /* must be the first member */
+ struct can_rx_offload offload;
+ struct net_device *ndev;
+ struct nct6694 *nct6694;
+ struct workqueue_struct *wq;
+ struct work_struct tx_work;
+ struct nct6694_canfd_frame tx;
+ struct nct6694_canfd_frame rx;
+ struct nct6694_canfd_event event[2];
+ struct can_berr_counter bec;
+};
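
The "must be the first member" comment above is load-bearing: the CAN device core retrieves its state with netdev_priv() and uses the result directly as a struct can_priv, so keeping can first makes the two pointers alias. A small sketch, given a registered ndev:

	struct nct6694_canfd_priv *priv = netdev_priv(ndev);
	struct can_priv *can = netdev_priv(ndev);	/* same address, because 'can' is first */

	WARN_ON((void *)can != (void *)&priv->can);
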
+
+static inline struct nct6694_canfd_priv *rx_offload_to_priv(struct can_rx_offload *offload)
+{
+ return container_of(offload, struct nct6694_canfd_priv, offload);
+}
+
+static const struct can_bittiming_const nct6694_canfd_bittiming_nominal_const = {
+ .name = DEVICE_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 256,
+ .tseg2_min = 1,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 512,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const nct6694_canfd_bittiming_data_const = {
+ .name = DEVICE_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 32,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 32,
+ .brp_inc = 1,
+};
+
+static void nct6694_canfd_rx_offload(struct can_rx_offload *offload,
+ struct sk_buff *skb)
+{
+ struct nct6694_canfd_priv *priv = rx_offload_to_priv(offload);
+ int ret;
+
+ ret = can_rx_offload_queue_tail(offload, skb);
+ if (ret)
+ priv->ndev->stats.rx_fifo_errors++;
+}
+
+static void nct6694_canfd_handle_lost_msg(struct net_device *ndev)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ netdev_dbg(ndev, "RX FIFO overflow, message(s) lost.\n");
+
+ stats->rx_errors++;
+ stats->rx_over_errors++;
+
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (!skb)
+ return;
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+
+ nct6694_canfd_rx_offload(&priv->offload, skb);
+}
+
+static void nct6694_canfd_handle_rx(struct net_device *ndev, u8 rx_evt)
+{
+ struct net_device_stats *stats = &ndev->stats;
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ struct nct6694_canfd_frame *frame = &priv->rx;
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_CANFD_MOD,
+ .cmd = NCT6694_CANFD_RECEIVE,
+ .sel = NCT6694_CANFD_RECEIVE_SEL(ndev->dev_port, 1),
+ .len = cpu_to_le16(sizeof(*frame))
+ };
+ struct sk_buff *skb;
+ int ret;
+
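+	/* Read one frame for this port from the device RX FIFO */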
+ ret = nct6694_read_msg(priv->nct6694, &cmd_hd, frame);
+ if (ret)
+ return;
+
+ if (frame->flag & NCT6694_CANFD_FRAME_FLAG_FD) {
+ struct canfd_frame *cfd;
+
+ skb = alloc_canfd_skb(priv->ndev, &cfd);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ cfd->can_id = le32_to_cpu(frame->id);
+ cfd->len = canfd_sanitize_len(frame->length);
+ if (frame->flag & NCT6694_CANFD_FRAME_FLAG_EFF)
+ cfd->can_id |= CAN_EFF_FLAG;
+ if (frame->flag & NCT6694_CANFD_FRAME_FLAG_BRS)
+ cfd->flags |= CANFD_BRS;
+ if (frame->flag & NCT6694_CANFD_FRAME_FLAG_ERR)
+ cfd->flags |= CANFD_ESI;
+
+ memcpy(cfd->data, frame->data, cfd->len);
+ } else {
+ struct can_frame *cf;
+
+ skb = alloc_can_skb(priv->ndev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ cf->can_id = le32_to_cpu(frame->id);
+ cf->len = can_cc_dlc2len(frame->length);
+ if (frame->flag & NCT6694_CANFD_FRAME_FLAG_EFF)
+ cf->can_id |= CAN_EFF_FLAG;
+
+ if (frame->flag & NCT6694_CANFD_FRAME_FLAG_RTR)
+ cf->can_id |= CAN_RTR_FLAG;
+ else
+ memcpy(cf->data, frame->data, cf->len);
+ }
+
+ nct6694_canfd_rx_offload(&priv->offload, skb);
+}
+
+static int nct6694_canfd_get_berr_counter(const struct net_device *ndev,
+ struct can_berr_counter *bec)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+
+ *bec = priv->bec;
+
+ return 0;
+}
+
+static void nct6694_canfd_handle_state_change(struct net_device *ndev, u8 status)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ enum can_state new_state, rx_state, tx_state;
+ struct can_berr_counter bec;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ nct6694_canfd_get_berr_counter(ndev, &bec);
+ can_state_get_by_berr_counter(ndev, &bec, &tx_state, &rx_state);
+
+ new_state = max(tx_state, rx_state);
+
+ /* state hasn't changed */
+ if (new_state == priv->can.state)
+ return;
+
+ skb = alloc_can_err_skb(ndev, &cf);
+
+ can_change_state(ndev, cf, tx_state, rx_state);
+
+ if (new_state == CAN_STATE_BUS_OFF) {
+ can_bus_off(ndev);
+ } else if (cf) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
+
+ if (skb)
+ nct6694_canfd_rx_offload(&priv->offload, skb);
+}
+
+static void nct6694_canfd_handle_bus_err(struct net_device *ndev, u8 bus_err)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ priv->can.can_stats.bus_error++;
+
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (cf)
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+ switch (bus_err) {
+ case NCT6694_CANFD_EVT_ERR_CRC_ERROR:
+ netdev_dbg(ndev, "CRC error\n");
+ ndev->stats.rx_errors++;
+ if (cf)
+ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+ break;
+
+ case NCT6694_CANFD_EVT_ERR_STUFF_ERROR:
+ netdev_dbg(ndev, "Stuff error\n");
+ ndev->stats.rx_errors++;
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+
+ case NCT6694_CANFD_EVT_ERR_ACK_ERROR:
+ netdev_dbg(ndev, "Ack error\n");
+ ndev->stats.tx_errors++;
+ if (cf) {
+ cf->can_id |= CAN_ERR_ACK;
+ cf->data[2] |= CAN_ERR_PROT_TX;
+ }
+ break;
+
+ case NCT6694_CANFD_EVT_ERR_FORM_ERROR:
+ netdev_dbg(ndev, "Form error\n");
+ ndev->stats.rx_errors++;
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+
+ case NCT6694_CANFD_EVT_ERR_BIT_ERROR:
+ netdev_dbg(ndev, "Bit error\n");
+ ndev->stats.tx_errors++;
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT;
+ break;
+
+ default:
+ break;
+ }
+
+ if (skb)
+ nct6694_canfd_rx_offload(&priv->offload, skb);
+}
+
+static void nct6694_canfd_handle_tx(struct net_device *ndev)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+
+ stats->tx_bytes += can_rx_offload_get_echo_skb_queue_tail(&priv->offload,
+ 0, NULL);
+ stats->tx_packets++;
+ netif_wake_queue(ndev);
+}
+
+static irqreturn_t nct6694_canfd_irq(int irq, void *data)
+{
+ struct net_device *ndev = data;
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ struct nct6694_canfd_event *event = &priv->event[ndev->dev_port];
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_CANFD_MOD,
+ .cmd = NCT6694_CANFD_EVENT,
+ .sel = NCT6694_CANFD_EVENT_SEL(ndev->dev_port, NCT6694_CANFD_EVENT_MASK),
+ .len = cpu_to_le16(sizeof(priv->event))
+ };
+ irqreturn_t handled = IRQ_NONE;
+ int ret;
+
+ ret = nct6694_read_msg(priv->nct6694, &cmd_hd, priv->event);
+ if (ret < 0)
+ return handled;
+
+ if (event->rx_evt & NCT6694_CANFD_EVT_RX_DATA_IN) {
+ nct6694_canfd_handle_rx(ndev, event->rx_evt);
+ handled = IRQ_HANDLED;
+ }
+
+ if (event->rx_evt & NCT6694_CANFD_EVT_RX_DATA_LOST) {
+ nct6694_canfd_handle_lost_msg(ndev);
+ handled = IRQ_HANDLED;
+ }
+
+ if (event->status) {
+ nct6694_canfd_handle_state_change(ndev, event->status);
+ handled = IRQ_HANDLED;
+ }
+
+ if (event->err != NCT6694_CANFD_EVT_ERR_NO_ERROR) {
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ nct6694_canfd_handle_bus_err(ndev, event->err);
+ handled = IRQ_HANDLED;
+ }
+
+ if (event->tx_evt & NCT6694_CANFD_EVT_TX_FIFO_EMPTY) {
+ nct6694_canfd_handle_tx(ndev);
+ handled = IRQ_HANDLED;
+ }
+
+ if (handled)
+ can_rx_offload_threaded_irq_finish(&priv->offload);
+
+ priv->bec.rxerr = event->rec;
+ priv->bec.txerr = event->tec;
+
+ return handled;
+}
+
+static void nct6694_canfd_tx_work(struct work_struct *work)
+{
+ struct nct6694_canfd_priv *priv = container_of(work,
+ struct nct6694_canfd_priv,
+ tx_work);
+ struct nct6694_canfd_frame *frame = &priv->tx;
+ struct net_device *ndev = priv->ndev;
+ struct net_device_stats *stats = &ndev->stats;
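+	/* echo_skb[0] was stored by ndo_start_xmit before this work was queued */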
+ struct sk_buff *skb = priv->can.echo_skb[0];
+ static const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_CANFD_MOD,
+ .cmd = NCT6694_CANFD_DELIVER,
+ .sel = NCT6694_CANFD_DELIVER_SEL(1),
+ .len = cpu_to_le16(sizeof(*frame))
+ };
+ u32 txid;
+ int err;
+
+ memset(frame, 0, sizeof(*frame));
+
+ frame->tag = NCT6694_CANFD_FRAME_TAG(ndev->dev_port);
+
+ if (can_is_canfd_skb(skb)) {
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+
+ if (cfd->flags & CANFD_BRS)
+ frame->flag |= NCT6694_CANFD_FRAME_FLAG_BRS;
+
+ if (cfd->can_id & CAN_EFF_FLAG) {
+ txid = cfd->can_id & CAN_EFF_MASK;
+ frame->flag |= NCT6694_CANFD_FRAME_FLAG_EFF;
+ } else {
+ txid = cfd->can_id & CAN_SFF_MASK;
+ }
+ frame->flag |= NCT6694_CANFD_FRAME_FLAG_FD;
+ frame->id = cpu_to_le32(txid);
+ frame->length = canfd_sanitize_len(cfd->len);
+
+ memcpy(frame->data, cfd->data, frame->length);
+ } else {
+ struct can_frame *cf = (struct can_frame *)skb->data;
+
+ if (cf->can_id & CAN_EFF_FLAG) {
+ txid = cf->can_id & CAN_EFF_MASK;
+ frame->flag |= NCT6694_CANFD_FRAME_FLAG_EFF;
+ } else {
+ txid = cf->can_id & CAN_SFF_MASK;
+ }
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ frame->flag |= NCT6694_CANFD_FRAME_FLAG_RTR;
+ else
+ memcpy(frame->data, cf->data, cf->len);
+
+ frame->id = cpu_to_le32(txid);
+ frame->length = cf->len;
+ }
+
+ err = nct6694_write_msg(priv->nct6694, &cmd_hd, frame);
+ if (err) {
+ can_free_echo_skb(ndev, 0, NULL);
+ stats->tx_dropped++;
+ stats->tx_errors++;
+ netif_wake_queue(ndev);
+ }
+}
+
+static netdev_tx_t nct6694_canfd_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+
+ if (can_dev_dropped_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
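+	/* Only one frame is in flight at a time; the queue is woken again
+	 * from the TX event handler or the TX work error path.
+	 */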
+ netif_stop_queue(ndev);
+ can_put_echo_skb(skb, ndev, 0, 0);
+ queue_work(priv->wq, &priv->tx_work);
+
+ return NETDEV_TX_OK;
+}
+
+static int nct6694_canfd_start(struct net_device *ndev)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ const struct can_bittiming *n_bt = &priv->can.bittiming;
+ const struct can_bittiming *d_bt = &priv->can.fd.data_bittiming;
+ struct nct6694_canfd_setting *setting __free(kfree) = NULL;
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_CANFD_MOD,
+ .cmd = NCT6694_CANFD_SETTING,
+ .sel = ndev->dev_port,
+ .len = cpu_to_le16(sizeof(*setting))
+ };
+ u32 en_tdc;
+ int ret;
+
+ setting = kzalloc(sizeof(*setting), GFP_KERNEL);
+ if (!setting)
+ return -ENOMEM;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ setting->ctrl1 |= cpu_to_le16(NCT6694_CANFD_SETTING_CTRL1_MON);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
+ setting->ctrl1 |= cpu_to_le16(NCT6694_CANFD_SETTING_CTRL1_NISO);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+ setting->ctrl1 |= cpu_to_le16(NCT6694_CANFD_SETTING_CTRL1_LBCK);
+
+ /* Disable clock divider */
+ setting->ctrl2 = 0;
+
+ setting->nbtp = cpu_to_le32(FIELD_PREP(NCT6694_CANFD_SETTING_NBTP_NSJW,
+ n_bt->sjw - 1) |
+ FIELD_PREP(NCT6694_CANFD_SETTING_NBTP_NBRP,
+ n_bt->brp - 1) |
+ FIELD_PREP(NCT6694_CANFD_SETTING_NBTP_NTSEG2,
+ n_bt->phase_seg2 - 1) |
+ FIELD_PREP(NCT6694_CANFD_SETTING_NBTP_NTSEG1,
+ n_bt->prop_seg + n_bt->phase_seg1 - 1));
+
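+	/* Transmitter delay compensation is only useful with a small data
+	 * prescaler, i.e. at high data bit rates.
+	 */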
+ if (d_bt->brp <= 2)
+ en_tdc = NCT6694_CANFD_SETTING_DBTP_TDC;
+ else
+ en_tdc = 0;
+
+ setting->dbtp = cpu_to_le32(FIELD_PREP(NCT6694_CANFD_SETTING_DBTP_DSJW,
+ d_bt->sjw - 1) |
+ FIELD_PREP(NCT6694_CANFD_SETTING_DBTP_DBRP,
+ d_bt->brp - 1) |
+ FIELD_PREP(NCT6694_CANFD_SETTING_DBTP_DTSEG2,
+ d_bt->phase_seg2 - 1) |
+ FIELD_PREP(NCT6694_CANFD_SETTING_DBTP_DTSEG1,
+ d_bt->prop_seg + d_bt->phase_seg1 - 1) |
+ en_tdc);
+
+ setting->active = NCT6694_CANFD_SETTING_ACTIVE_CTRL1 |
+ NCT6694_CANFD_SETTING_ACTIVE_CTRL2 |
+ NCT6694_CANFD_SETTING_ACTIVE_NBTP_DBTP;
+
+ ret = nct6694_write_msg(priv->nct6694, &cmd_hd, setting);
+ if (ret)
+ return ret;
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ return 0;
+}
+
+static void nct6694_canfd_stop(struct net_device *ndev)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ struct nct6694_canfd_setting *setting __free(kfree) = NULL;
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_CANFD_MOD,
+ .cmd = NCT6694_CANFD_SETTING,
+ .sel = ndev->dev_port,
+ .len = cpu_to_le16(sizeof(*setting))
+ };
+
+ /* The NCT6694 cannot be stopped. To ensure safe operation and avoid
+ * interference, the control mode is set to Listen-Only mode. This
+ * mode allows the device to monitor bus activity without actively
+ * participating in communication.
+ */
+ setting = kzalloc(sizeof(*setting), GFP_KERNEL);
+ if (!setting)
+ return;
+
+ nct6694_read_msg(priv->nct6694, &cmd_hd, setting);
+ setting->ctrl1 = cpu_to_le16(NCT6694_CANFD_SETTING_CTRL1_MON);
+ setting->active = NCT6694_CANFD_SETTING_ACTIVE_CTRL1;
+ nct6694_write_msg(priv->nct6694, &cmd_hd, setting);
+
+ priv->can.state = CAN_STATE_STOPPED;
+}
+
+static int nct6694_canfd_close(struct net_device *ndev)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ nct6694_canfd_stop(ndev);
+ destroy_workqueue(priv->wq);
+ free_irq(ndev->irq, ndev);
+ can_rx_offload_disable(&priv->offload);
+ close_candev(ndev);
+ return 0;
+}
+
+static int nct6694_canfd_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ int ret;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ ret = nct6694_canfd_start(ndev);
+ if (ret)
+ return ret;
+
+ netif_wake_queue(ndev);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int nct6694_canfd_open(struct net_device *ndev)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = open_candev(ndev);
+ if (ret)
+ return ret;
+
+ can_rx_offload_enable(&priv->offload);
+
+ ret = request_threaded_irq(ndev->irq, NULL,
+ nct6694_canfd_irq, IRQF_ONESHOT,
+ "nct6694_canfd", ndev);
+ if (ret) {
+ netdev_err(ndev, "Failed to request IRQ\n");
+ goto can_rx_offload_disable;
+ }
+
+ priv->wq = alloc_ordered_workqueue("%s-nct6694_wq",
+ WQ_FREEZABLE | WQ_MEM_RECLAIM,
+ ndev->name);
+ if (!priv->wq) {
+ ret = -ENOMEM;
+ goto free_irq;
+ }
+
+ ret = nct6694_canfd_start(ndev);
+ if (ret)
+ goto destroy_wq;
+
+ netif_start_queue(ndev);
+
+ return 0;
+
+destroy_wq:
+ destroy_workqueue(priv->wq);
+free_irq:
+ free_irq(ndev->irq, ndev);
+can_rx_offload_disable:
+ can_rx_offload_disable(&priv->offload);
+ close_candev(ndev);
+ return ret;
+}
+
+static const struct net_device_ops nct6694_canfd_netdev_ops = {
+ .ndo_open = nct6694_canfd_open,
+ .ndo_stop = nct6694_canfd_close,
+ .ndo_start_xmit = nct6694_canfd_start_xmit,
+};
+
+static const struct ethtool_ops nct6694_canfd_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+static int nct6694_canfd_get_clock(struct nct6694_canfd_priv *priv)
+{
+ struct nct6694_canfd_information *info __free(kfree) = NULL;
+ static const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_CANFD_MOD,
+ .cmd = NCT6694_CANFD_INFORMATION,
+ .sel = NCT6694_CANFD_INFORMATION_SEL,
+ .len = cpu_to_le16(sizeof(*info))
+ };
+ int ret;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ ret = nct6694_read_msg(priv->nct6694, &cmd_hd, info);
+ if (ret)
+ return ret;
+
+ return le32_to_cpu(info->can_clk);
+}
+
+static int nct6694_canfd_probe(struct platform_device *pdev)
+{
+ struct nct6694 *nct6694 = dev_get_drvdata(pdev->dev.parent);
+ struct nct6694_canfd_priv *priv;
+ struct net_device *ndev;
+ int port, irq, ret, can_clk;
+
+ port = ida_alloc(&nct6694->canfd_ida, GFP_KERNEL);
+ if (port < 0)
+ return port;
+
+ irq = irq_create_mapping(nct6694->domain,
+ NCT6694_IRQ_CAN0 + port);
+ if (!irq) {
+ ret = -EINVAL;
+ goto free_ida;
+ }
+
+ ndev = alloc_candev(sizeof(struct nct6694_canfd_priv), 1);
+ if (!ndev) {
+ ret = -ENOMEM;
+ goto dispose_irq;
+ }
+
+ ndev->irq = irq;
+ ndev->flags |= IFF_ECHO;
+ ndev->dev_port = port;
+ ndev->netdev_ops = &nct6694_canfd_netdev_ops;
+ ndev->ethtool_ops = &nct6694_canfd_ethtool_ops;
+
+ priv = netdev_priv(ndev);
+ priv->nct6694 = nct6694;
+ priv->ndev = ndev;
+
+ can_clk = nct6694_canfd_get_clock(priv);
+ if (can_clk < 0) {
+ ret = dev_err_probe(&pdev->dev, can_clk,
+ "Failed to get clock\n");
+ goto free_candev;
+ }
+
+ INIT_WORK(&priv->tx_work, nct6694_canfd_tx_work);
+
+ priv->can.clock.freq = can_clk;
+ priv->can.bittiming_const = &nct6694_canfd_bittiming_nominal_const;
+ priv->can.fd.data_bittiming_const = &nct6694_canfd_bittiming_data_const;
+ priv->can.do_set_mode = nct6694_canfd_set_mode;
+ priv->can.do_get_berr_counter = nct6694_canfd_get_berr_counter;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+ CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING |
+ CAN_CTRLMODE_FD_NON_ISO;
+
+ ret = can_set_static_ctrlmode(ndev, CAN_CTRLMODE_FD);
+ if (ret)
+ goto free_candev;
+
+ ret = can_rx_offload_add_manual(ndev, &priv->offload,
+ NCT6694_NAPI_WEIGHT);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "Failed to add rx_offload\n");
+ goto free_candev;
+ }
+
+ platform_set_drvdata(pdev, priv);
+ SET_NETDEV_DEV(priv->ndev, &pdev->dev);
+
+ ret = register_candev(priv->ndev);
+ if (ret)
+ goto rx_offload_del;
+
+ return 0;
+
+rx_offload_del:
+ can_rx_offload_del(&priv->offload);
+free_candev:
+ free_candev(ndev);
+dispose_irq:
+ irq_dispose_mapping(irq);
+free_ida:
+ ida_free(&nct6694->canfd_ida, port);
+ return ret;
+}
+
+static void nct6694_canfd_remove(struct platform_device *pdev)
+{
+ struct nct6694_canfd_priv *priv = platform_get_drvdata(pdev);
+ struct nct6694 *nct6694 = priv->nct6694;
+ struct net_device *ndev = priv->ndev;
+ int port = ndev->dev_port;
+ int irq = ndev->irq;
+
+ unregister_candev(ndev);
+ can_rx_offload_del(&priv->offload);
+ free_candev(ndev);
+ irq_dispose_mapping(irq);
+ ida_free(&nct6694->canfd_ida, port);
+}
+
+static struct platform_driver nct6694_canfd_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ },
+ .probe = nct6694_canfd_probe,
+ .remove = nct6694_canfd_remove,
+};
+
+module_platform_driver(nct6694_canfd_driver);
+
+MODULE_DESCRIPTION("USB-CAN FD driver for NCT6694");
+MODULE_AUTHOR("Ming Yu <tmyu0@nuvoton.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index c75df1755b3b..9278a1522aae 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -3,8 +3,8 @@
* CAN driver for PEAK System PCAN-USB adapter
* Derived from the PCAN project file driver/src/pcan_usb.c
*
- * Copyright (C) 2003-2010 PEAK System-Technik GmbH
- * Copyright (C) 2011-2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*
* Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de>
*/
@@ -319,7 +319,7 @@ static int pcan_usb_write_mode(struct peak_usb_device *dev, u8 onoff)
*/
static void pcan_usb_restart(struct timer_list *t)
{
- struct pcan_usb *pdev = from_timer(pdev, t, restart_timer);
+ struct pcan_usb *pdev = timer_container_of(pdev, t, restart_timer);
struct peak_usb_device *dev = &pdev->dev;
/* notify candev and netdev */
@@ -919,7 +919,7 @@ static int pcan_usb_init(struct peak_usb_device *dev)
CAN_CTRLMODE_LOOPBACK;
} else {
dev_info(dev->netdev->dev.parent,
- "Firmware update available. Please contact support@peak-system.com\n");
+ "Firmware update available. Please contact support.peak@hms-networks.com\n");
}
return 0;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 59f7cd8ceb39..cf48bb26d46d 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -3,8 +3,8 @@
* CAN driver for PEAK System USB adapters
* Derived from the PCAN project file driver/src/pcan_usb_core.c
*
- * Copyright (C) 2003-2010 PEAK System-Technik GmbH
- * Copyright (C) 2010-2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*
* Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de>
*/
@@ -24,7 +24,7 @@
#include "pcan_usb_core.h"
-MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
+MODULE_AUTHOR("Stéphane Grosjean <stephane.grosjean@hms-networks.com>");
MODULE_DESCRIPTION("CAN driver for PEAK-System USB adapters");
MODULE_LICENSE("GPL v2");
@@ -111,7 +111,7 @@ void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now)
u32 delta_ts = time_ref->ts_dev_2 - time_ref->ts_dev_1;
if (time_ref->ts_dev_2 < time_ref->ts_dev_1)
- delta_ts &= (1 << time_ref->adapter->ts_used_bits) - 1;
+ delta_ts &= (1ULL << time_ref->adapter->ts_used_bits) - 1;
time_ref->ts_total += delta_ts;
}
@@ -770,7 +770,7 @@ static int peak_usb_set_data_bittiming(struct net_device *netdev)
const struct peak_usb_adapter *pa = dev->adapter;
if (pa->dev_set_data_bittiming) {
- struct can_bittiming *bt = &dev->can.data_bittiming;
+ struct can_bittiming *bt = &dev->can.fd.data_bittiming;
int err = pa->dev_set_data_bittiming(dev, bt);
if (err)
@@ -784,37 +784,33 @@ static int peak_usb_set_data_bittiming(struct net_device *netdev)
return 0;
}
-static int peak_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+static int peak_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config hwts_cfg = { 0 };
-
- switch (cmd) {
- case SIOCSHWTSTAMP: /* set */
- if (copy_from_user(&hwts_cfg, ifr->ifr_data, sizeof(hwts_cfg)))
- return -EFAULT;
- if (hwts_cfg.tx_type == HWTSTAMP_TX_OFF &&
- hwts_cfg.rx_filter == HWTSTAMP_FILTER_ALL)
- return 0;
- return -ERANGE;
-
- case SIOCGHWTSTAMP: /* get */
- hwts_cfg.tx_type = HWTSTAMP_TX_OFF;
- hwts_cfg.rx_filter = HWTSTAMP_FILTER_ALL;
- if (copy_to_user(ifr->ifr_data, &hwts_cfg, sizeof(hwts_cfg)))
- return -EFAULT;
+ config->tx_type = HWTSTAMP_TX_OFF;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+
+ return 0;
+}
+
+static int peak_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ if (config->tx_type == HWTSTAMP_TX_OFF &&
+ config->rx_filter == HWTSTAMP_FILTER_ALL)
return 0;
- default:
- return -EOPNOTSUPP;
- }
+ NL_SET_ERR_MSG_MOD(extack, "Only RX HWTSTAMP_FILTER_ALL is supported");
+ return -ERANGE;
}
static const struct net_device_ops peak_usb_netdev_ops = {
.ndo_open = peak_usb_ndo_open,
.ndo_stop = peak_usb_ndo_stop,
- .ndo_eth_ioctl = peak_eth_ioctl,
.ndo_start_xmit = peak_usb_ndo_start_xmit,
- .ndo_change_mtu = can_change_mtu,
+ .ndo_hwtstamp_get = peak_hwtstamp_get,
+ .ndo_hwtstamp_set = peak_hwtstamp_set,
};
/* CAN-USB devices generally handle 32-bit CAN channel IDs.
@@ -954,8 +950,8 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
dev->can.clock = peak_usb_adapter->clock;
dev->can.bittiming_const = peak_usb_adapter->bittiming_const;
dev->can.do_set_bittiming = peak_usb_set_bittiming;
- dev->can.data_bittiming_const = peak_usb_adapter->data_bittiming_const;
- dev->can.do_set_data_bittiming = peak_usb_set_data_bittiming;
+ dev->can.fd.data_bittiming_const = peak_usb_adapter->data_bittiming_const;
+ dev->can.fd.do_set_data_bittiming = peak_usb_set_data_bittiming;
dev->can.do_set_mode = peak_usb_set_mode;
dev->can.do_get_berr_counter = peak_usb_adapter->do_get_berr_counter;
dev->can.ctrlmode_supported = peak_usb_adapter->ctrlmode_supported;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index abab00930b9d..d1c1897d47b9 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -3,8 +3,8 @@
* CAN driver for PEAK System USB adapters
* Derived from the PCAN project file driver/src/pcan_usb_core.c
*
- * Copyright (C) 2003-2010 PEAK System-Technik GmbH
- * Copyright (C) 2010-2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*
* Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de>
*/
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 4d85b29a17b7..be84191cde56 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -2,7 +2,8 @@
/*
* CAN driver for PEAK System PCAN-USB FD / PCAN-USB Pro FD adapter
*
- * Copyright (C) 2013-2014 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2013-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#include <linux/ethtool.h>
#include <linux/module.h>
@@ -49,7 +50,7 @@ struct __packed pcan_ufd_fw_info {
__le32 ser_no; /* S/N */
__le32 flags; /* special functions */
- /* extended data when type == PCAN_USBFD_TYPE_EXT */
+ /* extended data when type >= PCAN_USBFD_TYPE_EXT */
u8 cmd_out_ep; /* ep for cmd */
u8 cmd_in_ep; /* ep for replies */
u8 data_out_ep[2]; /* ep for CANx TX */
@@ -982,10 +983,11 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
dev->can.ctrlmode |= CAN_CTRLMODE_FD_NON_ISO;
}
- /* if vendor rsp is of type 2, then it contains EP numbers to
- * use for cmds pipes. If not, then default EP should be used.
+ /* if vendor rsp type is greater than or equal to 2, then it
+ * contains EP numbers to use for cmds pipes. If not, then
+ * default EP should be used.
*/
- if (fw_info->type != cpu_to_le16(PCAN_USBFD_TYPE_EXT)) {
+ if (le16_to_cpu(fw_info->type) < PCAN_USBFD_TYPE_EXT) {
fw_info->cmd_out_ep = PCAN_USBPRO_EP_CMDOUT;
fw_info->cmd_in_ep = PCAN_USBPRO_EP_CMDIN;
}
@@ -1018,11 +1020,11 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
dev->can_channel_id =
le32_to_cpu(pdev->usb_if->fw_info.dev_id[dev->ctrl_idx]);
- /* if vendor rsp is of type 2, then it contains EP numbers to
- * use for data pipes. If not, then statically defined EP are used
- * (see peak_usb_create_dev()).
+ /* if vendor rsp type is greater than or equal to 2, then it contains EP
+ * numbers to use for data pipes. If not, then statically defined EP are
+ * used (see peak_usb_create_dev()).
*/
- if (fw_info->type == cpu_to_le16(PCAN_USBFD_TYPE_EXT)) {
+ if (le16_to_cpu(fw_info->type) >= PCAN_USBFD_TYPE_EXT) {
dev->ep_msg_in = fw_info->data_in_ep;
dev->ep_msg_out = fw_info->data_out_ep[dev->ctrl_idx];
}
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index f736196383ac..7be286293b1a 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -3,8 +3,8 @@
* CAN driver for PEAK System PCAN-USB Pro adapter
* Derived from the PCAN project file driver/src/pcan_usbpro.c
*
- * Copyright (C) 2003-2011 PEAK System-Technik GmbH
- * Copyright (C) 2011-2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#include <linux/ethtool.h>
#include <linux/module.h>
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
index 28e740af905d..162c7546d3a8 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
@@ -3,8 +3,8 @@
* CAN driver for PEAK System PCAN-USB Pro adapter
* Derived from the PCAN project file driver/src/pcan_usbpro_fw.h
*
- * Copyright (C) 2003-2011 PEAK System-Technik GmbH
- * Copyright (C) 2011-2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#ifndef PCAN_USB_PRO_H
#define PCAN_USB_PRO_H
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
index 39a63b7313a4..de61d9da99e3 100644
--- a/drivers/net/can/usb/ucan.c
+++ b/drivers/net/can/usb/ucan.c
@@ -186,7 +186,7 @@ union ucan_ctl_payload {
*/
struct ucan_ctl_cmd_get_protocol_version cmd_get_protocol_version;
- u8 raw[128];
+ u8 fw_str[128];
} __packed;
enum {
@@ -424,18 +424,20 @@ static int ucan_ctrl_command_out(struct ucan_priv *up,
UCAN_USB_CTL_PIPE_TIMEOUT);
}
-static int ucan_device_request_in(struct ucan_priv *up,
- u8 cmd, u16 subcmd, u16 datalen)
+static void ucan_get_fw_str(struct ucan_priv *up, char *fw_str, size_t size)
{
- return usb_control_msg(up->udev,
- usb_rcvctrlpipe(up->udev, 0),
- cmd,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- subcmd,
- 0,
- up->ctl_msg_buffer,
- datalen,
- UCAN_USB_CTL_PIPE_TIMEOUT);
+ int ret;
+
+ ret = usb_control_msg(up->udev, usb_rcvctrlpipe(up->udev, 0),
+ UCAN_DEVICE_GET_FW_STRING,
+ USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE,
+ 0, 0, fw_str, size - 1,
+ UCAN_USB_CTL_PIPE_TIMEOUT);
+ if (ret > 0)
+ fw_str[ret] = '\0';
+ else
+ strscpy(fw_str, "unknown", size);
}
/* Parse the device information structure reported by the device and
@@ -1231,7 +1233,6 @@ static const struct net_device_ops ucan_netdev_ops = {
.ndo_open = ucan_open,
.ndo_stop = ucan_close,
.ndo_start_xmit = ucan_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops ucan_ethtool_ops = {
@@ -1314,7 +1315,6 @@ static int ucan_probe(struct usb_interface *intf,
u8 in_ep_addr;
u8 out_ep_addr;
union ucan_ctl_payload *ctl_msg_buffer;
- char firmware_str[sizeof(union ucan_ctl_payload) + 1];
udev = interface_to_usbdev(intf);
@@ -1527,17 +1527,6 @@ static int ucan_probe(struct usb_interface *intf,
*/
ucan_parse_device_info(up, &ctl_msg_buffer->cmd_get_device_info);
- /* just print some device information - if available */
- ret = ucan_device_request_in(up, UCAN_DEVICE_GET_FW_STRING, 0,
- sizeof(union ucan_ctl_payload));
- if (ret > 0) {
- /* copy string while ensuring zero termination */
- strscpy(firmware_str, up->ctl_msg_buffer->raw,
- sizeof(union ucan_ctl_payload) + 1);
- } else {
- strcpy(firmware_str, "unknown");
- }
-
/* device is compatible, reset it */
ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0);
if (ret < 0)
@@ -1555,7 +1544,10 @@ static int ucan_probe(struct usb_interface *intf,
/* initialisation complete, log device info */
netdev_info(up->netdev, "registered device\n");
- netdev_info(up->netdev, "firmware string: %s\n", firmware_str);
+ ucan_get_fw_str(up, up->ctl_msg_buffer->fw_str,
+ sizeof(up->ctl_msg_buffer->fw_str));
+ netdev_info(up->netdev, "firmware string: %s\n",
+ up->ctl_msg_buffer->fw_str);
/* success */
return 0;
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index 8a5596ce4e46..7449328f7cd7 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -868,7 +868,6 @@ static const struct net_device_ops usb_8dev_netdev_ops = {
.ndo_open = usb_8dev_open,
.ndo_stop = usb_8dev_close,
.ndo_start_xmit = usb_8dev_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops usb_8dev_ethtool_ops = {
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index f67e85807100..fdc662aea279 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -156,7 +156,7 @@ static const struct ethtool_ops vcan_ethtool_ops = {
static void vcan_setup(struct net_device *dev)
{
dev->type = ARPHRD_CAN;
- dev->mtu = CANFD_MTU;
+ dev->mtu = CANXL_MTU;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->tx_queue_len = 0;
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index 9e1b7d41005f..b2c19f8c5f8e 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -156,7 +156,7 @@ static void vxcan_setup(struct net_device *dev)
struct can_ml_priv *can_ml;
dev->type = ARPHRD_CAN;
- dev->mtu = CANFD_MTU;
+ dev->mtu = CANXL_MTU;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->tx_queue_len = 0;
@@ -172,13 +172,15 @@ static void vxcan_setup(struct net_device *dev)
/* forward declaration for rtnl_create_link() */
static struct rtnl_link_ops vxcan_link_ops;
-static int vxcan_newlink(struct net *net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int vxcan_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *peer_net = rtnl_newlink_peer_net(params);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct vxcan_priv *priv;
struct net_device *peer;
- struct net *peer_net;
struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb;
char ifname[IFNAMSIZ];
@@ -188,14 +190,10 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
/* register peer device */
if (data && data[VXCAN_INFO_PEER]) {
- struct nlattr *nla_peer;
+ struct nlattr *nla_peer = data[VXCAN_INFO_PEER];
- nla_peer = data[VXCAN_INFO_PEER];
ifmp = nla_data(nla_peer);
- err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
- if (err < 0)
- return err;
-
+ rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
tbp = peer_tb;
}
@@ -207,23 +205,15 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
name_assign_type = NET_NAME_ENUM;
}
- peer_net = rtnl_link_get_net(net, tbp);
- if (IS_ERR(peer_net))
- return PTR_ERR(peer_net);
-
peer = rtnl_create_link(peer_net, ifname, name_assign_type,
&vxcan_link_ops, tbp, extack);
- if (IS_ERR(peer)) {
- put_net(peer_net);
+ if (IS_ERR(peer))
return PTR_ERR(peer);
- }
if (ifmp && dev->ifindex)
peer->ifindex = ifmp->ifi_index;
err = register_netdevice(peer);
- put_net(peer_net);
- peer_net = NULL;
if (err < 0) {
free_netdev(peer);
return err;
@@ -302,6 +292,7 @@ static struct rtnl_link_ops vxcan_link_ops = {
.newlink = vxcan_newlink,
.dellink = vxcan_dellink,
.policy = vxcan_policy,
+ .peer_type = VXCAN_INFO_PEER,
.maxtype = VXCAN_INFO_MAX,
.get_link_net = vxcan_get_link_net,
};
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 436c0e4b0344..43d7f22820b8 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -481,7 +481,7 @@ static int xcan_set_bittiming(struct net_device *ndev)
{
struct xcan_priv *priv = netdev_priv(ndev);
struct can_bittiming *bt = &priv->can.bittiming;
- struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
u32 btr0, btr1;
u32 is_config_mode;
@@ -515,12 +515,12 @@ static int xcan_set_bittiming(struct net_device *ndev)
priv->devtype.cantype == XAXI_CANFD_2_0) {
/* Setting Baud Rate prescaler value in F_BRPR Register */
btr0 = dbt->brp - 1;
- if (can_tdc_is_enabled(&priv->can)) {
+ if (can_fd_tdc_is_enabled(&priv->can)) {
if (priv->devtype.cantype == XAXI_CANFD)
- btr0 |= FIELD_PREP(XCAN_BRPR_TDCO_MASK, priv->can.tdc.tdco) |
+ btr0 |= FIELD_PREP(XCAN_BRPR_TDCO_MASK, priv->can.fd.tdc.tdco) |
XCAN_BRPR_TDC_ENABLE;
else
- btr0 |= FIELD_PREP(XCAN_2_BRPR_TDCO_MASK, priv->can.tdc.tdco) |
+ btr0 |= FIELD_PREP(XCAN_2_BRPR_TDCO_MASK, priv->can.fd.tdc.tdco) |
XCAN_BRPR_TDC_ENABLE;
}
@@ -690,14 +690,6 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
dlc |= XCAN_DLCR_EDL_MASK;
}
- if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
- (priv->devtype.flags & XCAN_FLAG_TXFEMP))
- can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
- else
- can_put_echo_skb(skb, ndev, 0, 0);
-
- priv->tx_head++;
-
priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
/* If the CAN frame is RTR frame this write triggers transmission
* (not on CAN FD)
@@ -730,6 +722,14 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
data[1]);
}
}
+
+ if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
+ (priv->devtype.flags & XCAN_FLAG_TXFEMP))
+ can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
+ else
+ can_put_echo_skb(skb, ndev, 0, 0);
+
+ priv->tx_head++;
}
/**
@@ -1702,7 +1702,6 @@ static const struct net_device_ops xcan_netdev_ops = {
.ndo_open = xcan_open,
.ndo_stop = xcan_close,
.ndo_start_xmit = xcan_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops xcan_ethtool_ops = {
@@ -1967,22 +1966,22 @@ static int xcan_probe(struct platform_device *pdev)
goto err_free;
if (devtype->cantype == XAXI_CANFD) {
- priv->can.data_bittiming_const =
+ priv->can.fd.data_bittiming_const =
&xcan_data_bittiming_const_canfd;
- priv->can.tdc_const = &xcan_tdc_const_canfd;
+ priv->can.fd.tdc_const = &xcan_tdc_const_canfd;
}
if (devtype->cantype == XAXI_CANFD_2_0) {
- priv->can.data_bittiming_const =
+ priv->can.fd.data_bittiming_const =
&xcan_data_bittiming_const_canfd2;
- priv->can.tdc_const = &xcan_tdc_const_canfd2;
+ priv->can.fd.tdc_const = &xcan_tdc_const_canfd2;
}
if (devtype->cantype == XAXI_CANFD ||
devtype->cantype == XAXI_CANFD_2_0) {
priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
CAN_CTRLMODE_TDC_AUTO;
- priv->can.do_get_auto_tdcv = xcan_get_auto_tdcv;
+ priv->can.fd.do_get_auto_tdcv = xcan_get_auto_tdcv;
}
priv->reg_base = addr;
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 2d10b4d6cfbb..7eb301fd987d 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -26,17 +26,12 @@ config NET_DSA_LOOP
source "drivers/net/dsa/hirschmann/Kconfig"
-config NET_DSA_LANTIQ_GSWIP
- tristate "Lantiq / Intel GSWIP"
- depends on HAS_IOMEM
- select NET_DSA_TAG_GSWIP
- help
- This enables support for the Lantiq / Intel GSWIP 2.1 found in
- the xrx200 / VR9 SoC.
+source "drivers/net/dsa/lantiq/Kconfig"
config NET_DSA_MT7530
tristate "MediaTek MT7530 and MT7531 Ethernet switch support"
select NET_DSA_TAG_MTK
+ select REGMAP_IRQ
imply NET_DSA_MT7530_MDIO
imply NET_DSA_MT7530_MMIO
help
@@ -91,13 +86,21 @@ source "drivers/net/dsa/realtek/Kconfig"
config NET_DSA_RZN1_A5PSW
tristate "Renesas RZ/N1 A5PSW Ethernet switch support"
- depends on OF && ARCH_RZN1
+ depends on OF && (ARCH_RZN1 || COMPILE_TEST)
select NET_DSA_TAG_RZN1_A5PSW
select PCS_RZN1_MIIC
help
This driver supports the A5PSW switch, which is embedded in Renesas
RZ/N1 SoC.
+config NET_DSA_KS8995
+	tristate "Micrel KS8995 family 5-port 10/100 Ethernet switches"
+ depends on SPI
+ select NET_DSA_TAG_NONE
+ help
+ This driver supports the Micrel KS8995 family of 10/100 Mbit ethernet
+ switches, managed over SPI.
+
config NET_DSA_SMSC_LAN9303
tristate
select NET_DSA_TAG_LAN9303
@@ -151,4 +154,11 @@ config NET_DSA_VITESSE_VSC73XX_PLATFORM
This enables support for the Vitesse VSC7385, VSC7388, VSC7395
and VSC7398 SparX integrated ethernet switches, connected over
a CPU-attached address bus and work in memory-mapped I/O mode.
+
+config NET_DSA_YT921X
+ tristate "Motorcomm YT9215 ethernet switch chip support"
+ select NET_DSA_TAG_YT921X
+ help
+ This enables support for the Motorcomm YT9215 ethernet switch
+ chip.
endmenu
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index cb9a97340e58..16de4ba3fa38 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -2,10 +2,7 @@
obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm-sf2.o
bcm-sf2-objs := bcm_sf2.o bcm_sf2_cfp.o
obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o
-ifdef CONFIG_NET_DSA_LOOP
-obj-$(CONFIG_FIXED_PHY) += dsa_loop_bdinfo.o
-endif
-obj-$(CONFIG_NET_DSA_LANTIQ_GSWIP) += lantiq_gswip.o
+obj-$(CONFIG_NET_DSA_KS8995) += ks8995.o
obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
obj-$(CONFIG_NET_DSA_MT7530_MDIO) += mt7530-mdio.o
obj-$(CONFIG_NET_DSA_MT7530_MMIO) += mt7530-mmio.o
@@ -17,8 +14,10 @@ obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o
obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX) += vitesse-vsc73xx-core.o
obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX_PLATFORM) += vitesse-vsc73xx-platform.o
obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX_SPI) += vitesse-vsc73xx-spi.o
+obj-$(CONFIG_NET_DSA_YT921X) += yt921x.o
obj-y += b53/
obj-y += hirschmann/
+obj-y += lantiq/
obj-y += microchip/
obj-y += mv88e6xxx/
obj-y += ocelot/
diff --git a/drivers/net/dsa/b53/Kconfig b/drivers/net/dsa/b53/Kconfig
index ebaa4a80d544..915008e8eff5 100644
--- a/drivers/net/dsa/b53/Kconfig
+++ b/drivers/net/dsa/b53/Kconfig
@@ -5,6 +5,7 @@ menuconfig B53
select NET_DSA_TAG_NONE
select NET_DSA_TAG_BRCM
select NET_DSA_TAG_BRCM_LEGACY
+ select NET_DSA_TAG_BRCM_LEGACY_FCS
select NET_DSA_TAG_BRCM_PREPEND
help
This driver adds support for Broadcom managed switch chips. It supports
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 285785c942b0..a1a177713d99 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -21,6 +21,8 @@
#include <linux/export.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
+#include <linux/math.h>
+#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/platform_data/b53.h>
#include <linux/phy.h>
@@ -326,6 +328,26 @@ static void b53_get_vlan_entry(struct b53_device *dev, u16 vid,
}
}
+static void b53_set_eap_mode(struct b53_device *dev, int port, int mode)
+{
+ u64 eap_conf;
+
+ if (is5325(dev) || is5365(dev) || dev->chip_id == BCM5389_DEVICE_ID)
+ return;
+
+ b53_read64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), &eap_conf);
+
+ if (is63xx(dev)) {
+ eap_conf &= ~EAP_MODE_MASK_63XX;
+ eap_conf |= (u64)mode << EAP_MODE_SHIFT_63XX;
+ } else {
+ eap_conf &= ~EAP_MODE_MASK;
+ eap_conf |= (u64)mode << EAP_MODE_SHIFT;
+ }
+
+ b53_write64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), eap_conf);
+}
+
static void b53_set_forwarding(struct b53_device *dev, int enable)
{
u8 mgmt;
@@ -339,18 +361,23 @@ static void b53_set_forwarding(struct b53_device *dev, int enable)
b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
- /* Include IMP port in dumb forwarding mode
- */
- b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
- mgmt |= B53_MII_DUMB_FWDG_EN;
- b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
+ if (!is5325(dev)) {
+ /* Include IMP port in dumb forwarding mode */
+ b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
+ mgmt |= B53_MII_DUMB_FWDG_EN;
+ b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
- /* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether
- * frames should be flooded or not.
- */
- b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
- mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN;
- b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
+ /* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether
+ * frames should be flooded or not.
+ */
+ b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
+ mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IP_MC;
+ b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
+ } else {
+ b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
+ mgmt |= B53_IP_MC;
+ b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
+ }
}
static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
@@ -373,15 +400,17 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5);
}
+ vc1 &= ~VC1_RX_MCST_FWD_EN;
+
if (enable) {
vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
- vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
+ vc1 |= VC1_RX_MCST_UNTAG_EN;
vc4 &= ~VC4_ING_VID_CHECK_MASK;
if (enable_filtering) {
vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
vc5 |= VC5_DROP_VTABLE_MISS;
} else {
- vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
+ vc4 |= VC4_NO_ING_VID_CHK << VC4_ING_VID_CHECK_S;
vc5 &= ~VC5_DROP_VTABLE_MISS;
}
@@ -393,7 +422,7 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
} else {
vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID);
- vc1 &= ~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN);
+ vc1 &= ~VC1_RX_MCST_UNTAG_EN;
vc4 &= ~VC4_ING_VID_CHECK_MASK;
vc5 &= ~VC5_DROP_VTABLE_MISS;
@@ -463,6 +492,9 @@ static int b53_flush_arl(struct b53_device *dev, u8 mask)
{
unsigned int i;
+ if (is5325(dev))
+ return 0;
+
b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
FAST_AGE_DONE | FAST_AGE_DYNAMIC | mask);
@@ -487,6 +519,9 @@ out:
static int b53_fast_age_port(struct b53_device *dev, int port)
{
+ if (is5325(dev))
+ return 0;
+
b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_PORT_CTRL, port);
return b53_flush_arl(dev, FAST_AGE_PORT);
@@ -494,6 +529,9 @@ static int b53_fast_age_port(struct b53_device *dev, int port)
static int b53_fast_age_vlan(struct b53_device *dev, u16 vid)
{
+ if (is5325(dev))
+ return 0;
+
b53_write16(dev, B53_CTRL_PAGE, B53_FAST_AGE_VID_CTRL, vid);
return b53_flush_arl(dev, FAST_AGE_VLAN);
@@ -505,6 +543,10 @@ void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
unsigned int i;
u16 pvlan;
+ /* BCM5325 CPU port is at 8 */
+ if ((is5325(dev) || is5365(dev)) && cpu_port == B53_CPU_PORT_25)
+ cpu_port = B53_CPU_PORT;
+
/* Enable the IMP port to be in the same VLAN as the other ports
* on a per-port basis such that we only have Port i and IMP in
* the same VLAN.
@@ -522,12 +564,24 @@ static void b53_port_set_ucast_flood(struct b53_device *dev, int port,
{
u16 uc;
- b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
- if (unicast)
- uc |= BIT(port);
- else
- uc &= ~BIT(port);
- b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
+ if (is5325(dev)) {
+ if (port == B53_CPU_PORT_25)
+ port = B53_CPU_PORT;
+
+ b53_read16(dev, B53_IEEE_PAGE, B53_IEEE_UCAST_DLF, &uc);
+ if (unicast)
+ uc |= BIT(port) | B53_IEEE_UCAST_DROP_EN;
+ else
+ uc &= ~BIT(port);
+ b53_write16(dev, B53_IEEE_PAGE, B53_IEEE_UCAST_DLF, uc);
+ } else {
+ b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
+ if (unicast)
+ uc |= BIT(port);
+ else
+ uc &= ~BIT(port);
+ b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
+ }
}
static void b53_port_set_mcast_flood(struct b53_device *dev, int port,
@@ -535,19 +589,31 @@ static void b53_port_set_mcast_flood(struct b53_device *dev, int port,
{
u16 mc;
- b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
- if (multicast)
- mc |= BIT(port);
- else
- mc &= ~BIT(port);
- b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);
+ if (is5325(dev)) {
+ if (port == B53_CPU_PORT_25)
+ port = B53_CPU_PORT;
- b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
- if (multicast)
- mc |= BIT(port);
- else
- mc &= ~BIT(port);
- b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
+ b53_read16(dev, B53_IEEE_PAGE, B53_IEEE_MCAST_DLF, &mc);
+ if (multicast)
+ mc |= BIT(port) | B53_IEEE_MCAST_DROP_EN;
+ else
+ mc &= ~BIT(port);
+ b53_write16(dev, B53_IEEE_PAGE, B53_IEEE_MCAST_DLF, mc);
+ } else {
+ b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
+ if (multicast)
+ mc |= BIT(port);
+ else
+ mc &= ~BIT(port);
+ b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);
+
+ b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
+ if (multicast)
+ mc |= BIT(port);
+ else
+ mc &= ~BIT(port);
+ b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
+ }
}
static void b53_port_set_learning(struct b53_device *dev, int port,
@@ -555,6 +621,9 @@ static void b53_port_set_learning(struct b53_device *dev, int port,
{
u16 reg;
+ if (is5325(dev))
+ return;
+
b53_read16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, &reg);
if (learning)
reg &= ~BIT(port);
@@ -563,6 +632,25 @@ static void b53_port_set_learning(struct b53_device *dev, int port,
b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg);
}
+static void b53_port_set_isolated(struct b53_device *dev, int port,
+ bool isolated)
+{
+ u8 offset;
+ u16 reg;
+
+ if (is5325(dev))
+ offset = B53_PROTECTED_PORT_SEL_25;
+ else
+ offset = B53_PROTECTED_PORT_SEL;
+
+ b53_read16(dev, B53_CTRL_PAGE, offset, &reg);
+ if (isolated)
+ reg |= BIT(port);
+ else
+ reg &= ~BIT(port);
+ b53_write16(dev, B53_CTRL_PAGE, offset, reg);
+}
+
static void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
{
struct b53_device *dev = ds->priv;
@@ -576,6 +664,39 @@ static void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg);
}
+int b53_setup_port(struct dsa_switch *ds, int port)
+{
+ struct b53_device *dev = ds->priv;
+
+ b53_port_set_ucast_flood(dev, port, true);
+ b53_port_set_mcast_flood(dev, port, true);
+ b53_port_set_learning(dev, port, false);
+ b53_port_set_isolated(dev, port, false);
+
+ /* Force all traffic to go to the CPU port to prevent the ASIC from
+ * trying to forward to bridged ports on matching FDB entries, then
+ * dropping frames because it isn't allowed to forward there.
+ */
+ if (dsa_is_user_port(ds, port))
+ b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED);
+
+ if (is5325(dev) &&
+ in_range(port, 1, 4)) {
+ u8 reg;
+
+ b53_read8(dev, B53_CTRL_PAGE, B53_PD_MODE_CTRL_25, &reg);
+ reg &= ~PD_MODE_POWER_DOWN_PORT(0);
+ if (dsa_is_unused_port(ds, port))
+ reg |= PD_MODE_POWER_DOWN_PORT(port);
+ else
+ reg &= ~PD_MODE_POWER_DOWN_PORT(port);
+ b53_write8(dev, B53_CTRL_PAGE, B53_PD_MODE_CTRL_25, reg);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(b53_setup_port);
+
int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
struct b53_device *dev = ds->priv;
@@ -588,9 +709,8 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
- b53_port_set_ucast_flood(dev, port, true);
- b53_port_set_mcast_flood(dev, port, true);
- b53_port_set_learning(dev, port, false);
+ if (dev->ops->phy_enable)
+ dev->ops->phy_enable(dev, port);
if (dev->ops->irq_enable)
ret = dev->ops->irq_enable(dev, port);
@@ -630,6 +750,9 @@ void b53_disable_port(struct dsa_switch *ds, int port)
reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE;
b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
+ if (dev->ops->phy_disable)
+ dev->ops->phy_disable(dev, port);
+
if (dev->ops->irq_disable)
dev->ops->irq_disable(dev, port);
}
@@ -674,6 +797,11 @@ void b53_brcm_hdr_setup(struct dsa_switch *ds, int port)
hdr_ctl |= GC_FRM_MGMT_PORT_M;
b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, hdr_ctl);
+ /* B53_BRCM_HDR not present on devices with legacy tags */
+ if (dev->tag_protocol == DSA_TAG_PROTO_BRCM_LEGACY ||
+ dev->tag_protocol == DSA_TAG_PROTO_BRCM_LEGACY_FCS)
+ return;
+
/* Enable Broadcom tags for IMP port */
b53_read8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, &hdr_ctl);
if (tag_en)
@@ -722,10 +850,6 @@ static void b53_enable_cpu_port(struct b53_device *dev, int port)
b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl);
b53_brcm_hdr_setup(dev->ds, port);
-
- b53_port_set_ucast_flood(dev, port, true);
- b53_port_set_mcast_flood(dev, port, true);
- b53_port_set_learning(dev, port, false);
}
static void b53_enable_mib(struct b53_device *dev)
@@ -737,12 +861,18 @@ static void b53_enable_mib(struct b53_device *dev)
b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
}
+static void b53_enable_stp(struct b53_device *dev)
+{
+ u8 gc;
+
+ b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
+ gc |= GC_RX_BPDU_EN;
+ b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
+}
+
static u16 b53_default_pvid(struct b53_device *dev)
{
- if (is5325(dev) || is5365(dev))
- return 1;
- else
- return 0;
+ return 0;
}
static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port)
@@ -752,6 +882,22 @@ static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port)
return dev->tag_protocol == DSA_TAG_PROTO_NONE && dsa_is_cpu_port(ds, port);
}
+static bool b53_vlan_port_may_join_untagged(struct dsa_switch *ds, int port)
+{
+ struct b53_device *dev = ds->priv;
+ struct dsa_port *dp;
+
+ if (!dev->vlan_filtering)
+ return true;
+
+ dp = dsa_to_port(ds, port);
+
+ if (dsa_port_is_cpu(dp))
+ return true;
+
+ return dp->bridge == NULL;
+}
+
int b53_configure_vlan(struct dsa_switch *ds)
{
struct b53_device *dev = ds->priv;
@@ -770,7 +916,7 @@ int b53_configure_vlan(struct dsa_switch *ds)
b53_do_vlan_op(dev, VTA_CMD_CLEAR);
}
- b53_enable_vlan(dev, -1, dev->vlan_enabled, ds->vlan_filtering);
+ b53_enable_vlan(dev, -1, dev->vlan_enabled, dev->vlan_filtering);
/* Create an untagged VLAN entry for the default PVID in case
* CONFIG_VLAN_8021Q is disabled and there are no calls to
@@ -778,26 +924,39 @@ int b53_configure_vlan(struct dsa_switch *ds)
* entry. Do this only when the tagging protocol is not
* DSA_TAG_PROTO_NONE
*/
+ v = &dev->vlans[def_vid];
b53_for_each_port(dev, i) {
- v = &dev->vlans[def_vid];
- v->members |= BIT(i);
+ if (!b53_vlan_port_may_join_untagged(ds, i))
+ continue;
+
+ vl.members |= BIT(i);
if (!b53_vlan_port_needs_forced_tagged(ds, i))
- v->untag = v->members;
- b53_write16(dev, B53_VLAN_PAGE,
- B53_VLAN_PORT_DEF_TAG(i), def_vid);
+ vl.untag = vl.members;
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(i),
+ def_vid);
}
+ b53_set_vlan_entry(dev, def_vid, &vl);
- /* Upon initial call we have not set-up any VLANs, but upon
- * system resume, we need to restore all VLAN entries.
- */
- for (vid = def_vid; vid < dev->num_vlans; vid++) {
- v = &dev->vlans[vid];
+ if (dev->vlan_filtering) {
+ /* Upon initial call we have not set-up any VLANs, but upon
+ * system resume, we need to restore all VLAN entries.
+ */
+ for (vid = def_vid + 1; vid < dev->num_vlans; vid++) {
+ v = &dev->vlans[vid];
- if (!v->members)
- continue;
+ if (!v->members)
+ continue;
- b53_set_vlan_entry(dev, vid, v);
- b53_fast_age_vlan(dev, vid);
+ b53_set_vlan_entry(dev, vid, v);
+ b53_fast_age_vlan(dev, vid);
+ }
+
+ b53_for_each_port(dev, i) {
+ if (!dsa_is_cpu_port(ds, i))
+ b53_write16(dev, B53_VLAN_PAGE,
+ B53_VLAN_PORT_DEF_TAG(i),
+ dev->ports[i].pvid);
+ }
}
return 0;
@@ -876,6 +1035,7 @@ static int b53_switch_reset(struct b53_device *dev)
}
b53_enable_mib(dev);
+ b53_enable_stp(dev);
return b53_flush_arl(dev, FAST_AGE_STATIC);
}
@@ -1115,7 +1275,9 @@ EXPORT_SYMBOL(b53_setup_devlink_resources);
static int b53_setup(struct dsa_switch *ds)
{
struct b53_device *dev = ds->priv;
+ struct b53_vlan *vl;
unsigned int port;
+ u16 pvid;
int ret;
/* Request bridge PVID untagged when DSA_TAG_PROTO_NONE is set
@@ -1123,12 +1285,36 @@ static int b53_setup(struct dsa_switch *ds)
*/
ds->untag_bridge_pvid = dev->tag_protocol == DSA_TAG_PROTO_NONE;
+ /* The switch does not tell us the original VLAN for untagged
+ * packets, so keep the CPU port always tagged.
+ */
+ ds->untag_vlan_aware_bridge_pvid = true;
+
+ if (dev->chip_id == BCM53101_DEVICE_ID) {
+ /* BCM53101 uses 0.5 second increments */
+ ds->ageing_time_min = 1 * 500;
+ ds->ageing_time_max = AGE_TIME_MAX * 500;
+ } else {
+ /* Everything else uses 1 second increments */
+ ds->ageing_time_min = 1 * 1000;
+ ds->ageing_time_max = AGE_TIME_MAX * 1000;
+ }
+
ret = b53_reset_switch(dev);
if (ret) {
dev_err(ds->dev, "failed to reset switch\n");
return ret;
}
+ /* setup default vlan for filtering mode */
+ pvid = b53_default_pvid(dev);
+ vl = &dev->vlans[pvid];
+ b53_for_each_port(dev, port) {
+ vl->members |= BIT(port);
+ if (!b53_vlan_port_needs_forced_tagged(ds, port))
+ vl->untag |= BIT(port);
+ }
+
b53_reset_mib(dev);
ret = b53_apply_config(dev);
@@ -1163,6 +1349,8 @@ static void b53_force_link(struct b53_device *dev, int port, int link)
if (port == dev->imp_port) {
off = B53_PORT_OVERRIDE_CTRL;
val = PORT_OVERRIDE_EN;
+ } else if (is5325(dev)) {
+ return;
} else {
off = B53_GMII_PORT_OVERRIDE_CTRL(port);
val = GMII_PO_EN;
@@ -1187,6 +1375,8 @@ static void b53_force_port_config(struct b53_device *dev, int port,
if (port == dev->imp_port) {
off = B53_PORT_OVERRIDE_CTRL;
val = PORT_OVERRIDE_EN;
+ } else if (is5325(dev)) {
+ return;
} else {
off = B53_GMII_PORT_OVERRIDE_CTRL(port);
val = GMII_PO_EN;
@@ -1199,6 +1389,10 @@ static void b53_force_port_config(struct b53_device *dev, int port,
else
reg &= ~PORT_OVERRIDE_FULL_DUPLEX;
+ reg &= ~(0x3 << GMII_PO_SPEED_S);
+ if (is5301x(dev) || is58xx(dev))
+ reg &= ~PORT_OVERRIDE_SPEED_2000M;
+
switch (speed) {
case 2000:
reg |= PORT_OVERRIDE_SPEED_2000M;
@@ -1217,10 +1411,24 @@ static void b53_force_port_config(struct b53_device *dev, int port,
return;
}
- if (rx_pause)
- reg |= PORT_OVERRIDE_RX_FLOW;
- if (tx_pause)
- reg |= PORT_OVERRIDE_TX_FLOW;
+ if (is5325(dev))
+ reg &= ~PORT_OVERRIDE_LP_FLOW_25;
+ else
+ reg &= ~(PORT_OVERRIDE_RX_FLOW | PORT_OVERRIDE_TX_FLOW);
+
+ if (rx_pause) {
+ if (is5325(dev))
+ reg |= PORT_OVERRIDE_LP_FLOW_25;
+ else
+ reg |= PORT_OVERRIDE_RX_FLOW;
+ }
+
+ if (tx_pause) {
+ if (is5325(dev))
+ reg |= PORT_OVERRIDE_LP_FLOW_25;
+ else
+ reg |= PORT_OVERRIDE_TX_FLOW;
+ }
b53_write8(dev, B53_CTRL_PAGE, off, reg);
}
@@ -1229,41 +1437,17 @@ static void b53_adjust_63xx_rgmii(struct dsa_switch *ds, int port,
phy_interface_t interface)
{
struct b53_device *dev = ds->priv;
- u8 rgmii_ctrl = 0, off;
+ u8 rgmii_ctrl = 0;
- if (port == dev->imp_port)
- off = B53_RGMII_CTRL_IMP;
- else
- off = B53_RGMII_CTRL_P(port);
-
- b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
-
- switch (interface) {
- case PHY_INTERFACE_MODE_RGMII_ID:
- rgmii_ctrl |= (RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
- break;
- case PHY_INTERFACE_MODE_RGMII_RXID:
- rgmii_ctrl &= ~(RGMII_CTRL_DLL_TXC);
- rgmii_ctrl |= RGMII_CTRL_DLL_RXC;
- break;
- case PHY_INTERFACE_MODE_RGMII_TXID:
- rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC);
- rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
- break;
- case PHY_INTERFACE_MODE_RGMII:
- default:
- rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
- break;
- }
+ b53_read8(dev, B53_CTRL_PAGE, B53_RGMII_CTRL_P(port), &rgmii_ctrl);
+ rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
- if (port != dev->imp_port) {
- if (is63268(dev))
- rgmii_ctrl |= RGMII_CTRL_MII_OVERRIDE;
+ if (is6318_268(dev))
+ rgmii_ctrl |= RGMII_CTRL_MII_OVERRIDE;
- rgmii_ctrl |= RGMII_CTRL_ENABLE_GMII;
- }
+ rgmii_ctrl |= RGMII_CTRL_ENABLE_GMII;
- b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
+ b53_write8(dev, B53_CTRL_PAGE, B53_RGMII_CTRL_P(port), rgmii_ctrl);
dev_dbg(ds->dev, "Configured port %d for %s\n", port,
phy_modes(interface));
@@ -1284,8 +1468,7 @@ static void b53_adjust_531x5_rgmii(struct dsa_switch *ds, int port,
* tx_clk aligned timing (restoring to reset defaults)
*/
b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
- rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC |
- RGMII_CTRL_TIMING_SEL);
+ rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
/* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make
* sure that we enable the port TX clock internal delay to
@@ -1305,7 +1488,10 @@ static void b53_adjust_531x5_rgmii(struct dsa_switch *ds, int port,
rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
if (interface == PHY_INTERFACE_MODE_RGMII)
rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC;
- rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;
+
+ if (dev->chip_id != BCM53115_DEVICE_ID)
+ rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;
+
b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
dev_info(ds->dev, "Configured port %d for %s\n", port,
@@ -1369,6 +1555,10 @@ static void b53_phylink_get_caps(struct dsa_switch *ds, int port,
__set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_REVMII, config->supported_interfaces);
+ /* BCM63xx RGMII ports support RGMII */
+ if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4))
+ phy_interface_set_rgmii(config->supported_interfaces);
+
config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100;
@@ -1408,7 +1598,7 @@ static void b53_phylink_mac_config(struct phylink_config *config,
struct b53_device *dev = ds->priv;
int port = dp->index;
- if (is63xx(dev) && port >= B53_63XX_RGMII0)
+ if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4))
b53_adjust_63xx_rgmii(ds, port, interface);
if (mode == MLO_AN_FIXED) {
@@ -1429,8 +1619,11 @@ static void b53_phylink_mac_link_down(struct phylink_config *config,
struct b53_device *dev = dp->ds->priv;
int port = dp->index;
- if (mode == MLO_AN_PHY)
+ if (mode == MLO_AN_PHY) {
+ if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4))
+ b53_force_link(dev, port, false);
return;
+ }
if (mode == MLO_AN_FIXED) {
b53_force_link(dev, port, false);
@@ -1458,6 +1651,13 @@ static void b53_phylink_mac_link_up(struct phylink_config *config,
if (mode == MLO_AN_PHY) {
/* Re-negotiate EEE if it was enabled already */
p->eee_enabled = b53_eee_init(ds, port, phydev);
+
+ if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4)) {
+ b53_force_port_config(dev, port, speed, duplex,
+ tx_pause, rx_pause);
+ b53_force_link(dev, port, true);
+ }
+
return;
}
@@ -1482,7 +1682,10 @@ int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
{
struct b53_device *dev = ds->priv;
- b53_enable_vlan(dev, port, dev->vlan_enabled, vlan_filtering);
+ if (dev->vlan_filtering != vlan_filtering) {
+ dev->vlan_filtering = vlan_filtering;
+ b53_apply_config(dev);
+ }
return 0;
}
@@ -1493,9 +1696,6 @@ static int b53_vlan_prepare(struct dsa_switch *ds, int port,
{
struct b53_device *dev = ds->priv;
- if ((is5325(dev) || is5365(dev)) && vlan->vid == 0)
- return -EOPNOTSUPP;
-
/* Port 7 on 7278 connects to the ASP's UniMAC which is not capable of
* receiving VLAN tagged frames at all, we can still allow the port to
* be configured for egress untagged.
@@ -1507,7 +1707,7 @@ static int b53_vlan_prepare(struct dsa_switch *ds, int port,
if (vlan->vid >= dev->num_vlans)
return -ERANGE;
- b53_enable_vlan(dev, port, true, ds->vlan_filtering);
+ b53_enable_vlan(dev, port, true, dev->vlan_filtering);
return 0;
}
@@ -1520,18 +1720,29 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct b53_vlan *vl;
+ u16 old_pvid, new_pvid;
int err;
err = b53_vlan_prepare(ds, port, vlan);
if (err)
return err;
- vl = &dev->vlans[vlan->vid];
+ if (vlan->vid == 0)
+ return 0;
- b53_get_vlan_entry(dev, vlan->vid, vl);
+ old_pvid = dev->ports[port].pvid;
+ if (pvid)
+ new_pvid = vlan->vid;
+ else if (!pvid && vlan->vid == old_pvid)
+ new_pvid = b53_default_pvid(dev);
+ else
+ new_pvid = old_pvid;
+ dev->ports[port].pvid = new_pvid;
- if (vlan->vid == 0 && vlan->vid == b53_default_pvid(dev))
- untagged = true;
+ vl = &dev->vlans[vlan->vid];
+
+ if (dsa_is_cpu_port(ds, port))
+ untagged = false;
vl->members |= BIT(port);
if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port))
@@ -1539,13 +1750,16 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
else
vl->untag &= ~BIT(port);
+ if (!dev->vlan_filtering)
+ return 0;
+
b53_set_vlan_entry(dev, vlan->vid, vl);
b53_fast_age_vlan(dev, vlan->vid);
- if (pvid && !dsa_is_cpu_port(ds, port)) {
+ if (!dsa_is_cpu_port(ds, port) && new_pvid != old_pvid) {
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
- vlan->vid);
- b53_fast_age_vlan(dev, vlan->vid);
+ new_pvid);
+ b53_fast_age_vlan(dev, old_pvid);
}
return 0;
@@ -1560,20 +1774,25 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
struct b53_vlan *vl;
u16 pvid;
- b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
+ if (vlan->vid == 0)
+ return 0;
- vl = &dev->vlans[vlan->vid];
+ pvid = dev->ports[port].pvid;
- b53_get_vlan_entry(dev, vlan->vid, vl);
+ vl = &dev->vlans[vlan->vid];
vl->members &= ~BIT(port);
if (pvid == vlan->vid)
pvid = b53_default_pvid(dev);
+ dev->ports[port].pvid = pvid;
if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port))
vl->untag &= ~(BIT(port));
+ if (!dev->vlan_filtering)
+ return 0;
+
b53_set_vlan_entry(dev, vlan->vid, vl);
b53_fast_age_vlan(dev, vlan->vid);
@@ -1625,7 +1844,82 @@ static int b53_arl_rw_op(struct b53_device *dev, unsigned int op)
return b53_arl_op_wait(dev);
}
-static int b53_arl_read(struct b53_device *dev, u64 mac,
+static void b53_arl_read_entry_25(struct b53_device *dev,
+ struct b53_arl_entry *ent, u8 idx)
+{
+ u8 vid_entry;
+ u64 mac_vid;
+
+ b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_VID_ENTRY_25(idx),
+ &vid_entry);
+ b53_read64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx),
+ &mac_vid);
+ b53_arl_to_entry_25(ent, mac_vid, vid_entry);
+}
+
+static void b53_arl_write_entry_25(struct b53_device *dev,
+ const struct b53_arl_entry *ent, u8 idx)
+{
+ u8 vid_entry;
+ u64 mac_vid;
+
+ b53_arl_from_entry_25(&mac_vid, &vid_entry, ent);
+ b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_VID_ENTRY_25(idx), vid_entry);
+ b53_write64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx),
+ mac_vid);
+}
+
+static void b53_arl_read_entry_89(struct b53_device *dev,
+ struct b53_arl_entry *ent, u8 idx)
+{
+ u64 mac_vid;
+ u16 fwd_entry;
+
+ b53_read64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx),
+ &mac_vid);
+ b53_read16(dev, B53_ARLIO_PAGE, B53_ARLTBL_DATA_ENTRY(idx), &fwd_entry);
+ b53_arl_to_entry_89(ent, mac_vid, fwd_entry);
+}
+
+static void b53_arl_write_entry_89(struct b53_device *dev,
+ const struct b53_arl_entry *ent, u8 idx)
+{
+ u32 fwd_entry;
+ u64 mac_vid;
+
+ b53_arl_from_entry_89(&mac_vid, &fwd_entry, ent);
+ b53_write64(dev, B53_ARLIO_PAGE,
+ B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid);
+ b53_write16(dev, B53_ARLIO_PAGE,
+ B53_ARLTBL_DATA_ENTRY(idx), fwd_entry);
+}
+
+static void b53_arl_read_entry_95(struct b53_device *dev,
+ struct b53_arl_entry *ent, u8 idx)
+{
+ u32 fwd_entry;
+ u64 mac_vid;
+
+ b53_read64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx),
+ &mac_vid);
+ b53_read32(dev, B53_ARLIO_PAGE, B53_ARLTBL_DATA_ENTRY(idx), &fwd_entry);
+ b53_arl_to_entry(ent, mac_vid, fwd_entry);
+}
+
+static void b53_arl_write_entry_95(struct b53_device *dev,
+ const struct b53_arl_entry *ent, u8 idx)
+{
+ u32 fwd_entry;
+ u64 mac_vid;
+
+ b53_arl_from_entry(&mac_vid, &fwd_entry, ent);
+ b53_write64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx),
+ mac_vid);
+ b53_write32(dev, B53_ARLIO_PAGE, B53_ARLTBL_DATA_ENTRY(idx),
+ fwd_entry);
+}
+
+static int b53_arl_read(struct b53_device *dev, const u8 *mac,
u16 vid, struct b53_arl_entry *ent, u8 *idx)
{
DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES);
@@ -1640,23 +1934,15 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
/* Read the bins */
for (i = 0; i < dev->num_arl_bins; i++) {
- u64 mac_vid;
- u32 fwd_entry;
-
- b53_read64(dev, B53_ARLIO_PAGE,
- B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid);
- b53_read32(dev, B53_ARLIO_PAGE,
- B53_ARLTBL_DATA_ENTRY(i), &fwd_entry);
- b53_arl_to_entry(ent, mac_vid, fwd_entry);
+ b53_arl_read_entry(dev, ent, i);
- if (!(fwd_entry & ARLTBL_VALID)) {
+ if (!ent->is_valid) {
set_bit(i, free_bins);
continue;
}
- if ((mac_vid & ARLTBL_MAC_MASK) != mac)
+ if (!ether_addr_equal(ent->mac, mac))
continue;
- if (dev->vlan_enabled &&
- ((mac_vid >> ARLTBL_VID_S) & ARLTBL_VID_MASK) != vid)
+ if (dev->vlan_enabled && ent->vid != vid)
continue;
*idx = i;
return 0;
@@ -1670,9 +1956,8 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
const unsigned char *addr, u16 vid, bool is_valid)
{
struct b53_arl_entry ent;
- u32 fwd_entry;
- u64 mac, mac_vid = 0;
u8 idx = 0;
+ u64 mac;
int ret;
/* Convert the array into a 64-bit MAC */
@@ -1680,14 +1965,19 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
/* Perform a read for the given MAC and VID */
b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac);
- b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid);
+ if (!is5325m(dev)) {
+ if (is5325(dev) || is5365(dev))
+ b53_write8(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid);
+ else
+ b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid);
+ }
/* Issue a read operation for this MAC */
ret = b53_arl_rw_op(dev, 1);
if (ret)
return ret;
- ret = b53_arl_read(dev, mac, vid, &ent, &idx);
+ ret = b53_arl_read(dev, addr, vid, &ent, &idx);
/* If this is a read, just finish now */
if (op)
@@ -1704,7 +1994,6 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
/* We could not find a matching MAC, so reset to a new entry */
dev_dbg(dev->dev, "{%pM,%.4d} not found, using idx: %d\n",
addr, vid, idx);
- fwd_entry = 0;
break;
default:
dev_dbg(dev->dev, "{%pM,%.4d} found, using idx: %d\n",
@@ -1731,12 +2020,7 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
ent.is_static = true;
ent.is_age = false;
memcpy(ent.mac, addr, ETH_ALEN);
- b53_arl_from_entry(&mac_vid, &fwd_entry, &ent);
-
- b53_write64(dev, B53_ARLIO_PAGE,
- B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid);
- b53_write32(dev, B53_ARLIO_PAGE,
- B53_ARLTBL_DATA_ENTRY(idx), fwd_entry);
+ b53_arl_write_entry(dev, &ent, idx);
return b53_arl_rw_op(dev, 0);
}
@@ -1748,12 +2032,6 @@ int b53_fdb_add(struct dsa_switch *ds, int port,
struct b53_device *priv = ds->priv;
int ret;
- /* 5325 and 5365 require some more massaging, but could
- * be supported eventually
- */
- if (is5325(priv) || is5365(priv))
- return -EOPNOTSUPP;
-
mutex_lock(&priv->arl_mutex);
ret = b53_arl_op(priv, 0, port, addr, vid, true);
mutex_unlock(&priv->arl_mutex);
@@ -1777,15 +2055,55 @@ int b53_fdb_del(struct dsa_switch *ds, int port,
}
EXPORT_SYMBOL(b53_fdb_del);
+static void b53_read_arl_srch_ctl(struct b53_device *dev, u8 *val)
+{
+ u8 offset;
+
+ if (is5325(dev) || is5365(dev))
+ offset = B53_ARL_SRCH_CTL_25;
+ else if (dev->chip_id == BCM5389_DEVICE_ID || is5397_98(dev) ||
+ is63xx(dev))
+ offset = B53_ARL_SRCH_CTL_89;
+ else
+ offset = B53_ARL_SRCH_CTL;
+
+ if (is63xx(dev)) {
+ u16 val16;
+
+ b53_read16(dev, B53_ARLIO_PAGE, offset, &val16);
+ *val = val16 & 0xff;
+ } else {
+ b53_read8(dev, B53_ARLIO_PAGE, offset, val);
+ }
+}
+
+static void b53_write_arl_srch_ctl(struct b53_device *dev, u8 val)
+{
+ u8 offset;
+
+ if (is5325(dev) || is5365(dev))
+ offset = B53_ARL_SRCH_CTL_25;
+ else if (dev->chip_id == BCM5389_DEVICE_ID || is5397_98(dev) ||
+ is63xx(dev))
+ offset = B53_ARL_SRCH_CTL_89;
+ else
+ offset = B53_ARL_SRCH_CTL;
+
+ if (is63xx(dev))
+ b53_write16(dev, B53_ARLIO_PAGE, offset, val);
+ else
+ b53_write8(dev, B53_ARLIO_PAGE, offset, val);
+}
+
static int b53_arl_search_wait(struct b53_device *dev)
{
unsigned int timeout = 1000;
u8 reg;
do {
- b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, &reg);
+ b53_read_arl_srch_ctl(dev, &reg);
if (!(reg & ARL_SRCH_STDN))
- return 0;
+ return -ENOENT;
if (reg & ARL_SRCH_VLID)
return 0;
@@ -1796,16 +2114,52 @@ static int b53_arl_search_wait(struct b53_device *dev)
return -ETIMEDOUT;
}
-static void b53_arl_search_rd(struct b53_device *dev, u8 idx,
- struct b53_arl_entry *ent)
+static void b53_arl_search_read_25(struct b53_device *dev, u8 idx,
+ struct b53_arl_entry *ent)
+{
+ u64 mac_vid;
+ u8 ext;
+
+ b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_EXT_25, &ext);
+ b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_0_MACVID_25,
+ &mac_vid);
+ b53_arl_search_to_entry_25(ent, mac_vid, ext);
+}
+
+static void b53_arl_search_read_89(struct b53_device *dev, u8 idx,
+ struct b53_arl_entry *ent)
{
+ u16 fwd_entry;
u64 mac_vid;
+
+ b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_MACVID_89,
+ &mac_vid);
+ b53_read16(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_89, &fwd_entry);
+ b53_arl_to_entry_89(ent, mac_vid, fwd_entry);
+}
+
+static void b53_arl_search_read_63xx(struct b53_device *dev, u8 idx,
+ struct b53_arl_entry *ent)
+{
+ u16 fwd_entry;
+ u64 mac_vid;
+
+ b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_MACVID_63XX,
+ &mac_vid);
+ b53_read16(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_63XX, &fwd_entry);
+ b53_arl_search_to_entry_63xx(ent, mac_vid, fwd_entry);
+}
+
+static void b53_arl_search_read_95(struct b53_device *dev, u8 idx,
+ struct b53_arl_entry *ent)
+{
u32 fwd_entry;
+ u64 mac_vid;
- b53_read64(dev, B53_ARLIO_PAGE,
- B53_ARL_SRCH_RSTL_MACVID(idx), &mac_vid);
- b53_read32(dev, B53_ARLIO_PAGE,
- B53_ARL_SRCH_RSTL(idx), &fwd_entry);
+ b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_MACVID(idx),
+ &mac_vid);
+ b53_read32(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL(idx),
+ &fwd_entry);
b53_arl_to_entry(ent, mac_vid, fwd_entry);
}
@@ -1824,30 +2178,31 @@ static int b53_fdb_copy(int port, const struct b53_arl_entry *ent,
int b53_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
+ unsigned int count = 0, results_per_hit = 1;
struct b53_device *priv = ds->priv;
struct b53_arl_entry results[2];
- unsigned int count = 0;
int ret;
- u8 reg;
+
+ if (priv->num_arl_bins > 2)
+ results_per_hit = 2;
mutex_lock(&priv->arl_mutex);
/* Start search operation */
- reg = ARL_SRCH_STDN;
- b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg);
+ b53_write_arl_srch_ctl(priv, ARL_SRCH_STDN);
do {
ret = b53_arl_search_wait(priv);
if (ret)
break;
- b53_arl_search_rd(priv, 0, &results[0]);
+ b53_arl_search_read(priv, 0, &results[0]);
ret = b53_fdb_copy(port, &results[0], cb, data);
if (ret)
break;
- if (priv->num_arl_bins > 2) {
- b53_arl_search_rd(priv, 1, &results[1]);
+ if (results_per_hit == 2) {
+ b53_arl_search_read(priv, 1, &results[1]);
ret = b53_fdb_copy(port, &results[1], cb, data);
if (ret)
break;
@@ -1856,7 +2211,7 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
break;
}
- } while (count++ < b53_max_arl_entries(priv) / 2);
+ } while (count++ < b53_max_arl_entries(priv) / results_per_hit);
mutex_unlock(&priv->arl_mutex);
@@ -1906,8 +2261,9 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
bool *tx_fwd_offload, struct netlink_ext_ack *extack)
{
struct b53_device *dev = ds->priv;
+ struct b53_vlan *vl;
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
- u16 pvlan, reg;
+ u16 pvlan, reg, pvid;
unsigned int i;
/* On 7278, port 7 which connects to the ASP should only receive
@@ -1916,15 +2272,26 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
if (dev->chip_id == BCM7278_DEVICE_ID && port == 7)
return -EINVAL;
- /* Make this port leave the all VLANs join since we will have proper
- * VLAN entries from now on
- */
- if (is58xx(dev)) {
- b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
- reg &= ~BIT(port);
- if ((reg & BIT(cpu_port)) == BIT(cpu_port))
- reg &= ~BIT(cpu_port);
- b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
+ pvid = b53_default_pvid(dev);
+ vl = &dev->vlans[pvid];
+
+ if (dev->vlan_filtering) {
+		/* Remove this port from the JOIN_ALL_VLAN map since we will
+		 * have proper VLAN entries from now on
+		 */
+ if (is58xx(dev)) {
+ b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN,
+ &reg);
+ reg &= ~BIT(port);
+ if ((reg & BIT(cpu_port)) == BIT(cpu_port))
+ reg &= ~BIT(cpu_port);
+ b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN,
+ reg);
+ }
+
+ b53_get_vlan_entry(dev, pvid, vl);
+ vl->members &= ~BIT(port);
+ b53_set_vlan_entry(dev, pvid, vl);
}
b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
@@ -1944,6 +2311,9 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
pvlan |= BIT(i);
}
+ /* Disable redirection of unknown SA to the CPU port */
+ b53_set_eap_mode(dev, port, EAP_MODE_BASIC);
+
/* Configure the local port VLAN control membership to include
* remote ports and update the local port bitmask
*/
@@ -1957,7 +2327,7 @@ EXPORT_SYMBOL(b53_br_join);
void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
{
struct b53_device *dev = ds->priv;
- struct b53_vlan *vl = &dev->vlans[0];
+ struct b53_vlan *vl;
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
unsigned int i;
u16 pvlan, reg, pvid;
@@ -1979,22 +2349,27 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
pvlan &= ~BIT(i);
}
+ /* Enable redirection of unknown SA to the CPU port */
+ b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED);
+
b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
dev->ports[port].vlan_ctl_mask = pvlan;
pvid = b53_default_pvid(dev);
+ vl = &dev->vlans[pvid];
+
+ if (dev->vlan_filtering) {
+ /* Make this port join all VLANs without VLAN entries */
+ if (is58xx(dev)) {
+ b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
+ reg |= BIT(port);
+ if (!(reg & BIT(cpu_port)))
+ reg |= BIT(cpu_port);
+ b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
+ }
- /* Make this port join all VLANs without VLAN entries */
- if (is58xx(dev)) {
- b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
- reg |= BIT(port);
- if (!(reg & BIT(cpu_port)))
- reg |= BIT(cpu_port);
- b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
- } else {
b53_get_vlan_entry(dev, pvid, vl);
- vl->members |= BIT(port) | BIT(cpu_port);
- vl->untag |= BIT(port) | BIT(cpu_port);
+ vl->members |= BIT(port);
b53_set_vlan_entry(dev, pvid, vl);
}
}
@@ -2047,7 +2422,13 @@ int b53_br_flags_pre(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
- if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_LEARNING))
+ struct b53_device *dev = ds->priv;
+ unsigned long mask = (BR_FLOOD | BR_MCAST_FLOOD | BR_ISOLATED);
+
+ if (!is5325(dev))
+ mask |= BR_LEARNING;
+
+ if (flags.mask & ~mask)
return -EINVAL;
return 0;
@@ -2067,6 +2448,9 @@ int b53_br_flags(struct dsa_switch *ds, int port,
if (flags.mask & BR_LEARNING)
b53_port_set_learning(ds->priv, port,
!!(flags.val & BR_LEARNING));
+ if (flags.mask & BR_ISOLATED)
+ b53_port_set_isolated(ds->priv, port,
+ !!(flags.val & BR_ISOLATED));
return 0;
}
@@ -2123,8 +2507,11 @@ enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
goto out;
}
- /* Older models require a different 6 byte tag */
- if (is5325(dev) || is5365(dev) || is63xx(dev)) {
+ /* Older models require different 6 byte tags */
+ if (is5325(dev) || is5365(dev)) {
+ dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY_FCS;
+ goto out;
+ } else if (is63xx(dev)) {
dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY;
goto out;
}
@@ -2214,6 +2601,9 @@ int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
{
int ret;
+ if (!b53_support_eee(ds, port))
+ return 0;
+
ret = phy_init_eee(phy, false);
if (ret)
return 0;
@@ -2224,25 +2614,19 @@ int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
}
EXPORT_SYMBOL(b53_eee_init);
-int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
+bool b53_support_eee(struct dsa_switch *ds, int port)
{
struct b53_device *dev = ds->priv;
- if (is5325(dev) || is5365(dev))
- return -EOPNOTSUPP;
-
- return 0;
+ return !is5325(dev) && !is5365(dev) && !is63xx(dev);
}
-EXPORT_SYMBOL(b53_get_mac_eee);
+EXPORT_SYMBOL(b53_support_eee);
int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
{
struct b53_device *dev = ds->priv;
struct ethtool_keee *p = &dev->ports[port].eee;
- if (is5325(dev) || is5365(dev))
- return -EOPNOTSUPP;
-
p->eee_enabled = e->eee_enabled;
b53_eee_enable_set(ds, port, e->eee_enabled);
@@ -2278,6 +2662,31 @@ static int b53_get_max_mtu(struct dsa_switch *ds, int port)
return B53_MAX_MTU;
}
+int b53_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
+{
+ struct b53_device *dev = ds->priv;
+ u32 atc;
+ int reg;
+
+ if (is63xx(dev))
+ reg = B53_AGING_TIME_CONTROL_63XX;
+ else
+ reg = B53_AGING_TIME_CONTROL;
+
+ if (dev->chip_id == BCM53101_DEVICE_ID)
+ atc = DIV_ROUND_CLOSEST(msecs, 500);
+ else
+ atc = DIV_ROUND_CLOSEST(msecs, 1000);
+
+ if (!is5325(dev) && !is5365(dev))
+ atc |= AGE_CHANGE;
+
+ b53_write32(dev, B53_MGMT_PAGE, reg, atc);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(b53_set_ageing_time);
+
static const struct phylink_mac_ops b53_phylink_mac_ops = {
.mac_select_pcs = b53_phylink_mac_select_pcs,
.mac_config = b53_phylink_mac_config,
@@ -2296,10 +2705,12 @@ static const struct dsa_switch_ops b53_switch_ops = {
.phy_read = b53_phy_read16,
.phy_write = b53_phy_write16,
.phylink_get_caps = b53_phylink_get_caps,
+ .port_setup = b53_setup_port,
.port_enable = b53_enable_port,
.port_disable = b53_disable_port,
- .get_mac_eee = b53_get_mac_eee,
+ .support_eee = b53_support_eee,
.set_mac_eee = b53_set_mac_eee,
+ .set_ageing_time = b53_set_ageing_time,
.port_bridge_join = b53_br_join,
.port_bridge_leave = b53_br_leave,
.port_pre_bridge_flags = b53_br_flags_pre,
@@ -2320,6 +2731,30 @@ static const struct dsa_switch_ops b53_switch_ops = {
.port_change_mtu = b53_change_mtu,
};
+static const struct b53_arl_ops b53_arl_ops_25 = {
+ .arl_read_entry = b53_arl_read_entry_25,
+ .arl_write_entry = b53_arl_write_entry_25,
+ .arl_search_read = b53_arl_search_read_25,
+};
+
+static const struct b53_arl_ops b53_arl_ops_89 = {
+ .arl_read_entry = b53_arl_read_entry_89,
+ .arl_write_entry = b53_arl_write_entry_89,
+ .arl_search_read = b53_arl_search_read_89,
+};
+
+static const struct b53_arl_ops b53_arl_ops_63xx = {
+ .arl_read_entry = b53_arl_read_entry_89,
+ .arl_write_entry = b53_arl_write_entry_89,
+ .arl_search_read = b53_arl_search_read_63xx,
+};
+
+static const struct b53_arl_ops b53_arl_ops_95 = {
+ .arl_read_entry = b53_arl_read_entry_95,
+ .arl_write_entry = b53_arl_write_entry_95,
+ .arl_search_read = b53_arl_search_read_95,
+};
+
struct b53_chip_data {
u32 chip_id;
const char *dev_name;
@@ -2333,6 +2768,7 @@ struct b53_chip_data {
u8 duplex_reg;
u8 jumbo_pm_reg;
u8 jumbo_size_reg;
+ const struct b53_arl_ops *arl_ops;
};
#define B53_VTA_REGS \
@@ -2352,6 +2788,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_buckets = 1024,
.imp_port = 5,
.duplex_reg = B53_DUPLEX_STAT_FE,
+ .arl_ops = &b53_arl_ops_25,
},
{
.chip_id = BCM5365_DEVICE_ID,
@@ -2362,6 +2799,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_buckets = 1024,
.imp_port = 5,
.duplex_reg = B53_DUPLEX_STAT_FE,
+ .arl_ops = &b53_arl_ops_25,
},
{
.chip_id = BCM5389_DEVICE_ID,
@@ -2375,6 +2813,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_89,
},
{
.chip_id = BCM5395_DEVICE_ID,
@@ -2388,6 +2827,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM5397_DEVICE_ID,
@@ -2401,6 +2841,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_89,
},
{
.chip_id = BCM5398_DEVICE_ID,
@@ -2414,6 +2855,21 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_89,
+ },
+ {
+ .chip_id = BCM53101_DEVICE_ID,
+ .dev_name = "BCM53101",
+ .vlans = 4096,
+ .enabled_ports = 0x11f,
+ .arl_bins = 4,
+ .arl_buckets = 512,
+ .vta_regs = B53_VTA_REGS,
+ .imp_port = 8,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM53115_DEVICE_ID,
@@ -2427,6 +2883,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM53125_DEVICE_ID,
@@ -2440,6 +2897,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM53128_DEVICE_ID,
@@ -2453,32 +2911,21 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM63XX_DEVICE_ID,
.dev_name = "BCM63xx",
.vlans = 4096,
.enabled_ports = 0, /* pdata must provide them */
- .arl_bins = 4,
- .arl_buckets = 1024,
- .imp_port = 8,
- .vta_regs = B53_VTA_REGS_63XX,
- .duplex_reg = B53_DUPLEX_STAT_63XX,
- .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX,
- .jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX,
- },
- {
- .chip_id = BCM63268_DEVICE_ID,
- .dev_name = "BCM63268",
- .vlans = 4096,
- .enabled_ports = 0, /* pdata must provide them */
- .arl_bins = 4,
- .arl_buckets = 1024,
+ .arl_bins = 1,
+ .arl_buckets = 4096,
.imp_port = 8,
.vta_regs = B53_VTA_REGS_63XX,
.duplex_reg = B53_DUPLEX_STAT_63XX,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX,
+ .arl_ops = &b53_arl_ops_63xx,
},
{
.chip_id = BCM53010_DEVICE_ID,
@@ -2492,6 +2939,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM53011_DEVICE_ID,
@@ -2505,6 +2953,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM53012_DEVICE_ID,
@@ -2518,6 +2967,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM53018_DEVICE_ID,
@@ -2531,6 +2981,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM53019_DEVICE_ID,
@@ -2544,6 +2995,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM58XX_DEVICE_ID,
@@ -2557,6 +3009,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM583XX_DEVICE_ID,
@@ -2570,6 +3023,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
/* Starfighter 2 */
{
@@ -2584,6 +3038,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM7445_DEVICE_ID,
@@ -2597,6 +3052,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM7278_DEVICE_ID,
@@ -2610,6 +3066,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM53134_DEVICE_ID,
@@ -2624,18 +3081,23 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
};
static int b53_switch_init(struct b53_device *dev)
{
+ u32 chip_id = dev->chip_id;
unsigned int i;
int ret;
+ if (is63xx(dev))
+ chip_id = BCM63XX_DEVICE_ID;
+
for (i = 0; i < ARRAY_SIZE(b53_switch_chips); i++) {
const struct b53_chip_data *chip = &b53_switch_chips[i];
- if (chip->chip_id == dev->chip_id) {
+ if (chip->chip_id == chip_id) {
if (!dev->enabled_ports)
dev->enabled_ports = chip->enabled_ports;
dev->name = chip->dev_name;
@@ -2648,6 +3110,7 @@ static int b53_switch_init(struct b53_device *dev)
dev->num_vlans = chip->vlans;
dev->num_arl_bins = chip->arl_bins;
dev->num_arl_buckets = chip->arl_buckets;
+ dev->arl_ops = chip->arl_ops;
break;
}
}
@@ -2678,6 +3141,9 @@ static int b53_switch_init(struct b53_device *dev)
}
}
+ if (is5325e(dev))
+ dev->num_arl_buckets = 512;
+
dev->num_ports = fls(dev->enabled_ports);
dev->ds->num_ports = min_t(unsigned int, dev->num_ports, DSA_MAX_PORTS);
@@ -2740,6 +3206,7 @@ struct b53_device *b53_switch_alloc(struct device *base,
ds->ops = &b53_switch_ops;
ds->phylink_mac_ops = &b53_phylink_mac_ops;
dev->vlan_enabled = true;
+ dev->vlan_filtering = false;
/* Let DSA handle the case where multiple bridges span the same switch
* device and different VLAN awareness settings are requested, which
* would be breaking filtering semantics for any of the other bridge
@@ -2778,10 +3245,24 @@ int b53_switch_detect(struct b53_device *dev)
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, 0xf);
b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, &tmp);
- if (tmp == 0xf)
+ if (tmp == 0xf) {
+ u32 phy_id;
+ int val;
+
dev->chip_id = BCM5325_DEVICE_ID;
- else
+
+ val = b53_phy_read16(dev->ds, 0, MII_PHYSID1);
+ phy_id = (val & 0xffff) << 16;
+ val = b53_phy_read16(dev->ds, 0, MII_PHYSID2);
+ phy_id |= (val & 0xfff0);
+
+ if (phy_id == 0x00406330)
+ dev->variant_id = B53_VARIANT_5325M;
+ else if (phy_id == 0x0143bc30)
+ dev->variant_id = B53_VARIANT_5325E;
+ } else {
dev->chip_id = BCM5365_DEVICE_ID;
+ }
break;
case BCM5389_DEVICE_ID:
case BCM5395_DEVICE_ID:
@@ -2795,6 +3276,7 @@ int b53_switch_detect(struct b53_device *dev)
return ret;
switch (id32) {
+ case BCM53101_DEVICE_ID:
case BCM53115_DEVICE_ID:
case BCM53125_DEVICE_ID:
case BCM53128_DEVICE_ID:
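
For reference, the ageing-time encoding used by b53_set_ageing_time() above is plain arithmetic: BCM53101 counts in 0.5 s ticks, everything else in 1 s ticks, and chips newer than BCM5325/5365 also need the AGE_CHANGE strobe. A minimal user-space sketch, assuming only the AGE_CHANGE bit value from the b53_regs.h hunk further down and emulating DIV_ROUND_CLOSEST() locally:

#include <stdint.h>
#include <stdio.h>

#define AGE_CHANGE	(1u << 20)	/* from the b53_regs.h hunk below */

/* Same rounding as the kernel's DIV_ROUND_CLOSEST() for positive values. */
static uint32_t div_round_closest(uint32_t n, uint32_t d)
{
	return (n + d / 2) / d;
}

/* Mirrors the value computed in b53_set_ageing_time(); the boolean flags
 * stand in for the chip_id/is5325()/is5365() checks.
 */
static uint32_t ageing_reg_value(unsigned int msecs, int is_53101,
				 int is_5325_or_5365)
{
	uint32_t atc;

	if (is_53101)
		atc = div_round_closest(msecs, 500);
	else
		atc = div_round_closest(msecs, 1000);

	if (!is_5325_or_5365)
		atc |= AGE_CHANGE;

	return atc;
}

int main(void)
{
	/* 300 s bridge default: 300 ticks (600 on BCM53101) plus AGE_CHANGE. */
	printf("BCM53115: 0x%08x\n", (unsigned)ageing_reg_value(300000, 0, 0));
	printf("BCM53101: 0x%08x\n", (unsigned)ageing_reg_value(300000, 1, 0));
	printf("BCM5325:  0x%08x\n", (unsigned)ageing_reg_value(300000, 0, 1));
	return 0;
}
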
diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c
index 31d070bf161a..43a3b37b731b 100644
--- a/drivers/net/dsa/b53/b53_mdio.c
+++ b/drivers/net/dsa/b53/b53_mdio.c
@@ -374,6 +374,7 @@ static void b53_mdio_shutdown(struct mdio_device *mdiodev)
static const struct of_device_id b53_of_match[] = {
{ .compatible = "brcm,bcm5325" },
+ { .compatible = "brcm,bcm53101" },
{ .compatible = "brcm,bcm53115" },
{ .compatible = "brcm,bcm53125" },
{ .compatible = "brcm,bcm53128" },
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
index c687360a5b7f..f4a59d8fbdd6 100644
--- a/drivers/net/dsa/b53/b53_mmap.c
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -21,13 +21,60 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/io.h>
+#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/platform_data/b53.h>
+#include <linux/regmap.h>
#include "b53_priv.h"
+#define BCM63XX_EPHY_REG 0x3C
+#define BCM63268_GPHY_REG 0x54
+
+#define GPHY_CTRL_LOW_PWR BIT(3)
+#define GPHY_CTRL_IDDQ_BIAS BIT(0)
+
+struct b53_phy_info {
+ u32 gphy_port_mask;
+ u32 ephy_enable_mask;
+ u32 ephy_port_mask;
+ u32 ephy_bias_bit;
+ const u32 *ephy_offset;
+};
+
struct b53_mmap_priv {
void __iomem *regs;
+ struct regmap *gpio_ctrl;
+ const struct b53_phy_info *phy_info;
+ u32 phys_enabled;
+};
+
+static const u32 bcm6318_ephy_offsets[] = {4, 5, 6, 7};
+
+static const struct b53_phy_info bcm6318_ephy_info = {
+ .ephy_enable_mask = BIT(0) | BIT(4) | BIT(8) | BIT(12) | BIT(16),
+ .ephy_port_mask = GENMASK((ARRAY_SIZE(bcm6318_ephy_offsets) - 1), 0),
+ .ephy_bias_bit = 24,
+ .ephy_offset = bcm6318_ephy_offsets,
+};
+
+static const u32 bcm6368_ephy_offsets[] = {2, 3, 4, 5};
+
+static const struct b53_phy_info bcm6368_ephy_info = {
+ .ephy_enable_mask = BIT(0),
+ .ephy_port_mask = GENMASK((ARRAY_SIZE(bcm6368_ephy_offsets) - 1), 0),
+ .ephy_bias_bit = 0,
+ .ephy_offset = bcm6368_ephy_offsets,
+};
+
+static const u32 bcm63268_ephy_offsets[] = {4, 9, 14};
+
+static const struct b53_phy_info bcm63268_ephy_info = {
+ .gphy_port_mask = BIT(3),
+ .ephy_enable_mask = GENMASK(4, 0),
+ .ephy_port_mask = GENMASK((ARRAY_SIZE(bcm63268_ephy_offsets) - 1), 0),
+ .ephy_bias_bit = 24,
+ .ephy_offset = bcm63268_ephy_offsets,
};
static int b53_mmap_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
@@ -229,6 +276,71 @@ static int b53_mmap_phy_write16(struct b53_device *dev, int addr, int reg,
return -EIO;
}
+static int bcm63xx_ephy_set(struct b53_device *dev, int port, bool enable)
+{
+ struct b53_mmap_priv *priv = dev->priv;
+ const struct b53_phy_info *info = priv->phy_info;
+ struct regmap *gpio_ctrl = priv->gpio_ctrl;
+ u32 mask, val;
+
+ if (enable) {
+ mask = (info->ephy_enable_mask << info->ephy_offset[port])
+ | BIT(info->ephy_bias_bit);
+ val = 0;
+ } else {
+ mask = (info->ephy_enable_mask << info->ephy_offset[port]);
+ if (!((priv->phys_enabled & ~BIT(port)) & info->ephy_port_mask))
+ mask |= BIT(info->ephy_bias_bit);
+ val = mask;
+ }
+ return regmap_update_bits(gpio_ctrl, BCM63XX_EPHY_REG, mask, val);
+}
+
+static int bcm63268_gphy_set(struct b53_device *dev, bool enable)
+{
+ struct b53_mmap_priv *priv = dev->priv;
+ struct regmap *gpio_ctrl = priv->gpio_ctrl;
+ u32 mask = GPHY_CTRL_IDDQ_BIAS | GPHY_CTRL_LOW_PWR;
+ u32 val = 0;
+
+ if (!enable)
+ val = mask;
+
+ return regmap_update_bits(gpio_ctrl, BCM63268_GPHY_REG, mask, val);
+}
+
+static void b53_mmap_phy_enable(struct b53_device *dev, int port)
+{
+ struct b53_mmap_priv *priv = dev->priv;
+ int ret = 0;
+
+ if (priv->phy_info) {
+ if (BIT(port) & priv->phy_info->ephy_port_mask)
+ ret = bcm63xx_ephy_set(dev, port, true);
+ else if (BIT(port) & priv->phy_info->gphy_port_mask)
+ ret = bcm63268_gphy_set(dev, true);
+ }
+
+ if (!ret)
+ priv->phys_enabled |= BIT(port);
+}
+
+static void b53_mmap_phy_disable(struct b53_device *dev, int port)
+{
+ struct b53_mmap_priv *priv = dev->priv;
+ int ret = 0;
+
+ if (priv->phy_info) {
+ if (BIT(port) & priv->phy_info->ephy_port_mask)
+ ret = bcm63xx_ephy_set(dev, port, false);
+ else if (BIT(port) & priv->phy_info->gphy_port_mask)
+ ret = bcm63268_gphy_set(dev, false);
+ }
+
+ if (!ret)
+ priv->phys_enabled &= ~BIT(port);
+}
+
static const struct b53_io_ops b53_mmap_ops = {
.read8 = b53_mmap_read8,
.read16 = b53_mmap_read16,
@@ -242,6 +354,8 @@ static const struct b53_io_ops b53_mmap_ops = {
.write64 = b53_mmap_write64,
.phy_read16 = b53_mmap_phy_read16,
.phy_write16 = b53_mmap_phy_write16,
+ .phy_enable = b53_mmap_phy_enable,
+ .phy_disable = b53_mmap_phy_disable,
};
static int b53_mmap_probe_of(struct platform_device *pdev,
@@ -313,6 +427,18 @@ static int b53_mmap_probe(struct platform_device *pdev)
priv->regs = pdata->regs;
+ priv->gpio_ctrl = syscon_regmap_lookup_by_phandle(np, "brcm,gpio-ctrl");
+ if (!IS_ERR(priv->gpio_ctrl)) {
+ if (pdata->chip_id == BCM6318_DEVICE_ID ||
+ pdata->chip_id == BCM6328_DEVICE_ID ||
+ pdata->chip_id == BCM6362_DEVICE_ID)
+ priv->phy_info = &bcm6318_ephy_info;
+ else if (pdata->chip_id == BCM6368_DEVICE_ID)
+ priv->phy_info = &bcm6368_ephy_info;
+ else if (pdata->chip_id == BCM63268_DEVICE_ID)
+ priv->phy_info = &bcm63268_ephy_info;
+ }
+
dev = b53_switch_alloc(&pdev->dev, &b53_mmap_ops, priv);
if (!dev)
return -ENOMEM;
@@ -348,16 +474,16 @@ static const struct of_device_id b53_mmap_of_table[] = {
.data = (void *)BCM63XX_DEVICE_ID,
}, {
.compatible = "brcm,bcm6318-switch",
- .data = (void *)BCM63268_DEVICE_ID,
+ .data = (void *)BCM6318_DEVICE_ID,
}, {
.compatible = "brcm,bcm6328-switch",
- .data = (void *)BCM63XX_DEVICE_ID,
+ .data = (void *)BCM6328_DEVICE_ID,
}, {
.compatible = "brcm,bcm6362-switch",
- .data = (void *)BCM63XX_DEVICE_ID,
+ .data = (void *)BCM6362_DEVICE_ID,
}, {
.compatible = "brcm,bcm6368-switch",
- .data = (void *)BCM63XX_DEVICE_ID,
+ .data = (void *)BCM6368_DEVICE_ID,
}, {
.compatible = "brcm,bcm63268-switch",
.data = (void *)BCM63268_DEVICE_ID,
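
The EPHY handling added to b53_mmap.c keeps a per-port enable bitmap so the shared IDDQ bias bit is only asserted once the last EPHY port is powered down. A rough user-space model of that bookkeeping, with made-up offsets and masks and regmap_update_bits() emulated as a plain read-modify-write:

#include <stdint.h>
#include <stdio.h>

#define EPHY_ENABLE_MASK	0x1u	/* one power-down bit per port (assumed) */
#define EPHY_BIAS_BIT		24	/* shared bias bit position (assumed) */
#define EPHY_PORT_MASK		0xfu	/* ports 0..3 are EPHYs (assumed) */

static const unsigned int ephy_offset[] = { 2, 3, 4, 5 };

static uint32_t reg;		/* stands in for the GPIO control register */
static uint32_t phys_enabled;	/* bitmap of ports currently powered up */

static void ephy_set(unsigned int port, int enable)
{
	uint32_t mask = EPHY_ENABLE_MASK << ephy_offset[port];
	uint32_t val;

	if (enable) {
		/* Clearing the bits powers the PHY (and the shared bias) up. */
		mask |= 1u << EPHY_BIAS_BIT;
		val = 0;
	} else {
		/* Only drop the bias when no other EPHY port is still up. */
		if (!((phys_enabled & ~(1u << port)) & EPHY_PORT_MASK))
			mask |= 1u << EPHY_BIAS_BIT;
		val = mask;
	}

	reg = (reg & ~mask) | (val & mask);	/* regmap_update_bits() semantics */

	if (enable)
		phys_enabled |= 1u << port;
	else
		phys_enabled &= ~(1u << port);
}

int main(void)
{
	reg = 0xffffffff;	/* everything powered down at boot */
	ephy_set(0, 1);
	ephy_set(1, 1);
	ephy_set(0, 0);		/* bias stays up, port 1 is still alive */
	printf("reg=%08x enabled=%x\n", (unsigned)reg, (unsigned)phys_enabled);
	ephy_set(1, 0);		/* last port down, bias goes down too */
	printf("reg=%08x enabled=%x\n", (unsigned)reg, (unsigned)phys_enabled);
	return 0;
}
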
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 05141176daf5..bd6849e5bb93 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -45,6 +45,8 @@ struct b53_io_ops {
int (*phy_write16)(struct b53_device *dev, int addr, int reg, u16 value);
int (*irq_enable)(struct b53_device *dev, int port);
void (*irq_disable)(struct b53_device *dev, int port);
+ void (*phy_enable)(struct b53_device *dev, int port);
+ void (*phy_disable)(struct b53_device *dev, int port);
void (*phylink_get_caps)(struct b53_device *dev, int port,
struct phylink_config *config);
struct phylink_pcs *(*phylink_mac_select_pcs)(struct b53_device *dev,
@@ -56,6 +58,17 @@ struct b53_io_ops {
bool link_up);
};
+struct b53_arl_entry;
+
+struct b53_arl_ops {
+ void (*arl_read_entry)(struct b53_device *dev,
+ struct b53_arl_entry *ent, u8 idx);
+ void (*arl_write_entry)(struct b53_device *dev,
+ const struct b53_arl_entry *ent, u8 idx);
+ void (*arl_search_read)(struct b53_device *dev, u8 idx,
+ struct b53_arl_entry *ent);
+};
+
#define B53_INVALID_LANE 0xff
enum {
@@ -66,10 +79,15 @@ enum {
BCM5395_DEVICE_ID = 0x95,
BCM5397_DEVICE_ID = 0x97,
BCM5398_DEVICE_ID = 0x98,
+ BCM53101_DEVICE_ID = 0x53101,
BCM53115_DEVICE_ID = 0x53115,
BCM53125_DEVICE_ID = 0x53125,
BCM53128_DEVICE_ID = 0x53128,
BCM63XX_DEVICE_ID = 0x6300,
+ BCM6318_DEVICE_ID = 0x6318,
+ BCM6328_DEVICE_ID = 0x6328,
+ BCM6362_DEVICE_ID = 0x6362,
+ BCM6368_DEVICE_ID = 0x6368,
BCM63268_DEVICE_ID = 0x63268,
BCM53010_DEVICE_ID = 0x53010,
BCM53011_DEVICE_ID = 0x53011,
@@ -83,6 +101,12 @@ enum {
BCM53134_DEVICE_ID = 0x5075,
};
+enum b53_variant_id {
+ B53_VARIANT_NONE = 0,
+ B53_VARIANT_5325E,
+ B53_VARIANT_5325M,
+};
+
struct b53_pcs {
struct phylink_pcs pcs;
struct b53_device *dev;
@@ -95,6 +119,7 @@ struct b53_pcs {
struct b53_port {
u16 vlan_ctl_mask;
+ u16 pvid;
struct ethtool_keee eee;
};
@@ -113,9 +138,11 @@ struct b53_device {
struct mutex stats_mutex;
struct mutex arl_mutex;
const struct b53_io_ops *ops;
+ const struct b53_arl_ops *arl_ops;
/* chip specific data */
u32 chip_id;
+ enum b53_variant_id variant_id;
u8 core_rev;
u8 vta_regs[3];
u8 duplex_reg;
@@ -146,6 +173,7 @@ struct b53_device {
unsigned int num_vlans;
struct b53_vlan *vlans;
bool vlan_enabled;
+ bool vlan_filtering;
unsigned int num_ports;
struct b53_port *ports;
@@ -162,6 +190,18 @@ static inline int is5325(struct b53_device *dev)
return dev->chip_id == BCM5325_DEVICE_ID;
}
+static inline int is5325e(struct b53_device *dev)
+{
+ return is5325(dev) &&
+ dev->variant_id == B53_VARIANT_5325E;
+}
+
+static inline int is5325m(struct b53_device *dev)
+{
+ return is5325(dev) &&
+ dev->variant_id == B53_VARIANT_5325M;
+}
+
static inline int is5365(struct b53_device *dev)
{
#ifdef CONFIG_BCM47XX
@@ -188,6 +228,7 @@ static inline int is531x5(struct b53_device *dev)
{
return dev->chip_id == BCM53115_DEVICE_ID ||
dev->chip_id == BCM53125_DEVICE_ID ||
+ dev->chip_id == BCM53101_DEVICE_ID ||
dev->chip_id == BCM53128_DEVICE_ID ||
dev->chip_id == BCM53134_DEVICE_ID;
}
@@ -195,12 +236,17 @@ static inline int is531x5(struct b53_device *dev)
static inline int is63xx(struct b53_device *dev)
{
return dev->chip_id == BCM63XX_DEVICE_ID ||
+ dev->chip_id == BCM6318_DEVICE_ID ||
+ dev->chip_id == BCM6328_DEVICE_ID ||
+ dev->chip_id == BCM6362_DEVICE_ID ||
+ dev->chip_id == BCM6368_DEVICE_ID ||
dev->chip_id == BCM63268_DEVICE_ID;
}
-static inline int is63268(struct b53_device *dev)
+static inline int is6318_268(struct b53_device *dev)
{
- return dev->chip_id == BCM63268_DEVICE_ID;
+ return dev->chip_id == BCM6318_DEVICE_ID ||
+ dev->chip_id == BCM63268_DEVICE_ID;
}
static inline int is5301x(struct b53_device *dev)
@@ -294,6 +340,33 @@ static inline void b53_arl_to_entry(struct b53_arl_entry *ent,
ent->vid = mac_vid >> ARLTBL_VID_S;
}
+static inline void b53_arl_to_entry_25(struct b53_arl_entry *ent,
+ u64 mac_vid, u8 vid_entry)
+{
+ memset(ent, 0, sizeof(*ent));
+ ent->is_valid = !!(mac_vid & ARLTBL_VALID_25);
+ ent->is_age = !!(mac_vid & ARLTBL_AGE_25);
+ ent->is_static = !!(mac_vid & ARLTBL_STATIC_25);
+ u64_to_ether_addr(mac_vid, ent->mac);
+ ent->port = (mac_vid & ARLTBL_DATA_PORT_ID_MASK_25) >>
+ ARLTBL_DATA_PORT_ID_S_25;
+ if (is_unicast_ether_addr(ent->mac) && ent->port == B53_CPU_PORT)
+ ent->port = B53_CPU_PORT_25;
+ ent->vid = vid_entry;
+}
+
+static inline void b53_arl_to_entry_89(struct b53_arl_entry *ent,
+ u64 mac_vid, u16 fwd_entry)
+{
+ memset(ent, 0, sizeof(*ent));
+ ent->port = fwd_entry & ARLTBL_DATA_PORT_ID_MASK_89;
+ ent->is_valid = !!(fwd_entry & ARLTBL_VALID_89);
+ ent->is_age = !!(fwd_entry & ARLTBL_AGE_89);
+ ent->is_static = !!(fwd_entry & ARLTBL_STATIC_89);
+ u64_to_ether_addr(mac_vid, ent->mac);
+ ent->vid = mac_vid >> ARLTBL_VID_S;
+}
+
static inline void b53_arl_from_entry(u64 *mac_vid, u32 *fwd_entry,
const struct b53_arl_entry *ent)
{
@@ -308,6 +381,89 @@ static inline void b53_arl_from_entry(u64 *mac_vid, u32 *fwd_entry,
*fwd_entry |= ARLTBL_AGE;
}
+static inline void b53_arl_from_entry_25(u64 *mac_vid, u8 *vid_entry,
+ const struct b53_arl_entry *ent)
+{
+ *mac_vid = ether_addr_to_u64(ent->mac);
+ if (is_unicast_ether_addr(ent->mac) && ent->port == B53_CPU_PORT_25)
+ *mac_vid |= (u64)B53_CPU_PORT << ARLTBL_DATA_PORT_ID_S_25;
+ else
+ *mac_vid |= ((u64)ent->port << ARLTBL_DATA_PORT_ID_S_25) &
+ ARLTBL_DATA_PORT_ID_MASK_25;
+ if (ent->is_valid)
+ *mac_vid |= ARLTBL_VALID_25;
+ if (ent->is_static)
+ *mac_vid |= ARLTBL_STATIC_25;
+ if (ent->is_age)
+ *mac_vid |= ARLTBL_AGE_25;
+ *vid_entry = ent->vid;
+}
+
+static inline void b53_arl_from_entry_89(u64 *mac_vid, u32 *fwd_entry,
+ const struct b53_arl_entry *ent)
+{
+ *mac_vid = ether_addr_to_u64(ent->mac);
+ *mac_vid |= (u64)(ent->vid & ARLTBL_VID_MASK) << ARLTBL_VID_S;
+ *fwd_entry = ent->port & ARLTBL_DATA_PORT_ID_MASK_89;
+ if (ent->is_valid)
+ *fwd_entry |= ARLTBL_VALID_89;
+ if (ent->is_static)
+ *fwd_entry |= ARLTBL_STATIC_89;
+ if (ent->is_age)
+ *fwd_entry |= ARLTBL_AGE_89;
+}
+
+static inline void b53_arl_search_to_entry_25(struct b53_arl_entry *ent,
+ u64 mac_vid, u8 ext)
+{
+ memset(ent, 0, sizeof(*ent));
+ ent->is_valid = !!(mac_vid & ARLTBL_VALID_25);
+ ent->is_age = !!(mac_vid & ARLTBL_AGE_25);
+ ent->is_static = !!(mac_vid & ARLTBL_STATIC_25);
+ u64_to_ether_addr(mac_vid, ent->mac);
+ ent->vid = (mac_vid & ARL_SRCH_RSLT_VID_MASK_25) >>
+ ARL_SRCH_RSLT_VID_S_25;
+ ent->port = (mac_vid & ARL_SRCH_RSLT_PORT_ID_MASK_25) >>
+ ARL_SRCH_RSLT_PORT_ID_S_25;
+ if (is_multicast_ether_addr(ent->mac) && (ext & ARL_SRCH_RSLT_EXT_MC_MII))
+ ent->port |= BIT(B53_CPU_PORT_25);
+ else if (!is_multicast_ether_addr(ent->mac) && ent->port == B53_CPU_PORT)
+ ent->port = B53_CPU_PORT_25;
+}
+
+static inline void b53_arl_search_to_entry_63xx(struct b53_arl_entry *ent,
+ u64 mac_vid, u16 fwd_entry)
+{
+ memset(ent, 0, sizeof(*ent));
+ u64_to_ether_addr(mac_vid, ent->mac);
+ ent->vid = mac_vid >> ARLTBL_VID_S;
+
+ ent->port = fwd_entry & ARL_SRST_PORT_ID_MASK_63XX;
+ ent->port >>= 1;
+
+ ent->is_age = !!(fwd_entry & ARL_SRST_AGE_63XX);
+ ent->is_static = !!(fwd_entry & ARL_SRST_STATIC_63XX);
+ ent->is_valid = 1;
+}
+
+static inline void b53_arl_read_entry(struct b53_device *dev,
+ struct b53_arl_entry *ent, u8 idx)
+{
+ dev->arl_ops->arl_read_entry(dev, ent, idx);
+}
+
+static inline void b53_arl_write_entry(struct b53_device *dev,
+ const struct b53_arl_entry *ent, u8 idx)
+{
+ dev->arl_ops->arl_write_entry(dev, ent, idx);
+}
+
+static inline void b53_arl_search_read(struct b53_device *dev, u8 idx,
+ struct b53_arl_entry *ent)
+{
+ dev->arl_ops->arl_search_read(dev, idx, ent);
+}
+
#ifdef CONFIG_BCM47XX
#include <linux/bcm47xx_nvram.h>
@@ -339,6 +495,7 @@ void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
int b53_get_sset_count(struct dsa_switch *ds, int port, int sset);
void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data);
+int b53_set_ageing_time(struct dsa_switch *ds, unsigned int msecs);
int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
bool *tx_fwd_offload, struct netlink_ext_ack *extack);
void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge);
@@ -380,11 +537,12 @@ enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mprot);
void b53_mirror_del(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror);
+int b53_setup_port(struct dsa_switch *ds, int port);
int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
void b53_disable_port(struct dsa_switch *ds, int port);
void b53_brcm_hdr_setup(struct dsa_switch *ds, int port);
int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy);
-int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e);
+bool b53_support_eee(struct dsa_switch *ds, int port);
int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e);
#endif
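
The new b53_arl_ops structure lets a single call site handle several incompatible ARL register layouts; the inline wrappers simply forward to the per-chip implementation selected at init time. A stripped-down sketch of the same indirection, using invented entry layouts rather than the real register formats:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct arl_entry {
	uint8_t mac[6];
	uint16_t vid;
};

struct arl_ops {
	void (*read_entry)(const void *raw, struct arl_entry *ent);
};

struct device {
	const struct arl_ops *arl_ops;
};

/* "Modern" layout: 64-bit word, MAC in bits 0..47, VID in bits 48..59 (made up). */
static void read_entry_new(const void *raw, struct arl_entry *ent)
{
	uint64_t w;
	int i;

	memcpy(&w, raw, sizeof(w));
	for (i = 0; i < 6; i++)
		ent->mac[5 - i] = w >> (8 * i);
	ent->vid = (w >> 48) & 0xfff;
}

/* "Legacy" layout: 6-byte MAC followed by a one-byte VID (made up). */
static void read_entry_old(const void *raw, struct arl_entry *ent)
{
	const uint8_t *p = raw;

	memcpy(ent->mac, p, 6);
	ent->vid = p[6];
}

static void arl_read_entry(struct device *dev, const void *raw,
			   struct arl_entry *ent)
{
	dev->arl_ops->read_entry(raw, ent);	/* one call site, per-chip body */
}

int main(void)
{
	static const struct arl_ops ops_new = { .read_entry = read_entry_new };
	static const struct arl_ops ops_old = { .read_entry = read_entry_old };
	uint8_t old_raw[7] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01, 0x05 };
	uint64_t new_raw = ((uint64_t)5 << 48) | 0xdeadbeef0001ULL;
	struct device dev = { .arl_ops = &ops_old };
	struct arl_entry ent;

	arl_read_entry(&dev, old_raw, &ent);
	printf("old layout: vid=%u\n", ent.vid);

	dev.arl_ops = &ops_new;	/* chosen once, at switch init time */
	arl_read_entry(&dev, &new_raw, &ent);
	printf("new layout: vid=%u\n", ent.vid);
	return 0;
}
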
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
index bfbcb66bef66..54a278db67c9 100644
--- a/drivers/net/dsa/b53/b53_regs.h
+++ b/drivers/net/dsa/b53/b53_regs.h
@@ -29,6 +29,7 @@
#define B53_ARLIO_PAGE 0x05 /* ARL Access */
#define B53_FRAMEBUF_PAGE 0x06 /* Management frame access */
#define B53_MEM_ACCESS_PAGE 0x08 /* Memory access */
+#define B53_IEEE_PAGE 0x0a /* IEEE 802.1X */
/* PHY Registers */
#define B53_PORT_MII_PAGE(i) (0x10 + (i)) /* Port i MII Registers */
@@ -50,6 +51,9 @@
/* Jumbo Frame Registers */
#define B53_JUMBO_PAGE 0x40
+/* EAP Registers */
+#define B53_EAP_PAGE 0x42
+
/* EEE Control Registers Page */
#define B53_EEE_PAGE 0x92
@@ -92,18 +96,22 @@
#define PORT_OVERRIDE_SPEED_10M (0 << PORT_OVERRIDE_SPEED_S)
#define PORT_OVERRIDE_SPEED_100M (1 << PORT_OVERRIDE_SPEED_S)
#define PORT_OVERRIDE_SPEED_1000M (2 << PORT_OVERRIDE_SPEED_S)
+#define PORT_OVERRIDE_LP_FLOW_25 BIT(3) /* BCM5325 only */
#define PORT_OVERRIDE_RV_MII_25 BIT(4) /* BCM5325 only */
#define PORT_OVERRIDE_RX_FLOW BIT(4)
#define PORT_OVERRIDE_TX_FLOW BIT(5)
#define PORT_OVERRIDE_SPEED_2000M BIT(6) /* BCM5301X only, requires setting 1000M */
#define PORT_OVERRIDE_EN BIT(7) /* Use the register contents */
-/* Power-down mode control */
+/* Power-down mode control (8 bit) */
#define B53_PD_MODE_CTRL_25 0x0f
+#define PD_MODE_PORT_MASK 0x1f
+/* Bit 0 also powers down the switch. */
+#define PD_MODE_POWER_DOWN_PORT(i) BIT(i)
/* IP Multicast control (8 bit) */
#define B53_IP_MULTICAST_CTRL 0x21
-#define B53_IPMC_FWD_EN BIT(1)
+#define B53_IP_MC BIT(0)
#define B53_UC_FWD_EN BIT(6)
#define B53_MC_FWD_EN BIT(7)
@@ -111,6 +119,10 @@
#define B53_SWITCH_CTRL 0x22
#define B53_MII_DUMB_FWDG_EN BIT(6)
+/* Protected Port Selection (16 bit) */
+#define B53_PROTECTED_PORT_SEL 0x24
+#define B53_PROTECTED_PORT_SEL_25 0x26
+
/* (16 bit) */
#define B53_UC_FLOOD_MASK 0x32
#define B53_MC_FLOOD_MASK 0x34
@@ -217,6 +229,13 @@
#define BRCM_HDR_P5_EN BIT(1) /* Enable tagging on port 5 */
#define BRCM_HDR_P7_EN BIT(2) /* Enable tagging on port 7 */
+/* Aging Time control register (32 bit) */
+#define B53_AGING_TIME_CONTROL 0x06
+#define B53_AGING_TIME_CONTROL_63XX 0x08
+#define AGE_CHANGE BIT(20)
+#define AGE_TIME_MASK 0x7ffff
+#define AGE_TIME_MAX 1048575
+
/* Mirror capture control register (16 bit) */
#define B53_MIR_CAP_CTL 0x10
#define CAP_PORT_MASK 0xf
@@ -310,13 +329,12 @@
#define B53_ARLTBL_MAC_VID_ENTRY(n) ((0x10 * (n)) + 0x10)
#define ARLTBL_MAC_MASK 0xffffffffffffULL
#define ARLTBL_VID_S 48
-#define ARLTBL_VID_MASK_25 0xff
#define ARLTBL_VID_MASK 0xfff
#define ARLTBL_DATA_PORT_ID_S_25 48
-#define ARLTBL_DATA_PORT_ID_MASK_25 0xf
-#define ARLTBL_AGE_25 BIT(61)
-#define ARLTBL_STATIC_25 BIT(62)
-#define ARLTBL_VALID_25 BIT(63)
+#define ARLTBL_DATA_PORT_ID_MASK_25 GENMASK_ULL(53, 48)
+#define ARLTBL_AGE_25 BIT_ULL(61)
+#define ARLTBL_STATIC_25 BIT_ULL(62)
+#define ARLTBL_VALID_25 BIT_ULL(63)
/* ARL Table Data Entry N Registers (32 bit) */
#define B53_ARLTBL_DATA_ENTRY(n) ((0x10 * (n)) + 0x18)
@@ -326,12 +344,23 @@
#define ARLTBL_STATIC BIT(15)
#define ARLTBL_VALID BIT(16)
+/* BCM5389 ARL Table Data Entry N Register format (16 bit) */
+#define ARLTBL_DATA_PORT_ID_MASK_89 GENMASK(8, 0)
+#define ARLTBL_TC_MASK_89 GENMASK(12, 10)
+#define ARLTBL_AGE_89 BIT(13)
+#define ARLTBL_STATIC_89 BIT(14)
+#define ARLTBL_VALID_89 BIT(15)
+
+/* BCM5325/BCM5365 ARL Table VID Entry N Registers (8 bit) */
+#define B53_ARLTBL_VID_ENTRY_25(n) ((0x2 * (n)) + 0x30)
+
/* Maximum number of bin entries in the ARL for all switches */
#define B53_ARLTBL_MAX_BIN_ENTRIES 4
/* ARL Search Control Register (8 bit) */
#define B53_ARL_SRCH_CTL 0x50
#define B53_ARL_SRCH_CTL_25 0x20
+#define B53_ARL_SRCH_CTL_89 0x30
#define ARL_SRCH_VLID BIT(0)
#define ARL_SRCH_STDN BIT(7)
@@ -339,22 +368,54 @@
#define B53_ARL_SRCH_ADDR 0x51
#define B53_ARL_SRCH_ADDR_25 0x22
#define B53_ARL_SRCH_ADDR_65 0x24
+#define B53_ARL_SRCH_ADDR_89 0x31
+#define B53_ARL_SRCH_ADDR_63XX 0x32
#define ARL_ADDR_MASK GENMASK(14, 0)
/* ARL Search MAC/VID Result (64 bit) */
#define B53_ARL_SRCH_RSTL_0_MACVID 0x60
+#define B53_ARL_SRCH_RSLT_MACVID_89 0x33
+#define B53_ARL_SRCH_RSLT_MACVID_63XX 0x34
-/* Single register search result on 5325 */
+/* Single register search result on 5325/5365 */
#define B53_ARL_SRCH_RSTL_0_MACVID_25 0x24
-/* Single register search result on 5365 */
-#define B53_ARL_SRCH_RSTL_0_MACVID_65 0x30
+#define ARL_SRCH_RSLT_PORT_ID_S_25 48
+#define ARL_SRCH_RSLT_PORT_ID_MASK_25 GENMASK_ULL(52, 48)
+#define ARL_SRCH_RSLT_VID_S_25 53
+#define ARL_SRCH_RSLT_VID_MASK_25 GENMASK_ULL(60, 53)
+
+/* BCM5325/5365 Search result extend register (8 bit) */
+#define B53_ARL_SRCH_RSLT_EXT_25 0x2c
+#define ARL_SRCH_RSLT_EXT_MC_MII BIT(2)
/* ARL Search Data Result (32 bit) */
#define B53_ARL_SRCH_RSTL_0 0x68
+/* BCM5389 ARL Search Data Result (16 bit) */
+#define B53_ARL_SRCH_RSLT_89 0x3b
+
#define B53_ARL_SRCH_RSTL_MACVID(x) (B53_ARL_SRCH_RSTL_0_MACVID + ((x) * 0x10))
#define B53_ARL_SRCH_RSTL(x) (B53_ARL_SRCH_RSTL_0 + ((x) * 0x10))
+/* 63XX ARL Search Data Result (16 bit) */
+#define B53_ARL_SRCH_RSLT_63XX 0x3c
+#define ARL_SRST_PORT_ID_MASK_63XX GENMASK(9, 1)
+#define ARL_SRST_TC_MASK_63XX GENMASK(13, 11)
+#define ARL_SRST_AGE_63XX BIT(14)
+#define ARL_SRST_STATIC_63XX BIT(15)
+
+/*************************************************************************
+ * IEEE 802.1X Registers
+ *************************************************************************/
+
+/* Multicast DLF Drop Control register (16 bit) */
+#define B53_IEEE_MCAST_DLF 0x94
+#define B53_IEEE_MCAST_DROP_EN BIT(11)
+
+/* Unicast DLF Drop Control register (16 bit) */
+#define B53_IEEE_UCAST_DLF 0x96
+#define B53_IEEE_UCAST_DROP_EN BIT(11)
+
/*************************************************************************
* Port VLAN Registers
*************************************************************************/
@@ -481,6 +542,17 @@
#define JMS_MAX_SIZE 9724
/*************************************************************************
+ * EAP Page Registers
+ *************************************************************************/
+#define B53_PORT_EAP_CONF(i) (0x20 + 8 * (i))
+#define EAP_MODE_SHIFT 51
+#define EAP_MODE_SHIFT_63XX 50
+#define EAP_MODE_MASK (0x3ull << EAP_MODE_SHIFT)
+#define EAP_MODE_MASK_63XX (0x3ull << EAP_MODE_SHIFT_63XX)
+#define EAP_MODE_BASIC 0
+#define EAP_MODE_SIMPLIFIED 3
+
+/*************************************************************************
* EEE Configuration Page Registers
*************************************************************************/
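
Per the masks above, the BCM5325/5365 search-result word packs the MAC into bits 0..47, the port ID into bits 48..52 and the VID into bits 53..60. A small stand-alone example of decoding such a word, with GENMASK_ULL() redefined locally and a made-up sample value:

#include <stdint.h>
#include <stdio.h>

/* User-space rendition of GENMASK_ULL(h, l): bits l..h set, all others clear. */
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

/* Field positions copied from the register definitions above. */
#define PORT_ID_S	48
#define PORT_ID_MASK	GENMASK_ULL(52, 48)
#define VID_S		53
#define VID_MASK	GENMASK_ULL(60, 53)

int main(void)
{
	/* Example word: MAC 00:10:18:aa:bb:cc, port 4, VID 7 (made-up values). */
	uint64_t mac_vid = 0x001018aabbccULL;

	mac_vid |= (uint64_t)4 << PORT_ID_S;
	mac_vid |= (uint64_t)7 << VID_S;

	printf("port=%llu vid=%llu\n",
	       (unsigned long long)((mac_vid & PORT_ID_MASK) >> PORT_ID_S),
	       (unsigned long long)((mac_vid & VID_MASK) >> VID_S));
	return 0;
}
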
diff --git a/drivers/net/dsa/b53/b53_serdes.c b/drivers/net/dsa/b53/b53_serdes.c
index 3f8a491ce885..7460122f6abc 100644
--- a/drivers/net/dsa/b53/b53_serdes.c
+++ b/drivers/net/dsa/b53/b53_serdes.c
@@ -99,8 +99,8 @@ static void b53_serdes_an_restart(struct phylink_pcs *pcs)
SERDES_MII_BLK, reg);
}
-static void b53_serdes_get_state(struct phylink_pcs *pcs,
- struct phylink_link_state *state)
+static void b53_serdes_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
+ struct phylink_link_state *state)
{
struct b53_device *dev = pcs_to_b53_pcs(pcs)->dev;
u8 lane = pcs_to_b53_pcs(pcs)->lane;
@@ -239,7 +239,6 @@ int b53_serdes_init(struct b53_device *dev, int port)
pcs->dev = dev;
pcs->lane = lane;
pcs->pcs.ops = &b53_pcs_ops;
- pcs->pcs.neg_mode = true;
return 0;
}
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 43bde1f583ff..960685596093 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -1230,10 +1230,12 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
.resume = bcm_sf2_sw_resume,
.get_wol = bcm_sf2_sw_get_wol,
.set_wol = bcm_sf2_sw_set_wol,
+ .port_setup = b53_setup_port,
.port_enable = bcm_sf2_port_setup,
.port_disable = bcm_sf2_port_disable,
- .get_mac_eee = b53_get_mac_eee,
+ .support_eee = b53_support_eee,
.set_mac_eee = b53_set_mac_eee,
+ .set_ageing_time = b53_set_ageing_time,
.port_bridge_join = b53_br_join,
.port_bridge_leave = b53_br_leave,
.port_pre_bridge_flags = b53_br_flags_pre,
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index adbab544c60f..4a416f2717ba 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -17,7 +17,19 @@
#include <linux/dsa/loop.h>
#include <net/dsa.h>
-#include "dsa_loop.h"
+#define DSA_LOOP_NUM_PORTS 6
+#define DSA_LOOP_CPU_PORT (DSA_LOOP_NUM_PORTS - 1)
+#define NUM_FIXED_PHYS (DSA_LOOP_NUM_PORTS - 2)
+
+struct dsa_loop_pdata {
+ /* Must be first, such that dsa_register_switch() can access this
+ * without gory pointer manipulations
+ */
+ struct dsa_chip_data cd;
+ const char *name;
+ unsigned int enabled_ports;
+ const char *netdev;
+};
static struct dsa_loop_mib_entry dsa_loop_mibs[] = {
[DSA_LOOP_PHY_READ_OK] = { "phy_read_ok", },
@@ -27,6 +39,7 @@ static struct dsa_loop_mib_entry dsa_loop_mibs[] = {
};
static struct phy_device *phydevs[PHY_MAX_ADDR];
+static struct mdio_device *switch_mdiodev;
enum dsa_loop_devlink_resource_id {
DSA_LOOP_DEVLINK_PARAM_ID_VTU,
@@ -382,34 +395,68 @@ static struct mdio_driver dsa_loop_drv = {
.shutdown = dsa_loop_drv_shutdown,
};
-#define NUM_FIXED_PHYS (DSA_LOOP_NUM_PORTS - 2)
-
static void dsa_loop_phydevs_unregister(void)
{
- unsigned int i;
-
- for (i = 0; i < NUM_FIXED_PHYS; i++)
- if (!IS_ERR(phydevs[i])) {
+ for (int i = 0; i < NUM_FIXED_PHYS; i++) {
+ if (!IS_ERR(phydevs[i]))
fixed_phy_unregister(phydevs[i]);
- phy_device_free(phydevs[i]);
- }
+ }
}
-static int __init dsa_loop_init(void)
+static int __init dsa_loop_create_switch_mdiodev(void)
{
- struct fixed_phy_status status = {
- .link = 1,
- .speed = SPEED_100,
- .duplex = DUPLEX_FULL,
+ static struct dsa_loop_pdata dsa_loop_pdata = {
+ .cd = {
+ .port_names[0] = "lan1",
+ .port_names[1] = "lan2",
+ .port_names[2] = "lan3",
+ .port_names[3] = "lan4",
+ .port_names[DSA_LOOP_CPU_PORT] = "cpu",
+ },
+ .name = "DSA mockup driver",
+ .enabled_ports = 0x1f,
+ .netdev = "eth0",
};
- unsigned int i, ret;
+ struct mii_bus *bus;
+ int ret = -ENODEV;
+
+ bus = mdio_find_bus("fixed-0");
+ if (WARN_ON(!bus))
+ return ret;
+
+ switch_mdiodev = mdio_device_create(bus, 31);
+ if (IS_ERR(switch_mdiodev))
+ goto out;
+
+ strscpy(switch_mdiodev->modalias, "dsa-loop");
+ switch_mdiodev->dev.platform_data = &dsa_loop_pdata;
+
+ ret = mdio_device_register(switch_mdiodev);
+ if (ret)
+ mdio_device_free(switch_mdiodev);
+out:
+ put_device(&bus->dev);
+ return ret;
+}
+
+static int __init dsa_loop_init(void)
+{
+ unsigned int i;
+ int ret;
+
+ ret = dsa_loop_create_switch_mdiodev();
+ if (ret)
+ return ret;
for (i = 0; i < NUM_FIXED_PHYS; i++)
- phydevs[i] = fixed_phy_register(PHY_POLL, &status, NULL);
+ phydevs[i] = fixed_phy_register_100fd();
ret = mdio_driver_register(&dsa_loop_drv);
- if (ret)
+ if (ret) {
dsa_loop_phydevs_unregister();
+ mdio_device_remove(switch_mdiodev);
+ mdio_device_free(switch_mdiodev);
+ }
return ret;
}
@@ -419,10 +466,11 @@ static void __exit dsa_loop_exit(void)
{
mdio_driver_unregister(&dsa_loop_drv);
dsa_loop_phydevs_unregister();
+ mdio_device_remove(switch_mdiodev);
+ mdio_device_free(switch_mdiodev);
}
module_exit(dsa_loop_exit);
-MODULE_SOFTDEP("pre: dsa_loop_bdinfo");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Florian Fainelli");
MODULE_DESCRIPTION("DSA loopback driver");
diff --git a/drivers/net/dsa/dsa_loop.h b/drivers/net/dsa/dsa_loop.h
deleted file mode 100644
index 93e5c15d0efd..000000000000
--- a/drivers/net/dsa/dsa_loop.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DSA_LOOP_H
-#define __DSA_LOOP_H
-
-struct dsa_chip_data;
-
-struct dsa_loop_pdata {
- /* Must be first, such that dsa_register_switch() can access this
- * without gory pointer manipulations
- */
- struct dsa_chip_data cd;
- const char *name;
- unsigned int enabled_ports;
- const char *netdev;
-};
-
-#define DSA_LOOP_NUM_PORTS 6
-#define DSA_LOOP_CPU_PORT (DSA_LOOP_NUM_PORTS - 1)
-
-#endif /* __DSA_LOOP_H */
diff --git a/drivers/net/dsa/dsa_loop_bdinfo.c b/drivers/net/dsa/dsa_loop_bdinfo.c
deleted file mode 100644
index 14ca42491512..000000000000
--- a/drivers/net/dsa/dsa_loop_bdinfo.c
+++ /dev/null
@@ -1,36 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/phy.h>
-#include <net/dsa.h>
-
-#include "dsa_loop.h"
-
-static struct dsa_loop_pdata dsa_loop_pdata = {
- .cd = {
- .port_names[0] = "lan1",
- .port_names[1] = "lan2",
- .port_names[2] = "lan3",
- .port_names[3] = "lan4",
- .port_names[DSA_LOOP_CPU_PORT] = "cpu",
- },
- .name = "DSA mockup driver",
- .enabled_ports = 0x1f,
- .netdev = "eth0",
-};
-
-static const struct mdio_board_info bdinfo = {
- .bus_id = "fixed-0",
- .modalias = "dsa-loop",
- .mdio_addr = 31,
- .platform_data = &dsa_loop_pdata,
-};
-
-static int __init dsa_loop_bdinfo_init(void)
-{
- return mdiobus_register_board_info(&bdinfo, 1);
-}
-arch_initcall(dsa_loop_bdinfo_init)
-
-MODULE_DESCRIPTION("DSA mock-up switch driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c
index 283ec5a6e23c..dd5f263ab984 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.c
+++ b/drivers/net/dsa/hirschmann/hellcreek.c
@@ -1061,7 +1061,7 @@ static void hellcreek_setup_tc_identity_mapping(struct hellcreek *hellcreek)
static int hellcreek_setup_fdb(struct hellcreek *hellcreek)
{
- static struct hellcreek_fdb_entry l2_ptp = {
+ static const struct hellcreek_fdb_entry l2_ptp = {
/* MAC: 01-1B-19-00-00-00 */
.mac = { 0x01, 0x1b, 0x19, 0x00, 0x00, 0x00 },
.portmask = 0x03, /* Management ports */
@@ -1072,7 +1072,7 @@ static int hellcreek_setup_fdb(struct hellcreek *hellcreek)
.reprio_tc = 6, /* TC: 6 as per IEEE 802.1AS */
.reprio_en = 1,
};
- static struct hellcreek_fdb_entry udp4_ptp = {
+ static const struct hellcreek_fdb_entry udp4_ptp = {
/* MAC: 01-00-5E-00-01-81 */
.mac = { 0x01, 0x00, 0x5e, 0x00, 0x01, 0x81 },
.portmask = 0x03, /* Management ports */
@@ -1083,7 +1083,7 @@ static int hellcreek_setup_fdb(struct hellcreek *hellcreek)
.reprio_tc = 6,
.reprio_en = 1,
};
- static struct hellcreek_fdb_entry udp6_ptp = {
+ static const struct hellcreek_fdb_entry udp6_ptp = {
/* MAC: 33-33-00-00-01-81 */
.mac = { 0x33, 0x33, 0x00, 0x00, 0x01, 0x81 },
.portmask = 0x03, /* Management ports */
@@ -1094,7 +1094,7 @@ static int hellcreek_setup_fdb(struct hellcreek *hellcreek)
.reprio_tc = 6,
.reprio_en = 1,
};
- static struct hellcreek_fdb_entry l2_p2p = {
+ static const struct hellcreek_fdb_entry l2_p2p = {
/* MAC: 01-80-C2-00-00-0E */
.mac = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e },
.portmask = 0x03, /* Management ports */
@@ -1105,7 +1105,7 @@ static int hellcreek_setup_fdb(struct hellcreek *hellcreek)
.reprio_tc = 6, /* TC: 6 as per IEEE 802.1AS */
.reprio_en = 1,
};
- static struct hellcreek_fdb_entry udp4_p2p = {
+ static const struct hellcreek_fdb_entry udp4_p2p = {
/* MAC: 01-00-5E-00-00-6B */
.mac = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x6b },
.portmask = 0x03, /* Management ports */
@@ -1116,7 +1116,7 @@ static int hellcreek_setup_fdb(struct hellcreek *hellcreek)
.reprio_tc = 6,
.reprio_en = 1,
};
- static struct hellcreek_fdb_entry udp6_p2p = {
+ static const struct hellcreek_fdb_entry udp6_p2p = {
/* MAC: 33-33-00-00-00-6B */
.mac = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x6b },
.portmask = 0x03, /* Management ports */
@@ -1127,7 +1127,7 @@ static int hellcreek_setup_fdb(struct hellcreek *hellcreek)
.reprio_tc = 6,
.reprio_en = 1,
};
- static struct hellcreek_fdb_entry stp = {
+ static const struct hellcreek_fdb_entry stp = {
/* MAC: 01-80-C2-00-00-00 */
.mac = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 },
.portmask = 0x03, /* Management ports */
@@ -1320,13 +1320,13 @@ static int hellcreek_devlink_region_fdb_snapshot(struct devlink *dl,
return 0;
}
-static struct devlink_region_ops hellcreek_region_vlan_ops = {
+static const struct devlink_region_ops hellcreek_region_vlan_ops = {
.name = "vlan",
.snapshot = hellcreek_devlink_region_vlan_snapshot,
.destructor = kfree,
};
-static struct devlink_region_ops hellcreek_region_fdb_ops = {
+static const struct devlink_region_ops hellcreek_region_fdb_ops = {
.name = "fdb",
.snapshot = hellcreek_devlink_region_fdb_snapshot,
.destructor = kfree,
@@ -1335,7 +1335,7 @@ static struct devlink_region_ops hellcreek_region_fdb_ops = {
static int hellcreek_setup_devlink_regions(struct dsa_switch *ds)
{
struct hellcreek *hellcreek = ds->priv;
- struct devlink_region_ops *ops;
+ const struct devlink_region_ops *ops;
struct devlink_region *region;
u64 size;
int ret;
@@ -1926,6 +1926,8 @@ static const struct dsa_switch_ops hellcreek_ds_ops = {
.port_vlan_filtering = hellcreek_vlan_filtering,
.setup = hellcreek_setup,
.teardown = hellcreek_teardown,
+ .port_hsr_join = dsa_port_simple_hsr_join,
+ .port_hsr_leave = dsa_port_simple_hsr_leave,
};
static int hellcreek_probe(struct platform_device *pdev)
diff --git a/drivers/net/dsa/hirschmann/hellcreek.h b/drivers/net/dsa/hirschmann/hellcreek.h
index 9c2ed2ba79da..bebf0d3ff330 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.h
+++ b/drivers/net/dsa/hirschmann/hellcreek.h
@@ -244,7 +244,7 @@ struct hellcreek_port_hwtstamp {
struct sk_buff *tx_skb;
/* Current timestamp configuration */
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
};
struct hellcreek_port {
diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
index ca2500aba96f..99941ff1ebf9 100644
--- a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
+++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
@@ -40,7 +40,7 @@ int hellcreek_get_ts_info(struct dsa_switch *ds, int port,
* the user requested what is actually available or not
*/
static int hellcreek_set_hwtstamp_config(struct hellcreek *hellcreek, int port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
struct hellcreek_port_hwtstamp *ps =
&hellcreek->ports[port].port_hwtstamp;
@@ -110,41 +110,35 @@ static int hellcreek_set_hwtstamp_config(struct hellcreek *hellcreek, int port,
}
int hellcreek_port_hwtstamp_set(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct hellcreek *hellcreek = ds->priv;
struct hellcreek_port_hwtstamp *ps;
- struct hwtstamp_config config;
int err;
ps = &hellcreek->ports[port].port_hwtstamp;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- err = hellcreek_set_hwtstamp_config(hellcreek, port, &config);
+ err = hellcreek_set_hwtstamp_config(hellcreek, port, config);
if (err)
return err;
/* Save the chosen configuration to be returned later */
- memcpy(&ps->tstamp_config, &config, sizeof(config));
+ ps->tstamp_config = *config;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
int hellcreek_port_hwtstamp_get(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *config)
{
struct hellcreek *hellcreek = ds->priv;
struct hellcreek_port_hwtstamp *ps;
- struct hwtstamp_config *config;
ps = &hellcreek->ports[port].port_hwtstamp;
- config = &ps->tstamp_config;
+ *config = ps->tstamp_config;
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
- -EFAULT : 0;
+ return 0;
}
/* Returns a pointer to the PTP header if the caller should time stamp, or NULL
diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
index 7d88da2134f2..388821c4aa10 100644
--- a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
+++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
@@ -38,9 +38,10 @@
#define TX_TSTAMP_TIMEOUT msecs_to_jiffies(40)
int hellcreek_port_hwtstamp_set(struct dsa_switch *ds, int port,
- struct ifreq *ifr);
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
int hellcreek_port_hwtstamp_get(struct dsa_switch *ds, int port,
- struct ifreq *ifr);
+ struct kernel_hwtstamp_config *config);
bool hellcreek_port_rxtstamp(struct dsa_switch *ds, int port,
struct sk_buff *clone, unsigned int type);
diff --git a/drivers/net/dsa/hirschmann/hellcreek_ptp.c b/drivers/net/dsa/hirschmann/hellcreek_ptp.c
index bfe21f9f7dcd..cb23bea9c21b 100644
--- a/drivers/net/dsa/hirschmann/hellcreek_ptp.c
+++ b/drivers/net/dsa/hirschmann/hellcreek_ptp.c
@@ -376,8 +376,18 @@ static int hellcreek_led_setup(struct hellcreek *hellcreek)
hellcreek_set_brightness(hellcreek, STATUS_OUT_IS_GM, 1);
/* Register both leds */
- led_classdev_register(hellcreek->dev, &hellcreek->led_sync_good);
- led_classdev_register(hellcreek->dev, &hellcreek->led_is_gm);
+ ret = led_classdev_register(hellcreek->dev, &hellcreek->led_sync_good);
+ if (ret) {
+ dev_err(hellcreek->dev, "Failed to register sync_good LED\n");
+ goto out;
+ }
+
+ ret = led_classdev_register(hellcreek->dev, &hellcreek->led_is_gm);
+ if (ret) {
+ dev_err(hellcreek->dev, "Failed to register is_gm LED\n");
+ led_classdev_unregister(&hellcreek->led_sync_good);
+ goto out;
+ }
ret = 0;
diff --git a/drivers/net/dsa/ks8995.c b/drivers/net/dsa/ks8995.c
new file mode 100644
index 000000000000..77d8b842693c
--- /dev/null
+++ b/drivers/net/dsa/ks8995.c
@@ -0,0 +1,857 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SPI driver for Micrel/Kendin KS8995M and KSZ8864RMN ethernet switches
+ *
+ * Copyright (C) 2008 Gabor Juhos <juhosg at openwrt.org>
+ * Copyright (C) 2025 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * This file was based on: drivers/spi/at25.c
+ * Copyright (C) 2006 David Brownell
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bits.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+#include <linux/spi/spi.h>
+#include <net/dsa.h>
+
+#define DRV_VERSION "0.1.1"
+#define DRV_DESC "Micrel KS8995 Ethernet switch SPI driver"
+
+/* ------------------------------------------------------------------------ */
+
+#define KS8995_REG_ID0 0x00 /* Chip ID0 */
+#define KS8995_REG_ID1 0x01 /* Chip ID1 */
+
+#define KS8995_REG_GC0 0x02 /* Global Control 0 */
+
+#define KS8995_GC0_P5_PHY BIT(3) /* Port 5 PHY enabled */
+
+#define KS8995_REG_GC1 0x03 /* Global Control 1 */
+#define KS8995_REG_GC2 0x04 /* Global Control 2 */
+
+#define KS8995_GC2_HUGE BIT(2) /* Huge packet support */
+#define KS8995_GC2_LEGAL BIT(1) /* Legal size override */
+
+#define KS8995_REG_GC3 0x05 /* Global Control 3 */
+#define KS8995_REG_GC4 0x06 /* Global Control 4 */
+
+#define KS8995_GC4_10BT BIT(4) /* Force switch to 10Mbit */
+#define KS8995_GC4_MII_FLOW BIT(5) /* MII full-duplex flow control enable */
+#define KS8995_GC4_MII_HD BIT(6) /* MII half-duplex mode enable */
+
+#define KS8995_REG_GC5 0x07 /* Global Control 5 */
+#define KS8995_REG_GC6 0x08 /* Global Control 6 */
+#define KS8995_REG_GC7 0x09 /* Global Control 7 */
+#define KS8995_REG_GC8 0x0a /* Global Control 8 */
+#define KS8995_REG_GC9 0x0b /* Global Control 9 */
+
+#define KS8995_GC9_SPECIAL BIT(0) /* Special tagging mode (DSA) */
+
+/* In DSA the ports 1-4 are numbered 0-3 and the CPU port is port 4 */
+#define KS8995_REG_PC(p, r) (0x10 + (0x10 * (p)) + (r)) /* Port Control */
+#define KS8995_REG_PS(p, r) (0x1e + (0x10 * (p)) + (r)) /* Port Status */
+
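As a quick illustration of the per-port register arithmetic behind the KS8995_REG_PC()/KS8995_REG_PS() macros above, here is a minimal standalone C sketch (an editorial illustration, not driver code); the port and register numbers are arbitrary examples.

/* Standalone sketch mirroring the KS8995 per-port register macros above. */
#include <stdio.h>

#define REG_PC(p, r) (0x10 + (0x10 * (p)) + (r)) /* Port Control */
#define REG_PS(p, r) (0x1e + (0x10 * (p)) + (r)) /* Port Status */

int main(void)
{
	/* DSA port 0 (front-panel port 1), Port Control 2 -> 0x12 */
	printf("PC(0, 2) = 0x%02x\n", REG_PC(0, 0x02));
	/* CPU port (DSA port 4), Port Control 2 -> 0x52 */
	printf("PC(4, 2) = 0x%02x\n", REG_PC(4, 0x02));
	/* DSA port 0, Port Status 0 -> 0x1e */
	printf("PS(0, 0) = 0x%02x\n", REG_PS(0, 0x00));
	return 0;
}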
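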
+#define KS8995_REG_PC0 0x00 /* Port Control 0 */
+#define KS8995_REG_PC1 0x01 /* Port Control 1 */
+#define KS8995_REG_PC2 0x02 /* Port Control 2 */
+#define KS8995_REG_PC3 0x03 /* Port Control 3 */
+#define KS8995_REG_PC4 0x04 /* Port Control 4 */
+#define KS8995_REG_PC5 0x05 /* Port Control 5 */
+#define KS8995_REG_PC6 0x06 /* Port Control 6 */
+#define KS8995_REG_PC7 0x07 /* Port Control 7 */
+#define KS8995_REG_PC8 0x08 /* Port Control 8 */
+#define KS8995_REG_PC9 0x09 /* Port Control 9 */
+#define KS8995_REG_PC10 0x0a /* Port Control 10 */
+#define KS8995_REG_PC11 0x0b /* Port Control 11 */
+#define KS8995_REG_PC12 0x0c /* Port Control 12 */
+#define KS8995_REG_PC13 0x0d /* Port Control 13 */
+
+#define KS8995_PC0_TAG_INS BIT(2) /* Enable tag insertion on port */
+#define KS8995_PC0_TAG_REM BIT(1) /* Enable tag removal on port */
+#define KS8995_PC0_PRIO_EN BIT(0) /* Enable priority handling */
+
+#define KS8995_PC2_TXEN BIT(2) /* Enable TX on port */
+#define KS8995_PC2_RXEN BIT(1) /* Enable RX on port */
+#define KS8995_PC2_LEARN_DIS BIT(0) /* Disable learning on port */
+
+#define KS8995_PC13_TXDIS BIT(6) /* Disable transmitter */
+#define KS8995_PC13_PWDN BIT(3) /* Power down */
+
+#define KS8995_REG_TPC0 0x60 /* TOS Priority Control 0 */
+#define KS8995_REG_TPC1 0x61 /* TOS Priority Control 1 */
+#define KS8995_REG_TPC2 0x62 /* TOS Priority Control 2 */
+#define KS8995_REG_TPC3 0x63 /* TOS Priority Control 3 */
+#define KS8995_REG_TPC4 0x64 /* TOS Priority Control 4 */
+#define KS8995_REG_TPC5 0x65 /* TOS Priority Control 5 */
+#define KS8995_REG_TPC6 0x66 /* TOS Priority Control 6 */
+#define KS8995_REG_TPC7 0x67 /* TOS Priority Control 7 */
+
+#define KS8995_REG_MAC0 0x68 /* MAC address 0 */
+#define KS8995_REG_MAC1 0x69 /* MAC address 1 */
+#define KS8995_REG_MAC2 0x6a /* MAC address 2 */
+#define KS8995_REG_MAC3 0x6b /* MAC address 3 */
+#define KS8995_REG_MAC4 0x6c /* MAC address 4 */
+#define KS8995_REG_MAC5 0x6d /* MAC address 5 */
+
+#define KS8995_REG_IAC0 0x6e /* Indirect Access Control 0 */
+#define KS8995_REG_IAC1 0x6f /* Indirect Access Control 1 */
+#define KS8995_REG_IAD7 0x70 /* Indirect Access Data 7 */
+#define KS8995_REG_IAD6 0x71 /* Indirect Access Data 6 */
+#define KS8995_REG_IAD5 0x72 /* Indirect Access Data 5 */
+#define KS8995_REG_IAD4 0x73 /* Indirect Access Data 4 */
+#define KS8995_REG_IAD3 0x74 /* Indirect Access Data 3 */
+#define KS8995_REG_IAD2 0x75 /* Indirect Access Data 2 */
+#define KS8995_REG_IAD1 0x76 /* Indirect Access Data 1 */
+#define KS8995_REG_IAD0 0x77 /* Indirect Access Data 0 */
+
+#define KSZ8864_REG_ID1 0xfe /* Chip ID in bit 7 */
+
+#define KS8995_REGS_SIZE 0x80
+#define KSZ8864_REGS_SIZE 0x100
+#define KSZ8795_REGS_SIZE 0x100
+
+#define ID1_CHIPID_M 0xf
+#define ID1_CHIPID_S 4
+#define ID1_REVISION_M 0x7
+#define ID1_REVISION_S 1
+#define ID1_START_SW 1 /* start the switch */
+
+#define FAMILY_KS8995 0x95
+#define FAMILY_KSZ8795 0x87
+#define CHIPID_M 0
+#define KS8995_CHIP_ID 0x00
+#define KSZ8864_CHIP_ID 0x01
+#define KSZ8795_CHIP_ID 0x09
+
+#define KS8995_CMD_WRITE 0x02U
+#define KS8995_CMD_READ 0x03U
+
+#define KS8995_CPU_PORT 4
+#define KS8995_NUM_PORTS 5 /* 5 ports including the CPU port */
+#define KS8995_RESET_DELAY 10 /* usec */
+
+enum ks8995_chip_variant {
+ ks8995,
+ ksz8864,
+ ksz8795,
+ max_variant
+};
+
+struct ks8995_chip_params {
+ char *name;
+ int family_id;
+ int chip_id;
+ int regs_size;
+ int addr_width;
+ int addr_shift;
+};
+
+static const struct ks8995_chip_params ks8995_chip[] = {
+ [ks8995] = {
+ .name = "KS8995MA",
+ .family_id = FAMILY_KS8995,
+ .chip_id = KS8995_CHIP_ID,
+ .regs_size = KS8995_REGS_SIZE,
+ .addr_width = 8,
+ .addr_shift = 0,
+ },
+ [ksz8864] = {
+ .name = "KSZ8864RMN",
+ .family_id = FAMILY_KS8995,
+ .chip_id = KSZ8864_CHIP_ID,
+ .regs_size = KSZ8864_REGS_SIZE,
+ .addr_width = 8,
+ .addr_shift = 0,
+ },
+ [ksz8795] = {
+ .name = "KSZ8795CLX",
+ .family_id = FAMILY_KSZ8795,
+ .chip_id = KSZ8795_CHIP_ID,
+ .regs_size = KSZ8795_REGS_SIZE,
+ .addr_width = 12,
+ .addr_shift = 1,
+ },
+};
+
+struct ks8995_switch {
+ struct spi_device *spi;
+ struct device *dev;
+ struct dsa_switch *ds;
+ struct mutex lock;
+ struct gpio_desc *reset_gpio;
+ struct bin_attribute regs_attr;
+ const struct ks8995_chip_params *chip;
+ int revision_id;
+ unsigned int max_mtu[KS8995_NUM_PORTS];
+};
+
+static const struct spi_device_id ks8995_id[] = {
+ {"ks8995", ks8995},
+ {"ksz8864", ksz8864},
+ {"ksz8795", ksz8795},
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ks8995_id);
+
+static const struct of_device_id ks8995_spi_of_match[] = {
+ { .compatible = "micrel,ks8995" },
+ { .compatible = "micrel,ksz8864" },
+ { .compatible = "micrel,ksz8795" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ks8995_spi_of_match);
+
+static inline u8 get_chip_id(u8 val)
+{
+ return (val >> ID1_CHIPID_S) & ID1_CHIPID_M;
+}
+
+static inline u8 get_chip_rev(u8 val)
+{
+ return (val >> ID1_REVISION_S) & ID1_REVISION_M;
+}
+
+/* create_spi_cmd - create a chip specific SPI command header
+ * @ks: pointer to switch instance
+ * @cmd: SPI command for switch
+ * @address: register address for command
+ *
+ * Different chip families use different bit pattern to address the switches
+ * registers:
+ *
+ * KS8995: 8bit command + 8bit address
+ * KSZ8795: 3bit command + 12bit address + 1bit TR (?)
+ */
+static inline __be16 create_spi_cmd(struct ks8995_switch *ks, int cmd,
+ unsigned address)
+{
+ u16 result = cmd;
+
+ /* make room for address (incl. address shift) */
+ result <<= ks->chip->addr_width + ks->chip->addr_shift;
+ /* add address */
+ result |= address << ks->chip->addr_shift;
+ /* SPI protocol needs big endian */
+ return cpu_to_be16(result);
+}
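To make the address packing in create_spi_cmd() concrete, the following standalone sketch (an editorial illustration, not driver code) reproduces the same shift-and-or logic for both chip families; reading register 0x01 is just an arbitrary example.

/* Standalone sketch of the SPI command word layout used by create_spi_cmd().
 * KS8995/KSZ8864: 8-bit command + 8-bit address (addr_width 8, addr_shift 0)
 * KSZ8795: 3-bit command + 12-bit address + 1 pad bit (addr_width 12, addr_shift 1)
 */
#include <stdio.h>
#include <stdint.h>

static uint16_t spi_cmd(unsigned int cmd, unsigned int addr,
			int addr_width, int addr_shift)
{
	uint16_t result = cmd;

	result <<= addr_width + addr_shift;	/* make room for the address */
	result |= addr << addr_shift;		/* add the address */
	return result;				/* sent big-endian on the wire */
}

int main(void)
{
	/* KS8995 read of register 0x01 -> 0x0301 */
	printf("KS8995  READ 0x01: 0x%04x\n", spi_cmd(0x03, 0x01, 8, 0));
	/* KSZ8795 read of register 0x01 -> 0x6002 */
	printf("KSZ8795 READ 0x01: 0x%04x\n", spi_cmd(0x03, 0x01, 12, 1));
	return 0;
}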
+/* ------------------------------------------------------------------------ */
+static int ks8995_read(struct ks8995_switch *ks, char *buf,
+ unsigned offset, size_t count)
+{
+ __be16 cmd;
+ struct spi_transfer t[2];
+ struct spi_message m;
+ int err;
+
+ cmd = create_spi_cmd(ks, KS8995_CMD_READ, offset);
+ spi_message_init(&m);
+
+ memset(&t, 0, sizeof(t));
+
+ t[0].tx_buf = &cmd;
+ t[0].len = sizeof(cmd);
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].rx_buf = buf;
+ t[1].len = count;
+ spi_message_add_tail(&t[1], &m);
+
+ mutex_lock(&ks->lock);
+ err = spi_sync(ks->spi, &m);
+ mutex_unlock(&ks->lock);
+
+ return err ? err : count;
+}
+
+static int ks8995_write(struct ks8995_switch *ks, char *buf,
+ unsigned offset, size_t count)
+{
+ __be16 cmd;
+ struct spi_transfer t[2];
+ struct spi_message m;
+ int err;
+
+ cmd = create_spi_cmd(ks, KS8995_CMD_WRITE, offset);
+ spi_message_init(&m);
+
+ memset(&t, 0, sizeof(t));
+
+ t[0].tx_buf = &cmd;
+ t[0].len = sizeof(cmd);
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].tx_buf = buf;
+ t[1].len = count;
+ spi_message_add_tail(&t[1], &m);
+
+ mutex_lock(&ks->lock);
+ err = spi_sync(ks->spi, &m);
+ mutex_unlock(&ks->lock);
+
+ return err ? err : count;
+}
+
+static inline int ks8995_read_reg(struct ks8995_switch *ks, u8 addr, u8 *buf)
+{
+ return ks8995_read(ks, buf, addr, 1) != 1;
+}
+
+static inline int ks8995_write_reg(struct ks8995_switch *ks, u8 addr, u8 val)
+{
+ char buf = val;
+
+ return ks8995_write(ks, &buf, addr, 1) != 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int ks8995_stop(struct ks8995_switch *ks)
+{
+ return ks8995_write_reg(ks, KS8995_REG_ID1, 0);
+}
+
+static int ks8995_start(struct ks8995_switch *ks)
+{
+ return ks8995_write_reg(ks, KS8995_REG_ID1, 1);
+}
+
+static int ks8995_reset(struct ks8995_switch *ks)
+{
+ int err;
+
+ err = ks8995_stop(ks);
+ if (err)
+ return err;
+
+ udelay(KS8995_RESET_DELAY);
+
+ return ks8995_start(ks);
+}
+
+/* ks8995_get_revision - get chip revision
+ * @ks: pointer to switch instance
+ *
+ * Verify chip family and id and get chip revision.
+ */
+static int ks8995_get_revision(struct ks8995_switch *ks)
+{
+ int err;
+ u8 id0, id1, ksz8864_id;
+
+ /* read family id */
+ err = ks8995_read_reg(ks, KS8995_REG_ID0, &id0);
+ if (err) {
+ err = -EIO;
+ goto err_out;
+ }
+
+ /* verify family id */
+ if (id0 != ks->chip->family_id) {
+ dev_err(&ks->spi->dev, "chip family id mismatch: expected 0x%02x but 0x%02x read\n",
+ ks->chip->family_id, id0);
+ err = -ENODEV;
+ goto err_out;
+ }
+
+ switch (ks->chip->family_id) {
+ case FAMILY_KS8995:
+ /* try reading chip id at CHIP ID1 */
+ err = ks8995_read_reg(ks, KS8995_REG_ID1, &id1);
+ if (err) {
+ err = -EIO;
+ goto err_out;
+ }
+
+ /* verify chip id */
+ if ((get_chip_id(id1) == CHIPID_M) &&
+ (get_chip_id(id1) == ks->chip->chip_id)) {
+ /* KS8995MA */
+ ks->revision_id = get_chip_rev(id1);
+ } else if (get_chip_id(id1) != CHIPID_M) {
+ /* KSZ8864RMN */
+ err = ks8995_read_reg(ks, KSZ8864_REG_ID1, &ksz8864_id);
+ if (err) {
+ err = -EIO;
+ goto err_out;
+ }
+
+ if ((ksz8864_id & 0x80) &&
+ (ks->chip->chip_id == KSZ8864_CHIP_ID)) {
+ ks->revision_id = get_chip_rev(id1);
+ }
+
+ } else {
+ dev_err(&ks->spi->dev, "unsupported chip id for KS8995 family: 0x%02x\n",
+ id1);
+ err = -ENODEV;
+ }
+ break;
+ case FAMILY_KSZ8795:
+ /* try reading chip id at CHIP ID1 */
+ err = ks8995_read_reg(ks, KS8995_REG_ID1, &id1);
+ if (err) {
+ err = -EIO;
+ goto err_out;
+ }
+
+ if (get_chip_id(id1) == ks->chip->chip_id) {
+ ks->revision_id = get_chip_rev(id1);
+ } else {
+ dev_err(&ks->spi->dev, "unsupported chip id for KSZ8795 family: 0x%02x\n",
+ id1);
+ err = -ENODEV;
+ }
+ break;
+ default:
+ dev_err(&ks->spi->dev, "unsupported family id: 0x%02x\n", id0);
+ err = -ENODEV;
+ break;
+ }
+err_out:
+ return err;
+}
+
+static int ks8995_check_config(struct ks8995_switch *ks)
+{
+ int ret;
+ u8 val;
+
+ ret = ks8995_read_reg(ks, KS8995_REG_GC0, &val);
+ if (ret) {
+ dev_err(ks->dev, "failed to read KS8995_REG_GC0\n");
+ return ret;
+ }
+
+ dev_dbg(ks->dev, "port 5 PHY %senabled\n",
+ (val & KS8995_GC0_P5_PHY) ? "" : "not ");
+
+ val |= KS8995_GC0_P5_PHY;
+ ret = ks8995_write_reg(ks, KS8995_REG_GC0, val);
+ if (ret)
+ dev_err(ks->dev, "failed to set KS8995_REG_GC0\n");
+
+ dev_dbg(ks->dev, "set KS8995_REG_GC0 to 0x%02x\n", val);
+
+ return 0;
+}
+
+static void
+ks8995_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static void
+ks8995_mac_link_up(struct phylink_config *config, struct phy_device *phydev,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ks8995_switch *ks = dp->ds->priv;
+ int port = dp->index;
+ int ret;
+ u8 val;
+
+ /* Allow forcing the mode on the fixed CPU port, no autonegotiation.
+ * We assume autonegotiation works on the PHY-facing ports.
+ */
+ if (port != KS8995_CPU_PORT)
+ return;
+
+ dev_dbg(ks->dev, "MAC link up on CPU port (%d)\n", port);
+
+ ret = ks8995_read_reg(ks, KS8995_REG_GC4, &val);
+ if (ret) {
+ dev_err(ks->dev, "failed to read KS8995_REG_GC4\n");
+ return;
+ }
+
+ /* Conjure port config */
+ switch (speed) {
+ case SPEED_10:
+ dev_dbg(ks->dev, "set switch MII to 10Mbit mode\n");
+ val |= KS8995_GC4_10BT;
+ break;
+ case SPEED_100:
+ default:
+ dev_dbg(ks->dev, "set switch MII to 100Mbit mode\n");
+ val &= ~KS8995_GC4_10BT;
+ break;
+ }
+
+ if (duplex == DUPLEX_HALF) {
+ dev_dbg(ks->dev, "set switch MII to half duplex\n");
+ val |= KS8995_GC4_MII_HD;
+ } else {
+ dev_dbg(ks->dev, "set switch MII to full duplex\n");
+ val &= ~KS8995_GC4_MII_HD;
+ }
+
+ dev_dbg(ks->dev, "set KS8995_REG_GC4 to %02x\n", val);
+
+ /* Enable the CPU port */
+ ret = ks8995_write_reg(ks, KS8995_REG_GC4, val);
+ if (ret)
+ dev_err(ks->dev, "failed to set KS8995_REG_GC4\n");
+}
+
+static void
+ks8995_mac_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ks8995_switch *ks = dp->ds->priv;
+ int port = dp->index;
+
+ if (port != KS8995_CPU_PORT)
+ return;
+
+ dev_dbg(ks->dev, "MAC link down on CPU port (%d)\n", port);
+
+ /* Disable the CPU port */
+}
+
+static const struct phylink_mac_ops ks8995_phylink_mac_ops = {
+ .mac_config = ks8995_mac_config,
+ .mac_link_up = ks8995_mac_link_up,
+ .mac_link_down = ks8995_mac_link_down,
+};
+
+static enum
+dsa_tag_protocol ks8995_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol mp)
+{
+ /* This switch actually uses the 6 byte KS8995 protocol */
+ return DSA_TAG_PROTO_NONE;
+}
+
+static int ks8995_setup(struct dsa_switch *ds)
+{
+ return 0;
+}
+
+static int ks8995_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct ks8995_switch *ks = ds->priv;
+
+ dev_dbg(ks->dev, "enable port %d\n", port);
+
+ return 0;
+}
+
+static void ks8995_port_disable(struct dsa_switch *ds, int port)
+{
+ struct ks8995_switch *ks = ds->priv;
+
+ dev_dbg(ks->dev, "disable port %d\n", port);
+}
+
+static int ks8995_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ /* We support enabling/disabling learning */
+ if (flags.mask & ~(BR_LEARNING))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ks8995_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct ks8995_switch *ks = ds->priv;
+ int ret;
+ u8 val;
+
+ if (flags.mask & BR_LEARNING) {
+ ret = ks8995_read_reg(ks, KS8995_REG_PC(port, KS8995_REG_PC2), &val);
+ if (ret) {
+ dev_err(ks->dev, "failed to read KS8995_REG_PC2 on port %d\n", port);
+ return ret;
+ }
+
+ if (flags.val & BR_LEARNING)
+ val &= ~KS8995_PC2_LEARN_DIS;
+ else
+ val |= KS8995_PC2_LEARN_DIS;
+
+ ret = ks8995_write_reg(ks, KS8995_REG_PC(port, KS8995_REG_PC2), val);
+ if (ret) {
+ dev_err(ks->dev, "failed to write KS8995_REG_PC2 on port %d\n", port);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void ks8995_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct ks8995_switch *ks = ds->priv;
+ int ret;
+ u8 val;
+
+ ret = ks8995_read_reg(ks, KS8995_REG_PC(port, KS8995_REG_PC2), &val);
+ if (ret) {
+ dev_err(ks->dev, "failed to read KS8995_REG_PC2 on port %d\n", port);
+ return;
+ }
+
+ /* Set the bits for the different STP states in accordance with
+ * the datasheet, pages 36-37 "Spanning tree support".
+ */
+ switch (state) {
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ val &= ~KS8995_PC2_TXEN;
+ val &= ~KS8995_PC2_RXEN;
+ val |= KS8995_PC2_LEARN_DIS;
+ break;
+ case BR_STATE_LEARNING:
+ val &= ~KS8995_PC2_TXEN;
+ val &= ~KS8995_PC2_RXEN;
+ val &= ~KS8995_PC2_LEARN_DIS;
+ break;
+ case BR_STATE_FORWARDING:
+ val |= KS8995_PC2_TXEN;
+ val |= KS8995_PC2_RXEN;
+ val &= ~KS8995_PC2_LEARN_DIS;
+ break;
+ default:
+ dev_err(ks->dev, "unknown bridge state requested\n");
+ return;
+ }
+
+ ret = ks8995_write_reg(ks, KS8995_REG_PC(port, KS8995_REG_PC2), val);
+ if (ret) {
+ dev_err(ks->dev, "failed to write KS8995_REG_PC2 on port %d\n", port);
+ return;
+ }
+
+ dev_dbg(ks->dev, "set KS8995_REG_PC2 for port %d to %02x\n", port, val);
+}
+
+static void ks8995_phylink_get_caps(struct dsa_switch *dsa, int port,
+ struct phylink_config *config)
+{
+ unsigned long *interfaces = config->supported_interfaces;
+
+ if (port == KS8995_CPU_PORT)
+ __set_bit(PHY_INTERFACE_MODE_MII, interfaces);
+
+ if (port <= 3) {
+ /* Internal PHYs */
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL, interfaces);
+ /* phylib default */
+ __set_bit(PHY_INTERFACE_MODE_MII, interfaces);
+ }
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
+}
+
+/* Huge packet support allows frames of up to 1916 bytes "inclusive",
+ * which means that tags are included in that count. If the bit is not
+ * set the limit is 1536 bytes "inclusive". We present the length
+ * without tags or Ethernet headers. The setting affects all ports.
+ */
+static int ks8995_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct ks8995_switch *ks = ds->priv;
+ unsigned int max_mtu;
+ int ret;
+ u8 val;
+ int i;
+
+ ks->max_mtu[port] = new_mtu;
+
+ /* The switch has a single, global frame-size limit, so use the
+ * largest MTU configured on any one port as the limit for the
+ * whole switch.
+ */
+ max_mtu = ETH_DATA_LEN;
+ for (i = 0; i < KS8995_NUM_PORTS; i++) {
+ if (ks->max_mtu[i] > max_mtu)
+ max_mtu = ks->max_mtu[i];
+ }
+
+ /* Translate to layer 2 frame size: add the Ethernet header, a
+ * possible VLAN tag, and the FCS. For ETH_DATA_LEN (1500 bytes)
+ * this adds up to 1522 bytes.
+ */
+ max_mtu += VLAN_ETH_HLEN;
+ max_mtu += ETH_FCS_LEN;
+
+ ret = ks8995_read_reg(ks, KS8995_REG_GC2, &val);
+ if (ret) {
+ dev_err(ks->dev, "failed to read KS8995_REG_GC2\n");
+ return ret;
+ }
+
+ if (max_mtu <= 1522) {
+ val &= ~KS8995_GC2_HUGE;
+ val &= ~KS8995_GC2_LEGAL;
+ } else if (max_mtu > 1522 && max_mtu <= 1536) {
+ /* This accepts packets up to 1536 bytes */
+ val &= ~KS8995_GC2_HUGE;
+ val |= KS8995_GC2_LEGAL;
+ } else {
+ /* This accepts packets up to 1916 bytes */
+ val |= KS8995_GC2_HUGE;
+ val |= KS8995_GC2_LEGAL;
+ }
+
+ dev_dbg(ks->dev, "new max MTU %d bytes (inclusive)\n", max_mtu);
+
+ ret = ks8995_write_reg(ks, KS8995_REG_GC2, val);
+ if (ret)
+ dev_err(ks->dev, "failed to set KS8995_REG_GC2\n");
+
+ return ret;
+}
+
+static int ks8995_get_max_mtu(struct dsa_switch *ds, int port)
+{
+ return 1916 - ETH_HLEN - ETH_FCS_LEN;
+}
+
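A short standalone sketch (an editorial illustration, not driver code) of the frame-size selection in ks8995_change_mtu() above: it maps a few example MTUs to the GC2 HUGE/LEGAL settings, assuming the same 1522/1536/1916 byte cut-offs and the kernel's VLAN_ETH_HLEN of 18 bytes.

/* Standalone sketch of the GC2 HUGE/LEGAL selection done in ks8995_change_mtu(). */
#include <stdio.h>

#define VLAN_ETH_HLEN	18	/* Ethernet header (14) + VLAN tag (4) */
#define ETH_FCS_LEN	4

int main(void)
{
	int mtus[] = { 1500, 1510, 1890 };

	for (int i = 0; i < 3; i++) {
		int l2 = mtus[i] + VLAN_ETH_HLEN + ETH_FCS_LEN;

		if (l2 <= 1522)
			printf("MTU %4d -> %4d bytes: HUGE=0 LEGAL=0 (1522 max)\n", mtus[i], l2);
		else if (l2 <= 1536)
			printf("MTU %4d -> %4d bytes: HUGE=0 LEGAL=1 (1536 max)\n", mtus[i], l2);
		else
			printf("MTU %4d -> %4d bytes: HUGE=1 LEGAL=1 (1916 max)\n", mtus[i], l2);
	}
	return 0;
}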
+static const struct dsa_switch_ops ks8995_ds_ops = {
+ .get_tag_protocol = ks8995_get_tag_protocol,
+ .setup = ks8995_setup,
+ .port_pre_bridge_flags = ks8995_port_pre_bridge_flags,
+ .port_bridge_flags = ks8995_port_bridge_flags,
+ .port_enable = ks8995_port_enable,
+ .port_disable = ks8995_port_disable,
+ .port_stp_state_set = ks8995_port_stp_state_set,
+ .port_change_mtu = ks8995_change_mtu,
+ .port_max_mtu = ks8995_get_max_mtu,
+ .phylink_get_caps = ks8995_phylink_get_caps,
+};
+
+/* ------------------------------------------------------------------------ */
+static int ks8995_probe(struct spi_device *spi)
+{
+ struct ks8995_switch *ks;
+ int err;
+ int variant = spi_get_device_id(spi)->driver_data;
+
+ if (variant >= max_variant) {
+ dev_err(&spi->dev, "bad chip variant %d\n", variant);
+ return -ENODEV;
+ }
+
+ ks = devm_kzalloc(&spi->dev, sizeof(*ks), GFP_KERNEL);
+ if (!ks)
+ return -ENOMEM;
+
+ mutex_init(&ks->lock);
+ ks->spi = spi;
+ ks->dev = &spi->dev;
+ ks->chip = &ks8995_chip[variant];
+
+ ks->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset",
+ GPIOD_OUT_HIGH);
+ err = PTR_ERR_OR_ZERO(ks->reset_gpio);
+ if (err) {
+ dev_err(&spi->dev,
+ "failed to get reset gpio: %d\n", err);
+ return err;
+ }
+
+ err = gpiod_set_consumer_name(ks->reset_gpio, "switch-reset");
+ if (err)
+ return err;
+
+ if (ks->reset_gpio) {
+ /*
+ * If a reset line was obtained, wait for 100us after
+ * de-asserting RESET before accessing any registers, see
+ * the KS8995MA datasheet, page 44.
+ */
+ gpiod_set_value_cansleep(ks->reset_gpio, 0);
+ udelay(100);
+ }
+
+ spi_set_drvdata(spi, ks);
+
+ spi->mode = SPI_MODE_0;
+ spi->bits_per_word = 8;
+ err = spi_setup(spi);
+ if (err) {
+ dev_err(&spi->dev, "spi_setup failed, err=%d\n", err);
+ return err;
+ }
+
+ err = ks8995_get_revision(ks);
+ if (err)
+ return err;
+
+ err = ks8995_reset(ks);
+ if (err)
+ return err;
+
+ dev_info(&spi->dev, "%s device found, Chip ID:%x, Revision:%x\n",
+ ks->chip->name, ks->chip->chip_id, ks->revision_id);
+
+ err = ks8995_check_config(ks);
+ if (err)
+ return err;
+
+ ks->ds = devm_kzalloc(&spi->dev, sizeof(*ks->ds), GFP_KERNEL);
+ if (!ks->ds)
+ return -ENOMEM;
+
+ ks->ds->dev = &spi->dev;
+ ks->ds->num_ports = KS8995_NUM_PORTS;
+ ks->ds->ops = &ks8995_ds_ops;
+ ks->ds->phylink_mac_ops = &ks8995_phylink_mac_ops;
+ ks->ds->priv = ks;
+
+ err = dsa_register_switch(ks->ds);
+ if (err)
+ return dev_err_probe(&spi->dev, err,
+ "unable to register DSA switch\n");
+
+ return 0;
+}
+
+static void ks8995_remove(struct spi_device *spi)
+{
+ struct ks8995_switch *ks = spi_get_drvdata(spi);
+
+ dsa_unregister_switch(ks->ds);
+ /* assert reset */
+ gpiod_set_value_cansleep(ks->reset_gpio, 1);
+}
+
+/* ------------------------------------------------------------------------ */
+static struct spi_driver ks8995_driver = {
+ .driver = {
+ .name = "spi-ks8995",
+ .of_match_table = ks8995_spi_of_match,
+ },
+ .probe = ks8995_probe,
+ .remove = ks8995_remove,
+ .id_table = ks8995_id,
+};
+
+module_spi_driver(ks8995_driver);
+
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR("Gabor Juhos <juhosg at openwrt.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/lantiq/Kconfig b/drivers/net/dsa/lantiq/Kconfig
new file mode 100644
index 000000000000..4a9771be5d58
--- /dev/null
+++ b/drivers/net/dsa/lantiq/Kconfig
@@ -0,0 +1,24 @@
+config NET_DSA_LANTIQ_COMMON
+ tristate
+ select REGMAP
+
+config NET_DSA_LANTIQ_GSWIP
+ tristate "Lantiq / Intel GSWIP"
+ depends on HAS_IOMEM
+ select NET_DSA_TAG_GSWIP
+ select NET_DSA_LANTIQ_COMMON
+ help
+ This enables support for the Lantiq / Intel GSWIP 2.1 found in
+ the xrx200 / VR9 SoC.
+
+config NET_DSA_MXL_GSW1XX
+ tristate "MaxLinear GSW1xx Ethernet switch support"
+ select NET_DSA_TAG_MXL_GSW1XX
+ select NET_DSA_LANTIQ_COMMON
+ help
+ This enables support for the MaxLinear GSW1xx family of 1GE switches
+ GSW120 4 port, 2 PHYs, RGMII & SGMII/2500Base-X
+ GSW125 4 port, 2 PHYs, RGMII & SGMII/2500Base-X, industrial temperature
+ GSW140 6 port, 4 PHYs, RGMII & SGMII/2500Base-X
+ GSW141 6 port, 4 PHYs, RGMII & SGMII
+ GSW145 6 port, 4 PHYs, RGMII & SGMII/2500Base-X, industrial temperature
diff --git a/drivers/net/dsa/lantiq/Makefile b/drivers/net/dsa/lantiq/Makefile
new file mode 100644
index 000000000000..85fce605310b
--- /dev/null
+++ b/drivers/net/dsa/lantiq/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_NET_DSA_LANTIQ_GSWIP) += lantiq_gswip.o
+obj-$(CONFIG_NET_DSA_LANTIQ_COMMON) += lantiq_gswip_common.o
+obj-$(CONFIG_NET_DSA_MXL_GSW1XX) += mxl-gsw1xx.o
diff --git a/drivers/net/dsa/lantiq/lantiq_gswip.c b/drivers/net/dsa/lantiq/lantiq_gswip.c
new file mode 100644
index 000000000000..57dd063c0740
--- /dev/null
+++ b/drivers/net/dsa/lantiq/lantiq_gswip.c
@@ -0,0 +1,518 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Lantiq / Intel GSWIP switch driver for VRX200, xRX300 and xRX330 SoCs
+ *
+ * Copyright (C) 2025 Daniel Golle <daniel@makrotopia.org>
+ * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ * Copyright (C) 2010 Lantiq Deutschland
+ */
+
+#include "lantiq_gswip.h"
+#include "lantiq_pce.h"
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <dt-bindings/mips/lantiq_rcu_gphy.h>
+
+#include <net/dsa.h>
+
+struct xway_gphy_match_data {
+ char *fe_firmware_name;
+ char *ge_firmware_name;
+};
+
+static void gswip_xrx200_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ switch (port) {
+ case 0:
+ case 1:
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_REVMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
+ break;
+
+ case 2:
+ case 3:
+ case 4:
+ case 6:
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ break;
+
+ case 5:
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ break;
+ }
+
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
+}
+
+static void gswip_xrx300_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ switch (port) {
+ case 0:
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
+ break;
+
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 6:
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ break;
+
+ case 5:
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
+ break;
+ }
+
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
+}
+
+static const struct xway_gphy_match_data xrx200a1x_gphy_data = {
+ .fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin",
+ .ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin",
+};
+
+static const struct xway_gphy_match_data xrx200a2x_gphy_data = {
+ .fe_firmware_name = "lantiq/xrx200_phy22f_a22.bin",
+ .ge_firmware_name = "lantiq/xrx200_phy11g_a22.bin",
+};
+
+static const struct xway_gphy_match_data xrx300_gphy_data = {
+ .fe_firmware_name = "lantiq/xrx300_phy22f_a21.bin",
+ .ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin",
+};
+
+static const struct of_device_id xway_gphy_match[] __maybe_unused = {
+ { .compatible = "lantiq,xrx200-gphy-fw", .data = NULL },
+ { .compatible = "lantiq,xrx200a1x-gphy-fw", .data = &xrx200a1x_gphy_data },
+ { .compatible = "lantiq,xrx200a2x-gphy-fw", .data = &xrx200a2x_gphy_data },
+ { .compatible = "lantiq,xrx300-gphy-fw", .data = &xrx300_gphy_data },
+ { .compatible = "lantiq,xrx330-gphy-fw", .data = &xrx300_gphy_data },
+ {},
+};
+
+static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gphy_fw)
+{
+ struct device *dev = priv->dev;
+ const struct firmware *fw;
+ void *fw_addr;
+ dma_addr_t dma_addr;
+ dma_addr_t dev_addr;
+ size_t size;
+ int ret;
+
+ ret = clk_prepare_enable(gphy_fw->clk_gate);
+ if (ret)
+ return ret;
+
+ reset_control_assert(gphy_fw->reset);
+
+ /* The vendor BSP uses a 200ms delay after asserting the reset line.
+ * Without this delay some users observe that the PHY does not come up
+ * on the MDIO bus.
+ */
+ msleep(200);
+
+ ret = request_firmware(&fw, gphy_fw->fw_name, dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to load firmware: %s\n",
+ gphy_fw->fw_name);
+
+ /* GPHY cores need the firmware code in a persistent and contiguous
+ * memory area with a 16 kB boundary aligned start address.
+ */
+ size = fw->size + XRX200_GPHY_FW_ALIGN;
+
+ fw_addr = dmam_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
+ if (fw_addr) {
+ fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN);
+ dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN);
+ memcpy(fw_addr, fw->data, fw->size);
+ } else {
+ release_firmware(fw);
+ return -ENOMEM;
+ }
+
+ release_firmware(fw);
+
+ ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, dev_addr);
+ if (ret)
+ return ret;
+
+ reset_control_deassert(gphy_fw->reset);
+
+ return ret;
+}
+
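The over-allocation by XRX200_GPHY_FW_ALIGN and the PTR_ALIGN/ALIGN calls above guarantee a 16 kB-aligned load address somewhere inside the coherent buffer. A standalone sketch of that rounding (an editorial illustration with a hypothetical address, not driver code):

/* Standalone sketch: round a buffer address up to the next 16 kB boundary,
 * as gswip_gphy_fw_load() does for the GPHY firmware image.
 */
#include <stdio.h>
#include <stdint.h>

#define GPHY_FW_ALIGN	(16 * 1024)
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

int main(void)
{
	uintptr_t dma_addr = 0x20001234;	/* hypothetical coherent buffer address */

	/* 0x20001234 rounds up to 0x20004000 */
	printf("aligned load address: 0x%lx\n",
	       (unsigned long)ALIGN_UP(dma_addr, GPHY_FW_ALIGN));
	return 0;
}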
+static int gswip_gphy_fw_probe(struct gswip_priv *priv,
+ struct gswip_gphy_fw *gphy_fw,
+ struct device_node *gphy_fw_np, int i)
+{
+ struct device *dev = priv->dev;
+ u32 gphy_mode;
+ int ret;
+ char gphyname[10];
+
+ snprintf(gphyname, sizeof(gphyname), "gphy%d", i);
+
+ gphy_fw->clk_gate = devm_clk_get(dev, gphyname);
+ if (IS_ERR(gphy_fw->clk_gate)) {
+ return dev_err_probe(dev, PTR_ERR(gphy_fw->clk_gate),
+ "Failed to lookup gate clock\n");
+ }
+
+ ret = of_property_read_u32(gphy_fw_np, "reg", &gphy_fw->fw_addr_offset);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(gphy_fw_np, "lantiq,gphy-mode", &gphy_mode);
+ /* Default to GE mode */
+ if (ret)
+ gphy_mode = GPHY_MODE_GE;
+
+ switch (gphy_mode) {
+ case GPHY_MODE_FE:
+ gphy_fw->fw_name = priv->gphy_fw_name_cfg->fe_firmware_name;
+ break;
+ case GPHY_MODE_GE:
+ gphy_fw->fw_name = priv->gphy_fw_name_cfg->ge_firmware_name;
+ break;
+ default:
+ return dev_err_probe(dev, -EINVAL, "Unknown GPHY mode %d\n",
+ gphy_mode);
+ }
+
+ gphy_fw->reset = of_reset_control_array_get_exclusive(gphy_fw_np);
+ if (IS_ERR(gphy_fw->reset))
+ return dev_err_probe(dev, PTR_ERR(gphy_fw->reset),
+ "Failed to lookup gphy reset\n");
+
+ return gswip_gphy_fw_load(priv, gphy_fw);
+}
+
+static void gswip_gphy_fw_remove(struct gswip_priv *priv,
+ struct gswip_gphy_fw *gphy_fw)
+{
+ int ret;
+
+ /* check if the device was fully probed */
+ if (!gphy_fw->fw_name)
+ return;
+
+ ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, 0);
+ if (ret)
+ dev_err(priv->dev, "can not reset GPHY FW pointer\n");
+
+ clk_disable_unprepare(gphy_fw->clk_gate);
+
+ reset_control_put(gphy_fw->reset);
+}
+
+static int gswip_gphy_fw_list(struct gswip_priv *priv,
+ struct device_node *gphy_fw_list_np, u32 version)
+{
+ struct device *dev = priv->dev;
+ struct device_node *gphy_fw_np;
+ const struct of_device_id *match;
+ int err;
+ int i = 0;
+
+ /* The VRX200 rev 1.1 uses the GSWIP 2.0 and needs the older
+ * GPHY firmware. The VRX200 rev 1.2 uses the GSWIP 2.1 and also
+ * needs a different GPHY firmware.
+ */
+ if (of_device_is_compatible(gphy_fw_list_np, "lantiq,xrx200-gphy-fw")) {
+ switch (version) {
+ case GSWIP_VERSION_2_0:
+ priv->gphy_fw_name_cfg = &xrx200a1x_gphy_data;
+ break;
+ case GSWIP_VERSION_2_1:
+ priv->gphy_fw_name_cfg = &xrx200a2x_gphy_data;
+ break;
+ default:
+ return dev_err_probe(dev, -ENOENT,
+ "unknown GSWIP version: 0x%x\n",
+ version);
+ }
+ }
+
+ match = of_match_node(xway_gphy_match, gphy_fw_list_np);
+ if (match && match->data)
+ priv->gphy_fw_name_cfg = match->data;
+
+ if (!priv->gphy_fw_name_cfg)
+ return dev_err_probe(dev, -ENOENT,
+ "GPHY compatible type not supported\n");
+
+ priv->num_gphy_fw = of_get_available_child_count(gphy_fw_list_np);
+ if (!priv->num_gphy_fw)
+ return -ENOENT;
+
+ priv->rcu_regmap = syscon_regmap_lookup_by_phandle(gphy_fw_list_np,
+ "lantiq,rcu");
+ if (IS_ERR(priv->rcu_regmap))
+ return PTR_ERR(priv->rcu_regmap);
+
+ priv->gphy_fw = devm_kmalloc_array(dev, priv->num_gphy_fw,
+ sizeof(*priv->gphy_fw),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!priv->gphy_fw)
+ return -ENOMEM;
+
+ for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
+ err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
+ gphy_fw_np, i);
+ if (err) {
+ of_node_put(gphy_fw_np);
+ goto remove_gphy;
+ }
+ i++;
+ }
+
+ /* The standalone PHY11G requires 300ms to be fully
+ * initialized and ready for any MDIO communication after being
+ * taken out of reset. For the SoC-internal GPHY variant there
+ * is no (known) documentation for the minimum time after a
+ * reset. Use the same value as for the standalone variant as
+ * some users have reported internal PHYs not being detected
+ * without any delay.
+ */
+ msleep(300);
+
+ return 0;
+
+remove_gphy:
+ for (i = 0; i < priv->num_gphy_fw; i++)
+ gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
+ return err;
+}
+
+static const struct regmap_config sw_regmap_config = {
+ .name = "switch",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_shift = REGMAP_UPSHIFT(2),
+ .val_format_endian = REGMAP_ENDIAN_NATIVE,
+ .max_register = GSWIP_SDMA_PCTRLp(6),
+};
+
+static const struct regmap_config mdio_regmap_config = {
+ .name = "mdio",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_shift = REGMAP_UPSHIFT(2),
+ .val_format_endian = REGMAP_ENDIAN_NATIVE,
+ .max_register = GSWIP_MDIO_PHYp(0),
+};
+
+static const struct regmap_config mii_regmap_config = {
+ .name = "mii",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_shift = REGMAP_UPSHIFT(2),
+ .val_format_endian = REGMAP_ENDIAN_NATIVE,
+ .max_register = GSWIP_MII_CFGp(6),
+};
+
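The three regmaps above use REGMAP_UPSHIFT(2), so each register index in the GSWIP defines is left-shifted by two bits (multiplied by 4) to form the MMIO byte offset of the corresponding 32-bit register. A standalone sketch of that offset math, under that reading of REGMAP_UPSHIFT (an editorial illustration, not driver code):

/* Standalone sketch: GSWIP register index -> MMIO byte offset with an
 * upshift of 2 (registers live on a 32-bit bus).
 */
#include <stdio.h>

int main(void)
{
	unsigned int gswip_version = 0x013;	/* GSWIP_VERSION register index */

	printf("MMIO byte offset: 0x%x\n", gswip_version << 2);	/* 0x4c */
	return 0;
}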
+static int gswip_probe(struct platform_device *pdev)
+{
+ struct device_node *np, *gphy_fw_np;
+ __iomem void *gswip, *mdio, *mii;
+ struct device *dev = &pdev->dev;
+ struct gswip_priv *priv;
+ int err;
+ int i;
+ u32 version;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ gswip = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(gswip))
+ return PTR_ERR(gswip);
+
+ mdio = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(mdio))
+ return PTR_ERR(mdio);
+
+ mii = devm_platform_ioremap_resource(pdev, 2);
+ if (IS_ERR(mii))
+ return PTR_ERR(mii);
+
+ priv->gswip = devm_regmap_init_mmio(dev, gswip, &sw_regmap_config);
+ if (IS_ERR(priv->gswip))
+ return PTR_ERR(priv->gswip);
+
+ priv->mdio = devm_regmap_init_mmio(dev, mdio, &mdio_regmap_config);
+ if (IS_ERR(priv->mdio))
+ return PTR_ERR(priv->mdio);
+
+ priv->mii = devm_regmap_init_mmio(dev, mii, &mii_regmap_config);
+ if (IS_ERR(priv->mii))
+ return PTR_ERR(priv->mii);
+
+ priv->hw_info = of_device_get_match_data(dev);
+ if (!priv->hw_info)
+ return -EINVAL;
+
+ priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
+ if (!priv->ds)
+ return -ENOMEM;
+
+ priv->dev = dev;
+
+ regmap_read(priv->gswip, GSWIP_VERSION, &version);
+
+ np = dev->of_node;
+ switch (version) {
+ case GSWIP_VERSION_2_0:
+ case GSWIP_VERSION_2_1:
+ if (!of_device_is_compatible(np, "lantiq,xrx200-gswip"))
+ return -EINVAL;
+ break;
+ case GSWIP_VERSION_2_2:
+ case GSWIP_VERSION_2_2_ETC:
+ if (!of_device_is_compatible(np, "lantiq,xrx300-gswip") &&
+ !of_device_is_compatible(np, "lantiq,xrx330-gswip"))
+ return -EINVAL;
+ break;
+ default:
+ return dev_err_probe(dev, -ENOENT,
+ "unknown GSWIP version: 0x%x\n", version);
+ }
+
+ /* bring up the mdio bus */
+ gphy_fw_np = of_get_compatible_child(dev->of_node, "lantiq,gphy-fw");
+ if (gphy_fw_np) {
+ err = gswip_gphy_fw_list(priv, gphy_fw_np, version);
+ of_node_put(gphy_fw_np);
+ if (err)
+ return dev_err_probe(dev, err,
+ "gphy fw probe failed\n");
+ }
+
+ err = gswip_probe_common(priv, version);
+ if (err)
+ goto gphy_fw_remove;
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+
+gphy_fw_remove:
+ for (i = 0; i < priv->num_gphy_fw; i++)
+ gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
+ return err;
+}
+
+static void gswip_remove(struct platform_device *pdev)
+{
+ struct gswip_priv *priv = platform_get_drvdata(pdev);
+ int i;
+
+ if (!priv)
+ return;
+
+ /* disable the switch */
+ gswip_disable_switch(priv);
+
+ dsa_unregister_switch(priv->ds);
+
+ for (i = 0; i < priv->num_gphy_fw; i++)
+ gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
+}
+
+static void gswip_shutdown(struct platform_device *pdev)
+{
+ struct gswip_priv *priv = platform_get_drvdata(pdev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static const struct gswip_hw_info gswip_xrx200 = {
+ .max_ports = 7,
+ .allowed_cpu_ports = BIT(6),
+ .mii_ports = BIT(0) | BIT(1) | BIT(5),
+ .mii_port_reg_offset = 0,
+ .phylink_get_caps = gswip_xrx200_phylink_get_caps,
+ .pce_microcode = &gswip_pce_microcode,
+ .pce_microcode_size = ARRAY_SIZE(gswip_pce_microcode),
+ .tag_protocol = DSA_TAG_PROTO_GSWIP,
+};
+
+static const struct gswip_hw_info gswip_xrx300 = {
+ .max_ports = 7,
+ .allowed_cpu_ports = BIT(6),
+ .mii_ports = BIT(0) | BIT(5),
+ .mii_port_reg_offset = 0,
+ .phylink_get_caps = gswip_xrx300_phylink_get_caps,
+ .pce_microcode = &gswip_pce_microcode,
+ .pce_microcode_size = ARRAY_SIZE(gswip_pce_microcode),
+ .tag_protocol = DSA_TAG_PROTO_GSWIP,
+};
+
+static const struct of_device_id gswip_of_match[] = {
+ { .compatible = "lantiq,xrx200-gswip", .data = &gswip_xrx200 },
+ { .compatible = "lantiq,xrx300-gswip", .data = &gswip_xrx300 },
+ { .compatible = "lantiq,xrx330-gswip", .data = &gswip_xrx300 },
+ {},
+};
+MODULE_DEVICE_TABLE(of, gswip_of_match);
+
+static struct platform_driver gswip_driver = {
+ .probe = gswip_probe,
+ .remove = gswip_remove,
+ .shutdown = gswip_shutdown,
+ .driver = {
+ .name = "gswip",
+ .of_match_table = gswip_of_match,
+ },
+};
+
+module_platform_driver(gswip_driver);
+
+MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin");
+MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin");
+MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
+MODULE_DESCRIPTION("Lantiq / Intel GSWIP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/lantiq/lantiq_gswip.h b/drivers/net/dsa/lantiq/lantiq_gswip.h
new file mode 100644
index 000000000000..9c38e51a75e8
--- /dev/null
+++ b/drivers/net/dsa/lantiq/lantiq_gswip.h
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef __LANTIQ_GSWIP_H
+#define __LANTIQ_GSWIP_H
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/mutex.h>
+#include <linux/phylink.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/swab.h>
+#include <net/dsa.h>
+
+/* GSWIP MDIO Registers */
+#define GSWIP_MDIO_GLOB 0x00
+#define GSWIP_MDIO_GLOB_ENABLE BIT(15)
+#define GSWIP_MDIO_CTRL 0x08
+#define GSWIP_MDIO_CTRL_BUSY BIT(12)
+#define GSWIP_MDIO_CTRL_RD BIT(11)
+#define GSWIP_MDIO_CTRL_WR BIT(10)
+#define GSWIP_MDIO_CTRL_PHYAD_MASK 0x1f
+#define GSWIP_MDIO_CTRL_PHYAD_SHIFT 5
+#define GSWIP_MDIO_CTRL_REGAD_MASK 0x1f
+#define GSWIP_MDIO_READ 0x09
+#define GSWIP_MDIO_WRITE 0x0A
+#define GSWIP_MDIO_MDC_CFG0 0x0B
+#define GSWIP_MDIO_MDC_CFG1 0x0C
+#define GSWIP_MDIO_PHYp(p) (0x15 - (p))
+#define GSWIP_MDIO_PHY_LINK_MASK 0x6000
+#define GSWIP_MDIO_PHY_LINK_AUTO 0x0000
+#define GSWIP_MDIO_PHY_LINK_DOWN 0x4000
+#define GSWIP_MDIO_PHY_LINK_UP 0x2000
+#define GSWIP_MDIO_PHY_SPEED_MASK 0x1800
+#define GSWIP_MDIO_PHY_SPEED_AUTO 0x1800
+#define GSWIP_MDIO_PHY_SPEED_M10 0x0000
+#define GSWIP_MDIO_PHY_SPEED_M100 0x0800
+#define GSWIP_MDIO_PHY_SPEED_G1 0x1000
+#define GSWIP_MDIO_PHY_FDUP_MASK 0x0600
+#define GSWIP_MDIO_PHY_FDUP_AUTO 0x0000
+#define GSWIP_MDIO_PHY_FDUP_EN 0x0200
+#define GSWIP_MDIO_PHY_FDUP_DIS 0x0600
+#define GSWIP_MDIO_PHY_FCONTX_MASK 0x0180
+#define GSWIP_MDIO_PHY_FCONTX_AUTO 0x0000
+#define GSWIP_MDIO_PHY_FCONTX_EN 0x0100
+#define GSWIP_MDIO_PHY_FCONTX_DIS 0x0180
+#define GSWIP_MDIO_PHY_FCONRX_MASK 0x0060
+#define GSWIP_MDIO_PHY_FCONRX_AUTO 0x0000
+#define GSWIP_MDIO_PHY_FCONRX_EN 0x0020
+#define GSWIP_MDIO_PHY_FCONRX_DIS 0x0060
+#define GSWIP_MDIO_PHY_ADDR_MASK 0x001f
+#define GSWIP_MDIO_PHY_MASK (GSWIP_MDIO_PHY_ADDR_MASK | \
+ GSWIP_MDIO_PHY_FCONRX_MASK | \
+ GSWIP_MDIO_PHY_FCONTX_MASK | \
+ GSWIP_MDIO_PHY_LINK_MASK | \
+ GSWIP_MDIO_PHY_SPEED_MASK | \
+ GSWIP_MDIO_PHY_FDUP_MASK)
+
+/* GSWIP MII Registers */
+#define GSWIP_MII_CFGp(p) (0x2 * (p))
+#define GSWIP_MII_CFG_RESET BIT(15)
+#define GSWIP_MII_CFG_EN BIT(14)
+#define GSWIP_MII_CFG_ISOLATE BIT(13)
+#define GSWIP_MII_CFG_LDCLKDIS BIT(12)
+#define GSWIP_MII_CFG_RGMII_IBS BIT(8)
+#define GSWIP_MII_CFG_RMII_CLK BIT(7)
+#define GSWIP_MII_CFG_MODE_MIIP 0x0
+#define GSWIP_MII_CFG_MODE_MIIM 0x1
+#define GSWIP_MII_CFG_MODE_RMIIP 0x2
+#define GSWIP_MII_CFG_MODE_RMIIM 0x3
+#define GSWIP_MII_CFG_MODE_RGMII 0x4
+#define GSWIP_MII_CFG_MODE_GMII 0x9
+#define GSWIP_MII_CFG_MODE_MASK 0xf
+#define GSWIP_MII_CFG_RATE_M2P5 0x00
+#define GSWIP_MII_CFG_RATE_M25 0x10
+#define GSWIP_MII_CFG_RATE_M125 0x20
+#define GSWIP_MII_CFG_RATE_M50 0x30
+#define GSWIP_MII_CFG_RATE_AUTO 0x40
+#define GSWIP_MII_CFG_RATE_MASK 0x70
+#define GSWIP_MII_PCDU0 0x01
+#define GSWIP_MII_PCDU1 0x03
+#define GSWIP_MII_PCDU5 0x05
+#define GSWIP_MII_PCDU_TXDLY_MASK GENMASK(2, 0)
+#define GSWIP_MII_PCDU_RXDLY_MASK GENMASK(9, 7)
+#define GSWIP_MII_PCDU_TXDLY(x) u16_encode_bits(((x) / 500), GSWIP_MII_PCDU_TXDLY_MASK)
+#define GSWIP_MII_PCDU_RXDLY(x) u16_encode_bits(((x) / 500), GSWIP_MII_PCDU_RXDLY_MASK)
+#define GSWIP_MII_PCDU_RXDLY_DEFAULT 2000 /* picoseconds */
+#define GSWIP_MII_PCDU_TXDLY_DEFAULT 2000 /* picoseconds */
+
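The GSWIP_MII_PCDU_TXDLY()/RXDLY() macros above encode RGMII delays in 500 ps steps; a one-line standalone sketch of the conversion (an editorial illustration, not driver code):

/* Standalone sketch: the default 2000 ps RGMII delay becomes field value 4. */
#include <stdio.h>

int main(void)
{
	unsigned int delay_ps = 2000;	/* GSWIP_MII_PCDU_*DLY_DEFAULT */

	printf("PCDU delay field: %u\n", delay_ps / 500);
	return 0;
}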
+/* GSWIP Core Registers */
+#define GSWIP_SWRES 0x000
+#define GSWIP_SWRES_R1 BIT(1) /* GSWIP Software reset */
+#define GSWIP_SWRES_R0 BIT(0) /* GSWIP Hardware reset */
+#define GSWIP_VERSION 0x013
+#define GSWIP_VERSION_REV_MASK GENMASK(7, 0)
+#define GSWIP_VERSION_MOD_MASK GENMASK(15, 8)
+#define GSWIP_VERSION_REV(v) FIELD_GET(GSWIP_VERSION_REV_MASK, v)
+#define GSWIP_VERSION_MOD(v) FIELD_GET(GSWIP_VERSION_MOD_MASK, v)
+#define GSWIP_VERSION_2_0 0x100
+#define GSWIP_VERSION_2_1 0x021
+#define GSWIP_VERSION_2_2 0x122
+#define GSWIP_VERSION_2_2_ETC 0x022
+/* The hardware has the 'major/minor' version bytes in the wrong order
+ * preventing numerical comparisons. Swap the bytes of the 16-bit value
+ * to end up with REV being the most significant byte and MOD being the
+ * least significant byte, which then allows comparing it with the
+ * value stored in struct gswip_priv.
+ */
+#define GSWIP_VERSION_GE(priv, ver) ((priv)->version >= swab16(ver))
+
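A standalone sketch (an editorial illustration, not driver code) of the byte-swapped comparison described in the comment above GSWIP_VERSION_GE(): both sides are swapped so that REV ends up in the most significant byte before the numeric compare.

/* Standalone sketch of the GSWIP_VERSION_GE() comparison. */
#include <stdio.h>
#include <stdint.h>

static uint16_t swab16(uint16_t v)
{
	return (uint16_t)((v >> 8) | (v << 8));
}

int main(void)
{
	uint16_t hw = 0x021;			/* GSWIP_VERSION_2_1 as read from hardware */
	uint16_t stored = swab16(hw);		/* value kept in priv->version */

	/* a 2.1 switch is at least 2.0 ... */
	printf("2.1 >= 2.0: %d\n", stored >= swab16(0x100));
	/* ... but not at least 2.2 */
	printf("2.1 >= 2.2: %d\n", stored >= swab16(0x122));
	return 0;
}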
+#define GSWIP_BM_RAM_VAL(x) (0x043 - (x))
+#define GSWIP_BM_RAM_ADDR 0x044
+#define GSWIP_BM_RAM_CTRL 0x045
+#define GSWIP_BM_RAM_CTRL_BAS BIT(15)
+#define GSWIP_BM_RAM_CTRL_OPMOD BIT(5)
+#define GSWIP_BM_RAM_CTRL_ADDR_MASK GENMASK(4, 0)
+#define GSWIP_BM_QUEUE_GCTRL 0x04A
+#define GSWIP_BM_QUEUE_GCTRL_GL_MOD BIT(10)
+/* buffer management Port Configuration Register */
+#define GSWIP_BM_PCFGp(p) (0x080 + ((p) * 2))
+#define GSWIP_BM_PCFG_CNTEN BIT(0) /* RMON Counter Enable */
+#define GSWIP_BM_PCFG_IGCNT BIT(1) /* Ingress Special Tag RMON count */
+/* buffer management Port Control Register */
+#define GSWIP_BM_RMON_CTRLp(p) (0x81 + ((p) * 2))
+#define GSWIP_BM_CTRL_RMON_RAM1_RES BIT(0) /* Software Reset for RMON RAM 1 */
+#define GSWIP_BM_CTRL_RMON_RAM2_RES BIT(1) /* Software Reset for RMON RAM 2 */
+
+/* PCE */
+#define GSWIP_PCE_TBL_KEY(x) (0x447 - (x))
+#define GSWIP_PCE_TBL_MASK 0x448
+#define GSWIP_PCE_TBL_VAL(x) (0x44D - (x))
+#define GSWIP_PCE_TBL_ADDR 0x44E
+#define GSWIP_PCE_TBL_CTRL 0x44F
+#define GSWIP_PCE_TBL_CTRL_BAS BIT(15)
+#define GSWIP_PCE_TBL_CTRL_TYPE BIT(13)
+#define GSWIP_PCE_TBL_CTRL_VLD BIT(12)
+#define GSWIP_PCE_TBL_CTRL_KEYFORM BIT(11)
+#define GSWIP_PCE_TBL_CTRL_GMAP_MASK GENMASK(10, 7)
+#define GSWIP_PCE_TBL_CTRL_OPMOD_MASK GENMASK(6, 5)
+#define GSWIP_PCE_TBL_CTRL_OPMOD_ADRD 0x00
+#define GSWIP_PCE_TBL_CTRL_OPMOD_ADWR 0x20
+#define GSWIP_PCE_TBL_CTRL_OPMOD_KSRD 0x40
+#define GSWIP_PCE_TBL_CTRL_OPMOD_KSWR 0x60
+#define GSWIP_PCE_TBL_CTRL_ADDR_MASK GENMASK(4, 0)
+#define GSWIP_PCE_PMAP1 0x453 /* Monitoring port map */
+#define GSWIP_PCE_PMAP2 0x454 /* Default Multicast port map */
+#define GSWIP_PCE_PMAP3 0x455 /* Default Unknown Unicast port map */
+#define GSWIP_PCE_GCTRL_0 0x456
+#define GSWIP_PCE_GCTRL_0_MTFL BIT(0) /* MAC Table Flushing */
+#define GSWIP_PCE_GCTRL_0_MC_VALID BIT(3)
+#define GSWIP_PCE_GCTRL_0_VLAN BIT(14) /* VLAN aware Switching */
+#define GSWIP_PCE_GCTRL_1 0x457
+#define GSWIP_PCE_GCTRL_1_MAC_GLOCK BIT(2) /* MAC Address table lock */
+#define GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD BIT(3) /* MAC address table lock forwarding mode */
+#define GSWIP_PCE_PCTRL_0p(p) (0x480 + ((p) * 0xA))
+#define GSWIP_PCE_PCTRL_0_TVM BIT(5) /* Transparent VLAN mode */
+#define GSWIP_PCE_PCTRL_0_VREP BIT(6) /* VLAN Replace Mode */
+#define GSWIP_PCE_PCTRL_0_INGRESS BIT(11) /* Accept special tag in ingress */
+#define GSWIP_PCE_PCTRL_0_PSTATE_LISTEN 0x0
+#define GSWIP_PCE_PCTRL_0_PSTATE_RX 0x1
+#define GSWIP_PCE_PCTRL_0_PSTATE_TX 0x2
+#define GSWIP_PCE_PCTRL_0_PSTATE_LEARNING 0x3
+#define GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING 0x7
+#define GSWIP_PCE_PCTRL_0_PSTATE_MASK GENMASK(2, 0)
+/* Ethernet Switch PCE Port Control Register 3 */
+#define GSWIP_PCE_PCTRL_3p(p) (0x483 + ((p) * 0xA))
+#define GSWIP_PCE_PCTRL_3_LNDIS BIT(15) /* Learning Disable */
+#define GSWIP_PCE_VCTRL(p) (0x485 + ((p) * 0xA))
+#define GSWIP_PCE_VCTRL_UVR BIT(0) /* Unknown VLAN Rule */
+#define GSWIP_PCE_VCTRL_VINR GENMASK(2, 1) /* VLAN Ingress Tag Rule */
+#define GSWIP_PCE_VCTRL_VINR_ALL 0 /* Admit tagged and untagged packets */
+#define GSWIP_PCE_VCTRL_VINR_TAGGED 1 /* Admit only tagged packets */
+#define GSWIP_PCE_VCTRL_VINR_UNTAGGED 2 /* Admit only untagged packets */
+#define GSWIP_PCE_VCTRL_VIMR BIT(3) /* VLAN Ingress Member violation rule */
+#define GSWIP_PCE_VCTRL_VEMR BIT(4) /* VLAN Egress Member violation rule */
+#define GSWIP_PCE_VCTRL_VSR BIT(5) /* VLAN Security */
+#define GSWIP_PCE_VCTRL_VID0 BIT(6) /* Priority Tagged Rule */
+#define GSWIP_PCE_DEFPVID(p) (0x486 + ((p) * 0xA))
+
+#define GSWIP_MAC_FLEN 0x8C5
+#define GSWIP_MAC_CTRL_0p(p) (0x903 + ((p) * 0xC))
+#define GSWIP_MAC_CTRL_0_PADEN BIT(8)
+#define GSWIP_MAC_CTRL_0_FCS_EN BIT(7)
+#define GSWIP_MAC_CTRL_0_FCON_MASK 0x0070
+#define GSWIP_MAC_CTRL_0_FCON_AUTO 0x0000
+#define GSWIP_MAC_CTRL_0_FCON_RX 0x0010
+#define GSWIP_MAC_CTRL_0_FCON_TX 0x0020
+#define GSWIP_MAC_CTRL_0_FCON_RXTX 0x0030
+#define GSWIP_MAC_CTRL_0_FCON_NONE 0x0040
+#define GSWIP_MAC_CTRL_0_FDUP_MASK 0x000C
+#define GSWIP_MAC_CTRL_0_FDUP_AUTO 0x0000
+#define GSWIP_MAC_CTRL_0_FDUP_EN 0x0004
+#define GSWIP_MAC_CTRL_0_FDUP_DIS 0x000C
+#define GSWIP_MAC_CTRL_0_GMII_MASK 0x0003
+#define GSWIP_MAC_CTRL_0_GMII_AUTO 0x0000
+#define GSWIP_MAC_CTRL_0_GMII_MII 0x0001
+#define GSWIP_MAC_CTRL_0_GMII_RGMII 0x0002
+#define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC))
+#define GSWIP_MAC_CTRL_2_LCHKL BIT(2) /* Frame Length Check Long Enable */
+#define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Length */
+#define GSWIP_MAC_CTRL_4p(p) (0x907 + ((p) * 0xC))
+#define GSWIP_MAC_CTRL_4_LPIEN BIT(7) /* LPI Mode Enable */
+#define GSWIP_MAC_CTRL_4_GWAIT_MASK GENMASK(14, 8) /* LPI Wait Time 1G */
+#define GSWIP_MAC_CTRL_4_GWAIT(t) u16_encode_bits((t), GSWIP_MAC_CTRL_4_GWAIT_MASK)
+#define GSWIP_MAC_CTRL_4_WAIT_MASK GENMASK(6, 0) /* LPI Wait Time 100M */
+#define GSWIP_MAC_CTRL_4_WAIT(t) u16_encode_bits((t), GSWIP_MAC_CTRL_4_WAIT_MASK)
+
+/* Ethernet Switch Fetch DMA Port Control Register */
+#define GSWIP_FDMA_PCTRLp(p) (0xA80 + ((p) * 0x6))
+#define GSWIP_FDMA_PCTRL_EN BIT(0) /* FDMA Port Enable */
+#define GSWIP_FDMA_PCTRL_STEN BIT(1) /* Special Tag Insertion Enable */
+#define GSWIP_FDMA_PCTRL_VLANMOD_MASK GENMASK(4, 3) /* VLAN Modification Control */
+#define GSWIP_FDMA_PCTRL_VLANMOD_SHIFT 3 /* VLAN Modification Control */
+#define GSWIP_FDMA_PCTRL_VLANMOD_DIS (0x0 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
+#define GSWIP_FDMA_PCTRL_VLANMOD_PRIO (0x1 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
+#define GSWIP_FDMA_PCTRL_VLANMOD_ID (0x2 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
+#define GSWIP_FDMA_PCTRL_VLANMOD_BOTH (0x3 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
+
+/* Ethernet Switch Store DMA Port Control Register */
+#define GSWIP_SDMA_PCTRLp(p) (0xBC0 + ((p) * 0x6))
+#define GSWIP_SDMA_PCTRL_EN BIT(0) /* SDMA Port Enable */
+#define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */
+#define GSWIP_SDMA_PCTRL_PAUFWD BIT(3) /* Pause Frame Forwarding */
+
+#define GSWIP_TABLE_ACTIVE_VLAN 0x01
+#define GSWIP_TABLE_VLAN_MAPPING 0x02
+#define GSWIP_TABLE_MAC_BRIDGE 0x0b
+#define GSWIP_TABLE_MAC_BRIDGE_KEY3_FID GENMASK(5, 0) /* Filtering identifier */
+#define GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT GENMASK(7, 4) /* Port on learned entries */
+#define GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC BIT(0) /* Static, non-aging entry */
+#define GSWIP_TABLE_MAC_BRIDGE_VAL1_VALID BIT(1) /* Valid bit */
+
+#define XRX200_GPHY_FW_ALIGN (16 * 1024)
+
+/* Maximum packet size supported by the switch. In theory this should be 10240,
+ * but long packets currently cause lock-ups with an MTU of over 2526. Medium
+ * packets are sometimes dropped (e.g. TCP over 2477, UDP over 2516-2519, ICMP
+ * over 2526), hence an MTU value of 2400 seems safe. This issue only affects
+ * packet reception. This is probably caused by the PPA engine, which is on the
+ * RX part of the device. Packet transmission works properly up to 10240.
+ */
+#define GSWIP_MAX_PACKET_LENGTH 2400
+
+#define GSWIP_VLAN_UNAWARE_PVID 0
+
+struct gswip_pce_microcode {
+ u16 val_3;
+ u16 val_2;
+ u16 val_1;
+ u16 val_0;
+};
+
+struct gswip_hw_info {
+ int max_ports;
+ unsigned int allowed_cpu_ports;
+ unsigned int mii_ports;
+ int mii_port_reg_offset;
+ bool supports_2500m;
+ const struct gswip_pce_microcode (*pce_microcode)[];
+ size_t pce_microcode_size;
+ enum dsa_tag_protocol tag_protocol;
+ void (*phylink_get_caps)(struct dsa_switch *ds, int port,
+ struct phylink_config *config);
+ struct phylink_pcs *(*mac_select_pcs)(struct phylink_config *config,
+ phy_interface_t interface);
+};
+
+struct gswip_gphy_fw {
+ struct clk *clk_gate;
+ struct reset_control *reset;
+ u32 fw_addr_offset;
+ char *fw_name;
+};
+
+struct gswip_vlan {
+ struct net_device *bridge;
+ u16 vid;
+ u8 fid;
+};
+
+struct gswip_priv {
+ struct regmap *gswip;
+ struct regmap *mdio;
+ struct regmap *mii;
+ const struct gswip_hw_info *hw_info;
+ const struct xway_gphy_match_data *gphy_fw_name_cfg;
+ struct dsa_switch *ds;
+ struct device *dev;
+ struct regmap *rcu_regmap;
+ struct gswip_vlan vlans[64];
+ int num_gphy_fw;
+ struct gswip_gphy_fw *gphy_fw;
+ struct mutex pce_table_lock;
+ u16 version;
+};
+
+void gswip_disable_switch(struct gswip_priv *priv);
+
+int gswip_probe_common(struct gswip_priv *priv, u32 version);
+
+#endif /* __LANTIQ_GSWIP_H */
diff --git a/drivers/net/dsa/lantiq/lantiq_gswip_common.c b/drivers/net/dsa/lantiq/lantiq_gswip_common.c
new file mode 100644
index 000000000000..9da39edf8f57
--- /dev/null
+++ b/drivers/net/dsa/lantiq/lantiq_gswip_common.c
@@ -0,0 +1,1739 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Lantiq / Intel / MaxLinear GSWIP common function library
+ *
+ * Copyright (C) 2025 Daniel Golle <daniel@makrotopia.org>
+ * Copyright (C) 2023 - 2024 MaxLinear Inc.
+ * Copyright (C) 2022 Snap One, LLC. All rights reserved.
+ * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ * Copyright (C) 2010 Lantiq Deutschland
+ *
+ * The VLAN and bridge model used by the GSWIP hardware does not
+ * directly match the model used by DSA.
+ *
+ * The hardware has 64 possible table entries for bridges, each with one
+ * VLAN ID, one flow ID and a list of member ports. All entries which
+ * share the same flow ID are combined in the MAC learning table and act
+ * as one global bridge.
+ * The hardware does not support VLAN filtering per port, only per
+ * bridge; this driver converts the DSA model to the hardware model.
+ *
+ * The CPU receives all exception frames which do not match any
+ * forwarding rule, and the CPU port is also added to all bridges. This
+ * makes it possible to handle all the special cases easily in software.
+ * At initialization the driver allocates one bridge table entry for
+ * each switch port, which is used when the port operates without an
+ * explicit bridge. This prevents frames from being forwarded between
+ * all LAN ports by default.
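+ *
+ * A user port that is not a member of any Linux bridge is, for example,
+ * kept in its own single-port bridge together with the CPU port, see
+ * gswip_add_single_port_br().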
+ */
+
+#include "lantiq_gswip.h"
+
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+#include <linux/regmap.h>
+#include <net/dsa.h>
+
+struct gswip_pce_table_entry {
+ u16 index; // PCE_TBL_ADDR.ADDR = pData->table_index
+ u16 table; // PCE_TBL_CTRL.ADDR = pData->table
+ u16 key[8];
+ u16 val[5];
+ u16 mask;
+ u8 gmap;
+ bool type;
+ bool valid;
+ bool key_mode;
+};
+
+struct gswip_rmon_cnt_desc {
+ unsigned int size;
+ unsigned int offset;
+ const char *name;
+};
+
+#define MIB_DESC(_size, _offset, _name) {.size = _size, .offset = _offset, .name = _name}
+
+static const struct gswip_rmon_cnt_desc gswip_rmon_cnt[] = {
+ /** Receive Packet Count (only packets that are accepted and not discarded). */
+ MIB_DESC(1, 0x1F, "RxGoodPkts"),
+ MIB_DESC(1, 0x23, "RxUnicastPkts"),
+ MIB_DESC(1, 0x22, "RxMulticastPkts"),
+ MIB_DESC(1, 0x21, "RxFCSErrorPkts"),
+ MIB_DESC(1, 0x1D, "RxUnderSizeGoodPkts"),
+ MIB_DESC(1, 0x1E, "RxUnderSizeErrorPkts"),
+ MIB_DESC(1, 0x1B, "RxOversizeGoodPkts"),
+ MIB_DESC(1, 0x1C, "RxOversizeErrorPkts"),
+ MIB_DESC(1, 0x20, "RxGoodPausePkts"),
+ MIB_DESC(1, 0x1A, "RxAlignErrorPkts"),
+ MIB_DESC(1, 0x12, "Rx64BytePkts"),
+ MIB_DESC(1, 0x13, "Rx127BytePkts"),
+ MIB_DESC(1, 0x14, "Rx255BytePkts"),
+ MIB_DESC(1, 0x15, "Rx511BytePkts"),
+ MIB_DESC(1, 0x16, "Rx1023BytePkts"),
+ /** Receive Size 1024-1522 (or more, if configured) Packet Count. */
+ MIB_DESC(1, 0x17, "RxMaxBytePkts"),
+ MIB_DESC(1, 0x18, "RxDroppedPkts"),
+ MIB_DESC(1, 0x19, "RxFilteredPkts"),
+ MIB_DESC(2, 0x24, "RxGoodBytes"),
+ MIB_DESC(2, 0x26, "RxBadBytes"),
+ MIB_DESC(1, 0x11, "TxAcmDroppedPkts"),
+ MIB_DESC(1, 0x0C, "TxGoodPkts"),
+ MIB_DESC(1, 0x06, "TxUnicastPkts"),
+ MIB_DESC(1, 0x07, "TxMulticastPkts"),
+ MIB_DESC(1, 0x00, "Tx64BytePkts"),
+ MIB_DESC(1, 0x01, "Tx127BytePkts"),
+ MIB_DESC(1, 0x02, "Tx255BytePkts"),
+ MIB_DESC(1, 0x03, "Tx511BytePkts"),
+ MIB_DESC(1, 0x04, "Tx1023BytePkts"),
+ /** Transmit Size 1024-1522 (or more, if configured) Packet Count. */
+ MIB_DESC(1, 0x05, "TxMaxBytePkts"),
+ MIB_DESC(1, 0x08, "TxSingleCollCount"),
+ MIB_DESC(1, 0x09, "TxMultCollCount"),
+ MIB_DESC(1, 0x0A, "TxLateCollCount"),
+ MIB_DESC(1, 0x0B, "TxExcessCollCount"),
+ MIB_DESC(1, 0x0D, "TxPauseCount"),
+ MIB_DESC(1, 0x10, "TxDroppedPkts"),
+ MIB_DESC(2, 0x0E, "TxGoodBytes"),
+};
+
+static u32 gswip_switch_r_timeout(struct gswip_priv *priv, u32 offset,
+ u32 cleared)
+{
+ u32 val;
+
+ return regmap_read_poll_timeout(priv->gswip, offset, val,
+ !(val & cleared), 20, 50000);
+}
+
+static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 mask, u32 set,
+ int port)
+{
+ int reg_port;
+
+ /* MII_CFG register only exists for MII ports */
+ if (!(priv->hw_info->mii_ports & BIT(port)))
+ return;
+
+ reg_port = port + priv->hw_info->mii_port_reg_offset;
+
+ regmap_write_bits(priv->mii, GSWIP_MII_CFGp(reg_port), mask,
+ set);
+}
+
+static int gswip_mdio_poll(struct gswip_priv *priv)
+{
+ u32 ctrl;
+
+ return regmap_read_poll_timeout(priv->mdio, GSWIP_MDIO_CTRL, ctrl,
+ !(ctrl & GSWIP_MDIO_CTRL_BUSY), 40, 4000);
+}
+
+static int gswip_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val)
+{
+ struct gswip_priv *priv = bus->priv;
+ int err;
+
+ err = gswip_mdio_poll(priv);
+ if (err) {
+ dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
+ return err;
+ }
+
+ regmap_write(priv->mdio, GSWIP_MDIO_WRITE, val);
+ regmap_write(priv->mdio, GSWIP_MDIO_CTRL,
+ GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_WR |
+ ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
+ (reg & GSWIP_MDIO_CTRL_REGAD_MASK));
+
+ return 0;
+}
+
+static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg)
+{
+ struct gswip_priv *priv = bus->priv;
+ u32 val;
+ int err;
+
+ err = gswip_mdio_poll(priv);
+ if (err) {
+ dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
+ return err;
+ }
+
+ regmap_write(priv->mdio, GSWIP_MDIO_CTRL,
+ GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_RD |
+ ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
+ (reg & GSWIP_MDIO_CTRL_REGAD_MASK));
+
+ err = gswip_mdio_poll(priv);
+ if (err) {
+ dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
+ return err;
+ }
+
+ err = regmap_read(priv->mdio, GSWIP_MDIO_READ, &val);
+ if (err)
+ return err;
+
+ return val;
+}
+
+static int gswip_mdio(struct gswip_priv *priv)
+{
+ struct device_node *mdio_np, *switch_np = priv->dev->of_node;
+ struct device *dev = priv->dev;
+ struct mii_bus *bus;
+ int err = 0;
+
+ mdio_np = of_get_compatible_child(switch_np, "lantiq,xrx200-mdio");
+ if (!mdio_np)
+ mdio_np = of_get_child_by_name(switch_np, "mdio");
+
+ if (!of_device_is_available(mdio_np))
+ goto out_put_node;
+
+ bus = devm_mdiobus_alloc(dev);
+ if (!bus) {
+ err = -ENOMEM;
+ goto out_put_node;
+ }
+
+ bus->priv = priv;
+ bus->read = gswip_mdio_rd;
+ bus->write = gswip_mdio_wr;
+ bus->name = "lantiq,xrx200-mdio";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev));
+ bus->parent = priv->dev;
+
+ err = devm_of_mdiobus_register(dev, bus, mdio_np);
+
+out_put_node:
+ of_node_put(mdio_np);
+
+ return err;
+}
+
+static int gswip_pce_table_entry_read(struct gswip_priv *priv,
+ struct gswip_pce_table_entry *tbl)
+{
+ int i;
+ int err;
+ u32 crtl;
+ u32 tmp;
+ u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSRD :
+ GSWIP_PCE_TBL_CTRL_OPMOD_ADRD;
+
+ mutex_lock(&priv->pce_table_lock);
+
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+ if (err)
+ goto out_unlock;
+
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_ADDR, tbl->index);
+ regmap_write_bits(priv->gswip, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_ADDR_MASK |
+ GSWIP_PCE_TBL_CTRL_OPMOD_MASK |
+ GSWIP_PCE_TBL_CTRL_BAS,
+ tbl->table | addr_mode | GSWIP_PCE_TBL_CTRL_BAS);
+
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+ if (err)
+ goto out_unlock;
+
+ for (i = 0; i < ARRAY_SIZE(tbl->key); i++) {
+ err = regmap_read(priv->gswip, GSWIP_PCE_TBL_KEY(i), &tmp);
+ if (err)
+ goto out_unlock;
+ tbl->key[i] = tmp;
+ }
+ for (i = 0; i < ARRAY_SIZE(tbl->val); i++) {
+ err = regmap_read(priv->gswip, GSWIP_PCE_TBL_VAL(i), &tmp);
+ if (err)
+ goto out_unlock;
+ tbl->val[i] = tmp;
+ }
+
+ err = regmap_read(priv->gswip, GSWIP_PCE_TBL_MASK, &tmp);
+ if (err)
+ goto out_unlock;
+
+ tbl->mask = tmp;
+ err = regmap_read(priv->gswip, GSWIP_PCE_TBL_CTRL, &crtl);
+ if (err)
+ goto out_unlock;
+
+ tbl->type = !!(crtl & GSWIP_PCE_TBL_CTRL_TYPE);
+ tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD);
+ tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7;
+
+out_unlock:
+ mutex_unlock(&priv->pce_table_lock);
+
+ return err;
+}
+
+static int gswip_pce_table_entry_write(struct gswip_priv *priv,
+ struct gswip_pce_table_entry *tbl)
+{
+ int i;
+ int err;
+ u32 crtl;
+ u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR :
+ GSWIP_PCE_TBL_CTRL_OPMOD_ADWR;
+
+ mutex_lock(&priv->pce_table_lock);
+
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+ if (err) {
+ mutex_unlock(&priv->pce_table_lock);
+ return err;
+ }
+
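+	/* Program the table index, the target table and the access mode,
+	 * write the key, value and mask registers, then set the BAS bit to
+	 * start the access and wait for the hardware to clear it again.
+	 */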
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_ADDR, tbl->index);
+ regmap_write_bits(priv->gswip, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_ADDR_MASK |
+ GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
+ tbl->table | addr_mode);
+
+ for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_KEY(i), tbl->key[i]);
+
+ for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_VAL(i), tbl->val[i]);
+
+ regmap_write_bits(priv->gswip, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_ADDR_MASK |
+ GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
+ tbl->table | addr_mode);
+
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_MASK, tbl->mask);
+
+ regmap_read(priv->gswip, GSWIP_PCE_TBL_CTRL, &crtl);
+ crtl &= ~(GSWIP_PCE_TBL_CTRL_TYPE | GSWIP_PCE_TBL_CTRL_VLD |
+ GSWIP_PCE_TBL_CTRL_GMAP_MASK);
+ if (tbl->type)
+ crtl |= GSWIP_PCE_TBL_CTRL_TYPE;
+ if (tbl->valid)
+ crtl |= GSWIP_PCE_TBL_CTRL_VLD;
+ crtl |= (tbl->gmap << 7) & GSWIP_PCE_TBL_CTRL_GMAP_MASK;
+ crtl |= GSWIP_PCE_TBL_CTRL_BAS;
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_CTRL, crtl);
+
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+
+ mutex_unlock(&priv->pce_table_lock);
+
+ return err;
+}
+
+/* Add the LAN port into a bridge with the CPU port by
+ * default. This prevents automatic forwarding of
+ * packets between the LAN ports when no explicit
+ * bridge is configured.
+ */
+static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
+{
+ struct gswip_pce_table_entry vlan_active = {0,};
+ struct gswip_pce_table_entry vlan_mapping = {0,};
+ int err;
+
+ vlan_active.index = port + 1;
+ vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
+ vlan_active.key[0] = GSWIP_VLAN_UNAWARE_PVID;
+ vlan_active.val[0] = port + 1 /* fid */;
+ vlan_active.valid = add;
+ err = gswip_pce_table_entry_write(priv, &vlan_active);
+ if (err) {
+ dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
+ return err;
+ }
+
+ if (!add)
+ return 0;
+
+ vlan_mapping.index = port + 1;
+ vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
+ vlan_mapping.val[0] = GSWIP_VLAN_UNAWARE_PVID;
+ vlan_mapping.val[1] = BIT(port) | dsa_cpu_ports(priv->ds);
+ vlan_mapping.val[2] = 0;
+ err = gswip_pce_table_entry_write(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int gswip_port_set_learning(struct gswip_priv *priv, int port,
+ bool enable)
+{
+ if (!GSWIP_VERSION_GE(priv, GSWIP_VERSION_2_2))
+ return -EOPNOTSUPP;
+
+ /* learning disable bit */
+ return regmap_update_bits(priv->gswip, GSWIP_PCE_PCTRL_3p(port),
+ GSWIP_PCE_PCTRL_3_LNDIS,
+ enable ? 0 : GSWIP_PCE_PCTRL_3_LNDIS);
+}
+
+static int gswip_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct gswip_priv *priv = ds->priv;
+ unsigned long supported = 0;
+
+ if (GSWIP_VERSION_GE(priv, GSWIP_VERSION_2_2))
+ supported |= BR_LEARNING;
+
+ if (flags.mask & ~supported)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int gswip_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ if (flags.mask & BR_LEARNING)
+ return gswip_port_set_learning(priv, port,
+ !!(flags.val & BR_LEARNING));
+
+ return 0;
+}
+
+static int gswip_port_setup(struct dsa_switch *ds, int port)
+{
+ struct gswip_priv *priv = ds->priv;
+ int err;
+
+ if (!dsa_is_cpu_port(ds, port)) {
+ err = gswip_add_single_port_br(priv, port, true);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int gswip_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ if (!dsa_is_cpu_port(ds, port)) {
+ u32 mdio_phy = 0;
+
+ if (phydev)
+ mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;
+
+ regmap_write_bits(priv->mdio, GSWIP_MDIO_PHYp(port),
+ GSWIP_MDIO_PHY_ADDR_MASK,
+ mdio_phy);
+ }
+
+ /* RMON Counter Enable for port */
+ regmap_write(priv->gswip, GSWIP_BM_PCFGp(port), GSWIP_BM_PCFG_CNTEN);
+
+ /* enable port fetch/store dma & VLAN Modification */
+ regmap_set_bits(priv->gswip, GSWIP_FDMA_PCTRLp(port),
+ GSWIP_FDMA_PCTRL_EN | GSWIP_FDMA_PCTRL_VLANMOD_BOTH);
+ regmap_set_bits(priv->gswip, GSWIP_SDMA_PCTRLp(port),
+ GSWIP_SDMA_PCTRL_EN);
+
+ return 0;
+}
+
+static void gswip_port_disable(struct dsa_switch *ds, int port)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ regmap_clear_bits(priv->gswip, GSWIP_FDMA_PCTRLp(port),
+ GSWIP_FDMA_PCTRL_EN);
+ regmap_clear_bits(priv->gswip, GSWIP_SDMA_PCTRLp(port),
+ GSWIP_SDMA_PCTRL_EN);
+}
+
+static int gswip_pce_load_microcode(struct gswip_priv *priv)
+{
+ int i;
+ int err;
+
+ regmap_write_bits(priv->gswip, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_ADDR_MASK |
+ GSWIP_PCE_TBL_CTRL_OPMOD_MASK |
+ GSWIP_PCE_TBL_CTRL_OPMOD_ADWR,
+ GSWIP_PCE_TBL_CTRL_OPMOD_ADWR);
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_MASK, 0);
+
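+	/* Write each microcode row into the four PCE table value registers
+	 * and commit it by setting the BAS bit, waiting for the hardware to
+	 * finish before moving on to the next entry.
+	 */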
+ for (i = 0; i < priv->hw_info->pce_microcode_size; i++) {
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_ADDR, i);
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_VAL(0),
+ (*priv->hw_info->pce_microcode)[i].val_0);
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_VAL(1),
+ (*priv->hw_info->pce_microcode)[i].val_1);
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_VAL(2),
+ (*priv->hw_info->pce_microcode)[i].val_2);
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_VAL(3),
+ (*priv->hw_info->pce_microcode)[i].val_3);
+
+ /* start the table access: */
+ regmap_set_bits(priv->gswip, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+ if (err)
+ return err;
+ }
+
+ /* tell the switch that the microcode is loaded */
+ regmap_set_bits(priv->gswip, GSWIP_PCE_GCTRL_0,
+ GSWIP_PCE_GCTRL_0_MC_VALID);
+
+ return 0;
+}
+
+static void gswip_port_commit_pvid(struct gswip_priv *priv, int port)
+{
+ struct dsa_port *dp = dsa_to_port(priv->ds, port);
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
+ u32 vinr;
+ int idx;
+
+ if (!dsa_port_is_user(dp))
+ return;
+
+ if (br) {
+ u16 pvid = GSWIP_VLAN_UNAWARE_PVID;
+
+ if (br_vlan_enabled(br))
+ br_vlan_get_pvid(br, &pvid);
+
+ /* VLAN-aware bridge ports with no PVID will use Active VLAN
+ * index 0. The expectation is that this drops all untagged and
+ * VID-0 tagged ingress traffic.
+ */
+ idx = 0;
+ for (int i = priv->hw_info->max_ports;
+ i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == br &&
+ priv->vlans[i].vid == pvid) {
+ idx = i;
+ break;
+ }
+ }
+ } else {
+ /* The Active VLAN table index as configured by
+ * gswip_add_single_port_br()
+ */
+ idx = port + 1;
+ }
+
+ vinr = idx ? GSWIP_PCE_VCTRL_VINR_ALL : GSWIP_PCE_VCTRL_VINR_TAGGED;
+ regmap_write_bits(priv->gswip, GSWIP_PCE_VCTRL(port),
+ GSWIP_PCE_VCTRL_VINR,
+ FIELD_PREP(GSWIP_PCE_VCTRL_VINR, vinr));
+
+ /* Note that in GSWIP 2.2 VLAN mode the VID needs to be programmed
+	 * directly instead of referencing the index in the Active VLAN Table.
+ * However, without the VLANMD bit (9) in PCE_GCTRL_1 (0x457) even
+ * GSWIP 2.2 and newer hardware maintain the GSWIP 2.1 behavior.
+ */
+ regmap_write(priv->gswip, GSWIP_PCE_DEFPVID(port), idx);
+}
+
+static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering,
+ struct netlink_ext_ack *extack)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ if (vlan_filtering) {
+ /* Use tag based VLAN */
+ regmap_write_bits(priv->gswip, GSWIP_PCE_VCTRL(port),
+ GSWIP_PCE_VCTRL_VSR |
+ GSWIP_PCE_VCTRL_UVR |
+ GSWIP_PCE_VCTRL_VIMR |
+ GSWIP_PCE_VCTRL_VEMR |
+ GSWIP_PCE_VCTRL_VID0,
+ GSWIP_PCE_VCTRL_UVR |
+ GSWIP_PCE_VCTRL_VIMR |
+ GSWIP_PCE_VCTRL_VEMR |
+ GSWIP_PCE_VCTRL_VID0);
+ regmap_clear_bits(priv->gswip, GSWIP_PCE_PCTRL_0p(port),
+ GSWIP_PCE_PCTRL_0_TVM);
+ } else {
+ /* Use port based VLAN */
+ regmap_write_bits(priv->gswip, GSWIP_PCE_VCTRL(port),
+ GSWIP_PCE_VCTRL_UVR |
+ GSWIP_PCE_VCTRL_VIMR |
+ GSWIP_PCE_VCTRL_VEMR |
+ GSWIP_PCE_VCTRL_VID0 |
+ GSWIP_PCE_VCTRL_VSR,
+ GSWIP_PCE_VCTRL_VSR);
+ regmap_set_bits(priv->gswip, GSWIP_PCE_PCTRL_0p(port),
+ GSWIP_PCE_PCTRL_0_TVM);
+ }
+
+ gswip_port_commit_pvid(priv, port);
+
+ return 0;
+}
+
+static void gswip_mii_delay_setup(struct gswip_priv *priv, struct dsa_port *dp,
+ phy_interface_t interface)
+{
+ u32 tx_delay = GSWIP_MII_PCDU_TXDLY_DEFAULT;
+ u32 rx_delay = GSWIP_MII_PCDU_RXDLY_DEFAULT;
+ struct device_node *port_dn = dp->dn;
+ u16 mii_pcdu_reg;
+
+ /* As MII_PCDU registers only exist for MII ports, silently return
+ * unless the port is an MII port
+ */
+ if (!(priv->hw_info->mii_ports & BIT(dp->index)))
+ return;
+
+ switch (dp->index + priv->hw_info->mii_port_reg_offset) {
+ case 0:
+ mii_pcdu_reg = GSWIP_MII_PCDU0;
+ break;
+ case 1:
+ mii_pcdu_reg = GSWIP_MII_PCDU1;
+ break;
+ case 5:
+ mii_pcdu_reg = GSWIP_MII_PCDU5;
+ break;
+ default:
+ return;
+ }
+
+ /* legacy code to set default delays according to the interface mode */
+ switch (interface) {
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ tx_delay = 0;
+ rx_delay = 0;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ rx_delay = 0;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ tx_delay = 0;
+ break;
+ default:
+ break;
+ }
+
+	/* allow setting delays using device tree properties */
+ of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay);
+ of_property_read_u32(port_dn, "tx-internal-delay-ps", &tx_delay);
+
+ regmap_write_bits(priv->mii, mii_pcdu_reg,
+ GSWIP_MII_PCDU_TXDLY_MASK |
+ GSWIP_MII_PCDU_RXDLY_MASK,
+ GSWIP_MII_PCDU_TXDLY(tx_delay) |
+ GSWIP_MII_PCDU_RXDLY(rx_delay));
+}
+
+static int gswip_setup(struct dsa_switch *ds)
+{
+ unsigned int cpu_ports = dsa_cpu_ports(ds);
+ struct gswip_priv *priv = ds->priv;
+ struct dsa_port *cpu_dp;
+ int err, i;
+
+ regmap_write(priv->gswip, GSWIP_SWRES, GSWIP_SWRES_R0);
+ usleep_range(5000, 10000);
+ regmap_write(priv->gswip, GSWIP_SWRES, 0);
+
+ /* disable port fetch/store dma on all ports */
+ for (i = 0; i < priv->hw_info->max_ports; i++) {
+ gswip_port_disable(ds, i);
+ gswip_port_vlan_filtering(ds, i, false, NULL);
+ }
+
+ /* enable Switch */
+ regmap_set_bits(priv->mdio, GSWIP_MDIO_GLOB, GSWIP_MDIO_GLOB_ENABLE);
+
+ err = gswip_pce_load_microcode(priv);
+ if (err) {
+ dev_err(priv->dev, "writing PCE microcode failed, %i\n", err);
+ return err;
+ }
+
+ /* Default unknown Broadcast/Multicast/Unicast port maps */
+ regmap_write(priv->gswip, GSWIP_PCE_PMAP1, cpu_ports);
+ regmap_write(priv->gswip, GSWIP_PCE_PMAP2, cpu_ports);
+ regmap_write(priv->gswip, GSWIP_PCE_PMAP3, cpu_ports);
+
+	/* Deactivate MDIO PHY auto polling. Some PHYs, such as the AR8030,
+	 * have an interoperability problem with this auto polling mechanism
+	 * because their status registers report a link state different from
+	 * the actual one. The AR8030 has the BMSR_ESTATEN bit set as well as
+	 * ESTATUS_1000_TFULL and ESTATUS_1000_XFULL, which makes the auto
+	 * polling state machine consider the link to be negotiated at
+	 * 1 Gbit/s. Since the PHY itself is a Fast Ethernet RMII PHY, this
+	 * leaves the switch port completely dead (neither RX nor TX work).
+	 * With various other PHY / port combinations (PHY11G GPHY, PHY22F
+	 * GPHY, external RGMII PEF7071/7072) traffic would also stop:
+	 * sometimes it would work fine for a few minutes to hours and then
+	 * stop, on other devices no traffic could be sent or received at all.
+	 * Testing shows that these problems go away when PHY auto polling is
+	 * disabled.
+ */
+ regmap_write(priv->mdio, GSWIP_MDIO_MDC_CFG0, 0x0);
+
+ /* Configure the MDIO Clock 2.5 MHz */
+ regmap_write_bits(priv->mdio, GSWIP_MDIO_MDC_CFG1, 0xff, 0x09);
+
+ /* bring up the mdio bus */
+ err = gswip_mdio(priv);
+ if (err) {
+ dev_err(priv->dev, "mdio bus setup failed\n");
+ return err;
+ }
+
+	/* Disable the xMII interface and clear its isolation bit */
+ for (i = 0; i < priv->hw_info->max_ports; i++)
+ gswip_mii_mask_cfg(priv,
+ GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE,
+ 0, i);
+
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ /* enable special tag insertion on cpu port */
+ regmap_set_bits(priv->gswip, GSWIP_FDMA_PCTRLp(cpu_dp->index),
+ GSWIP_FDMA_PCTRL_STEN);
+
+ /* accept special tag in ingress direction */
+ regmap_set_bits(priv->gswip,
+ GSWIP_PCE_PCTRL_0p(cpu_dp->index),
+ GSWIP_PCE_PCTRL_0_INGRESS);
+ }
+
+ regmap_set_bits(priv->gswip, GSWIP_BM_QUEUE_GCTRL,
+ GSWIP_BM_QUEUE_GCTRL_GL_MOD);
+
+ /* VLAN aware Switching */
+ regmap_set_bits(priv->gswip, GSWIP_PCE_GCTRL_0,
+ GSWIP_PCE_GCTRL_0_VLAN);
+
+ /* Flush MAC Table */
+ regmap_set_bits(priv->gswip, GSWIP_PCE_GCTRL_0,
+ GSWIP_PCE_GCTRL_0_MTFL);
+
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_GCTRL_0,
+ GSWIP_PCE_GCTRL_0_MTFL);
+ if (err) {
+ dev_err(priv->dev, "MAC flushing didn't finish\n");
+ return err;
+ }
+
+ ds->mtu_enforcement_ingress = true;
+
+ return 0;
+}
+
+static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol mp)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ return priv->hw_info->tag_protocol;
+}
+
+static int gswip_vlan_active_create(struct gswip_priv *priv,
+ struct net_device *bridge,
+ int fid, u16 vid)
+{
+ struct gswip_pce_table_entry vlan_active = {0,};
+ unsigned int max_ports = priv->hw_info->max_ports;
+ int idx = -1;
+ int err;
+ int i;
+
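+	/* The low Active VLAN table indices (port + 1) are used by the
+	 * single-port bridges set up at initialization, so entries for
+	 * real bridges are allocated starting at max_ports.
+	 */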
+ /* Look for a free slot */
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (!priv->vlans[i].bridge) {
+ idx = i;
+ break;
+ }
+ }
+
+ if (idx == -1)
+ return -ENOSPC;
+
+ if (fid == -1)
+ fid = idx;
+
+ vlan_active.index = idx;
+ vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
+ vlan_active.key[0] = vid;
+ vlan_active.val[0] = fid;
+ vlan_active.valid = true;
+
+ err = gswip_pce_table_entry_write(priv, &vlan_active);
+ if (err) {
+ dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
+ return err;
+ }
+
+ priv->vlans[idx].bridge = bridge;
+ priv->vlans[idx].vid = vid;
+ priv->vlans[idx].fid = fid;
+
+ return idx;
+}
+
+static int gswip_vlan_active_remove(struct gswip_priv *priv, int idx)
+{
+ struct gswip_pce_table_entry vlan_active = {0,};
+ int err;
+
+ vlan_active.index = idx;
+ vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
+ vlan_active.valid = false;
+ err = gswip_pce_table_entry_write(priv, &vlan_active);
+ if (err)
+ dev_err(priv->dev, "failed to delete active VLAN: %d\n", err);
+ priv->vlans[idx].bridge = NULL;
+
+ return err;
+}
+
+static int gswip_vlan_add(struct gswip_priv *priv, struct net_device *bridge,
+ int port, u16 vid, bool untagged, bool pvid,
+ bool vlan_aware)
+{
+ struct gswip_pce_table_entry vlan_mapping = {0,};
+ unsigned int max_ports = priv->hw_info->max_ports;
+ unsigned int cpu_ports = dsa_cpu_ports(priv->ds);
+ bool active_vlan_created = false;
+ int fid = -1, idx = -1;
+ int i, err;
+
+ /* Check if there is already a page for this bridge */
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == bridge) {
+ if (vlan_aware) {
+ if (fid != -1 && fid != priv->vlans[i].fid)
+ dev_err(priv->dev, "one bridge with multiple flow ids\n");
+ fid = priv->vlans[i].fid;
+ }
+ if (priv->vlans[i].vid == vid) {
+ idx = i;
+ break;
+ }
+ }
+ }
+
+	/* If this bridge is not programmed yet, add an Active VLAN table
+ * entry in a free slot and prepare the VLAN mapping table entry.
+ */
+ if (idx == -1) {
+ idx = gswip_vlan_active_create(priv, bridge, fid, vid);
+ if (idx < 0)
+ return idx;
+ active_vlan_created = true;
+
+ vlan_mapping.index = idx;
+ vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
+ } else {
+ /* Read the existing VLAN mapping entry from the switch */
+ vlan_mapping.index = idx;
+ vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
+ err = gswip_pce_table_entry_read(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
+ err);
+ return err;
+ }
+ }
+
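+	/* val[1] holds the port member map and val[2] the tag member map:
+	 * a member port with its bit set in val[2] egresses this VLAN
+	 * tagged, one with the bit cleared egresses it untagged.
+	 */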
+ /* VLAN ID byte, maps to the VLAN ID of vlan active table */
+ vlan_mapping.val[0] = vid;
+ /* Update the VLAN mapping entry and write it to the switch */
+ vlan_mapping.val[1] |= cpu_ports;
+ vlan_mapping.val[1] |= BIT(port);
+ if (vlan_aware)
+ vlan_mapping.val[2] |= cpu_ports;
+ if (untagged)
+ vlan_mapping.val[2] &= ~BIT(port);
+ else
+ vlan_mapping.val[2] |= BIT(port);
+ err = gswip_pce_table_entry_write(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
+		/* In case an Active VLAN was created, delete it again */
+ if (active_vlan_created)
+ gswip_vlan_active_remove(priv, idx);
+ return err;
+ }
+
+ gswip_port_commit_pvid(priv, port);
+
+ return 0;
+}
+
+static int gswip_vlan_remove(struct gswip_priv *priv,
+ struct net_device *bridge, int port,
+ u16 vid)
+{
+ struct gswip_pce_table_entry vlan_mapping = {0,};
+ unsigned int max_ports = priv->hw_info->max_ports;
+ int idx = -1;
+ int i;
+ int err;
+
+ /* Check if there is already a page for this bridge */
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == bridge &&
+ priv->vlans[i].vid == vid) {
+ idx = i;
+ break;
+ }
+ }
+
+ if (idx == -1) {
+ dev_err(priv->dev, "Port %d cannot find VID %u of bridge %s\n",
+ port, vid, bridge ? bridge->name : "(null)");
+ return -ENOENT;
+ }
+
+ vlan_mapping.index = idx;
+ vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
+ err = gswip_pce_table_entry_read(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to read VLAN mapping: %d\n", err);
+ return err;
+ }
+
+ vlan_mapping.val[1] &= ~BIT(port);
+ vlan_mapping.val[2] &= ~BIT(port);
+ err = gswip_pce_table_entry_write(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
+ return err;
+ }
+
+ /* In case all ports are removed from the bridge, remove the VLAN */
+ if (!(vlan_mapping.val[1] & ~dsa_cpu_ports(priv->ds))) {
+ err = gswip_vlan_active_remove(priv, idx);
+ if (err) {
+ dev_err(priv->dev, "failed to write active VLAN: %d\n",
+ err);
+ return err;
+ }
+ }
+
+ gswip_port_commit_pvid(priv, port);
+
+ return 0;
+}
+
+static int gswip_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *br = bridge.dev;
+ struct gswip_priv *priv = ds->priv;
+ int err;
+
+ /* Set up the VLAN for VLAN-unaware bridging for this port, and remove
+ * it from the "single-port bridge" through which it was operating as
+ * standalone.
+ */
+ err = gswip_vlan_add(priv, br, port, GSWIP_VLAN_UNAWARE_PVID,
+ true, true, false);
+ if (err)
+ return err;
+
+ return gswip_add_single_port_br(priv, port, false);
+}
+
+static void gswip_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
+{
+ struct net_device *br = bridge.dev;
+ struct gswip_priv *priv = ds->priv;
+
+ /* Add the port back to the "single-port bridge", and remove it from
+ * the VLAN-unaware PVID created for this bridge.
+ */
+ gswip_add_single_port_br(priv, port, true);
+ gswip_vlan_remove(priv, br, port, GSWIP_VLAN_UNAWARE_PVID);
+}
+
+static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
+ struct gswip_priv *priv = ds->priv;
+ unsigned int max_ports = priv->hw_info->max_ports;
+ int pos = max_ports;
+ int i, idx = -1;
+
+ /* We only support VLAN filtering on bridges */
+ if (!dsa_is_cpu_port(ds, port) && !bridge)
+ return -EOPNOTSUPP;
+
+ /* Check if there is already a page for this VLAN */
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == bridge &&
+ priv->vlans[i].vid == vlan->vid) {
+ idx = i;
+ break;
+ }
+ }
+
+	/* If this VLAN is not programmed yet, we have to reserve
+	 * one entry in the VLAN table, so look for the next free
+	 * slot.
+	 */
+ if (idx == -1) {
+ /* Look for a free slot */
+ for (; pos < ARRAY_SIZE(priv->vlans); pos++) {
+ if (!priv->vlans[pos].bridge) {
+ idx = pos;
+ pos++;
+ break;
+ }
+ }
+
+ if (idx == -1) {
+ NL_SET_ERR_MSG_MOD(extack, "No slot in VLAN table");
+ return -ENOSPC;
+ }
+ }
+
+ return 0;
+}
+
+static int gswip_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
+ struct gswip_priv *priv = ds->priv;
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ int err;
+
+ if (vlan->vid == GSWIP_VLAN_UNAWARE_PVID)
+ return 0;
+
+ err = gswip_port_vlan_prepare(ds, port, vlan, extack);
+ if (err)
+ return err;
+
+	/* We have to receive all packets on the CPU port and should not
+	 * do any VLAN filtering here. This callback is also invoked with
+	 * a NULL bridge, in which case we do not know which bridge to
+	 * configure this for.
+ */
+ if (dsa_is_cpu_port(ds, port))
+ return 0;
+
+ return gswip_vlan_add(priv, bridge, port, vlan->vid, untagged, pvid,
+ true);
+}
+
+static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
+ struct gswip_priv *priv = ds->priv;
+
+ if (vlan->vid == GSWIP_VLAN_UNAWARE_PVID)
+ return 0;
+
+	/* We have to receive all packets on the CPU port and should not
+	 * do any VLAN filtering here. This callback is also invoked with
+	 * a NULL bridge, in which case we do not know which bridge to
+	 * configure this for.
+ */
+ if (dsa_is_cpu_port(ds, port))
+ return 0;
+
+ return gswip_vlan_remove(priv, bridge, port, vlan->vid);
+}
+
+static void gswip_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct gswip_priv *priv = ds->priv;
+ struct gswip_pce_table_entry mac_bridge = {0,};
+ int i;
+ int err;
+
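+	/* Walk all 2048 MAC bridge table entries and invalidate the dynamic
+	 * (non-static) ones that were learned on this port.
+	 */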
+ for (i = 0; i < 2048; i++) {
+ mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
+ mac_bridge.index = i;
+
+ err = gswip_pce_table_entry_read(priv, &mac_bridge);
+ if (err) {
+ dev_err(priv->dev, "failed to read mac bridge: %d\n",
+ err);
+ return;
+ }
+
+ if (!mac_bridge.valid)
+ continue;
+
+ if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC)
+ continue;
+
+ if (port != FIELD_GET(GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT,
+ mac_bridge.val[0]))
+ continue;
+
+ mac_bridge.valid = false;
+ err = gswip_pce_table_entry_write(priv, &mac_bridge);
+ if (err) {
+ dev_err(priv->dev, "failed to write mac bridge: %d\n",
+ err);
+ return;
+ }
+ }
+}
+
+static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct gswip_priv *priv = ds->priv;
+ u32 stp_state;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ regmap_clear_bits(priv->gswip, GSWIP_SDMA_PCTRLp(port),
+ GSWIP_SDMA_PCTRL_EN);
+ return;
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LISTEN;
+ break;
+ case BR_STATE_LEARNING:
+ stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ stp_state = GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING;
+ break;
+ default:
+ dev_err(priv->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ regmap_set_bits(priv->gswip, GSWIP_SDMA_PCTRLp(port),
+ GSWIP_SDMA_PCTRL_EN);
+ regmap_write_bits(priv->gswip, GSWIP_PCE_PCTRL_0p(port),
+ GSWIP_PCE_PCTRL_0_PSTATE_MASK,
+ stp_state);
+}
+
+static int gswip_port_fdb(struct dsa_switch *ds, int port,
+ struct net_device *bridge, const unsigned char *addr,
+ u16 vid, bool add)
+{
+ struct gswip_priv *priv = ds->priv;
+ struct gswip_pce_table_entry mac_bridge = {0,};
+ unsigned int max_ports = priv->hw_info->max_ports;
+ int fid = -1;
+ int i;
+ int err;
+
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == bridge) {
+ fid = priv->vlans[i].fid;
+ break;
+ }
+ }
+
+ if (fid == -1) {
+ dev_err(priv->dev, "no FID found for bridge %s\n",
+ bridge->name);
+ return -EINVAL;
+ }
+
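+	/* The MAC address is packed into key[0..2] with key[0] holding the
+	 * two least significant octets; key[3] carries the FID of the
+	 * bridge the entry belongs to.
+	 */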
+ mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
+ mac_bridge.key_mode = true;
+ mac_bridge.key[0] = addr[5] | (addr[4] << 8);
+ mac_bridge.key[1] = addr[3] | (addr[2] << 8);
+ mac_bridge.key[2] = addr[1] | (addr[0] << 8);
+ mac_bridge.key[3] = FIELD_PREP(GSWIP_TABLE_MAC_BRIDGE_KEY3_FID, fid);
+ mac_bridge.val[0] = add ? BIT(port) : 0; /* port map */
+ if (GSWIP_VERSION_GE(priv, GSWIP_VERSION_2_2_ETC))
+ mac_bridge.val[1] = add ? (GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC |
+ GSWIP_TABLE_MAC_BRIDGE_VAL1_VALID) : 0;
+ else
+ mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC;
+
+ mac_bridge.valid = add;
+
+ err = gswip_pce_table_entry_write(priv, &mac_bridge);
+ if (err)
+ dev_err(priv->dev, "failed to write mac bridge: %d\n", err);
+
+ return err;
+}
+
+static int gswip_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ if (db.type != DSA_DB_BRIDGE)
+ return -EOPNOTSUPP;
+
+ return gswip_port_fdb(ds, port, db.bridge.dev, addr, vid, true);
+}
+
+static int gswip_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ if (db.type != DSA_DB_BRIDGE)
+ return -EOPNOTSUPP;
+
+ return gswip_port_fdb(ds, port, db.bridge.dev, addr, vid, false);
+}
+
+static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct gswip_priv *priv = ds->priv;
+ struct gswip_pce_table_entry mac_bridge = {0,};
+ unsigned char addr[ETH_ALEN];
+ int i;
+ int err;
+
+ for (i = 0; i < 2048; i++) {
+ mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
+ mac_bridge.index = i;
+
+ err = gswip_pce_table_entry_read(priv, &mac_bridge);
+ if (err) {
+ dev_err(priv->dev,
+ "failed to read mac bridge entry %d: %d\n",
+ i, err);
+ return err;
+ }
+
+ if (!mac_bridge.valid)
+ continue;
+
+ addr[5] = mac_bridge.key[0] & 0xff;
+ addr[4] = (mac_bridge.key[0] >> 8) & 0xff;
+ addr[3] = mac_bridge.key[1] & 0xff;
+ addr[2] = (mac_bridge.key[1] >> 8) & 0xff;
+ addr[1] = mac_bridge.key[2] & 0xff;
+ addr[0] = (mac_bridge.key[2] >> 8) & 0xff;
+ if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC) {
+ if (mac_bridge.val[0] & BIT(port)) {
+ err = cb(addr, 0, true, data);
+ if (err)
+ return err;
+ }
+ } else {
+ if (port == FIELD_GET(GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT,
+ mac_bridge.val[0])) {
+ err = cb(addr, 0, false, data);
+ if (err)
+ return err;
+ }
+ }
+ }
+ return 0;
+}
+
+static int gswip_port_max_mtu(struct dsa_switch *ds, int port)
+{
+ /* Includes 8 bytes for special header. */
+ return GSWIP_MAX_PACKET_LENGTH - VLAN_ETH_HLEN - ETH_FCS_LEN;
+}
+
+static int gswip_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct gswip_priv *priv = ds->priv;
+
+	/* The CPU port always has the maximum MTU of all user ports, so use
+	 * it to set the switch frame size, including the 8 byte special
+	 * header.
+ */
+ if (dsa_is_cpu_port(ds, port)) {
+ new_mtu += 8;
+ regmap_write(priv->gswip, GSWIP_MAC_FLEN,
+ VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN);
+ }
+
+ /* Enable MLEN for ports with non-standard MTUs, including the special
+ * header on the CPU port added above.
+ */
+ if (new_mtu != ETH_DATA_LEN)
+ regmap_set_bits(priv->gswip, GSWIP_MAC_CTRL_2p(port),
+ GSWIP_MAC_CTRL_2_MLEN);
+ else
+ regmap_clear_bits(priv->gswip, GSWIP_MAC_CTRL_2p(port),
+ GSWIP_MAC_CTRL_2_MLEN);
+
+ return 0;
+}
+
+static void gswip_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ priv->hw_info->phylink_get_caps(ds, port, config);
+}
+
+static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
+{
+ u32 mdio_phy;
+
+ if (link)
+ mdio_phy = GSWIP_MDIO_PHY_LINK_UP;
+ else
+ mdio_phy = GSWIP_MDIO_PHY_LINK_DOWN;
+
+ regmap_write_bits(priv->mdio, GSWIP_MDIO_PHYp(port),
+ GSWIP_MDIO_PHY_LINK_MASK, mdio_phy);
+}
+
+static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed,
+ phy_interface_t interface)
+{
+ u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0;
+
+ switch (speed) {
+ case SPEED_10:
+ mdio_phy = GSWIP_MDIO_PHY_SPEED_M10;
+
+ if (interface == PHY_INTERFACE_MODE_RMII)
+ mii_cfg = GSWIP_MII_CFG_RATE_M50;
+ else
+ mii_cfg = GSWIP_MII_CFG_RATE_M2P5;
+
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
+ break;
+
+ case SPEED_100:
+ mdio_phy = GSWIP_MDIO_PHY_SPEED_M100;
+
+ if (interface == PHY_INTERFACE_MODE_RMII)
+ mii_cfg = GSWIP_MII_CFG_RATE_M50;
+ else
+ mii_cfg = GSWIP_MII_CFG_RATE_M25;
+
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
+ break;
+
+ case SPEED_1000:
+ mdio_phy = GSWIP_MDIO_PHY_SPEED_G1;
+
+ mii_cfg = GSWIP_MII_CFG_RATE_M125;
+
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII;
+ break;
+ }
+
+ regmap_write_bits(priv->mdio, GSWIP_MDIO_PHYp(port),
+ GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy);
+ gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port);
+ regmap_write_bits(priv->gswip, GSWIP_MAC_CTRL_0p(port),
+ GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0);
+}
+
+static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex)
+{
+ u32 mac_ctrl_0, mdio_phy;
+
+ if (duplex == DUPLEX_FULL) {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN;
+ mdio_phy = GSWIP_MDIO_PHY_FDUP_EN;
+ } else {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS;
+ mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS;
+ }
+
+ regmap_write_bits(priv->gswip, GSWIP_MAC_CTRL_0p(port),
+ GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0);
+ regmap_write_bits(priv->mdio, GSWIP_MDIO_PHYp(port),
+ GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy);
+}
+
+static void gswip_port_set_pause(struct gswip_priv *priv, int port,
+ bool tx_pause, bool rx_pause)
+{
+ u32 mac_ctrl_0, mdio_phy;
+
+ if (tx_pause && rx_pause) {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX;
+ mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
+ GSWIP_MDIO_PHY_FCONRX_EN;
+ } else if (tx_pause) {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX;
+ mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
+ GSWIP_MDIO_PHY_FCONRX_DIS;
+ } else if (rx_pause) {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX;
+ mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
+ GSWIP_MDIO_PHY_FCONRX_EN;
+ } else {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE;
+ mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
+ GSWIP_MDIO_PHY_FCONRX_DIS;
+ }
+
+ regmap_write_bits(priv->gswip, GSWIP_MAC_CTRL_0p(port),
+ GSWIP_MAC_CTRL_0_FCON_MASK, mac_ctrl_0);
+ regmap_write_bits(priv->mdio, GSWIP_MDIO_PHYp(port),
+ GSWIP_MDIO_PHY_FCONTX_MASK | GSWIP_MDIO_PHY_FCONRX_MASK,
+ mdio_phy);
+}
+
+static void gswip_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct gswip_priv *priv = dp->ds->priv;
+ int port = dp->index;
+ u32 miicfg = 0;
+
+ miicfg |= GSWIP_MII_CFG_LDCLKDIS;
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return;
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_INTERNAL:
+ miicfg |= GSWIP_MII_CFG_MODE_MIIM;
+ break;
+ case PHY_INTERFACE_MODE_REVMII:
+ miicfg |= GSWIP_MII_CFG_MODE_MIIP;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
+ if (of_property_read_bool(dp->dn, "maxlinear,rmii-refclk-out"))
+ miicfg |= GSWIP_MII_CFG_RMII_CLK;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ miicfg |= GSWIP_MII_CFG_MODE_RGMII;
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ miicfg |= GSWIP_MII_CFG_MODE_GMII;
+ break;
+ default:
+ dev_err(dp->ds->dev,
+ "Unsupported interface: %d\n", state->interface);
+ return;
+ }
+
+ gswip_mii_mask_cfg(priv,
+ GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK |
+ GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS,
+ miicfg, port);
+
+ gswip_mii_delay_setup(priv, dp, state->interface);
+}
+
+static void gswip_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct gswip_priv *priv = dp->ds->priv;
+ int port = dp->index;
+
+ gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port);
+
+ if (!dsa_port_is_cpu(dp))
+ gswip_port_set_link(priv, port, false);
+}
+
+static void gswip_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
+ unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct gswip_priv *priv = dp->ds->priv;
+ int port = dp->index;
+
+ if (!dsa_port_is_cpu(dp) || interface != PHY_INTERFACE_MODE_INTERNAL) {
+ gswip_port_set_link(priv, port, true);
+ gswip_port_set_speed(priv, port, speed, interface);
+ gswip_port_set_duplex(priv, port, duplex);
+ gswip_port_set_pause(priv, port, tx_pause, rx_pause);
+ }
+
+ gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, GSWIP_MII_CFG_EN, port);
+}
+
+static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data)
+{
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++)
+ ethtool_puts(&data, gswip_rmon_cnt[i].name);
+}
+
+static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table,
+ u32 index)
+{
+ u32 result, val;
+ int err;
+
+ regmap_write(priv->gswip, GSWIP_BM_RAM_ADDR, index);
+ regmap_write_bits(priv->gswip, GSWIP_BM_RAM_CTRL,
+ GSWIP_BM_RAM_CTRL_ADDR_MASK | GSWIP_BM_RAM_CTRL_OPMOD |
+ GSWIP_BM_RAM_CTRL_BAS,
+ table | GSWIP_BM_RAM_CTRL_BAS);
+
+ err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL,
+ GSWIP_BM_RAM_CTRL_BAS);
+ if (err) {
+ dev_err(priv->dev, "timeout while reading table: %u, index: %u\n",
+ table, index);
+ return 0;
+ }
+
+ regmap_read(priv->gswip, GSWIP_BM_RAM_VAL(0), &result);
+ regmap_read(priv->gswip, GSWIP_BM_RAM_VAL(1), &val);
+ result |= val << 16;
+
+ return result;
+}
+
+static void gswip_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct gswip_priv *priv = ds->priv;
+ const struct gswip_rmon_cnt_desc *rmon_cnt;
+ int i;
+ u64 high;
+
+ for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) {
+ rmon_cnt = &gswip_rmon_cnt[i];
+
+ data[i] = gswip_bcm_ram_entry_read(priv, port,
+ rmon_cnt->offset);
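+		/* Counters wider than one word keep their high half in the
+		 * next RAM entry; merge it into the 64-bit result.
+		 */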
+ if (rmon_cnt->size == 2) {
+ high = gswip_bcm_ram_entry_read(priv, port,
+ rmon_cnt->offset + 1);
+ data[i] |= high << 32;
+ }
+ }
+}
+
+static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ return ARRAY_SIZE(gswip_rmon_cnt);
+}
+
+static int gswip_set_mac_eee(struct dsa_switch *ds, int port,
+ struct ethtool_keee *e)
+{
+ if (e->tx_lpi_timer > 0x7f)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void gswip_phylink_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct gswip_priv *priv = dp->ds->priv;
+
+ regmap_clear_bits(priv->gswip, GSWIP_MAC_CTRL_4p(dp->index),
+ GSWIP_MAC_CTRL_4_LPIEN);
+}
+
+static int gswip_phylink_mac_enable_tx_lpi(struct phylink_config *config,
+ u32 timer, bool tx_clock_stop)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct gswip_priv *priv = dp->ds->priv;
+
+ return regmap_update_bits(priv->gswip, GSWIP_MAC_CTRL_4p(dp->index),
+ GSWIP_MAC_CTRL_4_LPIEN |
+ GSWIP_MAC_CTRL_4_GWAIT_MASK |
+ GSWIP_MAC_CTRL_4_WAIT_MASK,
+ GSWIP_MAC_CTRL_4_LPIEN |
+ GSWIP_MAC_CTRL_4_GWAIT(timer) |
+ GSWIP_MAC_CTRL_4_WAIT(timer));
+}
+
+static bool gswip_support_eee(struct dsa_switch *ds, int port)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ if (GSWIP_VERSION_GE(priv, GSWIP_VERSION_2_2))
+ return true;
+
+ return false;
+}
+
+static struct phylink_pcs *gswip_phylink_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct gswip_priv *priv = dp->ds->priv;
+
+ if (priv->hw_info->mac_select_pcs)
+ return priv->hw_info->mac_select_pcs(config, interface);
+
+ return NULL;
+}
+
+static const struct phylink_mac_ops gswip_phylink_mac_ops = {
+ .mac_config = gswip_phylink_mac_config,
+ .mac_link_down = gswip_phylink_mac_link_down,
+ .mac_link_up = gswip_phylink_mac_link_up,
+ .mac_disable_tx_lpi = gswip_phylink_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = gswip_phylink_mac_enable_tx_lpi,
+ .mac_select_pcs = gswip_phylink_mac_select_pcs,
+};
+
+static const struct dsa_switch_ops gswip_switch_ops = {
+ .get_tag_protocol = gswip_get_tag_protocol,
+ .setup = gswip_setup,
+ .port_setup = gswip_port_setup,
+ .port_enable = gswip_port_enable,
+ .port_disable = gswip_port_disable,
+ .port_pre_bridge_flags = gswip_port_pre_bridge_flags,
+ .port_bridge_flags = gswip_port_bridge_flags,
+ .port_bridge_join = gswip_port_bridge_join,
+ .port_bridge_leave = gswip_port_bridge_leave,
+ .port_fast_age = gswip_port_fast_age,
+ .port_vlan_filtering = gswip_port_vlan_filtering,
+ .port_vlan_add = gswip_port_vlan_add,
+ .port_vlan_del = gswip_port_vlan_del,
+ .port_stp_state_set = gswip_port_stp_state_set,
+ .port_fdb_add = gswip_port_fdb_add,
+ .port_fdb_del = gswip_port_fdb_del,
+ .port_fdb_dump = gswip_port_fdb_dump,
+ .port_change_mtu = gswip_port_change_mtu,
+ .port_max_mtu = gswip_port_max_mtu,
+ .phylink_get_caps = gswip_phylink_get_caps,
+ .get_strings = gswip_get_strings,
+ .get_ethtool_stats = gswip_get_ethtool_stats,
+ .get_sset_count = gswip_get_sset_count,
+ .set_mac_eee = gswip_set_mac_eee,
+ .support_eee = gswip_support_eee,
+ .port_hsr_join = dsa_port_simple_hsr_join,
+ .port_hsr_leave = dsa_port_simple_hsr_leave,
+};
+
+void gswip_disable_switch(struct gswip_priv *priv)
+{
+ regmap_clear_bits(priv->mdio, GSWIP_MDIO_GLOB, GSWIP_MDIO_GLOB_ENABLE);
+}
+EXPORT_SYMBOL_GPL(gswip_disable_switch);
+
+static int gswip_validate_cpu_port(struct dsa_switch *ds)
+{
+ struct gswip_priv *priv = ds->priv;
+ struct dsa_port *cpu_dp;
+ int cpu_port = -1;
+
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ if (cpu_port != -1)
+ return dev_err_probe(ds->dev, -EINVAL,
+ "only a single CPU port is supported\n");
+
+ cpu_port = cpu_dp->index;
+ }
+
+ if (cpu_port == -1)
+ return dev_err_probe(ds->dev, -EINVAL, "no CPU port defined\n");
+
+ if (BIT(cpu_port) & ~priv->hw_info->allowed_cpu_ports)
+ return dev_err_probe(ds->dev, -EINVAL,
+ "unsupported CPU port defined\n");
+
+ return 0;
+}
+
+int gswip_probe_common(struct gswip_priv *priv, u32 version)
+{
+ int err;
+
+ mutex_init(&priv->pce_table_lock);
+
+ priv->ds = devm_kzalloc(priv->dev, sizeof(*priv->ds), GFP_KERNEL);
+ if (!priv->ds)
+ return -ENOMEM;
+
+ priv->ds->dev = priv->dev;
+ priv->ds->num_ports = priv->hw_info->max_ports;
+ priv->ds->ops = &gswip_switch_ops;
+ priv->ds->phylink_mac_ops = &gswip_phylink_mac_ops;
+ priv->ds->priv = priv;
+
+	/* The hardware has the 'major/minor' version bytes in the wrong
+	 * order, preventing numerical comparisons. Construct a 16-bit
+	 * unsigned integer with the REV field as the most significant byte
+	 * and the MOD field as the least significant byte. This effectively
+	 * swaps the two bytes of the version value but, unlike swab16(),
+	 * leaves the source variable untouched.
+ */
+ priv->version = GSWIP_VERSION_REV(version) << 8 |
+ GSWIP_VERSION_MOD(version);
+
+ err = dsa_register_switch(priv->ds);
+ if (err)
+ return dev_err_probe(priv->dev, err, "dsa switch registration failed\n");
+
+ err = gswip_validate_cpu_port(priv->ds);
+ if (err)
+ goto disable_switch;
+
+ dev_info(priv->dev, "probed GSWIP version %lx mod %lx\n",
+ GSWIP_VERSION_REV(version), GSWIP_VERSION_MOD(version));
+
+ return 0;
+
+disable_switch:
+ gswip_disable_switch(priv);
+ dsa_unregister_switch(priv->ds);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(gswip_probe_common);
+
+MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
+MODULE_AUTHOR("Daniel Golle <daniel@makrotopia.org>");
+MODULE_DESCRIPTION("Lantiq / Intel / MaxLinear GSWIP common functions");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/lantiq_pce.h b/drivers/net/dsa/lantiq/lantiq_pce.h
index e2be31f3672a..659f9a0638d9 100644
--- a/drivers/net/dsa/lantiq_pce.h
+++ b/drivers/net/dsa/lantiq/lantiq_pce.h
@@ -7,6 +7,8 @@
* Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
*/
+#include "lantiq_gswip.h"
+
enum {
OUT_MAC0 = 0,
OUT_MAC1,
@@ -74,13 +76,6 @@ enum {
FLAG_NO, /*13*/
};
-struct gswip_pce_microcode {
- u16 val_3;
- u16 val_2;
- u16 val_1;
- u16 val_0;
-};
-
#define MC_ENTRY(val, msk, ns, out, len, type, flags, ipv4_len) \
{ val, msk, ((ns) << 10 | (out) << 4 | (len) >> 1),\
((len) & 1) << 15 | (type) << 13 | (flags) << 9 | (ipv4_len) << 8 }
diff --git a/drivers/net/dsa/lantiq/mxl-gsw1xx.c b/drivers/net/dsa/lantiq/mxl-gsw1xx.c
new file mode 100644
index 000000000000..0816c61a47f1
--- /dev/null
+++ b/drivers/net/dsa/lantiq/mxl-gsw1xx.c
@@ -0,0 +1,733 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* DSA Driver for MaxLinear GSW1xx switch devices
+ *
+ * Copyright (C) 2025 Daniel Golle <daniel@makrotopia.org>
+ * Copyright (C) 2023 - 2024 MaxLinear Inc.
+ * Copyright (C) 2022 Snap One, LLC. All rights reserved.
+ * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ * Copyright (C) 2010 Lantiq Deutschland
+ */
+
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/regmap.h>
+#include <net/dsa.h>
+
+#include "lantiq_gswip.h"
+#include "mxl-gsw1xx.h"
+#include "mxl-gsw1xx_pce.h"
+
+struct gsw1xx_priv {
+ struct mdio_device *mdio_dev;
+ int smdio_badr;
+ struct regmap *sgmii;
+ struct regmap *gpio;
+ struct regmap *clk;
+ struct regmap *shell;
+ struct phylink_pcs pcs;
+ phy_interface_t tbi_interface;
+ struct gswip_priv gswip;
+};
+
+static int gsw1xx_config_smdio_badr(struct gsw1xx_priv *priv,
+ unsigned int reg)
+{
+ struct mii_bus *bus = priv->mdio_dev->bus;
+ int sw_addr = priv->mdio_dev->addr;
+ int smdio_badr = priv->smdio_badr;
+ int res;
+
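+	/* Switch registers are reached through an SMDIO window; if the
+	 * target register lies outside the currently programmed window,
+	 * update the base address register first.
+	 */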
+ if (smdio_badr == GSW1XX_SMDIO_BADR_UNKNOWN ||
+ reg - smdio_badr >= GSW1XX_SMDIO_BADR ||
+ smdio_badr > reg) {
+ /* Configure the Switch Base Address */
+ smdio_badr = reg & ~GENMASK(3, 0);
+ res = __mdiobus_write(bus, sw_addr, GSW1XX_SMDIO_BADR, smdio_badr);
+ if (res < 0) {
+ dev_err(&priv->mdio_dev->dev,
+ "%s: Error %d, configuring switch base\n",
+ __func__, res);
+ return res;
+ }
+ priv->smdio_badr = smdio_badr;
+ }
+
+ return smdio_badr;
+}
+
+static int gsw1xx_regmap_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct gsw1xx_priv *priv = context;
+ struct mii_bus *bus = priv->mdio_dev->bus;
+ int sw_addr = priv->mdio_dev->addr;
+ int smdio_badr;
+ int res;
+
+ smdio_badr = gsw1xx_config_smdio_badr(priv, reg);
+ if (smdio_badr < 0)
+ return smdio_badr;
+
+ res = __mdiobus_read(bus, sw_addr, reg - smdio_badr);
+ if (res < 0) {
+ dev_err(&priv->mdio_dev->dev, "%s: Error %d reading 0x%x\n",
+ __func__, res, reg);
+ return res;
+ }
+
+ *val = res;
+
+ return 0;
+}
+
+static int gsw1xx_regmap_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct gsw1xx_priv *priv = context;
+ struct mii_bus *bus = priv->mdio_dev->bus;
+ int sw_addr = priv->mdio_dev->addr;
+ int smdio_badr;
+ int res;
+
+ smdio_badr = gsw1xx_config_smdio_badr(priv, reg);
+ if (smdio_badr < 0)
+ return smdio_badr;
+
+ res = __mdiobus_write(bus, sw_addr, reg - smdio_badr, val);
+ if (res < 0)
+ dev_err(&priv->mdio_dev->dev,
+ "%s: Error %d, writing 0x%x:0x%x\n", __func__, res, reg,
+ val);
+
+ return res;
+}
+
+static const struct regmap_bus gsw1xx_regmap_bus = {
+ .reg_write = gsw1xx_regmap_write,
+ .reg_read = gsw1xx_regmap_read,
+};
+
+static void gsw1xx_mdio_regmap_lock(void *mdio_lock)
+{
+ mutex_lock_nested(mdio_lock, MDIO_MUTEX_NESTED);
+}
+
+static void gsw1xx_mdio_regmap_unlock(void *mdio_lock)
+{
+ mutex_unlock(mdio_lock);
+}
+
+static unsigned int gsw1xx_pcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+}
+
+static struct gsw1xx_priv *pcs_to_gsw1xx(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct gsw1xx_priv, pcs);
+}
+
+static int gsw1xx_pcs_enable(struct phylink_pcs *pcs)
+{
+ struct gsw1xx_priv *priv = pcs_to_gsw1xx(pcs);
+
+ /* Deassert SGMII shell reset */
+ return regmap_clear_bits(priv->shell, GSW1XX_SHELL_RST_REQ,
+ GSW1XX_RST_REQ_SGMII_SHELL);
+}
+
+static void gsw1xx_pcs_disable(struct phylink_pcs *pcs)
+{
+ struct gsw1xx_priv *priv = pcs_to_gsw1xx(pcs);
+
+ /* Assert SGMII shell reset */
+ regmap_set_bits(priv->shell, GSW1XX_SHELL_RST_REQ,
+ GSW1XX_RST_REQ_SGMII_SHELL);
+
+ priv->tbi_interface = PHY_INTERFACE_MODE_NA;
+}
+
+static void gsw1xx_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
+ struct phylink_link_state *state)
+{
+ struct gsw1xx_priv *priv = pcs_to_gsw1xx(pcs);
+ int ret;
+ u32 val;
+
+ ret = regmap_read(priv->sgmii, GSW1XX_SGMII_TBI_TBISTAT, &val);
+ if (ret < 0)
+ return;
+
+ state->link = !!(val & GSW1XX_SGMII_TBI_TBISTAT_LINK);
+ state->an_complete = !!(val & GSW1XX_SGMII_TBI_TBISTAT_AN_COMPLETE);
+
+ ret = regmap_read(priv->sgmii, GSW1XX_SGMII_TBI_LPSTAT, &val);
+ if (ret < 0)
+ return;
+
+ state->duplex = (val & GSW1XX_SGMII_TBI_LPSTAT_DUPLEX) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ if (val & GSW1XX_SGMII_TBI_LPSTAT_PAUSE_RX)
+ state->pause |= MLO_PAUSE_RX;
+
+ if (val & GSW1XX_SGMII_TBI_LPSTAT_PAUSE_TX)
+ state->pause |= MLO_PAUSE_TX;
+
+ switch (FIELD_GET(GSW1XX_SGMII_TBI_LPSTAT_SPEED, val)) {
+ case GSW1XX_SGMII_TBI_LPSTAT_SPEED_10:
+ state->speed = SPEED_10;
+ break;
+ case GSW1XX_SGMII_TBI_LPSTAT_SPEED_100:
+ state->speed = SPEED_100;
+ break;
+ case GSW1XX_SGMII_TBI_LPSTAT_SPEED_1000:
+ state->speed = SPEED_1000;
+ break;
+ case GSW1XX_SGMII_TBI_LPSTAT_SPEED_NOSGMII:
+ if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
+ state->speed = SPEED_1000;
+ else if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ state->speed = SPEED_2500;
+ else
+ state->speed = SPEED_UNKNOWN;
+ break;
+ }
+}
+
+static int gsw1xx_pcs_phy_xaui_write(struct gsw1xx_priv *priv, u16 addr,
+ u16 data)
+{
+ int ret, val;
+
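+ /* Indirect access to the internal SerDes PHY: latch the data and
+ * address registers, trigger the write and poll the control register
+ * until the operation completes.
+ */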
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_D, data);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_A, addr);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_C,
+ GSW1XX_SGMII_PHY_WRITE |
+ GSW1XX_SGMII_PHY_RESET_N);
+ if (ret < 0)
+ return ret;
+
+ return regmap_read_poll_timeout(priv->sgmii, GSW1XX_SGMII_PHY_C,
+ val, val & GSW1XX_SGMII_PHY_STATUS,
+ 1000, 100000);
+}
+
+static int gsw1xx_pcs_reset(struct gsw1xx_priv *priv)
+{
+ int ret;
+ u16 val;
+
+ /* Assert and deassert SGMII shell reset */
+ ret = regmap_set_bits(priv->shell, GSW1XX_SHELL_RST_REQ,
+ GSW1XX_RST_REQ_SGMII_SHELL);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_clear_bits(priv->shell, GSW1XX_SHELL_RST_REQ,
+ GSW1XX_RST_REQ_SGMII_SHELL);
+ if (ret < 0)
+ return ret;
+
+ /* Hardware Bringup FSM Enable */
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_HWBU_CTRL,
+ GSW1XX_SGMII_PHY_HWBU_CTRL_EN_HWBU_FSM |
+ GSW1XX_SGMII_PHY_HWBU_CTRL_HW_FSM_EN);
+ if (ret < 0)
+ return ret;
+
+ /* Configure SGMII PHY Receiver */
+ val = FIELD_PREP(GSW1XX_SGMII_PHY_RX0_CFG2_EQ,
+ GSW1XX_SGMII_PHY_RX0_CFG2_EQ_DEF) |
+ GSW1XX_SGMII_PHY_RX0_CFG2_LOS_EN |
+ GSW1XX_SGMII_PHY_RX0_CFG2_TERM_EN |
+ FIELD_PREP(GSW1XX_SGMII_PHY_RX0_CFG2_FILT_CNT,
+ GSW1XX_SGMII_PHY_RX0_CFG2_FILT_CNT_DEF);
+
+ /* TODO: Take care of inverted RX pair once generic property is
+ * available
+ */
+
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_RX0_CFG2, val);
+ if (ret < 0)
+ return ret;
+
+ val = FIELD_PREP(GSW1XX_SGMII_PHY_TX0_CFG3_VBOOST_LEVEL,
+ GSW1XX_SGMII_PHY_TX0_CFG3_VBOOST_LEVEL_DEF);
+
+ /* TODO: Take care of inverted TX pair once generic property is
+ * available
+ */
+
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_TX0_CFG3, val);
+ if (ret < 0)
+ return ret;
+
+ /* Reset and Release TBI */
+ val = GSW1XX_SGMII_TBI_TBICTL_INITTBI | GSW1XX_SGMII_TBI_TBICTL_ENTBI |
+ GSW1XX_SGMII_TBI_TBICTL_CRSTRR | GSW1XX_SGMII_TBI_TBICTL_CRSOFF;
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_TBI_TBICTL, val);
+ if (ret < 0)
+ return ret;
+ val &= ~GSW1XX_SGMII_TBI_TBICTL_INITTBI;
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_TBI_TBICTL, val);
+ if (ret < 0)
+ return ret;
+
+ /* Release Tx Data Buffers */
+ ret = regmap_set_bits(priv->sgmii, GSW1XX_SGMII_PCS_TXB_CTL,
+ GSW1XX_SGMII_PCS_TXB_CTL_INIT_TX_TXB);
+ if (ret < 0)
+ return ret;
+ ret = regmap_clear_bits(priv->sgmii, GSW1XX_SGMII_PCS_TXB_CTL,
+ GSW1XX_SGMII_PCS_TXB_CTL_INIT_TX_TXB);
+ if (ret < 0)
+ return ret;
+
+ /* Release Rx Data Buffers */
+ ret = regmap_set_bits(priv->sgmii, GSW1XX_SGMII_PCS_RXB_CTL,
+ GSW1XX_SGMII_PCS_RXB_CTL_INIT_RX_RXB);
+ if (ret < 0)
+ return ret;
+ return regmap_clear_bits(priv->sgmii, GSW1XX_SGMII_PCS_RXB_CTL,
+ GSW1XX_SGMII_PCS_RXB_CTL_INIT_RX_RXB);
+}
+
+static int gsw1xx_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct gsw1xx_priv *priv = pcs_to_gsw1xx(pcs);
+ u16 txaneg, anegctl, nco_ctrl;
+ bool reconf = false;
+ int ret = 0;
+
+ /* Skip resetting the hardware if the PCS has previously been
+ * successfully configured for this interface mode, so the link is
+ * not disrupted unnecessarily.
+ */
+ if (priv->tbi_interface == interface)
+ reconf = true;
+
+ /* mark PCS configuration as incomplete */
+ priv->tbi_interface = PHY_INTERFACE_MODE_NA;
+
+ if (!reconf)
+ ret = gsw1xx_pcs_reset(priv);
+
+ if (ret)
+ return ret;
+
+ /* Override the bootstrap pin settings:
+ * OVRANEG causes ANEG mode, ANEG enable and ANEG restart to be
+ * taken from the ANMODE, ANEGEN and RANEG bits of the ANEGCTL
+ * register.
+ * OVRABL causes the ability bits in tx_config_reg to be taken from
+ * the TXANEGH and TXANEGL registers.
+ */
+ anegctl = GSW1XX_SGMII_TBI_ANEGCTL_OVRANEG |
+ GSW1XX_SGMII_TBI_ANEGCTL_OVRABL;
+
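+ /* Translate the nominal link timer for this interface mode into the
+ * hardware link timer (LT) setting; unexpected values are rejected.
+ */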
+ switch (phylink_get_link_timer_ns(interface)) {
+ case 10000:
+ anegctl |= FIELD_PREP(GSW1XX_SGMII_TBI_ANEGCTL_LT,
+ GSW1XX_SGMII_TBI_ANEGCTL_LT_10US);
+ break;
+ case 1600000:
+ anegctl |= FIELD_PREP(GSW1XX_SGMII_TBI_ANEGCTL_LT,
+ GSW1XX_SGMII_TBI_ANEGCTL_LT_1_6MS);
+ break;
+ case 5000000:
+ anegctl |= FIELD_PREP(GSW1XX_SGMII_TBI_ANEGCTL_LT,
+ GSW1XX_SGMII_TBI_ANEGCTL_LT_5MS);
+ break;
+ case 10000000:
+ anegctl |= FIELD_PREP(GSW1XX_SGMII_TBI_ANEGCTL_LT,
+ GSW1XX_SGMII_TBI_ANEGCTL_LT_10MS);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (neg_mode & PHYLINK_PCS_NEG_INBAND)
+ anegctl |= GSW1XX_SGMII_TBI_ANEGCTL_ANEGEN;
+
+ txaneg = phylink_mii_c22_pcs_encode_advertisement(interface, advertising);
+
+ if (interface == PHY_INTERFACE_MODE_SGMII) {
+ /* Lacking a defined reverse-SGMII interface mode, this driver
+ * only supports SGMII (MAC side) for now
+ */
+ anegctl |= FIELD_PREP(GSW1XX_SGMII_TBI_ANEGCTL_ANMODE,
+ GSW1XX_SGMII_TBI_ANEGCTL_ANMODE_SGMII_MAC);
+ txaneg |= ADVERTISE_LPACK;
+ } else if (interface == PHY_INTERFACE_MODE_1000BASEX ||
+ interface == PHY_INTERFACE_MODE_2500BASEX) {
+ anegctl |= FIELD_PREP(GSW1XX_SGMII_TBI_ANEGCTL_ANMODE,
+ GSW1XX_SGMII_TBI_ANEGCTL_ANMODE_1000BASEX);
+ } else {
+ dev_err(priv->gswip.dev, "%s: unsupported interface mode %s\n",
+ __func__, phy_modes(interface));
+ return -EINVAL;
+ }
+
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_TBI_TXANEGH,
+ FIELD_GET(GENMASK(15, 8), txaneg));
+ if (ret < 0)
+ return ret;
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_TBI_TXANEGL,
+ FIELD_GET(GENMASK(7, 0), txaneg));
+ if (ret < 0)
+ return ret;
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_TBI_ANEGCTL, anegctl);
+ if (ret < 0)
+ return ret;
+
+ if (!reconf) {
+ /* setup SerDes clock speed */
+ if (interface == PHY_INTERFACE_MODE_2500BASEX)
+ nco_ctrl = GSW1XX_SGMII_2G5 | GSW1XX_SGMII_2G5_NCO2;
+ else
+ nco_ctrl = GSW1XX_SGMII_1G | GSW1XX_SGMII_1G_NCO1;
+
+ ret = regmap_update_bits(priv->clk, GSW1XX_CLK_NCO_CTRL,
+ GSW1XX_SGMII_HSP_MASK |
+ GSW1XX_SGMII_SEL,
+ nco_ctrl);
+ if (ret)
+ return ret;
+
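+ /* SerDes PHY register write taken from the vendor reference
+ * configuration (register 0x30 is assumed to be a vendor default
+ * setting; it is not documented further in this driver).
+ */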
+ ret = gsw1xx_pcs_phy_xaui_write(priv, 0x30, 0x80);
+ if (ret)
+ return ret;
+ }
+
+ /* PCS configuration has now been completed, store mode to prevent
+ * disrupting the link in case of future calls of this function for the
+ * same interface mode.
+ */
+ priv->tbi_interface = interface;
+
+ return 0;
+}
+
+static void gsw1xx_pcs_an_restart(struct phylink_pcs *pcs)
+{
+ struct gsw1xx_priv *priv = pcs_to_gsw1xx(pcs);
+
+ regmap_set_bits(priv->sgmii, GSW1XX_SGMII_TBI_ANEGCTL,
+ GSW1XX_SGMII_TBI_ANEGCTL_RANEG);
+}
+
+static void gsw1xx_pcs_link_up(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
+ phy_interface_t interface, int speed,
+ int duplex)
+{
+ struct gsw1xx_priv *priv = pcs_to_gsw1xx(pcs);
+ u16 lpstat;
+
+ /* When in-band AN is enabled the hardware sets lpstat itself */
+ if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
+ return;
+
+ /* Force speed and duplex settings */
+ if (interface == PHY_INTERFACE_MODE_SGMII) {
+ if (speed == SPEED_10)
+ lpstat = FIELD_PREP(GSW1XX_SGMII_TBI_LPSTAT_SPEED,
+ GSW1XX_SGMII_TBI_LPSTAT_SPEED_10);
+ else if (speed == SPEED_100)
+ lpstat = FIELD_PREP(GSW1XX_SGMII_TBI_LPSTAT_SPEED,
+ GSW1XX_SGMII_TBI_LPSTAT_SPEED_100);
+ else
+ lpstat = FIELD_PREP(GSW1XX_SGMII_TBI_LPSTAT_SPEED,
+ GSW1XX_SGMII_TBI_LPSTAT_SPEED_1000);
+ } else {
+ lpstat = FIELD_PREP(GSW1XX_SGMII_TBI_LPSTAT_SPEED,
+ GSW1XX_SGMII_TBI_LPSTAT_SPEED_NOSGMII);
+ }
+
+ if (duplex == DUPLEX_FULL)
+ lpstat |= GSW1XX_SGMII_TBI_LPSTAT_DUPLEX;
+
+ regmap_write(priv->sgmii, GSW1XX_SGMII_TBI_LPSTAT, lpstat);
+}
+
+static const struct phylink_pcs_ops gsw1xx_pcs_ops = {
+ .pcs_inband_caps = gsw1xx_pcs_inband_caps,
+ .pcs_enable = gsw1xx_pcs_enable,
+ .pcs_disable = gsw1xx_pcs_disable,
+ .pcs_get_state = gsw1xx_pcs_get_state,
+ .pcs_config = gsw1xx_pcs_config,
+ .pcs_an_restart = gsw1xx_pcs_an_restart,
+ .pcs_link_up = gsw1xx_pcs_link_up,
+};
+
+static void gsw1xx_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
+
+ switch (port) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ break;
+ case 4: /* port 4: SGMII */
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ config->supported_interfaces);
+ if (priv->hw_info->supports_2500m) {
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ config->supported_interfaces);
+ config->mac_capabilities |= MAC_2500FD;
+ }
+ return; /* no support for EEE on SGMII port */
+ case 5: /* port 5: RGMII or RMII */
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
+ phy_interface_set_rgmii(config->supported_interfaces);
+ break;
+ }
+
+ config->lpi_capabilities = MAC_100FD | MAC_1000FD;
+ config->lpi_timer_default = 20;
+ memcpy(config->lpi_interfaces, config->supported_interfaces,
+ sizeof(config->lpi_interfaces));
+}
+
+static struct phylink_pcs *gsw1xx_phylink_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct gswip_priv *gswip_priv = dp->ds->priv;
+ struct gsw1xx_priv *gsw1xx_priv = container_of(gswip_priv,
+ struct gsw1xx_priv,
+ gswip);
+
+ switch (dp->index) {
+ case GSW1XX_SGMII_PORT:
+ return &gsw1xx_priv->pcs;
+ default:
+ return NULL;
+ }
+}
+
+static struct regmap *gsw1xx_regmap_init(struct gsw1xx_priv *priv,
+ const char *name,
+ unsigned int reg_base,
+ unsigned int max_register)
+{
+ const struct regmap_config config = {
+ .name = name,
+ .reg_bits = 16,
+ .val_bits = 16,
+ .reg_base = reg_base,
+ .max_register = max_register,
+ .lock = gsw1xx_mdio_regmap_lock,
+ .unlock = gsw1xx_mdio_regmap_unlock,
+ .lock_arg = &priv->mdio_dev->bus->mdio_lock,
+ };
+
+ return devm_regmap_init(&priv->mdio_dev->dev, &gsw1xx_regmap_bus,
+ priv, &config);
+}
+
+static int gsw1xx_probe(struct mdio_device *mdiodev)
+{
+ struct device *dev = &mdiodev->dev;
+ struct gsw1xx_priv *priv;
+ u32 version;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->mdio_dev = mdiodev;
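+ /* Force programming of the SMDIO base address on the first access */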
+ priv->smdio_badr = GSW1XX_SMDIO_BADR_UNKNOWN;
+
+ priv->gswip.dev = dev;
+ priv->gswip.hw_info = of_device_get_match_data(dev);
+ if (!priv->gswip.hw_info)
+ return -EINVAL;
+
+ priv->gswip.gswip = gsw1xx_regmap_init(priv, "switch",
+ GSW1XX_SWITCH_BASE, 0xfff);
+ if (IS_ERR(priv->gswip.gswip))
+ return PTR_ERR(priv->gswip.gswip);
+
+ priv->gswip.mdio = gsw1xx_regmap_init(priv, "mdio", GSW1XX_MMDIO_BASE,
+ 0xff);
+ if (IS_ERR(priv->gswip.mdio))
+ return PTR_ERR(priv->gswip.mdio);
+
+ priv->gswip.mii = gsw1xx_regmap_init(priv, "mii", GSW1XX_RGMII_BASE,
+ 0xff);
+ if (IS_ERR(priv->gswip.mii))
+ return PTR_ERR(priv->gswip.mii);
+
+ priv->sgmii = gsw1xx_regmap_init(priv, "sgmii", GSW1XX_SGMII_BASE,
+ 0xfff);
+ if (IS_ERR(priv->sgmii))
+ return PTR_ERR(priv->sgmii);
+
+ priv->gpio = gsw1xx_regmap_init(priv, "gpio", GSW1XX_GPIO_BASE, 0xff);
+ if (IS_ERR(priv->gpio))
+ return PTR_ERR(priv->gpio);
+
+ priv->clk = gsw1xx_regmap_init(priv, "clk", GSW1XX_CLK_BASE, 0xff);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ priv->shell = gsw1xx_regmap_init(priv, "shell", GSW1XX_SHELL_BASE,
+ 0xff);
+ if (IS_ERR(priv->shell))
+ return PTR_ERR(priv->shell);
+
+ priv->pcs.ops = &gsw1xx_pcs_ops;
+ priv->pcs.poll = true;
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ priv->pcs.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ priv->pcs.supported_interfaces);
+ if (priv->gswip.hw_info->supports_2500m)
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ priv->pcs.supported_interfaces);
+ priv->tbi_interface = PHY_INTERFACE_MODE_NA;
+
+ /* assert SGMII reset to power down SGMII unit */
+ ret = regmap_set_bits(priv->shell, GSW1XX_SHELL_RST_REQ,
+ GSW1XX_RST_REQ_SGMII_SHELL);
+ if (ret < 0)
+ return ret;
+
+ /* Configure the GPIO pin-mux for MMDIO in case an external PHY is
+ * connected to the SGMII or RGMII slave interface
+ */
+ regmap_set_bits(priv->gpio, GPIO_ALTSEL0, 3);
+ regmap_set_bits(priv->gpio, GPIO_ALTSEL1, 3);
+
+ ret = regmap_read(priv->gswip.gswip, GSWIP_VERSION, &version);
+ if (ret)
+ return ret;
+
+ ret = gswip_probe_common(&priv->gswip, version);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dev, &priv->gswip);
+
+ return 0;
+}
+
+static void gsw1xx_remove(struct mdio_device *mdiodev)
+{
+ struct gswip_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ gswip_disable_switch(priv);
+
+ dsa_unregister_switch(priv->ds);
+}
+
+static void gsw1xx_shutdown(struct mdio_device *mdiodev)
+{
+ struct gswip_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+
+ gswip_disable_switch(priv);
+}
+
+static const struct gswip_hw_info gsw12x_data = {
+ .max_ports = GSW1XX_PORTS,
+ .allowed_cpu_ports = BIT(GSW1XX_MII_PORT) | BIT(GSW1XX_SGMII_PORT),
+ .mii_ports = BIT(GSW1XX_MII_PORT),
+ .mii_port_reg_offset = -GSW1XX_MII_PORT,
+ .mac_select_pcs = gsw1xx_phylink_mac_select_pcs,
+ .phylink_get_caps = &gsw1xx_phylink_get_caps,
+ .supports_2500m = true,
+ .pce_microcode = &gsw1xx_pce_microcode,
+ .pce_microcode_size = ARRAY_SIZE(gsw1xx_pce_microcode),
+ .tag_protocol = DSA_TAG_PROTO_MXL_GSW1XX,
+};
+
+static const struct gswip_hw_info gsw140_data = {
+ .max_ports = GSW1XX_PORTS,
+ .allowed_cpu_ports = BIT(GSW1XX_MII_PORT) | BIT(GSW1XX_SGMII_PORT),
+ .mii_ports = BIT(GSW1XX_MII_PORT),
+ .mii_port_reg_offset = -GSW1XX_MII_PORT,
+ .mac_select_pcs = gsw1xx_phylink_mac_select_pcs,
+ .phylink_get_caps = &gsw1xx_phylink_get_caps,
+ .supports_2500m = true,
+ .pce_microcode = &gsw1xx_pce_microcode,
+ .pce_microcode_size = ARRAY_SIZE(gsw1xx_pce_microcode),
+ .tag_protocol = DSA_TAG_PROTO_MXL_GSW1XX,
+};
+
+static const struct gswip_hw_info gsw141_data = {
+ .max_ports = GSW1XX_PORTS,
+ .allowed_cpu_ports = BIT(GSW1XX_MII_PORT) | BIT(GSW1XX_SGMII_PORT),
+ .mii_ports = BIT(GSW1XX_MII_PORT),
+ .mii_port_reg_offset = -GSW1XX_MII_PORT,
+ .mac_select_pcs = gsw1xx_phylink_mac_select_pcs,
+ .phylink_get_caps = gsw1xx_phylink_get_caps,
+ .pce_microcode = &gsw1xx_pce_microcode,
+ .pce_microcode_size = ARRAY_SIZE(gsw1xx_pce_microcode),
+ .tag_protocol = DSA_TAG_PROTO_MXL_GSW1XX,
+};
+
+/*
+ * GSW125 is the industrial temperature version of GSW120.
+ * GSW145 is the industrial temperature version of GSW140.
+ */
+static const struct of_device_id gsw1xx_of_match[] = {
+ { .compatible = "maxlinear,gsw120", .data = &gsw12x_data },
+ { .compatible = "maxlinear,gsw125", .data = &gsw12x_data },
+ { .compatible = "maxlinear,gsw140", .data = &gsw140_data },
+ { .compatible = "maxlinear,gsw141", .data = &gsw141_data },
+ { .compatible = "maxlinear,gsw145", .data = &gsw140_data },
+ { /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, gsw1xx_of_match);
+
+static struct mdio_driver gsw1xx_driver = {
+ .probe = gsw1xx_probe,
+ .remove = gsw1xx_remove,
+ .shutdown = gsw1xx_shutdown,
+ .mdiodrv.driver = {
+ .name = "mxl-gsw1xx",
+ .of_match_table = gsw1xx_of_match,
+ },
+};
+
+mdio_module_driver(gsw1xx_driver);
+
+MODULE_AUTHOR("Daniel Golle <daniel@makrotopia.org>");
+MODULE_DESCRIPTION("Driver for MaxLinear GSW1xx Ethernet switch");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/lantiq/mxl-gsw1xx.h b/drivers/net/dsa/lantiq/mxl-gsw1xx.h
new file mode 100644
index 000000000000..38e03c048a26
--- /dev/null
+++ b/drivers/net/dsa/lantiq/mxl-gsw1xx.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Register definitions for MaxLinear GSW1xx series switches
+ *
+ * Copyright (C) 2025 Daniel Golle <daniel@makrotopia.org>
+ * Copyright (C) 2023 - 2024 MaxLinear Inc.
+ */
+#ifndef __MXL_GSW1XX_H
+#define __MXL_GSW1XX_H
+
+#include <linux/bitfield.h>
+
+#define GSW1XX_PORTS 6
+/* Port used for RGMII or optional RMII */
+#define GSW1XX_MII_PORT 5
+/* Port used for SGMII */
+#define GSW1XX_SGMII_PORT 4
+
+#define GSW1XX_SYS_CLK_FREQ 340000000
+
+/* SMDIO switch register base address */
+#define GSW1XX_SMDIO_BADR 0x1f
+#define GSW1XX_SMDIO_BADR_UNKNOWN -1
+
+/* GSW1XX SGMII PCS */
+#define GSW1XX_SGMII_BASE 0xd000
+#define GSW1XX_SGMII_PHY_HWBU_CTRL 0x009
+#define GSW1XX_SGMII_PHY_HWBU_CTRL_EN_HWBU_FSM BIT(0)
+#define GSW1XX_SGMII_PHY_HWBU_CTRL_HW_FSM_EN BIT(3)
+#define GSW1XX_SGMII_TBI_TXANEGH 0x300
+#define GSW1XX_SGMII_TBI_TXANEGL 0x301
+#define GSW1XX_SGMII_TBI_ANEGCTL 0x304
+#define GSW1XX_SGMII_TBI_ANEGCTL_LT GENMASK(1, 0)
+#define GSW1XX_SGMII_TBI_ANEGCTL_LT_10US 0
+#define GSW1XX_SGMII_TBI_ANEGCTL_LT_1_6MS 1
+#define GSW1XX_SGMII_TBI_ANEGCTL_LT_5MS 2
+#define GSW1XX_SGMII_TBI_ANEGCTL_LT_10MS 3
+#define GSW1XX_SGMII_TBI_ANEGCTL_ANEGEN BIT(2)
+#define GSW1XX_SGMII_TBI_ANEGCTL_RANEG BIT(3)
+#define GSW1XX_SGMII_TBI_ANEGCTL_OVRABL BIT(4)
+#define GSW1XX_SGMII_TBI_ANEGCTL_OVRANEG BIT(5)
+#define GSW1XX_SGMII_TBI_ANEGCTL_ANMODE GENMASK(7, 6)
+#define GSW1XX_SGMII_TBI_ANEGCTL_ANMODE_1000BASEX 1
+#define GSW1XX_SGMII_TBI_ANEGCTL_ANMODE_SGMII_PHY 2
+#define GSW1XX_SGMII_TBI_ANEGCTL_ANMODE_SGMII_MAC 3
+#define GSW1XX_SGMII_TBI_ANEGCTL_BCOMP BIT(15)
+
+#define GSW1XX_SGMII_TBI_TBICTL 0x305
+#define GSW1XX_SGMII_TBI_TBICTL_INITTBI BIT(0)
+#define GSW1XX_SGMII_TBI_TBICTL_ENTBI BIT(1)
+#define GSW1XX_SGMII_TBI_TBICTL_CRSTRR BIT(4)
+#define GSW1XX_SGMII_TBI_TBICTL_CRSOFF BIT(5)
+#define GSW1XX_SGMII_TBI_TBISTAT 0x309
+#define GSW1XX_SGMII_TBI_TBISTAT_LINK BIT(0)
+#define GSW1XX_SGMII_TBI_TBISTAT_AN_COMPLETE BIT(1)
+#define GSW1XX_SGMII_TBI_LPSTAT 0x30a
+#define GSW1XX_SGMII_TBI_LPSTAT_DUPLEX BIT(0)
+#define GSW1XX_SGMII_TBI_LPSTAT_PAUSE_RX BIT(1)
+#define GSW1XX_SGMII_TBI_LPSTAT_PAUSE_TX BIT(2)
+#define GSW1XX_SGMII_TBI_LPSTAT_SPEED GENMASK(6, 5)
+#define GSW1XX_SGMII_TBI_LPSTAT_SPEED_10 0
+#define GSW1XX_SGMII_TBI_LPSTAT_SPEED_100 1
+#define GSW1XX_SGMII_TBI_LPSTAT_SPEED_1000 2
+#define GSW1XX_SGMII_TBI_LPSTAT_SPEED_NOSGMII 3
+#define GSW1XX_SGMII_PHY_D 0x100
+#define GSW1XX_SGMII_PHY_A 0x101
+#define GSW1XX_SGMII_PHY_C 0x102
+#define GSW1XX_SGMII_PHY_STATUS BIT(0)
+#define GSW1XX_SGMII_PHY_READ BIT(4)
+#define GSW1XX_SGMII_PHY_WRITE BIT(8)
+#define GSW1XX_SGMII_PHY_RESET_N BIT(12)
+#define GSW1XX_SGMII_PCS_RXB_CTL 0x401
+#define GSW1XX_SGMII_PCS_RXB_CTL_INIT_RX_RXB BIT(1)
+#define GSW1XX_SGMII_PCS_TXB_CTL 0x404
+#define GSW1XX_SGMII_PCS_TXB_CTL_INIT_TX_TXB BIT(1)
+
+#define GSW1XX_SGMII_PHY_RX0_CFG2 0x004
+#define GSW1XX_SGMII_PHY_RX0_CFG2_EQ GENMASK(2, 0)
+#define GSW1XX_SGMII_PHY_RX0_CFG2_EQ_DEF 2
+#define GSW1XX_SGMII_PHY_RX0_CFG2_INVERT BIT(3)
+#define GSW1XX_SGMII_PHY_RX0_CFG2_LOS_EN BIT(4)
+#define GSW1XX_SGMII_PHY_RX0_CFG2_TERM_EN BIT(5)
+#define GSW1XX_SGMII_PHY_RX0_CFG2_FILT_CNT GENMASK(12, 6)
+#define GSW1XX_SGMII_PHY_RX0_CFG2_FILT_CNT_DEF 20
+
+#define GSW1XX_SGMII_PHY_TX0_CFG3 0x007
+#define GSW1XX_SGMII_PHY_TX0_CFG3_VBOOST_EN BIT(12)
+#define GSW1XX_SGMII_PHY_TX0_CFG3_VBOOST_LEVEL GENMASK(11, 9)
+#define GSW1XX_SGMII_PHY_TX0_CFG3_VBOOST_LEVEL_DEF 4
+#define GSW1XX_SGMII_PHY_TX0_CFG3_INVERT BIT(8)
+
+/* GSW1XX PDI Registers */
+#define GSW1XX_SWITCH_BASE 0xe000
+
+/* GSW1XX MII Registers */
+#define GSW1XX_RGMII_BASE 0xf100
+
+/* GSW1XX GPIO Registers */
+#define GSW1XX_GPIO_BASE 0xf300
+#define GPIO_ALTSEL0 0x83
+#define GPIO_ALTSEL0_EXTPHY_MUX_VAL 0x03c3
+#define GPIO_ALTSEL1 0x84
+#define GPIO_ALTSEL1_EXTPHY_MUX_VAL 0x003f
+
+/* MDIO bus controller */
+#define GSW1XX_MMDIO_BASE 0xf400
+
+/* generic IC registers */
+#define GSW1XX_SHELL_BASE 0xfa00
+#define GSW1XX_SHELL_RST_REQ 0x01
+#define GSW1XX_RST_REQ_SGMII_SHELL BIT(5)
+/* RGMII PAD Slew Control Register */
+#define GSW1XX_SHELL_RGMII_SLEW_CFG 0x78
+#define RGMII_SLEW_CFG_RX_2_5_V BIT(4)
+#define RGMII_SLEW_CFG_TX_2_5_V BIT(5)
+
+/* SGMII clock related settings */
+#define GSW1XX_CLK_BASE 0xf900
+#define GSW1XX_CLK_NCO_CTRL 0x68
+#define GSW1XX_SGMII_HSP_MASK GENMASK(3, 2)
+#define GSW1XX_SGMII_SEL BIT(1)
+#define GSW1XX_SGMII_1G 0x0
+#define GSW1XX_SGMII_2G5 0xc
+#define GSW1XX_SGMII_1G_NCO1 0x0
+#define GSW1XX_SGMII_2G5_NCO2 0x2
+
+#endif /* __MXL_GSW1XX_H */
diff --git a/drivers/net/dsa/lantiq/mxl-gsw1xx_pce.h b/drivers/net/dsa/lantiq/mxl-gsw1xx_pce.h
new file mode 100644
index 000000000000..eefcd411a340
--- /dev/null
+++ b/drivers/net/dsa/lantiq/mxl-gsw1xx_pce.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * PCE microcode update for the MaxLinear GSW1xx switch driver
+ *
+ * Copyright (C) 2023 - 2024 MaxLinear Inc.
+ * Copyright (C) 2022 Snap One, LLC. All rights reserved.
+ * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ * Copyright (C) 2010 Lantiq Deutschland
+ */
+
+#include "lantiq_gswip.h"
+
+#define INSTR 0
+#define IPV6 1
+#define LENACCU 2
+
+/* GSWIP_2.X */
+enum {
+ OUT_MAC0 = 0,
+ OUT_MAC1,
+ OUT_MAC2,
+ OUT_MAC3,
+ OUT_MAC4,
+ OUT_MAC5,
+ OUT_ETHTYP,
+ OUT_VTAG0,
+ OUT_VTAG1,
+ OUT_ITAG0,
+ OUT_ITAG1, /* 10 */
+ OUT_ITAG2,
+ OUT_ITAG3,
+ OUT_IP0,
+ OUT_IP1,
+ OUT_IP2,
+ OUT_IP3,
+ OUT_SIP0,
+ OUT_SIP1,
+ OUT_SIP2,
+ OUT_SIP3, /* 20 */
+ OUT_SIP4,
+ OUT_SIP5,
+ OUT_SIP6,
+ OUT_SIP7,
+ OUT_DIP0,
+ OUT_DIP1,
+ OUT_DIP2,
+ OUT_DIP3,
+ OUT_DIP4,
+ OUT_DIP5, /* 30 */
+ OUT_DIP6,
+ OUT_DIP7,
+ OUT_SESID,
+ OUT_PROT,
+ OUT_APP0,
+ OUT_APP1,
+ OUT_IGMP0,
+ OUT_IGMP1,
+ OUT_STAG0 = 61,
+ OUT_STAG1 = 62,
+ OUT_NONE = 63,
+};
+
+/* parser's microcode flag type */
+enum {
+ FLAG_ITAG = 0,
+ FLAG_VLAN,
+ FLAG_SNAP,
+ FLAG_PPPOE,
+ FLAG_IPV6,
+ FLAG_IPV6FL,
+ FLAG_IPV4,
+ FLAG_IGMP,
+ FLAG_TU,
+ FLAG_HOP,
+ FLAG_NN1, /* 10 */
+ FLAG_NN2,
+ FLAG_END,
+ FLAG_NO, /* 13 */
+ FLAG_SVLAN, /* 14 */
+};
+
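+/* Each microcode row is packed into four 16-bit table words: the match
+ * value, the match mask, and two words combining next state, output
+ * field, length, entry type, flags and the IPv4 length indication.
+ */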
+#define PCE_MC_M(val, msk, ns, out, len, type, flags, ipv4_len) \
+ { (val), (msk), ((ns) << 10 | (out) << 4 | (len) >> 1),\
+ ((len) & 1) << 15 | (type) << 13 | (flags) << 9 | (ipv4_len) << 8 }
+
+/* V22_2X (IPv6 issue fixed) */
+static const struct gswip_pce_microcode gsw1xx_pce_microcode[] = {
+ /* value mask ns fields L type flags ipv4_len */
+ PCE_MC_M(0x88c3, 0xFFFF, 1, OUT_ITAG0, 4, INSTR, FLAG_ITAG, 0),
+ PCE_MC_M(0x8100, 0xFFFF, 4, OUT_STAG0, 2, INSTR, FLAG_SVLAN, 0),
+ PCE_MC_M(0x88A8, 0xFFFF, 4, OUT_STAG0, 2, INSTR, FLAG_SVLAN, 0),
+ PCE_MC_M(0x9100, 0xFFFF, 4, OUT_STAG0, 2, INSTR, FLAG_SVLAN, 0),
+ PCE_MC_M(0x8100, 0xFFFF, 5, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
+ PCE_MC_M(0x88A8, 0xFFFF, 6, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
+ PCE_MC_M(0x9100, 0xFFFF, 4, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
+ PCE_MC_M(0x8864, 0xFFFF, 20, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0800, 0xFFFF, 24, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x86DD, 0xFFFF, 25, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x8863, 0xFFFF, 19, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0xF800, 13, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 44, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0600, 0x0600, 44, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 15, OUT_NONE, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0xAAAA, 0xFFFF, 17, OUT_NONE, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0300, 0xFF00, 45, OUT_NONE, 0, INSTR, FLAG_SNAP, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_DIP7, 3, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 21, OUT_DIP7, 3, INSTR, FLAG_PPPOE, 0),
+ PCE_MC_M(0x0021, 0xFFFF, 24, OUT_NONE, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0057, 0xFFFF, 25, OUT_NONE, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 44, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x4000, 0xF000, 27, OUT_IP0, 4, INSTR, FLAG_IPV4, 1),
+ PCE_MC_M(0x6000, 0xF000, 30, OUT_IP0, 3, INSTR, FLAG_IPV6, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 28, OUT_IP3, 2, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 29, OUT_SIP0, 4, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 44, OUT_NONE, 0, LENACCU, FLAG_NO, 0),
+ PCE_MC_M(0x1100, 0xFF00, 43, OUT_PROT, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0600, 0xFF00, 43, OUT_PROT, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0xFF00, 36, OUT_IP3, 17, INSTR, FLAG_HOP, 0),
+ PCE_MC_M(0x2B00, 0xFF00, 36, OUT_IP3, 17, INSTR, FLAG_NN1, 0),
+ PCE_MC_M(0x3C00, 0xFF00, 36, OUT_IP3, 17, INSTR, FLAG_NN2, 0),
+ PCE_MC_M(0x0000, 0x0000, 43, OUT_PROT, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x00F0, 38, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 44, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0xFF00, 36, OUT_NONE, 0, IPV6, FLAG_HOP, 0),
+ PCE_MC_M(0x2B00, 0xFF00, 36, OUT_NONE, 0, IPV6, FLAG_NN1, 0),
+ PCE_MC_M(0x3C00, 0xFF00, 36, OUT_NONE, 0, IPV6, FLAG_NN2, 0),
+ PCE_MC_M(0x0000, 0x00FC, 44, OUT_PROT, 0, IPV6, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 44, OUT_NONE, 0, IPV6, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 44, OUT_SIP0, 16, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_APP0, 4, INSTR, FLAG_IGMP, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+};
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
deleted file mode 100644
index 6eb3140d4044..000000000000
--- a/drivers/net/dsa/lantiq_gswip.c
+++ /dev/null
@@ -1,2270 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Lantiq / Intel GSWIP switch driver for VRX200, xRX300 and xRX330 SoCs
- *
- * Copyright (C) 2010 Lantiq Deutschland
- * Copyright (C) 2012 John Crispin <john@phrozen.org>
- * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de>
- *
- * The VLAN and bridge model the GSWIP hardware uses does not directly
- * matches the model DSA uses.
- *
- * The hardware has 64 possible table entries for bridges with one VLAN
- * ID, one flow id and a list of ports for each bridge. All entries which
- * match the same flow ID are combined in the mac learning table, they
- * act as one global bridge.
- * The hardware does not support VLAN filter on the port, but on the
- * bridge, this driver converts the DSA model to the hardware.
- *
- * The CPU gets all the exception frames which do not match any forwarding
- * rule and the CPU port is also added to all bridges. This makes it possible
- * to handle all the special cases easily in software.
- * At the initialization the driver allocates one bridge table entry for
- * each switch port which is used when the port is used without an
- * explicit bridge. This prevents the frames from being forwarded
- * between all LAN ports by default.
- */
-
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/etherdevice.h>
-#include <linux/firmware.h>
-#include <linux/if_bridge.h>
-#include <linux/if_vlan.h>
-#include <linux/iopoll.h>
-#include <linux/mfd/syscon.h>
-#include <linux/module.h>
-#include <linux/of_mdio.h>
-#include <linux/of_net.h>
-#include <linux/of_platform.h>
-#include <linux/phy.h>
-#include <linux/phylink.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-#include <linux/reset.h>
-#include <net/dsa.h>
-#include <dt-bindings/mips/lantiq_rcu_gphy.h>
-
-#include "lantiq_pce.h"
-
-/* GSWIP MDIO Registers */
-#define GSWIP_MDIO_GLOB 0x00
-#define GSWIP_MDIO_GLOB_ENABLE BIT(15)
-#define GSWIP_MDIO_CTRL 0x08
-#define GSWIP_MDIO_CTRL_BUSY BIT(12)
-#define GSWIP_MDIO_CTRL_RD BIT(11)
-#define GSWIP_MDIO_CTRL_WR BIT(10)
-#define GSWIP_MDIO_CTRL_PHYAD_MASK 0x1f
-#define GSWIP_MDIO_CTRL_PHYAD_SHIFT 5
-#define GSWIP_MDIO_CTRL_REGAD_MASK 0x1f
-#define GSWIP_MDIO_READ 0x09
-#define GSWIP_MDIO_WRITE 0x0A
-#define GSWIP_MDIO_MDC_CFG0 0x0B
-#define GSWIP_MDIO_MDC_CFG1 0x0C
-#define GSWIP_MDIO_PHYp(p) (0x15 - (p))
-#define GSWIP_MDIO_PHY_LINK_MASK 0x6000
-#define GSWIP_MDIO_PHY_LINK_AUTO 0x0000
-#define GSWIP_MDIO_PHY_LINK_DOWN 0x4000
-#define GSWIP_MDIO_PHY_LINK_UP 0x2000
-#define GSWIP_MDIO_PHY_SPEED_MASK 0x1800
-#define GSWIP_MDIO_PHY_SPEED_AUTO 0x1800
-#define GSWIP_MDIO_PHY_SPEED_M10 0x0000
-#define GSWIP_MDIO_PHY_SPEED_M100 0x0800
-#define GSWIP_MDIO_PHY_SPEED_G1 0x1000
-#define GSWIP_MDIO_PHY_FDUP_MASK 0x0600
-#define GSWIP_MDIO_PHY_FDUP_AUTO 0x0000
-#define GSWIP_MDIO_PHY_FDUP_EN 0x0200
-#define GSWIP_MDIO_PHY_FDUP_DIS 0x0600
-#define GSWIP_MDIO_PHY_FCONTX_MASK 0x0180
-#define GSWIP_MDIO_PHY_FCONTX_AUTO 0x0000
-#define GSWIP_MDIO_PHY_FCONTX_EN 0x0100
-#define GSWIP_MDIO_PHY_FCONTX_DIS 0x0180
-#define GSWIP_MDIO_PHY_FCONRX_MASK 0x0060
-#define GSWIP_MDIO_PHY_FCONRX_AUTO 0x0000
-#define GSWIP_MDIO_PHY_FCONRX_EN 0x0020
-#define GSWIP_MDIO_PHY_FCONRX_DIS 0x0060
-#define GSWIP_MDIO_PHY_ADDR_MASK 0x001f
-#define GSWIP_MDIO_PHY_MASK (GSWIP_MDIO_PHY_ADDR_MASK | \
- GSWIP_MDIO_PHY_FCONRX_MASK | \
- GSWIP_MDIO_PHY_FCONTX_MASK | \
- GSWIP_MDIO_PHY_LINK_MASK | \
- GSWIP_MDIO_PHY_SPEED_MASK | \
- GSWIP_MDIO_PHY_FDUP_MASK)
-
-/* GSWIP MII Registers */
-#define GSWIP_MII_CFGp(p) (0x2 * (p))
-#define GSWIP_MII_CFG_RESET BIT(15)
-#define GSWIP_MII_CFG_EN BIT(14)
-#define GSWIP_MII_CFG_ISOLATE BIT(13)
-#define GSWIP_MII_CFG_LDCLKDIS BIT(12)
-#define GSWIP_MII_CFG_RGMII_IBS BIT(8)
-#define GSWIP_MII_CFG_RMII_CLK BIT(7)
-#define GSWIP_MII_CFG_MODE_MIIP 0x0
-#define GSWIP_MII_CFG_MODE_MIIM 0x1
-#define GSWIP_MII_CFG_MODE_RMIIP 0x2
-#define GSWIP_MII_CFG_MODE_RMIIM 0x3
-#define GSWIP_MII_CFG_MODE_RGMII 0x4
-#define GSWIP_MII_CFG_MODE_GMII 0x9
-#define GSWIP_MII_CFG_MODE_MASK 0xf
-#define GSWIP_MII_CFG_RATE_M2P5 0x00
-#define GSWIP_MII_CFG_RATE_M25 0x10
-#define GSWIP_MII_CFG_RATE_M125 0x20
-#define GSWIP_MII_CFG_RATE_M50 0x30
-#define GSWIP_MII_CFG_RATE_AUTO 0x40
-#define GSWIP_MII_CFG_RATE_MASK 0x70
-#define GSWIP_MII_PCDU0 0x01
-#define GSWIP_MII_PCDU1 0x03
-#define GSWIP_MII_PCDU5 0x05
-#define GSWIP_MII_PCDU_TXDLY_MASK GENMASK(2, 0)
-#define GSWIP_MII_PCDU_RXDLY_MASK GENMASK(9, 7)
-
-/* GSWIP Core Registers */
-#define GSWIP_SWRES 0x000
-#define GSWIP_SWRES_R1 BIT(1) /* GSWIP Software reset */
-#define GSWIP_SWRES_R0 BIT(0) /* GSWIP Hardware reset */
-#define GSWIP_VERSION 0x013
-#define GSWIP_VERSION_REV_SHIFT 0
-#define GSWIP_VERSION_REV_MASK GENMASK(7, 0)
-#define GSWIP_VERSION_MOD_SHIFT 8
-#define GSWIP_VERSION_MOD_MASK GENMASK(15, 8)
-#define GSWIP_VERSION_2_0 0x100
-#define GSWIP_VERSION_2_1 0x021
-#define GSWIP_VERSION_2_2 0x122
-#define GSWIP_VERSION_2_2_ETC 0x022
-
-#define GSWIP_BM_RAM_VAL(x) (0x043 - (x))
-#define GSWIP_BM_RAM_ADDR 0x044
-#define GSWIP_BM_RAM_CTRL 0x045
-#define GSWIP_BM_RAM_CTRL_BAS BIT(15)
-#define GSWIP_BM_RAM_CTRL_OPMOD BIT(5)
-#define GSWIP_BM_RAM_CTRL_ADDR_MASK GENMASK(4, 0)
-#define GSWIP_BM_QUEUE_GCTRL 0x04A
-#define GSWIP_BM_QUEUE_GCTRL_GL_MOD BIT(10)
-/* buffer management Port Configuration Register */
-#define GSWIP_BM_PCFGp(p) (0x080 + ((p) * 2))
-#define GSWIP_BM_PCFG_CNTEN BIT(0) /* RMON Counter Enable */
-#define GSWIP_BM_PCFG_IGCNT BIT(1) /* Ingres Special Tag RMON count */
-/* buffer management Port Control Register */
-#define GSWIP_BM_RMON_CTRLp(p) (0x81 + ((p) * 2))
-#define GSWIP_BM_CTRL_RMON_RAM1_RES BIT(0) /* Software Reset for RMON RAM 1 */
-#define GSWIP_BM_CTRL_RMON_RAM2_RES BIT(1) /* Software Reset for RMON RAM 2 */
-
-/* PCE */
-#define GSWIP_PCE_TBL_KEY(x) (0x447 - (x))
-#define GSWIP_PCE_TBL_MASK 0x448
-#define GSWIP_PCE_TBL_VAL(x) (0x44D - (x))
-#define GSWIP_PCE_TBL_ADDR 0x44E
-#define GSWIP_PCE_TBL_CTRL 0x44F
-#define GSWIP_PCE_TBL_CTRL_BAS BIT(15)
-#define GSWIP_PCE_TBL_CTRL_TYPE BIT(13)
-#define GSWIP_PCE_TBL_CTRL_VLD BIT(12)
-#define GSWIP_PCE_TBL_CTRL_KEYFORM BIT(11)
-#define GSWIP_PCE_TBL_CTRL_GMAP_MASK GENMASK(10, 7)
-#define GSWIP_PCE_TBL_CTRL_OPMOD_MASK GENMASK(6, 5)
-#define GSWIP_PCE_TBL_CTRL_OPMOD_ADRD 0x00
-#define GSWIP_PCE_TBL_CTRL_OPMOD_ADWR 0x20
-#define GSWIP_PCE_TBL_CTRL_OPMOD_KSRD 0x40
-#define GSWIP_PCE_TBL_CTRL_OPMOD_KSWR 0x60
-#define GSWIP_PCE_TBL_CTRL_ADDR_MASK GENMASK(4, 0)
-#define GSWIP_PCE_PMAP1 0x453 /* Monitoring port map */
-#define GSWIP_PCE_PMAP2 0x454 /* Default Multicast port map */
-#define GSWIP_PCE_PMAP3 0x455 /* Default Unknown Unicast port map */
-#define GSWIP_PCE_GCTRL_0 0x456
-#define GSWIP_PCE_GCTRL_0_MTFL BIT(0) /* MAC Table Flushing */
-#define GSWIP_PCE_GCTRL_0_MC_VALID BIT(3)
-#define GSWIP_PCE_GCTRL_0_VLAN BIT(14) /* VLAN aware Switching */
-#define GSWIP_PCE_GCTRL_1 0x457
-#define GSWIP_PCE_GCTRL_1_MAC_GLOCK BIT(2) /* MAC Address table lock */
-#define GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD BIT(3) /* Mac address table lock forwarding mode */
-#define GSWIP_PCE_PCTRL_0p(p) (0x480 + ((p) * 0xA))
-#define GSWIP_PCE_PCTRL_0_TVM BIT(5) /* Transparent VLAN mode */
-#define GSWIP_PCE_PCTRL_0_VREP BIT(6) /* VLAN Replace Mode */
-#define GSWIP_PCE_PCTRL_0_INGRESS BIT(11) /* Accept special tag in ingress */
-#define GSWIP_PCE_PCTRL_0_PSTATE_LISTEN 0x0
-#define GSWIP_PCE_PCTRL_0_PSTATE_RX 0x1
-#define GSWIP_PCE_PCTRL_0_PSTATE_TX 0x2
-#define GSWIP_PCE_PCTRL_0_PSTATE_LEARNING 0x3
-#define GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING 0x7
-#define GSWIP_PCE_PCTRL_0_PSTATE_MASK GENMASK(2, 0)
-#define GSWIP_PCE_VCTRL(p) (0x485 + ((p) * 0xA))
-#define GSWIP_PCE_VCTRL_UVR BIT(0) /* Unknown VLAN Rule */
-#define GSWIP_PCE_VCTRL_VIMR BIT(3) /* VLAN Ingress Member violation rule */
-#define GSWIP_PCE_VCTRL_VEMR BIT(4) /* VLAN Egress Member violation rule */
-#define GSWIP_PCE_VCTRL_VSR BIT(5) /* VLAN Security */
-#define GSWIP_PCE_VCTRL_VID0 BIT(6) /* Priority Tagged Rule */
-#define GSWIP_PCE_DEFPVID(p) (0x486 + ((p) * 0xA))
-
-#define GSWIP_MAC_FLEN 0x8C5
-#define GSWIP_MAC_CTRL_0p(p) (0x903 + ((p) * 0xC))
-#define GSWIP_MAC_CTRL_0_PADEN BIT(8)
-#define GSWIP_MAC_CTRL_0_FCS_EN BIT(7)
-#define GSWIP_MAC_CTRL_0_FCON_MASK 0x0070
-#define GSWIP_MAC_CTRL_0_FCON_AUTO 0x0000
-#define GSWIP_MAC_CTRL_0_FCON_RX 0x0010
-#define GSWIP_MAC_CTRL_0_FCON_TX 0x0020
-#define GSWIP_MAC_CTRL_0_FCON_RXTX 0x0030
-#define GSWIP_MAC_CTRL_0_FCON_NONE 0x0040
-#define GSWIP_MAC_CTRL_0_FDUP_MASK 0x000C
-#define GSWIP_MAC_CTRL_0_FDUP_AUTO 0x0000
-#define GSWIP_MAC_CTRL_0_FDUP_EN 0x0004
-#define GSWIP_MAC_CTRL_0_FDUP_DIS 0x000C
-#define GSWIP_MAC_CTRL_0_GMII_MASK 0x0003
-#define GSWIP_MAC_CTRL_0_GMII_AUTO 0x0000
-#define GSWIP_MAC_CTRL_0_GMII_MII 0x0001
-#define GSWIP_MAC_CTRL_0_GMII_RGMII 0x0002
-#define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC))
-#define GSWIP_MAC_CTRL_2_LCHKL BIT(2) /* Frame Length Check Long Enable */
-#define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Lnegth */
-
-/* Ethernet Switch Fetch DMA Port Control Register */
-#define GSWIP_FDMA_PCTRLp(p) (0xA80 + ((p) * 0x6))
-#define GSWIP_FDMA_PCTRL_EN BIT(0) /* FDMA Port Enable */
-#define GSWIP_FDMA_PCTRL_STEN BIT(1) /* Special Tag Insertion Enable */
-#define GSWIP_FDMA_PCTRL_VLANMOD_MASK GENMASK(4, 3) /* VLAN Modification Control */
-#define GSWIP_FDMA_PCTRL_VLANMOD_SHIFT 3 /* VLAN Modification Control */
-#define GSWIP_FDMA_PCTRL_VLANMOD_DIS (0x0 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
-#define GSWIP_FDMA_PCTRL_VLANMOD_PRIO (0x1 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
-#define GSWIP_FDMA_PCTRL_VLANMOD_ID (0x2 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
-#define GSWIP_FDMA_PCTRL_VLANMOD_BOTH (0x3 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
-
-/* Ethernet Switch Store DMA Port Control Register */
-#define GSWIP_SDMA_PCTRLp(p) (0xBC0 + ((p) * 0x6))
-#define GSWIP_SDMA_PCTRL_EN BIT(0) /* SDMA Port Enable */
-#define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */
-#define GSWIP_SDMA_PCTRL_PAUFWD BIT(3) /* Pause Frame Forwarding */
-
-#define GSWIP_TABLE_ACTIVE_VLAN 0x01
-#define GSWIP_TABLE_VLAN_MAPPING 0x02
-#define GSWIP_TABLE_MAC_BRIDGE 0x0b
-#define GSWIP_TABLE_MAC_BRIDGE_KEY3_FID GENMASK(5, 0) /* Filtering identifier */
-#define GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT GENMASK(7, 4) /* Port on learned entries */
-#define GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC BIT(0) /* Static, non-aging entry */
-
-#define XRX200_GPHY_FW_ALIGN (16 * 1024)
-
-/* Maximum packet size supported by the switch. In theory this should be 10240,
- * but long packets currently cause lock-ups with an MTU of over 2526. Medium
- * packets are sometimes dropped (e.g. TCP over 2477, UDP over 2516-2519, ICMP
- * over 2526), hence an MTU value of 2400 seems safe. This issue only affects
- * packet reception. This is probably caused by the PPA engine, which is on the
- * RX part of the device. Packet transmission works properly up to 10240.
- */
-#define GSWIP_MAX_PACKET_LENGTH 2400
-
-struct gswip_hw_info {
- int max_ports;
- int cpu_port;
- const struct dsa_switch_ops *ops;
-};
-
-struct xway_gphy_match_data {
- char *fe_firmware_name;
- char *ge_firmware_name;
-};
-
-struct gswip_gphy_fw {
- struct clk *clk_gate;
- struct reset_control *reset;
- u32 fw_addr_offset;
- char *fw_name;
-};
-
-struct gswip_vlan {
- struct net_device *bridge;
- u16 vid;
- u8 fid;
-};
-
-struct gswip_priv {
- __iomem void *gswip;
- __iomem void *mdio;
- __iomem void *mii;
- const struct gswip_hw_info *hw_info;
- const struct xway_gphy_match_data *gphy_fw_name_cfg;
- struct dsa_switch *ds;
- struct device *dev;
- struct regmap *rcu_regmap;
- struct gswip_vlan vlans[64];
- int num_gphy_fw;
- struct gswip_gphy_fw *gphy_fw;
- u32 port_vlan_filter;
- struct mutex pce_table_lock;
-};
-
-struct gswip_pce_table_entry {
- u16 index; // PCE_TBL_ADDR.ADDR = pData->table_index
- u16 table; // PCE_TBL_CTRL.ADDR = pData->table
- u16 key[8];
- u16 val[5];
- u16 mask;
- u8 gmap;
- bool type;
- bool valid;
- bool key_mode;
-};
-
-struct gswip_rmon_cnt_desc {
- unsigned int size;
- unsigned int offset;
- const char *name;
-};
-
-#define MIB_DESC(_size, _offset, _name) {.size = _size, .offset = _offset, .name = _name}
-
-static const struct gswip_rmon_cnt_desc gswip_rmon_cnt[] = {
- /** Receive Packet Count (only packets that are accepted and not discarded). */
- MIB_DESC(1, 0x1F, "RxGoodPkts"),
- MIB_DESC(1, 0x23, "RxUnicastPkts"),
- MIB_DESC(1, 0x22, "RxMulticastPkts"),
- MIB_DESC(1, 0x21, "RxFCSErrorPkts"),
- MIB_DESC(1, 0x1D, "RxUnderSizeGoodPkts"),
- MIB_DESC(1, 0x1E, "RxUnderSizeErrorPkts"),
- MIB_DESC(1, 0x1B, "RxOversizeGoodPkts"),
- MIB_DESC(1, 0x1C, "RxOversizeErrorPkts"),
- MIB_DESC(1, 0x20, "RxGoodPausePkts"),
- MIB_DESC(1, 0x1A, "RxAlignErrorPkts"),
- MIB_DESC(1, 0x12, "Rx64BytePkts"),
- MIB_DESC(1, 0x13, "Rx127BytePkts"),
- MIB_DESC(1, 0x14, "Rx255BytePkts"),
- MIB_DESC(1, 0x15, "Rx511BytePkts"),
- MIB_DESC(1, 0x16, "Rx1023BytePkts"),
- /** Receive Size 1024-1522 (or more, if configured) Packet Count. */
- MIB_DESC(1, 0x17, "RxMaxBytePkts"),
- MIB_DESC(1, 0x18, "RxDroppedPkts"),
- MIB_DESC(1, 0x19, "RxFilteredPkts"),
- MIB_DESC(2, 0x24, "RxGoodBytes"),
- MIB_DESC(2, 0x26, "RxBadBytes"),
- MIB_DESC(1, 0x11, "TxAcmDroppedPkts"),
- MIB_DESC(1, 0x0C, "TxGoodPkts"),
- MIB_DESC(1, 0x06, "TxUnicastPkts"),
- MIB_DESC(1, 0x07, "TxMulticastPkts"),
- MIB_DESC(1, 0x00, "Tx64BytePkts"),
- MIB_DESC(1, 0x01, "Tx127BytePkts"),
- MIB_DESC(1, 0x02, "Tx255BytePkts"),
- MIB_DESC(1, 0x03, "Tx511BytePkts"),
- MIB_DESC(1, 0x04, "Tx1023BytePkts"),
- /** Transmit Size 1024-1522 (or more, if configured) Packet Count. */
- MIB_DESC(1, 0x05, "TxMaxBytePkts"),
- MIB_DESC(1, 0x08, "TxSingleCollCount"),
- MIB_DESC(1, 0x09, "TxMultCollCount"),
- MIB_DESC(1, 0x0A, "TxLateCollCount"),
- MIB_DESC(1, 0x0B, "TxExcessCollCount"),
- MIB_DESC(1, 0x0D, "TxPauseCount"),
- MIB_DESC(1, 0x10, "TxDroppedPkts"),
- MIB_DESC(2, 0x0E, "TxGoodBytes"),
-};
-
-static u32 gswip_switch_r(struct gswip_priv *priv, u32 offset)
-{
- return __raw_readl(priv->gswip + (offset * 4));
-}
-
-static void gswip_switch_w(struct gswip_priv *priv, u32 val, u32 offset)
-{
- __raw_writel(val, priv->gswip + (offset * 4));
-}
-
-static void gswip_switch_mask(struct gswip_priv *priv, u32 clear, u32 set,
- u32 offset)
-{
- u32 val = gswip_switch_r(priv, offset);
-
- val &= ~(clear);
- val |= set;
- gswip_switch_w(priv, val, offset);
-}
-
-static u32 gswip_switch_r_timeout(struct gswip_priv *priv, u32 offset,
- u32 cleared)
-{
- u32 val;
-
- return readx_poll_timeout(__raw_readl, priv->gswip + (offset * 4), val,
- (val & cleared) == 0, 20, 50000);
-}
-
-static u32 gswip_mdio_r(struct gswip_priv *priv, u32 offset)
-{
- return __raw_readl(priv->mdio + (offset * 4));
-}
-
-static void gswip_mdio_w(struct gswip_priv *priv, u32 val, u32 offset)
-{
- __raw_writel(val, priv->mdio + (offset * 4));
-}
-
-static void gswip_mdio_mask(struct gswip_priv *priv, u32 clear, u32 set,
- u32 offset)
-{
- u32 val = gswip_mdio_r(priv, offset);
-
- val &= ~(clear);
- val |= set;
- gswip_mdio_w(priv, val, offset);
-}
-
-static u32 gswip_mii_r(struct gswip_priv *priv, u32 offset)
-{
- return __raw_readl(priv->mii + (offset * 4));
-}
-
-static void gswip_mii_w(struct gswip_priv *priv, u32 val, u32 offset)
-{
- __raw_writel(val, priv->mii + (offset * 4));
-}
-
-static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set,
- u32 offset)
-{
- u32 val = gswip_mii_r(priv, offset);
-
- val &= ~(clear);
- val |= set;
- gswip_mii_w(priv, val, offset);
-}
-
-static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set,
- int port)
-{
- /* There's no MII_CFG register for the CPU port */
- if (!dsa_is_cpu_port(priv->ds, port))
- gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(port));
-}
-
-static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set,
- int port)
-{
- switch (port) {
- case 0:
- gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU0);
- break;
- case 1:
- gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU1);
- break;
- case 5:
- gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU5);
- break;
- }
-}
-
-static int gswip_mdio_poll(struct gswip_priv *priv)
-{
- int cnt = 100;
-
- while (likely(cnt--)) {
- u32 ctrl = gswip_mdio_r(priv, GSWIP_MDIO_CTRL);
-
- if ((ctrl & GSWIP_MDIO_CTRL_BUSY) == 0)
- return 0;
- usleep_range(20, 40);
- }
-
- return -ETIMEDOUT;
-}
-
-static int gswip_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val)
-{
- struct gswip_priv *priv = bus->priv;
- int err;
-
- err = gswip_mdio_poll(priv);
- if (err) {
- dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
- return err;
- }
-
- gswip_mdio_w(priv, val, GSWIP_MDIO_WRITE);
- gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_WR |
- ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
- (reg & GSWIP_MDIO_CTRL_REGAD_MASK),
- GSWIP_MDIO_CTRL);
-
- return 0;
-}
-
-static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg)
-{
- struct gswip_priv *priv = bus->priv;
- int err;
-
- err = gswip_mdio_poll(priv);
- if (err) {
- dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
- return err;
- }
-
- gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_RD |
- ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
- (reg & GSWIP_MDIO_CTRL_REGAD_MASK),
- GSWIP_MDIO_CTRL);
-
- err = gswip_mdio_poll(priv);
- if (err) {
- dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
- return err;
- }
-
- return gswip_mdio_r(priv, GSWIP_MDIO_READ);
-}
-
-static int gswip_mdio(struct gswip_priv *priv)
-{
- struct device_node *mdio_np, *switch_np = priv->dev->of_node;
- struct device *dev = priv->dev;
- struct mii_bus *bus;
- int err = 0;
-
- mdio_np = of_get_compatible_child(switch_np, "lantiq,xrx200-mdio");
- if (!of_device_is_available(mdio_np))
- goto out_put_node;
-
- bus = devm_mdiobus_alloc(dev);
- if (!bus) {
- err = -ENOMEM;
- goto out_put_node;
- }
-
- bus->priv = priv;
- bus->read = gswip_mdio_rd;
- bus->write = gswip_mdio_wr;
- bus->name = "lantiq,xrx200-mdio";
- snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev));
- bus->parent = priv->dev;
-
- err = devm_of_mdiobus_register(dev, bus, mdio_np);
-
-out_put_node:
- of_node_put(mdio_np);
-
- return err;
-}
-
-static int gswip_pce_table_entry_read(struct gswip_priv *priv,
- struct gswip_pce_table_entry *tbl)
-{
- int i;
- int err;
- u16 crtl;
- u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSRD :
- GSWIP_PCE_TBL_CTRL_OPMOD_ADRD;
-
- mutex_lock(&priv->pce_table_lock);
-
- err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
- GSWIP_PCE_TBL_CTRL_BAS);
- if (err) {
- mutex_unlock(&priv->pce_table_lock);
- return err;
- }
-
- gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
- gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
- GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
- tbl->table | addr_mode | GSWIP_PCE_TBL_CTRL_BAS,
- GSWIP_PCE_TBL_CTRL);
-
- err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
- GSWIP_PCE_TBL_CTRL_BAS);
- if (err) {
- mutex_unlock(&priv->pce_table_lock);
- return err;
- }
-
- for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
- tbl->key[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_KEY(i));
-
- for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
- tbl->val[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_VAL(i));
-
- tbl->mask = gswip_switch_r(priv, GSWIP_PCE_TBL_MASK);
-
- crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
-
- tbl->type = !!(crtl & GSWIP_PCE_TBL_CTRL_TYPE);
- tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD);
- tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7;
-
- mutex_unlock(&priv->pce_table_lock);
-
- return 0;
-}
-
-static int gswip_pce_table_entry_write(struct gswip_priv *priv,
- struct gswip_pce_table_entry *tbl)
-{
- int i;
- int err;
- u16 crtl;
- u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR :
- GSWIP_PCE_TBL_CTRL_OPMOD_ADWR;
-
- mutex_lock(&priv->pce_table_lock);
-
- err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
- GSWIP_PCE_TBL_CTRL_BAS);
- if (err) {
- mutex_unlock(&priv->pce_table_lock);
- return err;
- }
-
- gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
- gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
- GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
- tbl->table | addr_mode,
- GSWIP_PCE_TBL_CTRL);
-
- for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
- gswip_switch_w(priv, tbl->key[i], GSWIP_PCE_TBL_KEY(i));
-
- for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
- gswip_switch_w(priv, tbl->val[i], GSWIP_PCE_TBL_VAL(i));
-
- gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
- GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
- tbl->table | addr_mode,
- GSWIP_PCE_TBL_CTRL);
-
- gswip_switch_w(priv, tbl->mask, GSWIP_PCE_TBL_MASK);
-
- crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
- crtl &= ~(GSWIP_PCE_TBL_CTRL_TYPE | GSWIP_PCE_TBL_CTRL_VLD |
- GSWIP_PCE_TBL_CTRL_GMAP_MASK);
- if (tbl->type)
- crtl |= GSWIP_PCE_TBL_CTRL_TYPE;
- if (tbl->valid)
- crtl |= GSWIP_PCE_TBL_CTRL_VLD;
- crtl |= (tbl->gmap << 7) & GSWIP_PCE_TBL_CTRL_GMAP_MASK;
- crtl |= GSWIP_PCE_TBL_CTRL_BAS;
- gswip_switch_w(priv, crtl, GSWIP_PCE_TBL_CTRL);
-
- err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
- GSWIP_PCE_TBL_CTRL_BAS);
-
- mutex_unlock(&priv->pce_table_lock);
-
- return err;
-}
-
-/* Add the LAN port into a bridge with the CPU port by
- * default. This prevents automatic forwarding of
- * packages between the LAN ports when no explicit
- * bridge is configured.
- */
-static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
-{
- struct gswip_pce_table_entry vlan_active = {0,};
- struct gswip_pce_table_entry vlan_mapping = {0,};
- unsigned int cpu_port = priv->hw_info->cpu_port;
- int err;
-
- vlan_active.index = port + 1;
- vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
- vlan_active.key[0] = 0; /* vid */
- vlan_active.val[0] = port + 1 /* fid */;
- vlan_active.valid = add;
- err = gswip_pce_table_entry_write(priv, &vlan_active);
- if (err) {
- dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
- return err;
- }
-
- if (!add)
- return 0;
-
- vlan_mapping.index = port + 1;
- vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
- vlan_mapping.val[0] = 0 /* vid */;
- vlan_mapping.val[1] = BIT(port) | BIT(cpu_port);
- vlan_mapping.val[2] = 0;
- err = gswip_pce_table_entry_write(priv, &vlan_mapping);
- if (err) {
- dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
- return err;
- }
-
- return 0;
-}
-
-static int gswip_port_enable(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
-{
- struct gswip_priv *priv = ds->priv;
- int err;
-
- if (!dsa_is_cpu_port(ds, port)) {
- u32 mdio_phy = 0;
-
- err = gswip_add_single_port_br(priv, port, true);
- if (err)
- return err;
-
- if (phydev)
- mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;
-
- gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy,
- GSWIP_MDIO_PHYp(port));
- }
-
- /* RMON Counter Enable for port */
- gswip_switch_w(priv, GSWIP_BM_PCFG_CNTEN, GSWIP_BM_PCFGp(port));
-
- /* enable port fetch/store dma & VLAN Modification */
- gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_EN |
- GSWIP_FDMA_PCTRL_VLANMOD_BOTH,
- GSWIP_FDMA_PCTRLp(port));
- gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
- GSWIP_SDMA_PCTRLp(port));
-
- return 0;
-}
-
-static void gswip_port_disable(struct dsa_switch *ds, int port)
-{
- struct gswip_priv *priv = ds->priv;
-
- gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0,
- GSWIP_FDMA_PCTRLp(port));
- gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
- GSWIP_SDMA_PCTRLp(port));
-}
-
-static int gswip_pce_load_microcode(struct gswip_priv *priv)
-{
- int i;
- int err;
-
- gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
- GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
- GSWIP_PCE_TBL_CTRL_OPMOD_ADWR, GSWIP_PCE_TBL_CTRL);
- gswip_switch_w(priv, 0, GSWIP_PCE_TBL_MASK);
-
- for (i = 0; i < ARRAY_SIZE(gswip_pce_microcode); i++) {
- gswip_switch_w(priv, i, GSWIP_PCE_TBL_ADDR);
- gswip_switch_w(priv, gswip_pce_microcode[i].val_0,
- GSWIP_PCE_TBL_VAL(0));
- gswip_switch_w(priv, gswip_pce_microcode[i].val_1,
- GSWIP_PCE_TBL_VAL(1));
- gswip_switch_w(priv, gswip_pce_microcode[i].val_2,
- GSWIP_PCE_TBL_VAL(2));
- gswip_switch_w(priv, gswip_pce_microcode[i].val_3,
- GSWIP_PCE_TBL_VAL(3));
-
- /* start the table access: */
- gswip_switch_mask(priv, 0, GSWIP_PCE_TBL_CTRL_BAS,
- GSWIP_PCE_TBL_CTRL);
- err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
- GSWIP_PCE_TBL_CTRL_BAS);
- if (err)
- return err;
- }
-
- /* tell the switch that the microcode is loaded */
- gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MC_VALID,
- GSWIP_PCE_GCTRL_0);
-
- return 0;
-}
-
-static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering,
- struct netlink_ext_ack *extack)
-{
- struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
- struct gswip_priv *priv = ds->priv;
-
- /* Do not allow changing the VLAN filtering options while in bridge */
- if (bridge && !!(priv->port_vlan_filter & BIT(port)) != vlan_filtering) {
- NL_SET_ERR_MSG_MOD(extack,
- "Dynamic toggling of vlan_filtering not supported");
- return -EIO;
- }
-
- if (vlan_filtering) {
- /* Use tag based VLAN */
- gswip_switch_mask(priv,
- GSWIP_PCE_VCTRL_VSR,
- GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
- GSWIP_PCE_VCTRL_VEMR,
- GSWIP_PCE_VCTRL(port));
- gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_TVM, 0,
- GSWIP_PCE_PCTRL_0p(port));
- } else {
- /* Use port based VLAN */
- gswip_switch_mask(priv,
- GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
- GSWIP_PCE_VCTRL_VEMR,
- GSWIP_PCE_VCTRL_VSR,
- GSWIP_PCE_VCTRL(port));
- gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_TVM,
- GSWIP_PCE_PCTRL_0p(port));
- }
-
- return 0;
-}
-
-static int gswip_setup(struct dsa_switch *ds)
-{
- struct gswip_priv *priv = ds->priv;
- unsigned int cpu_port = priv->hw_info->cpu_port;
- int i;
- int err;
-
- gswip_switch_w(priv, GSWIP_SWRES_R0, GSWIP_SWRES);
- usleep_range(5000, 10000);
- gswip_switch_w(priv, 0, GSWIP_SWRES);
-
- /* disable port fetch/store dma on all ports */
- for (i = 0; i < priv->hw_info->max_ports; i++) {
- gswip_port_disable(ds, i);
- gswip_port_vlan_filtering(ds, i, false, NULL);
- }
-
- /* enable Switch */
- gswip_mdio_mask(priv, 0, GSWIP_MDIO_GLOB_ENABLE, GSWIP_MDIO_GLOB);
-
- err = gswip_pce_load_microcode(priv);
- if (err) {
- dev_err(priv->dev, "writing PCE microcode failed, %i\n", err);
- return err;
- }
-
- /* Default unknown Broadcast/Multicast/Unicast port maps */
- gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP1);
- gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2);
- gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3);
-
- /* Deactivate MDIO PHY auto polling. Some PHYs as the AR8030 have an
- * interoperability problem with this auto polling mechanism because
- * their status registers think that the link is in a different state
- * than it actually is. For the AR8030 it has the BMSR_ESTATEN bit set
- * as well as ESTATUS_1000_TFULL and ESTATUS_1000_XFULL. This makes the
- * auto polling state machine consider the link being negotiated with
- * 1Gbit/s. Since the PHY itself is a Fast Ethernet RMII PHY this leads
- * to the switch port being completely dead (RX and TX are both not
- * working).
- * Also with various other PHY / port combinations (PHY11G GPHY, PHY22F
- * GPHY, external RGMII PEF7071/7072) any traffic would stop. Sometimes
- * it would work fine for a few minutes to hours and then stop, on
- * other device it would no traffic could be sent or received at all.
- * Testing shows that when PHY auto polling is disabled these problems
- * go away.
- */
- gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0);
-
- /* Configure the MDIO Clock 2.5 MHz */
- gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
-
- /* Disable the xMII interface and clear it's isolation bit */
- for (i = 0; i < priv->hw_info->max_ports; i++)
- gswip_mii_mask_cfg(priv,
- GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE,
- 0, i);
-
- /* enable special tag insertion on cpu port */
- gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
- GSWIP_FDMA_PCTRLp(cpu_port));
-
- /* accept special tag in ingress direction */
- gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
- GSWIP_PCE_PCTRL_0p(cpu_port));
-
- gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD,
- GSWIP_BM_QUEUE_GCTRL);
-
- /* VLAN aware Switching */
- gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_VLAN, GSWIP_PCE_GCTRL_0);
-
- /* Flush MAC Table */
- gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MTFL, GSWIP_PCE_GCTRL_0);
-
- err = gswip_switch_r_timeout(priv, GSWIP_PCE_GCTRL_0,
- GSWIP_PCE_GCTRL_0_MTFL);
- if (err) {
- dev_err(priv->dev, "MAC flushing didn't finish\n");
- return err;
- }
-
- ds->mtu_enforcement_ingress = true;
-
- ds->configure_vlan_while_not_filtering = false;
-
- return 0;
-}
-
-static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
- int port,
- enum dsa_tag_protocol mp)
-{
- return DSA_TAG_PROTO_GSWIP;
-}
-
-static int gswip_vlan_active_create(struct gswip_priv *priv,
- struct net_device *bridge,
- int fid, u16 vid)
-{
- struct gswip_pce_table_entry vlan_active = {0,};
- unsigned int max_ports = priv->hw_info->max_ports;
- int idx = -1;
- int err;
- int i;
-
- /* Look for a free slot */
- for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
- if (!priv->vlans[i].bridge) {
- idx = i;
- break;
- }
- }
-
- if (idx == -1)
- return -ENOSPC;
-
- if (fid == -1)
- fid = idx;
-
- vlan_active.index = idx;
- vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
- vlan_active.key[0] = vid;
- vlan_active.val[0] = fid;
- vlan_active.valid = true;
-
- err = gswip_pce_table_entry_write(priv, &vlan_active);
- if (err) {
- dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
- return err;
- }
-
- priv->vlans[idx].bridge = bridge;
- priv->vlans[idx].vid = vid;
- priv->vlans[idx].fid = fid;
-
- return idx;
-}
-
-static int gswip_vlan_active_remove(struct gswip_priv *priv, int idx)
-{
- struct gswip_pce_table_entry vlan_active = {0,};
- int err;
-
- vlan_active.index = idx;
- vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
- vlan_active.valid = false;
- err = gswip_pce_table_entry_write(priv, &vlan_active);
- if (err)
- dev_err(priv->dev, "failed to delete active VLAN: %d\n", err);
- priv->vlans[idx].bridge = NULL;
-
- return err;
-}
-
-static int gswip_vlan_add_unaware(struct gswip_priv *priv,
- struct net_device *bridge, int port)
-{
- struct gswip_pce_table_entry vlan_mapping = {0,};
- unsigned int max_ports = priv->hw_info->max_ports;
- unsigned int cpu_port = priv->hw_info->cpu_port;
- bool active_vlan_created = false;
- int idx = -1;
- int i;
- int err;
-
- /* Check if there is already a page for this bridge */
- for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
- if (priv->vlans[i].bridge == bridge) {
- idx = i;
- break;
- }
- }
-
-	/* If this bridge is not programmed yet, add an Active VLAN table
- * entry in a free slot and prepare the VLAN mapping table entry.
- */
- if (idx == -1) {
- idx = gswip_vlan_active_create(priv, bridge, -1, 0);
- if (idx < 0)
- return idx;
- active_vlan_created = true;
-
- vlan_mapping.index = idx;
- vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
- /* VLAN ID byte, maps to the VLAN ID of vlan active table */
- vlan_mapping.val[0] = 0;
- } else {
- /* Read the existing VLAN mapping entry from the switch */
- vlan_mapping.index = idx;
- vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
- err = gswip_pce_table_entry_read(priv, &vlan_mapping);
- if (err) {
- dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
- err);
- return err;
- }
- }
-
- /* Update the VLAN mapping entry and write it to the switch */
- vlan_mapping.val[1] |= BIT(cpu_port);
- vlan_mapping.val[1] |= BIT(port);
- err = gswip_pce_table_entry_write(priv, &vlan_mapping);
- if (err) {
- dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
-		/* In case an Active VLAN was created, delete it again */
- if (active_vlan_created)
- gswip_vlan_active_remove(priv, idx);
- return err;
- }
-
- gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));
- return 0;
-}
-
-static int gswip_vlan_add_aware(struct gswip_priv *priv,
- struct net_device *bridge, int port,
- u16 vid, bool untagged,
- bool pvid)
-{
- struct gswip_pce_table_entry vlan_mapping = {0,};
- unsigned int max_ports = priv->hw_info->max_ports;
- unsigned int cpu_port = priv->hw_info->cpu_port;
- bool active_vlan_created = false;
- int idx = -1;
- int fid = -1;
- int i;
- int err;
-
- /* Check if there is already a page for this bridge */
- for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
- if (priv->vlans[i].bridge == bridge) {
- if (fid != -1 && fid != priv->vlans[i].fid)
- dev_err(priv->dev, "one bridge with multiple flow ids\n");
- fid = priv->vlans[i].fid;
- if (priv->vlans[i].vid == vid) {
- idx = i;
- break;
- }
- }
- }
-
-	/* If this bridge is not programmed yet, add an Active VLAN table
- * entry in a free slot and prepare the VLAN mapping table entry.
- */
- if (idx == -1) {
- idx = gswip_vlan_active_create(priv, bridge, fid, vid);
- if (idx < 0)
- return idx;
- active_vlan_created = true;
-
- vlan_mapping.index = idx;
- vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
- /* VLAN ID byte, maps to the VLAN ID of vlan active table */
- vlan_mapping.val[0] = vid;
- } else {
- /* Read the existing VLAN mapping entry from the switch */
- vlan_mapping.index = idx;
- vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
- err = gswip_pce_table_entry_read(priv, &vlan_mapping);
- if (err) {
- dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
- err);
- return err;
- }
- }
-
- vlan_mapping.val[0] = vid;
- /* Update the VLAN mapping entry and write it to the switch */
- vlan_mapping.val[1] |= BIT(cpu_port);
- vlan_mapping.val[2] |= BIT(cpu_port);
- vlan_mapping.val[1] |= BIT(port);
- if (untagged)
- vlan_mapping.val[2] &= ~BIT(port);
- else
- vlan_mapping.val[2] |= BIT(port);
- err = gswip_pce_table_entry_write(priv, &vlan_mapping);
- if (err) {
- dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
-		/* In case an Active VLAN was created, delete it again */
- if (active_vlan_created)
- gswip_vlan_active_remove(priv, idx);
- return err;
- }
-
- if (pvid)
- gswip_switch_w(priv, idx, GSWIP_PCE_DEFPVID(port));
-
- return 0;
-}
-
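In the VLAN mapping entry written above, val[0] carries the VID, val[1] is the member-port bitmap and val[2] is the tag-member bitmap (bit set means the port transmits tagged). The following standalone sketch only illustrates how an untagged and a tagged member show up in those two bitmaps; the struct, helper and CPU_PORT value here are made up for the example and are not part of the driver.

#include <stdint.h>
#include <stdio.h>

#define CPU_PORT 6

struct vlan_mapping {
	uint16_t val[3];	/* [0]=VID, [1]=member ports, [2]=tagged members */
};

static void add_member(struct vlan_mapping *m, int port, int untagged)
{
	m->val[1] |= 1u << CPU_PORT;	/* CPU port is always a member */
	m->val[2] |= 1u << CPU_PORT;	/* ... and always receives tagged frames */
	m->val[1] |= 1u << port;
	if (untagged)
		m->val[2] &= ~(1u << port);
	else
		m->val[2] |= 1u << port;
}

int main(void)
{
	struct vlan_mapping m = { .val = { 100, 0, 0 } };

	add_member(&m, 2, 1);	/* port 2 joins VID 100 untagged */
	add_member(&m, 4, 0);	/* port 4 joins VID 100 tagged */
	/* members = 0x54 (ports 2, 4, 6), tagged = 0x50 (ports 4, 6) */
	printf("members=0x%02x tagged=0x%02x\n",
	       (unsigned int)m.val[1], (unsigned int)m.val[2]);
	return 0;
}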
-static int gswip_vlan_remove(struct gswip_priv *priv,
- struct net_device *bridge, int port,
- u16 vid, bool pvid, bool vlan_aware)
-{
- struct gswip_pce_table_entry vlan_mapping = {0,};
- unsigned int max_ports = priv->hw_info->max_ports;
- unsigned int cpu_port = priv->hw_info->cpu_port;
- int idx = -1;
- int i;
- int err;
-
- /* Check if there is already a page for this bridge */
- for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
- if (priv->vlans[i].bridge == bridge &&
- (!vlan_aware || priv->vlans[i].vid == vid)) {
- idx = i;
- break;
- }
- }
-
- if (idx == -1) {
-		dev_err(priv->dev, "bridge to leave does not exist\n");
- return -ENOENT;
- }
-
- vlan_mapping.index = idx;
- vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
- err = gswip_pce_table_entry_read(priv, &vlan_mapping);
- if (err) {
- dev_err(priv->dev, "failed to read VLAN mapping: %d\n", err);
- return err;
- }
-
- vlan_mapping.val[1] &= ~BIT(port);
- vlan_mapping.val[2] &= ~BIT(port);
- err = gswip_pce_table_entry_write(priv, &vlan_mapping);
- if (err) {
- dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
- return err;
- }
-
- /* In case all ports are removed from the bridge, remove the VLAN */
- if ((vlan_mapping.val[1] & ~BIT(cpu_port)) == 0) {
- err = gswip_vlan_active_remove(priv, idx);
- if (err) {
- dev_err(priv->dev, "failed to write active VLAN: %d\n",
- err);
- return err;
- }
- }
-
- /* GSWIP 2.2 (GRX300) and later program here the VID directly. */
- if (pvid)
- gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));
-
- return 0;
-}
-
-static int gswip_port_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge,
- bool *tx_fwd_offload,
- struct netlink_ext_ack *extack)
-{
- struct net_device *br = bridge.dev;
- struct gswip_priv *priv = ds->priv;
- int err;
-
- /* When the bridge uses VLAN filtering we have to configure VLAN
- * specific bridges. No bridge is configured here.
- */
- if (!br_vlan_enabled(br)) {
- err = gswip_vlan_add_unaware(priv, br, port);
- if (err)
- return err;
- priv->port_vlan_filter &= ~BIT(port);
- } else {
- priv->port_vlan_filter |= BIT(port);
- }
- return gswip_add_single_port_br(priv, port, false);
-}
-
-static void gswip_port_bridge_leave(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge)
-{
- struct net_device *br = bridge.dev;
- struct gswip_priv *priv = ds->priv;
-
- gswip_add_single_port_br(priv, port, true);
-
- /* When the bridge uses VLAN filtering we have to configure VLAN
- * specific bridges. No bridge is configured here.
- */
- if (!br_vlan_enabled(br))
- gswip_vlan_remove(priv, br, port, 0, true, false);
-}
-
-static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct netlink_ext_ack *extack)
-{
- struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
- struct gswip_priv *priv = ds->priv;
- unsigned int max_ports = priv->hw_info->max_ports;
- int pos = max_ports;
- int i, idx = -1;
-
- /* We only support VLAN filtering on bridges */
- if (!dsa_is_cpu_port(ds, port) && !bridge)
- return -EOPNOTSUPP;
-
- /* Check if there is already a page for this VLAN */
- for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
- if (priv->vlans[i].bridge == bridge &&
- priv->vlans[i].vid == vlan->vid) {
- idx = i;
- break;
- }
- }
-
- /* If this VLAN is not programmed yet, we have to reserve
-	 * one entry in the VLAN table. Make sure the search starts at
-	 * the next position in the following round.
- */
- if (idx == -1) {
- /* Look for a free slot */
- for (; pos < ARRAY_SIZE(priv->vlans); pos++) {
- if (!priv->vlans[pos].bridge) {
- idx = pos;
- pos++;
- break;
- }
- }
-
- if (idx == -1) {
- NL_SET_ERR_MSG_MOD(extack, "No slot in VLAN table");
- return -ENOSPC;
- }
- }
-
- return 0;
-}
-
-static int gswip_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct netlink_ext_ack *extack)
-{
- struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
- struct gswip_priv *priv = ds->priv;
- bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
- bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- int err;
-
- err = gswip_port_vlan_prepare(ds, port, vlan, extack);
- if (err)
- return err;
-
- /* We have to receive all packets on the CPU port and should not
- * do any VLAN filtering here. This is also called with bridge
- * NULL and then we do not know for which bridge to configure
- * this.
- */
- if (dsa_is_cpu_port(ds, port))
- return 0;
-
- return gswip_vlan_add_aware(priv, bridge, port, vlan->vid,
- untagged, pvid);
-}
-
-static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
- struct gswip_priv *priv = ds->priv;
- bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
-
- /* We have to receive all packets on the CPU port and should not
- * do any VLAN filtering here. This is also called with bridge
- * NULL and then we do not know for which bridge to configure
- * this.
- */
- if (dsa_is_cpu_port(ds, port))
- return 0;
-
- return gswip_vlan_remove(priv, bridge, port, vlan->vid, pvid, true);
-}
-
-static void gswip_port_fast_age(struct dsa_switch *ds, int port)
-{
- struct gswip_priv *priv = ds->priv;
- struct gswip_pce_table_entry mac_bridge = {0,};
- int i;
- int err;
-
- for (i = 0; i < 2048; i++) {
- mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
- mac_bridge.index = i;
-
- err = gswip_pce_table_entry_read(priv, &mac_bridge);
- if (err) {
- dev_err(priv->dev, "failed to read mac bridge: %d\n",
- err);
- return;
- }
-
- if (!mac_bridge.valid)
- continue;
-
- if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC)
- continue;
-
- if (port != FIELD_GET(GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT,
- mac_bridge.val[0]))
- continue;
-
- mac_bridge.valid = false;
- err = gswip_pce_table_entry_write(priv, &mac_bridge);
- if (err) {
- dev_err(priv->dev, "failed to write mac bridge: %d\n",
- err);
- return;
- }
- }
-}
-
-static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
-{
- struct gswip_priv *priv = ds->priv;
- u32 stp_state;
-
- switch (state) {
- case BR_STATE_DISABLED:
- gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
- GSWIP_SDMA_PCTRLp(port));
- return;
- case BR_STATE_BLOCKING:
- case BR_STATE_LISTENING:
- stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LISTEN;
- break;
- case BR_STATE_LEARNING:
- stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LEARNING;
- break;
- case BR_STATE_FORWARDING:
- stp_state = GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING;
- break;
- default:
- dev_err(priv->dev, "invalid STP state: %d\n", state);
- return;
- }
-
- gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
- GSWIP_SDMA_PCTRLp(port));
- gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_PSTATE_MASK, stp_state,
- GSWIP_PCE_PCTRL_0p(port));
-}
-
-static int gswip_port_fdb(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid, bool add)
-{
- struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
- struct gswip_priv *priv = ds->priv;
- struct gswip_pce_table_entry mac_bridge = {0,};
- unsigned int max_ports = priv->hw_info->max_ports;
- int fid = -1;
- int i;
- int err;
-
- if (!bridge)
- return -EINVAL;
-
- for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
- if (priv->vlans[i].bridge == bridge) {
- fid = priv->vlans[i].fid;
- break;
- }
- }
-
- if (fid == -1) {
- dev_err(priv->dev, "no FID found for bridge %s\n",
- bridge->name);
- return -EINVAL;
- }
-
- mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
- mac_bridge.key_mode = true;
- mac_bridge.key[0] = addr[5] | (addr[4] << 8);
- mac_bridge.key[1] = addr[3] | (addr[2] << 8);
- mac_bridge.key[2] = addr[1] | (addr[0] << 8);
- mac_bridge.key[3] = FIELD_PREP(GSWIP_TABLE_MAC_BRIDGE_KEY3_FID, fid);
- mac_bridge.val[0] = add ? BIT(port) : 0; /* port map */
- mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC;
- mac_bridge.valid = add;
-
- err = gswip_pce_table_entry_write(priv, &mac_bridge);
- if (err)
- dev_err(priv->dev, "failed to write mac bridge: %d\n", err);
-
- return err;
-}
-
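The bridge MAC table key above packs the six MAC address octets into three 16-bit words, with the most significant octets landing in key[2]; gswip_port_fdb_dump() below performs the inverse transform. A self-contained sketch of that packing and unpacking (the helper names are illustrative, not driver functions):

#include <stdint.h>
#include <stdio.h>

/* Pack addr[0..5] (addr[0] is the most significant octet) the same way
 * gswip_port_fdb() fills key[0..2]: two octets per 16-bit word.
 */
static void mac_to_key(const uint8_t addr[6], uint16_t key[3])
{
	key[0] = addr[5] | (addr[4] << 8);
	key[1] = addr[3] | (addr[2] << 8);
	key[2] = addr[1] | (addr[0] << 8);
}

/* Inverse transform, as done in gswip_port_fdb_dump(). */
static void key_to_mac(const uint16_t key[3], uint8_t addr[6])
{
	addr[5] = key[0] & 0xff;
	addr[4] = (key[0] >> 8) & 0xff;
	addr[3] = key[1] & 0xff;
	addr[2] = (key[1] >> 8) & 0xff;
	addr[1] = key[2] & 0xff;
	addr[0] = (key[2] >> 8) & 0xff;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint16_t key[3];
	uint8_t back[6];

	mac_to_key(mac, key);
	key_to_mac(key, back);
	/* Prints key[0]=0x4455 key[1]=0x2233 key[2]=0x0011 */
	printf("key[0]=0x%04x key[1]=0x%04x key[2]=0x%04x\n",
	       key[0], key[1], key[2]);
	return back[0] == mac[0] ? 0 : 1;
}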
-static int gswip_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid,
- struct dsa_db db)
-{
- return gswip_port_fdb(ds, port, addr, vid, true);
-}
-
-static int gswip_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid,
- struct dsa_db db)
-{
- return gswip_port_fdb(ds, port, addr, vid, false);
-}
-
-static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
- dsa_fdb_dump_cb_t *cb, void *data)
-{
- struct gswip_priv *priv = ds->priv;
- struct gswip_pce_table_entry mac_bridge = {0,};
- unsigned char addr[ETH_ALEN];
- int i;
- int err;
-
- for (i = 0; i < 2048; i++) {
- mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
- mac_bridge.index = i;
-
- err = gswip_pce_table_entry_read(priv, &mac_bridge);
- if (err) {
- dev_err(priv->dev,
- "failed to read mac bridge entry %d: %d\n",
- i, err);
- return err;
- }
-
- if (!mac_bridge.valid)
- continue;
-
- addr[5] = mac_bridge.key[0] & 0xff;
- addr[4] = (mac_bridge.key[0] >> 8) & 0xff;
- addr[3] = mac_bridge.key[1] & 0xff;
- addr[2] = (mac_bridge.key[1] >> 8) & 0xff;
- addr[1] = mac_bridge.key[2] & 0xff;
- addr[0] = (mac_bridge.key[2] >> 8) & 0xff;
- if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC) {
- if (mac_bridge.val[0] & BIT(port)) {
- err = cb(addr, 0, true, data);
- if (err)
- return err;
- }
- } else {
- if (port == FIELD_GET(GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT,
- mac_bridge.val[0])) {
- err = cb(addr, 0, false, data);
- if (err)
- return err;
- }
- }
- }
- return 0;
-}
-
-static int gswip_port_max_mtu(struct dsa_switch *ds, int port)
-{
- /* Includes 8 bytes for special header. */
- return GSWIP_MAX_PACKET_LENGTH - VLAN_ETH_HLEN - ETH_FCS_LEN;
-}
-
-static int gswip_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
-{
- struct gswip_priv *priv = ds->priv;
-
- /* CPU port always has maximum mtu of user ports, so use it to set
- * switch frame size, including 8 byte special header.
- */
- if (dsa_is_cpu_port(ds, port)) {
- new_mtu += 8;
- gswip_switch_w(priv, VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN,
- GSWIP_MAC_FLEN);
- }
-
- /* Enable MLEN for ports with non-standard MTUs, including the special
- * header on the CPU port added above.
- */
- if (new_mtu != ETH_DATA_LEN)
- gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
- GSWIP_MAC_CTRL_2p(port));
- else
- gswip_switch_mask(priv, GSWIP_MAC_CTRL_2_MLEN, 0,
- GSWIP_MAC_CTRL_2p(port));
-
- return 0;
-}
-
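The frame-length value programmed above follows directly from the MTU plus the VLAN Ethernet header, the FCS, and (on the CPU port only) the 8-byte special tag. A quick worked calculation for the default MTU, using the standard header sizes; this is plain arithmetic mirroring the code, not driver code itself:

#include <stdio.h>

#define ETH_DATA_LEN		1500	/* default MTU */
#define VLAN_ETH_HLEN		18	/* DA + SA + VLAN tag + EtherType */
#define ETH_FCS_LEN		4
#define GSWIP_SPECIAL_TAG	8	/* CPU-port header added by the tagger */

int main(void)
{
	int mtu = ETH_DATA_LEN;

	/* User port: 18 + 1500 + 4 = 1522 bytes on the wire */
	printf("user port frame len: %d\n",
	       VLAN_ETH_HLEN + mtu + ETH_FCS_LEN);
	/* CPU port adds the special tag: 18 + 1508 + 4 = 1530 */
	printf("cpu port GSWIP_MAC_FLEN value: %d\n",
	       VLAN_ETH_HLEN + mtu + GSWIP_SPECIAL_TAG + ETH_FCS_LEN);
	return 0;
}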
-static void gswip_xrx200_phylink_get_caps(struct dsa_switch *ds, int port,
- struct phylink_config *config)
-{
- switch (port) {
- case 0:
- case 1:
- phy_interface_set_rgmii(config->supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_MII,
- config->supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_REVMII,
- config->supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_RMII,
- config->supported_interfaces);
- break;
-
- case 2:
- case 3:
- case 4:
- case 6:
- __set_bit(PHY_INTERFACE_MODE_INTERNAL,
- config->supported_interfaces);
- break;
-
- case 5:
- phy_interface_set_rgmii(config->supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_INTERNAL,
- config->supported_interfaces);
- break;
- }
-
- config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
- MAC_10 | MAC_100 | MAC_1000;
-}
-
-static void gswip_xrx300_phylink_get_caps(struct dsa_switch *ds, int port,
- struct phylink_config *config)
-{
- switch (port) {
- case 0:
- phy_interface_set_rgmii(config->supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_GMII,
- config->supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_RMII,
- config->supported_interfaces);
- break;
-
- case 1:
- case 2:
- case 3:
- case 4:
- case 6:
- __set_bit(PHY_INTERFACE_MODE_INTERNAL,
- config->supported_interfaces);
- break;
-
- case 5:
- phy_interface_set_rgmii(config->supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_INTERNAL,
- config->supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_RMII,
- config->supported_interfaces);
- break;
- }
-
- config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
- MAC_10 | MAC_100 | MAC_1000;
-}
-
-static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
-{
- u32 mdio_phy;
-
- if (link)
- mdio_phy = GSWIP_MDIO_PHY_LINK_UP;
- else
- mdio_phy = GSWIP_MDIO_PHY_LINK_DOWN;
-
- gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_MASK, mdio_phy,
- GSWIP_MDIO_PHYp(port));
-}
-
-static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed,
- phy_interface_t interface)
-{
- u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0;
-
- switch (speed) {
- case SPEED_10:
- mdio_phy = GSWIP_MDIO_PHY_SPEED_M10;
-
- if (interface == PHY_INTERFACE_MODE_RMII)
- mii_cfg = GSWIP_MII_CFG_RATE_M50;
- else
- mii_cfg = GSWIP_MII_CFG_RATE_M2P5;
-
- mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
- break;
-
- case SPEED_100:
- mdio_phy = GSWIP_MDIO_PHY_SPEED_M100;
-
- if (interface == PHY_INTERFACE_MODE_RMII)
- mii_cfg = GSWIP_MII_CFG_RATE_M50;
- else
- mii_cfg = GSWIP_MII_CFG_RATE_M25;
-
- mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
- break;
-
- case SPEED_1000:
- mdio_phy = GSWIP_MDIO_PHY_SPEED_G1;
-
- mii_cfg = GSWIP_MII_CFG_RATE_M125;
-
- mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII;
- break;
- }
-
- gswip_mdio_mask(priv, GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy,
- GSWIP_MDIO_PHYp(port));
- gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port);
- gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0,
- GSWIP_MAC_CTRL_0p(port));
-}
-
-static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex)
-{
- u32 mac_ctrl_0, mdio_phy;
-
- if (duplex == DUPLEX_FULL) {
- mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN;
- mdio_phy = GSWIP_MDIO_PHY_FDUP_EN;
- } else {
- mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS;
- mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS;
- }
-
- gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0,
- GSWIP_MAC_CTRL_0p(port));
- gswip_mdio_mask(priv, GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy,
- GSWIP_MDIO_PHYp(port));
-}
-
-static void gswip_port_set_pause(struct gswip_priv *priv, int port,
- bool tx_pause, bool rx_pause)
-{
- u32 mac_ctrl_0, mdio_phy;
-
- if (tx_pause && rx_pause) {
- mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX;
- mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
- GSWIP_MDIO_PHY_FCONRX_EN;
- } else if (tx_pause) {
- mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX;
- mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
- GSWIP_MDIO_PHY_FCONRX_DIS;
- } else if (rx_pause) {
- mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX;
- mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
- GSWIP_MDIO_PHY_FCONRX_EN;
- } else {
- mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE;
- mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
- GSWIP_MDIO_PHY_FCONRX_DIS;
- }
-
- gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FCON_MASK,
- mac_ctrl_0, GSWIP_MAC_CTRL_0p(port));
- gswip_mdio_mask(priv,
- GSWIP_MDIO_PHY_FCONTX_MASK |
- GSWIP_MDIO_PHY_FCONRX_MASK,
- mdio_phy, GSWIP_MDIO_PHYp(port));
-}
-
-static void gswip_phylink_mac_config(struct phylink_config *config,
- unsigned int mode,
- const struct phylink_link_state *state)
-{
- struct dsa_port *dp = dsa_phylink_to_port(config);
- struct gswip_priv *priv = dp->ds->priv;
- int port = dp->index;
- u32 miicfg = 0;
-
- miicfg |= GSWIP_MII_CFG_LDCLKDIS;
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_MII:
- case PHY_INTERFACE_MODE_INTERNAL:
- miicfg |= GSWIP_MII_CFG_MODE_MIIM;
- break;
- case PHY_INTERFACE_MODE_REVMII:
- miicfg |= GSWIP_MII_CFG_MODE_MIIP;
- break;
- case PHY_INTERFACE_MODE_RMII:
- miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
- break;
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- miicfg |= GSWIP_MII_CFG_MODE_RGMII;
- break;
- case PHY_INTERFACE_MODE_GMII:
- miicfg |= GSWIP_MII_CFG_MODE_GMII;
- break;
- default:
- dev_err(dp->ds->dev,
- "Unsupported interface: %d\n", state->interface);
- return;
- }
-
- gswip_mii_mask_cfg(priv,
- GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK |
- GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS,
- miicfg, port);
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_RGMII_ID:
- gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK |
- GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
- break;
- case PHY_INTERFACE_MODE_RGMII_RXID:
- gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
- break;
- case PHY_INTERFACE_MODE_RGMII_TXID:
- gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK, 0, port);
- break;
- default:
- break;
- }
-}
-
-static void gswip_phylink_mac_link_down(struct phylink_config *config,
- unsigned int mode,
- phy_interface_t interface)
-{
- struct dsa_port *dp = dsa_phylink_to_port(config);
- struct gswip_priv *priv = dp->ds->priv;
- int port = dp->index;
-
- gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port);
-
- if (!dsa_port_is_cpu(dp))
- gswip_port_set_link(priv, port, false);
-}
-
-static void gswip_phylink_mac_link_up(struct phylink_config *config,
- struct phy_device *phydev,
- unsigned int mode,
- phy_interface_t interface,
- int speed, int duplex,
- bool tx_pause, bool rx_pause)
-{
- struct dsa_port *dp = dsa_phylink_to_port(config);
- struct gswip_priv *priv = dp->ds->priv;
- int port = dp->index;
-
- if (!dsa_port_is_cpu(dp)) {
- gswip_port_set_link(priv, port, true);
- gswip_port_set_speed(priv, port, speed, interface);
- gswip_port_set_duplex(priv, port, duplex);
- gswip_port_set_pause(priv, port, tx_pause, rx_pause);
- }
-
- gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
-}
-
-static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
- uint8_t *data)
-{
- int i;
-
- if (stringset != ETH_SS_STATS)
- return;
-
- for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++)
- ethtool_puts(&data, gswip_rmon_cnt[i].name);
-}
-
-static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table,
- u32 index)
-{
- u32 result;
- int err;
-
- gswip_switch_w(priv, index, GSWIP_BM_RAM_ADDR);
- gswip_switch_mask(priv, GSWIP_BM_RAM_CTRL_ADDR_MASK |
- GSWIP_BM_RAM_CTRL_OPMOD,
- table | GSWIP_BM_RAM_CTRL_BAS,
- GSWIP_BM_RAM_CTRL);
-
- err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL,
- GSWIP_BM_RAM_CTRL_BAS);
- if (err) {
- dev_err(priv->dev, "timeout while reading table: %u, index: %u\n",
- table, index);
- return 0;
- }
-
- result = gswip_switch_r(priv, GSWIP_BM_RAM_VAL(0));
- result |= gswip_switch_r(priv, GSWIP_BM_RAM_VAL(1)) << 16;
-
- return result;
-}
-
-static void gswip_get_ethtool_stats(struct dsa_switch *ds, int port,
- uint64_t *data)
-{
- struct gswip_priv *priv = ds->priv;
- const struct gswip_rmon_cnt_desc *rmon_cnt;
- int i;
- u64 high;
-
- for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) {
- rmon_cnt = &gswip_rmon_cnt[i];
-
- data[i] = gswip_bcm_ram_entry_read(priv, port,
- rmon_cnt->offset);
- if (rmon_cnt->size == 2) {
- high = gswip_bcm_ram_entry_read(priv, port,
- rmon_cnt->offset + 1);
- data[i] |= high << 32;
- }
- }
-}
-
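Each RMON counter above is assembled from two 16-bit BM RAM words, and 64-bit counters take a second 32-bit read as the high half; because `high` is declared u64 in the driver, the `<< 32` cannot truncate. A minimal sketch of the same assembly in plain C, included only to illustrate the widening:

#include <stdint.h>
#include <stdio.h>

/* Combine the two 16-bit BM RAM words into one 32-bit counter word. */
static uint32_t combine16(uint16_t lo, uint16_t hi)
{
	return (uint32_t)lo | ((uint32_t)hi << 16);
}

int main(void)
{
	uint32_t low  = combine16(0x5678, 0x1234);	/* 0x12345678 */
	uint32_t high = combine16(0x0001, 0x0000);	/* 0x00000001 */
	/* The high word must be widened to 64 bits before shifting by 32. */
	uint64_t counter = (uint64_t)low | ((uint64_t)high << 32);

	printf("counter = 0x%016llx\n", (unsigned long long)counter);
	return 0;
}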
-static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset)
-{
- if (sset != ETH_SS_STATS)
- return 0;
-
- return ARRAY_SIZE(gswip_rmon_cnt);
-}
-
-static const struct phylink_mac_ops gswip_phylink_mac_ops = {
- .mac_config = gswip_phylink_mac_config,
- .mac_link_down = gswip_phylink_mac_link_down,
- .mac_link_up = gswip_phylink_mac_link_up,
-};
-
-static const struct dsa_switch_ops gswip_xrx200_switch_ops = {
- .get_tag_protocol = gswip_get_tag_protocol,
- .setup = gswip_setup,
- .port_enable = gswip_port_enable,
- .port_disable = gswip_port_disable,
- .port_bridge_join = gswip_port_bridge_join,
- .port_bridge_leave = gswip_port_bridge_leave,
- .port_fast_age = gswip_port_fast_age,
- .port_vlan_filtering = gswip_port_vlan_filtering,
- .port_vlan_add = gswip_port_vlan_add,
- .port_vlan_del = gswip_port_vlan_del,
- .port_stp_state_set = gswip_port_stp_state_set,
- .port_fdb_add = gswip_port_fdb_add,
- .port_fdb_del = gswip_port_fdb_del,
- .port_fdb_dump = gswip_port_fdb_dump,
- .port_change_mtu = gswip_port_change_mtu,
- .port_max_mtu = gswip_port_max_mtu,
- .phylink_get_caps = gswip_xrx200_phylink_get_caps,
- .get_strings = gswip_get_strings,
- .get_ethtool_stats = gswip_get_ethtool_stats,
- .get_sset_count = gswip_get_sset_count,
-};
-
-static const struct dsa_switch_ops gswip_xrx300_switch_ops = {
- .get_tag_protocol = gswip_get_tag_protocol,
- .setup = gswip_setup,
- .port_enable = gswip_port_enable,
- .port_disable = gswip_port_disable,
- .port_bridge_join = gswip_port_bridge_join,
- .port_bridge_leave = gswip_port_bridge_leave,
- .port_fast_age = gswip_port_fast_age,
- .port_vlan_filtering = gswip_port_vlan_filtering,
- .port_vlan_add = gswip_port_vlan_add,
- .port_vlan_del = gswip_port_vlan_del,
- .port_stp_state_set = gswip_port_stp_state_set,
- .port_fdb_add = gswip_port_fdb_add,
- .port_fdb_del = gswip_port_fdb_del,
- .port_fdb_dump = gswip_port_fdb_dump,
- .port_change_mtu = gswip_port_change_mtu,
- .port_max_mtu = gswip_port_max_mtu,
- .phylink_get_caps = gswip_xrx300_phylink_get_caps,
- .get_strings = gswip_get_strings,
- .get_ethtool_stats = gswip_get_ethtool_stats,
- .get_sset_count = gswip_get_sset_count,
-};
-
-static const struct xway_gphy_match_data xrx200a1x_gphy_data = {
- .fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin",
- .ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin",
-};
-
-static const struct xway_gphy_match_data xrx200a2x_gphy_data = {
- .fe_firmware_name = "lantiq/xrx200_phy22f_a22.bin",
- .ge_firmware_name = "lantiq/xrx200_phy11g_a22.bin",
-};
-
-static const struct xway_gphy_match_data xrx300_gphy_data = {
- .fe_firmware_name = "lantiq/xrx300_phy22f_a21.bin",
- .ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin",
-};
-
-static const struct of_device_id xway_gphy_match[] __maybe_unused = {
- { .compatible = "lantiq,xrx200-gphy-fw", .data = NULL },
- { .compatible = "lantiq,xrx200a1x-gphy-fw", .data = &xrx200a1x_gphy_data },
- { .compatible = "lantiq,xrx200a2x-gphy-fw", .data = &xrx200a2x_gphy_data },
- { .compatible = "lantiq,xrx300-gphy-fw", .data = &xrx300_gphy_data },
- { .compatible = "lantiq,xrx330-gphy-fw", .data = &xrx300_gphy_data },
- {},
-};
-
-static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gphy_fw)
-{
- struct device *dev = priv->dev;
- const struct firmware *fw;
- void *fw_addr;
- dma_addr_t dma_addr;
- dma_addr_t dev_addr;
- size_t size;
- int ret;
-
- ret = clk_prepare_enable(gphy_fw->clk_gate);
- if (ret)
- return ret;
-
- reset_control_assert(gphy_fw->reset);
-
- /* The vendor BSP uses a 200ms delay after asserting the reset line.
- * Without this some users are observing that the PHY is not coming up
- * on the MDIO bus.
- */
- msleep(200);
-
- ret = request_firmware(&fw, gphy_fw->fw_name, dev);
- if (ret)
- return dev_err_probe(dev, ret, "failed to load firmware: %s\n",
- gphy_fw->fw_name);
-
- /* GPHY cores need the firmware code in a persistent and contiguous
- * memory area with a 16 kB boundary aligned start address.
- */
- size = fw->size + XRX200_GPHY_FW_ALIGN;
-
- fw_addr = dmam_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
- if (fw_addr) {
- fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN);
- dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN);
- memcpy(fw_addr, fw->data, fw->size);
- } else {
- release_firmware(fw);
- return dev_err_probe(dev, -ENOMEM,
- "failed to alloc firmware memory\n");
- }
-
- release_firmware(fw);
-
- ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, dev_addr);
- if (ret)
- return ret;
-
- reset_control_deassert(gphy_fw->reset);
-
- return ret;
-}
-
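The firmware buffer above is over-allocated by the alignment so that both the CPU pointer and the DMA address can be rounded up to the next boundary the GPHY requires. A hedged userspace sketch of that over-allocate-then-align pattern; the 16 KiB value stands in for XRX200_GPHY_FW_ALIGN and is an assumption based on the comment, not a verified macro value:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define FW_ALIGN (16 * 1024)	/* assumed value of XRX200_GPHY_FW_ALIGN */

/* Round x up to the next multiple of a (a must be a power of two). */
static uintptr_t align_up(uintptr_t x, uintptr_t a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	size_t fw_size = 60000;	/* example firmware size */
	/* Over-allocate so an aligned start address always fits. */
	void *buf = malloc(fw_size + FW_ALIGN);
	if (!buf)
		return 1;

	void *fw_addr = (void *)align_up((uintptr_t)buf, FW_ALIGN);
	memset(fw_addr, 0, fw_size);	/* stands in for copying fw->data */

	printf("raw=%p aligned=%p\n", buf, fw_addr);
	free(buf);
	return 0;
}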
-static int gswip_gphy_fw_probe(struct gswip_priv *priv,
- struct gswip_gphy_fw *gphy_fw,
- struct device_node *gphy_fw_np, int i)
-{
- struct device *dev = priv->dev;
- u32 gphy_mode;
- int ret;
- char gphyname[10];
-
- snprintf(gphyname, sizeof(gphyname), "gphy%d", i);
-
- gphy_fw->clk_gate = devm_clk_get(dev, gphyname);
- if (IS_ERR(gphy_fw->clk_gate)) {
- return dev_err_probe(dev, PTR_ERR(gphy_fw->clk_gate),
- "Failed to lookup gate clock\n");
- }
-
- ret = of_property_read_u32(gphy_fw_np, "reg", &gphy_fw->fw_addr_offset);
- if (ret)
- return ret;
-
- ret = of_property_read_u32(gphy_fw_np, "lantiq,gphy-mode", &gphy_mode);
- /* Default to GE mode */
- if (ret)
- gphy_mode = GPHY_MODE_GE;
-
- switch (gphy_mode) {
- case GPHY_MODE_FE:
- gphy_fw->fw_name = priv->gphy_fw_name_cfg->fe_firmware_name;
- break;
- case GPHY_MODE_GE:
- gphy_fw->fw_name = priv->gphy_fw_name_cfg->ge_firmware_name;
- break;
- default:
- return dev_err_probe(dev, -EINVAL, "Unknown GPHY mode %d\n",
- gphy_mode);
- }
-
- gphy_fw->reset = of_reset_control_array_get_exclusive(gphy_fw_np);
- if (IS_ERR(gphy_fw->reset))
- return dev_err_probe(dev, PTR_ERR(gphy_fw->reset),
- "Failed to lookup gphy reset\n");
-
- return gswip_gphy_fw_load(priv, gphy_fw);
-}
-
-static void gswip_gphy_fw_remove(struct gswip_priv *priv,
- struct gswip_gphy_fw *gphy_fw)
-{
- int ret;
-
- /* check if the device was fully probed */
- if (!gphy_fw->fw_name)
- return;
-
- ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, 0);
- if (ret)
- dev_err(priv->dev, "can not reset GPHY FW pointer\n");
-
- clk_disable_unprepare(gphy_fw->clk_gate);
-
- reset_control_put(gphy_fw->reset);
-}
-
-static int gswip_gphy_fw_list(struct gswip_priv *priv,
- struct device_node *gphy_fw_list_np, u32 version)
-{
- struct device *dev = priv->dev;
- struct device_node *gphy_fw_np;
- const struct of_device_id *match;
- int err;
- int i = 0;
-
- /* The VRX200 rev 1.1 uses the GSWIP 2.0 and needs the older
- * GPHY firmware. The VRX200 rev 1.2 uses the GSWIP 2.1 and also
- * needs a different GPHY firmware.
- */
- if (of_device_is_compatible(gphy_fw_list_np, "lantiq,xrx200-gphy-fw")) {
- switch (version) {
- case GSWIP_VERSION_2_0:
- priv->gphy_fw_name_cfg = &xrx200a1x_gphy_data;
- break;
- case GSWIP_VERSION_2_1:
- priv->gphy_fw_name_cfg = &xrx200a2x_gphy_data;
- break;
- default:
- return dev_err_probe(dev, -ENOENT,
- "unknown GSWIP version: 0x%x\n",
- version);
- }
- }
-
- match = of_match_node(xway_gphy_match, gphy_fw_list_np);
- if (match && match->data)
- priv->gphy_fw_name_cfg = match->data;
-
- if (!priv->gphy_fw_name_cfg)
- return dev_err_probe(dev, -ENOENT,
- "GPHY compatible type not supported\n");
-
- priv->num_gphy_fw = of_get_available_child_count(gphy_fw_list_np);
- if (!priv->num_gphy_fw)
- return -ENOENT;
-
- priv->rcu_regmap = syscon_regmap_lookup_by_phandle(gphy_fw_list_np,
- "lantiq,rcu");
- if (IS_ERR(priv->rcu_regmap))
- return PTR_ERR(priv->rcu_regmap);
-
- priv->gphy_fw = devm_kmalloc_array(dev, priv->num_gphy_fw,
- sizeof(*priv->gphy_fw),
- GFP_KERNEL | __GFP_ZERO);
- if (!priv->gphy_fw)
- return -ENOMEM;
-
- for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
- err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
- gphy_fw_np, i);
- if (err) {
- of_node_put(gphy_fw_np);
- goto remove_gphy;
- }
- i++;
- }
-
- /* The standalone PHY11G requires 300ms to be fully
- * initialized and ready for any MDIO communication after being
- * taken out of reset. For the SoC-internal GPHY variant there
- * is no (known) documentation for the minimum time after a
- * reset. Use the same value as for the standalone variant as
- * some users have reported internal PHYs not being detected
- * without any delay.
- */
- msleep(300);
-
- return 0;
-
-remove_gphy:
- for (i = 0; i < priv->num_gphy_fw; i++)
- gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
- return err;
-}
-
-static int gswip_probe(struct platform_device *pdev)
-{
- struct device_node *np, *gphy_fw_np;
- struct device *dev = &pdev->dev;
- struct gswip_priv *priv;
- int err;
- int i;
- u32 version;
-
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->gswip = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(priv->gswip))
- return PTR_ERR(priv->gswip);
-
- priv->mdio = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(priv->mdio))
- return PTR_ERR(priv->mdio);
-
- priv->mii = devm_platform_ioremap_resource(pdev, 2);
- if (IS_ERR(priv->mii))
- return PTR_ERR(priv->mii);
-
- priv->hw_info = of_device_get_match_data(dev);
- if (!priv->hw_info)
- return -EINVAL;
-
- priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
- if (!priv->ds)
- return -ENOMEM;
-
- priv->ds->dev = dev;
- priv->ds->num_ports = priv->hw_info->max_ports;
- priv->ds->priv = priv;
- priv->ds->ops = priv->hw_info->ops;
- priv->ds->phylink_mac_ops = &gswip_phylink_mac_ops;
- priv->dev = dev;
- mutex_init(&priv->pce_table_lock);
- version = gswip_switch_r(priv, GSWIP_VERSION);
-
- np = dev->of_node;
- switch (version) {
- case GSWIP_VERSION_2_0:
- case GSWIP_VERSION_2_1:
- if (!of_device_is_compatible(np, "lantiq,xrx200-gswip"))
- return -EINVAL;
- break;
- case GSWIP_VERSION_2_2:
- case GSWIP_VERSION_2_2_ETC:
- if (!of_device_is_compatible(np, "lantiq,xrx300-gswip") &&
- !of_device_is_compatible(np, "lantiq,xrx330-gswip"))
- return -EINVAL;
- break;
- default:
- return dev_err_probe(dev, -ENOENT,
- "unknown GSWIP version: 0x%x\n", version);
- }
-
-	/* load the GPHY firmware */
- gphy_fw_np = of_get_compatible_child(dev->of_node, "lantiq,gphy-fw");
- if (gphy_fw_np) {
- err = gswip_gphy_fw_list(priv, gphy_fw_np, version);
- of_node_put(gphy_fw_np);
- if (err)
- return dev_err_probe(dev, err,
- "gphy fw probe failed\n");
- }
-
- /* bring up the mdio bus */
- err = gswip_mdio(priv);
- if (err) {
- dev_err_probe(dev, err, "mdio probe failed\n");
- goto gphy_fw_remove;
- }
-
- err = dsa_register_switch(priv->ds);
- if (err) {
- dev_err_probe(dev, err, "dsa switch registration failed\n");
- goto gphy_fw_remove;
- }
- if (!dsa_is_cpu_port(priv->ds, priv->hw_info->cpu_port)) {
- err = dev_err_probe(dev, -EINVAL,
- "wrong CPU port defined, HW only supports port: %i\n",
- priv->hw_info->cpu_port);
- goto disable_switch;
- }
-
- platform_set_drvdata(pdev, priv);
-
- dev_info(dev, "probed GSWIP version %lx mod %lx\n",
- (version & GSWIP_VERSION_REV_MASK) >> GSWIP_VERSION_REV_SHIFT,
- (version & GSWIP_VERSION_MOD_MASK) >> GSWIP_VERSION_MOD_SHIFT);
- return 0;
-
-disable_switch:
- gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
- dsa_unregister_switch(priv->ds);
-gphy_fw_remove:
- for (i = 0; i < priv->num_gphy_fw; i++)
- gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
- return err;
-}
-
-static void gswip_remove(struct platform_device *pdev)
-{
- struct gswip_priv *priv = platform_get_drvdata(pdev);
- int i;
-
- if (!priv)
- return;
-
- /* disable the switch */
- gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
-
- dsa_unregister_switch(priv->ds);
-
- for (i = 0; i < priv->num_gphy_fw; i++)
- gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
-}
-
-static void gswip_shutdown(struct platform_device *pdev)
-{
- struct gswip_priv *priv = platform_get_drvdata(pdev);
-
- if (!priv)
- return;
-
- dsa_switch_shutdown(priv->ds);
-
- platform_set_drvdata(pdev, NULL);
-}
-
-static const struct gswip_hw_info gswip_xrx200 = {
- .max_ports = 7,
- .cpu_port = 6,
- .ops = &gswip_xrx200_switch_ops,
-};
-
-static const struct gswip_hw_info gswip_xrx300 = {
- .max_ports = 7,
- .cpu_port = 6,
- .ops = &gswip_xrx300_switch_ops,
-};
-
-static const struct of_device_id gswip_of_match[] = {
- { .compatible = "lantiq,xrx200-gswip", .data = &gswip_xrx200 },
- { .compatible = "lantiq,xrx300-gswip", .data = &gswip_xrx300 },
- { .compatible = "lantiq,xrx330-gswip", .data = &gswip_xrx300 },
- {},
-};
-MODULE_DEVICE_TABLE(of, gswip_of_match);
-
-static struct platform_driver gswip_driver = {
- .probe = gswip_probe,
- .remove = gswip_remove,
- .shutdown = gswip_shutdown,
- .driver = {
- .name = "gswip",
- .of_match_table = gswip_of_match,
- },
-};
-
-module_platform_driver(gswip_driver);
-
-MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin");
-MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin");
-MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin");
-MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin");
-MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin");
-MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin");
-MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
-MODULE_DESCRIPTION("Lantiq / Intel GSWIP driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig
index 12a86585a77f..c71d3fd5dfeb 100644
--- a/drivers/net/dsa/microchip/Kconfig
+++ b/drivers/net/dsa/microchip/Kconfig
@@ -6,6 +6,7 @@ menuconfig NET_DSA_MICROCHIP_KSZ_COMMON
select NET_DSA_TAG_NONE
select NET_IEEE8021Q_HELPERS
select DCB
+ select PCS_XPCS
help
This driver adds support for Microchip KSZ8, KSZ9 and
LAN937X series switch chips, being KSZ8863/8873,
diff --git a/drivers/net/dsa/microchip/ksz8.c b/drivers/net/dsa/microchip/ksz8.c
index da7110d67558..c354abdafc1b 100644
--- a/drivers/net/dsa/microchip/ksz8.c
+++ b/drivers/net/dsa/microchip/ksz8.c
@@ -3,6 +3,7 @@
* Microchip KSZ8XXX series switch driver
*
* It supports the following switches:
+ * - KSZ8463
* - KSZ8863, KSZ8873 aka KSZ88X3
* - KSZ8895, KSZ8864 aka KSZ8895 family
* - KSZ8794, KSZ8795, KSZ8765 aka KSZ87XX
@@ -35,14 +36,14 @@
static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
- regmap_update_bits(ksz_regmap_8(dev), addr, bits, set ? bits : 0);
+ ksz_rmw8(dev, addr, bits, set ? bits : 0);
}
static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
bool set)
{
- regmap_update_bits(ksz_regmap_8(dev), PORT_CTRL_ADDR(port, offset),
- bits, set ? bits : 0);
+ ksz_rmw8(dev, dev->dev_ops->get_port_addr(port, offset), bits,
+ set ? bits : 0);
}
/**
@@ -140,6 +141,11 @@ int ksz8_reset_switch(struct ksz_device *dev)
KSZ8863_GLOBAL_SOFTWARE_RESET | KSZ8863_PCS_RESET, true);
ksz_cfg(dev, KSZ8863_REG_SW_RESET,
KSZ8863_GLOBAL_SOFTWARE_RESET | KSZ8863_PCS_RESET, false);
+ } else if (ksz_is_ksz8463(dev)) {
+ ksz_cfg(dev, KSZ8463_REG_SW_RESET,
+ KSZ8463_GLOBAL_SOFTWARE_RESET, true);
+ ksz_cfg(dev, KSZ8463_REG_SW_RESET,
+ KSZ8463_GLOBAL_SOFTWARE_RESET, false);
} else {
/* reset switch */
ksz_write8(dev, REG_POWER_MANAGEMENT_1,
@@ -194,6 +200,7 @@ int ksz8_change_mtu(struct ksz_device *dev, int port, int mtu)
case KSZ8794_CHIP_ID:
case KSZ8765_CHIP_ID:
return ksz8795_change_mtu(dev, frame_size);
+ case KSZ8463_CHIP_ID:
case KSZ88X3_CHIP_ID:
case KSZ8864_CHIP_ID:
case KSZ8895_CHIP_ID:
@@ -227,6 +234,11 @@ static int ksz8_port_queue_split(struct ksz_device *dev, int port, int queues)
WEIGHTED_FAIR_QUEUE_ENABLE);
if (ret)
return ret;
+ } else if (ksz_is_ksz8463(dev)) {
+ mask_4q = KSZ8873_PORT_4QUEUE_SPLIT_EN;
+ mask_2q = KSZ8873_PORT_2QUEUE_SPLIT_EN;
+ reg_4q = P1CR1;
+ reg_2q = P1CR1 + 1;
} else {
mask_4q = KSZ8795_PORT_4QUEUE_SPLIT_EN;
mask_2q = KSZ8795_PORT_2QUEUE_SPLIT_EN;
@@ -371,6 +383,9 @@ static void ksz8863_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
addr -= dev->info->reg_mib_cnt;
ctrl_addr = addr ? KSZ8863_MIB_PACKET_DROPPED_TX_0 :
KSZ8863_MIB_PACKET_DROPPED_RX_0;
+ if (ksz_is_8895_family(dev) &&
+ ctrl_addr == KSZ8863_MIB_PACKET_DROPPED_RX_0)
+ ctrl_addr = KSZ8895_MIB_PACKET_DROPPED_RX_0;
ctrl_addr += port;
ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
@@ -1265,12 +1280,15 @@ int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member)
{
+ int offset = P_MIRROR_CTRL;
u8 data;
- ksz_pread8(dev, port, P_MIRROR_CTRL, &data);
- data &= ~PORT_VLAN_MEMBERSHIP;
+ if (ksz_is_ksz8463(dev))
+ offset = P1CR2;
+ ksz_pread8(dev, port, offset, &data);
+ data &= ~dev->port_mask;
data |= (member & dev->port_mask);
- ksz_pwrite8(dev, port, P_MIRROR_CTRL, data);
+ ksz_pwrite8(dev, port, offset, data);
}
void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
@@ -1278,6 +1296,8 @@ void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
u8 learn[DSA_MAX_PORTS];
int first, index, cnt;
const u16 *regs;
+ int reg = S_FLUSH_TABLE_CTRL;
+ int mask = SW_FLUSH_DYN_MAC_TABLE;
regs = dev->info->regs;
@@ -1295,7 +1315,11 @@ void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
ksz_pwrite8(dev, index, regs[P_STP_CTRL],
learn[index] | PORT_LEARN_DISABLE);
}
- ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
+ if (ksz_is_ksz8463(dev)) {
+ reg = KSZ8463_FLUSH_TABLE_CTRL;
+ mask = KSZ8463_FLUSH_DYN_MAC_TABLE;
+ }
+ ksz_cfg(dev, reg, mask, true);
for (index = first; index < cnt; index++) {
if (!(learn[index] & PORT_LEARN_DISABLE))
ksz_pwrite8(dev, index, regs[P_STP_CTRL], learn[index]);
@@ -1434,7 +1458,7 @@ int ksz8_fdb_del(struct ksz_device *dev, int port, const unsigned char *addr,
int ksz8_port_vlan_filtering(struct ksz_device *dev, int port, bool flag,
struct netlink_ext_ack *extack)
{
- if (ksz_is_ksz88x3(dev))
+ if (ksz_is_ksz88x3(dev) || ksz_is_ksz8463(dev))
return -ENOTSUPP;
/* Discard packets with VID not enabled on the switch */
@@ -1450,9 +1474,12 @@ int ksz8_port_vlan_filtering(struct ksz_device *dev, int port, bool flag,
static void ksz8_port_enable_pvid(struct ksz_device *dev, int port, bool state)
{
- if (ksz_is_ksz88x3(dev)) {
- ksz_cfg(dev, REG_SW_INSERT_SRC_PVID,
- 0x03 << (4 - 2 * port), state);
+ if (ksz_is_ksz88x3(dev) || ksz_is_ksz8463(dev)) {
+ int reg = REG_SW_INSERT_SRC_PVID;
+
+ if (ksz_is_ksz8463(dev))
+ reg = KSZ8463_REG_SW_CTRL_9;
+ ksz_cfg(dev, reg, 0x03 << (4 - 2 * port), state);
} else {
ksz_pwrite8(dev, port, REG_PORT_CTRL_12, state ? 0x0f : 0x00);
}
@@ -1467,7 +1494,7 @@ int ksz8_port_vlan_add(struct ksz_device *dev, int port,
u16 data, new_pvid = 0;
u8 fid, member, valid;
- if (ksz_is_ksz88x3(dev))
+ if (ksz_is_ksz88x3(dev) || ksz_is_ksz8463(dev))
return -ENOTSUPP;
/* If a VLAN is added with untagged flag different from the
@@ -1536,7 +1563,7 @@ int ksz8_port_vlan_del(struct ksz_device *dev, int port,
u16 data, pvid;
u8 fid, member, valid;
- if (ksz_is_ksz88x3(dev))
+ if (ksz_is_ksz88x3(dev) || ksz_is_ksz8463(dev))
return -ENOTSUPP;
ksz_pread16(dev, port, REG_PORT_CTRL_VID, &pvid);
@@ -1566,19 +1593,23 @@ int ksz8_port_mirror_add(struct ksz_device *dev, int port,
struct dsa_mall_mirror_tc_entry *mirror,
bool ingress, struct netlink_ext_ack *extack)
{
+ int offset = P_MIRROR_CTRL;
+
+ if (ksz_is_ksz8463(dev))
+ offset = P1CR2;
if (ingress) {
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
+ ksz_port_cfg(dev, port, offset, PORT_MIRROR_RX, true);
dev->mirror_rx |= BIT(port);
} else {
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);
+ ksz_port_cfg(dev, port, offset, PORT_MIRROR_TX, true);
dev->mirror_tx |= BIT(port);
}
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_SNIFFER, false);
+ ksz_port_cfg(dev, port, offset, PORT_MIRROR_SNIFFER, false);
/* configure mirror port */
if (dev->mirror_rx || dev->mirror_tx)
- ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
+ ksz_port_cfg(dev, mirror->to_local_port, offset,
PORT_MIRROR_SNIFFER, true);
return 0;
@@ -1587,20 +1618,23 @@ int ksz8_port_mirror_add(struct ksz_device *dev, int port,
void ksz8_port_mirror_del(struct ksz_device *dev, int port,
struct dsa_mall_mirror_tc_entry *mirror)
{
+ int offset = P_MIRROR_CTRL;
u8 data;
+ if (ksz_is_ksz8463(dev))
+ offset = P1CR2;
if (mirror->ingress) {
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
+ ksz_port_cfg(dev, port, offset, PORT_MIRROR_RX, false);
dev->mirror_rx &= ~BIT(port);
} else {
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);
+ ksz_port_cfg(dev, port, offset, PORT_MIRROR_TX, false);
dev->mirror_tx &= ~BIT(port);
}
- ksz_pread8(dev, port, P_MIRROR_CTRL, &data);
+ ksz_pread8(dev, port, offset, &data);
if (!dev->mirror_rx && !dev->mirror_tx)
- ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
+ ksz_port_cfg(dev, mirror->to_local_port, offset,
PORT_MIRROR_SNIFFER, false);
}
@@ -1625,26 +1659,24 @@ void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
const u16 *regs = dev->info->regs;
struct dsa_switch *ds = dev->ds;
const u32 *masks;
- int queues;
+ int offset;
u8 member;
masks = dev->info->masks;
/* enable broadcast storm limit */
- ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
-
- /* For KSZ88x3 enable only one queue by default, otherwise we won't
- * be able to get rid of PCP prios on Port 2.
- */
- if (ksz_is_ksz88x3(dev))
- queues = 1;
- else
- queues = dev->info->num_tx_queues;
+ offset = P_BCAST_STORM_CTRL;
+ if (ksz_is_ksz8463(dev))
+ offset = P1CR1;
+ ksz_port_cfg(dev, port, offset, PORT_BROADCAST_STORM, true);
- ksz8_port_queue_split(dev, port, queues);
+ ksz8_port_queue_split(dev, port, dev->info->num_tx_queues);
/* replace priority */
- ksz_port_cfg(dev, port, P_802_1P_CTRL,
+ offset = P_802_1P_CTRL;
+ if (ksz_is_ksz8463(dev))
+ offset = P1CR2;
+ ksz_port_cfg(dev, port, offset,
masks[PORT_802_1P_REMAPPING], false);
if (cpu_port)
@@ -1684,6 +1716,7 @@ void ksz8_config_cpu_port(struct dsa_switch *ds)
const u32 *masks;
const u16 *regs;
u8 remote;
+ u8 fiber_ports = 0;
int i;
masks = dev->info->masks;
@@ -1714,6 +1747,32 @@ void ksz8_config_cpu_port(struct dsa_switch *ds)
else
ksz_port_cfg(dev, i, regs[P_STP_CTRL],
PORT_FORCE_FLOW_CTRL, false);
+ if (p->fiber)
+ fiber_ports |= (1 << i);
+ }
+ if (ksz_is_ksz8463(dev)) {
+ /* Setup fiber ports. */
+ if (fiber_ports) {
+ fiber_ports &= 3;
+ regmap_update_bits(ksz_regmap_16(dev),
+ KSZ8463_REG_CFG_CTRL,
+ fiber_ports << PORT_COPPER_MODE_S,
+ 0);
+ regmap_update_bits(ksz_regmap_16(dev),
+ KSZ8463_REG_DSP_CTRL_6,
+ COPPER_RECEIVE_ADJUSTMENT, 0);
+ }
+
+ /* Turn off PTP function as the switch's proprietary way of
+		 * handling timestamps is not supported by the current Linux
+		 * PTP stack implementation.
+ */
+ regmap_update_bits(ksz_regmap_16(dev),
+ KSZ8463_PTP_MSG_CONF1,
+ PTP_ENABLE, 0);
+ regmap_update_bits(ksz_regmap_16(dev),
+ KSZ8463_PTP_CLK_CTRL,
+ PTP_CLK_ENABLE, 0);
}
}
@@ -1895,22 +1954,25 @@ int ksz8_setup(struct dsa_switch *ds)
ksz_cfg(dev, S_LINK_AGING_CTRL, SW_LINK_AUTO_AGING, true);
/* Enable aggressive back off algorithm in half duplex mode. */
- regmap_update_bits(ksz_regmap_8(dev), REG_SW_CTRL_1,
- SW_AGGR_BACKOFF, SW_AGGR_BACKOFF);
+ ret = ksz_rmw8(dev, REG_SW_CTRL_1, SW_AGGR_BACKOFF, SW_AGGR_BACKOFF);
+ if (ret)
+ return ret;
/*
* Make sure unicast VLAN boundary is set as default and
* enable no excessive collision drop.
*/
- regmap_update_bits(ksz_regmap_8(dev), REG_SW_CTRL_2,
- UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP,
- UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP);
+ ret = ksz_rmw8(dev, REG_SW_CTRL_2,
+ UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP,
+ UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP);
+ if (ret)
+ return ret;
ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_REPLACE_VID, false);
ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);
- if (!ksz_is_ksz88x3(dev))
+ if (!ksz_is_ksz88x3(dev) && !ksz_is_ksz8463(dev))
ksz_cfg(dev, REG_SW_CTRL_19, SW_INS_TAG_ENABLE, true);
for (i = 0; i < (dev->info->num_vlans / 4); i++)
@@ -1956,6 +2018,84 @@ u32 ksz8_get_port_addr(int port, int offset)
return PORT_CTRL_ADDR(port, offset);
}
+u32 ksz8463_get_port_addr(int port, int offset)
+{
+ return offset + 0x18 * port;
+}
+
+static u16 ksz8463_get_phy_addr(u16 phy, u16 reg, u16 offset)
+{
+ return offset + reg * 2 + phy * (P2MBCR - P1MBCR);
+}
+
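The helper above maps a Clause-22 PHY register to the switch's memory-mapped copy: a base offset, plus two bytes per register, plus a per-PHY stride of P2MBCR - P1MBCR. Using the register values from the ksz8_reg.h hunk further below (P1MBCR 0x4C, P2MBCR 0x58, P2MBSR 0x5A), a standalone check of that formula — plain arithmetic, not driver code:

#include <stdio.h>

#define P1MBCR	0x4C	/* port 1 MII basic control, from ksz8_reg.h */
#define P2MBCR	0x58	/* port 2 MII basic control */
#define MII_BMCR	0x00
#define MII_BMSR	0x01

static unsigned int phy_addr(unsigned int phy, unsigned int reg,
			     unsigned int offset)
{
	return offset + reg * 2 + phy * (P2MBCR - P1MBCR);
}

int main(void)
{
	/* PHY 0, BMCR -> 0x4C (P1MBCR); PHY 1, BMSR -> 0x5A (P2MBSR) */
	printf("0x%02X 0x%02X\n",
	       phy_addr(0, MII_BMCR, P1MBCR),
	       phy_addr(1, MII_BMSR, P1MBCR));
	return 0;
}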
+int ksz8463_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
+{
+ u16 sw_reg = 0;
+ u16 data = 0;
+ int ret;
+
+ if (phy > 1)
+ return -ENOSPC;
+ switch (reg) {
+ case MII_PHYSID1:
+ sw_reg = ksz8463_get_phy_addr(phy, 0, PHY1IHR);
+ break;
+ case MII_PHYSID2:
+ sw_reg = ksz8463_get_phy_addr(phy, 0, PHY1ILR);
+ break;
+ case MII_BMCR:
+ case MII_BMSR:
+ case MII_ADVERTISE:
+ case MII_LPA:
+ sw_reg = ksz8463_get_phy_addr(phy, reg, P1MBCR);
+ break;
+ case MII_TPISTATUS:
+ /* This register holds the PHY interrupt status for simulated
+ * Micrel KSZ PHY.
+ */
+ data = 0x0505;
+ break;
+ default:
+ break;
+ }
+ if (sw_reg) {
+ ret = ksz_read16(dev, sw_reg, &data);
+ if (ret)
+ return ret;
+ }
+ *val = data;
+
+ return 0;
+}
+
+int ksz8463_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
+{
+ u16 sw_reg = 0;
+ int ret;
+
+ if (phy > 1)
+ return -ENOSPC;
+
+ /* No write to fiber port. */
+ if (dev->ports[phy].fiber)
+ return 0;
+ switch (reg) {
+ case MII_BMCR:
+ case MII_ADVERTISE:
+ sw_reg = ksz8463_get_phy_addr(phy, reg, P1MBCR);
+ break;
+ default:
+ break;
+ }
+ if (sw_reg) {
+ ret = ksz_write16(dev, sw_reg, val);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
int ksz8_switch_init(struct ksz_device *dev)
{
dev->cpu_port = fls(dev->info->cpu_ports) - 1;
diff --git a/drivers/net/dsa/microchip/ksz8.h b/drivers/net/dsa/microchip/ksz8.h
index e1c79ff97123..0f2cd1474b44 100644
--- a/drivers/net/dsa/microchip/ksz8.h
+++ b/drivers/net/dsa/microchip/ksz8.h
@@ -63,4 +63,8 @@ void ksz8_phylink_mac_link_up(struct phylink_config *config,
bool tx_pause, bool rx_pause);
int ksz8_all_queues_split(struct ksz_device *dev, int queues);
+u32 ksz8463_get_port_addr(int port, int offset);
+int ksz8463_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
+int ksz8463_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
+
#endif
diff --git a/drivers/net/dsa/microchip/ksz8_reg.h b/drivers/net/dsa/microchip/ksz8_reg.h
index 329688603a58..332408567b47 100644
--- a/drivers/net/dsa/microchip/ksz8_reg.h
+++ b/drivers/net/dsa/microchip/ksz8_reg.h
@@ -729,6 +729,55 @@
#define PHY_POWER_SAVING_ENABLE BIT(2)
#define PHY_REMOTE_LOOPBACK BIT(1)
+/* KSZ8463 specific registers. */
+#define P1MBCR 0x4C
+#define P1MBSR 0x4E
+#define PHY1ILR 0x50
+#define PHY1IHR 0x52
+#define P1ANAR 0x54
+#define P1ANLPR 0x56
+#define P2MBCR 0x58
+#define P2MBSR 0x5A
+#define PHY2ILR 0x5C
+#define PHY2IHR 0x5E
+#define P2ANAR 0x60
+#define P2ANLPR 0x62
+
+#define P1CR1 0x6C
+#define P1CR2 0x6E
+#define P1CR3 0x72
+#define P1CR4 0x7E
+#define P1SR 0x80
+
+#define KSZ8463_FLUSH_TABLE_CTRL 0xAD
+
+#define KSZ8463_FLUSH_DYN_MAC_TABLE BIT(2)
+#define KSZ8463_FLUSH_STA_MAC_TABLE BIT(1)
+
+#define KSZ8463_REG_SW_CTRL_9 0xAE
+
+#define KSZ8463_REG_CFG_CTRL 0xD8
+
+#define PORT_2_COPPER_MODE BIT(7)
+#define PORT_1_COPPER_MODE BIT(6)
+#define PORT_COPPER_MODE_S 6
+
+#define KSZ8463_REG_SW_RESET 0x126
+
+#define KSZ8463_GLOBAL_SOFTWARE_RESET BIT(0)
+
+#define KSZ8463_PTP_CLK_CTRL 0x600
+
+#define PTP_CLK_ENABLE BIT(1)
+
+#define KSZ8463_PTP_MSG_CONF1 0x620
+
+#define PTP_ENABLE BIT(6)
+
+#define KSZ8463_REG_DSP_CTRL_6 0x734
+
+#define COPPER_RECEIVE_ADJUSTMENT BIT(13)
+
/* Chip resource */
#define PRIO_QUEUES 4
@@ -784,7 +833,9 @@
#define KSZ8795_MIB_TOTAL_TX_1 0x105
#define KSZ8863_MIB_PACKET_DROPPED_TX_0 0x100
-#define KSZ8863_MIB_PACKET_DROPPED_RX_0 0x105
+#define KSZ8863_MIB_PACKET_DROPPED_RX_0 0x103
+
+#define KSZ8895_MIB_PACKET_DROPPED_RX_0 0x105
#define MIB_PACKET_DROPPED 0x0000FFFF
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 0ba658a72d8f..5facffbb9c9a 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -2,7 +2,7 @@
/*
* Microchip KSZ9477 switch driver main logic
*
- * Copyright (C) 2017-2019 Microchip Technology Inc.
+ * Copyright (C) 2017-2025 Microchip Technology Inc.
*/
#include <linux/kernel.h>
@@ -161,6 +161,190 @@ static int ksz9477_wait_alu_sta_ready(struct ksz_device *dev)
10, 1000);
}
+static void port_sgmii_s(struct ksz_device *dev, uint port, u16 devid, u16 reg)
+{
+ u32 data;
+
+ data = (devid & MII_MMD_CTRL_DEVAD_MASK) << 16;
+ data |= reg;
+ ksz_pwrite32(dev, port, REG_PORT_SGMII_ADDR__4, data);
+}
+
+static void port_sgmii_r(struct ksz_device *dev, uint port, u16 devid, u16 reg,
+ u16 *buf)
+{
+ port_sgmii_s(dev, port, devid, reg);
+ ksz_pread16(dev, port, REG_PORT_SGMII_DATA__4 + 2, buf);
+}
+
+static void port_sgmii_w(struct ksz_device *dev, uint port, u16 devid, u16 reg,
+ u16 buf)
+{
+ port_sgmii_s(dev, port, devid, reg);
+ ksz_pwrite32(dev, port, REG_PORT_SGMII_DATA__4, buf);
+}
+
+static int ksz9477_pcs_read(struct mii_bus *bus, int phy, int mmd, int reg)
+{
+ struct ksz_device *dev = bus->priv;
+ int port = ksz_get_sgmii_port(dev);
+ u16 val;
+
+ port_sgmii_r(dev, port, mmd, reg, &val);
+
+ /* Simulate a value to activate special code in the XPCS driver if
+ * supported.
+ */
+ if (mmd == MDIO_MMD_PMAPMD) {
+ if (reg == MDIO_DEVID1)
+ val = 0x9477;
+ else if (reg == MDIO_DEVID2)
+ val = 0x22 << 10;
+ } else if (mmd == MDIO_MMD_VEND2) {
+ struct ksz_port *p = &dev->ports[port];
+
+ /* Need to update MII_BMCR register with the exact speed and
+		 * duplex mode when running in SGMII mode, as this register is
+		 * used to detect the connected speed in that mode.
+ */
+ if (reg == MMD_SR_MII_AUTO_NEG_STATUS) {
+ int duplex, speed;
+
+ if (val & SR_MII_STAT_LINK_UP) {
+ speed = (val >> SR_MII_STAT_S) & SR_MII_STAT_M;
+ if (speed == SR_MII_STAT_1000_MBPS)
+ speed = SPEED_1000;
+ else if (speed == SR_MII_STAT_100_MBPS)
+ speed = SPEED_100;
+ else
+ speed = SPEED_10;
+
+ if (val & SR_MII_STAT_FULL_DUPLEX)
+ duplex = DUPLEX_FULL;
+ else
+ duplex = DUPLEX_HALF;
+
+ if (!p->phydev.link ||
+ p->phydev.speed != speed ||
+ p->phydev.duplex != duplex) {
+ u16 ctrl;
+
+ p->phydev.link = 1;
+ p->phydev.speed = speed;
+ p->phydev.duplex = duplex;
+ port_sgmii_r(dev, port, mmd, MII_BMCR,
+ &ctrl);
+ ctrl &= BMCR_ANENABLE;
+ ctrl |= mii_bmcr_encode_fixed(speed,
+ duplex);
+ port_sgmii_w(dev, port, mmd, MII_BMCR,
+ ctrl);
+ }
+ } else {
+ p->phydev.link = 0;
+ }
+ } else if (reg == MII_BMSR) {
+ p->phydev.link = !!(val & BMSR_LSTATUS);
+ }
+ }
+
+ return val;
+}
+
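When the SGMII link resolves, the code above rewrites MII_BMCR so its fixed speed/duplex bits match the result while keeping auto-negotiation enabled. The sketch below shows the kind of encoding mii_bmcr_encode_fixed() performs, using the standard BMCR bit values; it is an illustrative reimplementation under those assumptions, not the kernel helper itself (which also covers additional speeds).

#include <stdint.h>
#include <stdio.h>

#define BMCR_SPEED1000	0x0040
#define BMCR_FULLDPLX	0x0100
#define BMCR_ANENABLE	0x1000
#define BMCR_SPEED100	0x2000

/* Illustrative equivalent of mii_bmcr_encode_fixed(speed, duplex). */
static uint16_t bmcr_encode_fixed(int speed, int full_duplex)
{
	uint16_t bmcr = 0;

	if (speed == 1000)
		bmcr |= BMCR_SPEED1000;
	else if (speed == 100)
		bmcr |= BMCR_SPEED100;
	/* 10 Mbit/s uses neither speed bit */
	if (full_duplex)
		bmcr |= BMCR_FULLDPLX;
	return bmcr;
}

int main(void)
{
	/* As in ksz9477_pcs_read(): keep ANENABLE, set the resolved bits. */
	uint16_t ctrl = BMCR_ANENABLE;

	ctrl |= bmcr_encode_fixed(1000, 1);
	printf("BMCR = 0x%04x\n", ctrl);	/* 0x1140 */
	return 0;
}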
+static int ksz9477_pcs_write(struct mii_bus *bus, int phy, int mmd, int reg,
+ u16 val)
+{
+ struct ksz_device *dev = bus->priv;
+ int port = ksz_get_sgmii_port(dev);
+
+ if (mmd == MDIO_MMD_VEND2) {
+ struct ksz_port *p = &dev->ports[port];
+
+ if (reg == MMD_SR_MII_AUTO_NEG_CTRL) {
+ u16 sgmii_mode = SR_MII_PCS_SGMII << SR_MII_PCS_MODE_S;
+
+ /* Need these bits for 1000BASE-X mode to work with
+ * AN on.
+ */
+ if (!(val & sgmii_mode))
+ val |= SR_MII_SGMII_LINK_UP |
+ SR_MII_TX_CFG_PHY_MASTER;
+
+ /* SGMII interrupt in the port cannot be masked, so
+ * make sure interrupt is not enabled as it is not
+ * handled.
+ */
+ val &= ~SR_MII_AUTO_NEG_COMPLETE_INTR;
+ } else if (reg == MII_BMCR) {
+			/* The MII_ADVERTISE register needs to be written once
+ * before doing auto-negotiation for the correct
+ * config_word to be sent out after reset.
+ */
+ if ((val & BMCR_ANENABLE) && !p->sgmii_adv_write) {
+ u16 adv;
+
+ /* The SGMII port cannot disable flow control
+ * so it is better to just advertise symmetric
+ * pause.
+ */
+ port_sgmii_r(dev, port, mmd, MII_ADVERTISE,
+ &adv);
+ adv |= ADVERTISE_1000XPAUSE;
+ adv &= ~ADVERTISE_1000XPSE_ASYM;
+ port_sgmii_w(dev, port, mmd, MII_ADVERTISE,
+ adv);
+ p->sgmii_adv_write = 1;
+ } else if (val & BMCR_RESET) {
+ p->sgmii_adv_write = 0;
+ }
+ } else if (reg == MII_ADVERTISE) {
+ /* XPCS driver writes to this register so there is no
+ * need to update it for the errata.
+ */
+ p->sgmii_adv_write = 1;
+ }
+ }
+ port_sgmii_w(dev, port, mmd, reg, val);
+
+ return 0;
+}
+
+int ksz9477_pcs_create(struct ksz_device *dev)
+{
+	/* This chip has an SGMII port. */
+ if (ksz_has_sgmii_port(dev)) {
+ int port = ksz_get_sgmii_port(dev);
+ struct ksz_port *p = &dev->ports[port];
+ struct phylink_pcs *pcs;
+ struct mii_bus *bus;
+ int ret;
+
+ bus = devm_mdiobus_alloc(dev->dev);
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "ksz_pcs_mdio_bus";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-pcs",
+ dev_name(dev->dev));
+ bus->read_c45 = &ksz9477_pcs_read;
+ bus->write_c45 = &ksz9477_pcs_write;
+ bus->parent = dev->dev;
+ bus->phy_mask = ~0;
+ bus->priv = dev;
+
+ ret = devm_mdiobus_register(dev->dev, bus);
+ if (ret)
+ return ret;
+
+ pcs = xpcs_create_pcs_mdiodev(bus, 0);
+ if (IS_ERR(pcs))
+ return PTR_ERR(pcs);
+ p->pcs = pcs;
+ }
+
+ return 0;
+}
+
int ksz9477_reset_switch(struct ksz_device *dev)
{
u8 data8;
@@ -978,31 +1162,64 @@ void ksz9477_get_caps(struct ksz_device *dev, int port,
if (dev->info->gbit_capable[port])
config->mac_capabilities |= MAC_1000FD;
+
+ if (ksz_is_sgmii_port(dev, port)) {
+ struct ksz_port *p = &dev->ports[port];
+
+ phy_interface_or(config->supported_interfaces,
+ config->supported_interfaces,
+ p->pcs->supported_interfaces);
+ }
}
int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
{
u32 secs = msecs / 1000;
- u8 value;
- u8 data;
+ u8 data, mult, value;
+ u32 max_val;
int ret;
- value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
+#define MAX_TIMER_VAL ((1 << 8) - 1)
- ret = ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
- if (ret < 0)
- return ret;
+ /* The aging timer comprises a 3-bit multiplier and an 8-bit second
+ * value. Either of them cannot be zero. The maximum timer is then
+ * 7 * 255 = 1785 seconds.
+ */
+ if (!secs)
+ secs = 1;
- data = FIELD_GET(SW_AGE_PERIOD_10_8_M, secs);
+ /* Return error if too large. */
+ else if (secs > 7 * MAX_TIMER_VAL)
+ return -EINVAL;
ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value);
if (ret < 0)
return ret;
- value &= ~SW_AGE_CNT_M;
- value |= FIELD_PREP(SW_AGE_CNT_M, data);
+ /* Check whether there is need to update the multiplier. */
+ mult = FIELD_GET(SW_AGE_CNT_M, value);
+ max_val = MAX_TIMER_VAL;
+ if (mult > 0) {
+ /* Try to keep the multiplier already in the register, as the
+ * hardware default uses a multiplier of 4 with a value of 75
+ * seconds to get 300 seconds.
+ */
+ max_val = DIV_ROUND_UP(secs, mult);
+ if (max_val > MAX_TIMER_VAL || max_val * mult != secs)
+ max_val = MAX_TIMER_VAL;
+ }
+
+ data = DIV_ROUND_UP(secs, max_val);
+ if (mult != data) {
+ value &= ~SW_AGE_CNT_M;
+ value |= FIELD_PREP(SW_AGE_CNT_M, data);
+ ret = ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
+ if (ret < 0)
+ return ret;
+ }
- return ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
+ value = DIV_ROUND_UP(secs, data);
+ return ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
}
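
For reference, a minimal userspace sketch (not part of the patch) of the multiplier/value split performed above. DIV_ROUND_UP and the 8-bit maximum are reproduced here only for illustration; cur_mult stands in for the multiplier currently programmed in REG_SW_LUE_CTRL_0.

#include <stdio.h>

#define MAX_TIMER_VAL 255
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Split a requested ageing time into the 3-bit multiplier and the
 * 8-bit seconds value, preferring the multiplier already programmed
 * when it still divides the requested time exactly.
 */
static void split_ageing(unsigned int secs, unsigned int cur_mult)
{
	unsigned int max_val = MAX_TIMER_VAL;
	unsigned int mult, val;

	if (!secs)
		secs = 1;

	if (cur_mult > 0) {
		max_val = DIV_ROUND_UP(secs, cur_mult);
		if (max_val > MAX_TIMER_VAL || max_val * cur_mult != secs)
			max_val = MAX_TIMER_VAL;
	}

	mult = DIV_ROUND_UP(secs, max_val);	/* 3-bit multiplier */
	val = DIV_ROUND_UP(secs, mult);		/* 8-bit seconds value */

	printf("%4us -> mult %u, value %3u (effective %us)\n",
	       secs, mult, val, mult * val);
}

int main(void)
{
	split_ageing(300, 4);	/* hardware default: 4 * 75 */
	split_ageing(400, 4);	/* 4 * 100 */
	split_ageing(1785, 4);	/* maximum: 7 * 255 */
	return 0;
}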
void ksz9477_port_queue_split(struct ksz_device *dev, int port)
@@ -1131,12 +1348,22 @@ void ksz9477_config_cpu_port(struct dsa_switch *ds)
if (i == dev->cpu_port)
continue;
ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);
+
+ /* Power down the internal PHY if port is unused. */
+ if (dsa_is_unused_port(ds, i) && dev->info->internal_phy[i])
+ ksz_pwrite16(dev, i, 0x100, BMCR_PDOWN);
}
}
+#define RESV_MCAST_CNT 8
+
+static u8 reserved_mcast_map[RESV_MCAST_CNT] = { 0, 1, 3, 16, 32, 33, 2, 17 };
+
int ksz9477_enable_stp_addr(struct ksz_device *dev)
{
+ u8 i, ports, update;
const u32 *masks;
+ bool override;
u32 data;
int ret;
@@ -1145,23 +1372,87 @@ int ksz9477_enable_stp_addr(struct ksz_device *dev)
/* Enable Reserved multicast table */
ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_RESV_MCAST_ENABLE, true);
- /* Set the Override bit for forwarding BPDU packet to CPU */
- ret = ksz_write32(dev, REG_SW_ALU_VAL_B,
- ALU_V_OVERRIDE | BIT(dev->cpu_port));
- if (ret < 0)
- return ret;
+ /* The reserved multicast address table has 8 entries. Each entry has
+ * a default value specifying which ports to forward to. The defaults
+ * assume the host port is the last port, which holds for most of the
+ * switches but not for KSZ9477 and possibly KSZ9897. For the LAN937X
+ * family the default port is port 5, the first RGMII port; that is
+ * fine for LAN9370, a 5-port switch, but may not be correct for the
+ * other 8-port versions. The whole table therefore needs to be
+ * updated to forward to the right ports.
+ * Furthermore, PTP messages can use a reserved multicast address and
+ * the host will not receive them if this table is not correct.
+ */
+ for (i = 0; i < RESV_MCAST_CNT; i++) {
+ data = reserved_mcast_map[i] <<
+ dev->info->shifts[ALU_STAT_INDEX];
+ data |= ALU_STAT_START |
+ masks[ALU_STAT_DIRECT] |
+ masks[ALU_RESV_MCAST_ADDR] |
+ masks[ALU_STAT_READ];
+ ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+ if (ret < 0)
+ return ret;
- data = ALU_STAT_START | ALU_RESV_MCAST_ADDR | masks[ALU_STAT_WRITE];
+ /* wait to be finished */
+ ret = ksz9477_wait_alu_sta_ready(dev);
+ if (ret < 0)
+ return ret;
- ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
- if (ret < 0)
- return ret;
+ ret = ksz_read32(dev, REG_SW_ALU_VAL_B, &data);
+ if (ret < 0)
+ return ret;
- /* wait to be finished */
- ret = ksz9477_wait_alu_sta_ready(dev);
- if (ret < 0) {
- dev_err(dev->dev, "Failed to update Reserved Multicast table\n");
- return ret;
+ override = false;
+ ports = data & dev->port_mask;
+ switch (i) {
+ case 0:
+ case 6:
+ /* Change the host port. */
+ update = BIT(dev->cpu_port);
+ override = true;
+ break;
+ case 2:
+ /* Change the host port. */
+ update = BIT(dev->cpu_port);
+ break;
+ case 4:
+ case 5:
+ case 7:
+ /* Skip the host port. */
+ update = dev->port_mask & ~BIT(dev->cpu_port);
+ break;
+ default:
+ update = ports;
+ break;
+ }
+ if (update != ports || override) {
+ data &= ~dev->port_mask;
+ data |= update;
+ /* Set the Override bit so the frame is received even when
+ * the port is closed.
+ */
+ if (override)
+ data |= ALU_V_OVERRIDE;
+ ret = ksz_write32(dev, REG_SW_ALU_VAL_B, data);
+ if (ret < 0)
+ return ret;
+
+ data = reserved_mcast_map[i] <<
+ dev->info->shifts[ALU_STAT_INDEX];
+ data |= ALU_STAT_START |
+ masks[ALU_STAT_DIRECT] |
+ masks[ALU_RESV_MCAST_ADDR] |
+ masks[ALU_STAT_WRITE];
+ ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+ if (ret < 0)
+ return ret;
+
+ /* wait to be finished */
+ ret = ksz9477_wait_alu_sta_ready(dev);
+ if (ret < 0)
+ return ret;
+ }
}
return 0;
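
As an aside, a minimal sketch (not part of the patch) of which forwarding mask each of the 8 reserved multicast entries receives, mirroring the switch statement above. The 7-port mask and host port number are example values only; entries 1 and 3 keep whatever the hardware reports.

#include <stdio.h>

int main(void)
{
	unsigned int port_mask = 0x7f;	/* example: 7-port switch */
	unsigned int cpu_port = 6;	/* example: host on the last port */

	for (int i = 0; i < 8; i++) {
		unsigned int fwd = 0;
		int override = 0, keep = 0;

		switch (i) {
		case 0: case 6:
			fwd = 1u << cpu_port;	/* host only, with override */
			override = 1;
			break;
		case 2:
			fwd = 1u << cpu_port;	/* host only */
			break;
		case 4: case 5: case 7:
			fwd = port_mask & ~(1u << cpu_port); /* all but host */
			break;
		default:
			keep = 1;	/* entries 1 and 3 are left as read */
			break;
		}
		if (keep)
			printf("entry %d: default kept\n", i);
		else
			printf("entry %d: fwd 0x%02x%s\n", i, fwd,
			       override ? " (override)" : "");
	}
	return 0;
}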
diff --git a/drivers/net/dsa/microchip/ksz9477.h b/drivers/net/dsa/microchip/ksz9477.h
index d2166b0d881e..0d1a6dfda23e 100644
--- a/drivers/net/dsa/microchip/ksz9477.h
+++ b/drivers/net/dsa/microchip/ksz9477.h
@@ -2,7 +2,7 @@
/*
* Microchip KSZ9477 series Header file
*
- * Copyright (C) 2017-2022 Microchip Technology Inc.
+ * Copyright (C) 2017-2025 Microchip Technology Inc.
*/
#ifndef __KSZ9477_H
@@ -97,4 +97,6 @@ void ksz9477_acl_match_process_l2(struct ksz_device *dev, int port,
u16 ethtype, u8 *src_mac, u8 *dst_mac,
unsigned long cookie, u32 prio);
+int ksz9477_pcs_create(struct ksz_device *dev);
+
#endif
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index 7d7560f23a73..a2beb27459f1 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -2,7 +2,7 @@
/*
* Microchip KSZ9477 series register access through I2C
*
- * Copyright (C) 2018-2019 Microchip Technology Inc.
+ * Copyright (C) 2018-2024 Microchip Technology Inc.
*/
#include <linux/i2c.h>
@@ -16,6 +16,8 @@ KSZ_REGMAP_TABLE(ksz9477, not_used, 16, 0, 0);
static int ksz9477_i2c_probe(struct i2c_client *i2c)
{
+ const struct ksz_chip_data *chip;
+ struct device *ddev = &i2c->dev;
struct regmap_config rc;
struct ksz_device *dev;
int i, ret;
@@ -24,6 +26,12 @@ static int ksz9477_i2c_probe(struct i2c_client *i2c)
if (!dev)
return -ENOMEM;
+ chip = device_get_match_data(ddev);
+ if (!chip)
+ return -EINVAL;
+
+ /* Save chip id to do special initialization when probing. */
+ dev->chip_id = chip->chip_id;
for (i = 0; i < __KSZ_NUM_REGMAPS; i++) {
rc = ksz9477_regmap_config[i];
rc.lock_arg = &dev->regmap_mutex;
@@ -111,14 +119,22 @@ static const struct of_device_id ksz9477_dt_ids[] = {
.compatible = "microchip,ksz9567",
.data = &ksz_switch_chips[KSZ9567]
},
+ {
+ .compatible = "microchip,lan9646",
+ .data = &ksz_switch_chips[LAN9646]
+ },
{},
};
MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
+static DEFINE_SIMPLE_DEV_PM_OPS(ksz_i2c_pm_ops,
+ ksz_switch_suspend, ksz_switch_resume);
+
static struct i2c_driver ksz9477_i2c_driver = {
.driver = {
.name = "ksz9477-switch",
.of_match_table = ksz9477_dt_ids,
+ .pm = &ksz_i2c_pm_ops,
},
.probe = ksz9477_i2c_probe,
.remove = ksz9477_i2c_remove,
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index 04235c22bf40..61ea11e3338e 100644
--- a/drivers/net/dsa/microchip/ksz9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -2,7 +2,7 @@
/*
* Microchip KSZ9477 register definitions
*
- * Copyright (C) 2017-2018 Microchip Technology Inc.
+ * Copyright (C) 2017-2025 Microchip Technology Inc.
*/
#ifndef __KSZ9477_REGS_H
@@ -165,8 +165,6 @@
#define SW_VLAN_ENABLE BIT(7)
#define SW_DROP_INVALID_VID BIT(6)
#define SW_AGE_CNT_M GENMASK(5, 3)
-#define SW_AGE_CNT_S 3
-#define SW_AGE_PERIOD_10_8_M GENMASK(10, 8)
#define SW_RESV_MCAST_ENABLE BIT(2)
#define SW_HASH_OPTION_M 0x03
#define SW_HASH_OPTION_CRC 1
@@ -399,7 +397,6 @@
#define ALU_RESV_MCAST_INDEX_M (BIT(6) - 1)
#define ALU_STAT_START BIT(7)
-#define ALU_RESV_MCAST_ADDR BIT(1)
#define REG_SW_ALU_VAL_A 0x0420
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index f73833e24622..0c10351fe5eb 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -2,7 +2,7 @@
/*
* Microchip switch driver main logic
*
- * Copyright (C) 2017-2024 Microchip Technology Inc.
+ * Copyright (C) 2017-2025 Microchip Technology Inc.
*/
#include <linux/delay.h>
@@ -23,6 +23,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/micrel_phy.h>
+#include <linux/pinctrl/consumer.h>
#include <net/dsa.h>
#include <net/ieee8021q.h>
#include <net/pkt_cls.h>
@@ -265,16 +266,102 @@ static void ksz_phylink_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface);
+/**
+ * ksz_phylink_mac_disable_tx_lpi() - Callback to signal LPI support (Dummy)
+ * @config: phylink config structure
+ *
+ * This function is a dummy handler. See ksz_phylink_mac_enable_tx_lpi() for
+ * a detailed explanation of EEE/LPI handling in KSZ switches.
+ */
+static void ksz_phylink_mac_disable_tx_lpi(struct phylink_config *config)
+{
+}
+
+/**
+ * ksz_phylink_mac_enable_tx_lpi() - Callback to signal LPI support (Dummy)
+ * @config: phylink config structure
+ * @timer: timer value before entering LPI (unused)
+ * @tx_clock_stop: whether to stop the TX clock in LPI mode (unused)
+ *
+ * This function signals to phylink that the driver architecture supports
+ * LPI management, enabling phylink to control EEE advertisement during
+ * negotiation according to IEEE Std 802.3 (Clause 78).
+ *
+ * Hardware Management of EEE/LPI State:
+ * For KSZ switch ports with integrated PHYs (e.g., KSZ9893R ports 1-2),
+ * observation and testing suggest that the actual EEE / Low Power Idle (LPI)
+ * state transitions are managed autonomously by the hardware based on
+ * the auto-negotiation results. (Note: While the datasheet describes EEE
+ * operation based on negotiation, it doesn't explicitly detail the internal
+ * MAC/PHY interaction, so autonomous hardware management of the MAC state
+ * for LPI is inferred from observed behavior).
+ * This hardware control, consistent with the switch's ability to operate
+ * autonomously via strapping, means MAC-level software intervention is not
+ * required or exposed for managing the LPI state once EEE is negotiated.
+ * (Ref: KSZ9893R Data Sheet DS00002420D, primarily Section 4.7.5 explaining
+ * EEE, also Sections 4.1.7 on Auto-Negotiation and 3.2.1 on Configuration
+ * Straps).
+ *
+ * Additionally, ports configured as MAC interfaces (e.g., KSZ9893R port 3)
+ * lack documented MAC-level LPI control.
+ *
+ * Therefore, this callback performs no action and serves primarily to inform
+ * phylink of LPI awareness and to document the inferred hardware behavior.
+ *
+ * Returns: 0 (Always success)
+ */
+static int ksz_phylink_mac_enable_tx_lpi(struct phylink_config *config,
+ u32 timer, bool tx_clock_stop)
+{
+ return 0;
+}
+
static const struct phylink_mac_ops ksz88x3_phylink_mac_ops = {
.mac_config = ksz88x3_phylink_mac_config,
.mac_link_down = ksz_phylink_mac_link_down,
.mac_link_up = ksz8_phylink_mac_link_up,
+ .mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi,
};
static const struct phylink_mac_ops ksz8_phylink_mac_ops = {
.mac_config = ksz_phylink_mac_config,
.mac_link_down = ksz_phylink_mac_link_down,
.mac_link_up = ksz8_phylink_mac_link_up,
+ .mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi,
+};
+
+static const struct ksz_dev_ops ksz8463_dev_ops = {
+ .setup = ksz8_setup,
+ .get_port_addr = ksz8463_get_port_addr,
+ .cfg_port_member = ksz8_cfg_port_member,
+ .flush_dyn_mac_table = ksz8_flush_dyn_mac_table,
+ .port_setup = ksz8_port_setup,
+ .r_phy = ksz8463_r_phy,
+ .w_phy = ksz8463_w_phy,
+ .r_mib_cnt = ksz8_r_mib_cnt,
+ .r_mib_pkt = ksz8_r_mib_pkt,
+ .r_mib_stat64 = ksz88xx_r_mib_stats64,
+ .freeze_mib = ksz8_freeze_mib,
+ .port_init_cnt = ksz8_port_init_cnt,
+ .fdb_dump = ksz8_fdb_dump,
+ .fdb_add = ksz8_fdb_add,
+ .fdb_del = ksz8_fdb_del,
+ .mdb_add = ksz8_mdb_add,
+ .mdb_del = ksz8_mdb_del,
+ .vlan_filtering = ksz8_port_vlan_filtering,
+ .vlan_add = ksz8_port_vlan_add,
+ .vlan_del = ksz8_port_vlan_del,
+ .mirror_add = ksz8_port_mirror_add,
+ .mirror_del = ksz8_port_mirror_del,
+ .get_caps = ksz8_get_caps,
+ .config_cpu_port = ksz8_config_cpu_port,
+ .enable_stp_addr = ksz8_enable_stp_addr,
+ .reset = ksz8_reset_switch,
+ .init = ksz8_switch_init,
+ .exit = ksz8_switch_exit,
+ .change_mtu = ksz8_change_mtu,
};
static const struct ksz_dev_ops ksz88xx_dev_ops = {
@@ -354,10 +441,29 @@ static void ksz9477_phylink_mac_link_up(struct phylink_config *config,
int speed, int duplex, bool tx_pause,
bool rx_pause);
+static struct phylink_pcs *
+ksz_phylink_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ksz_device *dev = dp->ds->priv;
+ struct ksz_port *p = &dev->ports[dp->index];
+
+ if (ksz_is_sgmii_port(dev, dp->index) &&
+ (interface == PHY_INTERFACE_MODE_SGMII ||
+ interface == PHY_INTERFACE_MODE_1000BASEX))
+ return p->pcs;
+
+ return NULL;
+}
+
static const struct phylink_mac_ops ksz9477_phylink_mac_ops = {
.mac_config = ksz_phylink_mac_config,
.mac_link_down = ksz_phylink_mac_link_down,
.mac_link_up = ksz9477_phylink_mac_link_up,
+ .mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi,
+ .mac_select_pcs = ksz_phylink_mac_select_pcs,
};
static const struct ksz_dev_ops ksz9477_dev_ops = {
@@ -395,12 +501,15 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
.reset = ksz9477_reset_switch,
.init = ksz9477_switch_init,
.exit = ksz9477_switch_exit,
+ .pcs_create = ksz9477_pcs_create,
};
static const struct phylink_mac_ops lan937x_phylink_mac_ops = {
.mac_config = ksz_phylink_mac_config,
.mac_link_down = ksz_phylink_mac_link_down,
.mac_link_up = ksz9477_phylink_mac_link_up,
+ .mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi,
};
static const struct ksz_dev_ops lan937x_dev_ops = {
@@ -411,6 +520,8 @@ static const struct ksz_dev_ops lan937x_dev_ops = {
.flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
.port_setup = lan937x_port_setup,
.set_ageing_time = lan937x_set_ageing_time,
+ .mdio_bus_preinit = lan937x_mdio_bus_preinit,
+ .create_phy_addr_map = lan937x_create_phy_addr_map,
.r_phy = lan937x_r_phy,
.w_phy = lan937x_w_phy,
.r_mib_cnt = ksz9477_r_mib_cnt,
@@ -439,6 +550,60 @@ static const struct ksz_dev_ops lan937x_dev_ops = {
.exit = lan937x_switch_exit,
};
+static const u16 ksz8463_regs[] = {
+ [REG_SW_MAC_ADDR] = 0x10,
+ [REG_IND_CTRL_0] = 0x30,
+ [REG_IND_DATA_8] = 0x26,
+ [REG_IND_DATA_CHECK] = 0x26,
+ [REG_IND_DATA_HI] = 0x28,
+ [REG_IND_DATA_LO] = 0x2C,
+ [REG_IND_MIB_CHECK] = 0x2F,
+ [P_FORCE_CTRL] = 0x0C,
+ [P_LINK_STATUS] = 0x0E,
+ [P_LOCAL_CTRL] = 0x0C,
+ [P_NEG_RESTART_CTRL] = 0x0D,
+ [P_REMOTE_STATUS] = 0x0E,
+ [P_SPEED_STATUS] = 0x0F,
+ [S_TAIL_TAG_CTRL] = 0xAD,
+ [P_STP_CTRL] = 0x6F,
+ [S_START_CTRL] = 0x01,
+ [S_BROADCAST_CTRL] = 0x06,
+ [S_MULTICAST_CTRL] = 0x04,
+};
+
+static const u32 ksz8463_masks[] = {
+ [PORT_802_1P_REMAPPING] = BIT(3),
+ [SW_TAIL_TAG_ENABLE] = BIT(0),
+ [MIB_COUNTER_OVERFLOW] = BIT(7),
+ [MIB_COUNTER_VALID] = BIT(6),
+ [VLAN_TABLE_FID] = GENMASK(15, 12),
+ [VLAN_TABLE_MEMBERSHIP] = GENMASK(18, 16),
+ [VLAN_TABLE_VALID] = BIT(19),
+ [STATIC_MAC_TABLE_VALID] = BIT(19),
+ [STATIC_MAC_TABLE_USE_FID] = BIT(21),
+ [STATIC_MAC_TABLE_FID] = GENMASK(25, 22),
+ [STATIC_MAC_TABLE_OVERRIDE] = BIT(20),
+ [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(18, 16),
+ [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(1, 0),
+ [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(2),
+ [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 24),
+ [DYNAMIC_MAC_TABLE_FID] = GENMASK(19, 16),
+ [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(21, 20),
+ [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(23, 22),
+};
+
+static u8 ksz8463_shifts[] = {
+ [VLAN_TABLE_MEMBERSHIP_S] = 16,
+ [STATIC_MAC_FWD_PORTS] = 16,
+ [STATIC_MAC_FID] = 22,
+ [DYNAMIC_MAC_ENTRIES_H] = 8,
+ [DYNAMIC_MAC_ENTRIES] = 24,
+ [DYNAMIC_MAC_FID] = 16,
+ [DYNAMIC_MAC_TIMESTAMP] = 22,
+ [DYNAMIC_MAC_SRC_PORT] = 20,
+};
+
static const u16 ksz8795_regs[] = {
[REG_SW_MAC_ADDR] = 0x68,
[REG_IND_CTRL_0] = 0x6E,
@@ -643,6 +808,8 @@ static const u16 ksz9477_regs[] = {
static const u32 ksz9477_masks[] = {
[ALU_STAT_WRITE] = 0,
[ALU_STAT_READ] = 1,
+ [ALU_STAT_DIRECT] = 0,
+ [ALU_RESV_MCAST_ADDR] = BIT(1),
[P_MII_TX_FLOW_CTRL] = BIT(5),
[P_MII_RX_FLOW_CTRL] = BIT(3),
};
@@ -670,6 +837,8 @@ static const u8 ksz9477_xmii_ctrl1[] = {
static const u32 lan937x_masks[] = {
[ALU_STAT_WRITE] = 1,
[ALU_STAT_READ] = 2,
+ [ALU_STAT_DIRECT] = BIT(3),
+ [ALU_RESV_MCAST_ADDR] = BIT(2),
[P_MII_TX_FLOW_CTRL] = BIT(5),
[P_MII_RX_FLOW_CTRL] = BIT(3),
};
@@ -1033,8 +1202,7 @@ static const struct regmap_range ksz9477_valid_regs[] = {
regmap_reg_range(0x701b, 0x701b),
regmap_reg_range(0x701f, 0x7020),
regmap_reg_range(0x7030, 0x7030),
- regmap_reg_range(0x7200, 0x7203),
- regmap_reg_range(0x7206, 0x7207),
+ regmap_reg_range(0x7200, 0x7207),
regmap_reg_range(0x7300, 0x7301),
regmap_reg_range(0x7400, 0x7401),
regmap_reg_range(0x7403, 0x7403),
@@ -1098,10 +1266,9 @@ static const struct regmap_range ksz9896_valid_regs[] = {
regmap_reg_range(0x1030, 0x1030),
regmap_reg_range(0x1100, 0x1115),
regmap_reg_range(0x111a, 0x111f),
- regmap_reg_range(0x1122, 0x1127),
- regmap_reg_range(0x112a, 0x112b),
- regmap_reg_range(0x1136, 0x1139),
- regmap_reg_range(0x113e, 0x113f),
+ regmap_reg_range(0x1120, 0x112b),
+ regmap_reg_range(0x1134, 0x113b),
+ regmap_reg_range(0x113c, 0x113f),
regmap_reg_range(0x1400, 0x1401),
regmap_reg_range(0x1403, 0x1403),
regmap_reg_range(0x1410, 0x1417),
@@ -1128,10 +1295,9 @@ static const struct regmap_range ksz9896_valid_regs[] = {
regmap_reg_range(0x2030, 0x2030),
regmap_reg_range(0x2100, 0x2115),
regmap_reg_range(0x211a, 0x211f),
- regmap_reg_range(0x2122, 0x2127),
- regmap_reg_range(0x212a, 0x212b),
- regmap_reg_range(0x2136, 0x2139),
- regmap_reg_range(0x213e, 0x213f),
+ regmap_reg_range(0x2120, 0x212b),
+ regmap_reg_range(0x2134, 0x213b),
+ regmap_reg_range(0x213c, 0x213f),
regmap_reg_range(0x2400, 0x2401),
regmap_reg_range(0x2403, 0x2403),
regmap_reg_range(0x2410, 0x2417),
@@ -1158,10 +1324,9 @@ static const struct regmap_range ksz9896_valid_regs[] = {
regmap_reg_range(0x3030, 0x3030),
regmap_reg_range(0x3100, 0x3115),
regmap_reg_range(0x311a, 0x311f),
- regmap_reg_range(0x3122, 0x3127),
- regmap_reg_range(0x312a, 0x312b),
- regmap_reg_range(0x3136, 0x3139),
- regmap_reg_range(0x313e, 0x313f),
+ regmap_reg_range(0x3120, 0x312b),
+ regmap_reg_range(0x3134, 0x313b),
+ regmap_reg_range(0x313c, 0x313f),
regmap_reg_range(0x3400, 0x3401),
regmap_reg_range(0x3403, 0x3403),
regmap_reg_range(0x3410, 0x3417),
@@ -1188,10 +1353,9 @@ static const struct regmap_range ksz9896_valid_regs[] = {
regmap_reg_range(0x4030, 0x4030),
regmap_reg_range(0x4100, 0x4115),
regmap_reg_range(0x411a, 0x411f),
- regmap_reg_range(0x4122, 0x4127),
- regmap_reg_range(0x412a, 0x412b),
- regmap_reg_range(0x4136, 0x4139),
- regmap_reg_range(0x413e, 0x413f),
+ regmap_reg_range(0x4120, 0x412b),
+ regmap_reg_range(0x4134, 0x413b),
+ regmap_reg_range(0x413c, 0x413f),
regmap_reg_range(0x4400, 0x4401),
regmap_reg_range(0x4403, 0x4403),
regmap_reg_range(0x4410, 0x4417),
@@ -1218,10 +1382,9 @@ static const struct regmap_range ksz9896_valid_regs[] = {
regmap_reg_range(0x5030, 0x5030),
regmap_reg_range(0x5100, 0x5115),
regmap_reg_range(0x511a, 0x511f),
- regmap_reg_range(0x5122, 0x5127),
- regmap_reg_range(0x512a, 0x512b),
- regmap_reg_range(0x5136, 0x5139),
- regmap_reg_range(0x513e, 0x513f),
+ regmap_reg_range(0x5120, 0x512b),
+ regmap_reg_range(0x5134, 0x513b),
+ regmap_reg_range(0x513c, 0x513f),
regmap_reg_range(0x5400, 0x5401),
regmap_reg_range(0x5403, 0x5403),
regmap_reg_range(0x5410, 0x5417),
@@ -1248,10 +1411,9 @@ static const struct regmap_range ksz9896_valid_regs[] = {
regmap_reg_range(0x6030, 0x6030),
regmap_reg_range(0x6100, 0x6115),
regmap_reg_range(0x611a, 0x611f),
- regmap_reg_range(0x6122, 0x6127),
- regmap_reg_range(0x612a, 0x612b),
- regmap_reg_range(0x6136, 0x6139),
- regmap_reg_range(0x613e, 0x613f),
+ regmap_reg_range(0x6120, 0x612b),
+ regmap_reg_range(0x6134, 0x613b),
+ regmap_reg_range(0x613c, 0x613f),
regmap_reg_range(0x6300, 0x6301),
regmap_reg_range(0x6400, 0x6401),
regmap_reg_range(0x6403, 0x6403),
@@ -1290,6 +1452,7 @@ static const struct regmap_range ksz8873_valid_regs[] = {
regmap_reg_range(0x3f, 0x3f),
/* advanced control registers */
+ regmap_reg_range(0x43, 0x43),
regmap_reg_range(0x60, 0x6f),
regmap_reg_range(0x70, 0x75),
regmap_reg_range(0x76, 0x78),
@@ -1316,6 +1479,29 @@ static const struct regmap_access_table ksz8873_register_set = {
};
const struct ksz_chip_data ksz_switch_chips[] = {
+ [KSZ8463] = {
+ .chip_id = KSZ8463_CHIP_ID,
+ .dev_name = "KSZ8463",
+ .num_vlans = 16,
+ .num_alus = 0,
+ .num_statics = 8,
+ .cpu_ports = 0x4, /* can be configured as cpu port */
+ .port_cnt = 3,
+ .num_tx_queues = 4,
+ .num_ipms = 4,
+ .ops = &ksz8463_dev_ops,
+ .phylink_mac_ops = &ksz88x3_phylink_mac_ops,
+ .mib_names = ksz88xx_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz88xx_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8463_regs,
+ .masks = ksz8463_masks,
+ .shifts = ksz8463_shifts,
+ .supports_mii = {false, false, true},
+ .supports_rmii = {false, false, true},
+ .internal_phy = {true, true, false},
+ },
+
[KSZ8563] = {
.chip_id = KSZ8563_CHIP_ID,
.dev_name = "KSZ8563",
@@ -1343,6 +1529,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.supports_rgmii = {false, false, true},
.internal_phy = {true, true, false},
.gbit_capable = {false, false, true},
+ .ptp_capable = true,
.wr_table = &ksz8563_register_set,
.rd_table = &ksz8563_register_set,
},
@@ -1554,6 +1741,8 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.internal_phy = {true, true, true, true,
true, false, false},
.gbit_capable = {true, true, true, true, true, true, true},
+ .ptp_capable = true,
+ .sgmii_port = 7,
.wr_table = &ksz9477_register_set,
.rd_table = &ksz9477_register_set,
},
@@ -1681,6 +1870,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.supports_rgmii = {false, false, true},
.internal_phy = {true, true, false},
.gbit_capable = {true, true, true},
+ .ptp_capable = true,
},
[KSZ8567] = {
@@ -1716,6 +1906,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
true, false, false},
.gbit_capable = {false, false, false, false, false,
true, true},
+ .ptp_capable = true,
},
[KSZ9567] = {
@@ -1748,6 +1939,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.internal_phy = {true, true, true, true,
true, false, false},
.gbit_capable = {true, true, true, true, true, true, true},
+ .ptp_capable = true,
},
[LAN9370] = {
@@ -1762,6 +1954,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_tx_queues = 8,
.num_ipms = 8,
.tc_cbs_supported = true,
+ .phy_side_mdio_supported = true,
.ops = &lan937x_dev_ops,
.phylink_mac_ops = &lan937x_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
@@ -1776,6 +1969,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.supports_rmii = {false, false, false, false, true},
.supports_rgmii = {false, false, false, false, true},
.internal_phy = {true, true, true, true, false},
+ .ptp_capable = true,
},
[LAN9371] = {
@@ -1790,6 +1984,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_tx_queues = 8,
.num_ipms = 8,
.tc_cbs_supported = true,
+ .phy_side_mdio_supported = true,
.ops = &lan937x_dev_ops,
.phylink_mac_ops = &lan937x_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
@@ -1804,6 +1999,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.supports_rmii = {false, false, false, false, true, true},
.supports_rgmii = {false, false, false, false, true, true},
.internal_phy = {true, true, true, true, false, false},
+ .ptp_capable = true,
},
[LAN9372] = {
@@ -1818,6 +2014,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_tx_queues = 8,
.num_ipms = 8,
.tc_cbs_supported = true,
+ .phy_side_mdio_supported = true,
.ops = &lan937x_dev_ops,
.phylink_mac_ops = &lan937x_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
@@ -1836,6 +2033,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
true, true, false, false},
.internal_phy = {true, true, true, true,
false, false, true, true},
+ .ptp_capable = true,
},
[LAN9373] = {
@@ -1850,6 +2048,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_tx_queues = 8,
.num_ipms = 8,
.tc_cbs_supported = true,
+ .phy_side_mdio_supported = true,
.ops = &lan937x_dev_ops,
.phylink_mac_ops = &lan937x_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
@@ -1868,6 +2067,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
true, true, false, false},
.internal_phy = {true, true, true, false,
false, false, true, true},
+ .ptp_capable = true,
},
[LAN9374] = {
@@ -1882,6 +2082,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_tx_queues = 8,
.num_ipms = 8,
.tc_cbs_supported = true,
+ .phy_side_mdio_supported = true,
.ops = &lan937x_dev_ops,
.phylink_mac_ops = &lan937x_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
@@ -1900,6 +2101,43 @@ const struct ksz_chip_data ksz_switch_chips[] = {
true, true, false, false},
.internal_phy = {true, true, true, true,
false, false, true, true},
+ .ptp_capable = true,
+ },
+
+ [LAN9646] = {
+ .chip_id = LAN9646_CHIP_ID,
+ .dev_name = "LAN9646",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x7F, /* can be configured as cpu port */
+ .port_cnt = 7, /* total physical port count */
+ .port_nirqs = 4,
+ .num_tx_queues = 4,
+ .num_ipms = 8,
+ .ops = &ksz9477_dev_ops,
+ .phylink_mac_ops = &ksz9477_phylink_mac_ops,
+ .phy_errata_9477 = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ false, true, true},
+ .supports_rmii = {false, false, false, false,
+ false, true, true},
+ .supports_rgmii = {false, false, false, false,
+ false, true, true},
+ .internal_phy = {true, true, true, true,
+ true, false, false},
+ .gbit_capable = {true, true, true, true, true, true, true},
+ .sgmii_port = 7,
+ .wr_table = &ksz9477_register_set,
+ .rd_table = &ksz9477_register_set,
},
};
EXPORT_SYMBOL_GPL(ksz_switch_chips);
@@ -1970,6 +2208,18 @@ static void ksz_phylink_get_caps(struct dsa_switch *ds, int port,
if (dev->dev_ops->get_caps)
dev->dev_ops->get_caps(dev, port, config);
+
+ if (ds->ops->support_eee && ds->ops->support_eee(ds, port)) {
+ memcpy(config->lpi_interfaces, config->supported_interfaces,
+ sizeof(config->lpi_interfaces));
+
+ config->lpi_capabilities = MAC_100FD;
+ if (dev->info->gbit_capable[port])
+ config->lpi_capabilities |= MAC_1000FD;
+
+ /* EEE is fully operational */
+ config->eee_enabled_default = true;
+ }
}
void ksz_r_mib_stats64(struct ksz_device *dev, int port)
@@ -2021,7 +2271,7 @@ void ksz_r_mib_stats64(struct ksz_device *dev, int port)
spin_unlock(&mib->stats64_lock);
- if (dev->info->phy_errata_9477) {
+ if (dev->info->phy_errata_9477 && !ksz_is_sgmii_port(dev, port)) {
ret = ksz9477_errata_monitor(dev, port, raw->tx_late_col);
if (ret)
dev_err(dev->dev, "Failed to monitor transmission halt\n");
@@ -2212,6 +2462,12 @@ static void ksz_update_port_member(struct ksz_device *dev, int port)
dev->dev_ops->cfg_port_member(dev, i, val | cpu_port);
}
+ /* HSR ports are set up once, so use the assigned membership
+ * when the port is enabled.
+ */
+ if (!port_member && p->stp_state == BR_STATE_FORWARDING &&
+ (dev->hsr_ports & BIT(port)))
+ port_member = dev->hsr_ports;
dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port);
}
@@ -2236,19 +2492,103 @@ static int ksz_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
return dev->dev_ops->w_phy(dev, addr, regnum, val);
}
+/**
+ * ksz_parent_mdio_read - Read data from a PHY register on the parent MDIO bus.
+ * @bus: MDIO bus structure.
+ * @addr: PHY address on the parent MDIO bus.
+ * @regnum: Register number to read.
+ *
+ * This function provides a direct read operation on the parent MDIO bus for
+ * accessing PHY registers. By bypassing SPI or I2C, it uses the parent MDIO bus
+ * to retrieve data from the PHY registers at the specified address and register
+ * number.
+ *
+ * Return: Value of the PHY register, or a negative error code on failure.
+ */
+static int ksz_parent_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct ksz_device *dev = bus->priv;
+
+ return mdiobus_read_nested(dev->parent_mdio_bus, addr, regnum);
+}
+
+/**
+ * ksz_parent_mdio_write - Write data to a PHY register on the parent MDIO bus.
+ * @bus: MDIO bus structure.
+ * @addr: PHY address on the parent MDIO bus.
+ * @regnum: Register number to write to.
+ * @val: Value to write to the PHY register.
+ *
+ * This function provides a direct write operation on the parent MDIO bus for
+ * accessing PHY registers. Bypassing SPI or I2C, it uses the parent MDIO bus
+ * to modify the PHY register values at the specified address.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ksz_parent_mdio_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct ksz_device *dev = bus->priv;
+
+ return mdiobus_write_nested(dev->parent_mdio_bus, addr, regnum, val);
+}
+
+/**
+ * ksz_phy_addr_to_port - Map a PHY address to the corresponding switch port.
+ * @dev: Pointer to device structure.
+ * @addr: PHY address to map to a port.
+ *
+ * This function finds the corresponding switch port for a given PHY address by
+ * iterating over all user ports on the device. It checks if a port's PHY
+ * address in `phy_addr_map` matches the specified address and if the port
+ * contains an internal PHY. If a match is found, the index of the port is
+ * returned.
+ *
+ * Return: Port index on success, or -EINVAL if no matching port is found.
+ */
+static int ksz_phy_addr_to_port(struct ksz_device *dev, int addr)
+{
+ struct dsa_switch *ds = dev->ds;
+ struct dsa_port *dp;
+
+ dsa_switch_for_each_user_port(dp, ds) {
+ if (dev->info->internal_phy[dp->index] &&
+ dev->phy_addr_map[dp->index] == addr)
+ return dp->index;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * ksz_irq_phy_setup - Configure IRQs for PHYs in the KSZ device.
+ * @dev: Pointer to the KSZ device structure.
+ *
+ * Sets up IRQs for each active PHY connected to the KSZ switch by mapping the
+ * appropriate IRQs for each PHY and assigning them to the `user_mii_bus` in
+ * the DSA switch structure. Each IRQ is mapped based on the port's IRQ domain.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
static int ksz_irq_phy_setup(struct ksz_device *dev)
{
struct dsa_switch *ds = dev->ds;
- int phy;
+ int phy, port;
int irq;
int ret;
- for (phy = 0; phy < KSZ_MAX_NUM_PORTS; phy++) {
+ for (phy = 0; phy < PHY_MAX_ADDR; phy++) {
if (BIT(phy) & ds->phys_mii_mask) {
- irq = irq_find_mapping(dev->ports[phy].pirq.domain,
+ port = ksz_phy_addr_to_port(dev, phy);
+ if (port < 0) {
+ ret = port;
+ goto out;
+ }
+
+ irq = irq_find_mapping(dev->ports[port].pirq.domain,
PORT_SRC_PHY_INT);
- if (irq < 0) {
- ret = irq;
+ if (!irq) {
+ ret = -EINVAL;
goto out;
}
ds->user_mii_bus->irq[phy] = irq;
@@ -2263,49 +2603,187 @@ out:
return ret;
}
+/**
+ * ksz_irq_phy_free - Release IRQ mappings for PHYs in the KSZ device.
+ * @dev: Pointer to the KSZ device structure.
+ *
+ * Releases any IRQ mappings previously assigned to active PHYs in the KSZ
+ * switch by disposing of each mapped IRQ in the `user_mii_bus` structure.
+ */
static void ksz_irq_phy_free(struct ksz_device *dev)
{
struct dsa_switch *ds = dev->ds;
int phy;
- for (phy = 0; phy < KSZ_MAX_NUM_PORTS; phy++)
+ for (phy = 0; phy < PHY_MAX_ADDR; phy++)
if (BIT(phy) & ds->phys_mii_mask)
irq_dispose_mapping(ds->user_mii_bus->irq[phy]);
}
+/**
+ * ksz_parse_dt_phy_config - Parse and validate PHY configuration from DT
+ * @dev: pointer to the KSZ device structure
+ * @bus: pointer to the MII bus structure
+ * @mdio_np: pointer to the MDIO node in the device tree
+ *
+ * This function parses and validates PHY configurations for each user port
+ * defined in the device tree for a KSZ switch device. It verifies that the
+ * `phy-handle` properties are correctly set and that the internal PHYs match
+ * expected addresses and parent nodes. Sets up the PHY mask in the MII bus if
+ * all validations pass. Logs error messages for any mismatches or missing data.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ksz_parse_dt_phy_config(struct ksz_device *dev, struct mii_bus *bus,
+ struct device_node *mdio_np)
+{
+ struct device_node *phy_node, *phy_parent_node;
+ bool phys_are_valid = true;
+ struct dsa_port *dp;
+ u32 phy_addr;
+ int ret;
+
+ dsa_switch_for_each_user_port(dp, dev->ds) {
+ if (!dev->info->internal_phy[dp->index])
+ continue;
+
+ phy_node = of_parse_phandle(dp->dn, "phy-handle", 0);
+ if (!phy_node) {
+ dev_err(dev->dev, "failed to parse phy-handle for port %d.\n",
+ dp->index);
+ phys_are_valid = false;
+ continue;
+ }
+
+ phy_parent_node = of_get_parent(phy_node);
+ if (!phy_parent_node) {
+ dev_err(dev->dev, "failed to get PHY-parent node for port %d\n",
+ dp->index);
+ phys_are_valid = false;
+ } else if (phy_parent_node != mdio_np) {
+ dev_err(dev->dev, "PHY-parent node mismatch for port %d, expected %pOF, got %pOF\n",
+ dp->index, mdio_np, phy_parent_node);
+ phys_are_valid = false;
+ } else {
+ ret = of_property_read_u32(phy_node, "reg", &phy_addr);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to read PHY address for port %d. Error %d\n",
+ dp->index, ret);
+ phys_are_valid = false;
+ } else if (phy_addr != dev->phy_addr_map[dp->index]) {
+ dev_err(dev->dev, "PHY address mismatch for port %d, expected 0x%x, got 0x%x\n",
+ dp->index, dev->phy_addr_map[dp->index],
+ phy_addr);
+ phys_are_valid = false;
+ } else {
+ bus->phy_mask |= BIT(phy_addr);
+ }
+ }
+
+ of_node_put(phy_node);
+ of_node_put(phy_parent_node);
+ }
+
+ if (!phys_are_valid)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * ksz_mdio_register - Register and configure the MDIO bus for the KSZ device.
+ * @dev: Pointer to the KSZ device structure.
+ *
+ * This function sets up and registers an MDIO bus for the KSZ switch device,
+ * allowing access to its internal PHYs. If the device supports side MDIO,
+ * the function will configure the external MDIO controller specified by the
+ * "mdio-parent-bus" device tree property to directly manage internal PHYs.
+ * Otherwise, SPI or I2C access is set up for PHY access.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
static int ksz_mdio_register(struct ksz_device *dev)
{
+ struct device_node *parent_bus_node;
+ struct mii_bus *parent_bus = NULL;
struct dsa_switch *ds = dev->ds;
struct device_node *mdio_np;
struct mii_bus *bus;
- int ret;
+ int ret, i;
mdio_np = of_get_child_by_name(dev->dev->of_node, "mdio");
if (!mdio_np)
return 0;
+ parent_bus_node = of_parse_phandle(mdio_np, "mdio-parent-bus", 0);
+ if (parent_bus_node && !dev->info->phy_side_mdio_supported) {
+ dev_err(dev->dev, "Side MDIO bus is not supported for this HW, ignoring 'mdio-parent-bus' property.\n");
+ ret = -EINVAL;
+
+ goto put_mdio_node;
+ } else if (parent_bus_node) {
+ parent_bus = of_mdio_find_bus(parent_bus_node);
+ if (!parent_bus) {
+ ret = -EPROBE_DEFER;
+
+ goto put_mdio_node;
+ }
+
+ dev->parent_mdio_bus = parent_bus;
+ }
+
bus = devm_mdiobus_alloc(ds->dev);
if (!bus) {
- of_node_put(mdio_np);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto put_mdio_node;
+ }
+
+ if (dev->dev_ops->mdio_bus_preinit) {
+ ret = dev->dev_ops->mdio_bus_preinit(dev, !!parent_bus);
+ if (ret)
+ goto put_mdio_node;
+ }
+
+ if (dev->dev_ops->create_phy_addr_map) {
+ ret = dev->dev_ops->create_phy_addr_map(dev, !!parent_bus);
+ if (ret)
+ goto put_mdio_node;
+ } else {
+ for (i = 0; i < dev->info->port_cnt; i++)
+ dev->phy_addr_map[i] = i;
}
bus->priv = dev;
- bus->read = ksz_sw_mdio_read;
- bus->write = ksz_sw_mdio_write;
- bus->name = "ksz user smi";
- snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index);
+ if (parent_bus) {
+ bus->read = ksz_parent_mdio_read;
+ bus->write = ksz_parent_mdio_write;
+ bus->name = "KSZ side MDIO";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "ksz-side-mdio-%d",
+ ds->index);
+ } else {
+ bus->read = ksz_sw_mdio_read;
+ bus->write = ksz_sw_mdio_write;
+ bus->name = "ksz user smi";
+ if (ds->dst->index != 0) {
+ snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d-%d", ds->dst->index, ds->index);
+ } else {
+ snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index);
+ }
+ }
+
+ ret = ksz_parse_dt_phy_config(dev, bus, mdio_np);
+ if (ret)
+ goto put_mdio_node;
+
+ ds->phys_mii_mask = bus->phy_mask;
bus->parent = ds->dev;
- bus->phy_mask = ~ds->phys_mii_mask;
ds->user_mii_bus = bus;
if (dev->irq > 0) {
ret = ksz_irq_phy_setup(dev);
- if (ret) {
- of_node_put(mdio_np);
- return ret;
- }
+ if (ret)
+ goto put_mdio_node;
}
ret = devm_of_mdiobus_register(ds->dev, bus, mdio_np);
@@ -2316,7 +2794,9 @@ static int ksz_mdio_register(struct ksz_device *dev)
ksz_irq_phy_free(dev);
}
+put_mdio_node:
of_node_put(mdio_np);
+ of_node_put(parent_bus_node);
return ret;
}
@@ -2427,8 +2907,8 @@ static int ksz_irq_common_setup(struct ksz_device *dev, struct ksz_irq *kirq)
kirq->dev = dev;
kirq->masked = ~0;
- kirq->domain = irq_domain_add_simple(dev->dev->of_node, kirq->nirqs, 0,
- &ksz_irq_domain_ops, kirq);
+ kirq->domain = irq_domain_create_simple(dev_fwnode(dev->dev), kirq->nirqs, 0,
+ &ksz_irq_domain_ops, kirq);
if (!kirq->domain)
return -ENOMEM;
@@ -2472,8 +2952,8 @@ static int ksz_pirq_setup(struct ksz_device *dev, u8 p)
snprintf(pirq->name, sizeof(pirq->name), "port_irq-%d", p);
pirq->irq_num = irq_find_mapping(dev->girq.domain, p);
- if (pirq->irq_num < 0)
- return pirq->irq_num;
+ if (!pirq->irq_num)
+ return -EINVAL;
return ksz_irq_common_setup(dev, pirq);
}
@@ -2483,6 +2963,7 @@ static int ksz_parse_drive_strength(struct ksz_device *dev);
static int ksz_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
+ u16 storm_mask, storm_rate;
struct dsa_port *dp;
struct ksz_port *p;
const u16 *regs;
@@ -2505,11 +2986,21 @@ static int ksz_setup(struct dsa_switch *ds)
if (ret)
return ret;
+ if (ksz_has_sgmii_port(dev) && dev->dev_ops->pcs_create) {
+ ret = dev->dev_ops->pcs_create(dev);
+ if (ret)
+ return ret;
+ }
+
/* set broadcast storm protection 10% rate */
+ storm_mask = BROADCAST_STORM_RATE;
+ storm_rate = (BROADCAST_STORM_VALUE * BROADCAST_STORM_PROT_RATE) / 100;
+ if (ksz_is_ksz8463(dev)) {
+ storm_mask = swab16(storm_mask);
+ storm_rate = swab16(storm_rate);
+ }
regmap_update_bits(ksz_regmap_16(dev), regs[S_BROADCAST_CTRL],
- BROADCAST_STORM_RATE,
- (BROADCAST_STORM_VALUE *
- BROADCAST_STORM_PROT_RATE) / 100);
+ storm_mask, storm_rate);
dev->dev_ops->config_cpu_port(ds);
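
A small sketch (not part of the patch) of the 10% storm-rate arithmetic and the byte swap applied for KSZ8463; the numeric constants below are assumed values for illustration, not taken from the driver headers.

#include <stdio.h>
#include <stdint.h>

static uint16_t swab16(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
	unsigned int storm_value = 9969;	/* assumed BROADCAST_STORM_VALUE */
	unsigned int prot_rate = 10;		/* assumed 10% protection rate */
	uint16_t mask = 0x07ff;			/* assumed BROADCAST_STORM_RATE */
	uint16_t rate = (uint16_t)(storm_value * prot_rate / 100);

	printf("mask 0x%04x rate 0x%04x\n", mask, rate);
	printf("KSZ8463: mask 0x%04x rate 0x%04x\n",
	       swab16(mask), swab16(rate));
	return 0;
}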
@@ -2547,18 +3038,23 @@ static int ksz_setup(struct dsa_switch *ds)
dsa_switch_for_each_user_port(dp, dev->ds) {
ret = ksz_pirq_setup(dev, dp->index);
if (ret)
- goto out_girq;
+ goto port_release;
- ret = ksz_ptp_irq_setup(ds, dp->index);
- if (ret)
- goto out_pirq;
+ if (dev->info->ptp_capable) {
+ ret = ksz_ptp_irq_setup(ds, dp->index);
+ if (ret)
+ goto pirq_release;
+ }
}
}
- ret = ksz_ptp_clock_register(ds);
- if (ret) {
- dev_err(dev->dev, "Failed to register PTP clock: %d\n", ret);
- goto out_ptpirq;
+ if (dev->info->ptp_capable) {
+ ret = ksz_ptp_clock_register(ds);
+ if (ret) {
+ dev_err(dev->dev, "Failed to register PTP clock: %d\n",
+ ret);
+ goto port_release;
+ }
}
ret = ksz_mdio_register(dev);
@@ -2578,18 +3074,18 @@ static int ksz_setup(struct dsa_switch *ds)
return 0;
out_ptp_clock_unregister:
- ksz_ptp_clock_unregister(ds);
-out_ptpirq:
- if (dev->irq > 0)
- dsa_switch_for_each_user_port(dp, dev->ds)
- ksz_ptp_irq_free(ds, dp->index);
-out_pirq:
- if (dev->irq > 0)
- dsa_switch_for_each_user_port(dp, dev->ds)
+ if (dev->info->ptp_capable)
+ ksz_ptp_clock_unregister(ds);
+port_release:
+ if (dev->irq > 0) {
+ dsa_switch_for_each_user_port_continue_reverse(dp, dev->ds) {
+ if (dev->info->ptp_capable)
+ ksz_ptp_irq_free(ds, dp->index);
+pirq_release:
ksz_irq_free(&dev->ports[dp->index].pirq);
-out_girq:
- if (dev->irq > 0)
+ }
ksz_irq_free(&dev->girq);
+ }
return ret;
}
@@ -2599,11 +3095,13 @@ static void ksz_teardown(struct dsa_switch *ds)
struct ksz_device *dev = ds->priv;
struct dsa_port *dp;
- ksz_ptp_clock_unregister(ds);
+ if (dev->info->ptp_capable)
+ ksz_ptp_clock_unregister(ds);
if (dev->irq > 0) {
dsa_switch_for_each_user_port(dp, dev->ds) {
- ksz_ptp_irq_free(ds, dp->index);
+ if (dev->info->ptp_capable)
+ ksz_ptp_irq_free(ds, dp->index);
ksz_irq_free(&dev->ports[dp->index].pirq);
}
@@ -2730,30 +3228,6 @@ static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port)
if (!port)
return MICREL_KSZ8_P1_ERRATA;
break;
- case KSZ8567_CHIP_ID:
- /* KSZ8567R Errata DS80000752C Module 4 */
- case KSZ8765_CHIP_ID:
- case KSZ8794_CHIP_ID:
- case KSZ8795_CHIP_ID:
- /* KSZ879x/KSZ877x/KSZ876x Errata DS80000687C Module 2 */
- case KSZ9477_CHIP_ID:
- /* KSZ9477S Errata DS80000754A Module 4 */
- case KSZ9567_CHIP_ID:
- /* KSZ9567S Errata DS80000756A Module 4 */
- case KSZ9896_CHIP_ID:
- /* KSZ9896C Errata DS80000757A Module 3 */
- case KSZ9897_CHIP_ID:
- /* KSZ9897R Errata DS80000758C Module 4 */
- /* Energy Efficient Ethernet (EEE) feature select must be manually disabled
- * The EEE feature is enabled by default, but it is not fully
- * operational. It must be manually disabled through register
- * controls. If not disabled, the PHY ports can auto-negotiate
- * to enable EEE, and this feature can cause link drops when
- * linked to another device supporting EEE.
- *
- * The same item appears in the errata for all switches above.
- */
- return MICREL_NO_EEE;
}
return 0;
@@ -3003,6 +3477,7 @@ static void ksz_port_teardown(struct dsa_switch *ds, int port)
case KSZ9893_CHIP_ID:
case KSZ9896_CHIP_ID:
case KSZ9897_CHIP_ID:
+ case LAN9646_CHIP_ID:
if (dsa_is_user_port(ds, port))
ksz9477_port_acl_free(dev, port);
}
@@ -3050,6 +3525,7 @@ static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
proto = DSA_TAG_PROTO_KSZ8795;
if (dev->chip_id == KSZ88X3_CHIP_ID ||
+ dev->chip_id == KSZ8463_CHIP_ID ||
dev->chip_id == KSZ8563_CHIP_ID ||
dev->chip_id == KSZ9893_CHIP_ID ||
dev->chip_id == KSZ9563_CHIP_ID)
@@ -3059,7 +3535,8 @@ static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
dev->chip_id == KSZ9477_CHIP_ID ||
dev->chip_id == KSZ9896_CHIP_ID ||
dev->chip_id == KSZ9897_CHIP_ID ||
- dev->chip_id == KSZ9567_CHIP_ID)
+ dev->chip_id == KSZ9567_CHIP_ID ||
+ dev->chip_id == LAN9646_CHIP_ID)
proto = DSA_TAG_PROTO_KSZ9477;
if (is_lan937x(dev))
@@ -3161,6 +3638,7 @@ static int ksz_max_mtu(struct dsa_switch *ds, int port)
case KSZ8794_CHIP_ID:
case KSZ8765_CHIP_ID:
return KSZ8795_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
+ case KSZ8463_CHIP_ID:
case KSZ88X3_CHIP_ID:
case KSZ8864_CHIP_ID:
case KSZ8895_CHIP_ID:
@@ -3178,63 +3656,74 @@ static int ksz_max_mtu(struct dsa_switch *ds, int port)
case LAN9372_CHIP_ID:
case LAN9373_CHIP_ID:
case LAN9374_CHIP_ID:
+ case LAN9646_CHIP_ID:
return KSZ9477_MAX_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
}
return -EOPNOTSUPP;
}
-static int ksz_validate_eee(struct dsa_switch *ds, int port)
+/**
+ * ksz_support_eee - Determine Energy Efficient Ethernet (EEE) support for a
+ * port
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number to check
+ *
+ * This function also documents devices where EEE was initially advertised but
+ * later withdrawn due to reliability issues, as described in official errata
+ * documents. These devices are explicitly listed to record known limitations,
+ * even if there is no technical necessity for runtime checks.
+ *
+ * Returns: true if the internal PHY on the given port supports fully
+ * operational EEE, false otherwise.
+ */
+static bool ksz_support_eee(struct dsa_switch *ds, int port)
{
struct ksz_device *dev = ds->priv;
if (!dev->info->internal_phy[port])
- return -EOPNOTSUPP;
+ return false;
switch (dev->chip_id) {
case KSZ8563_CHIP_ID:
+ case KSZ9563_CHIP_ID:
+ case KSZ9893_CHIP_ID:
+ return true;
case KSZ8567_CHIP_ID:
+ /* KSZ8567R Errata DS80000752C Module 4 */
+ case KSZ8765_CHIP_ID:
+ case KSZ8794_CHIP_ID:
+ case KSZ8795_CHIP_ID:
+ /* KSZ879x/KSZ877x/KSZ876x Errata DS80000687C Module 2 */
case KSZ9477_CHIP_ID:
- case KSZ9563_CHIP_ID:
+ /* KSZ9477S Errata DS80000754A Module 4 */
case KSZ9567_CHIP_ID:
- case KSZ9893_CHIP_ID:
+ /* KSZ9567S Errata DS80000756A Module 4 */
case KSZ9896_CHIP_ID:
+ /* KSZ9896C Errata DS80000757A Module 3 */
case KSZ9897_CHIP_ID:
- return 0;
+ case LAN9646_CHIP_ID:
+ /* KSZ9897R Errata DS80000758C Module 4 */
+ /* Energy Efficient Ethernet (EEE) feature select must be
+ * manually disabled.
+ * The EEE feature is enabled by default, but it is not fully
+ * operational. It must be manually disabled through register
+ * controls. If not disabled, the PHY ports can auto-negotiate
+ * to enable EEE, and this feature can cause link drops when
+ * linked to another device supporting EEE.
+ *
+ * The same item appears in the errata for all switches above.
+ */
+ break;
}
- return -EOPNOTSUPP;
-}
-
-static int ksz_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_keee *e)
-{
- int ret;
-
- ret = ksz_validate_eee(ds, port);
- if (ret)
- return ret;
-
- /* There is no documented control of Tx LPI configuration. */
- e->tx_lpi_enabled = true;
-
- /* There is no documented control of Tx LPI timer. According to tests
- * Tx LPI timer seems to be set by default to minimal value.
- */
- e->tx_lpi_timer = 0;
-
- return 0;
+ return false;
}
static int ksz_set_mac_eee(struct dsa_switch *ds, int port,
struct ethtool_keee *e)
{
struct ksz_device *dev = ds->priv;
- int ret;
-
- ret = ksz_validate_eee(ds, port);
- if (ret)
- return ret;
if (!e->tx_lpi_enabled) {
dev_err(dev->dev, "Disabling EEE Tx LPI is not supported\n");
@@ -3355,6 +3844,10 @@ static void ksz_phylink_mac_config(struct phylink_config *config,
if (dev->info->internal_phy[port])
return;
+ /* No need to configure XMII control register when using SGMII. */
+ if (ksz_is_sgmii_port(dev, port))
+ return;
+
if (phylink_autoneg_inband(mode)) {
dev_err(dev->dev, "In-band AN not supported!\n");
return;
@@ -3500,6 +3993,9 @@ static int ksz_switch_detect(struct ksz_device *dev)
id2 = FIELD_GET(SW_CHIP_ID_M, id16);
switch (id1) {
+ case KSZ84_FAMILY_ID:
+ dev->chip_id = KSZ8463_CHIP_ID;
+ break;
case KSZ87_FAMILY_ID:
if (id2 == KSZ87_CHIP_ID_95) {
u8 val;
@@ -3552,7 +4048,10 @@ static int ksz_switch_detect(struct ksz_device *dev)
case LAN9372_CHIP_ID:
case LAN9373_CHIP_ID:
case LAN9374_CHIP_ID:
- dev->chip_id = id32;
+
+ /* LAN9646 does not have its own chip id. */
+ if (dev->chip_id != LAN9646_CHIP_ID)
+ dev->chip_id = id32;
break;
case KSZ9893_CHIP_ID:
ret = ksz_read8(dev, REG_CHIP_ID4,
@@ -3591,6 +4090,7 @@ static int ksz_cls_flower_add(struct dsa_switch *ds, int port,
case KSZ9893_CHIP_ID:
case KSZ9896_CHIP_ID:
case KSZ9897_CHIP_ID:
+ case LAN9646_CHIP_ID:
return ksz9477_cls_flower_add(ds, port, cls, ingress);
}
@@ -3611,6 +4111,7 @@ static int ksz_cls_flower_del(struct dsa_switch *ds, int port,
case KSZ9893_CHIP_ID:
case KSZ9896_CHIP_ID:
case KSZ9897_CHIP_ID:
+ case LAN9646_CHIP_ID:
return ksz9477_cls_flower_del(ds, port, cls, ingress);
}
@@ -3736,6 +4237,104 @@ static int ksz_ets_band_to_queue(struct tc_ets_qopt_offload_replace_params *p,
return p->bands - 1 - band;
}
+static u8 ksz8463_tc_ctrl(int port, int queue)
+{
+ u8 reg;
+
+ reg = 0xC8 + port * 4;
+ reg += ((3 - queue) / 2) * 2;
+ reg++;
+ reg -= (queue & 1);
+ return reg;
+}
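
A quick sketch (not part of the patch) that evaluates the ksz8463_tc_ctrl() address formula above for every port/queue pair; it shows each port using four consecutive registers from 0xC8 + 4 * port, with queue 3 at the lowest address and queue 0 at the highest.

#include <stdio.h>

static unsigned int ksz8463_tc_ctrl(int port, int queue)
{
	unsigned int reg = 0xC8 + port * 4;

	reg += ((3 - queue) / 2) * 2;
	reg++;
	reg -= (queue & 1);
	return reg;
}

int main(void)
{
	for (int port = 0; port < 3; port++)
		for (int queue = 0; queue < 4; queue++)
			printf("port %d queue %d -> 0x%02X\n",
			       port, queue, ksz8463_tc_ctrl(port, queue));
	return 0;
}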
+
+/**
+ * ksz88x3_tc_ets_add - Configure ETS (Enhanced Transmission Selection)
+ * for a port on KSZ88x3 switch
+ * @dev: Pointer to the KSZ switch device structure
+ * @port: Port number to configure
+ * @p: Pointer to offload replace parameters describing ETS bands and mapping
+ *
+ * The KSZ88x3 supports two scheduling modes: Strict Priority and
+ * Weighted Fair Queuing (WFQ). Both modes have fixed behavior:
+ * - No configurable queue-to-priority mapping
+ * - No weight adjustment in WFQ mode
+ *
+ * This function configures the switch to use strict priority mode by
+ * clearing the WFQ enable bit for all queues associated with ETS bands.
+ * If strict priority is not explicitly requested, the switch will default
+ * to WFQ mode.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int ksz88x3_tc_ets_add(struct ksz_device *dev, int port,
+ struct tc_ets_qopt_offload_replace_params *p)
+{
+ int ret, band;
+
+ /* Only strict priority mode is supported for now.
+ * WFQ is implicitly enabled when strict mode is disabled.
+ */
+ for (band = 0; band < p->bands; band++) {
+ int queue = ksz_ets_band_to_queue(p, band);
+ u8 reg;
+
+ /* Calculate TXQ Split Control register address for this
+ * port/queue
+ */
+ reg = KSZ8873_TXQ_SPLIT_CTRL_REG(port, queue);
+ if (ksz_is_ksz8463(dev))
+ reg = ksz8463_tc_ctrl(port, queue);
+
+ /* Clear WFQ enable bit to select strict priority scheduling */
+ ret = ksz_rmw8(dev, reg, KSZ8873_TXQ_WFQ_ENABLE, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ksz88x3_tc_ets_del - Reset ETS (Enhanced Transmission Selection) config
+ * for a port on KSZ88x3 switch
+ * @dev: Pointer to the KSZ switch device structure
+ * @port: Port number to reset
+ *
+ * The KSZ88x3 supports only fixed scheduling modes: Strict Priority or
+ * Weighted Fair Queuing (WFQ), with no reconfiguration of weights or
+ * queue mapping. This function resets the port’s scheduling mode to
+ * the default, which is WFQ, by enabling the WFQ bit for all queues.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int ksz88x3_tc_ets_del(struct ksz_device *dev, int port)
+{
+ int ret, queue;
+
+ /* Iterate over all transmit queues for this port */
+ for (queue = 0; queue < dev->info->num_tx_queues; queue++) {
+ u8 reg;
+
+ /* Calculate TXQ Split Control register address for this
+ * port/queue
+ */
+ reg = KSZ8873_TXQ_SPLIT_CTRL_REG(port, queue);
+ if (ksz_is_ksz8463(dev))
+ reg = ksz8463_tc_ctrl(port, queue);
+
+ /* Set WFQ enable bit to revert back to default scheduling
+ * mode
+ */
+ ret = ksz_rmw8(dev, reg, KSZ8873_TXQ_WFQ_ENABLE,
+ KSZ8873_TXQ_WFQ_ENABLE);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int ksz_queue_set_strict(struct ksz_device *dev, int port, int queue)
{
int ret;
@@ -3817,6 +4416,7 @@ static int ksz_tc_ets_del(struct ksz_device *dev, int port)
for (queue = 0; queue < dev->info->num_tx_queues; queue++) {
ret = ksz_queue_set_wrr(dev, port, queue,
KSZ9477_DEFAULT_WRR_WEIGHT);
+
if (ret)
return ret;
}
@@ -3869,7 +4469,7 @@ static int ksz_tc_setup_qdisc_ets(struct dsa_switch *ds, int port,
struct ksz_device *dev = ds->priv;
int ret;
- if (is_ksz8(dev))
+ if (is_ksz8(dev) && !(ksz_is_ksz88x3(dev) || ksz_is_ksz8463(dev)))
return -EOPNOTSUPP;
if (qopt->parent != TC_H_ROOT) {
@@ -3883,9 +4483,16 @@ static int ksz_tc_setup_qdisc_ets(struct dsa_switch *ds, int port,
if (ret)
return ret;
- return ksz_tc_ets_add(dev, port, &qopt->replace_params);
+ if (ksz_is_ksz88x3(dev) || ksz_is_ksz8463(dev))
+ return ksz88x3_tc_ets_add(dev, port,
+ &qopt->replace_params);
+ else
+ return ksz_tc_ets_add(dev, port, &qopt->replace_params);
case TC_ETS_DESTROY:
- return ksz_tc_ets_del(dev, port);
+ if (ksz_is_ksz88x3(dev) || ksz_is_ksz8463(dev))
+ return ksz88x3_tc_ets_del(dev, port);
+ else
+ return ksz_tc_ets_del(dev, port);
case TC_ETS_STATS:
case TC_ETS_GRAFT:
return -EOPNOTSUPP;
@@ -4225,7 +4832,16 @@ int ksz_switch_macaddr_get(struct dsa_switch *ds, int port,
/* Program the switch MAC address to hardware */
for (i = 0; i < ETH_ALEN; i++) {
- ret = ksz_write8(dev, regs[REG_SW_MAC_ADDR] + i, addr[i]);
+ if (ksz_is_ksz8463(dev)) {
+ u16 addr16 = ((u16)addr[i] << 8) | addr[i + 1];
+
+ ret = ksz_write16(dev, regs[REG_SW_MAC_ADDR] + i,
+ addr16);
+ i++;
+ } else {
+ ret = ksz_write8(dev, regs[REG_SW_MAC_ADDR] + i,
+ addr[i]);
+ }
if (ret)
goto macaddr_drop;
}
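
A small sketch (not part of the patch) of how the loop above packs a 6-byte switch MAC address into three big-endian 16-bit writes on KSZ8463; the base offset mirrors the REG_SW_MAC_ADDR entry in ksz8463_regs[] above, and the MAC address is an example value.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint8_t addr[6] = { 0x00, 0x10, 0xA1, 0x98, 0x76, 0x54 };
	unsigned int base = 0x10;	/* REG_SW_MAC_ADDR for KSZ8463 */

	for (int i = 0; i < 6; i += 2) {
		uint16_t addr16 = (uint16_t)((addr[i] << 8) | addr[i + 1]);

		printf("write16 reg 0x%02X = 0x%04X\n", base + i, addr16);
	}
	return 0;
}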
@@ -4327,6 +4943,23 @@ static int ksz_hsr_leave(struct dsa_switch *ds, int port,
return 0;
}
+static int ksz_suspend(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+
+ cancel_delayed_work_sync(&dev->mib_read);
+ return 0;
+}
+
+static int ksz_resume(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (dev->mib_read_interval)
+ schedule_delayed_work(&dev->mib_read, dev->mib_read_interval);
+ return 0;
+}
+
static const struct dsa_switch_ops ksz_switch_ops = {
.get_tag_protocol = ksz_get_tag_protocol,
.connect_tag_protocol = ksz_connect_tag_protocol,
@@ -4367,6 +5000,8 @@ static const struct dsa_switch_ops ksz_switch_ops = {
.port_max_mtu = ksz_max_mtu,
.get_wol = ksz_get_wol,
.set_wol = ksz_set_wol,
+ .suspend = ksz_suspend,
+ .resume = ksz_resume,
.get_ts_info = ksz_get_ts_info,
.port_hwtstamp_get = ksz_hwtstamp_get,
.port_hwtstamp_set = ksz_hwtstamp_set,
@@ -4375,7 +5010,7 @@ static const struct dsa_switch_ops ksz_switch_ops = {
.cls_flower_add = ksz_cls_flower_add,
.cls_flower_del = ksz_cls_flower_del,
.port_setup_tc = ksz_setup_tc,
- .get_mac_eee = ksz_get_mac_eee,
+ .support_eee = ksz_support_eee,
.set_mac_eee = ksz_set_mac_eee,
.port_get_default_prio = ksz_port_get_default_prio,
.port_set_default_prio = ksz_port_set_default_prio,
@@ -4698,6 +5333,7 @@ static int ksz_parse_drive_strength(struct ksz_device *dev)
case KSZ9893_CHIP_ID:
case KSZ9896_CHIP_ID:
case KSZ9897_CHIP_ID:
+ case LAN9646_CHIP_ID:
return ksz9477_drive_strength_write(dev, of_props,
ARRAY_SIZE(of_props));
default:
@@ -4713,6 +5349,38 @@ static int ksz_parse_drive_strength(struct ksz_device *dev)
return 0;
}
+static int ksz8463_configure_straps_spi(struct ksz_device *dev)
+{
+ struct pinctrl *pinctrl;
+ struct gpio_desc *rxd0;
+ struct gpio_desc *rxd1;
+
+ rxd0 = devm_gpiod_get_index_optional(dev->dev, "straps-rxd", 0, GPIOD_OUT_LOW);
+ if (IS_ERR(rxd0))
+ return PTR_ERR(rxd0);
+
+ rxd1 = devm_gpiod_get_index_optional(dev->dev, "straps-rxd", 1, GPIOD_OUT_HIGH);
+ if (IS_ERR(rxd1))
+ return PTR_ERR(rxd1);
+
+ if (!rxd0 && !rxd1)
+ return 0;
+
+ if ((rxd0 && !rxd1) || (rxd1 && !rxd0))
+ return -EINVAL;
+
+ pinctrl = devm_pinctrl_get_select(dev->dev, "reset");
+ if (IS_ERR(pinctrl))
+ return PTR_ERR(pinctrl);
+
+ return 0;
+}
+
+static int ksz8463_release_straps_spi(struct ksz_device *dev)
+{
+ return pinctrl_select_default_state(dev->dev);
+}
+
int ksz_switch_register(struct ksz_device *dev)
{
const struct ksz_chip_data *info;
@@ -4728,10 +5396,22 @@ int ksz_switch_register(struct ksz_device *dev)
return PTR_ERR(dev->reset_gpio);
if (dev->reset_gpio) {
+ if (of_device_is_compatible(dev->dev->of_node, "microchip,ksz8463")) {
+ ret = ksz8463_configure_straps_spi(dev);
+ if (ret)
+ return ret;
+ }
+
gpiod_set_value_cansleep(dev->reset_gpio, 1);
usleep_range(10000, 12000);
gpiod_set_value_cansleep(dev->reset_gpio, 0);
msleep(100);
+
+ if (of_device_is_compatible(dev->dev->of_node, "microchip,ksz8463")) {
+ ret = ksz8463_release_straps_spi(dev);
+ if (ret)
+ return ret;
+ }
}
mutex_init(&dev->dev_mutex);
@@ -4814,6 +5494,9 @@ int ksz_switch_register(struct ksz_device *dev)
&dev->ports[port_num].interface);
ksz_parse_rgmii_delay(dev, port_num, port);
+ dev->ports[port_num].fiber =
+ of_property_read_bool(port,
+ "micrel,fiber-mode");
}
of_node_put(ports);
}
@@ -4865,6 +5548,24 @@ void ksz_switch_remove(struct ksz_device *dev)
}
EXPORT_SYMBOL(ksz_switch_remove);
+#ifdef CONFIG_PM_SLEEP
+int ksz_switch_suspend(struct device *dev)
+{
+ struct ksz_device *priv = dev_get_drvdata(dev);
+
+ return dsa_switch_suspend(priv->ds);
+}
+EXPORT_SYMBOL(ksz_switch_suspend);
+
+int ksz_switch_resume(struct device *dev)
+{
+ struct ksz_device *priv = dev_get_drvdata(dev);
+
+ return dsa_switch_resume(priv->ds);
+}
+EXPORT_SYMBOL(ksz_switch_resume);
+#endif
+
MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
MODULE_DESCRIPTION("Microchip KSZ Series Switch DSA Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index bec846e20682..c65188cd3c0a 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Microchip switch driver common header
*
- * Copyright (C) 2017-2024 Microchip Technology Inc.
+ * Copyright (C) 2017-2025 Microchip Technology Inc.
*/
#ifndef __KSZ_COMMON_H
@@ -10,6 +10,7 @@
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
+#include <linux/pcs/pcs-xpcs.h>
#include <linux/phy.h>
#include <linux/regmap.h>
#include <net/dsa.h>
@@ -65,6 +66,12 @@ struct ksz_chip_data {
u8 num_tx_queues;
u8 num_ipms; /* number of Internal Priority Maps */
bool tc_cbs_supported;
+
+ /**
+ * @phy_side_mdio_supported: Indicates if the chip supports an additional
+ * side MDIO channel for accessing integrated PHYs.
+ */
+ bool phy_side_mdio_supported;
const struct ksz_dev_ops *ops;
const struct phylink_mac_ops *phylink_mac_ops;
bool phy_errata_9477;
@@ -86,6 +93,8 @@ struct ksz_chip_data {
bool supports_rgmii[KSZ_MAX_NUM_PORTS];
bool internal_phy[KSZ_MAX_NUM_PORTS];
bool gbit_capable[KSZ_MAX_NUM_PORTS];
+ bool ptp_capable;
+ u8 sgmii_port;
const struct regmap_access_table *wr_table;
const struct regmap_access_table *rd_table;
};
@@ -125,6 +134,7 @@ struct ksz_port {
u32 force:1;
u32 read:1; /* read MIB counters in background */
u32 freeze:1; /* MIB counter freeze is enabled */
+ u32 sgmii_adv_write:1;
struct ksz_port_mib mib;
phy_interface_t interface;
@@ -134,8 +144,9 @@ struct ksz_port {
void *acl_priv;
struct ksz_irq pirq;
u8 num;
+ struct phylink_pcs *pcs;
#if IS_ENABLED(CONFIG_NET_DSA_MICROCHIP_KSZ_PTP)
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
bool hwts_tx_en;
bool hwts_rx_en;
struct ksz_irq ptpirq;
@@ -191,10 +202,27 @@ struct ksz_device {
struct ksz_switch_macaddr *switch_macaddr;
struct net_device *hsr_dev; /* HSR */
u8 hsr_ports;
+
+ /**
+ * @phy_addr_map: Array mapping switch ports to their corresponding PHY
+ * addresses.
+ */
+ u8 phy_addr_map[KSZ_MAX_NUM_PORTS];
+
+ /**
+ * @parent_mdio_bus: Pointer to the external MDIO bus controller.
+ *
+ * This points to an external MDIO bus controller that is used to access
+ * the PHYs integrated within the switch. Unlike an integrated MDIO
+ * bus, this external controller provides a direct path for managing
+ * the switch’s internal PHYs, bypassing the main SPI interface.
+ */
+ struct mii_bus *parent_mdio_bus;
};
/* List of supported models */
enum ksz_model {
+ KSZ8463,
KSZ8563,
KSZ8567,
KSZ8795,
@@ -214,6 +242,7 @@ enum ksz_model {
LAN9372,
LAN9373,
LAN9374,
+ LAN9646,
};
enum ksz_regs {
@@ -265,6 +294,8 @@ enum ksz_masks {
DYNAMIC_MAC_TABLE_TIMESTAMP,
ALU_STAT_WRITE,
ALU_STAT_READ,
+ ALU_STAT_DIRECT,
+ ALU_RESV_MCAST_ADDR,
P_MII_TX_FLOW_CTRL,
P_MII_RX_FLOW_CTRL,
};
@@ -326,6 +357,43 @@ struct ksz_dev_ops {
void (*port_cleanup)(struct ksz_device *dev, int port);
void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port);
int (*set_ageing_time)(struct ksz_device *dev, unsigned int msecs);
+
+ /**
+ * @mdio_bus_preinit: Function pointer to pre-initialize the MDIO bus
+ * for accessing PHYs.
+ * @dev: Pointer to device structure.
+ * @side_mdio: Boolean indicating if the PHYs are accessed over a side
+ * MDIO bus.
+ *
+ * This function pointer is used to configure the MDIO bus for PHY
+ * access before initiating regular PHY operations. It enables either
+ * SPI/I2C or side MDIO access modes by unlocking necessary registers
+ * and setting up access permissions for the selected mode.
+ *
+ * Return:
+ * - 0 on success.
+ * - Negative error code on failure.
+ */
+ int (*mdio_bus_preinit)(struct ksz_device *dev, bool side_mdio);
+
+ /**
+ * @create_phy_addr_map: Function pointer to create a port-to-PHY
+ * address map.
+ * @dev: Pointer to device structure.
+ * @side_mdio: Boolean indicating if the PHYs are accessed over a side
+ * MDIO bus.
+ *
+ * This function pointer is responsible for mapping switch ports to PHY
+ * addresses according to the configured access mode (SPI or side MDIO)
+ * and the device’s strap configuration. The mapping setup may vary
+ * depending on the chip variant and configuration. Ensures the correct
+ * address mapping for PHY communication.
+ *
+ * Return:
+ * - 0 on success.
+ * - Negative error code on failure (e.g., invalid configuration).
+ */
+ int (*create_phy_addr_map)(struct ksz_device *dev, bool side_mdio);
int (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
int (*w_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
void (*r_mib_cnt)(struct ksz_device *dev, int port, u16 addr,
@@ -379,11 +447,15 @@ struct ksz_dev_ops {
int (*reset)(struct ksz_device *dev);
int (*init)(struct ksz_device *dev);
void (*exit)(struct ksz_device *dev);
+
+ int (*pcs_create)(struct ksz_device *dev);
};
struct ksz_device *ksz_switch_alloc(struct device *base, void *priv);
int ksz_switch_register(struct ksz_device *dev);
void ksz_switch_remove(struct ksz_device *dev);
+int ksz_switch_suspend(struct device *dev);
+int ksz_switch_resume(struct device *dev);
void ksz_init_mib_timer(struct ksz_device *dev);
bool ksz_is_port_mac_global_usable(struct dsa_switch *ds, int port);
@@ -415,6 +487,11 @@ static inline struct regmap *ksz_regmap_32(struct ksz_device *dev)
return dev->regmap[KSZ_REGMAP_32];
}
+static inline bool ksz_is_ksz8463(struct ksz_device *dev)
+{
+ return dev->chip_id == KSZ8463_CHIP_ID;
+}
+
static inline int ksz_read8(struct ksz_device *dev, u32 reg, u8 *val)
{
unsigned int value;
@@ -640,12 +717,13 @@ static inline bool ksz_is_8895_family(struct ksz_device *dev)
static inline bool is_ksz8(struct ksz_device *dev)
{
return ksz_is_ksz87xx(dev) || ksz_is_ksz88x3(dev) ||
- ksz_is_8895_family(dev);
+ ksz_is_8895_family(dev) || ksz_is_ksz8463(dev);
}
static inline bool is_ksz88xx(struct ksz_device *dev)
{
- return ksz_is_ksz88x3(dev) || ksz_is_8895_family(dev);
+ return ksz_is_ksz88x3(dev) || ksz_is_8895_family(dev) ||
+ ksz_is_ksz8463(dev);
}
static inline bool is_ksz9477(struct ksz_device *dev)
@@ -668,6 +746,21 @@ static inline bool is_lan937x_tx_phy(struct ksz_device *dev, int port)
dev->chip_id == LAN9372_CHIP_ID) && port == KSZ_PORT_4;
}
+static inline int ksz_get_sgmii_port(struct ksz_device *dev)
+{
+ return dev->info->sgmii_port - 1;
+}
+
+static inline bool ksz_has_sgmii_port(struct ksz_device *dev)
+{
+ return dev->info->sgmii_port > 0;
+}
+
+static inline bool ksz_is_sgmii_port(struct ksz_device *dev, int port)
+{
+ return dev->info->sgmii_port == port + 1;
+}
+
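The three helpers above rely on sgmii_port in ksz_chip_data being one-based, with 0 meaning the chip has no SGMII port; ksz_get_sgmii_port() converts back to the usual zero-based port index. A minimal usage sketch (illustrative only; where this would run is an assumption, not something this patch adds):

	/* e.g. during per-port setup */
	if (ksz_has_sgmii_port(dev) && ksz_is_sgmii_port(dev, port))
		dev_dbg(dev->dev, "port %d is the SGMII port (index %d)\n",
			port, ksz_get_sgmii_port(dev));
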
/* STP State Defines */
#define PORT_TX_ENABLE BIT(2)
#define PORT_RX_ENABLE BIT(1)
@@ -677,6 +770,7 @@ static inline bool is_lan937x_tx_phy(struct ksz_device *dev, int port)
#define REG_CHIP_ID0 0x00
#define SW_FAMILY_ID_M GENMASK(15, 8)
+#define KSZ84_FAMILY_ID 0x84
#define KSZ87_FAMILY_ID 0x87
#define KSZ88_FAMILY_ID 0x88
#define KSZ8895_FAMILY_ID 0x95
@@ -773,6 +867,25 @@ static inline bool is_lan937x_tx_phy(struct ksz_device *dev, int port)
#define SW_HI_SPEED_DRIVE_STRENGTH_S 4
#define SW_LO_SPEED_DRIVE_STRENGTH_S 0
+/* TXQ Split Control Register for per-port, per-queue configuration.
+ * Register 0xAF is TXQ Split for Q3 on Port 1.
+ * Register offset formula: 0xAF + (port * 4) + (3 - queue)
+ * where: port = 0..2, queue = 0..3
+ */
+#define KSZ8873_TXQ_SPLIT_CTRL_REG(port, queue) \
+ (0xAF + ((port) * 4) + (3 - (queue)))
+
+/* Bit 7 selects between:
+ * 0 = Strict priority mode (highest-priority queue first)
+ * 1 = Weighted Fair Queuing (WFQ) mode:
+ * Queue weights: Q3:Q2:Q1:Q0 = 8:4:2:1
+ * If any queues are empty, weight is redistributed.
+ *
+ * Note: This is referred to as "Weighted Fair Queuing" (WFQ) in KSZ8863/8873
+ * documentation, and as "Weighted Round Robin" (WRR) in KSZ9477 family docs.
+ */
+#define KSZ8873_TXQ_WFQ_ENABLE BIT(7)
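Worked example of the offset formula: Port 2, Queue 1 maps to 0xAF + (1 * 4) + (3 - 1) = 0xB5. A hedged sketch of switching that queue into WFQ mode with these definitions (the helper name is hypothetical; ksz_rmw8() is the read-modify-write accessor this driver already provides):

	/* Illustrative only: enable WFQ for Port 2, Queue 1 on a KSZ8873. */
	static int ksz8873_example_enable_wfq(struct ksz_device *dev)
	{
		return ksz_rmw8(dev, KSZ8873_TXQ_SPLIT_CTRL_REG(1, 1),
				KSZ8873_TXQ_WFQ_ENABLE, KSZ8873_TXQ_WFQ_ENABLE);
	}
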
+
#define KSZ9477_REG_PORT_OUT_RATE_0 0x0420
#define KSZ9477_OUT_RATE_NO_LIMIT 0
@@ -836,4 +949,29 @@ static inline bool is_lan937x_tx_phy(struct ksz_device *dev, int port)
[KSZ_REGMAP_32] = KSZ_REGMAP_ENTRY(32, swp, (regbits), (regpad), (regalign)), \
}
+#define KSZ8463_REGMAP_ENTRY(width, regbits, regpad, regalign) \
+ { \
+ .name = #width, \
+ .val_bits = (width), \
+ .reg_stride = (width / 8), \
+ .reg_bits = (regbits) + (regalign), \
+ .pad_bits = (regpad), \
+ .read = ksz8463_spi_read, \
+ .write = ksz8463_spi_write, \
+ .max_register = BIT(regbits) - 1, \
+ .cache_type = REGCACHE_NONE, \
+ .zero_flag_mask = 1, \
+ .use_single_read = 1, \
+ .use_single_write = 1, \
+ .lock = ksz_regmap_lock, \
+ .unlock = ksz_regmap_unlock, \
+ }
+
+#define KSZ8463_REGMAP_TABLE(ksz, regbits, regpad, regalign) \
+ static const struct regmap_config ksz##_regmap_config[] = { \
+ [KSZ_REGMAP_8] = KSZ8463_REGMAP_ENTRY(8, (regbits), (regpad), (regalign)), \
+ [KSZ_REGMAP_16] = KSZ8463_REGMAP_ENTRY(16, (regbits), (regpad), (regalign)), \
+ [KSZ_REGMAP_32] = KSZ8463_REGMAP_ENTRY(32, (regbits), (regpad), (regalign)), \
+ }
+
#endif
diff --git a/drivers/net/dsa/microchip/ksz_dcb.c b/drivers/net/dsa/microchip/ksz_dcb.c
index 30b4a6186e38..7131c5caac54 100644
--- a/drivers/net/dsa/microchip/ksz_dcb.c
+++ b/drivers/net/dsa/microchip/ksz_dcb.c
@@ -10,11 +10,18 @@
#include "ksz_dcb.h"
#include "ksz8.h"
-#define KSZ8_REG_PORT_1_CTRL_0 0x10
+/* Port X Control 0 register.
+ * The datasheet specifies: Port 1 - 0x10, Port 2 - 0x20, Port 3 - 0x30.
+ * However, the driver uses get_port_addr(), which maps Port 1 to offset 0.
+ * Therefore, we define the base offset as 0x00 here to align with that logic.
+ */
+#define KSZ8_REG_PORT_1_CTRL_0 0x00
+#define KSZ8463_REG_PORT_1_CTRL_0 0x6C
#define KSZ8_PORT_DIFFSERV_ENABLE BIT(6)
#define KSZ8_PORT_802_1P_ENABLE BIT(5)
#define KSZ8_PORT_BASED_PRIO_M GENMASK(4, 3)
+#define KSZ8463_REG_TOS_DSCP_CTRL 0x16
#define KSZ88X3_REG_TOS_DSCP_CTRL 0x60
#define KSZ8765_REG_TOS_DSCP_CTRL 0x90
@@ -93,6 +100,8 @@ static void ksz_get_default_port_prio_reg(struct ksz_device *dev, int *reg,
*reg = KSZ8_REG_PORT_1_CTRL_0;
*mask = KSZ8_PORT_BASED_PRIO_M;
*shift = __bf_shf(KSZ8_PORT_BASED_PRIO_M);
+ if (ksz_is_ksz8463(dev))
+ *reg = KSZ8463_REG_PORT_1_CTRL_0;
} else {
*reg = KSZ9477_REG_PORT_MRI_MAC_CTRL;
*mask = KSZ9477_PORT_BASED_PRIO_M;
@@ -117,10 +126,12 @@ static void ksz_get_dscp_prio_reg(struct ksz_device *dev, int *reg,
*reg = KSZ8765_REG_TOS_DSCP_CTRL;
*per_reg = 4;
*mask = GENMASK(1, 0);
- } else if (ksz_is_ksz88x3(dev)) {
+ } else if (ksz_is_ksz88x3(dev) || ksz_is_ksz8463(dev)) {
*reg = KSZ88X3_REG_TOS_DSCP_CTRL;
*per_reg = 4;
*mask = GENMASK(1, 0);
+ if (ksz_is_ksz8463(dev))
+ *reg = KSZ8463_REG_TOS_DSCP_CTRL;
} else {
*reg = KSZ9477_REG_DIFFSERV_PRIO_MAP;
*per_reg = 2;
@@ -146,6 +157,8 @@ static void ksz_get_apptrust_map_and_reg(struct ksz_device *dev,
*map = ksz8_apptrust_map_to_bit;
*reg = KSZ8_REG_PORT_1_CTRL_0;
*mask = KSZ8_PORT_DIFFSERV_ENABLE | KSZ8_PORT_802_1P_ENABLE;
+ if (ksz_is_ksz8463(dev))
+ *reg = KSZ8463_REG_PORT_1_CTRL_0;
} else {
*map = ksz9477_apptrust_map_to_bit;
*reg = KSZ9477_REG_PORT_MRI_PRIO_CTRL;
@@ -182,49 +195,6 @@ int ksz_port_get_default_prio(struct dsa_switch *ds, int port)
}
/**
- * ksz88x3_port_set_default_prio_quirks - Quirks for default priority
- * @dev: Pointer to the KSZ switch device structure
- * @port: Port number for which to set the default priority
- * @prio: Priority value to set
- *
- * This function implements quirks for setting the default priority on KSZ88x3
- * devices. On Port 2, no other priority providers are working
- * except of PCP. So, configuring default priority on Port 2 is not possible.
- * On Port 1, it is not possible to configure port priority if PCP
- * apptrust on Port 2 is disabled. Since we disable multiple queues on the
- * switch to disable PCP on Port 2, we need to ensure that the default priority
- * configuration on Port 1 is in agreement with the configuration on Port 2.
- *
- * Return: 0 on success, or a negative error code on failure
- */
-static int ksz88x3_port_set_default_prio_quirks(struct ksz_device *dev, int port,
- u8 prio)
-{
- if (!prio)
- return 0;
-
- if (port == KSZ_PORT_2) {
- dev_err(dev->dev, "Port priority configuration is not working on Port 2\n");
- return -EINVAL;
- } else if (port == KSZ_PORT_1) {
- u8 port2_data;
- int ret;
-
- ret = ksz_pread8(dev, KSZ_PORT_2, KSZ8_REG_PORT_1_CTRL_0,
- &port2_data);
- if (ret)
- return ret;
-
- if (!(port2_data & KSZ8_PORT_802_1P_ENABLE)) {
- dev_err(dev->dev, "Not possible to configure port priority on Port 1 if PCP apptrust on Port 2 is disabled\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-/**
* ksz_port_set_default_prio - Sets the default priority for a port on a KSZ
* switch
* @ds: Pointer to the DSA switch structure
@@ -239,18 +209,12 @@ static int ksz88x3_port_set_default_prio_quirks(struct ksz_device *dev, int port
int ksz_port_set_default_prio(struct dsa_switch *ds, int port, u8 prio)
{
struct ksz_device *dev = ds->priv;
- int reg, shift, ret;
+ int reg, shift;
u8 mask;
if (prio >= dev->info->num_ipms)
return -EINVAL;
- if (ksz_is_ksz88x3(dev)) {
- ret = ksz88x3_port_set_default_prio_quirks(dev, port, prio);
- if (ret)
- return ret;
- }
-
ksz_get_default_port_prio_reg(dev, &reg, &mask, &shift);
return ksz_prmw8(dev, port, reg, mask, (prio << shift) & mask);
@@ -519,155 +483,6 @@ err_sel_not_vaild:
}
/**
- * ksz88x3_port1_apptrust_quirk - Quirk for apptrust configuration on Port 1
- * of KSZ88x3 devices
- * @dev: Pointer to the KSZ switch device structure
- * @port: Port number for which to set the apptrust selectors
- * @reg: Register address for the apptrust configuration
- * @port1_data: Data to set for the apptrust configuration
- *
- * This function implements a quirk for apptrust configuration on Port 1 of
- * KSZ88x3 devices. It ensures that apptrust configuration on Port 1 is not
- * possible if PCP apptrust on Port 2 is disabled. This is because the Port 2
- * seems to be permanently hardwired to PCP classification, so we need to
- * do Port 1 configuration always in agreement with Port 2 configuration.
- *
- * Return: 0 on success, or a negative error code on failure
- */
-static int ksz88x3_port1_apptrust_quirk(struct ksz_device *dev, int port,
- int reg, u8 port1_data)
-{
- u8 port2_data;
- int ret;
-
- /* If no apptrust is requested for Port 1, no need to care about Port 2
- * configuration.
- */
- if (!(port1_data & (KSZ8_PORT_802_1P_ENABLE | KSZ8_PORT_DIFFSERV_ENABLE)))
- return 0;
-
- /* We got request to enable any apptrust on Port 1. To make it possible,
- * we need to enable multiple queues on the switch. If we enable
- * multiqueue support, PCP classification on Port 2 will be
- * automatically activated by HW.
- */
- ret = ksz_pread8(dev, KSZ_PORT_2, reg, &port2_data);
- if (ret)
- return ret;
-
- /* If KSZ8_PORT_802_1P_ENABLE bit is set on Port 2, the driver showed
- * the interest in PCP classification on Port 2. In this case,
- * multiqueue support is enabled and we can enable any apptrust on
- * Port 1.
- * If KSZ8_PORT_802_1P_ENABLE bit is not set on Port 2, the PCP
- * classification on Port 2 is still active, but the driver disabled
- * multiqueue support and made frame prioritization inactive for
- * all ports. In this case, we can't enable any apptrust on Port 1.
- */
- if (!(port2_data & KSZ8_PORT_802_1P_ENABLE)) {
- dev_err(dev->dev, "Not possible to enable any apptrust on Port 1 if PCP apptrust on Port 2 is disabled\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * ksz88x3_port2_apptrust_quirk - Quirk for apptrust configuration on Port 2
- * of KSZ88x3 devices
- * @dev: Pointer to the KSZ switch device structure
- * @port: Port number for which to set the apptrust selectors
- * @reg: Register address for the apptrust configuration
- * @port2_data: Data to set for the apptrust configuration
- *
- * This function implements a quirk for apptrust configuration on Port 2 of
- * KSZ88x3 devices. It ensures that DSCP apptrust is not working on Port 2 and
- * that it is not possible to disable PCP on Port 2. The only way to disable PCP
- * on Port 2 is to disable multiple queues on the switch.
- *
- * Return: 0 on success, or a negative error code on failure
- */
-static int ksz88x3_port2_apptrust_quirk(struct ksz_device *dev, int port,
- int reg, u8 port2_data)
-{
- struct dsa_switch *ds = dev->ds;
- u8 port1_data;
- int ret;
-
- /* First validate Port 2 configuration. DiffServ/DSCP is not working
- * on this port.
- */
- if (port2_data & KSZ8_PORT_DIFFSERV_ENABLE) {
- dev_err(dev->dev, "DSCP apptrust is not working on Port 2\n");
- return -EINVAL;
- }
-
- /* If PCP support is requested, we need to enable all queues on the
- * switch to make PCP priority working on Port 2.
- */
- if (port2_data & KSZ8_PORT_802_1P_ENABLE)
- return ksz8_all_queues_split(dev, dev->info->num_tx_queues);
-
- /* We got request to disable PCP priority on Port 2.
- * Now, we need to compare Port 2 configuration with Port 1
- * configuration.
- */
- ret = ksz_pread8(dev, KSZ_PORT_1, reg, &port1_data);
- if (ret)
- return ret;
-
- /* If Port 1 has any apptrust enabled, we can't disable multiple queues
- * on the switch, so we can't disable PCP on Port 2.
- */
- if (port1_data & (KSZ8_PORT_802_1P_ENABLE | KSZ8_PORT_DIFFSERV_ENABLE)) {
- dev_err(dev->dev, "Not possible to disable PCP on Port 2 if any apptrust is enabled on Port 1\n");
- return -EINVAL;
- }
-
- /* Now we need to ensure that default priority on Port 1 is set to 0
- * otherwise we can't disable multiqueue support on the switch.
- */
- ret = ksz_port_get_default_prio(ds, KSZ_PORT_1);
- if (ret < 0) {
- return ret;
- } else if (ret) {
- dev_err(dev->dev, "Not possible to disable PCP on Port 2 if non zero default priority is set on Port 1\n");
- return -EINVAL;
- }
-
- /* Port 1 has no apptrust or default priority set and we got request to
- * disable PCP on Port 2. We can disable multiqueue support to disable
- * PCP on Port 2.
- */
- return ksz8_all_queues_split(dev, 1);
-}
-
-/**
- * ksz88x3_port_apptrust_quirk - Quirk for apptrust configuration on KSZ88x3
- * devices
- * @dev: Pointer to the KSZ switch device structure
- * @port: Port number for which to set the apptrust selectors
- * @reg: Register address for the apptrust configuration
- * @data: Data to set for the apptrust configuration
- *
- * This function implements a quirk for apptrust configuration on KSZ88x3
- * devices. It ensures that apptrust configuration on Port 1 and
- * Port 2 is done in agreement with each other.
- *
- * Return: 0 on success, or a negative error code on failure
- */
-static int ksz88x3_port_apptrust_quirk(struct ksz_device *dev, int port,
- int reg, u8 data)
-{
- if (port == KSZ_PORT_1)
- return ksz88x3_port1_apptrust_quirk(dev, port, reg, data);
- else if (port == KSZ_PORT_2)
- return ksz88x3_port2_apptrust_quirk(dev, port, reg, data);
-
- return 0;
-}
-
-/**
* ksz_port_set_apptrust - Sets the apptrust selectors for a port on a KSZ
* switch
* @ds: Pointer to the DSA switch structure
@@ -707,12 +522,6 @@ int ksz_port_set_apptrust(struct dsa_switch *ds, int port,
}
}
- if (ksz_is_ksz88x3(dev)) {
- ret = ksz88x3_port_apptrust_quirk(dev, port, reg, data);
- if (ret)
- return ret;
- }
-
return ksz_prmw8(dev, port, reg, mask, data);
}
@@ -799,21 +608,5 @@ int ksz_dcb_init_port(struct ksz_device *dev, int port)
*/
int ksz_dcb_init(struct ksz_device *dev)
{
- int ret;
-
- ret = ksz_init_global_dscp_map(dev);
- if (ret)
- return ret;
-
- /* Enable 802.1p priority control on Port 2 during switch initialization.
- * This setup is critical for the apptrust functionality on Port 1, which
- * relies on the priority settings of Port 2. Note: Port 1 is naturally
- * configured before Port 2, necessitating this configuration order.
- */
- if (ksz_is_ksz88x3(dev))
- return ksz_prmw8(dev, KSZ_PORT_2, KSZ8_REG_PORT_1_CTRL_0,
- KSZ8_PORT_802_1P_ENABLE,
- KSZ8_PORT_802_1P_ENABLE);
-
- return 0;
+ return ksz_init_global_dscp_map(dev);
}
diff --git a/drivers/net/dsa/microchip/ksz_ptp.c b/drivers/net/dsa/microchip/ksz_ptp.c
index 22fb9ef4645c..997e4a76d0a6 100644
--- a/drivers/net/dsa/microchip/ksz_ptp.c
+++ b/drivers/net/dsa/microchip/ksz_ptp.c
@@ -319,22 +319,21 @@ int ksz_get_ts_info(struct dsa_switch *ds, int port, struct kernel_ethtool_ts_in
return 0;
}
-int ksz_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
+int ksz_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config)
{
struct ksz_device *dev = ds->priv;
- struct hwtstamp_config *config;
struct ksz_port *prt;
prt = &dev->ports[port];
- config = &prt->tstamp_config;
+ *config = prt->tstamp_config;
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
- -EFAULT : 0;
+ return 0;
}
static int ksz_set_hwtstamp_config(struct ksz_device *dev,
struct ksz_port *prt,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
int ret;
@@ -404,26 +403,21 @@ static int ksz_set_hwtstamp_config(struct ksz_device *dev,
return ksz_ptp_enable_mode(dev);
}
-int ksz_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
+int ksz_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct ksz_device *dev = ds->priv;
- struct hwtstamp_config config;
struct ksz_port *prt;
int ret;
prt = &dev->ports[port];
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- ret = ksz_set_hwtstamp_config(dev, prt, &config);
+ ret = ksz_set_hwtstamp_config(dev, prt, config);
if (ret)
return ret;
- memcpy(&prt->tstamp_config, &config, sizeof(config));
-
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
+ prt->tstamp_config = *config;
return 0;
}
@@ -1099,19 +1093,19 @@ static int ksz_ptp_msg_irq_setup(struct ksz_port *port, u8 n)
static const char * const name[] = {"pdresp-msg", "xdreq-msg",
"sync-msg"};
const struct ksz_dev_ops *ops = port->ksz_dev->dev_ops;
+ struct ksz_irq *ptpirq = &port->ptpirq;
struct ksz_ptp_irq *ptpmsg_irq;
ptpmsg_irq = &port->ptpmsg_irq[n];
+ ptpmsg_irq->num = irq_create_mapping(ptpirq->domain, n);
+ if (!ptpmsg_irq->num)
+ return -EINVAL;
ptpmsg_irq->port = port;
ptpmsg_irq->ts_reg = ops->get_port_addr(port->num, ts_reg[n]);
strscpy(ptpmsg_irq->name, name[n]);
- ptpmsg_irq->num = irq_find_mapping(port->ptpirq.domain, n);
- if (ptpmsg_irq->num < 0)
- return ptpmsg_irq->num;
-
return request_threaded_irq(ptpmsg_irq->num, NULL,
ksz_ptp_msg_thread_fn, IRQF_ONESHOT,
ptpmsg_irq->name, ptpmsg_irq);
@@ -1136,17 +1130,14 @@ int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
init_completion(&port->tstamp_msg_comp);
- ptpirq->domain = irq_domain_add_linear(dev->dev->of_node, ptpirq->nirqs,
- &ksz_ptp_irq_domain_ops, ptpirq);
+ ptpirq->domain = irq_domain_create_linear(dev_fwnode(dev->dev), ptpirq->nirqs,
+ &ksz_ptp_irq_domain_ops, ptpirq);
if (!ptpirq->domain)
return -ENOMEM;
- for (irq = 0; irq < ptpirq->nirqs; irq++)
- irq_create_mapping(ptpirq->domain, irq);
-
ptpirq->irq_num = irq_find_mapping(port->pirq.domain, PORT_SRC_PTP_INT);
- if (ptpirq->irq_num < 0) {
- ret = ptpirq->irq_num;
+ if (!ptpirq->irq_num) {
+ ret = -EINVAL;
goto out;
}
@@ -1165,12 +1156,11 @@ int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
out_ptp_msg:
free_irq(ptpirq->irq_num, ptpirq);
- while (irq--)
+ while (irq--) {
free_irq(port->ptpmsg_irq[irq].num, &port->ptpmsg_irq[irq]);
-out:
- for (irq = 0; irq < ptpirq->nirqs; irq++)
irq_dispose_mapping(port->ptpmsg_irq[irq].num);
-
+ }
+out:
irq_domain_remove(ptpirq->domain);
return ret;
diff --git a/drivers/net/dsa/microchip/ksz_ptp.h b/drivers/net/dsa/microchip/ksz_ptp.h
index 2f1783c0d723..3086e519b1b6 100644
--- a/drivers/net/dsa/microchip/ksz_ptp.h
+++ b/drivers/net/dsa/microchip/ksz_ptp.h
@@ -39,8 +39,11 @@ void ksz_ptp_clock_unregister(struct dsa_switch *ds);
int ksz_get_ts_info(struct dsa_switch *ds, int port,
struct kernel_ethtool_ts_info *ts);
-int ksz_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr);
-int ksz_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr);
+int ksz_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config);
+int ksz_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void ksz_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
void ksz_port_deferred_xmit(struct kthread_work *work);
bool ksz_port_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb,
diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c
index 1c6652f2b9fe..d8001734b057 100644
--- a/drivers/net/dsa/microchip/ksz_spi.c
+++ b/drivers/net/dsa/microchip/ksz_spi.c
@@ -16,6 +16,10 @@
#include "ksz_common.h"
+#define KSZ8463_SPI_ADDR_SHIFT 13
+#define KSZ8463_SPI_ADDR_ALIGN 3
+#define KSZ8463_SPI_TURNAROUND_SHIFT 2
+
#define KSZ8795_SPI_ADDR_SHIFT 12
#define KSZ8795_SPI_ADDR_ALIGN 3
#define KSZ8795_SPI_TURNAROUND_SHIFT 1
@@ -37,6 +41,99 @@ KSZ_REGMAP_TABLE(ksz8863, 16, KSZ8863_SPI_ADDR_SHIFT,
KSZ_REGMAP_TABLE(ksz9477, 32, KSZ9477_SPI_ADDR_SHIFT,
KSZ9477_SPI_TURNAROUND_SHIFT, KSZ9477_SPI_ADDR_ALIGN);
+static u16 ksz8463_reg(u16 reg, size_t size)
+{
+ switch (size) {
+ case 1:
+ reg = ((reg >> 2) << 4) | (1 << (reg & 3));
+ break;
+ case 2:
+ reg = ((reg >> 2) << 4) | (reg & 2 ? 0x0c : 0x03);
+ break;
+ default:
+ reg = ((reg >> 2) << 4) | 0xf;
+ break;
+ }
+ reg <<= KSZ8463_SPI_TURNAROUND_SHIFT;
+ return reg;
+}
+
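ksz8463_reg() packs the 32-bit-aligned register address into the upper bits and a per-byte-lane enable into the low nibble, then shifts the result left to leave room for the turnaround bits. A few worked encodings under the shift values defined above (illustrative only):

	/*
	 * reg = 0x03, size 1: (0x03 >> 2) << 4 = 0x00, byte enable 1 << 3 = 0x08,
	 *                     shifted -> 0x20
	 * reg = 0x02, size 2: upper byte-lane enables 0x0c, shifted -> 0x30
	 * reg = 0x08, size 4: (0x08 >> 2) << 4 = 0x20, ORed with 0x0f = 0x2f,
	 *                     shifted -> 0xbc
	 */
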
+static int ksz8463_spi_read(void *context,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+ u8 bytes[2];
+ u16 cmd;
+ int rc;
+
+ if (reg_size > 2 || val_size > 4)
+ return -EINVAL;
+ memcpy(&cmd, reg, sizeof(u16));
+ cmd = ksz8463_reg(cmd, val_size);
+ /* SPI command uses big-endian format. */
+ put_unaligned_be16(cmd, bytes);
+ rc = spi_write_then_read(spi, bytes, reg_size, val, val_size);
+#if defined(__BIG_ENDIAN)
+ /* Register values use little-endian format, so they need to be
+ * converted when running on a big-endian system.
+ */
+ if (!rc && val_size > 1) {
+ if (val_size == 2) {
+ u16 v = get_unaligned_le16(val);
+
+ memcpy(val, &v, sizeof(v));
+ } else if (val_size == 4) {
+ u32 v = get_unaligned_le32(val);
+
+ memcpy(val, &v, sizeof(v));
+ }
+ }
+#endif
+ return rc;
+}
+
+static int ksz8463_spi_write(void *context, const void *data, size_t count)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+ size_t val_size = count - 2;
+ u8 bytes[6];
+ u16 cmd;
+
+ if (count <= 2 || count > 6)
+ return -EINVAL;
+ memcpy(bytes, data, count);
+ memcpy(&cmd, data, sizeof(u16));
+ cmd = ksz8463_reg(cmd, val_size);
+ cmd |= (1 << (KSZ8463_SPI_ADDR_SHIFT + KSZ8463_SPI_TURNAROUND_SHIFT));
+ /* SPI command uses big-endian format. */
+ put_unaligned_be16(cmd, bytes);
+#if defined(__BIG_ENDIAN)
+ /* Register values use little-endian format, so they need to be
+ * converted when running on a big-endian system.
+ */
+ if (val_size == 2) {
+ u8 *val = &bytes[2];
+ u16 v;
+
+ memcpy(&v, val, sizeof(v));
+ put_unaligned_le16(v, val);
+ } else if (val_size == 4) {
+ u8 *val = &bytes[2];
+ u32 v;
+
+ memcpy(&v, val, sizeof(v));
+ put_unaligned_le32(v, val);
+ }
+#endif
+ return spi_write(spi, bytes, count);
+}
+
+KSZ8463_REGMAP_TABLE(ksz8463, KSZ8463_SPI_ADDR_SHIFT, 0,
+ KSZ8463_SPI_ADDR_ALIGN);
+
static int ksz_spi_probe(struct spi_device *spi)
{
const struct regmap_config *regmap_config;
@@ -54,8 +151,12 @@ static int ksz_spi_probe(struct spi_device *spi)
if (!chip)
return -EINVAL;
+ /* Save chip id to do special initialization when probing. */
+ dev->chip_id = chip->chip_id;
if (chip->chip_id == KSZ88X3_CHIP_ID)
regmap_config = ksz8863_regmap_config;
+ else if (chip->chip_id == KSZ8463_CHIP_ID)
+ regmap_config = ksz8463_regmap_config;
else if (chip->chip_id == KSZ8795_CHIP_ID ||
chip->chip_id == KSZ8794_CHIP_ID ||
chip->chip_id == KSZ8765_CHIP_ID)
@@ -124,6 +225,10 @@ static void ksz_spi_shutdown(struct spi_device *spi)
static const struct of_device_id ksz_dt_ids[] = {
{
+ .compatible = "microchip,ksz8463",
+ .data = &ksz_switch_chips[KSZ8463]
+ },
+ {
.compatible = "microchip,ksz8765",
.data = &ksz_switch_chips[KSZ8765]
},
@@ -203,11 +308,16 @@ static const struct of_device_id ksz_dt_ids[] = {
.compatible = "microchip,lan9374",
.data = &ksz_switch_chips[LAN9374]
},
+ {
+ .compatible = "microchip,lan9646",
+ .data = &ksz_switch_chips[LAN9646]
+ },
{},
};
MODULE_DEVICE_TABLE(of, ksz_dt_ids);
static const struct spi_device_id ksz_spi_ids[] = {
+ { "ksz8463" },
{ "ksz8765" },
{ "ksz8794" },
{ "ksz8795" },
@@ -228,14 +338,19 @@ static const struct spi_device_id ksz_spi_ids[] = {
{ "lan9372" },
{ "lan9373" },
{ "lan9374" },
+ { "lan9646" },
{ },
};
MODULE_DEVICE_TABLE(spi, ksz_spi_ids);
+static DEFINE_SIMPLE_DEV_PM_OPS(ksz_spi_pm_ops,
+ ksz_switch_suspend, ksz_switch_resume);
+
static struct spi_driver ksz_spi_driver = {
.driver = {
.name = "ksz-switch",
.of_match_table = ksz_dt_ids,
+ .pm = &ksz_spi_pm_ops,
},
.id_table = ksz_spi_ids,
.probe = ksz_spi_probe,
diff --git a/drivers/net/dsa/microchip/lan937x.h b/drivers/net/dsa/microchip/lan937x.h
index 3388d91dbc44..df13ebbd356f 100644
--- a/drivers/net/dsa/microchip/lan937x.h
+++ b/drivers/net/dsa/microchip/lan937x.h
@@ -13,6 +13,8 @@ void lan937x_port_setup(struct ksz_device *dev, int port, bool cpu_port);
void lan937x_config_cpu_port(struct dsa_switch *ds);
int lan937x_switch_init(struct ksz_device *dev);
void lan937x_switch_exit(struct ksz_device *dev);
+int lan937x_mdio_bus_preinit(struct ksz_device *dev, bool side_mdio);
+int lan937x_create_phy_addr_map(struct ksz_device *dev, bool side_mdio);
int lan937x_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data);
int lan937x_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val);
int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu);
diff --git a/drivers/net/dsa/microchip/lan937x_main.c b/drivers/net/dsa/microchip/lan937x_main.c
index 824d9309a3d3..5a1496fff445 100644
--- a/drivers/net/dsa/microchip/lan937x_main.c
+++ b/drivers/net/dsa/microchip/lan937x_main.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Microchip LAN937X switch driver main logic
- * Copyright (C) 2019-2022 Microchip Technology Inc.
+ * Copyright (C) 2019-2024 Microchip Technology Inc.
*/
#include <linux/kernel.h>
#include <linux/module.h>
@@ -18,6 +18,87 @@
#include "ksz9477.h"
#include "lan937x.h"
+/* marker for ports without built-in PHY */
+#define LAN937X_NO_PHY U8_MAX
+
+/*
+ * lan9370_phy_addr - Mapping of LAN9370 switch ports to PHY addresses.
+ *
+ * Each entry corresponds to a specific port on the LAN9370 switch,
+ * where ports 1-4 are connected to integrated 100BASE-T1 PHYs, and
+ * Port 5 is connected to an RGMII interface without a PHY. The values
+ * are based on the documentation (DS00003108E, section 3.3).
+ */
+static const u8 lan9370_phy_addr[] = {
+ [0] = 2, /* Port 1, T1 AFE0 */
+ [1] = 3, /* Port 2, T1 AFE1 */
+ [2] = 5, /* Port 3, T1 AFE3 */
+ [3] = 6, /* Port 4, T1 AFE4 */
+ [4] = LAN937X_NO_PHY, /* Port 5, RGMII 2 */
+};
+
+/*
+ * lan9371_phy_addr - Mapping of LAN9371 switch ports to PHY addresses.
+ *
+ * The values are based on the documentation (DS00003109E, section 3.3).
+ */
+static const u8 lan9371_phy_addr[] = {
+ [0] = 2, /* Port 1, T1 AFE0 */
+ [1] = 3, /* Port 2, T1 AFE1 */
+ [2] = 5, /* Port 3, T1 AFE3 */
+ [3] = 8, /* Port 4, TX PHY */
+ [4] = LAN937X_NO_PHY, /* Port 5, RGMII 2 */
+ [5] = LAN937X_NO_PHY, /* Port 6, RGMII 1 */
+};
+
+/*
+ * lan9372_phy_addr - Mapping of LAN9372 switch ports to PHY addresses.
+ *
+ * The values are based on the documentation (DS00003110F, section 3.3).
+ */
+static const u8 lan9372_phy_addr[] = {
+ [0] = 2, /* Port 1, T1 AFE0 */
+ [1] = 3, /* Port 2, T1 AFE1 */
+ [2] = 5, /* Port 3, T1 AFE3 */
+ [3] = 8, /* Port 4, TX PHY */
+ [4] = LAN937X_NO_PHY, /* Port 5, RGMII 2 */
+ [5] = LAN937X_NO_PHY, /* Port 6, RGMII 1 */
+ [6] = 6, /* Port 7, T1 AFE4 */
+ [7] = 4, /* Port 8, T1 AFE2 */
+};
+
+/*
+ * lan9373_phy_addr - Mapping of LAN9373 switch ports to PHY addresses.
+ *
+ * The values are based on the documentation (DS00003110F, section 3.3).
+ */
+static const u8 lan9373_phy_addr[] = {
+ [0] = 2, /* Port 1, T1 AFE0 */
+ [1] = 3, /* Port 2, T1 AFE1 */
+ [2] = 5, /* Port 3, T1 AFE3 */
+ [3] = LAN937X_NO_PHY, /* Port 4, SGMII */
+ [4] = LAN937X_NO_PHY, /* Port 5, RGMII 2 */
+ [5] = LAN937X_NO_PHY, /* Port 6, RGMII 1 */
+ [6] = 6, /* Port 7, T1 AFE4 */
+ [7] = 4, /* Port 8, T1 AFE2 */
+};
+
+/*
+ * lan9374_phy_addr - Mapping of LAN9374 switch ports to PHY addresses.
+ *
+ * The values are based on the documentation (DS00003110F, section 3.3).
+ */
+static const u8 lan9374_phy_addr[] = {
+ [0] = 2, /* Port 1, T1 AFE0 */
+ [1] = 3, /* Port 2, T1 AFE1 */
+ [2] = 5, /* Port 3, T1 AFE3 */
+ [3] = 7, /* Port 4, T1 AFE5 */
+ [4] = LAN937X_NO_PHY, /* Port 5, RGMII 2 */
+ [5] = LAN937X_NO_PHY, /* Port 6, RGMII 1 */
+ [6] = 6, /* Port 7, T1 AFE4 */
+ [7] = 4, /* Port 8, T1 AFE2 */
+};
+
static int lan937x_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
return regmap_update_bits(ksz_regmap_8(dev), addr, bits, set ? bits : 0);
@@ -30,24 +111,144 @@ static int lan937x_port_cfg(struct ksz_device *dev, int port, int offset,
bits, set ? bits : 0);
}
-static int lan937x_enable_spi_indirect_access(struct ksz_device *dev)
+/**
+ * lan937x_create_phy_addr_map - Create port-to-PHY address map for MDIO bus.
+ * @dev: Pointer to device structure.
+ * @side_mdio: Boolean indicating if the PHYs are accessed over a side MDIO bus.
+ *
+ * This function sets up the PHY address mapping for the LAN937x switches,
+ * which support two access modes for internal PHYs:
+ * 1. **SPI Access**: A straightforward one-to-one port-to-PHY address
+ * mapping is applied.
+ * 2. **MDIO Access**: The PHY address mapping varies based on chip variant
+ * and strap configuration. An offset is calculated based on strap settings
+ * to ensure correct PHY addresses are assigned. The offset calculation logic
+ * is based on Microchip's Article Number 000015828, available at:
+ * https://microchip.my.site.com/s/article/LAN9374-Virtual-PHY-PHY-Address-Mapping
+ *
+ * The function first checks if side MDIO access is disabled, in which case a
+ * simple direct mapping (port number = PHY address) is applied. If side MDIO
+ * access is enabled, it reads the strap configuration to determine the correct
+ * offset for PHY addresses.
+ *
+ * The appropriate mapping table is selected based on the chip ID, and the
+ * `phy_addr_map` is populated with the correct addresses for each port. Any
+ * port with no PHY is assigned a `LAN937X_NO_PHY` marker.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+int lan937x_create_phy_addr_map(struct ksz_device *dev, bool side_mdio)
+{
+ static const u8 *phy_addr_map;
+ u32 strap_val;
+ u8 offset = 0;
+ size_t size;
+ int ret, i;
+
+ if (!side_mdio) {
+ /* simple direct mapping */
+ for (i = 0; i < dev->info->port_cnt; i++)
+ dev->phy_addr_map[i] = i;
+
+ return 0;
+ }
+
+ ret = ksz_read32(dev, REG_SW_CFG_STRAP_VAL, &strap_val);
+ if (ret < 0)
+ return ret;
+
+ if (!(strap_val & SW_CASCADE_ID_CFG) && !(strap_val & SW_VPHY_ADD_CFG))
+ offset = 0;
+ else if (!(strap_val & SW_CASCADE_ID_CFG) && (strap_val & SW_VPHY_ADD_CFG))
+ offset = 7;
+ else if ((strap_val & SW_CASCADE_ID_CFG) && !(strap_val & SW_VPHY_ADD_CFG))
+ offset = 15;
+ else
+ offset = 22;
+
+ switch (dev->info->chip_id) {
+ case LAN9370_CHIP_ID:
+ phy_addr_map = lan9370_phy_addr;
+ size = ARRAY_SIZE(lan9370_phy_addr);
+ break;
+ case LAN9371_CHIP_ID:
+ phy_addr_map = lan9371_phy_addr;
+ size = ARRAY_SIZE(lan9371_phy_addr);
+ break;
+ case LAN9372_CHIP_ID:
+ phy_addr_map = lan9372_phy_addr;
+ size = ARRAY_SIZE(lan9372_phy_addr);
+ break;
+ case LAN9373_CHIP_ID:
+ phy_addr_map = lan9373_phy_addr;
+ size = ARRAY_SIZE(lan9373_phy_addr);
+ break;
+ case LAN9374_CHIP_ID:
+ phy_addr_map = lan9374_phy_addr;
+ size = ARRAY_SIZE(lan9374_phy_addr);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (size < dev->info->port_cnt)
+ return -EINVAL;
+
+ for (i = 0; i < dev->info->port_cnt; i++) {
+ if (phy_addr_map[i] == LAN937X_NO_PHY)
+ dev->phy_addr_map[i] = phy_addr_map[i];
+ else
+ dev->phy_addr_map[i] = phy_addr_map[i] + offset;
+ }
+
+ return 0;
+}
+
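A minimal sketch of how the freshly built map might be consulted when translating a switch port into an MDIO address (the helper name is hypothetical; it only touches fields this patch introduces):

	static int lan937x_example_port_to_phy_addr(struct ksz_device *dev,
						    int port)
	{
		if (port >= dev->info->port_cnt ||
		    dev->phy_addr_map[port] == LAN937X_NO_PHY)
			return -ENODEV;

		return dev->phy_addr_map[port];
	}
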
+/**
+ * lan937x_mdio_bus_preinit - Pre-initialize MDIO bus for accessing PHYs.
+ * @dev: Pointer to device structure.
+ * @side_mdio: Boolean indicating if the PHYs are accessed over a side MDIO bus.
+ *
+ * This function configures the LAN937x switch for PHY access either through
+ * SPI or the side MDIO bus, unlocking the necessary registers for each access
+ * mode.
+ *
+ * Operation Modes:
+ * 1. **SPI Access**: Enables SPI indirect access to address clock domain
+ * crossing issues when SPI is used for PHY access.
+ * 2. **MDIO Access**: Grants access to internal PHYs over the side MDIO bus,
+ * required when using the MDIO bus for PHY management.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+int lan937x_mdio_bus_preinit(struct ksz_device *dev, bool side_mdio)
{
u16 data16;
int ret;
- /* Enable Phy access through SPI */
+ /* Unlock access to the PHYs, needed for SPI and side MDIO access */
ret = lan937x_cfg(dev, REG_GLOBAL_CTRL_0, SW_PHY_REG_BLOCK, false);
if (ret < 0)
- return ret;
+ goto print_error;
- ret = ksz_read16(dev, REG_VPHY_SPECIAL_CTRL__2, &data16);
- if (ret < 0)
- return ret;
+ if (side_mdio)
+ /* Allow access to internal PHYs over MDIO bus */
+ data16 = VPHY_MDIO_INTERNAL_ENABLE;
+ else
+ /* Enable SPI indirect access to address the clock domain
+ * crossing issue.
+ */
+ data16 = VPHY_SPI_INDIRECT_ENABLE;
- /* Allow SPI access */
- data16 |= VPHY_SPI_INDIRECT_ENABLE;
+ ret = ksz_rmw16(dev, REG_VPHY_SPECIAL_CTRL__2,
+ VPHY_SPI_INDIRECT_ENABLE | VPHY_MDIO_INTERNAL_ENABLE,
+ data16);
- return ksz_write16(dev, REG_VPHY_SPECIAL_CTRL__2, data16);
+print_error:
+ if (ret < 0)
+ dev_err(dev->dev, "failed to preinit the MDIO bus\n");
+
+ return ret;
}
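A hedged sketch of how the common layer could invoke this hook through the ksz_dev_ops table before registering the internal MDIO bus; how side_mdio is actually decided (e.g. from an optional devicetree node) is an assumption here, not something this hunk shows:

	bool side_mdio = false;	/* assumed to come from DT probing */
	int ret;

	if (dev->dev_ops->mdio_bus_preinit) {
		ret = dev->dev_ops->mdio_bus_preinit(dev, side_mdio);
		if (ret)
			return ret;
	}
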
static int lan937x_vphy_ind_addr_wr(struct ksz_device *dev, int addr, int reg)
@@ -260,10 +461,66 @@ int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu)
int lan937x_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
{
- u32 secs = msecs / 1000;
- u32 value;
+ u8 data, mult, value8;
+ bool in_msec = false;
+ u32 max_val, value;
+ u32 secs = msecs;
int ret;
+#define MAX_TIMER_VAL ((1 << 20) - 1)
+
+ /* The aging timer comprises a 3-bit multiplier and a 20-bit second
+ * value. Neither of them can be zero. The maximum timer is then
+ * 7 * 1048575 = 7340025 seconds. As this value is too large for
+ * practical use it can be interpreted as microseconds, making the
+ * maximum timer 7340 seconds with finer control. This allows for a
+ * maximum of 122 minutes, compared to 29 minutes in the KSZ9477 switch.
+ */
+ if (msecs % 1000)
+ in_msec = true;
+ else
+ secs /= 1000;
+ if (!secs)
+ secs = 1;
+
+ /* Return error if too large. */
+ else if (secs > 7 * MAX_TIMER_VAL)
+ return -EINVAL;
+
+ /* Configure how to interpret the number value. */
+ ret = ksz_rmw8(dev, REG_SW_LUE_CTRL_2, SW_AGE_CNT_IN_MICROSEC,
+ in_msec ? SW_AGE_CNT_IN_MICROSEC : 0);
+ if (ret < 0)
+ return ret;
+
+ ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value8);
+ if (ret < 0)
+ return ret;
+
+ /* Check whether there is need to update the multiplier. */
+ mult = FIELD_GET(SW_AGE_CNT_M, value8);
+ max_val = MAX_TIMER_VAL;
+ if (mult > 0) {
+ /* Try to reuse the multiplier already in the register; the
+ * hardware default uses a multiplier of 4 with a period of
+ * 75 seconds to get 300 seconds.
+ */
+ max_val = DIV_ROUND_UP(secs, mult);
+ if (max_val > MAX_TIMER_VAL || max_val * mult != secs)
+ max_val = MAX_TIMER_VAL;
+ }
+
+ data = DIV_ROUND_UP(secs, max_val);
+ if (mult != data) {
+ value8 &= ~SW_AGE_CNT_M;
+ value8 |= FIELD_PREP(SW_AGE_CNT_M, data);
+ ret = ksz_write8(dev, REG_SW_LUE_CTRL_0, value8);
+ if (ret < 0)
+ return ret;
+ }
+
+ secs = DIV_ROUND_UP(secs, data);
+
value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
ret = ksz_write8(dev, REG_SW_AGE_PERIOD__1, value);
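To make the multiplier/period split concrete, the same arithmetic in isolation for a whole-second request, assuming the register still holds the hardware-default multiplier of 4 (illustrative only; the helper is not part of the patch):

	static void lan937x_example_age_split(u32 secs, u8 *mult, u32 *period)
	{
		u32 max_val = DIV_ROUND_UP(secs, 4);

		if (max_val > MAX_TIMER_VAL || max_val * 4 != secs)
			max_val = MAX_TIMER_VAL;

		*mult = DIV_ROUND_UP(secs, max_val);	/* 400 s -> 4   */
		*period = DIV_ROUND_UP(secs, *mult);	/* 400 s -> 100 */
	}

For 400 seconds this keeps the multiplier at 4 and only updates the 20-bit period value to 100.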
@@ -283,6 +540,7 @@ static void lan937x_set_tune_adj(struct ksz_device *dev, int port,
ksz_pread16(dev, port, reg, &data16);
/* Update tune Adjust */
+ data16 &= ~PORT_TUNE_ADJ;
data16 |= FIELD_PREP(PORT_TUNE_ADJ, val);
ksz_pwrite16(dev, port, reg, data16);
@@ -363,13 +621,6 @@ int lan937x_setup(struct dsa_switch *ds)
struct ksz_device *dev = ds->priv;
int ret;
- /* enable Indirect Access from SPI to the VPHY registers */
- ret = lan937x_enable_spi_indirect_access(dev);
- if (ret < 0) {
- dev_err(dev->dev, "failed to enable spi indirect access");
- return ret;
- }
-
/* The VLAN aware is a global setting. Mixed vlan
* filterings are not supported.
*/
diff --git a/drivers/net/dsa/microchip/lan937x_reg.h b/drivers/net/dsa/microchip/lan937x_reg.h
index 2f22a9d01de3..72042fd64e5b 100644
--- a/drivers/net/dsa/microchip/lan937x_reg.h
+++ b/drivers/net/dsa/microchip/lan937x_reg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Microchip LAN937X switch register definitions
- * Copyright (C) 2019-2021 Microchip Technology Inc.
+ * Copyright (C) 2019-2024 Microchip Technology Inc.
*/
#ifndef __LAN937X_REG_H
#define __LAN937X_REG_H
@@ -37,6 +37,10 @@
#define SW_CLK125_ENB BIT(1)
#define SW_CLK25_ENB BIT(0)
+#define REG_SW_CFG_STRAP_VAL 0x0200
+#define SW_CASCADE_ID_CFG BIT(15)
+#define SW_VPHY_ADD_CFG BIT(0)
+
/* 2 - PHY Control */
#define REG_SW_CFG_STRAP_OVR 0x0214
#define SW_VPHY_DISABLE BIT(31)
@@ -52,8 +56,7 @@
#define SW_VLAN_ENABLE BIT(7)
#define SW_DROP_INVALID_VID BIT(6)
-#define SW_AGE_CNT_M 0x7
-#define SW_AGE_CNT_S 3
+#define SW_AGE_CNT_M GENMASK(5, 3)
#define SW_RESV_MCAST_ENABLE BIT(2)
#define REG_SW_LUE_CTRL_1 0x0311
@@ -66,6 +69,10 @@
#define SW_FAST_AGING BIT(1)
#define SW_LINK_AUTO_AGING BIT(0)
+#define REG_SW_LUE_CTRL_2 0x0312
+
+#define SW_AGE_CNT_IN_MICROSEC BIT(7)
+
#define REG_SW_AGE_PERIOD__1 0x0313
#define SW_AGE_PERIOD_7_0_M GENMASK(7, 0)
diff --git a/drivers/net/dsa/mt7530-mdio.c b/drivers/net/dsa/mt7530-mdio.c
index 51df42ccdbe6..0286a6cecb6f 100644
--- a/drivers/net/dsa/mt7530-mdio.c
+++ b/drivers/net/dsa/mt7530-mdio.c
@@ -136,10 +136,17 @@ static const struct of_device_id mt7530_of_match[] = {
};
MODULE_DEVICE_TABLE(of, mt7530_of_match);
+static const struct regmap_config regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = MT7530_CREV,
+ .disable_locking = true,
+};
+
static int
mt7530_probe(struct mdio_device *mdiodev)
{
- static struct regmap_config *regmap_config;
struct mt7530_priv *priv;
struct device_node *dn;
int ret;
@@ -193,18 +200,8 @@ mt7530_probe(struct mdio_device *mdiodev)
return PTR_ERR(priv->io_pwr);
}
- regmap_config = devm_kzalloc(&mdiodev->dev, sizeof(*regmap_config),
- GFP_KERNEL);
- if (!regmap_config)
- return -ENOMEM;
-
- regmap_config->reg_bits = 16;
- regmap_config->val_bits = 32;
- regmap_config->reg_stride = 4;
- regmap_config->max_register = MT7530_CREV;
- regmap_config->disable_locking = true;
priv->regmap = devm_regmap_init(priv->dev, &mt7530_regmap_bus, priv,
- regmap_config);
+ &regmap_config);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
diff --git a/drivers/net/dsa/mt7530-mmio.c b/drivers/net/dsa/mt7530-mmio.c
index 5f2db4317dd3..1dc8b93fb51a 100644
--- a/drivers/net/dsa/mt7530-mmio.c
+++ b/drivers/net/dsa/mt7530-mmio.c
@@ -11,16 +11,24 @@
#include "mt7530.h"
static const struct of_device_id mt7988_of_match[] = {
+ { .compatible = "airoha,an7583-switch", .data = &mt753x_table[ID_AN7583], },
{ .compatible = "airoha,en7581-switch", .data = &mt753x_table[ID_EN7581], },
{ .compatible = "mediatek,mt7988-switch", .data = &mt753x_table[ID_MT7988], },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mt7988_of_match);
+static const struct regmap_config sw_regmap_config = {
+ .name = "switch",
+ .reg_bits = 16,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = MT7530_CREV,
+};
+
static int
mt7988_probe(struct platform_device *pdev)
{
- static struct regmap_config *sw_regmap_config;
struct mt7530_priv *priv;
void __iomem *base_addr;
int ret;
@@ -48,16 +56,8 @@ mt7988_probe(struct platform_device *pdev)
return -ENXIO;
}
- sw_regmap_config = devm_kzalloc(&pdev->dev, sizeof(*sw_regmap_config), GFP_KERNEL);
- if (!sw_regmap_config)
- return -ENOMEM;
-
- sw_regmap_config->name = "switch";
- sw_regmap_config->reg_bits = 16;
- sw_regmap_config->val_bits = 32;
- sw_regmap_config->reg_stride = 4;
- sw_regmap_config->max_register = MT7530_CREV;
- priv->regmap = devm_regmap_init_mmio(&pdev->dev, base_addr, sw_regmap_config);
+ priv->regmap = devm_regmap_init_mmio(&pdev->dev, base_addr,
+ &sw_regmap_config);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 086b8b3d5b40..b9423389c2ef 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -32,47 +32,15 @@ static struct mt753x_pcs *pcs_to_mt753x_pcs(struct phylink_pcs *pcs)
/* String, offset, and register size in bytes if different from 4 bytes */
static const struct mt7530_mib_desc mt7530_mib[] = {
- MIB_DESC(1, 0x00, "TxDrop"),
- MIB_DESC(1, 0x04, "TxCrcErr"),
- MIB_DESC(1, 0x08, "TxUnicast"),
- MIB_DESC(1, 0x0c, "TxMulticast"),
- MIB_DESC(1, 0x10, "TxBroadcast"),
- MIB_DESC(1, 0x14, "TxCollision"),
- MIB_DESC(1, 0x18, "TxSingleCollision"),
- MIB_DESC(1, 0x1c, "TxMultipleCollision"),
- MIB_DESC(1, 0x20, "TxDeferred"),
- MIB_DESC(1, 0x24, "TxLateCollision"),
- MIB_DESC(1, 0x28, "TxExcessiveCollistion"),
- MIB_DESC(1, 0x2c, "TxPause"),
- MIB_DESC(1, 0x30, "TxPktSz64"),
- MIB_DESC(1, 0x34, "TxPktSz65To127"),
- MIB_DESC(1, 0x38, "TxPktSz128To255"),
- MIB_DESC(1, 0x3c, "TxPktSz256To511"),
- MIB_DESC(1, 0x40, "TxPktSz512To1023"),
- MIB_DESC(1, 0x44, "Tx1024ToMax"),
- MIB_DESC(2, 0x48, "TxBytes"),
- MIB_DESC(1, 0x60, "RxDrop"),
- MIB_DESC(1, 0x64, "RxFiltering"),
- MIB_DESC(1, 0x68, "RxUnicast"),
- MIB_DESC(1, 0x6c, "RxMulticast"),
- MIB_DESC(1, 0x70, "RxBroadcast"),
- MIB_DESC(1, 0x74, "RxAlignErr"),
- MIB_DESC(1, 0x78, "RxCrcErr"),
- MIB_DESC(1, 0x7c, "RxUnderSizeErr"),
- MIB_DESC(1, 0x80, "RxFragErr"),
- MIB_DESC(1, 0x84, "RxOverSzErr"),
- MIB_DESC(1, 0x88, "RxJabberErr"),
- MIB_DESC(1, 0x8c, "RxPause"),
- MIB_DESC(1, 0x90, "RxPktSz64"),
- MIB_DESC(1, 0x94, "RxPktSz65To127"),
- MIB_DESC(1, 0x98, "RxPktSz128To255"),
- MIB_DESC(1, 0x9c, "RxPktSz256To511"),
- MIB_DESC(1, 0xa0, "RxPktSz512To1023"),
- MIB_DESC(1, 0xa4, "RxPktSz1024ToMax"),
- MIB_DESC(2, 0xa8, "RxBytes"),
- MIB_DESC(1, 0xb0, "RxCtrlDrop"),
- MIB_DESC(1, 0xb4, "RxIngressDrop"),
- MIB_DESC(1, 0xb8, "RxArlDrop"),
+ MIB_DESC(1, MT7530_PORT_MIB_TX_DROP, "TxDrop"),
+ MIB_DESC(1, MT7530_PORT_MIB_TX_CRC_ERR, "TxCrcErr"),
+ MIB_DESC(1, MT7530_PORT_MIB_TX_COLLISION, "TxCollision"),
+ MIB_DESC(1, MT7530_PORT_MIB_RX_DROP, "RxDrop"),
+ MIB_DESC(1, MT7530_PORT_MIB_RX_FILTERING, "RxFiltering"),
+ MIB_DESC(1, MT7530_PORT_MIB_RX_CRC_ERR, "RxCrcErr"),
+ MIB_DESC(1, MT7530_PORT_MIB_RX_CTRL_DROP, "RxCtrlDrop"),
+ MIB_DESC(1, MT7530_PORT_MIB_RX_INGRESS_DROP, "RxIngressDrop"),
+ MIB_DESC(1, MT7530_PORT_MIB_RX_ARL_DROP, "RxArlDrop"),
};
static void
@@ -790,23 +758,33 @@ mt7530_get_strings(struct dsa_switch *ds, int port, u32 stringset,
}
static void
+mt7530_read_port_stats(struct mt7530_priv *priv, int port,
+ u32 offset, u8 size, uint64_t *data)
+{
+ u32 val, reg = MT7530_PORT_MIB_COUNTER(port) + offset;
+
+ val = mt7530_read(priv, reg);
+ *data = val;
+
+ if (size == 2) {
+ val = mt7530_read(priv, reg + 4);
+ *data |= (u64)val << 32;
+ }
+}
+
+static void
mt7530_get_ethtool_stats(struct dsa_switch *ds, int port,
uint64_t *data)
{
struct mt7530_priv *priv = ds->priv;
const struct mt7530_mib_desc *mib;
- u32 reg, i;
- u64 hi;
+ int i;
for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++) {
mib = &mt7530_mib[i];
- reg = MT7530_PORT_MIB_COUNTER(port) + mib->offset;
- data[i] = mt7530_read(priv, reg);
- if (mib->size == 2) {
- hi = mt7530_read(priv, reg + 4);
- data[i] |= hi << 32;
- }
+ mt7530_read_port_stats(priv, port, mib->offset, mib->size,
+ data + i);
}
}
@@ -819,6 +797,172 @@ mt7530_get_sset_count(struct dsa_switch *ds, int port, int sset)
return ARRAY_SIZE(mt7530_mib);
}
+static void mt7530_get_eth_mac_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ /* The MIB counters don't provide a FramesTransmittedOK value; instead
+ * they provide separate stats for Unicast, Broadcast and Multicast
+ * frames. To simulate a global frame counter, read the Unicast count
+ * first and add the Multicast and Broadcast counts afterwards.
+ */
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_UNICAST, 1,
+ &mac_stats->FramesTransmittedOK);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_SINGLE_COLLISION, 1,
+ &mac_stats->SingleCollisionFrames);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_MULTIPLE_COLLISION, 1,
+ &mac_stats->MultipleCollisionFrames);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_UNICAST, 1,
+ &mac_stats->FramesReceivedOK);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_BYTES, 2,
+ &mac_stats->OctetsTransmittedOK);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_ALIGN_ERR, 1,
+ &mac_stats->AlignmentErrors);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_DEFERRED, 1,
+ &mac_stats->FramesWithDeferredXmissions);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_LATE_COLLISION, 1,
+ &mac_stats->LateCollisions);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_EXCESSIVE_COLLISION, 1,
+ &mac_stats->FramesAbortedDueToXSColls);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_BYTES, 2,
+ &mac_stats->OctetsReceivedOK);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_MULTICAST, 1,
+ &mac_stats->MulticastFramesXmittedOK);
+ mac_stats->FramesTransmittedOK += mac_stats->MulticastFramesXmittedOK;
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_BROADCAST, 1,
+ &mac_stats->BroadcastFramesXmittedOK);
+ mac_stats->FramesTransmittedOK += mac_stats->BroadcastFramesXmittedOK;
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_MULTICAST, 1,
+ &mac_stats->MulticastFramesReceivedOK);
+ mac_stats->FramesReceivedOK += mac_stats->MulticastFramesReceivedOK;
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_BROADCAST, 1,
+ &mac_stats->BroadcastFramesReceivedOK);
+ mac_stats->FramesReceivedOK += mac_stats->BroadcastFramesReceivedOK;
+}
+
+static const struct ethtool_rmon_hist_range mt7530_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, MT7530_MAX_MTU },
+ {}
+};
+
+static void mt7530_get_rmon_stats(struct dsa_switch *ds, int port,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_UNDER_SIZE_ERR, 1,
+ &rmon_stats->undersize_pkts);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_OVER_SZ_ERR, 1,
+ &rmon_stats->oversize_pkts);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_FRAG_ERR, 1,
+ &rmon_stats->fragments);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_JABBER_ERR, 1,
+ &rmon_stats->jabbers);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PKT_SZ_64, 1,
+ &rmon_stats->hist[0]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PKT_SZ_65_TO_127, 1,
+ &rmon_stats->hist[1]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PKT_SZ_128_TO_255, 1,
+ &rmon_stats->hist[2]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PKT_SZ_256_TO_511, 1,
+ &rmon_stats->hist[3]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PKT_SZ_512_TO_1023, 1,
+ &rmon_stats->hist[4]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PKT_SZ_1024_TO_MAX, 1,
+ &rmon_stats->hist[5]);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PKT_SZ_64, 1,
+ &rmon_stats->hist_tx[0]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PKT_SZ_65_TO_127, 1,
+ &rmon_stats->hist_tx[1]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PKT_SZ_128_TO_255, 1,
+ &rmon_stats->hist_tx[2]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PKT_SZ_256_TO_511, 1,
+ &rmon_stats->hist_tx[3]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PKT_SZ_512_TO_1023, 1,
+ &rmon_stats->hist_tx[4]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PKT_SZ_1024_TO_MAX, 1,
+ &rmon_stats->hist_tx[5]);
+
+ *ranges = mt7530_rmon_ranges;
+}
+
+static void mt7530_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *storage)
+{
+ struct mt7530_priv *priv = ds->priv;
+ uint64_t data;
+
+ /* The MIB counters don't provide a FramesTransmittedOK value; instead
+ * they provide separate stats for Unicast, Broadcast and Multicast
+ * frames. To simulate a global frame counter, read the Unicast count
+ * first and add the Multicast and Broadcast counts afterwards.
+ */
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_UNICAST, 1,
+ &storage->rx_packets);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_MULTICAST, 1,
+ &storage->multicast);
+ storage->rx_packets += storage->multicast;
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_BROADCAST, 1,
+ &data);
+ storage->rx_packets += data;
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_UNICAST, 1,
+ &storage->tx_packets);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_MULTICAST, 1,
+ &data);
+ storage->tx_packets += data;
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_BROADCAST, 1,
+ &data);
+ storage->tx_packets += data;
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_BYTES, 2,
+ &storage->rx_bytes);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_BYTES, 2,
+ &storage->tx_bytes);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_DROP, 1,
+ &storage->rx_dropped);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_DROP, 1,
+ &storage->tx_dropped);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_CRC_ERR, 1,
+ &storage->rx_crc_errors);
+}
+
+static void mt7530_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PAUSE, 1,
+ &ctrl_stats->MACControlFramesTransmitted);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PAUSE, 1,
+ &ctrl_stats->MACControlFramesReceived);
+}
+
static int
mt7530_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
{
@@ -1154,7 +1298,7 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
* is affine to the inbound user port.
*/
if (priv->id == ID_MT7531 || priv->id == ID_MT7988 ||
- priv->id == ID_EN7581)
+ priv->id == ID_EN7581 || priv->id == ID_AN7583)
mt7530_set(priv, MT7531_CFC, MT7531_CPU_PMAP(BIT(port)));
/* CPU port gets connected to all user ports of
@@ -1968,7 +2112,7 @@ mt7530_gpio_get(struct gpio_chip *gc, unsigned int offset)
return !!(mt7530_read(priv, MT7530_LED_GPIO_DATA) & bit);
}
-static void
+static int
mt7530_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct mt7530_priv *priv = gpiochip_get_data(gc);
@@ -1978,6 +2122,8 @@ mt7530_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
mt7530_set(priv, MT7530_LED_GPIO_DATA, bit);
else
mt7530_clear(priv, MT7530_LED_GPIO_DATA, bit);
+
+ return 0;
}
static int
@@ -2050,131 +2196,6 @@ mt7530_setup_gpio(struct mt7530_priv *priv)
}
#endif /* CONFIG_GPIOLIB */
-static irqreturn_t
-mt7530_irq_thread_fn(int irq, void *dev_id)
-{
- struct mt7530_priv *priv = dev_id;
- bool handled = false;
- u32 val;
- int p;
-
- mt7530_mutex_lock(priv);
- val = mt7530_mii_read(priv, MT7530_SYS_INT_STS);
- mt7530_mii_write(priv, MT7530_SYS_INT_STS, val);
- mt7530_mutex_unlock(priv);
-
- for (p = 0; p < MT7530_NUM_PHYS; p++) {
- if (BIT(p) & val) {
- unsigned int irq;
-
- irq = irq_find_mapping(priv->irq_domain, p);
- handle_nested_irq(irq);
- handled = true;
- }
- }
-
- return IRQ_RETVAL(handled);
-}
-
-static void
-mt7530_irq_mask(struct irq_data *d)
-{
- struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
-
- priv->irq_enable &= ~BIT(d->hwirq);
-}
-
-static void
-mt7530_irq_unmask(struct irq_data *d)
-{
- struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
-
- priv->irq_enable |= BIT(d->hwirq);
-}
-
-static void
-mt7530_irq_bus_lock(struct irq_data *d)
-{
- struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
-
- mt7530_mutex_lock(priv);
-}
-
-static void
-mt7530_irq_bus_sync_unlock(struct irq_data *d)
-{
- struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
-
- mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable);
- mt7530_mutex_unlock(priv);
-}
-
-static struct irq_chip mt7530_irq_chip = {
- .name = KBUILD_MODNAME,
- .irq_mask = mt7530_irq_mask,
- .irq_unmask = mt7530_irq_unmask,
- .irq_bus_lock = mt7530_irq_bus_lock,
- .irq_bus_sync_unlock = mt7530_irq_bus_sync_unlock,
-};
-
-static int
-mt7530_irq_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- irq_set_chip_data(irq, domain->host_data);
- irq_set_chip_and_handler(irq, &mt7530_irq_chip, handle_simple_irq);
- irq_set_nested_thread(irq, true);
- irq_set_noprobe(irq);
-
- return 0;
-}
-
-static const struct irq_domain_ops mt7530_irq_domain_ops = {
- .map = mt7530_irq_map,
- .xlate = irq_domain_xlate_onecell,
-};
-
-static void
-mt7988_irq_mask(struct irq_data *d)
-{
- struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
-
- priv->irq_enable &= ~BIT(d->hwirq);
- mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable);
-}
-
-static void
-mt7988_irq_unmask(struct irq_data *d)
-{
- struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
-
- priv->irq_enable |= BIT(d->hwirq);
- mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable);
-}
-
-static struct irq_chip mt7988_irq_chip = {
- .name = KBUILD_MODNAME,
- .irq_mask = mt7988_irq_mask,
- .irq_unmask = mt7988_irq_unmask,
-};
-
-static int
-mt7988_irq_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- irq_set_chip_data(irq, domain->host_data);
- irq_set_chip_and_handler(irq, &mt7988_irq_chip, handle_simple_irq);
- irq_set_nested_thread(irq, true);
- irq_set_noprobe(irq);
-
- return 0;
-}
-
-static const struct irq_domain_ops mt7988_irq_domain_ops = {
- .map = mt7988_irq_map,
- .xlate = irq_domain_xlate_onecell,
-};
-
static void
mt7530_setup_mdio_irq(struct mt7530_priv *priv)
{
@@ -2191,49 +2212,72 @@ mt7530_setup_mdio_irq(struct mt7530_priv *priv)
}
}
+static const struct regmap_irq mt7530_irqs[] = {
+ REGMAP_IRQ_REG_LINE(0, 32), /* PHY0_LC */
+ REGMAP_IRQ_REG_LINE(1, 32), /* PHY1_LC */
+ REGMAP_IRQ_REG_LINE(2, 32), /* PHY2_LC */
+ REGMAP_IRQ_REG_LINE(3, 32), /* PHY3_LC */
+ REGMAP_IRQ_REG_LINE(4, 32), /* PHY4_LC */
+ REGMAP_IRQ_REG_LINE(5, 32), /* PHY5_LC */
+ REGMAP_IRQ_REG_LINE(6, 32), /* PHY6_LC */
+ REGMAP_IRQ_REG_LINE(16, 32), /* MAC_PC */
+ REGMAP_IRQ_REG_LINE(17, 32), /* BMU */
+ REGMAP_IRQ_REG_LINE(18, 32), /* MIB */
+ REGMAP_IRQ_REG_LINE(22, 32), /* ARL_COL_FULL_COL */
+ REGMAP_IRQ_REG_LINE(23, 32), /* ARL_COL_FULL */
+ REGMAP_IRQ_REG_LINE(24, 32), /* ARL_TBL_ERR */
+ REGMAP_IRQ_REG_LINE(25, 32), /* ARL_PKT_QERR */
+ REGMAP_IRQ_REG_LINE(26, 32), /* ARL_EQ_ERR */
+ REGMAP_IRQ_REG_LINE(27, 32), /* ARL_PKT_BC */
+ REGMAP_IRQ_REG_LINE(28, 32), /* ARL_SEC_IG1X */
+ REGMAP_IRQ_REG_LINE(29, 32), /* ARL_SEC_VLAN */
+ REGMAP_IRQ_REG_LINE(30, 32), /* ARL_SEC_TAG */
+ REGMAP_IRQ_REG_LINE(31, 32), /* ACL */
+};
+
+static const struct regmap_irq_chip mt7530_regmap_irq_chip = {
+ .name = KBUILD_MODNAME,
+ .status_base = MT7530_SYS_INT_STS,
+ .unmask_base = MT7530_SYS_INT_EN,
+ .ack_base = MT7530_SYS_INT_STS,
+ .init_ack_masked = true,
+ .irqs = mt7530_irqs,
+ .num_irqs = ARRAY_SIZE(mt7530_irqs),
+ .num_regs = 1,
+};
+
static int
mt7530_setup_irq(struct mt7530_priv *priv)
{
+ struct regmap_irq_chip_data *irq_data;
struct device *dev = priv->dev;
struct device_node *np = dev->of_node;
- int ret;
+ int irq, ret;
if (!of_property_read_bool(np, "interrupt-controller")) {
dev_info(dev, "no interrupt support\n");
return 0;
}
- priv->irq = of_irq_get(np, 0);
- if (priv->irq <= 0) {
- dev_err(dev, "failed to get parent IRQ: %d\n", priv->irq);
- return priv->irq ? : -EINVAL;
- }
-
- if (priv->id == ID_MT7988 || priv->id == ID_EN7581)
- priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS,
- &mt7988_irq_domain_ops,
- priv);
- else
- priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS,
- &mt7530_irq_domain_ops,
- priv);
-
- if (!priv->irq_domain) {
- dev_err(dev, "failed to create IRQ domain\n");
- return -ENOMEM;
+ irq = of_irq_get(np, 0);
+ if (irq <= 0) {
+ dev_err(dev, "failed to get parent IRQ: %d\n", irq);
+ return irq ? : -EINVAL;
}
/* This register must be set for MT7530 to properly fire interrupts */
if (priv->id == ID_MT7530 || priv->id == ID_MT7621)
mt7530_set(priv, MT7530_TOP_SIG_CTRL, TOP_SIG_CTRL_NORMAL);
- ret = request_threaded_irq(priv->irq, NULL, mt7530_irq_thread_fn,
- IRQF_ONESHOT, KBUILD_MODNAME, priv);
- if (ret) {
- irq_domain_remove(priv->irq_domain);
- dev_err(dev, "failed to request IRQ: %d\n", ret);
+ ret = devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(dev),
+ priv->regmap, irq,
+ IRQF_ONESHOT,
+ 0, &mt7530_regmap_irq_chip,
+ &irq_data);
+ if (ret)
return ret;
- }
+
+ priv->irq_domain = regmap_irq_get_domain(irq_data);
return 0;
}
@@ -2253,26 +2297,6 @@ mt7530_free_mdio_irq(struct mt7530_priv *priv)
}
}
-static void
-mt7530_free_irq_common(struct mt7530_priv *priv)
-{
- free_irq(priv->irq, priv);
- irq_domain_remove(priv->irq_domain);
-}
-
-static void
-mt7530_free_irq(struct mt7530_priv *priv)
-{
- struct device_node *mnp, *np = priv->dev->of_node;
-
- mnp = of_get_child_by_name(np, "mdio");
- if (!mnp)
- mt7530_free_mdio_irq(priv);
- of_node_put(mnp);
-
- mt7530_free_irq_common(priv);
-}
-
static int
mt7530_setup_mdio(struct mt7530_priv *priv)
{
@@ -2307,13 +2331,13 @@ mt7530_setup_mdio(struct mt7530_priv *priv)
bus->parent = dev;
bus->phy_mask = ~ds->phys_mii_mask;
- if (priv->irq && !mnp)
+ if (priv->irq_domain && !mnp)
mt7530_setup_mdio_irq(priv);
ret = devm_of_mdiobus_register(dev, bus, mnp);
if (ret) {
dev_err(dev, "failed to register MDIO bus: %d\n", ret);
- if (priv->irq && !mnp)
+ if (priv->irq_domain && !mnp)
mt7530_free_mdio_irq(priv);
}
@@ -2541,6 +2565,9 @@ mt7531_setup_common(struct dsa_switch *ds)
struct mt7530_priv *priv = ds->priv;
int ret, i;
+ ds->assisted_learning_on_cpu_port = true;
+ ds->mtu_enforcement_ingress = true;
+
mt753x_trap_frames(priv);
/* Enable and reset MIB counters */
@@ -2586,12 +2613,18 @@ mt7531_setup_common(struct dsa_switch *ds)
/* Allow mirroring frames received on the local port (monitor port). */
mt7530_set(priv, MT753X_AGC, LOCAL_EN);
+ /* Enable Special Tag for rx frames */
+ if (priv->id == ID_EN7581 || priv->id == ID_AN7583)
+ mt7530_write(priv, MT753X_CPORT_SPTAG_CFG,
+ CPORT_SW2FE_STAG_EN | CPORT_FE2SW_STAG_EN);
+
/* Flush the FDB table */
ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
if (ret < 0)
return ret;
- return 0;
+ /* Setup VLAN ID 0 for VLAN-unaware bridges */
+ return mt7530_setup_vlan0(priv);
}
static int
@@ -2687,14 +2720,6 @@ mt7531_setup(struct dsa_switch *ds)
if (ret)
return ret;
- /* Setup VLAN ID 0 for VLAN-unaware bridges */
- ret = mt7530_setup_vlan0(priv);
- if (ret)
- return ret;
-
- ds->assisted_learning_on_cpu_port = true;
- ds->mtu_enforcement_ingress = true;
-
return 0;
}
@@ -2957,28 +2982,61 @@ static void mt753x_phylink_mac_link_up(struct phylink_config *config,
mcr |= PMCR_FORCE_RX_FC_EN;
}
- if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, false) >= 0) {
- switch (speed) {
- case SPEED_1000:
- case SPEED_2500:
- mcr |= PMCR_FORCE_EEE1G;
- break;
- case SPEED_100:
- mcr |= PMCR_FORCE_EEE100;
- break;
- }
- }
-
mt7530_set(priv, MT753X_PMCR_P(dp->index), mcr);
}
+static void mt753x_phylink_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mt7530_priv *priv = dp->ds->priv;
+
+ mt7530_clear(priv, MT753X_PMCR_P(dp->index),
+ PMCR_FORCE_EEE1G | PMCR_FORCE_EEE100);
+}
+
+static int mt753x_phylink_mac_enable_tx_lpi(struct phylink_config *config,
+ u32 timer, bool tx_clock_stop)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mt7530_priv *priv = dp->ds->priv;
+ u32 val;
+
+ /* If the timer is zero, then set LPI_MODE_EN, which allows the
+ * system to enter LPI mode immediately rather than waiting for
+ * the LPI threshold.
+ */
+ if (!timer)
+ val = LPI_MODE_EN;
+ else if (FIELD_FIT(LPI_THRESH_MASK, timer))
+ val = FIELD_PREP(LPI_THRESH_MASK, timer);
+ else
+ val = LPI_THRESH_MASK;
+
+ mt7530_rmw(priv, MT753X_PMEEECR_P(dp->index),
+ LPI_THRESH_MASK | LPI_MODE_EN, val);
+
+ mt7530_set(priv, MT753X_PMCR_P(dp->index),
+ PMCR_FORCE_EEE1G | PMCR_FORCE_EEE100);
+
+ return 0;
+}
+
static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
struct mt7530_priv *priv = ds->priv;
+ u32 eeecr;
config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE;
+ config->lpi_capabilities = MAC_100FD | MAC_1000FD | MAC_2500FD;
+
+ eeecr = mt7530_read(priv, MT753X_PMEEECR_P(port));
+ /* tx_lpi_timer should be in microseconds. The time units for
+	 * the LPI threshold are unspecified.
+ */
+ config->lpi_timer_default = FIELD_GET(LPI_THRESH_MASK, eeecr);
+
priv->info->mac_port_get_caps(ds, port, config);
}
@@ -2994,7 +3052,7 @@ static int mt753x_pcs_validate(struct phylink_pcs *pcs,
return 0;
}
-static void mt7530_pcs_get_state(struct phylink_pcs *pcs,
+static void mt7530_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
@@ -3063,55 +3121,31 @@ mt753x_setup(struct dsa_switch *ds)
return ret;
ret = mt7530_setup_mdio(priv);
- if (ret && priv->irq)
- mt7530_free_irq_common(priv);
if (ret)
return ret;
/* Initialise the PCS devices */
for (i = 0; i < priv->ds->num_ports; i++) {
priv->pcs[i].pcs.ops = priv->info->pcs_ops;
- priv->pcs[i].pcs.neg_mode = true;
priv->pcs[i].priv = priv;
priv->pcs[i].port = i;
}
- if (priv->create_sgmii) {
+ if (priv->create_sgmii)
ret = priv->create_sgmii(priv);
- if (ret && priv->irq)
- mt7530_free_irq(priv);
- }
-
- return ret;
-}
-static int mt753x_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_keee *e)
-{
- struct mt7530_priv *priv = ds->priv;
- u32 eeecr = mt7530_read(priv, MT753X_PMEEECR_P(port));
-
- e->tx_lpi_enabled = !(eeecr & LPI_MODE_EN);
- e->tx_lpi_timer = LPI_THRESH_GET(eeecr);
+ if (ret && priv->irq_domain)
+ mt7530_free_mdio_irq(priv);
- return 0;
+ return ret;
}
static int mt753x_set_mac_eee(struct dsa_switch *ds, int port,
struct ethtool_keee *e)
{
- struct mt7530_priv *priv = ds->priv;
- u32 set, mask = LPI_THRESH_MASK | LPI_MODE_EN;
-
if (e->tx_lpi_timer > 0xFFF)
return -EINVAL;
- set = LPI_THRESH_SET(e->tx_lpi_timer);
- if (!e->tx_lpi_enabled)
- /* Force LPI Mode without a delay */
- set |= LPI_MODE_EN;
- mt7530_rmw(priv, MT753X_PMEEECR_P(port), mask, set);
-
return 0;
}
@@ -3204,19 +3238,33 @@ static int mt7988_setup(struct dsa_switch *ds)
reset_control_deassert(priv->rstc);
usleep_range(20, 50);
+	/* AN7583 requires an additional tweak to CONN_CFG */
+ if (priv->id == ID_AN7583)
+ mt7530_rmw(priv, AN7583_GEPHY_CONN_CFG,
+ AN7583_CSR_DPHY_CKIN_SEL |
+ AN7583_CSR_PHY_CORE_REG_CLK_SEL |
+ AN7583_CSR_ETHER_AFE_PWD,
+ AN7583_CSR_DPHY_CKIN_SEL |
+ AN7583_CSR_PHY_CORE_REG_CLK_SEL |
+ FIELD_PREP(AN7583_CSR_ETHER_AFE_PWD, 0));
+
/* Reset the switch PHYs */
mt7530_write(priv, MT7530_SYS_CTRL, SYS_CTRL_PHY_RST);
return mt7531_setup_common(ds);
}
-const struct dsa_switch_ops mt7530_switch_ops = {
+static const struct dsa_switch_ops mt7530_switch_ops = {
.get_tag_protocol = mtk_get_tag_protocol,
.setup = mt753x_setup,
.preferred_default_local_cpu_port = mt753x_preferred_default_local_cpu_port,
.get_strings = mt7530_get_strings,
.get_ethtool_stats = mt7530_get_ethtool_stats,
.get_sset_count = mt7530_get_sset_count,
+ .get_eth_mac_stats = mt7530_get_eth_mac_stats,
+ .get_rmon_stats = mt7530_get_rmon_stats,
+ .get_eth_ctrl_stats = mt7530_get_eth_ctrl_stats,
+ .get_stats64 = mt7530_get_stats64,
.set_ageing_time = mt7530_set_ageing_time,
.port_enable = mt7530_port_enable,
.port_disable = mt7530_port_disable,
@@ -3238,18 +3286,21 @@ const struct dsa_switch_ops mt7530_switch_ops = {
.port_mirror_add = mt753x_port_mirror_add,
.port_mirror_del = mt753x_port_mirror_del,
.phylink_get_caps = mt753x_phylink_get_caps,
- .get_mac_eee = mt753x_get_mac_eee,
+ .support_eee = dsa_supports_eee,
.set_mac_eee = mt753x_set_mac_eee,
.conduit_state_change = mt753x_conduit_state_change,
.port_setup_tc = mt753x_setup_tc,
+ .port_hsr_join = dsa_port_simple_hsr_join,
+ .port_hsr_leave = dsa_port_simple_hsr_leave,
};
-EXPORT_SYMBOL_GPL(mt7530_switch_ops);
static const struct phylink_mac_ops mt753x_phylink_mac_ops = {
.mac_select_pcs = mt753x_phylink_mac_select_pcs,
.mac_config = mt753x_phylink_mac_config,
.mac_link_down = mt753x_phylink_mac_link_down,
.mac_link_up = mt753x_phylink_mac_link_up,
+ .mac_disable_tx_lpi = mt753x_phylink_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = mt753x_phylink_mac_enable_tx_lpi,
};
const struct mt753x_info mt753x_table[] = {
@@ -3306,6 +3357,16 @@ const struct mt753x_info mt753x_table[] = {
.phy_write_c45 = mt7531_ind_c45_phy_write,
.mac_port_get_caps = en7581_mac_port_get_caps,
},
+ [ID_AN7583] = {
+ .id = ID_AN7583,
+ .pcs_ops = &mt7530_pcs_ops,
+ .sw_setup = mt7988_setup,
+ .phy_read_c22 = mt7531_ind_c22_phy_read,
+ .phy_write_c22 = mt7531_ind_c22_phy_write,
+ .phy_read_c45 = mt7531_ind_c45_phy_read,
+ .phy_write_c45 = mt7531_ind_c45_phy_write,
+ .mac_port_get_caps = en7581_mac_port_get_caps,
+ },
};
EXPORT_SYMBOL_GPL(mt753x_table);
@@ -3343,8 +3404,8 @@ EXPORT_SYMBOL_GPL(mt7530_probe_common);
void
mt7530_remove_common(struct mt7530_priv *priv)
{
- if (priv->irq)
- mt7530_free_irq(priv);
+ if (priv->irq_domain)
+ mt7530_free_mdio_irq(priv);
dsa_unregister_switch(priv->ds);
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 448200689f49..3e0090bed298 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -20,6 +20,7 @@ enum mt753x_id {
ID_MT7531 = 2,
ID_MT7988 = 3,
ID_EN7581 = 4,
+ ID_AN7583 = 5,
};
#define NUM_TRGMII_CTRL 5
@@ -66,7 +67,8 @@ enum mt753x_id {
#define MT753X_MIRROR_REG(id) ((id == ID_MT7531 || \
id == ID_MT7988 || \
- id == ID_EN7581) ? \
+ id == ID_EN7581 || \
+ id == ID_AN7583) ? \
MT7531_CFC : MT753X_MFC)
#define MT753X_MIRROR_EN(id) ((id == ID_MT7531 || \
@@ -76,19 +78,22 @@ enum mt753x_id {
#define MT753X_MIRROR_PORT_MASK(id) ((id == ID_MT7531 || \
id == ID_MT7988 || \
- id == ID_EN7581) ? \
+ id == ID_EN7581 || \
+ id == ID_AN7583) ? \
MT7531_MIRROR_PORT_MASK : \
MT7530_MIRROR_PORT_MASK)
#define MT753X_MIRROR_PORT_GET(id, val) ((id == ID_MT7531 || \
id == ID_MT7988 || \
- id == ID_EN7581) ? \
+ id == ID_EN7581 || \
+ id == ID_AN7583) ? \
MT7531_MIRROR_PORT_GET(val) : \
MT7530_MIRROR_PORT_GET(val))
#define MT753X_MIRROR_PORT_SET(id, val) ((id == ID_MT7531 || \
id == ID_MT7988 || \
- id == ID_EN7581) ? \
+ id == ID_EN7581 || \
+ id == ID_AN7583) ? \
MT7531_MIRROR_PORT_SET(val) : \
MT7530_MIRROR_PORT_SET(val))
@@ -423,6 +428,48 @@ enum mt7530_vlan_port_acc_frm {
/* Register for MIB */
#define MT7530_PORT_MIB_COUNTER(x) (0x4000 + (x) * 0x100)
+/* Each define is an offset from MT7530_PORT_MIB_COUNTER */
+#define MT7530_PORT_MIB_TX_DROP 0x00
+#define MT7530_PORT_MIB_TX_CRC_ERR 0x04
+#define MT7530_PORT_MIB_TX_UNICAST 0x08
+#define MT7530_PORT_MIB_TX_MULTICAST 0x0c
+#define MT7530_PORT_MIB_TX_BROADCAST 0x10
+#define MT7530_PORT_MIB_TX_COLLISION 0x14
+#define MT7530_PORT_MIB_TX_SINGLE_COLLISION 0x18
+#define MT7530_PORT_MIB_TX_MULTIPLE_COLLISION 0x1c
+#define MT7530_PORT_MIB_TX_DEFERRED 0x20
+#define MT7530_PORT_MIB_TX_LATE_COLLISION 0x24
+#define MT7530_PORT_MIB_TX_EXCESSIVE_COLLISION 0x28
+#define MT7530_PORT_MIB_TX_PAUSE 0x2c
+#define MT7530_PORT_MIB_TX_PKT_SZ_64 0x30
+#define MT7530_PORT_MIB_TX_PKT_SZ_65_TO_127 0x34
+#define MT7530_PORT_MIB_TX_PKT_SZ_128_TO_255 0x38
+#define MT7530_PORT_MIB_TX_PKT_SZ_256_TO_511 0x3c
+#define MT7530_PORT_MIB_TX_PKT_SZ_512_TO_1023 0x40
+#define MT7530_PORT_MIB_TX_PKT_SZ_1024_TO_MAX 0x44
+#define MT7530_PORT_MIB_TX_BYTES 0x48 /* 64 bytes */
+#define MT7530_PORT_MIB_RX_DROP 0x60
+#define MT7530_PORT_MIB_RX_FILTERING 0x64
+#define MT7530_PORT_MIB_RX_UNICAST 0x68
+#define MT7530_PORT_MIB_RX_MULTICAST 0x6c
+#define MT7530_PORT_MIB_RX_BROADCAST 0x70
+#define MT7530_PORT_MIB_RX_ALIGN_ERR 0x74
+#define MT7530_PORT_MIB_RX_CRC_ERR 0x78
+#define MT7530_PORT_MIB_RX_UNDER_SIZE_ERR 0x7c
+#define MT7530_PORT_MIB_RX_FRAG_ERR 0x80
+#define MT7530_PORT_MIB_RX_OVER_SZ_ERR 0x84
+#define MT7530_PORT_MIB_RX_JABBER_ERR 0x88
+#define MT7530_PORT_MIB_RX_PAUSE 0x8c
+#define MT7530_PORT_MIB_RX_PKT_SZ_64 0x90
+#define MT7530_PORT_MIB_RX_PKT_SZ_65_TO_127 0x94
+#define MT7530_PORT_MIB_RX_PKT_SZ_128_TO_255 0x98
+#define MT7530_PORT_MIB_RX_PKT_SZ_256_TO_511 0x9c
+#define MT7530_PORT_MIB_RX_PKT_SZ_512_TO_1023 0xa0
+#define MT7530_PORT_MIB_RX_PKT_SZ_1024_TO_MAX 0xa4
+#define MT7530_PORT_MIB_RX_BYTES 0xa8 /* 64 bytes */
+#define MT7530_PORT_MIB_RX_CTRL_DROP 0xb0
+#define MT7530_PORT_MIB_RX_INGRESS_DROP 0xb4
+#define MT7530_PORT_MIB_RX_ARL_DROP 0xb8
#define MT7530_MIB_CCR 0x4fe0
#define CCR_MIB_ENABLE BIT(31)
#define CCR_RX_OCT_CNT_GOOD BIT(7)
@@ -627,6 +674,15 @@ enum mt7531_xtal_fsel {
#define MT7531_GPIO12_RG_RXD3_MASK GENMASK(19, 16)
#define MT7531_EXT_P_MDIO_12 (2 << 16)
+#define MT753X_CPORT_SPTAG_CFG 0x7c10
+#define CPORT_SW2FE_STAG_EN BIT(1)
+#define CPORT_FE2SW_STAG_EN BIT(0)
+
+#define AN7583_GEPHY_CONN_CFG 0x7c14
+#define AN7583_CSR_DPHY_CKIN_SEL BIT(31)
+#define AN7583_CSR_PHY_CORE_REG_CLK_SEL BIT(30)
+#define AN7583_CSR_ETHER_AFE_PWD GENMASK(28, 24)
+
/* Registers for LED GPIO control (MT7530 only)
* All registers follow this pattern:
* [ 2: 0] port 0
@@ -815,9 +871,7 @@ struct mt753x_info {
* @p5_mode: Holding the current mode of port 5 of the MT7530 switch
* @p5_sgmii: Flag for distinguishing if port 5 of the MT7531 switch
* has got SGMII
- * @irq: IRQ number of the switch
* @irq_domain: IRQ domain of the switch irq_chip
- * @irq_enable: IRQ enable bits, synced to SYS_INT_EN
* @create_sgmii: Pointer to function creating SGMII PCS instance(s)
* @active_cpu_ports: Holding the active CPU ports
* @mdiodev: The pointer to the MDIO device structure
@@ -842,9 +896,7 @@ struct mt7530_priv {
struct mt753x_pcs pcs[MT7530_NUM_PORTS];
/* protect among processes for registers access*/
struct mutex reg_mutex;
- int irq;
struct irq_domain *irq_domain;
- u32 irq_enable;
int (*create_sgmii)(struct mt7530_priv *priv);
u8 active_cpu_ports;
struct mdio_device *mdiodev;
@@ -887,7 +939,6 @@ static inline void INIT_MT7530_DUMMY_POLL(struct mt7530_dummy_poll *p,
int mt7530_probe_common(struct mt7530_priv *priv);
void mt7530_remove_common(struct mt7530_priv *priv);
-extern const struct dsa_switch_ops mt7530_switch_ops;
extern const struct mt753x_info mt753x_table[];
#endif /* __MT7530_H */
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 294312b58e4f..9c8ac14cd4f5 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -297,6 +297,8 @@ static const struct dsa_switch_ops mv88e6060_switch_ops = {
.phy_read = mv88e6060_phy_read,
.phy_write = mv88e6060_phy_write,
.phylink_get_caps = mv88e6060_phylink_get_caps,
+ .port_hsr_join = dsa_port_simple_hsr_join,
+ .port_hsr_leave = dsa_port_simple_hsr_leave,
};
static int mv88e6060_probe(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 3a792f79270d..b4d48997bf46 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -297,7 +297,7 @@ static int mv88e6xxx_g1_irq_setup_common(struct mv88e6xxx_chip *chip)
u16 reg, mask;
chip->g1_irq.nirqs = chip->info->g1_irqs;
- chip->g1_irq.domain = irq_domain_add_simple(
+ chip->g1_irq.domain = irq_domain_create_simple(
NULL, chip->g1_irq.nirqs, 0,
&mv88e6xxx_g1_irq_domain_ops, chip);
if (!chip->g1_irq.domain)
@@ -394,7 +394,7 @@ static int mv88e6xxx_irq_poll_setup(struct mv88e6xxx_chip *chip)
kthread_init_delayed_work(&chip->irq_poll_work,
mv88e6xxx_irq_poll);
- chip->kworker = kthread_create_worker(0, "%s", dev_name(chip->dev));
+ chip->kworker = kthread_run_worker(0, "%s", dev_name(chip->dev));
if (IS_ERR(chip->kworker))
return PTR_ERR(chip->kworker);
@@ -1289,9 +1289,6 @@ static size_t mv88e6095_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
const struct mv88e6xxx_hw_stat *stat,
uint64_t *data)
{
- if (!(stat->type & (STATS_TYPE_BANK0 | STATS_TYPE_PORT)))
- return 0;
-
*data = _mv88e6xxx_get_ethtool_stat(chip, stat, port, 0,
MV88E6XXX_G1_STATS_OP_HIST_RX);
return 1;
@@ -1301,9 +1298,6 @@ static size_t mv88e6250_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
const struct mv88e6xxx_hw_stat *stat,
uint64_t *data)
{
- if (!(stat->type & STATS_TYPE_BANK0))
- return 0;
-
*data = _mv88e6xxx_get_ethtool_stat(chip, stat, port, 0,
MV88E6XXX_G1_STATS_OP_HIST_RX);
return 1;
@@ -1313,9 +1307,6 @@ static size_t mv88e6320_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
const struct mv88e6xxx_hw_stat *stat,
uint64_t *data)
{
- if (!(stat->type & (STATS_TYPE_BANK0 | STATS_TYPE_BANK1)))
- return 0;
-
*data = _mv88e6xxx_get_ethtool_stat(chip, stat, port,
MV88E6XXX_G1_STATS_OP_BANK_1_BIT_9,
MV88E6XXX_G1_STATS_OP_HIST_RX);
@@ -1326,9 +1317,6 @@ static size_t mv88e6390_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
const struct mv88e6xxx_hw_stat *stat,
uint64_t *data)
{
- if (!(stat->type & (STATS_TYPE_BANK0 | STATS_TYPE_BANK1)))
- return 0;
-
*data = _mv88e6xxx_get_ethtool_stat(chip, stat, port,
MV88E6XXX_G1_STATS_OP_BANK_1_BIT_10,
0);
@@ -1341,6 +1329,9 @@ static size_t mv88e6xxx_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
{
int ret = 0;
+ if (!(stat->type & chip->info->stats_type))
+ return 0;
+
if (chip->info->ops->stats_get_stat) {
mv88e6xxx_reg_lock(chip);
ret = chip->info->ops->stats_get_stat(chip, port, stat, data);
@@ -1522,13 +1513,6 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
mv88e6xxx_reg_unlock(chip);
}
-static int mv88e6xxx_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_keee *e)
-{
- /* Nothing to do on the port's MAC */
- return 0;
-}
-
static int mv88e6xxx_set_mac_eee(struct dsa_switch *ds, int port,
struct ethtool_keee *e)
{
@@ -1868,6 +1852,8 @@ static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid,
if (!chip->info->ops->vtu_getnext)
return -EOPNOTSUPP;
+ memset(entry, 0, sizeof(*entry));
+
entry->vid = vid ? vid - 1 : mv88e6xxx_max_vid(chip);
entry->valid = false;
@@ -1976,7 +1962,16 @@ static int mv88e6xxx_mst_put(struct mv88e6xxx_chip *chip, u8 sid)
struct mv88e6xxx_mst *mst, *tmp;
int err;
- if (!sid)
+ /* If the SID is zero, it is for a VLAN mapped to the default MSTI,
+ * and mv88e6xxx_stu_setup() made sure it is always present, and thus,
+ * should not be removed here.
+ *
+ * If the chip lacks STU support, numerically the "sid" variable will
+ * happen to also be zero, but we don't want to rely on that fact, so
+ * we explicitly test that first. In that case, there is also nothing
+ * to do here.
+ */
+ if (!mv88e6xxx_has_stu(chip) || !sid)
return 0;
list_for_each_entry_safe(mst, tmp, &chip->msts, node) {
@@ -2224,13 +2219,11 @@ mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
return err;
}
-static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
- const unsigned char *addr, u16 vid,
- u8 state)
+static int mv88e6xxx_port_db_get(struct mv88e6xxx_chip *chip,
+ const unsigned char *addr, u16 vid,
+ u16 *fid, struct mv88e6xxx_atu_entry *entry)
{
- struct mv88e6xxx_atu_entry entry;
struct mv88e6xxx_vtu_entry vlan;
- u16 fid;
int err;
/* Ports have two private address databases: one for when the port is
@@ -2241,7 +2234,7 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
* VLAN ID into the port's database used for VLAN-unaware bridging.
*/
if (vid == 0) {
- fid = MV88E6XXX_FID_BRIDGED;
+ *fid = MV88E6XXX_FID_BRIDGED;
} else {
err = mv88e6xxx_vtu_get(chip, vid, &vlan);
if (err)
@@ -2251,14 +2244,39 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
if (!vlan.valid)
return -EOPNOTSUPP;
- fid = vlan.fid;
+ *fid = vlan.fid;
}
- entry.state = 0;
- ether_addr_copy(entry.mac, addr);
- eth_addr_dec(entry.mac);
+ entry->state = 0;
+ ether_addr_copy(entry->mac, addr);
+ eth_addr_dec(entry->mac);
+
+ return mv88e6xxx_g1_atu_getnext(chip, *fid, entry);
+}
- err = mv88e6xxx_g1_atu_getnext(chip, fid, &entry);
+static bool mv88e6xxx_port_db_find(struct mv88e6xxx_chip *chip,
+ const unsigned char *addr, u16 vid)
+{
+ struct mv88e6xxx_atu_entry entry;
+ u16 fid;
+ int err;
+
+ err = mv88e6xxx_port_db_get(chip, addr, vid, &fid, &entry);
+ if (err)
+ return false;
+
+ return entry.state && ether_addr_equal(entry.mac, addr);
+}
+
+static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
+ const unsigned char *addr, u16 vid,
+ u8 state)
+{
+ struct mv88e6xxx_atu_entry entry;
+ u16 fid;
+ int err;
+
+ err = mv88e6xxx_port_db_get(chip, addr, vid, &fid, &entry);
if (err)
return err;
@@ -2862,6 +2880,13 @@ static int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid,
MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC);
+ if (err)
+ goto out;
+
+ if (!mv88e6xxx_port_db_find(chip, addr, vid))
+ err = -ENOSPC;
+
+out:
mv88e6xxx_reg_unlock(chip);
return err;
@@ -3660,6 +3685,21 @@ static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_stats_clear(chip);
}
+static int mv88e6320_setup_errata(struct mv88e6xxx_chip *chip)
+{
+ u16 dummy;
+ int err;
+
+ /* Workaround for erratum
+ * 3.3 RGMII timing may be out of spec when transmit delay is enabled
+ */
+ err = mv88e6xxx_port_hidden_write(chip, 0, 0xf, 0x7, 0xe000);
+ if (err)
+ return err;
+
+ return mv88e6xxx_port_hidden_read(chip, 0, 0xf, 0x7, &dummy);
+}
+
/* Check if the errata has already been applied. */
static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip)
{
@@ -3925,6 +3965,8 @@ static void mv88e6xxx_teardown(struct dsa_switch *ds)
mv88e6xxx_teardown_devlink_params(ds);
dsa_devlink_resources_unregister(ds);
mv88e6xxx_teardown_devlink_regions_global(ds);
+ mv88e6xxx_hwtstamp_free(chip);
+ mv88e6xxx_ptp_free(chip);
mv88e6xxx_mdios_unregister(chip);
}
@@ -4065,7 +4107,7 @@ unlock:
mv88e6xxx_reg_unlock(chip);
if (err)
- goto out_mdios;
+ goto out_hwtstamp;
/* Have to be called without holding the register lock, since
* they take the devlink lock, and we later take the locks in
@@ -4074,7 +4116,7 @@ unlock:
*/
err = mv88e6xxx_setup_devlink_resources(ds);
if (err)
- goto out_mdios;
+ goto out_hwtstamp;
err = mv88e6xxx_setup_devlink_params(ds);
if (err)
@@ -4090,7 +4132,9 @@ out_params:
mv88e6xxx_teardown_devlink_params(ds);
out_resources:
dsa_devlink_resources_unregister(ds);
-out_mdios:
+out_hwtstamp:
+ mv88e6xxx_hwtstamp_free(chip);
+ mv88e6xxx_ptp_free(chip);
mv88e6xxx_mdios_unregister(chip);
return err;
@@ -5048,7 +5092,7 @@ static const struct mv88e6xxx_ops mv88e6250_ops = {
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
.avb_ops = &mv88e6352_avb_ops,
- .ptp_ops = &mv88e6250_ptp_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
.phylink_get_caps = mv88e6250_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -5116,6 +5160,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
static const struct mv88e6xxx_ops mv88e6320_ops = {
/* MV88E6XXX_FAMILY_6320 */
+ .setup_errata = mv88e6320_setup_errata,
.ieee_pri_map = mv88e6085_g1_ieee_pri_map,
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
@@ -5131,6 +5176,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
@@ -5155,8 +5201,10 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
- .vtu_getnext = mv88e6185_g1_vtu_getnext,
- .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -5165,6 +5213,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
static const struct mv88e6xxx_ops mv88e6321_ops = {
/* MV88E6XXX_FAMILY_6320 */
+ .setup_errata = mv88e6320_setup_errata,
.ieee_pri_map = mv88e6085_g1_ieee_pri_map,
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
@@ -5180,6 +5229,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
@@ -5203,8 +5253,10 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
- .vtu_getnext = mv88e6185_g1_vtu_getnext,
- .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -5645,6 +5697,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 5,
+ .stats_type = STATS_TYPE_BANK0,
.atu_move_port_mask = 0xf,
.dual_chip = true,
.ops = &mv88e6250_ops,
@@ -5665,6 +5718,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 5,
+ .stats_type = STATS_TYPE_BANK0,
.atu_move_port_mask = 0xf,
.dual_chip = true,
.ops = &mv88e6250_ops,
@@ -5687,6 +5741,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 8,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5708,6 +5763,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 8,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.multi_chip = true,
.ops = &mv88e6095_ops,
@@ -5730,6 +5786,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 8,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5754,6 +5811,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5776,6 +5834,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 9,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.multi_chip = true,
.ops = &mv88e6131_ops,
@@ -5797,9 +5856,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 3750,
- .atu_move_port_mask = 0x1f,
+ .atu_move_port_mask = 0xf,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
@@ -5823,6 +5883,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5848,6 +5909,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5872,6 +5934,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5897,6 +5960,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5921,6 +5985,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5946,6 +6011,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5968,6 +6034,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 8,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
@@ -5992,6 +6059,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.pvt = true,
.multi_chip = true,
.atu_move_port_mask = 0x1f,
@@ -6016,6 +6084,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6039,6 +6108,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6063,6 +6133,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 10,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6087,6 +6158,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 10,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6114,6 +6186,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0,
.atu_move_port_mask = 0xf,
.dual_chip = true,
.ptp_support = true,
@@ -6138,6 +6211,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -6161,6 +6235,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0,
.atu_move_port_mask = 0xf,
.dual_chip = true,
.ptp_support = true,
@@ -6184,6 +6259,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6198,9 +6274,11 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_macs = 8192,
.num_ports = 7,
- .num_internal_phys = 5,
+ .num_internal_phys = 2,
+ .internal_phys_offset = 3,
.num_gpio = 15,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -6208,6 +6286,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 8,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -6223,9 +6302,11 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_macs = 8192,
.num_ports = 7,
- .num_internal_phys = 5,
+ .num_internal_phys = 2,
+ .internal_phys_offset = 3,
.num_gpio = 15,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -6233,7 +6314,9 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 8,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0xf,
+ .pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ptp_support = true,
@@ -6256,9 +6339,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 3750,
- .atu_move_port_mask = 0x1f,
+ .atu_move_port_mask = 0xf,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
@@ -6283,6 +6367,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -6307,6 +6392,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -6332,6 +6418,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -6359,6 +6446,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 10,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6383,6 +6471,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6408,6 +6497,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6433,6 +6523,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 10,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6596,6 +6687,13 @@ static int mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid,
MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC);
+ if (err)
+ goto out;
+
+ if (!mv88e6xxx_port_db_find(chip, mdb->addr, mdb->vid))
+ err = -ENOSPC;
+
+out:
mv88e6xxx_reg_unlock(chip);
return err;
@@ -7074,7 +7172,7 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.get_sset_count = mv88e6xxx_get_sset_count,
.port_max_mtu = mv88e6xxx_get_max_mtu,
.port_change_mtu = mv88e6xxx_change_mtu,
- .get_mac_eee = mv88e6xxx_get_mac_eee,
+ .support_eee = dsa_supports_eee,
.set_mac_eee = mv88e6xxx_set_mac_eee,
.get_eeprom_len = mv88e6xxx_get_eeprom_len,
.get_eeprom = mv88e6xxx_get_eeprom,
@@ -7267,13 +7365,13 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
err = mv88e6xxx_switch_reset(chip);
mv88e6xxx_reg_unlock(chip);
if (err)
- goto out;
+ goto out_phy;
if (np) {
chip->irq = of_irq_get(np, 0);
if (chip->irq == -EPROBE_DEFER) {
err = chip->irq;
- goto out;
+ goto out_phy;
}
}
@@ -7292,7 +7390,7 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
mv88e6xxx_reg_unlock(chip);
if (err)
- goto out;
+ goto out_phy;
if (chip->info->g2_irqs > 0) {
err = mv88e6xxx_g2_irq_setup(chip);
@@ -7326,6 +7424,8 @@ out_g1_irq:
mv88e6xxx_g1_irq_free(chip);
else
mv88e6xxx_irq_poll_free(chip);
+out_phy:
+ mv88e6xxx_phy_destroy(chip);
out:
if (pdata)
dev_put(pdata->netdev);
@@ -7343,12 +7443,6 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
chip = ds->priv;
- if (chip->info->ptp_support) {
- mv88e6xxx_hwtstamp_free(chip);
- mv88e6xxx_ptp_free(chip);
- }
-
- mv88e6xxx_phy_destroy(chip);
mv88e6xxx_unregister_switch(chip);
mv88e6xxx_g1_vtu_prob_irq_free(chip);
@@ -7361,6 +7455,8 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
mv88e6xxx_g1_irq_free(chip);
else
mv88e6xxx_irq_poll_free(chip);
+
+ mv88e6xxx_phy_destroy(chip);
}
static void mv88e6xxx_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 9fe8e8a7856b..2f211e55cb47 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -144,6 +144,7 @@ struct mv88e6xxx_info {
unsigned int age_time_coeff;
unsigned int g1_irqs;
unsigned int g2_irqs;
+ int stats_type;
bool pvt;
/* Mark certain ports as invalid. This is required for example for the
@@ -240,7 +241,7 @@ struct mv88e6xxx_port_hwtstamp {
u16 tx_seq_id;
/* Current timestamp configuration */
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
};
enum mv88e6xxx_policy_mapping {
@@ -423,8 +424,6 @@ struct mv88e6xxx_chip {
struct ptp_clock_info ptp_clock_info;
struct delayed_work tai_event_work;
struct ptp_pin_desc pin_config[MV88E6XXX_MAX_GPIO];
- u16 trig_config;
- u16 evcap_config;
u16 enable_count;
/* Current ingress and egress monitor ports */
@@ -731,7 +730,7 @@ struct mv88e6xxx_avb_ops {
};
struct mv88e6xxx_ptp_ops {
- u64 (*clock_read)(const struct cyclecounter *cc);
+ u64 (*clock_read)(struct cyclecounter *cc);
int (*ptp_enable)(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on);
int (*ptp_verify)(struct ptp_clock_info *ptp, unsigned int pin,
diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c
index 795c8df7b6a7..da69e0b85879 100644
--- a/drivers/net/dsa/mv88e6xxx/devlink.c
+++ b/drivers/net/dsa/mv88e6xxx/devlink.c
@@ -376,19 +376,14 @@ static int mv88e6xxx_region_atu_snapshot(struct devlink *dl,
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
struct mv88e6xxx_devlink_atu_entry *table;
struct mv88e6xxx_chip *chip = ds->priv;
- int fid = -1, err = 0, count;
+ int fid = -1, err = 0, count = 0;
- table = kmalloc_array(mv88e6xxx_num_databases(chip),
- sizeof(struct mv88e6xxx_devlink_atu_entry),
- GFP_KERNEL);
+ table = kcalloc(mv88e6xxx_num_databases(chip),
+ sizeof(struct mv88e6xxx_devlink_atu_entry),
+ GFP_KERNEL);
if (!table)
return -ENOMEM;
- memset(table, 0, mv88e6xxx_num_databases(chip) *
- sizeof(struct mv88e6xxx_devlink_atu_entry));
-
- count = 0;
-
mv88e6xxx_reg_lock(chip);
while (1) {
@@ -647,7 +642,7 @@ static struct mv88e6xxx_region_priv mv88e6xxx_region_global1_priv = {
.id = MV88E6XXX_REGION_GLOBAL1,
};
-static struct devlink_region_ops mv88e6xxx_region_global1_ops = {
+static const struct devlink_region_ops mv88e6xxx_region_global1_ops = {
.name = "global1",
.snapshot = mv88e6xxx_region_global_snapshot,
.destructor = kfree,
@@ -658,32 +653,32 @@ static struct mv88e6xxx_region_priv mv88e6xxx_region_global2_priv = {
.id = MV88E6XXX_REGION_GLOBAL2,
};
-static struct devlink_region_ops mv88e6xxx_region_global2_ops = {
+static const struct devlink_region_ops mv88e6xxx_region_global2_ops = {
.name = "global2",
.snapshot = mv88e6xxx_region_global_snapshot,
.destructor = kfree,
.priv = &mv88e6xxx_region_global2_priv,
};
-static struct devlink_region_ops mv88e6xxx_region_atu_ops = {
+static const struct devlink_region_ops mv88e6xxx_region_atu_ops = {
.name = "atu",
.snapshot = mv88e6xxx_region_atu_snapshot,
.destructor = kfree,
};
-static struct devlink_region_ops mv88e6xxx_region_vtu_ops = {
+static const struct devlink_region_ops mv88e6xxx_region_vtu_ops = {
.name = "vtu",
.snapshot = mv88e6xxx_region_vtu_snapshot,
.destructor = kfree,
};
-static struct devlink_region_ops mv88e6xxx_region_stu_ops = {
+static const struct devlink_region_ops mv88e6xxx_region_stu_ops = {
.name = "stu",
.snapshot = mv88e6xxx_region_stu_snapshot,
.destructor = kfree,
};
-static struct devlink_region_ops mv88e6xxx_region_pvt_ops = {
+static const struct devlink_region_ops mv88e6xxx_region_pvt_ops = {
.name = "pvt",
.snapshot = mv88e6xxx_region_pvt_snapshot,
.destructor = kfree,
@@ -696,13 +691,13 @@ static const struct devlink_port_region_ops mv88e6xxx_region_port_ops = {
};
struct mv88e6xxx_region {
- struct devlink_region_ops *ops;
+ const struct devlink_region_ops *ops;
u64 size;
bool (*cond)(struct mv88e6xxx_chip *chip);
};
-static struct mv88e6xxx_region mv88e6xxx_regions[] = {
+static const struct mv88e6xxx_region mv88e6xxx_regions[] = {
[MV88E6XXX_REGION_GLOBAL1] = {
.ops = &mv88e6xxx_region_global1_ops,
.size = 32 * sizeof(u16)
@@ -736,7 +731,8 @@ void mv88e6xxx_teardown_devlink_regions_global(struct dsa_switch *ds)
int i;
for (i = 0; i < ARRAY_SIZE(mv88e6xxx_regions); i++)
- dsa_devlink_region_destroy(chip->regions[i]);
+ if (chip->regions[i])
+ dsa_devlink_region_destroy(chip->regions[i]);
}
void mv88e6xxx_teardown_devlink_regions_port(struct dsa_switch *ds, int port)
@@ -767,7 +763,7 @@ int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds)
{
bool (*cond)(struct mv88e6xxx_chip *chip);
struct mv88e6xxx_chip *chip = ds->priv;
- struct devlink_region_ops *ops;
+ const struct devlink_region_ops *ops;
struct devlink_region *region;
u64 size;
int i, j;
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index b2b5f6ba438f..30a6ffa7817b 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -1154,8 +1154,8 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
if (err)
return err;
- chip->g2_irq.domain = irq_domain_add_simple(
- chip->dev->of_node, 16, 0, &mv88e6xxx_g2_irq_domain_ops, chip);
+ chip->g2_irq.domain = irq_domain_create_simple(dev_fwnode(chip->dev), 16, 0,
+ &mv88e6xxx_g2_irq_domain_ops, chip);
if (!chip->g2_irq.domain)
return -ENOMEM;
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
index 49e6e1355142..6e6472a3b75a 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
@@ -89,7 +89,7 @@ int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
}
static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
@@ -169,42 +169,38 @@ static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port,
}
int mv88e6xxx_port_hwtstamp_set(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
- struct hwtstamp_config config;
int err;
if (!chip->info->ptp_support)
return -EOPNOTSUPP;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- err = mv88e6xxx_set_hwtstamp_config(chip, port, &config);
+ err = mv88e6xxx_set_hwtstamp_config(chip, port, config);
if (err)
return err;
/* Save the chosen configuration to be returned later. */
- memcpy(&ps->tstamp_config, &config, sizeof(config));
+ ps->tstamp_config = *config;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
int mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *config)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
- struct hwtstamp_config *config = &ps->tstamp_config;
if (!chip->info->ptp_support)
return -EOPNOTSUPP;
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
- -EFAULT : 0;
+ *config = ps->tstamp_config;
+
+ return 0;
}
/* Returns a pointer to the PTP header if the caller should time stamp,
@@ -574,7 +570,7 @@ int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip)
}
/* Set the ethertype of L2 PTP messages */
- err = mv88e6xxx_ptp_write(chip, MV88E6XXX_PTP_GC_ETYPE, ETH_P_1588);
+ err = mv88e6xxx_ptp_write(chip, MV88E6XXX_PTP_ETHERTYPE, ETH_P_1588);
if (err)
return err;
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.h b/drivers/net/dsa/mv88e6xxx/hwtstamp.h
index 85acc758e3eb..c359821d5a6e 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.h
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.h
@@ -111,9 +111,10 @@
#ifdef CONFIG_NET_DSA_MV88E6XXX_PTP
int mv88e6xxx_port_hwtstamp_set(struct dsa_switch *ds, int port,
- struct ifreq *ifr);
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack);
int mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds, int port,
- struct ifreq *ifr);
+ struct kernel_hwtstamp_config *cfg);
bool mv88e6xxx_port_rxtstamp(struct dsa_switch *ds, int port,
struct sk_buff *clone, unsigned int type);
@@ -123,6 +124,7 @@ void mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
struct kernel_ethtool_ts_info *info);
+long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp);
int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip);
void mv88e6xxx_hwtstamp_free(struct mv88e6xxx_chip *chip);
int mv88e6352_hwtstamp_port_enable(struct mv88e6xxx_chip *chip, int port);
@@ -132,14 +134,17 @@ int mv88e6165_global_disable(struct mv88e6xxx_chip *chip);
#else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */
-static inline int mv88e6xxx_port_hwtstamp_set(struct dsa_switch *ds,
- int port, struct ifreq *ifr)
+static inline int
+mv88e6xxx_port_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
-static inline int mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds,
- int port, struct ifreq *ifr)
+static inline int
+mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/net/dsa/mv88e6xxx/leds.c b/drivers/net/dsa/mv88e6xxx/leds.c
index 1c88bfaea46b..ab3bc645da56 100644
--- a/drivers/net/dsa/mv88e6xxx/leds.c
+++ b/drivers/net/dsa/mv88e6xxx/leds.c
@@ -779,7 +779,8 @@ int mv88e6xxx_port_setup_leds(struct mv88e6xxx_chip *chip, int port)
continue;
if (led_num > 1) {
dev_err(dev, "invalid LED specified port %d\n", port);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_put_led;
}
if (led_num == 0)
@@ -823,17 +824,25 @@ int mv88e6xxx_port_setup_leds(struct mv88e6xxx_chip *chip, int port)
init_data.devname_mandatory = true;
init_data.devicename = kasprintf(GFP_KERNEL, "%s:0%d:0%d", chip->info->name,
port, led_num);
- if (!init_data.devicename)
- return -ENOMEM;
+ if (!init_data.devicename) {
+ ret = -ENOMEM;
+ goto err_put_led;
+ }
ret = devm_led_classdev_register_ext(dev, l, &init_data);
kfree(init_data.devicename);
if (ret) {
dev_err(dev, "Failed to init LED %d for port %d", led_num, port);
- return ret;
+ goto err_put_led;
}
}
+ fwnode_handle_put(leds);
return 0;
+
+err_put_led:
+ fwnode_handle_put(led);
+ fwnode_handle_put(leds);
+ return ret;
}
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-6185.c b/drivers/net/dsa/mv88e6xxx/pcs-6185.c
index 5a27d047a38e..af7e06d265f7 100644
--- a/drivers/net/dsa/mv88e6xxx/pcs-6185.c
+++ b/drivers/net/dsa/mv88e6xxx/pcs-6185.c
@@ -55,6 +55,7 @@ static irqreturn_t mv88e6185_pcs_handle_irq(int irq, void *dev_id)
}
static void mv88e6185_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mv88e6185_pcs *mpcs = pcs_to_mv88e6185_pcs(pcs);
@@ -137,7 +138,6 @@ static int mv88e6185_pcs_init(struct mv88e6xxx_chip *chip, int port)
mpcs->chip = chip;
mpcs->port = port;
mpcs->phylink_pcs.ops = &mv88e6185_phylink_pcs_ops;
- mpcs->phylink_pcs.neg_mode = true;
irq = mv88e6xxx_serdes_irq_mapping(chip, port);
if (irq) {
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-6352.c b/drivers/net/dsa/mv88e6xxx/pcs-6352.c
index 88f624b65470..36993400837e 100644
--- a/drivers/net/dsa/mv88e6xxx/pcs-6352.c
+++ b/drivers/net/dsa/mv88e6xxx/pcs-6352.c
@@ -158,6 +158,7 @@ static void marvell_c22_pcs_disable(struct phylink_pcs *pcs)
}
static void marvell_c22_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct marvell_c22_pcs *mpcs = pcs_to_marvell_c22_pcs(pcs);
@@ -274,7 +275,6 @@ static struct marvell_c22_pcs *marvell_c22_pcs_alloc(struct device *dev,
mpcs->mdio.bus = bus;
mpcs->mdio.addr = addr;
mpcs->phylink_pcs.ops = &marvell_c22_pcs_ops;
- mpcs->phylink_pcs.neg_mode = true;
return mpcs;
}
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-639x.c b/drivers/net/dsa/mv88e6xxx/pcs-639x.c
index d758a6c1b226..5db17c0b77f5 100644
--- a/drivers/net/dsa/mv88e6xxx/pcs-639x.c
+++ b/drivers/net/dsa/mv88e6xxx/pcs-639x.c
@@ -9,6 +9,7 @@
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/mii.h>
+#include <linux/string_choices.h>
#include "chip.h"
#include "global2.h"
@@ -257,6 +258,7 @@ static int mv88e639x_sgmii_pcs_post_config(struct phylink_pcs *pcs,
}
static void mv88e639x_sgmii_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
@@ -395,6 +397,7 @@ static void mv88e639x_xg_pcs_disable(struct mv88e639x_pcs *mpcs)
}
static void mv88e639x_xg_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
@@ -562,9 +565,7 @@ static int mv88e6390_pcs_init(struct mv88e6xxx_chip *chip, int port)
return -ENOMEM;
mpcs->sgmii_pcs.ops = &mv88e639x_sgmii_pcs_ops;
- mpcs->sgmii_pcs.neg_mode = true;
mpcs->xg_pcs.ops = &mv88e6390_xg_pcs_ops;
- mpcs->xg_pcs.neg_mode = true;
if (chip->info->prod_num == MV88E6XXX_PORT_SWITCH_ID_PROD_6190X ||
chip->info->prod_num == MV88E6XXX_PORT_SWITCH_ID_PROD_6390X)
@@ -748,7 +749,7 @@ static int mv88e6393x_sgmii_apply_2500basex_an(struct mv88e639x_pcs *mpcs,
if (err)
dev_err(mpcs->mdio.dev.parent,
"failed to %s 2500basex fix: %pe\n",
- enable ? "enable" : "disable", ERR_PTR(err));
+ str_enable_disable(enable), ERR_PTR(err));
return err;
}
@@ -889,6 +890,7 @@ static int mv88e6393x_xg_pcs_post_config(struct phylink_pcs *pcs,
}
static void mv88e6393x_xg_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
@@ -896,7 +898,7 @@ static void mv88e6393x_xg_pcs_get_state(struct phylink_pcs *pcs,
int err;
if (state->interface != PHY_INTERFACE_MODE_USXGMII)
- return mv88e639x_xg_pcs_get_state(pcs, state);
+ return mv88e639x_xg_pcs_get_state(pcs, neg_mode, state);
state->link = false;
@@ -941,9 +943,7 @@ static int mv88e6393x_pcs_init(struct mv88e6xxx_chip *chip, int port)
return -ENOMEM;
mpcs->sgmii_pcs.ops = &mv88e6393x_sgmii_pcs_ops;
- mpcs->sgmii_pcs.neg_mode = true;
mpcs->xg_pcs.ops = &mv88e6393x_xg_pcs_ops;
- mpcs->xg_pcs.neg_mode = true;
mpcs->supports_5g = true;
err = mv88e6393x_erratum_4_6(mpcs);
diff --git a/drivers/net/dsa/mv88e6xxx/phy.c b/drivers/net/dsa/mv88e6xxx/phy.c
index 8bb88b3d900d..4e7827ee684a 100644
--- a/drivers/net/dsa/mv88e6xxx/phy.c
+++ b/drivers/net/dsa/mv88e6xxx/phy.c
@@ -182,7 +182,7 @@ static void mv88e6xxx_phy_ppu_reenable_work(struct work_struct *ugly)
static void mv88e6xxx_phy_ppu_reenable_timer(struct timer_list *t)
{
- struct mv88e6xxx_chip *chip = from_timer(chip, t, ppu_timer);
+ struct mv88e6xxx_chip *chip = timer_container_of(chip, t, ppu_timer);
schedule_work(&chip->ppu_work);
}
@@ -206,7 +206,7 @@ static int mv88e6xxx_phy_ppu_access_get(struct mv88e6xxx_chip *chip)
}
chip->ppu_disabled = 1;
} else {
- del_timer(&chip->ppu_timer);
+ timer_delete(&chip->ppu_timer);
ret = 0;
}
@@ -229,7 +229,10 @@ static void mv88e6xxx_phy_ppu_state_init(struct mv88e6xxx_chip *chip)
static void mv88e6xxx_phy_ppu_state_destroy(struct mv88e6xxx_chip *chip)
{
- del_timer_sync(&chip->ppu_timer);
+ mutex_lock(&chip->ppu_mutex);
+ timer_delete_sync(&chip->ppu_timer);
+ cancel_work_sync(&chip->ppu_work);
+ mutex_unlock(&chip->ppu_mutex);
}
int mv88e6185_phy_ppu_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index dc777ddce1f3..66b1b7277281 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -13,6 +13,7 @@
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/property.h>
+#include <linux/string_choices.h>
#include "chip.h"
#include "global2.h"
@@ -176,7 +177,7 @@ int mv88e6xxx_port_set_link(struct mv88e6xxx_chip *chip, int port, int link)
dev_dbg(chip->dev, "p%d: %s link %s\n", port,
reg & MV88E6XXX_PORT_MAC_CTL_FORCE_LINK ? "Force" : "Unforce",
- reg & MV88E6XXX_PORT_MAC_CTL_LINK_UP ? "up" : "down");
+ str_up_down(reg & MV88E6XXX_PORT_MAC_CTL_LINK_UP));
return 0;
}
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c
index aed4a4b07f34..f7603573d3a9 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.c
+++ b/drivers/net/dsa/mv88e6xxx/ptp.c
@@ -138,13 +138,13 @@ mv88e6xxx_cc_coeff_get(struct mv88e6xxx_chip *chip)
}
}
-static u64 mv88e6352_ptp_clock_read(const struct cyclecounter *cc)
+static u64 mv88e6352_ptp_clock_read(struct cyclecounter *cc)
{
struct mv88e6xxx_chip *chip = cc_to_chip(cc);
u16 phc_time[2];
int err;
- err = mv88e6xxx_tai_read(chip, MV88E6XXX_TAI_TIME_LO, phc_time,
+ err = mv88e6xxx_tai_read(chip, MV88E6352_TAI_TIME_LO, phc_time,
ARRAY_SIZE(phc_time));
if (err)
return 0;
@@ -152,13 +152,13 @@ static u64 mv88e6352_ptp_clock_read(const struct cyclecounter *cc)
return ((u32)phc_time[1] << 16) | phc_time[0];
}
-static u64 mv88e6165_ptp_clock_read(const struct cyclecounter *cc)
+static u64 mv88e6165_ptp_clock_read(struct cyclecounter *cc)
{
struct mv88e6xxx_chip *chip = cc_to_chip(cc);
u16 phc_time[2];
int err;
- err = mv88e6xxx_tai_read(chip, MV88E6XXX_PTP_GC_TIME_LO, phc_time,
+ err = mv88e6xxx_tai_read(chip, MV88E6165_PTP_GC_TIME_LO, phc_time,
ARRAY_SIZE(phc_time));
if (err)
return 0;
@@ -167,42 +167,26 @@ static u64 mv88e6165_ptp_clock_read(const struct cyclecounter *cc)
}
/* mv88e6352_config_eventcap - configure TAI event capture
- * @event: PTP_CLOCK_PPS (internal) or PTP_CLOCK_EXTTS (external)
* @rising: zero for falling-edge trigger, else rising-edge trigger
*
* This will also reset the capture sequence counter.
*/
-static int mv88e6352_config_eventcap(struct mv88e6xxx_chip *chip, int event,
- int rising)
+static int mv88e6352_config_eventcap(struct mv88e6xxx_chip *chip, int rising)
{
- u16 global_config;
- u16 cap_config;
+ u16 evcap_config;
int err;
- chip->evcap_config = MV88E6XXX_TAI_CFG_CAP_OVERWRITE |
- MV88E6XXX_TAI_CFG_CAP_CTR_START;
+ evcap_config = MV88E6352_TAI_CFG_CAP_OVERWRITE |
+ MV88E6352_TAI_CFG_CAP_CTR_START;
if (!rising)
- chip->evcap_config |= MV88E6XXX_TAI_CFG_EVREQ_FALLING;
+ evcap_config |= MV88E6352_TAI_CFG_EVREQ_FALLING;
- global_config = (chip->evcap_config | chip->trig_config);
- err = mv88e6xxx_tai_write(chip, MV88E6XXX_TAI_CFG, global_config);
+ err = mv88e6xxx_tai_write(chip, MV88E6352_TAI_CFG, evcap_config);
if (err)
return err;
- if (event == PTP_CLOCK_PPS) {
- cap_config = MV88E6XXX_TAI_EVENT_STATUS_CAP_TRIG;
- } else if (event == PTP_CLOCK_EXTTS) {
- /* if STATUS_CAP_TRIG is unset we capture PTP_EVREQ events */
- cap_config = 0;
- } else {
- return -EINVAL;
- }
-
/* Write the capture config; this also clears the capture counter */
- err = mv88e6xxx_tai_write(chip, MV88E6XXX_TAI_EVENT_STATUS,
- cap_config);
-
- return err;
+ return mv88e6xxx_tai_write(chip, MV88E6352_TAI_EVENT_STATUS, 0);
}
static void mv88e6352_tai_event_work(struct work_struct *ugly)
@@ -215,7 +199,7 @@ static void mv88e6352_tai_event_work(struct work_struct *ugly)
int err;
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_tai_read(chip, MV88E6XXX_TAI_EVENT_STATUS,
+ err = mv88e6xxx_tai_read(chip, MV88E6352_TAI_EVENT_STATUS,
status, ARRAY_SIZE(status));
mv88e6xxx_reg_unlock(chip);
@@ -223,19 +207,19 @@ static void mv88e6352_tai_event_work(struct work_struct *ugly)
dev_err(chip->dev, "failed to read TAI status register\n");
return;
}
- if (status[0] & MV88E6XXX_TAI_EVENT_STATUS_ERROR) {
+ if (status[0] & MV88E6352_TAI_EVENT_STATUS_ERROR) {
dev_warn(chip->dev, "missed event capture\n");
return;
}
- if (!(status[0] & MV88E6XXX_TAI_EVENT_STATUS_VALID))
+ if (!(status[0] & MV88E6352_TAI_EVENT_STATUS_VALID))
goto out;
raw_ts = ((u32)status[2] << 16) | status[1];
/* Clear the valid bit so the next timestamp can come in */
- status[0] &= ~MV88E6XXX_TAI_EVENT_STATUS_VALID;
+ status[0] &= ~MV88E6352_TAI_EVENT_STATUS_VALID;
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_tai_write(chip, MV88E6XXX_TAI_EVENT_STATUS, status[0]);
+ err = mv88e6xxx_tai_write(chip, MV88E6352_TAI_EVENT_STATUS, status[0]);
mv88e6xxx_reg_unlock(chip);
if (err) {
dev_err(chip->dev, "failed to write TAI status register\n");
@@ -332,13 +316,6 @@ static int mv88e6352_ptp_enable_extts(struct mv88e6xxx_chip *chip,
int pin;
int err;
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* Reject requests to enable time stamping on both edges. */
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
@@ -362,7 +339,7 @@ static int mv88e6352_ptp_enable_extts(struct mv88e6xxx_chip *chip,
schedule_delayed_work(&chip->tai_event_work,
TAI_EVENT_WORK_INTERVAL);
- err = mv88e6352_config_eventcap(chip, PTP_CLOCK_EXTTS, rising);
+ err = mv88e6352_config_eventcap(chip, rising);
} else {
func = MV88E6352_G2_SCRATCH_GPIO_PCTL_GPIO;
@@ -420,29 +397,6 @@ const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {
(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
};
-const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = {
- .clock_read = mv88e6352_ptp_clock_read,
- .ptp_enable = mv88e6352_ptp_enable,
- .ptp_verify = mv88e6352_ptp_verify,
- .event_work = mv88e6352_tai_event_work,
- .port_enable = mv88e6352_hwtstamp_port_enable,
- .port_disable = mv88e6352_hwtstamp_port_disable,
- .n_ext_ts = 1,
- .arr0_sts_reg = MV88E6XXX_PORT_PTP_ARR0_STS,
- .arr1_sts_reg = MV88E6XXX_PORT_PTP_ARR1_STS,
- .dep_sts_reg = MV88E6XXX_PORT_PTP_DEP_STS,
- .rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
-};
-
const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {
.clock_read = mv88e6352_ptp_clock_read,
.ptp_enable = mv88e6352_ptp_enable,
@@ -490,7 +444,7 @@ const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops = {
(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
};
-static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc)
+static u64 mv88e6xxx_ptp_clock_read(struct cyclecounter *cc)
{
struct mv88e6xxx_chip *chip = cc_to_chip(cc);
@@ -566,6 +520,10 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
chip->ptp_clock_info.verify = ptp_ops->ptp_verify;
chip->ptp_clock_info.do_aux_work = mv88e6xxx_hwtstamp_work;
+ chip->ptp_clock_info.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
+
if (ptp_ops->set_ptp_cpu_port) {
struct dsa_port *dp;
int upstream = 0;
@@ -593,6 +551,7 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
return 0;
}
+/* This must never be called holding the register lock */
void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip)
{
if (chip->ptp_clock) {
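
With the per-request flag screening removed from mv88e6352_ptp_enable_extts(), the driver now advertises what it accepts through ptp_clock_info::supported_extts_flags and lets the PTP core reject everything else before ->enable() runs. A hedged sketch of that declaration (mydrv_* is a placeholder, and a real clock also needs ->enable(), ->gettime64() and friends):

#include <linux/ptp_clock_kernel.h>

static const struct ptp_clock_info mydrv_ptp_info = {
	.owner			= THIS_MODULE,
	.name			= "mydrv",
	.n_ext_ts		= 1,
	/* The core refuses EXTTS requests carrying any flag outside
	 * this set, so the driver no longer open-codes the check.
	 */
	.supported_extts_flags	= PTP_RISING_EDGE |
				  PTP_FALLING_EDGE |
				  PTP_STRICT_FLAGS,
};
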
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.h b/drivers/net/dsa/mv88e6xxx/ptp.h
index 6c4d09adc93c..95bdddb0bf39 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.h
+++ b/drivers/net/dsa/mv88e6xxx/ptp.h
@@ -16,132 +16,56 @@
#include "chip.h"
/* Offset 0x00: TAI Global Config */
-#define MV88E6XXX_TAI_CFG 0x00
-#define MV88E6XXX_TAI_CFG_CAP_OVERWRITE 0x8000
-#define MV88E6XXX_TAI_CFG_CAP_CTR_START 0x4000
-#define MV88E6XXX_TAI_CFG_EVREQ_FALLING 0x2000
-#define MV88E6XXX_TAI_CFG_TRIG_ACTIVE_LO 0x1000
-#define MV88E6XXX_TAI_CFG_IRL_ENABLE 0x0400
-#define MV88E6XXX_TAI_CFG_TRIG_IRQ_EN 0x0200
-#define MV88E6XXX_TAI_CFG_EVREQ_IRQ_EN 0x0100
-#define MV88E6XXX_TAI_CFG_TRIG_LOCK 0x0080
-#define MV88E6XXX_TAI_CFG_BLOCK_UPDATE 0x0008
-#define MV88E6XXX_TAI_CFG_MULTI_PTP 0x0004
-#define MV88E6XXX_TAI_CFG_TRIG_MODE_ONESHOT 0x0002
-#define MV88E6XXX_TAI_CFG_TRIG_ENABLE 0x0001
+#define MV88E6352_TAI_CFG 0x00
+#define MV88E6352_TAI_CFG_CAP_OVERWRITE 0x8000
+#define MV88E6352_TAI_CFG_CAP_CTR_START 0x4000
+#define MV88E6352_TAI_CFG_EVREQ_FALLING 0x2000
+#define MV88E6352_TAI_CFG_TRIG_ACTIVE_LO 0x1000
+#define MV88E6352_TAI_CFG_IRL_ENABLE 0x0400
+#define MV88E6352_TAI_CFG_TRIG_IRQ_EN 0x0200
+#define MV88E6352_TAI_CFG_EVREQ_IRQ_EN 0x0100
+#define MV88E6352_TAI_CFG_TRIG_LOCK 0x0080
+#define MV88E6352_TAI_CFG_BLOCK_UPDATE 0x0008
+#define MV88E6352_TAI_CFG_MULTI_PTP 0x0004
+#define MV88E6352_TAI_CFG_TRIG_MODE_ONESHOT 0x0002
+#define MV88E6352_TAI_CFG_TRIG_ENABLE 0x0001
/* Offset 0x01: Timestamp Clock Period (ps) */
#define MV88E6XXX_TAI_CLOCK_PERIOD 0x01
-/* Offset 0x02/0x03: Trigger Generation Amount */
-#define MV88E6XXX_TAI_TRIG_GEN_AMOUNT_LO 0x02
-#define MV88E6XXX_TAI_TRIG_GEN_AMOUNT_HI 0x03
-
-/* Offset 0x04: Clock Compensation */
-#define MV88E6XXX_TAI_TRIG_CLOCK_COMP 0x04
-
-/* Offset 0x05: Trigger Configuration */
-#define MV88E6XXX_TAI_TRIG_CFG 0x05
-
-/* Offset 0x06: Ingress Rate Limiter Clock Generation Amount */
-#define MV88E6XXX_TAI_IRL_AMOUNT 0x06
-
-/* Offset 0x07: Ingress Rate Limiter Compensation */
-#define MV88E6XXX_TAI_IRL_COMP 0x07
-
-/* Offset 0x08: Ingress Rate Limiter Compensation */
-#define MV88E6XXX_TAI_IRL_COMP_PS 0x08
-
/* Offset 0x09: Event Status */
-#define MV88E6XXX_TAI_EVENT_STATUS 0x09
-#define MV88E6XXX_TAI_EVENT_STATUS_CAP_TRIG 0x4000
-#define MV88E6XXX_TAI_EVENT_STATUS_ERROR 0x0200
-#define MV88E6XXX_TAI_EVENT_STATUS_VALID 0x0100
-#define MV88E6XXX_TAI_EVENT_STATUS_CTR_MASK 0x00ff
-
-/* Offset 0x0A/0x0B: Event Time */
-#define MV88E6XXX_TAI_EVENT_TIME_LO 0x0a
-#define MV88E6XXX_TAI_EVENT_TYPE_HI 0x0b
+#define MV88E6352_TAI_EVENT_STATUS 0x09
+#define MV88E6352_TAI_EVENT_STATUS_ERROR 0x0200
+#define MV88E6352_TAI_EVENT_STATUS_VALID 0x0100
+#define MV88E6352_TAI_EVENT_STATUS_CTR_MASK 0x00ff
+/* Offset 0x0A/0x0B: Event Time Lo/Hi. Always read with Event Status. */
/* Offset 0x0E/0x0F: PTP Global Time */
-#define MV88E6XXX_TAI_TIME_LO 0x0e
-#define MV88E6XXX_TAI_TIME_HI 0x0f
-
-/* Offset 0x10/0x11: Trig Generation Time */
-#define MV88E6XXX_TAI_TRIG_TIME_LO 0x10
-#define MV88E6XXX_TAI_TRIG_TIME_HI 0x11
-
-/* Offset 0x12: Lock Status */
-#define MV88E6XXX_TAI_LOCK_STATUS 0x12
-
-/* Offset 0x00: Ether Type */
-#define MV88E6XXX_PTP_GC_ETYPE 0x00
+#define MV88E6352_TAI_TIME_LO 0x0e
+#define MV88E6352_TAI_TIME_HI 0x0f
/* 6165 Global Control Registers */
-/* Offset 0x00: Ether Type */
-#define MV88E6XXX_PTP_GC_ETYPE 0x00
-
-/* Offset 0x01: Message ID */
-#define MV88E6XXX_PTP_GC_MESSAGE_ID 0x01
-
-/* Offset 0x02: Time Stamp Arrive Time */
-#define MV88E6XXX_PTP_GC_TS_ARR_PTR 0x02
-
-/* Offset 0x03: Port Arrival Interrupt Enable */
-#define MV88E6XXX_PTP_GC_PORT_ARR_INT_EN 0x03
-
-/* Offset 0x04: Port Departure Interrupt Enable */
-#define MV88E6XXX_PTP_GC_PORT_DEP_INT_EN 0x04
-
-/* Offset 0x05: Configuration */
-#define MV88E6XXX_PTP_GC_CONFIG 0x05
-#define MV88E6XXX_PTP_GC_CONFIG_DIS_OVERWRITE BIT(1)
-#define MV88E6XXX_PTP_GC_CONFIG_DIS_TS BIT(0)
-
-/* Offset 0x8: Interrupt Status */
-#define MV88E6XXX_PTP_GC_INT_STATUS 0x08
-
/* Offset 0x9/0xa: Global Time */
-#define MV88E6XXX_PTP_GC_TIME_LO 0x09
-#define MV88E6XXX_PTP_GC_TIME_HI 0x0A
+#define MV88E6165_PTP_GC_TIME_LO 0x09
+#define MV88E6165_PTP_GC_TIME_HI 0x0A
-/* 6165 Per Port Registers */
+/* 6165 Per Port Registers. The arrival and departure registers are a
+ * common block consisting of status, two time registers and the sequence ID
+ */
/* Offset 0: Arrival Time 0 Status */
#define MV88E6165_PORT_PTP_ARR0_STS 0x00
-/* Offset 0x01/0x02: PTP Arrival 0 Time */
-#define MV88E6165_PORT_PTP_ARR0_TIME_LO 0x01
-#define MV88E6165_PORT_PTP_ARR0_TIME_HI 0x02
-
-/* Offset 0x03: PTP Arrival 0 Sequence ID */
-#define MV88E6165_PORT_PTP_ARR0_SEQID 0x03
-
/* Offset 0x04: PTP Arrival 1 Status */
#define MV88E6165_PORT_PTP_ARR1_STS 0x04
-/* Offset 0x05/0x6E: PTP Arrival 1 Time */
-#define MV88E6165_PORT_PTP_ARR1_TIME_LO 0x05
-#define MV88E6165_PORT_PTP_ARR1_TIME_HI 0x06
-
-/* Offset 0x07: PTP Arrival 1 Sequence ID */
-#define MV88E6165_PORT_PTP_ARR1_SEQID 0x07
-
/* Offset 0x08: PTP Departure Status */
#define MV88E6165_PORT_PTP_DEP_STS 0x08
-/* Offset 0x09/0x0a: PTP Deperture Time */
-#define MV88E6165_PORT_PTP_DEP_TIME_LO 0x09
-#define MV88E6165_PORT_PTP_DEP_TIME_HI 0x0a
-
-/* Offset 0x0b: PTP Departure Sequence ID */
-#define MV88E6165_PORT_PTP_DEP_SEQID 0x0b
-
/* Offset 0x0d: Port Status */
#define MV88E6164_PORT_STATUS 0x0d
#ifdef CONFIG_NET_DSA_MV88E6XXX_PTP
-long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp);
int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip);
void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip);
@@ -149,17 +73,11 @@ void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip);
ptp_clock_info)
extern const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops;
-extern const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops;
extern const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops;
extern const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops;
#else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */
-static inline long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp)
-{
- return -1;
-}
-
static inline int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
{
return 0;
@@ -170,7 +88,6 @@ static inline void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip)
}
static const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {};
-static const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = {};
static const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {};
static const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops = {};
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 3aa9c997018a..9e5ede932b42 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -1153,6 +1153,9 @@ static void felix_phylink_get_caps(struct dsa_switch *ds, int port,
__set_bit(ocelot->ports[port]->phy_mode,
config->supported_interfaces);
+ if (ocelot->ports[port]->phy_mode == PHY_INTERFACE_MODE_USXGMII)
+ __set_bit(PHY_INTERFACE_MODE_10G_QXGMII,
+ config->supported_interfaces);
}
static void felix_phylink_mac_config(struct phylink_config *config,
@@ -1230,6 +1233,7 @@ static int felix_port_enable(struct dsa_switch *ds, int port,
{
struct dsa_port *dp = dsa_to_port(ds, port);
struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
if (!dsa_port_is_user(dp))
return 0;
@@ -1243,7 +1247,25 @@ static int felix_port_enable(struct dsa_switch *ds, int port,
}
}
- return 0;
+ if (!dp->hsr_dev || felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q)
+ return 0;
+
+ return dsa_port_simple_hsr_join(ds, port, dp->hsr_dev, NULL);
+}
+
+static void felix_port_disable(struct dsa_switch *ds, int port)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ if (!dsa_port_is_user(dp))
+ return;
+
+ if (!dp->hsr_dev || felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q)
+ return;
+
+ dsa_port_simple_hsr_leave(ds, port, dp->hsr_dev);
}
static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
@@ -1316,6 +1338,14 @@ static void felix_get_eth_phy_stats(struct dsa_switch *ds, int port,
ocelot_port_get_eth_phy_stats(ocelot, port, phy_stats);
}
+static void felix_get_ts_stats(struct dsa_switch *ds, int port,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_ts_stats(ocelot, port, ts_stats);
+}
+
static void felix_get_strings(struct dsa_switch *ds, int port,
u32 stringset, u8 *data)
{
@@ -1351,6 +1381,7 @@ static const u32 felix_phy_match_table[PHY_INTERFACE_MODE_MAX] = {
[PHY_INTERFACE_MODE_SGMII] = OCELOT_PORT_MODE_SGMII,
[PHY_INTERFACE_MODE_QSGMII] = OCELOT_PORT_MODE_QSGMII,
[PHY_INTERFACE_MODE_USXGMII] = OCELOT_PORT_MODE_USXGMII,
+ [PHY_INTERFACE_MODE_10G_QXGMII] = OCELOT_PORT_MODE_10G_QXGMII,
[PHY_INTERFACE_MODE_1000BASEX] = OCELOT_PORT_MODE_1000BASEX,
[PHY_INTERFACE_MODE_2500BASEX] = OCELOT_PORT_MODE_2500BASEX,
};
@@ -1766,22 +1797,25 @@ static void felix_teardown(struct dsa_switch *ds)
}
static int felix_hwtstamp_get(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *config)
{
struct ocelot *ocelot = ds->priv;
- return ocelot_hwstamp_get(ocelot, port, ifr);
+ ocelot_hwstamp_get(ocelot, port, config);
+
+ return 0;
}
static int felix_hwtstamp_set(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
bool using_tag_8021q;
int err;
- err = ocelot_hwstamp_set(ocelot, port, ifr);
+ err = ocelot_hwstamp_set(ocelot, port, config, extack);
if (err)
return err;
@@ -2217,6 +2251,52 @@ static void felix_get_mm_stats(struct dsa_switch *ds, int port,
ocelot_port_get_mm_stats(ocelot, port, stats);
}
+/* Depending on port type, we may be able to support the offload later (with
+ * the "ocelot"/"seville" tagging protocols), or never.
+ * If we return 0, the dp->hsr_dev reference is kept for later; if we return
+ * -EOPNOTSUPP, it is cleared (which avoids bothering
+ * dsa_port_simple_hsr_leave() with an offload that didn't pass validation).
+ */
+static int felix_port_hsr_join(struct dsa_switch *ds, int port,
+ struct net_device *hsr,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ if (felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q) {
+ int err;
+
+ err = dsa_port_simple_hsr_validate(ds, port, hsr, extack);
+ if (err)
+ return err;
+
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offloading not supported with \"ocelot-8021q\"");
+ return 0;
+ }
+
+ if (!(dsa_to_port(ds, port)->user->flags & IFF_UP))
+ return 0;
+
+ return dsa_port_simple_hsr_join(ds, port, hsr, extack);
+}
+
+static int felix_port_hsr_leave(struct dsa_switch *ds, int port,
+ struct net_device *hsr)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ if (felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q)
+ return 0;
+
+ if (!(dsa_to_port(ds, port)->user->flags & IFF_UP))
+ return 0;
+
+ return dsa_port_simple_hsr_leave(ds, port, hsr);
+}
+
static const struct phylink_mac_ops felix_phylink_mac_ops = {
.mac_select_pcs = felix_phylink_mac_select_pcs,
.mac_config = felix_phylink_mac_config,
@@ -2237,6 +2317,7 @@ static const struct dsa_switch_ops felix_switch_ops = {
.get_stats64 = felix_get_stats64,
.get_pause_stats = felix_get_pause_stats,
.get_rmon_stats = felix_get_rmon_stats,
+ .get_ts_stats = felix_get_ts_stats,
.get_eth_ctrl_stats = felix_get_eth_ctrl_stats,
.get_eth_mac_stats = felix_get_eth_mac_stats,
.get_eth_phy_stats = felix_get_eth_phy_stats,
@@ -2246,6 +2327,7 @@ static const struct dsa_switch_ops felix_switch_ops = {
.get_ts_info = felix_get_ts_info,
.phylink_get_caps = felix_phylink_get_caps,
.port_enable = felix_port_enable,
+ .port_disable = felix_port_disable,
.port_fast_age = felix_port_fast_age,
.port_fdb_dump = felix_fdb_dump,
.port_fdb_add = felix_fdb_add,
@@ -2302,6 +2384,8 @@ static const struct dsa_switch_ops felix_switch_ops = {
.port_del_dscp_prio = felix_port_del_dscp_prio,
.port_set_host_flood = felix_port_set_host_flood,
.port_change_conduit = felix_port_change_conduit,
+ .port_hsr_join = felix_port_hsr_join,
+ .port_hsr_leave = felix_port_hsr_leave,
};
int felix_register_switch(struct device *dev, resource_size_t switch_base,
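
Note how the HSR offload is deferred in felix: port_hsr_join() only validates and lets the DSA core keep dp->hsr_dev while the user port is down, and the real join/leave happens in felix_port_enable()/felix_port_disable(). A rough sketch of the join side, assuming the dsa_port_simple_hsr_*() helpers introduced by this series (mydrv_ is a placeholder):

#include <linux/netdevice.h>
#include <net/dsa.h>

static int mydrv_port_hsr_join(struct dsa_switch *ds, int port,
			       struct net_device *hsr,
			       struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_to_port(ds, port);

	/* Port still down: return 0 so the DSA core keeps dp->hsr_dev and
	 * the offload can be programmed later from port_enable().
	 */
	if (!(dp->user->flags & IFF_UP))
		return 0;

	return dsa_port_simple_hsr_join(ds, port, hsr, extack);
}
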
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index 211991f494e3..a657b190c5d7 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -12,8 +12,9 @@
#define OCELOT_PORT_MODE_SGMII BIT(1)
#define OCELOT_PORT_MODE_QSGMII BIT(2)
#define OCELOT_PORT_MODE_2500BASEX BIT(3)
-#define OCELOT_PORT_MODE_USXGMII BIT(4)
+#define OCELOT_PORT_MODE_USXGMII BIT(4) /* compatibility */
#define OCELOT_PORT_MODE_1000BASEX BIT(5)
+#define OCELOT_PORT_MODE_10G_QXGMII BIT(6)
struct device_node;
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 0102a82e88cc..8cf4c8986587 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -24,7 +24,7 @@
#define VSC9959_NUM_PORTS 6
#define VSC9959_TAS_GCL_ENTRY_MAX 63
-#define VSC9959_TAS_MIN_GATE_LEN_NS 33
+#define VSC9959_TAS_MIN_GATE_LEN_NS 35
#define VSC9959_VCAP_POLICER_BASE 63
#define VSC9959_VCAP_POLICER_MAX 383
#define VSC9959_SWITCH_PCI_BAR 4
@@ -34,7 +34,8 @@
OCELOT_PORT_MODE_QSGMII | \
OCELOT_PORT_MODE_1000BASEX | \
OCELOT_PORT_MODE_2500BASEX | \
- OCELOT_PORT_MODE_USXGMII)
+ OCELOT_PORT_MODE_USXGMII | \
+ OCELOT_PORT_MODE_10G_QXGMII)
static const u32 vsc9959_port_modes[VSC9959_NUM_PORTS] = {
VSC9959_PORT_MODE_SERDES,
@@ -1056,11 +1057,15 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
mdiobus_free(felix->imdio);
}
-/* The switch considers any frame (regardless of size) as eligible for
- * transmission if the traffic class gate is open for at least 33 ns.
+/* The switch considers any frame (regardless of size) as eligible
+ * for transmission if the traffic class gate is open for at least
+ * VSC9959_TAS_MIN_GATE_LEN_NS.
+ *
* Overruns are prevented by cropping an interval at the end of the gate time
- * slot for which egress scheduling is blocked, but we need to still keep 33 ns
- * available for one packet to be transmitted, otherwise the port tc will hang.
+ * slot for which egress scheduling is blocked, but we still need to keep
+ * VSC9959_TAS_MIN_GATE_LEN_NS available for one packet to be transmitted,
+ * otherwise the port tc will hang.
+ *
* This function returns the size of a gate interval that remains available for
* setting the guard band, after reserving the space for one egress frame.
*/
@@ -1303,7 +1308,8 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
* per-tc static guard band lengths, so it reduces the
* useful gate interval length. Therefore, be careful
* to calculate a guard band (and therefore max_sdu)
- * that still leaves 33 ns available in the time slot.
+ * that still leaves VSC9959_TAS_MIN_GATE_LEN_NS
+ * available in the time slot.
*/
max_sdu = div_u64(remaining_gate_len_ps, picos_per_byte);
/* A TC gate may be completely closed, which is a
@@ -1538,7 +1544,7 @@ static void vsc9959_tas_clock_adjust(struct ocelot *ocelot)
struct tc_taprio_qopt_offload *taprio;
struct ocelot_port *ocelot_port;
struct timespec64 base_ts;
- int port;
+ int i, port;
u32 val;
mutex_lock(&ocelot->fwd_domain_lock);
@@ -1570,6 +1576,9 @@ static void vsc9959_tas_clock_adjust(struct ocelot *ocelot)
QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M,
QSYS_PARAM_CFG_REG_3);
+ for (i = 0; i < taprio->num_entries; i++)
+ vsc9959_tas_gcl_set(ocelot, i, &taprio->entries[i]);
+
ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
QSYS_TAS_PARAM_CFG_CTRL);
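
The reworded guard-band comments reduce to simple arithmetic: after reserving VSC9959_TAS_MIN_GATE_LEN_NS of a gate interval so one frame can still be released, whatever remains bounds the largest SDU that fits. An illustrative, not driver-exact, calculation with made-up inputs:

#include <linux/math64.h>
#include <linux/types.h>

/* Example only: gate_len_ns and picos_per_byte are hypothetical inputs,
 * not the exact quantities used by vsc9959_tas_guard_bands_update().
 */
static u64 example_max_sdu(u64 gate_len_ns, u32 picos_per_byte,
			   u32 min_gate_len_ns)
{
	u64 remaining_ps;

	if (gate_len_ns <= min_gate_len_ns)
		return 0;	/* gate too short for any frame */

	remaining_ps = (gate_len_ns - min_gate_len_ns) * 1000;

	/* e.g. a 10000 ns gate at 1 Gbps (8000 ps/byte) leaves room for
	 * roughly 1245 bytes once the minimum gate length is reserved.
	 */
	return div_u64(remaining_ps, picos_per_byte);
}
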
diff --git a/drivers/net/dsa/ocelot/ocelot_ext.c b/drivers/net/dsa/ocelot/ocelot_ext.c
index 450bda18ef37..d5c557a20292 100644
--- a/drivers/net/dsa/ocelot/ocelot_ext.c
+++ b/drivers/net/dsa/ocelot/ocelot_ext.c
@@ -109,4 +109,4 @@ module_platform_driver(ocelot_ext_switch_driver);
MODULE_DESCRIPTION("External Ocelot Switch driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(MFD_OCELOT);
+MODULE_IMPORT_NS("MFD_OCELOT");
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index e9f2c67bc15f..0526aa96146e 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -821,8 +821,8 @@ static int ar9331_sw_irq_init(struct ar9331_sw_priv *priv)
return ret;
}
- priv->irqdomain = irq_domain_add_linear(np, 1, &ar9331_sw_irqdomain_ops,
- priv);
+ priv->irqdomain = irq_domain_create_linear(dev_fwnode(dev), 1, &ar9331_sw_irqdomain_ops,
+ priv);
if (!priv->irqdomain) {
dev_err(dev, "failed to create IRQ domain\n");
return -EINVAL;
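
The ar9331 change (and the matching Realtek hunks below) follows the irqdomain API shift: irq_domain_add_linear() took a device_node, while irq_domain_create_linear() takes a fwnode_handle, hence the dev_fwnode()/of_fwnode_handle() wrappers. A minimal sketch (my_irqdomain_ops is a placeholder):

#include <linux/irqdomain.h>
#include <linux/property.h>

static const struct irq_domain_ops my_irqdomain_ops = { };

static struct irq_domain *my_create_domain(struct device *dev,
					   unsigned int nr_irqs, void *priv)
{
	/* fwnode-based variant of the old irq_domain_add_linear() */
	return irq_domain_create_linear(dev_fwnode(dev), nr_irqs,
					&my_irqdomain_ops, priv);
}
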
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index f8d8c70642c4..a36b8b07030e 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -342,7 +342,7 @@ static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
dev_queue_xmit(skb);
ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
- msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
+ QCA8K_ETHERNET_TIMEOUT);
*val = mgmt_eth_data->data[0];
if (len > QCA_HDR_MGMT_DATA1_LEN)
@@ -394,7 +394,7 @@ static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
dev_queue_xmit(skb);
ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
- msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
+ QCA8K_ETHERNET_TIMEOUT);
ack = mgmt_eth_data->ack;
@@ -673,7 +673,7 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
* We therefore need to lock the MDIO bus onto which the switch is
* connected.
*/
- mutex_lock(&priv->bus->mdio_lock);
+ mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
/* Actually start the request:
* 1. Send mdio master packet
@@ -1019,7 +1019,7 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)
of_get_phy_mode(port, &mode);
- if (of_property_read_bool(port, "phy-handle") &&
+ if (of_property_present(port, "phy-handle") &&
mode != PHY_INTERFACE_MODE_INTERNAL)
external_mdio_mask |= BIT(reg);
else
@@ -1491,7 +1491,7 @@ static struct qca8k_pcs *pcs_to_qca8k_pcs(struct phylink_pcs *pcs)
return container_of(pcs, struct qca8k_pcs, pcs);
}
-static void qca8k_pcs_get_state(struct phylink_pcs *pcs,
+static void qca8k_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
@@ -1634,7 +1634,6 @@ static void qca8k_setup_pcs(struct qca8k_priv *priv, struct qca8k_pcs *qpcs,
int port)
{
qpcs->pcs.ops = &qca8k_pcs_ops;
- qpcs->pcs.neg_mode = true;
/* We don't have interrupts for link changes, so we need to poll */
qpcs->pcs.poll = true;
@@ -2016,7 +2015,7 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
.get_ethtool_stats = qca8k_get_ethtool_stats,
.get_sset_count = qca8k_get_sset_count,
.set_ageing_time = qca8k_set_ageing_time,
- .get_mac_eee = qca8k_get_mac_eee,
+ .support_eee = dsa_supports_eee,
.set_mac_eee = qca8k_set_mac_eee,
.port_enable = qca8k_port_enable,
.port_disable = qca8k_port_disable,
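
The timeout hunks pair with the qca8k.h change further down: wait_for_completion_timeout() expects jiffies, so folding msecs_to_jiffies() into QCA8K_ETHERNET_TIMEOUT makes every caller pass the right unit by construction. A small illustration of the difference (MYDRV_TIMEOUT is a placeholder):

#include <linux/completion.h>
#include <linux/jiffies.h>

/* With HZ=100 a raw "5" would mean 5 jiffies = 50 ms, while
 * msecs_to_jiffies(5) rounds 5 ms up to a single jiffy instead.
 */
#define MYDRV_TIMEOUT	msecs_to_jiffies(5)

static bool mydrv_wait(struct completion *done)
{
	return wait_for_completion_timeout(done, MYDRV_TIMEOUT) != 0;
}
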
diff --git a/drivers/net/dsa/qca/qca8k-common.c b/drivers/net/dsa/qca/qca8k-common.c
index 560c74c4ac3d..13005f10edb7 100644
--- a/drivers/net/dsa/qca/qca8k-common.c
+++ b/drivers/net/dsa/qca/qca8k-common.c
@@ -557,13 +557,6 @@ exit:
return ret;
}
-int qca8k_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_keee *e)
-{
- /* Nothing to do on the port's MAC */
- return 0;
-}
-
static int qca8k_port_configure_learning(struct dsa_switch *ds, int port,
bool learning)
{
diff --git a/drivers/net/dsa/qca/qca8k.h b/drivers/net/dsa/qca/qca8k.h
index 3664a2e2f1f6..d046679265fa 100644
--- a/drivers/net/dsa/qca/qca8k.h
+++ b/drivers/net/dsa/qca/qca8k.h
@@ -16,7 +16,7 @@
#define QCA8K_ETHERNET_MDIO_PRIORITY 7
#define QCA8K_ETHERNET_PHY_PRIORITY 6
-#define QCA8K_ETHERNET_TIMEOUT 5
+#define QCA8K_ETHERNET_TIMEOUT msecs_to_jiffies(5)
#define QCA8K_NUM_PORTS 7
#define QCA8K_NUM_CPU_PORTS 2
@@ -520,7 +520,6 @@ int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset);
/* Common eee function */
int qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *eee);
-int qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e);
/* Common bridge function */
void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state);
diff --git a/drivers/net/dsa/realtek/Kconfig b/drivers/net/dsa/realtek/Kconfig
index 6989972eebc3..d6eb6713e5f6 100644
--- a/drivers/net/dsa/realtek/Kconfig
+++ b/drivers/net/dsa/realtek/Kconfig
@@ -43,4 +43,10 @@ config NET_DSA_REALTEK_RTL8366RB
help
Select to enable support for Realtek RTL8366RB.
+config NET_DSA_REALTEK_RTL8366RB_LEDS
+ bool
+ depends on (LEDS_CLASS=y || LEDS_CLASS=NET_DSA_REALTEK_RTL8366RB)
+ depends on NET_DSA_REALTEK_RTL8366RB
+ default NET_DSA_REALTEK_RTL8366RB
+
endif
diff --git a/drivers/net/dsa/realtek/Makefile b/drivers/net/dsa/realtek/Makefile
index 35491dc20d6d..17367bcba496 100644
--- a/drivers/net/dsa/realtek/Makefile
+++ b/drivers/net/dsa/realtek/Makefile
@@ -12,4 +12,7 @@ endif
obj-$(CONFIG_NET_DSA_REALTEK_RTL8366RB) += rtl8366.o
rtl8366-objs := rtl8366-core.o rtl8366rb.o
+ifdef CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS
+rtl8366-objs += rtl8366rb-leds.o
+endif
obj-$(CONFIG_NET_DSA_REALTEK_RTL8365MB) += rtl8365mb.o
diff --git a/drivers/net/dsa/realtek/realtek-mdio.c b/drivers/net/dsa/realtek/realtek-mdio.c
index 5f545dda702b..a5e7dff96e91 100644
--- a/drivers/net/dsa/realtek/realtek-mdio.c
+++ b/drivers/net/dsa/realtek/realtek-mdio.c
@@ -140,7 +140,7 @@ int realtek_mdio_probe(struct mdio_device *mdiodev)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(realtek_mdio_probe, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(realtek_mdio_probe, "REALTEK_DSA");
/**
* realtek_mdio_remove() - Remove the driver of an MDIO-connected switch
@@ -163,7 +163,7 @@ void realtek_mdio_remove(struct mdio_device *mdiodev)
rtl83xx_remove(priv);
}
-EXPORT_SYMBOL_NS_GPL(realtek_mdio_remove, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(realtek_mdio_remove, "REALTEK_DSA");
/**
* realtek_mdio_shutdown() - Shutdown the driver of a MDIO-connected switch
@@ -184,4 +184,4 @@ void realtek_mdio_shutdown(struct mdio_device *mdiodev)
rtl83xx_shutdown(priv);
}
-EXPORT_SYMBOL_NS_GPL(realtek_mdio_shutdown, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(realtek_mdio_shutdown, "REALTEK_DSA");
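
These EXPORT_SYMBOL_NS_GPL()/MODULE_IMPORT_NS() hunks are part of the treewide move to string-literal symbol namespaces; the namespace is now quoted at both the export and the import site. Schematically, with a placeholder helper:

#include <linux/module.h>

int mydrv_helper(void)
{
	return 0;
}
/* Export into the "REALTEK_DSA" namespace (string literal form) */
EXPORT_SYMBOL_NS_GPL(mydrv_helper, "REALTEK_DSA");

/* Any module calling mydrv_helper() must declare the import: */
MODULE_IMPORT_NS("REALTEK_DSA");
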
diff --git a/drivers/net/dsa/realtek/realtek-smi.c b/drivers/net/dsa/realtek/realtek-smi.c
index d750bddf27b4..972e22218418 100644
--- a/drivers/net/dsa/realtek/realtek-smi.c
+++ b/drivers/net/dsa/realtek/realtek-smi.c
@@ -361,7 +361,7 @@ int realtek_smi_probe(struct platform_device *pdev)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(realtek_smi_probe, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(realtek_smi_probe, "REALTEK_DSA");
/**
* realtek_smi_remove() - Remove the driver of a SMI-connected switch
@@ -384,7 +384,7 @@ void realtek_smi_remove(struct platform_device *pdev)
rtl83xx_remove(priv);
}
-EXPORT_SYMBOL_NS_GPL(realtek_smi_remove, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(realtek_smi_remove, "REALTEK_DSA");
/**
* realtek_smi_shutdown() - Shutdown the driver of a SMI-connected switch
@@ -405,4 +405,4 @@ void realtek_smi_shutdown(struct platform_device *pdev)
rtl83xx_shutdown(priv);
}
-EXPORT_SYMBOL_NS_GPL(realtek_smi_shutdown, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(realtek_smi_shutdown, "REALTEK_DSA");
diff --git a/drivers/net/dsa/realtek/realtek.h b/drivers/net/dsa/realtek/realtek.h
index a1b2e0b529d5..c03485a80d93 100644
--- a/drivers/net/dsa/realtek/realtek.h
+++ b/drivers/net/dsa/realtek/realtek.h
@@ -19,9 +19,6 @@
struct phylink_mac_ops;
struct realtek_ops;
-struct dentry;
-struct inode;
-struct file;
struct rtl8366_mib_counter {
unsigned int base;
diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
index 6b9dbdb00941..c575e164368c 100644
--- a/drivers/net/dsa/realtek/rtl8365mb.c
+++ b/drivers/net/dsa/realtek/rtl8365mb.c
@@ -1719,8 +1719,8 @@ static int rtl8365mb_irq_setup(struct realtek_priv *priv)
goto out_put_node;
}
- priv->irqdomain = irq_domain_add_linear(intc, priv->num_ports,
- &rtl8365mb_irqdomain_ops, priv);
+ priv->irqdomain = irq_domain_create_linear(of_fwnode_handle(intc), priv->num_ports,
+ &rtl8365mb_irqdomain_ops, priv);
if (!priv->irqdomain) {
dev_err(priv->dev, "failed to add irq domain\n");
ret = -ENOMEM;
@@ -2134,6 +2134,8 @@ static const struct dsa_switch_ops rtl8365mb_switch_ops = {
.get_stats64 = rtl8365mb_get_stats64,
.port_change_mtu = rtl8365mb_port_change_mtu,
.port_max_mtu = rtl8365mb_port_max_mtu,
+ .port_hsr_join = dsa_port_simple_hsr_join,
+ .port_hsr_leave = dsa_port_simple_hsr_leave,
};
static const struct realtek_ops rtl8365mb_ops = {
@@ -2206,4 +2208,4 @@ module_exit(rtl8365mb_exit);
MODULE_AUTHOR("Alvin Å ipraga <alsi@bang-olufsen.dk>");
MODULE_DESCRIPTION("Driver for RTL8365MB-VC ethernet switch");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(REALTEK_DSA);
+MODULE_IMPORT_NS("REALTEK_DSA");
diff --git a/drivers/net/dsa/realtek/rtl8366-core.c b/drivers/net/dsa/realtek/rtl8366-core.c
index 7c6520ba3a26..047feeed96a2 100644
--- a/drivers/net/dsa/realtek/rtl8366-core.c
+++ b/drivers/net/dsa/realtek/rtl8366-core.c
@@ -34,7 +34,7 @@ int rtl8366_mc_is_used(struct realtek_priv *priv, int mc_index, int *used)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(rtl8366_mc_is_used, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl8366_mc_is_used, "REALTEK_DSA");
/**
* rtl8366_obtain_mc() - retrieve or allocate a VLAN member configuration
@@ -187,7 +187,7 @@ int rtl8366_set_vlan(struct realtek_priv *priv, int vid, u32 member,
return ret;
}
-EXPORT_SYMBOL_NS_GPL(rtl8366_set_vlan, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl8366_set_vlan, "REALTEK_DSA");
int rtl8366_set_pvid(struct realtek_priv *priv, unsigned int port,
unsigned int vid)
@@ -217,7 +217,7 @@ int rtl8366_set_pvid(struct realtek_priv *priv, unsigned int port,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(rtl8366_set_pvid, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl8366_set_pvid, "REALTEK_DSA");
int rtl8366_enable_vlan4k(struct realtek_priv *priv, bool enable)
{
@@ -243,7 +243,7 @@ int rtl8366_enable_vlan4k(struct realtek_priv *priv, bool enable)
priv->vlan4k_enabled = enable;
return 0;
}
-EXPORT_SYMBOL_NS_GPL(rtl8366_enable_vlan4k, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl8366_enable_vlan4k, "REALTEK_DSA");
int rtl8366_enable_vlan(struct realtek_priv *priv, bool enable)
{
@@ -265,7 +265,7 @@ int rtl8366_enable_vlan(struct realtek_priv *priv, bool enable)
return ret;
}
-EXPORT_SYMBOL_NS_GPL(rtl8366_enable_vlan, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl8366_enable_vlan, "REALTEK_DSA");
int rtl8366_reset_vlan(struct realtek_priv *priv)
{
@@ -290,7 +290,7 @@ int rtl8366_reset_vlan(struct realtek_priv *priv)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(rtl8366_reset_vlan, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl8366_reset_vlan, "REALTEK_DSA");
int rtl8366_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
@@ -345,7 +345,7 @@ int rtl8366_vlan_add(struct dsa_switch *ds, int port,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(rtl8366_vlan_add, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl8366_vlan_add, "REALTEK_DSA");
int rtl8366_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
@@ -389,7 +389,7 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(rtl8366_vlan_del, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl8366_vlan_del, "REALTEK_DSA");
void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
uint8_t *data)
@@ -403,7 +403,7 @@ void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
for (i = 0; i < priv->num_mib_counters; i++)
ethtool_puts(&data, priv->mib_counters[i].name);
}
-EXPORT_SYMBOL_NS_GPL(rtl8366_get_strings, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl8366_get_strings, "REALTEK_DSA");
int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
@@ -417,7 +417,7 @@ int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset)
return priv->num_mib_counters;
}
-EXPORT_SYMBOL_NS_GPL(rtl8366_get_sset_count, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl8366_get_sset_count, "REALTEK_DSA");
void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
{
@@ -441,4 +441,4 @@ void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
data[i] = mibvalue;
}
}
-EXPORT_SYMBOL_NS_GPL(rtl8366_get_ethtool_stats, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl8366_get_ethtool_stats, "REALTEK_DSA");
diff --git a/drivers/net/dsa/realtek/rtl8366rb-leds.c b/drivers/net/dsa/realtek/rtl8366rb-leds.c
new file mode 100644
index 000000000000..99c890681ae6
--- /dev/null
+++ b/drivers/net/dsa/realtek/rtl8366rb-leds.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <net/dsa.h>
+#include "rtl83xx.h"
+#include "rtl8366rb.h"
+
+static inline u32 rtl8366rb_led_group_port_mask(u8 led_group, u8 port)
+{
+ switch (led_group) {
+ case 0:
+ return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+ case 1:
+ return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+ case 2:
+ return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+ case 3:
+ return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+ default:
+ return 0;
+ }
+}
+
+static int rb8366rb_get_port_led(struct rtl8366rb_led *led)
+{
+ struct realtek_priv *priv = led->priv;
+ u8 led_group = led->led_group;
+ u8 port_num = led->port_num;
+ int ret;
+ u32 val;
+
+ ret = regmap_read(priv->map, RTL8366RB_LED_X_X_CTRL_REG(led_group),
+ &val);
+ if (ret) {
+ dev_err(priv->dev, "error reading LED on port %d group %d\n",
+ led_group, port_num);
+ return ret;
+ }
+
+ return !!(val & rtl8366rb_led_group_port_mask(led_group, port_num));
+}
+
+static int rb8366rb_set_port_led(struct rtl8366rb_led *led, bool enable)
+{
+ struct realtek_priv *priv = led->priv;
+ u8 led_group = led->led_group;
+ u8 port_num = led->port_num;
+ int ret;
+
+ ret = regmap_update_bits(priv->map,
+ RTL8366RB_LED_X_X_CTRL_REG(led_group),
+ rtl8366rb_led_group_port_mask(led_group,
+ port_num),
+ enable ? 0xffff : 0);
+ if (ret) {
+ dev_err(priv->dev, "error updating LED on port %d group %d\n",
+ led_group, port_num);
+ return ret;
+ }
+
+ /* Change the LED group to manual controlled LEDs if required */
+ ret = rb8366rb_set_ledgroup_mode(priv, led_group,
+ RTL8366RB_LEDGROUP_FORCE);
+
+ if (ret) {
+ dev_err(priv->dev, "error updating LED GROUP group %d\n",
+ led_group);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+rtl8366rb_cled_brightness_set_blocking(struct led_classdev *ldev,
+ enum led_brightness brightness)
+{
+ struct rtl8366rb_led *led = container_of(ldev, struct rtl8366rb_led,
+ cdev);
+
+ return rb8366rb_set_port_led(led, brightness == LED_ON);
+}
+
+static int rtl8366rb_setup_led(struct realtek_priv *priv, struct dsa_port *dp,
+ struct fwnode_handle *led_fwnode)
+{
+ struct rtl8366rb *rb = priv->chip_data;
+ struct led_init_data init_data = { };
+ enum led_default_state state;
+ struct rtl8366rb_led *led;
+ u32 led_group;
+ int ret;
+
+ ret = fwnode_property_read_u32(led_fwnode, "reg", &led_group);
+ if (ret)
+ return ret;
+
+ if (led_group >= RTL8366RB_NUM_LEDGROUPS) {
+ dev_warn(priv->dev, "Invalid LED reg %d defined for port %d",
+ led_group, dp->index);
+ return -EINVAL;
+ }
+
+ led = &rb->leds[dp->index][led_group];
+ led->port_num = dp->index;
+ led->led_group = led_group;
+ led->priv = priv;
+
+ state = led_init_default_state_get(led_fwnode);
+ switch (state) {
+ case LEDS_DEFSTATE_ON:
+ led->cdev.brightness = 1;
+ rb8366rb_set_port_led(led, 1);
+ break;
+ case LEDS_DEFSTATE_KEEP:
+ led->cdev.brightness =
+ rb8366rb_get_port_led(led);
+ break;
+ case LEDS_DEFSTATE_OFF:
+ default:
+ led->cdev.brightness = 0;
+ rb8366rb_set_port_led(led, 0);
+ }
+
+ led->cdev.max_brightness = 1;
+ led->cdev.brightness_set_blocking =
+ rtl8366rb_cled_brightness_set_blocking;
+ init_data.fwnode = led_fwnode;
+ init_data.devname_mandatory = true;
+
+ init_data.devicename = kasprintf(GFP_KERNEL, "Realtek-%d:0%d:%d",
+ dp->ds->index, dp->index, led_group);
+ if (!init_data.devicename)
+ return -ENOMEM;
+
+ ret = devm_led_classdev_register_ext(priv->dev, &led->cdev, &init_data);
+ if (ret) {
+ dev_warn(priv->dev, "Failed to init LED %d for port %d",
+ led_group, dp->index);
+ return ret;
+ }
+
+ return 0;
+}
+
+int rtl8366rb_setup_leds(struct realtek_priv *priv)
+{
+ struct dsa_switch *ds = &priv->ds;
+ struct device_node *leds_np;
+ struct dsa_port *dp;
+ int ret = 0;
+
+ dsa_switch_for_each_port(dp, ds) {
+ if (!dp->dn)
+ continue;
+
+ leds_np = of_get_child_by_name(dp->dn, "leds");
+ if (!leds_np) {
+ dev_dbg(priv->dev, "No leds defined for port %d",
+ dp->index);
+ continue;
+ }
+
+ for_each_child_of_node_scoped(leds_np, led_np) {
+ ret = rtl8366rb_setup_led(priv, dp,
+ of_fwnode_handle(led_np));
+ if (ret)
+ break;
+ }
+
+ of_node_put(leds_np);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
index 6ba03f81c882..d96ae72b0a5c 100644
--- a/drivers/net/dsa/realtek/rtl8366rb.c
+++ b/drivers/net/dsa/realtek/rtl8366rb.c
@@ -21,16 +21,13 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/of_irq.h>
#include <linux/regmap.h>
+#include <linux/string_choices.h>
#include "realtek.h"
#include "realtek-smi.h"
#include "realtek-mdio.h"
#include "rtl83xx.h"
-
-#define RTL8366RB_PORT_NUM_CPU 5
-#define RTL8366RB_NUM_PORTS 6
-#define RTL8366RB_PHY_NO_MAX 4
-#define RTL8366RB_PHY_ADDR_MAX 31
+#include "rtl8366rb.h"
/* Switch Global Configuration register */
#define RTL8366RB_SGCR 0x0000
@@ -175,39 +172,6 @@
*/
#define RTL8366RB_VLAN_INGRESS_CTRL2_REG 0x037f
-/* LED control registers */
-/* The LED blink rate is global; it is used by all triggers in all groups. */
-#define RTL8366RB_LED_BLINKRATE_REG 0x0430
-#define RTL8366RB_LED_BLINKRATE_MASK 0x0007
-#define RTL8366RB_LED_BLINKRATE_28MS 0x0000
-#define RTL8366RB_LED_BLINKRATE_56MS 0x0001
-#define RTL8366RB_LED_BLINKRATE_84MS 0x0002
-#define RTL8366RB_LED_BLINKRATE_111MS 0x0003
-#define RTL8366RB_LED_BLINKRATE_222MS 0x0004
-#define RTL8366RB_LED_BLINKRATE_446MS 0x0005
-
-/* LED trigger event for each group */
-#define RTL8366RB_LED_CTRL_REG 0x0431
-#define RTL8366RB_LED_CTRL_OFFSET(led_group) \
- (4 * (led_group))
-#define RTL8366RB_LED_CTRL_MASK(led_group) \
- (0xf << RTL8366RB_LED_CTRL_OFFSET(led_group))
-
-/* The RTL8366RB_LED_X_X registers are used to manually set the LED state only
- * when the corresponding LED group in RTL8366RB_LED_CTRL_REG is
- * RTL8366RB_LEDGROUP_FORCE. Otherwise, it is ignored.
- */
-#define RTL8366RB_LED_0_1_CTRL_REG 0x0432
-#define RTL8366RB_LED_2_3_CTRL_REG 0x0433
-#define RTL8366RB_LED_X_X_CTRL_REG(led_group) \
- ((led_group) <= 1 ? \
- RTL8366RB_LED_0_1_CTRL_REG : \
- RTL8366RB_LED_2_3_CTRL_REG)
-#define RTL8366RB_LED_0_X_CTRL_MASK GENMASK(5, 0)
-#define RTL8366RB_LED_X_1_CTRL_MASK GENMASK(11, 6)
-#define RTL8366RB_LED_2_X_CTRL_MASK GENMASK(5, 0)
-#define RTL8366RB_LED_X_3_CTRL_MASK GENMASK(11, 6)
-
#define RTL8366RB_MIB_COUNT 33
#define RTL8366RB_GLOBAL_MIB_COUNT 1
#define RTL8366RB_MIB_COUNTER_PORT_OFFSET 0x0050
@@ -243,7 +207,6 @@
#define RTL8366RB_PORT_STATUS_AN_MASK 0x0080
#define RTL8366RB_NUM_VLANS 16
-#define RTL8366RB_NUM_LEDGROUPS 4
#define RTL8366RB_NUM_VIDS 4096
#define RTL8366RB_PRIORITYMAX 7
#define RTL8366RB_NUM_FIDS 8
@@ -350,46 +313,6 @@
#define RTL8366RB_GREEN_FEATURE_TX BIT(0)
#define RTL8366RB_GREEN_FEATURE_RX BIT(2)
-enum rtl8366_ledgroup_mode {
- RTL8366RB_LEDGROUP_OFF = 0x0,
- RTL8366RB_LEDGROUP_DUP_COL = 0x1,
- RTL8366RB_LEDGROUP_LINK_ACT = 0x2,
- RTL8366RB_LEDGROUP_SPD1000 = 0x3,
- RTL8366RB_LEDGROUP_SPD100 = 0x4,
- RTL8366RB_LEDGROUP_SPD10 = 0x5,
- RTL8366RB_LEDGROUP_SPD1000_ACT = 0x6,
- RTL8366RB_LEDGROUP_SPD100_ACT = 0x7,
- RTL8366RB_LEDGROUP_SPD10_ACT = 0x8,
- RTL8366RB_LEDGROUP_SPD100_10_ACT = 0x9,
- RTL8366RB_LEDGROUP_FIBER = 0xa,
- RTL8366RB_LEDGROUP_AN_FAULT = 0xb,
- RTL8366RB_LEDGROUP_LINK_RX = 0xc,
- RTL8366RB_LEDGROUP_LINK_TX = 0xd,
- RTL8366RB_LEDGROUP_MASTER = 0xe,
- RTL8366RB_LEDGROUP_FORCE = 0xf,
-
- __RTL8366RB_LEDGROUP_MODE_MAX
-};
-
-struct rtl8366rb_led {
- u8 port_num;
- u8 led_group;
- struct realtek_priv *priv;
- struct led_classdev cdev;
-};
-
-/**
- * struct rtl8366rb - RTL8366RB-specific data
- * @max_mtu: per-port max MTU setting
- * @pvid_enabled: if PVID is set for respective port
- * @leds: per-port and per-ledgroup led info
- */
-struct rtl8366rb {
- unsigned int max_mtu[RTL8366RB_NUM_PORTS];
- bool pvid_enabled[RTL8366RB_NUM_PORTS];
- struct rtl8366rb_led leds[RTL8366RB_NUM_PORTS][RTL8366RB_NUM_LEDGROUPS];
-};
-
static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
{ 0, 0, 4, "IfInOctets" },
{ 0, 4, 4, "EtherStatsOctets" },
@@ -627,10 +550,8 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_priv *priv)
dev_err(priv->dev, "unable to request irq: %d\n", ret);
goto out_put_node;
}
- priv->irqdomain = irq_domain_add_linear(intc,
- RTL8366RB_NUM_INTERRUPT,
- &rtl8366rb_irqdomain_ops,
- priv);
+ priv->irqdomain = irq_domain_create_linear(of_fwnode_handle(intc), RTL8366RB_NUM_INTERRUPT,
+ &rtl8366rb_irqdomain_ops, priv);
if (!priv->irqdomain) {
dev_err(priv->dev, "failed to create IRQ domain\n");
ret = -EINVAL;
@@ -830,9 +751,10 @@ static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table,
return 0;
}
-static int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
- u8 led_group,
- enum rtl8366_ledgroup_mode mode)
+/* This code is used also with LEDs disabled */
+int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
+ u8 led_group,
+ enum rtl8366_ledgroup_mode mode)
{
int ret;
u32 val;
@@ -849,144 +771,7 @@ static int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
return 0;
}
-static inline u32 rtl8366rb_led_group_port_mask(u8 led_group, u8 port)
-{
- switch (led_group) {
- case 0:
- return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
- case 1:
- return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
- case 2:
- return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
- case 3:
- return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
- default:
- return 0;
- }
-}
-
-static int rb8366rb_get_port_led(struct rtl8366rb_led *led)
-{
- struct realtek_priv *priv = led->priv;
- u8 led_group = led->led_group;
- u8 port_num = led->port_num;
- int ret;
- u32 val;
-
- ret = regmap_read(priv->map, RTL8366RB_LED_X_X_CTRL_REG(led_group),
- &val);
- if (ret) {
- dev_err(priv->dev, "error reading LED on port %d group %d\n",
- led_group, port_num);
- return ret;
- }
-
- return !!(val & rtl8366rb_led_group_port_mask(led_group, port_num));
-}
-
-static int rb8366rb_set_port_led(struct rtl8366rb_led *led, bool enable)
-{
- struct realtek_priv *priv = led->priv;
- u8 led_group = led->led_group;
- u8 port_num = led->port_num;
- int ret;
-
- ret = regmap_update_bits(priv->map,
- RTL8366RB_LED_X_X_CTRL_REG(led_group),
- rtl8366rb_led_group_port_mask(led_group,
- port_num),
- enable ? 0xffff : 0);
- if (ret) {
- dev_err(priv->dev, "error updating LED on port %d group %d\n",
- led_group, port_num);
- return ret;
- }
-
- /* Change the LED group to manual controlled LEDs if required */
- ret = rb8366rb_set_ledgroup_mode(priv, led_group,
- RTL8366RB_LEDGROUP_FORCE);
-
- if (ret) {
- dev_err(priv->dev, "error updating LED GROUP group %d\n",
- led_group);
- return ret;
- }
-
- return 0;
-}
-
-static int
-rtl8366rb_cled_brightness_set_blocking(struct led_classdev *ldev,
- enum led_brightness brightness)
-{
- struct rtl8366rb_led *led = container_of(ldev, struct rtl8366rb_led,
- cdev);
-
- return rb8366rb_set_port_led(led, brightness == LED_ON);
-}
-
-static int rtl8366rb_setup_led(struct realtek_priv *priv, struct dsa_port *dp,
- struct fwnode_handle *led_fwnode)
-{
- struct rtl8366rb *rb = priv->chip_data;
- struct led_init_data init_data = { };
- enum led_default_state state;
- struct rtl8366rb_led *led;
- u32 led_group;
- int ret;
-
- ret = fwnode_property_read_u32(led_fwnode, "reg", &led_group);
- if (ret)
- return ret;
-
- if (led_group >= RTL8366RB_NUM_LEDGROUPS) {
- dev_warn(priv->dev, "Invalid LED reg %d defined for port %d",
- led_group, dp->index);
- return -EINVAL;
- }
-
- led = &rb->leds[dp->index][led_group];
- led->port_num = dp->index;
- led->led_group = led_group;
- led->priv = priv;
-
- state = led_init_default_state_get(led_fwnode);
- switch (state) {
- case LEDS_DEFSTATE_ON:
- led->cdev.brightness = 1;
- rb8366rb_set_port_led(led, 1);
- break;
- case LEDS_DEFSTATE_KEEP:
- led->cdev.brightness =
- rb8366rb_get_port_led(led);
- break;
- case LEDS_DEFSTATE_OFF:
- default:
- led->cdev.brightness = 0;
- rb8366rb_set_port_led(led, 0);
- }
-
- led->cdev.max_brightness = 1;
- led->cdev.brightness_set_blocking =
- rtl8366rb_cled_brightness_set_blocking;
- init_data.fwnode = led_fwnode;
- init_data.devname_mandatory = true;
-
- init_data.devicename = kasprintf(GFP_KERNEL, "Realtek-%d:0%d:%d",
- dp->ds->index, dp->index, led_group);
- if (!init_data.devicename)
- return -ENOMEM;
-
- ret = devm_led_classdev_register_ext(priv->dev, &led->cdev, &init_data);
- if (ret) {
- dev_warn(priv->dev, "Failed to init LED %d for port %d",
- led_group, dp->index);
- return ret;
- }
-
- return 0;
-}
-
+/* This code is used also with LEDs disabled */
static int rtl8366rb_setup_all_leds_off(struct realtek_priv *priv)
{
int ret = 0;
@@ -1007,38 +792,6 @@ static int rtl8366rb_setup_all_leds_off(struct realtek_priv *priv)
return ret;
}
-static int rtl8366rb_setup_leds(struct realtek_priv *priv)
-{
- struct dsa_switch *ds = &priv->ds;
- struct device_node *leds_np;
- struct dsa_port *dp;
- int ret = 0;
-
- dsa_switch_for_each_port(dp, ds) {
- if (!dp->dn)
- continue;
-
- leds_np = of_get_child_by_name(dp->dn, "leds");
- if (!leds_np) {
- dev_dbg(priv->dev, "No leds defined for port %d",
- dp->index);
- continue;
- }
-
- for_each_child_of_node_scoped(leds_np, led_np) {
- ret = rtl8366rb_setup_led(priv, dp,
- of_fwnode_handle(led_np));
- if (ret)
- break;
- }
-
- of_node_put(leds_np);
- if (ret)
- return ret;
- }
- return 0;
-}
-
static int rtl8366rb_setup(struct dsa_switch *ds)
{
struct realtek_priv *priv = ds->priv;
@@ -1522,7 +1275,7 @@ static int rtl8366rb_vlan_filtering(struct dsa_switch *ds, int port,
rb = priv->chip_data;
dev_dbg(priv->dev, "port %d: %s VLAN filtering\n", port,
- vlan_filtering ? "enable" : "disable");
+ str_enable_disable(vlan_filtering));
/* If the port is not in the member set, the frame will be dropped */
ret = regmap_update_bits(priv->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
@@ -1884,7 +1637,7 @@ static bool rtl8366rb_is_vlan_valid(struct realtek_priv *priv, unsigned int vlan
static int rtl8366rb_enable_vlan(struct realtek_priv *priv, bool enable)
{
- dev_dbg(priv->dev, "%s VLAN\n", enable ? "enable" : "disable");
+ dev_dbg(priv->dev, "%s VLAN\n", str_enable_disable(enable));
return regmap_update_bits(priv->map,
RTL8366RB_SGCR, RTL8366RB_SGCR_EN_VLAN,
enable ? RTL8366RB_SGCR_EN_VLAN : 0);
@@ -1892,7 +1645,7 @@ static int rtl8366rb_enable_vlan(struct realtek_priv *priv, bool enable)
static int rtl8366rb_enable_vlan4k(struct realtek_priv *priv, bool enable)
{
- dev_dbg(priv->dev, "%s VLAN 4k\n", enable ? "enable" : "disable");
+ dev_dbg(priv->dev, "%s VLAN 4k\n", str_enable_disable(enable));
return regmap_update_bits(priv->map, RTL8366RB_SGCR,
RTL8366RB_SGCR_EN_VLAN_4KTB,
enable ? RTL8366RB_SGCR_EN_VLAN_4KTB : 0);
@@ -2062,6 +1815,8 @@ static const struct dsa_switch_ops rtl8366rb_switch_ops = {
.port_fast_age = rtl8366rb_port_fast_age,
.port_change_mtu = rtl8366rb_change_mtu,
.port_max_mtu = rtl8366rb_max_mtu,
+ .port_hsr_join = dsa_port_simple_hsr_join,
+ .port_hsr_leave = dsa_port_simple_hsr_leave,
};
static const struct realtek_ops rtl8366rb_ops = {
@@ -2144,4 +1899,4 @@ module_exit(rtl8366rb_exit);
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_DESCRIPTION("Driver for RTL8366RB ethernet switch");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(REALTEK_DSA);
+MODULE_IMPORT_NS("REALTEK_DSA");
diff --git a/drivers/net/dsa/realtek/rtl8366rb.h b/drivers/net/dsa/realtek/rtl8366rb.h
new file mode 100644
index 000000000000..685ff3275faa
--- /dev/null
+++ b/drivers/net/dsa/realtek/rtl8366rb.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _RTL8366RB_H
+#define _RTL8366RB_H
+
+#include "realtek.h"
+
+#define RTL8366RB_PORT_NUM_CPU 5
+#define RTL8366RB_NUM_PORTS 6
+#define RTL8366RB_PHY_NO_MAX 4
+#define RTL8366RB_NUM_LEDGROUPS 4
+#define RTL8366RB_PHY_ADDR_MAX 31
+
+/* LED control registers */
+/* The LED blink rate is global; it is used by all triggers in all groups. */
+#define RTL8366RB_LED_BLINKRATE_REG 0x0430
+#define RTL8366RB_LED_BLINKRATE_MASK 0x0007
+#define RTL8366RB_LED_BLINKRATE_28MS 0x0000
+#define RTL8366RB_LED_BLINKRATE_56MS 0x0001
+#define RTL8366RB_LED_BLINKRATE_84MS 0x0002
+#define RTL8366RB_LED_BLINKRATE_111MS 0x0003
+#define RTL8366RB_LED_BLINKRATE_222MS 0x0004
+#define RTL8366RB_LED_BLINKRATE_446MS 0x0005
+
+/* LED trigger event for each group */
+#define RTL8366RB_LED_CTRL_REG 0x0431
+#define RTL8366RB_LED_CTRL_OFFSET(led_group) \
+ (4 * (led_group))
+#define RTL8366RB_LED_CTRL_MASK(led_group) \
+ (0xf << RTL8366RB_LED_CTRL_OFFSET(led_group))
+
+/* The RTL8366RB_LED_X_X registers are used to manually set the LED state only
+ * when the corresponding LED group in RTL8366RB_LED_CTRL_REG is
+ * RTL8366RB_LEDGROUP_FORCE. Otherwise, it is ignored.
+ */
+#define RTL8366RB_LED_0_1_CTRL_REG 0x0432
+#define RTL8366RB_LED_2_3_CTRL_REG 0x0433
+#define RTL8366RB_LED_X_X_CTRL_REG(led_group) \
+ ((led_group) <= 1 ? \
+ RTL8366RB_LED_0_1_CTRL_REG : \
+ RTL8366RB_LED_2_3_CTRL_REG)
+#define RTL8366RB_LED_0_X_CTRL_MASK GENMASK(5, 0)
+#define RTL8366RB_LED_X_1_CTRL_MASK GENMASK(11, 6)
+#define RTL8366RB_LED_2_X_CTRL_MASK GENMASK(5, 0)
+#define RTL8366RB_LED_X_3_CTRL_MASK GENMASK(11, 6)
+
+enum rtl8366_ledgroup_mode {
+ RTL8366RB_LEDGROUP_OFF = 0x0,
+ RTL8366RB_LEDGROUP_DUP_COL = 0x1,
+ RTL8366RB_LEDGROUP_LINK_ACT = 0x2,
+ RTL8366RB_LEDGROUP_SPD1000 = 0x3,
+ RTL8366RB_LEDGROUP_SPD100 = 0x4,
+ RTL8366RB_LEDGROUP_SPD10 = 0x5,
+ RTL8366RB_LEDGROUP_SPD1000_ACT = 0x6,
+ RTL8366RB_LEDGROUP_SPD100_ACT = 0x7,
+ RTL8366RB_LEDGROUP_SPD10_ACT = 0x8,
+ RTL8366RB_LEDGROUP_SPD100_10_ACT = 0x9,
+ RTL8366RB_LEDGROUP_FIBER = 0xa,
+ RTL8366RB_LEDGROUP_AN_FAULT = 0xb,
+ RTL8366RB_LEDGROUP_LINK_RX = 0xc,
+ RTL8366RB_LEDGROUP_LINK_TX = 0xd,
+ RTL8366RB_LEDGROUP_MASTER = 0xe,
+ RTL8366RB_LEDGROUP_FORCE = 0xf,
+
+ __RTL8366RB_LEDGROUP_MODE_MAX
+};
+
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS)
+
+struct rtl8366rb_led {
+ u8 port_num;
+ u8 led_group;
+ struct realtek_priv *priv;
+ struct led_classdev cdev;
+};
+
+int rtl8366rb_setup_leds(struct realtek_priv *priv);
+
+#else
+
+static inline int rtl8366rb_setup_leds(struct realtek_priv *priv)
+{
+ return 0;
+}
+
+#endif /* IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS) */
+
+/**
+ * struct rtl8366rb - RTL8366RB-specific data
+ * @max_mtu: per-port max MTU setting
+ * @pvid_enabled: if PVID is set for respective port
+ * @leds: per-port and per-ledgroup led info
+ */
+struct rtl8366rb {
+ unsigned int max_mtu[RTL8366RB_NUM_PORTS];
+ bool pvid_enabled[RTL8366RB_NUM_PORTS];
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS)
+ struct rtl8366rb_led leds[RTL8366RB_NUM_PORTS][RTL8366RB_NUM_LEDGROUPS];
+#endif
+};
+
+/* This code is used also with LEDs disabled */
+int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
+ u8 led_group,
+ enum rtl8366_ledgroup_mode mode);
+
+#endif /* _RTL8366RB_H */
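
The new header keeps the LED code optional with the usual IS_ENABLED() stub pattern: a real prototype when CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS is set, an inline no-op otherwise, so callers need no #ifdef at the call site. The same shape in miniature (CONFIG_MY_FEATURE and my_feature_setup() are placeholders):

#include <linux/kconfig.h>

struct my_priv;

#if IS_ENABLED(CONFIG_MY_FEATURE)
int my_feature_setup(struct my_priv *priv);
#else
static inline int my_feature_setup(struct my_priv *priv)
{
	return 0;	/* feature compiled out: succeed silently */
}
#endif
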
diff --git a/drivers/net/dsa/realtek/rtl83xx.c b/drivers/net/dsa/realtek/rtl83xx.c
index 3c5018d5e1f9..2b9bd4462714 100644
--- a/drivers/net/dsa/realtek/rtl83xx.c
+++ b/drivers/net/dsa/realtek/rtl83xx.c
@@ -25,7 +25,7 @@ void rtl83xx_lock(void *ctx)
mutex_lock(&priv->map_lock);
}
-EXPORT_SYMBOL_NS_GPL(rtl83xx_lock, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl83xx_lock, "REALTEK_DSA");
/**
* rtl83xx_unlock() - Unlocks the mutex used by regmaps
@@ -42,7 +42,7 @@ void rtl83xx_unlock(void *ctx)
mutex_unlock(&priv->map_lock);
}
-EXPORT_SYMBOL_NS_GPL(rtl83xx_unlock, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl83xx_unlock, "REALTEK_DSA");
static int rtl83xx_user_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
@@ -109,7 +109,7 @@ err_put_node:
return ret;
}
-EXPORT_SYMBOL_NS_GPL(rtl83xx_setup_user_mdio, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl83xx_setup_user_mdio, "REALTEK_DSA");
/**
* rtl83xx_probe() - probe a Realtek switch
@@ -208,7 +208,7 @@ rtl83xx_probe(struct device *dev,
return priv;
}
-EXPORT_SYMBOL_NS_GPL(rtl83xx_probe, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl83xx_probe, "REALTEK_DSA");
/**
* rtl83xx_register_switch() - detects and register a switch
@@ -245,7 +245,7 @@ int rtl83xx_register_switch(struct realtek_priv *priv)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(rtl83xx_register_switch, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl83xx_register_switch, "REALTEK_DSA");
/**
* rtl83xx_unregister_switch() - unregister a switch
@@ -262,7 +262,7 @@ void rtl83xx_unregister_switch(struct realtek_priv *priv)
dsa_unregister_switch(ds);
}
-EXPORT_SYMBOL_NS_GPL(rtl83xx_unregister_switch, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl83xx_unregister_switch, "REALTEK_DSA");
/**
* rtl83xx_shutdown() - shutdown a switch
@@ -283,7 +283,7 @@ void rtl83xx_shutdown(struct realtek_priv *priv)
dev_set_drvdata(priv->dev, NULL);
}
-EXPORT_SYMBOL_NS_GPL(rtl83xx_shutdown, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl83xx_shutdown, "REALTEK_DSA");
/**
* rtl83xx_remove() - Cleanup a realtek switch driver
@@ -297,7 +297,7 @@ EXPORT_SYMBOL_NS_GPL(rtl83xx_shutdown, REALTEK_DSA);
void rtl83xx_remove(struct realtek_priv *priv)
{
}
-EXPORT_SYMBOL_NS_GPL(rtl83xx_remove, REALTEK_DSA);
+EXPORT_SYMBOL_NS_GPL(rtl83xx_remove, "REALTEK_DSA");
void rtl83xx_reset_assert(struct realtek_priv *priv)
{
diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c
index 66974379334a..4d857e3be10b 100644
--- a/drivers/net/dsa/rzn1_a5psw.c
+++ b/drivers/net/dsa/rzn1_a5psw.c
@@ -337,8 +337,9 @@ static void a5psw_port_rx_block_set(struct a5psw *a5psw, int port, bool block)
static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port,
bool set)
{
- u8 offsets[] = {A5PSW_UCAST_DEF_MASK, A5PSW_BCAST_DEF_MASK,
- A5PSW_MCAST_DEF_MASK};
+ static const u8 offsets[] = {
+ A5PSW_UCAST_DEF_MASK, A5PSW_BCAST_DEF_MASK, A5PSW_MCAST_DEF_MASK
+ };
int i;
for (i = 0; i < ARRAY_SIZE(offsets); i++)
@@ -1034,6 +1035,8 @@ static const struct dsa_switch_ops a5psw_switch_ops = {
.port_fdb_add = a5psw_port_fdb_add,
.port_fdb_del = a5psw_port_fdb_del,
.port_fdb_dump = a5psw_port_fdb_dump,
+ .port_hsr_join = dsa_port_simple_hsr_join,
+ .port_hsr_leave = dsa_port_simple_hsr_leave,
};
static int a5psw_mdio_wait_busy(struct a5psw *a5psw)
@@ -1226,40 +1229,30 @@ static int a5psw_probe(struct platform_device *pdev)
if (ret)
return ret;
- a5psw->hclk = devm_clk_get(dev, "hclk");
+ a5psw->hclk = devm_clk_get_enabled(dev, "hclk");
if (IS_ERR(a5psw->hclk)) {
dev_err(dev, "failed get hclk clock\n");
ret = PTR_ERR(a5psw->hclk);
goto free_pcs;
}
- a5psw->clk = devm_clk_get(dev, "clk");
+ a5psw->clk = devm_clk_get_enabled(dev, "clk");
if (IS_ERR(a5psw->clk)) {
dev_err(dev, "failed get clk_switch clock\n");
ret = PTR_ERR(a5psw->clk);
goto free_pcs;
}
- ret = clk_prepare_enable(a5psw->clk);
- if (ret)
- goto free_pcs;
-
- ret = clk_prepare_enable(a5psw->hclk);
- if (ret)
- goto clk_disable;
-
- mdio = of_get_child_by_name(dev->of_node, "mdio");
- if (of_device_is_available(mdio)) {
+ mdio = of_get_available_child_by_name(dev->of_node, "mdio");
+ if (mdio) {
ret = a5psw_probe_mdio(a5psw, mdio);
+ of_node_put(mdio);
if (ret) {
- of_node_put(mdio);
dev_err(dev, "Failed to register MDIO: %d\n", ret);
- goto hclk_disable;
+ goto free_pcs;
}
}
- of_node_put(mdio);
-
ds = &a5psw->ds;
ds->dev = dev;
ds->num_ports = A5PSW_PORTS_NUM;
@@ -1270,15 +1263,11 @@ static int a5psw_probe(struct platform_device *pdev)
ret = dsa_register_switch(ds);
if (ret) {
dev_err(dev, "Failed to register DSA switch: %d\n", ret);
- goto hclk_disable;
+ goto free_pcs;
}
return 0;
-hclk_disable:
- clk_disable_unprepare(a5psw->hclk);
-clk_disable:
- clk_disable_unprepare(a5psw->clk);
free_pcs:
a5psw_pcs_free(a5psw);
@@ -1294,8 +1283,6 @@ static void a5psw_remove(struct platform_device *pdev)
dsa_unregister_switch(&a5psw->ds);
a5psw_pcs_free(a5psw);
- clk_disable_unprepare(a5psw->hclk);
- clk_disable_unprepare(a5psw->clk);
}
static void a5psw_shutdown(struct platform_device *pdev)
diff --git a/drivers/net/dsa/sja1105/sja1105_ethtool.c b/drivers/net/dsa/sja1105/sja1105_ethtool.c
index 2ea64b1d026d..84d7d3f66bd0 100644
--- a/drivers/net/dsa/sja1105/sja1105_ethtool.c
+++ b/drivers/net/dsa/sja1105/sja1105_ethtool.c
@@ -571,6 +571,9 @@ void sja1105_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
max_ctr = __MAX_SJA1105PQRS_PORT_COUNTER;
for (i = 0; i < max_ctr; i++) {
+ if (!strlen(sja1105_port_counters[i].name))
+ continue;
+
rc = sja1105_port_counter_read(priv, port, i, &data[k++]);
if (rc) {
dev_err(ds->dev,
@@ -596,8 +599,12 @@ void sja1105_get_strings(struct dsa_switch *ds, int port,
else
max_ctr = __MAX_SJA1105PQRS_PORT_COUNTER;
- for (i = 0; i < max_ctr; i++)
+ for (i = 0; i < max_ctr; i++) {
+ if (!strlen(sja1105_port_counters[i].name))
+ continue;
+
ethtool_puts(&data, sja1105_port_counters[i].name);
+ }
}
int sja1105_get_sset_count(struct dsa_switch *ds, int port, int sset)
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index f8454f3b6f9c..aa2145cf29a6 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -1302,14 +1302,7 @@ static int sja1105_set_port_speed(struct sja1105_private *priv, int port,
* table, since this will be used for the clocking setup, and we no
* longer need to store it in the static config (already told hardware
* we want auto during upload phase).
- * Actually for the SGMII port, the MAC is fixed at 1 Gbps and
- * we need to configure the PCS only (if even that).
*/
- if (priv->phy_mode[port] == PHY_INTERFACE_MODE_SGMII)
- speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS];
- else if (priv->phy_mode[port] == PHY_INTERFACE_MODE_2500BASEX)
- speed = priv->info->port_speed[SJA1105_SPEED_2500MBPS];
-
mac[port].speed = speed;
return 0;
@@ -2081,6 +2074,7 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
switch (state) {
case BR_STATE_DISABLED:
case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
/* From UM10944 description of DRPDTAG (why put this there?):
* "Management traffic flows to the port regardless of the state
* of the INGRESS flag". So BPDUs are still be allowed to pass.
@@ -2090,11 +2084,6 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
mac[port].egress = false;
mac[port].dyn_learn = false;
break;
- case BR_STATE_LISTENING:
- mac[port].ingress = true;
- mac[port].egress = false;
- mac[port].dyn_learn = false;
- break;
case BR_STATE_LEARNING:
mac[port].ingress = true;
mac[port].egress = false;
diff --git a/drivers/net/dsa/sja1105/sja1105_mdio.c b/drivers/net/dsa/sja1105/sja1105_mdio.c
index 84b7169f2974..8d535c033cef 100644
--- a/drivers/net/dsa/sja1105/sja1105_mdio.c
+++ b/drivers/net/dsa/sja1105/sja1105_mdio.c
@@ -468,13 +468,10 @@ int sja1105_mdiobus_register(struct dsa_switch *ds)
if (rc)
return rc;
- mdio_node = of_get_child_by_name(switch_node, "mdios");
+ mdio_node = of_get_available_child_by_name(switch_node, "mdios");
if (!mdio_node)
return 0;
- if (!of_device_is_available(mdio_node))
- goto out_put_mdio_node;
-
if (regs->mdio_100base_tx != SJA1105_RSV_ADDR) {
rc = sja1105_mdiobus_base_tx_register(priv, mdio_node);
if (rc)
@@ -487,7 +484,6 @@ int sja1105_mdiobus_register(struct dsa_switch *ds)
goto err_free_base_tx_mdiobus;
}
-out_put_mdio_node:
of_node_put(mdio_node);
return 0;
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index a1f4ca6ad888..fefe46e2a5e6 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -58,56 +58,60 @@ enum sja1105_ptp_clk_mode {
#define ptp_data_to_sja1105(d) \
container_of((d), struct sja1105_private, ptp_data)
-int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
+int sja1105_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct sja1105_private *priv = ds->priv;
- struct hwtstamp_config config;
+ unsigned long hwts_tx_en, hwts_rx_en;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ hwts_tx_en = priv->hwts_tx_en;
+ hwts_rx_en = priv->hwts_rx_en;
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
- priv->hwts_tx_en &= ~BIT(port);
+ hwts_tx_en &= ~BIT(port);
break;
case HWTSTAMP_TX_ON:
- priv->hwts_tx_en |= BIT(port);
+ hwts_tx_en |= BIT(port);
break;
default:
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
- priv->hwts_rx_en &= ~BIT(port);
+ hwts_rx_en &= ~BIT(port);
break;
- default:
- priv->hwts_rx_en |= BIT(port);
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ hwts_rx_en |= BIT(port);
break;
+ default:
+ return -ERANGE;
}
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
+ priv->hwts_tx_en = hwts_tx_en;
+ priv->hwts_rx_en = hwts_rx_en;
+
return 0;
}
-int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
+int sja1105_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config)
{
struct sja1105_private *priv = ds->priv;
- struct hwtstamp_config config;
- config.flags = 0;
+ config->flags = 0;
if (priv->hwts_tx_en & BIT(port))
- config.tx_type = HWTSTAMP_TX_ON;
+ config->tx_type = HWTSTAMP_TX_ON;
else
- config.tx_type = HWTSTAMP_TX_OFF;
+ config->tx_type = HWTSTAMP_TX_OFF;
if (priv->hwts_rx_en & BIT(port))
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
else
- config.rx_filter = HWTSTAMP_FILTER_NONE;
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
int sja1105_get_ts_info(struct dsa_switch *ds, int port,
@@ -727,10 +731,6 @@ static int sja1105_per_out_enable(struct sja1105_private *priv,
if (perout->index != 0)
return -EOPNOTSUPP;
- /* Reject requests with unsupported flags */
- if (perout->flags)
- return -EOPNOTSUPP;
-
mutex_lock(&ptp_data->lock);
rc = sja1105_change_ptp_clk_pin_func(priv, PTP_PF_PEROUT);
@@ -810,13 +810,6 @@ static int sja1105_extts_enable(struct sja1105_private *priv,
if (extts->index != 0)
return -EOPNOTSUPP;
- /* Reject requests with unsupported flags */
- if (extts->flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* We can only enable time stamping on both edges, sadly. */
if ((extts->flags & PTP_STRICT_FLAGS) &&
(extts->flags & PTP_ENABLE_FEATURE) &&
@@ -832,7 +825,7 @@ static int sja1105_extts_enable(struct sja1105_private *priv,
if (on)
sja1105_ptp_extts_setup_timer(&priv->ptp_data);
else
- del_timer_sync(&priv->ptp_data.extts_timer);
+ timer_delete_sync(&priv->ptp_data.extts_timer);
return 0;
}
@@ -902,6 +895,9 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds)
.n_pins = 1,
.n_ext_ts = 1,
.n_per_out = 1,
+ .supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS,
};
/* Only used on SJA1105 */
@@ -929,7 +925,7 @@ void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
if (IS_ERR_OR_NULL(ptp_data->clock))
return;
- del_timer_sync(&ptp_data->extts_timer);
+ timer_delete_sync(&ptp_data->extts_timer);
ptp_cancel_worker_sync(ptp_data->clock);
skb_queue_purge(&ptp_data->skb_txtstamp_queue);
skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h
index 8add2bd5f728..325e3777ea07 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.h
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.h
@@ -112,9 +112,12 @@ bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
void sja1105_port_txtstamp(struct dsa_switch *ds, int port,
struct sk_buff *skb);
-int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr);
+int sja1105_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config);
-int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr);
+int sja1105_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
int __sja1105_ptp_gettimex(struct dsa_switch *ds, u64 *ns,
struct ptp_system_timestamp *sts);
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c
index baba204ad62f..ffece8a400a6 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.c
@@ -26,12 +26,8 @@ void sja1105_pack(void *buf, const u64 *val, int start, int end, size_t len)
pr_err("Start bit (%d) expected to be larger than end (%d)\n",
start, end);
} else if (rc == -ERANGE) {
- if ((start - end + 1) > 64)
- pr_err("Field %d-%d too large for 64 bits!\n",
- start, end);
- else
- pr_err("Cannot store %llx inside bits %d-%d (would truncate)\n",
- *val, start, end);
+ pr_err("Field %d-%d too large for 64 bits!\n",
+ start, end);
}
dump_stack();
}
@@ -1921,8 +1917,10 @@ int sja1105_table_delete_entry(struct sja1105_table *table, int i)
if (i > table->entry_count)
return -ERANGE;
- memmove(entries + i * entry_size, entries + (i + 1) * entry_size,
- (table->entry_count - i) * entry_size);
+ if (i + 1 < table->entry_count) {
+ memmove(entries + i * entry_size, entries + (i + 1) * entry_size,
+ (table->entry_count - i - 1) * entry_size);
+ }
table->entry_count--;
diff --git a/drivers/net/dsa/sja1105/sja1105_tas.c b/drivers/net/dsa/sja1105/sja1105_tas.c
index d7818710bc02..d5949d2c3e71 100644
--- a/drivers/net/dsa/sja1105/sja1105_tas.c
+++ b/drivers/net/dsa/sja1105/sja1105_tas.c
@@ -775,9 +775,8 @@ static void sja1105_tas_state_machine(struct work_struct *work)
base_time_ts = ns_to_timespec64(base_time);
now_ts = ns_to_timespec64(now);
- dev_dbg(ds->dev, "OPER base time %lld.%09ld (now %lld.%09ld)\n",
- base_time_ts.tv_sec, base_time_ts.tv_nsec,
- now_ts.tv_sec, now_ts.tv_nsec);
+ dev_dbg(ds->dev, "OPER base time %ptSp (now %ptSp)\n",
+ &base_time_ts, &now_ts);
break;
@@ -798,8 +797,7 @@ static void sja1105_tas_state_machine(struct work_struct *work)
if (now < tas_data->oper_base_time) {
/* TAS has not started yet */
diff = ns_to_timespec64(tas_data->oper_base_time - now);
- dev_dbg(ds->dev, "time to start: [%lld.%09ld]",
- diff.tv_sec, diff.tv_nsec);
+ dev_dbg(ds->dev, "time to start: [%ptSp]", &diff);
break;
}
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index f18aa321053d..9d31b8258268 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -2258,14 +2258,14 @@ static int vsc73xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & BIT(offset));
}
-static void vsc73xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int val)
+static int vsc73xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
{
struct vsc73xx *vsc = gpiochip_get_data(chip);
u32 tmp = val ? BIT(offset) : 0;
- vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0,
- VSC73XX_GPIO, BIT(offset), tmp);
+ return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+ VSC73XX_GPIO, BIT(offset), tmp);
}
static int vsc73xx_gpio_direction_output(struct gpio_chip *chip,
diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c
index 4dbcc49a9e52..0a05f4156ef4 100644
--- a/drivers/net/dsa/xrs700x/xrs700x.c
+++ b/drivers/net/dsa/xrs700x/xrs700x.c
@@ -566,6 +566,7 @@ static int xrs700x_hsr_join(struct dsa_switch *ds, int port,
struct xrs700x *priv = ds->priv;
struct net_device *user;
int ret, i, hsr_pair[2];
+ enum hsr_port_type type;
enum hsr_version ver;
bool fwd = false;
@@ -589,6 +590,16 @@ static int xrs700x_hsr_join(struct dsa_switch *ds, int port,
return -EOPNOTSUPP;
}
+ ret = hsr_get_port_type(hsr, dsa_to_port(ds, port)->user, &type);
+ if (ret)
+ return ret;
+
+ if (type != HSR_PT_SLAVE_A && type != HSR_PT_SLAVE_B) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only HSR slave ports can be offloaded");
+ return -EOPNOTSUPP;
+ }
+
dsa_hsr_foreach_port(dp, ds, hsr) {
if (dp->index != port) {
partner = dp;
diff --git a/drivers/net/dsa/yt921x.c b/drivers/net/dsa/yt921x.c
new file mode 100644
index 000000000000..1c511f5dc6ab
--- /dev/null
+++ b/drivers/net/dsa/yt921x.c
@@ -0,0 +1,3006 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Motorcomm YT921x Switch
+ *
+ * Should work on YT9213/YT9214/YT9215/YT9218, but only tested on YT9215+SGMII,
+ * so be sure to do your own checks before porting to another chip.
+ *
+ * Copyright (c) 2025 David Yang
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/if_hsr.h>
+#include <linux/if_vlan.h>
+#include <linux/iopoll.h>
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+
+#include <net/dsa.h>
+
+#include "yt921x.h"
+
+struct yt921x_mib_desc {
+ unsigned int size;
+ unsigned int offset;
+ const char *name;
+};
+
+#define MIB_DESC(_size, _offset, _name) \
+ {_size, _offset, _name}
+
+/* Must agree with struct yt921x_mib.
+ *
+ * Unstructured fields (name != NULL) appear in get_ethtool_stats();
+ * structured ones are reported through their dedicated *_stats() methods, but
+ * we still need their sizes and offsets to handle 32-bit MIB overflow
+ * wraparound.
+ */
+static const struct yt921x_mib_desc yt921x_mib_descs[] = {
+ MIB_DESC(1, YT921X_MIB_DATA_RX_BROADCAST, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_PAUSE, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_MULTICAST, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_CRC_ERR, NULL),
+
+ MIB_DESC(1, YT921X_MIB_DATA_RX_ALIGN_ERR, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_UNDERSIZE_ERR, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_FRAG_ERR, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_64, NULL),
+
+ MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_65_TO_127, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_128_TO_255, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_256_TO_511, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_512_TO_1023, NULL),
+
+ MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_1024_TO_1518, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_1519_TO_MAX, NULL),
+ MIB_DESC(2, YT921X_MIB_DATA_RX_GOOD_BYTES, NULL),
+
+ MIB_DESC(2, YT921X_MIB_DATA_RX_BAD_BYTES, "RxBadBytes"),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_OVERSIZE_ERR, NULL),
+
+ MIB_DESC(1, YT921X_MIB_DATA_RX_DROPPED, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_BROADCAST, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PAUSE, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_MULTICAST, NULL),
+
+ MIB_DESC(1, YT921X_MIB_DATA_TX_UNDERSIZE_ERR, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_64, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_65_TO_127, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_128_TO_255, NULL),
+
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_256_TO_511, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_512_TO_1023, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_1024_TO_1518, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_1519_TO_MAX, NULL),
+
+ MIB_DESC(2, YT921X_MIB_DATA_TX_GOOD_BYTES, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_COLLISION, NULL),
+
+ MIB_DESC(1, YT921X_MIB_DATA_TX_EXCESSIVE_COLLISION, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_MULTIPLE_COLLISION, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_SINGLE_COLLISION, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PKT, NULL),
+
+ MIB_DESC(1, YT921X_MIB_DATA_TX_DEFERRED, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_LATE_COLLISION, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_OAM, "RxOAM"),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_OAM, "TxOAM"),
+};
+
+struct yt921x_info {
+ const char *name;
+ u16 major;
+ /* Unknown, seems to be plain enumeration */
+ u8 mode;
+ u8 extmode;
+	/* Ports with integrated GbE PHYs, not including MCU Port 10 */
+ u16 internal_mask;
+ /* TODO: see comments in yt921x_dsa_phylink_get_caps() */
+ u16 external_mask;
+};
+
+#define YT921X_PORT_MASK_INTn(port) BIT(port)
+#define YT921X_PORT_MASK_INT0_n(n) GENMASK((n) - 1, 0)
+#define YT921X_PORT_MASK_EXT0 BIT(8)
+#define YT921X_PORT_MASK_EXT1 BIT(9)
+
+static const struct yt921x_info yt921x_infos[] = {
+ {
+ "YT9215SC", YT9215_MAJOR, 1, 0,
+ YT921X_PORT_MASK_INT0_n(5),
+ YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1,
+ },
+ {
+ "YT9215S", YT9215_MAJOR, 2, 0,
+ YT921X_PORT_MASK_INT0_n(5),
+ YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1,
+ },
+ {
+ "YT9215RB", YT9215_MAJOR, 3, 0,
+ YT921X_PORT_MASK_INT0_n(5),
+ YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1,
+ },
+ {
+ "YT9214NB", YT9215_MAJOR, 3, 2,
+ YT921X_PORT_MASK_INTn(1) | YT921X_PORT_MASK_INTn(3),
+ YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1,
+ },
+ {
+ "YT9213NB", YT9215_MAJOR, 3, 3,
+ YT921X_PORT_MASK_INTn(1) | YT921X_PORT_MASK_INTn(3),
+ YT921X_PORT_MASK_EXT1,
+ },
+ {
+ "YT9218N", YT9218_MAJOR, 0, 0,
+ YT921X_PORT_MASK_INT0_n(8),
+ 0,
+ },
+ {
+ "YT9218MB", YT9218_MAJOR, 1, 0,
+ YT921X_PORT_MASK_INT0_n(8),
+ YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1,
+ },
+ {}
+};
+
+#define YT921X_NAME "yt921x"
+
+#define YT921X_VID_UNWARE 4095
+
+#define YT921X_POLL_SLEEP_US 10000
+#define YT921X_POLL_TIMEOUT_US 100000
+
+/* The interval must be small enough to avoid overflowing the 32-bit MIBs.
+ *
+ * Until we can read MIBs directly from the stats64 callback (i.e. sleep
+ * there), we have to poll stats more frequently than is actually needed.
+ * For overflow protection alone, a 100 sec interval would normally be enough.
+ */
+#define YT921X_STATS_INTERVAL_JIFFIES (3 * HZ)
+
+struct yt921x_reg_mdio {
+ struct mii_bus *bus;
+ int addr;
+ /* SWITCH_ID_1 / SWITCH_ID_0 of the device
+ *
+ * This is a way to multiplex multiple devices on the same MII phyaddr
+ * and should be configurable in DT. However, MDIO core simply doesn't
+ * allow multiple devices over one reg addr, so this is a fixed value
+ * for now until a solution is found.
+ *
+ * Keep this because we need switchid to form MII regaddrs anyway.
+ */
+ unsigned char switchid;
+};
+
+/* TODO: SPI/I2C */
+
+#define to_yt921x_priv(_ds) container_of_const(_ds, struct yt921x_priv, ds)
+#define to_device(priv) ((priv)->ds.dev)
+
+static int yt921x_reg_read(struct yt921x_priv *priv, u32 reg, u32 *valp)
+{
+ WARN_ON(!mutex_is_locked(&priv->reg_lock));
+
+ return priv->reg_ops->read(priv->reg_ctx, reg, valp);
+}
+
+static int yt921x_reg_write(struct yt921x_priv *priv, u32 reg, u32 val)
+{
+ WARN_ON(!mutex_is_locked(&priv->reg_lock));
+
+ return priv->reg_ops->write(priv->reg_ctx, reg, val);
+}
+
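+/* Poll @reg until (value & @mask) equals the expected value passed in via
+ * @valp; on success, *@valp is updated with the full register value read.
+ */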
+static int
+yt921x_reg_wait(struct yt921x_priv *priv, u32 reg, u32 mask, u32 *valp)
+{
+ u32 val;
+ int res;
+ int ret;
+
+ ret = read_poll_timeout(yt921x_reg_read, res,
+ res || (val & mask) == *valp,
+ YT921X_POLL_SLEEP_US, YT921X_POLL_TIMEOUT_US,
+ false, priv, reg, &val);
+ if (ret)
+ return ret;
+ if (res)
+ return res;
+
+ *valp = val;
+ return 0;
+}
+
+static int
+yt921x_reg_update_bits(struct yt921x_priv *priv, u32 reg, u32 mask, u32 val)
+{
+ int res;
+ u32 v;
+ u32 u;
+
+ res = yt921x_reg_read(priv, reg, &v);
+ if (res)
+ return res;
+
+ u = v;
+ u &= ~mask;
+ u |= val;
+ if (u == v)
+ return 0;
+
+ return yt921x_reg_write(priv, reg, u);
+}
+
+static int yt921x_reg_set_bits(struct yt921x_priv *priv, u32 reg, u32 mask)
+{
+ return yt921x_reg_update_bits(priv, reg, 0, mask);
+}
+
+static int yt921x_reg_clear_bits(struct yt921x_priv *priv, u32 reg, u32 mask)
+{
+ return yt921x_reg_update_bits(priv, reg, mask, 0);
+}
+
+static int
+yt921x_reg_toggle_bits(struct yt921x_priv *priv, u32 reg, u32 mask, bool set)
+{
+ return yt921x_reg_update_bits(priv, reg, mask, !set ? 0 : mask);
+}
+
+/* Some registers, like VLANn_CTRL, must always be written as a full 64-bit
+ * value, even when only the lower or upper 32 bits change.
+ *
+ * There is no such restriction for reads, but we still provide 64-bit read
+ * wrappers so that these registers are always handled as u64 values.
+ */
+
+static int yt921x_reg64_read(struct yt921x_priv *priv, u32 reg, u64 *valp)
+{
+ u32 lo;
+ u32 hi;
+ int res;
+
+ res = yt921x_reg_read(priv, reg, &lo);
+ if (res)
+ return res;
+ res = yt921x_reg_read(priv, reg + 4, &hi);
+ if (res)
+ return res;
+
+ *valp = ((u64)hi << 32) | lo;
+ return 0;
+}
+
+static int yt921x_reg64_write(struct yt921x_priv *priv, u32 reg, u64 val)
+{
+ int res;
+
+ res = yt921x_reg_write(priv, reg, (u32)val);
+ if (res)
+ return res;
+ return yt921x_reg_write(priv, reg + 4, (u32)(val >> 32));
+}
+
+static int
+yt921x_reg64_update_bits(struct yt921x_priv *priv, u32 reg, u64 mask, u64 val)
+{
+ int res;
+ u64 v;
+ u64 u;
+
+ res = yt921x_reg64_read(priv, reg, &v);
+ if (res)
+ return res;
+
+ u = v;
+ u &= ~mask;
+ u |= val;
+ if (u == v)
+ return 0;
+
+ return yt921x_reg64_write(priv, reg, u);
+}
+
+static int yt921x_reg64_clear_bits(struct yt921x_priv *priv, u32 reg, u64 mask)
+{
+ return yt921x_reg64_update_bits(priv, reg, mask, 0);
+}
+
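+/* SMI register access is indirect: the 32-bit register address is written as
+ * two 16-bit halves (high half first) to the ADDR register, then the 32-bit
+ * data is transferred, again high half first, through the DATA register.
+ */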
+static int yt921x_reg_mdio_read(void *context, u32 reg, u32 *valp)
+{
+ struct yt921x_reg_mdio *mdio = context;
+ struct mii_bus *bus = mdio->bus;
+ int addr = mdio->addr;
+ u32 reg_addr;
+ u32 reg_data;
+ u32 val;
+ int res;
+
+	/* Hold the mdio bus lock once instead of (un)locking it four times */
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ reg_addr = YT921X_SMI_SWITCHID(mdio->switchid) | YT921X_SMI_ADDR |
+ YT921X_SMI_READ;
+ res = __mdiobus_write(bus, addr, reg_addr, (u16)(reg >> 16));
+ if (res)
+ goto end;
+ res = __mdiobus_write(bus, addr, reg_addr, (u16)reg);
+ if (res)
+ goto end;
+
+ reg_data = YT921X_SMI_SWITCHID(mdio->switchid) | YT921X_SMI_DATA |
+ YT921X_SMI_READ;
+ res = __mdiobus_read(bus, addr, reg_data);
+ if (res < 0)
+ goto end;
+ val = (u16)res;
+ res = __mdiobus_read(bus, addr, reg_data);
+ if (res < 0)
+ goto end;
+ val = (val << 16) | (u16)res;
+
+ *valp = val;
+ res = 0;
+
+end:
+ mutex_unlock(&bus->mdio_lock);
+ return res;
+}
+
+static int yt921x_reg_mdio_write(void *context, u32 reg, u32 val)
+{
+ struct yt921x_reg_mdio *mdio = context;
+ struct mii_bus *bus = mdio->bus;
+ int addr = mdio->addr;
+ u32 reg_addr;
+ u32 reg_data;
+ int res;
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ reg_addr = YT921X_SMI_SWITCHID(mdio->switchid) | YT921X_SMI_ADDR |
+ YT921X_SMI_WRITE;
+ res = __mdiobus_write(bus, addr, reg_addr, (u16)(reg >> 16));
+ if (res)
+ goto end;
+ res = __mdiobus_write(bus, addr, reg_addr, (u16)reg);
+ if (res)
+ goto end;
+
+ reg_data = YT921X_SMI_SWITCHID(mdio->switchid) | YT921X_SMI_DATA |
+ YT921X_SMI_WRITE;
+ res = __mdiobus_write(bus, addr, reg_data, (u16)(val >> 16));
+ if (res)
+ goto end;
+ res = __mdiobus_write(bus, addr, reg_data, (u16)val);
+ if (res)
+ goto end;
+
+ res = 0;
+
+end:
+ mutex_unlock(&bus->mdio_lock);
+ return res;
+}
+
+static const struct yt921x_reg_ops yt921x_reg_ops_mdio = {
+ .read = yt921x_reg_mdio_read,
+ .write = yt921x_reg_mdio_write,
+};
+
+/* TODO: SPI/I2C */
+
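+/* Registers of the internal PHYs are reached through an indirect MBUS
+ * interface: program port/reg/op into INT_MBUS_CTRL, kick INT_MBUS_OP and
+ * wait for completion, then fetch the result from INT_MBUS_DIN.
+ */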
+static int yt921x_intif_wait(struct yt921x_priv *priv)
+{
+ u32 val = 0;
+
+ return yt921x_reg_wait(priv, YT921X_INT_MBUS_OP, YT921X_MBUS_OP_START,
+ &val);
+}
+
+static int
+yt921x_intif_read(struct yt921x_priv *priv, int port, int reg, u16 *valp)
+{
+ struct device *dev = to_device(priv);
+ u32 mask;
+ u32 ctrl;
+ u32 val;
+ int res;
+
+ res = yt921x_intif_wait(priv);
+ if (res)
+ return res;
+
+ mask = YT921X_MBUS_CTRL_PORT_M | YT921X_MBUS_CTRL_REG_M |
+ YT921X_MBUS_CTRL_OP_M;
+ ctrl = YT921X_MBUS_CTRL_PORT(port) | YT921X_MBUS_CTRL_REG(reg) |
+ YT921X_MBUS_CTRL_READ;
+ res = yt921x_reg_update_bits(priv, YT921X_INT_MBUS_CTRL, mask, ctrl);
+ if (res)
+ return res;
+ res = yt921x_reg_write(priv, YT921X_INT_MBUS_OP, YT921X_MBUS_OP_START);
+ if (res)
+ return res;
+
+ res = yt921x_intif_wait(priv);
+ if (res)
+ return res;
+ res = yt921x_reg_read(priv, YT921X_INT_MBUS_DIN, &val);
+ if (res)
+ return res;
+
+ if ((u16)val != val)
+ dev_info(dev,
+ "%s: port %d, reg 0x%x: Expected u16, got 0x%08x\n",
+ __func__, port, reg, val);
+ *valp = (u16)val;
+ return 0;
+}
+
+static int
+yt921x_intif_write(struct yt921x_priv *priv, int port, int reg, u16 val)
+{
+ u32 mask;
+ u32 ctrl;
+ int res;
+
+ res = yt921x_intif_wait(priv);
+ if (res)
+ return res;
+
+ mask = YT921X_MBUS_CTRL_PORT_M | YT921X_MBUS_CTRL_REG_M |
+ YT921X_MBUS_CTRL_OP_M;
+ ctrl = YT921X_MBUS_CTRL_PORT(port) | YT921X_MBUS_CTRL_REG(reg) |
+ YT921X_MBUS_CTRL_WRITE;
+ res = yt921x_reg_update_bits(priv, YT921X_INT_MBUS_CTRL, mask, ctrl);
+ if (res)
+ return res;
+ res = yt921x_reg_write(priv, YT921X_INT_MBUS_DOUT, val);
+ if (res)
+ return res;
+ res = yt921x_reg_write(priv, YT921X_INT_MBUS_OP, YT921X_MBUS_OP_START);
+ if (res)
+ return res;
+
+ return yt921x_intif_wait(priv);
+}
+
+static int yt921x_mbus_int_read(struct mii_bus *mbus, int port, int reg)
+{
+ struct yt921x_priv *priv = mbus->priv;
+ u16 val;
+ int res;
+
+ if (port >= YT921X_PORT_NUM)
+ return U16_MAX;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_intif_read(priv, port, reg, &val);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ return res;
+ return val;
+}
+
+static int
+yt921x_mbus_int_write(struct mii_bus *mbus, int port, int reg, u16 data)
+{
+ struct yt921x_priv *priv = mbus->priv;
+ int res;
+
+ if (port >= YT921X_PORT_NUM)
+ return -ENODEV;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_intif_write(priv, port, reg, data);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_mbus_int_init(struct yt921x_priv *priv, struct device_node *mnp)
+{
+ struct device *dev = to_device(priv);
+ struct mii_bus *mbus;
+ int res;
+
+ mbus = devm_mdiobus_alloc(dev);
+ if (!mbus)
+ return -ENOMEM;
+
+ mbus->name = "YT921x internal MDIO bus";
+ snprintf(mbus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+ mbus->priv = priv;
+ mbus->read = yt921x_mbus_int_read;
+ mbus->write = yt921x_mbus_int_write;
+ mbus->parent = dev;
+ mbus->phy_mask = (u32)~GENMASK(YT921X_PORT_NUM - 1, 0);
+
+ res = devm_of_mdiobus_register(dev, mbus, mnp);
+ if (res)
+ return res;
+
+ priv->mbus_int = mbus;
+
+ return 0;
+}
+
+static int yt921x_extif_wait(struct yt921x_priv *priv)
+{
+ u32 val = 0;
+
+ return yt921x_reg_wait(priv, YT921X_EXT_MBUS_OP, YT921X_MBUS_OP_START,
+ &val);
+}
+
+static int
+yt921x_extif_read(struct yt921x_priv *priv, int port, int reg, u16 *valp)
+{
+ struct device *dev = to_device(priv);
+ u32 mask;
+ u32 ctrl;
+ u32 val;
+ int res;
+
+ res = yt921x_extif_wait(priv);
+ if (res)
+ return res;
+
+ mask = YT921X_MBUS_CTRL_PORT_M | YT921X_MBUS_CTRL_REG_M |
+ YT921X_MBUS_CTRL_TYPE_M | YT921X_MBUS_CTRL_OP_M;
+ ctrl = YT921X_MBUS_CTRL_PORT(port) | YT921X_MBUS_CTRL_REG(reg) |
+ YT921X_MBUS_CTRL_TYPE_C22 | YT921X_MBUS_CTRL_READ;
+ res = yt921x_reg_update_bits(priv, YT921X_EXT_MBUS_CTRL, mask, ctrl);
+ if (res)
+ return res;
+ res = yt921x_reg_write(priv, YT921X_EXT_MBUS_OP, YT921X_MBUS_OP_START);
+ if (res)
+ return res;
+
+ res = yt921x_extif_wait(priv);
+ if (res)
+ return res;
+ res = yt921x_reg_read(priv, YT921X_EXT_MBUS_DIN, &val);
+ if (res)
+ return res;
+
+ if ((u16)val != val)
+ dev_info(dev,
+ "%s: port %d, reg 0x%x: Expected u16, got 0x%08x\n",
+ __func__, port, reg, val);
+ *valp = (u16)val;
+ return 0;
+}
+
+static int
+yt921x_extif_write(struct yt921x_priv *priv, int port, int reg, u16 val)
+{
+ u32 mask;
+ u32 ctrl;
+ int res;
+
+ res = yt921x_extif_wait(priv);
+ if (res)
+ return res;
+
+ mask = YT921X_MBUS_CTRL_PORT_M | YT921X_MBUS_CTRL_REG_M |
+ YT921X_MBUS_CTRL_TYPE_M | YT921X_MBUS_CTRL_OP_M;
+ ctrl = YT921X_MBUS_CTRL_PORT(port) | YT921X_MBUS_CTRL_REG(reg) |
+ YT921X_MBUS_CTRL_TYPE_C22 | YT921X_MBUS_CTRL_WRITE;
+ res = yt921x_reg_update_bits(priv, YT921X_EXT_MBUS_CTRL, mask, ctrl);
+ if (res)
+ return res;
+ res = yt921x_reg_write(priv, YT921X_EXT_MBUS_DOUT, val);
+ if (res)
+ return res;
+ res = yt921x_reg_write(priv, YT921X_EXT_MBUS_OP, YT921X_MBUS_OP_START);
+ if (res)
+ return res;
+
+ return yt921x_extif_wait(priv);
+}
+
+static int yt921x_mbus_ext_read(struct mii_bus *mbus, int port, int reg)
+{
+ struct yt921x_priv *priv = mbus->priv;
+ u16 val;
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_extif_read(priv, port, reg, &val);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ return res;
+ return val;
+}
+
+static int
+yt921x_mbus_ext_write(struct mii_bus *mbus, int port, int reg, u16 data)
+{
+ struct yt921x_priv *priv = mbus->priv;
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_extif_write(priv, port, reg, data);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_mbus_ext_init(struct yt921x_priv *priv, struct device_node *mnp)
+{
+ struct device *dev = to_device(priv);
+ struct mii_bus *mbus;
+ int res;
+
+ mbus = devm_mdiobus_alloc(dev);
+ if (!mbus)
+ return -ENOMEM;
+
+ mbus->name = "YT921x external MDIO bus";
+ snprintf(mbus->id, MII_BUS_ID_SIZE, "%s@ext", dev_name(dev));
+ mbus->priv = priv;
+ /* TODO: c45? */
+ mbus->read = yt921x_mbus_ext_read;
+ mbus->write = yt921x_mbus_ext_write;
+ mbus->parent = dev;
+
+ res = devm_of_mdiobus_register(dev, mbus, mnp);
+ if (res)
+ return res;
+
+ priv->mbus_ext = mbus;
+
+ return 0;
+}
+
+/* Read and handle overflow of 32bit MIBs. MIB buffer must be zeroed before. */
+static int yt921x_read_mib(struct yt921x_priv *priv, int port)
+{
+ struct yt921x_port *pp = &priv->ports[port];
+ struct device *dev = to_device(priv);
+ struct yt921x_mib *mib = &pp->mib;
+ int res = 0;
+
+	/* Reads of yt921x_port::mib are not protected by a lock; keeping them
+	 * consistent would be futile anyway, since the registers are read one
+	 * by one and there is no way to snapshot the MIB stats.
+	 *
+	 * Writes (done by this function only) are, and must be, protected by
+	 * reg_lock.
+ */
+
+ for (size_t i = 0; i < ARRAY_SIZE(yt921x_mib_descs); i++) {
+ const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i];
+ u32 reg = YT921X_MIBn_DATA0(port) + desc->offset;
+ u64 *valp = &((u64 *)mib)[i];
+ u64 val = *valp;
+ u32 val0;
+ u32 val1;
+
+ res = yt921x_reg_read(priv, reg, &val0);
+ if (res)
+ break;
+
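+		/* 32-bit counters wrap in hardware; detect a wrap by comparing
+		 * the previously accumulated low word with the fresh reading
+		 * and carry into the upper 32 bits kept in software.
+		 */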
+ if (desc->size <= 1) {
+			if ((u32)val > val0)
+				/* low 32 bits wrapped since the last read */
+				val += (u64)U32_MAX + 1;
+			val &= ~(u64)U32_MAX;
+ val |= val0;
+ } else {
+ res = yt921x_reg_read(priv, reg + 4, &val1);
+ if (res)
+ break;
+ val = ((u64)val1 << 32) | val0;
+ }
+
+ WRITE_ONCE(*valp, val);
+ }
+
+ pp->rx_frames = mib->rx_64byte + mib->rx_65_127byte +
+ mib->rx_128_255byte + mib->rx_256_511byte +
+ mib->rx_512_1023byte + mib->rx_1024_1518byte +
+ mib->rx_jumbo;
+ pp->tx_frames = mib->tx_64byte + mib->tx_65_127byte +
+ mib->tx_128_255byte + mib->tx_256_511byte +
+ mib->tx_512_1023byte + mib->tx_1024_1518byte +
+ mib->tx_jumbo;
+
+ if (res)
+ dev_err(dev, "Failed to %s port %d: %i\n", "read stats for",
+ port, res);
+ return res;
+}
+
+static void yt921x_poll_mib(struct work_struct *work)
+{
+ struct yt921x_port *pp = container_of_const(work, struct yt921x_port,
+ mib_read.work);
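+	/* Recover the containing yt921x_priv: pp - pp->index points at
+	 * ports[0], which lives at offsetof(struct yt921x_priv, ports).
+	 */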
+ struct yt921x_priv *priv = (void *)(pp - pp->index) -
+ offsetof(struct yt921x_priv, ports);
+ unsigned long delay = YT921X_STATS_INTERVAL_JIFFIES;
+ int port = pp->index;
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_read_mib(priv, port);
+ mutex_unlock(&priv->reg_lock);
+ if (res)
+ delay *= 4;
+
+ schedule_delayed_work(&pp->mib_read, delay);
+}
+
+static void
+yt921x_dsa_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data)
+{
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (size_t i = 0; i < ARRAY_SIZE(yt921x_mib_descs); i++) {
+ const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i];
+
+ if (desc->name)
+ ethtool_puts(&data, desc->name);
+ }
+}
+
+static void
+yt921x_dsa_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct yt921x_port *pp = &priv->ports[port];
+ struct yt921x_mib *mib = &pp->mib;
+ size_t j;
+
+ mutex_lock(&priv->reg_lock);
+ yt921x_read_mib(priv, port);
+ mutex_unlock(&priv->reg_lock);
+
+ j = 0;
+ for (size_t i = 0; i < ARRAY_SIZE(yt921x_mib_descs); i++) {
+ const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i];
+
+ if (!desc->name)
+ continue;
+
+ data[j] = ((u64 *)mib)[i];
+ j++;
+ }
+}
+
+static int yt921x_dsa_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ int cnt = 0;
+
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ for (size_t i = 0; i < ARRAY_SIZE(yt921x_mib_descs); i++) {
+ const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i];
+
+ if (desc->name)
+ cnt++;
+ }
+
+ return cnt;
+}
+
+static void
+yt921x_dsa_get_eth_mac_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct yt921x_port *pp = &priv->ports[port];
+ struct yt921x_mib *mib = &pp->mib;
+
+ mutex_lock(&priv->reg_lock);
+ yt921x_read_mib(priv, port);
+ mutex_unlock(&priv->reg_lock);
+
+ mac_stats->FramesTransmittedOK = pp->tx_frames;
+ mac_stats->SingleCollisionFrames = mib->tx_single_collisions;
+ mac_stats->MultipleCollisionFrames = mib->tx_multiple_collisions;
+ mac_stats->FramesReceivedOK = pp->rx_frames;
+ mac_stats->FrameCheckSequenceErrors = mib->rx_crc_errors;
+ mac_stats->AlignmentErrors = mib->rx_alignment_errors;
+ mac_stats->OctetsTransmittedOK = mib->tx_good_bytes;
+ mac_stats->FramesWithDeferredXmissions = mib->tx_deferred;
+ mac_stats->LateCollisions = mib->tx_late_collisions;
+ mac_stats->FramesAbortedDueToXSColls = mib->tx_aborted_errors;
+ /* mac_stats->FramesLostDueToIntMACXmitError */
+ /* mac_stats->CarrierSenseErrors */
+ mac_stats->OctetsReceivedOK = mib->rx_good_bytes;
+ /* mac_stats->FramesLostDueToIntMACRcvError */
+ mac_stats->MulticastFramesXmittedOK = mib->tx_multicast;
+ mac_stats->BroadcastFramesXmittedOK = mib->tx_broadcast;
+ /* mac_stats->FramesWithExcessiveDeferral */
+ mac_stats->MulticastFramesReceivedOK = mib->rx_multicast;
+ mac_stats->BroadcastFramesReceivedOK = mib->rx_broadcast;
+ /* mac_stats->InRangeLengthErrors */
+ /* mac_stats->OutOfRangeLengthField */
+ mac_stats->FrameTooLongErrors = mib->rx_oversize_errors;
+}
+
+static void
+yt921x_dsa_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct yt921x_port *pp = &priv->ports[port];
+ struct yt921x_mib *mib = &pp->mib;
+
+ mutex_lock(&priv->reg_lock);
+ yt921x_read_mib(priv, port);
+ mutex_unlock(&priv->reg_lock);
+
+ ctrl_stats->MACControlFramesTransmitted = mib->tx_pause;
+ ctrl_stats->MACControlFramesReceived = mib->rx_pause;
+ /* ctrl_stats->UnsupportedOpcodesReceived */
+}
+
+static const struct ethtool_rmon_hist_range yt921x_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, YT921X_FRAME_SIZE_MAX },
+ {}
+};
+
+static void
+yt921x_dsa_get_rmon_stats(struct dsa_switch *ds, int port,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct yt921x_port *pp = &priv->ports[port];
+ struct yt921x_mib *mib = &pp->mib;
+
+ mutex_lock(&priv->reg_lock);
+ yt921x_read_mib(priv, port);
+ mutex_unlock(&priv->reg_lock);
+
+ *ranges = yt921x_rmon_ranges;
+
+ rmon_stats->undersize_pkts = mib->rx_undersize_errors;
+ rmon_stats->oversize_pkts = mib->rx_oversize_errors;
+ rmon_stats->fragments = mib->rx_alignment_errors;
+ /* rmon_stats->jabbers */
+
+ rmon_stats->hist[0] = mib->rx_64byte;
+ rmon_stats->hist[1] = mib->rx_65_127byte;
+ rmon_stats->hist[2] = mib->rx_128_255byte;
+ rmon_stats->hist[3] = mib->rx_256_511byte;
+ rmon_stats->hist[4] = mib->rx_512_1023byte;
+ rmon_stats->hist[5] = mib->rx_1024_1518byte;
+ rmon_stats->hist[6] = mib->rx_jumbo;
+
+ rmon_stats->hist_tx[0] = mib->tx_64byte;
+ rmon_stats->hist_tx[1] = mib->tx_65_127byte;
+ rmon_stats->hist_tx[2] = mib->tx_128_255byte;
+ rmon_stats->hist_tx[3] = mib->tx_256_511byte;
+ rmon_stats->hist_tx[4] = mib->tx_512_1023byte;
+ rmon_stats->hist_tx[5] = mib->tx_1024_1518byte;
+ rmon_stats->hist_tx[6] = mib->tx_jumbo;
+}
+
+static void
+yt921x_dsa_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *stats)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct yt921x_port *pp = &priv->ports[port];
+ struct yt921x_mib *mib = &pp->mib;
+
+ stats->rx_length_errors = mib->rx_undersize_errors +
+ mib->rx_fragment_errors;
+ stats->rx_over_errors = mib->rx_oversize_errors;
+ stats->rx_crc_errors = mib->rx_crc_errors;
+ stats->rx_frame_errors = mib->rx_alignment_errors;
+ /* stats->rx_fifo_errors */
+ /* stats->rx_missed_errors */
+
+ stats->tx_aborted_errors = mib->tx_aborted_errors;
+ /* stats->tx_carrier_errors */
+ stats->tx_fifo_errors = mib->tx_undersize_errors;
+ /* stats->tx_heartbeat_errors */
+ stats->tx_window_errors = mib->tx_late_collisions;
+
+ stats->rx_packets = pp->rx_frames;
+ stats->tx_packets = pp->tx_frames;
+ stats->rx_bytes = mib->rx_good_bytes - ETH_FCS_LEN * stats->rx_packets;
+ stats->tx_bytes = mib->tx_good_bytes - ETH_FCS_LEN * stats->tx_packets;
+ stats->rx_errors = stats->rx_length_errors + stats->rx_over_errors +
+ stats->rx_crc_errors + stats->rx_frame_errors;
+ stats->tx_errors = stats->tx_aborted_errors + stats->tx_fifo_errors +
+ stats->tx_window_errors;
+ stats->rx_dropped = mib->rx_dropped;
+ /* stats->tx_dropped */
+ stats->multicast = mib->rx_multicast;
+ stats->collisions = mib->tx_collisions;
+}
+
+static void
+yt921x_dsa_get_pause_stats(struct dsa_switch *ds, int port,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct yt921x_port *pp = &priv->ports[port];
+ struct yt921x_mib *mib = &pp->mib;
+
+ mutex_lock(&priv->reg_lock);
+ yt921x_read_mib(priv, port);
+ mutex_unlock(&priv->reg_lock);
+
+ pause_stats->tx_pause_frames = mib->tx_pause;
+ pause_stats->rx_pause_frames = mib->rx_pause;
+}
+
+static int
+yt921x_set_eee(struct yt921x_priv *priv, int port, struct ethtool_keee *e)
+{
+ /* Poor datasheet for EEE operations; don't ask if you are confused */
+
+ bool enable = e->eee_enabled;
+ u16 new_mask;
+ int res;
+
+ /* Enable / disable global EEE */
+ new_mask = priv->eee_ports_mask;
+ new_mask &= ~BIT(port);
+ new_mask |= !enable ? 0 : BIT(port);
+
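+	/* The PON strap enables EEE globally; touch it only when the set of
+	 * EEE-enabled ports becomes empty or non-empty.
+	 */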
+ if (!!new_mask != !!priv->eee_ports_mask) {
+ res = yt921x_reg_toggle_bits(priv, YT921X_PON_STRAP_FUNC,
+ YT921X_PON_STRAP_EEE, !!new_mask);
+ if (res)
+ return res;
+ res = yt921x_reg_toggle_bits(priv, YT921X_PON_STRAP_VAL,
+ YT921X_PON_STRAP_EEE, !!new_mask);
+ if (res)
+ return res;
+ }
+
+ priv->eee_ports_mask = new_mask;
+
+ /* Enable / disable port EEE */
+ res = yt921x_reg_toggle_bits(priv, YT921X_EEE_CTRL,
+ YT921X_EEE_CTRL_ENn(port), enable);
+ if (res)
+ return res;
+ res = yt921x_reg_toggle_bits(priv, YT921X_EEEn_VAL(port),
+ YT921X_EEE_VAL_DATA, enable);
+ if (res)
+ return res;
+
+ return 0;
+}
+
+static int
+yt921x_dsa_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_set_eee(priv, port, e);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_dsa_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+	/* Only serves as a packet filter, since the frame size is always set
+	 * to the maximum after reset
+ */
+
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ int frame_size;
+ int res;
+
+ frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+ if (dsa_port_is_cpu(dp))
+ frame_size += YT921X_TAG_LEN;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_reg_update_bits(priv, YT921X_MACn_FRAME(port),
+ YT921X_MAC_FRAME_SIZE_M,
+ YT921X_MAC_FRAME_SIZE(frame_size));
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int yt921x_dsa_port_max_mtu(struct dsa_switch *ds, int port)
+{
+ /* Only called for user ports, exclude tag len here */
+ return YT921X_FRAME_SIZE_MAX - ETH_HLEN - ETH_FCS_LEN - YT921X_TAG_LEN;
+}
+
+static int
+yt921x_mirror_del(struct yt921x_priv *priv, int port, bool ingress)
+{
+ u32 mask;
+
+ if (ingress)
+ mask = YT921X_MIRROR_IGR_PORTn(port);
+ else
+ mask = YT921X_MIRROR_EGR_PORTn(port);
+ return yt921x_reg_clear_bits(priv, YT921X_MIRROR, mask);
+}
+
+static int
+yt921x_mirror_add(struct yt921x_priv *priv, int port, bool ingress,
+ int to_local_port, struct netlink_ext_ack *extack)
+{
+ u32 srcs;
+ u32 ctrl;
+ u32 val;
+ u32 dst;
+ int res;
+
+ if (ingress)
+ srcs = YT921X_MIRROR_IGR_PORTn(port);
+ else
+ srcs = YT921X_MIRROR_EGR_PORTn(port);
+ dst = YT921X_MIRROR_PORT(to_local_port);
+
+ res = yt921x_reg_read(priv, YT921X_MIRROR, &val);
+ if (res)
+ return res;
+
+	/* Other mirror rules already use a different sniffer port -> conflict */
+ if ((val & ~srcs & (YT921X_MIRROR_EGR_PORTS_M |
+ YT921X_MIRROR_IGR_PORTS_M)) &&
+ (val & YT921X_MIRROR_PORT_M) != dst) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Sniffer port is already configured, delete existing rules & retry");
+ return -EBUSY;
+ }
+
+ ctrl = val & ~YT921X_MIRROR_PORT_M;
+ ctrl |= srcs;
+ ctrl |= dst;
+
+ if (ctrl == val)
+ return 0;
+
+ return yt921x_reg_write(priv, YT921X_MIRROR, ctrl);
+}
+
+static void
+yt921x_dsa_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct device *dev = to_device(priv);
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_mirror_del(priv, port, mirror->ingress);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ dev_err(dev, "Failed to %s port %d: %i\n", "unmirror",
+ port, res);
+}
+
+static int
+yt921x_dsa_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_mirror_add(priv, port, ingress,
+ mirror->to_local_port, extack);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int yt921x_fdb_wait(struct yt921x_priv *priv, u32 *valp)
+{
+ struct device *dev = to_device(priv);
+ u32 val = YT921X_FDB_RESULT_DONE;
+ int res;
+
+ res = yt921x_reg_wait(priv, YT921X_FDB_RESULT, YT921X_FDB_RESULT_DONE,
+ &val);
+ if (res) {
+ dev_err(dev, "FDB probably stuck\n");
+ return res;
+ }
+
+ *valp = val;
+ return 0;
+}
+
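+/* Load the MAC address and FID into FDB_IN0/FDB_IN1 for a subsequent FDB
+ * operation; @ctrl1 carries any extra IO1 bits such as the entry status.
+ */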
+static int
+yt921x_fdb_in01(struct yt921x_priv *priv, const unsigned char *addr,
+ u16 vid, u32 ctrl1)
+{
+ u32 ctrl;
+ int res;
+
+ ctrl = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
+ res = yt921x_reg_write(priv, YT921X_FDB_IN0, ctrl);
+ if (res)
+ return res;
+
+ ctrl = ctrl1 | YT921X_FDB_IO1_FID(vid) | (addr[4] << 8) | addr[5];
+ return yt921x_reg_write(priv, YT921X_FDB_IN1, ctrl);
+}
+
+static int
+yt921x_fdb_has(struct yt921x_priv *priv, const unsigned char *addr, u16 vid,
+ u16 *indexp)
+{
+ u32 ctrl;
+ u32 val;
+ int res;
+
+ res = yt921x_fdb_in01(priv, addr, vid, 0);
+ if (res)
+ return res;
+
+ ctrl = 0;
+ res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl);
+ if (res)
+ return res;
+
+ ctrl = YT921X_FDB_OP_OP_GET_ONE | YT921X_FDB_OP_START;
+ res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
+ if (res)
+ return res;
+
+ res = yt921x_fdb_wait(priv, &val);
+ if (res)
+ return res;
+ if (val & YT921X_FDB_RESULT_NOTFOUND) {
+ *indexp = YT921X_FDB_NUM;
+ return 0;
+ }
+
+ *indexp = FIELD_GET(YT921X_FDB_RESULT_INDEX_M, val);
+ return 0;
+}
+
+static int
+yt921x_fdb_read(struct yt921x_priv *priv, unsigned char *addr, u16 *vidp,
+ u16 *ports_maskp, u16 *indexp, u8 *statusp)
+{
+ struct device *dev = to_device(priv);
+ u16 index;
+ u32 data0;
+ u32 data1;
+ u32 data2;
+ u32 val;
+ int res;
+
+ res = yt921x_fdb_wait(priv, &val);
+ if (res)
+ return res;
+ if (val & YT921X_FDB_RESULT_NOTFOUND) {
+ *ports_maskp = 0;
+ return 0;
+ }
+ index = FIELD_GET(YT921X_FDB_RESULT_INDEX_M, val);
+
+ res = yt921x_reg_read(priv, YT921X_FDB_OUT1, &data1);
+ if (res)
+ return res;
+ if ((data1 & YT921X_FDB_IO1_STATUS_M) ==
+ YT921X_FDB_IO1_STATUS_INVALID) {
+ *ports_maskp = 0;
+ return 0;
+ }
+
+ res = yt921x_reg_read(priv, YT921X_FDB_OUT0, &data0);
+ if (res)
+ return res;
+ res = yt921x_reg_read(priv, YT921X_FDB_OUT2, &data2);
+ if (res)
+ return res;
+
+ addr[0] = data0 >> 24;
+ addr[1] = data0 >> 16;
+ addr[2] = data0 >> 8;
+ addr[3] = data0;
+ addr[4] = data1 >> 8;
+ addr[5] = data1;
+ *vidp = FIELD_GET(YT921X_FDB_IO1_FID_M, data1);
+ *indexp = index;
+ *ports_maskp = FIELD_GET(YT921X_FDB_IO2_EGR_PORTS_M, data2);
+ *statusp = FIELD_GET(YT921X_FDB_IO1_STATUS_M, data1);
+
+ dev_dbg(dev,
+ "%s: index 0x%x, mac %02x:%02x:%02x:%02x:%02x:%02x, vid %d, ports 0x%x, status %d\n",
+ __func__, *indexp, addr[0], addr[1], addr[2], addr[3],
+ addr[4], addr[5], *vidp, *ports_maskp, *statusp);
+ return 0;
+}
+
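+/* Walk the whole FDB: fetch entry 0 with GET_ONE, then iterate with GET_NEXT
+ * (by index, unicast entries of the given ports) until no further entry is
+ * returned, reporting every entry that matches @ports_mask via @cb.
+ */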
+static int
+yt921x_fdb_dump(struct yt921x_priv *priv, u16 ports_mask,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ unsigned char addr[ETH_ALEN];
+ u8 status;
+ u16 pmask;
+ u16 index;
+ u32 ctrl;
+ u16 vid;
+ int res;
+
+ ctrl = YT921X_FDB_OP_INDEX(0) | YT921X_FDB_OP_MODE_INDEX |
+ YT921X_FDB_OP_OP_GET_ONE | YT921X_FDB_OP_START;
+ res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
+ if (res)
+ return res;
+ res = yt921x_fdb_read(priv, addr, &vid, &pmask, &index, &status);
+ if (res)
+ return res;
+ if ((pmask & ports_mask) && !is_multicast_ether_addr(addr)) {
+ res = cb(addr, vid,
+ status == YT921X_FDB_ENTRY_STATUS_STATIC, data);
+ if (res)
+ return res;
+ }
+
+ ctrl = YT921X_FDB_IO2_EGR_PORTS(ports_mask);
+ res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl);
+ if (res)
+ return res;
+
+ index = 0;
+ do {
+ ctrl = YT921X_FDB_OP_INDEX(index) | YT921X_FDB_OP_MODE_INDEX |
+ YT921X_FDB_OP_NEXT_TYPE_UCAST_PORT |
+ YT921X_FDB_OP_OP_GET_NEXT | YT921X_FDB_OP_START;
+ res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
+ if (res)
+ return res;
+
+ res = yt921x_fdb_read(priv, addr, &vid, &pmask, &index,
+ &status);
+ if (res)
+ return res;
+ if (!pmask)
+ break;
+
+ if ((pmask & ports_mask) && !is_multicast_ether_addr(addr)) {
+ res = cb(addr, vid,
+ status == YT921X_FDB_ENTRY_STATUS_STATIC,
+ data);
+ if (res)
+ return res;
+ }
+
+ /* Never call GET_NEXT with 4095, otherwise it will hang
+ * forever until a reset!
+ */
+ } while (index < YT921X_FDB_NUM - 1);
+
+ return 0;
+}
+
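+/* Flush FDB entries belonging to @ports_mask; pass @vid >= 4096 to flush
+ * regardless of VID. Static entries are only removed when @flush_static is
+ * set.
+ */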
+static int
+yt921x_fdb_flush_raw(struct yt921x_priv *priv, u16 ports_mask, u16 vid,
+ bool flush_static)
+{
+ u32 ctrl;
+ u32 val;
+ int res;
+
+ if (vid < 4096) {
+ ctrl = YT921X_FDB_IO1_FID(vid);
+ res = yt921x_reg_write(priv, YT921X_FDB_IN1, ctrl);
+ if (res)
+ return res;
+ }
+
+ ctrl = YT921X_FDB_IO2_EGR_PORTS(ports_mask);
+ res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl);
+ if (res)
+ return res;
+
+ ctrl = YT921X_FDB_OP_OP_FLUSH | YT921X_FDB_OP_START;
+ if (vid >= 4096)
+ ctrl |= YT921X_FDB_OP_FLUSH_PORT;
+ else
+ ctrl |= YT921X_FDB_OP_FLUSH_PORT_VID;
+ if (flush_static)
+ ctrl |= YT921X_FDB_OP_FLUSH_STATIC;
+ res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
+ if (res)
+ return res;
+
+ res = yt921x_fdb_wait(priv, &val);
+ if (res)
+ return res;
+
+ return 0;
+}
+
+static int
+yt921x_fdb_flush_port(struct yt921x_priv *priv, int port, bool flush_static)
+{
+ return yt921x_fdb_flush_raw(priv, BIT(port), 4096, flush_static);
+}
+
+static int
+yt921x_fdb_add_index_in12(struct yt921x_priv *priv, u16 index, u16 ctrl1,
+ u16 ctrl2)
+{
+ u32 ctrl;
+ u32 val;
+ int res;
+
+ res = yt921x_reg_write(priv, YT921X_FDB_IN1, ctrl1);
+ if (res)
+ return res;
+ res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl2);
+ if (res)
+ return res;
+
+ ctrl = YT921X_FDB_OP_INDEX(index) | YT921X_FDB_OP_MODE_INDEX |
+ YT921X_FDB_OP_OP_ADD | YT921X_FDB_OP_START;
+ res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
+ if (res)
+ return res;
+
+ return yt921x_fdb_wait(priv, &val);
+}
+
+static int
+yt921x_fdb_add(struct yt921x_priv *priv, const unsigned char *addr, u16 vid,
+ u16 ports_mask)
+{
+ u32 ctrl;
+ u32 val;
+ int res;
+
+ ctrl = YT921X_FDB_IO1_STATUS_STATIC;
+ res = yt921x_fdb_in01(priv, addr, vid, ctrl);
+ if (res)
+ return res;
+
+ ctrl = YT921X_FDB_IO2_EGR_PORTS(ports_mask);
+ res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl);
+ if (res)
+ return res;
+
+ ctrl = YT921X_FDB_OP_OP_ADD | YT921X_FDB_OP_START;
+ res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
+ if (res)
+ return res;
+
+ return yt921x_fdb_wait(priv, &val);
+}
+
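+/* Remove @ports_mask from the entry for @addr/@vid, deleting the entry
+ * entirely once no egress ports remain; no-op if the entry does not exist.
+ */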
+static int
+yt921x_fdb_leave(struct yt921x_priv *priv, const unsigned char *addr,
+ u16 vid, u16 ports_mask)
+{
+ u16 index;
+ u32 ctrl1;
+ u32 ctrl2;
+ u32 ctrl;
+ u32 val2;
+ u32 val;
+ int res;
+
+ /* Check for presence */
+ res = yt921x_fdb_has(priv, addr, vid, &index);
+ if (res)
+ return res;
+ if (index >= YT921X_FDB_NUM)
+ return 0;
+
+ /* Check if action required */
+ res = yt921x_reg_read(priv, YT921X_FDB_OUT2, &val2);
+ if (res)
+ return res;
+
+ ctrl2 = val2 & ~YT921X_FDB_IO2_EGR_PORTS(ports_mask);
+ if (ctrl2 == val2)
+ return 0;
+ if (!(ctrl2 & YT921X_FDB_IO2_EGR_PORTS_M)) {
+ ctrl = YT921X_FDB_OP_OP_DEL | YT921X_FDB_OP_START;
+ res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
+ if (res)
+ return res;
+
+ return yt921x_fdb_wait(priv, &val);
+ }
+
+ res = yt921x_reg_read(priv, YT921X_FDB_OUT1, &ctrl1);
+ if (res)
+ return res;
+
+ return yt921x_fdb_add_index_in12(priv, index, ctrl1, ctrl2);
+}
+
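+/* Add @ports_mask to the entry for @addr/@vid, creating a new static entry
+ * if none exists and marking an existing one as static.
+ */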
+static int
+yt921x_fdb_join(struct yt921x_priv *priv, const unsigned char *addr, u16 vid,
+ u16 ports_mask)
+{
+ u16 index;
+ u32 ctrl1;
+ u32 ctrl2;
+ u32 val1;
+ u32 val2;
+ int res;
+
+ /* Check for presence */
+ res = yt921x_fdb_has(priv, addr, vid, &index);
+ if (res)
+ return res;
+ if (index >= YT921X_FDB_NUM)
+ return yt921x_fdb_add(priv, addr, vid, ports_mask);
+
+ /* Check if action required */
+ res = yt921x_reg_read(priv, YT921X_FDB_OUT1, &val1);
+ if (res)
+ return res;
+ res = yt921x_reg_read(priv, YT921X_FDB_OUT2, &val2);
+ if (res)
+ return res;
+
+ ctrl1 = val1 & ~YT921X_FDB_IO1_STATUS_M;
+ ctrl1 |= YT921X_FDB_IO1_STATUS_STATIC;
+ ctrl2 = val2 | YT921X_FDB_IO2_EGR_PORTS(ports_mask);
+ if (ctrl1 == val1 && ctrl2 == val2)
+ return 0;
+
+ return yt921x_fdb_add_index_in12(priv, index, ctrl1, ctrl2);
+}
+
+static int
+yt921x_dsa_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ /* Hardware FDB is shared for fdb and mdb, "bridge fdb show"
+ * only wants to see unicast
+ */
+ res = yt921x_fdb_dump(priv, BIT(port), cb, data);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static void yt921x_dsa_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct device *dev = to_device(priv);
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_fdb_flush_port(priv, port, false);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ dev_err(dev, "Failed to %s port %d: %i\n", "clear FDB for",
+ port, res);
+}
+
+static int
+yt921x_dsa_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ u32 ctrl;
+ int res;
+
+	/* The AGEING register counts in steps of 5 s */
+ ctrl = clamp(msecs / 5000, 1, U16_MAX);
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_reg_write(priv, YT921X_AGEING, ctrl);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_dsa_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_fdb_leave(priv, addr, vid, BIT(port));
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_dsa_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_fdb_join(priv, addr, vid, BIT(port));
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_dsa_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ const unsigned char *addr = mdb->addr;
+ u16 vid = mdb->vid;
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_fdb_leave(priv, addr, vid, BIT(port));
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_dsa_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ const unsigned char *addr = mdb->addr;
+ u16 vid = mdb->vid;
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_fdb_join(priv, addr, vid, BIT(port));
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_port_set_pvid(struct yt921x_priv *priv, int port, u16 vid)
+{
+ u32 mask;
+ u32 ctrl;
+
+ mask = YT921X_PORT_VLAN_CTRL_CVID_M;
+ ctrl = YT921X_PORT_VLAN_CTRL_CVID(vid);
+ return yt921x_reg_update_bits(priv, YT921X_PORTn_VLAN_CTRL(port),
+ mask, ctrl);
+}
+
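+/* With filtering off (or no bridge), the port is parked on the catch-all
+ * YT921X_VID_UNWARE PVID and CTAG recognition is turned off, so all traffic
+ * is handled VLAN-unaware; with filtering on, the bridge PVID and ingress
+ * filtering are (re)applied.
+ */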
+static int
+yt921x_vlan_filtering(struct yt921x_priv *priv, int port, bool vlan_filtering)
+{
+ struct dsa_port *dp = dsa_to_port(&priv->ds, port);
+ struct net_device *bdev;
+ u16 pvid;
+ u32 mask;
+ u32 ctrl;
+ int res;
+
+ bdev = dsa_port_bridge_dev_get(dp);
+
+ if (!bdev || !vlan_filtering)
+ pvid = YT921X_VID_UNWARE;
+ else
+ br_vlan_get_pvid(bdev, &pvid);
+ res = yt921x_port_set_pvid(priv, port, pvid);
+ if (res)
+ return res;
+
+ mask = YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_TAGGED |
+ YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED;
+ ctrl = 0;
+ /* Do not drop tagged frames here; let VLAN_IGR_FILTER do it */
+ if (vlan_filtering && !pvid)
+ ctrl |= YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED;
+ res = yt921x_reg_update_bits(priv, YT921X_PORTn_VLAN_CTRL1(port),
+ mask, ctrl);
+ if (res)
+ return res;
+
+ res = yt921x_reg_toggle_bits(priv, YT921X_VLAN_IGR_FILTER,
+ YT921X_VLAN_IGR_FILTER_PORTn(port),
+ vlan_filtering);
+ if (res)
+ return res;
+
+ /* Turn on / off VLAN awareness */
+ mask = YT921X_PORT_IGR_TPIDn_CTAG_M;
+ if (!vlan_filtering)
+ ctrl = 0;
+ else
+ ctrl = YT921X_PORT_IGR_TPIDn_CTAG(0);
+ res = yt921x_reg_update_bits(priv, YT921X_PORTn_IGR_TPID(port),
+ mask, ctrl);
+ if (res)
+ return res;
+
+ return 0;
+}
+
+static int
+yt921x_vlan_del(struct yt921x_priv *priv, int port, u16 vid)
+{
+ u64 mask64;
+
+ mask64 = YT921X_VLAN_CTRL_PORTS(port) |
+ YT921X_VLAN_CTRL_UNTAG_PORTn(port);
+
+ return yt921x_reg64_clear_bits(priv, YT921X_VLANn_CTRL(vid), mask64);
+}
+
+static int
+yt921x_vlan_add(struct yt921x_priv *priv, int port, u16 vid, bool untagged)
+{
+ u64 mask64;
+ u64 ctrl64;
+
+ mask64 = YT921X_VLAN_CTRL_PORTn(port) |
+ YT921X_VLAN_CTRL_PORTS(priv->cpu_ports_mask);
+ ctrl64 = mask64;
+
+ mask64 |= YT921X_VLAN_CTRL_UNTAG_PORTn(port);
+ if (untagged)
+ ctrl64 |= YT921X_VLAN_CTRL_UNTAG_PORTn(port);
+
+ return yt921x_reg64_update_bits(priv, YT921X_VLANn_CTRL(vid),
+ mask64, ctrl64);
+}
+
+static int
+yt921x_pvid_clear(struct yt921x_priv *priv, int port)
+{
+ struct dsa_port *dp = dsa_to_port(&priv->ds, port);
+ bool vlan_filtering;
+ u32 mask;
+ int res;
+
+ vlan_filtering = dsa_port_is_vlan_filtering(dp);
+
+ res = yt921x_port_set_pvid(priv, port,
+ vlan_filtering ? 0 : YT921X_VID_UNWARE);
+ if (res)
+ return res;
+
+ if (vlan_filtering) {
+ mask = YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED;
+ res = yt921x_reg_set_bits(priv, YT921X_PORTn_VLAN_CTRL1(port),
+ mask);
+ if (res)
+ return res;
+ }
+
+ return 0;
+}
+
+static int
+yt921x_pvid_set(struct yt921x_priv *priv, int port, u16 vid)
+{
+ struct dsa_port *dp = dsa_to_port(&priv->ds, port);
+ bool vlan_filtering;
+ u32 mask;
+ int res;
+
+ vlan_filtering = dsa_port_is_vlan_filtering(dp);
+
+ if (vlan_filtering) {
+ res = yt921x_port_set_pvid(priv, port, vid);
+ if (res)
+ return res;
+ }
+
+ mask = YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED;
+ res = yt921x_reg_clear_bits(priv, YT921X_PORTn_VLAN_CTRL1(port), mask);
+ if (res)
+ return res;
+
+ return 0;
+}
+
+static int
+yt921x_dsa_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering,
+ struct netlink_ext_ack *extack)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ int res;
+
+ if (dsa_is_cpu_port(ds, port))
+ return 0;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_vlan_filtering(priv, port, vlan_filtering);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_dsa_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ u16 vid = vlan->vid;
+ u16 pvid;
+ int res;
+
+ if (dsa_is_cpu_port(ds, port))
+ return 0;
+
+ mutex_lock(&priv->reg_lock);
+ do {
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct net_device *bdev;
+
+ res = yt921x_vlan_del(priv, port, vid);
+ if (res)
+ break;
+
+ bdev = dsa_port_bridge_dev_get(dp);
+ if (bdev) {
+ br_vlan_get_pvid(bdev, &pvid);
+ if (pvid == vid)
+ res = yt921x_pvid_clear(priv, port);
+ }
+ } while (0);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_dsa_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ u16 vid = vlan->vid;
+ u16 pvid;
+ int res;
+
+ /* CPU port is supposed to be a member of every VLAN; see
+ * yt921x_vlan_add() and yt921x_port_setup()
+ */
+ if (dsa_is_cpu_port(ds, port))
+ return 0;
+
+ mutex_lock(&priv->reg_lock);
+ do {
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct net_device *bdev;
+
+ res = yt921x_vlan_add(priv, port, vid,
+ vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+ if (res)
+ break;
+
+ bdev = dsa_port_bridge_dev_get(dp);
+ if (bdev) {
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
+ res = yt921x_pvid_set(priv, port, vid);
+ } else {
+ br_vlan_get_pvid(bdev, &pvid);
+ if (pvid == vid)
+ res = yt921x_pvid_clear(priv, port);
+ }
+ }
+ } while (0);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int yt921x_userport_standalone(struct yt921x_priv *priv, int port)
+{
+ u32 mask;
+ u32 ctrl;
+ int res;
+
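+	/* Isolate from everything except the CPU ports */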
+ ctrl = ~priv->cpu_ports_mask;
+ res = yt921x_reg_write(priv, YT921X_PORTn_ISOLATION(port), ctrl);
+ if (res)
+ return res;
+
+ /* Turn off FDB learning to prevent FDB pollution */
+ mask = YT921X_PORT_LEARN_DIS;
+ res = yt921x_reg_set_bits(priv, YT921X_PORTn_LEARN(port), mask);
+ if (res)
+ return res;
+
+ /* Turn off VLAN awareness */
+ mask = YT921X_PORT_IGR_TPIDn_CTAG_M;
+ res = yt921x_reg_clear_bits(priv, YT921X_PORTn_IGR_TPID(port), mask);
+ if (res)
+ return res;
+
+	/* Irrelevant since learning is off and all packets are trapped;
+ * set it anyway
+ */
+ res = yt921x_port_set_pvid(priv, port, YT921X_VID_UNWARE);
+ if (res)
+ return res;
+
+ return 0;
+}
+
+static int yt921x_userport_bridge(struct yt921x_priv *priv, int port)
+{
+ u32 mask;
+ int res;
+
+ mask = YT921X_PORT_LEARN_DIS;
+ res = yt921x_reg_clear_bits(priv, YT921X_PORTn_LEARN(port), mask);
+ if (res)
+ return res;
+
+ return 0;
+}
+
+static int yt921x_isolate(struct yt921x_priv *priv, int port)
+{
+ u32 mask;
+ int res;
+
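+	/* Block every other user port from forwarding to this port */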
+ mask = BIT(port);
+ for (int i = 0; i < YT921X_PORT_NUM; i++) {
+ if ((BIT(i) & priv->cpu_ports_mask) || i == port)
+ continue;
+
+ res = yt921x_reg_set_bits(priv, YT921X_PORTn_ISOLATION(i),
+ mask);
+ if (res)
+ return res;
+ }
+
+ return 0;
+}
+
+/* Make sure to include the CPU port in ports_mask, or your bridge will
+ * not have it.
+ */
+static int yt921x_bridge(struct yt921x_priv *priv, u16 ports_mask)
+{
+ unsigned long targets_mask = ports_mask & ~priv->cpu_ports_mask;
+ u32 isolated_mask;
+ u32 ctrl;
+ int port;
+ int res;
+
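+	/* Collect the bridge ports currently marked as isolated */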
+ isolated_mask = 0;
+ for_each_set_bit(port, &targets_mask, YT921X_PORT_NUM) {
+ struct yt921x_port *pp = &priv->ports[port];
+
+ if (pp->isolated)
+ isolated_mask |= BIT(port);
+ }
+
+ /* Block from non-cpu bridge ports ... */
+ for_each_set_bit(port, &targets_mask, YT921X_PORT_NUM) {
+ struct yt921x_port *pp = &priv->ports[port];
+
+ /* to non-bridge ports */
+ ctrl = ~ports_mask;
+ /* to isolated ports when isolated */
+ if (pp->isolated)
+ ctrl |= isolated_mask;
+ /* to itself when non-hairpin */
+ if (!pp->hairpin)
+ ctrl |= BIT(port);
+ else
+ ctrl &= ~BIT(port);
+
+ res = yt921x_reg_write(priv, YT921X_PORTn_ISOLATION(port),
+ ctrl);
+ if (res)
+ return res;
+ }
+
+ return 0;
+}
+
+static int yt921x_bridge_leave(struct yt921x_priv *priv, int port)
+{
+ int res;
+
+ res = yt921x_userport_standalone(priv, port);
+ if (res)
+ return res;
+
+ res = yt921x_isolate(priv, port);
+ if (res)
+ return res;
+
+ return 0;
+}
+
+static int
+yt921x_bridge_join(struct yt921x_priv *priv, int port, u16 ports_mask)
+{
+ int res;
+
+ res = yt921x_userport_bridge(priv, port);
+ if (res)
+ return res;
+
+ res = yt921x_bridge(priv, ports_mask);
+ if (res)
+ return res;
+
+ return 0;
+}
+
+static u32
+dsa_bridge_ports(struct dsa_switch *ds, const struct net_device *bdev)
+{
+ struct dsa_port *dp;
+ u32 mask = 0;
+
+ dsa_switch_for_each_user_port(dp, ds)
+ if (dsa_port_offloads_bridge_dev(dp, bdev))
+ mask |= BIT(dp->index);
+
+ return mask;
+}
+
+static int
+yt921x_bridge_flags(struct yt921x_priv *priv, int port,
+ struct switchdev_brport_flags flags)
+{
+ struct yt921x_port *pp = &priv->ports[port];
+ bool do_flush;
+ u32 mask;
+ int res;
+
+ if (flags.mask & BR_LEARNING) {
+ bool learning = flags.val & BR_LEARNING;
+
+ mask = YT921X_PORT_LEARN_DIS;
+ res = yt921x_reg_toggle_bits(priv, YT921X_PORTn_LEARN(port),
+ mask, !learning);
+ if (res)
+ return res;
+ }
+
+ /* BR_FLOOD, BR_MCAST_FLOOD: see the comment where ACT_UNK_ACTn_TRAP
+ * is set
+ */
+
+	/* BR_BCAST_FLOOD: we can filter bcast frames, but cannot trap them */
+
+ do_flush = false;
+ if (flags.mask & BR_HAIRPIN_MODE) {
+ pp->hairpin = flags.val & BR_HAIRPIN_MODE;
+ do_flush = true;
+ }
+ if (flags.mask & BR_ISOLATED) {
+ pp->isolated = flags.val & BR_ISOLATED;
+ do_flush = true;
+ }
+ if (do_flush) {
+ struct dsa_switch *ds = &priv->ds;
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct net_device *bdev;
+
+ bdev = dsa_port_bridge_dev_get(dp);
+ if (bdev) {
+ u32 ports_mask;
+
+ ports_mask = dsa_bridge_ports(ds, bdev);
+ ports_mask |= priv->cpu_ports_mask;
+ res = yt921x_bridge(priv, ports_mask);
+ if (res)
+ return res;
+ }
+ }
+
+ return 0;
+}
+
+static int
+yt921x_dsa_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & ~(BR_HAIRPIN_MODE | BR_LEARNING | BR_FLOOD |
+ BR_MCAST_FLOOD | BR_ISOLATED))
+ return -EINVAL;
+ return 0;
+}
+
+static int
+yt921x_dsa_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ int res;
+
+ if (dsa_is_cpu_port(ds, port))
+ return 0;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_bridge_flags(priv, port, flags);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static void
+yt921x_dsa_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct device *dev = to_device(priv);
+ int res;
+
+ if (dsa_is_cpu_port(ds, port))
+ return;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_bridge_leave(priv, port);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ dev_err(dev, "Failed to %s port %d: %i\n", "unbridge",
+ port, res);
+}
+
+static int
+yt921x_dsa_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge, bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ u16 ports_mask;
+ int res;
+
+ if (dsa_is_cpu_port(ds, port))
+ return 0;
+
+ ports_mask = dsa_bridge_ports(ds, bridge.dev);
+ ports_mask |= priv->cpu_ports_mask;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_bridge_join(priv, port, ports_mask);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_dsa_port_mst_state_set(struct dsa_switch *ds, int port,
+ const struct switchdev_mst_state *st)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ u32 mask;
+ u32 ctrl;
+ int res;
+
+ mask = YT921X_STP_PORTn_M(port);
+ switch (st->state) {
+ case BR_STATE_DISABLED:
+ ctrl = YT921X_STP_PORTn_DISABLED(port);
+ break;
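+	/* No separate listening state in hardware; map it to learning */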
+ case BR_STATE_LISTENING:
+ case BR_STATE_LEARNING:
+ ctrl = YT921X_STP_PORTn_LEARNING(port);
+ break;
+ case BR_STATE_FORWARDING:
+ default:
+ ctrl = YT921X_STP_PORTn_FORWARD(port);
+ break;
+ case BR_STATE_BLOCKING:
+ ctrl = YT921X_STP_PORTn_BLOCKING(port);
+ break;
+ }
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_reg_update_bits(priv, YT921X_STPn(st->msti), mask, ctrl);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_dsa_vlan_msti_set(struct dsa_switch *ds, struct dsa_bridge bridge,
+ const struct switchdev_vlan_msti *msti)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ u64 mask64;
+ u64 ctrl64;
+ int res;
+
+ if (!msti->vid)
+ return -EINVAL;
+ if (!msti->msti || msti->msti >= YT921X_MSTI_NUM)
+ return -EINVAL;
+
+ mask64 = YT921X_VLAN_CTRL_STP_ID_M;
+ ctrl64 = YT921X_VLAN_CTRL_STP_ID(msti->msti);
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_reg64_update_bits(priv, YT921X_VLANn_CTRL(msti->vid),
+ mask64, ctrl64);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static void
+yt921x_dsa_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct device *dev = to_device(priv);
+ bool learning;
+ u32 mask;
+ u32 ctrl;
+ int res;
+
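+	/* Map the bridge STP state onto MSTI 0 and sync port learning */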
+ mask = YT921X_STP_PORTn_M(port);
+ learning = false;
+ switch (state) {
+ case BR_STATE_DISABLED:
+ ctrl = YT921X_STP_PORTn_DISABLED(port);
+ break;
+ case BR_STATE_LISTENING:
+ ctrl = YT921X_STP_PORTn_LEARNING(port);
+ break;
+ case BR_STATE_LEARNING:
+ ctrl = YT921X_STP_PORTn_LEARNING(port);
+ learning = dp->learning;
+ break;
+ case BR_STATE_FORWARDING:
+ default:
+ ctrl = YT921X_STP_PORTn_FORWARD(port);
+ learning = dp->learning;
+ break;
+ case BR_STATE_BLOCKING:
+ ctrl = YT921X_STP_PORTn_BLOCKING(port);
+ break;
+ }
+
+ mutex_lock(&priv->reg_lock);
+ do {
+ res = yt921x_reg_update_bits(priv, YT921X_STPn(0), mask, ctrl);
+ if (res)
+ break;
+
+ mask = YT921X_PORT_LEARN_DIS;
+ ctrl = !learning ? YT921X_PORT_LEARN_DIS : 0;
+ res = yt921x_reg_update_bits(priv, YT921X_PORTn_LEARN(port),
+ mask, ctrl);
+ } while (0);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ dev_err(dev, "Failed to %s port %d: %i\n", "set STP state for",
+ port, res);
+}
+
+static int yt921x_port_down(struct yt921x_priv *priv, int port)
+{
+ u32 mask;
+ int res;
+
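+	/* Force the MAC link down and stop RX/TX */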
+ mask = YT921X_PORT_LINK | YT921X_PORT_RX_MAC_EN | YT921X_PORT_TX_MAC_EN;
+ res = yt921x_reg_clear_bits(priv, YT921X_PORTn_CTRL(port), mask);
+ if (res)
+ return res;
+
+ if (yt921x_port_is_external(port)) {
+ mask = YT921X_SERDES_LINK;
+ res = yt921x_reg_clear_bits(priv, YT921X_SERDESn(port), mask);
+ if (res)
+ return res;
+
+ mask = YT921X_XMII_LINK;
+ res = yt921x_reg_clear_bits(priv, YT921X_XMIIn(port), mask);
+ if (res)
+ return res;
+ }
+
+ return 0;
+}
+
+static int
+yt921x_port_up(struct yt921x_priv *priv, int port, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ u32 mask;
+ u32 ctrl;
+ int res;
+
+ switch (speed) {
+ case SPEED_10:
+ ctrl = YT921X_PORT_SPEED_10;
+ break;
+ case SPEED_100:
+ ctrl = YT921X_PORT_SPEED_100;
+ break;
+ case SPEED_1000:
+ ctrl = YT921X_PORT_SPEED_1000;
+ break;
+ case SPEED_2500:
+ ctrl = YT921X_PORT_SPEED_2500;
+ break;
+ case SPEED_10000:
+ ctrl = YT921X_PORT_SPEED_10000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (duplex == DUPLEX_FULL)
+ ctrl |= YT921X_PORT_DUPLEX_FULL;
+ if (tx_pause)
+ ctrl |= YT921X_PORT_TX_PAUSE;
+ if (rx_pause)
+ ctrl |= YT921X_PORT_RX_PAUSE;
+ ctrl |= YT921X_PORT_RX_MAC_EN | YT921X_PORT_TX_MAC_EN;
+ res = yt921x_reg_write(priv, YT921X_PORTn_CTRL(port), ctrl);
+ if (res)
+ return res;
+
+ if (yt921x_port_is_external(port)) {
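+		/* Mirror the resolved link parameters into the SerDes, XMII
+		 * and MDIO polling blocks
+		 */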
+ mask = YT921X_SERDES_SPEED_M;
+ switch (speed) {
+ case SPEED_10:
+ ctrl = YT921X_SERDES_SPEED_10;
+ break;
+ case SPEED_100:
+ ctrl = YT921X_SERDES_SPEED_100;
+ break;
+ case SPEED_1000:
+ ctrl = YT921X_SERDES_SPEED_1000;
+ break;
+ case SPEED_2500:
+ ctrl = YT921X_SERDES_SPEED_2500;
+ break;
+ case SPEED_10000:
+ ctrl = YT921X_SERDES_SPEED_10000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ mask |= YT921X_SERDES_DUPLEX_FULL;
+ if (duplex == DUPLEX_FULL)
+ ctrl |= YT921X_SERDES_DUPLEX_FULL;
+ mask |= YT921X_SERDES_TX_PAUSE;
+ if (tx_pause)
+ ctrl |= YT921X_SERDES_TX_PAUSE;
+ mask |= YT921X_SERDES_RX_PAUSE;
+ if (rx_pause)
+ ctrl |= YT921X_SERDES_RX_PAUSE;
+ mask |= YT921X_SERDES_LINK;
+ ctrl |= YT921X_SERDES_LINK;
+ res = yt921x_reg_update_bits(priv, YT921X_SERDESn(port),
+ mask, ctrl);
+ if (res)
+ return res;
+
+ mask = YT921X_XMII_LINK;
+ res = yt921x_reg_set_bits(priv, YT921X_XMIIn(port), mask);
+ if (res)
+ return res;
+
+ switch (speed) {
+ case SPEED_10:
+ ctrl = YT921X_MDIO_POLLING_SPEED_10;
+ break;
+ case SPEED_100:
+ ctrl = YT921X_MDIO_POLLING_SPEED_100;
+ break;
+ case SPEED_1000:
+ ctrl = YT921X_MDIO_POLLING_SPEED_1000;
+ break;
+ case SPEED_2500:
+ ctrl = YT921X_MDIO_POLLING_SPEED_2500;
+ break;
+ case SPEED_10000:
+ ctrl = YT921X_MDIO_POLLING_SPEED_10000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (duplex == DUPLEX_FULL)
+ ctrl |= YT921X_MDIO_POLLING_DUPLEX_FULL;
+ ctrl |= YT921X_MDIO_POLLING_LINK;
+ res = yt921x_reg_write(priv, YT921X_MDIO_POLLINGn(port), ctrl);
+ if (res)
+ return res;
+ }
+
+ return 0;
+}
+
+static int
+yt921x_port_config(struct yt921x_priv *priv, int port, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct device *dev = to_device(priv);
+ u32 mask;
+ u32 ctrl;
+ int res;
+
+ if (!yt921x_port_is_external(port)) {
+ if (interface != PHY_INTERFACE_MODE_INTERNAL) {
+ dev_err(dev, "Wrong mode %d on port %d\n",
+ interface, port);
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ switch (interface) {
+ /* SERDES */
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_100BASEX:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ mask = YT921X_SERDES_CTRL_PORTn(port);
+ res = yt921x_reg_set_bits(priv, YT921X_SERDES_CTRL, mask);
+ if (res)
+ return res;
+
+ mask = YT921X_XMII_CTRL_PORTn(port);
+ res = yt921x_reg_clear_bits(priv, YT921X_XMII_CTRL, mask);
+ if (res)
+ return res;
+
+ mask = YT921X_SERDES_MODE_M;
+ switch (interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ ctrl = YT921X_SERDES_MODE_SGMII;
+ break;
+ case PHY_INTERFACE_MODE_100BASEX:
+ ctrl = YT921X_SERDES_MODE_100BASEX;
+ break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ ctrl = YT921X_SERDES_MODE_1000BASEX;
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ ctrl = YT921X_SERDES_MODE_2500BASEX;
+ break;
+ default:
+ return -EINVAL;
+ }
+ res = yt921x_reg_update_bits(priv, YT921X_SERDESn(port),
+ mask, ctrl);
+ if (res)
+ return res;
+
+ break;
+ /* add XMII support here */
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+yt921x_phylink_mac_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct yt921x_priv *priv = to_yt921x_priv(dp->ds);
+ int port = dp->index;
+ int res;
+
+	/* No need to sync; port control block is held until device removal */
+ cancel_delayed_work(&priv->ports[port].mib_read);
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_port_down(priv, port);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ dev_err(dp->ds->dev, "Failed to %s port %d: %i\n", "bring down",
+ port, res);
+}
+
+static void
+yt921x_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct yt921x_priv *priv = to_yt921x_priv(dp->ds);
+ int port = dp->index;
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_port_up(priv, port, mode, interface, speed, duplex,
+ tx_pause, rx_pause);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ dev_err(dp->ds->dev, "Failed to %s port %d: %i\n", "bring up",
+ port, res);
+
+ schedule_delayed_work(&priv->ports[port].mib_read, 0);
+}
+
+static void
+yt921x_phylink_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct yt921x_priv *priv = to_yt921x_priv(dp->ds);
+ int port = dp->index;
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_port_config(priv, port, mode, state->interface);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ dev_err(dp->ds->dev, "Failed to %s port %d: %i\n", "config",
+ port, res);
+}
+
+static void
+yt921x_dsa_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ const struct yt921x_info *info = priv->info;
+
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
+
+ if (info->internal_mask & BIT(port)) {
+		/* Port 10 for the MCU should probably go here too, but since
+		 * that is still untested, keep it disabled for the moment by
+		 * letting it fall through to the default branch.
+		 */
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ } else if (info->external_mask & BIT(port)) {
+		/* TODO: external ports may support SERDES only, XMII only, or
+		 * SERDES + XMII depending on the chip. However, we can't build
+		 * an accurate config table due to the lack of documentation,
+		 * so we simply declare SERDES + XMII and rely on the
+		 * correctness of the devicetree for now.
+		 */
+
+ /* SERDES */
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+ /* REVSGMII (SGMII in PHY role) should go here, once
+ * PHY_INTERFACE_MODE_REVSGMII is introduced.
+ */
+ __set_bit(PHY_INTERFACE_MODE_100BASEX,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ config->supported_interfaces);
+ config->mac_capabilities |= MAC_2500FD;
+
+ /* XMII */
+
+ /* Not tested. To add support for XMII:
+ * - Add proper interface modes below
+ * - Handle them in yt921x_port_config()
+ */
+ }
+	/* No such port: an empty supported_interfaces causes phylink to
+	 * disable it
+	 */
+}
+
+static int yt921x_port_setup(struct yt921x_priv *priv, int port)
+{
+ struct dsa_switch *ds = &priv->ds;
+ u32 ctrl;
+ int res;
+
+ res = yt921x_userport_standalone(priv, port);
+ if (res)
+ return res;
+
+ if (dsa_is_cpu_port(ds, port)) {
+		/* Egress of the CPU port is supposed to be completely
+		 * controlled via tagging, so set it to one-way isolated
+		 * (drop all packets without a tag).
+		 */
+ ctrl = ~(u32)0;
+ res = yt921x_reg_write(priv, YT921X_PORTn_ISOLATION(port),
+ ctrl);
+ if (res)
+ return res;
+
+		/* To simplify the FDB "isolation" simulation, we also disable
+		 * learning on the CPU port, and let software identify packets
+		 * destined for the CPU (either trapped or matching a static
+		 * FDB entry, no matter which bridge that entry belongs to),
+		 * which is already done by yt921x_userport_standalone(). As a
+		 * result, VLAN awareness becomes irrelevant on the CPU port
+		 * (it is set to VLAN-unaware by the way).
+		 */
+ }
+
+ return 0;
+}
+
+static enum dsa_tag_protocol
+yt921x_dsa_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol m)
+{
+ return DSA_TAG_PROTO_YT921X;
+}
+
+static int yt921x_dsa_port_setup(struct dsa_switch *ds, int port)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_port_setup(priv, port);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int yt921x_edata_wait(struct yt921x_priv *priv, u32 *valp)
+{
+ u32 val = YT921X_EDATA_DATA_IDLE;
+ int res;
+
+ res = yt921x_reg_wait(priv, YT921X_EDATA_DATA,
+ YT921X_EDATA_DATA_STATUS_M, &val);
+ if (res)
+ return res;
+
+ *valp = val;
+ return 0;
+}
+
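+/* Issue an EDATA read assuming the engine is already idle;
+ * yt921x_edata_read() waits for idle first.
+ */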
+static int
+yt921x_edata_read_cont(struct yt921x_priv *priv, u8 addr, u8 *valp)
+{
+ u32 ctrl;
+ u32 val;
+ int res;
+
+ ctrl = YT921X_EDATA_CTRL_ADDR(addr) | YT921X_EDATA_CTRL_READ;
+ res = yt921x_reg_write(priv, YT921X_EDATA_CTRL, ctrl);
+ if (res)
+ return res;
+ res = yt921x_edata_wait(priv, &val);
+ if (res)
+ return res;
+
+ *valp = FIELD_GET(YT921X_EDATA_DATA_DATA_M, val);
+ return 0;
+}
+
+static int yt921x_edata_read(struct yt921x_priv *priv, u8 addr, u8 *valp)
+{
+ u32 val;
+ int res;
+
+ res = yt921x_edata_wait(priv, &val);
+ if (res)
+ return res;
+ return yt921x_edata_read_cont(priv, addr, valp);
+}
+
+static int yt921x_chip_detect(struct yt921x_priv *priv)
+{
+ struct device *dev = to_device(priv);
+ const struct yt921x_info *info;
+ u8 extmode;
+ u32 chipid;
+ u32 major;
+ u32 mode;
+ int res;
+
+ res = yt921x_reg_read(priv, YT921X_CHIP_ID, &chipid);
+ if (res)
+ return res;
+
+ major = FIELD_GET(YT921X_CHIP_ID_MAJOR, chipid);
+
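+	/* First match on the chip family (major ID) only */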
+ for (info = yt921x_infos; info->name; info++)
+ if (info->major == major)
+ break;
+ if (!info->name) {
+ dev_err(dev, "Unexpected chipid 0x%x\n", chipid);
+ return -ENODEV;
+ }
+
+ res = yt921x_reg_read(priv, YT921X_CHIP_MODE, &mode);
+ if (res)
+ return res;
+ res = yt921x_edata_read(priv, YT921X_EDATA_EXTMODE, &extmode);
+ if (res)
+ return res;
+
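+	/* Then refine the match using chip mode and extmode */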
+ for (; info->name; info++)
+ if (info->major == major && info->mode == mode &&
+ info->extmode == extmode)
+ break;
+ if (!info->name) {
+ dev_err(dev,
+ "Unsupported chipid 0x%x with chipmode 0x%x 0x%x\n",
+ chipid, mode, extmode);
+ return -ENODEV;
+ }
+
+	/* Print chipid here since we are interested in the lower 16 bits */
+ dev_info(dev,
+ "Motorcomm %s ethernet switch, chipid: 0x%x, chipmode: 0x%x 0x%x\n",
+ info->name, chipid, mode, extmode);
+
+ priv->info = info;
+ return 0;
+}
+
+static int yt921x_chip_reset(struct yt921x_priv *priv)
+{
+ struct device *dev = to_device(priv);
+ u16 eth_p_tag;
+ u32 val;
+ int res;
+
+ res = yt921x_chip_detect(priv);
+ if (res)
+ return res;
+
+ /* Reset */
+ res = yt921x_reg_write(priv, YT921X_RST, YT921X_RST_HW);
+ if (res)
+ return res;
+
+	/* RST_HW is almost the same as a GPIO hard reset, hence this delay. */
+ fsleep(YT921X_RST_DELAY_US);
+
+ val = 0;
+ res = yt921x_reg_wait(priv, YT921X_RST, ~0, &val);
+ if (res)
+ return res;
+
+	/* Check the tag EtherType; do it after the reset in case it was messed
+	 * up before.
+	 */
+ res = yt921x_reg_read(priv, YT921X_CPU_TAG_TPID, &val);
+ if (res)
+ return res;
+ eth_p_tag = FIELD_GET(YT921X_CPU_TAG_TPID_TPID_M, val);
+ if (eth_p_tag != ETH_P_YT921X) {
+ dev_err(dev, "Tag type 0x%x != 0x%x\n", eth_p_tag,
+ ETH_P_YT921X);
+		/* Although setting CPU_TAG_TPID is possible, we choose not to,
+		 * since there is no way it can differ unless you have the
+		 * wrong chip.
+		 */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int yt921x_chip_setup(struct yt921x_priv *priv)
+{
+ struct dsa_switch *ds = &priv->ds;
+ unsigned long cpu_ports_mask;
+ u64 ctrl64;
+ u32 ctrl;
+ int port;
+ int res;
+
+ /* Enable DSA */
+ priv->cpu_ports_mask = dsa_cpu_ports(ds);
+
+ ctrl = YT921X_EXT_CPU_PORT_TAG_EN | YT921X_EXT_CPU_PORT_PORT_EN |
+ YT921X_EXT_CPU_PORT_PORT(__ffs(priv->cpu_ports_mask));
+ res = yt921x_reg_write(priv, YT921X_EXT_CPU_PORT, ctrl);
+ if (res)
+ return res;
+
+ /* Enable and clear MIB */
+ res = yt921x_reg_set_bits(priv, YT921X_FUNC, YT921X_FUNC_MIB);
+ if (res)
+ return res;
+
+ ctrl = YT921X_MIB_CTRL_CLEAN | YT921X_MIB_CTRL_ALL_PORT;
+ res = yt921x_reg_write(priv, YT921X_MIB_CTRL, ctrl);
+ if (res)
+ return res;
+
+ /* Setup software switch */
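+	/* Deliver trapped and CPU-copied packets via the external CPU port */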
+ ctrl = YT921X_CPU_COPY_TO_EXT_CPU;
+ res = yt921x_reg_write(priv, YT921X_CPU_COPY, ctrl);
+ if (res)
+ return res;
+
+ ctrl = GENMASK(10, 0);
+ res = yt921x_reg_write(priv, YT921X_FILTER_UNK_UCAST, ctrl);
+ if (res)
+ return res;
+ res = yt921x_reg_write(priv, YT921X_FILTER_UNK_MCAST, ctrl);
+ if (res)
+ return res;
+
+	/* YT921x does not support native DSA port bridging, so we use port
+	 * isolation to emulate it. However, be especially careful that port
+	 * isolation takes effect _after_ FDB lookups, i.e. if an FDB entry
+	 * (from another bridge) is matched and the destination port (in
+	 * another bridge) is blocked, the packet will be dropped instead of
+	 * being flooded to the "bridged" ports, so we need to trap and handle
+	 * those packets in software.
+	 *
+	 * If there is no more than one bridge, we might be able to drop them
+	 * directly when certain conditions are met, but we trap them in all
+	 * cases for now.
+	 */
+ ctrl = 0;
+ for (int i = 0; i < YT921X_PORT_NUM; i++)
+ ctrl |= YT921X_ACT_UNK_ACTn_TRAP(i);
+	/* The CPU ports are the exception: packets sent via a CPU port without
+	 * a tag should be dropped.
+	 */
+ cpu_ports_mask = priv->cpu_ports_mask;
+ for_each_set_bit(port, &cpu_ports_mask, YT921X_PORT_NUM) {
+ ctrl &= ~YT921X_ACT_UNK_ACTn_M(port);
+ ctrl |= YT921X_ACT_UNK_ACTn_DROP(port);
+ }
+ res = yt921x_reg_write(priv, YT921X_ACT_UNK_UCAST, ctrl);
+ if (res)
+ return res;
+ res = yt921x_reg_write(priv, YT921X_ACT_UNK_MCAST, ctrl);
+ if (res)
+ return res;
+
+	/* Frames tagged with VID 0 should be treated as untagged, which
+	 * confuses the hardware a lot
+	 */
+ ctrl64 = YT921X_VLAN_CTRL_LEARN_DIS | YT921X_VLAN_CTRL_PORTS_M;
+ res = yt921x_reg64_write(priv, YT921X_VLANn_CTRL(0), ctrl64);
+ if (res)
+ return res;
+
+ /* Miscellaneous */
+ res = yt921x_reg_set_bits(priv, YT921X_SENSOR, YT921X_SENSOR_TEMP);
+ if (res)
+ return res;
+
+ return 0;
+}
+
+static int yt921x_dsa_setup(struct dsa_switch *ds)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct device *dev = to_device(priv);
+ struct device_node *np = dev->of_node;
+ struct device_node *child;
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_chip_reset(priv);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ return res;
+
+	/* Register the internal mdio bus. Nodes for internal ports should have
+	 * a proper phy-handle pointing to their PHYs. Not enabling the
+	 * internal bus is possible, though pretty weird, if the internal ports
+	 * are not used.
+	 */
+ child = of_get_child_by_name(np, "mdio");
+ if (child) {
+ res = yt921x_mbus_int_init(priv, child);
+ of_node_put(child);
+ if (res)
+ return res;
+ }
+
+ /* External mdio bus is optional */
+ child = of_get_child_by_name(np, "mdio-external");
+ if (child) {
+ res = yt921x_mbus_ext_init(priv, child);
+ of_node_put(child);
+ if (res)
+ return res;
+
+ dev_err(dev, "Untested external mdio bus\n");
+ return -ENODEV;
+ }
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_chip_setup(priv);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ return res;
+
+ return 0;
+}
+
+static const struct phylink_mac_ops yt921x_phylink_mac_ops = {
+ .mac_link_down = yt921x_phylink_mac_link_down,
+ .mac_link_up = yt921x_phylink_mac_link_up,
+ .mac_config = yt921x_phylink_mac_config,
+};
+
+static const struct dsa_switch_ops yt921x_dsa_switch_ops = {
+ /* mib */
+ .get_strings = yt921x_dsa_get_strings,
+ .get_ethtool_stats = yt921x_dsa_get_ethtool_stats,
+ .get_sset_count = yt921x_dsa_get_sset_count,
+ .get_eth_mac_stats = yt921x_dsa_get_eth_mac_stats,
+ .get_eth_ctrl_stats = yt921x_dsa_get_eth_ctrl_stats,
+ .get_rmon_stats = yt921x_dsa_get_rmon_stats,
+ .get_stats64 = yt921x_dsa_get_stats64,
+ .get_pause_stats = yt921x_dsa_get_pause_stats,
+ /* eee */
+ .support_eee = dsa_supports_eee,
+ .set_mac_eee = yt921x_dsa_set_mac_eee,
+ /* mtu */
+ .port_change_mtu = yt921x_dsa_port_change_mtu,
+ .port_max_mtu = yt921x_dsa_port_max_mtu,
+ /* hsr */
+ .port_hsr_leave = dsa_port_simple_hsr_leave,
+ .port_hsr_join = dsa_port_simple_hsr_join,
+ /* mirror */
+ .port_mirror_del = yt921x_dsa_port_mirror_del,
+ .port_mirror_add = yt921x_dsa_port_mirror_add,
+ /* fdb */
+ .port_fdb_dump = yt921x_dsa_port_fdb_dump,
+ .port_fast_age = yt921x_dsa_port_fast_age,
+ .set_ageing_time = yt921x_dsa_set_ageing_time,
+ .port_fdb_del = yt921x_dsa_port_fdb_del,
+ .port_fdb_add = yt921x_dsa_port_fdb_add,
+ .port_mdb_del = yt921x_dsa_port_mdb_del,
+ .port_mdb_add = yt921x_dsa_port_mdb_add,
+ /* vlan */
+ .port_vlan_filtering = yt921x_dsa_port_vlan_filtering,
+ .port_vlan_del = yt921x_dsa_port_vlan_del,
+ .port_vlan_add = yt921x_dsa_port_vlan_add,
+ /* bridge */
+ .port_pre_bridge_flags = yt921x_dsa_port_pre_bridge_flags,
+ .port_bridge_flags = yt921x_dsa_port_bridge_flags,
+ .port_bridge_leave = yt921x_dsa_port_bridge_leave,
+ .port_bridge_join = yt921x_dsa_port_bridge_join,
+ /* mst */
+ .port_mst_state_set = yt921x_dsa_port_mst_state_set,
+ .vlan_msti_set = yt921x_dsa_vlan_msti_set,
+ .port_stp_state_set = yt921x_dsa_port_stp_state_set,
+ /* port */
+ .get_tag_protocol = yt921x_dsa_get_tag_protocol,
+ .phylink_get_caps = yt921x_dsa_phylink_get_caps,
+ .port_setup = yt921x_dsa_port_setup,
+ /* chip */
+ .setup = yt921x_dsa_setup,
+};
+
+static void yt921x_mdio_shutdown(struct mdio_device *mdiodev)
+{
+ struct yt921x_priv *priv = mdiodev_get_drvdata(mdiodev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(&priv->ds);
+}
+
+static void yt921x_mdio_remove(struct mdio_device *mdiodev)
+{
+ struct yt921x_priv *priv = mdiodev_get_drvdata(mdiodev);
+
+ if (!priv)
+ return;
+
+ for (size_t i = ARRAY_SIZE(priv->ports); i-- > 0; ) {
+ struct yt921x_port *pp = &priv->ports[i];
+
+ disable_delayed_work_sync(&pp->mib_read);
+ }
+
+ dsa_unregister_switch(&priv->ds);
+
+ mutex_destroy(&priv->reg_lock);
+}
+
+static int yt921x_mdio_probe(struct mdio_device *mdiodev)
+{
+ struct device *dev = &mdiodev->dev;
+ struct yt921x_reg_mdio *mdio;
+ struct yt921x_priv *priv;
+ struct dsa_switch *ds;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mdio = devm_kzalloc(dev, sizeof(*mdio), GFP_KERNEL);
+ if (!mdio)
+ return -ENOMEM;
+
+ mdio->bus = mdiodev->bus;
+ mdio->addr = mdiodev->addr;
+ mdio->switchid = 0;
+
+ mutex_init(&priv->reg_lock);
+
+ priv->reg_ops = &yt921x_reg_ops_mdio;
+ priv->reg_ctx = mdio;
+
+ for (size_t i = 0; i < ARRAY_SIZE(priv->ports); i++) {
+ struct yt921x_port *pp = &priv->ports[i];
+
+ pp->index = i;
+ INIT_DELAYED_WORK(&pp->mib_read, yt921x_poll_mib);
+ }
+
+ ds = &priv->ds;
+ ds->dev = dev;
+ ds->assisted_learning_on_cpu_port = true;
+ ds->priv = priv;
+ ds->ops = &yt921x_dsa_switch_ops;
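+	/* The hardware ageing interval is programmed in units of 5 seconds */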
+ ds->ageing_time_min = 1 * 5000;
+ ds->ageing_time_max = U16_MAX * 5000;
+ ds->phylink_mac_ops = &yt921x_phylink_mac_ops;
+ ds->num_ports = YT921X_PORT_NUM;
+
+ mdiodev_set_drvdata(mdiodev, priv);
+
+ return dsa_register_switch(ds);
+}
+
+static const struct of_device_id yt921x_of_match[] = {
+ { .compatible = "motorcomm,yt9215" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, yt921x_of_match);
+
+static struct mdio_driver yt921x_mdio_driver = {
+ .probe = yt921x_mdio_probe,
+ .remove = yt921x_mdio_remove,
+ .shutdown = yt921x_mdio_shutdown,
+ .mdiodrv.driver = {
+ .name = YT921X_NAME,
+ .of_match_table = yt921x_of_match,
+ },
+};
+
+mdio_module_driver(yt921x_mdio_driver);
+
+MODULE_AUTHOR("David Yang <mmyangfl@gmail.com>");
+MODULE_DESCRIPTION("Driver for Motorcomm YT921x Switch");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/yt921x.h b/drivers/net/dsa/yt921x.h
new file mode 100644
index 000000000000..61bb0ab3b09a
--- /dev/null
+++ b/drivers/net/dsa/yt921x.h
@@ -0,0 +1,567 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2025 David Yang
+ */
+
+#ifndef __YT921X_H
+#define __YT921X_H
+
+#include <net/dsa.h>
+
+#define YT921X_SMI_SWITCHID_M GENMASK(3, 2)
+#define YT921X_SMI_SWITCHID(x) FIELD_PREP(YT921X_SMI_SWITCHID_M, (x))
+#define YT921X_SMI_AD BIT(1)
+#define YT921X_SMI_ADDR 0
+#define YT921X_SMI_DATA YT921X_SMI_AD
+#define YT921X_SMI_RW BIT(0)
+#define YT921X_SMI_WRITE 0
+#define YT921X_SMI_READ YT921X_SMI_RW
+
+#define YT921X_SWITCHID_NUM 4
+
+#define YT921X_RST 0x80000
+#define YT921X_RST_HW BIT(31)
+#define YT921X_RST_SW BIT(1)
+#define YT921X_FUNC 0x80004
+#define YT921X_FUNC_MIB BIT(1)
+#define YT921X_CHIP_ID 0x80008
+#define YT921X_CHIP_ID_MAJOR GENMASK(31, 16)
+#define YT921X_EXT_CPU_PORT 0x8000c
+#define YT921X_EXT_CPU_PORT_TAG_EN BIT(15)
+#define YT921X_EXT_CPU_PORT_PORT_EN BIT(14)
+#define YT921X_EXT_CPU_PORT_PORT_M GENMASK(3, 0)
+#define YT921X_EXT_CPU_PORT_PORT(x) FIELD_PREP(YT921X_EXT_CPU_PORT_PORT_M, (x))
+#define YT921X_CPU_TAG_TPID 0x80010
+#define YT921X_CPU_TAG_TPID_TPID_M GENMASK(15, 0)
+/* Same as ETH_P_YT921X, but this represents the true HW default, while the
+ * former is a local convention chosen by us.
+ */
+#define YT921X_CPU_TAG_TPID_TPID_DEFAULT 0x9988
+#define YT921X_PVID_SEL 0x80014
+#define YT921X_PVID_SEL_SVID_PORTn(port) BIT(port)
+#define YT921X_SERDES_CTRL 0x80028
+#define YT921X_SERDES_CTRL_PORTn_TEST(port) BIT((port) - 3)
+#define YT921X_SERDES_CTRL_PORTn(port) BIT((port) - 8)
+#define YT921X_IO_LEVEL 0x80030
+#define YT9215_IO_LEVEL_NORMAL_M GENMASK(5, 4)
+#define YT9215_IO_LEVEL_NORMAL(x) FIELD_PREP(YT9215_IO_LEVEL_NORMAL_M, (x))
+#define YT9215_IO_LEVEL_NORMAL_3V3 YT9215_IO_LEVEL_NORMAL(0)
+#define YT9215_IO_LEVEL_NORMAL_1V8 YT9215_IO_LEVEL_NORMAL(3)
+#define YT9215_IO_LEVEL_RGMII1_M GENMASK(3, 2)
+#define YT9215_IO_LEVEL_RGMII1(x) FIELD_PREP(YT9215_IO_LEVEL_RGMII1_M, (x))
+#define YT9215_IO_LEVEL_RGMII1_3V3 YT9215_IO_LEVEL_RGMII1(0)
+#define YT9215_IO_LEVEL_RGMII1_2V5 YT9215_IO_LEVEL_RGMII1(1)
+#define YT9215_IO_LEVEL_RGMII1_1V8 YT9215_IO_LEVEL_RGMII1(2)
+#define YT9215_IO_LEVEL_RGMII0_M GENMASK(1, 0)
+#define YT9215_IO_LEVEL_RGMII0(x) FIELD_PREP(YT9215_IO_LEVEL_RGMII0_M, (x))
+#define YT9215_IO_LEVEL_RGMII0_3V3 YT9215_IO_LEVEL_RGMII0(0)
+#define YT9215_IO_LEVEL_RGMII0_2V5 YT9215_IO_LEVEL_RGMII0(1)
+#define YT9215_IO_LEVEL_RGMII0_1V8 YT9215_IO_LEVEL_RGMII0(2)
+#define YT9218_IO_LEVEL_RGMII1_M GENMASK(5, 4)
+#define YT9218_IO_LEVEL_RGMII1(x) FIELD_PREP(YT9218_IO_LEVEL_RGMII1_M, (x))
+#define YT9218_IO_LEVEL_RGMII1_3V3 YT9218_IO_LEVEL_RGMII1(0)
+#define YT9218_IO_LEVEL_RGMII1_2V5 YT9218_IO_LEVEL_RGMII1(1)
+#define YT9218_IO_LEVEL_RGMII1_1V8 YT9218_IO_LEVEL_RGMII1(2)
+#define YT9218_IO_LEVEL_RGMII0_M GENMASK(3, 2)
+#define YT9218_IO_LEVEL_RGMII0(x) FIELD_PREP(YT9218_IO_LEVEL_RGMII0_M, (x))
+#define YT9218_IO_LEVEL_RGMII0_3V3 YT9218_IO_LEVEL_RGMII0(0)
+#define YT9218_IO_LEVEL_RGMII0_2V5 YT9218_IO_LEVEL_RGMII0(1)
+#define YT9218_IO_LEVEL_RGMII0_1V8 YT9218_IO_LEVEL_RGMII0(2)
+#define YT9218_IO_LEVEL_NORMAL_M GENMASK(1, 0)
+#define YT9218_IO_LEVEL_NORMAL(x) FIELD_PREP(YT9218_IO_LEVEL_NORMAL_M, (x))
+#define YT9218_IO_LEVEL_NORMAL_3V3 YT9218_IO_LEVEL_NORMAL(0)
+#define YT9218_IO_LEVEL_NORMAL_1V8 YT9218_IO_LEVEL_NORMAL(3)
+#define YT921X_MAC_ADDR_HI2 0x80080
+#define YT921X_MAC_ADDR_LO4 0x80084
+#define YT921X_SERDESn(port) (0x8008c + 4 * ((port) - 8))
+#define YT921X_SERDES_MODE_M GENMASK(9, 7)
+#define YT921X_SERDES_MODE(x) FIELD_PREP(YT921X_SERDES_MODE_M, (x))
+#define YT921X_SERDES_MODE_SGMII YT921X_SERDES_MODE(0)
+#define YT921X_SERDES_MODE_REVSGMII YT921X_SERDES_MODE(1)
+#define YT921X_SERDES_MODE_1000BASEX YT921X_SERDES_MODE(2)
+#define YT921X_SERDES_MODE_100BASEX YT921X_SERDES_MODE(3)
+#define YT921X_SERDES_MODE_2500BASEX YT921X_SERDES_MODE(4)
+#define YT921X_SERDES_RX_PAUSE BIT(6)
+#define YT921X_SERDES_TX_PAUSE BIT(5)
+#define YT921X_SERDES_LINK BIT(4) /* force link */
+#define YT921X_SERDES_DUPLEX_FULL BIT(3)
+#define YT921X_SERDES_SPEED_M GENMASK(2, 0)
+#define YT921X_SERDES_SPEED(x) FIELD_PREP(YT921X_SERDES_SPEED_M, (x))
+#define YT921X_SERDES_SPEED_10 YT921X_SERDES_SPEED(0)
+#define YT921X_SERDES_SPEED_100 YT921X_SERDES_SPEED(1)
+#define YT921X_SERDES_SPEED_1000 YT921X_SERDES_SPEED(2)
+#define YT921X_SERDES_SPEED_10000 YT921X_SERDES_SPEED(3)
+#define YT921X_SERDES_SPEED_2500 YT921X_SERDES_SPEED(4)
+#define YT921X_PORTn_CTRL(port) (0x80100 + 4 * (port))
+#define YT921X_PORT_CTRL_PAUSE_AN BIT(10)
+#define YT921X_PORTn_STATUS(port) (0x80200 + 4 * (port))
+#define YT921X_PORT_LINK BIT(9) /* CTRL: auto negotiation */
+#define YT921X_PORT_HALF_PAUSE BIT(8) /* Half-duplex back pressure mode */
+#define YT921X_PORT_DUPLEX_FULL BIT(7)
+#define YT921X_PORT_RX_PAUSE BIT(6)
+#define YT921X_PORT_TX_PAUSE BIT(5)
+#define YT921X_PORT_RX_MAC_EN BIT(4)
+#define YT921X_PORT_TX_MAC_EN BIT(3)
+#define YT921X_PORT_SPEED_M GENMASK(2, 0)
+#define YT921X_PORT_SPEED(x) FIELD_PREP(YT921X_PORT_SPEED_M, (x))
+#define YT921X_PORT_SPEED_10 YT921X_PORT_SPEED(0)
+#define YT921X_PORT_SPEED_100 YT921X_PORT_SPEED(1)
+#define YT921X_PORT_SPEED_1000 YT921X_PORT_SPEED(2)
+#define YT921X_PORT_SPEED_10000 YT921X_PORT_SPEED(3)
+#define YT921X_PORT_SPEED_2500 YT921X_PORT_SPEED(4)
+#define YT921X_PON_STRAP_FUNC 0x80320
+#define YT921X_PON_STRAP_VAL 0x80324
+#define YT921X_PON_STRAP_CAP 0x80328
+#define YT921X_PON_STRAP_EEE BIT(16)
+#define YT921X_PON_STRAP_LOOP_DETECT BIT(7)
+#define YT921X_MDIO_POLLINGn(port) (0x80364 + 4 * ((port) - 8))
+#define YT921X_MDIO_POLLING_DUPLEX_FULL BIT(4)
+#define YT921X_MDIO_POLLING_LINK BIT(3)
+#define YT921X_MDIO_POLLING_SPEED_M GENMASK(2, 0)
+#define YT921X_MDIO_POLLING_SPEED(x) FIELD_PREP(YT921X_MDIO_POLLING_SPEED_M, (x))
+#define YT921X_MDIO_POLLING_SPEED_10 YT921X_MDIO_POLLING_SPEED(0)
+#define YT921X_MDIO_POLLING_SPEED_100 YT921X_MDIO_POLLING_SPEED(1)
+#define YT921X_MDIO_POLLING_SPEED_1000 YT921X_MDIO_POLLING_SPEED(2)
+#define YT921X_MDIO_POLLING_SPEED_10000 YT921X_MDIO_POLLING_SPEED(3)
+#define YT921X_MDIO_POLLING_SPEED_2500 YT921X_MDIO_POLLING_SPEED(4)
+#define YT921X_SENSOR 0x8036c
+#define YT921X_SENSOR_TEMP BIT(18)
+#define YT921X_TEMP 0x80374
+#define YT921X_CHIP_MODE 0x80388
+#define YT921X_CHIP_MODE_MODE GENMASK(1, 0)
+#define YT921X_XMII_CTRL 0x80394
+#define YT921X_XMII_CTRL_PORTn(port) BIT(9 - (port)) /* Yes, it's reversed */
+#define YT921X_XMIIn(port) (0x80400 + 8 * ((port) - 8))
+#define YT921X_XMII_MODE_M GENMASK(31, 29)
+#define YT921X_XMII_MODE(x) FIELD_PREP(YT921X_XMII_MODE_M, (x))
+#define YT921X_XMII_MODE_MII YT921X_XMII_MODE(0)
+#define YT921X_XMII_MODE_REVMII YT921X_XMII_MODE(1)
+#define YT921X_XMII_MODE_RMII YT921X_XMII_MODE(2)
+#define YT921X_XMII_MODE_REVRMII YT921X_XMII_MODE(3)
+#define YT921X_XMII_MODE_RGMII YT921X_XMII_MODE(4)
+#define YT921X_XMII_MODE_DISABLE YT921X_XMII_MODE(5)
+#define YT921X_XMII_LINK BIT(19) /* force link */
+#define YT921X_XMII_EN BIT(18)
+#define YT921X_XMII_SOFT_RST BIT(17)
+#define YT921X_XMII_RGMII_TX_DELAY_150PS_M GENMASK(16, 13)
+#define YT921X_XMII_RGMII_TX_DELAY_150PS(x) FIELD_PREP(YT921X_XMII_RGMII_TX_DELAY_150PS_M, (x))
+#define YT921X_XMII_TX_CLK_IN BIT(11)
+#define YT921X_XMII_RX_CLK_IN BIT(10)
+#define YT921X_XMII_RGMII_TX_DELAY_2NS BIT(8)
+#define YT921X_XMII_RGMII_TX_CLK_OUT BIT(7)
+#define YT921X_XMII_RGMII_RX_DELAY_150PS_M GENMASK(6, 3)
+#define YT921X_XMII_RGMII_RX_DELAY_150PS(x) FIELD_PREP(YT921X_XMII_RGMII_RX_DELAY_150PS_M, (x))
+#define YT921X_XMII_RMII_PHY_TX_CLK_OUT BIT(2)
+#define YT921X_XMII_REVMII_TX_CLK_OUT BIT(1)
+#define YT921X_XMII_REVMII_RX_CLK_OUT BIT(0)
+
+#define YT921X_MACn_FRAME(port) (0x81008 + 0x1000 * (port))
+#define YT921X_MAC_FRAME_SIZE_M GENMASK(21, 8)
+#define YT921X_MAC_FRAME_SIZE(x) FIELD_PREP(YT921X_MAC_FRAME_SIZE_M, (x))
+
+#define YT921X_EEEn_VAL(port) (0xa0000 + 0x40 * (port))
+#define YT921X_EEE_VAL_DATA BIT(1)
+
+#define YT921X_EEE_CTRL 0xb0000
+#define YT921X_EEE_CTRL_ENn(port) BIT(port)
+
+#define YT921X_MIB_CTRL 0xc0004
+#define YT921X_MIB_CTRL_CLEAN BIT(30)
+#define YT921X_MIB_CTRL_PORT_M GENMASK(6, 3)
+#define YT921X_MIB_CTRL_PORT(x) FIELD_PREP(YT921X_MIB_CTRL_PORT_M, (x))
+#define YT921X_MIB_CTRL_ONE_PORT BIT(1)
+#define YT921X_MIB_CTRL_ALL_PORT BIT(0)
+#define YT921X_MIBn_DATA0(port) (0xc0100 + 0x100 * (port))
+#define YT921X_MIBn_DATAm(port, x) (YT921X_MIBn_DATA0(port) + 4 * (x))
+#define YT921X_MIB_DATA_RX_BROADCAST 0x00
+#define YT921X_MIB_DATA_RX_PAUSE 0x04
+#define YT921X_MIB_DATA_RX_MULTICAST 0x08
+#define YT921X_MIB_DATA_RX_CRC_ERR 0x0c
+
+#define YT921X_MIB_DATA_RX_ALIGN_ERR 0x10
+#define YT921X_MIB_DATA_RX_UNDERSIZE_ERR 0x14
+#define YT921X_MIB_DATA_RX_FRAG_ERR 0x18
+#define YT921X_MIB_DATA_RX_PKT_SZ_64 0x1c
+
+#define YT921X_MIB_DATA_RX_PKT_SZ_65_TO_127 0x20
+#define YT921X_MIB_DATA_RX_PKT_SZ_128_TO_255 0x24
+#define YT921X_MIB_DATA_RX_PKT_SZ_256_TO_511 0x28
+#define YT921X_MIB_DATA_RX_PKT_SZ_512_TO_1023 0x2c
+
+#define YT921X_MIB_DATA_RX_PKT_SZ_1024_TO_1518 0x30
+#define YT921X_MIB_DATA_RX_PKT_SZ_1519_TO_MAX 0x34
+/* 0x38: unused */
+#define YT921X_MIB_DATA_RX_GOOD_BYTES 0x3c
+
+/* 0x40: 64 bytes */
+#define YT921X_MIB_DATA_RX_BAD_BYTES 0x44
+/* 0x48: 64 bytes */
+#define YT921X_MIB_DATA_RX_OVERSIZE_ERR 0x4c
+
+#define YT921X_MIB_DATA_RX_DROPPED 0x50
+#define YT921X_MIB_DATA_TX_BROADCAST 0x54
+#define YT921X_MIB_DATA_TX_PAUSE 0x58
+#define YT921X_MIB_DATA_TX_MULTICAST 0x5c
+
+#define YT921X_MIB_DATA_TX_UNDERSIZE_ERR 0x60
+#define YT921X_MIB_DATA_TX_PKT_SZ_64 0x64
+#define YT921X_MIB_DATA_TX_PKT_SZ_65_TO_127 0x68
+#define YT921X_MIB_DATA_TX_PKT_SZ_128_TO_255 0x6c
+
+#define YT921X_MIB_DATA_TX_PKT_SZ_256_TO_511 0x70
+#define YT921X_MIB_DATA_TX_PKT_SZ_512_TO_1023 0x74
+#define YT921X_MIB_DATA_TX_PKT_SZ_1024_TO_1518 0x78
+#define YT921X_MIB_DATA_TX_PKT_SZ_1519_TO_MAX 0x7c
+
+/* 0x80: unused */
+#define YT921X_MIB_DATA_TX_GOOD_BYTES 0x84
+/* 0x88: 64 bytes */
+#define YT921X_MIB_DATA_TX_COLLISION 0x8c
+
+#define YT921X_MIB_DATA_TX_EXCESSIVE_COLLISION 0x90
+#define YT921X_MIB_DATA_TX_MULTIPLE_COLLISION 0x94
+#define YT921X_MIB_DATA_TX_SINGLE_COLLISION 0x98
+#define YT921X_MIB_DATA_TX_PKT 0x9c
+
+#define YT921X_MIB_DATA_TX_DEFERRED 0xa0
+#define YT921X_MIB_DATA_TX_LATE_COLLISION 0xa4
+#define YT921X_MIB_DATA_RX_OAM 0xa8
+#define YT921X_MIB_DATA_TX_OAM 0xac
+
+#define YT921X_EDATA_CTRL 0xe0000
+#define YT921X_EDATA_CTRL_ADDR_M GENMASK(15, 8)
+#define YT921X_EDATA_CTRL_ADDR(x) FIELD_PREP(YT921X_EDATA_CTRL_ADDR_M, (x))
+#define YT921X_EDATA_CTRL_OP_M GENMASK(3, 0)
+#define YT921X_EDATA_CTRL_OP(x) FIELD_PREP(YT921X_EDATA_CTRL_OP_M, (x))
+#define YT921X_EDATA_CTRL_READ YT921X_EDATA_CTRL_OP(5)
+#define YT921X_EDATA_DATA 0xe0004
+#define YT921X_EDATA_DATA_DATA_M GENMASK(31, 24)
+#define YT921X_EDATA_DATA_STATUS_M GENMASK(3, 0)
+#define YT921X_EDATA_DATA_STATUS(x) FIELD_PREP(YT921X_EDATA_DATA_STATUS_M, (x))
+#define YT921X_EDATA_DATA_IDLE YT921X_EDATA_DATA_STATUS(3)
+
+#define YT921X_EXT_MBUS_OP 0x6a000
+#define YT921X_INT_MBUS_OP 0xf0000
+#define YT921X_MBUS_OP_START BIT(0)
+#define YT921X_EXT_MBUS_CTRL 0x6a004
+#define YT921X_INT_MBUS_CTRL 0xf0004
+#define YT921X_MBUS_CTRL_PORT_M GENMASK(25, 21)
+#define YT921X_MBUS_CTRL_PORT(x) FIELD_PREP(YT921X_MBUS_CTRL_PORT_M, (x))
+#define YT921X_MBUS_CTRL_REG_M GENMASK(20, 16)
+#define YT921X_MBUS_CTRL_REG(x) FIELD_PREP(YT921X_MBUS_CTRL_REG_M, (x))
+#define YT921X_MBUS_CTRL_TYPE_M GENMASK(11, 8) /* wild guess */
+#define YT921X_MBUS_CTRL_TYPE(x) FIELD_PREP(YT921X_MBUS_CTRL_TYPE_M, (x))
+#define YT921X_MBUS_CTRL_TYPE_C22 YT921X_MBUS_CTRL_TYPE(4)
+#define YT921X_MBUS_CTRL_OP_M GENMASK(3, 2) /* wild guess */
+#define YT921X_MBUS_CTRL_OP(x) FIELD_PREP(YT921X_MBUS_CTRL_OP_M, (x))
+#define YT921X_MBUS_CTRL_WRITE YT921X_MBUS_CTRL_OP(1)
+#define YT921X_MBUS_CTRL_READ YT921X_MBUS_CTRL_OP(2)
+#define YT921X_EXT_MBUS_DOUT 0x6a008
+#define YT921X_INT_MBUS_DOUT 0xf0008
+#define YT921X_EXT_MBUS_DIN 0x6a00c
+#define YT921X_INT_MBUS_DIN 0xf000c
+
+#define YT921X_PORTn_EGR(port) (0x100000 + 4 * (port))
+#define YT921X_PORT_EGR_TPID_CTAG_M GENMASK(5, 4)
+#define YT921X_PORT_EGR_TPID_CTAG(x) FIELD_PREP(YT921X_PORT_EGR_TPID_CTAG_M, (x))
+#define YT921X_PORT_EGR_TPID_STAG_M GENMASK(3, 2)
+#define YT921X_PORT_EGR_TPID_STAG(x) FIELD_PREP(YT921X_PORT_EGR_TPID_STAG_M, (x))
+#define YT921X_TPID_EGRn(x) (0x100300 + 4 * (x)) /* [0, 3] */
+#define YT921X_TPID_EGR_TPID_M GENMASK(15, 0)
+
+#define YT921X_VLAN_IGR_FILTER 0x180280
+#define YT921X_VLAN_IGR_FILTER_PORTn_BYPASS_IGMP(port) BIT((port) + 11)
+#define YT921X_VLAN_IGR_FILTER_PORTn(port) BIT(port)
+#define YT921X_PORTn_ISOLATION(port) (0x180294 + 4 * (port))
+#define YT921X_PORT_ISOLATION_BLOCKn(port) BIT(port)
+#define YT921X_STPn(n) (0x18038c + 4 * (n))
+#define YT921X_STP_PORTn_M(port) GENMASK(2 * (port) + 1, 2 * (port))
+#define YT921X_STP_PORTn(port, x) ((x) << (2 * (port)))
+#define YT921X_STP_PORTn_DISABLED(port) YT921X_STP_PORTn(port, 0)
+#define YT921X_STP_PORTn_LEARNING(port) YT921X_STP_PORTn(port, 1)
+#define YT921X_STP_PORTn_BLOCKING(port) YT921X_STP_PORTn(port, 2)
+#define YT921X_STP_PORTn_FORWARD(port) YT921X_STP_PORTn(port, 3)
+#define YT921X_PORTn_LEARN(port) (0x1803d0 + 4 * (port))
+#define YT921X_PORT_LEARN_VID_LEARN_MULTI_EN BIT(22)
+#define YT921X_PORT_LEARN_VID_LEARN_MODE BIT(21)
+#define YT921X_PORT_LEARN_VID_LEARN_EN BIT(20)
+#define YT921X_PORT_LEARN_SUSPEND_COPY_EN BIT(19)
+#define YT921X_PORT_LEARN_SUSPEND_DROP_EN BIT(18)
+#define YT921X_PORT_LEARN_DIS BIT(17)
+#define YT921X_PORT_LEARN_LIMIT_EN BIT(16)
+#define YT921X_PORT_LEARN_LIMIT_M GENMASK(15, 8)
+#define YT921X_PORT_LEARN_LIMIT(x) FIELD_PREP(YT921X_PORT_LEARN_LIMIT_M, (x))
+#define YT921X_PORT_LEARN_DROP_ON_EXCEEDED BIT(2)
+#define YT921X_PORT_LEARN_MODE_M GENMASK(1, 0)
+#define YT921X_PORT_LEARN_MODE(x) FIELD_PREP(YT921X_PORT_LEARN_MODE_M, (x))
+#define YT921X_PORT_LEARN_MODE_AUTO YT921X_PORT_LEARN_MODE(0)
+#define YT921X_PORT_LEARN_MODE_AUTO_AND_COPY YT921X_PORT_LEARN_MODE(1)
+#define YT921X_PORT_LEARN_MODE_CPU_CONTROL YT921X_PORT_LEARN_MODE(2)
+#define YT921X_AGEING 0x180440
+#define YT921X_AGEING_INTERVAL_M GENMASK(15, 0)
+#define YT921X_FDB_IN0 0x180454
+#define YT921X_FDB_IN1 0x180458
+#define YT921X_FDB_IN2 0x18045c
+#define YT921X_FDB_OP 0x180460
+#define YT921X_FDB_OP_INDEX_M GENMASK(22, 11)
+#define YT921X_FDB_OP_INDEX(x) FIELD_PREP(YT921X_FDB_OP_INDEX_M, (x))
+#define YT921X_FDB_OP_MODE_INDEX BIT(10) /* mac+fid / index */
+#define YT921X_FDB_OP_FLUSH_MCAST BIT(9) /* ucast / mcast */
+#define YT921X_FDB_OP_FLUSH_M GENMASK(8, 7)
+#define YT921X_FDB_OP_FLUSH(x) FIELD_PREP(YT921X_FDB_OP_FLUSH_M, (x))
+#define YT921X_FDB_OP_FLUSH_ALL YT921X_FDB_OP_FLUSH(0)
+#define YT921X_FDB_OP_FLUSH_PORT YT921X_FDB_OP_FLUSH(1)
+#define YT921X_FDB_OP_FLUSH_PORT_VID YT921X_FDB_OP_FLUSH(2)
+#define YT921X_FDB_OP_FLUSH_VID YT921X_FDB_OP_FLUSH(3)
+#define YT921X_FDB_OP_FLUSH_STATIC BIT(6)
+#define YT921X_FDB_OP_NEXT_TYPE_M GENMASK(5, 4)
+#define YT921X_FDB_OP_NEXT_TYPE(x) FIELD_PREP(YT921X_FDB_OP_NEXT_TYPE_M, (x))
+#define YT921X_FDB_OP_NEXT_TYPE_UCAST_PORT YT921X_FDB_OP_NEXT_TYPE(0)
+#define YT921X_FDB_OP_NEXT_TYPE_UCAST_VID YT921X_FDB_OP_NEXT_TYPE(1)
+#define YT921X_FDB_OP_NEXT_TYPE_UCAST YT921X_FDB_OP_NEXT_TYPE(2)
+#define YT921X_FDB_OP_NEXT_TYPE_MCAST YT921X_FDB_OP_NEXT_TYPE(3)
+#define YT921X_FDB_OP_OP_M GENMASK(3, 1)
+#define YT921X_FDB_OP_OP(x) FIELD_PREP(YT921X_FDB_OP_OP_M, (x))
+#define YT921X_FDB_OP_OP_ADD YT921X_FDB_OP_OP(0)
+#define YT921X_FDB_OP_OP_DEL YT921X_FDB_OP_OP(1)
+#define YT921X_FDB_OP_OP_GET_ONE YT921X_FDB_OP_OP(2)
+#define YT921X_FDB_OP_OP_GET_NEXT YT921X_FDB_OP_OP(3)
+#define YT921X_FDB_OP_OP_FLUSH YT921X_FDB_OP_OP(4)
+#define YT921X_FDB_OP_START BIT(0)
+#define YT921X_FDB_RESULT 0x180464
+#define YT921X_FDB_RESULT_DONE BIT(15)
+#define YT921X_FDB_RESULT_NOTFOUND BIT(14)
+#define YT921X_FDB_RESULT_OVERWRITED BIT(13)
+#define YT921X_FDB_RESULT_INDEX_M GENMASK(11, 0)
+#define YT921X_FDB_RESULT_INDEX(x) FIELD_PREP(YT921X_FDB_RESULT_INDEX_M, (x))
+#define YT921X_FDB_OUT0 0x1804b0
+#define YT921X_FDB_IO0_ADDR_HI4_M GENMASK(31, 0)
+#define YT921X_FDB_OUT1 0x1804b4
+#define YT921X_FDB_IO1_EGR_INT_PRI_EN BIT(31)
+#define YT921X_FDB_IO1_STATUS_M GENMASK(30, 28)
+#define YT921X_FDB_IO1_STATUS(x) FIELD_PREP(YT921X_FDB_IO1_STATUS_M, (x))
+#define YT921X_FDB_IO1_STATUS_INVALID YT921X_FDB_IO1_STATUS(0)
+#define YT921X_FDB_IO1_STATUS_MIN_TIME YT921X_FDB_IO1_STATUS(1)
+#define YT921X_FDB_IO1_STATUS_MOVE_AGING_MAX_TIME YT921X_FDB_IO1_STATUS(3)
+#define YT921X_FDB_IO1_STATUS_MAX_TIME YT921X_FDB_IO1_STATUS(5)
+#define YT921X_FDB_IO1_STATUS_PENDING YT921X_FDB_IO1_STATUS(6)
+#define YT921X_FDB_IO1_STATUS_STATIC YT921X_FDB_IO1_STATUS(7)
+#define YT921X_FDB_IO1_FID_M GENMASK(27, 16) /* filtering ID (VID) */
+#define YT921X_FDB_IO1_FID(x) FIELD_PREP(YT921X_FDB_IO1_FID_M, (x))
+#define YT921X_FDB_IO1_ADDR_LO2_M GENMASK(15, 0)
+#define YT921X_FDB_OUT2 0x1804b8
+#define YT921X_FDB_IO2_MOVE_AGING_STATUS_M GENMASK(31, 30)
+#define YT921X_FDB_IO2_IGR_DROP BIT(29)
+#define YT921X_FDB_IO2_EGR_PORTS_M GENMASK(28, 18)
+#define YT921X_FDB_IO2_EGR_PORTS(x) FIELD_PREP(YT921X_FDB_IO2_EGR_PORTS_M, (x))
+#define YT921X_FDB_IO2_EGR_DROP BIT(17)
+#define YT921X_FDB_IO2_COPY_TO_CPU BIT(16)
+#define YT921X_FDB_IO2_IGR_INT_PRI_EN BIT(15)
+#define YT921X_FDB_IO2_INT_PRI_M GENMASK(14, 12)
+#define YT921X_FDB_IO2_INT_PRI(x) FIELD_PREP(YT921X_FDB_IO2_INT_PRI_M, (x))
+#define YT921X_FDB_IO2_NEW_VID_M GENMASK(11, 0)
+#define YT921X_FDB_IO2_NEW_VID(x) FIELD_PREP(YT921X_FDB_IO2_NEW_VID_M, (x))
+#define YT921X_FILTER_UNK_UCAST 0x180508
+#define YT921X_FILTER_UNK_MCAST 0x18050c
+#define YT921X_FILTER_MCAST 0x180510
+#define YT921X_FILTER_BCAST 0x180514
+#define YT921X_FILTER_PORTS_M GENMASK(10, 0)
+#define YT921X_FILTER_PORTS(x) FIELD_PREP(YT921X_FILTER_PORTS_M, (x))
+#define YT921X_FILTER_PORTn(port) BIT(port)
+#define YT921X_VLAN_EGR_FILTER 0x180598
+#define YT921X_VLAN_EGR_FILTER_PORTn(port) BIT(port)
+#define YT921X_CPU_COPY 0x180690
+#define YT921X_CPU_COPY_FORCE_INT_PORT BIT(2)
+#define YT921X_CPU_COPY_TO_INT_CPU BIT(1)
+#define YT921X_CPU_COPY_TO_EXT_CPU BIT(0)
+#define YT921X_ACT_UNK_UCAST 0x180734
+#define YT921X_ACT_UNK_MCAST 0x180738
+#define YT921X_ACT_UNK_MCAST_BYPASS_DROP_RMA BIT(23)
+#define YT921X_ACT_UNK_MCAST_BYPASS_DROP_IGMP BIT(22)
+#define YT921X_ACT_UNK_ACTn_M(port) GENMASK(2 * (port) + 1, 2 * (port))
+#define YT921X_ACT_UNK_ACTn(port, x) ((x) << (2 * (port)))
+#define YT921X_ACT_UNK_ACTn_FORWARD(port) YT921X_ACT_UNK_ACTn(port, 0) /* flood */
+#define YT921X_ACT_UNK_ACTn_TRAP(port) YT921X_ACT_UNK_ACTn(port, 1) /* steer to CPU */
+#define YT921X_ACT_UNK_ACTn_DROP(port) YT921X_ACT_UNK_ACTn(port, 2) /* discard */
+/* NEVER use this action; see comments in the tag driver */
+#define YT921X_ACT_UNK_ACTn_COPY(port) YT921X_ACT_UNK_ACTn(port, 3) /* flood and copy */
+#define YT921X_FDB_HW_FLUSH 0x180958
+#define YT921X_FDB_HW_FLUSH_ON_LINKDOWN BIT(0)
+
+#define YT921X_VLANn_CTRL(vlan) (0x188000 + 8 * (vlan))
+#define YT921X_VLAN_CTRL_UNTAG_PORTS_M GENMASK_ULL(50, 40)
+#define YT921X_VLAN_CTRL_UNTAG_PORTS(x) FIELD_PREP(YT921X_VLAN_CTRL_UNTAG_PORTS_M, (x))
+#define YT921X_VLAN_CTRL_UNTAG_PORTn(port) BIT_ULL((port) + 40)
+#define YT921X_VLAN_CTRL_STP_ID_M GENMASK_ULL(39, 36)
+#define YT921X_VLAN_CTRL_STP_ID(x) FIELD_PREP(YT921X_VLAN_CTRL_STP_ID_M, (x))
+#define YT921X_VLAN_CTRL_SVLAN_EN BIT_ULL(35)
+#define YT921X_VLAN_CTRL_FID_M GENMASK_ULL(34, 23)
+#define YT921X_VLAN_CTRL_FID(x) FIELD_PREP(YT921X_VLAN_CTRL_FID_M, (x))
+#define YT921X_VLAN_CTRL_LEARN_DIS BIT_ULL(22)
+#define YT921X_VLAN_CTRL_INT_PRI_EN BIT_ULL(21)
+#define YT921X_VLAN_CTRL_INT_PRI_M GENMASK_ULL(20, 18)
+#define YT921X_VLAN_CTRL_PORTS_M GENMASK_ULL(17, 7)
+#define YT921X_VLAN_CTRL_PORTS(x) FIELD_PREP(YT921X_VLAN_CTRL_PORTS_M, (x))
+#define YT921X_VLAN_CTRL_PORTn(port) BIT_ULL((port) + 7)
+#define YT921X_VLAN_CTRL_BYPASS_1X_AC BIT_ULL(6)
+#define YT921X_VLAN_CTRL_METER_EN BIT_ULL(5)
+#define YT921X_VLAN_CTRL_METER_ID_M GENMASK_ULL(4, 0)
+
+#define YT921X_TPID_IGRn(x) (0x210000 + 4 * (x)) /* [0, 3] */
+#define YT921X_TPID_IGR_TPID_M GENMASK(15, 0)
+#define YT921X_PORTn_IGR_TPID(port) (0x210010 + 4 * (port))
+#define YT921X_PORT_IGR_TPIDn_STAG_M GENMASK(7, 4)
+#define YT921X_PORT_IGR_TPIDn_STAG(x) BIT((x) + 4)
+#define YT921X_PORT_IGR_TPIDn_CTAG_M GENMASK(3, 0)
+#define YT921X_PORT_IGR_TPIDn_CTAG(x) BIT(x)
+
+#define YT921X_PORTn_VLAN_CTRL(port) (0x230010 + 4 * (port))
+#define YT921X_PORT_VLAN_CTRL_SVLAN_PRI_EN BIT(31)
+#define YT921X_PORT_VLAN_CTRL_CVLAN_PRI_EN BIT(30)
+#define YT921X_PORT_VLAN_CTRL_SVID_M GENMASK(29, 18)
+#define YT921X_PORT_VLAN_CTRL_SVID(x) FIELD_PREP(YT921X_PORT_VLAN_CTRL_SVID_M, (x))
+#define YT921X_PORT_VLAN_CTRL_CVID_M GENMASK(17, 6)
+#define YT921X_PORT_VLAN_CTRL_CVID(x) FIELD_PREP(YT921X_PORT_VLAN_CTRL_CVID_M, (x))
+#define YT921X_PORT_VLAN_CTRL_SVLAN_PRI_M GENMASK(5, 3)
+#define YT921X_PORT_VLAN_CTRL_CVLAN_PRI_M GENMASK(2, 0)
+#define YT921X_PORTn_VLAN_CTRL1(port) (0x230080 + 4 * (port))
+#define YT921X_PORT_VLAN_CTRL1_VLAN_RANGE_EN BIT(8)
+#define YT921X_PORT_VLAN_CTRL1_VLAN_RANGE_PROFILE_ID_M GENMASK(7, 4)
+#define YT921X_PORT_VLAN_CTRL1_SVLAN_DROP_TAGGED BIT(3)
+#define YT921X_PORT_VLAN_CTRL1_SVLAN_DROP_UNTAGGED BIT(2)
+#define YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_TAGGED BIT(1)
+#define YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED BIT(0)
+
+#define YT921X_MIRROR 0x300300
+#define YT921X_MIRROR_IGR_PORTS_M GENMASK(26, 16)
+#define YT921X_MIRROR_IGR_PORTS(x) FIELD_PREP(YT921X_MIRROR_IGR_PORTS_M, (x))
+#define YT921X_MIRROR_IGR_PORTn(port) BIT((port) + 16)
+#define YT921X_MIRROR_EGR_PORTS_M GENMASK(14, 4)
+#define YT921X_MIRROR_EGR_PORTS(x) FIELD_PREP(YT921X_MIRROR_EGR_PORTS_M, (x))
+#define YT921X_MIRROR_EGR_PORTn(port) BIT((port) + 4)
+#define YT921X_MIRROR_PORT_M GENMASK(3, 0)
+#define YT921X_MIRROR_PORT(x) FIELD_PREP(YT921X_MIRROR_PORT_M, (x))
+
+#define YT921X_EDATA_EXTMODE 0xfb
+#define YT921X_EDATA_LEN 0x100
+
+#define YT921X_FDB_NUM 4096
+
+enum yt921x_fdb_entry_status {
+ YT921X_FDB_ENTRY_STATUS_INVALID = 0,
+ YT921X_FDB_ENTRY_STATUS_MIN_TIME = 1,
+ YT921X_FDB_ENTRY_STATUS_MOVE_AGING_MAX_TIME = 3,
+ YT921X_FDB_ENTRY_STATUS_MAX_TIME = 5,
+ YT921X_FDB_ENTRY_STATUS_PENDING = 6,
+ YT921X_FDB_ENTRY_STATUS_STATIC = 7,
+};
+
+#define YT921X_MSTI_NUM 16
+
+#define YT9215_MAJOR 0x9002
+#define YT9218_MAJOR 0x9001
+
+/* required for a hard reset */
+#define YT921X_RST_DELAY_US 10000
+
+#define YT921X_FRAME_SIZE_MAX 0x2400 /* 9216 */
+
+#define YT921X_TAG_LEN 8
+
+/* 8 internal + 2 external + 1 mcu */
+#define YT921X_PORT_NUM 11
+
+#define yt921x_port_is_internal(port) ((port) < 8)
+#define yt921x_port_is_external(port) (8 <= (port) && (port) < 9)
+
+struct yt921x_mib {
+ u64 rx_broadcast;
+ u64 rx_pause;
+ u64 rx_multicast;
+ u64 rx_crc_errors;
+
+ u64 rx_alignment_errors;
+ u64 rx_undersize_errors;
+ u64 rx_fragment_errors;
+ u64 rx_64byte;
+
+ u64 rx_65_127byte;
+ u64 rx_128_255byte;
+ u64 rx_256_511byte;
+ u64 rx_512_1023byte;
+
+ u64 rx_1024_1518byte;
+ u64 rx_jumbo;
+ u64 rx_good_bytes;
+
+ u64 rx_bad_bytes;
+ u64 rx_oversize_errors;
+
+ u64 rx_dropped;
+ u64 tx_broadcast;
+ u64 tx_pause;
+ u64 tx_multicast;
+
+ u64 tx_undersize_errors;
+ u64 tx_64byte;
+ u64 tx_65_127byte;
+ u64 tx_128_255byte;
+
+ u64 tx_256_511byte;
+ u64 tx_512_1023byte;
+ u64 tx_1024_1518byte;
+ u64 tx_jumbo;
+
+ u64 tx_good_bytes;
+ u64 tx_collisions;
+
+ u64 tx_aborted_errors;
+ u64 tx_multiple_collisions;
+ u64 tx_single_collisions;
+ u64 tx_good;
+
+ u64 tx_deferred;
+ u64 tx_late_collisions;
+ u64 rx_oam;
+ u64 tx_oam;
+};
+
+struct yt921x_port {
+ unsigned char index;
+
+ bool hairpin;
+ bool isolated;
+
+ struct delayed_work mib_read;
+ struct yt921x_mib mib;
+ u64 rx_frames;
+ u64 tx_frames;
+};
+
+struct yt921x_reg_ops {
+ int (*read)(void *context, u32 reg, u32 *valp);
+ int (*write)(void *context, u32 reg, u32 val);
+};
+
+struct yt921x_priv {
+ struct dsa_switch ds;
+
+ const struct yt921x_info *info;
+ /* cache of dsa_cpu_ports(ds) */
+ u16 cpu_ports_mask;
+
+ /* protect the access to the switch registers */
+ struct mutex reg_lock;
+ const struct yt921x_reg_ops *reg_ops;
+ void *reg_ctx;
+
+ /* mdio master bus */
+ struct mii_bus *mbus_int;
+ struct mii_bus *mbus_ext;
+
+ struct yt921x_port ports[YT921X_PORT_NUM];
+
+ u16 eee_ports_mask;
+};
+
+#endif
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index e9c5e1e11fa0..d6bdad4baadd 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -38,6 +38,7 @@
#include <linux/moduleparam.h>
#include <linux/rtnetlink.h>
#include <linux/net_tstamp.h>
+#include <net/netdev_lock.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
@@ -104,6 +105,7 @@ static void dummy_setup(struct net_device *dev)
dev->netdev_ops = &dummy_netdev_ops;
dev->ethtool_ops = &dummy_ethtool_ops;
dev->needs_free_netdev = true;
+ dev->request_ops_lock = true;
/* Fill in device structure with ethernet-generic values. */
dev->flags |= IFF_NOARP;
@@ -168,22 +170,21 @@ static int __init dummy_init_module(void)
{
int i, err = 0;
- down_write(&pernet_ops_rwsem);
- rtnl_lock();
- err = __rtnl_link_register(&dummy_link_ops);
+ err = rtnl_link_register(&dummy_link_ops);
if (err < 0)
- goto out;
+ return err;
+
+ rtnl_net_lock(&init_net);
for (i = 0; i < numdummies && !err; i++) {
err = dummy_init_one();
cond_resched();
}
- if (err < 0)
- __rtnl_link_unregister(&dummy_link_ops);
-out:
- rtnl_unlock();
- up_write(&pernet_ops_rwsem);
+ rtnl_net_unlock(&init_net);
+
+ if (err < 0)
+ rtnl_link_unregister(&dummy_link_ops);
return err;
}
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index 3c2efda916f1..9ba10efd3794 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -143,7 +143,7 @@ static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);
static void eql_timer(struct timer_list *t)
{
- equalizer_t *eql = from_timer(eql, t, timer);
+ equalizer_t *eql = timer_container_of(eql, t, timer);
struct list_head *this, *tmp, *head;
spin_lock(&eql->queue.lock);
@@ -254,7 +254,7 @@ static int eql_close(struct net_device *dev)
* at the data structure it scans every so often...
*/
- del_timer_sync(&eql->timer);
+ timer_delete_sync(&eql->timer);
eql_kill_slave_queue(&eql->queue);
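The from_timer() to timer_container_of() conversions in this and the following drivers are a pure rename; the helper is roughly the following container_of() wrapper (sketch, assuming the definition in <linux/timer.h>):

#define timer_container_of(var, callback_timer, timer_fieldname)	\
	container_of(callback_timer, typeof(*(var)), timer_fieldname)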
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 4725a8cfd695..2227c83a4862 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -859,7 +859,7 @@ static int corkscrew_open(struct net_device *dev)
static void corkscrew_timer(struct timer_list *t)
{
#ifdef AUTOMEDIA
- struct corkscrew_private *vp = from_timer(vp, t, timer);
+ struct corkscrew_private *vp = timer_container_of(vp, t, timer);
struct net_device *dev = vp->our_dev;
int ioaddr = dev->base_addr;
unsigned long flags;
@@ -1414,7 +1414,7 @@ static int corkscrew_close(struct net_device *dev)
dev->name, rx_nocopy, rx_copy, queued_packet);
}
- del_timer_sync(&vp->timer);
+ timer_delete_sync(&vp->timer);
/* Turn off statistics ASAP. We update lp->stats below. */
outw(StatsDisable, ioaddr + EL3_CMD);
@@ -1547,9 +1547,8 @@ static const struct ethtool_ops netdev_ethtool_ops = {
.set_msglevel = netdev_set_msglevel,
};
-
#ifdef MODULE
-void cleanup_module(void)
+static void __exit corkscrew_exit_module(void)
{
while (!list_empty(&root_corkscrew_dev)) {
struct net_device *dev;
@@ -1563,4 +1562,5 @@ void cleanup_module(void)
free_netdev(dev);
}
}
+module_exit(corkscrew_exit_module);
#endif /* MODULE */
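Sketch of the same conversion in generic form (hypothetical names): the legacy unnamed cleanup_module() becomes a static __exit function registered through module_exit(), so the compiler can place it in .exit.text and discard it for built-in configurations.

static void __exit foo_cleanup(void)
{
	/* undo what the module init did */
}
module_exit(foo_cleanup);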
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index dc3b7c960611..1f2070497a75 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -858,7 +858,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
*/
static void media_check(struct timer_list *t)
{
- struct el3_private *lp = from_timer(lp, t, media);
+ struct el3_private *lp = timer_container_of(lp, t, media);
struct net_device *dev = lp->p_dev->priv;
unsigned int ioaddr = dev->base_addr;
unsigned long flags;
@@ -1140,7 +1140,7 @@ static int el3_close(struct net_device *dev)
link->open--;
netif_stop_queue(dev);
- del_timer_sync(&lp->media);
+ timer_delete_sync(&lp->media);
return 0;
}
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index be58dac0502a..ea49be43b8c3 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -685,7 +685,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
static void media_check(struct timer_list *t)
{
- struct el3_private *lp = from_timer(lp, t, media);
+ struct el3_private *lp = timer_container_of(lp, t, media);
struct net_device *dev = lp->p_dev->priv;
unsigned int ioaddr = dev->base_addr;
u16 media, errs;
@@ -946,7 +946,7 @@ static int el3_close(struct net_device *dev)
link->open--;
netif_stop_queue(dev);
- del_timer_sync(&lp->media);
+ timer_delete_sync(&lp->media);
return 0;
}
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 082388bb6169..8c9cc97efd4e 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1302,7 +1302,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
if (print_info)
pr_cont(", IRQ %d\n", dev->irq);
/* Tell them about an invalid IRQ. */
- if (dev->irq <= 0 || dev->irq >= nr_irqs)
+ if (dev->irq <= 0 || dev->irq >= irq_get_nr_irqs())
pr_warn(" *** Warning: IRQ %d is unlikely to work! ***\n",
dev->irq);
@@ -1783,7 +1783,7 @@ out:
static void
vortex_timer(struct timer_list *t)
{
- struct vortex_private *vp = from_timer(vp, t, timer);
+ struct vortex_private *vp = timer_container_of(vp, t, timer);
struct net_device *dev = vp->mii.dev;
void __iomem *ioaddr = vp->ioaddr;
int next_tick = 60*HZ;
@@ -2691,7 +2691,7 @@ vortex_down(struct net_device *dev, int final_down)
netdev_reset_queue(dev);
netif_stop_queue(dev);
- del_timer_sync(&vp->timer);
+ timer_delete_sync(&vp->timer);
/* Turn off statistics ASAP. We update dev->stats below. */
iowrite16(StatsDisable, ioaddr + EL3_CMD);
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index fea489af72fb..7c8213011b5c 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -504,7 +504,7 @@ static int axnet_close(struct net_device *dev)
link->open--;
netif_stop_queue(dev);
- del_timer_sync(&info->watchdog);
+ timer_delete_sync(&info->watchdog);
return 0;
} /* axnet_close */
@@ -550,7 +550,7 @@ static irqreturn_t ei_irq_wrapper(int irq, void *dev_id)
static void ei_watchdog(struct timer_list *t)
{
- struct axnet_dev *info = from_timer(info, t, watchdog);
+ struct axnet_dev *info = timer_container_of(info, t, watchdog);
struct net_device *dev = info->p_dev->priv;
unsigned int nic_base = dev->base_addr;
unsigned int mii_addr = nic_base + AXNET_MII_EEP;
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 780fb4afb6af..19f9c5db3f3b 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -947,7 +947,7 @@ static int pcnet_close(struct net_device *dev)
link->open--;
netif_stop_queue(dev);
- del_timer_sync(&info->watchdog);
+ timer_delete_sync(&info->watchdog);
return 0;
} /* pcnet_close */
@@ -1018,7 +1018,7 @@ static irqreturn_t ei_irq_wrapper(int irq, void *dev_id)
static void ei_watchdog(struct timer_list *t)
{
- struct pcnet_dev *info = from_timer(info, t, watchdog);
+ struct pcnet_dev *info = timer_container_of(info, t, watchdog);
struct net_device *dev = info->p_dev->priv;
unsigned int nic_base = dev->base_addr;
unsigned int mii_addr = nic_base + DLINK_GPIO;
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 9a542e3c9b05..4a1b368ca7e6 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -20,6 +20,7 @@ source "drivers/net/ethernet/actions/Kconfig"
source "drivers/net/ethernet/adaptec/Kconfig"
source "drivers/net/ethernet/aeroflex/Kconfig"
source "drivers/net/ethernet/agere/Kconfig"
+source "drivers/net/ethernet/airoha/Kconfig"
source "drivers/net/ethernet/alacritech/Kconfig"
source "drivers/net/ethernet/allwinner/Kconfig"
source "drivers/net/ethernet/alteon/Kconfig"
@@ -128,6 +129,7 @@ source "drivers/net/ethernet/microchip/Kconfig"
source "drivers/net/ethernet/mscc/Kconfig"
source "drivers/net/ethernet/microsoft/Kconfig"
source "drivers/net/ethernet/moxa/Kconfig"
+source "drivers/net/ethernet/mucse/Kconfig"
source "drivers/net/ethernet/myricom/Kconfig"
config FEALNX
@@ -159,7 +161,7 @@ config ETHOC
Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC.
config OA_TC6
- tristate "OPEN Alliance TC6 10BASE-T1x MAC-PHY support"
+ tristate "OPEN Alliance TC6 10BASE-T1x MAC-PHY support" if COMPILE_TEST
depends on SPI
select PHYLIB
help
@@ -187,6 +189,7 @@ source "drivers/net/ethernet/sis/Kconfig"
source "drivers/net/ethernet/sfc/Kconfig"
source "drivers/net/ethernet/smsc/Kconfig"
source "drivers/net/ethernet/socionext/Kconfig"
+source "drivers/net/ethernet/spacemit/Kconfig"
source "drivers/net/ethernet/stmicro/Kconfig"
source "drivers/net/ethernet/sun/Kconfig"
source "drivers/net/ethernet/sunplus/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 99fa180dedb8..2e18df8ca8ec 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/
obj-$(CONFIG_GRETH) += aeroflex/
obj-$(CONFIG_NET_VENDOR_ADI) += adi/
obj-$(CONFIG_NET_VENDOR_AGERE) += agere/
+obj-$(CONFIG_NET_VENDOR_AIROHA) += airoha/
obj-$(CONFIG_NET_VENDOR_ALACRITECH) += alacritech/
obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/
obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/
@@ -64,6 +65,7 @@ obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
obj-$(CONFIG_NET_VENDOR_MICROSEMI) += mscc/
obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/
+obj-$(CONFIG_NET_VENDOR_MUCSE) += mucse/
obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
obj-$(CONFIG_FEALNX) += fealnx.o
obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
@@ -90,6 +92,7 @@ obj-$(CONFIG_NET_VENDOR_SOLARFLARE) += sfc/
obj-$(CONFIG_NET_VENDOR_SGI) += sgi/
obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
obj-$(CONFIG_NET_VENDOR_SOCIONEXT) += socionext/
+obj-$(CONFIG_NET_VENDOR_SPACEMIT) += spacemit/
obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/
obj-$(CONFIG_NET_VENDOR_SUN) += sun/
obj-$(CONFIG_NET_VENDOR_SUNPLUS) += sunplus/
diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c
index 115f48b3342c..0a08da799255 100644
--- a/drivers/net/ethernet/actions/owl-emac.c
+++ b/drivers/net/ethernet/actions/owl-emac.c
@@ -1325,15 +1325,10 @@ static int owl_emac_mdio_init(struct net_device *netdev)
struct device_node *mdio_node;
int ret;
- mdio_node = of_get_child_by_name(dev->of_node, "mdio");
+ mdio_node = of_get_available_child_by_name(dev->of_node, "mdio");
if (!mdio_node)
return -ENODEV;
- if (!of_device_is_available(mdio_node)) {
- ret = -ENODEV;
- goto err_put_node;
- }
-
priv->mii = devm_mdiobus_alloc(dev);
if (!priv->mii) {
ret = -ENOMEM;
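Not part of the patch: of_get_available_child_by_name() folds the child lookup and the status check into one call; a sketch of the roughly equivalent open-coded form the driver used before.

static struct device_node *example_available_mdio_child(struct device_node *parent)
{
	struct device_node *np = of_get_child_by_name(parent, "mdio");

	if (np && !of_device_is_available(np)) {
		of_node_put(np);
		np = NULL;		/* child exists but is disabled */
	}
	return np;
}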
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
index 68fad5575fd4..30f9d271e595 100644
--- a/drivers/net/ethernet/adi/adin1110.c
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -1599,7 +1599,7 @@ static int adin1110_probe_netdevs(struct adin1110_priv *priv)
netdev->netdev_ops = &adin1110_netdev_ops;
netdev->ethtool_ops = &adin1110_ethtool_ops;
netdev->priv_flags |= IFF_UNICAST_FLT;
- netdev->netns_local = true;
+ netdev->netns_immutable = true;
port_priv->phydev = get_phy_device(priv->mii_bus, i + 1, false);
if (IS_ERR(port_priv->phydev)) {
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index b325e0cef120..5c8217638dda 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -2459,6 +2459,10 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
skb->data,
skb_headlen(skb),
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ dma_addr))
+ return -ENOMEM;
+
desc[frag].addr_lo = lower_32_bits(dma_addr);
desc[frag].addr_hi = upper_32_bits(dma_addr);
frag++;
@@ -2468,6 +2472,10 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
skb->data,
skb_headlen(skb) / 2,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ dma_addr))
+ return -ENOMEM;
+
desc[frag].addr_lo = lower_32_bits(dma_addr);
desc[frag].addr_hi = upper_32_bits(dma_addr);
frag++;
@@ -2478,6 +2486,10 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
skb_headlen(skb) / 2,
skb_headlen(skb) / 2,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ dma_addr))
+ goto unmap_first_out;
+
desc[frag].addr_lo = lower_32_bits(dma_addr);
desc[frag].addr_hi = upper_32_bits(dma_addr);
frag++;
@@ -2489,6 +2501,9 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
0,
desc[frag].len_vlan,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, dma_addr))
+ goto unmap_out;
+
desc[frag].addr_lo = lower_32_bits(dma_addr);
desc[frag].addr_hi = upper_32_bits(dma_addr);
frag++;
@@ -2578,6 +2593,27 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
&adapter->regs->global.watchdog_timer);
}
return 0;
+
+unmap_out:
+ // Unmap the body of the packet with map_page
+ while (--i) {
+ frag--;
+ dma_addr = desc[frag].addr_lo;
+ dma_addr |= (u64)desc[frag].addr_hi << 32;
+ dma_unmap_page(&adapter->pdev->dev, dma_addr,
+ desc[frag].len_vlan, DMA_TO_DEVICE);
+ }
+
+unmap_first_out:
+ // Unmap the header with map_single
+ while (frag--) {
+ dma_addr = desc[frag].addr_lo;
+ dma_addr |= (u64)desc[frag].addr_hi << 32;
+ dma_unmap_single(&adapter->pdev->dev, dma_addr,
+ desc[frag].len_vlan, DMA_TO_DEVICE);
+ }
+
+ return -ENOMEM;
}
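Not part of the patch: the error paths added above follow the usual map-then-unwind pattern for DMA mappings; a self-contained sketch with hypothetical structures.

struct example_frag { void *data; unsigned int len; };
struct example_desc { dma_addr_t addr; unsigned int len; };

static int example_map_frags(struct device *dev, struct example_desc *ring,
			     const struct example_frag *frags, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		dma_addr_t addr = dma_map_single(dev, frags[i].data,
						 frags[i].len, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, addr))
			goto unwind;

		ring[i].addr = addr;
		ring[i].len = frags[i].len;
	}
	return 0;

unwind:
	while (i--)
		dma_unmap_single(dev, ring[i].addr, ring[i].len,
				 DMA_TO_DEVICE);
	return -ENOMEM;
}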
static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
@@ -3076,7 +3112,8 @@ err_out:
*/
static void et131x_error_timer_handler(struct timer_list *t)
{
- struct et131x_adapter *adapter = from_timer(adapter, t, error_timer);
+ struct et131x_adapter *adapter = timer_container_of(adapter, t,
+ error_timer);
struct phy_device *phydev = adapter->netdev->phydev;
if (et1310_in_phy_coma(adapter)) {
@@ -3639,7 +3676,7 @@ static int et131x_close(struct net_device *netdev)
free_irq(adapter->pdev->irq, netdev);
/* Stop the error timer */
- return del_timer_sync(&adapter->error_timer);
+ return timer_delete_sync(&adapter->error_timer);
}
/* et131x_set_packet_filter - Configures the Rx Packet filtering */
diff --git a/drivers/net/ethernet/airoha/Kconfig b/drivers/net/ethernet/airoha/Kconfig
new file mode 100644
index 000000000000..ad3ce501e7a5
--- /dev/null
+++ b/drivers/net/ethernet/airoha/Kconfig
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_VENDOR_AIROHA
+ bool "Airoha devices"
+ depends on ARCH_AIROHA || COMPILE_TEST
+ help
+ If you have an Airoha SoC with ethernet, say Y.
+
+if NET_VENDOR_AIROHA
+
+config NET_AIROHA_NPU
+ tristate "Airoha NPU support"
+ select WANT_DEV_COREDUMP
+ select REGMAP_MMIO
+ help
+ This driver supports the Airoha Network Processor (NPU) available
+ on the Airoha SoC family.
+
+config NET_AIROHA
+ tristate "Airoha SoC Gigabit Ethernet support"
+ depends on NET_DSA || !NET_DSA
+ select NET_AIROHA_NPU
+ select PAGE_POOL
+ help
+ This driver supports the gigabit ethernet MACs in the
+ Airoha SoC family.
+
+config NET_AIROHA_FLOW_STATS
+ default y
+ bool "Airoha flow stats"
+ depends on NET_AIROHA && NET_AIROHA_NPU
+ help
+ Enable Airoha flowtable statistics counters.
+
+endif #NET_VENDOR_AIROHA
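For reference, an illustrative .config fragment enabling the new symbols (assuming ARCH_AIROHA or COMPILE_TEST is set):

CONFIG_NET_VENDOR_AIROHA=y
CONFIG_NET_AIROHA_NPU=m
CONFIG_NET_AIROHA=m
CONFIG_NET_AIROHA_FLOW_STATS=y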
diff --git a/drivers/net/ethernet/airoha/Makefile b/drivers/net/ethernet/airoha/Makefile
new file mode 100644
index 000000000000..94468053e34b
--- /dev/null
+++ b/drivers/net/ethernet/airoha/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Airoha SoCs' built-in ethernet MACs
+#
+
+obj-$(CONFIG_NET_AIROHA) += airoha-eth.o
+airoha-eth-y := airoha_eth.o airoha_ppe.o
+airoha-eth-$(CONFIG_DEBUG_FS) += airoha_ppe_debugfs.o
+obj-$(CONFIG_NET_AIROHA_NPU) += airoha_npu.o
diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c
index 6c683a12d5aa..75893c90a0a1 100644
--- a/drivers/net/ethernet/mediatek/airoha_eth.c
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -3,841 +3,31 @@
* Copyright (c) 2024 AIROHA Inc
* Author: Lorenzo Bianconi <lorenzo@kernel.org>
*/
-#include <linux/etherdevice.h>
-#include <linux/iopoll.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_net.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
-#include <linux/reset.h>
#include <linux/tcp.h>
#include <linux/u64_stats_sync.h>
-#include <net/dsa.h>
+#include <net/dst_metadata.h>
#include <net/page_pool/helpers.h>
+#include <net/pkt_cls.h>
#include <uapi/linux/ppp_defs.h>
-#define AIROHA_MAX_NUM_GDM_PORTS 1
-#define AIROHA_MAX_NUM_QDMA 2
-#define AIROHA_MAX_NUM_RSTS 3
-#define AIROHA_MAX_NUM_XSI_RSTS 5
-#define AIROHA_MAX_MTU 2000
-#define AIROHA_MAX_PACKET_SIZE 2048
-#define AIROHA_NUM_TX_RING 32
-#define AIROHA_NUM_RX_RING 32
-#define AIROHA_FE_MC_MAX_VLAN_TABLE 64
-#define AIROHA_FE_MC_MAX_VLAN_PORT 16
-#define AIROHA_NUM_TX_IRQ 2
-#define HW_DSCP_NUM 2048
-#define IRQ_QUEUE_LEN(_n) ((_n) ? 1024 : 2048)
-#define TX_DSCP_NUM 1024
-#define RX_DSCP_NUM(_n) \
- ((_n) == 2 ? 128 : \
- (_n) == 11 ? 128 : \
- (_n) == 15 ? 128 : \
- (_n) == 0 ? 1024 : 16)
-
-#define PSE_RSV_PAGES 128
-#define PSE_QUEUE_RSV_PAGES 64
-
-/* FE */
-#define PSE_BASE 0x0100
-#define CSR_IFC_BASE 0x0200
-#define CDM1_BASE 0x0400
-#define GDM1_BASE 0x0500
-#define PPE1_BASE 0x0c00
-
-#define CDM2_BASE 0x1400
-#define GDM2_BASE 0x1500
-
-#define GDM3_BASE 0x1100
-#define GDM4_BASE 0x2500
-
-#define GDM_BASE(_n) \
- ((_n) == 4 ? GDM4_BASE : \
- (_n) == 3 ? GDM3_BASE : \
- (_n) == 2 ? GDM2_BASE : GDM1_BASE)
-
-#define REG_FE_DMA_GLO_CFG 0x0000
-#define FE_DMA_GLO_L2_SPACE_MASK GENMASK(7, 4)
-#define FE_DMA_GLO_PG_SZ_MASK BIT(3)
-
-#define REG_FE_RST_GLO_CFG 0x0004
-#define FE_RST_GDM4_MBI_ARB_MASK BIT(3)
-#define FE_RST_GDM3_MBI_ARB_MASK BIT(2)
-#define FE_RST_CORE_MASK BIT(0)
-
-#define REG_FE_WAN_MAC_H 0x0030
-#define REG_FE_LAN_MAC_H 0x0040
-
-#define REG_FE_MAC_LMIN(_n) ((_n) + 0x04)
-#define REG_FE_MAC_LMAX(_n) ((_n) + 0x08)
-
-#define REG_FE_CDM1_OQ_MAP0 0x0050
-#define REG_FE_CDM1_OQ_MAP1 0x0054
-#define REG_FE_CDM1_OQ_MAP2 0x0058
-#define REG_FE_CDM1_OQ_MAP3 0x005c
-
-#define REG_FE_PCE_CFG 0x0070
-#define PCE_DPI_EN_MASK BIT(2)
-#define PCE_KA_EN_MASK BIT(1)
-#define PCE_MC_EN_MASK BIT(0)
-
-#define REG_FE_PSE_QUEUE_CFG_WR 0x0080
-#define PSE_CFG_PORT_ID_MASK GENMASK(27, 24)
-#define PSE_CFG_QUEUE_ID_MASK GENMASK(20, 16)
-#define PSE_CFG_WR_EN_MASK BIT(8)
-#define PSE_CFG_OQRSV_SEL_MASK BIT(0)
-
-#define REG_FE_PSE_QUEUE_CFG_VAL 0x0084
-#define PSE_CFG_OQ_RSV_MASK GENMASK(13, 0)
-
-#define PSE_FQ_CFG 0x008c
-#define PSE_FQ_LIMIT_MASK GENMASK(14, 0)
-
-#define REG_FE_PSE_BUF_SET 0x0090
-#define PSE_SHARE_USED_LTHD_MASK GENMASK(31, 16)
-#define PSE_ALLRSV_MASK GENMASK(14, 0)
-
-#define REG_PSE_SHARE_USED_THD 0x0094
-#define PSE_SHARE_USED_MTHD_MASK GENMASK(31, 16)
-#define PSE_SHARE_USED_HTHD_MASK GENMASK(15, 0)
-
-#define REG_GDM_MISC_CFG 0x0148
-#define GDM2_RDM_ACK_WAIT_PREF_MASK BIT(9)
-#define GDM2_CHN_VLD_MODE_MASK BIT(5)
-
-#define REG_FE_CSR_IFC_CFG CSR_IFC_BASE
-#define FE_IFC_EN_MASK BIT(0)
-
-#define REG_FE_VIP_PORT_EN 0x01f0
-#define REG_FE_IFC_PORT_EN 0x01f4
-
-#define REG_PSE_IQ_REV1 (PSE_BASE + 0x08)
-#define PSE_IQ_RES1_P2_MASK GENMASK(23, 16)
-
-#define REG_PSE_IQ_REV2 (PSE_BASE + 0x0c)
-#define PSE_IQ_RES2_P5_MASK GENMASK(15, 8)
-#define PSE_IQ_RES2_P4_MASK GENMASK(7, 0)
-
-#define REG_FE_VIP_EN(_n) (0x0300 + ((_n) << 3))
-#define PATN_FCPU_EN_MASK BIT(7)
-#define PATN_SWP_EN_MASK BIT(6)
-#define PATN_DP_EN_MASK BIT(5)
-#define PATN_SP_EN_MASK BIT(4)
-#define PATN_TYPE_MASK GENMASK(3, 1)
-#define PATN_EN_MASK BIT(0)
-
-#define REG_FE_VIP_PATN(_n) (0x0304 + ((_n) << 3))
-#define PATN_DP_MASK GENMASK(31, 16)
-#define PATN_SP_MASK GENMASK(15, 0)
-
-#define REG_CDM1_VLAN_CTRL CDM1_BASE
-#define CDM1_VLAN_MASK GENMASK(31, 16)
-
-#define REG_CDM1_FWD_CFG (CDM1_BASE + 0x08)
-#define CDM1_VIP_QSEL_MASK GENMASK(24, 20)
-
-#define REG_CDM1_CRSN_QSEL(_n) (CDM1_BASE + 0x10 + ((_n) << 2))
-#define CDM1_CRSN_QSEL_REASON_MASK(_n) \
- GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
-
-#define REG_CDM2_FWD_CFG (CDM2_BASE + 0x08)
-#define CDM2_OAM_QSEL_MASK GENMASK(31, 27)
-#define CDM2_VIP_QSEL_MASK GENMASK(24, 20)
-
-#define REG_CDM2_CRSN_QSEL(_n) (CDM2_BASE + 0x10 + ((_n) << 2))
-#define CDM2_CRSN_QSEL_REASON_MASK(_n) \
- GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
-
-#define REG_GDM_FWD_CFG(_n) GDM_BASE(_n)
-#define GDM_DROP_CRC_ERR BIT(23)
-#define GDM_IP4_CKSUM BIT(22)
-#define GDM_TCP_CKSUM BIT(21)
-#define GDM_UDP_CKSUM BIT(20)
-#define GDM_UCFQ_MASK GENMASK(15, 12)
-#define GDM_BCFQ_MASK GENMASK(11, 8)
-#define GDM_MCFQ_MASK GENMASK(7, 4)
-#define GDM_OCFQ_MASK GENMASK(3, 0)
-
-#define REG_GDM_INGRESS_CFG(_n) (GDM_BASE(_n) + 0x10)
-#define GDM_INGRESS_FC_EN_MASK BIT(1)
-#define GDM_STAG_EN_MASK BIT(0)
-
-#define REG_GDM_LEN_CFG(_n) (GDM_BASE(_n) + 0x14)
-#define GDM_SHORT_LEN_MASK GENMASK(13, 0)
-#define GDM_LONG_LEN_MASK GENMASK(29, 16)
-
-#define REG_FE_CPORT_CFG (GDM1_BASE + 0x40)
-#define FE_CPORT_PAD BIT(26)
-#define FE_CPORT_PORT_XFC_MASK BIT(25)
-#define FE_CPORT_QUEUE_XFC_MASK BIT(24)
-
-#define REG_FE_GDM_MIB_CLEAR(_n) (GDM_BASE(_n) + 0xf0)
-#define FE_GDM_MIB_RX_CLEAR_MASK BIT(1)
-#define FE_GDM_MIB_TX_CLEAR_MASK BIT(0)
-
-#define REG_FE_GDM1_MIB_CFG (GDM1_BASE + 0xf4)
-#define FE_STRICT_RFC2819_MODE_MASK BIT(31)
-#define FE_GDM1_TX_MIB_SPLIT_EN_MASK BIT(17)
-#define FE_GDM1_RX_MIB_SPLIT_EN_MASK BIT(16)
-#define FE_TX_MIB_ID_MASK GENMASK(15, 8)
-#define FE_RX_MIB_ID_MASK GENMASK(7, 0)
-
-#define REG_FE_GDM_TX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x104)
-#define REG_FE_GDM_TX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x10c)
-#define REG_FE_GDM_TX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x110)
-#define REG_FE_GDM_TX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x114)
-#define REG_FE_GDM_TX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x118)
-#define REG_FE_GDM_TX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x11c)
-#define REG_FE_GDM_TX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x120)
-#define REG_FE_GDM_TX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x124)
-#define REG_FE_GDM_TX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x128)
-#define REG_FE_GDM_TX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x12c)
-#define REG_FE_GDM_TX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x130)
-#define REG_FE_GDM_TX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x134)
-#define REG_FE_GDM_TX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x138)
-#define REG_FE_GDM_TX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x13c)
-#define REG_FE_GDM_TX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x140)
-
-#define REG_FE_GDM_RX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x148)
-#define REG_FE_GDM_RX_FC_DROP_CNT(_n) (GDM_BASE(_n) + 0x14c)
-#define REG_FE_GDM_RX_RC_DROP_CNT(_n) (GDM_BASE(_n) + 0x150)
-#define REG_FE_GDM_RX_OVERFLOW_DROP_CNT(_n) (GDM_BASE(_n) + 0x154)
-#define REG_FE_GDM_RX_ERROR_DROP_CNT(_n) (GDM_BASE(_n) + 0x158)
-#define REG_FE_GDM_RX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x15c)
-#define REG_FE_GDM_RX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x160)
-#define REG_FE_GDM_RX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x164)
-#define REG_FE_GDM_RX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x168)
-#define REG_FE_GDM_RX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x16c)
-#define REG_FE_GDM_RX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x170)
-#define REG_FE_GDM_RX_ETH_CRC_ERR_CNT(_n) (GDM_BASE(_n) + 0x174)
-#define REG_FE_GDM_RX_ETH_FRAG_CNT(_n) (GDM_BASE(_n) + 0x178)
-#define REG_FE_GDM_RX_ETH_JABBER_CNT(_n) (GDM_BASE(_n) + 0x17c)
-#define REG_FE_GDM_RX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x180)
-#define REG_FE_GDM_RX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x184)
-#define REG_FE_GDM_RX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x188)
-#define REG_FE_GDM_RX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x18c)
-#define REG_FE_GDM_RX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x190)
-#define REG_FE_GDM_RX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x194)
-#define REG_FE_GDM_RX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x198)
-#define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x19c)
-
-#define REG_PPE1_TB_HASH_CFG (PPE1_BASE + 0x250)
-#define PPE1_SRAM_TABLE_EN_MASK BIT(0)
-#define PPE1_SRAM_HASH1_EN_MASK BIT(8)
-#define PPE1_DRAM_TABLE_EN_MASK BIT(16)
-#define PPE1_DRAM_HASH1_EN_MASK BIT(24)
-
-#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280)
-#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284)
-#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288)
-#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x28c)
-
-#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x290)
-#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x294)
-#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x298)
-#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x29c)
-#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2b8)
-#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2bc)
-#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2c0)
-#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2c4)
-#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2c8)
-#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2cc)
-#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2e8)
-#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2ec)
-#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2f0)
-#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2f4)
-#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2f8)
-#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2fc)
-
-#define REG_GDM2_CHN_RLS (GDM2_BASE + 0x20)
-#define MBI_RX_AGE_SEL_MASK GENMASK(26, 25)
-#define MBI_TX_AGE_SEL_MASK GENMASK(18, 17)
-
-#define REG_GDM3_FWD_CFG GDM3_BASE
-#define GDM3_PAD_EN_MASK BIT(28)
-
-#define REG_GDM4_FWD_CFG (GDM4_BASE + 0x100)
-#define GDM4_PAD_EN_MASK BIT(28)
-#define GDM4_SPORT_OFFSET0_MASK GENMASK(11, 8)
-
-#define REG_GDM4_SRC_PORT_SET (GDM4_BASE + 0x33c)
-#define GDM4_SPORT_OFF2_MASK GENMASK(19, 16)
-#define GDM4_SPORT_OFF1_MASK GENMASK(15, 12)
-#define GDM4_SPORT_OFF0_MASK GENMASK(11, 8)
-
-#define REG_IP_FRAG_FP 0x2010
-#define IP_ASSEMBLE_PORT_MASK GENMASK(24, 21)
-#define IP_ASSEMBLE_NBQ_MASK GENMASK(20, 16)
-#define IP_FRAGMENT_PORT_MASK GENMASK(8, 5)
-#define IP_FRAGMENT_NBQ_MASK GENMASK(4, 0)
-
-#define REG_MC_VLAN_EN 0x2100
-#define MC_VLAN_EN_MASK BIT(0)
-
-#define REG_MC_VLAN_CFG 0x2104
-#define MC_VLAN_CFG_CMD_DONE_MASK BIT(31)
-#define MC_VLAN_CFG_TABLE_ID_MASK GENMASK(21, 16)
-#define MC_VLAN_CFG_PORT_ID_MASK GENMASK(11, 8)
-#define MC_VLAN_CFG_TABLE_SEL_MASK BIT(4)
-#define MC_VLAN_CFG_RW_MASK BIT(0)
-
-#define REG_MC_VLAN_DATA 0x2108
-
-#define REG_CDM5_RX_OQ1_DROP_CNT 0x29d4
-
-/* QDMA */
-#define REG_QDMA_GLOBAL_CFG 0x0004
-#define GLOBAL_CFG_RX_2B_OFFSET_MASK BIT(31)
-#define GLOBAL_CFG_DMA_PREFERENCE_MASK GENMASK(30, 29)
-#define GLOBAL_CFG_CPU_TXR_RR_MASK BIT(28)
-#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK BIT(27)
-#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK BIT(26)
-#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK BIT(25)
-#define GLOBAL_CFG_OAM_MODIFY_MASK BIT(24)
-#define GLOBAL_CFG_RESET_MASK BIT(23)
-#define GLOBAL_CFG_RESET_DONE_MASK BIT(22)
-#define GLOBAL_CFG_MULTICAST_EN_MASK BIT(21)
-#define GLOBAL_CFG_IRQ1_EN_MASK BIT(20)
-#define GLOBAL_CFG_IRQ0_EN_MASK BIT(19)
-#define GLOBAL_CFG_LOOPCNT_EN_MASK BIT(18)
-#define GLOBAL_CFG_RD_BYPASS_WR_MASK BIT(17)
-#define GLOBAL_CFG_QDMA_LOOPBACK_MASK BIT(16)
-#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK GENMASK(13, 8)
-#define GLOBAL_CFG_CHECK_DONE_MASK BIT(7)
-#define GLOBAL_CFG_TX_WB_DONE_MASK BIT(6)
-#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK GENMASK(5, 4)
-#define GLOBAL_CFG_RX_DMA_BUSY_MASK BIT(3)
-#define GLOBAL_CFG_RX_DMA_EN_MASK BIT(2)
-#define GLOBAL_CFG_TX_DMA_BUSY_MASK BIT(1)
-#define GLOBAL_CFG_TX_DMA_EN_MASK BIT(0)
-
-#define REG_FWD_DSCP_BASE 0x0010
-#define REG_FWD_BUF_BASE 0x0014
-
-#define REG_HW_FWD_DSCP_CFG 0x0018
-#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK GENMASK(29, 28)
-#define HW_FWD_DSCP_SCATTER_LEN_MASK GENMASK(17, 16)
-#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK GENMASK(15, 0)
-
-#define REG_INT_STATUS(_n) \
- (((_n) == 4) ? 0x0730 : \
- ((_n) == 3) ? 0x0724 : \
- ((_n) == 2) ? 0x0720 : \
- ((_n) == 1) ? 0x0024 : 0x0020)
-
-#define REG_INT_ENABLE(_n) \
- (((_n) == 4) ? 0x0750 : \
- ((_n) == 3) ? 0x0744 : \
- ((_n) == 2) ? 0x0740 : \
- ((_n) == 1) ? 0x002c : 0x0028)
-
-/* QDMA_CSR_INT_ENABLE1 */
-#define RX15_COHERENT_INT_MASK BIT(31)
-#define RX14_COHERENT_INT_MASK BIT(30)
-#define RX13_COHERENT_INT_MASK BIT(29)
-#define RX12_COHERENT_INT_MASK BIT(28)
-#define RX11_COHERENT_INT_MASK BIT(27)
-#define RX10_COHERENT_INT_MASK BIT(26)
-#define RX9_COHERENT_INT_MASK BIT(25)
-#define RX8_COHERENT_INT_MASK BIT(24)
-#define RX7_COHERENT_INT_MASK BIT(23)
-#define RX6_COHERENT_INT_MASK BIT(22)
-#define RX5_COHERENT_INT_MASK BIT(21)
-#define RX4_COHERENT_INT_MASK BIT(20)
-#define RX3_COHERENT_INT_MASK BIT(19)
-#define RX2_COHERENT_INT_MASK BIT(18)
-#define RX1_COHERENT_INT_MASK BIT(17)
-#define RX0_COHERENT_INT_MASK BIT(16)
-#define TX7_COHERENT_INT_MASK BIT(15)
-#define TX6_COHERENT_INT_MASK BIT(14)
-#define TX5_COHERENT_INT_MASK BIT(13)
-#define TX4_COHERENT_INT_MASK BIT(12)
-#define TX3_COHERENT_INT_MASK BIT(11)
-#define TX2_COHERENT_INT_MASK BIT(10)
-#define TX1_COHERENT_INT_MASK BIT(9)
-#define TX0_COHERENT_INT_MASK BIT(8)
-#define CNT_OVER_FLOW_INT_MASK BIT(7)
-#define IRQ1_FULL_INT_MASK BIT(5)
-#define IRQ1_INT_MASK BIT(4)
-#define HWFWD_DSCP_LOW_INT_MASK BIT(3)
-#define HWFWD_DSCP_EMPTY_INT_MASK BIT(2)
-#define IRQ0_FULL_INT_MASK BIT(1)
-#define IRQ0_INT_MASK BIT(0)
-
-#define TX_DONE_INT_MASK(_n) \
- ((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK \
- : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
-
-#define INT_TX_MASK \
- (IRQ1_INT_MASK | IRQ1_FULL_INT_MASK | \
- IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
-
-#define INT_IDX0_MASK \
- (TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK | \
- TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK | \
- TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK | \
- TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK | \
- RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK | \
- RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK | \
- RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK | \
- RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK | \
- RX15_COHERENT_INT_MASK | INT_TX_MASK)
-
-/* QDMA_CSR_INT_ENABLE2 */
-#define RX15_NO_CPU_DSCP_INT_MASK BIT(31)
-#define RX14_NO_CPU_DSCP_INT_MASK BIT(30)
-#define RX13_NO_CPU_DSCP_INT_MASK BIT(29)
-#define RX12_NO_CPU_DSCP_INT_MASK BIT(28)
-#define RX11_NO_CPU_DSCP_INT_MASK BIT(27)
-#define RX10_NO_CPU_DSCP_INT_MASK BIT(26)
-#define RX9_NO_CPU_DSCP_INT_MASK BIT(25)
-#define RX8_NO_CPU_DSCP_INT_MASK BIT(24)
-#define RX7_NO_CPU_DSCP_INT_MASK BIT(23)
-#define RX6_NO_CPU_DSCP_INT_MASK BIT(22)
-#define RX5_NO_CPU_DSCP_INT_MASK BIT(21)
-#define RX4_NO_CPU_DSCP_INT_MASK BIT(20)
-#define RX3_NO_CPU_DSCP_INT_MASK BIT(19)
-#define RX2_NO_CPU_DSCP_INT_MASK BIT(18)
-#define RX1_NO_CPU_DSCP_INT_MASK BIT(17)
-#define RX0_NO_CPU_DSCP_INT_MASK BIT(16)
-#define RX15_DONE_INT_MASK BIT(15)
-#define RX14_DONE_INT_MASK BIT(14)
-#define RX13_DONE_INT_MASK BIT(13)
-#define RX12_DONE_INT_MASK BIT(12)
-#define RX11_DONE_INT_MASK BIT(11)
-#define RX10_DONE_INT_MASK BIT(10)
-#define RX9_DONE_INT_MASK BIT(9)
-#define RX8_DONE_INT_MASK BIT(8)
-#define RX7_DONE_INT_MASK BIT(7)
-#define RX6_DONE_INT_MASK BIT(6)
-#define RX5_DONE_INT_MASK BIT(5)
-#define RX4_DONE_INT_MASK BIT(4)
-#define RX3_DONE_INT_MASK BIT(3)
-#define RX2_DONE_INT_MASK BIT(2)
-#define RX1_DONE_INT_MASK BIT(1)
-#define RX0_DONE_INT_MASK BIT(0)
-
-#define RX_DONE_INT_MASK \
- (RX0_DONE_INT_MASK | RX1_DONE_INT_MASK | \
- RX2_DONE_INT_MASK | RX3_DONE_INT_MASK | \
- RX4_DONE_INT_MASK | RX7_DONE_INT_MASK | \
- RX8_DONE_INT_MASK | RX9_DONE_INT_MASK | \
- RX15_DONE_INT_MASK)
-#define INT_IDX1_MASK \
- (RX_DONE_INT_MASK | \
- RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK | \
- RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK | \
- RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK | \
- RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK | \
- RX15_NO_CPU_DSCP_INT_MASK)
-
-/* QDMA_CSR_INT_ENABLE5 */
-#define TX31_COHERENT_INT_MASK BIT(31)
-#define TX30_COHERENT_INT_MASK BIT(30)
-#define TX29_COHERENT_INT_MASK BIT(29)
-#define TX28_COHERENT_INT_MASK BIT(28)
-#define TX27_COHERENT_INT_MASK BIT(27)
-#define TX26_COHERENT_INT_MASK BIT(26)
-#define TX25_COHERENT_INT_MASK BIT(25)
-#define TX24_COHERENT_INT_MASK BIT(24)
-#define TX23_COHERENT_INT_MASK BIT(23)
-#define TX22_COHERENT_INT_MASK BIT(22)
-#define TX21_COHERENT_INT_MASK BIT(21)
-#define TX20_COHERENT_INT_MASK BIT(20)
-#define TX19_COHERENT_INT_MASK BIT(19)
-#define TX18_COHERENT_INT_MASK BIT(18)
-#define TX17_COHERENT_INT_MASK BIT(17)
-#define TX16_COHERENT_INT_MASK BIT(16)
-#define TX15_COHERENT_INT_MASK BIT(15)
-#define TX14_COHERENT_INT_MASK BIT(14)
-#define TX13_COHERENT_INT_MASK BIT(13)
-#define TX12_COHERENT_INT_MASK BIT(12)
-#define TX11_COHERENT_INT_MASK BIT(11)
-#define TX10_COHERENT_INT_MASK BIT(10)
-#define TX9_COHERENT_INT_MASK BIT(9)
-#define TX8_COHERENT_INT_MASK BIT(8)
-
-#define INT_IDX4_MASK \
- (TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK | \
- TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK | \
- TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK | \
- TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK | \
- TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK | \
- TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK | \
- TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK | \
- TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK | \
- TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK | \
- TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK | \
- TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK | \
- TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK)
-
-#define REG_TX_IRQ_BASE(_n) ((_n) ? 0x0048 : 0x0050)
-
-#define REG_TX_IRQ_CFG(_n) ((_n) ? 0x004c : 0x0054)
-#define TX_IRQ_THR_MASK GENMASK(27, 16)
-#define TX_IRQ_DEPTH_MASK GENMASK(11, 0)
-
-#define REG_IRQ_CLEAR_LEN(_n) ((_n) ? 0x0064 : 0x0058)
-#define IRQ_CLEAR_LEN_MASK GENMASK(7, 0)
-
-#define REG_IRQ_STATUS(_n) ((_n) ? 0x0068 : 0x005c)
-#define IRQ_ENTRY_LEN_MASK GENMASK(27, 16)
-#define IRQ_HEAD_IDX_MASK GENMASK(11, 0)
-
-#define REG_TX_RING_BASE(_n) \
- (((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5))
-
-#define REG_TX_RING_BLOCKING(_n) \
- (((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5))
-
-#define TX_RING_IRQ_BLOCKING_MAP_MASK BIT(6)
-#define TX_RING_IRQ_BLOCKING_CFG_MASK BIT(4)
-#define TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK BIT(2)
-#define TX_RING_IRQ_BLOCKING_MAX_TH_TXRING_EN_MASK BIT(1)
-#define TX_RING_IRQ_BLOCKING_MIN_TH_TXRING_EN_MASK BIT(0)
-
-#define REG_TX_CPU_IDX(_n) \
- (((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5))
-
-#define TX_RING_CPU_IDX_MASK GENMASK(15, 0)
-
-#define REG_TX_DMA_IDX(_n) \
- (((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5))
-
-#define TX_RING_DMA_IDX_MASK GENMASK(15, 0)
-
-#define IRQ_RING_IDX_MASK GENMASK(20, 16)
-#define IRQ_DESC_IDX_MASK GENMASK(15, 0)
-
-#define REG_RX_RING_BASE(_n) \
- (((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5))
-
-#define REG_RX_RING_SIZE(_n) \
- (((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5))
-
-#define RX_RING_THR_MASK GENMASK(31, 16)
-#define RX_RING_SIZE_MASK GENMASK(15, 0)
-
-#define REG_RX_CPU_IDX(_n) \
- (((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5))
-
-#define RX_RING_CPU_IDX_MASK GENMASK(15, 0)
-
-#define REG_RX_DMA_IDX(_n) \
- (((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5))
-
-#define REG_RX_DELAY_INT_IDX(_n) \
- (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))
-
-#define RX_DELAY_INT_MASK GENMASK(15, 0)
-
-#define RX_RING_DMA_IDX_MASK GENMASK(15, 0)
-
-#define REG_INGRESS_TRTCM_CFG 0x0070
-#define INGRESS_TRTCM_EN_MASK BIT(31)
-#define INGRESS_TRTCM_MODE_MASK BIT(30)
-#define INGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
-#define INGRESS_FAST_TICK_MASK GENMASK(15, 0)
-
-#define REG_TXQ_DIS_CFG_BASE(_n) ((_n) ? 0x20a0 : 0x00a0)
-#define REG_TXQ_DIS_CFG(_n, _m) (REG_TXQ_DIS_CFG_BASE((_n)) + (_m) << 2)
-
-#define REG_LMGR_INIT_CFG 0x1000
-#define LMGR_INIT_START BIT(31)
-#define LMGR_SRAM_MODE_MASK BIT(30)
-#define HW_FWD_PKTSIZE_OVERHEAD_MASK GENMASK(27, 20)
-#define HW_FWD_DESC_NUM_MASK GENMASK(16, 0)
-
-#define REG_FWD_DSCP_LOW_THR 0x1004
-#define FWD_DSCP_LOW_THR_MASK GENMASK(17, 0)
-
-#define REG_EGRESS_RATE_METER_CFG 0x100c
-#define EGRESS_RATE_METER_EN_MASK BIT(31)
-#define EGRESS_RATE_METER_EQ_RATE_EN_MASK BIT(17)
-#define EGRESS_RATE_METER_WINDOW_SZ_MASK GENMASK(16, 12)
-#define EGRESS_RATE_METER_TIMESLICE_MASK GENMASK(10, 0)
-
-#define REG_EGRESS_TRTCM_CFG 0x1010
-#define EGRESS_TRTCM_EN_MASK BIT(31)
-#define EGRESS_TRTCM_MODE_MASK BIT(30)
-#define EGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
-#define EGRESS_FAST_TICK_MASK GENMASK(15, 0)
-
-#define REG_TXWRR_MODE_CFG 0x1020
-#define TWRR_WEIGHT_SCALE_MASK BIT(31)
-#define TWRR_WEIGHT_BASE_MASK BIT(3)
-
-#define REG_PSE_BUF_USAGE_CFG 0x1028
-#define PSE_BUF_ESTIMATE_EN_MASK BIT(29)
-
-#define REG_GLB_TRTCM_CFG 0x1080
-#define GLB_TRTCM_EN_MASK BIT(31)
-#define GLB_TRTCM_MODE_MASK BIT(30)
-#define GLB_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
-#define GLB_FAST_TICK_MASK GENMASK(15, 0)
-
-#define REG_TXQ_CNGST_CFG 0x10a0
-#define TXQ_CNGST_DROP_EN BIT(31)
-#define TXQ_CNGST_DEI_DROP_EN BIT(30)
-
-#define REG_SLA_TRTCM_CFG 0x1150
-#define SLA_TRTCM_EN_MASK BIT(31)
-#define SLA_TRTCM_MODE_MASK BIT(30)
-#define SLA_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
-#define SLA_FAST_TICK_MASK GENMASK(15, 0)
-
-/* CTRL */
-#define QDMA_DESC_DONE_MASK BIT(31)
-#define QDMA_DESC_DROP_MASK BIT(30) /* tx: drop - rx: overflow */
-#define QDMA_DESC_MORE_MASK BIT(29) /* more SG elements */
-#define QDMA_DESC_DEI_MASK BIT(25)
-#define QDMA_DESC_NO_DROP_MASK BIT(24)
-#define QDMA_DESC_LEN_MASK GENMASK(15, 0)
-/* DATA */
-#define QDMA_DESC_NEXT_ID_MASK GENMASK(15, 0)
-/* TX MSG0 */
-#define QDMA_ETH_TXMSG_MIC_IDX_MASK BIT(30)
-#define QDMA_ETH_TXMSG_SP_TAG_MASK GENMASK(29, 14)
-#define QDMA_ETH_TXMSG_ICO_MASK BIT(13)
-#define QDMA_ETH_TXMSG_UCO_MASK BIT(12)
-#define QDMA_ETH_TXMSG_TCO_MASK BIT(11)
-#define QDMA_ETH_TXMSG_TSO_MASK BIT(10)
-#define QDMA_ETH_TXMSG_FAST_MASK BIT(9)
-#define QDMA_ETH_TXMSG_OAM_MASK BIT(8)
-#define QDMA_ETH_TXMSG_CHAN_MASK GENMASK(7, 3)
-#define QDMA_ETH_TXMSG_QUEUE_MASK GENMASK(2, 0)
-/* TX MSG1 */
-#define QDMA_ETH_TXMSG_NO_DROP BIT(31)
-#define QDMA_ETH_TXMSG_METER_MASK GENMASK(30, 24) /* 0x7f no meters */
-#define QDMA_ETH_TXMSG_FPORT_MASK GENMASK(23, 20)
-#define QDMA_ETH_TXMSG_NBOQ_MASK GENMASK(19, 15)
-#define QDMA_ETH_TXMSG_HWF_MASK BIT(14)
-#define QDMA_ETH_TXMSG_HOP_MASK BIT(13)
-#define QDMA_ETH_TXMSG_PTP_MASK BIT(12)
-#define QDMA_ETH_TXMSG_ACNT_G1_MASK GENMASK(10, 6) /* 0x1f do not count */
-#define QDMA_ETH_TXMSG_ACNT_G0_MASK GENMASK(5, 0) /* 0x3f do not count */
-
-/* RX MSG1 */
-#define QDMA_ETH_RXMSG_DEI_MASK BIT(31)
-#define QDMA_ETH_RXMSG_IP6_MASK BIT(30)
-#define QDMA_ETH_RXMSG_IP4_MASK BIT(29)
-#define QDMA_ETH_RXMSG_IP4F_MASK BIT(28)
-#define QDMA_ETH_RXMSG_L4_VALID_MASK BIT(27)
-#define QDMA_ETH_RXMSG_L4F_MASK BIT(26)
-#define QDMA_ETH_RXMSG_SPORT_MASK GENMASK(25, 21)
-#define QDMA_ETH_RXMSG_CRSN_MASK GENMASK(20, 16)
-#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK GENMASK(15, 0)
-
-struct airoha_qdma_desc {
- __le32 rsv;
- __le32 ctrl;
- __le32 addr;
- __le32 data;
- __le32 msg0;
- __le32 msg1;
- __le32 msg2;
- __le32 msg3;
-};
-
-/* CTRL0 */
-#define QDMA_FWD_DESC_CTX_MASK BIT(31)
-#define QDMA_FWD_DESC_RING_MASK GENMASK(30, 28)
-#define QDMA_FWD_DESC_IDX_MASK GENMASK(27, 16)
-#define QDMA_FWD_DESC_LEN_MASK GENMASK(15, 0)
-/* CTRL1 */
-#define QDMA_FWD_DESC_FIRST_IDX_MASK GENMASK(15, 0)
-/* CTRL2 */
-#define QDMA_FWD_DESC_MORE_PKT_NUM_MASK GENMASK(2, 0)
-
-struct airoha_qdma_fwd_desc {
- __le32 addr;
- __le32 ctrl0;
- __le32 ctrl1;
- __le32 ctrl2;
- __le32 msg0;
- __le32 msg1;
- __le32 rsv0;
- __le32 rsv1;
-};
-
-enum {
- QDMA_INT_REG_IDX0,
- QDMA_INT_REG_IDX1,
- QDMA_INT_REG_IDX2,
- QDMA_INT_REG_IDX3,
- QDMA_INT_REG_IDX4,
- QDMA_INT_REG_MAX
-};
-
-enum {
- XSI_PCIE0_PORT,
- XSI_PCIE1_PORT,
- XSI_USB_PORT,
- XSI_AE_PORT,
- XSI_ETH_PORT,
-};
-
-enum {
- XSI_PCIE0_VIP_PORT_MASK = BIT(22),
- XSI_PCIE1_VIP_PORT_MASK = BIT(23),
- XSI_USB_VIP_PORT_MASK = BIT(25),
- XSI_ETH_VIP_PORT_MASK = BIT(24),
-};
-
-enum {
- DEV_STATE_INITIALIZED,
-};
-
-enum {
- CDM_CRSN_QSEL_Q1 = 1,
- CDM_CRSN_QSEL_Q5 = 5,
- CDM_CRSN_QSEL_Q6 = 6,
- CDM_CRSN_QSEL_Q15 = 15,
-};
-
-enum {
- CRSN_08 = 0x8,
- CRSN_21 = 0x15, /* KA */
- CRSN_22 = 0x16, /* hit bind and force route to CPU */
- CRSN_24 = 0x18,
- CRSN_25 = 0x19,
-};
-
-enum {
- FE_PSE_PORT_CDM1,
- FE_PSE_PORT_GDM1,
- FE_PSE_PORT_GDM2,
- FE_PSE_PORT_GDM3,
- FE_PSE_PORT_PPE1,
- FE_PSE_PORT_CDM2,
- FE_PSE_PORT_CDM3,
- FE_PSE_PORT_CDM4,
- FE_PSE_PORT_PPE2,
- FE_PSE_PORT_GDM4,
- FE_PSE_PORT_CDM5,
- FE_PSE_PORT_DROP = 0xf,
-};
-
-struct airoha_queue_entry {
- union {
- void *buf;
- struct sk_buff *skb;
- };
- dma_addr_t dma_addr;
- u16 dma_len;
-};
-
-struct airoha_queue {
- struct airoha_qdma *qdma;
-
- /* protect concurrent queue accesses */
- spinlock_t lock;
- struct airoha_queue_entry *entry;
- struct airoha_qdma_desc *desc;
- u16 head;
- u16 tail;
-
- int queued;
- int ndesc;
- int free_thr;
- int buf_size;
-
- struct napi_struct napi;
- struct page_pool *page_pool;
-};
-
-struct airoha_tx_irq_queue {
- struct airoha_qdma *qdma;
-
- struct napi_struct napi;
-
- int size;
- u32 *q;
-};
-
-struct airoha_hw_stats {
- /* protect concurrent hw_stats accesses */
- spinlock_t lock;
- struct u64_stats_sync syncp;
-
- /* get_stats64 */
- u64 rx_ok_pkts;
- u64 tx_ok_pkts;
- u64 rx_ok_bytes;
- u64 tx_ok_bytes;
- u64 rx_multicast;
- u64 rx_errors;
- u64 rx_drops;
- u64 tx_drops;
- u64 rx_crc_error;
- u64 rx_over_errors;
- /* ethtool stats */
- u64 tx_broadcast;
- u64 tx_multicast;
- u64 tx_len[7];
- u64 rx_broadcast;
- u64 rx_fragment;
- u64 rx_jabber;
- u64 rx_len[7];
-};
-
-struct airoha_qdma {
- struct airoha_eth *eth;
- void __iomem *regs;
-
- /* protect concurrent irqmask accesses */
- spinlock_t irq_lock;
- u32 irqmask[QDMA_INT_REG_MAX];
- int irq;
-
- struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
+#include "airoha_regs.h"
+#include "airoha_eth.h"
- struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
- struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
-
- /* descriptor and packet buffers for qdma hw forward */
- struct {
- void *desc;
- void *q;
- } hfwd;
-};
-
-struct airoha_gdm_port {
- struct airoha_qdma *qdma;
- struct net_device *dev;
- int id;
-
- struct airoha_hw_stats stats;
-};
-
-struct airoha_eth {
- struct device *dev;
-
- unsigned long state;
- void __iomem *fe_regs;
-
- struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
- struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
-
- struct net_device *napi_dev;
-
- struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
- struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
-};
-
-static u32 airoha_rr(void __iomem *base, u32 offset)
+u32 airoha_rr(void __iomem *base, u32 offset)
{
return readl(base + offset);
}
-static void airoha_wr(void __iomem *base, u32 offset, u32 val)
+void airoha_wr(void __iomem *base, u32 offset, u32 val)
{
writel(val, base + offset);
}
-static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
+u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
{
val |= (airoha_rr(base, offset) & ~mask);
airoha_wr(base, offset, val);
@@ -845,68 +35,40 @@ static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
return val;
}
-#define airoha_fe_rr(eth, offset) \
- airoha_rr((eth)->fe_regs, (offset))
-#define airoha_fe_wr(eth, offset, val) \
- airoha_wr((eth)->fe_regs, (offset), (val))
-#define airoha_fe_rmw(eth, offset, mask, val) \
- airoha_rmw((eth)->fe_regs, (offset), (mask), (val))
-#define airoha_fe_set(eth, offset, val) \
- airoha_rmw((eth)->fe_regs, (offset), 0, (val))
-#define airoha_fe_clear(eth, offset, val) \
- airoha_rmw((eth)->fe_regs, (offset), (val), 0)
-
-#define airoha_qdma_rr(qdma, offset) \
- airoha_rr((qdma)->regs, (offset))
-#define airoha_qdma_wr(qdma, offset, val) \
- airoha_wr((qdma)->regs, (offset), (val))
-#define airoha_qdma_rmw(qdma, offset, mask, val) \
- airoha_rmw((qdma)->regs, (offset), (mask), (val))
-#define airoha_qdma_set(qdma, offset, val) \
- airoha_rmw((qdma)->regs, (offset), 0, (val))
-#define airoha_qdma_clear(qdma, offset, val) \
- airoha_rmw((qdma)->regs, (offset), (val), 0)
-
-static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index,
- u32 clear, u32 set)
+static void airoha_qdma_set_irqmask(struct airoha_irq_bank *irq_bank,
+ int index, u32 clear, u32 set)
{
+ struct airoha_qdma *qdma = irq_bank->qdma;
+ int bank = irq_bank - &qdma->irq_banks[0];
unsigned long flags;
- if (WARN_ON_ONCE(index >= ARRAY_SIZE(qdma->irqmask)))
+ if (WARN_ON_ONCE(index >= ARRAY_SIZE(irq_bank->irqmask)))
return;
- spin_lock_irqsave(&qdma->irq_lock, flags);
+ spin_lock_irqsave(&irq_bank->irq_lock, flags);
- qdma->irqmask[index] &= ~clear;
- qdma->irqmask[index] |= set;
- airoha_qdma_wr(qdma, REG_INT_ENABLE(index), qdma->irqmask[index]);
+ irq_bank->irqmask[index] &= ~clear;
+ irq_bank->irqmask[index] |= set;
+ airoha_qdma_wr(qdma, REG_INT_ENABLE(bank, index),
+ irq_bank->irqmask[index]);
/* Read irq_enable register in order to guarantee the update above
* completes in the spinlock critical section.
*/
- airoha_qdma_rr(qdma, REG_INT_ENABLE(index));
+ airoha_qdma_rr(qdma, REG_INT_ENABLE(bank, index));
- spin_unlock_irqrestore(&qdma->irq_lock, flags);
+ spin_unlock_irqrestore(&irq_bank->irq_lock, flags);
}
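Not part of the patch: the readl() back of REG_INT_ENABLE() above is a posted-write flush, guaranteeing the mask update has reached the device before the spinlock is dropped; a stripped-down sketch of the idiom with hypothetical names.

static void example_update_irqmask(void __iomem *reg, spinlock_t *lock,
				   u32 *shadow, u32 clear, u32 set)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	*shadow = (*shadow & ~clear) | set;
	writel(*shadow, reg);
	readl(reg);	/* flush the posted write inside the critical section */
	spin_unlock_irqrestore(lock, flags);
}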
-static void airoha_qdma_irq_enable(struct airoha_qdma *qdma, int index,
- u32 mask)
+static void airoha_qdma_irq_enable(struct airoha_irq_bank *irq_bank,
+ int index, u32 mask)
{
- airoha_qdma_set_irqmask(qdma, index, 0, mask);
+ airoha_qdma_set_irqmask(irq_bank, index, 0, mask);
}
-static void airoha_qdma_irq_disable(struct airoha_qdma *qdma, int index,
- u32 mask)
+static void airoha_qdma_irq_disable(struct airoha_irq_bank *irq_bank,
+ int index, u32 mask)
{
- airoha_qdma_set_irqmask(qdma, index, mask, 0);
-}
-
-static bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
-{
- /* GDM1 port on EN7581 SoC is connected to the lan dsa switch.
- * GDM{2,3,4} can be used as wan port connected to an external
- * phy module.
- */
- return port->id == 1;
+ airoha_qdma_set_irqmask(irq_bank, index, mask, 0);
}
static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr)
@@ -922,6 +84,8 @@ static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr)
val = (addr[3] << 16) | (addr[4] << 8) | addr[5];
airoha_fe_wr(eth, REG_FE_MAC_LMIN(reg), val);
airoha_fe_wr(eth, REG_FE_MAC_LMAX(reg), val);
+
+ airoha_ppe_init_upd_mem(port);
}
static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
@@ -937,30 +101,23 @@ static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
FIELD_PREP(GDM_UCFQ_MASK, val));
}
-static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable)
+static int airoha_set_vip_for_gdm_port(struct airoha_gdm_port *port,
+ bool enable)
{
- u32 val = enable ? FE_PSE_PORT_PPE1 : FE_PSE_PORT_DROP;
- u32 vip_port, cfg_addr;
+ struct airoha_eth *eth = port->qdma->eth;
+ u32 vip_port;
- switch (port) {
- case XSI_PCIE0_PORT:
+ switch (port->id) {
+ case 3:
+ /* FIXME: handle XSI_PCIE1_PORT */
vip_port = XSI_PCIE0_VIP_PORT_MASK;
- cfg_addr = REG_GDM_FWD_CFG(3);
- break;
- case XSI_PCIE1_PORT:
- vip_port = XSI_PCIE1_VIP_PORT_MASK;
- cfg_addr = REG_GDM_FWD_CFG(3);
- break;
- case XSI_USB_PORT:
- vip_port = XSI_USB_VIP_PORT_MASK;
- cfg_addr = REG_GDM_FWD_CFG(4);
break;
- case XSI_ETH_PORT:
+ case 4:
+ /* FIXME: handle XSI_USB_PORT */
vip_port = XSI_ETH_VIP_PORT_MASK;
- cfg_addr = REG_GDM_FWD_CFG(4);
break;
default:
- return -EINVAL;
+ return 0;
}
if (enable) {
@@ -971,54 +128,20 @@ static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable)
airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, vip_port);
}
- airoha_set_gdm_port_fwd_cfg(eth, cfg_addr, val);
-
return 0;
}
-static int airoha_set_gdm_ports(struct airoha_eth *eth, bool enable)
-{
- const int port_list[] = {
- XSI_PCIE0_PORT,
- XSI_PCIE1_PORT,
- XSI_USB_PORT,
- XSI_ETH_PORT
- };
- int i, err;
-
- for (i = 0; i < ARRAY_SIZE(port_list); i++) {
- err = airoha_set_gdm_port(eth, port_list[i], enable);
- if (err)
- goto error;
- }
-
- return 0;
-
-error:
- for (i--; i >= 0; i--)
- airoha_set_gdm_port(eth, port_list[i], false);
-
- return err;
-}
-
static void airoha_fe_maccr_init(struct airoha_eth *eth)
{
int p;
- for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) {
+ for (p = 1; p <= ARRAY_SIZE(eth->ports); p++)
airoha_fe_set(eth, REG_GDM_FWD_CFG(p),
- GDM_TCP_CKSUM | GDM_UDP_CKSUM | GDM_IP4_CKSUM |
- GDM_DROP_CRC_ERR);
- airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(p),
- FE_PSE_PORT_CDM1);
- airoha_fe_rmw(eth, REG_GDM_LEN_CFG(p),
- GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
- FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
- FIELD_PREP(GDM_LONG_LEN_MASK, 4004));
- }
+ GDM_TCP_CKSUM_MASK | GDM_UDP_CKSUM_MASK |
+ GDM_IP4_CKSUM_MASK | GDM_DROP_CRC_ERR_MASK);
- airoha_fe_rmw(eth, REG_CDM1_VLAN_CTRL, CDM1_VLAN_MASK,
- FIELD_PREP(CDM1_VLAN_MASK, 0x8100));
+ airoha_fe_rmw(eth, REG_CDM_VLAN_CTRL(1), CDM_VLAN_MASK,
+ FIELD_PREP(CDM_VLAN_MASK, 0x8100));
airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PAD);
}
@@ -1174,8 +297,11 @@ static void airoha_fe_pse_ports_init(struct airoha_eth *eth)
int q;
all_rsv = airoha_fe_get_pse_all_rsv(eth);
- /* hw misses PPE2 oq rsv */
- all_rsv += PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2];
+ if (airoha_ppe_is_enabled(eth, 1)) {
+ /* hw misses PPE2 oq rsv */
+ all_rsv += PSE_RSV_PAGES *
+ pse_port_num_queues[FE_PSE_PORT_PPE2];
+ }
airoha_fe_set(eth, REG_FE_PSE_BUF_SET, all_rsv);
/* CMD1 */
@@ -1212,13 +338,17 @@ static void airoha_fe_pse_ports_init(struct airoha_eth *eth)
for (q = 4; q < pse_port_num_queues[FE_PSE_PORT_CDM4]; q++)
airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM4, q,
PSE_QUEUE_RSV_PAGES);
- /* PPE2 */
- for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) {
- if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2)
- airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q,
- PSE_QUEUE_RSV_PAGES);
- else
- airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q, 0);
+ if (airoha_ppe_is_enabled(eth, 1)) {
+ /* PPE2 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) {
+ if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2,
+ q,
+ PSE_QUEUE_RSV_PAGES);
+ else
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2,
+ q, 0);
+ }
}
/* GMD4 */
for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM4]; q++)
@@ -1273,46 +403,46 @@ static int airoha_fe_mc_vlan_clear(struct airoha_eth *eth)
static void airoha_fe_crsn_qsel_init(struct airoha_eth *eth)
{
/* CDM1_CRSN_QSEL */
- airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_22 >> 2),
- CDM1_CRSN_QSEL_REASON_MASK(CRSN_22),
- FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_22),
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_22 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_22),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_22),
CDM_CRSN_QSEL_Q1));
- airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_08 >> 2),
- CDM1_CRSN_QSEL_REASON_MASK(CRSN_08),
- FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_08),
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_08 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_08),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_08),
CDM_CRSN_QSEL_Q1));
- airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_21 >> 2),
- CDM1_CRSN_QSEL_REASON_MASK(CRSN_21),
- FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_21),
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_21 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_21),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_21),
CDM_CRSN_QSEL_Q1));
- airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_24 >> 2),
- CDM1_CRSN_QSEL_REASON_MASK(CRSN_24),
- FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_24),
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_24 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_24),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_24),
CDM_CRSN_QSEL_Q6));
- airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_25 >> 2),
- CDM1_CRSN_QSEL_REASON_MASK(CRSN_25),
- FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_25),
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_25 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_25),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_25),
CDM_CRSN_QSEL_Q1));
/* CDM2_CRSN_QSEL */
- airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_08 >> 2),
- CDM2_CRSN_QSEL_REASON_MASK(CRSN_08),
- FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_08),
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_08 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_08),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_08),
CDM_CRSN_QSEL_Q1));
- airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_21 >> 2),
- CDM2_CRSN_QSEL_REASON_MASK(CRSN_21),
- FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_21),
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_21 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_21),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_21),
CDM_CRSN_QSEL_Q1));
- airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_22 >> 2),
- CDM2_CRSN_QSEL_REASON_MASK(CRSN_22),
- FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_22),
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_22 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_22),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_22),
CDM_CRSN_QSEL_Q1));
- airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_24 >> 2),
- CDM2_CRSN_QSEL_REASON_MASK(CRSN_24),
- FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_24),
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_24 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_24),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_24),
CDM_CRSN_QSEL_Q6));
- airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_25 >> 2),
- CDM2_CRSN_QSEL_REASON_MASK(CRSN_25),
- FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_25),
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_25 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_25),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_25),
CDM_CRSN_QSEL_Q1));
}
@@ -1332,18 +462,18 @@ static int airoha_fe_init(struct airoha_eth *eth)
airoha_fe_wr(eth, REG_FE_PCE_CFG,
PCE_DPI_EN_MASK | PCE_KA_EN_MASK | PCE_MC_EN_MASK);
/* set vip queue selection to ring 1 */
- airoha_fe_rmw(eth, REG_CDM1_FWD_CFG, CDM1_VIP_QSEL_MASK,
- FIELD_PREP(CDM1_VIP_QSEL_MASK, 0x4));
- airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_VIP_QSEL_MASK,
- FIELD_PREP(CDM2_VIP_QSEL_MASK, 0x4));
+ airoha_fe_rmw(eth, REG_CDM_FWD_CFG(1), CDM_VIP_QSEL_MASK,
+ FIELD_PREP(CDM_VIP_QSEL_MASK, 0x4));
+ airoha_fe_rmw(eth, REG_CDM_FWD_CFG(2), CDM_VIP_QSEL_MASK,
+ FIELD_PREP(CDM_VIP_QSEL_MASK, 0x4));
/* set GDM4 source interface offset to 8 */
- airoha_fe_rmw(eth, REG_GDM4_SRC_PORT_SET,
- GDM4_SPORT_OFF2_MASK |
- GDM4_SPORT_OFF1_MASK |
- GDM4_SPORT_OFF0_MASK,
- FIELD_PREP(GDM4_SPORT_OFF2_MASK, 8) |
- FIELD_PREP(GDM4_SPORT_OFF1_MASK, 8) |
- FIELD_PREP(GDM4_SPORT_OFF0_MASK, 8));
+ airoha_fe_rmw(eth, REG_GDM_SRC_PORT_SET(4),
+ GDM_SPORT_OFF2_MASK |
+ GDM_SPORT_OFF1_MASK |
+ GDM_SPORT_OFF0_MASK,
+ FIELD_PREP(GDM_SPORT_OFF2_MASK, 8) |
+ FIELD_PREP(GDM_SPORT_OFF1_MASK, 8) |
+ FIELD_PREP(GDM_SPORT_OFF0_MASK, 8));
/* set PSE Page as 128B */
airoha_fe_rmw(eth, REG_FE_DMA_GLO_CFG,
@@ -1369,8 +499,8 @@ static int airoha_fe_init(struct airoha_eth *eth)
airoha_fe_set(eth, REG_GDM_MISC_CFG,
GDM2_RDM_ACK_WAIT_PREF_MASK |
GDM2_CHN_VLD_MODE_MASK);
- airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_OAM_QSEL_MASK,
- FIELD_PREP(CDM2_OAM_QSEL_MASK, 15));
+ airoha_fe_rmw(eth, REG_CDM_FWD_CFG(2), CDM_OAM_QSEL_MASK,
+ FIELD_PREP(CDM_OAM_QSEL_MASK, 15));
/* init fragment and assemble Force Port */
/* NPU Core-3, NPU Bridge Channel-3 */
@@ -1384,8 +514,8 @@ static int airoha_fe_init(struct airoha_eth *eth)
FIELD_PREP(IP_ASSEMBLE_PORT_MASK, 0) |
FIELD_PREP(IP_ASSEMBLE_NBQ_MASK, 22));
- airoha_fe_set(eth, REG_GDM3_FWD_CFG, GDM3_PAD_EN_MASK);
- airoha_fe_set(eth, REG_GDM4_FWD_CFG, GDM4_PAD_EN_MASK);
+ airoha_fe_set(eth, REG_GDM_FWD_CFG(3), GDM_PAD_EN_MASK);
+ airoha_fe_set(eth, REG_GDM_FWD_CFG(4), GDM_PAD_EN_MASK);
airoha_fe_crsn_qsel_init(eth);
@@ -1393,7 +523,7 @@ static int airoha_fe_init(struct airoha_eth *eth)
airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PORT_XFC_MASK);
/* default aging mode for mbi unlock issue */
- airoha_fe_rmw(eth, REG_GDM2_CHN_RLS,
+ airoha_fe_rmw(eth, REG_GDM_CHN_RLS(2),
MBI_RX_AGE_SEL_MASK | MBI_TX_AGE_SEL_MASK,
FIELD_PREP(MBI_RX_AGE_SEL_MASK, 3) |
FIELD_PREP(MBI_TX_AGE_SEL_MASK, 3));
@@ -1409,9 +539,7 @@ static int airoha_fe_init(struct airoha_eth *eth)
static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
{
- enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
struct airoha_qdma *qdma = q->qdma;
- struct airoha_eth *eth = qdma->eth;
int qid = q - &qdma->q_rx[0];
int nframes = 0;
@@ -1435,9 +563,6 @@ static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
e->dma_addr = page_pool_get_dma_addr(page) + offset;
e->dma_len = SKB_WITH_OVERHEAD(q->buf_size);
- dma_sync_single_for_device(eth->dev, e->dma_addr, e->dma_len,
- dir);
-
val = FIELD_PREP(QDMA_DESC_LEN_MASK, e->dma_len);
WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
WRITE_ONCE(desc->addr, cpu_to_le32(e->dma_addr));
@@ -1463,7 +588,7 @@ static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1);
switch (sport) {
- case 0x10 ... 0x13:
+ case 0x10 ... 0x14:
port = 0;
break;
case 0x2 ... 0x4:
@@ -1487,53 +612,94 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
while (done < budget) {
struct airoha_queue_entry *e = &q->entry[q->tail];
struct airoha_qdma_desc *desc = &q->desc[q->tail];
- dma_addr_t dma_addr = le32_to_cpu(desc->addr);
+ u32 hash, reason, msg1 = le32_to_cpu(desc->msg1);
+ struct page *page = virt_to_head_page(e->buf);
u32 desc_ctrl = le32_to_cpu(desc->ctrl);
- struct sk_buff *skb;
- int len, p;
+ struct airoha_gdm_port *port;
+ int data_len, len, p;
if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
break;
- if (!dma_addr)
- break;
-
- len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
- if (!len)
- break;
-
q->tail = (q->tail + 1) % q->ndesc;
q->queued--;
- dma_sync_single_for_cpu(eth->dev, dma_addr,
+ dma_sync_single_for_cpu(eth->dev, e->dma_addr,
SKB_WITH_OVERHEAD(q->buf_size), dir);
+ len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
+ data_len = q->skb ? q->buf_size
+ : SKB_WITH_OVERHEAD(q->buf_size);
+ if (!len || data_len < len)
+ goto free_frag;
+
p = airoha_qdma_get_gdm_port(eth, desc);
- if (p < 0 || !eth->ports[p]) {
- page_pool_put_full_page(q->page_pool,
- virt_to_head_page(e->buf),
- true);
- continue;
+ if (p < 0 || !eth->ports[p])
+ goto free_frag;
+
+ port = eth->ports[p];
+ if (!q->skb) { /* first buffer */
+ q->skb = napi_build_skb(e->buf, q->buf_size);
+ if (!q->skb)
+ goto free_frag;
+
+ __skb_put(q->skb, len);
+ skb_mark_for_recycle(q->skb);
+ q->skb->dev = port->dev;
+ q->skb->protocol = eth_type_trans(q->skb, port->dev);
+ q->skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb_record_rx_queue(q->skb, qid);
+ } else { /* scattered frame */
+ struct skb_shared_info *shinfo = skb_shinfo(q->skb);
+ int nr_frags = shinfo->nr_frags;
+
+ if (nr_frags >= ARRAY_SIZE(shinfo->frags))
+ goto free_frag;
+
+ skb_add_rx_frag(q->skb, nr_frags, page,
+ e->buf - page_address(page), len,
+ q->buf_size);
}
- skb = napi_build_skb(e->buf, q->buf_size);
- if (!skb) {
- page_pool_put_full_page(q->page_pool,
- virt_to_head_page(e->buf),
- true);
- break;
+ if (FIELD_GET(QDMA_DESC_MORE_MASK, desc_ctrl))
+ continue;
+
+ if (netdev_uses_dsa(port->dev)) {
+ /* PPE module requires untagged packets to work
+ * properly and it provides DSA port index via the
+ * DMA descriptor. Report DSA tag to the DSA stack
+ * via skb dst info.
+ */
+ u32 sptag = FIELD_GET(QDMA_ETH_RXMSG_SPTAG,
+ le32_to_cpu(desc->msg0));
+
+ if (sptag < ARRAY_SIZE(port->dsa_meta) &&
+ port->dsa_meta[sptag])
+ skb_dst_set_noref(q->skb,
+ &port->dsa_meta[sptag]->dst);
}
- skb_reserve(skb, 2);
- __skb_put(skb, len);
- skb_mark_for_recycle(skb);
- skb->dev = eth->ports[p]->dev;
- skb->protocol = eth_type_trans(skb, skb->dev);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb_record_rx_queue(skb, qid);
- napi_gro_receive(&q->napi, skb);
+ hash = FIELD_GET(AIROHA_RXD4_FOE_ENTRY, msg1);
+ if (hash != AIROHA_RXD4_FOE_ENTRY)
+ skb_set_hash(q->skb, jhash_1word(hash, 0),
+ PKT_HASH_TYPE_L4);
+
+ reason = FIELD_GET(AIROHA_RXD4_PPE_CPU_REASON, msg1);
+ if (reason == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+ airoha_ppe_check_skb(&eth->ppe->dev, q->skb, hash,
+ false);
done++;
+ napi_gro_receive(&q->napi, q->skb);
+ q->skb = NULL;
+ continue;
+free_frag:
+ if (q->skb) {
+ dev_kfree_skb(q->skb);
+ q->skb = NULL;
+ } else {
+ page_pool_put_full_page(q->page_pool, page, true);
+ }
}
airoha_qdma_fill_rx_queue(q);
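
Illustrative userspace model (not driver code) of the scatter-gather RX logic introduced above: the first completed descriptor becomes the skb head, subsequent descriptors carrying the MORE bit are appended as page fragments, and the frame is delivered only when a descriptor without MORE is seen. Flag values and buffer capacities below are assumptions made for the sketch.

#include <stdbool.h>
#include <stdio.h>

#define DESC_DONE 0x1
#define DESC_MORE 0x2
#define HEAD_CAPACITY 1792  /* buf_size minus an assumed skb overhead */
#define FRAG_CAPACITY 2048  /* full buffer for non-head fragments */

struct rx_desc {
	unsigned int flags;
	unsigned int len;
};

/* Returns the assembled frame length, 0 if the frame is not complete yet,
 * or -1 on a bad fragment (mirrors the free_frag drop path above).
 */
static int assemble_frame(const struct rx_desc *ring, int ndesc, int *tail)
{
	int total = 0;
	bool head = true;

	while (*tail < ndesc && (ring[*tail].flags & DESC_DONE)) {
		const struct rx_desc *d = &ring[(*tail)++];
		unsigned int cap = head ? HEAD_CAPACITY : FRAG_CAPACITY;

		if (!d->len || d->len > cap)
			return -1;	/* drop the whole frame */

		total += d->len;
		head = false;
		if (!(d->flags & DESC_MORE))
			return total;	/* last fragment: frame complete */
	}
	return 0;
}

int main(void)
{
	struct rx_desc ring[] = {
		{ DESC_DONE | DESC_MORE, 1792 },
		{ DESC_DONE, 708 },
	};
	int tail = 0;

	printf("frame len: %d\n", assemble_frame(ring, 2, &tail)); /* 2500 */
	return 0;
}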
@@ -1550,9 +716,20 @@ static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
done += cur;
} while (cur && done < budget);
- if (done < budget && napi_complete(napi))
- airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1,
- RX_DONE_INT_MASK);
+ if (done < budget && napi_complete(napi)) {
+ struct airoha_qdma *qdma = q->qdma;
+ int i, qid = q - &qdma->q_rx[0];
+ int intr_reg = qid < RX_DONE_HIGH_OFFSET ? QDMA_INT_REG_IDX1
+ : QDMA_INT_REG_IDX2;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
+ if (!(BIT(qid) & RX_IRQ_BANK_PIN_MASK(i)))
+ continue;
+
+ airoha_qdma_irq_enable(&qdma->irq_banks[i], intr_reg,
+ BIT(qid % RX_DONE_HIGH_OFFSET));
+ }
+ }
return done;
}
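
Minimal sketch of the qid to (interrupt register, bit) mapping used when re-arming RX interrupts above. RX_DONE_HIGH_OFFSET is assumed to be 16 here (low queues reported in INT_IDX1, high queues in INT_IDX2); the actual value comes from the driver header.

#include <stdio.h>

#define RX_DONE_HIGH_OFFSET 16	/* assumption for this sketch */

struct rx_irq_loc {
	int reg_idx;		/* QDMA_INT_REG_IDX1 or QDMA_INT_REG_IDX2 */
	unsigned int bit;
};

static struct rx_irq_loc rx_done_irq_loc(int qid)
{
	struct rx_irq_loc loc = {
		.reg_idx = qid < RX_DONE_HIGH_OFFSET ? 1 : 2,
		.bit = 1u << (qid % RX_DONE_HIGH_OFFSET),
	};
	return loc;
}

int main(void)
{
	struct rx_irq_loc loc = rx_done_irq_loc(19);

	/* queue 19 -> INT_IDX2, bit 3 */
	printf("reg IDX%d, bit mask 0x%x\n", loc.reg_idx, loc.bit);
	return 0;
}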
@@ -1608,6 +785,7 @@ static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
FIELD_PREP(RX_RING_THR_MASK, thr));
airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
+ airoha_qdma_set(qdma, REG_RX_SCATTER_CFG(qid), RX_RING_SG_EN_MASK);
airoha_qdma_fill_rx_queue(q);
@@ -1714,19 +892,13 @@ static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
DMA_TO_DEVICE);
- memset(e, 0, sizeof(*e));
+ e->dma_addr = 0;
+ list_add_tail(&e->list, &q->tx_list);
+
WRITE_ONCE(desc->msg0, 0);
WRITE_ONCE(desc->msg1, 0);
q->queued--;
- /* completion ring can report out-of-order indexes if hw QoS
- * is enabled and packets with different priority are queued
- * to same DMA ring. Take into account possible out-of-order
- * reports incrementing DMA ring tail pointer
- */
- while (q->tail != q->head && !q->entry[q->tail].dma_addr)
- q->tail = (q->tail + 1) % q->ndesc;
-
if (skb) {
u16 queue = skb_get_queue_mapping(skb);
struct netdev_queue *txq;
@@ -1754,7 +926,7 @@ unlock:
}
if (done < budget && napi_complete(napi))
- airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0,
+ airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
TX_DONE_INT_MASK(id));
return done;
@@ -1771,6 +943,7 @@ static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
q->ndesc = size;
q->qdma = qdma;
q->free_thr = 1 + MAX_SKB_FRAGS;
+ INIT_LIST_HEAD(&q->tx_list);
q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
GFP_KERNEL);
@@ -1783,17 +956,21 @@ static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
return -ENOMEM;
for (i = 0; i < q->ndesc; i++) {
- u32 val;
+ u32 val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1);
- val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1);
+ list_add_tail(&q->entry[i].list, &q->tx_list);
WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
}
+ /* xmit ring drop default setting */
+ airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(qid),
+ TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK);
+
airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
- FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
+ FIELD_PREP(TX_RING_CPU_IDX_MASK, 0));
airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
- FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head));
+ FIELD_PREP(TX_RING_DMA_IDX_MASK, 0));
return 0;
}
@@ -1849,17 +1026,21 @@ static int airoha_qdma_init_tx(struct airoha_qdma *qdma)
static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
{
struct airoha_eth *eth = q->qdma->eth;
+ int i;
spin_lock_bh(&q->lock);
- while (q->queued) {
- struct airoha_queue_entry *e = &q->entry[q->tail];
+ for (i = 0; i < q->ndesc; i++) {
+ struct airoha_queue_entry *e = &q->entry[i];
+
+ if (!e->dma_addr)
+ continue;
dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
DMA_TO_DEVICE);
dev_kfree_skb_any(e->skb);
+ e->dma_addr = 0;
e->skb = NULL;
-
- q->tail = (q->tail + 1) % q->ndesc;
+ list_add_tail(&e->list, &q->tx_list);
q->queued--;
}
spin_unlock_bh(&q->lock);
@@ -1867,37 +1048,64 @@ static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
{
+ int size, index, num_desc = HW_DSCP_NUM;
struct airoha_eth *eth = qdma->eth;
+ int id = qdma - &eth->qdma[0];
+ u32 status, buf_size;
dma_addr_t dma_addr;
- u32 status;
- int size;
+ const char *name;
- size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
- qdma->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
- GFP_KERNEL);
- if (!qdma->hfwd.desc)
+ name = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d-buf", id);
+ if (!name)
return -ENOMEM;
- airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
+ buf_size = id ? AIROHA_MAX_PACKET_SIZE / 2 : AIROHA_MAX_PACKET_SIZE;
+ index = of_property_match_string(eth->dev->of_node,
+ "memory-region-names", name);
+ if (index >= 0) {
+ struct reserved_mem *rmem;
+ struct device_node *np;
- size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
- qdma->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
- GFP_KERNEL);
- if (!qdma->hfwd.q)
- return -ENOMEM;
+ /* Consume the reserved memory for the hw forwarding buffer queue if
+ * it is available in the DTS
+ */
+ np = of_parse_phandle(eth->dev->of_node, "memory-region",
+ index);
+ if (!np)
+ return -ENODEV;
+
+ rmem = of_reserved_mem_lookup(np);
+ of_node_put(np);
+ dma_addr = rmem->base;
+ /* Compute the number of hw descriptors according to the
+ * reserved memory size and the payload buffer size
+ */
+ num_desc = div_u64(rmem->size, buf_size);
+ } else {
+ size = buf_size * num_desc;
+ if (!dmam_alloc_coherent(eth->dev, size, &dma_addr,
+ GFP_KERNEL))
+ return -ENOMEM;
+ }
airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
+ size = num_desc * sizeof(struct airoha_qdma_fwd_desc);
+ if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL))
+ return -ENOMEM;
+
+ airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
+ /* QDMA0: 2KB. QDMA1: 1KB */
airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
- FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0));
+ FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, !!id));
airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
HW_FWD_DESC_NUM_MASK,
- FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
- LMGR_INIT_START);
+ FIELD_PREP(HW_FWD_DESC_NUM_MASK, num_desc) |
+ LMGR_INIT_START | LMGR_SRAM_MODE_MASK);
return read_poll_timeout(airoha_qdma_rr, status,
!(status & LMGR_INIT_START), USEC_PER_MSEC,
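
Sketch of the hw-forwarding buffer sizing logic above: QDMA0 uses 2 KiB payload buffers, QDMA1 uses 1 KiB, and when a reserved-memory region is present the descriptor count is derived from its size instead of the fixed HW_DSCP_NUM. The constants mirror the ones used in the patch.

#include <stdint.h>
#include <stdio.h>

#define AIROHA_MAX_PACKET_SIZE 2048
#define HW_DSCP_NUM 2048

static uint32_t hfwd_num_desc(int qdma_id, uint64_t rmem_size)
{
	uint32_t buf_size = qdma_id ? AIROHA_MAX_PACKET_SIZE / 2
				    : AIROHA_MAX_PACKET_SIZE;

	/* no reserved region: fall back to the fixed descriptor count */
	if (!rmem_size)
		return HW_DSCP_NUM;

	return rmem_size / buf_size;
}

int main(void)
{
	/* 4 MiB reserved region on QDMA0 -> 2048 descriptors */
	printf("%u\n", hfwd_num_desc(0, 4 * 1024 * 1024));
	/* same region on QDMA1 (1 KiB buffers) -> 4096 descriptors */
	printf("%u\n", hfwd_num_desc(1, 4 * 1024 * 1024));
	return 0;
}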
@@ -1955,18 +1163,49 @@ static void airoha_qdma_init_qos(struct airoha_qdma *qdma)
FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
}
+static void airoha_qdma_init_qos_stats(struct airoha_qdma *qdma)
+{
+ int i;
+
+ for (i = 0; i < AIROHA_NUM_QOS_CHANNELS; i++) {
+ /* Tx-cpu transferred count */
+ airoha_qdma_wr(qdma, REG_CNTR_VAL(i << 1), 0);
+ airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1),
+ CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
+ CNTR_ALL_DSCP_RING_EN_MASK |
+ FIELD_PREP(CNTR_CHAN_MASK, i));
+ /* Tx-fwd transferred count */
+ airoha_qdma_wr(qdma, REG_CNTR_VAL((i << 1) + 1), 0);
+ airoha_qdma_wr(qdma, REG_CNTR_CFG((i << 1) + 1),
+ CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
+ CNTR_ALL_DSCP_RING_EN_MASK |
+ FIELD_PREP(CNTR_SRC_MASK, 1) |
+ FIELD_PREP(CNTR_CHAN_MASK, i));
+ }
+}
+
static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
{
int i;
- /* clear pending irqs */
- for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++)
+ for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
+ /* clear pending irqs */
airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
-
- /* setup irqs */
- airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
- airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
- airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
+ /* setup rx irqs */
+ airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX0,
+ INT_RX0_MASK(RX_IRQ_BANK_PIN_MASK(i)));
+ airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX1,
+ INT_RX1_MASK(RX_IRQ_BANK_PIN_MASK(i)));
+ airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX2,
+ INT_RX2_MASK(RX_IRQ_BANK_PIN_MASK(i)));
+ airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX3,
+ INT_RX3_MASK(RX_IRQ_BANK_PIN_MASK(i)));
+ }
+ /* setup tx irqs */
+ airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
+ TX_COHERENT_LOW_INT_MASK | INT_TX_MASK);
+ airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX4,
+ TX_COHERENT_HIGH_INT_MASK);
/* setup irq binding */
for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
@@ -1982,7 +1221,6 @@ static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
}
airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
- GLOBAL_CFG_RX_2B_OFFSET_MASK |
FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
GLOBAL_CFG_CPU_TXR_RR_MASK |
GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK |
@@ -2005,36 +1243,46 @@ static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG,
TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);
+ airoha_qdma_init_qos_stats(qdma);
return 0;
}
static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
{
- struct airoha_qdma *qdma = dev_instance;
- u32 intr[ARRAY_SIZE(qdma->irqmask)];
+ struct airoha_irq_bank *irq_bank = dev_instance;
+ struct airoha_qdma *qdma = irq_bank->qdma;
+ u32 rx_intr_mask = 0, rx_intr1, rx_intr2;
+ u32 intr[ARRAY_SIZE(irq_bank->irqmask)];
int i;
- for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) {
+ for (i = 0; i < ARRAY_SIZE(intr); i++) {
intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
- intr[i] &= qdma->irqmask[i];
+ intr[i] &= irq_bank->irqmask[i];
airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
}
if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state))
return IRQ_NONE;
- if (intr[1] & RX_DONE_INT_MASK) {
- airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX1,
- RX_DONE_INT_MASK);
+ rx_intr1 = intr[1] & RX_DONE_LOW_INT_MASK;
+ if (rx_intr1) {
+ airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX1, rx_intr1);
+ rx_intr_mask |= rx_intr1;
+ }
- for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
- if (!qdma->q_rx[i].ndesc)
- continue;
+ rx_intr2 = intr[2] & RX_DONE_HIGH_INT_MASK;
+ if (rx_intr2) {
+ airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX2, rx_intr2);
+ rx_intr_mask |= (rx_intr2 << 16);
+ }
- if (intr[1] & BIT(i))
- napi_schedule(&qdma->q_rx[i].napi);
- }
+ for (i = 0; rx_intr_mask && i < ARRAY_SIZE(qdma->q_rx); i++) {
+ if (!qdma->q_rx[i].ndesc)
+ continue;
+
+ if (rx_intr_mask & BIT(i))
+ napi_schedule(&qdma->q_rx[i].napi);
}
if (intr[0] & INT_TX_MASK) {
@@ -2042,7 +1290,7 @@ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
if (!(intr[0] & TX_DONE_INT_MASK(i)))
continue;
- airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX0,
+ airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX0,
TX_DONE_INT_MASK(i));
napi_schedule(&qdma->q_tx_irq[i].napi);
}
@@ -2051,6 +1299,39 @@ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
return IRQ_HANDLED;
}
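
Illustrative sketch of how the handler above folds the two RX "done" status words into one per-queue mask: bits from INT_IDX1 cover the low queues and bits from INT_IDX2 are shifted up by 16. The 16-bit split is an assumption of this sketch.

#include <stdint.h>
#include <stdio.h>

static uint32_t rx_queue_mask(uint32_t intr_idx1, uint32_t intr_idx2)
{
	uint32_t mask = 0;

	mask |= intr_idx1 & 0xffff;		/* RX_DONE_LOW_INT_MASK  */
	mask |= (intr_idx2 & 0xffff) << 16;	/* RX_DONE_HIGH_INT_MASK */

	return mask;
}

int main(void)
{
	/* queue 3 (low word) and queue 19 (bit 3 of the high word) pending */
	uint32_t mask = rx_queue_mask(1u << 3, 1u << 3);
	int qid;

	for (qid = 0; qid < 32; qid++)
		if (mask & (1u << qid))
			printf("schedule NAPI for RX queue %d\n", qid);
	return 0;
}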
+static int airoha_qdma_init_irq_banks(struct platform_device *pdev,
+ struct airoha_qdma *qdma)
+{
+ struct airoha_eth *eth = qdma->eth;
+ int i, id = qdma - &eth->qdma[0];
+
+ for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
+ struct airoha_irq_bank *irq_bank = &qdma->irq_banks[i];
+ int err, irq_index = 4 * id + i;
+ const char *name;
+
+ spin_lock_init(&irq_bank->irq_lock);
+ irq_bank->qdma = qdma;
+
+ irq_bank->irq = platform_get_irq(pdev, irq_index);
+ if (irq_bank->irq < 0)
+ return irq_bank->irq;
+
+ name = devm_kasprintf(eth->dev, GFP_KERNEL,
+ KBUILD_MODNAME ".%d", irq_index);
+ if (!name)
+ return -ENOMEM;
+
+ err = devm_request_irq(eth->dev, irq_bank->irq,
+ airoha_irq_handler, IRQF_SHARED, name,
+ irq_bank);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int airoha_qdma_init(struct platform_device *pdev,
struct airoha_eth *eth,
struct airoha_qdma *qdma)
@@ -2058,9 +1339,7 @@ static int airoha_qdma_init(struct platform_device *pdev,
int err, id = qdma - &eth->qdma[0];
const char *res;
- spin_lock_init(&qdma->irq_lock);
qdma->eth = eth;
-
res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id);
if (!res)
return -ENOMEM;
@@ -2070,12 +1349,7 @@ static int airoha_qdma_init(struct platform_device *pdev,
return dev_err_probe(eth->dev, PTR_ERR(qdma->regs),
"failed to iomap qdma%d regs\n", id);
- qdma->irq = platform_get_irq(pdev, 4 * id);
- if (qdma->irq < 0)
- return qdma->irq;
-
- err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler,
- IRQF_SHARED, KBUILD_MODNAME, qdma);
+ err = airoha_qdma_init_irq_banks(pdev, qdma);
if (err)
return err;
@@ -2100,8 +1374,7 @@ static int airoha_hw_init(struct platform_device *pdev,
int err, i;
/* disable xsi */
- err = reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts),
- eth->xsi_rsts);
+ err = reset_control_bulk_assert(eth->soc->num_xsi_rsts, eth->xsi_rsts);
if (err)
return err;
@@ -2125,6 +1398,10 @@ static int airoha_hw_init(struct platform_device *pdev,
return err;
}
+ err = airoha_ppe_init(eth);
+ if (err)
+ return err;
+
set_bit(DEV_STATE_INITIALIZED, &eth->state);
return 0;
@@ -2138,17 +1415,14 @@ static void airoha_hw_cleanup(struct airoha_qdma *qdma)
if (!qdma->q_rx[i].ndesc)
continue;
- napi_disable(&qdma->q_rx[i].napi);
netif_napi_del(&qdma->q_rx[i].napi);
airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
if (qdma->q_rx[i].page_pool)
page_pool_destroy(qdma->q_rx[i].page_pool);
}
- for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
- napi_disable(&qdma->q_tx_irq[i].napi);
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
netif_napi_del(&qdma->q_tx_irq[i].napi);
- }
for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
if (!qdma->q_tx[i].ndesc)
@@ -2173,6 +1447,21 @@ static void airoha_qdma_start_napi(struct airoha_qdma *qdma)
}
}
+static void airoha_qdma_stop_napi(struct airoha_qdma *qdma)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
+ napi_disable(&qdma->q_tx_irq[i].napi);
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ if (!qdma->q_rx[i].ndesc)
+ continue;
+
+ napi_disable(&qdma->q_rx[i].napi);
+ }
+}
+
static void airoha_update_hw_stats(struct airoha_gdm_port *port)
{
struct airoha_eth *eth = port->qdma->eth;
@@ -2319,12 +1608,12 @@ static void airoha_update_hw_stats(struct airoha_gdm_port *port)
static int airoha_dev_open(struct net_device *dev)
{
+ int err, len = ETH_HLEN + dev->mtu + ETH_FCS_LEN;
struct airoha_gdm_port *port = netdev_priv(dev);
struct airoha_qdma *qdma = port->qdma;
- int err;
netif_tx_start_all_queues(dev);
- err = airoha_set_gdm_ports(qdma->eth, true);
+ err = airoha_set_vip_for_gdm_port(port, true);
if (err)
return err;
@@ -2335,9 +1624,15 @@ static int airoha_dev_open(struct net_device *dev)
airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
GDM_STAG_EN_MASK);
+ airoha_fe_rmw(qdma->eth, REG_GDM_LEN_CFG(port->id),
+ GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
+ FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
+ FIELD_PREP(GDM_LONG_LEN_MASK, len));
+
airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
GLOBAL_CFG_TX_DMA_EN_MASK |
GLOBAL_CFG_RX_DMA_EN_MASK);
+ atomic_inc(&qdma->users);
return 0;
}
@@ -2349,20 +1644,24 @@ static int airoha_dev_stop(struct net_device *dev)
int i, err;
netif_tx_disable(dev);
- err = airoha_set_gdm_ports(qdma->eth, false);
+ err = airoha_set_vip_for_gdm_port(port, false);
if (err)
return err;
- airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
- GLOBAL_CFG_TX_DMA_EN_MASK |
- GLOBAL_CFG_RX_DMA_EN_MASK);
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++)
+ netdev_tx_reset_subqueue(dev, i);
- for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
- if (!qdma->q_tx[i].ndesc)
- continue;
+ if (atomic_dec_and_test(&qdma->users)) {
+ airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
+ GLOBAL_CFG_TX_DMA_EN_MASK |
+ GLOBAL_CFG_RX_DMA_EN_MASK);
- airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
- netdev_tx_reset_subqueue(dev, i);
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+ if (!qdma->q_tx[i].ndesc)
+ continue;
+
+ airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
+ }
}
return 0;
@@ -2382,12 +1681,106 @@ static int airoha_dev_set_macaddr(struct net_device *dev, void *p)
return 0;
}
+static int airoha_set_gdm2_loopback(struct airoha_gdm_port *port)
+{
+ struct airoha_eth *eth = port->qdma->eth;
+ u32 val, pse_port, chan, nbq;
+ int src_port;
+
+ /* Forward the traffic to the proper GDM port */
+ pse_port = port->id == AIROHA_GDM3_IDX ? FE_PSE_PORT_GDM3
+ : FE_PSE_PORT_GDM4;
+ airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(2), pse_port);
+ airoha_fe_clear(eth, REG_GDM_FWD_CFG(2), GDM_STRIP_CRC_MASK);
+
+ /* Enable GDM2 loopback */
+ airoha_fe_wr(eth, REG_GDM_TXCHN_EN(2), 0xffffffff);
+ airoha_fe_wr(eth, REG_GDM_RXCHN_EN(2), 0xffff);
+
+ chan = port->id == AIROHA_GDM3_IDX ? airoha_is_7581(eth) ? 4 : 3 : 0;
+ airoha_fe_rmw(eth, REG_GDM_LPBK_CFG(2),
+ LPBK_CHAN_MASK | LPBK_MODE_MASK | LPBK_EN_MASK,
+ FIELD_PREP(LPBK_CHAN_MASK, chan) |
+ LBK_GAP_MODE_MASK | LBK_LEN_MODE_MASK |
+ LBK_CHAN_MODE_MASK | LPBK_EN_MASK);
+ airoha_fe_rmw(eth, REG_GDM_LEN_CFG(2),
+ GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
+ FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
+ FIELD_PREP(GDM_LONG_LEN_MASK, AIROHA_MAX_MTU));
+
+ /* Disable VIP and IFC for GDM2 */
+ airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, BIT(2));
+ airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, BIT(2));
+
+ /* XXX: handle XSI_USB_PORT and XSI_PCE1_PORT */
+ nbq = port->id == AIROHA_GDM3_IDX && airoha_is_7581(eth) ? 4 : 0;
+ src_port = eth->soc->ops.get_src_port_id(port, nbq);
+ if (src_port < 0)
+ return src_port;
+
+ airoha_fe_rmw(eth, REG_FE_WAN_PORT,
+ WAN1_EN_MASK | WAN1_MASK | WAN0_MASK,
+ FIELD_PREP(WAN0_MASK, src_port));
+ val = src_port & SP_CPORT_DFT_MASK;
+ airoha_fe_rmw(eth,
+ REG_SP_DFT_CPORT(src_port >> fls(SP_CPORT_DFT_MASK)),
+ SP_CPORT_MASK(val),
+ FE_PSE_PORT_CDM2 << __ffs(SP_CPORT_MASK(val)));
+
+ if (port->id != AIROHA_GDM3_IDX && airoha_is_7581(eth))
+ airoha_fe_rmw(eth, REG_SRC_PORT_FC_MAP6,
+ FC_ID_OF_SRC_PORT24_MASK,
+ FIELD_PREP(FC_ID_OF_SRC_PORT24_MASK, 2));
+
+ return 0;
+}
+
static int airoha_dev_init(struct net_device *dev)
{
struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_qdma *qdma = port->qdma;
+ struct airoha_eth *eth = qdma->eth;
+ u32 pse_port, fe_cpu_port;
+ u8 ppe_id;
airoha_set_macaddr(port, dev->dev_addr);
+ switch (port->id) {
+ case 3:
+ case 4:
+ /* If GDM2 is active we can't enable loopback */
+ if (!eth->ports[1]) {
+ int err;
+
+ err = airoha_set_gdm2_loopback(port);
+ if (err)
+ return err;
+ }
+ fallthrough;
+ case 2:
+ if (airoha_ppe_is_enabled(eth, 1)) {
+ /* For PPE2 always use secondary cpu port. */
+ fe_cpu_port = FE_PSE_PORT_CDM2;
+ pse_port = FE_PSE_PORT_PPE2;
+ break;
+ }
+ fallthrough;
+ default: {
+ u8 qdma_id = qdma - &eth->qdma[0];
+
+ /* For PPE1 select cpu port according to the running QDMA. */
+ fe_cpu_port = qdma_id ? FE_PSE_PORT_CDM2 : FE_PSE_PORT_CDM1;
+ pse_port = FE_PSE_PORT_PPE1;
+ break;
+ }
+ }
+
+ airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(port->id), pse_port);
+ ppe_id = pse_port == FE_PSE_PORT_PPE2 ? 1 : 0;
+ airoha_fe_rmw(eth, REG_PPE_DFT_CPORT0(ppe_id),
+ DFT_CPORT_MASK(port->id),
+ fe_cpu_port << __ffs(DFT_CPORT_MASK(port->id)));
+
return 0;
}
@@ -2413,21 +1806,121 @@ static void airoha_dev_get_stats64(struct net_device *dev,
} while (u64_stats_fetch_retry(&port->stats.syncp, start));
}
+static int airoha_dev_change_mtu(struct net_device *dev, int mtu)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_eth *eth = port->qdma->eth;
+ u32 len = ETH_HLEN + mtu + ETH_FCS_LEN;
+
+ airoha_fe_rmw(eth, REG_GDM_LEN_CFG(port->id),
+ GDM_LONG_LEN_MASK,
+ FIELD_PREP(GDM_LONG_LEN_MASK, len));
+ WRITE_ONCE(dev->mtu, mtu);
+
+ return 0;
+}
+
+static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ int queue, channel;
+
+ /* For DSA devices select the QoS channel according to the DSA user
+ * port index; rely on the port id otherwise. Select the QoS queue
+ * based on the skb priority.
+ */
+ channel = netdev_uses_dsa(dev) ? skb_get_queue_mapping(skb) : port->id;
+ channel = channel % AIROHA_NUM_QOS_CHANNELS;
+ queue = (skb->priority - 1) % AIROHA_NUM_QOS_QUEUES; /* QoS queue */
+ queue = channel * AIROHA_NUM_QOS_QUEUES + queue;
+
+ return queue < dev->num_tx_queues ? queue : 0;
+}
+
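
Standalone model of the queue selection math above: each GDM port (or, for DSA, each user port) owns one QoS channel of AIROHA_NUM_QOS_QUEUES hardware queues, and the skb priority picks the queue inside that channel. Constants match the header added by this patch.

#include <stdio.h>

#define AIROHA_NUM_QOS_CHANNELS 4
#define AIROHA_NUM_QOS_QUEUES 8

static unsigned int select_tx_queue(unsigned int channel_hint,
				    unsigned int skb_priority,
				    unsigned int num_tx_queues)
{
	unsigned int channel = channel_hint % AIROHA_NUM_QOS_CHANNELS;
	unsigned int queue = (skb_priority - 1) % AIROHA_NUM_QOS_QUEUES;

	queue = channel * AIROHA_NUM_QOS_QUEUES + queue;
	return queue < num_tx_queues ? queue : 0;
}

int main(void)
{
	/* port id 2, skb priority 5 -> channel 2, queue 4 -> ring 20 */
	printf("%u\n", select_tx_queue(2, 5, 32));
	return 0;
}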
+static u32 airoha_get_dsa_tag(struct sk_buff *skb, struct net_device *dev)
+{
+#if IS_ENABLED(CONFIG_NET_DSA)
+ struct ethhdr *ehdr;
+ u8 xmit_tpid;
+ u16 tag;
+
+ if (!netdev_uses_dsa(dev))
+ return 0;
+
+ if (dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
+ return 0;
+
+ if (skb_cow_head(skb, 0))
+ return 0;
+
+ ehdr = (struct ethhdr *)skb->data;
+ tag = be16_to_cpu(ehdr->h_proto);
+ xmit_tpid = tag >> 8;
+
+ switch (xmit_tpid) {
+ case MTK_HDR_XMIT_TAGGED_TPID_8100:
+ ehdr->h_proto = cpu_to_be16(ETH_P_8021Q);
+ tag &= ~(MTK_HDR_XMIT_TAGGED_TPID_8100 << 8);
+ break;
+ case MTK_HDR_XMIT_TAGGED_TPID_88A8:
+ ehdr->h_proto = cpu_to_be16(ETH_P_8021AD);
+ tag &= ~(MTK_HDR_XMIT_TAGGED_TPID_88A8 << 8);
+ break;
+ default:
+ /* PPE module requires untagged DSA packets to work properly,
+ * so move DSA tag to DMA descriptor.
+ */
+ memmove(skb->data + MTK_HDR_LEN, skb->data, 2 * ETH_ALEN);
+ __skb_pull(skb, MTK_HDR_LEN);
+ break;
+ }
+
+ return tag;
+#else
+ return 0;
+#endif
+}
+
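
Simplified model of the MTK DSA tag handling above: the 16-bit value in the Ethernet type field carries the DSA tag, and its top byte selects the TPID to restore (0x8100 / 0x88a8) before the tag is moved into the TX DMA descriptor. The TPID codes mirror the defines added in airoha_eth.h; the rest is a userspace approximation.

#include <stdint.h>
#include <stdio.h>

#define MTK_HDR_XMIT_TAGGED_TPID_8100 1
#define MTK_HDR_XMIT_TAGGED_TPID_88A8 2

/* Returns the tag to place in the descriptor and, via *tpid, the ethertype
 * to rewrite into the frame (0 means the 4-byte DSA header is stripped).
 */
static uint16_t extract_dsa_tag(uint16_t proto_field, uint16_t *tpid)
{
	uint16_t tag = proto_field;

	switch (tag >> 8) {
	case MTK_HDR_XMIT_TAGGED_TPID_8100:
		*tpid = 0x8100;
		return tag & ~(MTK_HDR_XMIT_TAGGED_TPID_8100 << 8);
	case MTK_HDR_XMIT_TAGGED_TPID_88A8:
		*tpid = 0x88a8;
		return tag & ~(MTK_HDR_XMIT_TAGGED_TPID_88A8 << 8);
	default:
		*tpid = 0;	/* untagged: header is pulled from the frame */
		return tag;
	}
}

int main(void)
{
	uint16_t tpid, tag = extract_dsa_tag(0x0105, &tpid);

	printf("tag 0x%02x, rewritten ethertype 0x%04x\n", tag, tpid);
	return 0;
}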
+static int airoha_get_fe_port(struct airoha_gdm_port *port)
+{
+ struct airoha_qdma *qdma = port->qdma;
+ struct airoha_eth *eth = qdma->eth;
+
+ switch (eth->soc->version) {
+ case 0x7583:
+ return port->id == AIROHA_GDM3_IDX ? FE_PSE_PORT_GDM3
+ : port->id;
+ case 0x7581:
+ default:
+ return port->id == AIROHA_GDM4_IDX ? FE_PSE_PORT_GDM4
+ : port->id;
+ }
+}
+
static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- struct skb_shared_info *sinfo = skb_shinfo(skb);
struct airoha_gdm_port *port = netdev_priv(dev);
- u32 msg0 = 0, msg1, len = skb_headlen(skb);
- int i, qid = skb_get_queue_mapping(skb);
struct airoha_qdma *qdma = port->qdma;
- u32 nr_frags = 1 + sinfo->nr_frags;
+ u32 nr_frags, tag, msg0, msg1, len;
+ struct airoha_queue_entry *e;
struct netdev_queue *txq;
struct airoha_queue *q;
- void *data = skb->data;
+ LIST_HEAD(tx_list);
+ void *data;
+ int i, qid;
u16 index;
u8 fport;
+ qid = skb_get_queue_mapping(skb) % ARRAY_SIZE(qdma->q_tx);
+ tag = airoha_get_dsa_tag(skb, dev);
+
+ msg0 = FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK,
+ qid / AIROHA_NUM_QOS_QUEUES) |
+ FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK,
+ qid % AIROHA_NUM_QOS_QUEUES) |
+ FIELD_PREP(QDMA_ETH_TXMSG_SP_TAG_MASK, tag);
if (skb->ip_summed == CHECKSUM_PARTIAL)
msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) |
FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) |
@@ -2438,15 +1931,16 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
if (skb_cow_head(skb, 0))
goto error;
- if (sinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
- __be16 csum = cpu_to_be16(sinfo->gso_size);
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 |
+ SKB_GSO_TCPV6)) {
+ __be16 csum = cpu_to_be16(skb_shinfo(skb)->gso_size);
tcp_hdr(skb)->check = (__force __sum16)csum;
msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TSO_MASK, 1);
}
}
- fport = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
+ fport = airoha_get_fe_port(port);
msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
@@ -2457,18 +1951,25 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
spin_lock_bh(&q->lock);
txq = netdev_get_tx_queue(dev, qid);
- if (q->queued + nr_frags > q->ndesc) {
+ nr_frags = 1 + skb_shinfo(skb)->nr_frags;
+
+ if (q->queued + nr_frags >= q->ndesc) {
/* not enough space in the queue */
netif_tx_stop_queue(txq);
spin_unlock_bh(&q->lock);
return NETDEV_TX_BUSY;
}
- index = q->head;
+ len = skb_headlen(skb);
+ data = skb->data;
+
+ e = list_first_entry(&q->tx_list, struct airoha_queue_entry,
+ list);
+ index = e - q->entry;
+
for (i = 0; i < nr_frags; i++) {
struct airoha_qdma_desc *desc = &q->desc[index];
- struct airoha_queue_entry *e = &q->entry[index];
- skb_frag_t *frag = &sinfo->frags[i];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
dma_addr_t addr;
u32 val;
@@ -2477,7 +1978,14 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
if (unlikely(dma_mapping_error(dev->dev.parent, addr)))
goto error_unmap;
- index = (index + 1) % q->ndesc;
+ list_move_tail(&e->list, &tx_list);
+ e->skb = i ? NULL : skb;
+ e->dma_addr = addr;
+ e->dma_len = len;
+
+ e = list_first_entry(&q->tx_list, struct airoha_queue_entry,
+ list);
+ index = e - q->entry;
val = FIELD_PREP(QDMA_DESC_LEN_MASK, len);
if (i < nr_frags - 1)
@@ -2490,15 +1998,9 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
WRITE_ONCE(desc->msg1, cpu_to_le32(msg1));
WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff));
- e->skb = i ? NULL : skb;
- e->dma_addr = addr;
- e->dma_len = len;
-
data = skb_frag_address(frag);
len = skb_frag_size(frag);
}
-
- q->head = index;
q->queued += i;
skb_tx_timestamp(skb);
@@ -2507,7 +2009,7 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
if (netif_xmit_stopped(txq) || !netdev_xmit_more())
airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid),
TX_RING_CPU_IDX_MASK,
- FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
+ FIELD_PREP(TX_RING_CPU_IDX_MASK, index));
if (q->ndesc - q->queued < q->free_thr)
netif_tx_stop_queue(txq);
@@ -2517,10 +2019,13 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
error_unmap:
- for (i--; i >= 0; i--) {
- index = (q->head + i) % q->ndesc;
- dma_unmap_single(dev->dev.parent, q->entry[index].dma_addr,
- q->entry[index].dma_len, DMA_TO_DEVICE);
+ while (!list_empty(&tx_list)) {
+ e = list_first_entry(&tx_list, struct airoha_queue_entry,
+ list);
+ dma_unmap_single(dev->dev.parent, e->dma_addr, e->dma_len,
+ DMA_TO_DEVICE);
+ e->dma_addr = 0;
+ list_move_tail(&e->list, &q->tx_list);
}
spin_unlock_bh(&q->lock);
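
Illustrative model (plain C, not driver code) of the free-entry list introduced for the TX rings: instead of advancing head/tail indexes, xmit pops entries from q->tx_list and completion pushes them back, so out-of-order completions no longer need the tail-skipping loop removed by this patch. The driver uses a FIFO kernel list; a LIFO stack is used here only for brevity.

#include <stdio.h>

struct entry {
	struct entry *next;
	int busy;
};

struct txq {
	struct entry *free_head;	/* stand-in for the kernel list_head */
	struct entry slots[4];
};

static void txq_init(struct txq *q)
{
	int i;

	q->free_head = NULL;
	for (i = 3; i >= 0; i--) {	/* push all slots on the free list */
		q->slots[i].next = q->free_head;
		q->slots[i].busy = 0;
		q->free_head = &q->slots[i];
	}
}

static struct entry *txq_alloc(struct txq *q)	/* xmit path */
{
	struct entry *e = q->free_head;

	if (!e)
		return NULL;		/* ring full */
	q->free_head = e->next;
	e->busy = 1;
	return e;
}

static void txq_free(struct txq *q, struct entry *e)	/* completion path */
{
	e->busy = 0;
	e->next = q->free_head;		/* completion order is irrelevant */
	q->free_head = e;
}

int main(void)
{
	struct txq q;
	struct entry *a, *b;

	txq_init(&q);
	a = txq_alloc(&q);
	b = txq_alloc(&q);

	txq_free(&q, b);		/* complete out of order */
	txq_free(&q, a);

	a = txq_alloc(&q);
	printf("first reused slot: %d\n", (int)(a - q.slots));
	return 0;
}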
@@ -2550,8 +2055,12 @@ static void airoha_ethtool_get_mac_stats(struct net_device *dev,
airoha_update_hw_stats(port);
do {
start = u64_stats_fetch_begin(&port->stats.syncp);
+ stats->FramesTransmittedOK = port->stats.tx_ok_pkts;
+ stats->OctetsTransmittedOK = port->stats.tx_ok_bytes;
stats->MulticastFramesXmittedOK = port->stats.tx_multicast;
stats->BroadcastFramesXmittedOK = port->stats.tx_broadcast;
+ stats->FramesReceivedOK = port->stats.rx_ok_pkts;
+ stats->OctetsReceivedOK = port->stats.rx_ok_bytes;
stats->BroadcastFramesReceivedOK = port->stats.rx_broadcast;
} while (u64_stats_fetch_retry(&port->stats.syncp, start));
}
@@ -2597,28 +2106,758 @@ airoha_ethtool_get_rmon_stats(struct net_device *dev,
} while (u64_stats_fetch_retry(&port->stats.syncp, start));
}
+static int airoha_qdma_set_chan_tx_sched(struct airoha_gdm_port *port,
+ int channel, enum tx_sched_mode mode,
+ const u16 *weights, u8 n_weights)
+{
+ int i;
+
+ for (i = 0; i < AIROHA_NUM_TX_RING; i++)
+ airoha_qdma_clear(port->qdma, REG_QUEUE_CLOSE_CFG(channel),
+ TXQ_DISABLE_CHAN_QUEUE_MASK(channel, i));
+
+ for (i = 0; i < n_weights; i++) {
+ u32 status;
+ int err;
+
+ airoha_qdma_wr(port->qdma, REG_TXWRR_WEIGHT_CFG,
+ TWRR_RW_CMD_MASK |
+ FIELD_PREP(TWRR_CHAN_IDX_MASK, channel) |
+ FIELD_PREP(TWRR_QUEUE_IDX_MASK, i) |
+ FIELD_PREP(TWRR_VALUE_MASK, weights[i]));
+ err = read_poll_timeout(airoha_qdma_rr, status,
+ status & TWRR_RW_CMD_DONE,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC,
+ true, port->qdma,
+ REG_TXWRR_WEIGHT_CFG);
+ if (err)
+ return err;
+ }
+
+ airoha_qdma_rmw(port->qdma, REG_CHAN_QOS_MODE(channel >> 3),
+ CHAN_QOS_MODE_MASK(channel),
+ mode << __ffs(CHAN_QOS_MODE_MASK(channel)));
+
+ return 0;
+}
+
+static int airoha_qdma_set_tx_prio_sched(struct airoha_gdm_port *port,
+ int channel)
+{
+ static const u16 w[AIROHA_NUM_QOS_QUEUES] = {};
+
+ return airoha_qdma_set_chan_tx_sched(port, channel, TC_SCH_SP, w,
+ ARRAY_SIZE(w));
+}
+
+static int airoha_qdma_set_tx_ets_sched(struct airoha_gdm_port *port,
+ int channel,
+ struct tc_ets_qopt_offload *opt)
+{
+ struct tc_ets_qopt_offload_replace_params *p = &opt->replace_params;
+ enum tx_sched_mode mode = TC_SCH_SP;
+ u16 w[AIROHA_NUM_QOS_QUEUES] = {};
+ int i, nstrict = 0;
+
+ if (p->bands > AIROHA_NUM_QOS_QUEUES)
+ return -EINVAL;
+
+ for (i = 0; i < p->bands; i++) {
+ if (!p->quanta[i])
+ nstrict++;
+ }
+
+ /* this configuration is not supported by the hw */
+ if (nstrict == AIROHA_NUM_QOS_QUEUES - 1)
+ return -EINVAL;
+
+ /* EN7581 SoC supports fixed QoS band priority where WRR queues have
+ * lowest priorities with respect to SP ones.
+ * e.g: WRR0, WRR1, .., WRRm, SP0, SP1, .., SPn
+ */
+ for (i = 0; i < nstrict; i++) {
+ if (p->priomap[p->bands - i - 1] != i)
+ return -EINVAL;
+ }
+
+ for (i = 0; i < p->bands - nstrict; i++) {
+ if (p->priomap[i] != nstrict + i)
+ return -EINVAL;
+
+ w[i] = p->weights[nstrict + i];
+ }
+
+ if (!nstrict)
+ mode = TC_SCH_WRR8;
+ else if (nstrict < AIROHA_NUM_QOS_QUEUES - 1)
+ mode = nstrict + 1;
+
+ return airoha_qdma_set_chan_tx_sched(port, channel, mode, w,
+ ARRAY_SIZE(w));
+}
+
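
Userspace sketch of the ETS offload constraints enforced above: strict bands must occupy the highest priorities, the all-but-one-strict layout is rejected, and the scheduler mode is derived from the number of strict bands (TC_SCH_WRR8 when none, TC_SCH_SP when all are strict). This only models the validation, not the register programming.

#include <stdio.h>

#define NUM_QUEUES 8

enum tx_sched_mode { TC_SCH_WRR8, TC_SCH_SP, TC_SCH_WRR7, TC_SCH_WRR6,
		     TC_SCH_WRR5, TC_SCH_WRR4, TC_SCH_WRR3, TC_SCH_WRR2 };

/* Returns the scheduler mode, or -1 if the configuration is unsupported. */
static int ets_mode(int bands, const unsigned int *quanta,
		    const unsigned int *priomap)
{
	int i, nstrict = 0;

	if (bands > NUM_QUEUES)
		return -1;

	for (i = 0; i < bands; i++)
		if (!quanta[i])
			nstrict++;

	if (nstrict == NUM_QUEUES - 1)
		return -1;			/* not supported by the hw */

	for (i = 0; i < nstrict; i++)		/* SP bands: highest priorities */
		if (priomap[bands - i - 1] != (unsigned int)i)
			return -1;

	for (i = 0; i < bands - nstrict; i++)	/* WRR bands follow in order */
		if (priomap[i] != (unsigned int)(nstrict + i))
			return -1;

	if (!nstrict)
		return TC_SCH_WRR8;
	if (nstrict < NUM_QUEUES - 1)
		return nstrict + 1;		/* TC_SCH_WRR7 .. TC_SCH_WRR2 */
	return TC_SCH_SP;
}

int main(void)
{
	unsigned int quanta[4] = { 1000, 2000, 0, 0 };
	unsigned int priomap[4] = { 2, 3, 1, 0 };

	printf("mode %d\n", ets_mode(4, quanta, priomap)); /* 3 == TC_SCH_WRR6 */
	return 0;
}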
+static int airoha_qdma_get_tx_ets_stats(struct airoha_gdm_port *port,
+ int channel,
+ struct tc_ets_qopt_offload *opt)
+{
+ u64 cpu_tx_packets = airoha_qdma_rr(port->qdma,
+ REG_CNTR_VAL(channel << 1));
+ u64 fwd_tx_packets = airoha_qdma_rr(port->qdma,
+ REG_CNTR_VAL((channel << 1) + 1));
+ u64 tx_packets = (cpu_tx_packets - port->cpu_tx_packets) +
+ (fwd_tx_packets - port->fwd_tx_packets);
+ _bstats_update(opt->stats.bstats, 0, tx_packets);
+
+ port->cpu_tx_packets = cpu_tx_packets;
+ port->fwd_tx_packets = fwd_tx_packets;
+
+ return 0;
+}
+
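
Sketch of the ETS stats accounting above: the hw counters are free running, so each dump reports the delta against the previously cached values and then refreshes the cache (counter pair 2*ch and 2*ch + 1 holds the cpu-tx and fwd-tx counts for channel ch).

#include <stdint.h>
#include <stdio.h>

struct ets_stats_cache {
	uint64_t cpu_tx_packets;
	uint64_t fwd_tx_packets;
};

static uint64_t ets_tx_delta(struct ets_stats_cache *c,
			     uint64_t cpu_now, uint64_t fwd_now)
{
	uint64_t delta = (cpu_now - c->cpu_tx_packets) +
			 (fwd_now - c->fwd_tx_packets);

	c->cpu_tx_packets = cpu_now;
	c->fwd_tx_packets = fwd_now;
	return delta;
}

int main(void)
{
	struct ets_stats_cache cache = { 100, 50 };

	/* counters advanced to 180 and 90 -> 120 packets since last dump */
	printf("%llu\n", (unsigned long long)ets_tx_delta(&cache, 180, 90));
	return 0;
}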
+static int airoha_tc_setup_qdisc_ets(struct airoha_gdm_port *port,
+ struct tc_ets_qopt_offload *opt)
+{
+ int channel;
+
+ if (opt->parent == TC_H_ROOT)
+ return -EINVAL;
+
+ channel = TC_H_MAJ(opt->handle) >> 16;
+ channel = channel % AIROHA_NUM_QOS_CHANNELS;
+
+ switch (opt->command) {
+ case TC_ETS_REPLACE:
+ return airoha_qdma_set_tx_ets_sched(port, channel, opt);
+ case TC_ETS_DESTROY:
+ /* PRIO is default qdisc scheduler */
+ return airoha_qdma_set_tx_prio_sched(port, channel);
+ case TC_ETS_STATS:
+ return airoha_qdma_get_tx_ets_stats(port, channel, opt);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int airoha_qdma_get_rl_param(struct airoha_qdma *qdma, int queue_id,
+ u32 addr, enum trtcm_param_type param,
+ u32 *val_low, u32 *val_high)
+{
+ u32 idx = QDMA_METER_IDX(queue_id), group = QDMA_METER_GROUP(queue_id);
+ u32 val, config = FIELD_PREP(RATE_LIMIT_PARAM_TYPE_MASK, param) |
+ FIELD_PREP(RATE_LIMIT_METER_GROUP_MASK, group) |
+ FIELD_PREP(RATE_LIMIT_PARAM_INDEX_MASK, idx);
+
+ airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
+ if (read_poll_timeout(airoha_qdma_rr, val,
+ val & RATE_LIMIT_PARAM_RW_DONE_MASK,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC, true, qdma,
+ REG_TRTCM_CFG_PARAM(addr)))
+ return -ETIMEDOUT;
+
+ *val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr));
+ if (val_high)
+ *val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr));
+
+ return 0;
+}
+
+static int airoha_qdma_set_rl_param(struct airoha_qdma *qdma, int queue_id,
+ u32 addr, enum trtcm_param_type param,
+ u32 val)
+{
+ u32 idx = QDMA_METER_IDX(queue_id), group = QDMA_METER_GROUP(queue_id);
+ u32 config = RATE_LIMIT_PARAM_RW_MASK |
+ FIELD_PREP(RATE_LIMIT_PARAM_TYPE_MASK, param) |
+ FIELD_PREP(RATE_LIMIT_METER_GROUP_MASK, group) |
+ FIELD_PREP(RATE_LIMIT_PARAM_INDEX_MASK, idx);
+
+ airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val);
+ airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
+
+ return read_poll_timeout(airoha_qdma_rr, val,
+ val & RATE_LIMIT_PARAM_RW_DONE_MASK,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
+ qdma, REG_TRTCM_CFG_PARAM(addr));
+}
+
+static int airoha_qdma_set_rl_config(struct airoha_qdma *qdma, int queue_id,
+ u32 addr, bool enable, u32 enable_mask)
+{
+ u32 val;
+ int err;
+
+ err = airoha_qdma_get_rl_param(qdma, queue_id, addr, TRTCM_MISC_MODE,
+ &val, NULL);
+ if (err)
+ return err;
+
+ val = enable ? val | enable_mask : val & ~enable_mask;
+
+ return airoha_qdma_set_rl_param(qdma, queue_id, addr, TRTCM_MISC_MODE,
+ val);
+}
+
+static int airoha_qdma_set_rl_token_bucket(struct airoha_qdma *qdma,
+ int queue_id, u32 rate_val,
+ u32 bucket_size)
+{
+ u32 val, config, tick, unit, rate, rate_frac;
+ int err;
+
+ err = airoha_qdma_get_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
+ TRTCM_MISC_MODE, &config, NULL);
+ if (err)
+ return err;
+
+ val = airoha_qdma_rr(qdma, REG_INGRESS_TRTCM_CFG);
+ tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val);
+ if (config & TRTCM_TICK_SEL)
+ tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val);
+ if (!tick)
+ return -EINVAL;
+
+ unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick;
+ if (!unit)
+ return -EINVAL;
+
+ rate = rate_val / unit;
+ rate_frac = rate_val % unit;
+ rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit;
+ rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) |
+ FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac);
+
+ err = airoha_qdma_set_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
+ TRTCM_TOKEN_RATE_MODE, rate);
+ if (err)
+ return err;
+
+ val = bucket_size;
+ if (!(config & TRTCM_PKT_MODE))
+ val = max_t(u32, val, MIN_TOKEN_SIZE);
+ val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET);
+
+ return airoha_qdma_set_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
+ TRTCM_BUCKETSIZE_SHIFT_MODE, val);
+}
+
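
Sketch of the token-rate encoding used by the trtcm/rate-limit helpers above: the requested rate is expressed in "units per tick" with an 18-bit integer part and a 6-bit fractional part (1/64 granularity), matching the TRTCM_TOKEN_RATE and TRTCM_TOKEN_RATE_FRACTION field layout from the patch. The example unit value is only illustrative.

#include <stdint.h>
#include <stdio.h>

/* rate_val: kbit/s (byte mode) or pkt/s (packet mode); unit: rate that
 * corresponds to one hw token per tick (8000/tick or 1000000/tick).
 */
static uint32_t encode_token_rate(uint32_t rate_val, uint32_t unit)
{
	uint32_t integer = rate_val / unit;
	uint32_t frac = ((rate_val % unit) << 6) / unit;  /* 6-bit fraction */

	return (integer << 6) | frac;	/* GENMASK(23,6) | GENMASK(5,0) */
}

int main(void)
{
	/* e.g. 100000 kbit/s with unit == 64 -> 1562 tokens + 32/64 */
	printf("0x%06x\n", encode_token_rate(100000, 64));
	return 0;
}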
+static int airoha_qdma_init_rl_config(struct airoha_qdma *qdma, int queue_id,
+ bool enable, enum trtcm_unit_type unit)
+{
+ bool tick_sel = queue_id == 0 || queue_id == 2 || queue_id == 8;
+ enum trtcm_param mode = TRTCM_METER_MODE;
+ int err;
+
+ mode |= unit == TRTCM_PACKET_UNIT ? TRTCM_PKT_MODE : 0;
+ err = airoha_qdma_set_rl_config(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
+ enable, mode);
+ if (err)
+ return err;
+
+ return airoha_qdma_set_rl_config(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
+ tick_sel, TRTCM_TICK_SEL);
+}
+
+static int airoha_qdma_get_trtcm_param(struct airoha_qdma *qdma, int channel,
+ u32 addr, enum trtcm_param_type param,
+ enum trtcm_mode_type mode,
+ u32 *val_low, u32 *val_high)
+{
+ u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
+ u32 val, config = FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
+ FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
+ FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
+ FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
+
+ airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
+ if (read_poll_timeout(airoha_qdma_rr, val,
+ val & TRTCM_PARAM_RW_DONE_MASK,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
+ qdma, REG_TRTCM_CFG_PARAM(addr)))
+ return -ETIMEDOUT;
+
+ *val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr));
+ if (val_high)
+ *val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr));
+
+ return 0;
+}
+
+static int airoha_qdma_set_trtcm_param(struct airoha_qdma *qdma, int channel,
+ u32 addr, enum trtcm_param_type param,
+ enum trtcm_mode_type mode, u32 val)
+{
+ u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
+ u32 config = TRTCM_PARAM_RW_MASK |
+ FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
+ FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
+ FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
+ FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
+
+ airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val);
+ airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
+
+ return read_poll_timeout(airoha_qdma_rr, val,
+ val & TRTCM_PARAM_RW_DONE_MASK,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
+ qdma, REG_TRTCM_CFG_PARAM(addr));
+}
+
+static int airoha_qdma_set_trtcm_config(struct airoha_qdma *qdma, int channel,
+ u32 addr, enum trtcm_mode_type mode,
+ bool enable, u32 enable_mask)
+{
+ u32 val;
+
+ if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
+ mode, &val, NULL))
+ return -EINVAL;
+
+ val = enable ? val | enable_mask : val & ~enable_mask;
+
+ return airoha_qdma_set_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
+ mode, val);
+}
+
+static int airoha_qdma_set_trtcm_token_bucket(struct airoha_qdma *qdma,
+ int channel, u32 addr,
+ enum trtcm_mode_type mode,
+ u32 rate_val, u32 bucket_size)
+{
+ u32 val, config, tick, unit, rate, rate_frac;
+ int err;
+
+ if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
+ mode, &config, NULL))
+ return -EINVAL;
+
+ val = airoha_qdma_rr(qdma, addr);
+ tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val);
+ if (config & TRTCM_TICK_SEL)
+ tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val);
+ if (!tick)
+ return -EINVAL;
+
+ unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick;
+ if (!unit)
+ return -EINVAL;
+
+ rate = rate_val / unit;
+ rate_frac = rate_val % unit;
+ rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit;
+ rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) |
+ FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac);
+
+ err = airoha_qdma_set_trtcm_param(qdma, channel, addr,
+ TRTCM_TOKEN_RATE_MODE, mode, rate);
+ if (err)
+ return err;
+
+ val = max_t(u32, bucket_size, MIN_TOKEN_SIZE);
+ val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET);
+
+ return airoha_qdma_set_trtcm_param(qdma, channel, addr,
+ TRTCM_BUCKETSIZE_SHIFT_MODE,
+ mode, val);
+}
+
+static int airoha_qdma_set_tx_rate_limit(struct airoha_gdm_port *port,
+ int channel, u32 rate,
+ u32 bucket_size)
+{
+ int i, err;
+
+ for (i = 0; i <= TRTCM_PEAK_MODE; i++) {
+ err = airoha_qdma_set_trtcm_config(port->qdma, channel,
+ REG_EGRESS_TRTCM_CFG, i,
+ !!rate, TRTCM_METER_MODE);
+ if (err)
+ return err;
+
+ err = airoha_qdma_set_trtcm_token_bucket(port->qdma, channel,
+ REG_EGRESS_TRTCM_CFG,
+ i, rate, bucket_size);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int airoha_tc_htb_alloc_leaf_queue(struct airoha_gdm_port *port,
+ struct tc_htb_qopt_offload *opt)
+{
+ u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
+ u32 rate = div_u64(opt->rate, 1000) << 3; /* kbps */
+ struct net_device *dev = port->dev;
+ int num_tx_queues = dev->real_num_tx_queues;
+ int err;
+
+ if (opt->parent_classid != TC_HTB_CLASSID_ROOT) {
+ NL_SET_ERR_MSG_MOD(opt->extack, "invalid parent classid");
+ return -EINVAL;
+ }
+
+ err = airoha_qdma_set_tx_rate_limit(port, channel, rate, opt->quantum);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(opt->extack,
+ "failed configuring htb offload");
+ return err;
+ }
+
+ if (opt->command == TC_HTB_NODE_MODIFY)
+ return 0;
+
+ err = netif_set_real_num_tx_queues(dev, num_tx_queues + 1);
+ if (err) {
+ airoha_qdma_set_tx_rate_limit(port, channel, 0, opt->quantum);
+ NL_SET_ERR_MSG_MOD(opt->extack,
+ "failed setting real_num_tx_queues");
+ return err;
+ }
+
+ set_bit(channel, port->qos_sq_bmap);
+ opt->qid = AIROHA_NUM_TX_RING + channel;
+
+ return 0;
+}
+
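
Small model of the HTB leaf-queue bookkeeping above: the offloaded rate (bytes/s) is converted to kbit/s for the egress shaper, and the leaf is exposed to the stack as an extra tx queue stacked after the AIROHA_NUM_TX_RING hardware rings.

#include <stdint.h>
#include <stdio.h>

#define AIROHA_NUM_TX_RING 32
#define AIROHA_NUM_QOS_CHANNELS 4

static uint32_t htb_rate_kbps(uint64_t rate_bytes_per_sec)
{
	return (uint32_t)(rate_bytes_per_sec / 1000) << 3; /* bytes/s -> kbit/s */
}

static unsigned int htb_leaf_qid(unsigned int classid_minor)
{
	unsigned int channel = classid_minor % AIROHA_NUM_QOS_CHANNELS;

	return AIROHA_NUM_TX_RING + channel;
}

int main(void)
{
	/* 125 MB/s -> 1000000 kbit/s, classid minor 2 -> netdev queue 34 */
	printf("%u kbit/s, qid %u\n", htb_rate_kbps(125000000), htb_leaf_qid(2));
	return 0;
}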
+static int airoha_qdma_set_rx_meter(struct airoha_gdm_port *port,
+ u32 rate, u32 bucket_size,
+ enum trtcm_unit_type unit_type)
+{
+ struct airoha_qdma *qdma = port->qdma;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ int err;
+
+ if (!qdma->q_rx[i].ndesc)
+ continue;
+
+ err = airoha_qdma_init_rl_config(qdma, i, !!rate, unit_type);
+ if (err)
+ return err;
+
+ err = airoha_qdma_set_rl_token_bucket(qdma, i, rate,
+ bucket_size);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int airoha_tc_matchall_act_validate(struct tc_cls_matchall_offload *f)
+{
+ const struct flow_action *actions = &f->rule->action;
+ const struct flow_action_entry *act;
+
+ if (!flow_action_has_entries(actions)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "filter run with no actions");
+ return -EINVAL;
+ }
+
+ if (!flow_offload_has_one_action(actions)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "only once action per filter is supported");
+ return -EOPNOTSUPP;
+ }
+
+ act = &actions->entries[0];
+ if (act->id != FLOW_ACTION_POLICE) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "unsupported action");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "invalid exceed action id");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "invalid notexceed action id");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(actions, act)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "action accept must be last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps || act->police.avrate ||
+ act->police.overhead || act->police.mtu) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "peakrate/avrate/overhead/mtu unsupported");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int airoha_dev_tc_matchall(struct net_device *dev,
+ struct tc_cls_matchall_offload *f)
+{
+ enum trtcm_unit_type unit_type = TRTCM_BYTE_UNIT;
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ u32 rate = 0, bucket_size = 0;
+
+ switch (f->command) {
+ case TC_CLSMATCHALL_REPLACE: {
+ const struct flow_action_entry *act;
+ int err;
+
+ err = airoha_tc_matchall_act_validate(f);
+ if (err)
+ return err;
+
+ act = &f->rule->action.entries[0];
+ if (act->police.rate_pkt_ps) {
+ rate = act->police.rate_pkt_ps;
+ bucket_size = act->police.burst_pkt;
+ unit_type = TRTCM_PACKET_UNIT;
+ } else {
+ rate = div_u64(act->police.rate_bytes_ps, 1000);
+ rate = rate << 3; /* Kbps */
+ bucket_size = act->police.burst;
+ }
+ fallthrough;
+ }
+ case TC_CLSMATCHALL_DESTROY:
+ return airoha_qdma_set_rx_meter(port, rate, bucket_size,
+ unit_type);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int airoha_dev_setup_tc_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct net_device *dev = cb_priv;
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_eth *eth = port->qdma->eth;
+
+ if (!tc_can_offload(dev))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return airoha_ppe_setup_tc_block_cb(&eth->ppe->dev, type_data);
+ case TC_SETUP_CLSMATCHALL:
+ return airoha_dev_tc_matchall(dev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int airoha_dev_setup_tc_block(struct airoha_gdm_port *port,
+ struct flow_block_offload *f)
+{
+ flow_setup_cb_t *cb = airoha_dev_setup_tc_block_cb;
+ static LIST_HEAD(block_cb_list);
+ struct flow_block_cb *block_cb;
+
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ f->driver_block_list = &block_cb_list;
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ block_cb = flow_block_cb_lookup(f->block, cb, port->dev);
+ if (block_cb) {
+ flow_block_cb_incref(block_cb);
+ return 0;
+ }
+ block_cb = flow_block_cb_alloc(cb, port->dev, port->dev, NULL);
+ if (IS_ERR(block_cb))
+ return PTR_ERR(block_cb);
+
+ flow_block_cb_incref(block_cb);
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &block_cb_list);
+ return 0;
+ case FLOW_BLOCK_UNBIND:
+ block_cb = flow_block_cb_lookup(f->block, cb, port->dev);
+ if (!block_cb)
+ return -ENOENT;
+
+ if (!flow_block_cb_decref(block_cb)) {
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ }
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void airoha_tc_remove_htb_queue(struct airoha_gdm_port *port, int queue)
+{
+ struct net_device *dev = port->dev;
+
+ netif_set_real_num_tx_queues(dev, dev->real_num_tx_queues - 1);
+ airoha_qdma_set_tx_rate_limit(port, queue + 1, 0, 0);
+ clear_bit(queue, port->qos_sq_bmap);
+}
+
+static int airoha_tc_htb_delete_leaf_queue(struct airoha_gdm_port *port,
+ struct tc_htb_qopt_offload *opt)
+{
+ u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
+
+ if (!test_bit(channel, port->qos_sq_bmap)) {
+ NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
+ return -EINVAL;
+ }
+
+ airoha_tc_remove_htb_queue(port, channel);
+
+ return 0;
+}
+
+static int airoha_tc_htb_destroy(struct airoha_gdm_port *port)
+{
+ int q;
+
+ for_each_set_bit(q, port->qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS)
+ airoha_tc_remove_htb_queue(port, q);
+
+ return 0;
+}
+
+static int airoha_tc_get_htb_get_leaf_queue(struct airoha_gdm_port *port,
+ struct tc_htb_qopt_offload *opt)
+{
+ u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
+
+ if (!test_bit(channel, port->qos_sq_bmap)) {
+ NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
+ return -EINVAL;
+ }
+
+ opt->qid = AIROHA_NUM_TX_RING + channel;
+
+ return 0;
+}
+
+static int airoha_tc_setup_qdisc_htb(struct airoha_gdm_port *port,
+ struct tc_htb_qopt_offload *opt)
+{
+ switch (opt->command) {
+ case TC_HTB_CREATE:
+ break;
+ case TC_HTB_DESTROY:
+ return airoha_tc_htb_destroy(port);
+ case TC_HTB_NODE_MODIFY:
+ case TC_HTB_LEAF_ALLOC_QUEUE:
+ return airoha_tc_htb_alloc_leaf_queue(port, opt);
+ case TC_HTB_LEAF_DEL:
+ case TC_HTB_LEAF_DEL_LAST:
+ case TC_HTB_LEAF_DEL_LAST_FORCE:
+ return airoha_tc_htb_delete_leaf_queue(port, opt);
+ case TC_HTB_LEAF_QUERY_QUEUE:
+ return airoha_tc_get_htb_get_leaf_queue(port, opt);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int airoha_dev_tc_setup(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+
+ switch (type) {
+ case TC_SETUP_QDISC_ETS:
+ return airoha_tc_setup_qdisc_ets(port, type_data);
+ case TC_SETUP_QDISC_HTB:
+ return airoha_tc_setup_qdisc_htb(port, type_data);
+ case TC_SETUP_BLOCK:
+ case TC_SETUP_FT:
+ return airoha_dev_setup_tc_block(port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops airoha_netdev_ops = {
.ndo_init = airoha_dev_init,
.ndo_open = airoha_dev_open,
.ndo_stop = airoha_dev_stop,
+ .ndo_change_mtu = airoha_dev_change_mtu,
+ .ndo_select_queue = airoha_dev_select_queue,
.ndo_start_xmit = airoha_dev_xmit,
.ndo_get_stats64 = airoha_dev_get_stats64,
.ndo_set_mac_address = airoha_dev_set_macaddr,
+ .ndo_setup_tc = airoha_dev_tc_setup,
};
static const struct ethtool_ops airoha_ethtool_ops = {
.get_drvinfo = airoha_ethtool_get_drvinfo,
.get_eth_mac_stats = airoha_ethtool_get_mac_stats,
.get_rmon_stats = airoha_ethtool_get_rmon_stats,
+ .get_link = ethtool_op_get_link,
};
-static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
+static int airoha_metadata_dst_alloc(struct airoha_gdm_port *port)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) {
+ struct metadata_dst *md_dst;
+
+ md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
+ GFP_KERNEL);
+ if (!md_dst)
+ return -ENOMEM;
+
+ md_dst->u.port_info.port_id = i;
+ port->dsa_meta[i] = md_dst;
+ }
+
+ return 0;
+}
+
+static void airoha_metadata_dst_free(struct airoha_gdm_port *port)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) {
+ if (!port->dsa_meta[i])
+ continue;
+
+ metadata_dst_free(port->dsa_meta[i]);
+ }
+}
+
+bool airoha_is_valid_gdm_port(struct airoha_eth *eth,
+ struct airoha_gdm_port *port)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
+ if (eth->ports[i] == port)
+ return true;
+ }
+
+ return false;
+}
+
+static int airoha_alloc_gdm_port(struct airoha_eth *eth,
+ struct device_node *np, int index)
{
const __be32 *id_ptr = of_get_property(np, "reg", NULL);
struct airoha_gdm_port *port;
struct airoha_qdma *qdma;
struct net_device *dev;
- int err, index;
+ int err, p;
u32 id;
if (!id_ptr) {
@@ -2627,20 +2866,21 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
}
id = be32_to_cpup(id_ptr);
- index = id - 1;
+ p = id - 1;
if (!id || id > ARRAY_SIZE(eth->ports)) {
dev_err(eth->dev, "invalid gdm port id: %d\n", id);
return -EINVAL;
}
- if (eth->ports[index]) {
+ if (eth->ports[p]) {
dev_err(eth->dev, "duplicate gdm port id: %d\n", id);
return -EINVAL;
}
dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port),
- AIROHA_NUM_TX_RING, AIROHA_NUM_RX_RING);
+ AIROHA_NUM_NETDEV_TX_RINGS,
+ AIROHA_NUM_RX_RING);
if (!dev) {
dev_err(eth->dev, "alloc_etherdev failed\n");
return -ENOMEM;
@@ -2653,12 +2893,19 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
dev->watchdog_timeo = 5 * HZ;
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_TSO6 | NETIF_F_IPV6_CSUM |
- NETIF_F_SG | NETIF_F_TSO;
+ NETIF_F_SG | NETIF_F_TSO |
+ NETIF_F_HW_TC;
dev->features |= dev->hw_features;
+ dev->vlan_features = dev->hw_features;
dev->dev.of_node = np;
- dev->irq = qdma->irq;
+ dev->irq = qdma->irq_banks[0].irq;
SET_NETDEV_DEV(dev, eth->dev);
+ /* reserve hw queues for HTB offloading */
+ err = netif_set_real_num_tx_queues(dev, AIROHA_NUM_TX_RING);
+ if (err)
+ return err;
+
err = of_get_ethdev_address(np, dev);
if (err) {
if (err == -EPROBE_DEFER)
@@ -2675,13 +2922,26 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
port->qdma = qdma;
port->dev = dev;
port->id = id;
- eth->ports[index] = port;
+ eth->ports[p] = port;
- return register_netdev(dev);
+ err = airoha_metadata_dst_alloc(port);
+ if (err)
+ return err;
+
+ err = register_netdev(dev);
+ if (err)
+ goto free_metadata_dst;
+
+ return 0;
+
+free_metadata_dst:
+ airoha_metadata_dst_free(port);
+ return err;
}
static int airoha_probe(struct platform_device *pdev)
{
+ struct reset_control_bulk_data *xsi_rsts;
struct device_node *np;
struct airoha_eth *eth;
int i, err;
@@ -2690,6 +2950,10 @@ static int airoha_probe(struct platform_device *pdev)
if (!eth)
return -ENOMEM;
+ eth->soc = of_device_get_match_data(&pdev->dev);
+ if (!eth->soc)
+ return -EINVAL;
+
eth->dev = &pdev->dev;
err = dma_set_mask_and_coherent(eth->dev, DMA_BIT_MASK(32));
@@ -2714,13 +2978,18 @@ static int airoha_probe(struct platform_device *pdev)
return err;
}
- eth->xsi_rsts[0].id = "xsi-mac";
- eth->xsi_rsts[1].id = "hsi0-mac";
- eth->xsi_rsts[2].id = "hsi1-mac";
- eth->xsi_rsts[3].id = "hsi-mac";
- eth->xsi_rsts[4].id = "xfp-mac";
+ xsi_rsts = devm_kcalloc(eth->dev,
+ eth->soc->num_xsi_rsts, sizeof(*xsi_rsts),
+ GFP_KERNEL);
+ if (!xsi_rsts)
+ return -ENOMEM;
+
+ eth->xsi_rsts = xsi_rsts;
+ for (i = 0; i < eth->soc->num_xsi_rsts; i++)
+ eth->xsi_rsts[i].id = eth->soc->xsi_rsts_names[i];
+
err = devm_reset_control_bulk_get_exclusive(eth->dev,
- ARRAY_SIZE(eth->xsi_rsts),
+ eth->soc->num_xsi_rsts,
eth->xsi_rsts);
if (err) {
dev_err(eth->dev, "failed to get bulk xsi reset lines\n");
@@ -2738,11 +3007,12 @@ static int airoha_probe(struct platform_device *pdev)
err = airoha_hw_init(pdev, eth);
if (err)
- goto error;
+ goto error_hw_cleanup;
for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
airoha_qdma_start_napi(&eth->qdma[i]);
+ i = 0;
for_each_child_of_node(pdev->dev.of_node, np) {
if (!of_device_is_compatible(np, "airoha,eth-mac"))
continue;
@@ -2750,24 +3020,30 @@ static int airoha_probe(struct platform_device *pdev)
if (!of_device_is_available(np))
continue;
- err = airoha_alloc_gdm_port(eth, np);
+ err = airoha_alloc_gdm_port(eth, np, i++);
if (err) {
of_node_put(np);
- goto error;
+ goto error_napi_stop;
}
}
return 0;
-error:
+error_napi_stop:
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+ airoha_qdma_stop_napi(&eth->qdma[i]);
+ airoha_ppe_deinit(eth);
+error_hw_cleanup:
for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
airoha_hw_cleanup(&eth->qdma[i]);
for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
struct airoha_gdm_port *port = eth->ports[i];
- if (port && port->dev->reg_state == NETREG_REGISTERED)
+ if (port && port->dev->reg_state == NETREG_REGISTERED) {
unregister_netdev(port->dev);
+ airoha_metadata_dst_free(port);
+ }
}
free_netdev(eth->napi_dev);
platform_set_drvdata(pdev, NULL);
@@ -2780,8 +3056,10 @@ static void airoha_remove(struct platform_device *pdev)
struct airoha_eth *eth = platform_get_drvdata(pdev);
int i;
- for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
+ airoha_qdma_stop_napi(&eth->qdma[i]);
airoha_hw_cleanup(&eth->qdma[i]);
+ }
for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
struct airoha_gdm_port *port = eth->ports[i];
@@ -2791,14 +3069,98 @@ static void airoha_remove(struct platform_device *pdev)
airoha_dev_stop(port->dev);
unregister_netdev(port->dev);
+ airoha_metadata_dst_free(port);
}
free_netdev(eth->napi_dev);
+ airoha_ppe_deinit(eth);
platform_set_drvdata(pdev, NULL);
}
+static const char * const en7581_xsi_rsts_names[] = {
+ "xsi-mac",
+ "hsi0-mac",
+ "hsi1-mac",
+ "hsi-mac",
+ "xfp-mac",
+};
+
+static int airoha_en7581_get_src_port_id(struct airoha_gdm_port *port, int nbq)
+{
+ switch (port->id) {
+ case 3:
+ /* 7581 SoC supports PCIe serdes on GDM3 port */
+ if (nbq == 4)
+ return HSGMII_LAN_7581_PCIE0_SRCPORT;
+ if (nbq == 5)
+ return HSGMII_LAN_7581_PCIE1_SRCPORT;
+ break;
+ case 4:
+ /* 7581 SoC supports eth and usb serdes on GDM4 port */
+ if (!nbq)
+ return HSGMII_LAN_7581_ETH_SRCPORT;
+ if (nbq == 1)
+ return HSGMII_LAN_7581_USB_SRCPORT;
+ break;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const char * const an7583_xsi_rsts_names[] = {
+ "xsi-mac",
+ "hsi0-mac",
+ "hsi1-mac",
+ "xfp-mac",
+};
+
+static int airoha_an7583_get_src_port_id(struct airoha_gdm_port *port, int nbq)
+{
+ switch (port->id) {
+ case 3:
+ /* 7583 SoC supports eth serdes on GDM3 port */
+ if (!nbq)
+ return HSGMII_LAN_7583_ETH_SRCPORT;
+ break;
+ case 4:
+ /* 7583 SoC supports PCIe and USB serdes on GDM4 port */
+ if (!nbq)
+ return HSGMII_LAN_7583_PCIE_SRCPORT;
+ if (nbq == 1)
+ return HSGMII_LAN_7583_USB_SRCPORT;
+ break;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct airoha_eth_soc_data en7581_soc_data = {
+ .version = 0x7581,
+ .xsi_rsts_names = en7581_xsi_rsts_names,
+ .num_xsi_rsts = ARRAY_SIZE(en7581_xsi_rsts_names),
+ .num_ppe = 2,
+ .ops = {
+ .get_src_port_id = airoha_en7581_get_src_port_id,
+ },
+};
+
+static const struct airoha_eth_soc_data an7583_soc_data = {
+ .version = 0x7583,
+ .xsi_rsts_names = an7583_xsi_rsts_names,
+ .num_xsi_rsts = ARRAY_SIZE(an7583_xsi_rsts_names),
+ .num_ppe = 1,
+ .ops = {
+ .get_src_port_id = airoha_an7583_get_src_port_id,
+ },
+};
+
static const struct of_device_id of_airoha_match[] = {
- { .compatible = "airoha,en7581-eth" },
+ { .compatible = "airoha,en7581-eth", .data = &en7581_soc_data },
+ { .compatible = "airoha,an7583-eth", .data = &an7583_soc_data },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_airoha_match);
diff --git a/drivers/net/ethernet/airoha/airoha_eth.h b/drivers/net/ethernet/airoha/airoha_eth.h
new file mode 100644
index 000000000000..fbbc58133364
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_eth.h
@@ -0,0 +1,673 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#ifndef AIROHA_ETH_H
+#define AIROHA_ETH_H
+
+#include <linux/debugfs.h>
+#include <linux/etherdevice.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/reset.h>
+#include <linux/soc/airoha/airoha_offload.h>
+#include <net/dsa.h>
+
+#define AIROHA_MAX_NUM_GDM_PORTS 4
+#define AIROHA_MAX_NUM_QDMA 2
+#define AIROHA_MAX_NUM_IRQ_BANKS 4
+#define AIROHA_MAX_DSA_PORTS 7
+#define AIROHA_MAX_NUM_RSTS 3
+#define AIROHA_MAX_MTU 9216
+#define AIROHA_MAX_PACKET_SIZE 2048
+#define AIROHA_NUM_QOS_CHANNELS 4
+#define AIROHA_NUM_QOS_QUEUES 8
+#define AIROHA_NUM_TX_RING 32
+#define AIROHA_NUM_RX_RING 32
+#define AIROHA_NUM_NETDEV_TX_RINGS (AIROHA_NUM_TX_RING + \
+ AIROHA_NUM_QOS_CHANNELS)
+#define AIROHA_FE_MC_MAX_VLAN_TABLE 64
+#define AIROHA_FE_MC_MAX_VLAN_PORT 16
+#define AIROHA_NUM_TX_IRQ 2
+#define HW_DSCP_NUM 2048
+#define IRQ_QUEUE_LEN(_n) ((_n) ? 1024 : 2048)
+#define TX_DSCP_NUM 1024
+#define RX_DSCP_NUM(_n) \
+ ((_n) == 2 ? 128 : \
+ (_n) == 11 ? 128 : \
+ (_n) == 15 ? 128 : \
+ (_n) == 0 ? 1024 : 16)
+
+#define PSE_RSV_PAGES 128
+#define PSE_QUEUE_RSV_PAGES 64
+
+#define QDMA_METER_IDX(_n) ((_n) & 0xff)
+#define QDMA_METER_GROUP(_n) (((_n) >> 8) & 0x3)
+
+#define PPE_SRAM_NUM_ENTRIES (8 * 1024)
+#define PPE_STATS_NUM_ENTRIES (4 * 1024)
+#define PPE_DRAM_NUM_ENTRIES (16 * 1024)
+#define PPE_ENTRY_SIZE 80
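+/* Convert an entry count (a power-of-two multiple of 1024) into the
+ * log2-of-1K-blocks encoding programmed into REG_PPE_TB_CFG.
+ */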
+#define PPE_RAM_NUM_ENTRIES_SHIFT(_n) (__ffs((_n) >> 10))
+
+#define MTK_HDR_LEN 4
+#define MTK_HDR_XMIT_TAGGED_TPID_8100 1
+#define MTK_HDR_XMIT_TAGGED_TPID_88A8 2
+
+enum {
+ QDMA_INT_REG_IDX0,
+ QDMA_INT_REG_IDX1,
+ QDMA_INT_REG_IDX2,
+ QDMA_INT_REG_IDX3,
+ QDMA_INT_REG_IDX4,
+ QDMA_INT_REG_MAX
+};
+
+enum {
+ HSGMII_LAN_7581_PCIE0_SRCPORT = 0x16,
+ HSGMII_LAN_7581_PCIE1_SRCPORT,
+ HSGMII_LAN_7581_ETH_SRCPORT,
+ HSGMII_LAN_7581_USB_SRCPORT,
+};
+
+enum {
+ HSGMII_LAN_7583_ETH_SRCPORT = 0x16,
+ HSGMII_LAN_7583_PCIE_SRCPORT = 0x18,
+ HSGMII_LAN_7583_USB_SRCPORT,
+};
+
+enum {
+ XSI_PCIE0_VIP_PORT_MASK = BIT(22),
+ XSI_PCIE1_VIP_PORT_MASK = BIT(23),
+ XSI_USB_VIP_PORT_MASK = BIT(25),
+ XSI_ETH_VIP_PORT_MASK = BIT(24),
+};
+
+enum {
+ DEV_STATE_INITIALIZED,
+};
+
+enum {
+ CDM_CRSN_QSEL_Q1 = 1,
+ CDM_CRSN_QSEL_Q5 = 5,
+ CDM_CRSN_QSEL_Q6 = 6,
+ CDM_CRSN_QSEL_Q15 = 15,
+};
+
+enum {
+ CRSN_08 = 0x8,
+ CRSN_21 = 0x15, /* KA */
+ CRSN_22 = 0x16, /* hit bind and force route to CPU */
+ CRSN_24 = 0x18,
+ CRSN_25 = 0x19,
+};
+
+enum airoha_gdm_index {
+ AIROHA_GDM1_IDX = 1,
+ AIROHA_GDM2_IDX = 2,
+ AIROHA_GDM3_IDX = 3,
+ AIROHA_GDM4_IDX = 4,
+};
+
+enum {
+ FE_PSE_PORT_CDM1,
+ FE_PSE_PORT_GDM1,
+ FE_PSE_PORT_GDM2,
+ FE_PSE_PORT_GDM3,
+ FE_PSE_PORT_PPE1,
+ FE_PSE_PORT_CDM2,
+ FE_PSE_PORT_CDM3,
+ FE_PSE_PORT_CDM4,
+ FE_PSE_PORT_PPE2,
+ FE_PSE_PORT_GDM4,
+ FE_PSE_PORT_CDM5,
+ FE_PSE_PORT_DROP = 0xf,
+};
+
+enum tx_sched_mode {
+ TC_SCH_WRR8,
+ TC_SCH_SP,
+ TC_SCH_WRR7,
+ TC_SCH_WRR6,
+ TC_SCH_WRR5,
+ TC_SCH_WRR4,
+ TC_SCH_WRR3,
+ TC_SCH_WRR2,
+};
+
+enum trtcm_unit_type {
+ TRTCM_BYTE_UNIT,
+ TRTCM_PACKET_UNIT,
+};
+
+enum trtcm_param_type {
+ TRTCM_MISC_MODE, /* meter_en, pps_mode, tick_sel */
+ TRTCM_TOKEN_RATE_MODE,
+ TRTCM_BUCKETSIZE_SHIFT_MODE,
+ TRTCM_BUCKET_COUNTER_MODE,
+};
+
+enum trtcm_mode_type {
+ TRTCM_COMMIT_MODE,
+ TRTCM_PEAK_MODE,
+};
+
+enum trtcm_param {
+ TRTCM_TICK_SEL = BIT(0),
+ TRTCM_PKT_MODE = BIT(1),
+ TRTCM_METER_MODE = BIT(2),
+};
+
+#define MIN_TOKEN_SIZE 4096
+#define MAX_TOKEN_SIZE_OFFSET 17
+#define TRTCM_TOKEN_RATE_MASK GENMASK(23, 6)
+#define TRTCM_TOKEN_RATE_FRACTION_MASK GENMASK(5, 0)
+
+struct airoha_queue_entry {
+ union {
+ void *buf;
+ struct {
+ struct list_head list;
+ struct sk_buff *skb;
+ };
+ };
+ dma_addr_t dma_addr;
+ u16 dma_len;
+};
+
+struct airoha_queue {
+ struct airoha_qdma *qdma;
+
+ /* protect concurrent queue accesses */
+ spinlock_t lock;
+ struct airoha_queue_entry *entry;
+ struct airoha_qdma_desc *desc;
+ u16 head;
+ u16 tail;
+
+ int queued;
+ int ndesc;
+ int free_thr;
+ int buf_size;
+
+ struct napi_struct napi;
+ struct page_pool *page_pool;
+ struct sk_buff *skb;
+
+ struct list_head tx_list;
+};
+
+struct airoha_tx_irq_queue {
+ struct airoha_qdma *qdma;
+
+ struct napi_struct napi;
+
+ int size;
+ u32 *q;
+};
+
+struct airoha_hw_stats {
+ /* protect concurrent hw_stats accesses */
+ spinlock_t lock;
+ struct u64_stats_sync syncp;
+
+ /* get_stats64 */
+ u64 rx_ok_pkts;
+ u64 tx_ok_pkts;
+ u64 rx_ok_bytes;
+ u64 tx_ok_bytes;
+ u64 rx_multicast;
+ u64 rx_errors;
+ u64 rx_drops;
+ u64 tx_drops;
+ u64 rx_crc_error;
+ u64 rx_over_errors;
+ /* ethtool stats */
+ u64 tx_broadcast;
+ u64 tx_multicast;
+ u64 tx_len[7];
+ u64 rx_broadcast;
+ u64 rx_fragment;
+ u64 rx_jabber;
+ u64 rx_len[7];
+};
+
+enum {
+ AIROHA_FOE_STATE_INVALID,
+ AIROHA_FOE_STATE_UNBIND,
+ AIROHA_FOE_STATE_BIND,
+ AIROHA_FOE_STATE_FIN
+};
+
+enum {
+ PPE_PKT_TYPE_IPV4_HNAPT = 0,
+ PPE_PKT_TYPE_IPV4_ROUTE = 1,
+ PPE_PKT_TYPE_BRIDGE = 2,
+ PPE_PKT_TYPE_IPV4_DSLITE = 3,
+ PPE_PKT_TYPE_IPV6_ROUTE_3T = 4,
+ PPE_PKT_TYPE_IPV6_ROUTE_5T = 5,
+ PPE_PKT_TYPE_IPV6_6RD = 7,
+};
+
+#define AIROHA_FOE_MAC_SMAC_ID GENMASK(20, 16)
+#define AIROHA_FOE_MAC_PPPOE_ID GENMASK(15, 0)
+
+#define AIROHA_FOE_MAC_WDMA_QOS GENMASK(15, 12)
+#define AIROHA_FOE_MAC_WDMA_BAND BIT(11)
+#define AIROHA_FOE_MAC_WDMA_WCID GENMASK(10, 0)
+
+struct airoha_foe_mac_info_common {
+ u16 vlan1;
+ u16 etype;
+
+ u32 dest_mac_hi;
+
+ u16 vlan2;
+ u16 dest_mac_lo;
+
+ u32 src_mac_hi;
+};
+
+struct airoha_foe_mac_info {
+ struct airoha_foe_mac_info_common common;
+
+ u16 pppoe_id;
+ u16 src_mac_lo;
+
+ u32 meter;
+};
+
+#define AIROHA_FOE_IB1_UNBIND_PREBIND BIT(24)
+#define AIROHA_FOE_IB1_UNBIND_PACKETS GENMASK(23, 8)
+#define AIROHA_FOE_IB1_UNBIND_TIMESTAMP GENMASK(7, 0)
+
+#define AIROHA_FOE_IB1_BIND_STATIC BIT(31)
+#define AIROHA_FOE_IB1_BIND_UDP BIT(30)
+#define AIROHA_FOE_IB1_BIND_STATE GENMASK(29, 28)
+#define AIROHA_FOE_IB1_BIND_PACKET_TYPE GENMASK(27, 25)
+#define AIROHA_FOE_IB1_BIND_TTL BIT(24)
+#define AIROHA_FOE_IB1_BIND_TUNNEL_DECAP BIT(23)
+#define AIROHA_FOE_IB1_BIND_PPPOE BIT(22)
+#define AIROHA_FOE_IB1_BIND_VPM GENMASK(21, 20)
+#define AIROHA_FOE_IB1_BIND_VLAN_LAYER GENMASK(19, 16)
+#define AIROHA_FOE_IB1_BIND_KEEPALIVE BIT(15)
+#define AIROHA_FOE_IB1_BIND_TIMESTAMP GENMASK(14, 0)
+
+#define AIROHA_FOE_IB2_DSCP GENMASK(31, 24)
+#define AIROHA_FOE_IB2_PORT_AG GENMASK(23, 13)
+#define AIROHA_FOE_IB2_PCP BIT(12)
+#define AIROHA_FOE_IB2_MULTICAST BIT(11)
+#define AIROHA_FOE_IB2_FAST_PATH BIT(10)
+#define AIROHA_FOE_IB2_PSE_QOS BIT(9)
+#define AIROHA_FOE_IB2_PSE_PORT GENMASK(8, 5)
+#define AIROHA_FOE_IB2_NBQ GENMASK(4, 0)
+
+#define AIROHA_FOE_ACTDP GENMASK(31, 24)
+#define AIROHA_FOE_SHAPER_ID GENMASK(23, 16)
+#define AIROHA_FOE_CHANNEL GENMASK(15, 11)
+#define AIROHA_FOE_QID GENMASK(10, 8)
+#define AIROHA_FOE_DPI BIT(7)
+#define AIROHA_FOE_TUNNEL BIT(6)
+#define AIROHA_FOE_TUNNEL_ID GENMASK(5, 0)
+
+#define AIROHA_FOE_TUNNEL_MTU GENMASK(31, 16)
+#define AIROHA_FOE_ACNT_GRP3 GENMASK(15, 9)
+#define AIROHA_FOE_METER_GRP3 GENMASK(8, 5)
+#define AIROHA_FOE_METER_GRP2 GENMASK(4, 0)
+
+struct airoha_foe_bridge {
+ u32 dest_mac_hi;
+
+ u16 src_mac_hi;
+ u16 dest_mac_lo;
+
+ u32 src_mac_lo;
+
+ u32 ib2;
+
+ u32 rsv[5];
+
+ u32 data;
+
+ struct airoha_foe_mac_info l2;
+};
+
+struct airoha_foe_ipv4_tuple {
+ u32 src_ip;
+ u32 dest_ip;
+ union {
+ struct {
+ u16 dest_port;
+ u16 src_port;
+ };
+ struct {
+ u8 protocol;
+ u8 _pad[3]; /* fill with 0xa5a5a5 */
+ };
+ u32 ports;
+ };
+};
+
+struct airoha_foe_ipv4 {
+ struct airoha_foe_ipv4_tuple orig_tuple;
+
+ u32 ib2;
+
+ struct airoha_foe_ipv4_tuple new_tuple;
+
+ u32 rsv[2];
+
+ u32 data;
+
+ struct airoha_foe_mac_info l2;
+};
+
+struct airoha_foe_ipv4_dslite {
+ struct airoha_foe_ipv4_tuple ip4;
+
+ u32 ib2;
+
+ u8 flow_label[3];
+ u8 priority;
+
+ u32 rsv[4];
+
+ u32 data;
+
+ struct airoha_foe_mac_info l2;
+};
+
+struct airoha_foe_ipv6 {
+ u32 src_ip[4];
+ u32 dest_ip[4];
+
+ union {
+ struct {
+ u16 dest_port;
+ u16 src_port;
+ };
+ struct {
+ u8 protocol;
+ u8 pad[3];
+ };
+ u32 ports;
+ };
+
+ u32 data;
+
+ u32 ib2;
+
+ struct airoha_foe_mac_info_common l2;
+
+ u32 meter;
+};
+
+struct airoha_foe_entry {
+ union {
+ struct {
+ u32 ib1;
+ union {
+ struct airoha_foe_bridge bridge;
+ struct airoha_foe_ipv4 ipv4;
+ struct airoha_foe_ipv4_dslite dslite;
+ struct airoha_foe_ipv6 ipv6;
+ DECLARE_FLEX_ARRAY(u32, d);
+ };
+ };
+ u8 data[PPE_ENTRY_SIZE];
+ };
+};
+
+struct airoha_foe_stats {
+ u32 bytes;
+ u32 packets;
+};
+
+struct airoha_foe_stats64 {
+ u64 bytes;
+ u64 packets;
+};
+
+struct airoha_flow_data {
+ struct ethhdr eth;
+
+ union {
+ struct {
+ __be32 src_addr;
+ __be32 dst_addr;
+ } v4;
+
+ struct {
+ struct in6_addr src_addr;
+ struct in6_addr dst_addr;
+ } v6;
+ };
+
+ __be16 src_port;
+ __be16 dst_port;
+
+ struct {
+ struct {
+ u16 id;
+ __be16 proto;
+ } hdr[2];
+ u8 num;
+ } vlan;
+ struct {
+ u16 sid;
+ u8 num;
+ } pppoe;
+};
+
+enum airoha_flow_entry_type {
+ FLOW_TYPE_L4,
+ FLOW_TYPE_L2,
+ FLOW_TYPE_L2_SUBFLOW,
+};
+
+struct airoha_flow_table_entry {
+ union {
+ struct hlist_node list; /* PPE L3 flow entry */
+ struct {
+ struct rhash_head l2_node; /* L2 flow entry */
+ struct hlist_head l2_flows; /* PPE L2 subflows list */
+ };
+ };
+
+ struct hlist_node l2_subflow_node; /* PPE L2 subflow entry */
+ u32 hash;
+
+ struct airoha_foe_stats64 stats;
+ enum airoha_flow_entry_type type;
+
+ struct rhash_head node;
+ unsigned long cookie;
+
+ /* Must be last --ends in a flexible-array member. */
+ struct airoha_foe_entry data;
+};
+
+struct airoha_wdma_info {
+ u8 idx;
+ u8 queue;
+ u16 wcid;
+ u8 bss;
+};
+
+/* RX queue to IRQ mapping: BIT(q) in IRQ(n) */
+#define RX_IRQ0_BANK_PIN_MASK 0x839f
+#define RX_IRQ1_BANK_PIN_MASK 0x7fe00000
+#define RX_IRQ2_BANK_PIN_MASK 0x20
+#define RX_IRQ3_BANK_PIN_MASK 0x40
+#define RX_IRQ_BANK_PIN_MASK(_n) \
+ (((_n) == 3) ? RX_IRQ3_BANK_PIN_MASK : \
+ ((_n) == 2) ? RX_IRQ2_BANK_PIN_MASK : \
+ ((_n) == 1) ? RX_IRQ1_BANK_PIN_MASK : \
+ RX_IRQ0_BANK_PIN_MASK)
+
+struct airoha_irq_bank {
+ struct airoha_qdma *qdma;
+
+ /* protect concurrent irqmask accesses */
+ spinlock_t irq_lock;
+ u32 irqmask[QDMA_INT_REG_MAX];
+ int irq;
+};
+
+struct airoha_qdma {
+ struct airoha_eth *eth;
+ void __iomem *regs;
+
+ atomic_t users;
+
+ struct airoha_irq_bank irq_banks[AIROHA_MAX_NUM_IRQ_BANKS];
+
+ struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
+
+ struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
+ struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
+};
+
+struct airoha_gdm_port {
+ struct airoha_qdma *qdma;
+ struct net_device *dev;
+ int id;
+
+ struct airoha_hw_stats stats;
+
+ DECLARE_BITMAP(qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS);
+
+ /* qos stats counters */
+ u64 cpu_tx_packets;
+ u64 fwd_tx_packets;
+
+ struct metadata_dst *dsa_meta[AIROHA_MAX_DSA_PORTS];
+};
+
+#define AIROHA_RXD4_PPE_CPU_REASON GENMASK(20, 16)
+#define AIROHA_RXD4_FOE_ENTRY GENMASK(15, 0)
+
+struct airoha_ppe {
+ struct airoha_ppe_dev dev;
+ struct airoha_eth *eth;
+
+ void *foe;
+ dma_addr_t foe_dma;
+
+ struct rhashtable l2_flows;
+
+ struct hlist_head *foe_flow;
+ u16 *foe_check_time;
+
+ struct airoha_foe_stats *foe_stats;
+ dma_addr_t foe_stats_dma;
+
+ struct dentry *debugfs_dir;
+};
+
+struct airoha_eth_soc_data {
+ u16 version;
+ const char * const *xsi_rsts_names;
+ int num_xsi_rsts;
+ int num_ppe;
+ struct {
+ int (*get_src_port_id)(struct airoha_gdm_port *port, int nbq);
+ } ops;
+};
+
+struct airoha_eth {
+ struct device *dev;
+
+ const struct airoha_eth_soc_data *soc;
+
+ unsigned long state;
+ void __iomem *fe_regs;
+
+ struct airoha_npu __rcu *npu;
+
+ struct airoha_ppe *ppe;
+ struct rhashtable flow_table;
+
+ struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
+ struct reset_control_bulk_data *xsi_rsts;
+
+ struct net_device *napi_dev;
+
+ struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
+ struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
+};
+
+u32 airoha_rr(void __iomem *base, u32 offset);
+void airoha_wr(void __iomem *base, u32 offset, u32 val);
+u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val);
+
+#define airoha_fe_rr(eth, offset) \
+ airoha_rr((eth)->fe_regs, (offset))
+#define airoha_fe_wr(eth, offset, val) \
+ airoha_wr((eth)->fe_regs, (offset), (val))
+#define airoha_fe_rmw(eth, offset, mask, val) \
+ airoha_rmw((eth)->fe_regs, (offset), (mask), (val))
+#define airoha_fe_set(eth, offset, val) \
+ airoha_rmw((eth)->fe_regs, (offset), 0, (val))
+#define airoha_fe_clear(eth, offset, val) \
+ airoha_rmw((eth)->fe_regs, (offset), (val), 0)
+
+#define airoha_qdma_rr(qdma, offset) \
+ airoha_rr((qdma)->regs, (offset))
+#define airoha_qdma_wr(qdma, offset, val) \
+ airoha_wr((qdma)->regs, (offset), (val))
+#define airoha_qdma_rmw(qdma, offset, mask, val) \
+ airoha_rmw((qdma)->regs, (offset), (mask), (val))
+#define airoha_qdma_set(qdma, offset, val) \
+ airoha_rmw((qdma)->regs, (offset), 0, (val))
+#define airoha_qdma_clear(qdma, offset, val) \
+ airoha_rmw((qdma)->regs, (offset), (val), 0)
+
+static inline bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
+{
+ /* GDM1 port on EN7581 SoC is connected to the lan dsa switch.
+	 * GDM{2,3,4} can be used as wan ports connected to an external
+ * phy module.
+ */
+ return port->id == 1;
+}
+
+static inline bool airoha_is_7581(struct airoha_eth *eth)
+{
+ return eth->soc->version == 0x7581;
+}
+
+static inline bool airoha_is_7583(struct airoha_eth *eth)
+{
+ return eth->soc->version == 0x7583;
+}
+
+bool airoha_is_valid_gdm_port(struct airoha_eth *eth,
+ struct airoha_gdm_port *port);
+
+bool airoha_ppe_is_enabled(struct airoha_eth *eth, int index);
+void airoha_ppe_check_skb(struct airoha_ppe_dev *dev, struct sk_buff *skb,
+ u16 hash, bool rx_wlan);
+int airoha_ppe_setup_tc_block_cb(struct airoha_ppe_dev *dev, void *type_data);
+int airoha_ppe_init(struct airoha_eth *eth);
+void airoha_ppe_deinit(struct airoha_eth *eth);
+void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port);
+u32 airoha_ppe_get_total_num_entries(struct airoha_ppe *ppe);
+struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
+ u32 hash);
+void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
+ struct airoha_foe_stats64 *stats);
+
+#ifdef CONFIG_DEBUG_FS
+int airoha_ppe_debugfs_init(struct airoha_ppe *ppe);
+#else
+static inline int airoha_ppe_debugfs_init(struct airoha_ppe *ppe)
+{
+ return 0;
+}
+#endif
+
+#endif /* AIROHA_ETH_H */
diff --git a/drivers/net/ethernet/airoha/airoha_npu.c b/drivers/net/ethernet/airoha/airoha_npu.c
new file mode 100644
index 000000000000..68b7f9684dc7
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_npu.c
@@ -0,0 +1,783 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/devcoredump.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/regmap.h>
+
+#include "airoha_eth.h"
+
+#define NPU_EN7581_FIRMWARE_DATA "airoha/en7581_npu_data.bin"
+#define NPU_EN7581_FIRMWARE_RV32 "airoha/en7581_npu_rv32.bin"
+#define NPU_AN7583_FIRMWARE_DATA "airoha/an7583_npu_data.bin"
+#define NPU_AN7583_FIRMWARE_RV32 "airoha/an7583_npu_rv32.bin"
+#define NPU_EN7581_FIRMWARE_RV32_MAX_SIZE 0x200000
+#define NPU_EN7581_FIRMWARE_DATA_MAX_SIZE 0x10000
+#define NPU_DUMP_SIZE 512
+
+#define REG_NPU_LOCAL_SRAM 0x0
+
+#define NPU_PC_BASE_ADDR 0x305000
+#define REG_PC_DBG(_n) (0x305000 + ((_n) * 0x100))
+
+#define NPU_CLUSTER_BASE_ADDR 0x306000
+
+#define REG_CR_BOOT_TRIGGER (NPU_CLUSTER_BASE_ADDR + 0x000)
+#define REG_CR_BOOT_CONFIG (NPU_CLUSTER_BASE_ADDR + 0x004)
+#define REG_CR_BOOT_BASE(_n) (NPU_CLUSTER_BASE_ADDR + 0x020 + ((_n) << 2))
+
+#define NPU_MBOX_BASE_ADDR 0x30c000
+
+#define REG_CR_MBOX_INT_STATUS (NPU_MBOX_BASE_ADDR + 0x000)
+#define MBOX_INT_STATUS_MASK BIT(8)
+
+#define REG_CR_MBOX_INT_MASK(_n) (NPU_MBOX_BASE_ADDR + 0x004 + ((_n) << 2))
+#define REG_CR_MBQ0_CTRL(_n) (NPU_MBOX_BASE_ADDR + 0x030 + ((_n) << 2))
+#define REG_CR_MBQ8_CTRL(_n) (NPU_MBOX_BASE_ADDR + 0x0b0 + ((_n) << 2))
+#define REG_CR_NPU_MIB(_n) (NPU_MBOX_BASE_ADDR + 0x140 + ((_n) << 2))
+
+#define NPU_WLAN_BASE_ADDR 0x30d000
+
+#define REG_IRQ_STATUS (NPU_WLAN_BASE_ADDR + 0x030)
+#define REG_IRQ_RXDONE(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 2) + 0x034)
+#define NPU_IRQ_RX_MASK(_n) ((_n) == 1 ? BIT(17) : BIT(16))
+
+#define REG_TX_BASE(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x080)
+#define REG_TX_DSCP_NUM(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x084)
+#define REG_TX_CPU_IDX(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x088)
+#define REG_TX_DMA_IDX(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x08c)
+
+#define REG_RX_BASE(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x180)
+#define REG_RX_DSCP_NUM(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x184)
+#define REG_RX_CPU_IDX(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x188)
+#define REG_RX_DMA_IDX(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x18c)
+
+#define NPU_TIMER_BASE_ADDR 0x310100
+#define REG_WDT_TIMER_CTRL(_n) (NPU_TIMER_BASE_ADDR + ((_n) * 0x100))
+#define WDT_EN_MASK BIT(25)
+#define WDT_INTR_MASK BIT(21)
+
+enum {
+ NPU_OP_SET = 1,
+ NPU_OP_SET_NO_WAIT,
+ NPU_OP_GET,
+ NPU_OP_GET_NO_WAIT,
+};
+
+enum {
+ NPU_FUNC_WIFI,
+ NPU_FUNC_TUNNEL,
+ NPU_FUNC_NOTIFY,
+ NPU_FUNC_DBA,
+ NPU_FUNC_TR471,
+ NPU_FUNC_PPE,
+};
+
+enum {
+ NPU_MBOX_ERROR,
+ NPU_MBOX_SUCCESS,
+};
+
+enum {
+ PPE_FUNC_SET_WAIT,
+ PPE_FUNC_SET_WAIT_HWNAT_INIT,
+ PPE_FUNC_SET_WAIT_HWNAT_DEINIT,
+ PPE_FUNC_SET_WAIT_API,
+ PPE_FUNC_SET_WAIT_FLOW_STATS_SETUP,
+};
+
+enum {
+ PPE2_SRAM_SET_ENTRY,
+ PPE_SRAM_SET_ENTRY,
+ PPE_SRAM_SET_VAL,
+ PPE_SRAM_RESET_VAL,
+};
+
+enum {
+ QDMA_WAN_ETHER = 1,
+ QDMA_WAN_PON_XDSL,
+};
+
+struct airoha_npu_fw {
+ const char *name;
+ int max_size;
+};
+
+struct airoha_npu_soc_data {
+ struct airoha_npu_fw fw_rv32;
+ struct airoha_npu_fw fw_data;
+};
+
+#define MBOX_MSG_FUNC_ID GENMASK(14, 11)
+#define MBOX_MSG_STATIC_BUF BIT(5)
+#define MBOX_MSG_STATUS GENMASK(4, 2)
+#define MBOX_MSG_DONE BIT(1)
+#define MBOX_MSG_WAIT_RSP BIT(0)
+
+#define PPE_TYPE_L2B_IPV4 2
+#define PPE_TYPE_L2B_IPV4_IPV6 3
+
+struct ppe_mbox_data {
+ u32 func_type;
+ u32 func_id;
+ union {
+ struct {
+ u8 cds;
+ u8 xpon_hal_api;
+ u8 wan_xsi;
+ u8 ct_joyme4;
+ u8 max_packet;
+ u8 rsv[3];
+ u32 ppe_type;
+ u32 wan_mode;
+ u32 wan_sel;
+ } init_info;
+ struct {
+ u32 func_id;
+ u32 size;
+ u32 data;
+ } set_info;
+ struct {
+ u32 npu_stats_addr;
+ u32 foe_stats_addr;
+ } stats_info;
+ };
+};
+
+struct wlan_mbox_data {
+ u32 ifindex:4;
+ u32 func_type:4;
+ u32 func_id;
+ DECLARE_FLEX_ARRAY(u8, d);
+};
+
+static int airoha_npu_send_msg(struct airoha_npu *npu, int func_id,
+ void *p, int size)
+{
+ u16 core = 0; /* FIXME */
+ u32 val, offset = core << 4;
+ dma_addr_t dma_addr;
+ int ret;
+
+ dma_addr = dma_map_single(npu->dev, p, size, DMA_TO_DEVICE);
+ ret = dma_mapping_error(npu->dev, dma_addr);
+ if (ret)
+ return ret;
+
+ spin_lock_bh(&npu->cores[core].lock);
+
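+	/* Post the message on mailbox queue 0: DMA address, length, an
+	 * incremented CTRL(2) value and the function id, then wait for the
+	 * firmware to flag completion via MBOX_MSG_DONE.
+	 */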
+ regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(0) + offset, dma_addr);
+ regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(1) + offset, size);
+ regmap_read(npu->regmap, REG_CR_MBQ0_CTRL(2) + offset, &val);
+ regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(2) + offset, val + 1);
+ val = FIELD_PREP(MBOX_MSG_FUNC_ID, func_id) | MBOX_MSG_WAIT_RSP;
+ regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(3) + offset, val);
+
+ ret = regmap_read_poll_timeout_atomic(npu->regmap,
+ REG_CR_MBQ0_CTRL(3) + offset,
+ val, (val & MBOX_MSG_DONE),
+ 100, 100 * MSEC_PER_SEC);
+ if (!ret && FIELD_GET(MBOX_MSG_STATUS, val) != NPU_MBOX_SUCCESS)
+ ret = -EINVAL;
+
+ spin_unlock_bh(&npu->cores[core].lock);
+
+ dma_unmap_single(npu->dev, dma_addr, size, DMA_TO_DEVICE);
+
+ return ret;
+}
+
+static int airoha_npu_load_firmware(struct device *dev, void __iomem *addr,
+ const struct airoha_npu_fw *fw_info)
+{
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware(&fw, fw_info->name, dev);
+ if (ret)
+ return ret == -ENOENT ? -EPROBE_DEFER : ret;
+
+ if (fw->size > fw_info->max_size) {
+		dev_err(dev, "%s: fw size too large (%zu)\n",
+ fw_info->name, fw->size);
+ ret = -E2BIG;
+ goto out;
+ }
+
+ memcpy_toio(addr, fw->data, fw->size);
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static int airoha_npu_run_firmware(struct device *dev, void __iomem *base,
+ struct resource *res)
+{
+ const struct airoha_npu_soc_data *soc;
+ void __iomem *addr;
+ int ret;
+
+ soc = of_device_get_match_data(dev);
+ if (!soc)
+ return -EINVAL;
+
+ addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
+
+ /* Load rv32 npu firmware */
+ ret = airoha_npu_load_firmware(dev, addr, &soc->fw_rv32);
+ if (ret)
+ return ret;
+
+ /* Load data npu firmware */
+ return airoha_npu_load_firmware(dev, base + REG_NPU_LOCAL_SRAM,
+ &soc->fw_data);
+}
+
+static irqreturn_t airoha_npu_mbox_handler(int irq, void *npu_instance)
+{
+ struct airoha_npu *npu = npu_instance;
+
+ /* clear mbox interrupt status */
+ regmap_write(npu->regmap, REG_CR_MBOX_INT_STATUS,
+ MBOX_INT_STATUS_MASK);
+
+ /* acknowledge npu */
+ regmap_update_bits(npu->regmap, REG_CR_MBQ8_CTRL(3),
+ MBOX_MSG_STATUS | MBOX_MSG_DONE, MBOX_MSG_DONE);
+
+ return IRQ_HANDLED;
+}
+
+static void airoha_npu_wdt_work(struct work_struct *work)
+{
+ struct airoha_npu_core *core;
+ struct airoha_npu *npu;
+ void *dump;
+ u32 val[3];
+ int c;
+
+ core = container_of(work, struct airoha_npu_core, wdt_work);
+ npu = core->npu;
+
+ dump = vzalloc(NPU_DUMP_SIZE);
+ if (!dump)
+ return;
+
+ c = core - &npu->cores[0];
+ regmap_bulk_read(npu->regmap, REG_PC_DBG(c), val, ARRAY_SIZE(val));
+ snprintf(dump, NPU_DUMP_SIZE, "PC: %08x SP: %08x LR: %08x\n",
+ val[0], val[1], val[2]);
+
+ dev_coredumpv(npu->dev, dump, NPU_DUMP_SIZE, GFP_KERNEL);
+}
+
+static irqreturn_t airoha_npu_wdt_handler(int irq, void *core_instance)
+{
+ struct airoha_npu_core *core = core_instance;
+ struct airoha_npu *npu = core->npu;
+ int c = core - &npu->cores[0];
+ u32 val;
+
+ regmap_set_bits(npu->regmap, REG_WDT_TIMER_CTRL(c), WDT_INTR_MASK);
+ if (!regmap_read(npu->regmap, REG_WDT_TIMER_CTRL(c), &val) &&
+ FIELD_GET(WDT_EN_MASK, val))
+ schedule_work(&core->wdt_work);
+
+ return IRQ_HANDLED;
+}
+
+static int airoha_npu_ppe_init(struct airoha_npu *npu)
+{
+ struct ppe_mbox_data *ppe_data;
+ int err;
+
+ ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL);
+ if (!ppe_data)
+ return -ENOMEM;
+
+ ppe_data->func_type = NPU_OP_SET;
+ ppe_data->func_id = PPE_FUNC_SET_WAIT_HWNAT_INIT;
+ ppe_data->init_info.ppe_type = PPE_TYPE_L2B_IPV4_IPV6;
+ ppe_data->init_info.wan_mode = QDMA_WAN_ETHER;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+ kfree(ppe_data);
+
+ return err;
+}
+
+static int airoha_npu_ppe_deinit(struct airoha_npu *npu)
+{
+ struct ppe_mbox_data *ppe_data;
+ int err;
+
+ ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL);
+ if (!ppe_data)
+ return -ENOMEM;
+
+ ppe_data->func_type = NPU_OP_SET;
+ ppe_data->func_id = PPE_FUNC_SET_WAIT_HWNAT_DEINIT;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+ kfree(ppe_data);
+
+ return err;
+}
+
+static int airoha_npu_ppe_flush_sram_entries(struct airoha_npu *npu,
+ dma_addr_t foe_addr,
+ int sram_num_entries)
+{
+ struct ppe_mbox_data *ppe_data;
+ int err;
+
+ ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL);
+ if (!ppe_data)
+ return -ENOMEM;
+
+ ppe_data->func_type = NPU_OP_SET;
+ ppe_data->func_id = PPE_FUNC_SET_WAIT_API;
+ ppe_data->set_info.func_id = PPE_SRAM_RESET_VAL;
+ ppe_data->set_info.data = foe_addr;
+ ppe_data->set_info.size = sram_num_entries;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+ kfree(ppe_data);
+
+ return err;
+}
+
+static int airoha_npu_foe_commit_entry(struct airoha_npu *npu,
+ dma_addr_t foe_addr,
+ u32 entry_size, u32 hash, bool ppe2)
+{
+ struct ppe_mbox_data *ppe_data;
+ int err;
+
+ ppe_data = kzalloc(sizeof(*ppe_data), GFP_ATOMIC);
+ if (!ppe_data)
+ return -ENOMEM;
+
+ ppe_data->func_type = NPU_OP_SET;
+ ppe_data->func_id = PPE_FUNC_SET_WAIT_API;
+ ppe_data->set_info.data = foe_addr;
+ ppe_data->set_info.size = entry_size;
+ ppe_data->set_info.func_id = ppe2 ? PPE2_SRAM_SET_ENTRY
+ : PPE_SRAM_SET_ENTRY;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+ if (err)
+ goto out;
+
+ ppe_data->set_info.func_id = PPE_SRAM_SET_VAL;
+ ppe_data->set_info.data = hash;
+ ppe_data->set_info.size = sizeof(u32);
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+out:
+ kfree(ppe_data);
+
+ return err;
+}
+
+static int airoha_npu_ppe_stats_setup(struct airoha_npu *npu,
+ dma_addr_t foe_stats_addr,
+ u32 num_stats_entries)
+{
+ int err, size = num_stats_entries * sizeof(*npu->stats);
+ struct ppe_mbox_data *ppe_data;
+
+ ppe_data = kzalloc(sizeof(*ppe_data), GFP_ATOMIC);
+ if (!ppe_data)
+ return -ENOMEM;
+
+ ppe_data->func_type = NPU_OP_SET;
+ ppe_data->func_id = PPE_FUNC_SET_WAIT_FLOW_STATS_SETUP;
+ ppe_data->stats_info.foe_stats_addr = foe_stats_addr;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+ if (err)
+ goto out;
+
+ npu->stats = devm_ioremap(npu->dev,
+ ppe_data->stats_info.npu_stats_addr,
+ size);
+ if (!npu->stats)
+ err = -ENOMEM;
+out:
+ kfree(ppe_data);
+
+ return err;
+}
+
+static int airoha_npu_wlan_msg_send(struct airoha_npu *npu, int ifindex,
+ enum airoha_npu_wlan_set_cmd func_id,
+ void *data, int data_len, gfp_t gfp)
+{
+ struct wlan_mbox_data *wlan_data;
+ int err, len;
+
+ len = sizeof(*wlan_data) + data_len;
+ wlan_data = kzalloc(len, gfp);
+ if (!wlan_data)
+ return -ENOMEM;
+
+ wlan_data->ifindex = ifindex;
+ wlan_data->func_type = NPU_OP_SET;
+ wlan_data->func_id = func_id;
+ memcpy(wlan_data->d, data, data_len);
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_WIFI, wlan_data, len);
+ kfree(wlan_data);
+
+ return err;
+}
+
+static int airoha_npu_wlan_msg_get(struct airoha_npu *npu, int ifindex,
+ enum airoha_npu_wlan_get_cmd func_id,
+ void *data, int data_len, gfp_t gfp)
+{
+ struct wlan_mbox_data *wlan_data;
+ int err, len;
+
+ len = sizeof(*wlan_data) + data_len;
+ wlan_data = kzalloc(len, gfp);
+ if (!wlan_data)
+ return -ENOMEM;
+
+ wlan_data->ifindex = ifindex;
+ wlan_data->func_type = NPU_OP_GET;
+ wlan_data->func_id = func_id;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_WIFI, wlan_data, len);
+ if (!err)
+ memcpy(data, wlan_data->d, data_len);
+ kfree(wlan_data);
+
+ return err;
+}
+
+static int
+airoha_npu_wlan_set_reserved_memory(struct airoha_npu *npu,
+ int ifindex, const char *name,
+ enum airoha_npu_wlan_set_cmd func_id)
+{
+ struct device *dev = npu->dev;
+ struct resource res;
+ int err;
+ u32 val;
+
+ err = of_reserved_mem_region_to_resource_byname(dev->of_node, name,
+ &res);
+ if (err)
+ return err;
+
+ val = res.start;
+ return airoha_npu_wlan_msg_send(npu, ifindex, func_id, &val,
+ sizeof(val), GFP_KERNEL);
+}
+
+static int airoha_npu_wlan_init_memory(struct airoha_npu *npu)
+{
+ enum airoha_npu_wlan_set_cmd cmd = WLAN_FUNC_SET_WAIT_NPU_BAND0_ONCPU;
+ u32 val = 0;
+ int err;
+
+ err = airoha_npu_wlan_msg_send(npu, 1, cmd, &val, sizeof(val),
+ GFP_KERNEL);
+ if (err)
+ return err;
+
+ cmd = WLAN_FUNC_SET_WAIT_TX_BUF_CHECK_ADDR;
+ err = airoha_npu_wlan_set_reserved_memory(npu, 0, "tx-bufid", cmd);
+ if (err)
+ return err;
+
+ cmd = WLAN_FUNC_SET_WAIT_PKT_BUF_ADDR;
+ err = airoha_npu_wlan_set_reserved_memory(npu, 0, "pkt", cmd);
+ if (err)
+ return err;
+
+ cmd = WLAN_FUNC_SET_WAIT_TX_PKT_BUF_ADDR;
+ err = airoha_npu_wlan_set_reserved_memory(npu, 0, "tx-pkt", cmd);
+ if (err)
+ return err;
+
+ cmd = WLAN_FUNC_SET_WAIT_IS_FORCE_TO_CPU;
+ return airoha_npu_wlan_msg_send(npu, 0, cmd, &val, sizeof(val),
+ GFP_KERNEL);
+}
+
+static u32 airoha_npu_wlan_queue_addr_get(struct airoha_npu *npu, int qid,
+ bool xmit)
+{
+ if (xmit)
+ return REG_TX_BASE(qid + 2);
+
+ return REG_RX_BASE(qid);
+}
+
+static void airoha_npu_wlan_irq_status_set(struct airoha_npu *npu, u32 val)
+{
+ regmap_write(npu->regmap, REG_IRQ_STATUS, val);
+}
+
+static u32 airoha_npu_wlan_irq_status_get(struct airoha_npu *npu, int q)
+{
+ u32 val;
+
+ regmap_read(npu->regmap, REG_IRQ_STATUS, &val);
+ return val;
+}
+
+static void airoha_npu_wlan_irq_enable(struct airoha_npu *npu, int q)
+{
+ regmap_set_bits(npu->regmap, REG_IRQ_RXDONE(q), NPU_IRQ_RX_MASK(q));
+}
+
+static void airoha_npu_wlan_irq_disable(struct airoha_npu *npu, int q)
+{
+ regmap_clear_bits(npu->regmap, REG_IRQ_RXDONE(q), NPU_IRQ_RX_MASK(q));
+}
+
+struct airoha_npu *airoha_npu_get(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+ struct airoha_npu *npu;
+
+ np = of_parse_phandle(dev->of_node, "airoha,npu", 0);
+ if (!np)
+ return ERR_PTR(-ENODEV);
+
+ pdev = of_find_device_by_node(np);
+
+ if (!pdev) {
+ dev_err(dev, "cannot find device node %s\n", np->name);
+ of_node_put(np);
+ return ERR_PTR(-ENODEV);
+ }
+ of_node_put(np);
+
+ if (!try_module_get(THIS_MODULE)) {
+ dev_err(dev, "failed to get the device driver module\n");
+ npu = ERR_PTR(-ENODEV);
+ goto error_pdev_put;
+ }
+
+ npu = platform_get_drvdata(pdev);
+ if (!npu) {
+ npu = ERR_PTR(-ENODEV);
+ goto error_module_put;
+ }
+
+ if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER)) {
+ dev_err(&pdev->dev,
+ "failed to create device link to consumer %s\n",
+ dev_name(dev));
+ npu = ERR_PTR(-EINVAL);
+ goto error_module_put;
+ }
+
+ return npu;
+
+error_module_put:
+ module_put(THIS_MODULE);
+error_pdev_put:
+ platform_device_put(pdev);
+
+ return npu;
+}
+EXPORT_SYMBOL_GPL(airoha_npu_get);
+
+void airoha_npu_put(struct airoha_npu *npu)
+{
+ module_put(THIS_MODULE);
+ put_device(npu->dev);
+}
+EXPORT_SYMBOL_GPL(airoha_npu_put);
+
+static const struct airoha_npu_soc_data en7581_npu_soc_data = {
+ .fw_rv32 = {
+ .name = NPU_EN7581_FIRMWARE_RV32,
+ .max_size = NPU_EN7581_FIRMWARE_RV32_MAX_SIZE,
+ },
+ .fw_data = {
+ .name = NPU_EN7581_FIRMWARE_DATA,
+ .max_size = NPU_EN7581_FIRMWARE_DATA_MAX_SIZE,
+ },
+};
+
+static const struct airoha_npu_soc_data an7583_npu_soc_data = {
+ .fw_rv32 = {
+ .name = NPU_AN7583_FIRMWARE_RV32,
+ .max_size = NPU_EN7581_FIRMWARE_RV32_MAX_SIZE,
+ },
+ .fw_data = {
+ .name = NPU_AN7583_FIRMWARE_DATA,
+ .max_size = NPU_EN7581_FIRMWARE_DATA_MAX_SIZE,
+ },
+};
+
+static const struct of_device_id of_airoha_npu_match[] = {
+ { .compatible = "airoha,en7581-npu", .data = &en7581_npu_soc_data },
+ { .compatible = "airoha,an7583-npu", .data = &an7583_npu_soc_data },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_airoha_npu_match);
+
+static const struct regmap_config regmap_config = {
+ .name = "npu",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .disable_locking = true,
+};
+
+static int airoha_npu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct airoha_npu *npu;
+ struct resource res;
+ void __iomem *base;
+ int i, irq, err;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ npu = devm_kzalloc(dev, sizeof(*npu), GFP_KERNEL);
+ if (!npu)
+ return -ENOMEM;
+
+ npu->dev = dev;
+ npu->ops.ppe_init = airoha_npu_ppe_init;
+ npu->ops.ppe_deinit = airoha_npu_ppe_deinit;
+ npu->ops.ppe_init_stats = airoha_npu_ppe_stats_setup;
+ npu->ops.ppe_flush_sram_entries = airoha_npu_ppe_flush_sram_entries;
+ npu->ops.ppe_foe_commit_entry = airoha_npu_foe_commit_entry;
+ npu->ops.wlan_init_reserved_memory = airoha_npu_wlan_init_memory;
+ npu->ops.wlan_send_msg = airoha_npu_wlan_msg_send;
+ npu->ops.wlan_get_msg = airoha_npu_wlan_msg_get;
+ npu->ops.wlan_get_queue_addr = airoha_npu_wlan_queue_addr_get;
+ npu->ops.wlan_set_irq_status = airoha_npu_wlan_irq_status_set;
+ npu->ops.wlan_get_irq_status = airoha_npu_wlan_irq_status_get;
+ npu->ops.wlan_enable_irq = airoha_npu_wlan_irq_enable;
+ npu->ops.wlan_disable_irq = airoha_npu_wlan_irq_disable;
+
+ npu->regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
+ if (IS_ERR(npu->regmap))
+ return PTR_ERR(npu->regmap);
+
+ err = of_reserved_mem_region_to_resource(dev->of_node, 0, &res);
+ if (err)
+ return err;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(dev, irq, airoha_npu_mbox_handler,
+ IRQF_SHARED, "airoha-npu-mbox", npu);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(npu->cores); i++) {
+ struct airoha_npu_core *core = &npu->cores[i];
+
+ spin_lock_init(&core->lock);
+ core->npu = npu;
+
+ irq = platform_get_irq(pdev, i + 1);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(dev, irq, airoha_npu_wdt_handler,
+ IRQF_SHARED, "airoha-npu-wdt", core);
+ if (err)
+ return err;
+
+ INIT_WORK(&core->wdt_work, airoha_npu_wdt_work);
+ }
+
+ /* wlan IRQ lines */
+ for (i = 0; i < ARRAY_SIZE(npu->irqs); i++) {
+ irq = platform_get_irq(pdev, i + ARRAY_SIZE(npu->cores) + 1);
+ if (irq < 0)
+ return irq;
+
+ npu->irqs[i] = irq;
+ }
+
+ err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+
+ err = airoha_npu_run_firmware(dev, base, &res);
+ if (err)
+ return dev_err_probe(dev, err, "failed to run npu firmware\n");
+
+ regmap_write(npu->regmap, REG_CR_NPU_MIB(10),
+ res.start + NPU_EN7581_FIRMWARE_RV32_MAX_SIZE);
+ regmap_write(npu->regmap, REG_CR_NPU_MIB(11), 0x40000); /* SRAM 256K */
+ regmap_write(npu->regmap, REG_CR_NPU_MIB(12), 0);
+ regmap_write(npu->regmap, REG_CR_NPU_MIB(21), 1);
+ msleep(100);
+
+	/* set the boot address for all NPU cores */
+ for (i = 0; i < NPU_NUM_CORES; i++)
+ regmap_write(npu->regmap, REG_CR_BOOT_BASE(i), res.start);
+ usleep_range(1000, 2000);
+
+ /* enable NPU cores */
+ regmap_write(npu->regmap, REG_CR_BOOT_CONFIG, 0xff);
+ regmap_write(npu->regmap, REG_CR_BOOT_TRIGGER, 0x1);
+ msleep(100);
+
+ platform_set_drvdata(pdev, npu);
+
+ return 0;
+}
+
+static void airoha_npu_remove(struct platform_device *pdev)
+{
+ struct airoha_npu *npu = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(npu->cores); i++)
+ cancel_work_sync(&npu->cores[i].wdt_work);
+}
+
+static struct platform_driver airoha_npu_driver = {
+ .probe = airoha_npu_probe,
+ .remove = airoha_npu_remove,
+ .driver = {
+ .name = "airoha-npu",
+ .of_match_table = of_airoha_npu_match,
+ },
+};
+module_platform_driver(airoha_npu_driver);
+
+MODULE_FIRMWARE(NPU_EN7581_FIRMWARE_DATA);
+MODULE_FIRMWARE(NPU_EN7581_FIRMWARE_RV32);
+MODULE_FIRMWARE(NPU_AN7583_FIRMWARE_DATA);
+MODULE_FIRMWARE(NPU_AN7583_FIRMWARE_RV32);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("Airoha Network Processor Unit driver");
diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c
new file mode 100644
index 000000000000..0caabb0c3aa0
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
@@ -0,0 +1,1561 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/rhashtable.h>
+#include <net/ipv6.h>
+#include <net/pkt_cls.h>
+
+#include "airoha_regs.h"
+#include "airoha_eth.h"
+
+static DEFINE_MUTEX(flow_offload_mutex);
+static DEFINE_SPINLOCK(ppe_lock);
+
+static const struct rhashtable_params airoha_flow_table_params = {
+ .head_offset = offsetof(struct airoha_flow_table_entry, node),
+ .key_offset = offsetof(struct airoha_flow_table_entry, cookie),
+ .key_len = sizeof(unsigned long),
+ .automatic_shrinking = true,
+};
+
+static const struct rhashtable_params airoha_l2_flow_table_params = {
+ .head_offset = offsetof(struct airoha_flow_table_entry, l2_node),
+ .key_offset = offsetof(struct airoha_flow_table_entry, data.bridge),
+ .key_len = 2 * ETH_ALEN,
+ .automatic_shrinking = true,
+};
+
+static int airoha_ppe_get_num_stats_entries(struct airoha_ppe *ppe)
+{
+ if (!IS_ENABLED(CONFIG_NET_AIROHA_FLOW_STATS))
+ return -EOPNOTSUPP;
+
+ if (airoha_is_7583(ppe->eth))
+ return -EOPNOTSUPP;
+
+ return PPE_STATS_NUM_ENTRIES;
+}
+
+static int airoha_ppe_get_total_num_stats_entries(struct airoha_ppe *ppe)
+{
+ int num_stats = airoha_ppe_get_num_stats_entries(ppe);
+
+ if (num_stats > 0) {
+ struct airoha_eth *eth = ppe->eth;
+
+ num_stats = num_stats * eth->soc->num_ppe;
+ }
+
+ return num_stats;
+}
+
+static u32 airoha_ppe_get_total_sram_num_entries(struct airoha_ppe *ppe)
+{
+ struct airoha_eth *eth = ppe->eth;
+
+ return PPE_SRAM_NUM_ENTRIES * eth->soc->num_ppe;
+}
+
+u32 airoha_ppe_get_total_num_entries(struct airoha_ppe *ppe)
+{
+ u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
+
+ return sram_num_entries + PPE_DRAM_NUM_ENTRIES;
+}
+
+bool airoha_ppe_is_enabled(struct airoha_eth *eth, int index)
+{
+ if (index >= eth->soc->num_ppe)
+ return false;
+
+ return airoha_fe_rr(eth, REG_PPE_GLO_CFG(index)) & PPE_GLO_CFG_EN_MASK;
+}
+
+static u32 airoha_ppe_get_timestamp(struct airoha_ppe *ppe)
+{
+ u16 timestamp = airoha_fe_rr(ppe->eth, REG_FE_FOE_TS);
+
+ return FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, timestamp);
+}
+
+static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
+{
+ u32 sram_ppe_num_data_entries = PPE_SRAM_NUM_ENTRIES, sram_num_entries;
+ u32 sram_tb_size, dram_num_entries;
+ struct airoha_eth *eth = ppe->eth;
+ int i, sram_num_stats_entries;
+
+ sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
+ sram_tb_size = sram_num_entries * sizeof(struct airoha_foe_entry);
+ dram_num_entries = PPE_RAM_NUM_ENTRIES_SHIFT(PPE_DRAM_NUM_ENTRIES);
+
+ sram_num_stats_entries = airoha_ppe_get_num_stats_entries(ppe);
+ if (sram_num_stats_entries > 0)
+ sram_ppe_num_data_entries -= sram_num_stats_entries;
+ sram_ppe_num_data_entries =
+ PPE_RAM_NUM_ENTRIES_SHIFT(sram_ppe_num_data_entries);
+
+ for (i = 0; i < eth->soc->num_ppe; i++) {
+ int p;
+
+ airoha_fe_wr(eth, REG_PPE_TB_BASE(i),
+ ppe->foe_dma + sram_tb_size);
+
+ airoha_fe_rmw(eth, REG_PPE_BND_AGE0(i),
+ PPE_BIND_AGE0_DELTA_NON_L4 |
+ PPE_BIND_AGE0_DELTA_UDP,
+ FIELD_PREP(PPE_BIND_AGE0_DELTA_NON_L4, 1) |
+ FIELD_PREP(PPE_BIND_AGE0_DELTA_UDP, 12));
+ airoha_fe_rmw(eth, REG_PPE_BND_AGE1(i),
+ PPE_BIND_AGE1_DELTA_TCP_FIN |
+ PPE_BIND_AGE1_DELTA_TCP,
+ FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
+ FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP, 7));
+
+ airoha_fe_rmw(eth, REG_PPE_TB_HASH_CFG(i),
+ PPE_SRAM_TABLE_EN_MASK |
+ PPE_SRAM_HASH1_EN_MASK |
+ PPE_DRAM_TABLE_EN_MASK |
+ PPE_SRAM_HASH0_MODE_MASK |
+ PPE_SRAM_HASH1_MODE_MASK |
+ PPE_DRAM_HASH0_MODE_MASK |
+ PPE_DRAM_HASH1_MODE_MASK,
+ FIELD_PREP(PPE_SRAM_TABLE_EN_MASK, 1) |
+ FIELD_PREP(PPE_SRAM_HASH1_EN_MASK, 1) |
+ FIELD_PREP(PPE_SRAM_HASH1_MODE_MASK, 1) |
+ FIELD_PREP(PPE_DRAM_HASH1_MODE_MASK, 3));
+
+ airoha_fe_rmw(eth, REG_PPE_TB_CFG(i),
+ PPE_TB_CFG_SEARCH_MISS_MASK |
+ PPE_SRAM_TB_NUM_ENTRY_MASK |
+ PPE_DRAM_TB_NUM_ENTRY_MASK |
+ PPE_TB_CFG_KEEPALIVE_MASK |
+ PPE_TB_ENTRY_SIZE_MASK,
+ FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) |
+ FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0) |
+ FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
+ sram_ppe_num_data_entries) |
+ FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
+ dram_num_entries));
+
+ airoha_fe_wr(eth, REG_PPE_HASH_SEED(i), PPE_HASH_SEED);
+
+ for (p = 0; p < ARRAY_SIZE(eth->ports); p++)
+ airoha_fe_rmw(eth, REG_PPE_MTU(i, p),
+ FP0_EGRESS_MTU_MASK |
+ FP1_EGRESS_MTU_MASK,
+ FIELD_PREP(FP0_EGRESS_MTU_MASK,
+ AIROHA_MAX_MTU) |
+ FIELD_PREP(FP1_EGRESS_MTU_MASK,
+ AIROHA_MAX_MTU));
+ }
+}
+
+static void airoha_ppe_flow_mangle_eth(const struct flow_action_entry *act, void *eth)
+{
+ void *dest = eth + act->mangle.offset;
+ const void *src = &act->mangle.val;
+
+ if (act->mangle.offset > 8)
+ return;
+
+ if (act->mangle.mask == 0xffff) {
+ src += 2;
+ dest += 2;
+ }
+
+ memcpy(dest, src, act->mangle.mask ? 2 : 4);
+}
+
+static int airoha_ppe_flow_mangle_ports(const struct flow_action_entry *act,
+ struct airoha_flow_data *data)
+{
+ u32 val = be32_to_cpu((__force __be32)act->mangle.val);
+
+ switch (act->mangle.offset) {
+ case 0:
+ if ((__force __be32)act->mangle.mask == ~cpu_to_be32(0xffff))
+ data->dst_port = cpu_to_be16(val);
+ else
+ data->src_port = cpu_to_be16(val >> 16);
+ break;
+ case 2:
+ data->dst_port = cpu_to_be16(val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int airoha_ppe_flow_mangle_ipv4(const struct flow_action_entry *act,
+ struct airoha_flow_data *data)
+{
+ __be32 *dest;
+
+ switch (act->mangle.offset) {
+ case offsetof(struct iphdr, saddr):
+ dest = &data->v4.src_addr;
+ break;
+ case offsetof(struct iphdr, daddr):
+ dest = &data->v4.dst_addr;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ memcpy(dest, &act->mangle.val, sizeof(u32));
+
+ return 0;
+}
+
+static int airoha_ppe_get_wdma_info(struct net_device *dev, const u8 *addr,
+ struct airoha_wdma_info *info)
+{
+ struct net_device_path_stack stack;
+ struct net_device_path *path;
+ int err;
+
+ if (!dev)
+ return -ENODEV;
+
+ err = dev_fill_forward_path(dev, addr, &stack);
+ if (err)
+ return err;
+
+ path = &stack.path[stack.num_paths - 1];
+ if (path->type != DEV_PATH_MTK_WDMA)
+ return -1;
+
+ info->idx = path->mtk_wdma.wdma_idx;
+ info->bss = path->mtk_wdma.bss;
+ info->wcid = path->mtk_wdma.wcid;
+
+ return 0;
+}
+
+static int airoha_get_dsa_port(struct net_device **dev)
+{
+#if IS_ENABLED(CONFIG_NET_DSA)
+ struct dsa_port *dp = dsa_port_from_netdev(*dev);
+
+ if (IS_ERR(dp))
+ return -ENODEV;
+
+ *dev = dsa_port_to_conduit(dp);
+ return dp->index;
+#else
+ return -ENODEV;
+#endif
+}
+
+static void airoha_ppe_foe_set_bridge_addrs(struct airoha_foe_bridge *br,
+ struct ethhdr *eh)
+{
+ br->dest_mac_hi = get_unaligned_be32(eh->h_dest);
+ br->dest_mac_lo = get_unaligned_be16(eh->h_dest + 4);
+ br->src_mac_hi = get_unaligned_be16(eh->h_source);
+ br->src_mac_lo = get_unaligned_be32(eh->h_source + 2);
+}
+
+static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
+ struct airoha_foe_entry *hwe,
+ struct net_device *dev, int type,
+ struct airoha_flow_data *data,
+ int l4proto)
+{
+ u32 qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f), ports_pad, val;
+ int wlan_etype = -EINVAL, dsa_port = airoha_get_dsa_port(&dev);
+ struct airoha_foe_mac_info_common *l2;
+ u8 smac_id = 0xf;
+
+ memset(hwe, 0, sizeof(*hwe));
+
+ val = FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE, AIROHA_FOE_STATE_BIND) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_PACKET_TYPE, type) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_UDP, l4proto == IPPROTO_UDP) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_VLAN_LAYER, data->vlan.num) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_VPM, data->vlan.num) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_PPPOE, data->pppoe.num) |
+ AIROHA_FOE_IB1_BIND_TTL;
+ hwe->ib1 = val;
+
+ val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f);
+ if (dev) {
+ struct airoha_wdma_info info = {};
+
+ if (!airoha_ppe_get_wdma_info(dev, data->eth.h_dest, &info)) {
+ val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ, info.idx) |
+ FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT,
+ FE_PSE_PORT_CDM4);
+ qdata |= FIELD_PREP(AIROHA_FOE_ACTDP, info.bss);
+ wlan_etype = FIELD_PREP(AIROHA_FOE_MAC_WDMA_BAND,
+ info.idx) |
+ FIELD_PREP(AIROHA_FOE_MAC_WDMA_WCID,
+ info.wcid);
+ } else {
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ u8 pse_port;
+
+ if (!airoha_is_valid_gdm_port(eth, port))
+ return -EINVAL;
+
+ if (dsa_port >= 0 || eth->ports[1])
+ pse_port = port->id == 4 ? FE_PSE_PORT_GDM4
+ : port->id;
+ else
+ pse_port = 2; /* uplink relies on GDM2
+ * loopback
+ */
+
+ val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port) |
+ AIROHA_FOE_IB2_PSE_QOS;
+			/* For downlink traffic, consume SRAM memory for the
+			 * hw forwarding descriptor queue.
+			 */
+ if (airhoa_is_lan_gdm_port(port))
+ val |= AIROHA_FOE_IB2_FAST_PATH;
+ if (dsa_port >= 0)
+ val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ,
+ dsa_port);
+
+ smac_id = port->id;
+ }
+ }
+
+ if (is_multicast_ether_addr(data->eth.h_dest))
+ val |= AIROHA_FOE_IB2_MULTICAST;
+
+ ports_pad = 0xa5a5a500 | (l4proto & 0xff);
+ if (type == PPE_PKT_TYPE_IPV4_ROUTE)
+ hwe->ipv4.orig_tuple.ports = ports_pad;
+ if (type == PPE_PKT_TYPE_IPV6_ROUTE_3T)
+ hwe->ipv6.ports = ports_pad;
+
+ if (type == PPE_PKT_TYPE_BRIDGE) {
+ airoha_ppe_foe_set_bridge_addrs(&hwe->bridge, &data->eth);
+ hwe->bridge.data = qdata;
+ hwe->bridge.ib2 = val;
+ l2 = &hwe->bridge.l2.common;
+ } else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+ hwe->ipv6.data = qdata;
+ hwe->ipv6.ib2 = val;
+ l2 = &hwe->ipv6.l2;
+ l2->etype = ETH_P_IPV6;
+ } else {
+ hwe->ipv4.data = qdata;
+ hwe->ipv4.ib2 = val;
+ l2 = &hwe->ipv4.l2.common;
+ l2->etype = ETH_P_IP;
+ }
+
+ l2->dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
+ l2->dest_mac_lo = get_unaligned_be16(data->eth.h_dest + 4);
+ if (type <= PPE_PKT_TYPE_IPV4_DSLITE) {
+ struct airoha_foe_mac_info *mac_info;
+
+ l2->src_mac_hi = get_unaligned_be32(data->eth.h_source);
+ hwe->ipv4.l2.src_mac_lo =
+ get_unaligned_be16(data->eth.h_source + 4);
+
+ mac_info = (struct airoha_foe_mac_info *)l2;
+ mac_info->pppoe_id = data->pppoe.sid;
+ } else {
+ l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, smac_id) |
+ FIELD_PREP(AIROHA_FOE_MAC_PPPOE_ID,
+ data->pppoe.sid);
+ }
+
+ if (data->vlan.num) {
+ l2->vlan1 = data->vlan.hdr[0].id;
+ if (data->vlan.num == 2)
+ l2->vlan2 = data->vlan.hdr[1].id;
+ }
+
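+	/* For wlan and dsa destinations the etype field carries forwarding
+	 * metadata (WDMA band/wcid or the dsa port bitmap) rather than an
+	 * ethertype.
+	 */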
+ if (wlan_etype >= 0) {
+ l2->etype = wlan_etype;
+ } else if (dsa_port >= 0) {
+ l2->etype = BIT(dsa_port);
+ l2->etype |= !data->vlan.num ? BIT(15) : 0;
+ } else if (data->pppoe.num) {
+ l2->etype = ETH_P_PPP_SES;
+ }
+
+ return 0;
+}
+
+static int airoha_ppe_foe_entry_set_ipv4_tuple(struct airoha_foe_entry *hwe,
+ struct airoha_flow_data *data,
+ bool egress)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ struct airoha_foe_ipv4_tuple *t;
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV4_HNAPT:
+ if (egress) {
+ t = &hwe->ipv4.new_tuple;
+ break;
+ }
+ fallthrough;
+ case PPE_PKT_TYPE_IPV4_DSLITE:
+ case PPE_PKT_TYPE_IPV4_ROUTE:
+ t = &hwe->ipv4.orig_tuple;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ t->src_ip = be32_to_cpu(data->v4.src_addr);
+ t->dest_ip = be32_to_cpu(data->v4.dst_addr);
+
+ if (type != PPE_PKT_TYPE_IPV4_ROUTE) {
+ t->src_port = be16_to_cpu(data->src_port);
+ t->dest_port = be16_to_cpu(data->dst_port);
+ }
+
+ return 0;
+}
+
+static int airoha_ppe_foe_entry_set_ipv6_tuple(struct airoha_foe_entry *hwe,
+ struct airoha_flow_data *data)
+
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ u32 *src, *dest;
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV6_ROUTE_5T:
+ case PPE_PKT_TYPE_IPV6_6RD:
+ hwe->ipv6.src_port = be16_to_cpu(data->src_port);
+ hwe->ipv6.dest_port = be16_to_cpu(data->dst_port);
+ fallthrough;
+ case PPE_PKT_TYPE_IPV6_ROUTE_3T:
+ src = hwe->ipv6.src_ip;
+ dest = hwe->ipv6.dest_ip;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ ipv6_addr_be32_to_cpu(src, data->v6.src_addr.s6_addr32);
+ ipv6_addr_be32_to_cpu(dest, data->v6.dst_addr.s6_addr32);
+
+ return 0;
+}
+
+static u32 airoha_ppe_foe_get_entry_hash(struct airoha_ppe *ppe,
+ struct airoha_foe_entry *hwe)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ u32 ppe_hash_mask = airoha_ppe_get_total_num_entries(ppe) - 1;
+ u32 hash, hv1, hv2, hv3;
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV4_ROUTE:
+ case PPE_PKT_TYPE_IPV4_HNAPT:
+ hv1 = hwe->ipv4.orig_tuple.ports;
+ hv2 = hwe->ipv4.orig_tuple.dest_ip;
+ hv3 = hwe->ipv4.orig_tuple.src_ip;
+ break;
+ case PPE_PKT_TYPE_IPV6_ROUTE_3T:
+ case PPE_PKT_TYPE_IPV6_ROUTE_5T:
+ hv1 = hwe->ipv6.src_ip[3] ^ hwe->ipv6.dest_ip[3];
+ hv1 ^= hwe->ipv6.ports;
+
+ hv2 = hwe->ipv6.src_ip[2] ^ hwe->ipv6.dest_ip[2];
+ hv2 ^= hwe->ipv6.dest_ip[0];
+
+ hv3 = hwe->ipv6.src_ip[1] ^ hwe->ipv6.dest_ip[1];
+ hv3 ^= hwe->ipv6.src_ip[0];
+ break;
+ case PPE_PKT_TYPE_BRIDGE: {
+ struct airoha_foe_mac_info *l2 = &hwe->bridge.l2;
+
+ hv1 = l2->common.src_mac_hi & 0xffff;
+ hv1 = hv1 << 16 | l2->src_mac_lo;
+
+ hv2 = l2->common.dest_mac_lo;
+ hv2 = hv2 << 16;
+ hv2 = hv2 | ((l2->common.src_mac_hi & 0xffff0000) >> 16);
+
+ hv3 = l2->common.dest_mac_hi;
+ break;
+ }
+ case PPE_PKT_TYPE_IPV4_DSLITE:
+ case PPE_PKT_TYPE_IPV6_6RD:
+ default:
+ WARN_ON_ONCE(1);
+ return ppe_hash_mask;
+ }
+
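+	/* Mix the three hash words and fold the result into the
+	 * [0, total number of FOE entries) index range.
+	 */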
+ hash = (hv1 & hv2) | ((~hv1) & hv3);
+ hash = (hash >> 24) | ((hash & 0xffffff) << 8);
+ hash ^= hv1 ^ hv2 ^ hv3;
+ hash ^= hash >> 16;
+ hash &= ppe_hash_mask;
+
+ return hash;
+}
+
+static int airoha_ppe_foe_get_flow_stats_index(struct airoha_ppe *ppe,
+ u32 hash, u32 *index)
+{
+ int ppe_num_stats_entries;
+
+ ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
+ if (ppe_num_stats_entries < 0)
+ return ppe_num_stats_entries;
+
+ *index = hash >= ppe_num_stats_entries ? hash - PPE_STATS_NUM_ENTRIES
+ : hash;
+
+ return 0;
+}
+
+static void airoha_ppe_foe_flow_stat_entry_reset(struct airoha_ppe *ppe,
+ struct airoha_npu *npu,
+ int index)
+{
+ memset_io(&npu->stats[index], 0, sizeof(*npu->stats));
+ memset(&ppe->foe_stats[index], 0, sizeof(*ppe->foe_stats));
+}
+
+static void airoha_ppe_foe_flow_stats_reset(struct airoha_ppe *ppe,
+ struct airoha_npu *npu)
+{
+ int i, ppe_num_stats_entries;
+
+ ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
+ if (ppe_num_stats_entries < 0)
+ return;
+
+ for (i = 0; i < ppe_num_stats_entries; i++)
+ airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, i);
+}
+
+static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe,
+ struct airoha_npu *npu,
+ struct airoha_foe_entry *hwe,
+ u32 hash)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ u32 index, pse_port, val, *data, *ib2, *meter;
+ int ppe_num_stats_entries;
+ u8 nbq;
+
+ ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
+ if (ppe_num_stats_entries < 0)
+ return;
+
+ if (airoha_ppe_foe_get_flow_stats_index(ppe, hash, &index))
+ return;
+
+ if (index >= ppe_num_stats_entries)
+ return;
+
+ if (type == PPE_PKT_TYPE_BRIDGE) {
+ data = &hwe->bridge.data;
+ ib2 = &hwe->bridge.ib2;
+ meter = &hwe->bridge.l2.meter;
+ } else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+ data = &hwe->ipv6.data;
+ ib2 = &hwe->ipv6.ib2;
+ meter = &hwe->ipv6.meter;
+ } else {
+ data = &hwe->ipv4.data;
+ ib2 = &hwe->ipv4.ib2;
+ meter = &hwe->ipv4.l2.meter;
+ }
+
+ pse_port = FIELD_GET(AIROHA_FOE_IB2_PSE_PORT, *ib2);
+ if (pse_port == FE_PSE_PORT_CDM4)
+ return;
+
+ airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, index);
+
+ val = FIELD_GET(AIROHA_FOE_CHANNEL | AIROHA_FOE_QID, *data);
+ *data = (*data & ~AIROHA_FOE_ACTDP) |
+ FIELD_PREP(AIROHA_FOE_ACTDP, val);
+
+ val = *ib2 & (AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
+ AIROHA_FOE_IB2_PSE_QOS | AIROHA_FOE_IB2_FAST_PATH);
+ *meter |= FIELD_PREP(AIROHA_FOE_TUNNEL_MTU, val);
+
+ nbq = pse_port == 1 ? 6 : 5;
+ *ib2 &= ~(AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
+ AIROHA_FOE_IB2_PSE_QOS);
+ *ib2 |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, 6) |
+ FIELD_PREP(AIROHA_FOE_IB2_NBQ, nbq);
+}
+
+static struct airoha_foe_entry *
+airoha_ppe_foe_get_entry_locked(struct airoha_ppe *ppe, u32 hash)
+{
+ u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
+
+ lockdep_assert_held(&ppe_lock);
+
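+	/* Entries backed by SRAM are read out through the PPE RAM access
+	 * registers to refresh the CPU-side copy before returning it.
+	 */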
+ if (hash < sram_num_entries) {
+ u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry);
+ bool ppe2 = hash >= PPE_SRAM_NUM_ENTRIES;
+ struct airoha_eth *eth = ppe->eth;
+ u32 val;
+ int i;
+
+ airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2),
+ FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) |
+ PPE_SRAM_CTRL_REQ_MASK);
+ if (read_poll_timeout_atomic(airoha_fe_rr, val,
+ val & PPE_SRAM_CTRL_ACK_MASK,
+ 10, 100, false, eth,
+ REG_PPE_RAM_CTRL(ppe2)))
+ return NULL;
+
+ for (i = 0; i < sizeof(struct airoha_foe_entry) / sizeof(*hwe);
+ i++)
+ hwe[i] = airoha_fe_rr(eth,
+ REG_PPE_RAM_ENTRY(ppe2, i));
+ }
+
+ return ppe->foe + hash * sizeof(struct airoha_foe_entry);
+}
+
+struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
+ u32 hash)
+{
+ struct airoha_foe_entry *hwe;
+
+ spin_lock_bh(&ppe_lock);
+ hwe = airoha_ppe_foe_get_entry_locked(ppe, hash);
+ spin_unlock_bh(&ppe_lock);
+
+ return hwe;
+}
+
+static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e,
+ struct airoha_foe_entry *hwe)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
+ int len;
+
+ if ((hwe->ib1 ^ e->data.ib1) & AIROHA_FOE_IB1_BIND_UDP)
+ return false;
+
+ if (type > PPE_PKT_TYPE_IPV4_DSLITE)
+ len = offsetof(struct airoha_foe_entry, ipv6.data);
+ else
+ len = offsetof(struct airoha_foe_entry, ipv4.ib2);
+
+ return !memcmp(&e->data.d, &hwe->d, len - sizeof(hwe->ib1));
+}
+
+static int airoha_ppe_foe_commit_sram_entry(struct airoha_ppe *ppe, u32 hash)
+{
+ struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
+ bool ppe2 = hash >= PPE_SRAM_NUM_ENTRIES;
+ u32 *ptr = (u32 *)hwe, val;
+ int i;
+
+ for (i = 0; i < sizeof(*hwe) / sizeof(*ptr); i++)
+ airoha_fe_wr(ppe->eth, REG_PPE_RAM_ENTRY(ppe2, i), ptr[i]);
+
+ wmb();
+ airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2),
+ FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) |
+ PPE_SRAM_CTRL_WR_MASK | PPE_SRAM_CTRL_REQ_MASK);
+
+ return read_poll_timeout_atomic(airoha_fe_rr, val,
+ val & PPE_SRAM_CTRL_ACK_MASK,
+ 10, 100, false, ppe->eth,
+ REG_PPE_RAM_CTRL(ppe2));
+}
+
+static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
+ struct airoha_foe_entry *e,
+ u32 hash, bool rx_wlan)
+{
+ u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
+ struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
+ u32 ts = airoha_ppe_get_timestamp(ppe);
+ struct airoha_eth *eth = ppe->eth;
+ struct airoha_npu *npu;
+ int err = 0;
+
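+	/* Copy the entry body first; the barrier orders it against the ib1
+	 * update below.
+	 */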
+ memcpy(&hwe->d, &e->d, sizeof(*hwe) - sizeof(hwe->ib1));
+ wmb();
+
+ e->ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
+ e->ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_TIMESTAMP, ts);
+ hwe->ib1 = e->ib1;
+
+ rcu_read_lock();
+
+ npu = rcu_dereference(eth->npu);
+ if (!npu) {
+ err = -ENODEV;
+ goto unlock;
+ }
+
+ if (!rx_wlan)
+ airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, hash);
+
+ if (hash < sram_num_entries)
+ err = airoha_ppe_foe_commit_sram_entry(ppe, hash);
+unlock:
+ rcu_read_unlock();
+
+ return err;
+}
+
+static void airoha_ppe_foe_remove_flow(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ lockdep_assert_held(&ppe_lock);
+
+ hlist_del_init(&e->list);
+ if (e->hash != 0xffff) {
+ e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE;
+ e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE,
+ AIROHA_FOE_STATE_INVALID);
+ airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash, false);
+ e->hash = 0xffff;
+ }
+ if (e->type == FLOW_TYPE_L2_SUBFLOW) {
+ hlist_del_init(&e->l2_subflow_node);
+ kfree(e);
+ }
+}
+
+static void airoha_ppe_foe_remove_l2_flow(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ struct hlist_head *head = &e->l2_flows;
+ struct hlist_node *n;
+
+ lockdep_assert_held(&ppe_lock);
+
+ rhashtable_remove_fast(&ppe->l2_flows, &e->l2_node,
+ airoha_l2_flow_table_params);
+ hlist_for_each_entry_safe(e, n, head, l2_subflow_node)
+ airoha_ppe_foe_remove_flow(ppe, e);
+}
+
+static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ spin_lock_bh(&ppe_lock);
+
+ if (e->type == FLOW_TYPE_L2)
+ airoha_ppe_foe_remove_l2_flow(ppe, e);
+ else
+ airoha_ppe_foe_remove_flow(ppe, e);
+
+ spin_unlock_bh(&ppe_lock);
+}
+
+static int
+airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e,
+ u32 hash, bool rx_wlan)
+{
+ u32 mask = AIROHA_FOE_IB1_BIND_PACKET_TYPE | AIROHA_FOE_IB1_BIND_UDP;
+ struct airoha_foe_entry *hwe_p, hwe;
+ struct airoha_flow_table_entry *f;
+ int type;
+
+ hwe_p = airoha_ppe_foe_get_entry_locked(ppe, hash);
+ if (!hwe_p)
+ return -EINVAL;
+
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+ if (!f)
+ return -ENOMEM;
+
+ hlist_add_head(&f->l2_subflow_node, &e->l2_flows);
+ f->type = FLOW_TYPE_L2_SUBFLOW;
+ f->hash = hash;
+
+ memcpy(&hwe, hwe_p, sizeof(*hwe_p));
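+	/* Keep the packet type and UDP flag from the hw entry, inherit the
+	 * remaining ib1 fields from the parent L2 flow.
+	 */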
+ hwe.ib1 = (hwe.ib1 & mask) | (e->data.ib1 & ~mask);
+
+ type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe.ib1);
+ if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+ memcpy(&hwe.ipv6.l2, &e->data.bridge.l2, sizeof(hwe.ipv6.l2));
+ hwe.ipv6.ib2 = e->data.bridge.ib2;
+		/* Setting smac_id to 0xf instructs the hw to keep the
+		 * original source mac address.
+		 */
+ hwe.ipv6.l2.src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID,
+ 0xf);
+ } else {
+ memcpy(&hwe.bridge.l2, &e->data.bridge.l2,
+ sizeof(hwe.bridge.l2));
+ hwe.bridge.ib2 = e->data.bridge.ib2;
+ if (type == PPE_PKT_TYPE_IPV4_HNAPT)
+ memcpy(&hwe.ipv4.new_tuple, &hwe.ipv4.orig_tuple,
+ sizeof(hwe.ipv4.new_tuple));
+ }
+
+ hwe.bridge.data = e->data.bridge.data;
+ airoha_ppe_foe_commit_entry(ppe, &hwe, hash, rx_wlan);
+
+ return 0;
+}
+
+static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
+ struct sk_buff *skb,
+ u32 hash, bool rx_wlan)
+{
+ struct airoha_flow_table_entry *e;
+ struct airoha_foe_bridge br = {};
+ struct airoha_foe_entry *hwe;
+ bool commit_done = false;
+ struct hlist_node *n;
+ u32 index, state;
+
+ spin_lock_bh(&ppe_lock);
+
+ hwe = airoha_ppe_foe_get_entry_locked(ppe, hash);
+ if (!hwe)
+ goto unlock;
+
+ state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
+ if (state == AIROHA_FOE_STATE_BIND)
+ goto unlock;
+
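+	/* Walk the sw flows hashed to this bucket, drop entries that lost
+	 * their hw binding and re-commit those matching the hw entry.
+	 */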
+ index = airoha_ppe_foe_get_entry_hash(ppe, hwe);
+ hlist_for_each_entry_safe(e, n, &ppe->foe_flow[index], list) {
+ if (e->type == FLOW_TYPE_L2_SUBFLOW) {
+ state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
+ if (state != AIROHA_FOE_STATE_BIND) {
+ e->hash = 0xffff;
+ airoha_ppe_foe_remove_flow(ppe, e);
+ }
+ continue;
+ }
+
+ if (!airoha_ppe_foe_compare_entry(e, hwe))
+ continue;
+
+ airoha_ppe_foe_commit_entry(ppe, &e->data, hash, rx_wlan);
+ commit_done = true;
+ e->hash = hash;
+ }
+
+ if (commit_done)
+ goto unlock;
+
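+ /* No exact sw match: look up the L2 bridge flows by MAC addresses and
+ * create a hw subflow if one is found.
+ */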
+ airoha_ppe_foe_set_bridge_addrs(&br, eth_hdr(skb));
+ e = rhashtable_lookup_fast(&ppe->l2_flows, &br,
+ airoha_l2_flow_table_params);
+ if (e)
+ airoha_ppe_foe_commit_subflow_entry(ppe, e, hash, rx_wlan);
+unlock:
+ spin_unlock_bh(&ppe_lock);
+}
+
+static int
+airoha_ppe_foe_l2_flow_commit_entry(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ struct airoha_flow_table_entry *prev;
+
+ e->type = FLOW_TYPE_L2;
+ prev = rhashtable_lookup_get_insert_fast(&ppe->l2_flows, &e->l2_node,
+ airoha_l2_flow_table_params);
+ if (!prev)
+ return 0;
+
+ if (IS_ERR(prev))
+ return PTR_ERR(prev);
+
+ return rhashtable_replace_fast(&ppe->l2_flows, &prev->l2_node,
+ &e->l2_node,
+ airoha_l2_flow_table_params);
+}
+
+static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
+ u32 hash;
+
+ if (type == PPE_PKT_TYPE_BRIDGE)
+ return airoha_ppe_foe_l2_flow_commit_entry(ppe, e);
+
+ hash = airoha_ppe_foe_get_entry_hash(ppe, &e->data);
+ e->type = FLOW_TYPE_L4;
+ e->hash = 0xffff;
+
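+ /* Stage the entry in the sw hash list; the hw entry is committed from
+ * airoha_ppe_foe_insert_entry() once the PPE reports an unbound flow
+ * matching this entry.
+ */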
+ spin_lock_bh(&ppe_lock);
+ hlist_add_head(&e->list, &ppe->foe_flow[hash]);
+ spin_unlock_bh(&ppe_lock);
+
+ return 0;
+}
+
+static int airoha_ppe_get_entry_idle_time(struct airoha_ppe *ppe, u32 ib1)
+{
+ u32 state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
+ u32 ts, ts_mask, now = airoha_ppe_get_timestamp(ppe);
+ int idle;
+
+ if (state == AIROHA_FOE_STATE_BIND) {
+ ts = FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, ib1);
+ ts_mask = AIROHA_FOE_IB1_BIND_TIMESTAMP;
+ } else {
+ ts = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, ib1);
+ now = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, now);
+ ts_mask = AIROHA_FOE_IB1_UNBIND_TIMESTAMP;
+ }
+ idle = now - ts;
+
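+ /* Account for wrap-around of the hw timestamp counter */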
+ return idle < 0 ? idle + ts_mask + 1 : idle;
+}
+
+static void
+airoha_ppe_foe_flow_l2_entry_update(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ int min_idle = airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
+ struct airoha_flow_table_entry *iter;
+ struct hlist_node *n;
+
+ lockdep_assert_held(&ppe_lock);
+
+ hlist_for_each_entry_safe(iter, n, &e->l2_flows, l2_subflow_node) {
+ struct airoha_foe_entry *hwe;
+ u32 ib1, state;
+ int idle;
+
+ hwe = airoha_ppe_foe_get_entry_locked(ppe, iter->hash);
+ if (!hwe)
+ continue;
+
+ ib1 = READ_ONCE(hwe->ib1);
+ state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
+ if (state != AIROHA_FOE_STATE_BIND) {
+ iter->hash = 0xffff;
+ airoha_ppe_foe_remove_flow(ppe, iter);
+ continue;
+ }
+
+ idle = airoha_ppe_get_entry_idle_time(ppe, ib1);
+ if (idle >= min_idle)
+ continue;
+
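+ /* Track the most recently active subflow and propagate its bind
+ * timestamp to the parent L2 entry.
+ */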
+ min_idle = idle;
+ e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
+ e->data.ib1 |= ib1 & AIROHA_FOE_IB1_BIND_TIMESTAMP;
+ }
+}
+
+static void airoha_ppe_foe_flow_entry_update(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ struct airoha_foe_entry *hwe_p, hwe = {};
+
+ spin_lock_bh(&ppe_lock);
+
+ if (e->type == FLOW_TYPE_L2) {
+ airoha_ppe_foe_flow_l2_entry_update(ppe, e);
+ goto unlock;
+ }
+
+ if (e->hash == 0xffff)
+ goto unlock;
+
+ hwe_p = airoha_ppe_foe_get_entry_locked(ppe, e->hash);
+ if (!hwe_p)
+ goto unlock;
+
+ memcpy(&hwe, hwe_p, sizeof(*hwe_p));
+ if (!airoha_ppe_foe_compare_entry(e, &hwe)) {
+ e->hash = 0xffff;
+ goto unlock;
+ }
+
+ e->data.ib1 = hwe.ib1;
+unlock:
+ spin_unlock_bh(&ppe_lock);
+}
+
+static int airoha_ppe_entry_idle_time(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ airoha_ppe_foe_flow_entry_update(ppe, e);
+
+ return airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
+}
+
+static int airoha_ppe_flow_offload_replace(struct airoha_eth *eth,
+ struct flow_cls_offload *f)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct airoha_flow_table_entry *e;
+ struct airoha_flow_data data = {};
+ struct net_device *odev = NULL;
+ struct flow_action_entry *act;
+ struct airoha_foe_entry hwe;
+ int err, i, offload_type;
+ u16 addr_type = 0;
+ u8 l4proto = 0;
+
+ if (rhashtable_lookup(&eth->flow_table, &f->cookie,
+ airoha_flow_table_params))
+ return -EEXIST;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
+ return -EOPNOTSUPP;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+ addr_type = match.key->addr_type;
+ if (flow_rule_has_control_flags(match.mask->flags,
+ f->common.extack))
+ return -EOPNOTSUPP;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ l4proto = match.key->ip_proto;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ switch (addr_type) {
+ case 0:
+ offload_type = PPE_PKT_TYPE_BRIDGE;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+ memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
+ memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
+ } else {
+ return -EOPNOTSUPP;
+ }
+ break;
+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+ offload_type = PPE_PKT_TYPE_IPV4_HNAPT;
+ break;
+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+ offload_type = PPE_PKT_TYPE_IPV6_ROUTE_5T;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_MANGLE:
+ if (offload_type == PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
+ if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
+ airoha_ppe_flow_mangle_eth(act, &data.eth);
+ break;
+ case FLOW_ACTION_REDIRECT:
+ odev = act->dev;
+ break;
+ case FLOW_ACTION_CSUM:
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
+ if (data.vlan.num == 2 ||
+ act->vlan.proto != htons(ETH_P_8021Q))
+ return -EOPNOTSUPP;
+
+ data.vlan.hdr[data.vlan.num].id = act->vlan.vid;
+ data.vlan.hdr[data.vlan.num].proto = act->vlan.proto;
+ data.vlan.num++;
+ break;
+ case FLOW_ACTION_VLAN_POP:
+ break;
+ case FLOW_ACTION_PPPOE_PUSH:
+ if (data.pppoe.num == 1 || data.vlan.num == 2)
+ return -EOPNOTSUPP;
+
+ data.pppoe.sid = act->pppoe.sid;
+ data.pppoe.num++;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (!is_valid_ether_addr(data.eth.h_source) ||
+ !is_valid_ether_addr(data.eth.h_dest))
+ return -EINVAL;
+
+ err = airoha_ppe_foe_entry_prepare(eth, &hwe, odev, offload_type,
+ &data, l4proto);
+ if (err)
+ return err;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports ports;
+
+ if (offload_type == PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
+ flow_rule_match_ports(rule, &ports);
+ data.src_port = ports.key->src;
+ data.dst_port = ports.key->dst;
+ } else if (offload_type != PPE_PKT_TYPE_BRIDGE) {
+ return -EOPNOTSUPP;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_match_ipv4_addrs addrs;
+
+ flow_rule_match_ipv4_addrs(rule, &addrs);
+ data.v4.src_addr = addrs.key->src;
+ data.v4.dst_addr = addrs.key->dst;
+ airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, false);
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_match_ipv6_addrs addrs;
+
+ flow_rule_match_ipv6_addrs(rule, &addrs);
+
+ data.v6.src_addr = addrs.key->src;
+ data.v6.dst_addr = addrs.key->dst;
+ airoha_ppe_foe_entry_set_ipv6_tuple(&hwe, &data);
+ }
+
+ flow_action_for_each(i, act, &rule->action) {
+ if (act->id != FLOW_ACTION_MANGLE)
+ continue;
+
+ if (offload_type == PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
+ switch (act->mangle.htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+ err = airoha_ppe_flow_mangle_ports(act, &data);
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ err = airoha_ppe_flow_mangle_ipv4(act, &data);
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
+ /* handled earlier */
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (err)
+ return err;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ err = airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, true);
+ if (err)
+ return err;
+ }
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->cookie = f->cookie;
+ memcpy(&e->data, &hwe, sizeof(e->data));
+
+ err = airoha_ppe_foe_flow_commit_entry(eth->ppe, e);
+ if (err)
+ goto free_entry;
+
+ err = rhashtable_insert_fast(&eth->flow_table, &e->node,
+ airoha_flow_table_params);
+ if (err < 0)
+ goto remove_foe_entry;
+
+ return 0;
+
+remove_foe_entry:
+ airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
+free_entry:
+ kfree(e);
+
+ return err;
+}
+
+static int airoha_ppe_flow_offload_destroy(struct airoha_eth *eth,
+ struct flow_cls_offload *f)
+{
+ struct airoha_flow_table_entry *e;
+
+ e = rhashtable_lookup(&eth->flow_table, &f->cookie,
+ airoha_flow_table_params);
+ if (!e)
+ return -ENOENT;
+
+ airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
+ rhashtable_remove_fast(&eth->flow_table, &e->node,
+ airoha_flow_table_params);
+ kfree(e);
+
+ return 0;
+}
+
+void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
+ struct airoha_foe_stats64 *stats)
+{
+ struct airoha_eth *eth = ppe->eth;
+ int ppe_num_stats_entries;
+ struct airoha_npu *npu;
+ u32 index;
+
+ ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
+ if (ppe_num_stats_entries < 0)
+ return;
+
+ if (airoha_ppe_foe_get_flow_stats_index(ppe, hash, &index))
+ return;
+
+ if (index >= ppe_num_stats_entries)
+ return;
+
+ rcu_read_lock();
+
+ npu = rcu_dereference(eth->npu);
+ if (npu) {
+ u64 packets = ppe->foe_stats[index].packets;
+ u64 bytes = ppe->foe_stats[index].bytes;
+ struct airoha_foe_stats npu_stats;
+
+ memcpy_fromio(&npu_stats, &npu->stats[index],
+ sizeof(*npu->stats));
+ stats->packets = packets << 32 | npu_stats.packets;
+ stats->bytes = bytes << 32 | npu_stats.bytes;
+ }
+
+ rcu_read_unlock();
+}
+
+static int airoha_ppe_flow_offload_stats(struct airoha_eth *eth,
+ struct flow_cls_offload *f)
+{
+ struct airoha_flow_table_entry *e;
+ u32 idle;
+
+ e = rhashtable_lookup(&eth->flow_table, &f->cookie,
+ airoha_flow_table_params);
+ if (!e)
+ return -ENOENT;
+
+ idle = airoha_ppe_entry_idle_time(eth->ppe, e);
+ f->stats.lastused = jiffies - idle * HZ;
+
+ if (e->hash != 0xffff) {
+ struct airoha_foe_stats64 stats = {};
+
+ airoha_ppe_foe_entry_get_stats(eth->ppe, e->hash, &stats);
+ f->stats.pkts += (stats.packets - e->stats.packets);
+ f->stats.bytes += (stats.bytes - e->stats.bytes);
+ e->stats = stats;
+ }
+
+ return 0;
+}
+
+static int airoha_ppe_flow_offload_cmd(struct airoha_eth *eth,
+ struct flow_cls_offload *f)
+{
+ switch (f->command) {
+ case FLOW_CLS_REPLACE:
+ return airoha_ppe_flow_offload_replace(eth, f);
+ case FLOW_CLS_DESTROY:
+ return airoha_ppe_flow_offload_destroy(eth, f);
+ case FLOW_CLS_STATS:
+ return airoha_ppe_flow_offload_stats(eth, f);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe)
+{
+ u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
+ struct airoha_foe_entry *hwe = ppe->foe;
+ int i, err = 0;
+
+ for (i = 0; i < sram_num_entries; i++) {
+ memset(&hwe[i], 0, sizeof(*hwe));
+ err = airoha_ppe_foe_commit_sram_entry(ppe, i);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth)
+{
+ struct airoha_npu *npu = airoha_npu_get(eth->dev);
+
+ if (IS_ERR(npu)) {
+ request_module("airoha-npu");
+ npu = airoha_npu_get(eth->dev);
+ }
+
+ return npu;
+}
+
+static int airoha_ppe_offload_setup(struct airoha_eth *eth)
+{
+ struct airoha_npu *npu = airoha_ppe_npu_get(eth);
+ struct airoha_ppe *ppe = eth->ppe;
+ int err, ppe_num_stats_entries;
+
+ if (IS_ERR(npu))
+ return PTR_ERR(npu);
+
+ err = npu->ops.ppe_init(npu);
+ if (err)
+ goto error_npu_put;
+
+ ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
+ if (ppe_num_stats_entries > 0) {
+ err = npu->ops.ppe_init_stats(npu, ppe->foe_stats_dma,
+ ppe_num_stats_entries);
+ if (err)
+ goto error_npu_put;
+ }
+
+ airoha_ppe_hw_init(ppe);
+ airoha_ppe_foe_flow_stats_reset(ppe, npu);
+
+ rcu_assign_pointer(eth->npu, npu);
+ synchronize_rcu();
+
+ return 0;
+
+error_npu_put:
+ airoha_npu_put(npu);
+
+ return err;
+}
+
+int airoha_ppe_setup_tc_block_cb(struct airoha_ppe_dev *dev, void *type_data)
+{
+ struct airoha_ppe *ppe = dev->priv;
+ struct airoha_eth *eth = ppe->eth;
+ int err = 0;
+
+ mutex_lock(&flow_offload_mutex);
+
+ if (!eth->npu)
+ err = airoha_ppe_offload_setup(eth);
+ if (!err)
+ err = airoha_ppe_flow_offload_cmd(eth, type_data);
+
+ mutex_unlock(&flow_offload_mutex);
+
+ return err;
+}
+
+void airoha_ppe_check_skb(struct airoha_ppe_dev *dev, struct sk_buff *skb,
+ u16 hash, bool rx_wlan)
+{
+ struct airoha_ppe *ppe = dev->priv;
+ u32 ppe_hash_mask = airoha_ppe_get_total_num_entries(ppe) - 1;
+ u16 now, diff;
+
+ if (hash > ppe_hash_mask)
+ return;
+
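+ /* Rate-limit FOE table checks to one every 100ms per hash entry */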
+ now = (u16)jiffies;
+ diff = now - ppe->foe_check_time[hash];
+ if (diff < HZ / 10)
+ return;
+
+ ppe->foe_check_time[hash] = now;
+ airoha_ppe_foe_insert_entry(ppe, skb, hash, rx_wlan);
+}
+
+void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port)
+{
+ struct airoha_eth *eth = port->qdma->eth;
+ struct net_device *dev = port->dev;
+ const u8 *addr = dev->dev_addr;
+ u32 val;
+
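+ /* Program the port MAC address into the PPE update memory: bytes 2-5
+ * are written at offset 0, bytes 0-1 at offset 1.
+ */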
+ val = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
+ airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
+ airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
+ FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
+ PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
+
+ val = (addr[0] << 8) | addr[1];
+ airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
+ airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
+ FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
+ FIELD_PREP(PPE_UPDMEM_OFFSET_MASK, 1) |
+ PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
+}
+
+struct airoha_ppe_dev *airoha_ppe_get_dev(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+ struct airoha_eth *eth;
+
+ np = of_parse_phandle(dev->of_node, "airoha,eth", 0);
+ if (!np)
+ return ERR_PTR(-ENODEV);
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ dev_err(dev, "cannot find device node %s\n", np->name);
+ of_node_put(np);
+ return ERR_PTR(-ENODEV);
+ }
+ of_node_put(np);
+
+ if (!try_module_get(THIS_MODULE)) {
+ dev_err(dev, "failed to get the device driver module\n");
+ goto error_pdev_put;
+ }
+
+ eth = platform_get_drvdata(pdev);
+ if (!eth)
+ goto error_module_put;
+
+ if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER)) {
+ dev_err(&pdev->dev,
+ "failed to create device link to consumer %s\n",
+ dev_name(dev));
+ goto error_module_put;
+ }
+
+ return &eth->ppe->dev;
+
+error_module_put:
+ module_put(THIS_MODULE);
+error_pdev_put:
+ platform_device_put(pdev);
+
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(airoha_ppe_get_dev);
+
+void airoha_ppe_put_dev(struct airoha_ppe_dev *dev)
+{
+ struct airoha_ppe *ppe = dev->priv;
+ struct airoha_eth *eth = ppe->eth;
+
+ module_put(THIS_MODULE);
+ put_device(eth->dev);
+}
+EXPORT_SYMBOL_GPL(airoha_ppe_put_dev);
+
+int airoha_ppe_init(struct airoha_eth *eth)
+{
+ int foe_size, err, ppe_num_stats_entries;
+ u32 ppe_num_entries;
+ struct airoha_ppe *ppe;
+
+ ppe = devm_kzalloc(eth->dev, sizeof(*ppe), GFP_KERNEL);
+ if (!ppe)
+ return -ENOMEM;
+
+ ppe->dev.ops.setup_tc_block_cb = airoha_ppe_setup_tc_block_cb;
+ ppe->dev.ops.check_skb = airoha_ppe_check_skb;
+ ppe->dev.priv = ppe;
+ ppe->eth = eth;
+ eth->ppe = ppe;
+
+ ppe_num_entries = airoha_ppe_get_total_num_entries(ppe);
+ foe_size = ppe_num_entries * sizeof(struct airoha_foe_entry);
+ ppe->foe = dmam_alloc_coherent(eth->dev, foe_size, &ppe->foe_dma,
+ GFP_KERNEL);
+ if (!ppe->foe)
+ return -ENOMEM;
+
+ ppe->foe_flow = devm_kzalloc(eth->dev,
+ ppe_num_entries * sizeof(*ppe->foe_flow),
+ GFP_KERNEL);
+ if (!ppe->foe_flow)
+ return -ENOMEM;
+
+ ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
+ if (ppe_num_stats_entries > 0) {
+ foe_size = ppe_num_stats_entries * sizeof(*ppe->foe_stats);
+ ppe->foe_stats = dmam_alloc_coherent(eth->dev, foe_size,
+ &ppe->foe_stats_dma,
+ GFP_KERNEL);
+ if (!ppe->foe_stats)
+ return -ENOMEM;
+ }
+
+ ppe->foe_check_time = devm_kzalloc(eth->dev,
+ ppe_num_entries * sizeof(*ppe->foe_check_time),
+ GFP_KERNEL);
+ if (!ppe->foe_check_time)
+ return -ENOMEM;
+
+ err = airoha_ppe_flush_sram_entries(ppe);
+ if (err)
+ return err;
+
+ err = rhashtable_init(&eth->flow_table, &airoha_flow_table_params);
+ if (err)
+ return err;
+
+ err = rhashtable_init(&ppe->l2_flows, &airoha_l2_flow_table_params);
+ if (err)
+ goto error_flow_table_destroy;
+
+ err = airoha_ppe_debugfs_init(ppe);
+ if (err)
+ goto error_l2_flow_table_destroy;
+
+ return 0;
+
+error_l2_flow_table_destroy:
+ rhashtable_destroy(&ppe->l2_flows);
+error_flow_table_destroy:
+ rhashtable_destroy(&eth->flow_table);
+
+ return err;
+}
+
+void airoha_ppe_deinit(struct airoha_eth *eth)
+{
+ struct airoha_npu *npu;
+
+ rcu_read_lock();
+ npu = rcu_dereference(eth->npu);
+ if (npu) {
+ npu->ops.ppe_deinit(npu);
+ airoha_npu_put(npu);
+ }
+ rcu_read_unlock();
+
+ rhashtable_destroy(&eth->ppe->l2_flows);
+ rhashtable_destroy(&eth->flow_table);
+ debugfs_remove(eth->ppe->debugfs_dir);
+}
diff --git a/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
new file mode 100644
index 000000000000..0112c41150bb
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include "airoha_eth.h"
+
+static void airoha_debugfs_ppe_print_tuple(struct seq_file *m,
+ void *src_addr, void *dest_addr,
+ u16 *src_port, u16 *dest_port,
+ bool ipv6)
+{
+ __be32 n_addr[IPV6_ADDR_WORDS];
+
+ if (ipv6) {
+ ipv6_addr_cpu_to_be32(n_addr, src_addr);
+ seq_printf(m, "%pI6", n_addr);
+ } else {
+ seq_printf(m, "%pI4h", src_addr);
+ }
+ if (src_port)
+ seq_printf(m, ":%d", *src_port);
+
+ seq_puts(m, "->");
+
+ if (ipv6) {
+ ipv6_addr_cpu_to_be32(n_addr, dest_addr);
+ seq_printf(m, "%pI6", n_addr);
+ } else {
+ seq_printf(m, "%pI4h", dest_addr);
+ }
+ if (dest_port)
+ seq_printf(m, ":%d", *dest_port);
+}
+
+static int airoha_ppe_debugfs_foe_show(struct seq_file *m, void *private,
+ bool bind)
+{
+ static const char *const ppe_type_str[] = {
+ [PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T",
+ [PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T",
+ [PPE_PKT_TYPE_BRIDGE] = "L2B",
+ [PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE",
+ [PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T",
+ [PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T",
+ [PPE_PKT_TYPE_IPV6_6RD] = "6RD",
+ };
+ static const char *const ppe_state_str[] = {
+ [AIROHA_FOE_STATE_INVALID] = "INV",
+ [AIROHA_FOE_STATE_UNBIND] = "UNB",
+ [AIROHA_FOE_STATE_BIND] = "BND",
+ [AIROHA_FOE_STATE_FIN] = "FIN",
+ };
+ struct airoha_ppe *ppe = m->private;
+ u32 ppe_num_entries = airoha_ppe_get_total_num_entries(ppe);
+ int i;
+
+ for (i = 0; i < ppe_num_entries; i++) {
+ const char *state_str, *type_str = "UNKNOWN";
+ void *src_addr = NULL, *dest_addr = NULL;
+ u16 *src_port = NULL, *dest_port = NULL;
+ struct airoha_foe_mac_info_common *l2;
+ unsigned char h_source[ETH_ALEN] = {};
+ struct airoha_foe_stats64 stats = {};
+ unsigned char h_dest[ETH_ALEN];
+ struct airoha_foe_entry *hwe;
+ u32 type, state, ib2, data;
+ bool ipv6 = false;
+
+ hwe = airoha_ppe_foe_get_entry(ppe, i);
+ if (!hwe)
+ continue;
+
+ state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
+ if (!state)
+ continue;
+
+ if (bind && state != AIROHA_FOE_STATE_BIND)
+ continue;
+
+ state_str = ppe_state_str[state % ARRAY_SIZE(ppe_state_str)];
+ type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ if (type < ARRAY_SIZE(ppe_type_str) && ppe_type_str[type])
+ type_str = ppe_type_str[type];
+
+ seq_printf(m, "%05x %s %7s", i, state_str, type_str);
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV4_HNAPT:
+ case PPE_PKT_TYPE_IPV4_DSLITE:
+ src_port = &hwe->ipv4.orig_tuple.src_port;
+ dest_port = &hwe->ipv4.orig_tuple.dest_port;
+ fallthrough;
+ case PPE_PKT_TYPE_IPV4_ROUTE:
+ src_addr = &hwe->ipv4.orig_tuple.src_ip;
+ dest_addr = &hwe->ipv4.orig_tuple.dest_ip;
+ break;
+ case PPE_PKT_TYPE_IPV6_ROUTE_5T:
+ src_port = &hwe->ipv6.src_port;
+ dest_port = &hwe->ipv6.dest_port;
+ fallthrough;
+ case PPE_PKT_TYPE_IPV6_ROUTE_3T:
+ case PPE_PKT_TYPE_IPV6_6RD:
+ src_addr = &hwe->ipv6.src_ip;
+ dest_addr = &hwe->ipv6.dest_ip;
+ ipv6 = true;
+ break;
+ default:
+ break;
+ }
+
+ if (src_addr && dest_addr) {
+ seq_puts(m, " orig=");
+ airoha_debugfs_ppe_print_tuple(m, src_addr, dest_addr,
+ src_port, dest_port, ipv6);
+ }
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV4_HNAPT:
+ case PPE_PKT_TYPE_IPV4_DSLITE:
+ src_port = &hwe->ipv4.new_tuple.src_port;
+ dest_port = &hwe->ipv4.new_tuple.dest_port;
+ fallthrough;
+ case PPE_PKT_TYPE_IPV4_ROUTE:
+ src_addr = &hwe->ipv4.new_tuple.src_ip;
+ dest_addr = &hwe->ipv4.new_tuple.dest_ip;
+ seq_puts(m, " new=");
+ airoha_debugfs_ppe_print_tuple(m, src_addr, dest_addr,
+ src_port, dest_port,
+ ipv6);
+ break;
+ default:
+ break;
+ }
+
+ if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+ data = hwe->ipv6.data;
+ ib2 = hwe->ipv6.ib2;
+ l2 = &hwe->ipv6.l2;
+ } else {
+ data = hwe->ipv4.data;
+ ib2 = hwe->ipv4.ib2;
+ l2 = &hwe->ipv4.l2.common;
+ *((__be16 *)&h_source[4]) =
+ cpu_to_be16(hwe->ipv4.l2.src_mac_lo);
+ }
+
+ airoha_ppe_foe_entry_get_stats(ppe, i, &stats);
+
+ *((__be32 *)h_dest) = cpu_to_be32(l2->dest_mac_hi);
+ *((__be16 *)&h_dest[4]) = cpu_to_be16(l2->dest_mac_lo);
+ *((__be32 *)h_source) = cpu_to_be32(l2->src_mac_hi);
+
+ seq_printf(m, " eth=%pM->%pM etype=%04x data=%08x"
+ " vlan=%d,%d ib1=%08x ib2=%08x"
+ " packets=%llu bytes=%llu\n",
+ h_source, h_dest, l2->etype, data,
+ l2->vlan1, l2->vlan2, hwe->ib1, ib2,
+ stats.packets, stats.bytes);
+ }
+
+ return 0;
+}
+
+static int airoha_ppe_debugfs_foe_all_show(struct seq_file *m, void *private)
+{
+ return airoha_ppe_debugfs_foe_show(m, private, false);
+}
+DEFINE_SHOW_ATTRIBUTE(airoha_ppe_debugfs_foe_all);
+
+static int airoha_ppe_debugfs_foe_bind_show(struct seq_file *m, void *private)
+{
+ return airoha_ppe_debugfs_foe_show(m, private, true);
+}
+DEFINE_SHOW_ATTRIBUTE(airoha_ppe_debugfs_foe_bind);
+
+int airoha_ppe_debugfs_init(struct airoha_ppe *ppe)
+{
+ ppe->debugfs_dir = debugfs_create_dir("ppe", NULL);
+ debugfs_create_file("entries", 0444, ppe->debugfs_dir, ppe,
+ &airoha_ppe_debugfs_foe_all_fops);
+ debugfs_create_file("bind", 0444, ppe->debugfs_dir, ppe,
+ &airoha_ppe_debugfs_foe_bind_fops);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/airoha/airoha_regs.h b/drivers/net/ethernet/airoha/airoha_regs.h
new file mode 100644
index 000000000000..ed4e3407f4a0
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_regs.h
@@ -0,0 +1,923 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#ifndef AIROHA_REGS_H
+#define AIROHA_REGS_H
+
+#include <linux/types.h>
+
+/* FE */
+#define PSE_BASE 0x0100
+#define CSR_IFC_BASE 0x0200
+#define CDM1_BASE 0x0400
+#define GDM1_BASE 0x0500
+#define PPE1_BASE 0x0c00
+#define PPE2_BASE 0x1c00
+
+#define CDM2_BASE 0x1400
+#define GDM2_BASE 0x1500
+
+#define GDM3_BASE 0x1100
+#define GDM4_BASE 0x2500
+
+#define CDM_BASE(_n) \
+ ((_n) == 2 ? CDM2_BASE : CDM1_BASE)
+#define GDM_BASE(_n) \
+ ((_n) == 4 ? GDM4_BASE : \
+ (_n) == 3 ? GDM3_BASE : \
+ (_n) == 2 ? GDM2_BASE : GDM1_BASE)
+
+#define REG_FE_DMA_GLO_CFG 0x0000
+#define FE_DMA_GLO_L2_SPACE_MASK GENMASK(7, 4)
+#define FE_DMA_GLO_PG_SZ_MASK BIT(3)
+
+#define REG_FE_RST_GLO_CFG 0x0004
+#define FE_RST_GDM4_MBI_ARB_MASK BIT(3)
+#define FE_RST_GDM3_MBI_ARB_MASK BIT(2)
+#define FE_RST_CORE_MASK BIT(0)
+
+#define REG_FE_FOE_TS 0x0010
+
+#define REG_FE_WAN_PORT 0x0024
+#define WAN1_EN_MASK BIT(16)
+#define WAN1_MASK GENMASK(12, 8)
+#define WAN0_MASK GENMASK(4, 0)
+
+#define REG_FE_WAN_MAC_H 0x0030
+#define REG_FE_LAN_MAC_H 0x0040
+
+#define REG_FE_MAC_LMIN(_n) ((_n) + 0x04)
+#define REG_FE_MAC_LMAX(_n) ((_n) + 0x08)
+
+#define REG_FE_CDM1_OQ_MAP0 0x0050
+#define REG_FE_CDM1_OQ_MAP1 0x0054
+#define REG_FE_CDM1_OQ_MAP2 0x0058
+#define REG_FE_CDM1_OQ_MAP3 0x005c
+
+#define REG_FE_PCE_CFG 0x0070
+#define PCE_DPI_EN_MASK BIT(2)
+#define PCE_KA_EN_MASK BIT(1)
+#define PCE_MC_EN_MASK BIT(0)
+
+#define REG_FE_PSE_QUEUE_CFG_WR 0x0080
+#define PSE_CFG_PORT_ID_MASK GENMASK(27, 24)
+#define PSE_CFG_QUEUE_ID_MASK GENMASK(20, 16)
+#define PSE_CFG_WR_EN_MASK BIT(8)
+#define PSE_CFG_OQRSV_SEL_MASK BIT(0)
+
+#define REG_FE_PSE_QUEUE_CFG_VAL 0x0084
+#define PSE_CFG_OQ_RSV_MASK GENMASK(13, 0)
+
+#define PSE_FQ_CFG 0x008c
+#define PSE_FQ_LIMIT_MASK GENMASK(14, 0)
+
+#define REG_FE_PSE_BUF_SET 0x0090
+#define PSE_SHARE_USED_LTHD_MASK GENMASK(31, 16)
+#define PSE_ALLRSV_MASK GENMASK(14, 0)
+
+#define REG_PSE_SHARE_USED_THD 0x0094
+#define PSE_SHARE_USED_MTHD_MASK GENMASK(31, 16)
+#define PSE_SHARE_USED_HTHD_MASK GENMASK(15, 0)
+
+#define REG_GDM_MISC_CFG 0x0148
+#define GDM2_RDM_ACK_WAIT_PREF_MASK BIT(9)
+#define GDM2_CHN_VLD_MODE_MASK BIT(5)
+
+#define REG_FE_CSR_IFC_CFG CSR_IFC_BASE
+#define FE_IFC_EN_MASK BIT(0)
+
+#define REG_FE_VIP_PORT_EN 0x01f0
+#define REG_FE_IFC_PORT_EN 0x01f4
+
+#define REG_PSE_IQ_REV1 (PSE_BASE + 0x08)
+#define PSE_IQ_RES1_P2_MASK GENMASK(23, 16)
+
+#define REG_PSE_IQ_REV2 (PSE_BASE + 0x0c)
+#define PSE_IQ_RES2_P5_MASK GENMASK(15, 8)
+#define PSE_IQ_RES2_P4_MASK GENMASK(7, 0)
+
+#define REG_FE_VIP_EN(_n) (0x0300 + ((_n) << 3))
+#define PATN_FCPU_EN_MASK BIT(7)
+#define PATN_SWP_EN_MASK BIT(6)
+#define PATN_DP_EN_MASK BIT(5)
+#define PATN_SP_EN_MASK BIT(4)
+#define PATN_TYPE_MASK GENMASK(3, 1)
+#define PATN_EN_MASK BIT(0)
+
+#define REG_FE_VIP_PATN(_n) (0x0304 + ((_n) << 3))
+#define PATN_DP_MASK GENMASK(31, 16)
+#define PATN_SP_MASK GENMASK(15, 0)
+
+#define REG_CDM_VLAN_CTRL(_n) CDM_BASE(_n)
+#define CDM_VLAN_MASK GENMASK(31, 16)
+
+#define REG_CDM_FWD_CFG(_n) (CDM_BASE(_n) + 0x08)
+#define CDM_OAM_QSEL_MASK GENMASK(31, 27)
+#define CDM_VIP_QSEL_MASK GENMASK(24, 20)
+
+#define REG_CDM_CRSN_QSEL(_n, _m) (CDM_BASE(_n) + 0x10 + ((_m) << 2))
+#define CDM_CRSN_QSEL_REASON_MASK(_n) \
+ GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
+
+#define REG_GDM_FWD_CFG(_n) GDM_BASE(_n)
+#define GDM_PAD_EN_MASK BIT(28)
+#define GDM_DROP_CRC_ERR_MASK BIT(23)
+#define GDM_IP4_CKSUM_MASK BIT(22)
+#define GDM_TCP_CKSUM_MASK BIT(21)
+#define GDM_UDP_CKSUM_MASK BIT(20)
+#define GDM_STRIP_CRC_MASK BIT(16)
+#define GDM_UCFQ_MASK GENMASK(15, 12)
+#define GDM_BCFQ_MASK GENMASK(11, 8)
+#define GDM_MCFQ_MASK GENMASK(7, 4)
+#define GDM_OCFQ_MASK GENMASK(3, 0)
+
+#define REG_GDM_INGRESS_CFG(_n) (GDM_BASE(_n) + 0x10)
+#define GDM_INGRESS_FC_EN_MASK BIT(1)
+#define GDM_STAG_EN_MASK BIT(0)
+
+#define REG_GDM_LEN_CFG(_n) (GDM_BASE(_n) + 0x14)
+#define GDM_SHORT_LEN_MASK GENMASK(13, 0)
+#define GDM_LONG_LEN_MASK GENMASK(29, 16)
+
+#define REG_GDM_LPBK_CFG(_n) (GDM_BASE(_n) + 0x1c)
+#define LPBK_GAP_MASK GENMASK(31, 24)
+#define LPBK_LEN_MASK GENMASK(23, 10)
+#define LPBK_CHAN_MASK GENMASK(8, 4)
+#define LPBK_MODE_MASK GENMASK(3, 1)
+#define LBK_GAP_MODE_MASK BIT(3)
+#define LBK_LEN_MODE_MASK BIT(2)
+#define LBK_CHAN_MODE_MASK BIT(1)
+#define LPBK_EN_MASK BIT(0)
+
+#define REG_GDM_CHN_RLS(_n) (GDM_BASE(_n) + 0x20)
+#define MBI_RX_AGE_SEL_MASK GENMASK(26, 25)
+#define MBI_TX_AGE_SEL_MASK GENMASK(18, 17)
+
+#define REG_GDM_TXCHN_EN(_n) (GDM_BASE(_n) + 0x24)
+#define REG_GDM_RXCHN_EN(_n) (GDM_BASE(_n) + 0x28)
+
+#define REG_FE_CPORT_CFG (GDM1_BASE + 0x40)
+#define FE_CPORT_PAD BIT(26)
+#define FE_CPORT_PORT_XFC_MASK BIT(25)
+#define FE_CPORT_QUEUE_XFC_MASK BIT(24)
+
+#define REG_FE_GDM_MIB_CLEAR(_n) (GDM_BASE(_n) + 0xf0)
+#define FE_GDM_MIB_RX_CLEAR_MASK BIT(1)
+#define FE_GDM_MIB_TX_CLEAR_MASK BIT(0)
+
+#define REG_FE_GDM_MIB_CFG(_n) (GDM_BASE(_n) + 0xf4)
+#define FE_STRICT_RFC2819_MODE_MASK BIT(31)
+#define FE_GDM_TX_MIB_SPLIT_EN_MASK BIT(17)
+#define FE_GDM_RX_MIB_SPLIT_EN_MASK BIT(16)
+#define FE_TX_MIB_ID_MASK GENMASK(15, 8)
+#define FE_RX_MIB_ID_MASK GENMASK(7, 0)
+
+#define REG_FE_GDM_TX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x104)
+#define REG_FE_GDM_TX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x10c)
+#define REG_FE_GDM_TX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x110)
+#define REG_FE_GDM_TX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x114)
+#define REG_FE_GDM_TX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x118)
+#define REG_FE_GDM_TX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x11c)
+#define REG_FE_GDM_TX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x120)
+#define REG_FE_GDM_TX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x124)
+#define REG_FE_GDM_TX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x128)
+#define REG_FE_GDM_TX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x12c)
+#define REG_FE_GDM_TX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x130)
+#define REG_FE_GDM_TX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x134)
+#define REG_FE_GDM_TX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x138)
+#define REG_FE_GDM_TX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x13c)
+#define REG_FE_GDM_TX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x140)
+
+#define REG_FE_GDM_RX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x148)
+#define REG_FE_GDM_RX_FC_DROP_CNT(_n) (GDM_BASE(_n) + 0x14c)
+#define REG_FE_GDM_RX_RC_DROP_CNT(_n) (GDM_BASE(_n) + 0x150)
+#define REG_FE_GDM_RX_OVERFLOW_DROP_CNT(_n) (GDM_BASE(_n) + 0x154)
+#define REG_FE_GDM_RX_ERROR_DROP_CNT(_n) (GDM_BASE(_n) + 0x158)
+#define REG_FE_GDM_RX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x15c)
+#define REG_FE_GDM_RX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x160)
+#define REG_FE_GDM_RX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x164)
+#define REG_FE_GDM_RX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x168)
+#define REG_FE_GDM_RX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x16c)
+#define REG_FE_GDM_RX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x170)
+#define REG_FE_GDM_RX_ETH_CRC_ERR_CNT(_n) (GDM_BASE(_n) + 0x174)
+#define REG_FE_GDM_RX_ETH_FRAG_CNT(_n) (GDM_BASE(_n) + 0x178)
+#define REG_FE_GDM_RX_ETH_JABBER_CNT(_n) (GDM_BASE(_n) + 0x17c)
+#define REG_FE_GDM_RX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x180)
+#define REG_FE_GDM_RX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x184)
+#define REG_FE_GDM_RX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x188)
+#define REG_FE_GDM_RX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x18c)
+#define REG_FE_GDM_RX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x190)
+#define REG_FE_GDM_RX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x194)
+#define REG_FE_GDM_RX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x198)
+#define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x19c)
+
+#define REG_GDM_SRC_PORT_SET(_n) (GDM_BASE(_n) + 0x23c)
+#define GDM_SPORT_OFF2_MASK GENMASK(19, 16)
+#define GDM_SPORT_OFF1_MASK GENMASK(15, 12)
+#define GDM_SPORT_OFF0_MASK GENMASK(11, 8)
+
+#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280)
+#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284)
+#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288)
+#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x28c)
+
+#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x290)
+#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x294)
+#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x298)
+#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x29c)
+#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2b8)
+#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2bc)
+#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2c0)
+#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2c4)
+#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2c8)
+#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2cc)
+#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2e8)
+#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2ec)
+#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2f0)
+#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2f4)
+#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2f8)
+#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2fc)
+
+#define REG_PPE_GLO_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x200)
+#define PPE_GLO_CFG_BUSY_MASK BIT(31)
+#define PPE_GLO_CFG_FLOW_DROP_UPDATE_MASK BIT(9)
+#define PPE_GLO_CFG_PSE_HASH_OFS_MASK BIT(6)
+#define PPE_GLO_CFG_PPE_BSWAP_MASK BIT(5)
+#define PPE_GLO_CFG_TTL_DROP_MASK BIT(4)
+#define PPE_GLO_CFG_IP4_CS_DROP_MASK BIT(3)
+#define PPE_GLO_CFG_IP4_L4_CS_DROP_MASK BIT(2)
+#define PPE_GLO_CFG_EN_MASK BIT(0)
+
+#define REG_PPE_PPE_FLOW_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x204)
+#define PPE_FLOW_CFG_IP6_HASH_GRE_KEY_MASK BIT(20)
+#define PPE_FLOW_CFG_IP4_HASH_GRE_KEY_MASK BIT(19)
+#define PPE_FLOW_CFG_IP4_HASH_FLOW_LABEL_MASK BIT(18)
+#define PPE_FLOW_CFG_IP4_NAT_FRAG_MASK BIT(17)
+#define PPE_FLOW_CFG_IP_PROTO_BLACKLIST_MASK BIT(16)
+#define PPE_FLOW_CFG_IP4_DSLITE_MASK BIT(14)
+#define PPE_FLOW_CFG_IP4_NAPT_MASK BIT(13)
+#define PPE_FLOW_CFG_IP4_NAT_MASK BIT(12)
+#define PPE_FLOW_CFG_IP6_6RD_MASK BIT(10)
+#define PPE_FLOW_CFG_IP6_5T_ROUTE_MASK BIT(9)
+#define PPE_FLOW_CFG_IP6_3T_ROUTE_MASK BIT(8)
+#define PPE_FLOW_CFG_IP4_UDP_FRAG_MASK BIT(7)
+#define PPE_FLOW_CFG_IP4_TCP_FRAG_MASK BIT(6)
+
+#define REG_PPE_IP_PROTO_CHK(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x208)
+#define PPE_IP_PROTO_CHK_IPV4_MASK GENMASK(31, 16)
+#define PPE_IP_PROTO_CHK_IPV6_MASK GENMASK(15, 0)
+
+#define REG_PPE_TB_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x21c)
+#define PPE_SRAM_TB_NUM_ENTRY_MASK GENMASK(26, 24)
+#define PPE_TB_CFG_KEEPALIVE_MASK GENMASK(13, 12)
+#define PPE_TB_CFG_AGE_TCP_FIN_MASK BIT(11)
+#define PPE_TB_CFG_AGE_UDP_MASK BIT(10)
+#define PPE_TB_CFG_AGE_TCP_MASK BIT(9)
+#define PPE_TB_CFG_AGE_UNBIND_MASK BIT(8)
+#define PPE_TB_CFG_AGE_NON_L4_MASK BIT(7)
+#define PPE_TB_CFG_AGE_PREBIND_MASK BIT(6)
+#define PPE_TB_CFG_SEARCH_MISS_MASK GENMASK(5, 4)
+#define PPE_TB_ENTRY_SIZE_MASK BIT(3)
+#define PPE_DRAM_TB_NUM_ENTRY_MASK GENMASK(2, 0)
+
+#define REG_PPE_TB_BASE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x220)
+
+#define REG_PPE_BIND_RATE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x228)
+#define PPE_BIND_RATE_L2B_BIND_MASK GENMASK(31, 16)
+#define PPE_BIND_RATE_BIND_MASK GENMASK(15, 0)
+
+#define REG_PPE_BIND_LIMIT0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x22c)
+#define PPE_BIND_LIMIT0_HALF_MASK GENMASK(29, 16)
+#define PPE_BIND_LIMIT0_QUARTER_MASK GENMASK(13, 0)
+
+#define REG_PPE_BIND_LIMIT1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x230)
+#define PPE_BIND_LIMIT1_NON_L4_MASK GENMASK(23, 16)
+#define PPE_BIND_LIMIT1_FULL_MASK GENMASK(13, 0)
+
+#define REG_PPE_BND_AGE0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x23c)
+#define PPE_BIND_AGE0_DELTA_NON_L4 GENMASK(30, 16)
+#define PPE_BIND_AGE0_DELTA_UDP GENMASK(14, 0)
+
+#define REG_PPE_UNBIND_AGE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x238)
+#define PPE_UNBIND_AGE_MIN_PACKETS_MASK GENMASK(31, 16)
+#define PPE_UNBIND_AGE_DELTA_MASK GENMASK(7, 0)
+
+#define REG_PPE_BND_AGE1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x240)
+#define PPE_BIND_AGE1_DELTA_TCP_FIN GENMASK(30, 16)
+#define PPE_BIND_AGE1_DELTA_TCP GENMASK(14, 0)
+
+#define REG_PPE_HASH_SEED(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x244)
+#define PPE_HASH_SEED 0x12345678
+
+#define REG_PPE_DFT_CPORT0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x248)
+#define DFT_CPORT_MASK(_n) GENMASK(3 + ((_n) << 2), ((_n) << 2))
+
+#define REG_PPE_DFT_CPORT1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x24c)
+
+#define REG_PPE_TB_HASH_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x250)
+#define PPE_DRAM_HASH1_MODE_MASK GENMASK(31, 28)
+#define PPE_DRAM_HASH1_EN_MASK BIT(24)
+#define PPE_DRAM_HASH0_MODE_MASK GENMASK(23, 20)
+#define PPE_DRAM_TABLE_EN_MASK BIT(16)
+#define PPE_SRAM_HASH1_MODE_MASK GENMASK(15, 12)
+#define PPE_SRAM_HASH1_EN_MASK BIT(8)
+#define PPE_SRAM_HASH0_MODE_MASK GENMASK(7, 4)
+#define PPE_SRAM_TABLE_EN_MASK BIT(0)
+
+#define REG_PPE_MTU_BASE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x304)
+#define REG_PPE_MTU(_m, _n) (REG_PPE_MTU_BASE(_m) + ((_n) << 2))
+#define FP1_EGRESS_MTU_MASK GENMASK(29, 16)
+#define FP0_EGRESS_MTU_MASK GENMASK(13, 0)
+
+#define REG_PPE_RAM_CTRL(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x31c)
+#define PPE_SRAM_CTRL_ACK_MASK BIT(31)
+#define PPE_SRAM_CTRL_DUAL_SUCESS_MASK BIT(30)
+#define PPE_SRAM_CTRL_ENTRY_MASK GENMASK(23, 8)
+#define PPE_SRAM_WR_DUAL_DIRECTION_MASK BIT(2)
+#define PPE_SRAM_CTRL_WR_MASK BIT(1)
+#define PPE_SRAM_CTRL_REQ_MASK BIT(0)
+
+#define REG_PPE_RAM_BASE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x320)
+#define REG_PPE_RAM_ENTRY(_m, _n) (REG_PPE_RAM_BASE(_m) + ((_n) << 2))
+
+#define REG_UPDMEM_CTRL(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x370)
+#define PPE_UPDMEM_ACK_MASK BIT(31)
+#define PPE_UPDMEM_ADDR_MASK GENMASK(11, 8)
+#define PPE_UPDMEM_OFFSET_MASK GENMASK(7, 4)
+#define PPE_UPDMEM_SEL_MASK GENMASK(3, 2)
+#define PPE_UPDMEM_WR_MASK BIT(1)
+#define PPE_UPDMEM_REQ_MASK BIT(0)
+
+#define REG_UPDMEM_DATA(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x374)
+
+#define REG_IP_FRAG_FP 0x2010
+#define IP_ASSEMBLE_PORT_MASK GENMASK(24, 21)
+#define IP_ASSEMBLE_NBQ_MASK GENMASK(20, 16)
+#define IP_FRAGMENT_PORT_MASK GENMASK(8, 5)
+#define IP_FRAGMENT_NBQ_MASK GENMASK(4, 0)
+
+#define REG_MC_VLAN_EN 0x2100
+#define MC_VLAN_EN_MASK BIT(0)
+
+#define REG_MC_VLAN_CFG 0x2104
+#define MC_VLAN_CFG_CMD_DONE_MASK BIT(31)
+#define MC_VLAN_CFG_TABLE_ID_MASK GENMASK(21, 16)
+#define MC_VLAN_CFG_PORT_ID_MASK GENMASK(11, 8)
+#define MC_VLAN_CFG_TABLE_SEL_MASK BIT(4)
+#define MC_VLAN_CFG_RW_MASK BIT(0)
+
+#define REG_MC_VLAN_DATA 0x2108
+
+#define REG_SP_DFT_CPORT(_n) (0x20e0 + ((_n) << 2))
+#define SP_CPORT_DFT_MASK GENMASK(2, 0)
+#define SP_CPORT_MASK(_n) GENMASK(3 + ((_n) << 2), ((_n) << 2))
+
+#define REG_SRC_PORT_FC_MAP6 0x2298
+#define FC_ID_OF_SRC_PORT27_MASK GENMASK(28, 24)
+#define FC_ID_OF_SRC_PORT26_MASK GENMASK(20, 16)
+#define FC_ID_OF_SRC_PORT25_MASK GENMASK(12, 8)
+#define FC_ID_OF_SRC_PORT24_MASK GENMASK(4, 0)
+
+#define REG_CDM5_RX_OQ1_DROP_CNT 0x29d4
+
+/* QDMA */
+#define REG_QDMA_GLOBAL_CFG 0x0004
+#define GLOBAL_CFG_RX_2B_OFFSET_MASK BIT(31)
+#define GLOBAL_CFG_DMA_PREFERENCE_MASK GENMASK(30, 29)
+#define GLOBAL_CFG_CPU_TXR_RR_MASK BIT(28)
+#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK BIT(27)
+#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK BIT(26)
+#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK BIT(25)
+#define GLOBAL_CFG_OAM_MODIFY_MASK BIT(24)
+#define GLOBAL_CFG_RESET_MASK BIT(23)
+#define GLOBAL_CFG_RESET_DONE_MASK BIT(22)
+#define GLOBAL_CFG_MULTICAST_EN_MASK BIT(21)
+#define GLOBAL_CFG_IRQ1_EN_MASK BIT(20)
+#define GLOBAL_CFG_IRQ0_EN_MASK BIT(19)
+#define GLOBAL_CFG_LOOPCNT_EN_MASK BIT(18)
+#define GLOBAL_CFG_RD_BYPASS_WR_MASK BIT(17)
+#define GLOBAL_CFG_QDMA_LOOPBACK_MASK BIT(16)
+#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK GENMASK(13, 8)
+#define GLOBAL_CFG_CHECK_DONE_MASK BIT(7)
+#define GLOBAL_CFG_TX_WB_DONE_MASK BIT(6)
+#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK GENMASK(5, 4)
+#define GLOBAL_CFG_RX_DMA_BUSY_MASK BIT(3)
+#define GLOBAL_CFG_RX_DMA_EN_MASK BIT(2)
+#define GLOBAL_CFG_TX_DMA_BUSY_MASK BIT(1)
+#define GLOBAL_CFG_TX_DMA_EN_MASK BIT(0)
+
+#define REG_FWD_DSCP_BASE 0x0010
+#define REG_FWD_BUF_BASE 0x0014
+
+#define REG_HW_FWD_DSCP_CFG 0x0018
+#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK GENMASK(29, 28)
+#define HW_FWD_DSCP_SCATTER_LEN_MASK GENMASK(17, 16)
+#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK GENMASK(15, 0)
+
+#define REG_INT_STATUS(_n) \
+ (((_n) == 4) ? 0x0730 : \
+ ((_n) == 3) ? 0x0724 : \
+ ((_n) == 2) ? 0x0720 : \
+ ((_n) == 1) ? 0x0024 : 0x0020)
+
+#define REG_INT_ENABLE(_b, _n) \
+ (((_n) == 4) ? 0x0750 + ((_b) << 5) : \
+ ((_n) == 3) ? 0x0744 + ((_b) << 5) : \
+ ((_n) == 2) ? 0x0740 + ((_b) << 5) : \
+ ((_n) == 1) ? 0x002c + ((_b) << 3) : \
+ 0x0028 + ((_b) << 3))
+
+/* QDMA_CSR_INT_ENABLE1 */
+#define RX15_COHERENT_INT_MASK BIT(31)
+#define RX14_COHERENT_INT_MASK BIT(30)
+#define RX13_COHERENT_INT_MASK BIT(29)
+#define RX12_COHERENT_INT_MASK BIT(28)
+#define RX11_COHERENT_INT_MASK BIT(27)
+#define RX10_COHERENT_INT_MASK BIT(26)
+#define RX9_COHERENT_INT_MASK BIT(25)
+#define RX8_COHERENT_INT_MASK BIT(24)
+#define RX7_COHERENT_INT_MASK BIT(23)
+#define RX6_COHERENT_INT_MASK BIT(22)
+#define RX5_COHERENT_INT_MASK BIT(21)
+#define RX4_COHERENT_INT_MASK BIT(20)
+#define RX3_COHERENT_INT_MASK BIT(19)
+#define RX2_COHERENT_INT_MASK BIT(18)
+#define RX1_COHERENT_INT_MASK BIT(17)
+#define RX0_COHERENT_INT_MASK BIT(16)
+#define TX7_COHERENT_INT_MASK BIT(15)
+#define TX6_COHERENT_INT_MASK BIT(14)
+#define TX5_COHERENT_INT_MASK BIT(13)
+#define TX4_COHERENT_INT_MASK BIT(12)
+#define TX3_COHERENT_INT_MASK BIT(11)
+#define TX2_COHERENT_INT_MASK BIT(10)
+#define TX1_COHERENT_INT_MASK BIT(9)
+#define TX0_COHERENT_INT_MASK BIT(8)
+#define CNT_OVER_FLOW_INT_MASK BIT(7)
+#define IRQ1_FULL_INT_MASK BIT(5)
+#define IRQ1_INT_MASK BIT(4)
+#define HWFWD_DSCP_LOW_INT_MASK BIT(3)
+#define HWFWD_DSCP_EMPTY_INT_MASK BIT(2)
+#define IRQ0_FULL_INT_MASK BIT(1)
+#define IRQ0_INT_MASK BIT(0)
+
+#define RX_COHERENT_LOW_INT_MASK \
+ (RX15_COHERENT_INT_MASK | RX14_COHERENT_INT_MASK | \
+ RX13_COHERENT_INT_MASK | RX12_COHERENT_INT_MASK | \
+ RX11_COHERENT_INT_MASK | RX10_COHERENT_INT_MASK | \
+ RX9_COHERENT_INT_MASK | RX8_COHERENT_INT_MASK | \
+ RX7_COHERENT_INT_MASK | RX6_COHERENT_INT_MASK | \
+ RX5_COHERENT_INT_MASK | RX4_COHERENT_INT_MASK | \
+ RX3_COHERENT_INT_MASK | RX2_COHERENT_INT_MASK | \
+ RX1_COHERENT_INT_MASK | RX0_COHERENT_INT_MASK)
+
+#define RX_COHERENT_LOW_OFFSET __ffs(RX_COHERENT_LOW_INT_MASK)
+#define INT_RX0_MASK(_n) \
+ (((_n) << RX_COHERENT_LOW_OFFSET) & RX_COHERENT_LOW_INT_MASK)
+
+#define TX_COHERENT_LOW_INT_MASK \
+ (TX7_COHERENT_INT_MASK | TX6_COHERENT_INT_MASK | \
+ TX5_COHERENT_INT_MASK | TX4_COHERENT_INT_MASK | \
+ TX3_COHERENT_INT_MASK | TX2_COHERENT_INT_MASK | \
+ TX1_COHERENT_INT_MASK | TX0_COHERENT_INT_MASK)
+
+#define TX_DONE_INT_MASK(_n) \
+ ((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK \
+ : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
+
+#define INT_TX_MASK \
+ (IRQ1_INT_MASK | IRQ1_FULL_INT_MASK | \
+ IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
+
+/* QDMA_CSR_INT_ENABLE2 */
+#define RX15_NO_CPU_DSCP_INT_MASK BIT(31)
+#define RX14_NO_CPU_DSCP_INT_MASK BIT(30)
+#define RX13_NO_CPU_DSCP_INT_MASK BIT(29)
+#define RX12_NO_CPU_DSCP_INT_MASK BIT(28)
+#define RX11_NO_CPU_DSCP_INT_MASK BIT(27)
+#define RX10_NO_CPU_DSCP_INT_MASK BIT(26)
+#define RX9_NO_CPU_DSCP_INT_MASK BIT(25)
+#define RX8_NO_CPU_DSCP_INT_MASK BIT(24)
+#define RX7_NO_CPU_DSCP_INT_MASK BIT(23)
+#define RX6_NO_CPU_DSCP_INT_MASK BIT(22)
+#define RX5_NO_CPU_DSCP_INT_MASK BIT(21)
+#define RX4_NO_CPU_DSCP_INT_MASK BIT(20)
+#define RX3_NO_CPU_DSCP_INT_MASK BIT(19)
+#define RX2_NO_CPU_DSCP_INT_MASK BIT(18)
+#define RX1_NO_CPU_DSCP_INT_MASK BIT(17)
+#define RX0_NO_CPU_DSCP_INT_MASK BIT(16)
+#define RX15_DONE_INT_MASK BIT(15)
+#define RX14_DONE_INT_MASK BIT(14)
+#define RX13_DONE_INT_MASK BIT(13)
+#define RX12_DONE_INT_MASK BIT(12)
+#define RX11_DONE_INT_MASK BIT(11)
+#define RX10_DONE_INT_MASK BIT(10)
+#define RX9_DONE_INT_MASK BIT(9)
+#define RX8_DONE_INT_MASK BIT(8)
+#define RX7_DONE_INT_MASK BIT(7)
+#define RX6_DONE_INT_MASK BIT(6)
+#define RX5_DONE_INT_MASK BIT(5)
+#define RX4_DONE_INT_MASK BIT(4)
+#define RX3_DONE_INT_MASK BIT(3)
+#define RX2_DONE_INT_MASK BIT(2)
+#define RX1_DONE_INT_MASK BIT(1)
+#define RX0_DONE_INT_MASK BIT(0)
+
+#define RX_NO_CPU_DSCP_LOW_INT_MASK \
+ (RX15_NO_CPU_DSCP_INT_MASK | RX14_NO_CPU_DSCP_INT_MASK | \
+ RX13_NO_CPU_DSCP_INT_MASK | RX12_NO_CPU_DSCP_INT_MASK | \
+ RX11_NO_CPU_DSCP_INT_MASK | RX10_NO_CPU_DSCP_INT_MASK | \
+ RX9_NO_CPU_DSCP_INT_MASK | RX8_NO_CPU_DSCP_INT_MASK | \
+ RX7_NO_CPU_DSCP_INT_MASK | RX6_NO_CPU_DSCP_INT_MASK | \
+ RX5_NO_CPU_DSCP_INT_MASK | RX4_NO_CPU_DSCP_INT_MASK | \
+ RX3_NO_CPU_DSCP_INT_MASK | RX2_NO_CPU_DSCP_INT_MASK | \
+ RX1_NO_CPU_DSCP_INT_MASK | RX0_NO_CPU_DSCP_INT_MASK)
+
+#define RX_DONE_LOW_INT_MASK \
+ (RX15_DONE_INT_MASK | RX14_DONE_INT_MASK | \
+ RX13_DONE_INT_MASK | RX12_DONE_INT_MASK | \
+ RX11_DONE_INT_MASK | RX10_DONE_INT_MASK | \
+ RX9_DONE_INT_MASK | RX8_DONE_INT_MASK | \
+ RX7_DONE_INT_MASK | RX6_DONE_INT_MASK | \
+ RX5_DONE_INT_MASK | RX4_DONE_INT_MASK | \
+ RX3_DONE_INT_MASK | RX2_DONE_INT_MASK | \
+ RX1_DONE_INT_MASK | RX0_DONE_INT_MASK)
+
+#define RX_NO_CPU_DSCP_LOW_OFFSET __ffs(RX_NO_CPU_DSCP_LOW_INT_MASK)
+#define INT_RX1_MASK(_n) \
+ ((((_n) << RX_NO_CPU_DSCP_LOW_OFFSET) & RX_NO_CPU_DSCP_LOW_INT_MASK) | \
+ (RX_DONE_LOW_INT_MASK & (_n)))
+
+/* QDMA_CSR_INT_ENABLE3 */
+#define RX31_NO_CPU_DSCP_INT_MASK BIT(31)
+#define RX30_NO_CPU_DSCP_INT_MASK BIT(30)
+#define RX29_NO_CPU_DSCP_INT_MASK BIT(29)
+#define RX28_NO_CPU_DSCP_INT_MASK BIT(28)
+#define RX27_NO_CPU_DSCP_INT_MASK BIT(27)
+#define RX26_NO_CPU_DSCP_INT_MASK BIT(26)
+#define RX25_NO_CPU_DSCP_INT_MASK BIT(25)
+#define RX24_NO_CPU_DSCP_INT_MASK BIT(24)
+#define RX23_NO_CPU_DSCP_INT_MASK BIT(23)
+#define RX22_NO_CPU_DSCP_INT_MASK BIT(22)
+#define RX21_NO_CPU_DSCP_INT_MASK BIT(21)
+#define RX20_NO_CPU_DSCP_INT_MASK BIT(20)
+#define RX19_NO_CPU_DSCP_INT_MASK BIT(19)
+#define RX18_NO_CPU_DSCP_INT_MASK BIT(18)
+#define RX17_NO_CPU_DSCP_INT_MASK BIT(17)
+#define RX16_NO_CPU_DSCP_INT_MASK BIT(16)
+#define RX31_DONE_INT_MASK BIT(15)
+#define RX30_DONE_INT_MASK BIT(14)
+#define RX29_DONE_INT_MASK BIT(13)
+#define RX28_DONE_INT_MASK BIT(12)
+#define RX27_DONE_INT_MASK BIT(11)
+#define RX26_DONE_INT_MASK BIT(10)
+#define RX25_DONE_INT_MASK BIT(9)
+#define RX24_DONE_INT_MASK BIT(8)
+#define RX23_DONE_INT_MASK BIT(7)
+#define RX22_DONE_INT_MASK BIT(6)
+#define RX21_DONE_INT_MASK BIT(5)
+#define RX20_DONE_INT_MASK BIT(4)
+#define RX19_DONE_INT_MASK BIT(3)
+#define RX18_DONE_INT_MASK BIT(2)
+#define RX17_DONE_INT_MASK BIT(1)
+#define RX16_DONE_INT_MASK BIT(0)
+
+#define RX_NO_CPU_DSCP_HIGH_INT_MASK \
+ (RX31_NO_CPU_DSCP_INT_MASK | RX30_NO_CPU_DSCP_INT_MASK | \
+ RX29_NO_CPU_DSCP_INT_MASK | RX28_NO_CPU_DSCP_INT_MASK | \
+ RX27_NO_CPU_DSCP_INT_MASK | RX26_NO_CPU_DSCP_INT_MASK | \
+ RX25_NO_CPU_DSCP_INT_MASK | RX24_NO_CPU_DSCP_INT_MASK | \
+ RX23_NO_CPU_DSCP_INT_MASK | RX22_NO_CPU_DSCP_INT_MASK | \
+ RX21_NO_CPU_DSCP_INT_MASK | RX20_NO_CPU_DSCP_INT_MASK | \
+ RX19_NO_CPU_DSCP_INT_MASK | RX18_NO_CPU_DSCP_INT_MASK | \
+ RX17_NO_CPU_DSCP_INT_MASK | RX16_NO_CPU_DSCP_INT_MASK)
+
+#define RX_DONE_HIGH_INT_MASK \
+ (RX31_DONE_INT_MASK | RX30_DONE_INT_MASK | \
+ RX29_DONE_INT_MASK | RX28_DONE_INT_MASK | \
+ RX27_DONE_INT_MASK | RX26_DONE_INT_MASK | \
+ RX25_DONE_INT_MASK | RX24_DONE_INT_MASK | \
+ RX23_DONE_INT_MASK | RX22_DONE_INT_MASK | \
+ RX21_DONE_INT_MASK | RX20_DONE_INT_MASK | \
+ RX19_DONE_INT_MASK | RX18_DONE_INT_MASK | \
+ RX17_DONE_INT_MASK | RX16_DONE_INT_MASK)
+
+#define RX_DONE_HIGH_OFFSET fls(RX_DONE_HIGH_INT_MASK)
+#define RX_DONE_INT_MASK \
+ ((RX_DONE_HIGH_INT_MASK << RX_DONE_HIGH_OFFSET) | RX_DONE_LOW_INT_MASK)
+
+#define INT_RX2_MASK(_n) \
+ ((RX_NO_CPU_DSCP_HIGH_INT_MASK & (_n)) | \
+ (((_n) >> RX_DONE_HIGH_OFFSET) & RX_DONE_HIGH_INT_MASK))
+
+/* QDMA_CSR_INT_ENABLE4 */
+#define RX31_COHERENT_INT_MASK BIT(31)
+#define RX30_COHERENT_INT_MASK BIT(30)
+#define RX29_COHERENT_INT_MASK BIT(29)
+#define RX28_COHERENT_INT_MASK BIT(28)
+#define RX27_COHERENT_INT_MASK BIT(27)
+#define RX26_COHERENT_INT_MASK BIT(26)
+#define RX25_COHERENT_INT_MASK BIT(25)
+#define RX24_COHERENT_INT_MASK BIT(24)
+#define RX23_COHERENT_INT_MASK BIT(23)
+#define RX22_COHERENT_INT_MASK BIT(22)
+#define RX21_COHERENT_INT_MASK BIT(21)
+#define RX20_COHERENT_INT_MASK BIT(20)
+#define RX19_COHERENT_INT_MASK BIT(19)
+#define RX18_COHERENT_INT_MASK BIT(18)
+#define RX17_COHERENT_INT_MASK BIT(17)
+#define RX16_COHERENT_INT_MASK BIT(16)
+
+#define RX_COHERENT_HIGH_INT_MASK \
+ (RX31_COHERENT_INT_MASK | RX30_COHERENT_INT_MASK | \
+ RX29_COHERENT_INT_MASK | RX28_COHERENT_INT_MASK | \
+ RX27_COHERENT_INT_MASK | RX26_COHERENT_INT_MASK | \
+ RX25_COHERENT_INT_MASK | RX24_COHERENT_INT_MASK | \
+ RX23_COHERENT_INT_MASK | RX22_COHERENT_INT_MASK | \
+ RX21_COHERENT_INT_MASK | RX20_COHERENT_INT_MASK | \
+ RX19_COHERENT_INT_MASK | RX18_COHERENT_INT_MASK | \
+ RX17_COHERENT_INT_MASK | RX16_COHERENT_INT_MASK)
+
+#define INT_RX3_MASK(_n) (RX_COHERENT_HIGH_INT_MASK & (_n))
+
+/* QDMA_CSR_INT_ENABLE5 */
+#define TX31_COHERENT_INT_MASK BIT(31)
+#define TX30_COHERENT_INT_MASK BIT(30)
+#define TX29_COHERENT_INT_MASK BIT(29)
+#define TX28_COHERENT_INT_MASK BIT(28)
+#define TX27_COHERENT_INT_MASK BIT(27)
+#define TX26_COHERENT_INT_MASK BIT(26)
+#define TX25_COHERENT_INT_MASK BIT(25)
+#define TX24_COHERENT_INT_MASK BIT(24)
+#define TX23_COHERENT_INT_MASK BIT(23)
+#define TX22_COHERENT_INT_MASK BIT(22)
+#define TX21_COHERENT_INT_MASK BIT(21)
+#define TX20_COHERENT_INT_MASK BIT(20)
+#define TX19_COHERENT_INT_MASK BIT(19)
+#define TX18_COHERENT_INT_MASK BIT(18)
+#define TX17_COHERENT_INT_MASK BIT(17)
+#define TX16_COHERENT_INT_MASK BIT(16)
+#define TX15_COHERENT_INT_MASK BIT(15)
+#define TX14_COHERENT_INT_MASK BIT(14)
+#define TX13_COHERENT_INT_MASK BIT(13)
+#define TX12_COHERENT_INT_MASK BIT(12)
+#define TX11_COHERENT_INT_MASK BIT(11)
+#define TX10_COHERENT_INT_MASK BIT(10)
+#define TX9_COHERENT_INT_MASK BIT(9)
+#define TX8_COHERENT_INT_MASK BIT(8)
+
+#define TX_COHERENT_HIGH_INT_MASK \
+ (TX31_COHERENT_INT_MASK | TX30_COHERENT_INT_MASK | \
+ TX29_COHERENT_INT_MASK | TX28_COHERENT_INT_MASK | \
+ TX27_COHERENT_INT_MASK | TX26_COHERENT_INT_MASK | \
+ TX25_COHERENT_INT_MASK | TX24_COHERENT_INT_MASK | \
+ TX23_COHERENT_INT_MASK | TX22_COHERENT_INT_MASK | \
+ TX21_COHERENT_INT_MASK | TX20_COHERENT_INT_MASK | \
+ TX19_COHERENT_INT_MASK | TX18_COHERENT_INT_MASK | \
+ TX17_COHERENT_INT_MASK | TX16_COHERENT_INT_MASK | \
+ TX15_COHERENT_INT_MASK | TX14_COHERENT_INT_MASK | \
+ TX13_COHERENT_INT_MASK | TX12_COHERENT_INT_MASK | \
+ TX11_COHERENT_INT_MASK | TX10_COHERENT_INT_MASK | \
+ TX9_COHERENT_INT_MASK | TX8_COHERENT_INT_MASK)
+
+#define REG_TX_IRQ_BASE(_n) ((_n) ? 0x0048 : 0x0050)
+
+#define REG_TX_IRQ_CFG(_n) ((_n) ? 0x004c : 0x0054)
+#define TX_IRQ_THR_MASK GENMASK(27, 16)
+#define TX_IRQ_DEPTH_MASK GENMASK(11, 0)
+
+#define REG_IRQ_CLEAR_LEN(_n) ((_n) ? 0x0064 : 0x0058)
+#define IRQ_CLEAR_LEN_MASK GENMASK(7, 0)
+
+#define REG_IRQ_STATUS(_n) ((_n) ? 0x0068 : 0x005c)
+#define IRQ_ENTRY_LEN_MASK GENMASK(27, 16)
+#define IRQ_HEAD_IDX_MASK GENMASK(11, 0)
+
+#define REG_TX_RING_BASE(_n) \
+ (((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5))
+
+#define REG_TX_RING_BLOCKING(_n) \
+ (((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5))
+
+#define TX_RING_IRQ_BLOCKING_MAP_MASK BIT(6)
+#define TX_RING_IRQ_BLOCKING_CFG_MASK BIT(4)
+#define TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK BIT(2)
+#define TX_RING_IRQ_BLOCKING_MAX_TH_TXRING_EN_MASK BIT(1)
+#define TX_RING_IRQ_BLOCKING_MIN_TH_TXRING_EN_MASK BIT(0)
+
+#define REG_TX_CPU_IDX(_n) \
+ (((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5))
+
+#define TX_RING_CPU_IDX_MASK GENMASK(15, 0)
+
+#define REG_TX_DMA_IDX(_n) \
+ (((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5))
+
+#define TX_RING_DMA_IDX_MASK GENMASK(15, 0)
+
+#define IRQ_RING_IDX_MASK GENMASK(20, 16)
+#define IRQ_DESC_IDX_MASK GENMASK(15, 0)
+
+#define REG_RX_RING_BASE(_n) \
+ (((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5))
+
+#define REG_RX_RING_SIZE(_n) \
+ (((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5))
+
+#define RX_RING_THR_MASK GENMASK(31, 16)
+#define RX_RING_SIZE_MASK GENMASK(15, 0)
+
+#define REG_RX_CPU_IDX(_n) \
+ (((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5))
+
+#define RX_RING_CPU_IDX_MASK GENMASK(15, 0)
+
+#define REG_RX_DMA_IDX(_n) \
+ (((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5))
+
+#define REG_RX_DELAY_INT_IDX(_n) \
+ (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))
+
+#define REG_RX_SCATTER_CFG(_n) \
+ (((_n) < 16) ? 0x0214 + ((_n) << 5) : 0x0e14 + (((_n) - 16) << 5))
+
+#define RX_DELAY_INT_MASK GENMASK(15, 0)
+
+#define RX_RING_DMA_IDX_MASK GENMASK(15, 0)
+
+#define RX_RING_SG_EN_MASK BIT(0)
+
+#define REG_INGRESS_TRTCM_CFG 0x0070
+#define INGRESS_TRTCM_EN_MASK BIT(31)
+#define INGRESS_TRTCM_MODE_MASK BIT(30)
+#define INGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define INGRESS_FAST_TICK_MASK GENMASK(15, 0)
+
+#define REG_QUEUE_CLOSE_CFG(_n) (0x00a0 + ((_n) & 0xfc))
+#define TXQ_DISABLE_CHAN_QUEUE_MASK(_n, _m) BIT((_m) + (((_n) & 0x3) << 3))
+
+#define REG_TXQ_DIS_CFG_BASE(_n) ((_n) ? 0x20a0 : 0x00a0)
+#define REG_TXQ_DIS_CFG(_n, _m) (REG_TXQ_DIS_CFG_BASE((_n)) + ((_m) << 2))
+
+#define REG_CNTR_CFG(_n) (0x0400 + ((_n) << 3))
+#define CNTR_EN_MASK BIT(31)
+#define CNTR_ALL_CHAN_EN_MASK BIT(30)
+#define CNTR_ALL_QUEUE_EN_MASK BIT(29)
+#define CNTR_ALL_DSCP_RING_EN_MASK BIT(28)
+#define CNTR_SRC_MASK GENMASK(27, 24)
+#define CNTR_DSCP_RING_MASK GENMASK(20, 16)
+#define CNTR_CHAN_MASK GENMASK(7, 3)
+#define CNTR_QUEUE_MASK GENMASK(2, 0)
+
+#define REG_CNTR_VAL(_n) (0x0404 + ((_n) << 3))
+
+#define REG_LMGR_INIT_CFG 0x1000
+#define LMGR_INIT_START BIT(31)
+#define LMGR_SRAM_MODE_MASK BIT(30)
+#define HW_FWD_PKTSIZE_OVERHEAD_MASK GENMASK(27, 20)
+#define HW_FWD_DESC_NUM_MASK GENMASK(16, 0)
+
+#define REG_FWD_DSCP_LOW_THR 0x1004
+#define FWD_DSCP_LOW_THR_MASK GENMASK(17, 0)
+
+#define REG_EGRESS_RATE_METER_CFG 0x100c
+#define EGRESS_RATE_METER_EN_MASK BIT(31)
+#define EGRESS_RATE_METER_EQ_RATE_EN_MASK BIT(17)
+#define EGRESS_RATE_METER_WINDOW_SZ_MASK GENMASK(16, 12)
+#define EGRESS_RATE_METER_TIMESLICE_MASK GENMASK(10, 0)
+
+#define REG_EGRESS_TRTCM_CFG 0x1010
+#define EGRESS_TRTCM_EN_MASK BIT(31)
+#define EGRESS_TRTCM_MODE_MASK BIT(30)
+#define EGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define EGRESS_FAST_TICK_MASK GENMASK(15, 0)
+
+#define TRTCM_PARAM_RW_MASK BIT(31)
+#define TRTCM_PARAM_RW_DONE_MASK BIT(30)
+#define TRTCM_PARAM_TYPE_MASK GENMASK(29, 28)
+#define TRTCM_METER_GROUP_MASK GENMASK(27, 26)
+#define TRTCM_PARAM_INDEX_MASK GENMASK(23, 17)
+#define TRTCM_PARAM_RATE_TYPE_MASK BIT(16)
+
+#define REG_TRTCM_CFG_PARAM(_n) ((_n) + 0x4)
+#define REG_TRTCM_DATA_LOW(_n) ((_n) + 0x8)
+#define REG_TRTCM_DATA_HIGH(_n) ((_n) + 0xc)
+
+#define RATE_LIMIT_PARAM_RW_MASK BIT(31)
+#define RATE_LIMIT_PARAM_RW_DONE_MASK BIT(30)
+#define RATE_LIMIT_PARAM_TYPE_MASK GENMASK(29, 28)
+#define RATE_LIMIT_METER_GROUP_MASK GENMASK(27, 26)
+#define RATE_LIMIT_PARAM_INDEX_MASK GENMASK(23, 16)
+
+#define REG_TXWRR_MODE_CFG 0x1020
+#define TWRR_WEIGHT_SCALE_MASK BIT(31)
+#define TWRR_WEIGHT_BASE_MASK BIT(3)
+
+#define REG_TXWRR_WEIGHT_CFG 0x1024
+#define TWRR_RW_CMD_MASK BIT(31)
+#define TWRR_RW_CMD_DONE BIT(30)
+#define TWRR_CHAN_IDX_MASK GENMASK(23, 19)
+#define TWRR_QUEUE_IDX_MASK GENMASK(18, 16)
+#define TWRR_VALUE_MASK GENMASK(15, 0)
+
+#define REG_PSE_BUF_USAGE_CFG 0x1028
+#define PSE_BUF_ESTIMATE_EN_MASK BIT(29)
+
+#define REG_CHAN_QOS_MODE(_n) (0x1040 + ((_n) << 2))
+#define CHAN_QOS_MODE_MASK(_n) GENMASK(2 + ((_n) << 2), (_n) << 2)
+
+#define REG_GLB_TRTCM_CFG 0x1080
+#define GLB_TRTCM_EN_MASK BIT(31)
+#define GLB_TRTCM_MODE_MASK BIT(30)
+#define GLB_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define GLB_FAST_TICK_MASK GENMASK(15, 0)
+
+#define REG_TXQ_CNGST_CFG 0x10a0
+#define TXQ_CNGST_DROP_EN BIT(31)
+#define TXQ_CNGST_DEI_DROP_EN BIT(30)
+
+#define REG_SLA_TRTCM_CFG 0x1150
+#define SLA_TRTCM_EN_MASK BIT(31)
+#define SLA_TRTCM_MODE_MASK BIT(30)
+#define SLA_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define SLA_FAST_TICK_MASK GENMASK(15, 0)
+
+/* CTRL */
+#define QDMA_DESC_DONE_MASK BIT(31)
+#define QDMA_DESC_DROP_MASK BIT(30) /* tx: drop - rx: overflow */
+#define QDMA_DESC_MORE_MASK BIT(29) /* more SG elements */
+#define QDMA_DESC_DEI_MASK BIT(25)
+#define QDMA_DESC_NO_DROP_MASK BIT(24)
+#define QDMA_DESC_LEN_MASK GENMASK(15, 0)
+/* DATA */
+#define QDMA_DESC_NEXT_ID_MASK GENMASK(15, 0)
+/* TX MSG0 */
+#define QDMA_ETH_TXMSG_MIC_IDX_MASK BIT(30)
+#define QDMA_ETH_TXMSG_SP_TAG_MASK GENMASK(29, 14)
+#define QDMA_ETH_TXMSG_ICO_MASK BIT(13)
+#define QDMA_ETH_TXMSG_UCO_MASK BIT(12)
+#define QDMA_ETH_TXMSG_TCO_MASK BIT(11)
+#define QDMA_ETH_TXMSG_TSO_MASK BIT(10)
+#define QDMA_ETH_TXMSG_FAST_MASK BIT(9)
+#define QDMA_ETH_TXMSG_OAM_MASK BIT(8)
+#define QDMA_ETH_TXMSG_CHAN_MASK GENMASK(7, 3)
+#define QDMA_ETH_TXMSG_QUEUE_MASK GENMASK(2, 0)
+/* TX MSG1 */
+#define QDMA_ETH_TXMSG_NO_DROP BIT(31)
+#define QDMA_ETH_TXMSG_METER_MASK GENMASK(30, 24) /* 0x7f no meters */
+#define QDMA_ETH_TXMSG_FPORT_MASK GENMASK(23, 20)
+#define QDMA_ETH_TXMSG_NBOQ_MASK GENMASK(19, 15)
+#define QDMA_ETH_TXMSG_HWF_MASK BIT(14)
+#define QDMA_ETH_TXMSG_HOP_MASK BIT(13)
+#define QDMA_ETH_TXMSG_PTP_MASK BIT(12)
+#define QDMA_ETH_TXMSG_ACNT_G1_MASK GENMASK(10, 6) /* 0x1f do not count */
+#define QDMA_ETH_TXMSG_ACNT_G0_MASK GENMASK(5, 0) /* 0x3f do not count */
+
+/* RX MSG0 */
+#define QDMA_ETH_RXMSG_SPTAG GENMASK(21, 14)
+/* RX MSG1 */
+#define QDMA_ETH_RXMSG_DEI_MASK BIT(31)
+#define QDMA_ETH_RXMSG_IP6_MASK BIT(30)
+#define QDMA_ETH_RXMSG_IP4_MASK BIT(29)
+#define QDMA_ETH_RXMSG_IP4F_MASK BIT(28)
+#define QDMA_ETH_RXMSG_L4_VALID_MASK BIT(27)
+#define QDMA_ETH_RXMSG_L4F_MASK BIT(26)
+#define QDMA_ETH_RXMSG_SPORT_MASK GENMASK(25, 21)
+#define QDMA_ETH_RXMSG_CRSN_MASK GENMASK(20, 16)
+#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK GENMASK(15, 0)
+
+struct airoha_qdma_desc {
+ __le32 rsv;
+ __le32 ctrl;
+ __le32 addr;
+ __le32 data;
+ __le32 msg0;
+ __le32 msg1;
+ __le32 msg2;
+ __le32 msg3;
+};
+
+/* CTRL0 */
+#define QDMA_FWD_DESC_CTX_MASK BIT(31)
+#define QDMA_FWD_DESC_RING_MASK GENMASK(30, 28)
+#define QDMA_FWD_DESC_IDX_MASK GENMASK(27, 16)
+#define QDMA_FWD_DESC_LEN_MASK GENMASK(15, 0)
+/* CTRL1 */
+#define QDMA_FWD_DESC_FIRST_IDX_MASK GENMASK(15, 0)
+/* CTRL2 */
+#define QDMA_FWD_DESC_MORE_PKT_NUM_MASK GENMASK(2, 0)
+
+struct airoha_qdma_fwd_desc {
+ __le32 addr;
+ __le32 ctrl0;
+ __le32 ctrl1;
+ __le32 ctrl2;
+ __le32 msg0;
+ __le32 msg1;
+ __le32 rsv0;
+ __le32 rsv1;
+};
+
+#endif /* AIROHA_REGS_H */
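A minimal sketch of how the QDMA TX MSG0 field masks defined above are typically packed with the <linux/bitfield.h> helpers; the helper name is hypothetical and only the masks from this header are assumed:

#include <linux/bitfield.h>
#include <linux/types.h>

/* Hypothetical helper: pack the forward channel and queue indices into
 * the TX MSG0 descriptor word using the masks defined above.
 */
static inline u32 airoha_qdma_tx_msg0(u32 chan, u32 queue)
{
	return FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK, chan) |
	       FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK, queue);
}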
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
index 82f2363a45cd..e5a56bb989da 100644
--- a/drivers/net/ethernet/altera/altera_tse.h
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -401,9 +401,6 @@ struct altera_tse_private {
/* MAC address space */
struct altera_tse_mac __iomem *mac_dev;
- /* TSE Revision */
- u32 revision;
-
/* mSGDMA Rx Dispatcher address space */
void __iomem *rx_dma_csr;
void __iomem *rx_dma_desc;
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 3f6204de9e6b..ca55c5fd11df 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -892,9 +892,6 @@ static int tse_open(struct net_device *dev)
netdev_warn(dev, "device MAC address %pM\n",
dev->dev_addr);
- if ((priv->revision < 0xd00) || (priv->revision > 0xe00))
- netdev_warn(dev, "TSE revision %x\n", priv->revision);
-
spin_lock(&priv->mac_cfg_lock);
ret = reset_mac(priv);
@@ -1142,6 +1139,7 @@ static int altera_tse_probe(struct platform_device *pdev)
struct net_device *ndev;
void __iomem *descmap;
int ret = -ENODEV;
+ u32 revision;
ndev = alloc_etherdev(sizeof(struct altera_tse_private));
if (!ndev) {
@@ -1150,6 +1148,7 @@ static int altera_tse_probe(struct platform_device *pdev)
}
SET_NETDEV_DEV(ndev, &pdev->dev);
+ platform_set_drvdata(pdev, ndev);
priv = netdev_priv(ndev);
priv->device = &pdev->dev;
@@ -1387,25 +1386,7 @@ static int altera_tse_probe(struct platform_device *pdev)
spin_lock_init(&priv->tx_lock);
spin_lock_init(&priv->rxdma_irq_lock);
- netif_carrier_off(ndev);
- ret = register_netdev(ndev);
- if (ret) {
- dev_err(&pdev->dev, "failed to register TSE net device\n");
- goto err_register_netdev;
- }
-
- platform_set_drvdata(pdev, ndev);
-
- priv->revision = ioread32(&priv->mac_dev->megacore_revision);
-
- if (netif_msg_probe(priv))
- dev_info(&pdev->dev, "Altera TSE MAC version %d.%d at 0x%08lx irq %d/%d\n",
- (priv->revision >> 8) & 0xff,
- priv->revision & 0xff,
- (unsigned long) control_port->start, priv->rx_irq,
- priv->tx_irq);
-
- snprintf(mrc.name, MII_BUS_ID_SIZE, "%s-pcs-mii", ndev->name);
+ snprintf(mrc.name, MII_BUS_ID_SIZE, "%s-pcs-mii", dev_name(&pdev->dev));
pcs_bus = devm_mdio_regmap_register(&pdev->dev, &mrc);
if (IS_ERR(pcs_bus)) {
ret = PTR_ERR(pcs_bus);
@@ -1442,12 +1423,30 @@ static int altera_tse_probe(struct platform_device *pdev)
goto err_init_phylink;
}
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register TSE net device\n");
+ goto err_register_netdev;
+ }
+
+ revision = ioread32(&priv->mac_dev->megacore_revision);
+
+ if (revision < 0xd00 || revision > 0xe00)
+ netdev_warn(ndev, "TSE revision %x\n", revision);
+
+ if (netif_msg_probe(priv))
+ dev_info(&pdev->dev, "Altera TSE MAC version %d.%d at 0x%08lx irq %d/%d\n",
+ (revision >> 8) & 0xff, revision & 0xff,
+ (unsigned long)control_port->start, priv->rx_irq,
+ priv->tx_irq);
+
return 0;
+
+err_register_netdev:
+ phylink_destroy(priv->phylink);
err_init_phylink:
lynx_pcs_destroy(priv->pcs);
err_init_pcs:
- unregister_netdev(ndev);
-err_register_netdev:
netif_napi_del(&priv->napi);
altera_tse_mdio_destroy(ndev);
err_free_netdev:
diff --git a/drivers/net/ethernet/amazon/Kconfig b/drivers/net/ethernet/amazon/Kconfig
index c37fa393b99e..95dcc3969f0c 100644
--- a/drivers/net/ethernet/amazon/Kconfig
+++ b/drivers/net/ethernet/amazon/Kconfig
@@ -19,7 +19,9 @@ if NET_VENDOR_AMAZON
config ENA_ETHERNET
tristate "Elastic Network Adapter (ENA) support"
depends on PCI_MSI && !CPU_BIG_ENDIAN
+ depends on PTP_1588_CLOCK_OPTIONAL
select DIMLIB
+ select NET_DEVLINK
help
This driver supports Elastic Network Adapter (ENA)"
diff --git a/drivers/net/ethernet/amazon/ena/Makefile b/drivers/net/ethernet/amazon/ena/Makefile
index 6ab615365172..6d8036bc1823 100644
--- a/drivers/net/ethernet/amazon/ena/Makefile
+++ b/drivers/net/ethernet/amazon/ena/Makefile
@@ -5,4 +5,4 @@
obj-$(CONFIG_ENA_ETHERNET) += ena.o
-ena-y := ena_netdev.o ena_com.o ena_eth_com.o ena_ethtool.o ena_xdp.o
+ena-y := ena_netdev.o ena_com.o ena_eth_com.o ena_ethtool.o ena_xdp.o ena_phc.o ena_devlink.o ena_debugfs.o
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index 9d9fa6559354..898ecd96b96a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -60,6 +60,7 @@ enum ena_admin_aq_feature_id {
ENA_ADMIN_AENQ_CONFIG = 26,
ENA_ADMIN_LINK_CONFIG = 27,
ENA_ADMIN_HOST_ATTR_CONFIG = 28,
+ ENA_ADMIN_PHC_CONFIG = 29,
ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
};
@@ -127,6 +128,14 @@ enum ena_admin_get_stats_scope {
ENA_ADMIN_ETH_TRAFFIC = 1,
};
+enum ena_admin_phc_type {
+ ENA_ADMIN_PHC_TYPE_READLESS = 0,
+};
+
+enum ena_admin_phc_error_flags {
+ ENA_ADMIN_PHC_ERROR_FLAG_TIMESTAMP = BIT(0),
+};
+
/* ENA SRD configuration for ENI */
enum ena_admin_ena_srd_flags {
/* Feature enabled */
@@ -943,7 +952,9 @@ struct ena_admin_host_info {
* 4 : rss_configurable_function_key
* 5 : reserved
* 6 : rx_page_reuse
- * 31:7 : reserved
+ * 7 : reserved
+ * 8 : phc
+ * 31:9 : reserved
*/
u32 driver_supported_features;
};
@@ -975,7 +986,7 @@ struct ena_admin_feature_rss_ind_table {
struct ena_admin_rss_ind_table_entry inline_entry;
};
-/* When hint value is 0, driver should use it's own predefined value */
+/* When hint value is 0, driver should use its own predefined value */
struct ena_admin_ena_hw_hints {
/* value in ms */
u16 mmio_read_timeout;
@@ -1023,6 +1034,43 @@ struct ena_admin_queue_ext_feature_desc {
};
};
+struct ena_admin_feature_phc_desc {
+ /* PHC type as defined in enum ena_admin_phc_type,
+ * used only for GET command.
+ */
+ u8 type;
+
+ /* Reserved - MBZ */
+ u8 reserved1[3];
+
+ /* PHC doorbell address as an offset to PCIe MMIO REG BAR,
+ * used only for GET command.
+ */
+ u32 doorbell_offset;
+
+ /* Max time for valid PHC retrieval, passing this threshold will
+ * fail the get-time request and block PHC requests for
+ * block_timeout_usec, used only for GET command.
+ */
+ u32 expire_timeout_usec;
+
+ /* PHC requests block period, blocking starts if PHC request expired
+ * in order to prevent floods on busy device,
+ * used only for GET command.
+ */
+ u32 block_timeout_usec;
+
+ /* Shared PHC physical address (ena_admin_phc_resp),
+ * used only for SET command.
+ */
+ struct ena_common_mem_addr output_address;
+
+ /* Shared PHC Size (ena_admin_phc_resp),
+ * used only for SET command.
+ */
+ u32 output_length;
+};
+
struct ena_admin_get_feat_resp {
struct ena_admin_acq_common_desc acq_common_desc;
@@ -1052,6 +1100,8 @@ struct ena_admin_get_feat_resp {
struct ena_admin_feature_intr_moder_desc intr_moderation;
struct ena_admin_ena_hw_hints hw_hints;
+
+ struct ena_admin_feature_phc_desc phc;
} u;
};
@@ -1085,6 +1135,9 @@ struct ena_admin_set_feat_cmd {
/* LLQ configuration */
struct ena_admin_feature_llq_desc llq;
+
+ /* PHC configuration */
+ struct ena_admin_feature_phc_desc phc;
} u;
};
@@ -1162,6 +1215,23 @@ struct ena_admin_ena_mmio_req_read_less_resp {
u32 reg_val;
};
+struct ena_admin_phc_resp {
+ /* Request Id, received from DB register */
+ u16 req_id;
+
+ u8 reserved1[6];
+
+ /* PHC timestamp (nsec) */
+ u64 timestamp;
+
+ u8 reserved2[12];
+
+ /* Bit field of enum ena_admin_phc_error_flags */
+ u32 error_flags;
+
+ u8 reserved3[32];
+};
+
/* aq_common_desc */
#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0)
@@ -1260,6 +1330,8 @@ struct ena_admin_ena_mmio_req_read_less_resp {
#define ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK BIT(4)
#define ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_SHIFT 6
#define ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK BIT(6)
+#define ENA_ADMIN_HOST_INFO_PHC_SHIFT 8
+#define ENA_ADMIN_HOST_INFO_PHC_MASK BIT(8)
/* aenq_common_desc */
#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0)
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index d958cda9e58b..e67b592e5697 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -41,6 +41,12 @@
#define ENA_MAX_ADMIN_POLL_US 5000
+/* PHC definitions */
+#define ENA_PHC_DEFAULT_EXPIRE_TIMEOUT_USEC 10
+#define ENA_PHC_DEFAULT_BLOCK_TIMEOUT_USEC 1000
+#define ENA_PHC_REQ_ID_OFFSET 0xDEAD
+#define ENA_PHC_ERROR_FLAGS (ENA_ADMIN_PHC_ERROR_FLAG_TIMESTAMP)
+
/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/
@@ -763,25 +769,16 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
if (comp_ctx->status == ENA_CMD_COMPLETED) {
netdev_err(admin_queue->ena_dev->net_device,
- "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
- comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
- /* Check if fallback to polling is enabled */
- if (admin_queue->auto_polling)
- admin_queue->polling = true;
+ "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d)\n",
+ comp_ctx->cmd_opcode);
} else {
netdev_err(admin_queue->ena_dev->net_device,
"The ena device didn't send a completion for the admin cmd %d status %d\n",
comp_ctx->cmd_opcode, comp_ctx->status);
}
- /* Check if shifted to polling mode.
- * This will happen if there is a completion without an interrupt
- * and autopolling mode is enabled. Continuing normal execution in such case
- */
- if (!admin_queue->polling) {
- admin_queue->running_state = false;
- ret = -ETIME;
- goto err;
- }
+ admin_queue->running_state = false;
+ ret = -ETIME;
+ goto err;
}
ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
@@ -1650,10 +1647,265 @@ void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
ena_dev->admin_queue.polling = polling;
}
-void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
- bool polling)
+bool ena_com_phc_supported(struct ena_com_dev *ena_dev)
+{
+ return ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_PHC_CONFIG);
+}
+
+int ena_com_phc_init(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_phc_info *phc = &ena_dev->phc;
+
+ memset(phc, 0x0, sizeof(*phc));
+
+ /* Allocate shared memory used for the PHC timestamp retrieved from the device */
+ phc->virt_addr = dma_alloc_coherent(ena_dev->dmadev,
+ sizeof(*phc->virt_addr),
+ &phc->phys_addr,
+ GFP_KERNEL);
+ if (unlikely(!phc->virt_addr))
+ return -ENOMEM;
+
+ spin_lock_init(&phc->lock);
+
+ phc->virt_addr->req_id = 0;
+ phc->virt_addr->timestamp = 0;
+
+ return 0;
+}
+
+int ena_com_phc_config(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_phc_info *phc = &ena_dev->phc;
+ struct ena_admin_get_feat_resp get_feat_resp;
+ struct ena_admin_set_feat_resp set_feat_resp;
+ struct ena_admin_set_feat_cmd set_feat_cmd;
+ int ret = 0;
+
+ /* Get device PHC default configuration */
+ ret = ena_com_get_feature(ena_dev,
+ &get_feat_resp,
+ ENA_ADMIN_PHC_CONFIG,
+ 0);
+ if (unlikely(ret)) {
+ netdev_err(ena_dev->net_device,
+ "Failed to get PHC feature configuration, error: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Supporting only readless PHC retrieval */
+ if (get_feat_resp.u.phc.type != ENA_ADMIN_PHC_TYPE_READLESS) {
+ netdev_err(ena_dev->net_device,
+ "Unsupported PHC type, error: %d\n",
+ -EOPNOTSUPP);
+ return -EOPNOTSUPP;
+ }
+
+ /* Update PHC doorbell offset according to device value,
+ * used to write req_id to PHC bar
+ */
+ phc->doorbell_offset = get_feat_resp.u.phc.doorbell_offset;
+
+ /* Update PHC expire timeout according to device
+ * or default driver value
+ */
+ phc->expire_timeout_usec = (get_feat_resp.u.phc.expire_timeout_usec) ?
+ get_feat_resp.u.phc.expire_timeout_usec :
+ ENA_PHC_DEFAULT_EXPIRE_TIMEOUT_USEC;
+
+ /* Update PHC block timeout according to device
+ * or default driver value
+ */
+ phc->block_timeout_usec = (get_feat_resp.u.phc.block_timeout_usec) ?
+ get_feat_resp.u.phc.block_timeout_usec :
+ ENA_PHC_DEFAULT_BLOCK_TIMEOUT_USEC;
+
+ /* Sanity check - expire timeout must not exceed block timeout */
+ if (phc->expire_timeout_usec > phc->block_timeout_usec)
+ phc->expire_timeout_usec = phc->block_timeout_usec;
+
+ /* Prepare PHC feature command */
+ memset(&set_feat_cmd, 0x0, sizeof(set_feat_cmd));
+ set_feat_cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ set_feat_cmd.feat_common.feature_id = ENA_ADMIN_PHC_CONFIG;
+ set_feat_cmd.u.phc.output_length = sizeof(*phc->virt_addr);
+ ret = ena_com_mem_addr_set(ena_dev,
+ &set_feat_cmd.u.phc.output_address,
+ phc->phys_addr);
+ if (unlikely(ret)) {
+ netdev_err(ena_dev->net_device,
+ "Failed setting PHC output address, error: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Send PHC feature command to the device */
+ ret = ena_com_execute_admin_command(&ena_dev->admin_queue,
+ (struct ena_admin_aq_entry *)&set_feat_cmd,
+ sizeof(set_feat_cmd),
+ (struct ena_admin_acq_entry *)&set_feat_resp,
+ sizeof(set_feat_resp));
+
+ if (unlikely(ret)) {
+ netdev_err(ena_dev->net_device,
+ "Failed to enable PHC, error: %d\n",
+ ret);
+ return ret;
+ }
+
+ phc->active = true;
+ netdev_dbg(ena_dev->net_device, "PHC is active in the device\n");
+
+ return ret;
+}
+
+void ena_com_phc_destroy(struct ena_com_dev *ena_dev)
{
- ena_dev->admin_queue.auto_polling = polling;
+ struct ena_com_phc_info *phc = &ena_dev->phc;
+ unsigned long flags = 0;
+
+ /* In case PHC is not supported by the device, silently exiting */
+ if (!phc->virt_addr)
+ return;
+
+ spin_lock_irqsave(&phc->lock, flags);
+ phc->active = false;
+ spin_unlock_irqrestore(&phc->lock, flags);
+
+ dma_free_coherent(ena_dev->dmadev,
+ sizeof(*phc->virt_addr),
+ phc->virt_addr,
+ phc->phys_addr);
+ phc->virt_addr = NULL;
+}
+
+int ena_com_phc_get_timestamp(struct ena_com_dev *ena_dev, u64 *timestamp)
+{
+ volatile struct ena_admin_phc_resp *resp = ena_dev->phc.virt_addr;
+ const ktime_t zero_system_time = ktime_set(0, 0);
+ struct ena_com_phc_info *phc = &ena_dev->phc;
+ ktime_t expire_time;
+ ktime_t block_time;
+ unsigned long flags = 0;
+ int ret = 0;
+
+ if (!phc->active) {
+ netdev_err(ena_dev->net_device, "PHC feature is not active in the device\n");
+ return -EOPNOTSUPP;
+ }
+
+ spin_lock_irqsave(&phc->lock, flags);
+
+ /* Check if PHC is in blocked state */
+ if (unlikely(ktime_compare(phc->system_time, zero_system_time))) {
+ /* Check if blocking time expired */
+ block_time = ktime_add_us(phc->system_time, phc->block_timeout_usec);
+ if (!ktime_after(ktime_get(), block_time)) {
+ /* PHC is still in blocked state, skip PHC request */
+ phc->stats.phc_skp++;
+ ret = -EBUSY;
+ goto skip;
+ }
+
+ /* PHC is in active state, update statistics according
+ * to req_id and error_flags
+ */
+ if (READ_ONCE(resp->req_id) != phc->req_id) {
+ /* Device didn't update req_id during blocking time,
+ * this indicates a device error
+ */
+ netdev_err(ena_dev->net_device,
+ "PHC get time request 0x%x failed (device error)\n",
+ phc->req_id);
+ phc->stats.phc_err_dv++;
+ } else if (resp->error_flags & ENA_PHC_ERROR_FLAGS) {
+ /* Device updated req_id during blocking time but got
+ * a PHC error, this occurs if device:
+ * - exceeded the get time request limit
+ * - received an invalid timestamp
+ */
+ netdev_err(ena_dev->net_device,
+ "PHC get time request 0x%x failed (error 0x%x)\n",
+ phc->req_id,
+ resp->error_flags);
+ phc->stats.phc_err_ts += !!(resp->error_flags &
+ ENA_ADMIN_PHC_ERROR_FLAG_TIMESTAMP);
+ } else {
+ /* Device updated req_id during blocking time
+ * with valid timestamp
+ */
+ phc->stats.phc_exp++;
+ }
+ }
+
+ /* Setting relative timeouts */
+ phc->system_time = ktime_get();
+ block_time = ktime_add_us(phc->system_time, phc->block_timeout_usec);
+ expire_time = ktime_add_us(phc->system_time, phc->expire_timeout_usec);
+
+ /* We expect the device to return this req_id once
+ * the new PHC timestamp is updated
+ */
+ phc->req_id++;
+
+ /* Initialize PHC shared memory with a different req_id value
+ * so we can detect when the device updates it to req_id
+ */
+ resp->req_id = phc->req_id + ENA_PHC_REQ_ID_OFFSET;
+
+ /* Writing req_id to PHC bar */
+ writel(phc->req_id, ena_dev->reg_bar + phc->doorbell_offset);
+
+ /* Stalling until the device updates req_id */
+ while (1) {
+ if (unlikely(ktime_after(ktime_get(), expire_time))) {
+ /* Gave up waiting for an updated req_id; PHC enters the
+ * blocked state until the blocking time passes, and during
+ * this time any PHC get-time request will fail with a
+ * device-busy error
+ */
+ ret = -EBUSY;
+ break;
+ }
+
+ /* Check if req_id was updated by the device */
+ if (READ_ONCE(resp->req_id) != phc->req_id) {
+ /* req_id was not updated by the device yet,
+ * check again on next loop
+ */
+ continue;
+ }
+
+ /* req_id was updated by the device which indicates that
+ * PHC timestamp and error_flags are updated too,
+ * checking errors before retrieving timestamp
+ */
+ if (unlikely(resp->error_flags & ENA_PHC_ERROR_FLAGS)) {
+ /* Retrieved an invalid PHC timestamp; PHC enters the
+ * blocked state until the blocking time passes, and during
+ * this time any PHC get-time request will fail with a
+ * device-busy error
+ */
+ ret = -EBUSY;
+ break;
+ }
+
+ /* PHC timestamp value is returned to the caller */
+ *timestamp = resp->timestamp;
+
+ /* Update statistic on valid PHC timestamp retrieval */
+ phc->stats.phc_cnt++;
+
+ /* This indicates PHC state is active */
+ phc->system_time = zero_system_time;
+ break;
+ }
+
+skip:
+ spin_unlock_irqrestore(&phc->lock, flags);
+
+ return ret;
}
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
@@ -2198,21 +2450,6 @@ int ena_com_get_ena_srd_info(struct ena_com_dev *ena_dev,
return ret;
}
-int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
- struct ena_admin_basic_stats *stats)
-{
- struct ena_com_stats_ctx ctx;
- int ret;
-
- memset(&ctx, 0x0, sizeof(ctx));
- ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
- if (likely(ret == 0))
- memcpy(stats, &ctx.get_resp.u.basic_stats,
- sizeof(ctx.get_resp.u.basic_stats));
-
- return ret;
-}
-
int ena_com_get_customer_metrics(struct ena_com_dev *ena_dev, char *buffer, u32 len)
{
struct ena_admin_aq_get_stats_cmd *get_cmd;
@@ -2289,24 +2526,6 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
return ret;
}
-int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
- struct ena_admin_feature_offload_desc *offload)
-{
- int ret;
- struct ena_admin_get_feat_resp resp;
-
- ret = ena_com_get_feature(ena_dev, &resp,
- ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
- if (unlikely(ret)) {
- netdev_err(ena_dev->net_device, "Failed to get offload capabilities %d\n", ret);
- return ret;
- }
-
- memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
-
- return 0;
-}
-
int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index a372c5e768a7..64df2c48c9a6 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -210,6 +210,14 @@ struct ena_com_stats_admin {
u64 no_completion;
};
+struct ena_com_stats_phc {
+ u64 phc_cnt;
+ u64 phc_exp;
+ u64 phc_skp;
+ u64 phc_err_dv;
+ u64 phc_err_ts;
+};
+
struct ena_com_admin_queue {
void *q_dmadev;
struct ena_com_dev *ena_dev;
@@ -224,9 +232,6 @@ struct ena_com_admin_queue {
/* Indicate if the admin queue should poll for completion */
bool polling;
- /* Define if fallback to polling mode should occur */
- bool auto_polling;
-
u16 curr_cmd_id;
/* Indicate that the ena was initialized and can
@@ -261,6 +266,47 @@ struct ena_com_mmio_read {
spinlock_t lock;
};
+/* PTP hardware clock (PHC) MMIO read data info */
+struct ena_com_phc_info {
+ /* Internal PHC statistics */
+ struct ena_com_stats_phc stats;
+
+ /* PHC shared memory - virtual address */
+ struct ena_admin_phc_resp *virt_addr;
+
+ /* System time of last PHC request */
+ ktime_t system_time;
+
+ /* Spin lock to ensure a single outstanding PHC read */
+ spinlock_t lock;
+
+ /* PHC doorbell address as an offset to PCIe MMIO REG BAR */
+ u32 doorbell_offset;
+
+ /* Shared memory read expire timeout (usec)
+ * Max time for valid PHC retrieval, passing this threshold will fail
+ * the get time request and block new PHC requests for block_timeout_usec
+ * in order to prevent floods on busy device
+ */
+ u32 expire_timeout_usec;
+
+ /* Shared memory read abort timeout (usec)
+ * PHC requests block period, blocking starts once PHC request expired
+ * in order to prevent floods on busy device,
+ * any PHC requests during block period will be skipped
+ */
+ u32 block_timeout_usec;
+
+ /* PHC shared memory - physical address */
+ dma_addr_t phys_addr;
+
+ /* Request id sent to the device */
+ u16 req_id;
+
+ /* True if PHC is active in the device */
+ bool active;
+};
+
struct ena_rss {
/* Indirect table */
u16 *host_rss_ind_tbl;
@@ -320,6 +366,7 @@ struct ena_com_dev {
u32 ena_min_poll_delay_us;
struct ena_com_mmio_read mmio_read;
+ struct ena_com_phc_info phc;
struct ena_rss rss;
u32 supported_features;
@@ -385,6 +432,40 @@ struct ena_aenq_handlers {
*/
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev);
+/* ena_com_phc_init - Allocate and initialize PHC feature
+ * @ena_dev: ENA communication layer struct
+ * @note: This method assumes PHC is supported by the device
+ * @return - 0 on success, negative value on failure
+ */
+int ena_com_phc_init(struct ena_com_dev *ena_dev);
+
+/* ena_com_phc_supported - Return if PHC feature is supported by the device
+ * @ena_dev: ENA communication layer struct
+ * @note: This method must be called after getting supported features
+ * @return - supported or not
+ */
+bool ena_com_phc_supported(struct ena_com_dev *ena_dev);
+
+/* ena_com_phc_config - Configure PHC feature
+ * @ena_dev: ENA communication layer struct
+ * Configure PHC feature in driver and device
+ * @note: This method assumes PHC is supported by the device
+ * @return - 0 on success, negative value on failure
+ */
+int ena_com_phc_config(struct ena_com_dev *ena_dev);
+
+/* ena_com_phc_destroy - Destroy PHC feature
+ * @ena_dev: ENA communication layer struct
+ */
+void ena_com_phc_destroy(struct ena_com_dev *ena_dev);
+
+/* ena_com_phc_get_timestamp - Retrieve PHC timestamp
+ * @ena_dev: ENA communication layer struct
+ * @timestamp: Retrieved PHC timestamp
+ * @return - 0 on success, negative value on failure
+ */
+int ena_com_phc_get_timestamp(struct ena_com_dev *ena_dev, u64 *timestamp);
+
/* ena_com_set_mmio_read_mode - Enable/disable the indirect mmio reg read mechanism
* @ena_dev: ENA communication layer struct
* @readless_supported: readless mode (enable/disable)
@@ -493,17 +574,6 @@ bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev);
*/
void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling);
-/* ena_com_set_admin_auto_polling_mode - Enable autoswitch to polling mode
- * @ena_dev: ENA communication layer struct
- * @polling: Enable/Disable polling mode
- *
- * Set the autopolling mode.
- * If autopolling is on:
- * In case of missing interrupt when data is available switch to polling.
- */
-void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
- bool polling);
-
/* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler
* @ena_dev: ENA communication layer struct
*
@@ -591,15 +661,6 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag);
int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
struct ena_com_dev_get_features_ctx *get_feat_ctx);
-/* ena_com_get_dev_basic_stats - Get device basic statistics
- * @ena_dev: ENA communication layer struct
- * @stats: stats return value
- *
- * @return: 0 on Success and negative value otherwise.
- */
-int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
- struct ena_admin_basic_stats *stats);
-
/* ena_com_get_eni_stats - Get extended network interface statistics
* @ena_dev: ENA communication layer struct
* @stats: stats return value
@@ -635,15 +696,6 @@ int ena_com_get_customer_metrics(struct ena_com_dev *ena_dev, char *buffer, u32
*/
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu);
-/* ena_com_get_offload_settings - Retrieve the device offloads capabilities
- * @ena_dev: ENA communication layer struct
- * @offlad: offload return value
- *
- * @return: 0 on Success and negative value otherwise.
- */
-int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
- struct ena_admin_feature_offload_desc *offload);
-
/* ena_com_rss_init - Init RSS
* @ena_dev: ENA communication layer struct
* @log_size: indirection log size
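A minimal sketch of the call order the new PHC declarations above imply (support check, init, config, get, destroy), assuming a populated struct ena_com_dev; this mirrors the expected driver-layer flow rather than being the driver's actual wiring:

#include "ena_com.h"

/* Sketch: bring up PHC and read one timestamp, tearing down on error. */
static int example_phc_read_once(struct ena_com_dev *ena_dev, u64 *timestamp)
{
	int rc;

	if (!ena_com_phc_supported(ena_dev))
		return -EOPNOTSUPP;

	rc = ena_com_phc_init(ena_dev);		/* allocate shared response memory */
	if (rc)
		return rc;

	rc = ena_com_phc_config(ena_dev);	/* negotiate timeouts, enable in device */
	if (!rc)
		rc = ena_com_phc_get_timestamp(ena_dev, timestamp);

	if (rc)
		ena_com_phc_destroy(ena_dev);

	return rc;
}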
diff --git a/drivers/net/ethernet/amazon/ena/ena_debugfs.c b/drivers/net/ethernet/amazon/ena/ena_debugfs.c
new file mode 100644
index 000000000000..46ed80986724
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_debugfs.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/seq_file.h>
+#include <linux/pci.h>
+#include "ena_debugfs.h"
+#include "ena_phc.h"
+
+static int phc_stats_show(struct seq_file *file, void *priv)
+{
+ struct ena_adapter *adapter = file->private;
+
+ if (!ena_phc_is_active(adapter))
+ return 0;
+
+ seq_printf(file,
+ "phc_cnt: %llu\n",
+ adapter->ena_dev->phc.stats.phc_cnt);
+ seq_printf(file,
+ "phc_exp: %llu\n",
+ adapter->ena_dev->phc.stats.phc_exp);
+ seq_printf(file,
+ "phc_skp: %llu\n",
+ adapter->ena_dev->phc.stats.phc_skp);
+ seq_printf(file,
+ "phc_err_dv: %llu\n",
+ adapter->ena_dev->phc.stats.phc_err_dv);
+ seq_printf(file,
+ "phc_err_ts: %llu\n",
+ adapter->ena_dev->phc.stats.phc_err_ts);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(phc_stats);
+
+void ena_debugfs_init(struct net_device *dev)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+
+ adapter->debugfs_base =
+ debugfs_create_dir(dev_name(&adapter->pdev->dev), NULL);
+
+ debugfs_create_file("phc_stats",
+ 0400,
+ adapter->debugfs_base,
+ adapter,
+ &phc_stats_fops);
+}
+
+void ena_debugfs_terminate(struct net_device *dev)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+
+ debugfs_remove_recursive(adapter->debugfs_base);
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/net/ethernet/amazon/ena/ena_debugfs.h b/drivers/net/ethernet/amazon/ena/ena_debugfs.h
new file mode 100644
index 000000000000..dc61dd998867
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_debugfs.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ */
+
+#ifndef __ENA_DEBUGFS_H__
+#define __ENA_DEBUGFS_H__
+
+#include <linux/debugfs.h>
+#include <linux/netdevice.h>
+#include "ena_netdev.h"
+
+#ifdef CONFIG_DEBUG_FS
+
+void ena_debugfs_init(struct net_device *dev);
+
+void ena_debugfs_terminate(struct net_device *dev);
+
+#else /* CONFIG_DEBUG_FS */
+
+static inline void ena_debugfs_init(struct net_device *dev) {}
+
+static inline void ena_debugfs_terminate(struct net_device *dev) {}
+
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* __ENA_DEBUGFS_H__ */
diff --git a/drivers/net/ethernet/amazon/ena/ena_devlink.c b/drivers/net/ethernet/amazon/ena/ena_devlink.c
new file mode 100644
index 000000000000..ac81c24016dd
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_devlink.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ */
+
+#include "linux/pci.h"
+#include "ena_devlink.h"
+#include "ena_phc.h"
+
+static int ena_devlink_enable_phc_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+
+ if (!val.vbool)
+ return 0;
+
+ if (!ena_com_phc_supported(adapter->ena_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support PHC");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param ena_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(ENABLE_PHC,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL,
+ NULL,
+ ena_devlink_enable_phc_validate),
+};
+
+void ena_devlink_params_get(struct devlink *devlink)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+ union devlink_param_value val;
+ int err;
+
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC,
+ &val);
+ if (err) {
+ netdev_err(adapter->netdev, "Failed to query PHC param\n");
+ return;
+ }
+
+ ena_phc_enable(adapter, val.vbool);
+}
+
+void ena_devlink_disable_phc_param(struct devlink *devlink)
+{
+ union devlink_param_value value;
+
+ value.vbool = false;
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC,
+ value);
+}
+
+static void ena_devlink_port_register(struct devlink *devlink)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+ struct devlink_port_attrs attrs = {};
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ devlink_port_attrs_set(&adapter->devlink_port, &attrs);
+ devl_port_register(devlink, &adapter->devlink_port, 0);
+}
+
+static void ena_devlink_port_unregister(struct devlink *devlink)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+
+ devl_port_unregister(&adapter->devlink_port);
+}
+
+static int ena_devlink_reload_down(struct devlink *devlink,
+ bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+
+ if (netns_change) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Namespace change is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ ena_devlink_port_unregister(devlink);
+
+ rtnl_lock();
+ ena_destroy_device(adapter, false);
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int ena_devlink_reload_up(struct devlink *devlink,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+ int err = 0;
+
+ rtnl_lock();
+ /* Check that no other routine initialized the device (e.g.
+ * ena_fw_reset_device()). Also we're under devlink_mutex here,
+ * so devlink isn't freed under our feet.
+ */
+ if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
+ err = ena_restore_device(adapter);
+
+ rtnl_unlock();
+
+ ena_devlink_port_register(devlink);
+
+ if (!err)
+ *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
+
+ return err;
+}
+
+static const struct devlink_ops ena_devlink_ops = {
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
+ .reload_down = ena_devlink_reload_down,
+ .reload_up = ena_devlink_reload_up,
+};
+
+static int ena_devlink_configure_params(struct devlink *devlink)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+ union devlink_param_value value;
+ int rc;
+
+ rc = devlink_params_register(devlink, ena_devlink_params,
+ ARRAY_SIZE(ena_devlink_params));
+ if (rc) {
+ netdev_err(adapter->netdev, "Failed to register devlink params\n");
+ return rc;
+ }
+
+ value.vbool = ena_phc_is_enabled(adapter);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC,
+ value);
+
+ return 0;
+}
+
+struct devlink *ena_devlink_alloc(struct ena_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+ struct devlink *devlink;
+
+ devlink = devlink_alloc(&ena_devlink_ops,
+ sizeof(struct ena_adapter *),
+ dev);
+ if (!devlink) {
+ netdev_err(adapter->netdev,
+ "Failed to allocate devlink struct\n");
+ return NULL;
+ }
+
+ ENA_DEVLINK_PRIV(devlink) = adapter;
+ adapter->devlink = devlink;
+
+ if (ena_devlink_configure_params(devlink))
+ goto free_devlink;
+
+ return devlink;
+
+free_devlink:
+ devlink_free(devlink);
+ return NULL;
+}
+
+static void ena_devlink_configure_params_clean(struct devlink *devlink)
+{
+ devlink_params_unregister(devlink, ena_devlink_params,
+ ARRAY_SIZE(ena_devlink_params));
+}
+
+void ena_devlink_free(struct devlink *devlink)
+{
+ ena_devlink_configure_params_clean(devlink);
+
+ devlink_free(devlink);
+}
+
+void ena_devlink_register(struct devlink *devlink, struct device *dev)
+{
+ devl_lock(devlink);
+ ena_devlink_port_register(devlink);
+ devl_register(devlink);
+ devl_unlock(devlink);
+}
+
+void ena_devlink_unregister(struct devlink *devlink)
+{
+ devl_lock(devlink);
+ ena_devlink_port_unregister(devlink);
+ devl_unregister(devlink);
+ devl_unlock(devlink);
+}
diff --git a/drivers/net/ethernet/amazon/ena/ena_devlink.h b/drivers/net/ethernet/amazon/ena/ena_devlink.h
new file mode 100644
index 000000000000..7a19ce4830d9
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_devlink.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ */
+#ifndef DEVLINK_H
+#define DEVLINK_H
+
+#include "ena_netdev.h"
+#include <net/devlink.h>
+
+#define ENA_DEVLINK_PRIV(devlink) \
+ (*(struct ena_adapter **)devlink_priv(devlink))
+
+struct devlink *ena_devlink_alloc(struct ena_adapter *adapter);
+void ena_devlink_free(struct devlink *devlink);
+void ena_devlink_register(struct devlink *devlink, struct device *dev);
+void ena_devlink_unregister(struct devlink *devlink);
+void ena_devlink_params_get(struct devlink *devlink);
+void ena_devlink_disable_phc_param(struct devlink *devlink);
+
+#endif /* DEVLINK_H */
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 60fb35ec4b15..fe3479b84a1f 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -5,9 +5,11 @@
#include <linux/ethtool.h>
#include <linux/pci.h>
+#include <linux/net_tstamp.h>
#include "ena_netdev.h"
#include "ena_xdp.h"
+#include "ena_phc.h"
struct ena_stats {
char name[ETH_GSTRING_LEN];
@@ -298,6 +300,18 @@ static void ena_get_ethtool_stats(struct net_device *netdev,
ena_get_stats(adapter, data, true);
}
+static int ena_get_ts_info(struct net_device *netdev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
+
+ info->phc_index = ena_phc_get_index(adapter);
+
+ return 0;
+}
+
static int ena_get_sw_stats_count(struct ena_adapter *adapter)
{
return adapter->num_io_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
@@ -721,9 +735,11 @@ static u16 ena_flow_data_to_flow_hash(u32 hash_fields)
return data;
}
-static int ena_get_rss_hash(struct ena_com_dev *ena_dev,
- struct ethtool_rxnfc *cmd)
+static int ena_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
enum ena_admin_flow_hash_proto proto;
u16 hash_fields;
int rc;
@@ -772,9 +788,12 @@ static int ena_get_rss_hash(struct ena_com_dev *ena_dev,
return 0;
}
-static int ena_set_rss_hash(struct ena_com_dev *ena_dev,
- struct ethtool_rxnfc *cmd)
+static int ena_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
enum ena_admin_flow_hash_proto proto;
u16 hash_fields;
@@ -816,26 +835,6 @@ static int ena_set_rss_hash(struct ena_com_dev *ena_dev,
return ena_com_fill_hash_ctrl(ena_dev, proto, hash_fields);
}
-static int ena_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
-{
- struct ena_adapter *adapter = netdev_priv(netdev);
- int rc = 0;
-
- switch (info->cmd) {
- case ETHTOOL_SRXFH:
- rc = ena_set_rss_hash(adapter->ena_dev, info);
- break;
- case ETHTOOL_SRXCLSRLDEL:
- case ETHTOOL_SRXCLSRLINS:
- default:
- netif_err(adapter, drv, netdev,
- "Command parameter %d is not supported\n", info->cmd);
- rc = -EOPNOTSUPP;
- }
-
- return rc;
-}
-
static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
u32 *rules)
{
@@ -847,9 +846,6 @@ static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
info->data = adapter->num_io_queues;
rc = 0;
break;
- case ETHTOOL_GRXFH:
- rc = ena_get_rss_hash(adapter->ena_dev, info);
- break;
case ETHTOOL_GRXCLSRLCNT:
case ETHTOOL_GRXCLSRULE:
case ETHTOOL_GRXCLSRLALL:
@@ -869,7 +865,10 @@ static u32 ena_get_rxfh_indir_size(struct net_device *netdev)
static u32 ena_get_rxfh_key_size(struct net_device *netdev)
{
- return ENA_HASH_KEY_SIZE;
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_rss *rss = &adapter->ena_dev->rss;
+
+ return rss->hash_key ? ENA_HASH_KEY_SIZE : 0;
}
static int ena_indirection_table_set(struct ena_adapter *adapter,
@@ -1098,16 +1097,17 @@ static const struct ethtool_ops ena_ethtool_ops = {
.get_strings = ena_get_ethtool_strings,
.get_ethtool_stats = ena_get_ethtool_stats,
.get_rxnfc = ena_get_rxnfc,
- .set_rxnfc = ena_set_rxnfc,
.get_rxfh_indir_size = ena_get_rxfh_indir_size,
.get_rxfh_key_size = ena_get_rxfh_key_size,
.get_rxfh = ena_get_rxfh,
.set_rxfh = ena_set_rxfh,
+ .get_rxfh_fields = ena_get_rxfh_fields,
+ .set_rxfh_fields = ena_set_rxfh_fields,
.get_channels = ena_get_channels,
.set_channels = ena_set_channels,
.get_tunable = ena_get_tunable,
.set_tunable = ena_set_tunable,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = ena_get_ts_info,
};
void ena_set_ethtool_ops(struct net_device *netdev)
@@ -1129,22 +1129,18 @@ static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf)
return;
}
- strings_buf = devm_kcalloc(&adapter->pdev->dev,
- ETH_GSTRING_LEN, strings_num,
- GFP_ATOMIC);
+ strings_buf = kcalloc(strings_num, ETH_GSTRING_LEN, GFP_ATOMIC);
if (!strings_buf) {
netif_err(adapter, drv, netdev,
"Failed to allocate strings_buf\n");
return;
}
- data_buf = devm_kcalloc(&adapter->pdev->dev,
- strings_num, sizeof(u64),
- GFP_ATOMIC);
+ data_buf = kcalloc(strings_num, sizeof(u64), GFP_ATOMIC);
if (!data_buf) {
netif_err(adapter, drv, netdev,
"Failed to allocate data buf\n");
- devm_kfree(&adapter->pdev->dev, strings_buf);
+ kfree(strings_buf);
return;
}
@@ -1166,8 +1162,8 @@ static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf)
strings_buf + i * ETH_GSTRING_LEN,
data_buf[i]);
- devm_kfree(&adapter->pdev->dev, strings_buf);
- devm_kfree(&adapter->pdev->dev, data_buf);
+ kfree(strings_buf);
+ kfree(data_buf);
}
void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 63c8a2328142..92d149d4f091 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -5,9 +5,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#ifdef CONFIG_RFS_ACCEL
-#include <linux/cpu_rmap.h>
-#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -22,6 +19,12 @@
#include "ena_pci_id_tbl.h"
#include "ena_xdp.h"
+#include "ena_phc.h"
+
+#include "ena_devlink.h"
+
+#include "ena_debugfs.h"
+
MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_LICENSE("GPL");
@@ -42,8 +45,6 @@ MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
-static int ena_destroy_device(struct ena_adapter *adapter, bool graceful);
-static int ena_restore_device(struct ena_adapter *adapter);
static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
@@ -74,7 +75,7 @@ static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
if (threshold < time_since_last_napi && napi_scheduled) {
netdev_err(dev,
"napi handler hasn't been called for a long time but is scheduled\n");
- reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION;
+ reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION;
}
schedule_reset:
/* Change the state of the device to trigger reset
@@ -162,30 +163,6 @@ int ena_xmit_common(struct ena_adapter *adapter,
return 0;
}
-static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
-{
-#ifdef CONFIG_RFS_ACCEL
- u32 i;
- int rc;
-
- adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues);
- if (!adapter->netdev->rx_cpu_rmap)
- return -ENOMEM;
- for (i = 0; i < adapter->num_io_queues; i++) {
- int irq_idx = ENA_IO_IRQ_IDX(i);
-
- rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
- pci_irq_vector(adapter->pdev, irq_idx));
- if (rc) {
- free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
- adapter->netdev->rx_cpu_rmap = NULL;
- return rc;
- }
- }
-#endif /* CONFIG_RFS_ACCEL */
- return 0;
-}
-
static void ena_init_io_rings_common(struct ena_adapter *adapter,
struct ena_ring *ring, u16 qid)
{
@@ -1596,7 +1573,7 @@ static int ena_enable_msix(struct ena_adapter *adapter)
adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
}
- if (ena_init_rx_cpu_rmap(adapter))
+ if (netif_enable_cpu_rmap(adapter->netdev, adapter->num_io_queues))
netif_warn(adapter, probe, adapter->netdev,
"Failed to map IRQs to CPUs\n");
@@ -1742,16 +1719,13 @@ static void ena_free_io_irq(struct ena_adapter *adapter)
struct ena_irq *irq;
int i;
-#ifdef CONFIG_RFS_ACCEL
- if (adapter->msix_vecs >= 1) {
- free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
- adapter->netdev->rx_cpu_rmap = NULL;
- }
-#endif /* CONFIG_RFS_ACCEL */
-
for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
+ struct ena_napi *ena_napi;
+
irq = &adapter->irq_tbl[i];
irq_set_affinity_hint(irq->vector, NULL);
+ ena_napi = irq->data;
+ netif_napi_set_irq(&ena_napi->napi, -1);
free_irq(irq->vector, irq->data);
}
}
@@ -1807,7 +1781,7 @@ static void ena_init_napi_in_range(struct ena_adapter *adapter,
if (ENA_IS_XDP_INDEX(adapter, i))
napi_handler = ena_xdp_io_poll;
- netif_napi_add(adapter->netdev, &napi->napi, napi_handler);
+ netif_napi_add_config(adapter->netdev, &napi->napi, napi_handler, i);
if (!ENA_IS_XDP_INDEX(adapter, i))
napi->rx_ring = rx_ring;
@@ -2773,7 +2747,8 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd
ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK |
ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK |
ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK |
- ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK;
+ ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK |
+ ENA_ADMIN_HOST_INFO_PHC_MASK;
rc = ena_com_set_host_attributes(ena_dev);
if (rc) {
@@ -3165,6 +3140,8 @@ static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev,
goto err_mmio_read_less;
}
+ ena_devlink_params_get(adapter->devlink);
+
/* ENA admin level init */
rc = ena_com_admin_init(ena_dev, &aenq_handlers);
if (rc) {
@@ -3218,6 +3195,10 @@ static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev,
if (unlikely(rc))
goto err_admin_init;
+ rc = ena_phc_init(adapter);
+ if (unlikely(rc && (rc != -EOPNOTSUPP)))
+ netdev_err(netdev, "Failed initializing PHC, error: %d\n", rc);
+
return 0;
err_admin_init:
@@ -3263,7 +3244,7 @@ err_disable_msix:
return rc;
}
-static int ena_destroy_device(struct ena_adapter *adapter, bool graceful)
+int ena_destroy_device(struct ena_adapter *adapter, bool graceful)
{
struct net_device *netdev = adapter->netdev;
struct ena_com_dev *ena_dev = adapter->ena_dev;
@@ -3275,7 +3256,7 @@ static int ena_destroy_device(struct ena_adapter *adapter, bool graceful)
netif_carrier_off(netdev);
- del_timer_sync(&adapter->timer_service);
+ timer_delete_sync(&adapter->timer_service);
dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
adapter->dev_up_before_reset = dev_up;
@@ -3301,6 +3282,8 @@ static int ena_destroy_device(struct ena_adapter *adapter, bool graceful)
ena_com_admin_destroy(ena_dev);
+ ena_phc_destroy(adapter);
+
ena_com_mmio_reg_read_request_destroy(ena_dev);
/* return reset reason to default value */
@@ -3312,7 +3295,7 @@ static int ena_destroy_device(struct ena_adapter *adapter, bool graceful)
return rc;
}
-static int ena_restore_device(struct ena_adapter *adapter)
+int ena_restore_device(struct ena_adapter *adapter)
{
struct ena_com_dev_get_features_ctx get_feat_ctx;
struct ena_com_dev *ena_dev = adapter->ena_dev;
@@ -3374,6 +3357,7 @@ err_device_destroy:
ena_com_wait_for_abort_completion(ena_dev);
ena_com_admin_destroy(ena_dev);
ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
+ ena_phc_destroy(adapter);
ena_com_mmio_reg_read_request_destroy(ena_dev);
err:
clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
@@ -3697,7 +3681,8 @@ static void ena_update_host_info(struct ena_admin_host_info *host_info,
static void ena_timer_service(struct timer_list *t)
{
- struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
+ struct ena_adapter *adapter = timer_container_of(adapter, t,
+ timer_service);
u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
struct ena_admin_host_info *host_info =
adapter->ena_dev->host_attr.host_info;
@@ -3896,6 +3881,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ena_adapter *adapter;
struct net_device *netdev;
static int adapters_found;
+ struct devlink *devlink;
u32 max_num_io_queues;
bool wd_state;
int bars, rc;
@@ -3961,10 +3947,16 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, adapter);
+ rc = ena_phc_alloc(adapter);
+ if (rc) {
+ netdev_err(netdev, "ena_phc_alloc failed\n");
+ goto err_netdev_destroy;
+ }
+
rc = ena_com_allocate_customer_metrics_buffer(ena_dev);
if (rc) {
netdev_err(netdev, "ena_com_allocate_customer_metrics_buffer failed\n");
- goto err_netdev_destroy;
+ goto err_free_phc;
}
rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
@@ -3973,12 +3965,20 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_metrics_destroy;
}
+ /* Need to do this before ena_device_init */
+ devlink = ena_devlink_alloc(adapter);
+ if (!devlink) {
+ netdev_err(netdev, "ena_devlink_alloc failed\n");
+ rc = -ENOMEM;
+ goto err_metrics_destroy;
+ }
+
rc = ena_device_init(adapter, pdev, &get_feat_ctx, &wd_state);
if (rc) {
dev_err(&pdev->dev, "ENA device init failed\n");
if (rc == -ETIME)
rc = -EPROBE_DEFER;
- goto err_metrics_destroy;
+ goto ena_devlink_destroy;
}
/* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
@@ -4062,6 +4062,8 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_rss;
}
+ ena_debugfs_init(netdev);
+
INIT_WORK(&adapter->reset_task, ena_fw_reset_device);
adapter->last_keep_alive_jiffies = jiffies;
@@ -4083,6 +4085,12 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapters_found++;
+ /* From this point, the devlink device is visible to users.
+ * Perform the registration last to ensure that all the resources
+ * are available and that the netdevice is registered.
+ */
+ ena_devlink_register(devlink, &pdev->dev);
+
return 0;
err_rss:
@@ -4095,12 +4103,16 @@ err_free_msix:
ena_free_mgmnt_irq(adapter);
ena_disable_msix(adapter);
err_worker_destroy:
- del_timer(&adapter->timer_service);
+ timer_delete(&adapter->timer_service);
err_device_destroy:
ena_com_delete_host_info(ena_dev);
ena_com_admin_destroy(ena_dev);
+ena_devlink_destroy:
+ ena_devlink_free(devlink);
err_metrics_destroy:
ena_com_delete_customer_metrics_buffer(ena_dev);
+err_free_phc:
+ ena_phc_free(adapter);
err_netdev_destroy:
free_netdev(netdev);
err_free_region:
@@ -4131,23 +4143,23 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
ena_dev = adapter->ena_dev;
netdev = adapter->netdev;
-#ifdef CONFIG_RFS_ACCEL
- if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
- free_irq_cpu_rmap(netdev->rx_cpu_rmap);
- netdev->rx_cpu_rmap = NULL;
- }
+ ena_debugfs_terminate(netdev);
-#endif /* CONFIG_RFS_ACCEL */
/* Make sure timer and reset routine won't be called after
* freeing device resources.
*/
- del_timer_sync(&adapter->timer_service);
+ timer_delete_sync(&adapter->timer_service);
cancel_work_sync(&adapter->reset_task);
rtnl_lock(); /* lock released inside the below if-else block */
adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN;
ena_destroy_device(adapter, true);
+ ena_phc_free(adapter);
+
+ ena_devlink_unregister(adapter->devlink);
+ ena_devlink_free(adapter->devlink);
+
if (shutdown) {
netif_device_detach(netdev);
dev_close(netdev);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 6e12ae3b12e5..006f9a3acea6 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -16,6 +16,7 @@
#include <linux/skbuff.h>
#include <net/xdp.h>
#include <uapi/linux/bpf.h>
+#include <net/devlink.h>
#include "ena_com.h"
#include "ena_eth_com.h"
@@ -110,6 +111,8 @@
#define ENA_MMIO_DISABLE_REG_READ BIT(0)
+struct ena_phc_info;
+
struct ena_irq {
irq_handler_t handler;
void *data;
@@ -348,6 +351,8 @@ struct ena_adapter {
char name[ENA_NAME_MAX_LEN];
+ struct ena_phc_info *phc_info;
+
unsigned long flags;
/* TX */
struct ena_ring tx_ring[ENA_MAX_NUM_IO_QUEUES]
@@ -383,6 +388,13 @@ struct ena_adapter {
struct bpf_prog *xdp_bpf_prog;
u32 xdp_first_ring;
u32 xdp_num_queues;
+
+ struct devlink *devlink;
+ struct devlink_port devlink_port;
+#ifdef CONFIG_DEBUG_FS
+
+ struct dentry *debugfs_base;
+#endif /* CONFIG_DEBUG_FS */
};
void ena_set_ethtool_ops(struct net_device *netdev);
@@ -412,6 +424,8 @@ static inline void ena_reset_device(struct ena_adapter *adapter,
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
}
+int ena_destroy_device(struct ena_adapter *adapter, bool graceful);
+int ena_restore_device(struct ena_adapter *adapter);
int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
struct ena_tx_buffer *tx_info, bool is_xdp);
diff --git a/drivers/net/ethernet/amazon/ena/ena_phc.c b/drivers/net/ethernet/amazon/ena/ena_phc.c
new file mode 100644
index 000000000000..7867e893fd15
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_phc.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright 2015-2022 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#include <linux/pci.h>
+#include "ena_netdev.h"
+#include "ena_phc.h"
+#include "ena_devlink.h"
+
+static int ena_phc_adjtime(struct ptp_clock_info *clock_info, s64 delta)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ena_phc_adjfine(struct ptp_clock_info *clock_info, long scaled_ppm)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ena_phc_feature_enable(struct ptp_clock_info *clock_info,
+ struct ptp_clock_request *rq,
+ int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ena_phc_gettimex64(struct ptp_clock_info *clock_info,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct ena_phc_info *phc_info =
+ container_of(clock_info, struct ena_phc_info, clock_info);
+ unsigned long flags;
+ u64 timestamp_nsec;
+ int rc;
+
+ spin_lock_irqsave(&phc_info->lock, flags);
+
+ ptp_read_system_prets(sts);
+
+ rc = ena_com_phc_get_timestamp(phc_info->adapter->ena_dev,
+ &timestamp_nsec);
+
+ ptp_read_system_postts(sts);
+
+ spin_unlock_irqrestore(&phc_info->lock, flags);
+
+ *ts = ns_to_timespec64(timestamp_nsec);
+
+ return rc;
+}
+
+static int ena_phc_settime64(struct ptp_clock_info *clock_info,
+ const struct timespec64 *ts)
+{
+ return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info ena_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .pps = 0,
+ .adjtime = ena_phc_adjtime,
+ .adjfine = ena_phc_adjfine,
+ .gettimex64 = ena_phc_gettimex64,
+ .settime64 = ena_phc_settime64,
+ .enable = ena_phc_feature_enable,
+};
+
+/* Enable/Disable PHC by the kernel; takes effect on the next init flow */
+void ena_phc_enable(struct ena_adapter *adapter, bool enable)
+{
+ struct ena_phc_info *phc_info = adapter->phc_info;
+
+ if (!phc_info) {
+ netdev_err(adapter->netdev, "phc_info is not allocated\n");
+ return;
+ }
+
+ phc_info->enabled = enable;
+}
+
+/* Check if PHC is enabled by the kernel */
+bool ena_phc_is_enabled(struct ena_adapter *adapter)
+{
+ struct ena_phc_info *phc_info = adapter->phc_info;
+
+ return (phc_info && phc_info->enabled);
+}
+
+/* PHC is activated if ptp clock is registered in the kernel */
+bool ena_phc_is_active(struct ena_adapter *adapter)
+{
+ struct ena_phc_info *phc_info = adapter->phc_info;
+
+ return (phc_info && phc_info->clock);
+}
+
+static int ena_phc_register(struct ena_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct ptp_clock_info *clock_info;
+ struct ena_phc_info *phc_info;
+ int rc = 0;
+
+ phc_info = adapter->phc_info;
+ clock_info = &phc_info->clock_info;
+
+ /* PHC may already be registered in case of a reset */
+ if (ena_phc_is_active(adapter))
+ return 0;
+
+ phc_info->adapter = adapter;
+
+ spin_lock_init(&phc_info->lock);
+
+ /* Fill the ptp_clock_info struct and register PTP clock */
+ *clock_info = ena_ptp_clock_info;
+ snprintf(clock_info->name,
+ sizeof(clock_info->name),
+ "ena-ptp-%02x",
+ PCI_SLOT(pdev->devfn));
+
+ phc_info->clock = ptp_clock_register(clock_info, &pdev->dev);
+ if (IS_ERR(phc_info->clock)) {
+ rc = PTR_ERR(phc_info->clock);
+ netdev_err(adapter->netdev, "Failed registering ptp clock, error: %d\n",
+ rc);
+ phc_info->clock = NULL;
+ }
+
+ return rc;
+}
+
+static void ena_phc_unregister(struct ena_adapter *adapter)
+{
+ struct ena_phc_info *phc_info = adapter->phc_info;
+
+ /* During reset flow, PHC must stay registered
+ * to keep kernel's PHC index
+ */
+ if (ena_phc_is_active(adapter) &&
+ !test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
+ ptp_clock_unregister(phc_info->clock);
+ phc_info->clock = NULL;
+ }
+}
+
+int ena_phc_alloc(struct ena_adapter *adapter)
+{
+ /* Allocate driver specific PHC info */
+ adapter->phc_info = vzalloc(sizeof(*adapter->phc_info));
+ if (unlikely(!adapter->phc_info)) {
+ netdev_err(adapter->netdev, "Failed to alloc phc_info\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void ena_phc_free(struct ena_adapter *adapter)
+{
+ if (adapter->phc_info) {
+ vfree(adapter->phc_info);
+ adapter->phc_info = NULL;
+ }
+}
+
+int ena_phc_init(struct ena_adapter *adapter)
+{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ struct net_device *netdev = adapter->netdev;
+ int rc = -EOPNOTSUPP;
+
+ /* Validate PHC feature is supported in the device */
+ if (!ena_com_phc_supported(ena_dev)) {
+ netdev_dbg(netdev, "PHC feature is not supported by the device\n");
+ goto err_ena_com_phc_init;
+ }
+
+ /* Validate PHC feature is enabled by the kernel */
+ if (!ena_phc_is_enabled(adapter)) {
+ netdev_dbg(netdev, "PHC feature is not enabled by the kernel\n");
+ goto err_ena_com_phc_init;
+ }
+
+ /* Initialize device specific PHC info */
+ rc = ena_com_phc_init(ena_dev);
+ if (unlikely(rc)) {
+ netdev_err(netdev, "Failed to init phc, error: %d\n", rc);
+ goto err_ena_com_phc_init;
+ }
+
+ /* Configure PHC feature in driver and device */
+ rc = ena_com_phc_config(ena_dev);
+ if (unlikely(rc)) {
+ netdev_err(netdev, "Failed to config phc, error: %d\n", rc);
+ goto err_ena_com_phc_config;
+ }
+
+ /* Register to PTP class driver */
+ rc = ena_phc_register(adapter);
+ if (unlikely(rc)) {
+ netdev_err(netdev, "Failed to register phc, error: %d\n", rc);
+ goto err_ena_com_phc_config;
+ }
+
+ return 0;
+
+err_ena_com_phc_config:
+ ena_com_phc_destroy(ena_dev);
+err_ena_com_phc_init:
+ ena_phc_enable(adapter, false);
+ ena_devlink_disable_phc_param(adapter->devlink);
+ return rc;
+}
+
+void ena_phc_destroy(struct ena_adapter *adapter)
+{
+ ena_phc_unregister(adapter);
+ ena_com_phc_destroy(adapter->ena_dev);
+}
+
+int ena_phc_get_index(struct ena_adapter *adapter)
+{
+ if (ena_phc_is_active(adapter))
+ return ptp_clock_index(adapter->phc_info->clock);
+
+ return -1;
+}
diff --git a/drivers/net/ethernet/amazon/ena/ena_phc.h b/drivers/net/ethernet/amazon/ena/ena_phc.h
new file mode 100644
index 000000000000..7364fe714e44
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_phc.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright 2015-2022 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#ifndef ENA_PHC_H
+#define ENA_PHC_H
+
+#include <linux/ptp_clock_kernel.h>
+
+struct ena_phc_info {
+ /* PTP hardware capabilities */
+ struct ptp_clock_info clock_info;
+
+ /* Registered PTP clock device */
+ struct ptp_clock *clock;
+
+ /* Adapter specific private data structure */
+ struct ena_adapter *adapter;
+
+ /* PHC lock */
+ spinlock_t lock;
+
+ /* Enabled by kernel */
+ bool enabled;
+};
+
+void ena_phc_enable(struct ena_adapter *adapter, bool enable);
+bool ena_phc_is_enabled(struct ena_adapter *adapter);
+bool ena_phc_is_active(struct ena_adapter *adapter);
+int ena_phc_get_index(struct ena_adapter *adapter);
+int ena_phc_init(struct ena_adapter *adapter);
+void ena_phc_destroy(struct ena_adapter *adapter);
+int ena_phc_alloc(struct ena_adapter *adapter);
+void ena_phc_free(struct ena_adapter *adapter);
+
+#endif /* ENA_PHC_H */
diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
index a2efebafd686..51068dc1cc2a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
@@ -53,6 +53,11 @@ enum ena_regs_reset_reason_types {
#define ENA_REGS_MMIO_RESP_HI_OFF 0x64
#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68
+/* phc_registers offsets */
+
+/* 100 base */
+#define ENA_REGS_PHC_DB_OFF 0x100
+
/* version register */
#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff
#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8
@@ -129,4 +134,7 @@ enum ena_regs_reset_reason_types {
#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16
#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000
+/* phc_db_req_id register */
+#define ENA_REGS_PHC_DB_REQ_ID_MASK 0xffff
+
#endif /* _ENA_REGS_H_ */
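
The new PHC doorbell offset and request-id mask above are only register definitions; the code that uses them lives in the shared ena_com layer, which is not part of this hunk. Purely as a hedged illustration of how such a masked request id is typically packed into a doorbell write (hypothetical helper, not the ENA implementation):

#include <linux/bitfield.h>
#include <linux/io.h>

static void example_phc_ring_doorbell(void __iomem *reg_base, u16 req_id)
{
	/* Assumption for illustration: the low 16 bits of the doorbell
	 * carry the request id, per ENA_REGS_PHC_DB_REQ_ID_MASK.
	 */
	u32 db = FIELD_PREP(ENA_REGS_PHC_DB_REQ_ID_MASK, req_id);

	writel(db, reg_base + ENA_REGS_PHC_DB_OFF);
}
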
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index b39c6f3e1eda..d54dca3074eb 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -165,6 +165,7 @@ config AMD_XGBE
select CRC32
select PHYLIB
select AMD_XGBE_HAVE_ECC if X86
+ select NET_SELFTESTS
help
This driver supports the AMD 10GbE Ethernet device found on an
AMD SoC.
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 1ca26a8c40eb..ce9445425045 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -486,7 +486,7 @@ static int lance_close(struct net_device *dev)
volatile struct lance_regs *ll = lp->ll;
netif_stop_queue(dev);
- del_timer_sync(&lp->multicast_timer);
+ timer_delete_sync(&lp->multicast_timer);
/* Stop the card */
ll->rap = LE_CSR0;
@@ -636,7 +636,7 @@ static void lance_set_multicast(struct net_device *dev)
static void lance_set_multicast_retry(struct timer_list *t)
{
- struct lance_private *lp = from_timer(lp, t, multicast_timer);
+ struct lance_private *lp = timer_container_of(lp, t, multicast_timer);
lance_set_multicast(lp->dev);
}
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index f64f96fa17cf..76e8c13d5985 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1173,7 +1173,7 @@ static int amd8111e_close(struct net_device *dev)
/* Delete ipg timer */
if (lp->options & OPTION_DYN_IPG_ENABLE)
- del_timer_sync(&lp->ipg_data.ipg_timer);
+ timer_delete_sync(&lp->ipg_data.ipg_timer);
spin_unlock_irq(&lp->lock);
free_irq(dev->irq, dev);
@@ -1598,7 +1598,7 @@ static int __maybe_unused amd8111e_suspend(struct device *dev_d)
/* stop chip */
spin_lock_irq(&lp->lock);
if (lp->options & OPTION_DYN_IPG_ENABLE)
- del_timer_sync(&lp->ipg_data.ipg_timer);
+ timer_delete_sync(&lp->ipg_data.ipg_timer);
amd8111e_stop_chip(lp);
spin_unlock_irq(&lp->lock);
@@ -1641,7 +1641,8 @@ static int __maybe_unused amd8111e_resume(struct device *dev_d)
static void amd8111e_config_ipg(struct timer_list *t)
{
- struct amd8111e_priv *lp = from_timer(lp, t, ipg_data.ipg_timer);
+ struct amd8111e_priv *lp = timer_container_of(lp, t,
+ ipg_data.ipg_timer);
struct ipg_info *ipg_data = &lp->ipg_data;
void __iomem *mmio = lp->mmio;
unsigned int prev_col_cnt = ipg_data->col_cnt;
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 0671a066913b..9d35ac348ebe 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -571,7 +571,7 @@ static struct db_dest *au1000_GetFreeDB(struct au1000_private *aup)
return pDB;
}
-void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB)
+static void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB)
{
struct db_dest *pDBfree = aup->pDBfree;
if (pDBfree)
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index ec8df05e7bf6..8d05a0c5f2d5 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -842,7 +842,7 @@ static int lance_close(struct net_device *dev)
volatile struct lance_regs *ll = lp->ll;
netif_stop_queue(dev);
- del_timer_sync(&lp->multicast_timer);
+ timer_delete_sync(&lp->multicast_timer);
/* Stop the card */
writereg(&ll->rap, LE_CSR0);
@@ -1004,7 +1004,7 @@ static void lance_set_multicast(struct net_device *dev)
static void lance_set_multicast_retry(struct timer_list *t)
{
- struct lance_private *lp = from_timer(lp, t, multicast_timer);
+ struct lance_private *lp = timer_container_of(lp, t, multicast_timer);
struct net_device *dev = lp->dev;
lance_set_multicast(dev);
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 72db9f9e7bee..9eaefa0f5e80 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -462,7 +462,7 @@ static void pcnet32_netif_start(struct net_device *dev)
val = lp->a->read_csr(ioaddr, CSR3);
val &= 0x00ff;
lp->a->write_csr(ioaddr, CSR3, val);
- napi_enable(&lp->napi);
+ napi_enable_locked(&lp->napi);
}
/*
@@ -889,6 +889,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
if (netif_running(dev))
pcnet32_netif_stop(dev);
+ netdev_lock(dev);
spin_lock_irqsave(&lp->lock, flags);
lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
@@ -920,6 +921,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
}
spin_unlock_irqrestore(&lp->lock, flags);
+ netdev_unlock(dev);
netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n",
lp->rx_ring_size, lp->tx_ring_size);
@@ -985,6 +987,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
if (netif_running(dev))
pcnet32_netif_stop(dev);
+ netdev_lock(dev);
spin_lock_irqsave(&lp->lock, flags);
lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
@@ -1122,6 +1125,7 @@ clean_up:
lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
}
spin_unlock_irqrestore(&lp->lock, flags);
+ netdev_unlock(dev);
return rc;
} /* end pcnet32_loopback_test */
@@ -2101,6 +2105,7 @@ static int pcnet32_open(struct net_device *dev)
return -EAGAIN;
}
+ netdev_lock(dev);
spin_lock_irqsave(&lp->lock, flags);
/* Check for a valid station address */
if (!is_valid_ether_addr(dev->dev_addr)) {
@@ -2266,7 +2271,7 @@ static int pcnet32_open(struct net_device *dev)
goto err_free_ring;
}
- napi_enable(&lp->napi);
+ napi_enable_locked(&lp->napi);
/* Re-initialize the PCNET32, and start it when done. */
lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
@@ -2300,6 +2305,7 @@ static int pcnet32_open(struct net_device *dev)
lp->a->read_csr(ioaddr, CSR0));
spin_unlock_irqrestore(&lp->lock, flags);
+ netdev_unlock(dev);
return 0; /* Always succeed */
@@ -2315,6 +2321,7 @@ err_free_ring:
err_free_irq:
spin_unlock_irqrestore(&lp->lock, flags);
+ netdev_unlock(dev);
free_irq(dev->irq, dev);
return rc;
}
@@ -2623,7 +2630,7 @@ static int pcnet32_close(struct net_device *dev)
struct pcnet32_private *lp = netdev_priv(dev);
unsigned long flags;
- del_timer_sync(&lp->watchdog_timer);
+ timer_delete_sync(&lp->watchdog_timer);
netif_stop_queue(dev);
napi_disable(&lp->napi);
@@ -2898,7 +2905,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
static void pcnet32_watchdog(struct timer_list *t)
{
- struct pcnet32_private *lp = from_timer(lp, t, watchdog_timer);
+ struct pcnet32_private *lp = timer_container_of(lp, t, watchdog_timer);
struct net_device *dev = lp->dev;
unsigned long flags;
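
The netdev_lock()/netdev_unlock() additions and the switch to napi_enable_locked() in pcnet32 follow the convention that the _locked NAPI helpers are used on paths where the per-netdev instance lock is already held. A hedged sketch of that calling pattern (hypothetical driver, error handling omitted):

#include <linux/netdevice.h>

struct foo_priv {
	struct napi_struct napi;
};

static int foo_open(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	netdev_lock(dev);			/* take the instance lock */
	napi_enable_locked(&priv->napi);	/* lock already held here */
	/* ... program the hardware ... */
	netdev_unlock(dev);

	return 0;
}
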
diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c
index c83a0a80d533..097bb092bdb8 100644
--- a/drivers/net/ethernet/amd/pds_core/adminq.c
+++ b/drivers/net/ethernet/amd/pds_core/adminq.c
@@ -5,11 +5,6 @@
#include "core.h"
-struct pdsc_wait_context {
- struct pdsc_qcq *qcq;
- struct completion wait_completion;
-};
-
static int pdsc_process_notifyq(struct pdsc_qcq *qcq)
{
union pds_core_notifyq_comp *comp;
@@ -109,10 +104,10 @@ void pdsc_process_adminq(struct pdsc_qcq *qcq)
q_info = &q->info[q->tail_idx];
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
- /* Copy out the completion data */
- memcpy(q_info->dest, comp, sizeof(*comp));
-
- complete_all(&q_info->wc->wait_completion);
+ if (!completion_done(&q_info->completion)) {
+ memcpy(q_info->dest, comp, sizeof(*comp));
+ complete(&q_info->completion);
+ }
if (cq->tail_idx == cq->num_descs - 1)
cq->done_color = !cq->done_color;
@@ -162,8 +157,7 @@ irqreturn_t pdsc_adminq_isr(int irq, void *data)
static int __pdsc_adminq_post(struct pdsc *pdsc,
struct pdsc_qcq *qcq,
union pds_core_adminq_cmd *cmd,
- union pds_core_adminq_comp *comp,
- struct pdsc_wait_context *wc)
+ union pds_core_adminq_comp *comp)
{
struct pdsc_queue *q = &qcq->q;
struct pdsc_q_info *q_info;
@@ -205,9 +199,9 @@ static int __pdsc_adminq_post(struct pdsc *pdsc,
/* Post the request */
index = q->head_idx;
q_info = &q->info[index];
- q_info->wc = wc;
q_info->dest = comp;
memcpy(q_info->desc, cmd, sizeof(*cmd));
+ reinit_completion(&q_info->completion);
dev_dbg(pdsc->dev, "head_idx %d tail_idx %d\n",
q->head_idx, q->tail_idx);
@@ -231,16 +225,13 @@ int pdsc_adminq_post(struct pdsc *pdsc,
union pds_core_adminq_comp *comp,
bool fast_poll)
{
- struct pdsc_wait_context wc = {
- .wait_completion =
- COMPLETION_INITIALIZER_ONSTACK(wc.wait_completion),
- };
- unsigned long poll_interval = 1;
+ unsigned long poll_interval = 200;
unsigned long poll_jiffies;
unsigned long time_limit;
unsigned long time_start;
unsigned long time_done;
unsigned long remaining;
+ struct completion *wc;
int err = 0;
int index;
@@ -250,20 +241,19 @@ int pdsc_adminq_post(struct pdsc *pdsc,
return -ENXIO;
}
- wc.qcq = &pdsc->adminqcq;
- index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp, &wc);
+ index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp);
if (index < 0) {
err = index;
goto err_out;
}
+ wc = &pdsc->adminqcq.q.info[index].completion;
time_start = jiffies;
time_limit = time_start + HZ * pdsc->devcmd_timeout;
do {
/* Timeslice the actual wait to catch IO errors etc early */
- poll_jiffies = msecs_to_jiffies(poll_interval);
- remaining = wait_for_completion_timeout(&wc.wait_completion,
- poll_jiffies);
+ poll_jiffies = usecs_to_jiffies(poll_interval);
+ remaining = wait_for_completion_timeout(wc, poll_jiffies);
if (remaining)
break;
@@ -292,9 +282,11 @@ int pdsc_adminq_post(struct pdsc *pdsc,
dev_dbg(pdsc->dev, "%s: elapsed %d msecs\n",
__func__, jiffies_to_msecs(time_done - time_start));
- /* Check the results */
- if (time_after_eq(time_done, time_limit))
+ /* Check the results and clear an un-completed timeout */
+ if (time_after_eq(time_done, time_limit) && !completion_done(wc)) {
err = -ETIMEDOUT;
+ complete(wc);
+ }
dev_dbg(pdsc->dev, "read admin queue completion idx %d:\n", index);
dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1,
diff --git a/drivers/net/ethernet/amd/pds_core/auxbus.c b/drivers/net/ethernet/amd/pds_core/auxbus.c
index 2babea110991..92f359f2b449 100644
--- a/drivers/net/ethernet/amd/pds_core/auxbus.c
+++ b/drivers/net/ethernet/amd/pds_core/auxbus.c
@@ -107,9 +107,6 @@ int pds_client_adminq_cmd(struct pds_auxiliary_dev *padev,
dev_dbg(pf->dev, "%s: %s opcode %d\n",
__func__, dev_name(&padev->aux_dev.dev), req->opcode);
- if (pf->state)
- return -ENXIO;
-
/* Wrap the client's request */
cmd.client_request.opcode = PDS_AQ_CMD_CLIENT_CMD;
cmd.client_request.client_id = cpu_to_le16(padev->client_id);
@@ -175,34 +172,31 @@ static struct pds_auxiliary_dev *pdsc_auxbus_dev_register(struct pdsc *cf,
return padev;
}
-int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf)
+void pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf,
+ struct pds_auxiliary_dev **pd_ptr)
{
struct pds_auxiliary_dev *padev;
- int err = 0;
- if (!cf)
- return -ENODEV;
+ if (!*pd_ptr)
+ return;
mutex_lock(&pf->config_lock);
- padev = pf->vfs[cf->vf_id].padev;
- if (padev) {
- pds_client_unregister(pf, padev->client_id);
- auxiliary_device_delete(&padev->aux_dev);
- auxiliary_device_uninit(&padev->aux_dev);
- padev->client_id = 0;
- }
- pf->vfs[cf->vf_id].padev = NULL;
+ padev = *pd_ptr;
+ pds_client_unregister(pf, padev->client_id);
+ auxiliary_device_delete(&padev->aux_dev);
+ auxiliary_device_uninit(&padev->aux_dev);
+ *pd_ptr = NULL;
mutex_unlock(&pf->config_lock);
- return err;
}
-int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
+int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf,
+ enum pds_core_vif_types vt,
+ struct pds_auxiliary_dev **pd_ptr)
{
struct pds_auxiliary_dev *padev;
char devname[PDS_DEVNAME_LEN];
- enum pds_core_vif_types vt;
unsigned long mask;
u16 vt_support;
int client_id;
@@ -211,6 +205,9 @@ int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
if (!cf)
return -ENODEV;
+ if (vt >= PDS_DEV_TYPE_MAX)
+ return -EINVAL;
+
mutex_lock(&pf->config_lock);
mask = BIT_ULL(PDSC_S_FW_DEAD) |
@@ -222,17 +219,10 @@ int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
goto out_unlock;
}
- /* We only support vDPA so far, so it is the only one to
- * be verified that it is available in the Core device and
- * enabled in the devlink param. In the future this might
- * become a loop for several VIF types.
- */
-
/* Verify that the type is supported and enabled. It is not
- * an error if there is no auxbus device support for this
- * VF, it just means something else needs to happen with it.
+ * an error if the firmware doesn't support the feature, the
+ * driver just won't set up an auxiliary_device for it.
*/
- vt = PDS_DEV_TYPE_VDPA;
vt_support = !!le16_to_cpu(pf->dev_ident.vif_types[vt]);
if (!(vt_support &&
pf->viftype_status[vt].supported &&
@@ -258,7 +248,7 @@ int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
err = PTR_ERR(padev);
goto out_unlock;
}
- pf->vfs[cf->vf_id].padev = padev;
+ *pd_ptr = padev;
out_unlock:
mutex_unlock(&pf->config_lock);
diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
index 536635e57727..076dfe2910c7 100644
--- a/drivers/net/ethernet/amd/pds_core/core.c
+++ b/drivers/net/ethernet/amd/pds_core/core.c
@@ -167,8 +167,10 @@ static void pdsc_q_map(struct pdsc_queue *q, void *base, dma_addr_t base_pa)
q->base = base;
q->base_pa = base_pa;
- for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
+ for (i = 0, cur = q->info; i < q->num_descs; i++, cur++) {
cur->desc = base + (i * q->desc_size);
+ init_completion(&cur->completion);
+ }
}
static void pdsc_cq_map(struct pdsc_cq *cq, void *base, dma_addr_t base_pa)
@@ -325,10 +327,7 @@ static int pdsc_core_init(struct pdsc *pdsc)
size_t sz;
int err;
- /* Scale the descriptor ring length based on number of CPUs and VFs */
- numdescs = max_t(int, PDSC_ADMINQ_MIN_LENGTH, num_online_cpus());
- numdescs += 2 * pci_sriov_get_totalvfs(pdsc->pdev);
- numdescs = roundup_pow_of_two(numdescs);
+ numdescs = PDSC_ADMINQ_MAX_LENGTH;
err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_ADMINQ, 0, "adminq",
PDS_CORE_QCQ_F_CORE | PDS_CORE_QCQ_F_INTR,
numdescs,
@@ -402,6 +401,10 @@ err_out_uninit:
}
static struct pdsc_viftype pdsc_viftype_defaults[] = {
+ [PDS_DEV_TYPE_FWCTL] = { .name = PDS_DEV_TYPE_FWCTL_STR,
+ .enabled = true,
+ .vif_id = PDS_DEV_TYPE_FWCTL,
+ .dl_id = -1 },
[PDS_DEV_TYPE_VDPA] = { .name = PDS_DEV_TYPE_VDPA_STR,
.vif_id = PDS_DEV_TYPE_VDPA,
.dl_id = DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET },
@@ -412,7 +415,8 @@ static int pdsc_viftypes_init(struct pdsc *pdsc)
{
enum pds_core_vif_types vt;
- pdsc->viftype_status = kzalloc(sizeof(pdsc_viftype_defaults),
+ pdsc->viftype_status = kcalloc(ARRAY_SIZE(pdsc_viftype_defaults),
+ sizeof(*pdsc->viftype_status),
GFP_KERNEL);
if (!pdsc->viftype_status)
return -ENOMEM;
@@ -428,6 +432,7 @@ static int pdsc_viftypes_init(struct pdsc *pdsc)
/* See what the Core device has for support */
vt_support = !!le16_to_cpu(pdsc->dev_ident.vif_types[vt]);
+
dev_dbg(pdsc->dev, "VIF %s is %ssupported\n",
pdsc->viftype_status[vt].name,
vt_support ? "" : "not ");
diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
index 14522d6d5f86..4a6b35c84dab 100644
--- a/drivers/net/ethernet/amd/pds_core/core.h
+++ b/drivers/net/ethernet/amd/pds_core/core.h
@@ -16,7 +16,7 @@
#define PDSC_WATCHDOG_SECS 5
#define PDSC_QUEUE_NAME_MAX_SZ 16
-#define PDSC_ADMINQ_MIN_LENGTH 16 /* must be a power of two */
+#define PDSC_ADMINQ_MAX_LENGTH 16 /* must be a power of two */
#define PDSC_NOTIFYQ_LENGTH 64 /* must be a power of two */
#define PDSC_TEARDOWN_RECOVERY false
#define PDSC_TEARDOWN_REMOVING true
@@ -96,7 +96,7 @@ struct pdsc_q_info {
unsigned int bytes;
unsigned int nbufs;
struct pdsc_buf_info bufs[PDS_CORE_MAX_FRAGS];
- struct pdsc_wait_context *wc;
+ struct completion completion;
void *dest;
};
@@ -156,6 +156,7 @@ struct pdsc {
struct dentry *dentry;
struct device *dev;
struct pdsc_dev_bar bars[PDS_CORE_BARS_MAX];
+ struct pds_auxiliary_dev *padev;
struct pdsc_vf *vfs;
int num_vfs;
int vf_id;
@@ -254,7 +255,8 @@ int pdsc_dl_flash_update(struct devlink *dl,
struct devlink_flash_update_params *params,
struct netlink_ext_ack *extack);
int pdsc_dl_enable_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx);
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack);
int pdsc_dl_enable_set(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack);
@@ -303,8 +305,11 @@ void pdsc_health_thread(struct work_struct *work);
int pdsc_register_notify(struct notifier_block *nb);
void pdsc_unregister_notify(struct notifier_block *nb);
void pdsc_notify(unsigned long event, void *data);
-int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf);
-int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf);
+int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf,
+ enum pds_core_vif_types vt,
+ struct pds_auxiliary_dev **pd_ptr);
+void pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf,
+ struct pds_auxiliary_dev **pd_ptr);
void pdsc_process_adminq(struct pdsc_qcq *qcq);
void pdsc_work_thread(struct work_struct *work);
diff --git a/drivers/net/ethernet/amd/pds_core/debugfs.c b/drivers/net/ethernet/amd/pds_core/debugfs.c
index ac37a4e738ae..04c5e3abd8d7 100644
--- a/drivers/net/ethernet/amd/pds_core/debugfs.c
+++ b/drivers/net/ethernet/amd/pds_core/debugfs.c
@@ -154,8 +154,9 @@ void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq)
debugfs_create_u32("index", 0400, intr_dentry, &intr->index);
debugfs_create_u32("vector", 0400, intr_dentry, &intr->vector);
- intr_ctrl_regset = kzalloc(sizeof(*intr_ctrl_regset),
- GFP_KERNEL);
+ intr_ctrl_regset = devm_kzalloc(pdsc->dev,
+ sizeof(*intr_ctrl_regset),
+ GFP_KERNEL);
if (!intr_ctrl_regset)
return;
intr_ctrl_regset->regs = intr_ctrl_regs;
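
Switching the regset allocation from kzalloc() to devm_kzalloc() ties its lifetime to the device: the memory is released automatically when the driver is unbound, so no explicit kfree() is needed on the teardown paths. A minimal sketch of the devm pattern (hypothetical names):

#include <linux/device.h>
#include <linux/slab.h>

struct foo_state {
	void __iomem *regs;
};

static int foo_probe(struct device *dev)
{
	struct foo_state *st;

	st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	/* No kfree() on any error or remove path: the allocation is
	 * released automatically when dev is unbound.
	 */
	return 0;
}
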
diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
index 2681889162a2..b576be626a29 100644
--- a/drivers/net/ethernet/amd/pds_core/devlink.c
+++ b/drivers/net/ethernet/amd/pds_core/devlink.c
@@ -22,7 +22,8 @@ pdsc_viftype *pdsc_dl_find_viftype_by_id(struct pdsc *pdsc,
}
int pdsc_dl_enable_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct pdsc *pdsc = devlink_priv(dl);
struct pdsc_viftype *vt_entry;
@@ -56,8 +57,11 @@ int pdsc_dl_enable_set(struct devlink *dl, u32 id,
for (vf_id = 0; vf_id < pdsc->num_vfs; vf_id++) {
struct pdsc *vf = pdsc->vfs[vf_id].vf;
- err = ctx->val.vbool ? pdsc_auxbus_dev_add(vf, pdsc) :
- pdsc_auxbus_dev_del(vf, pdsc);
+ if (ctx->val.vbool)
+ err = pdsc_auxbus_dev_add(vf, pdsc, vt_entry->vif_id,
+ &pdsc->vfs[vf_id].padev);
+ else
+ pdsc_auxbus_dev_del(vf, pdsc, &pdsc->vfs[vf_id].padev);
}
return err;
@@ -102,7 +106,7 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
.fw_control.opcode = PDS_CORE_CMD_FW_CONTROL,
.fw_control.oper = PDS_CORE_FW_GET_LIST,
};
- struct pds_core_fw_list_info fw_list;
+ struct pds_core_fw_list_info fw_list = {};
struct pdsc *pdsc = devlink_priv(dl);
union pds_core_dev_comp comp;
char buf[32];
@@ -115,10 +119,8 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
if (!err)
memcpy_fromio(&fw_list, pdsc->cmd_regs->data, sizeof(fw_list));
mutex_unlock(&pdsc->devcmd_lock);
- if (err && err != -EIO)
- return err;
- listlen = fw_list.num_fw_slots;
+ listlen = min(fw_list.num_fw_slots, ARRAY_SIZE(fw_list.fw_names));
for (i = 0; i < listlen; i++) {
if (i < ARRAY_SIZE(fw_slotnames))
strscpy(buf, fw_slotnames[i], sizeof(buf));
diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c
index 660268ff9562..c7a2eff57632 100644
--- a/drivers/net/ethernet/amd/pds_core/main.c
+++ b/drivers/net/ethernet/amd/pds_core/main.c
@@ -23,7 +23,7 @@ MODULE_DEVICE_TABLE(pci, pdsc_id_table);
static void pdsc_wdtimer_cb(struct timer_list *t)
{
- struct pdsc *pdsc = from_timer(pdsc, t, wdtimer);
+ struct pdsc *pdsc = timer_container_of(pdsc, t, wdtimer);
dev_dbg(pdsc->dev, "%s: jiffies %ld\n", __func__, jiffies);
mod_timer(&pdsc->wdtimer,
@@ -190,7 +190,8 @@ static int pdsc_init_vf(struct pdsc *vf)
devl_unlock(dl);
pf->vfs[vf->vf_id].vf = vf;
- err = pdsc_auxbus_dev_add(vf, pf);
+ err = pdsc_auxbus_dev_add(vf, pf, PDS_DEV_TYPE_VDPA,
+ &pf->vfs[vf->vf_id].padev);
if (err) {
devl_lock(dl);
devl_unregister(dl);
@@ -264,6 +265,10 @@ static int pdsc_init_pf(struct pdsc *pdsc)
mutex_unlock(&pdsc->config_lock);
+ err = pdsc_auxbus_dev_add(pdsc, pdsc, PDS_DEV_TYPE_FWCTL, &pdsc->padev);
+ if (err)
+ goto err_out_stop;
+
dl = priv_to_devlink(pdsc);
devl_lock(dl);
err = devl_params_register(dl, pdsc_dl_params,
@@ -272,10 +277,10 @@ static int pdsc_init_pf(struct pdsc *pdsc)
devl_unlock(dl);
dev_warn(pdsc->dev, "Failed to register devlink params: %pe\n",
ERR_PTR(err));
- goto err_out_stop;
+ goto err_out_del_dev;
}
- hr = devl_health_reporter_create(dl, &pdsc_fw_reporter_ops, 0, pdsc);
+ hr = devl_health_reporter_create(dl, &pdsc_fw_reporter_ops, pdsc);
if (IS_ERR(hr)) {
devl_unlock(dl);
dev_warn(pdsc->dev, "Failed to create fw reporter: %pe\n", hr);
@@ -295,6 +300,8 @@ static int pdsc_init_pf(struct pdsc *pdsc)
err_out_unreg_params:
devlink_params_unregister(dl, pdsc_dl_params,
ARRAY_SIZE(pdsc_dl_params));
+err_out_del_dev:
+ pdsc_auxbus_dev_del(pdsc, pdsc, &pdsc->padev);
err_out_stop:
pdsc_stop(pdsc);
err_out_teardown:
@@ -417,7 +424,7 @@ static void pdsc_remove(struct pci_dev *pdev)
pf = pdsc_get_pf_struct(pdsc->pdev);
if (!IS_ERR(pf)) {
- pdsc_auxbus_dev_del(pdsc, pf);
+ pdsc_auxbus_dev_del(pdsc, pf, &pf->vfs[pdsc->vf_id].padev);
pf->vfs[pdsc->vf_id].vf = NULL;
}
} else {
@@ -426,6 +433,7 @@ static void pdsc_remove(struct pci_dev *pdev)
* shut themselves down.
*/
pdsc_sriov_configure(pdev, 0);
+ pdsc_auxbus_dev_del(pdsc, pdsc, &pdsc->padev);
timer_shutdown_sync(&pdsc->wdtimer);
if (pdsc->wq)
@@ -482,7 +490,10 @@ static void pdsc_reset_prepare(struct pci_dev *pdev)
pf = pdsc_get_pf_struct(pdsc->pdev);
if (!IS_ERR(pf))
- pdsc_auxbus_dev_del(pdsc, pf);
+ pdsc_auxbus_dev_del(pdsc, pf,
+ &pf->vfs[pdsc->vf_id].padev);
+ } else {
+ pdsc_auxbus_dev_del(pdsc, pdsc, &pdsc->padev);
}
pdsc_unmap_bars(pdsc);
@@ -527,7 +538,11 @@ static void pdsc_reset_done(struct pci_dev *pdev)
pf = pdsc_get_pf_struct(pdsc->pdev);
if (!IS_ERR(pf))
- pdsc_auxbus_dev_add(pdsc, pf);
+ pdsc_auxbus_dev_add(pdsc, pf, PDS_DEV_TYPE_VDPA,
+ &pf->vfs[pdsc->vf_id].padev);
+ } else {
+ pdsc_auxbus_dev_add(pdsc, pdsc, PDS_DEV_TYPE_FWCTL,
+ &pdsc->padev);
}
}
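
The pdsc_auxbus_dev_add()/_del() helpers reworked above wrap the auxiliary bus device lifecycle. For readers unfamiliar with that API, a hedged sketch of the underlying sequence (illustrative names; the real helpers also handle client registration and locking):

#include <linux/auxiliary_bus.h>

static void foo_aux_dev_release(struct device *dev)
{
	/* free the structure containing the auxiliary_device here */
}

static int foo_aux_dev_create(struct device *parent,
			      struct auxiliary_device *adev,
			      const char *name, u32 id)
{
	int err;

	adev->name = name;
	adev->id = id;
	adev->dev.parent = parent;
	adev->dev.release = foo_aux_dev_release;

	err = auxiliary_device_init(adev);
	if (err)
		return err;

	err = auxiliary_device_add(adev);
	if (err)
		auxiliary_device_uninit(adev);

	return err;
}

static void foo_aux_dev_destroy(struct auxiliary_device *adev)
{
	auxiliary_device_delete(adev);
	auxiliary_device_uninit(adev);
}
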
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 0f98b92408ed..0b273327f5a6 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -963,7 +963,7 @@ static int lance_close(struct net_device *dev)
struct lance_private *lp = netdev_priv(dev);
netif_stop_queue(dev);
- del_timer_sync(&lp->multicast_timer);
+ timer_delete_sync(&lp->multicast_timer);
STOP_LANCE(lp);
@@ -1246,7 +1246,7 @@ static void lance_set_multicast(struct net_device *dev)
static void lance_set_multicast_retry(struct timer_list *t)
{
- struct lance_private *lp = from_timer(lp, t, multicast_timer);
+ struct lance_private *lp = timer_container_of(lp, t, multicast_timer);
struct net_device *dev = lp->dev;
lance_set_multicast(dev);
diff --git a/drivers/net/ethernet/amd/xgbe/Makefile b/drivers/net/ethernet/amd/xgbe/Makefile
index 620785ffbd51..5992f7fd4d9b 100644
--- a/drivers/net/ethernet/amd/xgbe/Makefile
+++ b/drivers/net/ethernet/amd/xgbe/Makefile
@@ -3,9 +3,9 @@ obj-$(CONFIG_AMD_XGBE) += amd-xgbe.o
amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \
xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o \
- xgbe-ptp.o \
+ xgbe-hwtstamp.o xgbe-ptp.o xgbe-pps.o \
xgbe-i2c.o xgbe-phy-v1.o xgbe-phy-v2.o \
- xgbe-platform.o
+ xgbe-platform.o xgbe-selftest.o
amd-xgbe-$(CONFIG_PCI) += xgbe-pci.o
amd-xgbe-$(CONFIG_AMD_XGBE_DCB) += xgbe-dcb.o
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 3b70f6737633..62b01de93db4 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#ifndef __XGBE_COMMON_H__
@@ -332,7 +223,15 @@
#define MAC_TSSR 0x0d20
#define MAC_TXSNR 0x0d30
#define MAC_TXSSR 0x0d34
-
+#define MAC_TICNR 0x0d58
+#define MAC_TICSNR 0x0d5C
+#define MAC_TECNR 0x0d60
+#define MAC_TECSNR 0x0d64
+#define MAC_PPSCR 0x0d70
+#define MAC_PPS0_TTSR 0x0d80
+#define MAC_PPS0_TTNSR 0x0d84
+#define MAC_PPS0_INTERVAL 0x0d88
+#define MAC_PPS0_WIDTH 0x0d8C
#define MAC_QTFCR_INC 4
#define MAC_MACA_INC 4
#define MAC_HTR_INC 4
@@ -340,6 +239,18 @@
#define MAC_RQC2_INC 4
#define MAC_RQC2_Q_PER_REG 4
+/* PPS helpers */
+#define PPSEN0 BIT(4)
+#define MAC_PPSx_TTSR(x) ((MAC_PPS0_TTSR) + ((x) * 0x10))
+#define MAC_PPSx_TTNSR(x) ((MAC_PPS0_TTNSR) + ((x) * 0x10))
+#define MAC_PPSx_INTERVAL(x) ((MAC_PPS0_INTERVAL) + ((x) * 0x10))
+#define MAC_PPSx_WIDTH(x) ((MAC_PPS0_WIDTH) + ((x) * 0x10))
+#define PPS_MAXIDX(x) ((((x) + 1) * 8) - 1)
+#define PPS_MINIDX(x) ((x) * 8)
+#define XGBE_PPSCMD_STOP 0x5
+#define XGBE_PPSCMD_START 0x2
+#define XGBE_PPSTARGET_PULSE 0x2
+
/* MAC register entry bit positions and sizes */
#define MAC_HWF0R_ADDMACADRSEL_INDEX 18
#define MAC_HWF0R_ADDMACADRSEL_WIDTH 5
@@ -473,6 +384,10 @@
#define MAC_RCR_CST_WIDTH 1
#define MAC_RCR_DCRCC_INDEX 3
#define MAC_RCR_DCRCC_WIDTH 1
+#define MAC_RCR_GPSLCE_INDEX 6
+#define MAC_RCR_GPSLCE_WIDTH 1
+#define MAC_RCR_WD_INDEX 7
+#define MAC_RCR_WD_WIDTH 1
#define MAC_RCR_HDSMS_INDEX 12
#define MAC_RCR_HDSMS_WIDTH 3
#define MAC_RCR_IPC_INDEX 9
@@ -483,6 +398,8 @@
#define MAC_RCR_LM_WIDTH 1
#define MAC_RCR_RE_INDEX 0
#define MAC_RCR_RE_WIDTH 1
+#define MAC_RCR_GPSL_INDEX 16
+#define MAC_RCR_GPSL_WIDTH 14
#define MAC_RFCR_PFCE_INDEX 8
#define MAC_RFCR_PFCE_WIDTH 1
#define MAC_RFCR_RFE_INDEX 0
@@ -521,6 +438,8 @@
#define MAC_TCR_VNE_WIDTH 1
#define MAC_TCR_VNM_INDEX 25
#define MAC_TCR_VNM_WIDTH 1
+#define MAC_TCR_JD_INDEX 16
+#define MAC_TCR_JD_WIDTH 1
#define MAC_TIR_TNID_INDEX 0
#define MAC_TIR_TNID_WIDTH 16
#define MAC_TSCR_AV8021ASMEN_INDEX 28
@@ -529,6 +448,8 @@
#define MAC_TSCR_SNAPTYPSEL_WIDTH 2
#define MAC_TSCR_TSADDREG_INDEX 5
#define MAC_TSCR_TSADDREG_WIDTH 1
+#define MAC_TSCR_TSUPDT_INDEX 3
+#define MAC_TSCR_TSUPDT_WIDTH 1
#define MAC_TSCR_TSCFUPDT_INDEX 1
#define MAC_TSCR_TSCFUPDT_WIDTH 1
#define MAC_TSCR_TSCTRLSSR_INDEX 9
@@ -557,6 +478,10 @@
#define MAC_TSSR_TXTSC_WIDTH 1
#define MAC_TXSNR_TXTSSTSMIS_INDEX 31
#define MAC_TXSNR_TXTSSTSMIS_WIDTH 1
+#define MAC_TICSNR_TSICSNS_INDEX 8
+#define MAC_TICSNR_TSICSNS_WIDTH 8
+#define MAC_TECSNR_TSECSNS_INDEX 8
+#define MAC_TECSNR_TSECSNS_WIDTH 8
#define MAC_VLANHTR_VLHT_INDEX 0
#define MAC_VLANHTR_VLHT_WIDTH 16
#define MAC_VLANIR_VLTI_INDEX 20
@@ -587,8 +512,10 @@
#define MAC_VR_SNPSVER_WIDTH 8
#define MAC_VR_USERVER_INDEX 16
#define MAC_VR_USERVER_WIDTH 8
+#define MAC_PPSx_TTNSR_TRGTBUSY0_INDEX 31
+#define MAC_PPSx_TTNSR_TRGTBUSY0_WIDTH 1
-/* MMC register offsets */
+ /* MMC register offsets */
#define MMC_CR 0x0800
#define MMC_RISR 0x0804
#define MMC_TISR 0x0808
@@ -900,6 +827,11 @@
#define PCS_V2_RV_WINDOW_SELECT 0x1064
#define PCS_V2_YC_WINDOW_DEF 0x18060
#define PCS_V2_YC_WINDOW_SELECT 0x18064
+#define PCS_V3_RN_WINDOW_DEF 0xf8078
+#define PCS_V3_RN_WINDOW_SELECT 0xf807c
+
+#define PCS_RN_SMN_BASE_ADDR 0x11e00000
+#define PCS_RN_PORT_ADDR_SIZE 0x100000
/* PCS register entry bit positions and sizes */
#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6
@@ -1373,6 +1305,8 @@
#define MDIO_VEND2_CTRL1_SS13 BIT(13)
#endif
+#define XGBE_VEND2_MAC_AUTO_SW BIT(9)
+
/* MDIO mask values */
#define XGBE_AN_CL73_INT_CMPLT BIT(0)
#define XGBE_AN_CL73_INC_LINK BIT(1)
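
The new PPS helpers above (PPS_MINIDX/PPS_MAXIDX, XGBE_PPSCMD_*) suggest that each PPS output owns an 8-bit command field inside MAC_PPSCR. Purely as an illustration of how such per-output fields are usually programmed, and assuming that layout (the authoritative logic is in the new xgbe-pps.c, which is not shown here):

#include <linux/bits.h>

/* Hypothetical helper: place a command value into PPS output x's
 * bit range [PPS_MINIDX(x), PPS_MAXIDX(x)] of a MAC_PPSCR image.
 */
static u32 example_ppscr_set_cmd(u32 ppscr, unsigned int x, u32 cmd)
{
	u32 mask = GENMASK(PPS_MAXIDX(x), PPS_MINIDX(x));

	ppscr &= ~mask;
	ppscr |= (cmd << PPS_MINIDX(x)) & mask;

	return ppscr;
}
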
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
index c68ace804e37..1474df5544fa 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/netdevice.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
index b0a6c96b6ef4..d9157c4acde9 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/debugfs.h>
@@ -505,21 +396,6 @@ void xgbe_debugfs_exit(struct xgbe_prv_data *pdata)
void xgbe_debugfs_rename(struct xgbe_prv_data *pdata)
{
- char *buf;
-
- if (!pdata->xgbe_debugfs)
- return;
-
- buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name);
- if (!buf)
- return;
-
- if (!strcmp(pdata->xgbe_debugfs->d_name.name, buf))
- goto out;
-
- debugfs_rename(pdata->xgbe_debugfs->d_parent, pdata->xgbe_debugfs,
- pdata->xgbe_debugfs->d_parent, buf);
-
-out:
- kfree(buf);
+ debugfs_change_name(pdata->xgbe_debugfs,
+ "amd-xgbe-%s", pdata->netdev->name);
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index 230726d7b74f..7c8a19988a52 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include "xgbe.h"
@@ -373,8 +264,13 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
}
/* Set up the header page info */
- xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
- XGBE_SKB_ALLOC_SIZE);
+ if (pdata->netdev->features & NETIF_F_RXCSUM) {
+ xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
+ XGBE_SKB_ALLOC_SIZE);
+ } else {
+ xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
+ pdata->rx_buf_size);
+ }
/* Set up the buffer page info */
xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index f393228d41c7..b646ae575e6a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/phy.h>
@@ -120,9 +11,11 @@
#include <linux/bitrev.h>
#include <linux/crc32.h>
#include <linux/crc32poly.h>
+#include <linux/pci.h>
#include "xgbe.h"
#include "xgbe-common.h"
+#include "xgbe-smn.h"
static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata)
{
@@ -318,6 +211,20 @@ static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
}
XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
+ pdata->sph = true;
+}
+
+static void xgbe_disable_sph_mode(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 0);
+ }
+ pdata->sph = false;
}
static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
@@ -1150,18 +1057,19 @@ static int xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
return 0;
}
-static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
- int mmd_reg)
+static unsigned int xgbe_get_mmd_address(struct xgbe_prv_data *pdata,
+ int mmd_reg)
{
- unsigned long flags;
- unsigned int mmd_address, index, offset;
- int mmd_data;
-
- if (mmd_reg & XGBE_ADDR_C45)
- mmd_address = mmd_reg & ~XGBE_ADDR_C45;
- else
- mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+ return (mmd_reg & XGBE_ADDR_C45) ?
+ mmd_reg & ~XGBE_ADDR_C45 :
+ (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+}
+static void xgbe_get_pcs_index_and_offset(struct xgbe_prv_data *pdata,
+ unsigned int mmd_address,
+ unsigned int *index,
+ unsigned int *offset)
+{
/* The PCS registers are accessed using mmio. The underlying
* management interface uses indirect addressing to access the MMD
* register sets. This requires accessing of the PCS register in two
@@ -1172,8 +1080,98 @@ static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
* offset 1 bit and reading 16 bits of data.
*/
mmd_address <<= 1;
- index = mmd_address & ~pdata->xpcs_window_mask;
- offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+ *index = mmd_address & ~pdata->xpcs_window_mask;
+ *offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+}
+
+static int xgbe_read_mmd_regs_v3(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg)
+{
+ unsigned int mmd_address, index, offset;
+ u32 smn_address;
+ int mmd_data;
+ int ret;
+
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
+
+ xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
+
+ smn_address = pdata->smn_base + pdata->xpcs_window_sel_reg;
+ ret = amd_smn_write(0, smn_address, index);
+ if (ret)
+ return ret;
+
+ ret = amd_smn_read(0, pdata->smn_base + offset, &mmd_data);
+ if (ret)
+ return ret;
+
+ mmd_data = (offset % 4) ? FIELD_GET(XGBE_GEN_HI_MASK, mmd_data) :
+ FIELD_GET(XGBE_GEN_LO_MASK, mmd_data);
+
+ return mmd_data;
+}
+
+static void xgbe_write_mmd_regs_v3(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg, int mmd_data)
+{
+ unsigned int pci_mmd_data, hi_mask, lo_mask;
+ unsigned int mmd_address, index, offset;
+ struct pci_dev *dev;
+ u32 smn_address;
+ int ret;
+
+ dev = pdata->pcidev;
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
+
+ xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
+
+ smn_address = pdata->smn_base + pdata->xpcs_window_sel_reg;
+ ret = amd_smn_write(0, smn_address, index);
+ if (ret) {
+ pci_err(dev, "Failed to write data 0x%x\n", index);
+ return;
+ }
+
+ ret = amd_smn_read(0, pdata->smn_base + offset, &pci_mmd_data);
+ if (ret) {
+ pci_err(dev, "Failed to read data\n");
+ return;
+ }
+
+ if (offset % 4) {
+ hi_mask = FIELD_PREP(XGBE_GEN_HI_MASK, mmd_data);
+ lo_mask = FIELD_GET(XGBE_GEN_LO_MASK, pci_mmd_data);
+ } else {
+ hi_mask = FIELD_PREP(XGBE_GEN_HI_MASK,
+ FIELD_GET(XGBE_GEN_HI_MASK, pci_mmd_data));
+ lo_mask = FIELD_GET(XGBE_GEN_LO_MASK, mmd_data);
+ }
+
+ pci_mmd_data = hi_mask | lo_mask;
+
+ ret = amd_smn_write(0, smn_address, index);
+ if (ret) {
+ pci_err(dev, "Failed to write data 0x%x\n", index);
+ return;
+ }
+
+ ret = amd_smn_write(0, (pdata->smn_base + offset), pci_mmd_data);
+ if (ret) {
+ pci_err(dev, "Failed to write data 0x%x\n", pci_mmd_data);
+ return;
+ }
+}
+
+static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg)
+{
+ unsigned int mmd_address, index, offset;
+ unsigned long flags;
+ int mmd_data;
+
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
+
+ xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
spin_lock_irqsave(&pdata->xpcs_lock, flags);
XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
@@ -1189,23 +1187,9 @@ static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
unsigned long flags;
unsigned int mmd_address, index, offset;
- if (mmd_reg & XGBE_ADDR_C45)
- mmd_address = mmd_reg & ~XGBE_ADDR_C45;
- else
- mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
- /* The PCS registers are accessed using mmio. The underlying
- * management interface uses indirect addressing to access the MMD
- * register sets. This requires accessing of the PCS register in two
- * phases, an address phase and a data phase.
- *
- * The mmio interface is based on 16-bit offsets and values. All
- * register offsets must therefore be adjusted by left shifting the
- * offset 1 bit and writing 16 bits of data.
- */
- mmd_address <<= 1;
- index = mmd_address & ~pdata->xpcs_window_mask;
- offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+ xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
spin_lock_irqsave(&pdata->xpcs_lock, flags);
XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
@@ -1220,10 +1204,7 @@ static int xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
unsigned int mmd_address;
int mmd_data;
- if (mmd_reg & XGBE_ADDR_C45)
- mmd_address = mmd_reg & ~XGBE_ADDR_C45;
- else
- mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
/* The PCS registers are accessed using mmio. The underlying APB3
* management interface uses indirect addressing to access the MMD
@@ -1248,10 +1229,7 @@ static void xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
unsigned int mmd_address;
unsigned long flags;
- if (mmd_reg & XGBE_ADDR_C45)
- mmd_address = mmd_reg & ~XGBE_ADDR_C45;
- else
- mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
/* The PCS registers are accessed using mmio. The underlying APB3
* management interface uses indirect addressing to access the MMD
@@ -1278,6 +1256,9 @@ static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
case XGBE_XPCS_ACCESS_V2:
default:
return xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
+
+ case XGBE_XPCS_ACCESS_V3:
+ return xgbe_read_mmd_regs_v3(pdata, prtad, mmd_reg);
}
}
@@ -1288,6 +1269,9 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
case XGBE_XPCS_ACCESS_V1:
return xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data);
+ case XGBE_XPCS_ACCESS_V3:
+ return xgbe_write_mmd_regs_v3(pdata, prtad, mmd_reg, mmd_data);
+
case XGBE_XPCS_ACCESS_V2:
default:
return xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
@@ -1576,125 +1560,6 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
DBGPR("<--rx_desc_init\n");
}
-static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
- unsigned int addend)
-{
- unsigned int count = 10000;
-
- /* Set the addend register value and tell the device */
- XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
- XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);
-
- /* Wait for addend update to complete */
- while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
- udelay(5);
-
- if (!count)
- netdev_err(pdata->netdev,
- "timed out updating timestamp addend register\n");
-}
-
-static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
- unsigned int nsec)
-{
- unsigned int count = 10000;
-
- /* Set the time values and tell the device */
- XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
- XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
- XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);
-
- /* Wait for time update to complete */
- while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
- udelay(5);
-
- if (!count)
- netdev_err(pdata->netdev, "timed out initializing timestamp\n");
-}
-
-static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
-{
- u64 nsec;
-
- nsec = XGMAC_IOREAD(pdata, MAC_STSR);
- nsec *= NSEC_PER_SEC;
- nsec += XGMAC_IOREAD(pdata, MAC_STNR);
-
- return nsec;
-}
-
-static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
-{
- unsigned int tx_snr, tx_ssr;
- u64 nsec;
-
- if (pdata->vdata->tx_tstamp_workaround) {
- tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
- tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
- } else {
- tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
- tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
- }
-
- if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
- return 0;
-
- nsec = tx_ssr;
- nsec *= NSEC_PER_SEC;
- nsec += tx_snr;
-
- return nsec;
-}
-
-static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
- struct xgbe_ring_desc *rdesc)
-{
- u64 nsec;
-
- if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
- !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
- nsec = le32_to_cpu(rdesc->desc1);
- nsec <<= 32;
- nsec |= le32_to_cpu(rdesc->desc0);
- if (nsec != 0xffffffffffffffffULL) {
- packet->rx_tstamp = nsec;
- XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
- RX_TSTAMP, 1);
- }
- }
-}
-
-static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
- unsigned int mac_tscr)
-{
- /* Set one nano-second accuracy */
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);
-
- /* Set fine timestamp update */
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);
-
- /* Overwrite earlier timestamps */
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);
-
- XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);
-
- /* Exit if timestamping is not enabled */
- if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
- return 0;
-
- /* Initialize time registers */
- XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
- XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
- xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
- xgbe_set_tstamp_time(pdata, 0, 0);
-
- /* Initialize the timecounter */
- timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
- ktime_to_ns(ktime_get_real()));
-
- return 0;
-}
-
static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
struct xgbe_ring *ring)
{
@@ -2868,9 +2733,19 @@ static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
{
unsigned int val;
- val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0;
-
- XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
+ if (pdata->netdev->mtu > XGMAC_JUMBO_PACKET_MTU) {
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, GPSL,
+ XGMAC_GIANT_PACKET_MTU);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, WD, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, JD, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, GPSLCE, 1);
+ } else {
+ val = pdata->netdev->mtu > XGMAC_STD_PACKET_MTU ? 1 : 0;
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, GPSLCE, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, WD, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, JD, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
+ }
}
static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
@@ -3545,8 +3420,12 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
xgbe_config_tx_coalesce(pdata);
xgbe_config_rx_buffer_size(pdata);
xgbe_config_tso_mode(pdata);
- xgbe_config_sph_mode(pdata);
- xgbe_config_rss(pdata);
+
+ if (pdata->netdev->features & NETIF_F_RXCSUM) {
+ xgbe_config_sph_mode(pdata);
+ xgbe_config_rss(pdata);
+ }
+
desc_if->wrapper_tx_desc_init(pdata);
desc_if->wrapper_rx_desc_init(pdata);
xgbe_enable_dma_interrupts(pdata);
@@ -3675,13 +3554,6 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->rx_mmc_int = xgbe_rx_mmc_int;
hw_if->read_mmc_stats = xgbe_read_mmc_stats;
- /* For PTP config */
- hw_if->config_tstamp = xgbe_config_tstamp;
- hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
- hw_if->set_tstamp_time = xgbe_set_tstamp_time;
- hw_if->get_tstamp_time = xgbe_get_tstamp_time;
- hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
-
/* For Data Center Bridging config */
hw_if->config_tc = xgbe_config_tc;
hw_if->config_dcb_tc = xgbe_config_dcb_tc;
@@ -3702,5 +3574,26 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->disable_vxlan = xgbe_disable_vxlan;
hw_if->set_vxlan_id = xgbe_set_vxlan_id;
+	/* For Split Header */
+ hw_if->enable_sph = xgbe_config_sph_mode;
+ hw_if->disable_sph = xgbe_disable_sph_mode;
+
DBGPR("<--xgbe_init_function_ptrs\n");
}
+
+int xgbe_enable_mac_loopback(struct xgbe_prv_data *pdata)
+{
+ /* Enable MAC loopback mode */
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, LM, 1);
+
+ /* Wait for loopback to stabilize */
+ usleep_range(10, 15);
+
+ return 0;
+}
+
+void xgbe_disable_mac_loopback(struct xgbe_prv_data *pdata)
+{
+ /* Disable MAC loopback mode */
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, LM, 0);
+}
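As an aside on the v3 (SMN-based) MMD access introduced above: the address-phase/data-phase arithmetic in xgbe_get_pcs_index_and_offset() and the high/low 16-bit lane selection can be illustrated with a small standalone sketch. The window mask, window base and MMD address below are assumed values chosen only for illustration; they are not taken from the driver or from hardware documentation.

/*
 * Minimal sketch of the PCS indirect-addressing arithmetic: a 16-bit
 * register offset is doubled, split into a window index (address phase)
 * and an offset within the window (data phase), and the SMN path then
 * picks the high or low 16 bits of the 32-bit word it reads.
 */
#include <stdio.h>

int main(void)
{
	unsigned int xpcs_window_mask = 0x3ff;  /* assumed 1 KB window */
	unsigned int xpcs_window = 0x1000;      /* assumed window base */
	unsigned int mmd_address = 0x1e0007;    /* assumed (mmd << 16) | reg */
	unsigned int index, offset;

	/* Registers are 16 bits wide, so scale the address by 2 */
	mmd_address <<= 1;

	/* Address phase selects the window, data phase accesses within it */
	index = mmd_address & ~xpcs_window_mask;
	offset = xpcs_window + (mmd_address & xpcs_window_mask);

	/* Two 16-bit registers share each 32-bit SMN word */
	printf("index=%#x offset=%#x lane=%s\n", index, offset,
	       (offset % 4) ? "high 16 bits" : "low 16 bits");
	return 0;
}

With these assumed inputs the sketch prints index=0x3c0000, offset=0x100e and selects the high 16-bit lane, mirroring the offset % 4 test used in xgbe_read_mmd_regs_v3() and xgbe_write_mmd_regs_v3().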
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 5475867708f4..3ddd896d6987 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
@@ -476,10 +367,11 @@ static irqreturn_t xgbe_ecc_isr(int irq, void *data)
static void xgbe_isr_bh_work(struct work_struct *work)
{
struct xgbe_prv_data *pdata = from_work(pdata, work, dev_bh_work);
+ unsigned int mac_isr, mac_tssr, mac_mdioisr;
struct xgbe_hw_if *hw_if = &pdata->hw_if;
- struct xgbe_channel *channel;
+ bool per_ch_irq, ti, ri, rbu, fbe;
unsigned int dma_isr, dma_ch_isr;
- unsigned int mac_isr, mac_tssr, mac_mdioisr;
+ struct xgbe_channel *channel;
unsigned int i;
/* The DMA interrupt status register also reports MAC and MTL
@@ -493,43 +385,73 @@ static void xgbe_isr_bh_work(struct work_struct *work)
netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);
for (i = 0; i < pdata->channel_count; i++) {
+ bool schedule_napi = false;
+ struct napi_struct *napi;
+
if (!(dma_isr & (1 << i)))
continue;
channel = pdata->channel[i];
dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
+
+ /* Precompute flags once */
+ ti = !!XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI);
+ ri = !!XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI);
+ rbu = !!XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU);
+ fbe = !!XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE);
+
netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
i, dma_ch_isr);
- /* The TI or RI interrupt bits may still be set even if using
- * per channel DMA interrupts. Check to be sure those are not
- * enabled before using the private data napi structure.
+ per_ch_irq = pdata->per_channel_irq;
+
+ /*
+ * Decide which NAPI to use and whether to schedule:
+ * - When not using per-channel IRQs: schedule on global NAPI
+ * if TI or RI are set.
+ * - RBU should also trigger NAPI (either per-channel or global)
+ * to allow refill.
*/
- if (!pdata->per_channel_irq &&
- (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
- XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
- if (napi_schedule_prep(&pdata->napi)) {
- /* Disable Tx and Rx interrupts */
- xgbe_disable_rx_tx_ints(pdata);
+ if (!per_ch_irq && (ti || ri))
+ schedule_napi = true;
+
+ if (rbu) {
+ schedule_napi = true;
+ pdata->ext_stats.rx_buffer_unavailable++;
+ }
- /* Turn on polling */
- __napi_schedule(&pdata->napi);
+ napi = per_ch_irq ? &channel->napi : &pdata->napi;
+
+ if (schedule_napi && napi_schedule_prep(napi)) {
+ /* Disable interrupts appropriately before polling */
+ if (per_ch_irq) {
+ if (pdata->channel_irq_mode)
+ xgbe_disable_rx_tx_int(pdata, channel);
+ else
+ disable_irq_nosync(channel->dma_irq);
+ } else {
+ xgbe_disable_rx_tx_ints(pdata);
}
+
+ /* Turn on polling */
+ __napi_schedule(napi);
} else {
- /* Don't clear Rx/Tx status if doing per channel DMA
- * interrupts, these will be cleared by the ISR for
- * per channel DMA interrupts.
+			/*
+			 * Don't clear Rx/Tx status if doing per-channel DMA
+			 * interrupts; those bits are serviced and cleared by
+			 * the per-channel ISR/NAPI. When NAPI is not being
+			 * scheduled here, zero TI/RI in the local copy so the
+			 * write-back below does not clear them in hardware.
+			 */
XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, TI, 0);
XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, RI, 0);
}
- if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
- pdata->ext_stats.rx_buffer_unavailable++;
-
/* Restart the device on a Fatal Bus Error */
- if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
+ if (fbe)
schedule_work(&pdata->restart_work);
/* Clear interrupt signals */
@@ -557,7 +479,7 @@ static void xgbe_isr_bh_work(struct work_struct *work)
if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
/* Read Tx Timestamp to clear interrupt */
pdata->tx_tstamp =
- hw_if->get_tx_tstamp(pdata);
+ xgbe_get_tx_tstamp(pdata);
queue_work(pdata->dev_workqueue,
&pdata->tx_tstamp_work);
}
@@ -643,7 +565,8 @@ static irqreturn_t xgbe_dma_isr(int irq, void *data)
static void xgbe_tx_timer(struct timer_list *t)
{
- struct xgbe_channel *channel = from_timer(channel, t, tx_timer);
+ struct xgbe_channel *channel = timer_container_of(channel, t,
+ tx_timer);
struct xgbe_prv_data *pdata = channel->pdata;
struct napi_struct *napi;
@@ -681,7 +604,8 @@ static void xgbe_service(struct work_struct *work)
static void xgbe_service_timer(struct timer_list *t)
{
- struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer);
+ struct xgbe_prv_data *pdata = timer_container_of(pdata, t,
+ service_timer);
struct xgbe_channel *channel;
unsigned int i;
@@ -728,7 +652,7 @@ static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
struct xgbe_channel *channel;
unsigned int i;
- del_timer_sync(&pdata->service_timer);
+ timer_delete_sync(&pdata->service_timer);
for (i = 0; i < pdata->channel_count; i++) {
channel = pdata->channel[i];
@@ -736,7 +660,7 @@ static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
break;
/* Deactivate the Tx timer */
- del_timer_sync(&channel->tx_timer);
+ timer_delete_sync(&channel->tx_timer);
channel->tx_timer_active = 0;
}
}
@@ -798,6 +722,21 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
hw_feat->pps_out_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);
+ /* Sanity check and warn if hardware reports more than supported */
+ if (hw_feat->pps_out_num > XGBE_MAX_PPS_OUT) {
+ dev_warn(pdata->dev,
+ "Hardware reports %u PPS outputs, limiting to %u\n",
+ hw_feat->pps_out_num, XGBE_MAX_PPS_OUT);
+ hw_feat->pps_out_num = XGBE_MAX_PPS_OUT;
+ }
+
+ if (hw_feat->aux_snap_num > XGBE_MAX_AUX_SNAP) {
+ dev_warn(pdata->dev,
+ "Hardware reports %u aux snapshot inputs, limiting to %u\n",
+ hw_feat->aux_snap_num, XGBE_MAX_AUX_SNAP);
+ hw_feat->aux_snap_num = XGBE_MAX_AUX_SNAP;
+ }
+
/* Translate the Hash Table size into actual number */
switch (hw_feat->hash_table_size) {
case 0:
@@ -1172,7 +1111,6 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
{
- pdata->phy_link = -1;
pdata->phy_speed = SPEED_UNKNOWN;
return pdata->phy_if.phy_reset(pdata);
@@ -1352,6 +1290,11 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
udp_tunnel_nic_reset_ntf(netdev);
+ /* Reset the phy settings */
+ ret = xgbe_phy_reset(pdata);
+ if (ret)
+ goto err_txrx;
+
netif_tx_start_all_queues(netdev);
xgbe_start_timers(pdata);
@@ -1361,6 +1304,10 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
return 0;
+err_txrx:
+ hw_if->disable_rx(pdata);
+ hw_if->disable_tx(pdata);
+
err_irqs:
xgbe_free_irqs(pdata);
@@ -1478,199 +1425,6 @@ static void xgbe_restart(struct work_struct *work)
rtnl_unlock();
}
-static void xgbe_tx_tstamp(struct work_struct *work)
-{
- struct xgbe_prv_data *pdata = container_of(work,
- struct xgbe_prv_data,
- tx_tstamp_work);
- struct skb_shared_hwtstamps hwtstamps;
- u64 nsec;
- unsigned long flags;
-
- spin_lock_irqsave(&pdata->tstamp_lock, flags);
- if (!pdata->tx_tstamp_skb)
- goto unlock;
-
- if (pdata->tx_tstamp) {
- nsec = timecounter_cyc2time(&pdata->tstamp_tc,
- pdata->tx_tstamp);
-
- memset(&hwtstamps, 0, sizeof(hwtstamps));
- hwtstamps.hwtstamp = ns_to_ktime(nsec);
- skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
- }
-
- dev_kfree_skb_any(pdata->tx_tstamp_skb);
-
- pdata->tx_tstamp_skb = NULL;
-
-unlock:
- spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
-}
-
-static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
- struct ifreq *ifreq)
-{
- if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
- sizeof(pdata->tstamp_config)))
- return -EFAULT;
-
- return 0;
-}
-
-static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
- struct ifreq *ifreq)
-{
- struct hwtstamp_config config;
- unsigned int mac_tscr;
-
- if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
- return -EFAULT;
-
- mac_tscr = 0;
-
- switch (config.tx_type) {
- case HWTSTAMP_TX_OFF:
- break;
-
- case HWTSTAMP_TX_ON:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- default:
- return -ERANGE;
- }
-
- switch (config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- break;
-
- case HWTSTAMP_FILTER_NTP_ALL:
- case HWTSTAMP_FILTER_ALL:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2, UDP, any kind of event packet */
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- fallthrough; /* to PTP v1, UDP, any kind of event packet */
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2, UDP, Sync packet */
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- fallthrough; /* to PTP v1, UDP, Sync packet */
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2, UDP, Delay_req packet */
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- fallthrough; /* to PTP v1, UDP, Delay_req packet */
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* 802.AS1, Ethernet, any kind of event packet */
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* 802.AS1, Ethernet, Sync packet */
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* 802.AS1, Ethernet, Delay_req packet */
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2/802.AS1, any layer, any kind of event packet */
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2/802.AS1, any layer, Sync packet */
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2/802.AS1, any layer, Delay_req packet */
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- default:
- return -ERANGE;
- }
-
- pdata->hw_if.config_tstamp(pdata, mac_tscr);
-
- memcpy(&pdata->tstamp_config, &config, sizeof(config));
-
- return 0;
-}
-
-static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
- struct sk_buff *skb,
- struct xgbe_packet_data *packet)
-{
- unsigned long flags;
-
- if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
- spin_lock_irqsave(&pdata->tstamp_lock, flags);
- if (pdata->tx_tstamp_skb) {
- /* Another timestamp in progress, ignore this one */
- XGMAC_SET_BITS(packet->attributes,
- TX_PACKET_ATTRIBUTES, PTP, 0);
- } else {
- pdata->tx_tstamp_skb = skb_get(skb);
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- }
- spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
- }
-
- skb_tx_timestamp(skb);
-}
-
static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
if (skb_vlan_tag_present(skb))
@@ -1860,11 +1614,6 @@ static int xgbe_open(struct net_device *netdev)
goto err_dev_wq;
}
- /* Reset the phy settings */
- ret = xgbe_phy_reset(pdata);
- if (ret)
- goto err_an_wq;
-
/* Enable the clocks */
ret = clk_prepare_enable(pdata->sysclk);
if (ret) {
@@ -1883,6 +1632,9 @@ static int xgbe_open(struct net_device *netdev)
INIT_WORK(&pdata->stopdev_work, xgbe_stopdev);
INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
+ /* Initialize PTP timestamping and clock. */
+ xgbe_init_ptp(pdata);
+
ret = xgbe_alloc_memory(pdata);
if (ret)
goto err_ptpclk;
@@ -2037,27 +1789,6 @@ static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
return 0;
}
-static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- int ret;
-
- switch (cmd) {
- case SIOCGHWTSTAMP:
- ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
- break;
-
- case SIOCSHWTSTAMP:
- ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
- break;
-
- default:
- ret = -EOPNOTSUPP;
- }
-
- return ret;
-}
-
static int xgbe_change_mtu(struct net_device *netdev, int mtu)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
@@ -2257,10 +1988,17 @@ static int xgbe_set_features(struct net_device *netdev,
if (ret)
return ret;
- if ((features & NETIF_F_RXCSUM) && !rxcsum)
+ if ((features & NETIF_F_RXCSUM) && !rxcsum) {
+ hw_if->enable_sph(pdata);
+ hw_if->enable_vxlan(pdata);
hw_if->enable_rx_csum(pdata);
- else if (!(features & NETIF_F_RXCSUM) && rxcsum)
+ schedule_work(&pdata->restart_work);
+ } else if (!(features & NETIF_F_RXCSUM) && rxcsum) {
+ hw_if->disable_sph(pdata);
+ hw_if->disable_vxlan(pdata);
hw_if->disable_rx_csum(pdata);
+ schedule_work(&pdata->restart_work);
+ }
if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
hw_if->enable_rx_vlan_stripping(pdata);
@@ -2296,7 +2034,6 @@ static const struct net_device_ops xgbe_netdev_ops = {
.ndo_set_rx_mode = xgbe_set_rx_mode,
.ndo_set_mac_address = xgbe_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_eth_ioctl = xgbe_ioctl,
.ndo_change_mtu = xgbe_change_mtu,
.ndo_tx_timeout = xgbe_tx_timeout,
.ndo_get_stats64 = xgbe_get_stats64,
@@ -2309,6 +2046,8 @@ static const struct net_device_ops xgbe_netdev_ops = {
.ndo_fix_features = xgbe_fix_features,
.ndo_set_features = xgbe_set_features,
.ndo_features_check = xgbe_features_check,
+ .ndo_hwtstamp_get = xgbe_get_hwtstamp_settings,
+ .ndo_hwtstamp_set = xgbe_set_hwtstamp_settings,
};
const struct net_device_ops *xgbe_get_netdev_ops(void)
@@ -2646,12 +2385,8 @@ skip_data:
if (XGMAC_GET_BITS(packet->attributes,
RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
- u64 nsec;
-
- nsec = timecounter_cyc2time(&pdata->tstamp_tc,
- packet->rx_tstamp);
hwtstamps = skb_hwtstamps(skb);
- hwtstamps->hwtstamp = ns_to_ktime(nsec);
+ hwtstamps->hwtstamp = ns_to_ktime(packet->rx_tstamp);
}
if (XGMAC_GET_BITS(packet->attributes,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 4431ab1c18b3..0d19b09497a0 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/spinlock.h>
@@ -194,6 +85,9 @@ static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
int i;
switch (stringset) {
+ case ETH_SS_TEST:
+ xgbe_selftest_get_strings(pdata, data);
+ break;
case ETH_SS_STATS:
for (i = 0; i < XGBE_STATS_COUNT; i++)
ethtool_puts(&data, xgbe_gstring_stats[i].stat_string);
@@ -240,6 +134,9 @@ static int xgbe_get_sset_count(struct net_device *netdev, int stringset)
int ret;
switch (stringset) {
+ case ETH_SS_TEST:
+ ret = xgbe_selftest_get_count(pdata);
+ break;
case ETH_SS_STATS:
ret = XGBE_STATS_COUNT +
(pdata->tx_ring_count * 2) +
@@ -438,6 +335,7 @@ static int xgbe_get_coalesce(struct net_device *netdev,
ec->rx_coalesce_usecs = pdata->rx_usecs;
ec->rx_max_coalesced_frames = pdata->rx_frames;
+ ec->tx_coalesce_usecs = pdata->tx_usecs;
ec->tx_max_coalesced_frames = pdata->tx_frames;
return 0;
@@ -451,7 +349,8 @@ static int xgbe_set_coalesce(struct net_device *netdev,
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
unsigned int rx_frames, rx_riwt, rx_usecs;
- unsigned int tx_frames;
+ unsigned int tx_frames, tx_usecs;
+ unsigned int jiffy_us = jiffies_to_usecs(1);
rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
rx_usecs = ec->rx_coalesce_usecs;
@@ -473,20 +372,42 @@ static int xgbe_set_coalesce(struct net_device *netdev,
return -EINVAL;
}
+ tx_usecs = ec->tx_coalesce_usecs;
tx_frames = ec->tx_max_coalesced_frames;
/* Check the bounds of values for Tx */
+ if (!tx_usecs) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "tx-usecs must not be 0");
+ return -EINVAL;
+ }
+ if (tx_usecs > XGMAC_MAX_COAL_TX_TICK) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "tx-usecs is limited to %d usec",
+ XGMAC_MAX_COAL_TX_TICK);
+ return -EINVAL;
+ }
if (tx_frames > pdata->tx_desc_count) {
netdev_err(netdev, "tx-frames is limited to %d frames\n",
pdata->tx_desc_count);
return -EINVAL;
}
+	/* Round tx-usecs down to a multiple of the jiffy granularity */
+ if (tx_usecs % jiffy_us) {
+ tx_usecs = rounddown(tx_usecs, jiffy_us);
+ if (!tx_usecs)
+ tx_usecs = jiffy_us;
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "tx-usecs rounded to %u usec due to jiffy granularity (%u usec)",
+ tx_usecs, jiffy_us);
+ }
+
pdata->rx_riwt = rx_riwt;
pdata->rx_usecs = rx_usecs;
pdata->rx_frames = rx_frames;
hw_if->config_rx_coalesce(pdata);
+ pdata->tx_usecs = tx_usecs;
pdata->tx_frames = tx_frames;
hw_if->config_tx_coalesce(pdata);
@@ -549,7 +470,7 @@ static int xgbe_set_rxfh(struct net_device *netdev,
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
- unsigned int ret;
+ int ret;
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP) {
@@ -818,7 +739,7 @@ out:
}
static const struct ethtool_ops xgbe_ethtool_ops = {
- .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = xgbe_get_drvinfo,
.get_msglevel = xgbe_get_msglevel,
@@ -845,6 +766,7 @@ static const struct ethtool_ops xgbe_ethtool_ops = {
.set_ringparam = xgbe_set_ringparam,
.get_channels = xgbe_get_channels,
.set_channels = xgbe_set_channels,
+ .self_test = xgbe_selftest_run,
};
const struct ethtool_ops *xgbe_get_ethtool_ops(void)
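A note on the tx-usecs handling added above: the requested value is rounded down to the kernel's jiffy granularity and never below one jiffy. The standalone sketch below walks through that arithmetic; HZ is assumed to be 250 (4000 usec per jiffy) purely for illustration.

/*
 * Minimal sketch of the tx-usecs rounding rule: drop the remainder
 * modulo the jiffy length, but never program less than one jiffy.
 */
#include <stdio.h>

int main(void)
{
	unsigned int jiffy_us = 4000;	/* assumes HZ=250 */
	unsigned int requested[] = { 1000, 5000, 12345 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		unsigned int tx_usecs = requested[i];

		if (tx_usecs % jiffy_us) {
			tx_usecs -= tx_usecs % jiffy_us;  /* rounddown() */
			if (!tx_usecs)
				tx_usecs = jiffy_us;
		}
		printf("requested %u usec -> programmed %u usec\n",
		       requested[i], tx_usecs);
	}
	return 0;
}

Under this assumed HZ, a request of 5000 usec is programmed as 4000 usec and a request of 1000 usec is raised to the one-jiffy minimum of 4000 usec, matching the warning emitted through extack.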
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c b/drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c
new file mode 100644
index 000000000000..0127988e10be
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
+/*
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
+ *
+ * Author: Raju Rangoju <Raju.Rangoju@amd.com>
+ */
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+void xgbe_update_tstamp_time(struct xgbe_prv_data *pdata,
+ unsigned int sec, unsigned int nsec)
+{
+ int count;
+
+ /* Set the time values and tell the device */
+ XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
+ XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
+
+ /* issue command to update the system time value */
+ XGMAC_IOWRITE(pdata, MAC_TSCR,
+ XGMAC_IOREAD(pdata, MAC_TSCR) |
+ (1 << MAC_TSCR_TSUPDT_INDEX));
+
+ /* Wait for the time adjust/update to complete */
+ count = 10000;
+ while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSUPDT))
+ udelay(5);
+
+	if (!count)
+ netdev_err(pdata->netdev,
+ "timed out updating system timestamp\n");
+}
+
+void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
+ unsigned int addend)
+{
+ unsigned int count = 10000;
+
+ /* Set the addend register value and tell the device */
+ XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);
+
+ /* Wait for addend update to complete */
+ while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
+ udelay(5);
+
+ if (!count)
+ netdev_err(pdata->netdev,
+ "timed out updating timestamp addend register\n");
+}
+
+void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
+ unsigned int nsec)
+{
+ unsigned int count = 10000;
+
+ /* Set the time values and tell the device */
+ XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
+ XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);
+
+ /* Wait for time update to complete */
+ while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
+ udelay(5);
+
+ if (!count)
+ netdev_err(pdata->netdev, "timed out initializing timestamp\n");
+}
+
+u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
+{
+ u64 nsec;
+
+ nsec = XGMAC_IOREAD(pdata, MAC_STSR);
+ nsec *= NSEC_PER_SEC;
+ nsec += XGMAC_IOREAD(pdata, MAC_STNR);
+
+ return nsec;
+}
+
+u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
+{
+ unsigned int tx_snr, tx_ssr;
+ u64 nsec;
+
+ if (pdata->vdata->tx_tstamp_workaround) {
+ tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
+ tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
+ } else {
+ tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
+ tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
+ }
+
+ if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
+ return 0;
+
+ nsec = tx_ssr;
+ nsec *= NSEC_PER_SEC;
+ nsec += tx_snr;
+
+ return nsec;
+}
+
+void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
+ struct xgbe_ring_desc *rdesc)
+{
+ u64 nsec;
+
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
+ !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
+ nsec = le32_to_cpu(rdesc->desc1);
+ nsec *= NSEC_PER_SEC;
+ nsec += le32_to_cpu(rdesc->desc0);
+ if (nsec != 0xffffffffffffffffULL) {
+ packet->rx_tstamp = nsec;
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ RX_TSTAMP, 1);
+ }
+ }
+}
+
+void xgbe_config_tstamp(struct xgbe_prv_data *pdata, unsigned int mac_tscr)
+{
+	unsigned int value;
+
+ value = XGMAC_IOREAD(pdata, MAC_TSCR);
+ value |= mac_tscr;
+ XGMAC_IOWRITE(pdata, MAC_TSCR, value);
+}
+
+void xgbe_tx_tstamp(struct work_struct *work)
+{
+ struct xgbe_prv_data *pdata = container_of(work,
+ struct xgbe_prv_data,
+ tx_tstamp_work);
+ struct skb_shared_hwtstamps hwtstamps;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+ if (!pdata->tx_tstamp_skb)
+ goto unlock;
+
+ if (pdata->tx_tstamp) {
+ memset(&hwtstamps, 0, sizeof(hwtstamps));
+ hwtstamps.hwtstamp = ns_to_ktime(pdata->tx_tstamp);
+ skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
+ }
+
+ dev_kfree_skb_any(pdata->tx_tstamp_skb);
+
+ pdata->tx_tstamp_skb = NULL;
+
+unlock:
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+}
+
+int xgbe_get_hwtstamp_settings(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ *config = pdata->tstamp_config;
+
+ return 0;
+}
+
+int xgbe_set_hwtstamp_settings(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ unsigned int mac_tscr = 0;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ break;
+
+ case HWTSTAMP_TX_ON:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+
+ case HWTSTAMP_FILTER_NTP_ALL:
+ case HWTSTAMP_FILTER_ALL:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2, UDP, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ fallthrough; /* to PTP v1, UDP, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+ /* PTP v2, UDP, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ fallthrough; /* to PTP v1, UDP, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2, UDP, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ fallthrough; /* to PTP v1, UDP, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+	/* 802.1AS, Ethernet, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+	/* 802.1AS, Ethernet, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+	/* 802.1AS, Ethernet, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+	/* PTP v2/802.1AS, any layer, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+	/* PTP v2/802.1AS, any layer, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+	/* PTP v2/802.1AS, any layer, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ default:
+ return -ERANGE;
+ }
+
+ xgbe_config_tstamp(pdata, mac_tscr);
+
+ pdata->tstamp_config = *config;
+
+ return 0;
+}
+
+void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
+ struct sk_buff *skb,
+ struct xgbe_packet_data *packet)
+{
+ unsigned long flags;
+
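+	/* If a PTP Tx timestamp was requested, hold a reference to the skb
+	 * until the hardware timestamp is available.
+	 */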
+ if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+ if (pdata->tx_tstamp_skb) {
+ /* Another timestamp in progress, ignore this one */
+ XGMAC_SET_BITS(packet->attributes,
+ TX_PACKET_ATTRIBUTES, PTP, 0);
+ } else {
+ pdata->tx_tstamp_skb = skb_get(skb);
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ }
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+ }
+
+ skb_tx_timestamp(skb);
+}
+
+int xgbe_init_ptp(struct xgbe_prv_data *pdata)
+{
+ unsigned int mac_tscr = 0;
+ struct timespec64 now;
+ u64 dividend;
+
+	/* Set up the timing registers based on the link speed. */
+ switch (pdata->phy.speed) {
+ case SPEED_1000:
+ XGMAC_IOWRITE(pdata, MAC_TICNR, MAC_TICNR_1G_INITVAL);
+ XGMAC_IOWRITE(pdata, MAC_TECNR, MAC_TECNR_1G_INITVAL);
+ break;
+ case SPEED_2500:
+ case SPEED_10000:
+ XGMAC_IOWRITE_BITS(pdata, MAC_TICSNR, TSICSNS,
+ MAC_TICSNR_10G_INITVAL);
+ XGMAC_IOWRITE(pdata, MAC_TECNR, MAC_TECNR_10G_INITVAL);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TECSNR, TSECSNS,
+ MAC_TECSNR_10G_INITVAL);
+ break;
+ case SPEED_UNKNOWN:
+ default:
+ break;
+ }
+
+ /* Enable IEEE1588 PTP clock. */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+
+ /* Overwrite earlier timestamps */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);
+
+ /* Set one nano-second accuracy */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);
+
+ /* Set fine timestamp update */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);
+
+ xgbe_config_tstamp(pdata, mac_tscr);
+
+ /* Exit if timestamping is not enabled */
+ if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
+ return -EOPNOTSUPP;
+
+ if (pdata->vdata->tstamp_ptp_clock_freq) {
+ /* Initialize time registers based on
+ * 125MHz PTP Clock Frequency
+ */
+ XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC,
+ XGBE_V2_TSTAMP_SSINC);
+ XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC,
+ XGBE_V2_TSTAMP_SNSINC);
+ } else {
+ /* Initialize time registers based on
+ * 50MHz PTP Clock Frequency
+ */
+ XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
+ XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
+ }
+
+ /* Calculate the addend:
+ * addend = 2^32 / (PTP ref clock / (PTP clock based on SSINC))
+ * = (2^32 * (PTP clock based on SSINC)) / PTP ref clock
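+	 *	  (e.g. with a 125 MHz SSINC-based clock and a 250 MHz PTP
+	 *	  ref clock, addend = 2^32 * 125e6 / 250e6 = 2^31)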
+ */
+ if (pdata->vdata->tstamp_ptp_clock_freq)
+ dividend = XGBE_V2_PTP_ACT_CLK_FREQ;
+ else
+ dividend = XGBE_PTP_ACT_CLK_FREQ;
+
+ dividend = (u64)(dividend << 32);
+ pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate);
+
+ xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
+
+ dma_wmb();
+	/* Initialize system time */
+ ktime_get_real_ts64(&now);
+
+ /* lower 32 bits of tv_sec are safe until y2106 */
+ xgbe_set_tstamp_time(pdata, (u32)now.tv_sec, now.tv_nsec);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
index 7a833894f52a..65eb7b577b65 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
@@ -179,7 +70,7 @@ static int xgbe_i2c_set_enable(struct xgbe_prv_data *pdata, bool enable)
static int xgbe_i2c_disable(struct xgbe_prv_data *pdata)
{
- unsigned int ret;
+ int ret;
ret = xgbe_i2c_set_enable(pdata, false);
if (ret) {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 0e8698928e4d..d1f0419edb23 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
@@ -384,7 +275,7 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->min_mtu = 0;
- netdev->max_mtu = XGMAC_JUMBO_PACKET_MTU;
+ netdev->max_mtu = XGMAC_GIANT_PACKET_MTU - XGBE_ETH_FRAME_HDR;
/* Use default watchdog timeout */
netdev->watchdog_timeo = 0;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 07f4f3418d01..7675bb98f029 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/interrupt.h>
@@ -375,6 +266,10 @@ static void xgbe_an37_set(struct xgbe_prv_data *pdata, bool enable,
reg |= MDIO_VEND2_CTRL1_AN_RESTART;
XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_CTRL1, reg);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_PCS_DIG_CTRL);
+ reg |= XGBE_VEND2_MAC_AUTO_SW;
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_PCS_DIG_CTRL, reg);
}
static void xgbe_an37_restart(struct xgbe_prv_data *pdata)
@@ -1003,6 +898,11 @@ static void xgbe_an37_init(struct xgbe_prv_data *pdata)
netif_dbg(pdata, link, pdata->netdev, "CL37 AN (%s) initialized\n",
(pdata->an_mode == XGBE_AN_MODE_CL37) ? "BaseX" : "SGMII");
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1);
+ reg &= ~MDIO_AN_CTRL1_ENABLE;
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg);
+
}
static void xgbe_an73_init(struct xgbe_prv_data *pdata)
@@ -1404,6 +1304,10 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
pdata->phy.link = pdata->phy_if.phy_impl.link_status(pdata,
&an_restart);
+ /* bail out if the link status register read fails */
+ if (pdata->phy.link < 0)
+ return;
+
if (an_restart) {
xgbe_phy_config_aneg(pdata);
goto adjust_link;
@@ -1651,6 +1555,7 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
pdata->phy.duplex = DUPLEX_FULL;
}
+ pdata->phy_link = 0;
pdata->phy.link = 0;
pdata->phy.pause_autoneg = pdata->pause_autoneg;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index c636999a6a84..e3e1dca9856a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -1,123 +1,15 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/log2.h>
+#include "xgbe-smn.h"
#include "xgbe.h"
#include "xgbe-common.h"
@@ -207,14 +99,14 @@ out:
static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- struct xgbe_prv_data *pdata;
- struct device *dev = &pdev->dev;
void __iomem * const *iomap_table;
- struct pci_dev *rdev;
+ unsigned int port_addr_size, reg;
+ struct device *dev = &pdev->dev;
+ struct xgbe_prv_data *pdata;
unsigned int ma_lo, ma_hi;
- unsigned int reg;
- int bar_mask;
- int ret;
+ struct pci_dev *rdev;
+ int bar_mask, ret;
+ u32 address;
pdata = xgbe_alloc_pdata(dev);
if (IS_ERR(pdata)) {
@@ -274,20 +166,31 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Set the PCS indirect addressing definition registers */
rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
- if (rdev &&
- (rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) {
- pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
- pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
- } else if (rdev && (rdev->vendor == PCI_VENDOR_ID_AMD) &&
- (rdev->device == 0x14b5)) {
- pdata->xpcs_window_def_reg = PCS_V2_YC_WINDOW_DEF;
- pdata->xpcs_window_sel_reg = PCS_V2_YC_WINDOW_SELECT;
-
- /* Yellow Carp devices do not need cdr workaround */
- pdata->vdata->an_cdr_workaround = 0;
-
- /* Yellow Carp devices do not need rrc */
- pdata->vdata->enable_rrc = 0;
+ if (rdev && rdev->vendor == PCI_VENDOR_ID_AMD) {
+ switch (rdev->device) {
+ case XGBE_RV_PCI_DEVICE_ID:
+ pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
+ break;
+ case XGBE_YC_PCI_DEVICE_ID:
+ pdata->xpcs_window_def_reg = PCS_V2_YC_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_YC_WINDOW_SELECT;
+
+ /* Yellow Carp devices do not need cdr workaround */
+ pdata->vdata->an_cdr_workaround = 0;
+
+ /* Yellow Carp devices do not need rrc */
+ pdata->vdata->enable_rrc = 0;
+ break;
+ case XGBE_RN_PCI_DEVICE_ID:
+ pdata->xpcs_window_def_reg = PCS_V3_RN_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V3_RN_WINDOW_SELECT;
+ break;
+ default:
+ pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
+ break;
+ }
} else {
pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
@@ -295,7 +198,22 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_dev_put(rdev);
/* Configure the PCS indirect addressing support */
- reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
+ if (pdata->vdata->xpcs_access == XGBE_XPCS_ACCESS_V3) {
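+		/* V3 hardware reaches the PCS window registers through SMN;
+		 * derive the per-port SMN base address from the port ID.
+		 */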
+ reg = XP_IOREAD(pdata, XP_PROP_0);
+ port_addr_size = PCS_RN_PORT_ADDR_SIZE *
+ XP_GET_BITS(reg, XP_PROP_0, PORT_ID);
+ pdata->smn_base = PCS_RN_SMN_BASE_ADDR + port_addr_size;
+
+		address = pdata->smn_base + pdata->xpcs_window_def_reg;
+ ret = amd_smn_read(0, address, &reg);
+ if (ret) {
+			pci_err(pdata->pcidev, "Failed to read PCS window definition\n");
+ goto err_pci_enable;
+ }
+ } else {
+ reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
+ }
+
pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
pdata->xpcs_window <<= 6;
pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
@@ -473,6 +391,22 @@ static int __maybe_unused xgbe_pci_resume(struct device *dev)
return ret;
}
+static struct xgbe_version_data xgbe_v3 = {
+ .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
+ .xpcs_access = XGBE_XPCS_ACCESS_V3,
+ .mmc_64bit = 1,
+ .tx_max_fifo_size = 65536,
+ .rx_max_fifo_size = 65536,
+ .tx_tstamp_workaround = 1,
+ .ecc_support = 1,
+ .i2c_support = 1,
+ .irq_reissue_support = 1,
+ .tx_desc_prefetch = 5,
+ .rx_desc_prefetch = 5,
+ .an_cdr_workaround = 0,
+ .enable_rrc = 0,
+};
+
static struct xgbe_version_data xgbe_v2a = {
.init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
.xpcs_access = XGBE_XPCS_ACCESS_V2,
@@ -480,6 +414,7 @@ static struct xgbe_version_data xgbe_v2a = {
.tx_max_fifo_size = 229376,
.rx_max_fifo_size = 229376,
.tx_tstamp_workaround = 1,
+ .tstamp_ptp_clock_freq = 1,
.ecc_support = 1,
.i2c_support = 1,
.irq_reissue_support = 1,
@@ -496,6 +431,7 @@ static struct xgbe_version_data xgbe_v2b = {
.tx_max_fifo_size = 65536,
.rx_max_fifo_size = 65536,
.tx_tstamp_workaround = 1,
+ .tstamp_ptp_clock_freq = 1,
.ecc_support = 1,
.i2c_support = 1,
.irq_reissue_support = 1,
@@ -510,6 +446,8 @@ static const struct pci_device_id xgbe_pci_table[] = {
.driver_data = (kernel_ulong_t)&xgbe_v2a },
{ PCI_VDEVICE(AMD, 0x1459),
.driver_data = (kernel_ulong_t)&xgbe_v2b },
+ { PCI_VDEVICE(AMD, 0x1641),
+ .driver_data = (kernel_ulong_t)&xgbe_v3 },
/* Last entry must be zero */
{ 0, }
};
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c
index d16eae415f72..2e6b8ffe785c 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 6a716337f48b..a68757e8fd22 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
@@ -777,7 +668,7 @@ static int xgbe_phy_mii_read_c45(struct mii_bus *mii, int addr, int devad,
else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO)
ret = xgbe_phy_mdio_mii_read_c45(pdata, addr, devad, reg);
else
- ret = -ENOTSUPP;
+ ret = -EOPNOTSUPP;
xgbe_phy_put_comm_ownership(pdata);
@@ -923,7 +814,6 @@ static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata)
static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
struct xgbe_phy_data *phy_data = pdata->phy_data;
unsigned int phy_id = phy_data->phydev->phy_id;
@@ -945,14 +835,7 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
phy_write(phy_data->phydev, 0x04, 0x0d01);
phy_write(phy_data->phydev, 0x00, 0x9140);
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- supported);
- linkmode_set_bit_array(phy_gbit_features_array,
- ARRAY_SIZE(phy_gbit_features_array),
- supported);
-
- linkmode_copy(phy_data->phydev->supported, supported);
+ linkmode_copy(phy_data->phydev->supported, PHY_GBIT_FEATURES);
phy_support_asym_pause(phy_data->phydev);
@@ -964,7 +847,6 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
struct xgbe_phy_data *phy_data = pdata->phy_data;
struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
unsigned int phy_id = phy_data->phydev->phy_id;
@@ -1028,13 +910,7 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
reg = phy_read(phy_data->phydev, 0x00);
phy_write(phy_data->phydev, 0x00, reg & ~0x00800);
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- supported);
- linkmode_set_bit_array(phy_gbit_features_array,
- ARRAY_SIZE(phy_gbit_features_array),
- supported);
- linkmode_copy(phy_data->phydev->supported, supported);
+ linkmode_copy(phy_data->phydev->supported, PHY_GBIT_FEATURES);
phy_support_asym_pause(phy_data->phydev);
netif_dbg(pdata, drv, pdata->netdev,
@@ -1113,6 +989,7 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata)
return ret;
}
phy_data->phydev = phydev;
+ phy_data->phydev->mac_managed_pm = true;
xgbe_phy_external_phy_quirks(pdata);
@@ -2870,8 +2747,7 @@ static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed)
static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- unsigned int reg;
- int ret;
+ int reg, ret;
*an_restart = 0;
@@ -2905,11 +2781,20 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
return 0;
}
- /* Link status is latched low, so read once to clear
- * and then read again to get current state
- */
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ if (reg < 0)
+ return reg;
+
+	/* Link status is latched low so that momentary link drops
+	 * can be detected. If the link was already down, read again
+	 * to get the current state.
+	 */
+ if (!pdata->phy.link && !(reg & MDIO_STAT1_LSTATUS)) {
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ if (reg < 0)
+ return reg;
+ }
if (pdata->en_rx_adap) {
/* if the link is available and adaptation is done,
@@ -2928,9 +2813,7 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
xgbe_phy_set_mode(pdata, phy_data->cur_mode);
}
- /* check again for the link and adaptation status */
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
- if ((reg & MDIO_STAT1_LSTATUS) && pdata->rx_adapt_done)
+ if (pdata->rx_adapt_done)
return 1;
} else if (reg & MDIO_STAT1_LSTATUS)
return 1;
@@ -3020,7 +2903,7 @@ static void xgbe_phy_sfp_setup(struct xgbe_prv_data *pdata)
static int xgbe_phy_int_mdio_reset(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- unsigned int ret;
+ int ret;
ret = pdata->hw_if.set_gpio(pdata, phy_data->mdio_reset_gpio);
if (ret)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
index 4365bd62942c..47d53e59ccf6 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pps.c b/drivers/net/ethernet/amd/xgbe/xgbe-pps.c
new file mode 100644
index 000000000000..6d03ae7ab36f
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pps.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
+/*
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
+ *
+ * Author: Raju Rangoju <Raju.Rangoju@amd.com>
+ */
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
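+/* Mask covering all PPS control bits for output instance 'x' in MAC_PPSCR */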
+static u32 get_pps_mask(unsigned int x)
+{
+ return GENMASK(PPS_MAXIDX(x), PPS_MINIDX(x));
+}
+
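+/* Position the 4-bit PPS command for output 'x' within its control field */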
+static u32 get_pps_cmd(unsigned int x, u32 val)
+{
+ return (val & GENMASK(3, 0)) << PPS_MINIDX(x);
+}
+
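+/* Position the 2-bit target time mode select in the upper bits of the field */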
+static u32 get_target_mode_sel(unsigned int x, u32 val)
+{
+ return (val & GENMASK(1, 0)) << (PPS_MAXIDX(x) - 2);
+}
+
+int xgbe_pps_config(struct xgbe_prv_data *pdata,
+ struct xgbe_pps_config *cfg, int index, bool on)
+{
+ unsigned int ppscr = 0;
+ unsigned int tnsec;
+ u64 period;
+
+ /* Check if target time register is busy */
+ tnsec = XGMAC_IOREAD(pdata, MAC_PPSx_TTNSR(index));
+ if (XGMAC_GET_BITS(tnsec, MAC_PPSx_TTNSR, TRGTBUSY0))
+ return -EBUSY;
+
+ ppscr = XGMAC_IOREAD(pdata, MAC_PPSCR);
+ ppscr &= ~get_pps_mask(index);
+
+ if (!on) {
+ /* Disable PPS output */
+ ppscr |= get_pps_cmd(index, XGBE_PPSCMD_STOP);
+ ppscr |= PPSEN0;
+ XGMAC_IOWRITE(pdata, MAC_PPSCR, ppscr);
+
+ return 0;
+ }
+
+ /* Configure start time */
+ XGMAC_IOWRITE(pdata, MAC_PPSx_TTSR(index), cfg->start.tv_sec);
+ XGMAC_IOWRITE(pdata, MAC_PPSx_TTNSR(index), cfg->start.tv_nsec);
+
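+	/* Convert the requested period from nanoseconds to PTP clock ticks
+	 * (one tick per sub-second increment); at least four ticks are
+	 * needed to program the interval and 50% width registers below.
+	 */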
+ period = cfg->period.tv_sec * NSEC_PER_SEC + cfg->period.tv_nsec;
+ period = div_u64(period, XGBE_V2_TSTAMP_SSINC);
+
+ if (period < 4)
+ return -EINVAL;
+
+ /* Configure interval and pulse width (50% duty cycle) */
+ XGMAC_IOWRITE(pdata, MAC_PPSx_INTERVAL(index), period - 1);
+ XGMAC_IOWRITE(pdata, MAC_PPSx_WIDTH(index), (period >> 1) - 1);
+
+ /* Enable PPS with pulse train mode */
+ ppscr |= get_pps_cmd(index, XGBE_PPSCMD_START);
+ ppscr |= get_target_mode_sel(index, XGBE_PPSTARGET_PULSE);
+ ppscr |= PPSEN0;
+
+ XGMAC_IOWRITE(pdata, MAC_PPSCR, ppscr);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
index 7051bd7cf6dc..0e0b8ec3b504 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/clk.h>
@@ -122,18 +13,6 @@
#include "xgbe.h"
#include "xgbe-common.h"
-static u64 xgbe_cc_read(const struct cyclecounter *cc)
-{
- struct xgbe_prv_data *pdata = container_of(cc,
- struct xgbe_prv_data,
- tstamp_cc);
- u64 nsec;
-
- nsec = pdata->hw_if.get_tstamp_time(pdata);
-
- return nsec;
-}
-
static int xgbe_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
struct xgbe_prv_data *pdata = container_of(info,
@@ -146,7 +25,7 @@ static int xgbe_adjfine(struct ptp_clock_info *info, long scaled_ppm)
spin_lock_irqsave(&pdata->tstamp_lock, flags);
- pdata->hw_if.update_tstamp_addend(pdata, addend);
+ xgbe_update_tstamp_addend(pdata, addend);
spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
@@ -158,16 +37,39 @@ static int xgbe_adjtime(struct ptp_clock_info *info, s64 delta)
struct xgbe_prv_data *pdata = container_of(info,
struct xgbe_prv_data,
ptp_clock_info);
+ unsigned int neg_adjust = 0;
+ unsigned int sec, nsec;
+	u32 quotient, remainder;
unsigned long flags;
+ if (delta < 0) {
+ neg_adjust = 1;
+ delta = -delta;
+ }
+
+	quotient = div_u64_rem(delta, 1000000000ULL, &remainder);
+ sec = quotient;
+	nsec = remainder;
+
+ /* Negative adjustment for Hw timer register. */
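+	/* The sub-second register rolls over at 10^9 when TSCTRLSSR is set
+	 * (digital rollover) or at 2^31 otherwise (binary rollover), so take
+	 * the complement against the matching modulus; bit 31 of the
+	 * nanoseconds value tells the hardware to subtract.
+	 */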
+ if (neg_adjust) {
+ sec = -sec;
+ if (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSCTRLSSR))
+ nsec = (1000000000UL - nsec);
+ else
+ nsec = (0x80000000UL - nsec);
+ }
+ nsec = (neg_adjust << 31) | nsec;
+
spin_lock_irqsave(&pdata->tstamp_lock, flags);
- timecounter_adjtime(&pdata->tstamp_tc, delta);
+ xgbe_update_tstamp_time(pdata, sec, nsec);
spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
return 0;
}
-static int xgbe_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
+static int xgbe_gettimex(struct ptp_clock_info *info, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct xgbe_prv_data *pdata = container_of(info,
struct xgbe_prv_data,
@@ -176,9 +78,9 @@ static int xgbe_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
u64 nsec;
spin_lock_irqsave(&pdata->tstamp_lock, flags);
-
- nsec = timecounter_read(&pdata->tstamp_tc);
-
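+	/* Bracket the device read with system timestamps so callers can
+	 * correlate the PHC time with system time.
+	 */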
+ ptp_read_system_prets(sts);
+ nsec = xgbe_get_tstamp_time(pdata);
+ ptp_read_system_postts(sts);
spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
*ts = ns_to_timespec64(nsec);
@@ -193,14 +95,9 @@ static int xgbe_settime(struct ptp_clock_info *info,
struct xgbe_prv_data,
ptp_clock_info);
unsigned long flags;
- u64 nsec;
-
- nsec = timespec64_to_ns(ts);
spin_lock_irqsave(&pdata->tstamp_lock, flags);
-
- timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, nsec);
-
+ xgbe_set_tstamp_time(pdata, ts->tv_sec, ts->tv_nsec);
spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
return 0;
@@ -209,15 +106,35 @@ static int xgbe_settime(struct ptp_clock_info *info,
static int xgbe_enable(struct ptp_clock_info *info,
struct ptp_clock_request *request, int on)
{
- return -EOPNOTSUPP;
+ struct xgbe_prv_data *pdata = container_of(info, struct xgbe_prv_data,
+ ptp_clock_info);
+ struct xgbe_pps_config *pps_cfg;
+ unsigned long flags;
+ int ret;
+
+ dev_dbg(pdata->dev, "rq->type %d on %d\n", request->type, on);
+
+ if (request->type != PTP_CLK_REQ_PEROUT)
+ return -EOPNOTSUPP;
+
+ pps_cfg = &pdata->pps[request->perout.index];
+
+ pps_cfg->start.tv_sec = request->perout.start.sec;
+ pps_cfg->start.tv_nsec = request->perout.start.nsec;
+ pps_cfg->period.tv_sec = request->perout.period.sec;
+ pps_cfg->period.tv_nsec = request->perout.period.nsec;
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+ ret = xgbe_pps_config(pdata, pps_cfg, request->perout.index, on);
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+ return ret;
}
void xgbe_ptp_register(struct xgbe_prv_data *pdata)
{
struct ptp_clock_info *info = &pdata->ptp_clock_info;
struct ptp_clock *clock;
- struct cyclecounter *cc = &pdata->tstamp_cc;
- u64 dividend;
snprintf(info->name, sizeof(info->name), "%s",
netdev_name(pdata->netdev));
@@ -225,8 +142,10 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
info->max_adj = pdata->ptpclk_rate;
info->adjfine = xgbe_adjfine;
info->adjtime = xgbe_adjtime;
- info->gettime64 = xgbe_gettime;
+ info->gettimex64 = xgbe_gettimex;
info->settime64 = xgbe_settime;
+ info->n_per_out = pdata->hw_feat.pps_out_num;
+ info->n_ext_ts = pdata->hw_feat.aux_snap_num;
info->enable = xgbe_enable;
clock = ptp_clock_register(info, pdata->dev);
@@ -237,23 +156,6 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
pdata->ptp_clock = clock;
- /* Calculate the addend:
- * addend = 2^32 / (PTP ref clock / 50Mhz)
- * = (2^32 * 50Mhz) / PTP ref clock
- */
- dividend = 50000000;
- dividend <<= 32;
- pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate);
-
- /* Setup the timecounter */
- cc->read = xgbe_cc_read;
- cc->mask = CLOCKSOURCE_MASK(64);
- cc->mult = 1;
- cc->shift = 0;
-
- timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
- ktime_to_ns(ktime_get_real()));
-
/* Disable all timestamping to start */
XGMAC_IOWRITE(pdata, MAC_TSCR, 0);
pdata->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-selftest.c b/drivers/net/ethernet/amd/xgbe/xgbe-selftest.c
new file mode 100644
index 000000000000..55e5e467facd
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-selftest.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
+/*
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
+ *
+ * Author: Raju Rangoju <Raju.Rangoju@amd.com>
+ */
+#include <linux/crc32.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+#include <net/checksum.h>
+#include <net/selftests.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+#define XGBE_LOOPBACK_NONE 0
+#define XGBE_LOOPBACK_MAC 1
+#define XGBE_LOOPBACK_PHY 2
+
+struct xgbe_test {
+ char name[ETH_GSTRING_LEN];
+ int lb;
+ int (*fn)(struct xgbe_prv_data *pdata);
+};
+
+static u8 xgbe_test_id;
+
+static int xgbe_test_loopback_validate(struct sk_buff *skb,
+ struct net_device *ndev,
+ struct packet_type *pt,
+ struct net_device *orig_ndev)
+{
+ struct net_test_priv *tdata = pt->af_packet_priv;
+ const unsigned char *dst = tdata->packet->dst;
+ const unsigned char *src = tdata->packet->src;
+ struct netsfhdr *hdr;
+ struct ethhdr *eh;
+ struct tcphdr *th;
+ struct udphdr *uh;
+ struct iphdr *ih;
+ int eat;
+
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+
+ eat = (skb->tail + skb->data_len) - skb->end;
+ if (eat > 0 && skb_shared(skb)) {
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+ }
+
+ if (skb_linearize(skb))
+ goto out;
+
+ if (skb_headlen(skb) < (NET_TEST_PKT_SIZE - ETH_HLEN))
+ goto out;
+
+ eh = (struct ethhdr *)skb_mac_header(skb);
+ if (dst) {
+ if (!ether_addr_equal_unaligned(eh->h_dest, dst))
+ goto out;
+ }
+ if (src) {
+ if (!ether_addr_equal_unaligned(eh->h_source, src))
+ goto out;
+ }
+
+ ih = ip_hdr(skb);
+
+ if (tdata->packet->tcp) {
+ if (ih->protocol != IPPROTO_TCP)
+ goto out;
+
+ th = (struct tcphdr *)((u8 *)ih + 4 * ih->ihl);
+ if (th->dest != htons(tdata->packet->dport))
+ goto out;
+
+ hdr = (struct netsfhdr *)((u8 *)th + sizeof(*th));
+ } else {
+ if (ih->protocol != IPPROTO_UDP)
+ goto out;
+
+ uh = (struct udphdr *)((u8 *)ih + 4 * ih->ihl);
+ if (uh->dest != htons(tdata->packet->dport))
+ goto out;
+
+ hdr = (struct netsfhdr *)((u8 *)uh + sizeof(*uh));
+ }
+
+ if (hdr->magic != cpu_to_be64(NET_TEST_PKT_MAGIC))
+ goto out;
+ if (tdata->packet->id != hdr->id)
+ goto out;
+
+ tdata->ok = true;
+ complete(&tdata->comp);
+out:
+ kfree_skb(skb);
+ return 0;
+}
+
+static int __xgbe_test_loopback(struct xgbe_prv_data *pdata,
+ struct net_packet_attrs *attr)
+{
+ struct net_test_priv *tdata;
+ struct sk_buff *skb = NULL;
+ int ret = 0;
+
+ tdata = kzalloc(sizeof(*tdata), GFP_KERNEL);
+ if (!tdata)
+ return -ENOMEM;
+
+ tdata->ok = false;
+ init_completion(&tdata->comp);
+
+ tdata->pt.type = htons(ETH_P_IP);
+ tdata->pt.func = xgbe_test_loopback_validate;
+ tdata->pt.dev = pdata->netdev;
+ tdata->pt.af_packet_priv = tdata;
+ tdata->packet = attr;
+
+ dev_add_pack(&tdata->pt);
+
+ skb = net_test_get_skb(pdata->netdev, xgbe_test_id, attr);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ xgbe_test_id++;
+ ret = dev_direct_xmit(skb, attr->queue_mapping);
+ if (ret)
+ goto cleanup;
+
+ if (!attr->timeout)
+ attr->timeout = NET_LB_TIMEOUT;
+
+ wait_for_completion_timeout(&tdata->comp, attr->timeout);
+ ret = tdata->ok ? 0 : -ETIMEDOUT;
+
+ if (ret)
+ netdev_err(pdata->netdev, "Response timed out: ret %d\n", ret);
+cleanup:
+ dev_remove_pack(&tdata->pt);
+ kfree(tdata);
+ return ret;
+}
+
+static int xgbe_test_mac_loopback(struct xgbe_prv_data *pdata)
+{
+ struct net_packet_attrs attr = {};
+
+ attr.dst = pdata->netdev->dev_addr;
+ return __xgbe_test_loopback(pdata, &attr);
+}
+
+static int xgbe_test_phy_loopback(struct xgbe_prv_data *pdata)
+{
+ struct net_packet_attrs attr = {};
+ int ret;
+
+ if (!pdata->netdev->phydev) {
+ netdev_err(pdata->netdev, "phydev not found: cannot start PHY loopback test\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = phy_loopback(pdata->netdev->phydev, true, 0);
+ if (ret)
+ return ret;
+
+ attr.dst = pdata->netdev->dev_addr;
+ ret = __xgbe_test_loopback(pdata, &attr);
+
+ phy_loopback(pdata->netdev->phydev, false, 0);
+ return ret;
+}
+
+static int xgbe_test_sph(struct xgbe_prv_data *pdata)
+{
+ struct net_packet_attrs attr = {};
+ unsigned long cnt_end, cnt_start;
+ int ret;
+
+ cnt_start = pdata->ext_stats.rx_split_header_packets;
+
+ if (!pdata->sph) {
+ netdev_err(pdata->netdev, "Split Header not enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* UDP test */
+ attr.dst = pdata->netdev->dev_addr;
+ attr.tcp = false;
+
+ ret = __xgbe_test_loopback(pdata, &attr);
+ if (ret)
+ return ret;
+
+ cnt_end = pdata->ext_stats.rx_split_header_packets;
+ if (cnt_end <= cnt_start)
+ return -EINVAL;
+
+ /* TCP test */
+ cnt_start = cnt_end;
+
+ attr.dst = pdata->netdev->dev_addr;
+ attr.tcp = true;
+
+ ret = __xgbe_test_loopback(pdata, &attr);
+ if (ret)
+ return ret;
+
+ cnt_end = pdata->ext_stats.rx_split_header_packets;
+ if (cnt_end <= cnt_start)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int xgbe_test_jumbo(struct xgbe_prv_data *pdata)
+{
+ struct net_packet_attrs attr = {};
+ int size = pdata->rx_buf_size;
+
+ attr.dst = pdata->netdev->dev_addr;
+ attr.max_size = size - ETH_FCS_LEN;
+
+ return __xgbe_test_loopback(pdata, &attr);
+}
+
+static const struct xgbe_test xgbe_selftests[] = {
+ {
+ .name = "MAC Loopback ",
+ .lb = XGBE_LOOPBACK_MAC,
+ .fn = xgbe_test_mac_loopback,
+ }, {
+ .name = "PHY Loopback ",
+ .lb = XGBE_LOOPBACK_NONE,
+ .fn = xgbe_test_phy_loopback,
+ }, {
+ .name = "Split Header ",
+ .lb = XGBE_LOOPBACK_PHY,
+ .fn = xgbe_test_sph,
+ }, {
+ .name = "Jumbo Frame ",
+ .lb = XGBE_LOOPBACK_PHY,
+ .fn = xgbe_test_jumbo,
+ },
+};
+
+void xgbe_selftest_run(struct net_device *dev,
+ struct ethtool_test *etest, u64 *buf)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(dev);
+ int count = xgbe_selftest_get_count(pdata);
+ int i, ret;
+
+ memset(buf, 0, sizeof(*buf) * count);
+ xgbe_test_id = 0;
+
+ if (etest->flags != ETH_TEST_FL_OFFLINE) {
+ netdev_err(pdata->netdev, "Only offline tests are supported\n");
+ etest->flags |= ETH_TEST_FL_FAILED;
+ return;
+ } else if (!netif_carrier_ok(dev)) {
+ netdev_err(pdata->netdev,
+ "Invalid link, cannot execute tests\n");
+ etest->flags |= ETH_TEST_FL_FAILED;
+ return;
+ }
+
+	/* Wait for the queues to drain */
+ msleep(200);
+
+ for (i = 0; i < count; i++) {
+ ret = 0;
+
+ switch (xgbe_selftests[i].lb) {
+ case XGBE_LOOPBACK_PHY:
+ ret = -EOPNOTSUPP;
+ if (dev->phydev)
+ ret = phy_loopback(dev->phydev, true, 0);
+ if (!ret)
+ break;
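+			/* PHY loopback is unavailable, fall back to MAC loopback */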
+ fallthrough;
+ case XGBE_LOOPBACK_MAC:
+ ret = xgbe_enable_mac_loopback(pdata);
+ break;
+ case XGBE_LOOPBACK_NONE:
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+		/*
+		 * The first tests are always MAC / PHY loopback.
+		 * If either of them is not supported, abort early.
+		 */
+ if (ret) {
+ netdev_err(pdata->netdev, "Loopback not supported\n");
+ etest->flags |= ETH_TEST_FL_FAILED;
+ break;
+ }
+
+ ret = xgbe_selftests[i].fn(pdata);
+ if (ret && (ret != -EOPNOTSUPP))
+ etest->flags |= ETH_TEST_FL_FAILED;
+ buf[i] = ret;
+
+ switch (xgbe_selftests[i].lb) {
+ case XGBE_LOOPBACK_PHY:
+ ret = -EOPNOTSUPP;
+ if (dev->phydev)
+ ret = phy_loopback(dev->phydev, false, 0);
+ if (!ret)
+ break;
+ fallthrough;
+ case XGBE_LOOPBACK_MAC:
+ xgbe_disable_mac_loopback(pdata);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+void xgbe_selftest_get_strings(struct xgbe_prv_data *pdata, u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ for (i = 0; i < xgbe_selftest_get_count(pdata); i++)
+ ethtool_puts(&p, xgbe_selftests[i].name);
+}
+
+int xgbe_selftest_get_count(struct xgbe_prv_data *pdata)
+{
+ return ARRAY_SIZE(xgbe_selftests);
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-smn.h b/drivers/net/ethernet/amd/xgbe/xgbe-smn.h
new file mode 100644
index 000000000000..c6ae127ced03
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-smn.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
+ *
+ * Author: Raju Rangoju <Raju.Rangoju@amd.com>
+ */
+
+#ifndef __SMN_H__
+#define __SMN_H__
+
+#ifdef CONFIG_AMD_NB
+
+#include <asm/amd/nb.h>
+
+#else
+
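+/* Without the AMD northbridge driver there is no SMN register access;
+ * the stubs below report the device as absent (-ENODEV).
+ */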
+static inline int amd_smn_write(u16 node, u32 address, u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int amd_smn_read(u16 node, u32 address, u32 *value)
+{
+ return -ENODEV;
+}
+
+#endif
+#endif
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index d85386cac8d1..03ef0f548483 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -1,117 +1,8 @@
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) */
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#ifndef __XGBE_H__
@@ -189,11 +80,13 @@
#define XGBE_IRQ_MODE_EDGE 0
#define XGBE_IRQ_MODE_LEVEL 1
+#define XGBE_ETH_FRAME_HDR (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
#define XGMAC_MIN_PACKET 60
#define XGMAC_STD_PACKET_MTU 1500
#define XGMAC_MAX_STD_PACKET 1518
#define XGMAC_JUMBO_PACKET_MTU 9000
#define XGMAC_MAX_JUMBO_PACKET 9018
+#define XGMAC_GIANT_PACKET_MTU 16368
#define XGMAC_ETH_PREAMBLE (12 + 8) /* Inter-frame gap + preamble */
#define XGMAC_PFC_DATA_LEN 46
@@ -226,6 +119,14 @@
#define XGBE_MSI_BASE_COUNT 4
#define XGBE_MSI_MIN_COUNT (XGBE_MSI_BASE_COUNT + 1)
+/* Initial PTP register values based on Link Speed. */
+#define MAC_TICNR_1G_INITVAL 0x10
+#define MAC_TECNR_1G_INITVAL 0x28
+
+#define MAC_TICSNR_10G_INITVAL 0x33
+#define MAC_TECNR_10G_INITVAL 0x14
+#define MAC_TECSNR_10G_INITVAL 0xCC
+
/* PCI clock frequencies */
#define XGBE_V2_DMA_CLOCK_FREQ 500000000 /* 500 MHz */
#define XGBE_V2_PTP_CLOCK_FREQ 125000000 /* 125 MHz */
@@ -235,6 +136,15 @@
*/
#define XGBE_TSTAMP_SSINC 20
#define XGBE_TSTAMP_SNSINC 0
+#define XGBE_PTP_ACT_CLK_FREQ 500000000
+
+#define XGBE_V2_TSTAMP_SSINC 0xA
+#define XGBE_V2_TSTAMP_SNSINC 0
+#define XGBE_V2_PTP_ACT_CLK_FREQ 1000000000
+
+/* Define maximum supported values */
+#define XGBE_MAX_PPS_OUT 4
+#define XGBE_MAX_AUX_SNAP 4
/* Driver PMT macros */
#define XGMAC_DRIVER_CONTEXT 1
@@ -262,6 +172,7 @@
/* Default coalescing parameters */
#define XGMAC_INIT_DMA_TX_USECS 1000
#define XGMAC_INIT_DMA_TX_FRAMES 25
+#define XGMAC_MAX_COAL_TX_TICK 100000
#define XGMAC_MAX_DMA_RIWT 0xff
#define XGMAC_INIT_DMA_RX_USECS 30
@@ -292,12 +203,12 @@
#define XGBE_LINK_TIMEOUT 5
#define XGBE_KR_TRAINING_WAIT_ITER 50
-#define XGBE_SGMII_AN_LINK_STATUS BIT(1)
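+/* SGMII AN link status word layout: bit 1 duplex, bits 2-3 speed, bit 4 link status */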
+#define XGBE_SGMII_AN_LINK_DUPLEX BIT(1)
#define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3))
#define XGBE_SGMII_AN_LINK_SPEED_10 0x00
#define XGBE_SGMII_AN_LINK_SPEED_100 0x04
#define XGBE_SGMII_AN_LINK_SPEED_1000 0x08
-#define XGBE_SGMII_AN_LINK_DUPLEX BIT(4)
+#define XGBE_SGMII_AN_LINK_STATUS BIT(4)
/* ECC correctable error notification window (seconds) */
#define XGBE_ECC_LIMIT 60
@@ -347,6 +258,15 @@
(_src)->link_modes._sname, \
__ETHTOOL_LINK_MODE_MASK_NBITS)
+/* XGBE PCI device id */
+#define XGBE_RV_PCI_DEVICE_ID 0x15d0
+#define XGBE_YC_PCI_DEVICE_ID 0x14b5
+#define XGBE_RN_PCI_DEVICE_ID 0x1630
+
+/* Generic low and high masks */
+#define XGBE_GEN_HI_MASK GENMASK(31, 16)
+#define XGBE_GEN_LO_MASK GENMASK(15, 0)
+
struct xgbe_prv_data;
struct xgbe_packet_data {
@@ -565,6 +485,7 @@ enum xgbe_speed {
enum xgbe_xpcs_access {
XGBE_XPCS_ACCESS_V1 = 0,
XGBE_XPCS_ACCESS_V2,
+ XGBE_XPCS_ACCESS_V3,
};
enum xgbe_an_mode {
@@ -756,6 +677,11 @@ struct xgbe_ext_stats {
u64 rx_vxlan_csum_errors;
};
+struct xgbe_pps_config {
+ struct timespec64 start;
+ struct timespec64 period;
+};
+
struct xgbe_hw_if {
int (*tx_complete)(struct xgbe_ring_desc *);
@@ -838,14 +764,6 @@ struct xgbe_hw_if {
void (*tx_mmc_int)(struct xgbe_prv_data *);
void (*read_mmc_stats)(struct xgbe_prv_data *);
- /* For Timestamp config */
- int (*config_tstamp)(struct xgbe_prv_data *, unsigned int);
- void (*update_tstamp_addend)(struct xgbe_prv_data *, unsigned int);
- void (*set_tstamp_time)(struct xgbe_prv_data *, unsigned int sec,
- unsigned int nsec);
- u64 (*get_tstamp_time)(struct xgbe_prv_data *);
- u64 (*get_tx_tstamp)(struct xgbe_prv_data *);
-
/* For Data Center Bridging config */
void (*config_tc)(struct xgbe_prv_data *);
void (*config_dcb_tc)(struct xgbe_prv_data *);
@@ -865,6 +783,10 @@ struct xgbe_hw_if {
void (*enable_vxlan)(struct xgbe_prv_data *);
void (*disable_vxlan)(struct xgbe_prv_data *);
void (*set_vxlan_id)(struct xgbe_prv_data *);
+
+ /* For Split Header */
+ void (*enable_sph)(struct xgbe_prv_data *pdata);
+ void (*disable_sph)(struct xgbe_prv_data *pdata);
};
/* This structure represents implementation specific routines for an
@@ -1039,6 +961,7 @@ struct xgbe_version_data {
unsigned int tx_max_fifo_size;
unsigned int rx_max_fifo_size;
unsigned int tx_tstamp_workaround;
+ unsigned int tstamp_ptp_clock_freq;
unsigned int ecc_support;
unsigned int i2c_support;
unsigned int irq_reissue_support;
@@ -1056,6 +979,7 @@ struct xgbe_prv_data {
struct device *dev;
struct platform_device *phy_platdev;
struct device *phy_dev;
+ unsigned int smn_base;
/* Version related data */
struct xgbe_version_data *vdata;
@@ -1222,14 +1146,15 @@ struct xgbe_prv_data {
spinlock_t tstamp_lock;
struct ptp_clock_info ptp_clock_info;
struct ptp_clock *ptp_clock;
- struct hwtstamp_config tstamp_config;
- struct cyclecounter tstamp_cc;
- struct timecounter tstamp_tc;
+ struct kernel_hwtstamp_config tstamp_config;
unsigned int tstamp_addend;
struct work_struct tx_tstamp_work;
struct sk_buff *tx_tstamp_skb;
u64 tx_tstamp;
+ /* Pulse Per Second output */
+ struct xgbe_pps_config pps[XGBE_MAX_PPS_OUT];
+
/* DCB support */
struct ieee_ets *ets;
struct ieee_pfc *pfc;
@@ -1321,6 +1246,7 @@ struct xgbe_prv_data {
int rx_adapt_retries;
bool rx_adapt_done;
bool mode_set;
+ bool sph;
};
/* Function prototypes*/
@@ -1369,6 +1295,44 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *);
void xgbe_restart_dev(struct xgbe_prv_data *pdata);
void xgbe_full_restart_dev(struct xgbe_prv_data *pdata);
+/* For Timestamp config */
+void xgbe_config_tstamp(struct xgbe_prv_data *pdata, unsigned int mac_tscr);
+u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata);
+u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata);
+void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
+ struct xgbe_ring_desc *rdesc);
+void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
+ unsigned int addend);
+void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
+ unsigned int nsec);
+void xgbe_tx_tstamp(struct work_struct *work);
+int xgbe_get_hwtstamp_settings(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int xgbe_set_hwtstamp_settings(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
+ struct sk_buff *skb,
+ struct xgbe_packet_data *packet);
+int xgbe_init_ptp(struct xgbe_prv_data *pdata);
+void xgbe_update_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
+ unsigned int nsec);
+
+int xgbe_pps_config(struct xgbe_prv_data *pdata, struct xgbe_pps_config *cfg,
+ int index, bool on);
+
+/* Selftest functions */
+void xgbe_selftest_run(struct net_device *dev,
+ struct ethtool_test *etest, u64 *buf);
+void xgbe_selftest_get_strings(struct xgbe_prv_data *pdata, u8 *data);
+int xgbe_selftest_get_count(struct xgbe_prv_data *pdata);
+
+/* Loopback control */
+int xgbe_enable_mac_loopback(struct xgbe_prv_data *pdata);
+void xgbe_disable_mac_loopback(struct xgbe_prv_data *pdata);
+
#ifdef CONFIG_DEBUG_FS
void xgbe_debugfs_init(struct xgbe_prv_data *);
void xgbe_debugfs_exit(struct xgbe_prv_data *);
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index 2a91c84aebdb..d7ca847d44c7 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -9,8 +9,6 @@
#include "main.h"
-static const struct acpi_device_id xge_acpi_match[];
-
static int xge_get_resources(struct xge_pdata *pdata)
{
struct platform_device *pdev;
@@ -731,7 +729,7 @@ MODULE_DEVICE_TABLE(acpi, xge_acpi_match);
static struct platform_driver xge_driver = {
.driver = {
.name = "xgene-enet-v2",
- .acpi_match_table = ACPI_PTR(xge_acpi_match),
+ .acpi_match_table = xge_acpi_match,
},
.probe = xge_probe,
.remove = xge_remove,
diff --git a/drivers/net/ethernet/apm/xgene-v2/mdio.c b/drivers/net/ethernet/apm/xgene-v2/mdio.c
index eba06831aec2..6a17045a5f62 100644
--- a/drivers/net/ethernet/apm/xgene-v2/mdio.c
+++ b/drivers/net/ethernet/apm/xgene-v2/mdio.c
@@ -97,7 +97,6 @@ void xge_mdio_remove(struct net_device *ndev)
int xge_mdio_config(struct net_device *ndev)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct xge_pdata *pdata = netdev_priv(ndev);
struct device *dev = &pdata->pdev->dev;
struct mii_bus *mdio_bus;
@@ -137,17 +136,12 @@ int xge_mdio_config(struct net_device *ndev)
goto err;
}
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_AUI_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_BNC_BIT, mask);
-
- linkmode_andnot(phydev->supported, phydev->supported, mask);
- linkmode_copy(phydev->advertising, phydev->supported);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+
pdata->phy_speed = SPEED_UNKNOWN;
return 0;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index e641dbbea1e2..b854b6b42d77 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -421,18 +421,12 @@ static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata)
if (dev->of_node) {
struct clk *parent = clk_get_parent(pdata->clk);
+ long rate = rgmii_clock(pdata->phy_speed);
- switch (pdata->phy_speed) {
- case SPEED_10:
- clk_set_rate(parent, 2500000);
- break;
- case SPEED_100:
- clk_set_rate(parent, 25000000);
- break;
- default:
- clk_set_rate(parent, 125000000);
- break;
- }
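+		/* rgmii_clock() returns a negative value for unknown speeds;
+		 * default to the 1 Gbps reference clock in that case.
+		 */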
+ if (rate < 0)
+ rate = 125000000;
+
+ clk_set_rate(parent, rate);
}
#ifdef CONFIG_ACPI
else {
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 86607b79c09f..cc3b1631c905 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -6,8 +6,14 @@
* Keyur Chudgar <kchudgar@apm.com>
*/
-#include <linux/of_gpio.h>
-#include <linux/gpio.h>
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_xgmac.h"
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 785f4b4ff758..b3bf8d6f88e8 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -20,7 +20,6 @@
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
-#include <linux/crc32poly.h>
#include <linux/bitrev.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
@@ -461,7 +460,7 @@ static int bmac_suspend(struct macio_dev *mdev, pm_message_t state)
/* prolly should wait for dma to finish & turn off the chip */
spin_lock_irqsave(&bp->lock, flags);
if (bp->timeout_active) {
- del_timer(&bp->tx_timeout);
+ timer_delete(&bp->tx_timeout);
bp->timeout_active = 0;
}
disable_irq(dev->irq);
@@ -546,7 +545,7 @@ static inline void bmac_set_timeout(struct net_device *dev)
spin_lock_irqsave(&bp->lock, flags);
if (bp->timeout_active)
- del_timer(&bp->tx_timeout);
+ timer_delete(&bp->tx_timeout);
bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
add_timer(&bp->tx_timeout);
bp->timeout_active = 1;
@@ -755,7 +754,7 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
XXDEBUG(("bmac_txdma_intr\n"));
}
- /* del_timer(&bp->tx_timeout); */
+ /* timer_delete(&bp->tx_timeout); */
/* bp->timeout_active = 0; */
while (1) {
@@ -796,59 +795,6 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
}
#ifndef SUNHME_MULTICAST
-/* Real fast bit-reversal algorithm, 6-bit values */
-static int reverse6[64] = {
- 0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
- 0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
- 0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
- 0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
- 0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
- 0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
- 0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
- 0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
-};
-
-static unsigned int
-crc416(unsigned int curval, unsigned short nxtval)
-{
- unsigned int counter, cur = curval, next = nxtval;
- int high_crc_set, low_data_set;
-
- /* Swap bytes */
- next = ((next & 0x00FF) << 8) | (next >> 8);
-
- /* Compute bit-by-bit */
- for (counter = 0; counter < 16; ++counter) {
- /* is high CRC bit set? */
- if ((cur & 0x80000000) == 0) high_crc_set = 0;
- else high_crc_set = 1;
-
- cur = cur << 1;
-
- if ((next & 0x0001) == 0) low_data_set = 0;
- else low_data_set = 1;
-
- next = next >> 1;
-
- /* do the XOR */
- if (high_crc_set ^ low_data_set) cur = cur ^ CRC32_POLY_BE;
- }
- return cur;
-}
-
-static unsigned int
-bmac_crc(unsigned short *address)
-{
- unsigned int newcrc;
-
- XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
- newcrc = crc416(0xffffffff, *address); /* address bits 47 - 32 */
- newcrc = crc416(newcrc, address[1]); /* address bits 31 - 16 */
- newcrc = crc416(newcrc, address[2]); /* address bits 15 - 0 */
-
- return(newcrc);
-}
-
/*
* Add requested mcast addr to BMac's hash table filter.
*
@@ -861,8 +807,7 @@ bmac_addhash(struct bmac_data *bp, unsigned char *addr)
unsigned short mask;
if (!(*addr)) return;
- crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
- crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
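+	/* Hash on the upper 6 bits of the CRC-32 of the address */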
+ crc = crc32(~0, addr, ETH_ALEN) >> 26;
if (bp->hash_use_count[crc]++) return; /* This bit is already set */
mask = crc % 16;
mask = (unsigned char)1 << mask;
@@ -876,8 +821,7 @@ bmac_removehash(struct bmac_data *bp, unsigned char *addr)
unsigned char mask;
/* Now, delete the address from the filter copy, as indicated */
- crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
- crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
+ crc = crc32(~0, addr, ETH_ALEN) >> 26;
if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
if (--bp->hash_use_count[crc]) return; /* That bit is still in use */
mask = crc % 16;
@@ -1466,7 +1410,7 @@ bmac_output(struct sk_buff *skb, struct net_device *dev)
static void bmac_tx_timeout(struct timer_list *t)
{
- struct bmac_data *bp = from_timer(bp, t, tx_timeout);
+ struct bmac_data *bp = timer_container_of(bp, t, tx_timeout);
struct net_device *dev = macio_get_drvdata(bp->mdev);
volatile struct dbdma_regs __iomem *td = bp->tx_dma;
volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
index e6350971c707..af26905e44e3 100644
--- a/drivers/net/ethernet/apple/mace.c
+++ b/drivers/net/ethernet/apple/mace.c
@@ -523,7 +523,7 @@ static inline void mace_set_timeout(struct net_device *dev)
struct mace_data *mp = netdev_priv(dev);
if (mp->timeout_active)
- del_timer(&mp->tx_timeout);
+ timer_delete(&mp->tx_timeout);
mp->tx_timeout.expires = jiffies + TX_TIMEOUT;
add_timer(&mp->tx_timeout);
mp->timeout_active = 1;
@@ -676,7 +676,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
i = mp->tx_empty;
while (in_8(&mb->pr) & XMTSV) {
- del_timer(&mp->tx_timeout);
+ timer_delete(&mp->tx_timeout);
mp->timeout_active = 0;
/*
* Clear any interrupt indication associated with this status
@@ -805,7 +805,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
static void mace_tx_timeout(struct timer_list *t)
{
- struct mace_data *mp = from_timer(mp, t, tx_timeout);
+ struct mace_data *mp = timer_container_of(mp, t, tx_timeout);
struct net_device *dev = macio_get_drvdata(mp->mdev);
volatile struct mace __iomem *mb = mp->mace;
volatile struct dbdma_regs __iomem *td = mp->tx_dma;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
index 414b2e448d59..787ea91802e7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
@@ -113,19 +113,9 @@ static const struct hwmon_ops aq_hwmon_ops = {
.read_string = aq_hwmon_read_string,
};
-static u32 aq_hwmon_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_LABEL,
- HWMON_T_INPUT | HWMON_T_LABEL,
- 0,
-};
-
-static const struct hwmon_channel_info aq_hwmon_temp = {
- .type = hwmon_temp,
- .config = aq_hwmon_temp_config,
-};
-
static const struct hwmon_channel_info * const aq_hwmon_info[] = {
- &aq_hwmon_temp,
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL),
NULL,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index 42c0efc1b455..4e66fd9b2ab1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -113,6 +113,8 @@ struct aq_stats_s {
#define AQ_HW_POWER_STATE_D0 0U
#define AQ_HW_POWER_STATE_D3 3U
+#define AQ_FW_WAKE_ON_LINK_RTPM BIT(10)
+
#define AQ_HW_FLAG_STARTED 0x00000004U
#define AQ_HW_FLAG_STOPPING 0x00000008U
#define AQ_HW_FLAG_RESETTING 0x00000010U
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
index 1921741f7311..18b08277d2e1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
@@ -15,6 +15,7 @@
#include "aq_hw.h"
#include "aq_nic.h"
+#include "hw_atl/hw_atl_llh.h"
void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
u32 shift, u32 val)
@@ -81,6 +82,27 @@ void aq_hw_write_reg64(struct aq_hw_s *hw, u32 reg, u64 value)
lo_hi_writeq(value, hw->mmio + reg);
}
+int aq_hw_invalidate_descriptor_cache(struct aq_hw_s *hw)
+{
+ int err;
+ u32 val;
+
+ /* Invalidate Descriptor Cache to prevent writing to the cached
+ * descriptors and to the data pointer of those descriptors
+ */
+ hw_atl_rdm_rx_dma_desc_cache_init_tgl(hw);
+
+ err = aq_hw_err_from_flags(hw);
+ if (err)
+ goto err_exit;
+
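+	/* Wait for the hardware to report the descriptor cache init as done */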
+ readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
+ hw, val, val == 1, 1000U, 10000U);
+
+err_exit:
+ return err;
+}
+
int aq_hw_err_from_flags(struct aq_hw_s *hw)
{
int err = 0;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
index ffa6e4067c21..d89c63d88e4a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
@@ -35,6 +35,7 @@ u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg);
void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value);
u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg);
void aq_hw_write_reg64(struct aq_hw_s *hw, u32 reg, u64 value);
+int aq_hw_invalidate_descriptor_cache(struct aq_hw_s *hw);
int aq_hw_err_from_flags(struct aq_hw_s *hw);
int aq_hw_num_tcs(struct aq_hw_s *hw);
int aq_hw_q_per_tc(struct aq_hw_s *hw);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index c1d1673c5749..4ef4fe64b8ac 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -123,7 +123,6 @@ static netdev_tx_t aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *nd
}
#endif
- skb_tx_timestamp(skb);
return aq_nic_xmit(aq_nic, skb);
}
@@ -259,10 +258,15 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev)
(void)aq_nic_set_multicast_list(aq_nic, ndev);
}
-#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
-static int aq_ndev_config_hwtstamp(struct aq_nic_s *aq_nic,
- struct hwtstamp_config *config)
+static int aq_ndev_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
+ struct aq_nic_s *aq_nic = netdev_priv(netdev);
+
+ if (!IS_REACHABLE(CONFIG_PTP_1588_CLOCK) || !aq_nic->aq_ptp)
+ return -EOPNOTSUPP;
+
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
@@ -291,59 +295,17 @@ static int aq_ndev_config_hwtstamp(struct aq_nic_s *aq_nic,
return aq_ptp_hwtstamp_config_set(aq_nic->aq_ptp, config);
}
-#endif
-
-static int aq_ndev_hwtstamp_set(struct aq_nic_s *aq_nic, struct ifreq *ifr)
-{
- struct hwtstamp_config config;
-#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
- int ret_val;
-#endif
-
- if (!aq_nic->aq_ptp)
- return -EOPNOTSUPP;
-
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
- ret_val = aq_ndev_config_hwtstamp(aq_nic, &config);
- if (ret_val)
- return ret_val;
-#endif
-
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
-}
-#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
-static int aq_ndev_hwtstamp_get(struct aq_nic_s *aq_nic, struct ifreq *ifr)
+static int aq_ndev_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config config;
+ struct aq_nic_s *aq_nic = netdev_priv(netdev);
if (!aq_nic->aq_ptp)
return -EOPNOTSUPP;
- aq_ptp_hwtstamp_config_get(aq_nic->aq_ptp, &config);
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
-}
-#endif
-
-static int aq_ndev_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- struct aq_nic_s *aq_nic = netdev_priv(netdev);
-
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return aq_ndev_hwtstamp_set(aq_nic, ifr);
-
-#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
- case SIOCGHWTSTAMP:
- return aq_ndev_hwtstamp_get(aq_nic, ifr);
-#endif
- }
-
- return -EOPNOTSUPP;
+ aq_ptp_hwtstamp_config_get(aq_nic->aq_ptp, config);
+ return 0;
}
static int aq_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto,
@@ -501,12 +463,13 @@ static const struct net_device_ops aq_ndev_ops = {
.ndo_set_mac_address = aq_ndev_set_mac_address,
.ndo_set_features = aq_ndev_set_features,
.ndo_fix_features = aq_ndev_fix_features,
- .ndo_eth_ioctl = aq_ndev_ioctl,
.ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
.ndo_setup_tc = aq_ndo_setup_tc,
.ndo_bpf = aq_xdp,
.ndo_xdp_xmit = aq_xdp_xmit,
+ .ndo_hwtstamp_get = aq_ndev_hwtstamp_get,
+ .ndo_hwtstamp_set = aq_ndev_hwtstamp_set,
};
static int __init aq_ndev_init_module(void)
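The hunks above convert the driver from the SIOCSHWTSTAMP/SIOCGHWTSTAMP ioctl plumbing to the dedicated net_device_ops callbacks. With these callbacks the core performs the user-space copies and hands the driver a struct kernel_hwtstamp_config directly, so the ifreq/copy_from_user handling can be deleted. A minimal wiring sketch with hypothetical names (the callback signatures match the ones used in this patch):

	static int example_hwtstamp_set(struct net_device *ndev,
					struct kernel_hwtstamp_config *config,
					struct netlink_ext_ack *extack)
	{
		/* validate config->tx_type / config->rx_filter, program HW */
		return 0;
	}

	static int example_hwtstamp_get(struct net_device *ndev,
					struct kernel_hwtstamp_config *config)
	{
		/* report the currently programmed configuration */
		return 0;
	}

	static const struct net_device_ops example_ndev_ops = {
		/* ... */
		.ndo_hwtstamp_get = example_hwtstamp_get,
		.ndo_hwtstamp_set = example_hwtstamp_set,
	};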
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index fe0e3e2a8117..b24eaa5283fa 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -254,7 +254,7 @@ static void aq_nic_service_task(struct work_struct *work)
static void aq_nic_service_timer_cb(struct timer_list *t)
{
- struct aq_nic_s *self = from_timer(self, t, service_timer);
+ struct aq_nic_s *self = timer_container_of(self, t, service_timer);
mod_timer(&self->service_timer,
jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
@@ -264,7 +264,7 @@ static void aq_nic_service_timer_cb(struct timer_list *t)
static void aq_nic_polling_timer_cb(struct timer_list *t)
{
- struct aq_nic_s *self = from_timer(self, t, polling_timer);
+ struct aq_nic_s *self = timer_container_of(self, t, polling_timer);
unsigned int i = 0U;
for (i = 0U; self->aq_vecs > i; ++i)
@@ -898,6 +898,8 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
frags = aq_nic_map_skb(self, skb, ring);
+ skb_tx_timestamp(skb);
+
if (likely(frags)) {
err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
ring, frags);
@@ -1389,13 +1391,13 @@ int aq_nic_stop(struct aq_nic_s *self)
netif_tx_disable(self->ndev);
netif_carrier_off(self->ndev);
- del_timer_sync(&self->service_timer);
+ timer_delete_sync(&self->service_timer);
cancel_work_sync(&self->service_task);
self->aq_hw_ops->hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);
if (self->aq_nic_cfg.is_polling)
- del_timer_sync(&self->polling_timer);
+ timer_delete_sync(&self->polling_timer);
else
aq_pci_func_free_irqs(self);
@@ -1441,7 +1443,9 @@ void aq_nic_deinit(struct aq_nic_s *self, bool link_down)
aq_ptp_ring_free(self);
aq_ptp_free(self);
- if (likely(self->aq_fw_ops->deinit) && link_down) {
+ /* May be invoked during hot unplug. */
+ if (pci_device_is_present(self->pdev) &&
+ likely(self->aq_fw_ops->deinit) && link_down) {
mutex_lock(&self->fwreq_mutex);
self->aq_fw_ops->deinit(self->aq_hw);
mutex_unlock(&self->fwreq_mutex);
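The from_timer() -> timer_container_of() and del_timer_sync() -> timer_delete_sync() changes in this file (and in the Atheros drivers later in the series) are mechanical renames of the timer API; behaviour is unchanged. A minimal usage sketch with hypothetical names:

	struct example_nic {
		struct timer_list service_timer;
	};

	static void example_service_cb(struct timer_list *t)
	{
		/* recover the enclosing structure from the timer pointer */
		struct example_nic *nic = timer_container_of(nic, t, service_timer);

		mod_timer(&nic->service_timer, jiffies + HZ);
	}

	/* teardown: waits for a running callback before removing the timer */
	timer_delete_sync(&nic->service_timer);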
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 43c71f6b314f..ed5231dece3f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -162,8 +162,8 @@ int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
self->msix_entry_mask |= (1 << i);
if (pdev->msix_enabled && affinity_mask)
- irq_set_affinity_hint(pci_irq_vector(pdev, i),
- affinity_mask);
+ irq_update_affinity_hint(pci_irq_vector(pdev, i),
+ affinity_mask);
}
return err;
@@ -187,7 +187,7 @@ void aq_pci_func_free_irqs(struct aq_nic_s *self)
continue;
if (pdev->msix_enabled)
- irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
+ irq_update_affinity_hint(pci_irq_vector(pdev, i), NULL);
free_irq(pci_irq_vector(pdev, i), irq_data);
self->msix_entry_mask &= ~(1U << i);
}
@@ -463,7 +463,7 @@ static const struct dev_pm_ops aq_pm_ops = {
};
#endif
-static struct pci_driver aq_pci_ops = {
+static struct pci_driver aq_pci_driver = {
.name = AQ_CFG_DRV_NAME,
.id_table = aq_pci_tbl,
.probe = aq_pci_probe,
@@ -476,11 +476,11 @@ static struct pci_driver aq_pci_ops = {
int aq_pci_func_register_driver(void)
{
- return pci_register_driver(&aq_pci_ops);
+ return pci_register_driver(&aq_pci_driver);
}
void aq_pci_func_unregister_driver(void)
{
- pci_unregister_driver(&aq_pci_ops);
+ pci_unregister_driver(&aq_pci_driver);
}
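The switch from irq_set_affinity_hint() to irq_update_affinity_hint() keeps publishing the affinity hint (consumed by tools such as irqbalance) but, per the newer API's intent, no longer forces the kernel-side affinity as a side effect; passing NULL on teardown still clears the hint. Usage is otherwise identical:

	/* request path */
	irq_update_affinity_hint(pci_irq_vector(pdev, i), affinity_mask);

	/* free path: clear the hint again */
	irq_update_affinity_hint(pci_irq_vector(pdev, i), NULL);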
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
index 5acb3e16b567..0fa0f891c0e0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
@@ -51,7 +51,7 @@ struct ptp_tx_timeout {
struct aq_ptp_s {
struct aq_nic_s *aq_nic;
- struct hwtstamp_config hwtstamp_config;
+ struct kernel_hwtstamp_config hwtstamp_config;
spinlock_t ptp_lock;
spinlock_t ptp_ring_lock;
struct ptp_clock *ptp_clock;
@@ -567,7 +567,7 @@ static void aq_ptp_rx_hwtstamp(struct aq_ptp_s *aq_ptp, struct skb_shared_hwtsta
}
void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
*config = aq_ptp->hwtstamp_config;
}
@@ -588,7 +588,7 @@ static void aq_ptp_prepare_filters(struct aq_ptp_s *aq_ptp)
}
int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
const struct aq_hw_ops *hw_ops;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h
index 210b723f2207..5e643ec7cc06 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h
@@ -60,9 +60,9 @@ void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp);
/* PTP availability must be checked before calling these */
void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config);
int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config);
/* Return whether the ring belongs to PTP or not */
bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring);
@@ -130,9 +130,9 @@ static inline int aq_ptp_xmit(struct aq_nic_s *aq_nic, struct sk_buff *skb)
static inline void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp) {}
static inline void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
- struct hwtstamp_config *config) {}
+ struct kernel_hwtstamp_config *config) {}
static inline int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
return 0;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index f21de0c21e52..d23d23bed39f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -547,6 +547,11 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
if (!buff->is_eop) {
unsigned int frag_cnt = 0U;
+
+ /* There will be an extra fragment */
+ if (buff->len > AQ_CFG_RX_HDR_SIZE)
+ frag_cnt++;
+
buff_ = buff;
do {
bool is_rsc_completed = true;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 493432d036b9..c7895bfb2ecf 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -1198,26 +1198,9 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
{
- int err;
- u32 val;
-
hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
- /* Invalidate Descriptor Cache to prevent writing to the cached
- * descriptors and to the data pointer of those descriptors
- */
- hw_atl_rdm_rx_dma_desc_cache_init_tgl(self);
-
- err = aq_hw_err_from_flags(self);
-
- if (err)
- goto err_exit;
-
- readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
- self, val, val == 1, 1000U, 10000U);
-
-err_exit:
- return err;
+ return aq_hw_invalidate_descriptor_cache(self);
}
int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, struct aq_ring_s *ring)
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index f5901f8e3907..f6b990b7f5b4 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -226,7 +226,6 @@ struct __packed offload_info {
struct offload_port_info ports;
struct offload_ka_info kas;
struct offload_rr_info rrs;
- u8 buf[];
};
struct __packed hw_atl_utils_fw_rpc {
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
index b0ed572e88c6..0ce9caae8799 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
@@ -759,7 +759,7 @@ static int hw_atl2_hw_stop(struct aq_hw_s *self)
{
hw_atl_b0_hw_irq_disable(self, HW_ATL2_INT_MASK);
- return 0;
+ return aq_hw_invalidate_descriptor_cache(self);
}
static struct aq_stats_s *hw_atl2_utils_get_hw_stats(struct aq_hw_s *self)
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
index 52e2070a4a2f..7370e3f76b62 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
@@ -462,6 +462,44 @@ static int aq_a2_fw_get_mac_temp(struct aq_hw_s *self, int *temp)
return aq_a2_fw_get_phy_temp(self, temp);
}
+static int aq_a2_fw_set_wol_params(struct aq_hw_s *self, const u8 *mac, u32 wol)
+{
+ struct mac_address_aligned_s mac_address;
+ struct link_control_s link_control;
+ struct wake_on_lan_s wake_on_lan;
+
+ memcpy(mac_address.aligned.mac_address, mac, ETH_ALEN);
+ hw_atl2_shared_buffer_write(self, mac_address, mac_address);
+
+ memset(&wake_on_lan, 0, sizeof(wake_on_lan));
+
+ if (wol & WAKE_MAGIC)
+ wake_on_lan.wake_on_magic_packet = 1U;
+
+ if (wol & (WAKE_PHY | AQ_FW_WAKE_ON_LINK_RTPM))
+ wake_on_lan.wake_on_link_up = 1U;
+
+ hw_atl2_shared_buffer_write(self, sleep_proxy, wake_on_lan);
+
+ hw_atl2_shared_buffer_get(self, link_control, link_control);
+ link_control.mode = AQ_HOST_MODE_SLEEP_PROXY;
+ hw_atl2_shared_buffer_write(self, link_control, link_control);
+
+ return hw_atl2_shared_buffer_finish_ack(self);
+}
+
+static int aq_a2_fw_set_power(struct aq_hw_s *self, unsigned int power_state,
+ const u8 *mac)
+{
+ u32 wol = self->aq_nic_cfg->wol;
+ int err = 0;
+
+ if (wol)
+ err = aq_a2_fw_set_wol_params(self, mac, wol);
+
+ return err;
+}
+
static int aq_a2_fw_set_eee_rate(struct aq_hw_s *self, u32 speed)
{
struct link_options_s link_options;
@@ -605,6 +643,7 @@ const struct aq_fw_ops aq_a2_fw_ops = {
.set_state = aq_a2_fw_set_state,
.update_link_status = aq_a2_fw_update_link_status,
.update_stats = aq_a2_fw_update_stats,
+ .set_power = aq_a2_fw_set_power,
.get_mac_temp = aq_a2_fw_get_mac_temp,
.get_phy_temp = aq_a2_fw_get_phy_temp,
.set_eee_rate = aq_a2_fw_set_eee_rate,
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 31ee477dd131..8283aeee35fb 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -111,6 +111,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
+ struct device *dev = ndev->dev.parent;
unsigned int i;
for (i = 0; i < TX_BD_NUM; i++) {
@@ -140,7 +141,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
stats->tx_bytes += skb->len;
}
- dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr),
+ dma_unmap_single(dev, dma_unmap_addr(tx_buff, addr),
dma_unmap_len(tx_buff, len), DMA_TO_DEVICE);
/* return the sk_buff to system */
@@ -174,6 +175,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
static int arc_emac_rx(struct net_device *ndev, int budget)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
+ struct device *dev = ndev->dev.parent;
unsigned int work_done;
for (work_done = 0; work_done < budget; work_done++) {
@@ -223,9 +225,9 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
continue;
}
- addr = dma_map_single(&ndev->dev, (void *)skb->data,
+ addr = dma_map_single(dev, (void *)skb->data,
EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, addr)) {
+ if (dma_mapping_error(dev, addr)) {
if (net_ratelimit())
netdev_err(ndev, "cannot map dma buffer\n");
dev_kfree_skb(skb);
@@ -237,7 +239,7 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
}
/* unmap previously mapped skb */
- dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
+ dma_unmap_single(dev, dma_unmap_addr(rx_buff, addr),
dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
pktlen = info & LEN_MASK;
@@ -423,6 +425,7 @@ static int arc_emac_open(struct net_device *ndev)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
struct phy_device *phy_dev = ndev->phydev;
+ struct device *dev = ndev->dev.parent;
int i;
phy_dev->autoneg = AUTONEG_ENABLE;
@@ -445,9 +448,9 @@ static int arc_emac_open(struct net_device *ndev)
if (unlikely(!rx_buff->skb))
return -ENOMEM;
- addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
+ addr = dma_map_single(dev, (void *)rx_buff->skb->data,
EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, addr)) {
+ if (dma_mapping_error(dev, addr)) {
netdev_err(ndev, "cannot dma map\n");
dev_kfree_skb(rx_buff->skb);
return -ENOMEM;
@@ -548,6 +551,7 @@ static void arc_emac_set_rx_mode(struct net_device *ndev)
static void arc_free_tx_queue(struct net_device *ndev)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
+ struct device *dev = ndev->dev.parent;
unsigned int i;
for (i = 0; i < TX_BD_NUM; i++) {
@@ -555,7 +559,7 @@ static void arc_free_tx_queue(struct net_device *ndev)
struct buffer_state *tx_buff = &priv->tx_buff[i];
if (tx_buff->skb) {
- dma_unmap_single(&ndev->dev,
+ dma_unmap_single(dev,
dma_unmap_addr(tx_buff, addr),
dma_unmap_len(tx_buff, len),
DMA_TO_DEVICE);
@@ -579,6 +583,7 @@ static void arc_free_tx_queue(struct net_device *ndev)
static void arc_free_rx_queue(struct net_device *ndev)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
+ struct device *dev = ndev->dev.parent;
unsigned int i;
for (i = 0; i < RX_BD_NUM; i++) {
@@ -586,7 +591,7 @@ static void arc_free_rx_queue(struct net_device *ndev)
struct buffer_state *rx_buff = &priv->rx_buff[i];
if (rx_buff->skb) {
- dma_unmap_single(&ndev->dev,
+ dma_unmap_single(dev,
dma_unmap_addr(rx_buff, addr),
dma_unmap_len(rx_buff, len),
DMA_FROM_DEVICE);
@@ -679,6 +684,7 @@ static netdev_tx_t arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
unsigned int len, *txbd_curr = &priv->txbd_curr;
struct net_device_stats *stats = &ndev->stats;
__le32 *info = &priv->txbd[*txbd_curr].info;
+ struct device *dev = ndev->dev.parent;
dma_addr_t addr;
if (skb_padto(skb, ETH_ZLEN))
@@ -692,10 +698,9 @@ static netdev_tx_t arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_BUSY;
}
- addr = dma_map_single(&ndev->dev, (void *)skb->data, len,
- DMA_TO_DEVICE);
+ addr = dma_map_single(dev, (void *)skb->data, len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&ndev->dev, addr))) {
+ if (unlikely(dma_mapping_error(dev, addr))) {
stats->tx_dropped++;
stats->tx_errors++;
dev_kfree_skb_any(skb);
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
index 87f40c2ba904..078b1a72c161 100644
--- a/drivers/net/ethernet/arc/emac_mdio.c
+++ b/drivers/net/ethernet/arc/emac_mdio.c
@@ -133,6 +133,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
struct arc_emac_mdio_bus_data *data = &priv->bus_data;
struct device_node *np = priv->dev->of_node;
const char *name = "Synopsys MII Bus";
+ struct device_node *mdio_node;
struct mii_bus *bus;
int error;
@@ -164,7 +165,13 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
snprintf(bus->id, MII_BUS_ID_SIZE, "%s", bus->name);
- error = of_mdiobus_register(bus, priv->dev->of_node);
+ /* Backwards compatibility for EMAC nodes without MDIO subnode. */
+ mdio_node = of_get_child_by_name(np, "mdio");
+ if (!mdio_node)
+ mdio_node = of_node_get(np);
+
+ error = of_mdiobus_register(bus, mdio_node);
+ of_node_put(mdio_node);
if (error) {
mdiobus_free(bus);
return dev_err_probe(priv->dev, error,
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 3d4c3d8698e2..cbc730c7cff2 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1213,6 +1213,11 @@ static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
buf->rx.rx_buf = data;
buf->rx.dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(&ag->pdev->dev, buf->rx.dma_addr)) {
+ skb_free_frag(data);
+ buf->rx.rx_buf = NULL;
+ return false;
+ }
desc->data = (u32)buf->rx.dma_addr + offset;
return true;
}
@@ -1391,7 +1396,7 @@ static void ag71xx_hw_disable(struct ag71xx *ag)
ag71xx_dma_reset(ag);
napi_disable(&ag->napi);
- del_timer_sync(&ag->oom_timer);
+ timer_delete_sync(&ag->oom_timer);
ag71xx_rings_cleanup(ag);
}
@@ -1511,6 +1516,10 @@ static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&ag->pdev->dev, dma_addr)) {
+ netif_dbg(ag, tx_err, ndev, "DMA mapping error\n");
+ goto err_drop;
+ }
i = ring->curr & ring_mask;
desc = ag71xx_ring_desc(ring, i);
@@ -1563,7 +1572,7 @@ err_drop:
static void ag71xx_oom_timer_handler(struct timer_list *t)
{
- struct ag71xx *ag = from_timer(ag, t, oom_timer);
+ struct ag71xx *ag = timer_container_of(ag, t, oom_timer);
napi_schedule(&ag->napi);
}
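The dma_mapping_error() checks added to ag71xx follow the general rule that a dma_map_single() result must be validated before it is written into a hardware descriptor; a mapping failure means the address is unusable and the buffer has to be dropped or retried. Condensed sketch (hypothetical names; see also the multi-fragment unwind sketch after the atl1 changes below):

	dma_addr_t addr;

	addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr)) {
		/* never hand an invalid bus address to the NIC */
		ndev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	desc->addr = lower_32_bits(addr);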
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index c571614b1d50..7efa3fc257b3 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -231,8 +231,8 @@ static u32 atl1c_wait_until_idle(struct atl1c_hw *hw, u32 modu_ctrl)
*/
static void atl1c_phy_config(struct timer_list *t)
{
- struct atl1c_adapter *adapter = from_timer(adapter, t,
- phy_config_timer);
+ struct atl1c_adapter *adapter = timer_container_of(adapter, t,
+ phy_config_timer);
struct atl1c_hw *hw = &adapter->hw;
unsigned long flags;
@@ -357,7 +357,7 @@ static void atl1c_common_task(struct work_struct *work)
static void atl1c_del_timer(struct atl1c_adapter *adapter)
{
- del_timer_sync(&adapter->phy_config_timer);
+ timer_delete_sync(&adapter->phy_config_timer);
}
@@ -2688,7 +2688,7 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->mii.mdio_write = atl1c_mdio_write;
adapter->mii.phy_id_mask = 0x1f;
adapter->mii.reg_num_mask = MDIO_CTRL_REG_MASK;
- dev_set_threaded(netdev, true);
+ netif_threaded_enable(netdev);
for (i = 0; i < adapter->rx_queue_count; ++i)
netif_napi_add(netdev, &adapter->rrd_ring[i].napi,
atl1c_clean_rx);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 9b778b34b67e..40290028580b 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -115,8 +115,8 @@ static inline void atl1e_irq_reset(struct atl1e_adapter *adapter)
*/
static void atl1e_phy_config(struct timer_list *t)
{
- struct atl1e_adapter *adapter = from_timer(adapter, t,
- phy_config_timer);
+ struct atl1e_adapter *adapter = timer_container_of(adapter, t,
+ phy_config_timer);
struct atl1e_hw *hw = &adapter->hw;
unsigned long flags;
@@ -232,7 +232,7 @@ static void atl1e_link_chg_event(struct atl1e_adapter *adapter)
static void atl1e_del_timer(struct atl1e_adapter *adapter)
{
- del_timer_sync(&adapter->phy_config_timer);
+ timer_delete_sync(&adapter->phy_config_timer);
}
static void atl1e_cancel_work(struct atl1e_adapter *adapter)
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 3afd3627ce48..98a4d089270e 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -1861,14 +1861,21 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
break;
}
- buffer_info->alloced = 1;
- buffer_info->skb = skb;
- buffer_info->length = (u16) adapter->rx_buffer_len;
page = virt_to_page(skb->data);
offset = offset_in_page(skb->data);
buffer_info->dma = dma_map_page(&pdev->dev, page, offset,
adapter->rx_buffer_len,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+ kfree_skb(skb);
+ adapter->soft_stats.rx_dropped++;
+ break;
+ }
+
+ buffer_info->alloced = 1;
+ buffer_info->skb = skb;
+ buffer_info->length = (u16)adapter->rx_buffer_len;
+
rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len);
rfd_desc->coalese = 0;
@@ -2183,8 +2190,8 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
return 0;
}
-static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
- struct tx_packet_desc *ptpd)
+static bool atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
+ struct tx_packet_desc *ptpd)
{
struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
struct atl1_buffer *buffer_info;
@@ -2194,6 +2201,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
unsigned int nr_frags;
unsigned int f;
int retval;
+ u16 first_mapped;
u16 next_to_use;
u16 data_len;
u8 hdr_len;
@@ -2201,6 +2209,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
buf_len -= skb->data_len;
nr_frags = skb_shinfo(skb)->nr_frags;
next_to_use = atomic_read(&tpd_ring->next_to_use);
+ first_mapped = next_to_use;
buffer_info = &tpd_ring->buffer_info[next_to_use];
BUG_ON(buffer_info->skb);
/* put skb in last TPD */
@@ -2216,6 +2225,8 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
buffer_info->dma = dma_map_page(&adapter->pdev->dev, page,
offset, hdr_len,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma))
+ goto dma_err;
if (++next_to_use == tpd_ring->count)
next_to_use = 0;
@@ -2242,6 +2253,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
page, offset,
buffer_info->length,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ buffer_info->dma))
+ goto dma_err;
if (++next_to_use == tpd_ring->count)
next_to_use = 0;
}
@@ -2254,6 +2268,8 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
buffer_info->dma = dma_map_page(&adapter->pdev->dev, page,
offset, buf_len,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma))
+ goto dma_err;
if (++next_to_use == tpd_ring->count)
next_to_use = 0;
}
@@ -2277,6 +2293,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev,
frag, i * ATL1_MAX_TX_BUF_LEN,
buffer_info->length, DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ buffer_info->dma))
+ goto dma_err;
if (++next_to_use == tpd_ring->count)
next_to_use = 0;
@@ -2285,6 +2304,22 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
/* last tpd's buffer-info */
buffer_info->skb = skb;
+
+ return true;
+
+ dma_err:
+ while (first_mapped != next_to_use) {
+ buffer_info = &tpd_ring->buffer_info[first_mapped];
+ dma_unmap_page(&adapter->pdev->dev,
+ buffer_info->dma,
+ buffer_info->length,
+ DMA_TO_DEVICE);
+ buffer_info->dma = 0;
+
+ if (++first_mapped == tpd_ring->count)
+ first_mapped = 0;
+ }
+ return false;
}
static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count,
@@ -2355,10 +2390,8 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
len = skb_headlen(skb);
- if (unlikely(skb->len <= 0)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
+ if (unlikely(skb->len <= 0))
+ goto drop_packet;
nr_frags = skb_shinfo(skb)->nr_frags;
for (f = 0; f < nr_frags; f++) {
@@ -2371,10 +2404,9 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
if (mss) {
if (skb->protocol == htons(ETH_P_IP)) {
proto_hdr_len = skb_tcp_all_headers(skb);
- if (unlikely(proto_hdr_len > len)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
+ if (unlikely(proto_hdr_len > len))
+ goto drop_packet;
+
/* need additional TPD ? */
if (proto_hdr_len != len)
count += (len - proto_hdr_len +
@@ -2406,23 +2438,26 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
}
tso = atl1_tso(adapter, skb, ptpd);
- if (tso < 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
+ if (tso < 0)
+ goto drop_packet;
if (!tso) {
ret_val = atl1_tx_csum(adapter, skb, ptpd);
- if (ret_val < 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
+ if (ret_val < 0)
+ goto drop_packet;
}
- atl1_tx_map(adapter, skb, ptpd);
+ if (!atl1_tx_map(adapter, skb, ptpd))
+ goto drop_packet;
+
atl1_tx_queue(adapter, count, ptpd);
atl1_update_mailbox(adapter);
return NETDEV_TX_OK;
+
+drop_packet:
+ adapter->soft_stats.tx_errors++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
}
static int atl1_rings_clean(struct napi_struct *napi, int budget)
@@ -2556,8 +2591,8 @@ static irqreturn_t atl1_intr(int irq, void *data)
*/
static void atl1_phy_config(struct timer_list *t)
{
- struct atl1_adapter *adapter = from_timer(adapter, t,
- phy_config_timer);
+ struct atl1_adapter *adapter = timer_container_of(adapter, t,
+ phy_config_timer);
struct atl1_hw *hw = &adapter->hw;
unsigned long flags;
@@ -2641,7 +2676,7 @@ static void atl1_down(struct atl1_adapter *adapter)
napi_disable(&adapter->napi);
netif_stop_queue(netdev);
- del_timer_sync(&adapter->phy_config_timer);
+ timer_delete_sync(&adapter->phy_config_timer);
adapter->phy_timer_pending = false;
atlx_irq_disable(adapter);
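The new dma_err unwind in atl1_tx_map() above shows the standard recovery pattern for multi-fragment transmits: if mapping any fragment fails, every mapping created so far for this skb must be released before the packet is dropped, otherwise IOMMU/swiotlb entries leak. Stripped to its core (hypothetical names):

	u16 i = first_mapped;

	while (i != next_to_use) {
		dma_unmap_page(dev, ring[i].dma, ring[i].len, DMA_TO_DEVICE);
		ring[i].dma = 0;
		if (++i == ring_count)
			i = 0;
	}
	dev_kfree_skb_any(skb);
	stats->tx_errors++;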
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index fa9a4919f25d..280e2f5f4aa5 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -752,8 +752,8 @@ static void atl2_down(struct atl2_adapter *adapter)
atl2_irq_disable(adapter);
- del_timer_sync(&adapter->watchdog_timer);
- del_timer_sync(&adapter->phy_config_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->phy_config_timer);
clear_bit(0, &adapter->cfg_phy);
netif_carrier_off(netdev);
@@ -1010,7 +1010,8 @@ static void atl2_tx_timeout(struct net_device *netdev, unsigned int txqueue)
*/
static void atl2_watchdog(struct timer_list *t)
{
- struct atl2_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+ struct atl2_adapter *adapter = timer_container_of(adapter, t,
+ watchdog_timer);
if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
u32 drop_rxd, drop_rxs;
@@ -1035,8 +1036,8 @@ static void atl2_watchdog(struct timer_list *t)
*/
static void atl2_phy_config(struct timer_list *t)
{
- struct atl2_adapter *adapter = from_timer(adapter, t,
- phy_config_timer);
+ struct atl2_adapter *adapter = timer_container_of(adapter, t,
+ phy_config_timer);
struct atl2_hw *hw = &adapter->hw;
unsigned long flags;
@@ -1468,8 +1469,8 @@ static void atl2_remove(struct pci_dev *pdev)
* explicitly disable watchdog tasks from being rescheduled */
set_bit(__ATL2_DOWN, &adapter->flags);
- del_timer_sync(&adapter->watchdog_timer);
- del_timer_sync(&adapter->phy_config_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->phy_config_timer);
cancel_work_sync(&adapter->reset_task);
cancel_work_sync(&adapter->link_chg_task);
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index eeec8bf17cf4..666522d64775 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -25,6 +25,7 @@ config B44
select SSB
select MII
select PHYLIB
+ select FIXED_PHY if BCM47XX
help
If you have a network (Ethernet) controller of this type, say Y
or M here.
@@ -96,7 +97,6 @@ config BNX2
config CNIC
tristate "QLogic CNIC support"
depends on PCI && (IPV6 || IPV6=n)
- depends on MMU
select BNX2
select UIO
help
@@ -123,6 +123,7 @@ config TIGON3
tristate "Broadcom Tigon3 support"
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
+ select CRC32
select PHYLIB
help
This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
@@ -143,7 +144,7 @@ config BNX2X
depends on PTP_1588_CLOCK_OPTIONAL
select FW_LOADER
select ZLIB_INFLATE
- select LIBCRC32C
+ select CRC32
select MDIO
help
This driver supports Broadcom NetXtremeII 10 gigabit Ethernet cards.
@@ -207,7 +208,7 @@ config BNXT
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
select FW_LOADER
- select LIBCRC32C
+ select CRC32
select NET_DEVLINK
select PAGE_POOL
select DIMLIB
@@ -253,6 +254,16 @@ config BNXT_HWMON
Say Y if you want to expose the thermal sensor data on NetXtreme-C/E
devices, via the hwmon sysfs interface.
+config BNGE
+ tristate "Broadcom Ethernet device support"
+ depends on PCI
+ select NET_DEVLINK
+ select PAGE_POOL
+ help
+ This driver supports Broadcom 50/100/200/400/800 gigabit Ethernet cards.
+ The module will be called bng_en. To compile this driver as a module,
+ choose M here.
+
config BCMASP
tristate "Broadcom ASP 2.0 Ethernet support"
depends on ARCH_BRCMSTB || COMPILE_TEST
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index bac5cb6ad0cd..10cc1c92ecfc 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -18,3 +18,4 @@ obj-$(CONFIG_BGMAC_PLATFORM) += bgmac-platform.o
obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
obj-$(CONFIG_BNXT) += bnxt/
obj-$(CONFIG_BCMASP) += asp2/
+obj-$(CONFIG_BNGE) += bnge/
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
index a68fab1b05f0..fd35f4b4dc50 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
@@ -141,7 +141,7 @@ void bcmasp_flush_rx_port(struct bcmasp_intf *intf)
return;
}
- rx_ctrl_core_wl(priv, mask, priv->hw_info->rx_ctrl_flush);
+ rx_ctrl_core_wl(priv, mask, ASP_RX_CTRL_FLUSH);
}
static void bcmasp_netfilt_hw_en_wake(struct bcmasp_priv *priv,
@@ -518,7 +518,7 @@ void bcmasp_netfilt_suspend(struct bcmasp_intf *intf)
int ret, i;
/* Write all filters to HW */
- for (i = 0; i < NUM_NET_FILTERS; i++) {
+ for (i = 0; i < priv->num_net_filters; i++) {
/* If the filter does not match the port, skip programming. */
if (!priv->net_filters[i].claimed ||
priv->net_filters[i].port != intf->port)
@@ -551,7 +551,7 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
struct bcmasp_priv *priv = intf->parent;
int j = 0, i;
- for (i = 0; i < NUM_NET_FILTERS; i++) {
+ for (i = 0; i < priv->num_net_filters; i++) {
if (!priv->net_filters[i].claimed ||
priv->net_filters[i].port != intf->port)
continue;
@@ -577,7 +577,7 @@ int bcmasp_netfilt_get_active(struct bcmasp_intf *intf)
struct bcmasp_priv *priv = intf->parent;
int cnt = 0, i;
- for (i = 0; i < NUM_NET_FILTERS; i++) {
+ for (i = 0; i < priv->num_net_filters; i++) {
if (!priv->net_filters[i].claimed ||
priv->net_filters[i].port != intf->port)
continue;
@@ -602,7 +602,7 @@ bool bcmasp_netfilt_check_dup(struct bcmasp_intf *intf,
size_t fs_size = 0;
int i;
- for (i = 0; i < NUM_NET_FILTERS; i++) {
+ for (i = 0; i < priv->num_net_filters; i++) {
if (!priv->net_filters[i].claimed ||
priv->net_filters[i].port != intf->port)
continue;
@@ -670,7 +670,7 @@ struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf,
int i, open_index = -1;
/* Check whether we exceed the filter table capacity */
- if (loc != RX_CLS_LOC_ANY && loc >= NUM_NET_FILTERS)
+ if (loc != RX_CLS_LOC_ANY && loc >= priv->num_net_filters)
return ERR_PTR(-EINVAL);
/* If the filter location is busy (already claimed) and we are initializing
@@ -686,7 +686,7 @@ struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf,
/* Initialize the loop index based on the desired location or from 0 */
i = loc == RX_CLS_LOC_ANY ? 0 : loc;
- for ( ; i < NUM_NET_FILTERS; i++) {
+ for ( ; i < priv->num_net_filters; i++) {
/* Found matching network filter */
if (!init &&
priv->net_filters[i].claimed &&
@@ -779,7 +779,7 @@ static void bcmasp_en_mda_filter(struct bcmasp_intf *intf, bool en,
priv->mda_filters[i].en = en;
priv->mda_filters[i].port = intf->port;
- rx_filter_core_wl(priv, ((intf->channel + 8) |
+ rx_filter_core_wl(priv, ((intf->channel + priv->tx_chan_offset) |
(en << ASP_RX_FILTER_MDA_CFG_EN_SHIFT) |
ASP_RX_FILTER_MDA_CFG_UMC_SEL(intf->port)),
ASP_RX_FILTER_MDA_CFG(i));
@@ -865,7 +865,7 @@ void bcmasp_disable_all_filters(struct bcmasp_intf *intf)
res_count = bcmasp_total_res_mda_cnt(intf->parent);
/* Disable all filters held by this port */
- for (i = res_count; i < NUM_MDA_FILTERS; i++) {
+ for (i = res_count; i < priv->num_mda_filters; i++) {
if (priv->mda_filters[i].en &&
priv->mda_filters[i].port == intf->port)
bcmasp_en_mda_filter(intf, 0, i);
@@ -909,7 +909,7 @@ int bcmasp_set_en_mda_filter(struct bcmasp_intf *intf, unsigned char *addr,
res_count = bcmasp_total_res_mda_cnt(intf->parent);
- for (i = res_count; i < NUM_MDA_FILTERS; i++) {
+ for (i = res_count; i < priv->num_mda_filters; i++) {
/* If filter not enabled or belongs to another port skip */
if (!priv->mda_filters[i].en ||
priv->mda_filters[i].port != intf->port)
@@ -924,7 +924,7 @@ int bcmasp_set_en_mda_filter(struct bcmasp_intf *intf, unsigned char *addr,
}
/* Create new filter if possible */
- for (i = res_count; i < NUM_MDA_FILTERS; i++) {
+ for (i = res_count; i < priv->num_mda_filters; i++) {
if (priv->mda_filters[i].en)
continue;
@@ -944,12 +944,12 @@ static void bcmasp_core_init_filters(struct bcmasp_priv *priv)
/* Disable all filters and reset software view since the HW
* can lose context while in deep sleep suspend states
*/
- for (i = 0; i < NUM_MDA_FILTERS; i++) {
+ for (i = 0; i < priv->num_mda_filters; i++) {
rx_filter_core_wl(priv, 0x0, ASP_RX_FILTER_MDA_CFG(i));
priv->mda_filters[i].en = 0;
}
- for (i = 0; i < NUM_NET_FILTERS; i++)
+ for (i = 0; i < priv->num_net_filters; i++)
rx_filter_core_wl(priv, 0x0, ASP_RX_FILTER_NET_CFG(i));
/* Top level filter enable bit should be enabled at all times, set
@@ -966,18 +966,8 @@ static void bcmasp_core_init_filters(struct bcmasp_priv *priv)
/* ASP core initialization */
static void bcmasp_core_init(struct bcmasp_priv *priv)
{
- tx_analytics_core_wl(priv, 0x0, ASP_TX_ANALYTICS_CTRL);
- rx_analytics_core_wl(priv, 0x4, ASP_RX_ANALYTICS_CTRL);
-
- rx_edpkt_core_wl(priv, (ASP_EDPKT_HDR_SZ_128 << ASP_EDPKT_HDR_SZ_SHIFT),
- ASP_EDPKT_HDR_CFG);
- rx_edpkt_core_wl(priv,
- (ASP_EDPKT_ENDI_BT_SWP_WD << ASP_EDPKT_ENDI_DESC_SHIFT),
- ASP_EDPKT_ENDI);
-
rx_edpkt_core_wl(priv, 0x1b, ASP_EDPKT_BURST_BUF_PSCAL_TOUT);
rx_edpkt_core_wl(priv, 0x3e8, ASP_EDPKT_BURST_BUF_WRITE_TOUT);
- rx_edpkt_core_wl(priv, 0x3e8, ASP_EDPKT_BURST_BUF_READ_TOUT);
rx_edpkt_core_wl(priv, ASP_EDPKT_ENABLE_EN, ASP_EDPKT_ENABLE);
@@ -1020,6 +1010,18 @@ static void bcmasp_core_clock_select_one(struct bcmasp_priv *priv, bool slow)
ctrl_core_wl(priv, reg, ASP_CTRL_CORE_CLOCK_SELECT);
}
+static void bcmasp_core_clock_select_one_ctrl2(struct bcmasp_priv *priv, bool slow)
+{
+ u32 reg;
+
+ reg = ctrl2_core_rl(priv, ASP_CTRL2_CORE_CLOCK_SELECT);
+ if (slow)
+ reg &= ~ASP_CTRL2_CORE_CLOCK_SELECT_MAIN;
+ else
+ reg |= ASP_CTRL2_CORE_CLOCK_SELECT_MAIN;
+ ctrl2_core_wl(priv, reg, ASP_CTRL2_CORE_CLOCK_SELECT);
+}
+
static void bcmasp_core_clock_set_ll(struct bcmasp_priv *priv, u32 clr, u32 set)
{
u32 reg;
@@ -1108,7 +1110,7 @@ static int bcmasp_get_and_request_irq(struct bcmasp_priv *priv, int i)
return irq;
}
-static void bcmasp_init_wol_shared(struct bcmasp_priv *priv)
+static void bcmasp_init_wol(struct bcmasp_priv *priv)
{
struct platform_device *pdev = priv->pdev;
struct device *dev = &pdev->dev;
@@ -1125,7 +1127,7 @@ static void bcmasp_init_wol_shared(struct bcmasp_priv *priv)
device_set_wakeup_capable(&pdev->dev, 1);
}
-static void bcmasp_enable_wol_shared(struct bcmasp_intf *intf, bool en)
+void bcmasp_enable_wol(struct bcmasp_intf *intf, bool en)
{
struct bcmasp_priv *priv = intf->parent;
struct device *dev = &priv->pdev->dev;
@@ -1154,54 +1156,12 @@ static void bcmasp_enable_wol_shared(struct bcmasp_intf *intf, bool en)
}
}
-static void bcmasp_wol_irq_destroy_shared(struct bcmasp_priv *priv)
+static void bcmasp_wol_irq_destroy(struct bcmasp_priv *priv)
{
if (priv->wol_irq > 0)
free_irq(priv->wol_irq, priv);
}
-static void bcmasp_init_wol_per_intf(struct bcmasp_priv *priv)
-{
- struct platform_device *pdev = priv->pdev;
- struct device *dev = &pdev->dev;
- struct bcmasp_intf *intf;
- int irq;
-
- list_for_each_entry(intf, &priv->intfs, list) {
- irq = bcmasp_get_and_request_irq(priv, intf->port + 1);
- if (irq < 0) {
- dev_warn(dev, "Failed to init WoL irq(port %d): %d\n",
- intf->port, irq);
- continue;
- }
-
- intf->wol_irq = irq;
- intf->wol_irq_enabled = false;
- device_set_wakeup_capable(&pdev->dev, 1);
- }
-}
-
-static void bcmasp_enable_wol_per_intf(struct bcmasp_intf *intf, bool en)
-{
- struct device *dev = &intf->parent->pdev->dev;
-
- if (en ^ intf->wol_irq_enabled)
- irq_set_irq_wake(intf->wol_irq, en);
-
- intf->wol_irq_enabled = en;
- device_set_wakeup_enable(dev, en);
-}
-
-static void bcmasp_wol_irq_destroy_per_intf(struct bcmasp_priv *priv)
-{
- struct bcmasp_intf *intf;
-
- list_for_each_entry(intf, &priv->intfs, list) {
- if (intf->wol_irq > 0)
- free_irq(intf->wol_irq, priv);
- }
-}
-
static void bcmasp_eee_fixup(struct bcmasp_intf *intf, bool en)
{
u32 reg, phy_lpi_overwrite;
@@ -1220,70 +1180,53 @@ static void bcmasp_eee_fixup(struct bcmasp_intf *intf, bool en)
usleep_range(50, 100);
}
-static struct bcmasp_hw_info v20_hw_info = {
- .rx_ctrl_flush = ASP_RX_CTRL_FLUSH,
- .umac2fb = UMAC2FB_OFFSET,
- .rx_ctrl_fb_out_frame_count = ASP_RX_CTRL_FB_OUT_FRAME_COUNT,
- .rx_ctrl_fb_filt_out_frame_count = ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT,
- .rx_ctrl_fb_rx_fifo_depth = ASP_RX_CTRL_FB_RX_FIFO_DEPTH,
-};
-
-static const struct bcmasp_plat_data v20_plat_data = {
- .init_wol = bcmasp_init_wol_per_intf,
- .enable_wol = bcmasp_enable_wol_per_intf,
- .destroy_wol = bcmasp_wol_irq_destroy_per_intf,
- .core_clock_select = bcmasp_core_clock_select_one,
- .hw_info = &v20_hw_info,
-};
-
-static struct bcmasp_hw_info v21_hw_info = {
- .rx_ctrl_flush = ASP_RX_CTRL_FLUSH_2_1,
- .umac2fb = UMAC2FB_OFFSET_2_1,
- .rx_ctrl_fb_out_frame_count = ASP_RX_CTRL_FB_OUT_FRAME_COUNT_2_1,
- .rx_ctrl_fb_filt_out_frame_count =
- ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT_2_1,
- .rx_ctrl_fb_rx_fifo_depth = ASP_RX_CTRL_FB_RX_FIFO_DEPTH_2_1,
-};
-
static const struct bcmasp_plat_data v21_plat_data = {
- .init_wol = bcmasp_init_wol_shared,
- .enable_wol = bcmasp_enable_wol_shared,
- .destroy_wol = bcmasp_wol_irq_destroy_shared,
.core_clock_select = bcmasp_core_clock_select_one,
- .hw_info = &v21_hw_info,
+ .num_mda_filters = 32,
+ .num_net_filters = 32,
+ .tx_chan_offset = 8,
+ .rx_ctrl_offset = 0x0,
};
static const struct bcmasp_plat_data v22_plat_data = {
- .init_wol = bcmasp_init_wol_shared,
- .enable_wol = bcmasp_enable_wol_shared,
- .destroy_wol = bcmasp_wol_irq_destroy_shared,
.core_clock_select = bcmasp_core_clock_select_many,
- .hw_info = &v21_hw_info,
.eee_fixup = bcmasp_eee_fixup,
+ .num_mda_filters = 32,
+ .num_net_filters = 32,
+ .tx_chan_offset = 8,
+ .rx_ctrl_offset = 0x0,
+};
+
+static const struct bcmasp_plat_data v30_plat_data = {
+ .core_clock_select = bcmasp_core_clock_select_one_ctrl2,
+ .num_mda_filters = 20,
+ .num_net_filters = 16,
+ .tx_chan_offset = 0,
+ .rx_ctrl_offset = 0x10000,
};
static void bcmasp_set_pdata(struct bcmasp_priv *priv, const struct bcmasp_plat_data *pdata)
{
- priv->init_wol = pdata->init_wol;
- priv->enable_wol = pdata->enable_wol;
- priv->destroy_wol = pdata->destroy_wol;
priv->core_clock_select = pdata->core_clock_select;
priv->eee_fixup = pdata->eee_fixup;
- priv->hw_info = pdata->hw_info;
+ priv->num_mda_filters = pdata->num_mda_filters;
+ priv->num_net_filters = pdata->num_net_filters;
+ priv->tx_chan_offset = pdata->tx_chan_offset;
+ priv->rx_ctrl_offset = pdata->rx_ctrl_offset;
}
static const struct of_device_id bcmasp_of_match[] = {
- { .compatible = "brcm,asp-v2.0", .data = &v20_plat_data },
{ .compatible = "brcm,asp-v2.1", .data = &v21_plat_data },
{ .compatible = "brcm,asp-v2.2", .data = &v22_plat_data },
+ { .compatible = "brcm,asp-v3.0", .data = &v30_plat_data },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcmasp_of_match);
static const struct of_device_id bcmasp_mdio_of_match[] = {
- { .compatible = "brcm,asp-v2.2-mdio", },
{ .compatible = "brcm,asp-v2.1-mdio", },
- { .compatible = "brcm,asp-v2.0-mdio", },
+ { .compatible = "brcm,asp-v2.2-mdio", },
+ { .compatible = "brcm,asp-v3.0-mdio", },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcmasp_mdio_of_match);
@@ -1365,6 +1308,17 @@ static int bcmasp_probe(struct platform_device *pdev)
* how many interfaces come up.
*/
bcmasp_core_init(priv);
+
+ priv->mda_filters = devm_kcalloc(dev, priv->num_mda_filters,
+ sizeof(*priv->mda_filters), GFP_KERNEL);
+ if (!priv->mda_filters)
+ return -ENOMEM;
+
+ priv->net_filters = devm_kcalloc(dev, priv->num_net_filters,
+ sizeof(*priv->net_filters), GFP_KERNEL);
+ if (!priv->net_filters)
+ return -ENOMEM;
+
bcmasp_core_init_filters(priv);
ports_node = of_find_node_by_name(dev->of_node, "ethernet-ports");
@@ -1387,7 +1341,7 @@ static int bcmasp_probe(struct platform_device *pdev)
}
/* Check and enable WoL */
- priv->init_wol(priv);
+ bcmasp_init_wol(priv);
/* Drop the clock reference count now and let ndo_open()/ndo_close()
* manage it for us from now on.
@@ -1404,7 +1358,7 @@ static int bcmasp_probe(struct platform_device *pdev)
if (ret) {
netdev_err(intf->ndev,
"failed to register net_device: %d\n", ret);
- priv->destroy_wol(priv);
+ bcmasp_wol_irq_destroy(priv);
bcmasp_remove_intfs(priv);
goto of_put_exit;
}
@@ -1425,7 +1379,7 @@ static void bcmasp_remove(struct platform_device *pdev)
if (!priv)
return;
- priv->destroy_wol(priv);
+ bcmasp_wol_irq_destroy(priv);
bcmasp_remove_intfs(priv);
}
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.h b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
index f93cb3da44b0..74adfdb50e11 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.h
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
@@ -53,22 +53,15 @@
#define ASP_RX_CTRL_FB_0_FRAME_COUNT 0x14
#define ASP_RX_CTRL_FB_1_FRAME_COUNT 0x18
#define ASP_RX_CTRL_FB_8_FRAME_COUNT 0x1c
-/* asp2.1 diverges offsets here */
-/* ASP2.0 */
-#define ASP_RX_CTRL_FB_OUT_FRAME_COUNT 0x20
-#define ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT 0x24
-#define ASP_RX_CTRL_FLUSH 0x28
-#define ASP_CTRL_UMAC0_FLUSH_MASK (BIT(0) | BIT(12))
-#define ASP_CTRL_UMAC1_FLUSH_MASK (BIT(1) | BIT(13))
-#define ASP_CTRL_SPB_FLUSH_MASK (BIT(8) | BIT(20))
-#define ASP_RX_CTRL_FB_RX_FIFO_DEPTH 0x30
-/* ASP2.1 */
-#define ASP_RX_CTRL_FB_9_FRAME_COUNT_2_1 0x20
-#define ASP_RX_CTRL_FB_10_FRAME_COUNT_2_1 0x24
-#define ASP_RX_CTRL_FB_OUT_FRAME_COUNT_2_1 0x28
-#define ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT_2_1 0x2c
-#define ASP_RX_CTRL_FLUSH_2_1 0x30
-#define ASP_RX_CTRL_FB_RX_FIFO_DEPTH_2_1 0x38
+#define ASP_RX_CTRL_FB_9_FRAME_COUNT 0x20
+#define ASP_RX_CTRL_FB_10_FRAME_COUNT 0x24
+#define ASP_RX_CTRL_FB_OUT_FRAME_COUNT 0x28
+#define ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT 0x2c
+#define ASP_RX_CTRL_FLUSH 0x30
+#define ASP_CTRL_UMAC0_FLUSH_MASK (BIT(0) | BIT(12))
+#define ASP_CTRL_UMAC1_FLUSH_MASK (BIT(1) | BIT(13))
+#define ASP_CTRL_SPB_FLUSH_MASK (BIT(8) | BIT(20))
+#define ASP_RX_CTRL_FB_RX_FIFO_DEPTH 0x38
#define ASP_RX_FILTER_OFFSET 0x80000
#define ASP_RX_FILTER_BLK_CTRL 0x0
@@ -345,11 +338,6 @@ struct bcmasp_intf {
u32 wolopts;
u8 sopass[SOPASS_MAX];
- /* Used if per intf wol irq */
- int wol_irq;
- unsigned int wol_irq_enabled:1;
-
- struct ethtool_keee eee;
};
#define NUM_NET_FILTERS 32
@@ -372,21 +360,13 @@ struct bcmasp_mda_filter {
u8 mask[ETH_ALEN];
};
-struct bcmasp_hw_info {
- u32 rx_ctrl_flush;
- u32 umac2fb;
- u32 rx_ctrl_fb_out_frame_count;
- u32 rx_ctrl_fb_filt_out_frame_count;
- u32 rx_ctrl_fb_rx_fifo_depth;
-};
-
struct bcmasp_plat_data {
- void (*init_wol)(struct bcmasp_priv *priv);
- void (*enable_wol)(struct bcmasp_intf *intf, bool en);
- void (*destroy_wol)(struct bcmasp_priv *priv);
void (*core_clock_select)(struct bcmasp_priv *priv, bool slow);
void (*eee_fixup)(struct bcmasp_intf *priv, bool en);
- struct bcmasp_hw_info *hw_info;
+ unsigned int num_mda_filters;
+ unsigned int num_net_filters;
+ unsigned int tx_chan_offset;
+ unsigned int rx_ctrl_offset;
};
struct bcmasp_priv {
@@ -401,18 +381,18 @@ struct bcmasp_priv {
int wol_irq;
unsigned long wol_irq_enabled_mask;
- void (*init_wol)(struct bcmasp_priv *priv);
- void (*enable_wol)(struct bcmasp_intf *intf, bool en);
- void (*destroy_wol)(struct bcmasp_priv *priv);
void (*core_clock_select)(struct bcmasp_priv *priv, bool slow);
void (*eee_fixup)(struct bcmasp_intf *intf, bool en);
+ unsigned int num_mda_filters;
+ unsigned int num_net_filters;
+ unsigned int tx_chan_offset;
+ unsigned int rx_ctrl_offset;
void __iomem *base;
- struct bcmasp_hw_info *hw_info;
struct list_head intfs;
- struct bcmasp_mda_filter mda_filters[NUM_MDA_FILTERS];
+ struct bcmasp_mda_filter *mda_filters;
/* MAC destination address filters lock */
spinlock_t mda_lock;
@@ -420,7 +400,7 @@ struct bcmasp_priv {
/* Protects accesses to ASP_CTRL_CLOCK_CTRL */
spinlock_t clk_lock;
- struct bcmasp_net_filter net_filters[NUM_NET_FILTERS];
+ struct bcmasp_net_filter *net_filters;
/* Network filter lock */
struct mutex net_lock;
@@ -510,8 +490,8 @@ BCMASP_FP_IO_MACRO_Q(rx_edpkt_cfg);
#define PKT_OFFLOAD_EPKT_IP(x) ((x) << 21)
#define PKT_OFFLOAD_EPKT_TP(x) ((x) << 19)
#define PKT_OFFLOAD_EPKT_LEN(x) ((x) << 16)
-#define PKT_OFFLOAD_EPKT_CSUM_L3 BIT(15)
-#define PKT_OFFLOAD_EPKT_CSUM_L2 BIT(14)
+#define PKT_OFFLOAD_EPKT_CSUM_L4 BIT(15)
+#define PKT_OFFLOAD_EPKT_CSUM_L3 BIT(14)
#define PKT_OFFLOAD_EPKT_ID(x) ((x) << 12)
#define PKT_OFFLOAD_EPKT_SEQ(x) ((x) << 10)
#define PKT_OFFLOAD_EPKT_TS(x) ((x) << 8)
@@ -543,12 +523,27 @@ BCMASP_CORE_IO_MACRO(intr2, ASP_INTR2_OFFSET);
BCMASP_CORE_IO_MACRO(wakeup_intr2, ASP_WAKEUP_INTR2_OFFSET);
BCMASP_CORE_IO_MACRO(tx_analytics, ASP_TX_ANALYTICS_OFFSET);
BCMASP_CORE_IO_MACRO(rx_analytics, ASP_RX_ANALYTICS_OFFSET);
-BCMASP_CORE_IO_MACRO(rx_ctrl, ASP_RX_CTRL_OFFSET);
BCMASP_CORE_IO_MACRO(rx_filter, ASP_RX_FILTER_OFFSET);
BCMASP_CORE_IO_MACRO(rx_edpkt, ASP_EDPKT_OFFSET);
BCMASP_CORE_IO_MACRO(ctrl, ASP_CTRL_OFFSET);
BCMASP_CORE_IO_MACRO(ctrl2, ASP_CTRL2_OFFSET);
+#define BCMASP_CORE_IO_MACRO_OFFSET(name, offset) \
+static inline u32 name##_core_rl(struct bcmasp_priv *priv, \
+ u32 off) \
+{ \
+ u32 reg = readl_relaxed(priv->base + priv->name##_offset + \
+ (offset) + off); \
+ return reg; \
+} \
+static inline void name##_core_wl(struct bcmasp_priv *priv, \
+ u32 val, u32 off) \
+{ \
+ writel_relaxed(val, priv->base + priv->name##_offset + \
+ (offset) + off); \
+}
+BCMASP_CORE_IO_MACRO_OFFSET(rx_ctrl, ASP_RX_CTRL_OFFSET);
+
struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
struct device_node *ndev_dn, int i);
@@ -601,5 +596,5 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
void bcmasp_netfilt_suspend(struct bcmasp_intf *intf);
-void bcmasp_eee_enable_set(struct bcmasp_intf *intf, bool enable);
+void bcmasp_enable_wol(struct bcmasp_intf *intf, bool en);
#endif
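BCMASP_CORE_IO_MACRO_OFFSET() introduced above generates the same readl_relaxed/writel_relaxed accessors as BCMASP_CORE_IO_MACRO(), but adds a per-SoC offset taken from platform data at runtime (rx_ctrl_offset is 0x0 on ASP 2.x and 0x10000 on ASP 3.0 per the plat_data tables earlier in this series), so existing callers stay unchanged. Illustrative use:

	/* resolves to base + priv->rx_ctrl_offset + ASP_RX_CTRL_OFFSET + reg */
	val = rx_ctrl_core_rl(priv, ASP_RX_CTRL_FB_OUT_FRAME_COUNT);
	rx_ctrl_core_wl(priv, mask, ASP_RX_CTRL_FLUSH);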
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
index 67928b5d8a26..dd80ccfca19d 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
@@ -10,7 +10,6 @@
#include "bcmasp_intf_defs.h"
enum bcmasp_stat_type {
- BCMASP_STAT_RX_EDPKT,
BCMASP_STAT_RX_CTRL,
BCMASP_STAT_RX_CTRL_PER_INTF,
BCMASP_STAT_SOFT,
@@ -33,8 +32,6 @@ struct bcmasp_stats {
.reg_offset = offset, \
}
-#define STAT_BCMASP_RX_EDPKT(str, offset) \
- STAT_BCMASP_OFFSET(str, BCMASP_STAT_RX_EDPKT, offset)
#define STAT_BCMASP_RX_CTRL(str, offset) \
STAT_BCMASP_OFFSET(str, BCMASP_STAT_RX_CTRL, offset)
#define STAT_BCMASP_RX_CTRL_PER_INTF(str, offset) \
@@ -42,11 +39,6 @@ struct bcmasp_stats {
/* Must match the order of struct bcmasp_mib_counters */
static const struct bcmasp_stats bcmasp_gstrings_stats[] = {
- /* EDPKT counters */
- STAT_BCMASP_RX_EDPKT("RX Time Stamp", ASP_EDPKT_RX_TS_COUNTER),
- STAT_BCMASP_RX_EDPKT("RX PKT Count", ASP_EDPKT_RX_PKT_CNT),
- STAT_BCMASP_RX_EDPKT("RX PKT Buffered", ASP_EDPKT_HDR_EXTR_CNT),
- STAT_BCMASP_RX_EDPKT("RX PKT Pushed to DRAM", ASP_EDPKT_HDR_OUT_CNT),
/* ASP RX control */
STAT_BCMASP_RX_CTRL_PER_INTF("Frames From Unimac",
ASP_RX_CTRL_UMAC_0_FRAME_COUNT),
@@ -71,23 +63,6 @@ static const struct bcmasp_stats bcmasp_gstrings_stats[] = {
#define BCMASP_STATS_LEN ARRAY_SIZE(bcmasp_gstrings_stats)
-static u16 bcmasp_stat_fixup_offset(struct bcmasp_intf *intf,
- const struct bcmasp_stats *s)
-{
- struct bcmasp_priv *priv = intf->parent;
-
- if (!strcmp("Frames Out(Buffer)", s->stat_string))
- return priv->hw_info->rx_ctrl_fb_out_frame_count;
-
- if (!strcmp("Frames Out(Filters)", s->stat_string))
- return priv->hw_info->rx_ctrl_fb_filt_out_frame_count;
-
- if (!strcmp("RX Buffer FIFO Depth", s->stat_string))
- return priv->hw_info->rx_ctrl_fb_rx_fifo_depth;
-
- return s->reg_offset;
-}
-
static int bcmasp_get_sset_count(struct net_device *dev, int string_set)
{
switch (string_set) {
@@ -101,14 +76,14 @@ static int bcmasp_get_sset_count(struct net_device *dev, int string_set)
static void bcmasp_get_strings(struct net_device *dev, u32 stringset,
u8 *data)
{
+ const char *str;
unsigned int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < BCMASP_STATS_LEN; i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
- bcmasp_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
+ str = bcmasp_gstrings_stats[i].stat_string;
+ ethtool_puts(&data, str);
}
break;
default:
@@ -126,13 +101,10 @@ static void bcmasp_update_mib_counters(struct bcmasp_intf *intf)
char *p;
s = &bcmasp_gstrings_stats[i];
- offset = bcmasp_stat_fixup_offset(intf, s);
+ offset = s->reg_offset;
switch (s->type) {
case BCMASP_STAT_SOFT:
continue;
- case BCMASP_STAT_RX_EDPKT:
- val = rx_edpkt_core_rl(intf->parent, offset);
- break;
case BCMASP_STAT_RX_CTRL:
val = rx_ctrl_core_rl(intf->parent, offset);
break;
@@ -191,11 +163,30 @@ static void bcmasp_set_msglevel(struct net_device *dev, u32 level)
static void bcmasp_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct bcmasp_intf *intf = netdev_priv(dev);
+ struct bcmasp_priv *priv = intf->parent;
+ struct device *kdev = &priv->pdev->dev;
+ u32 phy_wolopts = 0;
+
+ if (dev->phydev) {
+ phy_ethtool_get_wol(dev->phydev, wol);
+ phy_wolopts = wol->wolopts;
+ }
+
+ /* MAC is not wake-up capable, return what the PHY does */
+ if (!device_can_wakeup(kdev))
+ return;
+
+ /* Overlay MAC capabilities with that of the PHY queried before */
+ wol->supported |= BCMASP_SUPPORTED_WAKE;
+ wol->wolopts |= intf->wolopts;
+
+ /* Return the PHY configured magic password */
+ if (phy_wolopts & WAKE_MAGICSECURE)
+ return;
- wol->supported = BCMASP_SUPPORTED_WAKE;
- wol->wolopts = intf->wolopts;
memset(wol->sopass, 0, sizeof(wol->sopass));
+ /* Otherwise the MAC one */
if (wol->wolopts & WAKE_MAGICSECURE)
memcpy(wol->sopass, intf->sopass, sizeof(intf->sopass));
}
@@ -205,17 +196,28 @@ static int bcmasp_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
struct bcmasp_intf *intf = netdev_priv(dev);
struct bcmasp_priv *priv = intf->parent;
struct device *kdev = &priv->pdev->dev;
+ int ret = 0;
+
+ /* Try Wake-on-LAN from the PHY first */
+ if (dev->phydev) {
+ ret = phy_ethtool_set_wol(dev->phydev, wol);
+ if (ret != -EOPNOTSUPP && wol->wolopts)
+ return ret;
+ }
if (!device_can_wakeup(kdev))
return -EOPNOTSUPP;
+ if (wol->wolopts & ~BCMASP_SUPPORTED_WAKE)
+ return -EINVAL;
+
/* Interface Specific */
intf->wolopts = wol->wolopts;
if (intf->wolopts & WAKE_MAGICSECURE)
memcpy(intf->sopass, wol->sopass, sizeof(wol->sopass));
mutex_lock(&priv->wol_lock);
- priv->enable_wol(intf, !!intf->wolopts);
+ bcmasp_enable_wol(intf, !!intf->wolopts);
mutex_unlock(&priv->wol_lock);
return 0;
@@ -289,7 +291,7 @@ static int bcmasp_flow_get(struct bcmasp_intf *intf, struct ethtool_rxnfc *cmd)
memcpy(&cmd->fs, &nfilter->fs, sizeof(nfilter->fs));
- cmd->data = NUM_NET_FILTERS;
+ cmd->data = intf->parent->num_net_filters;
return 0;
}
@@ -336,7 +338,7 @@ static int bcmasp_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
break;
case ETHTOOL_GRXCLSRLALL:
err = bcmasp_netfilt_get_all_active(intf, rule_locs, &cmd->rule_cnt);
- cmd->data = NUM_NET_FILTERS;
+ cmd->data = intf->parent->num_net_filters;
break;
default:
err = -EOPNOTSUPP;
@@ -348,58 +350,19 @@ static int bcmasp_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return err;
}
-void bcmasp_eee_enable_set(struct bcmasp_intf *intf, bool enable)
-{
- u32 reg;
-
- reg = umac_rl(intf, UMC_EEE_CTRL);
- if (enable)
- reg |= EEE_EN;
- else
- reg &= ~EEE_EN;
- umac_wl(intf, reg, UMC_EEE_CTRL);
-
- intf->eee.eee_enabled = enable;
-}
-
static int bcmasp_get_eee(struct net_device *dev, struct ethtool_keee *e)
{
- struct bcmasp_intf *intf = netdev_priv(dev);
- struct ethtool_keee *p = &intf->eee;
-
if (!dev->phydev)
return -ENODEV;
- e->tx_lpi_enabled = p->tx_lpi_enabled;
- e->tx_lpi_timer = umac_rl(intf, UMC_EEE_LPI_TIMER);
-
return phy_ethtool_get_eee(dev->phydev, e);
}
static int bcmasp_set_eee(struct net_device *dev, struct ethtool_keee *e)
{
- struct bcmasp_intf *intf = netdev_priv(dev);
- struct ethtool_keee *p = &intf->eee;
- int ret;
-
if (!dev->phydev)
return -ENODEV;
- if (!p->eee_enabled) {
- bcmasp_eee_enable_set(intf, false);
- } else {
- ret = phy_init_eee(dev->phydev, 0);
- if (ret) {
- netif_err(intf, hw, dev,
- "EEE initialization failed: %d\n", ret);
- return ret;
- }
-
- umac_wl(intf, e->tx_lpi_timer, UMC_EEE_LPI_TIMER);
- intf->eee.tx_lpi_enabled = e->tx_lpi_enabled;
- bcmasp_eee_enable_set(intf, true);
- }
-
return phy_ethtool_set_eee(dev->phydev, e);
}
@@ -497,4 +460,5 @@ const struct ethtool_ops bcmasp_ethtool_ops = {
.get_ethtool_stats = bcmasp_get_ethtool_stats,
.get_sset_count = bcmasp_get_sset_count,
.get_ts_info = ethtool_op_get_ts_info,
+ .nway_reset = phy_ethtool_nway_reset,
};
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
index cfd50efbdbc0..b9973956c480 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -180,14 +180,14 @@ static struct sk_buff *bcmasp_csum_offload(struct net_device *dev,
case htons(ETH_P_IP):
header |= PKT_OFFLOAD_HDR_SIZE_2((ip_hdrlen(skb) >> 8) & 0xf);
header2 |= PKT_OFFLOAD_HDR2_SIZE_2(ip_hdrlen(skb) & 0xff);
- epkt |= PKT_OFFLOAD_EPKT_IP(0) | PKT_OFFLOAD_EPKT_CSUM_L2;
+ epkt |= PKT_OFFLOAD_EPKT_IP(0);
ip_proto = ip_hdr(skb)->protocol;
header_cnt += 2;
break;
case htons(ETH_P_IPV6):
header |= PKT_OFFLOAD_HDR_SIZE_2((IP6_HLEN >> 8) & 0xf);
header2 |= PKT_OFFLOAD_HDR2_SIZE_2(IP6_HLEN & 0xff);
- epkt |= PKT_OFFLOAD_EPKT_IP(1) | PKT_OFFLOAD_EPKT_CSUM_L2;
+ epkt |= PKT_OFFLOAD_EPKT_IP(1);
ip_proto = ipv6_hdr(skb)->nexthdr;
header_cnt += 2;
break;
@@ -198,12 +198,12 @@ static struct sk_buff *bcmasp_csum_offload(struct net_device *dev,
switch (ip_proto) {
case IPPROTO_TCP:
header2 |= PKT_OFFLOAD_HDR2_SIZE_3(tcp_hdrlen(skb));
- epkt |= PKT_OFFLOAD_EPKT_TP(0) | PKT_OFFLOAD_EPKT_CSUM_L3;
+ epkt |= PKT_OFFLOAD_EPKT_TP(0) | PKT_OFFLOAD_EPKT_CSUM_L4;
header_cnt++;
break;
case IPPROTO_UDP:
header2 |= PKT_OFFLOAD_HDR2_SIZE_3(UDP_HLEN);
- epkt |= PKT_OFFLOAD_EPKT_TP(1) | PKT_OFFLOAD_EPKT_CSUM_L3;
+ epkt |= PKT_OFFLOAD_EPKT_TP(1) | PKT_OFFLOAD_EPKT_CSUM_L4;
header_cnt++;
break;
default:
@@ -605,10 +605,8 @@ next:
bcmasp_intf_rx_desc_write(intf, intf->rx_edpkt_dma_read);
- if (processed < budget) {
- napi_complete_done(&intf->rx_napi, processed);
+ if (processed < budget && napi_complete_done(&intf->rx_napi, processed))
bcmasp_enable_rx_irq(intf, 1);
- }
return processed;
}
@@ -619,7 +617,6 @@ static void bcmasp_adj_link(struct net_device *dev)
struct phy_device *phydev = dev->phydev;
u32 cmd_bits = 0, reg;
int changed = 0;
- bool active;
if (intf->old_link != phydev->link) {
changed = 1;
@@ -677,8 +674,13 @@ static void bcmasp_adj_link(struct net_device *dev)
}
umac_wl(intf, reg, UMC_CMD);
- active = phy_init_eee(phydev, 0) >= 0;
- bcmasp_eee_enable_set(intf, active);
+ umac_wl(intf, phydev->eee_cfg.tx_lpi_timer, UMC_EEE_LPI_TIMER);
+ reg = umac_rl(intf, UMC_EEE_CTRL);
+ if (phydev->enable_tx_lpi)
+ reg |= EEE_EN;
+ else
+ reg &= ~EEE_EN;
+ umac_wl(intf, reg, UMC_EEE_CTRL);
}
reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
@@ -814,9 +816,10 @@ static void bcmasp_init_tx(struct bcmasp_intf *intf)
/* Tx SPB */
tx_spb_ctrl_wl(intf, ((intf->channel + 8) << TX_SPB_CTRL_XF_BID_SHIFT),
TX_SPB_CTRL_XF_CTRL2);
- tx_pause_ctrl_wl(intf, (1 << (intf->channel + 8)), TX_PAUSE_MAP_VECTOR);
+
+ if (intf->parent->tx_chan_offset)
+ tx_pause_ctrl_wl(intf, (1 << (intf->channel + 8)), TX_PAUSE_MAP_VECTOR);
tx_spb_top_wl(intf, 0x1e, TX_SPB_TOP_BLKOUT);
- tx_spb_top_wl(intf, 0x0, TX_SPB_TOP_SPRE_BW_CTRL);
tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_READ);
tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_BASE);
@@ -1055,6 +1058,9 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
/* Indicate that the MAC is responsible for PHY PM */
phydev->mac_managed_pm = true;
+
+ /* Set phylib's copy of the LPI timer */
+ phydev->eee_cfg.tx_lpi_timer = umac_rl(intf, UMC_EEE_LPI_TIMER);
}
umac_reset(intf);
@@ -1178,7 +1184,7 @@ static void bcmasp_map_res(struct bcmasp_priv *priv, struct bcmasp_intf *intf)
{
/* Per port */
intf->res.umac = priv->base + UMC_OFFSET(intf);
- intf->res.umac2fb = priv->base + (priv->hw_info->umac2fb +
+ intf->res.umac2fb = priv->base + (UMAC2FB_OFFSET + priv->rx_ctrl_offset +
(intf->port * 0x4));
intf->res.rgmii = priv->base + RGMII_OFFSET(intf);
@@ -1193,7 +1199,6 @@ static void bcmasp_map_res(struct bcmasp_priv *priv, struct bcmasp_intf *intf)
intf->rx_edpkt_cfg = priv->base + RX_EDPKT_CFG_OFFSET(intf);
}
-#define MAX_IRQ_STR_LEN 64
struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
struct device_node *ndev_dn, int i)
{
@@ -1277,6 +1282,8 @@ struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
ndev->hw_features |= ndev->features;
ndev->needed_headroom += sizeof(struct bcmasp_pkt_offload);
+ netdev_sw_irq_coalesce_default_on(ndev);
+
return intf;
err_free_netdev:
@@ -1331,7 +1338,8 @@ static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
ASP_WAKEUP_INTR2_MASK_CLEAR);
}
- if (intf->eee.eee_enabled && intf->parent->eee_fixup)
+ if (ndev->phydev && ndev->phydev->eee_cfg.eee_enabled &&
+ intf->parent->eee_fixup)
intf->parent->eee_fixup(intf, true);
netif_dbg(intf, wol, ndev, "entered WOL mode\n");
@@ -1373,7 +1381,8 @@ static void bcmasp_resume_from_wol(struct bcmasp_intf *intf)
{
u32 reg;
- if (intf->eee.eee_enabled && intf->parent->eee_fixup)
+ if (intf->ndev->phydev && intf->ndev->phydev->eee_cfg.eee_enabled &&
+ intf->parent->eee_fixup)
intf->parent->eee_fixup(intf, false);
reg = umac_rl(intf, UMC_MPD_CTRL);
@@ -1404,9 +1413,6 @@ int bcmasp_interface_resume(struct bcmasp_intf *intf)
bcmasp_resume_from_wol(intf);
- if (intf->eee.eee_enabled)
- bcmasp_eee_enable_set(intf, true);
-
netif_device_attach(dev);
return 0;
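The RX poll change in bcmasp_intf.c above adopts the standard NAPI completion idiom: the interrupt is re-enabled only when napi_complete_done() returns true, since it returns false when the instance must keep polling (for example under busy polling). A small userspace model of that control flow, with the NAPI core and the IRQ write replaced by stubs:

/* Model of the poll-loop completion idiom; napi_done() and enable_irq()
 * stand in for napi_complete_done() and the driver's IRQ enable. */
#include <stdbool.h>
#include <stdio.h>

static bool napi_done(int work_done)
{
	/* The real napi_complete_done() may refuse completion (busy poll,
	 * rescheduled NAPI); model it as always succeeding here. */
	return true;
}

static void enable_irq(void)
{
	puts("RX interrupt re-enabled");
}

static int poll(int budget)
{
	int processed = budget - 1;	/* pretend we ran out of packets */

	if (processed < budget && napi_done(processed))
		enable_irq();		/* only when NAPI really completed */

	return processed;
}

int main(void)
{
	return poll(64) < 0;
}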
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h
index ad742612895f..af7418348e81 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h
@@ -118,8 +118,7 @@
#define UMC_PSW_MS 0x624
#define UMC_PSW_LS 0x628
-#define UMAC2FB_OFFSET_2_1 0x9f044
-#define UMAC2FB_OFFSET 0x9f03c
+#define UMAC2FB_OFFSET 0x9f044
#define UMAC2FB_CFG 0x0
#define UMAC2FB_CFG_OPUT_EN BIT(0)
#define UMAC2FB_CFG_VLAN_EN BIT(1)
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index e5809ad5eb82..888f28f11406 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -31,6 +31,7 @@
#include <linux/ssb/ssb.h>
#include <linux/slab.h>
#include <linux/phy.h>
+#include <linux/phy_fixed.h>
#include <linux/uaccess.h>
#include <asm/io.h>
@@ -575,7 +576,7 @@ static void b44_check_phy(struct b44 *bp)
static void b44_timer(struct timer_list *t)
{
- struct b44 *bp = from_timer(bp, t, timer);
+ struct b44 *bp = timer_container_of(bp, t, timer);
spin_lock_irq(&bp->lock);
@@ -1628,7 +1629,7 @@ static int b44_close(struct net_device *dev)
napi_disable(&bp->napi);
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
spin_lock_irq(&bp->lock);
@@ -2233,7 +2234,6 @@ static int b44_register_phy_one(struct b44 *bp)
struct mii_bus *mii_bus;
struct ssb_device *sdev = bp->sdev;
struct phy_device *phydev;
- char bus_id[MII_BUS_ID_SIZE + 3];
struct ssb_sprom *sprom = &sdev->bus->sprom;
int err;
@@ -2260,27 +2260,26 @@ static int b44_register_phy_one(struct b44 *bp)
goto err_out_mdiobus;
}
- if (!mdiobus_is_registered_device(bp->mii_bus, bp->phy_addr) &&
- (sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM))) {
-
+ phydev = mdiobus_get_phy(bp->mii_bus, bp->phy_addr);
+ if (!phydev &&
+ sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM)) {
dev_info(sdev->dev,
"could not find PHY at %i, use fixed one\n",
bp->phy_addr);
- bp->phy_addr = 0;
- snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, "fixed-0",
- bp->phy_addr);
- } else {
- snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
- bp->phy_addr);
+ phydev = fixed_phy_register_100fd();
+ if (!IS_ERR(phydev))
+ bp->phy_addr = phydev->mdio.addr;
}
- phydev = phy_connect(bp->dev, bus_id, &b44_adjust_link,
- PHY_INTERFACE_MODE_MII);
- if (IS_ERR(phydev)) {
+ if (IS_ERR_OR_NULL(phydev))
+ err = -ENODEV;
+ else
+ err = phy_connect_direct(bp->dev, phydev, &b44_adjust_link,
+ PHY_INTERFACE_MODE_MII);
+ if (err) {
dev_err(sdev->dev, "could not attach PHY at %i\n",
bp->phy_addr);
- err = PTR_ERR(phydev);
goto err_out_mdiobus_unregister;
}
@@ -2293,7 +2292,6 @@ static int b44_register_phy_one(struct b44 *bp)
linkmode_copy(phydev->advertising, phydev->supported);
bp->old_link = 0;
- bp->phy_addr = phydev->mdio.addr;
phy_attached_info(phydev);
@@ -2311,10 +2309,15 @@ err_out:
static void b44_unregister_phy_one(struct b44 *bp)
{
- struct net_device *dev = bp->dev;
struct mii_bus *mii_bus = bp->mii_bus;
+ struct net_device *dev = bp->dev;
+ struct phy_device *phydev;
+
+ phydev = dev->phydev;
- phy_disconnect(dev->phydev);
+ phy_disconnect(phydev);
+ if (phy_is_pseudo_fixed_link(phydev))
+ fixed_phy_unregister(phydev);
mdiobus_unregister(mii_bus);
mdiobus_free(mii_bus);
}
@@ -2473,7 +2476,7 @@ static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
if (!netif_running(dev))
return 0;
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
spin_lock_irq(&bp->lock);
@@ -2570,7 +2573,7 @@ static int __init b44_init(void)
unsigned int dma_desc_align_size = dma_get_cache_alignment();
int err;
- /* Setup paramaters for syncing RX/TX DMA descriptors */
+ /* Setup parameters for syncing RX/TX DMA descriptors */
dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
err = b44_pci_init();
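The b44 change above replaces the bus-ID string plumbing with a direct lookup: probe the MDIO bus for a PHY, fall back to registering a fixed 100/Full PHY when the board flags say a switch sits behind the MAC, and unregister that fixed PHY again on teardown. A compilable model of that lookup-or-fallback flow, with every function a stand-in (the real driver uses mdiobus_get_phy(), fixed_phy_register_100fd(), phy_connect_direct() and fixed_phy_unregister()):

/* Model only: struct phy and all helpers below are invented for the sketch. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct phy { bool fixed; };

static struct phy *bus_lookup(void)
{
	return NULL;			/* no PHY found on the MDIO bus */
}

static struct phy *register_fixed(void)
{
	static struct phy p = { .fixed = true };

	return &p;			/* fall back to a fixed 100/Full link */
}

static int connect_direct(struct phy *p)
{
	return p ? 0 : -ENODEV;
}

static int attach(struct phy **out, bool board_has_switch)
{
	struct phy *phy = bus_lookup();

	if (!phy && board_has_switch)
		phy = register_fixed();

	*out = phy;
	return connect_direct(phy);
}

static void detach(struct phy *phy)
{
	/* disconnect first, then drop the fixed PHY we registered ourselves */
	if (phy && phy->fixed)
		puts("fixed PHY unregistered");
}

int main(void)
{
	struct phy *phy;
	int err = attach(&phy, true);

	detach(phy);
	return err ? 1 : 0;
}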
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index e5e03aaa49f9..92204fea1f08 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -286,7 +286,7 @@ static int bcm_enet_refill_rx(struct net_device *dev, bool napi_mode)
*/
static void bcm_enet_refill_rx_timer(struct timer_list *t)
{
- struct bcm_enet_priv *priv = from_timer(priv, t, rx_timeout);
+ struct bcm_enet_priv *priv = timer_container_of(priv, t, rx_timeout);
struct net_device *dev = priv->net_dev;
spin_lock(&priv->rx_lock);
@@ -1195,7 +1195,7 @@ static int bcm_enet_stop(struct net_device *dev)
napi_disable(&priv->napi);
if (priv->has_phy)
phy_stop(dev->phydev);
- del_timer_sync(&priv->rx_timeout);
+ timer_delete_sync(&priv->rx_timeout);
/* mask all interrupts */
enet_writel(priv, 0, ENET_IRMASK_REG);
@@ -1339,14 +1339,14 @@ static int bcm_enet_get_sset_count(struct net_device *netdev,
static void bcm_enet_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
+ const char *str;
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
- bcm_enet_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
+ str = bcm_enet_gstrings_stats[i].stat_string;
+ ethtool_puts(&data, str);
}
break;
}
@@ -2001,7 +2001,7 @@ static inline int bcm_enet_port_is_rgmii(int portid)
*/
static void swphy_poll_timer(struct timer_list *t)
{
- struct bcm_enet_priv *priv = from_timer(priv, t, swphy_poll);
+ struct bcm_enet_priv *priv = timer_container_of(priv, t, swphy_poll);
unsigned int i;
for (i = 0; i < priv->num_ports; i++) {
@@ -2346,10 +2346,10 @@ static int bcm_enetsw_stop(struct net_device *dev)
priv = netdev_priv(dev);
kdev = &priv->pdev->dev;
- del_timer_sync(&priv->swphy_poll);
+ timer_delete_sync(&priv->swphy_poll);
netif_stop_queue(dev);
napi_disable(&priv->napi);
- del_timer_sync(&priv->rx_timeout);
+ timer_delete_sync(&priv->rx_timeout);
/* mask all interrupts */
enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
@@ -2503,14 +2503,14 @@ static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = {
static void bcm_enetsw_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
+ const char *str;
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
- bcm_enetsw_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
+ str = bcm_enetsw_gstrings_stats[i].stat_string;
+ ethtool_puts(&data, str);
}
break;
}
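The string-set conversions above (and the bcmsysport/bgmac ones that follow) drop the manual `data + i * ETH_GSTRING_LEN` bookkeeping in favor of ethtool_puts()/ethtool_sprintf(), which fill the current fixed-size slot and advance the caller's cursor. A standalone sketch of that cursor-advancing helper; the helper body is illustrative, only the slot size matches the kernel's ETH_GSTRING_LEN:

/* Sketch of a cursor-advancing string writer; not the kernel helper itself. */
#include <stdio.h>
#include <string.h>

#define GSTRING_LEN 32	/* the kernel's ETH_GSTRING_LEN is also 32 */

static void puts_advance(char **data, const char *str)
{
	strncpy(*data, str, GSTRING_LEN - 1);	/* slot is zeroed, stays terminated */
	*data += GSTRING_LEN;			/* next call fills the next slot */
}

int main(void)
{
	char buf[3 * GSTRING_LEN] = { 0 };
	char *cursor = buf;
	char name[GSTRING_LEN];
	int i;

	puts_advance(&cursor, "rx_packets");
	for (i = 0; i < 2; i++) {
		snprintf(name, sizeof(name), "txq%d_packets", i);
		puts_advance(&cursor, name);
	}

	for (i = 0; i < 3; i++)
		printf("slot %d: %s\n", i, buf + i * GSTRING_LEN);
	return 0;
}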
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 031e9e0cca53..bc4e1f3b3752 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -346,32 +346,22 @@ static void bcm_sysport_get_strings(struct net_device *dev,
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
const struct bcm_sysport_stats *s;
- char buf[128];
- int i, j;
+ int i;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+ for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
s = &bcm_sysport_gstrings_stats[i];
if (priv->is_lite &&
!bcm_sysport_lite_stat_valid(s->type))
continue;
- memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
- ETH_GSTRING_LEN);
- j++;
+ ethtool_puts(&data, s->stat_string);
}
for (i = 0; i < dev->num_tx_queues; i++) {
- snprintf(buf, sizeof(buf), "txq%d_packets", i);
- memcpy(data + j * ETH_GSTRING_LEN, buf,
- ETH_GSTRING_LEN);
- j++;
-
- snprintf(buf, sizeof(buf), "txq%d_bytes", i);
- memcpy(data + j * ETH_GSTRING_LEN, buf,
- ETH_GSTRING_LEN);
- j++;
+ ethtool_sprintf(&data, "txq%d_packets", i);
+ ethtool_sprintf(&data, "txq%d_bytes", i);
}
break;
default:
@@ -1943,7 +1933,11 @@ static int bcm_sysport_open(struct net_device *dev)
unsigned int i;
int ret;
- clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ netdev_err(dev, "could not enable priv clock\n");
+ return ret;
+ }
/* Reset UniMAC */
umac_reset(priv);
@@ -2601,7 +2595,11 @@ static int bcm_sysport_probe(struct platform_device *pdev)
goto err_deregister_notifier;
}
- clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "could not enable priv clock\n");
+ goto err_deregister_netdev;
+ }
priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
dev_info(&pdev->dev,
@@ -2615,6 +2613,8 @@ static int bcm_sysport_probe(struct platform_device *pdev)
return 0;
+err_deregister_netdev:
+ unregister_netdev(dev);
err_deregister_notifier:
unregister_netdevice_notifier(&priv->netdev_notifier);
err_deregister_fixed_link:
@@ -2784,7 +2784,12 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
if (!netif_running(dev))
return 0;
- clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ netdev_err(dev, "could not enable priv clock\n");
+ return ret;
+ }
+
if (priv->wolopts)
clk_disable_unprepare(priv->wol_clk);
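The bcmsysport.c hunks above stop ignoring clk_prepare_enable() failures and give probe a new err_deregister_netdev label so the just-registered netdev is torn down when the clock cannot be enabled. A compact model of that goto-unwind pattern; the step functions are stand-ins, not driver code:

/* Goto-unwind model: each successful step gains a label that later
 * failures jump to, undoing steps in reverse order. */
#include <errno.h>
#include <stdio.h>

static int register_netdev_stub(void)
{
	puts("netdev registered");
	return 0;
}

static void unregister_netdev_stub(void)
{
	puts("netdev unregistered");
}

static int clk_enable_stub(void)
{
	puts("clock enable fails");
	return -EIO;
}

static int probe(void)
{
	int ret;

	ret = register_netdev_stub();
	if (ret)
		return ret;

	ret = clk_enable_stub();
	if (ret)
		goto err_deregister_netdev;	/* undo the previous step only */

	return 0;

err_deregister_netdev:
	unregister_netdev_stub();
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;
}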
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index ecce23cecbea..4e266ce41180 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -171,6 +171,7 @@ static int platform_phy_connect(struct bgmac *bgmac)
static int bgmac_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
+ struct device_node *phy_node;
struct bgmac *bgmac;
struct resource *regs;
int ret;
@@ -236,7 +237,9 @@ static int bgmac_probe(struct platform_device *pdev)
bgmac->cco_ctl_maskset = platform_bgmac_cco_ctl_maskset;
bgmac->get_bus_clock = platform_bgmac_get_bus_clock;
bgmac->cmn_maskset32 = platform_bgmac_cmn_maskset32;
- if (of_parse_phandle(np, "phy-handle", 0)) {
+ phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (phy_node) {
+ of_node_put(phy_node);
bgmac->phy_connect = platform_phy_connect;
} else {
bgmac->phy_connect = bgmac_phy_connect_direct;
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 6ffdc4229407..3e9c57196a39 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1367,8 +1367,7 @@ static void bgmac_get_strings(struct net_device *dev, u32 stringset,
return;
for (i = 0; i < BGMAC_STATS_LEN; i++)
- strscpy(data + i * ETH_GSTRING_LEN,
- bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN);
+ ethtool_puts(&data, bgmac_get_strings_stats[i].name);
}
static void bgmac_get_ethtool_stats(struct net_device *dev,
@@ -1447,7 +1446,7 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac)
struct phy_device *phy_dev;
int err;
- phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
+ phy_dev = fixed_phy_register(&fphy_status, NULL);
if (IS_ERR(phy_dev)) {
dev_err(bgmac->dev, "Failed to register fixed PHY device\n");
return PTR_ERR(phy_dev);
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index d73ef262991d..6fee9a41839c 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -328,8 +328,7 @@
#define BGMAC_RX_FRAME_OFFSET 30 /* There are 2 unused bytes between header and real data */
#define BGMAC_RX_BUF_OFFSET (NET_SKB_PAD + NET_IP_ALIGN - \
BGMAC_RX_FRAME_OFFSET)
-/* Jumbo frame size with FCS */
-#define BGMAC_RX_MAX_FRAME_SIZE 9724
+#define BGMAC_RX_MAX_FRAME_SIZE 1536
#define BGMAC_RX_BUF_SIZE (BGMAC_RX_FRAME_OFFSET + BGMAC_RX_MAX_FRAME_SIZE)
#define BGMAC_RX_ALLOC_SIZE (SKB_DATA_ALIGN(BGMAC_RX_BUF_SIZE + BGMAC_RX_BUF_OFFSET) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
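Cutting BGMAC_RX_MAX_FRAME_SIZE from 9724 to 1536 shrinks every RX buffer allocation from roughly 10 KiB to under 2 KiB. A rough standalone check of that arithmetic under assumed values for NET_SKB_PAD (64), NET_IP_ALIGN (2), a 64-byte SKB_DATA_ALIGN and about 320 bytes of skb_shared_info; all four are assumptions and vary by architecture and config:

/* Rough size comparison under assumed constants; not the kernel's macros. */
#include <stdio.h>

#define FRAME_OFFSET	30
#define NET_SKB_PAD	64	/* assumption */
#define NET_IP_ALIGN	2	/* assumption */
#define SHARED_INFO	320	/* assumption: sizeof(struct skb_shared_info) */
#define ALIGN_UP(x)	(((x) + 63) & ~63UL)	/* assume 64-byte SKB_DATA_ALIGN */

static unsigned long alloc_size(unsigned long max_frame)
{
	unsigned long buf_size = FRAME_OFFSET + max_frame;
	unsigned long buf_offset = NET_SKB_PAD + NET_IP_ALIGN - FRAME_OFFSET;

	return ALIGN_UP(buf_size + buf_offset) + ALIGN_UP(SHARED_INFO);
}

int main(void)
{
	printf("old (9724): %lu bytes per RX buffer\n", alloc_size(9724));
	printf("new (1536): %lu bytes per RX buffer\n", alloc_size(1536));
	return 0;
}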
diff --git a/drivers/net/ethernet/broadcom/bnge/Makefile b/drivers/net/ethernet/broadcom/bnge/Makefile
new file mode 100644
index 000000000000..ea6596854e5c
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_BNGE) += bng_en.o
+
+bng_en-y := bnge_core.o \
+ bnge_devlink.o \
+ bnge_hwrm.o \
+ bnge_hwrm_lib.o \
+ bnge_rmem.o \
+ bnge_resc.o \
+ bnge_netdev.o \
+ bnge_ethtool.o \
+ bnge_auxr.o
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge.h b/drivers/net/ethernet/broadcom/bnge/bnge.h
new file mode 100644
index 000000000000..411744894349
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge.h
@@ -0,0 +1,255 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_H_
+#define _BNGE_H_
+
+#define DRV_NAME "bng_en"
+#define DRV_SUMMARY "Broadcom 800G Ethernet Linux Driver"
+
+#include <linux/etherdevice.h>
+#include <linux/bnxt/hsi.h>
+#include "bnge_rmem.h"
+#include "bnge_resc.h"
+#include "bnge_auxr.h"
+
+#define DRV_VER_MAJ 1
+#define DRV_VER_MIN 15
+#define DRV_VER_UPD 1
+
+extern char bnge_driver_name[];
+
+enum board_idx {
+ BCM57708,
+};
+
+struct bnge_auxr_priv {
+ struct auxiliary_device aux_dev;
+ struct bnge_auxr_dev *auxr_dev;
+ int id;
+};
+
+struct bnge_pf_info {
+ u16 fw_fid;
+ u16 port_id;
+ u8 mac_addr[ETH_ALEN];
+};
+
+#define INVALID_HW_RING_ID ((u16)-1)
+
+enum {
+ BNGE_FW_CAP_SHORT_CMD = BIT_ULL(0),
+ BNGE_FW_CAP_LLDP_AGENT = BIT_ULL(1),
+ BNGE_FW_CAP_DCBX_AGENT = BIT_ULL(2),
+ BNGE_FW_CAP_IF_CHANGE = BIT_ULL(3),
+ BNGE_FW_CAP_KONG_MB_CHNL = BIT_ULL(4),
+ BNGE_FW_CAP_ERROR_RECOVERY = BIT_ULL(5),
+ BNGE_FW_CAP_PKG_VER = BIT_ULL(6),
+ BNGE_FW_CAP_CFA_ADV_FLOW = BIT_ULL(7),
+ BNGE_FW_CAP_CFA_RFS_RING_TBL_IDX_V2 = BIT_ULL(8),
+ BNGE_FW_CAP_PCIE_STATS_SUPPORTED = BIT_ULL(9),
+ BNGE_FW_CAP_EXT_STATS_SUPPORTED = BIT_ULL(10),
+ BNGE_FW_CAP_ERR_RECOVER_RELOAD = BIT_ULL(11),
+ BNGE_FW_CAP_HOT_RESET = BIT_ULL(12),
+ BNGE_FW_CAP_RX_ALL_PKT_TS = BIT_ULL(13),
+ BNGE_FW_CAP_VLAN_RX_STRIP = BIT_ULL(14),
+ BNGE_FW_CAP_VLAN_TX_INSERT = BIT_ULL(15),
+ BNGE_FW_CAP_EXT_HW_STATS_SUPPORTED = BIT_ULL(16),
+ BNGE_FW_CAP_LIVEPATCH = BIT_ULL(17),
+ BNGE_FW_CAP_HOT_RESET_IF = BIT_ULL(18),
+ BNGE_FW_CAP_RING_MONITOR = BIT_ULL(19),
+ BNGE_FW_CAP_DBG_QCAPS = BIT_ULL(20),
+ BNGE_FW_CAP_THRESHOLD_TEMP_SUPPORTED = BIT_ULL(21),
+ BNGE_FW_CAP_DFLT_VLAN_TPID_PCP = BIT_ULL(22),
+ BNGE_FW_CAP_VNIC_TUNNEL_TPA = BIT_ULL(23),
+ BNGE_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO = BIT_ULL(24),
+ BNGE_FW_CAP_CFA_RFS_RING_TBL_IDX_V3 = BIT_ULL(25),
+ BNGE_FW_CAP_VNIC_RE_FLUSH = BIT_ULL(26),
+};
+
+enum {
+ BNGE_EN_ROCE_V1 = BIT_ULL(0),
+ BNGE_EN_ROCE_V2 = BIT_ULL(1),
+ BNGE_EN_STRIP_VLAN = BIT_ULL(2),
+ BNGE_EN_SHARED_CHNL = BIT_ULL(3),
+ BNGE_EN_UDP_GSO_SUPP = BIT_ULL(4),
+};
+
+#define BNGE_EN_ROCE (BNGE_EN_ROCE_V1 | BNGE_EN_ROCE_V2)
+
+enum {
+ BNGE_RSS_CAP_RSS_HASH_TYPE_DELTA = BIT(0),
+ BNGE_RSS_CAP_UDP_RSS_CAP = BIT(1),
+ BNGE_RSS_CAP_NEW_RSS_CAP = BIT(2),
+ BNGE_RSS_CAP_RSS_TCAM = BIT(3),
+ BNGE_RSS_CAP_AH_V4_RSS_CAP = BIT(4),
+ BNGE_RSS_CAP_AH_V6_RSS_CAP = BIT(5),
+ BNGE_RSS_CAP_ESP_V4_RSS_CAP = BIT(6),
+ BNGE_RSS_CAP_ESP_V6_RSS_CAP = BIT(7),
+};
+
+#define BNGE_MAX_QUEUE 8
+struct bnge_queue_info {
+ u8 queue_id;
+ u8 queue_profile;
+};
+
+struct bnge_dev {
+ struct device *dev;
+ struct pci_dev *pdev;
+ struct net_device *netdev;
+ u64 dsn;
+#define BNGE_VPD_FLD_LEN 32
+ char board_partno[BNGE_VPD_FLD_LEN];
+ char board_serialno[BNGE_VPD_FLD_LEN];
+
+ void __iomem *bar0;
+ void __iomem *bar1;
+
+ u16 chip_num;
+ u8 chip_rev;
+
+#if BITS_PER_LONG == 32
+ /* ensure atomic 64-bit doorbell writes on 32-bit systems. */
+ spinlock_t db_lock;
+#endif
+ int db_offset; /* db_offset within db_size */
+ int db_size;
+
+ /* HWRM members */
+ u16 hwrm_cmd_seq;
+ u16 hwrm_cmd_kong_seq;
+ struct dma_pool *hwrm_dma_pool;
+ struct hlist_head hwrm_pending_list;
+ u16 hwrm_max_req_len;
+ u16 hwrm_max_ext_req_len;
+ unsigned int hwrm_cmd_timeout;
+ unsigned int hwrm_cmd_max_timeout;
+ struct mutex hwrm_cmd_lock; /* serialize hwrm messages */
+
+ struct hwrm_ver_get_output ver_resp;
+#define FW_VER_STR_LEN 32
+ char fw_ver_str[FW_VER_STR_LEN];
+ char hwrm_ver_supp[FW_VER_STR_LEN];
+ char nvm_cfg_ver[FW_VER_STR_LEN];
+ u64 fw_ver_code;
+#define BNGE_FW_VER_CODE(maj, min, bld, rsv) \
+ ((u64)(maj) << 48 | (u64)(min) << 32 | (u64)(bld) << 16 | (rsv))
+
+ struct bnge_pf_info pf;
+
+ unsigned long state;
+#define BNGE_STATE_DRV_REGISTERED 0
+#define BNGE_STATE_OPEN 1
+
+ u64 fw_cap;
+
+ /* Backing stores */
+ struct bnge_ctx_mem_info *ctx;
+
+ u64 flags;
+
+ struct bnge_hw_resc hw_resc;
+
+ u16 tso_max_segs;
+
+ int max_fltr;
+#define BNGE_L2_FLTR_MAX_FLTR 1024
+
+ u32 *rss_indir_tbl;
+#define BNGE_RSS_TABLE_ENTRIES 64
+#define BNGE_RSS_TABLE_SIZE (BNGE_RSS_TABLE_ENTRIES * 4)
+#define BNGE_RSS_TABLE_MAX_TBL 8
+#define BNGE_MAX_RSS_TABLE_SIZE \
+ (BNGE_RSS_TABLE_SIZE * BNGE_RSS_TABLE_MAX_TBL)
+#define BNGE_MAX_RSS_TABLE_ENTRIES \
+ (BNGE_RSS_TABLE_ENTRIES * BNGE_RSS_TABLE_MAX_TBL)
+ u16 rss_indir_tbl_entries;
+
+ u32 rss_cap;
+ u32 rss_hash_cfg;
+
+ u16 rx_nr_rings;
+ u16 tx_nr_rings;
+ u16 tx_nr_rings_per_tc;
+ /* Number of NQs */
+ u16 nq_nr_rings;
+
+ /* Aux device resources */
+ u16 aux_num_msix;
+ u16 aux_num_stat_ctxs;
+
+ u16 max_mtu;
+#define BNGE_MAX_MTU 9500
+
+ u16 hw_ring_stats_size;
+#define BNGE_NUM_RX_RING_STATS 8
+#define BNGE_NUM_TX_RING_STATS 8
+#define BNGE_NUM_TPA_RING_STATS 6
+#define BNGE_RING_STATS_SIZE \
+ ((BNGE_NUM_RX_RING_STATS + BNGE_NUM_TX_RING_STATS + \
+ BNGE_NUM_TPA_RING_STATS) * 8)
+
+ u16 max_tpa_v2;
+#define BNGE_SUPPORTS_TPA(bd) ((bd)->max_tpa_v2)
+
+ u8 num_tc;
+ u8 max_tc;
+ u8 max_lltc; /* lossless TCs */
+ struct bnge_queue_info q_info[BNGE_MAX_QUEUE];
+ u8 tc_to_qidx[BNGE_MAX_QUEUE];
+ u8 q_ids[BNGE_MAX_QUEUE];
+ u8 max_q;
+ u8 port_count;
+
+ struct bnge_irq *irq_tbl;
+ u16 irqs_acquired;
+
+ struct bnge_auxr_priv *aux_priv;
+ struct bnge_auxr_dev *auxr_dev;
+};
+
+static inline bool bnge_is_roce_en(struct bnge_dev *bd)
+{
+ return bd->flags & BNGE_EN_ROCE;
+}
+
+static inline bool bnge_is_agg_reqd(struct bnge_dev *bd)
+{
+ if (bd->netdev) {
+ struct bnge_net *bn = netdev_priv(bd->netdev);
+
+ if (bn->priv_flags & BNGE_NET_EN_TPA ||
+ bn->priv_flags & BNGE_NET_EN_JUMBO)
+ return true;
+ else
+ return false;
+ }
+
+ return true;
+}
+
+static inline void bnge_writeq(struct bnge_dev *bd, u64 val,
+ void __iomem *addr)
+{
+#if BITS_PER_LONG == 32
+ spin_lock(&bd->db_lock);
+ lo_hi_writeq(val, addr);
+ spin_unlock(&bd->db_lock);
+#else
+ writeq(val, addr);
+#endif
+}
+
+/* For TX and RX ring doorbells */
+static inline void bnge_db_write(struct bnge_dev *bd, struct bnge_db_info *db,
+ u32 idx)
+{
+ bnge_writeq(bd, db->db_key64 | DB_RING_IDX(db, idx),
+ db->doorbell);
+}
+
+bool bnge_aux_registered(struct bnge_dev *bd);
+u16 bnge_aux_get_msix(struct bnge_dev *bd);
+
+#endif /* _BNGE_H_ */
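bnge_writeq() in the header above takes db_lock only on 32-bit builds, because a 64-bit doorbell there has to be issued as two 32-bit MMIO writes (lo_hi_writeq()) and a concurrent writer must not interleave its halves. A userspace model of that constraint, with the register replaced by two 32-bit fields and the spinlock by a mutex:

/* Model only: two 32-bit stores stand in for lo_hi_writeq(); the mutex
 * plays the role of db_lock on 32-bit kernels. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static struct { uint32_t lo, hi; } reg;
static pthread_mutex_t db_lock = PTHREAD_MUTEX_INITIALIZER;

static void write64_split(uint64_t val)
{
	pthread_mutex_lock(&db_lock);
	reg.lo = (uint32_t)val;			/* first half ...   */
	reg.hi = (uint32_t)(val >> 32);		/* ... second half must not interleave */
	pthread_mutex_unlock(&db_lock);
}

int main(void)
{
	write64_split(0xdeadbeef00000001ULL);
	printf("hi=%08x lo=%08x\n", reg.hi, reg.lo);
	return 0;
}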
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_auxr.c b/drivers/net/ethernet/broadcom/bnge/bnge_auxr.c
new file mode 100644
index 000000000000..d64592b64e17
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_auxr.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <asm/byteorder.h>
+#include <linux/bitmap.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/bnxt/hsi.h>
+
+#include "bnge.h"
+#include "bnge_hwrm.h"
+#include "bnge_auxr.h"
+
+static DEFINE_IDA(bnge_aux_dev_ids);
+
+static void bnge_fill_msix_vecs(struct bnge_dev *bd,
+ struct bnge_msix_info *info)
+{
+ struct bnge_auxr_dev *auxr_dev = bd->auxr_dev;
+ int num_msix, i;
+
+ if (!auxr_dev->auxr_info->msix_requested) {
+ dev_warn(bd->dev, "Requested MSI-X vectors not allocated\n");
+ return;
+ }
+ num_msix = auxr_dev->auxr_info->msix_requested;
+ for (i = 0; i < num_msix; i++) {
+ info[i].vector = bd->irq_tbl[i].vector;
+ info[i].db_offset = bd->db_offset;
+ info[i].ring_idx = i;
+ }
+}
+
+int bnge_register_dev(struct bnge_auxr_dev *auxr_dev,
+ void *handle)
+{
+ struct bnge_dev *bd = pci_get_drvdata(auxr_dev->pdev);
+ struct bnge_auxr_info *auxr_info;
+ int rc = 0;
+
+ netdev_lock(bd->netdev);
+ mutex_lock(&auxr_dev->auxr_dev_lock);
+ if (!bd->irq_tbl) {
+ rc = -ENODEV;
+ goto exit;
+ }
+
+ if (!bnge_aux_has_enough_resources(bd)) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ auxr_info = auxr_dev->auxr_info;
+ auxr_info->handle = handle;
+
+ auxr_info->msix_requested = bd->aux_num_msix;
+
+ bnge_fill_msix_vecs(bd, bd->auxr_dev->msix_info);
+ auxr_dev->flags |= BNGE_ARDEV_MSIX_ALLOC;
+
+exit:
+ mutex_unlock(&auxr_dev->auxr_dev_lock);
+ netdev_unlock(bd->netdev);
+ return rc;
+}
+EXPORT_SYMBOL(bnge_register_dev);
+
+void bnge_unregister_dev(struct bnge_auxr_dev *auxr_dev)
+{
+ struct bnge_dev *bd = pci_get_drvdata(auxr_dev->pdev);
+ struct bnge_auxr_info *auxr_info;
+
+ auxr_info = auxr_dev->auxr_info;
+ netdev_lock(bd->netdev);
+ mutex_lock(&auxr_dev->auxr_dev_lock);
+ if (auxr_info->msix_requested)
+ auxr_dev->flags &= ~BNGE_ARDEV_MSIX_ALLOC;
+ auxr_info->msix_requested = 0;
+
+ mutex_unlock(&auxr_dev->auxr_dev_lock);
+ netdev_unlock(bd->netdev);
+}
+EXPORT_SYMBOL(bnge_unregister_dev);
+
+int bnge_send_msg(struct bnge_auxr_dev *auxr_dev, struct bnge_fw_msg *fw_msg)
+{
+ struct bnge_dev *bd = pci_get_drvdata(auxr_dev->pdev);
+ struct output *resp;
+ struct input *req;
+ u32 resp_len;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, 0 /* don't care */);
+ if (rc)
+ return rc;
+
+ rc = bnge_hwrm_req_replace(bd, req, fw_msg->msg, fw_msg->msg_len);
+ if (rc)
+ goto drop_req;
+
+ bnge_hwrm_req_timeout(bd, req, fw_msg->timeout);
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ resp_len = le16_to_cpu(resp->resp_len);
+ if (resp_len) {
+ if (fw_msg->resp_max_len < resp_len)
+ resp_len = fw_msg->resp_max_len;
+
+ memcpy(fw_msg->resp, resp, resp_len);
+ }
+drop_req:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+EXPORT_SYMBOL(bnge_send_msg);
+
+void bnge_rdma_aux_device_uninit(struct bnge_dev *bd)
+{
+ struct bnge_auxr_priv *aux_priv;
+ struct auxiliary_device *adev;
+
+ /* Skip if no auxiliary device init was done. */
+ if (!bd->aux_priv)
+ return;
+
+ aux_priv = bd->aux_priv;
+ adev = &aux_priv->aux_dev;
+ auxiliary_device_uninit(adev);
+}
+
+static void bnge_aux_dev_release(struct device *dev)
+{
+ struct bnge_auxr_priv *aux_priv =
+ container_of(dev, struct bnge_auxr_priv, aux_dev.dev);
+ struct bnge_dev *bd = pci_get_drvdata(aux_priv->auxr_dev->pdev);
+
+ ida_free(&bnge_aux_dev_ids, aux_priv->id);
+ kfree(aux_priv->auxr_dev->auxr_info);
+ bd->auxr_dev = NULL;
+ kfree(aux_priv->auxr_dev);
+ kfree(aux_priv);
+ bd->aux_priv = NULL;
+}
+
+void bnge_rdma_aux_device_del(struct bnge_dev *bd)
+{
+ if (!bd->auxr_dev)
+ return;
+
+ auxiliary_device_delete(&bd->aux_priv->aux_dev);
+}
+
+static void bnge_set_auxr_dev_info(struct bnge_auxr_dev *auxr_dev,
+ struct bnge_dev *bd)
+{
+ auxr_dev->pdev = bd->pdev;
+ auxr_dev->l2_db_size = bd->db_size;
+ auxr_dev->l2_db_size_nc = bd->db_size;
+ auxr_dev->l2_db_offset = bd->db_offset;
+ mutex_init(&auxr_dev->auxr_dev_lock);
+
+ if (bd->flags & BNGE_EN_ROCE_V1)
+ auxr_dev->flags |= BNGE_ARDEV_ROCEV1_SUPP;
+ if (bd->flags & BNGE_EN_ROCE_V2)
+ auxr_dev->flags |= BNGE_ARDEV_ROCEV2_SUPP;
+
+ auxr_dev->chip_num = bd->chip_num;
+ auxr_dev->hw_ring_stats_size = bd->hw_ring_stats_size;
+ auxr_dev->pf_port_id = bd->pf.port_id;
+ auxr_dev->en_state = bd->state;
+ auxr_dev->bar0 = bd->bar0;
+}
+
+void bnge_rdma_aux_device_add(struct bnge_dev *bd)
+{
+ struct auxiliary_device *aux_dev;
+ int rc;
+
+ if (!bd->auxr_dev)
+ return;
+
+ aux_dev = &bd->aux_priv->aux_dev;
+ rc = auxiliary_device_add(aux_dev);
+ if (rc) {
+ dev_warn(bd->dev, "Failed to add auxiliary device for ROCE\n");
+ auxiliary_device_uninit(aux_dev);
+ bd->flags &= ~BNGE_EN_ROCE;
+ }
+
+ bd->auxr_dev->net = bd->netdev;
+}
+
+void bnge_rdma_aux_device_init(struct bnge_dev *bd)
+{
+ struct auxiliary_device *aux_dev;
+ struct bnge_auxr_info *auxr_info;
+ struct bnge_auxr_priv *aux_priv;
+ struct bnge_auxr_dev *auxr_dev;
+ int rc;
+
+ if (!bnge_is_roce_en(bd))
+ return;
+
+ aux_priv = kzalloc(sizeof(*aux_priv), GFP_KERNEL);
+ if (!aux_priv)
+ goto exit;
+
+ aux_priv->id = ida_alloc(&bnge_aux_dev_ids, GFP_KERNEL);
+ if (aux_priv->id < 0) {
+ dev_warn(bd->dev, "ida alloc failed for aux device\n");
+ kfree(aux_priv);
+ goto exit;
+ }
+
+ aux_dev = &aux_priv->aux_dev;
+ aux_dev->id = aux_priv->id;
+ aux_dev->name = "rdma";
+ aux_dev->dev.parent = &bd->pdev->dev;
+ aux_dev->dev.release = bnge_aux_dev_release;
+
+ rc = auxiliary_device_init(aux_dev);
+ if (rc) {
+ ida_free(&bnge_aux_dev_ids, aux_priv->id);
+ kfree(aux_priv);
+ goto exit;
+ }
+ bd->aux_priv = aux_priv;
+
+ auxr_dev = kzalloc(sizeof(*auxr_dev), GFP_KERNEL);
+ if (!auxr_dev)
+ goto aux_dev_uninit;
+
+ aux_priv->auxr_dev = auxr_dev;
+
+ auxr_info = kzalloc(sizeof(*auxr_info), GFP_KERNEL);
+ if (!auxr_info)
+ goto aux_dev_uninit;
+
+ auxr_dev->auxr_info = auxr_info;
+ bd->auxr_dev = auxr_dev;
+ bnge_set_auxr_dev_info(auxr_dev, bd);
+
+ return;
+
+aux_dev_uninit:
+ auxiliary_device_uninit(aux_dev);
+exit:
+ bd->flags &= ~BNGE_EN_ROCE;
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_auxr.h b/drivers/net/ethernet/broadcom/bnge/bnge_auxr.h
new file mode 100644
index 000000000000..6c5c15ef2b0a
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_auxr.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_AUXR_H_
+#define _BNGE_AUXR_H_
+
+#include <linux/auxiliary_bus.h>
+
+#define BNGE_MIN_ROCE_CP_RINGS 2
+#define BNGE_MIN_ROCE_STAT_CTXS 1
+
+#define BNGE_MAX_ROCE_MSIX 64
+
+struct hwrm_async_event_cmpl;
+struct bnge;
+
+struct bnge_msix_info {
+ u32 vector;
+ u32 ring_idx;
+ u32 db_offset;
+};
+
+struct bnge_fw_msg {
+ void *msg;
+ int msg_len;
+ void *resp;
+ int resp_max_len;
+ int timeout;
+};
+
+struct bnge_auxr_info {
+ void *handle;
+ u16 msix_requested;
+};
+
+enum {
+ BNGE_ARDEV_ROCEV1_SUPP = BIT(0),
+ BNGE_ARDEV_ROCEV2_SUPP = BIT(1),
+ BNGE_ARDEV_MSIX_ALLOC = BIT(2),
+};
+
+#define BNGE_ARDEV_ROCE_SUPP (BNGE_ARDEV_ROCEV1_SUPP | \
+ BNGE_ARDEV_ROCEV2_SUPP)
+
+struct bnge_auxr_dev {
+ struct net_device *net;
+ struct pci_dev *pdev;
+ void __iomem *bar0;
+
+ struct bnge_msix_info msix_info[BNGE_MAX_ROCE_MSIX];
+
+ u32 flags;
+
+ struct bnge_auxr_info *auxr_info;
+
+ /* Doorbell BAR size in bytes mapped by L2 driver. */
+ int l2_db_size;
+ /* Doorbell BAR size in bytes mapped as non-cacheable. */
+ int l2_db_size_nc;
+ /* Doorbell offset in bytes within l2_db_size_nc. */
+ int l2_db_offset;
+
+ u16 chip_num;
+ u16 hw_ring_stats_size;
+ u16 pf_port_id;
+ unsigned long en_state;
+
+ u16 auxr_num_msix_vec;
+ u16 auxr_num_ctxs;
+
+ /* serialize auxr operations */
+ struct mutex auxr_dev_lock;
+};
+
+void bnge_rdma_aux_device_uninit(struct bnge_dev *bdev);
+void bnge_rdma_aux_device_del(struct bnge_dev *bdev);
+void bnge_rdma_aux_device_add(struct bnge_dev *bdev);
+void bnge_rdma_aux_device_init(struct bnge_dev *bdev);
+int bnge_register_dev(struct bnge_auxr_dev *adev,
+ void *handle);
+void bnge_unregister_dev(struct bnge_auxr_dev *adev);
+int bnge_send_msg(struct bnge_auxr_dev *adev, struct bnge_fw_msg *fw_msg);
+
+#endif /* _BNGE_AUXR_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_core.c b/drivers/net/ethernet/broadcom/bnge/bnge_core.c
new file mode 100644
index 000000000000..c94e132bebc8
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_core.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/init.h>
+#include <linux/crash_dump.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "bnge.h"
+#include "bnge_devlink.h"
+#include "bnge_hwrm.h"
+#include "bnge_hwrm_lib.h"
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRV_SUMMARY);
+
+char bnge_driver_name[] = DRV_NAME;
+
+static const struct {
+ char *name;
+} board_info[] = {
+ [BCM57708] = { "Broadcom BCM57708 50Gb/100Gb/200Gb/400Gb/800Gb Ethernet" },
+};
+
+static const struct pci_device_id bnge_pci_tbl[] = {
+ { PCI_VDEVICE(BROADCOM, 0x1780), .driver_data = BCM57708 },
+ /* Required last entry */
+ {0, }
+};
+MODULE_DEVICE_TABLE(pci, bnge_pci_tbl);
+
+static void bnge_print_device_info(struct pci_dev *pdev, enum board_idx idx)
+{
+ struct device *dev = &pdev->dev;
+
+ dev_info(dev, "%s found at mem %lx\n", board_info[idx].name,
+ (long)pci_resource_start(pdev, 0));
+
+ pcie_print_link_status(pdev);
+}
+
+bool bnge_aux_registered(struct bnge_dev *bd)
+{
+ struct bnge_auxr_dev *ba_dev = bd->auxr_dev;
+
+ if (ba_dev && ba_dev->auxr_info->msix_requested)
+ return true;
+
+ return false;
+}
+
+static void bnge_nvm_cfg_ver_get(struct bnge_dev *bd)
+{
+ struct hwrm_nvm_get_dev_info_output nvm_info;
+
+ if (!bnge_hwrm_nvm_dev_info(bd, &nvm_info))
+ snprintf(bd->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
+ nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
+ nvm_info.nvm_cfg_ver_upd);
+}
+
+static int bnge_func_qcaps(struct bnge_dev *bd)
+{
+ int rc;
+
+ rc = bnge_hwrm_func_qcaps(bd);
+ if (rc)
+ return rc;
+
+ rc = bnge_hwrm_queue_qportcfg(bd);
+ if (rc) {
+ dev_err(bd->dev, "query qportcfg failure rc: %d\n", rc);
+ return rc;
+ }
+
+ rc = bnge_hwrm_func_resc_qcaps(bd);
+ if (rc) {
+ dev_err(bd->dev, "query resc caps failure rc: %d\n", rc);
+ return rc;
+ }
+
+ rc = bnge_hwrm_func_qcfg(bd);
+ if (rc) {
+ dev_err(bd->dev, "query config failure rc: %d\n", rc);
+ return rc;
+ }
+
+ rc = bnge_hwrm_vnic_qcaps(bd);
+ if (rc) {
+ dev_err(bd->dev, "vnic caps failure rc: %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void bnge_fw_unregister_dev(struct bnge_dev *bd)
+{
+ /* Context memory can be freed only after unregistering from firmware */
+ bnge_hwrm_func_drv_unrgtr(bd);
+ bnge_free_ctx_mem(bd);
+}
+
+static void bnge_set_dflt_rss_hash_type(struct bnge_dev *bd)
+{
+ bd->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
+}
+
+static int bnge_fw_register_dev(struct bnge_dev *bd)
+{
+ int rc;
+
+ bd->fw_cap = 0;
+ rc = bnge_hwrm_ver_get(bd);
+ if (rc) {
+ dev_err(bd->dev, "Get Version command failed rc: %d\n", rc);
+ return rc;
+ }
+
+ bnge_nvm_cfg_ver_get(bd);
+
+ rc = bnge_hwrm_func_reset(bd);
+ if (rc) {
+ dev_err(bd->dev, "Failed to reset function rc: %d\n", rc);
+ return rc;
+ }
+
+ bnge_hwrm_fw_set_time(bd);
+
+ rc = bnge_hwrm_func_drv_rgtr(bd);
+ if (rc) {
+ dev_err(bd->dev, "Failed to rgtr with firmware rc: %d\n", rc);
+ return rc;
+ }
+
+ rc = bnge_alloc_ctx_mem(bd);
+ if (rc) {
+ dev_err(bd->dev, "Failed to allocate ctx mem rc: %d\n", rc);
+ goto err_func_unrgtr;
+ }
+
+ /* Get the resources and configuration from firmware */
+ rc = bnge_func_qcaps(bd);
+ if (rc) {
+ dev_err(bd->dev, "Failed initial configuration rc: %d\n", rc);
+ rc = -ENODEV;
+ goto err_func_unrgtr;
+ }
+
+ bnge_set_dflt_rss_hash_type(bd);
+
+ return 0;
+
+err_func_unrgtr:
+ bnge_fw_unregister_dev(bd);
+ return rc;
+}
+
+static void bnge_pci_disable(struct pci_dev *pdev)
+{
+ pci_release_regions(pdev);
+ if (pci_is_enabled(pdev))
+ pci_disable_device(pdev);
+}
+
+static int bnge_pci_enable(struct pci_dev *pdev)
+{
+ int rc;
+
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+ return rc;
+ }
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ dev_err(&pdev->dev,
+ "Cannot find PCI device base address, aborting\n");
+ rc = -ENODEV;
+ goto err_pci_disable;
+ }
+
+ rc = pci_request_regions(pdev, bnge_driver_name);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
+ goto err_pci_disable;
+ }
+
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+
+ pci_set_master(pdev);
+
+ return 0;
+
+err_pci_disable:
+ pci_disable_device(pdev);
+ return rc;
+}
+
+static void bnge_unmap_bars(struct pci_dev *pdev)
+{
+ struct bnge_dev *bd = pci_get_drvdata(pdev);
+
+ if (bd->bar1) {
+ pci_iounmap(pdev, bd->bar1);
+ bd->bar1 = NULL;
+ }
+
+ if (bd->bar0) {
+ pci_iounmap(pdev, bd->bar0);
+ bd->bar0 = NULL;
+ }
+}
+
+static void bnge_set_max_func_irqs(struct bnge_dev *bd,
+ unsigned int max_irqs)
+{
+ bd->hw_resc.max_irqs = max_irqs;
+}
+
+static int bnge_get_max_irq(struct pci_dev *pdev)
+{
+ u16 ctrl;
+
+ pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+ return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
+}
+
+static int bnge_map_db_bar(struct bnge_dev *bd)
+{
+ if (!bd->db_size)
+ return -ENODEV;
+
+ bd->bar1 = pci_iomap(bd->pdev, 2, bd->db_size);
+ if (!bd->bar1)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int bnge_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ unsigned int max_irqs;
+ struct bnge_dev *bd;
+ int rc;
+
+ if (pci_is_bridge(pdev))
+ return -ENODEV;
+
+ if (!pdev->msix_cap) {
+ dev_err(&pdev->dev, "MSIX capability missing, aborting\n");
+ return -ENODEV;
+ }
+
+ if (is_kdump_kernel()) {
+ pci_clear_master(pdev);
+ pcie_flr(pdev);
+ }
+
+ rc = bnge_pci_enable(pdev);
+ if (rc)
+ return rc;
+
+ bnge_print_device_info(pdev, ent->driver_data);
+
+ bd = bnge_devlink_alloc(pdev);
+ if (!bd) {
+ dev_err(&pdev->dev, "Devlink allocation failed\n");
+ rc = -ENOMEM;
+ goto err_pci_disable;
+ }
+
+ bd->bar0 = pci_ioremap_bar(pdev, 0);
+ if (!bd->bar0) {
+ dev_err(&pdev->dev, "Failed mapping BAR-0, aborting\n");
+ rc = -ENOMEM;
+ goto err_devl_free;
+ }
+
+ rc = bnge_init_hwrm_resources(bd);
+ if (rc)
+ goto err_bar_unmap;
+
+ rc = bnge_fw_register_dev(bd);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to register with firmware rc = %d\n", rc);
+ goto err_hwrm_cleanup;
+ }
+
+ bnge_devlink_register(bd);
+
+ max_irqs = bnge_get_max_irq(pdev);
+ bnge_set_max_func_irqs(bd, max_irqs);
+
+ bnge_aux_init_dflt_config(bd);
+
+ rc = bnge_net_init_dflt_config(bd);
+ if (rc) {
+ dev_err(&pdev->dev, "Error setting up default cfg to netdev rc = %d\n",
+ rc);
+ goto err_fw_reg;
+ }
+
+ rc = bnge_map_db_bar(bd);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed mapping doorbell BAR rc = %d, aborting\n",
+ rc);
+ goto err_config_uninit;
+ }
+
+#if BITS_PER_LONG == 32
+ spin_lock_init(&bd->db_lock);
+#endif
+
+ bnge_rdma_aux_device_init(bd);
+
+ rc = bnge_alloc_irqs(bd);
+ if (rc) {
+ dev_err(&pdev->dev, "Error IRQ allocation rc = %d\n", rc);
+ goto err_uninit_auxr;
+ }
+
+ rc = bnge_netdev_alloc(bd, max_irqs);
+ if (rc)
+ goto err_free_irq;
+
+ bnge_rdma_aux_device_add(bd);
+
+ pci_save_state(pdev);
+
+ return 0;
+
+err_free_irq:
+ bnge_free_irqs(bd);
+
+err_uninit_auxr:
+ bnge_rdma_aux_device_uninit(bd);
+
+err_config_uninit:
+ bnge_net_uninit_dflt_config(bd);
+
+err_fw_reg:
+ bnge_devlink_unregister(bd);
+ bnge_fw_unregister_dev(bd);
+
+err_hwrm_cleanup:
+ bnge_cleanup_hwrm_resources(bd);
+
+err_bar_unmap:
+ bnge_unmap_bars(pdev);
+
+err_devl_free:
+ bnge_devlink_free(bd);
+
+err_pci_disable:
+ bnge_pci_disable(pdev);
+ return rc;
+}
+
+static void bnge_remove_one(struct pci_dev *pdev)
+{
+ struct bnge_dev *bd = pci_get_drvdata(pdev);
+
+ bnge_rdma_aux_device_del(bd);
+
+ bnge_netdev_free(bd);
+
+ bnge_free_irqs(bd);
+
+ bnge_rdma_aux_device_uninit(bd);
+
+ bnge_net_uninit_dflt_config(bd);
+
+ bnge_devlink_unregister(bd);
+
+ bnge_fw_unregister_dev(bd);
+
+ bnge_cleanup_hwrm_resources(bd);
+
+ bnge_unmap_bars(pdev);
+
+ bnge_devlink_free(bd);
+
+ bnge_pci_disable(pdev);
+}
+
+static void bnge_shutdown(struct pci_dev *pdev)
+{
+ pci_disable_device(pdev);
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, 0);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+static struct pci_driver bnge_driver = {
+ .name = bnge_driver_name,
+ .id_table = bnge_pci_tbl,
+ .probe = bnge_probe_one,
+ .remove = bnge_remove_one,
+ .shutdown = bnge_shutdown,
+};
+
+static int __init bnge_init_module(void)
+{
+ return pci_register_driver(&bnge_driver);
+}
+module_init(bnge_init_module);
+
+static void __exit bnge_exit_module(void)
+{
+ pci_unregister_driver(&bnge_driver);
+}
+module_exit(bnge_exit_module);
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_db.h b/drivers/net/ethernet/broadcom/bnge/bnge_db.h
new file mode 100644
index 000000000000..950ed582f1d8
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_db.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_DB_H_
+#define _BNGE_DB_H_
+
+/* 64-bit doorbell */
+#define DBR_EPOCH_SFT 24
+#define DBR_TOGGLE_SFT 25
+#define DBR_XID_SFT 32
+#define DBR_PATH_L2 (0x1ULL << 56)
+#define DBR_VALID (0x1ULL << 58)
+#define DBR_TYPE_SQ (0x0ULL << 60)
+#define DBR_TYPE_SRQ (0x2ULL << 60)
+#define DBR_TYPE_CQ (0x4ULL << 60)
+#define DBR_TYPE_CQ_ARMALL (0x6ULL << 60)
+#define DBR_TYPE_NQ (0xaULL << 60)
+#define DBR_TYPE_NQ_ARM (0xbULL << 60)
+#define DBR_TYPE_NQ_MASK (0xeULL << 60)
+
+struct bnge_db_info {
+ void __iomem *doorbell;
+ u64 db_key64;
+ u32 db_ring_mask;
+ u32 db_epoch_mask;
+ u8 db_epoch_shift;
+};
+
+#define DB_EPOCH(db, idx) (((idx) & (db)->db_epoch_mask) << \
+ ((db)->db_epoch_shift))
+#define DB_RING_IDX(db, idx) (((idx) & (db)->db_ring_mask) | \
+ DB_EPOCH(db, idx))
+
+#endif /* _BNGE_DB_H_ */
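The DB_RING_IDX()/DB_EPOCH() macros above fold the producer index into the doorbell value: the low bits carry the index masked to the ring size, and one extra epoch bit flips every time the index wraps, so the hardware can tell a stale doorbell from a fresh one. A standalone check of that arithmetic for a hypothetical 128-entry ring; the mask and shift values are picked by hand here, while the driver derives them from the real ring size:

/* Standalone check of the epoch-folding arithmetic; constants are invented
 * for a 128-entry ring so the epoch bit lands at DBR_EPOCH_SFT (24). */
#include <stdint.h>
#include <stdio.h>

#define RING_MASK	127u	/* 128-entry ring */
#define EPOCH_MASK	128u	/* the bit just above the ring mask */
#define EPOCH_SHIFT	17	/* moves that bit up to position 24 */

static uint32_t db_ring_idx(uint32_t idx)
{
	uint32_t epoch = (idx & EPOCH_MASK) << EPOCH_SHIFT;

	return (idx & RING_MASK) | epoch;
}

int main(void)
{
	uint32_t idx;

	for (idx = 126; idx <= 130; idx++)
		printf("idx %3u -> doorbell idx 0x%08x (epoch bit %u)\n",
		       idx, db_ring_idx(idx), (db_ring_idx(idx) >> 24) & 1);
	return 0;
}

Running it shows the ring index wrapping back to 0 at idx 128 while bit 24 toggles, which is exactly the staleness marker the hardware checks.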
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_devlink.c b/drivers/net/ethernet/broadcom/bnge/bnge_devlink.c
new file mode 100644
index 000000000000..a987afebd64d
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_devlink.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/unaligned.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <net/devlink.h>
+
+#include "bnge.h"
+#include "bnge_devlink.h"
+#include "bnge_hwrm_lib.h"
+
+static int bnge_dl_info_put(struct bnge_dev *bd, struct devlink_info_req *req,
+ enum bnge_dl_version_type type, const char *key,
+ char *buf)
+{
+ if (!strlen(buf))
+ return 0;
+
+ if (!strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_NCSI) ||
+ !strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_ROCE))
+ return 0;
+
+ switch (type) {
+ case BNGE_VERSION_FIXED:
+ return devlink_info_version_fixed_put(req, key, buf);
+ case BNGE_VERSION_RUNNING:
+ return devlink_info_version_running_put(req, key, buf);
+ case BNGE_VERSION_STORED:
+ return devlink_info_version_stored_put(req, key, buf);
+ }
+
+ return 0;
+}
+
+static void bnge_vpd_read_info(struct bnge_dev *bd)
+{
+ struct pci_dev *pdev = bd->pdev;
+ unsigned int vpd_size, kw_len;
+ int pos, size;
+ u8 *vpd_data;
+
+ vpd_data = pci_vpd_alloc(pdev, &vpd_size);
+ if (IS_ERR(vpd_data)) {
+ pci_warn(pdev, "Unable to read VPD\n");
+ return;
+ }
+
+ pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
+ if (pos < 0)
+ goto read_sn;
+
+ size = min_t(int, kw_len, BNGE_VPD_FLD_LEN - 1);
+ memcpy(bd->board_partno, &vpd_data[pos], size);
+
+read_sn:
+ pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_SERIALNO,
+ &kw_len);
+ if (pos < 0)
+ goto exit;
+
+ size = min_t(int, kw_len, BNGE_VPD_FLD_LEN - 1);
+ memcpy(bd->board_serialno, &vpd_data[pos], size);
+
+exit:
+ kfree(vpd_data);
+}
+
+#define HWRM_FW_VER_STR_LEN 16
+
+static int bnge_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct hwrm_nvm_get_dev_info_output nvm_dev_info;
+ struct bnge_dev *bd = devlink_priv(devlink);
+ struct hwrm_ver_get_output *ver_resp;
+ char mgmt_ver[FW_VER_STR_LEN];
+ char roce_ver[FW_VER_STR_LEN];
+ char ncsi_ver[FW_VER_STR_LEN];
+ char buf[32];
+
+ int rc;
+
+ if (bd->dsn) {
+ char buf[32];
+ u8 dsn[8];
+ int rc;
+
+ put_unaligned_le64(bd->dsn, dsn);
+ sprintf(buf, "%02X-%02X-%02X-%02X-%02X-%02X-%02X-%02X",
+ dsn[7], dsn[6], dsn[5], dsn[4],
+ dsn[3], dsn[2], dsn[1], dsn[0]);
+ rc = devlink_info_serial_number_put(req, buf);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set dsn");
+ return rc;
+ }
+ }
+
+ if (strlen(bd->board_serialno)) {
+ rc = devlink_info_board_serial_number_put(req,
+ bd->board_serialno);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set board serial number");
+ return rc;
+ }
+ }
+
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_FIXED,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_ID,
+ bd->board_partno);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set board part number");
+ return rc;
+ }
+
+ /* More information from HWRM ver get command */
+ sprintf(buf, "%X", bd->chip_num);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_FIXED,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_ID, buf);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set asic id");
+ return rc;
+ }
+
+ ver_resp = &bd->ver_resp;
+ sprintf(buf, "%c%d", 'A' + ver_resp->chip_rev, ver_resp->chip_metal);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_FIXED,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_REV, buf);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set asic info");
+ return rc;
+ }
+
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
+ bd->nvm_cfg_ver);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set firmware version");
+ return rc;
+ }
+
+ buf[0] = 0;
+ strncat(buf, ver_resp->active_pkg_name, HWRM_FW_VER_STR_LEN);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW, buf);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set firmware generic version");
+ return rc;
+ }
+
+ if (ver_resp->flags & VER_GET_RESP_FLAGS_EXT_VER_AVAIL) {
+ snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->hwrm_fw_major, ver_resp->hwrm_fw_minor,
+ ver_resp->hwrm_fw_build, ver_resp->hwrm_fw_patch);
+
+ snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->mgmt_fw_major, ver_resp->mgmt_fw_minor,
+ ver_resp->mgmt_fw_build, ver_resp->mgmt_fw_patch);
+
+ snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->roce_fw_major, ver_resp->roce_fw_minor,
+ ver_resp->roce_fw_build, ver_resp->roce_fw_patch);
+ } else {
+ snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->hwrm_fw_maj_8b, ver_resp->hwrm_fw_min_8b,
+ ver_resp->hwrm_fw_bld_8b, ver_resp->hwrm_fw_rsvd_8b);
+
+ snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->mgmt_fw_maj_8b, ver_resp->mgmt_fw_min_8b,
+ ver_resp->mgmt_fw_bld_8b, ver_resp->mgmt_fw_rsvd_8b);
+
+ snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->roce_fw_maj_8b, ver_resp->roce_fw_min_8b,
+ ver_resp->roce_fw_bld_8b, ver_resp->roce_fw_rsvd_8b);
+ }
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, mgmt_ver);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set firmware mgmt version");
+ return rc;
+ }
+
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API,
+ bd->hwrm_ver_supp);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set firmware mgmt api version");
+ return rc;
+ }
+
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_NCSI, ncsi_ver);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set ncsi firmware version");
+ return rc;
+ }
+
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set roce firmware version");
+ return rc;
+ }
+
+ rc = bnge_hwrm_nvm_dev_info(bd, &nvm_dev_info);
+ if (!(nvm_dev_info.flags & NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID))
+ return 0;
+
+ buf[0] = 0;
+ strncat(buf, nvm_dev_info.pkg_name, HWRM_FW_VER_STR_LEN);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW, buf);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set roce firmware version");
+ return rc;
+ }
+
+ snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ nvm_dev_info.hwrm_fw_major, nvm_dev_info.hwrm_fw_minor,
+ nvm_dev_info.hwrm_fw_build, nvm_dev_info.hwrm_fw_patch);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, mgmt_ver);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set stored firmware version");
+ return rc;
+ }
+
+ snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ nvm_dev_info.mgmt_fw_major, nvm_dev_info.mgmt_fw_minor,
+ nvm_dev_info.mgmt_fw_build, nvm_dev_info.mgmt_fw_patch);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW_NCSI, ncsi_ver);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set stored ncsi firmware version");
+ return rc;
+ }
+
+ snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ nvm_dev_info.roce_fw_major, nvm_dev_info.roce_fw_minor,
+ nvm_dev_info.roce_fw_build, nvm_dev_info.roce_fw_patch);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
+ if (rc)
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set stored roce firmware version");
+
+ return rc;
+}
+
+static const struct devlink_ops bnge_devlink_ops = {
+ .info_get = bnge_devlink_info_get,
+};
+
+void bnge_devlink_free(struct bnge_dev *bd)
+{
+ struct devlink *devlink = priv_to_devlink(bd);
+
+ devlink_free(devlink);
+}
+
+struct bnge_dev *bnge_devlink_alloc(struct pci_dev *pdev)
+{
+ struct devlink *devlink;
+ struct bnge_dev *bd;
+
+ devlink = devlink_alloc(&bnge_devlink_ops, sizeof(*bd), &pdev->dev);
+ if (!devlink)
+ return NULL;
+
+ bd = devlink_priv(devlink);
+ pci_set_drvdata(pdev, bd);
+ bd->dev = &pdev->dev;
+ bd->pdev = pdev;
+
+ bd->dsn = pci_get_dsn(pdev);
+ if (!bd->dsn)
+ pci_warn(pdev, "Failed to get DSN\n");
+
+ bnge_vpd_read_info(bd);
+
+ return bd;
+}
+
+void bnge_devlink_register(struct bnge_dev *bd)
+{
+ struct devlink *devlink = priv_to_devlink(bd);
+ devlink_register(devlink);
+}
+
+void bnge_devlink_unregister(struct bnge_dev *bd)
+{
+ struct devlink *devlink = priv_to_devlink(bd);
+ devlink_unregister(devlink);
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_devlink.h b/drivers/net/ethernet/broadcom/bnge/bnge_devlink.h
new file mode 100644
index 000000000000..c6575255e650
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_devlink.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_DEVLINK_H_
+#define _BNGE_DEVLINK_H_
+
+enum bnge_dl_version_type {
+ BNGE_VERSION_FIXED,
+ BNGE_VERSION_RUNNING,
+ BNGE_VERSION_STORED,
+};
+
+void bnge_devlink_free(struct bnge_dev *bd);
+struct bnge_dev *bnge_devlink_alloc(struct pci_dev *pdev);
+void bnge_devlink_register(struct bnge_dev *bd);
+void bnge_devlink_unregister(struct bnge_dev *bd);
+
+#endif /* _BNGE_DEVLINK_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.c b/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.c
new file mode 100644
index 000000000000..569371c1b4f2
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/unaligned.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <net/devlink.h>
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool_netlink.h>
+
+#include "bnge.h"
+#include "bnge_ethtool.h"
+
+static void bnge_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct bnge_net *bn = netdev_priv(dev);
+ struct bnge_dev *bd = bn->bd;
+
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->fw_version, bd->fw_ver_str, sizeof(info->fw_version));
+ strscpy(info->bus_info, pci_name(bd->pdev), sizeof(info->bus_info));
+}
+
+static const struct ethtool_ops bnge_ethtool_ops = {
+ .get_drvinfo = bnge_get_drvinfo,
+};
+
+void bnge_set_ethtool_ops(struct net_device *dev)
+{
+ dev->ethtool_ops = &bnge_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.h b/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.h
new file mode 100644
index 000000000000..21e96a0976d5
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_ETHTOOL_H_
+#define _BNGE_ETHTOOL_H_
+
+void bnge_set_ethtool_ops(struct net_device *dev);
+
+#endif /* _BNGE_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.c b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.c
new file mode 100644
index 000000000000..c3087e5cd875
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.c
@@ -0,0 +1,548 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <asm/byteorder.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+
+#include "bnge.h"
+#include "bnge_hwrm.h"
+
+static u64 bnge_cal_sentinel(struct bnge_hwrm_ctx *ctx, u16 req_type)
+{
+ return (((uintptr_t)ctx) + req_type) ^ BNGE_HWRM_SENTINEL;
+}
+
+int bnge_hwrm_req_create(struct bnge_dev *bd, void **req, u16 req_type,
+ u32 req_len)
+{
+ struct bnge_hwrm_ctx *ctx;
+ dma_addr_t dma_handle;
+ u8 *req_addr;
+
+ if (req_len > BNGE_HWRM_CTX_OFFSET)
+ return -E2BIG;
+
+ req_addr = dma_pool_alloc(bd->hwrm_dma_pool, GFP_KERNEL | __GFP_ZERO,
+ &dma_handle);
+ if (!req_addr)
+ return -ENOMEM;
+
+ ctx = (struct bnge_hwrm_ctx *)(req_addr + BNGE_HWRM_CTX_OFFSET);
+ /* safety first, sentinel used to check for invalid requests */
+ ctx->sentinel = bnge_cal_sentinel(ctx, req_type);
+ ctx->req_len = req_len;
+ ctx->req = (struct input *)req_addr;
+ ctx->resp = (struct output *)(req_addr + BNGE_HWRM_RESP_OFFSET);
+ ctx->dma_handle = dma_handle;
+ ctx->flags = 0; /* __GFP_ZERO, but be explicit regarding ownership */
+ ctx->timeout = bd->hwrm_cmd_timeout ?: BNGE_DFLT_HWRM_CMD_TIMEOUT;
+ ctx->allocated = BNGE_HWRM_DMA_SIZE - BNGE_HWRM_CTX_OFFSET;
+ ctx->gfp = GFP_KERNEL;
+ ctx->slice_addr = NULL;
+
+ /* initialize common request fields */
+ ctx->req->req_type = cpu_to_le16(req_type);
+ ctx->req->resp_addr = cpu_to_le64(dma_handle + BNGE_HWRM_RESP_OFFSET);
+ ctx->req->cmpl_ring = cpu_to_le16(BNGE_HWRM_NO_CMPL_RING);
+ ctx->req->target_id = cpu_to_le16(BNGE_HWRM_TARGET);
+ *req = ctx->req;
+
+ return 0;
+}
+
+static struct bnge_hwrm_ctx *__hwrm_ctx_get(struct bnge_dev *bd, u8 *req_addr)
+{
+ void *ctx_addr = req_addr + BNGE_HWRM_CTX_OFFSET;
+ struct input *req = (struct input *)req_addr;
+ struct bnge_hwrm_ctx *ctx = ctx_addr;
+ u64 sentinel;
+
+ if (!req) {
+ dev_err(bd->dev, "null HWRM request");
+ dump_stack();
+ return NULL;
+ }
+
+ /* HWRM API has no type safety, verify sentinel to validate address */
+ sentinel = bnge_cal_sentinel(ctx, le16_to_cpu(req->req_type));
+ if (ctx->sentinel != sentinel) {
+ dev_err(bd->dev, "HWRM sentinel mismatch, req_type = %u\n",
+ (u32)le16_to_cpu(req->req_type));
+ dump_stack();
+ return NULL;
+ }
+
+ return ctx;
+}
+
+void bnge_hwrm_req_timeout(struct bnge_dev *bd,
+ void *req, unsigned int timeout)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+
+ if (ctx)
+ ctx->timeout = timeout;
+}
+
+void bnge_hwrm_req_alloc_flags(struct bnge_dev *bd, void *req, gfp_t gfp)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+
+ if (ctx)
+ ctx->gfp = gfp;
+}
+
+int bnge_hwrm_req_replace(struct bnge_dev *bd, void *req, void *new_req,
+ u32 len)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+ struct input *internal_req = req;
+ u16 req_type;
+
+ if (!ctx)
+ return -EINVAL;
+
+ if (len > BNGE_HWRM_CTX_OFFSET)
+ return -E2BIG;
+
+ /* free any existing slices */
+ ctx->allocated = BNGE_HWRM_DMA_SIZE - BNGE_HWRM_CTX_OFFSET;
+ if (ctx->slice_addr) {
+ dma_free_coherent(bd->dev, ctx->slice_size,
+ ctx->slice_addr, ctx->slice_handle);
+ ctx->slice_addr = NULL;
+ }
+ ctx->gfp = GFP_KERNEL;
+
+ if ((bd->fw_cap & BNGE_FW_CAP_SHORT_CMD) || len > BNGE_HWRM_MAX_REQ_LEN) {
+ memcpy(internal_req, new_req, len);
+ } else {
+ internal_req->req_type = ((struct input *)new_req)->req_type;
+ ctx->req = new_req;
+ }
+
+ ctx->req_len = len;
+ ctx->req->resp_addr = cpu_to_le64(ctx->dma_handle +
+ BNGE_HWRM_RESP_OFFSET);
+
+ /* update sentinel for potentially new request type */
+ req_type = le16_to_cpu(internal_req->req_type);
+ ctx->sentinel = bnge_cal_sentinel(ctx, req_type);
+
+ return 0;
+}
+
+void bnge_hwrm_req_flags(struct bnge_dev *bd, void *req,
+ enum bnge_hwrm_ctx_flags flags)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+
+ if (ctx)
+ ctx->flags |= (flags & BNGE_HWRM_API_FLAGS);
+}
+
+void *bnge_hwrm_req_hold(struct bnge_dev *bd, void *req)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+ struct input *input = (struct input *)req;
+
+ if (!ctx)
+ return NULL;
+
+ if (ctx->flags & BNGE_HWRM_INTERNAL_CTX_OWNED) {
+ dev_err(bd->dev, "HWRM context already owned, req_type = %u\n",
+ (u32)le16_to_cpu(input->req_type));
+ dump_stack();
+ return NULL;
+ }
+
+ ctx->flags |= BNGE_HWRM_INTERNAL_CTX_OWNED;
+ return ((u8 *)req) + BNGE_HWRM_RESP_OFFSET;
+}
+
+static void __hwrm_ctx_invalidate(struct bnge_dev *bd,
+ struct bnge_hwrm_ctx *ctx)
+{
+ void *addr = ((u8 *)ctx) - BNGE_HWRM_CTX_OFFSET;
+ dma_addr_t dma_handle = ctx->dma_handle; /* save before invalidate */
+
+ /* unmap any auxiliary DMA slice */
+ if (ctx->slice_addr)
+ dma_free_coherent(bd->dev, ctx->slice_size,
+ ctx->slice_addr, ctx->slice_handle);
+
+	/* invalidate: clear the ownership flag, sentinel and dma_handle */
+ memset(ctx, 0, sizeof(struct bnge_hwrm_ctx));
+
+ /* return the buffer to the DMA pool */
+ if (dma_handle)
+ dma_pool_free(bd->hwrm_dma_pool, addr, dma_handle);
+}
+
+void bnge_hwrm_req_drop(struct bnge_dev *bd, void *req)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+
+ if (ctx)
+ __hwrm_ctx_invalidate(bd, ctx);
+}
+
+static int bnge_map_hwrm_error(u32 hwrm_err)
+{
+ switch (hwrm_err) {
+ case HWRM_ERR_CODE_SUCCESS:
+ return 0;
+ case HWRM_ERR_CODE_RESOURCE_LOCKED:
+ return -EROFS;
+ case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
+ return -EACCES;
+ case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
+ return -ENOSPC;
+ case HWRM_ERR_CODE_INVALID_PARAMS:
+ case HWRM_ERR_CODE_INVALID_FLAGS:
+ case HWRM_ERR_CODE_INVALID_ENABLES:
+ case HWRM_ERR_CODE_UNSUPPORTED_TLV:
+ case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
+ return -EINVAL;
+ case HWRM_ERR_CODE_NO_BUFFER:
+ return -ENOMEM;
+ case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
+ case HWRM_ERR_CODE_BUSY:
+ return -EAGAIN;
+ case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ case HWRM_ERR_CODE_PF_UNAVAILABLE:
+ return -ENODEV;
+ default:
+ return -EIO;
+ }
+}
+
+static struct bnge_hwrm_wait_token *
+bnge_hwrm_create_token(struct bnge_dev *bd, enum bnge_hwrm_chnl dst)
+{
+ struct bnge_hwrm_wait_token *token;
+
+ token = kzalloc(sizeof(*token), GFP_KERNEL);
+ if (!token)
+ return NULL;
+
+ mutex_lock(&bd->hwrm_cmd_lock);
+
+ token->dst = dst;
+ token->state = BNGE_HWRM_PENDING;
+ if (dst == BNGE_HWRM_CHNL_CHIMP) {
+ token->seq_id = bd->hwrm_cmd_seq++;
+ hlist_add_head_rcu(&token->node, &bd->hwrm_pending_list);
+ } else {
+ token->seq_id = bd->hwrm_cmd_kong_seq++;
+ }
+
+ return token;
+}
+
+static void
+bnge_hwrm_destroy_token(struct bnge_dev *bd, struct bnge_hwrm_wait_token *token)
+{
+ if (token->dst == BNGE_HWRM_CHNL_CHIMP) {
+ hlist_del_rcu(&token->node);
+ kfree_rcu(token, rcu);
+ } else {
+ kfree(token);
+ }
+ mutex_unlock(&bd->hwrm_cmd_lock);
+}
+
+static void bnge_hwrm_req_dbg(struct bnge_dev *bd, struct input *req)
+{
+ u32 ring = le16_to_cpu(req->cmpl_ring);
+ u32 type = le16_to_cpu(req->req_type);
+ u32 tgt = le16_to_cpu(req->target_id);
+ u32 seq = le16_to_cpu(req->seq_id);
+ char opt[32] = "\n";
+
+ if (unlikely(ring != (u16)BNGE_HWRM_NO_CMPL_RING))
+ snprintf(opt, 16, " ring %d\n", ring);
+
+ if (unlikely(tgt != BNGE_HWRM_TARGET))
+ snprintf(opt + strlen(opt) - 1, 16, " tgt 0x%x\n", tgt);
+
+ dev_dbg(bd->dev, "sent hwrm req_type 0x%x seq id 0x%x%s",
+ type, seq, opt);
+}
+
+#define bnge_hwrm_err(bd, ctx, fmt, ...) \
+ do { \
+ if ((ctx)->flags & BNGE_HWRM_CTX_SILENT) \
+ dev_dbg((bd)->dev, fmt, __VA_ARGS__); \
+ else \
+ dev_err((bd)->dev, fmt, __VA_ARGS__); \
+ } while (0)
+
+static int __hwrm_send_ctx(struct bnge_dev *bd, struct bnge_hwrm_ctx *ctx)
+{
+ u32 doorbell_offset = BNGE_GRCPF_REG_CHIMP_COMM_TRIGGER;
+ enum bnge_hwrm_chnl dst = BNGE_HWRM_CHNL_CHIMP;
+ u32 bar_offset = BNGE_GRCPF_REG_CHIMP_COMM;
+ struct bnge_hwrm_wait_token *token = NULL;
+ u16 max_req_len = BNGE_HWRM_MAX_REQ_LEN;
+ unsigned int i, timeout, tmo_count;
+ u32 *data = (u32 *)ctx->req;
+ u32 msg_len = ctx->req_len;
+ int rc = -EBUSY;
+ u32 req_type;
+ u16 len = 0;
+ u8 *valid;
+
+ if (ctx->flags & BNGE_HWRM_INTERNAL_RESP_DIRTY)
+ memset(ctx->resp, 0, PAGE_SIZE);
+
+ req_type = le16_to_cpu(ctx->req->req_type);
+
+ if (msg_len > BNGE_HWRM_MAX_REQ_LEN &&
+ msg_len > bd->hwrm_max_ext_req_len) {
+		dev_warn(bd->dev, "oversized hwrm request, req_type 0x%x\n",
+			 req_type);
+ rc = -E2BIG;
+ goto exit;
+ }
+
+ token = bnge_hwrm_create_token(bd, dst);
+ if (!token) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+ ctx->req->seq_id = cpu_to_le16(token->seq_id);
+
+ /* Ensure any associated DMA buffers are written before doorbell */
+ wmb();
+
+ /* Write request msg to hwrm channel */
+ __iowrite32_copy(bd->bar0 + bar_offset, data, msg_len / 4);
+
+ for (i = msg_len; i < max_req_len; i += 4)
+ writel(0, bd->bar0 + bar_offset + i);
+
+ /* Ring channel doorbell */
+ writel(1, bd->bar0 + doorbell_offset);
+
+ bnge_hwrm_req_dbg(bd, ctx->req);
+
+ /* Limit timeout to an upper limit */
+ timeout = min(ctx->timeout,
+ bd->hwrm_cmd_max_timeout ?: BNGE_HWRM_CMD_MAX_TIMEOUT);
+ /* convert timeout to usec */
+ timeout *= 1000;
+
+ i = 0;
+ /* Short timeout for the first few iterations:
+ * number of loops = number of loops for short timeout +
+ * number of loops for standard timeout.
+ */
+ tmo_count = BNGE_HWRM_SHORT_TIMEOUT_COUNTER;
+ timeout = timeout - BNGE_HWRM_SHORT_MIN_TIMEOUT *
+ BNGE_HWRM_SHORT_TIMEOUT_COUNTER;
+ tmo_count += DIV_ROUND_UP(timeout, BNGE_HWRM_MIN_TIMEOUT);
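+	/*
+	 * Editorial worked example (assuming the defaults in bnge_hwrm.h):
+	 * a 500 msec command timeout becomes 500000 usec, the short phase
+	 * accounts for 5 * 3 = 15 usec, and the remaining 499985 usec yield
+	 * DIV_ROUND_UP(499985, 25) = 20000 standard iterations, so
+	 * tmo_count = 5 + 20000 = 20005 polls in total.
+	 */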
+
+ if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) {
+ /* Wait until hwrm response cmpl interrupt is processed */
+ while (READ_ONCE(token->state) < BNGE_HWRM_COMPLETE &&
+ i++ < tmo_count) {
+ /* on first few passes, just barely sleep */
+ if (i < BNGE_HWRM_SHORT_TIMEOUT_COUNTER) {
+ usleep_range(BNGE_HWRM_SHORT_MIN_TIMEOUT,
+ BNGE_HWRM_SHORT_MAX_TIMEOUT);
+ } else {
+ usleep_range(BNGE_HWRM_MIN_TIMEOUT,
+ BNGE_HWRM_MAX_TIMEOUT);
+ }
+ }
+
+ if (READ_ONCE(token->state) != BNGE_HWRM_COMPLETE) {
+ bnge_hwrm_err(bd, ctx, "No hwrm cmpl received: 0x%x\n",
+ req_type);
+ goto exit;
+ }
+ len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
+ valid = ((u8 *)ctx->resp) + len - 1;
+ } else {
+ __le16 seen_out_of_seq = ctx->req->seq_id; /* will never see */
+ int j;
+
+ /* Check if response len is updated */
+ for (i = 0; i < tmo_count; i++) {
+ if (token &&
+ READ_ONCE(token->state) == BNGE_HWRM_DEFERRED) {
+ bnge_hwrm_destroy_token(bd, token);
+ token = NULL;
+ }
+
+ len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
+ if (len) {
+ __le16 resp_seq = READ_ONCE(ctx->resp->seq_id);
+
+ if (resp_seq == ctx->req->seq_id)
+ break;
+ if (resp_seq != seen_out_of_seq) {
+ dev_warn(bd->dev, "Discarding out of seq response: 0x%x for msg {0x%x 0x%x}\n",
+ le16_to_cpu(resp_seq), req_type, le16_to_cpu(ctx->req->seq_id));
+ seen_out_of_seq = resp_seq;
+ }
+ }
+
+ /* on first few passes, just barely sleep */
+ if (i < BNGE_HWRM_SHORT_TIMEOUT_COUNTER) {
+ usleep_range(BNGE_HWRM_SHORT_MIN_TIMEOUT,
+ BNGE_HWRM_SHORT_MAX_TIMEOUT);
+ } else {
+ usleep_range(BNGE_HWRM_MIN_TIMEOUT,
+ BNGE_HWRM_MAX_TIMEOUT);
+ }
+ }
+
+ if (i >= tmo_count) {
+ bnge_hwrm_err(bd, ctx,
+ "Error (timeout: %u) msg {0x%x 0x%x} len:%d\n",
+ bnge_hwrm_timeout(i), req_type,
+ le16_to_cpu(ctx->req->seq_id), len);
+ goto exit;
+ }
+
+ /* Last byte of resp contains valid bit */
+ valid = ((u8 *)ctx->resp) + len - 1;
+ for (j = 0; j < BNGE_HWRM_FIN_WAIT_USEC; ) {
+ /* make sure we read from updated DMA memory */
+ dma_rmb();
+ if (*valid)
+ break;
+ if (j < 10) {
+ udelay(1);
+ j++;
+ } else {
+ usleep_range(20, 30);
+ j += 20;
+ }
+ }
+
+ if (j >= BNGE_HWRM_FIN_WAIT_USEC) {
+ bnge_hwrm_err(bd, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n",
+ bnge_hwrm_timeout(i) + j, req_type,
+ le16_to_cpu(ctx->req->seq_id), len, *valid);
+ goto exit;
+ }
+ }
+
+	/* Zero the valid bit for compatibility. A valid bit in an older spec
+	 * may become a new field in a newer spec. We must make sure that
+	 * a new field not implemented by the old spec reads back as zero.
+	 */
+ *valid = 0;
+ rc = le16_to_cpu(ctx->resp->error_code);
+ if (rc == HWRM_ERR_CODE_BUSY && !(ctx->flags & BNGE_HWRM_CTX_SILENT))
+ dev_warn(bd->dev, "FW returned busy, hwrm req_type 0x%x\n",
+ req_type);
+ else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE)
+ bnge_hwrm_err(bd, ctx, "hwrm req_type 0x%x seq id 0x%x error %d\n",
+ req_type, le16_to_cpu(ctx->req->seq_id), rc);
+ rc = bnge_map_hwrm_error(rc);
+
+exit:
+ if (token)
+ bnge_hwrm_destroy_token(bd, token);
+ if (ctx->flags & BNGE_HWRM_INTERNAL_CTX_OWNED)
+ ctx->flags |= BNGE_HWRM_INTERNAL_RESP_DIRTY;
+ else
+ __hwrm_ctx_invalidate(bd, ctx);
+ return rc;
+}
+
+int bnge_hwrm_req_send(struct bnge_dev *bd, void *req)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+
+ if (!ctx)
+ return -EINVAL;
+
+ return __hwrm_send_ctx(bd, ctx);
+}
+
+int bnge_hwrm_req_send_silent(struct bnge_dev *bd, void *req)
+{
+ bnge_hwrm_req_flags(bd, req, BNGE_HWRM_CTX_SILENT);
+ return bnge_hwrm_req_send(bd, req);
+}
+
+void *
+bnge_hwrm_req_dma_slice(struct bnge_dev *bd, void *req, u32 size,
+ dma_addr_t *dma_handle)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+ u8 *end = ((u8 *)req) + BNGE_HWRM_DMA_SIZE;
+ struct input *input = req;
+ u8 *addr, *req_addr = req;
+ u32 max_offset, offset;
+
+ if (!ctx)
+ return NULL;
+
+ max_offset = BNGE_HWRM_DMA_SIZE - ctx->allocated;
+ offset = max_offset - size;
+ offset = ALIGN_DOWN(offset, BNGE_HWRM_DMA_ALIGN);
+ addr = req_addr + offset;
+
+ if (addr < req_addr + max_offset && req_addr + ctx->req_len <= addr) {
+ ctx->allocated = end - addr;
+ *dma_handle = ctx->dma_handle + offset;
+ return addr;
+ }
+
+ if (ctx->slice_addr) {
+ dev_err(bd->dev, "HWRM refusing to reallocate DMA slice, req_type = %u\n",
+ (u32)le16_to_cpu(input->req_type));
+ dump_stack();
+ return NULL;
+ }
+
+ addr = dma_alloc_coherent(bd->dev, size, dma_handle, ctx->gfp);
+ if (!addr)
+ return NULL;
+
+ ctx->slice_addr = addr;
+ ctx->slice_size = size;
+ ctx->slice_handle = *dma_handle;
+
+ return addr;
+}
+
+void bnge_cleanup_hwrm_resources(struct bnge_dev *bd)
+{
+ struct bnge_hwrm_wait_token *token;
+
+ dma_pool_destroy(bd->hwrm_dma_pool);
+ bd->hwrm_dma_pool = NULL;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(token, &bd->hwrm_pending_list, node)
+ WRITE_ONCE(token->state, BNGE_HWRM_CANCELLED);
+ rcu_read_unlock();
+}
+
+int bnge_init_hwrm_resources(struct bnge_dev *bd)
+{
+ bd->hwrm_dma_pool = dma_pool_create("bnge_hwrm", bd->dev,
+ BNGE_HWRM_DMA_SIZE,
+ BNGE_HWRM_DMA_ALIGN, 0);
+ if (!bd->hwrm_dma_pool)
+ return -ENOMEM;
+
+ INIT_HLIST_HEAD(&bd->hwrm_pending_list);
+ mutex_init(&bd->hwrm_cmd_lock);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h
new file mode 100644
index 000000000000..6df629761d95
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_HWRM_H_
+#define _BNGE_HWRM_H_
+
+#include <linux/bnxt/hsi.h>
+
+enum bnge_hwrm_ctx_flags {
+ BNGE_HWRM_INTERNAL_CTX_OWNED = BIT(0),
+ BNGE_HWRM_INTERNAL_RESP_DIRTY = BIT(1),
+ BNGE_HWRM_CTX_SILENT = BIT(2),
+ BNGE_HWRM_FULL_WAIT = BIT(3),
+};
+
+#define BNGE_HWRM_API_FLAGS (BNGE_HWRM_CTX_SILENT | BNGE_HWRM_FULL_WAIT)
+
+struct bnge_hwrm_ctx {
+ u64 sentinel;
+ dma_addr_t dma_handle;
+ struct output *resp;
+ struct input *req;
+ dma_addr_t slice_handle;
+ void *slice_addr;
+ u32 slice_size;
+ u32 req_len;
+ enum bnge_hwrm_ctx_flags flags;
+ unsigned int timeout;
+ u32 allocated;
+ gfp_t gfp;
+};
+
+enum bnge_hwrm_wait_state {
+ BNGE_HWRM_PENDING,
+ BNGE_HWRM_DEFERRED,
+ BNGE_HWRM_COMPLETE,
+ BNGE_HWRM_CANCELLED,
+};
+
+enum bnge_hwrm_chnl { BNGE_HWRM_CHNL_CHIMP, BNGE_HWRM_CHNL_KONG };
+
+struct bnge_hwrm_wait_token {
+ struct rcu_head rcu;
+ struct hlist_node node;
+ enum bnge_hwrm_wait_state state;
+ enum bnge_hwrm_chnl dst;
+ u16 seq_id;
+};
+
+#define BNGE_DFLT_HWRM_CMD_TIMEOUT 500
+
+#define BNGE_GRCPF_REG_CHIMP_COMM 0x0
+#define BNGE_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100
+
+#define BNGE_HWRM_MAX_REQ_LEN (bd->hwrm_max_req_len)
+#define BNGE_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
+#define BNGE_HWRM_CMD_MAX_TIMEOUT 40000U
+#define BNGE_SHORT_HWRM_CMD_TIMEOUT 20
+#define BNGE_HWRM_CMD_TIMEOUT (bd->hwrm_cmd_timeout)
+#define BNGE_HWRM_RESET_TIMEOUT ((BNGE_HWRM_CMD_TIMEOUT) * 4)
+#define BNGE_HWRM_TARGET 0xffff
+#define BNGE_HWRM_NO_CMPL_RING -1
+#define BNGE_HWRM_REQ_MAX_SIZE 128
+#define BNGE_HWRM_DMA_SIZE (2 * PAGE_SIZE) /* space for req+resp */
+#define BNGE_HWRM_RESP_RESERVED PAGE_SIZE
+#define BNGE_HWRM_RESP_OFFSET (BNGE_HWRM_DMA_SIZE - \
+ BNGE_HWRM_RESP_RESERVED)
+#define BNGE_HWRM_CTX_OFFSET (BNGE_HWRM_RESP_OFFSET - \
+ sizeof(struct bnge_hwrm_ctx))
+#define BNGE_HWRM_DMA_ALIGN 16
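+/*
+ * Editorial sketch of the per-request DMA buffer layout implied by the
+ * offsets above (see bnge_hwrm_req_create() and bnge_hwrm_req_dma_slice()):
+ *
+ *	[0, BNGE_HWRM_CTX_OFFSET)            request; inline DMA slices are
+ *	                                     carved downward from the top
+ *	[BNGE_HWRM_CTX_OFFSET, RESP_OFFSET)  struct bnge_hwrm_ctx
+ *	[BNGE_HWRM_RESP_OFFSET, DMA_SIZE)    response page
+ */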
+#define BNGE_HWRM_SENTINEL 0xb6e1f68a12e9a7eb /* arbitrary value */
+#define BNGE_HWRM_SHORT_MIN_TIMEOUT 3
+#define BNGE_HWRM_SHORT_MAX_TIMEOUT 10
+#define BNGE_HWRM_SHORT_TIMEOUT_COUNTER 5
+
+#define BNGE_HWRM_MIN_TIMEOUT 25
+#define BNGE_HWRM_MAX_TIMEOUT 40
+
+static inline unsigned int bnge_hwrm_timeout(unsigned int n)
+{
+ return n <= BNGE_HWRM_SHORT_TIMEOUT_COUNTER ?
+ n * BNGE_HWRM_SHORT_MIN_TIMEOUT :
+ BNGE_HWRM_SHORT_TIMEOUT_COUNTER *
+ BNGE_HWRM_SHORT_MIN_TIMEOUT +
+ (n - BNGE_HWRM_SHORT_TIMEOUT_COUNTER) *
+ BNGE_HWRM_MIN_TIMEOUT;
+}
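+/*
+ * Editorial example: bnge_hwrm_timeout(5) = 15 usec of accumulated sleep
+ * (short phase only), while bnge_hwrm_timeout(20005) = 5 * 3 + 20000 * 25 =
+ * 500015 usec, i.e. roughly the default 500 msec command timeout.
+ */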
+
+#define BNGE_HWRM_FIN_WAIT_USEC 50000
+
+void bnge_cleanup_hwrm_resources(struct bnge_dev *bd);
+int bnge_init_hwrm_resources(struct bnge_dev *bd);
+
+int bnge_hwrm_req_create(struct bnge_dev *bd, void **req, u16 req_type,
+ u32 req_len);
+#define bnge_hwrm_req_init(bd, req, req_type) \
+ bnge_hwrm_req_create((bd), (void **)&(req), (req_type), \
+ sizeof(*(req)))
+void *bnge_hwrm_req_hold(struct bnge_dev *bd, void *req);
+void bnge_hwrm_req_drop(struct bnge_dev *bd, void *req);
+void bnge_hwrm_req_flags(struct bnge_dev *bd, void *req,
+ enum bnge_hwrm_ctx_flags flags);
+void bnge_hwrm_req_timeout(struct bnge_dev *bd, void *req,
+ unsigned int timeout);
+int bnge_hwrm_req_send(struct bnge_dev *bd, void *req);
+int bnge_hwrm_req_send_silent(struct bnge_dev *bd, void *req);
+void bnge_hwrm_req_alloc_flags(struct bnge_dev *bd, void *req, gfp_t flags);
+void *bnge_hwrm_req_dma_slice(struct bnge_dev *bd, void *req, u32 size,
+ dma_addr_t *dma);
+int bnge_hwrm_req_replace(struct bnge_dev *bd, void *req, void *new_req,
+ u32 len);
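+/*
+ * Editorial usage sketch (not part of the original patch), matching the
+ * callers in bnge_hwrm_lib.c: a request that needs the response is held
+ * across the send and explicitly dropped afterwards, e.g.
+ *
+ *	struct hwrm_ver_get_output *resp;
+ *	struct hwrm_ver_get_input *req;
+ *	int rc;
+ *
+ *	rc = bnge_hwrm_req_init(bd, req, HWRM_VER_GET);
+ *	if (rc)
+ *		return rc;
+ *	resp = bnge_hwrm_req_hold(bd, req);
+ *	rc = bnge_hwrm_req_send(bd, req);
+ *	if (!rc)
+ *		... consume resp ...
+ *	bnge_hwrm_req_drop(bd, req);
+ *
+ * Fire-and-forget callers skip the hold and just init + send; an unheld
+ * request is released by the send path itself (see __hwrm_send_ctx()).
+ */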
+#endif /* _BNGE_HWRM_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c
new file mode 100644
index 000000000000..198f49b40dbf
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c
@@ -0,0 +1,1185 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/pci.h>
+#include <linux/bnxt/hsi.h>
+#include <linux/if_vlan.h>
+#include <net/netdev_queues.h>
+
+#include "bnge.h"
+#include "bnge_hwrm.h"
+#include "bnge_hwrm_lib.h"
+#include "bnge_rmem.h"
+#include "bnge_resc.h"
+
+int bnge_hwrm_ver_get(struct bnge_dev *bd)
+{
+ u32 dev_caps_cfg, hwrm_ver, hwrm_spec_code;
+ u16 fw_maj, fw_min, fw_bld, fw_rsv;
+ struct hwrm_ver_get_output *resp;
+ struct hwrm_ver_get_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VER_GET);
+ if (rc)
+ return rc;
+
+ bnge_hwrm_req_flags(bd, req, BNGE_HWRM_FULL_WAIT);
+ bd->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
+ req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
+ req->hwrm_intf_min = HWRM_VERSION_MINOR;
+ req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ goto hwrm_ver_get_exit;
+
+ memcpy(&bd->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
+
+ hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
+ resp->hwrm_intf_min_8b << 8 |
+ resp->hwrm_intf_upd_8b;
+ hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
+ HWRM_VERSION_UPDATE;
+
+ if (hwrm_spec_code > hwrm_ver)
+ snprintf(bd->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
+ HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
+ HWRM_VERSION_UPDATE);
+ else
+ snprintf(bd->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
+ resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
+ resp->hwrm_intf_upd_8b);
+
+ fw_maj = le16_to_cpu(resp->hwrm_fw_major);
+ fw_min = le16_to_cpu(resp->hwrm_fw_minor);
+ fw_bld = le16_to_cpu(resp->hwrm_fw_build);
+ fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
+
+ bd->fw_ver_code = BNGE_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
+ snprintf(bd->fw_ver_str, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ fw_maj, fw_min, fw_bld, fw_rsv);
+
+ if (strlen(resp->active_pkg_name)) {
+ int fw_ver_len = strlen(bd->fw_ver_str);
+
+ snprintf(bd->fw_ver_str + fw_ver_len,
+ FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
+ resp->active_pkg_name);
+ bd->fw_cap |= BNGE_FW_CAP_PKG_VER;
+ }
+
+ bd->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
+ if (!bd->hwrm_cmd_timeout)
+ bd->hwrm_cmd_timeout = BNGE_DFLT_HWRM_CMD_TIMEOUT;
+ bd->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
+ if (!bd->hwrm_cmd_max_timeout)
+ bd->hwrm_cmd_max_timeout = BNGE_HWRM_CMD_MAX_TIMEOUT;
+ else if (bd->hwrm_cmd_max_timeout > BNGE_HWRM_CMD_MAX_TIMEOUT)
+ dev_warn(bd->dev, "Default HWRM commands max timeout increased to %d seconds\n",
+ bd->hwrm_cmd_max_timeout / 1000);
+
+ bd->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
+ bd->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
+
+ if (bd->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
+ bd->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
+
+ bd->chip_num = le16_to_cpu(resp->chip_num);
+ bd->chip_rev = resp->chip_rev;
+
+ dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
+ if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
+ (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
+ bd->fw_cap |= BNGE_FW_CAP_SHORT_CMD;
+
+ if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
+ bd->fw_cap |= BNGE_FW_CAP_KONG_MB_CHNL;
+
+ if (dev_caps_cfg &
+ VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
+ bd->fw_cap |= BNGE_FW_CAP_CFA_ADV_FLOW;
+
+hwrm_ver_get_exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int
+bnge_hwrm_nvm_dev_info(struct bnge_dev *bd,
+ struct hwrm_nvm_get_dev_info_output *nvm_info)
+{
+ struct hwrm_nvm_get_dev_info_output *resp;
+ struct hwrm_nvm_get_dev_info_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_NVM_GET_DEV_INFO);
+ if (rc)
+ return rc;
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (!rc)
+ memcpy(nvm_info, resp, sizeof(*resp));
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_func_reset(struct bnge_dev *bd)
+{
+ struct hwrm_func_reset_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_RESET);
+ if (rc)
+ return rc;
+
+ req->enables = 0;
+ bnge_hwrm_req_timeout(bd, req, BNGE_HWRM_RESET_TIMEOUT);
+ return bnge_hwrm_req_send(bd, req);
+}
+
+int bnge_hwrm_fw_set_time(struct bnge_dev *bd)
+{
+ struct hwrm_fw_set_time_input *req;
+ struct tm tm;
+ int rc;
+
+ time64_to_tm(ktime_get_real_seconds(), 0, &tm);
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FW_SET_TIME);
+ if (rc)
+ return rc;
+
+ req->year = cpu_to_le16(1900 + tm.tm_year);
+ req->month = 1 + tm.tm_mon;
+ req->day = tm.tm_mday;
+ req->hour = tm.tm_hour;
+ req->minute = tm.tm_min;
+ req->second = tm.tm_sec;
+ return bnge_hwrm_req_send(bd, req);
+}
+
+int bnge_hwrm_func_drv_rgtr(struct bnge_dev *bd)
+{
+ struct hwrm_func_drv_rgtr_output *resp;
+ struct hwrm_func_drv_rgtr_input *req;
+ u32 flags;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_DRV_RGTR);
+ if (rc)
+ return rc;
+
+ req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
+ FUNC_DRV_RGTR_REQ_ENABLES_VER |
+ FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
+
+ req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
+ flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
+
+ req->flags = cpu_to_le32(flags);
+ req->ver_maj_8b = DRV_VER_MAJ;
+ req->ver_min_8b = DRV_VER_MIN;
+ req->ver_upd_8b = DRV_VER_UPD;
+ req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
+ req->ver_min = cpu_to_le16(DRV_VER_MIN);
+ req->ver_upd = cpu_to_le16(DRV_VER_UPD);
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (!rc) {
+ set_bit(BNGE_STATE_DRV_REGISTERED, &bd->state);
+ if (resp->flags &
+ cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
+ bd->fw_cap |= BNGE_FW_CAP_IF_CHANGE;
+ }
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_func_drv_unrgtr(struct bnge_dev *bd)
+{
+ struct hwrm_func_drv_unrgtr_input *req;
+ int rc;
+
+ if (!test_and_clear_bit(BNGE_STATE_DRV_REGISTERED, &bd->state))
+ return 0;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_DRV_UNRGTR);
+ if (rc)
+ return rc;
+ return bnge_hwrm_req_send(bd, req);
+}
+
+static void bnge_init_ctx_initializer(struct bnge_ctx_mem_type *ctxm,
+ u8 init_val, u8 init_offset,
+ bool init_mask_set)
+{
+ ctxm->init_value = init_val;
+ ctxm->init_offset = BNGE_CTX_INIT_INVALID_OFFSET;
+ if (init_mask_set)
+ ctxm->init_offset = init_offset * 4;
+ else
+ ctxm->init_value = 0;
+}
+
+static int bnge_alloc_all_ctx_pg_info(struct bnge_dev *bd, int ctx_max)
+{
+ struct bnge_ctx_mem_info *ctx = bd->ctx;
+ u16 type;
+
+ for (type = 0; type < ctx_max; type++) {
+ struct bnge_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ int n = 1;
+
+ if (!ctxm->max_entries)
+ continue;
+
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL);
+ if (!ctxm->pg_info)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+#define BNGE_CTX_INIT_VALID(flags) \
+ (!!((flags) & \
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))
+
+int bnge_hwrm_func_backing_store_qcaps(struct bnge_dev *bd)
+{
+ struct hwrm_func_backing_store_qcaps_v2_output *resp;
+ struct hwrm_func_backing_store_qcaps_v2_input *req;
+ struct bnge_ctx_mem_info *ctx;
+ u16 type;
+ int rc;
+
+ if (bd->ctx)
+ return 0;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
+ if (rc)
+ return rc;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ bd->ctx = ctx;
+
+ resp = bnge_hwrm_req_hold(bd, req);
+
+ for (type = 0; type < BNGE_CTX_V2_MAX; ) {
+ struct bnge_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ u8 init_val, init_off, i;
+ __le32 *p;
+ u32 flags;
+
+ req->type = cpu_to_le16(type);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ goto ctx_done;
+ flags = le32_to_cpu(resp->flags);
+ type = le16_to_cpu(resp->next_valid_type);
+ if (!(flags &
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID))
+ continue;
+
+ ctxm->type = le16_to_cpu(resp->type);
+ ctxm->entry_size = le16_to_cpu(resp->entry_size);
+ ctxm->flags = flags;
+ ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
+ ctxm->entry_multiple = resp->entry_multiple;
+ ctxm->max_entries = le32_to_cpu(resp->max_num_entries);
+ ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
+ init_val = resp->ctx_init_value;
+ init_off = resp->ctx_init_offset;
+ bnge_init_ctx_initializer(ctxm, init_val, init_off,
+ BNGE_CTX_INIT_VALID(flags));
+ ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
+ BNGE_MAX_SPLIT_ENTRY);
+ for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
+ i++, p++)
+ ctxm->split[i] = le32_to_cpu(*p);
+ }
+ rc = bnge_alloc_all_ctx_pg_info(bd, BNGE_CTX_V2_MAX);
+
+ctx_done:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+static void bnge_hwrm_set_pg_attr(struct bnge_ring_mem_info *rmem, u8 *pg_attr,
+ __le64 *pg_dir)
+{
+ if (!rmem->nr_pages)
+ return;
+
+ BNGE_SET_CTX_PAGE_ATTR(*pg_attr);
+ if (rmem->depth >= 1) {
+ if (rmem->depth == 2)
+ *pg_attr |= 2;
+ else
+ *pg_attr |= 1;
+ *pg_dir = cpu_to_le64(rmem->dma_pg_tbl);
+ } else {
+ *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
+ }
+}
+
+int bnge_hwrm_func_backing_store(struct bnge_dev *bd,
+ struct bnge_ctx_mem_type *ctxm,
+ bool last)
+{
+ struct hwrm_func_backing_store_cfg_v2_input *req;
+ u32 instance_bmap = ctxm->instance_bmap;
+ int i, j, rc = 0, n = 1;
+ __le32 *p;
+
+ if (!(ctxm->flags & BNGE_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
+ return 0;
+
+ if (instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ else
+ instance_bmap = 1;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
+ if (rc)
+ return rc;
+ bnge_hwrm_req_hold(bd, req);
+ req->type = cpu_to_le16(ctxm->type);
+ req->entry_size = cpu_to_le16(ctxm->entry_size);
+ req->subtype_valid_cnt = ctxm->split_entry_cnt;
+ for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
+ p[i] = cpu_to_le32(ctxm->split[i]);
+ for (i = 0, j = 0; j < n && !rc; i++) {
+ struct bnge_ctx_pg_info *ctx_pg;
+
+ if (!(instance_bmap & (1 << i)))
+ continue;
+ req->instance = cpu_to_le16(i);
+ ctx_pg = &ctxm->pg_info[j++];
+ if (!ctx_pg->entries)
+ continue;
+ req->num_entries = cpu_to_le32(ctx_pg->entries);
+ bnge_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req->page_size_pbl_level,
+ &req->page_dir);
+ if (last && j == n)
+ req->flags =
+ cpu_to_le32(BNGE_BS_CFG_ALL_DONE);
+ rc = bnge_hwrm_req_send(bd, req);
+ }
+ bnge_hwrm_req_drop(bd, req);
+
+ return rc;
+}
+
+static int bnge_hwrm_get_rings(struct bnge_dev *bd)
+{
+ struct bnge_hw_resc *hw_resc = &bd->hw_resc;
+ struct hwrm_func_qcfg_output *resp;
+ struct hwrm_func_qcfg_input *req;
+ u16 cp, stats;
+ u16 rx, tx;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_QCFG);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc) {
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+ }
+
+ hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
+ hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
+ hw_resc->resv_hw_ring_grps =
+ le32_to_cpu(resp->alloc_hw_ring_grps);
+ hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
+ hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx);
+ cp = le16_to_cpu(resp->alloc_cmpl_rings);
+ stats = le16_to_cpu(resp->alloc_stat_ctx);
+ hw_resc->resv_irqs = cp;
+ rx = hw_resc->resv_rx_rings;
+ tx = hw_resc->resv_tx_rings;
+ if (bnge_is_agg_reqd(bd))
+ rx >>= 1;
+ if (cp < (rx + tx)) {
+ rc = bnge_fix_rings_count(&rx, &tx, cp, false);
+ if (rc)
+ goto get_rings_exit;
+ if (bnge_is_agg_reqd(bd))
+ rx <<= 1;
+ hw_resc->resv_rx_rings = rx;
+ hw_resc->resv_tx_rings = tx;
+ }
+ hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
+ hw_resc->resv_hw_ring_grps = rx;
+ hw_resc->resv_cp_rings = cp;
+ hw_resc->resv_stat_ctxs = stats;
+
+get_rings_exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+static struct hwrm_func_cfg_input *
+__bnge_hwrm_reserve_pf_rings(struct bnge_dev *bd, struct bnge_hw_rings *hwr)
+{
+ struct hwrm_func_cfg_input *req;
+ u32 enables = 0;
+
+	if (bnge_hwrm_req_init(bd, req, HWRM_FUNC_CFG))
+ return NULL;
+
+ req->fid = cpu_to_le16(0xffff);
+ enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+ req->num_tx_rings = cpu_to_le16(hwr->tx);
+
+ enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+ enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= hwr->nq ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
+ enables |= hwr->cmpl ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
+ enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
+ enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+
+ req->num_rx_rings = cpu_to_le16(hwr->rx);
+ req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
+ req->num_cmpl_rings = cpu_to_le16(hwr->cmpl);
+ req->num_msix = cpu_to_le16(hwr->nq);
+ req->num_stat_ctxs = cpu_to_le16(hwr->stat);
+ req->num_vnics = cpu_to_le16(hwr->vnic);
+ req->enables = cpu_to_le32(enables);
+
+ return req;
+}
+
+static int
+bnge_hwrm_reserve_pf_rings(struct bnge_dev *bd, struct bnge_hw_rings *hwr)
+{
+ struct hwrm_func_cfg_input *req;
+ int rc;
+
+ req = __bnge_hwrm_reserve_pf_rings(bd, hwr);
+ if (!req)
+ return -ENOMEM;
+
+ if (!req->enables) {
+ bnge_hwrm_req_drop(bd, req);
+ return 0;
+ }
+
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ return rc;
+
+ return bnge_hwrm_get_rings(bd);
+}
+
+int bnge_hwrm_reserve_rings(struct bnge_dev *bd, struct bnge_hw_rings *hwr)
+{
+ return bnge_hwrm_reserve_pf_rings(bd, hwr);
+}
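+/*
+ * Editorial sketch of a hypothetical caller (the ring counts below are
+ * made-up values; only the field names come from the bnge_hw_rings usage
+ * above):
+ *
+ *	struct bnge_hw_rings hwr = { .tx = 8, .rx = 8, .cmpl = 16, .nq = 16,
+ *				     .stat = 16, .vnic = 1, .rss_ctx = 1 };
+ *
+ *	rc = bnge_hwrm_reserve_rings(bd, &hwr);
+ *
+ * On success the reserved counts are refreshed via bnge_hwrm_get_rings().
+ */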
+
+int bnge_hwrm_func_qcfg(struct bnge_dev *bd)
+{
+ struct hwrm_func_qcfg_output *resp;
+ struct hwrm_func_qcfg_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_QCFG);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ goto func_qcfg_exit;
+
+ bd->max_mtu = le16_to_cpu(resp->max_mtu_configured);
+ if (!bd->max_mtu)
+ bd->max_mtu = BNGE_MAX_MTU;
+
+ if (bd->db_size)
+ goto func_qcfg_exit;
+
+ bd->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
+ bd->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
+ 1024);
+ if (!bd->db_size || bd->db_size > pci_resource_len(bd->pdev, 2) ||
+ bd->db_size <= bd->db_offset)
+ bd->db_size = pci_resource_len(bd->pdev, 2);
+
+func_qcfg_exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_func_resc_qcaps(struct bnge_dev *bd)
+{
+ struct hwrm_func_resource_qcaps_output *resp;
+ struct bnge_hw_resc *hw_resc = &bd->hw_resc;
+ struct hwrm_func_resource_qcaps_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_RESOURCE_QCAPS);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send_silent(bd, req);
+ if (rc)
+ goto hwrm_func_resc_qcaps_exit;
+
+ hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
+ hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
+ hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+ hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
+ hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+ hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
+ hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+ hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
+ hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+ hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
+ hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
+ hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
+ hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+ hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
+ hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
+ hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
+ hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+
+ hw_resc->max_nqs = le16_to_cpu(resp->max_msix);
+ hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
+
+hwrm_func_resc_qcaps_exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_func_qcaps(struct bnge_dev *bd)
+{
+ struct hwrm_func_qcaps_output *resp;
+ struct hwrm_func_qcaps_input *req;
+ struct bnge_pf_info *pf = &bd->pf;
+ u32 flags;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_QCAPS);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ goto hwrm_func_qcaps_exit;
+
+ flags = le32_to_cpu(resp->flags);
+ if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
+ bd->flags |= BNGE_EN_ROCE_V1;
+ if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
+ bd->flags |= BNGE_EN_ROCE_V2;
+
+ pf->fw_fid = le16_to_cpu(resp->fid);
+ pf->port_id = le16_to_cpu(resp->port_id);
+ memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
+
+ bd->tso_max_segs = le16_to_cpu(resp->max_tso_segs);
+
+hwrm_func_qcaps_exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_vnic_qcaps(struct bnge_dev *bd)
+{
+ struct hwrm_vnic_qcaps_output *resp;
+ struct hwrm_vnic_qcaps_input *req;
+ int rc;
+
+ bd->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
+ bd->rss_cap &= ~BNGE_RSS_CAP_NEW_RSS_CAP;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_QCAPS);
+ if (rc)
+ return rc;
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (!rc) {
+ u32 flags = le32_to_cpu(resp->flags);
+
+ if (flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP)
+ bd->fw_cap |= BNGE_FW_CAP_VLAN_RX_STRIP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
+ bd->rss_cap |= BNGE_RSS_CAP_RSS_HASH_TYPE_DELTA;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED)
+ bd->rss_cap |= BNGE_RSS_CAP_RSS_TCAM;
+ bd->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
+ if (bd->max_tpa_v2)
+ bd->hw_ring_stats_size = BNGE_RING_STATS_SIZE;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
+ bd->fw_cap |= BNGE_FW_CAP_VNIC_TUNNEL_TPA;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP)
+ bd->rss_cap |= BNGE_RSS_CAP_AH_V4_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP)
+ bd->rss_cap |= BNGE_RSS_CAP_AH_V6_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP)
+ bd->rss_cap |= BNGE_RSS_CAP_ESP_V4_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
+ bd->rss_cap |= BNGE_RSS_CAP_ESP_V6_RSS_CAP;
+ }
+ bnge_hwrm_req_drop(bd, req);
+
+ return rc;
+}
+
+#define BNGE_CNPQ(q_profile) \
+ ((q_profile) == \
+ QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP)
+
+int bnge_hwrm_queue_qportcfg(struct bnge_dev *bd)
+{
+ struct hwrm_queue_qportcfg_output *resp;
+ struct hwrm_queue_qportcfg_input *req;
+ u8 i, j, *qptr;
+ bool no_rdma;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_QUEUE_QPORTCFG);
+ if (rc)
+ return rc;
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ goto qportcfg_exit;
+
+ if (!resp->max_configurable_queues) {
+ rc = -EINVAL;
+ goto qportcfg_exit;
+ }
+ bd->max_tc = resp->max_configurable_queues;
+ bd->max_lltc = resp->max_configurable_lossless_queues;
+ if (bd->max_tc > BNGE_MAX_QUEUE)
+ bd->max_tc = BNGE_MAX_QUEUE;
+
+ no_rdma = !bnge_is_roce_en(bd);
+ qptr = &resp->queue_id0;
+ for (i = 0, j = 0; i < bd->max_tc; i++) {
+ bd->q_info[j].queue_id = *qptr;
+ bd->q_ids[i] = *qptr++;
+ bd->q_info[j].queue_profile = *qptr++;
+ bd->tc_to_qidx[j] = j;
+ if (!BNGE_CNPQ(bd->q_info[j].queue_profile) || no_rdma)
+ j++;
+ }
+ bd->max_q = bd->max_tc;
+ bd->max_tc = max_t(u8, j, 1);
+
+ if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
+ bd->max_tc = 1;
+
+ if (bd->max_lltc > bd->max_tc)
+ bd->max_lltc = bd->max_tc;
+
+qportcfg_exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_vnic_set_hds(struct bnge_net *bn, struct bnge_vnic_info *vnic)
+{
+ u16 hds_thresh = (u16)bn->netdev->cfg_pending->hds_thresh;
+ struct hwrm_vnic_plcmodes_cfg_input *req;
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_PLCMODES_CFG);
+ if (rc)
+ return rc;
+
+ req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
+ req->enables = cpu_to_le32(BNGE_PLC_EN_JUMBO_THRES_VALID);
+ req->jumbo_thresh = cpu_to_le16(bn->rx_buf_use_size);
+
+ if (bnge_is_agg_reqd(bd)) {
+ req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
+ VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
+ req->enables |=
+ cpu_to_le32(BNGE_PLC_EN_HDS_THRES_VALID);
+ req->hds_threshold = cpu_to_le16(hds_thresh);
+ }
+ req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+ return bnge_hwrm_req_send(bd, req);
+}
+
+int bnge_hwrm_vnic_ctx_alloc(struct bnge_dev *bd,
+ struct bnge_vnic_info *vnic, u16 ctx_idx)
+{
+ struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
+ struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
+ if (rc)
+ return rc;
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (!rc)
+ vnic->fw_rss_cos_lb_ctx[ctx_idx] =
+ le16_to_cpu(resp->rss_cos_lb_ctx_id);
+ bnge_hwrm_req_drop(bd, req);
+
+ return rc;
+}
+
+static void
+__bnge_hwrm_vnic_set_rss(struct bnge_net *bn,
+ struct hwrm_vnic_rss_cfg_input *req,
+ struct bnge_vnic_info *vnic)
+{
+ struct bnge_dev *bd = bn->bd;
+
+ bnge_fill_hw_rss_tbl(bn, vnic);
+ req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
+
+ req->hash_type = cpu_to_le32(bd->rss_hash_cfg);
+ req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
+ req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
+ req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
+}
+
+int bnge_hwrm_vnic_set_rss(struct bnge_net *bn,
+ struct bnge_vnic_info *vnic, bool set_rss)
+{
+ struct hwrm_vnic_rss_cfg_input *req;
+ struct bnge_dev *bd = bn->bd;
+ dma_addr_t ring_tbl_map;
+ u32 i, nr_ctxs;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_CFG);
+ if (rc)
+ return rc;
+
+ req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+ if (!set_rss)
+ return bnge_hwrm_req_send(bd, req);
+
+ __bnge_hwrm_vnic_set_rss(bn, req, vnic);
+ ring_tbl_map = vnic->rss_table_dma_addr;
+ nr_ctxs = bnge_cal_nr_rss_ctxs(bd->rx_nr_rings);
+
+ bnge_hwrm_req_hold(bd, req);
+ for (i = 0; i < nr_ctxs; ring_tbl_map += BNGE_RSS_TABLE_SIZE, i++) {
+ req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
+ req->ring_table_pair_index = i;
+ req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ goto exit;
+ }
+
+exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_vnic_cfg(struct bnge_net *bn, struct bnge_vnic_info *vnic)
+{
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[0];
+ struct hwrm_vnic_cfg_input *req;
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_CFG);
+ if (rc)
+ return rc;
+
+ req->default_rx_ring_id =
+ cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
+ req->default_cmpl_ring_id =
+ cpu_to_le16(bnge_cp_ring_for_rx(rxr));
+ req->enables =
+ cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
+ VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
+ vnic->mru = bd->netdev->mtu + ETH_HLEN + VLAN_HLEN;
+ req->mru = cpu_to_le16(vnic->mru);
+
+ req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+
+ if (bd->flags & BNGE_EN_STRIP_VLAN)
+ req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
+ if (vnic->vnic_id == BNGE_VNIC_DEFAULT && bnge_aux_registered(bd))
+ req->flags |= cpu_to_le32(BNGE_VNIC_CFG_ROCE_DUAL_MODE);
+
+ return bnge_hwrm_req_send(bd, req);
+}
+
+void bnge_hwrm_update_rss_hash_cfg(struct bnge_net *bn)
+{
+ struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ struct hwrm_vnic_rss_qcfg_output *resp;
+ struct hwrm_vnic_rss_qcfg_input *req;
+ struct bnge_dev *bd = bn->bd;
+
+ if (bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_QCFG))
+ return;
+
+ req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+	/* all contexts use the same hash_type; context zero always exists */
+ req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
+ resp = bnge_hwrm_req_hold(bd, req);
+ if (!bnge_hwrm_req_send(bd, req))
+ bd->rss_hash_cfg =
+ le32_to_cpu(resp->hash_type) ?: bd->rss_hash_cfg;
+ bnge_hwrm_req_drop(bd, req);
+}
+
+int bnge_hwrm_l2_filter_free(struct bnge_dev *bd, struct bnge_l2_filter *fltr)
+{
+ struct hwrm_cfa_l2_filter_free_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_CFA_L2_FILTER_FREE);
+ if (rc)
+ return rc;
+
+ req->l2_filter_id = fltr->base.filter_id;
+ return bnge_hwrm_req_send(bd, req);
+}
+
+int bnge_hwrm_l2_filter_alloc(struct bnge_dev *bd, struct bnge_l2_filter *fltr)
+{
+ struct hwrm_cfa_l2_filter_alloc_output *resp;
+ struct hwrm_cfa_l2_filter_alloc_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_CFA_L2_FILTER_ALLOC);
+ if (rc)
+ return rc;
+
+ req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
+
+ req->flags |= cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
+ req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
+ req->enables =
+ cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
+ ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
+ eth_broadcast_addr(req->l2_addr_mask);
+
+ if (fltr->l2_key.vlan) {
+ req->enables |=
+ cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
+ req->num_vlans = 1;
+ req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
+ req->l2_ivlan_mask = cpu_to_le16(0xfff);
+ }
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (!rc)
+ fltr->base.filter_id = resp->l2_filter_id;
+
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_cfa_l2_set_rx_mask(struct bnge_dev *bd,
+ struct bnge_vnic_info *vnic)
+{
+ struct hwrm_cfa_l2_set_rx_mask_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_CFA_L2_SET_RX_MASK);
+ if (rc)
+ return rc;
+
+ req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+ if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
+ req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
+ req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
+ }
+ req->mask = cpu_to_le32(vnic->rx_mask);
+ return bnge_hwrm_req_send_silent(bd, req);
+}
+
+int bnge_hwrm_vnic_alloc(struct bnge_dev *bd, struct bnge_vnic_info *vnic,
+ unsigned int nr_rings)
+{
+ struct hwrm_vnic_alloc_output *resp;
+ struct hwrm_vnic_alloc_input *req;
+ unsigned int i;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_ALLOC);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < BNGE_MAX_CTX_PER_VNIC; i++)
+ vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
+ if (vnic->vnic_id == BNGE_VNIC_DEFAULT)
+ req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (!rc)
+ vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+void bnge_hwrm_vnic_free_one(struct bnge_dev *bd, struct bnge_vnic_info *vnic)
+{
+ if (vnic->fw_vnic_id != INVALID_HW_RING_ID) {
+ struct hwrm_vnic_free_input *req;
+
+ if (bnge_hwrm_req_init(bd, req, HWRM_VNIC_FREE))
+ return;
+
+ req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+
+ bnge_hwrm_req_send(bd, req);
+ vnic->fw_vnic_id = INVALID_HW_RING_ID;
+ }
+}
+
+void bnge_hwrm_vnic_ctx_free_one(struct bnge_dev *bd,
+ struct bnge_vnic_info *vnic, u16 ctx_idx)
+{
+ struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
+
+ if (bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
+ return;
+
+ req->rss_cos_lb_ctx_id =
+ cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]);
+
+ bnge_hwrm_req_send(bd, req);
+ vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
+}
+
+void bnge_hwrm_stat_ctx_free(struct bnge_net *bn)
+{
+ struct hwrm_stat_ctx_free_input *req;
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ if (bnge_hwrm_req_init(bd, req, HWRM_STAT_CTX_FREE))
+ return;
+
+ bnge_hwrm_req_hold(bd, req);
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+
+ if (nqr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
+ req->stat_ctx_id = cpu_to_le32(nqr->hw_stats_ctx_id);
+ bnge_hwrm_req_send(bd, req);
+
+ nqr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
+ }
+ }
+ bnge_hwrm_req_drop(bd, req);
+}
+
+int bnge_hwrm_stat_ctx_alloc(struct bnge_net *bn)
+{
+ struct hwrm_stat_ctx_alloc_output *resp;
+ struct hwrm_stat_ctx_alloc_input *req;
+ struct bnge_dev *bd = bn->bd;
+ int rc, i;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_STAT_CTX_ALLOC);
+ if (rc)
+ return rc;
+
+ req->stats_dma_length = cpu_to_le16(bd->hw_ring_stats_size);
+ req->update_period_ms = cpu_to_le32(bn->stats_coal_ticks / 1000);
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+
+ req->stats_dma_addr = cpu_to_le64(nqr->stats.hw_stats_map);
+
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ break;
+
+ nqr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
+ bn->grp_info[i].fw_stats_ctx = nqr->hw_stats_ctx_id;
+ }
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int hwrm_ring_free_send_msg(struct bnge_net *bn,
+ struct bnge_ring_struct *ring,
+ u32 ring_type, int cmpl_ring_id)
+{
+ struct hwrm_ring_free_input *req;
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_RING_FREE);
+ if (rc)
+ goto exit;
+
+ req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
+ req->ring_type = ring_type;
+ req->ring_id = cpu_to_le16(ring->fw_ring_id);
+
+ bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ bnge_hwrm_req_drop(bd, req);
+exit:
+ if (rc) {
+ netdev_err(bd->netdev, "hwrm_ring_free type %d failed. rc:%d\n", ring_type, rc);
+ return -EIO;
+ }
+ return 0;
+}
+
+int hwrm_ring_alloc_send_msg(struct bnge_net *bn,
+ struct bnge_ring_struct *ring,
+ u32 ring_type, u32 map_index)
+{
+ struct bnge_ring_mem_info *rmem = &ring->ring_mem;
+ struct bnge_ring_grp_info *grp_info;
+ struct hwrm_ring_alloc_output *resp;
+ struct hwrm_ring_alloc_input *req;
+ struct bnge_dev *bd = bn->bd;
+ u16 ring_id, flags = 0;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_RING_ALLOC);
+ if (rc)
+ goto exit;
+
+ req->enables = 0;
+ if (rmem->nr_pages > 1) {
+ req->page_tbl_addr = cpu_to_le64(rmem->dma_pg_tbl);
+ /* Page size is in log2 units */
+ req->page_size = BNGE_PAGE_SHIFT;
+ req->page_tbl_depth = 1;
+ } else {
+ req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
+ }
+ req->fbo = 0;
+ /* Association of ring index with doorbell index and MSIX number */
+ req->logical_id = cpu_to_le16(map_index);
+
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_TX: {
+ struct bnge_tx_ring_info *txr;
+
+ txr = container_of(ring, struct bnge_tx_ring_info,
+ tx_ring_struct);
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
+ /* Association of transmit ring with completion ring */
+ grp_info = &bn->grp_info[ring->grp_idx];
+ req->cmpl_ring_id = cpu_to_le16(bnge_cp_ring_for_tx(txr));
+ req->length = cpu_to_le32(bn->tx_ring_mask + 1);
+ req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+ req->queue_id = cpu_to_le16(ring->queue_id);
+ req->flags = cpu_to_le16(flags);
+ break;
+ }
+ case HWRM_RING_ALLOC_RX:
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+ req->length = cpu_to_le32(bn->rx_ring_mask + 1);
+
+ /* Association of rx ring with stats context */
+ grp_info = &bn->grp_info[ring->grp_idx];
+ req->rx_buf_size = cpu_to_le16(bn->rx_buf_use_size);
+ req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+ req->enables |=
+ cpu_to_le32(RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
+ if (NET_IP_ALIGN == 2)
+ flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
+ req->flags = cpu_to_le16(flags);
+ break;
+ case HWRM_RING_ALLOC_AGG:
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
+ /* Association of agg ring with rx ring */
+ grp_info = &bn->grp_info[ring->grp_idx];
+ req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
+ req->rx_buf_size = cpu_to_le16(BNGE_RX_PAGE_SIZE);
+ req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+ req->enables |=
+ cpu_to_le32(RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
+ RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
+ req->length = cpu_to_le32(bn->rx_agg_ring_mask + 1);
+ break;
+ case HWRM_RING_ALLOC_CMPL:
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
+ req->length = cpu_to_le32(bn->cp_ring_mask + 1);
+ /* Association of cp ring with nq */
+ grp_info = &bn->grp_info[map_index];
+ req->nq_ring_id = cpu_to_le16(grp_info->nq_fw_ring_id);
+ req->cq_handle = cpu_to_le64(ring->handle);
+ req->enables |=
+ cpu_to_le32(RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
+ break;
+ case HWRM_RING_ALLOC_NQ:
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
+ req->length = cpu_to_le32(bn->cp_ring_mask + 1);
+ req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+ break;
+ default:
+ netdev_err(bn->netdev, "hwrm alloc invalid ring type %d\n", ring_type);
+ return -EINVAL;
+ }
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ ring_id = le16_to_cpu(resp->ring_id);
+ bnge_hwrm_req_drop(bd, req);
+
+exit:
+ if (rc) {
+ netdev_err(bd->netdev, "hwrm_ring_alloc type %d failed. rc:%d\n", ring_type, rc);
+ return -EIO;
+ }
+ ring->fw_ring_id = ring_id;
+ return rc;
+}
+
+int bnge_hwrm_set_async_event_cr(struct bnge_dev *bd, int idx)
+{
+ struct hwrm_func_cfg_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_CFG);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
+ req->async_event_cr = cpu_to_le16(idx);
+ return bnge_hwrm_req_send(bd, req);
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h
new file mode 100644
index 000000000000..042f28e84a05
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_HWRM_LIB_H_
+#define _BNGE_HWRM_LIB_H_
+
+#define BNGE_PLC_EN_JUMBO_THRES_VALID \
+ VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID
+#define BNGE_PLC_EN_HDS_THRES_VALID \
+ VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID
+#define BNGE_VNIC_CFG_ROCE_DUAL_MODE \
+ VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE
+
+int bnge_hwrm_ver_get(struct bnge_dev *bd);
+int bnge_hwrm_func_reset(struct bnge_dev *bd);
+int bnge_hwrm_fw_set_time(struct bnge_dev *bd);
+int bnge_hwrm_func_drv_rgtr(struct bnge_dev *bd);
+int bnge_hwrm_func_drv_unrgtr(struct bnge_dev *bd);
+int bnge_hwrm_vnic_qcaps(struct bnge_dev *bd);
+int bnge_hwrm_nvm_dev_info(struct bnge_dev *bd,
+ struct hwrm_nvm_get_dev_info_output *nvm_dev_info);
+int bnge_hwrm_func_backing_store(struct bnge_dev *bd,
+ struct bnge_ctx_mem_type *ctxm,
+ bool last);
+int bnge_hwrm_func_backing_store_qcaps(struct bnge_dev *bd);
+int bnge_hwrm_reserve_rings(struct bnge_dev *bd,
+ struct bnge_hw_rings *hwr);
+int bnge_hwrm_func_qcaps(struct bnge_dev *bd);
+int bnge_hwrm_func_qcfg(struct bnge_dev *bd);
+int bnge_hwrm_func_resc_qcaps(struct bnge_dev *bd);
+int bnge_hwrm_queue_qportcfg(struct bnge_dev *bd);
+
+int bnge_hwrm_vnic_set_hds(struct bnge_net *bn, struct bnge_vnic_info *vnic);
+int bnge_hwrm_vnic_ctx_alloc(struct bnge_dev *bd,
+ struct bnge_vnic_info *vnic, u16 ctx_idx);
+int bnge_hwrm_vnic_set_rss(struct bnge_net *bn,
+ struct bnge_vnic_info *vnic, bool set_rss);
+int bnge_hwrm_vnic_cfg(struct bnge_net *bn, struct bnge_vnic_info *vnic);
+void bnge_hwrm_update_rss_hash_cfg(struct bnge_net *bn);
+int bnge_hwrm_vnic_alloc(struct bnge_dev *bd, struct bnge_vnic_info *vnic,
+ unsigned int nr_rings);
+void bnge_hwrm_vnic_free_one(struct bnge_dev *bd, struct bnge_vnic_info *vnic);
+void bnge_hwrm_vnic_ctx_free_one(struct bnge_dev *bd,
+ struct bnge_vnic_info *vnic, u16 ctx_idx);
+int bnge_hwrm_l2_filter_free(struct bnge_dev *bd, struct bnge_l2_filter *fltr);
+int bnge_hwrm_l2_filter_alloc(struct bnge_dev *bd, struct bnge_l2_filter *fltr);
+int bnge_hwrm_cfa_l2_set_rx_mask(struct bnge_dev *bd,
+ struct bnge_vnic_info *vnic);
+void bnge_hwrm_stat_ctx_free(struct bnge_net *bn);
+int bnge_hwrm_stat_ctx_alloc(struct bnge_net *bn);
+int hwrm_ring_free_send_msg(struct bnge_net *bn, struct bnge_ring_struct *ring,
+ u32 ring_type, int cmpl_ring_id);
+int hwrm_ring_alloc_send_msg(struct bnge_net *bn,
+ struct bnge_ring_struct *ring,
+ u32 ring_type, u32 map_index);
+int bnge_hwrm_set_async_event_cr(struct bnge_dev *bd, int idx);
+#endif /* _BNGE_HWRM_LIB_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
new file mode 100644
index 000000000000..832eeb960bd2
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
@@ -0,0 +1,2485 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <asm/byteorder.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if.h>
+#include <net/ip.h>
+#include <linux/skbuff.h>
+#include <net/page_pool/helpers.h>
+
+#include "bnge.h"
+#include "bnge_hwrm_lib.h"
+#include "bnge_ethtool.h"
+#include "bnge_rmem.h"
+
+#define BNGE_RING_TO_TC_OFF(bd, tx) \
+ ((tx) % (bd)->tx_nr_rings_per_tc)
+
+#define BNGE_RING_TO_TC(bd, tx) \
+ ((tx) / (bd)->tx_nr_rings_per_tc)
+
+#define BNGE_TC_TO_RING_BASE(bd, tc) \
+ ((tc) * (bd)->tx_nr_rings_per_tc)
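+/*
+ * Editorial example: with tx_nr_rings_per_tc == 4, TX ring 6 belongs to
+ * TC 1 (BNGE_RING_TO_TC), sits at offset 2 within that TC
+ * (BNGE_RING_TO_TC_OFF), and TC 1's rings start at ring 4
+ * (BNGE_TC_TO_RING_BASE).
+ */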
+
+static void bnge_free_stats_mem(struct bnge_net *bn,
+ struct bnge_stats_mem *stats)
+{
+ struct bnge_dev *bd = bn->bd;
+
+ if (stats->hw_stats) {
+ dma_free_coherent(bd->dev, stats->len, stats->hw_stats,
+ stats->hw_stats_map);
+ stats->hw_stats = NULL;
+ }
+}
+
+static int bnge_alloc_stats_mem(struct bnge_net *bn,
+ struct bnge_stats_mem *stats)
+{
+ struct bnge_dev *bd = bn->bd;
+
+ stats->hw_stats = dma_alloc_coherent(bd->dev, stats->len,
+ &stats->hw_stats_map, GFP_KERNEL);
+ if (!stats->hw_stats)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void bnge_free_ring_stats(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ if (!bn->bnapi)
+ return;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+
+ bnge_free_stats_mem(bn, &nqr->stats);
+ }
+}
+
+static int bnge_alloc_ring_stats(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ u32 size, i;
+ int rc;
+
+ size = bd->hw_ring_stats_size;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+
+ nqr->stats.len = size;
+ rc = bnge_alloc_stats_mem(bn, &nqr->stats);
+ if (rc)
+ goto err_free_ring_stats;
+
+ nqr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
+ }
+ return 0;
+
+err_free_ring_stats:
+ bnge_free_ring_stats(bn);
+ return rc;
+}
+
+static void bnge_free_nq_desc_arr(struct bnge_nq_ring_info *nqr)
+{
+ struct bnge_ring_struct *ring = &nqr->ring_struct;
+
+ kfree(nqr->desc_ring);
+ nqr->desc_ring = NULL;
+ ring->ring_mem.pg_arr = NULL;
+ kfree(nqr->desc_mapping);
+ nqr->desc_mapping = NULL;
+ ring->ring_mem.dma_arr = NULL;
+}
+
+static void bnge_free_cp_desc_arr(struct bnge_cp_ring_info *cpr)
+{
+ struct bnge_ring_struct *ring = &cpr->ring_struct;
+
+ kfree(cpr->desc_ring);
+ cpr->desc_ring = NULL;
+ ring->ring_mem.pg_arr = NULL;
+ kfree(cpr->desc_mapping);
+ cpr->desc_mapping = NULL;
+ ring->ring_mem.dma_arr = NULL;
+}
+
+static int bnge_alloc_nq_desc_arr(struct bnge_nq_ring_info *nqr, int n)
+{
+ nqr->desc_ring = kcalloc(n, sizeof(*nqr->desc_ring), GFP_KERNEL);
+ if (!nqr->desc_ring)
+ return -ENOMEM;
+
+ nqr->desc_mapping = kcalloc(n, sizeof(*nqr->desc_mapping), GFP_KERNEL);
+ if (!nqr->desc_mapping)
+ goto err_free_desc_ring;
+ return 0;
+
+err_free_desc_ring:
+ kfree(nqr->desc_ring);
+ nqr->desc_ring = NULL;
+ return -ENOMEM;
+}
+
+static int bnge_alloc_cp_desc_arr(struct bnge_cp_ring_info *cpr, int n)
+{
+ cpr->desc_ring = kcalloc(n, sizeof(*cpr->desc_ring), GFP_KERNEL);
+ if (!cpr->desc_ring)
+ return -ENOMEM;
+
+ cpr->desc_mapping = kcalloc(n, sizeof(*cpr->desc_mapping), GFP_KERNEL);
+ if (!cpr->desc_mapping)
+ goto err_free_desc_ring;
+ return 0;
+
+err_free_desc_ring:
+ kfree(cpr->desc_ring);
+ cpr->desc_ring = NULL;
+ return -ENOMEM;
+}
+
+static void bnge_free_nq_arrays(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+
+ bnge_free_nq_desc_arr(&bnapi->nq_ring);
+ }
+}
+
+static int bnge_alloc_nq_arrays(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, rc;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+
+ rc = bnge_alloc_nq_desc_arr(&bnapi->nq_ring, bn->cp_nr_pages);
+ if (rc)
+ goto err_free_nq_arrays;
+ }
+ return 0;
+
+err_free_nq_arrays:
+ bnge_free_nq_arrays(bn);
+ return rc;
+}
+
+static void bnge_free_nq_tree(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr;
+ struct bnge_ring_struct *ring;
+ int j;
+
+ nqr = &bnapi->nq_ring;
+ ring = &nqr->ring_struct;
+
+ bnge_free_ring(bd, &ring->ring_mem);
+
+ if (!nqr->cp_ring_arr)
+ continue;
+
+ for (j = 0; j < nqr->cp_ring_count; j++) {
+ struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[j];
+
+ ring = &cpr->ring_struct;
+ bnge_free_ring(bd, &ring->ring_mem);
+ bnge_free_cp_desc_arr(cpr);
+ }
+ kfree(nqr->cp_ring_arr);
+ nqr->cp_ring_arr = NULL;
+ nqr->cp_ring_count = 0;
+ }
+}
+
+static int alloc_one_cp_ring(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr)
+{
+ struct bnge_ring_mem_info *rmem;
+ struct bnge_ring_struct *ring;
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+ rc = bnge_alloc_cp_desc_arr(cpr, bn->cp_nr_pages);
+ if (rc)
+ return -ENOMEM;
+ ring = &cpr->ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bn->cp_nr_pages;
+ rmem->page_size = HW_CMPD_RING_SIZE;
+ rmem->pg_arr = (void **)cpr->desc_ring;
+ rmem->dma_arr = cpr->desc_mapping;
+ rmem->flags = BNGE_RMEM_RING_PTE_FLAG;
+ rc = bnge_alloc_ring(bd, rmem);
+ if (rc)
+ goto err_free_cp_desc_arr;
+ return rc;
+
+err_free_cp_desc_arr:
+ bnge_free_cp_desc_arr(cpr);
+ return rc;
+}
+
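+/* Build the NQ tree: each NQ owns a child completion ring for the RX ring
+ * it services and another for TX, with shared channels placing both RX and
+ * TX completion rings under the same NQ.
+ */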
+static int bnge_alloc_nq_tree(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, j, ulp_msix, rc;
+ int tcs = 1;
+
+ ulp_msix = bnge_aux_get_msix(bd);
+ for (i = 0, j = 0; i < bd->nq_nr_rings; i++) {
+ bool sh = !!(bd->flags & BNGE_EN_SHARED_CHNL);
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr;
+ struct bnge_cp_ring_info *cpr;
+ struct bnge_ring_struct *ring;
+ int cp_count = 0, k;
+ int rx = 0, tx = 0;
+
+ nqr = &bnapi->nq_ring;
+ nqr->bnapi = bnapi;
+ ring = &nqr->ring_struct;
+
+ rc = bnge_alloc_ring(bd, &ring->ring_mem);
+ if (rc)
+ goto err_free_nq_tree;
+
+ ring->map_idx = ulp_msix + i;
+
+ if (i < bd->rx_nr_rings) {
+ cp_count++;
+ rx = 1;
+ }
+
+ if ((sh && i < bd->tx_nr_rings) ||
+ (!sh && i >= bd->rx_nr_rings)) {
+ cp_count += tcs;
+ tx = 1;
+ }
+
+ nqr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr),
+ GFP_KERNEL);
+ if (!nqr->cp_ring_arr) {
+ rc = -ENOMEM;
+ goto err_free_nq_tree;
+ }
+
+ nqr->cp_ring_count = cp_count;
+
+ for (k = 0; k < cp_count; k++) {
+ cpr = &nqr->cp_ring_arr[k];
+ rc = alloc_one_cp_ring(bn, cpr);
+ if (rc)
+ goto err_free_nq_tree;
+
+ cpr->bnapi = bnapi;
+ cpr->cp_idx = k;
+ if (!k && rx) {
+ bn->rx_ring[i].rx_cpr = cpr;
+ cpr->cp_ring_type = BNGE_NQ_HDL_TYPE_RX;
+ } else {
+ int n, tc = k - rx;
+
+ n = BNGE_TC_TO_RING_BASE(bd, tc) + j;
+ bn->tx_ring[n].tx_cpr = cpr;
+ cpr->cp_ring_type = BNGE_NQ_HDL_TYPE_TX;
+ }
+ }
+ if (tx)
+ j++;
+ }
+ return 0;
+
+err_free_nq_tree:
+ bnge_free_nq_tree(bn);
+ return rc;
+}
+
+static bool bnge_separate_head_pool(struct bnge_rx_ring_info *rxr)
+{
+ return rxr->need_head_pool || PAGE_SIZE > BNGE_RX_PAGE_SIZE;
+}
+
+static void bnge_free_one_rx_ring_bufs(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ int i, max_idx;
+
+ if (!rxr->rx_buf_ring)
+ return;
+
+ max_idx = bn->rx_nr_pages * RX_DESC_CNT;
+
+ for (i = 0; i < max_idx; i++) {
+ struct bnge_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
+ void *data = rx_buf->data;
+
+ if (!data)
+ continue;
+
+ rx_buf->data = NULL;
+ page_pool_free_va(rxr->head_pool, data, true);
+ }
+}
+
+static void bnge_free_one_agg_ring_bufs(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ int i, max_idx;
+
+ if (!rxr->rx_agg_buf_ring)
+ return;
+
+ max_idx = bn->rx_agg_nr_pages * RX_DESC_CNT;
+
+ for (i = 0; i < max_idx; i++) {
+ struct bnge_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_buf_ring[i];
+ netmem_ref netmem = rx_agg_buf->netmem;
+
+ if (!netmem)
+ continue;
+
+ rx_agg_buf->netmem = 0;
+ __clear_bit(i, rxr->rx_agg_bmap);
+
+ page_pool_recycle_direct_netmem(rxr->page_pool, netmem);
+ }
+}
+
+static void bnge_free_one_rx_ring_pair_bufs(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ bnge_free_one_rx_ring_bufs(bn, rxr);
+ bnge_free_one_agg_ring_bufs(bn, rxr);
+}
+
+static void bnge_free_rx_ring_pair_bufs(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ if (!bn->rx_ring)
+ return;
+
+ for (i = 0; i < bd->rx_nr_rings; i++)
+ bnge_free_one_rx_ring_pair_bufs(bn, &bn->rx_ring[i]);
+}
+
+static void bnge_free_all_rings_bufs(struct bnge_net *bn)
+{
+ bnge_free_rx_ring_pair_bufs(bn);
+}
+
+static void bnge_free_rx_rings(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
+ struct bnge_ring_struct *ring;
+
+ page_pool_destroy(rxr->page_pool);
+ page_pool_destroy(rxr->head_pool);
+ rxr->page_pool = rxr->head_pool = NULL;
+
+ kfree(rxr->rx_agg_bmap);
+ rxr->rx_agg_bmap = NULL;
+
+ ring = &rxr->rx_ring_struct;
+ bnge_free_ring(bd, &ring->ring_mem);
+
+ ring = &rxr->rx_agg_ring_struct;
+ bnge_free_ring(bd, &ring->ring_mem);
+ }
+}
+
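+/* Create the page pools for one RX ring: a netmem pool for packet and
+ * aggregation buffers and, when needed (unreadable netmem or PAGE_SIZE
+ * larger than BNGE_RX_PAGE_SIZE), a separate head pool for headers.
+ */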
+static int bnge_alloc_rx_page_pool(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ int numa_node)
+{
+ const unsigned int agg_size_fac = PAGE_SIZE / BNGE_RX_PAGE_SIZE;
+ const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
+ struct page_pool_params pp = { 0 };
+ struct bnge_dev *bd = bn->bd;
+ struct page_pool *pool;
+
+ pp.pool_size = bn->rx_agg_ring_size / agg_size_fac;
+ pp.nid = numa_node;
+ pp.netdev = bn->netdev;
+ pp.dev = bd->dev;
+ pp.dma_dir = bn->rx_dir;
+ pp.max_len = PAGE_SIZE;
+ pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
+ PP_FLAG_ALLOW_UNREADABLE_NETMEM;
+ pp.queue_idx = rxr->bnapi->index;
+
+ pool = page_pool_create(&pp);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+ rxr->page_pool = pool;
+
+ rxr->need_head_pool = page_pool_is_unreadable(pool);
+ if (bnge_separate_head_pool(rxr)) {
+ pp.pool_size = min(bn->rx_ring_size / rx_size_fac, 1024);
+ pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pool = page_pool_create(&pp);
+ if (IS_ERR(pool))
+ goto err_destroy_pp;
+ } else {
+ page_pool_get(pool);
+ }
+ rxr->head_pool = pool;
+ return 0;
+
+err_destroy_pp:
+ page_pool_destroy(rxr->page_pool);
+ rxr->page_pool = NULL;
+ return PTR_ERR(pool);
+}
+
+static void bnge_enable_rx_page_pool(struct bnge_rx_ring_info *rxr)
+{
+ page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi);
+ page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi);
+}
+
+static int bnge_alloc_rx_agg_bmap(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ u16 mem_size;
+
+ rxr->rx_agg_bmap_size = bn->rx_agg_ring_mask + 1;
+ mem_size = rxr->rx_agg_bmap_size / 8;
+ rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
+ if (!rxr->rx_agg_bmap)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int bnge_alloc_rx_rings(struct bnge_net *bn)
+{
+ int i, rc = 0, agg_rings = 0, cpu;
+ struct bnge_dev *bd = bn->bd;
+
+ if (bnge_is_agg_reqd(bd))
+ agg_rings = 1;
+
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
+ struct bnge_ring_struct *ring;
+ int cpu_node;
+
+ ring = &rxr->rx_ring_struct;
+
+ cpu = cpumask_local_spread(i, dev_to_node(bd->dev));
+ cpu_node = cpu_to_node(cpu);
+ netdev_dbg(bn->netdev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
+ i, cpu_node);
+ rc = bnge_alloc_rx_page_pool(bn, rxr, cpu_node);
+ if (rc)
+ goto err_free_rx_rings;
+ bnge_enable_rx_page_pool(rxr);
+
+ rc = bnge_alloc_ring(bd, &ring->ring_mem);
+ if (rc)
+ goto err_free_rx_rings;
+
+ ring->grp_idx = i;
+ if (agg_rings) {
+ ring = &rxr->rx_agg_ring_struct;
+ rc = bnge_alloc_ring(bd, &ring->ring_mem);
+ if (rc)
+ goto err_free_rx_rings;
+
+ ring->grp_idx = i;
+ rc = bnge_alloc_rx_agg_bmap(bn, rxr);
+ if (rc)
+ goto err_free_rx_rings;
+ }
+ }
+ return rc;
+
+err_free_rx_rings:
+ bnge_free_rx_rings(bn);
+ return rc;
+}
+
+static void bnge_free_tx_rings(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ for (i = 0; i < bd->tx_nr_rings; i++) {
+ struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
+ struct bnge_ring_struct *ring;
+
+ ring = &txr->tx_ring_struct;
+
+ bnge_free_ring(bd, &ring->ring_mem);
+ }
+}
+
+static int bnge_alloc_tx_rings(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, j, rc;
+
+ for (i = 0, j = 0; i < bd->tx_nr_rings; i++) {
+ struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
+ struct bnge_ring_struct *ring;
+ u8 qidx;
+
+ ring = &txr->tx_ring_struct;
+
+ rc = bnge_alloc_ring(bd, &ring->ring_mem);
+ if (rc)
+ goto err_free_tx_rings;
+
+ ring->grp_idx = txr->bnapi->index;
+ qidx = bd->tc_to_qidx[j];
+ ring->queue_id = bd->q_info[qidx].queue_id;
+ if (BNGE_RING_TO_TC_OFF(bd, i) == (bd->tx_nr_rings_per_tc - 1))
+ j++;
+ }
+ return 0;
+
+err_free_tx_rings:
+ bnge_free_tx_rings(bn);
+ return rc;
+}
+
+static void bnge_free_vnic_attributes(struct bnge_net *bn)
+{
+ struct pci_dev *pdev = bn->bd->pdev;
+ struct bnge_vnic_info *vnic;
+ int i;
+
+ if (!bn->vnic_info)
+ return;
+
+ for (i = 0; i < bn->nr_vnics; i++) {
+ vnic = &bn->vnic_info[i];
+
+ kfree(vnic->uc_list);
+ vnic->uc_list = NULL;
+
+ if (vnic->mc_list) {
+ dma_free_coherent(&pdev->dev, vnic->mc_list_size,
+ vnic->mc_list, vnic->mc_list_mapping);
+ vnic->mc_list = NULL;
+ }
+
+ if (vnic->rss_table) {
+ dma_free_coherent(&pdev->dev, vnic->rss_table_size,
+ vnic->rss_table,
+ vnic->rss_table_dma_addr);
+ vnic->rss_table = NULL;
+ }
+
+ vnic->rss_hash_key = NULL;
+ vnic->flags = 0;
+ }
+}
+
+static int bnge_alloc_vnic_attributes(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ struct bnge_vnic_info *vnic;
+ int i, size;
+
+ for (i = 0; i < bn->nr_vnics; i++) {
+ vnic = &bn->vnic_info[i];
+
+ if (vnic->flags & BNGE_VNIC_UCAST_FLAG) {
+ int mem_size = (BNGE_MAX_UC_ADDRS - 1) * ETH_ALEN;
+
+ vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
+ if (!vnic->uc_list)
+ goto err_free_vnic_attributes;
+ }
+
+ if (vnic->flags & BNGE_VNIC_MCAST_FLAG) {
+ vnic->mc_list_size = BNGE_MAX_MC_ADDRS * ETH_ALEN;
+ vnic->mc_list =
+ dma_alloc_coherent(bd->dev,
+ vnic->mc_list_size,
+ &vnic->mc_list_mapping,
+ GFP_KERNEL);
+ if (!vnic->mc_list)
+ goto err_free_vnic_attributes;
+ }
+
+ /* Allocate rss table and hash key */
+ size = L1_CACHE_ALIGN(BNGE_MAX_RSS_TABLE_SIZE);
+
+ vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
+ vnic->rss_table = dma_alloc_coherent(bd->dev,
+ vnic->rss_table_size,
+ &vnic->rss_table_dma_addr,
+ GFP_KERNEL);
+ if (!vnic->rss_table)
+ goto err_free_vnic_attributes;
+
+ vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
+ vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
+ }
+ return 0;
+
+err_free_vnic_attributes:
+ bnge_free_vnic_attributes(bn);
+ return -ENOMEM;
+}
+
+static int bnge_alloc_vnics(struct bnge_net *bn)
+{
+ int num_vnics;
+
+	/* Allocate only 1 VNIC for now. Additional VNICs will be added
+	 * based on RFS/NTUPLE support in future patches.
+	 */
+ num_vnics = 1;
+
+ bn->vnic_info = kcalloc(num_vnics, sizeof(struct bnge_vnic_info),
+ GFP_KERNEL);
+ if (!bn->vnic_info)
+ return -ENOMEM;
+
+ bn->nr_vnics = num_vnics;
+
+ return 0;
+}
+
+static void bnge_free_vnics(struct bnge_net *bn)
+{
+ kfree(bn->vnic_info);
+ bn->vnic_info = NULL;
+ bn->nr_vnics = 0;
+}
+
+static void bnge_free_ring_grps(struct bnge_net *bn)
+{
+ kfree(bn->grp_info);
+ bn->grp_info = NULL;
+}
+
+static int bnge_init_ring_grps(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ bn->grp_info = kcalloc(bd->nq_nr_rings,
+ sizeof(struct bnge_ring_grp_info),
+ GFP_KERNEL);
+ if (!bn->grp_info)
+ return -ENOMEM;
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ bn->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
+ bn->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
+ bn->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
+ bn->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
+ bn->grp_info[i].nq_fw_ring_id = INVALID_HW_RING_ID;
+ }
+
+ return 0;
+}
+
+static void bnge_free_core(struct bnge_net *bn)
+{
+ bnge_free_vnic_attributes(bn);
+ bnge_free_tx_rings(bn);
+ bnge_free_rx_rings(bn);
+ bnge_free_nq_tree(bn);
+ bnge_free_nq_arrays(bn);
+ bnge_free_ring_stats(bn);
+ bnge_free_ring_grps(bn);
+ bnge_free_vnics(bn);
+ kfree(bn->tx_ring_map);
+ bn->tx_ring_map = NULL;
+ kfree(bn->tx_ring);
+ bn->tx_ring = NULL;
+ kfree(bn->rx_ring);
+ bn->rx_ring = NULL;
+ kfree(bn->bnapi);
+ bn->bnapi = NULL;
+}
+
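+/* Allocate the core per-netdev state: the bnapi pointer array and the
+ * per-NQ bnge_napi structs come from a single allocation, followed by the
+ * RX/TX ring arrays and the remaining per-ring and VNIC resources,
+ * unwinding through bnge_free_core() on any failure.
+ */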
+static int bnge_alloc_core(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, j, size, arr_size;
+ int rc = -ENOMEM;
+ void *bnapi;
+
+ arr_size = L1_CACHE_ALIGN(sizeof(struct bnge_napi *) *
+ bd->nq_nr_rings);
+ size = L1_CACHE_ALIGN(sizeof(struct bnge_napi));
+ bnapi = kzalloc(arr_size + size * bd->nq_nr_rings, GFP_KERNEL);
+ if (!bnapi)
+ return rc;
+
+ bn->bnapi = bnapi;
+ bnapi += arr_size;
+ for (i = 0; i < bd->nq_nr_rings; i++, bnapi += size) {
+ struct bnge_nq_ring_info *nqr;
+
+ bn->bnapi[i] = bnapi;
+ bn->bnapi[i]->index = i;
+ bn->bnapi[i]->bn = bn;
+ nqr = &bn->bnapi[i]->nq_ring;
+ nqr->ring_struct.ring_mem.flags = BNGE_RMEM_RING_PTE_FLAG;
+ }
+
+ bn->rx_ring = kcalloc(bd->rx_nr_rings,
+ sizeof(struct bnge_rx_ring_info),
+ GFP_KERNEL);
+ if (!bn->rx_ring)
+ goto err_free_core;
+
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
+
+ rxr->rx_ring_struct.ring_mem.flags =
+ BNGE_RMEM_RING_PTE_FLAG;
+ rxr->rx_agg_ring_struct.ring_mem.flags =
+ BNGE_RMEM_RING_PTE_FLAG;
+ rxr->bnapi = bn->bnapi[i];
+ bn->bnapi[i]->rx_ring = &bn->rx_ring[i];
+ }
+
+ bn->tx_ring = kcalloc(bd->tx_nr_rings,
+ sizeof(struct bnge_tx_ring_info),
+ GFP_KERNEL);
+ if (!bn->tx_ring)
+ goto err_free_core;
+
+ bn->tx_ring_map = kcalloc(bd->tx_nr_rings, sizeof(u16),
+ GFP_KERNEL);
+ if (!bn->tx_ring_map)
+ goto err_free_core;
+
+ if (bd->flags & BNGE_EN_SHARED_CHNL)
+ j = 0;
+ else
+ j = bd->rx_nr_rings;
+
+ for (i = 0; i < bd->tx_nr_rings; i++) {
+ struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
+ struct bnge_napi *bnapi2;
+ int k;
+
+ txr->tx_ring_struct.ring_mem.flags = BNGE_RMEM_RING_PTE_FLAG;
+ bn->tx_ring_map[i] = i;
+ k = j + BNGE_RING_TO_TC_OFF(bd, i);
+
+ bnapi2 = bn->bnapi[k];
+ txr->txq_index = i;
+ txr->tx_napi_idx =
+ BNGE_RING_TO_TC(bd, txr->txq_index);
+ bnapi2->tx_ring[txr->tx_napi_idx] = txr;
+ txr->bnapi = bnapi2;
+ }
+
+ rc = bnge_alloc_ring_stats(bn);
+ if (rc)
+ goto err_free_core;
+
+ rc = bnge_alloc_vnics(bn);
+ if (rc)
+ goto err_free_core;
+
+ rc = bnge_alloc_nq_arrays(bn);
+ if (rc)
+ goto err_free_core;
+
+ bnge_init_ring_struct(bn);
+
+ rc = bnge_alloc_rx_rings(bn);
+ if (rc)
+ goto err_free_core;
+
+ rc = bnge_alloc_tx_rings(bn);
+ if (rc)
+ goto err_free_core;
+
+ rc = bnge_alloc_nq_tree(bn);
+ if (rc)
+ goto err_free_core;
+
+ bn->vnic_info[BNGE_VNIC_DEFAULT].flags |= BNGE_VNIC_RSS_FLAG |
+ BNGE_VNIC_MCAST_FLAG |
+ BNGE_VNIC_UCAST_FLAG;
+ rc = bnge_alloc_vnic_attributes(bn);
+ if (rc)
+ goto err_free_core;
+ return 0;
+
+err_free_core:
+ bnge_free_core(bn);
+ return rc;
+}
+
+u16 bnge_cp_ring_for_rx(struct bnge_rx_ring_info *rxr)
+{
+ return rxr->rx_cpr->ring_struct.fw_ring_id;
+}
+
+u16 bnge_cp_ring_for_tx(struct bnge_tx_ring_info *txr)
+{
+ return txr->tx_cpr->ring_struct.fw_ring_id;
+}
+
+static void bnge_db_nq(struct bnge_net *bn, struct bnge_db_info *db, u32 idx)
+{
+ bnge_writeq(bn->bd, db->db_key64 | DBR_TYPE_NQ_MASK |
+ DB_RING_IDX(db, idx), db->doorbell);
+}
+
+static void bnge_db_cq(struct bnge_net *bn, struct bnge_db_info *db, u32 idx)
+{
+ bnge_writeq(bn->bd, db->db_key64 | DBR_TYPE_CQ_ARMALL |
+ DB_RING_IDX(db, idx), db->doorbell);
+}
+
+static int bnge_cp_num_to_irq_num(struct bnge_net *bn, int n)
+{
+ struct bnge_napi *bnapi = bn->bnapi[n];
+ struct bnge_nq_ring_info *nqr;
+
+ nqr = &bnapi->nq_ring;
+
+ return nqr->ring_struct.map_idx;
+}
+
+static irqreturn_t bnge_msix(int irq, void *dev_instance)
+{
+ /* NAPI scheduling to be added in a future patch */
+ return IRQ_HANDLED;
+}
+
+static void bnge_init_nq_tree(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, j;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_nq_ring_info *nqr = &bn->bnapi[i]->nq_ring;
+ struct bnge_ring_struct *ring = &nqr->ring_struct;
+
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ for (j = 0; j < nqr->cp_ring_count; j++) {
+ struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[j];
+
+ ring = &cpr->ring_struct;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ }
+ }
+}
+
+static netmem_ref __bnge_alloc_rx_netmem(struct bnge_net *bn,
+ dma_addr_t *mapping,
+ struct bnge_rx_ring_info *rxr,
+ unsigned int *offset,
+ gfp_t gfp)
+{
+ netmem_ref netmem;
+
+ if (PAGE_SIZE > BNGE_RX_PAGE_SIZE) {
+ netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset,
+ BNGE_RX_PAGE_SIZE, gfp);
+ } else {
+ netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
+ *offset = 0;
+ }
+ if (!netmem)
+ return 0;
+
+ *mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
+ return netmem;
+}
+
+static u8 *__bnge_alloc_rx_frag(struct bnge_net *bn, dma_addr_t *mapping,
+ struct bnge_rx_ring_info *rxr,
+ gfp_t gfp)
+{
+ unsigned int offset;
+ struct page *page;
+
+ page = page_pool_alloc_frag(rxr->head_pool, &offset,
+ bn->rx_buf_size, gfp);
+ if (!page)
+ return NULL;
+
+ *mapping = page_pool_get_dma_addr(page) + bn->rx_dma_offset + offset;
+ return page_address(page) + offset;
+}
+
+static int bnge_alloc_rx_data(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp)
+{
+ struct bnge_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bn, prod)];
+ struct rx_bd *rxbd;
+ dma_addr_t mapping;
+ u8 *data;
+
+ rxbd = &rxr->rx_desc_ring[RX_RING(bn, prod)][RX_IDX(prod)];
+ data = __bnge_alloc_rx_frag(bn, &mapping, rxr, gfp);
+ if (!data)
+ return -ENOMEM;
+
+ rx_buf->data = data;
+ rx_buf->data_ptr = data + bn->rx_offset;
+ rx_buf->mapping = mapping;
+
+ rxbd->rx_bd_haddr = cpu_to_le64(mapping);
+
+ return 0;
+}
+
+static int bnge_alloc_one_rx_ring_bufs(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ int ring_nr)
+{
+ u32 prod = rxr->rx_prod;
+ int i, rc = 0;
+
+ for (i = 0; i < bn->rx_ring_size; i++) {
+ rc = bnge_alloc_rx_data(bn, rxr, prod, GFP_KERNEL);
+ if (rc)
+ break;
+ prod = NEXT_RX(prod);
+ }
+
+	/* Abort only if we could not allocate even a single buffer */
+ if (rc && !i) {
+ netdev_err(bn->netdev,
+ "RX ring %d: allocated %d/%d buffers, abort\n",
+ ring_nr, i, bn->rx_ring_size);
+ return rc;
+ }
+
+ rxr->rx_prod = prod;
+
+ if (i < bn->rx_ring_size)
+ netdev_warn(bn->netdev,
+ "RX ring %d: allocated %d/%d buffers, continuing\n",
+ ring_nr, i, bn->rx_ring_size);
+ return 0;
+}
+
+static u16 bnge_find_next_agg_idx(struct bnge_rx_ring_info *rxr, u16 idx)
+{
+ u16 next, max = rxr->rx_agg_bmap_size;
+
+ next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
+ if (next >= max)
+ next = find_first_zero_bit(rxr->rx_agg_bmap, max);
+ return next;
+}
+
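+/* Post one aggregation buffer: reserve a free slot in the agg bitmap,
+ * record the netmem, offset and DMA address in the software ring, and
+ * point the aggregation BD at it.
+ */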
+static int bnge_alloc_rx_netmem(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp)
+{
+ struct bnge_sw_rx_agg_bd *rx_agg_buf;
+ u16 sw_prod = rxr->rx_sw_agg_prod;
+ unsigned int offset = 0;
+ struct rx_bd *rxbd;
+ dma_addr_t mapping;
+ netmem_ref netmem;
+
+ rxbd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bn, prod)][RX_IDX(prod)];
+ netmem = __bnge_alloc_rx_netmem(bn, &mapping, rxr, &offset, gfp);
+ if (!netmem)
+ return -ENOMEM;
+
+ if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
+ sw_prod = bnge_find_next_agg_idx(rxr, sw_prod);
+
+ __set_bit(sw_prod, rxr->rx_agg_bmap);
+ rx_agg_buf = &rxr->rx_agg_buf_ring[sw_prod];
+ rxr->rx_sw_agg_prod = RING_RX_AGG(bn, NEXT_RX_AGG(sw_prod));
+
+ rx_agg_buf->netmem = netmem;
+ rx_agg_buf->offset = offset;
+ rx_agg_buf->mapping = mapping;
+ rxbd->rx_bd_haddr = cpu_to_le64(mapping);
+ rxbd->rx_bd_opaque = sw_prod;
+ return 0;
+}
+
+static int bnge_alloc_one_agg_ring_bufs(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ int ring_nr)
+{
+ u32 prod = rxr->rx_agg_prod;
+ int i, rc = 0;
+
+ for (i = 0; i < bn->rx_agg_ring_size; i++) {
+ rc = bnge_alloc_rx_netmem(bn, rxr, prod, GFP_KERNEL);
+ if (rc)
+ break;
+ prod = NEXT_RX_AGG(prod);
+ }
+
+ if (rc && i < MAX_SKB_FRAGS) {
+ netdev_err(bn->netdev,
+ "Agg ring %d: allocated %d/%d buffers (min %d), abort\n",
+ ring_nr, i, bn->rx_agg_ring_size, MAX_SKB_FRAGS);
+ goto err_free_one_agg_ring_bufs;
+ }
+
+ rxr->rx_agg_prod = prod;
+
+ if (i < bn->rx_agg_ring_size)
+ netdev_warn(bn->netdev,
+ "Agg ring %d: allocated %d/%d buffers, continuing\n",
+ ring_nr, i, bn->rx_agg_ring_size);
+ return 0;
+
+err_free_one_agg_ring_bufs:
+ bnge_free_one_agg_ring_bufs(bn, rxr);
+ return -ENOMEM;
+}
+
+static int bnge_alloc_one_rx_ring_pair_bufs(struct bnge_net *bn, int ring_nr)
+{
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[ring_nr];
+ int rc;
+
+ rc = bnge_alloc_one_rx_ring_bufs(bn, rxr, ring_nr);
+ if (rc)
+ return rc;
+
+ if (bnge_is_agg_reqd(bn->bd)) {
+ rc = bnge_alloc_one_agg_ring_bufs(bn, rxr, ring_nr);
+ if (rc)
+ goto err_free_one_rx_ring_bufs;
+ }
+ return 0;
+
+err_free_one_rx_ring_bufs:
+ bnge_free_one_rx_ring_bufs(bn, rxr);
+ return rc;
+}
+
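+/* Stamp every RX BD across the ring pages with the given len/flags/type
+ * word and a sequential opaque index.
+ */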
+static void bnge_init_rxbd_pages(struct bnge_ring_struct *ring, u32 type)
+{
+ struct rx_bd **rx_desc_ring;
+ u32 prod;
+ int i;
+
+ rx_desc_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
+ for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
+ struct rx_bd *rxbd = rx_desc_ring[i];
+ int j;
+
+ for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
+ rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
+ rxbd->rx_bd_opaque = prod;
+ }
+ }
+}
+
+static void bnge_init_one_rx_ring_rxbd(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ struct bnge_ring_struct *ring;
+ u32 type;
+
+ type = (bn->rx_buf_use_size << RX_BD_LEN_SHIFT) |
+ RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
+
+ if (NET_IP_ALIGN == 2)
+ type |= RX_BD_FLAGS_SOP;
+
+ ring = &rxr->rx_ring_struct;
+ bnge_init_rxbd_pages(ring, type);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnge_init_one_agg_ring_rxbd(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ struct bnge_ring_struct *ring;
+ u32 type;
+
+ ring = &rxr->rx_agg_ring_struct;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ if (bnge_is_agg_reqd(bn->bd)) {
+ type = ((u32)BNGE_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
+ RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
+
+ bnge_init_rxbd_pages(ring, type);
+ }
+}
+
+static void bnge_init_one_rx_ring_pair(struct bnge_net *bn, int ring_nr)
+{
+ struct bnge_rx_ring_info *rxr;
+
+ rxr = &bn->rx_ring[ring_nr];
+ bnge_init_one_rx_ring_rxbd(bn, rxr);
+
+ netif_queue_set_napi(bn->netdev, ring_nr, NETDEV_QUEUE_TYPE_RX,
+ &rxr->bnapi->napi);
+
+ bnge_init_one_agg_ring_rxbd(bn, rxr);
+}
+
+static int bnge_alloc_rx_ring_pair_bufs(struct bnge_net *bn)
+{
+ int i, rc;
+
+ for (i = 0; i < bn->bd->rx_nr_rings; i++) {
+ rc = bnge_alloc_one_rx_ring_pair_bufs(bn, i);
+ if (rc)
+ goto err_free_rx_ring_pair_bufs;
+ }
+ return 0;
+
+err_free_rx_ring_pair_bufs:
+ bnge_free_rx_ring_pair_bufs(bn);
+ return rc;
+}
+
+static void bnge_init_rx_rings(struct bnge_net *bn)
+{
+ int i;
+
+#define BNGE_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
+#define BNGE_RX_DMA_OFFSET NET_SKB_PAD
+ bn->rx_offset = BNGE_RX_OFFSET;
+ bn->rx_dma_offset = BNGE_RX_DMA_OFFSET;
+
+ for (i = 0; i < bn->bd->rx_nr_rings; i++)
+ bnge_init_one_rx_ring_pair(bn, i);
+}
+
+static void bnge_init_tx_rings(struct bnge_net *bn)
+{
+ int i;
+
+ bn->tx_wake_thresh = max(bn->tx_ring_size / 2, BNGE_MIN_TX_DESC_CNT);
+
+ for (i = 0; i < bn->bd->tx_nr_rings; i++) {
+ struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
+ struct bnge_ring_struct *ring = &txr->tx_ring_struct;
+
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+
+ netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_TX,
+ &txr->bnapi->napi);
+ }
+}
+
+static void bnge_init_vnics(struct bnge_net *bn)
+{
+ struct bnge_vnic_info *vnic0 = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ int i;
+
+ for (i = 0; i < bn->nr_vnics; i++) {
+ struct bnge_vnic_info *vnic = &bn->vnic_info[i];
+ int j;
+
+ vnic->fw_vnic_id = INVALID_HW_RING_ID;
+ vnic->vnic_id = i;
+ for (j = 0; j < BNGE_MAX_CTX_PER_VNIC; j++)
+ vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
+
+ if (bn->vnic_info[i].rss_hash_key) {
+ if (i == BNGE_VNIC_DEFAULT) {
+ u8 *key = (void *)vnic->rss_hash_key;
+ int k;
+
+ if (!bn->rss_hash_key_valid &&
+ !bn->rss_hash_key_updated) {
+ get_random_bytes(bn->rss_hash_key,
+ HW_HASH_KEY_SIZE);
+ bn->rss_hash_key_updated = true;
+ }
+
+ memcpy(vnic->rss_hash_key, bn->rss_hash_key,
+ HW_HASH_KEY_SIZE);
+
+ if (!bn->rss_hash_key_updated)
+ continue;
+
+ bn->rss_hash_key_updated = false;
+ bn->rss_hash_key_valid = true;
+
+ bn->toeplitz_prefix = 0;
+ for (k = 0; k < 8; k++) {
+ bn->toeplitz_prefix <<= 8;
+ bn->toeplitz_prefix |= key[k];
+ }
+ } else {
+ memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
+ HW_HASH_KEY_SIZE);
+ }
+ }
+ }
+}
+
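+/* Doorbell setup: pick the ring mask and epoch bits for the ring type and
+ * compose the 64-bit doorbell key (path, type, xid, valid).
+ */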
+static void bnge_set_db_mask(struct bnge_net *bn, struct bnge_db_info *db,
+ u32 ring_type)
+{
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_TX:
+ db->db_ring_mask = bn->tx_ring_mask;
+ break;
+ case HWRM_RING_ALLOC_RX:
+ db->db_ring_mask = bn->rx_ring_mask;
+ break;
+ case HWRM_RING_ALLOC_AGG:
+ db->db_ring_mask = bn->rx_agg_ring_mask;
+ break;
+ case HWRM_RING_ALLOC_CMPL:
+ case HWRM_RING_ALLOC_NQ:
+ db->db_ring_mask = bn->cp_ring_mask;
+ break;
+ }
+ db->db_epoch_mask = db->db_ring_mask + 1;
+ db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
+}
+
+static void bnge_set_db(struct bnge_net *bn, struct bnge_db_info *db,
+ u32 ring_type, u32 map_idx, u32 xid)
+{
+ struct bnge_dev *bd = bn->bd;
+
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_TX:
+ db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
+ break;
+ case HWRM_RING_ALLOC_RX:
+ case HWRM_RING_ALLOC_AGG:
+ db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
+ break;
+ case HWRM_RING_ALLOC_CMPL:
+ db->db_key64 = DBR_PATH_L2;
+ break;
+ case HWRM_RING_ALLOC_NQ:
+ db->db_key64 = DBR_PATH_L2;
+ break;
+ }
+ db->db_key64 |= ((u64)xid << DBR_XID_SFT) | DBR_VALID;
+
+ db->doorbell = bd->bar1 + bd->db_offset;
+ bnge_set_db_mask(bn, db, ring_type);
+}
+
+static int bnge_hwrm_cp_ring_alloc(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr)
+{
+ const u32 type = HWRM_RING_ALLOC_CMPL;
+ struct bnge_napi *bnapi = cpr->bnapi;
+ struct bnge_ring_struct *ring;
+ u32 map_idx = bnapi->index;
+ int rc;
+
+ ring = &cpr->ring_struct;
+ ring->handle = BNGE_SET_NQ_HDL(cpr);
+ rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
+ if (rc)
+ return rc;
+
+ bnge_set_db(bn, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
+ bnge_db_cq(bn, &cpr->cp_db, cpr->cp_raw_cons);
+
+ return 0;
+}
+
+static int bnge_hwrm_tx_ring_alloc(struct bnge_net *bn,
+ struct bnge_tx_ring_info *txr, u32 tx_idx)
+{
+ struct bnge_ring_struct *ring = &txr->tx_ring_struct;
+ const u32 type = HWRM_RING_ALLOC_TX;
+ int rc;
+
+ rc = hwrm_ring_alloc_send_msg(bn, ring, type, tx_idx);
+ if (rc)
+ return rc;
+
+ bnge_set_db(bn, &txr->tx_db, type, tx_idx, ring->fw_ring_id);
+
+ return 0;
+}
+
+static int bnge_hwrm_rx_agg_ring_alloc(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ struct bnge_ring_struct *ring = &rxr->rx_agg_ring_struct;
+ u32 type = HWRM_RING_ALLOC_AGG;
+ struct bnge_dev *bd = bn->bd;
+ u32 grp_idx = ring->grp_idx;
+ u32 map_idx;
+ int rc;
+
+ map_idx = grp_idx + bd->rx_nr_rings;
+ rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
+ if (rc)
+ return rc;
+
+ bnge_set_db(bn, &rxr->rx_agg_db, type, map_idx,
+ ring->fw_ring_id);
+ bnge_db_write(bn->bd, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ bnge_db_write(bn->bd, &rxr->rx_db, rxr->rx_prod);
+ bn->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
+
+ return 0;
+}
+
+static int bnge_hwrm_rx_ring_alloc(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ struct bnge_ring_struct *ring = &rxr->rx_ring_struct;
+ struct bnge_napi *bnapi = rxr->bnapi;
+ u32 type = HWRM_RING_ALLOC_RX;
+ u32 map_idx = bnapi->index;
+ int rc;
+
+ rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
+ if (rc)
+ return rc;
+
+ bnge_set_db(bn, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
+ bn->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
+
+ return 0;
+}
+
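+/* Allocate all firmware rings: NQs first (with the NQ's IRQ masked across
+ * the HWRM call), then the TX completion and TX rings, then the RX rings
+ * with their completion rings, and finally the aggregation rings.
+ */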
+static int bnge_hwrm_ring_alloc(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ bool agg_rings;
+ int i, rc = 0;
+
+ agg_rings = !!(bnge_is_agg_reqd(bd));
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+ struct bnge_ring_struct *ring = &nqr->ring_struct;
+ u32 type = HWRM_RING_ALLOC_NQ;
+ u32 map_idx = ring->map_idx;
+ unsigned int vector;
+
+ vector = bd->irq_tbl[map_idx].vector;
+ disable_irq_nosync(vector);
+ rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
+ if (rc) {
+ enable_irq(vector);
+ goto err_out;
+ }
+ bnge_set_db(bn, &nqr->nq_db, type, map_idx, ring->fw_ring_id);
+ bnge_db_nq(bn, &nqr->nq_db, nqr->nq_raw_cons);
+ enable_irq(vector);
+ bn->grp_info[i].nq_fw_ring_id = ring->fw_ring_id;
+
+ if (!i) {
+ rc = bnge_hwrm_set_async_event_cr(bd, ring->fw_ring_id);
+ if (rc)
+ netdev_warn(bn->netdev, "Failed to set async event completion ring.\n");
+ }
+ }
+
+ for (i = 0; i < bd->tx_nr_rings; i++) {
+ struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
+
+ rc = bnge_hwrm_cp_ring_alloc(bn, txr->tx_cpr);
+ if (rc)
+ goto err_out;
+ rc = bnge_hwrm_tx_ring_alloc(bn, txr, i);
+ if (rc)
+ goto err_out;
+ }
+
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
+ struct bnge_cp_ring_info *cpr;
+ struct bnge_ring_struct *ring;
+ struct bnge_napi *bnapi;
+ u32 map_idx, type;
+
+ rc = bnge_hwrm_rx_ring_alloc(bn, rxr);
+ if (rc)
+ goto err_out;
+		/* With agg rings, agg buffers must be posted first, so the
+		 * RX doorbell is written in bnge_hwrm_rx_agg_ring_alloc()
+		 * instead.
+		 */
+ if (!agg_rings)
+ bnge_db_write(bn->bd, &rxr->rx_db, rxr->rx_prod);
+
+ cpr = rxr->rx_cpr;
+ bnapi = rxr->bnapi;
+ type = HWRM_RING_ALLOC_CMPL;
+ map_idx = bnapi->index;
+
+ ring = &cpr->ring_struct;
+ ring->handle = BNGE_SET_NQ_HDL(cpr);
+ rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
+ if (rc)
+ goto err_out;
+ bnge_set_db(bn, &cpr->cp_db, type, map_idx,
+ ring->fw_ring_id);
+ bnge_db_cq(bn, &cpr->cp_db, cpr->cp_raw_cons);
+ }
+
+ if (agg_rings) {
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ rc = bnge_hwrm_rx_agg_ring_alloc(bn, &bn->rx_ring[i]);
+ if (rc)
+ goto err_out;
+ }
+ }
+err_out:
+ return rc;
+}
+
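+/* Fill the VNIC RSS table from the indirection table: each entry is a pair
+ * of the RX ring's firmware id followed by its completion ring id.
+ */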
+void bnge_fill_hw_rss_tbl(struct bnge_net *bn, struct bnge_vnic_info *vnic)
+{
+ __le16 *ring_tbl = vnic->rss_table;
+ struct bnge_rx_ring_info *rxr;
+ struct bnge_dev *bd = bn->bd;
+ u16 tbl_size, i;
+
+ tbl_size = bnge_get_rxfh_indir_size(bd);
+
+ for (i = 0; i < tbl_size; i++) {
+ u16 ring_id, j;
+
+ j = bd->rss_indir_tbl[i];
+ rxr = &bn->rx_ring[j];
+
+ ring_id = rxr->rx_ring_struct.fw_ring_id;
+ *ring_tbl++ = cpu_to_le16(ring_id);
+ ring_id = bnge_cp_ring_for_rx(rxr);
+ *ring_tbl++ = cpu_to_le16(ring_id);
+ }
+}
+
+static int bnge_hwrm_vnic_rss_cfg(struct bnge_net *bn,
+ struct bnge_vnic_info *vnic)
+{
+ int rc;
+
+ rc = bnge_hwrm_vnic_set_rss(bn, vnic, true);
+ if (rc) {
+ netdev_err(bn->netdev, "hwrm vnic %d set rss failure rc: %d\n",
+ vnic->vnic_id, rc);
+ return rc;
+ }
+ rc = bnge_hwrm_vnic_cfg(bn, vnic);
+ if (rc)
+ netdev_err(bn->netdev, "hwrm vnic %d cfg failure rc: %d\n",
+ vnic->vnic_id, rc);
+ return rc;
+}
+
+static int bnge_setup_vnic(struct bnge_net *bn, struct bnge_vnic_info *vnic)
+{
+ struct bnge_dev *bd = bn->bd;
+ int rc, i, nr_ctxs;
+
+ nr_ctxs = bnge_cal_nr_rss_ctxs(bd->rx_nr_rings);
+ for (i = 0; i < nr_ctxs; i++) {
+ rc = bnge_hwrm_vnic_ctx_alloc(bd, vnic, i);
+ if (rc) {
+ netdev_err(bn->netdev, "hwrm vnic %d ctx %d alloc failure rc: %d\n",
+ vnic->vnic_id, i, rc);
+ return -ENOMEM;
+ }
+ bn->rsscos_nr_ctxs++;
+ }
+
+ rc = bnge_hwrm_vnic_rss_cfg(bn, vnic);
+ if (rc)
+ return rc;
+
+ if (bnge_is_agg_reqd(bd)) {
+ rc = bnge_hwrm_vnic_set_hds(bn, vnic);
+ if (rc)
+ netdev_err(bn->netdev, "hwrm vnic %d set hds failure rc: %d\n",
+ vnic->vnic_id, rc);
+ }
+ return rc;
+}
+
+static void bnge_del_l2_filter(struct bnge_net *bn, struct bnge_l2_filter *fltr)
+{
+ if (!refcount_dec_and_test(&fltr->refcnt))
+ return;
+ hlist_del_rcu(&fltr->base.hash);
+ kfree_rcu(fltr, base.rcu);
+}
+
+static void bnge_init_l2_filter(struct bnge_net *bn,
+ struct bnge_l2_filter *fltr,
+ struct bnge_l2_key *key, u32 idx)
+{
+ struct hlist_head *head;
+
+ ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
+ fltr->l2_key.vlan = key->vlan;
+ fltr->base.type = BNGE_FLTR_TYPE_L2;
+
+ head = &bn->l2_fltr_hash_tbl[idx];
+ hlist_add_head_rcu(&fltr->base.hash, head);
+ refcount_set(&fltr->refcnt, 1);
+}
+
+static struct bnge_l2_filter *__bnge_lookup_l2_filter(struct bnge_net *bn,
+ struct bnge_l2_key *key,
+ u32 idx)
+{
+ struct bnge_l2_filter *fltr;
+ struct hlist_head *head;
+
+ head = &bn->l2_fltr_hash_tbl[idx];
+ hlist_for_each_entry_rcu(fltr, head, base.hash) {
+ struct bnge_l2_key *l2_key = &fltr->l2_key;
+
+ if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
+ l2_key->vlan == key->vlan)
+ return fltr;
+ }
+ return NULL;
+}
+
+static struct bnge_l2_filter *bnge_lookup_l2_filter(struct bnge_net *bn,
+ struct bnge_l2_key *key,
+ u32 idx)
+{
+ struct bnge_l2_filter *fltr;
+
+ rcu_read_lock();
+ fltr = __bnge_lookup_l2_filter(bn, key, idx);
+ if (fltr)
+ refcount_inc(&fltr->refcnt);
+ rcu_read_unlock();
+ return fltr;
+}
+
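+/* Look up or allocate an L2 filter: hash the key, reuse an existing entry
+ * with an elevated refcount, otherwise insert a fresh one into the hash
+ * table.
+ */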
+static struct bnge_l2_filter *bnge_alloc_l2_filter(struct bnge_net *bn,
+ struct bnge_l2_key *key,
+ gfp_t gfp)
+{
+ struct bnge_l2_filter *fltr;
+ u32 idx;
+
+ idx = jhash2(&key->filter_key, BNGE_L2_KEY_SIZE, bn->hash_seed) &
+ BNGE_L2_FLTR_HASH_MASK;
+ fltr = bnge_lookup_l2_filter(bn, key, idx);
+ if (fltr)
+ return fltr;
+
+ fltr = kzalloc(sizeof(*fltr), gfp);
+ if (!fltr)
+ return ERR_PTR(-ENOMEM);
+
+ bnge_init_l2_filter(bn, fltr, key, idx);
+ return fltr;
+}
+
+static int bnge_hwrm_set_vnic_filter(struct bnge_net *bn, u16 vnic_id, u16 idx,
+ const u8 *mac_addr)
+{
+ struct bnge_l2_filter *fltr;
+ struct bnge_l2_key key;
+ int rc;
+
+ ether_addr_copy(key.dst_mac_addr, mac_addr);
+ key.vlan = 0;
+ fltr = bnge_alloc_l2_filter(bn, &key, GFP_KERNEL);
+ if (IS_ERR(fltr))
+ return PTR_ERR(fltr);
+
+ fltr->base.fw_vnic_id = bn->vnic_info[vnic_id].fw_vnic_id;
+ rc = bnge_hwrm_l2_filter_alloc(bn->bd, fltr);
+ if (rc)
+ goto err_del_l2_filter;
+ bn->vnic_info[vnic_id].l2_filters[idx] = fltr;
+ return rc;
+
+err_del_l2_filter:
+ bnge_del_l2_filter(bn, fltr);
+ return rc;
+}
+
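+/* Copy the netdev multicast list into the VNIC's DMA buffer and report
+ * whether it changed; overflow past BNGE_MAX_MC_ADDRS falls back to the
+ * ALL_MCAST mask instead.
+ */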
+static bool bnge_mc_list_updated(struct bnge_net *bn, u32 *rx_mask)
+{
+ struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ struct net_device *dev = bn->netdev;
+ struct netdev_hw_addr *ha;
+ int mc_count = 0, off = 0;
+ bool update = false;
+ u8 *haddr;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ if (mc_count >= BNGE_MAX_MC_ADDRS) {
+ *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+ vnic->mc_list_count = 0;
+ return false;
+ }
+ haddr = ha->addr;
+ if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
+ memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
+ update = true;
+ }
+ off += ETH_ALEN;
+ mc_count++;
+ }
+ if (mc_count)
+ *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
+
+ if (mc_count != vnic->mc_list_count) {
+ vnic->mc_list_count = mc_count;
+ update = true;
+ }
+ return update;
+}
+
+static bool bnge_uc_list_updated(struct bnge_net *bn)
+{
+ struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ struct net_device *dev = bn->netdev;
+ struct netdev_hw_addr *ha;
+ int off = 0;
+
+ if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
+ return true;
+
+ netdev_for_each_uc_addr(ha, dev) {
+ if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
+ return true;
+
+ off += ETH_ALEN;
+ }
+ return false;
+}
+
+static bool bnge_promisc_ok(struct bnge_net *bn)
+{
+ return true;
+}
+
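+/* Sync the default VNIC with the netdev address lists: rebuild the unicast
+ * filters (or turn on promiscuous mode if they overflow) and program the
+ * RX mask, degrading to ALL_MCAST if the multicast filter update fails.
+ */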
+static int bnge_cfg_def_vnic(struct bnge_net *bn)
+{
+ struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ struct net_device *dev = bn->netdev;
+ struct bnge_dev *bd = bn->bd;
+ struct netdev_hw_addr *ha;
+ int i, off = 0, rc;
+ bool uc_update;
+
+ netif_addr_lock_bh(dev);
+ uc_update = bnge_uc_list_updated(bn);
+ netif_addr_unlock_bh(dev);
+
+ if (!uc_update)
+ goto skip_uc;
+
+ for (i = 1; i < vnic->uc_filter_count; i++) {
+ struct bnge_l2_filter *fltr = vnic->l2_filters[i];
+
+ bnge_hwrm_l2_filter_free(bd, fltr);
+ bnge_del_l2_filter(bn, fltr);
+ }
+
+ vnic->uc_filter_count = 1;
+
+ netif_addr_lock_bh(dev);
+ if (netdev_uc_count(dev) > (BNGE_MAX_UC_ADDRS - 1)) {
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+ } else {
+ netdev_for_each_uc_addr(ha, dev) {
+ memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
+ off += ETH_ALEN;
+ vnic->uc_filter_count++;
+ }
+ }
+ netif_addr_unlock_bh(dev);
+
+ for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
+ rc = bnge_hwrm_set_vnic_filter(bn, 0, i, vnic->uc_list + off);
+ if (rc) {
+ netdev_err(dev, "HWRM vnic filter failure rc: %d\n", rc);
+ vnic->uc_filter_count = i;
+ return rc;
+ }
+ }
+
+skip_uc:
+ if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
+ !bnge_promisc_ok(bn))
+ vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+ rc = bnge_hwrm_cfa_l2_set_rx_mask(bd, vnic);
+ if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
+ netdev_info(dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
+ rc);
+ vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+ vnic->mc_list_count = 0;
+ rc = bnge_hwrm_cfa_l2_set_rx_mask(bd, vnic);
+ }
+ if (rc)
+ netdev_err(dev, "HWRM cfa l2 rx mask failure rc: %d\n",
+ rc);
+
+ return rc;
+}
+
+static void bnge_hwrm_vnic_free(struct bnge_net *bn)
+{
+ int i;
+
+ for (i = 0; i < bn->nr_vnics; i++)
+ bnge_hwrm_vnic_free_one(bn->bd, &bn->vnic_info[i]);
+}
+
+static void bnge_hwrm_vnic_ctx_free(struct bnge_net *bn)
+{
+ int i, j;
+
+ for (i = 0; i < bn->nr_vnics; i++) {
+ struct bnge_vnic_info *vnic = &bn->vnic_info[i];
+
+ for (j = 0; j < BNGE_MAX_CTX_PER_VNIC; j++) {
+ if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
+ bnge_hwrm_vnic_ctx_free_one(bn->bd, vnic, j);
+ }
+ }
+ bn->rsscos_nr_ctxs = 0;
+}
+
+static void bnge_hwrm_clear_vnic_filter(struct bnge_net *bn)
+{
+ struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ int i;
+
+ for (i = 0; i < vnic->uc_filter_count; i++) {
+ struct bnge_l2_filter *fltr = vnic->l2_filters[i];
+
+ bnge_hwrm_l2_filter_free(bn->bd, fltr);
+ bnge_del_l2_filter(bn, fltr);
+ }
+
+ vnic->uc_filter_count = 0;
+}
+
+static void bnge_clear_vnic(struct bnge_net *bn)
+{
+ bnge_hwrm_clear_vnic_filter(bn);
+ bnge_hwrm_vnic_free(bn);
+ bnge_hwrm_vnic_ctx_free(bn);
+}
+
+static void bnge_hwrm_rx_ring_free(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ bool close_path)
+{
+ struct bnge_ring_struct *ring = &rxr->rx_ring_struct;
+ u32 grp_idx = rxr->bnapi->index;
+ u32 cmpl_ring_id;
+
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ cmpl_ring_id = bnge_cp_ring_for_rx(rxr);
+ hwrm_ring_free_send_msg(bn, ring,
+ RING_FREE_REQ_RING_TYPE_RX,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bn->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnge_hwrm_rx_agg_ring_free(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ bool close_path)
+{
+ struct bnge_ring_struct *ring = &rxr->rx_agg_ring_struct;
+ u32 grp_idx = rxr->bnapi->index;
+ u32 cmpl_ring_id;
+
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ cmpl_ring_id = bnge_cp_ring_for_rx(rxr);
+ hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_RX_AGG,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bn->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnge_hwrm_tx_ring_free(struct bnge_net *bn,
+ struct bnge_tx_ring_info *txr,
+ bool close_path)
+{
+ struct bnge_ring_struct *ring = &txr->tx_ring_struct;
+ u32 cmpl_ring_id;
+
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ cmpl_ring_id = close_path ? bnge_cp_ring_for_tx(txr) :
+ INVALID_HW_RING_ID;
+ hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_TX,
+ cmpl_ring_id);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnge_hwrm_cp_ring_free(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr)
+{
+ struct bnge_ring_struct *ring;
+
+ ring = &cpr->ring_struct;
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL,
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnge_hwrm_ring_free(struct bnge_net *bn, bool close_path)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ if (!bn->bnapi)
+ return;
+
+ for (i = 0; i < bd->tx_nr_rings; i++)
+ bnge_hwrm_tx_ring_free(bn, &bn->tx_ring[i], close_path);
+
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ bnge_hwrm_rx_ring_free(bn, &bn->rx_ring[i], close_path);
+ bnge_hwrm_rx_agg_ring_free(bn, &bn->rx_ring[i], close_path);
+ }
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr;
+ struct bnge_ring_struct *ring;
+ int j;
+
+ nqr = &bnapi->nq_ring;
+ for (j = 0; j < nqr->cp_ring_count && nqr->cp_ring_arr; j++)
+ bnge_hwrm_cp_ring_free(bn, &nqr->cp_ring_arr[j]);
+
+ ring = &nqr->ring_struct;
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ hwrm_ring_free_send_msg(bn, ring,
+ RING_FREE_REQ_RING_TYPE_NQ,
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bn->grp_info[i].nq_fw_ring_id = INVALID_HW_RING_ID;
+ }
+ }
+}
+
+static void bnge_setup_msix(struct bnge_net *bn)
+{
+ struct net_device *dev = bn->netdev;
+ struct bnge_dev *bd = bn->bd;
+ int len, i;
+
+ len = sizeof(bd->irq_tbl[0].name);
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ int map_idx = bnge_cp_num_to_irq_num(bn, i);
+ char *attr;
+
+ if (bd->flags & BNGE_EN_SHARED_CHNL)
+ attr = "TxRx";
+ else if (i < bd->rx_nr_rings)
+ attr = "rx";
+ else
+ attr = "tx";
+
+ snprintf(bd->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
+ attr, i);
+ bd->irq_tbl[map_idx].handler = bnge_msix;
+ }
+}
+
+static int bnge_setup_interrupts(struct bnge_net *bn)
+{
+ struct net_device *dev = bn->netdev;
+ struct bnge_dev *bd = bn->bd;
+
+ bnge_setup_msix(bn);
+
+ return netif_set_real_num_queues(dev, bd->tx_nr_rings, bd->rx_nr_rings);
+}
+
+static void bnge_hwrm_resource_free(struct bnge_net *bn, bool close_path)
+{
+ bnge_clear_vnic(bn);
+ bnge_hwrm_ring_free(bn, close_path);
+ bnge_hwrm_stat_ctx_free(bn);
+}
+
+static void bnge_free_irq(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ struct bnge_irq *irq;
+ int i;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ int map_idx = bnge_cp_num_to_irq_num(bn, i);
+
+ irq = &bd->irq_tbl[map_idx];
+ if (irq->requested) {
+ if (irq->have_cpumask) {
+ irq_set_affinity_hint(irq->vector, NULL);
+ free_cpumask_var(irq->cpu_mask);
+ irq->have_cpumask = 0;
+ }
+ free_irq(irq->vector, bn->bnapi[i]);
+ }
+
+ irq->requested = 0;
+ }
+}
+
+static int bnge_request_irq(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, rc;
+
+ rc = bnge_setup_interrupts(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "bnge_setup_interrupts err: %d\n", rc);
+ return rc;
+ }
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ int map_idx = bnge_cp_num_to_irq_num(bn, i);
+ struct bnge_irq *irq = &bd->irq_tbl[map_idx];
+
+ rc = request_irq(irq->vector, irq->handler, 0, irq->name,
+ bn->bnapi[i]);
+ if (rc)
+ goto err_free_irq;
+
+ netif_napi_set_irq_locked(&bn->bnapi[i]->napi, irq->vector);
+ irq->requested = 1;
+
+ if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
+ int numa_node = dev_to_node(&bd->pdev->dev);
+
+ irq->have_cpumask = 1;
+ cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+ irq->cpu_mask);
+ rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
+ if (rc) {
+ netdev_warn(bn->netdev,
+ "Set affinity failed, IRQ = %d\n",
+ irq->vector);
+ goto err_free_irq;
+ }
+ }
+ }
+ return 0;
+
+err_free_irq:
+ bnge_free_irq(bn);
+ return rc;
+}
+
+static int bnge_init_chip(struct bnge_net *bn)
+{
+ struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+#define BNGE_DEF_STATS_COAL_TICKS 1000000
+ bn->stats_coal_ticks = BNGE_DEF_STATS_COAL_TICKS;
+
+ rc = bnge_hwrm_stat_ctx_alloc(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "hwrm stat ctx alloc failure rc: %d\n", rc);
+ goto err_out;
+ }
+
+ rc = bnge_hwrm_ring_alloc(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "hwrm ring alloc failure rc: %d\n", rc);
+ goto err_out;
+ }
+
+ rc = bnge_hwrm_vnic_alloc(bd, vnic, bd->rx_nr_rings);
+ if (rc) {
+ netdev_err(bn->netdev, "hwrm vnic alloc failure rc: %d\n", rc);
+ goto err_out;
+ }
+
+ rc = bnge_setup_vnic(bn, vnic);
+ if (rc)
+ goto err_out;
+
+ if (bd->rss_cap & BNGE_RSS_CAP_RSS_HASH_TYPE_DELTA)
+ bnge_hwrm_update_rss_hash_cfg(bn);
+
+ /* Filter for default vnic 0 */
+ rc = bnge_hwrm_set_vnic_filter(bn, 0, 0, bn->netdev->dev_addr);
+ if (rc) {
+ netdev_err(bn->netdev, "HWRM vnic filter failure rc: %d\n", rc);
+ goto err_out;
+ }
+ vnic->uc_filter_count = 1;
+
+ vnic->rx_mask = 0;
+
+ if (bn->netdev->flags & IFF_BROADCAST)
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
+
+ if (bn->netdev->flags & IFF_PROMISC)
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+
+ if (bn->netdev->flags & IFF_ALLMULTI) {
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+ vnic->mc_list_count = 0;
+ } else if (bn->netdev->flags & IFF_MULTICAST) {
+ u32 mask = 0;
+
+ bnge_mc_list_updated(bn, &mask);
+ vnic->rx_mask |= mask;
+ }
+
+ rc = bnge_cfg_def_vnic(bn);
+ if (rc)
+ goto err_out;
+ return 0;
+
+err_out:
+ bnge_hwrm_resource_free(bn, 0);
+ return rc;
+}
+
+static int bnge_napi_poll(struct napi_struct *napi, int budget)
+{
+ int work_done = 0;
+
+ /* defer NAPI implementation to next patch series */
+ napi_complete_done(napi, work_done);
+
+ return work_done;
+}
+
+static void bnge_init_napi(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ struct bnge_napi *bnapi;
+ int i;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ bnapi = bn->bnapi[i];
+ netif_napi_add_config_locked(bn->netdev, &bnapi->napi,
+ bnge_napi_poll, bnapi->index);
+ }
+}
+
+static void bnge_del_napi(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ for (i = 0; i < bd->rx_nr_rings; i++)
+ netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_RX, NULL);
+ for (i = 0; i < bd->tx_nr_rings; i++)
+ netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_TX, NULL);
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+
+ __netif_napi_del_locked(&bnapi->napi);
+ }
+
+ /* Wait for RCU grace period after removing NAPI instances */
+ synchronize_net();
+}
+
+static int bnge_init_nic(struct bnge_net *bn)
+{
+ int rc;
+
+ bnge_init_nq_tree(bn);
+
+ bnge_init_rx_rings(bn);
+ rc = bnge_alloc_rx_ring_pair_bufs(bn);
+ if (rc)
+ return rc;
+
+ bnge_init_tx_rings(bn);
+
+ rc = bnge_init_ring_grps(bn);
+ if (rc)
+ goto err_free_rx_ring_pair_bufs;
+
+ bnge_init_vnics(bn);
+
+ rc = bnge_init_chip(bn);
+ if (rc)
+ goto err_free_ring_grps;
+ return rc;
+
+err_free_ring_grps:
+ bnge_free_ring_grps(bn);
+ return rc;
+
+err_free_rx_ring_pair_bufs:
+ bnge_free_rx_ring_pair_bufs(bn);
+ return rc;
+}
+
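+/* Bring-up path for ndo_open: reserve firmware rings, allocate the core
+ * data structures, register NAPI, request IRQs, then initialize the NIC
+ * and mark the device open.
+ */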
+static int bnge_open_core(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+ netif_carrier_off(bn->netdev);
+
+ rc = bnge_reserve_rings(bd);
+ if (rc) {
+ netdev_err(bn->netdev, "bnge_reserve_rings err: %d\n", rc);
+ return rc;
+ }
+
+ rc = bnge_alloc_core(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "bnge_alloc_core err: %d\n", rc);
+ return rc;
+ }
+
+ bnge_init_napi(bn);
+ rc = bnge_request_irq(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "bnge_request_irq err: %d\n", rc);
+ goto err_del_napi;
+ }
+
+ rc = bnge_init_nic(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "bnge_init_nic err: %d\n", rc);
+ goto err_free_irq;
+ }
+ set_bit(BNGE_STATE_OPEN, &bd->state);
+ return 0;
+
+err_free_irq:
+ bnge_free_irq(bn);
+err_del_napi:
+ bnge_del_napi(bn);
+ bnge_free_core(bn);
+ return rc;
+}
+
+static netdev_tx_t bnge_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static int bnge_open(struct net_device *dev)
+{
+ struct bnge_net *bn = netdev_priv(dev);
+ int rc;
+
+ rc = bnge_open_core(bn);
+ if (rc)
+ netdev_err(dev, "bnge_open_core err: %d\n", rc);
+
+ return rc;
+}
+
+static int bnge_shutdown_nic(struct bnge_net *bn)
+{
+ /* TODO: close_path = 0 until we make NAPI functional */
+ bnge_hwrm_resource_free(bn, 0);
+ return 0;
+}
+
+static void bnge_close_core(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+
+ clear_bit(BNGE_STATE_OPEN, &bd->state);
+ bnge_shutdown_nic(bn);
+ bnge_free_all_rings_bufs(bn);
+ bnge_free_irq(bn);
+ bnge_del_napi(bn);
+
+ bnge_free_core(bn);
+}
+
+static int bnge_close(struct net_device *dev)
+{
+ struct bnge_net *bn = netdev_priv(dev);
+
+ bnge_close_core(bn);
+
+ return 0;
+}
+
+static const struct net_device_ops bnge_netdev_ops = {
+ .ndo_open = bnge_open,
+ .ndo_stop = bnge_close,
+ .ndo_start_xmit = bnge_start_xmit,
+};
+
+static void bnge_init_mac_addr(struct bnge_dev *bd)
+{
+ eth_hw_addr_set(bd->netdev, bd->pf.mac_addr);
+}
+
+static void bnge_set_tpa_flags(struct bnge_dev *bd)
+{
+ struct bnge_net *bn = netdev_priv(bd->netdev);
+
+ bn->priv_flags &= ~BNGE_NET_EN_TPA;
+
+ if (bd->netdev->features & NETIF_F_LRO)
+ bn->priv_flags |= BNGE_NET_EN_LRO;
+ else if (bd->netdev->features & NETIF_F_GRO_HW)
+ bn->priv_flags |= BNGE_NET_EN_GRO;
+}
+
+static void bnge_init_l2_fltr_tbl(struct bnge_net *bn)
+{
+ int i;
+
+ for (i = 0; i < BNGE_L2_FLTR_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&bn->l2_fltr_hash_tbl[i]);
+ get_random_bytes(&bn->hash_seed, sizeof(bn->hash_seed));
+}
+
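+/* Derive ring geometry from the MTU and configured ring sizes: RX buffer
+ * sizes, aggregation/jumbo ring sizing, page counts and masks, and a
+ * completion ring large enough to absorb RX, TPA, aggregation and TX
+ * completions.
+ */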
+void bnge_set_ring_params(struct bnge_dev *bd)
+{
+ struct bnge_net *bn = netdev_priv(bd->netdev);
+ u32 ring_size, rx_size, rx_space, max_rx_cmpl;
+ u32 agg_factor = 0, agg_ring_size = 0;
+
+ /* 8 for CRC and VLAN */
+ rx_size = SKB_DATA_ALIGN(bn->netdev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
+
+ rx_space = rx_size + ALIGN(NET_SKB_PAD, 8) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ bn->rx_copy_thresh = BNGE_RX_COPY_THRESH;
+ ring_size = bn->rx_ring_size;
+ bn->rx_agg_ring_size = 0;
+ bn->rx_agg_nr_pages = 0;
+
+ if (bn->priv_flags & BNGE_NET_EN_TPA)
+ agg_factor = min_t(u32, 4, 65536 / BNGE_RX_PAGE_SIZE);
+
+ bn->priv_flags &= ~BNGE_NET_EN_JUMBO;
+ if (rx_space > PAGE_SIZE) {
+ u32 jumbo_factor;
+
+ bn->priv_flags |= BNGE_NET_EN_JUMBO;
+ jumbo_factor = PAGE_ALIGN(bn->netdev->mtu - 40) >> PAGE_SHIFT;
+ if (jumbo_factor > agg_factor)
+ agg_factor = jumbo_factor;
+ }
+ if (agg_factor) {
+ if (ring_size > BNGE_MAX_RX_DESC_CNT_JUM_ENA) {
+ ring_size = BNGE_MAX_RX_DESC_CNT_JUM_ENA;
+ netdev_warn(bn->netdev, "RX ring size reduced from %d to %d due to jumbo ring\n",
+ bn->rx_ring_size, ring_size);
+ bn->rx_ring_size = ring_size;
+ }
+ agg_ring_size = ring_size * agg_factor;
+
+ bn->rx_agg_nr_pages = bnge_adjust_pow_two(agg_ring_size,
+ RX_DESC_CNT);
+ if (bn->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
+ u32 tmp = agg_ring_size;
+
+ bn->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
+ agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
+ netdev_warn(bn->netdev, "RX agg ring size %d reduced to %d.\n",
+ tmp, agg_ring_size);
+ }
+ bn->rx_agg_ring_size = agg_ring_size;
+ bn->rx_agg_ring_mask = (bn->rx_agg_nr_pages * RX_DESC_CNT) - 1;
+
+ rx_size = SKB_DATA_ALIGN(BNGE_RX_COPY_THRESH + NET_IP_ALIGN);
+ rx_space = rx_size + NET_SKB_PAD +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ }
+
+ bn->rx_buf_use_size = rx_size;
+ bn->rx_buf_size = rx_space;
+
+ bn->rx_nr_pages = bnge_adjust_pow_two(ring_size, RX_DESC_CNT);
+ bn->rx_ring_mask = (bn->rx_nr_pages * RX_DESC_CNT) - 1;
+
+ ring_size = bn->tx_ring_size;
+ bn->tx_nr_pages = bnge_adjust_pow_two(ring_size, TX_DESC_CNT);
+ bn->tx_ring_mask = (bn->tx_nr_pages * TX_DESC_CNT) - 1;
+
+ max_rx_cmpl = bn->rx_ring_size;
+
+ if (bn->priv_flags & BNGE_NET_EN_TPA)
+ max_rx_cmpl += bd->max_tpa_v2;
+ ring_size = max_rx_cmpl * 2 + agg_ring_size + bn->tx_ring_size;
+ bn->cp_ring_size = ring_size;
+
+ bn->cp_nr_pages = bnge_adjust_pow_two(ring_size, CP_DESC_CNT);
+ if (bn->cp_nr_pages > MAX_CP_PAGES) {
+ bn->cp_nr_pages = MAX_CP_PAGES;
+ bn->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
+ netdev_warn(bn->netdev, "completion ring size %d reduced to %d.\n",
+ ring_size, bn->cp_ring_size);
+ }
+ bn->cp_bit = bn->cp_nr_pages * CP_DESC_CNT;
+ bn->cp_ring_mask = bn->cp_bit - 1;
+}
+
+int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs)
+{
+ struct net_device *netdev;
+ struct bnge_net *bn;
+ int rc;
+
+ netdev = alloc_etherdev_mqs(sizeof(*bn), max_irqs * BNGE_MAX_QUEUE,
+ max_irqs);
+ if (!netdev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(netdev, bd->dev);
+ bd->netdev = netdev;
+
+ netdev->netdev_ops = &bnge_netdev_ops;
+
+ bnge_set_ethtool_ops(netdev);
+
+ bn = netdev_priv(netdev);
+ bn->netdev = netdev;
+ bn->bd = bd;
+
+ netdev->min_mtu = ETH_ZLEN;
+ netdev->max_mtu = bd->max_mtu;
+
+ netdev->hw_features = NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_SG |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_PARTIAL |
+ NETIF_F_RXHASH |
+ NETIF_F_RXCSUM |
+ NETIF_F_GRO;
+
+ if (bd->flags & BNGE_EN_UDP_GSO_SUPP)
+ netdev->hw_features |= NETIF_F_GSO_UDP_L4;
+
+ if (BNGE_SUPPORTS_TPA(bd))
+ netdev->hw_features |= NETIF_F_LRO;
+
+ netdev->hw_enc_features = NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_SG |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_PARTIAL;
+
+ if (bd->flags & BNGE_EN_UDP_GSO_SUPP)
+ netdev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
+
+ netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_GRE_CSUM;
+
+ netdev->vlan_features = netdev->hw_features | NETIF_F_HIGHDMA;
+ if (bd->fw_cap & BNGE_FW_CAP_VLAN_RX_STRIP)
+ netdev->hw_features |= BNGE_HW_FEATURE_VLAN_ALL_RX;
+ if (bd->fw_cap & BNGE_FW_CAP_VLAN_TX_INSERT)
+ netdev->hw_features |= BNGE_HW_FEATURE_VLAN_ALL_TX;
+
+ if (BNGE_SUPPORTS_TPA(bd))
+ netdev->hw_features |= NETIF_F_GRO_HW;
+
+ netdev->features |= netdev->hw_features | NETIF_F_HIGHDMA;
+
+ if (netdev->features & NETIF_F_GRO_HW)
+ netdev->features &= ~NETIF_F_LRO;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ netif_set_tso_max_size(netdev, GSO_MAX_SIZE);
+ if (bd->tso_max_segs)
+ netif_set_tso_max_segs(netdev, bd->tso_max_segs);
+
+ bn->rx_ring_size = BNGE_DEFAULT_RX_RING_SIZE;
+ bn->tx_ring_size = BNGE_DEFAULT_TX_RING_SIZE;
+ bn->rx_dir = DMA_FROM_DEVICE;
+
+ bnge_set_tpa_flags(bd);
+ bnge_set_ring_params(bd);
+
+ bnge_init_l2_fltr_tbl(bn);
+ bnge_init_mac_addr(bd);
+
+ netdev->request_ops_lock = true;
+ rc = register_netdev(netdev);
+ if (rc) {
+ dev_err(bd->dev, "Register netdev failed rc: %d\n", rc);
+ goto err_netdev;
+ }
+
+ return 0;
+
+err_netdev:
+ free_netdev(netdev);
+ return rc;
+}
+
+void bnge_netdev_free(struct bnge_dev *bd)
+{
+ struct net_device *netdev = bd->netdev;
+
+ unregister_netdev(netdev);
+ free_netdev(netdev);
+ bd->netdev = NULL;
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
new file mode 100644
index 000000000000..fb3b961536ba
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
@@ -0,0 +1,454 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_NETDEV_H_
+#define _BNGE_NETDEV_H_
+
+#include <linux/bnxt/hsi.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/refcount.h>
+#include "bnge_db.h"
+
+struct tx_bd {
+ __le32 tx_bd_len_flags_type;
+ #define TX_BD_TYPE (0x3f << 0)
+ #define TX_BD_TYPE_SHORT_TX_BD (0x00 << 0)
+ #define TX_BD_TYPE_LONG_TX_BD (0x10 << 0)
+ #define TX_BD_FLAGS_PACKET_END (1 << 6)
+ #define TX_BD_FLAGS_NO_CMPL (1 << 7)
+ #define TX_BD_FLAGS_BD_CNT (0x1f << 8)
+ #define TX_BD_FLAGS_BD_CNT_SHIFT 8
+ #define TX_BD_FLAGS_LHINT (3 << 13)
+ #define TX_BD_FLAGS_LHINT_SHIFT 13
+ #define TX_BD_FLAGS_LHINT_512_AND_SMALLER (0 << 13)
+ #define TX_BD_FLAGS_LHINT_512_TO_1023 (1 << 13)
+ #define TX_BD_FLAGS_LHINT_1024_TO_2047 (2 << 13)
+ #define TX_BD_FLAGS_LHINT_2048_AND_LARGER (3 << 13)
+ #define TX_BD_FLAGS_COAL_NOW (1 << 15)
+ #define TX_BD_LEN (0xffff << 16)
+ #define TX_BD_LEN_SHIFT 16
+ u32 tx_bd_opaque;
+ __le64 tx_bd_haddr;
+} __packed;
+
+struct rx_bd {
+ __le32 rx_bd_len_flags_type;
+ #define RX_BD_TYPE (0x3f << 0)
+ #define RX_BD_TYPE_RX_PACKET_BD 0x4
+ #define RX_BD_TYPE_RX_BUFFER_BD 0x5
+ #define RX_BD_TYPE_RX_AGG_BD 0x6
+ #define RX_BD_TYPE_16B_BD_SIZE (0 << 4)
+ #define RX_BD_TYPE_32B_BD_SIZE (1 << 4)
+ #define RX_BD_TYPE_48B_BD_SIZE (2 << 4)
+ #define RX_BD_TYPE_64B_BD_SIZE (3 << 4)
+ #define RX_BD_FLAGS_SOP (1 << 6)
+ #define RX_BD_FLAGS_EOP (1 << 7)
+ #define RX_BD_FLAGS_BUFFERS (3 << 8)
+ #define RX_BD_FLAGS_1_BUFFER_PACKET (0 << 8)
+ #define RX_BD_FLAGS_2_BUFFER_PACKET (1 << 8)
+ #define RX_BD_FLAGS_3_BUFFER_PACKET (2 << 8)
+ #define RX_BD_FLAGS_4_BUFFER_PACKET (3 << 8)
+ #define RX_BD_LEN (0xffff << 16)
+ #define RX_BD_LEN_SHIFT 16
+ u32 rx_bd_opaque;
+ __le64 rx_bd_haddr;
+};
+
+struct tx_cmp {
+ __le32 tx_cmp_flags_type;
+ #define CMP_TYPE (0x3f << 0)
+ #define CMP_TYPE_TX_L2_CMP 0
+ #define CMP_TYPE_TX_L2_COAL_CMP 2
+ #define CMP_TYPE_TX_L2_PKT_TS_CMP 4
+ #define CMP_TYPE_RX_L2_CMP 17
+ #define CMP_TYPE_RX_AGG_CMP 18
+ #define CMP_TYPE_RX_L2_TPA_START_CMP 19
+ #define CMP_TYPE_RX_L2_TPA_END_CMP 21
+ #define CMP_TYPE_RX_TPA_AGG_CMP 22
+ #define CMP_TYPE_RX_L2_V3_CMP 23
+ #define CMP_TYPE_RX_L2_TPA_START_V3_CMP 25
+ #define CMP_TYPE_STATUS_CMP 32
+ #define CMP_TYPE_REMOTE_DRIVER_REQ 34
+ #define CMP_TYPE_REMOTE_DRIVER_RESP 36
+ #define CMP_TYPE_ERROR_STATUS 48
+ #define CMPL_BASE_TYPE_STAT_EJECT 0x1aUL
+ #define CMPL_BASE_TYPE_HWRM_DONE 0x20UL
+ #define CMPL_BASE_TYPE_HWRM_FWD_REQ 0x22UL
+ #define CMPL_BASE_TYPE_HWRM_FWD_RESP 0x24UL
+ #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define TX_CMP_FLAGS_ERROR (1 << 6)
+ #define TX_CMP_FLAGS_PUSH (1 << 7)
+ u32 tx_cmp_opaque;
+ __le32 tx_cmp_errors_v;
+ #define TX_CMP_V (1 << 0)
+ #define TX_CMP_ERRORS_BUFFER_ERROR (7 << 1)
+ #define TX_CMP_ERRORS_BUFFER_ERROR_NO_ERROR 0
+ #define TX_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT 2
+ #define TX_CMP_ERRORS_BUFFER_ERROR_INVALID_STAG 4
+ #define TX_CMP_ERRORS_BUFFER_ERROR_STAG_BOUNDS 5
+ #define TX_CMP_ERRORS_ZERO_LENGTH_PKT (1 << 4)
+ #define TX_CMP_ERRORS_EXCESSIVE_BD_LEN (1 << 5)
+ #define TX_CMP_ERRORS_DMA_ERROR (1 << 6)
+ #define TX_CMP_ERRORS_HINT_TOO_SHORT (1 << 7)
+ __le32 sq_cons_idx;
+ #define TX_CMP_SQ_CONS_IDX_MASK 0x00ffffff
+};
+
+struct bnge_sw_tx_bd {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+ DEFINE_DMA_UNMAP_LEN(len);
+ struct page *page;
+ u8 is_ts_pkt;
+ u8 is_push;
+ u8 action;
+ unsigned short nr_frags;
+ union {
+ u16 rx_prod;
+ u16 txts_prod;
+ };
+};
+
+struct bnge_sw_rx_bd {
+ void *data;
+ u8 *data_ptr;
+ dma_addr_t mapping;
+};
+
+struct bnge_sw_rx_agg_bd {
+ netmem_ref netmem;
+ unsigned int offset;
+ dma_addr_t mapping;
+};
+
+#define HWRM_RING_ALLOC_TX 0x1
+#define HWRM_RING_ALLOC_RX 0x2
+#define HWRM_RING_ALLOC_AGG 0x4
+#define HWRM_RING_ALLOC_CMPL 0x8
+#define HWRM_RING_ALLOC_NQ 0x10
+
+struct bnge_ring_grp_info {
+ u16 fw_stats_ctx;
+ u16 fw_grp_id;
+ u16 rx_fw_ring_id;
+ u16 agg_fw_ring_id;
+ u16 nq_fw_ring_id;
+};
+
+#define BNGE_RX_COPY_THRESH 256
+
+#define BNGE_HW_FEATURE_VLAN_ALL_RX \
+ (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)
+#define BNGE_HW_FEATURE_VLAN_ALL_TX \
+ (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX)
+
+enum {
+ BNGE_NET_EN_GRO = BIT(0),
+ BNGE_NET_EN_LRO = BIT(1),
+ BNGE_NET_EN_JUMBO = BIT(2),
+};
+
+#define BNGE_NET_EN_TPA (BNGE_NET_EN_GRO | BNGE_NET_EN_LRO)
+
+/* Minimum TX BDs for a TX packet with MAX_SKB_FRAGS + 1. We need one extra
+ * BD because the first TX BD is always a long BD.
+ */
+#define BNGE_MIN_TX_DESC_CNT (MAX_SKB_FRAGS + 2)
+
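+/* Descriptors are 16 bytes, so each ring page holds 1 << (BNGE_PAGE_SHIFT - 4)
+ * of them.  These helpers split a ring index into the page number and the
+ * offset within that page.
+ */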
+#define RX_RING(bn, x) (((x) & (bn)->rx_ring_mask) >> (BNGE_PAGE_SHIFT - 4))
+#define RX_AGG_RING(bn, x) (((x) & (bn)->rx_agg_ring_mask) >> \
+ (BNGE_PAGE_SHIFT - 4))
+#define RX_IDX(x) ((x) & (RX_DESC_CNT - 1))
+
+#define TX_RING(bn, x) (((x) & (bn)->tx_ring_mask) >> (BNGE_PAGE_SHIFT - 4))
+#define TX_IDX(x) ((x) & (TX_DESC_CNT - 1))
+
+#define CP_RING(x) (((x) & ~(CP_DESC_CNT - 1)) >> (BNGE_PAGE_SHIFT - 4))
+#define CP_IDX(x) ((x) & (CP_DESC_CNT - 1))
+
+#define RING_RX(bn, idx) ((idx) & (bn)->rx_ring_mask)
+#define NEXT_RX(idx) ((idx) + 1)
+
+#define RING_RX_AGG(bn, idx) ((idx) & (bn)->rx_agg_ring_mask)
+#define NEXT_RX_AGG(idx) ((idx) + 1)
+
+#define BNGE_NQ_HDL_TYPE_SHIFT 24
+#define BNGE_NQ_HDL_TYPE_RX 0x00
+#define BNGE_NQ_HDL_TYPE_TX 0x01
+
+struct bnge_net {
+ struct bnge_dev *bd;
+ struct net_device *netdev;
+
+ u32 priv_flags;
+
+ u32 rx_ring_size;
+ u32 rx_buf_size;
+ u32 rx_buf_use_size; /* usable size */
+ u32 rx_agg_ring_size;
+ u32 rx_copy_thresh;
+ u32 rx_ring_mask;
+ u32 rx_agg_ring_mask;
+ u16 rx_nr_pages;
+ u16 rx_agg_nr_pages;
+
+ u32 tx_ring_size;
+ u32 tx_ring_mask;
+ u16 tx_nr_pages;
+
+ /* NQs and Completion rings */
+ u32 cp_ring_size;
+ u32 cp_ring_mask;
+ u32 cp_bit;
+ u16 cp_nr_pages;
+
+#define BNGE_L2_FLTR_HASH_SIZE 32
+#define BNGE_L2_FLTR_HASH_MASK (BNGE_L2_FLTR_HASH_SIZE - 1)
+ struct hlist_head l2_fltr_hash_tbl[BNGE_L2_FLTR_HASH_SIZE];
+ u32 hash_seed;
+ u64 toeplitz_prefix;
+
+ struct bnge_napi **bnapi;
+
+ struct bnge_rx_ring_info *rx_ring;
+ struct bnge_tx_ring_info *tx_ring;
+
+ u16 *tx_ring_map;
+ enum dma_data_direction rx_dir;
+
+ /* grp_info indexed by napi/nq index */
+ struct bnge_ring_grp_info *grp_info;
+ struct bnge_vnic_info *vnic_info;
+ int nr_vnics;
+ int total_irqs;
+
+ u32 tx_wake_thresh;
+ u16 rx_offset;
+ u16 rx_dma_offset;
+
+ u8 rss_hash_key[HW_HASH_KEY_SIZE];
+ u8 rss_hash_key_valid:1;
+ u8 rss_hash_key_updated:1;
+ int rsscos_nr_ctxs;
+ u32 stats_coal_ticks;
+};
+
+#define BNGE_DEFAULT_RX_RING_SIZE 511
+#define BNGE_DEFAULT_TX_RING_SIZE 511
+
+int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs);
+void bnge_netdev_free(struct bnge_dev *bd);
+void bnge_set_ring_params(struct bnge_dev *bd);
+
+#if (BNGE_PAGE_SHIFT == 16)
+#define MAX_RX_PAGES_AGG_ENA 1
+#define MAX_RX_PAGES 4
+#define MAX_RX_AGG_PAGES 4
+#define MAX_TX_PAGES 1
+#define MAX_CP_PAGES 16
+#else
+#define MAX_RX_PAGES_AGG_ENA 8
+#define MAX_RX_PAGES 32
+#define MAX_RX_AGG_PAGES 32
+#define MAX_TX_PAGES 8
+#define MAX_CP_PAGES 128
+#endif
+
+#define BNGE_RX_PAGE_SIZE (1 << BNGE_RX_PAGE_SHIFT)
+
+#define RX_DESC_CNT (BNGE_PAGE_SIZE / sizeof(struct rx_bd))
+#define TX_DESC_CNT (BNGE_PAGE_SIZE / sizeof(struct tx_bd))
+#define CP_DESC_CNT (BNGE_PAGE_SIZE / sizeof(struct tx_cmp))
+#define SW_RXBD_RING_SIZE (sizeof(struct bnge_sw_rx_bd) * RX_DESC_CNT)
+#define HW_RXBD_RING_SIZE (sizeof(struct rx_bd) * RX_DESC_CNT)
+#define SW_RXBD_AGG_RING_SIZE (sizeof(struct bnge_sw_rx_agg_bd) * RX_DESC_CNT)
+#define SW_TXBD_RING_SIZE (sizeof(struct bnge_sw_tx_bd) * TX_DESC_CNT)
+#define HW_TXBD_RING_SIZE (sizeof(struct tx_bd) * TX_DESC_CNT)
+#define HW_CMPD_RING_SIZE (sizeof(struct tx_cmp) * CP_DESC_CNT)
+#define BNGE_MAX_RX_DESC_CNT (RX_DESC_CNT * MAX_RX_PAGES - 1)
+#define BNGE_MAX_RX_DESC_CNT_JUM_ENA (RX_DESC_CNT * MAX_RX_PAGES_AGG_ENA - 1)
+#define BNGE_MAX_RX_JUM_DESC_CNT (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
+#define BNGE_MAX_TX_DESC_CNT (TX_DESC_CNT * MAX_TX_PAGES - 1)
+
+#define BNGE_MAX_TXR_PER_NAPI 8
+
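+/* Walk the TX rings attached to a NAPI instance, stopping at the first NULL
+ * slot or after BNGE_MAX_TXR_PER_NAPI entries.
+ */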
+#define bnge_for_each_napi_tx(iter, bnapi, txr) \
+ for (iter = 0, txr = (bnapi)->tx_ring[0]; txr; \
+ txr = (iter < BNGE_MAX_TXR_PER_NAPI - 1) ? \
+ (bnapi)->tx_ring[++iter] : NULL)
+
+#define BNGE_SET_NQ_HDL(cpr) \
+ (((cpr)->cp_ring_type << BNGE_NQ_HDL_TYPE_SHIFT) | (cpr)->cp_idx)
+
+struct bnge_stats_mem {
+ u64 *sw_stats;
+ u64 *hw_masks;
+ void *hw_stats;
+ dma_addr_t hw_stats_map;
+ int len;
+};
+
+struct bnge_cp_ring_info {
+ struct bnge_napi *bnapi;
+ dma_addr_t *desc_mapping;
+ struct tx_cmp **desc_ring;
+ struct bnge_ring_struct ring_struct;
+ u8 cp_ring_type;
+ u8 cp_idx;
+ u32 cp_raw_cons;
+ struct bnge_db_info cp_db;
+};
+
+struct bnge_nq_ring_info {
+ struct bnge_napi *bnapi;
+ dma_addr_t *desc_mapping;
+ struct nqe_cn **desc_ring;
+ struct bnge_ring_struct ring_struct;
+ u32 nq_raw_cons;
+ struct bnge_db_info nq_db;
+
+ struct bnge_stats_mem stats;
+ u32 hw_stats_ctx_id;
+
+ int cp_ring_count;
+ struct bnge_cp_ring_info *cp_ring_arr;
+};
+
+struct bnge_rx_ring_info {
+ struct bnge_napi *bnapi;
+ struct bnge_cp_ring_info *rx_cpr;
+ u16 rx_prod;
+ u16 rx_agg_prod;
+ u16 rx_sw_agg_prod;
+ u16 rx_next_cons;
+ struct bnge_db_info rx_db;
+ struct bnge_db_info rx_agg_db;
+
+ struct rx_bd *rx_desc_ring[MAX_RX_PAGES];
+ struct bnge_sw_rx_bd *rx_buf_ring;
+
+ struct rx_bd *rx_agg_desc_ring[MAX_RX_AGG_PAGES];
+ struct bnge_sw_rx_agg_bd *rx_agg_buf_ring;
+
+ unsigned long *rx_agg_bmap;
+ u16 rx_agg_bmap_size;
+
+ dma_addr_t rx_desc_mapping[MAX_RX_PAGES];
+ dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
+
+ struct bnge_ring_struct rx_ring_struct;
+ struct bnge_ring_struct rx_agg_ring_struct;
+ struct page_pool *page_pool;
+ struct page_pool *head_pool;
+ bool need_head_pool;
+};
+
+struct bnge_tx_ring_info {
+ struct bnge_napi *bnapi;
+ struct bnge_cp_ring_info *tx_cpr;
+ u16 tx_prod;
+ u16 tx_cons;
+ u16 tx_hw_cons;
+ u16 txq_index;
+ u8 tx_napi_idx;
+ u8 kick_pending;
+ struct bnge_db_info tx_db;
+
+ struct tx_bd *tx_desc_ring[MAX_TX_PAGES];
+ struct bnge_sw_tx_bd *tx_buf_ring;
+
+ dma_addr_t tx_desc_mapping[MAX_TX_PAGES];
+
+ u32 dev_state;
+#define BNGE_DEV_STATE_CLOSING 0x1
+
+ struct bnge_ring_struct tx_ring_struct;
+};
+
+struct bnge_napi {
+ struct napi_struct napi;
+ struct bnge_net *bn;
+ int index;
+
+ struct bnge_nq_ring_info nq_ring;
+ struct bnge_rx_ring_info *rx_ring;
+ struct bnge_tx_ring_info *tx_ring[BNGE_MAX_TXR_PER_NAPI];
+};
+
+#define INVALID_STATS_CTX_ID -1
+#define BNGE_VNIC_DEFAULT 0
+#define BNGE_MAX_UC_ADDRS 4
+
+struct bnge_vnic_info {
+ u16 fw_vnic_id;
+#define BNGE_MAX_CTX_PER_VNIC 8
+ u16 fw_rss_cos_lb_ctx[BNGE_MAX_CTX_PER_VNIC];
+ u16 mru;
+ /* index 0 always dev_addr */
+ struct bnge_l2_filter *l2_filters[BNGE_MAX_UC_ADDRS];
+ u16 uc_filter_count;
+ u8 *uc_list;
+ dma_addr_t rss_table_dma_addr;
+ __le16 *rss_table;
+ dma_addr_t rss_hash_key_dma_addr;
+ u64 *rss_hash_key;
+ int rss_table_size;
+#define BNGE_RSS_TABLE_ENTRIES 64
+#define BNGE_RSS_TABLE_SIZE (BNGE_RSS_TABLE_ENTRIES * 4)
+#define BNGE_RSS_TABLE_MAX_TBL 8
+#define BNGE_MAX_RSS_TABLE_SIZE \
+ (BNGE_RSS_TABLE_SIZE * BNGE_RSS_TABLE_MAX_TBL)
+ u32 rx_mask;
+
+ u8 *mc_list;
+ int mc_list_size;
+ int mc_list_count;
+ dma_addr_t mc_list_mapping;
+#define BNGE_MAX_MC_ADDRS 16
+
+ u32 flags;
+#define BNGE_VNIC_RSS_FLAG 1
+#define BNGE_VNIC_MCAST_FLAG 4
+#define BNGE_VNIC_UCAST_FLAG 8
+ u32 vnic_id;
+};
+
+struct bnge_filter_base {
+ struct hlist_node hash;
+ struct list_head list;
+ __le64 filter_id;
+ u8 type;
+#define BNGE_FLTR_TYPE_L2 2
+ u8 flags;
+ u16 rxq;
+ u16 fw_vnic_id;
+ u16 vf_idx;
+ unsigned long state;
+#define BNGE_FLTR_VALID 0
+#define BNGE_FLTR_FW_DELETED 2
+
+ struct rcu_head rcu;
+};
+
+struct bnge_l2_key {
+ union {
+ struct {
+ u8 dst_mac_addr[ETH_ALEN];
+ u16 vlan;
+ };
+ u32 filter_key;
+ };
+};
+
+#define BNGE_L2_KEY_SIZE (sizeof(struct bnge_l2_key) / 4)
+struct bnge_l2_filter {
+ /* base filter must be the first member */
+ struct bnge_filter_base base;
+ struct bnge_l2_key l2_key;
+ refcount_t refcnt;
+};
+
+u16 bnge_cp_ring_for_rx(struct bnge_rx_ring_info *rxr);
+u16 bnge_cp_ring_for_tx(struct bnge_tx_ring_info *txr);
+void bnge_fill_hw_rss_tbl(struct bnge_net *bn, struct bnge_vnic_info *vnic);
+#endif /* _BNGE_NETDEV_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_resc.c b/drivers/net/ethernet/broadcom/bnge/bnge_resc.c
new file mode 100644
index 000000000000..943df5f60f01
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_resc.c
@@ -0,0 +1,617 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+#include "bnge.h"
+#include "bnge_hwrm.h"
+#include "bnge_hwrm_lib.h"
+#include "bnge_resc.h"
+
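+/* Convert a TX ring count into "chunks" of one ring per traffic class; the
+ * chunk count is what the NQ and ring-adjustment math operates on.
+ */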
+static u16 bnge_num_tx_to_cp(struct bnge_dev *bd, u16 tx)
+{
+ u16 tcs = bd->num_tc;
+
+ if (!tcs)
+ tcs = 1;
+
+ return tx / tcs;
+}
+
+static u16 bnge_get_max_func_irqs(struct bnge_dev *bd)
+{
+ struct bnge_hw_resc *hw_resc = &bd->hw_resc;
+
+ return min_t(u16, hw_resc->max_irqs, hw_resc->max_nqs);
+}
+
+static unsigned int bnge_get_max_func_stat_ctxs(struct bnge_dev *bd)
+{
+ return bd->hw_resc.max_stat_ctxs;
+}
+
+bool bnge_aux_has_enough_resources(struct bnge_dev *bd)
+{
+ unsigned int max_stat_ctxs;
+
+ max_stat_ctxs = bnge_get_max_func_stat_ctxs(bd);
+ if (max_stat_ctxs <= BNGE_MIN_ROCE_STAT_CTXS ||
+ bd->nq_nr_rings == max_stat_ctxs)
+ return false;
+
+ return true;
+}
+
+static unsigned int bnge_get_max_func_cp_rings(struct bnge_dev *bd)
+{
+ return bd->hw_resc.max_cp_rings;
+}
+
+static int bnge_aux_get_dflt_msix(struct bnge_dev *bd)
+{
+ int roce_msix = BNGE_MAX_ROCE_MSIX;
+
+ return min_t(int, roce_msix, num_online_cpus() + 1);
+}
+
+u16 bnge_aux_get_msix(struct bnge_dev *bd)
+{
+ if (bnge_is_roce_en(bd))
+ return bd->aux_num_msix;
+
+ return 0;
+}
+
+static void bnge_aux_set_msix_num(struct bnge_dev *bd, u16 num)
+{
+ if (bnge_is_roce_en(bd))
+ bd->aux_num_msix = num;
+}
+
+static u16 bnge_aux_get_stat_ctxs(struct bnge_dev *bd)
+{
+ if (bnge_is_roce_en(bd))
+ return bd->aux_num_stat_ctxs;
+
+ return 0;
+}
+
+static void bnge_aux_set_stat_ctxs(struct bnge_dev *bd, u16 num_aux_ctx)
+{
+ if (bnge_is_roce_en(bd))
+ bd->aux_num_stat_ctxs = num_aux_ctx;
+}
+
+static u16 bnge_func_stat_ctxs_demand(struct bnge_dev *bd)
+{
+ return bd->nq_nr_rings + bnge_aux_get_stat_ctxs(bd);
+}
+
+static int bnge_get_dflt_aux_stat_ctxs(struct bnge_dev *bd)
+{
+ int stat_ctx = 0;
+
+ if (bnge_is_roce_en(bd)) {
+ stat_ctx = BNGE_MIN_ROCE_STAT_CTXS;
+
+ if (!bd->pf.port_id && bd->port_count > 1)
+ stat_ctx++;
+ }
+
+ return stat_ctx;
+}
+
+static u16 bnge_nqs_demand(struct bnge_dev *bd)
+{
+ return bd->nq_nr_rings + bnge_aux_get_msix(bd);
+}
+
+static u16 bnge_cprs_demand(struct bnge_dev *bd)
+{
+ return bd->tx_nr_rings + bd->rx_nr_rings;
+}
+
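+/* Cap an additional MSI-X request to whatever is left once the NQ rings
+ * have claimed their vectors.
+ */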
+static u16 bnge_get_avail_msix(struct bnge_dev *bd, int num)
+{
+ u16 max_irq = bnge_get_max_func_irqs(bd);
+ u16 total_demand = bd->nq_nr_rings + num;
+
+ if (max_irq < total_demand) {
+ num = max_irq - bd->nq_nr_rings;
+ if (num <= 0)
+ return 0;
+ }
+
+ return num;
+}
+
+static u16 bnge_num_cp_to_tx(struct bnge_dev *bd, u16 tx_chunks)
+{
+ return tx_chunks * bd->num_tc;
+}
+
+int bnge_fix_rings_count(u16 *rx, u16 *tx, u16 max, bool shared)
+{
+ u16 _rx = *rx, _tx = *tx;
+
+ if (shared) {
+ *rx = min_t(u16, _rx, max);
+ *tx = min_t(u16, _tx, max);
+ } else {
+ if (max < 2)
+ return -ENOMEM;
+ while (_rx + _tx > max) {
+ if (_rx > _tx && _rx > 1)
+ _rx--;
+ else if (_tx > 1)
+ _tx--;
+ }
+ *rx = _rx;
+ *tx = _tx;
+ }
+
+ return 0;
+}
+
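+/* Like bnge_fix_rings_count(), but works on completion-ring chunks when
+ * multiple traffic classes are configured so that the TX ring count stays
+ * a multiple of the TC count.
+ */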
+static int bnge_adjust_rings(struct bnge_dev *bd, u16 *rx,
+ u16 *tx, u16 max_nq, bool sh)
+{
+ u16 tx_chunks = bnge_num_tx_to_cp(bd, *tx);
+
+ if (tx_chunks != *tx) {
+ u16 tx_saved = tx_chunks;
+ int rc;
+
+ rc = bnge_fix_rings_count(rx, &tx_chunks, max_nq, sh);
+ if (rc)
+ return rc;
+ if (tx_chunks != tx_saved)
+ *tx = bnge_num_cp_to_tx(bd, tx_chunks);
+ return 0;
+ }
+
+ return bnge_fix_rings_count(rx, tx, max_nq, sh);
+}
+
+int bnge_cal_nr_rss_ctxs(u16 rx_rings)
+{
+ if (!rx_rings)
+ return 0;
+
+ return bnge_adjust_pow_two(rx_rings - 1,
+ BNGE_RSS_TABLE_ENTRIES);
+}
+
+static u16 bnge_rss_ctxs_in_use(struct bnge_dev *bd,
+ struct bnge_hw_rings *hwr)
+{
+ return bnge_cal_nr_rss_ctxs(hwr->grp);
+}
+
+static u16 bnge_get_total_vnics(struct bnge_dev *bd, u16 rx_rings)
+{
+ return 1;
+}
+
+u32 bnge_get_rxfh_indir_size(struct bnge_dev *bd)
+{
+ return bnge_cal_nr_rss_ctxs(bd->rx_nr_rings) *
+ BNGE_RSS_TABLE_ENTRIES;
+}
+
+static void bnge_set_dflt_rss_indir_tbl(struct bnge_dev *bd)
+{
+ u16 max_entries, pad;
+ u32 *rss_indir_tbl;
+ int i;
+
+ max_entries = bnge_get_rxfh_indir_size(bd);
+ rss_indir_tbl = &bd->rss_indir_tbl[0];
+
+ for (i = 0; i < max_entries; i++)
+ rss_indir_tbl[i] = ethtool_rxfh_indir_default(i,
+ bd->rx_nr_rings);
+
+ pad = bd->rss_indir_tbl_entries - max_entries;
+ if (pad)
+ memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl));
+}
+
+static void bnge_copy_reserved_rings(struct bnge_dev *bd,
+ struct bnge_hw_rings *hwr)
+{
+ struct bnge_hw_resc *hw_resc = &bd->hw_resc;
+
+ hwr->tx = hw_resc->resv_tx_rings;
+ hwr->rx = hw_resc->resv_rx_rings;
+ hwr->nq = hw_resc->resv_irqs;
+ hwr->cmpl = hw_resc->resv_cp_rings;
+ hwr->grp = hw_resc->resv_hw_ring_grps;
+ hwr->vnic = hw_resc->resv_vnics;
+ hwr->stat = hw_resc->resv_stat_ctxs;
+ hwr->rss_ctx = hw_resc->resv_rsscos_ctxs;
+}
+
+static bool bnge_rings_ok(struct bnge_hw_rings *hwr)
+{
+ return hwr->tx && hwr->rx && hwr->nq && hwr->grp && hwr->vnic &&
+ hwr->stat && hwr->cmpl;
+}
+
+static bool bnge_need_reserve_rings(struct bnge_dev *bd)
+{
+ struct bnge_hw_resc *hw_resc = &bd->hw_resc;
+ u16 cprs = bnge_cprs_demand(bd);
+ u16 rx = bd->rx_nr_rings, stat;
+ u16 nqs = bnge_nqs_demand(bd);
+ u16 vnic;
+
+ if (hw_resc->resv_tx_rings != bd->tx_nr_rings)
+ return true;
+
+ vnic = bnge_get_total_vnics(bd, rx);
+
+ if (bnge_is_agg_reqd(bd))
+ rx <<= 1;
+ stat = bnge_func_stat_ctxs_demand(bd);
+ if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cprs ||
+ hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat)
+ return true;
+ if (hw_resc->resv_irqs != nqs)
+ return true;
+
+ return false;
+}
+
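+/* Reserve rings, NQs, VNICs and stat contexts with the firmware, then trim
+ * the driver's ring counts to what was actually granted.
+ */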
+int bnge_reserve_rings(struct bnge_dev *bd)
+{
+ u16 aux_dflt_msix = bnge_aux_get_dflt_msix(bd);
+ struct bnge_hw_rings hwr = {0};
+ u16 rx_rings, old_rx_rings;
+ u16 nq = bd->nq_nr_rings;
+ u16 aux_msix = 0;
+ bool sh = false;
+ u16 tx_cp;
+ int rc;
+
+ if (!bnge_need_reserve_rings(bd))
+ return 0;
+
+ if (!bnge_aux_registered(bd)) {
+ aux_msix = bnge_get_avail_msix(bd, aux_dflt_msix);
+ if (!aux_msix)
+ bnge_aux_set_stat_ctxs(bd, 0);
+
+ if (aux_msix > aux_dflt_msix)
+ aux_msix = aux_dflt_msix;
+ hwr.nq = nq + aux_msix;
+ } else {
+ hwr.nq = bnge_nqs_demand(bd);
+ }
+
+ hwr.tx = bd->tx_nr_rings;
+ hwr.rx = bd->rx_nr_rings;
+ if (bd->flags & BNGE_EN_SHARED_CHNL)
+ sh = true;
+ hwr.cmpl = hwr.rx + hwr.tx;
+
+ hwr.vnic = bnge_get_total_vnics(bd, hwr.rx);
+
+ if (bnge_is_agg_reqd(bd))
+ hwr.rx <<= 1;
+ hwr.grp = bd->rx_nr_rings;
+ hwr.rss_ctx = bnge_rss_ctxs_in_use(bd, &hwr);
+ hwr.stat = bnge_func_stat_ctxs_demand(bd);
+ old_rx_rings = bd->hw_resc.resv_rx_rings;
+
+ rc = bnge_hwrm_reserve_rings(bd, &hwr);
+ if (rc)
+ return rc;
+
+ bnge_copy_reserved_rings(bd, &hwr);
+
+ rx_rings = hwr.rx;
+ if (bnge_is_agg_reqd(bd)) {
+ if (hwr.rx >= 2)
+ rx_rings = hwr.rx >> 1;
+ else
+ return -ENOMEM;
+ }
+
+ rx_rings = min_t(u16, rx_rings, hwr.grp);
+ hwr.nq = min_t(u16, hwr.nq, bd->nq_nr_rings);
+ if (hwr.stat > bnge_aux_get_stat_ctxs(bd))
+ hwr.stat -= bnge_aux_get_stat_ctxs(bd);
+ hwr.nq = min_t(u16, hwr.nq, hwr.stat);
+
+ /* Adjust the rings */
+ rc = bnge_adjust_rings(bd, &rx_rings, &hwr.tx, hwr.nq, sh);
+ if (bnge_is_agg_reqd(bd))
+ hwr.rx = rx_rings << 1;
+ tx_cp = hwr.tx;
+ hwr.nq = sh ? max_t(u16, tx_cp, rx_rings) : tx_cp + rx_rings;
+ bd->tx_nr_rings = hwr.tx;
+
+ if (rx_rings != bd->rx_nr_rings)
+ dev_warn(bd->dev, "RX ring reservation reduced to %d from the %d requested\n",
+ rx_rings, bd->rx_nr_rings);
+
+ bd->rx_nr_rings = rx_rings;
+ bd->nq_nr_rings = hwr.nq;
+
+ if (!bnge_rings_ok(&hwr))
+ return -ENOMEM;
+
+ if (old_rx_rings != bd->hw_resc.resv_rx_rings)
+ bnge_set_dflt_rss_indir_tbl(bd);
+
+ if (!bnge_aux_registered(bd)) {
+ u16 resv_msix, resv_ctx, aux_ctxs;
+ struct bnge_hw_resc *hw_resc;
+
+ hw_resc = &bd->hw_resc;
+ resv_msix = hw_resc->resv_irqs - bd->nq_nr_rings;
+ aux_msix = min_t(u16, resv_msix, aux_msix);
+ bnge_aux_set_msix_num(bd, aux_msix);
+ resv_ctx = hw_resc->resv_stat_ctxs - bd->nq_nr_rings;
+ aux_ctxs = min(resv_ctx, bnge_aux_get_stat_ctxs(bd));
+ bnge_aux_set_stat_ctxs(bd, aux_ctxs);
+ }
+
+ return rc;
+}
+
+int bnge_alloc_irqs(struct bnge_dev *bd)
+{
+ u16 aux_msix, tx_cp, num_entries;
+ int i, irqs_demand, rc;
+ u16 max, min = 1;
+
+ irqs_demand = bnge_nqs_demand(bd);
+ max = bnge_get_max_func_irqs(bd);
+ if (irqs_demand > max)
+ irqs_demand = max;
+
+ if (!(bd->flags & BNGE_EN_SHARED_CHNL))
+ min = 2;
+
+ irqs_demand = pci_alloc_irq_vectors(bd->pdev, min, irqs_demand,
+ PCI_IRQ_MSIX);
+ aux_msix = bnge_aux_get_msix(bd);
+ if (irqs_demand < 0 || irqs_demand < aux_msix) {
+ rc = -ENODEV;
+ goto err_free_irqs;
+ }
+
+ num_entries = irqs_demand;
+ if (pci_msix_can_alloc_dyn(bd->pdev))
+ num_entries = max;
+ bd->irq_tbl = kcalloc(num_entries, sizeof(*bd->irq_tbl), GFP_KERNEL);
+ if (!bd->irq_tbl) {
+ rc = -ENOMEM;
+ goto err_free_irqs;
+ }
+
+ for (i = 0; i < irqs_demand; i++)
+ bd->irq_tbl[i].vector = pci_irq_vector(bd->pdev, i);
+
+ bd->irqs_acquired = irqs_demand;
+ /* Reduce the ring counts based on the number of vectors allocated.
+ * We don't need to consider NQs as they have already been calculated
+ * and cannot be fewer than irqs_demand.
+ */
+ rc = bnge_adjust_rings(bd, &bd->rx_nr_rings,
+ &bd->tx_nr_rings,
+ irqs_demand - aux_msix, min == 1);
+ if (rc)
+ goto err_free_irqs;
+
+ tx_cp = bnge_num_tx_to_cp(bd, bd->tx_nr_rings);
+ bd->nq_nr_rings = (min == 1) ?
+ max_t(u16, tx_cp, bd->rx_nr_rings) :
+ tx_cp + bd->rx_nr_rings;
+
+ /* Readjust tx_nr_rings_per_tc */
+ if (!bd->num_tc)
+ bd->tx_nr_rings_per_tc = bd->tx_nr_rings;
+
+ return 0;
+
+err_free_irqs:
+ dev_err(bd->dev, "Failed to allocate IRQs err = %d\n", rc);
+ bnge_free_irqs(bd);
+ return rc;
+}
+
+void bnge_free_irqs(struct bnge_dev *bd)
+{
+ pci_free_irq_vectors(bd->pdev);
+ kfree(bd->irq_tbl);
+ bd->irq_tbl = NULL;
+}
+
+static void _bnge_get_max_rings(struct bnge_dev *bd, u16 *max_rx,
+ u16 *max_tx, u16 *max_nq)
+{
+ struct bnge_hw_resc *hw_resc = &bd->hw_resc;
+ u16 max_ring_grps = 0, max_cp;
+ int rc;
+
+ *max_tx = hw_resc->max_tx_rings;
+ *max_rx = hw_resc->max_rx_rings;
+ *max_nq = min_t(int, bnge_get_max_func_irqs(bd),
+ hw_resc->max_stat_ctxs);
+ max_ring_grps = hw_resc->max_hw_ring_grps;
+ if (bnge_is_agg_reqd(bd))
+ *max_rx >>= 1;
+
+ max_cp = bnge_get_max_func_cp_rings(bd);
+
+ /* Fix RX and TX rings according to the number of CPs available */
+ rc = bnge_fix_rings_count(max_rx, max_tx, max_cp, false);
+ if (rc) {
+ *max_rx = 0;
+ *max_tx = 0;
+ }
+
+ *max_rx = min_t(int, *max_rx, max_ring_grps);
+}
+
+static int bnge_get_max_rings(struct bnge_dev *bd, u16 *max_rx,
+ u16 *max_tx, bool shared)
+{
+ u16 rx, tx, nq;
+
+ _bnge_get_max_rings(bd, &rx, &tx, &nq);
+ *max_rx = rx;
+ *max_tx = tx;
+ if (!rx || !tx || !nq)
+ return -ENOMEM;
+
+ return bnge_fix_rings_count(max_rx, max_tx, nq, shared);
+}
+
+static int bnge_get_dflt_rings(struct bnge_dev *bd, u16 *max_rx, u16 *max_tx,
+ bool shared)
+{
+ int rc;
+
+ rc = bnge_get_max_rings(bd, max_rx, max_tx, shared);
+ if (rc) {
+ dev_info(bd->dev, "Not enough rings available\n");
+ return rc;
+ }
+
+ if (bnge_is_roce_en(bd)) {
+ int max_cp, max_stat, max_irq;
+
+ /* Reserve minimum resources for RoCE */
+ max_cp = bnge_get_max_func_cp_rings(bd);
+ max_stat = bnge_get_max_func_stat_ctxs(bd);
+ max_irq = bnge_get_max_func_irqs(bd);
+ if (max_cp <= BNGE_MIN_ROCE_CP_RINGS ||
+ max_irq <= BNGE_MIN_ROCE_CP_RINGS ||
+ max_stat <= BNGE_MIN_ROCE_STAT_CTXS)
+ return 0;
+
+ max_cp -= BNGE_MIN_ROCE_CP_RINGS;
+ max_irq -= BNGE_MIN_ROCE_CP_RINGS;
+ max_stat -= BNGE_MIN_ROCE_STAT_CTXS;
+ max_cp = min_t(u16, max_cp, max_irq);
+ max_cp = min_t(u16, max_cp, max_stat);
+ rc = bnge_adjust_rings(bd, max_rx, max_tx, max_cp, shared);
+ if (rc)
+ rc = 0;
+ }
+
+ return rc;
+}
+
+/* In the initial default shared ring setting, each shared ring must have an
+ * RX/TX ring pair.
+ */
+static void bnge_trim_dflt_sh_rings(struct bnge_dev *bd)
+{
+ bd->nq_nr_rings = min_t(u16, bd->tx_nr_rings_per_tc, bd->rx_nr_rings);
+ bd->rx_nr_rings = bd->nq_nr_rings;
+ bd->tx_nr_rings_per_tc = bd->nq_nr_rings;
+ bd->tx_nr_rings = bd->tx_nr_rings_per_tc;
+}
+
+static int bnge_net_init_dflt_rings(struct bnge_dev *bd, bool sh)
+{
+ u16 dflt_rings, max_rx_rings, max_tx_rings;
+ int rc;
+
+ if (sh)
+ bd->flags |= BNGE_EN_SHARED_CHNL;
+
+ dflt_rings = netif_get_num_default_rss_queues();
+
+ rc = bnge_get_dflt_rings(bd, &max_rx_rings, &max_tx_rings, sh);
+ if (rc)
+ return rc;
+ bd->rx_nr_rings = min_t(u16, dflt_rings, max_rx_rings);
+ bd->tx_nr_rings_per_tc = min_t(u16, dflt_rings, max_tx_rings);
+ if (sh)
+ bnge_trim_dflt_sh_rings(bd);
+ else
+ bd->nq_nr_rings = bd->tx_nr_rings_per_tc + bd->rx_nr_rings;
+ bd->tx_nr_rings = bd->tx_nr_rings_per_tc;
+
+ rc = bnge_reserve_rings(bd);
+ if (rc && rc != -ENODEV)
+ dev_warn(bd->dev, "Unable to reserve tx rings\n");
+ bd->tx_nr_rings_per_tc = bd->tx_nr_rings;
+ if (sh)
+ bnge_trim_dflt_sh_rings(bd);
+
+ /* Rings may have been reduced, re-reserve them again */
+ if (bnge_need_reserve_rings(bd)) {
+ rc = bnge_reserve_rings(bd);
+ if (rc && rc != -ENODEV)
+ dev_warn(bd->dev, "Reservation of reduced rings failed\n");
+ bd->tx_nr_rings_per_tc = bd->tx_nr_rings;
+ }
+ if (rc) {
+ bd->tx_nr_rings = 0;
+ bd->rx_nr_rings = 0;
+ }
+
+ return rc;
+}
+
+static int bnge_alloc_rss_indir_tbl(struct bnge_dev *bd)
+{
+ u16 entries;
+
+ entries = BNGE_MAX_RSS_TABLE_ENTRIES;
+
+ bd->rss_indir_tbl_entries = entries;
+ bd->rss_indir_tbl =
+ kmalloc_array(entries, sizeof(*bd->rss_indir_tbl), GFP_KERNEL);
+ if (!bd->rss_indir_tbl)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int bnge_net_init_dflt_config(struct bnge_dev *bd)
+{
+ struct bnge_hw_resc *hw_resc;
+ int rc;
+
+ rc = bnge_alloc_rss_indir_tbl(bd);
+ if (rc)
+ return rc;
+
+ rc = bnge_net_init_dflt_rings(bd, true);
+ if (rc)
+ goto err_free_tbl;
+
+ hw_resc = &bd->hw_resc;
+ bd->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows +
+ BNGE_L2_FLTR_MAX_FLTR;
+
+ return 0;
+
+err_free_tbl:
+ kfree(bd->rss_indir_tbl);
+ bd->rss_indir_tbl = NULL;
+ return rc;
+}
+
+void bnge_net_uninit_dflt_config(struct bnge_dev *bd)
+{
+ kfree(bd->rss_indir_tbl);
+ bd->rss_indir_tbl = NULL;
+}
+
+void bnge_aux_init_dflt_config(struct bnge_dev *bd)
+{
+ bd->aux_num_msix = bnge_aux_get_dflt_msix(bd);
+ bd->aux_num_stat_ctxs = bnge_get_dflt_aux_stat_ctxs(bd);
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_resc.h b/drivers/net/ethernet/broadcom/bnge/bnge_resc.h
new file mode 100644
index 000000000000..b62a634669f6
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_resc.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_RESC_H_
+#define _BNGE_RESC_H_
+
+#include "bnge_netdev.h"
+#include "bnge_rmem.h"
+
+struct bnge_hw_resc {
+ u16 min_rsscos_ctxs;
+ u16 max_rsscos_ctxs;
+ u16 resv_rsscos_ctxs;
+ u16 min_cp_rings;
+ u16 max_cp_rings;
+ u16 resv_cp_rings;
+ u16 min_tx_rings;
+ u16 max_tx_rings;
+ u16 resv_tx_rings;
+ u16 max_tx_sch_inputs;
+ u16 min_rx_rings;
+ u16 max_rx_rings;
+ u16 resv_rx_rings;
+ u16 min_hw_ring_grps;
+ u16 max_hw_ring_grps;
+ u16 resv_hw_ring_grps;
+ u16 min_l2_ctxs;
+ u16 max_l2_ctxs;
+ u16 min_vnics;
+ u16 max_vnics;
+ u16 resv_vnics;
+ u16 min_stat_ctxs;
+ u16 max_stat_ctxs;
+ u16 resv_stat_ctxs;
+ u16 max_nqs;
+ u16 max_irqs;
+ u16 resv_irqs;
+ u32 max_encap_records;
+ u32 max_decap_records;
+ u32 max_tx_em_flows;
+ u32 max_tx_wm_flows;
+ u32 max_rx_em_flows;
+ u32 max_rx_wm_flows;
+};
+
+struct bnge_hw_rings {
+ u16 tx;
+ u16 rx;
+ u16 grp;
+ u16 nq;
+ u16 cmpl;
+ u16 stat;
+ u16 vnic;
+ u16 rss_ctx;
+};
+
+/* "TXRX", 2 hyphens, plus maximum integer */
+#define BNGE_IRQ_NAME_EXTRA 17
+struct bnge_irq {
+ irq_handler_t handler;
+ unsigned int vector;
+ u8 requested:1;
+ u8 have_cpumask:1;
+ char name[IFNAMSIZ + BNGE_IRQ_NAME_EXTRA];
+ cpumask_var_t cpu_mask;
+};
+
+int bnge_reserve_rings(struct bnge_dev *bd);
+int bnge_fix_rings_count(u16 *rx, u16 *tx, u16 max, bool shared);
+int bnge_alloc_irqs(struct bnge_dev *bd);
+void bnge_free_irqs(struct bnge_dev *bd);
+int bnge_net_init_dflt_config(struct bnge_dev *bd);
+void bnge_net_uninit_dflt_config(struct bnge_dev *bd);
+void bnge_aux_init_dflt_config(struct bnge_dev *bd);
+u32 bnge_get_rxfh_indir_size(struct bnge_dev *bd);
+int bnge_cal_nr_rss_ctxs(u16 rx_rings);
+bool bnge_aux_has_enough_resources(struct bnge_dev *bd);
+
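+/* Number of ent_per_blk-sized blocks needed for total_ent entries, rounded
+ * up to a power of two (0 or 1 blocks become 1 or 2 respectively).
+ */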
+static inline u32
+bnge_adjust_pow_two(u32 total_ent, u16 ent_per_blk)
+{
+ u32 blks = total_ent / ent_per_blk;
+
+ if (blks == 0 || blks == 1)
+ return ++blks;
+
+ if (!is_power_of_2(blks))
+ blks = roundup_pow_of_two(blks);
+
+ return blks;
+}
+
+#define BNGE_MAX_ROCE_MSIX 64
+#define BNGE_MIN_ROCE_CP_RINGS 2
+#define BNGE_MIN_ROCE_STAT_CTXS 1
+
+#endif /* _BNGE_RESC_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c
new file mode 100644
index 000000000000..79f5ce2e5d08
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c
@@ -0,0 +1,499 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/crash_dump.h>
+#include <linux/bnxt/hsi.h>
+
+#include "bnge.h"
+#include "bnge_hwrm_lib.h"
+#include "bnge_rmem.h"
+
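+/* Fill freshly allocated context memory with the init value reported by the
+ * firmware, either across the whole buffer or at a fixed offset within
+ * every entry.
+ */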
+static void bnge_init_ctx_mem(struct bnge_ctx_mem_type *ctxm,
+ void *p, int len)
+{
+ u8 init_val = ctxm->init_value;
+ u16 offset = ctxm->init_offset;
+ u8 *p2 = p;
+ int i;
+
+ if (!init_val)
+ return;
+ if (offset == BNGE_CTX_INIT_INVALID_OFFSET) {
+ memset(p, init_val, len);
+ return;
+ }
+ for (i = 0; i < len; i += ctxm->entry_size)
+ *(p2 + i + offset) = init_val;
+}
+
+void bnge_free_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem)
+{
+ struct pci_dev *pdev = bd->pdev;
+ int i;
+
+ if (!rmem->pg_arr)
+ goto skip_pages;
+
+ for (i = 0; i < rmem->nr_pages; i++) {
+ if (!rmem->pg_arr[i])
+ continue;
+
+ dma_free_coherent(&pdev->dev, rmem->page_size,
+ rmem->pg_arr[i], rmem->dma_arr[i]);
+
+ rmem->pg_arr[i] = NULL;
+ }
+skip_pages:
+ if (rmem->pg_tbl) {
+ size_t pg_tbl_size = rmem->nr_pages * 8;
+
+ if (rmem->flags & BNGE_RMEM_USE_FULL_PAGE_FLAG)
+ pg_tbl_size = rmem->page_size;
+ dma_free_coherent(&pdev->dev, pg_tbl_size,
+ rmem->pg_tbl, rmem->dma_pg_tbl);
+ rmem->pg_tbl = NULL;
+ }
+ if (rmem->vmem_size && *rmem->vmem) {
+ vfree(*rmem->vmem);
+ *rmem->vmem = NULL;
+ }
+}
+
+int bnge_alloc_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem)
+{
+ struct pci_dev *pdev = bd->pdev;
+ u64 valid_bit = 0;
+ int i;
+
+ if (rmem->flags & (BNGE_RMEM_VALID_PTE_FLAG | BNGE_RMEM_RING_PTE_FLAG))
+ valid_bit = PTU_PTE_VALID;
+
+ if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
+ size_t pg_tbl_size = rmem->nr_pages * 8;
+
+ if (rmem->flags & BNGE_RMEM_USE_FULL_PAGE_FLAG)
+ pg_tbl_size = rmem->page_size;
+ rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
+ &rmem->dma_pg_tbl,
+ GFP_KERNEL);
+ if (!rmem->pg_tbl)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < rmem->nr_pages; i++) {
+ u64 extra_bits = valid_bit;
+
+ rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
+ rmem->page_size,
+ &rmem->dma_arr[i],
+ GFP_KERNEL);
+ if (!rmem->pg_arr[i])
+ goto err_free_ring;
+
+ if (rmem->ctx_mem)
+ bnge_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
+ rmem->page_size);
+
+ if (rmem->nr_pages > 1 || rmem->depth > 0) {
+ if (i == rmem->nr_pages - 2 &&
+ (rmem->flags & BNGE_RMEM_RING_PTE_FLAG))
+ extra_bits |= PTU_PTE_NEXT_TO_LAST;
+ else if (i == rmem->nr_pages - 1 &&
+ (rmem->flags & BNGE_RMEM_RING_PTE_FLAG))
+ extra_bits |= PTU_PTE_LAST;
+ rmem->pg_tbl[i] =
+ cpu_to_le64(rmem->dma_arr[i] | extra_bits);
+ }
+ }
+
+ if (rmem->vmem_size) {
+ *rmem->vmem = vzalloc(rmem->vmem_size);
+ if (!(*rmem->vmem))
+ goto err_free_ring;
+ }
+ return 0;
+
+err_free_ring:
+ bnge_free_ring(bd, rmem);
+ return -ENOMEM;
+}
+
+static int bnge_alloc_ctx_one_lvl(struct bnge_dev *bd,
+ struct bnge_ctx_pg_info *ctx_pg)
+{
+ struct bnge_ring_mem_info *rmem = &ctx_pg->ring_mem;
+
+ rmem->page_size = BNGE_PAGE_SIZE;
+ rmem->pg_arr = ctx_pg->ctx_pg_arr;
+ rmem->dma_arr = ctx_pg->ctx_dma_arr;
+ rmem->flags = BNGE_RMEM_VALID_PTE_FLAG;
+ if (rmem->depth >= 1)
+ rmem->flags |= BNGE_RMEM_USE_FULL_PAGE_FLAG;
+ return bnge_alloc_ring(bd, rmem);
+}
+
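+/* Allocate the pages backing one context memory region.  Regions that do
+ * not fit in a single level of MAX_CTX_PAGES pages use a second level of
+ * page tables.
+ */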
+static int bnge_alloc_ctx_pg_tbls(struct bnge_dev *bd,
+ struct bnge_ctx_pg_info *ctx_pg, u32 mem_size,
+ u8 depth, struct bnge_ctx_mem_type *ctxm)
+{
+ struct bnge_ring_mem_info *rmem = &ctx_pg->ring_mem;
+ int rc;
+
+ if (!mem_size)
+ return -EINVAL;
+
+ ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNGE_PAGE_SIZE);
+ if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
+ ctx_pg->nr_pages = 0;
+ return -EINVAL;
+ }
+ if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
+ int nr_tbls, i;
+
+ rmem->depth = 2;
+ ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
+ GFP_KERNEL);
+ if (!ctx_pg->ctx_pg_tbl)
+ return -ENOMEM;
+ nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
+ rmem->nr_pages = nr_tbls;
+ rc = bnge_alloc_ctx_one_lvl(bd, ctx_pg);
+ if (rc)
+ return rc;
+ for (i = 0; i < nr_tbls; i++) {
+ struct bnge_ctx_pg_info *pg_tbl;
+
+ pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
+ if (!pg_tbl)
+ return -ENOMEM;
+ ctx_pg->ctx_pg_tbl[i] = pg_tbl;
+ rmem = &pg_tbl->ring_mem;
+ rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
+ rmem->dma_pg_tbl = ctx_pg->ctx_dma_arr[i];
+ rmem->depth = 1;
+ rmem->nr_pages = MAX_CTX_PAGES;
+ rmem->ctx_mem = ctxm;
+ if (i == (nr_tbls - 1)) {
+ int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
+
+ if (rem)
+ rmem->nr_pages = rem;
+ }
+ rc = bnge_alloc_ctx_one_lvl(bd, pg_tbl);
+ if (rc)
+ break;
+ }
+ } else {
+ rmem->nr_pages = DIV_ROUND_UP(mem_size, BNGE_PAGE_SIZE);
+ if (rmem->nr_pages > 1 || depth)
+ rmem->depth = 1;
+ rmem->ctx_mem = ctxm;
+ rc = bnge_alloc_ctx_one_lvl(bd, ctx_pg);
+ }
+
+ return rc;
+}
+
+static void bnge_free_ctx_pg_tbls(struct bnge_dev *bd,
+ struct bnge_ctx_pg_info *ctx_pg)
+{
+ struct bnge_ring_mem_info *rmem = &ctx_pg->ring_mem;
+
+ if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
+ ctx_pg->ctx_pg_tbl) {
+ int i, nr_tbls = rmem->nr_pages;
+
+ for (i = 0; i < nr_tbls; i++) {
+ struct bnge_ctx_pg_info *pg_tbl;
+ struct bnge_ring_mem_info *rmem2;
+
+ pg_tbl = ctx_pg->ctx_pg_tbl[i];
+ if (!pg_tbl)
+ continue;
+ rmem2 = &pg_tbl->ring_mem;
+ bnge_free_ring(bd, rmem2);
+ ctx_pg->ctx_pg_arr[i] = NULL;
+ kfree(pg_tbl);
+ ctx_pg->ctx_pg_tbl[i] = NULL;
+ }
+ kfree(ctx_pg->ctx_pg_tbl);
+ ctx_pg->ctx_pg_tbl = NULL;
+ }
+ bnge_free_ring(bd, rmem);
+ ctx_pg->nr_pages = 0;
+}
+
+static int bnge_setup_ctxm_pg_tbls(struct bnge_dev *bd,
+ struct bnge_ctx_mem_type *ctxm, u32 entries,
+ u8 pg_lvl)
+{
+ struct bnge_ctx_pg_info *ctx_pg = ctxm->pg_info;
+ int i, rc = 0, n = 1;
+ u32 mem_size;
+
+ if (!ctxm->entry_size || !ctx_pg)
+ return -EINVAL;
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ if (ctxm->entry_multiple)
+ entries = roundup(entries, ctxm->entry_multiple);
+ entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
+ mem_size = entries * ctxm->entry_size;
+ for (i = 0; i < n && !rc; i++) {
+ ctx_pg[i].entries = entries;
+ rc = bnge_alloc_ctx_pg_tbls(bd, &ctx_pg[i], mem_size, pg_lvl,
+ ctxm->init_value ? ctxm : NULL);
+ }
+
+ return rc;
+}
+
+static int bnge_backing_store_cfg(struct bnge_dev *bd, u32 ena)
+{
+ struct bnge_ctx_mem_info *ctx = bd->ctx;
+ struct bnge_ctx_mem_type *ctxm;
+ u16 last_type;
+ int rc = 0;
+ u16 type;
+
+ if (!ena)
+ return 0;
+ else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM)
+ last_type = BNGE_CTX_MAX - 1;
+ else
+ last_type = BNGE_CTX_L2_MAX - 1;
+ ctx->ctx_arr[last_type].last = 1;
+
+ for (type = 0; type < BNGE_CTX_V2_MAX; type++) {
+ ctxm = &ctx->ctx_arr[type];
+
+ rc = bnge_hwrm_func_backing_store(bd, ctxm, ctxm->last);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+void bnge_free_ctx_mem(struct bnge_dev *bd)
+{
+ struct bnge_ctx_mem_info *ctx = bd->ctx;
+ u16 type;
+
+ if (!ctx)
+ return;
+
+ for (type = 0; type < BNGE_CTX_V2_MAX; type++) {
+ struct bnge_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ struct bnge_ctx_pg_info *ctx_pg = ctxm->pg_info;
+ int i, n = 1;
+
+ if (!ctx_pg)
+ continue;
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ for (i = 0; i < n; i++)
+ bnge_free_ctx_pg_tbls(bd, &ctx_pg[i]);
+
+ kfree(ctx_pg);
+ ctxm->pg_info = NULL;
+ }
+
+ ctx->flags &= ~BNGE_CTX_FLAG_INITED;
+ kfree(ctx);
+ bd->ctx = NULL;
+}
+
+#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
+ (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
+ FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
+ FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
+ FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
+ FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
+
+int bnge_alloc_ctx_mem(struct bnge_dev *bd)
+{
+ struct bnge_ctx_mem_type *ctxm;
+ struct bnge_ctx_mem_info *ctx;
+ u32 l2_qps, qp1_qps, max_qps;
+ u32 ena, entries_sp, entries;
+ u32 srqs, max_srqs, min;
+ u32 num_mr, num_ah;
+ u32 extra_srqs = 0;
+ u32 extra_qps = 0;
+ u32 fast_qpmd_qps;
+ u8 pg_lvl = 1;
+ int i, rc;
+
+ rc = bnge_hwrm_func_backing_store_qcaps(bd);
+ if (rc) {
+ dev_err(bd->dev, "Failed querying ctx mem caps, rc: %d\n", rc);
+ return rc;
+ }
+
+ ctx = bd->ctx;
+ if (!ctx || (ctx->flags & BNGE_CTX_FLAG_INITED))
+ return 0;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_QP];
+ l2_qps = ctxm->qp_l2_entries;
+ qp1_qps = ctxm->qp_qp1_entries;
+ fast_qpmd_qps = ctxm->qp_fast_qpmd_entries;
+ max_qps = ctxm->max_entries;
+ ctxm = &ctx->ctx_arr[BNGE_CTX_SRQ];
+ srqs = ctxm->srq_l2_entries;
+ max_srqs = ctxm->max_entries;
+ ena = 0;
+ if (bnge_is_roce_en(bd) && !is_kdump_kernel()) {
+ pg_lvl = 2;
+ extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
+ /* allocate extra QPs if the fast QP destroy feature is enabled */
+ extra_qps += fast_qpmd_qps;
+ extra_srqs = min_t(u32, 8192, max_srqs - srqs);
+ if (fast_qpmd_qps)
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
+ }
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_QP];
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, l2_qps + qp1_qps + extra_qps,
+ pg_lvl);
+ if (rc)
+ return rc;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_SRQ];
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, srqs + extra_srqs, pg_lvl);
+ if (rc)
+ return rc;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_CQ];
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, ctxm->cq_l2_entries +
+ extra_qps * 2, pg_lvl);
+ if (rc)
+ return rc;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_VNIC];
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, ctxm->max_entries, 1);
+ if (rc)
+ return rc;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_STAT];
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, ctxm->max_entries, 1);
+ if (rc)
+ return rc;
+
+ if (!bnge_is_roce_en(bd))
+ goto skip_rdma;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_MRAV];
+ /* 128K extra is needed to accommodate static AH context
+ * allocation by f/w.
+ */
+ num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
+ num_ah = min_t(u32, num_mr, 1024 * 128);
+ ctxm->split_entry_cnt = BNGE_CTX_MRAV_AV_SPLIT_ENTRY + 1;
+ if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
+ ctxm->mrav_av_entries = num_ah;
+
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, num_mr + num_ah, 2);
+ if (rc)
+ return rc;
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_TIM];
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, l2_qps + qp1_qps + extra_qps, 1);
+ if (rc)
+ return rc;
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
+
+skip_rdma:
+ ctxm = &ctx->ctx_arr[BNGE_CTX_STQM];
+ min = ctxm->min_entries;
+ entries_sp = ctx->ctx_arr[BNGE_CTX_VNIC].vnic_entries + l2_qps +
+ 2 * (extra_qps + qp1_qps) + min;
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, entries_sp, 2);
+ if (rc)
+ return rc;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_FTQM];
+ entries = l2_qps + 2 * (extra_qps + qp1_qps);
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, entries, 2);
+ if (rc)
+ return rc;
+ for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
+ ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
+
+ rc = bnge_backing_store_cfg(bd, ena);
+ if (rc) {
+ dev_err(bd->dev, "Failed configuring ctx mem, rc: %d\n", rc);
+ return rc;
+ }
+ ctx->flags |= BNGE_CTX_FLAG_INITED;
+
+ return 0;
+}
+
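+/* Wire up each ring's ring_mem descriptor to the per-ring page arrays, DMA
+ * address arrays and software buffer rings.
+ */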
+void bnge_init_ring_struct(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, j;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_ring_mem_info *rmem;
+ struct bnge_nq_ring_info *nqr;
+ struct bnge_rx_ring_info *rxr;
+ struct bnge_tx_ring_info *txr;
+ struct bnge_ring_struct *ring;
+
+ nqr = &bnapi->nq_ring;
+ ring = &nqr->ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bn->cp_nr_pages;
+ rmem->page_size = HW_CMPD_RING_SIZE;
+ rmem->pg_arr = (void **)nqr->desc_ring;
+ rmem->dma_arr = nqr->desc_mapping;
+ rmem->vmem_size = 0;
+
+ rxr = bnapi->rx_ring;
+ if (!rxr)
+ goto skip_rx;
+
+ ring = &rxr->rx_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bn->rx_nr_pages;
+ rmem->page_size = HW_RXBD_RING_SIZE;
+ rmem->pg_arr = (void **)rxr->rx_desc_ring;
+ rmem->dma_arr = rxr->rx_desc_mapping;
+ rmem->vmem_size = SW_RXBD_RING_SIZE * bn->rx_nr_pages;
+ rmem->vmem = (void **)&rxr->rx_buf_ring;
+
+ ring = &rxr->rx_agg_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bn->rx_agg_nr_pages;
+ rmem->page_size = HW_RXBD_RING_SIZE;
+ rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
+ rmem->dma_arr = rxr->rx_agg_desc_mapping;
+ rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bn->rx_agg_nr_pages;
+ rmem->vmem = (void **)&rxr->rx_agg_buf_ring;
+
+skip_rx:
+ bnge_for_each_napi_tx(j, bnapi, txr) {
+ ring = &txr->tx_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bn->tx_nr_pages;
+ rmem->page_size = HW_TXBD_RING_SIZE;
+ rmem->pg_arr = (void **)txr->tx_desc_ring;
+ rmem->dma_arr = txr->tx_desc_mapping;
+ rmem->vmem_size = SW_TXBD_RING_SIZE * bn->tx_nr_pages;
+ rmem->vmem = (void **)&txr->tx_buf_ring;
+ }
+ }
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_rmem.h b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.h
new file mode 100644
index 000000000000..341c7f81ed09
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_RMEM_H_
+#define _BNGE_RMEM_H_
+
+struct bnge_ctx_mem_type;
+struct bnge_dev;
+struct bnge_net;
+
+#define PTU_PTE_VALID 0x1UL
+#define PTU_PTE_LAST 0x2UL
+#define PTU_PTE_NEXT_TO_LAST 0x4UL
+
+struct bnge_ring_mem_info {
+ /* Number of pages to next level */
+ int nr_pages;
+ int page_size;
+ u16 flags;
+#define BNGE_RMEM_VALID_PTE_FLAG 1
+#define BNGE_RMEM_RING_PTE_FLAG 2
+#define BNGE_RMEM_USE_FULL_PAGE_FLAG 4
+
+ u16 depth;
+
+ void **pg_arr;
+ dma_addr_t *dma_arr;
+
+ __le64 *pg_tbl;
+ dma_addr_t dma_pg_tbl;
+
+ int vmem_size;
+ void **vmem;
+
+ struct bnge_ctx_mem_type *ctx_mem;
+};
+
+/* The hardware supports certain page sizes.
+ * Use the supported page sizes to allocate the rings.
+ */
+#if (PAGE_SHIFT < 12)
+#define BNGE_PAGE_SHIFT 12
+#elif (PAGE_SHIFT <= 13)
+#define BNGE_PAGE_SHIFT PAGE_SHIFT
+#elif (PAGE_SHIFT < 16)
+#define BNGE_PAGE_SHIFT 13
+#else
+#define BNGE_PAGE_SHIFT 16
+#endif
+#define BNGE_PAGE_SIZE (1 << BNGE_PAGE_SHIFT)
+/* The RXBD length is 16-bit so we can only support page sizes < 64K */
+#if (PAGE_SHIFT > 15)
+#define BNGE_RX_PAGE_SHIFT 15
+#else
+#define BNGE_RX_PAGE_SHIFT PAGE_SHIFT
+#endif
+#define MAX_CTX_PAGES (BNGE_PAGE_SIZE / 8)
+#define MAX_CTX_TOTAL_PAGES (MAX_CTX_PAGES * MAX_CTX_PAGES)
+
+struct bnge_ctx_pg_info {
+ u32 entries;
+ u32 nr_pages;
+ void *ctx_pg_arr[MAX_CTX_PAGES];
+ dma_addr_t ctx_dma_arr[MAX_CTX_PAGES];
+ struct bnge_ring_mem_info ring_mem;
+ struct bnge_ctx_pg_info **ctx_pg_tbl;
+};
+
+#define BNGE_MAX_TQM_SP_RINGS 1
+#define BNGE_MAX_TQM_FP_RINGS 8
+#define BNGE_MAX_TQM_RINGS \
+ (BNGE_MAX_TQM_SP_RINGS + BNGE_MAX_TQM_FP_RINGS)
+#define BNGE_BACKING_STORE_CFG_LEGACY_LEN 256
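+/* Encode the backing-store page size (4K, 8K or 64K) in the format the
+ * firmware expects.
+ */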
+#define BNGE_SET_CTX_PAGE_ATTR(attr) \
+do { \
+ if (BNGE_PAGE_SIZE == 0x2000) \
+ attr = FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K; \
+ else if (BNGE_PAGE_SIZE == 0x10000) \
+ attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K; \
+ else \
+ attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K; \
+} while (0)
+
+#define BNGE_CTX_MRAV_AV_SPLIT_ENTRY 0
+
+#define BNGE_CTX_QP \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP
+#define BNGE_CTX_SRQ \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ
+#define BNGE_CTX_CQ \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ
+#define BNGE_CTX_VNIC \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC
+#define BNGE_CTX_STAT \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT
+#define BNGE_CTX_STQM \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING
+#define BNGE_CTX_FTQM \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING
+#define BNGE_CTX_MRAV \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV
+#define BNGE_CTX_TIM \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM
+#define BNGE_CTX_TCK \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK
+#define BNGE_CTX_RCK \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK
+#define BNGE_CTX_MTQM \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING
+#define BNGE_CTX_SQDBS \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW
+#define BNGE_CTX_RQDBS \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW
+#define BNGE_CTX_SRQDBS \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW
+#define BNGE_CTX_CQDBS \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW
+#define BNGE_CTX_SRT_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE
+#define BNGE_CTX_SRT2_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE
+#define BNGE_CTX_CRT_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE
+#define BNGE_CTX_CRT2_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE
+#define BNGE_CTX_RIGP0_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE
+#define BNGE_CTX_L2_HWRM_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE
+#define BNGE_CTX_ROCE_HWRM_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE
+
+#define BNGE_CTX_MAX (BNGE_CTX_TIM + 1)
+#define BNGE_CTX_L2_MAX (BNGE_CTX_FTQM + 1)
+#define BNGE_CTX_INV ((u16)-1)
+
+#define BNGE_CTX_V2_MAX \
+ (FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE + 1)
+
+#define BNGE_BS_CFG_ALL_DONE \
+ FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE
+
+struct bnge_ctx_mem_type {
+ u16 type;
+ u16 entry_size;
+ u32 flags;
+#define BNGE_CTX_MEM_TYPE_VALID \
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID
+ u32 instance_bmap;
+ u8 init_value;
+ u8 entry_multiple;
+ u16 init_offset;
+#define BNGE_CTX_INIT_INVALID_OFFSET 0xffff
+ u32 max_entries;
+ u32 min_entries;
+ u8 last:1;
+ u8 split_entry_cnt;
+#define BNGE_MAX_SPLIT_ENTRY 4
+ union {
+ struct {
+ u32 qp_l2_entries;
+ u32 qp_qp1_entries;
+ u32 qp_fast_qpmd_entries;
+ };
+ u32 srq_l2_entries;
+ u32 cq_l2_entries;
+ u32 vnic_entries;
+ struct {
+ u32 mrav_av_entries;
+ u32 mrav_num_entries_units;
+ };
+ u32 split[BNGE_MAX_SPLIT_ENTRY];
+ };
+ struct bnge_ctx_pg_info *pg_info;
+};
+
+struct bnge_ctx_mem_info {
+ u8 tqm_fp_rings_count;
+ u32 flags;
+#define BNGE_CTX_FLAG_INITED 0x01
+ struct bnge_ctx_mem_type ctx_arr[BNGE_CTX_V2_MAX];
+};
+
+struct bnge_ring_struct {
+ struct bnge_ring_mem_info ring_mem;
+
+ u16 fw_ring_id;
+ union {
+ u16 grp_idx;
+ u16 map_idx; /* Used by NQs */
+ };
+ u32 handle;
+ u8 queue_id;
+};
+
+int bnge_alloc_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem);
+void bnge_free_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem);
+int bnge_alloc_ctx_mem(struct bnge_dev *bd);
+void bnge_free_ctx_mem(struct bnge_dev *bd);
+void bnge_init_ring_struct(struct bnge_net *bn);
+
+#endif /* _BNGE_RMEM_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 6ec773e61182..805daae9dd36 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -6163,7 +6163,7 @@ bnx2_5708_serdes_timer(struct bnx2 *bp)
static void
bnx2_timer(struct timer_list *t)
{
- struct bnx2 *bp = from_timer(bp, t, timer);
+ struct bnx2 *bp = timer_container_of(bp, t, timer);
if (!netif_running(bp->dev))
return;
@@ -6400,7 +6400,7 @@ bnx2_open(struct net_device *dev)
rc = bnx2_request_irq(bp);
if (rc) {
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
goto open_err;
}
bnx2_enable_int(bp);
@@ -6444,7 +6444,6 @@ bnx2_reset_task(struct work_struct *work)
if (!(pcicmd & PCI_COMMAND_MEMORY)) {
/* in case PCI block has reset */
pci_restore_state(bp->pdev);
- pci_save_state(bp->pdev);
}
rc = bnx2_init_nic(bp, 1);
if (rc) {
@@ -6752,7 +6751,7 @@ bnx2_close(struct net_device *dev)
bnx2_disable_int_sync(bp);
bnx2_napi_disable(bp);
netif_tx_disable(dev);
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
bnx2_shutdown_chip(bp);
bnx2_free_irq(bp);
bnx2_free_skbs(bp);
@@ -8602,7 +8601,7 @@ bnx2_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
cancel_work_sync(&bp->reset_task);
pci_iounmap(bp->pdev, bp->regview);
@@ -8629,7 +8628,7 @@ bnx2_suspend(struct device *device)
cancel_work_sync(&bp->reset_task);
bnx2_netif_stop(bp, true);
netif_device_detach(dev);
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
bnx2_shutdown_chip(bp);
__bnx2_free_irq(bp);
bnx2_free_skbs(bp);
@@ -8687,7 +8686,7 @@ static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
if (netif_running(dev)) {
bnx2_netif_stop(bp, true);
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
}
@@ -8718,7 +8717,6 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
} else {
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
if (netif_running(dev))
err = bnx2_init_nic(bp, 1);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index a8e07e51418f..e59530357e2c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3059,7 +3059,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
bp->rx_mode = BNX2X_RX_MODE_NONE;
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
if (IS_PF(bp) && !BP_NOMCP(bp)) {
/* Set ALWAYS_ALIVE bit in shmem */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 17ae6df90723..9af81630c8a4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -344,7 +344,7 @@ static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp,
}
}
-/* maps unmapped priorities to to the same COS as L2 */
+/* maps unmapped priorities to the same COS as L2 */
static void bnx2x_dcbx_map_nw(struct bnx2x *bp)
{
int i;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index adf7b6b94941..3d853eeb976f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -39,34 +39,34 @@ static const struct {
int size;
char string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[] = {
-/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" },
+/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
- 8, "[%s]: rx_ucast_packets" },
+ 8, "[%d]: rx_ucast_packets" },
{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
- 8, "[%s]: rx_mcast_packets" },
+ 8, "[%d]: rx_mcast_packets" },
{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
- 8, "[%s]: rx_bcast_packets" },
- { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%s]: rx_discards" },
+ 8, "[%d]: rx_bcast_packets" },
+ { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
{ Q_STATS_OFFSET32(rx_err_discard_pkt),
- 4, "[%s]: rx_phy_ip_err_discards"},
+ 4, "[%d]: rx_phy_ip_err_discards"},
{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
- 4, "[%s]: rx_skb_alloc_discard" },
- { Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" },
- { Q_STATS_OFFSET32(driver_xoff), 4, "[%s]: tx_exhaustion_events" },
- { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" },
+ 4, "[%d]: rx_skb_alloc_discard" },
+ { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
+ { Q_STATS_OFFSET32(driver_xoff), 4, "[%d]: tx_exhaustion_events" },
+ { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
/* 10 */{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
- 8, "[%s]: tx_ucast_packets" },
+ 8, "[%d]: tx_ucast_packets" },
{ Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
- 8, "[%s]: tx_mcast_packets" },
+ 8, "[%d]: tx_mcast_packets" },
{ Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
- 8, "[%s]: tx_bcast_packets" },
+ 8, "[%d]: tx_bcast_packets" },
{ Q_STATS_OFFSET32(total_tpa_aggregations_hi),
- 8, "[%s]: tpa_aggregations" },
+ 8, "[%d]: tpa_aggregations" },
{ Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
- 8, "[%s]: tpa_aggregated_frames"},
- { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%s]: tpa_bytes"},
+ 8, "[%d]: tpa_aggregated_frames"},
+ { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%d]: tpa_bytes"},
{ Q_STATS_OFFSET32(driver_filtered_tx_pkt),
- 4, "[%s]: driver_filtered_tx_pkt" }
+ 4, "[%d]: driver_filtered_tx_pkt" }
};
#define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr)
@@ -1243,9 +1243,9 @@ static int bnx2x_get_eeprom_len(struct net_device *dev)
* pf B succeeds in taking the same lock since they are from the same port.
* pf A takes the per pf misc lock. Performs eeprom access.
* pf A finishes. Unlocks the per pf misc lock.
- * Pf B takes the lock and proceeds to perform it's own access.
+ * Pf B takes the lock and proceeds to perform its own access.
* pf A unlocks the per port lock, while pf B is still working (!).
- * mcp takes the per port lock and corrupts pf B's access (and/or has it's own
+ * mcp takes the per port lock and corrupts pf B's access (and/or has its own
* access corrupted by pf B)
*/
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
@@ -3184,49 +3184,43 @@ static u32 bnx2x_get_private_flags(struct net_device *dev)
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct bnx2x *bp = netdev_priv(dev);
- int i, j, k, start;
- char queue_name[MAX_QUEUE_NAME_LEN+1];
+ const char *str;
+ int i, j, start;
switch (stringset) {
case ETH_SS_STATS:
- k = 0;
if (is_multi(bp)) {
for_each_eth_queue(bp, i) {
- memset(queue_name, 0, sizeof(queue_name));
- snprintf(queue_name, sizeof(queue_name),
- "%d", i);
- for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
- snprintf(buf + (k + j)*ETH_GSTRING_LEN,
- ETH_GSTRING_LEN,
- bnx2x_q_stats_arr[j].string,
- queue_name);
- k += BNX2X_NUM_Q_STATS;
+ for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
+ str = bnx2x_q_stats_arr[j].string;
+ ethtool_sprintf(&buf, str, i);
+ }
}
}
- for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
+ for (i = 0; i < BNX2X_NUM_STATS; i++) {
if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
continue;
- strcpy(buf + (k + j)*ETH_GSTRING_LEN,
- bnx2x_stats_arr[i].string);
- j++;
+ ethtool_puts(&buf, bnx2x_stats_arr[i].string);
}
break;
case ETH_SS_TEST:
+ if (IS_VF(bp))
+ break;
/* First 4 tests cannot be done in MF mode */
if (!IS_MF(bp))
start = 0;
else
start = 4;
- memcpy(buf, bnx2x_tests_str_arr + start,
- ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp));
+ for (i = start; i < BNX2X_NUM_TESTS_SF; i++)
+ ethtool_puts(&buf, bnx2x_tests_str_arr[i]);
break;
case ETH_SS_PRIV_FLAGS:
- memcpy(buf, bnx2x_private_arr,
- ETH_GSTRING_LEN * BNX2X_PRI_FLAG_LEN);
+ for (i = 0; i < BNX2X_PRI_FLAG_LEN; i++)
+ ethtool_puts(&buf, bnx2x_private_arr[i]);
break;
}
}
@@ -3324,8 +3318,11 @@ static int bnx2x_set_phys_id(struct net_device *dev,
return 0;
}
-static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
+static int bnx2x_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
{
+ struct bnx2x *bp = netdev_priv(dev);
+
switch (info->flow_type) {
case TCP_V4_FLOW:
case TCP_V6_FLOW:
@@ -3358,29 +3355,22 @@ static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
return 0;
}
-static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
- u32 *rules __always_unused)
+static u32 bnx2x_get_rx_ring_count(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = BNX2X_NUM_ETH_QUEUES(bp);
- return 0;
- case ETHTOOL_GRXFH:
- return bnx2x_get_rss_flags(bp, info);
- default:
- DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
- return -EOPNOTSUPP;
- }
+ return BNX2X_NUM_ETH_QUEUES(bp);
}
-static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
+static int bnx2x_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *info,
+ struct netlink_ext_ack *extack)
{
+ struct bnx2x *bp = netdev_priv(dev);
int udp_rss_requested;
DP(BNX2X_MSG_ETHTOOL,
- "Set rss flags command parameters: flow type = %d, data = %llu\n",
+ "Set rss flags command parameters: flow type = %d, data = %u\n",
info->flow_type, info->data);
switch (info->flow_type) {
@@ -3466,19 +3456,6 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
}
}
-static int bnx2x_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
-{
- struct bnx2x *bp = netdev_priv(dev);
-
- switch (info->cmd) {
- case ETHTOOL_SRXFH:
- return bnx2x_set_rss_flags(bp, info);
- default:
- DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
- return -EOPNOTSUPP;
- }
-}
-
static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
{
return T_ETH_INDIRECTION_TABLE_SIZE;
@@ -3689,11 +3666,12 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.get_strings = bnx2x_get_strings,
.set_phys_id = bnx2x_set_phys_id,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
- .get_rxnfc = bnx2x_get_rxnfc,
- .set_rxnfc = bnx2x_set_rxnfc,
+ .get_rx_ring_count = bnx2x_get_rx_ring_count,
.get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
.get_rxfh = bnx2x_get_rxfh,
.set_rxfh = bnx2x_set_rxfh,
+ .get_rxfh_fields = bnx2x_get_rxfh_fields,
+ .set_rxfh_fields = bnx2x_set_rxfh_fields,
.get_channels = bnx2x_get_channels,
.set_channels = bnx2x_set_channels,
.get_module_info = bnx2x_get_module_info,
@@ -3716,11 +3694,12 @@ static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
.get_sset_count = bnx2x_get_sset_count,
.get_strings = bnx2x_get_strings,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
- .get_rxnfc = bnx2x_get_rxnfc,
- .set_rxnfc = bnx2x_set_rxnfc,
+ .get_rx_ring_count = bnx2x_get_rx_ring_count,
.get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
.get_rxfh = bnx2x_get_rxfh,
.set_rxfh = bnx2x_set_rxfh,
+ .get_rxfh_fields = bnx2x_get_rxfh_fields,
+ .set_rxfh_fields = bnx2x_set_rxfh_fields,
.get_channels = bnx2x_get_channels,
.set_channels = bnx2x_set_channels,
.get_link_ksettings = bnx2x_get_vf_link_ksettings,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index a84d015da5df..9221942290a8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -332,7 +332,7 @@
#define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
#define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
-/* microcode fixed page page size 4K (chains and ring segments) */
+/* microcode fixed page size 4K (chains and ring segments) */
#define MC_PAGE_SIZE 4096
/* Number of indices per slow-path SB */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 678829646cec..6a1cc2032bf3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -308,8 +308,11 @@ static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
/****************************************************************************
* General service functions
****************************************************************************/
-
-static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);
+static int bnx2x_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+static int bnx2x_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config);
static void __storm_memset_dma_mapping(struct bnx2x *bp,
u32 addr, dma_addr_t mapping)
@@ -1768,7 +1771,7 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
* @bp: driver handle
*
* Returns the recovery leader resource id according to the engine this function
- * belongs to. Currently only only 2 engines is supported.
+ * belongs to. Currently only 2 engines are supported.
*/
static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
{
@@ -5783,7 +5786,7 @@ void bnx2x_drv_pulse(struct bnx2x *bp)
static void bnx2x_timer(struct timer_list *t)
{
- struct bnx2x *bp = from_timer(bp, t, timer);
+ struct bnx2x *bp = timer_container_of(bp, t, timer);
if (!netif_running(bp->dev))
return;
@@ -10219,8 +10222,7 @@ static int bnx2x_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
static const struct udp_tunnel_nic_info bnx2x_udp_tunnels = {
.sync_table = bnx2x_udp_tunnel_sync,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
@@ -12814,14 +12816,9 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (!netif_running(dev))
return -EAGAIN;
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return bnx2x_hwtstamp_ioctl(bp, ifr);
- default:
- DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
- mdio->phy_id, mdio->reg_num, mdio->val_in);
- return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
- }
+ DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
+ mdio->phy_id, mdio->reg_num, mdio->val_in);
+ return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}
static int bnx2x_validate_addr(struct net_device *dev)
@@ -13037,6 +13034,8 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_get_phys_port_id = bnx2x_get_phys_port_id,
.ndo_set_vf_link_state = bnx2x_set_vf_link_state,
.ndo_features_check = bnx2x_features_check,
+ .ndo_hwtstamp_get = bnx2x_hwtstamp_get,
+ .ndo_hwtstamp_set = bnx2x_hwtstamp_set,
};
static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
@@ -14140,7 +14139,7 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
bnx2x_tx_disable(bp);
netdev_reset_tc(bp->dev);
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
cancel_delayed_work_sync(&bp->sp_task);
cancel_delayed_work_sync(&bp->period_task);
@@ -14217,7 +14216,6 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
if (netif_running(dev))
bnx2x_set_power_state(bp, PCI_D0);
@@ -15176,7 +15174,7 @@ void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
}
/* Read the PHC */
-static u64 bnx2x_cyclecounter_read(const struct cyclecounter *cc)
+static u64 bnx2x_cyclecounter_read(struct cyclecounter *cc)
{
struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter);
int port = BP_PORT(bp);
@@ -15351,31 +15349,57 @@ int bnx2x_configure_ptp_filters(struct bnx2x *bp)
return 0;
}
-static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
+static int bnx2x_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
+ struct bnx2x *bp = netdev_priv(dev);
int rc;
- DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n");
+ DP(BNX2X_MSG_PTP, "HWTSTAMP SET called\n");
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ if (!netif_running(dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device is down");
+ return -EAGAIN;
+ }
DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n",
- config.tx_type, config.rx_filter);
+ config->tx_type, config->rx_filter);
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_ON:
+ case HWTSTAMP_TX_OFF:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "One-step timestamping is not supported");
+ return -ERANGE;
+ }
bp->hwtstamp_ioctl_called = true;
- bp->tx_type = config.tx_type;
- bp->rx_filter = config.rx_filter;
+ bp->tx_type = config->tx_type;
+ bp->rx_filter = config->rx_filter;
rc = bnx2x_configure_ptp_filters(bp);
- if (rc)
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "HW configuration failure");
return rc;
+ }
+
+ config->rx_filter = bp->rx_filter;
+
+ return 0;
+}
+
+static int bnx2x_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct bnx2x *bp = netdev_priv(dev);
- config.rx_filter = bp->rx_filter;
+ config->rx_filter = bp->rx_filter;
+ config->tx_type = bp->tx_type;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
/* Configures HW for PTP */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 8e04552d2216..02c8213915a5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2593,7 +2593,7 @@ void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
/********************* Multicast verbs: SET, CLEAR ****************************/
static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
{
- return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
+ return (crc32c(0, mac, ETH_ALEN) >> 24) & 0xff;
}
struct bnx2x_mcast_mac_elem {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index bacc8552bce1..00ca861c80dd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -379,7 +379,7 @@ struct bnx2x_vlan_mac_obj {
/**
* Delete all configured elements having the given
* vlan_mac_flags specification. Assumes no pending for
- * execution commands. Will schedule all all currently
+ * execution commands. Will schedule all currently
* configured MACs/VLANs/VLAN-MACs matching the vlan_mac_flags
* specification for deletion and will use the given
* ramrod_flags for the last DEL operation.
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index ca42b81133d7..d17d0ea89c36 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -54,9 +54,12 @@
#include <net/pkt_cls.h>
#include <net/page_pool/helpers.h>
#include <linux/align.h>
+#include <net/netdev_lock.h>
#include <net/netdev_queues.h>
+#include <net/netdev_rx_queue.h>
+#include <linux/pci-tph.h>
+#include <linux/bnxt/hsi.h>
-#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
@@ -76,12 +79,12 @@
#define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
NETIF_MSG_TX_ERR)
+MODULE_IMPORT_NS("NETDEV_INTERNAL");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom NetXtreme network driver");
#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
-#define BNXT_RX_COPY_THRESH 256
#define BNXT_TX_PUSH_THRESH 164
@@ -139,6 +142,7 @@ static const struct {
[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
[NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" },
+ [NETXTREME_E_P7_VF_HV] = { "Broadcom BCM5760X Virtual Function for Hyper-V" },
};
static const struct pci_device_id bnxt_pci_tbl[] = {
@@ -214,6 +218,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF },
+ { PCI_VDEVICE(BROADCOM, 0x181b), .driver_data = NETXTREME_E_P7_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
{ 0 }
@@ -245,6 +250,23 @@ static const u16 bnxt_async_events_arr[] = {
ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
+ ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER,
+};
+
+const u16 bnxt_bstore_to_trace[] = {
+ [BNXT_CTX_SRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT_TRACE,
+ [BNXT_CTX_SRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT2_TRACE,
+ [BNXT_CTX_CRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT_TRACE,
+ [BNXT_CTX_CRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT2_TRACE,
+ [BNXT_CTX_RIGP0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP0_TRACE,
+ [BNXT_CTX_L2HWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_L2_HWRM_TRACE,
+ [BNXT_CTX_REHWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ROCE_HWRM_TRACE,
+ [BNXT_CTX_CA0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA0_TRACE,
+ [BNXT_CTX_CA1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA1_TRACE,
+ [BNXT_CTX_CA2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE,
+ [BNXT_CTX_RIGP1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE,
+ [BNXT_CTX_KONG] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_AFM_KONG_HWRM_TRACE,
+ [BNXT_CTX_QPC] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ERR_QPC_TRACE,
};
static struct workqueue_struct *bnxt_pf_wq;
@@ -297,7 +319,8 @@ static bool bnxt_vf_pciid(enum board_idx idx)
return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
- idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF);
+ idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF ||
+ idx == NETXTREME_E_P7_VF_HV);
}
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
@@ -459,6 +482,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct bnxt_tx_ring_info *txr;
struct bnxt_sw_tx_bd *tx_buf;
__le32 lflags = 0;
+ skb_frag_t *frag;
i = skb_get_queue_mapping(skb);
if (unlikely(i >= bp->tx_nr_rings)) {
@@ -471,6 +495,17 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
txr = &bp->tx_ring[bp->tx_ring_map[i]];
prod = txr->tx_prod;
+#if (MAX_SKB_FRAGS > TX_MAX_FRAGS)
+ if (skb_shinfo(skb)->nr_frags > TX_MAX_FRAGS) {
+ netdev_warn_once(dev, "SKB has too many (%d) fragments, max supported is %d. SKB will be linearized.\n",
+ skb_shinfo(skb)->nr_frags, TX_MAX_FRAGS);
+ if (skb_linearize(skb)) {
+ dev_kfree_skb_any(skb);
+ dev_core_stats_tx_dropped_inc(dev);
+ return NETDEV_TX_OK;
+ }
+ }
+#endif
free_size = bnxt_tx_avail(bp, txr);
if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
/* We must have raced with NAPI cleanup */
@@ -534,7 +569,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
- !lflags) {
+ skb_frags_readable(skb) && !lflags) {
struct tx_push_buffer *tx_push_buf = txr->tx_push;
struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
@@ -550,7 +585,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
TX_BD_FLAGS_LHINT_512_AND_SMALLER |
TX_BD_FLAGS_COAL_NOW |
TX_BD_FLAGS_PACKET_END |
- (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
+ TX_BD_CNT(2));
if (skb->ip_summed == CHECKSUM_PARTIAL)
tx_push1->tx_bd_hsize_lflags =
@@ -569,9 +604,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_copy_from_linear_data(skb, pdata, len);
pdata += len;
for (j = 0; j < last_frag; j++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
void *fptr;
+ frag = &skb_shinfo(skb)->frags[j];
fptr = skb_frag_address_safe(frag);
if (!fptr)
goto normal_tx;
@@ -625,7 +660,7 @@ normal_tx:
dma_unmap_addr_set(tx_buf, mapping, mapping);
flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
- ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
+ TX_BD_CNT(last_frag + 2);
txbd->tx_bd_haddr = cpu_to_le64(mapping);
txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag);
@@ -679,8 +714,7 @@ normal_tx:
cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
txbd0 = txbd;
for (i = 0; i < last_frag; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
+ frag = &skb_shinfo(skb)->frags[i];
prod = NEXT_TX(prod);
txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
@@ -692,7 +726,8 @@ normal_tx:
goto tx_dma_error;
tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
- dma_unmap_addr_set(tx_buf, mapping, mapping);
+ netmem_dma_unmap_addr_set(skb_frag_netmem(frag), tx_buf,
+ mapping, mapping);
txbd->tx_bd_haddr = cpu_to_le64(mapping);
@@ -749,16 +784,18 @@ tx_dma_error:
for (i = 0; i < last_frag; i++) {
prod = NEXT_TX(prod);
tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
- dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
- skb_frag_size(&skb_shinfo(skb)->frags[i]),
- DMA_TO_DEVICE);
+ frag = &skb_shinfo(skb)->frags[i];
+ netmem_dma_unmap_page_attrs(&pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE, 0);
}
tx_free:
dev_kfree_skb_any(skb);
tx_kick_pending:
if (BNXT_TX_PTP_IS_SET(lflags)) {
- txr->tx_buf_ring[txr->tx_prod].is_ts_pkt = 0;
+ txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].is_ts_pkt = 0;
atomic64_inc(&bp->ptp_cfg->stats.ts_err);
if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
/* set SKB to err so PTP worker will clean up */
@@ -766,7 +803,7 @@ tx_kick_pending:
}
if (txr->kick_pending)
bnxt_txr_db_kick(bp, txr, txr->tx_prod);
- txr->tx_buf_ring[txr->tx_prod].skb = NULL;
+ txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].skb = NULL;
dev_core_stats_tx_dropped_inc(dev);
return NETDEV_TX_OK;
}
@@ -780,6 +817,7 @@ static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
u16 hw_cons = txr->tx_hw_cons;
unsigned int tx_bytes = 0;
u16 cons = txr->tx_cons;
+ skb_frag_t *frag;
int tx_pkts = 0;
bool rc = false;
@@ -819,13 +857,14 @@ static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
last = tx_buf->nr_frags;
for (j = 0; j < last; j++) {
+ frag = &skb_shinfo(skb)->frags[j];
cons = NEXT_TX(cons);
tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
- dma_unmap_page(
- &pdev->dev,
- dma_unmap_addr(tx_buf, mapping),
- skb_frag_size(&skb_shinfo(skb)->frags[j]),
- DMA_TO_DEVICE);
+ netmem_dma_unmap_page_attrs(&pdev->dev,
+ dma_unmap_addr(tx_buf,
+ mapping),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE, 0);
}
if (unlikely(is_ts_pkt)) {
if (BNXT_CHIP_P5(bp)) {
@@ -838,7 +877,7 @@ static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
next_tx_int:
cons = NEXT_TX(cons);
- dev_consume_skb_any(skb);
+ napi_consume_skb(skb, budget);
}
WRITE_ONCE(txr->tx_cons, cons);
@@ -864,6 +903,11 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
bnapi->events &= ~BNXT_TX_CMP_EVENT;
}
+static bool bnxt_separate_head_pool(struct bnxt_rx_ring_info *rxr)
+{
+ return rxr->need_head_pool || PAGE_SIZE > BNXT_RX_PAGE_SIZE;
+}
+
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
struct bnxt_rx_ring_info *rxr,
unsigned int *offset,
@@ -885,28 +929,40 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
return page;
}
+static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping,
+ struct bnxt_rx_ring_info *rxr,
+ unsigned int *offset,
+ gfp_t gfp)
+{
+ netmem_ref netmem;
+
+ if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+ netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset, BNXT_RX_PAGE_SIZE, gfp);
+ } else {
+ netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
+ *offset = 0;
+ }
+ if (!netmem)
+ return 0;
+
+ *mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
+ return netmem;
+}
+
static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
+ struct bnxt_rx_ring_info *rxr,
gfp_t gfp)
{
- u8 *data;
- struct pci_dev *pdev = bp->pdev;
+ unsigned int offset;
+ struct page *page;
- if (gfp == GFP_ATOMIC)
- data = napi_alloc_frag(bp->rx_buf_size);
- else
- data = netdev_alloc_frag(bp->rx_buf_size);
- if (!data)
+ page = page_pool_alloc_frag(rxr->head_pool, &offset,
+ bp->rx_buf_size, gfp);
+ if (!page)
return NULL;
- *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
- bp->rx_buf_use_size, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
-
- if (dma_mapping_error(&pdev->dev, *mapping)) {
- skb_free_frag(data);
- data = NULL;
- }
- return data;
+ *mapping = page_pool_get_dma_addr(page) + bp->rx_dma_offset + offset;
+ return page_address(page) + offset;
}
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
@@ -928,7 +984,7 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
rx_buf->data = page;
rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
} else {
- u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);
+ u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, gfp);
if (!data)
return -ENOMEM;
@@ -973,21 +1029,19 @@ static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
return next;
}
-static inline int bnxt_alloc_rx_page(struct bnxt *bp,
- struct bnxt_rx_ring_info *rxr,
- u16 prod, gfp_t gfp)
+static int bnxt_alloc_rx_netmem(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp)
{
struct rx_bd *rxbd =
&rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
struct bnxt_sw_rx_agg_bd *rx_agg_buf;
- struct page *page;
- dma_addr_t mapping;
u16 sw_prod = rxr->rx_sw_agg_prod;
unsigned int offset = 0;
+ dma_addr_t mapping;
+ netmem_ref netmem;
- page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
-
- if (!page)
+ netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, &offset, gfp);
+ if (!netmem)
return -ENOMEM;
if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
@@ -997,7 +1051,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
- rx_agg_buf->page = page;
+ rx_agg_buf->netmem = netmem;
rx_agg_buf->offset = offset;
rx_agg_buf->mapping = mapping;
rxbd->rx_bd_haddr = cpu_to_le64(mapping);
@@ -1041,11 +1095,11 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
p5_tpa = true;
for (i = 0; i < agg_bufs; i++) {
- u16 cons;
- struct rx_agg_cmp *agg;
struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
+ struct rx_agg_cmp *agg;
struct rx_bd *prod_bd;
- struct page *page;
+ netmem_ref netmem;
+ u16 cons;
if (p5_tpa)
agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
@@ -1062,11 +1116,11 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
cons_rx_buf = &rxr->rx_agg_ring[cons];
/* It is possible for sw_prod to be equal to cons, so
- * set cons_rx_buf->page to NULL first.
+ * set cons_rx_buf->netmem to 0 first.
*/
- page = cons_rx_buf->page;
- cons_rx_buf->page = NULL;
- prod_rx_buf->page = page;
+ netmem = cons_rx_buf->netmem;
+ cons_rx_buf->netmem = 0;
+ prod_rx_buf->netmem = netmem;
prod_rx_buf->offset = cons_rx_buf->offset;
prod_rx_buf->mapping = cons_rx_buf->mapping;
@@ -1179,41 +1233,48 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
}
skb = napi_build_skb(data, bp->rx_buf_size);
- dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
- bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
+ dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
+ bp->rx_dir);
if (!skb) {
- skb_free_frag(data);
+ page_pool_free_va(rxr->head_pool, data, true);
return NULL;
}
+ skb_mark_for_recycle(skb);
skb_reserve(skb, bp->rx_offset);
skb_put(skb, offset_and_len & 0xffff);
return skb;
}
-static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
- struct bnxt_cp_ring_info *cpr,
- struct skb_shared_info *shinfo,
- u16 idx, u32 agg_bufs, bool tpa,
- struct xdp_buff *xdp)
+static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ u16 idx, u32 agg_bufs, bool tpa,
+ struct sk_buff *skb,
+ struct xdp_buff *xdp)
{
struct bnxt_napi *bnapi = cpr->bnapi;
- struct pci_dev *pdev = bp->pdev;
- struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
- u16 prod = rxr->rx_agg_prod;
+ struct skb_shared_info *shinfo;
+ struct bnxt_rx_ring_info *rxr;
u32 i, total_frag_len = 0;
bool p5_tpa = false;
+ u16 prod;
+
+ rxr = bnapi->rx_ring;
+ prod = rxr->rx_agg_prod;
if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
p5_tpa = true;
+ if (skb)
+ shinfo = skb_shinfo(skb);
+ else
+ shinfo = xdp_get_shared_info_from_buff(xdp);
+
for (i = 0; i < agg_bufs; i++) {
- skb_frag_t *frag = &shinfo->frags[i];
- u16 cons, frag_len;
- struct rx_agg_cmp *agg;
struct bnxt_sw_rx_agg_bd *cons_rx_buf;
- struct page *page;
- dma_addr_t mapping;
+ struct rx_agg_cmp *agg;
+ u16 cons, frag_len;
+ netmem_ref netmem;
if (p5_tpa)
agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
@@ -1224,27 +1285,41 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
cons_rx_buf = &rxr->rx_agg_ring[cons];
- skb_frag_fill_page_desc(frag, cons_rx_buf->page,
- cons_rx_buf->offset, frag_len);
- shinfo->nr_frags = i + 1;
+ if (skb) {
+ skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem,
+ cons_rx_buf->offset,
+ frag_len, BNXT_RX_PAGE_SIZE);
+ } else {
+ skb_frag_t *frag = &shinfo->frags[i];
+
+ skb_frag_fill_netmem_desc(frag, cons_rx_buf->netmem,
+ cons_rx_buf->offset,
+ frag_len);
+ shinfo->nr_frags = i + 1;
+ }
__clear_bit(cons, rxr->rx_agg_bmap);
- /* It is possible for bnxt_alloc_rx_page() to allocate
+ /* It is possible for bnxt_alloc_rx_netmem() to allocate
* a sw_prod index that equals the cons index, so we
* need to clear the cons entry now.
*/
- mapping = cons_rx_buf->mapping;
- page = cons_rx_buf->page;
- cons_rx_buf->page = NULL;
+ netmem = cons_rx_buf->netmem;
+ cons_rx_buf->netmem = 0;
- if (xdp && page_is_pfmemalloc(page))
+ if (xdp && netmem_is_pfmemalloc(netmem))
xdp_buff_set_frag_pfmemalloc(xdp);
- if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
+ if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_ATOMIC) != 0) {
+ if (skb) {
+ skb->len -= frag_len;
+ skb->data_len -= frag_len;
+ skb->truesize -= BNXT_RX_PAGE_SIZE;
+ }
+
--shinfo->nr_frags;
- cons_rx_buf->page = page;
+ cons_rx_buf->netmem = netmem;
- /* Update prod since possibly some pages have been
+ /* Update prod since possibly some netmems have been
* allocated already.
*/
rxr->rx_agg_prod = prod;
@@ -1252,8 +1327,8 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
return 0;
}
- dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
- bp->rx_dir);
+ page_pool_dma_sync_netmem_for_cpu(rxr->page_pool, netmem, 0,
+ BNXT_RX_PAGE_SIZE);
total_frag_len += frag_len;
prod = NEXT_RX_AGG(prod);
@@ -1262,32 +1337,28 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
return total_frag_len;
}
-static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
- struct bnxt_cp_ring_info *cpr,
- struct sk_buff *skb, u16 idx,
- u32 agg_bufs, bool tpa)
+static struct sk_buff *bnxt_rx_agg_netmems_skb(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ struct sk_buff *skb, u16 idx,
+ u32 agg_bufs, bool tpa)
{
- struct skb_shared_info *shinfo = skb_shinfo(skb);
u32 total_frag_len = 0;
- total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
- agg_bufs, tpa, NULL);
+ total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
+ skb, NULL);
if (!total_frag_len) {
skb_mark_for_recycle(skb);
dev_kfree_skb(skb);
return NULL;
}
- skb->data_len += total_frag_len;
- skb->len += total_frag_len;
- skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs;
return skb;
}
-static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
- struct bnxt_cp_ring_info *cpr,
- struct xdp_buff *xdp, u16 idx,
- u32 agg_bufs, bool tpa)
+static u32 bnxt_rx_agg_netmems_xdp(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ struct xdp_buff *xdp, u16 idx,
+ u32 agg_bufs, bool tpa)
{
struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
u32 total_frag_len = 0;
@@ -1295,8 +1366,8 @@ static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
if (!xdp_buff_has_frags(xdp))
shinfo->nr_frags = 0;
- total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
- idx, agg_bufs, tpa, xdp);
+ total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
+ NULL, xdp);
if (total_frag_len) {
xdp_buff_set_frags_flag(xdp);
shinfo->nr_frags = agg_bufs;
@@ -1330,13 +1401,13 @@ static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data,
if (!skb)
return NULL;
- dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
+ dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copybreak,
bp->rx_dir);
memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
len + NET_IP_ALIGN);
- dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
+ dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copybreak,
bp->rx_dir);
skb_put(skb, len);
@@ -1518,7 +1589,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
if (TPA_START_IS_IPV6(tpa_start1))
tpa_info->gso_type = SKB_GSO_TCPV6;
/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
- else if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP &&
+ else if (!BNXT_CHIP_P4_PLUS(bp) &&
TPA_START_HASH_TYPE(tpa_start) == 3)
tpa_info->gso_type = SKB_GSO_TCPV6;
tpa_info->rss_hash =
@@ -1755,7 +1826,7 @@ static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
- /* if vf-rep dev is NULL, the must belongs to the PF */
+ /* if vf-rep dev is NULL, it must belong to the PF */
return dev ? dev : bp->dev;
}
@@ -1829,7 +1900,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
return NULL;
}
- if (len <= bp->rx_copy_thresh) {
+ if (len <= bp->rx_copybreak) {
skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
if (!skb) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
@@ -1840,7 +1911,8 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
u8 *new_data;
dma_addr_t new_mapping;
- new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
+ new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, rxr,
+ GFP_ATOMIC);
if (!new_data) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
cpr->sw_stats->rx.rx_oom_discards += 1;
@@ -1852,22 +1924,23 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
tpa_info->mapping = new_mapping;
skb = napi_build_skb(data, bp->rx_buf_size);
- dma_unmap_single_attrs(&bp->pdev->dev, mapping,
- bp->rx_buf_use_size, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
+ dma_sync_single_for_cpu(&bp->pdev->dev, mapping,
+ bp->rx_buf_use_size, bp->rx_dir);
if (!skb) {
- skb_free_frag(data);
+ page_pool_free_va(rxr->head_pool, data, true);
bnxt_abort_tpa(cpr, idx, agg_bufs);
cpr->sw_stats->rx.rx_oom_discards += 1;
return NULL;
}
+ skb_mark_for_recycle(skb);
skb_reserve(skb, bp->rx_offset);
skb_put(skb, len);
}
if (agg_bufs) {
- skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
+ skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, idx, agg_bufs,
+ true);
if (!skb) {
/* Page reuse already handled by bnxt_rx_pages(). */
cpr->sw_stats->rx.rx_oom_discards += 1;
@@ -1987,6 +2060,7 @@ static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type,
}
return skb;
vlan_err:
+ skb_mark_for_recycle(skb);
dev_kfree_skb(skb);
return NULL;
}
@@ -2025,6 +2099,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
struct rx_cmp_ext *rxcmp1;
u32 tmp_raw_cons = *raw_cons;
u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
+ struct skb_shared_info *sinfo;
struct bnxt_sw_rx_bd *rx_buf;
unsigned int len;
u8 *data_ptr, agg_bufs, cmp_type;
@@ -2146,11 +2221,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (bnxt_xdp_attached(bp, rxr)) {
bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
if (agg_bufs) {
- u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
- cp_cons, agg_bufs,
- false);
+ u32 frag_len = bnxt_rx_agg_netmems_xdp(bp, cpr, &xdp,
+ cp_cons,
+ agg_bufs,
+ false);
if (!frag_len)
goto oom_next_rx;
+
}
xdp_active = true;
}
@@ -2160,9 +2237,15 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rc = 1;
goto next_rx;
}
+ if (xdp_buff_has_frags(&xdp)) {
+ sinfo = xdp_get_shared_info_from_buff(&xdp);
+ agg_bufs = sinfo->nr_frags;
+ } else {
+ agg_bufs = 0;
+ }
}
- if (len <= bp->rx_copy_thresh) {
+ if (len <= bp->rx_copybreak) {
if (!xdp_active)
skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
else
@@ -2193,11 +2276,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (agg_bufs) {
if (!xdp_active) {
- skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
+ skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, cp_cons,
+ agg_bufs, false);
if (!skb)
goto oom_next_rx;
} else {
- skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
+ skb = bnxt_xdp_build_skb(bp, skb, agg_bufs,
+ rxr->page_pool, &xdp);
if (!skb) {
/* we should be able to free the old skb here */
bnxt_xdp_buff_frags_free(rxr, &xdp);
@@ -2212,15 +2297,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
type = bnxt_rss_ext_op(bp, rxcmp);
} else {
- u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
+ u32 itypes = RX_CMP_ITYPES(rxcmp);
- /* RSS profiles 1 and 3 with extract code 0 for inner
- * 4-tuple
- */
- if (hash_type != 1 && hash_type != 3)
- type = PKT_HASH_TYPE_L3;
- else
+ if (itypes == RX_CMP_FLAGS_ITYPE_TCP ||
+ itypes == RX_CMP_FLAGS_ITYPE_UDP)
type = PKT_HASH_TYPE_L4;
+ else
+ type = PKT_HASH_TYPE_L3;
}
skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
}
@@ -2254,11 +2337,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
- unsigned long flags;
- spin_lock_irqsave(&ptp->ptp_lock, flags);
- ns = timecounter_cyc2time(&ptp->tc, ts);
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+ ns = bnxt_timecounter_cyc2time(ptp, ts);
memset(skb_hwtstamps(skb), 0,
sizeof(*skb_hwtstamps(skb)));
skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
@@ -2465,6 +2545,59 @@ static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info)
return false;
}
+bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type)
+{
+ u32 flags = bp->ctx->ctx_arr[type].flags;
+
+ return (flags & BNXT_CTX_MEM_TYPE_VALID) &&
+ ((flags & BNXT_CTX_MEM_FW_TRACE) ||
+ (flags & BNXT_CTX_MEM_FW_BIN_TRACE));
+}
+
+static void bnxt_bs_trace_init(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm)
+{
+ u32 mem_size, pages, rem_bytes, magic_byte_offset;
+ u16 trace_type = bnxt_bstore_to_trace[ctxm->type];
+ struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
+ struct bnxt_ring_mem_info *rmem, *rmem_pg_tbl;
+ struct bnxt_bs_trace_info *bs_trace;
+ int last_pg;
+
+ if (ctxm->instance_bmap && ctxm->instance_bmap > 1)
+ return;
+
+ mem_size = ctxm->max_entries * ctxm->entry_size;
+ rem_bytes = mem_size % BNXT_PAGE_SIZE;
+ pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
+
+ last_pg = (pages - 1) & (MAX_CTX_PAGES - 1);
+ magic_byte_offset = (rem_bytes ? rem_bytes : BNXT_PAGE_SIZE) - 1;
+
+ rmem = &ctx_pg[0].ring_mem;
+ bs_trace = &bp->bs_trace[trace_type];
+ bs_trace->ctx_type = ctxm->type;
+ bs_trace->trace_type = trace_type;
+ if (pages > MAX_CTX_PAGES) {
+ int last_pg_dir = rmem->nr_pages - 1;
+
+ rmem_pg_tbl = &ctx_pg[0].ctx_pg_tbl[last_pg_dir]->ring_mem;
+ bs_trace->magic_byte = rmem_pg_tbl->pg_arr[last_pg];
+ } else {
+ bs_trace->magic_byte = rmem->pg_arr[last_pg];
+ }
+ bs_trace->magic_byte += magic_byte_offset;
+ *bs_trace->magic_byte = BNXT_TRACE_BUF_MAGIC_BYTE;
+}
+
+#define BNXT_EVENT_BUF_PRODUCER_TYPE(data1) \
+ (((data1) & ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK) >>\
+ ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT)
+
+#define BNXT_EVENT_BUF_PRODUCER_OFFSET(data2) \
+ (((data2) & \
+ ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_MASK) >>\
+ ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_SFT)
+
#define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2) \
((data2) & \
ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK)
@@ -2764,12 +2897,12 @@ static int bnxt_async_event_process(struct bnxt *bp,
if (!ptp)
goto async_event_process_exit;
- spin_lock_irqsave(&ptp->ptp_lock, flags);
bnxt_ptp_update_current_time(bp);
ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
BNXT_PHC_BITS) | ptp->current_time);
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
bnxt_ptp_rtc_timecounter_init(ptp, ns);
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
}
break;
}
@@ -2781,11 +2914,19 @@ static int bnxt_async_event_process(struct bnxt *bp,
hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
goto async_event_process_exit;
}
+ case ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER: {
+ u16 type = (u16)BNXT_EVENT_BUF_PRODUCER_TYPE(data1);
+ u32 offset = BNXT_EVENT_BUF_PRODUCER_OFFSET(data2);
+
+ bnxt_bs_trace_check_wrap(&bp->bs_trace[type], offset);
+ goto async_event_process_exit;
+ }
default:
goto async_event_process_exit;
}
__bnxt_queue_sp_work(bp);
async_event_process_exit:
+ bnxt_ulp_async_events(bp, cmpl);
return 0;
}
@@ -2828,6 +2969,13 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
return 0;
}
+static bool bnxt_vnic_is_active(struct bnxt *bp)
+{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+
+ return vnic->fw_vnic_id != INVALID_HW_RING_ID && vnic->mru > 0;
+}
+
static irqreturn_t bnxt_msix(int irq, void *dev_instance)
{
struct bnxt_napi *bnapi = dev_instance;
@@ -2857,6 +3005,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
{
struct bnxt_napi *bnapi = cpr->bnapi;
u32 raw_cons = cpr->cp_raw_cons;
+ bool flush_xdp = false;
u32 cons;
int rx_pkts = 0;
u8 event = 0;
@@ -2910,6 +3059,8 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
else
rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
&event);
+ if (event & BNXT_REDIRECT_EVENT)
+ flush_xdp = true;
if (likely(rc >= 0))
rx_pkts += rc;
/* Increment rx_pkts when rc is -ENOMEM to count towards
@@ -2934,7 +3085,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
}
- if (event & BNXT_REDIRECT_EVENT) {
+ if (flush_xdp) {
xdp_do_flush();
event &= ~BNXT_REDIRECT_EVENT;
}
@@ -3095,7 +3246,7 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
break;
}
}
- if (bp->flags & BNXT_FLAG_DIM) {
+ if ((bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
struct dim_sample dim_sample = {};
dim_update_sample(cpr->event_ctr,
@@ -3226,7 +3377,7 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
poll_done:
cpr_rx = &cpr->cp_ring_arr[0];
if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
- (bp->flags & BNXT_FLAG_DIM)) {
+ (bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
struct dim_sample dim_sample = {};
dim_update_sample(cpr->event_ctr,
@@ -3238,101 +3389,107 @@ poll_done:
return work_done;
}
-static void bnxt_free_tx_skbs(struct bnxt *bp)
+static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
+ struct bnxt_tx_ring_info *txr, int idx)
{
int i, max_idx;
struct pci_dev *pdev = bp->pdev;
- if (!bp->tx_ring)
- return;
-
max_idx = bp->tx_nr_pages * TX_DESC_CNT;
- for (i = 0; i < bp->tx_nr_rings; i++) {
- struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
- int j;
- if (!txr->tx_buf_ring)
+ for (i = 0; i < max_idx;) {
+ struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[i];
+ struct sk_buff *skb;
+ int j, last;
+
+ if (idx < bp->tx_nr_rings_xdp &&
+ tx_buf->action == XDP_REDIRECT) {
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ xdp_return_frame(tx_buf->xdpf);
+ tx_buf->action = 0;
+ tx_buf->xdpf = NULL;
+ i++;
continue;
+ }
- for (j = 0; j < max_idx;) {
- struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
- struct sk_buff *skb;
- int k, last;
-
- if (i < bp->tx_nr_rings_xdp &&
- tx_buf->action == XDP_REDIRECT) {
- dma_unmap_single(&pdev->dev,
- dma_unmap_addr(tx_buf, mapping),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- xdp_return_frame(tx_buf->xdpf);
- tx_buf->action = 0;
- tx_buf->xdpf = NULL;
- j++;
- continue;
- }
+ skb = tx_buf->skb;
+ if (!skb) {
+ i++;
+ continue;
+ }
- skb = tx_buf->skb;
- if (!skb) {
- j++;
- continue;
- }
+ tx_buf->skb = NULL;
- tx_buf->skb = NULL;
+ if (tx_buf->is_push) {
+ dev_kfree_skb(skb);
+ i += 2;
+ continue;
+ }
- if (tx_buf->is_push) {
- dev_kfree_skb(skb);
- j += 2;
- continue;
- }
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_headlen(skb),
+ DMA_TO_DEVICE);
- dma_unmap_single(&pdev->dev,
- dma_unmap_addr(tx_buf, mapping),
- skb_headlen(skb),
- DMA_TO_DEVICE);
+ last = tx_buf->nr_frags;
+ i += 2;
+ for (j = 0; j < last; j++, i++) {
+ int ring_idx = i & bp->tx_ring_mask;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
- last = tx_buf->nr_frags;
- j += 2;
- for (k = 0; k < last; k++, j++) {
- int ring_idx = j & bp->tx_ring_mask;
- skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
-
- tx_buf = &txr->tx_buf_ring[ring_idx];
- dma_unmap_page(
- &pdev->dev,
- dma_unmap_addr(tx_buf, mapping),
- skb_frag_size(frag), DMA_TO_DEVICE);
- }
- dev_kfree_skb(skb);
+ tx_buf = &txr->tx_buf_ring[ring_idx];
+ netmem_dma_unmap_page_attrs(&pdev->dev,
+ dma_unmap_addr(tx_buf,
+ mapping),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE, 0);
}
- netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
+ dev_kfree_skb(skb);
+ }
+ netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, idx));
+}
+
+static void bnxt_free_tx_skbs(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->tx_ring)
+ return;
+
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
+
+ if (!txr->tx_buf_ring)
+ continue;
+
+ bnxt_free_one_tx_ring_skbs(bp, txr, i);
}
+
+ if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
+ bnxt_ptp_free_txts_skbs(bp->ptp_cfg);
}
static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
- struct pci_dev *pdev = bp->pdev;
int i, max_idx;
max_idx = bp->rx_nr_pages * RX_DESC_CNT;
for (i = 0; i < max_idx; i++) {
struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
- dma_addr_t mapping = rx_buf->mapping;
void *data = rx_buf->data;
if (!data)
continue;
rx_buf->data = NULL;
- if (BNXT_RX_PAGE_MODE(bp)) {
+ if (BNXT_RX_PAGE_MODE(bp))
page_pool_recycle_direct(rxr->page_pool, data);
- } else {
- dma_unmap_single_attrs(&pdev->dev, mapping,
- bp->rx_buf_use_size, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
- skb_free_frag(data);
- }
+ else
+ page_pool_free_va(rxr->head_pool, data, true);
}
}
@@ -3344,28 +3501,23 @@ static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info
for (i = 0; i < max_idx; i++) {
struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
- struct page *page = rx_agg_buf->page;
+ netmem_ref netmem = rx_agg_buf->netmem;
- if (!page)
+ if (!netmem)
continue;
- rx_agg_buf->page = NULL;
+ rx_agg_buf->netmem = 0;
__clear_bit(i, rxr->rx_agg_bmap);
- page_pool_recycle_direct(rxr->page_pool, page);
+ page_pool_recycle_direct_netmem(rxr->page_pool, netmem);
}
}
-static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
+static void bnxt_free_one_tpa_info_data(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
{
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
- struct pci_dev *pdev = bp->pdev;
- struct bnxt_tpa_idx_map *map;
int i;
- if (!rxr->rx_tpa)
- goto skip_rx_tpa_free;
-
for (i = 0; i < bp->max_tpa; i++) {
struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
u8 *data = tpa_info->data;
@@ -3373,14 +3525,20 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
if (!data)
continue;
- dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
- bp->rx_buf_use_size, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
-
tpa_info->data = NULL;
-
- skb_free_frag(data);
+ page_pool_free_va(rxr->head_pool, data, false);
}
+}
+
+static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct bnxt_tpa_idx_map *map;
+
+ if (!rxr->rx_tpa)
+ goto skip_rx_tpa_free;
+
+ bnxt_free_one_tpa_info_data(bp, rxr);
skip_rx_tpa_free:
if (!rxr->rx_buf_ring)
@@ -3408,7 +3566,7 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
return;
for (i = 0; i < bp->rx_nr_rings; i++)
- bnxt_free_one_rx_ring_skbs(bp, i);
+ bnxt_free_one_rx_ring_skbs(bp, &bp->rx_ring[i]);
}
static void bnxt_free_skbs(struct bnxt *bp)
@@ -3434,6 +3592,35 @@ static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
*(p2 + i + offset) = init_val;
}
+static size_t __bnxt_copy_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem,
+ void *buf, size_t offset, size_t head,
+ size_t tail)
+{
+ int i, head_page, start_idx, source_offset;
+ size_t len, rem_len, total_len, max_bytes;
+
+ head_page = head / rmem->page_size;
+ source_offset = head % rmem->page_size;
+ total_len = (tail - head) & MAX_CTX_BYTES_MASK;
+ if (!total_len)
+ total_len = MAX_CTX_BYTES;
+ start_idx = head_page % MAX_CTX_PAGES;
+ max_bytes = (rmem->nr_pages - start_idx) * rmem->page_size -
+ source_offset;
+ total_len = min(total_len, max_bytes);
+ rem_len = total_len;
+
+ for (i = start_idx; rem_len; i++, source_offset = 0) {
+ len = min((size_t)(rmem->page_size - source_offset), rem_len);
+ if (buf)
+ memcpy(buf + offset, rmem->pg_arr[i] + source_offset,
+ len);
+ offset += len;
+ rem_len -= len;
+ }
+ return total_len;
+}
+
static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
{
struct pci_dev *pdev = bp->pdev;
@@ -3520,29 +3707,64 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
return 0;
}
+static void bnxt_free_one_tpa_info(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ int i;
+
+ kfree(rxr->rx_tpa_idx_map);
+ rxr->rx_tpa_idx_map = NULL;
+ if (rxr->rx_tpa) {
+ for (i = 0; i < bp->max_tpa; i++) {
+ kfree(rxr->rx_tpa[i].agg_arr);
+ rxr->rx_tpa[i].agg_arr = NULL;
+ }
+ }
+ kfree(rxr->rx_tpa);
+ rxr->rx_tpa = NULL;
+}
+
static void bnxt_free_tpa_info(struct bnxt *bp)
{
- int i, j;
+ int i;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- kfree(rxr->rx_tpa_idx_map);
- rxr->rx_tpa_idx_map = NULL;
- if (rxr->rx_tpa) {
- for (j = 0; j < bp->max_tpa; j++) {
- kfree(rxr->rx_tpa[j].agg_arr);
- rxr->rx_tpa[j].agg_arr = NULL;
- }
- }
- kfree(rxr->rx_tpa);
- rxr->rx_tpa = NULL;
+ bnxt_free_one_tpa_info(bp, rxr);
}
}
+static int bnxt_alloc_one_tpa_info(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct rx_agg_cmp *agg;
+ int i;
+
+ rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
+ GFP_KERNEL);
+ if (!rxr->rx_tpa)
+ return -ENOMEM;
+
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+ return 0;
+ for (i = 0; i < bp->max_tpa; i++) {
+ agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
+ if (!agg)
+ return -ENOMEM;
+ rxr->rx_tpa[i].agg_arr = agg;
+ }
+ rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
+ GFP_KERNEL);
+ if (!rxr->rx_tpa_idx_map)
+ return -ENOMEM;
+
+ return 0;
+}
+
static int bnxt_alloc_tpa_info(struct bnxt *bp)
{
- int i, j;
+ int i, rc;
bp->max_tpa = MAX_TPA;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
@@ -3553,25 +3775,10 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp)
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- struct rx_agg_cmp *agg;
-
- rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
- GFP_KERNEL);
- if (!rxr->rx_tpa)
- return -ENOMEM;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
- continue;
- for (j = 0; j < bp->max_tpa; j++) {
- agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
- if (!agg)
- return -ENOMEM;
- rxr->rx_tpa[j].agg_arr = agg;
- }
- rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
- GFP_KERNEL);
- if (!rxr->rx_tpa_idx_map)
- return -ENOMEM;
+ rc = bnxt_alloc_one_tpa_info(bp, rxr);
+ if (rc)
+ return rc;
}
return 0;
}
@@ -3595,7 +3802,8 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
xdp_rxq_info_unreg(&rxr->xdp_rxq);
page_pool_destroy(rxr->page_pool);
- rxr->page_pool = NULL;
+ page_pool_destroy(rxr->head_pool);
+ rxr->page_pool = rxr->head_pool = NULL;
kfree(rxr->rx_agg_bmap);
rxr->rx_agg_bmap = NULL;
@@ -3612,26 +3820,64 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr,
int numa_node)
{
+ const unsigned int agg_size_fac = PAGE_SIZE / BNXT_RX_PAGE_SIZE;
+ const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
struct page_pool_params pp = { 0 };
+ struct page_pool *pool;
- pp.pool_size = bp->rx_agg_ring_size;
+ pp.pool_size = bp->rx_agg_ring_size / agg_size_fac;
if (BNXT_RX_PAGE_MODE(bp))
- pp.pool_size += bp->rx_ring_size;
+ pp.pool_size += bp->rx_ring_size / rx_size_fac;
pp.nid = numa_node;
- pp.napi = &rxr->bnapi->napi;
pp.netdev = bp->dev;
pp.dev = &bp->pdev->dev;
pp.dma_dir = bp->rx_dir;
pp.max_len = PAGE_SIZE;
- pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
+ PP_FLAG_ALLOW_UNREADABLE_NETMEM;
+ pp.queue_idx = rxr->bnapi->index;
+
+ pool = page_pool_create(&pp);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+ rxr->page_pool = pool;
+
+ rxr->need_head_pool = page_pool_is_unreadable(pool);
+ if (bnxt_separate_head_pool(rxr)) {
+ pp.pool_size = min(bp->rx_ring_size / rx_size_fac, 1024);
+ pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pool = page_pool_create(&pp);
+ if (IS_ERR(pool))
+ goto err_destroy_pp;
+ } else {
+ page_pool_get(pool);
+ }
+ rxr->head_pool = pool;
- rxr->page_pool = page_pool_create(&pp);
- if (IS_ERR(rxr->page_pool)) {
- int err = PTR_ERR(rxr->page_pool);
+ return 0;
+
+err_destroy_pp:
+ page_pool_destroy(rxr->page_pool);
+ rxr->page_pool = NULL;
+ return PTR_ERR(pool);
+}
+
+static void bnxt_enable_rx_page_pool(struct bnxt_rx_ring_info *rxr)
+{
+ page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi);
+ page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi);
+}
+
+static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+ u16 mem_size;
+
+ rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
+ mem_size = rxr->rx_agg_bmap_size / 8;
+ rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
+ if (!rxr->rx_agg_bmap)
+ return -ENOMEM;
- rxr->page_pool = NULL;
- return err;
- }
return 0;
}
@@ -3660,6 +3906,7 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node);
if (rc)
return rc;
+ bnxt_enable_rx_page_pool(rxr);
rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
if (rc < 0)
@@ -3679,19 +3926,15 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
ring->grp_idx = i;
if (agg_rings) {
- u16 mem_size;
-
ring = &rxr->rx_agg_ring_struct;
rc = bnxt_alloc_ring(bp, &ring->ring_mem);
if (rc)
return rc;
ring->grp_idx = i;
- rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
- mem_size = rxr->rx_agg_bmap_size / 8;
- rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
- if (!rxr->rx_agg_bmap)
- return -ENOMEM;
+ rc = bnxt_alloc_rx_agg_bmap(bp, rxr);
+ if (rc)
+ return rc;
}
}
if (bp->flags & BNXT_FLAG_TPA)
@@ -4023,6 +4266,8 @@ static void bnxt_reset_rx_ring_struct(struct bnxt *bp,
rxr->page_pool->p.napi = NULL;
rxr->page_pool = NULL;
+ rxr->head_pool->p.napi = NULL;
+ rxr->head_pool = NULL;
memset(&rxr->xdp_rxq, 0, sizeof(struct xdp_rxq_info));
ring = &rxr->rx_ring_struct;
@@ -4147,18 +4392,18 @@ static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp,
rxr->rx_prod = prod;
}
-static void bnxt_alloc_one_rx_ring_page(struct bnxt *bp,
- struct bnxt_rx_ring_info *rxr,
- int ring_nr)
+static void bnxt_alloc_one_rx_ring_netmem(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ int ring_nr)
{
u32 prod;
int i;
prod = rxr->rx_agg_prod;
for (i = 0; i < bp->rx_agg_ring_size; i++) {
- if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
+ if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_KERNEL)) {
netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
- ring_nr, i, bp->rx_ring_size);
+ ring_nr, i, bp->rx_agg_ring_size);
break;
}
prod = NEXT_RX_AGG(prod);
@@ -4166,31 +4411,43 @@ static void bnxt_alloc_one_rx_ring_page(struct bnxt *bp,
rxr->rx_agg_prod = prod;
}
+static int bnxt_alloc_one_tpa_info_data(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ dma_addr_t mapping;
+ u8 *data;
+ int i;
+
+ for (i = 0; i < bp->max_tpa; i++) {
+ data = __bnxt_alloc_rx_frag(bp, &mapping, rxr,
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ rxr->rx_tpa[i].data = data;
+ rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
+ rxr->rx_tpa[i].mapping = mapping;
+ }
+
+ return 0;
+}
+
static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
{
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
- int i;
+ int rc;
bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr);
if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
return 0;
- bnxt_alloc_one_rx_ring_page(bp, rxr, ring_nr);
+ bnxt_alloc_one_rx_ring_netmem(bp, rxr, ring_nr);
if (rxr->rx_tpa) {
- dma_addr_t mapping;
- u8 *data;
-
- for (i = 0; i < bp->max_tpa; i++) {
- data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- rxr->rx_tpa[i].data = data;
- rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
- rxr->rx_tpa[i].mapping = mapping;
- }
+ rc = bnxt_alloc_one_tpa_info_data(bp, rxr);
+ if (rc)
+ return rc;
}
return 0;
}
@@ -4222,7 +4479,14 @@ static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp,
ring->fw_ring_id = INVALID_HW_RING_ID;
if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
- RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
+ RX_BD_TYPE_RX_AGG_BD;
+
+ /* On P7, setting EOP will cause the chip to disable
+ * Relaxed Ordering (RO) for TPA data. Disable EOP for
+ * potentially higher performance with RO.
+ */
+ if (BNXT_CHIP_P5_AND_MINUS(bp) || !(bp->flags & BNXT_FLAG_TPA))
+ type |= RX_BD_FLAGS_AGG_EOP;
bnxt_init_rxbd_pages(ring, type);
}
@@ -4453,6 +4717,17 @@ void bnxt_set_tpa_flags(struct bnxt *bp)
bp->flags |= BNXT_FLAG_GRO;
}
+static void bnxt_init_ring_params(struct bnxt *bp)
+{
+ unsigned int rx_size;
+
+ bp->rx_copybreak = BNXT_DEFAULT_RX_COPYBREAK;
+ /* Try to fit 4 chunks into a 4k page */
+ rx_size = SZ_1K -
+ NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ bp->dev->cfg->hds_thresh = max(BNXT_DEFAULT_RX_COPYBREAK, rx_size);
+}
+
/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
* be set on entry.
*/
@@ -4467,12 +4742,11 @@ void bnxt_set_ring_params(struct bnxt *bp)
rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
ring_size = bp->rx_ring_size;
bp->rx_agg_ring_size = 0;
bp->rx_agg_nr_pages = 0;
- if (bp->flags & BNXT_FLAG_TPA)
+ if (bp->flags & BNXT_FLAG_TPA || bp->flags & BNXT_FLAG_HDS)
agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
bp->flags &= ~BNXT_FLAG_JUMBO;
@@ -4512,7 +4786,10 @@ void bnxt_set_ring_params(struct bnxt *bp)
ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
} else {
- rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
+ rx_size = max3(BNXT_DEFAULT_RX_COPYBREAK,
+ bp->rx_copybreak,
+ bp->dev->cfg_pending->hds_thresh);
+ rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN);
rx_space = rx_size + NET_SKB_PAD +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
@@ -4553,12 +4830,12 @@ void bnxt_set_ring_params(struct bnxt *bp)
/* Changing allocation mode of RX rings.
* TODO: Update when extending xdp_rxq_info to support allocation modes.
*/
-int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+static void __bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
{
struct net_device *dev = bp->dev;
if (page_mode) {
- bp->flags &= ~BNXT_FLAG_AGG_RINGS;
+ bp->flags &= ~(BNXT_FLAG_AGG_RINGS | BNXT_FLAG_NO_AGG_RINGS);
bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
if (bp->xdp_prog->aux->xdp_has_frags)
@@ -4574,15 +4851,30 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
bp->rx_skb_func = bnxt_rx_page_skb;
}
bp->rx_dir = DMA_BIDIRECTIONAL;
- /* Disable LRO or GRO_HW */
- netdev_update_features(dev);
} else {
dev->max_mtu = bp->max_mtu;
bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
bp->rx_dir = DMA_FROM_DEVICE;
bp->rx_skb_func = bnxt_rx_skb;
}
- return 0;
+}
+
+void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+{
+ __bnxt_set_rx_skb_mode(bp, page_mode);
+
+ if (!page_mode) {
+ int rx, tx;
+
+ bnxt_get_max_rings(bp, &rx, &tx, true);
+ if (rx > 1) {
+ bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
+ bp->dev->hw_features |= NETIF_F_LRO;
+ }
+ }
+
+ /* Update LRO and GRO_HW availability */
+ netdev_update_features(bp->dev);
}
static void bnxt_free_vnic_attributes(struct bnxt *bp)
@@ -5053,8 +5345,10 @@ static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
{
int i;
- /* Under rtnl_lock and all our NAPIs have been disabled. It's
- * safe to delete the hash table.
+ netdev_assert_locked_or_invisible(bp->dev);
+
+ /* Under netdev instance lock and all our NAPIs have been disabled.
+ * It's safe to delete the hash table.
*/
for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
struct hlist_head *head;
@@ -5382,6 +5676,8 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
+ if (bp->fw_cap & BNXT_FW_CAP_NPAR_1_2)
+ flags |= FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT;
req->flags = cpu_to_le32(flags);
req->ver_maj_8b = DRV_VER_MAJ;
req->ver_min_8b = DRV_VER_MIN;
@@ -5399,6 +5695,10 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
u16 cmd = bnxt_vf_req_snif[i];
unsigned int bit, idx;
+ if ((bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN) &&
+ cmd == HWRM_PORT_PHY_QCFG)
+ continue;
+
idx = cmd / 32;
bit = cmd % 32;
data[idx] |= 1 << bit;
@@ -6409,6 +6709,7 @@ static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
+ u16 hds_thresh = (u16)bp->dev->cfg_pending->hds_thresh;
struct hwrm_vnic_plcmodes_cfg_input *req;
int rc;
@@ -6418,16 +6719,14 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
+ req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
- if (BNXT_RX_PAGE_MODE(bp)) {
- req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
- } else {
+ if (!BNXT_RX_PAGE_MODE(bp) && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
req->enables |=
cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
- req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
- req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
+ req->hds_threshold = cpu_to_le16(hds_thresh);
}
req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
return hwrm_req_send(bp, req);
@@ -6550,7 +6849,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
req->lb_rule = cpu_to_le16(0xffff);
vnic_mru:
- vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
+ vnic->mru = bp->dev->mtu + VLAN_ETH_HLEN;
req->mru = cpu_to_le16(vnic->mru);
req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
@@ -6687,6 +6986,8 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP;
if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP)
+ bp->rss_cap |= BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP;
if (flags & VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP)
bp->fw_cap |= BNXT_FW_CAP_VNIC_RE_FLUSH;
}
@@ -6753,6 +7054,30 @@ static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
hwrm_req_drop(bp, req);
}
+static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
+ struct hwrm_ring_alloc_input *req,
+ struct bnxt_ring_struct *ring)
+{
+ struct bnxt_ring_grp_info *grp_info = &bp->grp_info[ring->grp_idx];
+ u32 enables = RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID |
+ RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID;
+
+ if (ring_type == HWRM_RING_ALLOC_AGG) {
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
+ req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
+ req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
+ enables |= RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID;
+ } else {
+ req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
+ if (NET_IP_ALIGN == 2)
+ req->flags =
+ cpu_to_le16(RING_ALLOC_REQ_FLAGS_RX_SOP_PAD);
+ }
+ req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+ req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
+ req->enables |= cpu_to_le32(enables);
+}
+
static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
struct bnxt_ring_struct *ring,
u32 ring_type, u32 map_index)
@@ -6804,37 +7129,13 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
break;
}
case HWRM_RING_ALLOC_RX:
- req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
- req->length = cpu_to_le32(bp->rx_ring_mask + 1);
- if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- u16 flags = 0;
-
- /* Association of rx ring with stats context */
- grp_info = &bp->grp_info[ring->grp_idx];
- req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
- req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
- req->enables |= cpu_to_le32(
- RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
- if (NET_IP_ALIGN == 2)
- flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
- req->flags = cpu_to_le16(flags);
- }
- break;
case HWRM_RING_ALLOC_AGG:
- if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
- /* Association of agg ring with rx ring */
- grp_info = &bp->grp_info[ring->grp_idx];
- req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
- req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
- req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
- req->enables |= cpu_to_le32(
- RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
- RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
- } else {
- req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
- }
- req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+ req->length = (ring_type == HWRM_RING_ALLOC_RX) ?
+ cpu_to_le32(bp->rx_ring_mask + 1) :
+ cpu_to_le32(bp->rx_agg_ring_mask + 1);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ bnxt_set_rx_ring_params_p5(bp, ring_type, req, ring);
break;
case HWRM_RING_ALLOC_CMPL:
req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
@@ -6858,7 +7159,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
default:
netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
ring_type);
- return -1;
+ return -EINVAL;
}
resp = hwrm_req_hold(bp, req);
@@ -7015,6 +7316,39 @@ static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp,
return 0;
}
+static int bnxt_hwrm_cp_ring_alloc_p5(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr)
+{
+ const u32 type = HWRM_RING_ALLOC_CMPL;
+ struct bnxt_napi *bnapi = cpr->bnapi;
+ struct bnxt_ring_struct *ring;
+ u32 map_idx = bnapi->index;
+ int rc;
+
+ ring = &cpr->cp_ring_struct;
+ ring->handle = BNXT_SET_NQ_HDL(cpr);
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ if (rc)
+ return rc;
+ bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
+ bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
+ return 0;
+}
+
+static int bnxt_hwrm_tx_ring_alloc(struct bnxt *bp,
+ struct bnxt_tx_ring_info *txr, u32 tx_idx)
+{
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+ const u32 type = HWRM_RING_ALLOC_TX;
+ int rc;
+
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, tx_idx);
+ if (rc)
+ return rc;
+ bnxt_set_db(bp, &txr->tx_db, type, tx_idx, ring->fw_ring_id);
+ return 0;
+}
+
static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
{
bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
@@ -7051,33 +7385,17 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
}
}
- type = HWRM_RING_ALLOC_TX;
for (i = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
- struct bnxt_ring_struct *ring;
- u32 map_idx;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- struct bnxt_cp_ring_info *cpr2 = txr->tx_cpr;
- struct bnxt_napi *bnapi = txr->bnapi;
- u32 type2 = HWRM_RING_ALLOC_CMPL;
-
- ring = &cpr2->cp_ring_struct;
- ring->handle = BNXT_SET_NQ_HDL(cpr2);
- map_idx = bnapi->index;
- rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
+ rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
if (rc)
goto err_out;
- bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
- ring->fw_ring_id);
- bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
}
- ring = &txr->tx_ring_struct;
- map_idx = i;
- rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ rc = bnxt_hwrm_tx_ring_alloc(bp, txr, i);
if (rc)
goto err_out;
- bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
}
for (i = 0; i < bp->rx_nr_rings; i++) {
@@ -7090,20 +7408,9 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
if (!agg_rings)
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr;
- struct bnxt_napi *bnapi = rxr->bnapi;
- u32 type2 = HWRM_RING_ALLOC_CMPL;
- struct bnxt_ring_struct *ring;
- u32 map_idx = bnapi->index;
-
- ring = &cpr2->cp_ring_struct;
- ring->handle = BNXT_SET_NQ_HDL(cpr2);
- rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
+ rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
if (rc)
goto err_out;
- bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
- ring->fw_ring_id);
- bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
}
}
@@ -7118,6 +7425,26 @@ err_out:
return rc;
}
+static void bnxt_cancel_dim(struct bnxt *bp)
+{
+ int i;
+
+ /* DIM work is initialized in bnxt_enable_napi(). Proceed only
+ * if NAPI is enabled.
+ */
+ if (!bp->bnapi || test_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
+ return;
+
+ /* Make sure NAPI sees that the VNIC is disabled */
+ synchronize_net();
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+ struct bnxt_napi *bnapi = rxr->bnapi;
+
+ cancel_work_sync(&bnapi->cp_ring.dim.work);
+ }
+}
+
static int hwrm_ring_free_send_msg(struct bnxt *bp,
struct bnxt_ring_struct *ring,
u32 ring_type, int cmpl_ring_id)
@@ -7151,6 +7478,23 @@ exit:
return 0;
}
+static void bnxt_hwrm_tx_ring_free(struct bnxt *bp,
+ struct bnxt_tx_ring_info *txr,
+ bool close_path)
+{
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+ u32 cmpl_ring_id;
+
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ cmpl_ring_id = close_path ? bnxt_cp_ring_for_tx(bp, txr) :
+ INVALID_HW_RING_ID;
+ hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX,
+ cmpl_ring_id);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
static void bnxt_hwrm_rx_ring_free(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr,
bool close_path)
@@ -7195,6 +7539,33 @@ static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp,
bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
}
+static void bnxt_hwrm_cp_ring_free(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr)
+{
+ struct bnxt_ring_struct *ring;
+
+ ring = &cpr->cp_ring_struct;
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL,
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnxt_clear_one_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+{
+ struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+ int i, size = ring->ring_mem.page_size;
+
+ cpr->cp_raw_cons = 0;
+ cpr->toggle = 0;
+
+ for (i = 0; i < bp->cp_nr_pages; i++)
+ if (cpr->cp_desc_ring[i])
+ memset(cpr->cp_desc_ring[i], 0, size);
+}
+
static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
{
u32 type;
@@ -7203,21 +7574,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
if (!bp->bnapi)
return;
- for (i = 0; i < bp->tx_nr_rings; i++) {
- struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
- struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
-
- if (ring->fw_ring_id != INVALID_HW_RING_ID) {
- u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
-
- hwrm_ring_free_send_msg(bp, ring,
- RING_FREE_REQ_RING_TYPE_TX,
- close_path ? cmpl_ring_id :
- INVALID_HW_RING_ID);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- }
- }
+ for (i = 0; i < bp->tx_nr_rings; i++)
+ bnxt_hwrm_tx_ring_free(bp, &bp->tx_ring[i], close_path);
+ bnxt_cancel_dim(bp);
for (i = 0; i < bp->rx_nr_rings; i++) {
bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path);
bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path);
@@ -7239,17 +7599,9 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
struct bnxt_ring_struct *ring;
int j;
- for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++) {
- struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
+ for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++)
+ bnxt_hwrm_cp_ring_free(bp, &cpr->cp_ring_arr[j]);
- ring = &cpr2->cp_ring_struct;
- if (ring->fw_ring_id == INVALID_HW_RING_ID)
- continue;
- hwrm_ring_free_send_msg(bp, ring,
- RING_FREE_REQ_RING_TYPE_L2_CMPL,
- INVALID_HW_RING_ID);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- }
ring = &cpr->cp_ring_struct;
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
hwrm_ring_free_send_msg(bp, ring, type,
@@ -7683,7 +8035,8 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
}
rx_rings = min_t(int, rx_rings, hwr.grp);
hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings);
- if (hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
+ if (bnxt_ulp_registered(bp->edev) &&
+ hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
hwr.stat -= bnxt_get_ulp_stat_ctxs(bp);
hwr.cp = min_t(int, hwr.cp, hwr.stat);
rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
@@ -7691,6 +8044,11 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
hwr.rx = rx_rings << 1;
tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx);
hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
+ if (hwr.tx != bp->tx_nr_rings) {
+ netdev_warn(bp->dev,
+ "Able to reserve only %d out of %d requested TX rings\n",
+ hwr.tx, bp->tx_nr_rings);
+ }
bp->tx_nr_rings = hwr.tx;
/* If we cannot reserve all the RX rings, reset the RSS map only
@@ -8131,16 +8489,20 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
if (rc)
goto func_qcfg_exit;
+ flags = le16_to_cpu(resp->flags);
#ifdef CONFIG_BNXT_SRIOV
if (BNXT_VF(bp)) {
struct bnxt_vf_info *vf = &bp->vf;
vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
+ if (flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF)
+ vf->flags |= BNXT_VF_TRUST;
+ else
+ vf->flags &= ~BNXT_VF_TRUST;
} else {
bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
}
#endif
- flags = le16_to_cpu(resp->flags);
if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
@@ -8153,8 +8515,17 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
+ if (flags & FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV)
+ bp->fw_cap |= BNXT_FW_CAP_ENABLE_RDMA_SRIOV;
+ if (resp->roce_bidi_opt_mode &
+ FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DEDICATED)
+ bp->cos0_cos1_shared = 1;
+ else
+ bp->cos0_cos1_shared = 0;
+
switch (resp->port_partition_type) {
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
+ case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2:
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
bp->port_partition_type = resp->port_partition_type;
@@ -8214,7 +8585,7 @@ static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
int n = 1;
- if (!ctxm->max_entries)
+ if (!ctxm->max_entries || ctxm->pg_info)
continue;
if (ctxm->instance_bmap)
@@ -8226,6 +8597,9 @@ static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
return 0;
}
+static void bnxt_free_one_ctx_mem(struct bnxt *bp,
+ struct bnxt_ctx_mem_type *ctxm, bool force);
+
#define BNXT_CTX_INIT_VALID(flags) \
(!!((flags) & \
FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))
@@ -8234,7 +8608,7 @@ static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
{
struct hwrm_func_backing_store_qcaps_v2_output *resp;
struct hwrm_func_backing_store_qcaps_v2_input *req;
- struct bnxt_ctx_mem_info *ctx;
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
u16 type;
int rc;
@@ -8242,16 +8616,20 @@ static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
if (rc)
return rc;
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
- bp->ctx = ctx;
+ if (!ctx) {
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ bp->ctx = ctx;
+ }
resp = hwrm_req_hold(bp, req);
for (type = 0; type < BNXT_CTX_V2_MAX; ) {
struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
u8 init_val, init_off, i;
+ u32 max_entries;
+ u16 entry_size;
__le32 *p;
u32 flags;
@@ -8261,15 +8639,26 @@ static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
goto ctx_done;
flags = le32_to_cpu(resp->flags);
type = le16_to_cpu(resp->next_valid_type);
- if (!(flags & FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID))
+ if (!(flags & BNXT_CTX_MEM_TYPE_VALID)) {
+ bnxt_free_one_ctx_mem(bp, ctxm, true);
continue;
-
+ }
+ entry_size = le16_to_cpu(resp->entry_size);
+ max_entries = le32_to_cpu(resp->max_num_entries);
+ if (ctxm->mem_valid) {
+ if (!(flags & BNXT_CTX_MEM_PERSIST) ||
+ ctxm->entry_size != entry_size ||
+ ctxm->max_entries != max_entries)
+ bnxt_free_one_ctx_mem(bp, ctxm, true);
+ else
+ continue;
+ }
ctxm->type = le16_to_cpu(resp->type);
- ctxm->entry_size = le16_to_cpu(resp->entry_size);
+ ctxm->entry_size = entry_size;
ctxm->flags = flags;
ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
ctxm->entry_multiple = resp->entry_multiple;
- ctxm->max_entries = le32_to_cpu(resp->max_num_entries);
+ ctxm->max_entries = max_entries;
ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
init_val = resp->ctx_init_value;
init_off = resp->ctx_init_offset;
@@ -8294,7 +8683,8 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
struct hwrm_func_backing_store_qcaps_input *req;
int rc;
- if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
+ if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) ||
+ (bp->ctx && bp->ctx->flags & BNXT_CTX_FLAG_INITED))
return 0;
if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
@@ -8635,6 +9025,36 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
return rc;
}
+static size_t bnxt_copy_ctx_pg_tbls(struct bnxt *bp,
+ struct bnxt_ctx_pg_info *ctx_pg,
+ void *buf, size_t offset, size_t head,
+ size_t tail)
+{
+ struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
+ size_t nr_pages = ctx_pg->nr_pages;
+ int page_size = rmem->page_size;
+ size_t len = 0, total_len = 0;
+ u16 depth = rmem->depth;
+
+ tail %= nr_pages * page_size;
+ do {
+ if (depth > 1) {
+ int i = head / (page_size * MAX_CTX_PAGES);
+ struct bnxt_ctx_pg_info *pg_tbl;
+
+ pg_tbl = ctx_pg->ctx_pg_tbl[i];
+ rmem = &pg_tbl->ring_mem;
+ }
+ len = __bnxt_copy_ring(bp, rmem, buf, offset, head, tail);
+ head += len;
+ offset += len;
+ total_len += len;
+ if (head >= nr_pages * page_size)
+ head = 0;
+ } while (head != tail);
+ return total_len;
+}
+
static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
struct bnxt_ctx_pg_info *ctx_pg)
{
@@ -8685,6 +9105,8 @@ static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp,
rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl,
ctxm->init_value ? ctxm : NULL);
}
+ if (!rc)
+ ctxm->mem_valid = 1;
return rc;
}
@@ -8711,6 +9133,16 @@ static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
hwrm_req_hold(bp, req);
req->type = cpu_to_le16(ctxm->type);
req->entry_size = cpu_to_le16(ctxm->entry_size);
+ if ((ctxm->flags & BNXT_CTX_MEM_PERSIST) &&
+ bnxt_bs_trace_avail(bp, ctxm->type)) {
+ struct bnxt_bs_trace_info *bs_trace;
+ u32 enables;
+
+ enables = FUNC_BACKING_STORE_CFG_V2_REQ_ENABLES_NEXT_BS_OFFSET;
+ req->enables = cpu_to_le32(enables);
+ bs_trace = &bp->bs_trace[bnxt_bstore_to_trace[ctxm->type]];
+ req->next_bs_offset = cpu_to_le32(bs_trace->last_offset);
+ }
req->subtype_valid_cnt = ctxm->split_entry_cnt;
for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
p[i] = cpu_to_le32(ctxm->split[i]);
@@ -8736,25 +9168,47 @@ static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
return rc;
}
-static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena)
+static int bnxt_backing_store_cfg_v2(struct bnxt *bp)
{
struct bnxt_ctx_mem_info *ctx = bp->ctx;
struct bnxt_ctx_mem_type *ctxm;
- u16 last_type;
+ u16 last_type = BNXT_CTX_INV;
int rc = 0;
u16 type;
- if (!ena)
- return 0;
- else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM)
- last_type = BNXT_CTX_MAX - 1;
- else
- last_type = BNXT_CTX_L2_MAX - 1;
+ for (type = BNXT_CTX_SRT; type <= BNXT_CTX_QPC; type++) {
+ ctxm = &ctx->ctx_arr[type];
+ if (!bnxt_bs_trace_avail(bp, type))
+ continue;
+ if (!ctxm->mem_valid) {
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm,
+ ctxm->max_entries, 1);
+ if (rc) {
+ netdev_warn(bp->dev, "Unable to setup ctx page for type:0x%x.\n",
+ type);
+ continue;
+ }
+ bnxt_bs_trace_init(bp, ctxm);
+ }
+ last_type = type;
+ }
+
+ if (last_type == BNXT_CTX_INV) {
+ for (type = 0; type < BNXT_CTX_MAX; type++) {
+ ctxm = &ctx->ctx_arr[type];
+ if (ctxm->mem_valid)
+ last_type = type;
+ }
+ if (last_type == BNXT_CTX_INV)
+ return 0;
+ }
ctx->ctx_arr[last_type].last = 1;
for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) {
ctxm = &ctx->ctx_arr[type];
+ if (!ctxm->mem_valid)
+ continue;
rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last);
if (rc)
return rc;
@@ -8762,21 +9216,63 @@ static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena)
return 0;
}
-void bnxt_free_ctx_mem(struct bnxt *bp)
+/**
+ * __bnxt_copy_ctx_mem - copy host context memory
+ * @bp: The driver context
+ * @ctxm: The pointer to the context memory type
+ * @buf: The destination buffer or NULL to just obtain the length
+ * @offset: The buffer offset to copy the data to
+ * @head: The head offset of context memory to copy from
+ * @tail: The tail offset (last byte + 1) of context memory to end the copy
+ *
+ * This function is called for debugging purposes to dump the host context
+ * used by the chip.
+ *
+ * Return: Length of memory copied
+ */
+static size_t __bnxt_copy_ctx_mem(struct bnxt *bp,
+ struct bnxt_ctx_mem_type *ctxm, void *buf,
+ size_t offset, size_t head, size_t tail)
{
- struct bnxt_ctx_mem_info *ctx = bp->ctx;
- u16 type;
+ struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
+ size_t len = 0, total_len = 0;
+ int i, n = 1;
- if (!ctx)
- return;
+ if (!ctx_pg)
+ return 0;
- for (type = 0; type < BNXT_CTX_V2_MAX; type++) {
- struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
- struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
- int i, n = 1;
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ for (i = 0; i < n; i++) {
+ len = bnxt_copy_ctx_pg_tbls(bp, &ctx_pg[i], buf, offset, head,
+ tail);
+ offset += len;
+ total_len += len;
+ }
+ return total_len;
+}
- if (!ctx_pg)
- continue;
+size_t bnxt_copy_ctx_mem(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm,
+ void *buf, size_t offset)
+{
+ size_t tail = ctxm->max_entries * ctxm->entry_size;
+
+ return __bnxt_copy_ctx_mem(bp, ctxm, buf, offset, 0, tail);
+}
+
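/* Editorial sketch, not part of the patch: one way a debug consumer could
 * use bnxt_copy_ctx_mem() above. Per the kernel-doc, a NULL buf only
 * computes the length, so the dump is done in two passes. The helper name
 * bnxt_dbg_dump_one_ctx() is hypothetical and <linux/vmalloc.h> is assumed
 * to be included.
 */
static void bnxt_dbg_dump_one_ctx(struct bnxt *bp, u16 type)
{
	struct bnxt_ctx_mem_type *ctxm;
	size_t len;
	void *buf;

	if (!bp->ctx)
		return;

	ctxm = &bp->ctx->ctx_arr[type];
	len = bnxt_copy_ctx_mem(bp, ctxm, NULL, 0);	/* length-only pass */
	if (!len)
		return;

	buf = vmalloc(len);
	if (!buf)
		return;

	bnxt_copy_ctx_mem(bp, ctxm, buf, 0);		/* copy the backing store pages */
	/* ... hand buf to the coredump / devlink health consumer ... */
	vfree(buf);
}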
+static void bnxt_free_one_ctx_mem(struct bnxt *bp,
+ struct bnxt_ctx_mem_type *ctxm, bool force)
+{
+ struct bnxt_ctx_pg_info *ctx_pg;
+ int i, n = 1;
+
+ ctxm->last = 0;
+
+ if (ctxm->mem_valid && !force && (ctxm->flags & BNXT_CTX_MEM_PERSIST))
+ return;
+
+ ctx_pg = ctxm->pg_info;
+ if (ctx_pg) {
if (ctxm->instance_bmap)
n = hweight32(ctxm->instance_bmap);
for (i = 0; i < n; i++)
@@ -8784,11 +9280,27 @@ void bnxt_free_ctx_mem(struct bnxt *bp)
kfree(ctx_pg);
ctxm->pg_info = NULL;
+ ctxm->mem_valid = 0;
}
+ memset(ctxm, 0, sizeof(*ctxm));
+}
+
+void bnxt_free_ctx_mem(struct bnxt *bp, bool force)
+{
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
+ u16 type;
+
+ if (!ctx)
+ return;
+
+ for (type = 0; type < BNXT_CTX_V2_MAX; type++)
+ bnxt_free_one_ctx_mem(bp, &ctx->ctx_arr[type], force);
ctx->flags &= ~BNXT_CTX_FLAG_INITED;
- kfree(ctx);
- bp->ctx = NULL;
+ if (force) {
+ kfree(ctx);
+ bp->ctx = NULL;
+ }
}
static int bnxt_alloc_ctx_mem(struct bnxt *bp)
@@ -8815,6 +9327,10 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
return 0;
+ ena = 0;
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+ goto skip_legacy;
+
ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
l2_qps = ctxm->qp_l2_entries;
qp1_qps = ctxm->qp_qp1_entries;
@@ -8823,13 +9339,20 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
srqs = ctxm->srq_l2_entries;
max_srqs = ctxm->max_entries;
- ena = 0;
if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
pg_lvl = 2;
- extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
- /* allocate extra qps if fw supports RoCE fast qp destroy feature */
- extra_qps += fast_qpmd_qps;
- extra_srqs = min_t(u32, 8192, max_srqs - srqs);
+ if (BNXT_SW_RES_LMT(bp)) {
+ extra_qps = max_qps - l2_qps - qp1_qps;
+ extra_srqs = max_srqs - srqs;
+ } else {
+ extra_qps = min_t(u32, 65536,
+ max_qps - l2_qps - qp1_qps);
+ /* allocate extra qps if fw supports RoCE fast qp
+ * destroy feature
+ */
+ extra_qps += fast_qpmd_qps;
+ extra_srqs = min_t(u32, 8192, max_srqs - srqs);
+ }
if (fast_qpmd_qps)
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
}
@@ -8865,14 +9388,20 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
goto skip_rdma;
ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
- /* 128K extra is needed to accommodate static AH context
- * allocation by f/w.
- */
- num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
- num_ah = min_t(u32, num_mr, 1024 * 128);
- ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
- if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
- ctxm->mrav_av_entries = num_ah;
+ if (BNXT_SW_RES_LMT(bp) &&
+ ctxm->split_entry_cnt == BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1) {
+ num_ah = ctxm->mrav_av_entries;
+ num_mr = ctxm->max_entries - num_ah;
+ } else {
+ /* 128K extra is needed to accommodate static AH context
+ * allocation by f/w.
+ */
+ num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
+ num_ah = min_t(u32, num_mr, 1024 * 128);
+ ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
+ if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
+ ctxm->mrav_av_entries = num_ah;
+ }
rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
if (rc)
@@ -8903,8 +9432,9 @@ skip_rdma:
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
+skip_legacy:
if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
- rc = bnxt_backing_store_cfg_v2(bp, ena);
+ rc = bnxt_backing_store_cfg_v2(bp);
else
rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
if (rc) {
@@ -9053,7 +9583,6 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
struct hwrm_port_mac_ptp_qcfg_output *resp;
struct hwrm_port_mac_ptp_qcfg_input *req;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
- bool phc_cfg;
u8 flags;
int rc;
@@ -9100,8 +9629,9 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
rc = -ENODEV;
goto exit;
}
- phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
- rc = bnxt_ptp_init(bp, phc_cfg);
+ ptp->rtc_configured =
+ (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
+ rc = bnxt_ptp_init(bp);
if (rc)
netdev_warn(bp->dev, "PTP initialization failed.\n");
exit:
@@ -9118,10 +9648,10 @@ no_ptp:
static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
+ u32 flags, flags_ext, flags_ext2, flags_ext3;
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
struct hwrm_func_qcaps_output *resp;
struct hwrm_func_qcaps_input *req;
- struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- u32 flags, flags_ext, flags_ext2;
int rc;
rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
@@ -9139,6 +9669,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->flags |= BNXT_FLAG_ROCEV1_CAP;
if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
bp->flags |= BNXT_FLAG_ROCEV2_CAP;
+ if (flags & FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;
if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
@@ -9165,6 +9697,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
+ if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_NPAR_1_2;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP;
if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
@@ -9179,6 +9713,18 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP;
+ if (flags_ext2 &
+ FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS;
+ if (BNXT_PF(bp) &&
+ (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED))
+ bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED;
+
+ flags_ext3 = le32_to_cpu(resp->flags_ext3);
+ if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_ROCE_VF_DYN_ALLOC_SUPPORT)
+ bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_DYN_ALLOC_SUPPORT;
+ if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_MIRROR_ON_ROCE;
bp->tx_push_thresh = 0;
if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
@@ -9635,7 +10181,7 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
struct hwrm_ver_get_input *req;
u16 fw_maj, fw_min, fw_bld, fw_rsv;
u32 dev_caps_cfg, hwrm_ver;
- int rc, len;
+ int rc, len, max_tmo_secs;
rc = hwrm_req_init(bp, req, HWRM_VER_GET);
if (rc)
@@ -9708,9 +10254,14 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
if (!bp->hwrm_cmd_max_timeout)
bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
- else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT)
- netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n",
- bp->hwrm_cmd_max_timeout / 1000);
+ max_tmo_secs = bp->hwrm_cmd_max_timeout / 1000;
+#ifdef CONFIG_DETECT_HUNG_TASK
+ if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT ||
+ max_tmo_secs > CONFIG_DEFAULT_HUNG_TASK_TIMEOUT) {
+ netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog (kernel default %ds)\n",
+ max_tmo_secs, CONFIG_DEFAULT_HUNG_TASK_TIMEOUT);
+ }
+#endif
if (resp->hwrm_intf_maj_8b >= 1) {
bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
@@ -10296,6 +10847,72 @@ void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
bp->num_rss_ctx--;
}
+static bool bnxt_vnic_has_rx_ring(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ int rxr_id)
+{
+ u16 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
+ int i, vnic_rx;
+
+ /* The ntuple VNIC always covers all the rx rings, so it must be
+ * updated on any ring id change because a future filter may use it.
+ */
+ if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
+ return true;
+
+ for (i = 0; i < tbl_size; i++) {
+ if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
+ vnic_rx = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
+ else
+ vnic_rx = bp->rss_indir_tbl[i];
+
+ if (rxr_id == vnic_rx)
+ return true;
+ }
+
+ return false;
+}
+
+static int bnxt_set_vnic_mru_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ u16 mru, int rxr_id)
+{
+ int rc;
+
+ if (!bnxt_vnic_has_rx_ring(bp, vnic, rxr_id))
+ return 0;
+
+ if (mru) {
+ rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
+ vnic->vnic_id, rc);
+ return rc;
+ }
+ }
+ vnic->mru = mru;
+ bnxt_hwrm_vnic_update(bp, vnic,
+ VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
+
+ return 0;
+}
+
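/* Editorial note, not part of the patch: bnxt_set_vnic_mru_p5() is meant to
 * be used as a quiesce/restore pair around a per-queue restart, as the
 * queue_stop()/queue_start() hunks later in this patch do. A minimal sketch:
 *
 *	bnxt_set_vnic_mru_p5(bp, vnic, 0, idx);		// stop: quiesce the VNIC
 *	...restart the RX ring...
 *	mru = bp->dev->mtu + VLAN_ETH_HLEN;
 *	bnxt_set_vnic_mru_p5(bp, vnic, mru, idx);	// start: restore the MRU
 */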
+static int bnxt_set_rss_ctx_vnic_mru(struct bnxt *bp, u16 mru, int rxr_id)
+{
+ struct ethtool_rxfh_context *ctx;
+ unsigned long context;
+ int rc;
+
+ xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
+ struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
+ struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
+
+ rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, rxr_id);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
{
bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
@@ -10867,6 +11484,155 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
return 0;
}
+static void bnxt_tx_queue_stop(struct bnxt *bp, int idx)
+{
+ struct bnxt_tx_ring_info *txr;
+ struct netdev_queue *txq;
+ struct bnxt_napi *bnapi;
+ int i;
+
+ bnapi = bp->bnapi[idx];
+ bnxt_for_each_napi_tx(i, bnapi, txr) {
+ WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
+ synchronize_net();
+
+ if (!(bnapi->flags & BNXT_NAPI_FLAG_XDP)) {
+ txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
+ if (txq) {
+ __netif_tx_lock_bh(txq);
+ netif_tx_stop_queue(txq);
+ __netif_tx_unlock_bh(txq);
+ }
+ }
+
+ if (!bp->tph_mode)
+ continue;
+
+ bnxt_hwrm_tx_ring_free(bp, txr, true);
+ bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr);
+ bnxt_free_one_tx_ring_skbs(bp, txr, txr->txq_index);
+ bnxt_clear_one_cp_ring(bp, txr->tx_cpr);
+ }
+}
+
+static int bnxt_tx_queue_start(struct bnxt *bp, int idx)
+{
+ struct bnxt_tx_ring_info *txr;
+ struct netdev_queue *txq;
+ struct bnxt_napi *bnapi;
+ int rc, i;
+
+ bnapi = bp->bnapi[idx];
+ /* All rings have been reserved and previously allocated.
+ * Reallocating with the same parameters should never fail.
+ */
+ bnxt_for_each_napi_tx(i, bnapi, txr) {
+ if (!bp->tph_mode)
+ goto start_tx;
+
+ rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
+ if (rc)
+ return rc;
+
+ rc = bnxt_hwrm_tx_ring_alloc(bp, txr, false);
+ if (rc)
+ return rc;
+
+ txr->tx_prod = 0;
+ txr->tx_cons = 0;
+ txr->tx_hw_cons = 0;
+start_tx:
+ WRITE_ONCE(txr->dev_state, 0);
+ synchronize_net();
+
+ if (bnapi->flags & BNXT_NAPI_FLAG_XDP)
+ continue;
+
+ txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
+ if (txq)
+ netif_tx_start_queue(txq);
+ }
+
+ return 0;
+}
+
+static void bnxt_irq_affinity_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct bnxt_irq *irq;
+ u16 tag;
+ int err;
+
+ irq = container_of(notify, struct bnxt_irq, affinity_notify);
+
+ if (!irq->bp->tph_mode)
+ return;
+
+ cpumask_copy(irq->cpu_mask, mask);
+
+ if (irq->ring_nr >= irq->bp->rx_nr_rings)
+ return;
+
+ if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
+ cpumask_first(irq->cpu_mask), &tag))
+ return;
+
+ if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag))
+ return;
+
+ netdev_lock(irq->bp->dev);
+ if (netif_running(irq->bp->dev)) {
+ err = netdev_rx_queue_restart(irq->bp->dev, irq->ring_nr);
+ if (err)
+ netdev_err(irq->bp->dev,
+ "RX queue restart failed: err=%d\n", err);
+ }
+ netdev_unlock(irq->bp->dev);
+}
+
+static void bnxt_irq_affinity_release(struct kref *ref)
+{
+ struct irq_affinity_notify *notify =
+ container_of(ref, struct irq_affinity_notify, kref);
+ struct bnxt_irq *irq;
+
+ irq = container_of(notify, struct bnxt_irq, affinity_notify);
+
+ if (!irq->bp->tph_mode)
+ return;
+
+ if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, 0)) {
+ netdev_err(irq->bp->dev,
+ "Setting ST=0 for MSIX entry %d failed\n",
+ irq->msix_nr);
+ return;
+ }
+}
+
+static void bnxt_release_irq_notifier(struct bnxt_irq *irq)
+{
+ irq_set_affinity_notifier(irq->vector, NULL);
+}
+
+static void bnxt_register_irq_notifier(struct bnxt *bp, struct bnxt_irq *irq)
+{
+ struct irq_affinity_notify *notify;
+
+ irq->bp = bp;
+
+ /* Nothing to do if TPH is not enabled */
+ if (!bp->tph_mode)
+ return;
+
+ /* Register IRQ affinity notifier */
+ notify = &irq->affinity_notify;
+ notify->irq = irq->vector;
+ notify->notify = bnxt_irq_affinity_notify;
+ notify->release = bnxt_irq_affinity_release;
+
+ irq_set_affinity_notifier(irq->vector, notify);
+}
+
static void bnxt_free_irq(struct bnxt *bp)
{
struct bnxt_irq *irq;
@@ -10885,24 +11651,29 @@ static void bnxt_free_irq(struct bnxt *bp)
irq = &bp->irq_tbl[map_idx];
if (irq->requested) {
if (irq->have_cpumask) {
- irq_set_affinity_hint(irq->vector, NULL);
+ irq_update_affinity_hint(irq->vector, NULL);
free_cpumask_var(irq->cpu_mask);
irq->have_cpumask = 0;
}
+
+ bnxt_release_irq_notifier(irq);
+
free_irq(irq->vector, bp->bnapi[i]);
}
irq->requested = 0;
}
+
+ /* Disable TPH support */
+ pcie_disable_tph(bp->pdev);
+ bp->tph_mode = 0;
}
static int bnxt_request_irq(struct bnxt *bp)
{
+ struct cpu_rmap *rmap = NULL;
int i, j, rc = 0;
unsigned long flags = 0;
-#ifdef CONFIG_RFS_ACCEL
- struct cpu_rmap *rmap;
-#endif
rc = bnxt_setup_int_mode(bp);
if (rc) {
@@ -10913,40 +11684,59 @@ static int bnxt_request_irq(struct bnxt *bp)
#ifdef CONFIG_RFS_ACCEL
rmap = bp->dev->rx_cpu_rmap;
#endif
+
+ /* Enable TPH support as part of IRQ request */
+ rc = pcie_enable_tph(bp->pdev, PCI_TPH_ST_IV_MODE);
+ if (!rc)
+ bp->tph_mode = PCI_TPH_ST_IV_MODE;
+
for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
int map_idx = bnxt_cp_num_to_irq_num(bp, i);
struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
-#ifdef CONFIG_RFS_ACCEL
- if (rmap && bp->bnapi[i]->rx_ring) {
+ if (IS_ENABLED(CONFIG_RFS_ACCEL) &&
+ rmap && bp->bnapi[i]->rx_ring) {
rc = irq_cpu_rmap_add(rmap, irq->vector);
if (rc)
netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
j);
j++;
}
-#endif
+
rc = request_irq(irq->vector, irq->handler, flags, irq->name,
bp->bnapi[i]);
if (rc)
break;
- netif_napi_set_irq(&bp->bnapi[i]->napi, irq->vector);
+ netif_napi_set_irq_locked(&bp->bnapi[i]->napi, irq->vector);
irq->requested = 1;
if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
int numa_node = dev_to_node(&bp->pdev->dev);
+ u16 tag;
irq->have_cpumask = 1;
+ irq->msix_nr = map_idx;
+ irq->ring_nr = i;
cpumask_set_cpu(cpumask_local_spread(i, numa_node),
irq->cpu_mask);
- rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
+ rc = irq_update_affinity_hint(irq->vector, irq->cpu_mask);
if (rc) {
netdev_warn(bp->dev,
- "Set affinity failed, IRQ = %d\n",
+ "Update affinity hint failed, IRQ = %d\n",
irq->vector);
break;
}
+
+ bnxt_register_irq_notifier(bp, irq);
+
+ /* Init ST table entry */
+ if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
+ cpumask_first(irq->cpu_mask),
+ &tag))
+ continue;
+
+ pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag);
}
}
return rc;
@@ -10967,9 +11757,9 @@ static void bnxt_del_napi(struct bnxt *bp)
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
- __netif_napi_del(&bnapi->napi);
+ __netif_napi_del_locked(&bnapi->napi);
}
- /* We called __netif_napi_del(), we need
+ /* We called __netif_napi_del_locked(), we need
* to respect an RCU grace period before freeing napi structures.
*/
synchronize_net();
@@ -10986,14 +11776,17 @@ static void bnxt_init_napi(struct bnxt *bp)
poll_fn = bnxt_poll_p5;
else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
cp_nr_rings--;
+
+ set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
+
for (i = 0; i < cp_nr_rings; i++) {
bnapi = bp->bnapi[i];
- netif_napi_add_config(bp->dev, &bnapi->napi, poll_fn,
- bnapi->index);
+ netif_napi_add_config_locked(bp->dev, &bnapi->napi, poll_fn,
+ bnapi->index);
}
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
bnapi = bp->bnapi[cp_nr_rings];
- netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll_nitroa0);
+ netif_napi_add_locked(bp->dev, &bnapi->napi, bnxt_poll_nitroa0);
}
}
@@ -11014,9 +11807,7 @@ static void bnxt_disable_napi(struct bnxt *bp)
cpr->sw_stats->tx.tx_resets++;
if (bnapi->in_reset)
cpr->sw_stats->rx.rx_resets++;
- napi_disable(&bnapi->napi);
- if (bnapi->rx_ring)
- cancel_work_sync(&cpr->dim.work);
+ napi_disable_locked(&bnapi->napi);
}
}
@@ -11038,7 +11829,7 @@ static void bnxt_enable_napi(struct bnxt *bp)
INIT_WORK(&cpr->dim.work, bnxt_dim_work);
cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
- napi_enable(&bnapi->napi);
+ napi_enable_locked(&bnapi->napi);
}
}
@@ -11236,6 +12027,26 @@ hwrm_phy_qcaps_exit:
return rc;
}
+static void bnxt_hwrm_mac_qcaps(struct bnxt *bp)
+{
+ struct hwrm_port_mac_qcaps_output *resp;
+ struct hwrm_port_mac_qcaps_input *req;
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10a03)
+ return;
+
+ rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_QCAPS);
+ if (rc)
+ return;
+
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send_silent(bp, req);
+ if (!rc)
+ bp->mac_flags = resp->flags;
+ hwrm_req_drop(bp, req);
+}
+
static bool bnxt_support_dropped(u16 advertising, u16 supported)
{
u16 diff = advertising ^ supported;
@@ -11646,7 +12457,7 @@ static int bnxt_try_recover_fw(struct bnxt *bp)
return -ENODEV;
}
-static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
+void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
@@ -11687,11 +12498,15 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
{
struct hwrm_func_drv_if_change_output *resp;
struct hwrm_func_drv_if_change_input *req;
- bool fw_reset = !bp->irq_tbl;
bool resc_reinit = false;
+ bool caps_change = false;
int rc, retry = 0;
+ bool fw_reset;
u32 flags = 0;
+ fw_reset = (bp->fw_reset_state == BNXT_FW_RESET_STATE_ABORT);
+ bp->fw_reset_state = 0;
+
if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
return 0;
@@ -11744,12 +12559,15 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
return -ENODEV;
}
- if (resc_reinit || fw_reset) {
- if (fw_reset) {
+ if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE)
+ caps_change = true;
+
+ if (resc_reinit || fw_reset || caps_change) {
+ if (fw_reset || caps_change) {
set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
bnxt_ulp_irq_stop(bp);
- bnxt_free_ctx_mem(bp);
+ bnxt_free_ctx_mem(bp, false);
bnxt_dcb_free(bp);
rc = bnxt_fw_init_one(bp);
if (rc) {
@@ -11757,13 +12575,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
return rc;
}
+ /* IRQ will be initialized later in bnxt_request_irq() */
bnxt_clear_int_mode(bp);
- rc = bnxt_init_int_mode(bp);
- if (rc) {
- clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
- netdev_err(bp->dev, "init int mode failed\n");
- return rc;
- }
}
rc = bnxt_cancel_reservations(bp, fw_reset);
}
@@ -12081,6 +12894,17 @@ static int bnxt_set_xps_mapping(struct bnxt *bp)
return rc;
}
+static int bnxt_tx_nr_rings(struct bnxt *bp)
+{
+ return bp->num_tc ? bp->tx_nr_rings_per_tc * bp->num_tc :
+ bp->tx_nr_rings_per_tc;
+}
+
+static int bnxt_tx_nr_rings_per_tc(struct bnxt *bp)
+{
+ return bp->num_tc ? bp->tx_nr_rings / bp->num_tc : bp->tx_nr_rings;
+}
+
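/* Editorial note, not part of the patch: a quick worked example of the two
 * helpers above, assuming 4 traffic classes with 8 TX rings each:
 *
 *	bp->num_tc = 4, bp->tx_nr_rings_per_tc = 8  ->  bnxt_tx_nr_rings(bp) == 32
 *	bp->num_tc = 4, bp->tx_nr_rings = 32        ->  bnxt_tx_nr_rings_per_tc(bp) == 8
 *
 * With num_tc == 0, each helper simply returns the per-TC / total value
 * unchanged.
 */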
static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
@@ -12098,6 +12922,13 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
if (rc)
return rc;
+ /* Make adjustments if reserved TX rings are less than requested */
+ bp->tx_nr_rings -= bp->tx_nr_rings_xdp;
+ bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
+ if (bp->tx_nr_rings_xdp) {
+ bp->tx_nr_rings_xdp = bp->tx_nr_rings_per_tc;
+ bp->tx_nr_rings += bp->tx_nr_rings_xdp;
+ }
rc = bnxt_alloc_mem(bp, irq_re_init);
if (rc) {
netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
@@ -12162,8 +12993,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
/* VF-reps may need to be re-opened after the PF is re-opened */
if (BNXT_PF(bp))
bnxt_vf_reps_open(bp);
- if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
- WRITE_ONCE(bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
bnxt_ptp_init_rtc(bp, true);
bnxt_ptp_cfg_tstamp_filters(bp);
if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
@@ -12181,7 +13010,6 @@ open_err_free_mem:
return rc;
}
-/* rtnl_lock held */
int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
@@ -12192,14 +13020,14 @@ int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
if (rc) {
netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
- dev_close(bp->dev);
+ netif_close(bp->dev);
}
return rc;
}
-/* rtnl_lock held, open the NIC half way by allocating all resources, but
- * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
- * self tests.
+/* netdev instance lock held, open the NIC half way by allocating all
+ * resources, but NAPI, IRQ, and TX are not enabled. This is mainly used
+ * for offline self tests.
*/
int bnxt_half_open_nic(struct bnxt *bp)
{
@@ -12230,12 +13058,12 @@ int bnxt_half_open_nic(struct bnxt *bp)
half_open_err:
bnxt_free_skbs(bp);
bnxt_free_mem(bp, true);
- dev_close(bp->dev);
+ netif_close(bp->dev);
return rc;
}
-/* rtnl_lock held, this call can only be made after a previous successful
- * call to bnxt_half_open_nic().
+/* netdev instance lock held, this call can only be made after a previous
+ * successful call to bnxt_half_open_nic().
*/
void bnxt_half_close_nic(struct bnxt *bp)
{
@@ -12324,7 +13152,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
bnxt_debug_dev_exit(bp);
bnxt_disable_napi(bp);
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
bnxt_free_skbs(bp);
/* Save ring stats before shutdown */
@@ -12344,10 +13172,11 @@ void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
/* If we get here, it means firmware reset is in progress
* while we are trying to close. We can safely proceed with
- * the close because we are holding rtnl_lock(). Some firmware
- * messages may fail as we proceed to close. We set the
- * ABORT_ERR flag here so that the FW reset thread will later
- * abort when it gets the rtnl_lock() and sees the flag.
+ * the close because we are holding the netdev instance lock.
+ * Some firmware messages may fail as we proceed to close.
+ * We set the ABORT_ERR flag here so that the FW reset thread
+ * will later abort when it gets the netdev instance lock
+ * and sees the flag.
*/
netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
@@ -12438,7 +13267,7 @@ static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
return hwrm_req_send(bp, req);
}
-/* rtnl_lock held */
+/* netdev instance lock held */
static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mii_ioctl_data *mdio = if_mii(ifr);
@@ -12469,12 +13298,6 @@ static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
mdio->val_in);
- case SIOCSHWTSTAMP:
- return bnxt_hwtstamp_set(dev, ifr);
-
- case SIOCGHWTSTAMP:
- return bnxt_hwtstamp_get(dev, ifr);
-
default:
/* do nothing */
break;
@@ -13215,11 +14038,19 @@ static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
{
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- int i = bnapi->index;
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring, *cpr2;
+ int i = bnapi->index, j;
netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
+ for (j = 0; j < cpr->cp_ring_count; j++) {
+ cpr2 = &cpr->cp_ring_arr[j];
+ if (!cpr2->bnapi)
+ continue;
+ netdev_info(bnapi->bp->dev, "[%d.%d]: cp{fw_ring: %d raw_cons: %x}\n",
+ i, j, cpr2->cp_ring_struct.fw_ring_id,
+ cpr2->cp_raw_cons);
+ }
}
static void bnxt_dbg_dump_states(struct bnxt *bp)
@@ -13315,7 +14146,7 @@ fw_reset:
static void bnxt_timer(struct timer_list *t)
{
- struct bnxt *bp = from_timer(bp, t, timer);
+ struct bnxt *bp = timer_container_of(bp, t, timer);
struct net_device *dev = bp->dev;
if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
@@ -13357,30 +14188,31 @@ bnxt_restart_timer:
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
-static void bnxt_rtnl_lock_sp(struct bnxt *bp)
+static void bnxt_lock_sp(struct bnxt *bp)
{
/* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
* set. If the device is being closed, bnxt_close() may be holding
- * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
- * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
+ * netdev instance lock and waiting for BNXT_STATE_IN_SP_TASK to clear.
+ * So we must clear BNXT_STATE_IN_SP_TASK before holding netdev
+ * instance lock.
*/
clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
- rtnl_lock();
+ netdev_lock(bp->dev);
}
-static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
+static void bnxt_unlock_sp(struct bnxt *bp)
{
set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
- rtnl_unlock();
+ netdev_unlock(bp->dev);
}
/* Only called from bnxt_sp_task() */
static void bnxt_reset(struct bnxt *bp, bool silent)
{
- bnxt_rtnl_lock_sp(bp);
+ bnxt_lock_sp(bp);
if (test_bit(BNXT_STATE_OPEN, &bp->state))
bnxt_reset_task(bp, silent);
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
}
/* Only called from bnxt_sp_task() */
@@ -13388,9 +14220,9 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
{
int i;
- bnxt_rtnl_lock_sp(bp);
+ bnxt_lock_sp(bp);
if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
return;
}
/* Disable and flush TPA before resetting the RX ring */
@@ -13414,7 +14246,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
bnxt_reset_task(bp, true);
break;
}
- bnxt_free_one_rx_ring_skbs(bp, i);
+ bnxt_free_one_rx_ring_skbs(bp, rxr);
rxr->rx_prod = 0;
rxr->rx_agg_prod = 0;
rxr->rx_sw_agg_prod = 0;
@@ -13429,7 +14261,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
}
if (bp->flags & BNXT_FLAG_TPA)
bnxt_set_tpa(bp, true);
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
}
static void bnxt_fw_fatal_close(struct bnxt *bp)
@@ -13462,7 +14294,7 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
bnxt_hwrm_func_drv_unrgtr(bp);
if (pci_is_enabled(bp->pdev))
pci_disable_device(bp->pdev);
- bnxt_free_ctx_mem(bp);
+ bnxt_free_ctx_mem(bp, false);
}
static bool is_bnxt_fw_ok(struct bnxt *bp)
@@ -13485,7 +14317,7 @@ static bool is_bnxt_fw_ok(struct bnxt *bp)
return false;
}
-/* rtnl_lock is acquired before calling this function */
+/* netdev instance lock is acquired before calling this function */
static void bnxt_force_fw_reset(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
@@ -13496,12 +14328,13 @@ static void bnxt_force_fw_reset(struct bnxt *bp)
test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
return;
+ /* we have to serialize with bnxt_refclk_read() */
if (ptp) {
unsigned long flags;
- spin_lock_irqsave(&ptp->ptp_lock, flags);
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
} else {
set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
}
@@ -13527,9 +14360,9 @@ void bnxt_fw_exception(struct bnxt *bp)
netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
bnxt_ulp_stop(bp);
- bnxt_rtnl_lock_sp(bp);
+ bnxt_lock_sp(bp);
bnxt_force_fw_reset(bp);
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
}
/* Returns the number of registered VFs, or 1 if VF configuration is pending, or
@@ -13559,18 +14392,19 @@ static int bnxt_get_registered_vfs(struct bnxt *bp)
void bnxt_fw_reset(struct bnxt *bp)
{
bnxt_ulp_stop(bp);
- bnxt_rtnl_lock_sp(bp);
+ bnxt_lock_sp(bp);
if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
int n = 0, tmo;
+ /* we have to serialize with bnxt_refclk_read() */
if (ptp) {
unsigned long flags;
- spin_lock_irqsave(&ptp->ptp_lock, flags);
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
} else {
set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
}
@@ -13581,7 +14415,7 @@ void bnxt_fw_reset(struct bnxt *bp)
netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
n);
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- dev_close(bp->dev);
+ netif_close(bp->dev);
goto fw_reset_exit;
} else if (n > 0) {
u16 vf_tmo_dsecs = n * 10;
@@ -13604,7 +14438,7 @@ void bnxt_fw_reset(struct bnxt *bp)
bnxt_queue_fw_reset_work(bp, tmo);
}
fw_reset_exit:
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
}
static void bnxt_chk_missed_irq(struct bnxt *bp)
@@ -13803,7 +14637,7 @@ static void bnxt_sp_task(struct work_struct *work)
static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
int *max_cp);
-/* Under rtnl_lock */
+/* Under netdev instance lock */
int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp)
{
@@ -13936,6 +14770,23 @@ static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp)
return false;
}
+static void bnxt_hwrm_pfcwd_qcaps(struct bnxt *bp)
+{
+ struct hwrm_queue_pfcwd_timeout_qcaps_output *resp;
+ struct hwrm_queue_pfcwd_timeout_qcaps_input *req;
+ int rc;
+
+ bp->max_pfcwd_tmo_ms = 0;
+ rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS);
+ if (rc)
+ return;
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send_silent(bp, req);
+ if (!rc)
+ bp->max_pfcwd_tmo_ms = le16_to_cpu(resp->max_pfcwd_timeout);
+ hwrm_req_drop(bp, req);
+}
+
static int bnxt_fw_init_one_p1(struct bnxt *bp)
{
int rc;
@@ -14013,6 +14864,7 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
if (bnxt_fw_pre_resv_vnics(bp))
bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS;
+ bnxt_hwrm_pfcwd_qcaps(bp);
bnxt_hwrm_func_qcfg(bp);
bnxt_hwrm_vnic_qcaps(bp);
bnxt_hwrm_port_led_qcaps(bp);
@@ -14195,8 +15047,8 @@ static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
bnxt_dl_health_fw_status_update(bp, false);
- bp->fw_reset_state = 0;
- dev_close(bp->dev);
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_ABORT;
+ netif_close(bp->dev);
}
static void bnxt_fw_reset_task(struct work_struct *work)
@@ -14231,10 +15083,10 @@ static void bnxt_fw_reset_task(struct work_struct *work)
return;
}
bp->fw_reset_timestamp = jiffies;
- rtnl_lock();
+ netdev_lock(bp->dev);
if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
bnxt_fw_reset_abort(bp, rc);
- rtnl_unlock();
+ netdev_unlock(bp->dev);
goto ulp_start;
}
bnxt_fw_reset_close(bp);
@@ -14245,7 +15097,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
tmo = bp->fw_reset_min_dsecs * HZ / 10;
}
- rtnl_unlock();
+ netdev_unlock(bp->dev);
bnxt_queue_fw_reset_work(bp, tmo);
return;
}
@@ -14319,7 +15171,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
fallthrough;
case BNXT_FW_RESET_STATE_OPENING:
- while (!rtnl_trylock()) {
+ while (!netdev_trylock(bp->dev)) {
bnxt_queue_fw_reset_work(bp, HZ / 10);
return;
}
@@ -14327,7 +15179,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
if (rc) {
netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
bnxt_fw_reset_abort(bp, rc);
- rtnl_unlock();
+ netdev_unlock(bp->dev);
goto ulp_start;
}
@@ -14346,13 +15198,13 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bnxt_dl_health_fw_recovery_done(bp);
bnxt_dl_health_fw_status_update(bp, true);
}
- rtnl_unlock();
+ netdev_unlock(bp->dev);
bnxt_ulp_start(bp, 0);
bnxt_reenable_sriov(bp);
- rtnl_lock();
+ netdev_lock(bp->dev);
bnxt_vf_reps_alloc(bp);
bnxt_vf_reps_open(bp);
- rtnl_unlock();
+ netdev_unlock(bp->dev);
break;
}
return;
@@ -14365,9 +15217,9 @@ fw_reset_abort_status:
netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
}
fw_reset_abort:
- rtnl_lock();
+ netdev_lock(bp->dev);
bnxt_fw_reset_abort(bp, rc);
- rtnl_unlock();
+ netdev_unlock(bp->dev);
ulp_start:
bnxt_ulp_start(bp, rc);
}
@@ -14459,13 +15311,14 @@ init_err:
return rc;
}
-/* rtnl_lock held */
static int bnxt_change_mac_addr(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
+ netdev_assert_locked(dev);
+
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
@@ -14486,15 +15339,24 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
return rc;
}
-/* rtnl_lock held */
static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
{
struct bnxt *bp = netdev_priv(dev);
+ netdev_assert_locked(dev);
+
if (netif_running(dev))
bnxt_close_nic(bp, true, false);
WRITE_ONCE(dev->mtu, new_mtu);
+
+ /* MTU change may change the AGG ring settings if an XDP multi-buffer
+ * program is attached. We need to set the AGG ring settings and
+ * rx_skb_func accordingly.
+ */
+ if (READ_ONCE(bp->xdp_prog))
+ bnxt_set_rx_skb_mode(bp, true);
+
bnxt_set_ring_params(bp);
if (netif_running(dev))
@@ -14862,8 +15724,7 @@ static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int ta
static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
.set_port = bnxt_udp_tunnel_set_port,
.unset_port = bnxt_udp_tunnel_unset_port,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
@@ -14871,8 +15732,7 @@ static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
}, bnxt_udp_tunnels_p7 = {
.set_port = bnxt_udp_tunnel_set_port,
.unset_port = bnxt_udp_tunnel_unset_port,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
@@ -14968,6 +15828,8 @@ static const struct net_device_ops bnxt_netdev_ops = {
.ndo_xdp_xmit = bnxt_xdp_xmit,
.ndo_bridge_getlink = bnxt_bridge_getlink,
.ndo_bridge_setlink = bnxt_bridge_setlink,
+ .ndo_hwtstamp_get = bnxt_hwtstamp_get,
+ .ndo_hwtstamp_set = bnxt_hwtstamp_set,
};
static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
@@ -14977,6 +15839,9 @@ static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
struct bnxt_cp_ring_info *cpr;
u64 *sw;
+ if (!bp->bnapi)
+ return;
+
cpr = &bp->bnapi[i]->cp_ring;
sw = cpr->stats.sw_stats;
@@ -15000,6 +15865,9 @@ static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
struct bnxt_napi *bnapi;
u64 *sw;
+ if (!bp->tx_ring)
+ return;
+
bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
sw = bnapi->cp_ring.stats.sw_stats;
@@ -15034,19 +15902,6 @@ static const struct netdev_stat_ops bnxt_stat_ops = {
.get_base_stats = bnxt_get_base_stats,
};
-static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
-{
- u16 mem_size;
-
- rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
- mem_size = rxr->rx_agg_bmap_size / 8;
- rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
- if (!rxr->rx_agg_bmap)
- return -ENOMEM;
-
- return 0;
-}
-
static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
{
struct bnxt_rx_ring_info *rxr, *clone;
@@ -15054,6 +15909,9 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
struct bnxt_ring_struct *ring;
int rc;
+ if (!bp->rx_ring)
+ return -ENETDOWN;
+
rxr = &bp->rx_ring[idx];
clone = qmem;
memcpy(clone, rxr, sizeof(*rxr));
@@ -15064,6 +15922,7 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
clone->rx_agg_prod = 0;
clone->rx_sw_agg_prod = 0;
clone->rx_next_cons = 0;
+ clone->need_head_pool = false;
rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid);
if (rc)
@@ -15095,15 +15954,25 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
goto err_free_rx_agg_ring;
}
+ if (bp->flags & BNXT_FLAG_TPA) {
+ rc = bnxt_alloc_one_tpa_info(bp, clone);
+ if (rc)
+ goto err_free_tpa_info;
+ }
+
bnxt_init_one_rx_ring_rxbd(bp, clone);
bnxt_init_one_rx_agg_ring_rxbd(bp, clone);
bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
if (bp->flags & BNXT_FLAG_AGG_RINGS)
- bnxt_alloc_one_rx_ring_page(bp, clone, idx);
+ bnxt_alloc_one_rx_ring_netmem(bp, clone, idx);
+ if (bp->flags & BNXT_FLAG_TPA)
+ bnxt_alloc_one_tpa_info_data(bp, clone);
return 0;
+err_free_tpa_info:
+ bnxt_free_one_tpa_info(bp, clone);
err_free_rx_agg_ring:
bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
err_free_rx_ring:
@@ -15111,9 +15980,10 @@ err_free_rx_ring:
err_rxq_info_unreg:
xdp_rxq_info_unreg(&clone->xdp_rxq);
err_page_pool_destroy:
- clone->page_pool->p.napi = NULL;
page_pool_destroy(clone->page_pool);
+ page_pool_destroy(clone->head_pool);
clone->page_pool = NULL;
+ clone->head_pool = NULL;
return rc;
}
@@ -15123,13 +15993,15 @@ static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
struct bnxt *bp = netdev_priv(dev);
struct bnxt_ring_struct *ring;
- bnxt_free_one_rx_ring(bp, rxr);
- bnxt_free_one_rx_agg_ring(bp, rxr);
+ bnxt_free_one_rx_ring_skbs(bp, rxr);
+ bnxt_free_one_tpa_info(bp, rxr);
xdp_rxq_info_unreg(&rxr->xdp_rxq);
page_pool_destroy(rxr->page_pool);
+ page_pool_destroy(rxr->head_pool);
rxr->page_pool = NULL;
+ rxr->head_pool = NULL;
ring = &rxr->rx_ring_struct;
bnxt_free_ring(bp, &ring->ring_mem);
@@ -15202,7 +16074,9 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
struct bnxt_rx_ring_info *rxr, *clone;
struct bnxt_cp_ring_info *cpr;
struct bnxt_vnic_info *vnic;
+ struct bnxt_napi *bnapi;
int i, rc;
+ u16 mru;
rxr = &bp->rx_ring[idx];
clone = qmem;
@@ -15211,36 +16085,65 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
rxr->rx_agg_prod = clone->rx_agg_prod;
rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
rxr->rx_next_cons = clone->rx_next_cons;
+ rxr->rx_tpa = clone->rx_tpa;
+ rxr->rx_tpa_idx_map = clone->rx_tpa_idx_map;
rxr->page_pool = clone->page_pool;
+ rxr->head_pool = clone->head_pool;
rxr->xdp_rxq = clone->xdp_rxq;
+ rxr->need_head_pool = clone->need_head_pool;
bnxt_copy_rx_ring(bp, rxr, clone);
+ bnapi = rxr->bnapi;
+ cpr = &bnapi->cp_ring;
+
+ /* All rings have been reserved and previously allocated.
+ * Reallocating with the same parameters should never fail.
+ */
rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
if (rc)
- return rc;
+ goto err_reset;
+
+ if (bp->tph_mode) {
+ rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
+ if (rc)
+ goto err_reset;
+ }
+
rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
if (rc)
- goto err_free_hwrm_rx_ring;
+ goto err_reset;
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
if (bp->flags & BNXT_FLAG_AGG_RINGS)
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
- cpr = &rxr->bnapi->cp_ring;
- cpr->sw_stats->rx.rx_resets++;
+ if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
+ rc = bnxt_tx_queue_start(bp, idx);
+ if (rc)
+ goto err_reset;
+ }
- for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
+ bnxt_enable_rx_page_pool(rxr);
+ napi_enable_locked(&bnapi->napi);
+ bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
+
+ mru = bp->dev->mtu + VLAN_ETH_HLEN;
+ for (i = 0; i < bp->nr_vnics; i++) {
vnic = &bp->vnic_info[i];
- vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
- bnxt_hwrm_vnic_update(bp, vnic,
- VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
- }
- return 0;
+ rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, idx);
+ if (rc)
+ return rc;
+ }
+ return bnxt_set_rss_ctx_vnic_mru(bp, mru, idx);
-err_free_hwrm_rx_ring:
- bnxt_hwrm_rx_ring_free(bp, rxr, false);
+err_reset:
+ netdev_err(bp->dev, "Unexpected HWRM error during queue start rc: %d\n",
+ rc);
+ napi_enable_locked(&bnapi->napi);
+ bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
+ bnxt_reset_task(bp, true);
return rc;
}
@@ -15248,21 +16151,43 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_rx_ring_info *rxr;
+ struct bnxt_cp_ring_info *cpr;
struct bnxt_vnic_info *vnic;
+ struct bnxt_napi *bnapi;
int i;
- for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
+ for (i = 0; i < bp->nr_vnics; i++) {
vnic = &bp->vnic_info[i];
- vnic->mru = 0;
- bnxt_hwrm_vnic_update(bp, vnic,
- VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
- }
+ bnxt_set_vnic_mru_p5(bp, vnic, 0, idx);
+ }
+ bnxt_set_rss_ctx_vnic_mru(bp, 0, idx);
+ /* Make sure NAPI sees that the VNIC is disabled */
+ synchronize_net();
rxr = &bp->rx_ring[idx];
+ bnapi = rxr->bnapi;
+ cpr = &bnapi->cp_ring;
+ cancel_work_sync(&cpr->dim.work);
bnxt_hwrm_rx_ring_free(bp, rxr, false);
bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
- rxr->rx_next_cons = 0;
page_pool_disable_direct_recycling(rxr->page_pool);
+ if (bnxt_separate_head_pool(rxr))
+ page_pool_disable_direct_recycling(rxr->head_pool);
+
+ if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+ bnxt_tx_queue_stop(bp, idx);
+
+ /* Disable NAPI now after freeing the rings because HWRM_RING_FREE
+ * completion is handled in NAPI to guarantee no more DMA on that ring
+ * after seeing the completion.
+ */
+ napi_disable_locked(&bnapi->napi);
+
+ if (bp->tph_mode) {
+ bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
+ bnxt_clear_one_cp_ring(bp, rxr->rx_cpr);
+ }
+ bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
memcpy(qmem, rxr, sizeof(*rxr));
bnxt_init_rx_ring_struct(bp, qmem);
@@ -15284,12 +16209,12 @@ static void bnxt_remove_one(struct pci_dev *pdev)
struct bnxt *bp = netdev_priv(dev);
if (BNXT_PF(bp))
- bnxt_sriov_disable(bp);
+ __bnxt_sriov_disable(bp);
bnxt_rdma_aux_device_del(bp);
- bnxt_ptp_clear(bp);
unregister_netdev(dev);
+ bnxt_ptp_clear(bp);
bnxt_rdma_aux_device_uninit(bp);
@@ -15317,7 +16242,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
kfree(bp->fw_health);
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
- bnxt_free_ctx_mem(bp);
+ bnxt_free_ctx_mem(bp, true);
bnxt_free_crash_dump_mem(bp);
kfree(bp->rss_indir_tbl);
bp->rss_indir_tbl = NULL;
@@ -15341,6 +16266,10 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
bp->dev->priv_flags |= IFF_SUPP_NOFCS;
else
bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
+
+ bp->mac_flags = 0;
+ bnxt_hwrm_mac_qcaps(bp);
+
if (!fw_dflt)
return 0;
@@ -15477,7 +16406,7 @@ static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
bp->rx_nr_rings = bp->cp_nr_rings;
bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
- bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+ bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
}
static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
@@ -15509,7 +16438,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
bnxt_trim_dflt_sh_rings(bp);
else
bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
- bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+ bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings;
if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) {
@@ -15522,7 +16451,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
rc = __bnxt_reserve_rings(bp);
if (rc && rc != -ENODEV)
netdev_warn(bp->dev, "Unable to reserve tx rings\n");
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
if (sh)
bnxt_trim_dflt_sh_rings(bp);
@@ -15531,7 +16460,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
rc = __bnxt_reserve_rings(bp);
if (rc && rc != -ENODEV)
netdev_warn(bp->dev, "2nd rings reservation failed.\n");
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
}
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
bp->rx_nr_rings++;
@@ -15565,7 +16494,7 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
if (rc)
goto init_dflt_ring_err;
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
bnxt_set_dflt_rfs(bp);
@@ -15578,7 +16507,7 @@ int bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
int rc;
- ASSERT_RTNL();
+ netdev_ops_assert_locked(bp->dev);
bnxt_hwrm_func_qcaps(bp);
if (netif_running(bp->dev))
@@ -15591,7 +16520,7 @@ int bnxt_restore_pf_fw_resources(struct bnxt *bp)
if (netif_running(bp->dev)) {
if (rc)
- dev_close(bp->dev);
+ netif_close(bp->dev);
else
rc = bnxt_open_nic(bp, true, false);
}
@@ -15869,8 +16798,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (bp->max_fltr < BNXT_MAX_FLTR)
bp->max_fltr = BNXT_MAX_FLTR;
bnxt_init_l2_fltr_tbl(bp);
- bnxt_set_rx_skb_mode(bp, false);
+ __bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
+ bnxt_init_ring_params(bp);
bnxt_set_ring_params(bp);
bnxt_rdma_aux_device_init(bp);
rc = bnxt_set_dflt_rings(bp, true);
@@ -15927,6 +16857,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
if (BNXT_SUPPORTS_QUEUE_API(bp))
dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
+ dev->request_ops_lock = true;
+ dev->netmem_tx = true;
rc = register_netdev(dev);
if (rc)
@@ -15959,7 +16891,7 @@ init_err_pci_clean:
kfree(bp->fw_health);
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
- bnxt_free_ctx_mem(bp);
+ bnxt_free_ctx_mem(bp, true);
bnxt_free_crash_dump_mem(bp);
kfree(bp->rss_indir_tbl);
bp->rss_indir_tbl = NULL;
@@ -15978,13 +16910,19 @@ static void bnxt_shutdown(struct pci_dev *pdev)
return;
rtnl_lock();
+ netdev_lock(dev);
bp = netdev_priv(dev);
if (!bp)
goto shutdown_exit;
if (netif_running(dev))
- dev_close(dev);
+ netif_close(dev);
+ if (bnxt_hwrm_func_drv_unrgtr(bp)) {
+ pcie_flr(pdev);
+ goto shutdown_exit;
+ }
+ bnxt_ptp_clear(bp);
bnxt_clear_int_mode(bp);
pci_disable_device(pdev);
@@ -15994,6 +16932,7 @@ static void bnxt_shutdown(struct pci_dev *pdev)
}
shutdown_exit:
+ netdev_unlock(dev);
rtnl_unlock();
}
@@ -16006,15 +16945,16 @@ static int bnxt_suspend(struct device *device)
bnxt_ulp_stop(bp);
- rtnl_lock();
+ netdev_lock(dev);
if (netif_running(dev)) {
netif_device_detach(dev);
rc = bnxt_close(dev);
}
bnxt_hwrm_func_drv_unrgtr(bp);
+ bnxt_ptp_clear(bp);
pci_disable_device(bp->pdev);
- bnxt_free_ctx_mem(bp);
- rtnl_unlock();
+ bnxt_free_ctx_mem(bp, false);
+ netdev_unlock(dev);
return rc;
}
@@ -16024,7 +16964,7 @@ static int bnxt_resume(struct device *device)
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
- rtnl_lock();
+ netdev_lock(dev);
rc = pci_enable_device(bp->pdev);
if (rc) {
netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
@@ -16055,6 +16995,10 @@ static int bnxt_resume(struct device *device)
if (bp->fw_crash_mem)
bnxt_hwrm_crash_dump_mem_cfg(bp);
+ if (bnxt_ptp_init(bp)) {
+ kfree(bp->ptp_cfg);
+ bp->ptp_cfg = NULL;
+ }
bnxt_get_wol_settings(bp);
if (netif_running(dev)) {
rc = bnxt_open(dev);
@@ -16063,7 +17007,7 @@ static int bnxt_resume(struct device *device)
}
resume_exit:
- rtnl_unlock();
+ netdev_unlock(bp->dev);
bnxt_ulp_start(bp, rc);
if (!rc)
bnxt_reenable_sriov(bp);
@@ -16098,7 +17042,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
bnxt_ulp_stop(bp);
- rtnl_lock();
+ netdev_lock(netdev);
netif_device_detach(netdev);
if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
@@ -16107,7 +17051,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
}
if (abort || state == pci_channel_io_perm_failure) {
- rtnl_unlock();
+ netdev_unlock(netdev);
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -16125,10 +17069,10 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
if (pci_is_enabled(pdev))
pci_disable_device(pdev);
- bnxt_free_ctx_mem(bp);
- rtnl_unlock();
+ bnxt_free_ctx_mem(bp, false);
+ netdev_unlock(netdev);
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
@@ -16156,7 +17100,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state))
msleep(900);
- rtnl_lock();
+ netdev_lock(netdev);
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev,
@@ -16202,16 +17146,15 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
if (!err)
result = PCI_ERS_RESULT_RECOVERED;
+ /* IRQ will be initialized later in bnxt_io_resume */
bnxt_ulp_irq_stop(bp);
bnxt_clear_int_mode(bp);
- err = bnxt_init_int_mode(bp);
- bnxt_ulp_irq_restart(bp, err);
}
reset_exit:
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
bnxt_clear_reservations(bp, true);
- rtnl_unlock();
+ netdev_unlock(netdev);
return result;
}
@@ -16230,16 +17173,23 @@ static void bnxt_io_resume(struct pci_dev *pdev)
int err;
netdev_info(bp->dev, "PCI Slot Resume\n");
- rtnl_lock();
+ netdev_lock(netdev);
err = bnxt_hwrm_func_qcaps(bp);
- if (!err && netif_running(netdev))
- err = bnxt_open(netdev);
+ if (!err) {
+ if (netif_running(netdev)) {
+ err = bnxt_open(netdev);
+ } else {
+ err = bnxt_reserve_rings(bp, true);
+ if (!err)
+ err = bnxt_init_int_mode(bp);
+ }
+ }
if (!err)
netif_device_attach(netdev);
- rtnl_unlock();
+ netdev_unlock(netdev);
bnxt_ulp_start(bp, err);
if (!err)
bnxt_reenable_sriov(bp);
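Editorial aside (not part of the patch): the reworked bnxt_queue_start() above computes a single MRU and applies it to every VNIC for the restarted ring. A minimal, self-contained sketch showing that the new "mtu + VLAN_ETH_HLEN" expression equals the old "mtu + ETH_HLEN + VLAN_HLEN"; the constants below only restate the uapi header values for illustration.

/* Illustrative only -- not part of the patch. */
#include <stdio.h>

#define ETH_HLEN      14                      /* Ethernet header, as in <linux/if_ether.h> */
#define VLAN_HLEN     4                       /* 802.1Q tag, as in <linux/if_vlan.h> */
#define VLAN_ETH_HLEN (ETH_HLEN + VLAN_HLEN)  /* 18 bytes */

int main(void)
{
	unsigned int mtu = 1500;  /* hypothetical device MTU */

	printf("old mru = %u\n", mtu + ETH_HLEN + VLAN_HLEN);  /* 1518 */
	printf("new mru = %u\n", mtu + VLAN_ETH_HLEN);         /* 1518 */
	return 0;
}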
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 69231e85140b..f5f07a7e6b29 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -34,6 +34,9 @@
#include <linux/firmware/broadcom/tee_bnxt_fw.h>
#endif
+#define BNXT_DEFAULT_RX_COPYBREAK 256
+#define BNXT_MAX_RX_COPYBREAK 1024
+
extern struct list_head bnxt_block_cb_list;
struct page_pool;
@@ -79,6 +82,12 @@ struct tx_bd {
#define TX_OPAQUE_PROD(bp, opq) ((TX_OPAQUE_IDX(opq) + TX_OPAQUE_BDS(opq)) &\
(bp)->tx_ring_mask)
+#define TX_BD_CNT(n) (((n) << TX_BD_FLAGS_BD_CNT_SHIFT) & TX_BD_FLAGS_BD_CNT)
+
+#define TX_MAX_BD_CNT 32
+
+#define TX_MAX_FRAGS (TX_MAX_BD_CNT - 2)
+
struct tx_bd_ext {
__le32 tx_bd_hsize_lflags;
#define TX_BD_FLAGS_TCP_UDP_CHKSUM (1 << 0)
@@ -122,6 +131,7 @@ struct rx_bd {
#define RX_BD_TYPE_48B_BD_SIZE (2 << 4)
#define RX_BD_TYPE_64B_BD_SIZE (3 << 4)
#define RX_BD_FLAGS_SOP (1 << 6)
+ #define RX_BD_FLAGS_AGG_EOP (1 << 6)
#define RX_BD_FLAGS_EOP (1 << 7)
#define RX_BD_FLAGS_BUFFERS (3 << 8)
#define RX_BD_FLAGS_1_BUFFER_PACKET (0 << 8)
@@ -267,6 +277,9 @@ struct rx_cmp {
(((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\
RX_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
+#define RX_CMP_ITYPES(rxcmp) \
+ (le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_FLAGS_ITYPES_MASK)
+
#define RX_CMP_V3_HASH_TYPE_LEGACY(rxcmp) \
((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_V3_RSS_EXT_OP_LEGACY) >>\
RX_CMP_V3_RSS_EXT_OP_LEGACY_SHIFT)
@@ -378,7 +391,7 @@ struct rx_agg_cmp {
u32 rx_agg_cmp_opaque;
__le32 rx_agg_cmp_v;
#define RX_AGG_CMP_V (1 << 0)
- #define RX_AGG_CMP_AGG_ID (0xffff << 16)
+ #define RX_AGG_CMP_AGG_ID (0x0fff << 16)
#define RX_AGG_CMP_AGG_ID_SHIFT 16
__le32 rx_agg_cmp_unused;
};
@@ -416,7 +429,7 @@ struct rx_tpa_start_cmp {
#define RX_TPA_START_CMP_V3_RSS_HASH_TYPE_SHIFT 7
#define RX_TPA_START_CMP_AGG_ID (0x7f << 25)
#define RX_TPA_START_CMP_AGG_ID_SHIFT 25
- #define RX_TPA_START_CMP_AGG_ID_P5 (0xffff << 16)
+ #define RX_TPA_START_CMP_AGG_ID_P5 (0x0fff << 16)
#define RX_TPA_START_CMP_AGG_ID_SHIFT_P5 16
#define RX_TPA_START_CMP_METADATA1 (0xf << 28)
#define RX_TPA_START_CMP_METADATA1_SHIFT 28
@@ -540,7 +553,7 @@ struct rx_tpa_end_cmp {
#define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT 16
#define RX_TPA_END_CMP_AGG_ID (0x7f << 25)
#define RX_TPA_END_CMP_AGG_ID_SHIFT 25
- #define RX_TPA_END_CMP_AGG_ID_P5 (0xffff << 16)
+ #define RX_TPA_END_CMP_AGG_ID_P5 (0x0fff << 16)
#define RX_TPA_END_CMP_AGG_ID_SHIFT_P5 16
__le32 rx_tpa_end_cmp_tsdelta;
@@ -891,7 +904,7 @@ struct bnxt_sw_rx_bd {
};
struct bnxt_sw_rx_agg_bd {
- struct page *page;
+ netmem_ref netmem;
unsigned int offset;
dma_addr_t mapping;
};
@@ -1094,6 +1107,7 @@ struct bnxt_rx_ring_info {
unsigned long *rx_agg_bmap;
u16 rx_agg_bmap_size;
+ bool need_head_pool;
dma_addr_t rx_desc_mapping[MAX_RX_PAGES];
dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
@@ -1105,6 +1119,7 @@ struct bnxt_rx_ring_info {
struct bnxt_ring_struct rx_agg_ring_struct;
struct xdp_rxq_info xdp_rxq;
struct page_pool *page_pool;
+ struct page_pool *head_pool;
};
struct bnxt_rx_sw_stats {
@@ -1227,6 +1242,11 @@ struct bnxt_irq {
u8 have_cpumask:1;
char name[IFNAMSIZ + BNXT_IRQ_NAME_EXTRA];
cpumask_var_t cpu_mask;
+
+ struct bnxt *bp;
+ int msix_nr;
+ int ring_nr;
+ struct irq_affinity_notify affinity_notify;
};
#define HWRM_RING_ALLOC_TX 0x1
@@ -1848,6 +1868,8 @@ struct bnxt_vf_rep {
#define MAX_CTX_PAGES (BNXT_PAGE_SIZE / 8)
#define MAX_CTX_TOTAL_PAGES (MAX_CTX_PAGES * MAX_CTX_PAGES)
+#define MAX_CTX_BYTES ((size_t)MAX_CTX_TOTAL_PAGES * BNXT_PAGE_SIZE)
+#define MAX_CTX_BYTES_MASK (MAX_CTX_BYTES - 1)
struct bnxt_ctx_pg_info {
u32 entries;
@@ -1880,6 +1902,13 @@ struct bnxt_ctx_mem_type {
u16 entry_size;
u32 flags;
#define BNXT_CTX_MEM_TYPE_VALID FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID
+#define BNXT_CTX_MEM_FW_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_DBG_TRACE
+#define BNXT_CTX_MEM_FW_BIN_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_BIN_DBG_TRACE
+#define BNXT_CTX_MEM_PERSIST \
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_NEXT_BS_OFFSET
+
u32 instance_bmap;
u8 init_value;
u8 entry_multiple;
@@ -1888,6 +1917,7 @@ struct bnxt_ctx_mem_type {
u32 max_entries;
u32 min_entries;
u8 last:1;
+ u8 mem_valid:1;
u8 split_entry_cnt;
#define BNXT_MAX_SPLIT_ENTRY 4
union {
@@ -1919,21 +1949,32 @@ struct bnxt_ctx_mem_type {
#define BNXT_CTX_FTQM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING
#define BNXT_CTX_MRAV FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV
#define BNXT_CTX_TIM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM
-#define BNXT_CTX_TKC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TKC
-#define BNXT_CTX_RKC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RKC
+#define BNXT_CTX_TCK FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK
+#define BNXT_CTX_RCK FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK
#define BNXT_CTX_MTQM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING
#define BNXT_CTX_SQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW
#define BNXT_CTX_RQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW
#define BNXT_CTX_SRQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW
#define BNXT_CTX_CQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW
-#define BNXT_CTX_QTKC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_TKC
-#define BNXT_CTX_QRKC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_RKC
#define BNXT_CTX_TBLSC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE
#define BNXT_CTX_XPAR FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION
+#define BNXT_CTX_SRT FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE
+#define BNXT_CTX_SRT2 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE
+#define BNXT_CTX_CRT FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE
+#define BNXT_CTX_CRT2 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE
+#define BNXT_CTX_RIGP0 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE
+#define BNXT_CTX_L2HWRM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE
+#define BNXT_CTX_REHWRM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE
+#define BNXT_CTX_CA0 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA0_TRACE
+#define BNXT_CTX_CA1 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA1_TRACE
+#define BNXT_CTX_CA2 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA2_TRACE
+#define BNXT_CTX_RIGP1 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP1_TRACE
+#define BNXT_CTX_KONG FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE
+#define BNXT_CTX_QPC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ERR_QPC_TRACE
#define BNXT_CTX_MAX (BNXT_CTX_TIM + 1)
#define BNXT_CTX_L2_MAX (BNXT_CTX_FTQM + 1)
-#define BNXT_CTX_V2_MAX (BNXT_CTX_XPAR + 1)
+#define BNXT_CTX_V2_MAX (BNXT_CTX_QPC + 1)
#define BNXT_CTX_INV ((u16)-1)
struct bnxt_ctx_mem_info {
@@ -2092,8 +2133,29 @@ enum board_idx {
NETXTREME_E_P5_VF,
NETXTREME_E_P5_VF_HV,
NETXTREME_E_P7_VF,
+ NETXTREME_E_P7_VF_HV,
+};
+
+#define BNXT_TRACE_BUF_MAGIC_BYTE ((u8)0xbc)
+#define BNXT_TRACE_MAX 11
+
+struct bnxt_bs_trace_info {
+ u8 *magic_byte;
+ u32 last_offset;
+ u8 wrapped:1;
+ u16 ctx_type;
+ u16 trace_type;
};
+static inline void bnxt_bs_trace_check_wrap(struct bnxt_bs_trace_info *bs_trace,
+ u32 offset)
+{
+ if (!bs_trace->wrapped && bs_trace->magic_byte &&
+ *bs_trace->magic_byte != BNXT_TRACE_BUF_MAGIC_BYTE)
+ bs_trace->wrapped = 1;
+ bs_trace->last_offset = offset;
+}
+
struct bnxt {
void __iomem *bar0;
void __iomem *bar1;
@@ -2198,8 +2260,6 @@ struct bnxt {
#define BNXT_FLAG_TPA (BNXT_FLAG_LRO | BNXT_FLAG_GRO)
#define BNXT_FLAG_JUMBO 0x10
#define BNXT_FLAG_STRIP_VLAN 0x20
- #define BNXT_FLAG_AGG_RINGS (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \
- BNXT_FLAG_LRO)
#define BNXT_FLAG_RFS 0x100
#define BNXT_FLAG_SHARED_RINGS 0x200
#define BNXT_FLAG_PORT_STATS 0x400
@@ -2220,6 +2280,9 @@ struct bnxt {
#define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000
#define BNXT_FLAG_TX_COAL_CMPL 0x8000000
#define BNXT_FLAG_PORT_STATS_EXT 0x10000000
+ #define BNXT_FLAG_HDS 0x20000000
+ #define BNXT_FLAG_AGG_RINGS (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \
+ BNXT_FLAG_LRO | BNXT_FLAG_HDS)
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
BNXT_FLAG_RFS | \
@@ -2227,6 +2290,11 @@ struct bnxt {
#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
+#ifdef CONFIG_BNXT_SRIOV
+#define BNXT_VF_IS_TRUSTED(bp) ((bp)->vf.flags & BNXT_VF_TRUST)
+#else
+#define BNXT_VF_IS_TRUSTED(bp) 0
+#endif
#define BNXT_NPAR(bp) ((bp)->port_partition_type)
#define BNXT_MH(bp) ((bp)->flags & BNXT_FLAG_MULTI_HOST)
#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
@@ -2299,7 +2367,7 @@ struct bnxt {
enum dma_data_direction rx_dir;
u32 rx_ring_size;
u32 rx_agg_ring_size;
- u32 rx_copy_thresh;
+ u32 rx_copybreak;
u32 rx_ring_mask;
u32 rx_agg_ring_mask;
int rx_nr_pages;
@@ -2343,6 +2411,7 @@ struct bnxt {
#define BNXT_RSS_CAP_ESP_V4_RSS_CAP BIT(6)
#define BNXT_RSS_CAP_ESP_V6_RSS_CAP BIT(7)
#define BNXT_RSS_CAP_MULTI_RSS_CTX BIT(8)
+#define BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP BIT(9)
u8 rss_hash_key[HW_HASH_KEY_SIZE];
u8 rss_hash_key_valid:1;
@@ -2356,8 +2425,13 @@ struct bnxt {
u8 tc_to_qidx[BNXT_MAX_QUEUE];
u8 q_ids[BNXT_MAX_QUEUE];
u8 max_q;
+ u8 cos0_cos1_shared;
u8 num_tc;
+ u16 max_pfcwd_tmo_ms;
+
+ u8 tph_mode;
+
unsigned int current_interval;
#define BNXT_TIMER_INTERVAL HZ
@@ -2406,7 +2480,11 @@ struct bnxt {
#define BNXT_FW_CAP_DCBX_AGENT BIT_ULL(2)
#define BNXT_FW_CAP_NEW_RM BIT_ULL(3)
#define BNXT_FW_CAP_IF_CHANGE BIT_ULL(4)
+ #define BNXT_FW_CAP_ENABLE_RDMA_SRIOV BIT_ULL(5)
+ #define BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED BIT_ULL(6)
#define BNXT_FW_CAP_KONG_MB_CHNL BIT_ULL(7)
+ #define BNXT_FW_CAP_ROCE_VF_DYN_ALLOC_SUPPORT BIT_ULL(8)
+ #define BNXT_FW_CAP_LINK_ADMIN BIT_ULL(9)
#define BNXT_FW_CAP_OVS_64BIT_HANDLE BIT_ULL(10)
#define BNXT_FW_CAP_TRUSTED_VF BIT_ULL(11)
#define BNXT_FW_CAP_ERROR_RECOVERY BIT_ULL(13)
@@ -2437,6 +2515,9 @@ struct bnxt {
#define BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO BIT_ULL(38)
#define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3 BIT_ULL(39)
#define BNXT_FW_CAP_VNIC_RE_FLUSH BIT_ULL(40)
+ #define BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS BIT_ULL(41)
+ #define BNXT_FW_CAP_NPAR_1_2 BIT_ULL(42)
+ #define BNXT_FW_CAP_MIRROR_ON_ROCE BIT_ULL(43)
u32 fw_dbg_cap;
@@ -2449,9 +2530,19 @@ struct bnxt {
#define BNXT_SUPPORTS_MULTI_RSS_CTX(bp) \
(BNXT_PF(bp) && BNXT_SUPPORTS_NTUPLE_VNIC(bp) && \
((bp)->rss_cap & BNXT_RSS_CAP_MULTI_RSS_CTX))
+#define BNXT_ROCE_VF_DYN_ALLOC_CAP(bp) \
+ ((bp)->fw_cap & BNXT_FW_CAP_ROCE_VF_DYN_ALLOC_SUPPORT)
#define BNXT_SUPPORTS_QUEUE_API(bp) \
(BNXT_PF(bp) && BNXT_SUPPORTS_NTUPLE_VNIC(bp) && \
((bp)->fw_cap & BNXT_FW_CAP_VNIC_RE_FLUSH))
+#define BNXT_RDMA_SRIOV_EN(bp) \
+ ((bp)->fw_cap & BNXT_FW_CAP_ENABLE_RDMA_SRIOV)
+#define BNXT_ROCE_VF_RESC_CAP(bp) \
+ ((bp)->fw_cap & BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED)
+#define BNXT_SW_RES_LMT(bp) \
+ ((bp)->fw_cap & BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS)
+#define BNXT_MIRROR_ON_ROCE_CAP(bp) \
+ ((bp)->fw_cap & BNXT_FW_CAP_MIRROR_ON_ROCE)
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
@@ -2466,6 +2557,7 @@ struct bnxt {
u16 fw_rx_stats_ext_size;
u16 fw_tx_stats_ext_size;
u16 hw_ring_stats_size;
+ u16 pcie_stat_len;
u8 pri2cos_idx[8];
u8 pri2cos_valid;
@@ -2539,6 +2631,7 @@ struct bnxt {
#define BNXT_FW_RESET_STATE_POLL_FW 4
#define BNXT_FW_RESET_STATE_OPENING 5
#define BNXT_FW_RESET_STATE_POLL_FW_DOWN 6
+#define BNXT_FW_RESET_STATE_ABORT 7
u16 fw_reset_min_dsecs;
#define BNXT_DFLT_FW_RST_MIN_DSECS 20
@@ -2611,6 +2704,11 @@ struct bnxt {
#define BNXT_PHY_FL_BANK_SEL (PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED << 8)
#define BNXT_PHY_FL_SPEEDS2 (PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED << 8)
+ /* copied from flags in hwrm_port_mac_qcaps_output */
+ u8 mac_flags;
+#define BNXT_MAC_FL_NO_MAC_LPBK \
+ PORT_MAC_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED
+
u8 num_tests;
struct bnxt_test_info *test_info;
@@ -2622,6 +2720,8 @@ struct bnxt {
u16 dump_flag;
#define BNXT_DUMP_LIVE 0
#define BNXT_DUMP_CRASH 1
+#define BNXT_DUMP_DRIVER 2
+#define BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE 3
struct bpf_prog *xdp_prog;
@@ -2650,6 +2750,7 @@ struct bnxt {
struct bnxt_ctx_pg_info *fw_crash_mem;
u32 fw_crash_len;
+ struct bnxt_bs_trace_info bs_trace[BNXT_TRACE_MAX];
};
#define BNXT_NUM_RX_RING_STATS 8
@@ -2711,6 +2812,8 @@ struct bnxt {
#define SFF_MODULE_ID_QSFP28 0x11
#define BNXT_MAX_PHY_I2C_RESP_SIZE 64
+#define BNXT_HDS_THRESHOLD_MAX 1023
+
static inline u32 bnxt_tx_avail(struct bnxt *bp,
const struct bnxt_tx_ring_info *txr)
{
@@ -2785,15 +2888,17 @@ static inline bool bnxt_sriov_cfg(struct bnxt *bp)
#endif
}
+extern const u16 bnxt_bstore_to_trace[];
extern const u16 bnxt_lhint_arr[];
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
u16 prod, gfp_t gfp);
void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data);
u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx);
+bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type);
void bnxt_set_tpa_flags(struct bnxt *bp);
void bnxt_set_ring_params(struct bnxt *);
-int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
+void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
@@ -2822,7 +2927,9 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
int bnxt_nq_rings_in_use(struct bnxt *bp);
int bnxt_hwrm_set_coal(struct bnxt *);
-void bnxt_free_ctx_mem(struct bnxt *bp);
+size_t bnxt_copy_ctx_mem(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm,
+ void *buf, size_t offset);
+void bnxt_free_ctx_mem(struct bnxt *bp, bool force);
int bnxt_num_tx_to_cp(struct bnxt *bp, int tx);
unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp);
@@ -2837,6 +2944,7 @@ void bnxt_report_link(struct bnxt *bp);
int bnxt_update_link(struct bnxt *bp, bool chng_link_state);
int bnxt_hwrm_set_pause(struct bnxt *);
int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
+void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset);
int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset);
int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
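Editorial aside (not part of the patch): the bnxt.h hunks above narrow the P5 aggregation-ID completion fields from 0xffff << 16 to 0x0fff << 16. A small standalone sketch of the resulting mask-and-shift extraction; the completion word used here is made up.

/* Illustrative only -- not part of the patch; the completion word is hypothetical. */
#include <stdint.h>
#include <stdio.h>

#define RX_AGG_CMP_AGG_ID	(0x0fffu << 16)	/* narrowed from 0xffff << 16 */
#define RX_AGG_CMP_AGG_ID_SHIFT	16

static uint16_t rx_agg_cmp_agg_id(uint32_t cmp_word)
{
	return (cmp_word & RX_AGG_CMP_AGG_ID) >> RX_AGG_CMP_AGG_ID_SHIFT;
}

int main(void)
{
	uint32_t cmp_word = 0xabcd0001;	/* hypothetical completion word */

	/* Prints 0xbcd: the top four bits of the old 16-bit field are ignored. */
	printf("agg_id = 0x%x\n", rx_agg_cmp_agg_id(cmp_word));
	return 0;
}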
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
index 4e2b938ed1f7..ccb8b509662d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
@@ -10,11 +10,57 @@
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_coredump.h"
+static const u16 bnxt_bstore_to_seg_id[] = {
+ [BNXT_CTX_QP] = BNXT_CTX_MEM_SEG_QP,
+ [BNXT_CTX_SRQ] = BNXT_CTX_MEM_SEG_SRQ,
+ [BNXT_CTX_CQ] = BNXT_CTX_MEM_SEG_CQ,
+ [BNXT_CTX_VNIC] = BNXT_CTX_MEM_SEG_VNIC,
+ [BNXT_CTX_STAT] = BNXT_CTX_MEM_SEG_STAT,
+ [BNXT_CTX_STQM] = BNXT_CTX_MEM_SEG_STQM,
+ [BNXT_CTX_FTQM] = BNXT_CTX_MEM_SEG_FTQM,
+ [BNXT_CTX_MRAV] = BNXT_CTX_MEM_SEG_MRAV,
+ [BNXT_CTX_TIM] = BNXT_CTX_MEM_SEG_TIM,
+ [BNXT_CTX_SRT] = BNXT_CTX_MEM_SEG_SRT,
+ [BNXT_CTX_SRT2] = BNXT_CTX_MEM_SEG_SRT2,
+ [BNXT_CTX_CRT] = BNXT_CTX_MEM_SEG_CRT,
+ [BNXT_CTX_CRT2] = BNXT_CTX_MEM_SEG_CRT2,
+ [BNXT_CTX_RIGP0] = BNXT_CTX_MEM_SEG_RIGP0,
+ [BNXT_CTX_L2HWRM] = BNXT_CTX_MEM_SEG_L2HWRM,
+ [BNXT_CTX_REHWRM] = BNXT_CTX_MEM_SEG_REHWRM,
+ [BNXT_CTX_CA0] = BNXT_CTX_MEM_SEG_CA0,
+ [BNXT_CTX_CA1] = BNXT_CTX_MEM_SEG_CA1,
+ [BNXT_CTX_CA2] = BNXT_CTX_MEM_SEG_CA2,
+ [BNXT_CTX_RIGP1] = BNXT_CTX_MEM_SEG_RIGP1,
+ [BNXT_CTX_KONG] = BNXT_CTX_MEM_SEG_KONG,
+ [BNXT_CTX_QPC] = BNXT_CTX_MEM_SEG_QPC,
+};
+
+static int bnxt_dbg_hwrm_log_buffer_flush(struct bnxt *bp, u16 type, u32 flags,
+ u32 *offset)
+{
+ struct hwrm_dbg_log_buffer_flush_output *resp;
+ struct hwrm_dbg_log_buffer_flush_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_DBG_LOG_BUFFER_FLUSH);
+ if (rc)
+ return rc;
+
+ req->flags = cpu_to_le32(flags);
+ req->type = cpu_to_le16(type);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (!rc)
+ *offset = le32_to_cpu(resp->current_buffer_offset);
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
struct bnxt_hwrm_dbg_dma_info *info)
{
@@ -66,20 +112,30 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
}
}
+ if (cmn_req->req_type ==
+ cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
+ info->dest_buf_size += len;
+
if (info->dest_buf) {
if ((info->seg_start + off + len) <=
BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
- memcpy(info->dest_buf + off, dma_buf, len);
+ u16 copylen = min_t(u16, len,
+ info->dest_buf_size - off);
+
+ memcpy(info->dest_buf + off, dma_buf, copylen);
+ if (copylen < len)
+ break;
} else {
rc = -ENOBUFS;
+ if (cmn_req->req_type ==
+ cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
+ kfree(info->dest_buf);
+ info->dest_buf = NULL;
+ }
break;
}
}
- if (cmn_req->req_type ==
- cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
- info->dest_buf_size += len;
-
if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
break;
@@ -115,8 +171,8 @@ static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
return rc;
}
-static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
- u16 segment_id)
+static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 dump_type,
+ u16 component_id, u16 segment_id)
{
struct hwrm_dbg_coredump_initiate_input *req;
int rc;
@@ -128,6 +184,8 @@ static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
req->component_id = cpu_to_le16(component_id);
req->segment_id = cpu_to_le16(segment_id);
+ if (dump_type == BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE)
+ req->seg_flags = DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_COLLECT_CTX_L1_CACHE;
return hwrm_req_send(bp, req);
}
@@ -165,11 +223,12 @@ static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
return rc;
}
-static void
+void
bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
struct bnxt_coredump_segment_hdr *seg_hdr,
struct coredump_segment_record *seg_rec, u32 seg_len,
- int status, u32 duration, u32 instance)
+ int status, u32 duration, u32 instance, u32 comp_id,
+ u32 seg_id)
{
memset(seg_hdr, 0, sizeof(*seg_hdr));
memcpy(seg_hdr->signature, "sEgM", 4);
@@ -180,11 +239,8 @@ bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
seg_hdr->high_version = seg_rec->version_hi;
seg_hdr->flags = cpu_to_le32(seg_rec->compress_flags);
} else {
- /* For hwrm_ver_get response Component id = 2
- * and Segment id = 0
- */
- seg_hdr->component_id = cpu_to_le32(2);
- seg_hdr->segment_id = 0;
+ seg_hdr->component_id = cpu_to_le32(comp_id);
+ seg_hdr->segment_id = cpu_to_le32(seg_id);
}
seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn);
seg_hdr->length = cpu_to_le32(seg_len);
@@ -269,7 +325,83 @@ bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
record->ioctl_high_version = 0;
}
-static int __bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
+static void bnxt_fill_drv_seg_record(struct bnxt *bp,
+ struct bnxt_driver_segment_record *record,
+ struct bnxt_ctx_mem_type *ctxm, u16 type)
+{
+ struct bnxt_bs_trace_info *bs_trace = &bp->bs_trace[type];
+ u32 offset = 0;
+ int rc = 0;
+
+ record->max_entries = cpu_to_le32(ctxm->max_entries);
+ record->entry_size = cpu_to_le32(ctxm->entry_size);
+
+ rc = bnxt_dbg_hwrm_log_buffer_flush(bp, type, 0, &offset);
+ if (rc)
+ return;
+
+ bnxt_bs_trace_check_wrap(bs_trace, offset);
+ record->offset = cpu_to_le32(bs_trace->last_offset);
+ record->wrapped = bs_trace->wrapped;
+}
+
+static u32 bnxt_get_ctx_coredump(struct bnxt *bp, void *buf, u32 offset,
+ u32 *segs)
+{
+ struct bnxt_driver_segment_record record = {};
+ struct bnxt_coredump_segment_hdr seg_hdr;
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
+ u32 comp_id = BNXT_DRV_COMP_ID;
+ void *data = NULL;
+ size_t len = 0;
+ u16 type;
+
+ *segs = 0;
+ if (!ctx)
+ return 0;
+
+ if (buf)
+ buf += offset;
+ for (type = 0; type < BNXT_CTX_V2_MAX; type++) {
+ struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ bool trace = bnxt_bs_trace_avail(bp, type);
+ u32 seg_id = bnxt_bstore_to_seg_id[type];
+ size_t seg_len, extra_hlen = 0;
+
+ if (!ctxm->mem_valid || !seg_id)
+ continue;
+
+ if (trace) {
+ extra_hlen = BNXT_SEG_RCD_LEN;
+ if (buf) {
+ u16 trace_type = bnxt_bstore_to_trace[type];
+
+ bnxt_fill_drv_seg_record(bp, &record, ctxm,
+ trace_type);
+ }
+ }
+
+ if (buf)
+ data = buf + BNXT_SEG_HDR_LEN + extra_hlen;
+
+ seg_len = bnxt_copy_ctx_mem(bp, ctxm, data, 0) + extra_hlen;
+ if (buf) {
+ bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, seg_len,
+ 0, 0, 0, comp_id, seg_id);
+ memcpy(buf, &seg_hdr, BNXT_SEG_HDR_LEN);
+ buf += BNXT_SEG_HDR_LEN;
+ if (trace)
+ memcpy(buf, &record, BNXT_SEG_RCD_LEN);
+ buf += seg_len;
+ }
+ len += BNXT_SEG_HDR_LEN + seg_len;
+ *segs += 1;
+ }
+ return len;
+}
+
+static int __bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf,
+ u32 *dump_len)
{
u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0;
@@ -287,17 +419,31 @@ static int __bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
start_utc = sys_tz.tz_minuteswest * 60;
seg_hdr_len = sizeof(seg_hdr);
- /* First segment should be hwrm_ver_get response */
+ /* First segment should be hwrm_ver_get response.
+ * For hwrm_ver_get response Component id = 2 and Segment id = 0.
+ */
*dump_len = seg_hdr_len + ver_get_resp_len;
if (buf) {
bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len,
- 0, 0, 0);
+ 0, 0, 0, BNXT_VER_GET_COMP_ID, 0);
memcpy(buf + offset, &seg_hdr, seg_hdr_len);
offset += seg_hdr_len;
memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len);
offset += ver_get_resp_len;
}
+ if (dump_type == BNXT_DUMP_DRIVER) {
+ u32 drv_len, segs = 0;
+
+ drv_len = bnxt_get_ctx_coredump(bp, buf, offset, &segs);
+ *dump_len += drv_len;
+ offset += drv_len;
+ if (buf)
+ coredump.total_segs += segs;
+ goto err;
+ }
+
+ seg_record_len = sizeof(*seg_record);
rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump);
if (rc) {
netdev_err(bp->dev, "Failed to get coredump segment list\n");
@@ -323,7 +469,8 @@ static int __bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
start = jiffies;
- rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
+ rc = bnxt_hwrm_dbg_coredump_initiate(bp, dump_type, comp_id,
+ seg_id);
if (rc) {
netdev_err(bp->dev,
"Failed to initiate coredump for seg = %d\n",
@@ -346,7 +493,7 @@ next_seg:
end = jiffies;
duration = jiffies_to_msecs(end - start);
bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len,
- rc, duration, 0);
+ rc, duration, 0, 0, 0);
if (buf) {
/* Write segment header into the buffer */
@@ -366,9 +513,16 @@ err:
start_utc, coredump.total_segs + 1,
rc);
kfree(coredump.data);
- *dump_len += sizeof(struct bnxt_coredump_record);
- if (rc == -ENOBUFS)
+ if (!rc) {
+ *dump_len += sizeof(struct bnxt_coredump_record);
+ /* The actual coredump length can be smaller than the FW
+ * reported length earlier. Use the ethtool provided length.
+ */
+ if (buf_len)
+ *dump_len = buf_len;
+ } else if (rc == -ENOBUFS) {
netdev_err(bp->dev, "Firmware returned large coredump buffer\n");
+ }
return rc;
}
@@ -442,7 +596,7 @@ int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len)
else
return -EOPNOTSUPP;
} else {
- return __bnxt_get_coredump(bp, buf, dump_len);
+ return __bnxt_get_coredump(bp, dump_type, buf, dump_len);
}
}
@@ -512,9 +666,12 @@ u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type)
return bp->fw_crash_len;
}
- if (bnxt_hwrm_get_dump_len(bp, dump_type, &len)) {
- if (dump_type != BNXT_DUMP_CRASH)
- __bnxt_get_coredump(bp, NULL, &len);
+ if (dump_type != BNXT_DUMP_DRIVER) {
+ if (!bnxt_hwrm_get_dump_len(bp, dump_type, &len))
+ return len;
}
+ if (dump_type != BNXT_DUMP_CRASH)
+ __bnxt_get_coredump(bp, dump_type, NULL, &len);
+
return len;
}
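Editorial aside (not part of the patch): the new bnxt_get_ctx_coredump() above lays each backing-store segment out as a segment header, an optional driver trace record, then the context data, and accumulates the total as header length plus segment length (where the segment length already includes the record). A hedged, self-contained sketch of that accounting with hypothetical sizes, not the driver's structures:

/* Illustrative only -- hypothetical sizes, not the driver's structures. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define SEG_HDR_LEN  32   /* stand-in for BNXT_SEG_HDR_LEN */
#define SEG_RCD_LEN  16   /* stand-in for BNXT_SEG_RCD_LEN */

static size_t add_segment(size_t total, size_t data_len, bool has_trace_record)
{
	size_t extra_hlen = has_trace_record ? SEG_RCD_LEN : 0;
	size_t seg_len = data_len + extra_hlen;   /* everything after the header */

	return total + SEG_HDR_LEN + seg_len;
}

int main(void)
{
	size_t len = 0;

	len = add_segment(len, 4096, false);  /* plain context segment */
	len = add_segment(len, 8192, true);   /* trace segment with driver record */
	printf("coredump payload length = %zu\n", len);  /* (32+4096) + (32+16+8192) */
	return 0;
}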
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
index a76d5c281413..c087df88154a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
@@ -68,11 +68,51 @@ struct bnxt_coredump_record {
__le16 rsvd3[313];
};
+struct bnxt_driver_segment_record {
+ __le32 max_entries;
+ __le32 entry_size;
+ __le32 offset;
+ __u8 wrapped:1;
+ __u8 unused[3];
+};
+
+#define BNXT_VER_GET_COMP_ID 2
+#define BNXT_DRV_COMP_ID 0xd
+
+#define BNXT_CTX_MEM_SEG_ID_START 0x200
+
+#define BNXT_CTX_MEM_SEG_QP (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_QP)
+#define BNXT_CTX_MEM_SEG_SRQ (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_SRQ)
+#define BNXT_CTX_MEM_SEG_CQ (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_CQ)
+#define BNXT_CTX_MEM_SEG_VNIC (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_VNIC)
+#define BNXT_CTX_MEM_SEG_STAT (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_STAT)
+#define BNXT_CTX_MEM_SEG_STQM (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_STQM)
+#define BNXT_CTX_MEM_SEG_FTQM (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_FTQM)
+#define BNXT_CTX_MEM_SEG_MRAV (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_MRAV)
+#define BNXT_CTX_MEM_SEG_TIM (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_TIM)
+
+#define BNXT_CTX_MEM_SEG_SRT 0x1
+#define BNXT_CTX_MEM_SEG_SRT2 0x2
+#define BNXT_CTX_MEM_SEG_CRT 0x3
+#define BNXT_CTX_MEM_SEG_CRT2 0x4
+#define BNXT_CTX_MEM_SEG_RIGP0 0x5
+#define BNXT_CTX_MEM_SEG_L2HWRM 0x6
+#define BNXT_CTX_MEM_SEG_REHWRM 0x7
+#define BNXT_CTX_MEM_SEG_CA0 0x8
+#define BNXT_CTX_MEM_SEG_CA1 0x9
+#define BNXT_CTX_MEM_SEG_CA2 0xa
+#define BNXT_CTX_MEM_SEG_RIGP1 0xb
+#define BNXT_CTX_MEM_SEG_QPC 0xc
+#define BNXT_CTX_MEM_SEG_KONG 0xd
+
#define BNXT_CRASH_DUMP_LEN (8 << 20)
#define COREDUMP_LIST_BUF_LEN 2048
#define COREDUMP_RETRIEVE_BUF_LEN 4096
+#define BNXT_SEG_HDR_LEN sizeof(struct bnxt_coredump_segment_hdr)
+#define BNXT_SEG_RCD_LEN sizeof(struct bnxt_driver_segment_record)
+
struct bnxt_coredump {
void *data;
int data_size;
@@ -118,6 +158,11 @@ struct hwrm_dbg_cmn_output {
#define BNXT_DBG_CR_DUMP_MDM_CFG_DDR \
DBG_CRASHDUMP_MEDIUM_CFG_REQ_TYPE_DDR
+void bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
+ struct bnxt_coredump_segment_hdr *seg_hdr,
+ struct coredump_segment_record *seg_rec,
+ u32 seg_len, int status, u32 duration,
+ u32 instance, u32 comp_id, u32 seg_id);
int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len);
int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len);
u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 0dbb880a7aa0..a00b67334f9b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -16,7 +16,7 @@
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <rdma/ib_verbs.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_dcb.h"
@@ -487,7 +487,9 @@ static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) && i > bp->max_tc)
return -EINVAL;
+ }
+ for (i = 0; i < max_tc; i++) {
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_STRICT:
break;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
index 127b7015f676..3324afbb3bec 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
@@ -10,7 +10,7 @@
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include <linux/dim.h>
#include "bnxt.h"
#include "bnxt_debugfs.h"
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h
index d0bb4887acd0..a0a8d687dd99 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h
@@ -7,7 +7,7 @@
* the Free Software Foundation.
*/
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 4cb0fabf977e..15de802bbac4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -11,7 +11,8 @@
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <net/devlink.h>
-#include "bnxt_hsi.h"
+#include <net/netdev_lock.h>
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_vfr.h"
@@ -39,12 +40,6 @@ bnxt_dl_flash_update(struct devlink *dl,
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
int rc;
- if (!BNXT_PF(bp)) {
- NL_SET_ERR_MSG_MOD(extack,
- "flash update not supported from a VF");
- return -EPERM;
- }
-
devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0);
rc = bnxt_flash_package_from_fw_obj(bp->dev, params->fw, 0, extack);
if (!rc)
@@ -219,7 +214,7 @@ __bnxt_dl_reporter_create(struct bnxt *bp,
{
struct devlink_health_reporter *reporter;
- reporter = devlink_health_reporter_create(bp->dl, ops, 0, bp);
+ reporter = devlink_health_reporter_create(bp->dl, ops, bp);
if (IS_ERR(reporter)) {
netdev_warn(bp->dev, "Failed to create %s health reporter, rc = %ld\n",
ops->name, PTR_ERR(reporter));
@@ -439,14 +434,17 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
bnxt_ulp_stop(bp);
rtnl_lock();
+ netdev_lock(bp->dev);
if (bnxt_sriov_cfg(bp)) {
NL_SET_ERR_MSG_MOD(extack,
"reload is unsupported while VFs are allocated or being configured");
+ netdev_unlock(bp->dev);
rtnl_unlock();
bnxt_ulp_start(bp, 0);
return -EOPNOTSUPP;
}
if (bp->dev->reg_state == NETREG_UNREGISTERED) {
+ netdev_unlock(bp->dev);
rtnl_unlock();
bnxt_ulp_start(bp, 0);
return -ENODEV;
@@ -458,12 +456,13 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
if (rc) {
NL_SET_ERR_MSG_MOD(extack, "Failed to deregister");
if (netif_running(bp->dev))
- dev_close(bp->dev);
+ netif_close(bp->dev);
+ netdev_unlock(bp->dev);
rtnl_unlock();
break;
}
- bnxt_cancel_reservations(bp, false);
- bnxt_free_ctx_mem(bp);
+ bnxt_clear_reservations(bp, false);
+ bnxt_free_ctx_mem(bp, false);
break;
}
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: {
@@ -479,7 +478,9 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
return -EPERM;
}
rtnl_lock();
+ netdev_lock(bp->dev);
if (bp->dev->reg_state == NETREG_UNREGISTERED) {
+ netdev_unlock(bp->dev);
rtnl_unlock();
return -ENODEV;
}
@@ -493,6 +494,7 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
if (rc) {
NL_SET_ERR_MSG_MOD(extack, "Failed to activate firmware");
clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
+ netdev_unlock(bp->dev);
rtnl_unlock();
}
break;
@@ -511,6 +513,8 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
int rc = 0;
+ netdev_assert_locked(bp->dev);
+
*actions_performed = 0;
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
@@ -535,6 +539,7 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
if (!netif_running(bp->dev))
NL_SET_ERR_MSG_MOD(extack,
"Device is closed, not waiting for reset notice that will never come");
+ netdev_unlock(bp->dev);
rtnl_unlock();
while (test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state)) {
if (time_after(jiffies, timeout)) {
@@ -550,6 +555,7 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
msleep(50);
}
rtnl_lock();
+ netdev_lock(bp->dev);
if (!rc)
*actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
@@ -568,8 +574,9 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
}
*actions_performed |= BIT(action);
} else if (netif_running(bp->dev)) {
- dev_close(bp->dev);
+ netif_close(bp->dev);
}
+ netdev_unlock(bp->dev);
rtnl_unlock();
if (action == DEVLINK_RELOAD_ACTION_DRIVER_REINIT)
bnxt_ulp_start(bp, rc);
@@ -666,6 +673,8 @@ static const struct bnxt_dl_nvm_param nvm_params[] = {
NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10, 4},
{DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7, 4},
+ {DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, NVM_OFF_SUPPORT_RDMA,
+ BNXT_NVM_FUNC_CFG, 1, 1},
{BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, NVM_OFF_DIS_GRE_VER_CHECK,
BNXT_NVM_SHARED_CFG, 1, 1},
};
@@ -1010,37 +1019,19 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
}
-static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
- union devlink_param_value *val)
+static int __bnxt_hwrm_nvm_req(struct bnxt *bp,
+ const struct bnxt_dl_nvm_param *nvm, void *msg,
+ union devlink_param_value *val)
{
struct hwrm_nvm_get_variable_input *req = msg;
- struct bnxt_dl_nvm_param nvm_param;
struct hwrm_err_output *resp;
union bnxt_nvm_data *data;
dma_addr_t data_dma_addr;
- int idx = 0, rc, i;
+ int idx = 0, rc;
- /* Get/Set NVM CFG parameter is supported only on PFs */
- if (BNXT_VF(bp)) {
- hwrm_req_drop(bp, req);
- return -EPERM;
- }
-
- for (i = 0; i < ARRAY_SIZE(nvm_params); i++) {
- if (nvm_params[i].id == param_id) {
- nvm_param = nvm_params[i];
- break;
- }
- }
-
- if (i == ARRAY_SIZE(nvm_params)) {
- hwrm_req_drop(bp, req);
- return -EOPNOTSUPP;
- }
-
- if (nvm_param.dir_type == BNXT_NVM_PORT_CFG)
+ if (nvm->dir_type == BNXT_NVM_PORT_CFG)
idx = bp->pf.port_id;
- else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
+ else if (nvm->dir_type == BNXT_NVM_FUNC_CFG)
idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;
data = hwrm_req_dma_slice(bp, req, sizeof(*data), &data_dma_addr);
@@ -1051,23 +1042,23 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
}
req->dest_data_addr = cpu_to_le64(data_dma_addr);
- req->data_len = cpu_to_le16(nvm_param.nvm_num_bits);
- req->option_num = cpu_to_le16(nvm_param.offset);
+ req->data_len = cpu_to_le16(nvm->nvm_num_bits);
+ req->option_num = cpu_to_le16(nvm->offset);
req->index_0 = cpu_to_le16(idx);
if (idx)
req->dimensions = cpu_to_le16(1);
resp = hwrm_req_hold(bp, req);
if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
- bnxt_copy_to_nvm_data(data, val, nvm_param.nvm_num_bits,
- nvm_param.dl_num_bytes);
+ bnxt_copy_to_nvm_data(data, val, nvm->nvm_num_bits,
+ nvm->dl_num_bytes);
rc = hwrm_req_send(bp, msg);
} else {
rc = hwrm_req_send_silent(bp, msg);
if (!rc) {
bnxt_copy_from_nvm_data(val, data,
- nvm_param.nvm_num_bits,
- nvm_param.dl_num_bytes);
+ nvm->nvm_num_bits,
+ nvm->dl_num_bytes);
} else {
if (resp->cmd_err ==
NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST)
@@ -1080,8 +1071,23 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
return rc;
}
+static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
+ union devlink_param_value *val)
+{
+ const struct bnxt_dl_nvm_param *nvm_param;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nvm_params); i++) {
+ nvm_param = &nvm_params[i];
+ if (nvm_param->id == param_id)
+ return __bnxt_hwrm_nvm_req(bp, nvm_param, msg, val);
+ }
+ return -EOPNOTSUPP;
+}
+
static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
struct hwrm_nvm_get_variable_input *req;
@@ -1116,6 +1122,32 @@ static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
return bnxt_hwrm_nvm_req(bp, id, req, &ctx->val);
}
+static int bnxt_dl_roce_validate(struct devlink *dl, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ const struct bnxt_dl_nvm_param nvm_roce_cap = {0, NVM_OFF_RDMA_CAPABLE,
+ BNXT_NVM_SHARED_CFG, 1, 1};
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+ struct hwrm_nvm_get_variable_input *req;
+ union devlink_param_value roce_cap;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE);
+ if (rc)
+ return rc;
+
+ if (__bnxt_hwrm_nvm_req(bp, &nvm_roce_cap, req, &roce_cap)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to verify if device is RDMA Capable");
+ return -EINVAL;
+ }
+ if (!roce_cap.vbool) {
+ NL_SET_ERR_MSG_MOD(extack, "Device does not support RDMA");
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int bnxt_dl_msix_validate(struct devlink *dl, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack)
@@ -1137,7 +1169,8 @@ static int bnxt_dl_msix_validate(struct devlink *dl, u32 id,
}
static int bnxt_remote_dev_reset_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
@@ -1180,6 +1213,10 @@ static const struct devlink_param bnxt_dl_params[] = {
BIT(DEVLINK_PARAM_CMODE_PERMANENT),
bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
bnxt_dl_msix_validate),
+ DEVLINK_PARAM_GENERIC(ENABLE_ROCE,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
+ bnxt_dl_roce_validate),
DEVLINK_PARAM_DRIVER(BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK,
"gre_ver_check", DEVLINK_PARAM_TYPE_BOOL,
BIT(DEVLINK_PARAM_CMODE_PERMANENT),
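Editorial aside (not part of the patch): the devlink hunks above refactor bnxt_hwrm_nvm_req() into a table lookup that hands the matched descriptor to a helper, which the new RoCE validation then reuses with its own descriptor. A hedged sketch of the lookup-and-dispatch pattern with hypothetical types and values:

/* Illustrative only -- hypothetical descriptor table, not the driver's. */
#include <stdio.h>

struct nvm_param_desc {
	unsigned int id;
	unsigned int offset;
	unsigned int num_bits;
};

static const struct nvm_param_desc params[] = {
	{ .id = 1, .offset = 401, .num_bits = 1 },
	{ .id = 2, .offset = 108, .num_bits = 10 },
};

static int do_nvm_req(const struct nvm_param_desc *p)
{
	printf("request offset=%u bits=%u\n", p->offset, p->num_bits);
	return 0;
}

static int nvm_req(unsigned int param_id)
{
	size_t i;

	/* Look up the descriptor once, then pass it by pointer to the worker. */
	for (i = 0; i < sizeof(params) / sizeof(params[0]); i++)
		if (params[i].id == param_id)
			return do_nvm_req(&params[i]);
	return -95;	/* -EOPNOTSUPP */
}

int main(void)
{
	return nvm_req(2);
}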
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index b8105065367b..7f45dcd7b287 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -41,8 +41,10 @@ static inline void bnxt_dl_set_remote_reset(struct devlink *dl, bool value)
#define NVM_OFF_MSIX_VEC_PER_PF_MAX 108
#define NVM_OFF_MSIX_VEC_PER_PF_MIN 114
#define NVM_OFF_IGNORE_ARI 164
+#define NVM_OFF_RDMA_CAPABLE 161
#define NVM_OFF_DIS_GRE_VER_CHECK 171
#define NVM_OFF_ENABLE_SRIOV 401
+#define NVM_OFF_SUPPORT_RDMA 506
#define NVM_OFF_NVM_CFG_VER 602
#define BNXT_NVM_CFG_VER_BITS 8
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
index 6f6576dc417a..53a3bcb0efe0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
@@ -8,7 +8,7 @@
*/
#include <linux/dim.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
void bnxt_dim_work(struct work_struct *work)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 6ef06579df53..068e191ede19 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -24,8 +24,9 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include <linux/timecounter.h>
+#include <net/netdev_queues.h>
#include <net/netlink.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
@@ -687,16 +688,22 @@ skip_ring_stats:
buf[j] = *(rx_port_stats_ext + n);
}
for (i = 0; i < 8; i++, j++) {
- long n = bnxt_tx_bytes_pri_arr[i].base_off +
- bp->pri2cos_idx[i];
+ u8 cos_idx = bp->pri2cos_idx[i];
+ long n;
+ n = bnxt_tx_bytes_pri_arr[i].base_off + cos_idx;
buf[j] = *(tx_port_stats_ext + n);
+ if (bp->cos0_cos1_shared && !cos_idx)
+ buf[j] += *(tx_port_stats_ext + n + 1);
}
for (i = 0; i < 8; i++, j++) {
- long n = bnxt_tx_pkts_pri_arr[i].base_off +
- bp->pri2cos_idx[i];
+ u8 cos_idx = bp->pri2cos_idx[i];
+ long n;
+ n = bnxt_tx_pkts_pri_arr[i].base_off + cos_idx;
buf[j] = *(tx_port_stats_ext + n);
+ if (bp->cos0_cos1_shared && !cos_idx)
+ buf[j] += *(tx_port_stats_ext + n + 1);
}
}
}
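Editorial aside (not part of the patch): the ethtool stats hunk above makes a priority mapped to CoS index 0 also add the adjacent counter when the device reports that CoS queues 0 and 1 are shared. A self-contained sketch of that accumulation with made-up counter values:

/* Illustrative only -- hypothetical counters, not the driver's stats layout. */
#include <stdint.h>
#include <stdio.h>

static uint64_t pri_counter(const uint64_t *cos_counters, unsigned int cos_idx,
			    int cos0_cos1_shared)
{
	uint64_t val = cos_counters[cos_idx];

	/* When CoS 0 and 1 are shared, index 0 reports the sum of both. */
	if (cos0_cos1_shared && cos_idx == 0)
		val += cos_counters[1];
	return val;
}

int main(void)
{
	uint64_t counters[8] = { 100, 40, 7, 0, 0, 0, 0, 0 };

	printf("pri->cos0, shared:   %llu\n",
	       (unsigned long long)pri_counter(counters, 0, 1));  /* 140 */
	printf("pri->cos0, unshared: %llu\n",
	       (unsigned long long)pri_counter(counters, 0, 0));  /* 100 */
	return 0;
}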
@@ -833,6 +840,8 @@ static void bnxt_get_ringparam(struct net_device *dev,
ering->rx_pending = bp->rx_ring_size;
ering->rx_jumbo_pending = bp->rx_agg_ring_size;
ering->tx_pending = bp->tx_ring_size;
+
+ kernel_ering->hds_thresh_max = BNXT_HDS_THRESHOLD_MAX;
}
static int bnxt_set_ringparam(struct net_device *dev,
@@ -840,16 +849,35 @@ static int bnxt_set_ringparam(struct net_device *dev,
struct kernel_ethtool_ringparam *kernel_ering,
struct netlink_ext_ack *extack)
{
+ u8 tcp_data_split = kernel_ering->tcp_data_split;
struct bnxt *bp = netdev_priv(dev);
+ u8 hds_config_mod;
if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
(ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
(ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
return -EINVAL;
+ hds_config_mod = tcp_data_split != dev->cfg->hds_config;
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_DISABLED && hds_config_mod)
+ return -EINVAL;
+
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
+ hds_config_mod && BNXT_RX_PAGE_MODE(bp)) {
+ NL_SET_ERR_MSG_MOD(extack, "tcp-data-split is disallowed when XDP is attached");
+ return -EINVAL;
+ }
+
if (netif_running(dev))
bnxt_close_nic(bp, false, false);
+ if (hds_config_mod) {
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED)
+ bp->flags |= BNXT_FLAG_HDS;
+ else if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN)
+ bp->flags &= ~BNXT_FLAG_HDS;
+ }
+
bp->rx_ring_size = ering->rx_pending;
bp->tx_ring_size = ering->tx_pending;
bnxt_set_ring_params(bp);
@@ -1124,14 +1152,15 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
fkeys = &fltr->fkeys;
fmasks = &fltr->fmasks;
if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
- if (fkeys->basic.ip_proto == IPPROTO_ICMP ||
- fkeys->basic.ip_proto == IPPROTO_RAW) {
+ if (fkeys->basic.ip_proto == BNXT_IP_PROTO_WILDCARD) {
fs->flow_type = IP_USER_FLOW;
fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
- if (fkeys->basic.ip_proto == IPPROTO_ICMP)
- fs->h_u.usr_ip4_spec.proto = IPPROTO_ICMP;
- else
- fs->h_u.usr_ip4_spec.proto = IPPROTO_RAW;
+ fs->h_u.usr_ip4_spec.proto = BNXT_IP_PROTO_WILDCARD;
+ fs->m_u.usr_ip4_spec.proto = 0;
+ } else if (fkeys->basic.ip_proto == IPPROTO_ICMP) {
+ fs->flow_type = IP_USER_FLOW;
+ fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ fs->h_u.usr_ip4_spec.proto = IPPROTO_ICMP;
fs->m_u.usr_ip4_spec.proto = BNXT_IP_PROTO_FULL_MASK;
} else if (fkeys->basic.ip_proto == IPPROTO_TCP) {
fs->flow_type = TCP_V4_FLOW;
@@ -1153,13 +1182,13 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
fs->m_u.tcp_ip4_spec.pdst = fmasks->ports.dst;
}
} else {
- if (fkeys->basic.ip_proto == IPPROTO_ICMPV6 ||
- fkeys->basic.ip_proto == IPPROTO_RAW) {
+ if (fkeys->basic.ip_proto == BNXT_IP_PROTO_WILDCARD) {
fs->flow_type = IPV6_USER_FLOW;
- if (fkeys->basic.ip_proto == IPPROTO_ICMPV6)
- fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_ICMPV6;
- else
- fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_RAW;
+ fs->h_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_WILDCARD;
+ fs->m_u.usr_ip6_spec.l4_proto = 0;
+ } else if (fkeys->basic.ip_proto == IPPROTO_ICMPV6) {
+ fs->flow_type = IPV6_USER_FLOW;
+ fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_ICMPV6;
fs->m_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_FULL_MASK;
} else if (fkeys->basic.ip_proto == IPPROTO_TCP) {
fs->flow_type = TCP_V6_FLOW;
@@ -1186,10 +1215,14 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
}
}
- if (fltr->base.flags & BNXT_ACT_DROP)
+ if (fltr->base.flags & BNXT_ACT_DROP) {
fs->ring_cookie = RX_CLS_FLOW_DISC;
- else
+ } else if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
+ fs->flow_type |= FLOW_RSS;
+ cmd->rss_context = fltr->base.fw_vnic_id;
+ } else {
fs->ring_cookie = fltr->base.rxq;
+ }
rc = 0;
fltr_err:
@@ -1282,10 +1315,12 @@ static int bnxt_add_l2_cls_rule(struct bnxt *bp,
static bool bnxt_verify_ntuple_ip4_flow(struct ethtool_usrip4_spec *ip_spec,
struct ethtool_usrip4_spec *ip_mask)
{
+ u8 mproto = ip_mask->proto;
+ u8 sproto = ip_spec->proto;
+
if (ip_mask->l4_4_bytes || ip_mask->tos ||
ip_spec->ip_ver != ETH_RX_NFC_IP4 ||
- ip_mask->proto != BNXT_IP_PROTO_FULL_MASK ||
- (ip_spec->proto != IPPROTO_RAW && ip_spec->proto != IPPROTO_ICMP))
+ (mproto && (mproto != BNXT_IP_PROTO_FULL_MASK || sproto != IPPROTO_ICMP)))
return false;
return true;
}
@@ -1293,10 +1328,11 @@ static bool bnxt_verify_ntuple_ip4_flow(struct ethtool_usrip4_spec *ip_spec,
static bool bnxt_verify_ntuple_ip6_flow(struct ethtool_usrip6_spec *ip_spec,
struct ethtool_usrip6_spec *ip_mask)
{
+ u8 mproto = ip_mask->l4_proto;
+ u8 sproto = ip_spec->l4_proto;
+
if (ip_mask->l4_4_bytes || ip_mask->tclass ||
- ip_mask->l4_proto != BNXT_IP_PROTO_FULL_MASK ||
- (ip_spec->l4_proto != IPPROTO_RAW &&
- ip_spec->l4_proto != IPPROTO_ICMPV6))
+ (mproto && (mproto != BNXT_IP_PROTO_FULL_MASK || sproto != IPPROTO_ICMPV6)))
return false;
return true;
}
@@ -1350,7 +1386,8 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
struct ethtool_usrip4_spec *ip_spec = &fs->h_u.usr_ip4_spec;
struct ethtool_usrip4_spec *ip_mask = &fs->m_u.usr_ip4_spec;
- fkeys->basic.ip_proto = ip_spec->proto;
+ fkeys->basic.ip_proto = ip_mask->proto ? ip_spec->proto
+ : BNXT_IP_PROTO_WILDCARD;
fkeys->basic.n_proto = htons(ETH_P_IP);
fkeys->addrs.v4addrs.src = ip_spec->ip4src;
fmasks->addrs.v4addrs.src = ip_mask->ip4src;
@@ -1381,7 +1418,8 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
struct ethtool_usrip6_spec *ip_spec = &fs->h_u.usr_ip6_spec;
struct ethtool_usrip6_spec *ip_mask = &fs->m_u.usr_ip6_spec;
- fkeys->basic.ip_proto = ip_spec->l4_proto;
+ fkeys->basic.ip_proto = ip_mask->l4_proto ? ip_spec->l4_proto
+ : BNXT_IP_PROTO_WILDCARD;
fkeys->basic.n_proto = htons(ETH_P_IPV6);
fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src;
fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src;
@@ -1552,11 +1590,16 @@ static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
{
if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
return RXH_IP_SRC | RXH_IP_DST;
+ if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL)
+ return RXH_IP_SRC | RXH_IP_DST | RXH_IP6_FL;
return 0;
}
-static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+static int bnxt_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct bnxt *bp = netdev_priv(dev);
+
cmd->data = 0;
switch (cmd->flow_type) {
case TCP_V4_FLOW:
@@ -1615,20 +1658,30 @@ static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
#define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
#define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)
-static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+static int bnxt_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
- u32 rss_hash_cfg = bp->rss_hash_cfg;
+ struct bnxt *bp = netdev_priv(dev);
int tuple, rc = 0;
+ u32 rss_hash_cfg;
+
+ rss_hash_cfg = bp->rss_hash_cfg;
if (cmd->data == RXH_4TUPLE)
tuple = 4;
- else if (cmd->data == RXH_2TUPLE)
+ else if (cmd->data == RXH_2TUPLE ||
+ cmd->data == (RXH_2TUPLE | RXH_IP6_FL))
tuple = 2;
else if (!cmd->data)
tuple = 0;
else
return -EINVAL;
+ if (cmd->data & RXH_IP6_FL &&
+ !(bp->rss_cap & BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP))
+ return -EINVAL;
+
if (cmd->flow_type == TCP_V4_FLOW) {
rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
if (tuple == 4)
@@ -1692,10 +1745,15 @@ static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
case AH_V6_FLOW:
case ESP_V6_FLOW:
case IPV6_FLOW:
- if (tuple == 2)
+ rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL);
+ if (!tuple)
+ break;
+ if (cmd->data & RXH_IP6_FL)
+ rss_hash_cfg |=
+ VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL;
+ else if (tuple == 2)
rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
- else if (!tuple)
- rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
break;
}
@@ -1712,6 +1770,13 @@ static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
return rc;
}
+static u32 bnxt_get_rx_ring_count(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ return bp->rx_nr_rings;
+}
+
static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -1719,10 +1784,6 @@ static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
int rc = 0;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = bp->rx_nr_rings;
- break;
-
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = bp->ntp_fltr_count;
cmd->data = bp->max_fltr | RX_CLS_LOC_SPECIAL;
@@ -1736,10 +1797,6 @@ static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
rc = bnxt_grxclsrule(bp, cmd);
break;
- case ETHTOOL_GRXFH:
- rc = bnxt_grxfh(bp, cmd);
- break;
-
default:
rc = -EOPNOTSUPP;
break;
@@ -1754,10 +1811,6 @@ static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
int rc;
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- rc = bnxt_srxfh(bp, cmd);
- break;
-
case ETHTOOL_SRXCLSRLINS:
rc = bnxt_srxclsrlins(bp, cmd);
break;
@@ -2017,30 +2070,56 @@ static void bnxt_get_drvinfo(struct net_device *dev,
static int bnxt_get_regs_len(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
- int reg_len;
if (!BNXT_PF(bp))
return -EOPNOTSUPP;
- reg_len = BNXT_PXP_REG_LEN;
+ return BNXT_PXP_REG_LEN + bp->pcie_stat_len;
+}
- if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)
- reg_len += sizeof(struct pcie_ctx_hw_stats);
+static void *
+__bnxt_hwrm_pcie_qstats(struct bnxt *bp, struct hwrm_pcie_qstats_input *req)
+{
+ struct pcie_ctx_hw_stats_v2 *hw_pcie_stats;
+ dma_addr_t hw_pcie_stats_addr;
+ int rc;
- return reg_len;
+ hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats),
+ &hw_pcie_stats_addr);
+ if (!hw_pcie_stats)
+ return NULL;
+
+ req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
+ req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
+ rc = hwrm_req_send(bp, req);
+
+ return rc ? NULL : hw_pcie_stats;
}
+#define BNXT_PCIE_32B_ENTRY(start, end) \
+ { offsetof(struct pcie_ctx_hw_stats_v2, start),\
+ offsetof(struct pcie_ctx_hw_stats_v2, end) }
+
+static const struct {
+ u16 start;
+ u16 end;
+} bnxt_pcie_32b_entries[] = {
+ BNXT_PCIE_32B_ENTRY(pcie_ltssm_histogram[0], pcie_ltssm_histogram[3]),
+ BNXT_PCIE_32B_ENTRY(pcie_tl_credit_nph_histogram[0], unused_1),
+ BNXT_PCIE_32B_ENTRY(pcie_rd_latency_histogram[0], unused_2),
+};
+
static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *_p)
{
- struct pcie_ctx_hw_stats *hw_pcie_stats;
+ struct hwrm_pcie_qstats_output *resp;
struct hwrm_pcie_qstats_input *req;
struct bnxt *bp = netdev_priv(dev);
- dma_addr_t hw_pcie_stats_addr;
- int rc;
+ u8 *src;
regs->version = 0;
- bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);
+ if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_REG_ACCESS_RESTRICTED))
+ bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);
if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
return;
@@ -2048,25 +2127,37 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS))
return;
- hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats),
- &hw_pcie_stats_addr);
- if (!hw_pcie_stats) {
- hwrm_req_drop(bp, req);
- return;
- }
-
- regs->version = 1;
- hwrm_req_hold(bp, req); /* hold on to slice */
- req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
- req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
- rc = hwrm_req_send(bp, req);
- if (!rc) {
- __le64 *src = (__le64 *)hw_pcie_stats;
- u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
- int i;
-
- for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
- dst[i] = le64_to_cpu(src[i]);
+ resp = hwrm_req_hold(bp, req);
+ src = __bnxt_hwrm_pcie_qstats(bp, req);
+ if (src) {
+ u8 *dst = (u8 *)(_p + BNXT_PXP_REG_LEN);
+ int i, j, len;
+
+ len = min(bp->pcie_stat_len, le16_to_cpu(resp->pcie_stat_size));
+ if (len <= sizeof(struct pcie_ctx_hw_stats))
+ regs->version = 1;
+ else if (len < sizeof(struct pcie_ctx_hw_stats_v2))
+ regs->version = 2;
+ else
+ regs->version = 3;
+
+ for (i = 0, j = 0; i < len; ) {
+ if (i >= bnxt_pcie_32b_entries[j].start &&
+ i <= bnxt_pcie_32b_entries[j].end) {
+ u32 *dst32 = (u32 *)(dst + i);
+
+ *dst32 = le32_to_cpu(*(__le32 *)(src + i));
+ i += 4;
+ if (i > bnxt_pcie_32b_entries[j].end &&
+ j < ARRAY_SIZE(bnxt_pcie_32b_entries) - 1)
+ j++;
+ } else {
+ u64 *dst64 = (u64 *)(dst + i);
+
+ *dst64 = le64_to_cpu(*(__le64 *)(src + i));
+ i += 8;
+ }
+ }
}
hwrm_req_drop(bp, req);
}
@@ -2774,7 +2865,7 @@ u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
static void bnxt_get_default_speeds(struct ethtool_link_ksettings *lk_ksettings,
struct bnxt_link_info *link_info)
{
- struct ethtool_link_settings_hdr *base = &lk_ksettings->base;
+ struct ethtool_link_settings *base = &lk_ksettings->base;
if (link_info->link_state == BNXT_LINK_STATE_UP) {
base->speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
@@ -2793,7 +2884,7 @@ static void bnxt_get_default_speeds(struct ethtool_link_ksettings *lk_ksettings,
static int bnxt_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *lk_ksettings)
{
- struct ethtool_link_settings_hdr *base = &lk_ksettings->base;
+ struct ethtool_link_settings *base = &lk_ksettings->base;
enum ethtool_link_mode_bit_indices link_mode;
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info;
@@ -2831,19 +2922,24 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
}
base->port = PORT_NONE;
- if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
+ if (media == BNXT_MEDIA_TP) {
base->port = PORT_TP;
linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
lk_ksettings->link_modes.supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
lk_ksettings->link_modes.advertising);
+ } else if (media == BNXT_MEDIA_KR) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT,
+ lk_ksettings->link_modes.supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT,
+ lk_ksettings->link_modes.advertising);
} else {
linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
lk_ksettings->link_modes.supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
lk_ksettings->link_modes.advertising);
- if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
+ if (media == BNXT_MEDIA_CR)
base->port = PORT_DA;
else
base->port = PORT_FIBRE;
@@ -3016,9 +3112,9 @@ u16 bnxt_get_fw_auto_link_speeds(const unsigned long *mode)
static int bnxt_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *lk_ksettings)
{
- const struct ethtool_link_settings_hdr *base = &lk_ksettings->base;
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
+ const struct ethtool_link_settings *base = &lk_ksettings->base;
bool set_pause = false;
u32 speed, lanes = 0;
int rc = 0;
@@ -3121,7 +3217,8 @@ static int bnxt_get_fecparam(struct net_device *dev,
}
static void bnxt_get_fec_stats(struct net_device *dev,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
struct bnxt *bp = netdev_priv(dev);
u64 *rx;
@@ -4146,7 +4243,7 @@ err:
static void bnxt_get_pkgver(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
- char buf[FW_VER_STR_LEN];
+ char buf[FW_VER_STR_LEN - 5];
int len;
if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) {
@@ -4312,6 +4409,88 @@ static int bnxt_get_eee(struct net_device *dev, struct ethtool_keee *edata)
return 0;
}
+static int bnxt_hwrm_pfcwd_qcfg(struct bnxt *bp, u16 *val)
+{
+ struct hwrm_queue_pfcwd_timeout_qcfg_output *resp;
+ struct hwrm_queue_pfcwd_timeout_qcfg_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_QCFG);
+ if (rc)
+ return rc;
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (!rc)
+ *val = le16_to_cpu(resp->pfcwd_timeout_value);
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
+static int bnxt_hwrm_pfcwd_cfg(struct bnxt *bp, u16 val)
+{
+ struct hwrm_queue_pfcwd_timeout_cfg_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_CFG);
+ if (rc)
+ return rc;
+ req->pfcwd_timeout_value = cpu_to_le16(val);
+ rc = hwrm_req_send(bp, req);
+ return rc;
+}
+
+static int bnxt_set_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u32 rx_copybreak, val;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ rx_copybreak = *(u32 *)data;
+ if (rx_copybreak > BNXT_MAX_RX_COPYBREAK)
+ return -ERANGE;
+ if (rx_copybreak != bp->rx_copybreak) {
+ if (netif_running(dev))
+ return -EBUSY;
+ bp->rx_copybreak = rx_copybreak;
+ }
+ return 0;
+ case ETHTOOL_PFC_PREVENTION_TOUT:
+ if (BNXT_VF(bp) || !bp->max_pfcwd_tmo_ms)
+ return -EOPNOTSUPP;
+
+ val = *(u16 *)data;
+ if (val > bp->max_pfcwd_tmo_ms &&
+ val != PFC_STORM_PREVENTION_AUTO)
+ return -EINVAL;
+ return bnxt_hwrm_pfcwd_cfg(bp, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int bnxt_get_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna, void *data)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = bp->rx_copybreak;
+ break;
+ case ETHTOOL_PFC_PREVENTION_TOUT:
+ if (!bp->max_pfcwd_tmo_ms)
+ return -EOPNOTSUPP;
+ return bnxt_hwrm_pfcwd_qcfg(bp, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
u16 page_number, u8 bank,
u16 start_addr, u16 data_length,
@@ -4360,6 +4539,9 @@ static int bnxt_get_module_info(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
int rc;
+ if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
+ return -EPERM;
+
/* No point in going further if phy status indicates
* module is not inserted or if it is powered down or
* if it is of type 10GBase-T
@@ -4411,6 +4593,9 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
u16 start = eeprom->offset, length = eeprom->len;
int rc = 0;
+ if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
+ return -EPERM;
+
memset(data, 0, eeprom->len);
/* Read A0 portion of the EEPROM */
@@ -4441,6 +4626,11 @@ static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extac
PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
return 0;
+ if (bp->link_info.phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET ||
+ bp->link_info.phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE) {
+ NL_SET_ERR_MSG_MOD(extack, "Operation not supported as PHY type is Base-T");
+ return -EOPNOTSUPP;
+ }
switch (bp->link_info.module_status) {
case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
NL_SET_ERR_MSG_MOD(extack, "Transceiver module is powering down");
@@ -4458,13 +4648,19 @@ static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extac
return -EINVAL;
}
-static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
- const struct ethtool_module_eeprom *page_data,
- struct netlink_ext_ack *extack)
+static int
+bnxt_mod_eeprom_by_page_precheck(struct bnxt *bp,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
{
- struct bnxt *bp = netdev_priv(dev);
int rc;
+ if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Module read/write not permitted on untrusted VF");
+ return -EPERM;
+ }
+
rc = bnxt_get_module_status(bp, extack);
if (rc)
return rc;
@@ -4478,6 +4674,19 @@ static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
NL_SET_ERR_MSG_MOD(extack, "Firmware not capable for bank selection");
return -EINVAL;
}
+ return 0;
+}
+
+static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+
+ rc = bnxt_mod_eeprom_by_page_precheck(bp, page_data, extack);
+ if (rc)
+ return rc;
rc = bnxt_read_sfp_module_eeprom_info(bp, page_data->i2c_address << 1,
page_data->page, page_data->bank,
@@ -4491,6 +4700,62 @@ static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
return page_data->length;
}
+static int bnxt_write_sfp_module_eeprom_info(struct bnxt *bp,
+ const struct ethtool_module_eeprom *page)
+{
+ struct hwrm_port_phy_i2c_write_input *req;
+ int bytes_written = 0;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_WRITE);
+ if (rc)
+ return rc;
+
+ hwrm_req_hold(bp, req);
+ req->i2c_slave_addr = page->i2c_address << 1;
+ req->page_number = cpu_to_le16(page->page);
+ req->bank_number = page->bank;
+ req->port_id = cpu_to_le16(bp->pf.port_id);
+ req->enables = cpu_to_le32(PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET |
+ PORT_PHY_I2C_WRITE_REQ_ENABLES_BANK_NUMBER);
+
+ while (bytes_written < page->length) {
+ u16 xfer_size;
+
+ xfer_size = min_t(u16, page->length - bytes_written,
+ BNXT_MAX_PHY_I2C_RESP_SIZE);
+ req->page_offset = cpu_to_le16(page->offset + bytes_written);
+ req->data_length = xfer_size;
+ memcpy(req->data, page->data + bytes_written, xfer_size);
+ rc = hwrm_req_send(bp, req);
+ if (rc)
+ break;
+ bytes_written += xfer_size;
+ }
+
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
+static int bnxt_set_module_eeprom_by_page(struct net_device *dev,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+
+ rc = bnxt_mod_eeprom_by_page_precheck(bp, page_data, extack);
+ if (rc)
+ return rc;
+
+ rc = bnxt_write_sfp_module_eeprom_info(bp, page_data);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Module`s eeprom write failed");
+ return rc;
+ }
+ return page_data->length;
+}
+
static int bnxt_nway_reset(struct net_device *dev)
{
int rc = 0;
@@ -4762,7 +5027,8 @@ static int bnxt_run_loopback(struct bnxt *bp)
cpr = &rxr->bnapi->cp_ring;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
cpr = rxr->rx_cpr;
- pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
+ pkt_size = min(bp->dev->mtu + ETH_HLEN, max(BNXT_DEFAULT_RX_COPYBREAK,
+ bp->rx_copybreak));
skb = netdev_alloc_skb(bp->dev, pkt_size);
if (!skb)
return -ENOMEM;
@@ -4832,6 +5098,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
if (!bp->num_tests || !BNXT_PF(bp))
return;
+ memset(buf, 0, sizeof(u64) * bp->num_tests);
if (etest->flags & ETH_TEST_FL_OFFLINE &&
bnxt_ulp_registered(bp->edev)) {
etest->flags |= ETH_TEST_FL_FAILED;
@@ -4839,7 +5106,6 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
return;
}
- memset(buf, 0, sizeof(u64) * bp->num_tests);
if (!netif_running(dev)) {
etest->flags |= ETH_TEST_FL_FAILED;
return;
@@ -4872,35 +5138,44 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
bnxt_close_nic(bp, true, false);
bnxt_run_fw_tests(bp, test_mask, &test_results);
- buf[BNXT_MACLPBK_TEST_IDX] = 1;
- bnxt_hwrm_mac_loopback(bp, true);
- msleep(250);
rc = bnxt_half_open_nic(bp);
if (rc) {
- bnxt_hwrm_mac_loopback(bp, false);
etest->flags |= ETH_TEST_FL_FAILED;
return;
}
+ buf[BNXT_MACLPBK_TEST_IDX] = 1;
+ if (bp->mac_flags & BNXT_MAC_FL_NO_MAC_LPBK)
+ goto skip_mac_loopback;
+
+ bnxt_hwrm_mac_loopback(bp, true);
+ msleep(250);
if (bnxt_run_loopback(bp))
etest->flags |= ETH_TEST_FL_FAILED;
else
buf[BNXT_MACLPBK_TEST_IDX] = 0;
bnxt_hwrm_mac_loopback(bp, false);
+skip_mac_loopback:
+ buf[BNXT_PHYLPBK_TEST_IDX] = 1;
+ if (bp->phy_flags & BNXT_PHY_FL_NO_PHY_LPBK)
+ goto skip_phy_loopback;
+
bnxt_hwrm_phy_loopback(bp, true, false);
msleep(1000);
- if (bnxt_run_loopback(bp)) {
- buf[BNXT_PHYLPBK_TEST_IDX] = 1;
+ if (bnxt_run_loopback(bp))
etest->flags |= ETH_TEST_FL_FAILED;
- }
+ else
+ buf[BNXT_PHYLPBK_TEST_IDX] = 0;
+skip_phy_loopback:
+ buf[BNXT_EXTLPBK_TEST_IDX] = 1;
if (do_ext_lpbk) {
etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
bnxt_hwrm_phy_loopback(bp, true, true);
msleep(1000);
- if (bnxt_run_loopback(bp)) {
- buf[BNXT_EXTLPBK_TEST_IDX] = 1;
+ if (bnxt_run_loopback(bp))
etest->flags |= ETH_TEST_FL_FAILED;
- }
+ else
+ buf[BNXT_EXTLPBK_TEST_IDX] = 0;
}
bnxt_hwrm_phy_loopback(bp, false, false);
bnxt_half_close_nic(bp);
@@ -4978,8 +5253,9 @@ static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
{
struct bnxt *bp = netdev_priv(dev);
- if (dump->flag > BNXT_DUMP_CRASH) {
- netdev_info(dev, "Supports only Live(0) and Crash(1) dumps.\n");
+ if (dump->flag > BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE) {
+ netdev_info(dev,
+ "Supports only Live(0), Crash(1), Driver(2), Live with cached context(3) dumps.\n");
return -EINVAL;
}
@@ -5059,6 +5335,26 @@ static int bnxt_get_ts_info(struct net_device *dev,
return 0;
}
+static void bnxt_hwrm_pcie_qstats(struct bnxt *bp)
+{
+ struct hwrm_pcie_qstats_output *resp;
+ struct hwrm_pcie_qstats_input *req;
+
+ bp->pcie_stat_len = 0;
+ if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
+ return;
+
+ if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS))
+ return;
+
+ resp = hwrm_req_hold(bp, req);
+ if (__bnxt_hwrm_pcie_qstats(bp, req))
+ bp->pcie_stat_len = min_t(u16,
+ le16_to_cpu(resp->pcie_stat_size),
+ sizeof(struct pcie_ctx_hw_stats_v2));
+ hwrm_req_drop(bp, req);
+}
+
void bnxt_ethtool_init(struct bnxt *bp)
{
struct hwrm_selftest_qlist_output *resp;
@@ -5067,6 +5363,7 @@ void bnxt_ethtool_init(struct bnxt *bp)
struct net_device *dev = bp->dev;
int i, rc;
+ bnxt_hwrm_pcie_qstats(bp);
if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER))
bnxt_get_pkgver(dev);
@@ -5294,6 +5591,8 @@ const struct ethtool_ops bnxt_ethtool_ops = {
ETHTOOL_COALESCE_STATS_BLOCK_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
ETHTOOL_COALESCE_USE_CQE,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT |
+ ETHTOOL_RING_USE_HDS_THRS,
.get_link_ksettings = bnxt_get_link_ksettings,
.set_link_ksettings = bnxt_set_link_ksettings,
.get_fec_stats = bnxt_get_fec_stats,
@@ -5320,10 +5619,13 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.set_channels = bnxt_set_channels,
.get_rxnfc = bnxt_get_rxnfc,
.set_rxnfc = bnxt_set_rxnfc,
+ .get_rx_ring_count = bnxt_get_rx_ring_count,
.get_rxfh_indir_size = bnxt_get_rxfh_indir_size,
.get_rxfh_key_size = bnxt_get_rxfh_key_size,
.get_rxfh = bnxt_get_rxfh,
.set_rxfh = bnxt_set_rxfh,
+ .get_rxfh_fields = bnxt_get_rxfh_fields,
+ .set_rxfh_fields = bnxt_set_rxfh_fields,
.create_rxfh_context = bnxt_create_rxfh_context,
.modify_rxfh_context = bnxt_modify_rxfh_context,
.remove_rxfh_context = bnxt_remove_rxfh_context,
@@ -5335,9 +5637,12 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_link_ext_stats = bnxt_get_link_ext_stats,
.get_eee = bnxt_get_eee,
.set_eee = bnxt_set_eee,
+ .get_tunable = bnxt_get_tunable,
+ .set_tunable = bnxt_set_tunable,
.get_module_info = bnxt_get_module_info,
.get_module_eeprom = bnxt_get_module_eeprom,
.get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page,
+ .set_module_eeprom_by_page = bnxt_set_module_eeprom_by_page,
.nway_reset = bnxt_nway_reset,
.set_phys_id = bnxt_set_phys_id,
.self_test = bnxt_self_test,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index e2ee030237d4..33b86ede1ce5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -44,6 +44,7 @@ struct bnxt_led_cfg {
#define BNXT_PXP_REG_LEN 0x3110
#define BNXT_IP_PROTO_FULL_MASK 0xFF
+#define BNXT_IP_PROTO_WILDCARD 0x0
extern const struct ethtool_ops bnxt_ethtool_ops;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
deleted file mode 100644
index f8ef6f1a1964..000000000000
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ /dev/null
@@ -1,10720 +0,0 @@
-/* Broadcom NetXtreme-C/E network driver.
- *
- * Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2014-2018 Broadcom Limited
- * Copyright (c) 2018-2024 Broadcom Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation.
- *
- * DO NOT MODIFY!!! This file is automatically generated.
- */
-
-#ifndef _BNXT_HSI_H_
-#define _BNXT_HSI_H_
-
-/* hwrm_cmd_hdr (size:128b/16B) */
-struct hwrm_cmd_hdr {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_resp_hdr (size:64b/8B) */
-struct hwrm_resp_hdr {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
-};
-
-#define CMD_DISCR_TLV_ENCAP 0x8000UL
-#define CMD_DISCR_LAST CMD_DISCR_TLV_ENCAP
-
-
-#define TLV_TYPE_HWRM_REQUEST 0x1UL
-#define TLV_TYPE_HWRM_RESPONSE 0x2UL
-#define TLV_TYPE_ROCE_SP_COMMAND 0x3UL
-#define TLV_TYPE_QUERY_ROCE_CC_GEN1 0x4UL
-#define TLV_TYPE_MODIFY_ROCE_CC_GEN1 0x5UL
-#define TLV_TYPE_QUERY_ROCE_CC_GEN2 0x6UL
-#define TLV_TYPE_MODIFY_ROCE_CC_GEN2 0x7UL
-#define TLV_TYPE_ENGINE_CKV_ALIAS_ECC_PUBLIC_KEY 0x8001UL
-#define TLV_TYPE_ENGINE_CKV_IV 0x8003UL
-#define TLV_TYPE_ENGINE_CKV_AUTH_TAG 0x8004UL
-#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT 0x8005UL
-#define TLV_TYPE_ENGINE_CKV_HOST_ALGORITHMS 0x8006UL
-#define TLV_TYPE_ENGINE_CKV_HOST_ECC_PUBLIC_KEY 0x8007UL
-#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE 0x8008UL
-#define TLV_TYPE_ENGINE_CKV_FW_ECC_PUBLIC_KEY 0x8009UL
-#define TLV_TYPE_ENGINE_CKV_FW_ALGORITHMS 0x800aUL
-#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_FW_ALGORITHMS
-
-
-/* tlv (size:64b/8B) */
-struct tlv {
- __le16 cmd_discr;
- u8 reserved_8b;
- u8 flags;
- #define TLV_FLAGS_MORE 0x1UL
- #define TLV_FLAGS_MORE_LAST 0x0UL
- #define TLV_FLAGS_MORE_NOT_LAST 0x1UL
- #define TLV_FLAGS_REQUIRED 0x2UL
- #define TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
- #define TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
- #define TLV_FLAGS_REQUIRED_LAST TLV_FLAGS_REQUIRED_YES
- __le16 tlv_type;
- __le16 length;
-};
-
-/* input (size:128b/16B) */
-struct input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* output (size:64b/8B) */
-struct output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
-};
-
-/* hwrm_short_input (size:128b/16B) */
-struct hwrm_short_input {
- __le16 req_type;
- __le16 signature;
- #define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL
- #define SHORT_REQ_SIGNATURE_LAST SHORT_REQ_SIGNATURE_SHORT_CMD
- __le16 target_id;
- #define SHORT_REQ_TARGET_ID_DEFAULT 0x0UL
- #define SHORT_REQ_TARGET_ID_TOOLS 0xfffdUL
- #define SHORT_REQ_TARGET_ID_LAST SHORT_REQ_TARGET_ID_TOOLS
- __le16 size;
- __le64 req_addr;
-};
-
-/* cmd_nums (size:64b/8B) */
-struct cmd_nums {
- __le16 req_type;
- #define HWRM_VER_GET 0x0UL
- #define HWRM_FUNC_ECHO_RESPONSE 0xbUL
- #define HWRM_ERROR_RECOVERY_QCFG 0xcUL
- #define HWRM_FUNC_DRV_IF_CHANGE 0xdUL
- #define HWRM_FUNC_BUF_UNRGTR 0xeUL
- #define HWRM_FUNC_VF_CFG 0xfUL
- #define HWRM_RESERVED1 0x10UL
- #define HWRM_FUNC_RESET 0x11UL
- #define HWRM_FUNC_GETFID 0x12UL
- #define HWRM_FUNC_VF_ALLOC 0x13UL
- #define HWRM_FUNC_VF_FREE 0x14UL
- #define HWRM_FUNC_QCAPS 0x15UL
- #define HWRM_FUNC_QCFG 0x16UL
- #define HWRM_FUNC_CFG 0x17UL
- #define HWRM_FUNC_QSTATS 0x18UL
- #define HWRM_FUNC_CLR_STATS 0x19UL
- #define HWRM_FUNC_DRV_UNRGTR 0x1aUL
- #define HWRM_FUNC_VF_RESC_FREE 0x1bUL
- #define HWRM_FUNC_VF_VNIC_IDS_QUERY 0x1cUL
- #define HWRM_FUNC_DRV_RGTR 0x1dUL
- #define HWRM_FUNC_DRV_QVER 0x1eUL
- #define HWRM_FUNC_BUF_RGTR 0x1fUL
- #define HWRM_PORT_PHY_CFG 0x20UL
- #define HWRM_PORT_MAC_CFG 0x21UL
- #define HWRM_PORT_TS_QUERY 0x22UL
- #define HWRM_PORT_QSTATS 0x23UL
- #define HWRM_PORT_LPBK_QSTATS 0x24UL
- #define HWRM_PORT_CLR_STATS 0x25UL
- #define HWRM_PORT_LPBK_CLR_STATS 0x26UL
- #define HWRM_PORT_PHY_QCFG 0x27UL
- #define HWRM_PORT_MAC_QCFG 0x28UL
- #define HWRM_PORT_MAC_PTP_QCFG 0x29UL
- #define HWRM_PORT_PHY_QCAPS 0x2aUL
- #define HWRM_PORT_PHY_I2C_WRITE 0x2bUL
- #define HWRM_PORT_PHY_I2C_READ 0x2cUL
- #define HWRM_PORT_LED_CFG 0x2dUL
- #define HWRM_PORT_LED_QCFG 0x2eUL
- #define HWRM_PORT_LED_QCAPS 0x2fUL
- #define HWRM_QUEUE_QPORTCFG 0x30UL
- #define HWRM_QUEUE_QCFG 0x31UL
- #define HWRM_QUEUE_CFG 0x32UL
- #define HWRM_FUNC_VLAN_CFG 0x33UL
- #define HWRM_FUNC_VLAN_QCFG 0x34UL
- #define HWRM_QUEUE_PFCENABLE_QCFG 0x35UL
- #define HWRM_QUEUE_PFCENABLE_CFG 0x36UL
- #define HWRM_QUEUE_PRI2COS_QCFG 0x37UL
- #define HWRM_QUEUE_PRI2COS_CFG 0x38UL
- #define HWRM_QUEUE_COS2BW_QCFG 0x39UL
- #define HWRM_QUEUE_COS2BW_CFG 0x3aUL
- #define HWRM_QUEUE_DSCP_QCAPS 0x3bUL
- #define HWRM_QUEUE_DSCP2PRI_QCFG 0x3cUL
- #define HWRM_QUEUE_DSCP2PRI_CFG 0x3dUL
- #define HWRM_VNIC_ALLOC 0x40UL
- #define HWRM_VNIC_FREE 0x41UL
- #define HWRM_VNIC_CFG 0x42UL
- #define HWRM_VNIC_QCFG 0x43UL
- #define HWRM_VNIC_TPA_CFG 0x44UL
- #define HWRM_VNIC_TPA_QCFG 0x45UL
- #define HWRM_VNIC_RSS_CFG 0x46UL
- #define HWRM_VNIC_RSS_QCFG 0x47UL
- #define HWRM_VNIC_PLCMODES_CFG 0x48UL
- #define HWRM_VNIC_PLCMODES_QCFG 0x49UL
- #define HWRM_VNIC_QCAPS 0x4aUL
- #define HWRM_VNIC_UPDATE 0x4bUL
- #define HWRM_RING_ALLOC 0x50UL
- #define HWRM_RING_FREE 0x51UL
- #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL
- #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL
- #define HWRM_RING_AGGINT_QCAPS 0x54UL
- #define HWRM_RING_SCHQ_ALLOC 0x55UL
- #define HWRM_RING_SCHQ_CFG 0x56UL
- #define HWRM_RING_SCHQ_FREE 0x57UL
- #define HWRM_RING_RESET 0x5eUL
- #define HWRM_RING_GRP_ALLOC 0x60UL
- #define HWRM_RING_GRP_FREE 0x61UL
- #define HWRM_RING_CFG 0x62UL
- #define HWRM_RING_QCFG 0x63UL
- #define HWRM_RESERVED5 0x64UL
- #define HWRM_RESERVED6 0x65UL
- #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL
- #define HWRM_VNIC_RSS_COS_LB_CTX_FREE 0x71UL
- #define HWRM_QUEUE_MPLS_QCAPS 0x80UL
- #define HWRM_QUEUE_MPLSTC2PRI_QCFG 0x81UL
- #define HWRM_QUEUE_MPLSTC2PRI_CFG 0x82UL
- #define HWRM_QUEUE_VLANPRI_QCAPS 0x83UL
- #define HWRM_QUEUE_VLANPRI2PRI_QCFG 0x84UL
- #define HWRM_QUEUE_VLANPRI2PRI_CFG 0x85UL
- #define HWRM_QUEUE_GLOBAL_CFG 0x86UL
- #define HWRM_QUEUE_GLOBAL_QCFG 0x87UL
- #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG 0x88UL
- #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG 0x89UL
- #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG 0x8aUL
- #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG 0x8bUL
- #define HWRM_QUEUE_QCAPS 0x8cUL
- #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_QCFG 0x8dUL
- #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG 0x8eUL
- #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_QCFG 0x8fUL
- #define HWRM_CFA_L2_FILTER_ALLOC 0x90UL
- #define HWRM_CFA_L2_FILTER_FREE 0x91UL
- #define HWRM_CFA_L2_FILTER_CFG 0x92UL
- #define HWRM_CFA_L2_SET_RX_MASK 0x93UL
- #define HWRM_CFA_VLAN_ANTISPOOF_CFG 0x94UL
- #define HWRM_CFA_TUNNEL_FILTER_ALLOC 0x95UL
- #define HWRM_CFA_TUNNEL_FILTER_FREE 0x96UL
- #define HWRM_CFA_ENCAP_RECORD_ALLOC 0x97UL
- #define HWRM_CFA_ENCAP_RECORD_FREE 0x98UL
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC 0x99UL
- #define HWRM_CFA_NTUPLE_FILTER_FREE 0x9aUL
- #define HWRM_CFA_NTUPLE_FILTER_CFG 0x9bUL
- #define HWRM_CFA_EM_FLOW_ALLOC 0x9cUL
- #define HWRM_CFA_EM_FLOW_FREE 0x9dUL
- #define HWRM_CFA_EM_FLOW_CFG 0x9eUL
- #define HWRM_TUNNEL_DST_PORT_QUERY 0xa0UL
- #define HWRM_TUNNEL_DST_PORT_ALLOC 0xa1UL
- #define HWRM_TUNNEL_DST_PORT_FREE 0xa2UL
- #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG 0xa3UL
- #define HWRM_STAT_CTX_ENG_QUERY 0xafUL
- #define HWRM_STAT_CTX_ALLOC 0xb0UL
- #define HWRM_STAT_CTX_FREE 0xb1UL
- #define HWRM_STAT_CTX_QUERY 0xb2UL
- #define HWRM_STAT_CTX_CLR_STATS 0xb3UL
- #define HWRM_PORT_QSTATS_EXT 0xb4UL
- #define HWRM_PORT_PHY_MDIO_WRITE 0xb5UL
- #define HWRM_PORT_PHY_MDIO_READ 0xb6UL
- #define HWRM_PORT_PHY_MDIO_BUS_ACQUIRE 0xb7UL
- #define HWRM_PORT_PHY_MDIO_BUS_RELEASE 0xb8UL
- #define HWRM_PORT_QSTATS_EXT_PFC_WD 0xb9UL
- #define HWRM_RESERVED7 0xbaUL
- #define HWRM_PORT_TX_FIR_CFG 0xbbUL
- #define HWRM_PORT_TX_FIR_QCFG 0xbcUL
- #define HWRM_PORT_ECN_QSTATS 0xbdUL
- #define HWRM_FW_LIVEPATCH_QUERY 0xbeUL
- #define HWRM_FW_LIVEPATCH 0xbfUL
- #define HWRM_FW_RESET 0xc0UL
- #define HWRM_FW_QSTATUS 0xc1UL
- #define HWRM_FW_HEALTH_CHECK 0xc2UL
- #define HWRM_FW_SYNC 0xc3UL
- #define HWRM_FW_STATE_QCAPS 0xc4UL
- #define HWRM_FW_STATE_QUIESCE 0xc5UL
- #define HWRM_FW_STATE_BACKUP 0xc6UL
- #define HWRM_FW_STATE_RESTORE 0xc7UL
- #define HWRM_FW_SET_TIME 0xc8UL
- #define HWRM_FW_GET_TIME 0xc9UL
- #define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL
- #define HWRM_FW_GET_STRUCTURED_DATA 0xcbUL
- #define HWRM_FW_IPC_MAILBOX 0xccUL
- #define HWRM_FW_ECN_CFG 0xcdUL
- #define HWRM_FW_ECN_QCFG 0xceUL
- #define HWRM_FW_SECURE_CFG 0xcfUL
- #define HWRM_EXEC_FWD_RESP 0xd0UL
- #define HWRM_REJECT_FWD_RESP 0xd1UL
- #define HWRM_FWD_RESP 0xd2UL
- #define HWRM_FWD_ASYNC_EVENT_CMPL 0xd3UL
- #define HWRM_OEM_CMD 0xd4UL
- #define HWRM_PORT_PRBS_TEST 0xd5UL
- #define HWRM_PORT_SFP_SIDEBAND_CFG 0xd6UL
- #define HWRM_PORT_SFP_SIDEBAND_QCFG 0xd7UL
- #define HWRM_FW_STATE_UNQUIESCE 0xd8UL
- #define HWRM_PORT_DSC_DUMP 0xd9UL
- #define HWRM_PORT_EP_TX_QCFG 0xdaUL
- #define HWRM_PORT_EP_TX_CFG 0xdbUL
- #define HWRM_PORT_CFG 0xdcUL
- #define HWRM_PORT_QCFG 0xddUL
- #define HWRM_PORT_MAC_QCAPS 0xdfUL
- #define HWRM_TEMP_MONITOR_QUERY 0xe0UL
- #define HWRM_REG_POWER_QUERY 0xe1UL
- #define HWRM_CORE_FREQUENCY_QUERY 0xe2UL
- #define HWRM_REG_POWER_HISTOGRAM 0xe3UL
- #define HWRM_WOL_FILTER_ALLOC 0xf0UL
- #define HWRM_WOL_FILTER_FREE 0xf1UL
- #define HWRM_WOL_FILTER_QCFG 0xf2UL
- #define HWRM_WOL_REASON_QCFG 0xf3UL
- #define HWRM_CFA_METER_QCAPS 0xf4UL
- #define HWRM_CFA_METER_PROFILE_ALLOC 0xf5UL
- #define HWRM_CFA_METER_PROFILE_FREE 0xf6UL
- #define HWRM_CFA_METER_PROFILE_CFG 0xf7UL
- #define HWRM_CFA_METER_INSTANCE_ALLOC 0xf8UL
- #define HWRM_CFA_METER_INSTANCE_FREE 0xf9UL
- #define HWRM_CFA_METER_INSTANCE_CFG 0xfaUL
- #define HWRM_CFA_VFR_ALLOC 0xfdUL
- #define HWRM_CFA_VFR_FREE 0xfeUL
- #define HWRM_CFA_VF_PAIR_ALLOC 0x100UL
- #define HWRM_CFA_VF_PAIR_FREE 0x101UL
- #define HWRM_CFA_VF_PAIR_INFO 0x102UL
- #define HWRM_CFA_FLOW_ALLOC 0x103UL
- #define HWRM_CFA_FLOW_FREE 0x104UL
- #define HWRM_CFA_FLOW_FLUSH 0x105UL
- #define HWRM_CFA_FLOW_STATS 0x106UL
- #define HWRM_CFA_FLOW_INFO 0x107UL
- #define HWRM_CFA_DECAP_FILTER_ALLOC 0x108UL
- #define HWRM_CFA_DECAP_FILTER_FREE 0x109UL
- #define HWRM_CFA_VLAN_ANTISPOOF_QCFG 0x10aUL
- #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC 0x10bUL
- #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE 0x10cUL
- #define HWRM_CFA_PAIR_ALLOC 0x10dUL
- #define HWRM_CFA_PAIR_FREE 0x10eUL
- #define HWRM_CFA_PAIR_INFO 0x10fUL
- #define HWRM_FW_IPC_MSG 0x110UL
- #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO 0x111UL
- #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE 0x112UL
- #define HWRM_CFA_FLOW_AGING_TIMER_RESET 0x113UL
- #define HWRM_CFA_FLOW_AGING_CFG 0x114UL
- #define HWRM_CFA_FLOW_AGING_QCFG 0x115UL
- #define HWRM_CFA_FLOW_AGING_QCAPS 0x116UL
- #define HWRM_CFA_CTX_MEM_RGTR 0x117UL
- #define HWRM_CFA_CTX_MEM_UNRGTR 0x118UL
- #define HWRM_CFA_CTX_MEM_QCTX 0x119UL
- #define HWRM_CFA_CTX_MEM_QCAPS 0x11aUL
- #define HWRM_CFA_COUNTER_QCAPS 0x11bUL
- #define HWRM_CFA_COUNTER_CFG 0x11cUL
- #define HWRM_CFA_COUNTER_QCFG 0x11dUL
- #define HWRM_CFA_COUNTER_QSTATS 0x11eUL
- #define HWRM_CFA_TCP_FLAG_PROCESS_QCFG 0x11fUL
- #define HWRM_CFA_EEM_QCAPS 0x120UL
- #define HWRM_CFA_EEM_CFG 0x121UL
- #define HWRM_CFA_EEM_QCFG 0x122UL
- #define HWRM_CFA_EEM_OP 0x123UL
- #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS 0x124UL
- #define HWRM_CFA_TFLIB 0x125UL
- #define HWRM_CFA_LAG_GROUP_MEMBER_RGTR 0x126UL
- #define HWRM_CFA_LAG_GROUP_MEMBER_UNRGTR 0x127UL
- #define HWRM_CFA_TLS_FILTER_ALLOC 0x128UL
- #define HWRM_CFA_TLS_FILTER_FREE 0x129UL
- #define HWRM_CFA_RELEASE_AFM_FUNC 0x12aUL
- #define HWRM_ENGINE_CKV_STATUS 0x12eUL
- #define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
- #define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL
- #define HWRM_ENGINE_CKV_KEY_ADD 0x131UL
- #define HWRM_ENGINE_CKV_KEY_DELETE 0x132UL
- #define HWRM_ENGINE_CKV_FLUSH 0x133UL
- #define HWRM_ENGINE_CKV_RNG_GET 0x134UL
- #define HWRM_ENGINE_CKV_KEY_GEN 0x135UL
- #define HWRM_ENGINE_CKV_KEY_LABEL_CFG 0x136UL
- #define HWRM_ENGINE_CKV_KEY_LABEL_QCFG 0x137UL
- #define HWRM_ENGINE_QG_CONFIG_QUERY 0x13cUL
- #define HWRM_ENGINE_QG_QUERY 0x13dUL
- #define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY 0x13eUL
- #define HWRM_ENGINE_QG_METER_PROFILE_QUERY 0x13fUL
- #define HWRM_ENGINE_QG_METER_PROFILE_ALLOC 0x140UL
- #define HWRM_ENGINE_QG_METER_PROFILE_FREE 0x141UL
- #define HWRM_ENGINE_QG_METER_QUERY 0x142UL
- #define HWRM_ENGINE_QG_METER_BIND 0x143UL
- #define HWRM_ENGINE_QG_METER_UNBIND 0x144UL
- #define HWRM_ENGINE_QG_FUNC_BIND 0x145UL
- #define HWRM_ENGINE_SG_CONFIG_QUERY 0x146UL
- #define HWRM_ENGINE_SG_QUERY 0x147UL
- #define HWRM_ENGINE_SG_METER_QUERY 0x148UL
- #define HWRM_ENGINE_SG_METER_CONFIG 0x149UL
- #define HWRM_ENGINE_SG_QG_BIND 0x14aUL
- #define HWRM_ENGINE_QG_SG_UNBIND 0x14bUL
- #define HWRM_ENGINE_CONFIG_QUERY 0x154UL
- #define HWRM_ENGINE_STATS_CONFIG 0x155UL
- #define HWRM_ENGINE_STATS_CLEAR 0x156UL
- #define HWRM_ENGINE_STATS_QUERY 0x157UL
- #define HWRM_ENGINE_STATS_QUERY_CONTINUOUS_ERROR 0x158UL
- #define HWRM_ENGINE_RQ_ALLOC 0x15eUL
- #define HWRM_ENGINE_RQ_FREE 0x15fUL
- #define HWRM_ENGINE_CQ_ALLOC 0x160UL
- #define HWRM_ENGINE_CQ_FREE 0x161UL
- #define HWRM_ENGINE_NQ_ALLOC 0x162UL
- #define HWRM_ENGINE_NQ_FREE 0x163UL
- #define HWRM_ENGINE_ON_DIE_RQE_CREDITS 0x164UL
- #define HWRM_ENGINE_FUNC_QCFG 0x165UL
- #define HWRM_FUNC_RESOURCE_QCAPS 0x190UL
- #define HWRM_FUNC_VF_RESOURCE_CFG 0x191UL
- #define HWRM_FUNC_BACKING_STORE_QCAPS 0x192UL
- #define HWRM_FUNC_BACKING_STORE_CFG 0x193UL
- #define HWRM_FUNC_BACKING_STORE_QCFG 0x194UL
- #define HWRM_FUNC_VF_BW_CFG 0x195UL
- #define HWRM_FUNC_VF_BW_QCFG 0x196UL
- #define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL
- #define HWRM_FUNC_QSTATS_EXT 0x198UL
- #define HWRM_STAT_EXT_CTX_QUERY 0x199UL
- #define HWRM_FUNC_SPD_CFG 0x19aUL
- #define HWRM_FUNC_SPD_QCFG 0x19bUL
- #define HWRM_FUNC_PTP_PIN_QCFG 0x19cUL
- #define HWRM_FUNC_PTP_PIN_CFG 0x19dUL
- #define HWRM_FUNC_PTP_CFG 0x19eUL
- #define HWRM_FUNC_PTP_TS_QUERY 0x19fUL
- #define HWRM_FUNC_PTP_EXT_CFG 0x1a0UL
- #define HWRM_FUNC_PTP_EXT_QCFG 0x1a1UL
- #define HWRM_FUNC_KEY_CTX_ALLOC 0x1a2UL
- #define HWRM_FUNC_BACKING_STORE_CFG_V2 0x1a3UL
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2 0x1a4UL
- #define HWRM_FUNC_DBR_PACING_CFG 0x1a5UL
- #define HWRM_FUNC_DBR_PACING_QCFG 0x1a6UL
- #define HWRM_FUNC_DBR_PACING_BROADCAST_EVENT 0x1a7UL
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2 0x1a8UL
- #define HWRM_FUNC_DBR_PACING_NQLIST_QUERY 0x1a9UL
- #define HWRM_FUNC_DBR_RECOVERY_COMPLETED 0x1aaUL
- #define HWRM_FUNC_SYNCE_CFG 0x1abUL
- #define HWRM_FUNC_SYNCE_QCFG 0x1acUL
- #define HWRM_FUNC_KEY_CTX_FREE 0x1adUL
- #define HWRM_FUNC_LAG_MODE_CFG 0x1aeUL
- #define HWRM_FUNC_LAG_MODE_QCFG 0x1afUL
- #define HWRM_FUNC_LAG_CREATE 0x1b0UL
- #define HWRM_FUNC_LAG_UPDATE 0x1b1UL
- #define HWRM_FUNC_LAG_FREE 0x1b2UL
- #define HWRM_FUNC_LAG_QCFG 0x1b3UL
- #define HWRM_FUNC_TIMEDTX_PACING_RATE_ADD 0x1c2UL
- #define HWRM_FUNC_TIMEDTX_PACING_RATE_DELETE 0x1c3UL
- #define HWRM_FUNC_TIMEDTX_PACING_RATE_QUERY 0x1c4UL
- #define HWRM_SELFTEST_QLIST 0x200UL
- #define HWRM_SELFTEST_EXEC 0x201UL
- #define HWRM_SELFTEST_IRQ 0x202UL
- #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA 0x203UL
- #define HWRM_PCIE_QSTATS 0x204UL
- #define HWRM_MFG_FRU_WRITE_CONTROL 0x205UL
- #define HWRM_MFG_TIMERS_QUERY 0x206UL
- #define HWRM_MFG_OTP_CFG 0x207UL
- #define HWRM_MFG_OTP_QCFG 0x208UL
- #define HWRM_MFG_HDMA_TEST 0x209UL
- #define HWRM_MFG_FRU_EEPROM_WRITE 0x20aUL
- #define HWRM_MFG_FRU_EEPROM_READ 0x20bUL
- #define HWRM_MFG_SOC_IMAGE 0x20cUL
- #define HWRM_MFG_SOC_QSTATUS 0x20dUL
- #define HWRM_MFG_PARAM_CRITICAL_DATA_FINALIZE 0x20eUL
- #define HWRM_MFG_PARAM_CRITICAL_DATA_READ 0x20fUL
- #define HWRM_MFG_PARAM_CRITICAL_DATA_HEALTH 0x210UL
- #define HWRM_MFG_PRVSN_EXPORT_CSR 0x211UL
- #define HWRM_MFG_PRVSN_IMPORT_CERT 0x212UL
- #define HWRM_MFG_PRVSN_GET_STATE 0x213UL
- #define HWRM_MFG_GET_NVM_MEASUREMENT 0x214UL
- #define HWRM_MFG_PSOC_QSTATUS 0x215UL
- #define HWRM_MFG_SELFTEST_QLIST 0x216UL
- #define HWRM_MFG_SELFTEST_EXEC 0x217UL
- #define HWRM_STAT_GENERIC_QSTATS 0x218UL
- #define HWRM_MFG_PRVSN_EXPORT_CERT 0x219UL
- #define HWRM_STAT_DB_ERROR_QSTATS 0x21aUL
- #define HWRM_MFG_TESTS 0x21bUL
- #define HWRM_PORT_POE_CFG 0x230UL
- #define HWRM_PORT_POE_QCFG 0x231UL
- #define HWRM_UDCC_QCAPS 0x258UL
- #define HWRM_UDCC_CFG 0x259UL
- #define HWRM_UDCC_QCFG 0x25aUL
- #define HWRM_UDCC_SESSION_CFG 0x25bUL
- #define HWRM_UDCC_SESSION_QCFG 0x25cUL
- #define HWRM_UDCC_SESSION_QUERY 0x25dUL
- #define HWRM_UDCC_COMP_CFG 0x25eUL
- #define HWRM_UDCC_COMP_QCFG 0x25fUL
- #define HWRM_UDCC_COMP_QUERY 0x260UL
- #define HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS 0x261UL
- #define HWRM_QUEUE_PFCWD_TIMEOUT_CFG 0x262UL
- #define HWRM_QUEUE_PFCWD_TIMEOUT_QCFG 0x263UL
- #define HWRM_TF 0x2bcUL
- #define HWRM_TF_VERSION_GET 0x2bdUL
- #define HWRM_TF_SESSION_OPEN 0x2c6UL
- #define HWRM_TF_SESSION_REGISTER 0x2c8UL
- #define HWRM_TF_SESSION_UNREGISTER 0x2c9UL
- #define HWRM_TF_SESSION_CLOSE 0x2caUL
- #define HWRM_TF_SESSION_QCFG 0x2cbUL
- #define HWRM_TF_SESSION_RESC_QCAPS 0x2ccUL
- #define HWRM_TF_SESSION_RESC_ALLOC 0x2cdUL
- #define HWRM_TF_SESSION_RESC_FREE 0x2ceUL
- #define HWRM_TF_SESSION_RESC_FLUSH 0x2cfUL
- #define HWRM_TF_SESSION_RESC_INFO 0x2d0UL
- #define HWRM_TF_SESSION_HOTUP_STATE_SET 0x2d1UL
- #define HWRM_TF_SESSION_HOTUP_STATE_GET 0x2d2UL
- #define HWRM_TF_TBL_TYPE_GET 0x2daUL
- #define HWRM_TF_TBL_TYPE_SET 0x2dbUL
- #define HWRM_TF_TBL_TYPE_BULK_GET 0x2dcUL
- #define HWRM_TF_EM_INSERT 0x2eaUL
- #define HWRM_TF_EM_DELETE 0x2ebUL
- #define HWRM_TF_EM_HASH_INSERT 0x2ecUL
- #define HWRM_TF_EM_MOVE 0x2edUL
- #define HWRM_TF_TCAM_SET 0x2f8UL
- #define HWRM_TF_TCAM_GET 0x2f9UL
- #define HWRM_TF_TCAM_MOVE 0x2faUL
- #define HWRM_TF_TCAM_FREE 0x2fbUL
- #define HWRM_TF_GLOBAL_CFG_SET 0x2fcUL
- #define HWRM_TF_GLOBAL_CFG_GET 0x2fdUL
- #define HWRM_TF_IF_TBL_SET 0x2feUL
- #define HWRM_TF_IF_TBL_GET 0x2ffUL
- #define HWRM_TF_RESC_USAGE_SET 0x300UL
- #define HWRM_TF_RESC_USAGE_QUERY 0x301UL
- #define HWRM_TF_TBL_TYPE_ALLOC 0x302UL
- #define HWRM_TF_TBL_TYPE_FREE 0x303UL
- #define HWRM_TFC_TBL_SCOPE_QCAPS 0x380UL
- #define HWRM_TFC_TBL_SCOPE_ID_ALLOC 0x381UL
- #define HWRM_TFC_TBL_SCOPE_CONFIG 0x382UL
- #define HWRM_TFC_TBL_SCOPE_DECONFIG 0x383UL
- #define HWRM_TFC_TBL_SCOPE_FID_ADD 0x384UL
- #define HWRM_TFC_TBL_SCOPE_FID_REM 0x385UL
- #define HWRM_TFC_TBL_SCOPE_POOL_ALLOC 0x386UL
- #define HWRM_TFC_TBL_SCOPE_POOL_FREE 0x387UL
- #define HWRM_TFC_SESSION_ID_ALLOC 0x388UL
- #define HWRM_TFC_SESSION_FID_ADD 0x389UL
- #define HWRM_TFC_SESSION_FID_REM 0x38aUL
- #define HWRM_TFC_IDENT_ALLOC 0x38bUL
- #define HWRM_TFC_IDENT_FREE 0x38cUL
- #define HWRM_TFC_IDX_TBL_ALLOC 0x38dUL
- #define HWRM_TFC_IDX_TBL_ALLOC_SET 0x38eUL
- #define HWRM_TFC_IDX_TBL_SET 0x38fUL
- #define HWRM_TFC_IDX_TBL_GET 0x390UL
- #define HWRM_TFC_IDX_TBL_FREE 0x391UL
- #define HWRM_TFC_GLOBAL_ID_ALLOC 0x392UL
- #define HWRM_TFC_TCAM_SET 0x393UL
- #define HWRM_TFC_TCAM_GET 0x394UL
- #define HWRM_TFC_TCAM_ALLOC 0x395UL
- #define HWRM_TFC_TCAM_ALLOC_SET 0x396UL
- #define HWRM_TFC_TCAM_FREE 0x397UL
- #define HWRM_TFC_IF_TBL_SET 0x398UL
- #define HWRM_TFC_IF_TBL_GET 0x399UL
- #define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL
- #define HWRM_TFC_RESC_USAGE_QUERY 0x39bUL
- #define HWRM_SV 0x400UL
- #define HWRM_DBG_SERDES_TEST 0xff0eUL
- #define HWRM_DBG_LOG_BUFFER_FLUSH 0xff0fUL
- #define HWRM_DBG_READ_DIRECT 0xff10UL
- #define HWRM_DBG_READ_INDIRECT 0xff11UL
- #define HWRM_DBG_WRITE_DIRECT 0xff12UL
- #define HWRM_DBG_WRITE_INDIRECT 0xff13UL
- #define HWRM_DBG_DUMP 0xff14UL
- #define HWRM_DBG_ERASE_NVM 0xff15UL
- #define HWRM_DBG_CFG 0xff16UL
- #define HWRM_DBG_COREDUMP_LIST 0xff17UL
- #define HWRM_DBG_COREDUMP_INITIATE 0xff18UL
- #define HWRM_DBG_COREDUMP_RETRIEVE 0xff19UL
- #define HWRM_DBG_FW_CLI 0xff1aUL
- #define HWRM_DBG_I2C_CMD 0xff1bUL
- #define HWRM_DBG_RING_INFO_GET 0xff1cUL
- #define HWRM_DBG_CRASHDUMP_HEADER 0xff1dUL
- #define HWRM_DBG_CRASHDUMP_ERASE 0xff1eUL
- #define HWRM_DBG_DRV_TRACE 0xff1fUL
- #define HWRM_DBG_QCAPS 0xff20UL
- #define HWRM_DBG_QCFG 0xff21UL
- #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG 0xff22UL
- #define HWRM_DBG_USEQ_ALLOC 0xff23UL
- #define HWRM_DBG_USEQ_FREE 0xff24UL
- #define HWRM_DBG_USEQ_FLUSH 0xff25UL
- #define HWRM_DBG_USEQ_QCAPS 0xff26UL
- #define HWRM_DBG_USEQ_CW_CFG 0xff27UL
- #define HWRM_DBG_USEQ_SCHED_CFG 0xff28UL
- #define HWRM_DBG_USEQ_RUN 0xff29UL
- #define HWRM_DBG_USEQ_DELIVERY_REQ 0xff2aUL
- #define HWRM_DBG_USEQ_RESP_HDR 0xff2bUL
- #define HWRM_DBG_COREDUMP_CAPTURE 0xff2cUL
- #define HWRM_DBG_PTRACE 0xff2dUL
- #define HWRM_DBG_SIM_CABLE_STATE 0xff2eUL
- #define HWRM_NVM_GET_VPD_FIELD_INFO 0xffeaUL
- #define HWRM_NVM_SET_VPD_FIELD_INFO 0xffebUL
- #define HWRM_NVM_DEFRAG 0xffecUL
- #define HWRM_NVM_REQ_ARBITRATION 0xffedUL
- #define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
- #define HWRM_NVM_VALIDATE_OPTION 0xffefUL
- #define HWRM_NVM_FLUSH 0xfff0UL
- #define HWRM_NVM_GET_VARIABLE 0xfff1UL
- #define HWRM_NVM_SET_VARIABLE 0xfff2UL
- #define HWRM_NVM_INSTALL_UPDATE 0xfff3UL
- #define HWRM_NVM_MODIFY 0xfff4UL
- #define HWRM_NVM_VERIFY_UPDATE 0xfff5UL
- #define HWRM_NVM_GET_DEV_INFO 0xfff6UL
- #define HWRM_NVM_ERASE_DIR_ENTRY 0xfff7UL
- #define HWRM_NVM_MOD_DIR_ENTRY 0xfff8UL
- #define HWRM_NVM_FIND_DIR_ENTRY 0xfff9UL
- #define HWRM_NVM_GET_DIR_ENTRIES 0xfffaUL
- #define HWRM_NVM_GET_DIR_INFO 0xfffbUL
- #define HWRM_NVM_RAW_DUMP 0xfffcUL
- #define HWRM_NVM_READ 0xfffdUL
- #define HWRM_NVM_WRITE 0xfffeUL
- #define HWRM_NVM_RAW_WRITE_BLK 0xffffUL
- #define HWRM_LAST HWRM_NVM_RAW_WRITE_BLK
- __le16 unused_0[3];
-};
-
-/* ret_codes (size:64b/8B) */
-struct ret_codes {
- __le16 error_code;
- #define HWRM_ERR_CODE_SUCCESS 0x0UL
- #define HWRM_ERR_CODE_FAIL 0x1UL
- #define HWRM_ERR_CODE_INVALID_PARAMS 0x2UL
- #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL
- #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR 0x4UL
- #define HWRM_ERR_CODE_INVALID_FLAGS 0x5UL
- #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL
- #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL
- #define HWRM_ERR_CODE_NO_BUFFER 0x8UL
- #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL
- #define HWRM_ERR_CODE_HOT_RESET_PROGRESS 0xaUL
- #define HWRM_ERR_CODE_HOT_RESET_FAIL 0xbUL
- #define HWRM_ERR_CODE_NO_FLOW_COUNTER_DURING_ALLOC 0xcUL
- #define HWRM_ERR_CODE_KEY_HASH_COLLISION 0xdUL
- #define HWRM_ERR_CODE_KEY_ALREADY_EXISTS 0xeUL
- #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
- #define HWRM_ERR_CODE_BUSY 0x10UL
- #define HWRM_ERR_CODE_RESOURCE_LOCKED 0x11UL
- #define HWRM_ERR_CODE_PF_UNAVAILABLE 0x12UL
- #define HWRM_ERR_CODE_ENTITY_NOT_PRESENT 0x13UL
- #define HWRM_ERR_CODE_SECURE_SOC_ERROR 0x14UL
- #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
- #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
- #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
- #define HWRM_ERR_CODE_LAST HWRM_ERR_CODE_CMD_NOT_SUPPORTED
- __le16 unused_0[3];
-};
-
-/* hwrm_err_output (size:128b/16B) */
-struct hwrm_err_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 opaque_0;
- __le16 opaque_1;
- u8 cmd_err;
- u8 valid;
-};
-#define HWRM_NA_SIGNATURE ((__le32)(-1))
-#define HWRM_MAX_REQ_LEN 128
-#define HWRM_MAX_RESP_LEN 704
-#define HW_HASH_INDEX_SIZE 0x80
-#define HW_HASH_KEY_SIZE 40
-#define HWRM_RESP_VALID_KEY 1
-#define HWRM_TARGET_ID_BONO 0xFFF8
-#define HWRM_TARGET_ID_KONG 0xFFF9
-#define HWRM_TARGET_ID_APE 0xFFFA
-#define HWRM_TARGET_ID_TOOLS 0xFFFD
-#define HWRM_VERSION_MAJOR 1
-#define HWRM_VERSION_MINOR 10
-#define HWRM_VERSION_UPDATE 3
-#define HWRM_VERSION_RSVD 68
-#define HWRM_VERSION_STR "1.10.3.68"
-
-/* hwrm_ver_get_input (size:192b/24B) */
-struct hwrm_ver_get_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 hwrm_intf_maj;
- u8 hwrm_intf_min;
- u8 hwrm_intf_upd;
- u8 unused_0[5];
-};
-
-/* hwrm_ver_get_output (size:1408b/176B) */
-struct hwrm_ver_get_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 hwrm_intf_maj_8b;
- u8 hwrm_intf_min_8b;
- u8 hwrm_intf_upd_8b;
- u8 hwrm_intf_rsvd_8b;
- u8 hwrm_fw_maj_8b;
- u8 hwrm_fw_min_8b;
- u8 hwrm_fw_bld_8b;
- u8 hwrm_fw_rsvd_8b;
- u8 mgmt_fw_maj_8b;
- u8 mgmt_fw_min_8b;
- u8 mgmt_fw_bld_8b;
- u8 mgmt_fw_rsvd_8b;
- u8 netctrl_fw_maj_8b;
- u8 netctrl_fw_min_8b;
- u8 netctrl_fw_bld_8b;
- u8 netctrl_fw_rsvd_8b;
- __le32 dev_caps_cfg;
- #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
- #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
- #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL
- #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL
- #define VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED 0x10UL
- #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED 0x20UL
- #define VER_GET_RESP_DEV_CAPS_CFG_L2_FILTER_TYPES_ROCE_OR_L2_SUPPORTED 0x40UL
- #define VER_GET_RESP_DEV_CAPS_CFG_VIRTIO_VSWITCH_OFFLOAD_SUPPORTED 0x80UL
- #define VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED 0x100UL
- #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_AGING_SUPPORTED 0x200UL
- #define VER_GET_RESP_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED 0x400UL
- #define VER_GET_RESP_DEV_CAPS_CFG_CFA_EEM_SUPPORTED 0x800UL
- #define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED 0x1000UL
- #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL
- #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED 0x4000UL
- #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_BOOT_CAPABLE 0x8000UL
- #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_SOC_CAPABLE 0x10000UL
- u8 roce_fw_maj_8b;
- u8 roce_fw_min_8b;
- u8 roce_fw_bld_8b;
- u8 roce_fw_rsvd_8b;
- char hwrm_fw_name[16];
- char mgmt_fw_name[16];
- char netctrl_fw_name[16];
- char active_pkg_name[16];
- char roce_fw_name[16];
- __le16 chip_num;
- u8 chip_rev;
- u8 chip_metal;
- u8 chip_bond_id;
- u8 chip_platform_type;
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC 0x0UL
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA 0x1UL
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM 0x2UL
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_LAST VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM
- __le16 max_req_win_len;
- __le16 max_resp_len;
- __le16 def_req_timeout;
- u8 flags;
- #define VER_GET_RESP_FLAGS_DEV_NOT_RDY 0x1UL
- #define VER_GET_RESP_FLAGS_EXT_VER_AVAIL 0x2UL
- #define VER_GET_RESP_FLAGS_DEV_NOT_RDY_BACKING_STORE 0x4UL
- u8 unused_0[2];
- u8 always_1;
- __le16 hwrm_intf_major;
- __le16 hwrm_intf_minor;
- __le16 hwrm_intf_build;
- __le16 hwrm_intf_patch;
- __le16 hwrm_fw_major;
- __le16 hwrm_fw_minor;
- __le16 hwrm_fw_build;
- __le16 hwrm_fw_patch;
- __le16 mgmt_fw_major;
- __le16 mgmt_fw_minor;
- __le16 mgmt_fw_build;
- __le16 mgmt_fw_patch;
- __le16 netctrl_fw_major;
- __le16 netctrl_fw_minor;
- __le16 netctrl_fw_build;
- __le16 netctrl_fw_patch;
- __le16 roce_fw_major;
- __le16 roce_fw_minor;
- __le16 roce_fw_build;
- __le16 roce_fw_patch;
- __le16 max_ext_req_len;
- __le16 max_req_timeout;
- u8 unused_1[3];
- u8 valid;
-};
-
-/* eject_cmpl (size:128b/16B) */
-struct eject_cmpl {
- __le16 type;
- #define EJECT_CMPL_TYPE_MASK 0x3fUL
- #define EJECT_CMPL_TYPE_SFT 0
- #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL
- #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT
- #define EJECT_CMPL_FLAGS_MASK 0xffc0UL
- #define EJECT_CMPL_FLAGS_SFT 6
- #define EJECT_CMPL_FLAGS_ERROR 0x40UL
- __le16 len;
- __le32 opaque;
- __le16 v;
- #define EJECT_CMPL_V 0x1UL
- #define EJECT_CMPL_ERRORS_MASK 0xfffeUL
- #define EJECT_CMPL_ERRORS_SFT 1
- #define EJECT_CMPL_ERRORS_BUFFER_ERROR_MASK 0xeUL
- #define EJECT_CMPL_ERRORS_BUFFER_ERROR_SFT 1
- #define EJECT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0UL << 1)
- #define EJECT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (0x1UL << 1)
- #define EJECT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3UL << 1)
- #define EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH (0x5UL << 1)
- #define EJECT_CMPL_ERRORS_BUFFER_ERROR_LAST EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH
- __le16 reserved16;
- __le32 unused_2;
-};
-
-/* hwrm_cmpl (size:128b/16B) */
-struct hwrm_cmpl {
- __le16 type;
- #define CMPL_TYPE_MASK 0x3fUL
- #define CMPL_TYPE_SFT 0
- #define CMPL_TYPE_HWRM_DONE 0x20UL
- #define CMPL_TYPE_LAST CMPL_TYPE_HWRM_DONE
- __le16 sequence_id;
- __le32 unused_1;
- __le32 v;
- #define CMPL_V 0x1UL
- __le32 unused_3;
-};
-
-/* hwrm_fwd_req_cmpl (size:128b/16B) */
-struct hwrm_fwd_req_cmpl {
- __le16 req_len_type;
- #define FWD_REQ_CMPL_TYPE_MASK 0x3fUL
- #define FWD_REQ_CMPL_TYPE_SFT 0
- #define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL
- #define FWD_REQ_CMPL_TYPE_LAST FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ
- #define FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL
- #define FWD_REQ_CMPL_REQ_LEN_SFT 6
- __le16 source_id;
- __le32 unused0;
- __le32 req_buf_addr_v[2];
- #define FWD_REQ_CMPL_V 0x1UL
- #define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL
- #define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
-};
-
-/* hwrm_fwd_resp_cmpl (size:128b/16B) */
-struct hwrm_fwd_resp_cmpl {
- __le16 type;
- #define FWD_RESP_CMPL_TYPE_MASK 0x3fUL
- #define FWD_RESP_CMPL_TYPE_SFT 0
- #define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL
- #define FWD_RESP_CMPL_TYPE_LAST FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP
- __le16 source_id;
- __le16 resp_len;
- __le16 unused_1;
- __le32 resp_buf_addr_v[2];
- #define FWD_RESP_CMPL_V 0x1UL
- #define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL
- #define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
-};
-
-/* hwrm_async_event_cmpl (size:128b/16B) */
-struct hwrm_async_event_cmpl {
- __le16 type;
- #define ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_TYPE_LAST ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY 0x9UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG 0xaUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED 0x36UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION 0x37UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST 0x42UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE 0x43UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP 0x44UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD 0x46UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_RSS_CHANGE 0x47UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE 0x48UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_HW_DOORBELL_RECOVERY_READ_ERROR 0x49UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_CTX_ERROR 0x4aUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_UDCC_SESSION_CHANGE 0x4bUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PEER_MMAP_CHANGE 0x4dUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_REPRESENTOR_PAIR_CHANGE 0x4eUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_STAT_CHANGE 0x4fUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_HOST_COREDUMP 0x50UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x51UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_V 0x1UL
- #define ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
-};
-
-/* hwrm_async_event_cmpl_link_status_change (size:128b/16B) */
-struct hwrm_async_event_cmpl_link_status_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN 0x0UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_MASK 0xff00000UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_SFT 20
-};
-
-/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */
-struct hwrm_async_event_cmpl_port_conn_not_allowed {
- __le16 type;
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16)
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16)
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16)
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16)
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
-};
-
-/* hwrm_async_event_cmpl_link_speed_cfg_change (size:128b/16B) */
-struct hwrm_async_event_cmpl_link_speed_cfg_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
-};
-
-/* hwrm_async_event_cmpl_reset_notify (size:128b/16B) */
-struct hwrm_async_event_cmpl_reset_notify {
- __le16 type;
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY 0x8UL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_SFT 0
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_V 0x1UL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_SFT 0
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_STOP_TX_QUEUE 0x1UL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN 0x2UL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK 0xff00UL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_SFT 8
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MANAGEMENT_RESET_REQUEST (0x1UL << 8)
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL (0x2UL << 8)
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL (0x3UL << 8)
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET (0x4UL << 8)
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION (0x5UL << 8)
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK 0xffff0000UL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT 16
-};
-
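/*
 * Editorial sketch, not part of the removed hunk above: reset_notify packs
 * the requested driver action, a reason code and a firmware reset delay
 * (in 100 ms ticks) into event_data1.  A hypothetical decode using the
 * masks defined above:
 */
static void sketch_handle_reset_notify(
		const struct hwrm_async_event_cmpl_reset_notify *cmpl)
{
	u32 data1 = le32_to_cpu(cmpl->event_data1);
	u32 action = data1 &
		ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_MASK;
	u32 delay_ms = ((data1 &
		ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK) >>
		ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT) * 100;

	if (action == ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN)
		pr_debug("firmware reset: bring interface down, ~%u ms grace\n",
			 delay_ms);
}
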
-/* hwrm_async_event_cmpl_error_recovery (size:128b/16B) */
-struct hwrm_async_event_cmpl_error_recovery {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_ERROR_RECOVERY 0x9UL
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_ERROR_RECOVERY
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_V 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED 0x2UL
-};
-
-/* hwrm_async_event_cmpl_ring_monitor_msg (size:128b/16B) */
-struct hwrm_async_event_cmpl_ring_monitor_msg {
- __le16 type;
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_RING_MONITOR_MSG 0xaUL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_RING_MONITOR_MSG
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_TX 0x0UL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX 0x1UL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_CMPL 0x2UL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_CMPL
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_V 0x1UL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
-};
-
-/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */
-struct hwrm_async_event_cmpl_vf_cfg_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA2_VF_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA2_VF_ID_SFT 0
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE 0x10UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TF_OWNERSHIP_RELEASE 0x20UL
-};
-
-/* hwrm_async_event_cmpl_default_vnic_change (size:128b/16B) */
-struct hwrm_async_event_cmpl_default_vnic_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_MASK 0xffc0UL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_SFT 6
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION 0x35UL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_MASK 0x3UL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_SFT 0
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_ALLOC 0x1UL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE 0x2UL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_MASK 0x3fcUL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_SFT 2
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_MASK 0x3fffc00UL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_SFT 10
-};
-
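/*
 * Editorial sketch, not part of the removed hunk above: event_data1 of a
 * default_vnic_change completion packs the VNIC state, PF id and VF id as
 * adjacent bit fields.  A hypothetical decode of the state and VF id:
 */
static void sketch_decode_default_vnic_change(
		const struct hwrm_async_event_cmpl_default_vnic_change *cmpl)
{
	u32 data1 = le32_to_cpu(cmpl->event_data1);
	u32 state = data1 &
		ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_MASK;
	u32 vf_id = (data1 &
		ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_MASK) >>
		ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_SFT;

	if (state == ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_ALLOC)
		pr_debug("default VNIC allocated for VF %u\n", vf_id);
}
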
-/* hwrm_async_event_cmpl_hw_flow_aged (size:128b/16B) */
-struct hwrm_async_event_cmpl_hw_flow_aged {
- __le16 type;
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED 0x36UL
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_V 0x1UL
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_MASK 0x7fffffffUL
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_SFT 0
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION 0x80000000UL
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_RX (0x0UL << 31)
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX (0x1UL << 31)
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX
-};
-
-/* hwrm_async_event_cmpl_eem_cache_flush_req (size:128b/16B) */
-struct hwrm_async_event_cmpl_eem_cache_flush_req {
- __le16 type;
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_V 0x1UL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
-};
-
-/* hwrm_async_event_cmpl_eem_cache_flush_done (size:128b/16B) */
-struct hwrm_async_event_cmpl_eem_cache_flush_done {
- __le16 type;
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_V 0x1UL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_SFT 0
-};
-
-/* hwrm_async_event_cmpl_deferred_response (size:128b/16B) */
-struct hwrm_async_event_cmpl_deferred_response {
- __le16 type;
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_LAST ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE 0x40UL
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_SFT 0
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_V 0x1UL
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
-};
-
-/* hwrm_async_event_cmpl_echo_request (size:128b/16B) */
-struct hwrm_async_event_cmpl_echo_request {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST 0x42UL
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_V 0x1UL
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
-};
-
-/* hwrm_async_event_cmpl_phc_update (size:128b/16B) */
-struct hwrm_async_event_cmpl_phc_update {
- __le16 type;
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_PHC_UPDATE 0x43UL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_PHC_UPDATE
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_SFT 0
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_MASK 0xffff0000UL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_SFT 16
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_V 0x1UL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK 0xfUL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT 0
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_MASTER 0x1UL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_SECONDARY 0x2UL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_FAILOVER 0x3UL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE 0x4UL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK 0xffff0UL
- #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT 4
-};
-
-/* hwrm_async_event_cmpl_pps_timestamp (size:128b/16B) */
-struct hwrm_async_event_cmpl_pps_timestamp {
- __le16 type;
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_PPS_TIMESTAMP 0x44UL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_PPS_TIMESTAMP
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE 0x1UL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_INTERNAL 0x0UL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL 0x1UL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_MASK 0xeUL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_SFT 1
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_MASK 0xffff0UL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_SFT 4
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_V 0x1UL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_MASK 0xffffffffUL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_SFT 0
-};
-
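/*
 * Editorial sketch, not part of the removed hunk above: the PPS timestamp is
 * split across the two data words -- the upper 16 bits live in event_data2
 * and the lower 32 bits in event_data1.  A hypothetical recombination
 * (interpretation of the resulting units is left to the firmware spec):
 */
static u64 sketch_pps_timestamp(
		const struct hwrm_async_event_cmpl_pps_timestamp *cmpl)
{
	u64 upper = (le32_to_cpu(cmpl->event_data2) &
		ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_MASK) >>
		ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_SFT;
	u64 lower = le32_to_cpu(cmpl->event_data1) &
		ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_MASK;

	return (upper << 32) | lower;
}
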
-/* hwrm_async_event_cmpl_error_report (size:128b/16B) */
-struct hwrm_async_event_cmpl_error_report {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_ERROR_REPORT
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_V 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_DATA1_ERROR_TYPE_SFT 0
-};
-
-/* hwrm_async_event_cmpl_hwrm_error (size:128b/16B) */
-struct hwrm_async_event_cmpl_hwrm_error {
- __le16 type;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
-};
-
-/* hwrm_async_event_cmpl_error_report_base (size:128b/16B) */
-struct hwrm_async_event_cmpl_error_report_base {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_ERROR_REPORT
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_V 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD 0x5UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED
-};
-
-/* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */
-struct hwrm_async_event_cmpl_error_report_pause_storm {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_ERROR_REPORT
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_V 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM
-};
-
-/* hwrm_async_event_cmpl_error_report_invalid_signal (size:128b/16B) */
-struct hwrm_async_event_cmpl_error_report_invalid_signal {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_ERROR_REPORT
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_SFT 0
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_V 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL
-};
-
-/* hwrm_async_event_cmpl_error_report_nvm (size:128b/16B) */
-struct hwrm_async_event_cmpl_error_report_nvm {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_ERROR_REPORT
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_MASK 0xffffffffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_SFT 0
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_V 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_NVM_ERROR 0x3UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_NVM_ERROR
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_MASK 0xff00UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_SFT 8
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_WRITE (0x1UL << 8)
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE (0x2UL << 8)
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE
-};
-
-/* hwrm_async_event_cmpl_error_report_doorbell_drop_threshold (size:128b/16B) */
-struct hwrm_async_event_cmpl_error_report_doorbell_drop_threshold {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_ERROR_REPORT
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_V 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_MASK 0xffffff00UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_SFT 8
-};
-
-/* hwrm_async_event_cmpl_error_report_thermal (size:128b/16B) */
-struct hwrm_async_event_cmpl_error_report_thermal {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_ERROR_REPORT
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK 0xff00UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT 8
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_V 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_THERMAL_EVENT 0x5UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_THERMAL_EVENT
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK 0x700UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SFT 8
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN (0x0UL << 8)
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL (0x1UL << 8)
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL (0x2UL << 8)
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN (0x3UL << 8)
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR 0x800UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_DECREASING (0x0UL << 11)
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING (0x1UL << 11)
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING
-};
-
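/*
 * Editorial sketch, not part of the removed hunk above: a thermal
 * error_report completion carries the current and threshold temperatures in
 * event_data2.  A hypothetical extraction using the masks above (units as
 * defined by the firmware spec):
 */
static void sketch_decode_thermal_report(
		const struct hwrm_async_event_cmpl_error_report_thermal *cmpl)
{
	u32 data2 = le32_to_cpu(cmpl->event_data2);
	u8 cur_temp = data2 &
		ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK;
	u8 thr_temp = (data2 &
		ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>
		ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT;

	pr_warn("thermal threshold crossed: current %u, threshold %u\n",
		cur_temp, thr_temp);
}
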
-/* hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported (size:128b/16B) */
-struct hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_V 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED
-};
-
-/* hwrm_func_reset_input (size:192b/24B) */
-struct hwrm_func_reset_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL
- __le16 vf_id;
- u8 func_reset_level;
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL 0x0UL
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME 0x1UL
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN 0x2UL
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF 0x3UL
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_LAST FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF
- u8 unused_0;
-};
-
-/* hwrm_func_reset_output (size:128b/16B) */
-struct hwrm_func_reset_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
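/*
 * Editorial sketch, not part of the removed hunk above: populating a
 * hwrm_func_reset_input to reset only the calling function.  Allocation of
 * the request buffer, the common header fields (req_type, seq_id,
 * resp_addr, ...) and the actual send are handled by the driver's HWRM
 * plumbing and are omitted here; the helper name is hypothetical.
 */
static void sketch_fill_func_reset(struct hwrm_func_reset_input *req)
{
	req->enables = 0;	/* vf_id is not used for a RESETME request */
	req->func_reset_level = FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME;
}
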
-/* hwrm_func_getfid_input (size:192b/24B) */
-struct hwrm_func_getfid_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL
- __le16 pci_id;
- u8 unused_0[2];
-};
-
-/* hwrm_func_getfid_output (size:128b/16B) */
-struct hwrm_func_getfid_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_func_vf_alloc_input (size:192b/24B) */
-struct hwrm_func_vf_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL
- __le16 first_vf_id;
- __le16 num_vfs;
-};
-
-/* hwrm_func_vf_alloc_output (size:128b/16B) */
-struct hwrm_func_vf_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 first_vf_id;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_func_vf_free_input (size:192b/24B) */
-struct hwrm_func_vf_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID 0x1UL
- __le16 first_vf_id;
- __le16 num_vfs;
-};
-
-/* hwrm_func_vf_free_output (size:128b/16B) */
-struct hwrm_func_vf_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_vf_cfg_input (size:576b/72B) */
-struct hwrm_func_vf_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
- #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
- #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL
- #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x10UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x20UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS 0x40UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS 0x80UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS 0x100UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS 0x200UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x400UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x800UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_KTLS_TX_KEY_CTXS 0x1000UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_KTLS_RX_KEY_CTXS 0x2000UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_QUIC_TX_KEY_CTXS 0x4000UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_QUIC_RX_KEY_CTXS 0x8000UL
- __le16 mtu;
- __le16 guest_vlan;
- __le16 async_event_cr;
- u8 dflt_mac_addr[6];
- __le32 flags;
- #define FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x1UL
- #define FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x2UL
- #define FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x4UL
- #define FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x8UL
- #define FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x10UL
- #define FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x20UL
- #define FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x40UL
- #define FUNC_VF_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x80UL
- #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x100UL
- #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x200UL
- __le16 num_rsscos_ctxs;
- __le16 num_cmpl_rings;
- __le16 num_tx_rings;
- __le16 num_rx_rings;
- __le16 num_l2_ctxs;
- __le16 num_vnics;
- __le16 num_stat_ctxs;
- __le16 num_hw_ring_grps;
- __le32 num_ktls_tx_key_ctxs;
- __le32 num_ktls_rx_key_ctxs;
- __le16 num_msix;
- u8 unused[2];
- __le32 num_quic_tx_key_ctxs;
- __le32 num_quic_rx_key_ctxs;
-};
-
-/* hwrm_func_vf_cfg_output (size:128b/16B) */
-struct hwrm_func_vf_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_qcaps_input (size:192b/24B) */
-struct hwrm_func_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0[6];
-};
-
-/* hwrm_func_qcaps_output (size:1152b/144B) */
-struct hwrm_func_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- __le16 port_id;
- __le32 flags;
- #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
- #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
- #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
- #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
- #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
- #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
- #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
- #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
- #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
- #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL
- #define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED 0x1000UL
- #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL
- #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL
- #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL
- #define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED 0x10000UL
- #define FUNC_QCAPS_RESP_FLAGS_ADOPTED_PF_SUPPORTED 0x20000UL
- #define FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED 0x40000UL
- #define FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED 0x80000UL
- #define FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE 0x100000UL
- #define FUNC_QCAPS_RESP_FLAGS_DYNAMIC_TX_RING_ALLOC 0x200000UL
- #define FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE 0x400000UL
- #define FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE 0x800000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED 0x1000000UL
- #define FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD 0x2000000UL
- #define FUNC_QCAPS_RESP_FLAGS_NOTIFY_VF_DEF_VNIC_CHNG_SUPPORTED 0x4000000UL
- #define FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED 0x8000000UL
- #define FUNC_QCAPS_RESP_FLAGS_COREDUMP_CMD_SUPPORTED 0x10000000UL
- #define FUNC_QCAPS_RESP_FLAGS_CRASHDUMP_CMD_SUPPORTED 0x20000000UL
- #define FUNC_QCAPS_RESP_FLAGS_PFC_WD_STATS_SUPPORTED 0x40000000UL
- #define FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED 0x80000000UL
- u8 mac_address[6];
- __le16 max_rsscos_ctx;
- __le16 max_cmpl_rings;
- __le16 max_tx_rings;
- __le16 max_rx_rings;
- __le16 max_l2_ctxs;
- __le16 max_vnics;
- __le16 first_vf_id;
- __le16 max_vfs;
- __le16 max_stat_ctx;
- __le32 max_encap_records;
- __le32 max_decap_records;
- __le32 max_tx_em_flows;
- __le32 max_tx_wm_flows;
- __le32 max_rx_em_flows;
- __le32 max_rx_wm_flows;
- __le32 max_mcast_filters;
- __le32 max_flow_id;
- __le32 max_hw_ring_grps;
- __le16 max_sp_tx_rings;
- __le16 max_msix_vfs;
- __le32 flags_ext;
- #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT 0x8UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PROXY_MODE_SUPPORT 0x10UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_PROXY_SRC_INTF_OVERRIDE_SUPPORT 0x20UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_EVB_MODE_CFG_NOT_SUPPORTED 0x100UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_SOC_SPD_SUPPORTED 0x200UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED 0x400UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_FAST_RESET_CAPABLE 0x800UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_METADATA_CFG_CAPABLE 0x1000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_NVM_OPTION_ACTION_SUPPORTED 0x2000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_BD_METADATA_SUPPORTED 0x4000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_ECHO_REQUEST_SUPPORTED 0x8000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED 0x10000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED 0x20000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED 0x40000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_VF_CFG_ASYNC_FOR_PF_SUPPORTED 0x80000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PARTITION_BW_SUPPORTED 0x100000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED 0x200000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_KTLS_SUPPORTED 0x400000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_EP_RATE_CONTROL 0x800000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_MIN_BW_SUPPORTED 0x1000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP 0x2000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED 0x4000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_REQUIRED 0x8000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED 0x10000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_DBR_PACING_SUPPORTED 0x20000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_HW_DBR_DROP_RECOV_SUPPORTED 0x40000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_DISABLE_CQ_OVERFLOW_DETECTION_SUPPORTED 0x80000000UL
- u8 max_schqs;
- u8 mpc_chnls_cap;
- #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE 0x1UL
- #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RCE 0x2UL
- #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TE_CFA 0x4UL
- #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RE_CFA 0x8UL
- #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_PRIMATE 0x10UL
- __le16 max_key_ctxs_alloc;
- __le32 flags_ext2;
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_QUIC_SUPPORTED 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_KDNET_SUPPORTED 0x4UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED 0x8UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_DBR_DROP_RECOVERY_SUPPORTED 0x10UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_GENERIC_STATS_SUPPORTED 0x20UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED 0x40UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_SYNCE_SUPPORTED 0x80UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED 0x100UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED 0x200UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_HW_LAG_SUPPORTED 0x400UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_ON_CHIP_CTX_SUPPORTED 0x800UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_STEERING_TAG_SUPPORTED 0x1000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_ENHANCED_VF_SCALE_SUPPORTED 0x2000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_KEY_XID_PARTITION_SUPPORTED 0x4000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_CONCURRENT_KTLS_QUIC_SUPPORTED 0x8000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_CROSS_TC_CAP_SUPPORTED 0x10000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_PER_TC_CAP_SUPPORTED 0x20000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_PER_TC_RESERVATION_SUPPORTED 0x40000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_DB_ERROR_STATS_SUPPORTED 0x80000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED 0x100000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDCC_SUPPORTED 0x200000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_SO_TXTIME_SUPPORTED 0x400000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED 0x800000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_INGRESS_NIC_FLOW_SUPPORTED 0x1000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_LPBK_STATS_SUPPORTED 0x2000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_EGRESS_NIC_FLOW_SUPPORTED 0x4000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_MULTI_LOSSLESS_QUEUES_SUPPORTED 0x8000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_PEER_MMAP_SUPPORTED 0x10000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_PACING_SUPPORTED 0x20000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_VF_STAT_EJECTION_SUPPORTED 0x40000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_HOST_COREDUMP_SUPPORTED 0x80000000UL
- __le16 tunnel_disable_flag;
- #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL
- #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL
- #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NVGRE 0x4UL
- #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_L2GRE 0x8UL
- #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_GRE 0x10UL
- #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_IPINIP 0x20UL
- #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_MPLS 0x40UL
- #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_PPPOE 0x80UL
- __le16 xid_partition_cap;
- #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_TX_CK 0x1UL
- #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_RX_CK 0x2UL
- u8 device_serial_number[8];
- __le16 ctxs_per_partition;
- __le16 max_tso_segs;
- __le32 roce_vf_max_av;
- __le32 roce_vf_max_cq;
- __le32 roce_vf_max_mrw;
- __le32 roce_vf_max_qp;
- __le32 roce_vf_max_srq;
- __le32 roce_vf_max_gid;
- __le32 flags_ext3;
- #define FUNC_QCAPS_RESP_FLAGS_EXT3_RM_RSV_WHILE_ALLOC_CAP 0x1UL
- u8 unused_3[7];
- u8 valid;
-};
-
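/*
 * Editorial sketch, not part of the removed hunk above: testing a single
 * capability bit in the hwrm_func_qcaps_output flags word.  The response
 * pointer and the surrounding query logic are hypothetical.
 */
static bool sketch_error_recovery_capable(
		const struct hwrm_func_qcaps_output *resp)
{
	u32 flags = le32_to_cpu(resp->flags);

	return flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE;
}
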
-/* hwrm_func_qcfg_input (size:192b/24B) */
-struct hwrm_func_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0[6];
-};
-
-/* hwrm_func_qcfg_output (size:1280b/160B) */
-struct hwrm_func_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- __le16 port_id;
- __le16 vlan;
- __le16 flags;
- #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL
- #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
- #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
- #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
- #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
- #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
- #define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL
- #define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 0x80UL
- #define FUNC_QCFG_RESP_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x100UL
- #define FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED 0x200UL
- #define FUNC_QCFG_RESP_FLAGS_PPP_PUSH_MODE_ENABLED 0x400UL
- #define FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED 0x800UL
- #define FUNC_QCFG_RESP_FLAGS_FAST_RESET_ALLOWED 0x1000UL
- #define FUNC_QCFG_RESP_FLAGS_MULTI_ROOT 0x2000UL
- #define FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV 0x4000UL
- #define FUNC_QCFG_RESP_FLAGS_ROCE_VNIC_ID_VALID 0x8000UL
- u8 mac_address[6];
- __le16 pci_id;
- __le16 alloc_rsscos_ctx;
- __le16 alloc_cmpl_rings;
- __le16 alloc_tx_rings;
- __le16 alloc_rx_rings;
- __le16 alloc_l2_ctx;
- __le16 alloc_vnics;
- __le16 admin_mtu;
- __le16 mru;
- __le16 stat_ctx_id;
- u8 port_partition_type;
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF 0x0UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS 0x1UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2 0x5UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_LAST FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN
- u8 port_pf_cnt;
- #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL
- #define FUNC_QCFG_RESP_PORT_PF_CNT_LAST FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL
- __le16 dflt_vnic_id;
- __le16 max_mtu_configured;
- __le32 min_bw;
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
- #define FUNC_QCFG_RESP_MIN_BW_SCALE 0x10000000UL
- #define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 max_bw;
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0
- #define FUNC_QCFG_RESP_MAX_BW_SCALE 0x10000000UL
- #define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 evb_mode;
- #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
- #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
- #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
- #define FUNC_QCFG_RESP_EVB_MODE_LAST FUNC_QCFG_RESP_EVB_MODE_VEPA
- u8 options;
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SFT 0
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128
- #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL
- #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_SFT 2
- #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2)
- #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2)
- #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2)
- #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO
- #define FUNC_QCFG_RESP_OPTIONS_RSVD_MASK 0xf0UL
- #define FUNC_QCFG_RESP_OPTIONS_RSVD_SFT 4
- __le16 alloc_vfs;
- __le32 alloc_mcast_filters;
- __le32 alloc_hw_ring_grps;
- __le16 alloc_sp_tx_rings;
- __le16 alloc_stat_ctx;
- __le16 alloc_msix;
- __le16 registered_vfs;
- __le16 l2_doorbell_bar_size_kb;
- u8 active_endpoints;
- u8 always_1;
- __le32 reset_addr_poll;
- __le16 legacy_l2_db_size_kb;
- __le16 svif_info;
- #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_MASK 0x7fffUL
- #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_SFT 0
- #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_VALID 0x8000UL
- u8 mpc_chnls;
- #define FUNC_QCFG_RESP_MPC_CHNLS_TCE_ENABLED 0x1UL
- #define FUNC_QCFG_RESP_MPC_CHNLS_RCE_ENABLED 0x2UL
- #define FUNC_QCFG_RESP_MPC_CHNLS_TE_CFA_ENABLED 0x4UL
- #define FUNC_QCFG_RESP_MPC_CHNLS_RE_CFA_ENABLED 0x8UL
- #define FUNC_QCFG_RESP_MPC_CHNLS_PRIMATE_ENABLED 0x10UL
- u8 db_page_size;
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_4KB 0x0UL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_8KB 0x1UL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_16KB 0x2UL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_32KB 0x3UL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_64KB 0x4UL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_128KB 0x5UL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_256KB 0x6UL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_512KB 0x7UL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_1MB 0x8UL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_2MB 0x9UL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB 0xaUL
- #define FUNC_QCFG_RESP_DB_PAGE_SIZE_LAST FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB
- __le16 roce_vnic_id;
- __le32 partition_min_bw;
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_SFT 0
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE 0x10000000UL
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BYTES
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100
- __le32 partition_max_bw;
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_SFT 0
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE 0x10000000UL
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BYTES
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
- __le16 host_mtu;
- __le16 flags2;
- #define FUNC_QCFG_RESP_FLAGS2_SRIOV_DSCP_INSERT_ENABLED 0x1UL
- u8 unused_4[2];
- u8 port_kdnet_mode;
- #define FUNC_QCFG_RESP_PORT_KDNET_MODE_DISABLED 0x0UL
- #define FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED 0x1UL
- #define FUNC_QCFG_RESP_PORT_KDNET_MODE_LAST FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED
- u8 kdnet_pcie_function;
- __le16 port_kdnet_fid;
- u8 unused_5[2];
- __le32 num_ktls_tx_key_ctxs;
- __le32 num_ktls_rx_key_ctxs;
- u8 lag_id;
- u8 parif;
- u8 fw_lag_id;
- u8 unused_6;
- __le32 num_quic_tx_key_ctxs;
- __le32 num_quic_rx_key_ctxs;
- __le32 roce_max_av_per_vf;
- __le32 roce_max_cq_per_vf;
- __le32 roce_max_mrw_per_vf;
- __le32 roce_max_qp_per_vf;
- __le32 roce_max_srq_per_vf;
- __le32 roce_max_gid_per_vf;
- __le16 xid_partition_cfg;
- #define FUNC_QCFG_RESP_XID_PARTITION_CFG_TX_CK 0x1UL
- #define FUNC_QCFG_RESP_XID_PARTITION_CFG_RX_CK 0x2UL
- u8 unused_7;
- u8 valid;
-};
-
-/* hwrm_func_cfg_input (size:1280b/160B) */
-struct hwrm_func_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- __le16 num_msix;
- __le32 flags;
- #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL
- #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL
- #define FUNC_CFG_REQ_FLAGS_RSVD_MASK 0x1fcUL
- #define FUNC_CFG_REQ_FLAGS_RSVD_SFT 2
- #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL
- #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL
- #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL
- #define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL
- #define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x2000UL
- #define FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x4000UL
- #define FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x8000UL
- #define FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x10000UL
- #define FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x20000UL
- #define FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x40000UL
- #define FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x80000UL
- #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL
- #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL
- #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL
- #define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL
- #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE 0x1000000UL
- #define FUNC_CFG_REQ_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x2000000UL
- #define FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS 0x4000000UL
- #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x8000000UL
- #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x10000000UL
- #define FUNC_CFG_REQ_FLAGS_BD_METADATA_ENABLE 0x20000000UL
- #define FUNC_CFG_REQ_FLAGS_BD_METADATA_DISABLE 0x40000000UL
- __le32 enables;
- #define FUNC_CFG_REQ_ENABLES_ADMIN_MTU 0x1UL
- #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
- #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
- #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
- #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
- #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
- #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
- #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
- #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
- #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
- #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
- #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
- #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
- #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
- #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
- #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL
- #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL
- #define FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT 0x800000UL
- #define FUNC_CFG_REQ_ENABLES_SCHQ_ID 0x1000000UL
- #define FUNC_CFG_REQ_ENABLES_MPC_CHNLS 0x2000000UL
- #define FUNC_CFG_REQ_ENABLES_PARTITION_MIN_BW 0x4000000UL
- #define FUNC_CFG_REQ_ENABLES_PARTITION_MAX_BW 0x8000000UL
- #define FUNC_CFG_REQ_ENABLES_TPID 0x10000000UL
- #define FUNC_CFG_REQ_ENABLES_HOST_MTU 0x20000000UL
- #define FUNC_CFG_REQ_ENABLES_KTLS_TX_KEY_CTXS 0x40000000UL
- #define FUNC_CFG_REQ_ENABLES_KTLS_RX_KEY_CTXS 0x80000000UL
- __le16 admin_mtu;
- __le16 mru;
- __le16 num_rsscos_ctxs;
- __le16 num_cmpl_rings;
- __le16 num_tx_rings;
- __le16 num_rx_rings;
- __le16 num_l2_ctxs;
- __le16 num_vnics;
- __le16 num_stat_ctxs;
- __le16 num_hw_ring_grps;
- u8 dflt_mac_addr[6];
- __le16 dflt_vlan;
- __be32 dflt_ip_addr[4];
- __le32 min_bw;
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0
- #define FUNC_CFG_REQ_MIN_BW_SCALE 0x10000000UL
- #define FUNC_CFG_REQ_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_CFG_REQ_MIN_BW_SCALE_LAST FUNC_CFG_REQ_MIN_BW_SCALE_BYTES
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 max_bw;
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0
- #define FUNC_CFG_REQ_MAX_BW_SCALE 0x10000000UL
- #define FUNC_CFG_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_CFG_REQ_MAX_BW_SCALE_LAST FUNC_CFG_REQ_MAX_BW_SCALE_BYTES
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
- __le16 async_event_cr;
- u8 vlan_antispoof_mode;
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK 0x0UL
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN 0x1UL
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN 0x3UL
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_LAST FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN
- u8 allowed_vlan_pris;
- u8 evb_mode;
- #define FUNC_CFG_REQ_EVB_MODE_NO_EVB 0x0UL
- #define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL
- #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
- #define FUNC_CFG_REQ_EVB_MODE_LAST FUNC_CFG_REQ_EVB_MODE_VEPA
- u8 options;
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SFT 0
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_LAST FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128
- #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL
- #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_SFT 2
- #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2)
- #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2)
- #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2)
- #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO
- #define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xf0UL
- #define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 4
- __le16 num_mcast_filters;
- __le16 schq_id;
- __le16 mpc_chnls;
- #define FUNC_CFG_REQ_MPC_CHNLS_TCE_ENABLE 0x1UL
- #define FUNC_CFG_REQ_MPC_CHNLS_TCE_DISABLE 0x2UL
- #define FUNC_CFG_REQ_MPC_CHNLS_RCE_ENABLE 0x4UL
- #define FUNC_CFG_REQ_MPC_CHNLS_RCE_DISABLE 0x8UL
- #define FUNC_CFG_REQ_MPC_CHNLS_TE_CFA_ENABLE 0x10UL
- #define FUNC_CFG_REQ_MPC_CHNLS_TE_CFA_DISABLE 0x20UL
- #define FUNC_CFG_REQ_MPC_CHNLS_RE_CFA_ENABLE 0x40UL
- #define FUNC_CFG_REQ_MPC_CHNLS_RE_CFA_DISABLE 0x80UL
- #define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_ENABLE 0x100UL
- #define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_DISABLE 0x200UL
- __le32 partition_min_bw;
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_SFT 0
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE 0x10000000UL
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_LAST FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BYTES
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100
- __le32 partition_max_bw;
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_SFT 0
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE 0x10000000UL
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BYTES
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
- __be16 tpid;
- __le16 host_mtu;
- __le32 flags2;
- #define FUNC_CFG_REQ_FLAGS2_KTLS_KEY_CTX_ASSETS_TEST 0x1UL
- #define FUNC_CFG_REQ_FLAGS2_QUIC_KEY_CTX_ASSETS_TEST 0x2UL
- __le32 enables2;
- #define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL
- #define FUNC_CFG_REQ_ENABLES2_DB_PAGE_SIZE 0x2UL
- #define FUNC_CFG_REQ_ENABLES2_QUIC_TX_KEY_CTXS 0x4UL
- #define FUNC_CFG_REQ_ENABLES2_QUIC_RX_KEY_CTXS 0x8UL
- #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_AV_PER_VF 0x10UL
- #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_CQ_PER_VF 0x20UL
- #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_MRW_PER_VF 0x40UL
- #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_QP_PER_VF 0x80UL
- #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_SRQ_PER_VF 0x100UL
- #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_GID_PER_VF 0x200UL
- #define FUNC_CFG_REQ_ENABLES2_XID_PARTITION_CFG 0x400UL
- u8 port_kdnet_mode;
- #define FUNC_CFG_REQ_PORT_KDNET_MODE_DISABLED 0x0UL
- #define FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED 0x1UL
- #define FUNC_CFG_REQ_PORT_KDNET_MODE_LAST FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED
- u8 db_page_size;
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_4KB 0x0UL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_8KB 0x1UL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_16KB 0x2UL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_32KB 0x3UL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_64KB 0x4UL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_128KB 0x5UL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_256KB 0x6UL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_512KB 0x7UL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_1MB 0x8UL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_2MB 0x9UL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_4MB 0xaUL
- #define FUNC_CFG_REQ_DB_PAGE_SIZE_LAST FUNC_CFG_REQ_DB_PAGE_SIZE_4MB
- u8 unused_1[2];
- __le32 num_ktls_tx_key_ctxs;
- __le32 num_ktls_rx_key_ctxs;
- __le32 num_quic_tx_key_ctxs;
- __le32 num_quic_rx_key_ctxs;
- __le32 roce_max_av_per_vf;
- __le32 roce_max_cq_per_vf;
- __le32 roce_max_mrw_per_vf;
- __le32 roce_max_qp_per_vf;
- __le32 roce_max_srq_per_vf;
- __le32 roce_max_gid_per_vf;
- __le16 xid_partition_cfg;
- #define FUNC_CFG_REQ_XID_PARTITION_CFG_TX_CK 0x1UL
- #define FUNC_CFG_REQ_XID_PARTITION_CFG_RX_CK 0x2UL
- __le16 unused_2;
-};
-
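The min_bw and max_bw words in hwrm_func_cfg_input pack a 28-bit bandwidth value (bits 0..27), a scale bit (bit 28, bits vs. bytes) and a 3-bit unit code (bits 29..31) into a single __le32, as the masks and shifts above spell out. A minimal sketch of composing such a word from those macros; the helper name bnxt_encode_bw() is hypothetical and not part of this header:

static inline u32 bnxt_encode_bw(u32 value, bool bytes, u32 unit)
{
	u32 bw = value & FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK;	/* bits 0..27 */

	if (bytes)
		bw |= FUNC_CFG_REQ_MIN_BW_SCALE_BYTES;		/* bit 28: byte scale */
	bw |= unit & FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK;	/* bits 29..31: unit code */
	return bw;
}

A caller would then store the result in little-endian form, e.g. req->min_bw = cpu_to_le32(bnxt_encode_bw(100, false, FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA)).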
-/* hwrm_func_cfg_output (size:128b/16B) */
-struct hwrm_func_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_cfg_cmd_err (size:64b/8B) */
-struct hwrm_func_cfg_cmd_err {
- u8 code;
- #define FUNC_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_BW_RANGE 0x1UL
- #define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_MORE_THAN_MAX 0x2UL
- #define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_BW_UNSUPPORTED 0x3UL
- #define FUNC_CFG_CMD_ERR_CODE_PARTITION_BW_PERCENT 0x4UL
- #define FUNC_CFG_CMD_ERR_CODE_LAST FUNC_CFG_CMD_ERR_CODE_PARTITION_BW_PERCENT
- u8 unused_0[7];
-};
-
-/* hwrm_func_qstats_input (size:192b/24B) */
-struct hwrm_func_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 flags;
- #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL
- #define FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x2UL
- #define FUNC_QSTATS_REQ_FLAGS_L2_ONLY 0x4UL
- u8 unused_0[5];
-};
-
-/* hwrm_func_qstats_output (size:1408b/176B) */
-struct hwrm_func_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_drop_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_drop_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 rx_agg_pkts;
- __le64 rx_agg_bytes;
- __le64 rx_agg_events;
- __le64 rx_agg_aborts;
- u8 clear_seq;
- u8 unused_0[6];
- u8 valid;
-};
-
-/* hwrm_func_qstats_ext_input (size:256b/32B) */
-struct hwrm_func_qstats_ext_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 flags;
- #define FUNC_QSTATS_EXT_REQ_FLAGS_ROCE_ONLY 0x1UL
- #define FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x2UL
- u8 unused_0[1];
- __le32 enables;
- #define FUNC_QSTATS_EXT_REQ_ENABLES_SCHQ_ID 0x1UL
- __le16 schq_id;
- __le16 traffic_class;
- u8 unused_1[4];
-};
-
-/* hwrm_func_qstats_ext_output (size:1536b/192B) */
-struct hwrm_func_qstats_ext_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_error_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_error_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 rx_tpa_eligible_pkt;
- __le64 rx_tpa_eligible_bytes;
- __le64 rx_tpa_pkt;
- __le64 rx_tpa_bytes;
- __le64 rx_tpa_errors;
- __le64 rx_tpa_events;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_clr_stats_input (size:192b/24B) */
-struct hwrm_func_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0[6];
-};
-
-/* hwrm_func_clr_stats_output (size:128b/16B) */
-struct hwrm_func_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_vf_resc_free_input (size:192b/24B) */
-struct hwrm_func_vf_resc_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vf_id;
- u8 unused_0[6];
-};
-
-/* hwrm_func_vf_resc_free_output (size:128b/16B) */
-struct hwrm_func_vf_resc_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_drv_rgtr_input (size:896b/112B) */
-struct hwrm_func_drv_rgtr_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT 0x20UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT 0x40UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_FAST_RESET_SUPPORT 0x80UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_RSS_STRICT_HASH_TYPE_SUPPORT 0x100UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT 0x200UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_ASYM_QUEUE_CFG_SUPPORT 0x400UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_TF_INGRESS_NIC_FLOW_MODE 0x800UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_TF_EGRESS_NIC_FLOW_MODE 0x1000UL
- __le32 enables;
- #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP 0x4UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL
- __le16 os_type;
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN 0x0UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER 0x1UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS 0xeUL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS 0x12UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS 0x1dUL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX 0x24UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD 0x2aUL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI 0x68UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 0x73UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI 0x8000UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_LAST FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI
- u8 ver_maj_8b;
- u8 ver_min_8b;
- u8 ver_upd_8b;
- u8 unused_0[3];
- __le32 timestamp;
- u8 unused_1[4];
- __le32 vf_req_fwd[8];
- __le32 async_event_fwd[8];
- __le16 ver_maj;
- __le16 ver_min;
- __le16 ver_upd;
- __le16 ver_patch;
-};
-
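The vf_req_fwd and async_event_fwd arrays are eight little-endian 32-bit words each, i.e. 256-bit bitmaps, used as one bit per request/async-event id that the firmware should forward to the driver. A minimal sketch, assuming that bitmap interpretation, of enabling forwarding for a single async event id before the request is sent (the helper name is hypothetical):

static inline void bnxt_fwd_async_event(struct hwrm_func_drv_rgtr_input *req,
					unsigned int event_id)
{
	/* one bit per event id, 32 ids per __le32 word */
	req->async_event_fwd[event_id / 32] |= cpu_to_le32(1U << (event_id % 32));
	req->enables |= cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
}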
-/* hwrm_func_drv_rgtr_output (size:128b/16B) */
-struct hwrm_func_drv_rgtr_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED 0x1UL
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_func_drv_unrgtr_input (size:192b/24B) */
-struct hwrm_func_drv_unrgtr_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN 0x1UL
- u8 unused_0[4];
-};
-
-/* hwrm_func_drv_unrgtr_output (size:128b/16B) */
-struct hwrm_func_drv_unrgtr_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_buf_rgtr_input (size:1024b/128B) */
-struct hwrm_func_buf_rgtr_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID 0x1UL
- #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR 0x2UL
- __le16 vf_id;
- __le16 req_buf_num_pages;
- __le16 req_buf_page_size;
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B 0x4UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K 0xcUL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K 0xdUL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K 0x10UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M 0x15UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M 0x16UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G 0x1eUL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_LAST FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G
- __le16 req_buf_len;
- __le16 resp_buf_len;
- u8 unused_0[2];
- __le64 req_buf_page_addr0;
- __le64 req_buf_page_addr1;
- __le64 req_buf_page_addr2;
- __le64 req_buf_page_addr3;
- __le64 req_buf_page_addr4;
- __le64 req_buf_page_addr5;
- __le64 req_buf_page_addr6;
- __le64 req_buf_page_addr7;
- __le64 req_buf_page_addr8;
- __le64 req_buf_page_addr9;
- __le64 error_buf_addr;
- __le64 resp_buf_addr;
-};
-
-/* hwrm_func_buf_rgtr_output (size:128b/16B) */
-struct hwrm_func_buf_rgtr_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_drv_qver_input (size:192b/24B) */
-struct hwrm_func_drv_qver_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 reserved;
- __le16 fid;
- u8 driver_type;
- #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_L2 0x0UL
- #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_ROCE 0x1UL
- #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_LAST FUNC_DRV_QVER_REQ_DRIVER_TYPE_ROCE
- u8 unused_0;
-};
-
-/* hwrm_func_drv_qver_output (size:256b/32B) */
-struct hwrm_func_drv_qver_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 os_type;
- #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN 0x0UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER 0x1UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS 0xeUL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS 0x12UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS 0x1dUL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX 0x24UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD 0x2aUL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI 0x68UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 0x73UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI 0x8000UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_LAST FUNC_DRV_QVER_RESP_OS_TYPE_UEFI
- u8 ver_maj_8b;
- u8 ver_min_8b;
- u8 ver_upd_8b;
- u8 unused_0[3];
- __le16 ver_maj;
- __le16 ver_min;
- __le16 ver_upd;
- __le16 ver_patch;
- u8 unused_1[7];
- u8 valid;
-};
-
-/* hwrm_func_resource_qcaps_input (size:192b/24B) */
-struct hwrm_func_resource_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0[6];
-};
-
-/* hwrm_func_resource_qcaps_output (size:704b/88B) */
-struct hwrm_func_resource_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 max_vfs;
- __le16 max_msix;
- __le16 vf_reservation_strategy;
- #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MAXIMAL 0x0UL
- #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL 0x1UL
- #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC 0x2UL
- #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_LAST FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC
- __le16 min_rsscos_ctx;
- __le16 max_rsscos_ctx;
- __le16 min_cmpl_rings;
- __le16 max_cmpl_rings;
- __le16 min_tx_rings;
- __le16 max_tx_rings;
- __le16 min_rx_rings;
- __le16 max_rx_rings;
- __le16 min_l2_ctxs;
- __le16 max_l2_ctxs;
- __le16 min_vnics;
- __le16 max_vnics;
- __le16 min_stat_ctx;
- __le16 max_stat_ctx;
- __le16 min_hw_ring_grps;
- __le16 max_hw_ring_grps;
- __le16 max_tx_scheduler_inputs;
- __le16 flags;
- #define FUNC_RESOURCE_QCAPS_RESP_FLAGS_MIN_GUARANTEED 0x1UL
- __le16 min_msix;
- __le32 min_ktls_tx_key_ctxs;
- __le32 max_ktls_tx_key_ctxs;
- __le32 min_ktls_rx_key_ctxs;
- __le32 max_ktls_rx_key_ctxs;
- __le32 min_quic_tx_key_ctxs;
- __le32 max_quic_tx_key_ctxs;
- __le32 min_quic_rx_key_ctxs;
- __le32 max_quic_rx_key_ctxs;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_func_vf_resource_cfg_input (size:704b/88B) */
-struct hwrm_func_vf_resource_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vf_id;
- __le16 max_msix;
- __le16 min_rsscos_ctx;
- __le16 max_rsscos_ctx;
- __le16 min_cmpl_rings;
- __le16 max_cmpl_rings;
- __le16 min_tx_rings;
- __le16 max_tx_rings;
- __le16 min_rx_rings;
- __le16 max_rx_rings;
- __le16 min_l2_ctxs;
- __le16 max_l2_ctxs;
- __le16 min_vnics;
- __le16 max_vnics;
- __le16 min_stat_ctx;
- __le16 max_stat_ctx;
- __le16 min_hw_ring_grps;
- __le16 max_hw_ring_grps;
- __le16 flags;
- #define FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED 0x1UL
- __le16 min_msix;
- __le32 min_ktls_tx_key_ctxs;
- __le32 max_ktls_tx_key_ctxs;
- __le32 min_ktls_rx_key_ctxs;
- __le32 max_ktls_rx_key_ctxs;
- __le32 min_quic_tx_key_ctxs;
- __le32 max_quic_tx_key_ctxs;
- __le32 min_quic_rx_key_ctxs;
- __le32 max_quic_rx_key_ctxs;
-};
-
-/* hwrm_func_vf_resource_cfg_output (size:384b/48B) */
-struct hwrm_func_vf_resource_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 reserved_rsscos_ctx;
- __le16 reserved_cmpl_rings;
- __le16 reserved_tx_rings;
- __le16 reserved_rx_rings;
- __le16 reserved_l2_ctxs;
- __le16 reserved_vnics;
- __le16 reserved_stat_ctx;
- __le16 reserved_hw_ring_grps;
- __le32 reserved_ktls_tx_key_ctxs;
- __le32 reserved_ktls_rx_key_ctxs;
- __le32 reserved_quic_tx_key_ctxs;
- __le32 reserved_quic_rx_key_ctxs;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_backing_store_qcaps_input (size:128b/16B) */
-struct hwrm_func_backing_store_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_func_backing_store_qcaps_output (size:832b/104B) */
-struct hwrm_func_backing_store_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 qp_max_entries;
- __le16 qp_min_qp1_entries;
- __le16 qp_max_l2_entries;
- __le16 qp_entry_size;
- __le16 srq_max_l2_entries;
- __le32 srq_max_entries;
- __le16 srq_entry_size;
- __le16 cq_max_l2_entries;
- __le32 cq_max_entries;
- __le16 cq_entry_size;
- __le16 vnic_max_vnic_entries;
- __le16 vnic_max_ring_table_entries;
- __le16 vnic_entry_size;
- __le32 stat_max_entries;
- __le16 stat_entry_size;
- __le16 tqm_entry_size;
- __le32 tqm_min_entries_per_ring;
- __le32 tqm_max_entries_per_ring;
- __le32 mrav_max_entries;
- __le16 mrav_entry_size;
- __le16 tim_entry_size;
- __le32 tim_max_entries;
- __le16 mrav_num_entries_units;
- u8 tqm_entries_multiple;
- u8 ctx_kind_initializer;
- __le16 ctx_init_mask;
- #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_QP 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_SRQ 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_CQ 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_VNIC 0x8UL
- #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_STAT 0x10UL
- #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_MRAV 0x20UL
- #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_TKC 0x40UL
- #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_RKC 0x80UL
- u8 qp_init_offset;
- u8 srq_init_offset;
- u8 cq_init_offset;
- u8 vnic_init_offset;
- u8 tqm_fp_rings_count;
- u8 stat_init_offset;
- u8 mrav_init_offset;
- u8 tqm_fp_rings_count_ext;
- u8 tkc_init_offset;
- u8 rkc_init_offset;
- __le16 tkc_entry_size;
- __le16 rkc_entry_size;
- __le32 tkc_max_entries;
- __le32 rkc_max_entries;
- __le16 fast_qpmd_qp_num_entries;
- u8 rsvd1[5];
- u8 valid;
-};
-
-/* tqm_fp_ring_cfg (size:128b/16B) */
-struct tqm_fp_ring_cfg {
- u8 tqm_ring_pg_size_tqm_ring_lvl;
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_MASK 0xfUL
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_SFT 0
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_0 0x0UL
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_1 0x1UL
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2 0x2UL
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_MASK 0xf0UL
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_SFT 4
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G
- u8 unused[3];
- __le32 tqm_ring_num_entries;
- __le64 tqm_ring_page_dir;
-};
-
-/* hwrm_func_backing_store_cfg_input (size:2688b/336B) */
-struct hwrm_func_backing_store_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT 0x2UL
- __le32 enables;
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING8 0x10000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING9 0x20000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING10 0x40000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TKC 0x80000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_RKC 0x100000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD 0x200000UL
- u8 qpc_pg_size_qpc_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G
- u8 srq_pg_size_srq_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G
- u8 cq_pg_size_cq_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G
- u8 vnic_pg_size_vnic_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G
- u8 stat_pg_size_stat_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G
- u8 tqm_sp_pg_size_tqm_sp_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G
- u8 tqm_ring0_pg_size_tqm_ring0_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G
- u8 tqm_ring1_pg_size_tqm_ring1_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G
- u8 tqm_ring2_pg_size_tqm_ring2_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G
- u8 tqm_ring3_pg_size_tqm_ring3_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G
- u8 tqm_ring4_pg_size_tqm_ring4_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G
- u8 tqm_ring5_pg_size_tqm_ring5_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G
- u8 tqm_ring6_pg_size_tqm_ring6_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G
- u8 tqm_ring7_pg_size_tqm_ring7_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G
- u8 mrav_pg_size_mrav_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G
- u8 tim_pg_size_tim_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G
- __le64 qpc_page_dir;
- __le64 srq_page_dir;
- __le64 cq_page_dir;
- __le64 vnic_page_dir;
- __le64 stat_page_dir;
- __le64 tqm_sp_page_dir;
- __le64 tqm_ring0_page_dir;
- __le64 tqm_ring1_page_dir;
- __le64 tqm_ring2_page_dir;
- __le64 tqm_ring3_page_dir;
- __le64 tqm_ring4_page_dir;
- __le64 tqm_ring5_page_dir;
- __le64 tqm_ring6_page_dir;
- __le64 tqm_ring7_page_dir;
- __le64 mrav_page_dir;
- __le64 tim_page_dir;
- __le32 qp_num_entries;
- __le32 srq_num_entries;
- __le32 cq_num_entries;
- __le32 stat_num_entries;
- __le32 tqm_sp_num_entries;
- __le32 tqm_ring0_num_entries;
- __le32 tqm_ring1_num_entries;
- __le32 tqm_ring2_num_entries;
- __le32 tqm_ring3_num_entries;
- __le32 tqm_ring4_num_entries;
- __le32 tqm_ring5_num_entries;
- __le32 tqm_ring6_num_entries;
- __le32 tqm_ring7_num_entries;
- __le32 mrav_num_entries;
- __le32 tim_num_entries;
- __le16 qp_num_qp1_entries;
- __le16 qp_num_l2_entries;
- __le16 qp_entry_size;
- __le16 srq_num_l2_entries;
- __le16 srq_entry_size;
- __le16 cq_num_l2_entries;
- __le16 cq_entry_size;
- __le16 vnic_num_vnic_entries;
- __le16 vnic_num_ring_table_entries;
- __le16 vnic_entry_size;
- __le16 stat_entry_size;
- __le16 tqm_entry_size;
- __le16 mrav_entry_size;
- __le16 tim_entry_size;
- u8 tqm_ring8_pg_size_tqm_ring_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G
- u8 ring8_unused[3];
- __le32 tqm_ring8_num_entries;
- __le64 tqm_ring8_page_dir;
- u8 tqm_ring9_pg_size_tqm_ring_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G
- u8 ring9_unused[3];
- __le32 tqm_ring9_num_entries;
- __le64 tqm_ring9_page_dir;
- u8 tqm_ring10_pg_size_tqm_ring_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G
- u8 ring10_unused[3];
- __le32 tqm_ring10_num_entries;
- __le64 tqm_ring10_page_dir;
- __le32 tkc_num_entries;
- __le32 rkc_num_entries;
- __le64 tkc_page_dir;
- __le64 rkc_page_dir;
- __le16 tkc_entry_size;
- __le16 rkc_entry_size;
- u8 tkc_pg_size_tkc_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_1G
- u8 rkc_pg_size_rkc_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G
- __le16 qp_num_fast_qpmd_entries;
-};
-
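Each of the *_pg_size_*_lvl bytes in this request packs the page-table indirection level into bits 0..3 and the backing page size into bits 4..7, per the masks above. A minimal sketch of composing the QPC byte from those macros; the helper name is hypothetical:

static inline u8 bnxt_bs_pg_lvl_qpc(u8 lvl, u8 pg_size)
{
	/* lvl:     one of FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_0..LVL_2 (low nibble) */
	/* pg_size: one of the already-shifted FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_* values (high nibble) */
	return (lvl & FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_MASK) |
	       (pg_size & FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_MASK);
}

For example: req->qpc_pg_size_qpc_lvl = bnxt_bs_pg_lvl_qpc(FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_1, FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K);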
-/* hwrm_func_backing_store_cfg_output (size:128b/16B) */
-struct hwrm_func_backing_store_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_error_recovery_qcfg_input (size:192b/24B) */
-struct hwrm_error_recovery_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 unused_0[8];
-};
-
-/* hwrm_error_recovery_qcfg_output (size:1664b/208B) */
-struct hwrm_error_recovery_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST 0x1UL
- #define ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU 0x2UL
- __le32 driver_polling_freq;
- __le32 master_func_wait_period;
- __le32 normal_func_wait_period;
- __le32 master_func_wait_period_after_reset;
- __le32 max_bailout_time_after_reset;
- __le32 fw_health_status_reg;
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_MASK 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_SFT 0
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_PCIE_CFG 0x0UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_GRC 0x1UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR0 0x2UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR1 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR1
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_MASK 0xfffffffcUL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SFT 2
- __le32 fw_heartbeat_reg;
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_MASK 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_SFT 0
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_GRC 0x1UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR0 0x2UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR1 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR1
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_MASK 0xfffffffcUL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SFT 2
- __le32 fw_reset_cnt_reg;
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_MASK 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_SFT 0
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_GRC 0x1UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR0 0x2UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR1 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR1
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_MASK 0xfffffffcUL
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SFT 2
- __le32 reset_inprogress_reg;
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_MASK 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_SFT 0
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_PCIE_CFG 0x0UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_GRC 0x1UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR0 0x2UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR1 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR1
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_MASK 0xfffffffcUL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SFT 2
- __le32 reset_inprogress_reg_mask;
- u8 unused_0[3];
- u8 reg_array_cnt;
- __le32 reset_reg[16];
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_MASK 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_SFT 0
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_PCIE_CFG 0x0UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_GRC 0x1UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR0 0x2UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR1 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR1
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_MASK 0xfffffffcUL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SFT 2
- __le32 reset_reg_val[16];
- u8 delay_after_reset[16];
- __le32 err_recovery_cnt_reg;
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_MASK 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_SFT 0
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_GRC 0x1UL
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR0 0x2UL
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_MASK 0xfffffffcUL
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SFT 2
- u8 unused_1[3];
- u8 valid;
-};
-
-/* hwrm_func_echo_response_input (size:192b/24B) */
-struct hwrm_func_echo_response_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 event_data1;
- __le32 event_data2;
-};
-
-/* hwrm_func_echo_response_output (size:128b/16B) */
-struct hwrm_func_echo_response_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_ptp_pin_qcfg_input (size:192b/24B) */
-struct hwrm_func_ptp_pin_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 unused_0[8];
-};
-
-/* hwrm_func_ptp_pin_qcfg_output (size:128b/16B) */
-struct hwrm_func_ptp_pin_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 num_pins;
- u8 state;
- #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN0_ENABLED 0x1UL
- #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN1_ENABLED 0x2UL
- #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN2_ENABLED 0x4UL
- #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN3_ENABLED 0x8UL
- u8 pin0_usage;
- #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_OUT
- u8 pin1_usage;
- #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT
- u8 pin2_usage;
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT
- u8 pin3_usage;
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT
- u8 unused_0;
- u8 valid;
-};
-
-/* hwrm_func_ptp_pin_cfg_input (size:256b/32B) */
-struct hwrm_func_ptp_pin_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_STATE 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_USAGE 0x2UL
- #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN1_STATE 0x4UL
- #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN1_USAGE 0x8UL
- #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN2_STATE 0x10UL
- #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN2_USAGE 0x20UL
- #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN3_STATE 0x40UL
- #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN3_USAGE 0x80UL
- u8 pin0_state;
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_DISABLED 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED
- u8 pin0_usage;
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_OUT
- u8 pin1_state;
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_DISABLED 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_ENABLED 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_ENABLED
- u8 pin1_usage;
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_OUT
- u8 pin2_state;
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_DISABLED 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED
- u8 pin2_usage;
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT
- u8 pin3_state;
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_DISABLED 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED
- u8 pin3_usage;
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT
- u8 unused_0[4];
-};
-
-/* hwrm_func_ptp_pin_cfg_output (size:128b/16B) */
-struct hwrm_func_ptp_pin_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_ptp_cfg_input (size:384b/48B) */
-struct hwrm_func_ptp_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 enables;
- #define FUNC_PTP_CFG_REQ_ENABLES_PTP_PPS_EVENT 0x1UL
- #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_DLL_SOURCE 0x2UL
- #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_DLL_PHASE 0x4UL
- #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PERIOD 0x8UL
- #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_UP 0x10UL
- #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PHASE 0x20UL
- #define FUNC_PTP_CFG_REQ_ENABLES_PTP_SET_TIME 0x40UL
- u8 ptp_pps_event;
- #define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_INTERNAL 0x1UL
- #define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_EXTERNAL 0x2UL
- u8 ptp_freq_adj_dll_source;
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_NONE 0x0UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_0 0x1UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_1 0x2UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_2 0x3UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_3 0x4UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_0 0x5UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_1 0x6UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_2 0x7UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_3 0x8UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_INVALID 0xffUL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_INVALID
- u8 ptp_freq_adj_dll_phase;
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_NONE 0x0UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_4K 0x1UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_8K 0x2UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_10M 0x3UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_25M 0x4UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_25M
- u8 unused_0[3];
- __le32 ptp_freq_adj_ext_period;
- __le32 ptp_freq_adj_ext_up;
- __le32 ptp_freq_adj_ext_phase_lower;
- __le32 ptp_freq_adj_ext_phase_upper;
- __le64 ptp_set_time;
-};
-
-/* hwrm_func_ptp_cfg_output (size:128b/16B) */
-struct hwrm_func_ptp_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_ptp_ts_query_input (size:192b/24B) */
-struct hwrm_func_ptp_ts_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define FUNC_PTP_TS_QUERY_REQ_FLAGS_PPS_TIME 0x1UL
- #define FUNC_PTP_TS_QUERY_REQ_FLAGS_PTM_TIME 0x2UL
- u8 unused_0[4];
-};
-
-/* hwrm_func_ptp_ts_query_output (size:320b/40B) */
-struct hwrm_func_ptp_ts_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 pps_event_ts;
- __le64 ptm_local_ts;
- __le64 ptm_system_ts;
- __le32 ptm_link_delay;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_func_ptp_ext_cfg_input (size:256b/32B) */
-struct hwrm_func_ptp_ext_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 enables;
- #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_MASTER_FID 0x1UL
- #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_SEC_FID 0x2UL
- #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_SEC_MODE 0x4UL
- #define FUNC_PTP_EXT_CFG_REQ_ENABLES_FAILOVER_TIMER 0x8UL
- __le16 phc_master_fid;
- __le16 phc_sec_fid;
- u8 phc_sec_mode;
- #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_SWITCH 0x0UL
- #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_ALL 0x1UL
- #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_PF_ONLY 0x2UL
- #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_LAST FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_PF_ONLY
- u8 unused_0;
- __le32 failover_timer;
- u8 unused_1[4];
-};
-
-/* hwrm_func_ptp_ext_cfg_output (size:128b/16B) */
-struct hwrm_func_ptp_ext_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_ptp_ext_qcfg_input (size:192b/24B) */
-struct hwrm_func_ptp_ext_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 unused_0[8];
-};
-
-/* hwrm_func_ptp_ext_qcfg_output (size:256b/32B) */
-struct hwrm_func_ptp_ext_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 phc_master_fid;
- __le16 phc_sec_fid;
- __le16 phc_active_fid0;
- __le16 phc_active_fid1;
- __le32 last_failover_event;
- __le16 from_fid;
- __le16 to_fid;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_backing_store_cfg_v2_input (size:448b/56B) */
-struct hwrm_func_backing_store_cfg_v2_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 type;
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TX_CK 0x13UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RX_CK 0x14UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT_TRACE 0x20UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA0_TRACE 0x26UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA1_TRACE 0x27UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA2_TRACE 0x28UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
- __le16 instance;
- __le32 flags;
- #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_PREBOOT_MODE 0x1UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE 0x2UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_EXTEND 0x4UL
- __le64 page_dir;
- __le32 num_entries;
- __le16 entry_size;
- u8 page_size_pbl_level;
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_SFT 0
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LAST FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G
- u8 subtype_valid_cnt;
- __le32 split_entry_0;
- __le32 split_entry_1;
- __le32 split_entry_2;
- __le32 split_entry_3;
-};
-
-/* hwrm_func_backing_store_cfg_v2_output (size:128b/16B) */
-struct hwrm_func_backing_store_cfg_v2_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 rsvd0[7];
- u8 valid;
-};
-
-/* hwrm_func_backing_store_qcfg_v2_input (size:192b/24B) */
-struct hwrm_func_backing_store_qcfg_v2_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 type;
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TX_CK 0x13UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RX_CK 0x14UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_XID_PARTITION_TABLE 0x1dUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CRT_TRACE 0x20UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA0_TRACE 0x26UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA1_TRACE 0x27UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA2_TRACE 0x28UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID
- __le16 instance;
- u8 rsvd[4];
-};
-
-/* hwrm_func_backing_store_qcfg_v2_output (size:448b/56B) */
-struct hwrm_func_backing_store_qcfg_v2_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 type;
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TX_CK 0x13UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RX_CK 0x14UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT_TRACE 0x1eUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT_TRACE 0x20UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT2_TRACE 0x21UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TTX_PACING_TQM_RING 0x25UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA0_TRACE 0x26UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA1_TRACE 0x27UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA2_TRACE 0x28UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP1_TRACE 0x29UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID
- __le16 instance;
- __le32 flags;
- __le64 page_dir;
- __le32 num_entries;
- u8 page_size_pbl_level;
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_MASK 0xfUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_SFT 0
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_2
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_SFT 4
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_1G
- u8 subtype_valid_cnt;
- u8 rsvd[2];
- __le32 split_entry_0;
- __le32 split_entry_1;
- __le32 split_entry_2;
- __le32 split_entry_3;
- u8 rsvd2[7];
- u8 valid;
-};
-
-/* qpc_split_entries (size:128b/16B) */
-struct qpc_split_entries {
- __le32 qp_num_l2_entries;
- __le32 qp_num_qp1_entries;
- __le32 qp_num_fast_qpmd_entries;
- __le32 rsvd;
-};
-
-/* srq_split_entries (size:128b/16B) */
-struct srq_split_entries {
- __le32 srq_num_l2_entries;
- __le32 rsvd;
- __le32 rsvd2[2];
-};
-
-/* cq_split_entries (size:128b/16B) */
-struct cq_split_entries {
- __le32 cq_num_l2_entries;
- __le32 rsvd;
- __le32 rsvd2[2];
-};
-
-/* vnic_split_entries (size:128b/16B) */
-struct vnic_split_entries {
- __le32 vnic_num_vnic_entries;
- __le32 rsvd;
- __le32 rsvd2[2];
-};
-
-/* mrav_split_entries (size:128b/16B) */
-struct mrav_split_entries {
- __le32 mrav_num_av_entries;
- __le32 rsvd;
- __le32 rsvd2[2];
-};
-
-/* ts_split_entries (size:128b/16B) */
-struct ts_split_entries {
- __le32 region_num_entries;
- u8 tsid;
- u8 lkup_static_bkt_cnt_exp[2];
- u8 rsvd;
- __le32 rsvd2[2];
-};
-
-/* ck_split_entries (size:128b/16B) */
-struct ck_split_entries {
- __le32 num_quic_entries;
- __le32 rsvd;
- __le32 rsvd2[2];
-};
-
-/* hwrm_func_backing_store_qcaps_v2_input (size:192b/24B) */
-struct hwrm_func_backing_store_qcaps_v2_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 type;
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK 0x13UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK 0x14UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE 0x1eUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE 0x20UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE 0x21UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA0_TRACE 0x26UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA1_TRACE 0x27UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA2_TRACE 0x28UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID
- u8 rsvd[6];
-};
-
-/* hwrm_func_backing_store_qcaps_v2_output (size:448b/56B) */
-struct hwrm_func_backing_store_qcaps_v2_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 type;
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TX_CK 0x13UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RX_CK 0x14UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT_TRACE 0x1eUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT_TRACE 0x20UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT2_TRACE 0x21UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TTX_PACING_TQM_RING 0x25UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA0_TRACE 0x26UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA1_TRACE 0x27UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA2_TRACE 0x28UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP1_TRACE 0x29UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID
- __le16 entry_size;
- __le32 flags;
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_DRIVER_MANAGED_MEMORY 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ROCE_QP_PSEUDO_STATIC_ALLOC 0x8UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_DBG_TRACE 0x10UL
- __le32 instance_bit_map;
- u8 ctx_init_value;
- u8 ctx_init_offset;
- u8 entry_multiple;
- u8 rsvd;
- __le32 max_num_entries;
- __le32 min_num_entries;
- __le16 next_valid_type;
- u8 subtype_valid_cnt;
- u8 exact_cnt_bit_map;
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_0_EXACT 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_1_EXACT 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_2_EXACT 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_3_EXACT 0x8UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_UNUSED_MASK 0xf0UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_UNUSED_SFT 4
- __le32 split_entry_0;
- __le32 split_entry_1;
- __le32 split_entry_2;
- __le32 split_entry_3;
- u8 rsvd3[3];
- u8 valid;
-};
-
-/* hwrm_func_dbr_pacing_qcfg_input (size:128b/16B) */
-struct hwrm_func_dbr_pacing_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_func_dbr_pacing_qcfg_output (size:512b/64B) */
-struct hwrm_func_dbr_pacing_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define FUNC_DBR_PACING_QCFG_RESP_FLAGS_DBR_NQ_EVENT_ENABLED 0x1UL
- u8 unused_0[7];
- __le32 dbr_stat_db_fifo_reg;
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK 0x3UL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_SFT 0
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_PCIE_CFG 0x0UL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC 0x1UL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR0 0x2UL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1 0x3UL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_LAST FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_MASK 0xfffffffcUL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SFT 2
- __le32 dbr_stat_db_fifo_reg_watermark_mask;
- u8 dbr_stat_db_fifo_reg_watermark_shift;
- u8 unused_1[3];
- __le32 dbr_stat_db_fifo_reg_fifo_room_mask;
- u8 dbr_stat_db_fifo_reg_fifo_room_shift;
- u8 unused_2[3];
- __le32 dbr_throttling_aeq_arm_reg;
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_MASK 0x3UL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_SFT 0
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_PCIE_CFG 0x0UL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_GRC 0x1UL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR0 0x2UL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1 0x3UL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_LAST FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_MASK 0xfffffffcUL
- #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SFT 2
- u8 dbr_throttling_aeq_arm_reg_val;
- u8 unused_3[3];
- __le32 dbr_stat_db_max_fifo_depth;
- __le32 primary_nq_id;
- __le32 pacing_threshold;
- u8 unused_4[7];
- u8 valid;
-};
-
-/* hwrm_func_drv_if_change_input (size:192b/24B) */
-struct hwrm_func_drv_if_change_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP 0x1UL
- __le32 unused;
-};
-
-/* hwrm_func_drv_if_change_output (size:128b/16B) */
-struct hwrm_func_drv_if_change_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL
- #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE 0x2UL
- #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE 0x4UL
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_port_phy_cfg_input (size:512b/64B) */
-struct hwrm_port_phy_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
- #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
- #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_ENABLE 0x8000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_DISABLE 0x10000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_ENABLE 0x20000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_DISABLE 0x40000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_ENABLE 0x80000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_DISABLE 0x100000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_ENABLE 0x200000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_DISABLE 0x400000UL
- __le32 enables;
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
- #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
- #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
- #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
- #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
- #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
- #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
- #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED 0x800UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK 0x1000UL
- #define PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2 0x2000UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK 0x4000UL
- __le16 port_id;
- __le16 force_link_speed;
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB
- u8 auto_mode;
- #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE 0x0UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW 0x3UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK 0x4UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_LAST PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK
- u8 auto_duplex;
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF 0x0UL
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_LAST PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH
- u8 auto_pause;
- #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
- u8 mgmt_flag;
- #define PORT_PHY_CFG_REQ_MGMT_FLAG_LINK_RELEASE 0x1UL
- #define PORT_PHY_CFG_REQ_MGMT_FLAG_MGMT_VALID 0x80UL
- __le16 auto_link_speed;
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB
- __le16 auto_link_speed_mask;
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB 0x10UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB 0x40UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB 0x80UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
- u8 wirespeed;
- #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
- #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
- #define PORT_PHY_CFG_REQ_WIRESPEED_LAST PORT_PHY_CFG_REQ_WIRESPEED_ON
- u8 lpbk;
- #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
- #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
- #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
- #define PORT_PHY_CFG_REQ_LPBK_EXTERNAL 0x3UL
- #define PORT_PHY_CFG_REQ_LPBK_LAST PORT_PHY_CFG_REQ_LPBK_EXTERNAL
- u8 force_pause;
- #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
- #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
- u8 unused_1;
- __le32 preemphasis;
- __le16 eee_link_speed_mask;
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL
- __le16 force_pam4_link_speed;
- #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL
- #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB
- __le32 tx_lpi_timer;
- #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
- #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
- __le16 auto_link_pam4_speed_mask;
- #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_50G 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_100G 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_200G 0x4UL
- __le16 force_link_speeds2;
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_1GB 0xaUL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_10GB 0x64UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_25GB 0xfaUL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_40GB 0x190UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB 0x1f4UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB_PAM4_56 0x1f5UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_56 0x3e9UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_56 0x7d1UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_56 0xfa1UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112
- __le16 auto_link_speeds2_mask;
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_1GB 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_10GB 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_25GB 0x4UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_40GB 0x8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_50GB 0x10UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB 0x20UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_50GB_PAM4_56 0x40UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_56 0x80UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_56 0x100UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_56 0x200UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_112 0x400UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_112 0x800UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_112 0x1000UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_800GB_PAM4_112 0x2000UL
- u8 unused_2[6];
-};
-
-/* hwrm_port_phy_cfg_output (size:128b/16B) */
-struct hwrm_port_phy_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_port_phy_cfg_cmd_err (size:64b/8B) */
-struct hwrm_port_phy_cfg_cmd_err {
- u8 code;
- #define PORT_PHY_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define PORT_PHY_CFG_CMD_ERR_CODE_ILLEGAL_SPEED 0x1UL
- #define PORT_PHY_CFG_CMD_ERR_CODE_RETRY 0x2UL
- #define PORT_PHY_CFG_CMD_ERR_CODE_LAST PORT_PHY_CFG_CMD_ERR_CODE_RETRY
- u8 unused_0[7];
-};
-
-/* hwrm_port_phy_qcfg_input (size:192b/24B) */
-struct hwrm_port_phy_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_port_phy_qcfg_output (size:832b/104B) */
-struct hwrm_port_phy_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 link;
- #define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL
- #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
- #define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK
- u8 active_fec_signal_mode;
- #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK 0xfUL
- #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_SFT 0
- #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ 0x0UL
- #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4 0x1UL
- #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112 0x2UL
- #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK 0xf0UL
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_SFT 4
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE (0x0UL << 4)
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE (0x1UL << 4)
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE (0x2UL << 4)
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE (0x3UL << 4)
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE (0x4UL << 4)
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE (0x5UL << 4)
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE (0x6UL << 4)
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_LAST PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE
- __le16 link_speed;
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_200GB 0x7d0UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_400GB 0xfa0UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_800GB 0x1f40UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB
- u8 duplex_cfg;
- #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL
- #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL
- #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_LAST PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL
- u8 pause;
- #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL
- __le16 support_speeds;
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD 0x1UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD 0x4UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB 0x10UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB 0x20UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB 0x40UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB 0x80UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
- __le16 force_link_speed;
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB
- u8 auto_mode;
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 0x3UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK 0x4UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK
- u8 auto_pause;
- #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
- __le16 auto_link_speed;
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB
- __le16 auto_link_speed_mask;
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB 0x10UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB 0x40UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB 0x80UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
- u8 wirespeed;
- #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
- #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
- #define PORT_PHY_QCFG_RESP_WIRESPEED_LAST PORT_PHY_QCFG_RESP_WIRESPEED_ON
- u8 lpbk;
- #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
- #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
- #define PORT_PHY_QCFG_RESP_LPBK_EXTERNAL 0x3UL
- #define PORT_PHY_QCFG_RESP_LPBK_LAST PORT_PHY_QCFG_RESP_LPBK_EXTERNAL
- u8 force_pause;
- #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
- u8 module_status;
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX 0x1UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT 0x5UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_OVERHEATED 0x6UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE
- __le32 preemphasis;
- u8 phy_maj;
- u8 phy_min;
- u8 phy_bld;
- u8 phy_type;
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN 0x0UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR 0x1UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 0x2UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR 0x3UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR 0x4UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 0x5UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX 0x6UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR 0x7UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L 0xbUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S 0xcUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N 0xdUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR 0xeUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4 0xfUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4 0x10UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4 0x11UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4 0x12UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10 0x13UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4 0x14UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4 0x15UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4 0x1cUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4 0x1dUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4 0x1eUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4 0x1fUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR 0x20UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR 0x21UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR 0x22UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER 0x23UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2 0x24UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2 0x25UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2 0x26UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2 0x27UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR 0x28UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR 0x29UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR 0x2aUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER 0x2bUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2 0x2cUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2 0x2dUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2 0x2eUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2 0x2fUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8 0x30UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8 0x31UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8 0x32UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8 0x33UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4 0x34UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4 0x35UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4 0x36UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4 0x37UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASECR8 0x38UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASESR8 0x39UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASELR8 0x3aUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEER8 0x3bUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEFR8 0x3cUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8 0x3dUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8
- u8 media_type;
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_LAST PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE
- u8 xcvr_pkg_type;
- #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL
- #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL
- #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL
- u8 eee_config_phy_addr;
- #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
- #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK 0xe0UL
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT 5
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED 0x20UL
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE 0x40UL
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI 0x80UL
- u8 parallel_detect;
- #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT 0x1UL
- __le16 link_partner_adv_speeds;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD 0x4UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB 0x10UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB 0x20UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB 0x40UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB 0x80UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB 0x800UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL
- u8 link_partner_adv_auto_mode;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW 0x3UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK
- u8 link_partner_adv_pause;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL
- __le16 adv_eee_link_speed_mask;
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
- __le16 link_partner_adv_eee_link_speed_mask;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
- __le32 xcvr_identifier_type_tx_lpi_timer;
- #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK 0xffffffUL
- #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT 0
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT 24
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_UNKNOWN (0x0UL << 24)
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP (0x3UL << 24)
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24)
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24)
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPDD (0x18UL << 24)
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP112 (0x1eUL << 24)
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFPDD (0x1fUL << 24)
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_CSFP (0x20UL << 24)
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_CSFP
- __le16 fec_cfg;
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_SUPPORTED 0x80UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ENABLED 0x100UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_SUPPORTED 0x200UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_ENABLED 0x400UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_SUPPORTED 0x800UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_ENABLED 0x1000UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_SUPPORTED 0x2000UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_ENABLED 0x4000UL
- u8 duplex_state;
- #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL
- #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL
- #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_LAST PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
- u8 option_flags;
- #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL
- #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN 0x2UL
- #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SPEEDS2_SUPPORTED 0x4UL
- char phy_vendor_name[16];
- char phy_vendor_partnumber[16];
- __le16 support_pam4_speeds;
- #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_50G 0x1UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_100G 0x2UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_200G 0x4UL
- __le16 force_pam4_link_speed;
- #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL
- #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB
- __le16 auto_pam4_link_speed_mask;
- #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_50G 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_100G 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_200G 0x4UL
- u8 link_partner_pam4_adv_speeds;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_50GB 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_100GB 0x2UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_200GB 0x4UL
- u8 link_down_reason;
- #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF 0x1UL
- __le16 support_speeds2;
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_1GB 0x1UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_10GB 0x2UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_25GB 0x4UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_40GB 0x8UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB 0x10UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB 0x20UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB_PAM4_56 0x40UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_56 0x80UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_56 0x100UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_56 0x200UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_112 0x400UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_112 0x800UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_112 0x1000UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_800GB_PAM4_112 0x2000UL
- __le16 force_link_speeds2;
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_1GB 0xaUL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_10GB 0x64UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_25GB 0xfaUL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_40GB 0x190UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_50GB_PAM4_56 0x1f5UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB_PAM4_56 0x3e9UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_56 0x7d1UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_56 0xfa1UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112
- __le16 auto_link_speeds2;
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_1GB 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_10GB 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_25GB 0x4UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_40GB 0x8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_50GB 0x10UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB 0x20UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_50GB_PAM4_56 0x40UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB_PAM4_56 0x80UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_200GB_PAM4_56 0x100UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_400GB_PAM4_56 0x200UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB_PAM4_112 0x400UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_200GB_PAM4_112 0x800UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_400GB_PAM4_112 0x1000UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_800GB_PAM4_112 0x2000UL
- u8 active_lanes;
- u8 valid;
-};
-
-/* hwrm_port_mac_cfg_input (size:448b/56B) */
-struct hwrm_port_mac_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
- #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL
- #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
- #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
- #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
- #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
- #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL
- #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
- #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_ONE_STEP_TX_TS 0x2000UL
- #define PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE 0x4000UL
- #define PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE 0x8000UL
- __le32 enables;
- #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
- #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
- #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL
- #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
- #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
- #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
- #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
- #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
- #define PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB 0x200UL
- #define PORT_MAC_CFG_REQ_ENABLES_PTP_ADJ_PHASE 0x400UL
- #define PORT_MAC_CFG_REQ_ENABLES_PTP_LOAD_CONTROL 0x800UL
- __le16 port_id;
- u8 ipg;
- u8 lpbk;
- #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL
- #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL
- #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL
- #define PORT_MAC_CFG_REQ_LPBK_LAST PORT_MAC_CFG_REQ_LPBK_REMOTE
- u8 vlan_pri2cos_map_pri;
- u8 reserved1;
- u8 tunnel_pri2cos_map_pri;
- u8 dscp2pri_map_pri;
- __le16 rx_ts_capture_ptp_msg_type;
- __le16 tx_ts_capture_ptp_msg_type;
- u8 cos_field_cfg;
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
- u8 unused_0[3];
- __le32 ptp_freq_adj_ppb;
- u8 unused_1[3];
- u8 ptp_load_control;
- #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_NONE 0x0UL
- #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_IMMEDIATE 0x1UL
- #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_PPS_EVENT 0x2UL
- #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_LAST PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_PPS_EVENT
- __le64 ptp_adj_phase;
-};
-
-/* hwrm_port_mac_cfg_output (size:128b/16B) */
-struct hwrm_port_mac_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 mru;
- __le16 mtu;
- u8 ipg;
- u8 lpbk;
- #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL
- #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL
- #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL
- #define PORT_MAC_CFG_RESP_LPBK_LAST PORT_MAC_CFG_RESP_LPBK_REMOTE
- u8 unused_0;
- u8 valid;
-};
-
-/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */
-struct hwrm_port_mac_ptp_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_port_mac_ptp_qcfg_output (size:704b/88B) */
-struct hwrm_port_mac_ptp_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_ONE_STEP_TX_TS 0x4UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x8UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK 0x10UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED 0x20UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME 0x40UL
- u8 unused_0[3];
- __le32 rx_ts_reg_off_lower;
- __le32 rx_ts_reg_off_upper;
- __le32 rx_ts_reg_off_seq_id;
- __le32 rx_ts_reg_off_src_id_0;
- __le32 rx_ts_reg_off_src_id_1;
- __le32 rx_ts_reg_off_src_id_2;
- __le32 rx_ts_reg_off_domain_id;
- __le32 rx_ts_reg_off_fifo;
- __le32 rx_ts_reg_off_fifo_adv;
- __le32 rx_ts_reg_off_granularity;
- __le32 tx_ts_reg_off_lower;
- __le32 tx_ts_reg_off_upper;
- __le32 tx_ts_reg_off_seq_id;
- __le32 tx_ts_reg_off_fifo;
- __le32 tx_ts_reg_off_granularity;
- __le32 ts_ref_clock_reg_lower;
- __le32 ts_ref_clock_reg_upper;
- u8 unused_1[7];
- u8 valid;
-};
-
-/* tx_port_stats (size:3264b/408B) */
-struct tx_port_stats {
- __le64 tx_64b_frames;
- __le64 tx_65b_127b_frames;
- __le64 tx_128b_255b_frames;
- __le64 tx_256b_511b_frames;
- __le64 tx_512b_1023b_frames;
- __le64 tx_1024b_1518b_frames;
- __le64 tx_good_vlan_frames;
- __le64 tx_1519b_2047b_frames;
- __le64 tx_2048b_4095b_frames;
- __le64 tx_4096b_9216b_frames;
- __le64 tx_9217b_16383b_frames;
- __le64 tx_good_frames;
- __le64 tx_total_frames;
- __le64 tx_ucast_frames;
- __le64 tx_mcast_frames;
- __le64 tx_bcast_frames;
- __le64 tx_pause_frames;
- __le64 tx_pfc_frames;
- __le64 tx_jabber_frames;
- __le64 tx_fcs_err_frames;
- __le64 tx_control_frames;
- __le64 tx_oversz_frames;
- __le64 tx_single_dfrl_frames;
- __le64 tx_multi_dfrl_frames;
- __le64 tx_single_coll_frames;
- __le64 tx_multi_coll_frames;
- __le64 tx_late_coll_frames;
- __le64 tx_excessive_coll_frames;
- __le64 tx_frag_frames;
- __le64 tx_err;
- __le64 tx_tagged_frames;
- __le64 tx_dbl_tagged_frames;
- __le64 tx_runt_frames;
- __le64 tx_fifo_underruns;
- __le64 tx_pfc_ena_frames_pri0;
- __le64 tx_pfc_ena_frames_pri1;
- __le64 tx_pfc_ena_frames_pri2;
- __le64 tx_pfc_ena_frames_pri3;
- __le64 tx_pfc_ena_frames_pri4;
- __le64 tx_pfc_ena_frames_pri5;
- __le64 tx_pfc_ena_frames_pri6;
- __le64 tx_pfc_ena_frames_pri7;
- __le64 tx_eee_lpi_events;
- __le64 tx_eee_lpi_duration;
- __le64 tx_llfc_logical_msgs;
- __le64 tx_hcfc_msgs;
- __le64 tx_total_collisions;
- __le64 tx_bytes;
- __le64 tx_xthol_frames;
- __le64 tx_stat_discard;
- __le64 tx_stat_error;
-};
-
-/* rx_port_stats (size:4224b/528B) */
-struct rx_port_stats {
- __le64 rx_64b_frames;
- __le64 rx_65b_127b_frames;
- __le64 rx_128b_255b_frames;
- __le64 rx_256b_511b_frames;
- __le64 rx_512b_1023b_frames;
- __le64 rx_1024b_1518b_frames;
- __le64 rx_good_vlan_frames;
- __le64 rx_1519b_2047b_frames;
- __le64 rx_2048b_4095b_frames;
- __le64 rx_4096b_9216b_frames;
- __le64 rx_9217b_16383b_frames;
- __le64 rx_total_frames;
- __le64 rx_ucast_frames;
- __le64 rx_mcast_frames;
- __le64 rx_bcast_frames;
- __le64 rx_fcs_err_frames;
- __le64 rx_ctrl_frames;
- __le64 rx_pause_frames;
- __le64 rx_pfc_frames;
- __le64 rx_unsupported_opcode_frames;
- __le64 rx_unsupported_da_pausepfc_frames;
- __le64 rx_wrong_sa_frames;
- __le64 rx_align_err_frames;
- __le64 rx_oor_len_frames;
- __le64 rx_code_err_frames;
- __le64 rx_false_carrier_frames;
- __le64 rx_ovrsz_frames;
- __le64 rx_jbr_frames;
- __le64 rx_mtu_err_frames;
- __le64 rx_match_crc_frames;
- __le64 rx_promiscuous_frames;
- __le64 rx_tagged_frames;
- __le64 rx_double_tagged_frames;
- __le64 rx_trunc_frames;
- __le64 rx_good_frames;
- __le64 rx_pfc_xon2xoff_frames_pri0;
- __le64 rx_pfc_xon2xoff_frames_pri1;
- __le64 rx_pfc_xon2xoff_frames_pri2;
- __le64 rx_pfc_xon2xoff_frames_pri3;
- __le64 rx_pfc_xon2xoff_frames_pri4;
- __le64 rx_pfc_xon2xoff_frames_pri5;
- __le64 rx_pfc_xon2xoff_frames_pri6;
- __le64 rx_pfc_xon2xoff_frames_pri7;
- __le64 rx_pfc_ena_frames_pri0;
- __le64 rx_pfc_ena_frames_pri1;
- __le64 rx_pfc_ena_frames_pri2;
- __le64 rx_pfc_ena_frames_pri3;
- __le64 rx_pfc_ena_frames_pri4;
- __le64 rx_pfc_ena_frames_pri5;
- __le64 rx_pfc_ena_frames_pri6;
- __le64 rx_pfc_ena_frames_pri7;
- __le64 rx_sch_crc_err_frames;
- __le64 rx_undrsz_frames;
- __le64 rx_frag_frames;
- __le64 rx_eee_lpi_events;
- __le64 rx_eee_lpi_duration;
- __le64 rx_llfc_physical_msgs;
- __le64 rx_llfc_logical_msgs;
- __le64 rx_llfc_msgs_with_crc_err;
- __le64 rx_hcfc_msgs;
- __le64 rx_hcfc_msgs_with_crc_err;
- __le64 rx_bytes;
- __le64 rx_runt_bytes;
- __le64 rx_runt_frames;
- __le64 rx_stat_discard;
- __le64 rx_stat_err;
-};
-
-/* hwrm_port_qstats_input (size:320b/40B) */
-struct hwrm_port_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 flags;
- #define PORT_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
- u8 unused_0[5];
- __le64 tx_stat_host_addr;
- __le64 rx_stat_host_addr;
-};
-
-/* hwrm_port_qstats_output (size:128b/16B) */
-struct hwrm_port_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 tx_stat_size;
- __le16 rx_stat_size;
- u8 flags;
- #define PORT_QSTATS_RESP_FLAGS_CLEARED 0x1UL
- u8 unused_0[2];
- u8 valid;
-};
-
-/* tx_port_stats_ext (size:2048b/256B) */
-struct tx_port_stats_ext {
- __le64 tx_bytes_cos0;
- __le64 tx_bytes_cos1;
- __le64 tx_bytes_cos2;
- __le64 tx_bytes_cos3;
- __le64 tx_bytes_cos4;
- __le64 tx_bytes_cos5;
- __le64 tx_bytes_cos6;
- __le64 tx_bytes_cos7;
- __le64 tx_packets_cos0;
- __le64 tx_packets_cos1;
- __le64 tx_packets_cos2;
- __le64 tx_packets_cos3;
- __le64 tx_packets_cos4;
- __le64 tx_packets_cos5;
- __le64 tx_packets_cos6;
- __le64 tx_packets_cos7;
- __le64 pfc_pri0_tx_duration_us;
- __le64 pfc_pri0_tx_transitions;
- __le64 pfc_pri1_tx_duration_us;
- __le64 pfc_pri1_tx_transitions;
- __le64 pfc_pri2_tx_duration_us;
- __le64 pfc_pri2_tx_transitions;
- __le64 pfc_pri3_tx_duration_us;
- __le64 pfc_pri3_tx_transitions;
- __le64 pfc_pri4_tx_duration_us;
- __le64 pfc_pri4_tx_transitions;
- __le64 pfc_pri5_tx_duration_us;
- __le64 pfc_pri5_tx_transitions;
- __le64 pfc_pri6_tx_duration_us;
- __le64 pfc_pri6_tx_transitions;
- __le64 pfc_pri7_tx_duration_us;
- __le64 pfc_pri7_tx_transitions;
-};
-
-/* rx_port_stats_ext (size:3904b/488B) */
-struct rx_port_stats_ext {
- __le64 link_down_events;
- __le64 continuous_pause_events;
- __le64 resume_pause_events;
- __le64 continuous_roce_pause_events;
- __le64 resume_roce_pause_events;
- __le64 rx_bytes_cos0;
- __le64 rx_bytes_cos1;
- __le64 rx_bytes_cos2;
- __le64 rx_bytes_cos3;
- __le64 rx_bytes_cos4;
- __le64 rx_bytes_cos5;
- __le64 rx_bytes_cos6;
- __le64 rx_bytes_cos7;
- __le64 rx_packets_cos0;
- __le64 rx_packets_cos1;
- __le64 rx_packets_cos2;
- __le64 rx_packets_cos3;
- __le64 rx_packets_cos4;
- __le64 rx_packets_cos5;
- __le64 rx_packets_cos6;
- __le64 rx_packets_cos7;
- __le64 pfc_pri0_rx_duration_us;
- __le64 pfc_pri0_rx_transitions;
- __le64 pfc_pri1_rx_duration_us;
- __le64 pfc_pri1_rx_transitions;
- __le64 pfc_pri2_rx_duration_us;
- __le64 pfc_pri2_rx_transitions;
- __le64 pfc_pri3_rx_duration_us;
- __le64 pfc_pri3_rx_transitions;
- __le64 pfc_pri4_rx_duration_us;
- __le64 pfc_pri4_rx_transitions;
- __le64 pfc_pri5_rx_duration_us;
- __le64 pfc_pri5_rx_transitions;
- __le64 pfc_pri6_rx_duration_us;
- __le64 pfc_pri6_rx_transitions;
- __le64 pfc_pri7_rx_duration_us;
- __le64 pfc_pri7_rx_transitions;
- __le64 rx_bits;
- __le64 rx_buffer_passed_threshold;
- __le64 rx_pcs_symbol_err;
- __le64 rx_corrected_bits;
- __le64 rx_discard_bytes_cos0;
- __le64 rx_discard_bytes_cos1;
- __le64 rx_discard_bytes_cos2;
- __le64 rx_discard_bytes_cos3;
- __le64 rx_discard_bytes_cos4;
- __le64 rx_discard_bytes_cos5;
- __le64 rx_discard_bytes_cos6;
- __le64 rx_discard_bytes_cos7;
- __le64 rx_discard_packets_cos0;
- __le64 rx_discard_packets_cos1;
- __le64 rx_discard_packets_cos2;
- __le64 rx_discard_packets_cos3;
- __le64 rx_discard_packets_cos4;
- __le64 rx_discard_packets_cos5;
- __le64 rx_discard_packets_cos6;
- __le64 rx_discard_packets_cos7;
- __le64 rx_fec_corrected_blocks;
- __le64 rx_fec_uncorrectable_blocks;
- __le64 rx_filter_miss;
- __le64 rx_fec_symbol_err;
-};
-
-/* hwrm_port_qstats_ext_input (size:320b/40B) */
-struct hwrm_port_qstats_ext_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 tx_stat_size;
- __le16 rx_stat_size;
- u8 flags;
- #define PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x1UL
- u8 unused_0;
- __le64 tx_stat_host_addr;
- __le64 rx_stat_host_addr;
-};
-
-/* hwrm_port_qstats_ext_output (size:128b/16B) */
-struct hwrm_port_qstats_ext_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 tx_stat_size;
- __le16 rx_stat_size;
- __le16 total_active_cos_queues;
- u8 flags;
- #define PORT_QSTATS_EXT_RESP_FLAGS_CLEAR_ROCE_COUNTERS_SUPPORTED 0x1UL
- #define PORT_QSTATS_EXT_RESP_FLAGS_CLEARED 0x2UL
- u8 valid;
-};
-
-/* hwrm_port_lpbk_qstats_input (size:256b/32B) */
-struct hwrm_port_lpbk_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 lpbk_stat_size;
- u8 flags;
- #define PORT_LPBK_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
- u8 unused_0[5];
- __le64 lpbk_stat_host_addr;
-};
-
-/* hwrm_port_lpbk_qstats_output (size:128b/16B) */
-struct hwrm_port_lpbk_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 lpbk_stat_size;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* port_lpbk_stats (size:640b/80B) */
-struct port_lpbk_stats {
- __le64 lpbk_ucast_frames;
- __le64 lpbk_mcast_frames;
- __le64 lpbk_bcast_frames;
- __le64 lpbk_ucast_bytes;
- __le64 lpbk_mcast_bytes;
- __le64 lpbk_bcast_bytes;
- __le64 lpbk_tx_discards;
- __le64 lpbk_tx_errors;
- __le64 lpbk_rx_discards;
- __le64 lpbk_rx_errors;
-};
-
-/* hwrm_port_ecn_qstats_input (size:256b/32B) */
-struct hwrm_port_ecn_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 ecn_stat_buf_size;
- u8 flags;
- #define PORT_ECN_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
- u8 unused_0[3];
- __le64 ecn_stat_host_addr;
-};
-
-/* hwrm_port_ecn_qstats_output (size:128b/16B) */
-struct hwrm_port_ecn_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 ecn_stat_buf_size;
- u8 mark_en;
- u8 unused_0[4];
- u8 valid;
-};
-
-/* port_stats_ecn (size:512b/64B) */
-struct port_stats_ecn {
- __le64 mark_cnt_cos0;
- __le64 mark_cnt_cos1;
- __le64 mark_cnt_cos2;
- __le64 mark_cnt_cos3;
- __le64 mark_cnt_cos4;
- __le64 mark_cnt_cos5;
- __le64 mark_cnt_cos6;
- __le64 mark_cnt_cos7;
-};
-
-/* hwrm_port_clr_stats_input (size:192b/24B) */
-struct hwrm_port_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 flags;
- #define PORT_CLR_STATS_REQ_FLAGS_ROCE_COUNTERS 0x1UL
- u8 unused_0[5];
-};
-
-/* hwrm_port_clr_stats_output (size:128b/16B) */
-struct hwrm_port_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_port_lpbk_clr_stats_input (size:192b/24B) */
-struct hwrm_port_lpbk_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */
-struct hwrm_port_lpbk_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_port_ts_query_input (size:320b/40B) */
-struct hwrm_port_ts_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define PORT_TS_QUERY_REQ_FLAGS_PATH 0x1UL
- #define PORT_TS_QUERY_REQ_FLAGS_PATH_TX 0x0UL
- #define PORT_TS_QUERY_REQ_FLAGS_PATH_RX 0x1UL
- #define PORT_TS_QUERY_REQ_FLAGS_PATH_LAST PORT_TS_QUERY_REQ_FLAGS_PATH_RX
- #define PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME 0x2UL
- __le16 port_id;
- u8 unused_0[2];
- __le16 enables;
- #define PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT 0x1UL
- #define PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID 0x2UL
- #define PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET 0x4UL
- __le16 ts_req_timeout;
- __le32 ptp_seq_id;
- __le16 ptp_hdr_offset;
- u8 unused_1[6];
-};
-
-/* hwrm_port_ts_query_output (size:192b/24B) */
-struct hwrm_port_ts_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 ptp_msg_ts;
- __le16 ptp_msg_seqid;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_port_phy_qcaps_input (size:192b/24B) */
-struct hwrm_port_phy_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_port_phy_qcaps_output (size:320b/40B) */
-struct hwrm_port_phy_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET 0x10UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x20UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_FW_MANAGED_LINK_DOWN 0x40UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS 0x80UL
- u8 port_cnt;
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_12 0xcUL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_LAST PORT_PHY_QCAPS_RESP_PORT_CNT_12
- __le16 supported_speeds_force_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
- __le16 supported_speeds_auto_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
- __le16 supported_speeds_eee_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB 0x8UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL
- __le32 tx_lpi_timer_low;
- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL
- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0
- #define PORT_PHY_QCAPS_RESP_RSVD2_MASK 0xff000000UL
- #define PORT_PHY_QCAPS_RESP_RSVD2_SFT 24
- __le32 valid_tx_lpi_timer_high;
- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL
- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
- #define PORT_PHY_QCAPS_RESP_RSVD_MASK 0xff000000UL
- #define PORT_PHY_QCAPS_RESP_RSVD_SFT 24
- __le16 supported_pam4_speeds_auto_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_50G 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_100G 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_200G 0x4UL
- __le16 supported_pam4_speeds_force_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_50G 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL
- __le16 flags2;
- #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
- #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
- #define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL
- #define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL
- #define PORT_PHY_QCAPS_RESP_FLAGS2_REMOTE_LPBK_UNSUPPORTED 0x10UL
- u8 internal_port_cnt;
- u8 unused_0;
- __le16 supported_speeds2_force_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_1GB 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_10GB 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_25GB 0x4UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_40GB 0x8UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_50GB 0x10UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB 0x20UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_50GB_PAM4_56 0x40UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_56 0x80UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_56 0x100UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_56 0x200UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_112 0x400UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_112 0x800UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_112 0x1000UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_800GB_PAM4_112 0x2000UL
- __le16 supported_speeds2_auto_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_1GB 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_10GB 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_25GB 0x4UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_40GB 0x8UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_50GB 0x10UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB 0x20UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_50GB_PAM4_56 0x40UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_56 0x80UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_56 0x100UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_56 0x200UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_112 0x400UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_112 0x800UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_112 0x1000UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_800GB_PAM4_112 0x2000UL
- u8 unused_1[3];
- u8 valid;
-};
-
-/* hwrm_port_phy_i2c_read_input (size:320b/40B) */
-struct hwrm_port_phy_i2c_read_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- __le32 enables;
- #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL
- #define PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER 0x2UL
- __le16 port_id;
- u8 i2c_slave_addr;
- u8 bank_number;
- __le16 page_number;
- __le16 page_offset;
- u8 data_length;
- u8 unused_1[7];
-};
-
-/* hwrm_port_phy_i2c_read_output (size:640b/80B) */
-struct hwrm_port_phy_i2c_read_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 data[16];
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_port_phy_mdio_write_input (size:320b/40B) */
-struct hwrm_port_phy_mdio_write_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 unused_0[2];
- __le16 port_id;
- u8 phy_addr;
- u8 dev_addr;
- __le16 reg_addr;
- __le16 reg_data;
- u8 cl45_mdio;
- u8 unused_1[7];
-};
-
-/* hwrm_port_phy_mdio_write_output (size:128b/16B) */
-struct hwrm_port_phy_mdio_write_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_port_phy_mdio_read_input (size:256b/32B) */
-struct hwrm_port_phy_mdio_read_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 unused_0[2];
- __le16 port_id;
- u8 phy_addr;
- u8 dev_addr;
- __le16 reg_addr;
- u8 cl45_mdio;
- u8 unused_1;
-};
-
-/* hwrm_port_phy_mdio_read_output (size:128b/16B) */
-struct hwrm_port_phy_mdio_read_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 reg_data;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_port_led_cfg_input (size:512b/64B) */
-struct hwrm_port_led_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL
- __le16 port_id;
- u8 num_leds;
- u8 rsvd;
- u8 led0_id;
- u8 led0_state;
- #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL
- #define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL
- #define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL
- #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL
- #define PORT_LED_CFG_REQ_LED0_STATE_LAST PORT_LED_CFG_REQ_LED0_STATE_BLINKALT
- u8 led0_color;
- #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL
- #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 0x2UL
- #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL
- #define PORT_LED_CFG_REQ_LED0_COLOR_LAST PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER
- u8 unused_0;
- __le16 led0_blink_on;
- __le16 led0_blink_off;
- u8 led0_group_id;
- u8 rsvd0;
- u8 led1_id;
- u8 led1_state;
- #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL
- #define PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL
- #define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL
- #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL
- #define PORT_LED_CFG_REQ_LED1_STATE_LAST PORT_LED_CFG_REQ_LED1_STATE_BLINKALT
- u8 led1_color;
- #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL
- #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL
- #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL
- #define PORT_LED_CFG_REQ_LED1_COLOR_LAST PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER
- u8 unused_1;
- __le16 led1_blink_on;
- __le16 led1_blink_off;
- u8 led1_group_id;
- u8 rsvd1;
- u8 led2_id;
- u8 led2_state;
- #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL
- #define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL
- #define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL
- #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL
- #define PORT_LED_CFG_REQ_LED2_STATE_LAST PORT_LED_CFG_REQ_LED2_STATE_BLINKALT
- u8 led2_color;
- #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL
- #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL
- #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL
- #define PORT_LED_CFG_REQ_LED2_COLOR_LAST PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER
- u8 unused_2;
- __le16 led2_blink_on;
- __le16 led2_blink_off;
- u8 led2_group_id;
- u8 rsvd2;
- u8 led3_id;
- u8 led3_state;
- #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL
- #define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL
- #define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL
- #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL
- #define PORT_LED_CFG_REQ_LED3_STATE_LAST PORT_LED_CFG_REQ_LED3_STATE_BLINKALT
- u8 led3_color;
- #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL
- #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL
- #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL
- #define PORT_LED_CFG_REQ_LED3_COLOR_LAST PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER
- u8 unused_3;
- __le16 led3_blink_on;
- __le16 led3_blink_off;
- u8 led3_group_id;
- u8 rsvd3;
-};
-
-/* hwrm_port_led_cfg_output (size:128b/16B) */
-struct hwrm_port_led_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_port_led_qcfg_input (size:192b/24B) */
-struct hwrm_port_led_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_port_led_qcfg_output (size:448b/56B) */
-struct hwrm_port_led_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 num_leds;
- u8 led0_id;
- u8 led0_type;
- #define PORT_LED_QCFG_RESP_LED0_TYPE_SPEED 0x0UL
- #define PORT_LED_QCFG_RESP_LED0_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCFG_RESP_LED0_TYPE_INVALID 0xffUL
- #define PORT_LED_QCFG_RESP_LED0_TYPE_LAST PORT_LED_QCFG_RESP_LED0_TYPE_INVALID
- u8 led0_state;
- #define PORT_LED_QCFG_RESP_LED0_STATE_DEFAULT 0x0UL
- #define PORT_LED_QCFG_RESP_LED0_STATE_OFF 0x1UL
- #define PORT_LED_QCFG_RESP_LED0_STATE_ON 0x2UL
- #define PORT_LED_QCFG_RESP_LED0_STATE_BLINK 0x3UL
- #define PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT 0x4UL
- #define PORT_LED_QCFG_RESP_LED0_STATE_LAST PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT
- u8 led0_color;
- #define PORT_LED_QCFG_RESP_LED0_COLOR_DEFAULT 0x0UL
- #define PORT_LED_QCFG_RESP_LED0_COLOR_AMBER 0x1UL
- #define PORT_LED_QCFG_RESP_LED0_COLOR_GREEN 0x2UL
- #define PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER 0x3UL
- #define PORT_LED_QCFG_RESP_LED0_COLOR_LAST PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER
- u8 unused_0;
- __le16 led0_blink_on;
- __le16 led0_blink_off;
- u8 led0_group_id;
- u8 led1_id;
- u8 led1_type;
- #define PORT_LED_QCFG_RESP_LED1_TYPE_SPEED 0x0UL
- #define PORT_LED_QCFG_RESP_LED1_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCFG_RESP_LED1_TYPE_INVALID 0xffUL
- #define PORT_LED_QCFG_RESP_LED1_TYPE_LAST PORT_LED_QCFG_RESP_LED1_TYPE_INVALID
- u8 led1_state;
- #define PORT_LED_QCFG_RESP_LED1_STATE_DEFAULT 0x0UL
- #define PORT_LED_QCFG_RESP_LED1_STATE_OFF 0x1UL
- #define PORT_LED_QCFG_RESP_LED1_STATE_ON 0x2UL
- #define PORT_LED_QCFG_RESP_LED1_STATE_BLINK 0x3UL
- #define PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT 0x4UL
- #define PORT_LED_QCFG_RESP_LED1_STATE_LAST PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT
- u8 led1_color;
- #define PORT_LED_QCFG_RESP_LED1_COLOR_DEFAULT 0x0UL
- #define PORT_LED_QCFG_RESP_LED1_COLOR_AMBER 0x1UL
- #define PORT_LED_QCFG_RESP_LED1_COLOR_GREEN 0x2UL
- #define PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER 0x3UL
- #define PORT_LED_QCFG_RESP_LED1_COLOR_LAST PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER
- u8 unused_1;
- __le16 led1_blink_on;
- __le16 led1_blink_off;
- u8 led1_group_id;
- u8 led2_id;
- u8 led2_type;
- #define PORT_LED_QCFG_RESP_LED2_TYPE_SPEED 0x0UL
- #define PORT_LED_QCFG_RESP_LED2_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCFG_RESP_LED2_TYPE_INVALID 0xffUL
- #define PORT_LED_QCFG_RESP_LED2_TYPE_LAST PORT_LED_QCFG_RESP_LED2_TYPE_INVALID
- u8 led2_state;
- #define PORT_LED_QCFG_RESP_LED2_STATE_DEFAULT 0x0UL
- #define PORT_LED_QCFG_RESP_LED2_STATE_OFF 0x1UL
- #define PORT_LED_QCFG_RESP_LED2_STATE_ON 0x2UL
- #define PORT_LED_QCFG_RESP_LED2_STATE_BLINK 0x3UL
- #define PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT 0x4UL
- #define PORT_LED_QCFG_RESP_LED2_STATE_LAST PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT
- u8 led2_color;
- #define PORT_LED_QCFG_RESP_LED2_COLOR_DEFAULT 0x0UL
- #define PORT_LED_QCFG_RESP_LED2_COLOR_AMBER 0x1UL
- #define PORT_LED_QCFG_RESP_LED2_COLOR_GREEN 0x2UL
- #define PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER 0x3UL
- #define PORT_LED_QCFG_RESP_LED2_COLOR_LAST PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER
- u8 unused_2;
- __le16 led2_blink_on;
- __le16 led2_blink_off;
- u8 led2_group_id;
- u8 led3_id;
- u8 led3_type;
- #define PORT_LED_QCFG_RESP_LED3_TYPE_SPEED 0x0UL
- #define PORT_LED_QCFG_RESP_LED3_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCFG_RESP_LED3_TYPE_INVALID 0xffUL
- #define PORT_LED_QCFG_RESP_LED3_TYPE_LAST PORT_LED_QCFG_RESP_LED3_TYPE_INVALID
- u8 led3_state;
- #define PORT_LED_QCFG_RESP_LED3_STATE_DEFAULT 0x0UL
- #define PORT_LED_QCFG_RESP_LED3_STATE_OFF 0x1UL
- #define PORT_LED_QCFG_RESP_LED3_STATE_ON 0x2UL
- #define PORT_LED_QCFG_RESP_LED3_STATE_BLINK 0x3UL
- #define PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT 0x4UL
- #define PORT_LED_QCFG_RESP_LED3_STATE_LAST PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT
- u8 led3_color;
- #define PORT_LED_QCFG_RESP_LED3_COLOR_DEFAULT 0x0UL
- #define PORT_LED_QCFG_RESP_LED3_COLOR_AMBER 0x1UL
- #define PORT_LED_QCFG_RESP_LED3_COLOR_GREEN 0x2UL
- #define PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER 0x3UL
- #define PORT_LED_QCFG_RESP_LED3_COLOR_LAST PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER
- u8 unused_3;
- __le16 led3_blink_on;
- __le16 led3_blink_off;
- u8 led3_group_id;
- u8 unused_4[6];
- u8 valid;
-};
-
-/* hwrm_port_led_qcaps_input (size:192b/24B) */
-struct hwrm_port_led_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_port_led_qcaps_output (size:384b/48B) */
-struct hwrm_port_led_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 num_leds;
- u8 unused[3];
- u8 led0_id;
- u8 led0_type;
- #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL
- #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL
- #define PORT_LED_QCAPS_RESP_LED0_TYPE_LAST PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID
- u8 led0_group_id;
- u8 unused_0;
- __le16 led0_state_caps;
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
- __le16 led0_color_caps;
- #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
- u8 led1_id;
- u8 led1_type;
- #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL
- #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL
- #define PORT_LED_QCAPS_RESP_LED1_TYPE_LAST PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID
- u8 led1_group_id;
- u8 unused_1;
- __le16 led1_state_caps;
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
- __le16 led1_color_caps;
- #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
- u8 led2_id;
- u8 led2_type;
- #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL
- #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL
- #define PORT_LED_QCAPS_RESP_LED2_TYPE_LAST PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID
- u8 led2_group_id;
- u8 unused_2;
- __le16 led2_state_caps;
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
- __le16 led2_color_caps;
- #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
- u8 led3_id;
- u8 led3_type;
- #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL
- #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL
- #define PORT_LED_QCAPS_RESP_LED3_TYPE_LAST PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID
- u8 led3_group_id;
- u8 unused_3;
- __le16 led3_state_caps;
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
- __le16 led3_color_caps;
- #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
- u8 unused_4[3];
- u8 valid;
-};
-
-/* hwrm_port_mac_qcaps_input (size:192b/24B) */
-struct hwrm_port_mac_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_port_mac_qcaps_output (size:128b/16B) */
-struct hwrm_port_mac_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define PORT_MAC_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x1UL
- #define PORT_MAC_QCAPS_RESP_FLAGS_REMOTE_LPBK_SUPPORTED 0x2UL
- u8 unused_0[6];
- u8 valid;
-};
-
-/* hwrm_queue_qportcfg_input (size:192b/24B) */
-struct hwrm_queue_qportcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX 0x0UL
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
- __le16 port_id;
- u8 drv_qmap_cap;
- #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_DISABLED 0x0UL
- #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED 0x1UL
- #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_LAST QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED
- u8 unused_0;
-};
-
-/* hwrm_queue_qportcfg_output (size:1344b/168B) */
-struct hwrm_queue_qportcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 max_configurable_queues;
- u8 max_configurable_lossless_queues;
- u8 queue_cfg_allowed;
- u8 queue_cfg_info;
- #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_USE_PROFILE_TYPE 0x2UL
- u8 queue_pfcenable_cfg_allowed;
- u8 queue_pri2cos_cfg_allowed;
- u8 queue_cos2bw_cfg_allowed;
- u8 queue_id0;
- u8 queue_id0_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN
- u8 queue_id1;
- u8 queue_id1_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN
- u8 queue_id2;
- u8 queue_id2_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN
- u8 queue_id3;
- u8 queue_id3_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN
- u8 queue_id4;
- u8 queue_id4_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN
- u8 queue_id5;
- u8 queue_id5_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN
- u8 queue_id6;
- u8 queue_id6_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN
- u8 queue_id7;
- u8 queue_id7_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
- u8 queue_id0_service_profile_type;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_NIC 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_CNP 0x4UL
- char qid0_name[16];
- char qid1_name[16];
- char qid2_name[16];
- char qid3_name[16];
- char qid4_name[16];
- char qid5_name[16];
- char qid6_name[16];
- char qid7_name[16];
- u8 queue_id1_service_profile_type;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_NIC 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_CNP 0x4UL
- u8 queue_id2_service_profile_type;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_NIC 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_CNP 0x4UL
- u8 queue_id3_service_profile_type;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_NIC 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_CNP 0x4UL
- u8 queue_id4_service_profile_type;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_NIC 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_CNP 0x4UL
- u8 queue_id5_service_profile_type;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_NIC 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_CNP 0x4UL
- u8 queue_id6_service_profile_type;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_NIC 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_CNP 0x4UL
- u8 queue_id7_service_profile_type;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_NIC 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_CNP 0x4UL
- u8 valid;
-};
-
-/* hwrm_queue_qcfg_input (size:192b/24B) */
-struct hwrm_queue_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_QCFG_REQ_FLAGS_PATH 0x1UL
- #define QUEUE_QCFG_REQ_FLAGS_PATH_TX 0x0UL
- #define QUEUE_QCFG_REQ_FLAGS_PATH_RX 0x1UL
- #define QUEUE_QCFG_REQ_FLAGS_PATH_LAST QUEUE_QCFG_REQ_FLAGS_PATH_RX
- __le32 queue_id;
-};
-
-/* hwrm_queue_qcfg_output (size:128b/16B) */
-struct hwrm_queue_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 queue_len;
- u8 service_profile;
- #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LAST QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN
- u8 queue_cfg_info;
- #define QUEUE_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
- u8 unused_0;
- u8 valid;
-};
-
-/* hwrm_queue_cfg_input (size:320b/40B) */
-struct hwrm_queue_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_CFG_REQ_FLAGS_PATH_MASK 0x3UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_SFT 0
- #define QUEUE_CFG_REQ_FLAGS_PATH_TX 0x0UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_RX 0x1UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
- __le32 enables;
- #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
- #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
- __le32 queue_id;
- __le32 dflt_len;
- u8 service_profile;
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_LAST QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN
- u8 unused_0[7];
-};
-
-/* hwrm_queue_cfg_output (size:128b/16B) */
-struct hwrm_queue_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
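/*
 * A minimal usage sketch (not taken from the driver itself) of how a caller
 * might fill hwrm_queue_cfg_input to mark one queue lossless on the TX path.
 * The common header fields (req_type, seq_id, resp_addr, ...) and the actual
 * message transport are assumed to be handled elsewhere by the caller.
 */
static void queue_cfg_lossless_example(struct hwrm_queue_cfg_input *req,
				       u32 queue_id)
{
	req->flags = cpu_to_le32(QUEUE_CFG_REQ_FLAGS_PATH_TX);
	req->enables = cpu_to_le32(QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE);
	req->queue_id = cpu_to_le32(queue_id);
	req->service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS;
}
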
-/* hwrm_queue_pfcenable_qcfg_input (size:192b/24B) */
-struct hwrm_queue_pfcenable_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_queue_pfcenable_qcfg_output (size:128b/16B) */
-struct hwrm_queue_pfcenable_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_queue_pfcenable_cfg_input (size:192b/24B) */
-struct hwrm_queue_pfcenable_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED 0x8UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED 0x10UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
- __le16 port_id;
- u8 unused_0[2];
-};
-
-/* hwrm_queue_pfcenable_cfg_output (size:128b/16B) */
-struct hwrm_queue_pfcenable_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_queue_pri2cos_qcfg_input (size:192b/24B) */
-struct hwrm_queue_pri2cos_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH 0x1UL
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX 0x0UL
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX 0x1UL
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN 0x2UL
- u8 port_id;
- u8 unused_0[3];
-};
-
-/* hwrm_queue_pri2cos_qcfg_output (size:192b/24B) */
-struct hwrm_queue_pri2cos_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 pri0_cos_queue_id;
- u8 pri1_cos_queue_id;
- u8 pri2_cos_queue_id;
- u8 pri3_cos_queue_id;
- u8 pri4_cos_queue_id;
- u8 pri5_cos_queue_id;
- u8 pri6_cos_queue_id;
- u8 pri7_cos_queue_id;
- u8 queue_cfg_info;
- #define QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
- u8 unused_0[6];
- u8 valid;
-};
-
-/* hwrm_queue_pri2cos_cfg_input (size:320b/40B) */
-struct hwrm_queue_pri2cos_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK 0x3UL
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT 0
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX 0x0UL
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX 0x1UL
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x4UL
- __le32 enables;
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID 0x1UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID 0x2UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID 0x4UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID 0x8UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID 0x10UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID 0x20UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID 0x40UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID 0x80UL
- u8 port_id;
- u8 pri0_cos_queue_id;
- u8 pri1_cos_queue_id;
- u8 pri2_cos_queue_id;
- u8 pri3_cos_queue_id;
- u8 pri4_cos_queue_id;
- u8 pri5_cos_queue_id;
- u8 pri6_cos_queue_id;
- u8 pri7_cos_queue_id;
- u8 unused_0[7];
-};
-
-/* hwrm_queue_pri2cos_cfg_output (size:128b/16B) */
-struct hwrm_queue_pri2cos_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
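/*
 * A minimal sketch of mapping IEEE priority 0 to a CoS queue on both traffic
 * directions; only fields and flags defined above are touched, and the send
 * path is assumed to exist elsewhere.
 */
static void pri2cos_map_pri0_example(struct hwrm_queue_pri2cos_cfg_input *req,
				     u8 port_id, u8 cos_queue_id)
{
	req->flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR);
	req->enables = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID);
	req->port_id = port_id;
	req->pri0_cos_queue_id = cos_queue_id;
}
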
-/* hwrm_queue_cos2bw_qcfg_input (size:192b/24B) */
-struct hwrm_queue_cos2bw_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_queue_cos2bw_qcfg_output (size:896b/112B) */
-struct hwrm_queue_cos2bw_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 queue_id0;
- u8 unused_0;
- __le16 unused_1;
- __le32 queue_id0_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id0_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id0_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id0_pri_lvl;
- u8 queue_id0_bw_weight;
- struct {
- u8 queue_id;
- __le32 queue_id_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id_pri_lvl;
- u8 queue_id_bw_weight;
- } __packed cfg[7];
- u8 unused_2[4];
- u8 valid;
-};
-
-/* hwrm_queue_cos2bw_cfg_input (size:1024b/128B) */
-struct hwrm_queue_cos2bw_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- __le32 enables;
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL
- __le16 port_id;
- u8 queue_id0;
- u8 unused_0;
- __le32 queue_id0_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id0_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id0_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id0_pri_lvl;
- u8 queue_id0_bw_weight;
- struct {
- u8 queue_id;
- __le32 queue_id_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id_pri_lvl;
- u8 queue_id_bw_weight;
- } __packed cfg[7];
- u8 unused_1[5];
-};
-
-/* hwrm_queue_cos2bw_cfg_output (size:128b/16B) */
-struct hwrm_queue_cos2bw_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
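/*
 * The *_MIN_BW/*_MAX_BW words above pack a 28-bit magnitude, one scale bit
 * (bits vs. bytes) and a 3-bit unit selector into a single __le32.  A minimal
 * sketch of that encoding, using only the masks defined above; the helper
 * name is illustrative, not an existing kernel API.
 */
static __le32 cos2bw_encode_min_bw(u32 value_mbps)
{
	u32 bw;

	bw  = value_mbps & QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK;
	bw |= QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS;	/* magnitude is in bits */
	bw |= QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA;	/* megabits per second */
	return cpu_to_le32(bw);
}
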
-/* hwrm_queue_dscp_qcaps_input (size:192b/24B) */
-struct hwrm_queue_dscp_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 port_id;
- u8 unused_0[7];
-};
-
-/* hwrm_queue_dscp_qcaps_output (size:128b/16B) */
-struct hwrm_queue_dscp_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 num_dscp_bits;
- u8 unused_0;
- __le16 max_entries;
- u8 unused_1[3];
- u8 valid;
-};
-
-/* hwrm_queue_dscp2pri_qcfg_input (size:256b/32B) */
-struct hwrm_queue_dscp2pri_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 dest_data_addr;
- u8 port_id;
- u8 unused_0;
- __le16 dest_data_buffer_size;
- u8 unused_1[4];
-};
-
-/* hwrm_queue_dscp2pri_qcfg_output (size:128b/16B) */
-struct hwrm_queue_dscp2pri_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 entry_cnt;
- u8 default_pri;
- u8 unused_0[4];
- u8 valid;
-};
-
-/* hwrm_queue_dscp2pri_cfg_input (size:320b/40B) */
-struct hwrm_queue_dscp2pri_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 src_data_addr;
- __le32 flags;
- #define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI 0x1UL
- __le32 enables;
- #define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI 0x1UL
- u8 port_id;
- u8 default_pri;
- __le16 entry_cnt;
- u8 unused_0[4];
-};
-
-/* hwrm_queue_dscp2pri_cfg_output (size:128b/16B) */
-struct hwrm_queue_dscp2pri_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_vnic_alloc_input (size:192b/24B) */
-struct hwrm_vnic_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
- #define VNIC_ALLOC_REQ_FLAGS_VIRTIO_NET_FID_VALID 0x2UL
- __le16 virtio_net_fid;
- u8 unused_0[2];
-};
-
-/* hwrm_vnic_alloc_output (size:128b/16B) */
-struct hwrm_vnic_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 vnic_id;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_vnic_update_input (size:256b/32B) */
-struct hwrm_vnic_update_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 vnic_id;
- __le32 enables;
- #define VNIC_UPDATE_REQ_ENABLES_VNIC_STATE_VALID 0x1UL
- #define VNIC_UPDATE_REQ_ENABLES_MRU_VALID 0x2UL
- #define VNIC_UPDATE_REQ_ENABLES_METADATA_FORMAT_TYPE_VALID 0x4UL
- u8 vnic_state;
- #define VNIC_UPDATE_REQ_VNIC_STATE_NORMAL 0x0UL
- #define VNIC_UPDATE_REQ_VNIC_STATE_DROP 0x1UL
- #define VNIC_UPDATE_REQ_VNIC_STATE_LAST VNIC_UPDATE_REQ_VNIC_STATE_DROP
- u8 metadata_format_type;
- #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_0 0x0UL
- #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_1 0x1UL
- #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_2 0x2UL
- #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_3 0x3UL
- #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_4 0x4UL
- #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_LAST VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_4
- __le16 mru;
- u8 unused_1[4];
-};
-
-/* hwrm_vnic_update_output (size:128b/16B) */
-struct hwrm_vnic_update_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_vnic_free_input (size:192b/24B) */
-struct hwrm_vnic_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 vnic_id;
- u8 unused_0[4];
-};
-
-/* hwrm_vnic_free_output (size:128b/16B) */
-struct hwrm_vnic_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_vnic_cfg_input (size:384b/48B) */
-struct hwrm_vnic_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL
- #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL
- #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL
- #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL
- #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL
- #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
- #define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL
- #define VNIC_CFG_REQ_FLAGS_PORTCOS_MAPPING_MODE 0x80UL
- __le32 enables;
- #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
- #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
- #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL
- #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL
- #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL
- #define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID 0x20UL
- #define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID 0x40UL
- #define VNIC_CFG_REQ_ENABLES_QUEUE_ID 0x80UL
- #define VNIC_CFG_REQ_ENABLES_RX_CSUM_V2_MODE 0x100UL
- #define VNIC_CFG_REQ_ENABLES_L2_CQE_MODE 0x200UL
- __le16 vnic_id;
- __le16 dflt_ring_grp;
- __le16 rss_rule;
- __le16 cos_rule;
- __le16 lb_rule;
- __le16 mru;
- __le16 default_rx_ring_id;
- __le16 default_cmpl_ring_id;
- __le16 queue_id;
- u8 rx_csum_v2_mode;
- #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_DEFAULT 0x0UL
- #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_ALL_OK 0x1UL
- #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX 0x2UL
- #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_LAST VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX
- u8 l2_cqe_mode;
- #define VNIC_CFG_REQ_L2_CQE_MODE_DEFAULT 0x0UL
- #define VNIC_CFG_REQ_L2_CQE_MODE_COMPRESSED 0x1UL
- #define VNIC_CFG_REQ_L2_CQE_MODE_MIXED 0x2UL
- #define VNIC_CFG_REQ_L2_CQE_MODE_LAST VNIC_CFG_REQ_L2_CQE_MODE_MIXED
- u8 unused0[4];
-};
-
-/* hwrm_vnic_cfg_output (size:128b/16B) */
-struct hwrm_vnic_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_vnic_qcaps_input (size:192b/24B) */
-struct hwrm_vnic_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- u8 unused_0[4];
-};
-
-/* hwrm_vnic_qcaps_output (size:192b/24B) */
-struct hwrm_vnic_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 mru;
- u8 unused_0[2];
- __le32 flags;
- #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL
- #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL
- #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
- #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
- #define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL
- #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 0x200UL
- #define VNIC_QCAPS_RESP_FLAGS_VNIC_STATE_CAP 0x400UL
- #define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP 0x800UL
- #define VNIC_QCAPS_RESP_FLAGS_METADATA_FORMAT_CAP 0x1000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_STRICT_HASH_TYPE_CAP 0x2000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP 0x4000UL
- #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CAP 0x8000UL
- #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_XOR_CAP 0x10000UL
- #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CHKSM_CAP 0x20000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP 0x40000UL
- #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V3_CAP 0x80000UL
- #define VNIC_QCAPS_RESP_FLAGS_L2_CQE_MODE_CAP 0x100000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP 0x200000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP 0x400000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP 0x800000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP 0x1000000UL
- #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_TRUSTED_VF_CAP 0x2000000UL
- #define VNIC_QCAPS_RESP_FLAGS_PORTCOS_MAPPING_MODE 0x4000000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED 0x8000000UL
- #define VNIC_QCAPS_RESP_FLAGS_VNIC_RSS_HASH_MODE_CAP 0x10000000UL
- #define VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP 0x20000000UL
- #define VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP 0x40000000UL
- __le16 max_aggs_supported;
- u8 unused_1[5];
- u8 valid;
-};
-
-/* hwrm_vnic_tpa_cfg_input (size:384b/48B) */
-struct hwrm_vnic_tpa_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define VNIC_TPA_CFG_REQ_FLAGS_TPA 0x1UL
- #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA 0x2UL
- #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE 0x4UL
- #define VNIC_TPA_CFG_REQ_FLAGS_GRO 0x8UL
- #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN 0x10UL
- #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
- #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL
- #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL
- #define VNIC_TPA_CFG_REQ_FLAGS_AGG_PACK_AS_GRO 0x100UL
- __le32 enables;
- #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL
- #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL
- #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL
- #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL
- #define VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN 0x10UL
- __le16 vnic_id;
- __le16 max_agg_segs;
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 0x1UL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 0x2UL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 0x3UL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX 0x1fUL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_LAST VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX
- __le16 max_aggs;
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 0x0UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 0x1UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 0x2UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 0x3UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 0x4UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX 0x7UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_LAST VNIC_TPA_CFG_REQ_MAX_AGGS_MAX
- u8 unused_0[2];
- __le32 max_agg_timer;
- __le32 min_agg_len;
- __le32 tnl_tpa_en_bitmap;
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN 0x1UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE 0x2UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_NVGRE 0x4UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE 0x8UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 0x10UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6 0x20UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE 0x40UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_CUST1 0x80UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE_CUST1 0x100UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR1 0x200UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR2 0x400UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR3 0x800UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR4 0x1000UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR5 0x2000UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR6 0x4000UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR7 0x8000UL
- #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR8 0x10000UL
- u8 unused_1[4];
-};
-
-/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */
-struct hwrm_vnic_tpa_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_vnic_tpa_qcfg_input (size:192b/24B) */
-struct hwrm_vnic_tpa_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vnic_id;
- u8 unused_0[6];
-};
-
-/* hwrm_vnic_tpa_qcfg_output (size:256b/32B) */
-struct hwrm_vnic_tpa_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define VNIC_TPA_QCFG_RESP_FLAGS_TPA 0x1UL
- #define VNIC_TPA_QCFG_RESP_FLAGS_ENCAP_TPA 0x2UL
- #define VNIC_TPA_QCFG_RESP_FLAGS_RSC_WND_UPDATE 0x4UL
- #define VNIC_TPA_QCFG_RESP_FLAGS_GRO 0x8UL
- #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_ECN 0x10UL
- #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
- #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_IPID_CHECK 0x40UL
- #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_TTL_CHECK 0x80UL
- __le16 max_agg_segs;
- #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_1 0x0UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_2 0x1UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_4 0x2UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_8 0x3UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX 0x1fUL
- #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX
- __le16 max_aggs;
- #define VNIC_TPA_QCFG_RESP_MAX_AGGS_1 0x0UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGGS_2 0x1UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGGS_4 0x2UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGGS_8 0x3UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGGS_16 0x4UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX 0x7UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX
- __le32 max_agg_timer;
- __le32 min_agg_len;
- __le32 tnl_tpa_en_bitmap;
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN 0x1UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GENEVE 0x2UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_NVGRE 0x4UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GRE 0x8UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_IPV4 0x10UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_IPV6 0x20UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN_GPE 0x40UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN_CUST1 0x80UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GRE_CUST1 0x100UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR1 0x200UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR2 0x400UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR3 0x800UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR4 0x1000UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR5 0x2000UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR6 0x4000UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR7 0x8000UL
- #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR8 0x10000UL
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_vnic_rss_cfg_input (size:384b/48B) */
-struct hwrm_vnic_rss_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 hash_type;
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL 0x40UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 0x80UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4 0x100UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 0x200UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6 0x400UL
- __le16 vnic_id;
- u8 ring_table_pair_index;
- u8 hash_mode_flags;
- #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT 0x1UL
- #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_4 0x2UL
- #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_2 0x4UL
- #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL
- #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL
- __le64 ring_grp_tbl_addr;
- __le64 hash_key_tbl_addr;
- __le16 rss_ctx_idx;
- u8 flags;
- #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE 0x1UL
- #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE 0x2UL
- #define VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT 0x4UL
- u8 ring_select_mode;
- #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ 0x0UL
- #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_XOR 0x1UL
- #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM 0x2UL
- #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_LAST VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM
- u8 unused_1[4];
-};
-
-/* hwrm_vnic_rss_cfg_output (size:128b/16B) */
-struct hwrm_vnic_rss_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
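/*
 * A minimal sketch of programming the RSS hash over IPv4/IPv6 TCP and UDP
 * flows with the Toeplitz hash, using only the fields and flags defined
 * above.  DMA addresses for the ring-group and key tables, and the message
 * transport itself, are assumed to be handled elsewhere.
 */
static void vnic_rss_cfg_example(struct hwrm_vnic_rss_cfg_input *req,
				 u16 vnic_id, u16 rss_ctx_idx)
{
	req->hash_type = cpu_to_le32(VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
				     VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
				     VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
				     VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
				     VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 |
				     VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6);
	req->vnic_id = cpu_to_le16(vnic_id);
	req->rss_ctx_idx = cpu_to_le16(rss_ctx_idx);
	req->ring_select_mode = VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ;
}
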
-/* hwrm_vnic_rss_cfg_cmd_err (size:64b/8B) */
-struct hwrm_vnic_rss_cfg_cmd_err {
- u8 code;
- #define VNIC_RSS_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY 0x1UL
- #define VNIC_RSS_CFG_CMD_ERR_CODE_LAST VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY
- u8 unused_0[7];
-};
-
-/* hwrm_vnic_rss_qcfg_input (size:192b/24B) */
-struct hwrm_vnic_rss_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 rss_ctx_idx;
- __le16 vnic_id;
- u8 unused_0[4];
-};
-
-/* hwrm_vnic_rss_qcfg_output (size:512b/64B) */
-struct hwrm_vnic_rss_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 hash_type;
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV4 0x1UL
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV4 0x2UL
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV4 0x4UL
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV6 0x8UL
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV6 0x10UL
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV6 0x20UL
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV6_FLOW_LABEL 0x40UL
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_AH_SPI_IPV4 0x80UL
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_ESP_SPI_IPV4 0x100UL
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_AH_SPI_IPV6 0x200UL
- #define VNIC_RSS_QCFG_RESP_HASH_TYPE_ESP_SPI_IPV6 0x400UL
- u8 unused_0[4];
- __le32 hash_key[10];
- u8 hash_mode_flags;
- #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_DEFAULT 0x1UL
- #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_4 0x2UL
- #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_2 0x4UL
- #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL
- #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL
- u8 ring_select_mode;
- #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ 0x0UL
- #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_XOR 0x1UL
- #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ_CHECKSUM 0x2UL
- #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_LAST VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ_CHECKSUM
- u8 unused_1[5];
- u8 valid;
-};
-
-/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */
-struct hwrm_vnic_plcmodes_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT 0x1UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT 0x2UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 0x4UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_VIRTIO_PLACEMENT 0x40UL
- __le32 enables;
- #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL
- #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL
- #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL
- #define VNIC_PLCMODES_CFG_REQ_ENABLES_MAX_BDS_VALID 0x8UL
- __le32 vnic_id;
- __le16 jumbo_thresh;
- __le16 hds_offset;
- __le16 hds_threshold;
- __le16 max_bds;
- u8 unused_0[4];
-};
-
-/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */
-struct hwrm_vnic_plcmodes_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */
-struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_vnic_rss_cos_lb_ctx_alloc_output (size:128b/16B) */
-struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 rss_cos_lb_ctx_id;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_vnic_rss_cos_lb_ctx_free_input (size:192b/24B) */
-struct hwrm_vnic_rss_cos_lb_ctx_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 rss_cos_lb_ctx_id;
- u8 unused_0[6];
-};
-
-/* hwrm_vnic_rss_cos_lb_ctx_free_output (size:128b/16B) */
-struct hwrm_vnic_rss_cos_lb_ctx_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_ring_alloc_input (size:704b/88B) */
-struct hwrm_ring_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
- #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
- #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
- #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL
- #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
- #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
- #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL
- #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL
- #define RING_ALLOC_REQ_ENABLES_STEERING_TAG_VALID 0x800UL
- u8 ring_type;
- #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
- #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL
- #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- #define RING_ALLOC_REQ_RING_TYPE_RX_AGG 0x4UL
- #define RING_ALLOC_REQ_RING_TYPE_NQ 0x5UL
- #define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_NQ
- u8 cmpl_coal_cnt;
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_OFF 0x0UL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_4 0x1UL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_8 0x2UL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_12 0x3UL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_16 0x4UL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_24 0x5UL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_32 0x6UL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_48 0x7UL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64 0x8UL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_96 0x9UL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_128 0xaUL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_192 0xbUL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_256 0xcUL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_320 0xdUL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_384 0xeUL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX 0xfUL
- #define RING_ALLOC_REQ_CMPL_COAL_CNT_LAST RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX
- __le16 flags;
- #define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL
- #define RING_ALLOC_REQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION 0x2UL
- #define RING_ALLOC_REQ_FLAGS_NQ_DBR_PACING 0x4UL
- #define RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE 0x8UL
- __le64 page_tbl_addr;
- __le32 fbo;
- u8 page_size;
- u8 page_tbl_depth;
- __le16 schq_id;
- __le32 length;
- __le16 logical_id;
- __le16 cmpl_ring_id;
- __le16 queue_id;
- __le16 rx_buf_size;
- __le16 rx_ring_id;
- __le16 nq_ring_id;
- __le16 ring_arb_cfg;
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP 0x1UL
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ 0x2UL
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ
- #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK 0xf0UL
- #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
- __le16 steering_tag;
- __le32 reserved3;
- __le32 stat_ctx_id;
- __le32 reserved4;
- __le32 max_bw;
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0
- #define RING_ALLOC_REQ_MAX_BW_SCALE 0x10000000UL
- #define RING_ALLOC_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define RING_ALLOC_REQ_MAX_BW_SCALE_LAST RING_ALLOC_REQ_MAX_BW_SCALE_BYTES
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 int_mode;
- #define RING_ALLOC_REQ_INT_MODE_LEGACY 0x0UL
- #define RING_ALLOC_REQ_INT_MODE_RSVD 0x1UL
- #define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL
- #define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL
- #define RING_ALLOC_REQ_INT_MODE_LAST RING_ALLOC_REQ_INT_MODE_POLL
- u8 mpc_chnls_type;
- #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_TCE 0x0UL
- #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RCE 0x1UL
- #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_TE_CFA 0x2UL
- #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RE_CFA 0x3UL
- #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE 0x4UL
- #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_LAST RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE
- u8 unused_4[2];
- __le64 cq_handle;
-};
-
-/* hwrm_ring_alloc_output (size:128b/16B) */
-struct hwrm_ring_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 ring_id;
- __le16 logical_ring_id;
- u8 push_buffer_index;
- #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL
- #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL
- #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_LAST RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER
- u8 unused_0[2];
- u8 valid;
-};
-
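/*
 * A minimal sketch of a TX ring allocation request, assuming the ring's page
 * table has already been DMA-mapped (the address, length and IDs are supplied
 * by the caller).  Only fields defined above are touched; the hwrm transport
 * is out of scope here.
 */
static void ring_alloc_tx_example(struct hwrm_ring_alloc_input *req,
				  dma_addr_t page_tbl, u32 entries,
				  u16 cmpl_ring_id, u32 stat_ctx_id)
{
	req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
	req->page_tbl_addr = cpu_to_le64(page_tbl);
	req->length = cpu_to_le32(entries);
	req->cmpl_ring_id = cpu_to_le16(cmpl_ring_id);
	req->enables = cpu_to_le32(RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID);
	req->stat_ctx_id = cpu_to_le32(stat_ctx_id);
	req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
}
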
-/* hwrm_ring_free_input (size:256b/32B) */
-struct hwrm_ring_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 ring_type;
- #define RING_FREE_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define RING_FREE_REQ_RING_TYPE_TX 0x1UL
- #define RING_FREE_REQ_RING_TYPE_RX 0x2UL
- #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- #define RING_FREE_REQ_RING_TYPE_RX_AGG 0x4UL
- #define RING_FREE_REQ_RING_TYPE_NQ 0x5UL
- #define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_NQ
- u8 flags;
- #define RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID 0x1UL
- #define RING_FREE_REQ_FLAGS_LAST RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID
- __le16 ring_id;
- __le32 prod_idx;
- __le32 opaque;
- __le32 unused_1;
-};
-
-/* hwrm_ring_free_output (size:128b/16B) */
-struct hwrm_ring_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_ring_reset_input (size:192b/24B) */
-struct hwrm_ring_reset_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 ring_type;
- #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
- #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
- #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- #define RING_RESET_REQ_RING_TYPE_RX_RING_GRP 0x6UL
- #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_RX_RING_GRP
- u8 unused_0;
- __le16 ring_id;
- u8 unused_1[4];
-};
-
-/* hwrm_ring_reset_output (size:128b/16B) */
-struct hwrm_ring_reset_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 push_buffer_index;
- #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL
- #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL
- #define RING_RESET_RESP_PUSH_BUFFER_INDEX_LAST RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER
- u8 unused_0[3];
- u8 consumer_idx[3];
- u8 valid;
-};
-
-/* hwrm_ring_aggint_qcaps_input (size:128b/16B) */
-struct hwrm_ring_aggint_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_ring_aggint_qcaps_output (size:384b/48B) */
-struct hwrm_ring_aggint_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 cmpl_params;
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN 0x1UL
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MAX 0x2UL
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET 0x4UL
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE 0x8UL
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR 0x10UL
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT 0x20UL
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR 0x40UL
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT 0x80UL
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_AGGR_INT 0x100UL
- __le32 nq_params;
- #define RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN 0x1UL
- __le16 num_cmpl_dma_aggr_min;
- __le16 num_cmpl_dma_aggr_max;
- __le16 num_cmpl_dma_aggr_during_int_min;
- __le16 num_cmpl_dma_aggr_during_int_max;
- __le16 cmpl_aggr_dma_tmr_min;
- __le16 cmpl_aggr_dma_tmr_max;
- __le16 cmpl_aggr_dma_tmr_during_int_min;
- __le16 cmpl_aggr_dma_tmr_during_int_max;
- __le16 int_lat_tmr_min_min;
- __le16 int_lat_tmr_min_max;
- __le16 int_lat_tmr_max_min;
- __le16 int_lat_tmr_max_max;
- __le16 num_cmpl_aggr_int_min;
- __le16 num_cmpl_aggr_int_max;
- __le16 timer_units;
- u8 unused_0[1];
- u8 valid;
-};
-
-/* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */
-struct hwrm_ring_cmpl_ring_qaggint_params_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 ring_id;
- __le16 flags;
- #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_MASK 0x3UL
- #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_SFT 0
- #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL
- u8 unused_0[4];
-};
-
-/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */
-struct hwrm_ring_cmpl_ring_qaggint_params_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 flags;
- #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL
- #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL
- __le16 num_cmpl_dma_aggr;
- __le16 num_cmpl_dma_aggr_during_int;
- __le16 cmpl_aggr_dma_tmr;
- __le16 cmpl_aggr_dma_tmr_during_int;
- __le16 int_lat_tmr_min;
- __le16 int_lat_tmr_max;
- __le16 num_cmpl_aggr_int;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_ring_cmpl_ring_cfg_aggint_params_input (size:320b/40B) */
-struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 ring_id;
- __le16 flags;
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL
- __le16 num_cmpl_dma_aggr;
- __le16 num_cmpl_dma_aggr_during_int;
- __le16 cmpl_aggr_dma_tmr;
- __le16 cmpl_aggr_dma_tmr_during_int;
- __le16 int_lat_tmr_min;
- __le16 int_lat_tmr_max;
- __le16 num_cmpl_aggr_int;
- __le16 enables;
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR 0x1UL
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT 0x2UL
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_CMPL_AGGR_DMA_TMR 0x4UL
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MIN 0x8UL
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MAX 0x10UL
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_AGGR_INT 0x20UL
- u8 unused_0[4];
-};
-
-/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */
-struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_ring_grp_alloc_input (size:192b/24B) */
-struct hwrm_ring_grp_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 cr;
- __le16 rr;
- __le16 ar;
- __le16 sc;
-};
-
-/* hwrm_ring_grp_alloc_output (size:128b/16B) */
-struct hwrm_ring_grp_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 ring_group_id;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_ring_grp_free_input (size:192b/24B) */
-struct hwrm_ring_grp_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 ring_group_id;
- u8 unused_0[4];
-};
-
-/* hwrm_ring_grp_free_output (size:128b/16B) */
-struct hwrm_ring_grp_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-#define DEFAULT_FLOW_ID 0xFFFFFFFFUL
-#define ROCEV1_FLOW_ID 0xFFFFFFFEUL
-#define ROCEV2_FLOW_ID 0xFFFFFFFDUL
-#define ROCEV2_CNP_FLOW_ID 0xFFFFFFFCUL
-
-/* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */
-struct hwrm_cfa_l2_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_MASK 0x30UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_SFT 4
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 4)
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 4)
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 4)
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_XDP_DISABLE 0x40UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_SOURCE_VALID 0x80UL
- __le32 enables;
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK 0x8UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x10UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK 0x20UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR 0x40UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK 0x80UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN 0x100UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK 0x200UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN 0x400UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK 0x800UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS 0x20000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_NUM_VLANS 0x40000UL
- u8 l2_addr[6];
- u8 num_vlans;
- u8 t_num_vlans;
- u8 l2_addr_mask[6];
- __le16 l2_ovlan;
- __le16 l2_ovlan_mask;
- __le16 l2_ivlan;
- __le16 l2_ivlan_mask;
- u8 unused_1[2];
- u8 t_l2_addr[6];
- u8 unused_2[2];
- u8 t_l2_addr_mask[6];
- __le16 t_l2_ovlan;
- __le16 t_l2_ovlan_mask;
- __le16 t_l2_ivlan;
- __le16 t_l2_ivlan_mask;
- u8 src_type;
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC 0x3UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE 0x5UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO 0x6UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG 0x7UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG
- u8 unused_3;
- __le32 src_id;
- u8 tunnel_type;
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
- u8 unused_4;
- __le16 dst_id;
- __le16 mirror_vnic_id;
- u8 pri_hint;
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX 0x3UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN
- u8 unused_5;
- __le32 unused_6;
- __le64 l2_filter_id_hint;
-};
-
-/* hwrm_cfa_l2_filter_alloc_output (size:192b/24B) */
-struct hwrm_cfa_l2_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 l2_filter_id;
- __le32 flow_id;
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
- u8 unused_0[3];
- u8 valid;
-};
-
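Illustrative aside (not part of the removed header): hwrm_cfa_l2_filter_alloc is the basic CFA MAC filter. The request carries the address plus an enables bitmap saying which optional match fields are valid, and the response returns the 64-bit l2_filter_id consumed by the free/cfg commands that follow. A minimal sketch of filling an exact-match RX request might look like this; the fill_l2_filter_alloc() name and the dst_vnic parameter are hypothetical, and handing the request to the driver's HWRM transport is not shown.

/* Sketch only -- assumes the definitions above plus <linux/etherdevice.h>. */
static void fill_l2_filter_alloc(struct hwrm_cfa_l2_filter_alloc_input *req,
                                 const u8 *mac, u16 dst_vnic)
{
        memset(req, 0, sizeof(*req));

        /* Match on the RX path, outermost L2 header only. */
        req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX |
                                 CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);

        /* Tell firmware which optional fields below are meaningful. */
        req->enables = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
                                   CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK |
                                   CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID);

        ether_addr_copy(req->l2_addr, mac);
        eth_broadcast_addr(req->l2_addr_mask);  /* all mask bits set = exact match */
        req->dst_id = cpu_to_le16(dst_vnic);    /* destination for matching packets */
}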
-/* hwrm_cfa_l2_filter_free_input (size:192b/24B) */
-struct hwrm_cfa_l2_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 l2_filter_id;
-};
-
-/* hwrm_cfa_l2_filter_free_output (size:128b/16B) */
-struct hwrm_cfa_l2_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_l2_filter_cfg_input (size:384b/48B) */
-struct hwrm_cfa_l2_filter_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_MASK 0x30UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_SFT 4
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_NO_UPDATE (0x0UL << 4)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_BYPASS_LKUP (0x1UL << 4)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP (0x2UL << 4)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_RESTORE_FW_OP (0x3UL << 4)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_RESTORE_FW_OP
- __le32 enables;
- #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
- #define CFA_L2_FILTER_CFG_REQ_ENABLES_PROF_FUNC 0x4UL
- #define CFA_L2_FILTER_CFG_REQ_ENABLES_L2_CONTEXT_ID 0x8UL
- __le64 l2_filter_id;
- __le32 dst_id;
- __le32 new_mirror_vnic_id;
- __le32 prof_func;
- __le32 l2_context_id;
-};
-
-/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */
-struct hwrm_cfa_l2_filter_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_l2_set_rx_mask_input (size:448b/56B) */
-struct hwrm_cfa_l2_set_rx_mask_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 vnic_id;
- __le32 mask;
- #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY 0x40UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN 0x80UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN 0x100UL
- __le64 mc_tbl_addr;
- __le32 num_mc_entries;
- u8 unused_0[4];
- __le64 vlan_tag_tbl_addr;
- __le32 num_vlan_tags;
- u8 unused_1[4];
-};
-
-/* hwrm_cfa_l2_set_rx_mask_output (size:128b/16B) */
-struct hwrm_cfa_l2_set_rx_mask_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_l2_set_rx_mask_cmd_err (size:64b/8B) */
-struct hwrm_cfa_l2_set_rx_mask_cmd_err {
- u8 code;
- #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR 0x1UL
- #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_LAST CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR
- u8 unused_0[7];
-};
-
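Illustrative aside (not part of the removed header): l2_set_rx_mask selects the per-VNIC receive filtering mode, optionally pointing the firmware at a host-resident multicast address table through mc_tbl_addr. A hedged sketch, with a hypothetical fill_rx_mask() helper and no transport code:

/* Sketch only -- assumes the definitions above and <linux/types.h>. */
static void fill_rx_mask(struct hwrm_cfa_l2_set_rx_mask_input *req,
                         u32 vnic_id, dma_addr_t mc_tbl, u32 mc_count,
                         bool promisc)
{
        u32 mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;   /* always accept broadcast */

        memset(req, 0, sizeof(*req));
        req->vnic_id = cpu_to_le32(vnic_id);

        if (promisc) {
                mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
        } else if (mc_count) {
                /* mc_tbl points at an array of mc_count 6-byte MAC addresses. */
                mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
                req->mc_tbl_addr = cpu_to_le64(mc_tbl);
                req->num_mc_entries = cpu_to_le32(mc_count);
        }
        req->mask = cpu_to_le32(mask);
}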
-/* hwrm_cfa_tunnel_filter_alloc_input (size:704b/88B) */
-struct hwrm_cfa_tunnel_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
- __le32 enables;
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x2UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x4UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR 0x8UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE 0x10UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR 0x40UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x80UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI 0x100UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x200UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL
- __le64 l2_filter_id;
- u8 l2_addr[6];
- __le16 l2_ivlan;
- __le32 l3_addr[4];
- __le32 t_l3_addr[4];
- u8 l3_addr_type;
- u8 t_l3_addr_type;
- u8 tunnel_type;
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
- u8 tunnel_flags;
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR 0x1UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1 0x2UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_EXTHDR_SEQNUM_S0 0x4UL
- __le32 vni;
- __le32 dst_vnic_id;
- __le32 mirror_vnic_id;
-};
-
-/* hwrm_cfa_tunnel_filter_alloc_output (size:192b/24B) */
-struct hwrm_cfa_tunnel_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 tunnel_filter_id;
- __le32 flow_id;
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_cfa_tunnel_filter_free_input (size:192b/24B) */
-struct hwrm_cfa_tunnel_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 tunnel_filter_id;
-};
-
-/* hwrm_cfa_tunnel_filter_free_output (size:128b/16B) */
-struct hwrm_cfa_tunnel_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_vxlan_ipv4_hdr (size:128b/16B) */
-struct hwrm_vxlan_ipv4_hdr {
- u8 ver_hlen;
- #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK 0xfUL
- #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0
- #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK 0xf0UL
- #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4
- u8 tos;
- __be16 ip_id;
- __be16 flags_frag_offset;
- u8 ttl;
- u8 protocol;
- __be32 src_ip_addr;
- __be32 dest_ip_addr;
-};
-
-/* hwrm_vxlan_ipv6_hdr (size:320b/40B) */
-struct hwrm_vxlan_ipv6_hdr {
- __be32 ver_tc_flow_label;
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT 0x1cUL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK 0xf0000000UL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT 0x14UL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK 0xff00000UL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT 0x0UL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK 0xfffffUL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_LAST VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK
- __be16 payload_len;
- u8 next_hdr;
- u8 ttl;
- __be32 src_ip_addr[4];
- __be32 dest_ip_addr[4];
-};
-
-/* hwrm_cfa_encap_data_vxlan (size:640b/80B) */
-struct hwrm_cfa_encap_data_vxlan {
- u8 src_mac_addr[6];
- __le16 unused_0;
- u8 dst_mac_addr[6];
- u8 num_vlan_tags;
- u8 unused_1;
- __be16 ovlan_tpid;
- __be16 ovlan_tci;
- __be16 ivlan_tpid;
- __be16 ivlan_tci;
- __le32 l3[10];
- #define CFA_ENCAP_DATA_VXLAN_L3_VER_MASK 0xfUL
- #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 0x4UL
- #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 0x6UL
- #define CFA_ENCAP_DATA_VXLAN_L3_LAST CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6
- __be16 src_port;
- __be16 dst_port;
- __be32 vni;
- u8 hdr_rsvd0[3];
- u8 hdr_rsvd1;
- u8 hdr_flags;
- u8 unused[3];
-};
-
-/* hwrm_cfa_encap_record_alloc_input (size:832b/104B) */
-struct hwrm_cfa_encap_record_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_EXTERNAL 0x2UL
- u8 encap_type;
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_V4 0x9UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE_V1 0xaUL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE 0xbUL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE_V6 0xcUL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE 0x10UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE
- u8 unused_0[3];
- __le32 encap_data[20];
-};
-
-/* hwrm_cfa_encap_record_alloc_output (size:128b/16B) */
-struct hwrm_cfa_encap_record_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 encap_record_id;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_cfa_encap_record_free_input (size:192b/24B) */
-struct hwrm_cfa_encap_record_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 encap_record_id;
- u8 unused_0[4];
-};
-
-/* hwrm_cfa_encap_record_free_output (size:128b/16B) */
-struct hwrm_cfa_encap_record_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */
-struct hwrm_cfa_ntuple_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_FID 0x8UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_ARP_REPLY 0x10UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX 0x20UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_NO_L2_CONTEXT 0x40UL
- __le32 enables;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x10UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x20UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x80UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x200UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x400UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK 0x800UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x1000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 0x4000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x10000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x40000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX 0x80000UL
- __le64 l2_filter_id;
- u8 src_macaddr[6];
- __be16 ethertype;
- u8 ip_addr_type;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6
- u8 ip_protocol;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_ICMP 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_ICMPV6 0x3aUL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_RSVD 0xffUL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_RSVD
- __le16 dst_id;
- __le16 rfs_ring_tbl_idx;
- u8 tunnel_type;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
- u8 pri_hint;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST 0x3UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST
- __be32 src_ipaddr[4];
- __be32 src_ipaddr_mask[4];
- __be32 dst_ipaddr[4];
- __be32 dst_ipaddr_mask[4];
- __be16 src_port;
- __be16 src_port_mask;
- __be16 dst_port;
- __be16 dst_port_mask;
- __le64 ntuple_filter_id_hint;
-};
-
-/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */
-struct hwrm_cfa_ntuple_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 ntuple_filter_id;
- __le32 flow_id;
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_cfa_ntuple_filter_alloc_cmd_err (size:64b/8B) */
-struct hwrm_cfa_ntuple_filter_alloc_cmd_err {
- u8 code;
- #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_LAST CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR
- u8 unused_0[7];
-};
-
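Illustrative aside (not part of the removed header): an ntuple filter refines an existing L2 filter with L3/L4 match fields; only the fields flagged in enables are interpreted, and DEST_RFS_RING_IDX redirects hits to a specific RX ring (the aRFS-style use). A sketch for a TCP/IPv4 destination match, with hypothetical helper and parameter names:

/* Sketch only -- assumes the definitions above plus <linux/if_ether.h>. */
static void fill_ntuple_tcp4(struct hwrm_cfa_ntuple_filter_alloc_input *req,
                             __le64 l2_filter_id, __be32 dst_ip,
                             __be16 dst_port, u16 ring_idx)
{
        memset(req, 0, sizeof(*req));

        /* Steer matching packets to an RX ring rather than a destination FID. */
        req->flags = cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX);

        req->enables =
                cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |
                            CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |
                            CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |
                            CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |
                            CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |
                            CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |
                            CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX);

        req->l2_filter_id = l2_filter_id;       /* from a prior l2_filter_alloc */
        req->ethertype = cpu_to_be16(ETH_P_IP);
        req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
        req->ip_protocol = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP;
        req->dst_ipaddr[0] = dst_ip;            /* IPv4 occupies only word 0 */
        req->dst_port = dst_port;
        req->rfs_ring_tbl_idx = cpu_to_le16(ring_idx);
}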
-/* hwrm_cfa_ntuple_filter_free_input (size:192b/24B) */
-struct hwrm_cfa_ntuple_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 ntuple_filter_id;
-};
-
-/* hwrm_cfa_ntuple_filter_free_output (size:128b/16B) */
-struct hwrm_cfa_ntuple_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_ntuple_filter_cfg_input (size:384b/48B) */
-struct hwrm_cfa_ntuple_filter_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL
- #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
- #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL
- __le32 flags;
- #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_FID 0x1UL
- #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_RFS_RING_IDX 0x2UL
- #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_NO_L2_CONTEXT 0x4UL
- __le64 ntuple_filter_id;
- __le32 new_dst_id;
- __le32 new_mirror_vnic_id;
- __le16 new_meter_instance_id;
- #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL
- #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_LAST CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID
- u8 unused_1[6];
-};
-
-/* hwrm_cfa_ntuple_filter_cfg_output (size:128b/16B) */
-struct hwrm_cfa_ntuple_filter_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_decap_filter_alloc_input (size:832b/104B) */
-struct hwrm_cfa_decap_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL 0x1UL
- __le32 enables;
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x1UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID 0x2UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x4UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x8UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID 0x10UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID 0x20UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID 0x40UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID 0x80UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x100UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x200UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x400UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x800UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x1000UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x2000UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x4000UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
- __be32 tunnel_id;
- u8 tunnel_type;
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
- u8 unused_0;
- __le16 unused_1;
- u8 src_macaddr[6];
- u8 unused_2[2];
- u8 dst_macaddr[6];
- __be16 ovlan_vid;
- __be16 ivlan_vid;
- __be16 t_ovlan_vid;
- __be16 t_ivlan_vid;
- __be16 ethertype;
- u8 ip_addr_type;
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6
- u8 ip_protocol;
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP
- __le16 unused_3;
- __le32 unused_4;
- __be32 src_ipaddr[4];
- __be32 dst_ipaddr[4];
- __be16 src_port;
- __be16 dst_port;
- __le16 dst_id;
- __le16 l2_ctxt_ref_id;
-};
-
-/* hwrm_cfa_decap_filter_alloc_output (size:128b/16B) */
-struct hwrm_cfa_decap_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 decap_filter_id;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_cfa_decap_filter_free_input (size:192b/24B) */
-struct hwrm_cfa_decap_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 decap_filter_id;
- u8 unused_0[4];
-};
-
-/* hwrm_cfa_decap_filter_free_output (size:128b/16B) */
-struct hwrm_cfa_decap_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_flow_alloc_input (size:1024b/128B) */
-struct hwrm_cfa_flow_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 flags;
- #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
- #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_TX 0x40UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_RX 0x80UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_MATCH_VXLAN_IP_VNI 0x100UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_VHOST_ID_USE_VLAN 0x200UL
- __le16 src_fid;
- __le32 tunnel_handle;
- __le16 action_flags;
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL_IP 0x400UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FLOW_AGING_ENABLED 0x800UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_PRI_HINT 0x1000UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NO_FLOW_COUNTER_ALLOC 0x2000UL
- __le16 dst_fid;
- __be16 l2_rewrite_vlan_tpid;
- __be16 l2_rewrite_vlan_tci;
- __le16 act_meter_id;
- __le16 ref_flow_handle;
- __be16 ethertype;
- __be16 outer_vlan_tci;
- __be16 dmac[3];
- __be16 inner_vlan_tci;
- __be16 smac[3];
- u8 ip_dst_mask_len;
- u8 ip_src_mask_len;
- __be32 ip_dst[4];
- __be32 ip_src[4];
- __be16 l4_src_port;
- __be16 l4_src_port_mask;
- __be16 l4_dst_port;
- __be16 l4_dst_port_mask;
- __be32 nat_ip_address[4];
- __be16 l2_rewrite_dmac[3];
- __be16 nat_port;
- __be16 l2_rewrite_smac[3];
- u8 ip_proto;
- u8 tunnel_type;
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
-};
-
-/* hwrm_cfa_flow_alloc_output (size:256b/32B) */
-struct hwrm_cfa_flow_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 flow_handle;
- u8 unused_0[2];
- __le32 flow_id;
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_EXT
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_TX
- __le64 ext_flow_handle;
- __le32 flow_counter_id;
- u8 unused_1[3];
- u8 valid;
-};
-
-/* hwrm_cfa_flow_alloc_cmd_err (size:64b/8B) */
-struct hwrm_cfa_flow_alloc_cmd_err {
- u8 code;
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_L2_CONTEXT_TCAM 0x1UL
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_ACTION_RECORD 0x2UL
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_COUNTER 0x3UL
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_WILD_CARD_TCAM 0x4UL
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_HASH_COLLISION 0x5UL
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_KEY_EXISTS 0x6UL
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB 0x7UL
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_LAST CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB
- u8 unused_0[7];
-};
-
-/* hwrm_cfa_flow_free_input (size:256b/32B) */
-struct hwrm_cfa_flow_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 flow_handle;
- __le16 unused_0;
- __le32 flow_counter_id;
- __le64 ext_flow_handle;
-};
-
-/* hwrm_cfa_flow_free_output (size:256b/32B) */
-struct hwrm_cfa_flow_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 packet;
- __le64 byte;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_flow_info_input (size:256b/32B) */
-struct hwrm_cfa_flow_info_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 flow_handle;
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK 0xfffUL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT 0x1000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT 0x2000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_NIC_TX 0x3000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT 0x4000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX 0x8000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT_RX 0x9000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT_RX 0xa000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_NIC_RX 0xb000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT_RX 0xc000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_LAST CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT_RX
- u8 unused_0[6];
- __le64 ext_flow_handle;
-};
-
-/* hwrm_cfa_flow_info_output (size:5632b/704B) */
-struct hwrm_cfa_flow_info_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define CFA_FLOW_INFO_RESP_FLAGS_PATH_TX 0x1UL
- #define CFA_FLOW_INFO_RESP_FLAGS_PATH_RX 0x2UL
- u8 profile;
- __le16 src_fid;
- __le16 dst_fid;
- __le16 l2_ctxt_id;
- __le64 em_info;
- __le64 tcam_info;
- __le64 vfp_tcam_info;
- __le16 ar_id;
- __le16 flow_handle;
- __le32 tunnel_handle;
- __le16 flow_timer;
- u8 unused_0[6];
- __le32 flow_key_data[130];
- __le32 flow_action_info[30];
- u8 unused_1[7];
- u8 valid;
-};
-
-/* hwrm_cfa_flow_stats_input (size:640b/80B) */
-struct hwrm_cfa_flow_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 num_flows;
- __le16 flow_handle_0;
- __le16 flow_handle_1;
- __le16 flow_handle_2;
- __le16 flow_handle_3;
- __le16 flow_handle_4;
- __le16 flow_handle_5;
- __le16 flow_handle_6;
- __le16 flow_handle_7;
- __le16 flow_handle_8;
- __le16 flow_handle_9;
- u8 unused_0[2];
- __le32 flow_id_0;
- __le32 flow_id_1;
- __le32 flow_id_2;
- __le32 flow_id_3;
- __le32 flow_id_4;
- __le32 flow_id_5;
- __le32 flow_id_6;
- __le32 flow_id_7;
- __le32 flow_id_8;
- __le32 flow_id_9;
-};
-
-/* hwrm_cfa_flow_stats_output (size:1408b/176B) */
-struct hwrm_cfa_flow_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 packet_0;
- __le64 packet_1;
- __le64 packet_2;
- __le64 packet_3;
- __le64 packet_4;
- __le64 packet_5;
- __le64 packet_6;
- __le64 packet_7;
- __le64 packet_8;
- __le64 packet_9;
- __le64 byte_0;
- __le64 byte_1;
- __le64 byte_2;
- __le64 byte_3;
- __le64 byte_4;
- __le64 byte_5;
- __le64 byte_6;
- __le64 byte_7;
- __le64 byte_8;
- __le64 byte_9;
- __le16 flow_hits;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_cfa_vfr_alloc_input (size:448b/56B) */
-struct hwrm_cfa_vfr_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vf_id;
- __le16 reserved;
- u8 unused_0[4];
- char vfr_name[32];
-};
-
-/* hwrm_cfa_vfr_alloc_output (size:128b/16B) */
-struct hwrm_cfa_vfr_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 rx_cfa_code;
- __le16 tx_cfa_action;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_cfa_vfr_free_input (size:448b/56B) */
-struct hwrm_cfa_vfr_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- char vfr_name[32];
- __le16 vf_id;
- __le16 reserved;
- u8 unused_0[4];
-};
-
-/* hwrm_cfa_vfr_free_output (size:128b/16B) */
-struct hwrm_cfa_vfr_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_eem_qcaps_input (size:192b/24B) */
-struct hwrm_cfa_eem_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_EEM_QCAPS_REQ_FLAGS_PATH_TX 0x1UL
- #define CFA_EEM_QCAPS_REQ_FLAGS_PATH_RX 0x2UL
- #define CFA_EEM_QCAPS_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL
- __le32 unused_0;
-};
-
-/* hwrm_cfa_eem_qcaps_output (size:320b/40B) */
-struct hwrm_cfa_eem_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_TX 0x1UL
- #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_RX 0x2UL
- #define CFA_EEM_QCAPS_RESP_FLAGS_CENTRALIZED_MEMORY_MODEL_SUPPORTED 0x4UL
- #define CFA_EEM_QCAPS_RESP_FLAGS_DETACHED_CENTRALIZED_MEMORY_MODEL_SUPPORTED 0x8UL
- __le32 unused_0;
- __le32 supported;
- #define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY0_TABLE 0x1UL
- #define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY1_TABLE 0x2UL
- #define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_RECORD_TABLE 0x4UL
- #define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_FLOW_COUNTERS_TABLE 0x8UL
- #define CFA_EEM_QCAPS_RESP_SUPPORTED_FID_TABLE 0x10UL
- __le32 max_entries_supported;
- __le16 key_entry_size;
- __le16 record_entry_size;
- __le16 efc_entry_size;
- __le16 fid_entry_size;
- u8 unused_1[7];
- u8 valid;
-};
-
-/* hwrm_cfa_eem_cfg_input (size:384b/48B) */
-struct hwrm_cfa_eem_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_EEM_CFG_REQ_FLAGS_PATH_TX 0x1UL
- #define CFA_EEM_CFG_REQ_FLAGS_PATH_RX 0x2UL
- #define CFA_EEM_CFG_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL
- #define CFA_EEM_CFG_REQ_FLAGS_SECONDARY_PF 0x8UL
- __le16 group_id;
- __le16 unused_0;
- __le32 num_entries;
- __le32 unused_1;
- __le16 key0_ctx_id;
- __le16 key1_ctx_id;
- __le16 record_ctx_id;
- __le16 efc_ctx_id;
- __le16 fid_ctx_id;
- __le16 unused_2;
- __le32 unused_3;
-};
-
-/* hwrm_cfa_eem_cfg_output (size:128b/16B) */
-struct hwrm_cfa_eem_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_eem_qcfg_input (size:192b/24B) */
-struct hwrm_cfa_eem_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_EEM_QCFG_REQ_FLAGS_PATH_TX 0x1UL
- #define CFA_EEM_QCFG_REQ_FLAGS_PATH_RX 0x2UL
- __le32 unused_0;
-};
-
-/* hwrm_cfa_eem_qcfg_output (size:256b/32B) */
-struct hwrm_cfa_eem_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define CFA_EEM_QCFG_RESP_FLAGS_PATH_TX 0x1UL
- #define CFA_EEM_QCFG_RESP_FLAGS_PATH_RX 0x2UL
- #define CFA_EEM_QCFG_RESP_FLAGS_PREFERRED_OFFLOAD 0x4UL
- __le32 num_entries;
- __le16 key0_ctx_id;
- __le16 key1_ctx_id;
- __le16 record_ctx_id;
- __le16 efc_ctx_id;
- __le16 fid_ctx_id;
- u8 unused_2[5];
- u8 valid;
-};
-
-/* hwrm_cfa_eem_op_input (size:192b/24B) */
-struct hwrm_cfa_eem_op_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_EEM_OP_REQ_FLAGS_PATH_TX 0x1UL
- #define CFA_EEM_OP_REQ_FLAGS_PATH_RX 0x2UL
- __le16 unused_0;
- __le16 op;
- #define CFA_EEM_OP_REQ_OP_RESERVED 0x0UL
- #define CFA_EEM_OP_REQ_OP_EEM_DISABLE 0x1UL
- #define CFA_EEM_OP_REQ_OP_EEM_ENABLE 0x2UL
- #define CFA_EEM_OP_REQ_OP_EEM_CLEANUP 0x3UL
- #define CFA_EEM_OP_REQ_OP_LAST CFA_EEM_OP_REQ_OP_EEM_CLEANUP
-};
-
-/* hwrm_cfa_eem_op_output (size:128b/16B) */
-struct hwrm_cfa_eem_op_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_adv_flow_mgnt_qcaps_input (size:256b/32B) */
-struct hwrm_cfa_adv_flow_mgnt_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 unused_0[4];
-};
-
-/* hwrm_cfa_adv_flow_mgnt_qcaps_output (size:128b/16B) */
-struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ARP_SUPPORTED 0x1000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED 0x2000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ETHERTYPE_IP_SUPPORTED 0x4000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TRUFLOW_CAPABLE 0x8000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_FILTER_TRAFFIC_TYPE_L2_ROCE_SUPPORTED 0x10000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_LAG_SUPPORTED 0x20000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_NO_L2CTX_SUPPORTED 0x40000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NIC_FLOW_STATS_SUPPORTED 0x80000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED 0x100000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED 0x200000UL
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
-struct hwrm_tunnel_dst_port_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 tunnel_type;
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE 0x11UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
- u8 tunnel_next_proto;
- u8 unused_0[6];
-};
-
-/* hwrm_tunnel_dst_port_query_output (size:128b/16B) */
-struct hwrm_tunnel_dst_port_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 tunnel_dst_port_id;
- __be16 tunnel_dst_port_val;
- u8 upar_in_use;
- #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR0 0x1UL
- #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR1 0x2UL
- #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR2 0x4UL
- #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR3 0x8UL
- #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR4 0x10UL
- #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR5 0x20UL
- #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR6 0x40UL
- #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR7 0x80UL
- u8 status;
- #define TUNNEL_DST_PORT_QUERY_RESP_STATUS_CHIP_LEVEL 0x1UL
- #define TUNNEL_DST_PORT_QUERY_RESP_STATUS_FUNC_LEVEL 0x2UL
- u8 unused_0;
- u8 valid;
-};
-
-/* hwrm_tunnel_dst_port_alloc_input (size:192b/24B) */
-struct hwrm_tunnel_dst_port_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 tunnel_type;
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE 0x11UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
- u8 tunnel_next_proto;
- __be16 tunnel_dst_port_val;
- u8 unused_0[4];
-};
-
-/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */
-struct hwrm_tunnel_dst_port_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 tunnel_dst_port_id;
- u8 error_info;
- #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_SUCCESS 0x0UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ALLOCATED 0x1UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_NO_RESOURCE 0x2UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ENABLED 0x3UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ENABLED
- u8 upar_in_use;
- #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR0 0x1UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR1 0x2UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR2 0x4UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR3 0x8UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR4 0x10UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR5 0x20UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR6 0x40UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR7 0x80UL
- u8 unused_0[3];
- u8 valid;
-};
-
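Illustrative aside (not part of the removed header): tunnel_dst_port_alloc tells the firmware which UDP destination port to parse as a given tunnel type; the tunnel_dst_port_id in the response is what tunnel_dst_port_free later takes, and error_info reports conflicts such as an already-allocated port. A minimal sketch, assuming a hypothetical fill helper:

/* Sketch only -- assumes the definitions above. */
static void fill_vxlan_port_alloc(struct hwrm_tunnel_dst_port_alloc_input *req,
                                  u16 udp_port)
{
        memset(req, 0, sizeof(*req));
        req->tunnel_type = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
        /* The port value travels big-endian, exactly as seen on the wire. */
        req->tunnel_dst_port_val = cpu_to_be16(udp_port);   /* e.g. 4789 */
}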
-/* hwrm_tunnel_dst_port_free_input (size:192b/24B) */
-struct hwrm_tunnel_dst_port_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 tunnel_type;
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE 0x11UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
- u8 tunnel_next_proto;
- __le16 tunnel_dst_port_id;
- u8 unused_0[4];
-};
-
-/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */
-struct hwrm_tunnel_dst_port_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 error_info;
- #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_SUCCESS 0x0UL
- #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_OWNER 0x1UL
- #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_ALLOCATED 0x2UL
- #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_ALLOCATED
- u8 unused_1[6];
- u8 valid;
-};
-
-/* ctx_hw_stats (size:1280b/160B) */
-struct ctx_hw_stats {
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_error_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_error_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 tpa_pkts;
- __le64 tpa_bytes;
- __le64 tpa_events;
- __le64 tpa_aborts;
-};
-
-/* ctx_hw_stats_ext (size:1408b/176B) */
-struct ctx_hw_stats_ext {
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_error_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_error_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 rx_tpa_eligible_pkt;
- __le64 rx_tpa_eligible_bytes;
- __le64 rx_tpa_pkt;
- __le64 rx_tpa_bytes;
- __le64 rx_tpa_errors;
- __le64 rx_tpa_events;
-};
-
-/* hwrm_stat_ctx_alloc_input (size:384b/48B) */
-struct hwrm_stat_ctx_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 stats_dma_addr;
- __le32 update_period_ms;
- u8 stat_ctx_flags;
- #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
- #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_DUP_HOST_BUF 0x2UL
- u8 unused_0;
- __le16 stats_dma_length;
- __le16 flags;
- #define STAT_CTX_ALLOC_REQ_FLAGS_STEERING_TAG_VALID 0x1UL
- __le16 steering_tag;
- __le32 stat_ctx_id;
- __le16 alloc_seq_id;
- u8 unused_1[6];
-};
-
-/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
-struct hwrm_stat_ctx_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 stat_ctx_id;
- u8 unused_0[3];
- u8 valid;
-};
-
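Illustrative aside (not part of the removed header): a stats context binds a firmware counter block to a host DMA buffer (laid out as ctx_hw_stats or ctx_hw_stats_ext above) that firmware refreshes every update_period_ms. A sketch of the request, helper name hypothetical and buffer allocation not shown:

/* Sketch only -- assumes the definitions above and <linux/types.h>. */
static void fill_stat_ctx_alloc(struct hwrm_stat_ctx_alloc_input *req,
                                dma_addr_t stats_dma, u16 len_bytes)
{
        memset(req, 0, sizeof(*req));
        /* Host buffer the firmware DMAs the counters into. */
        req->stats_dma_addr = cpu_to_le64(stats_dma);
        req->stats_dma_length = cpu_to_le16(len_bytes);  /* e.g. sizeof(struct ctx_hw_stats_ext) */
        req->update_period_ms = cpu_to_le32(1000);       /* refresh roughly once per second */
}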
-/* hwrm_stat_ctx_free_input (size:192b/24B) */
-struct hwrm_stat_ctx_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 stat_ctx_id;
- u8 unused_0[4];
-};
-
-/* hwrm_stat_ctx_free_output (size:128b/16B) */
-struct hwrm_stat_ctx_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 stat_ctx_id;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_stat_ctx_query_input (size:192b/24B) */
-struct hwrm_stat_ctx_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 stat_ctx_id;
- u8 flags;
- #define STAT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL
- u8 unused_0[3];
-};
-
-/* hwrm_stat_ctx_query_output (size:1408b/176B) */
-struct hwrm_stat_ctx_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_error_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_error_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 rx_agg_pkts;
- __le64 rx_agg_bytes;
- __le64 rx_agg_events;
- __le64 rx_agg_aborts;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_stat_ext_ctx_query_input (size:192b/24B) */
-struct hwrm_stat_ext_ctx_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 stat_ctx_id;
- u8 flags;
- #define STAT_EXT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL
- u8 unused_0[3];
-};
-
-/* hwrm_stat_ext_ctx_query_output (size:1536b/192B) */
-struct hwrm_stat_ext_ctx_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_error_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_error_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 rx_tpa_eligible_pkt;
- __le64 rx_tpa_eligible_bytes;
- __le64 rx_tpa_pkt;
- __le64 rx_tpa_bytes;
- __le64 rx_tpa_errors;
- __le64 rx_tpa_events;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */
-struct hwrm_stat_ctx_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 stat_ctx_id;
- u8 unused_0[4];
-};
-
-/* hwrm_stat_ctx_clr_stats_output (size:128b/16B) */
-struct hwrm_stat_ctx_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_pcie_qstats_input (size:256b/32B) */
-struct hwrm_pcie_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 pcie_stat_size;
- u8 unused_0[6];
- __le64 pcie_stat_host_addr;
-};
-
-/* hwrm_pcie_qstats_output (size:128b/16B) */
-struct hwrm_pcie_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 pcie_stat_size;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* pcie_ctx_hw_stats (size:768b/96B) */
-struct pcie_ctx_hw_stats {
- __le64 pcie_pl_signal_integrity;
- __le64 pcie_dl_signal_integrity;
- __le64 pcie_tl_signal_integrity;
- __le64 pcie_link_integrity;
- __le64 pcie_tx_traffic_rate;
- __le64 pcie_rx_traffic_rate;
- __le64 pcie_tx_dllp_statistics;
- __le64 pcie_rx_dllp_statistics;
- __le64 pcie_equalization_time;
- __le32 pcie_ltssm_histogram[4];
- __le64 pcie_recovery_histogram;
-};
-
-/* hwrm_stat_generic_qstats_input (size:256b/32B) */
-struct hwrm_stat_generic_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 generic_stat_size;
- u8 flags;
- #define STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
- u8 unused_0[5];
- __le64 generic_stat_host_addr;
-};
-
-/* hwrm_stat_generic_qstats_output (size:128b/16B) */
-struct hwrm_stat_generic_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 generic_stat_size;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* generic_sw_hw_stats (size:1472b/184B) */
-struct generic_sw_hw_stats {
- __le64 pcie_statistics_tx_tlp;
- __le64 pcie_statistics_rx_tlp;
- __le64 pcie_credit_fc_hdr_posted;
- __le64 pcie_credit_fc_hdr_nonposted;
- __le64 pcie_credit_fc_hdr_cmpl;
- __le64 pcie_credit_fc_data_posted;
- __le64 pcie_credit_fc_data_nonposted;
- __le64 pcie_credit_fc_data_cmpl;
- __le64 pcie_credit_fc_tgt_nonposted;
- __le64 pcie_credit_fc_tgt_data_posted;
- __le64 pcie_credit_fc_tgt_hdr_posted;
- __le64 pcie_credit_fc_cmpl_hdr_posted;
- __le64 pcie_credit_fc_cmpl_data_posted;
- __le64 pcie_cmpl_longest;
- __le64 pcie_cmpl_shortest;
- __le64 cache_miss_count_cfcq;
- __le64 cache_miss_count_cfcs;
- __le64 cache_miss_count_cfcc;
- __le64 cache_miss_count_cfcm;
- __le64 hw_db_recov_dbs_dropped;
- __le64 hw_db_recov_drops_serviced;
- __le64 hw_db_recov_dbs_recovered;
- __le64 hw_db_recov_oo_drop_count;
-};
-
-/* hwrm_fw_reset_input (size:192b/24B) */
-struct hwrm_fw_reset_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 embedded_proc_type;
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT 0x7UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_IMPACTLESS_ACTIVATION 0x8UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_IMPACTLESS_ACTIVATION
- u8 selfrst_status;
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL
- #define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE
- u8 host_idx;
- u8 flags;
- #define FW_RESET_REQ_FLAGS_RESET_GRACEFUL 0x1UL
- #define FW_RESET_REQ_FLAGS_FW_ACTIVATION 0x2UL
- u8 unused_0[4];
-};
-
-/* hwrm_fw_reset_output (size:128b/16B) */
-struct hwrm_fw_reset_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 selfrst_status;
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL
- #define FW_RESET_RESP_SELFRST_STATUS_LAST FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE
- u8 unused_0[6];
- u8 valid;
-};
-
-/* hwrm_fw_qstatus_input (size:192b/24B) */
-struct hwrm_fw_qstatus_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 embedded_proc_type;
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_LAST FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP
- u8 unused_0[7];
-};
-
-/* hwrm_fw_qstatus_output (size:128b/16B) */
-struct hwrm_fw_qstatus_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 selfrst_status;
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER 0x3UL
- #define FW_QSTATUS_RESP_SELFRST_STATUS_LAST FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER
- u8 nvm_option_action_status;
- #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_NONE 0x0UL
- #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_HOTRESET 0x1UL
- #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_WARMBOOT 0x2UL
- #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT 0x3UL
- #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_LAST FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_fw_set_time_input (size:256b/32B) */
-struct hwrm_fw_set_time_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 year;
- #define FW_SET_TIME_REQ_YEAR_UNKNOWN 0x0UL
- #define FW_SET_TIME_REQ_YEAR_LAST FW_SET_TIME_REQ_YEAR_UNKNOWN
- u8 month;
- u8 day;
- u8 hour;
- u8 minute;
- u8 second;
- u8 unused_0;
- __le16 millisecond;
- __le16 zone;
- #define FW_SET_TIME_REQ_ZONE_UTC 0
- #define FW_SET_TIME_REQ_ZONE_UNKNOWN 65535
- #define FW_SET_TIME_REQ_ZONE_LAST FW_SET_TIME_REQ_ZONE_UNKNOWN
- u8 unused_1[4];
-};
-
-/* hwrm_fw_set_time_output (size:128b/16B) */
-struct hwrm_fw_set_time_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_struct_hdr (size:128b/16B) */
-struct hwrm_struct_hdr {
- __le16 struct_id;
- #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL
- #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
- #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
- #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
- #define STRUCT_HDR_STRUCT_ID_POWER_BKUP 0x427UL
- #define STRUCT_HDR_STRUCT_ID_PEER_MMAP 0x429UL
- #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
- #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
- #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
- #define STRUCT_HDR_STRUCT_ID_MSIX_PER_VF 0xc8UL
- #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_MSIX_PER_VF
- __le16 len;
- u8 version;
- u8 count;
- __le16 subtype;
- __le16 next_offset;
- #define STRUCT_HDR_NEXT_OFFSET_LAST 0x0UL
- u8 unused_0[6];
-};
-
-/* hwrm_struct_data_dcbx_app (size:64b/8B) */
-struct hwrm_struct_data_dcbx_app {
- __be16 protocol_id;
- u8 protocol_selector;
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT 0x2UL
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT 0x3UL
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_LAST STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT
- u8 priority;
- u8 valid;
- u8 unused_0[3];
-};
-
-/* hwrm_fw_set_structured_data_input (size:256b/32B) */
-struct hwrm_fw_set_structured_data_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 src_data_addr;
- __le16 data_len;
- u8 hdr_cnt;
- u8 unused_0[5];
-};
-
-/* hwrm_fw_set_structured_data_output (size:128b/16B) */
-struct hwrm_fw_set_structured_data_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_fw_set_structured_data_cmd_err (size:64b/8B) */
-struct hwrm_fw_set_structured_data_cmd_err {
- u8 code;
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT 0x2UL
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID
- u8 unused_0[7];
-};
-
-/* hwrm_fw_get_structured_data_input (size:256b/32B) */
-struct hwrm_fw_get_structured_data_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 dest_data_addr;
- __le16 data_len;
- __le16 structure_id;
- __le16 subtype;
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_UNUSED 0x0UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_ALL 0xffffUL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_ADMIN 0x100UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_PEER 0x101UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_OPERATIONAL 0x102UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER 0x201UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_LAST FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL
- u8 count;
- u8 unused_0;
-};
-
-/* hwrm_fw_get_structured_data_output (size:128b/16B) */
-struct hwrm_fw_get_structured_data_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 hdr_cnt;
- u8 unused_0[6];
- u8 valid;
-};
-
-/* hwrm_fw_get_structured_data_cmd_err (size:64b/8B) */
-struct hwrm_fw_get_structured_data_cmd_err {
- u8 code;
- #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
- #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID
- u8 unused_0[7];
-};
-
-/* hwrm_fw_livepatch_query_input (size:192b/24B) */
-struct hwrm_fw_livepatch_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 fw_target;
- #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_COMMON_FW 0x1UL
- #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW 0x2UL
- #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_LAST FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW
- u8 unused_0[7];
-};
-
-/* hwrm_fw_livepatch_query_output (size:640b/80B) */
-struct hwrm_fw_livepatch_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- char install_ver[32];
- char active_ver[32];
- __le16 status_flags;
- #define FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL 0x1UL
- #define FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE 0x2UL
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_fw_livepatch_input (size:256b/32B) */
-struct hwrm_fw_livepatch_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 opcode;
- #define FW_LIVEPATCH_REQ_OPCODE_ACTIVATE 0x1UL
- #define FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE 0x2UL
- #define FW_LIVEPATCH_REQ_OPCODE_LAST FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE
- u8 fw_target;
- #define FW_LIVEPATCH_REQ_FW_TARGET_COMMON_FW 0x1UL
- #define FW_LIVEPATCH_REQ_FW_TARGET_SECURE_FW 0x2UL
- #define FW_LIVEPATCH_REQ_FW_TARGET_LAST FW_LIVEPATCH_REQ_FW_TARGET_SECURE_FW
- u8 loadtype;
- #define FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL 0x1UL
- #define FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT 0x2UL
- #define FW_LIVEPATCH_REQ_LOADTYPE_LAST FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT
- u8 flags;
- __le32 patch_len;
- __le64 host_addr;
-};
-
-/* hwrm_fw_livepatch_output (size:128b/16B) */
-struct hwrm_fw_livepatch_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_fw_livepatch_cmd_err (size:64b/8B) */
-struct hwrm_fw_livepatch_cmd_err {
- u8 code;
- #define FW_LIVEPATCH_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_OPCODE 0x1UL
- #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_TARGET 0x2UL
- #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_SUPPORTED 0x3UL
- #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_INSTALLED 0x4UL
- #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_PATCHED 0x5UL
- #define FW_LIVEPATCH_CMD_ERR_CODE_AUTH_FAIL 0x6UL
- #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_HEADER 0x7UL
- #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_SIZE 0x8UL
- #define FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED 0x9UL
- #define FW_LIVEPATCH_CMD_ERR_CODE_LAST FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED
- u8 unused_0[7];
-};
-
-/* hwrm_exec_fwd_resp_input (size:1024b/128B) */
-struct hwrm_exec_fwd_resp_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 encap_request[26];
- __le16 encap_resp_target_id;
- u8 unused_0[6];
-};
-
-/* hwrm_exec_fwd_resp_output (size:128b/16B) */
-struct hwrm_exec_fwd_resp_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_reject_fwd_resp_input (size:1024b/128B) */
-struct hwrm_reject_fwd_resp_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 encap_request[26];
- __le16 encap_resp_target_id;
- u8 unused_0[6];
-};
-
-/* hwrm_reject_fwd_resp_output (size:128b/16B) */
-struct hwrm_reject_fwd_resp_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_fwd_resp_input (size:1024b/128B) */
-struct hwrm_fwd_resp_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 encap_resp_target_id;
- __le16 encap_resp_cmpl_ring;
- __le16 encap_resp_len;
- u8 unused_0;
- u8 unused_1;
- __le64 encap_resp_addr;
- __le32 encap_resp[24];
-};
-
-/* hwrm_fwd_resp_output (size:128b/16B) */
-struct hwrm_fwd_resp_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_fwd_async_event_cmpl_input (size:320b/40B) */
-struct hwrm_fwd_async_event_cmpl_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 encap_async_event_target_id;
- u8 unused_0[6];
- __le32 encap_async_event_cmpl[4];
-};
-
-/* hwrm_fwd_async_event_cmpl_output (size:128b/16B) */
-struct hwrm_fwd_async_event_cmpl_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_temp_monitor_query_input (size:128b/16B) */
-struct hwrm_temp_monitor_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_temp_monitor_query_output (size:192b/24B) */
-struct hwrm_temp_monitor_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 temp;
- u8 phy_temp;
- u8 om_temp;
- u8 flags;
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_TEMP_NOT_AVAILABLE 0x1UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_PHY_TEMP_NOT_AVAILABLE 0x2UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_NOT_PRESENT 0x4UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_TEMP_NOT_AVAILABLE 0x8UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_EXT_TEMP_FIELDS_AVAILABLE 0x10UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_THRESHOLD_VALUES_AVAILABLE 0x20UL
- u8 temp2;
- u8 phy_temp2;
- u8 om_temp2;
- u8 warn_threshold;
- u8 critical_threshold;
- u8 fatal_threshold;
- u8 shutdown_threshold;
- u8 unused_0[4];
- u8 valid;
-};
-
-/* hwrm_wol_filter_alloc_input (size:512b/64B) */
-struct hwrm_wol_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- __le32 enables;
- #define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS 0x1UL
- #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET 0x2UL
- #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_SIZE 0x4UL
- #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_ADDR 0x8UL
- #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_ADDR 0x10UL
- #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_SIZE 0x20UL
- __le16 port_id;
- u8 wol_type;
- #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT 0x0UL
- #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP 0x1UL
- #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID 0xffUL
- #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_LAST WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID
- u8 unused_0[5];
- u8 mac_address[6];
- __le16 pattern_offset;
- __le16 pattern_buf_size;
- __le16 pattern_mask_size;
- u8 unused_1[4];
- __le64 pattern_buf_addr;
- __le64 pattern_mask_addr;
-};
-
-/* hwrm_wol_filter_alloc_output (size:128b/16B) */
-struct hwrm_wol_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 wol_filter_id;
- u8 unused_0[6];
- u8 valid;
-};
-
-/* hwrm_wol_filter_free_input (size:256b/32B) */
-struct hwrm_wol_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define WOL_FILTER_FREE_REQ_FLAGS_FREE_ALL_WOL_FILTERS 0x1UL
- __le32 enables;
- #define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID 0x1UL
- __le16 port_id;
- u8 wol_filter_id;
- u8 unused_0[5];
-};
-
-/* hwrm_wol_filter_free_output (size:128b/16B) */
-struct hwrm_wol_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_wol_filter_qcfg_input (size:448b/56B) */
-struct hwrm_wol_filter_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 handle;
- u8 unused_0[4];
- __le64 pattern_buf_addr;
- __le16 pattern_buf_size;
- u8 unused_1[6];
- __le64 pattern_mask_addr;
- __le16 pattern_mask_size;
- u8 unused_2[6];
-};
-
-/* hwrm_wol_filter_qcfg_output (size:256b/32B) */
-struct hwrm_wol_filter_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 next_handle;
- u8 wol_filter_id;
- u8 wol_type;
- #define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT 0x0UL
- #define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP 0x1UL
- #define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID 0xffUL
- #define WOL_FILTER_QCFG_RESP_WOL_TYPE_LAST WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID
- __le32 unused_0;
- u8 mac_address[6];
- __le16 pattern_offset;
- __le16 pattern_size;
- __le16 pattern_mask_size;
- u8 unused_1[3];
- u8 valid;
-};
-
-/* hwrm_wol_reason_qcfg_input (size:320b/40B) */
-struct hwrm_wol_reason_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
- __le64 wol_pkt_buf_addr;
- __le16 wol_pkt_buf_size;
- u8 unused_1[6];
-};
-
-/* hwrm_wol_reason_qcfg_output (size:128b/16B) */
-struct hwrm_wol_reason_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 wol_filter_id;
- u8 wol_reason;
- #define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT 0x0UL
- #define WOL_REASON_QCFG_RESP_WOL_REASON_BMP 0x1UL
- #define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID 0xffUL
- #define WOL_REASON_QCFG_RESP_WOL_REASON_LAST WOL_REASON_QCFG_RESP_WOL_REASON_INVALID
- u8 wol_pkt_len;
- u8 unused_0[4];
- u8 valid;
-};
-
-/* hwrm_dbg_read_direct_input (size:256b/32B) */
-struct hwrm_dbg_read_direct_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
- __le32 read_addr;
- __le32 read_len32;
-};
-
-/* hwrm_dbg_read_direct_output (size:128b/16B) */
-struct hwrm_dbg_read_direct_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 crc32;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_dbg_qcaps_input (size:192b/24B) */
-struct hwrm_dbg_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0[6];
-};
-
-/* hwrm_dbg_qcaps_output (size:192b/24B) */
-struct hwrm_dbg_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- u8 unused_0[2];
- __le32 coredump_component_disable_caps;
- #define DBG_QCAPS_RESP_COREDUMP_COMPONENT_DISABLE_CAPS_NVRAM 0x1UL
- __le32 flags;
- #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_NVM 0x1UL
- #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR 0x2UL
- #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR 0x4UL
- #define DBG_QCAPS_RESP_FLAGS_USEQ 0x8UL
- #define DBG_QCAPS_RESP_FLAGS_COREDUMP_HOST_DDR 0x10UL
- #define DBG_QCAPS_RESP_FLAGS_COREDUMP_HOST_CAPTURE 0x20UL
- #define DBG_QCAPS_RESP_FLAGS_PTRACE 0x40UL
- u8 unused_1[3];
- u8 valid;
-};
-
-/* hwrm_dbg_qcfg_input (size:192b/24B) */
-struct hwrm_dbg_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- __le16 flags;
- #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_MASK 0x3UL
- #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_SFT 0
- #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_NVM 0x0UL
- #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_HOST_DDR 0x1UL
- #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR 0x2UL
- #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_LAST DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR
- __le32 coredump_component_disable_flags;
- #define DBG_QCFG_REQ_COREDUMP_COMPONENT_DISABLE_FLAGS_NVRAM 0x1UL
-};
-
-/* hwrm_dbg_qcfg_output (size:256b/32B) */
-struct hwrm_dbg_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- u8 unused_0[2];
- __le32 coredump_size;
- __le32 flags;
- #define DBG_QCFG_RESP_FLAGS_UART_LOG 0x1UL
- #define DBG_QCFG_RESP_FLAGS_UART_LOG_SECONDARY 0x2UL
- #define DBG_QCFG_RESP_FLAGS_FW_TRACE 0x4UL
- #define DBG_QCFG_RESP_FLAGS_FW_TRACE_SECONDARY 0x8UL
- #define DBG_QCFG_RESP_FLAGS_DEBUG_NOTIFY 0x10UL
- #define DBG_QCFG_RESP_FLAGS_JTAG_DEBUG 0x20UL
- __le16 async_cmpl_ring;
- u8 unused_2[2];
- __le32 crashdump_size;
- u8 unused_3[3];
- u8 valid;
-};
-
-/* hwrm_dbg_crashdump_medium_cfg_input (size:320b/40B) */
-struct hwrm_dbg_crashdump_medium_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 output_dest_flags;
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_TYPE_DDR 0x1UL
- __le16 pg_size_lvl;
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_MASK 0x3UL
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_SFT 0
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_0 0x0UL
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_1 0x1UL
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2 0x2UL
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_MASK 0x1cUL
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_SFT 2
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K (0x0UL << 2)
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K (0x1UL << 2)
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K (0x2UL << 2)
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_2M (0x3UL << 2)
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8M (0x4UL << 2)
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G (0x5UL << 2)
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_MASK 0xffe0UL
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_SFT 5
- __le32 size;
- __le32 coredump_component_disable_flags;
- #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_NVRAM 0x1UL
- __le32 unused_0;
- __le64 pbl;
-};
-
-/* hwrm_dbg_crashdump_medium_cfg_output (size:128b/16B) */
-struct hwrm_dbg_crashdump_medium_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_1[7];
- u8 valid;
-};
-
-/* coredump_segment_record (size:128b/16B) */
-struct coredump_segment_record {
- __le16 component_id;
- __le16 segment_id;
- __le16 max_instances;
- u8 version_hi;
- u8 version_low;
- u8 seg_flags;
- u8 compress_flags;
- #define SFLAG_COMPRESSED_ZLIB 0x1UL
- u8 unused_0[2];
- __le32 segment_len;
-};
-
-/* hwrm_dbg_coredump_list_input (size:256b/32B) */
-struct hwrm_dbg_coredump_list_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
- __le32 host_buf_len;
- __le16 seq_no;
- u8 flags;
- #define DBG_COREDUMP_LIST_REQ_FLAGS_CRASHDUMP 0x1UL
- u8 unused_0[1];
-};
-
-/* hwrm_dbg_coredump_list_output (size:128b/16B) */
-struct hwrm_dbg_coredump_list_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define DBG_COREDUMP_LIST_RESP_FLAGS_MORE 0x1UL
- u8 unused_0;
- __le16 total_segments;
- __le16 data_len;
- u8 unused_1;
- u8 valid;
-};
-
-/* hwrm_dbg_coredump_initiate_input (size:256b/32B) */
-struct hwrm_dbg_coredump_initiate_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 component_id;
- __le16 segment_id;
- __le16 instance;
- __le16 unused_0;
- u8 seg_flags;
- #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_LIVE_DATA 0x1UL
- #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_CRASH_DATA 0x2UL
- #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_COLLECT_CTX_L1_CACHE 0x4UL
- u8 unused_1[7];
-};
-
-/* hwrm_dbg_coredump_initiate_output (size:128b/16B) */
-struct hwrm_dbg_coredump_initiate_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* coredump_data_hdr (size:128b/16B) */
-struct coredump_data_hdr {
- __le32 address;
- __le32 flags_length;
- #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_MASK 0xffffffUL
- #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_SFT 0
- #define COREDUMP_DATA_HDR_FLAGS_LENGTH_INDIRECT_ACCESS 0x1000000UL
- __le32 instance;
- __le32 next_offset;
-};
-
-/* hwrm_dbg_coredump_retrieve_input (size:448b/56B) */
-struct hwrm_dbg_coredump_retrieve_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
- __le32 host_buf_len;
- __le32 unused_0;
- __le16 component_id;
- __le16 segment_id;
- __le16 instance;
- __le16 unused_1;
- u8 seg_flags;
- u8 unused_2;
- __le16 unused_3;
- __le32 unused_4;
- __le32 seq_no;
- __le32 unused_5;
-};
-
-/* hwrm_dbg_coredump_retrieve_output (size:128b/16B) */
-struct hwrm_dbg_coredump_retrieve_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define DBG_COREDUMP_RETRIEVE_RESP_FLAGS_MORE 0x1UL
- u8 unused_0;
- __le16 data_len;
- u8 unused_1[3];
- u8 valid;
-};
-
-/* hwrm_dbg_ring_info_get_input (size:192b/24B) */
-struct hwrm_dbg_ring_info_get_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 ring_type;
- #define DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define DBG_RING_INFO_GET_REQ_RING_TYPE_TX 0x1UL
- #define DBG_RING_INFO_GET_REQ_RING_TYPE_RX 0x2UL
- #define DBG_RING_INFO_GET_REQ_RING_TYPE_NQ 0x3UL
- #define DBG_RING_INFO_GET_REQ_RING_TYPE_LAST DBG_RING_INFO_GET_REQ_RING_TYPE_NQ
- u8 unused_0[3];
- __le32 fw_ring_id;
-};
-
-/* hwrm_dbg_ring_info_get_output (size:192b/24B) */
-struct hwrm_dbg_ring_info_get_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 producer_index;
- __le32 consumer_index;
- __le32 cag_vector_ctrl;
- __le16 st_tag;
- u8 unused_0;
- u8 valid;
-};
-
-/* hwrm_nvm_read_input (size:320b/40B) */
-struct hwrm_nvm_read_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
- __le16 dir_idx;
- u8 unused_0[2];
- __le32 offset;
- __le32 len;
- u8 unused_1[4];
-};
-
-/* hwrm_nvm_read_output (size:128b/16B) */
-struct hwrm_nvm_read_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_get_dir_entries_input (size:192b/24B) */
-struct hwrm_nvm_get_dir_entries_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
-};
-
-/* hwrm_nvm_get_dir_entries_output (size:128b/16B) */
-struct hwrm_nvm_get_dir_entries_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_get_dir_info_input (size:128b/16B) */
-struct hwrm_nvm_get_dir_info_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_nvm_get_dir_info_output (size:192b/24B) */
-struct hwrm_nvm_get_dir_info_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 entries;
- __le32 entry_length;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_write_input (size:448b/56B) */
-struct hwrm_nvm_write_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_src_addr;
- __le16 dir_type;
- __le16 dir_ordinal;
- __le16 dir_ext;
- __le16 dir_attr;
- __le32 dir_data_length;
- __le16 option;
- __le16 flags;
- #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL
- #define NVM_WRITE_REQ_FLAGS_BATCH_MODE 0x2UL
- #define NVM_WRITE_REQ_FLAGS_BATCH_LAST 0x4UL
- __le32 dir_item_length;
- __le32 offset;
- __le32 len;
- __le32 unused_0;
-};
-
-/* hwrm_nvm_write_output (size:128b/16B) */
-struct hwrm_nvm_write_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 dir_item_length;
- __le16 dir_idx;
- u8 unused_0;
- u8 valid;
-};
-
-/* hwrm_nvm_write_cmd_err (size:64b/8B) */
-struct hwrm_nvm_write_cmd_err {
- u8 code;
- #define NVM_WRITE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR 0x1UL
- #define NVM_WRITE_CMD_ERR_CODE_NO_SPACE 0x2UL
- #define NVM_WRITE_CMD_ERR_CODE_LAST NVM_WRITE_CMD_ERR_CODE_NO_SPACE
- u8 unused_0[7];
-};
-
-/* hwrm_nvm_modify_input (size:320b/40B) */
-struct hwrm_nvm_modify_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_src_addr;
- __le16 dir_idx;
- __le16 flags;
- #define NVM_MODIFY_REQ_FLAGS_BATCH_MODE 0x1UL
- #define NVM_MODIFY_REQ_FLAGS_BATCH_LAST 0x2UL
- __le32 offset;
- __le32 len;
- u8 unused_1[4];
-};
-
-/* hwrm_nvm_modify_output (size:128b/16B) */
-struct hwrm_nvm_modify_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_find_dir_entry_input (size:256b/32B) */
-struct hwrm_nvm_find_dir_entry_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID 0x1UL
- __le16 dir_idx;
- __le16 dir_type;
- __le16 dir_ordinal;
- __le16 dir_ext;
- u8 opt_ordinal;
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ 0x0UL
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE 0x1UL
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT 0x2UL
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_LAST NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT
- u8 unused_0[3];
-};
-
-/* hwrm_nvm_find_dir_entry_output (size:256b/32B) */
-struct hwrm_nvm_find_dir_entry_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 dir_item_length;
- __le32 dir_data_length;
- __le32 fw_ver;
- __le16 dir_ordinal;
- __le16 dir_idx;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_erase_dir_entry_input (size:192b/24B) */
-struct hwrm_nvm_erase_dir_entry_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 dir_idx;
- u8 unused_0[6];
-};
-
-/* hwrm_nvm_erase_dir_entry_output (size:128b/16B) */
-struct hwrm_nvm_erase_dir_entry_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_get_dev_info_input (size:192b/24B) */
-struct hwrm_nvm_get_dev_info_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 flags;
- #define NVM_GET_DEV_INFO_REQ_FLAGS_SECURITY_SOC_NVM 0x1UL
- u8 unused_0[7];
-};
-
-/* hwrm_nvm_get_dev_info_output (size:768b/96B) */
-struct hwrm_nvm_get_dev_info_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 manufacturer_id;
- __le16 device_id;
- __le32 sector_size;
- __le32 nvram_size;
- __le32 reserved_size;
- __le32 available_size;
- u8 nvm_cfg_ver_maj;
- u8 nvm_cfg_ver_min;
- u8 nvm_cfg_ver_upd;
- u8 flags;
- #define NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID 0x1UL
- char pkg_name[16];
- __le16 hwrm_fw_major;
- __le16 hwrm_fw_minor;
- __le16 hwrm_fw_build;
- __le16 hwrm_fw_patch;
- __le16 mgmt_fw_major;
- __le16 mgmt_fw_minor;
- __le16 mgmt_fw_build;
- __le16 mgmt_fw_patch;
- __le16 roce_fw_major;
- __le16 roce_fw_minor;
- __le16 roce_fw_build;
- __le16 roce_fw_patch;
- __le16 netctrl_fw_major;
- __le16 netctrl_fw_minor;
- __le16 netctrl_fw_build;
- __le16 netctrl_fw_patch;
- __le16 srt2_fw_major;
- __le16 srt2_fw_minor;
- __le16 srt2_fw_build;
- __le16 srt2_fw_patch;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_mod_dir_entry_input (size:256b/32B) */
-struct hwrm_nvm_mod_dir_entry_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM 0x1UL
- __le16 dir_idx;
- __le16 dir_ordinal;
- __le16 dir_ext;
- __le16 dir_attr;
- __le32 checksum;
-};
-
-/* hwrm_nvm_mod_dir_entry_output (size:128b/16B) */
-struct hwrm_nvm_mod_dir_entry_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_verify_update_input (size:192b/24B) */
-struct hwrm_nvm_verify_update_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 dir_type;
- __le16 dir_ordinal;
- __le16 dir_ext;
- u8 unused_0[2];
-};
-
-/* hwrm_nvm_verify_update_output (size:128b/16B) */
-struct hwrm_nvm_verify_update_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_install_update_input (size:192b/24B) */
-struct hwrm_nvm_install_update_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 install_type;
- #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL
- #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL
- #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_LAST NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL
- __le16 flags;
- #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL
- #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL
- #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL
- #define NVM_INSTALL_UPDATE_REQ_FLAGS_VERIFY_ONLY 0x8UL
- u8 unused_0[2];
-};
-
-/* hwrm_nvm_install_update_output (size:192b/24B) */
-struct hwrm_nvm_install_update_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 installed_items;
- u8 result;
- #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_FAILURE 0xffUL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_MALLOC_FAILURE 0xfdUL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER 0xfbUL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER 0xf3UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE 0xf2UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER 0xecUL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE 0xebUL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM 0xeaUL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH 0xe9UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST 0xe8UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER 0xe7UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM 0xe6UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM 0xe5UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH 0xe4UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE 0xe1UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV 0xceUL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID 0xcdUL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR 0xccUL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID 0xcbUL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM 0xc5UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM 0xc4UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM 0xc3UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR 0xb9UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR 0xb8UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR 0xb7UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND 0xb0UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED 0xa7UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_LAST NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED
- u8 problem_item;
- #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL
- #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL
- #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_LAST NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE
- u8 reset_required;
- #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE 0x0UL
- #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI 0x1UL
- #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER 0x2UL
- #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_LAST NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER
- u8 unused_0[4];
- u8 valid;
-};
-
-/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */
-struct hwrm_nvm_install_update_cmd_err {
- u8 code;
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK 0x3UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT 0x4UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT
- u8 unused_0[7];
-};
-
-/* hwrm_nvm_get_variable_input (size:320b/40B) */
-struct hwrm_nvm_get_variable_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 dest_data_addr;
- __le16 data_len;
- __le16 option_num;
- #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
- #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
- #define NVM_GET_VARIABLE_REQ_OPTION_NUM_LAST NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF
- __le16 dimensions;
- __le16 index_0;
- __le16 index_1;
- __le16 index_2;
- __le16 index_3;
- u8 flags;
- #define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT 0x1UL
- u8 unused_0;
-};
-
-/* hwrm_nvm_get_variable_output (size:128b/16B) */
-struct hwrm_nvm_get_variable_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 data_len;
- __le16 option_num;
- #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0 0x0UL
- #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF 0xffffUL
- #define NVM_GET_VARIABLE_RESP_OPTION_NUM_LAST NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_nvm_get_variable_cmd_err (size:64b/8B) */
-struct hwrm_nvm_get_variable_cmd_err {
- u8 code;
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_LAST NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT
- u8 unused_0[7];
-};
-
-/* hwrm_nvm_set_variable_input (size:320b/40B) */
-struct hwrm_nvm_set_variable_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 src_data_addr;
- __le16 data_len;
- __le16 option_num;
- #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
- #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
- #define NVM_SET_VARIABLE_REQ_OPTION_NUM_LAST NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF
- __le16 dimensions;
- __le16 index_0;
- __le16 index_1;
- __le16 index_2;
- __le16 index_3;
- u8 flags;
- #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1)
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1)
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_AES256 (0x2UL << 1)
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH (0x3UL << 1)
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH
- #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_MASK 0x70UL
- #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_SFT 4
- #define NVM_SET_VARIABLE_REQ_FLAGS_FACTORY_DEFAULT 0x80UL
- u8 unused_0;
-};
-
-/* hwrm_nvm_set_variable_output (size:128b/16B) */
-struct hwrm_nvm_set_variable_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_set_variable_cmd_err (size:64b/8B) */
-struct hwrm_nvm_set_variable_cmd_err {
- u8 code;
- #define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
- #define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
- #define NVM_SET_VARIABLE_CMD_ERR_CODE_LAST NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR
- u8 unused_0[7];
-};
-
-/* hwrm_selftest_qlist_input (size:128b/16B) */
-struct hwrm_selftest_qlist_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_selftest_qlist_output (size:2240b/280B) */
-struct hwrm_selftest_qlist_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 num_tests;
- u8 available_tests;
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST 0x1UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_SERDES_TEST 0x20UL
- u8 offline_tests;
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_SERDES_TEST 0x20UL
- u8 unused_0;
- __le16 test_timeout;
- u8 unused_1[2];
- char test_name[8][32];
- u8 eyescope_target_BER_support;
- #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E8_SUPPORTED 0x0UL
- #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E9_SUPPORTED 0x1UL
- #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E10_SUPPORTED 0x2UL
- #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E11_SUPPORTED 0x3UL
- #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED 0x4UL
- #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_LAST SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED
- u8 unused_2[6];
- u8 valid;
-};
-
-/* hwrm_selftest_exec_input (size:192b/24B) */
-struct hwrm_selftest_exec_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 flags;
- #define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST 0x1UL
- #define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL
- #define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL
- #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL
- #define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL
- u8 unused_0[7];
-};
-
-/* hwrm_selftest_exec_output (size:128b/16B) */
-struct hwrm_selftest_exec_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 requested_tests;
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST 0x1UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_SERDES_TEST 0x20UL
- u8 test_success;
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_SERDES_TEST 0x20UL
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_selftest_irq_input (size:128b/16B) */
-struct hwrm_selftest_irq_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_selftest_irq_output (size:128b/16B) */
-struct hwrm_selftest_irq_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* dbc_dbc (size:64b/8B) */
-struct dbc_dbc {
- __le32 index;
- #define DBC_DBC_INDEX_MASK 0xffffffUL
- #define DBC_DBC_INDEX_SFT 0
- #define DBC_DBC_EPOCH 0x1000000UL
- #define DBC_DBC_TOGGLE_MASK 0x6000000UL
- #define DBC_DBC_TOGGLE_SFT 25
- __le32 type_path_xid;
- #define DBC_DBC_XID_MASK 0xfffffUL
- #define DBC_DBC_XID_SFT 0
- #define DBC_DBC_PATH_MASK 0x3000000UL
- #define DBC_DBC_PATH_SFT 24
- #define DBC_DBC_PATH_ROCE (0x0UL << 24)
- #define DBC_DBC_PATH_L2 (0x1UL << 24)
- #define DBC_DBC_PATH_ENGINE (0x2UL << 24)
- #define DBC_DBC_PATH_LAST DBC_DBC_PATH_ENGINE
- #define DBC_DBC_VALID 0x4000000UL
- #define DBC_DBC_DEBUG_TRACE 0x8000000UL
- #define DBC_DBC_TYPE_MASK 0xf0000000UL
- #define DBC_DBC_TYPE_SFT 28
- #define DBC_DBC_TYPE_SQ (0x0UL << 28)
- #define DBC_DBC_TYPE_RQ (0x1UL << 28)
- #define DBC_DBC_TYPE_SRQ (0x2UL << 28)
- #define DBC_DBC_TYPE_SRQ_ARM (0x3UL << 28)
- #define DBC_DBC_TYPE_CQ (0x4UL << 28)
- #define DBC_DBC_TYPE_CQ_ARMSE (0x5UL << 28)
- #define DBC_DBC_TYPE_CQ_ARMALL (0x6UL << 28)
- #define DBC_DBC_TYPE_CQ_ARMENA (0x7UL << 28)
- #define DBC_DBC_TYPE_SRQ_ARMENA (0x8UL << 28)
- #define DBC_DBC_TYPE_CQ_CUTOFF_ACK (0x9UL << 28)
- #define DBC_DBC_TYPE_NQ (0xaUL << 28)
- #define DBC_DBC_TYPE_NQ_ARM (0xbUL << 28)
- #define DBC_DBC_TYPE_NQ_MASK (0xeUL << 28)
- #define DBC_DBC_TYPE_NULL (0xfUL << 28)
- #define DBC_DBC_TYPE_LAST DBC_DBC_TYPE_NULL
-};
-
-/* db_push_start (size:64b/8B) */
-struct db_push_start {
- u64 db;
- #define DB_PUSH_START_DB_INDEX_MASK 0xffffffUL
- #define DB_PUSH_START_DB_INDEX_SFT 0
- #define DB_PUSH_START_DB_PI_LO_MASK 0xff000000UL
- #define DB_PUSH_START_DB_PI_LO_SFT 24
- #define DB_PUSH_START_DB_XID_MASK 0xfffff00000000ULL
- #define DB_PUSH_START_DB_XID_SFT 32
- #define DB_PUSH_START_DB_PI_HI_MASK 0xf0000000000000ULL
- #define DB_PUSH_START_DB_PI_HI_SFT 52
- #define DB_PUSH_START_DB_TYPE_MASK 0xf000000000000000ULL
- #define DB_PUSH_START_DB_TYPE_SFT 60
- #define DB_PUSH_START_DB_TYPE_PUSH_START (0xcULL << 60)
- #define DB_PUSH_START_DB_TYPE_PUSH_END (0xdULL << 60)
- #define DB_PUSH_START_DB_TYPE_LAST DB_PUSH_START_DB_TYPE_PUSH_END
-};
-
-/* db_push_end (size:64b/8B) */
-struct db_push_end {
- u64 db;
- #define DB_PUSH_END_DB_INDEX_MASK 0xffffffUL
- #define DB_PUSH_END_DB_INDEX_SFT 0
- #define DB_PUSH_END_DB_PI_LO_MASK 0xff000000UL
- #define DB_PUSH_END_DB_PI_LO_SFT 24
- #define DB_PUSH_END_DB_XID_MASK 0xfffff00000000ULL
- #define DB_PUSH_END_DB_XID_SFT 32
- #define DB_PUSH_END_DB_PI_HI_MASK 0xf0000000000000ULL
- #define DB_PUSH_END_DB_PI_HI_SFT 52
- #define DB_PUSH_END_DB_PATH_MASK 0x300000000000000ULL
- #define DB_PUSH_END_DB_PATH_SFT 56
- #define DB_PUSH_END_DB_PATH_ROCE (0x0ULL << 56)
- #define DB_PUSH_END_DB_PATH_L2 (0x1ULL << 56)
- #define DB_PUSH_END_DB_PATH_ENGINE (0x2ULL << 56)
- #define DB_PUSH_END_DB_PATH_LAST DB_PUSH_END_DB_PATH_ENGINE
- #define DB_PUSH_END_DB_DEBUG_TRACE 0x800000000000000ULL
- #define DB_PUSH_END_DB_TYPE_MASK 0xf000000000000000ULL
- #define DB_PUSH_END_DB_TYPE_SFT 60
- #define DB_PUSH_END_DB_TYPE_PUSH_START (0xcULL << 60)
- #define DB_PUSH_END_DB_TYPE_PUSH_END (0xdULL << 60)
- #define DB_PUSH_END_DB_TYPE_LAST DB_PUSH_END_DB_TYPE_PUSH_END
-};
-
-/* db_push_info (size:64b/8B) */
-struct db_push_info {
- u32 push_size_push_index;
- #define DB_PUSH_INFO_PUSH_INDEX_MASK 0xffffffUL
- #define DB_PUSH_INFO_PUSH_INDEX_SFT 0
- #define DB_PUSH_INFO_PUSH_SIZE_MASK 0x1f000000UL
- #define DB_PUSH_INFO_PUSH_SIZE_SFT 24
- u32 reserved32;
-};
-
-/* fw_status_reg (size:32b/4B) */
-struct fw_status_reg {
- u32 fw_status;
- #define FW_STATUS_REG_CODE_MASK 0xffffUL
- #define FW_STATUS_REG_CODE_SFT 0
- #define FW_STATUS_REG_CODE_READY 0x8000UL
- #define FW_STATUS_REG_CODE_LAST FW_STATUS_REG_CODE_READY
- #define FW_STATUS_REG_IMAGE_DEGRADED 0x10000UL
- #define FW_STATUS_REG_RECOVERABLE 0x20000UL
- #define FW_STATUS_REG_CRASHDUMP_ONGOING 0x40000UL
- #define FW_STATUS_REG_CRASHDUMP_COMPLETE 0x80000UL
- #define FW_STATUS_REG_SHUTDOWN 0x100000UL
- #define FW_STATUS_REG_CRASHED_NO_MASTER 0x200000UL
- #define FW_STATUS_REG_RECOVERING 0x400000UL
- #define FW_STATUS_REG_MANU_DEBUG_STATUS 0x800000UL
-};
-
-/* hcomm_status (size:64b/8B) */
-struct hcomm_status {
- u32 sig_ver;
- #define HCOMM_STATUS_VER_MASK 0xffUL
- #define HCOMM_STATUS_VER_SFT 0
- #define HCOMM_STATUS_VER_LATEST 0x1UL
- #define HCOMM_STATUS_VER_LAST HCOMM_STATUS_VER_LATEST
- #define HCOMM_STATUS_SIGNATURE_MASK 0xffffff00UL
- #define HCOMM_STATUS_SIGNATURE_SFT 8
- #define HCOMM_STATUS_SIGNATURE_VAL (0x484353UL << 8)
- #define HCOMM_STATUS_SIGNATURE_LAST HCOMM_STATUS_SIGNATURE_VAL
- u32 fw_status_loc;
- #define HCOMM_STATUS_TRUE_ADDR_SPACE_MASK 0x3UL
- #define HCOMM_STATUS_TRUE_ADDR_SPACE_SFT 0
- #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_PCIE_CFG 0x0UL
- #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_GRC 0x1UL
- #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR0 0x2UL
- #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1 0x3UL
- #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_LAST HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1
- #define HCOMM_STATUS_TRUE_OFFSET_MASK 0xfffffffcUL
- #define HCOMM_STATUS_TRUE_OFFSET_SFT 2
-};
-#define HCOMM_STATUS_STRUCT_LOC 0x31001F0UL
-
-#endif /* _BNXT_HSI_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c
index 669d24ba0e87..de3427c6c6aa 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c
@@ -12,8 +12,8 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/pci.h>
+#include <linux/bnxt/hsi.h>
-#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_hwmon.h"
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
index d2fd2d04ed47..5ce190f50120 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
@@ -20,8 +20,8 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
+#include <linux/bnxt/hsi.h>
-#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
index 15ca51b5d204..791b3a0cdb83 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
@@ -10,7 +10,7 @@
#ifndef BNXT_HWRM_H
#define BNXT_HWRM_H
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
enum bnxt_hwrm_ctx_flags {
/* Update the HWRM_API_FLAGS right below for any new non-internal bit added here */
@@ -58,7 +58,7 @@ void hwrm_update_token(struct bnxt *bp, u16 seq, enum bnxt_hwrm_wait_state s);
#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len)
#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
-#define HWRM_CMD_MAX_TIMEOUT 40000U
+#define HWRM_CMD_MAX_TIMEOUT 60000U
#define SHORT_HWRM_CMD_TIMEOUT 20
#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index fa514be87650..a8a74f07bb54 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -15,7 +15,7 @@
#include <linux/timekeeping.h>
#include <linux/ptp_classify.h>
#include <linux/clocksource.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ptp.h"
@@ -67,15 +67,15 @@ static int bnxt_ptp_settime(struct ptp_clock_info *ptp_info,
if (BNXT_PTP_USE_RTC(ptp->bp))
return bnxt_ptp_cfg_settime(ptp->bp, ns);
- spin_lock_irqsave(&ptp->ptp_lock, flags);
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
timecounter_init(&ptp->tc, &ptp->cc, ns);
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
return 0;
}
/* Caller holds ptp_lock */
-static int bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts,
- u64 *ns)
+static int __bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts,
+ u64 *ns)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
u32 high_before, high_now, low;
@@ -98,17 +98,50 @@ static int bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts,
return 0;
}
-static void bnxt_ptp_get_current_time(struct bnxt *bp)
+static int bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts,
+ u64 *ns)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
unsigned long flags;
+ int rc;
+
+ /* We have to serialize reg access and FW reset */
+ read_seqlock_excl_irqsave(&ptp->ptp_lock, flags);
+ rc = __bnxt_refclk_read(bp, sts, ns);
+ read_sequnlock_excl_irqrestore(&ptp->ptp_lock, flags);
+ return rc;
+}
+
+static int bnxt_refclk_read_low(struct bnxt *bp, struct ptp_system_timestamp *sts,
+ u32 *low)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ unsigned long flags;
+
+ /* We have to serialize reg access and FW reset */
+ read_seqlock_excl_irqsave(&ptp->ptp_lock, flags);
+
+ if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ read_sequnlock_excl_irqrestore(&ptp->ptp_lock, flags);
+ return -EIO;
+ }
+
+ ptp_read_system_prets(sts);
+ *low = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
+ ptp_read_system_postts(sts);
+
+ read_sequnlock_excl_irqrestore(&ptp->ptp_lock, flags);
+ return 0;
+}
+
+static void bnxt_ptp_get_current_time(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
if (!ptp)
return;
- spin_lock_irqsave(&ptp->ptp_lock, flags);
- WRITE_ONCE(ptp->old_time, ptp->current_time);
+ WRITE_ONCE(ptp->old_time, ptp->current_time >> BNXT_HI_TIMER_SHIFT);
bnxt_refclk_read(bp, NULL, &ptp->current_time);
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
}
static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts,
@@ -151,36 +184,32 @@ static int bnxt_ptp_gettimex(struct ptp_clock_info *ptp_info,
{
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
- unsigned long flags;
u64 ns, cycles;
+ u32 low;
int rc;
- spin_lock_irqsave(&ptp->ptp_lock, flags);
- rc = bnxt_refclk_read(ptp->bp, sts, &cycles);
- if (rc) {
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+ rc = bnxt_refclk_read_low(ptp->bp, sts, &low);
+ if (rc)
return rc;
- }
- ns = timecounter_cyc2time(&ptp->tc, cycles);
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+
+ cycles = bnxt_extend_cycles_32b_to_48b(ptp, low);
+ ns = bnxt_timecounter_cyc2time(ptp, cycles);
*ts = ns_to_timespec64(ns);
return 0;
}
-/* Caller holds ptp_lock */
void bnxt_ptp_update_current_time(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
bnxt_refclk_read(ptp->bp, NULL, &ptp->current_time);
- WRITE_ONCE(ptp->old_time, ptp->current_time);
+ WRITE_ONCE(ptp->old_time, ptp->current_time >> BNXT_HI_TIMER_SHIFT);
}
static int bnxt_ptp_adjphc(struct bnxt_ptp_cfg *ptp, s64 delta)
{
struct hwrm_port_mac_cfg_input *req;
- unsigned long flags;
int rc;
rc = hwrm_req_init(ptp->bp, req, HWRM_PORT_MAC_CFG);
@@ -194,9 +223,7 @@ static int bnxt_ptp_adjphc(struct bnxt_ptp_cfg *ptp, s64 delta)
if (rc) {
netdev_err(ptp->bp->dev, "ptp adjphc failed. rc = %x\n", rc);
} else {
- spin_lock_irqsave(&ptp->ptp_lock, flags);
bnxt_ptp_update_current_time(ptp->bp);
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
}
return rc;
@@ -211,9 +238,9 @@ static int bnxt_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
if (BNXT_PTP_USE_RTC(ptp->bp))
return bnxt_ptp_adjphc(ptp, delta);
- spin_lock_irqsave(&ptp->ptp_lock, flags);
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
timecounter_adjtime(&ptp->tc, delta);
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
return 0;
}
@@ -246,10 +273,10 @@ static int bnxt_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
if (!BNXT_MH(bp))
return bnxt_ptp_adjfine_rtc(bp, scaled_ppm);
- spin_lock_irqsave(&ptp->ptp_lock, flags);
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
timecounter_read(&ptp->tc);
ptp->cc.mult = adjust_by_scaled_ppm(ptp->cmult, scaled_ppm);
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
return 0;
}
@@ -257,13 +284,10 @@ void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
struct ptp_clock_event event;
- unsigned long flags;
u64 ns, pps_ts;
pps_ts = EVENT_PPS_TS(data2, data1);
- spin_lock_irqsave(&ptp->ptp_lock, flags);
- ns = timecounter_cyc2time(&ptp->tc, pps_ts);
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+ ns = bnxt_timecounter_cyc2time(ptp, pps_ts);
switch (EVENT_DATA2_PPS_EVENT_TYPE(data2)) {
case ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_INTERNAL:
@@ -400,17 +424,13 @@ static int bnxt_get_target_cycles(struct bnxt_ptp_cfg *ptp, u64 target_ns,
{
u64 cycles_now;
u64 nsec_now, nsec_delta;
- unsigned long flags;
int rc;
- spin_lock_irqsave(&ptp->ptp_lock, flags);
rc = bnxt_refclk_read(ptp->bp, NULL, &cycles_now);
- if (rc) {
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+ if (rc)
return rc;
- }
- nsec_now = timecounter_cyc2time(&ptp->tc, cycles_now);
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+
+ nsec_now = bnxt_timecounter_cyc2time(ptp, cycles_now);
nsec_delta = target_ns - nsec_now;
*cycles_delta = div64_u64(nsec_delta << ptp->cc.shift, ptp->cc.mult);
@@ -540,10 +560,11 @@ static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
return bnxt_ptp_cfg_tstamp_filters(bp);
}
-int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+int bnxt_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *stmpconf,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = netdev_priv(dev);
- struct hwtstamp_config stmpconf;
struct bnxt_ptp_cfg *ptp;
u16 old_rxctl;
int old_rx_filter, rc;
@@ -553,17 +574,14 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
if (!ptp)
return -EOPNOTSUPP;
- if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
- return -EFAULT;
-
- if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
- stmpconf.tx_type != HWTSTAMP_TX_OFF)
+ if (stmpconf->tx_type != HWTSTAMP_TX_ON &&
+ stmpconf->tx_type != HWTSTAMP_TX_OFF)
return -ERANGE;
old_rx_filter = ptp->rx_filter;
old_rxctl = ptp->rxctl;
old_tx_tstamp_en = ptp->tx_tstamp_en;
- switch (stmpconf.rx_filter) {
+ switch (stmpconf->rx_filter) {
case HWTSTAMP_FILTER_NONE:
ptp->rxctl = 0;
ptp->rx_filter = HWTSTAMP_FILTER_NONE;
@@ -596,7 +614,7 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
return -ERANGE;
}
- if (stmpconf.tx_type == HWTSTAMP_TX_ON)
+ if (stmpconf->tx_type == HWTSTAMP_TX_ON)
ptp->tx_tstamp_en = 1;
else
ptp->tx_tstamp_en = 0;
@@ -605,9 +623,8 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
if (rc)
goto ts_set_err;
- stmpconf.rx_filter = ptp->rx_filter;
- return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
- -EFAULT : 0;
+ stmpconf->rx_filter = ptp->rx_filter;
+ return 0;
ts_set_err:
ptp->rx_filter = old_rx_filter;
@@ -616,22 +633,22 @@ ts_set_err:
return rc;
}
-int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+int bnxt_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *stmpconf)
{
struct bnxt *bp = netdev_priv(dev);
- struct hwtstamp_config stmpconf;
struct bnxt_ptp_cfg *ptp;
ptp = bp->ptp_cfg;
if (!ptp)
return -EOPNOTSUPP;
- stmpconf.flags = 0;
- stmpconf.tx_type = ptp->tx_tstamp_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ stmpconf->flags = 0;
+ stmpconf->tx_type = ptp->tx_tstamp_en ? HWTSTAMP_TX_ON
+ : HWTSTAMP_TX_OFF;
- stmpconf.rx_filter = ptp->rx_filter;
- return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
- -EFAULT : 0;
+ stmpconf->rx_filter = ptp->rx_filter;
+ return 0;
}
static int bnxt_map_regs(struct bnxt *bp, u32 *reg_arr, int count, int reg_win)
@@ -682,12 +699,12 @@ static void bnxt_unmap_ptp_regs(struct bnxt *bp)
(BNXT_PTP_GRC_WIN - 1) * 4);
}
-static u64 bnxt_cc_read(const struct cyclecounter *cc)
+static u64 bnxt_cc_read(struct cyclecounter *cc)
{
struct bnxt_ptp_cfg *ptp = container_of(cc, struct bnxt_ptp_cfg, cc);
u64 ns = 0;
- bnxt_refclk_read(ptp->bp, NULL, &ns);
+ __bnxt_refclk_read(ptp->bp, NULL, &ns);
return ns;
}
@@ -697,7 +714,6 @@ static int bnxt_stamp_tx_skb(struct bnxt *bp, int slot)
struct skb_shared_hwtstamps timestamp;
struct bnxt_ptp_tx_req *txts_req;
unsigned long now = jiffies;
- unsigned long flags;
u64 ts = 0, ns = 0;
u32 tmo = 0;
int rc;
@@ -711,9 +727,7 @@ static int bnxt_stamp_tx_skb(struct bnxt *bp, int slot)
tmo, slot);
if (!rc) {
memset(&timestamp, 0, sizeof(timestamp));
- spin_lock_irqsave(&ptp->ptp_lock, flags);
- ns = timecounter_cyc2time(&ptp->tc, ts);
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+ ns = bnxt_timecounter_cyc2time(ptp, ts);
timestamp.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(txts_req->tx_skb, &timestamp);
ptp->stats.ts_pkts++;
@@ -767,9 +781,9 @@ next_slot:
bnxt_ptp_get_current_time(bp);
ptp->next_period = now + HZ;
if (time_after_eq(now, ptp->next_overflow_check)) {
- spin_lock_irqsave(&ptp->ptp_lock, flags);
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
timecounter_read(&ptp->tc);
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
ptp->next_overflow_check = now + BNXT_PHC_OVERFLOW_PERIOD;
}
if (rc == -EAGAIN)
@@ -777,6 +791,27 @@ next_slot:
return HZ;
}
+void bnxt_ptp_free_txts_skbs(struct bnxt_ptp_cfg *ptp)
+{
+ struct bnxt_ptp_tx_req *txts_req;
+ u16 cons = ptp->txts_cons;
+
+ /* make sure ptp aux worker finished with
+ * possible BNXT_STATE_OPEN set
+ */
+ ptp_cancel_worker_sync(ptp->ptp_clock);
+
+ ptp->tx_avail = BNXT_MAX_TX_TS;
+ while (cons != ptp->txts_prod) {
+ txts_req = &ptp->txts_req[cons];
+ if (!IS_ERR_OR_NULL(txts_req->tx_skb))
+ dev_kfree_skb_any(txts_req->tx_skb);
+ cons = NEXT_TXTS(cons);
+ }
+ ptp->txts_cons = cons;
+ ptp_schedule_worker(ptp->ptp_clock, 0);
+}
+
int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod)
{
spin_lock_bh(&ptp->ptp_tx_lock);
@@ -808,15 +843,11 @@ void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod)
int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
- u64 time;
if (!ptp)
return -ENODEV;
- BNXT_READ_TIME64(ptp, time, ptp->old_time);
- *ts = (time & BNXT_HI_TIMER_MASK) | pkt_ts;
- if (pkt_ts < (time & BNXT_LO_TIMER_MASK))
- *ts += BNXT_LO_TIMER_MASK + 1;
+ *ts = bnxt_extend_cycles_32b_to_48b(ptp, pkt_ts);
return 0;
}
@@ -829,7 +860,6 @@ void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi,
u32 opaque = tscmp->tx_ts_cmp_opaque;
struct bnxt_tx_ring_info *txr;
struct bnxt_sw_tx_bd *tx_buf;
- unsigned long flags;
u64 ts, ns;
u16 cons;
@@ -844,9 +874,7 @@ void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi,
le32_to_cpu(tscmp->tx_ts_cmp_flags_type),
le32_to_cpu(tscmp->tx_ts_cmp_errors_v));
} else {
- spin_lock_irqsave(&ptp->ptp_lock, flags);
- ns = timecounter_cyc2time(&ptp->tc, ts);
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+ ns = bnxt_timecounter_cyc2time(ptp, ts);
timestamp.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(tx_buf->skb, &timestamp);
}
@@ -924,7 +952,6 @@ static int bnxt_ptp_pps_init(struct bnxt *bp)
snprintf(ptp_info->pin_config[i].name,
sizeof(ptp_info->pin_config[i].name), "bnxt_pps%d", i);
ptp_info->pin_config[i].index = i;
- ptp_info->pin_config[i].chan = i;
if (*pin_usg == BNXT_PPS_PIN_PPS_IN)
ptp_info->pin_config[i].func = PTP_PF_EXTTS;
else if (*pin_usg == BNXT_PPS_PIN_PPS_OUT)
@@ -941,6 +968,8 @@ static int bnxt_ptp_pps_init(struct bnxt *bp)
ptp_info->n_per_out = 1;
ptp_info->pps = 1;
ptp_info->verify = bnxt_ptp_verify;
+ ptp_info->supported_extts_flags = PTP_RISING_EDGE | PTP_STRICT_FLAGS;
+ ptp_info->supported_perout_flags = PTP_PEROUT_DUTY_CYCLE;
return 0;
}
@@ -955,6 +984,7 @@ static bool bnxt_pps_config_ok(struct bnxt *bp)
static void bnxt_ptp_timecounter_init(struct bnxt *bp, bool init_tc)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ unsigned long flags;
if (!ptp->ptp_clock) {
memset(&ptp->cc, 0, sizeof(ptp->cc));
@@ -971,8 +1001,11 @@ static void bnxt_ptp_timecounter_init(struct bnxt *bp, bool init_tc)
}
ptp->next_overflow_check = jiffies + BNXT_PHC_OVERFLOW_PERIOD;
}
- if (init_tc)
+ if (init_tc) {
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
+ }
}
/* Caller holds ptp_lock */
@@ -1005,9 +1038,9 @@ int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg)
if (rc)
return rc;
}
- spin_lock_irqsave(&bp->ptp_cfg->ptp_lock, flags);
+ write_seqlock_irqsave(&bp->ptp_cfg->ptp_lock, flags);
bnxt_ptp_rtc_timecounter_init(bp->ptp_cfg, ns);
- spin_unlock_irqrestore(&bp->ptp_cfg->ptp_lock, flags);
+ write_sequnlock_irqrestore(&bp->ptp_cfg->ptp_lock, flags);
return 0;
}
@@ -1019,12 +1052,12 @@ static void bnxt_ptp_free(struct bnxt *bp)
if (ptp->ptp_clock) {
ptp_clock_unregister(ptp->ptp_clock);
ptp->ptp_clock = NULL;
- kfree(ptp->ptp_info.pin_config);
- ptp->ptp_info.pin_config = NULL;
}
+ kfree(ptp->ptp_info.pin_config);
+ ptp->ptp_info.pin_config = NULL;
}
-int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
+int bnxt_ptp_init(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
int rc;
@@ -1042,12 +1075,12 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
bnxt_ptp_free(bp);
WRITE_ONCE(ptp->tx_avail, BNXT_MAX_TX_TS);
- spin_lock_init(&ptp->ptp_lock);
+ seqlock_init(&ptp->ptp_lock);
spin_lock_init(&ptp->ptp_tx_lock);
if (BNXT_PTP_USE_RTC(bp)) {
bnxt_ptp_timecounter_init(bp, false);
- rc = bnxt_ptp_init_rtc(bp, phc_cfg);
+ rc = bnxt_ptp_init_rtc(bp, ptp->rtc_configured);
if (rc)
goto out;
} else {
@@ -1075,12 +1108,8 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
atomic64_set(&ptp->stats.ts_err, 0);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- unsigned long flags;
-
- spin_lock_irqsave(&ptp->ptp_lock, flags);
bnxt_refclk_read(bp, NULL, &ptp->current_time);
- WRITE_ONCE(ptp->old_time, ptp->current_time);
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+ WRITE_ONCE(ptp->old_time, ptp->current_time >> BNXT_HI_TIMER_SHIFT);
ptp_schedule_worker(ptp->ptp_clock, 0);
}
ptp->txts_tmo = BNXT_PTP_DFLT_TX_TMO;
@@ -1095,7 +1124,6 @@ out:
void bnxt_ptp_clear(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
- int i;
if (!ptp)
return;
@@ -1107,12 +1135,5 @@ void bnxt_ptp_clear(struct bnxt *bp)
kfree(ptp->ptp_info.pin_config);
ptp->ptp_info.pin_config = NULL;
- for (i = 0; i < BNXT_MAX_TX_TS; i++) {
- if (ptp->txts_req[i].tx_skb) {
- dev_kfree_skb_any(ptp->txts_req[i].tx_skb);
- ptp->txts_req[i].tx_skb = NULL;
- }
- }
-
bnxt_unmap_ptp_regs(bp);
}
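The bnxt_ptp.c hunks above replace the ptp_lock spinlock with a seqlock: writers (timecounter_init, adjtime, adjfine, the periodic overflow refresh) take write_seqlock_irqsave(), while hot-path readers such as bnxt_timecounter_cyc2time() retry via read_seqbegin()/read_seqretry() instead of serializing against each other. Below is a minimal userspace sketch of that reader-retry shape, assuming invented names (phc_state, phc_update, phc_cyc2ns) and a toy sequence counter; it mimics only the control flow and omits the barriers, lockdep annotations and IRQ handling that the kernel's seqlock_t provides.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Toy sequence counter: even = stable, odd = writer in progress. */
struct seqcount {
	atomic_uint seq;
};

struct phc_state {
	struct seqcount sc;
	uint64_t cyc_last;	/* stand-ins for the timecounter fields */
	uint64_t nsec_base;
};

/* Writer side, analogous to write_seqlock()/write_sequnlock(). */
static void phc_update(struct phc_state *p, uint64_t cyc, uint64_t ns)
{
	atomic_fetch_add_explicit(&p->sc.seq, 1, memory_order_acq_rel); /* -> odd */
	p->cyc_last = cyc;
	p->nsec_base = ns;
	atomic_fetch_add_explicit(&p->sc.seq, 1, memory_order_acq_rel); /* -> even */
}

/* Reader side, analogous to the read_seqbegin()/read_seqretry() loop. */
static uint64_t phc_cyc2ns(struct phc_state *p, uint64_t cycles)
{
	unsigned int start;
	uint64_t ns;

	do {
		start = atomic_load_explicit(&p->sc.seq, memory_order_acquire);
		ns = p->nsec_base + (cycles - p->cyc_last);	/* mult/shift omitted */
	} while ((start & 1) ||
		 atomic_load_explicit(&p->sc.seq, memory_order_acquire) != start);
	return ns;
}

int main(void)
{
	struct phc_state p = { .cyc_last = 1000, .nsec_base = 5000 };

	printf("%llu\n", (unsigned long long)phc_cyc2ns(&p, 1100));	/* 5100 */
	phc_update(&p, 2000, 9000);
	printf("%llu\n", (unsigned long long)phc_cyc2ns(&p, 2100));	/* 9100 */
	return 0;
}

The key property is that a reader never blocks a writer: if the sequence number was odd or changed while it read, it simply redoes the conversion.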
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index f322466ecad3..8cc2fae47644 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -21,6 +21,7 @@
#define BNXT_DEVCLK_FREQ 1000000
#define BNXT_LO_TIMER_MASK 0x0000ffffffffUL
#define BNXT_HI_TIMER_MASK 0xffff00000000UL
+#define BNXT_HI_TIMER_SHIFT 24
#define BNXT_PTP_DFLT_TX_TMO 1000 /* ms */
#define BNXT_PTP_QTS_TIMEOUT 1000
@@ -102,14 +103,15 @@ struct bnxt_ptp_cfg {
struct timecounter tc;
struct bnxt_pps pps_info;
/* serialize timecounter access */
- spinlock_t ptp_lock;
+ seqlock_t ptp_lock;
/* serialize ts tx request queuing */
spinlock_t ptp_tx_lock;
u64 current_time;
- u64 old_time;
unsigned long next_period;
unsigned long next_overflow_check;
u32 cmult;
+ /* cache of upper 24 bits of cyclecounter. 8 bits are used to check for roll-over */
+ u32 old_time;
/* a 23b shift cyclecounter will overflow in ~36 mins. Check overflow every 18 mins. */
#define BNXT_PHC_OVERFLOW_PERIOD (18 * 60 * HZ)
@@ -133,6 +135,7 @@ struct bnxt_ptp_cfg {
BNXT_PTP_MSG_PDELAY_REQ | \
BNXT_PTP_MSG_PDELAY_RESP)
u8 tx_tstamp_en:1;
+ u8 rtc_configured:1;
int rx_filter;
u32 tstamp_filters;
@@ -145,20 +148,6 @@ struct bnxt_ptp_cfg {
struct bnxt_ptp_stats stats;
};
-#if BITS_PER_LONG == 32
-#define BNXT_READ_TIME64(ptp, dst, src) \
-do { \
- unsigned long flags; \
- \
- spin_lock_irqsave(&(ptp)->ptp_lock, flags); \
- (dst) = (src); \
- spin_unlock_irqrestore(&(ptp)->ptp_lock, flags); \
-} while (0)
-#else
-#define BNXT_READ_TIME64(ptp, dst, src) \
- ((dst) = READ_ONCE(src))
-#endif
-
#define BNXT_PTP_INC_TX_AVAIL(ptp) \
do { \
spin_lock_bh(&(ptp)->ptp_tx_lock); \
@@ -171,8 +160,12 @@ void bnxt_ptp_update_current_time(struct bnxt *bp);
void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2);
int bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp);
void bnxt_ptp_reapply_pps(struct bnxt *bp);
-int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
-int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
+int bnxt_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *stmpconf,
+ struct netlink_ext_ack *extack);
+int bnxt_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *stmpconf);
+void bnxt_ptp_free_txts_skbs(struct bnxt_ptp_cfg *ptp);
int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod);
void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod);
int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts);
@@ -180,6 +173,29 @@ void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi,
struct tx_ts_cmp *tscmp);
void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns);
int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg);
-int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg);
+int bnxt_ptp_init(struct bnxt *bp);
void bnxt_ptp_clear(struct bnxt *bp);
+static inline u64 bnxt_timecounter_cyc2time(struct bnxt_ptp_cfg *ptp, u64 ts)
+{
+ unsigned int seq;
+ u64 ns;
+
+ do {
+ seq = read_seqbegin(&ptp->ptp_lock);
+ ns = timecounter_cyc2time(&ptp->tc, ts);
+ } while (read_seqretry(&ptp->ptp_lock, seq));
+
+ return ns;
+}
+
+static inline u64 bnxt_extend_cycles_32b_to_48b(struct bnxt_ptp_cfg *ptp, u32 ts)
+{
+ u64 time, cycles;
+
+ time = (u64)READ_ONCE(ptp->old_time) << BNXT_HI_TIMER_SHIFT;
+ cycles = (time & BNXT_HI_TIMER_MASK) | ts;
+ if (ts < (time & BNXT_LO_TIMER_MASK))
+ cycles += BNXT_LO_TIMER_MASK + 1;
+ return cycles;
+}
#endif
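With ptp->old_time now caching only bits 47..24 of the last full counter read, bnxt_extend_cycles_32b_to_48b() above rebuilds a 48-bit cycle value from a 32-bit hardware timestamp and bumps it by 2^32 when the low word has already wrapped. Below is a standalone sketch of that arithmetic, reusing the mask/shift values from this header; the function name and the sample numbers in main() are invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define BNXT_LO_TIMER_MASK	0x0000ffffffffULL
#define BNXT_HI_TIMER_MASK	0xffff00000000ULL
#define BNXT_HI_TIMER_SHIFT	24

/* old_time caches bits 47..24 of the last full 48-bit counter read. */
static uint64_t extend_cycles_32b_to_48b(uint32_t old_time, uint32_t ts)
{
	uint64_t time = (uint64_t)old_time << BNXT_HI_TIMER_SHIFT;
	uint64_t cycles = (time & BNXT_HI_TIMER_MASK) | ts;

	/* If the 32-bit timestamp is already below the cached low bits,
	 * it belongs to the next 2^32 window of the counter.
	 */
	if (ts < (time & BNXT_LO_TIMER_MASK))
		cycles += BNXT_LO_TIMER_MASK + 1;
	return cycles;
}

int main(void)
{
	/* Pretend the last full counter read was 0x123456789abc. */
	uint64_t last = 0x123456789abcULL;
	uint32_t old_time = (uint32_t)(last >> BNXT_HI_TIMER_SHIFT);

	/* A later timestamp in the same 2^32 window... */
	printf("%#llx\n", (unsigned long long)
	       extend_cycles_32b_to_48b(old_time, 0x6789f000));	/* 0x12346789f000 */
	/* ...and one that has wrapped around 32 bits. */
	printf("%#llx\n", (unsigned long long)
	       extend_cycles_32b_to_48b(old_time, 0x00000010));	/* 0x123500000010 */
	return 0;
}

Caching only the upper 24 bits leaves 8 bits of overlap with the 32-bit timestamp, which is what makes the roll-over check possible and matches the header comment above.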
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 7bb8a5d74430..be7deb9cc410 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -16,7 +16,7 @@
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <net/dcbnl.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
@@ -332,6 +332,38 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
return rc;
}
+static int bnxt_set_vf_link_admin_state(struct bnxt *bp, int vf_id)
+{
+ struct hwrm_func_cfg_input *req;
+ struct bnxt_vf_info *vf;
+ int rc;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN))
+ return 0;
+
+ vf = &bp->pf.vf[vf_id];
+
+ rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(vf->fw_fid);
+ switch (vf->flags & (BNXT_VF_LINK_FORCED | BNXT_VF_LINK_UP)) {
+ case BNXT_VF_LINK_FORCED:
+ req->options =
+ FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN;
+ break;
+ case (BNXT_VF_LINK_FORCED | BNXT_VF_LINK_UP):
+ req->options = FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_UP;
+ break;
+ default:
+ req->options = FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO;
+ break;
+ }
+ req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE);
+ return hwrm_req_send(bp, req);
+}
+
int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
struct bnxt *bp = netdev_priv(dev);
@@ -357,10 +389,11 @@ int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
break;
default:
netdev_err(bp->dev, "Invalid link option\n");
- rc = -EINVAL;
- break;
+ return -EINVAL;
}
- if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
+ if (bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN)
+ rc = bnxt_set_vf_link_admin_state(bp, vf_id);
+ else if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
return rc;
@@ -520,6 +553,63 @@ static int __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
return hwrm_req_send(bp, req);
}
+static void bnxt_hwrm_roce_sriov_cfg(struct bnxt *bp, int num_vfs)
+{
+ struct hwrm_func_qcaps_output *resp;
+ struct hwrm_func_cfg_input *cfg_req;
+ struct hwrm_func_qcaps_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
+ if (rc)
+ return;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (rc)
+ goto err;
+
+ rc = hwrm_req_init(bp, cfg_req, HWRM_FUNC_CFG);
+ if (rc)
+ goto err;
+
+ /* In case of VF Dynamic resource allocation, driver will provision
+ * maximum resources to all the VFs. FW will dynamically allocate
+ * resources to VFs on the fly, so always divide the resources by 1.
+ */
+ if (BNXT_ROCE_VF_DYN_ALLOC_CAP(bp))
+ num_vfs = 1;
+
+ cfg_req->fid = cpu_to_le16(0xffff);
+ cfg_req->enables2 =
+ cpu_to_le32(FUNC_CFG_REQ_ENABLES2_ROCE_MAX_AV_PER_VF |
+ FUNC_CFG_REQ_ENABLES2_ROCE_MAX_CQ_PER_VF |
+ FUNC_CFG_REQ_ENABLES2_ROCE_MAX_MRW_PER_VF |
+ FUNC_CFG_REQ_ENABLES2_ROCE_MAX_QP_PER_VF |
+ FUNC_CFG_REQ_ENABLES2_ROCE_MAX_SRQ_PER_VF |
+ FUNC_CFG_REQ_ENABLES2_ROCE_MAX_GID_PER_VF);
+ cfg_req->roce_max_av_per_vf =
+ cpu_to_le32(le32_to_cpu(resp->roce_vf_max_av) / num_vfs);
+ cfg_req->roce_max_cq_per_vf =
+ cpu_to_le32(le32_to_cpu(resp->roce_vf_max_cq) / num_vfs);
+ cfg_req->roce_max_mrw_per_vf =
+ cpu_to_le32(le32_to_cpu(resp->roce_vf_max_mrw) / num_vfs);
+ cfg_req->roce_max_qp_per_vf =
+ cpu_to_le32(le32_to_cpu(resp->roce_vf_max_qp) / num_vfs);
+ cfg_req->roce_max_srq_per_vf =
+ cpu_to_le32(le32_to_cpu(resp->roce_vf_max_srq) / num_vfs);
+ cfg_req->roce_max_gid_per_vf =
+ cpu_to_le32(le32_to_cpu(resp->roce_vf_max_gid) / num_vfs);
+
+ rc = hwrm_req_send(bp, cfg_req);
+
+err:
+ hwrm_req_drop(bp, req);
+ if (rc)
+ netdev_err(bp->dev, "RoCE sriov configuration failed\n");
+}
+
/* Only called by PF to reserve resources for VFs, returns actual number of
* VFs configured, or < 0 on error.
*/
@@ -609,15 +699,21 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
hwrm_req_hold(bp, req);
for (i = 0; i < num_vfs; i++) {
+ struct bnxt_vf_info *vf = &pf->vf[i];
+
+ vf->fw_fid = pf->first_vf_id + i;
+ rc = bnxt_set_vf_link_admin_state(bp, i);
+ if (rc)
+ break;
+
if (reset)
__bnxt_set_vf_params(bp, i);
- req->vf_id = cpu_to_le16(pf->first_vf_id + i);
+ req->vf_id = cpu_to_le16(vf->fw_fid);
rc = hwrm_req_send(bp, req);
if (rc)
break;
pf->active_vfs = i + 1;
- pf->vf[i].fw_fid = pf->first_vf_id + i;
}
if (pf->active_vfs) {
@@ -684,7 +780,13 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
FUNC_CFG_REQ_ENABLES_NUM_VNICS |
FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);
- mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
+ if (bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN) {
+ req->options = FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO;
+ req->enables |=
+ cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE);
+ }
+
+ mtu = bp->dev->mtu + VLAN_ETH_HLEN;
req->mru = cpu_to_le16(mtu);
req->admin_mtu = cpu_to_le16(mtu);
@@ -759,6 +861,9 @@ int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
*num_vfs = rc;
}
+ if (BNXT_RDMA_SRIOV_EN(bp) && BNXT_ROCE_VF_RESC_CAP(bp))
+ bnxt_hwrm_roce_sriov_cfg(bp, *num_vfs);
+
return 0;
}
@@ -770,7 +875,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
int tx_ok = 0, rx_ok = 0, rss_ok = 0;
int avail_cp, avail_stat;
- /* Check if we can enable requested num of vf's. At a mininum
+ /* Check if we can enable requested num of vf's. At a minimum
* we require 1 RX 1 TX rings for each VF. In this minimum conf
* features like TPA will not be available.
*/
@@ -866,7 +971,7 @@ err_out1:
return rc;
}
-void bnxt_sriov_disable(struct bnxt *bp)
+void __bnxt_sriov_disable(struct bnxt *bp)
{
u16 num_vfs = pci_num_vf(bp->pdev);
@@ -890,10 +995,20 @@ void bnxt_sriov_disable(struct bnxt *bp)
devl_unlock(bp->dl);
bnxt_free_vf_resources(bp);
+}
+
+static void bnxt_sriov_disable(struct bnxt *bp)
+{
+ if (!pci_num_vf(bp->pdev))
+ return;
+
+ __bnxt_sriov_disable(bp);
/* Reclaim all resources for the PF. */
rtnl_lock();
+ netdev_lock(bp->dev);
bnxt_restore_pf_fw_resources(bp);
+ netdev_unlock(bp->dev);
rtnl_unlock();
}
@@ -903,17 +1018,21 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
struct bnxt *bp = netdev_priv(dev);
rtnl_lock();
+ netdev_lock(dev);
if (!netif_running(dev)) {
netdev_warn(dev, "Reject SRIOV config request since if is down!\n");
+ netdev_unlock(dev);
rtnl_unlock();
return 0;
}
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
netdev_warn(dev, "Reject SRIOV config request when FW reset is in progress\n");
+ netdev_unlock(dev);
rtnl_unlock();
return 0;
}
bp->sriov_cfg = true;
+ netdev_unlock(dev);
rtnl_unlock();
if (pci_vfs_assigned(bp->pdev)) {
@@ -1066,7 +1185,7 @@ static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
/* There are two cases:
* 1.If firmware spec < 0x10202,VF MAC address is not forwarded
* to the PF and so it doesn't have to match
- * 2.Allow VF to modify it's own MAC when PF has not assigned a
+ * 2.Allow VF to modify its own MAC when PF has not assigned a
* valid MAC address and firmware spec >= 0x10202
*/
mac_ok = true;
@@ -1262,7 +1381,7 @@ int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
return 0;
}
-void bnxt_sriov_disable(struct bnxt *bp)
+void __bnxt_sriov_disable(struct bnxt *bp)
{
}
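bnxt_hwrm_roce_sriov_cfg() above splits the PF's RoCE maxima (QPs, CQs, MRWs, SRQs, GIDs, AVs) evenly across the VFs, except when BNXT_ROCE_VF_DYN_ALLOC_CAP is set, in which case the divisor is forced to 1 and every VF is provisioned the full maximum for firmware to hand out on demand. A toy sketch of that per-VF quota rule follows; the function name and sample numbers are invented.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Per-VF quota for one RoCE resource type: an even split of the PF
 * maximum, unless firmware allocates dynamically, in which case every
 * VF is provisioned the full maximum (divide by 1).
 */
static uint32_t roce_per_vf_quota(uint32_t pf_max, int num_vfs,
				  bool dyn_alloc_cap)
{
	if (dyn_alloc_cap)
		num_vfs = 1;
	return pf_max / num_vfs;
}

int main(void)
{
	printf("static split:  %u QPs per VF\n",
	       roce_per_vf_quota(8192, 4, false));	/* 2048 */
	printf("dynamic alloc: %u QPs per VF\n",
	       roce_per_vf_quota(8192, 4, true));	/* 8192 */
	return 0;
}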
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index 9a4bacba477b..e4979d729312 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -38,7 +38,7 @@ bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf);
int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trust);
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset);
-void bnxt_sriov_disable(struct bnxt *);
+void __bnxt_sriov_disable(struct bnxt *bp);
void bnxt_hwrm_exec_fwd_req(struct bnxt *);
void bnxt_update_vf_mac(struct bnxt *);
int bnxt_approve_mac(struct bnxt *, const u8 *, bool);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index d2ca90407cce..2d66bf59cd64 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -19,8 +19,8 @@
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/vxlan.h>
+#include <linux/bnxt/hsi.h>
-#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_sriov.h"
@@ -244,7 +244,7 @@ bnxt_tc_parse_pedit(struct bnxt *bp, struct bnxt_tc_actions *actions,
offset < offset_of_ip6_daddr + 16) {
actions->nat.src_xlate = false;
idx = (offset - offset_of_ip6_daddr) / 4;
- actions->nat.l3.ipv6.saddr.s6_addr32[idx] = htonl(val);
+ actions->nat.l3.ipv6.daddr.s6_addr32[idx] = htonl(val);
} else {
netdev_err(bp->dev,
"%s: IPv6_hdr: Invalid pedit field\n",
@@ -1316,7 +1316,7 @@ static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
/* Check if there's another flow using the same tunnel decap.
* If not, add this tunnel to the table and resolve the other
- * tunnel header fileds. Ignore src_port in the tunnel_key,
+ * tunnel header fields. Ignore src_port in the tunnel_key,
* since it is not required for decap filters.
*/
decap_key->tp_src = 0;
@@ -1410,7 +1410,7 @@ static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
/* Check if there's another flow using the same tunnel encap.
* If not, add this tunnel to the table and resolve the other
- * tunnel header fileds
+ * tunnel header fields
*/
encap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table,
&tc_info->encap_ht_params,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index fdd6356f21ef..927971c362f1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -20,8 +20,9 @@
#include <asm/byteorder.h>
#include <linux/bitmap.h>
#include <linux/auxiliary_bus.h>
+#include <net/netdev_lock.h>
+#include <linux/bnxt/hsi.h>
-#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
@@ -99,6 +100,12 @@ void bnxt_set_dflt_ulp_stat_ctxs(struct bnxt *bp)
if (BNXT_PF(bp) && !bp->pf.port_id &&
bp->port_count > 1)
bp->edev->ulp_num_ctxs++;
+
+ /* Reserve one additional stat_ctx when the device is capable
+ * of supporting port mirroring on RDMA device.
+ */
+ if (BNXT_MIRROR_ON_ROCE_CAP(bp))
+ bp->edev->ulp_num_ctxs++;
}
}
@@ -112,7 +119,7 @@ int bnxt_register_dev(struct bnxt_en_dev *edev,
struct bnxt_ulp *ulp;
int rc = 0;
- rtnl_lock();
+ netdev_lock(dev);
mutex_lock(&edev->en_dev_lock);
if (!bp->irq_tbl) {
rc = -ENODEV;
@@ -135,10 +142,9 @@ int bnxt_register_dev(struct bnxt_en_dev *edev,
edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);
bnxt_fill_msix_vecs(bp, bp->edev->msix_entries);
- edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
exit:
mutex_unlock(&edev->en_dev_lock);
- rtnl_unlock();
+ netdev_unlock(dev);
return rc;
}
EXPORT_SYMBOL(bnxt_register_dev);
@@ -148,13 +154,10 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
struct bnxt_ulp *ulp;
- int i = 0;
ulp = edev->ulp_tbl;
- rtnl_lock();
+ netdev_lock(dev);
mutex_lock(&edev->en_dev_lock);
- if (ulp->msix_requested)
- edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
edev->ulp_tbl->msix_requested = 0;
if (ulp->max_async_event_id)
@@ -164,12 +167,8 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
synchronize_rcu();
ulp->max_async_event_id = 0;
ulp->async_events_bmap = NULL;
- while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
- msleep(100);
- i++;
- }
mutex_unlock(&edev->en_dev_lock);
- rtnl_unlock();
+ netdev_unlock(dev);
return;
}
EXPORT_SYMBOL(bnxt_unregister_dev);
@@ -208,7 +207,7 @@ int bnxt_send_msg(struct bnxt_en_dev *edev,
rc = hwrm_req_replace(bp, req, fw_msg->msg, fw_msg->msg_len);
if (rc)
- return rc;
+ goto drop_req;
hwrm_req_timeout(bp, req, fw_msg->timeout);
resp = hwrm_req_hold(bp, req);
@@ -220,6 +219,7 @@ int bnxt_send_msg(struct bnxt_en_dev *edev,
memcpy(fw_msg->resp, resp, resp_len);
}
+drop_req:
hwrm_req_drop(bp, req);
return rc;
}
@@ -234,10 +234,9 @@ void bnxt_ulp_stop(struct bnxt *bp)
return;
mutex_lock(&edev->en_dev_lock);
- if (!bnxt_ulp_registered(edev)) {
- mutex_unlock(&edev->en_dev_lock);
- return;
- }
+ if (!bnxt_ulp_registered(edev) ||
+ (edev->flags & BNXT_EN_FLAG_ULP_STOPPED))
+ goto ulp_stop_exit;
edev->flags |= BNXT_EN_FLAG_ULP_STOPPED;
if (aux_priv) {
@@ -253,6 +252,7 @@ void bnxt_ulp_stop(struct bnxt *bp)
adrv->suspend(adev, pm);
}
}
+ulp_stop_exit:
mutex_unlock(&edev->en_dev_lock);
}
@@ -261,19 +261,13 @@ void bnxt_ulp_start(struct bnxt *bp, int err)
struct bnxt_aux_priv *aux_priv = bp->aux_priv;
struct bnxt_en_dev *edev = bp->edev;
- if (!edev)
- return;
-
- edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED;
-
- if (err)
+ if (!edev || err)
return;
mutex_lock(&edev->en_dev_lock);
- if (!bnxt_ulp_registered(edev)) {
- mutex_unlock(&edev->en_dev_lock);
- return;
- }
+ if (!bnxt_ulp_registered(edev) ||
+ !(edev->flags & BNXT_EN_FLAG_ULP_STOPPED))
+ goto ulp_start_exit;
if (edev->ulp_tbl->msix_requested)
bnxt_fill_msix_vecs(bp, edev->msix_entries);
@@ -290,6 +284,8 @@ void bnxt_ulp_start(struct bnxt *bp, int err)
adrv->resume(adev);
}
}
+ulp_start_exit:
+ edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED;
mutex_unlock(&edev->en_dev_lock);
}
@@ -297,8 +293,9 @@ void bnxt_ulp_irq_stop(struct bnxt *bp)
{
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
+ bool reset = false;
- if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
+ if (!edev)
return;
if (bnxt_ulp_registered(bp->edev)) {
@@ -307,10 +304,12 @@ void bnxt_ulp_irq_stop(struct bnxt *bp)
if (!ulp->msix_requested)
return;
- ops = rtnl_dereference(ulp->ulp_ops);
+ ops = netdev_lock_dereference(ulp->ulp_ops, bp->dev);
if (!ops || !ops->ulp_irq_stop)
return;
- ops->ulp_irq_stop(ulp->handle);
+ if (test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
+ reset = true;
+ ops->ulp_irq_stop(ulp->handle, reset);
}
}
@@ -319,7 +318,7 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
- if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
+ if (!edev)
return;
if (bnxt_ulp_registered(bp->edev)) {
@@ -329,7 +328,7 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
if (!ulp->msix_requested)
return;
- ops = rtnl_dereference(ulp->ulp_ops);
+ ops = netdev_lock_dereference(ulp->ulp_ops, bp->dev);
if (!ops || !ops->ulp_irq_restart)
return;
@@ -345,9 +344,36 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
}
}
-int bnxt_register_async_events(struct bnxt_en_dev *edev,
- unsigned long *events_bmap,
- u16 max_id)
+void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
+{
+ u16 event_id = le16_to_cpu(cmpl->event_id);
+ struct bnxt_en_dev *edev = bp->edev;
+ struct bnxt_ulp_ops *ops;
+ struct bnxt_ulp *ulp;
+
+ if (!bnxt_ulp_registered(edev))
+ return;
+ ulp = edev->ulp_tbl;
+
+ rcu_read_lock();
+
+ ops = rcu_dereference(ulp->ulp_ops);
+ if (!ops || !ops->ulp_async_notifier)
+ goto exit_unlock_rcu;
+ if (!ulp->async_events_bmap || event_id > ulp->max_async_event_id)
+ goto exit_unlock_rcu;
+
+ /* Read max_async_event_id first before testing the bitmap. */
+ smp_rmb();
+
+ if (test_bit(event_id, ulp->async_events_bmap))
+ ops->ulp_async_notifier(ulp->handle, cmpl);
+exit_unlock_rcu:
+ rcu_read_unlock();
+}
+
+void bnxt_register_async_events(struct bnxt_en_dev *edev,
+ unsigned long *events_bmap, u16 max_id)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
@@ -359,7 +385,6 @@ int bnxt_register_async_events(struct bnxt_en_dev *edev,
smp_wmb();
ulp->max_async_event_id = max_id;
bnxt_hwrm_func_drv_rgtr(bp, events_bmap, max_id + 1, true);
- return 0;
}
EXPORT_SYMBOL(bnxt_register_async_events);
@@ -414,6 +439,10 @@ static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
if (bp->flags & BNXT_FLAG_VF)
edev->flags |= BNXT_EN_FLAG_VF;
+ if (BNXT_ROCE_VF_RESC_CAP(bp))
+ edev->flags |= BNXT_EN_FLAG_ROCE_VF_RES_MGMT;
+ if (BNXT_SW_RES_LMT(bp))
+ edev->flags |= BNXT_EN_FLAG_SW_RES_LMT;
edev->chip_num = bp->chip_num;
edev->hw_ring_stats_size = bp->hw_ring_stats_size;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 4f4914f5c84c..3c5b8a53f715 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -10,9 +10,6 @@
#ifndef BNXT_ULP_H
#define BNXT_ULP_H
-#define BNXT_ROCE_ULP 0
-#define BNXT_MAX_ULP 1
-
#define BNXT_MIN_ROCE_CP_RINGS 2
#define BNXT_MIN_ROCE_STAT_CTXS 1
@@ -30,7 +27,9 @@ struct bnxt_msix_entry {
};
struct bnxt_ulp_ops {
- void (*ulp_irq_stop)(void *);
+ /* async_notifier() cannot sleep (in BH context) */
+ void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *);
+ void (*ulp_irq_stop)(void *, bool);
void (*ulp_irq_restart)(void *, struct bnxt_msix_entry *);
};
@@ -48,7 +47,6 @@ struct bnxt_ulp {
unsigned long *async_events_bmap;
u16 max_async_event_id;
u16 msix_requested;
- atomic_t ref_count;
};
struct bnxt_en_dev {
@@ -60,10 +58,12 @@ struct bnxt_en_dev {
#define BNXT_EN_FLAG_ROCEV2_CAP 0x2
#define BNXT_EN_FLAG_ROCE_CAP (BNXT_EN_FLAG_ROCEV1_CAP | \
BNXT_EN_FLAG_ROCEV2_CAP)
- #define BNXT_EN_FLAG_MSIX_REQUESTED 0x4
#define BNXT_EN_FLAG_ULP_STOPPED 0x8
#define BNXT_EN_FLAG_VF 0x10
#define BNXT_EN_VF(edev) ((edev)->flags & BNXT_EN_FLAG_VF)
+ #define BNXT_EN_FLAG_ROCE_VF_RES_MGMT 0x20
+ #define BNXT_EN_FLAG_SW_RES_LMT 0x40
+#define BNXT_EN_SW_RES_LMT(edev) ((edev)->flags & BNXT_EN_FLAG_SW_RES_LMT)
struct bnxt_ulp *ulp_tbl;
int l2_db_size; /* Doorbell BAR size in
@@ -123,6 +123,6 @@ int bnxt_register_dev(struct bnxt_en_dev *edev, struct bnxt_ulp_ops *ulp_ops,
void *handle);
void bnxt_unregister_dev(struct bnxt_en_dev *edev);
int bnxt_send_msg(struct bnxt_en_dev *edev, struct bnxt_fw_msg *fw_msg);
-int bnxt_register_async_events(struct bnxt_en_dev *edev,
- unsigned long *events_bmap, u16 max_id);
+void bnxt_register_async_events(struct bnxt_en_dev *edev,
+ unsigned long *events_bmap, u16 max_id);
#endif
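The new bnxt_ulp_async_events() forwards a firmware async completion to the RoCE driver only if the event id falls within the registered range and its bit is set in async_events_bmap, with the ops pointer dereferenced under rcu_read_lock() and an smp_rmb() pairing against the smp_wmb() in bnxt_register_async_events(). Below is a simplified userspace sketch of just the event-gating check, assuming an invented ulp_events structure and a single 64-bit word in place of the kernel's unsigned long bitmap; RCU and barriers are omitted.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for the registered-event bitmap: one bit per event id. */
struct ulp_events {
	uint64_t bmap;		/* events the ULP asked for */
	uint16_t max_id;	/* highest registered event id */
};

/* Forward an async event only if the ULP registered for it; mirrors the
 * event_id/max_id/bitmap checks in bnxt_ulp_async_events() without the
 * RCU protection and smp_rmb() the kernel version needs.
 */
static bool ulp_event_wanted(const struct ulp_events *e, uint16_t event_id)
{
	if (event_id > e->max_id)
		return false;
	return (e->bmap >> event_id) & 1;
}

int main(void)
{
	struct ulp_events e = {
		.bmap = (1ULL << 7) | (1ULL << 33),
		.max_id = 40,
	};

	printf("event 7:  %d\n", ulp_event_wanted(&e, 7));	/* 1 */
	printf("event 8:  %d\n", ulp_event_wanted(&e, 8));	/* 0 */
	printf("event 63: %d\n", ulp_event_wanted(&e, 63));	/* 0, above max_id */
	return 0;
}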
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 1467b94a6427..bd116fd578d8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -12,8 +12,8 @@
#include <linux/rtnetlink.h>
#include <linux/jhash.h>
#include <net/pkt_cls.h>
+#include <linux/bnxt/hsi.h>
-#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_vfr.h"
@@ -257,8 +257,7 @@ bool bnxt_dev_is_vf_rep(struct net_device *dev)
/* Called when the parent PF interface is closed:
* As the mode transition from SWITCHDEV to LEGACY
- * happens under the rtnl_lock() this routine is safe
- * under the rtnl_lock()
+ * happens under the netdev instance lock this routine is safe
*/
void bnxt_vf_reps_close(struct bnxt *bp)
{
@@ -278,8 +277,7 @@ void bnxt_vf_reps_close(struct bnxt *bp)
/* Called when the parent PF interface is opened (re-opened):
* As the mode transition from SWITCHDEV to LEGACY
- * happen under the rtnl_lock() this routine is safe
- * under the rtnl_lock()
+ * happen under the netdev instance lock this routine is safe
*/
void bnxt_vf_reps_open(struct bnxt *bp)
{
@@ -348,7 +346,7 @@ void bnxt_vf_reps_destroy(struct bnxt *bp)
/* Ensure that parent PF's and VF-reps' RX/TX has been quiesced
* before proceeding with VF-rep cleanup.
*/
- rtnl_lock();
+ netdev_lock(bp->dev);
if (netif_running(bp->dev)) {
bnxt_close_nic(bp, false, false);
closed = true;
@@ -365,10 +363,10 @@ void bnxt_vf_reps_destroy(struct bnxt *bp)
bnxt_open_nic(bp, false, false);
bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
}
- rtnl_unlock();
+ netdev_unlock(bp->dev);
- /* Need to call vf_reps_destroy() outside of rntl_lock
- * as unregister_netdev takes rtnl_lock
+ /* Need to call vf_reps_destroy() outside of netdev instance lock
+ * as unregister_netdev takes it
*/
__bnxt_vf_reps_destroy(bp);
}
@@ -376,7 +374,7 @@ void bnxt_vf_reps_destroy(struct bnxt *bp)
/* Free the VF-Reps in firmware, during firmware hot-reset processing.
* Note that the VF-Rep netdevs are still active (not unregistered) during
* this process. As the mode transition from SWITCHDEV to LEGACY happens
- * under the rtnl_lock() this routine is safe under the rtnl_lock().
+ * under the netdev instance lock this routine is safe.
*/
void bnxt_vf_reps_free(struct bnxt *bp)
{
@@ -413,7 +411,7 @@ static int bnxt_alloc_vf_rep(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
/* Allocate the VF-Reps in firmware, during firmware hot-reset processing.
* Note that the VF-Rep netdevs are still active (not unregistered) during
* this process. As the mode transition from SWITCHDEV to LEGACY happens
- * under the rtnl_lock() this routine is safe under the rtnl_lock().
+ * under the netdev instance lock this routine is safe.
*/
int bnxt_vf_reps_alloc(struct bnxt *bp)
{
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index f88b641533fc..3e77a96e5a3e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -15,8 +15,9 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>
+#include <net/netdev_lock.h>
#include <net/page_pool/helpers.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_xdp.h"
@@ -48,8 +49,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
tx_buf->page = virt_to_head_page(xdp->data);
txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
- flags = (len << TX_BD_LEN_SHIFT) |
- ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) |
+ flags = (len << TX_BD_LEN_SHIFT) | TX_BD_CNT(num_frags + 1) |
bnxt_lhint_arr[len >> 9];
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 1 + num_frags);
@@ -115,7 +115,7 @@ static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
tx_buf->action = XDP_REDIRECT;
tx_buf->xdpf = xdpf;
dma_unmap_addr_set(tx_buf, mapping, mapping);
- dma_unmap_len_set(tx_buf, len, 0);
+ dma_unmap_len_set(tx_buf, len, len);
}
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
@@ -382,19 +382,24 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
return nxmit;
}
-/* Under rtnl_lock */
static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
{
struct net_device *dev = bp->dev;
int tx_xdp = 0, tx_cp, rc, tc;
struct bpf_prog *old;
+ netdev_assert_locked(dev);
+
if (prog && !prog->aux->xdp_has_frags &&
bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
netdev_warn(dev, "MTU %d larger than %d without XDP frag support.\n",
bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
return -EOPNOTSUPP;
}
+ if (prog && bp->flags & BNXT_FLAG_HDS) {
+ netdev_warn(dev, "XDP is disallowed when HDS is enabled.\n");
+ return -EOPNOTSUPP;
+ }
if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
return -EOPNOTSUPP;
@@ -420,17 +425,10 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
if (prog) {
bnxt_set_rx_skb_mode(bp, true);
- xdp_features_set_redirect_target(dev, true);
+ xdp_features_set_redirect_target_locked(dev, true);
} else {
- int rx, tx;
-
- xdp_features_clear_redirect_target(dev);
+ xdp_features_clear_redirect_target_locked(dev);
bnxt_set_rx_skb_mode(bp, false);
- bnxt_get_max_rings(bp, &rx, &tx, true);
- if (rx > 1) {
- bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
- bp->dev->hw_features |= NETIF_F_LRO;
- }
}
bp->tx_nr_rings_xdp = tx_xdp;
bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
@@ -463,23 +461,15 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
struct sk_buff *
bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
- struct page_pool *pool, struct xdp_buff *xdp,
- struct rx_cmp_ext *rxcmp1)
+ struct page_pool *pool, struct xdp_buff *xdp)
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
if (!skb)
return NULL;
- skb_checksum_none_assert(skb);
- if (RX_CMP_L4_CS_OK(rxcmp1)) {
- if (bp->dev->features & NETIF_F_RXCSUM) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = RX_CMP_ENCAP(rxcmp1);
- }
- }
- xdp_update_skb_shared_info(skb, num_frags,
- sinfo->xdp_frags_size,
- BNXT_RX_PAGE_SIZE * sinfo->nr_frags,
- xdp_buff_is_frag_pfmemalloc(xdp));
+
+ xdp_update_skb_frags_info(skb, num_frags, sinfo->xdp_frags_size,
+ BNXT_RX_PAGE_SIZE * num_frags,
+ xdp_buff_get_skb_flags(xdp));
return skb;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index 0122782400b8..220285e190fc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -33,6 +33,5 @@ void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
struct xdp_buff *xdp);
struct sk_buff *bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb,
u8 num_frags, struct page_pool *pool,
- struct xdp_buff *xdp,
- struct rx_cmp_ext *rxcmp1);
+ struct xdp_buff *xdp);
#endif
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index a9040c42d2ff..6e97a5a7daaf 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -4230,8 +4230,7 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
cnic_bnx2x_delete_wait(dev, 0);
- cancel_delayed_work(&cp->delete_task);
- flush_workqueue(cnic_wq);
+ cancel_delayed_work_sync(&cp->delete_task);
if (atomic_read(&cp->iscsi_conn) != 0)
netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 53a949eb9180..05512aa10c20 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) controller driver
*
- * Copyright (c) 2014-2024 Broadcom
+ * Copyright (c) 2014-2025 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet: " fmt
@@ -35,21 +35,18 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/phy.h>
-#include <linux/platform_data/bcmgenet.h>
#include <linux/unaligned.h>
#include "bcmgenet.h"
-/* Maximum number of hardware queues, downsized if needed */
-#define GENET_MAX_MQ_CNT 4
-
/* Default highest priority queue for multi queue support */
-#define GENET_Q0_PRIORITY 0
+#define GENET_Q1_PRIORITY 0
+#define GENET_Q0_PRIORITY 1
-#define GENET_Q16_RX_BD_CNT \
+#define GENET_Q0_RX_BD_CNT \
(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
-#define GENET_Q16_TX_BD_CNT \
+#define GENET_Q0_TX_BD_CNT \
(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)
#define RX_BUF_LENGTH 2048
@@ -104,7 +101,7 @@ static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
* the platform is explicitly configured for 64-bits/LPAE.
*/
#ifdef CONFIG_PHYS_ADDR_T_64BIT
- if (priv->hw_params->flags & GENET_HAS_40BITS)
+ if (bcmgenet_has_40bits(priv))
bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}
@@ -446,33 +443,48 @@ static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
u32 offset;
u32 reg;
- offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
- reg = bcmgenet_hfb_reg_readl(priv, offset);
- reg |= (1 << (f_index % 32));
- bcmgenet_hfb_reg_writel(priv, reg, offset);
- reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
- reg |= RBUF_HFB_EN;
- bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) {
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ reg |= (1 << ((f_index % 32) + RBUF_HFB_FILTER_EN_SHIFT)) |
+ RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ } else {
+ offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
+ reg = bcmgenet_hfb_reg_readl(priv, offset);
+ reg |= (1 << (f_index % 32));
+ bcmgenet_hfb_reg_writel(priv, reg, offset);
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ reg |= RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ }
}
static void bcmgenet_hfb_disable_filter(struct bcmgenet_priv *priv, u32 f_index)
{
u32 offset, reg, reg1;
- offset = HFB_FLT_ENABLE_V3PLUS;
- reg = bcmgenet_hfb_reg_readl(priv, offset);
- reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
- if (f_index < 32) {
- reg1 &= ~(1 << (f_index % 32));
- bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32));
- } else {
- reg &= ~(1 << (f_index % 32));
- bcmgenet_hfb_reg_writel(priv, reg, offset);
- }
- if (!reg && !reg1) {
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) {
reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
- reg &= ~RBUF_HFB_EN;
+ reg &= ~(1 << ((f_index % 32) + RBUF_HFB_FILTER_EN_SHIFT));
+ if (!(reg & RBUF_HFB_FILTER_EN_MASK))
+ reg &= ~RBUF_HFB_EN;
bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ } else {
+ offset = HFB_FLT_ENABLE_V3PLUS;
+ reg = bcmgenet_hfb_reg_readl(priv, offset);
+ reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
+ if (f_index < 32) {
+ reg1 &= ~(1 << (f_index % 32));
+ bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32));
+ } else {
+ reg &= ~(1 << (f_index % 32));
+ bcmgenet_hfb_reg_writel(priv, reg, offset);
+ }
+ if (!reg && !reg1) {
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ reg &= ~RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ }
}
}
@@ -482,6 +494,9 @@ static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
u32 offset;
u32 reg;
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
+ return;
+
offset = f_index / 8;
reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
reg &= ~(0xF << (4 * (f_index % 8)));
@@ -495,9 +510,13 @@ static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
u32 offset;
u32 reg;
- offset = HFB_FLT_LEN_V3PLUS +
- ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
- sizeof(u32);
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
+ offset = HFB_FLT_LEN_V2;
+ else
+ offset = HFB_FLT_LEN_V3PLUS;
+
+ offset += sizeof(u32) *
+ ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4);
reg = bcmgenet_hfb_reg_readl(priv, offset);
reg &= ~(0xFF << (8 * (f_index % 4)));
reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
@@ -579,13 +598,13 @@ static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
struct bcmgenet_rxnfc_rule *rule)
{
struct ethtool_rx_flow_spec *fs = &rule->fs;
- u32 offset = 0, f_length = 0, f;
+ u32 offset = 0, f_length = 0, f, q;
u8 val_8, mask_8;
__be16 val_16;
u16 mask_16;
size_t size;
- f = fs->location;
+ f = fs->location + 1;
if (fs->flow_type & FLOW_MAC_EXT) {
bcmgenet_hfb_insert_data(priv, f, 0,
&fs->h_ext.h_dest, &fs->m_ext.h_dest,
@@ -667,19 +686,16 @@ static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
}
bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length);
- if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) {
- /* Ring 0 flows can be handled by the default Descriptor Ring
- * We'll map them to ring 0, but don't enable the filter
- */
- bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0);
- rule->state = BCMGENET_RXNFC_STATE_DISABLED;
- } else {
+ if (fs->ring_cookie == RX_CLS_FLOW_WAKE)
+ q = 0;
+ else if (fs->ring_cookie == RX_CLS_FLOW_DISC)
+ q = priv->hw_params->rx_queues + 1;
+ else
/* Other Rx rings are direct mapped here */
- bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f,
- fs->ring_cookie);
- bcmgenet_hfb_enable_filter(priv, f);
- rule->state = BCMGENET_RXNFC_STATE_ENABLED;
- }
+ q = fs->ring_cookie;
+ bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, q);
+ bcmgenet_hfb_enable_filter(priv, f);
+ rule->state = BCMGENET_RXNFC_STATE_ENABLED;
}
/* bcmgenet_hfb_clear
@@ -690,6 +706,7 @@ static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index)
{
u32 base, i;
+ bcmgenet_hfb_set_filter_length(priv, f_index, 0);
base = f_index * priv->hw_params->hfb_filter_size;
for (i = 0; i < priv->hw_params->hfb_filter_size; i++)
bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32));
@@ -699,22 +716,23 @@ static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
{
u32 i;
- if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
- return;
-
- bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
- bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
- bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
-
- for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
- bcmgenet_rdma_writel(priv, 0x0, i);
+ bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
- for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
- bcmgenet_hfb_reg_writel(priv, 0x0,
- HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
+ if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv)) {
+ bcmgenet_hfb_reg_writel(priv, 0,
+ HFB_FLT_ENABLE_V3PLUS);
+ bcmgenet_hfb_reg_writel(priv, 0,
+ HFB_FLT_ENABLE_V3PLUS + 4);
+ for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
+ bcmgenet_rdma_writel(priv, 0, i);
+ }
for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++)
bcmgenet_hfb_clear_filter(priv, i);
+
+ /* Enable filter 0 to send default flow to ring 0 */
+ bcmgenet_hfb_set_filter_length(priv, 0, 4);
+ bcmgenet_hfb_enable_filter(priv, 0);
}
static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
@@ -722,9 +740,6 @@ static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
int i;
INIT_LIST_HEAD(&priv->rxnfc_list);
- if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
- return;
-
for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
INIT_LIST_HEAD(&priv->rxnfc_rules[i].list);
priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED;
@@ -819,20 +834,16 @@ static int bcmgenet_get_coalesce(struct net_device *dev,
unsigned int i;
ec->tx_max_coalesced_frames =
- bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
- DMA_MBUF_DONE_THRESH);
+ bcmgenet_tdma_ring_readl(priv, 0, DMA_MBUF_DONE_THRESH);
ec->rx_max_coalesced_frames =
- bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
- DMA_MBUF_DONE_THRESH);
+ bcmgenet_rdma_ring_readl(priv, 0, DMA_MBUF_DONE_THRESH);
ec->rx_coalesce_usecs =
- bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;
+ bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT) * 8192 / 1000;
- for (i = 0; i < priv->hw_params->rx_queues; i++) {
+ for (i = 0; i <= priv->hw_params->rx_queues; i++) {
ring = &priv->rx_rings[i];
ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
}
- ring = &priv->rx_rings[DESC_INDEX];
- ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
return 0;
}
@@ -902,17 +913,13 @@ static int bcmgenet_set_coalesce(struct net_device *dev,
/* Program all TX queues with the same values, as there is no
* ethtool knob to do coalescing on a per-queue basis
*/
- for (i = 0; i < priv->hw_params->tx_queues; i++)
+ for (i = 0; i <= priv->hw_params->tx_queues; i++)
bcmgenet_tdma_ring_writel(priv, i,
ec->tx_max_coalesced_frames,
DMA_MBUF_DONE_THRESH);
- bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
- ec->tx_max_coalesced_frames,
- DMA_MBUF_DONE_THRESH);
- for (i = 0; i < priv->hw_params->rx_queues; i++)
+ for (i = 0; i <= priv->hw_params->rx_queues; i++)
bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec);
- bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[DESC_INDEX], ec);
return 0;
}
@@ -961,12 +968,13 @@ static int bcmgenet_set_pauseparam(struct net_device *dev,
/* standard ethtool support functions. */
enum bcmgenet_stat_type {
- BCMGENET_STAT_NETDEV = -1,
+ BCMGENET_STAT_RTNL = -1,
BCMGENET_STAT_MIB_RX,
BCMGENET_STAT_MIB_TX,
BCMGENET_STAT_RUNT,
BCMGENET_STAT_MISC,
BCMGENET_STAT_SOFT,
+ BCMGENET_STAT_SOFT64,
};
struct bcmgenet_stats {
@@ -976,13 +984,15 @@ struct bcmgenet_stats {
enum bcmgenet_stat_type type;
/* reg offset from UMAC base for misc counters */
u16 reg_offset;
+ /* sync for u64 stats counters */
+ int syncp_offset;
};
-#define STAT_NETDEV(m) { \
+#define STAT_RTNL(m) { \
.stat_string = __stringify(m), \
- .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
- .stat_offset = offsetof(struct net_device_stats, m), \
- .type = BCMGENET_STAT_NETDEV, \
+ .stat_sizeof = sizeof(((struct rtnl_link_stats64 *)0)->m), \
+ .stat_offset = offsetof(struct rtnl_link_stats64, m), \
+ .type = BCMGENET_STAT_RTNL, \
}
#define STAT_GENET_MIB(str, m, _type) { \
@@ -992,6 +1002,14 @@ struct bcmgenet_stats {
.type = _type, \
}
+#define STAT_GENET_SOFT_MIB64(str, s, m) { \
+ .stat_string = str, \
+ .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->s.m), \
+ .stat_offset = offsetof(struct bcmgenet_priv, s.m), \
+ .type = BCMGENET_STAT_SOFT64, \
+ .syncp_offset = offsetof(struct bcmgenet_priv, s.syncp), \
+}
+
#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
@@ -1006,18 +1024,38 @@ struct bcmgenet_stats {
}
#define STAT_GENET_Q(num) \
- STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \
- tx_rings[num].packets), \
- STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \
- tx_rings[num].bytes), \
- STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \
- rx_rings[num].bytes), \
- STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \
- rx_rings[num].packets), \
- STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \
- rx_rings[num].errors), \
- STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \
- rx_rings[num].dropped)
+ STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_packets", \
+ tx_rings[num].stats64, packets), \
+ STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_bytes", \
+ tx_rings[num].stats64, bytes), \
+ STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_errors", \
+ tx_rings[num].stats64, errors), \
+ STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_dropped", \
+ tx_rings[num].stats64, dropped), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_bytes", \
+ rx_rings[num].stats64, bytes), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_packets", \
+ rx_rings[num].stats64, packets), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_errors", \
+ rx_rings[num].stats64, errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_dropped", \
+ rx_rings[num].stats64, dropped), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_multicast", \
+ rx_rings[num].stats64, multicast), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_missed", \
+ rx_rings[num].stats64, missed), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_length_errors", \
+ rx_rings[num].stats64, length_errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_over_errors", \
+ rx_rings[num].stats64, over_errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_crc_errors", \
+ rx_rings[num].stats64, crc_errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_frame_errors", \
+ rx_rings[num].stats64, frame_errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_fragmented_errors", \
+ rx_rings[num].stats64, fragmented_errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_broadcast", \
+ rx_rings[num].stats64, broadcast)
/* There is a 0xC gap between the end of RX and beginning of TX stats and then
* between the end of TX stats and the beginning of the RX RUNT
@@ -1029,15 +1067,20 @@ struct bcmgenet_stats {
*/
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
/* general stats */
- STAT_NETDEV(rx_packets),
- STAT_NETDEV(tx_packets),
- STAT_NETDEV(rx_bytes),
- STAT_NETDEV(tx_bytes),
- STAT_NETDEV(rx_errors),
- STAT_NETDEV(tx_errors),
- STAT_NETDEV(rx_dropped),
- STAT_NETDEV(tx_dropped),
- STAT_NETDEV(multicast),
+ STAT_RTNL(rx_packets),
+ STAT_RTNL(tx_packets),
+ STAT_RTNL(rx_bytes),
+ STAT_RTNL(tx_bytes),
+ STAT_RTNL(rx_errors),
+ STAT_RTNL(tx_errors),
+ STAT_RTNL(rx_dropped),
+ STAT_RTNL(tx_dropped),
+ STAT_RTNL(multicast),
+ STAT_RTNL(rx_missed_errors),
+ STAT_RTNL(rx_length_errors),
+ STAT_RTNL(rx_over_errors),
+ STAT_RTNL(rx_crc_errors),
+ STAT_RTNL(rx_frame_errors),
/* UniMAC RSV counters */
STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
@@ -1120,11 +1163,25 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
STAT_GENET_Q(1),
STAT_GENET_Q(2),
STAT_GENET_Q(3),
- STAT_GENET_Q(16),
+ STAT_GENET_Q(4),
};
#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
+#define BCMGENET_STATS64_ADD(stats, m, v) \
+ do { \
+ u64_stats_update_begin(&stats->syncp); \
+ u64_stats_add(&stats->m, v); \
+ u64_stats_update_end(&stats->syncp); \
+ } while (0)
+
+#define BCMGENET_STATS64_INC(stats, m) \
+ do { \
+ u64_stats_update_begin(&stats->syncp); \
+ u64_stats_inc(&stats->m); \
+ u64_stats_update_end(&stats->syncp); \
+ } while (0)
+
static void bcmgenet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -1144,14 +1201,14 @@ static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
u8 *data)
{
+ const char *str;
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < BCMGENET_STATS_LEN; i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
- bcmgenet_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
+ str = bcmgenet_gstrings_stats[i].stat_string;
+ ethtool_puts(&data, str);
}
break;
}
@@ -1208,8 +1265,9 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
s = &bcmgenet_gstrings_stats[i];
switch (s->type) {
- case BCMGENET_STAT_NETDEV:
+ case BCMGENET_STAT_RTNL:
case BCMGENET_STAT_SOFT:
+ case BCMGENET_STAT_SOFT64:
continue;
case BCMGENET_STAT_RUNT:
offset += BCMGENET_STAT_OFFSET;
@@ -1247,28 +1305,40 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
u64 *data)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct rtnl_link_stats64 stats64;
+ struct u64_stats_sync *syncp;
+ unsigned int start;
int i;
if (netif_running(dev))
bcmgenet_update_mib_counters(priv);
- dev->netdev_ops->ndo_get_stats(dev);
+ dev_get_stats(dev, &stats64);
for (i = 0; i < BCMGENET_STATS_LEN; i++) {
const struct bcmgenet_stats *s;
char *p;
s = &bcmgenet_gstrings_stats[i];
- if (s->type == BCMGENET_STAT_NETDEV)
- p = (char *)&dev->stats;
- else
- p = (char *)priv;
- p += s->stat_offset;
- if (sizeof(unsigned long) != sizeof(u32) &&
- s->stat_sizeof == sizeof(unsigned long))
- data[i] = *(unsigned long *)p;
- else
- data[i] = *(u32 *)p;
+ p = (char *)priv;
+
+ if (s->type == BCMGENET_STAT_SOFT64) {
+ syncp = (struct u64_stats_sync *)(p + s->syncp_offset);
+ do {
+ start = u64_stats_fetch_begin(syncp);
+ data[i] = u64_stats_read((u64_stats_t *)(p + s->stat_offset));
+ } while (u64_stats_fetch_retry(syncp, start));
+ } else {
+ if (s->type == BCMGENET_STAT_RTNL)
+ p = (char *)&stats64;
+
+ p += s->stat_offset;
+ if (sizeof(unsigned long) != sizeof(u32) &&
+ s->stat_sizeof == sizeof(unsigned long))
+ data[i] = *(unsigned long *)p;
+ else
+ data[i] = *(u32 *)p;
+ }
}
}
@@ -1438,7 +1508,8 @@ static int bcmgenet_insert_flow(struct net_device *dev,
}
if (cmd->fs.ring_cookie > priv->hw_params->rx_queues &&
- cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE) {
+ cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE &&
+ cmd->fs.ring_cookie != RX_CLS_FLOW_DISC) {
netdev_err(dev, "rxnfc: Unsupported action (%llu)\n",
cmd->fs.ring_cookie);
return -EINVAL;
@@ -1472,10 +1543,10 @@ static int bcmgenet_insert_flow(struct net_device *dev,
loc_rule = &priv->rxnfc_rules[cmd->fs.location];
}
if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED)
- bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
+ bcmgenet_hfb_disable_filter(priv, cmd->fs.location + 1);
if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
list_del(&loc_rule->list);
- bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
+ bcmgenet_hfb_clear_filter(priv, cmd->fs.location + 1);
}
loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED;
memcpy(&loc_rule->fs, &cmd->fs,
@@ -1505,10 +1576,10 @@ static int bcmgenet_delete_flow(struct net_device *dev,
}
if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
- bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
+ bcmgenet_hfb_disable_filter(priv, cmd->fs.location + 1);
if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
list_del(&rule->list);
- bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
+ bcmgenet_hfb_clear_filter(priv, cmd->fs.location + 1);
}
rule->state = BCMGENET_RXNFC_STATE_UNUSED;
memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec));
@@ -1569,6 +1640,13 @@ static int bcmgenet_get_num_flows(struct bcmgenet_priv *priv)
return res;
}
+static u32 bcmgenet_get_rx_ring_count(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ return priv->hw_params->rx_queues ?: 1;
+}
+
static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -1578,9 +1656,6 @@ static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
int i = 0;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = priv->hw_params->rx_queues ?: 1;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = bcmgenet_get_num_flows(priv);
cmd->data = MAX_NUM_OF_FS_RULES | RX_CLS_LOC_SPECIAL;
@@ -1629,6 +1704,7 @@ static const struct ethtool_ops bcmgenet_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
.get_rxnfc = bcmgenet_get_rxnfc,
.set_rxnfc = bcmgenet_set_rxnfc,
+ .get_rx_ring_count = bcmgenet_get_rx_ring_count,
.get_pauseparam = bcmgenet_get_pauseparam,
.set_pauseparam = bcmgenet_set_pauseparam,
};
@@ -1651,9 +1727,9 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
case GENET_POWER_PASSIVE:
/* Power down LED */
- if (priv->hw_params->flags & GENET_HAS_EXT) {
+ if (bcmgenet_has_ext(priv)) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
- if (GENET_IS_V5(priv) && !priv->ephy_16nm)
+ if (GENET_IS_V5(priv) && !bcmgenet_has_ephy_16nm(priv))
reg |= EXT_PWR_DOWN_PHY_EN |
EXT_PWR_DOWN_PHY_RD |
EXT_PWR_DOWN_PHY_SD |
@@ -1676,13 +1752,14 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
return ret;
}
-static void bcmgenet_power_up(struct bcmgenet_priv *priv,
- enum bcmgenet_power_mode mode)
+static int bcmgenet_power_up(struct bcmgenet_priv *priv,
+ enum bcmgenet_power_mode mode)
{
+ int ret = 0;
u32 reg;
- if (!(priv->hw_params->flags & GENET_HAS_EXT))
- return;
+ if (!bcmgenet_has_ext(priv))
+ return ret;
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
@@ -1690,7 +1767,7 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
case GENET_POWER_PASSIVE:
reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS |
EXT_ENERGY_DET_MASK);
- if (GENET_IS_V5(priv) && !priv->ephy_16nm) {
+ if (GENET_IS_V5(priv) && !bcmgenet_has_ephy_16nm(priv)) {
reg &= ~(EXT_PWR_DOWN_PHY_EN |
EXT_PWR_DOWN_PHY_RD |
EXT_PWR_DOWN_PHY_SD |
@@ -1718,11 +1795,13 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
}
break;
case GENET_POWER_WOL_MAGIC:
- bcmgenet_wol_power_up_cfg(priv, mode);
- return;
+ ret = bcmgenet_wol_power_up_cfg(priv, mode);
+ break;
default:
break;
}
+
+ return ret;
}
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
@@ -1759,18 +1838,6 @@ static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv,
return tx_cb_ptr;
}
-static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
-{
- bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
- INTRL2_CPU_MASK_SET);
-}
-
-static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
-{
- bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
- INTRL2_CPU_MASK_CLEAR);
-}
-
static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
{
bcmgenet_intrl2_1_writel(ring->priv,
@@ -1785,18 +1852,6 @@ static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
INTRL2_CPU_MASK_CLEAR);
}
-static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
-{
- bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
- INTRL2_CPU_MASK_SET);
-}
-
-static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
-{
- bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
- INTRL2_CPU_MASK_CLEAR);
-}
-
static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
{
bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
@@ -1868,6 +1923,7 @@ static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev,
static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
struct bcmgenet_tx_ring *ring)
{
+ struct bcmgenet_tx_stats64 *stats = &ring->stats64;
struct bcmgenet_priv *priv = netdev_priv(dev);
unsigned int txbds_processed = 0;
unsigned int bytes_compl = 0;
@@ -1877,12 +1933,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
struct sk_buff *skb;
/* Clear status before servicing to reduce spurious interrupts */
- if (ring->index == DESC_INDEX)
- bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE,
- INTRL2_CPU_CLEAR);
- else
- bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
- INTRL2_CPU_CLEAR);
+ bcmgenet_intrl2_1_writel(priv, (1 << ring->index), INTRL2_CPU_CLEAR);
/* Compute how many buffers are transmitted since last xmit call */
c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
@@ -1913,22 +1964,51 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
ring->free_bds += txbds_processed;
ring->c_index = c_index;
- ring->packets += pkts_compl;
- ring->bytes += bytes_compl;
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_add(&stats->packets, pkts_compl);
+ u64_stats_add(&stats->bytes, bytes_compl);
+ u64_stats_update_end(&stats->syncp);
- netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
+ netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->index),
pkts_compl, bytes_compl);
return txbds_processed;
}
static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
- struct bcmgenet_tx_ring *ring)
+ struct bcmgenet_tx_ring *ring,
+ bool all)
{
- unsigned int released;
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device *kdev = &priv->pdev->dev;
+ unsigned int released, drop, wr_ptr;
+ struct enet_cb *cb_ptr;
+ struct sk_buff *skb;
spin_lock_bh(&ring->lock);
released = __bcmgenet_tx_reclaim(dev, ring);
+ if (all) {
+ skb = NULL;
+ drop = (ring->prod_index - ring->c_index) & DMA_C_INDEX_MASK;
+ released += drop;
+ ring->prod_index = ring->c_index & DMA_C_INDEX_MASK;
+ while (drop--) {
+ cb_ptr = bcmgenet_put_txcb(priv, ring);
+ skb = cb_ptr->skb;
+ bcmgenet_free_tx_cb(kdev, cb_ptr);
+ if (skb && cb_ptr == GENET_CB(skb)->first_cb) {
+ dev_consume_skb_any(skb);
+ skb = NULL;
+ }
+ }
+ if (skb)
+ dev_consume_skb_any(skb);
+ bcmgenet_tdma_ring_writel(priv, ring->index,
+ ring->prod_index, TDMA_PROD_INDEX);
+ wr_ptr = ring->write_ptr * WORDS_PER_BD(priv);
+ bcmgenet_tdma_ring_writel(priv, ring->index, wr_ptr,
+ TDMA_WRITE_PTR);
+ }
spin_unlock_bh(&ring->lock);
return released;
@@ -1944,14 +2024,14 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
spin_lock(&ring->lock);
work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
- txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
+ txq = netdev_get_tx_queue(ring->priv->dev, ring->index);
netif_tx_wake_queue(txq);
}
spin_unlock(&ring->lock);
if (work_done == 0) {
napi_complete(napi);
- ring->int_enable(ring);
+ bcmgenet_tx_ring_int_enable(ring);
return 0;
}
@@ -1962,22 +2042,21 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
static void bcmgenet_tx_reclaim_all(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- int i;
-
- if (netif_is_multiqueue(dev)) {
- for (i = 0; i < priv->hw_params->tx_queues; i++)
- bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
- }
+ int i = 0;
- bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
+ do {
+ bcmgenet_tx_reclaim(dev, &priv->tx_rings[i++], true);
+ } while (i <= priv->hw_params->tx_queues && netif_is_multiqueue(dev));
}
/* Reallocate the SKB to put enough headroom in front of it and insert
* the transmit checksum offsets in the descriptors
*/
static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct bcmgenet_tx_ring *ring)
{
+ struct bcmgenet_tx_stats64 *stats = &ring->stats64;
struct bcmgenet_priv *priv = netdev_priv(dev);
struct status_64 *status = NULL;
struct sk_buff *new_skb;
@@ -1994,7 +2073,7 @@ static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
if (!new_skb) {
dev_kfree_skb_any(skb);
priv->mib.tx_realloc_tsb_failed++;
- dev->stats.tx_dropped++;
+ BCMGENET_STATS64_INC(stats, dropped);
return NULL;
}
dev_consume_skb_any(skb);
@@ -2057,19 +2136,14 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
index = skb_get_queue_mapping(skb);
/* Mapping strategy:
- * queue_mapping = 0, unclassified, packet xmited through ring16
- * queue_mapping = 1, goes to ring 0. (highest priority queue
- * queue_mapping = 2, goes to ring 1.
- * queue_mapping = 3, goes to ring 2.
- * queue_mapping = 4, goes to ring 3.
+ * queue_mapping = 0, unclassified, packet transmitted through ring 0
+ * queue_mapping = 1, goes to ring 1 (highest priority queue).
+ * queue_mapping = 2, goes to ring 2.
+ * queue_mapping = 3, goes to ring 3.
+ * queue_mapping = 4, goes to ring 4.
*/
- if (index == 0)
- index = DESC_INDEX;
- else
- index -= 1;
-
ring = &priv->tx_rings[index];
- txq = netdev_get_tx_queue(dev, ring->queue);
+ txq = netdev_get_tx_queue(dev, index);
nr_frags = skb_shinfo(skb)->nr_frags;
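Since ring 0 is now the default queue, skb_get_queue_mapping() indexes priv->tx_rings[] directly and the old translation (queue 0 to ring 16, queue N to ring N - 1) disappears. A stand-alone sketch of the before/after mapping; DESC_INDEX_OLD merely stands in for the former ring 16 and is not a driver constant:

#include <stdio.h>

#define DESC_INDEX_OLD 16	/* stand-in for the former default ring 16 */

/* old scheme: queue 0 -> default ring 16, queue N -> priority ring N - 1 */
static unsigned int map_old(unsigned int queue)
{
	return queue ? queue - 1 : DESC_INDEX_OLD;
}

/* new scheme: identity mapping, ring 0 is the default queue */
static unsigned int map_new(unsigned int queue)
{
	return queue;
}

int main(void)
{
	unsigned int q;

	for (q = 0; q <= 4; q++)
		printf("queue %u: old ring %2u, new ring %u\n",
		       q, map_old(q), map_new(q));
	return 0;
}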
@@ -2087,7 +2161,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
GENET_CB(skb)->bytes_sent = skb->len;
/* add the Transmit Status Block */
- skb = bcmgenet_add_tsb(dev, skb);
+ skb = bcmgenet_add_tsb(dev, skb, ring);
if (!skb) {
ret = NETDEV_TX_OK;
goto out;
@@ -2229,6 +2303,7 @@ static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
unsigned int budget)
{
+ struct bcmgenet_rx_stats64 *stats = &ring->stats64;
struct bcmgenet_priv *priv = ring->priv;
struct net_device *dev = priv->dev;
struct enet_cb *cb;
@@ -2242,15 +2317,8 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
unsigned int discards;
/* Clear status before servicing to reduce spurious interrupts */
- if (ring->index == DESC_INDEX) {
- bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE,
- INTRL2_CPU_CLEAR);
- } else {
- mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
- bcmgenet_intrl2_1_writel(priv,
- mask,
- INTRL2_CPU_CLEAR);
- }
+ mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
+ bcmgenet_intrl2_1_writel(priv, mask, INTRL2_CPU_CLEAR);
p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
@@ -2258,7 +2326,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
DMA_P_INDEX_DISCARD_CNT_MASK;
if (discards > ring->old_discards) {
discards = discards - ring->old_discards;
- ring->errors += discards;
+ BCMGENET_STATS64_ADD(stats, missed, discards);
ring->old_discards += discards;
/* Clear HW register when we reach 75% of maximum 0xFFFF */
@@ -2284,7 +2352,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
skb = bcmgenet_rx_refill(priv, cb);
if (unlikely(!skb)) {
- ring->dropped++;
+ BCMGENET_STATS64_INC(stats, dropped);
goto next;
}
@@ -2311,8 +2379,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
if (unlikely(len > RX_BUF_LENGTH)) {
netif_err(priv, rx_status, dev, "oversized packet\n");
- dev->stats.rx_length_errors++;
- dev->stats.rx_errors++;
+ BCMGENET_STATS64_INC(stats, length_errors);
dev_kfree_skb_any(skb);
goto next;
}
@@ -2320,7 +2387,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
netif_err(priv, rx_status, dev,
"dropping fragmented packet!\n");
- ring->errors++;
+ BCMGENET_STATS64_INC(stats, fragmented_errors);
dev_kfree_skb_any(skb);
goto next;
}
@@ -2333,15 +2400,22 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
DMA_RX_RXER))) {
netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
(unsigned int)dma_flag);
+ u64_stats_update_begin(&stats->syncp);
if (dma_flag & DMA_RX_CRC_ERROR)
- dev->stats.rx_crc_errors++;
+ u64_stats_inc(&stats->crc_errors);
if (dma_flag & DMA_RX_OV)
- dev->stats.rx_over_errors++;
+ u64_stats_inc(&stats->over_errors);
if (dma_flag & DMA_RX_NO)
- dev->stats.rx_frame_errors++;
+ u64_stats_inc(&stats->frame_errors);
if (dma_flag & DMA_RX_LG)
- dev->stats.rx_length_errors++;
- dev->stats.rx_errors++;
+ u64_stats_inc(&stats->length_errors);
+ if ((dma_flag & (DMA_RX_CRC_ERROR |
+ DMA_RX_OV |
+ DMA_RX_NO |
+ DMA_RX_LG |
+ DMA_RX_RXER)) == DMA_RX_RXER)
+ u64_stats_inc(&stats->errors);
+ u64_stats_update_end(&stats->syncp);
dev_kfree_skb_any(skb);
goto next;
} /* error packet */
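In the rewritten error path above each specific DMA status bit bumps its own per-ring counter, and the catch-all errors counter is only incremented when RXER is reported without any more specific cause, so the totals no longer double count. A small stand-alone model of that classification; the bit values are placeholders rather than the hardware encoding:

#include <stdio.h>

#define DMA_RX_CRC_ERROR (1u << 0)	/* placeholder bit values */
#define DMA_RX_OV        (1u << 1)
#define DMA_RX_NO        (1u << 2)
#define DMA_RX_LG        (1u << 3)
#define DMA_RX_RXER      (1u << 4)

struct demo_counts {
	unsigned int crc, over, frame, length, other;
};

static void classify(unsigned int dma_flag, struct demo_counts *c)
{
	if (dma_flag & DMA_RX_CRC_ERROR)
		c->crc++;
	if (dma_flag & DMA_RX_OV)
		c->over++;
	if (dma_flag & DMA_RX_NO)
		c->frame++;
	if (dma_flag & DMA_RX_LG)
		c->length++;
	/* only count a bare RXER as an otherwise-unclassified error */
	if ((dma_flag & (DMA_RX_CRC_ERROR | DMA_RX_OV | DMA_RX_NO |
			 DMA_RX_LG | DMA_RX_RXER)) == DMA_RX_RXER)
		c->other++;
}

int main(void)
{
	struct demo_counts c = { 0 };

	classify(DMA_RX_RXER | DMA_RX_CRC_ERROR, &c);	/* counted as CRC only */
	classify(DMA_RX_RXER, &c);			/* counted as "other" */
	printf("crc=%u other=%u over=%u frame=%u length=%u\n",
	       c.crc, c.other, c.over, c.frame, c.length);
	return 0;
}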
@@ -2361,10 +2435,15 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
/*Finish setting up the received SKB and send it to the kernel*/
skb->protocol = eth_type_trans(skb, priv->dev);
- ring->packets++;
- ring->bytes += len;
+
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->packets);
+ u64_stats_add(&stats->bytes, len);
if (dma_flag & DMA_RX_MULT)
- dev->stats.multicast++;
+ u64_stats_inc(&stats->multicast);
+ else if (dma_flag & DMA_RX_BRDCAST)
+ u64_stats_inc(&stats->broadcast);
+ u64_stats_update_end(&stats->syncp);
/* Notify kernel */
napi_gro_receive(&ring->napi, skb);
@@ -2397,10 +2476,8 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
work_done = bcmgenet_desc_rx(ring, budget);
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
- ring->int_enable(ring);
- }
+ if (work_done < budget && napi_complete_done(napi, work_done))
+ bcmgenet_rx_ring_int_enable(ring);
if (ring->dim.use_dim) {
dim_update_sample(ring->dim.event_ctr, ring->dim.packets,
@@ -2523,7 +2600,7 @@ static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
} else if (priv->ext_phy) {
int0_enable |= UMAC_IRQ_LINK_EVENT;
} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
- if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+ if (bcmgenet_has_moca_link_det(priv))
int0_enable |= UMAC_IRQ_LINK_EVENT;
}
bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
@@ -2588,8 +2665,8 @@ static void init_umac(struct bcmgenet_priv *priv)
}
/* Enable MDIO interrupts on GENET v3+ */
- if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
- int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
+ if (bcmgenet_has_mdio_intr(priv))
+ int0_enable |= UMAC_IRQ_MDIO_EVENT;
bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
@@ -2639,15 +2716,6 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
spin_lock_init(&ring->lock);
ring->priv = priv;
ring->index = index;
- if (index == DESC_INDEX) {
- ring->queue = 0;
- ring->int_enable = bcmgenet_tx_ring16_int_enable;
- ring->int_disable = bcmgenet_tx_ring16_int_disable;
- } else {
- ring->queue = index + 1;
- ring->int_enable = bcmgenet_tx_ring_int_enable;
- ring->int_disable = bcmgenet_tx_ring_int_disable;
- }
ring->cbs = priv->tx_cbs + start_ptr;
ring->size = size;
ring->clean_ptr = start_ptr;
@@ -2658,8 +2726,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
ring->end_ptr = end_ptr - 1;
ring->prod_index = 0;
- /* Set flow period for ring != 16 */
- if (index != DESC_INDEX)
+ /* Set flow period for ring != 0 */
+ if (index)
flow_period_val = ENET_MAX_MTU_SIZE << 16;
bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
@@ -2697,13 +2765,6 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
ring->priv = priv;
ring->index = index;
- if (index == DESC_INDEX) {
- ring->int_enable = bcmgenet_rx_ring16_int_enable;
- ring->int_disable = bcmgenet_rx_ring16_int_disable;
- } else {
- ring->int_enable = bcmgenet_rx_ring_int_enable;
- ring->int_disable = bcmgenet_rx_ring_int_disable;
- }
ring->cbs = priv->rx_cbs + start_ptr;
ring->size = size;
ring->c_index = 0;
@@ -2749,15 +2810,11 @@ static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_tx_ring *ring;
- for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
napi_enable(&ring->napi);
- ring->int_enable(ring);
+ bcmgenet_tx_ring_int_enable(ring);
}
-
- ring = &priv->tx_rings[DESC_INDEX];
- napi_enable(&ring->napi);
- ring->int_enable(ring);
}
static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
@@ -2765,13 +2822,10 @@ static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_tx_ring *ring;
- for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
napi_disable(&ring->napi);
}
-
- ring = &priv->tx_rings[DESC_INDEX];
- napi_disable(&ring->napi);
}
static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
@@ -2779,82 +2833,104 @@ static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_tx_ring *ring;
- for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
netif_napi_del(&ring->napi);
}
+}
+
+static int bcmgenet_tdma_disable(struct bcmgenet_priv *priv)
+{
+ int timeout = 0;
+ u32 reg, mask;
+
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ mask = (1 << (priv->hw_params->tx_queues + 1)) - 1;
+ mask = (mask << DMA_RING_BUF_EN_SHIFT) | DMA_EN;
+ reg &= ~mask;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
+ /* Check DMA status register to confirm DMA is disabled */
+ while (timeout++ < DMA_TIMEOUT_VAL) {
+ reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
+ if ((reg & mask) == mask)
+ return 0;
- ring = &priv->tx_rings[DESC_INDEX];
- netif_napi_del(&ring->napi);
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int bcmgenet_rdma_disable(struct bcmgenet_priv *priv)
+{
+ int timeout = 0;
+ u32 reg, mask;
+
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ mask = (1 << (priv->hw_params->rx_queues + 1)) - 1;
+ mask = (mask << DMA_RING_BUF_EN_SHIFT) | DMA_EN;
+ reg &= ~mask;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ /* Check DMA status register to confirm DMA is disabled */
+ while (timeout++ < DMA_TIMEOUT_VAL) {
+ reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
+ if ((reg & mask) == mask)
+ return 0;
+
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
}
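Both helpers build the same mask, one ring-buffer enable bit for each of rings 0..N plus the global DMA_EN bit, then poll DMA_STATUS until all of those bits read back as disabled. The mask arithmetic can be checked in isolation; the sketch below assumes DMA_EN is bit 0 and DMA_RING_BUF_EN_SHIFT is 1, which matches my reading of bcmgenet.h but is not taken from this hunk:

#include <stdint.h>
#include <stdio.h>

#define DMA_EN                (1u << 0)	/* assumed: global DMA enable, bit 0 */
#define DMA_RING_BUF_EN_SHIFT 1		/* assumed: ring enables start at bit 1 */

static uint32_t dma_disable_mask(unsigned int queues)
{
	uint32_t mask = (1u << (queues + 1)) - 1;	/* rings 0..queues */

	return (mask << DMA_RING_BUF_EN_SHIFT) | DMA_EN;
}

int main(void)
{
	/* four priority queues plus default ring 0 -> ring bits 1..5 + DMA_EN */
	printf("disable mask: 0x%08x\n", (unsigned int)dma_disable_mask(4));
	return 0;
}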
/* Initialize Tx queues
*
- * Queues 0-3 are priority-based, each one has 32 descriptors,
- * with queue 0 being the highest priority queue.
+ * Queues 1-4 are priority-based, each one has 32 descriptors,
+ * with queue 1 being the highest priority queue.
*
- * Queue 16 is the default Tx queue with
- * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
+ * Queue 0 is the default Tx queue with
+ * GENET_Q0_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
*
* The transmit control block pool is then partitioned as follows:
- * - Tx queue 0 uses tx_cbs[0..31]
- * - Tx queue 1 uses tx_cbs[32..63]
- * - Tx queue 2 uses tx_cbs[64..95]
- * - Tx queue 3 uses tx_cbs[96..127]
- * - Tx queue 16 uses tx_cbs[128..255]
+ * - Tx queue 0 uses tx_cbs[0..127]
+ * - Tx queue 1 uses tx_cbs[128..159]
+ * - Tx queue 2 uses tx_cbs[160..191]
+ * - Tx queue 3 uses tx_cbs[192..223]
+ * - Tx queue 4 uses tx_cbs[224..255]
*/
static void bcmgenet_init_tx_queues(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- u32 i, dma_enable;
- u32 dma_ctrl, ring_cfg;
- u32 dma_priority[3] = {0, 0, 0};
-
- dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
- dma_enable = dma_ctrl & DMA_EN;
- dma_ctrl &= ~DMA_EN;
- bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
-
- dma_ctrl = 0;
- ring_cfg = 0;
+ unsigned int start = 0, end = GENET_Q0_TX_BD_CNT;
+ u32 i, ring_mask, dma_priority[3] = {0, 0, 0};
/* Enable strict priority arbiter mode */
bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
/* Initialize Tx priority queues */
- for (i = 0; i < priv->hw_params->tx_queues; i++) {
- bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
- i * priv->hw_params->tx_bds_per_q,
- (i + 1) * priv->hw_params->tx_bds_per_q);
- ring_cfg |= (1 << i);
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+ for (i = 0; i <= priv->hw_params->tx_queues; i++) {
+ bcmgenet_init_tx_ring(priv, i, end - start, start, end);
+ start = end;
+ end += priv->hw_params->tx_bds_per_q;
dma_priority[DMA_PRIO_REG_INDEX(i)] |=
- ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
+ (i ? GENET_Q1_PRIORITY : GENET_Q0_PRIORITY)
+ << DMA_PRIO_REG_SHIFT(i);
}
- /* Initialize Tx default queue 16 */
- bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
- priv->hw_params->tx_queues *
- priv->hw_params->tx_bds_per_q,
- TOTAL_DESC);
- ring_cfg |= (1 << DESC_INDEX);
- dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
- dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
- ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
- DMA_PRIO_REG_SHIFT(DESC_INDEX));
-
/* Set Tx queue priorities */
bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);
- /* Enable Tx queues */
- bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);
+ /* Configure Tx queues as descriptor rings */
+ ring_mask = (1 << (priv->hw_params->tx_queues + 1)) - 1;
+ bcmgenet_tdma_writel(priv, ring_mask, DMA_RING_CFG);
- /* Enable Tx DMA */
- if (dma_enable)
- dma_ctrl |= DMA_EN;
- bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
+ /* Enable Tx rings */
+ ring_mask <<= DMA_RING_BUF_EN_SHIFT;
+ bcmgenet_tdma_writel(priv, ring_mask, DMA_CTRL);
}
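The start/end bookkeeping in the loop above realizes the layout documented in the comment: queue 0 takes the first GENET_Q0_TX_BD_CNT descriptors and each priority queue takes tx_bds_per_q after it (bcmgenet_init_rx_queues() below follows the same scheme for Rx). A stand-alone check of that arithmetic with the values this patch documents, 256 descriptors split as 128 + 4 x 32:

#include <stdio.h>

#define TOTAL_DESC     256
#define Q0_TX_BD_CNT   128	/* 256 - 4 * 32, per the comment above */
#define TX_BDS_PER_Q   32
#define TX_QUEUES      4

int main(void)
{
	unsigned int q, start = 0, end = Q0_TX_BD_CNT;

	for (q = 0; q <= TX_QUEUES; q++) {
		printf("Tx queue %u uses tx_cbs[%3u..%3u] (%u descriptors)\n",
		       q, start, end - 1, end - start);
		start = end;
		end += TX_BDS_PER_Q;
	}
	if (start != TOTAL_DESC)
		printf("partitioning does not cover all descriptors!\n");
	return 0;
}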
static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
@@ -2862,15 +2938,11 @@ static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_rx_ring *ring;
- for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
napi_enable(&ring->napi);
- ring->int_enable(ring);
+ bcmgenet_rx_ring_int_enable(ring);
}
-
- ring = &priv->rx_rings[DESC_INDEX];
- napi_enable(&ring->napi);
- ring->int_enable(ring);
}
static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
@@ -2878,15 +2950,11 @@ static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_rx_ring *ring;
- for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
napi_disable(&ring->napi);
cancel_work_sync(&ring->dim.dim.work);
}
-
- ring = &priv->rx_rings[DESC_INDEX];
- napi_disable(&ring->napi);
- cancel_work_sync(&ring->dim.dim.work);
}
static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
@@ -2894,13 +2962,10 @@ static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_rx_ring *ring;
- for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
netif_napi_del(&ring->napi);
}
-
- ring = &priv->rx_rings[DESC_INDEX];
- netif_napi_del(&ring->napi);
}
/* Initialize Rx queues
@@ -2908,57 +2973,32 @@ static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
* Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
* used to direct traffic to these queues.
*
- * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors.
+ * Queue 0 is also the default Rx queue with GENET_Q0_RX_BD_CNT descriptors.
*/
static int bcmgenet_init_rx_queues(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- u32 i;
- u32 dma_enable;
- u32 dma_ctrl;
- u32 ring_cfg;
+ unsigned int start = 0, end = GENET_Q0_RX_BD_CNT;
+ u32 i, ring_mask;
int ret;
- dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
- dma_enable = dma_ctrl & DMA_EN;
- dma_ctrl &= ~DMA_EN;
- bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
-
- dma_ctrl = 0;
- ring_cfg = 0;
-
/* Initialize Rx priority queues */
- for (i = 0; i < priv->hw_params->rx_queues; i++) {
- ret = bcmgenet_init_rx_ring(priv, i,
- priv->hw_params->rx_bds_per_q,
- i * priv->hw_params->rx_bds_per_q,
- (i + 1) *
- priv->hw_params->rx_bds_per_q);
+ for (i = 0; i <= priv->hw_params->rx_queues; i++) {
+ ret = bcmgenet_init_rx_ring(priv, i, end - start, start, end);
if (ret)
return ret;
- ring_cfg |= (1 << i);
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+ start = end;
+ end += priv->hw_params->rx_bds_per_q;
}
- /* Initialize Rx default queue 16 */
- ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
- priv->hw_params->rx_queues *
- priv->hw_params->rx_bds_per_q,
- TOTAL_DESC);
- if (ret)
- return ret;
-
- ring_cfg |= (1 << DESC_INDEX);
- dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
+ /* Configure Rx queues as descriptor rings */
+ ring_mask = (1 << (priv->hw_params->rx_queues + 1)) - 1;
+ bcmgenet_rdma_writel(priv, ring_mask, DMA_RING_CFG);
- /* Enable rings */
- bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);
-
- /* Configure ring as descriptor ring and re-enable DMA if enabled */
- if (dma_enable)
- dma_ctrl |= DMA_EN;
- bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
+ /* Enable Rx rings */
+ ring_mask <<= DMA_RING_BUF_EN_SHIFT;
+ bcmgenet_rdma_writel(priv, ring_mask, DMA_CTRL);
return 0;
}
@@ -2966,26 +3006,9 @@ static int bcmgenet_init_rx_queues(struct net_device *dev)
static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
{
int ret = 0;
- int timeout = 0;
- u32 reg;
- u32 dma_ctrl;
- int i;
/* Disable TDMA to stop add more frames in TX DMA */
- reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
- reg &= ~DMA_EN;
- bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-
- /* Check TDMA status register to confirm TDMA is disabled */
- while (timeout++ < DMA_TIMEOUT_VAL) {
- reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
- if (reg & DMA_DISABLED)
- break;
-
- udelay(1);
- }
-
- if (timeout == DMA_TIMEOUT_VAL) {
+ if (-ETIMEDOUT == bcmgenet_tdma_disable(priv)) {
netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
ret = -ETIMEDOUT;
}
@@ -2994,39 +3017,11 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
usleep_range(10000, 20000);
/* Disable RDMA */
- reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
- reg &= ~DMA_EN;
- bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
- timeout = 0;
- /* Check RDMA status register to confirm RDMA is disabled */
- while (timeout++ < DMA_TIMEOUT_VAL) {
- reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
- if (reg & DMA_DISABLED)
- break;
-
- udelay(1);
- }
-
- if (timeout == DMA_TIMEOUT_VAL) {
+ if (-ETIMEDOUT == bcmgenet_rdma_disable(priv)) {
netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
ret = -ETIMEDOUT;
}
- dma_ctrl = 0;
- for (i = 0; i < priv->hw_params->rx_queues; i++)
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
- reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
- reg &= ~dma_ctrl;
- bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
- dma_ctrl = 0;
- for (i = 0; i < priv->hw_params->tx_queues; i++)
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
- reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
- reg &= ~dma_ctrl;
- bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-
return ret;
}
@@ -3038,32 +3033,53 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
bcmgenet_fini_rx_napi(priv);
bcmgenet_fini_tx_napi(priv);
- for (i = 0; i < priv->num_tx_bds; i++)
- dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev,
- priv->tx_cbs + i));
-
- for (i = 0; i < priv->hw_params->tx_queues; i++) {
- txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
+ for (i = 0; i <= priv->hw_params->tx_queues; i++) {
+ txq = netdev_get_tx_queue(priv->dev, i);
netdev_tx_reset_queue(txq);
}
- txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue);
- netdev_tx_reset_queue(txq);
-
bcmgenet_free_rx_buffers(priv);
kfree(priv->rx_cbs);
kfree(priv->tx_cbs);
}
/* init_edma: Initialize DMA control register */
-static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
+static int bcmgenet_init_dma(struct bcmgenet_priv *priv, bool flush_rx)
{
- int ret;
- unsigned int i;
struct enet_cb *cb;
+ unsigned int i;
+ int ret;
+ u32 reg;
netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
+ /* Disable TX DMA */
+ ret = bcmgenet_tdma_disable(priv);
+ if (ret) {
+ netdev_err(priv->dev, "failed to halt Tx DMA\n");
+ return ret;
+ }
+
+ /* Disable RX DMA */
+ ret = bcmgenet_rdma_disable(priv);
+ if (ret) {
+ netdev_err(priv->dev, "failed to halt Rx DMA\n");
+ return ret;
+ }
+
+ /* Flush TX queues */
+ bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
+ udelay(10);
+ bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
+
+ if (flush_rx) {
+ reg = bcmgenet_rbuf_ctrl_get(priv);
+ bcmgenet_rbuf_ctrl_set(priv, reg | BIT(0));
+ udelay(10);
+ bcmgenet_rbuf_ctrl_set(priv, reg);
+ udelay(10);
+ }
+
/* Initialize common Rx ring structures */
priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
priv->num_rx_bds = TOTAL_DESC;
@@ -3113,6 +3129,15 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
/* Initialize Tx queues */
bcmgenet_init_tx_queues(priv->dev);
+ /* Enable RX/TX DMA */
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ reg |= DMA_EN;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg |= DMA_EN;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
return 0;
}
@@ -3142,7 +3167,7 @@ static void bcmgenet_irq_task(struct work_struct *work)
}
-/* bcmgenet_isr1: handle Rx and Tx priority queues */
+/* bcmgenet_isr1: handle Rx and Tx queues */
static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
{
struct bcmgenet_priv *priv = dev_id;
@@ -3161,7 +3186,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
"%s: IRQ=0x%x\n", __func__, status);
/* Check Rx priority queue interrupts */
- for (index = 0; index < priv->hw_params->rx_queues; index++) {
+ for (index = 0; index <= priv->hw_params->rx_queues; index++) {
if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
continue;
@@ -3169,20 +3194,20 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
rx_ring->dim.event_ctr++;
if (likely(napi_schedule_prep(&rx_ring->napi))) {
- rx_ring->int_disable(rx_ring);
+ bcmgenet_rx_ring_int_disable(rx_ring);
__napi_schedule_irqoff(&rx_ring->napi);
}
}
/* Check Tx priority queue interrupts */
- for (index = 0; index < priv->hw_params->tx_queues; index++) {
+ for (index = 0; index <= priv->hw_params->tx_queues; index++) {
if (!(status & BIT(index)))
continue;
tx_ring = &priv->tx_rings[index];
if (likely(napi_schedule_prep(&tx_ring->napi))) {
- tx_ring->int_disable(tx_ring);
+ bcmgenet_tx_ring_int_disable(tx_ring);
__napi_schedule_irqoff(&tx_ring->napi);
}
}
@@ -3190,12 +3215,10 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */
+/* bcmgenet_isr0: handle non-queue interrupts (MDIO, link, PHY detect) */
static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
{
struct bcmgenet_priv *priv = dev_id;
- struct bcmgenet_rx_ring *rx_ring;
- struct bcmgenet_tx_ring *tx_ring;
unsigned int status;
unsigned long flags;
@@ -3209,29 +3232,8 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
netif_dbg(priv, intr, priv->dev,
"IRQ=0x%x\n", status);
- if (status & UMAC_IRQ_RXDMA_DONE) {
- rx_ring = &priv->rx_rings[DESC_INDEX];
- rx_ring->dim.event_ctr++;
-
- if (likely(napi_schedule_prep(&rx_ring->napi))) {
- rx_ring->int_disable(rx_ring);
- __napi_schedule_irqoff(&rx_ring->napi);
- }
- }
-
- if (status & UMAC_IRQ_TXDMA_DONE) {
- tx_ring = &priv->tx_rings[DESC_INDEX];
-
- if (likely(napi_schedule_prep(&tx_ring->napi))) {
- tx_ring->int_disable(tx_ring);
- __napi_schedule_irqoff(&tx_ring->napi);
- }
- }
-
- if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
- status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
+ if (bcmgenet_has_mdio_intr(priv) && status & UMAC_IRQ_MDIO_EVENT)
wake_up(&priv->wq);
- }
/* all other interested interrupts handled in bottom half */
status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R);
@@ -3285,56 +3287,6 @@ static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
put_unaligned_be16(addr_tmp, &addr[4]);
}
-/* Returns a reusable dma control register value */
-static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv, bool flush_rx)
-{
- unsigned int i;
- u32 reg;
- u32 dma_ctrl;
-
- /* disable DMA */
- dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
- for (i = 0; i < priv->hw_params->tx_queues; i++)
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
- reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
- reg &= ~dma_ctrl;
- bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-
- dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
- for (i = 0; i < priv->hw_params->rx_queues; i++)
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
- reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
- reg &= ~dma_ctrl;
- bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
- bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
- udelay(10);
- bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
-
- if (flush_rx) {
- reg = bcmgenet_rbuf_ctrl_get(priv);
- bcmgenet_rbuf_ctrl_set(priv, reg | BIT(0));
- udelay(10);
- bcmgenet_rbuf_ctrl_set(priv, reg);
- udelay(10);
- }
-
- return dma_ctrl;
-}
-
-static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
-{
- u32 reg;
-
- reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
- reg |= dma_ctrl;
- bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
- reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
- reg |= dma_ctrl;
- bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-}
-
static void bcmgenet_netif_start(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -3358,7 +3310,6 @@ static void bcmgenet_netif_start(struct net_device *dev)
static int bcmgenet_open(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- unsigned long dma_ctrl;
int ret;
netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
@@ -3384,22 +3335,16 @@ static int bcmgenet_open(struct net_device *dev)
bcmgenet_set_hw_addr(priv, dev->dev_addr);
- /* Disable RX/TX DMA and flush TX and RX queues */
- dma_ctrl = bcmgenet_dma_disable(priv, true);
+ /* HFB init */
+ bcmgenet_hfb_init(priv);
/* Reinitialize TDMA and RDMA and SW housekeeping */
- ret = bcmgenet_init_dma(priv);
+ ret = bcmgenet_init_dma(priv, true);
if (ret) {
netdev_err(dev, "failed to initialize DMA\n");
goto err_clk_disable;
}
- /* Always enable ring 16 - descriptor ring */
- bcmgenet_enable_dma(priv, dma_ctrl);
-
- /* HFB init */
- bcmgenet_hfb_init(priv);
-
ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
dev->name, priv);
if (ret < 0) {
@@ -3446,19 +3391,21 @@ static void bcmgenet_netif_stop(struct net_device *dev, bool stop_phy)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- bcmgenet_disable_tx_napi(priv);
netif_tx_disable(dev);
/* Disable MAC receive */
+ bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
umac_enable_set(priv, CMD_RX_EN, false);
+ if (stop_phy)
+ phy_stop(dev->phydev);
+
bcmgenet_dma_teardown(priv);
/* Disable MAC transmit. TX DMA disabled must be done before this */
umac_enable_set(priv, CMD_TX_EN, false);
- if (stop_phy)
- phy_stop(dev->phydev);
+ bcmgenet_disable_tx_napi(priv);
bcmgenet_disable_rx_napi(priv);
bcmgenet_intr_disable(priv);
@@ -3506,16 +3453,11 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
if (!netif_msg_tx_err(priv))
return;
- txq = netdev_get_tx_queue(priv->dev, ring->queue);
+ txq = netdev_get_tx_queue(priv->dev, ring->index);
spin_lock(&ring->lock);
- if (ring->index == DESC_INDEX) {
- intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
- intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
- } else {
- intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
- intmsk = 1 << ring->index;
- }
+ intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
+ intmsk = 1 << ring->index;
c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
txq_stopped = netif_tx_queue_stopped(txq);
@@ -3529,7 +3471,7 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
"(sw)c_index: %d (hw)c_index: %d\n"
"(sw)clean_p: %d (sw)write_p: %d\n"
"(sw)cb_ptr: %d (sw)end_ptr: %d\n",
- ring->index, ring->queue,
+ ring->index, ring->index,
txq_stopped ? "stopped" : "active",
intsts & intmsk ? "enabled" : "disabled",
free_bds, ring->size,
@@ -3542,30 +3484,25 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- u32 int0_enable = 0;
u32 int1_enable = 0;
unsigned int q;
netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
- for (q = 0; q < priv->hw_params->tx_queues; q++)
+ for (q = 0; q <= priv->hw_params->tx_queues; q++)
bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
- bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
bcmgenet_tx_reclaim_all(dev);
- for (q = 0; q < priv->hw_params->tx_queues; q++)
+ for (q = 0; q <= priv->hw_params->tx_queues; q++)
int1_enable |= (1 << q);
- int0_enable = UMAC_IRQ_TXDMA_DONE;
-
/* Re-enable TX interrupts if disabled */
- bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
netif_trans_update(dev);
- dev->stats.tx_errors++;
+ BCMGENET_STATS64_INC((&priv->tx_rings[txqueue].stats64), errors);
netif_tx_wake_all_queues(dev);
}
@@ -3654,47 +3591,72 @@ static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
return 0;
}
-static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
+static void bcmgenet_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- unsigned long tx_bytes = 0, tx_packets = 0;
- unsigned long rx_bytes = 0, rx_packets = 0;
- unsigned long rx_errors = 0, rx_dropped = 0;
- struct bcmgenet_tx_ring *tx_ring;
- struct bcmgenet_rx_ring *rx_ring;
+ struct bcmgenet_tx_stats64 *tx_stats;
+ struct bcmgenet_rx_stats64 *rx_stats;
+ u64 rx_length_errors, rx_over_errors;
+ u64 rx_missed, rx_fragmented_errors;
+ u64 rx_crc_errors, rx_frame_errors;
+ u64 tx_errors, tx_dropped;
+ u64 rx_errors, rx_dropped;
+ u64 tx_bytes, tx_packets;
+ u64 rx_bytes, rx_packets;
+ unsigned int start;
unsigned int q;
-
- for (q = 0; q < priv->hw_params->tx_queues; q++) {
- tx_ring = &priv->tx_rings[q];
- tx_bytes += tx_ring->bytes;
- tx_packets += tx_ring->packets;
+ u64 multicast;
+
+ for (q = 0; q <= priv->hw_params->tx_queues; q++) {
+ tx_stats = &priv->tx_rings[q].stats64;
+ do {
+ start = u64_stats_fetch_begin(&tx_stats->syncp);
+ tx_bytes = u64_stats_read(&tx_stats->bytes);
+ tx_packets = u64_stats_read(&tx_stats->packets);
+ tx_errors = u64_stats_read(&tx_stats->errors);
+ tx_dropped = u64_stats_read(&tx_stats->dropped);
+ } while (u64_stats_fetch_retry(&tx_stats->syncp, start));
+
+ stats->tx_bytes += tx_bytes;
+ stats->tx_packets += tx_packets;
+ stats->tx_errors += tx_errors;
+ stats->tx_dropped += tx_dropped;
}
- tx_ring = &priv->tx_rings[DESC_INDEX];
- tx_bytes += tx_ring->bytes;
- tx_packets += tx_ring->packets;
- for (q = 0; q < priv->hw_params->rx_queues; q++) {
- rx_ring = &priv->rx_rings[q];
-
- rx_bytes += rx_ring->bytes;
- rx_packets += rx_ring->packets;
- rx_errors += rx_ring->errors;
- rx_dropped += rx_ring->dropped;
+ for (q = 0; q <= priv->hw_params->rx_queues; q++) {
+ rx_stats = &priv->rx_rings[q].stats64;
+ do {
+ start = u64_stats_fetch_begin(&rx_stats->syncp);
+ rx_bytes = u64_stats_read(&rx_stats->bytes);
+ rx_packets = u64_stats_read(&rx_stats->packets);
+ rx_errors = u64_stats_read(&rx_stats->errors);
+ rx_dropped = u64_stats_read(&rx_stats->dropped);
+ rx_missed = u64_stats_read(&rx_stats->missed);
+ rx_length_errors = u64_stats_read(&rx_stats->length_errors);
+ rx_over_errors = u64_stats_read(&rx_stats->over_errors);
+ rx_crc_errors = u64_stats_read(&rx_stats->crc_errors);
+ rx_frame_errors = u64_stats_read(&rx_stats->frame_errors);
+ rx_fragmented_errors = u64_stats_read(&rx_stats->fragmented_errors);
+ multicast = u64_stats_read(&rx_stats->multicast);
+ } while (u64_stats_fetch_retry(&rx_stats->syncp, start));
+
+ rx_errors += rx_length_errors;
+ rx_errors += rx_crc_errors;
+ rx_errors += rx_frame_errors;
+ rx_errors += rx_fragmented_errors;
+
+ stats->rx_bytes += rx_bytes;
+ stats->rx_packets += rx_packets;
+ stats->rx_errors += rx_errors;
+ stats->rx_dropped += rx_dropped;
+ stats->rx_missed_errors += rx_missed;
+ stats->rx_length_errors += rx_length_errors;
+ stats->rx_over_errors += rx_over_errors;
+ stats->rx_crc_errors += rx_crc_errors;
+ stats->rx_frame_errors += rx_frame_errors;
+ stats->multicast += multicast;
}
- rx_ring = &priv->rx_rings[DESC_INDEX];
- rx_bytes += rx_ring->bytes;
- rx_packets += rx_ring->packets;
- rx_errors += rx_ring->errors;
- rx_dropped += rx_ring->dropped;
-
- dev->stats.tx_bytes = tx_bytes;
- dev->stats.tx_packets = tx_packets;
- dev->stats.rx_bytes = rx_bytes;
- dev->stats.rx_packets = rx_packets;
- dev->stats.rx_errors = rx_errors;
- dev->stats.rx_missed_errors = rx_errors;
- dev->stats.rx_dropped = rx_dropped;
- return &dev->stats;
}
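bcmgenet_get_stats64() snapshots each ring's counters under the u64_stats sequence and folds the specific Rx error classes into rx_errors on top of the RXER-only count kept per ring. The aggregation itself is plain addition; a stand-alone model with an invented struct layout and sample numbers:

#include <stdint.h>
#include <stdio.h>

struct demo_rx_ring_stats {
	uint64_t packets, bytes, errors, dropped;
	uint64_t length_errors, crc_errors, frame_errors, fragmented_errors;
};

int main(void)
{
	struct demo_rx_ring_stats rings[2] = {
		{ .packets = 100, .bytes = 150000, .errors = 1,
		  .crc_errors = 2, .length_errors = 1 },
		{ .packets = 50, .bytes = 60000, .frame_errors = 3 },
	};
	uint64_t rx_packets = 0, rx_bytes = 0, rx_errors = 0;
	unsigned int q;

	for (q = 0; q < 2; q++) {
		/* fold the specific error classes into the summary, as the
		 * function above does after its u64_stats snapshot
		 */
		uint64_t e = rings[q].errors +
			     rings[q].length_errors + rings[q].crc_errors +
			     rings[q].frame_errors + rings[q].fragmented_errors;

		rx_packets += rings[q].packets;
		rx_bytes += rings[q].bytes;
		rx_errors += e;
	}
	printf("rx_packets=%llu rx_bytes=%llu rx_errors=%llu\n",
	       (unsigned long long)rx_packets,
	       (unsigned long long)rx_bytes,
	       (unsigned long long)rx_errors);
	return 0;
}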
static int bcmgenet_change_carrier(struct net_device *dev, bool new_carrier)
@@ -3722,132 +3684,113 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
.ndo_set_mac_address = bcmgenet_set_mac_addr,
.ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_features = bcmgenet_set_features,
- .ndo_get_stats = bcmgenet_get_stats,
+ .ndo_get_stats64 = bcmgenet_get_stats64,
.ndo_change_carrier = bcmgenet_change_carrier,
};
-/* Array of GENET hardware parameters/characteristics */
-static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
- [GENET_V1] = {
- .tx_queues = 0,
- .tx_bds_per_q = 0,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 16,
- .bp_in_mask = 0xffff,
- .hfb_filter_cnt = 16,
- .qtag_mask = 0x1F,
- .hfb_offset = 0x1000,
- .rdma_offset = 0x2000,
- .tdma_offset = 0x3000,
- .words_per_bd = 2,
- },
- [GENET_V2] = {
- .tx_queues = 4,
- .tx_bds_per_q = 32,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 16,
- .bp_in_mask = 0xffff,
- .hfb_filter_cnt = 16,
- .qtag_mask = 0x1F,
- .tbuf_offset = 0x0600,
- .hfb_offset = 0x1000,
- .hfb_reg_offset = 0x2000,
- .rdma_offset = 0x3000,
- .tdma_offset = 0x4000,
- .words_per_bd = 2,
- .flags = GENET_HAS_EXT,
- },
- [GENET_V3] = {
- .tx_queues = 4,
- .tx_bds_per_q = 32,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 17,
- .bp_in_mask = 0x1ffff,
- .hfb_filter_cnt = 48,
- .hfb_filter_size = 128,
- .qtag_mask = 0x3F,
- .tbuf_offset = 0x0600,
- .hfb_offset = 0x8000,
- .hfb_reg_offset = 0xfc00,
- .rdma_offset = 0x10000,
- .tdma_offset = 0x11000,
- .words_per_bd = 2,
- .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
- GENET_HAS_MOCA_LINK_DET,
- },
- [GENET_V4] = {
- .tx_queues = 4,
- .tx_bds_per_q = 32,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 17,
- .bp_in_mask = 0x1ffff,
- .hfb_filter_cnt = 48,
- .hfb_filter_size = 128,
- .qtag_mask = 0x3F,
- .tbuf_offset = 0x0600,
- .hfb_offset = 0x8000,
- .hfb_reg_offset = 0xfc00,
- .rdma_offset = 0x2000,
- .tdma_offset = 0x4000,
- .words_per_bd = 3,
- .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
- GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
- },
- [GENET_V5] = {
- .tx_queues = 4,
- .tx_bds_per_q = 32,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 17,
- .bp_in_mask = 0x1ffff,
- .hfb_filter_cnt = 48,
- .hfb_filter_size = 128,
- .qtag_mask = 0x3F,
- .tbuf_offset = 0x0600,
- .hfb_offset = 0x8000,
- .hfb_reg_offset = 0xfc00,
- .rdma_offset = 0x2000,
- .tdma_offset = 0x4000,
- .words_per_bd = 3,
- .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
- GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
- },
+/* GENET hardware parameters/characteristics */
+static const struct bcmgenet_hw_params bcmgenet_hw_params_v1 = {
+ .tx_queues = 0,
+ .tx_bds_per_q = 0,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 16,
+ .bp_in_mask = 0xffff,
+ .hfb_filter_cnt = 16,
+ .hfb_filter_size = 64,
+ .qtag_mask = 0x1F,
+ .hfb_offset = 0x1000,
+ .hfb_reg_offset = GENET_RBUF_OFF + RBUF_HFB_CTRL_V1,
+ .rdma_offset = 0x2000,
+ .tdma_offset = 0x3000,
+ .words_per_bd = 2,
+};
+
+static const struct bcmgenet_hw_params bcmgenet_hw_params_v2 = {
+ .tx_queues = 4,
+ .tx_bds_per_q = 32,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 16,
+ .bp_in_mask = 0xffff,
+ .hfb_filter_cnt = 16,
+ .hfb_filter_size = 64,
+ .qtag_mask = 0x1F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x1000,
+ .hfb_reg_offset = 0x2000,
+ .rdma_offset = 0x3000,
+ .tdma_offset = 0x4000,
+ .words_per_bd = 2,
+};
+
+static const struct bcmgenet_hw_params bcmgenet_hw_params_v3 = {
+ .tx_queues = 4,
+ .tx_bds_per_q = 32,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 17,
+ .bp_in_mask = 0x1ffff,
+ .hfb_filter_cnt = 48,
+ .hfb_filter_size = 128,
+ .qtag_mask = 0x3F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x8000,
+ .hfb_reg_offset = 0xfc00,
+ .rdma_offset = 0x10000,
+ .tdma_offset = 0x11000,
+ .words_per_bd = 2,
+};
+
+static const struct bcmgenet_hw_params bcmgenet_hw_params_v4 = {
+ .tx_queues = 4,
+ .tx_bds_per_q = 32,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 17,
+ .bp_in_mask = 0x1ffff,
+ .hfb_filter_cnt = 48,
+ .hfb_filter_size = 128,
+ .qtag_mask = 0x3F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x8000,
+ .hfb_reg_offset = 0xfc00,
+ .rdma_offset = 0x2000,
+ .tdma_offset = 0x4000,
+ .words_per_bd = 3,
};
/* Infer hardware parameters from the detected GENET version */
static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
{
- struct bcmgenet_hw_params *params;
+ const struct bcmgenet_hw_params *params;
u32 reg;
u8 major;
u16 gphy_rev;
- if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
- bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
- genet_dma_ring_regs = genet_dma_ring_regs_v4;
- } else if (GENET_IS_V3(priv)) {
+ /* default to latest values */
+ params = &bcmgenet_hw_params_v4;
+ bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
+ genet_dma_ring_regs = genet_dma_ring_regs_v4;
+ if (GENET_IS_V3(priv)) {
+ params = &bcmgenet_hw_params_v3;
bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
} else if (GENET_IS_V2(priv)) {
+ params = &bcmgenet_hw_params_v2;
bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
} else if (GENET_IS_V1(priv)) {
+ params = &bcmgenet_hw_params_v1;
bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
}
-
- /* enum genet_version starts at 1 */
- priv->hw_params = &bcmgenet_hw_params[priv->version];
- params = priv->hw_params;
+ priv->hw_params = params;
/* Read GENET HW version */
reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
major = (reg >> 24 & 0x0f);
- if (major == 6)
+ if (major == 6 || major == 7)
major = 5;
else if (major == 5)
major = 4;
@@ -3898,7 +3841,7 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
}
#ifdef CONFIG_PHYS_ADDR_T_64BIT
- if (!(params->flags & GENET_HAS_40BITS))
+ if (!bcmgenet_has_40bits(priv))
pr_warn("GENET does not support 40-bits PA\n");
#endif
@@ -3923,7 +3866,7 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
struct bcmgenet_plat_data {
enum bcmgenet_version version;
u32 dma_max_burst_length;
- bool ephy_16nm;
+ u32 flags;
};
static const struct bcmgenet_plat_data v1_plat_data = {
@@ -3934,32 +3877,43 @@ static const struct bcmgenet_plat_data v1_plat_data = {
static const struct bcmgenet_plat_data v2_plat_data = {
.version = GENET_V2,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+ .flags = GENET_HAS_EXT,
};
static const struct bcmgenet_plat_data v3_plat_data = {
.version = GENET_V3,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+ .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
+ GENET_HAS_MOCA_LINK_DET,
};
static const struct bcmgenet_plat_data v4_plat_data = {
.version = GENET_V4,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
};
static const struct bcmgenet_plat_data v5_plat_data = {
.version = GENET_V5,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
};
static const struct bcmgenet_plat_data bcm2711_plat_data = {
.version = GENET_V5,
.dma_max_burst_length = 0x08,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
};
static const struct bcmgenet_plat_data bcm7712_plat_data = {
.version = GENET_V5,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
- .ephy_16nm = true,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET |
+ GENET_HAS_EPHY_16NM,
};
static const struct of_device_id bcmgenet_match[] = {
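The per-platform flags word replaces both hw_params->flags and the ephy_16nm bool, and the bcmgenet_has_*() helpers used throughout the patch presumably reduce to simple bit tests against it (their definitions are not part of this hunk). A sketch of that idea using the bit values from the bcmgenet.h hunk further down; demo_priv and demo_has_ext() are illustrative names only:

#include <stdbool.h>
#include <stdio.h>

#define GENET_HAS_EXT           (1 << 1)
#define GENET_HAS_MDIO_INTR     (1 << 2)
#define GENET_HAS_MOCA_LINK_DET (1 << 3)
#define GENET_HAS_EPHY_16NM     (1 << 4)

struct demo_priv {
	unsigned int flags;
};

static bool demo_has_ext(const struct demo_priv *priv)
{
	return priv->flags & GENET_HAS_EXT;
}

static bool demo_has_mdio_intr(const struct demo_priv *priv)
{
	return priv->flags & GENET_HAS_MDIO_INTR;
}

int main(void)
{
	struct demo_priv priv = {
		.flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
	};

	printf("EXT block: %s, MDIO interrupts: %s\n",
	       demo_has_ext(&priv) ? "yes" : "no",
	       demo_has_mdio_intr(&priv) ? "yes" : "no");
	return 0;
}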
@@ -3976,7 +3930,6 @@ MODULE_DEVICE_TABLE(of, bcmgenet_match);
static int bcmgenet_probe(struct platform_device *pdev)
{
- struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
const struct bcmgenet_plat_data *pdata;
struct bcmgenet_priv *priv;
struct net_device *dev;
@@ -4036,6 +3989,8 @@ static int bcmgenet_probe(struct platform_device *pdev)
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
+ netdev_sw_irq_coalesce_default_on(dev);
+
/* Request the WOL interrupt and advertise suspend if available */
priv->wol_irq_disabled = true;
if (priv->wol_irq > 0) {
@@ -4057,10 +4012,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
if (pdata) {
priv->version = pdata->version;
priv->dma_max_burst_length = pdata->dma_max_burst_length;
- priv->ephy_16nm = pdata->ephy_16nm;
- } else {
- priv->version = pd->genet_version;
- priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH;
+ priv->flags = pdata->flags;
}
priv->clk = devm_clk_get_optional(&priv->pdev->dev, "enet");
@@ -4077,7 +4029,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
bcmgenet_set_hw_params(priv);
err = -EIO;
- if (priv->hw_params->flags & GENET_HAS_40BITS)
+ if (bcmgenet_has_40bits(priv))
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
if (err)
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
@@ -4110,16 +4062,13 @@ static int bcmgenet_probe(struct platform_device *pdev)
if (device_get_phy_mode(&pdev->dev) == PHY_INTERFACE_MODE_INTERNAL)
bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
- if (pd && !IS_ERR_OR_NULL(pd->mac_address))
- eth_hw_addr_set(dev, pd->mac_address);
- else
- if (device_get_ethdev_address(&pdev->dev, dev))
- if (has_acpi_companion(&pdev->dev)) {
- u8 addr[ETH_ALEN];
+ if (device_get_ethdev_address(&pdev->dev, dev))
+ if (has_acpi_companion(&pdev->dev)) {
+ u8 addr[ETH_ALEN];
- bcmgenet_get_hw_addr(priv, addr);
- eth_hw_addr_set(dev, addr);
- }
+ bcmgenet_get_hw_addr(priv, addr);
+ eth_hw_addr_set(dev, addr);
+ }
if (!is_valid_ether_addr(dev->dev_addr)) {
dev_warn(&pdev->dev, "using random Ethernet MAC\n");
@@ -4132,16 +4081,19 @@ static int bcmgenet_probe(struct platform_device *pdev)
if (err)
goto err_clk_disable;
- /* setup number of real queues + 1 (GENET_V1 has 0 hardware queues
- * just the ring 16 descriptor based TX
- */
+ /* setup number of real queues + 1 */
netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
/* Set default coalescing parameters */
- for (i = 0; i < priv->hw_params->rx_queues; i++)
+ for (i = 0; i <= priv->hw_params->rx_queues; i++)
priv->rx_rings[i].rx_max_coalesced_frames = 1;
- priv->rx_rings[DESC_INDEX].rx_max_coalesced_frames = 1;
+
+ /* Initialize u64 stats seq counter for 32-bit machines */
+ for (i = 0; i <= priv->hw_params->rx_queues; i++)
+ u64_stats_init(&priv->rx_rings[i].stats64.syncp);
+ for (i = 0; i <= priv->hw_params->tx_queues; i++)
+ u64_stats_init(&priv->tx_rings[i].stats64.syncp);
/* libphy will determine the link state */
netif_carrier_off(dev);
@@ -4205,9 +4157,22 @@ static int bcmgenet_resume_noirq(struct device *d)
reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT);
if (reg & UMAC_IRQ_WAKE_EVENT)
pm_wakeup_event(&priv->pdev->dev, 0);
+
+ /* From WOL-enabled suspend, switch to regular clock */
+ if (!bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC))
+ return 0;
+
+ /* Failed so fall through to reset MAC */
}
- bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT, INTRL2_CPU_CLEAR);
+ /* If this is an internal GPHY, power it back on now, before UniMAC is
+ * brought out of reset as absolutely no UniMAC activity is allowed
+ */
+ if (priv->internal_phy)
+ bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+
+ /* take MAC out of reset */
+ bcmgenet_umac_reset(priv);
return 0;
}
@@ -4217,23 +4182,46 @@ static int bcmgenet_resume(struct device *d)
struct net_device *dev = dev_get_drvdata(d);
struct bcmgenet_priv *priv = netdev_priv(dev);
struct bcmgenet_rxnfc_rule *rule;
- unsigned long dma_ctrl;
int ret;
+ u32 reg;
if (!netif_running(dev))
return 0;
- /* From WOL-enabled suspend, switch to regular clock */
- if (device_may_wakeup(d) && priv->wolopts)
- bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
-
- /* If this is an internal GPHY, power it back on now, before UniMAC is
- * brought out of reset as absolutely no UniMAC activity is allowed
- */
- if (priv->internal_phy)
- bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
-
- bcmgenet_umac_reset(priv);
+ if (device_may_wakeup(d) && priv->wolopts) {
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ if (reg & CMD_RX_EN) {
+ /* Successfully exited WoL, just resume data flows */
+ list_for_each_entry(rule, &priv->rxnfc_list, list)
+ if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
+ bcmgenet_hfb_enable_filter(priv,
+ rule->fs.location + 1);
+ bcmgenet_hfb_enable_filter(priv, 0);
+ bcmgenet_set_rx_mode(dev);
+ bcmgenet_enable_rx_napi(priv);
+
+ /* Reinitialize Tx flows */
+ bcmgenet_tdma_disable(priv);
+ bcmgenet_init_tx_queues(priv->dev);
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg |= DMA_EN;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+ bcmgenet_enable_tx_napi(priv);
+
+ bcmgenet_link_intr_enable(priv);
+ phy_start_machine(dev->phydev);
+
+ netif_device_attach(dev);
+ enable_irq(priv->irq1);
+ return 0;
+ }
+ /* MAC was reset so complete bcmgenet_netif_stop() */
+ umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, false);
+ bcmgenet_rdma_disable(priv);
+ bcmgenet_intr_disable(priv);
+ bcmgenet_fini_dma(priv);
+ enable_irq(priv->irq1);
+ }
init_umac(priv);
@@ -4254,19 +4242,13 @@ static int bcmgenet_resume(struct device *d)
if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
bcmgenet_hfb_create_rxnfc_filter(priv, rule);
- /* Disable RX/TX DMA and flush TX queues */
- dma_ctrl = bcmgenet_dma_disable(priv, false);
-
/* Reinitialize TDMA and RDMA and SW housekeeping */
- ret = bcmgenet_init_dma(priv);
+ ret = bcmgenet_init_dma(priv, false);
if (ret) {
netdev_err(dev, "failed to initialize DMA\n");
goto out_clk_disable;
}
- /* Always enable ring 16 - descriptor ring */
- bcmgenet_enable_dma(priv, dma_ctrl);
-
if (!device_may_wakeup(d))
phy_resume(dev->phydev);
@@ -4287,19 +4269,52 @@ static int bcmgenet_suspend(struct device *d)
{
struct net_device *dev = dev_get_drvdata(d);
struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct bcmgenet_rxnfc_rule *rule;
+ u32 reg, hfb_enable = 0;
if (!netif_running(dev))
return 0;
netif_device_detach(dev);
- bcmgenet_netif_stop(dev, true);
+ if (device_may_wakeup(d) && priv->wolopts) {
+ netif_tx_disable(dev);
+
+ /* Suspend non-wake Rx data flows */
+ if (priv->wolopts & WAKE_FILTER)
+ list_for_each_entry(rule, &priv->rxnfc_list, list)
+ if (rule->fs.ring_cookie == RX_CLS_FLOW_WAKE &&
+ rule->state == BCMGENET_RXNFC_STATE_ENABLED)
+ hfb_enable |= 1 << rule->fs.location;
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) {
+ reg &= ~RBUF_HFB_FILTER_EN_MASK;
+ reg |= hfb_enable << (RBUF_HFB_FILTER_EN_SHIFT + 1);
+ } else {
+ bcmgenet_hfb_reg_writel(priv, hfb_enable << 1,
+ HFB_FLT_ENABLE_V3PLUS + 4);
+ }
+ if (!hfb_enable)
+ reg &= ~RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
- if (!device_may_wakeup(d))
- phy_suspend(dev->phydev);
+ /* Clear any old filter matches so only new matches wake */
+ bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
+ bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
- /* Disable filtering */
- bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
+ if (-ETIMEDOUT == bcmgenet_tdma_disable(priv))
+ netdev_warn(priv->dev,
+ "Timed out while disabling TX DMA\n");
+
+ bcmgenet_disable_tx_napi(priv);
+ bcmgenet_disable_rx_napi(priv);
+ disable_irq(priv->irq1);
+ bcmgenet_tx_reclaim_all(dev);
+ bcmgenet_fini_tx_napi(priv);
+ } else {
+ /* Teardown the interface */
+ bcmgenet_netif_stop(dev, true);
+ }
return 0;
}
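
The suspend path above only keeps the rxnfc rules routed to RX_CLS_FLOW_WAKE armed while the chip sleeps, collecting them into a per-filter enable bitmap. A minimal plain-C sketch of that bitmap construction follows; struct wake_rule and its fields are illustrative stand-ins for the driver's bcmgenet_rxnfc_rule, not the real structure.

    #include <stdint.h>
    #include <stddef.h>

    /* Illustrative stand-in for the driver's rxnfc rule bookkeeping. */
    struct wake_rule {
            unsigned int location;   /* HFB filter index */
            int wake;                /* rule targets RX_CLS_FLOW_WAKE */
            int enabled;             /* rule currently programmed */
            struct wake_rule *next;
    };

    /* Build the "keep these filters armed during suspend" bitmap: only
     * rules that are both enabled and marked as wake sources contribute
     * a bit; everything else stays masked off.
     */
    static uint32_t build_wake_filter_mask(const struct wake_rule *rules)
    {
            uint32_t mask = 0;

            for (; rules; rules = rules->next)
                    if (rules->wake && rules->enabled)
                            mask |= 1u << rules->location;

            return mask;
    }
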
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 43b923c48b14..5ec3979779ec 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2014-2024 Broadcom
+ * Copyright (c) 2014-2025 Broadcom
*/
#ifndef __BCMGENET_H__
@@ -18,6 +18,9 @@
#include "../unimac.h"
+/* Maximum number of hardware queues, downsized if needed */
+#define GENET_MAX_MQ_CNT 4
+
/* total number of Buffer Descriptors, same for Rx/Tx */
#define TOTAL_DESC 256
@@ -152,6 +155,30 @@ struct bcmgenet_mib_counters {
u32 tx_realloc_tsb_failed;
};
+struct bcmgenet_tx_stats64 {
+ struct u64_stats_sync syncp;
+ u64_stats_t packets;
+ u64_stats_t bytes;
+ u64_stats_t errors;
+ u64_stats_t dropped;
+};
+
+struct bcmgenet_rx_stats64 {
+ struct u64_stats_sync syncp;
+ u64_stats_t bytes;
+ u64_stats_t packets;
+ u64_stats_t errors;
+ u64_stats_t dropped;
+ u64_stats_t multicast;
+ u64_stats_t broadcast;
+ u64_stats_t missed;
+ u64_stats_t length_errors;
+ u64_stats_t over_errors;
+ u64_stats_t crc_errors;
+ u64_stats_t frame_errors;
+ u64_stats_t fragmented_errors;
+};
+
#define UMAC_MIB_START 0x400
#define UMAC_MDIO_CMD 0x614
@@ -271,6 +298,8 @@ struct bcmgenet_mib_counters {
/* Only valid for GENETv3+ */
#define UMAC_IRQ_MDIO_DONE (1 << 23)
#define UMAC_IRQ_MDIO_ERROR (1 << 24)
+#define UMAC_IRQ_MDIO_EVENT (UMAC_IRQ_MDIO_DONE | \
+ UMAC_IRQ_MDIO_ERROR)
/* INTRL2 instance 1 definitions */
#define UMAC_IRQ1_TX_INTR_MASK 0xFFFF
@@ -476,6 +505,7 @@ enum bcmgenet_version {
#define GENET_HAS_EXT (1 << 1)
#define GENET_HAS_MDIO_INTR (1 << 2)
#define GENET_HAS_MOCA_LINK_DET (1 << 3)
+#define GENET_HAS_EPHY_16NM (1 << 4)
/* BCMGENET hardware parameters, keep this structure nicely aligned
* since it is going to be used in hot paths
@@ -496,7 +526,6 @@ struct bcmgenet_hw_params {
u32 rdma_offset;
u32 tdma_offset;
u32 words_per_bd;
- u32 flags;
};
struct bcmgenet_skb_cb {
@@ -510,10 +539,8 @@ struct bcmgenet_skb_cb {
struct bcmgenet_tx_ring {
spinlock_t lock; /* ring lock */
struct napi_struct napi; /* NAPI per tx queue */
- unsigned long packets;
- unsigned long bytes;
+ struct bcmgenet_tx_stats64 stats64;
unsigned int index; /* ring index */
- unsigned int queue; /* queue index */
struct enet_cb *cbs; /* tx ring buffer control block*/
unsigned int size; /* size of each tx ring */
unsigned int clean_ptr; /* Tx ring clean pointer */
@@ -523,8 +550,6 @@ struct bcmgenet_tx_ring {
unsigned int prod_index; /* Tx ring producer index SW copy */
unsigned int cb_ptr; /* Tx ring initial CB ptr */
unsigned int end_ptr; /* Tx ring end CB ptr */
- void (*int_enable)(struct bcmgenet_tx_ring *);
- void (*int_disable)(struct bcmgenet_tx_ring *);
struct bcmgenet_priv *priv;
};
@@ -538,10 +563,7 @@ struct bcmgenet_net_dim {
struct bcmgenet_rx_ring {
struct napi_struct napi; /* Rx NAPI struct */
- unsigned long bytes;
- unsigned long packets;
- unsigned long errors;
- unsigned long dropped;
+ struct bcmgenet_rx_stats64 stats64;
unsigned int index; /* Rx ring index */
struct enet_cb *cbs; /* Rx ring buffer control block */
unsigned int size; /* Rx ring size */
@@ -553,8 +575,6 @@ struct bcmgenet_rx_ring {
struct bcmgenet_net_dim dim;
u32 rx_max_coalesced_frames;
u32 rx_coalesce_usecs;
- void (*int_enable)(struct bcmgenet_rx_ring *);
- void (*int_disable)(struct bcmgenet_rx_ring *);
struct bcmgenet_priv *priv;
};
@@ -583,7 +603,7 @@ struct bcmgenet_priv {
struct enet_cb *tx_cbs;
unsigned int num_tx_bds;
- struct bcmgenet_tx_ring tx_rings[DESC_INDEX + 1];
+ struct bcmgenet_tx_ring tx_rings[GENET_MAX_MQ_CNT + 1];
/* receive variables */
void __iomem *rx_bds;
@@ -593,10 +613,11 @@ struct bcmgenet_priv {
struct bcmgenet_rxnfc_rule rxnfc_rules[MAX_NUM_OF_FS_RULES];
struct list_head rxnfc_list;
- struct bcmgenet_rx_ring rx_rings[DESC_INDEX + 1];
+ struct bcmgenet_rx_ring rx_rings[GENET_MAX_MQ_CNT + 1];
/* other misc variables */
- struct bcmgenet_hw_params *hw_params;
+ const struct bcmgenet_hw_params *hw_params;
+ u32 flags;
unsigned autoneg_pause:1;
unsigned tx_pause:1;
unsigned rx_pause:1;
@@ -615,7 +636,6 @@ struct bcmgenet_priv {
phy_interface_t phy_interface;
int phy_addr;
int ext_phy;
- bool ephy_16nm;
/* Interrupt variables */
struct work_struct bcmgenet_irq_work;
@@ -643,13 +663,37 @@ struct bcmgenet_priv {
struct clk *clk_wol;
u32 wolopts;
u8 sopass[SOPASS_MAX];
- bool wol_active;
struct bcmgenet_mib_counters mib;
struct ethtool_keee eee;
};
+static inline bool bcmgenet_has_40bits(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_40BITS);
+}
+
+static inline bool bcmgenet_has_ext(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_EXT);
+}
+
+static inline bool bcmgenet_has_mdio_intr(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_MDIO_INTR);
+}
+
+static inline bool bcmgenet_has_moca_link_det(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_MOCA_LINK_DET);
+}
+
+static inline bool bcmgenet_has_ephy_16nm(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_EPHY_16NM);
+}
+
#define GENET_IO_MACRO(name, offset) \
static inline u32 bcmgenet_##name##_readl(struct bcmgenet_priv *priv, \
u32 off) \
@@ -702,8 +746,8 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode);
-void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
- enum bcmgenet_power_mode mode);
+int bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
+ enum bcmgenet_power_mode mode);
void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
bool tx_lpi_enabled);
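
bcmgenet.h now carries per-ring bcmgenet_tx_stats64/bcmgenet_rx_stats64 blocks built on u64_stats_sync. Below is a minimal sketch of the usual update/read pattern for such counters; genet_demo_ring and its helpers are invented for illustration, while the u64_stats_* calls are the real kernel API.

    #include <linux/u64_stats_sync.h>

    /* Hypothetical ring embedding a 64-bit counter block, mirroring the
     * layout added to bcmgenet_tx_ring/bcmgenet_rx_ring. The syncp must
     * be initialized once with u64_stats_init() before use.
     */
    struct genet_demo_ring {
            struct u64_stats_sync syncp;
            u64_stats_t packets;
            u64_stats_t bytes;
    };

    /* Writer side: the single producer (NAPI/xmit path) bumps counters
     * inside an update section so 32-bit readers see a consistent pair.
     */
    static void demo_ring_account(struct genet_demo_ring *ring, unsigned int len)
    {
            u64_stats_update_begin(&ring->syncp);
            u64_stats_inc(&ring->packets);
            u64_stats_add(&ring->bytes, len);
            u64_stats_update_end(&ring->syncp);
    }

    /* Reader side (e.g. ndo_get_stats64): retry until a stable snapshot. */
    static void demo_ring_read(struct genet_demo_ring *ring,
                               u64 *packets, u64 *bytes)
    {
            unsigned int start;

            do {
                    start = u64_stats_fetch_begin(&ring->syncp);
                    *packets = u64_stats_read(&ring->packets);
                    *bytes = u64_stats_read(&ring->bytes);
            } while (u64_stats_fetch_retry(&ring->syncp, start));
    }
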
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 0715ea5bf13e..8fb551288298 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support
*
- * Copyright (c) 2014-2024 Broadcom
+ * Copyright (c) 2014-2025 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet_wol: " fmt
@@ -41,9 +41,12 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
+ u32 phy_wolopts = 0;
- if (dev->phydev)
+ if (dev->phydev) {
phy_ethtool_get_wol(dev->phydev, wol);
+ phy_wolopts = wol->wolopts;
+ }
/* MAC is not wake-up capable, return what the PHY does */
if (!device_can_wakeup(kdev))
@@ -51,9 +54,14 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
/* Overlay MAC capabilities with that of the PHY queried before */
wol->supported |= WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
- wol->wolopts = priv->wolopts;
- memset(wol->sopass, 0, sizeof(wol->sopass));
+ wol->wolopts |= priv->wolopts;
+
+ /* Return the PHY configured magic password */
+ if (phy_wolopts & WAKE_MAGICSECURE)
+ return;
+ /* Otherwise the MAC one */
+ memset(wol->sopass, 0, sizeof(wol->sopass));
if (wol->wolopts & WAKE_MAGICSECURE)
memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
}
@@ -70,7 +78,7 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
/* Try Wake-on-LAN from the PHY first */
if (dev->phydev) {
ret = phy_ethtool_set_wol(dev->phydev, wol);
- if (ret != -EOPNOTSUPP)
+ if (ret != -EOPNOTSUPP && wol->wolopts)
return ret;
}
@@ -137,8 +145,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode)
{
struct net_device *dev = priv->dev;
- struct bcmgenet_rxnfc_rule *rule;
- u32 reg, hfb_ctrl_reg, hfb_enable = 0;
+ u32 reg, hfb_ctrl_reg;
int retries = 0;
if (mode != GENET_POWER_WOL_MAGIC) {
@@ -146,18 +153,6 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
return -EINVAL;
}
- /* Can't suspend with WoL if MAC is still in reset */
- spin_lock_bh(&priv->reg_lock);
- reg = bcmgenet_umac_readl(priv, UMAC_CMD);
- if (reg & CMD_SW_RESET)
- reg &= ~CMD_SW_RESET;
-
- /* disable RX */
- reg &= ~CMD_RX_EN;
- bcmgenet_umac_writel(priv, reg, UMAC_CMD);
- spin_unlock_bh(&priv->reg_lock);
- mdelay(10);
-
if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
reg |= MPD_EN;
@@ -169,13 +164,8 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
}
hfb_ctrl_reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
- if (priv->wolopts & WAKE_FILTER) {
- list_for_each_entry(rule, &priv->rxnfc_list, list)
- if (rule->fs.ring_cookie == RX_CLS_FLOW_WAKE)
- hfb_enable |= (1 << rule->fs.location);
- reg = (hfb_ctrl_reg & ~RBUF_HFB_EN) | RBUF_ACPI_EN;
- bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
- }
+ reg = hfb_ctrl_reg | RBUF_ACPI_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
/* Do not leave UniMAC in MPD mode only */
retries = bcmgenet_poll_wol_status(priv);
@@ -190,15 +180,12 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
netif_dbg(priv, wol, dev, "MPD WOL-ready status set after %d msec\n",
retries);
- clk_prepare_enable(priv->clk_wol);
- priv->wol_active = 1;
+ /* Disable phy status updates while suspending */
+ mutex_lock(&dev->phydev->lock);
+ dev->phydev->state = PHY_READY;
+ mutex_unlock(&dev->phydev->lock);
- if (hfb_enable) {
- bcmgenet_hfb_reg_writel(priv, hfb_enable,
- HFB_FLT_ENABLE_V3PLUS + 4);
- hfb_ctrl_reg = RBUF_HFB_EN | RBUF_ACPI_EN;
- bcmgenet_hfb_reg_writel(priv, hfb_ctrl_reg, HFB_CTRL);
- }
+ clk_prepare_enable(priv->clk_wol);
/* Enable CRC forward */
spin_lock_bh(&priv->reg_lock);
@@ -206,13 +193,17 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
priv->crc_fwd_en = 1;
reg |= CMD_CRC_FWD;
+ /* Can't suspend with WoL if MAC is still in reset */
+ if (reg & CMD_SW_RESET)
+ reg &= ~CMD_SW_RESET;
+
/* Receiver must be enabled for WOL MP detection */
reg |= CMD_RX_EN;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
spin_unlock_bh(&priv->reg_lock);
reg = UMAC_IRQ_MPD_R;
- if (hfb_enable)
+ if (hfb_ctrl_reg & RBUF_HFB_EN)
reg |= UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM;
bcmgenet_intrl2_0_writel(priv, reg, INTRL2_CPU_MASK_CLEAR);
@@ -220,40 +211,42 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
return 0;
}
-void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
- enum bcmgenet_power_mode mode)
+int bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
+ enum bcmgenet_power_mode mode)
{
+ struct net_device *dev = priv->dev;
u32 reg;
if (mode != GENET_POWER_WOL_MAGIC) {
netif_err(priv, wol, priv->dev, "invalid mode: %d\n", mode);
- return;
+ return -EINVAL;
}
- if (!priv->wol_active)
- return; /* failed to suspend so skip the rest */
-
- priv->wol_active = 0;
clk_disable_unprepare(priv->clk_wol);
priv->crc_fwd_en = 0;
+ bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT,
+ INTRL2_CPU_MASK_SET);
+ if (bcmgenet_has_mdio_intr(priv))
+ bcmgenet_intrl2_0_writel(priv,
+ UMAC_IRQ_MDIO_EVENT,
+ INTRL2_CPU_MASK_CLEAR);
+
/* Disable Magic Packet Detection */
if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
if (!(reg & MPD_EN))
- return; /* already reset so skip the rest */
+ return -EPERM; /* already reset so skip the rest */
reg &= ~(MPD_EN | MPD_PW_EN);
bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
}
- /* Disable WAKE_FILTER Detection */
- if (priv->wolopts & WAKE_FILTER) {
- reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
- if (!(reg & RBUF_ACPI_EN))
- return; /* already reset so skip the rest */
- reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN);
- bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
- }
+ /* Disable ACPI mode */
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ if (!(reg & RBUF_ACPI_EN))
+ return -EPERM; /* already reset so skip the rest */
+ reg &= ~RBUF_ACPI_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
/* Disable CRC Forward */
spin_lock_bh(&priv->reg_lock);
@@ -261,4 +254,14 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
reg &= ~CMD_CRC_FWD;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
spin_unlock_bh(&priv->reg_lock);
+
+ /* Resume link status tracking */
+ mutex_lock(&dev->phydev->lock);
+ if (dev->phydev->link)
+ dev->phydev->state = PHY_RUNNING;
+ else
+ dev->phydev->state = PHY_NOLINK;
+ mutex_unlock(&dev->phydev->lock);
+
+ return 0;
}
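
The WoL power-down/up helpers above park phylib by forcing the device into PHY_READY under phydev->lock, and later restore PHY_RUNNING or PHY_NOLINK from the cached link status. A condensed sketch of that pairing is below; the demo_phy_* functions are illustrative wrappers, not driver or phylib API.

    #include <linux/phy.h>

    /* Park the PHY state machine so no link updates race the WoL suspend,
     * as bcmgenet_wol_power_down_cfg() does above.
     */
    static void demo_phy_pause(struct phy_device *phydev)
    {
            mutex_lock(&phydev->lock);
            phydev->state = PHY_READY;
            mutex_unlock(&phydev->lock);
    }

    /* Resume link tracking on wake, picking the state from the last known
     * link status, as bcmgenet_wol_power_up_cfg() does.
     */
    static void demo_phy_resume(struct phy_device *phydev)
    {
            mutex_lock(&phydev->lock);
            phydev->state = phydev->link ? PHY_RUNNING : PHY_NOLINK;
            mutex_unlock(&phydev->lock);
    }
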
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index c4a3698cef66..38f854b94a79 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET MDIO routines
*
- * Copyright (c) 2014-2024 Broadcom
+ * Copyright (c) 2014-2025 Broadcom
*/
#include <linux/acpi.h>
@@ -20,7 +20,6 @@
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
-#include <linux/platform_data/bcmgenet.h>
#include <linux/platform_data/mdio-bcm-unimac.h>
#include "bcmgenet.h"
@@ -154,7 +153,7 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
u32 reg = 0;
/* EXT_GPHY_CTRL is only valid for GENETv4 and onward */
- if (GENET_IS_V4(priv) || priv->ephy_16nm) {
+ if (GENET_IS_V4(priv) || bcmgenet_has_ephy_16nm(priv)) {
reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL);
if (enable) {
reg &= ~EXT_CK25_DIS;
@@ -169,10 +168,15 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
reg &= ~EXT_GPHY_RESET;
} else {
+ reg |= EXT_GPHY_RESET;
+ bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+ mdelay(1);
+
reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN |
- EXT_GPHY_RESET | EXT_CFG_IDDQ_GLOBAL_PWR;
+ EXT_CFG_IDDQ_GLOBAL_PWR;
bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
mdelay(1);
+
reg |= EXT_CK25_DIS;
}
bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
@@ -184,7 +188,7 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
{
- if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+ if (bcmgenet_has_moca_link_det(priv))
fixed_phy_set_link_update(priv->dev->phydev,
bcmgenet_fixed_phy_link_update);
}
@@ -431,23 +435,6 @@ static struct device_node *bcmgenet_mii_of_find_mdio(struct bcmgenet_priv *priv)
return priv->mdio_dn;
}
-static void bcmgenet_mii_pdata_init(struct bcmgenet_priv *priv,
- struct unimac_mdio_pdata *ppd)
-{
- struct device *kdev = &priv->pdev->dev;
- struct bcmgenet_platform_data *pd = kdev->platform_data;
-
- if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) {
- /*
- * Internal or external PHY with MDIO access
- */
- if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR)
- ppd->phy_mask = 1 << pd->phy_address;
- else
- ppd->phy_mask = 0;
- }
-}
-
static int bcmgenet_mii_wait(void *wait_func_data)
{
struct bcmgenet_priv *priv = wait_func_data;
@@ -462,7 +449,6 @@ static int bcmgenet_mii_wait(void *wait_func_data)
static int bcmgenet_mii_register(struct bcmgenet_priv *priv)
{
struct platform_device *pdev = priv->pdev;
- struct bcmgenet_platform_data *pdata = pdev->dev.platform_data;
struct device_node *dn = pdev->dev.of_node;
struct unimac_mdio_pdata ppd;
struct platform_device *ppdev;
@@ -506,8 +492,6 @@ static int bcmgenet_mii_register(struct bcmgenet_priv *priv)
ppdev->dev.parent = &pdev->dev;
if (dn)
ppdev->dev.of_node = bcmgenet_mii_of_find_mdio(priv);
- else if (pdata)
- bcmgenet_mii_pdata_init(priv, &ppd);
else
ppd.phy_mask = ~0;
@@ -589,58 +573,6 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
return 0;
}
-static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
-{
- struct device *kdev = &priv->pdev->dev;
- struct bcmgenet_platform_data *pd = kdev->platform_data;
- char phy_name[MII_BUS_ID_SIZE + 3];
- char mdio_bus_id[MII_BUS_ID_SIZE];
- struct phy_device *phydev;
-
- snprintf(mdio_bus_id, MII_BUS_ID_SIZE, "%s-%d",
- UNIMAC_MDIO_DRV_NAME, priv->pdev->id);
-
- if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) {
- snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT,
- mdio_bus_id, pd->phy_address);
-
- /*
- * Internal or external PHY with MDIO access
- */
- phydev = phy_attach(priv->dev, phy_name, pd->phy_interface);
- if (IS_ERR(phydev)) {
- dev_err(kdev, "failed to register PHY device\n");
- return PTR_ERR(phydev);
- }
- } else {
- /*
- * MoCA port or no MDIO access.
- * Use fixed PHY to represent the link layer.
- */
- struct fixed_phy_status fphy_status = {
- .link = 1,
- .speed = pd->phy_speed,
- .duplex = pd->phy_duplex,
- .pause = 0,
- .asym_pause = 0,
- };
-
- phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
- if (IS_ERR(phydev)) {
- dev_err(kdev, "failed to register fixed PHY device\n");
- return PTR_ERR(phydev);
- }
-
- /* Make sure we initialize MoCA PHYs with a link down */
- phydev->link = 0;
-
- }
-
- priv->phy_interface = pd->phy_interface;
-
- return 0;
-}
-
static int bcmgenet_mii_bus_init(struct bcmgenet_priv *priv)
{
struct device *kdev = &priv->pdev->dev;
@@ -651,7 +583,7 @@ static int bcmgenet_mii_bus_init(struct bcmgenet_priv *priv)
else if (has_acpi_companion(kdev))
return bcmgenet_phy_interface_init(priv);
else
- return bcmgenet_mii_pd_init(priv);
+ return -EINVAL;
}
int bcmgenet_mii_init(struct net_device *dev)
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 01dfec115942..75f66587983d 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -54,7 +54,8 @@
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
-#include <linux/crc32poly.h>
+#include <linux/crc32.h>
+#include <linux/dmi.h>
#include <net/checksum.h>
#include <net/gso.h>
@@ -5802,7 +5803,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
u32 current_speed = SPEED_UNKNOWN;
u8 current_duplex = DUPLEX_UNKNOWN;
bool current_link_up = false;
- u32 local_adv, remote_adv, sgsr;
+ u32 local_adv = 0, remote_adv = 0, sgsr;
if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
tg3_asic_rev(tp) == ASIC_REV_5720) &&
@@ -5943,9 +5944,6 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
else
current_duplex = DUPLEX_HALF;
- local_adv = 0;
- remote_adv = 0;
-
if (bmcr & BMCR_ANENABLE) {
u32 common;
@@ -6685,7 +6683,7 @@ static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
* We only need to fill in the address because the other members
* of the RX descriptor are invariant, see tg3_init_rings.
*
- * Note the purposeful assymetry of cpu vs. chip accesses. For
+ * Note the purposeful asymmetry of cpu vs. chip accesses. For
* posting buffers we only dirty the first cache line of the RX
* descriptor (containing the address). Whereas for the RX status
* buffers the cpu only reads the last cacheline of the RX descriptor
@@ -7424,7 +7422,7 @@ static void tg3_napi_enable(struct tg3 *tp)
for (i = 0; i < tp->irq_cnt; i++) {
tnapi = &tp->napi[i];
- napi_enable(&tnapi->napi);
+ napi_enable_locked(&tnapi->napi);
if (tnapi->tx_buffers) {
netif_queue_set_napi(tp->dev, txq_idx,
NETDEV_QUEUE_TYPE_TX,
@@ -7445,9 +7443,10 @@ static void tg3_napi_init(struct tg3 *tp)
int i;
for (i = 0; i < tp->irq_cnt; i++) {
- netif_napi_add(tp->dev, &tp->napi[i].napi,
- i ? tg3_poll_msix : tg3_poll);
- netif_napi_set_irq(&tp->napi[i].napi, tp->napi[i].irq_vec);
+ netif_napi_add_locked(tp->dev, &tp->napi[i].napi,
+ i ? tg3_poll_msix : tg3_poll);
+ netif_napi_set_irq_locked(&tp->napi[i].napi,
+ tp->napi[i].irq_vec);
}
}
@@ -9807,26 +9806,7 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
static inline u32 calc_crc(unsigned char *buf, int len)
{
- u32 reg;
- u32 tmp;
- int j, k;
-
- reg = 0xffffffff;
-
- for (j = 0; j < len; j++) {
- reg ^= buf[j];
-
- for (k = 0; k < 8; k++) {
- tmp = reg & 0x01;
-
- reg >>= 1;
-
- if (tmp)
- reg ^= CRC32_POLY_LE;
- }
- }
-
- return ~reg;
+ return ~crc32(~0, buf, len);
}
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
@@ -10162,7 +10142,7 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
/* Pseudo-header checksum is done by hardware logic and not
- * the offload processers, so make the chip do the pseudo-
+ * the offload processors, so make the chip do the pseudo-
* header checksums on receive. For transmit it is more
* convenient to do the pseudo-header checksum in software
* as Linux does that on transmit for us in all cases.
@@ -11079,7 +11059,7 @@ static void tg3_chk_missed_msi(struct tg3 *tp)
static void tg3_timer(struct timer_list *t)
{
- struct tg3 *tp = from_timer(tp, t, timer);
+ struct tg3 *tp = timer_container_of(tp, t, timer);
spin_lock(&tp->lock);
@@ -11250,7 +11230,7 @@ static void tg3_timer_start(struct tg3 *tp)
static void tg3_timer_stop(struct tg3 *tp)
{
- del_timer_sync(&tp->timer);
+ timer_delete_sync(&tp->timer);
}
/* Restart hardware after configuration changes, self-test, etc.
@@ -11259,6 +11239,8 @@ static void tg3_timer_stop(struct tg3 *tp)
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
__releases(tp->lock)
__acquires(tp->lock)
+ __releases(tp->dev->lock)
+ __acquires(tp->dev->lock)
{
int err;
@@ -11271,7 +11253,9 @@ static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
tg3_timer_stop(tp);
tp->irq_sync = 0;
tg3_napi_enable(tp);
+ netdev_unlock(tp->dev);
dev_close(tp->dev);
+ netdev_lock(tp->dev);
tg3_full_lock(tp, 0);
}
return err;
@@ -11299,6 +11283,7 @@ static void tg3_reset_task(struct work_struct *work)
tg3_netif_stop(tp);
+ netdev_lock(tp->dev);
tg3_full_lock(tp, 1);
if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
@@ -11318,12 +11303,14 @@ static void tg3_reset_task(struct work_struct *work)
* call cancel_work_sync() and wait forever.
*/
tg3_flag_clear(tp, RESET_TASK_PENDING);
+ netdev_unlock(tp->dev);
dev_close(tp->dev);
goto out;
}
tg3_netif_start(tp);
tg3_full_unlock(tp);
+ netdev_unlock(tp->dev);
tg3_phy_start(tp);
tg3_flag_clear(tp, RESET_TASK_PENDING);
out:
@@ -11683,9 +11670,11 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
if (err)
goto out_ints_fini;
+ netdev_lock(dev);
tg3_napi_init(tp);
tg3_napi_enable(tp);
+ netdev_unlock(dev);
for (i = 0; i < tp->irq_cnt; i++) {
err = tg3_request_irq(tp, i);
@@ -12569,6 +12558,7 @@ static int tg3_set_ringparam(struct net_device *dev,
irq_sync = 1;
}
+ netdev_lock(dev);
tg3_full_lock(tp, irq_sync);
tp->rx_pending = ering->rx_pending;
@@ -12597,6 +12587,7 @@ static int tg3_set_ringparam(struct net_device *dev,
}
tg3_full_unlock(tp);
+ netdev_unlock(dev);
if (irq_sync && !err)
tg3_phy_start(tp);
@@ -12678,6 +12669,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
irq_sync = 1;
}
+ netdev_lock(dev);
tg3_full_lock(tp, irq_sync);
if (epause->autoneg)
@@ -12707,6 +12699,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
}
tg3_full_unlock(tp);
+ netdev_unlock(dev);
}
tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
@@ -12726,29 +12719,17 @@ static int tg3_get_sset_count(struct net_device *dev, int sset)
}
}
-static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
- u32 *rules __always_unused)
+static u32 tg3_get_rx_ring_count(struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
if (!tg3_flag(tp, SUPPORT_MSIX))
- return -EOPNOTSUPP;
-
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- if (netif_running(tp->dev))
- info->data = tp->rxq_cnt;
- else {
- info->data = num_online_cpus();
- if (info->data > TG3_RSS_MAX_NUM_QS)
- info->data = TG3_RSS_MAX_NUM_QS;
- }
+ return 1;
- return 0;
+ if (netif_running(tp->dev))
+ return tp->rxq_cnt;
- default:
- return -EOPNOTSUPP;
- }
+ return min_t(u32, netif_get_num_default_rss_queues(), tp->rxq_max);
}
static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
@@ -13911,6 +13892,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
data[TG3_INTERRUPT_TEST] = 1;
}
+ netdev_lock(dev);
tg3_full_lock(tp, 0);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
@@ -13922,6 +13904,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
}
tg3_full_unlock(tp);
+ netdev_unlock(dev);
if (irq_sync && !err2)
tg3_phy_start(tp);
@@ -13931,22 +13914,20 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
}
-static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+static int tg3_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *stmpconf,
+ struct netlink_ext_ack *extack)
{
struct tg3 *tp = netdev_priv(dev);
- struct hwtstamp_config stmpconf;
if (!tg3_flag(tp, PTP_CAPABLE))
return -EOPNOTSUPP;
- if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
- return -EFAULT;
-
- if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
- stmpconf.tx_type != HWTSTAMP_TX_OFF)
+ if (stmpconf->tx_type != HWTSTAMP_TX_ON &&
+ stmpconf->tx_type != HWTSTAMP_TX_OFF)
return -ERANGE;
- switch (stmpconf.rx_filter) {
+ switch (stmpconf->rx_filter) {
case HWTSTAMP_FILTER_NONE:
tp->rxptpctl = 0;
break;
@@ -14006,74 +13987,72 @@ static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
tw32(TG3_RX_PTP_CTL,
tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
- if (stmpconf.tx_type == HWTSTAMP_TX_ON)
+ if (stmpconf->tx_type == HWTSTAMP_TX_ON)
tg3_flag_set(tp, TX_TSTAMP_EN);
else
tg3_flag_clear(tp, TX_TSTAMP_EN);
- return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
- -EFAULT : 0;
+ return 0;
}
-static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+static int tg3_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *stmpconf)
{
struct tg3 *tp = netdev_priv(dev);
- struct hwtstamp_config stmpconf;
if (!tg3_flag(tp, PTP_CAPABLE))
return -EOPNOTSUPP;
- stmpconf.flags = 0;
- stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
- HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
+ stmpconf->flags = 0;
+ stmpconf->tx_type = tg3_flag(tp, TX_TSTAMP_EN) ?
+ HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
switch (tp->rxptpctl) {
case 0:
- stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_NONE;
break;
case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
break;
case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
break;
default:
WARN_ON_ONCE(1);
return -ERANGE;
}
- return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
- -EFAULT : 0;
+ return 0;
}
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -14128,12 +14107,6 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return err;
- case SIOCSHWTSTAMP:
- return tg3_hwtstamp_set(dev, ifr);
-
- case SIOCGHWTSTAMP:
- return tg3_hwtstamp_get(dev, ifr);
-
default:
/* do nothing */
break;
@@ -14283,7 +14256,7 @@ static const struct ethtool_ops tg3_ethtool_ops = {
.get_coalesce = tg3_get_coalesce,
.set_coalesce = tg3_set_coalesce,
.get_sset_count = tg3_get_sset_count,
- .get_rxnfc = tg3_get_rxnfc,
+ .get_rx_ring_count = tg3_get_rx_ring_count,
.get_rxfh_indir_size = tg3_get_rxfh_indir_size,
.get_rxfh = tg3_get_rxfh,
.set_rxfh = tg3_set_rxfh,
@@ -14365,6 +14338,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
tg3_set_mtu(dev, tp, new_mtu);
+ netdev_lock(dev);
tg3_full_lock(tp, 1);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
@@ -14384,6 +14358,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
tg3_netif_start(tp);
tg3_full_unlock(tp);
+ netdev_unlock(dev);
if (!err)
tg3_phy_start(tp);
@@ -14407,6 +14382,8 @@ static const struct net_device_ops tg3_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = tg3_poll_controller,
#endif
+ .ndo_hwtstamp_get = tg3_hwtstamp_get,
+ .ndo_hwtstamp_set = tg3_hwtstamp_set,
};
static void tg3_get_eeprom_size(struct tg3 *tp)
@@ -16610,7 +16587,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tg3_flag_set(tp, PCIX_TARGET_HWBUG);
- /* The chip can have it's power management PCI config
+ /* The chip can have its power management PCI config
* space registers clobbered due to this bug.
* So explicitly force the chip into D0 here.
*/
@@ -17839,6 +17816,9 @@ static int tg3_init_one(struct pci_dev *pdev,
} else
persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
+ if (tg3_asic_rev(tp) == ASIC_REV_57766)
+ persist_dma_mask = DMA_BIT_MASK(31);
+
/* Configure DMA attributes. */
if (dma_mask > DMA_BIT_MASK(32)) {
err = dma_set_mask(&pdev->dev, dma_mask);
@@ -18161,6 +18141,7 @@ static int tg3_resume(struct device *device)
netif_device_attach(dev);
+ netdev_lock(dev);
tg3_full_lock(tp, 0);
tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
@@ -18177,6 +18158,7 @@ static int tg3_resume(struct device *device)
out:
tg3_full_unlock(tp);
+ netdev_unlock(dev);
if (!err)
tg3_phy_start(tp);
@@ -18189,6 +18171,50 @@ unlock:
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
+/* Systems where ACPI _PTS (Prepare To Sleep) S5 will result in a fatal
+ * PCIe AER event on the tg3 device if the tg3 device is not, or cannot
+ * be, powered down.
+ */
+static const struct dmi_system_id tg3_restart_aer_quirk_table[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R440"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R540"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R640"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R650"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R740"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R750"),
+ },
+ },
+ {}
+};
+
static void tg3_shutdown(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -18205,6 +18231,19 @@ static void tg3_shutdown(struct pci_dev *pdev)
if (system_state == SYSTEM_POWER_OFF)
tg3_power_down(tp);
+ else if (system_state == SYSTEM_RESTART &&
+ dmi_first_match(tg3_restart_aer_quirk_table) &&
+ pdev->current_state != PCI_D3cold &&
+ pdev->current_state != PCI_UNKNOWN) {
+ /* Disable PCIe AER on the tg3 to avoid a fatal
+ * error during this system restart.
+ */
+ pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_CERE |
+ PCI_EXP_DEVCTL_NFERE |
+ PCI_EXP_DEVCTL_FERE |
+ PCI_EXP_DEVCTL_URRE);
+ }
rtnl_unlock();
@@ -18257,7 +18296,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
done:
if (state == pci_channel_io_perm_failure) {
if (netdev) {
+ netdev_lock(netdev);
tg3_napi_enable(tp);
+ netdev_unlock(netdev);
dev_close(netdev);
}
err = PCI_ERS_RESULT_DISCONNECT;
@@ -18296,7 +18337,6 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
if (!netdev || !netif_running(netdev)) {
rc = PCI_ERS_RESULT_RECOVERED;
@@ -18311,7 +18351,9 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
done:
if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
+ netdev_lock(netdev);
tg3_napi_enable(tp);
+ netdev_unlock(netdev);
dev_close(netdev);
}
rtnl_unlock();
@@ -18337,12 +18379,14 @@ static void tg3_io_resume(struct pci_dev *pdev)
if (!netdev || !netif_running(netdev))
goto done;
+ netdev_lock(netdev);
tg3_full_lock(tp, 0);
tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
tg3_flag_set(tp, INIT_COMPLETE);
err = tg3_restart_hw(tp, true);
if (err) {
tg3_full_unlock(tp);
+ netdev_unlock(netdev);
netdev_err(netdev, "Cannot restart hardware after reset.\n");
goto done;
}
@@ -18354,6 +18398,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
tg3_netif_start(tp);
tg3_full_unlock(tp);
+ netdev_unlock(netdev);
tg3_phy_start(tp);
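
tg3 now exposes hardware timestamping through the ndo_hwtstamp_get/set callbacks instead of the SIOC[GS]HWTSTAMP ioctls, so the copy_from_user/copy_to_user plumbing disappears from the driver. A stripped-down sketch of the callback shape follows; demo_priv and its fields are invented, while the ndo signatures and struct kernel_hwtstamp_config are the real kernel interface.

    #include <linux/netdevice.h>
    #include <linux/net_tstamp.h>

    /* Hypothetical private state holding the current timestamping config. */
    struct demo_priv {
            int tx_type;
            int rx_filter;
    };

    static int demo_hwtstamp_set(struct net_device *dev,
                                 struct kernel_hwtstamp_config *cfg,
                                 struct netlink_ext_ack *extack)
    {
            struct demo_priv *priv = netdev_priv(dev);

            if (cfg->tx_type != HWTSTAMP_TX_ON && cfg->tx_type != HWTSTAMP_TX_OFF)
                    return -ERANGE;

            /* The core has already copied the config in from user space. */
            priv->tx_type = cfg->tx_type;
            priv->rx_filter = cfg->rx_filter;
            return 0;
    }

    static int demo_hwtstamp_get(struct net_device *dev,
                                 struct kernel_hwtstamp_config *cfg)
    {
            struct demo_priv *priv = netdev_priv(dev);

            /* The core copies the filled-in config back to user space. */
            cfg->tx_type = priv->tx_type;
            cfg->rx_filter = priv->rx_filter;
            return 0;
    }

    static const struct net_device_ops demo_netdev_ops = {
            .ndo_hwtstamp_get = demo_hwtstamp_get,
            .ndo_hwtstamp_set = demo_hwtstamp_set,
    };
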
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index b473f8014d9c..a9e7f88fa26d 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2390,7 +2390,7 @@
#define TG3_CL45_D7_EEERES_STAT_LP_1000T 0x0004
-/* Fast Ethernet Tranceiver definitions */
+/* Fast Ethernet Transceiver definitions */
#define MII_TG3_FET_PTEST 0x17
#define MII_TG3_FET_PTEST_TRIM_SEL 0x0010
#define MII_TG3_FET_PTEST_TRIM_2 0x0002
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 9c80ab07a735..92c7639d1fc7 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -314,13 +314,13 @@ bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
{
switch (event) {
case IOC_E_FWRSP_GETATTR:
- del_timer(&ioc->ioc_timer);
+ timer_delete(&ioc->ioc_timer);
bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
break;
case IOC_E_PFFAILED:
case IOC_E_HWERROR:
- del_timer(&ioc->ioc_timer);
+ timer_delete(&ioc->ioc_timer);
fallthrough;
case IOC_E_TIMEOUT:
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
@@ -330,7 +330,7 @@ bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
break;
case IOC_E_DISABLE:
- del_timer(&ioc->ioc_timer);
+ timer_delete(&ioc->ioc_timer);
bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
break;
@@ -659,13 +659,13 @@ bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
case IOCPF_E_DISABLE:
- del_timer(&ioc->iocpf_timer);
+ timer_delete(&ioc->iocpf_timer);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
bfa_ioc_pf_disabled(ioc);
break;
case IOCPF_E_STOP:
- del_timer(&ioc->iocpf_timer);
+ timer_delete(&ioc->iocpf_timer);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
break;
@@ -741,7 +741,7 @@ bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
case IOCPF_E_DISABLE:
- del_timer(&ioc->iocpf_timer);
+ timer_delete(&ioc->iocpf_timer);
bfa_ioc_sync_leave(ioc);
bfa_nw_ioc_hw_sem_release(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
@@ -774,13 +774,13 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
switch (event) {
case IOCPF_E_FWRSP_ENABLE:
- del_timer(&ioc->iocpf_timer);
+ timer_delete(&ioc->iocpf_timer);
bfa_nw_ioc_hw_sem_release(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
break;
case IOCPF_E_INITFAIL:
- del_timer(&ioc->iocpf_timer);
+ timer_delete(&ioc->iocpf_timer);
fallthrough;
case IOCPF_E_TIMEOUT:
@@ -791,7 +791,7 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
case IOCPF_E_DISABLE:
- del_timer(&ioc->iocpf_timer);
+ timer_delete(&ioc->iocpf_timer);
bfa_nw_ioc_hw_sem_release(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
break;
@@ -844,12 +844,12 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
switch (event) {
case IOCPF_E_FWRSP_DISABLE:
- del_timer(&ioc->iocpf_timer);
+ timer_delete(&ioc->iocpf_timer);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
break;
case IOCPF_E_FAIL:
- del_timer(&ioc->iocpf_timer);
+ timer_delete(&ioc->iocpf_timer);
fallthrough;
case IOCPF_E_TIMEOUT:
@@ -1210,7 +1210,7 @@ bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
{
- del_timer(&ioc->sem_timer);
+ timer_delete(&ioc->sem_timer);
}
/* Initialize LPU local memory (aka secondary memory / SRAM) */
@@ -1982,7 +1982,7 @@ bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
static void
bfa_ioc_hb_stop(struct bfa_ioc *ioc)
{
- del_timer(&ioc->hb_timer);
+ timer_delete(&ioc->hb_timer);
}
/* Initiate a full firmware download. */
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index ece6f3b48327..9bed33295839 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -19,6 +19,7 @@
#include <linux/ip.h>
#include <linux/prefetch.h>
#include <linux/module.h>
+#include <net/gro.h>
#include "bnad.h"
#include "bna.h"
@@ -1687,7 +1688,8 @@ err_return:
static void
bnad_ioc_timeout(struct timer_list *t)
{
- struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.ioc_timer);
+ struct bnad *bnad = timer_container_of(bnad, t,
+ bna.ioceth.ioc.ioc_timer);
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -1698,7 +1700,8 @@ bnad_ioc_timeout(struct timer_list *t)
static void
bnad_ioc_hb_check(struct timer_list *t)
{
- struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.hb_timer);
+ struct bnad *bnad = timer_container_of(bnad, t,
+ bna.ioceth.ioc.hb_timer);
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -1709,7 +1712,8 @@ bnad_ioc_hb_check(struct timer_list *t)
static void
bnad_iocpf_timeout(struct timer_list *t)
{
- struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.iocpf_timer);
+ struct bnad *bnad = timer_container_of(bnad, t,
+ bna.ioceth.ioc.iocpf_timer);
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -1720,7 +1724,8 @@ bnad_iocpf_timeout(struct timer_list *t)
static void
bnad_iocpf_sem_timeout(struct timer_list *t)
{
- struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.sem_timer);
+ struct bnad *bnad = timer_container_of(bnad, t,
+ bna.ioceth.ioc.sem_timer);
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -1734,7 +1739,7 @@ bnad_iocpf_sem_timeout(struct timer_list *t)
* Time CPU m CPU n
* 0 1 = test_bit
* 1 clear_bit
- * 2 del_timer_sync
+ * 2 timer_delete_sync
* 3 mod_timer
*/
@@ -1742,7 +1747,7 @@ bnad_iocpf_sem_timeout(struct timer_list *t)
static void
bnad_dim_timeout(struct timer_list *t)
{
- struct bnad *bnad = from_timer(bnad, t, dim_timer);
+ struct bnad *bnad = timer_container_of(bnad, t, dim_timer);
struct bnad_rx_info *rx_info;
struct bnad_rx_ctrl *rx_ctrl;
int i, j;
@@ -1775,7 +1780,7 @@ bnad_dim_timeout(struct timer_list *t)
static void
bnad_stats_timeout(struct timer_list *t)
{
- struct bnad *bnad = from_timer(bnad, t, stats_timer);
+ struct bnad *bnad = timer_container_of(bnad, t, stats_timer);
unsigned long flags;
if (!netif_running(bnad->netdev) ||
@@ -1836,7 +1841,7 @@ bnad_stats_timer_stop(struct bnad *bnad)
to_del = 1;
spin_unlock_irqrestore(&bnad->bna_lock, flags);
if (to_del)
- del_timer_sync(&bnad->stats_timer);
+ timer_delete_sync(&bnad->stats_timer);
}
/* Utilities */
@@ -2159,7 +2164,7 @@ bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
}
spin_unlock_irqrestore(&bnad->bna_lock, flags);
if (to_del)
- del_timer_sync(&bnad->dim_timer);
+ timer_delete_sync(&bnad->dim_timer);
}
init_completion(&bnad->bnad_completions.rx_comp);
@@ -3725,9 +3730,9 @@ probe_uninit:
bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
disable_ioceth:
bnad_ioceth_disable(bnad);
- del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
- del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
- del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
+ timer_delete_sync(&bnad->bna.ioceth.ioc.ioc_timer);
+ timer_delete_sync(&bnad->bna.ioceth.ioc.sem_timer);
+ timer_delete_sync(&bnad->bna.ioceth.ioc.hb_timer);
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_uninit(bna);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -3768,9 +3773,9 @@ bnad_pci_remove(struct pci_dev *pdev)
mutex_lock(&bnad->conf_mutex);
bnad_ioceth_disable(bnad);
- del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
- del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
- del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
+ timer_delete_sync(&bnad->bna.ioceth.ioc.ioc_timer);
+ timer_delete_sync(&bnad->bna.ioceth.ioc.sem_timer);
+ timer_delete_sync(&bnad->bna.ioceth.ioc.hb_timer);
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_uninit(bna);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
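
The bna timer callbacks above move from from_timer()/del_timer_sync() to the renamed timer_container_of()/timer_delete_sync() helpers. A minimal sketch of the pattern follows; demo_dev and the demo_* functions are illustrative, the timer API calls are real.

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    /* Hypothetical device embedding its watchdog timer. */
    struct demo_dev {
            struct timer_list watchdog;
            unsigned long events;
    };

    static void demo_watchdog_fn(struct timer_list *t)
    {
            /* Recover the containing structure from the timer_list pointer;
             * this is the renamed equivalent of the old from_timer().
             */
            struct demo_dev *dd = timer_container_of(dd, t, watchdog);

            dd->events++;
            mod_timer(&dd->watchdog, jiffies + HZ);
    }

    static void demo_start(struct demo_dev *dd)
    {
            timer_setup(&dd->watchdog, demo_watchdog_fn, 0);
            mod_timer(&dd->watchdog, jiffies + HZ);
    }

    static void demo_stop(struct demo_dev *dd)
    {
            /* Renamed del_timer_sync(): waits for a running callback to finish. */
            timer_delete_sync(&dd->watchdog);
    }
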
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index d1ad6c9f8140..216e25f26dbb 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -373,7 +373,7 @@ static int bnad_set_coalesce(struct net_device *netdev,
}
spin_unlock_irqrestore(&bnad->bna_lock, flags);
if (to_del)
- del_timer_sync(&bnad->dim_timer);
+ timer_delete_sync(&bnad->dim_timer);
spin_lock_irqsave(&bnad->bna_lock, flags);
bnad_rx_coalescing_timeo_set(bnad);
}
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 5740c98d8c9f..87414a2ddf6e 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -15,10 +15,6 @@
#include <linux/phy/phy.h>
#include <linux/workqueue.h>
-#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) || defined(CONFIG_MACB_USE_HWSTAMP)
-#define MACB_EXT_DESC
-#endif
-
#define MACB_GREGS_NBR 16
#define MACB_GREGS_VERSION 2
#define MACB_MAX_QUEUES 8
@@ -184,6 +180,13 @@
#define GEM_DCFG8 0x029C /* Design Config 8 */
#define GEM_DCFG10 0x02A4 /* Design Config 10 */
#define GEM_DCFG12 0x02AC /* Design Config 12 */
+#define GEM_ENST_START_TIME_Q0 0x0800 /* ENST Q0 start time */
+#define GEM_ENST_START_TIME_Q1 0x0804 /* ENST Q1 start time */
+#define GEM_ENST_ON_TIME_Q0 0x0820 /* ENST Q0 on time */
+#define GEM_ENST_ON_TIME_Q1 0x0824 /* ENST Q1 on time */
+#define GEM_ENST_OFF_TIME_Q0 0x0840 /* ENST Q0 off time */
+#define GEM_ENST_OFF_TIME_Q1 0x0844 /* ENST Q1 off time */
+#define GEM_ENST_CONTROL 0x0880 /* ENST control register */
#define GEM_USX_CONTROL 0x0A80 /* High speed PCS control register */
#define GEM_USX_STATUS 0x0A88 /* High speed PCS status register */
@@ -213,14 +216,19 @@
#define GEM_ISR(hw_q) (0x0400 + ((hw_q) << 2))
#define GEM_TBQP(hw_q) (0x0440 + ((hw_q) << 2))
-#define GEM_TBQPH(hw_q) (0x04C8)
#define GEM_RBQP(hw_q) (0x0480 + ((hw_q) << 2))
#define GEM_RBQS(hw_q) (0x04A0 + ((hw_q) << 2))
-#define GEM_RBQPH(hw_q) (0x04D4)
#define GEM_IER(hw_q) (0x0600 + ((hw_q) << 2))
#define GEM_IDR(hw_q) (0x0620 + ((hw_q) << 2))
#define GEM_IMR(hw_q) (0x0640 + ((hw_q) << 2))
+#define GEM_ENST_START_TIME(hw_q) (0x0800 + ((hw_q) << 2))
+#define GEM_ENST_ON_TIME(hw_q) (0x0820 + ((hw_q) << 2))
+#define GEM_ENST_OFF_TIME(hw_q) (0x0840 + ((hw_q) << 2))
+
+/* Bitfields in ENST_CONTROL */
+#define GEM_ENST_DISABLE_QUEUE_OFFSET 16
+
/* Bitfields in NCR */
#define MACB_LB_OFFSET 0 /* reserved */
#define MACB_LB_SIZE 1
@@ -529,6 +537,8 @@
/* Bitfields in DCFG6. */
#define GEM_PBUF_LSO_OFFSET 27
#define GEM_PBUF_LSO_SIZE 1
+#define GEM_PBUF_RSC_OFFSET 26
+#define GEM_PBUF_RSC_SIZE 1
#define GEM_PBUF_CUTTHRU_OFFSET 25
#define GEM_PBUF_CUTTHRU_SIZE 1
#define GEM_DAW64_OFFSET 23
@@ -554,6 +564,23 @@
#define GEM_HIGH_SPEED_OFFSET 26
#define GEM_HIGH_SPEED_SIZE 1
+/* Bitfields in ENST_START_TIME_Qx. */
+#define GEM_START_TIME_SEC_OFFSET 30
+#define GEM_START_TIME_SEC_SIZE 2
+#define GEM_START_TIME_NSEC_OFFSET 0
+#define GEM_START_TIME_NSEC_SIZE 30
+
+/* Bitfields in ENST_ON_TIME_Qx. */
+#define GEM_ON_TIME_OFFSET 0
+#define GEM_ON_TIME_SIZE 17
+
+/* Bitfields in ENST_OFF_TIME_Qx. */
+#define GEM_OFF_TIME_OFFSET 0
+#define GEM_OFF_TIME_SIZE 17
+
+/* Hardware ENST timing registers granularity */
+#define ENST_TIME_GRANULARITY_NS 8
+
/* Bitfields in USX_CONTROL. */
#define GEM_USX_CTRL_SPEED_OFFSET 14
#define GEM_USX_CTRL_SPEED_SIZE 3
@@ -727,26 +754,31 @@
#define MACB_MAN_C45_CODE 2
/* Capability mask bits */
-#define MACB_CAPS_ISR_CLEAR_ON_WRITE 0x00000001
-#define MACB_CAPS_USRIO_HAS_CLKEN 0x00000002
-#define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII 0x00000004
-#define MACB_CAPS_NO_GIGABIT_HALF 0x00000008
-#define MACB_CAPS_USRIO_DISABLED 0x00000010
-#define MACB_CAPS_JUMBO 0x00000020
-#define MACB_CAPS_GEM_HAS_PTP 0x00000040
-#define MACB_CAPS_BD_RD_PREFETCH 0x00000080
-#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100
-#define MACB_CAPS_MIIONRGMII 0x00000200
-#define MACB_CAPS_NEED_TSUCLK 0x00000400
-#define MACB_CAPS_QUEUE_DISABLE 0x00000800
-#define MACB_CAPS_PCS 0x01000000
-#define MACB_CAPS_HIGH_SPEED 0x02000000
-#define MACB_CAPS_CLK_HW_CHG 0x04000000
-#define MACB_CAPS_MACB_IS_EMAC 0x08000000
-#define MACB_CAPS_FIFO_MODE 0x10000000
-#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
-#define MACB_CAPS_SG_DISABLED 0x40000000
-#define MACB_CAPS_MACB_IS_GEM 0x80000000
+#define MACB_CAPS_ISR_CLEAR_ON_WRITE BIT(0)
+#define MACB_CAPS_USRIO_HAS_CLKEN BIT(1)
+#define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII BIT(2)
+#define MACB_CAPS_NO_GIGABIT_HALF BIT(3)
+#define MACB_CAPS_USRIO_DISABLED BIT(4)
+#define MACB_CAPS_JUMBO BIT(5)
+#define MACB_CAPS_GEM_HAS_PTP BIT(6)
+#define MACB_CAPS_BD_RD_PREFETCH BIT(7)
+#define MACB_CAPS_NEEDS_RSTONUBR BIT(8)
+#define MACB_CAPS_MIIONRGMII BIT(9)
+#define MACB_CAPS_NEED_TSUCLK BIT(10)
+#define MACB_CAPS_QUEUE_DISABLE BIT(11)
+#define MACB_CAPS_QBV BIT(12)
+#define MACB_CAPS_PCS BIT(13)
+#define MACB_CAPS_HIGH_SPEED BIT(14)
+#define MACB_CAPS_CLK_HW_CHG BIT(15)
+#define MACB_CAPS_MACB_IS_EMAC BIT(16)
+#define MACB_CAPS_FIFO_MODE BIT(17)
+#define MACB_CAPS_GIGABIT_MODE_AVAILABLE BIT(18)
+#define MACB_CAPS_SG_DISABLED BIT(19)
+#define MACB_CAPS_MACB_IS_GEM BIT(20)
+#define MACB_CAPS_DMA_64B BIT(21)
+#define MACB_CAPS_DMA_PTP BIT(22)
+#define MACB_CAPS_RSC BIT(23)
+#define MACB_CAPS_NO_LSO BIT(24)
/* LSO settings */
#define MACB_LSO_UFO_ENABLE 0x01
@@ -823,12 +855,6 @@ struct macb_dma_desc {
u32 ctrl;
};
-#ifdef MACB_EXT_DESC
-#define HW_DMA_CAP_32B 0
-#define HW_DMA_CAP_64B (1 << 0)
-#define HW_DMA_CAP_PTP (1 << 1)
-#define HW_DMA_CAP_64B_PTP (HW_DMA_CAP_64B | HW_DMA_CAP_PTP)
-
struct macb_dma_desc_64 {
u32 addrh;
u32 resvd;
@@ -838,7 +864,6 @@ struct macb_dma_desc_ptp {
u32 ts_1;
u32 ts_2;
};
-#endif
/* DMA descriptor bitfields */
#define MACB_RX_USED_OFFSET 0
@@ -951,75 +976,73 @@ struct macb_tx_skb {
* device stats by a periodic timer.
*/
struct macb_stats {
- u32 rx_pause_frames;
- u32 tx_ok;
- u32 tx_single_cols;
- u32 tx_multiple_cols;
- u32 rx_ok;
- u32 rx_fcs_errors;
- u32 rx_align_errors;
- u32 tx_deferred;
- u32 tx_late_cols;
- u32 tx_excessive_cols;
- u32 tx_underruns;
- u32 tx_carrier_errors;
- u32 rx_resource_errors;
- u32 rx_overruns;
- u32 rx_symbol_errors;
- u32 rx_oversize_pkts;
- u32 rx_jabbers;
- u32 rx_undersize_pkts;
- u32 sqe_test_errors;
- u32 rx_length_mismatch;
- u32 tx_pause_frames;
+ u64 rx_pause_frames;
+ u64 tx_ok;
+ u64 tx_single_cols;
+ u64 tx_multiple_cols;
+ u64 rx_ok;
+ u64 rx_fcs_errors;
+ u64 rx_align_errors;
+ u64 tx_deferred;
+ u64 tx_late_cols;
+ u64 tx_excessive_cols;
+ u64 tx_underruns;
+ u64 tx_carrier_errors;
+ u64 rx_resource_errors;
+ u64 rx_overruns;
+ u64 rx_symbol_errors;
+ u64 rx_oversize_pkts;
+ u64 rx_jabbers;
+ u64 rx_undersize_pkts;
+ u64 sqe_test_errors;
+ u64 rx_length_mismatch;
+ u64 tx_pause_frames;
};
struct gem_stats {
- u32 tx_octets_31_0;
- u32 tx_octets_47_32;
- u32 tx_frames;
- u32 tx_broadcast_frames;
- u32 tx_multicast_frames;
- u32 tx_pause_frames;
- u32 tx_64_byte_frames;
- u32 tx_65_127_byte_frames;
- u32 tx_128_255_byte_frames;
- u32 tx_256_511_byte_frames;
- u32 tx_512_1023_byte_frames;
- u32 tx_1024_1518_byte_frames;
- u32 tx_greater_than_1518_byte_frames;
- u32 tx_underrun;
- u32 tx_single_collision_frames;
- u32 tx_multiple_collision_frames;
- u32 tx_excessive_collisions;
- u32 tx_late_collisions;
- u32 tx_deferred_frames;
- u32 tx_carrier_sense_errors;
- u32 rx_octets_31_0;
- u32 rx_octets_47_32;
- u32 rx_frames;
- u32 rx_broadcast_frames;
- u32 rx_multicast_frames;
- u32 rx_pause_frames;
- u32 rx_64_byte_frames;
- u32 rx_65_127_byte_frames;
- u32 rx_128_255_byte_frames;
- u32 rx_256_511_byte_frames;
- u32 rx_512_1023_byte_frames;
- u32 rx_1024_1518_byte_frames;
- u32 rx_greater_than_1518_byte_frames;
- u32 rx_undersized_frames;
- u32 rx_oversize_frames;
- u32 rx_jabbers;
- u32 rx_frame_check_sequence_errors;
- u32 rx_length_field_frame_errors;
- u32 rx_symbol_errors;
- u32 rx_alignment_errors;
- u32 rx_resource_errors;
- u32 rx_overruns;
- u32 rx_ip_header_checksum_errors;
- u32 rx_tcp_checksum_errors;
- u32 rx_udp_checksum_errors;
+ u64 tx_octets;
+ u64 tx_frames;
+ u64 tx_broadcast_frames;
+ u64 tx_multicast_frames;
+ u64 tx_pause_frames;
+ u64 tx_64_byte_frames;
+ u64 tx_65_127_byte_frames;
+ u64 tx_128_255_byte_frames;
+ u64 tx_256_511_byte_frames;
+ u64 tx_512_1023_byte_frames;
+ u64 tx_1024_1518_byte_frames;
+ u64 tx_greater_than_1518_byte_frames;
+ u64 tx_underrun;
+ u64 tx_single_collision_frames;
+ u64 tx_multiple_collision_frames;
+ u64 tx_excessive_collisions;
+ u64 tx_late_collisions;
+ u64 tx_deferred_frames;
+ u64 tx_carrier_sense_errors;
+ u64 rx_octets;
+ u64 rx_frames;
+ u64 rx_broadcast_frames;
+ u64 rx_multicast_frames;
+ u64 rx_pause_frames;
+ u64 rx_64_byte_frames;
+ u64 rx_65_127_byte_frames;
+ u64 rx_128_255_byte_frames;
+ u64 rx_256_511_byte_frames;
+ u64 rx_512_1023_byte_frames;
+ u64 rx_1024_1518_byte_frames;
+ u64 rx_greater_than_1518_byte_frames;
+ u64 rx_undersized_frames;
+ u64 rx_oversize_frames;
+ u64 rx_jabbers;
+ u64 rx_frame_check_sequence_errors;
+ u64 rx_length_field_frame_errors;
+ u64 rx_symbol_errors;
+ u64 rx_alignment_errors;
+ u64 rx_resource_errors;
+ u64 rx_overruns;
+ u64 rx_ip_header_checksum_errors;
+ u64 rx_tcp_checksum_errors;
+ u64 rx_udp_checksum_errors;
};
/* Describes the name and offset of an individual statistic register, as
@@ -1027,7 +1050,7 @@ struct gem_stats {
* this register should contribute to.
*/
struct gem_statistic {
- char stat_string[ETH_GSTRING_LEN];
+ char stat_string[ETH_GSTRING_LEN] __nonstring;
int offset;
u32 stat_bits;
};
@@ -1216,10 +1239,13 @@ struct macb_queue {
unsigned int IDR;
unsigned int IMR;
unsigned int TBQP;
- unsigned int TBQPH;
unsigned int RBQS;
unsigned int RBQP;
- unsigned int RBQPH;
+
+ /* ENST register offsets for this queue */
+ unsigned int ENST_START_TIME;
+ unsigned int ENST_ON_TIME;
+ unsigned int ENST_OFF_TIME;
/* Lock to protect tx_head and tx_tail */
spinlock_t tx_ptr_lock;
@@ -1268,7 +1294,6 @@ struct macb {
unsigned int tx_ring_size;
unsigned int num_queues;
- unsigned int queue_mask;
struct macb_queue queues[MACB_MAX_QUEUES];
spinlock_t lock;
@@ -1279,6 +1304,8 @@ struct macb {
struct clk *rx_clk;
struct clk *tsu_clk;
struct net_device *dev;
+ /* Protects hw_stats and ethtool_stats */
+ spinlock_t stats_lock;
union {
struct macb_stats macb;
struct gem_stats gem;
@@ -1314,11 +1341,8 @@ struct macb {
struct macb_ptp_info *ptp_info; /* macb-ptp interface */
- struct phy *sgmii_phy; /* for ZynqMP SGMII mode */
+ struct phy *phy;
-#ifdef MACB_EXT_DESC
- uint8_t hw_dma_cap;
-#endif
spinlock_t tsu_clk_lock; /* gem tsu clock locking */
unsigned int tsu_rate;
struct ptp_clock *ptp_clock;
@@ -1397,6 +1421,31 @@ static inline bool gem_has_ptp(struct macb *bp)
return IS_ENABLED(CONFIG_MACB_USE_HWSTAMP) && (bp->caps & MACB_CAPS_GEM_HAS_PTP);
}
+/* ENST Helper functions */
+static inline u64 enst_ns_to_hw_units(size_t ns, u32 speed_mbps)
+{
+ return DIV_ROUND_UP((ns) * (speed_mbps),
+ (ENST_TIME_GRANULARITY_NS * 1000));
+}
+
+static inline u64 enst_max_hw_interval(u32 speed_mbps)
+{
+ return DIV_ROUND_UP(GENMASK(GEM_ON_TIME_SIZE - 1, 0) *
+ ENST_TIME_GRANULARITY_NS * 1000, (speed_mbps));
+}
+
+static inline bool macb_dma64(struct macb *bp)
+{
+ return IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
+ bp->caps & MACB_CAPS_DMA_64B;
+}
+
+static inline bool macb_dma_ptp(struct macb *bp)
+{
+ return IS_ENABLED(CONFIG_MACB_USE_HWSTAMP) &&
+ bp->caps & MACB_CAPS_DMA_PTP;
+}
+
/**
* struct macb_platform_data - platform data for MACB Ethernet used for PCI registration
* @pclk: platform clock
@@ -1407,4 +1456,21 @@ struct macb_platform_data {
struct clk *hclk;
};
+/**
+ * struct macb_queue_enst_config - Configuration for Enhanced Scheduled Traffic
+ * @start_time_mask: Bitmask representing the start time for the queue
+ * @on_time_bytes: "on" time nsec expressed in bytes
+ * @off_time_bytes: "off" time nsec expressed in bytes
+ * @queue_id: Identifier for the queue
+ *
+ * This structure holds the configuration parameters for an ENST queue,
+ * used to control time-based transmission scheduling in the MACB driver.
+ */
+struct macb_queue_enst_config {
+ u32 start_time_mask;
+ u32 on_time_bytes;
+ u32 off_time_bytes;
+ u8 queue_id;
+};
+
#endif /* _MACB_H */
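
The ENST helpers added above convert an on/off interval from nanoseconds into the hardware's byte-time units (8 ns granularity, scaled by link speed): ns * Mbit/s / 1000 gives bits on the wire, and dividing by 8 gives bytes, hence the combined divisor of 8000. A quick worked check, assuming speed_mbps is the current link speed in Mbit/s and using example values not taken from the patch:

    #include <linux/types.h>
    #include <linux/math.h>

    #define DEMO_ENST_TIME_GRANULARITY_NS 8

    /* Same arithmetic as enst_ns_to_hw_units() above, spelled out so the
     * example operating points below can be checked by hand.
     */
    static inline u64 demo_ns_to_hw_units(u64 ns, u32 speed_mbps)
    {
            return DIV_ROUND_UP(ns * speed_mbps,
                                DEMO_ENST_TIME_GRANULARITY_NS * 1000);
    }

    /* 10 us of "on" time at 1 Gb/s:   10000 * 1000 / 8000 = 1250 units
     * 10 us of "on" time at 100 Mb/s: 10000 *  100 / 8000 =  125 units
     * so the same wall-clock window maps to fewer byte-times as the link
     * slows down, which is why the helper takes speed_mbps as an input.
     */
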
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index daa416fb1724..e461f5072884 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -6,39 +6,37 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/clk.h>
+#include <linux/circ_buf.h>
#include <linux/clk-provider.h>
+#include <linux/clk.h>
#include <linux/crc32.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/circ_buf.h>
-#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/inetdevice.h>
#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-#include <linux/phylink.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/ip.h>
-#include <linux/udp.h>
-#include <linux/tcp.h>
-#include <linux/iopoll.h>
#include <linux/phy/phy.h>
+#include <linux/phylink.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/ptp_classify.h>
#include <linux/reset.h>
-#include <linux/firmware/xlnx-zynqmp.h>
-#include <linux/inetdevice.h>
+#include <linux/slab.h>
+#include <linux/tcp.h>
+#include <linux/types.h>
+#include <linux/udp.h>
+#include <net/pkt_sched.h>
#include "macb.h"
/* This structure is only used for MACB on SiFive FU540 devices */
@@ -54,14 +52,10 @@ struct sifive_fu540_macb_mgmt {
#define DEFAULT_RX_RING_SIZE 512 /* must be power of 2 */
#define MIN_RX_RING_SIZE 64
#define MAX_RX_RING_SIZE 8192
-#define RX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
- * (bp)->rx_ring_size)
#define DEFAULT_TX_RING_SIZE 512 /* must be power of 2 */
#define MIN_TX_RING_SIZE 64
#define MAX_TX_RING_SIZE 4096
-#define TX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
- * (bp)->tx_ring_size)
/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4)
@@ -127,56 +121,26 @@ struct sifive_fu540_macb_mgmt {
*/
static unsigned int macb_dma_desc_get_size(struct macb *bp)
{
-#ifdef MACB_EXT_DESC
- unsigned int desc_size;
+ unsigned int desc_size = sizeof(struct macb_dma_desc);
+
+ if (macb_dma64(bp))
+ desc_size += sizeof(struct macb_dma_desc_64);
+ if (macb_dma_ptp(bp))
+ desc_size += sizeof(struct macb_dma_desc_ptp);
- switch (bp->hw_dma_cap) {
- case HW_DMA_CAP_64B:
- desc_size = sizeof(struct macb_dma_desc)
- + sizeof(struct macb_dma_desc_64);
- break;
- case HW_DMA_CAP_PTP:
- desc_size = sizeof(struct macb_dma_desc)
- + sizeof(struct macb_dma_desc_ptp);
- break;
- case HW_DMA_CAP_64B_PTP:
- desc_size = sizeof(struct macb_dma_desc)
- + sizeof(struct macb_dma_desc_64)
- + sizeof(struct macb_dma_desc_ptp);
- break;
- default:
- desc_size = sizeof(struct macb_dma_desc);
- }
return desc_size;
-#endif
- return sizeof(struct macb_dma_desc);
}
static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
{
-#ifdef MACB_EXT_DESC
- switch (bp->hw_dma_cap) {
- case HW_DMA_CAP_64B:
- case HW_DMA_CAP_PTP:
- desc_idx <<= 1;
- break;
- case HW_DMA_CAP_64B_PTP:
- desc_idx *= 3;
- break;
- default:
- break;
- }
-#endif
- return desc_idx;
+ return desc_idx * (1 + macb_dma64(bp) + macb_dma_ptp(bp));
}
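The single multiplication above reproduces the removed switch: the stride per logical descriptor is one base slot, plus one for the optional 64-bit address word, plus one for the optional PTP word, i.e. 1, 2, 2 or 3 depending on the enabled capabilities. A small sketch of the mapping (illustrative only):

#include <stdbool.h>
#include <stdio.h>

/* Descriptor index stride as a function of the two optional extensions. */
static unsigned int desc_stride(bool dma64, bool ptp)
{
	return 1 + dma64 + ptp;
}

int main(void)
{
	printf("plain=%u 64b=%u ptp=%u 64b+ptp=%u\n",
	       desc_stride(false, false), desc_stride(true, false),
	       desc_stride(false, true), desc_stride(true, true));
	return 0;
}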
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
{
return (struct macb_dma_desc_64 *)((void *)desc
+ sizeof(struct macb_dma_desc));
}
-#endif
/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
@@ -281,9 +245,9 @@ static void macb_set_hwaddr(struct macb *bp)
u32 bottom;
u16 top;
- bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
+ bottom = get_unaligned_le32(bp->dev->dev_addr);
macb_or_gem_writel(bp, SA1B, bottom);
- top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
+ top = get_unaligned_le16(bp->dev->dev_addr + 4);
macb_or_gem_writel(bp, SA1T, top);
if (gem_has_ptp(bp)) {
@@ -363,7 +327,6 @@ static int macb_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum)
status = MACB_BFEXT(DATA, macb_readl(bp, MAN));
mdio_read_exit:
- pm_runtime_mark_last_busy(&bp->pdev->dev);
pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
return status;
@@ -409,7 +372,6 @@ static int macb_mdio_read_c45(struct mii_bus *bus, int mii_id, int devad,
status = MACB_BFEXT(DATA, macb_readl(bp, MAN));
mdio_read_exit:
- pm_runtime_mark_last_busy(&bp->pdev->dev);
pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
return status;
@@ -441,7 +403,6 @@ static int macb_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum,
goto mdio_write_exit;
mdio_write_exit:
- pm_runtime_mark_last_busy(&bp->pdev->dev);
pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
return status;
@@ -487,7 +448,6 @@ static int macb_mdio_write_c45(struct mii_bus *bus, int mii_id,
goto mdio_write_exit;
mdio_write_exit:
- pm_runtime_mark_last_busy(&bp->pdev->dev);
pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
return status;
@@ -498,19 +458,17 @@ static void macb_init_buffers(struct macb *bp)
struct macb_queue *queue;
unsigned int q;
+ /* Single register for all queues' high 32 bits. */
+ if (macb_dma64(bp)) {
+ macb_writel(bp, RBQPH,
+ upper_32_bits(bp->queues[0].rx_ring_dma));
+ macb_writel(bp, TBQPH,
+ upper_32_bits(bp->queues[0].tx_ring_dma));
+ }
+
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, RBQPH,
- upper_32_bits(queue->rx_ring_dma));
-#endif
queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, TBQPH,
- upper_32_bits(queue->tx_ring_dma));
-#endif
}
}
@@ -530,19 +488,9 @@ static void macb_set_tx_clk(struct macb *bp, int speed)
if (bp->phy_interface == PHY_INTERFACE_MODE_MII)
return;
- switch (speed) {
- case SPEED_10:
- rate = 2500000;
- break;
- case SPEED_100:
- rate = 25000000;
- break;
- case SPEED_1000:
- rate = 125000000;
- break;
- default:
+ rate = rgmii_clock(speed);
+ if (rate < 0)
return;
- }
rate_rounded = clk_round_rate(bp->tx_clk, rate);
if (rate_rounded < 0)
@@ -578,6 +526,7 @@ static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
}
static void macb_usx_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
@@ -608,7 +557,7 @@ static int macb_usx_pcs_config(struct phylink_pcs *pcs,
return 0;
}
-static void macb_pcs_get_state(struct phylink_pcs *pcs,
+static void macb_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
state->link = 0;
@@ -862,9 +811,7 @@ static int macb_mii_probe(struct net_device *dev)
struct macb *bp = netdev_priv(dev);
bp->phylink_sgmii_pcs.ops = &macb_phylink_pcs_ops;
- bp->phylink_sgmii_pcs.neg_mode = true;
bp->phylink_usx_pcs.ops = &macb_phylink_usx_pcs_ops;
- bp->phylink_usx_pcs.neg_mode = true;
bp->phylink_config.dev = &dev->dev;
bp->phylink_config.type = PHYLINK_NETDEV;
@@ -999,8 +946,8 @@ err_out:
static void macb_update_stats(struct macb *bp)
{
- u32 *p = &bp->hw_stats.macb.rx_pause_frames;
- u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
+ u64 *p = &bp->hw_stats.macb.rx_pause_frames;
+ u64 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
int offset = MACB_PFR;
WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
@@ -1011,22 +958,15 @@ static void macb_update_stats(struct macb *bp)
static int macb_halt_tx(struct macb *bp)
{
- unsigned long halt_time, timeout;
- u32 status;
+ u32 status;
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
- timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
- do {
- halt_time = jiffies;
- status = macb_readl(bp, TSR);
- if (!(status & MACB_BIT(TGO)))
- return 0;
-
- udelay(250);
- } while (time_before(halt_time, timeout));
-
- return -ETIMEDOUT;
+ /* Poll TSR until TGO is cleared or timeout. */
+ return read_poll_timeout_atomic(macb_readl, status,
+ !(status & MACB_BIT(TGO)),
+ 250, MACB_HALT_TIMEOUT, false,
+ bp, TSR);
}
static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budget)
@@ -1049,10 +989,9 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budge
static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
{
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- struct macb_dma_desc_64 *desc_64;
+ if (macb_dma64(bp)) {
+ struct macb_dma_desc_64 *desc_64;
- if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
desc_64 = macb_64b_desc(bp, desc);
desc_64->addrh = upper_32_bits(addr);
/* The low bits of RX address contain the RX_USED bit, clearing
@@ -1061,26 +1000,23 @@ static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_
*/
dma_wmb();
}
-#endif
+
desc->addr = lower_32_bits(addr);
}
static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
{
dma_addr_t addr = 0;
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- struct macb_dma_desc_64 *desc_64;
- if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
+ if (macb_dma64(bp)) {
+ struct macb_dma_desc_64 *desc_64;
+
desc_64 = macb_64b_desc(bp, desc);
addr = ((u64)(desc_64->addrh) << 32);
}
-#endif
addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
-#ifdef CONFIG_MACB_USE_HWSTAMP
- if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
+ if (macb_dma_ptp(bp))
addr &= ~GEM_BIT(DMA_RXVALID);
-#endif
return addr;
}
@@ -1090,15 +1026,18 @@ static void macb_tx_error_task(struct work_struct *work)
tx_error_task);
bool halt_timeout = false;
struct macb *bp = queue->bp;
+ u32 queue_index;
+ u32 packets = 0;
+ u32 bytes = 0;
struct macb_tx_skb *tx_skb;
struct macb_dma_desc *desc;
struct sk_buff *skb;
unsigned int tail;
unsigned long flags;
+ queue_index = queue - bp->queues;
netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
- (unsigned int)(queue - bp->queues),
- queue->tx_tail, queue->tx_head);
+ queue_index, queue->tx_tail, queue->tx_head);
/* Prevent the queue NAPI TX poll from running, as it calls
* macb_tx_complete(), which in turn may call netif_wake_subqueue().
@@ -1151,8 +1090,10 @@ static void macb_tx_error_task(struct work_struct *work)
skb->data);
bp->dev->stats.tx_packets++;
queue->stats.tx_packets++;
+ packets++;
bp->dev->stats.tx_bytes += skb->len;
queue->stats.tx_bytes += skb->len;
+ bytes += skb->len;
}
} else {
/* "Buffers exhausted mid-frame" errors may only happen
@@ -1169,6 +1110,9 @@ static void macb_tx_error_task(struct work_struct *work)
macb_tx_unmap(bp, tx_skb, 0);
}
+ netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index),
+ packets, bytes);
+
/* Set end of TX queue */
desc = macb_tx_desc(queue, 0);
macb_set_addr(bp, desc, 0);
@@ -1179,10 +1123,6 @@ static void macb_tx_error_task(struct work_struct *work)
/* Reinitialize the TX desc queue */
queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
-#endif
/* Make TX ring reflect state of hardware */
queue->tx_head = 0;
queue->tx_tail = 0;
@@ -1236,11 +1176,13 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
{
struct macb *bp = queue->bp;
u16 queue_index = queue - bp->queues;
+ unsigned long flags;
unsigned int tail;
unsigned int head;
int packets = 0;
+ u32 bytes = 0;
- spin_lock(&queue->tx_ptr_lock);
+ spin_lock_irqsave(&queue->tx_ptr_lock, flags);
head = queue->tx_head;
for (tail = queue->tx_tail; tail != head && packets < budget; tail++) {
struct macb_tx_skb *tx_skb;
@@ -1280,6 +1222,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
bp->dev->stats.tx_bytes += skb->len;
queue->stats.tx_bytes += skb->len;
packets++;
+ bytes += skb->len;
}
/* Now we can safely release resources */
@@ -1294,12 +1237,15 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
}
}
+ netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index),
+ packets, bytes);
+
queue->tx_tail = tail;
if (__netif_subqueue_stopped(bp->dev, queue_index) &&
CIRC_CNT(queue->tx_head, queue->tx_tail,
bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
netif_wake_subqueue(bp->dev, queue_index);
- spin_unlock(&queue->tx_ptr_lock);
+ spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
return packets;
}
@@ -1350,8 +1296,19 @@ static void gem_rx_refill(struct macb_queue *queue)
dma_wmb();
macb_set_addr(bp, desc, paddr);
- /* properly align Ethernet header */
- skb_reserve(skb, NET_IP_ALIGN);
+ /* Properly align Ethernet header.
+ *
+ * Hardware can add dummy bytes if asked using the RBOF
+ * field inside the NCFGR register. That feature isn't
+ * available if hardware is RSC capable.
+ *
+ * We cannot fall back to doing the 2-byte shift before
+ * DMA mapping because the address field does not allow
+ * setting the low 2/3 bits.
+ * It is 3 bits when PTP descriptors are enabled, else 2 bits.
+ */
+ if (!(bp->caps & MACB_CAPS_RSC))
+ skb_reserve(skb, NET_IP_ALIGN);
} else {
desc->ctrl = 0;
dma_wmb();
@@ -1715,8 +1672,9 @@ static void macb_tx_restart(struct macb_queue *queue)
{
struct macb *bp = queue->bp;
unsigned int head_idx, tbqp;
+ unsigned long flags;
- spin_lock(&queue->tx_ptr_lock);
+ spin_lock_irqsave(&queue->tx_ptr_lock, flags);
if (queue->tx_head == queue->tx_tail)
goto out_tx_ptr_unlock;
@@ -1728,19 +1686,20 @@ static void macb_tx_restart(struct macb_queue *queue)
if (tbqp == head_idx)
goto out_tx_ptr_unlock;
- spin_lock_irq(&bp->lock);
+ spin_lock(&bp->lock);
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
- spin_unlock_irq(&bp->lock);
+ spin_unlock(&bp->lock);
out_tx_ptr_unlock:
- spin_unlock(&queue->tx_ptr_lock);
+ spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
}
static bool macb_tx_complete_pending(struct macb_queue *queue)
{
bool retval = false;
+ unsigned long flags;
- spin_lock(&queue->tx_ptr_lock);
+ spin_lock_irqsave(&queue->tx_ptr_lock, flags);
if (queue->tx_head != queue->tx_tail) {
/* Make hw descriptor updates visible to CPU */
rmb();
@@ -1748,7 +1707,7 @@ static bool macb_tx_complete_pending(struct macb_queue *queue)
if (macb_tx_desc(queue, queue->tx_tail)->ctrl & MACB_BIT(TX_USED))
retval = true;
}
- spin_unlock(&queue->tx_ptr_lock);
+ spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
return retval;
}
@@ -1987,10 +1946,12 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
if (status & MACB_BIT(ISR_ROVR)) {
/* We missed at least one packet */
+ spin_lock(&bp->stats_lock);
if (macb_is_gem(bp))
bp->hw_stats.gem.rx_overruns++;
else
bp->hw_stats.macb.rx_overruns++;
+ spin_unlock(&bp->stats_lock);
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
@@ -2034,14 +1995,14 @@ static unsigned int macb_tx_map(struct macb *bp,
struct sk_buff *skb,
unsigned int hdrlen)
{
- dma_addr_t mapping;
- unsigned int len, entry, i, tx_head = queue->tx_head;
- struct macb_tx_skb *tx_skb = NULL;
- struct macb_dma_desc *desc;
- unsigned int offset, size, count = 0;
unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
- unsigned int eof = 1, mss_mfs = 0;
+ unsigned int len, i, tx_head = queue->tx_head;
u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;
+ unsigned int eof = 1, mss_mfs = 0;
+ struct macb_tx_skb *tx_skb = NULL;
+ struct macb_dma_desc *desc;
+ unsigned int offset, size;
+ dma_addr_t mapping;
/* LSO */
if (skb_shinfo(skb)->gso_size != 0) {
@@ -2061,8 +2022,7 @@ static unsigned int macb_tx_map(struct macb *bp,
offset = 0;
while (len) {
- entry = macb_tx_ring_wrap(bp, tx_head);
- tx_skb = &queue->tx_skb[entry];
+ tx_skb = macb_tx_skb(queue, tx_head);
mapping = dma_map_single(&bp->pdev->dev,
skb->data + offset,
@@ -2078,10 +2038,9 @@ static unsigned int macb_tx_map(struct macb *bp,
len -= size;
offset += size;
- count++;
tx_head++;
- size = min(len, bp->max_tx_length);
+ size = umin(len, bp->max_tx_length);
}
/* Then, map paged data from fragments */
@@ -2091,9 +2050,8 @@ static unsigned int macb_tx_map(struct macb *bp,
len = skb_frag_size(frag);
offset = 0;
while (len) {
- size = min(len, bp->max_tx_length);
- entry = macb_tx_ring_wrap(bp, tx_head);
- tx_skb = &queue->tx_skb[entry];
+ size = umin(len, bp->max_tx_length);
+ tx_skb = macb_tx_skb(queue, tx_head);
mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
offset, size, DMA_TO_DEVICE);
@@ -2108,7 +2066,6 @@ static unsigned int macb_tx_map(struct macb *bp,
len -= size;
offset += size;
- count++;
tx_head++;
}
}
@@ -2130,9 +2087,8 @@ static unsigned int macb_tx_map(struct macb *bp,
* to set the end of TX queue
*/
i = tx_head;
- entry = macb_tx_ring_wrap(bp, i);
ctrl = MACB_BIT(TX_USED);
- desc = macb_tx_desc(queue, entry);
+ desc = macb_tx_desc(queue, i);
desc->ctrl = ctrl;
if (lso_ctrl) {
@@ -2152,16 +2108,15 @@ static unsigned int macb_tx_map(struct macb *bp,
do {
i--;
- entry = macb_tx_ring_wrap(bp, i);
- tx_skb = &queue->tx_skb[entry];
- desc = macb_tx_desc(queue, entry);
+ tx_skb = macb_tx_skb(queue, i);
+ desc = macb_tx_desc(queue, i);
ctrl = (u32)tx_skb->size;
if (eof) {
ctrl |= MACB_BIT(TX_LAST);
eof = 0;
}
- if (unlikely(entry == (bp->tx_ring_size - 1)))
+ if (unlikely(macb_tx_ring_wrap(bp, i) == bp->tx_ring_size - 1))
ctrl |= MACB_BIT(TX_WRAP);
/* First descriptor is header descriptor */
@@ -2189,7 +2144,7 @@ static unsigned int macb_tx_map(struct macb *bp,
queue->tx_head = tx_head;
- return count;
+ return 0;
dma_error:
netdev_err(bp->dev, "TX DMA map failed\n");
@@ -2200,7 +2155,7 @@ dma_error:
macb_tx_unmap(bp, tx_skb, 0);
}
- return 0;
+ return -ENOMEM;
}
static netdev_features_t macb_features_check(struct sk_buff *skb,
@@ -2314,6 +2269,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct macb_queue *queue = &bp->queues[queue_index];
unsigned int desc_cnt, nr_frags, frag_size, f;
unsigned int hdrlen;
+ unsigned long flags;
bool is_lso;
netdev_tx_t ret = NETDEV_TX_OK;
@@ -2327,11 +2283,9 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
return ret;
}
-#ifdef CONFIG_MACB_USE_HWSTAMP
- if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- (bp->hw_dma_cap & HW_DMA_CAP_PTP))
+ if (macb_dma_ptp(bp) &&
+ (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-#endif
is_lso = (skb_shinfo(skb)->gso_size != 0);
@@ -2348,7 +2302,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
} else
- hdrlen = min(skb_headlen(skb), bp->max_tx_length);
+ hdrlen = umin(skb_headlen(skb), bp->max_tx_length);
#if defined(DEBUG) && defined(VERBOSE_DEBUG)
netdev_vdbg(bp->dev,
@@ -2374,7 +2328,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
}
- spin_lock_bh(&queue->tx_ptr_lock);
+ spin_lock_irqsave(&queue->tx_ptr_lock, flags);
/* This is a hard error, log it. */
if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
@@ -2387,7 +2341,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Map socket buffer for DMA transfer */
- if (!macb_tx_map(bp, queue, skb, hdrlen)) {
+ if (macb_tx_map(bp, queue, skb, hdrlen)) {
dev_kfree_skb_any(skb);
goto unlock;
}
@@ -2395,16 +2349,18 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Make newly initialized descriptor visible to hardware */
wmb();
skb_tx_timestamp(skb);
+ netdev_tx_sent_queue(netdev_get_tx_queue(bp->dev, queue_index),
+ skb->len);
- spin_lock_irq(&bp->lock);
+ spin_lock(&bp->lock);
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
- spin_unlock_irq(&bp->lock);
+ spin_unlock(&bp->lock);
if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
netif_stop_subqueue(dev, queue_index);
unlock:
- spin_unlock_bh(&queue->tx_ptr_lock);
+ spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
return ret;
}
@@ -2474,35 +2430,42 @@ static void macb_free_rx_buffers(struct macb *bp)
}
}
+static unsigned int macb_tx_ring_size_per_queue(struct macb *bp)
+{
+ return macb_dma_desc_get_size(bp) * bp->tx_ring_size + bp->tx_bd_rd_prefetch;
+}
+
+static unsigned int macb_rx_ring_size_per_queue(struct macb *bp)
+{
+ return macb_dma_desc_get_size(bp) * bp->rx_ring_size + bp->rx_bd_rd_prefetch;
+}
+
static void macb_free_consistent(struct macb *bp)
{
+ struct device *dev = &bp->pdev->dev;
struct macb_queue *queue;
unsigned int q;
- int size;
+ size_t size;
if (bp->rx_ring_tieoff) {
- dma_free_coherent(&bp->pdev->dev, macb_dma_desc_get_size(bp),
+ dma_free_coherent(dev, macb_dma_desc_get_size(bp),
bp->rx_ring_tieoff, bp->rx_ring_tieoff_dma);
bp->rx_ring_tieoff = NULL;
}
bp->macbgem_ops.mog_free_rx_buffers(bp);
+ size = bp->num_queues * macb_tx_ring_size_per_queue(bp);
+ dma_free_coherent(dev, size, bp->queues[0].tx_ring, bp->queues[0].tx_ring_dma);
+
+ size = bp->num_queues * macb_rx_ring_size_per_queue(bp);
+ dma_free_coherent(dev, size, bp->queues[0].rx_ring, bp->queues[0].rx_ring_dma);
+
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
kfree(queue->tx_skb);
queue->tx_skb = NULL;
- if (queue->tx_ring) {
- size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
- dma_free_coherent(&bp->pdev->dev, size,
- queue->tx_ring, queue->tx_ring_dma);
- queue->tx_ring = NULL;
- }
- if (queue->rx_ring) {
- size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
- dma_free_coherent(&bp->pdev->dev, size,
- queue->rx_ring, queue->rx_ring_dma);
- queue->rx_ring = NULL;
- }
+ queue->tx_ring = NULL;
+ queue->rx_ring = NULL;
}
}
@@ -2544,35 +2507,45 @@ static int macb_alloc_rx_buffers(struct macb *bp)
static int macb_alloc_consistent(struct macb *bp)
{
+ struct device *dev = &bp->pdev->dev;
+ dma_addr_t tx_dma, rx_dma;
struct macb_queue *queue;
unsigned int q;
- int size;
+ void *tx, *rx;
+ size_t size;
+
+ /*
+ * The upper 32 bits of the Tx/Rx DMA descriptor base must match for all queues!
+ * We cannot enforce this guarantee, so the best we can do is a single
+ * allocation and hope it lands in alloc_pages(), which guarantees
+ * natural alignment of physical addresses.
+ */
+
+ size = bp->num_queues * macb_tx_ring_size_per_queue(bp);
+ tx = dma_alloc_coherent(dev, size, &tx_dma, GFP_KERNEL);
+ if (!tx || upper_32_bits(tx_dma) != upper_32_bits(tx_dma + size - 1))
+ goto out_err;
+ netdev_dbg(bp->dev, "Allocated %zu bytes for %u TX rings at %08lx (mapped %p)\n",
+ size, bp->num_queues, (unsigned long)tx_dma, tx);
+
+ size = bp->num_queues * macb_rx_ring_size_per_queue(bp);
+ rx = dma_alloc_coherent(dev, size, &rx_dma, GFP_KERNEL);
+ if (!rx || upper_32_bits(rx_dma) != upper_32_bits(rx_dma + size - 1))
+ goto out_err;
+ netdev_dbg(bp->dev, "Allocated %zu bytes for %u RX rings at %08lx (mapped %p)\n",
+ size, bp->num_queues, (unsigned long)rx_dma, rx);
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
- queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
- &queue->tx_ring_dma,
- GFP_KERNEL);
- if (!queue->tx_ring)
- goto out_err;
- netdev_dbg(bp->dev,
- "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
- q, size, (unsigned long)queue->tx_ring_dma,
- queue->tx_ring);
+ queue->tx_ring = tx + macb_tx_ring_size_per_queue(bp) * q;
+ queue->tx_ring_dma = tx_dma + macb_tx_ring_size_per_queue(bp) * q;
+
+ queue->rx_ring = rx + macb_rx_ring_size_per_queue(bp) * q;
+ queue->rx_ring_dma = rx_dma + macb_rx_ring_size_per_queue(bp) * q;
size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
queue->tx_skb = kmalloc(size, GFP_KERNEL);
if (!queue->tx_skb)
goto out_err;
-
- size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
- queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
- &queue->rx_ring_dma, GFP_KERNEL);
- if (!queue->rx_ring)
- goto out_err;
- netdev_dbg(bp->dev,
- "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
- size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
}
if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
goto out_err;
@@ -2789,14 +2762,10 @@ static void macb_configure_dma(struct macb *bp)
dmacfg &= ~GEM_BIT(TXCOEN);
dmacfg &= ~GEM_BIT(ADDR64);
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ if (macb_dma64(bp))
dmacfg |= GEM_BIT(ADDR64);
-#endif
-#ifdef CONFIG_MACB_USE_HWSTAMP
- if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
+ if (macb_dma_ptp(bp))
dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT);
-#endif
netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
dmacfg);
gem_writel(bp, DMACFG, dmacfg);
@@ -2811,7 +2780,11 @@ static void macb_init_hw(struct macb *bp)
macb_set_hwaddr(bp);
config = macb_mdc_clk_div(bp);
- config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
+ /* Make eth data aligned.
+ * If RSC capable, that offset is ignored by HW.
+ */
+ if (!(bp->caps & MACB_CAPS_RSC))
+ config |= MACB_BF(RBOF, NET_IP_ALIGN);
config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
if (bp->caps & MACB_CAPS_JUMBO)
config |= MACB_BIT(JFRAME); /* Enable jumbo frames */
@@ -2988,7 +2961,11 @@ static int macb_open(struct net_device *dev)
macb_init_hw(bp);
- err = phy_power_on(bp->sgmii_phy);
+ err = phy_set_mode_ext(bp->phy, PHY_MODE_ETHERNET, bp->phy_interface);
+ if (err)
+ goto reset_hw;
+
+ err = phy_power_on(bp->phy);
if (err)
goto reset_hw;
@@ -3004,7 +2981,7 @@ static int macb_open(struct net_device *dev)
return 0;
phy_off:
- phy_power_off(bp->sgmii_phy);
+ phy_power_off(bp->phy);
reset_hw:
macb_reset_hw(bp);
@@ -3030,12 +3007,13 @@ static int macb_close(struct net_device *dev)
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
napi_disable(&queue->napi_rx);
napi_disable(&queue->napi_tx);
+ netdev_tx_reset_queue(netdev_get_tx_queue(dev, q));
}
phylink_stop(bp->phylink);
phylink_disconnect_phy(bp->phylink);
- phy_power_off(bp->sgmii_phy);
+ phy_power_off(bp->phy);
spin_lock_irqsave(&bp->lock, flags);
macb_reset_hw(bp);
@@ -3080,7 +3058,7 @@ static void gem_update_stats(struct macb *bp)
unsigned int i, q, idx;
unsigned long *stat;
- u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
+ u64 *p = &bp->hw_stats.gem.tx_octets;
for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
u32 offset = gem_statistics[i].offset;
@@ -3093,7 +3071,7 @@ static void gem_update_stats(struct macb *bp)
/* Add GEM_OCTTXH, GEM_OCTRXH */
val = bp->macb_reg_readl(bp, offset + 4);
bp->ethtool_stats[i] += ((u64)val) << 32;
- *(++p) += val;
+ *p += ((u64)val) << 32;
}
}
@@ -3103,15 +3081,13 @@ static void gem_update_stats(struct macb *bp)
bp->ethtool_stats[idx++] = *stat;
}
-static struct net_device_stats *gem_get_stats(struct macb *bp)
+static void gem_get_stats(struct macb *bp, struct rtnl_link_stats64 *nstat)
{
struct gem_stats *hwstat = &bp->hw_stats.gem;
- struct net_device_stats *nstat = &bp->dev->stats;
-
- if (!netif_running(bp->dev))
- return nstat;
- gem_update_stats(bp);
+ spin_lock_irq(&bp->stats_lock);
+ if (netif_running(bp->dev))
+ gem_update_stats(bp);
nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
hwstat->rx_alignment_errors +
@@ -3140,19 +3116,19 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
nstat->tx_fifo_errors = hwstat->tx_underrun;
-
- return nstat;
+ spin_unlock_irq(&bp->stats_lock);
}
static void gem_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
- struct macb *bp;
+ struct macb *bp = netdev_priv(dev);
- bp = netdev_priv(dev);
+ spin_lock_irq(&bp->stats_lock);
gem_update_stats(bp);
memcpy(data, &bp->ethtool_stats, sizeof(u64)
* (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
+ spin_unlock_irq(&bp->stats_lock);
}
static int gem_get_sset_count(struct net_device *dev, int sset)
@@ -3192,16 +3168,20 @@ static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
}
}
-static struct net_device_stats *macb_get_stats(struct net_device *dev)
+static void macb_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *nstat)
{
struct macb *bp = netdev_priv(dev);
- struct net_device_stats *nstat = &bp->dev->stats;
struct macb_stats *hwstat = &bp->hw_stats.macb;
- if (macb_is_gem(bp))
- return gem_get_stats(bp);
+ netdev_stats_to_stats64(nstat, &bp->dev->stats);
+ if (macb_is_gem(bp)) {
+ gem_get_stats(bp, nstat);
+ return;
+ }
/* read stats from hardware */
+ spin_lock_irq(&bp->stats_lock);
macb_update_stats(bp);
/* Convert HW stats into netdevice stats */
@@ -3235,8 +3215,171 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
nstat->tx_fifo_errors = hwstat->tx_underruns;
/* Don't know about heartbeat or window errors... */
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void macb_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+ spin_lock_irq(&bp->stats_lock);
+ macb_update_stats(bp);
+ pause_stats->tx_pause_frames = hwstat->tx_pause_frames;
+ pause_stats->rx_pause_frames = hwstat->rx_pause_frames;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void gem_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct gem_stats *hwstat = &bp->hw_stats.gem;
+
+ spin_lock_irq(&bp->stats_lock);
+ gem_update_stats(bp);
+ pause_stats->tx_pause_frames = hwstat->tx_pause_frames;
+ pause_stats->rx_pause_frames = hwstat->rx_pause_frames;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void macb_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+ spin_lock_irq(&bp->stats_lock);
+ macb_update_stats(bp);
+ mac_stats->FramesTransmittedOK = hwstat->tx_ok;
+ mac_stats->SingleCollisionFrames = hwstat->tx_single_cols;
+ mac_stats->MultipleCollisionFrames = hwstat->tx_multiple_cols;
+ mac_stats->FramesReceivedOK = hwstat->rx_ok;
+ mac_stats->FrameCheckSequenceErrors = hwstat->rx_fcs_errors;
+ mac_stats->AlignmentErrors = hwstat->rx_align_errors;
+ mac_stats->FramesWithDeferredXmissions = hwstat->tx_deferred;
+ mac_stats->LateCollisions = hwstat->tx_late_cols;
+ mac_stats->FramesAbortedDueToXSColls = hwstat->tx_excessive_cols;
+ mac_stats->FramesLostDueToIntMACXmitError = hwstat->tx_underruns;
+ mac_stats->CarrierSenseErrors = hwstat->tx_carrier_errors;
+ mac_stats->FramesLostDueToIntMACRcvError = hwstat->rx_overruns;
+ mac_stats->InRangeLengthErrors = hwstat->rx_length_mismatch;
+ mac_stats->FrameTooLongErrors = hwstat->rx_oversize_pkts;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void gem_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct gem_stats *hwstat = &bp->hw_stats.gem;
- return nstat;
+ spin_lock_irq(&bp->stats_lock);
+ gem_update_stats(bp);
+ mac_stats->FramesTransmittedOK = hwstat->tx_frames;
+ mac_stats->SingleCollisionFrames = hwstat->tx_single_collision_frames;
+ mac_stats->MultipleCollisionFrames =
+ hwstat->tx_multiple_collision_frames;
+ mac_stats->FramesReceivedOK = hwstat->rx_frames;
+ mac_stats->FrameCheckSequenceErrors =
+ hwstat->rx_frame_check_sequence_errors;
+ mac_stats->AlignmentErrors = hwstat->rx_alignment_errors;
+ mac_stats->OctetsTransmittedOK = hwstat->tx_octets;
+ mac_stats->FramesWithDeferredXmissions = hwstat->tx_deferred_frames;
+ mac_stats->LateCollisions = hwstat->tx_late_collisions;
+ mac_stats->FramesAbortedDueToXSColls = hwstat->tx_excessive_collisions;
+ mac_stats->FramesLostDueToIntMACXmitError = hwstat->tx_underrun;
+ mac_stats->CarrierSenseErrors = hwstat->tx_carrier_sense_errors;
+ mac_stats->OctetsReceivedOK = hwstat->rx_octets;
+ mac_stats->MulticastFramesXmittedOK = hwstat->tx_multicast_frames;
+ mac_stats->BroadcastFramesXmittedOK = hwstat->tx_broadcast_frames;
+ mac_stats->MulticastFramesReceivedOK = hwstat->rx_multicast_frames;
+ mac_stats->BroadcastFramesReceivedOK = hwstat->rx_broadcast_frames;
+ mac_stats->InRangeLengthErrors = hwstat->rx_length_field_frame_errors;
+ mac_stats->FrameTooLongErrors = hwstat->rx_oversize_frames;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+/* TODO: Report SQE test errors when added to phy_stats */
+static void macb_get_eth_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+ spin_lock_irq(&bp->stats_lock);
+ macb_update_stats(bp);
+ phy_stats->SymbolErrorDuringCarrier = hwstat->rx_symbol_errors;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void gem_get_eth_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct gem_stats *hwstat = &bp->hw_stats.gem;
+
+ spin_lock_irq(&bp->stats_lock);
+ gem_update_stats(bp);
+ phy_stats->SymbolErrorDuringCarrier = hwstat->rx_symbol_errors;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void macb_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+ spin_lock_irq(&bp->stats_lock);
+ macb_update_stats(bp);
+ rmon_stats->undersize_pkts = hwstat->rx_undersize_pkts;
+ rmon_stats->oversize_pkts = hwstat->rx_oversize_pkts;
+ rmon_stats->jabbers = hwstat->rx_jabbers;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static const struct ethtool_rmon_hist_range gem_rmon_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 16384 },
+ { },
+};
+
+static void gem_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct gem_stats *hwstat = &bp->hw_stats.gem;
+
+ spin_lock_irq(&bp->stats_lock);
+ gem_update_stats(bp);
+ rmon_stats->undersize_pkts = hwstat->rx_undersized_frames;
+ rmon_stats->oversize_pkts = hwstat->rx_oversize_frames;
+ rmon_stats->jabbers = hwstat->rx_jabbers;
+ rmon_stats->hist[0] = hwstat->rx_64_byte_frames;
+ rmon_stats->hist[1] = hwstat->rx_65_127_byte_frames;
+ rmon_stats->hist[2] = hwstat->rx_128_255_byte_frames;
+ rmon_stats->hist[3] = hwstat->rx_256_511_byte_frames;
+ rmon_stats->hist[4] = hwstat->rx_512_1023_byte_frames;
+ rmon_stats->hist[5] = hwstat->rx_1024_1518_byte_frames;
+ rmon_stats->hist[6] = hwstat->rx_greater_than_1518_byte_frames;
+ rmon_stats->hist_tx[0] = hwstat->tx_64_byte_frames;
+ rmon_stats->hist_tx[1] = hwstat->tx_65_127_byte_frames;
+ rmon_stats->hist_tx[2] = hwstat->tx_128_255_byte_frames;
+ rmon_stats->hist_tx[3] = hwstat->tx_256_511_byte_frames;
+ rmon_stats->hist_tx[4] = hwstat->tx_512_1023_byte_frames;
+ rmon_stats->hist_tx[5] = hwstat->tx_1024_1518_byte_frames;
+ rmon_stats->hist_tx[6] = hwstat->tx_greater_than_1518_byte_frames;
+ spin_unlock_irq(&bp->stats_lock);
+ *ranges = gem_rmon_ranges;
}
static int macb_get_regs_len(struct net_device *netdev)
@@ -3406,7 +3549,7 @@ static int gem_get_ts_info(struct net_device *dev,
{
struct macb *bp = netdev_priv(dev);
- if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) {
+ if (!macb_dma_ptp(bp)) {
ethtool_op_get_ts_info(dev, info);
return 0;
}
@@ -3765,6 +3908,10 @@ static const struct ethtool_ops macb_ethtool_ops = {
.get_regs = macb_get_regs,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_pause_stats = macb_get_pause_stats,
+ .get_eth_mac_stats = macb_get_eth_mac_stats,
+ .get_eth_phy_stats = macb_get_eth_phy_stats,
+ .get_rmon_stats = macb_get_rmon_stats,
.get_wol = macb_get_wol,
.set_wol = macb_set_wol,
.get_link_ksettings = macb_get_link_ksettings,
@@ -3783,6 +3930,10 @@ static const struct ethtool_ops gem_ethtool_ops = {
.get_ethtool_stats = gem_get_ethtool_stats,
.get_strings = gem_get_ethtool_strings,
.get_sset_count = gem_get_sset_count,
+ .get_pause_stats = gem_get_pause_stats,
+ .get_eth_mac_stats = gem_get_eth_mac_stats,
+ .get_eth_phy_stats = gem_get_eth_phy_stats,
+ .get_rmon_stats = gem_get_rmon_stats,
.get_link_ksettings = macb_get_link_ksettings,
.set_link_ksettings = macb_set_link_ksettings,
.get_ringparam = macb_get_ringparam,
@@ -3914,12 +4065,234 @@ static void macb_restore_features(struct macb *bp)
macb_set_rxflow_feature(bp, features);
}
+static int macb_taprio_setup_replace(struct net_device *ndev,
+ struct tc_taprio_qopt_offload *conf)
+{
+ u64 total_on_time = 0, start_time_sec = 0, start_time = conf->base_time;
+ u32 configured_queues = 0, speed = 0, start_time_nsec;
+ struct macb_queue_enst_config *enst_queue;
+ struct tc_taprio_sched_entry *entry;
+ struct macb *bp = netdev_priv(ndev);
+ struct ethtool_link_ksettings kset;
+ struct macb_queue *queue;
+ u32 queue_mask;
+ u8 queue_id;
+ size_t i;
+ int err;
+
+ if (conf->num_entries > bp->num_queues) {
+ netdev_err(ndev, "Too many TAPRIO entries: %zu > %d queues\n",
+ conf->num_entries, bp->num_queues);
+ return -EINVAL;
+ }
+
+ if (conf->base_time < 0) {
+ netdev_err(ndev, "Invalid base_time: must be 0 or positive, got %lld\n",
+ conf->base_time);
+ return -ERANGE;
+ }
+
+ /* Get the current link speed */
+ err = phylink_ethtool_ksettings_get(bp->phylink, &kset);
+ if (unlikely(err)) {
+ netdev_err(ndev, "Failed to get link settings: %d\n", err);
+ return err;
+ }
+
+ speed = kset.base.speed;
+ if (unlikely(speed <= 0)) {
+ netdev_err(ndev, "Invalid speed: %d\n", speed);
+ return -EINVAL;
+ }
+
+ enst_queue = kcalloc(conf->num_entries, sizeof(*enst_queue), GFP_KERNEL);
+ if (unlikely(!enst_queue))
+ return -ENOMEM;
+
+ /* Pre-validate all entries before making any hardware changes */
+ for (i = 0; i < conf->num_entries; i++) {
+ entry = &conf->entries[i];
+
+ if (entry->command != TC_TAPRIO_CMD_SET_GATES) {
+ netdev_err(ndev, "Entry %zu: unsupported command %d\n",
+ i, entry->command);
+ err = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ /* Validate gate_mask: must be nonzero, single queue, and within range */
+ if (!is_power_of_2(entry->gate_mask)) {
+ netdev_err(ndev, "Entry %zu: gate_mask 0x%x is not a power of 2 (only one queue per entry allowed)\n",
+ i, entry->gate_mask);
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* gate_mask must not select queues outside the valid queues */
+ queue_id = order_base_2(entry->gate_mask);
+ if (queue_id >= bp->num_queues) {
+ netdev_err(ndev, "Entry %zu: gate_mask 0x%x exceeds queue range (max_queues=%d)\n",
+ i, entry->gate_mask, bp->num_queues);
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* Check for start time limits */
+ start_time_sec = start_time;
+ start_time_nsec = do_div(start_time_sec, NSEC_PER_SEC);
+ if (start_time_sec > GENMASK(GEM_START_TIME_SEC_SIZE - 1, 0)) {
+ netdev_err(ndev, "Entry %zu: Start time %llu s exceeds hardware limit\n",
+ i, start_time_sec);
+ err = -ERANGE;
+ goto cleanup;
+ }
+
+ /* Check for on time limit */
+ if (entry->interval > enst_max_hw_interval(speed)) {
+ netdev_err(ndev, "Entry %zu: interval %u ns exceeds hardware limit %llu ns\n",
+ i, entry->interval, enst_max_hw_interval(speed));
+ err = -ERANGE;
+ goto cleanup;
+ }
+
+ /* Check for off time limit */
+ if ((conf->cycle_time - entry->interval) > enst_max_hw_interval(speed)) {
+ netdev_err(ndev, "Entry %zu: off_time %llu ns exceeds hardware limit %llu ns\n",
+ i, conf->cycle_time - entry->interval,
+ enst_max_hw_interval(speed));
+ err = -ERANGE;
+ goto cleanup;
+ }
+
+ enst_queue[i].queue_id = queue_id;
+ enst_queue[i].start_time_mask =
+ (start_time_sec << GEM_START_TIME_SEC_OFFSET) |
+ start_time_nsec;
+ enst_queue[i].on_time_bytes =
+ enst_ns_to_hw_units(entry->interval, speed);
+ enst_queue[i].off_time_bytes =
+ enst_ns_to_hw_units(conf->cycle_time - entry->interval, speed);
+
+ configured_queues |= entry->gate_mask;
+ total_on_time += entry->interval;
+ start_time += entry->interval;
+ }
+
+ /* Check total interval doesn't exceed cycle time */
+ if (total_on_time > conf->cycle_time) {
+ netdev_err(ndev, "Total ON %llu ns exceeds cycle time %llu ns\n",
+ total_on_time, conf->cycle_time);
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ netdev_dbg(ndev, "TAPRIO setup: %zu entries, base_time=%lld ns, cycle_time=%llu ns\n",
+ conf->num_entries, conf->base_time, conf->cycle_time);
+
+ /* All validations passed - proceed with hardware configuration */
+ scoped_guard(spinlock_irqsave, &bp->lock) {
+ /* Disable ENST queues if running before configuring */
+ queue_mask = BIT_U32(bp->num_queues) - 1;
+ gem_writel(bp, ENST_CONTROL,
+ queue_mask << GEM_ENST_DISABLE_QUEUE_OFFSET);
+
+ for (i = 0; i < conf->num_entries; i++) {
+ queue = &bp->queues[enst_queue[i].queue_id];
+ /* Configure queue timing registers */
+ queue_writel(queue, ENST_START_TIME,
+ enst_queue[i].start_time_mask);
+ queue_writel(queue, ENST_ON_TIME,
+ enst_queue[i].on_time_bytes);
+ queue_writel(queue, ENST_OFF_TIME,
+ enst_queue[i].off_time_bytes);
+ }
+
+ /* Enable ENST for all configured queues in one write */
+ gem_writel(bp, ENST_CONTROL, configured_queues);
+ }
+
+ netdev_info(ndev, "TAPRIO configuration completed successfully: %zu entries, %d queues configured\n",
+ conf->num_entries, hweight32(configured_queues));
+
+cleanup:
+ kfree(enst_queue);
+ return err;
+}
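Note on the start-time split in the validation loop above: do_div() divides its first argument in place and returns the remainder, so start_time_sec ends up holding whole seconds and start_time_nsec the leftover nanoseconds. A user-space sketch of the equivalent split (NSEC_PER_SEC is 1000000000):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t start_time = 5123456789ULL;	/* example absolute time in ns */
	uint64_t start_time_sec = start_time;
	uint32_t start_time_nsec;

	/* Equivalent of: start_time_nsec = do_div(start_time_sec, NSEC_PER_SEC); */
	start_time_nsec = start_time_sec % NSEC_PER_SEC;
	start_time_sec /= NSEC_PER_SEC;

	printf("%llu s + %u ns\n",
	       (unsigned long long)start_time_sec, start_time_nsec);
	return 0;
}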
+
+static void macb_taprio_destroy(struct net_device *ndev)
+{
+ struct macb *bp = netdev_priv(ndev);
+ struct macb_queue *queue;
+ u32 queue_mask;
+ unsigned int q;
+
+ netdev_reset_tc(ndev);
+ queue_mask = BIT_U32(bp->num_queues) - 1;
+
+ scoped_guard(spinlock_irqsave, &bp->lock) {
+ /* Single disable command for all queues */
+ gem_writel(bp, ENST_CONTROL,
+ queue_mask << GEM_ENST_DISABLE_QUEUE_OFFSET);
+
+ /* Clear all queue ENST registers in batch */
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue_writel(queue, ENST_START_TIME, 0);
+ queue_writel(queue, ENST_ON_TIME, 0);
+ queue_writel(queue, ENST_OFF_TIME, 0);
+ }
+ }
+ netdev_info(ndev, "TAPRIO destroy: All gates disabled\n");
+}
+
+static int macb_setup_taprio(struct net_device *ndev,
+ struct tc_taprio_qopt_offload *taprio)
+{
+ struct macb *bp = netdev_priv(ndev);
+ int err = 0;
+
+ if (unlikely(!(ndev->hw_features & NETIF_F_HW_TC)))
+ return -EOPNOTSUPP;
+
+ /* Check if Device is in runtime suspend */
+ if (unlikely(pm_runtime_suspended(&bp->pdev->dev))) {
+ netdev_err(ndev, "Device is in runtime suspend\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (taprio->cmd) {
+ case TAPRIO_CMD_REPLACE:
+ err = macb_taprio_setup_replace(ndev, taprio);
+ break;
+ case TAPRIO_CMD_DESTROY:
+ macb_taprio_destroy(ndev);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static int macb_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ if (!dev || !type_data)
+ return -EINVAL;
+
+ switch (type) {
+ case TC_SETUP_QDISC_TAPRIO:
+ return macb_setup_taprio(dev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
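From user space this offload path is exercised by installing a taprio qdisc with the full-offload flag, e.g. tc qdisc replace dev eth0 parent root taprio num_tc 2 map 0 1 queues 1@0 1@1 base-time 0 sched-entry S 01 300000 sched-entry S 02 700000 flags 0x2 (device name and interval values are illustrative); each sched-entry gate mask must select exactly one queue to pass the is_power_of_2() check in macb_taprio_setup_replace().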
+
static const struct net_device_ops macb_netdev_ops = {
.ndo_open = macb_open,
.ndo_stop = macb_close,
.ndo_start_xmit = macb_start_xmit,
.ndo_set_rx_mode = macb_set_rx_mode,
- .ndo_get_stats = macb_get_stats,
+ .ndo_get_stats64 = macb_get_stats,
.ndo_eth_ioctl = macb_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = macb_change_mtu,
@@ -3931,6 +4304,7 @@ static const struct net_device_ops macb_netdev_ops = {
.ndo_features_check = macb_features_check,
.ndo_hwtstamp_set = macb_hwtstamp_set,
.ndo_hwtstamp_get = macb_hwtstamp_get,
+ .ndo_setup_tc = macb_setup_tc,
};
/* Configure peripheral capabilities according to device tree
@@ -3939,8 +4313,12 @@ static const struct net_device_ops macb_netdev_ops = {
static void macb_configure_caps(struct macb *bp,
const struct macb_config *dt_conf)
{
+ struct device_node *np = bp->pdev->dev.of_node;
+ bool refclk_ext;
u32 dcfg;
+ refclk_ext = of_property_read_bool(np, "cdns,refclk-ext");
+
if (dt_conf)
bp->caps = dt_conf->caps;
@@ -3958,42 +4336,46 @@ static void macb_configure_caps(struct macb *bp,
dcfg = gem_readl(bp, DCFG2);
if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
bp->caps |= MACB_CAPS_FIFO_MODE;
+ if (GEM_BFEXT(PBUF_RSC, gem_readl(bp, DCFG6)))
+ bp->caps |= MACB_CAPS_RSC;
if (gem_has_ptp(bp)) {
if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5)))
dev_err(&bp->pdev->dev,
"GEM doesn't support hardware ptp.\n");
else {
#ifdef CONFIG_MACB_USE_HWSTAMP
- bp->hw_dma_cap |= HW_DMA_CAP_PTP;
+ bp->caps |= MACB_CAPS_DMA_PTP;
bp->ptp_info = &gem_ptp_info;
#endif
}
}
}
+ if (refclk_ext)
+ bp->caps |= MACB_CAPS_USRIO_HAS_CLKEN;
+
dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
}
-static void macb_probe_queues(void __iomem *mem,
- bool native_io,
- unsigned int *queue_mask,
- unsigned int *num_queues)
+static int macb_probe_queues(struct device *dev, void __iomem *mem, bool native_io)
{
- *queue_mask = 0x1;
- *num_queues = 1;
+ /* BIT(0) is never set but queue 0 always exists. */
+ unsigned int queue_mask = 0x1;
- /* is it macb or gem ?
- *
- * We need to read directly from the hardware here because
- * we are early in the probe process and don't have the
- * MACB_CAPS_MACB_IS_GEM flag positioned
- */
- if (!hw_is_gem(mem, native_io))
- return;
+ /* Use hw_is_gem() as MACB_CAPS_MACB_IS_GEM is not set yet at this point. */
+ if (hw_is_gem(mem, native_io)) {
+ if (native_io)
+ queue_mask |= __raw_readl(mem + GEM_DCFG6) & 0xFF;
+ else
+ queue_mask |= readl_relaxed(mem + GEM_DCFG6) & 0xFF;
+
+ if (fls(queue_mask) != ffz(queue_mask)) {
+ dev_err(dev, "queue mask %#x has a hole\n", queue_mask);
+ return -EINVAL;
+ }
+ }
- /* bit 0 is never set but queue 0 always exists */
- *queue_mask |= readl_relaxed(mem + GEM_DCFG6) & 0xff;
- *num_queues = hweight32(*queue_mask);
+ return hweight32(queue_mask);
}
static void macb_clks_disable(struct clk *pclk, struct clk *hclk, struct clk *tx_clk,
@@ -4111,10 +4493,7 @@ static int macb_init(struct platform_device *pdev)
* register mapping but we don't want to test the queue index then
* compute the corresponding register offset at run time.
*/
- for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
- if (!(bp->queue_mask & (1 << hw_q)))
- continue;
-
+ for (hw_q = 0, q = 0; hw_q < bp->num_queues; ++hw_q) {
queue = &bp->queues[q];
queue->bp = bp;
spin_lock_init(&queue->tx_ptr_lock);
@@ -4128,12 +4507,6 @@ static int macb_init(struct platform_device *pdev)
queue->TBQP = GEM_TBQP(hw_q - 1);
queue->RBQP = GEM_RBQP(hw_q - 1);
queue->RBQS = GEM_RBQS(hw_q - 1);
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
- queue->TBQPH = GEM_TBQPH(hw_q - 1);
- queue->RBQPH = GEM_RBQPH(hw_q - 1);
- }
-#endif
} else {
/* queue0 uses legacy registers */
queue->ISR = MACB_ISR;
@@ -4142,14 +4515,12 @@ static int macb_init(struct platform_device *pdev)
queue->IMR = MACB_IMR;
queue->TBQP = MACB_TBQP;
queue->RBQP = MACB_RBQP;
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
- queue->TBQPH = MACB_TBQPH;
- queue->RBQPH = MACB_RBQPH;
- }
-#endif
}
+ queue->ENST_START_TIME = GEM_ENST_START_TIME(hw_q);
+ queue->ENST_ON_TIME = GEM_ENST_ON_TIME(hw_q);
+ queue->ENST_OFF_TIME = GEM_ENST_OFF_TIME(hw_q);
+
/* get irq: here we use the linux queue index, not the hardware
* queue index. the queue irq definitions in the device tree
* must remove the optional gaps that could exist in the
@@ -4193,8 +4564,11 @@ static int macb_init(struct platform_device *pdev)
/* Set features */
dev->hw_features = NETIF_F_SG;
- /* Check LSO capability */
- if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
+ /* Check LSO capability; runtime detection can be overridden by a cap
+ * flag if the hardware is known to be buggy
+ */
+ if (!(bp->caps & MACB_CAPS_NO_LSO) &&
+ GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
dev->hw_features |= MACB_NETIF_LSO;
/* Checksum offload is only available on gem with packet buffer */
@@ -4202,6 +4576,10 @@ static int macb_init(struct platform_device *pdev)
dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
if (bp->caps & MACB_CAPS_SG_DISABLED)
dev->hw_features &= ~NETIF_F_SG;
+ /* Enable HW_TC if hardware supports QBV */
+ if (bp->caps & MACB_CAPS_QBV)
+ dev->hw_features |= NETIF_F_HW_TC;
+
dev->features = dev->hw_features;
/* Check RX Flow Filters support.
@@ -4209,8 +4587,8 @@ static int macb_init(struct platform_device *pdev)
* each 4-tuple define requires 1 T2 screener reg + 3 compare regs
*/
reg = gem_readl(bp, DCFG8);
- bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
- GEM_BFEXT(T2SCR, reg));
+ bp->max_tuples = umin((GEM_BFEXT(SCR2CMP, reg) / 3),
+ GEM_BFEXT(T2SCR, reg));
INIT_LIST_HEAD(&bp->rx_fs_list.list);
if (bp->max_tuples > 0) {
/* also needs one ethtype match to check IPv4 */
@@ -4580,7 +4958,7 @@ static const struct net_device_ops at91ether_netdev_ops = {
.ndo_open = at91ether_open,
.ndo_stop = at91ether_close,
.ndo_start_xmit = at91ether_start_xmit,
- .ndo_get_stats = macb_get_stats,
+ .ndo_get_stats64 = macb_get_stats,
.ndo_set_rx_mode = macb_set_rx_mode,
.ndo_set_mac_address = eth_mac_addr,
.ndo_eth_ioctl = macb_ioctl,
@@ -4645,36 +5023,45 @@ static unsigned long fu540_macb_tx_recalc_rate(struct clk_hw *hw,
return mgmt->rate;
}
-static long fu540_macb_tx_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- if (WARN_ON(rate < 2500000))
- return 2500000;
- else if (rate == 2500000)
- return 2500000;
- else if (WARN_ON(rate < 13750000))
- return 2500000;
- else if (WARN_ON(rate < 25000000))
- return 25000000;
- else if (rate == 25000000)
- return 25000000;
- else if (WARN_ON(rate < 75000000))
- return 25000000;
- else if (WARN_ON(rate < 125000000))
- return 125000000;
- else if (rate == 125000000)
- return 125000000;
-
- WARN_ON(rate > 125000000);
+static int fu540_macb_tx_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ if (WARN_ON(req->rate < 2500000))
+ req->rate = 2500000;
+ else if (req->rate == 2500000)
+ req->rate = 2500000;
+ else if (WARN_ON(req->rate < 13750000))
+ req->rate = 2500000;
+ else if (WARN_ON(req->rate < 25000000))
+ req->rate = 25000000;
+ else if (req->rate == 25000000)
+ req->rate = 25000000;
+ else if (WARN_ON(req->rate < 75000000))
+ req->rate = 25000000;
+ else if (WARN_ON(req->rate < 125000000))
+ req->rate = 125000000;
+ else if (req->rate == 125000000)
+ req->rate = 125000000;
+ else if (WARN_ON(req->rate > 125000000))
+ req->rate = 125000000;
+ else
+ req->rate = 125000000;
- return 125000000;
+ return 0;
}
static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
- rate = fu540_macb_tx_round_rate(hw, rate, &parent_rate);
- if (rate != 125000000)
+ struct clk_rate_request req;
+ int ret;
+
+ clk_hw_init_rate_request(hw, &req, rate);
+ ret = fu540_macb_tx_determine_rate(hw, &req);
+ if (ret != 0)
+ return ret;
+
+ if (req.rate != 125000000)
iowrite32(1, mgmt->reg);
else
iowrite32(0, mgmt->reg);
@@ -4685,7 +5072,7 @@ static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops fu540_c000_ops = {
.recalc_rate = fu540_macb_tx_recalc_rate,
- .round_rate = fu540_macb_tx_round_rate,
+ .determine_rate = fu540_macb_tx_determine_rate,
.set_rate = fu540_macb_tx_set_rate,
};
@@ -4754,13 +5141,13 @@ static int init_reset_optional(struct platform_device *pdev)
if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
/* Ensure PHY device used in SGMII mode is ready */
- bp->sgmii_phy = devm_phy_optional_get(&pdev->dev, NULL);
+ bp->phy = devm_phy_optional_get(&pdev->dev, NULL);
- if (IS_ERR(bp->sgmii_phy))
- return dev_err_probe(&pdev->dev, PTR_ERR(bp->sgmii_phy),
+ if (IS_ERR(bp->phy))
+ return dev_err_probe(&pdev->dev, PTR_ERR(bp->phy),
"failed to get SGMII PHY\n");
- ret = phy_init(bp->sgmii_phy);
+ ret = phy_init(bp->phy);
if (ret)
return dev_err_probe(&pdev->dev, ret,
"failed to init SGMII PHY\n");
@@ -4789,7 +5176,7 @@ static int init_reset_optional(struct platform_device *pdev)
/* Fully reset controller at hardware level if mapped in device tree */
ret = device_reset_optional(&pdev->dev);
if (ret) {
- phy_exit(bp->sgmii_phy);
+ phy_exit(bp->phy);
return dev_err_probe(&pdev->dev, ret, "failed to reset controller");
}
@@ -4797,8 +5184,30 @@ static int init_reset_optional(struct platform_device *pdev)
err_out_phy_exit:
if (ret)
- phy_exit(bp->sgmii_phy);
+ phy_exit(bp->phy);
+
+ return ret;
+}
+
+static int eyeq5_init(struct platform_device *pdev)
+{
+ struct net_device *netdev = platform_get_drvdata(pdev);
+ struct macb *bp = netdev_priv(netdev);
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ bp->phy = devm_phy_get(dev, NULL);
+ if (IS_ERR(bp->phy))
+ return dev_err_probe(dev, PTR_ERR(bp->phy),
+ "failed to get PHY\n");
+
+ ret = phy_init(bp->phy);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to init PHY\n");
+ ret = macb_init(pdev);
+ if (ret)
+ phy_exit(bp->phy);
return ret;
}
@@ -4926,6 +5335,7 @@ static const struct macb_config mpfs_config = {
static const struct macb_config sama7g5_gem_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG |
+ MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII |
MACB_CAPS_MIIONRGMII | MACB_CAPS_GEM_HAS_PTP,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
@@ -4945,8 +5355,9 @@ static const struct macb_config sama7g5_emac_config = {
static const struct macb_config versal_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
- MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH | MACB_CAPS_NEED_TSUCLK |
- MACB_CAPS_QUEUE_DISABLE,
+ MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH |
+ MACB_CAPS_NEED_TSUCLK | MACB_CAPS_QUEUE_DISABLE |
+ MACB_CAPS_QBV,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = init_reset_optional,
@@ -4954,6 +5365,28 @@ static const struct macb_config versal_config = {
.usrio = &macb_default_usrio,
};
+static const struct macb_config eyeq5_config = {
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_QUEUE_DISABLE |
+ MACB_CAPS_NO_LSO,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = eyeq5_init,
+ .jumbo_max_len = 10240,
+ .usrio = &macb_default_usrio,
+};
+
+static const struct macb_config raspberrypi_rp1_config = {
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG |
+ MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
+ .usrio = &macb_default_usrio,
+ .jumbo_max_len = 10240,
+};
+
static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
{ .compatible = "cdns,macb" },
@@ -4974,6 +5407,8 @@ static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "microchip,mpfs-macb", .data = &mpfs_config },
{ .compatible = "microchip,sama7g5-gem", .data = &sama7g5_gem_config },
{ .compatible = "microchip,sama7g5-emac", .data = &sama7g5_emac_config },
+ { .compatible = "mobileye,eyeq5-gem", .data = &eyeq5_config },
+ { .compatible = "raspberrypi,rp1-gem", .data = &raspberrypi_rp1_config },
{ .compatible = "xlnx,zynqmp-gem", .data = &zynqmp_config},
{ .compatible = "xlnx,zynq-gem", .data = &zynq_config },
{ .compatible = "xlnx,versal-gem", .data = &versal_config},
@@ -4996,21 +5431,17 @@ static const struct macb_config default_gem_config = {
static int macb_probe(struct platform_device *pdev)
{
const struct macb_config *macb_config = &default_gem_config;
- int (*clk_init)(struct platform_device *, struct clk **,
- struct clk **, struct clk **, struct clk **,
- struct clk **) = macb_config->clk_init;
- int (*init)(struct platform_device *) = macb_config->init;
struct device_node *np = pdev->dev.of_node;
struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
struct clk *tsu_clk = NULL;
- unsigned int queue_mask, num_queues;
- bool native_io;
phy_interface_t interface;
struct net_device *dev;
struct resource *regs;
u32 wtrmrk_rst_val;
void __iomem *mem;
struct macb *bp;
+ int num_queues;
+ bool native_io;
int err, val;
mem = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
@@ -5021,14 +5452,11 @@ static int macb_probe(struct platform_device *pdev)
const struct of_device_id *match;
match = of_match_node(macb_dt_ids, np);
- if (match && match->data) {
+ if (match && match->data)
macb_config = match->data;
- clk_init = macb_config->clk_init;
- init = macb_config->init;
- }
}
- err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk);
+ err = macb_config->clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk);
if (err)
return err;
@@ -5039,7 +5467,12 @@ static int macb_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
native_io = hw_is_native_io(mem);
- macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
+ num_queues = macb_probe_queues(&pdev->dev, mem, native_io);
+ if (num_queues < 0) {
+ err = num_queues;
+ goto err_disable_clocks;
+ }
+
dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
if (!dev) {
err = -ENOMEM;
@@ -5063,16 +5496,13 @@ static int macb_probe(struct platform_device *pdev)
bp->macb_reg_writel = hw_writel;
}
bp->num_queues = num_queues;
- bp->queue_mask = queue_mask;
- if (macb_config)
- bp->dma_burst_length = macb_config->dma_burst_length;
+ bp->dma_burst_length = macb_config->dma_burst_length;
bp->pclk = pclk;
bp->hclk = hclk;
bp->tx_clk = tx_clk;
bp->rx_clk = rx_clk;
bp->tsu_clk = tsu_clk;
- if (macb_config)
- bp->jumbo_max_len = macb_config->jumbo_max_len;
+ bp->jumbo_max_len = macb_config->jumbo_max_len;
if (!hw_is_gem(bp->regs, bp->native_io))
bp->max_tx_length = MACB_MAX_TX_LEN;
@@ -5106,14 +5536,19 @@ static int macb_probe(struct platform_device *pdev)
}
}
spin_lock_init(&bp->lock);
+ spin_lock_init(&bp->stats_lock);
/* setup capabilities */
macb_configure_caps(bp, macb_config);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
- bp->hw_dma_cap |= HW_DMA_CAP_64B;
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
+ if (err) {
+ dev_err(&pdev->dev, "failed to set DMA mask\n");
+ goto err_out_free_netdev;
+ }
+ bp->caps |= MACB_CAPS_DMA_64B;
}
#endif
platform_set_drvdata(pdev, dev);
@@ -5161,7 +5596,7 @@ static int macb_probe(struct platform_device *pdev)
bp->phy_interface = interface;
/* IP specific init */
- err = init(pdev);
+ err = macb_config->init(pdev);
if (err)
goto err_out_free_netdev;
@@ -5183,7 +5618,6 @@ static int macb_probe(struct platform_device *pdev)
macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
dev->base_addr, dev->irq, dev->dev_addr);
- pm_runtime_mark_last_busy(&bp->pdev->dev);
pm_runtime_put_autosuspend(&bp->pdev->dev);
return 0;
@@ -5193,7 +5627,7 @@ err_out_unregister_mdio:
mdiobus_free(bp->mii_bus);
err_out_phy_exit:
- phy_exit(bp->sgmii_phy);
+ phy_exit(bp->phy);
err_out_free_netdev:
free_netdev(dev);
@@ -5216,19 +5650,16 @@ static void macb_remove(struct platform_device *pdev)
if (dev) {
bp = netdev_priv(dev);
- phy_exit(bp->sgmii_phy);
+ unregister_netdev(dev);
+ phy_exit(bp->phy);
mdiobus_unregister(bp->mii_bus);
mdiobus_free(bp->mii_bus);
- unregister_netdev(dev);
+ device_set_wakeup_enable(&bp->pdev->dev, 0);
cancel_work_sync(&bp->hresp_err_bh_work);
pm_runtime_disable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
- if (!pm_runtime_suspended(&pdev->dev)) {
- macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk,
- bp->rx_clk, bp->tsu_clk);
- pm_runtime_set_suspended(&pdev->dev);
- }
+ pm_runtime_set_suspended(&pdev->dev);
phylink_destroy(bp->phylink);
free_netdev(dev);
}
@@ -5247,7 +5678,7 @@ static int __maybe_unused macb_suspend(struct device *dev)
u32 tmp;
if (!device_may_wakeup(&bp->dev->dev))
- phy_exit(bp->sgmii_phy);
+ phy_exit(bp->phy);
if (!netif_running(netdev))
return 0;
@@ -5268,6 +5699,11 @@ static int __maybe_unused macb_suspend(struct device *dev)
*/
tmp = macb_readl(bp, NCR);
macb_writel(bp, NCR, tmp & ~(MACB_BIT(TE) | MACB_BIT(RE)));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE))
+ macb_writel(bp, RBQPH,
+ upper_32_bits(bp->rx_ring_tieoff_dma));
+#endif
for (q = 0, queue = bp->queues; q < bp->num_queues;
++q, ++queue) {
/* Disable RX queues */
@@ -5277,10 +5713,6 @@ static int __maybe_unused macb_suspend(struct device *dev)
/* Tie off RX queues */
queue_writel(queue, RBQP,
lower_32_bits(bp->rx_ring_tieoff_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- queue_writel(queue, RBQPH,
- upper_32_bits(bp->rx_ring_tieoff_dma));
-#endif
}
/* Disable all interrupts */
queue_writel(queue, IDR, -1);
@@ -5375,7 +5807,7 @@ static int __maybe_unused macb_resume(struct device *dev)
int err;
if (!device_may_wakeup(&bp->dev->dev))
- phy_init(bp->sgmii_phy);
+ phy_init(bp->phy);
if (!netif_running(netdev))
return 0;
@@ -5479,6 +5911,20 @@ static int __maybe_unused macb_runtime_resume(struct device *dev)
return 0;
}
+static void macb_shutdown(struct platform_device *pdev)
+{
+ struct net_device *netdev = platform_get_drvdata(pdev);
+
+ rtnl_lock();
+
+ if (netif_running(netdev))
+ dev_close(netdev);
+
+ netif_device_detach(netdev);
+
+ rtnl_unlock();
+}
+
static const struct dev_pm_ops macb_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume)
SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL)
@@ -5492,6 +5938,7 @@ static struct platform_driver macb_driver = {
.of_match_table = of_match_ptr(macb_dt_ids),
.pm = &macb_pm_ops,
},
+ .shutdown = macb_shutdown,
};
module_platform_driver(macb_driver);
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index a63bf29c4fa8..c9e77819196e 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -28,14 +28,16 @@
static struct macb_dma_desc_ptp *macb_ptp_desc(struct macb *bp,
struct macb_dma_desc *desc)
{
- if (bp->hw_dma_cap == HW_DMA_CAP_PTP)
- return (struct macb_dma_desc_ptp *)
- ((u8 *)desc + sizeof(struct macb_dma_desc));
- if (bp->hw_dma_cap == HW_DMA_CAP_64B_PTP)
+ if (!macb_dma_ptp(bp))
+ return NULL;
+
+ if (macb_dma64(bp))
return (struct macb_dma_desc_ptp *)
((u8 *)desc + sizeof(struct macb_dma_desc)
+ sizeof(struct macb_dma_desc_64));
- return NULL;
+ else
+ return (struct macb_dma_desc_ptp *)
+ ((u8 *)desc + sizeof(struct macb_dma_desc));
}
static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts,
@@ -380,7 +382,7 @@ int gem_get_hwtst(struct net_device *dev,
struct macb *bp = netdev_priv(dev);
*tstamp_config = bp->tstamp_config;
- if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0)
+ if (!macb_dma_ptp(bp))
return -EOPNOTSUPP;
return 0;
@@ -407,7 +409,7 @@ int gem_set_hwtst(struct net_device *dev,
struct macb *bp = netdev_priv(dev);
u32 regval;
- if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0)
+ if (!macb_dma_ptp(bp))
return -EOPNOTSUPP;
switch (tstamp_config->tx_type) {
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index ca742cc146d7..7dae5aad3689 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -70,8 +70,8 @@ config LIQUIDIO
depends on 64BIT && PCI
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
+ select CRC32
select FW_LOADER
- select LIBCRC32C
select LIQUIDIO_CORE
select NET_DEVLINK
help
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
index 9fd717b9cf69..61e261657073 100644
--- a/drivers/net/ethernet/cavium/common/cavium_ptp.c
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
@@ -209,7 +209,7 @@ static int cavium_ptp_enable(struct ptp_clock_info *ptp_info,
return -EOPNOTSUPP;
}
-static u64 cavium_ptp_cc_read(const struct cyclecounter *cc)
+static u64 cavium_ptp_cc_read(struct cyclecounter *cc)
{
struct cavium_ptp *clock =
container_of(cc, struct cavium_ptp, cycle_counter);
@@ -239,12 +239,11 @@ static int cavium_ptp_probe(struct pci_dev *pdev,
if (err)
goto error_free;
- err = pcim_iomap_regions(pdev, 1 << PCI_PTP_BAR_NO, pci_name(pdev));
+ clock->reg_base = pcim_iomap_region(pdev, PCI_PTP_BAR_NO, pci_name(pdev));
+ err = PTR_ERR_OR_ZERO(clock->reg_base);
if (err)
goto error_free;
- clock->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO];
-
spin_lock_init(&clock->spin_lock);
cc = &clock->cycle_counter;
@@ -292,7 +291,7 @@ error_stop:
clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);
- pcim_iounmap_regions(pdev, 1 << PCI_PTP_BAR_NO);
+ pcim_iounmap_region(pdev, PCI_PTP_BAR_NO);
error_free:
devm_kfree(dev, clock);
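For reference, the managed single-BAR request/map pattern used in the cavium_ptp probe hunk above boils down to the following sketch; the device name and BAR number here are illustrative, not taken from this driver.

#include <linux/err.h>
#include <linux/pci.h>

/* Sketch: request and iomap one BAR with the managed helper.  Unlike the old
 * pcim_iomap_regions()/pcim_iomap_table() pair, pcim_iomap_region() returns
 * the mapping directly, or an ERR_PTR() on failure.
 */
static int example_map_bar(struct pci_dev *pdev, void __iomem **base)
{
        void __iomem *regs;

        regs = pcim_iomap_region(pdev, 0, "example-driver");
        if (IS_ERR(regs))
                return PTR_ERR(regs);

        *base = regs;
        return 0;
}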
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index 9ad49aea2673..75f22f74774c 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -49,7 +49,7 @@ static int cn23xx_pf_soft_reset(struct octeon_device *oct)
lio_pci_readq(oct, CN23XX_RST_SOFT_RST);
lio_pci_writeq(oct, 1, CN23XX_RST_SOFT_RST);
- /* Wait for 100ms as Octeon resets. */
+ /* Wait for 100ms as Octeon resets */
mdelay(100);
if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)) {
@@ -61,7 +61,7 @@ static int cn23xx_pf_soft_reset(struct octeon_device *oct)
dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: Reset completed\n",
oct->octeon_id);
- /* restore the reset value*/
+ /* Restore the reset value */
octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF);
return 0;
@@ -121,7 +121,7 @@ u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us)
oqticks_per_us /= 1024;
/* time_intr is in microseconds. The next 2 steps gives the oq ticks
- * corressponding to time_intr.
+ * corresponding to time_intr.
*/
oqticks_per_us *= time_intr_in_us;
oqticks_per_us /= 1000;
@@ -136,11 +136,11 @@ static void cn23xx_setup_global_mac_regs(struct octeon_device *oct)
u64 reg_val;
u64 temp;
- /* programming SRN and TRS for each MAC(0..3) */
+ /* Programming SRN and TRS for each MAC(0..3) */
dev_dbg(&oct->pci_dev->dev, "%s:Using pcie port %d\n",
__func__, mac_no);
- /* By default, mapping all 64 IOQs to a single MACs */
+ /* By default, map all 64 IOQs to a single MAC */
reg_val =
octeon_read_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num));
@@ -164,7 +164,7 @@ static void cn23xx_setup_global_mac_regs(struct octeon_device *oct)
temp = oct->sriov_info.max_vfs & 0xff;
reg_val |= (temp << CN23XX_PKT_MAC_CTL_RINFO_NVFS_BIT_POS);
- /* write these settings to MAC register */
+ /* Write these settings to MAC register */
octeon_write_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num),
reg_val);
@@ -183,10 +183,10 @@ static int cn23xx_reset_io_queues(struct octeon_device *oct)
srn = oct->sriov_info.pf_srn;
ern = srn + oct->sriov_info.num_pf_rings;
- /*As per HRM reg description, s/w cant write 0 to ENB. */
- /*to make the queue off, need to set the RST bit. */
+ /* As per HRM reg description, s/w can't write 0 to ENB. */
+ /* We need to set the RST bit to turn the queue off. */
- /* Reset the Enable bit for all the 64 IQs. */
+ /* Reset the enable bit for all the 64 IQs. */
for (q_no = srn; q_no < ern; q_no++) {
/* set RST bit to 1. This bit applies to both IQ and OQ */
d64 = octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
@@ -194,7 +194,7 @@ static int cn23xx_reset_io_queues(struct octeon_device *oct)
octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), d64);
}
- /*wait until the RST bit is clear or the RST and quite bits are set*/
+ /* Wait until the RST bit is clear or the RST and quiet bits are set */
for (q_no = srn; q_no < ern; q_no++) {
u64 reg_val = octeon_read_csr64(oct,
CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
@@ -245,15 +245,15 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
if (cn23xx_reset_io_queues(oct))
return -1;
- /** Set the MAC_NUM and PVF_NUM in IQ_PKT_CONTROL reg
- * for all queues.Only PF can set these bits.
+ /* Set the MAC_NUM and PVF_NUM in IQ_PKT_CONTROL reg
+ * for all queues. Only PF can set these bits.
* bits 29:30 indicate the MAC num.
* bits 32:47 indicate the PVF num.
*/
for (q_no = 0; q_no < ern; q_no++) {
reg_val = (u64)oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;
- /* for VF assigned queues. */
+ /* For VF assigned queues. */
if (q_no < oct->sriov_info.pf_srn) {
vf_num = q_no / oct->sriov_info.rings_per_vf;
vf_num += 1; /* VF1, VF2,........ */
@@ -268,7 +268,7 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
reg_val);
}
- /* Select ES, RO, NS, RDSIZE,DPTR Fomat#0 for
+ /* Select ES, RO, NS, RDSIZE, DPTR Format#0 for
* pf queues
*/
for (q_no = srn; q_no < ern; q_no++) {
@@ -289,7 +289,7 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
reg_val);
- /* Set WMARK level for triggering PI_INT */
+ /* Set WMARK level to trigger PI_INT */
/* intr_threshold = CN23XX_DEF_IQ_INTR_THRESHOLD & */
intr_threshold = CFG_GET_IQ_INTR_PKT(cn23xx->conf) &
CN23XX_PKT_IN_DONE_WMARK_MASK;
@@ -354,7 +354,7 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
/* set the ES bit */
reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES);
- /* write all the selected settings */
+ /* Write all the selected settings */
octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no), reg_val);
/* Enabling these interrupt in oct->fn_list.enable_interrupt()
@@ -373,7 +373,7 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
/** Setting the water mark level for pko back pressure **/
writeq(0x40, (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_WMARK);
- /** Disabling setting OQs in reset when ring has no dorebells
+ /* Disabling setting OQs in reset when ring has no doorbells
* enabling this will cause of head of line blocking
*/
/* Do it only for pass1.1. and pass1.2 */
@@ -383,7 +383,7 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
CN23XX_SLI_GBL_CONTROL) | 0x2,
(u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_GBL_CONTROL);
- /** Enable channel-level backpressure */
+ /** Enable channel-level backpressure **/
if (oct->pf_num)
writeq(0xffffffffffffffffULL,
(u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN2_W1S);
@@ -396,7 +396,7 @@ static int cn23xx_setup_pf_device_regs(struct octeon_device *oct)
{
cn23xx_enable_error_reporting(oct);
- /* program the MAC(0..3)_RINFO before setting up input/output regs */
+ /* Program the MAC(0..3)_RINFO before setting up input/output regs */
cn23xx_setup_global_mac_regs(oct);
if (cn23xx_pf_setup_global_input_regs(oct))
@@ -410,7 +410,7 @@ static int cn23xx_setup_pf_device_regs(struct octeon_device *oct)
octeon_write_csr64(oct, CN23XX_SLI_WINDOW_CTL,
CN23XX_SLI_WINDOW_CTL_DEFAULT);
- /* set SLI_PKT_IN_JABBER to handle large VXLAN packets */
+ /* Set SLI_PKT_IN_JABBER to handle large VXLAN packets */
octeon_write_csr64(oct, CN23XX_SLI_PKT_IN_JABBER, CN23XX_INPUT_JABBER);
return 0;
}
@@ -574,7 +574,7 @@ static int cn23xx_setup_pf_mbox(struct octeon_device *oct)
mbox->mbox_read_reg = (u8 *)oct->mmio[0].hw_addr +
CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q_no, 1);
- /*Mail Box Thread creation*/
+ /* Mail Box Thread creation */
INIT_DELAYED_WORK(&mbox->mbox_poll_wk.work,
cn23xx_pf_mbox_thread);
mbox->mbox_poll_wk.ctxptr = (void *)mbox;
@@ -626,7 +626,7 @@ static int cn23xx_enable_io_queues(struct octeon_device *oct)
ern = srn + oct->num_iqs;
for (q_no = srn; q_no < ern; q_no++) {
- /* set the corresponding IQ IS_64B bit */
+ /* Set the corresponding IQ IS_64B bit */
if (oct->io_qmask.iq64B & BIT_ULL(q_no - srn)) {
reg_val = octeon_read_csr64(
oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
@@ -635,7 +635,7 @@ static int cn23xx_enable_io_queues(struct octeon_device *oct)
oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
}
- /* set the corresponding IQ ENB bit */
+ /* Set the corresponding IQ ENB bit */
if (oct->io_qmask.iq & BIT_ULL(q_no - srn)) {
/* IOQs are in reset by default in PEM2 mode,
* clearing reset bit
@@ -681,7 +681,7 @@ static int cn23xx_enable_io_queues(struct octeon_device *oct)
}
for (q_no = srn; q_no < ern; q_no++) {
u32 reg_val;
- /* set the corresponding OQ ENB bit */
+ /* Set the corresponding OQ ENB bit */
if (oct->io_qmask.oq & BIT_ULL(q_no - srn)) {
reg_val = octeon_read_csr(
oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
@@ -707,7 +707,7 @@ static void cn23xx_disable_io_queues(struct octeon_device *oct)
for (q_no = srn; q_no < ern; q_no++) {
loop = HZ;
- /* start the Reset for a particular ring */
+ /* Start the Reset for a particular ring */
WRITE_ONCE(d64, octeon_read_csr64(
oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
WRITE_ONCE(d64, READ_ONCE(d64) &
@@ -740,7 +740,7 @@ static void cn23xx_disable_io_queues(struct octeon_device *oct)
loop = HZ;
/* Wait until hardware indicates that the particular IQ
- * is out of reset.It given that SLI_PKT_RING_RST is
+ * is out of reset. Note that SLI_PKT_RING_RST is
* common for both IQs and OQs
*/
WRITE_ONCE(d64, octeon_read_csr64(
@@ -760,7 +760,7 @@ static void cn23xx_disable_io_queues(struct octeon_device *oct)
schedule_timeout_uninterruptible(1);
}
- /* clear the SLI_PKT(0..63)_CNTS[CNT] reg value */
+ /* Clear the SLI_PKT(0..63)_CNTS[CNT] reg value */
WRITE_ONCE(d32, octeon_read_csr(
oct, CN23XX_SLI_OQ_PKTS_SENT(q_no)));
octeon_write_csr(oct, CN23XX_SLI_OQ_PKTS_SENT(q_no),
@@ -793,7 +793,7 @@ static u64 cn23xx_pf_msix_interrupt_handler(void *dev)
if (!pkts_sent || (pkts_sent == 0xFFFFFFFFFFFFFFFFULL))
return ret;
- /* Write count reg in sli_pkt_cnts to clear these int.*/
+ /* Write count reg in sli_pkt_cnts to clear these int. */
if ((pkts_sent & CN23XX_INTR_PO_INT) ||
(pkts_sent & CN23XX_INTR_PI_INT)) {
if (pkts_sent & CN23XX_INTR_PO_INT)
@@ -908,7 +908,7 @@ static u32 cn23xx_bar1_idx_read(struct octeon_device *oct, u32 idx)
oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
}
-/* always call with lock held */
+/* Always call with lock held */
static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq)
{
u32 new_idx;
@@ -919,7 +919,7 @@ static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq)
iq->pkt_in_done = pkt_in_done;
/* Modulo of the new index with the IQ size will give us
- * the new index. The iq->reset_instr_cnt is always zero for
+ * the new index. The iq->reset_instr_cnt is always zero for
* cn23xx, so no extra adjustments are needed.
*/
new_idx = (iq->octeon_read_index +
@@ -934,8 +934,8 @@ static void cn23xx_enable_pf_interrupt(struct octeon_device *oct, u8 intr_flag)
struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
u64 intr_val = 0;
- /* Divide the single write to multiple writes based on the flag. */
- /* Enable Interrupt */
+ /* Divide the single write to multiple writes based on the flag. */
+ /* Enable Interrupts */
if (intr_flag == OCTEON_ALL_INTR) {
writeq(cn23xx->intr_mask64, cn23xx->intr_enb_reg64);
} else if (intr_flag & OCTEON_OUTPUT_INTR) {
@@ -990,7 +990,7 @@ static int cn23xx_get_pf_num(struct octeon_device *oct)
ret = 0;
- /** Read Function Dependency Link reg to get the function number */
+ /* Read Function Dependency Link reg to get the function number */
if (pci_read_config_dword(oct->pci_dev, CN23XX_PCIE_SRIOV_FDL,
&fdl_bit) == 0) {
oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) &
@@ -1003,13 +1003,13 @@ static int cn23xx_get_pf_num(struct octeon_device *oct)
* In this case, read the PF number from the
* SLI_PKT0_INPUT_CONTROL reg (written by f/w)
*/
- pkt0_in_ctl = octeon_read_csr64(oct,
- CN23XX_SLI_IQ_PKT_CONTROL64(0));
+ pkt0_in_ctl =
+ octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(0));
pfnum = (pkt0_in_ctl >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) &
CN23XX_PKT_INPUT_CTL_PF_NUM_MASK;
mac = (octeon_read_csr(oct, CN23XX_SLI_MAC_NUMBER)) & 0xff;
- /* validate PF num by reading RINFO; f/w writes RINFO.trs == 1*/
+ /* Validate PF num by reading RINFO; f/w writes RINFO.trs == 1 */
d64 = octeon_read_csr64(oct,
CN23XX_SLI_PKT_MAC_RINFO64(mac, pfnum));
trs = (int)(d64 >> CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS) & 0xff;
@@ -1208,53 +1208,14 @@ int setup_cn23xx_octeon_pf_device(struct octeon_device *oct)
}
EXPORT_SYMBOL_GPL(setup_cn23xx_octeon_pf_device);
-int validate_cn23xx_pf_config_info(struct octeon_device *oct,
- struct octeon_config *conf23xx)
-{
- if (CFG_GET_IQ_MAX_Q(conf23xx) > CN23XX_MAX_INPUT_QUEUES) {
- dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n",
- __func__, CFG_GET_IQ_MAX_Q(conf23xx),
- CN23XX_MAX_INPUT_QUEUES);
- return 1;
- }
-
- if (CFG_GET_OQ_MAX_Q(conf23xx) > CN23XX_MAX_OUTPUT_QUEUES) {
- dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n",
- __func__, CFG_GET_OQ_MAX_Q(conf23xx),
- CN23XX_MAX_OUTPUT_QUEUES);
- return 1;
- }
-
- if (CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_32BYTE_INSTR &&
- CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_64BYTE_INSTR) {
- dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n",
- __func__);
- return 1;
- }
-
- if (!CFG_GET_OQ_REFILL_THRESHOLD(conf23xx)) {
- dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
- __func__);
- return 1;
- }
-
- if (!(CFG_GET_OQ_INTR_TIME(conf23xx))) {
- dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
- __func__);
- return 1;
- }
-
- return 0;
-}
-
int cn23xx_fw_loaded(struct octeon_device *oct)
{
u64 val;
/* If there's more than one active PF on this NIC, then that
- * implies that the NIC firmware is loaded and running. This check
+ * implies that the NIC firmware is loaded and running. This check
* prevents a rare false negative that might occur if we only relied
- * on checking the SCR2_BIT_FW_LOADED flag. The false negative would
+ * on checking the SCR2_BIT_FW_LOADED flag. The false negative would
* happen if the PF driver sees SCR2_BIT_FW_LOADED as cleared even
* though the firmware was already loaded but still booting and has yet
* to set SCR2_BIT_FW_LOADED.
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
index 234b96b4f488..bbe9f3133b07 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
@@ -54,9 +54,6 @@ struct oct_vf_stats {
int setup_cn23xx_octeon_pf_device(struct octeon_device *oct);
-int validate_cn23xx_pf_config_info(struct octeon_device *oct,
- struct octeon_config *conf23xx);
-
u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
int cn23xx_sriov_config(struct octeon_device *oct);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 674c54831875..215dac201b4a 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -472,7 +472,7 @@ int setup_rx_oom_poll_fn(struct net_device *netdev)
q_no = lio->linfo.rxpciq[q].s.q_no;
wq = &lio->rxq_status_wq[q_no];
wq->wq = alloc_workqueue("rxq-oom-status",
- WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!wq->wq) {
dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 1d79f6eaa41f..0732440eeacd 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -526,7 +526,8 @@ static inline int setup_link_status_change_wq(struct net_device *netdev)
struct octeon_device *oct = lio->oct_dev;
lio->link_status_wq.wq = alloc_workqueue("link-status",
- WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM | WQ_PERCPU,
+ 0);
if (!lio->link_status_wq.wq) {
dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
return -1;
@@ -659,7 +660,8 @@ static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
struct octeon_device *oct = lio->oct_dev;
lio->sync_octeon_time_wq.wq =
- alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
+ alloc_workqueue("update-octeon-time",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!lio->sync_octeon_time_wq.wq) {
dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
return -1;
@@ -1734,7 +1736,7 @@ static inline int setup_tx_poll_fn(struct net_device *netdev)
struct octeon_device *oct = lio->oct_dev;
lio->txq_status_wq.wq = alloc_workqueue("txq-status",
- WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!lio->txq_status_wq.wq) {
dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
return -1;
@@ -2105,20 +2107,16 @@ liquidio_get_stats64(struct net_device *netdev,
lstats->tx_fifo_errors;
}
-/**
- * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl
- * @netdev: network device
- * @ifr: interface request
- */
-static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
+static int liquidio_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *conf,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config conf;
struct lio *lio = GET_LIO(netdev);
- if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
- return -EFAULT;
+ if (!lio->oct_dev->ptp_enable)
+ return -EOPNOTSUPP;
- switch (conf.tx_type) {
+ switch (conf->tx_type) {
case HWTSTAMP_TX_ON:
case HWTSTAMP_TX_OFF:
break;
@@ -2126,7 +2124,7 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
return -ERANGE;
}
- switch (conf.rx_filter) {
+ switch (conf->rx_filter) {
case HWTSTAMP_FILTER_NONE:
break;
case HWTSTAMP_FILTER_ALL:
@@ -2144,39 +2142,32 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
- conf.rx_filter = HWTSTAMP_FILTER_ALL;
+ conf->rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
}
- if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
+ if (conf->rx_filter == HWTSTAMP_FILTER_ALL)
ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
else
ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
- return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
+ return 0;
}
-/**
- * liquidio_ioctl - ioctl handler
- * @netdev: network device
- * @ifr: interface request
- * @cmd: command
- */
-static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+static int liquidio_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *conf)
{
struct lio *lio = GET_LIO(netdev);
- switch (cmd) {
- case SIOCSHWTSTAMP:
- if (lio->oct_dev->ptp_enable)
- return hwtstamp_ioctl(netdev, ifr);
- fallthrough;
- default:
- return -EOPNOTSUPP;
- }
+ /* TX timestamping is technically always on */
+ conf->tx_type = HWTSTAMP_TX_ON;
+ conf->rx_filter = ifstate_check(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED) ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+
+ return 0;
}
/**
@@ -3225,7 +3216,6 @@ static const struct net_device_ops lionetdevops = {
.ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
.ndo_change_mtu = liquidio_change_mtu,
- .ndo_eth_ioctl = liquidio_ioctl,
.ndo_fix_features = liquidio_fix_features,
.ndo_set_features = liquidio_set_features,
.ndo_set_vf_mac = liquidio_set_vf_mac,
@@ -3236,6 +3226,8 @@ static const struct net_device_ops lionetdevops = {
.ndo_set_vf_link_state = liquidio_set_vf_link_state,
.ndo_get_vf_stats = liquidio_get_vf_stats,
.ndo_get_port_parent_id = liquidio_get_port_parent_id,
+ .ndo_hwtstamp_get = liquidio_hwtstamp_get,
+ .ndo_hwtstamp_set = liquidio_hwtstamp_set,
};
/**
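The lio_main.c conversion above follows the ndo_hwtstamp_get()/ndo_hwtstamp_set() pattern, in which the core handles the user-space copies that the SIOCSHWTSTAMP ioctl path used to do by hand and the driver only validates and applies a kernel_hwtstamp_config. A minimal, driver-agnostic sketch (function names are illustrative):

#include <linux/errno.h>
#include <linux/net_tstamp.h>
#include <linux/netdevice.h>

/* Sketch of an ndo_hwtstamp_set/get pair.  The ioctl/netlink plumbing and the
 * copies to/from user space are done by the core, so the handlers just
 * inspect and update the config they are handed.
 */
static int example_hwtstamp_set(struct net_device *netdev,
                                struct kernel_hwtstamp_config *config,
                                struct netlink_ext_ack *extack)
{
        if (config->tx_type != HWTSTAMP_TX_OFF &&
            config->tx_type != HWTSTAMP_TX_ON)
                return -ERANGE;

        /* Report back what is actually applied by the hardware. */
        if (config->rx_filter != HWTSTAMP_FILTER_NONE)
                config->rx_filter = HWTSTAMP_FILTER_ALL;

        /* ... program the hardware here ... */
        return 0;
}

static int example_hwtstamp_get(struct net_device *netdev,
                                struct kernel_hwtstamp_config *config)
{
        config->tx_type = HWTSTAMP_TX_ON;
        config->rx_filter = HWTSTAMP_FILTER_NONE;
        return 0;
}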
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 62c2eadc33e3..e02942dbbcce 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -304,7 +304,8 @@ static int setup_link_status_change_wq(struct net_device *netdev)
struct octeon_device *oct = lio->oct_dev;
lio->link_status_wq.wq = alloc_workqueue("link-status",
- WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM | WQ_PERCPU,
+ 0);
if (!lio->link_status_wq.wq) {
dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
return -1;
@@ -1235,20 +1236,13 @@ liquidio_get_stats64(struct net_device *netdev,
lstats->tx_carrier_errors;
}
-/**
- * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl
- * @netdev: network device
- * @ifr: interface request
- */
-static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
+static int liquidio_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *conf,
+ struct netlink_ext_ack *extack)
{
struct lio *lio = GET_LIO(netdev);
- struct hwtstamp_config conf;
-
- if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
- return -EFAULT;
- switch (conf.tx_type) {
+ switch (conf->tx_type) {
case HWTSTAMP_TX_ON:
case HWTSTAMP_TX_OFF:
break;
@@ -1256,7 +1250,7 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
return -ERANGE;
}
- switch (conf.rx_filter) {
+ switch (conf->rx_filter) {
case HWTSTAMP_FILTER_NONE:
break;
case HWTSTAMP_FILTER_ALL:
@@ -1274,35 +1268,31 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
- conf.rx_filter = HWTSTAMP_FILTER_ALL;
+ conf->rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
}
- if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
+ if (conf->rx_filter == HWTSTAMP_FILTER_ALL)
ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
else
ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
- return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
+ return 0;
}
-/**
- * liquidio_ioctl - ioctl handler
- * @netdev: network device
- * @ifr: interface request
- * @cmd: command
- */
-static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+static int liquidio_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *conf)
{
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return hwtstamp_ioctl(netdev, ifr);
- default:
- return -EOPNOTSUPP;
- }
+ struct lio *lio = GET_LIO(netdev);
+
+ /* TX timestamping is technically always on */
+ conf->tx_type = HWTSTAMP_TX_ON;
+ conf->rx_filter = ifstate_check(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED) ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+ return 0;
}
static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf)
@@ -1880,9 +1870,10 @@ static const struct net_device_ops lionetdevops = {
.ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
.ndo_change_mtu = liquidio_change_mtu,
- .ndo_eth_ioctl = liquidio_ioctl,
.ndo_fix_features = liquidio_fix_features,
.ndo_set_features = liquidio_set_features,
+ .ndo_hwtstamp_get = liquidio_hwtstamp_get,
+ .ndo_hwtstamp_set = liquidio_hwtstamp_set,
};
static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 6b6cb73482d7..1753bb87dfbd 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -1433,22 +1433,6 @@ int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout)
}
EXPORT_SYMBOL_GPL(octeon_wait_for_ddr_init);
-/* Get the octeon id assigned to the octeon device passed as argument.
- * This function is exported to other modules.
- * @param dev - octeon device pointer passed as a void *.
- * @return octeon device id
- */
-int lio_get_device_id(void *dev)
-{
- struct octeon_device *octeon_dev = (struct octeon_device *)dev;
- u32 i;
-
- for (i = 0; i < MAX_OCTEON_DEVICES; i++)
- if (octeon_device[i] == octeon_dev)
- return octeon_dev->octeon_id;
- return -1;
-}
-
void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
{
u64 instr_cnt;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index d26364c2ac81..19344b21f8fb 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -705,13 +705,6 @@ octeon_get_dispatch(struct octeon_device *octeon_dev, u16 opcode,
*/
struct octeon_device *lio_get_device(u32 octeon_id);
-/** Get the octeon id assigned to the octeon device passed as argument.
- * This function is exported to other modules.
- * @param dev - octeon device pointer passed as a void *.
- * @return octeon device id
- */
-int lio_get_device_id(void *dev);
-
/** Read windowed register.
* @param oct - pointer to the Octeon device.
* @param addr - Address of the register to read.
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
index 5b4cb725f60f..953edf0c7096 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -157,7 +157,7 @@ err_release_region:
response of the request.
* 0: the request will wait until its response gets back
* from the firmware within LIO_SC_MAX_TMO_MS milli sec.
- * It the response does not return within
+ * If the response does not return within
* LIO_SC_MAX_TMO_MS milli sec, lio_process_ordered_list()
* will move the request to zombie response list.
*
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
index 87dd6f89ce51..c139fc423764 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
@@ -268,7 +268,7 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
* @param oct - octeon device pointer
* @param ndata - control structure with queueing, and buffer information
*
- * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if it the
+ * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if the
* queue should be stopped, and IQ_SEND_OK if it sent okay.
*/
int octnet_send_nic_data_pkt(struct octeon_device *oct,
@@ -278,7 +278,7 @@ int octnet_send_nic_data_pkt(struct octeon_device *oct,
/** Send a NIC control packet to the device
* @param oct - octeon device pointer
* @param nctrl - control structure with command, timout, and callback info
- * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if it the
+ * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if the
* queue should be stopped, and IQ_SEND_OK if it sent okay.
*/
int
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index de8a6ce86ad7..d7cfb20eea00 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -126,13 +126,13 @@ int octeon_init_instr_queue(struct octeon_device *oct,
oct->io_qmask.iq |= BIT_ULL(iq_no);
/* Set the 32B/64B mode for each input queue */
- oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
+ oct->io_qmask.iq64B |= ((u64)(conf->instr_type == 64) << iq_no);
iq->iqcmd_64B = (conf->instr_type == 64);
oct->fn_list.setup_iq_regs(oct, iq_no);
oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db",
- WQ_MEM_RECLAIM,
+ WQ_MEM_RECLAIM | WQ_PERCPU,
0);
if (!oct->check_db_wq[iq_no].wq) {
vfree(iq->request_list);
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c
index 861050966e18..de1a8335b545 100644
--- a/drivers/net/ethernet/cavium/liquidio/response_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c
@@ -39,7 +39,8 @@ int octeon_setup_response_list(struct octeon_device *oct)
}
spin_lock_init(&oct->cmd_resp_wqlock);
- oct->dma_comp_wq.wq = alloc_workqueue("dma-comp", WQ_MEM_RECLAIM, 0);
+ oct->dma_comp_wq.wq = alloc_workqueue("dma-comp",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!oct->dma_comp_wq.wq) {
dev_err(&oct->pci_dev->dev, "failed to create wq thread\n");
return -ENOMEM;
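The alloc_workqueue() hunks in these liquidio files only add WQ_PERCPU to keep the existing per-CPU behavior explicit; a minimal sketch of that allocation/teardown pattern (names are illustrative):

#include <linux/errno.h>
#include <linux/workqueue.h>

/* Sketch: an explicitly per-CPU, memory-reclaim-safe workqueue, mirroring the
 * WQ_MEM_RECLAIM | WQ_PERCPU allocations added above.
 */
static struct workqueue_struct *example_wq;

static int example_wq_init(void)
{
        example_wq = alloc_workqueue("example-status",
                                     WQ_MEM_RECLAIM | WQ_PERCPU, 0);
        return example_wq ? 0 : -ENOMEM;
}

static void example_wq_exit(void)
{
        destroy_workqueue(example_wq);
}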
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 393b9951490a..c190fc6538d4 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -690,19 +690,16 @@ static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
return IRQ_HANDLED;
}
-static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
- struct ifreq *rq, int cmd)
+static int octeon_mgmt_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct octeon_mgmt *p = netdev_priv(netdev);
- struct hwtstamp_config config;
- union cvmx_mio_ptp_clock_cfg ptp;
union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
+ union cvmx_mio_ptp_clock_cfg ptp;
bool have_hw_timestamps = false;
- if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
- return -EFAULT;
-
- /* Check the status of hardware for tiemstamps */
+ /* Check the status of hardware for timestamps */
if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
/* Get the current state of the PTP clock */
ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);
@@ -733,10 +730,12 @@ static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
have_hw_timestamps = true;
}
- if (!have_hw_timestamps)
+ if (!have_hw_timestamps) {
+ NL_SET_ERR_MSG_MOD(extack, "HW doesn't support timestamping");
return -EINVAL;
+ }
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
break;
@@ -744,7 +743,7 @@ static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
p->has_rx_tstamp = false;
rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
@@ -766,33 +765,34 @@ static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
- p->has_rx_tstamp = have_hw_timestamps;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- if (p->has_rx_tstamp) {
- rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
- rxx_frm_ctl.s.ptp_mode = 1;
- cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
- }
+ p->has_rx_tstamp = true;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
+ rxx_frm_ctl.s.ptp_mode = 1;
+ cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
break;
default:
return -ERANGE;
}
- if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
- return -EFAULT;
-
return 0;
}
-static int octeon_mgmt_ioctl(struct net_device *netdev,
- struct ifreq *rq, int cmd)
+static int octeon_mgmt_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
- default:
- return phy_do_ioctl(netdev, rq, cmd);
- }
+ struct octeon_mgmt *p = netdev_priv(netdev);
+
+ /* Check the status of hardware for timestamps */
+ if (!OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ return -EINVAL;
+
+ config->tx_type = HWTSTAMP_TX_ON;
+ config->rx_filter = p->has_rx_tstamp ?
+ HWTSTAMP_FILTER_ALL :
+ HWTSTAMP_FILTER_NONE;
+
+ return 0;
}
static void octeon_mgmt_disable_link(struct octeon_mgmt *p)
@@ -1370,11 +1370,13 @@ static const struct net_device_ops octeon_mgmt_ops = {
.ndo_start_xmit = octeon_mgmt_xmit,
.ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
.ndo_set_mac_address = octeon_mgmt_set_mac_address,
- .ndo_eth_ioctl = octeon_mgmt_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl,
.ndo_change_mtu = octeon_mgmt_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = octeon_mgmt_poll_controller,
#endif
+ .ndo_hwtstamp_get = octeon_mgmt_hwtstamp_get,
+ .ndo_hwtstamp_set = octeon_mgmt_hwtstamp_set,
};
static int octeon_mgmt_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index d0ff0c170b1a..413028bdcacb 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -516,8 +516,8 @@ static int nicvf_set_ringparam(struct net_device *netdev,
return 0;
}
-static int nicvf_get_rss_hash_opts(struct nicvf *nic,
- struct ethtool_rxnfc *info)
+static int nicvf_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
{
info->data = 0;
@@ -541,36 +541,29 @@ static int nicvf_get_rss_hash_opts(struct nicvf *nic,
return 0;
}
-static int nicvf_get_rxnfc(struct net_device *dev,
- struct ethtool_rxnfc *info, u32 *rules)
+static u32 nicvf_get_rx_ring_count(struct net_device *dev)
{
struct nicvf *nic = netdev_priv(dev);
- int ret = -EOPNOTSUPP;
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = nic->rx_queues;
- ret = 0;
- break;
- case ETHTOOL_GRXFH:
- return nicvf_get_rss_hash_opts(nic, info);
- default:
- break;
- }
- return ret;
+ return nic->rx_queues;
}
-static int nicvf_set_rss_hash_opts(struct nicvf *nic,
- struct ethtool_rxnfc *info)
+static int nicvf_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *info,
+ struct netlink_ext_ack *extack)
{
- struct nicvf_rss_info *rss = &nic->rss_info;
- u64 rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
+ struct nicvf *nic = netdev_priv(dev);
+ struct nicvf_rss_info *rss;
+ u64 rss_cfg;
+
+ rss = &nic->rss_info;
+ rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
if (!rss->enable)
netdev_err(nic->netdev,
"RSS is disabled, hash cannot be set\n");
- netdev_info(nic->netdev, "Set RSS flow type = %d, data = %lld\n",
+ netdev_info(nic->netdev, "Set RSS flow type = %d, data = %u\n",
info->flow_type, info->data);
if (!(info->data & RXH_IP_SRC) || !(info->data & RXH_IP_DST))
@@ -628,19 +621,6 @@ static int nicvf_set_rss_hash_opts(struct nicvf *nic,
return 0;
}
-static int nicvf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
-{
- struct nicvf *nic = netdev_priv(dev);
-
- switch (info->cmd) {
- case ETHTOOL_SRXFH:
- return nicvf_set_rss_hash_opts(nic, info);
- default:
- break;
- }
- return -EOPNOTSUPP;
-}
-
static u32 nicvf_get_rxfh_key_size(struct net_device *netdev)
{
return RSS_HASH_KEY_SIZE * sizeof(u64);
@@ -871,12 +851,13 @@ static const struct ethtool_ops nicvf_ethtool_ops = {
.get_coalesce = nicvf_get_coalesce,
.get_ringparam = nicvf_get_ringparam,
.set_ringparam = nicvf_set_ringparam,
- .get_rxnfc = nicvf_get_rxnfc,
- .set_rxnfc = nicvf_set_rxnfc,
+ .get_rx_ring_count = nicvf_get_rx_ring_count,
.get_rxfh_key_size = nicvf_get_rxfh_key_size,
.get_rxfh_indir_size = nicvf_get_rxfh_indir_size,
.get_rxfh = nicvf_get_rxfh,
.set_rxfh = nicvf_set_rxfh,
+ .get_rxfh_fields = nicvf_get_rxfh_fields,
+ .set_rxfh_fields = nicvf_set_rxfh_fields,
.get_channels = nicvf_get_channels,
.set_channels = nicvf_set_channels,
.get_pauseparam = nicvf_get_pauseparam,
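The nicvf_ethtool.c conversion above splits what used to live under the ETHTOOL_GRXFH/ETHTOOL_SRXFH cases of .get_rxnfc/.set_rxnfc into dedicated .get_rxfh_fields/.set_rxfh_fields callbacks. A hedged, driver-agnostic sketch of the callback shape follows; the hash-field support shown is illustrative, not this hardware's.

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Sketch: RSS hash-field callbacks.  The core dispatches per request, so the
 * handlers only read or validate info->flow_type and info->data.
 */
static int example_get_rxfh_fields(struct net_device *dev,
                                   struct ethtool_rxfh_fields *info)
{
        info->data = 0;
        if (info->flow_type == TCP_V4_FLOW || info->flow_type == TCP_V6_FLOW)
                info->data = RXH_IP_SRC | RXH_IP_DST |
                             RXH_L4_B_0_1 | RXH_L4_B_2_3;
        return 0;
}

static int example_set_rxfh_fields(struct net_device *dev,
                                   const struct ethtool_rxfh_fields *info,
                                   struct netlink_ext_ack *extack)
{
        /* Reject anything the (hypothetical) hardware cannot hash on. */
        if (info->data & ~(RXH_IP_SRC | RXH_IP_DST |
                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
                return -EOPNOTSUPP;
        return 0;
}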
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index aebb9fef3f6e..0b6e30a8feb0 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1578,7 +1578,6 @@ napi_del:
static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
{
struct nicvf *nic = netdev_priv(netdev);
- int orig_mtu = netdev->mtu;
/* For now just support only the usual MTU sized frames,
* plus some headroom for VLAN, QinQ.
@@ -1589,15 +1588,10 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
return -EINVAL;
}
- WRITE_ONCE(netdev->mtu, new_mtu);
-
- if (!netif_running(netdev))
- return 0;
-
- if (nicvf_update_hw_max_frs(nic, new_mtu)) {
- netdev->mtu = orig_mtu;
+ if (netif_running(netdev) && nicvf_update_hw_max_frs(nic, new_mtu))
return -EINVAL;
- }
+
+ WRITE_ONCE(netdev->mtu, new_mtu);
return 0;
}
@@ -1905,18 +1899,18 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
}
}
-static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
+static int nicvf_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
struct nicvf *nic = netdev_priv(netdev);
- if (!nic->ptp_clock)
+ if (!nic->ptp_clock) {
+ NL_SET_ERR_MSG_MOD(extack, "HW timestamping is not supported");
return -ENODEV;
+ }
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
break;
@@ -1924,7 +1918,7 @@ static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
nic->hw_rx_tstamp = false;
break;
@@ -1943,7 +1937,7 @@ static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
nic->hw_rx_tstamp = true;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
@@ -1952,20 +1946,24 @@ static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
if (netif_running(netdev))
nicvf_config_hw_rx_tstamp(nic, nic->hw_rx_tstamp);
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
-
return 0;
}
-static int nicvf_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+static int nicvf_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return nicvf_config_hwtstamp(netdev, req);
- default:
- return -EOPNOTSUPP;
- }
+ struct nicvf *nic = netdev_priv(netdev);
+
+ if (!nic->ptp_clock)
+ return -ENODEV;
+
+ /* TX timestamping is technically always on */
+ config->tx_type = HWTSTAMP_TX_ON;
+ config->rx_filter = nic->hw_rx_tstamp ?
+ HWTSTAMP_FILTER_ALL :
+ HWTSTAMP_FILTER_NONE;
+
+ return 0;
}
static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
@@ -2087,8 +2085,9 @@ static const struct net_device_ops nicvf_netdev_ops = {
.ndo_fix_features = nicvf_fix_features,
.ndo_set_features = nicvf_set_features,
.ndo_bpf = nicvf_xdp,
- .ndo_eth_ioctl = nicvf_ioctl,
.ndo_set_rx_mode = nicvf_set_rx_mode,
+ .ndo_hwtstamp_get = nicvf_hwtstamp_get,
+ .ndo_hwtstamp_set = nicvf_hwtstamp_set,
};
static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 06397cc8bb36..5211759bfe47 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1389,11 +1389,9 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
}
- /* Check if timestamp is requested */
- if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
- skb_tx_timestamp(skb);
+ /* Check if hw timestamp is requested */
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
return;
- }
/* Tx timestamping not supported along with TSO, so ignore request */
if (skb_shinfo(skb)->gso_size)
@@ -1472,6 +1470,8 @@ static inline void nicvf_sq_doorbell(struct nicvf *nic, struct sk_buff *skb,
netdev_tx_sent_queue(txq, skb->len);
+ skb_tx_timestamp(skb);
+
/* make sure all memory stores are done before ringing doorbell */
smp_wmb();
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 608cc6af5af1..9efb60842ad1 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1429,9 +1429,9 @@ static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
{
struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
struct bgx *bgx = context;
- char bgx_sel[5];
+ char bgx_sel[7];
- snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id);
+ snprintf(bgx_sel, sizeof(bgx_sel), "BGX%d", bgx->bgx_id);
if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) {
pr_warn("Invalid link device\n");
return AE_OK;
@@ -1493,13 +1493,17 @@ static int bgx_init_of_phy(struct bgx *bgx)
* this cortina phy, for which there is no driver
* support, ignore it.
*/
- if (phy_np &&
- !of_device_is_compatible(phy_np, "cortina,cs4223-slice")) {
- /* Wait until the phy drivers are available */
- pd = of_phy_find_device(phy_np);
- if (!pd)
- goto defer;
- bgx->lmac[lmac].phydev = pd;
+ if (phy_np) {
+ if (!of_device_is_compatible(phy_np, "cortina,cs4223-slice")) {
+ /* Wait until the phy drivers are available */
+ pd = of_phy_find_device(phy_np);
+ if (!pd) {
+ of_node_put(phy_np);
+ goto defer;
+ }
+ bgx->lmac[lmac].phydev = pd;
+ }
+ of_node_put(phy_np);
}
lmac++;
@@ -1515,11 +1519,11 @@ defer:
* for phy devices we may have already found.
*/
while (lmac) {
+ lmac--;
if (bgx->lmac[lmac].phydev) {
put_device(&bgx->lmac[lmac].phydev->mdio.dev);
bgx->lmac[lmac].phydev = NULL;
}
- lmac--;
}
of_node_put(node);
return -EPROBE_DEFER;
@@ -1605,10 +1609,10 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return dev_err_probe(dev, err, "Failed to enable PCI device\n");
}
- err = pci_request_regions(pdev, DRV_NAME);
+ err = pcim_request_all_regions(pdev, DRV_NAME);
if (err) {
dev_err(dev, "PCI request regions failed 0x%x\n", err);
- goto err_disable_device;
+ goto err_zero_drv_data;
}
/* MAP configuration registers */
@@ -1616,7 +1620,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!bgx->reg_base) {
dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n");
err = -ENOMEM;
- goto err_release_regions;
+ goto err_zero_drv_data;
}
set_max_bgx_per_node(pdev);
@@ -1688,10 +1692,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_enable:
bgx_vnic[bgx->bgx_id] = NULL;
pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
-err_release_regions:
- pci_release_regions(pdev);
-err_disable_device:
- pci_disable_device(pdev);
+err_zero_drv_data:
pci_set_drvdata(pdev, NULL);
return err;
}
@@ -1710,8 +1711,6 @@ static void bgx_remove(struct pci_dev *pdev)
pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
bgx_vnic[bgx->bgx_id] = NULL;
- pci_release_regions(pdev);
- pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 3b7068832f95..4a0e2d2eb60a 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -351,7 +351,7 @@ static void set_msglevel(struct net_device *dev, u32 val)
adapter->msg_enable = val;
}
-static const char stats_strings[][ETH_GSTRING_LEN] = {
+static const char stats_strings[][ETH_GSTRING_LEN] __nonstring_array = {
"TxOctetsOK",
"TxOctetsBad",
"TxUnicastFramesOK",
diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
index cbfa03d5663a..f3ada6e7cdc5 100644
--- a/drivers/net/ethernet/chelsio/cxgb/pm3393.c
+++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
@@ -141,7 +141,7 @@ static int pm3393_interrupt_enable(struct cmac *cmac)
pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE,
0 /*SUNI1x10GEXP_BITMSK_TOP_INTE */ );
- /* TERMINATOR - PL_INTERUPTS_EXT */
+ /* TERMINATOR - PL_INTERRUPTS_EXT */
pl_intr = readl(cmac->adapter->regs + A_PL_ENABLE);
pl_intr |= F_PL_INTR_EXT;
writel(pl_intr, cmac->adapter->regs + A_PL_ENABLE);
@@ -179,7 +179,7 @@ static int pm3393_interrupt_disable(struct cmac *cmac)
elmer &= ~ELMER0_GP_BIT1;
t1_tpi_write(cmac->adapter, A_ELMER0_INT_ENABLE, elmer);
- /* TERMINATOR - PL_INTERUPTS_EXT */
+ /* TERMINATOR - PL_INTERRUPTS_EXT */
/* DO NOT DISABLE TERMINATOR's EXTERNAL INTERRUPTS. ANOTHER CHIP
* COULD WANT THEM ENABLED. We disable PM3393 at the ELMER level.
*/
@@ -222,7 +222,7 @@ static int pm3393_interrupt_clear(struct cmac *cmac)
elmer |= ELMER0_GP_BIT1;
t1_tpi_write(cmac->adapter, A_ELMER0_INT_CAUSE, elmer);
- /* TERMINATOR - PL_INTERUPTS_EXT
+ /* TERMINATOR - PL_INTERRUPTS_EXT
*/
pl_intr = readl(cmac->adapter->regs + A_PL_CAUSE);
pl_intr |= F_PL_INTR_EXT;
@@ -756,7 +756,7 @@ static int pm3393_mac_reset(adapter_t * adapter)
/* ??? If this fails, might be able to software reset the XAUI part
* and try to recover... thus saving us from doing another HW reset */
- /* Has the XAUI MABC PLL circuitry stablized? */
+ /* Has the XAUI MABC PLL circuitry stabilized? */
is_xaui_mabc_pll_locked =
(val & SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED);
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 861edff5ed89..5f354cf62cdd 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1922,7 +1922,7 @@ send:
static void sge_tx_reclaim_cb(struct timer_list *t)
{
int i;
- struct sge *sge = from_timer(sge, t, tx_reclaim_timer);
+ struct sge *sge = timer_container_of(sge, t, tx_reclaim_timer);
for (i = 0; i < SGE_CMDQ_N; ++i) {
struct cmdQ *q = &sge->cmdQ[i];
@@ -1984,9 +1984,9 @@ void t1_sge_stop(struct sge *sge)
readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
if (is_T2(sge->adapter))
- del_timer_sync(&sge->espibug_timer);
+ timer_delete_sync(&sge->espibug_timer);
- del_timer_sync(&sge->tx_reclaim_timer);
+ timer_delete_sync(&sge->tx_reclaim_timer);
if (sge->tx_sched)
tx_sched_stop(sge);
@@ -2017,7 +2017,7 @@ void t1_sge_start(struct sge *sge)
*/
static void espibug_workaround_t204(struct timer_list *t)
{
- struct sge *sge = from_timer(sge, t, espibug_timer);
+ struct sge *sge = timer_container_of(sge, t, espibug_timer);
struct adapter *adapter = sge->adapter;
unsigned int nports = adapter->params.nports;
u32 seop[MAX_NPORTS];
@@ -2060,7 +2060,7 @@ static void espibug_workaround_t204(struct timer_list *t)
static void espibug_workaround(struct timer_list *t)
{
- struct sge *sge = from_timer(sge, t, espibug_timer);
+ struct sge *sge = timer_container_of(sge, t, espibug_timer);
struct adapter *adapter = sge->adapter;
if (netif_running(adapter->port[0].dev)) {
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index f92a3550e480..3b1321c8ed14 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -2933,7 +2933,6 @@ static int t3_reenable_adapter(struct adapter *adapter)
}
pci_set_master(adapter->pdev);
pci_restore_state(adapter->pdev);
- pci_save_state(adapter->pdev);
/* Free sge resources */
t3_free_sge_resources(adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 9749d1239f58..5d5f3380ecca 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -176,43 +176,6 @@ again:
EXPORT_SYMBOL(t3_l2t_send_slow);
-void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e)
-{
-again:
- switch (e->state) {
- case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
- neigh_event_send(e->neigh, NULL);
- spin_lock_bh(&e->lock);
- if (e->state == L2T_STATE_STALE) {
- e->state = L2T_STATE_VALID;
- }
- spin_unlock_bh(&e->lock);
- return;
- case L2T_STATE_VALID: /* fast-path, send the packet on */
- return;
- case L2T_STATE_RESOLVING:
- spin_lock_bh(&e->lock);
- if (e->state != L2T_STATE_RESOLVING) {
- /* ARP already completed */
- spin_unlock_bh(&e->lock);
- goto again;
- }
- spin_unlock_bh(&e->lock);
-
- /*
- * Only the first packet added to the arpq should kick off
- * resolution. However, because the alloc_skb below can fail,
- * we allow each packet added to the arpq to retry resolution
- * as a way of recovering from transient memory exhaustion.
- * A better way would be to use a work request to retry L2T
- * entries when there's no memory.
- */
- neigh_event_send(e->neigh, NULL);
- }
-}
-
-EXPORT_SYMBOL(t3_l2t_send_event);
-
/*
* Allocate a free L2T entry. Must be called with l2t_data.lock held.
*/
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
index 646ca0bc25bd..33558f177497 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
@@ -113,7 +113,6 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
struct net_device *dev, const void *daddr);
int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
struct l2t_entry *e);
-void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
struct l2t_data *t3_init_l2t(unsigned int l2t_capacity);
int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 6268f96cb4aa..b59735d0e065 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -2906,7 +2906,7 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
*/
static void sge_timer_tx(struct timer_list *t)
{
- struct sge_qset *qs = from_timer(qs, t, tx_reclaim_timer);
+ struct sge_qset *qs = timer_container_of(qs, t, tx_reclaim_timer);
struct port_info *pi = netdev_priv(qs->netdev);
struct adapter *adap = pi->adapter;
unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
@@ -2947,7 +2947,7 @@ static void sge_timer_tx(struct timer_list *t)
static void sge_timer_rx(struct timer_list *t)
{
spinlock_t *lock;
- struct sge_qset *qs = from_timer(qs, t, rx_reclaim_timer);
+ struct sge_qset *qs = timer_container_of(qs, t, rx_reclaim_timer);
struct port_info *pi = netdev_priv(qs->netdev);
struct adapter *adap = pi->adapter;
u32 status;
@@ -3223,9 +3223,9 @@ void t3_stop_sge_timers(struct adapter *adap)
struct sge_qset *q = &adap->sge.qs[i];
if (q->tx_reclaim_timer.function)
- del_timer_sync(&q->tx_reclaim_timer);
+ timer_delete_sync(&q->tx_reclaim_timer);
if (q->rx_reclaim_timer.function)
- del_timer_sync(&q->rx_reclaim_timer);
+ timer_delete_sync(&q->rx_reclaim_timer);
}
}
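The timer changes in these chelsio files are mechanical: from_timer() becomes timer_container_of() and del_timer_sync() becomes timer_delete_sync(). A small standalone sketch of the current pattern, with structure and field names that are illustrative only:

#include <linux/jiffies.h>
#include <linux/timer.h>

/* Sketch: the callback recovers its containing structure from the
 * timer_list pointer, and teardown uses timer_delete_sync().
 */
struct example_ctx {
        struct timer_list reclaim_timer;
        unsigned long reclaimed;
};

static void example_timer_cb(struct timer_list *t)
{
        struct example_ctx *ctx = timer_container_of(ctx, t, reclaim_timer);

        ctx->reclaimed++;
        mod_timer(&ctx->reclaim_timer, jiffies + HZ / 2);
}

static void example_timer_start(struct example_ctx *ctx)
{
        timer_setup(&ctx->reclaim_timer, example_timer_cb, 0);
        mod_timer(&ctx->reclaim_timer, jiffies + HZ / 2);
}

static void example_timer_stop(struct example_ctx *ctx)
{
        timer_delete_sync(&ctx->reclaim_timer);
}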
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 75bd69ff61a8..f20f4bc58492 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -674,7 +674,7 @@ struct port_info {
struct cxgb_fcoe fcoe;
#endif /* CONFIG_CHELSIO_T4_FCOE */
bool rxtstamp; /* Enable TS */
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
bool ptp_enable;
struct sched_table *sched_tbl;
u32 eth_flags;
@@ -1211,9 +1211,6 @@ struct adapter {
struct timer_list flower_stats_timer;
struct work_struct flower_stats_work;
- /* Ethtool Dump */
- struct ethtool_dump eth_dump;
-
/* HMA */
struct hma_data hma;
@@ -1233,6 +1230,10 @@ struct adapter {
/* Ethtool n-tuple */
struct cxgb4_ethtool_filter *ethtool_filters;
+
+ /* Ethtool Dump */
+ /* Must be last - ends in a flex-array member. */
+ struct ethtool_dump eth_dump;
};
/* Support for "sched-class" command to allow a TX Scheduling Class to be
@@ -1315,7 +1316,7 @@ struct ch_sched_flowc {
* (value, mask) tuples. The associated ingress packet field matches the
* tuple when ((field & mask) == value). (Thus a wildcard "don't care" field
* rule can be constructed by specifying a tuple of (0, 0).) A filter rule
- * matches an ingress packet when all of the individual individual field
+ * matches an ingress packet when all of the individual field
* matching rules are true.
*
* Partial field masks are always valid, however, while it may be easy to
@@ -2076,7 +2077,7 @@ void t4_idma_monitor(struct adapter *adapter,
struct sge_idma_monitor_state *idma,
int hz, int ticks);
int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
- unsigned int naddr, u8 *addr);
+ u8 start, unsigned int naddr, u8 *addr);
void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
u32 start_index, bool sleep_ok);
void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 45d28a65347e..23326235d4ab 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -662,8 +662,8 @@ static unsigned int lmm_to_fw_caps(const unsigned long *link_mode_mask)
static int get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *link_ksettings)
{
- struct ethtool_link_settings_hdr *base = &link_ksettings->base;
struct port_info *pi = netdev_priv(dev);
+ struct ethtool_link_settings *base = &link_ksettings->base;
/* For the nonce, the Firmware doesn't send up Port State changes
* when the Virtual Interface attached to the Port is down. So
@@ -717,9 +717,9 @@ static int get_link_ksettings(struct net_device *dev,
static int set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *link_ksettings)
{
- const struct ethtool_link_settings_hdr *base = &link_ksettings->base;
struct port_info *pi = netdev_priv(dev);
struct link_config *lc = &pi->link_cfg;
+ const struct ethtool_link_settings *base = &link_ksettings->base;
struct link_config old_lc;
unsigned int fw_caps;
int ret = 0;
@@ -1730,6 +1730,60 @@ static int cxgb4_ntuple_get_filter(struct net_device *dev,
return 0;
}
+static int cxgb4_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ unsigned int v = pi->rss_mode;
+
+ info->data = 0;
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case UDP_V4_FLOW:
+ if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
+ (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case IPV4_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case UDP_V6_FLOW:
+ if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
+ (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case IPV6_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ }
+ return 0;
+}
+
static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rules)
{
@@ -1739,56 +1793,6 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
int ret = 0;
switch (info->cmd) {
- case ETHTOOL_GRXFH: {
- unsigned int v = pi->rss_mode;
-
- info->data = 0;
- switch (info->flow_type) {
- case TCP_V4_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case UDP_V4_FLOW:
- if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
- (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
- info->data = RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case SCTP_V4_FLOW:
- case AH_ESP_V4_FLOW:
- case IPV4_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case TCP_V6_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case UDP_V6_FLOW:
- if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
- (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
- info->data = RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case SCTP_V6_FLOW:
- case AH_ESP_V6_FLOW:
- case IPV6_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- }
- return 0;
- }
case ETHTOOL_GRXRINGS:
info->data = pi->nqsets;
return 0;
@@ -2199,6 +2203,7 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.get_rxfh_indir_size = get_rss_table_size,
.get_rxfh = get_rss_table,
.set_rxfh = set_rss_table,
+ .get_rxfh_fields = cxgb4_get_rxfh_fields,
.self_test = cxgb4_self_test,
.flash_device = set_flash,
.get_ts_info = get_ts_info,
@@ -2270,6 +2275,7 @@ int cxgb4_init_ethtool_filters(struct adapter *adap)
eth_filter->port[i].bmap = bitmap_zalloc(nentries, GFP_KERNEL);
if (!eth_filter->port[i].bmap) {
ret = -ENOMEM;
+ kvfree(eth_filter->port[i].loc_array);
goto free_eth_finfo;
}
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 97a261d5357e..043733c5c812 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -1799,7 +1799,10 @@ void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
struct adapter *adap = container_of(t, struct adapter, tids);
struct sk_buff *skb;
- WARN_ON(tid_out_of_range(&adap->tids, tid));
+ if (tid_out_of_range(&adap->tids, tid)) {
+ dev_err(adap->pdev_dev, "tid %d out of range\n", tid);
+ return;
+ }
if (t->tid_tab[tid - adap->tids.tid_base]) {
t->tid_tab[tid - adap->tids.tid_base] = NULL;
@@ -3039,12 +3042,87 @@ static void cxgb_get_stats(struct net_device *dev,
ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
}
+static int cxgb_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct port_info *pi = netdev_priv(dev);
+
+ *config = pi->tstamp_config;
+ return 0;
+}
+
+static int cxgb_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ if (is_t4(adapter->params.chip)) {
+ /* For T4 Adapters */
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ pi->rxtstamp = false;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ pi->rxtstamp = true;
+ break;
+ default:
+ return -ERANGE;
+ }
+ pi->tstamp_config = *config;
+ return 0;
+ }
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ pi->rxtstamp = false;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ cxgb4_ptprx_timestamping(pi, pi->port_id, PTP_TS_L4);
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ cxgb4_ptprx_timestamping(pi, pi->port_id, PTP_TS_L2_L4);
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ pi->rxtstamp = true;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (config->tx_type == HWTSTAMP_TX_OFF &&
+ config->rx_filter == HWTSTAMP_FILTER_NONE) {
+ if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0)
+ pi->ptp_enable = false;
+ }
+
+ if (config->rx_filter != HWTSTAMP_FILTER_NONE) {
+ if (cxgb4_ptp_redirect_rx_packet(adapter, pi) >= 0)
+ pi->ptp_enable = true;
+ }
+ pi->tstamp_config = *config;
+ return 0;
+}
+
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
unsigned int mbox;
int ret = 0, prtad, devad;
struct port_info *pi = netdev_priv(dev);
- struct adapter *adapter = pi->adapter;
struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
switch (cmd) {
@@ -3073,81 +3151,6 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
data->reg_num, data->val_in);
break;
- case SIOCGHWTSTAMP:
- return copy_to_user(req->ifr_data, &pi->tstamp_config,
- sizeof(pi->tstamp_config)) ?
- -EFAULT : 0;
- case SIOCSHWTSTAMP:
- if (copy_from_user(&pi->tstamp_config, req->ifr_data,
- sizeof(pi->tstamp_config)))
- return -EFAULT;
-
- if (!is_t4(adapter->params.chip)) {
- switch (pi->tstamp_config.tx_type) {
- case HWTSTAMP_TX_OFF:
- case HWTSTAMP_TX_ON:
- break;
- default:
- return -ERANGE;
- }
-
- switch (pi->tstamp_config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- pi->rxtstamp = false;
- break;
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- cxgb4_ptprx_timestamping(pi, pi->port_id,
- PTP_TS_L4);
- break;
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- cxgb4_ptprx_timestamping(pi, pi->port_id,
- PTP_TS_L2_L4);
- break;
- case HWTSTAMP_FILTER_ALL:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- pi->rxtstamp = true;
- break;
- default:
- pi->tstamp_config.rx_filter =
- HWTSTAMP_FILTER_NONE;
- return -ERANGE;
- }
-
- if ((pi->tstamp_config.tx_type == HWTSTAMP_TX_OFF) &&
- (pi->tstamp_config.rx_filter ==
- HWTSTAMP_FILTER_NONE)) {
- if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0)
- pi->ptp_enable = false;
- }
-
- if (pi->tstamp_config.rx_filter !=
- HWTSTAMP_FILTER_NONE) {
- if (cxgb4_ptp_redirect_rx_packet(adapter,
- pi) >= 0)
- pi->ptp_enable = true;
- }
- } else {
- /* For T4 Adapters */
- switch (pi->tstamp_config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- pi->rxtstamp = false;
- break;
- case HWTSTAMP_FILTER_ALL:
- pi->rxtstamp = true;
- break;
- default:
- pi->tstamp_config.rx_filter =
- HWTSTAMP_FILTER_NONE;
- return -ERANGE;
- }
- }
- return copy_to_user(req->ifr_data, &pi->tstamp_config,
- sizeof(pi->tstamp_config)) ?
- -EFAULT : 0;
default:
return -EOPNOTSUPP;
}
@@ -3234,7 +3237,7 @@ static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
dev_info(pi->adapter->pdev_dev,
"Setting MAC %pM on VF %d\n", mac, vf);
- ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
+ ret = t4_set_vf_mac_acl(adap, vf + 1, pi->lport, 1, mac);
if (!ret)
ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
return ret;
@@ -3294,7 +3297,7 @@ static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
}
if (max_tx_rate == 0) {
- /* unbind VF to to any Traffic Class */
+ /* unbind VF to any Traffic Class */
fw_pfvf =
(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
@@ -3482,7 +3485,7 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
struct adapter *adap = pi->adapter;
struct ch_sched_queue qe = { 0 };
struct ch_sched_params p = { 0 };
- struct sched_class *e;
+ struct ch_sched_class *e;
u32 req_rate;
int err = 0;
@@ -3872,6 +3875,8 @@ static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_setup_tc = cxgb_setup_tc,
.ndo_features_check = cxgb_features_check,
.ndo_fix_features = cxgb_fix_features,
+ .ndo_hwtstamp_get = cxgb_hwtstamp_get,
+ .ndo_hwtstamp_set = cxgb_hwtstamp_set,
};
#ifdef CONFIG_PCI_IOV
@@ -4813,7 +4818,7 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
goto bye;
}
- /* Get FW from from /lib/firmware/ */
+ /* Get FW from /lib/firmware/ */
ret = request_firmware(&fw, fw_info->fw_mod_name,
adap->pdev_dev);
if (ret < 0) {
@@ -5453,7 +5458,6 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
if (!adap) {
pci_restore_state(pdev);
- pci_save_state(pdev);
return PCI_ERS_RESULT_RECOVERED;
}
@@ -5468,7 +5472,6 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
if (t4_wait_dev_ready(adap->regs) < 0)
return PCI_ERS_RESULT_DISCONNECT;
@@ -6477,10 +6480,11 @@ static const struct tlsdev_ops cxgb4_ktls_ops = {
#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
-static int cxgb4_xfrm_add_state(struct xfrm_state *x,
+static int cxgb4_xfrm_add_state(struct net_device *dev,
+ struct xfrm_state *x,
struct netlink_ext_ack *extack)
{
- struct adapter *adap = netdev2adap(x->xso.dev);
+ struct adapter *adap = netdev2adap(dev);
int ret;
if (!mutex_trylock(&uld_mutex)) {
@@ -6491,7 +6495,8 @@ static int cxgb4_xfrm_add_state(struct xfrm_state *x,
if (ret)
goto out_unlock;
- ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x, extack);
+ ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(dev, x,
+ extack);
out_unlock:
mutex_unlock(&uld_mutex);
@@ -6499,9 +6504,9 @@ out_unlock:
return ret;
}
-static void cxgb4_xfrm_del_state(struct xfrm_state *x)
+static void cxgb4_xfrm_del_state(struct net_device *dev, struct xfrm_state *x)
{
- struct adapter *adap = netdev2adap(x->xso.dev);
+ struct adapter *adap = netdev2adap(dev);
if (!mutex_trylock(&uld_mutex)) {
dev_dbg(adap->pdev_dev,
@@ -6511,15 +6516,15 @@ static void cxgb4_xfrm_del_state(struct xfrm_state *x)
if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
goto out_unlock;
- adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(x);
+ adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(dev, x);
out_unlock:
mutex_unlock(&uld_mutex);
}
-static void cxgb4_xfrm_free_state(struct xfrm_state *x)
+static void cxgb4_xfrm_free_state(struct net_device *dev, struct xfrm_state *x)
{
- struct adapter *adap = netdev2adap(x->xso.dev);
+ struct adapter *adap = netdev2adap(dev);
if (!mutex_trylock(&uld_mutex)) {
dev_dbg(adap->pdev_dev,
@@ -6529,36 +6534,19 @@ static void cxgb4_xfrm_free_state(struct xfrm_state *x)
if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
goto out_unlock;
- adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(x);
-
-out_unlock:
- mutex_unlock(&uld_mutex);
-}
-
-static bool cxgb4_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
-{
- struct adapter *adap = netdev2adap(x->xso.dev);
- bool ret = false;
-
- if (!mutex_trylock(&uld_mutex)) {
- dev_dbg(adap->pdev_dev,
- "crypto uld critical resource is under use\n");
- return ret;
- }
- if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
- goto out_unlock;
-
- ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_offload_ok(skb, x);
+ adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(dev, x);
out_unlock:
mutex_unlock(&uld_mutex);
- return ret;
}
static void cxgb4_advance_esn_state(struct xfrm_state *x)
{
struct adapter *adap = netdev2adap(x->xso.dev);
+ if (x->xso.dir != XFRM_DEV_OFFLOAD_IN)
+ return;
+
if (!mutex_trylock(&uld_mutex)) {
dev_dbg(adap->pdev_dev,
"crypto uld critical resource is under use\n");
@@ -6577,7 +6565,6 @@ static const struct xfrmdev_ops cxgb4_xfrmdev_ops = {
.xdo_dev_state_add = cxgb4_xfrm_add_state,
.xdo_dev_state_delete = cxgb4_xfrm_del_state,
.xdo_dev_state_free = cxgb4_xfrm_free_state,
- .xdo_dev_offload_ok = cxgb4_ipsec_offload_ok,
.xdo_dev_state_advance_esn = cxgb4_advance_esn_state,
};
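
The cxgb4_main.c hunks above replace the SIOCGHWTSTAMP/SIOCSHWTSTAMP ioctl handling with the ndo_hwtstamp_get/ndo_hwtstamp_set callbacks. The copy_from_user()/copy_to_user() calls disappear because the core ioctl path now performs the user-space copy and hands drivers an in-kernel struct kernel_hwtstamp_config. A minimal sketch of the callback pair, assuming a hypothetical foo_priv that caches the last accepted configuration:

#include <linux/net_tstamp.h>
#include <linux/netdevice.h>

/* Sketch only: struct foo_priv and its tstamp_config field are hypothetical. */
struct foo_priv {
	struct kernel_hwtstamp_config tstamp_config;
};

static int foo_hwtstamp_get(struct net_device *dev,
			    struct kernel_hwtstamp_config *config)
{
	struct foo_priv *p = netdev_priv(dev);

	*config = p->tstamp_config;	/* report the cached configuration */
	return 0;
}

static int foo_hwtstamp_set(struct net_device *dev,
			    struct kernel_hwtstamp_config *config,
			    struct netlink_ext_ack *extack)
{
	struct foo_priv *p = netdev_priv(dev);

	if (config->rx_filter != HWTSTAMP_FILTER_NONE &&
	    config->rx_filter != HWTSTAMP_FILTER_ALL)
		return -ERANGE;		/* this sketch supports "none" or "all" only */

	p->tstamp_config = *config;	/* program hardware here, then cache */
	return 0;
}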
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 69d045d769c4..e2b5554531b5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -161,20 +161,9 @@ static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap,
static void cxgb4_process_flow_match(struct net_device *dev,
struct flow_rule *rule,
+ u16 addr_type,
struct ch_filter_specification *fs)
{
- u16 addr_type = 0;
-
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
- struct flow_match_control match;
-
- flow_rule_match_control(rule, &match);
- addr_type = match.key->addr_type;
- } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
- addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
- } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
- addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
- }
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
@@ -327,9 +316,6 @@ static int cxgb4_validate_flow_match(struct netlink_ext_ack *extack,
return -EOPNOTSUPP;
}
- if (flow_rule_match_has_control_flags(rule, extack))
- return -EOPNOTSUPP;
-
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
@@ -858,6 +844,7 @@ int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule,
{
struct adapter *adap = netdev2adap(dev);
struct filter_ctx ctx;
+ u16 addr_type = 0;
u8 inet_family;
int fidx, ret;
@@ -867,7 +854,28 @@ int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule,
if (cxgb4_validate_flow_match(extack, rule))
return -EOPNOTSUPP;
- cxgb4_process_flow_match(dev, rule, fs);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+ addr_type = match.key->addr_type;
+
+ if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ fs->val.frag = match.key->flags & FLOW_DIS_IS_FRAGMENT;
+ fs->mask.frag = true;
+ }
+
+ if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT,
+ match.mask->flags, extack))
+ return -EOPNOTSUPP;
+
+ } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ }
+
+ cxgb4_process_flow_match(dev, rule, addr_type, fs);
cxgb4_process_flow_actions(dev, &rule->action, fs);
fs->hash = is_filter_exact_match(adap, fs);
@@ -1051,7 +1059,7 @@ static void ch_flower_stats_handler(struct work_struct *work)
static void ch_flower_stats_cb(struct timer_list *t)
{
- struct adapter *adap = from_timer(adap, t, flower_stats_timer);
+ struct adapter *adap = timer_container_of(adap, t, flower_stats_timer);
schedule_work(&adap->flower_stats_work);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
index 1672d3afe5be..f8dcf0b4abcd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
@@ -56,7 +56,7 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev,
struct port_info *pi = netdev2pinfo(dev);
struct flow_action_entry *entry;
struct ch_sched_queue qe;
- struct sched_class *e;
+ struct ch_sched_class *e;
u64 max_link_rate;
u32 i, speed;
int ret;
@@ -180,7 +180,7 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev,
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
struct flow_action_entry *entry;
- struct sched_class *e;
+ struct ch_sched_class *e;
int ret;
u32 i;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
index 338b04f339b3..a2dcd2e24263 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
@@ -330,7 +330,7 @@ static int cxgb4_mqprio_alloc_tc(struct net_device *dev,
struct cxgb4_tc_port_mqprio *tc_port_mqprio;
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
- struct sched_class *e;
+ struct ch_sched_class *e;
int ret;
u8 i;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index a5d2f84dcdd5..8524246fd67e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -186,7 +186,7 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
/* Ensure that uhtid is either root u32 (i.e. 0x800)
- * or a a valid linked bucket.
+ * or a valid linked bucket.
*/
if (uhtid != 0x800 && uhtid >= t->size)
return -EINVAL;
@@ -422,7 +422,7 @@ int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
uhtid = TC_U32_USERHTID(cls->knode.handle);
/* Ensure that uhtid is either root u32 (i.e. 0x800)
- * or a a valid linked bucket.
+ * or a valid linked bucket.
*/
if (uhtid != 0x800 && uhtid >= t->size)
return -EINVAL;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
index b08356060fb4..7bab8da8f6e6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
@@ -29,7 +29,7 @@ static int cxgb4_thermal_get_temp(struct thermal_zone_device *tzdev,
return 0;
}
-static struct thermal_zone_device_ops cxgb4_thermal_ops = {
+static const struct thermal_zone_device_ops cxgb4_thermal_ops = {
.get_temp = cxgb4_thermal_get_temp,
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index a1b14468d1ff..38a30aeee122 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -44,7 +44,7 @@ static int t4_sched_class_fw_cmd(struct port_info *pi,
{
struct adapter *adap = pi->adapter;
struct sched_table *s = pi->sched_tbl;
- struct sched_class *e;
+ struct ch_sched_class *e;
int err = 0;
e = &s->tab[p->u.params.class];
@@ -122,7 +122,7 @@ static void *t4_sched_entry_lookup(struct port_info *pi,
const u32 val)
{
struct sched_table *s = pi->sched_tbl;
- struct sched_class *e, *end;
+ struct ch_sched_class *e, *end;
void *found = NULL;
/* Look for an entry with matching @val */
@@ -166,8 +166,8 @@ static void *t4_sched_entry_lookup(struct port_info *pi,
return found;
}
-struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
- struct ch_sched_queue *p)
+struct ch_sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
+ struct ch_sched_queue *p)
{
struct port_info *pi = netdev2pinfo(dev);
struct sched_queue_entry *qe = NULL;
@@ -187,7 +187,7 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
struct sched_queue_entry *qe = NULL;
struct adapter *adap = pi->adapter;
struct sge_eth_txq *txq;
- struct sched_class *e;
+ struct ch_sched_class *e;
int err = 0;
if (p->queue < 0 || p->queue >= pi->nqsets)
@@ -218,7 +218,7 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
struct sched_queue_entry *qe = NULL;
struct adapter *adap = pi->adapter;
struct sge_eth_txq *txq;
- struct sched_class *e;
+ struct ch_sched_class *e;
unsigned int qid;
int err = 0;
@@ -260,7 +260,7 @@ static int t4_sched_flowc_unbind(struct port_info *pi, struct ch_sched_flowc *p)
{
struct sched_flowc_entry *fe = NULL;
struct adapter *adap = pi->adapter;
- struct sched_class *e;
+ struct ch_sched_class *e;
int err = 0;
if (p->tid < 0 || p->tid >= adap->tids.neotids)
@@ -288,7 +288,7 @@ static int t4_sched_flowc_bind(struct port_info *pi, struct ch_sched_flowc *p)
struct sched_table *s = pi->sched_tbl;
struct sched_flowc_entry *fe = NULL;
struct adapter *adap = pi->adapter;
- struct sched_class *e;
+ struct ch_sched_class *e;
int err = 0;
if (p->tid < 0 || p->tid >= adap->tids.neotids)
@@ -322,7 +322,7 @@ out_err:
}
static void t4_sched_class_unbind_all(struct port_info *pi,
- struct sched_class *e,
+ struct ch_sched_class *e,
enum sched_bind_type type)
{
if (!e)
@@ -476,12 +476,12 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
}
/* If @p is NULL, fetch any available unused class */
-static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
- const struct ch_sched_params *p)
+static struct ch_sched_class *t4_sched_class_lookup(struct port_info *pi,
+ const struct ch_sched_params *p)
{
struct sched_table *s = pi->sched_tbl;
- struct sched_class *found = NULL;
- struct sched_class *e, *end;
+ struct ch_sched_class *found = NULL;
+ struct ch_sched_class *e, *end;
if (!p) {
/* Get any available unused class */
@@ -522,10 +522,10 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
return found;
}
-static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
- struct ch_sched_params *p)
+static struct ch_sched_class *t4_sched_class_alloc(struct port_info *pi,
+ struct ch_sched_params *p)
{
- struct sched_class *e = NULL;
+ struct ch_sched_class *e = NULL;
u8 class_id;
int err;
@@ -579,8 +579,8 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
* scheduling class with matching @p is found, then the matching class is
* returned.
*/
-struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
- struct ch_sched_params *p)
+struct ch_sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
+ struct ch_sched_params *p)
{
struct port_info *pi = netdev2pinfo(dev);
u8 class_id;
@@ -607,7 +607,7 @@ void cxgb4_sched_class_free(struct net_device *dev, u8 classid)
struct port_info *pi = netdev2pinfo(dev);
struct sched_table *s = pi->sched_tbl;
struct ch_sched_params p;
- struct sched_class *e;
+ struct ch_sched_class *e;
u32 speed;
int ret;
@@ -640,7 +640,7 @@ void cxgb4_sched_class_free(struct net_device *dev, u8 classid)
}
}
-static void t4_sched_class_free(struct net_device *dev, struct sched_class *e)
+static void t4_sched_class_free(struct net_device *dev, struct ch_sched_class *e)
{
struct port_info *pi = netdev2pinfo(dev);
@@ -660,7 +660,7 @@ struct sched_table *t4_init_sched(unsigned int sched_size)
s->sched_size = sched_size;
for (i = 0; i < s->sched_size; i++) {
- memset(&s->tab[i], 0, sizeof(struct sched_class));
+ memset(&s->tab[i], 0, sizeof(struct ch_sched_class));
s->tab[i].idx = i;
s->tab[i].state = SCHED_STATE_UNUSED;
INIT_LIST_HEAD(&s->tab[i].entry_list);
@@ -682,7 +682,7 @@ void t4_cleanup_sched(struct adapter *adap)
continue;
for (i = 0; i < s->sched_size; i++) {
- struct sched_class *e;
+ struct ch_sched_class *e;
e = &s->tab[i];
if (e->state == SCHED_STATE_ACTIVE)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h
index 6b3c778815f0..4d3b5a757536 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h
@@ -71,7 +71,7 @@ struct sched_flowc_entry {
struct ch_sched_flowc param;
};
-struct sched_class {
+struct ch_sched_class {
u8 state;
u8 idx;
struct ch_sched_params info;
@@ -82,7 +82,7 @@ struct sched_class {
struct sched_table { /* per port scheduling table */
u8 sched_size;
- struct sched_class tab[] __counted_by(sched_size);
+ struct ch_sched_class tab[] __counted_by(sched_size);
};
static inline bool can_sched(struct net_device *dev)
@@ -103,15 +103,15 @@ static inline bool valid_class_id(struct net_device *dev, u8 class_id)
return true;
}
-struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
- struct ch_sched_queue *p);
+struct ch_sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
+ struct ch_sched_queue *p);
int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
enum sched_bind_type type);
int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
enum sched_bind_type type);
-struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
- struct ch_sched_params *p);
+struct ch_sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
+ struct ch_sched_params *p);
void cxgb4_sched_class_free(struct net_device *dev, u8 classid);
struct sched_table *t4_init_sched(unsigned int size);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index a7d76a8ed050..9fccb8ea9bcd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -163,7 +163,7 @@ static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
* for DMA, but this is of course never sent to the hardware and is only used
* to prevent double unmappings. All of the above requires that the Free List
* Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
- * 32-byte or or a power of 2 greater in alignment. Since the SGE's minimal
+ * 32-byte or a power of 2 greater in alignment. Since the SGE's minimal
* Free List Buffer alignment is 32 bytes, this works out for us ...
*/
enum {
@@ -1533,7 +1533,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
} else {
q = &adap->sge.ethtxq[qidx + pi->first_qset];
}
- skb_tx_timestamp(skb);
reclaim_completed_tx(adap, &q->q, -1, true);
cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
@@ -1706,6 +1705,8 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
cpl->len = htons(skb->len);
cpl->ctrl1 = cpu_to_be64(cntrl);
+ skb_tx_timestamp(skb);
+
if (immediate) {
cxgb4_inline_tx_skb(skb, &q->q, sgl);
dev_consume_skb_any(skb);
@@ -2268,7 +2269,6 @@ static int ethofld_hard_xmit(struct net_device *dev,
d = &eosw_txq->desc[eosw_txq->last_pidx];
skb = d->skb;
- skb_tx_timestamp(skb);
wr = (struct fw_eth_tx_eo_wr *)&eohw_txq->q.desc[eohw_txq->q.pidx];
if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE &&
@@ -2373,6 +2373,7 @@ write_wr_headers:
eohw_txq->vlan_ins++;
txq_advance(&eohw_txq->q, ndesc);
+ skb_tx_timestamp(skb);
cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc);
eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc);
@@ -4233,7 +4234,7 @@ static void sge_rx_timer_cb(struct timer_list *t)
{
unsigned long m;
unsigned int i;
- struct adapter *adap = from_timer(adap, t, sge.rx_timer);
+ struct adapter *adap = timer_container_of(adap, t, sge.rx_timer);
struct sge *s = &adap->sge;
for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
@@ -4268,7 +4269,7 @@ done:
static void sge_tx_timer_cb(struct timer_list *t)
{
- struct adapter *adap = from_timer(adap, t, sge.tx_timer);
+ struct adapter *adap = timer_container_of(adap, t, sge.tx_timer);
struct sge *s = &adap->sge;
unsigned long m, period;
unsigned int i, budget;
@@ -4996,9 +4997,9 @@ void t4_sge_stop(struct adapter *adap)
struct sge *s = &adap->sge;
if (s->rx_timer.function)
- del_timer_sync(&s->rx_timer);
+ timer_delete_sync(&s->rx_timer);
if (s->tx_timer.function)
- del_timer_sync(&s->tx_timer);
+ timer_delete_sync(&s->tx_timer);
if (is_offload(adap)) {
struct sge_uld_txq_info *txq_info;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 76de55306c4d..171750fad44f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -9348,7 +9348,7 @@ int t4_init_devlog_params(struct adapter *adap)
return 0;
}
- /* Otherwise, ask the firmware for it's Device Log Parameters.
+ /* Otherwise, ask the firmware for its Device Log Parameters.
*/
memset(&devlog_cmd, 0, sizeof(devlog_cmd));
devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
@@ -10215,11 +10215,12 @@ out:
* t4_set_vf_mac_acl - Set MAC address for the specified VF
* @adapter: The adapter
* @vf: one of the VFs instantiated by the specified PF
+ * @start: The start port id associated with specified VF
* @naddr: the number of MAC addresses
* @addr: the MAC address(es) to be set to the specified VF
*/
int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
- unsigned int naddr, u8 *addr)
+ u8 start, unsigned int naddr, u8 *addr)
{
struct fw_acl_mac_cmd cmd;
@@ -10234,7 +10235,7 @@ int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
cmd.nmac = naddr;
- switch (adapter->pf) {
+ switch (start) {
case 3:
memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
break;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 61d08547e3f9..2fbe0f059a0b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1436,8 +1436,8 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
static int cxgb4vf_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *link_ksettings)
{
- struct ethtool_link_settings_hdr *base = &link_ksettings->base;
struct port_info *pi = netdev_priv(dev);
+ struct ethtool_link_settings *base = &link_ksettings->base;
/* For the nonce, the Firmware doesn't send up Port State changes
* when the Virtual Interface attached to the Port is down. So
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 5b1d746e6563..31fab2415743 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2062,7 +2062,7 @@ irq_handler_t t4vf_intr_handler(struct adapter *adapter)
*/
static void sge_rx_timer_cb(struct timer_list *t)
{
- struct adapter *adapter = from_timer(adapter, t, sge.rx_timer);
+ struct adapter *adapter = timer_container_of(adapter, t, sge.rx_timer);
struct sge *s = &adapter->sge;
unsigned int i;
@@ -2121,7 +2121,7 @@ static void sge_rx_timer_cb(struct timer_list *t)
*/
static void sge_tx_timer_cb(struct timer_list *t)
{
- struct adapter *adapter = from_timer(adapter, t, sge.tx_timer);
+ struct adapter *adapter = timer_container_of(adapter, t, sge.tx_timer);
struct sge *s = &adapter->sge;
unsigned int i, budget;
@@ -2191,7 +2191,7 @@ static void __iomem *bar2_address(struct adapter *adapter,
/**
* t4vf_sge_alloc_rxq - allocate an SGE RX Queue
* @adapter: the adapter
- * @rspq: pointer to to the new rxq's Response Queue to be filled in
+ * @rspq: pointer to the new rxq's Response Queue to be filled in
* @iqasynch: if 0, a normal rspq; if 1, an asynchronous event queue
* @dev: the network device associated with the new rspq
 * @intr_dest: MSI-X vector index (overridden in MSI mode)
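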
@@ -2609,9 +2609,9 @@ void t4vf_sge_stop(struct adapter *adapter)
struct sge *s = &adapter->sge;
if (s->rx_timer.function)
- del_timer_sync(&s->rx_timer);
+ timer_delete_sync(&s->rx_timer);
if (s->tx_timer.function)
- del_timer_sync(&s->tx_timer);
+ timer_delete_sync(&s->tx_timer);
}
/**
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 1c52592d3b65..56fcc531af2e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -706,7 +706,7 @@ int t4vf_fl_pkt_align(struct adapter *adapter)
* separately. The actual Ingress Packet Data alignment boundary
* within Packed Buffer Mode is the maximum of these two
* specifications. (Note that it makes no real practical sense to
- * have the Pading Boudary be larger than the Packing Boundary but you
+ * have the Padding Boundary be larger than the Packing Boundary but you
* could set the chip up that way and, in fact, legacy T4 code would
* end doing this because it would initialize the Padding Boundary and
* leave the Packing Boundary initialized to 0 (16 bytes).)
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
index c7338ac6a5bb..49b57bb5fac1 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
@@ -71,21 +71,22 @@
static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
-static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state);
static int ch_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop);
static void ch_ipsec_advance_esn_state(struct xfrm_state *x);
-static void ch_ipsec_xfrm_free_state(struct xfrm_state *x);
-static void ch_ipsec_xfrm_del_state(struct xfrm_state *x);
-static int ch_ipsec_xfrm_add_state(struct xfrm_state *x,
+static void ch_ipsec_xfrm_free_state(struct net_device *dev,
+ struct xfrm_state *x);
+static void ch_ipsec_xfrm_del_state(struct net_device *dev,
+ struct xfrm_state *x);
+static int ch_ipsec_xfrm_add_state(struct net_device *dev,
+ struct xfrm_state *x,
struct netlink_ext_ack *extack);
static const struct xfrmdev_ops ch_ipsec_xfrmdev_ops = {
.xdo_dev_state_add = ch_ipsec_xfrm_add_state,
.xdo_dev_state_delete = ch_ipsec_xfrm_del_state,
.xdo_dev_state_free = ch_ipsec_xfrm_free_state,
- .xdo_dev_offload_ok = ch_ipsec_offload_ok,
.xdo_dev_state_advance_esn = ch_ipsec_advance_esn_state,
};
@@ -225,7 +226,8 @@ out:
* returns 0 on success, negative error if failed to send message to FPGA
* positive error if FPGA returned a bad response
*/
-static int ch_ipsec_xfrm_add_state(struct xfrm_state *x,
+static int ch_ipsec_xfrm_add_state(struct net_device *dev,
+ struct xfrm_state *x,
struct netlink_ext_ack *extack)
{
struct ipsec_sa_entry *sa_entry;
@@ -288,9 +290,15 @@ static int ch_ipsec_xfrm_add_state(struct xfrm_state *x,
return -EINVAL;
}
+ if (unlikely(!try_module_get(THIS_MODULE))) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire module reference");
+ return -ENODEV;
+ }
+
sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
if (!sa_entry) {
res = -ENOMEM;
+ module_put(THIS_MODULE);
goto out;
}
@@ -299,19 +307,20 @@ static int ch_ipsec_xfrm_add_state(struct xfrm_state *x,
sa_entry->esn = 1;
ch_ipsec_setkey(x, sa_entry);
x->xso.offload_handle = (unsigned long)sa_entry;
- try_module_get(THIS_MODULE);
out:
return res;
}
-static void ch_ipsec_xfrm_del_state(struct xfrm_state *x)
+static void ch_ipsec_xfrm_del_state(struct net_device *dev,
+ struct xfrm_state *x)
{
/* do nothing */
if (!x->xso.offload_handle)
return;
}
-static void ch_ipsec_xfrm_free_state(struct xfrm_state *x)
+static void ch_ipsec_xfrm_free_state(struct net_device *dev,
+ struct xfrm_state *x)
{
struct ipsec_sa_entry *sa_entry;
@@ -323,20 +332,6 @@ static void ch_ipsec_xfrm_free_state(struct xfrm_state *x)
module_put(THIS_MODULE);
}
-static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
-{
- if (x->props.family == AF_INET) {
- /* Offload with IP options is not supported yet */
- if (ip_hdr(skb)->ihl > 5)
- return false;
- } else {
- /* Offload with IPv6 extension headers is not support yet */
- if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
- return false;
- }
- return true;
-}
-
static void ch_ipsec_advance_esn_state(struct xfrm_state *x)
{
/* do nothing */
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index e8e460a92e0e..4e2096e49684 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -1640,6 +1640,7 @@ static int chcr_ktls_tunnel_pkt(struct chcr_ktls_info *tx_info,
cxgb4_write_sgl(skb, &q->q, pos, end, 0, sgl_sdesc->addr);
sgl_sdesc->skb = skb;
chcr_txq_advance(&q->q, ndesc);
+ skb_tx_timestamp(skb);
cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
return 0;
}
@@ -1903,7 +1904,6 @@ static int chcr_ktls_sw_fallback(struct sk_buff *skb,
th = tcp_hdr(nskb);
skb_offset = skb_tcp_all_headers(nskb);
data_len = nskb->len - skb_offset;
- skb_tx_timestamp(nskb);
if (chcr_ktls_tunnel_pkt(tx_info, nskb, q))
goto out;
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index 6f6525983130..ee0154337a9c 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -171,7 +171,7 @@ static void chtls_purge_receive_queue(struct sock *sk)
struct sk_buff *skb;
while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
- skb_dst_set(skb, (void *)NULL);
+ skb_dstref_steal(skb);
kfree_skb(skb);
}
}
@@ -194,7 +194,7 @@ static void chtls_purge_recv_queue(struct sock *sk)
struct sk_buff *skb;
while ((skb = __skb_dequeue(&tlsk->sk_recv_queue)) != NULL) {
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
kfree_skb(skb);
}
}
@@ -505,7 +505,7 @@ static void reset_listen_child(struct sock *child)
chtls_send_reset(child, CPL_ABORT_SEND_RST, skb);
sock_orphan(child);
- INC_ORPHAN_COUNT(child);
+ tcp_orphan_count_inc();
if (child->sk_state == TCP_CLOSE)
inet_csk_destroy_sock(child);
}
@@ -870,7 +870,7 @@ static void do_abort_syn_rcv(struct sock *child, struct sock *parent)
* created only after 3 way handshake is done.
*/
sock_orphan(child);
- INC_ORPHAN_COUNT(child);
+ tcp_orphan_count_inc();
chtls_release_resources(child);
chtls_conn_done(child);
} else {
@@ -951,6 +951,7 @@ static unsigned int chtls_select_mss(const struct chtls_sock *csk,
struct tcp_sock *tp;
unsigned int mss;
struct sock *sk;
+ u16 user_mss;
mss = ntohs(req->tcpopt.mss);
sk = csk->sk;
@@ -969,8 +970,9 @@ static unsigned int chtls_select_mss(const struct chtls_sock *csk,
tcpoptsz += round_up(TCPOLEN_TIMESTAMP, 4);
tp->advmss = dst_metric_advmss(dst);
- if (USER_MSS(tp) && tp->advmss > USER_MSS(tp))
- tp->advmss = USER_MSS(tp);
+ user_mss = USER_MSS(tp);
+ if (user_mss && tp->advmss > user_mss)
+ tp->advmss = user_mss;
if (tp->advmss > pmtu - iphdrsz)
tp->advmss = pmtu - iphdrsz;
if (mss && tp->advmss > mss)
@@ -1197,12 +1199,12 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
struct ipv6_pinfo *newnp = inet6_sk(newsk);
struct ipv6_pinfo *np = inet6_sk(lsk);
- inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
+ newinet->pinet6 = &newtcp6sk->inet6;
+ newinet->ipv6_fl_list = NULL;
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
newsk->sk_v6_daddr = treq->ir_v6_rmt_addr;
newsk->sk_v6_rcv_saddr = treq->ir_v6_loc_addr;
inet6_sk(newsk)->saddr = treq->ir_v6_loc_addr;
- newnp->ipv6_fl_list = NULL;
newnp->pktoptions = NULL;
newsk->sk_bound_dev_if = treq->ir_iif;
newinet->inet_opt = NULL;
@@ -1734,7 +1736,7 @@ static int chtls_rx_data(struct chtls_dev *cdev, struct sk_buff *skb)
pr_err("can't find conn. for hwtid %u.\n", hwtid);
return -EINVAL;
}
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
process_cpl_msg(chtls_recv_data, sk, skb);
return 0;
}
@@ -1786,7 +1788,7 @@ static int chtls_rx_pdu(struct chtls_dev *cdev, struct sk_buff *skb)
pr_err("can't find conn. for hwtid %u.\n", hwtid);
return -EINVAL;
}
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
process_cpl_msg(chtls_recv_pdu, sk, skb);
return 0;
}
@@ -1855,7 +1857,7 @@ static int chtls_rx_cmp(struct chtls_dev *cdev, struct sk_buff *skb)
pr_err("can't find conn. for hwtid %u.\n", hwtid);
return -EINVAL;
}
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
process_cpl_msg(chtls_rx_hdr, sk, skb);
return 0;
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
index f61ca657601c..29ceff5a5fcb 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
@@ -90,12 +90,11 @@ struct deferred_skb_cb {
#define SND_WSCALE(tp) ((tp)->rx_opt.snd_wscale)
#define RCV_WSCALE(tp) ((tp)->rx_opt.rcv_wscale)
-#define USER_MSS(tp) ((tp)->rx_opt.user_mss)
+#define USER_MSS(tp) (READ_ONCE((tp)->rx_opt.user_mss))
#define TS_RECENT_STAMP(tp) ((tp)->rx_opt.ts_recent_stamp)
#define WSCALE_OK(tp) ((tp)->rx_opt.wscale_ok)
#define TSTAMP_OK(tp) ((tp)->rx_opt.tstamp_ok)
#define SACK_OK(tp) ((tp)->rx_opt.sack_ok)
-#define INC_ORPHAN_COUNT(sk) this_cpu_inc(*(sk)->sk_prot->orphan_count)
/* TLS SKB */
#define skb_ulp_tls_inline(skb) (ULP_SKB_CB(skb)->ulp.tls.ofld)
@@ -171,14 +170,14 @@ static inline void chtls_set_req_addr(struct request_sock *oreq,
static inline void chtls_free_skb(struct sock *sk, struct sk_buff *skb)
{
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
__skb_unlink(skb, &sk->sk_receive_queue);
__kfree_skb(skb);
}
static inline void chtls_kfree_skb(struct sock *sk, struct sk_buff *skb)
{
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
__skb_unlink(skb, &sk->sk_receive_queue);
kfree_skb(skb);
}
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
index d567e42e1760..ee19933e2cca 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
@@ -159,19 +159,13 @@ static u8 tcp_state_to_flowc_state(u8 state)
int send_tx_flowc_wr(struct sock *sk, int compl,
u32 snd_nxt, u32 rcv_nxt)
{
- struct flowc_packed {
- struct fw_flowc_wr fc;
- struct fw_flowc_mnemval mnemval[FW_FLOWC_MNEM_MAX];
- } __packed sflowc;
+ DEFINE_RAW_FLEX(struct fw_flowc_wr, flowc, mnemval, FW_FLOWC_MNEM_MAX);
int nparams, paramidx, flowclen16, flowclen;
- struct fw_flowc_wr *flowc;
struct chtls_sock *csk;
struct tcp_sock *tp;
csk = rcu_dereference_sk_user_data(sk);
tp = tcp_sk(sk);
- memset(&sflowc, 0, sizeof(sflowc));
- flowc = &sflowc.fc;
#define FLOWC_PARAM(__m, __v) \
do { \
@@ -1096,8 +1090,7 @@ new_buf:
copy = size;
if (msg->msg_flags & MSG_SPLICE_PAGES) {
- err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
- sk->sk_allocation);
+ err = skb_splice_from_iter(skb, &msg->msg_iter, copy);
if (err < 0) {
if (err == -EMSGSIZE)
goto new_buf;
@@ -1435,7 +1428,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
continue;
found_ok_skb:
if (!skb->len) {
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
__skb_unlink(skb, &sk->sk_receive_queue);
kfree_skb(skb);
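
The send_tx_flowc_wr() hunk above replaces the hand-rolled "packed struct plus trailing mnemval array" with DEFINE_RAW_FLEX() from <linux/overflow.h>, which declares a zero-initialized on-stack buffer sized for the structure plus a fixed number of flexible-array elements and exposes it through a typed pointer; that zeroing is also why the explicit memset() disappears. A minimal sketch of the macro's use, with a hypothetical message type:

#include <linux/overflow.h>
#include <linux/types.h>

/* Sketch only: struct foo_msg and its use are hypothetical. */
struct foo_msg {
	u32 nvals;
	u32 vals[];		/* flexible array member */
};

static void foo_send_example(void)
{
	/* On-stack foo_msg with room for 4 vals entries, zeroed; "msg"
	 * is a struct foo_msg * pointing at that storage.
	 */
	DEFINE_RAW_FLEX(struct foo_msg, msg, vals, 4);

	msg->nvals = 4;
	msg->vals[0] = 1;
	/* ... fill the remaining entries and hand msg off to the sender ... */
}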
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
index 96fd31d75dfd..daa1ebaef511 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
@@ -346,8 +346,9 @@ static struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
* driver. Once driver synthesizes cpl_pass_accept_req the skb will go
* through the regular cpl_pass_accept_req processing in TOM.
*/
- skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req)
- - pktshift, GFP_ATOMIC);
+ skb = alloc_skb(size_add(gl->tot_len,
+ sizeof(struct cpl_pass_accept_req)) -
+ pktshift, GFP_ATOMIC);
if (unlikely(!skb))
return NULL;
__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req)
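
The copy_gl_to_skb_pkt() hunk above switches the skb length arithmetic to size_add(), the saturating helper from <linux/overflow.h>: both operands are promoted to size_t and the result clamps to SIZE_MAX on overflow, so a wrapped sum becomes an allocation that fails rather than a too-small buffer. A minimal sketch, with hypothetical lengths:

#include <linux/overflow.h>
#include <linux/skbuff.h>

/* Sketch only: payload_len and hdr_len are hypothetical inputs. */
static struct sk_buff *foo_alloc_example(size_t payload_len, size_t hdr_len)
{
	/* size_add() saturates at SIZE_MAX on overflow instead of wrapping. */
	return alloc_skb(size_add(payload_len, hdr_len), GFP_ATOMIC);
}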
diff --git a/drivers/net/ethernet/cisco/enic/Kconfig b/drivers/net/ethernet/cisco/enic/Kconfig
index ad80c0fa96a6..96709875fe4f 100644
--- a/drivers/net/ethernet/cisco/enic/Kconfig
+++ b/drivers/net/ethernet/cisco/enic/Kconfig
@@ -6,5 +6,6 @@
config ENIC
tristate "Cisco VIC Ethernet NIC Support"
depends on PCI
+ select PAGE_POOL
help
This enables the support for the Cisco VIC Ethernet card.
diff --git a/drivers/net/ethernet/cisco/enic/Makefile b/drivers/net/ethernet/cisco/enic/Makefile
index c3b6febfdbe4..a96b8332e6e2 100644
--- a/drivers/net/ethernet/cisco/enic/Makefile
+++ b/drivers/net/ethernet/cisco/enic/Makefile
@@ -3,5 +3,5 @@ obj-$(CONFIG_ENIC) := enic.o
enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o \
- enic_ethtool.o enic_api.o enic_clsf.o
+ enic_ethtool.o enic_api.o enic_clsf.o enic_rq.o enic_wq.o
diff --git a/drivers/net/ethernet/cisco/enic/cq_desc.h b/drivers/net/ethernet/cisco/enic/cq_desc.h
index 462c5435a206..bfb3f14e89f5 100644
--- a/drivers/net/ethernet/cisco/enic/cq_desc.h
+++ b/drivers/net/ethernet/cisco/enic/cq_desc.h
@@ -40,28 +40,7 @@ struct cq_desc {
#define CQ_DESC_COMP_NDX_BITS 12
#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
-static inline void cq_desc_dec(const struct cq_desc *desc_arg,
- u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
-{
- const struct cq_desc *desc = desc_arg;
- const u8 type_color = desc->type_color;
-
- *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
-
- /*
- * Make sure color bit is read from desc *before* other fields
- * are read from desc. Hardware guarantees color bit is last
- * bit (byte) written. Adding the rmb() prevents the compiler
- * and/or CPU from reordering the reads which would potentially
- * result in reading stale values.
- */
-
- rmb();
-
- *type = type_color & CQ_DESC_TYPE_MASK;
- *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
- *completed_index = le16_to_cpu(desc->completed_index) &
- CQ_DESC_COMP_NDX_MASK;
-}
+#define CQ_DESC_32_FI_MASK (BIT(0) | BIT(1))
+#define CQ_DESC_64_FI_MASK (BIT(0) | BIT(1))
#endif /* _CQ_DESC_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/cq_enet_desc.h b/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
index d25426470a29..50787cff29db 100644
--- a/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
+++ b/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
@@ -17,12 +17,22 @@ struct cq_enet_wq_desc {
u8 type_color;
};
-static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
- u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
-{
- cq_desc_dec((struct cq_desc *)desc, type,
- color, q_number, completed_index);
-}
+/*
+ * Defines and Capabilities for CMD_CQ_ENTRY_SIZE_SET
+ */
+#define VNIC_RQ_ALL (~0ULL)
+
+#define VNIC_RQ_CQ_ENTRY_SIZE_16 0
+#define VNIC_RQ_CQ_ENTRY_SIZE_32 1
+#define VNIC_RQ_CQ_ENTRY_SIZE_64 2
+
+#define VNIC_RQ_CQ_ENTRY_SIZE_16_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_16)
+#define VNIC_RQ_CQ_ENTRY_SIZE_32_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_32)
+#define VNIC_RQ_CQ_ENTRY_SIZE_64_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_64)
+
+#define VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT (VNIC_RQ_CQ_ENTRY_SIZE_16_CAPABLE | \
+ VNIC_RQ_CQ_ENTRY_SIZE_32_CAPABLE | \
+ VNIC_RQ_CQ_ENTRY_SIZE_64_CAPABLE)
/* Completion queue descriptor: Ethernet receive queue, 16B */
struct cq_enet_rq_desc {
@@ -36,6 +46,45 @@ struct cq_enet_rq_desc {
u8 type_color;
};
+/* Completion queue descriptor: Ethernet receive queue, 32B */
+struct cq_enet_rq_desc_32 {
+ __le16 completed_index_flags;
+ __le16 q_number_rss_type_flags;
+ __le32 rss_hash;
+ __le16 bytes_written_flags;
+ __le16 vlan;
+ __le16 checksum_fcoe;
+ u8 flags;
+ u8 fetch_index_flags;
+ __le32 time_stamp;
+ __le16 time_stamp2;
+ __le16 pie_info;
+ __le32 pie_info2;
+ __le16 pie_info3;
+ u8 pie_info4;
+ u8 type_color;
+};
+
+/* Completion queue descriptor: Ethernet receive queue, 64B */
+struct cq_enet_rq_desc_64 {
+ __le16 completed_index_flags;
+ __le16 q_number_rss_type_flags;
+ __le32 rss_hash;
+ __le16 bytes_written_flags;
+ __le16 vlan;
+ __le16 checksum_fcoe;
+ u8 flags;
+ u8 fetch_index_flags;
+ __le32 time_stamp;
+ __le16 time_stamp2;
+ __le16 pie_info;
+ __le32 pie_info2;
+ __le16 pie_info3;
+ u8 pie_info4;
+ u8 reserved[32];
+ u8 type_color;
+};
+
#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12)
#define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13)
#define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14)
@@ -88,85 +137,4 @@ struct cq_enet_rq_desc {
#define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT (0x1 << 6)
#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7)
-static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
- u8 *type, u8 *color, u16 *q_number, u16 *completed_index,
- u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
- u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error,
- u8 *vlan_stripped, u16 *vlan_tci, u16 *checksum, u8 *fcoe_sof,
- u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof,
- u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
- u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
-{
- u16 completed_index_flags;
- u16 q_number_rss_type_flags;
- u16 bytes_written_flags;
-
- cq_desc_dec((struct cq_desc *)desc, type,
- color, q_number, completed_index);
-
- completed_index_flags = le16_to_cpu(desc->completed_index_flags);
- q_number_rss_type_flags =
- le16_to_cpu(desc->q_number_rss_type_flags);
- bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
-
- *ingress_port = (completed_index_flags &
- CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
- *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
- 1 : 0;
- *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
- 1 : 0;
- *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
- 1 : 0;
-
- *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
- CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
- *csum_not_calc = (q_number_rss_type_flags &
- CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
-
- *rss_hash = le32_to_cpu(desc->rss_hash);
-
- *bytes_written = bytes_written_flags &
- CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
- *packet_error = (bytes_written_flags &
- CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
- *vlan_stripped = (bytes_written_flags &
- CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
-
- /*
- * Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12)
- */
- *vlan_tci = le16_to_cpu(desc->vlan);
-
- if (*fcoe) {
- *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
- CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
- *fcoe_fc_crc_ok = (desc->flags &
- CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
- *fcoe_enc_error = (desc->flags &
- CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
- *fcoe_eof = (u8)((le16_to_cpu(desc->checksum_fcoe) >>
- CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
- CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
- *checksum = 0;
- } else {
- *fcoe_sof = 0;
- *fcoe_fc_crc_ok = 0;
- *fcoe_enc_error = 0;
- *fcoe_eof = 0;
- *checksum = le16_to_cpu(desc->checksum_fcoe);
- }
-
- *tcp_udp_csum_ok =
- (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
- *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
- *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
- *ipv4_csum_ok =
- (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
- *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
- *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
- *ipv4_fragment =
- (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
- *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
-}
-
#endif /* _CQ_ENET_DESC_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 0cc3644ee855..301b3f3114af 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -17,21 +17,28 @@
#include "vnic_nic.h"
#include "vnic_rss.h"
#include <linux/irq.h>
+#include <net/page_pool/helpers.h>
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
#define ENIC_BARS_MAX 6
-#define ENIC_WQ_MAX 8
-#define ENIC_RQ_MAX 8
-#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
-#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
+#define ENIC_WQ_MAX 256
+#define ENIC_RQ_MAX 256
+#define ENIC_RQ_MIN_DEFAULT 8
#define ENIC_WQ_NAPI_BUDGET 256
#define ENIC_AIC_LARGE_PKT_DIFF 3
+enum ext_cq {
+ ENIC_RQ_CQ_ENTRY_SIZE_16,
+ ENIC_RQ_CQ_ENTRY_SIZE_32,
+ ENIC_RQ_CQ_ENTRY_SIZE_64,
+ ENIC_RQ_CQ_ENTRY_SIZE_MAX,
+};
+
struct enic_msix_entry {
int requested;
char devname[IFNAMSIZ + 8];
@@ -77,6 +84,10 @@ struct enic_rx_coal {
#define ENIC_SET_INSTANCE (1 << 3)
#define ENIC_SET_HOST (1 << 4)
+#define MAX_TSO BIT(16)
+#define WQ_ENET_MAX_DESC_LEN BIT(WQ_ENET_LEN_BITS)
+#define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
+
struct enic_port_profile {
u32 set;
u8 request;
@@ -160,8 +171,21 @@ struct enic_rq_stats {
u64 pkt_truncated; /* truncated pkts */
u64 no_skb; /* out of skbs */
u64 desc_skip; /* Rx pkt went into later buffer */
+ u64 pp_alloc_fail; /* page pool alloc failure */
};
+struct enic_wq {
+ spinlock_t lock; /* spinlock for wq */
+ struct vnic_wq vwq;
+ struct enic_wq_stats stats;
+} ____cacheline_aligned;
+
+struct enic_rq {
+ struct vnic_rq vrq;
+ struct enic_rq_stats stats;
+ struct page_pool *pool;
+} ____cacheline_aligned;
+
/* Per-instance private data structure */
struct enic {
struct net_device *netdev;
@@ -173,8 +197,8 @@ struct enic {
struct work_struct reset;
struct work_struct tx_hang_reset;
struct work_struct change_mtu_work;
- struct msix_entry msix_entry[ENIC_INTR_MAX];
- struct enic_msix_entry msix[ENIC_INTR_MAX];
+ struct msix_entry *msix_entry;
+ struct enic_msix_entry *msix;
u32 msg_enable;
spinlock_t devcmd_lock;
u8 mac_addr[ETH_ALEN];
@@ -193,33 +217,30 @@ struct enic {
bool enic_api_busy;
struct enic_port_profile *pp;
- /* work queue cache line section */
- ____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX];
- spinlock_t wq_lock[ENIC_WQ_MAX];
- struct enic_wq_stats wq_stats[ENIC_WQ_MAX];
+ struct enic_wq *wq;
+ unsigned int wq_avail;
unsigned int wq_count;
u16 loop_enable;
u16 loop_tag;
- /* receive queue cache line section */
- ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX];
- struct enic_rq_stats rq_stats[ENIC_RQ_MAX];
+ struct enic_rq *rq;
+ unsigned int rq_avail;
unsigned int rq_count;
struct vxlan_offload vxlan;
- struct napi_struct napi[ENIC_RQ_MAX + ENIC_WQ_MAX];
+ struct napi_struct *napi;
- /* interrupt resource cache line section */
- ____cacheline_aligned struct vnic_intr intr[ENIC_INTR_MAX];
+ struct vnic_intr *intr;
+ unsigned int intr_avail;
unsigned int intr_count;
u32 __iomem *legacy_pba; /* memory-mapped */
- /* completion queue cache line section */
- ____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX];
+ struct vnic_cq *cq;
+ unsigned int cq_avail;
unsigned int cq_count;
struct enic_rfs_flw_tbl rfs_h;
- u32 rx_copybreak;
u8 rss_key[ENIC_RSS_LEN];
struct vnic_gen_stats gen_stats;
+ enum ext_cq ext_cq;
};
static inline struct net_device *vnic_get_netdev(struct vnic_dev *vdev)
@@ -272,18 +293,28 @@ static inline unsigned int enic_msix_wq_intr(struct enic *enic,
return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
}
-static inline unsigned int enic_msix_err_intr(struct enic *enic)
-{
- return enic->rq_count + enic->wq_count;
-}
+/* MSIX interrupts are organized as the error interrupt, then the notify
+ * interrupt, followed by all the I/O interrupts. The error interrupt needs
+ * to fit in 7 bits due to hardware constraints.
+ */
+#define ENIC_MSIX_RESERVED_INTR 2
+#define ENIC_MSIX_ERR_INTR 0
+#define ENIC_MSIX_NOTIFY_INTR 1
+#define ENIC_MSIX_IO_INTR_BASE ENIC_MSIX_RESERVED_INTR
+#define ENIC_MSIX_MIN_INTR (ENIC_MSIX_RESERVED_INTR + 2)
#define ENIC_LEGACY_IO_INTR 0
#define ENIC_LEGACY_ERR_INTR 1
#define ENIC_LEGACY_NOTIFY_INTR 2
+static inline unsigned int enic_msix_err_intr(struct enic *enic)
+{
+ return ENIC_MSIX_ERR_INTR;
+}
+
static inline unsigned int enic_msix_notify_intr(struct enic *enic)
{
- return enic->rq_count + enic->wq_count + 1;
+ return ENIC_MSIX_NOTIFY_INTR;
}
static inline bool enic_is_err_intr(struct enic *enic, int intr)
@@ -331,5 +362,6 @@ int enic_is_valid_vf(struct enic *enic, int vf);
int enic_is_dynamic(struct enic *enic);
void enic_set_ethtool_ops(struct net_device *netdev);
int __enic_set_rsskey(struct enic *enic);
+void enic_ext_cq(struct enic *enic);
#endif /* _ENIC_H_ */
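
The enic.h hunk above replaces the old tail-positioned error/notify vectors with a fixed MSI-X layout: vector 0 for errors, vector 1 for notifications, and I/O vectors starting at ENIC_MSIX_IO_INTR_BASE. A minimal standalone sketch of the resulting index arithmetic; the per-queue mapping (RQ i, then WQ j, on consecutive I/O vectors) follows the enic_init_vnic_resources() changes later in this patch, and the queue counts in main() are made up:

#include <stdio.h>

/* Fixed MSI-X layout introduced by this patch (see enic.h above). */
#define ENIC_MSIX_ERR_INTR	0
#define ENIC_MSIX_NOTIFY_INTR	1
#define ENIC_MSIX_RESERVED_INTR	2
#define ENIC_MSIX_IO_INTR_BASE	ENIC_MSIX_RESERVED_INTR

/* RQ completions use CQ[i], WQ completions use CQ[rq_count + j];
 * each CQ points at I/O vector ENIC_MSIX_IO_INTR_BASE + cq_index.
 */
static unsigned int rq_intr(unsigned int i)
{
	return ENIC_MSIX_IO_INTR_BASE + i;
}

static unsigned int wq_intr(unsigned int rq_count, unsigned int j)
{
	return ENIC_MSIX_IO_INTR_BASE + rq_count + j;
}

int main(void)
{
	unsigned int rq_count = 4, wq_count = 4;

	printf("err=%d notify=%d\n", ENIC_MSIX_ERR_INTR, ENIC_MSIX_NOTIFY_INTR);
	for (unsigned int i = 0; i < rq_count; i++)
		printf("rq[%u] -> vector %u\n", i, rq_intr(i));
	for (unsigned int j = 0; j < wq_count; j++)
		printf("wq[%u] -> vector %u\n", j, wq_intr(rq_count, j));
	return 0;
}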
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c
index 9900993b6aea..837f954873ee 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.c
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c
@@ -125,7 +125,7 @@ struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id)
#ifdef CONFIG_RFS_ACCEL
void enic_flow_may_expire(struct timer_list *t)
{
- struct enic *enic = from_timer(enic, t, rfs_h.rfs_may_expire);
+ struct enic *enic = timer_container_of(enic, t, rfs_h.rfs_may_expire);
bool res;
int j;
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.h b/drivers/net/ethernet/cisco/enic/enic_clsf.h
index 8c4ce50da6e1..5f5284102fb0 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.h
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.h
@@ -26,7 +26,7 @@ static inline void enic_rfs_timer_start(struct enic *enic)
static inline void enic_rfs_timer_stop(struct enic *enic)
{
- del_timer_sync(&enic->rfs_h.rfs_may_expire);
+ timer_delete_sync(&enic->rfs_h.rfs_may_expire);
}
#else
static inline void enic_rfs_timer_start(struct enic *enic) {}
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 4fe85780a950..a50f5dad34d5 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -129,8 +129,8 @@ static void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
static int enic_get_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *ecmd)
{
- struct ethtool_link_settings_hdr *base = &ecmd->base;
struct enic *enic = netdev_priv(netdev);
+ struct ethtool_link_settings *base = &ecmd->base;
ethtool_link_ksettings_add_link_mode(ecmd, supported,
10000baseT_Full);
@@ -222,9 +222,9 @@ static void enic_get_ringparam(struct net_device *netdev,
struct enic *enic = netdev_priv(netdev);
struct vnic_enet_config *c = &enic->config;
- ring->rx_max_pending = ENIC_MAX_RQ_DESCS;
+ ring->rx_max_pending = c->max_rq_ring;
ring->rx_pending = c->rq_desc_count;
- ring->tx_max_pending = ENIC_MAX_WQ_DESCS;
+ ring->tx_max_pending = c->max_wq_ring;
ring->tx_pending = c->wq_desc_count;
}
@@ -252,18 +252,18 @@ static int enic_set_ringparam(struct net_device *netdev,
}
rx_pending = c->rq_desc_count;
tx_pending = c->wq_desc_count;
- if (ring->rx_pending > ENIC_MAX_RQ_DESCS ||
+ if (ring->rx_pending > c->max_rq_ring ||
ring->rx_pending < ENIC_MIN_RQ_DESCS) {
netdev_info(netdev, "rx pending (%u) not in range [%u,%u]",
ring->rx_pending, ENIC_MIN_RQ_DESCS,
- ENIC_MAX_RQ_DESCS);
+ c->max_rq_ring);
return -EINVAL;
}
- if (ring->tx_pending > ENIC_MAX_WQ_DESCS ||
+ if (ring->tx_pending > c->max_wq_ring ||
ring->tx_pending < ENIC_MIN_WQ_DESCS) {
netdev_info(netdev, "tx pending (%u) not in range [%u,%u]",
ring->tx_pending, ENIC_MIN_WQ_DESCS,
- ENIC_MAX_WQ_DESCS);
+ c->max_wq_ring);
return -EINVAL;
}
if (running)
@@ -337,7 +337,7 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
for (i = 0; i < NUM_ENIC_GEN_STATS; i++)
*(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index];
for (i = 0; i < enic->rq_count; i++) {
- struct enic_rq_stats *rqstats = &enic->rq_stats[i];
+ struct enic_rq_stats *rqstats = &enic->rq[i].stats;
int index;
for (j = 0; j < NUM_ENIC_PER_RQ_STATS; j++) {
@@ -346,7 +346,7 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
}
}
for (i = 0; i < enic->wq_count; i++) {
- struct enic_wq_stats *wqstats = &enic->wq_stats[i];
+ struct enic_wq_stats *wqstats = &enic->wq[i].stats;
int index;
for (j = 0; j < NUM_ENIC_PER_WQ_STATS; j++) {
@@ -528,8 +528,10 @@ static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
return 0;
}
-static int enic_get_rx_flow_hash(struct enic *enic, struct ethtool_rxnfc *cmd)
+static int enic_get_rx_flow_hash(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct enic *enic = netdev_priv(dev);
u8 rss_hash_type = 0;
cmd->data = 0;
@@ -597,9 +599,6 @@ static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
ret = enic_grxclsrule(enic, cmd);
spin_unlock_bh(&enic->rfs_h.lock);
break;
- case ETHTOOL_GRXFH:
- ret = enic_get_rx_flow_hash(enic, cmd);
- break;
default:
ret = -EOPNOTSUPP;
break;
@@ -608,43 +607,6 @@ static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return ret;
}
-static int enic_get_tunable(struct net_device *dev,
- const struct ethtool_tunable *tuna, void *data)
-{
- struct enic *enic = netdev_priv(dev);
- int ret = 0;
-
- switch (tuna->id) {
- case ETHTOOL_RX_COPYBREAK:
- *(u32 *)data = enic->rx_copybreak;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static int enic_set_tunable(struct net_device *dev,
- const struct ethtool_tunable *tuna,
- const void *data)
-{
- struct enic *enic = netdev_priv(dev);
- int ret = 0;
-
- switch (tuna->id) {
- case ETHTOOL_RX_COPYBREAK:
- enic->rx_copybreak = *(u32 *)data;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
static u32 enic_get_rxfh_key_size(struct net_device *netdev)
{
return ENIC_RSS_LEN;
@@ -695,8 +657,8 @@ static void enic_get_channels(struct net_device *netdev,
switch (vnic_dev_get_intr_mode(enic->vdev)) {
case VNIC_DEV_INTR_MODE_MSIX:
- channels->max_rx = ENIC_RQ_MAX;
- channels->max_tx = ENIC_WQ_MAX;
+ channels->max_rx = min(enic->rq_avail, ENIC_RQ_MAX);
+ channels->max_tx = min(enic->wq_avail, ENIC_WQ_MAX);
channels->rx_count = enic->rq_count;
channels->tx_count = enic->wq_count;
break;
@@ -727,11 +689,10 @@ static const struct ethtool_ops enic_ethtool_ops = {
.get_coalesce = enic_get_coalesce,
.set_coalesce = enic_set_coalesce,
.get_rxnfc = enic_get_rxnfc,
- .get_tunable = enic_get_tunable,
- .set_tunable = enic_set_tunable,
.get_rxfh_key_size = enic_get_rxfh_key_size,
.get_rxfh = enic_get_rxfh,
.set_rxfh = enic_set_rxfh,
+ .get_rxfh_fields = enic_get_rx_flow_hash,
.get_link_ksettings = enic_get_ksettings,
.get_ts_info = enic_get_ts_info,
.get_channels = enic_get_channels,
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index ffed14b63d41..6bc8dfdb3d4b 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -58,18 +58,15 @@
#include "enic_dev.h"
#include "enic_pp.h"
#include "enic_clsf.h"
+#include "enic_rq.h"
+#include "enic_wq.h"
#define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ)
-#define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS)
-#define MAX_TSO (1 << 16)
-#define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN 0x0044 /* enet dynamic vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */
-#define RX_COPYBREAK_DEFAULT 256
-
/* Supported devices */
static const struct pci_device_id enic_id_table[] = {
{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
@@ -109,7 +106,7 @@ static struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
{0, 0}, /* 0 - 4 Gbps */
{0, 3}, /* 4 - 10 Gbps */
- {3, 6}, /* 10 - 40 Gbps */
+ {3, 6}, /* 10+ Gbps */
};
static void enic_init_affinity_hint(struct enic *enic)
@@ -322,54 +319,6 @@ int enic_is_valid_vf(struct enic *enic, int vf)
#endif
}
-static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
-{
- struct enic *enic = vnic_dev_priv(wq->vdev);
-
- if (buf->sop)
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_TO_DEVICE);
- else
- dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_TO_DEVICE);
-
- if (buf->os_buf)
- dev_kfree_skb_any(buf->os_buf);
-}
-
-static void enic_wq_free_buf(struct vnic_wq *wq,
- struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(wq->vdev);
-
- enic->wq_stats[wq->index].cq_work++;
- enic->wq_stats[wq->index].cq_bytes += buf->len;
- enic_free_wq_buf(wq, buf);
-}
-
-static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
- u8 type, u16 q_number, u16 completed_index, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(vdev);
-
- spin_lock(&enic->wq_lock[q_number]);
-
- vnic_wq_service(&enic->wq[q_number], cq_desc,
- completed_index, enic_wq_free_buf,
- opaque);
-
- if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
- vnic_wq_desc_avail(&enic->wq[q_number]) >=
- (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) {
- netif_wake_subqueue(enic->netdev, q_number);
- enic->wq_stats[q_number].wake++;
- }
-
- spin_unlock(&enic->wq_lock[q_number]);
-
- return 0;
-}
-
static bool enic_log_q_error(struct enic *enic)
{
unsigned int i;
@@ -377,7 +326,7 @@ static bool enic_log_q_error(struct enic *enic)
bool err = false;
for (i = 0; i < enic->wq_count; i++) {
- error_status = vnic_wq_error_status(&enic->wq[i]);
+ error_status = vnic_wq_error_status(&enic->wq[i].vwq);
err |= error_status;
if (error_status)
netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
@@ -385,7 +334,7 @@ static bool enic_log_q_error(struct enic *enic)
}
for (i = 0; i < enic->rq_count; i++) {
- error_status = vnic_rq_error_status(&enic->rq[i]);
+ error_status = vnic_rq_error_status(&enic->rq[i].vrq);
err |= error_status;
if (error_status)
netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
@@ -428,6 +377,36 @@ static void enic_mtu_check(struct enic *enic)
}
}
+static void enic_set_rx_coal_setting(struct enic *enic)
+{
+ unsigned int speed;
+ int index = -1;
+ struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
+
+ /* 1. Read the link speed from fw
+ * 2. Pick the default range for the speed
+ * 3. Update it in enic->rx_coalesce_setting
+ */
+ speed = vnic_dev_port_speed(enic->vdev);
+ if (speed > ENIC_LINK_SPEED_10G)
+ index = ENIC_LINK_40G_INDEX;
+ else if (speed > ENIC_LINK_SPEED_4G)
+ index = ENIC_LINK_10G_INDEX;
+ else
+ index = ENIC_LINK_4G_INDEX;
+
+ rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
+ rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
+ rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;
+
+ /* Start with the value provided by UCSM */
+ for (index = 0; index < enic->rq_count; index++)
+ enic->cq[index].cur_rx_coal_timeval =
+ enic->config.intr_timer_usec;
+
+ rx_coal->use_adaptive_rx_coalesce = 1;
+}
+
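
enic_set_rx_coal_setting() is moved here so it runs on every link-up and picks an adaptive-coalescing range from mod_range[] based on the port speed reported by firmware. A short standalone sketch of that selection; the ENIC_LINK_SPEED_* values (in Mbps) and the enum ordering are assumptions here, only the comparison structure is taken from the patch:

#include <stdio.h>

/* Assumed thresholds; the real ENIC_LINK_SPEED_* values live in enic.h. */
#define ENIC_LINK_SPEED_4G	4000u
#define ENIC_LINK_SPEED_10G	10000u

enum { ENIC_LINK_4G_INDEX, ENIC_LINK_10G_INDEX, ENIC_LINK_40G_INDEX };

static int coal_range_index(unsigned int speed_mbps)
{
	if (speed_mbps > ENIC_LINK_SPEED_10G)
		return ENIC_LINK_40G_INDEX;	/* 10+ Gbps range {3, 6} */
	if (speed_mbps > ENIC_LINK_SPEED_4G)
		return ENIC_LINK_10G_INDEX;	/* 4 - 10 Gbps range {0, 3} */
	return ENIC_LINK_4G_INDEX;		/* 0 - 4 Gbps range {0, 0} */
}

int main(void)
{
	unsigned int speeds[] = { 1000, 10000, 25000, 100000 };

	for (int i = 0; i < 4; i++)
		printf("%u Mbps -> index %d\n", speeds[i],
		       coal_range_index(speeds[i]));
	return 0;
}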
static void enic_link_check(struct enic *enic)
{
int link_status = vnic_dev_link_status(enic->vdev);
@@ -436,6 +415,7 @@ static void enic_link_check(struct enic *enic)
if (link_status && !carrier_ok) {
netdev_info(enic->netdev, "Link UP\n");
netif_carrier_on(enic->netdev);
+ enic_set_rx_coal_setting(enic);
} else if (!link_status && carrier_ok) {
netdev_info(enic->netdev, "Link DOWN\n");
netif_carrier_off(enic->netdev);
@@ -598,9 +578,9 @@ static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
/* The enic_queue_wq_desc() above does not do HW checksum */
- enic->wq_stats[wq->index].csum_none++;
- enic->wq_stats[wq->index].packets++;
- enic->wq_stats[wq->index].bytes += skb->len;
+ enic->wq[wq->index].stats.csum_none++;
+ enic->wq[wq->index].stats.packets++;
+ enic->wq[wq->index].stats.bytes += skb->len;
return err;
}
@@ -634,9 +614,9 @@ static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
if (!eop)
err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
- enic->wq_stats[wq->index].csum_partial++;
- enic->wq_stats[wq->index].packets++;
- enic->wq_stats[wq->index].bytes += skb->len;
+ enic->wq[wq->index].stats.csum_partial++;
+ enic->wq[wq->index].stats.packets++;
+ enic->wq[wq->index].stats.bytes += skb->len;
return err;
}
@@ -699,11 +679,11 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
if (skb->encapsulation) {
hdr_len = skb_inner_tcp_all_headers(skb);
enic_preload_tcp_csum_encap(skb);
- enic->wq_stats[wq->index].encap_tso++;
+ enic->wq[wq->index].stats.encap_tso++;
} else {
hdr_len = skb_tcp_all_headers(skb);
enic_preload_tcp_csum(skb);
- enic->wq_stats[wq->index].tso++;
+ enic->wq[wq->index].stats.tso++;
}
/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
@@ -757,8 +737,8 @@ tso_out_stats:
pkts = len / mss;
if ((len % mss) > 0)
pkts++;
- enic->wq_stats[wq->index].packets += pkts;
- enic->wq_stats[wq->index].bytes += (len + (pkts * hdr_len));
+ enic->wq[wq->index].stats.packets += pkts;
+ enic->wq[wq->index].stats.bytes += (len + (pkts * hdr_len));
return 0;
}
@@ -792,9 +772,9 @@ static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq,
if (!eop)
err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
- enic->wq_stats[wq->index].encap_csum++;
- enic->wq_stats[wq->index].packets++;
- enic->wq_stats[wq->index].bytes += skb->len;
+ enic->wq[wq->index].stats.encap_csum++;
+ enic->wq[wq->index].stats.packets++;
+ enic->wq[wq->index].stats.bytes += skb->len;
return err;
}
@@ -812,7 +792,7 @@ static inline int enic_queue_wq_skb(struct enic *enic,
/* VLAN tag from trunking driver */
vlan_tag_insert = 1;
vlan_tag = skb_vlan_tag_get(skb);
- enic->wq_stats[wq->index].add_vlan++;
+ enic->wq[wq->index].stats.add_vlan++;
} else if (enic->loop_enable) {
vlan_tag = enic->loop_tag;
loopback = 1;
@@ -859,11 +839,11 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
struct netdev_queue *txq;
txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
- wq = &enic->wq[txq_map];
+ wq = &enic->wq[txq_map].vwq;
if (skb->len <= 0) {
dev_kfree_skb_any(skb);
- enic->wq_stats[wq->index].null_pkt++;
+ enic->wq[wq->index].stats.null_pkt++;
return NETDEV_TX_OK;
}
@@ -878,19 +858,19 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
skb_linearize(skb)) {
dev_kfree_skb_any(skb);
- enic->wq_stats[wq->index].skb_linear_fail++;
+ enic->wq[wq->index].stats.skb_linear_fail++;
return NETDEV_TX_OK;
}
- spin_lock(&enic->wq_lock[txq_map]);
+ spin_lock(&enic->wq[txq_map].lock);
if (vnic_wq_desc_avail(wq) <
skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
netif_tx_stop_queue(txq);
/* This is a hard error, log it */
netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
- spin_unlock(&enic->wq_lock[txq_map]);
- enic->wq_stats[wq->index].desc_full_awake++;
+ spin_unlock(&enic->wq[txq_map].lock);
+ enic->wq[wq->index].stats.desc_full_awake++;
return NETDEV_TX_BUSY;
}
@@ -899,14 +879,14 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) {
netif_tx_stop_queue(txq);
- enic->wq_stats[wq->index].stopped++;
+ enic->wq[wq->index].stats.stopped++;
}
skb_tx_timestamp(skb);
if (!netdev_xmit_more() || netif_xmit_stopped(txq))
vnic_wq_doorbell(wq);
error:
- spin_unlock(&enic->wq_lock[txq_map]);
+ spin_unlock(&enic->wq[txq_map].lock);
return NETDEV_TX_OK;
}
@@ -940,10 +920,10 @@ static void enic_get_stats(struct net_device *netdev,
net_stats->rx_errors = stats->rx.rx_errors;
net_stats->multicast = stats->rx.rx_multicast_frames_ok;
- for (i = 0; i < ENIC_RQ_MAX; i++) {
- struct enic_rq_stats *rqs = &enic->rq_stats[i];
+ for (i = 0; i < enic->rq_count; i++) {
+ struct enic_rq_stats *rqs = &enic->rq[i].stats;
- if (!enic->rq->ctrl)
+ if (!enic->rq[i].vrq.ctrl)
break;
pkt_truncated += rqs->pkt_truncated;
bad_fcs += rqs->bad_fcs;
@@ -1282,243 +1262,6 @@ nla_put_failure:
return -EMSGSIZE;
}
-static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
-{
- struct enic *enic = vnic_dev_priv(rq->vdev);
-
- if (!buf->os_buf)
- return;
-
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(buf->os_buf);
- buf->os_buf = NULL;
-}
-
-static int enic_rq_alloc_buf(struct vnic_rq *rq)
-{
- struct enic *enic = vnic_dev_priv(rq->vdev);
- struct net_device *netdev = enic->netdev;
- struct sk_buff *skb;
- unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
- unsigned int os_buf_index = 0;
- dma_addr_t dma_addr;
- struct vnic_rq_buf *buf = rq->to_use;
-
- if (buf->os_buf) {
- enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
- buf->len);
-
- return 0;
- }
- skb = netdev_alloc_skb_ip_align(netdev, len);
- if (!skb) {
- enic->rq_stats[rq->index].no_skb++;
- return -ENOMEM;
- }
-
- dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len,
- DMA_FROM_DEVICE);
- if (unlikely(enic_dma_map_check(enic, dma_addr))) {
- dev_kfree_skb(skb);
- return -ENOMEM;
- }
-
- enic_queue_rq_desc(rq, skb, os_buf_index,
- dma_addr, len);
-
- return 0;
-}
-
-static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
- u32 pkt_len)
-{
- if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len)
- pkt_size->large_pkt_bytes_cnt += pkt_len;
- else
- pkt_size->small_pkt_bytes_cnt += pkt_len;
-}
-
-static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
- struct vnic_rq_buf *buf, u16 len)
-{
- struct enic *enic = netdev_priv(netdev);
- struct sk_buff *new_skb;
-
- if (len > enic->rx_copybreak)
- return false;
- new_skb = netdev_alloc_skb_ip_align(netdev, len);
- if (!new_skb)
- return false;
- dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr, len,
- DMA_FROM_DEVICE);
- memcpy(new_skb->data, (*skb)->data, len);
- *skb = new_skb;
-
- return true;
-}
-
-static void enic_rq_indicate_buf(struct vnic_rq *rq,
- struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
- int skipped, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(rq->vdev);
- struct net_device *netdev = enic->netdev;
- struct sk_buff *skb;
- struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
- struct enic_rq_stats *rqstats = &enic->rq_stats[rq->index];
-
- u8 type, color, eop, sop, ingress_port, vlan_stripped;
- u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
- u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
- u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
- u8 packet_error;
- u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
- u32 rss_hash;
- bool outer_csum_ok = true, encap = false;
-
- rqstats->packets++;
- if (skipped) {
- rqstats->desc_skip++;
- return;
- }
-
- skb = buf->os_buf;
-
- cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
- &type, &color, &q_number, &completed_index,
- &ingress_port, &fcoe, &eop, &sop, &rss_type,
- &csum_not_calc, &rss_hash, &bytes_written,
- &packet_error, &vlan_stripped, &vlan_tci, &checksum,
- &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
- &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
- &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
- &fcs_ok);
-
- if (packet_error) {
-
- if (!fcs_ok) {
- if (bytes_written > 0)
- rqstats->bad_fcs++;
- else if (bytes_written == 0)
- rqstats->pkt_truncated++;
- }
-
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
- buf->os_buf = NULL;
-
- return;
- }
-
- if (eop && bytes_written > 0) {
-
- /* Good receive
- */
- rqstats->bytes += bytes_written;
- if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
- buf->os_buf = NULL;
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr,
- buf->len, DMA_FROM_DEVICE);
- }
- prefetch(skb->data - NET_IP_ALIGN);
-
- skb_put(skb, bytes_written);
- skb->protocol = eth_type_trans(skb, netdev);
- skb_record_rx_queue(skb, q_number);
- if ((netdev->features & NETIF_F_RXHASH) && rss_hash &&
- (type == 3)) {
- switch (rss_type) {
- case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4:
- case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
- case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX:
- skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
- rqstats->l4_rss_hash++;
- break;
- case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4:
- case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6:
- case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX:
- skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
- rqstats->l3_rss_hash++;
- break;
- }
- }
- if (enic->vxlan.vxlan_udp_port_number) {
- switch (enic->vxlan.patch_level) {
- case 0:
- if (fcoe) {
- encap = true;
- outer_csum_ok = fcoe_fc_crc_ok;
- }
- break;
- case 2:
- if ((type == 7) &&
- (rss_hash & BIT(0))) {
- encap = true;
- outer_csum_ok = (rss_hash & BIT(1)) &&
- (rss_hash & BIT(2));
- }
- break;
- }
- }
-
- /* Hardware does not provide whole packet checksum. It only
- * provides pseudo checksum. Since hw validates the packet
- * checksum but not provide us the checksum value. use
- * CHECSUM_UNNECESSARY.
- *
- * In case of encap pkt tcp_udp_csum_ok/tcp_udp_csum_ok is
- * inner csum_ok. outer_csum_ok is set by hw when outer udp
- * csum is correct or is zero.
- */
- if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
- tcp_udp_csum_ok && outer_csum_ok &&
- (ipv4_csum_ok || ipv6)) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = encap;
- if (encap)
- rqstats->csum_unnecessary_encap++;
- else
- rqstats->csum_unnecessary++;
- }
-
- if (vlan_stripped) {
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
- rqstats->vlan_stripped++;
- }
- skb_mark_napi_id(skb, &enic->napi[rq->index]);
- if (!(netdev->features & NETIF_F_GRO))
- netif_receive_skb(skb);
- else
- napi_gro_receive(&enic->napi[q_number], skb);
- if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
- enic_intr_update_pkt_size(&cq->pkt_size_counter,
- bytes_written);
- } else {
-
- /* Buffer overflow
- */
- rqstats->pkt_truncated++;
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
- buf->os_buf = NULL;
- }
-}
-
-static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
- u8 type, u16 q_number, u16 completed_index, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(vdev);
-
- vnic_rq_service(&enic->rq[q_number], cq_desc,
- completed_index, VNIC_RQ_RETURN_DESC,
- enic_rq_indicate_buf, opaque);
-
- return 0;
-}
-
static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
unsigned int intr = enic_msix_rq_intr(enic, rq->index);
@@ -1589,12 +1332,10 @@ static int enic_poll(struct napi_struct *napi, int budget)
unsigned int work_done, rq_work_done = 0, wq_work_done;
int err;
- wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
- enic_wq_service, NULL);
+ wq_work_done = enic_wq_cq_service(enic, cq_wq, wq_work_to_do);
if (budget > 0)
- rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
- rq_work_to_do, enic_rq_service, NULL);
+ rq_work_done = enic_rq_cq_service(enic, cq_rq, rq_work_to_do);
/* Accumulate intr event credits for this polling
* cycle. An intr event is the completion of a
@@ -1609,7 +1350,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
0 /* don't unmask intr */,
0 /* don't reset intr timer */);
- err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
+ err = vnic_rq_fill(&enic->rq[0].vrq, enic_rq_alloc_buf);
/* Buffer allocation failed. Stay in polling
* mode so we can try to fill the ring again.
@@ -1621,7 +1362,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
/* Call the function which refreshes the intr coalescing timer
* value based on the traffic.
*/
- enic_calc_int_moderation(enic, &enic->rq[0]);
+ enic_calc_int_moderation(enic, &enic->rq[0].vrq);
if ((rq_work_done < budget) && napi_complete_done(napi, rq_work_done)) {
@@ -1630,11 +1371,11 @@ static int enic_poll(struct napi_struct *napi, int budget)
*/
if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
- enic_set_int_moderation(enic, &enic->rq[0]);
+ enic_set_int_moderation(enic, &enic->rq[0].vrq);
vnic_intr_unmask(&enic->intr[intr]);
- enic->rq_stats[0].napi_complete++;
+ enic->rq[0].stats.napi_complete++;
} else {
- enic->rq_stats[0].napi_repoll++;
+ enic->rq[0].stats.napi_repoll++;
}
return rq_work_done;
@@ -1683,7 +1424,7 @@ static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
struct net_device *netdev = napi->dev;
struct enic *enic = netdev_priv(netdev);
unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count;
- struct vnic_wq *wq = &enic->wq[wq_index];
+ struct vnic_wq *wq = &enic->wq[wq_index].vwq;
unsigned int cq;
unsigned int intr;
unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET;
@@ -1693,8 +1434,8 @@ static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
wq_irq = wq->index;
cq = enic_cq_wq(enic, wq_irq);
intr = enic_msix_wq_intr(enic, wq_irq);
- wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do,
- enic_wq_service, NULL);
+
+ wq_work_done = enic_wq_cq_service(enic, cq, wq_work_to_do);
vnic_intr_return_credits(&enic->intr[intr], wq_work_done,
0 /* don't unmask intr */,
@@ -1723,8 +1464,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
*/
if (budget > 0)
- work_done = vnic_cq_service(&enic->cq[cq],
- work_to_do, enic_rq_service, NULL);
+ work_done = enic_rq_cq_service(enic, cq, work_to_do);
/* Return intr event credits for this polling
* cycle. An intr event is the completion of a
@@ -1737,7 +1477,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
0 /* don't unmask intr */,
0 /* don't reset intr timer */);
- err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
+ err = vnic_rq_fill(&enic->rq[rq].vrq, enic_rq_alloc_buf);
/* Buffer allocation failed. Stay in polling mode
* so we can try to fill the ring again.
@@ -1749,7 +1489,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
/* Call the function which refreshes the intr coalescing timer
* value based on the traffic.
*/
- enic_calc_int_moderation(enic, &enic->rq[rq]);
+ enic_calc_int_moderation(enic, &enic->rq[rq].vrq);
if ((work_done < budget) && napi_complete_done(napi, work_done)) {
@@ -1758,11 +1498,11 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
*/
if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
- enic_set_int_moderation(enic, &enic->rq[rq]);
+ enic_set_int_moderation(enic, &enic->rq[rq].vrq);
vnic_intr_unmask(&enic->intr[intr]);
- enic->rq_stats[rq].napi_complete++;
+ enic->rq[rq].stats.napi_complete++;
} else {
- enic->rq_stats[rq].napi_repoll++;
+ enic->rq[rq].stats.napi_repoll++;
}
return work_done;
@@ -1770,7 +1510,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
static void enic_notify_timer(struct timer_list *t)
{
- struct enic *enic = from_timer(enic, t, notify_timer);
+ struct enic *enic = timer_container_of(enic, t, notify_timer);
enic_notify_check(enic);
@@ -1792,7 +1532,7 @@ static void enic_free_intr(struct enic *enic)
free_irq(enic->pdev->irq, enic);
break;
case VNIC_DEV_INTR_MODE_MSIX:
- for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
+ for (i = 0; i < enic->intr_count; i++)
if (enic->msix[i].requested)
free_irq(enic->msix_entry[i].vector,
enic->msix[i].devid);
@@ -1859,7 +1599,7 @@ static int enic_request_intr(struct enic *enic)
enic->msix[intr].isr = enic_isr_msix_notify;
enic->msix[intr].devid = enic;
- for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
+ for (i = 0; i < enic->intr_count; i++)
enic->msix[i].requested = 0;
for (i = 0; i < enic->intr_count; i++) {
@@ -1901,36 +1641,6 @@ static void enic_synchronize_irqs(struct enic *enic)
}
}
-static void enic_set_rx_coal_setting(struct enic *enic)
-{
- unsigned int speed;
- int index = -1;
- struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
-
- /* 1. Read the link speed from fw
- * 2. Pick the default range for the speed
- * 3. Update it in enic->rx_coalesce_setting
- */
- speed = vnic_dev_port_speed(enic->vdev);
- if (ENIC_LINK_SPEED_10G < speed)
- index = ENIC_LINK_40G_INDEX;
- else if (ENIC_LINK_SPEED_4G < speed)
- index = ENIC_LINK_10G_INDEX;
- else
- index = ENIC_LINK_4G_INDEX;
-
- rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
- rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
- rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;
-
- /* Start with the value provided by UCSM */
- for (index = 0; index < enic->rq_count; index++)
- enic->cq[index].cur_rx_coal_timeval =
- enic->config.intr_timer_usec;
-
- rx_coal->use_adaptive_rx_coalesce = 1;
-}
-
static int enic_dev_notify_set(struct enic *enic)
{
int err;
@@ -1971,6 +1681,17 @@ static int enic_open(struct net_device *netdev)
struct enic *enic = netdev_priv(netdev);
unsigned int i;
int err, ret;
+ unsigned int max_pkt_len = netdev->mtu + VLAN_ETH_HLEN;
+ struct page_pool_params pp_params = {
+ .order = get_order(max_pkt_len),
+ .pool_size = enic->config.rq_desc_count,
+ .nid = dev_to_node(&enic->pdev->dev),
+ .dev = &enic->pdev->dev,
+ .dma_dir = DMA_FROM_DEVICE,
+ .max_len = (max_pkt_len > PAGE_SIZE) ? max_pkt_len : PAGE_SIZE,
+ .netdev = netdev,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ };
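
The page_pool_params above size each pool buffer from the MTU: the page order comes from mtu + VLAN_ETH_HLEN and max_len never drops below PAGE_SIZE. A standalone sketch of that sizing, assuming 4 KiB pages and VLAN_ETH_HLEN = 18; order_for() mimics the kernel's get_order():

#include <stdio.h>

#define PAGE_SIZE	4096u		/* assumption: 4 KiB pages */
#define VLAN_ETH_HLEN	18u		/* Ethernet header plus one VLAN tag */

/* Smallest order such that (PAGE_SIZE << order) >= size. */
static unsigned int order_for(unsigned int size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned int mtus[] = { 1500, 4096, 9000 };

	for (int i = 0; i < 3; i++) {
		unsigned int max_pkt_len = mtus[i] + VLAN_ETH_HLEN;
		unsigned int order = order_for(max_pkt_len);
		unsigned int max_len = max_pkt_len > PAGE_SIZE ?
				       max_pkt_len : PAGE_SIZE;

		printf("mtu %u -> buffer %u bytes, order %u, max_len %u\n",
		       mtus[i], max_pkt_len, order, max_len);
	}
	return 0;
}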
err = enic_request_intr(enic);
if (err) {
@@ -1988,11 +1709,21 @@ static int enic_open(struct net_device *netdev)
}
for (i = 0; i < enic->rq_count; i++) {
+ /* create a page pool for each RQ */
+ pp_params.napi = &enic->napi[i];
+ pp_params.queue_idx = i;
+ enic->rq[i].pool = page_pool_create(&pp_params);
+ if (IS_ERR(enic->rq[i].pool)) {
+ err = PTR_ERR(enic->rq[i].pool);
+ enic->rq[i].pool = NULL;
+ goto err_out_free_rq;
+ }
+
/* enable rq before updating rq desc */
- vnic_rq_enable(&enic->rq[i]);
- vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
+ vnic_rq_enable(&enic->rq[i].vrq);
+ vnic_rq_fill(&enic->rq[i].vrq, enic_rq_alloc_buf);
/* Need at least one buffer on ring to get going */
- if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
+ if (vnic_rq_desc_used(&enic->rq[i].vrq) == 0) {
netdev_err(netdev, "Unable to alloc receive buffers\n");
err = -ENOMEM;
goto err_out_free_rq;
@@ -2000,7 +1731,7 @@ static int enic_open(struct net_device *netdev)
}
for (i = 0; i < enic->wq_count; i++)
- vnic_wq_enable(&enic->wq[i]);
+ vnic_wq_enable(&enic->wq[i].vwq);
if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
enic_dev_add_station_addr(enic);
@@ -2027,9 +1758,12 @@ static int enic_open(struct net_device *netdev)
err_out_free_rq:
for (i = 0; i < enic->rq_count; i++) {
- ret = vnic_rq_disable(&enic->rq[i]);
- if (!ret)
- vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+ ret = vnic_rq_disable(&enic->rq[i].vrq);
+ if (!ret) {
+ vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf);
+ page_pool_destroy(enic->rq[i].pool);
+ enic->rq[i].pool = NULL;
+ }
}
enic_dev_notify_unset(enic);
err_out_free_intr:
@@ -2053,7 +1787,7 @@ static int enic_stop(struct net_device *netdev)
enic_synchronize_irqs(enic);
- del_timer_sync(&enic->notify_timer);
+ timer_delete_sync(&enic->notify_timer);
enic_rfs_flw_tbl_free(enic);
enic_dev_disable(enic);
@@ -2071,12 +1805,12 @@ static int enic_stop(struct net_device *netdev)
enic_dev_del_station_addr(enic);
for (i = 0; i < enic->wq_count; i++) {
- err = vnic_wq_disable(&enic->wq[i]);
+ err = vnic_wq_disable(&enic->wq[i].vwq);
if (err)
return err;
}
for (i = 0; i < enic->rq_count; i++) {
- err = vnic_rq_disable(&enic->rq[i]);
+ err = vnic_rq_disable(&enic->rq[i].vrq);
if (err)
return err;
}
@@ -2086,9 +1820,12 @@ static int enic_stop(struct net_device *netdev)
enic_free_intr(enic);
for (i = 0; i < enic->wq_count; i++)
- vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
- for (i = 0; i < enic->rq_count; i++)
- vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+ vnic_wq_clean(&enic->wq[i].vwq, enic_free_wq_buf);
+ for (i = 0; i < enic->rq_count; i++) {
+ vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf);
+ page_pool_destroy(enic->rq[i].pool);
+ enic->rq[i].pool = NULL;
+ }
for (i = 0; i < enic->cq_count; i++)
vnic_cq_clean(&enic->cq[i]);
for (i = 0; i < enic->intr_count; i++)
@@ -2127,10 +1864,10 @@ static int enic_change_mtu(struct net_device *netdev, int new_mtu)
if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
return -EOPNOTSUPP;
- if (netdev->mtu > enic->port_mtu)
+ if (new_mtu > enic->port_mtu)
netdev_warn(netdev,
"interface MTU (%d) set higher than port MTU (%d)\n",
- netdev->mtu, enic->port_mtu);
+ new_mtu, enic->port_mtu);
return _enic_change_mtu(netdev, new_mtu);
}
@@ -2404,6 +2141,7 @@ static void enic_reset(struct work_struct *work)
enic_init_vnic_resources(enic);
enic_set_rss_nic_cfg(enic);
enic_dev_set_ig_vlan_rewrite_mode(enic);
+ enic_ext_cq(enic);
enic_open(enic->netdev);
/* Allow infiniband to fiddle with the device again */
@@ -2430,6 +2168,7 @@ static void enic_tx_hang_reset(struct work_struct *work)
enic_init_vnic_resources(enic);
enic_set_rss_nic_cfg(enic);
enic_dev_set_ig_vlan_rewrite_mode(enic);
+ enic_ext_cq(enic);
enic_open(enic->netdev);
/* Allow infiniband to fiddle with the device again */
@@ -2442,112 +2181,56 @@ static void enic_tx_hang_reset(struct work_struct *work)
static int enic_set_intr_mode(struct enic *enic)
{
- unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
- unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
unsigned int i;
+ int num_intr;
/* Set interrupt mode (INTx, MSI, MSI-X) depending
* on system capabilities.
*
* Try MSI-X first
- *
- * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
- * (the second to last INTR is used for WQ/RQ errors)
- * (the last INTR is used for notifications)
*/
- BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
- for (i = 0; i < n + m + 2; i++)
- enic->msix_entry[i].entry = i;
-
- /* Use multiple RQs if RSS is enabled
- */
-
- if (ENIC_SETTING(enic, RSS) &&
- enic->config.intr_mode < 1 &&
- enic->rq_count >= n &&
- enic->wq_count >= m &&
- enic->cq_count >= n + m &&
- enic->intr_count >= n + m + 2) {
-
- if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
- n + m + 2, n + m + 2) > 0) {
-
- enic->rq_count = n;
- enic->wq_count = m;
- enic->cq_count = n + m;
- enic->intr_count = n + m + 2;
-
- vnic_dev_set_intr_mode(enic->vdev,
- VNIC_DEV_INTR_MODE_MSIX);
-
- return 0;
- }
- }
-
if (enic->config.intr_mode < 1 &&
- enic->rq_count >= 1 &&
- enic->wq_count >= m &&
- enic->cq_count >= 1 + m &&
- enic->intr_count >= 1 + m + 2) {
- if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
- 1 + m + 2, 1 + m + 2) > 0) {
-
- enic->rq_count = 1;
- enic->wq_count = m;
- enic->cq_count = 1 + m;
- enic->intr_count = 1 + m + 2;
-
+ enic->intr_avail >= ENIC_MSIX_MIN_INTR) {
+ for (i = 0; i < enic->intr_avail; i++)
+ enic->msix_entry[i].entry = i;
+
+ num_intr = pci_enable_msix_range(enic->pdev, enic->msix_entry,
+ ENIC_MSIX_MIN_INTR,
+ enic->intr_avail);
+ if (num_intr > 0) {
vnic_dev_set_intr_mode(enic->vdev,
- VNIC_DEV_INTR_MODE_MSIX);
-
+ VNIC_DEV_INTR_MODE_MSIX);
+ enic->intr_avail = num_intr;
return 0;
}
}
/* Next try MSI
*
- * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
+ * We need 1 INTR
*/
if (enic->config.intr_mode < 2 &&
- enic->rq_count >= 1 &&
- enic->wq_count >= 1 &&
- enic->cq_count >= 2 &&
- enic->intr_count >= 1 &&
+ enic->intr_avail >= 1 &&
!pci_enable_msi(enic->pdev)) {
-
- enic->rq_count = 1;
- enic->wq_count = 1;
- enic->cq_count = 2;
- enic->intr_count = 1;
-
+ enic->intr_avail = 1;
vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);
-
return 0;
}
/* Next try INTx
*
- * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
+ * We need 3 INTRs
* (the first INTR is used for WQ/RQ)
* (the second INTR is used for WQ/RQ errors)
* (the last INTR is used for notifications)
*/
if (enic->config.intr_mode < 3 &&
- enic->rq_count >= 1 &&
- enic->wq_count >= 1 &&
- enic->cq_count >= 2 &&
- enic->intr_count >= 3) {
-
- enic->rq_count = 1;
- enic->wq_count = 1;
- enic->cq_count = 2;
- enic->intr_count = 3;
-
+ enic->intr_avail >= 3) {
+ enic->intr_avail = 3;
vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);
-
return 0;
}
@@ -2572,11 +2255,82 @@ static void enic_clear_intr_mode(struct enic *enic)
vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}
+static int enic_adjust_resources(struct enic *enic)
+{
+ unsigned int max_queues;
+ unsigned int rq_default;
+ unsigned int rq_avail;
+ unsigned int wq_avail;
+
+ if (enic->rq_avail < 1 || enic->wq_avail < 1 || enic->cq_avail < 2) {
+ dev_err(enic_get_dev(enic),
+ "Not enough resources available rq: %d wq: %d cq: %d\n",
+ enic->rq_avail, enic->wq_avail,
+ enic->cq_avail);
+ return -ENOSPC;
+ }
+
+ if (is_kdump_kernel()) {
+ dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n");
+ enic->rq_avail = 1;
+ enic->wq_avail = 1;
+ enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS;
+ enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS;
+ enic->config.mtu = min_t(u16, 1500, enic->config.mtu);
+ }
+
+ /* if RSS isn't set, then we can only use one RQ */
+ if (!ENIC_SETTING(enic, RSS))
+ enic->rq_avail = 1;
+
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+ case VNIC_DEV_INTR_MODE_INTX:
+ case VNIC_DEV_INTR_MODE_MSI:
+ enic->rq_count = 1;
+ enic->wq_count = 1;
+ enic->cq_count = 2;
+ enic->intr_count = enic->intr_avail;
+ break;
+ case VNIC_DEV_INTR_MODE_MSIX:
+ /* Adjust the number of wqs/rqs/cqs/interrupts that will be
+ * used based on which resource is the most constrained
+ */
+ wq_avail = min(enic->wq_avail, ENIC_WQ_MAX);
+ rq_default = max(netif_get_num_default_rss_queues(),
+ ENIC_RQ_MIN_DEFAULT);
+ rq_avail = min3(enic->rq_avail, ENIC_RQ_MAX, rq_default);
+ max_queues = min(enic->cq_avail,
+ enic->intr_avail - ENIC_MSIX_RESERVED_INTR);
+ if (wq_avail + rq_avail <= max_queues) {
+ enic->rq_count = rq_avail;
+ enic->wq_count = wq_avail;
+ } else {
+ /* recalculate wq/rq count */
+ if (rq_avail < wq_avail) {
+ enic->rq_count = min(rq_avail, max_queues / 2);
+ enic->wq_count = max_queues - enic->rq_count;
+ } else {
+ enic->wq_count = min(wq_avail, max_queues / 2);
+ enic->rq_count = max_queues - enic->wq_count;
+ }
+ }
+ enic->cq_count = enic->rq_count + enic->wq_count;
+ enic->intr_count = enic->cq_count + ENIC_MSIX_RESERVED_INTR;
+
+ break;
+ default:
+ dev_err(enic_get_dev(enic), "Unknown interrupt mode\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
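
enic_adjust_resources() splits the available completion queues and MSI-X vectors between RX and TX when the device advertises more queues than can be serviced. A standalone sketch of the MSI-X balancing branch, taking wq_avail/rq_avail as the already-clamped values and using made-up resource counts:

#include <stdio.h>

#define ENIC_MSIX_RESERVED_INTR	2	/* error + notify vectors */

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static void split_queues(unsigned int wq_avail, unsigned int rq_avail,
			 unsigned int cq_avail, unsigned int intr_avail,
			 unsigned int *wq_count, unsigned int *rq_count)
{
	unsigned int max_queues = min_u(cq_avail,
					intr_avail - ENIC_MSIX_RESERVED_INTR);

	if (wq_avail + rq_avail <= max_queues) {
		*rq_count = rq_avail;
		*wq_count = wq_avail;
	} else if (rq_avail < wq_avail) {
		*rq_count = min_u(rq_avail, max_queues / 2);
		*wq_count = max_queues - *rq_count;
	} else {
		*wq_count = min_u(wq_avail, max_queues / 2);
		*rq_count = max_queues - *wq_count;
	}
}

int main(void)
{
	unsigned int wq, rq;

	/* 8 WQs and 8 RQs available, but only 16 CQs and 10 vectors. */
	split_queues(8, 8, 16, 10, &wq, &rq);
	printf("wq_count=%u rq_count=%u\n", wq, rq);	/* prints 4/4 */
	return 0;
}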
static void enic_get_queue_stats_rx(struct net_device *dev, int idx,
struct netdev_queue_stats_rx *rxs)
{
struct enic *enic = netdev_priv(dev);
- struct enic_rq_stats *rqstats = &enic->rq_stats[idx];
+ struct enic_rq_stats *rqstats = &enic->rq[idx].stats;
rxs->bytes = rqstats->bytes;
rxs->packets = rqstats->packets;
@@ -2584,13 +2338,14 @@ static void enic_get_queue_stats_rx(struct net_device *dev, int idx,
rxs->hw_drop_overruns = rqstats->pkt_truncated;
rxs->csum_unnecessary = rqstats->csum_unnecessary +
rqstats->csum_unnecessary_encap;
+ rxs->alloc_fail = rqstats->pp_alloc_fail;
}
static void enic_get_queue_stats_tx(struct net_device *dev, int idx,
struct netdev_queue_stats_tx *txs)
{
struct enic *enic = netdev_priv(dev);
- struct enic_wq_stats *wqstats = &enic->wq_stats[idx];
+ struct enic_wq_stats *wqstats = &enic->wq[idx].stats;
txs->bytes = wqstats->bytes;
txs->packets = wqstats->packets;
@@ -2611,6 +2366,7 @@ static void enic_get_base_stats(struct net_device *dev,
rxs->hw_drops = 0;
rxs->hw_drop_overruns = 0;
rxs->csum_unnecessary = 0;
+ rxs->alloc_fail = 0;
txs->bytes = 0;
txs->packets = 0;
txs->csum_none = 0;
@@ -2674,6 +2430,71 @@ static const struct netdev_stat_ops enic_netdev_stat_ops = {
.get_base_stats = enic_get_base_stats,
};
+static void enic_free_enic_resources(struct enic *enic)
+{
+ kfree(enic->wq);
+ enic->wq = NULL;
+
+ kfree(enic->rq);
+ enic->rq = NULL;
+
+ kfree(enic->cq);
+ enic->cq = NULL;
+
+ kfree(enic->napi);
+ enic->napi = NULL;
+
+ kfree(enic->msix_entry);
+ enic->msix_entry = NULL;
+
+ kfree(enic->msix);
+ enic->msix = NULL;
+
+ kfree(enic->intr);
+ enic->intr = NULL;
+}
+
+static int enic_alloc_enic_resources(struct enic *enic)
+{
+ enic->wq = kcalloc(enic->wq_avail, sizeof(struct enic_wq), GFP_KERNEL);
+ if (!enic->wq)
+ goto free_queues;
+
+ enic->rq = kcalloc(enic->rq_avail, sizeof(struct enic_rq), GFP_KERNEL);
+ if (!enic->rq)
+ goto free_queues;
+
+ enic->cq = kcalloc(enic->cq_avail, sizeof(struct vnic_cq), GFP_KERNEL);
+ if (!enic->cq)
+ goto free_queues;
+
+ enic->napi = kcalloc(enic->wq_avail + enic->rq_avail,
+ sizeof(struct napi_struct), GFP_KERNEL);
+ if (!enic->napi)
+ goto free_queues;
+
+ enic->msix_entry = kcalloc(enic->intr_avail, sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!enic->msix_entry)
+ goto free_queues;
+
+ enic->msix = kcalloc(enic->intr_avail, sizeof(struct enic_msix_entry),
+ GFP_KERNEL);
+ if (!enic->msix)
+ goto free_queues;
+
+ enic->intr = kcalloc(enic->intr_avail, sizeof(struct vnic_intr),
+ GFP_KERNEL);
+ if (!enic->intr)
+ goto free_queues;
+
+ return 0;
+
+free_queues:
+ enic_free_enic_resources(enic);
+ return -ENOMEM;
+}
+
static void enic_dev_deinit(struct enic *enic)
{
unsigned int i;
@@ -2691,18 +2512,7 @@ static void enic_dev_deinit(struct enic *enic)
enic_free_vnic_resources(enic);
enic_clear_intr_mode(enic);
enic_free_affinity_hint(enic);
-}
-
-static void enic_kdump_kernel_config(struct enic *enic)
-{
- if (is_kdump_kernel()) {
- dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n");
- enic->rq_count = 1;
- enic->wq_count = 1;
- enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS;
- enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS;
- enic->config.mtu = min_t(u16, 1500, enic->config.mtu);
- }
+ enic_free_enic_resources(enic);
}
static int enic_dev_init(struct enic *enic)
@@ -2734,19 +2544,28 @@ static int enic_dev_init(struct enic *enic)
enic_get_res_counts(enic);
- /* modify resource count if we are in kdump_kernel
- */
- enic_kdump_kernel_config(enic);
+ enic_ext_cq(enic);
- /* Set interrupt mode based on resource counts and system
- * capabilities
- */
+ err = enic_alloc_enic_resources(enic);
+ if (err) {
+ dev_err(dev, "Failed to allocate enic resources\n");
+ return err;
+ }
+
+ /* Set interrupt mode based on system capabilities */
err = enic_set_intr_mode(enic);
if (err) {
dev_err(dev, "Failed to set intr mode based on resource "
"counts and system capabilities, aborting\n");
- return err;
+ goto err_out_free_vnic_resources;
+ }
+
+ /* Adjust resource counts based on most constrained resources */
+ err = enic_adjust_resources(enic);
+ if (err) {
+ dev_err(dev, "Failed to adjust resources\n");
+ goto err_out_free_vnic_resources;
}
/* Allocate and configure vNIC resources
@@ -2788,6 +2607,7 @@ err_out_free_vnic_resources:
enic_free_affinity_hint(enic);
enic_clear_intr_mode(enic);
enic_free_vnic_resources(enic);
+ enic_free_enic_resources(enic);
return err;
}
@@ -2987,13 +2807,12 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
timer_setup(&enic->notify_timer, enic_notify_timer, 0);
enic_rfs_flw_tbl_init(enic);
- enic_set_rx_coal_setting(enic);
INIT_WORK(&enic->reset, enic_reset);
INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
for (i = 0; i < enic->wq_count; i++)
- spin_lock_init(&enic->wq_lock[i]);
+ spin_lock_init(&enic->wq[i].lock);
/* Register net device
*/
@@ -3103,7 +2922,6 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(dev, "Cannot register net device, aborting\n");
goto err_out_dev_deinit;
}
- enic->rx_copybreak = RX_COPYBREAK_DEFAULT;
return 0;
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.c b/drivers/net/ethernet/cisco/enic/enic_res.c
index 1c48aebdbab0..bbd3143ed73e 100644
--- a/drivers/net/ethernet/cisco/enic/enic_res.c
+++ b/drivers/net/ethernet/cisco/enic/enic_res.c
@@ -59,31 +59,38 @@ int enic_get_vnic_config(struct enic *enic)
GET_CONFIG(intr_timer_usec);
GET_CONFIG(loop_tag);
GET_CONFIG(num_arfs);
+ GET_CONFIG(max_rq_ring);
+ GET_CONFIG(max_wq_ring);
+ GET_CONFIG(max_cq_ring);
+
+ if (!c->max_wq_ring)
+ c->max_wq_ring = ENIC_MAX_WQ_DESCS_DEFAULT;
+ if (!c->max_rq_ring)
+ c->max_rq_ring = ENIC_MAX_RQ_DESCS_DEFAULT;
+ if (!c->max_cq_ring)
+ c->max_cq_ring = ENIC_MAX_CQ_DESCS_DEFAULT;
c->wq_desc_count =
- min_t(u32, ENIC_MAX_WQ_DESCS,
- max_t(u32, ENIC_MIN_WQ_DESCS,
- c->wq_desc_count));
+ min_t(u32, c->max_wq_ring,
+ max_t(u32, ENIC_MIN_WQ_DESCS, c->wq_desc_count));
c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
c->rq_desc_count =
- min_t(u32, ENIC_MAX_RQ_DESCS,
- max_t(u32, ENIC_MIN_RQ_DESCS,
- c->rq_desc_count));
+ min_t(u32, c->max_rq_ring,
+ max_t(u32, ENIC_MIN_RQ_DESCS, c->rq_desc_count));
c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
if (c->mtu == 0)
c->mtu = 1500;
- c->mtu = min_t(u16, ENIC_MAX_MTU,
- max_t(u16, ENIC_MIN_MTU,
- c->mtu));
+ c->mtu = min_t(u16, ENIC_MAX_MTU, max_t(u16, ENIC_MIN_MTU, c->mtu));
c->intr_timer_usec = min_t(u32, c->intr_timer_usec,
vnic_dev_get_intr_coal_timer_max(enic->vdev));
dev_info(enic_get_dev(enic),
- "vNIC MAC addr %pM wq/rq %d/%d mtu %d\n",
- enic->mac_addr, c->wq_desc_count, c->rq_desc_count, c->mtu);
+ "vNIC MAC addr %pM wq/rq %d/%d max wq/rq/cq %d/%d/%d mtu %d\n",
+ enic->mac_addr, c->wq_desc_count, c->rq_desc_count,
+ c->max_wq_ring, c->max_rq_ring, c->max_cq_ring, c->mtu);
dev_info(enic_get_dev(enic), "vNIC csum tx/rx %s/%s "
"tso/lro %s/%s rss %s intr mode %s type %s timer %d usec "
@@ -176,9 +183,9 @@ void enic_free_vnic_resources(struct enic *enic)
unsigned int i;
for (i = 0; i < enic->wq_count; i++)
- vnic_wq_free(&enic->wq[i]);
+ vnic_wq_free(&enic->wq[i].vwq);
for (i = 0; i < enic->rq_count; i++)
- vnic_rq_free(&enic->rq[i]);
+ vnic_rq_free(&enic->rq[i].vrq);
for (i = 0; i < enic->cq_count; i++)
vnic_cq_free(&enic->cq[i]);
for (i = 0; i < enic->intr_count; i++)
@@ -187,16 +194,21 @@ void enic_free_vnic_resources(struct enic *enic)
void enic_get_res_counts(struct enic *enic)
{
- enic->wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ);
- enic->rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ);
- enic->cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ);
- enic->intr_count = vnic_dev_get_res_count(enic->vdev,
- RES_TYPE_INTR_CTRL);
+ enic->wq_avail = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ);
+ enic->rq_avail = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ);
+ enic->cq_avail = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ);
+ enic->intr_avail = vnic_dev_get_res_count(enic->vdev,
+ RES_TYPE_INTR_CTRL);
+
+ enic->wq_count = enic->wq_avail;
+ enic->rq_count = enic->rq_avail;
+ enic->cq_count = enic->cq_avail;
+ enic->intr_count = enic->intr_avail;
dev_info(enic_get_dev(enic),
"vNIC resources avail: wq %d rq %d cq %d intr %d\n",
- enic->wq_count, enic->rq_count,
- enic->cq_count, enic->intr_count);
+ enic->wq_avail, enic->rq_avail,
+ enic->cq_avail, enic->intr_avail);
}
void enic_init_vnic_resources(struct enic *enic)
@@ -221,9 +233,12 @@ void enic_init_vnic_resources(struct enic *enic)
switch (intr_mode) {
case VNIC_DEV_INTR_MODE_INTX:
+ error_interrupt_enable = 1;
+ error_interrupt_offset = ENIC_LEGACY_ERR_INTR;
+ break;
case VNIC_DEV_INTR_MODE_MSIX:
error_interrupt_enable = 1;
- error_interrupt_offset = enic->intr_count - 2;
+ error_interrupt_offset = enic_msix_err_intr(enic);
break;
default:
error_interrupt_enable = 0;
@@ -233,7 +248,7 @@ void enic_init_vnic_resources(struct enic *enic)
for (i = 0; i < enic->rq_count; i++) {
cq_index = i;
- vnic_rq_init(&enic->rq[i],
+ vnic_rq_init(&enic->rq[i].vrq,
cq_index,
error_interrupt_enable,
error_interrupt_offset);
@@ -241,7 +256,7 @@ void enic_init_vnic_resources(struct enic *enic)
for (i = 0; i < enic->wq_count; i++) {
cq_index = enic->rq_count + i;
- vnic_wq_init(&enic->wq[i],
+ vnic_wq_init(&enic->wq[i].vwq,
cq_index,
error_interrupt_enable,
error_interrupt_offset);
@@ -249,15 +264,15 @@ void enic_init_vnic_resources(struct enic *enic)
/* Init CQ resources
*
- * CQ[0 - n+m-1] point to INTR[0] for INTx, MSI
- * CQ[0 - n+m-1] point to INTR[0 - n+m-1] for MSI-X
+ * All CQs point to INTR[0] for INTx, MSI
+ * CQ[i] point to INTR[ENIC_MSIX_IO_INTR_BASE + i] for MSI-X
*/
for (i = 0; i < enic->cq_count; i++) {
switch (intr_mode) {
case VNIC_DEV_INTR_MODE_MSIX:
- interrupt_offset = i;
+ interrupt_offset = ENIC_MSIX_IO_INTR_BASE + i;
break;
default:
interrupt_offset = 0;
@@ -304,6 +319,7 @@ void enic_init_vnic_resources(struct enic *enic)
int enic_alloc_vnic_resources(struct enic *enic)
{
enum vnic_dev_intr_mode intr_mode;
+ int rq_cq_desc_size;
unsigned int i;
int err;
@@ -318,11 +334,29 @@ int enic_alloc_vnic_resources(struct enic *enic)
intr_mode == VNIC_DEV_INTR_MODE_MSIX ? "MSI-X" :
"unknown");
+ switch (enic->ext_cq) {
+ case ENIC_RQ_CQ_ENTRY_SIZE_16:
+ rq_cq_desc_size = 16;
+ break;
+ case ENIC_RQ_CQ_ENTRY_SIZE_32:
+ rq_cq_desc_size = 32;
+ break;
+ case ENIC_RQ_CQ_ENTRY_SIZE_64:
+ rq_cq_desc_size = 64;
+ break;
+ default:
+ dev_err(enic_get_dev(enic),
+ "Unable to determine rq cq desc size: %d",
+ enic->ext_cq);
+ err = -ENODEV;
+ goto err_out;
+ }
+
/* Allocate queue resources
*/
for (i = 0; i < enic->wq_count; i++) {
- err = vnic_wq_alloc(enic->vdev, &enic->wq[i], i,
+ err = vnic_wq_alloc(enic->vdev, &enic->wq[i].vwq, i,
enic->config.wq_desc_count,
sizeof(struct wq_enet_desc));
if (err)
@@ -330,7 +364,7 @@ int enic_alloc_vnic_resources(struct enic *enic)
}
for (i = 0; i < enic->rq_count; i++) {
- err = vnic_rq_alloc(enic->vdev, &enic->rq[i], i,
+ err = vnic_rq_alloc(enic->vdev, &enic->rq[i].vrq, i,
enic->config.rq_desc_count,
sizeof(struct rq_enet_desc));
if (err)
@@ -340,8 +374,8 @@ int enic_alloc_vnic_resources(struct enic *enic)
for (i = 0; i < enic->cq_count; i++) {
if (i < enic->rq_count)
err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
- enic->config.rq_desc_count,
- sizeof(struct cq_enet_rq_desc));
+ enic->config.rq_desc_count,
+ rq_cq_desc_size);
else
err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
enic->config.wq_desc_count,
@@ -372,6 +406,39 @@ int enic_alloc_vnic_resources(struct enic *enic)
err_out_cleanup:
enic_free_vnic_resources(enic);
-
+err_out:
return err;
}
+
+/*
+ * CMD_CQ_ENTRY_SIZE_SET can fail on older hw generations that don't support
+ * that command
+ */
+void enic_ext_cq(struct enic *enic)
+{
+ u64 a0 = CMD_CQ_ENTRY_SIZE_SET, a1 = 0;
+ int wait = 1000;
+ int ret;
+
+ spin_lock_bh(&enic->devcmd_lock);
+ ret = vnic_dev_cmd(enic->vdev, CMD_CAPABILITY, &a0, &a1, wait);
+ if (ret || a0) {
+ dev_info(&enic->pdev->dev,
+ "CMD_CQ_ENTRY_SIZE_SET not supported.");
+ enic->ext_cq = ENIC_RQ_CQ_ENTRY_SIZE_16;
+ goto out;
+ }
+ a1 &= VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT;
+ enic->ext_cq = fls(a1) - 1;
+ a0 = VNIC_RQ_ALL;
+ a1 = enic->ext_cq;
+ ret = vnic_dev_cmd(enic->vdev, CMD_CQ_ENTRY_SIZE_SET, &a0, &a1, wait);
+ if (ret) {
+ dev_info(&enic->pdev->dev, "CMD_CQ_ENTRY_SIZE_SET failed.");
+ enic->ext_cq = ENIC_RQ_CQ_ENTRY_SIZE_16;
+ }
+out:
+ spin_unlock_bh(&enic->devcmd_lock);
+ dev_info(&enic->pdev->dev, "CQ entry size set to %d bytes",
+ 16 << enic->ext_cq);
+}
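
enic_ext_cq() above reads a capability bitmask of supported RQ CQ entry sizes and keeps the largest one: ext_cq = fls(a1) - 1, giving a 16 << ext_cq byte entry. A small standalone sketch of that negotiation; the assumption here is that VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT covers bits 0-2 (16, 32 and 64 byte entries):

#include <stdio.h>

/* Assumed layout: bit 0 = 16-byte, bit 1 = 32-byte, bit 2 = 64-byte entries. */
#define VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT	0x7u

/* Position of the highest set bit, 1-based; 0 if no bit is set (like fls()). */
static int fls_u32(unsigned int x)
{
	int bit = 0;

	while (x) {
		x >>= 1;
		bit++;
	}
	return bit;
}

int main(void)
{
	unsigned int caps[] = { 0x1, 0x3, 0x7 };	/* device-reported bitmasks */

	for (int i = 0; i < 3; i++) {
		unsigned int a1 = caps[i] & VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT;
		int ext_cq = fls_u32(a1) - 1;

		printf("caps 0x%x -> ext_cq %d -> %d-byte CQ entries\n",
		       caps[i], ext_cq, 16 << ext_cq);
	}
	return 0;
}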
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.h b/drivers/net/ethernet/cisco/enic/enic_res.h
index b8ee42d297aa..02dca1ae4a22 100644
--- a/drivers/net/ethernet/cisco/enic/enic_res.h
+++ b/drivers/net/ethernet/cisco/enic/enic_res.h
@@ -12,10 +12,13 @@
#include "vnic_wq.h"
#include "vnic_rq.h"
-#define ENIC_MIN_WQ_DESCS 64
-#define ENIC_MAX_WQ_DESCS 4096
-#define ENIC_MIN_RQ_DESCS 64
-#define ENIC_MAX_RQ_DESCS 4096
+#define ENIC_MIN_WQ_DESCS 64
+#define ENIC_MAX_WQ_DESCS_DEFAULT 4096
+#define ENIC_MAX_WQ_DESCS 16384
+#define ENIC_MIN_RQ_DESCS 64
+#define ENIC_MAX_RQ_DESCS 16384
+#define ENIC_MAX_RQ_DESCS_DEFAULT 4096
+#define ENIC_MAX_CQ_DESCS_DEFAULT (64 * 1024)
#define ENIC_MIN_MTU ETH_MIN_MTU
#define ENIC_MAX_MTU 9000
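
The new *_DEFAULT limits above act as fallbacks when enic_get_vnic_config() (earlier in this patch) reads max_rq_ring/max_wq_ring as zero from older firmware; the requested descriptor count is then clamped to [min, max] and rounded down to a multiple of 32. A standalone sketch of that clamping for the RQ case:

#include <stdio.h>

#define ENIC_MIN_RQ_DESCS		64u
#define ENIC_MAX_RQ_DESCS_DEFAULT	4096u

/* Clamp to [min, max] and align down to a multiple of 32, as
 * enic_get_vnic_config() does with c->rq_desc_count.
 */
static unsigned int clamp_ring(unsigned int want, unsigned int max_ring)
{
	unsigned int v = want;

	if (!max_ring)			/* older fw reports 0: use the default */
		max_ring = ENIC_MAX_RQ_DESCS_DEFAULT;
	if (v < ENIC_MIN_RQ_DESCS)
		v = ENIC_MIN_RQ_DESCS;
	if (v > max_ring)
		v = max_ring;
	return v & 0xffffffe0;		/* groups of 32 */
}

int main(void)
{
	printf("%u\n", clamp_ring(100, 0));		/* 96   */
	printf("%u\n", clamp_ring(5000, 0));		/* 4096 */
	printf("%u\n", clamp_ring(5000, 16384));	/* 4992 */
	return 0;
}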
diff --git a/drivers/net/ethernet/cisco/enic/enic_rq.c b/drivers/net/ethernet/cisco/enic/enic_rq.c
new file mode 100644
index 000000000000..ccbf5c9a21d0
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_rq.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2024 Cisco Systems, Inc. All rights reserved.
+
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <net/busy_poll.h>
+#include "enic.h"
+#include "enic_res.h"
+#include "enic_rq.h"
+#include "vnic_rq.h"
+#include "cq_enet_desc.h"
+
+#define ENIC_LARGE_PKT_THRESHOLD 1000
+
+static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
+ u32 pkt_len)
+{
+ if (pkt_len > ENIC_LARGE_PKT_THRESHOLD)
+ pkt_size->large_pkt_bytes_cnt += pkt_len;
+ else
+ pkt_size->small_pkt_bytes_cnt += pkt_len;
+}
+
+static void enic_rq_cq_desc_dec(void *cq_desc, u8 cq_desc_size, u8 *type,
+ u8 *color, u16 *q_number, u16 *completed_index)
+{
+ /* type_color is the last field for all cq structs */
+ u8 type_color;
+
+ switch (cq_desc_size) {
+ case VNIC_RQ_CQ_ENTRY_SIZE_16: {
+ struct cq_enet_rq_desc *desc =
+ (struct cq_enet_rq_desc *)cq_desc;
+ type_color = desc->type_color;
+
+ /* Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+ rmb();
+
+ *q_number = le16_to_cpu(desc->q_number_rss_type_flags) &
+ CQ_DESC_Q_NUM_MASK;
+ *completed_index = le16_to_cpu(desc->completed_index_flags) &
+ CQ_DESC_COMP_NDX_MASK;
+ break;
+ }
+ case VNIC_RQ_CQ_ENTRY_SIZE_32: {
+ struct cq_enet_rq_desc_32 *desc =
+ (struct cq_enet_rq_desc_32 *)cq_desc;
+ type_color = desc->type_color;
+
+ /* Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+ rmb();
+
+ *q_number = le16_to_cpu(desc->q_number_rss_type_flags) &
+ CQ_DESC_Q_NUM_MASK;
+ *completed_index = le16_to_cpu(desc->completed_index_flags) &
+ CQ_DESC_COMP_NDX_MASK;
+ *completed_index |= (desc->fetch_index_flags & CQ_DESC_32_FI_MASK) <<
+ CQ_DESC_COMP_NDX_BITS;
+ break;
+ }
+ case VNIC_RQ_CQ_ENTRY_SIZE_64: {
+ struct cq_enet_rq_desc_64 *desc =
+ (struct cq_enet_rq_desc_64 *)cq_desc;
+ type_color = desc->type_color;
+
+ /* Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+ rmb();
+
+ *q_number = le16_to_cpu(desc->q_number_rss_type_flags) &
+ CQ_DESC_Q_NUM_MASK;
+ *completed_index = le16_to_cpu(desc->completed_index_flags) &
+ CQ_DESC_COMP_NDX_MASK;
+ *completed_index |= (desc->fetch_index_flags & CQ_DESC_64_FI_MASK) <<
+ CQ_DESC_COMP_NDX_BITS;
+ break;
+ }
+ }
+
+ *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
+ *type = type_color & CQ_DESC_TYPE_MASK;
+}
+
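
Each branch of enic_rq_cq_desc_dec() above reads type_color first and only then issues rmb(), so the completion's payload fields are never read before the color (ownership) bit. The same pattern sketched as a generic standalone consumer; the acquire fence stands in for the kernel's rmb() and the cq_entry layout is purely illustrative, not the driver's definition:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative completion entry: hardware writes the payload first and flips
 * the color bit in type_color last.
 */
struct cq_entry {
	uint16_t q_number;
	uint16_t completed_index;
	uint8_t  type_color;		/* bit 7: color, bits 0-6: type */
};

#define CQ_COLOR_BIT	0x80u

/* Returns true and fills *q/*ci once the entry carries the current color,
 * i.e. hardware has finished writing it.
 */
static bool cq_poll_one(volatile struct cq_entry *desc, bool color,
			uint16_t *q, uint16_t *ci)
{
	uint8_t type_color = desc->type_color;

	if (!!(type_color & CQ_COLOR_BIT) != color)
		return false;		/* not ours yet */

	/* Read the color before any other field: this acquire fence plays the
	 * role of rmb() in the driver.
	 */
	atomic_thread_fence(memory_order_acquire);

	*q = desc->q_number;
	*ci = desc->completed_index;
	return true;
}

int main(void)
{
	struct cq_entry e = { .q_number = 3, .completed_index = 42,
			      .type_color = CQ_COLOR_BIT };
	uint16_t q, ci;

	if (cq_poll_one(&e, true, &q, &ci))
		return !(q == 3 && ci == 42);	/* 0 on success */
	return 1;
}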
+static void enic_rq_set_skb_flags(struct vnic_rq *vrq, u8 type, u32 rss_hash,
+ u8 rss_type, u8 fcoe, u8 fcoe_fc_crc_ok,
+ u8 vlan_stripped, u8 csum_not_calc,
+ u8 tcp_udp_csum_ok, u8 ipv6, u8 ipv4_csum_ok,
+ u16 vlan_tci, struct sk_buff *skb)
+{
+ struct enic *enic = vnic_dev_priv(vrq->vdev);
+ struct net_device *netdev = enic->netdev;
+ struct enic_rq_stats *rqstats = &enic->rq[vrq->index].stats;
+ bool outer_csum_ok = true, encap = false;
+
+ if ((netdev->features & NETIF_F_RXHASH) && rss_hash && type == 3) {
+ switch (rss_type) {
+ case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4:
+ case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
+ case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX:
+ skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
+ rqstats->l4_rss_hash++;
+ break;
+ case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4:
+ case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6:
+ case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX:
+ skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
+ rqstats->l3_rss_hash++;
+ break;
+ }
+ }
+ if (enic->vxlan.vxlan_udp_port_number) {
+ switch (enic->vxlan.patch_level) {
+ case 0:
+ if (fcoe) {
+ encap = true;
+ outer_csum_ok = fcoe_fc_crc_ok;
+ }
+ break;
+ case 2:
+ if (type == 7 && (rss_hash & BIT(0))) {
+ encap = true;
+ outer_csum_ok = (rss_hash & BIT(1)) &&
+ (rss_hash & BIT(2));
+ }
+ break;
+ }
+ }
+
+	/* Hardware does not provide the whole-packet checksum. It only
+	 * provides a pseudo checksum. Since hw validates the packet
+	 * checksum but does not give us the checksum value, use
+	 * CHECKSUM_UNNECESSARY.
+	 *
+	 * In case of an encap pkt, tcp_udp_csum_ok/ipv4_csum_ok is the
+	 * inner csum_ok. outer_csum_ok is set by hw when the outer udp
+	 * csum is correct or is zero.
+	 */
+ if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
+ tcp_udp_csum_ok && outer_csum_ok && (ipv4_csum_ok || ipv6)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = encap;
+ if (encap)
+ rqstats->csum_unnecessary_encap++;
+ else
+ rqstats->csum_unnecessary++;
+ }
+
+ if (vlan_stripped) {
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
+ rqstats->vlan_stripped++;
+ }
+}
+
+/*
+ * The cq_enet_rq_desc accessors here use only the first 15 bytes of the cq
+ * descriptor, which are identical for all cq entry sizes (16, 32 and 64 byte).
+ */
+static void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc, u8 *ingress_port,
+ u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
+ u8 *csum_not_calc, u32 *rss_hash,
+ u16 *bytes_written, u8 *packet_error,
+ u8 *vlan_stripped, u16 *vlan_tci,
+ u16 *checksum, u8 *fcoe_sof,
+ u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error,
+ u8 *fcoe_eof, u8 *tcp_udp_csum_ok, u8 *udp,
+ u8 *tcp, u8 *ipv4_csum_ok, u8 *ipv6, u8 *ipv4,
+ u8 *ipv4_fragment, u8 *fcs_ok)
+{
+ u16 completed_index_flags;
+ u16 q_number_rss_type_flags;
+ u16 bytes_written_flags;
+
+ completed_index_flags = le16_to_cpu(desc->completed_index_flags);
+ q_number_rss_type_flags =
+ le16_to_cpu(desc->q_number_rss_type_flags);
+ bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
+
+ *ingress_port = (completed_index_flags &
+ CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
+ *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
+ 1 : 0;
+ *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
+ 1 : 0;
+ *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
+ 1 : 0;
+
+ *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
+ CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
+ *csum_not_calc = (q_number_rss_type_flags &
+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
+
+ *rss_hash = le32_to_cpu(desc->rss_hash);
+
+ *bytes_written = bytes_written_flags &
+ CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+ *packet_error = (bytes_written_flags &
+ CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
+ *vlan_stripped = (bytes_written_flags &
+ CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
+
+ /*
+ * Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12)
+ */
+ *vlan_tci = le16_to_cpu(desc->vlan);
+
+ if (*fcoe) {
+ *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
+ CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
+ *fcoe_fc_crc_ok = (desc->flags &
+ CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
+ *fcoe_enc_error = (desc->flags &
+ CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
+ *fcoe_eof = (u8)((le16_to_cpu(desc->checksum_fcoe) >>
+ CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
+ CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
+ *checksum = 0;
+ } else {
+ *fcoe_sof = 0;
+ *fcoe_fc_crc_ok = 0;
+ *fcoe_enc_error = 0;
+ *fcoe_eof = 0;
+ *checksum = le16_to_cpu(desc->checksum_fcoe);
+ }
+
+ *tcp_udp_csum_ok =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
+ *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
+ *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
+ *ipv4_csum_ok =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
+ *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
+ *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
+ *ipv4_fragment =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
+ *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
+}
+
+static bool enic_rq_pkt_error(struct vnic_rq *vrq, u8 packet_error, u8 fcs_ok,
+ u16 bytes_written)
+{
+ struct enic *enic = vnic_dev_priv(vrq->vdev);
+ struct enic_rq_stats *rqstats = &enic->rq[vrq->index].stats;
+
+ if (packet_error) {
+ if (!fcs_ok) {
+ if (bytes_written > 0)
+ rqstats->bad_fcs++;
+ else if (bytes_written == 0)
+ rqstats->pkt_truncated++;
+ }
+ return true;
+ }
+ return false;
+}
+
+int enic_rq_alloc_buf(struct vnic_rq *rq)
+{
+ struct enic *enic = vnic_dev_priv(rq->vdev);
+ struct net_device *netdev = enic->netdev;
+ struct enic_rq *erq = &enic->rq[rq->index];
+ struct enic_rq_stats *rqstats = &erq->stats;
+ unsigned int offset = 0;
+ unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
+ unsigned int os_buf_index = 0;
+ dma_addr_t dma_addr;
+ struct vnic_rq_buf *buf = rq->to_use;
+ struct page *page;
+ unsigned int truesize = len;
+
+ if (buf->os_buf) {
+ enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
+ buf->len);
+
+ return 0;
+ }
+
+ page = page_pool_dev_alloc(erq->pool, &offset, &truesize);
+ if (unlikely(!page)) {
+ rqstats->pp_alloc_fail++;
+ return -ENOMEM;
+ }
+ buf->offset = offset;
+ buf->truesize = truesize;
+ dma_addr = page_pool_get_dma_addr(page) + offset;
+ enic_queue_rq_desc(rq, (void *)page, os_buf_index, dma_addr, len);
+
+ return 0;
+}
+
+void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
+{
+ struct enic *enic = vnic_dev_priv(rq->vdev);
+ struct enic_rq *erq = &enic->rq[rq->index];
+
+ if (!buf->os_buf)
+ return;
+
+ page_pool_put_full_page(erq->pool, (struct page *)buf->os_buf, true);
+ buf->os_buf = NULL;
+}
+
+static void enic_rq_indicate_buf(struct enic *enic, struct vnic_rq *rq,
+ struct vnic_rq_buf *buf, void *cq_desc,
+ u8 type, u16 q_number, u16 completed_index)
+{
+ struct sk_buff *skb;
+ struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+ struct enic_rq_stats *rqstats = &enic->rq[rq->index].stats;
+ struct napi_struct *napi;
+
+ u8 eop, sop, ingress_port, vlan_stripped;
+ u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
+ u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
+ u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
+ u8 packet_error;
+ u16 bytes_written, vlan_tci, checksum;
+ u32 rss_hash;
+
+ rqstats->packets++;
+
+ cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, &ingress_port,
+ &fcoe, &eop, &sop, &rss_type, &csum_not_calc,
+ &rss_hash, &bytes_written, &packet_error,
+ &vlan_stripped, &vlan_tci, &checksum, &fcoe_sof,
+ &fcoe_fc_crc_ok, &fcoe_enc_error, &fcoe_eof,
+ &tcp_udp_csum_ok, &udp, &tcp, &ipv4_csum_ok, &ipv6,
+ &ipv4, &ipv4_fragment, &fcs_ok);
+
+ if (enic_rq_pkt_error(rq, packet_error, fcs_ok, bytes_written))
+ return;
+
+ if (eop && bytes_written > 0) {
+ /* Good receive
+ */
+ rqstats->bytes += bytes_written;
+ napi = &enic->napi[rq->index];
+ skb = napi_get_frags(napi);
+ if (unlikely(!skb)) {
+ net_warn_ratelimited("%s: skb alloc error rq[%d], desc[%d]\n",
+ enic->netdev->name, rq->index,
+ completed_index);
+ rqstats->no_skb++;
+ return;
+ }
+
+ prefetch(skb->data - NET_IP_ALIGN);
+
+ dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr,
+ bytes_written, DMA_FROM_DEVICE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ (struct page *)buf->os_buf, buf->offset,
+ bytes_written, buf->truesize);
+ skb_record_rx_queue(skb, q_number);
+ enic_rq_set_skb_flags(rq, type, rss_hash, rss_type, fcoe,
+ fcoe_fc_crc_ok, vlan_stripped,
+ csum_not_calc, tcp_udp_csum_ok, ipv6,
+ ipv4_csum_ok, vlan_tci, skb);
+ skb_mark_for_recycle(skb);
+ napi_gro_frags(napi);
+ if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+ enic_intr_update_pkt_size(&cq->pkt_size_counter,
+ bytes_written);
+ buf->os_buf = NULL;
+ buf->dma_addr = 0;
+ buf = buf->next;
+ } else {
+ /* Buffer overflow
+ */
+ rqstats->pkt_truncated++;
+ }
+}
+
+static void enic_rq_service(struct enic *enic, void *cq_desc, u8 type,
+ u16 q_number, u16 completed_index)
+{
+ struct enic_rq_stats *rqstats = &enic->rq[q_number].stats;
+ struct vnic_rq *vrq = &enic->rq[q_number].vrq;
+ struct vnic_rq_buf *vrq_buf = vrq->to_clean;
+ int skipped;
+
+ while (1) {
+ skipped = (vrq_buf->index != completed_index);
+ if (!skipped)
+ enic_rq_indicate_buf(enic, vrq, vrq_buf, cq_desc, type,
+ q_number, completed_index);
+ else
+ rqstats->desc_skip++;
+
+ vrq->ring.desc_avail++;
+ vrq->to_clean = vrq_buf->next;
+ vrq_buf = vrq_buf->next;
+ if (!skipped)
+ break;
+ }
+}
+
+unsigned int enic_rq_cq_service(struct enic *enic, unsigned int cq_index,
+ unsigned int work_to_do)
+{
+ struct vnic_cq *cq = &enic->cq[cq_index];
+ void *cq_desc = vnic_cq_to_clean(cq);
+ u16 q_number, completed_index;
+ unsigned int work_done = 0;
+ u8 type, color;
+
+ enic_rq_cq_desc_dec(cq_desc, enic->ext_cq, &type, &color, &q_number,
+ &completed_index);
+
+ while (color != cq->last_color) {
+ enic_rq_service(enic, cq_desc, type, q_number, completed_index);
+ vnic_cq_inc_to_clean(cq);
+
+ if (++work_done >= work_to_do)
+ break;
+
+ cq_desc = vnic_cq_to_clean(cq);
+ enic_rq_cq_desc_dec(cq_desc, enic->ext_cq, &type, &color,
+ &q_number, &completed_index);
+ }
+
+ return work_done;
+}
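Editor's illustration (not part of the patch): the receive and transmit service loops above both follow the same colour-bit handshake, built from vnic_cq_to_clean()/vnic_cq_inc_to_clean(). The sketch below is a minimal userspace model of that handshake; the struct layout, ring size and all names are invented for the example, and the rmb() the driver needs between the colour read and the other field reads is only noted in a comment.

#include <stdint.h>
#include <stdio.h>

#define MODEL_RING_SIZE	8
#define MODEL_COLOR_BIT	0x80	/* assumed: colour in the top bit of type_color */

struct model_cq_desc {
	uint8_t payload;
	uint8_t type_color;	/* "hardware" writes this byte last */
};

struct model_cq {
	struct model_cq_desc ring[MODEL_RING_SIZE];
	unsigned int to_clean;
	uint8_t last_color;
};

/* "Hardware" posts a completion: payload first, colour byte last.  The real
 * driver mirrors that ordering with an rmb() between reading the colour and
 * reading the other descriptor fields. */
static void model_hw_post(struct model_cq *cq, unsigned int idx,
			  uint8_t payload, uint8_t color)
{
	cq->ring[idx].payload = payload;
	cq->ring[idx].type_color = color ? MODEL_COLOR_BIT : 0;
}

/* Software service loop, shaped like enic_rq_cq_service() above: consume
 * entries whose colour differs from last_color, toggle last_color on wrap. */
static unsigned int model_cq_service(struct model_cq *cq, unsigned int budget)
{
	unsigned int done = 0;

	while (done < budget) {
		struct model_cq_desc *d = &cq->ring[cq->to_clean];
		uint8_t color = !!(d->type_color & MODEL_COLOR_BIT);

		if (color == cq->last_color)
			break;	/* hardware has not written this entry yet */

		printf("entry %u: payload %u\n", cq->to_clean,
		       (unsigned int)d->payload);
		done++;

		if (++cq->to_clean == MODEL_RING_SIZE) {
			cq->to_clean = 0;
			cq->last_color = !cq->last_color;
		}
	}
	return done;
}

int main(void)
{
	struct model_cq cq = { .last_color = 0 };

	model_hw_post(&cq, 0, 42, 1);	/* colour 1 != last_color 0 -> ready */
	model_hw_post(&cq, 1, 43, 1);
	model_cq_service(&cq, 16);
	return 0;
}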
diff --git a/drivers/net/ethernet/cisco/enic/enic_rq.h b/drivers/net/ethernet/cisco/enic/enic_rq.h
new file mode 100644
index 000000000000..98476a7297af
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_rq.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright 2024 Cisco Systems, Inc. All rights reserved.
+ */
+
+unsigned int enic_rq_cq_service(struct enic *enic, unsigned int cq_index,
+ unsigned int work_to_do);
+int enic_rq_alloc_buf(struct vnic_rq *rq);
+void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
diff --git a/drivers/net/ethernet/cisco/enic/enic_wq.c b/drivers/net/ethernet/cisco/enic/enic_wq.c
new file mode 100644
index 000000000000..07936f8b4231
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_wq.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2025 Cisco Systems, Inc. All rights reserved.
+
+#include <net/netdev_queues.h>
+#include "enic_res.h"
+#include "enic.h"
+#include "enic_wq.h"
+
+#define ENET_CQ_DESC_COMP_NDX_BITS 14
+#define ENET_CQ_DESC_COMP_NDX_MASK GENMASK(ENET_CQ_DESC_COMP_NDX_BITS - 1, 0)
+
+static void enic_wq_cq_desc_dec(const struct cq_desc *desc_arg, bool ext_wq,
+ u8 *type, u8 *color, u16 *q_number,
+ u16 *completed_index)
+{
+ const struct cq_desc *desc = desc_arg;
+ const u8 type_color = desc->type_color;
+
+ *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
+
+ /*
+ * Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+ rmb();
+
+ *type = type_color & CQ_DESC_TYPE_MASK;
+ *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
+
+ if (ext_wq)
+ *completed_index = le16_to_cpu(desc->completed_index) &
+ ENET_CQ_DESC_COMP_NDX_MASK;
+ else
+ *completed_index = le16_to_cpu(desc->completed_index) &
+ CQ_DESC_COMP_NDX_MASK;
+}
+
+void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
+{
+ struct enic *enic = vnic_dev_priv(wq->vdev);
+
+ if (buf->sop)
+ dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_TO_DEVICE);
+
+ if (buf->os_buf)
+ dev_kfree_skb_any(buf->os_buf);
+}
+
+static void enic_wq_free_buf(struct vnic_wq *wq, struct cq_desc *cq_desc,
+ struct vnic_wq_buf *buf, void *opaque)
+{
+ struct enic *enic = vnic_dev_priv(wq->vdev);
+
+ enic->wq[wq->index].stats.cq_work++;
+ enic->wq[wq->index].stats.cq_bytes += buf->len;
+ enic_free_wq_buf(wq, buf);
+}
+
+static void enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+ u8 type, u16 q_number, u16 completed_index)
+{
+ struct enic *enic = vnic_dev_priv(vdev);
+
+ spin_lock(&enic->wq[q_number].lock);
+
+ vnic_wq_service(&enic->wq[q_number].vwq, cq_desc,
+ completed_index, enic_wq_free_buf, NULL);
+
+ if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number))
+ && vnic_wq_desc_avail(&enic->wq[q_number].vwq) >=
+ (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) {
+ netif_wake_subqueue(enic->netdev, q_number);
+ enic->wq[q_number].stats.wake++;
+ }
+
+ spin_unlock(&enic->wq[q_number].lock);
+}
+
+unsigned int enic_wq_cq_service(struct enic *enic, unsigned int cq_index,
+ unsigned int work_to_do)
+{
+ struct vnic_cq *cq = &enic->cq[cq_index];
+ u16 q_number, completed_index;
+ unsigned int work_done = 0;
+ struct cq_desc *cq_desc;
+ u8 type, color;
+ bool ext_wq;
+
+ ext_wq = cq->ring.size > ENIC_MAX_WQ_DESCS_DEFAULT;
+
+ cq_desc = (struct cq_desc *)vnic_cq_to_clean(cq);
+ enic_wq_cq_desc_dec(cq_desc, ext_wq, &type, &color,
+ &q_number, &completed_index);
+
+ while (color != cq->last_color) {
+ enic_wq_service(cq->vdev, cq_desc, type, q_number,
+ completed_index);
+
+ vnic_cq_inc_to_clean(cq);
+
+ if (++work_done >= work_to_do)
+ break;
+
+ cq_desc = (struct cq_desc *)vnic_cq_to_clean(cq);
+ enic_wq_cq_desc_dec(cq_desc, ext_wq, &type, &color,
+ &q_number, &completed_index);
+ }
+
+ return work_done;
+}
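Editor's note with a small standalone check (not driver code): for extended WQs the completed index above is masked with ENET_CQ_DESC_COMP_NDX_MASK, i.e. GENMASK(13, 0) = 14 bits, which is what a 16384-entry ring (the new limit in the vnic_wq.h hunk further down) needs in order to address indices 0..16383; the default CQ_DESC_COMP_NDX_MASK is narrower. The GENMASK re-implementation below exists only for this example.

#include <stdio.h>

/* Userspace stand-in for the kernel's GENMASK(h, l) on 32-bit values. */
#define MODEL_GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

int main(void)
{
	unsigned int mask14 = MODEL_GENMASK(13, 0);	/* 0x3fff */

	printf("mask = 0x%04x, highest index = %u\n", mask14, mask14);
	printf("16383 & mask = %u\n", 16383u & mask14);	/* fits: 16383 */
	return 0;
}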
diff --git a/drivers/net/ethernet/cisco/enic/enic_wq.h b/drivers/net/ethernet/cisco/enic/enic_wq.h
new file mode 100644
index 000000000000..12acb3f2fbc9
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_wq.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright 2025 Cisco Systems, Inc. All rights reserved.
+ */
+
+void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
+unsigned int enic_wq_cq_service(struct enic *enic, unsigned int cq_index,
+ unsigned int work_to_do);
diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.h b/drivers/net/ethernet/cisco/enic/vnic_cq.h
index eed5bf59e5d2..0e37f5d5e527 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_cq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_cq.h
@@ -56,45 +56,18 @@ struct vnic_cq {
ktime_t prev_ts;
};
-static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
- unsigned int work_to_do,
- int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
- u8 type, u16 q_number, u16 completed_index, void *opaque),
- void *opaque)
+static inline void *vnic_cq_to_clean(struct vnic_cq *cq)
{
- struct cq_desc *cq_desc;
- unsigned int work_done = 0;
- u16 q_number, completed_index;
- u8 type, color;
-
- cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
- cq->ring.desc_size * cq->to_clean);
- cq_desc_dec(cq_desc, &type, &color,
- &q_number, &completed_index);
-
- while (color != cq->last_color) {
-
- if ((*q_service)(cq->vdev, cq_desc, type,
- q_number, completed_index, opaque))
- break;
-
- cq->to_clean++;
- if (cq->to_clean == cq->ring.desc_count) {
- cq->to_clean = 0;
- cq->last_color = cq->last_color ? 0 : 1;
- }
-
- cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
- cq->ring.desc_size * cq->to_clean);
- cq_desc_dec(cq_desc, &type, &color,
- &q_number, &completed_index);
+ return ((u8 *)cq->ring.descs + cq->ring.desc_size * cq->to_clean);
+}
- work_done++;
- if (work_done >= work_to_do)
- break;
+static inline void vnic_cq_inc_to_clean(struct vnic_cq *cq)
+{
+ cq->to_clean++;
+ if (cq->to_clean == cq->ring.desc_count) {
+ cq->to_clean = 0;
+ cq->last_color = cq->last_color ? 0 : 1;
}
-
- return work_done;
}
void vnic_cq_free(struct vnic_cq *cq);
diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
index db56d778877a..605ef17f967e 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
@@ -436,6 +436,25 @@ enum vnic_devcmd_cmd {
* in: (u16) a2 = unsigned short int port information
*/
CMD_OVERLAY_OFFLOAD_CFG = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 73),
+
+ /*
+ * Set extended CQ field in MREGS of RQ (or all RQs)
+ * for given vNIC
+ * in: (u64) a0 = RQ selection (VNIC_RQ_ALL for all RQs)
+ * (u32) a1 = CQ entry size
+ * VNIC_RQ_CQ_ENTRY_SIZE_16 --> 16 bytes
+ * VNIC_RQ_CQ_ENTRY_SIZE_32 --> 32 bytes
+ * VNIC_RQ_CQ_ENTRY_SIZE_64 --> 64 bytes
+ *
+ * Capability query:
+ * out: (u32) a0 = errno, 0:valid cmd
+ * (u32) a1 = value consisting of supported entries
+ * bit 0: 16 bytes
+ * bit 1: 32 bytes
+ * bit 2: 64 bytes
+ */
+ CMD_CQ_ENTRY_SIZE_SET = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 90),
+
};
/* CMD_ENABLE2 flags */
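Editor's sketch (not part of the devcmd interface): the capability query above reports the supported CQ entry sizes as a bitmask in a1 (bit 0 = 16 bytes, bit 1 = 32 bytes, bit 2 = 64 bytes). A standalone helper that picks the largest size a caller understands might look like the following; the macro and function names are invented for illustration.

#include <stdio.h>

#define MODEL_CQ_ENTRY_16_CAP	(1u << 0)
#define MODEL_CQ_ENTRY_32_CAP	(1u << 1)
#define MODEL_CQ_ENTRY_64_CAP	(1u << 2)

/* Pick the largest supported entry size; 16-byte entries are the baseline. */
static unsigned int model_pick_cq_entry_size(unsigned int caps)
{
	if (caps & MODEL_CQ_ENTRY_64_CAP)
		return 64;
	if (caps & MODEL_CQ_ENTRY_32_CAP)
		return 32;
	return 16;
}

int main(void)
{
	/* adapter advertising 16- and 32-byte entries -> choose 32 */
	printf("%u\n", model_pick_cq_entry_size(MODEL_CQ_ENTRY_16_CAP |
						MODEL_CQ_ENTRY_32_CAP));
	return 0;
}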
diff --git a/drivers/net/ethernet/cisco/enic/vnic_enet.h b/drivers/net/ethernet/cisco/enic/vnic_enet.h
index 5acc236069de..9e8e86262a3f 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_enet.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_enet.h
@@ -21,6 +21,11 @@ struct vnic_enet_config {
u16 loop_tag;
u16 vf_rq_count;
u16 num_arfs;
+ u8 reserved[66];
+ u32 max_rq_ring; // MAX RQ ring size
+ u32 max_wq_ring; // MAX WQ ring size
+ u32 max_cq_ring; // MAX CQ ring size
+ u32 rdma_rsvd_lkey; // Reserved (privileged) LKey
};
#define VENETF_TSO 0x1 /* TSO enabled */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.h b/drivers/net/ethernet/cisco/enic/vnic_rq.h
index 0bc595abc03b..a1cdd729caec 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.h
@@ -50,7 +50,7 @@ struct vnic_rq_ctrl {
(VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf))
#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries))
-#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
+#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(16384)
struct vnic_rq_buf {
struct vnic_rq_buf *next;
@@ -61,6 +61,8 @@ struct vnic_rq_buf {
unsigned int index;
void *desc;
uint64_t wr_id;
+ unsigned int offset;
+ unsigned int truesize;
};
enum enic_poll_state {
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h
index 75c526911074..3bb4758100ba 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h
@@ -62,7 +62,7 @@ struct vnic_wq_buf {
(VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf))
#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries))
-#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
+#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(16384)
struct vnic_wq {
unsigned int index;
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 991e3839858b..6a2004bbe87f 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -40,6 +40,7 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <net/gro.h>
#include "gemini.h"
@@ -1143,6 +1144,7 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
struct gmac_txdesc *txd;
skb_frag_t *skb_frag;
dma_addr_t mapping;
+ bool tcp = false;
void *buffer;
u16 mss;
int ret;
@@ -1150,6 +1152,13 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
word1 = skb->len;
word3 = SOF_BIT;
+ /* Determine if we are doing TCP */
+ if (skb->protocol == htons(ETH_P_IP))
+ tcp = (ip_hdr(skb)->protocol == IPPROTO_TCP);
+ else
+ /* IPv6 */
+ tcp = (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP);
+
mss = skb_shinfo(skb)->gso_size;
if (mss) {
/* This means we are dealing with TCP and skb->len is the
@@ -1162,8 +1171,26 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
mss, skb->len);
word1 |= TSS_MTU_ENABLE_BIT;
word3 |= mss;
+ } else if (tcp) {
+ /* Even if we are not using TSO, use the hardware offloader
+ * for transferring the TCP frame: this hardware has partial
+		 * TCP awareness (called TOE - TCP Offload Engine) and will,
+		 * according to the datasheet, put packets belonging to the
+ * same TCP connection in the same queue for the TOE/TSO
+ * engine to process. The engine will deal with chopping
+ * up frames that exceed ETH_DATA_LEN which the
+ * checksumming engine cannot handle (see below) into
+ * manageable chunks. It flawlessly deals with quite big
+ * frames and frames containing custom DSA EtherTypes.
+ */
+ mss = netdev->mtu + skb_tcp_all_headers(skb);
+ mss = min(mss, skb->len);
+ netdev_dbg(netdev, "TOE/TSO len %04x mtu %04x mss %04x\n",
+ skb->len, netdev->mtu, mss);
+ word1 |= TSS_MTU_ENABLE_BIT;
+ word3 |= mss;
} else if (skb->len >= ETH_FRAME_LEN) {
- /* Hardware offloaded checksumming isn't working on frames
+ /* Hardware offloaded checksumming isn't working on non-TCP frames
* bigger than 1514 bytes. A hypothesis about this is that the
* checksum buffer is only 1518 bytes, so when the frames get
* bigger they get truncated, or the last few bytes get
@@ -1180,21 +1207,16 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
}
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- int tcp = 0;
-
/* We do not switch off the checksumming on non TCP/UDP
* frames: as is shown from tests, the checksumming engine
* is smart enough to see that a frame is not actually TCP
* or UDP and then just pass it through without any changes
* to the frame.
*/
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (skb->protocol == htons(ETH_P_IP))
word1 |= TSS_IP_CHKSUM_BIT;
- tcp = ip_hdr(skb)->protocol == IPPROTO_TCP;
- } else { /* IPv6 */
+ else
word1 |= TSS_IPV6_ENABLE_BIT;
- tcp = ipv6_hdr(skb)->nexthdr == IPPROTO_TCP;
- }
word1 |= tcp ? TSS_TCP_CHKSUM_BIT : TSS_UDP_CHKSUM_BIT;
}
@@ -1833,9 +1855,8 @@ static int gmac_open(struct net_device *netdev)
gmac_enable_tx_rx(netdev);
netif_tx_start_all_queues(netdev);
- hrtimer_init(&port->rx_coalesce_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- port->rx_coalesce_timer.function = &gmac_coalesce_delay_expired;
+ hrtimer_setup(&port->rx_coalesce_timer, &gmac_coalesce_delay_expired, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
netdev_dbg(netdev, "opened\n");
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 8735e333034c..b87eaf0c250c 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1777,10 +1777,11 @@ static void dm9000_drv_remove(struct platform_device *pdev)
unregister_netdev(ndev);
dm9000_release_board(pdev, dm);
- free_netdev(ndev); /* free device structure */
if (dm->power_supply)
regulator_disable(dm->power_supply);
+ free_netdev(ndev); /* free device structure */
+
dev_dbg(&pdev->dev, "released and freed device\n");
}
diff --git a/drivers/net/ethernet/dec/tulip/21142.c b/drivers/net/ethernet/dec/tulip/21142.c
index 369858272650..76767dec216d 100644
--- a/drivers/net/ethernet/dec/tulip/21142.c
+++ b/drivers/net/ethernet/dec/tulip/21142.c
@@ -216,7 +216,7 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
(csr12 & 2) == 2) ||
(tp->nway && (csr5 & (TPLnkFail)))) {
/* Link blew? Maybe restart NWay. */
- del_timer_sync(&tp->timer);
+ timer_delete_sync(&tp->timer);
t21142_start_nway(dev);
tp->timer.expires = RUN_AT(3*HZ);
add_timer(&tp->timer);
@@ -226,7 +226,7 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
medianame[dev->if_port],
(csr12 & 2) ? "failed" : "good");
if ((csr12 & 2) && ! tp->medialock) {
- del_timer_sync(&tp->timer);
+ timer_delete_sync(&tp->timer);
t21142_start_nway(dev);
tp->timer.expires = RUN_AT(3*HZ);
add_timer(&tp->timer);
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 0a161a4db242..f9504f340c4a 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -963,7 +963,7 @@ static void de_next_media (struct de_private *de, const u32 *media,
static void de21040_media_timer (struct timer_list *t)
{
- struct de_private *de = from_timer(de, t, media_timer);
+ struct de_private *de = timer_container_of(de, t, media_timer);
struct net_device *dev = de->dev;
u32 status = dr32(SIAStatus);
unsigned int carrier;
@@ -1044,7 +1044,7 @@ static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
static void de21041_media_timer (struct timer_list *t)
{
- struct de_private *de = from_timer(de, t, media_timer);
+ struct de_private *de = timer_container_of(de, t, media_timer);
struct net_device *dev = de->dev;
u32 status = dr32(SIAStatus);
unsigned int carrier;
@@ -1428,7 +1428,7 @@ static int de_close (struct net_device *dev)
netif_dbg(de, ifdown, dev, "disabling interface\n");
- del_timer_sync(&de->media_timer);
+ timer_delete_sync(&de->media_timer);
spin_lock_irqsave(&de->lock, flags);
de_stop_hw(de);
@@ -1452,7 +1452,7 @@ static void de_tx_timeout (struct net_device *dev, unsigned int txqueue)
dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
de->rx_tail, de->tx_head, de->tx_tail);
- del_timer_sync(&de->media_timer);
+ timer_delete_sync(&de->media_timer);
disable_irq(irq);
spin_lock_irq(&de->lock);
@@ -2126,7 +2126,7 @@ static int __maybe_unused de_suspend(struct device *dev_d)
if (netif_running (dev)) {
const int irq = pdev->irq;
- del_timer_sync(&de->media_timer);
+ timer_delete_sync(&de->media_timer);
disable_irq(irq);
spin_lock_irq(&de->lock);
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 3188ba7b450f..2d3bd343b6e6 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -745,7 +745,7 @@ static int dmfe_stop(struct net_device *dev)
netif_stop_queue(dev);
/* deleted timer */
- del_timer_sync(&db->timer);
+ timer_delete_sync(&db->timer);
/* Reset & stop DM910X board */
dw32(DCR0, DM910X_RESET);
@@ -1115,7 +1115,7 @@ static const struct ethtool_ops netdev_ethtool_ops = {
static void dmfe_timer(struct timer_list *t)
{
- struct dmfe_board_info *db = from_timer(db, t, timer);
+ struct dmfe_board_info *db = timer_container_of(db, t, timer);
struct net_device *dev = pci_get_drvdata(db->pdev);
void __iomem *ioaddr = db->ioaddr;
u32 tmp_cr8;
diff --git a/drivers/net/ethernet/dec/tulip/interrupt.c b/drivers/net/ethernet/dec/tulip/interrupt.c
index 54560f9a1651..0a12cb9b3ba7 100644
--- a/drivers/net/ethernet/dec/tulip/interrupt.c
+++ b/drivers/net/ethernet/dec/tulip/interrupt.c
@@ -104,7 +104,7 @@ int tulip_refill_rx(struct net_device *dev)
void oom_timer(struct timer_list *t)
{
- struct tulip_private *tp = from_timer(tp, t, oom_timer);
+ struct tulip_private *tp = timer_container_of(tp, t, oom_timer);
napi_schedule(&tp->napi);
}
@@ -699,8 +699,8 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
tulip_start_rxtx(tp);
}
/*
- * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
- * call is ever done under the spinlock
+ * NB: t21142_lnk_change() does a timer_delete_sync(), so be careful
+ * if this call is ever done under the spinlock
*/
if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
if (tp->link_change)
diff --git a/drivers/net/ethernet/dec/tulip/pnic.c b/drivers/net/ethernet/dec/tulip/pnic.c
index 653bde48ef44..1de5ed967070 100644
--- a/drivers/net/ethernet/dec/tulip/pnic.c
+++ b/drivers/net/ethernet/dec/tulip/pnic.c
@@ -86,7 +86,7 @@ void pnic_lnk_change(struct net_device *dev, int csr5)
void pnic_timer(struct timer_list *t)
{
- struct tulip_private *tp = from_timer(tp, t, timer);
+ struct tulip_private *tp = timer_container_of(tp, t, timer);
struct net_device *dev = tp->dev;
void __iomem *ioaddr = tp->base_addr;
int next_tick = 60*HZ;
diff --git a/drivers/net/ethernet/dec/tulip/pnic2.c b/drivers/net/ethernet/dec/tulip/pnic2.c
index 72a09156b48b..39c410bf224e 100644
--- a/drivers/net/ethernet/dec/tulip/pnic2.c
+++ b/drivers/net/ethernet/dec/tulip/pnic2.c
@@ -78,7 +78,7 @@
void pnic2_timer(struct timer_list *t)
{
- struct tulip_private *tp = from_timer(tp, t, timer);
+ struct tulip_private *tp = timer_container_of(tp, t, timer);
struct net_device *dev = tp->dev;
void __iomem *ioaddr = tp->base_addr;
int next_tick = 60*HZ;
@@ -323,7 +323,7 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
if (tulip_debug > 2)
netdev_dbg(dev, "Ugh! Link blew?\n");
- del_timer_sync(&tp->timer);
+ timer_delete_sync(&tp->timer);
pnic2_start_nway(dev);
tp->timer.expires = RUN_AT(3*HZ);
add_timer(&tp->timer);
@@ -348,7 +348,7 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
/* if failed then try doing an nway to get in sync */
if ((csr12 & 2) && ! tp->medialock) {
- del_timer_sync(&tp->timer);
+ timer_delete_sync(&tp->timer);
pnic2_start_nway(dev);
tp->timer.expires = RUN_AT(3*HZ);
add_timer(&tp->timer);
@@ -372,7 +372,7 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
/* if failed, try doing an nway to get in sync */
if ((csr12 & 4) && ! tp->medialock) {
- del_timer_sync(&tp->timer);
+ timer_delete_sync(&tp->timer);
pnic2_start_nway(dev);
tp->timer.expires = RUN_AT(3*HZ);
add_timer(&tp->timer);
diff --git a/drivers/net/ethernet/dec/tulip/timer.c b/drivers/net/ethernet/dec/tulip/timer.c
index 642e9dfc5451..ca0c509b601c 100644
--- a/drivers/net/ethernet/dec/tulip/timer.c
+++ b/drivers/net/ethernet/dec/tulip/timer.c
@@ -139,7 +139,7 @@ void tulip_media_task(struct work_struct *work)
void mxic_timer(struct timer_list *t)
{
- struct tulip_private *tp = from_timer(tp, t, timer);
+ struct tulip_private *tp = timer_container_of(tp, t, timer);
struct net_device *dev = tp->dev;
void __iomem *ioaddr = tp->base_addr;
int next_tick = 60*HZ;
@@ -156,7 +156,7 @@ void mxic_timer(struct timer_list *t)
void comet_timer(struct timer_list *t)
{
- struct tulip_private *tp = from_timer(tp, t, timer);
+ struct tulip_private *tp = timer_container_of(tp, t, timer);
struct net_device *dev = tp->dev;
int next_tick = 2*HZ;
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 27e01d780cd0..b608585f1954 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -114,7 +114,7 @@ int tulip_debug = 1;
static void tulip_timer(struct timer_list *t)
{
- struct tulip_private *tp = from_timer(tp, t, timer);
+ struct tulip_private *tp = timer_container_of(tp, t, timer);
struct net_device *dev = tp->dev;
if (netif_running(dev))
@@ -747,9 +747,9 @@ static void tulip_down (struct net_device *dev)
napi_disable(&tp->napi);
#endif
- del_timer_sync (&tp->timer);
+ timer_delete_sync(&tp->timer);
#ifdef CONFIG_TULIP_NAPI
- del_timer_sync (&tp->oom_timer);
+ timer_delete_sync(&tp->oom_timer);
#endif
spin_lock_irqsave (&tp->lock, flags);
@@ -1177,7 +1177,6 @@ static void set_rx_mode(struct net_device *dev)
iowrite32(csr6, ioaddr + CSR6);
}
-#ifdef CONFIG_TULIP_MWI
static void tulip_mwi_config(struct pci_dev *pdev, struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
@@ -1251,7 +1250,6 @@ out:
netdev_dbg(dev, "MWI config cacheline=%d, csr0=%08x\n",
cache, csr0);
}
-#endif
/*
* Chips that have the MRM/reserved bit quirk and the burst quirk. That
@@ -1413,7 +1411,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* grab all resources from both PIO and MMIO regions, as we
* don't want anyone else messing around with our hardware */
- if (pci_request_regions(pdev, DRV_NAME))
+ if (pcim_request_all_regions(pdev, DRV_NAME))
return -ENODEV;
ioaddr = pcim_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
@@ -1463,10 +1461,9 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
-#ifdef CONFIG_TULIP_MWI
- if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
+ if (IS_ENABLED(CONFIG_TULIP_MWI) && !force_csr0 &&
+ (tp->flags & HAS_PCI_MWI))
tulip_mwi_config (pdev, dev);
-#endif
/* Stop the chip's Tx and Rx processes. */
tulip_stop_rxtx(tp);
@@ -1553,7 +1550,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
(PCI_SLOT(pdev->devfn) == 12))) {
/* Cobalt MAC address in first EEPROM locations. */
sa_offset = 0;
- /* Ensure our media table fixup get's applied */
+ /* Ensure our media table fixup gets applied */
memcpy(ee_data + 16, ee_data, 8);
}
#endif
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index ff080ab0f116..6e4d8d31aba9 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -656,7 +656,7 @@ static int uli526x_stop(struct net_device *dev)
netif_stop_queue(dev);
/* deleted timer */
- del_timer_sync(&db->timer);
+ timer_delete_sync(&db->timer);
/* Reset & stop ULI526X board */
uw32(DCR0, ULI526X_RESET);
@@ -1014,7 +1014,7 @@ static const struct ethtool_ops netdev_ethtool_ops = {
static void uli526x_timer(struct timer_list *t)
{
- struct uli526x_board_info *db = from_timer(db, t, timer);
+ struct uli526x_board_info *db = timer_container_of(db, t, timer);
struct net_device *dev = pci_get_drvdata(db->pdev);
struct uli_phy_ops *phy = &db->phy;
void __iomem *ioaddr = db->ioaddr;
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 37fba39c0056..a24a25a5f73d 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -375,7 +375,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
SET_NETDEV_DEV(dev, &pdev->dev);
- if (pci_request_regions(pdev, DRV_NAME))
+ if (pcim_request_all_regions(pdev, DRV_NAME))
goto err_out_netdev;
ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
@@ -763,7 +763,7 @@ static inline void update_csr6(struct net_device *dev, int new)
static void netdev_timer(struct timer_list *t)
{
- struct netdev_private *np = from_timer(np, t, timer);
+ struct netdev_private *np = timer_container_of(np, t, timer);
struct net_device *dev = pci_get_drvdata(np->pci_dev);
void __iomem *ioaddr = np->base_addr;
@@ -1509,7 +1509,7 @@ static int netdev_close(struct net_device *dev)
}
#endif /* __i386__ debugging only */
- del_timer_sync(&np->timer);
+ timer_delete_sync(&np->timer);
free_rxtx_rings(np);
free_ringdesc(np);
@@ -1560,7 +1560,7 @@ static int __maybe_unused w840_suspend(struct device *dev_d)
rtnl_lock();
if (netif_running (dev)) {
- del_timer_sync(&np->timer);
+ timer_delete_sync(&np->timer);
spin_lock_irq(&np->lock);
netif_device_detach(dev);
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index 8759f9f76b62..e5d2ede13845 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -143,7 +143,7 @@ static const struct pci_device_id xircom_pci_table[] = {
};
MODULE_DEVICE_TABLE(pci, xircom_pci_table);
-static struct pci_driver xircom_ops = {
+static struct pci_driver xircom_driver = {
.name = "xircom_cb",
.id_table = xircom_pci_table,
.probe = xircom_probe,
@@ -1169,4 +1169,4 @@ investigate_write_descriptor(struct net_device *dev,
}
}
-module_pci_driver(xircom_ops);
+module_pci_driver(xircom_driver);
diff --git a/drivers/net/ethernet/dlink/Kconfig b/drivers/net/ethernet/dlink/Kconfig
index e9e13654812c..0d77f84c8e7b 100644
--- a/drivers/net/ethernet/dlink/Kconfig
+++ b/drivers/net/ethernet/dlink/Kconfig
@@ -32,4 +32,24 @@ config DL2K
To compile this driver as a module, choose M here: the
module will be called dl2k.
+config SUNDANCE
+ tristate "Sundance Alta support"
+ depends on PCI
+ select CRC32
+ select MII
+ help
+ This driver is for the Sundance "Alta" chip.
+ More specific information and updates are available from
+ <http://www.scyld.com/network/sundance.html>.
+
+config SUNDANCE_MMIO
+ bool "Use MMIO instead of PIO"
+ depends on SUNDANCE
+ help
+ Enable memory-mapped I/O for interaction with Sundance NIC registers.
+	  Do NOT enable this by default; PIO (used when MMIO is disabled)
+	  is known to work around bugs on certain chips.
+
+ If unsure, say N.
+
endif # NET_VENDOR_DLINK
diff --git a/drivers/net/ethernet/dlink/Makefile b/drivers/net/ethernet/dlink/Makefile
index 38c236eb6007..3ff503c747db 100644
--- a/drivers/net/ethernet/dlink/Makefile
+++ b/drivers/net/ethernet/dlink/Makefile
@@ -4,3 +4,4 @@
#
obj-$(CONFIG_DL2K) += dl2k.o
+obj-$(CONFIG_SUNDANCE) += sundance.o
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index d0ea92607870..846d58c769ea 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -41,7 +41,7 @@ module_param(tx_flow, int, 0);
module_param(rx_flow, int, 0);
module_param(copy_thresh, int, 0);
module_param(rx_coalesce, int, 0); /* Rx frame count each interrupt */
-module_param(rx_timeout, int, 0); /* Rx DMA wait time in 64ns increments */
+module_param(rx_timeout, int, 0); /* Rx DMA wait time in 640ns increments */
module_param(tx_coalesce, int, 0); /* HW xmit count each TxDMAComplete */
@@ -99,6 +99,13 @@ static const struct net_device_ops netdev_ops = {
.ndo_tx_timeout = rio_tx_timeout,
};
+static bool is_support_rmon_mmio(struct pci_dev *pdev)
+{
+ return pdev->vendor == PCI_VENDOR_ID_DLINK &&
+ pdev->device == 0x4000 &&
+ pdev->revision == 0x0c;
+}
+
static int
rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -131,21 +138,27 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
np = netdev_priv(dev);
+ if (is_support_rmon_mmio(pdev))
+ np->rmon_enable = true;
+
/* IO registers range. */
ioaddr = pci_iomap(pdev, 0, 0);
if (!ioaddr)
goto err_out_dev;
np->eeprom_addr = ioaddr;
-#ifdef MEM_MAPPING
- /* MM registers range. */
- ioaddr = pci_iomap(pdev, 1, 0);
- if (!ioaddr)
- goto err_out_iounmap;
-#endif
+ if (np->rmon_enable) {
+ /* MM registers range. */
+ ioaddr = pci_iomap(pdev, 1, 0);
+ if (!ioaddr)
+ goto err_out_iounmap;
+ }
+
np->ioaddr = ioaddr;
np->chip_id = chip_idx;
np->pdev = pdev;
+
+ spin_lock_init(&np->stats_lock);
spin_lock_init (&np->tx_lock);
spin_lock_init (&np->rx_lock);
@@ -249,7 +262,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
np->link_status = 0;
/* Set media and reset PHY */
if (np->phy_media) {
- /* default Auto-Negotiation for fiber deivices */
+ /* default Auto-Negotiation for fiber devices */
if (np->an_enable == 2) {
np->an_enable = 1;
}
@@ -287,9 +300,8 @@ err_out_unmap_tx:
dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
np->tx_ring_dma);
err_out_iounmap:
-#ifdef MEM_MAPPING
- pci_iounmap(pdev, np->ioaddr);
-#endif
+ if (np->rmon_enable)
+ pci_iounmap(pdev, np->ioaddr);
pci_iounmap(pdev, np->eeprom_addr);
err_out_dev:
free_netdev (dev);
@@ -352,7 +364,7 @@ parse_eeprom (struct net_device *dev)
eth_hw_addr_set(dev, psrom->mac_addr);
if (np->chip_id == CHIP_IP1000A) {
- np->led_mode = psrom->led_mode;
+ np->led_mode = le16_to_cpu(psrom->led_mode);
return 0;
}
@@ -496,25 +508,34 @@ static int alloc_list(struct net_device *dev)
for (i = 0; i < RX_RING_SIZE; i++) {
/* Allocated fixed size of skbuff */
struct sk_buff *skb;
+ dma_addr_t addr;
skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
np->rx_skbuff[i] = skb;
- if (!skb) {
- free_list(dev);
- return -ENOMEM;
- }
+ if (!skb)
+ goto err_free_list;
+
+ addr = dma_map_single(&np->pdev->dev, skb->data,
+ np->rx_buf_sz, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&np->pdev->dev, addr))
+ goto err_kfree_skb;
np->rx_ring[i].next_desc = cpu_to_le64(np->rx_ring_dma +
((i + 1) % RX_RING_SIZE) *
sizeof(struct netdev_desc));
/* Rubicon now supports 40 bits of addressing space. */
- np->rx_ring[i].fraginfo =
- cpu_to_le64(dma_map_single(&np->pdev->dev, skb->data,
- np->rx_buf_sz, DMA_FROM_DEVICE));
+ np->rx_ring[i].fraginfo = cpu_to_le64(addr);
np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
}
return 0;
+
+err_kfree_skb:
+ dev_kfree_skb(np->rx_skbuff[i]);
+ np->rx_skbuff[i] = NULL;
+err_free_list:
+ free_list(dev);
+ return -ENOMEM;
}
static void rio_hw_init(struct net_device *dev)
@@ -576,7 +597,8 @@ static void rio_hw_init(struct net_device *dev)
dw8(TxDMAPollPeriod, 0xff);
dw8(RxDMABurstThresh, 0x30);
dw8(RxDMAUrgentThresh, 0x30);
- dw32(RmonStatMask, 0x0007ffff);
+ if (!np->rmon_enable)
+ dw32(RmonStatMask, 0x0007ffff);
/* clear statistics */
clear_stats (dev);
@@ -648,7 +670,7 @@ static int rio_open(struct net_device *dev)
static void
rio_timer (struct timer_list *t)
{
- struct netdev_private *np = from_timer(np, t, timer);
+ struct netdev_private *np = timer_container_of(np, t, timer);
struct net_device *dev = pci_get_drvdata(np->pdev);
unsigned int entry;
int next_tick = 1*HZ;
@@ -711,7 +733,7 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
u64 tfc_vlan_tag = 0;
if (np->link_status == 0) { /* Link Down */
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
entry = np->cur_tx % TX_RING_SIZE;
@@ -865,8 +887,7 @@ tx_error (struct net_device *dev, int tx_status)
frame_id = (tx_status & 0xffff0000);
printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
dev->name, tx_status, frame_id);
- dev->stats.tx_errors++;
- /* Ttransmit Underrun */
+ /* Transmit Underrun */
if (tx_status & 0x10) {
dev->stats.tx_fifo_errors++;
dw16(TxStartThresh, dr16(TxStartThresh) + 0x10);
@@ -902,9 +923,15 @@ tx_error (struct net_device *dev, int tx_status)
rio_set_led_mode(dev);
/* Let TxStartThresh stay default value */
}
+
+ spin_lock(&np->stats_lock);
/* Maximum Collisions */
if (tx_status & 0x08)
dev->stats.collisions++;
+
+ dev->stats.tx_errors++;
+ spin_unlock(&np->stats_lock);
+
/* Restart the Tx */
dw32(MACCtrl, dr16(MACCtrl) | TxEnable);
}
@@ -946,15 +973,18 @@ receive_packet (struct net_device *dev)
} else {
struct sk_buff *skb;
+ skb = NULL;
/* Small skbuffs for short packets */
- if (pkt_len > copy_thresh) {
+ if (pkt_len <= copy_thresh)
+ skb = netdev_alloc_skb_ip_align(dev, pkt_len);
+ if (!skb) {
dma_unmap_single(&np->pdev->dev,
desc_to_dma(desc),
np->rx_buf_sz,
DMA_FROM_DEVICE);
skb_put (skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL;
- } else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
+ } else {
dma_sync_single_for_cpu(&np->pdev->dev,
desc_to_dma(desc),
np->rx_buf_sz,
@@ -1053,7 +1083,7 @@ rio_error (struct net_device *dev, int int_status)
get_stats (dev);
}
- /* PCI Error, a catastronphic error related to the bus interface
+ /* PCI Error, a catastrophic error related to the bus interface
occurs, set GlobalReset and HostReset to reset. */
if (int_status & HostError) {
printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n",
@@ -1069,11 +1099,10 @@ get_stats (struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->ioaddr;
-#ifdef MEM_MAPPING
- int i;
-#endif
unsigned int stat_reg;
+ unsigned long flags;
+ spin_lock_irqsave(&np->stats_lock, flags);
/* All statistics registers need to be acknowledged,
else statistic overflow could cause problems */
@@ -1082,7 +1111,7 @@ get_stats (struct net_device *dev)
dev->stats.rx_bytes += dr32(OctetRcvOk);
dev->stats.tx_bytes += dr32(OctetXmtOk);
- dev->stats.multicast = dr32(McstFramesRcvdOk);
+ dev->stats.multicast += dr32(McstFramesRcvdOk);
dev->stats.collisions += dr32(SingleColFrames)
+ dr32(MultiColFrames);
@@ -1114,15 +1143,18 @@ get_stats (struct net_device *dev)
dr16(MacControlFramesXmtd);
dr16(FramesWEXDeferal);
-#ifdef MEM_MAPPING
- for (i = 0x100; i <= 0x150; i += 4)
- dr32(i);
-#endif
+ if (np->rmon_enable)
+ for (int i = 0x100; i <= 0x150; i += 4)
+ dr32(i);
+
dr16(TxJumboFrames);
dr16(RxJumboFrames);
dr16(TCPCheckSumErrors);
dr16(UDPCheckSumErrors);
dr16(IPCheckSumErrors);
+
+ spin_unlock_irqrestore(&np->stats_lock, flags);
+
return &dev->stats;
}
@@ -1131,9 +1163,6 @@ clear_stats (struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->ioaddr;
-#ifdef MEM_MAPPING
- int i;
-#endif
/* All statistics registers need to be acknowledged,
else statistic overflow could cause problems */
@@ -1169,10 +1198,9 @@ clear_stats (struct net_device *dev)
dr16(BcstFramesXmtdOk);
dr16(MacControlFramesXmtd);
dr16(FramesWEXDeferal);
-#ifdef MEM_MAPPING
- for (i = 0x100; i <= 0x150; i += 4)
- dr32(i);
-#endif
+ if (np->rmon_enable)
+ for (int i = 0x100; i <= 0x150; i += 4)
+ dr32(i);
dr16(TxJumboFrames);
dr16(RxJumboFrames);
dr16(TCPCheckSumErrors);
@@ -1778,7 +1806,7 @@ rio_close (struct net_device *dev)
rio_hw_stop(dev);
free_irq(pdev->irq, dev);
- del_timer_sync (&np->timer);
+ timer_delete_sync(&np->timer);
free_list(dev);
@@ -1798,9 +1826,8 @@ rio_remove1 (struct pci_dev *pdev)
np->rx_ring_dma);
dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
np->tx_ring_dma);
-#ifdef MEM_MAPPING
- pci_iounmap(pdev, np->ioaddr);
-#endif
+ if (np->rmon_enable)
+ pci_iounmap(pdev, np->ioaddr);
pci_iounmap(pdev, np->eeprom_addr);
free_netdev (dev);
pci_release_regions (pdev);
@@ -1818,7 +1845,7 @@ static int rio_suspend(struct device *device)
return 0;
netif_device_detach(dev);
- del_timer_sync(&np->timer);
+ timer_delete_sync(&np->timer);
rio_hw_stop(dev);
return 0;
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
index 195dc6cfd895..9ebf7a6db93e 100644
--- a/drivers/net/ethernet/dlink/dl2k.h
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -270,7 +270,7 @@ enum _pcs_reg {
PCS_ESR = 15,
};
-/* IEEE Extened Status Register */
+/* IEEE Extended Status Register */
enum _mii_esr {
MII_ESR_1000BX_FD = 0x8000,
MII_ESR_1000BX_HD = 0x4000,
@@ -329,18 +329,18 @@ enum _pcs_anlpar {
};
typedef struct t_SROM {
- u16 config_param; /* 0x00 */
- u16 asic_ctrl; /* 0x02 */
- u16 sub_vendor_id; /* 0x04 */
- u16 sub_system_id; /* 0x06 */
- u16 pci_base_1; /* 0x08 (IP1000A only) */
- u16 pci_base_2; /* 0x0a (IP1000A only) */
- u16 led_mode; /* 0x0c (IP1000A only) */
- u16 reserved1[9]; /* 0x0e-0x1f */
+ __le16 config_param; /* 0x00 */
+ __le16 asic_ctrl; /* 0x02 */
+ __le16 sub_vendor_id; /* 0x04 */
+ __le16 sub_system_id; /* 0x06 */
+ __le16 pci_base_1; /* 0x08 (IP1000A only) */
+ __le16 pci_base_2; /* 0x0a (IP1000A only) */
+ __le16 led_mode; /* 0x0c (IP1000A only) */
+ __le16 reserved1[9]; /* 0x0e-0x1f */
u8 mac_addr[6]; /* 0x20-0x25 */
u8 reserved2[10]; /* 0x26-0x2f */
u8 sib[204]; /* 0x30-0xfb */
- u32 crc; /* 0xfc-0xff */
+ __le32 crc; /* 0xfc-0xff */
} SROM_t, *PSROM_t;
/* Ioctl custom data */
@@ -372,6 +372,8 @@ struct netdev_private {
struct pci_dev *pdev;
void __iomem *ioaddr;
void __iomem *eeprom_addr;
+ // To ensure synchronization when stats are updated.
+ spinlock_t stats_lock;
spinlock_t tx_lock;
spinlock_t rx_lock;
unsigned int rx_buf_sz; /* Based on MTU+slack. */
@@ -401,6 +403,8 @@ struct netdev_private {
u16 negotiate; /* Negotiated media */
int phy_addr; /* PHY addresses. */
u16 led_mode; /* LED mode read from EEPROM (IP1000A only) */
+
+ bool rmon_enable;
};
/* The station address location in the EEPROM. */
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
new file mode 100644
index 000000000000..277c50ef773f
--- /dev/null
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -0,0 +1,1990 @@
+/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
+/*
+ Written 1999-2000 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Support and updates available at
+ http://www.scyld.com/network/sundance.html
+ [link no longer provides useful info -jgarzik]
+ Archives of the mailing list are still available at
+ https://www.beowulf.org/pipermail/netdrivers/
+
+*/
+
+#define DRV_NAME "sundance"
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ Typical is a 64 element hash table based on the Ethernet CRC. */
+static const int multicast_filter_limit = 32;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature.
+ This chip can receive into offset buffers, so the Alpha does not
+ need a copy-align. */
+static int rx_copybreak;
+static int flowctrl=1;
+
+/* media[] specifies the media type the NIC operates at.
+ autosense Autosensing active media.
+ 10mbps_hd 10Mbps half duplex.
+ 10mbps_fd 10Mbps full duplex.
+ 100mbps_hd 100Mbps half duplex.
+ 100mbps_fd 100Mbps full duplex.
+ 0 Autosensing active media.
+ 1 10Mbps half duplex.
+ 2 10Mbps full duplex.
+ 3 100Mbps half duplex.
+ 4 100Mbps full duplex.
+*/
+#define MAX_UNITS 8
+static char *media[MAX_UNITS];
+
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority, and more than 128 requires modifying the
+ Tx error recovery.
+ Large receive rings merely waste memory. */
+#define TX_RING_SIZE 32
+#define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
+#define RX_RING_SIZE 64
+#define RX_BUDGET 32
+#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
+#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc)
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (4*HZ)
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
+
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/uaccess.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/io.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/crc32.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
+MODULE_LICENSE("GPL");
+
+module_param(debug, int, 0);
+module_param(rx_copybreak, int, 0);
+module_param_array(media, charp, NULL, 0);
+module_param(flowctrl, int, 0);
+MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
+MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This driver is designed for the Sundance Technologies "Alta" ST201 chip.
+
+II. Board-specific settings
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
+Some chips explicitly use only 2^N sized rings, while others use a
+'next descriptor' pointer that the driver forms into rings.
+
+IIIb/c. Transmit/Receive Structure
+
+This driver uses a zero-copy receive and transmit scheme.
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the chip as receive data
+buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack. Buffers consumed this way are replaced by newly allocated
+skbuffs in a later phase of receives.
+
+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. New boards are typically used in generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets. When copying is done, the cost is usually mitigated by using
+a combined copy/checksum routine. Copying also preloads the cache, which is
+most useful with small frames.
+
+A subtle aspect of the operation is that the IP header at offset 14 in an
+ethernet frame isn't longword aligned for further processing.
+Unaligned buffers are permitted by the Sundance hardware, so
+frames are received into the skbuff at an offset of "+2", 16-byte aligning
+the IP header.
+
+IIId. Synchronization
+
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and interrupt handling software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
+the 'lp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
+clears both the tx_full and tbusy flags.
+
+IV. Notes
+
+IVb. References
+
+The Sundance ST201 datasheet, preliminary version.
+The Kendin KS8723 datasheet, preliminary version.
+The ICplus IP100 datasheet, preliminary version.
+http://www.scyld.com/expert/100mbps.html
+http://www.scyld.com/expert/NWay.html
+
+IVc. Errata
+
+*/
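Editor's illustration (not part of sundance.c): a minimal model of the rx_copybreak decision described in IIIb/c above, with all names invented for the example. Frames shorter than the copybreak are copied into a freshly allocated small buffer so the full-sized ring buffer can be reused; larger frames hand the ring buffer itself up the stack.

#include <stdbool.h>
#include <stdio.h>

static int model_rx_copybreak = 256;	/* the driver defaults to 0: never copy */

/* true -> copy into a small fresh buffer and keep the ring buffer in place */
static bool model_should_copy(int pkt_len)
{
	return pkt_len < model_rx_copybreak;
}

int main(void)
{
	int lens[] = { 60, 200, 1514 };

	for (int i = 0; i < 3; i++)
		printf("len %4d -> %s\n", lens[i],
		       model_should_copy(lens[i]) ?
		       "copy to small skb" : "pass ring skb up the stack");
	return 0;
}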
+
+/* Work-around for Kendin chip bugs. */
+#ifndef CONFIG_SUNDANCE_MMIO
+#define USE_IO_OPS 1
+#endif
+
+static const struct pci_device_id sundance_pci_tbl[] = {
+ { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
+ { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
+ { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
+ { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
+ { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
+ { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
+ { 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
+
+enum {
+ netdev_io_size = 128
+};
+
+struct pci_id_info {
+ const char *name;
+};
+static const struct pci_id_info pci_id_tbl[] = {
+ {"D-Link DFE-550TX FAST Ethernet Adapter"},
+ {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
+ {"D-Link DFE-580TX 4 port Server Adapter"},
+ {"D-Link DFE-530TXS FAST Ethernet Adapter"},
+ {"D-Link DL10050-based FAST Ethernet Adapter"},
+ {"Sundance Technology Alta"},
+ {"IC Plus Corporation IP100A FAST Ethernet Adapter"},
+ { } /* terminate list. */
+};
+
+/* This driver was written to use PCI memory space, however x86-oriented
+ hardware often uses I/O space accesses. */
+
+/* Offsets to the device registers.
+ Unlike software-only systems, device drivers interact with complex hardware.
+ It's not useful to define symbolic names for every register bit in the
+ device. The name can only partially document the semantics and make
+ the driver longer and more difficult to read.
+ In general, only the important configuration values or bits changed
+ multiple times should be defined symbolically.
+*/
+enum alta_offsets {
+ DMACtrl = 0x00,
+ TxListPtr = 0x04,
+ TxDMABurstThresh = 0x08,
+ TxDMAUrgentThresh = 0x09,
+ TxDMAPollPeriod = 0x0a,
+ RxDMAStatus = 0x0c,
+ RxListPtr = 0x10,
+ DebugCtrl0 = 0x1a,
+ DebugCtrl1 = 0x1c,
+ RxDMABurstThresh = 0x14,
+ RxDMAUrgentThresh = 0x15,
+ RxDMAPollPeriod = 0x16,
+ LEDCtrl = 0x1a,
+ ASICCtrl = 0x30,
+ EEData = 0x34,
+ EECtrl = 0x36,
+ FlashAddr = 0x40,
+ FlashData = 0x44,
+ WakeEvent = 0x45,
+ TxStatus = 0x46,
+ TxFrameId = 0x47,
+ DownCounter = 0x18,
+ IntrClear = 0x4a,
+ IntrEnable = 0x4c,
+ IntrStatus = 0x4e,
+ MACCtrl0 = 0x50,
+ MACCtrl1 = 0x52,
+ StationAddr = 0x54,
+ MaxFrameSize = 0x5A,
+ RxMode = 0x5c,
+ MIICtrl = 0x5e,
+ MulticastFilter0 = 0x60,
+ MulticastFilter1 = 0x64,
+ RxOctetsLow = 0x68,
+ RxOctetsHigh = 0x6a,
+ TxOctetsLow = 0x6c,
+ TxOctetsHigh = 0x6e,
+ TxFramesOK = 0x70,
+ RxFramesOK = 0x72,
+ StatsCarrierError = 0x74,
+ StatsLateColl = 0x75,
+ StatsMultiColl = 0x76,
+ StatsOneColl = 0x77,
+ StatsTxDefer = 0x78,
+ RxMissed = 0x79,
+ StatsTxXSDefer = 0x7a,
+ StatsTxAbort = 0x7b,
+ StatsBcastTx = 0x7c,
+ StatsBcastRx = 0x7d,
+ StatsMcastTx = 0x7e,
+ StatsMcastRx = 0x7f,
+ /* Aliased and bogus values! */
+ RxStatus = 0x0c,
+};
+
+#define ASIC_HI_WORD(x) ((x) + 2)
+
+enum ASICCtrl_HiWord_bit {
+ GlobalReset = 0x0001,
+ RxReset = 0x0002,
+ TxReset = 0x0004,
+ DMAReset = 0x0008,
+ FIFOReset = 0x0010,
+ NetworkReset = 0x0020,
+ HostReset = 0x0040,
+ ResetBusy = 0x0400,
+};
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+ IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
+ IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
+ IntrDrvRqst=0x0040,
+ StatsMax=0x0080, LinkChange=0x0100,
+ IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
+};
+
+/* Bits in the RxMode register. */
+enum rx_mode_bits {
+ AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
+ AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
+};
+/* Bits in MACCtrl. */
+enum mac_ctrl0_bits {
+ EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
+ EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
+};
+enum mac_ctrl1_bits {
+ StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
+ TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
+ RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
+};
+
+/* Bits in WakeEvent register. */
+enum wake_event_bits {
+ WakePktEnable = 0x01,
+ MagicPktEnable = 0x02,
+ LinkEventEnable = 0x04,
+ WolEnable = 0x80,
+};
+
+/* The Rx and Tx buffer descriptors. */
+/* Note that using only 32 bit fields simplifies conversion to big-endian
+ architectures. */
+struct netdev_desc {
+ __le32 next_desc;
+ __le32 status;
+ struct desc_frag { __le32 addr, length; } frag;
+};
+
+/* Bits in netdev_desc.status */
+enum desc_status_bits {
+ DescOwn=0x8000,
+ DescEndPacket=0x4000,
+ DescEndRing=0x2000,
+ LastFrag=0x80000000,
+ DescIntrOnTx=0x8000,
+ DescIntrOnDMADone=0x80000000,
+ DisableAlign = 0x00000001,
+};
+
+#define PRIV_ALIGN 15 /* Required alignment mask */
+/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
+ within the structure. */
+#define MII_CNT 4
+struct netdev_private {
+ /* Descriptor rings first for alignment. */
+ struct netdev_desc *rx_ring;
+ struct netdev_desc *tx_ring;
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ dma_addr_t tx_ring_dma;
+ dma_addr_t rx_ring_dma;
+ struct timer_list timer; /* Media monitoring timer. */
+ struct net_device *ndev; /* backpointer */
+ /* ethtool extra stats */
+ struct {
+ u64 tx_multiple_collisions;
+ u64 tx_single_collisions;
+ u64 tx_late_collisions;
+ u64 tx_deferred;
+ u64 tx_deferred_excessive;
+ u64 tx_aborted;
+ u64 tx_bcasts;
+ u64 rx_bcasts;
+ u64 tx_mcasts;
+ u64 rx_mcasts;
+ } xstats;
+ /* Frequently used values: keep some adjacent for cache effect. */
+ spinlock_t lock;
+ int msg_enable;
+ int chip_id;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ struct netdev_desc *last_tx; /* Last Tx descriptor used. */
+ unsigned int cur_tx, dirty_tx;
+	/* These values keep track of the transceiver/media in use. */
+ unsigned int flowctrl:1;
+ unsigned int default_port:4; /* Last dev->if_port value. */
+ unsigned int an_enable:1;
+ unsigned int speed;
+ unsigned int wol_enabled:1; /* Wake on LAN enabled */
+ struct tasklet_struct rx_tasklet;
+ struct tasklet_struct tx_tasklet;
+ int budget;
+ int cur_task;
+ /* Multicast and receive mode. */
+ spinlock_t mcastlock; /* SMP lock multicast updates. */
+ u16 mcast_filter[4];
+ /* MII transceiver section. */
+ struct mii_if_info mii_if;
+ int mii_preamble_required;
+ unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
+ struct pci_dev *pci_dev;
+ void __iomem *base;
+ spinlock_t statlock;
+};
+
+/* The station address location in the EEPROM. */
+#define EEPROM_SA_OFFSET 0x10
+#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
+ IntrDrvRqst | IntrTxDone | StatsMax | \
+ LinkChange)
+
+static int change_mtu(struct net_device *dev, int new_mtu);
+static int eeprom_read(void __iomem *ioaddr, int location);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+static int mdio_wait_link(struct net_device *dev, int wait);
+static int netdev_open(struct net_device *dev);
+static void check_duplex(struct net_device *dev);
+static void netdev_timer(struct timer_list *t);
+static void tx_timeout(struct net_device *dev, unsigned int txqueue);
+static void init_ring(struct net_device *dev);
+static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
+static int reset_tx (struct net_device *dev);
+static irqreturn_t intr_handler(int irq, void *dev_instance);
+static void rx_poll(struct tasklet_struct *t);
+static void tx_poll(struct tasklet_struct *t);
+static void refill_rx (struct net_device *dev);
+static void netdev_error(struct net_device *dev, int intr_status);
+static void set_rx_mode(struct net_device *dev);
+static int __set_mac_addr(struct net_device *dev);
+static int sundance_set_mac_addr(struct net_device *dev, void *data);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int netdev_close(struct net_device *dev);
+static const struct ethtool_ops ethtool_ops;
+
+static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base + ASICCtrl;
+ int countdown;
+
+ /* ST201 documentation states ASICCtrl is a 32bit register */
+ iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
+ /* ST201 documentation states reset can take up to 1 ms */
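+	/* Poll in 100 us steps; 11 iterations gives ~1.1 ms, slightly more
+	   than the documented worst case. */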
+ countdown = 10 + 1;
+ while (ioread32 (ioaddr) & (ResetBusy << 16)) {
+ if (--countdown == 0) {
+			printk(KERN_WARNING "%s: reset not completed!\n", dev->name);
+ break;
+ }
+ udelay(100);
+ }
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void sundance_poll_controller(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ disable_irq(np->pci_dev->irq);
+ intr_handler(np->pci_dev->irq, dev);
+ enable_irq(np->pci_dev->irq);
+}
+#endif
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = netdev_open,
+ .ndo_stop = netdev_close,
+ .ndo_start_xmit = start_tx,
+ .ndo_get_stats = get_stats,
+ .ndo_set_rx_mode = set_rx_mode,
+ .ndo_eth_ioctl = netdev_ioctl,
+ .ndo_tx_timeout = tx_timeout,
+ .ndo_change_mtu = change_mtu,
+ .ndo_set_mac_address = sundance_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = sundance_poll_controller,
+#endif
+};
+
+static int sundance_probe1(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ struct netdev_private *np;
+ static int card_idx;
+ int chip_idx = ent->driver_data;
+ int irq;
+ int i;
+ void __iomem *ioaddr;
+ u16 mii_ctl;
+ void *ring_space;
+ dma_addr_t ring_dma;
+#ifdef USE_IO_OPS
+ int bar = 0;
+#else
+ int bar = 1;
+#endif
+ int phy, phy_end, phy_idx = 0;
+ __le16 addr[ETH_ALEN / 2];
+
+ if (pci_enable_device(pdev))
+ return -EIO;
+ pci_set_master(pdev);
+
+ irq = pdev->irq;
+
+ dev = alloc_etherdev(sizeof(*np));
+ if (!dev)
+ return -ENOMEM;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ if (pci_request_regions(pdev, DRV_NAME))
+ goto err_out_netdev;
+
+ ioaddr = pci_iomap(pdev, bar, netdev_io_size);
+ if (!ioaddr)
+ goto err_out_res;
+
+ for (i = 0; i < 3; i++)
+ addr[i] =
+ cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
+ eth_hw_addr_set(dev, (u8 *)addr);
+
+ np = netdev_priv(dev);
+ np->ndev = dev;
+ np->base = ioaddr;
+ np->pci_dev = pdev;
+ np->chip_id = chip_idx;
+ np->msg_enable = (1 << debug) - 1;
+ spin_lock_init(&np->lock);
+ spin_lock_init(&np->statlock);
+ tasklet_setup(&np->rx_tasklet, rx_poll);
+ tasklet_setup(&np->tx_tasklet, tx_poll);
+
+ ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
+ &ring_dma, GFP_KERNEL);
+ if (!ring_space)
+ goto err_out_cleardev;
+ np->tx_ring = (struct netdev_desc *)ring_space;
+ np->tx_ring_dma = ring_dma;
+
+ ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
+ &ring_dma, GFP_KERNEL);
+ if (!ring_space)
+ goto err_out_unmap_tx;
+ np->rx_ring = (struct netdev_desc *)ring_space;
+ np->rx_ring_dma = ring_dma;
+
+ np->mii_if.dev = dev;
+ np->mii_if.mdio_read = mdio_read;
+ np->mii_if.mdio_write = mdio_write;
+ np->mii_if.phy_id_mask = 0x1f;
+ np->mii_if.reg_num_mask = 0x1f;
+
+ /* The chip-specific entries in the device structure. */
+ dev->netdev_ops = &netdev_ops;
+ dev->ethtool_ops = &ethtool_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ /* MTU range: 68 - 8191 */
+ dev->min_mtu = ETH_MIN_MTU;
+ dev->max_mtu = 8191;
+
+ pci_set_drvdata(pdev, dev);
+
+ i = register_netdev(dev);
+ if (i)
+ goto err_out_unmap_rx;
+
+ printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
+ dev->name, pci_id_tbl[chip_idx].name, ioaddr,
+ dev->dev_addr, irq);
+
+ np->phys[0] = 1; /* Default setting */
+ np->mii_preamble_required++;
+
+	/*
+	 * It seems some PHYs don't deal well with address 0 being accessed
+	 * first.
+	 */
+ if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
+ phy = 0;
+ phy_end = 31;
+ } else {
+ phy = 1;
+ phy_end = 32; /* wraps to zero, due to 'phy & 0x1f' */
+ }
+ for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
+ int phyx = phy & 0x1f;
+ int mii_status = mdio_read(dev, phyx, MII_BMSR);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ np->phys[phy_idx++] = phyx;
+ np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
+ if ((mii_status & 0x0040) == 0)
+ np->mii_preamble_required++;
+ printk(KERN_INFO "%s: MII PHY found at address %d, status "
+ "0x%4.4x advertising %4.4x.\n",
+ dev->name, phyx, mii_status, np->mii_if.advertising);
+ }
+ }
+ np->mii_preamble_required--;
+
+ if (phy_idx == 0) {
+ printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n",
+ dev->name, ioread32(ioaddr + ASICCtrl));
+ goto err_out_unregister;
+ }
+
+ np->mii_if.phy_id = np->phys[0];
+
+ /* Parse override configuration */
+ np->an_enable = 1;
+ if (card_idx < MAX_UNITS) {
+ if (media[card_idx] != NULL) {
+ np->an_enable = 0;
+ if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
+ strcmp (media[card_idx], "4") == 0) {
+ np->speed = 100;
+ np->mii_if.full_duplex = 1;
+ } else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
+ strcmp (media[card_idx], "3") == 0) {
+ np->speed = 100;
+ np->mii_if.full_duplex = 0;
+ } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
+ strcmp (media[card_idx], "2") == 0) {
+ np->speed = 10;
+ np->mii_if.full_duplex = 1;
+ } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
+ strcmp (media[card_idx], "1") == 0) {
+ np->speed = 10;
+ np->mii_if.full_duplex = 0;
+ } else {
+ np->an_enable = 1;
+ }
+ }
+ if (flowctrl == 1)
+ np->flowctrl = 1;
+ }
+
+ /* Fibre PHY? */
+ if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
+ /* Default 100Mbps Full */
+ if (np->an_enable) {
+ np->speed = 100;
+ np->mii_if.full_duplex = 1;
+ np->an_enable = 0;
+ }
+ }
+ /* Reset PHY */
+ mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
+ mdelay (300);
+ /* If flow control enabled, we need to advertise it.*/
+ if (np->flowctrl)
+ mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
+ mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
+ /* Force media type */
+ if (!np->an_enable) {
+ mii_ctl = 0;
+ mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
+ mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
+ mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
+ printk (KERN_INFO "Override speed=%d, %s duplex\n",
+ np->speed, np->mii_if.full_duplex ? "Full" : "Half");
+
+ }
+
+ /* Perhaps move the reset here? */
+ /* Reset the chip to erase previous misconfiguration. */
+ if (netif_msg_hw(np))
+ printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
+ sundance_reset(dev, 0x00ff << 16);
+ if (netif_msg_hw(np))
+ printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
+
+ card_idx++;
+ return 0;
+
+err_out_unregister:
+ unregister_netdev(dev);
+err_out_unmap_rx:
+ dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
+ np->rx_ring, np->rx_ring_dma);
+err_out_unmap_tx:
+ dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
+ np->tx_ring, np->tx_ring_dma);
+err_out_cleardev:
+ pci_iounmap(pdev, ioaddr);
+err_out_res:
+ pci_release_regions(pdev);
+err_out_netdev:
+ free_netdev (dev);
+ return -ENODEV;
+}
+
+static int change_mtu(struct net_device *dev, int new_mtu)
+{
+ if (netif_running(dev))
+ return -EBUSY;
+ WRITE_ONCE(dev->mtu, new_mtu);
+ return 0;
+}
+
+#define eeprom_delay(ee_addr) ioread32(ee_addr)
+/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
+static int eeprom_read(void __iomem *ioaddr, int location)
+{
+ int boguscnt = 10000; /* Typical 1900 ticks. */
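+	/* Issue the read command (opcode in the high byte, address in the low
+	   byte), then poll EECtrl until bit 0x8000 (busy) clears; the result
+	   is then available in EEData. */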
+ iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
+ do {
+ eeprom_delay(ioaddr + EECtrl);
+ if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
+ return ioread16(ioaddr + EEData);
+ }
+ } while (--boguscnt > 0);
+ return 0;
+}
+
+/* MII transceiver control section.
+ Read and write the MII registers using software-generated serial
+ MDIO protocol. See the MII specifications or DP83840A data sheet
+ for details.
+
+   The maximum data clock rate is 2.5 MHz. The minimum timing is usually
+   met by back-to-back 33 MHz PCI cycles. */
+#define mdio_delay() ioread8(mdio_addr)
+
+enum mii_reg_bits {
+ MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
+};
+#define MDIO_EnbIn (0)
+#define MDIO_WRITE0 (MDIO_EnbOutput)
+#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
+
+/* Generate the preamble required for initial synchronization and
+ a few older transceivers. */
+static void mdio_sync(void __iomem *mdio_addr)
+{
+ int bits = 32;
+
+ /* Establish sync by sending at least 32 logic ones. */
+ while (--bits >= 0) {
+ iowrite8(MDIO_WRITE1, mdio_addr);
+ mdio_delay();
+ iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
+ mdio_delay();
+ }
+}
+
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *mdio_addr = np->base + MIICtrl;
+ int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+ int i, retval = 0;
+
+ if (np->mii_preamble_required)
+ mdio_sync(mdio_addr);
+
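+	/* The 16 bits shifted out form the tail of a standard clause-22 read
+	   frame: two filler preamble ones, the 01 start delimiter, the 10
+	   read opcode, then the 5-bit PHY and 5-bit register addresses. */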
+ /* Shift the read command bits out. */
+ for (i = 15; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+
+ iowrite8(dataval, mdio_addr);
+ mdio_delay();
+ iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
+ mdio_delay();
+ }
+ /* Read the two transition, 16 data, and wire-idle bits. */
+ for (i = 19; i > 0; i--) {
+ iowrite8(MDIO_EnbIn, mdio_addr);
+ mdio_delay();
+ retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
+ iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+ mdio_delay();
+ }
+ return (retval>>1) & 0xffff;
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *mdio_addr = np->base + MIICtrl;
+ int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
+ int i;
+
+ if (np->mii_preamble_required)
+ mdio_sync(mdio_addr);
+
+ /* Shift the command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+
+ iowrite8(dataval, mdio_addr);
+ mdio_delay();
+ iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
+ mdio_delay();
+ }
+ /* Clear out extra bits. */
+ for (i = 2; i > 0; i--) {
+ iowrite8(MDIO_EnbIn, mdio_addr);
+ mdio_delay();
+ iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+ mdio_delay();
+ }
+}
+
+static int mdio_wait_link(struct net_device *dev, int wait)
+{
+ int bmsr;
+ int phy_id;
+ struct netdev_private *np;
+
+ np = netdev_priv(dev);
+ phy_id = np->phys[0];
+
+ do {
+ bmsr = mdio_read(dev, phy_id, MII_BMSR);
+ if (bmsr & 0x0004)
+ return 0;
+ mdelay(1);
+ } while (--wait > 0);
+ return -1;
+}
+
+static int netdev_open(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ const int irq = np->pci_dev->irq;
+ unsigned long flags;
+ int i;
+
+ sundance_reset(dev, 0x00ff << 16);
+
+ i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
+ if (i)
+ return i;
+
+ if (netif_msg_ifup(np))
+ printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq);
+
+ init_ring(dev);
+
+ iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
+ /* The Tx list pointer is written as packets are queued. */
+
+ /* Initialize other registers. */
+ __set_mac_addr(dev);
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+ iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
+#else
+ iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
+#endif
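+	/* Frame size limit: the MTU plus the 14-byte Ethernet header, plus
+	   4 more bytes for a VLAN tag when 802.1Q support is built in. */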
+ if (dev->mtu > 2047)
+ iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
+
+ /* Configure the PCI bus bursts and FIFO thresholds. */
+
+ if (dev->if_port == 0)
+ dev->if_port = np->default_port;
+
+ spin_lock_init(&np->mcastlock);
+
+ set_rx_mode(dev);
+ iowrite16(0, ioaddr + IntrEnable);
+ iowrite16(0, ioaddr + DownCounter);
+ /* Set the chip to poll every N*320nsec. */
+ iowrite8(100, ioaddr + RxDMAPollPeriod);
+ iowrite8(127, ioaddr + TxDMAPollPeriod);
+ /* Fix DFE-580TX packet drop issue */
+ if (np->pci_dev->revision >= 0x14)
+ iowrite8(0x01, ioaddr + DebugCtrl1);
+ netif_start_queue(dev);
+
+ spin_lock_irqsave(&np->lock, flags);
+ reset_tx(dev);
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
+
+ /* Disable Wol */
+ iowrite8(ioread8(ioaddr + WakeEvent) | 0x00, ioaddr + WakeEvent);
+ np->wol_enabled = 0;
+
+ if (netif_msg_ifup(np))
+ printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
+ "MAC Control %x, %4.4x %4.4x.\n",
+ dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
+ ioread32(ioaddr + MACCtrl0),
+ ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
+
+ /* Set the timer to check for link beat. */
+ timer_setup(&np->timer, netdev_timer, 0);
+ np->timer.expires = jiffies + 3*HZ;
+ add_timer(&np->timer);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
+
+ return 0;
+}
+
+static void check_duplex(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
+ int negotiated = mii_lpa & np->mii_if.advertising;
+ int duplex;
+
+ /* Force media */
+ if (!np->an_enable || mii_lpa == 0xffff) {
+ if (np->mii_if.full_duplex)
+ iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
+ ioaddr + MACCtrl0);
+ return;
+ }
+
+ /* Autonegotiation */
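+	/* Full duplex if the partner granted 100BASE-TX FD (LPA bit 0x0100),
+	   or 10BASE-T FD (0x0040) with neither 100 Mbps ability bit set. */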
+ duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
+ if (np->mii_if.full_duplex != duplex) {
+ np->mii_if.full_duplex = duplex;
+ if (netif_msg_link(np))
+ printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
+ "negotiated capability %4.4x.\n", dev->name,
+ duplex ? "full" : "half", np->phys[0], negotiated);
+ iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
+ }
+}
+
+static void netdev_timer(struct timer_list *t)
+{
+ struct netdev_private *np = timer_container_of(np, t, timer);
+ struct net_device *dev = np->mii_if.dev;
+ void __iomem *ioaddr = np->base;
+ int next_tick = 10*HZ;
+
+ if (netif_msg_timer(np)) {
+ printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
+ "Tx %x Rx %x.\n",
+ dev->name, ioread16(ioaddr + IntrEnable),
+ ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
+ }
+ check_duplex(dev);
+ np->timer.expires = jiffies + next_tick;
+ add_timer(&np->timer);
+}
+
+static void tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ unsigned long flag;
+
+ netif_stop_queue(dev);
+ tasklet_disable_in_atomic(&np->tx_tasklet);
+ iowrite16(0, ioaddr + IntrEnable);
+ printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
+ "TxFrameId %2.2x,"
+ " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
+ ioread8(ioaddr + TxFrameId));
+
+ {
+ int i;
+ for (i=0; i<TX_RING_SIZE; i++) {
+ printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
+ (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
+ le32_to_cpu(np->tx_ring[i].next_desc),
+ le32_to_cpu(np->tx_ring[i].status),
+ (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
+ le32_to_cpu(np->tx_ring[i].frag.addr),
+ le32_to_cpu(np->tx_ring[i].frag.length));
+ }
+ printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
+ ioread32(np->base + TxListPtr),
+ netif_queue_stopped(dev));
+ printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
+ np->cur_tx, np->cur_tx % TX_RING_SIZE,
+ np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
+ printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
+ printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
+ }
+ spin_lock_irqsave(&np->lock, flag);
+
+ /* Stop and restart the chip's Tx processes . */
+ reset_tx(dev);
+ spin_unlock_irqrestore(&np->lock, flag);
+
+ dev->if_port = 0;
+
+ netif_trans_update(dev); /* prevent tx timeout */
+ dev->stats.tx_errors++;
+ if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
+ netif_wake_queue(dev);
+ }
+ iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
+ tasklet_enable(&np->tx_tasklet);
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void init_ring(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int i;
+
+ np->cur_rx = np->cur_tx = 0;
+ np->dirty_rx = np->dirty_tx = 0;
+ np->cur_task = 0;
+
+ np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
+
+ /* Initialize all Rx descriptors. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
+ ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
+ np->rx_ring[i].status = 0;
+ np->rx_ring[i].frag.length = 0;
+ np->rx_skbuff[i] = NULL;
+ }
+
+ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ dma_addr_t addr;
+
+ struct sk_buff *skb =
+ netdev_alloc_skb(dev, np->rx_buf_sz + 2);
+ np->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb_reserve(skb, 2); /* 16 byte align the IP header. */
+ addr = dma_map_single(&np->pci_dev->dev, skb->data,
+ np->rx_buf_sz, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&np->pci_dev->dev, addr)) {
+ dev_kfree_skb(skb);
+ np->rx_skbuff[i] = NULL;
+ break;
+ }
+ np->rx_ring[i].frag.addr = cpu_to_le32(addr);
+ np->rx_ring[i].frag.length = cpu_to_le32(np->rx_buf_sz | LastFrag);
+ }
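+	/* If allocation stopped early, the wrapped difference below records
+	   how many descriptors still lack buffers; refill_rx() retries later. */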
+ np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_skbuff[i] = NULL;
+ np->tx_ring[i].status = 0;
+ }
+}
+
+static void tx_poll(struct tasklet_struct *t)
+{
+ struct netdev_private *np = from_tasklet(np, t, tx_tasklet);
+ unsigned head = np->cur_task % TX_RING_SIZE;
+ struct netdev_desc *txdesc =
+ &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
+
+ /* Chain the next pointer */
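+	/* Link every descriptor queued by start_tx() since the last pass onto
+	   the hardware list: the previous tail's next_desc must point at the
+	   new entry before the chip is (re)armed via TxListPtr below. */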
+ for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
+ int entry = np->cur_task % TX_RING_SIZE;
+ txdesc = &np->tx_ring[entry];
+ if (np->last_tx) {
+ np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
+ entry*sizeof(struct netdev_desc));
+ }
+ np->last_tx = txdesc;
+ }
+ /* Indicate the latest descriptor of tx ring */
+ txdesc->status |= cpu_to_le32(DescIntrOnTx);
+
+ if (ioread32 (np->base + TxListPtr) == 0)
+ iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
+ np->base + TxListPtr);
+}
+
+static netdev_tx_t
+start_tx (struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ struct netdev_desc *txdesc;
+ dma_addr_t addr;
+ unsigned entry;
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = np->cur_tx % TX_RING_SIZE;
+ np->tx_skbuff[entry] = skb;
+ txdesc = &np->tx_ring[entry];
+
+ addr = dma_map_single(&np->pci_dev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&np->pci_dev->dev, addr))
+ goto drop_frame;
+
+ txdesc->next_desc = 0;
+ txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
+ txdesc->frag.addr = cpu_to_le32(addr);
+ txdesc->frag.length = cpu_to_le32 (skb->len | LastFrag);
+
+ /* Increment cur_tx before tasklet_schedule() */
+ np->cur_tx++;
+ mb();
+ /* Schedule a tx_poll() task */
+ tasklet_schedule(&np->tx_tasklet);
+
+ /* On some architectures: explicitly flush cache lines here. */
+ if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
+ !netif_queue_stopped(dev)) {
+ /* do nothing */
+ } else {
+ netif_stop_queue (dev);
+ }
+ if (netif_msg_tx_queued(np)) {
+ printk (KERN_DEBUG
+ "%s: Transmit frame #%d queued in slot %d.\n",
+ dev->name, np->cur_tx, entry);
+ }
+ return NETDEV_TX_OK;
+
+drop_frame:
+ dev_kfree_skb_any(skb);
+ np->tx_skbuff[entry] = NULL;
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+}
+
+/* Reset hardware tx and free all of tx buffers */
+static int
+reset_tx (struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ struct sk_buff *skb;
+ int i;
+
+ /* Reset tx logic, TxListPtr will be cleaned */
+ iowrite16 (TxDisable, ioaddr + MACCtrl1);
+ sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
+
+ /* free all tx skbuff */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_ring[i].next_desc = 0;
+
+ skb = np->tx_skbuff[i];
+ if (skb) {
+ dma_unmap_single(&np->pci_dev->dev,
+ le32_to_cpu(np->tx_ring[i].frag.addr),
+ skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ np->tx_skbuff[i] = NULL;
+ dev->stats.tx_dropped++;
+ }
+ }
+ np->cur_tx = np->dirty_tx = 0;
+ np->cur_task = 0;
+
+ np->last_tx = NULL;
+ iowrite8(127, ioaddr + TxDMAPollPeriod);
+
+ iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
+ return 0;
+}
+
+/* The interrupt handler cleans up after the Tx thread
+   and schedules Rx processing as tasklet work. */
+static irqreturn_t intr_handler(int irq, void *dev_instance)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ int hw_frame_id;
+ int tx_cnt;
+ int tx_status;
+ int handled = 0;
+ int i;
+
+ do {
+ int intr_status = ioread16(ioaddr + IntrStatus);
+ iowrite16(intr_status, ioaddr + IntrStatus);
+
+ if (netif_msg_intr(np))
+ printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
+ dev->name, intr_status);
+
+ if (!(intr_status & DEFAULT_INTR))
+ break;
+
+ handled = 1;
+
+ if (intr_status & (IntrRxDMADone)) {
+ iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
+ ioaddr + IntrEnable);
+ if (np->budget < 0)
+ np->budget = RX_BUDGET;
+ tasklet_schedule(&np->rx_tasklet);
+ }
+ if (intr_status & (IntrTxDone | IntrDrvRqst)) {
+ tx_status = ioread16 (ioaddr + TxStatus);
+ for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
+ if (netif_msg_tx_done(np))
+ printk
+ ("%s: Transmit status is %2.2x.\n",
+ dev->name, tx_status);
+ if (tx_status & 0x1e) {
+ if (netif_msg_tx_err(np))
+ printk("%s: Transmit error status %4.4x.\n",
+ dev->name, tx_status);
+ dev->stats.tx_errors++;
+ if (tx_status & 0x10)
+ dev->stats.tx_fifo_errors++;
+ if (tx_status & 0x08)
+ dev->stats.collisions++;
+ if (tx_status & 0x04)
+ dev->stats.tx_fifo_errors++;
+ if (tx_status & 0x02)
+ dev->stats.tx_window_errors++;
+
+ /*
+ ** This reset has been verified on
+ ** DFE-580TX boards ! phdm@macqel.be.
+ */
+ if (tx_status & 0x10) { /* TxUnderrun */
+ /* Restart Tx FIFO and transmitter */
+ sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
+ /* No need to reset the Tx pointer here */
+ }
+ /* Restart the Tx. Need to make sure tx enabled */
+ i = 10;
+ do {
+ iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
+ if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
+ break;
+ mdelay(1);
+ } while (--i);
+ }
+ /* Yup, this is a documentation bug. It cost me *hours*. */
+ iowrite16 (0, ioaddr + TxStatus);
+ if (tx_cnt < 0) {
+ iowrite32(5000, ioaddr + DownCounter);
+ break;
+ }
+ tx_status = ioread16 (ioaddr + TxStatus);
+ }
+ hw_frame_id = (tx_status >> 8) & 0xff;
+ } else {
+ hw_frame_id = ioread8(ioaddr + TxFrameId);
+ }
+
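+		/* Reap completed Tx descriptors.  On rev >= 0x14 parts the
+		   frame id from TxStatus/TxFrameId is compared against the
+		   per-descriptor id (status bits 9:2), so descriptors the chip
+		   has not finished with are left alone; older parts rely on
+		   the per-descriptor done bit (0x00010000) only. */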
+ if (np->pci_dev->revision >= 0x14) {
+ spin_lock(&np->lock);
+ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
+ int entry = np->dirty_tx % TX_RING_SIZE;
+ struct sk_buff *skb;
+ int sw_frame_id;
+ sw_frame_id = (le32_to_cpu(
+ np->tx_ring[entry].status) >> 2) & 0xff;
+ if (sw_frame_id == hw_frame_id &&
+ !(le32_to_cpu(np->tx_ring[entry].status)
+ & 0x00010000))
+ break;
+ if (sw_frame_id == (hw_frame_id + 1) %
+ TX_RING_SIZE)
+ break;
+ skb = np->tx_skbuff[entry];
+ /* Free the original skb. */
+ dma_unmap_single(&np->pci_dev->dev,
+ le32_to_cpu(np->tx_ring[entry].frag.addr),
+ skb->len, DMA_TO_DEVICE);
+ dev_consume_skb_irq(np->tx_skbuff[entry]);
+ np->tx_skbuff[entry] = NULL;
+ np->tx_ring[entry].frag.addr = 0;
+ np->tx_ring[entry].frag.length = 0;
+ }
+ spin_unlock(&np->lock);
+ } else {
+ spin_lock(&np->lock);
+ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
+ int entry = np->dirty_tx % TX_RING_SIZE;
+ struct sk_buff *skb;
+ if (!(le32_to_cpu(np->tx_ring[entry].status)
+ & 0x00010000))
+ break;
+ skb = np->tx_skbuff[entry];
+ /* Free the original skb. */
+ dma_unmap_single(&np->pci_dev->dev,
+ le32_to_cpu(np->tx_ring[entry].frag.addr),
+ skb->len, DMA_TO_DEVICE);
+ dev_consume_skb_irq(np->tx_skbuff[entry]);
+ np->tx_skbuff[entry] = NULL;
+ np->tx_ring[entry].frag.addr = 0;
+ np->tx_ring[entry].frag.length = 0;
+ }
+ spin_unlock(&np->lock);
+ }
+
+ if (netif_queue_stopped(dev) &&
+ np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
+ /* The ring is no longer full, clear busy flag. */
+ netif_wake_queue (dev);
+ }
+ /* Abnormal error summary/uncommon events handlers. */
+ if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
+ netdev_error(dev, intr_status);
+ } while (0);
+ if (netif_msg_intr(np))
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, ioread16(ioaddr + IntrStatus));
+ return IRQ_RETVAL(handled);
+}
+
+static void rx_poll(struct tasklet_struct *t)
+{
+ struct netdev_private *np = from_tasklet(np, t, rx_tasklet);
+ struct net_device *dev = np->ndev;
+ int entry = np->cur_rx % RX_RING_SIZE;
+ int boguscnt = np->budget;
+ void __iomem *ioaddr = np->base;
+ int received = 0;
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ while (1) {
+ struct netdev_desc *desc = &(np->rx_ring[entry]);
+ u32 frame_status = le32_to_cpu(desc->status);
+ int pkt_len;
+
+ if (--boguscnt < 0) {
+ goto not_done;
+ }
+ if (!(frame_status & DescOwn))
+ break;
+ pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */
+ if (netif_msg_rx_status(np))
+ printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
+ frame_status);
+ if (frame_status & 0x001f4000) {
+			/* There was an error. */
+ if (netif_msg_rx_err(np))
+ printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
+ frame_status);
+ dev->stats.rx_errors++;
+ if (frame_status & 0x00100000)
+ dev->stats.rx_length_errors++;
+ if (frame_status & 0x00010000)
+ dev->stats.rx_fifo_errors++;
+ if (frame_status & 0x00060000)
+ dev->stats.rx_frame_errors++;
+ if (frame_status & 0x00080000)
+ dev->stats.rx_crc_errors++;
+ if (frame_status & 0x00100000) {
+ printk(KERN_WARNING "%s: Oversized Ethernet frame,"
+ " status %8.8x.\n",
+ dev->name, frame_status);
+ }
+ } else {
+ struct sk_buff *skb;
+#ifndef final_version
+ if (netif_msg_rx_status(np))
+ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
+ ", bogus_cnt %d.\n",
+ pkt_len, boguscnt);
+#endif
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
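+			/* Short frames (< rx_copybreak) are copied into a fresh
+			   skb so the DMA buffer stays mapped and is reused in
+			   place; larger frames are handed up directly and
+			   refill_rx() allocates a replacement buffer. */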
+ if (pkt_len < rx_copybreak &&
+ (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ dma_sync_single_for_cpu(&np->pci_dev->dev,
+ le32_to_cpu(desc->frag.addr),
+ np->rx_buf_sz, DMA_FROM_DEVICE);
+ skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
+ dma_sync_single_for_device(&np->pci_dev->dev,
+ le32_to_cpu(desc->frag.addr),
+ np->rx_buf_sz, DMA_FROM_DEVICE);
+ skb_put(skb, pkt_len);
+ } else {
+ dma_unmap_single(&np->pci_dev->dev,
+ le32_to_cpu(desc->frag.addr),
+ np->rx_buf_sz, DMA_FROM_DEVICE);
+ skb_put(skb = np->rx_skbuff[entry], pkt_len);
+ np->rx_skbuff[entry] = NULL;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
+ netif_rx(skb);
+ }
+ entry = (entry + 1) % RX_RING_SIZE;
+ received++;
+ }
+ np->cur_rx = entry;
+ refill_rx (dev);
+ np->budget -= received;
+ iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
+ return;
+
+not_done:
+ np->cur_rx = entry;
+ refill_rx (dev);
+ if (!received)
+ received = 1;
+ np->budget -= received;
+ if (np->budget <= 0)
+ np->budget = RX_BUDGET;
+ tasklet_schedule(&np->rx_tasklet);
+}
+
+static void refill_rx (struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int entry;
+
+ /* Refill the Rx ring buffers. */
+ for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
+ np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
+ struct sk_buff *skb;
+ dma_addr_t addr;
+
+ entry = np->dirty_rx % RX_RING_SIZE;
+ if (np->rx_skbuff[entry] == NULL) {
+ skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
+ np->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ addr = dma_map_single(&np->pci_dev->dev, skb->data,
+ np->rx_buf_sz, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&np->pci_dev->dev, addr)) {
+ dev_kfree_skb_irq(skb);
+ np->rx_skbuff[entry] = NULL;
+ break;
+ }
+
+ np->rx_ring[entry].frag.addr = cpu_to_le32(addr);
+ }
+ /* Perhaps we need not reset this field. */
+ np->rx_ring[entry].frag.length =
+ cpu_to_le32(np->rx_buf_sz | LastFrag);
+ np->rx_ring[entry].status = 0;
+ }
+}
+static void netdev_error(struct net_device *dev, int intr_status)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ u16 mii_ctl, mii_advertise, mii_lpa;
+ int speed;
+
+ if (intr_status & LinkChange) {
+ if (mdio_wait_link(dev, 10) == 0) {
+ printk(KERN_INFO "%s: Link up\n", dev->name);
+ if (np->an_enable) {
+ mii_advertise = mdio_read(dev, np->phys[0],
+ MII_ADVERTISE);
+ mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
+ mii_advertise &= mii_lpa;
+ printk(KERN_INFO "%s: Link changed: ",
+ dev->name);
+ if (mii_advertise & ADVERTISE_100FULL) {
+ np->speed = 100;
+ printk("100Mbps, full duplex\n");
+ } else if (mii_advertise & ADVERTISE_100HALF) {
+ np->speed = 100;
+ printk("100Mbps, half duplex\n");
+ } else if (mii_advertise & ADVERTISE_10FULL) {
+ np->speed = 10;
+ printk("10Mbps, full duplex\n");
+ } else if (mii_advertise & ADVERTISE_10HALF) {
+ np->speed = 10;
+ printk("10Mbps, half duplex\n");
+ } else
+ printk("\n");
+
+ } else {
+ mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
+ speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
+ np->speed = speed;
+				printk(KERN_INFO "%s: Link changed: %dMbps, ",
+ dev->name, speed);
+ printk("%s duplex.\n",
+ (mii_ctl & BMCR_FULLDPLX) ?
+ "full" : "half");
+ }
+ check_duplex(dev);
+ if (np->flowctrl && np->mii_if.full_duplex) {
+ iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
+ ioaddr + MulticastFilter1+2);
+ iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
+ ioaddr + MACCtrl0);
+ }
+ netif_carrier_on(dev);
+ } else {
+ printk(KERN_INFO "%s: Link down\n", dev->name);
+ netif_carrier_off(dev);
+ }
+ }
+ if (intr_status & StatsMax) {
+ get_stats(dev);
+ }
+ if (intr_status & IntrPCIErr) {
+ printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
+ dev->name, intr_status);
+ /* We must do a global reset of DMA to continue. */
+ }
+}
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ unsigned long flags;
+ u8 late_coll, single_coll, mult_coll;
+
+ spin_lock_irqsave(&np->statlock, flags);
+	/* The chip only needs to report frames it silently dropped. */
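+	/* The on-chip counters are only 8/16 bits wide and appear to clear on
+	   read, so each snapshot is folded into the 64-bit software totals
+	   here; the StatsMax interrupt (see netdev_error()) triggers an early
+	   readout. */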
+ dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
+ dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
+ dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
+ dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
+
+ mult_coll = ioread8(ioaddr + StatsMultiColl);
+ np->xstats.tx_multiple_collisions += mult_coll;
+ single_coll = ioread8(ioaddr + StatsOneColl);
+ np->xstats.tx_single_collisions += single_coll;
+ late_coll = ioread8(ioaddr + StatsLateColl);
+ np->xstats.tx_late_collisions += late_coll;
+ dev->stats.collisions += mult_coll
+ + single_coll
+ + late_coll;
+
+ np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
+ np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
+ np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
+ np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
+ np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
+ np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
+ np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);
+
+ dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
+ dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
+ dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
+ dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
+
+ spin_unlock_irqrestore(&np->statlock, flags);
+
+ return &dev->stats;
+}
+
+static void set_rx_mode(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ u16 mc_filter[4]; /* Multicast hash filter */
+ u32 rx_mode;
+ int i;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ } else if (!netdev_mc_empty(dev)) {
+ struct netdev_hw_addr *ha;
+ int bit;
+ int index;
+ int crc;
+ memset (mc_filter, 0, sizeof (mc_filter));
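+		/* Hash each multicast address: the top six bits of the
+		   little-endian CRC-32, bit-reversed into 'index', select one
+		   of 64 filter bits spread over four 16-bit registers. */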
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
+ for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
+ if (crc & 0x80000000) index |= 1 << bit;
+ mc_filter[index/16] |= (1 << (index % 16));
+ }
+ rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
+ } else {
+ iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
+ return;
+ }
+ if (np->mii_if.full_duplex && np->flowctrl)
+ mc_filter[3] |= 0x0200;
+
+ for (i = 0; i < 4; i++)
+ iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
+ iowrite8(rx_mode, ioaddr + RxMode);
+}
+
+static int __set_mac_addr(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ u16 addr16;
+
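+	/* The station address is written as three little-endian 16-bit halves
+	   at StationAddr, StationAddr+2 and StationAddr+4. */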
+ addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
+ iowrite16(addr16, np->base + StationAddr);
+ addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
+ iowrite16(addr16, np->base + StationAddr+2);
+ addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
+ iowrite16(addr16, np->base + StationAddr+4);
+ return 0;
+}
+
+/* Invoked with rtnl_lock held */
+static int sundance_set_mac_addr(struct net_device *dev, void *data)
+{
+ const struct sockaddr *addr = data;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+ eth_hw_addr_set(dev, addr->sa_data);
+ __set_mac_addr(dev);
+
+ return 0;
+}
+
+static const struct {
+ const char name[ETH_GSTRING_LEN];
+} sundance_stats[] = {
+ { "tx_multiple_collisions" },
+ { "tx_single_collisions" },
+ { "tx_late_collisions" },
+ { "tx_deferred" },
+ { "tx_deferred_excessive" },
+ { "tx_aborted" },
+ { "tx_bcasts" },
+ { "rx_bcasts" },
+ { "tx_mcasts" },
+ { "rx_mcasts" },
+};
+
+static int check_if_running(struct net_device *dev)
+{
+ if (!netif_running(dev))
+ return -EINVAL;
+ return 0;
+}
+
+static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+}
+
+static int get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ spin_lock_irq(&np->lock);
+ mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
+ spin_unlock_irq(&np->lock);
+ return 0;
+}
+
+static int set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int res;
+ spin_lock_irq(&np->lock);
+ res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
+ spin_unlock_irq(&np->lock);
+ return res;
+}
+
+static int nway_reset(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return mii_nway_restart(&np->mii_if);
+}
+
+static u32 get_link(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return mii_link_ok(&np->mii_if);
+}
+
+static u32 get_msglevel(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return np->msg_enable;
+}
+
+static void set_msglevel(struct net_device *dev, u32 val)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ np->msg_enable = val;
+}
+
+static void get_strings(struct net_device *dev, u32 stringset,
+ u8 *data)
+{
+ if (stringset == ETH_SS_STATS)
+ memcpy(data, sundance_stats, sizeof(sundance_stats));
+}
+
+static int get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(sundance_stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int i = 0;
+
+ get_stats(dev);
+ data[i++] = np->xstats.tx_multiple_collisions;
+ data[i++] = np->xstats.tx_single_collisions;
+ data[i++] = np->xstats.tx_late_collisions;
+ data[i++] = np->xstats.tx_deferred;
+ data[i++] = np->xstats.tx_deferred_excessive;
+ data[i++] = np->xstats.tx_aborted;
+ data[i++] = np->xstats.tx_bcasts;
+ data[i++] = np->xstats.rx_bcasts;
+ data[i++] = np->xstats.tx_mcasts;
+ data[i++] = np->xstats.rx_mcasts;
+}
+
+#ifdef CONFIG_PM
+
+static void sundance_get_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ u8 wol_bits;
+
+ wol->wolopts = 0;
+
+ wol->supported = (WAKE_PHY | WAKE_MAGIC);
+ if (!np->wol_enabled)
+ return;
+
+ wol_bits = ioread8(ioaddr + WakeEvent);
+ if (wol_bits & MagicPktEnable)
+ wol->wolopts |= WAKE_MAGIC;
+ if (wol_bits & LinkEventEnable)
+ wol->wolopts |= WAKE_PHY;
+}
+
+static int sundance_set_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ u8 wol_bits;
+
+ if (!device_can_wakeup(&np->pci_dev->dev))
+ return -EOPNOTSUPP;
+
+ np->wol_enabled = !!(wol->wolopts);
+ wol_bits = ioread8(ioaddr + WakeEvent);
+ wol_bits &= ~(WakePktEnable | MagicPktEnable |
+ LinkEventEnable | WolEnable);
+
+ if (np->wol_enabled) {
+ if (wol->wolopts & WAKE_MAGIC)
+ wol_bits |= (MagicPktEnable | WolEnable);
+ if (wol->wolopts & WAKE_PHY)
+ wol_bits |= (LinkEventEnable | WolEnable);
+ }
+ iowrite8(wol_bits, ioaddr + WakeEvent);
+
+ device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled);
+
+ return 0;
+}
+#else
+#define sundance_get_wol NULL
+#define sundance_set_wol NULL
+#endif /* CONFIG_PM */
+
+static const struct ethtool_ops ethtool_ops = {
+ .begin = check_if_running,
+ .get_drvinfo = get_drvinfo,
+ .nway_reset = nway_reset,
+ .get_link = get_link,
+ .get_wol = sundance_get_wol,
+ .set_wol = sundance_set_wol,
+ .get_msglevel = get_msglevel,
+ .set_msglevel = set_msglevel,
+ .get_strings = get_strings,
+ .get_sset_count = get_sset_count,
+ .get_ethtool_stats = get_ethtool_stats,
+ .get_link_ksettings = get_link_ksettings,
+ .set_link_ksettings = set_link_ksettings,
+};
+
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int rc;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ spin_lock_irq(&np->lock);
+ rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
+ spin_unlock_irq(&np->lock);
+
+ return rc;
+}
+
+static int netdev_close(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ struct sk_buff *skb;
+ int i;
+
+ /* Wait and kill tasklet */
+ tasklet_kill(&np->rx_tasklet);
+ tasklet_kill(&np->tx_tasklet);
+ np->cur_tx = 0;
+ np->dirty_tx = 0;
+ np->cur_task = 0;
+ np->last_tx = NULL;
+
+ netif_stop_queue(dev);
+
+ if (netif_msg_ifdown(np)) {
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
+ "Rx %4.4x Int %2.2x.\n",
+ dev->name, ioread8(ioaddr + TxStatus),
+ ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
+ dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
+ }
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ iowrite16(0x0000, ioaddr + IntrEnable);
+
+	/* Disable Rx and Tx DMA so resources can be released safely */
+ iowrite32(0x500, ioaddr + DMACtrl);
+
+ /* Stop the chip's Tx and Rx processes. */
+ iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
+
+ for (i = 2000; i > 0; i--) {
+ if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
+ break;
+ mdelay(1);
+ }
+
+ iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
+ ioaddr + ASIC_HI_WORD(ASICCtrl));
+
+ for (i = 2000; i > 0; i--) {
+ if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
+ break;
+ mdelay(1);
+ }
+
+#ifdef __i386__
+ if (netif_msg_hw(np)) {
+ printk(KERN_DEBUG " Tx ring at %8.8x:\n",
+ (int)(np->tx_ring_dma));
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
+ i, np->tx_ring[i].status, np->tx_ring[i].frag.addr,
+ np->tx_ring[i].frag.length);
+ printk(KERN_DEBUG " Rx ring %8.8x:\n",
+ (int)(np->rx_ring_dma));
+ for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
+ printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
+ i, np->rx_ring[i].status, np->rx_ring[i].frag.addr,
+ np->rx_ring[i].frag.length);
+ }
+ }
+#endif /* __i386__ debugging only */
+
+ free_irq(np->pci_dev->irq, dev);
+
+ timer_delete_sync(&np->timer);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].status = 0;
+ skb = np->rx_skbuff[i];
+ if (skb) {
+ dma_unmap_single(&np->pci_dev->dev,
+ le32_to_cpu(np->rx_ring[i].frag.addr),
+ np->rx_buf_sz, DMA_FROM_DEVICE);
+ dev_kfree_skb(skb);
+ np->rx_skbuff[i] = NULL;
+ }
+ np->rx_ring[i].frag.addr = cpu_to_le32(0xBADF00D0); /* poison */
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_ring[i].next_desc = 0;
+ skb = np->tx_skbuff[i];
+ if (skb) {
+ dma_unmap_single(&np->pci_dev->dev,
+ le32_to_cpu(np->tx_ring[i].frag.addr),
+ skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb(skb);
+ np->tx_skbuff[i] = NULL;
+ }
+ }
+
+ return 0;
+}
+
+static void sundance_remove1(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev) {
+ struct netdev_private *np = netdev_priv(dev);
+ unregister_netdev(dev);
+ dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
+ np->rx_ring, np->rx_ring_dma);
+ dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
+ np->tx_ring, np->tx_ring_dma);
+ pci_iounmap(pdev, np->base);
+ pci_release_regions(pdev);
+ free_netdev(dev);
+ }
+}
+
+static int __maybe_unused sundance_suspend(struct device *dev_d)
+{
+ struct net_device *dev = dev_get_drvdata(dev_d);
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+
+ if (!netif_running(dev))
+ return 0;
+
+ netdev_close(dev);
+ netif_device_detach(dev);
+
+ if (np->wol_enabled) {
+ iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
+ iowrite16(RxEnable, ioaddr + MACCtrl1);
+ }
+
+ device_set_wakeup_enable(dev_d, np->wol_enabled);
+
+ return 0;
+}
+
+static int __maybe_unused sundance_resume(struct device *dev_d)
+{
+ struct net_device *dev = dev_get_drvdata(dev_d);
+ int err = 0;
+
+ if (!netif_running(dev))
+ return 0;
+
+ err = netdev_open(dev);
+ if (err) {
+ printk(KERN_ERR "%s: Can't resume interface!\n",
+ dev->name);
+ goto out;
+ }
+
+ netif_device_attach(dev);
+
+out:
+ return err;
+}
+
+static SIMPLE_DEV_PM_OPS(sundance_pm_ops, sundance_suspend, sundance_resume);
+
+static struct pci_driver sundance_driver = {
+ .name = DRV_NAME,
+ .id_table = sundance_pci_tbl,
+ .probe = sundance_probe1,
+ .remove = sundance_remove1,
+ .driver.pm = &sundance_pm_ops,
+};
+
+module_pci_driver(sundance_driver);
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index 44af1d13d931..67275aa4f65b 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -416,8 +416,7 @@ static int ec_bhf_open(struct net_device *net_dev)
netif_start_queue(net_dev);
- hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- priv->hrtimer.function = ec_bhf_timer_fun;
+ hrtimer_setup(&priv->hrtimer, ec_bhf_timer_fun, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer_start(&priv->hrtimer, polling_frequency, HRTIMER_MODE_REL);
return 0;
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index e48b861e4ce1..270ff9aab335 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -562,7 +562,7 @@ struct be_adapter {
struct be_dma_mem mbox_mem_alloced;
struct be_mcc_obj mcc_obj;
- struct mutex mcc_lock; /* For serializing mcc cmds to BE card */
+ spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
spinlock_t mcc_cq_lock;
u16 cfg_num_rx_irqs; /* configured via set-channels */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 61adcebeef01..bb5d2fa15736 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -575,7 +575,7 @@ int be_process_mcc(struct be_adapter *adapter)
/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
-#define mcc_timeout 12000 /* 12s timeout */
+#define mcc_timeout 120000 /* 12s timeout */
int i, status = 0;
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
@@ -589,7 +589,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
if (atomic_read(&mcc_obj->q.used) == 0)
break;
- usleep_range(500, 1000);
+ udelay(100);
}
if (i == mcc_timeout) {
dev_err(&adapter->pdev->dev, "FW not responding\n");
@@ -866,7 +866,7 @@ static bool use_mcc(struct be_adapter *adapter)
static int be_cmd_lock(struct be_adapter *adapter)
{
if (use_mcc(adapter)) {
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
return 0;
} else {
return mutex_lock_interruptible(&adapter->mbox_lock);
@@ -877,7 +877,7 @@ static int be_cmd_lock(struct be_adapter *adapter)
static void be_cmd_unlock(struct be_adapter *adapter)
{
if (use_mcc(adapter))
- return mutex_unlock(&adapter->mcc_lock);
+ return spin_unlock_bh(&adapter->mcc_lock);
else
return mutex_unlock(&adapter->mbox_lock);
}
@@ -1047,7 +1047,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
struct be_cmd_req_mac_query *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1076,7 +1076,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1088,7 +1088,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr,
struct be_cmd_req_pmac_add *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1113,7 +1113,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
status = -EPERM;
@@ -1131,7 +1131,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
if (pmac_id == -1)
return 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1151,7 +1151,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1414,7 +1414,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
struct be_dma_mem *q_mem = &rxq->dma_mem;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1444,7 +1444,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1508,7 +1508,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
struct be_cmd_req_q_destroy *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1525,7 +1525,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
q->created = false;
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1593,7 +1593,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
struct be_cmd_req_hdr *hdr;
int status = 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1609,7 +1609,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
/* version 1 of the cmd is not supported only by BE2 */
if (BE2_chip(adapter))
hdr->version = 0;
- if (BE3_chip(adapter) || lancer_chip(adapter))
+ else if (BE3_chip(adapter) || lancer_chip(adapter))
hdr->version = 1;
else
hdr->version = 2;
@@ -1621,7 +1621,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
adapter->stats_cmd_sent = true;
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1637,7 +1637,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
CMD_SUBSYSTEM_ETH))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1660,7 +1660,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
adapter->stats_cmd_sent = true;
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1697,7 +1697,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
struct be_cmd_req_link_status *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
if (link_status)
*link_status = LINK_DOWN;
@@ -1736,7 +1736,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1747,7 +1747,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
struct be_cmd_req_get_cntl_addnl_attribs *req;
int status = 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1762,7 +1762,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
status = be_mcc_notify(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1811,7 +1811,7 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
if (!get_fat_cmd.va)
return -ENOMEM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
while (total_size) {
buf_size = min(total_size, (u32)60 * 1024);
@@ -1849,9 +1849,9 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
log_offset += buf_size;
}
err:
+ spin_unlock_bh(&adapter->mcc_lock);
dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
get_fat_cmd.va, get_fat_cmd.dma);
- mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -1862,7 +1862,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter)
struct be_cmd_req_get_fw_version *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1885,7 +1885,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter)
sizeof(adapter->fw_on_flash));
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1899,7 +1899,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter,
struct be_cmd_req_modify_eq_delay *req;
int status = 0, i;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1922,7 +1922,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter,
status = be_mcc_notify(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1949,7 +1949,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
struct be_cmd_req_vlan_config *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1971,7 +1971,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1982,7 +1982,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
struct be_cmd_req_rx_filter *req = mem->va;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2015,7 +2015,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2046,7 +2046,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2066,7 +2066,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
return -EOPNOTSUPP;
@@ -2085,7 +2085,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2108,7 +2108,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2189,7 +2189,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
return 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2214,7 +2214,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2226,7 +2226,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
struct be_cmd_req_enable_disable_beacon *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2247,7 +2247,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2258,7 +2258,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
struct be_cmd_req_get_beacon_state *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2282,7 +2282,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2306,7 +2306,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
return -ENOMEM;
}
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2328,7 +2328,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
memcpy(data, resp->page_data + off, len);
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
return status;
}
@@ -2345,7 +2345,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
void *ctxt = NULL;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
adapter->flash_status = 0;
wrb = wrb_from_mccq(adapter);
@@ -2387,7 +2387,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
if (status)
goto err_unlock;
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(60000)))
@@ -2406,7 +2406,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
return status;
err_unlock:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2460,7 +2460,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter,
struct be_mcc_wrb *wrb;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2478,7 +2478,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2491,7 +2491,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
struct lancer_cmd_resp_read_object *resp;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2525,7 +2525,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
}
err_unlock:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2537,7 +2537,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
struct be_cmd_write_flashrom *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
adapter->flash_status = 0;
wrb = wrb_from_mccq(adapter);
@@ -2562,7 +2562,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
if (status)
goto err_unlock;
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(40000)))
@@ -2573,7 +2573,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
return status;
err_unlock:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2584,7 +2584,7 @@ static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
struct be_mcc_wrb *wrb;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2611,11 +2611,15 @@ static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
memcpy(flashed_crc, req->crc, 4);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
-static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
+/*
+ * Since the cookie is text, add a parsing-skipped space to keep it from
+ * ever being matched on storage holding this source file.
+ */
+static const char flash_cookie[32] __nonstring = "*** SE FLAS" "H DIRECTORY *** ";
static bool phy_flashing_required(struct be_adapter *adapter)
{
@@ -3217,7 +3221,7 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
struct be_cmd_req_acpi_wol_magic_config *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3234,7 +3238,7 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3249,7 +3253,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
CMD_SUBSYSTEM_LOWLEVEL))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3272,7 +3276,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
if (status)
goto err_unlock;
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(SET_LB_MODE_TIMEOUT)))
@@ -3281,7 +3285,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
return status;
err_unlock:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3298,7 +3302,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
CMD_SUBSYSTEM_LOWLEVEL))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3324,7 +3328,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
if (status)
goto err;
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
wait_for_completion(&adapter->et_cmd_compl);
resp = embedded_payload(wrb);
@@ -3332,7 +3336,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
return status;
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3348,7 +3352,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
CMD_SUBSYSTEM_LOWLEVEL))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3382,7 +3386,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3393,7 +3397,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
struct be_cmd_req_seeprom_read *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3409,7 +3413,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3424,7 +3428,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3469,7 +3473,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
}
dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3479,7 +3483,7 @@ static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
struct be_cmd_req_set_qos *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3499,7 +3503,7 @@ static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3611,7 +3615,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
struct be_cmd_req_get_fn_privileges *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3643,7 +3647,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3655,7 +3659,7 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
struct be_cmd_req_set_fn_privileges *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3675,7 +3679,7 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3707,7 +3711,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
return -ENOMEM;
}
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3771,7 +3775,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
}
out:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
get_mac_list_cmd.va, get_mac_list_cmd.dma);
return status;
@@ -3831,7 +3835,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
if (!cmd.va)
return -ENOMEM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3852,8 +3856,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
status = be_mcc_notify_wait(adapter);
err:
+ spin_unlock_bh(&adapter->mcc_lock);
dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
- mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -3889,7 +3893,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3930,7 +3934,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3944,7 +3948,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
int status;
u16 vid;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3991,7 +3995,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4190,7 +4194,7 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
struct be_cmd_req_set_ext_fat_caps *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4206,7 +4210,7 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4684,7 +4688,7 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
if (iface == 0xFFFFFFFF)
return -1;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4701,7 +4705,7 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4735,7 +4739,7 @@ int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
struct be_cmd_resp_get_iface_list *resp;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4756,7 +4760,7 @@ int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4850,7 +4854,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
if (BEx_chip(adapter))
return 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4868,7 +4872,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
req->enable = 1;
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4941,7 +4945,7 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter,
u32 link_config = 0;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4969,7 +4973,7 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -5000,8 +5004,7 @@ int be_cmd_set_features(struct be_adapter *adapter)
struct be_mcc_wrb *wrb;
int status;
- if (mutex_lock_interruptible(&adapter->mcc_lock))
- return -1;
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -5039,7 +5042,7 @@ err:
dev_info(&adapter->pdev->dev,
"Adapter does not support HW error recovery\n");
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -5053,7 +5056,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
struct be_cmd_resp_hdr *resp;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -5076,7 +5079,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
EXPORT_SYMBOL(be_roce_mcc_cmd);
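
The be_cmds.c hunks above convert adapter->mcc_lock from a mutex to a BH-disabling spinlock so MCC commands can be issued from contexts that are not allowed to sleep. A minimal sketch of the pattern, using a hypothetical foo_adapter rather than the driver's real structures:

#include <linux/spinlock.h>

/* Hypothetical adapter; the real driver keeps the lock in struct be_adapter. */
struct foo_adapter {
	spinlock_t cmd_lock;			/* was: struct mutex cmd_lock */
};

static void foo_adapter_init(struct foo_adapter *adapter)
{
	spin_lock_init(&adapter->cmd_lock);	/* was: mutex_init() */
}

static int foo_cmd_example(struct foo_adapter *adapter)
{
	int status = 0;

	spin_lock_bh(&adapter->cmd_lock);	/* was: mutex_lock() */

	/* Build and post the command here. Nothing in this region may
	 * sleep any more, which is presumably also why the hunks above
	 * move dma_free_coherent() calls outside the locked section.
	 */

	spin_unlock_bh(&adapter->cmd_lock);	/* was: mutex_unlock() */
	return status;
}
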
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index d70818f06be7..5e2d3ddb5d43 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1415,7 +1415,7 @@ struct flash_section_entry {
} __packed;
struct flash_section_info {
- u8 cookie[32];
+ u8 cookie[32] __nonstring;
struct flash_section_hdr fsec_hdr;
struct flash_section_entry fsec_entry[32];
} __packed;
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index f001a649f58f..f9216326bdfe 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1073,10 +1073,19 @@ static void be_set_msg_level(struct net_device *netdev, u32 level)
adapter->msg_enable = level;
}
-static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
+static int be_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ u64 flow_type = cmd->flow_type;
u64 data = 0;
+ if (!be_multi_rxq(adapter)) {
+ dev_info(&adapter->pdev->dev,
+ "ethtool::get_rxfh: RX flow hashing is disabled\n");
+ return -EINVAL;
+ }
+
switch (flow_type) {
case TCP_V4_FLOW:
if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)
@@ -1104,7 +1113,8 @@ static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
break;
}
- return data;
+ cmd->data = data;
+ return 0;
}
static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
@@ -1119,9 +1129,6 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
}
switch (cmd->cmd) {
- case ETHTOOL_GRXFH:
- cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type);
- break;
case ETHTOOL_GRXRINGS:
cmd->data = adapter->num_rx_qs;
break;
@@ -1132,11 +1139,19 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
return 0;
}
-static int be_set_rss_hash_opts(struct be_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+static int be_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
- int status;
+ struct be_adapter *adapter = netdev_priv(netdev);
u32 rss_flags = adapter->rss_info.rss_flags;
+ int status;
+
+ if (!be_multi_rxq(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "ethtool::set_rxfh: RX flow hashing is disabled\n");
+ return -EINVAL;
+ }
if (cmd->data != L3_RSS_FLAGS &&
cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS))
@@ -1195,28 +1210,6 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
return be_cmd_status(status);
}
-static int be_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
- int status = 0;
-
- if (!be_multi_rxq(adapter)) {
- dev_err(&adapter->pdev->dev,
- "ethtool::set_rxnfc: RX flow hashing is disabled\n");
- return -EINVAL;
- }
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- status = be_set_rss_hash_opts(adapter, cmd);
- break;
- default:
- return -EINVAL;
- }
-
- return status;
-}
-
static void be_get_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
@@ -1449,7 +1442,8 @@ const struct ethtool_ops be_ethtool_ops = {
.flash_device = be_do_flash,
.self_test = be_self_test,
.get_rxnfc = be_get_rxnfc,
- .set_rxnfc = be_set_rxnfc,
+ .get_rxfh_fields = be_get_rxfh_fields,
+ .set_rxfh_fields = be_set_rxfh_fields,
.get_rxfh_indir_size = be_get_rxfh_indir_size,
.get_rxfh_key_size = be_get_rxfh_key_size,
.get_rxfh = be_get_rxfh,
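
This be_ethtool.c conversion (and the dpaa/dpaa2 ones further down) follows the same recipe: drop the ETHTOOL_GRXFH/ETHTOOL_SRXFH cases from .get_rxnfc/.set_rxnfc and handle the flow-hash fields through the dedicated .get_rxfh_fields/.set_rxfh_fields callbacks. A minimal sketch with a hypothetical foo driver that stores the fields as a single bitmask:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Hypothetical driver state; real drivers read/program their RSS config here. */
struct foo_priv {
	u32 rss_fields;		/* e.g. RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 ... */
};

static int foo_get_rxfh_fields(struct net_device *netdev,
			       struct ethtool_rxfh_fields *fields)
{
	struct foo_priv *priv = netdev_priv(netdev);

	/* Report which header fields feed the hash for fields->flow_type;
	 * this sketch uses one setting for all flow types.
	 */
	fields->data = priv->rss_fields;
	return 0;
}

static int foo_set_rxfh_fields(struct net_device *netdev,
			       const struct ethtool_rxfh_fields *fields,
			       struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = netdev_priv(netdev);

	priv->rss_fields = fields->data;
	/* A real driver would reprogram the hardware hash fields here. */
	return 0;
}

static const struct ethtool_ops foo_ethtool_ops = {
	/* .get_rxnfc/.set_rxnfc keep handling classification rules only. */
	.get_rxfh_fields = foo_get_rxfh_fields,
	.set_rxfh_fields = foo_set_rxfh_fields,
};
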
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 875fe379eea2..5bb31c8fab39 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1296,7 +1296,8 @@ static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
- struct sk_buff **skb)
+ struct sk_buff **skb,
+ struct be_wrb_params *wrb_params)
{
struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
bool os2bmc = false;
@@ -1360,7 +1361,7 @@ done:
* to BMC, asic expects the vlan to be inline in the packet.
*/
if (os2bmc)
- *skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);
+ *skb = be_insert_vlan_in_pkt(adapter, *skb, wrb_params);
return os2bmc;
}
@@ -1387,7 +1388,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
/* if os2bmc is enabled and if the pkt is destined to bmc,
* enqueue the pkt a 2nd time with mgmt bit set.
*/
- if (be_send_pkt_to_bmc(adapter, &skb)) {
+ if (be_send_pkt_to_bmc(adapter, &skb, &wrb_params)) {
BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
if (unlikely(!wrb_cnt))
@@ -1465,10 +1466,10 @@ static void be_tx_timeout(struct net_device *netdev, unsigned int txqueue)
ntohs(tcphdr->source));
dev_info(dev, "TCP dest port %d\n",
ntohs(tcphdr->dest));
- dev_info(dev, "TCP sequence num %d\n",
- ntohs(tcphdr->seq));
- dev_info(dev, "TCP ack_seq %d\n",
- ntohs(tcphdr->ack_seq));
+ dev_info(dev, "TCP sequence num %u\n",
+ ntohl(tcphdr->seq));
+ dev_info(dev, "TCP ack_seq %u\n",
+ ntohl(tcphdr->ack_seq));
} else if (ip_hdr(skb)->protocol ==
IPPROTO_UDP) {
udphdr = udp_hdr(skb);
@@ -4031,8 +4032,7 @@ static int be_vxlan_unset_port(struct net_device *netdev, unsigned int table,
static const struct udp_tunnel_nic_info be_udp_tunnels = {
.set_port = be_vxlan_set_port,
.unset_port = be_vxlan_unset_port,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
},
@@ -5667,8 +5667,8 @@ static int be_drv_init(struct be_adapter *adapter)
}
mutex_init(&adapter->mbox_lock);
- mutex_init(&adapter->mcc_lock);
mutex_init(&adapter->rx_filter_lock);
+ spin_lock_init(&adapter->mcc_lock);
spin_lock_init(&adapter->mcc_cq_lock);
init_completion(&adapter->et_cmd_compl);
diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
index f188fba021a6..03e19aea9ea4 100644
--- a/drivers/net/ethernet/engleder/tsnep.h
+++ b/drivers/net/ethernet/engleder/tsnep.h
@@ -176,7 +176,7 @@ struct tsnep_adapter {
struct tsnep_gcl gcl[2];
int next_gcl;
- struct hwtstamp_config hwtstamp_config;
+ struct kernel_hwtstamp_config hwtstamp_config;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
/* ptp clock lock */
@@ -203,7 +203,11 @@ extern const struct ethtool_ops tsnep_ethtool_ops;
int tsnep_ptp_init(struct tsnep_adapter *adapter);
void tsnep_ptp_cleanup(struct tsnep_adapter *adapter);
-int tsnep_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+int tsnep_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int tsnep_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
int tsnep_tc_init(struct tsnep_adapter *adapter);
void tsnep_tc_cleanup(struct tsnep_adapter *adapter);
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 95a5295d0361..b118407c30e8 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -67,6 +67,8 @@
#define TSNEP_TX_TYPE_XDP_NDO_MAP_PAGE (TSNEP_TX_TYPE_XDP_NDO | TSNEP_TX_TYPE_MAP_PAGE)
#define TSNEP_TX_TYPE_XDP (TSNEP_TX_TYPE_XDP_TX | TSNEP_TX_TYPE_XDP_NDO)
#define TSNEP_TX_TYPE_XSK BIT(12)
+#define TSNEP_TX_TYPE_TSTAMP BIT(13)
+#define TSNEP_TX_TYPE_SKB_TSTAMP (TSNEP_TX_TYPE_SKB | TSNEP_TX_TYPE_TSTAMP)
#define TSNEP_XDP_TX BIT(0)
#define TSNEP_XDP_REDIRECT BIT(1)
@@ -221,20 +223,19 @@ static void tsnep_phy_link_status_change(struct net_device *netdev)
static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable)
{
- int retval;
-
- retval = phy_loopback(adapter->phydev, enable);
+ int speed;
- /* PHY link state change is not signaled if loopback is enabled, it
- * would delay a working loopback anyway, let's ensure that loopback
- * is working immediately by setting link mode directly
- */
- if (!retval && enable) {
- netif_carrier_on(adapter->netdev);
- tsnep_set_link_mode(adapter);
+ if (enable) {
+ if (adapter->phydev->autoneg == AUTONEG_DISABLE &&
+ adapter->phydev->speed == SPEED_100)
+ speed = SPEED_100;
+ else
+ speed = SPEED_1000;
+ } else {
+ speed = 0;
}
- return retval;
+ return phy_loopback(adapter->phydev, enable, speed);
}
static int tsnep_phy_open(struct tsnep_adapter *adapter)
@@ -387,8 +388,7 @@ static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
if (entry->skb) {
entry->properties = length & TSNEP_DESC_LENGTH_MASK;
entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
- if ((entry->type & TSNEP_TX_TYPE_SKB) &&
- (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS))
+ if ((entry->type & TSNEP_TX_TYPE_SKB_TSTAMP) == TSNEP_TX_TYPE_SKB_TSTAMP)
entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;
/* toggle user flag to prevent false acknowledge
@@ -480,7 +480,8 @@ static int tsnep_tx_map_frag(skb_frag_t *frag, struct tsnep_tx_entry *entry,
return mapped;
}
-static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
+static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count,
+ bool do_tstamp)
{
struct device *dmadev = tx->adapter->dmadev;
struct tsnep_tx_entry *entry;
@@ -506,6 +507,9 @@ static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
entry->type = TSNEP_TX_TYPE_SKB_INLINE;
mapped = 0;
}
+
+ if (do_tstamp)
+ entry->type |= TSNEP_TX_TYPE_TSTAMP;
} else {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
@@ -559,11 +563,12 @@ static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
struct tsnep_tx *tx)
{
- int count = 1;
struct tsnep_tx_entry *entry;
+ bool do_tstamp = false;
+ int count = 1;
int length;
- int i;
int retval;
+ int i;
if (skb_shinfo(skb)->nr_frags > 0)
count += skb_shinfo(skb)->nr_frags;
@@ -580,7 +585,13 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
entry = &tx->entry[tx->write];
entry->skb = skb;
- retval = tsnep_tx_map(skb, tx, count);
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ tx->adapter->hwtstamp_config.tx_type == HWTSTAMP_TX_ON) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ do_tstamp = true;
+ }
+
+ retval = tsnep_tx_map(skb, tx, count, do_tstamp);
if (retval < 0) {
tsnep_tx_unmap(tx, tx->write, count);
dev_kfree_skb_any(entry->skb);
@@ -592,9 +603,6 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
}
length = retval;
- if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-
for (i = 0; i < count; i++)
tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length,
i == count - 1);
@@ -845,15 +853,14 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
length = tsnep_tx_unmap(tx, tx->read, count);
- if ((entry->type & TSNEP_TX_TYPE_SKB) &&
- (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
+ if (((entry->type & TSNEP_TX_TYPE_SKB_TSTAMP) == TSNEP_TX_TYPE_SKB_TSTAMP) &&
(__le32_to_cpu(entry->desc_wb->properties) &
TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
struct skb_shared_hwtstamps hwtstamps;
u64 timestamp;
- if (skb_shinfo(entry->skb)->tx_flags &
- SKBTX_HW_TSTAMP_USE_CYCLES)
+ if (entry->skb->sk &&
+ READ_ONCE(entry->skb->sk->sk_tsflags) & SOF_TIMESTAMPING_BIND_PHC)
timestamp =
__le64_to_cpu(entry->desc_wb->counter);
else
@@ -1966,23 +1973,41 @@ failed:
static void tsnep_queue_enable(struct tsnep_queue *queue)
{
+ struct tsnep_adapter *adapter = queue->adapter;
+
+ netif_napi_set_irq(&queue->napi, queue->irq);
napi_enable(&queue->napi);
- tsnep_enable_irq(queue->adapter, queue->irq_mask);
+ tsnep_enable_irq(adapter, queue->irq_mask);
- if (queue->tx)
+ if (queue->tx) {
+ netif_queue_set_napi(adapter->netdev, queue->tx->queue_index,
+ NETDEV_QUEUE_TYPE_TX, &queue->napi);
tsnep_tx_enable(queue->tx);
+ }
- if (queue->rx)
+ if (queue->rx) {
+ netif_queue_set_napi(adapter->netdev, queue->rx->queue_index,
+ NETDEV_QUEUE_TYPE_RX, &queue->napi);
tsnep_rx_enable(queue->rx);
+ }
}
static void tsnep_queue_disable(struct tsnep_queue *queue)
{
- if (queue->tx)
+ struct tsnep_adapter *adapter = queue->adapter;
+
+ if (queue->rx)
+ netif_queue_set_napi(adapter->netdev, queue->rx->queue_index,
+ NETDEV_QUEUE_TYPE_RX, NULL);
+
+ if (queue->tx) {
tsnep_tx_disable(queue->tx, &queue->napi);
+ netif_queue_set_napi(adapter->netdev, queue->tx->queue_index,
+ NETDEV_QUEUE_TYPE_TX, NULL);
+ }
napi_disable(&queue->napi);
- tsnep_disable_irq(queue->adapter, queue->irq_mask);
+ tsnep_disable_irq(adapter, queue->irq_mask);
/* disable RX after NAPI polling has been disabled, because RX can be
* enabled during NAPI polling
@@ -2143,16 +2168,6 @@ static netdev_tx_t tsnep_netdev_xmit_frame(struct sk_buff *skb,
return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]);
}
-static int tsnep_netdev_ioctl(struct net_device *netdev, struct ifreq *ifr,
- int cmd)
-{
- if (!netif_running(netdev))
- return -EINVAL;
- if (cmd == SIOCSHWTSTAMP || cmd == SIOCGHWTSTAMP)
- return tsnep_ptp_ioctl(netdev, ifr, cmd);
- return phy_mii_ioctl(netdev->phydev, ifr, cmd);
-}
-
static void tsnep_netdev_set_multicast(struct net_device *netdev)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
@@ -2359,7 +2374,7 @@ static const struct net_device_ops tsnep_netdev_ops = {
.ndo_open = tsnep_netdev_open,
.ndo_stop = tsnep_netdev_close,
.ndo_start_xmit = tsnep_netdev_xmit_frame,
- .ndo_eth_ioctl = tsnep_netdev_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_rx_mode = tsnep_netdev_set_multicast,
.ndo_get_stats64 = tsnep_netdev_get_stats64,
.ndo_set_mac_address = tsnep_netdev_set_mac_address,
@@ -2369,6 +2384,8 @@ static const struct net_device_ops tsnep_netdev_ops = {
.ndo_bpf = tsnep_netdev_bpf,
.ndo_xdp_xmit = tsnep_netdev_xdp_xmit,
.ndo_xsk_wakeup = tsnep_netdev_xsk_wakeup,
+ .ndo_hwtstamp_get = tsnep_ptp_hwtstamp_get,
+ .ndo_hwtstamp_set = tsnep_ptp_hwtstamp_set,
};
static int tsnep_mac_init(struct tsnep_adapter *adapter)
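
Among the tsnep_main.c changes, tsnep_queue_enable()/tsnep_queue_disable() now publish the queue-to-NAPI mapping with netif_queue_set_napi() and clear it again before disabling NAPI. A rough sketch of the pairing, assuming a hypothetical foo_queue with one NAPI instance per combined TX/RX queue (the real driver calls these from paths that already hold the required netdev locks):

#include <linux/netdevice.h>

/* Hypothetical per-queue context. */
struct foo_queue {
	struct net_device *netdev;
	struct napi_struct napi;
	int irq;
	int index;
};

static void foo_queue_enable(struct foo_queue *q)
{
	netif_napi_set_irq(&q->napi, q->irq);
	napi_enable(&q->napi);

	/* Publish the queue<->NAPI mapping after NAPI is enabled. */
	netif_queue_set_napi(q->netdev, q->index, NETDEV_QUEUE_TYPE_TX, &q->napi);
	netif_queue_set_napi(q->netdev, q->index, NETDEV_QUEUE_TYPE_RX, &q->napi);
}

static void foo_queue_disable(struct foo_queue *q)
{
	/* Clear the mapping before NAPI is disabled. */
	netif_queue_set_napi(q->netdev, q->index, NETDEV_QUEUE_TYPE_RX, NULL);
	netif_queue_set_napi(q->netdev, q->index, NETDEV_QUEUE_TYPE_TX, NULL);

	napi_disable(&q->napi);
}
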
diff --git a/drivers/net/ethernet/engleder/tsnep_ptp.c b/drivers/net/ethernet/engleder/tsnep_ptp.c
index 54fbf0126815..ae1308eb813d 100644
--- a/drivers/net/ethernet/engleder/tsnep_ptp.c
+++ b/drivers/net/ethernet/engleder/tsnep_ptp.c
@@ -19,57 +19,53 @@ void tsnep_get_system_time(struct tsnep_adapter *adapter, u64 *time)
*time = (((u64)high) << 32) | ((u64)low);
}
-int tsnep_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+int tsnep_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
- struct hwtstamp_config config;
-
- if (!ifr)
- return -EINVAL;
-
- if (cmd == SIOCSHWTSTAMP) {
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- switch (config.tx_type) {
- case HWTSTAMP_TX_OFF:
- case HWTSTAMP_TX_ON:
- break;
- default:
- return -ERANGE;
- }
-
- switch (config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- break;
- case HWTSTAMP_FILTER_ALL:
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- case HWTSTAMP_FILTER_NTP_ALL:
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- break;
- default:
- return -ERANGE;
- }
-
- memcpy(&adapter->hwtstamp_config, &config,
- sizeof(adapter->hwtstamp_config));
+
+ *config = adapter->hwtstamp_config;
+ return 0;
+}
+
+int tsnep_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
}
- if (copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
- sizeof(adapter->hwtstamp_config)))
- return -EFAULT;
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+ adapter->hwtstamp_config = *config;
return 0;
}
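
tsnep (here), dpaa and dpaa2 (below) all move hardware timestamp configuration out of the SIOC[GS]HWTSTAMP ioctl path and into the dedicated ndo_hwtstamp_get/ndo_hwtstamp_set callbacks, which operate on an already-unmarshalled struct kernel_hwtstamp_config. A stripped-down sketch of the shape, with a hypothetical foo driver that timestamps either all RX frames or none:

#include <linux/net_tstamp.h>
#include <linux/netdevice.h>

struct foo_priv {
	struct kernel_hwtstamp_config hwtstamp_config;
};

static int foo_hwtstamp_get(struct net_device *netdev,
			    struct kernel_hwtstamp_config *config)
{
	struct foo_priv *priv = netdev_priv(netdev);

	/* No copy_to_user() needed; the core marshals the structure. */
	*config = priv->hwtstamp_config;
	return 0;
}

static int foo_hwtstamp_set(struct net_device *netdev,
			    struct kernel_hwtstamp_config *config,
			    struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = netdev_priv(netdev);

	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	/* This sketch cannot filter per packet type, so report that
	 * everything gets timestamped when any RX filter is requested.
	 */
	if (config->rx_filter != HWTSTAMP_FILTER_NONE)
		config->rx_filter = HWTSTAMP_FILTER_ALL;

	priv->hwtstamp_config = *config;
	return 0;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_hwtstamp_get = foo_hwtstamp_get,
	.ndo_hwtstamp_set = foo_hwtstamp_set,
};
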
diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig
index c699bd6bcbb9..474073c7f94d 100644
--- a/drivers/net/ethernet/faraday/Kconfig
+++ b/drivers/net/ethernet/faraday/Kconfig
@@ -31,6 +31,7 @@ config FTGMAC100
depends on ARM || COMPILE_TEST
depends on !64BIT || BROKEN
select PHYLIB
+ select FIXED_PHY
select MDIO_ASPEED if MACH_ASPEED_G6
select CRC32
help
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 17ec35e75a65..a863f7841210 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -9,6 +9,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
+#include <linux/reset.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -101,6 +102,8 @@ struct ftgmac100 {
/* AST2500/AST2600 RMII ref clock gate */
struct clk *rclk;
+ /* Aspeed reset control */
+ struct reset_control *rst;
/* Link management */
int cur_speed;
@@ -148,6 +151,23 @@ static int ftgmac100_reset_and_config_mac(struct ftgmac100 *priv)
{
u32 maccr = 0;
+ /* Aspeed RMII needs SCU reset to clear status */
+ if (priv->is_aspeed && priv->netdev->phydev->interface == PHY_INTERFACE_MODE_RMII) {
+ int err;
+
+ err = reset_control_assert(priv->rst);
+ if (err) {
+ dev_err(priv->dev, "Failed to reset mac (%d)\n", err);
+ return err;
+ }
+ usleep_range(10000, 20000);
+ err = reset_control_deassert(priv->rst);
+ if (err) {
+ dev_err(priv->dev, "Failed to deassert mac reset (%d)\n", err);
+ return err;
+ }
+ }
+
switch (priv->cur_speed) {
case SPEED_10:
case 0: /* no link */
@@ -1428,7 +1448,7 @@ static void ftgmac100_adjust_link(struct net_device *netdev)
/* Disable all interrupts */
iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
- /* Release phy lock to allow ftgmac100_reset to aquire it, keeping lock
+ /* Release phy lock to allow ftgmac100_reset to acquire it, keeping lock
* order consistent to prevent dead lock.
*/
if (netdev->phydev)
@@ -1730,16 +1750,17 @@ err_register_mdiobus:
static void ftgmac100_phy_disconnect(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
- if (!netdev->phydev)
+ if (!phydev)
return;
- phy_disconnect(netdev->phydev);
+ phy_disconnect(phydev);
if (of_phy_is_fixed_link(priv->dev->of_node))
of_phy_deregister_fixed_link(priv->dev->of_node);
if (priv->use_ncsi)
- fixed_phy_unregister(netdev->phydev);
+ fixed_phy_unregister(phydev);
}
static void ftgmac100_destroy_mdio(struct net_device *netdev)
@@ -1906,7 +1927,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
goto err_phy_connect;
}
- phydev = fixed_phy_register(PHY_POLL, &ncsi_phy_status, np);
+ phydev = fixed_phy_register(&ncsi_phy_status, np);
if (IS_ERR(phydev)) {
dev_err(&pdev->dev, "failed to register fixed PHY device\n");
err = PTR_ERR(phydev);
@@ -1968,6 +1989,12 @@ static int ftgmac100_probe(struct platform_device *pdev)
}
+ priv->rst = devm_reset_control_get_optional_exclusive(priv->dev, NULL);
+ if (IS_ERR(priv->rst)) {
+ err = PTR_ERR(priv->rst);
+ goto err_phy_connect;
+ }
+
if (priv->is_aspeed) {
err = ftgmac100_setup_clk(priv);
if (err)
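
The ftgmac100 changes pick up an optional SCU reset line with devm_reset_control_get_optional_exclusive() and pulse it before reconfiguring the MAC on Aspeed RMII setups. A hedged sketch of that probe-time lookup and assert/deassert cycle (the foo_* names are illustrative, not the driver's):

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

struct foo_priv {
	struct device *dev;
	struct reset_control *rst;
};

static int foo_probe_reset(struct foo_priv *priv)
{
	/* Returns NULL (not an error) when the device tree describes no
	 * reset, so the later assert/deassert calls become no-ops.
	 */
	priv->rst = devm_reset_control_get_optional_exclusive(priv->dev, NULL);
	return PTR_ERR_OR_ZERO(priv->rst);
}

static int foo_pulse_reset(struct foo_priv *priv)
{
	int err;

	err = reset_control_assert(priv->rst);
	if (err)
		return err;

	usleep_range(10000, 20000);

	return reset_control_deassert(priv->rst);
}
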
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index ed18450fd2cc..3c9961806f75 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -196,7 +196,7 @@ enum intr_status_bits {
ERI = 0x00000080, /* receive early int */
CNTOVF = 0x00000040, /* counter overflow */
RBU = 0x00000020, /* receive buffer unavailable */
- TBU = 0x00000010, /* transmit buffer unavilable */
+ TBU = 0x00000010, /* transmit buffer unavailable */
TI = 0x00000008, /* transmit interrupt */
RI = 0x00000004, /* receive interrupt */
RxErr = 0x00000002, /* receive error */
@@ -215,7 +215,7 @@ enum rx_mode_bits {
CR_W_RXMODEMASK = 0x000000e0,
CR_W_PROM = 0x00000080, /* promiscuous mode */
CR_W_AB = 0x00000040, /* accept broadcast */
- CR_W_AM = 0x00000020, /* accept mutlicast */
+ CR_W_AM = 0x00000020, /* accept multicast */
CR_W_ARP = 0x00000008, /* receive runt pkt */
CR_W_ALP = 0x00000004, /* receive long pkt */
CR_W_SEP = 0x00000002, /* receive error pkt */
@@ -1074,7 +1074,7 @@ static void allocate_rx_buffers(struct net_device *dev)
static void netdev_timer(struct timer_list *t)
{
- struct netdev_private *np = from_timer(np, t, timer);
+ struct netdev_private *np = timer_container_of(np, t, timer);
struct net_device *dev = np->mii.dev;
void __iomem *ioaddr = np->mem;
int old_crvalue = np->crvalue;
@@ -1163,7 +1163,7 @@ static void enable_rxtx(struct net_device *dev)
static void reset_timer(struct timer_list *t)
{
- struct netdev_private *np = from_timer(np, t, reset_timer);
+ struct netdev_private *np = timer_container_of(np, t, reset_timer);
struct net_device *dev = np->mii.dev;
unsigned long flags;
@@ -1900,8 +1900,8 @@ static int netdev_close(struct net_device *dev)
/* Stop the chip's Tx and Rx processes. */
stop_nic_rxtx(ioaddr, 0);
- del_timer_sync(&np->timer);
- del_timer_sync(&np->reset_timer);
+ timer_delete_sync(&np->timer);
+ timer_delete_sync(&np->reset_timer);
free_irq(np->pci_dev->irq, dev);
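
The fealnx hunks are mechanical conversions to the current timer API: from_timer() becomes timer_container_of() and del_timer_sync() becomes timer_delete_sync(). A minimal sketch with a hypothetical foo_private:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct foo_private {
	struct timer_list timer;
};

static void foo_timer_fn(struct timer_list *t)
{
	/* was: from_timer(np, t, timer) */
	struct foo_private *np = timer_container_of(np, t, timer);

	/* ... periodic work ..., possibly re-arming with mod_timer() */
	mod_timer(&np->timer, jiffies + HZ);
}

static void foo_start(struct foo_private *np)
{
	timer_setup(&np->timer, foo_timer_fn, 0);
	mod_timer(&np->timer, jiffies + HZ);
}

static void foo_stop(struct foo_private *np)
{
	/* was: del_timer_sync(&np->timer) */
	timer_delete_sync(&np->timer);
}
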
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 75401d2a5fb4..e2a591cf9601 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -28,6 +28,7 @@ config FEC
depends on PTP_1588_CLOCK_OPTIONAL
select CRC32
select PHYLIB
+ select FIXED_PHY if M5272
select PAGE_POOL
imply PAGE_POOL_STATS
imply NET_SELFTESTS
@@ -71,7 +72,6 @@ config FSL_XGMAC_MDIO
tristate "Freescale XGMAC MDIO"
select PHYLIB
depends on OF
- select MDIO_DEVRES
select OF_MDIO
help
This driver supports the MDIO bus on the Fman 10G Ethernet MACs, and
@@ -81,8 +81,7 @@ config UCC_GETH
tristate "Freescale QE Gigabit Ethernet"
depends on QUICC_ENGINE && PPC32
select FSL_PQ_MDIO
- select PHYLIB
- select FIXED_PHY
+ select PHYLINK
help
This driver supports the Gigabit Ethernet mode of the QUICC Engine,
which is available on some Freescale SOCs.
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index ac06b01fe934..3edc8d142dd5 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -28,7 +28,6 @@
#include <linux/percpu.h>
#include <linux/dma-mapping.h>
#include <linux/sort.h>
-#include <linux/phy_fixed.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <soc/fsl/bman.h>
@@ -1820,7 +1819,6 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
struct page *page, *head_page;
struct dpaa_bp *dpaa_bp;
void *vaddr, *sg_vaddr;
- int frag_off, frag_len;
struct sk_buff *skb;
dma_addr_t sg_addr;
int page_offset;
@@ -1863,6 +1861,11 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
* on Tx, if extra headers are added.
*/
WARN_ON(fd_off != priv->rx_headroom);
+ /* The offset to data start within the buffer holding
+ * the SGT should always be equal to the offset to data
+ * start within the first buffer holding the frame.
+ */
+ WARN_ON_ONCE(fd_off != qm_sg_entry_get_off(&sgt[i]));
skb_reserve(skb, fd_off);
skb_put(skb, qm_sg_entry_get_len(&sgt[i]));
} else {
@@ -1876,21 +1879,23 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
page = virt_to_page(sg_vaddr);
head_page = virt_to_head_page(sg_vaddr);
- /* Compute offset in (possibly tail) page */
+ /* Compute offset of sg_vaddr in (possibly tail) page */
page_offset = ((unsigned long)sg_vaddr &
(PAGE_SIZE - 1)) +
(page_address(page) - page_address(head_page));
- /* page_offset only refers to the beginning of sgt[i];
- * but the buffer itself may have an internal offset.
+
+ /* Non-initial SGT entries should not have a buffer
+ * offset.
*/
- frag_off = qm_sg_entry_get_off(&sgt[i]) + page_offset;
- frag_len = qm_sg_entry_get_len(&sgt[i]);
+ WARN_ON_ONCE(qm_sg_entry_get_off(&sgt[i]));
+
/* skb_add_rx_frag() does no checking on the page; if
* we pass it a tail page, we'll end up with
- * bad page accounting and eventually with segafults.
+ * bad page accounting and eventually with segfaults.
*/
- skb_add_rx_frag(skb, i - 1, head_page, frag_off,
- frag_len, dpaa_bp->size);
+ skb_add_rx_frag(skb, i - 1, head_page, page_offset,
+ qm_sg_entry_get_len(&sgt[i]),
+ dpaa_bp->size);
}
/* Update the pool count for the current {cpu x bpool} */
@@ -2275,7 +2280,7 @@ static int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv,
new_xdpf->len = xdpf->len;
new_xdpf->headroom = priv->tx_headroom;
new_xdpf->frame_sz = DPAA_BP_RAW_SIZE;
- new_xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
+ new_xdpf->mem_type = MEM_TYPE_PAGE_ORDER0;
/* Release the initial buffer */
xdp_return_frame_rx_napi(xdpf);
@@ -2766,7 +2771,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use &&
!fman_port_get_hash_result_offset(priv->mac_dev->port[RX],
&hash_offset)) {
- hash = be32_to_cpu(*(u32 *)(vaddr + hash_offset));
+ hash = be32_to_cpu(*(__be32 *)(vaddr + hash_offset));
hash_valid = true;
}
@@ -3083,15 +3088,25 @@ static int dpaa_xdp_xmit(struct net_device *net_dev, int n,
return nxmit;
}
-static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int dpaa_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
{
struct dpaa_priv *priv = netdev_priv(dev);
- struct hwtstamp_config config;
- if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
- return -EFAULT;
+ config->tx_type = priv->tx_tstamp ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config->rx_filter = priv->rx_tstamp ? HWTSTAMP_FILTER_ALL :
+ HWTSTAMP_FILTER_NONE;
- switch (config.tx_type) {
+ return 0;
+}
+
+static int dpaa_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct dpaa_priv *priv = netdev_priv(dev);
+
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
/* Couldn't disable rx/tx timestamping separately.
* Do nothing here.
@@ -3106,7 +3121,7 @@ static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return -ERANGE;
}
- if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
+ if (config->rx_filter == HWTSTAMP_FILTER_NONE) {
/* Couldn't disable rx/tx timestamping separately.
* Do nothing here.
*/
@@ -3115,28 +3130,17 @@ static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
priv->rx_tstamp = true;
/* TS is set for all frame types, not only those requested */
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
}
- return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
{
- int ret = -EINVAL;
struct dpaa_priv *priv = netdev_priv(net_dev);
- if (cmd == SIOCGMIIREG) {
- if (net_dev->phydev)
- return phylink_mii_ioctl(priv->mac_dev->phylink, rq,
- cmd);
- }
-
- if (cmd == SIOCSHWTSTAMP)
- return dpaa_ts_ioctl(net_dev, rq, cmd);
-
- return ret;
+ return phylink_mii_ioctl(priv->mac_dev->phylink, rq, cmd);
}
static const struct net_device_ops dpaa_ops = {
@@ -3145,7 +3149,6 @@ static const struct net_device_ops dpaa_ops = {
.ndo_stop = dpaa_eth_stop,
.ndo_tx_timeout = dpaa_tx_timeout,
.ndo_get_stats64 = dpaa_get_stats64,
- .ndo_change_carrier = fixed_phy_change_carrier,
.ndo_set_mac_address = dpaa_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = dpaa_set_rx_mode,
@@ -3154,6 +3157,8 @@ static const struct net_device_ops dpaa_ops = {
.ndo_change_mtu = dpaa_change_mtu,
.ndo_bpf = dpaa_xdp,
.ndo_xdp_xmit = dpaa_xdp_xmit,
+ .ndo_hwtstamp_get = dpaa_hwtstamp_get,
+ .ndo_hwtstamp_set = dpaa_hwtstamp_set,
};
static int dpaa_napi_add(struct net_device *net_dev)
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
index 6f0e58a2a58a..9e1d44ae92cc 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
@@ -56,7 +56,7 @@ DECLARE_EVENT_CLASS(dpaa_eth_fd,
__entry->fd_format = qm_fd_get_format(fd);
__entry->fd_offset = qm_fd_get_offset(fd);
__entry->fd_length = qm_fd_get_length(fd);
- __entry->fd_status = fd->status;
+ __entry->fd_status = __be32_to_cpu(fd->status);
__assign_str(name);
),
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 9986f6e1f587..ed3fa80af8c3 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -263,8 +263,8 @@ static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
ethtool_puts(&data, dpaa_stats_global[i]);
}
-static int dpaa_get_hash_opts(struct net_device *dev,
- struct ethtool_rxnfc *cmd)
+static int dpaa_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
struct dpaa_priv *priv = netdev_priv(dev);
@@ -299,22 +299,6 @@ static int dpaa_get_hash_opts(struct net_device *dev,
return 0;
}
-static int dpaa_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
- u32 *unused)
-{
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_GRXFH:
- ret = dpaa_get_hash_opts(dev, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
static void dpaa_set_hash(struct net_device *net_dev, bool enable)
{
struct mac_device *mac_dev;
@@ -329,8 +313,9 @@ static void dpaa_set_hash(struct net_device *net_dev, bool enable)
priv->keygen_in_use = enable;
}
-static int dpaa_set_hash_opts(struct net_device *dev,
- struct ethtool_rxnfc *nfc)
+static int dpaa_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
int ret = -EINVAL;
@@ -364,21 +349,6 @@ static int dpaa_set_hash_opts(struct net_device *dev,
return ret;
}
-static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
-{
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = dpaa_set_hash_opts(dev, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
static int dpaa_get_ts_info(struct net_device *net_dev,
struct kernel_ethtool_ts_info *info)
{
@@ -401,8 +371,10 @@ static int dpaa_get_ts_info(struct net_device *net_dev,
of_node_put(ptp_node);
}
- if (ptp_dev)
+ if (ptp_dev) {
ptp = platform_get_drvdata(ptp_dev);
+ put_device(&ptp_dev->dev);
+ }
if (ptp)
info->phc_index = ptp->phc_index;
@@ -495,6 +467,47 @@ revert_values:
return res;
}
+static void dpaa_get_pause_stats(struct net_device *net_dev,
+ struct ethtool_pause_stats *s)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev->get_pause_stats)
+ mac_dev->get_pause_stats(mac_dev->fman_mac, s);
+}
+
+static void dpaa_get_rmon_stats(struct net_device *net_dev,
+ struct ethtool_rmon_stats *s,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev->get_rmon_stats)
+ mac_dev->get_rmon_stats(mac_dev->fman_mac, s, ranges);
+}
+
+static void dpaa_get_eth_ctrl_stats(struct net_device *net_dev,
+ struct ethtool_eth_ctrl_stats *s)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev->get_eth_ctrl_stats)
+ mac_dev->get_eth_ctrl_stats(mac_dev->fman_mac, s);
+}
+
+static void dpaa_get_eth_mac_stats(struct net_device *net_dev,
+ struct ethtool_eth_mac_stats *s)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev->get_eth_mac_stats)
+ mac_dev->get_eth_mac_stats(mac_dev->fman_mac, s);
+}
+
const struct ethtool_ops dpaa_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
ETHTOOL_COALESCE_RX_MAX_FRAMES,
@@ -510,9 +523,13 @@ const struct ethtool_ops dpaa_ethtool_ops = {
.get_strings = dpaa_get_strings,
.get_link_ksettings = dpaa_get_link_ksettings,
.set_link_ksettings = dpaa_set_link_ksettings,
- .get_rxnfc = dpaa_get_rxnfc,
- .set_rxnfc = dpaa_set_rxnfc,
+ .get_rxfh_fields = dpaa_get_rxfh_fields,
+ .set_rxfh_fields = dpaa_set_rxfh_fields,
.get_ts_info = dpaa_get_ts_info,
.get_coalesce = dpaa_get_coalesce,
.set_coalesce = dpaa_set_coalesce,
+ .get_pause_stats = dpaa_get_pause_stats,
+ .get_rmon_stats = dpaa_get_rmon_stats,
+ .get_eth_ctrl_stats = dpaa_get_eth_ctrl_stats,
+ .get_eth_mac_stats = dpaa_get_eth_mac_stats,
};
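
The dpaa_get_ts_info() hunk plugs a device reference leak: of_find_device_by_node() takes a reference on the platform device it returns, so the caller must drop it with put_device() once the drvdata has been read. A sketch of the rule, with a hypothetical lookup helper:

#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

/* Hypothetical lookup of a PTP clock's drvdata via its DT node. */
static void *foo_get_ptp_drvdata(struct device_node *ptp_node)
{
	struct platform_device *ptp_dev;
	void *ptp = NULL;

	ptp_dev = of_find_device_by_node(ptp_node);
	if (ptp_dev) {
		ptp = platform_get_drvdata(ptp_dev);
		/* of_find_device_by_node() took a reference; drop it. */
		put_device(&ptp_dev->dev);
	}

	return ptp;
}
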
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 29886a8ba73f..18d86badd6ea 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1077,8 +1077,7 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
dma_addr_t addr;
buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);
- aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
- DPAA2_ETH_TX_BUF_ALIGN);
+ aligned_start = PTR_ALIGN(buffer_start, DPAA2_ETH_TX_BUF_ALIGN);
if (aligned_start >= skb->head)
buffer_start = aligned_start;
else
@@ -2585,40 +2584,52 @@ static int dpaa2_eth_set_features(struct net_device *net_dev,
return 0;
}
-static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int dpaa2_eth_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct dpaa2_eth_priv *priv = netdev_priv(dev);
- struct hwtstamp_config config;
if (!dpaa2_ptp)
return -EINVAL;
- if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
- return -EFAULT;
-
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
case HWTSTAMP_TX_ONESTEP_SYNC:
- priv->tx_tstamp_type = config.tx_type;
+ priv->tx_tstamp_type = config->tx_type;
break;
default:
return -ERANGE;
}
- if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
+ if (config->rx_filter == HWTSTAMP_FILTER_NONE) {
priv->rx_tstamp = false;
} else {
priv->rx_tstamp = true;
/* TS is set for all frame types, not only those requested */
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
}
if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
dpaa2_ptp_onestep_reg_update_method(priv);
- return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
+}
+
+static int dpaa2_eth_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+
+ if (!dpaa2_ptp)
+ return -EINVAL;
+
+ config->tx_type = priv->tx_tstamp_type;
+ config->rx_filter = priv->rx_tstamp ? HWTSTAMP_FILTER_ALL :
+ HWTSTAMP_FILTER_NONE;
+
+ return 0;
}
static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -2626,9 +2637,6 @@ static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
struct dpaa2_eth_priv *priv = netdev_priv(dev);
int err;
- if (cmd == SIOCSHWTSTAMP)
- return dpaa2_eth_ts_ioctl(dev, rq, cmd);
-
mutex_lock(&priv->mac_lock);
if (dpaa2_eth_is_type_phy(priv)) {
@@ -3034,7 +3042,9 @@ static const struct net_device_ops dpaa2_eth_ops = {
.ndo_xsk_wakeup = dpaa2_xsk_wakeup,
.ndo_setup_tc = dpaa2_eth_setup_tc,
.ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid,
- .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid
+ .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid,
+ .ndo_hwtstamp_get = dpaa2_eth_hwtstamp_get,
+ .ndo_hwtstamp_set = dpaa2_eth_hwtstamp_set,
};
static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
@@ -3928,6 +3938,7 @@ static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv,
MEM_TYPE_PAGE_ORDER0, NULL);
if (err) {
dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
+ xdp_rxq_info_unreg(&fq->channel->xdp_rxq);
return err;
}
@@ -4421,17 +4432,25 @@ static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
return -EINVAL;
}
if (err)
- return err;
+ goto out;
}
err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
DPNI_QUEUE_TX, &priv->tx_qdid);
if (err) {
dev_err(dev, "dpni_get_qdid() failed\n");
- return err;
+ goto out;
}
return 0;
+
+out:
+ while (i--) {
+ if (priv->fq[i].type == DPAA2_RX_FQ &&
+ xdp_rxq_info_is_reg(&priv->fq[i].channel->xdp_rxq))
+ xdp_rxq_info_unreg(&priv->fq[i].channel->xdp_rxq);
+ }
+ return err;
}
/* Allocate rings for storing incoming frame descriptors */
@@ -4646,12 +4665,19 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
return PTR_ERR(dpmac_dev);
}
- if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
+ if (IS_ERR(dpmac_dev))
return 0;
+ if (dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) {
+ err = 0;
+ goto out_put_device;
+ }
+
mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
- if (!mac)
- return -ENOMEM;
+ if (!mac) {
+ err = -ENOMEM;
+ goto out_put_device;
+ }
mac->mc_dev = dpmac_dev;
mac->mc_io = priv->mc_io;
@@ -4685,6 +4711,8 @@ err_close_mac:
dpaa2_mac_close(mac);
err_free_mac:
kfree(mac);
+out_put_device:
+ put_device(&dpmac_dev->dev);
return err;
}
@@ -4814,6 +4842,17 @@ static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv)
}
}
+static void dpaa2_eth_free_rx_xdp_rxq(struct dpaa2_eth_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_fqs; i++) {
+ if (priv->fq[i].type == DPAA2_RX_FQ &&
+ xdp_rxq_info_is_reg(&priv->fq[i].channel->xdp_rxq))
+ xdp_rxq_info_unreg(&priv->fq[i].channel->xdp_rxq);
+ }
+}
+
static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
{
struct device *dev;
@@ -4844,7 +4883,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
priv->tx_tstamp_type = HWTSTAMP_TX_OFF;
priv->rx_tstamp = false;
- priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", 0, 0);
+ priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", WQ_PERCPU, 0);
if (!priv->dpaa2_ptp_wq) {
err = -ENOMEM;
goto err_wq_alloc;
@@ -5017,6 +5056,7 @@ err_alloc_percpu_extras:
free_percpu(priv->percpu_stats);
err_alloc_percpu_stats:
dpaa2_eth_del_ch_napi(priv);
+ dpaa2_eth_free_rx_xdp_rxq(priv);
err_bind:
dpaa2_eth_free_dpbps(priv);
err_dpbp_setup:
@@ -5069,6 +5109,7 @@ static void dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
free_percpu(priv->percpu_extras);
dpaa2_eth_del_ch_napi(priv);
+ dpaa2_eth_free_rx_xdp_rxq(priv);
dpaa2_eth_free_dpbps(priv);
dpaa2_eth_free_dpio(priv);
dpaa2_eth_free_dpni(priv);
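
Several dpaa2-eth hunks make sure every registered XDP RX queue info structure is unregistered again on error and teardown paths; xdp_rxq_info_is_reg() keeps the unwind safe for queues that never completed registration. A condensed sketch of that pattern (the foo_* helpers are hypothetical):

#include <linux/netdevice.h>
#include <net/xdp.h>

/* Hypothetical per-queue setup; dpaa2-eth does this per RX frame queue. */
static int foo_setup_xdp_rxq(struct xdp_rxq_info *rxq, struct net_device *dev,
			     u32 queue_index, unsigned int napi_id)
{
	int err;

	err = xdp_rxq_info_reg(rxq, dev, queue_index, napi_id);
	if (err)
		return err;

	err = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_ORDER0, NULL);
	if (err) {
		/* Roll back the registration done just above. */
		xdp_rxq_info_unreg(rxq);
		return err;
	}

	return 0;
}

static void foo_teardown_xdp_rxq(struct xdp_rxq_info *rxq)
{
	/* Safe to call for queues that never finished registration. */
	if (xdp_rxq_info_is_reg(rxq))
		xdp_rxq_info_unreg(rxq);
}
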
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 74ef77cb7078..baab4f1c908d 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -711,6 +711,13 @@ static int dpaa2_eth_update_cls_rule(struct net_device *net_dev,
return 0;
}
+static u32 dpaa2_eth_get_rx_ring_count(struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ return dpaa2_eth_queue_count(priv);
+}
+
static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
{
@@ -719,16 +726,6 @@ static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
int i, j = 0;
switch (rxnfc->cmd) {
- case ETHTOOL_GRXFH:
- /* we purposely ignore cmd->flow_type for now, because the
- * classifier only supports a single set of fields for all
- * protocols
- */
- rxnfc->data = priv->rx_hash_fields;
- break;
- case ETHTOOL_GRXRINGS:
- rxnfc->data = dpaa2_eth_queue_count(priv);
- break;
case ETHTOOL_GRXCLSRLCNT:
rxnfc->rule_cnt = 0;
rxnfc->rule_cnt = dpaa2_eth_num_cls_rules(priv);
@@ -767,11 +764,6 @@ static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
int err = 0;
switch (rxnfc->cmd) {
- case ETHTOOL_SRXFH:
- if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data)
- return -EOPNOTSUPP;
- err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
- break;
case ETHTOOL_SRXCLSRLINS:
err = dpaa2_eth_update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
break;
@@ -785,6 +777,28 @@ static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
return err;
}
+static int dpaa2_eth_get_rxfh_fields(struct net_device *net_dev,
+ struct ethtool_rxfh_fields *rxnfc)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ /* we purposely ignore cmd->flow_type for now, because the
+ * classifier only supports a single set of fields for all
+ * protocols
+ */
+ rxnfc->data = priv->rx_hash_fields;
+ return 0;
+}
+
+static int dpaa2_eth_set_rxfh_fields(struct net_device *net_dev,
+ const struct ethtool_rxfh_fields *rxnfc,
+ struct netlink_ext_ack *extack)
+{
+ if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data)
+ return -EOPNOTSUPP;
+ return dpaa2_eth_set_hash(net_dev, rxnfc->data);
+}
+
int dpaa2_phc_index = -1;
EXPORT_SYMBOL(dpaa2_phc_index);
@@ -939,6 +953,9 @@ const struct ethtool_ops dpaa2_ethtool_ops = {
.get_strings = dpaa2_eth_get_strings,
.get_rxnfc = dpaa2_eth_get_rxnfc,
.set_rxnfc = dpaa2_eth_set_rxnfc,
+ .get_rx_ring_count = dpaa2_eth_get_rx_ring_count,
+ .get_rxfh_fields = dpaa2_eth_get_rxfh_fields,
+ .set_rxfh_fields = dpaa2_eth_set_rxfh_fields,
.get_ts_info = dpaa2_eth_get_ts_info,
.get_tunable = dpaa2_eth_get_tunable,
.set_tunable = dpaa2_eth_set_tunable,
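
For reference, the conversion above moves RX flow-hash configuration out of the generic {get,set}_rxnfc handlers into the dedicated rxfh_fields callbacks, and replaces the ETHTOOL_GRXRINGS query with the get_rx_ring_count op. A minimal sketch of the same wiring for a hypothetical driver (the foo_* names, FOO_RXH_SUPPORTED, FOO_NUM_RX_RINGS and foo_program_hash() are invented for illustration and are not part of this patch):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

#define FOO_NUM_RX_RINGS	8
#define FOO_RXH_SUPPORTED	(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)

struct foo_priv {
	u32 rx_hash_fields;
};

static int foo_program_hash(struct foo_priv *priv)
{
	return 0;	/* would reprogram the hardware hash fields here */
}

static int foo_get_rxfh_fields(struct net_device *ndev,
			       struct ethtool_rxfh_fields *fields)
{
	struct foo_priv *priv = netdev_priv(ndev);

	/* report the currently enabled RXH_* hash fields */
	fields->data = priv->rx_hash_fields;
	return 0;
}

static int foo_set_rxfh_fields(struct net_device *ndev,
			       const struct ethtool_rxfh_fields *fields,
			       struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = netdev_priv(ndev);

	/* reject hash fields the hardware cannot key on */
	if (fields->data & ~FOO_RXH_SUPPORTED)
		return -EOPNOTSUPP;

	priv->rx_hash_fields = fields->data;
	return foo_program_hash(priv);
}

static u32 foo_get_rx_ring_count(struct net_device *ndev)
{
	return FOO_NUM_RX_RINGS;	/* what ETHTOOL_GRXRINGS used to report */
}

static const struct ethtool_ops foo_ethtool_ops = {
	.get_rx_ring_count	= foo_get_rx_ring_count,
	.get_rxfh_fields	= foo_get_rxfh_fields,
	.set_rxfh_fields	= foo_set_rxfh_fields,
	/* .get_rxnfc / .set_rxnfc keep handling only classification rules */
};

Userspace should keep using the same ethtool commands (e.g. "ethtool -n <dev> rx-flow-hash tcp4" to query, "-N ... rx-flow-hash tcp4 sdfn" to set); only the in-kernel dispatch changes.
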
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index a293b08f36d4..b1e1ad9e4b48 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -780,13 +780,14 @@ struct ethsw_dump_ctx {
static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry,
struct ethsw_dump_ctx *dump)
{
+ struct ndo_fdb_dump_context *ctx = (void *)dump->cb->ctx;
int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC;
u32 portid = NETLINK_CB(dump->cb->skb).portid;
u32 seq = dump->cb->nlh->nlmsg_seq;
struct nlmsghdr *nlh;
struct ndmsg *ndm;
- if (dump->idx < dump->cb->args[2])
+ if (dump->idx < ctx->fdb_idx)
goto skip;
nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
@@ -1447,12 +1448,19 @@ static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
return PTR_ERR(dpmac_dev);
- if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
+ if (IS_ERR(dpmac_dev))
return 0;
+ if (dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) {
+ err = 0;
+ goto out_put_device;
+ }
+
mac = kzalloc(sizeof(*mac), GFP_KERNEL);
- if (!mac)
- return -ENOMEM;
+ if (!mac) {
+ err = -ENOMEM;
+ goto out_put_device;
+ }
mac->mc_dev = dpmac_dev;
mac->mc_io = port_priv->ethsw_data->mc_io;
@@ -1482,6 +1490,8 @@ err_close_mac:
dpaa2_mac_close(mac);
err_free_mac:
kfree(mac);
+out_put_device:
+ put_device(&dpmac_dev->dev);
return err;
}
@@ -2726,7 +2736,7 @@ static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw)
dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n");
goto err_get_attr;
}
- ethsw->bpid = dpbp_attrs.id;
+ ethsw->bpid = dpbp_attrs.bpid;
return 0;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
index a466c2379146..4b0ae7d9af92 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
@@ -448,7 +448,5 @@ bool dpaa2_xsk_tx(struct dpaa2_eth_priv *priv,
percpu_stats->tx_errors++;
}
- xsk_tx_release(ch->xsk_pool);
-
return total_enqueued == budget;
}
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index 4d75e6807e92..117038104b69 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -1,19 +1,39 @@
# SPDX-License-Identifier: GPL-2.0
config FSL_ENETC_CORE
tristate
+ select NXP_NETC_LIB if NXP_NTMP
help
This module supports common functionality between the PF and VF
drivers for the NXP ENETC controller.
If compiled as module (M), the module name is fsl-enetc-core.
+config NXP_ENETC_PF_COMMON
+ tristate
+ help
+ This module supports common functionality between drivers of
+ different versions of NXP ENETC PF controllers.
+
+ If compiled as module (M), the module name is nxp-enetc-pf-common.
+
+config NXP_NETC_LIB
+ tristate
+ help
+ This module provides functionality common to both ENETC and the NETC
+ Switch, such as the NETC Table Management Protocol (NTMP) 2.0 and the
+ shared tc flower and debugfs interfaces.
+
+config NXP_NTMP
+ bool
+
config FSL_ENETC
tristate "ENETC PF driver"
+ depends on PTP_1588_CLOCK_OPTIONAL
depends on PCI_MSI
- select MDIO_DEVRES
select FSL_ENETC_CORE
select FSL_ENETC_IERB
select FSL_ENETC_MDIO
+ select NXP_ENETC_PF_COMMON
select PHYLINK
select PCS_LYNX
select DIMLIB
@@ -24,8 +44,27 @@ config FSL_ENETC
If compiled as module (M), the module name is fsl-enetc.
+config NXP_ENETC4
+ tristate "ENETC4 PF driver"
+ depends on PTP_1588_CLOCK_OPTIONAL
+ depends on PCI_MSI
+ select FSL_ENETC_CORE
+ select FSL_ENETC_MDIO
+ select NXP_ENETC_PF_COMMON
+ select NXP_NTMP
+ select PHYLINK
+ select DIMLIB
+ help
+ This driver supports NXP ENETC devices with major revision 4. ENETC
+ provides the NIC functionality in NETC; it supports virtualization/isolation
+ based on PCIe Single Root IO Virtualization (SR-IOV) and a full range
+ of TSN standards and NIC offload capabilities.
+
+ If compiled as module (M), the module name is nxp-enetc4.
+
config FSL_ENETC_VF
tristate "ENETC VF driver"
+ depends on PTP_1588_CLOCK_OPTIONAL
depends on PCI_MSI
select FSL_ENETC_CORE
select FSL_ENETC_MDIO
@@ -47,7 +86,7 @@ config FSL_ENETC_IERB
config FSL_ENETC_MDIO
tristate "ENETC MDIO driver"
- depends on PCI && MDIO_DEVRES && MDIO_BUS
+ depends on PCI && PHYLIB
help
This driver supports NXP ENETC Central MDIO controller as a PCIe
physical function (PF) device.
@@ -75,3 +114,17 @@ config FSL_ENETC_QOS
enable/disable from user space via Qos commands(tc). In the kernel
side, it can be loaded by Qos driver. Currently, it is only support
taprio(802.1Qbv) and Credit Based Shaper(802.1Qbu).
+
+config NXP_NETC_BLK_CTRL
+ tristate "NETC blocks control driver"
+ help
+ This driver configures Integrated Endpoint Register Block (IERB) and
+ Privileged Register Block (PRB) of NETC. For i.MX platforms, it also
+ includes the configuration of NETCMIX block.
+ The IERB contains registers that are used for pre-boot initialization,
+ debug, and non-customer configuration. The PRB controls global reset
+ and global error handling for NETC. The NETCMIX block is mainly used
+ to set the MII and PCS protocols of the links; it also contains
+ settings for some other functions.
+
+ If compiled as module (M), the module name is nxp-netc-blk-ctrl.
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
index b13cbbabb2ea..f1c5ad45fd76 100644
--- a/drivers/net/ethernet/freescale/enetc/Makefile
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -3,11 +3,21 @@
obj-$(CONFIG_FSL_ENETC_CORE) += fsl-enetc-core.o
fsl-enetc-core-y := enetc.o enetc_cbdr.o enetc_ethtool.o
+obj-$(CONFIG_NXP_ENETC_PF_COMMON) += nxp-enetc-pf-common.o
+nxp-enetc-pf-common-y := enetc_pf_common.o
+
+obj-$(CONFIG_NXP_NETC_LIB) += nxp-netc-lib.o
+nxp-netc-lib-y := ntmp.o
+
obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o
fsl-enetc-y := enetc_pf.o
fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o
fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
+obj-$(CONFIG_NXP_ENETC4) += nxp-enetc4.o
+nxp-enetc4-y := enetc4_pf.o
+nxp-enetc4-$(CONFIG_DEBUG_FS) += enetc4_debugfs.o
+
obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
fsl-enetc-vf-y := enetc_vf.o
@@ -19,3 +29,6 @@ fsl-enetc-mdio-y := enetc_pci_mdio.o enetc_mdio.o
obj-$(CONFIG_FSL_ENETC_PTP_CLOCK) += fsl-enetc-ptp.o
fsl-enetc-ptp-y := enetc_ptp.o
+
+obj-$(CONFIG_NXP_NETC_BLK_CTRL) += nxp-netc-blk-ctrl.o
+nxp-netc-blk-ctrl-y := netc_blk_ctrl.o
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 05dedea6185a..d5e5800b84ef 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -3,6 +3,7 @@
#include "enetc.h"
#include <linux/bpf_trace.h>
+#include <linux/clk.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/vmalloc.h>
@@ -13,25 +14,73 @@
u32 enetc_port_mac_rd(struct enetc_si *si, u32 reg)
{
+ /* ENETC with pseudo MAC does not have Ethernet MAC
+ * port registers.
+ */
+ if (enetc_is_pseudo_mac(si))
+ return 0;
+
return enetc_port_rd(&si->hw, reg);
}
EXPORT_SYMBOL_GPL(enetc_port_mac_rd);
void enetc_port_mac_wr(struct enetc_si *si, u32 reg, u32 val)
{
+ if (enetc_is_pseudo_mac(si))
+ return;
+
enetc_port_wr(&si->hw, reg, val);
if (si->hw_features & ENETC_SI_F_QBU)
- enetc_port_wr(&si->hw, reg + ENETC_PMAC_OFFSET, val);
+ enetc_port_wr(&si->hw, reg + si->drvdata->pmac_offset, val);
}
EXPORT_SYMBOL_GPL(enetc_port_mac_wr);
static void enetc_change_preemptible_tcs(struct enetc_ndev_priv *priv,
u8 preemptible_tcs)
{
+ if (!(priv->si->hw_features & ENETC_SI_F_QBU))
+ return;
+
priv->preemptible_tcs = preemptible_tcs;
enetc_mm_commit_preemptible_tcs(priv);
}
+static int enetc_mac_addr_hash_idx(const u8 *addr)
+{
+ u64 fold = __swab64(ether_addr_to_u64(addr)) >> 16;
+ u64 mask = 0;
+ int res = 0;
+ int i;
+
+ for (i = 0; i < 8; i++)
+ mask |= BIT_ULL(i * 6);
+
+ for (i = 0; i < 6; i++)
+ res |= (hweight64(fold & (mask << i)) & 0x1) << i;
+
+ return res;
+}
+
+void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter,
+ const unsigned char *addr)
+{
+ int idx = enetc_mac_addr_hash_idx(addr);
+
+ /* add hash table entry */
+ __set_bit(idx, filter->mac_hash_table);
+ filter->mac_addr_cnt++;
+}
+EXPORT_SYMBOL_GPL(enetc_add_mac_addr_ht_filter);
+
+void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter)
+{
+ filter->mac_addr_cnt = 0;
+
+ bitmap_zero(filter->mac_hash_table,
+ ENETC_MADDR_HASH_TBL_SZ);
+}
+EXPORT_SYMBOL_GPL(enetc_reset_mac_addr_filter);
+
static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv)
{
int num_tx_rings = priv->num_tx_rings;
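
The hunk above adds a 64-entry MAC address hash filter; enetc_mac_addr_hash_idx() folds the destination MAC into a 6-bit bucket index by taking the parity of every 6th bit of the byte-reversed address. A standalone, illustration-only sketch of that computation (mac_hash_idx(), main() and the sample address are invented; the byte-by-byte fold loop is equivalent to the kernel's __swab64(ether_addr_to_u64(addr)) >> 16):

#include <stdint.h>
#include <stdio.h>

static unsigned int mac_hash_idx(const uint8_t addr[6])
{
	uint64_t fold = 0, mask = 0;
	unsigned int res = 0;
	int i;

	/* reverse the six address bytes into the low 48 bits of 'fold' */
	for (i = 0; i < 6; i++)
		fold |= (uint64_t)addr[i] << (8 * i);

	/* one mask bit every 6 positions: bits 0, 6, 12, ..., 42 */
	for (i = 0; i < 8; i++)
		mask |= 1ULL << (i * 6);

	/* result bit i = parity of fold bits i, i+6, ..., i+42 */
	for (i = 0; i < 6; i++)
		res |= (__builtin_popcountll(fold & (mask << i)) & 0x1) << i;

	return res;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x04, 0x9f, 0x03, 0x0d, 0x2a };

	/* prints a bucket index in the range 0..63 */
	printf("hash index: %u\n", mac_hash_idx(mac));
	return 0;
}
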
@@ -142,22 +191,181 @@ static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp,
return 0;
}
+static bool enetc_tx_csum_offload_check(struct sk_buff *skb)
+{
+ switch (skb->csum_offset) {
+ case offsetof(struct tcphdr, check):
+ case offsetof(struct udphdr, check):
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool enetc_skb_is_ipv6(struct sk_buff *skb)
+{
+ return vlan_get_protocol(skb) == htons(ETH_P_IPV6);
+}
+
+static bool enetc_skb_is_tcp(struct sk_buff *skb)
+{
+ return skb->csum_offset == offsetof(struct tcphdr, check);
+}
+
+/**
+ * enetc_unwind_tx_frame() - Unwind the DMA mappings of a multi-buffer Tx frame
+ * @tx_ring: Pointer to the Tx ring on which the buffer descriptors are located
+ * @count: Number of Tx buffer descriptors which need to be unmapped
+ * @i: Index of the last successfully mapped Tx buffer descriptor
+ */
+static void enetc_unwind_tx_frame(struct enetc_bdr *tx_ring, int count, int i)
+{
+ while (count--) {
+ struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
+
+ enetc_free_tx_frame(tx_ring, tx_swbd);
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+ }
+}
+
+static void enetc_set_one_step_ts(struct enetc_si *si, bool udp, int offset)
+{
+ u32 val = ENETC_PM0_SINGLE_STEP_EN;
+
+ val |= ENETC_SET_SINGLE_STEP_OFFSET(offset);
+ if (udp)
+ val |= ENETC_PM0_SINGLE_STEP_CH;
+
+ /* The "Correction" field of a packet is updated based on the
+ * current time and the timestamp provided
+ */
+ enetc_port_mac_wr(si, ENETC_PM0_SINGLE_STEP, val);
+}
+
+static void enetc4_set_one_step_ts(struct enetc_si *si, bool udp, int offset)
+{
+ u32 val = PM_SINGLE_STEP_EN;
+
+ val |= PM_SINGLE_STEP_OFFSET_SET(offset);
+ if (udp)
+ val |= PM_SINGLE_STEP_CH;
+
+ enetc_port_mac_wr(si, ENETC4_PM_SINGLE_STEP(0), val);
+}
+
+static u32 enetc_update_ptp_sync_msg(struct enetc_ndev_priv *priv,
+ struct sk_buff *skb, bool csum_offload)
+{
+ struct enetc_skb_cb *enetc_cb = ENETC_SKB_CB(skb);
+ u16 tstamp_off = enetc_cb->origin_tstamp_off;
+ u16 corr_off = enetc_cb->correction_off;
+ struct enetc_si *si = priv->si;
+ struct enetc_hw *hw = &si->hw;
+ __be32 new_sec_l, new_nsec;
+ __be16 new_sec_h;
+ u32 lo, hi, nsec;
+ u8 *data;
+ u64 sec;
+
+ lo = enetc_rd_hot(hw, ENETC_SICTR0);
+ hi = enetc_rd_hot(hw, ENETC_SICTR1);
+ sec = (u64)hi << 32 | lo;
+ nsec = do_div(sec, 1000000000);
+
+ /* Update originTimestamp field of Sync packet
+ * - 48 bits seconds field
+ * - 32 bits nanoseconds field
+ *
+ * In addition, if csum_offload is false, the UDP checksum needs
+ * to be updated by software after updating originTimestamp field,
+ * otherwise the hardware will calculate the wrong checksum when
+ * updating the correction field and update it to the packet.
+ */
+
+ data = skb_mac_header(skb);
+ new_sec_h = htons((sec >> 32) & 0xffff);
+ new_sec_l = htonl(sec & 0xffffffff);
+ new_nsec = htonl(nsec);
+ if (enetc_cb->udp && !csum_offload) {
+ struct udphdr *uh = udp_hdr(skb);
+ __be32 old_sec_l, old_nsec;
+ __be16 old_sec_h;
+
+ old_sec_h = *(__be16 *)(data + tstamp_off);
+ inet_proto_csum_replace2(&uh->check, skb, old_sec_h,
+ new_sec_h, false);
+
+ old_sec_l = *(__be32 *)(data + tstamp_off + 2);
+ inet_proto_csum_replace4(&uh->check, skb, old_sec_l,
+ new_sec_l, false);
+
+ old_nsec = *(__be32 *)(data + tstamp_off + 6);
+ inet_proto_csum_replace4(&uh->check, skb, old_nsec,
+ new_nsec, false);
+ }
+
+ *(__be16 *)(data + tstamp_off) = new_sec_h;
+ *(__be32 *)(data + tstamp_off + 2) = new_sec_l;
+ *(__be32 *)(data + tstamp_off + 6) = new_nsec;
+
+ /* Configure single-step register */
+ if (is_enetc_rev1(si))
+ enetc_set_one_step_ts(si, enetc_cb->udp, corr_off);
+ else
+ enetc4_set_one_step_ts(si, enetc_cb->udp, corr_off);
+
+ return lo & ENETC_TXBD_TSTAMP;
+}
+
static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false;
struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
- struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_skb_cb *enetc_cb = ENETC_SKB_CB(skb);
struct enetc_tx_swbd *tx_swbd;
int len = skb_headlen(skb);
union enetc_tx_bd temp_bd;
- u8 msgtype, twostep, udp;
+ bool csum_offload = false;
union enetc_tx_bd *txbd;
- u16 offset1, offset2;
int i, count = 0;
skb_frag_t *frag;
unsigned int f;
dma_addr_t dma;
u8 flags = 0;
+ u32 tstamp;
+
+ enetc_clear_tx_bd(&temp_bd);
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ /* Cannot support TSD and checksum offload at the same time */
+ if (priv->active_offloads & ENETC_F_TXCSUM &&
+ enetc_tx_csum_offload_check(skb) && !tx_ring->tsd_enable) {
+ temp_bd.l3_aux0 = FIELD_PREP(ENETC_TX_BD_L3_START,
+ skb_network_offset(skb));
+ temp_bd.l3_aux1 = FIELD_PREP(ENETC_TX_BD_L3_HDR_LEN,
+ skb_network_header_len(skb) / 4);
+ temp_bd.l3_aux1 |= FIELD_PREP(ENETC_TX_BD_L3T,
+ enetc_skb_is_ipv6(skb));
+ if (enetc_skb_is_tcp(skb))
+ temp_bd.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T,
+ ENETC_TXBD_L4T_TCP);
+ else
+ temp_bd.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T,
+ ENETC_TXBD_L4T_UDP);
+ flags |= ENETC_TXBD_FLAGS_CSUM_LSO | ENETC_TXBD_FLAGS_L4CS;
+ csum_offload = true;
+ } else if (skb_checksum_help(skb)) {
+ return 0;
+ }
+ }
+
+ if (enetc_cb->flag & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
+ do_onestep_tstamp = true;
+ tstamp = enetc_update_ptp_sync_msg(priv, skb, csum_offload);
+ } else if (enetc_cb->flag & ENETC_F_TX_TSTAMP) {
+ do_twostep_tstamp = true;
+ }
i = tx_ring->next_to_use;
txbd = ENETC_TXBD(*tx_ring, i);
@@ -169,7 +377,6 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
temp_bd.addr = cpu_to_le64(dma);
temp_bd.buf_len = cpu_to_le16(len);
- temp_bd.lstatus = 0;
tx_swbd = &tx_ring->tx_swbd[i];
tx_swbd->dma = dma;
@@ -179,17 +386,6 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
count++;
do_vlan = skb_vlan_tag_present(skb);
- if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
- if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep, &offset1,
- &offset2) ||
- msgtype != PTP_MSGTYPE_SYNC || twostep)
- WARN_ONCE(1, "Bad packet for one-step timestamping\n");
- else
- do_onestep_tstamp = true;
- } else if (skb->cb[0] & ENETC_F_TX_TSTAMP) {
- do_twostep_tstamp = true;
- }
-
tx_swbd->do_twostep_tstamp = do_twostep_tstamp;
tx_swbd->qbv_en = !!(priv->active_offloads & ENETC_F_QBV);
tx_swbd->check_wb = tx_swbd->do_twostep_tstamp || tx_swbd->qbv_en;
@@ -232,38 +428,9 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
}
if (do_onestep_tstamp) {
- u32 lo, hi, val;
- u64 sec, nsec;
- u8 *data;
-
- lo = enetc_rd_hot(hw, ENETC_SICTR0);
- hi = enetc_rd_hot(hw, ENETC_SICTR1);
- sec = (u64)hi << 32 | lo;
- nsec = do_div(sec, 1000000000);
-
/* Configure extension BD */
- temp_bd.ext.tstamp = cpu_to_le32(lo & 0x3fffffff);
+ temp_bd.ext.tstamp = cpu_to_le32(tstamp);
e_flags |= ENETC_TXBD_E_FLAGS_ONE_STEP_PTP;
-
- /* Update originTimestamp field of Sync packet
- * - 48 bits seconds field
- * - 32 bits nanseconds field
- */
- data = skb_mac_header(skb);
- *(__be16 *)(data + offset2) =
- htons((sec >> 32) & 0xffff);
- *(__be32 *)(data + offset2 + 2) =
- htonl(sec & 0xffffffff);
- *(__be32 *)(data + offset2 + 6) = htonl(nsec);
-
- /* Configure single-step register */
- val = ENETC_PM0_SINGLE_STEP_EN;
- val |= ENETC_SET_SINGLE_STEP_OFFSET(offset1);
- if (udp)
- val |= ENETC_PM0_SINGLE_STEP_CH;
-
- enetc_port_mac_wr(priv->si, ENETC_PM0_SINGLE_STEP,
- val);
} else if (do_twostep_tstamp) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
@@ -325,25 +492,20 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
dma_err:
dev_err(tx_ring->dev, "DMA map error");
- do {
- tx_swbd = &tx_ring->tx_swbd[i];
- enetc_free_tx_frame(tx_ring, tx_swbd);
- if (i == 0)
- i = tx_ring->bd_count;
- i--;
- } while (count--);
+ enetc_unwind_tx_frame(tx_ring, count, i);
return 0;
}
-static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
- struct enetc_tx_swbd *tx_swbd,
- union enetc_tx_bd *txbd, int *i, int hdr_len,
- int data_len)
+static int enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ struct enetc_tx_swbd *tx_swbd,
+ union enetc_tx_bd *txbd, int *i, int hdr_len,
+ int data_len)
{
union enetc_tx_bd txbd_tmp;
u8 flags = 0, e_flags = 0;
dma_addr_t addr;
+ int count = 1;
enetc_clear_tx_bd(&txbd_tmp);
addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE;
@@ -386,7 +548,10 @@ static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
/* Write the BD */
txbd_tmp.ext.e_flags = e_flags;
*txbd = txbd_tmp;
+ count++;
}
+
+ return count;
}
static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb,
@@ -485,8 +650,233 @@ static void enetc_tso_complete_csum(struct enetc_bdr *tx_ring, struct tso_t *tso
}
}
+static int enetc_lso_count_descs(const struct sk_buff *skb)
+{
+ /* 4 BDs: 1 BD for the LSO header + 1 BD for the extension BD + 1 BD
+ * for the linear data area excluding the LSO header, namely
+ * skb_headlen(skb) - lso_hdr_len (this may be 0, but that's
+ * okay, we only need to consider the worst case), and 1 BD
+ * for the gap.
+ */
+ return skb_shinfo(skb)->nr_frags + 4;
+}
+
+static int enetc_lso_get_hdr_len(const struct sk_buff *skb)
+{
+ int hdr_len, tlen;
+
+ tlen = skb_is_gso_tcp(skb) ? tcp_hdrlen(skb) : sizeof(struct udphdr);
+ hdr_len = skb_transport_offset(skb) + tlen;
+
+ return hdr_len;
+}
+
+static void enetc_lso_start(struct sk_buff *skb, struct enetc_lso_t *lso)
+{
+ lso->lso_seg_size = skb_shinfo(skb)->gso_size;
+ lso->ipv6 = enetc_skb_is_ipv6(skb);
+ lso->tcp = skb_is_gso_tcp(skb);
+ lso->l3_hdr_len = skb_network_header_len(skb);
+ lso->l3_start = skb_network_offset(skb);
+ lso->hdr_len = enetc_lso_get_hdr_len(skb);
+ lso->total_len = skb->len - lso->hdr_len;
+}
+
+static void enetc_lso_map_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ int *i, struct enetc_lso_t *lso)
+{
+ union enetc_tx_bd txbd_tmp, *txbd;
+ struct enetc_tx_swbd *tx_swbd;
+ u16 frm_len, frm_len_ext;
+ u8 flags, e_flags = 0;
+ dma_addr_t addr;
+ char *hdr;
+
+ /* Get the first BD of the LSO BDs chain */
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+
+ /* Prepare LSO header: MAC + IP + TCP/UDP */
+ hdr = tx_ring->tso_headers + *i * TSO_HEADER_SIZE;
+ memcpy(hdr, skb->data, lso->hdr_len);
+ addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE;
+
+ /* {frm_len_ext, frm_len} indicates the total length of
+ * the large transmit data unit. frm_len contains the 16 least
+ * significant bits and frm_len_ext contains the 4 most
+ * significant bits.
+ */
+ frm_len = lso->total_len & 0xffff;
+ frm_len_ext = (lso->total_len >> 16) & 0xf;
+
+ /* Set the flags of the first BD */
+ flags = ENETC_TXBD_FLAGS_EX | ENETC_TXBD_FLAGS_CSUM_LSO |
+ ENETC_TXBD_FLAGS_LSO | ENETC_TXBD_FLAGS_L4CS;
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ txbd_tmp.addr = cpu_to_le64(addr);
+ txbd_tmp.hdr_len = cpu_to_le16(lso->hdr_len);
+
+ /* first BD needs frm_len and offload flags set */
+ txbd_tmp.frm_len = cpu_to_le16(frm_len);
+ txbd_tmp.flags = flags;
+
+ txbd_tmp.l3_aux0 = FIELD_PREP(ENETC_TX_BD_L3_START, lso->l3_start);
+ /* l3_hdr_size in 32-bits (4 bytes) */
+ txbd_tmp.l3_aux1 = FIELD_PREP(ENETC_TX_BD_L3_HDR_LEN,
+ lso->l3_hdr_len / 4);
+ if (lso->ipv6)
+ txbd_tmp.l3_aux1 |= ENETC_TX_BD_L3T;
+ else
+ txbd_tmp.l3_aux0 |= ENETC_TX_BD_IPCS;
+
+ txbd_tmp.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T, lso->tcp ?
+ ENETC_TXBD_L4T_TCP : ENETC_TXBD_L4T_UDP);
+
+ /* For the LSO header we do not set the dma address since
+ * we do not want it unmapped when we do cleanup. We still
+ * set len so that we count the bytes sent.
+ */
+ tx_swbd->len = lso->hdr_len;
+ tx_swbd->do_twostep_tstamp = false;
+ tx_swbd->check_wb = false;
+
+ /* Actually write the header in the BD */
+ *txbd = txbd_tmp;
+
+ /* Move to the next BD, which is the extension BD */
+ enetc_bdr_idx_inc(tx_ring, i);
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ if (skb_vlan_tag_present(skb)) {
+ /* Setup the VLAN fields */
+ txbd_tmp.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
+ txbd_tmp.ext.tpid = ENETC_TPID_8021Q;
+ e_flags = ENETC_TXBD_E_FLAGS_VLAN_INS;
+ }
+
+ /* Write the BD */
+ txbd_tmp.ext.e_flags = e_flags;
+ txbd_tmp.ext.lso_sg_size = cpu_to_le16(lso->lso_seg_size);
+ txbd_tmp.ext.frm_len_ext = cpu_to_le16(frm_len_ext);
+ *txbd = txbd_tmp;
+}
+
+static int enetc_lso_map_data(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ int *i, struct enetc_lso_t *lso, int *count)
+{
+ union enetc_tx_bd txbd_tmp, *txbd = NULL;
+ struct enetc_tx_swbd *tx_swbd;
+ skb_frag_t *frag;
+ dma_addr_t dma;
+ u8 flags = 0;
+ int len, f;
+
+ len = skb_headlen(skb) - lso->hdr_len;
+ if (len > 0) {
+ dma = dma_map_single(tx_ring->dev, skb->data + lso->hdr_len,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ return -ENOMEM;
+
+ enetc_bdr_idx_inc(tx_ring, i);
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+ *count += 1;
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ txbd_tmp.addr = cpu_to_le64(dma);
+ txbd_tmp.buf_len = cpu_to_le16(len);
+
+ tx_swbd->dma = dma;
+ tx_swbd->len = len;
+ tx_swbd->is_dma_page = 0;
+ tx_swbd->dir = DMA_TO_DEVICE;
+ }
+
+ frag = &skb_shinfo(skb)->frags[0];
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
+ if (txbd)
+ *txbd = txbd_tmp;
+
+ len = skb_frag_size(frag);
+ dma = skb_frag_dma_map(tx_ring->dev, frag);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ return -ENOMEM;
+
+ /* Get the next BD */
+ enetc_bdr_idx_inc(tx_ring, i);
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+ *count += 1;
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ txbd_tmp.addr = cpu_to_le64(dma);
+ txbd_tmp.buf_len = cpu_to_le16(len);
+
+ tx_swbd->dma = dma;
+ tx_swbd->len = len;
+ tx_swbd->is_dma_page = 1;
+ tx_swbd->dir = DMA_TO_DEVICE;
+ }
+
+ /* Last BD needs 'F' bit set */
+ flags |= ENETC_TXBD_FLAGS_F;
+ txbd_tmp.flags = flags;
+ *txbd = txbd_tmp;
+
+ tx_swbd->is_eof = 1;
+ tx_swbd->skb = skb;
+
+ return 0;
+}
+
+static int enetc_lso_hw_offload(struct enetc_bdr *tx_ring, struct sk_buff *skb)
+{
+ struct enetc_tx_swbd *tx_swbd;
+ struct enetc_lso_t lso = {0};
+ int err, i, count = 0;
+
+ /* Initialize the LSO handler */
+ enetc_lso_start(skb, &lso);
+ i = tx_ring->next_to_use;
+
+ enetc_lso_map_hdr(tx_ring, skb, &i, &lso);
+ /* First BD plus an extension BD */
+ count += 2;
+
+ err = enetc_lso_map_data(tx_ring, skb, &i, &lso, &count);
+ if (err)
+ goto dma_err;
+
+ /* Go to the next BD */
+ enetc_bdr_idx_inc(tx_ring, &i);
+ tx_ring->next_to_use = i;
+ enetc_update_tx_ring_tail(tx_ring);
+
+ return count;
+
+dma_err:
+ do {
+ tx_swbd = &tx_ring->tx_swbd[i];
+ enetc_free_tx_frame(tx_ring, tx_swbd);
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+ } while (--count);
+
+ return 0;
+}
+
static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
+ struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
int hdr_len, total_len, data_len;
struct enetc_tx_swbd *tx_swbd;
union enetc_tx_bd *txbd;
@@ -518,9 +908,9 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb
/* compute the csum over the L4 header */
csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos);
- enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len);
+ count += enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd,
+ &i, hdr_len, data_len);
bd_data_num = 0;
- count++;
while (data_len > 0) {
int size;
@@ -544,15 +934,20 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb
err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd,
tso.data, size,
size == data_len);
- if (err)
+ if (err) {
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+
goto err_map_data;
+ }
data_len -= size;
count++;
bd_data_num++;
tso_build_data(skb, &tso, size);
- if (unlikely(bd_data_num >= ENETC_MAX_SKB_FRAGS && data_len))
+ if (unlikely(bd_data_num >= priv->max_frags && data_len))
goto err_chained_bd;
}
@@ -574,13 +969,7 @@ err_map_data:
dev_err(tx_ring->dev, "DMA map error");
err_chained_bd:
- do {
- tx_swbd = &tx_ring->tx_swbd[i];
- enetc_free_tx_frame(tx_ring, tx_swbd);
- if (i == 0)
- i = tx_ring->bd_count;
- i--;
- } while (count--);
+ enetc_unwind_tx_frame(tx_ring, count, i);
return 0;
}
@@ -588,12 +977,13 @@ err_chained_bd:
static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
+ struct enetc_skb_cb *enetc_cb = ENETC_SKB_CB(skb);
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_bdr *tx_ring;
- int count, err;
+ int count;
/* Queue one-step Sync packet if already locked */
- if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
+ if (enetc_cb->flag & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
if (test_and_set_bit_lock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS,
&priv->flags)) {
skb_queue_tail(&priv->tx_skbs, skb);
@@ -604,16 +994,28 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
tx_ring = priv->tx_ring[skb->queue_mapping];
if (skb_is_gso(skb)) {
- if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) {
- netif_stop_subqueue(ndev, tx_ring->index);
- return NETDEV_TX_BUSY;
- }
+ /* LSO data unit lengths of up to 256KB are supported */
+ if (priv->active_offloads & ENETC_F_LSO &&
+ (skb->len - enetc_lso_get_hdr_len(skb)) <=
+ ENETC_LSO_MAX_DATA_LEN) {
+ if (enetc_bd_unused(tx_ring) < enetc_lso_count_descs(skb)) {
+ netif_stop_subqueue(ndev, tx_ring->index);
+ return NETDEV_TX_BUSY;
+ }
- enetc_lock_mdio();
- count = enetc_map_tx_tso_buffs(tx_ring, skb);
- enetc_unlock_mdio();
+ count = enetc_lso_hw_offload(tx_ring, skb);
+ } else {
+ if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) {
+ netif_stop_subqueue(ndev, tx_ring->index);
+ return NETDEV_TX_BUSY;
+ }
+
+ enetc_lock_mdio();
+ count = enetc_map_tx_tso_buffs(tx_ring, skb);
+ enetc_unlock_mdio();
+ }
} else {
- if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
+ if (unlikely(skb_shinfo(skb)->nr_frags > priv->max_frags))
if (unlikely(skb_linearize(skb)))
goto drop_packet_err;
@@ -623,11 +1025,6 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- err = skb_checksum_help(skb);
- if (err)
- goto drop_packet_err;
- }
enetc_lock_mdio();
count = enetc_map_tx_buffs(tx_ring, skb);
enetc_unlock_mdio();
@@ -636,7 +1033,7 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
if (unlikely(!count))
goto drop_packet_err;
- if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
+ if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED(priv->max_frags))
netif_stop_subqueue(ndev, tx_ring->index);
return NETDEV_TX_OK;
@@ -648,24 +1045,29 @@ drop_packet_err:
netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
{
+ struct enetc_skb_cb *enetc_cb = ENETC_SKB_CB(skb);
struct enetc_ndev_priv *priv = netdev_priv(ndev);
u8 udp, msgtype, twostep;
u16 offset1, offset2;
- /* Mark tx timestamp type on skb->cb[0] if requires */
+ /* Mark tx timestamp type on enetc_cb->flag if required */
if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- (priv->active_offloads & ENETC_F_TX_TSTAMP_MASK)) {
- skb->cb[0] = priv->active_offloads & ENETC_F_TX_TSTAMP_MASK;
- } else {
- skb->cb[0] = 0;
- }
+ (priv->active_offloads & ENETC_F_TX_TSTAMP_MASK))
+ enetc_cb->flag = priv->active_offloads & ENETC_F_TX_TSTAMP_MASK;
+ else
+ enetc_cb->flag = 0;
/* Fall back to two-step timestamp if not one-step Sync packet */
- if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
+ if (enetc_cb->flag & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep,
&offset1, &offset2) ||
- msgtype != PTP_MSGTYPE_SYNC || twostep != 0)
- skb->cb[0] = ENETC_F_TX_TSTAMP;
+ msgtype != PTP_MSGTYPE_SYNC || twostep != 0) {
+ enetc_cb->flag = ENETC_F_TX_TSTAMP;
+ } else {
+ enetc_cb->udp = !!udp;
+ enetc_cb->correction_off = offset1;
+ enetc_cb->origin_tstamp_off = offset2;
+ }
}
return enetc_start_xmit(skb, ndev);
@@ -700,8 +1102,9 @@ static void enetc_rx_dim_work(struct work_struct *w)
net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
struct enetc_int_vector *v =
container_of(dim, struct enetc_int_vector, rx_dim);
+ struct enetc_ndev_priv *priv = netdev_priv(v->rx_ring.ndev);
- v->rx_ictt = enetc_usecs_to_cycles(moder.usec);
+ v->rx_ictt = enetc_usecs_to_cycles(moder.usec, priv->sysclk_freq);
dim->state = DIM_START_MEASURE;
}
@@ -856,7 +1259,9 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
if (xdp_frame) {
xdp_return_frame(xdp_frame);
} else if (skb) {
- if (unlikely(skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) {
+ struct enetc_skb_cb *enetc_cb = ENETC_SKB_CB(skb);
+
+ if (unlikely(enetc_cb->flag & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) {
/* Start work to release lock for next one-step
* timestamping packet. And send one skb in
* tx_skbs queue if has.
@@ -903,7 +1308,8 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
__netif_subqueue_stopped(ndev, tx_ring->index) &&
!test_bit(ENETC_TX_DOWN, &priv->flags) &&
- (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
+ (enetc_bd_unused(tx_ring) >=
+ ENETC_TXBDS_MAX_NEEDED(priv->max_frags)))) {
netif_wake_subqueue(ndev, tx_ring->index);
}
@@ -1016,6 +1422,7 @@ static void enetc_get_offloads(struct enetc_bdr *rx_ring,
}
if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) {
+ struct enetc_hw *hw = &priv->si->hw;
__be16 tpid = 0;
switch (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TPID) {
@@ -1026,22 +1433,18 @@ static void enetc_get_offloads(struct enetc_bdr *rx_ring,
tpid = htons(ETH_P_8021AD);
break;
case 2:
- tpid = htons(enetc_port_rd(&priv->si->hw,
- ENETC_PCVLANR1));
+ tpid = htons(enetc_rd_hot(hw, ENETC_SICVLANR1) &
+ SICVLANR_ETYPE);
break;
case 3:
- tpid = htons(enetc_port_rd(&priv->si->hw,
- ENETC_PCVLANR2));
- break;
- default:
- break;
+ tpid = htons(enetc_rd_hot(hw, ENETC_SICVLANR2) &
+ SICVLANR_ETYPE);
}
__vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt));
}
- if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK) &&
- (priv->active_offloads & ENETC_F_RX_TSTAMP))
+ if (priv->active_offloads & ENETC_F_RX_TSTAMP)
enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb);
}
@@ -1201,6 +1604,8 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
/* next descriptor to process */
i = rx_ring->next_to_clean;
+ enetc_lock_mdio();
+
while (likely(rx_frm_cnt < work_limit)) {
union enetc_rx_bd *rxbd;
struct sk_buff *skb;
@@ -1236,7 +1641,9 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
rx_byte_cnt += skb->len + ETH_HLEN;
rx_frm_cnt++;
+ enetc_unlock_mdio();
napi_gro_receive(napi, skb);
+ enetc_lock_mdio();
}
rx_ring->next_to_clean = i;
@@ -1244,6 +1651,8 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
rx_ring->stats.packets += rx_frm_cnt;
rx_ring->stats.bytes += rx_byte_cnt;
+ enetc_unlock_mdio();
+
return rx_frm_cnt;
}
@@ -1527,6 +1936,16 @@ static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first,
}
}
+static void enetc_bulk_flip_buff(struct enetc_bdr *rx_ring, int rx_ring_first,
+ int rx_ring_last)
+{
+ while (rx_ring_first != rx_ring_last) {
+ enetc_flip_rx_buff(rx_ring,
+ &rx_ring->rx_swbd[rx_ring_first]);
+ enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
+ }
+}
+
static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
struct napi_struct *napi, int work_limit,
struct bpf_prog *prog)
@@ -1543,13 +1962,14 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
/* next descriptor to process */
i = rx_ring->next_to_clean;
+ enetc_lock_mdio();
+
while (likely(rx_frm_cnt < work_limit)) {
union enetc_rx_bd *rxbd, *orig_rxbd;
- int orig_i, orig_cleaned_cnt;
struct xdp_buff xdp_buff;
struct sk_buff *skb;
+ int orig_i, err;
u32 bd_status;
- int err;
rxbd = enetc_rxbd(rx_ring, i);
bd_status = le32_to_cpu(rxbd->r.lstatus);
@@ -1564,7 +1984,6 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
break;
orig_rxbd = rxbd;
- orig_cleaned_cnt = cleaned_cnt;
orig_i = i;
enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i,
@@ -1592,17 +2011,25 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
rx_ring->stats.xdp_drops++;
break;
case XDP_PASS:
- rxbd = orig_rxbd;
- cleaned_cnt = orig_cleaned_cnt;
- i = orig_i;
-
- skb = enetc_build_skb(rx_ring, bd_status, &rxbd,
- &i, &cleaned_cnt,
- ENETC_RXB_DMA_SIZE_XDP);
- if (unlikely(!skb))
+ skb = xdp_build_skb_from_buff(&xdp_buff);
+ /* Probably under memory pressure, stop NAPI */
+ if (unlikely(!skb)) {
+ enetc_xdp_drop(rx_ring, orig_i, i);
+ rx_ring->stats.xdp_drops++;
goto out;
+ }
+
+ enetc_get_offloads(rx_ring, orig_rxbd, skb);
+ /* These buffers are about to be owned by the stack.
+ * Update our buffer cache (the rx_swbd array elements)
+ * with their other page halves.
+ */
+ enetc_bulk_flip_buff(rx_ring, orig_i, i);
+
+ enetc_unlock_mdio();
napi_gro_receive(napi, skb);
+ enetc_lock_mdio();
break;
case XDP_TX:
tx_ring = priv->xdp_tx_ring[rx_ring->index];
@@ -1620,7 +2047,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
enetc_xdp_drop(rx_ring, orig_i, i);
tx_ring->stats.xdp_tx_drops++;
} else {
- tx_ring->stats.xdp_tx += xdp_tx_bd_cnt;
+ tx_ring->stats.xdp_tx++;
rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt;
xdp_tx_frm_cnt++;
/* The XDP_TX enqueue was successful, so we
@@ -1637,16 +2064,14 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
}
break;
case XDP_REDIRECT:
+ enetc_unlock_mdio();
err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog);
+ enetc_lock_mdio();
if (unlikely(err)) {
enetc_xdp_drop(rx_ring, orig_i, i);
rx_ring->stats.xdp_redirect_failures++;
} else {
- while (orig_i != i) {
- enetc_flip_rx_buff(rx_ring,
- &rx_ring->rx_swbd[orig_i]);
- enetc_bdr_idx_inc(rx_ring, &orig_i);
- }
+ enetc_bulk_flip_buff(rx_ring, orig_i, i);
xdp_redirect_frm_cnt++;
rx_ring->stats.xdp_redirect++;
}
@@ -1661,8 +2086,11 @@ out:
rx_ring->stats.packets += rx_frm_cnt;
rx_ring->stats.bytes += rx_byte_cnt;
- if (xdp_redirect_frm_cnt)
+ if (xdp_redirect_frm_cnt) {
+ enetc_unlock_mdio();
xdp_do_flush();
+ enetc_lock_mdio();
+ }
if (xdp_tx_frm_cnt)
enetc_update_tx_ring_tail(tx_ring);
@@ -1671,6 +2099,8 @@ out:
enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) -
rx_ring->xdp.xdp_tx_in_flight);
+ enetc_unlock_mdio();
+
return rx_frm_cnt;
}
@@ -1689,6 +2119,7 @@ static int enetc_poll(struct napi_struct *napi, int budget)
for (i = 0; i < v->count_tx_rings; i++)
if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
complete = false;
+ enetc_unlock_mdio();
prog = rx_ring->xdp.prog;
if (prog)
@@ -1700,10 +2131,8 @@ static int enetc_poll(struct napi_struct *napi, int budget)
if (work_done)
v->rx_napi_work = true;
- if (!complete) {
- enetc_unlock_mdio();
+ if (!complete)
return budget;
- }
napi_complete_done(napi, work_done);
@@ -1712,6 +2141,7 @@ static int enetc_poll(struct napi_struct *napi, int budget)
v->rx_napi_work = false;
+ enetc_lock_mdio();
/* enable interrupts */
enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
@@ -1736,9 +2166,15 @@ void enetc_get_si_caps(struct enetc_si *si)
si->num_rx_rings = (val >> 16) & 0xff;
si->num_tx_rings = val & 0xff;
- val = enetc_rd(hw, ENETC_SIRFSCAPR);
- si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val);
- si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE);
+ val = enetc_rd(hw, ENETC_SIPCAPR0);
+ if (val & ENETC_SIPCAPR0_RFS) {
+ val = enetc_rd(hw, ENETC_SIRFSCAPR);
+ si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val);
+ si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE);
+ } else {
+ /* ENETC that does not support RFS */
+ si->num_fs_entries = 0;
+ }
si->num_rss = 0;
val = enetc_rd(hw, ENETC_SIPCAPR0);
@@ -1749,14 +2185,8 @@ void enetc_get_si_caps(struct enetc_si *si)
si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss);
}
- if (val & ENETC_SIPCAPR0_QBV)
- si->hw_features |= ENETC_SI_F_QBV;
-
- if (val & ENETC_SIPCAPR0_QBU)
- si->hw_features |= ENETC_SI_F_QBU;
-
- if (val & ENETC_SIPCAPR0_PSFP)
- si->hw_features |= ENETC_SI_F_PSFP;
+ if (val & ENETC_SIPCAPR0_LSO)
+ si->hw_features |= ENETC_SI_F_LSO;
}
EXPORT_SYMBOL_GPL(enetc_get_si_caps);
@@ -2046,13 +2476,35 @@ static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
for (i = 0; i < si->num_rss; i++)
rss_table[i] = i % num_groups;
- enetc_set_rss_table(si, rss_table, si->num_rss);
+ si->ops->set_rss_table(si, rss_table, si->num_rss);
kfree(rss_table);
return 0;
}
+static void enetc_set_lso_flags_mask(struct enetc_hw *hw)
+{
+ enetc_wr(hw, ENETC4_SILSOSFMR0,
+ SILSOSFMR0_VAL_SET(ENETC4_TCP_NL_SEG_FLAGS_DMASK,
+ ENETC4_TCP_NL_SEG_FLAGS_DMASK));
+ enetc_wr(hw, ENETC4_SILSOSFMR1, 0);
+}
+
+static void enetc_set_rss(struct net_device *ndev, int en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ u32 reg;
+
+ enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);
+
+ reg = enetc_rd(hw, ENETC_SIMR);
+ reg &= ~ENETC_SIMR_RSSE;
+ reg |= (en) ? ENETC_SIMR_RSSE : 0;
+ enetc_wr(hw, ENETC_SIMR, reg);
+}
+
int enetc_configure_si(struct enetc_ndev_priv *priv)
{
struct enetc_si *si = priv->si;
@@ -2066,10 +2518,16 @@ int enetc_configure_si(struct enetc_ndev_priv *priv)
/* enable SI */
enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);
+ if (si->hw_features & ENETC_SI_F_LSO)
+ enetc_set_lso_flags_mask(hw);
+
if (si->num_rss) {
err = enetc_setup_default_rss_table(si, priv->num_rx_rings);
if (err)
return err;
+
+ if (priv->ndev->features & NETIF_F_RXHASH)
+ enetc_set_rss(priv->ndev, true);
}
return 0;
@@ -2090,9 +2548,9 @@ void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
*/
priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings);
priv->num_tx_rings = si->num_tx_rings;
- priv->bdr_int_num = cpus;
+ priv->bdr_int_num = priv->num_rx_rings;
priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL;
- priv->tx_ictt = ENETC_TXIC_TIMETHR;
+ priv->tx_ictt = enetc_usecs_to_cycles(600, priv->sysclk_freq);
}
EXPORT_SYMBOL_GPL(enetc_init_si_rings_params);
@@ -2501,10 +2959,14 @@ int enetc_open(struct net_device *ndev)
extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
- err = enetc_setup_irqs(priv);
+ err = clk_prepare_enable(priv->ref_clk);
if (err)
return err;
+ err = enetc_setup_irqs(priv);
+ if (err)
+ goto err_setup_irqs;
+
err = enetc_phylink_connect(ndev);
if (err)
goto err_phy_connect;
@@ -2536,6 +2998,8 @@ err_alloc_tx:
phylink_disconnect_phy(priv->phylink);
err_phy_connect:
enetc_free_irqs(priv);
+err_setup_irqs:
+ clk_disable_unprepare(priv->ref_clk);
return err;
}
@@ -2589,6 +3053,7 @@ int enetc_close(struct net_device *ndev)
enetc_assign_tx_resources(priv, NULL);
enetc_free_irqs(priv);
+ clk_disable_unprepare(priv->ref_clk);
return 0;
}
@@ -2855,22 +3320,6 @@ struct net_device_stats *enetc_get_stats(struct net_device *ndev)
}
EXPORT_SYMBOL_GPL(enetc_get_stats);
-static int enetc_set_rss(struct net_device *ndev, int en)
-{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_hw *hw = &priv->si->hw;
- u32 reg;
-
- enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);
-
- reg = enetc_rd(hw, ENETC_SIMR);
- reg &= ~ENETC_SIMR_RSSE;
- reg |= (en) ? ENETC_SIMR_RSSE : 0;
- enetc_wr(hw, ENETC_SIMR, reg);
-
- return 0;
-}
-
static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -2908,16 +3357,17 @@ void enetc_set_features(struct net_device *ndev, netdev_features_t features)
}
EXPORT_SYMBOL_GPL(enetc_set_features);
-static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
+int enetc_hwtstamp_set(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
int err, new_offloads = priv->active_offloads;
- struct hwtstamp_config config;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ if (!enetc_ptp_clock_is_enabled(priv->si))
+ return -EOPNOTSUPP;
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
break;
@@ -2926,6 +3376,10 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
new_offloads |= ENETC_F_TX_TSTAMP;
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
+ if (!enetc_si_is_pf(priv->si) ||
+ enetc_is_pseudo_mac(priv->si))
+ return -EOPNOTSUPP;
+
new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
new_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP;
break;
@@ -2933,13 +3387,13 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
new_offloads &= ~ENETC_F_RX_TSTAMP;
break;
default:
new_offloads |= ENETC_F_RX_TSTAMP;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
}
if ((new_offloads ^ priv->active_offloads) & ENETC_F_RX_TSTAMP) {
@@ -2952,42 +3406,36 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
priv->active_offloads = new_offloads;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
+EXPORT_SYMBOL_GPL(enetc_hwtstamp_set);
-static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
+int enetc_hwtstamp_get(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct hwtstamp_config config;
- config.flags = 0;
+ if (!enetc_ptp_clock_is_enabled(priv->si))
+ return -EOPNOTSUPP;
if (priv->active_offloads & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)
- config.tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
+ config->tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
else if (priv->active_offloads & ENETC_F_TX_TSTAMP)
- config.tx_type = HWTSTAMP_TX_ON;
+ config->tx_type = HWTSTAMP_TX_ON;
else
- config.tx_type = HWTSTAMP_TX_OFF;
+ config->tx_type = HWTSTAMP_TX_OFF;
- config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ?
- HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+ config->rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
+EXPORT_SYMBOL_GPL(enetc_hwtstamp_get);
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK)) {
- if (cmd == SIOCSHWTSTAMP)
- return enetc_hwtstamp_set(ndev, rq);
- if (cmd == SIOCGHWTSTAMP)
- return enetc_hwtstamp_get(ndev, rq);
- }
-
if (!priv->phylink)
return -EOPNOTSUPP;
@@ -2995,13 +3443,100 @@ int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
}
EXPORT_SYMBOL_GPL(enetc_ioctl);
+static int enetc_int_vector_init(struct enetc_ndev_priv *priv, int i,
+ int v_tx_rings)
+{
+ struct enetc_int_vector *v;
+ struct enetc_bdr *bdr;
+ int j, err;
+
+ v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
+ if (!v)
+ return -ENOMEM;
+
+ priv->int_vector[i] = v;
+ bdr = &v->rx_ring;
+ bdr->index = i;
+ bdr->ndev = priv->ndev;
+ bdr->dev = priv->dev;
+ bdr->bd_count = priv->rx_bd_count;
+ bdr->buffer_offset = ENETC_RXB_PAD;
+ priv->rx_ring[i] = bdr;
+
+ err = __xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0,
+ ENETC_RXB_DMA_SIZE_XDP);
+ if (err)
+ goto free_vector;
+
+ err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq, MEM_TYPE_PAGE_SHARED,
+ NULL);
+ if (err) {
+ xdp_rxq_info_unreg(&bdr->xdp.rxq);
+ goto free_vector;
+ }
+
+ /* init defaults for adaptive IC */
+ if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
+ v->rx_ictt = 0x1;
+ v->rx_dim_en = true;
+ }
+
+ INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
+ netif_napi_add(priv->ndev, &v->napi, enetc_poll);
+ v->count_tx_rings = v_tx_rings;
+
+ for (j = 0; j < v_tx_rings; j++) {
+ int idx;
+
+ /* default tx ring mapping policy */
+ idx = priv->bdr_int_num * j + i;
+ __set_bit(idx, &v->tx_rings_map);
+ bdr = &v->tx_ring[j];
+ bdr->index = idx;
+ bdr->ndev = priv->ndev;
+ bdr->dev = priv->dev;
+ bdr->bd_count = priv->tx_bd_count;
+ priv->tx_ring[idx] = bdr;
+ }
+
+ return 0;
+
+free_vector:
+ priv->rx_ring[i] = NULL;
+ priv->int_vector[i] = NULL;
+ kfree(v);
+
+ return err;
+}
+
+static void enetc_int_vector_destroy(struct enetc_ndev_priv *priv, int i)
+{
+ struct enetc_int_vector *v = priv->int_vector[i];
+ struct enetc_bdr *rx_ring = &v->rx_ring;
+ int j, tx_ring_index;
+
+ xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
+ xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
+ netif_napi_del(&v->napi);
+ cancel_work_sync(&v->rx_dim.work);
+
+ for (j = 0; j < v->count_tx_rings; j++) {
+ tx_ring_index = priv->bdr_int_num * j + i;
+ priv->tx_ring[tx_ring_index] = NULL;
+ }
+
+ priv->rx_ring[i] = NULL;
+ priv->int_vector[i] = NULL;
+ kfree(v);
+}
+
int enetc_alloc_msix(struct enetc_ndev_priv *priv)
{
struct pci_dev *pdev = priv->si->pdev;
+ int v_tx_rings, v_remainder;
int num_stack_tx_queues;
int first_xdp_tx_ring;
int i, n, err, nvec;
- int v_tx_rings;
nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
/* allocate MSIX for both messaging and Rx/Tx interrupts */
@@ -3015,64 +3550,17 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
/* # of tx rings per int vector */
v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
+ v_remainder = priv->num_tx_rings % priv->bdr_int_num;
for (i = 0; i < priv->bdr_int_num; i++) {
- struct enetc_int_vector *v;
- struct enetc_bdr *bdr;
- int j;
-
- v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
- if (!v) {
- err = -ENOMEM;
- goto fail;
- }
-
- priv->int_vector[i] = v;
-
- bdr = &v->rx_ring;
- bdr->index = i;
- bdr->ndev = priv->ndev;
- bdr->dev = priv->dev;
- bdr->bd_count = priv->rx_bd_count;
- bdr->buffer_offset = ENETC_RXB_PAD;
- priv->rx_ring[i] = bdr;
-
- err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
- if (err) {
- kfree(v);
- goto fail;
- }
+ /* Distribute the remaining TX rings to the first v_remainder
+ * interrupt vectors
+ */
+ int num_tx_rings = i < v_remainder ? v_tx_rings + 1 : v_tx_rings;
- err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq,
- MEM_TYPE_PAGE_SHARED, NULL);
- if (err) {
- xdp_rxq_info_unreg(&bdr->xdp.rxq);
- kfree(v);
+ err = enetc_int_vector_init(priv, i, num_tx_rings);
+ if (err)
goto fail;
- }
-
- /* init defaults for adaptive IC */
- if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
- v->rx_ictt = 0x1;
- v->rx_dim_en = true;
- }
- INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
- netif_napi_add(priv->ndev, &v->napi, enetc_poll);
- v->count_tx_rings = v_tx_rings;
-
- for (j = 0; j < v_tx_rings; j++) {
- int idx;
-
- /* default tx ring mapping policy */
- idx = priv->bdr_int_num * j + i;
- __set_bit(idx, &v->tx_rings_map);
- bdr = &v->tx_ring[j];
- bdr->index = idx;
- bdr->ndev = priv->ndev;
- bdr->dev = priv->dev;
- bdr->bd_count = priv->tx_bd_count;
- priv->tx_ring[idx] = bdr;
- }
}
num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
@@ -3092,16 +3580,8 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
return 0;
fail:
- while (i--) {
- struct enetc_int_vector *v = priv->int_vector[i];
- struct enetc_bdr *rx_ring = &v->rx_ring;
-
- xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
- xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
- netif_napi_del(&v->napi);
- cancel_work_sync(&v->rx_dim.work);
- kfree(v);
- }
+ while (i--)
+ enetc_int_vector_destroy(priv, i);
pci_free_irq_vectors(pdev);
@@ -3113,26 +3593,8 @@ void enetc_free_msix(struct enetc_ndev_priv *priv)
{
int i;
- for (i = 0; i < priv->bdr_int_num; i++) {
- struct enetc_int_vector *v = priv->int_vector[i];
- struct enetc_bdr *rx_ring = &v->rx_ring;
-
- xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
- xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
- netif_napi_del(&v->napi);
- cancel_work_sync(&v->rx_dim.work);
- }
-
- for (i = 0; i < priv->num_rx_rings; i++)
- priv->rx_ring[i] = NULL;
-
- for (i = 0; i < priv->num_tx_rings; i++)
- priv->tx_ring[i] = NULL;
-
- for (i = 0; i < priv->bdr_int_num; i++) {
- kfree(priv->int_vector[i]);
- priv->int_vector[i] = NULL;
- }
+ for (i = 0; i < priv->bdr_int_num; i++)
+ enetc_int_vector_destroy(priv, i);
/* disable all MSIX for this device */
pci_free_irq_vectors(priv->si->pdev);
@@ -3241,5 +3703,75 @@ void enetc_pci_remove(struct pci_dev *pdev)
}
EXPORT_SYMBOL_GPL(enetc_pci_remove);
+static const struct enetc_drvdata enetc_pf_data = {
+ .sysclk_freq = ENETC_CLK_400M,
+ .pmac_offset = ENETC_PMAC_OFFSET,
+ .max_frags = ENETC_MAX_SKB_FRAGS,
+ .eth_ops = &enetc_pf_ethtool_ops,
+};
+
+static const struct enetc_drvdata enetc4_pf_data = {
+ .sysclk_freq = ENETC_CLK_333M,
+ .tx_csum = true,
+ .max_frags = ENETC4_MAX_SKB_FRAGS,
+ .pmac_offset = ENETC4_PMAC_OFFSET,
+ .eth_ops = &enetc4_pf_ethtool_ops,
+};
+
+static const struct enetc_drvdata enetc4_ppm_data = {
+ .sysclk_freq = ENETC_CLK_333M,
+ .tx_csum = true,
+ .max_frags = ENETC4_MAX_SKB_FRAGS,
+ .eth_ops = &enetc4_ppm_ethtool_ops,
+};
+
+static const struct enetc_drvdata enetc_vf_data = {
+ .sysclk_freq = ENETC_CLK_400M,
+ .max_frags = ENETC_MAX_SKB_FRAGS,
+ .eth_ops = &enetc_vf_ethtool_ops,
+};
+
+static const struct enetc_platform_info enetc_info[] = {
+ { .revision = ENETC_REV_1_0,
+ .dev_id = ENETC_DEV_ID_PF,
+ .data = &enetc_pf_data,
+ },
+ { .revision = ENETC_REV_4_1,
+ .dev_id = NXP_ENETC_PF_DEV_ID,
+ .data = &enetc4_pf_data,
+ },
+ { .revision = ENETC_REV_1_0,
+ .dev_id = ENETC_DEV_ID_VF,
+ .data = &enetc_vf_data,
+ },
+ {
+ .revision = ENETC_REV_4_3,
+ .dev_id = NXP_ENETC_PPM_DEV_ID,
+ .data = &enetc4_ppm_data,
+ },
+ { .revision = ENETC_REV_4_3,
+ .dev_id = NXP_ENETC_PF_DEV_ID,
+ .data = &enetc4_pf_data,
+ },
+};
+
+int enetc_get_driver_data(struct enetc_si *si)
+{
+ u16 dev_id = si->pdev->device;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(enetc_info); i++) {
+ if (si->revision == enetc_info[i].revision &&
+ dev_id == enetc_info[i].dev_id) {
+ si->drvdata = enetc_info[i].data;
+
+ return 0;
+ }
+ }
+
+ return -ERANGE;
+}
+EXPORT_SYMBOL_GPL(enetc_get_driver_data);
+
MODULE_DESCRIPTION("NXP ENETC Ethernet driver");
MODULE_LICENSE("Dual BSD/GPL");
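
One detail of the LSO path added above: the large-send data unit length is carried as a 20-bit value split across two BD fields, frm_len (16 least significant bits) and frm_len_ext (4 most significant bits), which is consistent with ENETC_LSO_MAX_DATA_LEN being capped at 256 KiB. A standalone sketch of the split (illustration only; the sample length is invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t total_len = 200 * 1024;		/* example LSO payload length */
	uint16_t frm_len = total_len & 0xffff;		/* 16 LSB */
	uint16_t frm_len_ext = (total_len >> 16) & 0xf;	/* next 4 bits */
	uint32_t rebuilt = ((uint32_t)frm_len_ext << 16) | frm_len;

	/* 204800 -> frm_len=0x2000, frm_len_ext=0x3, rebuilt=204800 */
	printf("frm_len=0x%04x frm_len_ext=0x%x rebuilt=%u\n",
	       frm_len, frm_len_ext, rebuilt);
	return 0;
}
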
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index fb7d98d57783..dce27bd67a7d 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -8,12 +8,14 @@
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
+#include <linux/fsl/ntmp.h>
#include <linux/if_vlan.h>
#include <linux/phylink.h>
#include <linux/dim.h>
#include <net/xdp.h>
#include "enetc_hw.h"
+#include "enetc4_hw.h"
#define ENETC_MAC_MAXFRM_SIZE 9600
#define ENETC_MAX_MTU (ENETC_MAC_MAXFRM_SIZE - \
@@ -21,6 +23,18 @@
#define ENETC_CBD_DATA_MEM_ALIGN 64
+#define ENETC_MADDR_HASH_TBL_SZ 64
+
+enum enetc_mac_addr_type {UC, MC, MADDR_TYPE};
+
+struct enetc_mac_filter {
+ union {
+ char mac_addr[ETH_ALEN];
+ DECLARE_BITMAP(mac_hash_table, ENETC_MADDR_HASH_TBL_SZ);
+ };
+ int mac_addr_cnt;
+};
+
struct enetc_tx_swbd {
union {
struct sk_buff *skb;
@@ -40,8 +54,29 @@ struct enetc_tx_swbd {
u8 qbv_en:1;
};
+struct enetc_skb_cb {
+ u8 flag;
+ bool udp;
+ u16 correction_off;
+ u16 origin_tstamp_off;
+};
+
+#define ENETC_SKB_CB(skb) ((struct enetc_skb_cb *)((skb)->cb))
+
+struct enetc_lso_t {
+ bool ipv6;
+ bool tcp;
+ u8 l3_hdr_len;
+ u8 hdr_len; /* LSO header length */
+ u8 l3_start;
+ u16 lso_seg_size;
+ int total_len; /* total data length, not including the LSO header */
+};
+
+#define ENETC_LSO_MAX_DATA_LEN SZ_256K
+
#define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE
-#define ENETC_RXB_TRUESIZE 2048 /* PAGE_SIZE >> 1 */
+#define ENETC_RXB_TRUESIZE (PAGE_SIZE >> 1)
#define ENETC_RXB_PAD NET_SKB_PAD /* add extra space if needed */
#define ENETC_RXB_DMA_SIZE \
(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD)
@@ -58,22 +93,29 @@ struct enetc_rx_swbd {
/* ENETC overhead: optional extension BD + 1 BD gap */
#define ENETC_TXBDS_NEEDED(val) ((val) + 2)
-/* max # of chained Tx BDs is 15, including head and extension BD */
+/* For LS1028A, max # of chained Tx BDs is 15, including head and
+ * extension BD.
+ */
#define ENETC_MAX_SKB_FRAGS 13
-#define ENETC_TXBDS_MAX_NEEDED ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
+/* For ENETC v4 and later versions, max # of chained Tx BDs is 63,
+ * including head and extension BD, but the range of MAX_SKB_FRAGS
+ * is 17 ~ 45, so set ENETC4_MAX_SKB_FRAGS to MAX_SKB_FRAGS.
+ */
+#define ENETC4_MAX_SKB_FRAGS MAX_SKB_FRAGS
+#define ENETC_TXBDS_MAX_NEEDED(x) ENETC_TXBDS_NEEDED((x) + 1)
struct enetc_ring_stats {
- unsigned int packets;
- unsigned int bytes;
- unsigned int rx_alloc_errs;
- unsigned int xdp_drops;
- unsigned int xdp_tx;
- unsigned int xdp_tx_drops;
- unsigned int xdp_redirect;
- unsigned int xdp_redirect_failures;
- unsigned int recycles;
- unsigned int recycle_failures;
- unsigned int win_drop;
+ unsigned long packets;
+ unsigned long bytes;
+ unsigned long rx_alloc_errs;
+ unsigned long xdp_drops;
+ unsigned long xdp_tx;
+ unsigned long xdp_tx_drops;
+ unsigned long xdp_redirect;
+ unsigned long xdp_redirect_failures;
+ unsigned long recycles;
+ unsigned long recycle_failures;
+ unsigned long win_drop;
};
struct enetc_xdp_data {
@@ -184,7 +226,7 @@ static inline union enetc_rx_bd *enetc_rxbd(struct enetc_bdr *rx_ring, int i)
{
int hw_idx = i;
- if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK) && rx_ring->ext_en)
+ if (rx_ring->ext_en)
hw_idx = 2 * i;
return &(((union enetc_rx_bd *)rx_ring->bd_base)[hw_idx]);
@@ -198,7 +240,7 @@ static inline void enetc_rxbd_next(struct enetc_bdr *rx_ring,
new_rxbd++;
- if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK) && rx_ring->ext_en)
+ if (rx_ring->ext_en)
new_rxbd++;
if (unlikely(++new_index == rx_ring->bd_count)) {
@@ -230,6 +272,35 @@ enum enetc_errata {
#define ENETC_SI_F_PSFP BIT(0)
#define ENETC_SI_F_QBV BIT(1)
#define ENETC_SI_F_QBU BIT(2)
+#define ENETC_SI_F_LSO BIT(3)
+#define ENETC_SI_F_PPM BIT(4) /* pseudo MAC */
+
+struct enetc_drvdata {
+ u32 pmac_offset; /* Only valid for PSI which supports 802.1Qbu */
+ u8 tx_csum:1;
+ u8 max_frags;
+ u64 sysclk_freq;
+ const struct ethtool_ops *eth_ops;
+};
+
+struct enetc_platform_info {
+ u16 revision;
+ u16 dev_id;
+ const struct enetc_drvdata *data;
+};
+
+struct enetc_si;
+
+/*
+ * This structure defines some common hooks for the ENETC PSI and VSI.
+ * In addition, since the VSI only uses struct enetc_si as its private
+ * driver data, this structure also defines some hooks specifically
+ * for the VSI. VSI-specific hooks use the 'vf_*()' naming format.
+ */
+struct enetc_si_ops {
+ int (*get_rss_table)(struct enetc_si *si, u32 *table, int count);
+ int (*set_rss_table)(struct enetc_si *si, const u32 *table, int count);
+};
/* PCI IEP device data */
struct enetc_si {
@@ -239,18 +310,33 @@ struct enetc_si {
struct net_device *ndev; /* back ref. */
- struct enetc_cbdr cbd_ring;
+ union {
+ struct enetc_cbdr cbd_ring; /* Only ENETC 1.0 */
+ struct ntmp_user ntmp_user; /* ENETC 4.1 and later */
+ };
int num_rx_rings; /* how many rings are available in the SI */
int num_tx_rings;
int num_fs_entries;
int num_rss; /* number of RSS buckets */
unsigned short pad;
+ u16 revision;
int hw_features;
+ const struct enetc_drvdata *drvdata;
+ const struct enetc_si_ops *ops;
+
+ struct workqueue_struct *workqueue;
+ struct work_struct rx_mode_task;
+ struct dentry *debugfs_root;
};
#define ENETC_SI_ALIGN 32
+static inline bool is_enetc_rev1(struct enetc_si *si)
+{
+ return si->pdev->revision == ENETC_REV1;
+}
+
static inline void *enetc_si_priv(const struct enetc_si *si)
{
return (char *)si + ALIGN(sizeof(struct enetc_si), ENETC_SI_ALIGN);
@@ -277,6 +363,11 @@ static inline int enetc_pf_to_port(struct pci_dev *pf_pdev)
}
}
+static inline bool enetc_is_pseudo_mac(struct enetc_si *si)
+{
+ return si->hw_features & ENETC_SI_F_PPM;
+}
+
#define ENETC_MAX_NUM_TXQS 8
#define ENETC_INT_NAME_MAX (IFNAMSIZ + 8)
@@ -302,7 +393,7 @@ struct enetc_cls_rule {
int used;
};
-#define ENETC_MAX_BDR_INT 2 /* fixed to max # of available cpus */
+#define ENETC_MAX_BDR_INT 6 /* fixed to max # of available cpus */
struct psfp_cap {
u32 max_streamid;
u32 max_psfp_filter;
@@ -321,6 +412,8 @@ enum enetc_active_offloads {
ENETC_F_QBV = BIT(9),
ENETC_F_QCI = BIT(10),
ENETC_F_QBU = BIT(11),
+ ENETC_F_TXCSUM = BIT(12),
+ ENETC_F_LSO = BIT(13),
};
enum enetc_flags_bit {
@@ -341,7 +434,6 @@ enum enetc_ic_mode {
#define ENETC_RXIC_PKTTHR min_t(u32, 256, ENETC_RX_RING_DEFAULT_SIZE / 2)
#define ENETC_TXIC_PKTTHR min_t(u32, 128, ENETC_TX_RING_DEFAULT_SIZE / 2)
-#define ENETC_TXIC_TIMETHR enetc_usecs_to_cycles(600)
struct enetc_ndev_priv {
struct net_device *ndev;
@@ -356,6 +448,7 @@ struct enetc_ndev_priv {
u16 msg_enable;
u8 preemptible_tcs;
+ u8 max_frags; /* The maximum number of BDs for fragments */
enum enetc_active_offloads active_offloads;
@@ -389,6 +482,9 @@ struct enetc_ndev_priv {
* and link state updates
*/
struct mutex mm_lock;
+
+ struct clk *ref_clk; /* RGMII/RMII reference clock */
+ u64 sysclk_freq; /* NETC system clock frequency */
};
/* Messaging */
@@ -403,9 +499,6 @@ struct enetc_msg_cmd_set_primary_mac {
#define ENETC_CBDR_TIMEOUT 1000 /* usecs */
-/* PTP driver exports */
-extern int enetc_phc_index;
-
/* SI common */
u32 enetc_port_mac_rd(struct enetc_si *si, u32 reg);
void enetc_port_mac_wr(struct enetc_si *si, u32 reg, u32 val);
@@ -418,6 +511,10 @@ void enetc_init_si_rings_params(struct enetc_ndev_priv *priv);
int enetc_alloc_si_resources(struct enetc_ndev_priv *priv);
void enetc_free_si_resources(struct enetc_ndev_priv *priv);
int enetc_configure_si(struct enetc_ndev_priv *priv);
+int enetc_get_driver_data(struct enetc_si *si);
+void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter,
+ const unsigned char *addr);
+void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter);
int enetc_open(struct net_device *ndev);
int enetc_close(struct net_device *ndev);
@@ -433,7 +530,18 @@ int enetc_setup_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
struct xdp_frame **frames, u32 flags);
+int enetc_hwtstamp_get(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config);
+int enetc_hwtstamp_set(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+
/* ethtool */
+extern const struct ethtool_ops enetc_pf_ethtool_ops;
+extern const struct ethtool_ops enetc4_pf_ethtool_ops;
+extern const struct ethtool_ops enetc_vf_ethtool_ops;
+extern const struct ethtool_ops enetc4_ppm_ethtool_ops;
+
void enetc_set_ethtool_ops(struct net_device *ndev);
void enetc_mm_link_state_update(struct enetc_ndev_priv *priv, bool link);
void enetc_mm_commit_preemptible_tcs(struct enetc_ndev_priv *priv);
@@ -442,15 +550,19 @@ void enetc_mm_commit_preemptible_tcs(struct enetc_ndev_priv *priv);
int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
struct enetc_cbdr *cbdr);
void enetc_teardown_cbdr(struct enetc_cbdr *cbdr);
+int enetc4_setup_cbdr(struct enetc_si *si);
+void enetc4_teardown_cbdr(struct enetc_si *si);
int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
char *mac_addr, int si_map);
int enetc_clear_mac_flt_entry(struct enetc_si *si, int index);
int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
int index);
-void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes);
+void enetc_set_rss_key(struct enetc_si *si, const u8 *bytes);
int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count);
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count);
int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd);
+int enetc4_get_rss_table(struct enetc_si *si, u32 *table, int count);
+int enetc4_set_rss_table(struct enetc_si *si, const u32 *table, int count);
static inline void *enetc_cbd_alloc_data_mem(struct enetc_si *si,
struct enetc_cbd *cbd,
@@ -491,6 +603,14 @@ static inline void enetc_cbd_free_data_mem(struct enetc_si *si, int size,
void enetc_reset_ptcmsdur(struct enetc_hw *hw);
void enetc_set_ptcmsdur(struct enetc_hw *hw, u32 *queue_max_sdu);
+static inline bool enetc_ptp_clock_is_enabled(struct enetc_si *si)
+{
+ if (is_enetc_rev1(si))
+ return IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK);
+
+ return IS_ENABLED(CONFIG_PTP_NETC_V4_TIMER);
+}
+
#ifdef CONFIG_FSL_ENETC_QOS
int enetc_qos_query_caps(struct net_device *ndev, void *type_data);
int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.c b/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.c
new file mode 100644
index 000000000000..1b1591dce73d
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright 2025 NXP */
+
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/string_choices.h>
+
+#include "enetc_pf.h"
+#include "enetc4_debugfs.h"
+
+static void enetc_show_si_mac_hash_filter(struct seq_file *s, int i)
+{
+ struct enetc_si *si = s->private;
+ struct enetc_hw *hw = &si->hw;
+ u32 hash_h, hash_l;
+
+ hash_l = enetc_port_rd(hw, ENETC4_PSIUMHFR0(i));
+ hash_h = enetc_port_rd(hw, ENETC4_PSIUMHFR1(i));
+ seq_printf(s, "SI %d unicast MAC hash filter: 0x%08x%08x\n",
+ i, hash_h, hash_l);
+
+ hash_l = enetc_port_rd(hw, ENETC4_PSIMMHFR0(i));
+ hash_h = enetc_port_rd(hw, ENETC4_PSIMMHFR1(i));
+ seq_printf(s, "SI %d multicast MAC hash filter: 0x%08x%08x\n",
+ i, hash_h, hash_l);
+}
+
+static int enetc_mac_filter_show(struct seq_file *s, void *data)
+{
+ struct enetc_si *si = s->private;
+ struct enetc_hw *hw = &si->hw;
+ struct maft_entry_data maft;
+ struct enetc_pf *pf;
+ int i, err, num_si;
+ u32 val;
+
+ pf = enetc_si_priv(si);
+ num_si = pf->caps.num_vsi + 1;
+
+ val = enetc_port_rd(hw, ENETC4_PSIPMMR);
+ for (i = 0; i < num_si; i++) {
+ seq_printf(s, "SI %d Unicast Promiscuous mode: %s\n", i,
+ str_enabled_disabled(PSIPMMR_SI_MAC_UP(i) & val));
+ seq_printf(s, "SI %d Multicast Promiscuous mode: %s\n", i,
+ str_enabled_disabled(PSIPMMR_SI_MAC_MP(i) & val));
+ }
+
+ /* MAC hash filter table */
+ for (i = 0; i < num_si; i++)
+ enetc_show_si_mac_hash_filter(s, i);
+
+ if (!pf->num_mfe)
+ return 0;
+
+ /* MAC address filter table */
+ seq_puts(s, "MAC address filter table\n");
+ for (i = 0; i < pf->num_mfe; i++) {
+ memset(&maft, 0, sizeof(maft));
+ err = ntmp_maft_query_entry(&si->ntmp_user, i, &maft);
+ if (err)
+ return err;
+
+ seq_printf(s, "Entry %d, MAC: %pM, SI bitmap: 0x%04x\n", i,
+ maft.keye.mac_addr, le16_to_cpu(maft.cfge.si_bitmap));
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(enetc_mac_filter);
+
+void enetc_create_debugfs(struct enetc_si *si)
+{
+ struct net_device *ndev = si->ndev;
+ struct dentry *root;
+
+ root = debugfs_create_dir(netdev_name(ndev), NULL);
+ if (IS_ERR(root))
+ return;
+
+ si->debugfs_root = root;
+
+ debugfs_create_file("mac_filter", 0444, root, si, &enetc_mac_filter_fops);
+}
+
+void enetc_remove_debugfs(struct enetc_si *si)
+{
+ debugfs_remove(si->debugfs_root);
+ si->debugfs_root = NULL;
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.h b/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.h
new file mode 100644
index 000000000000..96caca35f79d
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2025 NXP */
+
+#ifndef __ENETC4_DEBUGFS_H
+#define __ENETC4_DEBUGFS_H
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+void enetc_create_debugfs(struct enetc_si *si);
+void enetc_remove_debugfs(struct enetc_si *si);
+#else
+static inline void enetc_create_debugfs(struct enetc_si *si)
+{
+}
+
+static inline void enetc_remove_debugfs(struct enetc_si *si)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
new file mode 100644
index 000000000000..3ed0f7a02767
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * This header file defines the register offsets and bit fields
+ * of ENETC4 PF and VFs. Note that the same registers as ENETC
+ * version 1.0 are defined in the enetc_hw.h file.
+ *
+ * Copyright 2024 NXP
+ */
+#ifndef __ENETC4_HW_H_
+#define __ENETC4_HW_H_
+
+#define NXP_ENETC_VENDOR_ID 0x1131
+#define NXP_ENETC_PF_DEV_ID 0xe101
+#define NXP_ENETC_PPM_DEV_ID 0xe110
+
+/**********************Station interface registers************************/
+/* Station interface LSO segmentation flag mask register 0/1 */
+#define ENETC4_SILSOSFMR0 0x1300
+#define SILSOSFMR0_TCP_MID_SEG GENMASK(27, 16)
+#define SILSOSFMR0_TCP_1ST_SEG GENMASK(11, 0)
+#define SILSOSFMR0_VAL_SET(first, mid) (FIELD_PREP(SILSOSFMR0_TCP_MID_SEG, mid) | \
+ FIELD_PREP(SILSOSFMR0_TCP_1ST_SEG, first))
+
+#define ENETC4_SILSOSFMR1 0x1304
+#define SILSOSFMR1_TCP_LAST_SEG GENMASK(11, 0)
+#define ENETC4_TCP_FLAGS_FIN BIT(0)
+#define ENETC4_TCP_FLAGS_SYN BIT(1)
+#define ENETC4_TCP_FLAGS_RST BIT(2)
+#define ENETC4_TCP_FLAGS_PSH BIT(3)
+#define ENETC4_TCP_FLAGS_ACK BIT(4)
+#define ENETC4_TCP_FLAGS_URG BIT(5)
+#define ENETC4_TCP_FLAGS_ECE BIT(6)
+#define ENETC4_TCP_FLAGS_CWR BIT(7)
+#define ENETC4_TCP_FLAGS_NS BIT(8)
+/* Per tso_build_hdr(), these special flags are cleared for every segment except the last. */
+#define ENETC4_TCP_NL_SEG_FLAGS_DMASK (ENETC4_TCP_FLAGS_FIN | \
+ ENETC4_TCP_FLAGS_RST | ENETC4_TCP_FLAGS_PSH)
+
+/***************************ENETC port registers**************************/
+#define ENETC4_ECAPR0 0x0
+#define ECAPR0_RFS BIT(2)
+#define ECAPR0_TSD BIT(5)
+#define ECAPR0_RSS BIT(8)
+#define ECAPR0_RSC BIT(9)
+#define ECAPR0_LSO BIT(10)
+#define ECAPR0_WO BIT(13)
+
+#define ENETC4_ECAPR1 0x4
+#define ECAPR1_NUM_TCS GENMASK(6, 4)
+#define ECAPR1_NUM_MCH GENMASK(9, 8)
+#define ECAPR1_NUM_UCH GENMASK(11, 10)
+#define ECAPR1_NUM_MSIX GENMASK(22, 12)
+#define ECAPR1_NUM_VSI GENMASK(27, 24)
+#define ECAPR1_NUM_IPV BIT(31)
+
+#define ENETC4_ECAPR2 0x8
+#define ECAPR2_NUM_TX_BDR GENMASK(9, 0)
+#define ECAPR2_NUM_RX_BDR GENMASK(25, 16)
+
+#define ENETC4_PMR 0x10
+#define PMR_SI_EN(a) BIT((16 + (a)))
+
+/* Port Pause ON/OFF threshold register */
+#define ENETC4_PPAUONTR 0x108
+#define ENETC4_PPAUOFFTR 0x10c
+
+/* Port Station interface promiscuous MAC mode register */
+#define ENETC4_PSIPMMR 0x200
+#define PSIPMMR_SI_MAC_UP(a) BIT(a) /* a = SI index */
+#define PSIPMMR_SI_MAC_MP(a) BIT((a) + 16)
+
+/* Port Station interface promiscuous VLAN mode register */
+#define ENETC4_PSIPVMR 0x204
+
+/* Port RSS key register n. n = 0,1,2,...,9 */
+#define ENETC4_PRSSKR(n) ((n) * 0x4 + 0x250)
+
+/* Port station interface MAC address filtering capability register */
+#define ENETC4_PSIMAFCAPR 0x280
+#define PSIMAFCAPR_NUM_MAC_AFTE GENMASK(11, 0)
+
+/* Port station interface VLAN filtering capability register */
+#define ENETC4_PSIVLANFCAPR 0x2c0
+#define PSIVLANFCAPR_NUM_VLAN_FTE GENMASK(11, 0)
+
+/* Port station interface VLAN filtering mode register */
+#define ENETC4_PSIVLANFMR 0x2c4
+#define PSIVLANFMR_VS BIT(0)
+
+/* Port Station interface a primary MAC address registers */
+#define ENETC4_PSIPMAR0(a) ((a) * 0x80 + 0x2000)
+#define ENETC4_PSIPMAR1(a) ((a) * 0x80 + 0x2004)
+
+/* Port station interface a configuration register 0/2 */
+#define ENETC4_PSICFGR0(a) ((a) * 0x80 + 0x2010)
+#define PSICFGR0_VASE BIT(13)
+#define PSICFGR0_ASE BIT(15)
+#define PSICFGR0_ANTI_SPOOFING (PSICFGR0_VASE | PSICFGR0_ASE)
+
+#define ENETC4_PSICFGR2(a) ((a) * 0x80 + 0x2018)
+#define PSICFGR2_NUM_MSIX GENMASK(5, 0)
+
+/* Port station interface a unicast MAC hash filter register 0/1 */
+#define ENETC4_PSIUMHFR0(a) ((a) * 0x80 + 0x2050)
+#define ENETC4_PSIUMHFR1(a) ((a) * 0x80 + 0x2054)
+
+/* Port station interface a multicast MAC hash filter register 0/1 */
+#define ENETC4_PSIMMHFR0(a) ((a) * 0x80 + 0x2058)
+#define ENETC4_PSIMMHFR1(a) ((a) * 0x80 + 0x205c)
+
+/* Port station interface a VLAN hash filter register 0/1 */
+#define ENETC4_PSIVHFR0(a) ((a) * 0x80 + 0x2060)
+#define ENETC4_PSIVHFR1(a) ((a) * 0x80 + 0x2064)
+
+#define ENETC4_PMCAPR 0x4004
+#define PMCAPR_HD BIT(8)
+#define PMCAPR_FP GENMASK(10, 9)
+
+/* Port capability register */
+#define ENETC4_PCAPR 0x4000
+#define PCAPR_LINK_TYPE BIT(4)
+
+/* Port configuration register */
+#define ENETC4_PCR 0x4010
+#define PCR_HDR_FMT BIT(0)
+#define PCR_L2DOSE BIT(4)
+#define PCR_TIMER_CS BIT(8)
+#define PCR_PSPEED GENMASK(29, 16)
+#define PCR_PSPEED_VAL(speed) (((speed) / 10 - 1) << 16)
+
+/* Port MAC address register 0/1 */
+#define ENETC4_PMAR0 0x4020
+#define ENETC4_PMAR1 0x4024
+
+/* Port operational register */
+#define ENETC4_POR 0x4100
+
+/* Port traffic class a transmit maximum SDU register */
+#define ENETC4_PTCTMSDUR(a) ((a) * 0x20 + 0x4208)
+#define PTCTMSDUR_MAXSDU GENMASK(15, 0)
+#define PTCTMSDUR_SDU_TYPE GENMASK(17, 16)
+#define SDU_TYPE_PPDU 0
+#define SDU_TYPE_MPDU 1
+#define SDU_TYPE_MSDU 2
+
+#define ENETC4_PMAC_OFFSET 0x400
+#define ENETC4_PM_CMD_CFG(mac) (0x5008 + (mac) * 0x400)
+#define PM_CMD_CFG_TX_EN BIT(0)
+#define PM_CMD_CFG_RX_EN BIT(1)
+#define PM_CMD_CFG_PAUSE_FWD BIT(7)
+#define PM_CMD_CFG_PAUSE_IGN BIT(8)
+#define PM_CMD_CFG_TX_ADDR_INS BIT(9)
+#define PM_CMD_CFG_LOOP_EN BIT(10)
+#define PM_CMD_CFG_LPBK_MODE GENMASK(12, 11)
+#define LPBCK_MODE_EXT_TX_CLK 0
+#define LPBCK_MODE_MAC_LEVEL 1
+#define LPBCK_MODE_INT_TX_CLK 2
+#define PM_CMD_CFG_CNT_FRM_EN BIT(13)
+#define PM_CMD_CFG_TXP BIT(15)
+#define PM_CMD_CFG_SEND_IDLE BIT(16)
+#define PM_CMD_CFG_HD_FCEN BIT(18)
+#define PM_CMD_CFG_SFD BIT(21)
+#define PM_CMD_CFG_TX_FLUSH BIT(22)
+#define PM_CMD_CFG_TX_LOWP_EN BIT(23)
+#define PM_CMD_CFG_RX_LOWP_EMPTY BIT(24)
+#define PM_CMD_CFG_SWR BIT(26)
+#define PM_CMD_CFG_TS_MODE BIT(30)
+#define PM_CMD_CFG_MG BIT(31)
+
+/* Port MAC 0/1 Maximum Frame Length Register */
+#define ENETC4_PM_MAXFRM(mac) (0x5014 + (mac) * 0x400)
+
+/* Port internal MDIO base address, used to access the PCS */
+#define ENETC4_PM_IMDIO_BASE 0x5030
+
+/* Port MAC 0/1 Pause Quanta Register */
+#define ENETC4_PM_PAUSE_QUANTA(mac) (0x5054 + (mac) * 0x400)
+
+/* Port MAC 0/1 Pause Quanta Threshold Register */
+#define ENETC4_PM_PAUSE_THRESH(mac) (0x5064 + (mac) * 0x400)
+
+#define ENETC4_PM_SINGLE_STEP(mac) (0x50c0 + (mac) * 0x400)
+#define PM_SINGLE_STEP_CH BIT(6)
+#define PM_SINGLE_STEP_OFFSET GENMASK(15, 7)
+#define PM_SINGLE_STEP_OFFSET_SET(o) FIELD_PREP(PM_SINGLE_STEP_OFFSET, o)
+#define PM_SINGLE_STEP_EN BIT(31)
+
+/* Port MAC 0 Interface Mode Control Register */
+#define ENETC4_PM_IF_MODE(mac) (0x5300 + (mac) * 0x400)
+#define PM_IF_MODE_IFMODE GENMASK(2, 0)
+#define IFMODE_XGMII 0
+#define IFMODE_RMII 3
+#define IFMODE_RGMII 4
+#define IFMODE_SGMII 5
+#define PM_IF_MODE_REVMII BIT(3)
+#define PM_IF_MODE_M10 BIT(4)
+#define PM_IF_MODE_HD BIT(6)
+#define PM_IF_MODE_SSP GENMASK(14, 13)
+#define SSP_100M 0
+#define SSP_10M 1
+#define SSP_1G 2
+#define PM_IF_MODE_ENA BIT(15)
+
+/* Port external MDIO base address, used to access the off-chip PHY */
+#define ENETC4_EMDIO_BASE 0x5c00
+
+/**********************ENETC Pseudo MAC port registers************************/
+/* Port pseudo MAC receive octets counter (64-bit) */
+#define ENETC4_PPMROCR 0x5080
+
+/* Port pseudo MAC receive unicast frame counter register (64-bit) */
+#define ENETC4_PPMRUFCR 0x5088
+
+/* Port pseudo MAC receive multicast frame counter register (64-bit) */
+#define ENETC4_PPMRMFCR 0x5090
+
+/* Port pseudo MAC receive broadcast frame counter register (64-bit) */
+#define ENETC4_PPMRBFCR 0x5098
+
+/* Port pseudo MAC transmit octets counter (64-bit) */
+#define ENETC4_PPMTOCR 0x50c0
+
+/* Port pseudo MAC transmit unicast frame counter register (64-bit) */
+#define ENETC4_PPMTUFCR 0x50c8
+
+/* Port pseudo MAC transmit multicast frame counter register (64-bit) */
+#define ENETC4_PPMTMFCR 0x50d0
+
+/* Port pseudo MAC transmit broadcast frame counter register (64-bit) */
+#define ENETC4_PPMTBFCR 0x50d8
+
+#endif
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
new file mode 100644
index 000000000000..498346dd996a
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
@@ -0,0 +1,1102 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2024 NXP */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/unaligned.h>
+
+#include "enetc_pf_common.h"
+#include "enetc4_debugfs.h"
+
+#define ENETC_SI_MAX_RING_NUM 8
+
+#define ENETC_MAC_FILTER_TYPE_UC BIT(0)
+#define ENETC_MAC_FILTER_TYPE_MC BIT(1)
+#define ENETC_MAC_FILTER_TYPE_ALL (ENETC_MAC_FILTER_TYPE_UC | \
+ ENETC_MAC_FILTER_TYPE_MC)
+
+struct enetc_mac_addr {
+ u8 addr[ETH_ALEN];
+};
+
+static void enetc4_get_port_caps(struct enetc_pf *pf)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+ u32 val;
+
+ val = enetc_port_rd(hw, ENETC4_ECAPR1);
+ pf->caps.num_vsi = (val & ECAPR1_NUM_VSI) >> 24;
+ pf->caps.num_msix = ((val & ECAPR1_NUM_MSIX) >> 12) + 1;
+
+ val = enetc_port_rd(hw, ENETC4_ECAPR2);
+ pf->caps.num_rx_bdr = (val & ECAPR2_NUM_RX_BDR) >> 16;
+ pf->caps.num_tx_bdr = val & ECAPR2_NUM_TX_BDR;
+
+ val = enetc_port_rd(hw, ENETC4_PMCAPR);
+ pf->caps.half_duplex = (val & PMCAPR_HD) ? 1 : 0;
+
+ val = enetc_port_rd(hw, ENETC4_PSIMAFCAPR);
+ pf->caps.mac_filter_num = val & PSIMAFCAPR_NUM_MAC_AFTE;
+}
+
+static void enetc4_get_psi_hw_features(struct enetc_si *si)
+{
+ struct enetc_hw *hw = &si->hw;
+ u32 val;
+
+ val = enetc_port_rd(hw, ENETC4_PCAPR);
+ if (val & PCAPR_LINK_TYPE)
+ si->hw_features |= ENETC_SI_F_PPM;
+}
+
+static void enetc4_pf_set_si_primary_mac(struct enetc_hw *hw, int si,
+ const u8 *addr)
+{
+ u16 lower = get_unaligned_le16(addr + 4);
+ u32 upper = get_unaligned_le32(addr);
+
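+	/* SI 0 is the PF's own station interface: its primary MAC address
+	 * lives in the port MAC address registers (PMAR0/1) rather than in
+	 * the per-SI PSIPMAR registers used for the VSIs.
+	 */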
+ if (si != 0) {
+ __raw_writel(upper, hw->port + ENETC4_PSIPMAR0(si));
+ __raw_writew(lower, hw->port + ENETC4_PSIPMAR1(si));
+ } else {
+ __raw_writel(upper, hw->port + ENETC4_PMAR0);
+ __raw_writew(lower, hw->port + ENETC4_PMAR1);
+ }
+}
+
+static void enetc4_pf_get_si_primary_mac(struct enetc_hw *hw, int si,
+ u8 *addr)
+{
+ u32 upper;
+ u16 lower;
+
+ upper = __raw_readl(hw->port + ENETC4_PSIPMAR0(si));
+ lower = __raw_readw(hw->port + ENETC4_PSIPMAR1(si));
+
+ put_unaligned_le32(upper, addr);
+ put_unaligned_le16(lower, addr + 4);
+}
+
+static void enetc4_pf_set_si_mac_promisc(struct enetc_hw *hw, int si,
+ bool uc_promisc, bool mc_promisc)
+{
+ u32 val = enetc_port_rd(hw, ENETC4_PSIPMMR);
+
+ if (uc_promisc)
+ val |= PSIPMMR_SI_MAC_UP(si);
+ else
+ val &= ~PSIPMMR_SI_MAC_UP(si);
+
+ if (mc_promisc)
+ val |= PSIPMMR_SI_MAC_MP(si);
+ else
+ val &= ~PSIPMMR_SI_MAC_MP(si);
+
+ enetc_port_wr(hw, ENETC4_PSIPMMR, val);
+}
+
+static void enetc4_pf_set_si_uc_hash_filter(struct enetc_hw *hw, int si,
+ u64 hash)
+{
+ enetc_port_wr(hw, ENETC4_PSIUMHFR0(si), lower_32_bits(hash));
+ enetc_port_wr(hw, ENETC4_PSIUMHFR1(si), upper_32_bits(hash));
+}
+
+static void enetc4_pf_set_si_mc_hash_filter(struct enetc_hw *hw, int si,
+ u64 hash)
+{
+ enetc_port_wr(hw, ENETC4_PSIMMHFR0(si), lower_32_bits(hash));
+ enetc_port_wr(hw, ENETC4_PSIMMHFR1(si), upper_32_bits(hash));
+}
+
+static void enetc4_pf_set_loopback(struct net_device *ndev, bool en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_si *si = priv->si;
+ u32 val;
+
+ val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
+ val = u32_replace_bits(val, en ? 1 : 0, PM_CMD_CFG_LOOP_EN);
+ /* Select MAC-level loopback mode by default when loopback is enabled. */
+ val = u32_replace_bits(val, en ? LPBCK_MODE_MAC_LEVEL : 0,
+ PM_CMD_CFG_LPBK_MODE);
+
+ enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);
+}
+
+static void enetc4_pf_clear_maft_entries(struct enetc_pf *pf)
+{
+ int i;
+
+ for (i = 0; i < pf->num_mfe; i++)
+ ntmp_maft_delete_entry(&pf->si->ntmp_user, i);
+
+ pf->num_mfe = 0;
+}
+
+static int enetc4_pf_add_maft_entries(struct enetc_pf *pf,
+ struct enetc_mac_addr *mac,
+ int mac_cnt)
+{
+ struct maft_entry_data maft = {};
+ u16 si_bit = BIT(0);
+ int i, err;
+
+ maft.cfge.si_bitmap = cpu_to_le16(si_bit);
+ for (i = 0; i < mac_cnt; i++) {
+ ether_addr_copy(maft.keye.mac_addr, mac[i].addr);
+ err = ntmp_maft_add_entry(&pf->si->ntmp_user, i, &maft);
+ if (unlikely(err)) {
+ pf->num_mfe = i;
+ goto clear_maft_entries;
+ }
+ }
+
+ pf->num_mfe = mac_cnt;
+
+ return 0;
+
+clear_maft_entries:
+ enetc4_pf_clear_maft_entries(pf);
+
+ return err;
+}
+
+static int enetc4_pf_set_uc_exact_filter(struct enetc_pf *pf)
+{
+ int max_num_mfe = pf->caps.mac_filter_num;
+ struct enetc_mac_filter mac_filter = {};
+ struct net_device *ndev = pf->si->ndev;
+ struct enetc_hw *hw = &pf->si->hw;
+ struct enetc_mac_addr *mac_tbl;
+ struct netdev_hw_addr *ha;
+ int i = 0, err;
+ int mac_cnt;
+
+ netif_addr_lock_bh(ndev);
+
+ mac_cnt = netdev_uc_count(ndev);
+ if (!mac_cnt) {
+ netif_addr_unlock_bh(ndev);
+ /* clear both MAC hash and exact filters */
+ enetc4_pf_set_si_uc_hash_filter(hw, 0, 0);
+ enetc4_pf_clear_maft_entries(pf);
+
+ return 0;
+ }
+
+ if (mac_cnt > max_num_mfe) {
+ err = -ENOSPC;
+ goto unlock_netif_addr;
+ }
+
+ mac_tbl = kcalloc(mac_cnt, sizeof(*mac_tbl), GFP_ATOMIC);
+ if (!mac_tbl) {
+ err = -ENOMEM;
+ goto unlock_netif_addr;
+ }
+
+ netdev_for_each_uc_addr(ha, ndev) {
+ enetc_add_mac_addr_ht_filter(&mac_filter, ha->addr);
+ ether_addr_copy(mac_tbl[i++].addr, ha->addr);
+ }
+
+ netif_addr_unlock_bh(ndev);
+
+ /* Install a temporary unicast hash filter to avoid Rx loss while
+ * the MAC address filter table is being updated.
+ */
+ enetc4_pf_set_si_uc_hash_filter(hw, 0, *mac_filter.mac_hash_table);
+ enetc4_pf_clear_maft_entries(pf);
+
+ if (!enetc4_pf_add_maft_entries(pf, mac_tbl, i))
+ enetc4_pf_set_si_uc_hash_filter(hw, 0, 0);
+
+ kfree(mac_tbl);
+
+ return 0;
+
+unlock_netif_addr:
+ netif_addr_unlock_bh(ndev);
+
+ return err;
+}
+
+static void enetc4_pf_set_mac_hash_filter(struct enetc_pf *pf, int type)
+{
+ struct net_device *ndev = pf->si->ndev;
+ struct enetc_mac_filter *mac_filter;
+ struct enetc_hw *hw = &pf->si->hw;
+ struct netdev_hw_addr *ha;
+
+ netif_addr_lock_bh(ndev);
+ if (type & ENETC_MAC_FILTER_TYPE_UC) {
+ mac_filter = &pf->mac_filter[UC];
+ enetc_reset_mac_addr_filter(mac_filter);
+ netdev_for_each_uc_addr(ha, ndev)
+ enetc_add_mac_addr_ht_filter(mac_filter, ha->addr);
+
+ enetc4_pf_set_si_uc_hash_filter(hw, 0,
+ *mac_filter->mac_hash_table);
+ }
+
+ if (type & ENETC_MAC_FILTER_TYPE_MC) {
+ mac_filter = &pf->mac_filter[MC];
+ enetc_reset_mac_addr_filter(mac_filter);
+ netdev_for_each_mc_addr(ha, ndev)
+ enetc_add_mac_addr_ht_filter(mac_filter, ha->addr);
+
+ enetc4_pf_set_si_mc_hash_filter(hw, 0,
+ *mac_filter->mac_hash_table);
+ }
+ netif_addr_unlock_bh(ndev);
+}
+
+static void enetc4_pf_set_mac_filter(struct enetc_pf *pf, int type)
+{
+ /* Currently the MAC address filter table (MAFT) has only 4 entries,
+ * and the default network configuration typically installs several
+ * multicast addresses for filtering, so the MAFT is only suitable for
+ * unicast filtering. If the number of unicast addresses exceeds the
+ * table capacity, fall back to the MAC hash filter.
+ */
+ if (type & ENETC_MAC_FILTER_TYPE_UC && enetc4_pf_set_uc_exact_filter(pf)) {
+ /* Fall back to the MAC hash filter */
+ enetc4_pf_set_mac_hash_filter(pf, ENETC_MAC_FILTER_TYPE_UC);
+ /* Clear the old MAC exact filter */
+ enetc4_pf_clear_maft_entries(pf);
+ }
+
+ if (type & ENETC_MAC_FILTER_TYPE_MC)
+ enetc4_pf_set_mac_hash_filter(pf, ENETC_MAC_FILTER_TYPE_MC);
+}
+
+static const struct enetc_pf_ops enetc4_pf_ops = {
+ .set_si_primary_mac = enetc4_pf_set_si_primary_mac,
+ .get_si_primary_mac = enetc4_pf_get_si_primary_mac,
+};
+
+static int enetc4_pf_struct_init(struct enetc_si *si)
+{
+ struct enetc_pf *pf = enetc_si_priv(si);
+
+ pf->si = si;
+ pf->total_vfs = pci_sriov_get_totalvfs(si->pdev);
+ pf->ops = &enetc4_pf_ops;
+
+ enetc4_get_port_caps(pf);
+ enetc4_get_psi_hw_features(si);
+
+ return 0;
+}
+
+static u32 enetc4_psicfgr0_val_construct(bool is_vf, u32 num_tx_bdr, u32 num_rx_bdr)
+{
+ u32 val;
+
+ val = ENETC_PSICFGR0_SET_TXBDR(num_tx_bdr);
+ val |= ENETC_PSICFGR0_SET_RXBDR(num_rx_bdr);
+ val |= ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
+
+ if (is_vf)
+ val |= ENETC_PSICFGR0_VTE | ENETC_PSICFGR0_SIVIE;
+
+ return val;
+}
+
+static void enetc4_default_rings_allocation(struct enetc_pf *pf)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+ u32 num_rx_bdr, num_tx_bdr, val;
+ u32 vf_tx_bdr, vf_rx_bdr;
+ int i, rx_rem, tx_rem;
+
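+ /* Reserve up to ENETC_SI_MAX_RING_NUM Rx/Tx rings for the PSI while
+ * keeping at least one of each for every VSI; the remaining rings are
+ * split evenly across the VSIs, with any remainder handed to the
+ * lowest-numbered VSIs.
+ */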
+ if (pf->caps.num_rx_bdr < ENETC_SI_MAX_RING_NUM + pf->caps.num_vsi)
+ num_rx_bdr = pf->caps.num_rx_bdr - pf->caps.num_vsi;
+ else
+ num_rx_bdr = ENETC_SI_MAX_RING_NUM;
+
+ if (pf->caps.num_tx_bdr < ENETC_SI_MAX_RING_NUM + pf->caps.num_vsi)
+ num_tx_bdr = pf->caps.num_tx_bdr - pf->caps.num_vsi;
+ else
+ num_tx_bdr = ENETC_SI_MAX_RING_NUM;
+
+ val = enetc4_psicfgr0_val_construct(false, num_tx_bdr, num_rx_bdr);
+ enetc_port_wr(hw, ENETC4_PSICFGR0(0), val);
+
+ num_rx_bdr = pf->caps.num_rx_bdr - num_rx_bdr;
+ rx_rem = num_rx_bdr % pf->caps.num_vsi;
+ num_rx_bdr = num_rx_bdr / pf->caps.num_vsi;
+
+ num_tx_bdr = pf->caps.num_tx_bdr - num_tx_bdr;
+ tx_rem = num_tx_bdr % pf->caps.num_vsi;
+ num_tx_bdr = num_tx_bdr / pf->caps.num_vsi;
+
+ for (i = 0; i < pf->caps.num_vsi; i++) {
+ vf_tx_bdr = (i < tx_rem) ? num_tx_bdr + 1 : num_tx_bdr;
+ vf_rx_bdr = (i < rx_rem) ? num_rx_bdr + 1 : num_rx_bdr;
+ val = enetc4_psicfgr0_val_construct(true, vf_tx_bdr, vf_rx_bdr);
+ enetc_port_wr(hw, ENETC4_PSICFGR0(i + 1), val);
+ }
+}
+
+static void enetc4_allocate_si_rings(struct enetc_pf *pf)
+{
+ enetc4_default_rings_allocation(pf);
+}
+
+static void enetc4_pf_set_si_vlan_promisc(struct enetc_hw *hw, int si, bool en)
+{
+ u32 val = enetc_port_rd(hw, ENETC4_PSIPVMR);
+
+ if (en)
+ val |= BIT(si);
+ else
+ val &= ~BIT(si);
+
+ enetc_port_wr(hw, ENETC4_PSIPVMR, val);
+}
+
+static void enetc4_set_default_si_vlan_promisc(struct enetc_pf *pf)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+ int num_si = pf->caps.num_vsi + 1;
+ int i;
+
+ /* enforce VLAN promiscuous mode for all SIs */
+ for (i = 0; i < num_si; i++)
+ enetc4_pf_set_si_vlan_promisc(hw, i, true);
+}
+
+/* Allocate the number of MSI-X vectors for each SI. */
+static void enetc4_set_si_msix_num(struct enetc_pf *pf)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+ int i, num_msix, total_si;
+ u32 val;
+
+ total_si = pf->caps.num_vsi + 1;
+
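+ /* Split the MSI-X vectors evenly across the PSI and all VSIs; the PSI
+ * additionally takes the remainder of the division. The value written
+ * to PSICFGR2 is the per-SI vector count minus one.
+ */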
+ num_msix = pf->caps.num_msix / total_si +
+ pf->caps.num_msix % total_si - 1;
+ val = num_msix & PSICFGR2_NUM_MSIX;
+ enetc_port_wr(hw, ENETC4_PSICFGR2(0), val);
+
+ num_msix = pf->caps.num_msix / total_si - 1;
+ val = num_msix & PSICFGR2_NUM_MSIX;
+ for (i = 0; i < pf->caps.num_vsi; i++)
+ enetc_port_wr(hw, ENETC4_PSICFGR2(i + 1), val);
+}
+
+static void enetc4_enable_all_si(struct enetc_pf *pf)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+ int num_si = pf->caps.num_vsi + 1;
+ u32 si_bitmap = 0;
+ int i;
+
+ /* Master enable for all SIs */
+ for (i = 0; i < num_si; i++)
+ si_bitmap |= PMR_SI_EN(i);
+
+ enetc_port_wr(hw, ENETC4_PMR, si_bitmap);
+}
+
+static void enetc4_configure_port_si(struct enetc_pf *pf)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+
+ enetc4_allocate_si_rings(pf);
+
+ /* Outer VLAN tag will be used for VLAN filtering */
+ enetc_port_wr(hw, ENETC4_PSIVLANFMR, PSIVLANFMR_VS);
+
+ enetc4_set_default_si_vlan_promisc(pf);
+
+ /* Disable SI MAC multicast & unicast promiscuous */
+ enetc_port_wr(hw, ENETC4_PSIPMMR, 0);
+
+ enetc4_set_si_msix_num(pf);
+
+ enetc4_enable_all_si(pf);
+}
+
+static void enetc4_pf_reset_tc_msdu(struct enetc_hw *hw)
+{
+ u32 val = ENETC_MAC_MAXFRM_SIZE;
+ int tc;
+
+ val = u32_replace_bits(val, SDU_TYPE_MPDU, PTCTMSDUR_SDU_TYPE);
+
+ for (tc = 0; tc < ENETC_NUM_TC; tc++)
+ enetc_port_wr(hw, ENETC4_PTCTMSDUR(tc), val);
+}
+
+static void enetc4_set_trx_frame_size(struct enetc_pf *pf)
+{
+ struct enetc_si *si = pf->si;
+
+ enetc_port_mac_wr(si, ENETC4_PM_MAXFRM(0),
+ ENETC_SET_MAXFRM(ENETC_MAC_MAXFRM_SIZE));
+
+ enetc4_pf_reset_tc_msdu(&si->hw);
+}
+
+static void enetc4_enable_trx(struct enetc_pf *pf)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+
+ /* Enable port transmit/receive */
+ enetc_port_wr(hw, ENETC4_POR, 0);
+}
+
+static void enetc4_configure_port(struct enetc_pf *pf)
+{
+ enetc4_configure_port_si(pf);
+ enetc4_set_trx_frame_size(pf);
+ enetc_set_default_rss_key(pf);
+ enetc4_enable_trx(pf);
+}
+
+static int enetc4_init_ntmp_user(struct enetc_si *si)
+{
+ struct ntmp_user *user = &si->ntmp_user;
+
+ /* For ENETC 4.1, all table versions are 0 */
+ memset(&user->tbl, 0, sizeof(user->tbl));
+
+ return enetc4_setup_cbdr(si);
+}
+
+static void enetc4_free_ntmp_user(struct enetc_si *si)
+{
+ enetc4_teardown_cbdr(si);
+}
+
+static int enetc4_pf_init(struct enetc_pf *pf)
+{
+ struct device *dev = &pf->si->pdev->dev;
+ int err;
+
+ /* Initialize the MAC address for PF and VFs */
+ err = enetc_setup_mac_addresses(dev->of_node, pf);
+ if (err) {
+ dev_err(dev, "Failed to set MAC addresses\n");
+ return err;
+ }
+
+ err = enetc4_init_ntmp_user(pf->si);
+ if (err) {
+ dev_err(dev, "Failed to init CBDR\n");
+ return err;
+ }
+
+ enetc4_configure_port(pf);
+
+ return 0;
+}
+
+static void enetc4_pf_free(struct enetc_pf *pf)
+{
+ enetc4_free_ntmp_user(pf->si);
+}
+
+static void enetc4_psi_do_set_rx_mode(struct work_struct *work)
+{
+ struct enetc_si *si = container_of(work, struct enetc_si, rx_mode_task);
+ struct enetc_pf *pf = enetc_si_priv(si);
+ struct net_device *ndev = si->ndev;
+ struct enetc_hw *hw = &si->hw;
+ bool uc_promisc = false;
+ bool mc_promisc = false;
+ int type = 0;
+
+ rtnl_lock();
+
+ if (ndev->flags & IFF_PROMISC) {
+ uc_promisc = true;
+ mc_promisc = true;
+ } else if (ndev->flags & IFF_ALLMULTI) {
+ mc_promisc = true;
+ type = ENETC_MAC_FILTER_TYPE_UC;
+ } else {
+ type = ENETC_MAC_FILTER_TYPE_ALL;
+ }
+
+ enetc4_pf_set_si_mac_promisc(hw, 0, uc_promisc, mc_promisc);
+
+ if (uc_promisc) {
+ enetc4_pf_set_si_uc_hash_filter(hw, 0, 0);
+ enetc4_pf_clear_maft_entries(pf);
+ }
+
+ if (mc_promisc)
+ enetc4_pf_set_si_mc_hash_filter(hw, 0, 0);
+
+ /* Set new MAC filter */
+ enetc4_pf_set_mac_filter(pf, type);
+
+ rtnl_unlock();
+}
+
+static void enetc4_pf_set_rx_mode(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_si *si = priv->si;
+
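+	/* .ndo_set_rx_mode runs in atomic context, so the actual filter
+	 * update, which takes the RTNL lock and issues command BD ring
+	 * transactions, is deferred to a workqueue.
+	 */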
+ queue_work(si->workqueue, &si->rx_mode_task);
+}
+
+static int enetc4_pf_set_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = ndev->features ^ features;
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ bool promisc_en = !(features & NETIF_F_HW_VLAN_CTAG_FILTER);
+
+ enetc4_pf_set_si_vlan_promisc(hw, 0, promisc_en);
+ }
+
+ if (changed & NETIF_F_LOOPBACK)
+ enetc4_pf_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));
+
+ enetc_set_features(ndev, features);
+
+ return 0;
+}
+
+static const struct net_device_ops enetc4_ndev_ops = {
+ .ndo_open = enetc_open,
+ .ndo_stop = enetc_close,
+ .ndo_start_xmit = enetc_xmit,
+ .ndo_get_stats = enetc_get_stats,
+ .ndo_set_mac_address = enetc_pf_set_mac_addr,
+ .ndo_set_rx_mode = enetc4_pf_set_rx_mode,
+ .ndo_set_features = enetc4_pf_set_features,
+ .ndo_vlan_rx_add_vid = enetc_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = enetc_vlan_rx_del_vid,
+ .ndo_eth_ioctl = enetc_ioctl,
+ .ndo_hwtstamp_get = enetc_hwtstamp_get,
+ .ndo_hwtstamp_set = enetc_hwtstamp_set,
+};
+
+static struct phylink_pcs *
+enetc4_pl_mac_select_pcs(struct phylink_config *config, phy_interface_t iface)
+{
+ struct enetc_pf *pf = phylink_to_enetc_pf(config);
+
+ return pf->pcs;
+}
+
+static void enetc4_mac_config(struct enetc_pf *pf, unsigned int mode,
+ phy_interface_t phy_mode)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(pf->si->ndev);
+ struct enetc_si *si = pf->si;
+ u32 val;
+
+ if (enetc_is_pseudo_mac(si))
+ return;
+
+ val = enetc_port_mac_rd(si, ENETC4_PM_IF_MODE(0));
+ val &= ~(PM_IF_MODE_IFMODE | PM_IF_MODE_ENA);
+
+ switch (phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ val |= IFMODE_RGMII;
+ /* Auto-negotiation must be enabled for the MAC if its
+ * RGMII interface supports in-band status.
+ */
+ if (phylink_autoneg_inband(mode))
+ val |= PM_IF_MODE_ENA;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ val |= IFMODE_RMII;
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ val |= IFMODE_SGMII;
+ break;
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_USXGMII:
+ val |= IFMODE_XGMII;
+ break;
+ default:
+ dev_err(priv->dev,
+ "Unsupported PHY mode:%d\n", phy_mode);
+ return;
+ }
+
+ enetc_port_mac_wr(si, ENETC4_PM_IF_MODE(0), val);
+}
+
+static void enetc4_pl_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct enetc_pf *pf = phylink_to_enetc_pf(config);
+
+ enetc4_mac_config(pf, mode, state->interface);
+}
+
+static void enetc4_set_port_speed(struct enetc_ndev_priv *priv, int speed)
+{
+ u32 old_speed = priv->speed;
+ u32 val;
+
+ if (speed == old_speed)
+ return;
+
+ val = enetc_port_rd(&priv->si->hw, ENETC4_PCR);
+ val &= ~PCR_PSPEED;
+
+ switch (speed) {
+ case SPEED_100:
+ case SPEED_1000:
+ case SPEED_2500:
+ case SPEED_10000:
+ val |= (PCR_PSPEED & PCR_PSPEED_VAL(speed));
+ break;
+ case SPEED_10:
+ default:
+ val |= (PCR_PSPEED & PCR_PSPEED_VAL(SPEED_10));
+ }
+
+ priv->speed = speed;
+ enetc_port_wr(&priv->si->hw, ENETC4_PCR, val);
+}
+
+static void enetc4_set_rgmii_mac(struct enetc_pf *pf, int speed, int duplex)
+{
+ struct enetc_si *si = pf->si;
+ u32 old_val, val;
+
+ old_val = enetc_port_mac_rd(si, ENETC4_PM_IF_MODE(0));
+ val = old_val & ~(PM_IF_MODE_ENA | PM_IF_MODE_M10 | PM_IF_MODE_REVMII);
+
+ switch (speed) {
+ case SPEED_1000:
+ val = u32_replace_bits(val, SSP_1G, PM_IF_MODE_SSP);
+ break;
+ case SPEED_100:
+ val = u32_replace_bits(val, SSP_100M, PM_IF_MODE_SSP);
+ break;
+ case SPEED_10:
+ val = u32_replace_bits(val, SSP_10M, PM_IF_MODE_SSP);
+ }
+
+ val = u32_replace_bits(val, duplex == DUPLEX_FULL ? 0 : 1,
+ PM_IF_MODE_HD);
+
+ if (val == old_val)
+ return;
+
+ enetc_port_mac_wr(si, ENETC4_PM_IF_MODE(0), val);
+}
+
+static void enetc4_set_rmii_mac(struct enetc_pf *pf, int speed, int duplex)
+{
+ struct enetc_si *si = pf->si;
+ u32 old_val, val;
+
+ old_val = enetc_port_mac_rd(si, ENETC4_PM_IF_MODE(0));
+ val = old_val & ~(PM_IF_MODE_ENA | PM_IF_MODE_SSP);
+
+ switch (speed) {
+ case SPEED_100:
+ val &= ~PM_IF_MODE_M10;
+ break;
+ case SPEED_10:
+ val |= PM_IF_MODE_M10;
+ }
+
+ val = u32_replace_bits(val, duplex == DUPLEX_FULL ? 0 : 1,
+ PM_IF_MODE_HD);
+
+ if (val == old_val)
+ return;
+
+ enetc_port_mac_wr(si, ENETC4_PM_IF_MODE(0), val);
+}
+
+static void enetc4_set_hd_flow_control(struct enetc_pf *pf, bool enable)
+{
+ struct enetc_si *si = pf->si;
+ u32 old_val, val;
+
+ if (!pf->caps.half_duplex)
+ return;
+
+ old_val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
+ val = u32_replace_bits(old_val, enable ? 1 : 0, PM_CMD_CFG_HD_FCEN);
+ if (val == old_val)
+ return;
+
+ enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);
+}
+
+static void enetc4_set_rx_pause(struct enetc_pf *pf, bool rx_pause)
+{
+ struct enetc_si *si = pf->si;
+ u32 old_val, val;
+
+ old_val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
+ val = u32_replace_bits(old_val, rx_pause ? 0 : 1, PM_CMD_CFG_PAUSE_IGN);
+ if (val == old_val)
+ return;
+
+ enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);
+}
+
+static void enetc4_set_tx_pause(struct enetc_pf *pf, int num_rxbdr, bool tx_pause)
+{
+ u32 pause_off_thresh = 0, pause_on_thresh = 0;
+ u32 init_quanta = 0, refresh_quanta = 0;
+ struct enetc_hw *hw = &pf->si->hw;
+ u32 rbmr, old_rbmr;
+ int i;
+
+ for (i = 0; i < num_rxbdr; i++) {
+ old_rbmr = enetc_rxbdr_rd(hw, i, ENETC_RBMR);
+ rbmr = u32_replace_bits(old_rbmr, tx_pause ? 1 : 0, ENETC_RBMR_CM);
+ if (rbmr == old_rbmr)
+ continue;
+
+ enetc_rxbdr_wr(hw, i, ENETC_RBMR, rbmr);
+ }
+
+ if (tx_pause) {
+ /* When the port first enters congestion, send a PAUSE request
+ * with the maximum number of quanta. When the port exits
+ * congestion, it will automatically send a PAUSE frame with
+ * zero quanta.
+ */
+ init_quanta = 0xffff;
+
+ /* Also, set up the refresh timer to send follow-up PAUSE
+ * frames at half the quanta value, in case the congestion
+ * condition persists.
+ */
+ refresh_quanta = 0xffff / 2;
+
+ /* Start emitting PAUSE frames when 3 large frames (or more
+ * smaller frames) have accumulated in the FIFO waiting to be
+ * DMAed to the RX ring.
+ */
+ pause_on_thresh = 3 * ENETC_MAC_MAXFRM_SIZE;
+ pause_off_thresh = 1 * ENETC_MAC_MAXFRM_SIZE;
+ }
+
+ enetc_port_mac_wr(pf->si, ENETC4_PM_PAUSE_QUANTA(0), init_quanta);
+ enetc_port_mac_wr(pf->si, ENETC4_PM_PAUSE_THRESH(0), refresh_quanta);
+ enetc_port_wr(hw, ENETC4_PPAUONTR, pause_on_thresh);
+ enetc_port_wr(hw, ENETC4_PPAUOFFTR, pause_off_thresh);
+}
+
+static void enetc4_enable_mac(struct enetc_pf *pf, bool en)
+{
+ struct enetc_si *si = pf->si;
+ u32 val;
+
+ val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
+ val &= ~(PM_CMD_CFG_TX_EN | PM_CMD_CFG_RX_EN);
+ val |= en ? (PM_CMD_CFG_TX_EN | PM_CMD_CFG_RX_EN) : 0;
+
+ enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);
+}
+
+static void enetc4_pl_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy, unsigned int mode,
+ phy_interface_t interface, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ struct enetc_pf *pf = phylink_to_enetc_pf(config);
+ struct enetc_si *si = pf->si;
+ struct enetc_ndev_priv *priv;
+ bool hd_fc = false;
+
+ priv = netdev_priv(si->ndev);
+ enetc4_set_port_speed(priv, speed);
+
+ if (!phylink_autoneg_inband(mode) &&
+ phy_interface_mode_is_rgmii(interface))
+ enetc4_set_rgmii_mac(pf, speed, duplex);
+
+ if (interface == PHY_INTERFACE_MODE_RMII)
+ enetc4_set_rmii_mac(pf, speed, duplex);
+
+ if (duplex == DUPLEX_FULL) {
+ /* When preemption is enabled, generation of PAUSE frames
+ * must be disabled, as stated in the IEEE 802.3 standard.
+ */
+ if (priv->active_offloads & ENETC_F_QBU)
+ tx_pause = false;
+ } else { /* DUPLEX_HALF */
+ if (tx_pause || rx_pause)
+ hd_fc = true;
+
+ /* As per 802.3 annex 31B, PAUSE frames are only supported
+ * when the link is configured for full duplex operation.
+ */
+ tx_pause = false;
+ rx_pause = false;
+ }
+
+ enetc4_set_hd_flow_control(pf, hd_fc);
+ enetc4_set_tx_pause(pf, priv->num_rx_rings, tx_pause);
+ enetc4_set_rx_pause(pf, rx_pause);
+ enetc4_enable_mac(pf, true);
+}
+
+static void enetc4_pl_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct enetc_pf *pf = phylink_to_enetc_pf(config);
+
+ enetc4_enable_mac(pf, false);
+}
+
+static const struct phylink_mac_ops enetc_pl_mac_ops = {
+ .mac_select_pcs = enetc4_pl_mac_select_pcs,
+ .mac_config = enetc4_pl_mac_config,
+ .mac_link_up = enetc4_pl_mac_link_up,
+ .mac_link_down = enetc4_pl_mac_link_down,
+};
+
+static void enetc4_pci_remove(void *data)
+{
+ struct pci_dev *pdev = data;
+
+ enetc_pci_remove(pdev);
+}
+
+static int enetc4_link_init(struct enetc_ndev_priv *priv,
+ struct device_node *node)
+{
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ struct device *dev = priv->dev;
+ int err;
+
+ err = of_get_phy_mode(node, &pf->if_mode);
+ if (err) {
+ dev_err(dev, "Failed to get PHY mode\n");
+ return err;
+ }
+
+ err = enetc_mdiobus_create(pf, node);
+ if (err) {
+ dev_err(dev, "Failed to create MDIO bus\n");
+ return err;
+ }
+
+ err = enetc_phylink_create(priv, node, &enetc_pl_mac_ops);
+ if (err) {
+ dev_err(dev, "Failed to create phylink\n");
+ goto err_phylink_create;
+ }
+
+ return 0;
+
+err_phylink_create:
+ enetc_mdiobus_destroy(pf);
+
+ return err;
+}
+
+static void enetc4_link_deinit(struct enetc_ndev_priv *priv)
+{
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+
+ enetc_phylink_destroy(priv);
+ enetc_mdiobus_destroy(pf);
+}
+
+static int enetc4_psi_wq_task_init(struct enetc_si *si)
+{
+ char wq_name[24];
+
+ INIT_WORK(&si->rx_mode_task, enetc4_psi_do_set_rx_mode);
+ snprintf(wq_name, sizeof(wq_name), "enetc-%s", pci_name(si->pdev));
+ si->workqueue = create_singlethread_workqueue(wq_name);
+ if (!si->workqueue)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int enetc4_pf_netdev_create(struct enetc_si *si)
+{
+ struct device *dev = &si->pdev->dev;
+ struct enetc_ndev_priv *priv;
+ struct net_device *ndev;
+ int err;
+
+ ndev = alloc_etherdev_mqs(sizeof(struct enetc_ndev_priv),
+ si->num_tx_rings, si->num_rx_rings);
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+ priv->ref_clk = devm_clk_get_optional(dev, "ref");
+ if (IS_ERR(priv->ref_clk)) {
+ dev_err(dev, "Get reference clock failed\n");
+ err = PTR_ERR(priv->ref_clk);
+ goto err_clk_get;
+ }
+
+ enetc_pf_netdev_setup(si, ndev, &enetc4_ndev_ops);
+
+ enetc_init_si_rings_params(priv);
+
+ err = enetc_configure_si(priv);
+ if (err) {
+ dev_err(dev, "Failed to configure SI\n");
+ goto err_config_si;
+ }
+
+ err = enetc_alloc_msix(priv);
+ if (err) {
+ dev_err(dev, "Failed to alloc MSI-X\n");
+ goto err_alloc_msix;
+ }
+
+ err = enetc4_link_init(priv, dev->of_node);
+ if (err)
+ goto err_link_init;
+
+ err = enetc4_psi_wq_task_init(si);
+ if (err) {
+ dev_err(dev, "Failed to init workqueue\n");
+ goto err_wq_init;
+ }
+
+ err = register_netdev(ndev);
+ if (err) {
+ dev_err(dev, "Failed to register netdev\n");
+ goto err_reg_netdev;
+ }
+
+ return 0;
+
+err_reg_netdev:
+ destroy_workqueue(si->workqueue);
+err_wq_init:
+ enetc4_link_deinit(priv);
+err_link_init:
+ enetc_free_msix(priv);
+err_alloc_msix:
+err_config_si:
+err_clk_get:
+ free_netdev(ndev);
+
+ return err;
+}
+
+static void enetc4_pf_netdev_destroy(struct enetc_si *si)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(si->ndev);
+ struct net_device *ndev = si->ndev;
+
+ unregister_netdev(ndev);
+ cancel_work(&si->rx_mode_task);
+ destroy_workqueue(si->workqueue);
+ enetc4_link_deinit(priv);
+ enetc_free_msix(priv);
+ free_netdev(ndev);
+}
+
+static const struct enetc_si_ops enetc4_psi_ops = {
+ .get_rss_table = enetc4_get_rss_table,
+ .set_rss_table = enetc4_set_rss_table,
+};
+
+static int enetc4_pf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct enetc_si *si;
+ struct enetc_pf *pf;
+ int err;
+
+ err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf));
+ if (err)
+ return dev_err_probe(dev, err, "PCIe probing failed\n");
+
+ err = devm_add_action_or_reset(dev, enetc4_pci_remove, pdev);
+ if (err)
+ return err;
+
+ /* si is the private data. */
+ si = pci_get_drvdata(pdev);
+ if (!si->hw.port || !si->hw.global)
+ return dev_err_probe(dev, -ENODEV,
+ "Couldn't map PF only space\n");
+
+ si->revision = enetc_get_ip_revision(&si->hw);
+ si->ops = &enetc4_psi_ops;
+ err = enetc_get_driver_data(si);
+ if (err)
+ return dev_err_probe(dev, err,
+ "Could not get PF driver data\n");
+
+ err = enetc4_pf_struct_init(si);
+ if (err)
+ return err;
+
+ pf = enetc_si_priv(si);
+ err = enetc4_pf_init(pf);
+ if (err)
+ return err;
+
+ enetc_get_si_caps(si);
+
+ err = enetc4_pf_netdev_create(si);
+ if (err)
+ goto err_netdev_create;
+
+ enetc_create_debugfs(si);
+
+ return 0;
+
+err_netdev_create:
+ enetc4_pf_free(pf);
+
+ return err;
+}
+
+static void enetc4_pf_remove(struct pci_dev *pdev)
+{
+ struct enetc_si *si = pci_get_drvdata(pdev);
+ struct enetc_pf *pf = enetc_si_priv(si);
+
+ enetc_remove_debugfs(si);
+ enetc4_pf_netdev_destroy(si);
+ enetc4_pf_free(pf);
+}
+
+static const struct pci_device_id enetc4_pf_id_table[] = {
+ { PCI_DEVICE(NXP_ENETC_VENDOR_ID, NXP_ENETC_PF_DEV_ID) },
+ { PCI_DEVICE(NXP_ENETC_VENDOR_ID, NXP_ENETC_PPM_DEV_ID) },
+ { 0, } /* End of table. */
+};
+MODULE_DEVICE_TABLE(pci, enetc4_pf_id_table);
+
+static struct pci_driver enetc4_pf_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = enetc4_pf_id_table,
+ .probe = enetc4_pf_probe,
+ .remove = enetc4_pf_remove,
+};
+module_pci_driver(enetc4_pf_driver);
+
+MODULE_DESCRIPTION("ENETC4 PF Driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
index 20bfdf7fb4b4..3d5f31879d5c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
@@ -60,6 +60,44 @@ void enetc_teardown_cbdr(struct enetc_cbdr *cbdr)
}
EXPORT_SYMBOL_GPL(enetc_teardown_cbdr);
+int enetc4_setup_cbdr(struct enetc_si *si)
+{
+ struct ntmp_user *user = &si->ntmp_user;
+ struct device *dev = &si->pdev->dev;
+ struct enetc_hw *hw = &si->hw;
+ struct netc_cbdr_regs regs;
+
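+	/* A single command BD ring is used; it is set up here and then
+	 * driven through the ntmp_* helpers for table management.
+	 */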
+ user->cbdr_num = 1;
+ user->dev = dev;
+ user->ring = devm_kcalloc(dev, user->cbdr_num,
+ sizeof(struct netc_cbdr), GFP_KERNEL);
+ if (!user->ring)
+ return -ENOMEM;
+
+ /* set CBDR cache attributes */
+ enetc_wr(hw, ENETC_SICAR2,
+ ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
+
+ regs.pir = hw->reg + ENETC_SICBDRPIR;
+ regs.cir = hw->reg + ENETC_SICBDRCIR;
+ regs.mr = hw->reg + ENETC_SICBDRMR;
+ regs.bar0 = hw->reg + ENETC_SICBDRBAR0;
+ regs.bar1 = hw->reg + ENETC_SICBDRBAR1;
+ regs.lenr = hw->reg + ENETC_SICBDRLENR;
+
+ return ntmp_init_cbdr(user->ring, dev, &regs);
+}
+EXPORT_SYMBOL_GPL(enetc4_setup_cbdr);
+
+void enetc4_teardown_cbdr(struct enetc_si *si)
+{
+ struct ntmp_user *user = &si->ntmp_user;
+
+ ntmp_free_cbdr(user->ring);
+ user->dev = NULL;
+}
+EXPORT_SYMBOL_GPL(enetc4_teardown_cbdr);
+
static void enetc_clean_cbdr(struct enetc_cbdr *ring)
{
struct enetc_cbd *dest_cbd;
@@ -256,3 +294,15 @@ int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count)
return enetc_cmd_rss_table(si, (u32 *)table, count, false);
}
EXPORT_SYMBOL_GPL(enetc_set_rss_table);
+
+int enetc4_get_rss_table(struct enetc_si *si, u32 *table, int count)
+{
+ return ntmp_rsst_query_entry(&si->ntmp_user, table, count);
+}
+EXPORT_SYMBOL_GPL(enetc4_get_rss_table);
+
+int enetc4_set_rss_table(struct enetc_si *si, const u32 *table, int count)
+{
+ return ntmp_rsst_update_entry(&si->ntmp_user, table, count);
+}
+EXPORT_SYMBOL_GPL(enetc4_set_rss_table);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index e1745b89362d..fed89d4f1e1d 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -4,6 +4,9 @@
#include <linux/ethtool_netlink.h>
#include <linux/net_tstamp.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/ptp_clock_kernel.h>
+
#include "enetc.h"
static const u32 enetc_si_regs[] = {
@@ -141,8 +144,8 @@ static const struct {
static const struct {
int reg;
- char name[ETH_GSTRING_LEN];
-} enetc_port_counters[] = {
+ char name[ETH_GSTRING_LEN] __nonstring;
+} enetc_pm_counters[] = {
{ ENETC_PM_REOCT(0), "MAC rx ethernet octets" },
{ ENETC_PM_RALN(0), "MAC rx alignment errors" },
{ ENETC_PM_RXPF(0), "MAC rx valid pause frames" },
@@ -194,6 +197,12 @@ static const struct {
{ ENETC_PM_TSCOL(0), "MAC tx single collisions" },
{ ENETC_PM_TLCOL(0), "MAC tx late collisions" },
{ ENETC_PM_TECOL(0), "MAC tx excessive collisions" },
+};
+
+static const struct {
+ int reg;
+ char name[ETH_GSTRING_LEN] __nonstring;
+} enetc_port_counters[] = {
{ ENETC_UFDMF, "SI MAC nomatch u-cast discards" },
{ ENETC_MFDMF, "SI MAC nomatch m-cast discards" },
{ ENETC_PBFDSIR, "SI MAC nomatch b-cast discards" },
@@ -240,6 +249,7 @@ static int enetc_get_sset_count(struct net_device *ndev, int sset)
return len;
len += ARRAY_SIZE(enetc_port_counters);
+ len += ARRAY_SIZE(enetc_pm_counters);
return len;
}
@@ -264,7 +274,10 @@ static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
break;
for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++)
- ethtool_puts(&data, enetc_port_counters[i].name);
+ ethtool_cpy(&data, enetc_port_counters[i].name);
+
+ for (i = 0; i < ARRAY_SIZE(enetc_pm_counters); i++)
+ ethtool_cpy(&data, enetc_pm_counters[i].name);
break;
}
@@ -302,13 +315,16 @@ static void enetc_get_ethtool_stats(struct net_device *ndev,
for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++)
data[o++] = enetc_port_rd(hw, enetc_port_counters[i].reg);
+
+ for (i = 0; i < ARRAY_SIZE(enetc_pm_counters); i++)
+ data[o++] = enetc_port_rd64(hw, enetc_pm_counters[i].reg);
}
static void enetc_pause_stats(struct enetc_hw *hw, int mac,
struct ethtool_pause_stats *pause_stats)
{
- pause_stats->tx_pause_frames = enetc_port_rd(hw, ENETC_PM_TXPF(mac));
- pause_stats->rx_pause_frames = enetc_port_rd(hw, ENETC_PM_RXPF(mac));
+ pause_stats->tx_pause_frames = enetc_port_rd64(hw, ENETC_PM_TXPF(mac));
+ pause_stats->rx_pause_frames = enetc_port_rd64(hw, ENETC_PM_RXPF(mac));
}
static void enetc_get_pause_stats(struct net_device *ndev,
@@ -335,31 +351,31 @@ static void enetc_get_pause_stats(struct net_device *ndev,
static void enetc_mac_stats(struct enetc_hw *hw, int mac,
struct ethtool_eth_mac_stats *s)
{
- s->FramesTransmittedOK = enetc_port_rd(hw, ENETC_PM_TFRM(mac));
- s->SingleCollisionFrames = enetc_port_rd(hw, ENETC_PM_TSCOL(mac));
- s->MultipleCollisionFrames = enetc_port_rd(hw, ENETC_PM_TMCOL(mac));
- s->FramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RFRM(mac));
- s->FrameCheckSequenceErrors = enetc_port_rd(hw, ENETC_PM_RFCS(mac));
- s->AlignmentErrors = enetc_port_rd(hw, ENETC_PM_RALN(mac));
- s->OctetsTransmittedOK = enetc_port_rd(hw, ENETC_PM_TEOCT(mac));
- s->FramesWithDeferredXmissions = enetc_port_rd(hw, ENETC_PM_TDFR(mac));
- s->LateCollisions = enetc_port_rd(hw, ENETC_PM_TLCOL(mac));
- s->FramesAbortedDueToXSColls = enetc_port_rd(hw, ENETC_PM_TECOL(mac));
- s->FramesLostDueToIntMACXmitError = enetc_port_rd(hw, ENETC_PM_TERR(mac));
- s->CarrierSenseErrors = enetc_port_rd(hw, ENETC_PM_TCRSE(mac));
- s->OctetsReceivedOK = enetc_port_rd(hw, ENETC_PM_REOCT(mac));
- s->FramesLostDueToIntMACRcvError = enetc_port_rd(hw, ENETC_PM_RDRNTP(mac));
- s->MulticastFramesXmittedOK = enetc_port_rd(hw, ENETC_PM_TMCA(mac));
- s->BroadcastFramesXmittedOK = enetc_port_rd(hw, ENETC_PM_TBCA(mac));
- s->MulticastFramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RMCA(mac));
- s->BroadcastFramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RBCA(mac));
+ s->FramesTransmittedOK = enetc_port_rd64(hw, ENETC_PM_TFRM(mac));
+ s->SingleCollisionFrames = enetc_port_rd64(hw, ENETC_PM_TSCOL(mac));
+ s->MultipleCollisionFrames = enetc_port_rd64(hw, ENETC_PM_TMCOL(mac));
+ s->FramesReceivedOK = enetc_port_rd64(hw, ENETC_PM_RFRM(mac));
+ s->FrameCheckSequenceErrors = enetc_port_rd64(hw, ENETC_PM_RFCS(mac));
+ s->AlignmentErrors = enetc_port_rd64(hw, ENETC_PM_RALN(mac));
+ s->OctetsTransmittedOK = enetc_port_rd64(hw, ENETC_PM_TEOCT(mac));
+ s->FramesWithDeferredXmissions = enetc_port_rd64(hw, ENETC_PM_TDFR(mac));
+ s->LateCollisions = enetc_port_rd64(hw, ENETC_PM_TLCOL(mac));
+ s->FramesAbortedDueToXSColls = enetc_port_rd64(hw, ENETC_PM_TECOL(mac));
+ s->FramesLostDueToIntMACXmitError = enetc_port_rd64(hw, ENETC_PM_TERR(mac));
+ s->CarrierSenseErrors = enetc_port_rd64(hw, ENETC_PM_TCRSE(mac));
+ s->OctetsReceivedOK = enetc_port_rd64(hw, ENETC_PM_REOCT(mac));
+ s->FramesLostDueToIntMACRcvError = enetc_port_rd64(hw, ENETC_PM_RDRNTP(mac));
+ s->MulticastFramesXmittedOK = enetc_port_rd64(hw, ENETC_PM_TMCA(mac));
+ s->BroadcastFramesXmittedOK = enetc_port_rd64(hw, ENETC_PM_TBCA(mac));
+ s->MulticastFramesReceivedOK = enetc_port_rd64(hw, ENETC_PM_RMCA(mac));
+ s->BroadcastFramesReceivedOK = enetc_port_rd64(hw, ENETC_PM_RBCA(mac));
}
static void enetc_ctrl_stats(struct enetc_hw *hw, int mac,
struct ethtool_eth_ctrl_stats *s)
{
- s->MACControlFramesTransmitted = enetc_port_rd(hw, ENETC_PM_TCNP(mac));
- s->MACControlFramesReceived = enetc_port_rd(hw, ENETC_PM_RCNP(mac));
+ s->MACControlFramesTransmitted = enetc_port_rd64(hw, ENETC_PM_TCNP(mac));
+ s->MACControlFramesReceived = enetc_port_rd64(hw, ENETC_PM_RCNP(mac));
}
static const struct ethtool_rmon_hist_range enetc_rmon_ranges[] = {
@@ -376,26 +392,26 @@ static const struct ethtool_rmon_hist_range enetc_rmon_ranges[] = {
static void enetc_rmon_stats(struct enetc_hw *hw, int mac,
struct ethtool_rmon_stats *s)
{
- s->undersize_pkts = enetc_port_rd(hw, ENETC_PM_RUND(mac));
- s->oversize_pkts = enetc_port_rd(hw, ENETC_PM_ROVR(mac));
- s->fragments = enetc_port_rd(hw, ENETC_PM_RFRG(mac));
- s->jabbers = enetc_port_rd(hw, ENETC_PM_RJBR(mac));
-
- s->hist[0] = enetc_port_rd(hw, ENETC_PM_R64(mac));
- s->hist[1] = enetc_port_rd(hw, ENETC_PM_R127(mac));
- s->hist[2] = enetc_port_rd(hw, ENETC_PM_R255(mac));
- s->hist[3] = enetc_port_rd(hw, ENETC_PM_R511(mac));
- s->hist[4] = enetc_port_rd(hw, ENETC_PM_R1023(mac));
- s->hist[5] = enetc_port_rd(hw, ENETC_PM_R1522(mac));
- s->hist[6] = enetc_port_rd(hw, ENETC_PM_R1523X(mac));
-
- s->hist_tx[0] = enetc_port_rd(hw, ENETC_PM_T64(mac));
- s->hist_tx[1] = enetc_port_rd(hw, ENETC_PM_T127(mac));
- s->hist_tx[2] = enetc_port_rd(hw, ENETC_PM_T255(mac));
- s->hist_tx[3] = enetc_port_rd(hw, ENETC_PM_T511(mac));
- s->hist_tx[4] = enetc_port_rd(hw, ENETC_PM_T1023(mac));
- s->hist_tx[5] = enetc_port_rd(hw, ENETC_PM_T1522(mac));
- s->hist_tx[6] = enetc_port_rd(hw, ENETC_PM_T1523X(mac));
+ s->undersize_pkts = enetc_port_rd64(hw, ENETC_PM_RUND(mac));
+ s->oversize_pkts = enetc_port_rd64(hw, ENETC_PM_ROVR(mac));
+ s->fragments = enetc_port_rd64(hw, ENETC_PM_RFRG(mac));
+ s->jabbers = enetc_port_rd64(hw, ENETC_PM_RJBR(mac));
+
+ s->hist[0] = enetc_port_rd64(hw, ENETC_PM_R64(mac));
+ s->hist[1] = enetc_port_rd64(hw, ENETC_PM_R127(mac));
+ s->hist[2] = enetc_port_rd64(hw, ENETC_PM_R255(mac));
+ s->hist[3] = enetc_port_rd64(hw, ENETC_PM_R511(mac));
+ s->hist[4] = enetc_port_rd64(hw, ENETC_PM_R1023(mac));
+ s->hist[5] = enetc_port_rd64(hw, ENETC_PM_R1522(mac));
+ s->hist[6] = enetc_port_rd64(hw, ENETC_PM_R1523X(mac));
+
+ s->hist_tx[0] = enetc_port_rd64(hw, ENETC_PM_T64(mac));
+ s->hist_tx[1] = enetc_port_rd64(hw, ENETC_PM_T127(mac));
+ s->hist_tx[2] = enetc_port_rd64(hw, ENETC_PM_T255(mac));
+ s->hist_tx[3] = enetc_port_rd64(hw, ENETC_PM_T511(mac));
+ s->hist_tx[4] = enetc_port_rd64(hw, ENETC_PM_T1023(mac));
+ s->hist_tx[5] = enetc_port_rd64(hw, ENETC_PM_T1522(mac));
+ s->hist_tx[6] = enetc_port_rd64(hw, ENETC_PM_T1523X(mac));
}
static void enetc_get_eth_mac_stats(struct net_device *ndev,
@@ -419,6 +435,48 @@ static void enetc_get_eth_mac_stats(struct net_device *ndev,
}
}
+static void enetc_ppm_mac_stats(struct enetc_si *si,
+ struct ethtool_eth_mac_stats *s)
+{
+ struct enetc_hw *hw = &si->hw;
+ u64 rufcr, rmfcr, rbfcr;
+ u64 tufcr, tmfcr, tbfcr;
+
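+	/* The pseudo MAC exposes only per-type (unicast/multicast/broadcast)
+	 * frame counters, so the aggregate FramesTransmittedOK and
+	 * FramesReceivedOK values are derived by summing them.
+	 */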
+ rufcr = enetc_port_rd64(hw, ENETC4_PPMRUFCR);
+ rmfcr = enetc_port_rd64(hw, ENETC4_PPMRMFCR);
+ rbfcr = enetc_port_rd64(hw, ENETC4_PPMRBFCR);
+
+ tufcr = enetc_port_rd64(hw, ENETC4_PPMTUFCR);
+ tmfcr = enetc_port_rd64(hw, ENETC4_PPMTMFCR);
+ tbfcr = enetc_port_rd64(hw, ENETC4_PPMTBFCR);
+
+ s->FramesTransmittedOK = tufcr + tmfcr + tbfcr;
+ s->FramesReceivedOK = rufcr + rmfcr + rbfcr;
+ s->OctetsTransmittedOK = enetc_port_rd64(hw, ENETC4_PPMTOCR);
+ s->OctetsReceivedOK = enetc_port_rd64(hw, ENETC4_PPMROCR);
+ s->MulticastFramesXmittedOK = tmfcr;
+ s->BroadcastFramesXmittedOK = tbfcr;
+ s->MulticastFramesReceivedOK = rmfcr;
+ s->BroadcastFramesReceivedOK = rbfcr;
+}
+
+static void enetc_ppm_get_eth_mac_stats(struct net_device *ndev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ switch (mac_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ enetc_ppm_mac_stats(priv->si, mac_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ ethtool_aggregate_mac_stats(ndev, mac_stats);
+ break;
+ }
+}
+
static void enetc_get_eth_ctrl_stats(struct net_device *ndev,
struct ethtool_eth_ctrl_stats *ctrl_stats)
{
@@ -467,7 +525,8 @@ static void enetc_get_rmon_stats(struct net_device *ndev,
#define ENETC_RSSHASH_L3 (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO | RXH_IP_SRC | \
RXH_IP_DST)
#define ENETC_RSSHASH_L4 (ENETC_RSSHASH_L3 | RXH_L4_B_0_1 | RXH_L4_B_2_3)
-static int enetc_get_rsshash(struct ethtool_rxnfc *rxnfc)
+static int enetc_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *rxnfc)
{
static const u32 rsshash[] = {
[TCP_V4_FLOW] = ENETC_RSSHASH_L4,
@@ -574,6 +633,13 @@ done:
return enetc_set_fs_entry(si, &rfse, fs->location);
}
+static u32 enetc_get_rx_ring_count(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ return priv->num_rx_rings;
+}
+
static int enetc_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc,
u32 *rule_locs)
{
@@ -581,12 +647,6 @@ static int enetc_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc,
int i, j;
switch (rxnfc->cmd) {
- case ETHTOOL_GRXRINGS:
- rxnfc->data = priv->num_rx_rings;
- break;
- case ETHTOOL_GRXFH:
- /* get RSS hash config */
- return enetc_get_rsshash(rxnfc);
case ETHTOOL_GRXCLSRLCNT:
/* total number of entries */
rxnfc->data = priv->si->num_fs_entries;
@@ -677,36 +737,53 @@ static u32 enetc_get_rxfh_indir_size(struct net_device *ndev)
return priv->si->num_rss;
}
+static int enetc_get_rss_key_base(struct enetc_si *si)
+{
+ if (is_enetc_rev1(si))
+ return ENETC_PRSSK(0);
+
+ return ENETC4_PRSSKR(0);
+}
+
+static void enetc_get_rss_key(struct enetc_si *si, const u8 *key)
+{
+ int base = enetc_get_rss_key_base(si);
+ struct enetc_hw *hw = &si->hw;
+ int i;
+
+ for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
+ ((u32 *)key)[i] = enetc_port_rd(hw, base + i * 4);
+}
+
static int enetc_get_rxfh(struct net_device *ndev,
struct ethtool_rxfh_param *rxfh)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_hw *hw = &priv->si->hw;
- int err = 0, i;
+ struct enetc_si *si = priv->si;
+ int err = 0;
/* return hash function */
rxfh->hfunc = ETH_RSS_HASH_TOP;
/* return hash key */
- if (rxfh->key && hw->port)
- for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
- ((u32 *)rxfh->key)[i] = enetc_port_rd(hw,
- ENETC_PRSSK(i));
+ if (rxfh->key && enetc_si_is_pf(si))
+ enetc_get_rss_key(si, rxfh->key);
/* return RSS table */
if (rxfh->indir)
- err = enetc_get_rss_table(priv->si, rxfh->indir,
- priv->si->num_rss);
+ err = si->ops->get_rss_table(si, rxfh->indir, si->num_rss);
return err;
}
-void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes)
+void enetc_set_rss_key(struct enetc_si *si, const u8 *bytes)
{
+ int base = enetc_get_rss_key_base(si);
+ struct enetc_hw *hw = &si->hw;
int i;
for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
- enetc_port_wr(hw, ENETC_PRSSK(i), ((u32 *)bytes)[i]);
+ enetc_port_wr(hw, base + i * 4, ((u32 *)bytes)[i]);
}
EXPORT_SYMBOL_GPL(enetc_set_rss_key);
@@ -715,17 +792,16 @@ static int enetc_set_rxfh(struct net_device *ndev,
struct netlink_ext_ack *extack)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
int err = 0;
/* set hash key, if PF */
- if (rxfh->key && hw->port)
- enetc_set_rss_key(hw, rxfh->key);
+ if (rxfh->key && enetc_si_is_pf(si))
+ enetc_set_rss_key(si, rxfh->key);
/* set RSS table */
if (rxfh->indir)
- err = enetc_set_rss_table(priv->si, rxfh->indir,
- priv->si->num_rss);
+ err = si->ops->set_rss_table(si, rxfh->indir, si->num_rss);
return err;
}
@@ -762,9 +838,10 @@ static int enetc_get_coalesce(struct net_device *ndev,
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_int_vector *v = priv->int_vector[0];
+ u64 clk_freq = priv->sysclk_freq;
- ic->tx_coalesce_usecs = enetc_cycles_to_usecs(priv->tx_ictt);
- ic->rx_coalesce_usecs = enetc_cycles_to_usecs(v->rx_ictt);
+ ic->tx_coalesce_usecs = enetc_cycles_to_usecs(priv->tx_ictt, clk_freq);
+ ic->rx_coalesce_usecs = enetc_cycles_to_usecs(v->rx_ictt, clk_freq);
ic->tx_max_coalesced_frames = ENETC_TXIC_PKTTHR;
ic->rx_max_coalesced_frames = ENETC_RXIC_PKTTHR;
@@ -780,12 +857,13 @@ static int enetc_set_coalesce(struct net_device *ndev,
struct netlink_ext_ack *extack)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ u64 clk_freq = priv->sysclk_freq;
u32 rx_ictt, tx_ictt;
int i, ic_mode;
bool changed;
- tx_ictt = enetc_usecs_to_cycles(ic->tx_coalesce_usecs);
- rx_ictt = enetc_usecs_to_cycles(ic->rx_coalesce_usecs);
+ tx_ictt = enetc_usecs_to_cycles(ic->tx_coalesce_usecs, clk_freq);
+ rx_ictt = enetc_usecs_to_cycles(ic->rx_coalesce_usecs, clk_freq);
if (ic->rx_max_coalesced_frames != ENETC_RXIC_PKTTHR)
return -EOPNOTSUPP;
@@ -827,22 +905,61 @@ static int enetc_set_coalesce(struct net_device *ndev,
return 0;
}
-static int enetc_get_ts_info(struct net_device *ndev,
- struct kernel_ethtool_ts_info *info)
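+/* Editor's note: each supported IP revision implies a fixed device/function
+ * number for the PTP timer endpoint on the same PCI domain and bus as the
+ * ENETC function; the timer device is then mapped to a PHC index via
+ * ptp_clock_index_by_dev().
+ */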
+static int enetc_get_phc_index_by_pdev(struct enetc_si *si)
{
- int *phc_idx;
-
- phc_idx = symbol_get(enetc_phc_index);
- if (phc_idx) {
- info->phc_index = *phc_idx;
- symbol_put(enetc_phc_index);
+ struct pci_bus *bus = si->pdev->bus;
+ struct pci_dev *timer_pdev;
+ unsigned int devfn;
+ int phc_index;
+
+ switch (si->revision) {
+ case ENETC_REV_1_0:
+ devfn = PCI_DEVFN(0, 4);
+ break;
+ case ENETC_REV_4_1:
+ devfn = PCI_DEVFN(24, 0);
+ break;
+ case ENETC_REV_4_3:
+ devfn = PCI_DEVFN(0, 1);
+ break;
+ default:
+ return -1;
}
- if (!IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK)) {
- info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
+ timer_pdev = pci_get_domain_bus_and_slot(pci_domain_nr(bus),
+ bus->number, devfn);
+ if (!timer_pdev)
+ return -1;
- return 0;
- }
+ phc_index = ptp_clock_index_by_dev(&timer_pdev->dev);
+ pci_dev_put(timer_pdev);
+
+ return phc_index;
+}
+
+static int enetc_get_phc_index(struct enetc_si *si)
+{
+ struct device_node *np = si->pdev->dev.of_node;
+ struct device_node *timer_np;
+ int phc_index;
+
+ if (!np)
+ return enetc_get_phc_index_by_pdev(si);
+
+ timer_np = of_parse_phandle(np, "ptp-timer", 0);
+ if (!timer_np)
+ return enetc_get_phc_index_by_pdev(si);
+
+ phc_index = ptp_clock_index_by_of_node(timer_np);
+ of_node_put(timer_np);
+
+ return phc_index;
+}
+
+static void enetc_get_ts_generic_info(struct net_device *ndev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
@@ -850,11 +967,34 @@ static int enetc_get_ts_info(struct net_device *ndev,
SOF_TIMESTAMPING_TX_SOFTWARE;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON) |
- (1 << HWTSTAMP_TX_ONESTEP_SYNC);
+ (1 << HWTSTAMP_TX_ON);
+
+ if (enetc_si_is_pf(priv->si))
+ info->tx_types |= (1 << HWTSTAMP_TX_ONESTEP_SYNC);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_ALL);
+}
+
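+/* Editor's note: when the PTP clock is disabled or no PHC index can be
+ * resolved, enetc_get_ts_info() below falls back to advertising software TX
+ * timestamping only.
+ */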
+static int enetc_get_ts_info(struct net_device *ndev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_si *si = priv->si;
+
+ if (!enetc_ptp_clock_is_enabled(si))
+ goto timestamp_tx_sw;
+
+ info->phc_index = enetc_get_phc_index(si);
+ if (info->phc_index < 0)
+ goto timestamp_tx_sw;
+
+ enetc_get_ts_generic_info(ndev, info);
+
+ return 0;
+
+timestamp_tx_sw:
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
return 0;
}
@@ -1165,7 +1305,7 @@ void enetc_mm_link_state_update(struct enetc_ndev_priv *priv, bool link)
}
EXPORT_SYMBOL_GPL(enetc_mm_link_state_update);
-static const struct ethtool_ops enetc_pf_ethtool_ops = {
+const struct ethtool_ops enetc_pf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
@@ -1178,12 +1318,14 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = {
.get_rmon_stats = enetc_get_rmon_stats,
.get_eth_ctrl_stats = enetc_get_eth_ctrl_stats,
.get_eth_mac_stats = enetc_get_eth_mac_stats,
+ .get_rx_ring_count = enetc_get_rx_ring_count,
.get_rxnfc = enetc_get_rxnfc,
.set_rxnfc = enetc_set_rxnfc,
.get_rxfh_key_size = enetc_get_rxfh_key_size,
.get_rxfh_indir_size = enetc_get_rxfh_indir_size,
.get_rxfh = enetc_get_rxfh,
.set_rxfh = enetc_set_rxfh,
+ .get_rxfh_fields = enetc_get_rxfh_fields,
.get_ringparam = enetc_get_ringparam,
.get_coalesce = enetc_get_coalesce,
.set_coalesce = enetc_set_coalesce,
@@ -1200,7 +1342,26 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = {
.get_mm_stats = enetc_get_mm_stats,
};
-static const struct ethtool_ops enetc_vf_ethtool_ops = {
+const struct ethtool_ops enetc4_ppm_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
+ .get_eth_mac_stats = enetc_ppm_get_eth_mac_stats,
+ .get_rx_ring_count = enetc_get_rx_ring_count,
+ .get_rxfh_key_size = enetc_get_rxfh_key_size,
+ .get_rxfh_indir_size = enetc_get_rxfh_indir_size,
+ .get_rxfh = enetc_get_rxfh,
+ .set_rxfh = enetc_set_rxfh,
+ .get_rxfh_fields = enetc_get_rxfh_fields,
+ .get_ringparam = enetc_get_ringparam,
+ .get_coalesce = enetc_get_coalesce,
+ .set_coalesce = enetc_set_coalesce,
+ .get_link_ksettings = enetc_get_link_ksettings,
+ .set_link_ksettings = enetc_set_link_ksettings,
+ .get_link = ethtool_op_get_link,
+};
+
+const struct ethtool_ops enetc_vf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
@@ -1209,15 +1370,40 @@ static const struct ethtool_ops enetc_vf_ethtool_ops = {
.get_sset_count = enetc_get_sset_count,
.get_strings = enetc_get_strings,
.get_ethtool_stats = enetc_get_ethtool_stats,
+ .get_rx_ring_count = enetc_get_rx_ring_count,
.get_rxnfc = enetc_get_rxnfc,
.set_rxnfc = enetc_set_rxnfc,
.get_rxfh_indir_size = enetc_get_rxfh_indir_size,
.get_rxfh = enetc_get_rxfh,
.set_rxfh = enetc_set_rxfh,
+ .get_rxfh_fields = enetc_get_rxfh_fields,
+ .get_ringparam = enetc_get_ringparam,
+ .get_coalesce = enetc_get_coalesce,
+ .set_coalesce = enetc_set_coalesce,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = enetc_get_ts_info,
+};
+
+const struct ethtool_ops enetc4_pf_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_ringparam = enetc_get_ringparam,
.get_coalesce = enetc_get_coalesce,
.set_coalesce = enetc_set_coalesce,
+ .get_link_ksettings = enetc_get_link_ksettings,
+ .set_link_ksettings = enetc_set_link_ksettings,
.get_link = ethtool_op_get_link,
+ .get_wol = enetc_get_wol,
+ .set_wol = enetc_set_wol,
+ .get_pauseparam = enetc_get_pauseparam,
+ .set_pauseparam = enetc_set_pauseparam,
+ .get_rx_ring_count = enetc_get_rx_ring_count,
+ .get_rxfh_key_size = enetc_get_rxfh_key_size,
+ .get_rxfh_indir_size = enetc_get_rxfh_indir_size,
+ .get_rxfh = enetc_get_rxfh,
+ .set_rxfh = enetc_set_rxfh,
+ .get_rxfh_fields = enetc_get_rxfh_fields,
.get_ts_info = enetc_get_ts_info,
};
@@ -1225,9 +1411,6 @@ void enetc_set_ethtool_ops(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- if (enetc_si_is_pf(priv->si))
- ndev->ethtool_ops = &enetc_pf_ethtool_ops;
- else
- ndev->ethtool_ops = &enetc_vf_ethtool_ops;
+ ndev->ethtool_ops = priv->si->drvdata->eth_ops;
}
EXPORT_SYMBOL_GPL(enetc_set_ethtool_ops);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 1619943fb263..7b882b8921fe 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -6,6 +6,8 @@
#define ENETC_MM_VERIFY_SLEEP_US USEC_PER_MSEC
#define ENETC_MM_VERIFY_RETRIES 3
+#define ENETC_NUM_TC 8
+
/* ENETC device IDs */
#define ENETC_DEV_ID_PF 0xe100
#define ENETC_DEV_ID_VF 0xef00
@@ -21,10 +23,9 @@
#define ENETC_SICTR0 0x18
#define ENETC_SICTR1 0x1c
#define ENETC_SIPCAPR0 0x20
-#define ENETC_SIPCAPR0_PSFP BIT(9)
#define ENETC_SIPCAPR0_RSS BIT(8)
-#define ENETC_SIPCAPR0_QBV BIT(4)
-#define ENETC_SIPCAPR0_QBU BIT(3)
+#define ENETC_SIPCAPR0_RFS BIT(2)
+#define ENETC_SIPCAPR0_LSO BIT(1)
#define ENETC_SIPCAPR1 0x24
#define ENETC_SITGTGR 0x30
#define ENETC_SIRBGCR 0x38
@@ -42,6 +43,9 @@
#define ENETC_SIPMAR0 0x80
#define ENETC_SIPMAR1 0x84
+#define ENETC_SICVLANR1 0x90
+#define ENETC_SICVLANR2 0x94
+#define SICVLANR_ETYPE GENMASK(15, 0)
/* VF-PF Message passing */
#define ENETC_DEFAULT_MSG_SIZE 1024 /* and max size */
@@ -191,6 +195,9 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PCAPR0 0x0900
#define ENETC_PCAPR0_RXBDR(val) ((val) >> 24)
#define ENETC_PCAPR0_TXBDR(val) (((val) >> 16) & 0xff)
+#define ENETC_PCAPR0_PSFP BIT(9)
+#define ENETC_PCAPR0_QBV BIT(4)
+#define ENETC_PCAPR0_QBU BIT(3)
#define ENETC_PCAPR1 0x0904
#define ENETC_PSICFGR0(n) (0x0940 + (n) * 0xc) /* n = SI index */
#define ENETC_PSICFGR0_SET_TXBDR(val) ((val) & 0xff)
@@ -368,6 +375,11 @@ enum enetc_bdr_type {TX, RX};
/** Global regs, offset: 2_0000h */
#define ENETC_GLOBAL_BASE 0x20000
#define ENETC_G_EIPBRR0 0x0bf8
+#define EIPBRR0_REVISION GENMASK(15, 0)
+#define ENETC_REV_1_0 0x0100
+#define ENETC_REV_4_1 0x0401
+#define ENETC_REV_4_3 0x0403
+
#define ENETC_G_EIPBRR1 0x0bfc
#define ENETC_G_EPFBLPR(n) (0xd00 + 4 * (n))
#define ENETC_G_EPFBLPR1_XGMII 0x80000000
@@ -396,18 +408,22 @@ struct enetc_hw {
*/
extern rwlock_t enetc_mdio_lock;
+DECLARE_STATIC_KEY_FALSE(enetc_has_err050089);
+
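+/* Editor's note: this static branch is enabled by the ENETC MDIO driver only
+ * when a device affected by the ERR050089 erratum is probed (see
+ * enetc_pci_mdio.c), so unaffected platforms skip the rwlock on the register
+ * access fast path entirely.
+ */
+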
/* use this locking primitive only on the fast datapath to
* group together multiple non-MDIO register accesses to
* minimize the overhead of the lock
*/
static inline void enetc_lock_mdio(void)
{
- read_lock(&enetc_mdio_lock);
+ if (static_branch_unlikely(&enetc_has_err050089))
+ read_lock(&enetc_mdio_lock);
}
static inline void enetc_unlock_mdio(void)
{
- read_unlock(&enetc_mdio_lock);
+ if (static_branch_unlikely(&enetc_has_err050089))
+ read_unlock(&enetc_mdio_lock);
}
/* use these accessors only on the fast datapath under
@@ -416,14 +432,16 @@ static inline void enetc_unlock_mdio(void)
*/
static inline u32 enetc_rd_reg_hot(void __iomem *reg)
{
- lockdep_assert_held(&enetc_mdio_lock);
+ if (static_branch_unlikely(&enetc_has_err050089))
+ lockdep_assert_held(&enetc_mdio_lock);
return ioread32(reg);
}
static inline void enetc_wr_reg_hot(void __iomem *reg, u32 val)
{
- lockdep_assert_held(&enetc_mdio_lock);
+ if (static_branch_unlikely(&enetc_has_err050089))
+ lockdep_assert_held(&enetc_mdio_lock);
iowrite32(val, reg);
}
@@ -452,9 +470,13 @@ static inline u32 _enetc_rd_mdio_reg_wa(void __iomem *reg)
unsigned long flags;
u32 val;
- write_lock_irqsave(&enetc_mdio_lock, flags);
- val = ioread32(reg);
- write_unlock_irqrestore(&enetc_mdio_lock, flags);
+ if (static_branch_unlikely(&enetc_has_err050089)) {
+ write_lock_irqsave(&enetc_mdio_lock, flags);
+ val = ioread32(reg);
+ write_unlock_irqrestore(&enetc_mdio_lock, flags);
+ } else {
+ val = ioread32(reg);
+ }
return val;
}
@@ -463,9 +485,13 @@ static inline void _enetc_wr_mdio_reg_wa(void __iomem *reg, u32 val)
{
unsigned long flags;
- write_lock_irqsave(&enetc_mdio_lock, flags);
- iowrite32(val, reg);
- write_unlock_irqrestore(&enetc_mdio_lock, flags);
+ if (static_branch_unlikely(&enetc_has_err050089)) {
+ write_lock_irqsave(&enetc_mdio_lock, flags);
+ iowrite32(val, reg);
+ write_unlock_irqrestore(&enetc_mdio_lock, flags);
+ } else {
+ iowrite32(val, reg);
+ }
}
#ifdef ioread64
@@ -485,7 +511,7 @@ static inline u64 _enetc_rd_reg64(void __iomem *reg)
tmp = ioread32(reg + 4);
} while (high != tmp);
- return le64_to_cpu((__le64)high << 32 | low);
+ return (u64)high << 32 | low;
}
#endif
@@ -511,6 +537,7 @@ static inline u64 _enetc_rd_reg64_wa(void __iomem *reg)
/* port register accessors - PF only */
#define enetc_port_rd(hw, off) enetc_rd_reg((hw)->port + (off))
#define enetc_port_wr(hw, off, val) enetc_wr_reg((hw)->port + (off), val)
+#define enetc_port_rd64(hw, off) _enetc_rd_reg64_wa((hw)->port + (off))
#define enetc_port_rd_mdio(hw, off) _enetc_rd_mdio_reg_wa((hw)->port + (off))
#define enetc_port_wr_mdio(hw, off, val) _enetc_wr_mdio_reg_wa(\
(hw)->port + (off), val)
@@ -533,11 +560,23 @@ static inline u64 _enetc_rd_reg64_wa(void __iomem *reg)
union enetc_tx_bd {
struct {
__le64 addr;
- __le16 buf_len;
+ union {
+ __le16 buf_len;
+ __le16 hdr_len; /* For LSO, ENETC 4.1 and later */
+ };
__le16 frm_len;
union {
struct {
- u8 reserved[3];
+ u8 l3_aux0;
+#define ENETC_TX_BD_L3_START GENMASK(6, 0)
+#define ENETC_TX_BD_IPCS BIT(7)
+ u8 l3_aux1;
+#define ENETC_TX_BD_L3_HDR_LEN GENMASK(6, 0)
+#define ENETC_TX_BD_L3T BIT(7)
+ u8 l4_aux;
+#define ENETC_TX_BD_L4T GENMASK(7, 5)
+#define ENETC_TXBD_L4T_UDP 1
+#define ENETC_TXBD_L4T_TCP 2
u8 flags;
}; /* default layout */
__le32 txstart;
@@ -548,23 +587,27 @@ union enetc_tx_bd {
__le32 tstamp;
__le16 tpid;
__le16 vid;
- u8 reserved[6];
+ __le16 lso_sg_size; /* For ENETC 4.1 and later */
+ __le16 frm_len_ext; /* For ENETC 4.1 and later */
+ u8 reserved[2];
u8 e_flags;
u8 flags;
} ext; /* Tx BD extension */
struct {
__le32 tstamp;
- u8 reserved[10];
+ u8 reserved[8];
+ __le16 lso_err_count; /* For ENETC 4.1 and later */
u8 status;
u8 flags;
} wb; /* writeback descriptor */
};
enum enetc_txbd_flags {
- ENETC_TXBD_FLAGS_RES0 = BIT(0), /* reserved */
+ ENETC_TXBD_FLAGS_L4CS = BIT(0), /* For ENETC 4.1 and later */
ENETC_TXBD_FLAGS_TSE = BIT(1),
+ ENETC_TXBD_FLAGS_LSO = BIT(1), /* For ENETC 4.1 and later */
ENETC_TXBD_FLAGS_W = BIT(2),
- ENETC_TXBD_FLAGS_RES3 = BIT(3), /* reserved */
+ ENETC_TXBD_FLAGS_CSUM_LSO = BIT(3), /* For ENETC 4.1 and later */
ENETC_TXBD_FLAGS_TXSTART = BIT(4),
ENETC_TXBD_FLAGS_EX = BIT(6),
ENETC_TXBD_FLAGS_F = BIT(7)
@@ -572,6 +615,7 @@ enum enetc_txbd_flags {
#define ENETC_TXBD_STATS_WIN BIT(7)
#define ENETC_TXBD_TXSTART_MASK GENMASK(24, 0)
#define ENETC_TXBD_FLAGS_OFFSET 24
+#define ENETC_TXBD_TSTAMP GENMASK(29, 0)
static inline __le32 enetc_txbd_set_tx_start(u64 tx_start, u8 flags)
{
@@ -633,6 +677,8 @@ union enetc_rx_bd {
#define ENETC_CBD_FLAGS_SF BIT(7) /* short format */
#define ENETC_CBD_STATUS_MASK 0xf
+#define ENETC_TPID_8021Q 0
+
struct enetc_cmd_rfse {
u8 smac_h[6];
u8 smac_m[6];
@@ -957,15 +1003,17 @@ struct enetc_cbd {
u8 status_flags;
};
-#define ENETC_CLK 400000000ULL
-static inline u32 enetc_cycles_to_usecs(u32 cycles)
+#define ENETC_CLK_400M 400000000ULL
+#define ENETC_CLK_333M 333000000ULL
+
+static inline u32 enetc_cycles_to_usecs(u32 cycles, u64 clk_freq)
{
- return (u32)div_u64(cycles * 1000000ULL, ENETC_CLK);
+ return (u32)div_u64(cycles * 1000000ULL, clk_freq);
}
-static inline u32 enetc_usecs_to_cycles(u32 usecs)
+static inline u32 enetc_usecs_to_cycles(u32 usecs, u64 clk_freq)
{
- return (u32)div_u64(usecs * ENETC_CLK, 1000000ULL);
+ return (u32)div_u64(usecs * clk_freq, 1000000ULL);
}
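/* Editor's note: a worked example of the parameterized conversion, assuming
 * clk_freq = ENETC_CLK_333M: enetc_usecs_to_cycles(100, 333000000) yields
 * 100 * 333000000 / 1000000 = 33300 cycles, and
 * enetc_cycles_to_usecs(33300, 333000000) gives back 100 us.
 */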
/* Port traffic class frame preemption register */
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
index a1b595bd7993..e108cac8288d 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
@@ -4,11 +4,35 @@
#include <linux/of_mdio.h>
#include "enetc_pf.h"
+#define NETC_EMDIO_VEN_ID 0x1131
+#define NETC_EMDIO_DEV_ID 0xee00
#define ENETC_MDIO_DEV_ID 0xee01
#define ENETC_MDIO_DEV_NAME "FSL PCIe IE Central MDIO"
#define ENETC_MDIO_BUS_NAME ENETC_MDIO_DEV_NAME " Bus"
#define ENETC_MDIO_DRV_NAME ENETC_MDIO_DEV_NAME " driver"
+DEFINE_STATIC_KEY_FALSE(enetc_has_err050089);
+EXPORT_SYMBOL_GPL(enetc_has_err050089);
+
+static void enetc_emdio_enable_err050089(struct pci_dev *pdev)
+{
+ if (pdev->vendor == PCI_VENDOR_ID_FREESCALE &&
+ pdev->device == ENETC_MDIO_DEV_ID) {
+ static_branch_inc(&enetc_has_err050089);
+ dev_info(&pdev->dev, "Enabled ERR050089 workaround\n");
+ }
+}
+
+static void enetc_emdio_disable_err050089(struct pci_dev *pdev)
+{
+ if (pdev->vendor == PCI_VENDOR_ID_FREESCALE &&
+ pdev->device == ENETC_MDIO_DEV_ID) {
+ static_branch_dec(&enetc_has_err050089);
+ if (!static_key_enabled(&enetc_has_err050089.key))
+ dev_info(&pdev->dev, "Disabled ERR050089 workaround\n");
+ }
+}
+
static int enetc_pci_mdio_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -62,6 +86,8 @@ static int enetc_pci_mdio_probe(struct pci_dev *pdev,
goto err_pci_mem_reg;
}
+ enetc_emdio_enable_err050089(pdev);
+
err = of_mdiobus_register(bus, dev->of_node);
if (err)
goto err_mdiobus_reg;
@@ -71,6 +97,7 @@ static int enetc_pci_mdio_probe(struct pci_dev *pdev,
return 0;
err_mdiobus_reg:
+ enetc_emdio_disable_err050089(pdev);
pci_release_region(pdev, 0);
err_pci_mem_reg:
pci_disable_device(pdev);
@@ -88,6 +115,9 @@ static void enetc_pci_mdio_remove(struct pci_dev *pdev)
struct enetc_mdio_priv *mdio_priv;
mdiobus_unregister(bus);
+
+ enetc_emdio_disable_err050089(pdev);
+
mdio_priv = bus->priv;
iounmap(mdio_priv->hw->port);
pci_release_region(pdev, 0);
@@ -96,6 +126,7 @@ static void enetc_pci_mdio_remove(struct pci_dev *pdev)
static const struct pci_device_id enetc_pci_mdio_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_MDIO_DEV_ID) },
+ { PCI_DEVICE(NETC_EMDIO_VEN_ID, NETC_EMDIO_DEV_ID) },
{ 0, } /* End of table. */
};
MODULE_DEVICE_TABLE(pci, enetc_pci_mdio_id_table);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 8f6b0bf48139..de0fb272c847 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -2,15 +2,13 @@
/* Copyright 2017-2019 NXP */
#include <linux/unaligned.h>
-#include <linux/mdio.h>
#include <linux/module.h>
-#include <linux/fsl/enetc_mdio.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
-#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/pcs-lynx.h>
#include "enetc_ierb.h"
-#include "enetc_pf.h"
+#include "enetc_pf_common.h"
#define ENETC_DRV_NAME_STR "ENETC PF driver"
@@ -33,18 +31,15 @@ static void enetc_pf_set_primary_mac_addr(struct enetc_hw *hw, int si,
__raw_writew(lower, hw->port + ENETC_PSIPMAR1(si));
}
-static int enetc_pf_set_mac_addr(struct net_device *ndev, void *addr)
+static struct phylink_pcs *enetc_pf_create_pcs(struct enetc_pf *pf,
+ struct mii_bus *bus)
{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct sockaddr *saddr = addr;
-
- if (!is_valid_ether_addr(saddr->sa_data))
- return -EADDRNOTAVAIL;
-
- eth_hw_addr_set(ndev, saddr->sa_data);
- enetc_pf_set_primary_mac_addr(&priv->si->hw, 0, saddr->sa_data);
+ return lynx_pcs_create_mdiodev(bus, 0);
+}
- return 0;
+static void enetc_pf_destroy_pcs(struct phylink_pcs *pcs)
+{
+ lynx_pcs_destroy(pcs);
}
static void enetc_set_vlan_promisc(struct enetc_hw *hw, char si_map)
@@ -77,30 +72,6 @@ static void enetc_set_isol_vlan(struct enetc_hw *hw, int si, u16 vlan, u8 qos)
enetc_port_wr(hw, ENETC_PSIVLANR(si), val);
}
-static int enetc_mac_addr_hash_idx(const u8 *addr)
-{
- u64 fold = __swab64(ether_addr_to_u64(addr)) >> 16;
- u64 mask = 0;
- int res = 0;
- int i;
-
- for (i = 0; i < 8; i++)
- mask |= BIT_ULL(i * 6);
-
- for (i = 0; i < 6; i++)
- res |= (hweight64(fold & (mask << i)) & 0x1) << i;
-
- return res;
-}
-
-static void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter)
-{
- filter->mac_addr_cnt = 0;
-
- bitmap_zero(filter->mac_hash_table,
- ENETC_MADDR_HASH_TBL_SZ);
-}
-
static void enetc_add_mac_addr_em_filter(struct enetc_mac_filter *filter,
const unsigned char *addr)
{
@@ -109,16 +80,6 @@ static void enetc_add_mac_addr_em_filter(struct enetc_mac_filter *filter,
filter->mac_addr_cnt++;
}
-static void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter,
- const unsigned char *addr)
-{
- int idx = enetc_mac_addr_hash_idx(addr);
-
- /* add hash table entry */
- __set_bit(idx, filter->mac_hash_table);
- filter->mac_addr_cnt++;
-}
-
static void enetc_clear_mac_ht_flt(struct enetc_si *si, int si_idx, int type)
{
bool err = si->errata & ENETC_ERR_UCMCSWP;
@@ -255,67 +216,6 @@ static void enetc_pf_set_rx_mode(struct net_device *ndev)
enetc_port_wr(hw, ENETC_PSIPMR, psipmr);
}
-static void enetc_set_vlan_ht_filter(struct enetc_hw *hw, int si_idx,
- unsigned long hash)
-{
- enetc_port_wr(hw, ENETC_PSIVHFR0(si_idx), lower_32_bits(hash));
- enetc_port_wr(hw, ENETC_PSIVHFR1(si_idx), upper_32_bits(hash));
-}
-
-static int enetc_vid_hash_idx(unsigned int vid)
-{
- int res = 0;
- int i;
-
- for (i = 0; i < 6; i++)
- res |= (hweight8(vid & (BIT(i) | BIT(i + 6))) & 0x1) << i;
-
- return res;
-}
-
-static void enetc_sync_vlan_ht_filter(struct enetc_pf *pf, bool rehash)
-{
- int i;
-
- if (rehash) {
- bitmap_zero(pf->vlan_ht_filter, ENETC_VLAN_HT_SIZE);
-
- for_each_set_bit(i, pf->active_vlans, VLAN_N_VID) {
- int hidx = enetc_vid_hash_idx(i);
-
- __set_bit(hidx, pf->vlan_ht_filter);
- }
- }
-
- enetc_set_vlan_ht_filter(&pf->si->hw, 0, *pf->vlan_ht_filter);
-}
-
-static int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid)
-{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_pf *pf = enetc_si_priv(priv->si);
- int idx;
-
- __set_bit(vid, pf->active_vlans);
-
- idx = enetc_vid_hash_idx(vid);
- if (!__test_and_set_bit(idx, pf->vlan_ht_filter))
- enetc_sync_vlan_ht_filter(pf, false);
-
- return 0;
-}
-
-static int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid)
-{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_pf *pf = enetc_si_priv(priv->si);
-
- __clear_bit(vid, pf->active_vlans);
- enetc_sync_vlan_ht_filter(pf, true);
-
- return 0;
-}
-
static void enetc_set_loopback(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -393,56 +293,6 @@ static int enetc_pf_set_vf_spoofchk(struct net_device *ndev, int vf, bool en)
return 0;
}
-static int enetc_setup_mac_address(struct device_node *np, struct enetc_pf *pf,
- int si)
-{
- struct device *dev = &pf->si->pdev->dev;
- struct enetc_hw *hw = &pf->si->hw;
- u8 mac_addr[ETH_ALEN] = { 0 };
- int err;
-
- /* (1) try to get the MAC address from the device tree */
- if (np) {
- err = of_get_mac_address(np, mac_addr);
- if (err == -EPROBE_DEFER)
- return err;
- }
-
- /* (2) bootloader supplied MAC address */
- if (is_zero_ether_addr(mac_addr))
- enetc_pf_get_primary_mac_addr(hw, si, mac_addr);
-
- /* (3) choose a random one */
- if (is_zero_ether_addr(mac_addr)) {
- eth_random_addr(mac_addr);
- dev_info(dev, "no MAC address specified for SI%d, using %pM\n",
- si, mac_addr);
- }
-
- enetc_pf_set_primary_mac_addr(hw, si, mac_addr);
-
- return 0;
-}
-
-static int enetc_setup_mac_addresses(struct device_node *np,
- struct enetc_pf *pf)
-{
- int err, i;
-
- /* The PF might take its MAC from the device tree */
- err = enetc_setup_mac_address(np, pf, 0);
- if (err)
- return err;
-
- for (i = 0; i < pf->total_vfs; i++) {
- err = enetc_setup_mac_address(NULL, pf, i + 1);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static void enetc_port_assign_rfs_entries(struct enetc_si *si)
{
struct enetc_pf *pf = enetc_si_priv(si);
@@ -464,6 +314,23 @@ static void enetc_port_assign_rfs_entries(struct enetc_si *si)
enetc_port_wr(hw, ENETC_PRFSMR, ENETC_PRFSMR_RFSE);
}
+static void enetc_port_get_caps(struct enetc_si *si)
+{
+ struct enetc_hw *hw = &si->hw;
+ u32 val;
+
+ val = enetc_port_rd(hw, ENETC_PCAPR0);
+
+ if (val & ENETC_PCAPR0_QBV)
+ si->hw_features |= ENETC_SI_F_QBV;
+
+ if (val & ENETC_PCAPR0_QBU)
+ si->hw_features |= ENETC_SI_F_QBU;
+
+ if (val & ENETC_PCAPR0_PSFP)
+ si->hw_features |= ENETC_SI_F_PSFP;
+}
+
static void enetc_port_si_configure(struct enetc_si *si)
{
struct enetc_pf *pf = enetc_si_priv(si);
@@ -471,6 +338,8 @@ static void enetc_port_si_configure(struct enetc_si *si)
int num_rings, i;
u32 val;
+ enetc_port_get_caps(si);
+
val = enetc_port_rd(hw, ENETC_PCAPR0);
num_rings = min(ENETC_PCAPR0_RXBDR(val), ENETC_PCAPR0_TXBDR(val));
@@ -585,7 +454,6 @@ static void enetc_mac_enable(struct enetc_si *si, bool en)
static void enetc_configure_port(struct enetc_pf *pf)
{
- u8 hash_key[ENETC_RSSHASH_KEY_SIZE];
struct enetc_hw *hw = &pf->si->hw;
enetc_configure_port_mac(pf->si);
@@ -593,8 +461,7 @@ static void enetc_configure_port(struct enetc_pf *pf)
enetc_port_si_configure(pf->si);
/* set up hash key */
- get_random_bytes(hash_key, ENETC_RSSHASH_KEY_SIZE);
- enetc_set_rss_key(hw, hash_key);
+ enetc_set_default_rss_key(pf);
/* split up RFS entries */
enetc_port_assign_rfs_entries(pf->si);
@@ -665,19 +532,11 @@ static int enetc_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (!num_vfs) {
enetc_msg_psi_free(pf);
- kfree(pf->vf_state);
pf->num_vfs = 0;
pci_disable_sriov(pdev);
} else {
pf->num_vfs = num_vfs;
- pf->vf_state = kcalloc(num_vfs, sizeof(struct enetc_vf_state),
- GFP_KERNEL);
- if (!pf->vf_state) {
- pf->num_vfs = 0;
- return -ENOMEM;
- }
-
err = enetc_msg_psi_init(pf);
if (err) {
dev_err(&pdev->dev, "enetc_msg_psi_init (%d)\n", err);
@@ -696,7 +555,6 @@ static int enetc_sriov_configure(struct pci_dev *pdev, int num_vfs)
err_en_sriov:
enetc_msg_psi_free(pf);
err_msg_psi:
- kfree(pf->vf_state);
pf->num_vfs = 0;
return err;
@@ -773,189 +631,10 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_setup_tc = enetc_pf_setup_tc,
.ndo_bpf = enetc_setup_bpf,
.ndo_xdp_xmit = enetc_xdp_xmit,
+ .ndo_hwtstamp_get = enetc_hwtstamp_get,
+ .ndo_hwtstamp_set = enetc_hwtstamp_set,
};
-static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
- const struct net_device_ops *ndev_ops)
-{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
-
- SET_NETDEV_DEV(ndev, &si->pdev->dev);
- priv->ndev = ndev;
- priv->si = si;
- priv->dev = &si->pdev->dev;
- si->ndev = ndev;
-
- priv->msg_enable = (NETIF_MSG_WOL << 1) - 1;
- ndev->netdev_ops = ndev_ops;
- enetc_set_ethtool_ops(ndev);
- ndev->watchdog_timeo = 5 * HZ;
- ndev->max_mtu = ENETC_MAX_MTU;
-
- ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK |
- NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
- ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
- ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
- NETIF_F_TSO | NETIF_F_TSO6;
-
- if (si->num_rss)
- ndev->hw_features |= NETIF_F_RXHASH;
-
- ndev->priv_flags |= IFF_UNICAST_FLT;
- ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
- NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
- NETDEV_XDP_ACT_NDO_XMIT_SG;
-
- if (si->hw_features & ENETC_SI_F_PSFP && !enetc_psfp_enable(priv)) {
- priv->active_offloads |= ENETC_F_QCI;
- ndev->features |= NETIF_F_HW_TC;
- ndev->hw_features |= NETIF_F_HW_TC;
- }
-
- /* pick up primary MAC address from SI */
- enetc_load_primary_mac_addr(&si->hw, ndev);
-}
-
-static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np)
-{
- struct device *dev = &pf->si->pdev->dev;
- struct enetc_mdio_priv *mdio_priv;
- struct mii_bus *bus;
- int err;
-
- bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
- if (!bus)
- return -ENOMEM;
-
- bus->name = "Freescale ENETC MDIO Bus";
- bus->read = enetc_mdio_read_c22;
- bus->write = enetc_mdio_write_c22;
- bus->read_c45 = enetc_mdio_read_c45;
- bus->write_c45 = enetc_mdio_write_c45;
- bus->parent = dev;
- mdio_priv = bus->priv;
- mdio_priv->hw = &pf->si->hw;
- mdio_priv->mdio_base = ENETC_EMDIO_BASE;
- snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
-
- err = of_mdiobus_register(bus, np);
- if (err)
- return dev_err_probe(dev, err, "cannot register MDIO bus\n");
-
- pf->mdio = bus;
-
- return 0;
-}
-
-static void enetc_mdio_remove(struct enetc_pf *pf)
-{
- if (pf->mdio)
- mdiobus_unregister(pf->mdio);
-}
-
-static int enetc_imdio_create(struct enetc_pf *pf)
-{
- struct device *dev = &pf->si->pdev->dev;
- struct enetc_mdio_priv *mdio_priv;
- struct phylink_pcs *phylink_pcs;
- struct mii_bus *bus;
- int err;
-
- bus = mdiobus_alloc_size(sizeof(*mdio_priv));
- if (!bus)
- return -ENOMEM;
-
- bus->name = "Freescale ENETC internal MDIO Bus";
- bus->read = enetc_mdio_read_c22;
- bus->write = enetc_mdio_write_c22;
- bus->read_c45 = enetc_mdio_read_c45;
- bus->write_c45 = enetc_mdio_write_c45;
- bus->parent = dev;
- bus->phy_mask = ~0;
- mdio_priv = bus->priv;
- mdio_priv->hw = &pf->si->hw;
- mdio_priv->mdio_base = ENETC_PM_IMDIO_BASE;
- snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", dev_name(dev));
-
- err = mdiobus_register(bus);
- if (err) {
- dev_err(dev, "cannot register internal MDIO bus (%d)\n", err);
- goto free_mdio_bus;
- }
-
- phylink_pcs = lynx_pcs_create_mdiodev(bus, 0);
- if (IS_ERR(phylink_pcs)) {
- err = PTR_ERR(phylink_pcs);
- dev_err(dev, "cannot create lynx pcs (%d)\n", err);
- goto unregister_mdiobus;
- }
-
- pf->imdio = bus;
- pf->pcs = phylink_pcs;
-
- return 0;
-
-unregister_mdiobus:
- mdiobus_unregister(bus);
-free_mdio_bus:
- mdiobus_free(bus);
- return err;
-}
-
-static void enetc_imdio_remove(struct enetc_pf *pf)
-{
- if (pf->pcs)
- lynx_pcs_destroy(pf->pcs);
- if (pf->imdio) {
- mdiobus_unregister(pf->imdio);
- mdiobus_free(pf->imdio);
- }
-}
-
-static bool enetc_port_has_pcs(struct enetc_pf *pf)
-{
- return (pf->if_mode == PHY_INTERFACE_MODE_SGMII ||
- pf->if_mode == PHY_INTERFACE_MODE_1000BASEX ||
- pf->if_mode == PHY_INTERFACE_MODE_2500BASEX ||
- pf->if_mode == PHY_INTERFACE_MODE_USXGMII);
-}
-
-static int enetc_mdiobus_create(struct enetc_pf *pf, struct device_node *node)
-{
- struct device_node *mdio_np;
- int err;
-
- mdio_np = of_get_child_by_name(node, "mdio");
- if (mdio_np) {
- err = enetc_mdio_probe(pf, mdio_np);
-
- of_node_put(mdio_np);
- if (err)
- return err;
- }
-
- if (enetc_port_has_pcs(pf)) {
- err = enetc_imdio_create(pf);
- if (err) {
- enetc_mdio_remove(pf);
- return err;
- }
- }
-
- return 0;
-}
-
-static void enetc_mdiobus_destroy(struct enetc_pf *pf)
-{
- enetc_mdio_remove(pf);
- enetc_imdio_remove(pf);
-}
-
static struct phylink_pcs *
enetc_pl_mac_select_pcs(struct phylink_config *config, phy_interface_t iface)
{
@@ -1101,47 +780,6 @@ static const struct phylink_mac_ops enetc_mac_phylink_ops = {
.mac_link_down = enetc_pl_mac_link_down,
};
-static int enetc_phylink_create(struct enetc_ndev_priv *priv,
- struct device_node *node)
-{
- struct enetc_pf *pf = enetc_si_priv(priv->si);
- struct phylink *phylink;
- int err;
-
- pf->phylink_config.dev = &priv->ndev->dev;
- pf->phylink_config.type = PHYLINK_NETDEV;
- pf->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
- MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
-
- __set_bit(PHY_INTERFACE_MODE_INTERNAL,
- pf->phylink_config.supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_SGMII,
- pf->phylink_config.supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_1000BASEX,
- pf->phylink_config.supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_2500BASEX,
- pf->phylink_config.supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_USXGMII,
- pf->phylink_config.supported_interfaces);
- phy_interface_set_rgmii(pf->phylink_config.supported_interfaces);
-
- phylink = phylink_create(&pf->phylink_config, of_fwnode_handle(node),
- pf->if_mode, &enetc_mac_phylink_ops);
- if (IS_ERR(phylink)) {
- err = PTR_ERR(phylink);
- return err;
- }
-
- priv->phylink = phylink;
-
- return 0;
-}
-
-static void enetc_phylink_destroy(struct enetc_ndev_priv *priv)
-{
- phylink_destroy(priv->phylink);
-}
-
/* Initialize the entire shared memory for the flow steering entries
* of this port (PF + VFs)
*/
@@ -1191,21 +829,36 @@ static int enetc_pf_register_with_ierb(struct pci_dev *pdev)
{
struct platform_device *ierb_pdev;
struct device_node *ierb_node;
+ int ret;
ierb_node = of_find_compatible_node(NULL, NULL,
"fsl,ls1028a-enetc-ierb");
- if (!ierb_node || !of_device_is_available(ierb_node))
+ if (!ierb_node)
return -ENODEV;
+ if (!of_device_is_available(ierb_node)) {
+ of_node_put(ierb_node);
+ return -ENODEV;
+ }
+
ierb_pdev = of_find_device_by_node(ierb_node);
of_node_put(ierb_node);
if (!ierb_pdev)
return -EPROBE_DEFER;
- return enetc_ierb_register_pf(ierb_pdev, pdev);
+ ret = enetc_ierb_register_pf(ierb_pdev, pdev);
+
+ put_device(&ierb_pdev->dev);
+
+ return ret;
}
+static const struct enetc_si_ops enetc_psi_ops = {
+ .get_rss_table = enetc_get_rss_table,
+ .set_rss_table = enetc_set_rss_table,
+};
+
static struct enetc_si *enetc_psi_create(struct pci_dev *pdev)
{
struct enetc_si *si;
@@ -1224,6 +877,14 @@ static struct enetc_si *enetc_psi_create(struct pci_dev *pdev)
goto out_pci_remove;
}
+ si->revision = enetc_get_ip_revision(&si->hw);
+ si->ops = &enetc_psi_ops;
+ err = enetc_get_driver_data(si);
+ if (err) {
+ dev_err(&pdev->dev, "Could not get PF driver data\n");
+ goto out_pci_remove;
+ }
+
err = enetc_setup_cbdr(&pdev->dev, &si->hw, ENETC_CBDR_DEFAULT_SIZE,
&si->cbd_ring);
if (err)
@@ -1259,6 +920,14 @@ static void enetc_psi_destroy(struct pci_dev *pdev)
enetc_pci_remove(pdev);
}
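+/* Editor's note: these callbacks back the shared helpers in enetc_pf_common.c
+ * (MAC address setup, MDIO/PCS creation, PSFP enable), keeping the
+ * register-level differences of each IP revision behind pf->ops.
+ */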
+static const struct enetc_pf_ops enetc_pf_ops = {
+ .set_si_primary_mac = enetc_pf_set_primary_mac_addr,
+ .get_si_primary_mac = enetc_pf_get_primary_mac_addr,
+ .create_pcs = enetc_pf_create_pcs,
+ .destroy_pcs = enetc_pf_destroy_pcs,
+ .enable_psfp = enetc_psfp_enable,
+};
+
static int enetc_pf_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -1285,7 +954,15 @@ static int enetc_pf_probe(struct pci_dev *pdev,
pf = enetc_si_priv(si);
pf->si = si;
+ pf->ops = &enetc_pf_ops;
+
pf->total_vfs = pci_sriov_get_totalvfs(pdev);
+ if (pf->total_vfs) {
+ pf->vf_state = kcalloc(pf->total_vfs, sizeof(struct enetc_vf_state),
+ GFP_KERNEL);
+ if (!pf->vf_state)
+ goto err_alloc_vf_state;
+ }
err = enetc_setup_mac_addresses(node, pf);
if (err)
@@ -1338,7 +1015,7 @@ static int enetc_pf_probe(struct pci_dev *pdev,
if (err)
goto err_mdiobus_create;
- err = enetc_phylink_create(priv, node);
+ err = enetc_phylink_create(priv, node, &enetc_mac_phylink_ops);
if (err)
goto err_phylink_create;
@@ -1363,6 +1040,8 @@ err_alloc_si_res:
free_netdev(ndev);
err_alloc_netdev:
err_setup_mac_addresses:
+ kfree(pf->vf_state);
+err_alloc_vf_state:
enetc_psi_destroy(pdev);
err_psi_create:
return err;
@@ -1389,6 +1068,7 @@ static void enetc_pf_remove(struct pci_dev *pdev)
enetc_free_si_resources(priv);
free_netdev(si->ndev);
+ kfree(pf->vf_state);
enetc_psi_destroy(pdev);
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.h b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
index c26bd66e4597..ae407e9e9ee7 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
@@ -5,19 +5,8 @@
#include <linux/phylink.h>
#define ENETC_PF_NUM_RINGS 8
-
-enum enetc_mac_addr_type {UC, MC, MADDR_TYPE};
#define ENETC_MAX_NUM_MAC_FLT ((ENETC_MAX_NUM_VFS + 1) * MADDR_TYPE)
-#define ENETC_MADDR_HASH_TBL_SZ 64
-struct enetc_mac_filter {
- union {
- char mac_addr[ETH_ALEN];
- DECLARE_BITMAP(mac_hash_table, ENETC_MADDR_HASH_TBL_SZ);
- };
- int mac_addr_cnt;
-};
-
#define ENETC_VLAN_HT_SIZE 64
enum enetc_vf_flags {
@@ -28,6 +17,25 @@ struct enetc_vf_state {
enum enetc_vf_flags flags;
};
+struct enetc_port_caps {
+ u32 half_duplex:1;
+ int num_vsi;
+ int num_msix;
+ int num_rx_bdr;
+ int num_tx_bdr;
+ int mac_filter_num;
+};
+
+struct enetc_pf;
+
+struct enetc_pf_ops {
+ void (*set_si_primary_mac)(struct enetc_hw *hw, int si, const u8 *addr);
+ void (*get_si_primary_mac)(struct enetc_hw *hw, int si, u8 *addr);
+ struct phylink_pcs *(*create_pcs)(struct enetc_pf *pf, struct mii_bus *bus);
+ void (*destroy_pcs)(struct phylink_pcs *pcs);
+ int (*enable_psfp)(struct enetc_ndev_priv *priv);
+};
+
struct enetc_pf {
struct enetc_si *si;
int num_vfs; /* number of active VFs, after sriov_init */
@@ -50,6 +58,11 @@ struct enetc_pf {
phy_interface_t if_mode;
struct phylink_config phylink_config;
+
+ struct enetc_port_caps caps;
+ const struct enetc_pf_ops *ops;
+
+ int num_mfe; /* number of mac address filter table entries */
};
#define phylink_to_enetc_pf(config) \
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
new file mode 100644
index 000000000000..76263b8566bb
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
@@ -0,0 +1,439 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2024 NXP */
+
+#include <linux/fsl/enetc_mdio.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+
+#include "enetc_pf_common.h"
+
+static void enetc_set_si_hw_addr(struct enetc_pf *pf, int si,
+ const u8 *mac_addr)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+
+ pf->ops->set_si_primary_mac(hw, si, mac_addr);
+}
+
+static void enetc_get_si_hw_addr(struct enetc_pf *pf, int si, u8 *mac_addr)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+
+ pf->ops->get_si_primary_mac(hw, si, mac_addr);
+}
+
+int enetc_pf_set_mac_addr(struct net_device *ndev, void *addr)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ struct sockaddr *saddr = addr;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ eth_hw_addr_set(ndev, saddr->sa_data);
+ enetc_set_si_hw_addr(pf, 0, saddr->sa_data);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_pf_set_mac_addr);
+
+static int enetc_setup_mac_address(struct device_node *np, struct enetc_pf *pf,
+ int si)
+{
+ struct device *dev = &pf->si->pdev->dev;
+ u8 mac_addr[ETH_ALEN] = { 0 };
+ int err;
+
+ /* (1) try to get the MAC address from the device tree */
+ if (np) {
+ err = of_get_mac_address(np, mac_addr);
+ if (err == -EPROBE_DEFER)
+ return err;
+ }
+
+ /* (2) bootloader supplied MAC address */
+ if (is_zero_ether_addr(mac_addr))
+ enetc_get_si_hw_addr(pf, si, mac_addr);
+
+ /* (3) choose a random one */
+ if (is_zero_ether_addr(mac_addr)) {
+ eth_random_addr(mac_addr);
+ dev_info(dev, "no MAC address specified for SI%d, using %pM\n",
+ si, mac_addr);
+ }
+
+ enetc_set_si_hw_addr(pf, si, mac_addr);
+
+ return 0;
+}
+
+int enetc_setup_mac_addresses(struct device_node *np, struct enetc_pf *pf)
+{
+ int err, i;
+
+ /* The PF might take its MAC from the device tree */
+ err = enetc_setup_mac_address(np, pf, 0);
+ if (err)
+ return err;
+
+ for (i = 0; i < pf->total_vfs; i++) {
+ err = enetc_setup_mac_address(NULL, pf, i + 1);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_setup_mac_addresses);
+
+void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
+ const struct net_device_ops *ndev_ops)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(si);
+
+ SET_NETDEV_DEV(ndev, &si->pdev->dev);
+ priv->ndev = ndev;
+ priv->si = si;
+ priv->dev = &si->pdev->dev;
+ si->ndev = ndev;
+
+ priv->msg_enable = (NETIF_MSG_WOL << 1) - 1;
+ priv->sysclk_freq = si->drvdata->sysclk_freq;
+ priv->max_frags = si->drvdata->max_frags;
+ ndev->netdev_ops = ndev_ops;
+ enetc_set_ethtool_ops(ndev);
+ ndev->watchdog_timeo = 5 * HZ;
+ ndev->max_mtu = ENETC_MAX_MTU;
+
+ ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
+ ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
+ ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6;
+
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+
+ if (si->drvdata->tx_csum)
+ priv->active_offloads |= ENETC_F_TXCSUM;
+
+ if (si->hw_features & ENETC_SI_F_LSO)
+ priv->active_offloads |= ENETC_F_LSO;
+
+ if (si->num_rss) {
+ ndev->hw_features |= NETIF_F_RXHASH;
+ ndev->features |= NETIF_F_RXHASH;
+ }
+
+ if (!enetc_is_pseudo_mac(si))
+ ndev->hw_features |= NETIF_F_LOOPBACK;
+
+ /* TODO: currently, the i.MX95 ENETC driver does not support advanced features */
+ if (!is_enetc_rev1(si))
+ goto end;
+
+ ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
+
+ if (si->hw_features & ENETC_SI_F_PSFP && pf->ops->enable_psfp &&
+ !pf->ops->enable_psfp(priv)) {
+ priv->active_offloads |= ENETC_F_QCI;
+ ndev->features |= NETIF_F_HW_TC;
+ ndev->hw_features |= NETIF_F_HW_TC;
+ }
+
+end:
+ /* pick up primary MAC address from SI */
+ enetc_load_primary_mac_addr(&si->hw, ndev);
+}
+EXPORT_SYMBOL_GPL(enetc_pf_netdev_setup);
+
+static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np)
+{
+ struct device *dev = &pf->si->pdev->dev;
+ struct enetc_mdio_priv *mdio_priv;
+ struct mii_bus *bus;
+ int err;
+
+ bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "Freescale ENETC MDIO Bus";
+ bus->read = enetc_mdio_read_c22;
+ bus->write = enetc_mdio_write_c22;
+ bus->read_c45 = enetc_mdio_read_c45;
+ bus->write_c45 = enetc_mdio_write_c45;
+ bus->parent = dev;
+ mdio_priv = bus->priv;
+ mdio_priv->hw = &pf->si->hw;
+
+ if (is_enetc_rev1(pf->si))
+ mdio_priv->mdio_base = ENETC_EMDIO_BASE;
+ else
+ mdio_priv->mdio_base = ENETC4_EMDIO_BASE;
+
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+
+ err = of_mdiobus_register(bus, np);
+ if (err)
+ return dev_err_probe(dev, err, "cannot register MDIO bus\n");
+
+ pf->mdio = bus;
+
+ return 0;
+}
+
+static void enetc_mdio_remove(struct enetc_pf *pf)
+{
+ if (pf->mdio)
+ mdiobus_unregister(pf->mdio);
+}
+
+static int enetc_imdio_create(struct enetc_pf *pf)
+{
+ struct device *dev = &pf->si->pdev->dev;
+ struct enetc_mdio_priv *mdio_priv;
+ struct phylink_pcs *phylink_pcs;
+ struct mii_bus *bus;
+ int err;
+
+ if (!pf->ops->create_pcs) {
+ dev_err(dev, "Creating PCS is not supported\n");
+
+ return -EOPNOTSUPP;
+ }
+
+ bus = mdiobus_alloc_size(sizeof(*mdio_priv));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "Freescale ENETC internal MDIO Bus";
+ bus->read = enetc_mdio_read_c22;
+ bus->write = enetc_mdio_write_c22;
+ bus->read_c45 = enetc_mdio_read_c45;
+ bus->write_c45 = enetc_mdio_write_c45;
+ bus->parent = dev;
+ bus->phy_mask = ~0;
+ mdio_priv = bus->priv;
+ mdio_priv->hw = &pf->si->hw;
+
+ if (is_enetc_rev1(pf->si))
+ mdio_priv->mdio_base = ENETC_PM_IMDIO_BASE;
+ else
+ mdio_priv->mdio_base = ENETC4_PM_IMDIO_BASE;
+
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", dev_name(dev));
+
+ err = mdiobus_register(bus);
+ if (err) {
+ dev_err(dev, "cannot register internal MDIO bus (%d)\n", err);
+ goto free_mdio_bus;
+ }
+
+ phylink_pcs = pf->ops->create_pcs(pf, bus);
+ if (IS_ERR(phylink_pcs)) {
+ err = PTR_ERR(phylink_pcs);
+ dev_err(dev, "cannot create lynx pcs (%d)\n", err);
+ goto unregister_mdiobus;
+ }
+
+ pf->imdio = bus;
+ pf->pcs = phylink_pcs;
+
+ return 0;
+
+unregister_mdiobus:
+ mdiobus_unregister(bus);
+free_mdio_bus:
+ mdiobus_free(bus);
+ return err;
+}
+
+static void enetc_imdio_remove(struct enetc_pf *pf)
+{
+ if (pf->pcs && pf->ops->destroy_pcs)
+ pf->ops->destroy_pcs(pf->pcs);
+
+ if (pf->imdio) {
+ mdiobus_unregister(pf->imdio);
+ mdiobus_free(pf->imdio);
+ }
+}
+
+static bool enetc_port_has_pcs(struct enetc_pf *pf)
+{
+ return (pf->if_mode == PHY_INTERFACE_MODE_SGMII ||
+ pf->if_mode == PHY_INTERFACE_MODE_1000BASEX ||
+ pf->if_mode == PHY_INTERFACE_MODE_2500BASEX ||
+ pf->if_mode == PHY_INTERFACE_MODE_USXGMII);
+}
+
+int enetc_mdiobus_create(struct enetc_pf *pf, struct device_node *node)
+{
+ struct device_node *mdio_np;
+ int err;
+
+ mdio_np = of_get_child_by_name(node, "mdio");
+ if (mdio_np) {
+ err = enetc_mdio_probe(pf, mdio_np);
+
+ of_node_put(mdio_np);
+ if (err)
+ return err;
+ }
+
+ if (enetc_port_has_pcs(pf)) {
+ err = enetc_imdio_create(pf);
+ if (err) {
+ enetc_mdio_remove(pf);
+ return err;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_mdiobus_create);
+
+void enetc_mdiobus_destroy(struct enetc_pf *pf)
+{
+ enetc_mdio_remove(pf);
+ enetc_imdio_remove(pf);
+}
+EXPORT_SYMBOL_GPL(enetc_mdiobus_destroy);
+
+int enetc_phylink_create(struct enetc_ndev_priv *priv, struct device_node *node,
+ const struct phylink_mac_ops *ops)
+{
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ struct phylink *phylink;
+ int err;
+
+ pf->phylink_config.dev = &priv->ndev->dev;
+ pf->phylink_config.type = PHYLINK_NETDEV;
+ pf->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
+
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_USXGMII,
+ pf->phylink_config.supported_interfaces);
+ phy_interface_set_rgmii(pf->phylink_config.supported_interfaces);
+
+ phylink = phylink_create(&pf->phylink_config, of_fwnode_handle(node),
+ pf->if_mode, ops);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ return err;
+ }
+
+ priv->phylink = phylink;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_phylink_create);
+
+void enetc_phylink_destroy(struct enetc_ndev_priv *priv)
+{
+ phylink_destroy(priv->phylink);
+}
+EXPORT_SYMBOL_GPL(enetc_phylink_destroy);
+
+void enetc_set_default_rss_key(struct enetc_pf *pf)
+{
+ u8 hash_key[ENETC_RSSHASH_KEY_SIZE] = {0};
+
+ /* set up hash key */
+ get_random_bytes(hash_key, ENETC_RSSHASH_KEY_SIZE);
+ enetc_set_rss_key(pf->si, hash_key);
+}
+EXPORT_SYMBOL_GPL(enetc_set_default_rss_key);
+
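+/* Editor's note: the 12-bit VLAN ID is folded into a 6-bit hash-table index
+ * by XOR-ing bit i with bit (i + 6) for i = 0..5; hweight8() of the two
+ * selected bits masked with 0x1 computes exactly that parity.
+ */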
+static int enetc_vid_hash_idx(unsigned int vid)
+{
+ int res = 0;
+ int i;
+
+ for (i = 0; i < 6; i++)
+ res |= (hweight8(vid & (BIT(i) | BIT(i + 6))) & 0x1) << i;
+
+ return res;
+}
+
+static void enetc_refresh_vlan_ht_filter(struct enetc_pf *pf)
+{
+ int i;
+
+ bitmap_zero(pf->vlan_ht_filter, ENETC_VLAN_HT_SIZE);
+ for_each_set_bit(i, pf->active_vlans, VLAN_N_VID) {
+ int hidx = enetc_vid_hash_idx(i);
+
+ __set_bit(hidx, pf->vlan_ht_filter);
+ }
+}
+
+static void enetc_set_si_vlan_ht_filter(struct enetc_si *si,
+ int si_id, u64 hash)
+{
+ struct enetc_hw *hw = &si->hw;
+ int high_reg_off, low_reg_off;
+
+ if (is_enetc_rev1(si)) {
+ low_reg_off = ENETC_PSIVHFR0(si_id);
+ high_reg_off = ENETC_PSIVHFR1(si_id);
+ } else {
+ low_reg_off = ENETC4_PSIVHFR0(si_id);
+ high_reg_off = ENETC4_PSIVHFR1(si_id);
+ }
+
+ enetc_port_wr(hw, low_reg_off, lower_32_bits(hash));
+ enetc_port_wr(hw, high_reg_off, upper_32_bits(hash));
+}
+
+int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ int idx;
+
+ __set_bit(vid, pf->active_vlans);
+
+ idx = enetc_vid_hash_idx(vid);
+ if (!__test_and_set_bit(idx, pf->vlan_ht_filter))
+ enetc_set_si_vlan_ht_filter(pf->si, 0, *pf->vlan_ht_filter);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_vlan_rx_add_vid);
+
+int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+
+ if (__test_and_clear_bit(vid, pf->active_vlans)) {
+ enetc_refresh_vlan_ht_filter(pf);
+ enetc_set_si_vlan_ht_filter(pf->si, 0, *pf->vlan_ht_filter);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_vlan_rx_del_vid);
+
+MODULE_DESCRIPTION("NXP ENETC PF common functionality driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.h b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.h
new file mode 100644
index 000000000000..96d4840a3107
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2024 NXP */
+
+#include "enetc_pf.h"
+
+int enetc_pf_set_mac_addr(struct net_device *ndev, void *addr);
+int enetc_setup_mac_addresses(struct device_node *np, struct enetc_pf *pf);
+void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
+ const struct net_device_ops *ndev_ops);
+int enetc_mdiobus_create(struct enetc_pf *pf, struct device_node *node);
+void enetc_mdiobus_destroy(struct enetc_pf *pf);
+int enetc_phylink_create(struct enetc_ndev_priv *priv, struct device_node *node,
+ const struct phylink_mac_ops *ops);
+void enetc_phylink_destroy(struct enetc_ndev_priv *priv);
+void enetc_set_default_rss_key(struct enetc_pf *pf);
+int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid);
+int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid);
+
+static inline u16 enetc_get_ip_revision(struct enetc_hw *hw)
+{
+ return enetc_global_rd(hw, ENETC_G_EIPBRR0) & EIPBRR0_REVISION;
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
index 5243fc031058..b8413d3b4f16 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
@@ -7,9 +7,6 @@
#include "enetc.h"
-int enetc_phc_index = -1;
-EXPORT_SYMBOL_GPL(enetc_phc_index);
-
static struct ptp_clock_info enetc_ptp_caps = {
.owner = THIS_MODULE,
.name = "ENETC PTP clock",
@@ -92,7 +89,6 @@ static int enetc_ptp_probe(struct pci_dev *pdev,
if (err)
goto err_no_clock;
- enetc_phc_index = ptp_qoriq->phc_index;
pci_set_drvdata(pdev, ptp_qoriq);
return 0;
@@ -118,7 +114,6 @@ static void enetc_ptp_remove(struct pci_dev *pdev)
{
struct ptp_qoriq *ptp_qoriq = pci_get_drvdata(pdev);
- enetc_phc_index = -1;
ptp_qoriq_free(ptp_qoriq);
pci_free_irq_vectors(pdev);
kfree(ptp_qoriq);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index b65da49dd926..ccf86651455c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -336,7 +336,7 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
*
* (enetClockFrequency / portTransmitRate) * 100
*/
- hi_credit_reg = (u32)div_u64((ENETC_CLK * 100ULL) * hi_credit_bit,
+ hi_credit_reg = (u32)div_u64((priv->sysclk_freq * 100ULL) * hi_credit_bit,
port_transmit_rate * 1000000ULL);
enetc_port_wr(hw, ENETC_PTCCBSR1(tc), hi_credit_reg);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index dfcaac302e24..6c4b374bcb0e 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -78,11 +78,18 @@ static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct sockaddr *saddr = addr;
+ int err;
if (!is_valid_ether_addr(saddr->sa_data))
return -EADDRNOTAVAIL;
- return enetc_msg_vsi_set_primary_mac_addr(priv, saddr);
+ err = enetc_msg_vsi_set_primary_mac_addr(priv, saddr);
+ if (err)
+ return err;
+
+ eth_hw_addr_set(ndev, saddr->sa_data);
+
+ return 0;
}
static int enetc_vf_set_features(struct net_device *ndev,
@@ -114,6 +121,8 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_set_features = enetc_vf_set_features,
.ndo_eth_ioctl = enetc_ioctl,
.ndo_setup_tc = enetc_vf_setup_tc,
+ .ndo_hwtstamp_get = enetc_hwtstamp_get,
+ .ndo_hwtstamp_set = enetc_hwtstamp_set,
};
static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
@@ -128,6 +137,8 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
si->ndev = ndev;
priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
+ priv->sysclk_freq = si->drvdata->sysclk_freq;
+ priv->max_frags = si->drvdata->max_frags;
ndev->netdev_ops = ndev_ops;
enetc_set_ethtool_ops(ndev);
ndev->watchdog_timeo = 5 * HZ;
@@ -136,21 +147,30 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
- if (si->num_rss)
+ if (si->num_rss) {
ndev->hw_features |= NETIF_F_RXHASH;
+ ndev->features |= NETIF_F_RXHASH;
+ }
/* pick up primary MAC address from SI */
enetc_load_primary_mac_addr(&si->hw, ndev);
}
+static const struct enetc_si_ops enetc_vsi_ops = {
+ .get_rss_table = enetc_get_rss_table,
+ .set_rss_table = enetc_set_rss_table,
+};
+
static int enetc_vf_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -164,6 +184,14 @@ static int enetc_vf_probe(struct pci_dev *pdev,
return dev_err_probe(&pdev->dev, err, "PCI probing failed\n");
si = pci_get_drvdata(pdev);
+ si->revision = ENETC_REV_1_0;
+ si->ops = &enetc_vsi_ops;
+ err = enetc_get_driver_data(si);
+ if (err) {
+ dev_err_probe(&pdev->dev, err,
+ "Could not get VF driver data\n");
+ goto err_alloc_netdev;
+ }
enetc_get_si_caps(si);
diff --git a/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c b/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c
new file mode 100644
index 000000000000..443983fdecd9
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c
@@ -0,0 +1,845 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * NXP NETC Blocks Control Driver
+ *
+ * Copyright 2024 NXP
+ *
+ * This driver handles the pre-initialization of NETC, such as the PCS and
+ * MII protocols, LDID, warm reset, etc. Therefore, all NETC device drivers
+ * can only be probed after the netc-blk-ctrl driver has completed
+ * initialization. In addition, when the system enters suspend mode, IERB,
+ * PRB, and NETCMIX are powered off, except in the wake-on-LAN (WoL) case,
+ * so these blocks need to be reinitialized when the system resumes.
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/fsl/netc_global.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+
+/* NETCMIX registers */
+#define IMX95_CFG_LINK_IO_VAR 0x0
+#define IO_VAR_16FF_16G_SERDES 0x1
+#define IO_VAR(port, var) (((var) & 0xf) << ((port) << 2))
+
+#define IMX95_CFG_LINK_MII_PROT 0x4
+#define CFG_LINK_MII_PORT_0 GENMASK(3, 0)
+#define CFG_LINK_MII_PORT_1 GENMASK(7, 4)
+#define MII_PROT_MII 0x0
+#define MII_PROT_RMII 0x1
+#define MII_PROT_RGMII 0x2
+#define MII_PROT_SERIAL 0x3
+#define MII_PROT(port, prot) (((prot) & 0xf) << ((port) << 2))
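+/* IO_VAR() and MII_PROT() pack one 4-bit field per port:
+ * port N occupies bits [4N+3:4N].
+ */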
+
+#define IMX95_CFG_LINK_PCS_PROT(a) (0x8 + (a) * 4)
+#define PCS_PROT_1G_SGMII BIT(0)
+#define PCS_PROT_2500M_SGMII BIT(1)
+#define PCS_PROT_XFI BIT(3)
+#define PCS_PROT_SFI BIT(4)
+#define PCS_PROT_10G_SXGMII BIT(6)
+
+#define IMX94_EXT_PIN_CONTROL 0x10
+#define MAC2_MAC3_SEL BIT(1)
+
+#define IMX94_NETC_LINK_CFG(a) (0x4c + (a) * 4)
+#define NETC_LINK_CFG_MII_PROT GENMASK(3, 0)
+#define NETC_LINK_CFG_IO_VAR GENMASK(19, 16)
+
+/* NETC privileged register block register */
+#define PRB_NETCRR 0x100
+#define NETCRR_SR BIT(0)
+#define NETCRR_LOCK BIT(1)
+
+#define PRB_NETCSR 0x104
+#define NETCSR_ERROR BIT(0)
+#define NETCSR_STATE BIT(1)
+
+/* NETC integrated endpoint register block register */
+#define IERB_EMDIOFAUXR 0x344
+#define IERB_T0FAUXR 0x444
+#define IERB_ETBCR(a) (0x300c + 0x100 * (a))
+#define IERB_LBCR(a) (0x1010 + 0x40 * (a))
+#define LBCR_MDIO_PHYAD_PRTAD(addr) (((addr) & 0x1f) << 8)
+
+#define IERB_EFAUXR(a) (0x3044 + 0x100 * (a))
+#define IERB_VFAUXR(a) (0x4004 + 0x40 * (a))
+#define FAUXR_LDID GENMASK(3, 0)
+
+/* Platform information */
+#define IMX95_ENETC0_BUS_DEVFN 0x0
+#define IMX95_ENETC1_BUS_DEVFN 0x40
+#define IMX95_ENETC2_BUS_DEVFN 0x80
+
+#define IMX94_ENETC0_BUS_DEVFN 0x100
+#define IMX94_ENETC1_BUS_DEVFN 0x140
+#define IMX94_ENETC2_BUS_DEVFN 0x180
+#define IMX94_TIMER0_BUS_DEVFN 0x1
+#define IMX94_TIMER1_BUS_DEVFN 0x101
+#define IMX94_TIMER2_BUS_DEVFN 0x181
+#define IMX94_ENETC0_LINK 3
+#define IMX94_ENETC1_LINK 4
+#define IMX94_ENETC2_LINK 5
+
+#define NETC_ENETC_ID(a) (a)
+#define NETC_TIMER_ID(a) (a)
+
+/* Flags for different platforms */
+#define NETC_HAS_NETCMIX BIT(0)
+
+struct netc_devinfo {
+ u32 flags;
+ int (*netcmix_init)(struct platform_device *pdev);
+ int (*ierb_init)(struct platform_device *pdev);
+};
+
+struct netc_blk_ctrl {
+ void __iomem *prb;
+ void __iomem *ierb;
+ void __iomem *netcmix;
+
+ const struct netc_devinfo *devinfo;
+ struct platform_device *pdev;
+ struct dentry *debugfs_root;
+};
+
+static void netc_reg_write(void __iomem *base, u32 offset, u32 val)
+{
+ netc_write(base + offset, val);
+}
+
+static u32 netc_reg_read(void __iomem *base, u32 offset)
+{
+ return netc_read(base + offset);
+}
+
+static int netc_of_pci_get_bus_devfn(struct device_node *np)
+{
+ u32 reg[5];
+ int error;
+
+ error = of_property_read_u32_array(np, "reg", reg, ARRAY_SIZE(reg));
+ if (error)
+ return error;
+
+ return (reg[0] >> 8) & 0xffff;
+}
+
+static int netc_get_link_mii_protocol(phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_MII:
+ return MII_PROT_MII;
+ case PHY_INTERFACE_MODE_RMII:
+ return MII_PROT_RMII;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ return MII_PROT_RGMII;
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_USXGMII:
+ return MII_PROT_SERIAL;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int imx95_netcmix_init(struct platform_device *pdev)
+{
+ struct netc_blk_ctrl *priv = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ phy_interface_t interface;
+ int bus_devfn, mii_proto;
+ u32 val;
+ int err;
+
+ /* Default setting of MII protocol */
+ val = MII_PROT(0, MII_PROT_RGMII) | MII_PROT(1, MII_PROT_RGMII) |
+ MII_PROT(2, MII_PROT_SERIAL);
+
+ /* Update the link MII protocol through parsing phy-mode */
+ for_each_available_child_of_node_scoped(np, child) {
+ for_each_available_child_of_node_scoped(child, gchild) {
+ if (!of_device_is_compatible(gchild, "pci1131,e101"))
+ continue;
+
+ bus_devfn = netc_of_pci_get_bus_devfn(gchild);
+ if (bus_devfn < 0)
+ return -EINVAL;
+
+ if (bus_devfn == IMX95_ENETC2_BUS_DEVFN)
+ continue;
+
+ err = of_get_phy_mode(gchild, &interface);
+ if (err)
+ continue;
+
+ mii_proto = netc_get_link_mii_protocol(interface);
+ if (mii_proto < 0)
+ return -EINVAL;
+
+ switch (bus_devfn) {
+ case IMX95_ENETC0_BUS_DEVFN:
+ val = u32_replace_bits(val, mii_proto,
+ CFG_LINK_MII_PORT_0);
+ break;
+ case IMX95_ENETC1_BUS_DEVFN:
+ val = u32_replace_bits(val, mii_proto,
+ CFG_LINK_MII_PORT_1);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* Configure Link I/O variant */
+ netc_reg_write(priv->netcmix, IMX95_CFG_LINK_IO_VAR,
+ IO_VAR(2, IO_VAR_16FF_16G_SERDES));
+ /* Configure Link 2 PCS protocol */
+ netc_reg_write(priv->netcmix, IMX95_CFG_LINK_PCS_PROT(2),
+ PCS_PROT_10G_SXGMII);
+ netc_reg_write(priv->netcmix, IMX95_CFG_LINK_MII_PROT, val);
+
+ return 0;
+}
+
+static int imx94_enetc_get_link_id(struct device_node *np)
+{
+ int bus_devfn = netc_of_pci_get_bus_devfn(np);
+
+ /* Parse ENETC link number */
+ switch (bus_devfn) {
+ case IMX94_ENETC0_BUS_DEVFN:
+ return IMX94_ENETC0_LINK;
+ case IMX94_ENETC1_BUS_DEVFN:
+ return IMX94_ENETC1_LINK;
+ case IMX94_ENETC2_BUS_DEVFN:
+ return IMX94_ENETC2_LINK;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int imx94_link_config(struct netc_blk_ctrl *priv,
+ struct device_node *np, int link_id)
+{
+ phy_interface_t interface;
+ int mii_proto;
+ u32 val;
+
+	/* The node may be disabled and lack a 'phy-mode'
+	 * or 'phy-connection-type' property.
+	 */
+ if (of_get_phy_mode(np, &interface))
+ return 0;
+
+ mii_proto = netc_get_link_mii_protocol(interface);
+ if (mii_proto < 0)
+ return mii_proto;
+
+ val = mii_proto & NETC_LINK_CFG_MII_PROT;
+ if (val == MII_PROT_SERIAL)
+ val = u32_replace_bits(val, IO_VAR_16FF_16G_SERDES,
+ NETC_LINK_CFG_IO_VAR);
+
+ netc_reg_write(priv->netcmix, IMX94_NETC_LINK_CFG(link_id), val);
+
+ return 0;
+}
+
+static int imx94_enetc_link_config(struct netc_blk_ctrl *priv,
+ struct device_node *np)
+{
+ int link_id = imx94_enetc_get_link_id(np);
+
+ if (link_id < 0)
+ return link_id;
+
+ return imx94_link_config(priv, np, link_id);
+}
+
+static int imx94_netcmix_init(struct platform_device *pdev)
+{
+ struct netc_blk_ctrl *priv = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ u32 val;
+ int err;
+
+ for_each_child_of_node_scoped(np, child) {
+ for_each_child_of_node_scoped(child, gchild) {
+ if (!of_device_is_compatible(gchild, "pci1131,e101"))
+ continue;
+
+ err = imx94_enetc_link_config(priv, gchild);
+ if (err)
+ return err;
+ }
+ }
+
+ /* ENETC 0 and switch port 2 share the same parallel interface.
+ * Currently, the switch is not supported, so this interface is
+ * used by ENETC 0 by default.
+ */
+ val = netc_reg_read(priv->netcmix, IMX94_EXT_PIN_CONTROL);
+ val |= MAC2_MAC3_SEL;
+ netc_reg_write(priv->netcmix, IMX94_EXT_PIN_CONTROL, val);
+
+ return 0;
+}
+
+static bool netc_ierb_is_locked(struct netc_blk_ctrl *priv)
+{
+ return !!(netc_reg_read(priv->prb, PRB_NETCRR) & NETCRR_LOCK);
+}
+
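+/* Lock the IERB: set NETCRR[LOCK], then wait for NETCSR[STATE] to clear */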
+static int netc_lock_ierb(struct netc_blk_ctrl *priv)
+{
+ u32 val;
+
+ netc_reg_write(priv->prb, PRB_NETCRR, NETCRR_LOCK);
+
+ return read_poll_timeout(netc_reg_read, val, !(val & NETCSR_STATE),
+ 100, 2000, false, priv->prb, PRB_NETCSR);
+}
+
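+/* Unlock the IERB via warm reset: clear NETCRR and wait for LOCK to drop */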
+static int netc_unlock_ierb_with_warm_reset(struct netc_blk_ctrl *priv)
+{
+ u32 val;
+
+ netc_reg_write(priv->prb, PRB_NETCRR, 0);
+
+ return read_poll_timeout(netc_reg_read, val, !(val & NETCRR_LOCK),
+ 1000, 100000, true, priv->prb, PRB_NETCRR);
+}
+
+static int netc_get_phy_addr(struct device_node *np)
+{
+ struct device_node *mdio_node, *phy_node;
+ u32 addr = 0;
+ int err = 0;
+
+ mdio_node = of_get_child_by_name(np, "mdio");
+ if (!mdio_node)
+ return 0;
+
+ phy_node = of_get_next_child(mdio_node, NULL);
+ if (!phy_node)
+ goto of_put_mdio_node;
+
+ err = of_property_read_u32(phy_node, "reg", &addr);
+ if (err)
+ goto of_put_phy_node;
+
+ if (addr >= PHY_MAX_ADDR)
+ err = -EINVAL;
+
+of_put_phy_node:
+ of_node_put(phy_node);
+
+of_put_mdio_node:
+ of_node_put(mdio_node);
+
+ return err ? err : addr;
+}
+
+static int netc_parse_emdio_phy_mask(struct device_node *np, u32 *phy_mask)
+{
+ u32 mask = 0;
+
+ for_each_child_of_node_scoped(np, child) {
+ u32 addr;
+ int err;
+
+ err = of_property_read_u32(child, "reg", &addr);
+ if (err)
+ return err;
+
+ if (addr >= PHY_MAX_ADDR)
+ return -EINVAL;
+
+ mask |= BIT(addr);
+ }
+
+ *phy_mask = mask;
+
+ return 0;
+}
+
+static int netc_get_emdio_phy_mask(struct device_node *np, u32 *phy_mask)
+{
+ for_each_child_of_node_scoped(np, child) {
+ for_each_child_of_node_scoped(child, gchild) {
+ if (!of_device_is_compatible(gchild, "pci1131,ee00"))
+ continue;
+
+ return netc_parse_emdio_phy_mask(gchild, phy_mask);
+ }
+ }
+
+ return 0;
+}
+
+static int imx95_enetc_mdio_phyaddr_config(struct platform_device *pdev)
+{
+ struct netc_blk_ctrl *priv = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ int bus_devfn, addr, err;
+ u32 phy_mask = 0;
+
+ err = netc_get_emdio_phy_mask(np, &phy_mask);
+ if (err) {
+ dev_err(dev, "Failed to get PHY address mask\n");
+ return err;
+ }
+
+	/* Update the port EMDIO PHY address by parsing the PHY properties.
+	 * This is needed when using the port EMDIO but is harmless when
+	 * using the central EMDIO, so apply it in all cases.
+	 */
+ for_each_child_of_node_scoped(np, child) {
+ for_each_child_of_node_scoped(child, gchild) {
+ if (!of_device_is_compatible(gchild, "pci1131,e101"))
+ continue;
+
+ bus_devfn = netc_of_pci_get_bus_devfn(gchild);
+ if (bus_devfn < 0) {
+ dev_err(dev, "Failed to get BDF number\n");
+ return bus_devfn;
+ }
+
+ addr = netc_get_phy_addr(gchild);
+ if (addr < 0) {
+ dev_err(dev, "Failed to get PHY address\n");
+ return addr;
+ }
+
+ if (phy_mask & BIT(addr)) {
+ dev_err(dev,
+					"Same PHY address found in EMDIO and ENETC nodes\n");
+ return -EINVAL;
+ }
+
+			/* The default value of LaBCR[MDIO_PHYAD_PRTAD] is 0,
+			 * so there is no need to set the register.
+			 */
+ if (!addr)
+ continue;
+
+ switch (bus_devfn) {
+ case IMX95_ENETC0_BUS_DEVFN:
+ netc_reg_write(priv->ierb, IERB_LBCR(0),
+ LBCR_MDIO_PHYAD_PRTAD(addr));
+ break;
+ case IMX95_ENETC1_BUS_DEVFN:
+ netc_reg_write(priv->ierb, IERB_LBCR(1),
+ LBCR_MDIO_PHYAD_PRTAD(addr));
+ break;
+ case IMX95_ENETC2_BUS_DEVFN:
+ netc_reg_write(priv->ierb, IERB_LBCR(2),
+ LBCR_MDIO_PHYAD_PRTAD(addr));
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int imx95_ierb_init(struct platform_device *pdev)
+{
+ struct netc_blk_ctrl *priv = platform_get_drvdata(pdev);
+
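+	/* Assign an LDID (via the FAUXR registers) to each NETC function */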
+	/* EMDIO: no MSI-X interrupt */
+ netc_reg_write(priv->ierb, IERB_EMDIOFAUXR, 0);
+ /* ENETC0 PF */
+ netc_reg_write(priv->ierb, IERB_EFAUXR(0), 0);
+ /* ENETC0 VF0 */
+ netc_reg_write(priv->ierb, IERB_VFAUXR(0), 1);
+ /* ENETC0 VF1 */
+ netc_reg_write(priv->ierb, IERB_VFAUXR(1), 2);
+ /* ENETC1 PF */
+ netc_reg_write(priv->ierb, IERB_EFAUXR(1), 3);
+ /* ENETC1 VF0 */
+ netc_reg_write(priv->ierb, IERB_VFAUXR(2), 5);
+ /* ENETC1 VF1 */
+ netc_reg_write(priv->ierb, IERB_VFAUXR(3), 6);
+ /* ENETC2 PF */
+ netc_reg_write(priv->ierb, IERB_EFAUXR(2), 4);
+ /* ENETC2 VF0 */
+ netc_reg_write(priv->ierb, IERB_VFAUXR(4), 5);
+ /* ENETC2 VF1 */
+ netc_reg_write(priv->ierb, IERB_VFAUXR(5), 6);
+ /* NETC TIMER */
+ netc_reg_write(priv->ierb, IERB_T0FAUXR, 7);
+
+ return imx95_enetc_mdio_phyaddr_config(pdev);
+}
+
+static int imx94_get_enetc_id(struct device_node *np)
+{
+ int bus_devfn = netc_of_pci_get_bus_devfn(np);
+
+	/* Parse the ENETC instance ID */
+ switch (bus_devfn) {
+ case IMX94_ENETC0_BUS_DEVFN:
+ return NETC_ENETC_ID(0);
+ case IMX94_ENETC1_BUS_DEVFN:
+ return NETC_ENETC_ID(1);
+ case IMX94_ENETC2_BUS_DEVFN:
+ return NETC_ENETC_ID(2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int imx94_get_timer_id(struct device_node *np)
+{
+ int bus_devfn = netc_of_pci_get_bus_devfn(np);
+
+	/* Parse the NETC PTP timer ID: timer0 is on bus 0,
+	 * timer1 and timer2 are on bus 1.
+	 */
+ switch (bus_devfn) {
+ case IMX94_TIMER0_BUS_DEVFN:
+ return NETC_TIMER_ID(0);
+ case IMX94_TIMER1_BUS_DEVFN:
+ return NETC_TIMER_ID(1);
+ case IMX94_TIMER2_BUS_DEVFN:
+ return NETC_TIMER_ID(2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int imx94_enetc_update_tid(struct netc_blk_ctrl *priv,
+ struct device_node *np)
+{
+ struct device *dev = &priv->pdev->dev;
+ struct device_node *timer_np;
+ int eid, tid;
+
+ eid = imx94_get_enetc_id(np);
+ if (eid < 0) {
+ dev_err(dev, "Failed to get ENETC ID\n");
+ return eid;
+ }
+
+ timer_np = of_parse_phandle(np, "ptp-timer", 0);
+ if (!timer_np) {
+		/* If 'ptp-timer' is not present, timer1 is the default
+		 * timer for all standalone ENETCs; it is on the same PCIe
+		 * bus as these ENETCs.
+		 */
+ tid = NETC_TIMER_ID(1);
+ goto end;
+ }
+
+ tid = imx94_get_timer_id(timer_np);
+ of_node_put(timer_np);
+ if (tid < 0) {
+ dev_err(dev, "Failed to get NETC Timer ID\n");
+ return tid;
+ }
+
+end:
+ netc_reg_write(priv->ierb, IERB_ETBCR(eid), tid);
+
+ return 0;
+}
+
+static int imx94_enetc_mdio_phyaddr_config(struct netc_blk_ctrl *priv,
+ struct device_node *np,
+ u32 phy_mask)
+{
+ struct device *dev = &priv->pdev->dev;
+ int bus_devfn, addr;
+
+ bus_devfn = netc_of_pci_get_bus_devfn(np);
+ if (bus_devfn < 0) {
+ dev_err(dev, "Failed to get BDF number\n");
+ return bus_devfn;
+ }
+
+ addr = netc_get_phy_addr(np);
+ if (addr <= 0) {
+ dev_err(dev, "Failed to get PHY address\n");
+ return addr;
+ }
+
+ if (phy_mask & BIT(addr)) {
+ dev_err(dev,
+			"Same PHY address found in EMDIO and ENETC nodes\n");
+ return -EINVAL;
+ }
+
+ switch (bus_devfn) {
+ case IMX94_ENETC0_BUS_DEVFN:
+ netc_reg_write(priv->ierb, IERB_LBCR(IMX94_ENETC0_LINK),
+ LBCR_MDIO_PHYAD_PRTAD(addr));
+ break;
+ case IMX94_ENETC1_BUS_DEVFN:
+ netc_reg_write(priv->ierb, IERB_LBCR(IMX94_ENETC1_LINK),
+ LBCR_MDIO_PHYAD_PRTAD(addr));
+ break;
+ case IMX94_ENETC2_BUS_DEVFN:
+ netc_reg_write(priv->ierb, IERB_LBCR(IMX94_ENETC2_LINK),
+ LBCR_MDIO_PHYAD_PRTAD(addr));
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int imx94_ierb_init(struct platform_device *pdev)
+{
+ struct netc_blk_ctrl *priv = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ u32 phy_mask = 0;
+ int err;
+
+ err = netc_get_emdio_phy_mask(np, &phy_mask);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to get PHY address mask\n");
+ return err;
+ }
+
+ for_each_child_of_node_scoped(np, child) {
+ for_each_child_of_node_scoped(child, gchild) {
+ if (!of_device_is_compatible(gchild, "pci1131,e101"))
+ continue;
+
+ err = imx94_enetc_update_tid(priv, gchild);
+ if (err)
+ return err;
+
+ err = imx94_enetc_mdio_phyaddr_config(priv, gchild,
+ phy_mask);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int netc_ierb_init(struct platform_device *pdev)
+{
+ struct netc_blk_ctrl *priv = platform_get_drvdata(pdev);
+ const struct netc_devinfo *devinfo = priv->devinfo;
+ int err;
+
+ if (netc_ierb_is_locked(priv)) {
+ err = netc_unlock_ierb_with_warm_reset(priv);
+ if (err) {
+ dev_err(&pdev->dev, "Unlock IERB failed.\n");
+ return err;
+ }
+ }
+
+ if (devinfo->ierb_init) {
+ err = devinfo->ierb_init(pdev);
+ if (err)
+ return err;
+ }
+
+ err = netc_lock_ierb(priv);
+ if (err) {
+ dev_err(&pdev->dev, "Lock IERB failed.\n");
+ return err;
+ }
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+static int netc_prb_show(struct seq_file *s, void *data)
+{
+ struct netc_blk_ctrl *priv = s->private;
+ u32 val;
+
+ val = netc_reg_read(priv->prb, PRB_NETCRR);
+ seq_printf(s, "[PRB NETCRR] Lock:%d SR:%d\n",
+ (val & NETCRR_LOCK) ? 1 : 0,
+ (val & NETCRR_SR) ? 1 : 0);
+
+ val = netc_reg_read(priv->prb, PRB_NETCSR);
+ seq_printf(s, "[PRB NETCSR] State:%d Error:%d\n",
+ (val & NETCSR_STATE) ? 1 : 0,
+ (val & NETCSR_ERROR) ? 1 : 0);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(netc_prb);
+
+static void netc_blk_ctrl_create_debugfs(struct netc_blk_ctrl *priv)
+{
+ struct dentry *root;
+
+ root = debugfs_create_dir("netc_blk_ctrl", NULL);
+ if (IS_ERR(root))
+ return;
+
+ priv->debugfs_root = root;
+
+ debugfs_create_file("prb", 0444, root, priv, &netc_prb_fops);
+}
+
+static void netc_blk_ctrl_remove_debugfs(struct netc_blk_ctrl *priv)
+{
+ debugfs_remove_recursive(priv->debugfs_root);
+ priv->debugfs_root = NULL;
+}
+
+#else
+
+static void netc_blk_ctrl_create_debugfs(struct netc_blk_ctrl *priv)
+{
+}
+
+static void netc_blk_ctrl_remove_debugfs(struct netc_blk_ctrl *priv)
+{
+}
+#endif
+
+static int netc_prb_check_error(struct netc_blk_ctrl *priv)
+{
+ if (netc_reg_read(priv->prb, PRB_NETCSR) & NETCSR_ERROR)
+ return -1;
+
+ return 0;
+}
+
+static const struct netc_devinfo imx95_devinfo = {
+ .flags = NETC_HAS_NETCMIX,
+ .netcmix_init = imx95_netcmix_init,
+ .ierb_init = imx95_ierb_init,
+};
+
+static const struct netc_devinfo imx94_devinfo = {
+ .flags = NETC_HAS_NETCMIX,
+ .netcmix_init = imx94_netcmix_init,
+ .ierb_init = imx94_ierb_init,
+};
+
+static const struct of_device_id netc_blk_ctrl_match[] = {
+ { .compatible = "nxp,imx95-netc-blk-ctrl", .data = &imx95_devinfo },
+ { .compatible = "nxp,imx94-netc-blk-ctrl", .data = &imx94_devinfo },
+ {},
+};
+MODULE_DEVICE_TABLE(of, netc_blk_ctrl_match);
+
+static int netc_blk_ctrl_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ const struct netc_devinfo *devinfo;
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *id;
+ struct netc_blk_ctrl *priv;
+ struct clk *ipg_clk;
+ void __iomem *regs;
+ int err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->pdev = pdev;
+ ipg_clk = devm_clk_get_optional_enabled(dev, "ipg");
+ if (IS_ERR(ipg_clk))
+ return dev_err_probe(dev, PTR_ERR(ipg_clk),
+ "Set ipg clock failed\n");
+
+ id = of_match_device(netc_blk_ctrl_match, dev);
+ if (!id)
+ return dev_err_probe(dev, -EINVAL, "Cannot match device\n");
+
+ devinfo = (struct netc_devinfo *)id->data;
+ if (!devinfo)
+ return dev_err_probe(dev, -EINVAL, "No device information\n");
+
+ priv->devinfo = devinfo;
+ regs = devm_platform_ioremap_resource_byname(pdev, "ierb");
+ if (IS_ERR(regs))
+ return dev_err_probe(dev, PTR_ERR(regs),
+ "Missing IERB resource\n");
+
+ priv->ierb = regs;
+ regs = devm_platform_ioremap_resource_byname(pdev, "prb");
+ if (IS_ERR(regs))
+ return dev_err_probe(dev, PTR_ERR(regs),
+ "Missing PRB resource\n");
+
+ priv->prb = regs;
+ if (devinfo->flags & NETC_HAS_NETCMIX) {
+ regs = devm_platform_ioremap_resource_byname(pdev, "netcmix");
+ if (IS_ERR(regs))
+ return dev_err_probe(dev, PTR_ERR(regs),
+ "Missing NETCMIX resource\n");
+ priv->netcmix = regs;
+ }
+
+ platform_set_drvdata(pdev, priv);
+ if (devinfo->netcmix_init) {
+ err = devinfo->netcmix_init(pdev);
+ if (err)
+ return dev_err_probe(dev, err,
+ "Initializing NETCMIX failed\n");
+ }
+
+ err = netc_ierb_init(pdev);
+ if (err)
+ return dev_err_probe(dev, err, "Initializing IERB failed\n");
+
+ if (netc_prb_check_error(priv) < 0)
+ dev_warn(dev, "The current IERB configuration is invalid\n");
+
+ netc_blk_ctrl_create_debugfs(priv);
+
+ err = of_platform_populate(node, NULL, NULL, dev);
+ if (err) {
+ netc_blk_ctrl_remove_debugfs(priv);
+ return dev_err_probe(dev, err, "of_platform_populate failed\n");
+ }
+
+ return 0;
+}
+
+static void netc_blk_ctrl_remove(struct platform_device *pdev)
+{
+ struct netc_blk_ctrl *priv = platform_get_drvdata(pdev);
+
+ of_platform_depopulate(&pdev->dev);
+ netc_blk_ctrl_remove_debugfs(priv);
+}
+
+static struct platform_driver netc_blk_ctrl_driver = {
+ .driver = {
+ .name = "nxp-netc-blk-ctrl",
+ .of_match_table = netc_blk_ctrl_match,
+ },
+ .probe = netc_blk_ctrl_probe,
+ .remove = netc_blk_ctrl_remove,
+};
+
+module_platform_driver(netc_blk_ctrl_driver);
+
+MODULE_DESCRIPTION("NXP NETC Blocks Control Driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/ntmp.c b/drivers/net/ethernet/freescale/enetc/ntmp.c
new file mode 100644
index 000000000000..0c1d343253bf
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/ntmp.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * NETC NTMP (NETC Table Management Protocol) 2.0 Library
+ * Copyright 2025 NXP
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/fsl/netc_global.h>
+#include <linux/iopoll.h>
+
+#include "ntmp_private.h"
+
+#define NETC_CBDR_TIMEOUT 1000 /* us */
+#define NETC_CBDR_DELAY_US 10
+#define NETC_CBDR_MR_EN BIT(31)
+
+#define NTMP_BASE_ADDR_ALIGN 128
+#define NTMP_DATA_ADDR_ALIGN 32
+
+/* Define NTMP Table ID */
+#define NTMP_MAFT_ID 1
+#define NTMP_RSST_ID 3
+
+/* Generic Update Actions for most tables */
+#define NTMP_GEN_UA_CFGEU BIT(0)
+#define NTMP_GEN_UA_STSEU BIT(1)
+
+#define NTMP_ENTRY_ID_SIZE 4
+#define RSST_ENTRY_NUM 64
+#define RSST_STSE_DATA_SIZE(n) ((n) * 8)
+#define RSST_CFGE_DATA_SIZE(n) (n)
+
+int ntmp_init_cbdr(struct netc_cbdr *cbdr, struct device *dev,
+ const struct netc_cbdr_regs *regs)
+{
+ int cbd_num = NETC_CBDR_BD_NUM;
+ size_t size;
+
+ size = cbd_num * sizeof(union netc_cbd) + NTMP_BASE_ADDR_ALIGN;
+ cbdr->addr_base = dma_alloc_coherent(dev, size, &cbdr->dma_base,
+ GFP_KERNEL);
+ if (!cbdr->addr_base)
+ return -ENOMEM;
+
+ cbdr->dma_size = size;
+ cbdr->bd_num = cbd_num;
+ cbdr->regs = *regs;
+ cbdr->dev = dev;
+
+	/* The base address of the Control BD Ring must be 128-byte aligned */
+ cbdr->dma_base_align = ALIGN(cbdr->dma_base, NTMP_BASE_ADDR_ALIGN);
+ cbdr->addr_base_align = PTR_ALIGN(cbdr->addr_base,
+ NTMP_BASE_ADDR_ALIGN);
+
+ spin_lock_init(&cbdr->ring_lock);
+
+ cbdr->next_to_use = netc_read(cbdr->regs.pir);
+ cbdr->next_to_clean = netc_read(cbdr->regs.cir);
+
+ /* Step 1: Configure the base address of the Control BD Ring */
+ netc_write(cbdr->regs.bar0, lower_32_bits(cbdr->dma_base_align));
+ netc_write(cbdr->regs.bar1, upper_32_bits(cbdr->dma_base_align));
+
+ /* Step 2: Configure the number of BDs of the Control BD Ring */
+ netc_write(cbdr->regs.lenr, cbdr->bd_num);
+
+ /* Step 3: Enable the Control BD Ring */
+ netc_write(cbdr->regs.mr, NETC_CBDR_MR_EN);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ntmp_init_cbdr);
+
+void ntmp_free_cbdr(struct netc_cbdr *cbdr)
+{
+ /* Disable the Control BD Ring */
+ netc_write(cbdr->regs.mr, 0);
+ dma_free_coherent(cbdr->dev, cbdr->dma_size, cbdr->addr_base,
+ cbdr->dma_base);
+ memset(cbdr, 0, sizeof(*cbdr));
+}
+EXPORT_SYMBOL_GPL(ntmp_free_cbdr);
+
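+/* One BD slot is kept unused so that a full ring can be distinguished
+ * from an empty one.
+ */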
+static int ntmp_get_free_cbd_num(struct netc_cbdr *cbdr)
+{
+ return (cbdr->next_to_clean - cbdr->next_to_use - 1 +
+ cbdr->bd_num) % cbdr->bd_num;
+}
+
+static union netc_cbd *ntmp_get_cbd(struct netc_cbdr *cbdr, int index)
+{
+ return &((union netc_cbd *)(cbdr->addr_base_align))[index];
+}
+
+static void ntmp_clean_cbdr(struct netc_cbdr *cbdr)
+{
+ union netc_cbd *cbd;
+ int i;
+
+ i = cbdr->next_to_clean;
+ while (netc_read(cbdr->regs.cir) != i) {
+ cbd = ntmp_get_cbd(cbdr, i);
+ memset(cbd, 0, sizeof(*cbd));
+ i = (i + 1) % cbdr->bd_num;
+ }
+
+ cbdr->next_to_clean = i;
+}
+
+static int netc_xmit_ntmp_cmd(struct ntmp_user *user, union netc_cbd *cbd)
+{
+ union netc_cbd *cur_cbd;
+ struct netc_cbdr *cbdr;
+ int i, err;
+ u16 status;
+ u32 val;
+
+ /* Currently only i.MX95 ENETC is supported, and it only has one
+ * command BD ring
+ */
+ cbdr = &user->ring[0];
+
+ spin_lock_bh(&cbdr->ring_lock);
+
+ if (unlikely(!ntmp_get_free_cbd_num(cbdr)))
+ ntmp_clean_cbdr(cbdr);
+
+ i = cbdr->next_to_use;
+ cur_cbd = ntmp_get_cbd(cbdr, i);
+ *cur_cbd = *cbd;
+ dma_wmb();
+
+ /* Update producer index of both software and hardware */
+ i = (i + 1) % cbdr->bd_num;
+ cbdr->next_to_use = i;
+ netc_write(cbdr->regs.pir, i);
+
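+	/* Wait for the consumer index to catch up with the producer index */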
+ err = read_poll_timeout_atomic(netc_read, val, val == i,
+ NETC_CBDR_DELAY_US, NETC_CBDR_TIMEOUT,
+ true, cbdr->regs.cir);
+ if (unlikely(err))
+ goto cbdr_unlock;
+
+ dma_rmb();
+ /* Get the writeback command BD, because the caller may need
+ * to check some other fields of the response header.
+ */
+ *cbd = *cur_cbd;
+
+ /* Check the writeback error status */
+ status = le16_to_cpu(cbd->resp_hdr.error_rr) & NTMP_RESP_ERROR;
+ if (unlikely(status)) {
+ err = -EIO;
+ dev_err(user->dev, "Command BD error: 0x%04x\n", status);
+ }
+
+ ntmp_clean_cbdr(cbdr);
+ dma_wmb();
+
+cbdr_unlock:
+ spin_unlock_bh(&cbdr->ring_lock);
+
+ return err;
+}
+
+static int ntmp_alloc_data_mem(struct ntmp_dma_buf *data, void **buf_align)
+{
+ void *buf;
+
+ buf = dma_alloc_coherent(data->dev, data->size + NTMP_DATA_ADDR_ALIGN,
+ &data->dma, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ data->buf = buf;
+ *buf_align = PTR_ALIGN(buf, NTMP_DATA_ADDR_ALIGN);
+
+ return 0;
+}
+
+static void ntmp_free_data_mem(struct ntmp_dma_buf *data)
+{
+ dma_free_coherent(data->dev, data->size + NTMP_DATA_ADDR_ALIGN,
+ data->buf, data->dma);
+}
+
+static void ntmp_fill_request_hdr(union netc_cbd *cbd, dma_addr_t dma,
+ int len, int table_id, int cmd,
+ int access_method)
+{
+ dma_addr_t dma_align;
+
+ memset(cbd, 0, sizeof(*cbd));
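+	/* The data buffer address handed to hardware must be 32-byte aligned */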
+ dma_align = ALIGN(dma, NTMP_DATA_ADDR_ALIGN);
+ cbd->req_hdr.addr = cpu_to_le64(dma_align);
+ cbd->req_hdr.len = cpu_to_le32(len);
+ cbd->req_hdr.cmd = cmd;
+ cbd->req_hdr.access_method = FIELD_PREP(NTMP_ACCESS_METHOD,
+ access_method);
+ cbd->req_hdr.table_id = table_id;
+ cbd->req_hdr.ver_cci_rr = FIELD_PREP(NTMP_HDR_VERSION,
+ NTMP_HDR_VER2);
+	/* For NTMP version 2.0 or later */
+ cbd->req_hdr.npf = cpu_to_le32(NTMP_NPF);
+}
+
+static void ntmp_fill_crd(struct ntmp_cmn_req_data *crd, u8 tblv,
+ u8 qa, u16 ua)
+{
+ crd->update_act = cpu_to_le16(ua);
+ crd->tblv_qact = NTMP_TBLV_QACT(tblv, qa);
+}
+
+static void ntmp_fill_crd_eid(struct ntmp_req_by_eid *rbe, u8 tblv,
+ u8 qa, u16 ua, u32 entry_id)
+{
+ ntmp_fill_crd(&rbe->crd, tblv, qa, ua);
+ rbe->entry_id = cpu_to_le32(entry_id);
+}
+
+static const char *ntmp_table_name(int tbl_id)
+{
+ switch (tbl_id) {
+ case NTMP_MAFT_ID:
+ return "MAC Address Filter Table";
+ case NTMP_RSST_ID:
+ return "RSS Table";
+ default:
+ return "Unknown Table";
+	}
+}
+
+static int ntmp_delete_entry_by_id(struct ntmp_user *user, int tbl_id,
+ u8 tbl_ver, u32 entry_id, u32 req_len,
+ u32 resp_len)
+{
+ struct ntmp_dma_buf data = {
+ .dev = user->dev,
+ .size = max(req_len, resp_len),
+ };
+ struct ntmp_req_by_eid *req;
+ union netc_cbd cbd;
+ int err;
+
+ err = ntmp_alloc_data_mem(&data, (void **)&req);
+ if (err)
+ return err;
+
+ ntmp_fill_crd_eid(req, tbl_ver, 0, 0, entry_id);
+ ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(req_len, resp_len),
+ tbl_id, NTMP_CMD_DELETE, NTMP_AM_ENTRY_ID);
+
+ err = netc_xmit_ntmp_cmd(user, &cbd);
+ if (err)
+ dev_err(user->dev,
+			"Failed to delete entry 0x%x of %s, err: %pe\n",
+ entry_id, ntmp_table_name(tbl_id), ERR_PTR(err));
+
+ ntmp_free_data_mem(&data);
+
+ return err;
+}
+
+static int ntmp_query_entry_by_id(struct ntmp_user *user, int tbl_id,
+ u32 len, struct ntmp_req_by_eid *req,
+ dma_addr_t dma, bool compare_eid)
+{
+ struct ntmp_cmn_resp_query *resp;
+ int cmd = NTMP_CMD_QUERY;
+ union netc_cbd cbd;
+ u32 entry_id;
+ int err;
+
+ entry_id = le32_to_cpu(req->entry_id);
+ if (le16_to_cpu(req->crd.update_act))
+ cmd = NTMP_CMD_QU;
+
+ /* Request header */
+ ntmp_fill_request_hdr(&cbd, dma, len, tbl_id, cmd, NTMP_AM_ENTRY_ID);
+ err = netc_xmit_ntmp_cmd(user, &cbd);
+ if (err) {
+ dev_err(user->dev,
+ "Failed to query entry 0x%x of %s, err: %pe\n",
+ entry_id, ntmp_table_name(tbl_id), ERR_PTR(err));
+ return err;
+ }
+
+ /* For a few tables, the first field of their response data is not
+	 * entry_id, so return success directly.
+ */
+ if (!compare_eid)
+ return 0;
+
+ resp = (struct ntmp_cmn_resp_query *)req;
+ if (unlikely(le32_to_cpu(resp->entry_id) != entry_id)) {
+ dev_err(user->dev,
+ "%s: query EID 0x%x doesn't match response EID 0x%x\n",
+ ntmp_table_name(tbl_id), entry_id, le32_to_cpu(resp->entry_id));
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int ntmp_maft_add_entry(struct ntmp_user *user, u32 entry_id,
+ struct maft_entry_data *maft)
+{
+ struct ntmp_dma_buf data = {
+ .dev = user->dev,
+ .size = sizeof(struct maft_req_add),
+ };
+ struct maft_req_add *req;
+ union netc_cbd cbd;
+ int err;
+
+ err = ntmp_alloc_data_mem(&data, (void **)&req);
+ if (err)
+ return err;
+
+ /* Set mac address filter table request data buffer */
+ ntmp_fill_crd_eid(&req->rbe, user->tbl.maft_ver, 0, 0, entry_id);
+ req->keye = maft->keye;
+ req->cfge = maft->cfge;
+
+ ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(data.size, 0),
+ NTMP_MAFT_ID, NTMP_CMD_ADD, NTMP_AM_ENTRY_ID);
+ err = netc_xmit_ntmp_cmd(user, &cbd);
+ if (err)
+ dev_err(user->dev, "Failed to add MAFT entry 0x%x, err: %pe\n",
+ entry_id, ERR_PTR(err));
+
+ ntmp_free_data_mem(&data);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ntmp_maft_add_entry);
+
+int ntmp_maft_query_entry(struct ntmp_user *user, u32 entry_id,
+ struct maft_entry_data *maft)
+{
+ struct ntmp_dma_buf data = {
+ .dev = user->dev,
+ .size = sizeof(struct maft_resp_query),
+ };
+ struct maft_resp_query *resp;
+ struct ntmp_req_by_eid *req;
+ int err;
+
+ err = ntmp_alloc_data_mem(&data, (void **)&req);
+ if (err)
+ return err;
+
+ ntmp_fill_crd_eid(req, user->tbl.maft_ver, 0, 0, entry_id);
+ err = ntmp_query_entry_by_id(user, NTMP_MAFT_ID,
+ NTMP_LEN(sizeof(*req), data.size),
+ req, data.dma, true);
+ if (err)
+ goto end;
+
+ resp = (struct maft_resp_query *)req;
+ maft->keye = resp->keye;
+ maft->cfge = resp->cfge;
+
+end:
+ ntmp_free_data_mem(&data);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ntmp_maft_query_entry);
+
+int ntmp_maft_delete_entry(struct ntmp_user *user, u32 entry_id)
+{
+ return ntmp_delete_entry_by_id(user, NTMP_MAFT_ID, user->tbl.maft_ver,
+ entry_id, NTMP_EID_REQ_LEN, 0);
+}
+EXPORT_SYMBOL_GPL(ntmp_maft_delete_entry);
+
+int ntmp_rsst_update_entry(struct ntmp_user *user, const u32 *table,
+ int count)
+{
+ struct ntmp_dma_buf data = {.dev = user->dev};
+ struct rsst_req_update *req;
+ union netc_cbd cbd;
+ int err, i;
+
+ if (count != RSST_ENTRY_NUM)
+		/* HW only accepts a full 64-entry table */
+ return -EINVAL;
+
+ data.size = struct_size(req, groups, count);
+ err = ntmp_alloc_data_mem(&data, (void **)&req);
+ if (err)
+ return err;
+
+ /* Set the request data buffer */
+ ntmp_fill_crd_eid(&req->rbe, user->tbl.rsst_ver, 0,
+ NTMP_GEN_UA_CFGEU | NTMP_GEN_UA_STSEU, 0);
+ for (i = 0; i < count; i++)
+ req->groups[i] = (u8)(table[i]);
+
+ ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(data.size, 0),
+ NTMP_RSST_ID, NTMP_CMD_UPDATE, NTMP_AM_ENTRY_ID);
+
+ err = netc_xmit_ntmp_cmd(user, &cbd);
+ if (err)
+ dev_err(user->dev, "Failed to update RSST entry, err: %pe\n",
+ ERR_PTR(err));
+
+ ntmp_free_data_mem(&data);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ntmp_rsst_update_entry);
+
+int ntmp_rsst_query_entry(struct ntmp_user *user, u32 *table, int count)
+{
+ struct ntmp_dma_buf data = {.dev = user->dev};
+ struct ntmp_req_by_eid *req;
+ union netc_cbd cbd;
+ int err, i;
+ u8 *group;
+
+ if (count != RSST_ENTRY_NUM)
+		/* HW only accepts a full 64-entry table */
+ return -EINVAL;
+
+ data.size = NTMP_ENTRY_ID_SIZE + RSST_STSE_DATA_SIZE(count) +
+ RSST_CFGE_DATA_SIZE(count);
+ err = ntmp_alloc_data_mem(&data, (void **)&req);
+ if (err)
+ return err;
+
+ /* Set the request data buffer */
+ ntmp_fill_crd_eid(req, user->tbl.rsst_ver, 0, 0, 0);
+ ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(sizeof(*req), data.size),
+ NTMP_RSST_ID, NTMP_CMD_QUERY, NTMP_AM_ENTRY_ID);
+ err = netc_xmit_ntmp_cmd(user, &cbd);
+ if (err) {
+ dev_err(user->dev, "Failed to query RSST entry, err: %pe\n",
+ ERR_PTR(err));
+ goto end;
+ }
+
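+	/* Response layout: entry ID, then STSE data, then one CFGE group byte
+	 * per entry.
+	 */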
+ group = (u8 *)req;
+ group += NTMP_ENTRY_ID_SIZE + RSST_STSE_DATA_SIZE(count);
+ for (i = 0; i < count; i++)
+ table[i] = group[i];
+
+end:
+ ntmp_free_data_mem(&data);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ntmp_rsst_query_entry);
+
+MODULE_DESCRIPTION("NXP NETC Library");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/ntmp_private.h b/drivers/net/ethernet/freescale/enetc/ntmp_private.h
new file mode 100644
index 000000000000..34394e40fddd
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/ntmp_private.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * NTMP table request and response data buffer formats
+ * Copyright 2025 NXP
+ */
+
+#ifndef __NTMP_PRIVATE_H
+#define __NTMP_PRIVATE_H
+
+#include <linux/bitfield.h>
+#include <linux/fsl/ntmp.h>
+
+#define NTMP_EID_REQ_LEN 8
+#define NETC_CBDR_BD_NUM 256
+
+union netc_cbd {
+ struct {
+ __le64 addr;
+ __le32 len;
+#define NTMP_RESP_LEN GENMASK(19, 0)
+#define NTMP_REQ_LEN GENMASK(31, 20)
+#define NTMP_LEN(req, resp) (FIELD_PREP(NTMP_REQ_LEN, (req)) | \
+ ((resp) & NTMP_RESP_LEN))
+ u8 cmd;
+#define NTMP_CMD_DELETE BIT(0)
+#define NTMP_CMD_UPDATE BIT(1)
+#define NTMP_CMD_QUERY BIT(2)
+#define NTMP_CMD_ADD BIT(3)
+#define NTMP_CMD_QU (NTMP_CMD_QUERY | NTMP_CMD_UPDATE)
+ u8 access_method;
+#define NTMP_ACCESS_METHOD GENMASK(7, 4)
+#define NTMP_AM_ENTRY_ID 0
+#define NTMP_AM_EXACT_KEY 1
+#define NTMP_AM_SEARCH 2
+#define NTMP_AM_TERNARY_KEY 3
+ u8 table_id;
+ u8 ver_cci_rr;
+#define NTMP_HDR_VERSION GENMASK(5, 0)
+#define NTMP_HDR_VER2 2
+#define NTMP_CCI BIT(6)
+#define NTMP_RR BIT(7)
+ __le32 resv[3];
+ __le32 npf;
+#define NTMP_NPF BIT(15)
+ } req_hdr; /* NTMP Request Message Header Format */
+
+ struct {
+ __le32 resv0[3];
+ __le16 num_matched;
+ __le16 error_rr;
+#define NTMP_RESP_ERROR GENMASK(11, 0)
+#define NTMP_RESP_RR BIT(15)
+ __le32 resv1[4];
+ } resp_hdr; /* NTMP Response Message Header Format */
+};
+
+struct ntmp_dma_buf {
+ struct device *dev;
+ size_t size;
+ void *buf;
+ dma_addr_t dma;
+};
+
+struct ntmp_cmn_req_data {
+ __le16 update_act;
+ u8 dbg_opt;
+ u8 tblv_qact;
+#define NTMP_QUERY_ACT GENMASK(3, 0)
+#define NTMP_TBL_VER GENMASK(7, 4)
+#define NTMP_TBLV_QACT(v, a) (FIELD_PREP(NTMP_TBL_VER, (v)) | \
+ ((a) & NTMP_QUERY_ACT))
+};
+
+struct ntmp_cmn_resp_query {
+ __le32 entry_id;
+};
+
+/* Generic structure for request data by entry ID */
+struct ntmp_req_by_eid {
+ struct ntmp_cmn_req_data crd;
+ __le32 entry_id;
+};
+
+/* MAC Address Filter Table Request Data Buffer Format of Add action */
+struct maft_req_add {
+ struct ntmp_req_by_eid rbe;
+ struct maft_keye_data keye;
+ struct maft_cfge_data cfge;
+};
+
+/* MAC Address Filter Table Response Data Buffer Format of Query action */
+struct maft_resp_query {
+ __le32 entry_id;
+ struct maft_keye_data keye;
+ struct maft_cfge_data cfge;
+};
+
+/* RSS Table Request Data Buffer Format of Update action */
+struct rsst_req_update {
+ struct ntmp_req_by_eid rbe;
+ u8 groups[];
+};
+
+#endif
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 1cca0425d493..fd9a93d02f8e 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -14,19 +14,17 @@
#define FEC_H
/****************************************************************************/
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include <linux/bpf.h>
#include <linux/clocksource.h>
+#include <linux/firmware/imx/sci.h>
#include <linux/net_tstamp.h>
#include <linux/pm_qos.h>
-#include <linux/bpf.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
-#include <dt-bindings/firmware/imx/rsrc.h>
-#include <linux/firmware/imx/sci.h>
#include <net/xdp.h>
-#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
- defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
+#if !defined(CONFIG_M5272) || defined(CONFIG_COMPILE_TEST)
/*
* Just figures, Motorola would have to change the offsets for
* registers in the same peripheral device on different models
@@ -115,7 +113,7 @@
#define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */
#define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */
#define IEEE_T_LCOL 0x25c /* Frames tx'd with late collision */
-#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excesv collisions */
+#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excessive collisions */
#define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */
#define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */
#define IEEE_T_SQE 0x26c /* Frames tx'd with SQE err */
@@ -242,23 +240,6 @@ struct bufdesc_ex {
__fec16 res0[4];
};
-/*
- * The following definitions courtesy of commproc.h, which where
- * Copyright (c) 1997 Dan Malek (dmalek@jlc.net).
- */
-#define BD_SC_EMPTY ((ushort)0x8000) /* Receive is empty */
-#define BD_SC_READY ((ushort)0x8000) /* Transmit is ready */
-#define BD_SC_WRAP ((ushort)0x2000) /* Last buffer descriptor */
-#define BD_SC_INTRPT ((ushort)0x1000) /* Interrupt on change */
-#define BD_SC_CM ((ushort)0x0200) /* Continuous mode */
-#define BD_SC_ID ((ushort)0x0100) /* Rec'd too many idles */
-#define BD_SC_P ((ushort)0x0100) /* xmt preamble */
-#define BD_SC_BR ((ushort)0x0020) /* Break received */
-#define BD_SC_FR ((ushort)0x0010) /* Framing error */
-#define BD_SC_PR ((ushort)0x0008) /* Parity error */
-#define BD_SC_OV ((ushort)0x0002) /* Overrun */
-#define BD_SC_CD ((ushort)0x0001) /* ?? */
-
/* Buffer descriptor control/status used by Ethernet receive.
*/
#define BD_ENET_RX_EMPTY ((ushort)0x8000)
@@ -342,16 +323,17 @@ struct bufdesc_ex {
#define FEC_TX_BD_FTYPE(X) (((X) & 0xf) << 20)
/* The number of Tx and Rx buffers. These are allocated from the page
- * pool. The code may assume these are power of two, so it it best
+ * pool. The code may assume these are power of two, so it is best
* to keep them that size.
* We don't need to allocate pages for the transmitter. We just use
* the skbuffer directly.
*/
+#define FEC_DRV_RESERVE_SPACE (XDP_PACKET_HEADROOM + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define FEC_ENET_XDP_HEADROOM (XDP_PACKET_HEADROOM)
#define FEC_ENET_RX_PAGES 256
-#define FEC_ENET_RX_FRSIZE (PAGE_SIZE - FEC_ENET_XDP_HEADROOM \
- - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define FEC_ENET_RX_FRSIZE (PAGE_SIZE - FEC_DRV_RESERVE_SPACE)
#define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define FEC_ENET_TX_FRSIZE 2048
@@ -460,7 +442,7 @@ struct bufdesc_ex {
#define FEC_QUIRK_SINGLE_MDIO (1 << 11)
/* Controller supports RACC register */
#define FEC_QUIRK_HAS_RACC (1 << 12)
-/* Controller supports interrupt coalesc */
+/* Controller supports interrupt coalesce */
#define FEC_QUIRK_HAS_COALESCE (1 << 13)
/* Interrupt doesn't wake CPU from deep idle */
#define FEC_QUIRK_ERR006687 (1 << 14)
@@ -495,7 +477,7 @@ struct bufdesc_ex {
*/
#define FEC_QUIRK_HAS_EEE (1 << 20)
-/* i.MX8QM ENET IP version add new feture to generate delayed TXC/RXC
+/* i.MX8QM ENET IP version add new feature to generate delayed TXC/RXC
* as an alternative option to make sure it works well with various PHYs.
* For the implementation of delayed clock, ENET takes synchronized 250MHz
* clocks to generate 2ns delay.
@@ -513,6 +495,9 @@ struct bufdesc_ex {
*/
#define FEC_QUIRK_HAS_MDIO_C45 BIT(24)
+/* Jumbo Frame support */
+#define FEC_QUIRK_JUMBO_FRAME BIT(25)
+
struct bufdesc_prop {
int qid;
/* Address of Rx and Tx buffers */
@@ -526,12 +511,6 @@ struct bufdesc_prop {
unsigned char dsize_log2;
};
-struct fec_enet_priv_txrx_info {
- int offset;
- struct page *page;
- struct sk_buff *skb;
-};
-
enum {
RX_XDP_REDIRECT = 0,
RX_XDP_PASS,
@@ -571,7 +550,7 @@ struct fec_enet_priv_tx_q {
struct fec_enet_priv_rx_q {
struct bufdesc_prop bd;
- struct fec_enet_priv_txrx_info rx_skb_info[RX_RING_SIZE];
+ struct page *rx_buf[RX_RING_SIZE];
/* page_pool */
struct page_pool *page_pool;
@@ -614,12 +593,14 @@ struct fec_enet_private {
unsigned int num_tx_queues;
unsigned int num_rx_queues;
- /* The saved address of a sent-in-place packet/buffer, for skfree(). */
struct fec_enet_priv_tx_q *tx_queue[FEC_ENET_MAX_TX_QS];
struct fec_enet_priv_rx_q *rx_queue[FEC_ENET_MAX_RX_QS];
unsigned int total_tx_ring_size;
unsigned int total_rx_ring_size;
+ unsigned int max_buf_size;
+ unsigned int pagepool_order;
+ unsigned int rx_frame_size;
struct platform_device *pdev;
@@ -662,7 +643,6 @@ struct fec_enet_private {
struct pm_qos_request pm_qos_req;
unsigned int tx_align;
- unsigned int rx_align;
/* hw interrupt coalesce */
unsigned int rx_pkts_itr;
@@ -671,8 +651,6 @@ struct fec_enet_private {
unsigned int tx_time_itr;
unsigned int itr_clk_rate;
- /* tx lpi eee mode */
- struct ethtool_keee eee;
unsigned int clk_ref_rate;
/* ptp clock period in ns*/
@@ -683,6 +661,7 @@ struct fec_enet_private {
unsigned int reload_period;
int pps_enable;
unsigned int next_counter;
+ bool perout_enable;
struct hrtimer perout_timer;
u64 perout_stime;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 1b55047c0237..c685a5c0cc51 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -22,56 +22,56 @@
* Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/pm_runtime.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <linux/cacheflush.h>
+#include <linux/clk.h>
+#include <linux/crc32.h>
#include <linux/delay.h>
-#include <linux/netdevice.h>
+#include <linux/errno.h>
#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <net/ip.h>
-#include <net/page_pool/helpers.h>
-#include <net/selftests.h>
-#include <net/tso.h>
-#include <linux/tcp.h>
-#include <linux/udp.h>
+#include <linux/fec.h>
+#include <linux/filter.h>
+#include <linux/gpio/consumer.h>
#include <linux/icmp.h>
-#include <linux/spinlock.h>
-#include <linux/workqueue.h>
-#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/ip.h>
#include <linux/irq.h>
-#include <linux/clk.h>
-#include <linux/crc32.h>
-#include <linux/platform_device.h>
-#include <linux/property.h>
+#include <linux/kernel.h>
#include <linux/mdio.h>
-#include <linux/phy.h>
-#include <linux/fec.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/regulator/consumer.h>
-#include <linux/if_vlan.h>
+#include <linux/phy.h>
#include <linux/pinctrl/consumer.h>
-#include <linux/gpio/consumer.h>
+#include <linux/phy_fixed.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/prefetch.h>
-#include <linux/mfd/syscon.h>
+#include <linux/property.h>
+#include <linux/ptrace.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/workqueue.h>
+#include <net/ip.h>
+#include <net/page_pool/helpers.h>
+#include <net/selftests.h>
+#include <net/tso.h>
#include <soc/imx/cpuidle.h>
-#include <linux/filter.h>
-#include <linux/bpf.h>
-#include <linux/bpf_trace.h>
-
-#include <asm/cacheflush.h>
#include "fec.h"
@@ -131,7 +131,7 @@ static const struct fec_devinfo fec_mvf600_info = {
FEC_QUIRK_HAS_MDIO_C45,
};
-static const struct fec_devinfo fec_imx6x_info = {
+static const struct fec_devinfo fec_imx6sx_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
@@ -168,7 +168,8 @@ static const struct fec_devinfo fec_imx8qm_info = {
FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
- FEC_QUIRK_DELAYED_CLKS_SUPPORT | FEC_QUIRK_HAS_MDIO_C45,
+ FEC_QUIRK_DELAYED_CLKS_SUPPORT | FEC_QUIRK_HAS_MDIO_C45 |
+ FEC_QUIRK_JUMBO_FRAME,
};
static const struct fec_devinfo fec_s32v234_info = {
@@ -196,7 +197,7 @@ static const struct of_device_id fec_dt_ids[] = {
{ .compatible = "fsl,imx28-fec", .data = &fec_imx28_info, },
{ .compatible = "fsl,imx6q-fec", .data = &fec_imx6q_info, },
{ .compatible = "fsl,mvf600-fec", .data = &fec_mvf600_info, },
- { .compatible = "fsl,imx6sx-fec", .data = &fec_imx6x_info, },
+ { .compatible = "fsl,imx6sx-fec", .data = &fec_imx6sx_info, },
{ .compatible = "fsl,imx6ul-fec", .data = &fec_imx6ul_info, },
{ .compatible = "fsl,imx8mq-fec", .data = &fec_imx8mq_info, },
{ .compatible = "fsl,imx8qm-fec", .data = &fec_imx8qm_info, },
@@ -234,6 +235,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
* 2048 byte skbufs are allocated. However, alignment requirements
* varies between FEC variants. Worst case is 64, so round down by 64.
*/
+#define MAX_JUMBO_BUF_SIZE (round_down(16384 - FEC_DRV_RESERVE_SPACE - 64, 64))
#define PKT_MAXBUF_SIZE (round_down(2048 - 64, 64))
#define PKT_MINBUF_SIZE 64
@@ -251,12 +253,10 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
* size bits. Other FEC hardware does not, so we need to take that into
* account when setting it.
*/
-#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
- defined(CONFIG_ARM64)
-#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
+#ifndef CONFIG_M5272
+#define OPT_ARCH_HAS_MAX_FL 1
#else
-#define OPT_FRAME_SIZE 0
+#define OPT_ARCH_HAS_MAX_FL 0
#endif
/* FEC MII MMFR bits definition */
@@ -276,16 +276,19 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define FEC_ECR_MAGICEN BIT(2)
#define FEC_ECR_SLEEP BIT(3)
#define FEC_ECR_EN1588 BIT(4)
+#define FEC_ECR_SPEED BIT(5)
#define FEC_ECR_BYTESWP BIT(8)
/* FEC RCR bits definition */
#define FEC_RCR_LOOP BIT(0)
-#define FEC_RCR_HALFDPX BIT(1)
+#define FEC_RCR_DRT BIT(1)
#define FEC_RCR_MII BIT(2)
#define FEC_RCR_PROMISC BIT(3)
#define FEC_RCR_BC_REJ BIT(4)
#define FEC_RCR_FLOWCTL BIT(5)
+#define FEC_RCR_RGMII BIT(6)
#define FEC_RCR_RMII BIT(8)
#define FEC_RCR_10BASET BIT(9)
+#define FEC_RCR_NLC BIT(30)
/* TX WMARK bits */
#define FEC_TXWMRK_STRFWD BIT(8)
@@ -468,14 +471,14 @@ fec_enet_create_page_pool(struct fec_enet_private *fep,
{
struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
struct page_pool_params pp_params = {
- .order = 0,
+ .order = fep->pagepool_order,
.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
.pool_size = size,
.nid = dev_to_node(&fep->pdev->dev),
.dev = &fep->pdev->dev,
.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
.offset = FEC_ENET_XDP_HEADROOM,
- .max_len = FEC_ENET_RX_FRSIZE,
+ .max_len = fep->rx_frame_size,
};
int err;
@@ -714,7 +717,12 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
txq->bd.cur = bdp;
/* Trigger transmission start */
- writel(0, txq->bd.reg_desc_active);
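+	/* ERR007885 workaround: re-read the descriptor-active register a few
+	 * times and only trigger transmission when it reads back as inactive.
+	 */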
+ if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active))
+ writel(0, txq->bd.reg_desc_active);
return 0;
}
@@ -840,6 +848,8 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
struct fec_enet_private *fep = netdev_priv(ndev);
int hdr_len, total_len, data_left;
struct bufdesc *bdp = txq->bd.cur;
+ struct bufdesc *tmp_bdp;
+ struct bufdesc_ex *ebdp;
struct tso_t tso;
unsigned int index = 0;
int ret;
@@ -913,7 +923,34 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
return 0;
err_release:
- /* TODO: Release all used data descriptors for TSO */
+ /* Release all used data descriptors for TSO */
+ tmp_bdp = txq->bd.cur;
+
+ while (tmp_bdp != bdp) {
+ /* Unmap data buffers */
+ if (tmp_bdp->cbd_bufaddr &&
+ !IS_TSO_HEADER(txq, fec32_to_cpu(tmp_bdp->cbd_bufaddr)))
+ dma_unmap_single(&fep->pdev->dev,
+ fec32_to_cpu(tmp_bdp->cbd_bufaddr),
+ fec16_to_cpu(tmp_bdp->cbd_datlen),
+ DMA_TO_DEVICE);
+
+ /* Clear standard buffer descriptor fields */
+ tmp_bdp->cbd_sc = 0;
+ tmp_bdp->cbd_datlen = 0;
+ tmp_bdp->cbd_bufaddr = 0;
+
+ /* Handle extended descriptor if enabled */
+ if (fep->bufdesc_ex) {
+ ebdp = (struct bufdesc_ex *)tmp_bdp;
+ ebdp->cbd_esc = 0;
+ }
+
+ tmp_bdp = fec_enet_get_nextdesc(tmp_bdp, &txq->bd);
+ }
+
+ dev_kfree_skb_any(skb);
+
return ret;
}
@@ -973,7 +1010,7 @@ static void fec_enet_bd_init(struct net_device *dev)
/* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
- bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
+ bdp->cbd_sc |= cpu_to_fec16(BD_ENET_RX_WRAP);
rxq->bd.cur = rxq->bd.base;
}
@@ -1009,7 +1046,9 @@ static void fec_enet_bd_init(struct net_device *dev)
struct page *page = txq->tx_buf[i].buf_p;
if (page)
- page_pool_put_page(page->pp, page, 0, false);
+ page_pool_put_page(pp_page_to_nmdesc(page)->pp,
+ page, 0,
+ false);
}
txq->tx_buf[i].buf_p = NULL;
@@ -1021,7 +1060,7 @@ static void fec_enet_bd_init(struct net_device *dev)
/* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
- bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
+ bdp->cbd_sc |= cpu_to_fec16(BD_ENET_TX_WRAP);
txq->dirty_tx = bdp;
}
}
@@ -1045,7 +1084,7 @@ static void fec_enet_enable_ring(struct net_device *ndev)
for (i = 0; i < fep->num_rx_queues; i++) {
rxq = fep->rx_queue[i];
writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
- writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
+ writel(fep->max_buf_size, fep->hwp + FEC_R_BUFF_SIZE(i));
/* enable DMA1/2 */
if (i)
@@ -1064,6 +1103,40 @@ static void fec_enet_enable_ring(struct net_device *ndev)
}
}
+/* Whack a reset. We should wait for this.
+ * On the i.MX6SX SoC, the ENET block is on the AXI bus, so we disable
+ * the MAC instead of resetting it.
+ */
+static void fec_ctrl_reset(struct fec_enet_private *fep, bool allow_wol)
+{
+ u32 val;
+
+ if (!allow_wol || !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
+ if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
+ ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
+ writel(0, fep->hwp + FEC_ECNTRL);
+ } else {
+ writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL);
+ udelay(10);
+ }
+ } else {
+ val = readl(fep->hwp + FEC_ECNTRL);
+ val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
+ writel(val, fep->hwp + FEC_ECNTRL);
+ }
+}
+
+static void fec_set_hw_mac_addr(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
+ (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
+ fep->hwp + FEC_ADDR_LOW);
+ writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
+ fep->hwp + FEC_ADDR_HIGH);
+}
+
/*
* This function is called to start or restart the FEC during a link
* change, transmit timeout, or to reconfigure the FEC. The network
@@ -1073,34 +1146,22 @@ static void
fec_restart(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- u32 temp_mac[2];
- u32 rcntl = OPT_FRAME_SIZE | 0x04;
u32 ecntl = FEC_ECR_ETHEREN;
+ u32 rcntl = FEC_RCR_MII;
+
+ if (OPT_ARCH_HAS_MAX_FL)
+ rcntl |= (fep->netdev->mtu + ETH_HLEN + ETH_FCS_LEN) << 16;
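+	/* Program the RCR MAX_FL (maximum frame length) field from the MTU */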
if (fep->bufdesc_ex)
fec_ptp_save_state(fep);
- /* Whack a reset. We should wait for this.
- * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
- * instead of reset MAC itself.
- */
- if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
- ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
- writel(0, fep->hwp + FEC_ECNTRL);
- } else {
- writel(1, fep->hwp + FEC_ECNTRL);
- udelay(10);
- }
+ fec_ctrl_reset(fep, false);
/*
* enet-mac reset will reset mac address registers too,
* so need to reconfigure it.
*/
- memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
- writel((__force u32)cpu_to_be32(temp_mac[0]),
- fep->hwp + FEC_ADDR_LOW);
- writel((__force u32)cpu_to_be32(temp_mac[1]),
- fep->hwp + FEC_ADDR_HIGH);
+ fec_set_hw_mac_addr(ndev);
/* Clear any outstanding interrupt, except MDIO. */
writel((0xffffffff & ~FEC_ENET_MII), fep->hwp + FEC_IEVENT);
@@ -1115,7 +1176,7 @@ fec_restart(struct net_device *ndev)
writel(0x04, fep->hwp + FEC_X_CNTRL);
} else {
/* No Rcv on Xmit */
- rcntl |= 0x02;
+ rcntl |= FEC_RCR_DRT;
writel(0x0, fep->hwp + FEC_X_CNTRL);
}
@@ -1134,7 +1195,7 @@ fec_restart(struct net_device *ndev)
else
val &= ~FEC_RACC_OPTIONS;
writel(val, fep->hwp + FEC_RACC);
- writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
+ writel(min(fep->rx_frame_size, fep->max_buf_size), fep->hwp + FEC_FTRL);
}
#endif
@@ -1144,14 +1205,11 @@ fec_restart(struct net_device *ndev)
*/
if (fep->quirks & FEC_QUIRK_ENET_MAC) {
/* Enable flow control and length check */
- rcntl |= 0x40000000 | 0x00000020;
+ rcntl |= FEC_RCR_NLC | FEC_RCR_FLOWCTL;
/* RGMII, RMII or MII */
- if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
- fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
- fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
- fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
- rcntl |= (1 << 6);
+ if (phy_interface_mode_is_rgmii(fep->phy_interface))
+ rcntl |= FEC_RCR_RGMII;
else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
rcntl |= FEC_RCR_RMII;
else
@@ -1160,7 +1218,7 @@ fec_restart(struct net_device *ndev)
/* 1G, 100M or 10M */
if (ndev->phydev) {
if (ndev->phydev->speed == SPEED_1000)
- ecntl |= (1 << 5);
+ ecntl |= FEC_ECR_SPEED;
else if (ndev->phydev->speed == SPEED_100)
rcntl &= ~FEC_RCR_10BASET;
else
@@ -1224,8 +1282,18 @@ fec_restart(struct net_device *ndev)
if (fep->quirks & FEC_QUIRK_ENET_MAC) {
/* enable ENET endian swap */
ecntl |= FEC_ECR_BYTESWP;
- /* enable ENET store and forward mode */
- writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK);
+
+ /* When Jumbo Frame is enabled, the FIFO may not be large enough
+ * to hold an entire frame. In such cases, if the MTU exceeds
+ * (PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN), configure the interface
+ * to operate in cut-through mode, triggered by the FIFO threshold.
+ * Otherwise, enable the ENET store-and-forward mode.
+ */
+ if ((fep->quirks & FEC_QUIRK_JUMBO_FRAME) &&
+ (ndev->mtu > (PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN)))
+ writel(0xF, fep->hwp + FEC_X_WMRK);
+ else
+ writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK);
}
if (fep->bufdesc_ex)
@@ -1344,22 +1412,7 @@ fec_stop(struct net_device *ndev)
if (fep->bufdesc_ex)
fec_ptp_save_state(fep);
- /* Whack a reset. We should wait for this.
- * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
- * instead of reset MAC itself.
- */
- if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
- if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
- writel(0, fep->hwp + FEC_ECNTRL);
- } else {
- writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL);
- udelay(10);
- }
- } else {
- val = readl(fep->hwp + FEC_ECNTRL);
- val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
- writel(val, fep->hwp + FEC_ECNTRL);
- }
+ fec_ctrl_reset(fep, true);
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
@@ -1549,7 +1602,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
xdp_return_frame_rx_napi(xdpf);
} else { /* recycle pages of XDP_TX frames */
/* The dma_sync_size = 0 as XDP_TX has already synced DMA for_device */
- page_pool_put_page(page->pp, page, 0, true);
+ page_pool_put_page(pp_page_to_nmdesc(page)->pp, page,
+ 0, true);
}
txq->tx_buf[index].buf_p = NULL;
@@ -1591,19 +1645,21 @@ static void fec_enet_tx(struct net_device *ndev, int budget)
fec_enet_tx_queue(ndev, i, budget);
}
-static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
+static int fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
struct bufdesc *bdp, int index)
{
struct page *new_page;
dma_addr_t phys_addr;
new_page = page_pool_dev_alloc_pages(rxq->page_pool);
- WARN_ON(!new_page);
- rxq->rx_skb_info[index].page = new_page;
+ if (unlikely(!new_page))
+ return -ENOMEM;
- rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM;
+ rxq->rx_buf[index] = new_page;
phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
+
+ return 0;
}
static u32
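The change above makes the RX descriptor refill fallible. A hedged sketch of the resulting policy (allocate the replacement buffer first, hand the old one up only on success, otherwise keep it on the ring and drop the frame), with malloc standing in for the page pool:

#include <stdlib.h>

struct rx_slot { void *buf; };

static void *alloc_buf(void) { return malloc(2048); }  /* stand-in for page_pool alloc */

/* Returns the buffer to pass up the stack, or NULL if the frame must be dropped. */
static void *rx_refill_and_take(struct rx_slot *slot)
{
	void *new_buf = alloc_buf();
	void *old_buf = slot->buf;

	if (!new_buf)
		return NULL;        /* keep old_buf on the ring, count a drop */

	slot->buf = new_buf;        /* descriptor now owns the fresh buffer */
	return old_buf;             /* caller builds the skb from old_buf */
}

int main(void)
{
	struct rx_slot slot = { .buf = alloc_buf() };
	void *frame = rx_refill_and_take(&slot);

	free(frame);
	free(slot.buf);
	return 0;
}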
@@ -1671,13 +1727,29 @@ xdp_err:
return ret;
}
+static void fec_enet_rx_vlan(const struct net_device *ndev, struct sk_buff *skb)
+{
+ if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ const struct vlan_ethhdr *vlan_header = skb_vlan_eth_hdr(skb);
+ const u16 vlan_tag = ntohs(vlan_header->h_vlan_TCI);
+
+ /* Push and remove the vlan tag */
+
+ memmove(skb->data + VLAN_HLEN, skb->data, ETH_ALEN * 2);
+ skb_pull(skb, VLAN_HLEN);
+ __vlan_hwaccel_put_tag(skb,
+ htons(ETH_P_8021Q),
+ vlan_tag);
+ }
+}
+
/* During a receive, the bd_rx.cur points to the current incoming buffer.
* When we update through the ring, if the next incoming buffer has
* not been given to the system, we just set the empty indicator,
* effectively tossing the packet.
*/
static int
-fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
+fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
{
struct fec_enet_private *fep = netdev_priv(ndev);
struct fec_enet_priv_rx_q *rxq;
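For reference, the tag-stripping step that fec_enet_rx_vlan() performs above can be illustrated by this small userspace sketch; the frame layout and constants are assumptions for the example, not driver structures:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN  6
#define VLAN_HLEN 4

/* frame layout: DA(6) SA(6) TPID(2) TCI(2) inner ethertype ...
 * Returns the new start of the frame; *tci holds the stripped tag. */
static uint8_t *strip_vlan_tag(uint8_t *frame, uint16_t *tci)
{
	*tci = (uint16_t)((frame[14] << 8) | frame[15]); /* TCI is big-endian on the wire */
	memmove(frame + VLAN_HLEN, frame, ETH_ALEN * 2); /* shift DA+SA over the tag */
	return frame + VLAN_HLEN;                        /* equivalent of skb_pull(VLAN_HLEN) */
}

int main(void)
{
	uint8_t frame[64] = {
		[12] = 0x81, [13] = 0x00,   /* TPID 0x8100 */
		[14] = 0x00, [15] = 0x05,   /* TCI: VLAN 5 */
		[16] = 0x08, [17] = 0x00,   /* inner ethertype IPv4 */
	};
	uint16_t tci;
	uint8_t *data = strip_vlan_tag(frame, &tci);

	printf("vlan id %u, ethertype %02x%02x\n", tci & 0xfff, data[12], data[13]);
	return 0;
}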
@@ -1685,11 +1757,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
unsigned short status;
struct sk_buff *skb;
ushort pkt_len;
- __u8 *data;
int pkt_received = 0;
struct bufdesc_ex *ebdp = NULL;
- bool vlan_packet_rcvd = false;
- u16 vlan_tag;
int index = 0;
bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
@@ -1698,9 +1767,9 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
int cpu = smp_processor_id();
struct xdp_buff xdp;
struct page *page;
+ __fec32 cbd_bufaddr;
u32 sub_len = 4;
-#if !defined(CONFIG_M5272)
/* If it has the FEC_QUIRK_HAS_RACC quirk property, the bit of
* FEC_RACC_SHIFT16 is set by default in the probe function.
*/
@@ -1708,7 +1777,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
data_start += 2;
sub_len += 2;
}
-#endif
#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
/*
@@ -1723,7 +1791,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
* These get messed up if we get called due to a busy condition.
*/
bdp = rxq->bd.cur;
- xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq);
+ xdp_init_buff(&xdp, PAGE_SIZE << fep->pagepool_order, &rxq->xdp_rxq);
while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
@@ -1763,15 +1831,22 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
ndev->stats.rx_packets++;
pkt_len = fec16_to_cpu(bdp->cbd_datlen);
ndev->stats.rx_bytes += pkt_len;
+ if (fep->quirks & FEC_QUIRK_HAS_RACC)
+ ndev->stats.rx_bytes -= 2;
index = fec_enet_get_bd_index(bdp, &rxq->bd);
- page = rxq->rx_skb_info[index].page;
+ page = rxq->rx_buf[index];
+ cbd_bufaddr = bdp->cbd_bufaddr;
+ if (fec_enet_update_cbd(rxq, bdp, index)) {
+ ndev->stats.rx_dropped++;
+ goto rx_processing_done;
+ }
+
dma_sync_single_for_cpu(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
+ fec32_to_cpu(cbd_bufaddr),
pkt_len,
DMA_FROM_DEVICE);
prefetch(page_address(page));
- fec_enet_update_cbd(rxq, bdp, index);
if (xdp_prog) {
xdp_buff_clear_frags_flag(&xdp);
@@ -1788,7 +1863,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
* include that when passing upstream as it messes up
* bridging applications.
*/
- skb = build_skb(page_address(page), PAGE_SIZE);
+ skb = build_skb(page_address(page),
+ PAGE_SIZE << fep->pagepool_order);
if (unlikely(!skb)) {
page_pool_recycle_direct(rxq->page_pool, page);
ndev->stats.rx_dropped++;
@@ -1802,10 +1878,11 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
skb_mark_for_recycle(skb);
if (unlikely(need_swap)) {
+ u8 *data;
+
data = page_address(page) + FEC_ENET_XDP_HEADROOM;
swap_buffer(data, pkt_len);
}
- data = skb->data;
/* Extract the enhanced buffer descriptor */
ebdp = NULL;
@@ -1813,20 +1890,9 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
ebdp = (struct bufdesc_ex *)bdp;
/* If this is a VLAN packet remove the VLAN Tag */
- vlan_packet_rcvd = false;
- if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
- fep->bufdesc_ex &&
- (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
- /* Push and remove the vlan tag */
- struct vlan_hdr *vlan_header =
- (struct vlan_hdr *) (data + ETH_HLEN);
- vlan_tag = ntohs(vlan_header->h_vlan_TCI);
-
- vlan_packet_rcvd = true;
-
- memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
- skb_pull(skb, VLAN_HLEN);
- }
+ if (fep->bufdesc_ex &&
+ (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN)))
+ fec_enet_rx_vlan(ndev, skb);
skb->protocol = eth_type_trans(skb, ndev);
@@ -1845,12 +1911,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
}
}
- /* Handle received VLAN packets */
- if (vlan_packet_rcvd)
- __vlan_hwaccel_put_tag(skb,
- htons(ETH_P_8021Q),
- vlan_tag);
-
skb_record_rx_queue(skb, queue_id);
napi_gro_receive(&fep->napi, skb);
@@ -1898,7 +1958,7 @@ static int fec_enet_rx(struct net_device *ndev, int budget)
/* Make sure that AVB queues are processed first. */
for (i = fep->num_rx_queues - 1; i >= 0; i--)
- done += fec_enet_rx_queue(ndev, budget - done, i);
+ done += fec_enet_rx_queue(ndev, i, budget - done);
return done;
}
@@ -2045,14 +2105,14 @@ static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us)
return us * (fep->clk_ref_rate / 1000) / 1000;
}
-static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable)
+static int fec_enet_eee_mode_set(struct net_device *ndev, u32 lpi_timer,
+ bool enable)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_keee *p = &fep->eee;
unsigned int sleep_cycle, wake_cycle;
if (enable) {
- sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer);
+ sleep_cycle = fec_enet_us_to_tx_cycle(ndev, lpi_timer);
wake_cycle = sleep_cycle;
} else {
sleep_cycle = 0;
@@ -2105,7 +2165,9 @@ static void fec_enet_adjust_link(struct net_device *ndev)
napi_enable(&fep->napi);
}
if (fep->quirks & FEC_QUIRK_HAS_EEE)
- fec_enet_eee_mode_set(ndev, phy_dev->enable_tx_lpi);
+ fec_enet_eee_mode_set(ndev,
+ phy_dev->eee_cfg.tx_lpi_timer,
+ phy_dev->enable_tx_lpi);
} else {
if (fep->link) {
netif_stop_queue(ndev);
@@ -2167,7 +2229,6 @@ static int fec_enet_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum)
ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
out:
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -2216,7 +2277,6 @@ static int fec_enet_mdio_read_c45(struct mii_bus *bus, int mii_id,
ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
out:
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -2248,7 +2308,6 @@ static int fec_enet_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum,
if (ret)
netdev_err(fep->netdev, "MDIO write timeout\n");
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -2292,7 +2351,6 @@ static int fec_enet_mdio_write_c45(struct mii_bus *bus, int mii_id,
netdev_err(fep->netdev, "MDIO write timeout\n");
out:
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -2315,7 +2373,8 @@ static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
*/
phy_dev = of_phy_find_device(fep->phy_node);
phy_reset_after_clk_enable(phy_dev);
- put_device(&phy_dev->mdio.dev);
+ if (phy_dev)
+ put_device(&phy_dev->mdio.dev);
}
}
@@ -2411,11 +2470,8 @@ static int fec_enet_parse_rgmii_delay(struct fec_enet_private *fep,
static int fec_enet_mii_probe(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct phy_device *phy_dev = NULL;
- char mdio_bus_id[MII_BUS_ID_SIZE];
- char phy_name[MII_BUS_ID_SIZE + 3];
- int phy_id;
- int dev_id = fep->dev_id;
+ struct phy_device *phy_dev;
+ int ret;
if (fep->phy_node) {
phy_dev = of_phy_connect(ndev, fep->phy_node,
@@ -2427,30 +2483,28 @@ static int fec_enet_mii_probe(struct net_device *ndev)
}
} else {
/* check for attached phy */
- for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
- if (!mdiobus_is_registered_device(fep->mii_bus, phy_id))
- continue;
- if (dev_id--)
- continue;
- strscpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
- break;
- }
+ phy_dev = phy_find_first(fep->mii_bus);
+ if (fep->dev_id && phy_dev)
+ phy_dev = phy_find_next(fep->mii_bus, phy_dev);
- if (phy_id >= PHY_MAX_ADDR) {
+ if (!phy_dev) {
netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
- strscpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
- phy_id = 0;
+ phy_dev = fixed_phy_register_100fd();
+ if (IS_ERR(phy_dev)) {
+ netdev_err(ndev, "could not register fixed PHY\n");
+ return PTR_ERR(phy_dev);
+ }
}
- snprintf(phy_name, sizeof(phy_name),
- PHY_ID_FMT, mdio_bus_id, phy_id);
- phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
- fep->phy_interface);
- }
+ ret = phy_connect_direct(ndev, phy_dev, &fec_enet_adjust_link,
+ fep->phy_interface);
+ if (ret) {
+ if (phy_is_pseudo_fixed_link(phy_dev))
+ fixed_phy_unregister(phy_dev);
+ netdev_err(ndev, "could not attach to PHY\n");
+ return ret;
+ }
- if (IS_ERR(phy_dev)) {
- netdev_err(ndev, "could not attach to PHY\n");
- return PTR_ERR(phy_dev);
}
/* mask with MAC supported features */
@@ -2458,9 +2512,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
phy_set_max_speed(phy_dev, 1000);
phy_remove_link_mode(phy_dev,
ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
-#if !defined(CONFIG_M5272)
phy_support_sym_pause(phy_dev);
-#endif
}
else
phy_set_max_speed(phy_dev, 100);
@@ -2487,7 +2539,6 @@ static int fec_enet_mii_init(struct platform_device *pdev)
int err = -ENXIO;
u32 mii_speed, holdtime;
u32 bus_freq;
- int addr;
/*
* The i.MX28 dual fec interfaces are not equal.
@@ -2602,11 +2653,8 @@ static int fec_enet_mii_init(struct platform_device *pdev)
of_node_put(node);
/* find all the PHY devices on the bus and set mac_managed_pm to true */
- for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
- phydev = mdiobus_get_phy(fep->mii_bus, addr);
- if (phydev)
- phydev->mac_managed_pm = true;
- }
+ mdiobus_for_each_phy(fep->mii_bus, phydev)
+ phydev->mac_managed_pm = true;
mii_cnt++;
@@ -2655,9 +2703,7 @@ static int fec_enet_get_regs_len(struct net_device *ndev)
}
/* List of registers that can safely be read to dump them with ethtool */
-#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
- defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
+#if !defined(CONFIG_M5272) || defined(CONFIG_COMPILE_TEST)
static __u32 fec_enet_register_version = 2;
static u32 fec_enet_register_offset[] = {
FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
@@ -2731,30 +2777,22 @@ static u32 fec_enet_register_offset[] = {
static void fec_enet_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *regbuf)
{
+ u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
struct fec_enet_private *fep = netdev_priv(ndev);
u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
+ u32 *reg_list = fec_enet_register_offset;
struct device *dev = &fep->pdev->dev;
u32 *buf = (u32 *)regbuf;
u32 i, off;
int ret;
-#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
- defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
- u32 *reg_list;
- u32 reg_cnt;
-
- if (!of_machine_is_compatible("fsl,imx6ul")) {
- reg_list = fec_enet_register_offset;
- reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
- } else {
+
+#if !defined(CONFIG_M5272) || defined(CONFIG_COMPILE_TEST)
+ if (of_machine_is_compatible("fsl,imx6ul")) {
reg_list = fec_enet_register_offset_6ul;
reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul);
}
-#else
- /* coldfire */
- static u32 *reg_list = fec_enet_register_offset;
- static const u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
#endif
+
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return;
@@ -2774,7 +2812,6 @@ static void fec_enet_get_regs(struct net_device *ndev,
buf[off] = readl(&theregs[off]);
}
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
@@ -3081,27 +3118,25 @@ static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
static void fec_enet_itr_coal_set(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- int rx_itr, tx_itr;
+ u32 rx_itr = 0, tx_itr = 0;
+ int rx_ictt, tx_ictt;
- /* Must be greater than zero to avoid unpredictable behavior */
- if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
- !fep->tx_time_itr || !fep->tx_pkts_itr)
- return;
-
- /* Select enet system clock as Interrupt Coalescing
- * timer Clock Source
- */
- rx_itr = FEC_ITR_CLK_SEL;
- tx_itr = FEC_ITR_CLK_SEL;
+ rx_ictt = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
+ tx_ictt = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
- /* set ICFT and ICTT */
- rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
- rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
- tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
- tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));
+ if (rx_ictt > 0 && fep->rx_pkts_itr > 1) {
+ /* Enable with enet system clock as Interrupt Coalescing timer Clock Source */
+ rx_itr = FEC_ITR_EN | FEC_ITR_CLK_SEL;
+ rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
+ rx_itr |= FEC_ITR_ICTT(rx_ictt);
+ }
- rx_itr |= FEC_ITR_EN;
- tx_itr |= FEC_ITR_EN;
+ if (tx_ictt > 0 && fep->tx_pkts_itr > 1) {
+ /* Enable with enet system clock as Interrupt Coalescing timer Clock Source */
+ tx_itr = FEC_ITR_EN | FEC_ITR_CLK_SEL;
+ tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
+ tx_itr |= FEC_ITR_ICTT(tx_ictt);
+ }
writel(tx_itr, fep->hwp + FEC_TXIC0);
writel(rx_itr, fep->hwp + FEC_RXIC0);
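A hedged sketch of how the coalescing word above might be assembled: the register is left zero (coalescing off) unless both the time and the frame-count thresholds are meaningful. The bit positions below are assumptions for illustration only, not the documented i.MX register layout:

#include <stdint.h>
#include <stdio.h>

#define ITR_EN       (1u << 31)            /* assumed enable bit */
#define ITR_CLK_SEL  (1u << 30)            /* assumed "enet system clock" select bit */
#define ITR_ICFT(x)  (((x) & 0xffu) << 20) /* assumed frame-count threshold field */
#define ITR_ICTT(x)  ((x) & 0xffffu)       /* assumed timer threshold field */

static uint32_t build_itr(int ictt_cycles, unsigned int pkts)
{
	uint32_t itr = 0;

	if (ictt_cycles > 0 && pkts > 1)
		itr = ITR_EN | ITR_CLK_SEL | ITR_ICFT(pkts) | ITR_ICTT(ictt_cycles);

	return itr;
}

int main(void)
{
	printf("itr=0x%08x\n", build_itr(1000, 32));
	printf("itr(off)=0x%08x\n", build_itr(0, 32));
	return 0;
}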
@@ -3181,7 +3216,6 @@ static int
fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_keee *p = &fep->eee;
if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
return -EOPNOTSUPP;
@@ -3189,8 +3223,6 @@ fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
if (!netif_running(ndev))
return -ENETDOWN;
- edata->tx_lpi_timer = p->tx_lpi_timer;
-
return phy_ethtool_get_eee(ndev->phydev, edata);
}
@@ -3198,7 +3230,6 @@ static int
fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_keee *p = &fep->eee;
if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
return -EOPNOTSUPP;
@@ -3206,8 +3237,6 @@ fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
if (!netif_running(ndev))
return -ENETDOWN;
- p->tx_lpi_timer = edata->tx_lpi_timer;
-
return phy_ethtool_set_eee(ndev->phydev, edata);
}
@@ -3282,7 +3311,8 @@ static void fec_enet_free_buffers(struct net_device *ndev)
for (q = 0; q < fep->num_rx_queues; q++) {
rxq = fep->rx_queue[q];
for (i = 0; i < rxq->bd.ring_size; i++)
- page_pool_put_full_page(rxq->page_pool, rxq->rx_skb_info[i].page, false);
+ page_pool_put_full_page(rxq->page_pool, rxq->rx_buf[i],
+ false);
for (i = 0; i < XDP_STATS_TOTAL; i++)
rxq->stats[i] = 0;
@@ -3311,7 +3341,8 @@ static void fec_enet_free_buffers(struct net_device *ndev)
} else {
struct page *page = txq->tx_buf[i].buf_p;
- page_pool_put_page(page->pp, page, 0, false);
+ page_pool_put_page(pp_page_to_nmdesc(page)->pp,
+ page, 0, false);
}
txq->tx_buf[i].buf_p = NULL;
@@ -3407,6 +3438,19 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
return err;
}
+	/* Some platforms require the RX buffer to be 64-byte aligned, others
+	 * need only 16-byte or 4-byte alignment. Since the page pool was
+	 * introduced into the driver, the RX buffer address is always the
+	 * page address plus FEC_ENET_XDP_HEADROOM, and FEC_ENET_XDP_HEADROOM
+	 * is 256 bytes, so the alignment requirement is met on all platforms.
+	 * To prevent future changes to FEC_ENET_XDP_HEADROOM from violating
+	 * this hardware limitation, a BUILD_BUG_ON() check ensures that
+	 * FEC_ENET_XDP_HEADROOM keeps the required alignment.
+	 */
+ BUILD_BUG_ON(FEC_ENET_XDP_HEADROOM & 0x3f);
+
for (i = 0; i < rxq->bd.ring_size; i++) {
page = page_pool_dev_alloc_pages(rxq->page_pool);
if (!page)
@@ -3415,8 +3459,7 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM;
bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
- rxq->rx_skb_info[i].page = page;
- rxq->rx_skb_info[i].offset = FEC_ENET_XDP_HEADROOM;
+ rxq->rx_buf[i] = page;
bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
if (fep->bufdesc_ex) {
@@ -3429,7 +3472,7 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
/* Set the last buffer to wrap. */
bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
- bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
+ bdp->cbd_sc |= cpu_to_fec16(BD_ENET_RX_WRAP);
return 0;
err_alloc:
@@ -3465,7 +3508,7 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
/* Set the last buffer to wrap. */
bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
- bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
+ bdp->cbd_sc |= cpu_to_fec16(BD_ENET_TX_WRAP);
return 0;
@@ -3558,7 +3601,6 @@ err_enet_mii_probe:
err_enet_alloc:
fec_enet_clk_enable(ndev, false);
clk_enable:
- pm_runtime_mark_last_busy(&fep->pdev->dev);
pm_runtime_put_autosuspend(&fep->pdev->dev);
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
return ret;
@@ -3568,8 +3610,9 @@ static int
fec_enet_close(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
+ struct phy_device *phy_dev = ndev->phydev;
- phy_stop(ndev->phydev);
+ phy_stop(phy_dev);
if (netif_device_present(ndev)) {
napi_disable(&fep->napi);
@@ -3577,7 +3620,10 @@ fec_enet_close(struct net_device *ndev)
fec_stop(ndev);
}
- phy_disconnect(ndev->phydev);
+ phy_disconnect(phy_dev);
+
+ if (!fep->phy_node && phy_is_pseudo_fixed_link(phy_dev))
+ fixed_phy_unregister(phy_dev);
if (fep->quirks & FEC_QUIRK_ERR006687)
imx6q_cpuidle_fec_irqs_unused();
@@ -3589,7 +3635,6 @@ fec_enet_close(struct net_device *ndev)
cpu_latency_qos_remove_request(&fep->pm_qos_req);
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
- pm_runtime_mark_last_busy(&fep->pdev->dev);
pm_runtime_put_autosuspend(&fep->pdev->dev);
fec_enet_free_buffers(ndev);
@@ -3662,7 +3707,6 @@ static void set_multicast_list(struct net_device *ndev)
static int
fec_set_mac_address(struct net_device *ndev, void *p)
{
- struct fec_enet_private *fep = netdev_priv(ndev);
struct sockaddr *addr = p;
if (addr) {
@@ -3679,11 +3723,8 @@ fec_set_mac_address(struct net_device *ndev, void *p)
if (!netif_running(ndev))
return 0;
- writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
- (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
- fep->hwp + FEC_ADDR_LOW);
- writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
- fep->hwp + FEC_ADDR_HIGH);
+ fec_set_hw_mac_addr(ndev);
+
return 0;
}
@@ -3983,6 +4024,23 @@ static int fec_hwtstamp_set(struct net_device *ndev,
return fec_ptp_set(ndev, config, extack);
}
+static int fec_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int order;
+
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ order = get_order(new_mtu + ETH_HLEN + ETH_FCS_LEN
+ + FEC_DRV_RESERVE_SPACE);
+ fep->rx_frame_size = (PAGE_SIZE << order) - FEC_DRV_RESERVE_SPACE;
+ fep->pagepool_order = order;
+ WRITE_ONCE(ndev->mtu, new_mtu);
+
+ return 0;
+}
+
static const struct net_device_ops fec_netdev_ops = {
.ndo_open = fec_enet_open,
.ndo_stop = fec_enet_close,
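The new fec_change_mtu() above derives the page-pool order from the requested MTU. A standalone sketch of that arithmetic follows; RESERVE stands in for FEC_DRV_RESERVE_SPACE, whose value is not shown in this diff, so 512 here is only an assumption:

#include <stdio.h>

#define PAGE_SHIFT  12
#define PAGE_SIZE   (1UL << PAGE_SHIFT)
#define ETH_HLEN    14
#define ETH_FCS_LEN 4
#define RESERVE     512   /* assumed headroom/tailroom reserve */

/* smallest order such that PAGE_SIZE << order >= size */
static int get_order_sketch(unsigned long size)
{
	int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	int mtu = 9000;
	int order = get_order_sketch(mtu + ETH_HLEN + ETH_FCS_LEN + RESERVE);
	unsigned long rx_frame_size = (PAGE_SIZE << order) - RESERVE;

	printf("order=%d rx_frame_size=%lu\n", order, rx_frame_size);
	return 0;
}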
@@ -3992,6 +4050,7 @@ static const struct net_device_ops fec_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = fec_timeout,
.ndo_set_mac_address = fec_set_mac_address,
+ .ndo_change_mtu = fec_change_mtu,
.ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_features = fec_set_features,
.ndo_bpf = fec_enet_bpf,
@@ -4026,10 +4085,8 @@ static int fec_enet_init(struct net_device *ndev)
WARN_ON(dsize != (1 << dsize_log2));
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
- fep->rx_align = 0xf;
fep->tx_align = 0xf;
#else
- fep->rx_align = 0x3;
fep->tx_align = 0x3;
#endif
fep->rx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
@@ -4118,10 +4175,8 @@ static int fec_enet_init(struct net_device *ndev)
fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
}
- if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
+ if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES)
fep->tx_align = 0;
- fep->rx_align = 0x3f;
- }
ndev->hw_features = ndev->features;
@@ -4339,11 +4394,9 @@ fec_probe(struct platform_device *pdev)
fep->num_rx_queues = num_rx_qs;
fep->num_tx_queues = num_tx_qs;
-#if !defined(CONFIG_M5272)
/* default enable pause frame auto negotiation */
if (fep->quirks & FEC_QUIRK_HAS_GBIT)
fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
-#endif
/* Select default pin state */
pinctrl_pm_select_default_state(&pdev->dev);
@@ -4522,7 +4575,15 @@ fec_probe(struct platform_device *pdev)
fec_enet_clk_enable(ndev, false);
pinctrl_pm_select_sleep_state(&pdev->dev);
- ndev->max_mtu = PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN;
+ fep->pagepool_order = 0;
+ fep->rx_frame_size = FEC_ENET_RX_FRSIZE;
+
+ if (fep->quirks & FEC_QUIRK_JUMBO_FRAME)
+ fep->max_buf_size = MAX_JUMBO_BUF_SIZE;
+ else
+ fep->max_buf_size = PKT_MAXBUF_SIZE;
+
+ ndev->max_mtu = fep->max_buf_size - ETH_HLEN - ETH_FCS_LEN;
ret = register_netdev(ndev);
if (ret)
@@ -4536,7 +4597,6 @@ fec_probe(struct platform_device *pdev)
INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
- pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 2bfaf14f65c8..3fc29afc9854 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -619,7 +619,7 @@ static void mpc52xx_fec_hw_init(struct net_device *dev)
out_be32(&fec->rfifo_alarm, 0x0000030c);
out_be32(&fec->tfifo_alarm, 0x00000100);
- /* begin transmittion when 256 bytes are in FIFO (or EOF or FIFO full) */
+ /* begin transmission when 256 bytes are in FIFO (or EOF or FIFO full) */
out_be32(&fec->x_wmrk, FEC_FIFO_WMRK_256B);
/* enable crc generation */
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 7f6b57432071..4b7bad9a485d 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -7,31 +7,30 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/netdevice.h>
+#include <linux/errno.h>
#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/workqueue.h>
-#include <linux/bitops.h>
+#include <linux/fec.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/ioport.h>
#include <linux/irq.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/phy.h>
-#include <linux/fec.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_net.h>
+#include <linux/pci.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/ptrace.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
#include "fec.h"
@@ -97,7 +96,7 @@
* cyclecounter structure used to construct a ns counter from the
* arbitrary fixed point registers
*/
-static u64 fec_ptp_read(const struct cyclecounter *cc)
+static u64 fec_ptp_read(struct cyclecounter *cc)
{
struct fec_enet_private *fep =
container_of(cc, struct fec_enet_private, cc);
@@ -118,7 +117,7 @@ static u64 fec_ptp_read(const struct cyclecounter *cc)
* @fep: the fec_enet_private structure handle
* @enable: enable the channel pps output
*
- * This function enble the PPS ouput on the timer channel.
+ * This function enables the PPS output on the timer channel.
*/
static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
{
@@ -129,6 +128,12 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
spin_lock_irqsave(&fep->tmreg_lock, flags);
+ if (fep->perout_enable) {
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ dev_err(&fep->pdev->dev, "PEROUT is running");
+ return -EBUSY;
+ }
+
if (fep->pps_enable == enable) {
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
return 0;
@@ -173,7 +178,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
* very close to the second point, which means NSEC_PER_SEC
* - ts.tv_nsec is close to be zero(For example 20ns); Since the timer
* is still running when we calculate the first compare event, it is
- * possible that the remaining nanoseonds run out before the compare
+ * possible that the remaining nanoseconds run out before the compare
* counter is calculated and written into TCCR register. To avoid
* this possibility, we will set the compare event to be the next
* of next second. The current setting is 31-bit timer and wrap
@@ -244,6 +249,7 @@ static int fec_ptp_pps_perout(struct fec_enet_private *fep)
* the FEC_TCCR register in time and missed the start time.
*/
if (fep->perout_stime < curr_time + 100 * NSEC_PER_MSEC) {
+ fep->perout_enable = false;
dev_err(&fep->pdev->dev, "Current time is too close to the start time!\n");
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
return -1;
@@ -498,7 +504,10 @@ static int fec_ptp_pps_disable(struct fec_enet_private *fep, uint channel)
{
unsigned long flags;
+ hrtimer_cancel(&fep->perout_timer);
+
spin_lock_irqsave(&fep->tmreg_lock, flags);
+ fep->perout_enable = false;
writel(0, fep->hwp + FEC_TCSR(channel));
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
@@ -530,6 +539,8 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
return ret;
} else if (rq->type == PTP_CLK_REQ_PEROUT) {
+ u32 reload_period;
+
/* Reject requests with unsupported flags */
if (rq->perout.flags)
return -EOPNOTSUPP;
@@ -549,12 +560,14 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
return -EOPNOTSUPP;
}
- fep->reload_period = div_u64(period_ns, 2);
- if (on && fep->reload_period) {
+ reload_period = div_u64(period_ns, 2);
+ if (on && reload_period) {
+ u64 perout_stime;
+
/* Convert 1588 timestamp to ns*/
start_time.tv_sec = rq->perout.start.sec;
start_time.tv_nsec = rq->perout.start.nsec;
- fep->perout_stime = timespec64_to_ns(&start_time);
+ perout_stime = timespec64_to_ns(&start_time);
mutex_lock(&fep->ptp_clk_mutex);
if (!fep->ptp_clk_on) {
@@ -563,18 +576,41 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
return -EOPNOTSUPP;
}
spin_lock_irqsave(&fep->tmreg_lock, flags);
+
+ if (fep->pps_enable) {
+ dev_err(&fep->pdev->dev, "PPS is running");
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ if (fep->perout_enable) {
+ dev_err(&fep->pdev->dev,
+ "PEROUT has been enabled\n");
+ ret = -EBUSY;
+ goto unlock;
+ }
+
/* Read current timestamp */
curr_time = timecounter_read(&fep->tc);
- spin_unlock_irqrestore(&fep->tmreg_lock, flags);
- mutex_unlock(&fep->ptp_clk_mutex);
+ if (perout_stime <= curr_time) {
+ dev_err(&fep->pdev->dev,
+ "Start time must be greater than current time\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
/* Calculate time difference */
- delta = fep->perout_stime - curr_time;
+ delta = perout_stime - curr_time;
+ fep->reload_period = reload_period;
+ fep->perout_stime = perout_stime;
+ fep->perout_enable = true;
- if (fep->perout_stime <= curr_time) {
- dev_err(&fep->pdev->dev, "Start time must larger than current time!\n");
- return -EINVAL;
- }
+unlock:
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ mutex_unlock(&fep->ptp_clk_mutex);
+
+ if (ret)
+ return ret;
/* Because the timer counter of FEC only has 31-bits, correspondingly,
* the time comparison register FEC_TCCR also only low 31 bits can be
@@ -682,8 +718,11 @@ static irqreturn_t fec_pps_interrupt(int irq, void *dev_id)
fep->next_counter = (fep->next_counter + fep->reload_period) &
fep->cc.mask;
- event.type = PTP_CLOCK_PPS;
- ptp_clock_event(fep->ptp_clock, &event);
+ if (fep->pps_enable) {
+ event.type = PTP_CLOCK_PPS;
+ ptp_clock_event(fep->ptp_clock, &event);
+ }
+
return IRQ_HANDLED;
}
@@ -739,8 +778,8 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
- hrtimer_init(&fep->perout_timer, CLOCK_REALTIME, HRTIMER_MODE_REL);
- fep->perout_timer.function = fec_ptp_pps_perout_handler;
+ hrtimer_setup(&fep->perout_timer, fec_ptp_pps_perout_handler, CLOCK_REALTIME,
+ HRTIMER_MODE_REL);
irq = platform_get_irq_byname_optional(pdev, "pps");
if (irq < 0)
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index fb416d60dcd7..11887458f050 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -2690,13 +2690,12 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
{
struct fman *fman;
struct device_node *fm_node, *muram_node;
+ void __iomem *base_addr;
struct resource *res;
u32 val, range[2];
int err, irq;
struct clk *clk;
u32 clk_rate;
- phys_addr_t phys_base_addr;
- resource_size_t mem_size;
fman = kzalloc(sizeof(*fman), GFP_KERNEL);
if (!fman)
@@ -2724,18 +2723,6 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
goto fman_node_put;
fman->dts_params.err_irq = err;
- /* Get the FM address */
- res = platform_get_resource(of_dev, IORESOURCE_MEM, 0);
- if (!res) {
- err = -EINVAL;
- dev_err(&of_dev->dev, "%s: Can't get FMan memory resource\n",
- __func__);
- goto fman_node_put;
- }
-
- phys_base_addr = res->start;
- mem_size = resource_size(res);
-
clk = of_clk_get(fm_node, 0);
if (IS_ERR(clk)) {
err = PTR_ERR(clk);
@@ -2803,24 +2790,16 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
}
}
- fman->dts_params.res =
- devm_request_mem_region(&of_dev->dev, phys_base_addr,
- mem_size, "fman");
- if (!fman->dts_params.res) {
- err = -EBUSY;
- dev_err(&of_dev->dev, "%s: request_mem_region() failed\n",
- __func__);
- goto fman_free;
- }
-
- fman->dts_params.base_addr =
- devm_ioremap(&of_dev->dev, phys_base_addr, mem_size);
- if (!fman->dts_params.base_addr) {
- err = -ENOMEM;
+ base_addr = devm_platform_get_and_ioremap_resource(of_dev, 0, &res);
+ if (IS_ERR(base_addr)) {
+ err = PTR_ERR(base_addr);
dev_err(&of_dev->dev, "%s: devm_ioremap() failed\n", __func__);
goto fman_free;
}
+ fman->dts_params.base_addr = base_addr;
+ fman->dts_params.res = res;
+
fman->dev = &of_dev->dev;
err = of_platform_populate(fm_node, NULL, NULL, &of_dev->dev);
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index 85617bb94959..51402dff72c5 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -755,12 +755,12 @@ static struct fman_mac *pcs_to_dtsec(struct phylink_pcs *pcs)
return container_of(pcs, struct fman_mac, pcs);
}
-static void dtsec_pcs_get_state(struct phylink_pcs *pcs,
+static void dtsec_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct fman_mac *dtsec = pcs_to_dtsec(pcs);
- phylink_mii_c22_pcs_get_state(dtsec->tbidev, state);
+ phylink_mii_c22_pcs_get_state(dtsec->tbidev, neg_mode, state);
}
static int dtsec_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
@@ -1446,7 +1446,6 @@ int dtsec_initialization(struct mac_device *mac_dev,
goto _return_fm_mac_free;
}
dtsec->pcs.ops = &dtsec_pcs_ops;
- dtsec->pcs.neg_mode = true;
dtsec->pcs.poll = true;
supported = mac_dev->phylink_config.supported_interfaces;
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 3925441143fa..c84f0336c94c 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -649,6 +649,7 @@ static u32 memac_if_mode(phy_interface_t interface)
return IF_MODE_GMII | IF_MODE_RGMII;
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
case PHY_INTERFACE_MODE_QSGMII:
return IF_MODE_GMII;
case PHY_INTERFACE_MODE_10GBASER:
@@ -667,6 +668,7 @@ static struct phylink_pcs *memac_select_pcs(struct phylink_config *config,
switch (iface) {
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
return memac->sgmii_pcs;
case PHY_INTERFACE_MODE_QSGMII:
return memac->qsgmii_pcs;
@@ -685,6 +687,7 @@ static int memac_prepare(struct phylink_config *config, unsigned int mode,
switch (iface) {
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
case PHY_INTERFACE_MODE_QSGMII:
case PHY_INTERFACE_MODE_10GBASER:
return phy_set_mode_ext(memac->serdes, PHY_MODE_ETHERNET,
@@ -897,6 +900,89 @@ static int memac_set_exception(struct fman_mac *memac,
return 0;
}
+static u64 memac_read64(void __iomem *reg)
+{
+ u32 low, high, tmp;
+
+ do {
+ high = ioread32be(reg + 4);
+ low = ioread32be(reg);
+ tmp = ioread32be(reg + 4);
+ } while (high != tmp);
+
+ return ((u64)high << 32) | low;
+}
+
+static void memac_get_pause_stats(struct fman_mac *memac,
+ struct ethtool_pause_stats *s)
+{
+ s->tx_pause_frames = memac_read64(&memac->regs->txpf_l);
+ s->rx_pause_frames = memac_read64(&memac->regs->rxpf_l);
+}
+
+static const struct ethtool_rmon_hist_range memac_rmon_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 9600 },
+ {},
+};
+
+static void memac_get_rmon_stats(struct fman_mac *memac,
+ struct ethtool_rmon_stats *s,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ s->undersize_pkts = memac_read64(&memac->regs->rund_l);
+ s->oversize_pkts = memac_read64(&memac->regs->rovr_l);
+ s->fragments = memac_read64(&memac->regs->rfrg_l);
+ s->jabbers = memac_read64(&memac->regs->rjbr_l);
+
+ s->hist[0] = memac_read64(&memac->regs->r64_l);
+ s->hist[1] = memac_read64(&memac->regs->r127_l);
+ s->hist[2] = memac_read64(&memac->regs->r255_l);
+ s->hist[3] = memac_read64(&memac->regs->r511_l);
+ s->hist[4] = memac_read64(&memac->regs->r1023_l);
+ s->hist[5] = memac_read64(&memac->regs->r1518_l);
+ s->hist[6] = memac_read64(&memac->regs->r1519x_l);
+
+ s->hist_tx[0] = memac_read64(&memac->regs->t64_l);
+ s->hist_tx[1] = memac_read64(&memac->regs->t127_l);
+ s->hist_tx[2] = memac_read64(&memac->regs->t255_l);
+ s->hist_tx[3] = memac_read64(&memac->regs->t511_l);
+ s->hist_tx[4] = memac_read64(&memac->regs->t1023_l);
+ s->hist_tx[5] = memac_read64(&memac->regs->t1518_l);
+ s->hist_tx[6] = memac_read64(&memac->regs->t1519x_l);
+
+ *ranges = memac_rmon_ranges;
+}
+
+static void memac_get_eth_ctrl_stats(struct fman_mac *memac,
+ struct ethtool_eth_ctrl_stats *s)
+{
+ s->MACControlFramesTransmitted = memac_read64(&memac->regs->tcnp_l);
+ s->MACControlFramesReceived = memac_read64(&memac->regs->rcnp_l);
+}
+
+static void memac_get_eth_mac_stats(struct fman_mac *memac,
+ struct ethtool_eth_mac_stats *s)
+{
+ s->FramesTransmittedOK = memac_read64(&memac->regs->tfrm_l);
+ s->FramesReceivedOK = memac_read64(&memac->regs->rfrm_l);
+ s->FrameCheckSequenceErrors = memac_read64(&memac->regs->rfcs_l);
+ s->AlignmentErrors = memac_read64(&memac->regs->raln_l);
+ s->OctetsTransmittedOK = memac_read64(&memac->regs->teoct_l);
+ s->FramesLostDueToIntMACXmitError = memac_read64(&memac->regs->terr_l);
+ s->OctetsReceivedOK = memac_read64(&memac->regs->reoct_l);
+ s->FramesLostDueToIntMACRcvError = memac_read64(&memac->regs->rdrntp_l);
+ s->MulticastFramesXmittedOK = memac_read64(&memac->regs->tmca_l);
+ s->BroadcastFramesXmittedOK = memac_read64(&memac->regs->tbca_l);
+ s->MulticastFramesReceivedOK = memac_read64(&memac->regs->rmca_l);
+ s->BroadcastFramesReceivedOK = memac_read64(&memac->regs->rbca_l);
+}
+
static int memac_init(struct fman_mac *memac)
{
struct memac_cfg *memac_drv_param;
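The memac_read64() helper added above reads a 64-bit counter that the hardware exposes as two 32-bit halves, retrying until the high half is stable so that a carry between the two reads cannot produce a torn value. A self-contained sketch of the same pattern, with a plain memory read standing in for ioread32be():

#include <stdint.h>
#include <stdio.h>

/* stand-in for ioread32be(); a real MMIO accessor would go here */
static uint32_t reg_read32(const volatile uint32_t *addr)
{
	return *addr;
}

static uint64_t read_split_counter(const volatile uint32_t *lo_reg,
				   const volatile uint32_t *hi_reg)
{
	uint32_t hi, lo, again;

	do {
		hi = reg_read32(hi_reg);     /* high half first */
		lo = reg_read32(lo_reg);
		again = reg_read32(hi_reg);  /* re-check: did a carry race us? */
	} while (hi != again);

	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	volatile uint32_t counter[2] = { 0x89abcdefu, 0x01234567u }; /* lo, hi */

	printf("counter = 0x%016llx\n",
	       (unsigned long long)read_split_counter(&counter[0], &counter[1]));
	return 0;
}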
@@ -1089,6 +1175,10 @@ int memac_initialization(struct mac_device *mac_dev,
mac_dev->set_tstamp = memac_set_tstamp;
mac_dev->enable = memac_enable;
mac_dev->disable = memac_disable;
+ mac_dev->get_pause_stats = memac_get_pause_stats;
+ mac_dev->get_rmon_stats = memac_get_rmon_stats;
+ mac_dev->get_eth_ctrl_stats = memac_get_eth_ctrl_stats;
+ mac_dev->get_eth_mac_stats = memac_get_eth_mac_stats;
mac_dev->fman_mac = memac_config(mac_dev, params);
if (!mac_dev->fman_mac)
@@ -1225,7 +1315,8 @@ int memac_initialization(struct mac_device *mac_dev,
* be careful and not enable this if we are using MII or RGMII, since
* those configuration modes don't use in-band autonegotiation.
*/
- if (!of_property_read_bool(mac_node, "managed") &&
+ if (!of_property_present(mac_node, "managed") &&
+ mac_dev->phy_if != PHY_INTERFACE_MODE_2500BASEX &&
mac_dev->phy_if != PHY_INTERFACE_MODE_MII &&
!phy_interface_mode_is_rgmii(mac_dev->phy_if))
mac_dev->phylink_config.default_an_inband = true;
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index a39fcea6a77a..f27ff625fe29 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -14,8 +14,6 @@
#include <linux/device.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
-#include <linux/phy_fixed.h>
-#include <linux/phylink.h>
#include <linux/etherdevice.h>
#include <linux/libfdt_env.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
index 955ace338965..63c2c5b4f99e 100644
--- a/drivers/net/ethernet/freescale/fman/mac.h
+++ b/drivers/net/ethernet/freescale/fman/mac.h
@@ -16,6 +16,11 @@
#include "fman.h"
#include "fman_mac.h"
+struct ethtool_eth_ctrl_stats;
+struct ethtool_eth_mac_stats;
+struct ethtool_pause_stats;
+struct ethtool_rmon_stats;
+struct ethtool_rmon_hist_range;
struct fman_mac;
struct mac_priv_s;
@@ -46,6 +51,15 @@ struct mac_device {
enet_addr_t *eth_addr);
int (*remove_hash_mac_addr)(struct fman_mac *mac_dev,
enet_addr_t *eth_addr);
+ void (*get_pause_stats)(struct fman_mac *memac,
+ struct ethtool_pause_stats *s);
+ void (*get_rmon_stats)(struct fman_mac *memac,
+ struct ethtool_rmon_stats *s,
+ const struct ethtool_rmon_hist_range **ranges);
+ void (*get_eth_ctrl_stats)(struct fman_mac *memac,
+ struct ethtool_eth_ctrl_stats *s);
+ void (*get_eth_mac_stats)(struct fman_mac *memac,
+ struct ethtool_eth_mac_stats *s);
void (*update_speed)(struct mac_device *mac_dev, int speed);
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 56d2f79fb7e3..de88776dd2a2 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -479,10 +479,12 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
"missing 'reg' property in node %pOF\n",
tbi);
err = -EBUSY;
+ of_node_put(tbi);
goto error;
}
set_tbipa(*prop, pdev,
data->get_tbipa, priv->map, &res);
+ of_node_put(tbi);
}
}
@@ -491,8 +493,8 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
err = of_mdiobus_register(new_bus, np);
if (err) {
- dev_err(&pdev->dev, "cannot register %s as MDIO bus\n",
- new_bus->name);
+ dev_err_probe(&pdev->dev, err, "cannot register %s as MDIO bus\n",
+ new_bus->name);
goto error;
}
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 435138f4699d..7c0f049f0938 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -97,6 +97,7 @@
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>
+#include <linux/property.h>
#include "gianfar.h"
@@ -571,18 +572,6 @@ static int gfar_parse_group(struct device_node *np,
return 0;
}
-static int gfar_of_group_count(struct device_node *np)
-{
- struct device_node *child;
- int num = 0;
-
- for_each_available_child_of_node(np, child)
- if (of_node_name_eq(child, "queue-group"))
- num++;
-
- return num;
-}
-
/* Reads the controller's registers to determine what interface
* connects it to the PHY.
*/
@@ -654,8 +643,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
num_rx_qs = 1;
} else { /* MQ_MG_MODE */
/* get the actual number of supported groups */
- unsigned int num_grps = gfar_of_group_count(np);
+ unsigned int num_grps;
+ num_grps = device_get_named_child_node_count(&ofdev->dev,
+ "queue-group");
if (num_grps == 0 || num_grps > MAXGROUPS) {
dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
num_grps);
@@ -1647,20 +1638,11 @@ static void gfar_configure_serdes(struct net_device *dev)
*/
static int init_phy(struct net_device *dev)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct gfar_private *priv = netdev_priv(dev);
phy_interface_t interface = priv->interface;
struct phy_device *phydev;
struct ethtool_keee edata;
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
-
priv->oldlink = 0;
priv->oldspeed = 0;
priv->oldduplex = -1;
@@ -1675,9 +1657,8 @@ static int init_phy(struct net_device *dev)
if (interface == PHY_INTERFACE_MODE_SGMII)
gfar_configure_serdes(dev);
- /* Remove any features not supported by the controller */
- linkmode_and(phydev->supported, phydev->supported, mask);
- linkmode_copy(phydev->advertising, phydev->supported);
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT))
+ phy_set_max_speed(phydev, SPEED_100);
/* Add support for flow control */
phy_support_asym_pause(phydev);
@@ -2071,15 +2052,13 @@ static void gfar_timeout(struct net_device *dev, unsigned int txqueue)
schedule_work(&priv->reset_task);
}
-static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
+static int gfar_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
struct gfar_private *priv = netdev_priv(netdev);
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
priv->hwts_tx_en = 0;
break;
@@ -2092,7 +2071,7 @@ static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
if (priv->hwts_rx_en) {
priv->hwts_rx_en = 0;
@@ -2106,44 +2085,23 @@ static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
priv->hwts_rx_en = 1;
reset_gfar(netdev);
}
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
break;
}
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
-static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+static int gfar_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config config;
struct gfar_private *priv = netdev_priv(netdev);
- config.flags = 0;
- config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- config.rx_filter = (priv->hwts_rx_en ?
- HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
-
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
-}
-
-static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct phy_device *phydev = dev->phydev;
-
- if (!netif_running(dev))
- return -EINVAL;
-
- if (cmd == SIOCSHWTSTAMP)
- return gfar_hwtstamp_set(dev, rq);
- if (cmd == SIOCGHWTSTAMP)
- return gfar_hwtstamp_get(dev, rq);
+ config->tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config->rx_filter = priv->hwts_rx_en ? HWTSTAMP_FILTER_ALL :
+ HWTSTAMP_FILTER_NONE;
- if (!phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(phydev, rq, cmd);
+ return 0;
}
/* Interrupt Handler for Transmit complete */
@@ -3184,7 +3142,7 @@ static const struct net_device_ops gfar_netdev_ops = {
.ndo_set_features = gfar_set_features,
.ndo_set_rx_mode = gfar_set_multi,
.ndo_tx_timeout = gfar_timeout,
- .ndo_eth_ioctl = gfar_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_get_stats64 = gfar_get_stats64,
.ndo_change_carrier = fixed_phy_change_carrier,
.ndo_set_mac_address = gfar_set_mac_addr,
@@ -3192,6 +3150,8 @@ static const struct net_device_ops gfar_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = gfar_netpoll,
#endif
+ .ndo_hwtstamp_get = gfar_hwtstamp_get,
+ .ndo_hwtstamp_set = gfar_hwtstamp_set,
};
/* Set up the ethernet device structure, private data,
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 781d92e703cb..6fa752d3b60d 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -781,14 +781,26 @@ err:
return ret;
}
-static int gfar_set_hash_opts(struct gfar_private *priv,
- struct ethtool_rxnfc *cmd)
+static int gfar_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
+ struct gfar_private *priv = netdev_priv(dev);
+ int ret;
+
+ if (test_bit(GFAR_RESETTING, &priv->state))
+ return -EBUSY;
+
+ mutex_lock(&priv->rx_queue_access);
+
+ ret = 0;
/* write the filer rules here */
if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
- return -EINVAL;
+ ret = -EINVAL;
- return 0;
+ mutex_unlock(&priv->rx_queue_access);
+
+ return ret;
}
static int gfar_check_filer_hardware(struct gfar_private *priv)
@@ -1398,9 +1410,6 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
mutex_lock(&priv->rx_queue_access);
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = gfar_set_hash_opts(priv, cmd);
- break;
case ETHTOOL_SRXCLSRLINS:
if ((cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
cmd->fs.ring_cookie >= priv->num_rx_queues) ||
@@ -1422,6 +1431,13 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return ret;
}
+static u32 gfar_get_rx_ring_count(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ return priv->num_rx_queues;
+}
+
static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -1429,9 +1445,6 @@ static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
int ret = 0;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = priv->num_rx_queues;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = priv->rx_list.count;
break;
@@ -1466,8 +1479,10 @@ static int gfar_get_ts_info(struct net_device *dev,
if (ptp_node) {
ptp_dev = of_find_device_by_node(ptp_node);
of_node_put(ptp_node);
- if (ptp_dev)
+ if (ptp_dev) {
ptp = platform_get_drvdata(ptp_dev);
+ put_device(&ptp_dev->dev);
+ }
}
if (ptp)
@@ -1508,6 +1523,8 @@ const struct ethtool_ops gfar_ethtool_ops = {
#endif
.set_rxnfc = gfar_set_nfc,
.get_rxnfc = gfar_get_nfc,
+ .get_rx_ring_count = gfar_get_rx_ring_count,
+ .set_rxfh_fields = gfar_set_rxfh_fields,
.get_ts_info = gfar_get_ts_info,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index d3ddca22d6b0..affd5a6c44e7 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -26,7 +26,7 @@
#include <linux/dma-mapping.h>
#include <linux/mii.h>
#include <linux/phy.h>
-#include <linux/phy_fixed.h>
+#include <linux/phylink.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -34,6 +34,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
+#include <linux/rtnetlink.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
@@ -132,7 +133,6 @@ static const struct ucc_geth_info ugeth_primary_info = {
.transmitFlowControl = 1,
.maxGroupAddrInHash = 4,
.maxIndAddrInHash = 4,
- .prel = 7,
.maxFrameLength = 1518+16, /* Add extra bytes for VLANs etc. */
.minFrameLength = 64,
.maxD1Length = 1520+16, /* Add extra bytes for VLANs etc. */
@@ -1205,34 +1205,6 @@ static int init_mac_station_addr_regs(u8 address_byte_0,
return 0;
}
-static int init_check_frame_length_mode(int length_check,
- u32 __iomem *maccfg2_register)
-{
- u32 value = 0;
-
- value = in_be32(maccfg2_register);
-
- if (length_check)
- value |= MACCFG2_LC;
- else
- value &= ~MACCFG2_LC;
-
- out_be32(maccfg2_register, value);
- return 0;
-}
-
-static int init_preamble_length(u8 preamble_length,
- u32 __iomem *maccfg2_register)
-{
- if ((preamble_length < 3) || (preamble_length > 7))
- return -EINVAL;
-
- clrsetbits_be32(maccfg2_register, MACCFG2_PREL_MASK,
- preamble_length << MACCFG2_PREL_SHIFT);
-
- return 0;
-}
-
static int init_rx_parameters(int reject_broadcast,
int receive_short_frames,
int promiscuous, u32 __iomem *upsmr_register)
@@ -1287,94 +1259,11 @@ static int init_min_frame_len(u16 min_frame_length,
return 0;
}
-static int adjust_enet_interface(struct ucc_geth_private *ugeth)
+static bool phy_interface_mode_is_reduced(phy_interface_t interface)
{
- struct ucc_geth_info *ug_info;
- struct ucc_geth __iomem *ug_regs;
- struct ucc_fast __iomem *uf_regs;
- int ret_val;
- u32 upsmr, maccfg2;
- u16 value;
-
- ugeth_vdbg("%s: IN", __func__);
-
- ug_info = ugeth->ug_info;
- ug_regs = ugeth->ug_regs;
- uf_regs = ugeth->uccf->uf_regs;
-
- /* Set MACCFG2 */
- maccfg2 = in_be32(&ug_regs->maccfg2);
- maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
- if ((ugeth->max_speed == SPEED_10) ||
- (ugeth->max_speed == SPEED_100))
- maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
- else if (ugeth->max_speed == SPEED_1000)
- maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
- maccfg2 |= ug_info->padAndCrc;
- out_be32(&ug_regs->maccfg2, maccfg2);
-
- /* Set UPSMR */
- upsmr = in_be32(&uf_regs->upsmr);
- upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M |
- UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM);
- if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
- if (ugeth->phy_interface != PHY_INTERFACE_MODE_RMII)
- upsmr |= UCC_GETH_UPSMR_RPM;
- switch (ugeth->max_speed) {
- case SPEED_10:
- upsmr |= UCC_GETH_UPSMR_R10M;
- fallthrough;
- case SPEED_100:
- if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI)
- upsmr |= UCC_GETH_UPSMR_RMM;
- }
- }
- if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
- upsmr |= UCC_GETH_UPSMR_TBIM;
- }
- if (ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII)
- upsmr |= UCC_GETH_UPSMR_SGMM;
-
- out_be32(&uf_regs->upsmr, upsmr);
-
- /* Disable autonegotiation in tbi mode, because by default it
- comes up in autonegotiation mode. */
- /* Note that this depends on proper setting in utbipar register. */
- if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
- struct ucc_geth_info *ug_info = ugeth->ug_info;
- struct phy_device *tbiphy;
-
- if (!ug_info->tbi_node)
- pr_warn("TBI mode requires that the device tree specify a tbi-handle\n");
-
- tbiphy = of_phy_find_device(ug_info->tbi_node);
- if (!tbiphy)
- pr_warn("Could not get TBI device\n");
-
- value = phy_read(tbiphy, ENET_TBI_MII_CR);
- value &= ~0x1000; /* Turn off autonegotiation */
- phy_write(tbiphy, ENET_TBI_MII_CR, value);
-
- put_device(&tbiphy->mdio.dev);
- }
-
- init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
-
- ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
- if (ret_val != 0) {
- if (netif_msg_probe(ugeth))
- pr_err("Preamble length must be between 3 and 7 inclusive\n");
- return ret_val;
- }
-
- return 0;
+ return phy_interface_mode_is_rgmii(interface) ||
+ interface == PHY_INTERFACE_MODE_RMII ||
+ interface == PHY_INTERFACE_MODE_RTBI;
}
static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
@@ -1545,108 +1434,7 @@ static void ugeth_activate(struct ucc_geth_private *ugeth)
/* allow to xmit again */
netif_tx_wake_all_queues(ugeth->ndev);
- __netdev_watchdog_up(ugeth->ndev);
-}
-
-/* Called every time the controller might need to be made
- * aware of new link state. The PHY code conveys this
- * information through variables in the ugeth structure, and this
- * function converts those variables into the appropriate
- * register values, and can bring down the device if needed.
- */
-
-static void adjust_link(struct net_device *dev)
-{
- struct ucc_geth_private *ugeth = netdev_priv(dev);
- struct ucc_geth __iomem *ug_regs;
- struct ucc_fast __iomem *uf_regs;
- struct phy_device *phydev = ugeth->phydev;
- int new_state = 0;
-
- ug_regs = ugeth->ug_regs;
- uf_regs = ugeth->uccf->uf_regs;
-
- if (phydev->link) {
- u32 tempval = in_be32(&ug_regs->maccfg2);
- u32 upsmr = in_be32(&uf_regs->upsmr);
- /* Now we make sure that we can be in full duplex mode.
- * If not, we operate in half-duplex mode. */
- if (phydev->duplex != ugeth->oldduplex) {
- new_state = 1;
- if (!(phydev->duplex))
- tempval &= ~(MACCFG2_FDX);
- else
- tempval |= MACCFG2_FDX;
- ugeth->oldduplex = phydev->duplex;
- }
-
- if (phydev->speed != ugeth->oldspeed) {
- new_state = 1;
- switch (phydev->speed) {
- case SPEED_1000:
- tempval = ((tempval &
- ~(MACCFG2_INTERFACE_MODE_MASK)) |
- MACCFG2_INTERFACE_MODE_BYTE);
- break;
- case SPEED_100:
- case SPEED_10:
- tempval = ((tempval &
- ~(MACCFG2_INTERFACE_MODE_MASK)) |
- MACCFG2_INTERFACE_MODE_NIBBLE);
- /* if reduced mode, re-set UPSMR.R10M */
- if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
- if (phydev->speed == SPEED_10)
- upsmr |= UCC_GETH_UPSMR_R10M;
- else
- upsmr &= ~UCC_GETH_UPSMR_R10M;
- }
- break;
- default:
- if (netif_msg_link(ugeth))
- pr_warn(
- "%s: Ack! Speed (%d) is not 10/100/1000!",
- dev->name, phydev->speed);
- break;
- }
- ugeth->oldspeed = phydev->speed;
- }
-
- if (!ugeth->oldlink) {
- new_state = 1;
- ugeth->oldlink = 1;
- }
-
- if (new_state) {
- /*
- * To change the MAC configuration we need to disable
- * the controller. To do so, we have to either grab
- * ugeth->lock, which is a bad idea since 'graceful
- * stop' commands might take quite a while, or we can
- * quiesce driver's activity.
- */
- ugeth_quiesce(ugeth);
- ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
-
- out_be32(&ug_regs->maccfg2, tempval);
- out_be32(&uf_regs->upsmr, upsmr);
-
- ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
- ugeth_activate(ugeth);
- }
- } else if (ugeth->oldlink) {
- new_state = 1;
- ugeth->oldlink = 0;
- ugeth->oldspeed = 0;
- ugeth->oldduplex = -1;
- }
-
- if (new_state && netif_msg_link(ugeth))
- phy_print_status(phydev);
+ netdev_watchdog_up(ugeth->ndev);
}
/* Initialize TBI PHY interface for communicating with the
@@ -1664,8 +1452,7 @@ static void uec_configure_serdes(struct net_device *dev)
struct phy_device *tbiphy;
if (!ug_info->tbi_node) {
- dev_warn(&dev->dev, "SGMII mode requires that the device "
- "tree specify a tbi-handle\n");
+ dev_warn(&dev->dev, "SGMII mode requires that the device tree specify a tbi-handle\n");
return;
}
@@ -1696,34 +1483,145 @@ static void uec_configure_serdes(struct net_device *dev)
put_device(&tbiphy->mdio.dev);
}
-/* Configure the PHY for dev.
- * returns 0 if success. -1 if failure
- */
-static int init_phy(struct net_device *dev)
+static void ugeth_mac_link_up(struct phylink_config *config, struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
{
- struct ucc_geth_private *priv = netdev_priv(dev);
- struct ucc_geth_info *ug_info = priv->ug_info;
- struct phy_device *phydev;
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct ucc_geth_private *ugeth = netdev_priv(ndev);
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+ struct ucc_geth __iomem *ug_regs = ugeth->ug_regs;
+ struct ucc_fast __iomem *uf_regs = ugeth->uccf->uf_regs;
+ u32 old_maccfg2, maccfg2 = in_be32(&ug_regs->maccfg2);
+ u32 old_upsmr, upsmr = in_be32(&uf_regs->upsmr);
- priv->oldlink = 0;
- priv->oldspeed = 0;
- priv->oldduplex = -1;
+ old_maccfg2 = maccfg2;
+ old_upsmr = upsmr;
- phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0,
- priv->phy_interface);
- if (!phydev) {
- dev_err(&dev->dev, "Could not attach to PHY\n");
- return -ENODEV;
+ /* No length check */
+ maccfg2 &= ~MACCFG2_LC;
+ maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
+ upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M |
+ UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM);
+
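+ /* 10/100 Mbps use the nibble (MII-style) MAC interface mode, gigabit uses the byte (GMII-style) mode */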
+ if (speed == SPEED_10 || speed == SPEED_100)
+ maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
+ else if (speed == SPEED_1000)
+ maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
+
+ maccfg2 |= ug_info->padAndCrc;
+
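+ /* Reduced-pin interfaces (RMII/RGMII/RTBI) also need the matching UPSMR mode bits */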
+ if (phy_interface_mode_is_reduced(interface)) {
+
+ if (interface != PHY_INTERFACE_MODE_RMII)
+ upsmr |= UCC_GETH_UPSMR_RPM;
+
+ switch (speed) {
+ case SPEED_10:
+ upsmr |= UCC_GETH_UPSMR_R10M;
+ fallthrough;
+ case SPEED_100:
+ if (interface != PHY_INTERFACE_MODE_RTBI)
+ upsmr |= UCC_GETH_UPSMR_RMM;
+ }
}
- if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII)
- uec_configure_serdes(dev);
+ if (interface == PHY_INTERFACE_MODE_TBI ||
+ interface == PHY_INTERFACE_MODE_RTBI)
+ upsmr |= UCC_GETH_UPSMR_TBIM;
- phy_set_max_speed(phydev, priv->max_speed);
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ upsmr |= UCC_GETH_UPSMR_SGMM;
- priv->phydev = phydev;
+ if (duplex == DUPLEX_HALF)
+ maccfg2 &= ~(MACCFG2_FDX);
+ else
+ maccfg2 |= MACCFG2_FDX;
- return 0;
+ if (maccfg2 != old_maccfg2 || upsmr != old_upsmr) {
+ /*
+ * To change the MAC configuration we need to disable
+ * the controller. To do so, we have to either grab
+ * ugeth->lock, which is a bad idea since 'graceful
+ * stop' commands might take quite a while, or we can
+ * quiesce driver's activity.
+ */
+ ugeth_quiesce(ugeth);
+ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
+
+ out_be32(&ug_regs->maccfg2, maccfg2);
+ out_be32(&uf_regs->upsmr, upsmr);
+
+ ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
+ ugeth_activate(ugeth);
+ }
+
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ uec_configure_serdes(ndev);
+
+ if (!phylink_autoneg_inband(mode)) {
+ ug_info->aufc = 0;
+ ug_info->receiveFlowControl = rx_pause;
+ ug_info->transmitFlowControl = tx_pause;
+
+ init_flow_control_params(ug_info->aufc,
+ ug_info->receiveFlowControl,
+ ug_info->transmitFlowControl,
+ ug_info->pausePeriod,
+ ug_info->extensionField,
+ &ugeth->uccf->uf_regs->upsmr,
+ &ugeth->ug_regs->uempr,
+ &ugeth->ug_regs->maccfg1);
+ }
+
+ ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
+}
+
+static void ugeth_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct ucc_geth_private *ugeth = netdev_priv(ndev);
+
+ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
+}
+
+static void ugeth_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct ucc_geth_private *ugeth = netdev_priv(ndev);
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+ u16 value;
+
+ if (state->interface == PHY_INTERFACE_MODE_TBI ||
+ state->interface == PHY_INTERFACE_MODE_RTBI) {
+ struct phy_device *tbiphy;
+
+ if (!ug_info->tbi_node)
+ pr_warn("TBI mode requires that the device tree specify a tbi-handle\n");
+
+ tbiphy = of_phy_find_device(ug_info->tbi_node);
+ if (!tbiphy)
+ pr_warn("Could not get TBI device\n");
+
+ value = phy_read(tbiphy, ENET_TBI_MII_CR);
+ value &= ~0x1000; /* Turn off autonegotiation */
+ phy_write(tbiphy, ENET_TBI_MII_CR, value);
+
+ put_device(&tbiphy->mdio.dev);
+ }
+
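+ /* With in-band autoneg, enable automatic flow control (aufc) and let the hardware resolve pause */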
+ if (phylink_autoneg_inband(mode)) {
+ ug_info->aufc = 1;
+
+ init_flow_control_params(ug_info->aufc, 1, 1,
+ ug_info->pausePeriod,
+ ug_info->extensionField,
+ &ugeth->uccf->uf_regs->upsmr,
+ &ugeth->ug_regs->uempr,
+ &ugeth->ug_regs->maccfg1);
+ }
}
static void ugeth_dump_regs(struct ucc_geth_private *ugeth)
@@ -1995,7 +1893,6 @@ static void ucc_geth_set_multi(struct net_device *dev)
static void ucc_geth_stop(struct ucc_geth_private *ugeth)
{
struct ucc_geth __iomem *ug_regs = ugeth->ug_regs;
- struct phy_device *phydev = ugeth->phydev;
ugeth_vdbg("%s: IN", __func__);
@@ -2004,7 +1901,7 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
* Must be done before disabling the controller
* or deadlock may happen.
*/
- phy_stop(phydev);
+ phylink_stop(ugeth->phylink);
/* Disable the controller */
ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
@@ -3246,12 +3143,6 @@ static int ucc_geth_init_mac(struct ucc_geth_private *ugeth)
goto err;
}
- err = adjust_enet_interface(ugeth);
- if (err) {
- netif_err(ugeth, ifup, dev, "Cannot configure net device, aborting\n");
- goto err;
- }
-
/* Set MACSTNADDR1, MACSTNADDR2 */
/* For more details see the hardware spec. */
init_mac_station_addr_regs(dev->dev_addr[0],
@@ -3263,12 +3154,6 @@ static int ucc_geth_init_mac(struct ucc_geth_private *ugeth)
&ugeth->ug_regs->macstnaddr1,
&ugeth->ug_regs->macstnaddr2);
- err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
- if (err) {
- netif_err(ugeth, ifup, dev, "Cannot enable net device, aborting\n");
- goto err;
- }
-
return 0;
err:
ucc_geth_stop(ugeth);
@@ -3291,10 +3176,10 @@ static int ucc_geth_open(struct net_device *dev)
return -EINVAL;
}
- err = init_phy(dev);
+ err = phylink_of_phy_connect(ugeth->phylink, ugeth->dev->of_node, 0);
if (err) {
- netif_err(ugeth, ifup, dev, "Cannot initialize PHY, aborting\n");
- return err;
+ dev_err(&dev->dev, "Could not attach to PHY\n");
+ return -ENODEV;
}
err = ucc_geth_init_mac(ugeth);
@@ -3310,13 +3195,13 @@ static int ucc_geth_open(struct net_device *dev)
goto err;
}
- phy_start(ugeth->phydev);
+ phylink_start(ugeth->phylink);
napi_enable(&ugeth->napi);
netdev_reset_queue(dev);
netif_start_queue(dev);
device_set_wakeup_capable(&dev->dev,
- qe_alive_during_sleep() || ugeth->phydev->irq);
+ qe_alive_during_sleep() || dev->phydev->irq);
device_set_wakeup_enable(&dev->dev, ugeth->wol_en);
return err;
@@ -3337,8 +3222,7 @@ static int ucc_geth_close(struct net_device *dev)
cancel_work_sync(&ugeth->timeout_work);
ucc_geth_stop(ugeth);
- phy_disconnect(ugeth->phydev);
- ugeth->phydev = NULL;
+ phylink_disconnect_phy(ugeth->phylink);
free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);
@@ -3372,7 +3256,7 @@ static void ucc_geth_timeout_work(struct work_struct *work)
ucc_geth_stop(ugeth);
ucc_geth_init_mac(ugeth);
/* Must start PHY here */
- phy_start(ugeth->phydev);
+ phylink_start(ugeth->phylink);
netif_tx_start_all_queues(dev);
}
@@ -3397,6 +3281,7 @@ static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state)
{
struct net_device *ndev = platform_get_drvdata(ofdev);
struct ucc_geth_private *ugeth = netdev_priv(ndev);
+ bool mac_wol = false;
if (!netif_running(ndev))
return 0;
@@ -3410,14 +3295,17 @@ static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state)
*/
ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
- if (ugeth->wol_en & WAKE_MAGIC) {
+ if (ugeth->wol_en & WAKE_MAGIC && !ugeth->phy_wol_en) {
setbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD);
setbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE);
ucc_fast_enable(ugeth->uccf, COMM_DIR_RX_AND_TX);
- } else if (!(ugeth->wol_en & WAKE_PHY)) {
- phy_stop(ugeth->phydev);
+ mac_wol = true;
}
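+ /* Tell phylink whether the MAC itself handles wake-up so it can decide whether to keep the link up */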
+ rtnl_lock();
+ phylink_suspend(ugeth->phylink, mac_wol);
+ rtnl_unlock();
+
return 0;
}
@@ -3451,12 +3339,9 @@ static int ucc_geth_resume(struct platform_device *ofdev)
}
}
- ugeth->oldlink = 0;
- ugeth->oldspeed = 0;
- ugeth->oldduplex = -1;
-
- phy_stop(ugeth->phydev);
- phy_start(ugeth->phydev);
+ rtnl_lock();
+ phylink_resume(ugeth->phylink);
+ rtnl_unlock();
napi_enable(&ugeth->napi);
netif_device_attach(ndev);
@@ -3469,32 +3354,6 @@ static int ucc_geth_resume(struct platform_device *ofdev)
#define ucc_geth_resume NULL
#endif
-static phy_interface_t to_phy_interface(const char *phy_connection_type)
-{
- if (strcasecmp(phy_connection_type, "mii") == 0)
- return PHY_INTERFACE_MODE_MII;
- if (strcasecmp(phy_connection_type, "gmii") == 0)
- return PHY_INTERFACE_MODE_GMII;
- if (strcasecmp(phy_connection_type, "tbi") == 0)
- return PHY_INTERFACE_MODE_TBI;
- if (strcasecmp(phy_connection_type, "rmii") == 0)
- return PHY_INTERFACE_MODE_RMII;
- if (strcasecmp(phy_connection_type, "rgmii") == 0)
- return PHY_INTERFACE_MODE_RGMII;
- if (strcasecmp(phy_connection_type, "rgmii-id") == 0)
- return PHY_INTERFACE_MODE_RGMII_ID;
- if (strcasecmp(phy_connection_type, "rgmii-txid") == 0)
- return PHY_INTERFACE_MODE_RGMII_TXID;
- if (strcasecmp(phy_connection_type, "rgmii-rxid") == 0)
- return PHY_INTERFACE_MODE_RGMII_RXID;
- if (strcasecmp(phy_connection_type, "rtbi") == 0)
- return PHY_INTERFACE_MODE_RTBI;
- if (strcasecmp(phy_connection_type, "sgmii") == 0)
- return PHY_INTERFACE_MODE_SGMII;
-
- return PHY_INTERFACE_MODE_MII;
-}
-
static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct ucc_geth_private *ugeth = netdev_priv(dev);
@@ -3502,10 +3361,7 @@ static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (!netif_running(dev))
return -EINVAL;
- if (!ugeth->phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(ugeth->phydev, rq, cmd);
+ return phylink_mii_ioctl(ugeth->phylink, rq, cmd);
}
static const struct net_device_ops ucc_geth_netdev_ops = {
@@ -3513,7 +3369,6 @@ static const struct net_device_ops ucc_geth_netdev_ops = {
.ndo_stop = ucc_geth_close,
.ndo_start_xmit = ucc_geth_start_xmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_carrier = fixed_phy_change_carrier,
.ndo_set_mac_address = ucc_geth_set_mac_addr,
.ndo_set_rx_mode = ucc_geth_set_multi,
.ndo_tx_timeout = ucc_geth_timeout,
@@ -3553,6 +3408,12 @@ static int ucc_geth_parse_clock(struct device_node *np, const char *which,
return 0;
}
+static const struct phylink_mac_ops ugeth_mac_ops = {
+ .mac_link_up = ugeth_mac_link_up,
+ .mac_link_down = ugeth_mac_link_down,
+ .mac_config = ugeth_mac_config,
+};
+
static int ucc_geth_probe(struct platform_device* ofdev)
{
struct device *device = &ofdev->dev;
@@ -3560,23 +3421,12 @@ static int ucc_geth_probe(struct platform_device* ofdev)
struct net_device *dev = NULL;
struct ucc_geth_private *ugeth = NULL;
struct ucc_geth_info *ug_info;
+ struct device_node *phy_node;
+ struct phylink *phylink;
struct resource res;
- int err, ucc_num, max_speed = 0;
+ int err, ucc_num;
const unsigned int *prop;
phy_interface_t phy_interface;
- static const int enet_to_speed[] = {
- SPEED_10, SPEED_10, SPEED_10,
- SPEED_100, SPEED_100, SPEED_100,
- SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000,
- };
- static const phy_interface_t enet_to_phy_interface[] = {
- PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII,
- PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII,
- PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII,
- PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII,
- PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI,
- PHY_INTERFACE_MODE_SGMII,
- };
ugeth_vdbg("%s: IN", __func__);
@@ -3591,77 +3441,56 @@ static int ucc_geth_probe(struct platform_device* ofdev)
if ((ucc_num < 0) || (ucc_num > 7))
return -ENODEV;
- ug_info = kmemdup(&ugeth_primary_info, sizeof(*ug_info), GFP_KERNEL);
- if (ug_info == NULL)
+ ug_info = devm_kmemdup(&ofdev->dev, &ugeth_primary_info,
+ sizeof(*ug_info), GFP_KERNEL);
+ if (!ug_info)
return -ENOMEM;
ug_info->uf_info.ucc_num = ucc_num;
err = ucc_geth_parse_clock(np, "rx", &ug_info->uf_info.rx_clock);
if (err)
- goto err_free_info;
+ return err;
err = ucc_geth_parse_clock(np, "tx", &ug_info->uf_info.tx_clock);
if (err)
- goto err_free_info;
+ return err;
err = of_address_to_resource(np, 0, &res);
if (err)
- goto err_free_info;
+ return err;
ug_info->uf_info.regs = res.start;
ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
- ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0);
- if (!ug_info->phy_node && of_phy_is_fixed_link(np)) {
- /*
- * In the case of a fixed PHY, the DT node associated
- * to the PHY is the Ethernet MAC DT node.
- */
- err = of_phy_register_fixed_link(np);
- if (err)
- goto err_free_info;
- ug_info->phy_node = of_node_get(np);
- }
-
/* Find the TBI PHY node. If it's not there, we don't support SGMII */
ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
- /* get the phy interface type, or default to MII */
- prop = of_get_property(np, "phy-connection-type", NULL);
- if (!prop) {
- /* handle interface property present in old trees */
- prop = of_get_property(ug_info->phy_node, "interface", NULL);
- if (prop != NULL) {
- phy_interface = enet_to_phy_interface[*prop];
- max_speed = enet_to_speed[*prop];
- } else
- phy_interface = PHY_INTERFACE_MODE_MII;
- } else {
- phy_interface = to_phy_interface((const char *)prop);
- }
-
- /* get speed, or derive from PHY interface */
- if (max_speed == 0)
- switch (phy_interface) {
- case PHY_INTERFACE_MODE_GMII:
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- case PHY_INTERFACE_MODE_TBI:
- case PHY_INTERFACE_MODE_RTBI:
- case PHY_INTERFACE_MODE_SGMII:
- max_speed = SPEED_1000;
- break;
- default:
- max_speed = SPEED_100;
- break;
+ phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (phy_node) {
+ prop = of_get_property(phy_node, "interface", NULL);
+ if (prop) {
+ dev_err(&ofdev->dev,
+ "Device-tree property 'interface' is no longer supported. Please use 'phy-connection-type' instead.");
+ of_node_put(phy_node);
+ err = -EINVAL;
+ goto err_put_tbi;
}
+ of_node_put(phy_node);
+ }
+
+ err = of_get_phy_mode(np, &phy_interface);
+ if (err) {
+ dev_err(&ofdev->dev, "Invalid phy-connection-type");
+ goto err_put_tbi;
+ }
- if (max_speed == SPEED_1000) {
+ if (phy_interface == PHY_INTERFACE_MODE_GMII ||
+ phy_interface_mode_is_rgmii(phy_interface) ||
+ phy_interface == PHY_INTERFACE_MODE_TBI ||
+ phy_interface == PHY_INTERFACE_MODE_RTBI ||
+ phy_interface == PHY_INTERFACE_MODE_SGMII) {
unsigned int snums = qe_get_num_of_snums();
- /* configure muram FIFOs for gigabit operation */
ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT;
ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT;
ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT;
@@ -3687,11 +3516,10 @@ static int ucc_geth_probe(struct platform_device* ofdev)
ug_info->uf_info.irq);
/* Create an ethernet device instance */
- dev = alloc_etherdev(sizeof(*ugeth));
-
- if (dev == NULL) {
+ dev = devm_alloc_etherdev(&ofdev->dev, sizeof(*ugeth));
+ if (!dev) {
err = -ENOMEM;
- goto err_deregister_fixed_link;
+ goto err_put_tbi;
}
ugeth = netdev_priv(dev);
@@ -3718,21 +3546,50 @@ static int ucc_geth_probe(struct platform_device* ofdev)
dev->max_mtu = 1518;
ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
- ugeth->phy_interface = phy_interface;
- ugeth->max_speed = max_speed;
- /* Carrier starts down, phylib will bring it up */
- netif_carrier_off(dev);
+ ugeth->phylink_config.dev = &dev->dev;
+ ugeth->phylink_config.type = PHYLINK_NETDEV;
+
+ ugeth->phylink_config.mac_capabilities =
+ MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
+
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ ugeth->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ ugeth->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ ugeth->phylink_config.supported_interfaces);
+ phy_interface_set_rgmii(ugeth->phylink_config.supported_interfaces);
- err = register_netdev(dev);
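+ /* SGMII/TBI/RTBI are only supported when a TBI PHY node is present */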
+ if (ug_info->tbi_node) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ ugeth->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_TBI,
+ ugeth->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RTBI,
+ ugeth->phylink_config.supported_interfaces);
+ }
+
+ phylink = phylink_create(&ugeth->phylink_config, dev_fwnode(&dev->dev),
+ phy_interface, &ugeth_mac_ops);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ goto err_put_tbi;
+ }
+
+ ugeth->phylink = phylink;
+
+ err = devm_register_netdev(&ofdev->dev, dev);
if (err) {
if (netif_msg_probe(ugeth))
pr_err("%s: Cannot register net device, aborting\n",
dev->name);
- goto err_free_netdev;
+ goto err_destroy_phylink;
}
- of_get_ethdev_address(np, dev);
+ err = of_get_ethdev_address(np, dev);
+ if (err == -EPROBE_DEFER)
+ goto err_destroy_phylink;
ugeth->ug_info = ug_info;
ugeth->dev = device;
@@ -3741,15 +3598,10 @@ static int ucc_geth_probe(struct platform_device* ofdev)
return 0;
-err_free_netdev:
- free_netdev(dev);
-err_deregister_fixed_link:
- if (of_phy_is_fixed_link(np))
- of_phy_deregister_fixed_link(np);
+err_destroy_phylink:
+ phylink_destroy(phylink);
+err_put_tbi:
of_node_put(ug_info->tbi_node);
- of_node_put(ug_info->phy_node);
-err_free_info:
- kfree(ug_info);
return err;
}
@@ -3758,16 +3610,10 @@ static void ucc_geth_remove(struct platform_device* ofdev)
{
struct net_device *dev = platform_get_drvdata(ofdev);
struct ucc_geth_private *ugeth = netdev_priv(dev);
- struct device_node *np = ofdev->dev.of_node;
- unregister_netdev(dev);
ucc_geth_memclean(ugeth);
- if (of_phy_is_fixed_link(np))
- of_phy_deregister_fixed_link(np);
+ phylink_destroy(ugeth->phylink);
of_node_put(ugeth->ug_info->tbi_node);
- of_node_put(ugeth->ug_info->phy_node);
- kfree(ugeth->ug_info);
- free_netdev(dev);
}
static const struct of_device_id ucc_geth_match[] = {
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index 4294ed096ebb..84f92f6384e7 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -16,6 +16,7 @@
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/phylink.h>
#include <linux/if_ether.h>
#include <soc/fsl/qe/immap_qe.h>
@@ -889,8 +890,6 @@ struct ucc_geth_hardware_statistics {
addresses */
#define TX_TIMEOUT (1*HZ)
-#define PHY_INIT_TIMEOUT 100000
-#define PHY_CHANGE_TIME 2
/* Fast Ethernet (10/100 Mbps) */
#define UCC_GETH_URFS_INIT 512 /* Rx virtual FIFO size
@@ -921,7 +920,8 @@ struct ucc_geth_hardware_statistics {
#define UCC_GETH_UPSMR_INIT UCC_GETH_UPSMR_RES1
#define UCC_GETH_MACCFG1_INIT 0
-#define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1)
+#define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1 | \
+ (7 << MACCFG2_PREL_SHIFT))
/* Ethernet Address Type. */
enum enet_addr_type {
@@ -1073,6 +1073,9 @@ struct ucc_geth_tad_params {
u16 vid;
};
+struct phylink;
+struct phylink_config;
+
/* GETH protocol initialization structure */
struct ucc_geth_info {
struct ucc_fast_info uf_info;
@@ -1088,7 +1091,6 @@ struct ucc_geth_info {
u8 miminumInterFrameGapEnforcement;
u8 backToBackInterFrameGap;
int ipAddressAlignment;
- int lengthCheckRx;
u32 mblinterval;
u16 nortsrbytetime;
u8 fracsiz;
@@ -1114,7 +1116,6 @@ struct ucc_geth_info {
int transmitFlowControl;
u8 maxGroupAddrInHash;
u8 maxIndAddrInHash;
- u8 prel;
u16 maxFrameLength;
u16 minFrameLength;
u16 maxD1Length;
@@ -1125,7 +1126,6 @@ struct ucc_geth_info {
u32 eventRegMask;
u16 pausePeriod;
u16 extensionField;
- struct device_node *phy_node;
struct device_node *tbi_node;
u8 weightfactor[NUM_TX_QUEUES];
u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES];
@@ -1210,14 +1210,12 @@ struct ucc_geth_private {
u16 skb_dirtytx[NUM_TX_QUEUES];
struct ugeth_mii_info *mii_info;
- struct phy_device *phydev;
- phy_interface_t phy_interface;
- int max_speed;
uint32_t msg_enable;
- int oldspeed;
- int oldduplex;
- int oldlink;
- int wol_en;
+ u32 wol_en;
+ u32 phy_wol_en;
+
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
struct device_node *node;
};
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 699f346faf5c..1fb49e5a414a 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -104,14 +104,8 @@ static int
uec_get_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- struct phy_device *phydev = ugeth->phydev;
- if (!phydev)
- return -ENODEV;
-
- phy_ethtool_ksettings_get(phydev, cmd);
-
- return 0;
+ return phylink_ethtool_ksettings_get(ugeth->phylink, cmd);
}
static int
@@ -119,12 +113,8 @@ uec_set_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- struct phy_device *phydev = ugeth->phydev;
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_ksettings_set(phydev, cmd);
+ return phylink_ethtool_ksettings_set(ugeth->phylink, cmd);
}
static void
@@ -133,12 +123,7 @@ uec_get_pauseparam(struct net_device *netdev,
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- pause->autoneg = ugeth->phydev->autoneg;
-
- if (ugeth->ug_info->receiveFlowControl)
- pause->rx_pause = 1;
- if (ugeth->ug_info->transmitFlowControl)
- pause->tx_pause = 1;
+ return phylink_ethtool_get_pauseparam(ugeth->phylink, pause);
}
static int
@@ -146,30 +131,11 @@ uec_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- int ret = 0;
ugeth->ug_info->receiveFlowControl = pause->rx_pause;
ugeth->ug_info->transmitFlowControl = pause->tx_pause;
- if (ugeth->phydev->autoneg) {
- if (netif_running(netdev)) {
- /* FIXME: automatically restart */
- netdev_info(netdev, "Please re-open the interface\n");
- }
- } else {
- struct ucc_geth_info *ug_info = ugeth->ug_info;
-
- ret = init_flow_control_params(ug_info->aufc,
- ug_info->receiveFlowControl,
- ug_info->transmitFlowControl,
- ug_info->pausePeriod,
- ug_info->extensionField,
- &ugeth->uccf->uf_regs->upsmr,
- &ugeth->ug_regs->uempr,
- &ugeth->ug_regs->maccfg1);
- }
-
- return ret;
+ return phylink_ethtool_set_pauseparam(ugeth->phylink, pause);
}
static uint32_t
@@ -343,28 +309,42 @@ uec_get_drvinfo(struct net_device *netdev,
static void uec_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- struct phy_device *phydev = ugeth->phydev;
- if (phydev && phydev->irq)
- wol->supported |= WAKE_PHY;
+ phylink_ethtool_get_wol(ugeth->phylink, wol);
+
if (qe_alive_during_sleep())
wol->supported |= WAKE_MAGIC;
- wol->wolopts = ugeth->wol_en;
+ wol->wolopts |= ugeth->wol_en;
}
static int uec_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- struct phy_device *phydev = ugeth->phydev;
+ int ret = 0;
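+ /* Give the PHY the first chance to handle WoL; otherwise only MAC magic-packet wake-up is available */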
- if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
- return -EINVAL;
- else if (wol->wolopts & WAKE_PHY && (!phydev || !phydev->irq))
+ ret = phylink_ethtool_set_wol(ugeth->phylink, wol);
+ if (ret == -EOPNOTSUPP) {
+ ugeth->phy_wol_en = 0;
+ } else if (ret) {
+ return ret;
+ } else {
+ ugeth->phy_wol_en = wol->wolopts;
+ goto out;
+ }
+
+ /* If the PHY isn't handling the WoL and the MAC is asked to do more
+ * than WAKE_MAGIC, return an error.
+ */
+ if (!ugeth->phy_wol_en &&
+ wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
- else if (wol->wolopts & WAKE_MAGIC && !qe_alive_during_sleep())
+
+ if (wol->wolopts & WAKE_MAGIC &&
+ !qe_alive_during_sleep())
return -EINVAL;
+out:
ugeth->wol_en = wol->wolopts;
device_set_wakeup_enable(&netdev->dev, ugeth->wol_en);
diff --git a/drivers/net/ethernet/fungible/funcore/fun_queue.c b/drivers/net/ethernet/fungible/funcore/fun_queue.c
index 8ab9f68434f5..d07ee3e4f52a 100644
--- a/drivers/net/ethernet/fungible/funcore/fun_queue.c
+++ b/drivers/net/ethernet/fungible/funcore/fun_queue.c
@@ -482,43 +482,6 @@ free_funq:
return NULL;
}
-/* Create a funq's CQ on the device. */
-static int fun_create_cq(struct fun_queue *funq)
-{
- struct fun_dev *fdev = funq->fdev;
- unsigned int rqid;
- int rc;
-
- rqid = funq->cq_flags & FUN_ADMIN_EPCQ_CREATE_FLAG_RQ ?
- funq->rqid : FUN_HCI_ID_INVALID;
- rc = fun_cq_create(fdev, funq->cq_flags, funq->cqid, rqid,
- funq->cqe_size_log2, funq->cq_depth,
- funq->cq_dma_addr, 0, 0, funq->cq_intcoal_nentries,
- funq->cq_intcoal_usec, funq->cq_vector, 0, 0,
- &funq->cqid, &funq->cq_db);
- if (!rc)
- dev_dbg(fdev->dev, "created CQ %u\n", funq->cqid);
-
- return rc;
-}
-
-/* Create a funq's SQ on the device. */
-static int fun_create_sq(struct fun_queue *funq)
-{
- struct fun_dev *fdev = funq->fdev;
- int rc;
-
- rc = fun_sq_create(fdev, funq->sq_flags, funq->sqid, funq->cqid,
- funq->sqe_size_log2, funq->sq_depth,
- funq->sq_dma_addr, funq->sq_intcoal_nentries,
- funq->sq_intcoal_usec, funq->cq_vector, 0, 0,
- 0, &funq->sqid, &funq->sq_db);
- if (!rc)
- dev_dbg(fdev->dev, "created SQ %u\n", funq->sqid);
-
- return rc;
-}
-
/* Create a funq's RQ on the device. */
int fun_create_rq(struct fun_queue *funq)
{
@@ -561,34 +524,6 @@ int fun_request_irq(struct fun_queue *funq, const char *devname,
return rc;
}
-/* Create all component queues of a funq on the device. */
-int fun_create_queue(struct fun_queue *funq)
-{
- int rc;
-
- rc = fun_create_cq(funq);
- if (rc)
- return rc;
-
- if (funq->rq_depth) {
- rc = fun_create_rq(funq);
- if (rc)
- goto release_cq;
- }
-
- rc = fun_create_sq(funq);
- if (rc)
- goto release_rq;
-
- return 0;
-
-release_rq:
- fun_destroy_sq(funq->fdev, funq->rqid);
-release_cq:
- fun_destroy_cq(funq->fdev, funq->cqid);
- return rc;
-}
-
void fun_free_irq(struct fun_queue *funq)
{
if (funq->irq_handler) {
diff --git a/drivers/net/ethernet/fungible/funcore/fun_queue.h b/drivers/net/ethernet/fungible/funcore/fun_queue.h
index 7fb53d0ae8b0..2d966afb187a 100644
--- a/drivers/net/ethernet/fungible/funcore/fun_queue.h
+++ b/drivers/net/ethernet/fungible/funcore/fun_queue.h
@@ -163,7 +163,6 @@ static inline void fun_set_cq_callback(struct fun_queue *funq, cq_callback_t cb,
}
int fun_create_rq(struct fun_queue *funq);
-int fun_create_queue(struct fun_queue *funq);
void fun_free_irq(struct fun_queue *funq);
int fun_request_irq(struct fun_queue *funq, const char *devname,
diff --git a/drivers/net/ethernet/fungible/funeth/funeth.h b/drivers/net/ethernet/fungible/funeth/funeth.h
index 1250e10d21db..55e705e239f8 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth.h
+++ b/drivers/net/ethernet/fungible/funeth/funeth.h
@@ -4,7 +4,7 @@
#define _FUNETH_H
#include <uapi/linux/if_ether.h>
-#include <uapi/linux/net_tstamp.h>
+#include <linux/net_tstamp.h>
#include <linux/mutex.h>
#include <linux/seqlock.h>
#include <linux/xarray.h>
@@ -121,7 +121,7 @@ struct funeth_priv {
u8 rx_coal_usec;
u8 rx_coal_count;
- struct hwtstamp_config hwtstamp_cfg;
+ struct kernel_hwtstamp_config hwtstamp_cfg;
/* cumulative queue stats from earlier queue instances */
u64 tx_packets;
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
index ba83dbf4ed22..1966dba512f8 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
@@ -930,7 +930,8 @@ static void fun_get_rmon_stats(struct net_device *netdev,
}
static void fun_get_fec_stats(struct net_device *netdev,
- struct ethtool_fec_stats *stats)
+ struct ethtool_fec_stats *stats,
+ struct ethtool_fec_hist *hist)
{
const struct funeth_priv *fp = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_main.c b/drivers/net/ethernet/fungible/funeth/funeth_main.c
index ac86179a0a81..792cddac6f1b 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_main.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_main.c
@@ -1014,26 +1014,25 @@ static int fun_get_port_attributes(struct net_device *netdev)
return 0;
}
-static int fun_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+static int fun_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
{
const struct funeth_priv *fp = netdev_priv(dev);
- return copy_to_user(ifr->ifr_data, &fp->hwtstamp_cfg,
- sizeof(fp->hwtstamp_cfg)) ? -EFAULT : 0;
+ *config = fp->hwtstamp_cfg;
+ return 0;
}
-static int fun_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+static int fun_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct funeth_priv *fp = netdev_priv(dev);
- struct hwtstamp_config cfg;
-
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
/* no TX HW timestamps */
- cfg.tx_type = HWTSTAMP_TX_OFF;
+ config->tx_type = HWTSTAMP_TX_OFF;
- switch (cfg.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
break;
case HWTSTAMP_FILTER_ALL:
@@ -1051,26 +1050,14 @@ static int fun_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
- cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
}
- fp->hwtstamp_cfg = cfg;
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
-}
-
-static int fun_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return fun_hwtstamp_set(dev, ifr);
- case SIOCGHWTSTAMP:
- return fun_hwtstamp_get(dev, ifr);
- default:
- return -EOPNOTSUPP;
- }
+ fp->hwtstamp_cfg = *config;
+ return 0;
}
/* Prepare the queues for XDP. */
@@ -1340,7 +1327,6 @@ static const struct net_device_ops fun_netdev_ops = {
.ndo_change_mtu = fun_change_mtu,
.ndo_set_mac_address = fun_set_macaddr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_eth_ioctl = fun_ioctl,
.ndo_uninit = fun_uninit,
.ndo_bpf = fun_xdp,
.ndo_xdp_xmit = fun_xdp_xmit_frames,
@@ -1348,6 +1334,8 @@ static const struct net_device_ops fun_netdev_ops = {
.ndo_set_vf_vlan = fun_set_vf_vlan,
.ndo_set_vf_rate = fun_set_vf_rate,
.ndo_get_vf_config = fun_get_vf_config,
+ .ndo_hwtstamp_get = fun_hwtstamp_get,
+ .ndo_hwtstamp_set = fun_hwtstamp_set,
};
#define GSO_ENCAP_FLAGS (NETIF_F_GSO_GRE | NETIF_F_GSO_IPXIP4 | \
diff --git a/drivers/net/ethernet/google/Kconfig b/drivers/net/ethernet/google/Kconfig
index 564862a57124..14c9431e15e5 100644
--- a/drivers/net/ethernet/google/Kconfig
+++ b/drivers/net/ethernet/google/Kconfig
@@ -18,6 +18,7 @@ if NET_VENDOR_GOOGLE
config GVE
tristate "Google Virtual NIC (gVNIC) support"
depends on (PCI_MSI && (X86 || CPU_LITTLE_ENDIAN))
+ depends on PTP_1588_CLOCK_OPTIONAL
select PAGE_POOL
help
This driver supports Google Virtual NIC (gVNIC)
diff --git a/drivers/net/ethernet/google/gve/Makefile b/drivers/net/ethernet/google/gve/Makefile
index 4520f1c07a63..e0ec227a50f7 100644
--- a/drivers/net/ethernet/google/gve/Makefile
+++ b/drivers/net/ethernet/google/gve/Makefile
@@ -1,5 +1,7 @@
# Makefile for the Google virtual Ethernet (gve) driver
obj-$(CONFIG_GVE) += gve.o
-gve-objs := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o gve_flow_rule.o \
+gve-y := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o gve_flow_rule.o \
gve_buffer_mgmt_dqo.o
+
+gve-$(CONFIG_PTP_1588_CLOCK) += gve_ptp.o
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index dd92949bb214..970d5ca8cdde 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -11,7 +11,9 @@
#include <linux/dmapool.h>
#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
#include <linux/pci.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/u64_stats_sync.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>
@@ -57,7 +59,7 @@
#define GVE_DEFAULT_RX_BUFFER_SIZE 2048
-#define GVE_MAX_RX_BUFFER_SIZE 4096
+#define GVE_XDP_RX_BUFFER_SIZE_DQO 4096
#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
@@ -68,6 +70,9 @@
#define GVE_FLOW_RULE_IDS_CACHE_SIZE \
(GVE_ADMINQ_BUFFER_SIZE / sizeof(((struct gve_adminq_queried_flow_rule *)0)->location))
+#define GVE_RSS_KEY_SIZE 40
+#define GVE_RSS_INDIR_SIZE 128
+
#define GVE_XDP_ACTIONS 5
#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
@@ -93,6 +98,8 @@
*/
#define GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD 96
+#define GVE_DQO_RX_HWTSTAMP_VALID 0x1
+
/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
struct gve_rx_desc *desc_ring; /* the descriptor ring */
@@ -102,7 +109,13 @@ struct gve_rx_desc_queue {
/* The page info for a single slot in the RX data queue */
struct gve_rx_slot_page_info {
- struct page *page;
+ /* netmem is used for DQO RDA mode
+ * page is used in all other modes
+ */
+ union {
+ struct page *page;
+ netmem_ref netmem;
+ };
void *page_address;
u32 page_offset; /* offset to write to in page */
unsigned int buf_size;
@@ -177,6 +190,9 @@ struct gve_rx_buf_state_dqo {
/* The page posted to HW. */
struct gve_rx_slot_page_info page_info;
+ /* XSK buffer */
+ struct xdp_buff *xsk_buff;
+
/* The DMA address corresponding to `page_info`. */
dma_addr_t addr;
@@ -189,6 +205,13 @@ struct gve_rx_buf_state_dqo {
s16 next;
};
+/* Wrapper for XDP Rx metadata */
+struct gve_xdp_buff {
+ struct xdp_buff xdp;
+ struct gve_priv *gve;
+ const struct gve_rx_compl_desc_dqo *compl_desc;
+};
+
/* `head` and `tail` are indices into an array, or -1 if empty. */
struct gve_index_list {
s16 head;
@@ -218,6 +241,11 @@ struct gve_rx_cnts {
/* Contains datapath state used to represent an RX queue. */
struct gve_rx_ring {
struct gve_priv *gve;
+
+ u16 packet_buffer_size; /* Size of buffer posted to NIC */
+ u16 packet_buffer_truesize; /* Total size of RX buffer */
+ u16 rx_headroom;
+
union {
/* GQI fields */
struct {
@@ -226,7 +254,6 @@ struct gve_rx_ring {
/* threshold for posting new buffs and descs */
u32 db_threshold;
- u16 packet_buffer_size;
u32 qpl_copy_pool_mask;
u32 qpl_copy_pool_head;
@@ -314,7 +341,6 @@ struct gve_rx_ring {
/* XDP stuff */
struct xdp_rxq_info xdp_rxq;
- struct xdp_rxq_info xsk_rxq;
struct xsk_buff_pool *xsk_pool;
struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */
};
@@ -383,10 +409,24 @@ enum gve_packet_state {
GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
/* No valid completion received within the specified timeout. */
GVE_PACKET_STATE_TIMED_OUT_COMPL,
+ /* XSK pending packet has received a packet/reinjection completion, or
+ * has timed out. At this point, the pending packet can be counted by
+ * xsk_tx_complete and freed.
+ */
+ GVE_PACKET_STATE_XSK_COMPLETE,
+};
+
+enum gve_tx_pending_packet_dqo_type {
+ GVE_TX_PENDING_PACKET_DQO_SKB,
+ GVE_TX_PENDING_PACKET_DQO_XDP_FRAME,
+ GVE_TX_PENDING_PACKET_DQO_XSK,
};
struct gve_tx_pending_packet_dqo {
- struct sk_buff *skb; /* skb for this packet */
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ };
/* 0th element corresponds to the linear portion of `skb`, should be
* unmapped with `dma_unmap_single`.
@@ -416,7 +456,10 @@ struct gve_tx_pending_packet_dqo {
/* Identifies the current state of the packet as defined in
* `enum gve_packet_state`.
*/
- u8 state;
+ u8 state : 3;
+
+ /* gve_tx_pending_packet_dqo_type */
+ u8 type : 2;
/* If packet is an outstanding miss completion, then the packet is
* freed if the corresponding re-injection completion is not received
@@ -438,6 +481,9 @@ struct gve_tx_ring {
/* DQO fields. */
struct {
+ /* Spinlock for XDP tx traffic */
+ spinlock_t xdp_lock;
+
/* Linked list of gve_tx_pending_packet_dqo. Index into
* pending_packets, or -1 if empty.
*
@@ -482,6 +528,8 @@ struct gve_tx_ring {
/* Cached value of `dqo_compl.free_tx_qpl_buf_cnt` */
u32 free_tx_qpl_buf_cnt;
};
+
+ atomic_t xsk_reorder_queue_tail;
} dqo_tx;
};
@@ -515,6 +563,9 @@ struct gve_tx_ring {
/* Last TX ring index fetched by HW */
atomic_t hw_tx_head;
+ u16 xsk_reorder_queue_head;
+ u16 xsk_reorder_queue_tail;
+
/* List to track pending packets which received a miss
* completion but not a corresponding reinjection.
*/
@@ -568,6 +619,8 @@ struct gve_tx_ring {
struct gve_tx_pending_packet_dqo *pending_packets;
s16 num_pending_packets;
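+ /* Queue of pending-packet indices, completed back to the XSK pool in order */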
+ u16 *xsk_reorder_queue;
+
u32 complq_mask; /* complq size is complq_mask + 1 */
/* QPL fields */
@@ -604,8 +657,6 @@ struct gve_tx_ring {
dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
struct u64_stats_sync statss; /* sync stats for 32bit archs */
struct xsk_buff_pool *xsk_pool;
- u32 xdp_xsk_wakeup;
- u32 xdp_xsk_done;
u64 xdp_xsk_sent;
u64 xdp_xmit;
u64 xdp_xmit_errors;
@@ -624,10 +675,18 @@ struct gve_notify_block {
u32 irq;
};
-/* Tracks allowed and current queue settings */
-struct gve_queue_config {
+/* Tracks allowed and current rx queue settings */
+struct gve_rx_queue_config {
u16 max_queues;
- u16 num_queues; /* current */
+ u16 num_queues;
+ u16 packet_buffer_size;
+};
+
+/* Tracks allowed and current tx queue settings */
+struct gve_tx_queue_config {
+ u16 max_queues;
+ u16 num_queues; /* number of TX queues, excluding XDP queues */
+ u16 num_xdp_queues;
};
/* Tracks the available and used qpl IDs */
@@ -651,11 +710,11 @@ struct gve_ptype_lut {
/* Parameters for allocating resources for tx queues */
struct gve_tx_alloc_rings_cfg {
- struct gve_queue_config *qcfg;
+ struct gve_tx_queue_config *qcfg;
+
+ u16 num_xdp_rings;
u16 ring_size;
- u16 start_idx;
- u16 num_rings;
bool raw_addressing;
/* Allocated resources are returned here */
@@ -665,13 +724,15 @@ struct gve_tx_alloc_rings_cfg {
/* Parameters for allocating resources for rx queues */
struct gve_rx_alloc_rings_cfg {
/* tx config is also needed to determine QPL ids */
- struct gve_queue_config *qcfg;
- struct gve_queue_config *qcfg_tx;
+ struct gve_rx_queue_config *qcfg_rx;
+ struct gve_tx_queue_config *qcfg_tx;
u16 ring_size;
u16 packet_buffer_size;
bool raw_addressing;
bool enable_header_split;
+ bool reset_rss;
+ bool xdp;
/* Allocated resources are returned here */
struct gve_rx_ring *rx;
@@ -722,6 +783,17 @@ struct gve_flow_rules_cache {
u32 rule_ids_cache_num;
};
+struct gve_rss_config {
+ u8 *hash_key;
+ u32 *hash_lut;
+};
+
+struct gve_ptp {
+ struct ptp_clock_info info;
+ struct ptp_clock *clock;
+ struct gve_priv *priv;
+};
+
struct gve_priv {
struct net_device *dev;
struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
@@ -751,10 +823,11 @@ struct gve_priv {
u32 rx_copybreak; /* copy packets smaller than this */
u16 default_num_queues; /* default num queues to set up */
- u16 num_xdp_queues;
- struct gve_queue_config tx_cfg;
- struct gve_queue_config rx_cfg;
- u32 num_ntfy_blks; /* spilt between TX and RX so must be even */
+ struct gve_tx_queue_config tx_cfg;
+ struct gve_rx_queue_config rx_cfg;
+ unsigned long *xsk_pools; /* bitmap of RX queues with XSK pools */
+ u32 num_ntfy_blks; /* split between TX and RX so must be even */
+ int numa_node;
struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
__be32 __iomem *db_bar2; /* "array" of doorbells */
@@ -786,6 +859,7 @@ struct gve_priv {
u32 adminq_set_driver_parameter_cnt;
u32 adminq_report_stats_cnt;
u32 adminq_report_link_speed_cnt;
+ u32 adminq_report_nic_timestamp_cnt;
u32 adminq_get_ptype_map_cnt;
u32 adminq_verify_driver_compatibility_cnt;
u32 adminq_query_flow_rules_cnt;
@@ -823,7 +897,6 @@ struct gve_priv {
struct gve_ptype_lut *ptype_lut_dqo;
/* Must be a power of two. */
- u16 data_buffer_size_dqo;
u16 max_rx_buffer_size; /* device limit */
enum gve_queue_format queue_format;
@@ -842,6 +915,16 @@ struct gve_priv {
u16 rss_key_size;
u16 rss_lut_size;
+ bool cache_rss_config;
+ struct gve_rss_config rss_config;
+
+ /* True if the device supports reading the nic clock */
+ bool nic_timestamp_supported;
+ struct gve_ptp *ptp;
+ struct kernel_hwtstamp_config ts_config;
+ struct gve_nic_ts_report *nic_ts_report;
+ dma_addr_t nic_ts_report_bus;
+ u64 last_sync_nic_counter; /* Clock counter from last NIC TS report */
};
enum gve_service_task_flags_bit {
@@ -1024,27 +1107,16 @@ static inline bool gve_is_qpl(struct gve_priv *priv)
}
/* Returns the number of tx queue page lists */
-static inline u32 gve_num_tx_qpls(const struct gve_queue_config *tx_cfg,
- int num_xdp_queues,
+static inline u32 gve_num_tx_qpls(const struct gve_tx_queue_config *tx_cfg,
bool is_qpl)
{
if (!is_qpl)
return 0;
- return tx_cfg->num_queues + num_xdp_queues;
-}
-
-/* Returns the number of XDP tx queue page lists
- */
-static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
-{
- if (priv->queue_format != GVE_GQI_QPL_FORMAT)
- return 0;
-
- return priv->num_xdp_queues;
+ return tx_cfg->num_queues + tx_cfg->num_xdp_queues;
}
/* Returns the number of rx queue page lists */
-static inline u32 gve_num_rx_qpls(const struct gve_queue_config *rx_cfg,
+static inline u32 gve_num_rx_qpls(const struct gve_rx_queue_config *rx_cfg,
bool is_qpl)
{
if (!is_qpl)
@@ -1062,7 +1134,8 @@ static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
return priv->tx_cfg.max_queues + rx_qid;
}
-static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid)
+static inline u32 gve_get_rx_qpl_id(const struct gve_tx_queue_config *tx_cfg,
+ int rx_qid)
{
return tx_cfg->max_queues + rx_qid;
}
@@ -1072,7 +1145,7 @@ static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
return gve_tx_qpl_id(priv, 0);
}
-static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
+static inline u32 gve_rx_start_qpl_id(const struct gve_tx_queue_config *tx_cfg)
{
return gve_get_rx_qpl_id(tx_cfg, 0);
}
@@ -1101,9 +1174,15 @@ static inline bool gve_is_gqi(struct gve_priv *priv)
priv->queue_format == GVE_GQI_QPL_FORMAT;
}
+static inline bool gve_is_dqo(struct gve_priv *priv)
+{
+ return priv->queue_format == GVE_DQO_RDA_FORMAT ||
+ priv->queue_format == GVE_DQO_QPL_FORMAT;
+}
+
static inline u32 gve_num_tx_queues(struct gve_priv *priv)
{
- return priv->tx_cfg.num_queues + priv->num_xdp_queues;
+ return priv->tx_cfg.num_queues + priv->tx_cfg.num_xdp_queues;
}
static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id)
@@ -1116,6 +1195,17 @@ static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
return gve_xdp_tx_queue_id(priv, 0);
}
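+/* XDP transmit (ndo_xdp_xmit) is only supported on the GQI-QPL and DQO-RDA queue formats */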
+static inline bool gve_supports_xdp_xmit(struct gve_priv *priv)
+{
+ switch (priv->queue_format) {
+ case GVE_GQI_QPL_FORMAT:
+ case GVE_DQO_RDA_FORMAT:
+ return true;
+ default:
+ return false;
+ }
+}
+
/* gqi napi handler defined in gve_main.c */
int gve_napi_poll(struct napi_struct *napi, int budget);
@@ -1133,13 +1223,18 @@ void gve_free_queue_page_list(struct gve_priv *priv,
u32 id);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
-int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
- u32 flags);
+int gve_xdp_xmit_gqi(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags);
+int gve_xdp_xmit_dqo(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags);
int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
void *data, int len, void *frame_p);
void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
+int gve_xdp_xmit_one_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
+ struct xdp_frame *xdpf);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
bool gve_xdp_poll(struct gve_notify_block *block, int budget);
+int gve_xsk_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg);
void gve_tx_free_rings_gqi(struct gve_priv *priv,
@@ -1165,9 +1260,12 @@ void gve_rx_free_rings_gqi(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx);
void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx);
-u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hplit);
bool gve_header_split_supported(const struct gve_priv *priv);
-int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
+int gve_set_rx_buf_len_config(struct gve_priv *priv, u32 rx_buf_len,
+ struct netlink_ext_ack *extack,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
+int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
/* rx buffer handling */
int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs);
void gve_free_page_dqo(struct gve_priv *priv, struct gve_rx_buf_state_dqo *bs,
@@ -1196,7 +1294,8 @@ void gve_free_buffer(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state);
int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc);
struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
- struct gve_rx_ring *rx);
+ struct gve_rx_ring *rx,
+ bool xdp);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
@@ -1208,14 +1307,35 @@ int gve_adjust_config(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
int gve_adjust_queues(struct gve_priv *priv,
- struct gve_queue_config new_rx_config,
- struct gve_queue_config new_tx_config);
+ struct gve_rx_queue_config new_rx_config,
+ struct gve_tx_queue_config new_tx_config,
+ bool reset_rss);
/* flow steering rule */
int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_get_flow_rule_ids(struct gve_priv *priv, struct ethtool_rxnfc *cmd, u32 *rule_locs);
int gve_add_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_flow_rules_reset(struct gve_priv *priv);
+/* RSS config */
+int gve_init_rss_config(struct gve_priv *priv, u16 num_queues);
+/* PTP and timestamping */
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+int gve_clock_nic_ts_read(struct gve_priv *priv);
+int gve_init_clock(struct gve_priv *priv);
+void gve_teardown_clock(struct gve_priv *priv);
+#else /* CONFIG_PTP_1588_CLOCK */
+static inline int gve_clock_nic_ts_read(struct gve_priv *priv)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int gve_init_clock(struct gve_priv *priv)
+{
+ return 0;
+}
+
+static inline void gve_teardown_clock(struct gve_priv *priv) { }
+#endif /* CONFIG_PTP_1588_CLOCK */
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index e44e8b139633..b72cc0fa2ba2 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -46,6 +46,7 @@ void gve_parse_device_option(struct gve_priv *priv,
struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
struct gve_device_option_flow_steering **dev_op_flow_steering,
struct gve_device_option_rss_config **dev_op_rss_config,
+ struct gve_device_option_nic_timestamp **dev_op_nic_timestamp,
struct gve_device_option_modify_ring **dev_op_modify_ring)
{
u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
@@ -225,6 +226,23 @@ void gve_parse_device_option(struct gve_priv *priv,
"RSS config");
*dev_op_rss_config = (void *)(option + 1);
break;
+ case GVE_DEV_OPT_ID_NIC_TIMESTAMP:
+ if (option_length < sizeof(**dev_op_nic_timestamp) ||
+ req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_NIC_TIMESTAMP) {
+ dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+ "Nic Timestamp",
+ (int)sizeof(**dev_op_nic_timestamp),
+ GVE_DEV_OPT_REQ_FEAT_MASK_NIC_TIMESTAMP,
+ option_length, req_feat_mask);
+ break;
+ }
+
+ if (option_length > sizeof(**dev_op_nic_timestamp))
+ dev_warn(&priv->pdev->dev,
+ GVE_DEVICE_OPTION_TOO_BIG_FMT,
+ "Nic Timestamp");
+ *dev_op_nic_timestamp = (void *)(option + 1);
+ break;
default:
/* If we don't recognize the option just continue
* without doing anything.
@@ -246,6 +264,7 @@ gve_process_device_options(struct gve_priv *priv,
struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
struct gve_device_option_flow_steering **dev_op_flow_steering,
struct gve_device_option_rss_config **dev_op_rss_config,
+ struct gve_device_option_nic_timestamp **dev_op_nic_timestamp,
struct gve_device_option_modify_ring **dev_op_modify_ring)
{
const int num_options = be16_to_cpu(descriptor->num_device_options);
@@ -269,6 +288,7 @@ gve_process_device_options(struct gve_priv *priv,
dev_op_dqo_rda, dev_op_jumbo_frames,
dev_op_dqo_qpl, dev_op_buffer_sizes,
dev_op_flow_steering, dev_op_rss_config,
+ dev_op_nic_timestamp,
dev_op_modify_ring);
dev_opt = next_opt;
}
@@ -306,6 +326,7 @@ int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
priv->adminq_set_driver_parameter_cnt = 0;
priv->adminq_report_stats_cnt = 0;
priv->adminq_report_link_speed_cnt = 0;
+ priv->adminq_report_nic_timestamp_cnt = 0;
priv->adminq_get_ptype_map_cnt = 0;
priv->adminq_query_flow_rules_cnt = 0;
priv->adminq_cfg_flow_rule_cnt = 0;
@@ -442,6 +463,8 @@ static int gve_adminq_kick_and_wait(struct gve_priv *priv)
int tail, head;
int i;
+ lockdep_assert_held(&priv->adminq_lock);
+
tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
head = priv->adminq_prod_cnt;
@@ -467,9 +490,6 @@ static int gve_adminq_kick_and_wait(struct gve_priv *priv)
return 0;
}
-/* This function is not threadsafe - the caller is responsible for any
- * necessary locks.
- */
static int gve_adminq_issue_cmd(struct gve_priv *priv,
union gve_adminq_command *cmd_orig)
{
@@ -477,6 +497,8 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
u32 opcode;
u32 tail;
+ lockdep_assert_held(&priv->adminq_lock);
+
tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
// Check if next command will overflow the buffer.
@@ -544,6 +566,9 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
case GVE_ADMINQ_REPORT_LINK_SPEED:
priv->adminq_report_link_speed_cnt++;
break;
+ case GVE_ADMINQ_REPORT_NIC_TIMESTAMP:
+ priv->adminq_report_nic_timestamp_cnt++;
+ break;
case GVE_ADMINQ_GET_PTYPE_MAP:
priv->adminq_get_ptype_map_cnt++;
break;
@@ -564,6 +589,7 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
break;
default:
dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
+ return -EINVAL;
}
return 0;
@@ -625,7 +651,7 @@ static int gve_adminq_execute_extended_cmd(struct gve_priv *priv, u32 opcode,
/* The device specifies that the management vector can either be the first irq
* or the last irq. ntfy_blk_msix_base_idx indicates the first irq assigned to
- * the ntfy blks. It if is 0 then the management vector is last, if it is 1 then
+ * the ntfy blks. If it is 0 then the management vector is last, if it is 1 then
* the management vector is first.
*
* gve arranges the msix vectors so that the management vector is last.
@@ -709,13 +735,19 @@ int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_que
int err;
int i;
+ mutex_lock(&priv->adminq_lock);
+
for (i = start_id; i < start_id + num_queues; i++) {
err = gve_adminq_create_tx_queue(priv, i);
if (err)
- return err;
+ goto out;
}
- return gve_adminq_kick_and_wait(priv);
+ err = gve_adminq_kick_and_wait(priv);
+
+out:
+ mutex_unlock(&priv->adminq_lock);
+ return err;
}
static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
@@ -731,6 +763,7 @@ static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
.ntfy_id = cpu_to_be32(rx->ntfy_id),
.queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
.rx_ring_size = cpu_to_be16(priv->rx_desc_cnt),
+ .packet_buffer_size = cpu_to_be16(rx->packet_buffer_size),
};
if (gve_is_gqi(priv)) {
@@ -743,7 +776,6 @@ static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
cpu_to_be64(rx->data.data_bus);
cmd->create_rx_queue.index = cpu_to_be32(queue_index);
cmd->create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
- cmd->create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
} else {
u32 qpl_id = 0;
@@ -756,8 +788,6 @@ static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
cpu_to_be64(rx->dqo.complq.bus);
cmd->create_rx_queue.rx_data_ring_addr =
cpu_to_be64(rx->dqo.bufq.bus);
- cmd->create_rx_queue.packet_buffer_size =
- cpu_to_be16(priv->data_buffer_size_dqo);
cmd->create_rx_queue.rx_buff_ring_size =
cpu_to_be16(priv->rx_desc_cnt);
cmd->create_rx_queue.enable_rsc =
@@ -790,13 +820,19 @@ int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
int err;
int i;
+ mutex_lock(&priv->adminq_lock);
+
for (i = 0; i < num_queues; i++) {
err = gve_adminq_create_rx_queue(priv, i);
if (err)
- return err;
+ goto out;
}
- return gve_adminq_kick_and_wait(priv);
+ err = gve_adminq_kick_and_wait(priv);
+
+out:
+ mutex_unlock(&priv->adminq_lock);
+ return err;
}
static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
@@ -822,13 +858,19 @@ int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_qu
int err;
int i;
+ mutex_lock(&priv->adminq_lock);
+
for (i = start_id; i < start_id + num_queues; i++) {
err = gve_adminq_destroy_tx_queue(priv, i);
if (err)
- return err;
+ goto out;
}
- return gve_adminq_kick_and_wait(priv);
+ err = gve_adminq_kick_and_wait(priv);
+
+out:
+ mutex_unlock(&priv->adminq_lock);
+ return err;
}
static void gve_adminq_make_destroy_rx_queue_cmd(union gve_adminq_command *cmd,
@@ -863,13 +905,19 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
int err;
int i;
+ mutex_lock(&priv->adminq_lock);
+
for (i = 0; i < num_queues; i++) {
err = gve_adminq_destroy_rx_queue(priv, i);
if (err)
- return err;
+ goto out;
}
- return gve_adminq_kick_and_wait(priv);
+ err = gve_adminq_kick_and_wait(priv);
+
+out:
+ mutex_unlock(&priv->adminq_lock);
+ return err;
}
static void gve_set_default_desc_cnt(struct gve_priv *priv,
@@ -885,6 +933,15 @@ static void gve_set_default_desc_cnt(struct gve_priv *priv,
priv->min_rx_desc_cnt = priv->rx_desc_cnt;
}
+static void gve_set_default_rss_sizes(struct gve_priv *priv)
+{
+ if (!gve_is_gqi(priv)) {
+ priv->rss_key_size = GVE_RSS_KEY_SIZE;
+ priv->rss_lut_size = GVE_RSS_INDIR_SIZE;
+ priv->cache_rss_config = true;
+ }
+}
+
static void gve_enable_supported_features(struct gve_priv *priv,
u32 supported_features_mask,
const struct gve_device_option_jumbo_frames
@@ -897,6 +954,8 @@ static void gve_enable_supported_features(struct gve_priv *priv,
*dev_op_flow_steering,
const struct gve_device_option_rss_config
*dev_op_rss_config,
+ const struct gve_device_option_nic_timestamp
+ *dev_op_nic_timestamp,
const struct gve_device_option_modify_ring
*dev_op_modify_ring)
{
@@ -928,6 +987,10 @@ static void gve_enable_supported_features(struct gve_priv *priv,
dev_info(&priv->pdev->dev,
"BUFFER SIZES device option enabled with max_rx_buffer_size of %u, header_buf_size of %u.\n",
priv->max_rx_buffer_size, priv->header_buf_size);
+ if (gve_is_dqo(priv) &&
+ priv->max_rx_buffer_size > GVE_DEFAULT_RX_BUFFER_SIZE)
+ priv->rx_cfg.packet_buffer_size =
+ priv->max_rx_buffer_size;
}
/* Read and store ring size ranges given by device */
@@ -968,11 +1031,20 @@ static void gve_enable_supported_features(struct gve_priv *priv,
be16_to_cpu(dev_op_rss_config->hash_key_size);
priv->rss_lut_size =
be16_to_cpu(dev_op_rss_config->hash_lut_size);
+ priv->cache_rss_config = false;
+ dev_dbg(&priv->pdev->dev,
+ "RSS device option enabled with key size of %u, lut size of %u.\n",
+ priv->rss_key_size, priv->rss_lut_size);
}
+
+ if (dev_op_nic_timestamp &&
+ (supported_features_mask & GVE_SUP_NIC_TIMESTAMP_MASK))
+ priv->nic_timestamp_supported = true;
}
int gve_adminq_describe_device(struct gve_priv *priv)
{
+ struct gve_device_option_nic_timestamp *dev_op_nic_timestamp = NULL;
struct gve_device_option_flow_steering *dev_op_flow_steering = NULL;
struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL;
struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
@@ -1013,6 +1085,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
&dev_op_buffer_sizes,
&dev_op_flow_steering,
&dev_op_rss_config,
+ &dev_op_nic_timestamp,
&dev_op_modify_ring);
if (err)
goto free_device_descriptor;
@@ -1052,6 +1125,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
/* set default descriptor counts */
gve_set_default_desc_cnt(priv, descriptor);
+ gve_set_default_rss_sizes(priv);
+
/* DQO supports LRO. */
if (!gve_is_gqi(priv))
priv->dev->hw_features |= NETIF_F_LRO;
@@ -1075,7 +1150,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
gve_enable_supported_features(priv, supported_features_mask,
dev_op_jumbo_frames, dev_op_dqo_qpl,
dev_op_buffer_sizes, dev_op_flow_steering,
- dev_op_rss_config, dev_op_modify_ring);
+ dev_op_rss_config, dev_op_nic_timestamp,
+ dev_op_modify_ring);
free_device_descriptor:
dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
@@ -1128,20 +1204,6 @@ int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id)
return gve_adminq_execute_cmd(priv, &cmd);
}
-int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu)
-{
- union gve_adminq_command cmd;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.opcode = cpu_to_be32(GVE_ADMINQ_SET_DRIVER_PARAMETER);
- cmd.set_driver_param = (struct gve_adminq_set_driver_parameter) {
- .parameter_type = cpu_to_be32(GVE_SET_PARAM_MTU),
- .parameter_value = cpu_to_be64(mtu),
- };
-
- return gve_adminq_execute_cmd(priv, &cmd);
-}
-
int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
dma_addr_t stats_report_addr, u64 interval)
{
@@ -1201,6 +1263,22 @@ int gve_adminq_report_link_speed(struct gve_priv *priv)
return err;
}
+int gve_adminq_report_nic_ts(struct gve_priv *priv,
+ dma_addr_t nic_ts_report_addr)
+{
+ union gve_adminq_command cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_NIC_TIMESTAMP);
+ cmd.report_nic_ts = (struct gve_adminq_report_nic_ts) {
+ .nic_ts_report_len =
+ cpu_to_be64(sizeof(struct gve_nic_ts_report)),
+ .nic_ts_report_addr = cpu_to_be64(nic_ts_report_addr),
+ };
+
+ return gve_adminq_execute_cmd(priv, &cmd);
+}
+
int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
struct gve_ptype_lut *ptype_lut)
{
@@ -1248,10 +1326,10 @@ gve_adminq_configure_flow_rule(struct gve_priv *priv,
sizeof(struct gve_adminq_configure_flow_rule),
flow_rule_cmd);
- if (err) {
+ if (err == -ETIME) {
dev_err(&priv->pdev->dev, "Timeout to configure the flow rule, trigger reset");
gve_reset(priv, true);
- } else {
+ } else if (!err) {
priv->flow_rules_cache.rules_cache_synced = false;
}
@@ -1290,8 +1368,9 @@ int gve_adminq_reset_flow_rules(struct gve_priv *priv)
int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh)
{
+ const u32 *hash_lut_to_config = NULL;
+ const u8 *hash_key_to_config = NULL;
dma_addr_t lut_bus = 0, key_bus = 0;
- u16 key_size = 0, lut_size = 0;
union gve_adminq_command cmd;
__be32 *lut = NULL;
u8 hash_alg = 0;
@@ -1301,7 +1380,7 @@ int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *r
switch (rxfh->hfunc) {
case ETH_RSS_HASH_NO_CHANGE:
- break;
+ fallthrough;
case ETH_RSS_HASH_TOP:
hash_alg = ETH_RSS_HASH_TOP;
break;
@@ -1310,27 +1389,46 @@ int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *r
}
if (rxfh->indir) {
- lut_size = priv->rss_lut_size;
+ if (rxfh->indir_size != priv->rss_lut_size)
+ return -EINVAL;
+
+ hash_lut_to_config = rxfh->indir;
+ } else if (priv->cache_rss_config) {
+ hash_lut_to_config = priv->rss_config.hash_lut;
+ }
+
+ if (hash_lut_to_config) {
lut = dma_alloc_coherent(&priv->pdev->dev,
- lut_size * sizeof(*lut),
+ priv->rss_lut_size * sizeof(*lut),
&lut_bus, GFP_KERNEL);
if (!lut)
return -ENOMEM;
for (i = 0; i < priv->rss_lut_size; i++)
- lut[i] = cpu_to_be32(rxfh->indir[i]);
+ lut[i] = cpu_to_be32(hash_lut_to_config[i]);
}
if (rxfh->key) {
- key_size = priv->rss_key_size;
+ if (rxfh->key_size != priv->rss_key_size) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ hash_key_to_config = rxfh->key;
+ } else if (priv->cache_rss_config) {
+ hash_key_to_config = priv->rss_config.hash_key;
+ }
+
+ if (hash_key_to_config) {
key = dma_alloc_coherent(&priv->pdev->dev,
- key_size, &key_bus, GFP_KERNEL);
+ priv->rss_key_size,
+ &key_bus, GFP_KERNEL);
if (!key) {
err = -ENOMEM;
goto out;
}
- memcpy(key, rxfh->key, key_size);
+ memcpy(key, hash_key_to_config, priv->rss_key_size);
}
/* Zero-valued fields in the cmd.configure_rss instruct the device to
@@ -1344,8 +1442,10 @@ int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *r
BIT(GVE_RSS_HASH_TCPV6) |
BIT(GVE_RSS_HASH_UDPV6)),
.hash_alg = hash_alg,
- .hash_key_size = cpu_to_be16(key_size),
- .hash_lut_size = cpu_to_be16(lut_size),
+ .hash_key_size =
+ cpu_to_be16((key_bus) ? priv->rss_key_size : 0),
+ .hash_lut_size =
+ cpu_to_be16((lut_bus) ? priv->rss_lut_size : 0),
.hash_key_addr = cpu_to_be64(key_bus),
.hash_lut_addr = cpu_to_be64(lut_bus),
};
@@ -1355,11 +1455,11 @@ int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *r
out:
if (lut)
dma_free_coherent(&priv->pdev->dev,
- lut_size * sizeof(*lut),
+ priv->rss_lut_size * sizeof(*lut),
lut, lut_bus);
if (key)
dma_free_coherent(&priv->pdev->dev,
- key_size, key, key_bus);
+ priv->rss_key_size, key, key_bus);
return err;
}
@@ -1463,12 +1563,15 @@ static int gve_adminq_process_rss_query(struct gve_priv *priv,
rxfh->hfunc = descriptor->hash_alg;
rss_info_addr = (void *)(descriptor + 1);
- if (rxfh->key)
+ if (rxfh->key) {
+ rxfh->key_size = priv->rss_key_size;
memcpy(rxfh->key, rss_info_addr, priv->rss_key_size);
+ }
rss_info_addr += priv->rss_key_size;
lut = (__be32 *)rss_info_addr;
if (rxfh->indir) {
+ rxfh->indir_size = priv->rss_lut_size;
for (i = 0; i < priv->rss_lut_size; i++)
rxfh->indir[i] = be32_to_cpu(lut[i]);
}
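
Note on the RSS hunks above: gve_adminq_configure_rss() now chooses between three sources. A table passed in by ethtool always wins; otherwise, when the driver caches the RSS configuration (cache_rss_config defaults to true for non-GQI formats in gve_set_default_rss_sizes() and is cleared again when the device advertises its own RSS config option), the cached copy is replayed; otherwise the field is sent zero-length, which tells the device to keep its current setting. A minimal sketch of that selection, using only fields introduced in this patch — the helper itself is illustrative and not part of the driver:

/* Illustrative helper (not in the patch): pick the LUT source the same
 * way gve_adminq_configure_rss() above does.
 */
static const u32 *gve_pick_rss_lut(const struct gve_priv *priv,
				   const struct ethtool_rxfh_param *rxfh)
{
	if (rxfh->indir)			/* explicit request from ethtool */
		return rxfh->indir;
	if (priv->cache_rss_config)		/* replay the driver-side cache */
		return priv->rss_config.hash_lut;
	return NULL;				/* zero-length: device keeps its LUT */
}

The same pattern is applied to the hash key, which is why queue-count changes can later reprogram RSS without losing a user-supplied configuration.
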
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 863683de9694..22a74b6aa17e 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -27,6 +27,7 @@ enum gve_adminq_opcodes {
GVE_ADMINQ_GET_PTYPE_MAP = 0xE,
GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY = 0xF,
GVE_ADMINQ_QUERY_FLOW_RULES = 0x10,
+ GVE_ADMINQ_REPORT_NIC_TIMESTAMP = 0x11,
GVE_ADMINQ_QUERY_RSS = 0x12,
/* For commands that are larger than 56 bytes */
@@ -174,6 +175,12 @@ struct gve_device_option_rss_config {
static_assert(sizeof(struct gve_device_option_rss_config) == 8);
+struct gve_device_option_nic_timestamp {
+ __be32 supported_features_mask;
+};
+
+static_assert(sizeof(struct gve_device_option_nic_timestamp) == 4);
+
/* Terminology:
*
* RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
@@ -192,6 +199,7 @@ enum gve_dev_opt_id {
GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
GVE_DEV_OPT_ID_FLOW_STEERING = 0xb,
+ GVE_DEV_OPT_ID_NIC_TIMESTAMP = 0xd,
GVE_DEV_OPT_ID_RSS_CONFIG = 0xe,
};
@@ -206,6 +214,7 @@ enum gve_dev_opt_req_feat_mask {
GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_RSS_CONFIG = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_NIC_TIMESTAMP = 0x0,
};
enum gve_sup_feature_mask {
@@ -214,6 +223,7 @@ enum gve_sup_feature_mask {
GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
GVE_SUP_FLOW_STEERING_MASK = 1 << 5,
GVE_SUP_RSS_CONFIG_MASK = 1 << 7,
+ GVE_SUP_NIC_TIMESTAMP_MASK = 1 << 8,
};
#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
@@ -392,6 +402,21 @@ struct gve_adminq_report_link_speed {
static_assert(sizeof(struct gve_adminq_report_link_speed) == 8);
+struct gve_adminq_report_nic_ts {
+ __be64 nic_ts_report_len;
+ __be64 nic_ts_report_addr;
+};
+
+static_assert(sizeof(struct gve_adminq_report_nic_ts) == 16);
+
+struct gve_nic_ts_report {
+ __be64 nic_timestamp; /* NIC clock in nanoseconds */
+ __be64 reserved1;
+ __be64 reserved2;
+ __be64 reserved3;
+ __be64 reserved4;
+};
+
struct stats {
__be32 stat_name;
__be32 queue_id;
@@ -451,7 +476,7 @@ struct gve_ptype_entry {
};
struct gve_ptype_map {
- struct gve_ptype_entry ptypes[1 << 10]; /* PTYPES are always 10 bits. */
+ struct gve_ptype_entry ptypes[GVE_NUM_PTYPES]; /* PTYPES are always 10 bits. */
};
struct gve_adminq_get_ptype_map {
@@ -585,6 +610,7 @@ union gve_adminq_command {
struct gve_adminq_query_flow_rules query_flow_rules;
struct gve_adminq_configure_rss configure_rss;
struct gve_adminq_query_rss query_rss;
+ struct gve_adminq_report_nic_ts report_nic_ts;
struct gve_adminq_extended_command extended_command;
};
};
@@ -612,7 +638,6 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 queue_id);
int gve_adminq_register_page_list(struct gve_priv *priv,
struct gve_queue_page_list *qpl);
int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id);
-int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu);
int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
dma_addr_t stats_report_addr, u64 interval);
int gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
@@ -625,6 +650,8 @@ int gve_adminq_reset_flow_rules(struct gve_priv *priv);
int gve_adminq_query_flow_rules(struct gve_priv *priv, u16 query_opcode, u32 starting_loc);
int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh);
int gve_adminq_query_rss_config(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh);
+int gve_adminq_report_nic_ts(struct gve_priv *priv,
+ dma_addr_t nic_ts_report_addr);
struct gve_ptype_lut;
int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
diff --git a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
index 403f0f335ba6..0e2b703c673a 100644
--- a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
@@ -4,6 +4,7 @@
* Copyright (C) 2015-2024 Google, Inc.
*/
+#include <net/xdp_sock_drv.h>
#include "gve.h"
#include "gve_utils.h"
@@ -29,6 +30,10 @@ struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx)
/* Point buf_state to itself to mark it as allocated */
buf_state->next = buffer_id;
+ /* Clear the buffer pointers */
+ buf_state->page_info.page = NULL;
+ buf_state->xsk_buff = NULL;
+
return buf_state;
}
@@ -139,7 +144,8 @@ int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
buf_state->page_info.page_offset = 0;
buf_state->page_info.page_address =
page_address(buf_state->page_info.page);
- buf_state->page_info.buf_size = priv->data_buffer_size_dqo;
+ buf_state->page_info.buf_size = rx->packet_buffer_truesize;
+ buf_state->page_info.pad = rx->rx_headroom;
buf_state->last_single_ref_offset = 0;
/* The page already has 1 ref. */
@@ -162,7 +168,7 @@ void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state)
void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state)
{
- const u16 data_buffer_size = priv->data_buffer_size_dqo;
+ const u16 data_buffer_size = rx->packet_buffer_truesize;
int pagecount;
/* Can't reuse if we only fit one buffer per page */
@@ -205,51 +211,60 @@ void gve_free_to_page_pool(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state,
bool allow_direct)
{
- struct page *page = buf_state->page_info.page;
+ netmem_ref netmem = buf_state->page_info.netmem;
- if (!page)
+ if (!netmem)
return;
- page_pool_put_full_page(page->pp, page, allow_direct);
- buf_state->page_info.page = NULL;
+ page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, allow_direct);
+ buf_state->page_info.netmem = 0;
}
static int gve_alloc_from_page_pool(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state)
{
- struct gve_priv *priv = rx->gve;
- struct page *page;
+ netmem_ref netmem;
- buf_state->page_info.buf_size = priv->data_buffer_size_dqo;
- page = page_pool_alloc(rx->dqo.page_pool,
- &buf_state->page_info.page_offset,
- &buf_state->page_info.buf_size, GFP_ATOMIC);
+ buf_state->page_info.buf_size = rx->packet_buffer_truesize;
+ netmem = page_pool_alloc_netmem(rx->dqo.page_pool,
+ &buf_state->page_info.page_offset,
+ &buf_state->page_info.buf_size,
+ GFP_ATOMIC);
- if (!page)
+ if (!netmem)
return -ENOMEM;
- buf_state->page_info.page = page;
- buf_state->page_info.page_address = page_address(page);
- buf_state->addr = page_pool_get_dma_addr(page);
+ buf_state->page_info.netmem = netmem;
+ buf_state->page_info.page_address = netmem_address(netmem);
+ buf_state->addr = page_pool_get_dma_addr_netmem(netmem);
+ buf_state->page_info.pad = rx->dqo.page_pool->p.offset;
return 0;
}
struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
- struct gve_rx_ring *rx)
+ struct gve_rx_ring *rx,
+ bool xdp)
{
u32 ntfy_id = gve_rx_idx_to_ntfy(priv, rx->q_num);
struct page_pool_params pp = {
.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
.order = 0,
.pool_size = GVE_PAGE_POOL_SIZE_MULTIPLIER * priv->rx_desc_cnt,
+ .nid = priv->numa_node,
.dev = &priv->pdev->dev,
.netdev = priv->dev,
.napi = &priv->ntfy_blocks[ntfy_id].napi,
.max_len = PAGE_SIZE,
- .dma_dir = DMA_FROM_DEVICE,
+ .dma_dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
+ .offset = xdp ? XDP_PACKET_HEADROOM : 0,
};
+ if (priv->header_split_enabled) {
+ pp.flags |= PP_FLAG_ALLOW_UNREADABLE_NETMEM;
+ pp.queue_idx = rx->q_num;
+ }
+
return page_pool_create(&pp);
}
@@ -269,7 +284,7 @@ void gve_reuse_buffer(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state)
{
if (rx->dqo.page_pool) {
- buf_state->page_info.page = NULL;
+ buf_state->page_info.netmem = 0;
gve_free_buf_state(rx, buf_state);
} else {
gve_dec_pagecnt_bias(&buf_state->page_info);
@@ -281,7 +296,24 @@ int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc)
{
struct gve_rx_buf_state_dqo *buf_state;
- if (rx->dqo.page_pool) {
+ if (rx->xsk_pool) {
+ buf_state = gve_alloc_buf_state(rx);
+ if (unlikely(!buf_state))
+ return -ENOMEM;
+
+ buf_state->xsk_buff = xsk_buff_alloc(rx->xsk_pool);
+ if (unlikely(!buf_state->xsk_buff)) {
+ xsk_set_rx_need_wakeup(rx->xsk_pool);
+ gve_free_buf_state(rx, buf_state);
+ return -ENOMEM;
+ }
+ /* Allocated xsk buffer. Clear wakeup in case it was set. */
+ xsk_clear_rx_need_wakeup(rx->xsk_pool);
+ desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
+ desc->buf_addr =
+ cpu_to_le64(xsk_buff_xdp_get_dma(buf_state->xsk_buff));
+ return 0;
+ } else if (rx->dqo.page_pool) {
buf_state = gve_alloc_buf_state(rx);
if (WARN_ON_ONCE(!buf_state))
return -ENOMEM;
@@ -301,7 +333,8 @@ int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc)
}
desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
desc->buf_addr = cpu_to_le64(buf_state->addr +
- buf_state->page_info.page_offset);
+ buf_state->page_info.page_offset +
+ buf_state->page_info.pad);
return 0;
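
The buffer-address arithmetic at the end of gve_alloc_buffer() is easy to miss: with XDP enabled the page pool is created with .offset = XDP_PACKET_HEADROOM, and page_info.pad records that headroom, so the address posted to the NIC starts at the packet payload and leaves room in front for XDP programs to grow headers. Conceptually (hypothetical helper, same fields as in the hunks above):

/* Hypothetical helper mirroring the address the code above writes into the
 * RX descriptor: DMA address of the buffer chunk, plus the sub-page offset
 * handed out by the page pool, plus the XDP headroom pad (0 when no XDP
 * program is attached).
 */
static dma_addr_t gve_rx_buf_dma_addr(const struct gve_rx_buf_state_dqo *bs)
{
	return bs->addr + bs->page_info.page_offset + bs->page_info.pad;
}
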
diff --git a/drivers/net/ethernet/google/gve/gve_desc_dqo.h b/drivers/net/ethernet/google/gve/gve_desc_dqo.h
index f79cd0591110..f7786b03c744 100644
--- a/drivers/net/ethernet/google/gve/gve_desc_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_desc_dqo.h
@@ -236,7 +236,8 @@ struct gve_rx_compl_desc_dqo {
u8 status_error1;
- __le16 reserved5;
+ u8 reserved5;
+ u8 ts_sub_nsecs_low;
__le16 buf_id; /* Buffer ID which was sent on the buffer queue. */
union {
@@ -247,7 +248,8 @@ struct gve_rx_compl_desc_dqo {
};
__le32 hash;
__le32 reserved6;
- __le64 reserved7;
+ __le32 reserved7;
+ __le32 ts; /* timestamp in nanosecs */
} __packed;
static_assert(sizeof(struct gve_rx_compl_desc_dqo) == 32);
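
The completion descriptor above carries only the low 32 bits of the NIC clock ("ts", nanoseconds) plus what appears to be a sub-nanosecond fraction byte, while GVE_ADMINQ_REPORT_NIC_TIMESTAMP (added earlier in this patch) lets the driver DMA a full 64-bit clock snapshot into struct gve_nic_ts_report. A common way to combine the two is the usual wrap-safe extension: take a recent 64-bit reference and shift it to the value whose low word matches the descriptor. The sketch below shows that technique only; the helper name, and the claim that the driver does exactly this, are assumptions rather than something taken from the diff:

/* Hedged sketch: extend a 32-bit descriptor timestamp to 64 bits using a
 * recent 64-bit NIC clock snapshot.  The signed 32-bit delta selects the
 * 64-bit time whose low word equals desc_ts and which lies within about
 * +/-2.1 s of the reference, so it stays correct as long as snapshots are
 * refreshed more often than roughly every 4.3 s.
 */
static u64 gve_extend_rx_ts(u64 nic_clock_snapshot, u32 desc_ts)
{
	s32 delta = desc_ts - (u32)nic_clock_snapshot;

	return nic_clock_snapshot + delta;
}
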
diff --git a/drivers/net/ethernet/google/gve/gve_dqo.h b/drivers/net/ethernet/google/gve/gve_dqo.h
index e83773fb891f..5871f773f0c7 100644
--- a/drivers/net/ethernet/google/gve/gve_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_dqo.h
@@ -36,7 +36,10 @@ netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev);
netdev_features_t gve_features_check_dqo(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
+int gve_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp);
bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean);
+bool gve_xdp_poll_dqo(struct gve_notify_block *block);
+bool gve_xsk_tx_poll_dqo(struct gve_notify_block *block, int budget);
int gve_rx_poll_dqo(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg);
@@ -60,6 +63,7 @@ int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
struct napi_struct *napi);
void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx);
void gve_rx_write_doorbell_dqo(const struct gve_priv *priv, int queue_idx);
+void gve_xdp_tx_flush_dqo(struct gve_priv *priv, u32 xdp_qid);
static inline void
gve_tx_put_doorbell_dqo(const struct gve_priv *priv,
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index bdfc6e77b2af..52500ae8348e 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -63,11 +63,11 @@ static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
"tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_consumed_desc[%u]", "tx_bytes[%u]",
"tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
- "tx_dma_mapping_error[%u]", "tx_xsk_wakeup[%u]",
- "tx_xsk_done[%u]", "tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]"
+ "tx_dma_mapping_error[%u]",
+ "tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]"
};
-static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
+static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] __nonstring_array = {
"adminq_prod_cnt", "adminq_cmd_fail", "adminq_timeouts",
"adminq_describe_device_cnt", "adminq_cfg_device_resources_cnt",
"adminq_register_page_list_cnt", "adminq_unregister_page_list_cnt",
@@ -76,7 +76,7 @@ static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
"adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt",
"adminq_report_stats_cnt", "adminq_report_link_speed_cnt", "adminq_get_ptype_map_cnt",
"adminq_query_flow_rules", "adminq_cfg_flow_rule", "adminq_cfg_rss_cnt",
- "adminq_query_rss_cnt",
+ "adminq_query_rss_cnt", "adminq_report_nic_timestamp_cnt",
};
static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
@@ -113,7 +113,7 @@ static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
i);
for (i = 0; i < ARRAY_SIZE(gve_gstrings_adminq_stats); i++)
- ethtool_puts(&s, gve_gstrings_adminq_stats[i]);
+ ethtool_cpy(&s, gve_gstrings_adminq_stats[i]);
break;
@@ -392,7 +392,9 @@ gve_get_ethtool_stats(struct net_device *netdev,
*/
data[i++] = 0;
data[i++] = 0;
- data[i++] = tx->dqo_tx.tail - tx->dqo_tx.head;
+ data[i++] =
+ (tx->dqo_tx.tail - tx->dqo_tx.head) &
+ tx->mask;
}
do {
start =
@@ -417,9 +419,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = value;
}
}
- /* XDP xsk counters */
- data[i++] = tx->xdp_xsk_wakeup;
- data[i++] = tx->xdp_xsk_done;
+ /* XDP counters */
do {
start = u64_stats_fetch_begin(&priv->tx[ring].statss);
data[i] = tx->xdp_xsk_sent;
@@ -456,6 +456,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = priv->adminq_cfg_flow_rule_cnt;
data[i++] = priv->adminq_cfg_rss_cnt;
data[i++] = priv->adminq_query_rss_cnt;
+ data[i++] = priv->adminq_report_nic_timestamp_cnt;
}
static void gve_get_channels(struct net_device *netdev,
@@ -477,11 +478,12 @@ static int gve_set_channels(struct net_device *netdev,
struct ethtool_channels *cmd)
{
struct gve_priv *priv = netdev_priv(netdev);
- struct gve_queue_config new_tx_cfg = priv->tx_cfg;
- struct gve_queue_config new_rx_cfg = priv->rx_cfg;
+ struct gve_tx_queue_config new_tx_cfg = priv->tx_cfg;
+ struct gve_rx_queue_config new_rx_cfg = priv->rx_cfg;
struct ethtool_channels old_settings;
int new_tx = cmd->tx_count;
int new_rx = cmd->rx_count;
+ bool reset_rss = false;
gve_get_channels(netdev, &old_settings);
@@ -492,22 +494,27 @@ static int gve_set_channels(struct net_device *netdev,
if (!new_rx || !new_tx)
return -EINVAL;
- if (priv->num_xdp_queues &&
- (new_tx != new_rx || (2 * new_tx > priv->tx_cfg.max_queues))) {
- dev_err(&priv->pdev->dev, "XDP load failed: The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues");
- return -EINVAL;
- }
+ if (priv->xdp_prog) {
+ if (new_tx != new_rx ||
+ (2 * new_tx > priv->tx_cfg.max_queues)) {
+ dev_err(&priv->pdev->dev, "The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues when XDP program is installed");
+ return -EINVAL;
+ }
- if (!netif_running(netdev)) {
- priv->tx_cfg.num_queues = new_tx;
- priv->rx_cfg.num_queues = new_rx;
- return 0;
+ /* One XDP TX queue per RX queue. */
+ new_tx_cfg.num_xdp_queues = new_rx;
+ } else {
+ new_tx_cfg.num_xdp_queues = 0;
}
+ if (new_rx != priv->rx_cfg.num_queues &&
+ priv->cache_rss_config && !netif_is_rxfh_configured(netdev))
+ reset_rss = true;
+
new_tx_cfg.num_queues = new_tx;
new_rx_cfg.num_queues = new_rx;
- return gve_adjust_queues(priv, new_rx_cfg, new_tx_cfg);
+ return gve_adjust_queues(priv, new_rx_cfg, new_tx_cfg, reset_rss);
}
static void gve_get_ringparam(struct net_device *netdev,
@@ -522,6 +529,8 @@ static void gve_get_ringparam(struct net_device *netdev,
cmd->rx_pending = priv->rx_desc_cnt;
cmd->tx_pending = priv->tx_desc_cnt;
+ kernel_cmd->rx_buf_len = priv->rx_cfg.packet_buffer_size;
+
if (!gve_header_split_supported(priv))
kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
else if (priv->header_split_enabled)
@@ -530,34 +539,6 @@ static void gve_get_ringparam(struct net_device *netdev,
kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}
-static int gve_adjust_ring_sizes(struct gve_priv *priv,
- u16 new_tx_desc_cnt,
- u16 new_rx_desc_cnt)
-{
- struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
- struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
- int err;
-
- /* get current queue configuration */
- gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
-
- /* copy over the new ring_size from ethtool */
- tx_alloc_cfg.ring_size = new_tx_desc_cnt;
- rx_alloc_cfg.ring_size = new_rx_desc_cnt;
-
- if (netif_running(priv->dev)) {
- err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
- if (err)
- return err;
- }
-
- /* Set new ring_size for the next up */
- priv->tx_desc_cnt = new_tx_desc_cnt;
- priv->rx_desc_cnt = new_rx_desc_cnt;
-
- return 0;
-}
-
static int gve_validate_req_ring_size(struct gve_priv *priv, u16 new_tx_desc_cnt,
u16 new_rx_desc_cnt)
{
@@ -577,34 +558,68 @@ static int gve_validate_req_ring_size(struct gve_priv *priv, u16 new_tx_desc_cnt
return 0;
}
+static int gve_set_ring_sizes_config(struct gve_priv *priv, u16 new_tx_desc_cnt,
+ u16 new_rx_desc_cnt,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+{
+ if (new_tx_desc_cnt == priv->tx_desc_cnt &&
+ new_rx_desc_cnt == priv->rx_desc_cnt)
+ return 0;
+
+ if (!priv->modify_ring_size_enabled) {
+ dev_err(&priv->pdev->dev, "Modify ring size is not supported.\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (gve_validate_req_ring_size(priv, new_tx_desc_cnt, new_rx_desc_cnt))
+ return -EINVAL;
+
+ tx_alloc_cfg->ring_size = new_tx_desc_cnt;
+ rx_alloc_cfg->ring_size = new_rx_desc_cnt;
+ return 0;
+}
+
static int gve_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *cmd,
struct kernel_ethtool_ringparam *kernel_cmd,
struct netlink_ext_ack *extack)
{
+ struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+ struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
struct gve_priv *priv = netdev_priv(netdev);
- u16 new_tx_cnt, new_rx_cnt;
int err;
- err = gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split);
+ gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+
+ err = gve_set_rx_buf_len_config(priv, kernel_cmd->rx_buf_len, extack,
+ &rx_alloc_cfg);
if (err)
return err;
- if (cmd->tx_pending == priv->tx_desc_cnt && cmd->rx_pending == priv->rx_desc_cnt)
- return 0;
-
- if (!priv->modify_ring_size_enabled) {
- dev_err(&priv->pdev->dev, "Modify ring size is not supported.\n");
- return -EOPNOTSUPP;
- }
-
- new_tx_cnt = cmd->tx_pending;
- new_rx_cnt = cmd->rx_pending;
+ err = gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split,
+ &rx_alloc_cfg);
+ if (err)
+ return err;
- if (gve_validate_req_ring_size(priv, new_tx_cnt, new_rx_cnt))
- return -EINVAL;
+ err = gve_set_ring_sizes_config(priv, cmd->tx_pending, cmd->rx_pending,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+ if (err)
+ return err;
- return gve_adjust_ring_sizes(priv, new_tx_cnt, new_rx_cnt);
+ if (netif_running(priv->dev)) {
+ err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+ if (err)
+ return err;
+ } else {
+ /* Set ring params for the next up */
+ priv->rx_cfg.packet_buffer_size =
+ rx_alloc_cfg.packet_buffer_size;
+ priv->header_split_enabled = rx_alloc_cfg.enable_header_split;
+ priv->tx_desc_cnt = tx_alloc_cfg.ring_size;
+ priv->rx_desc_cnt = rx_alloc_cfg.ring_size;
+ }
+ return 0;
}
static int gve_user_reset(struct net_device *netdev, u32 *flags)
@@ -643,8 +658,7 @@ static int gve_set_tunable(struct net_device *netdev,
switch (etuna->id) {
case ETHTOOL_RX_COPYBREAK:
{
- u32 max_copybreak = gve_is_gqi(priv) ?
- GVE_DEFAULT_RX_BUFFER_SIZE : priv->data_buffer_size_dqo;
+ u32 max_copybreak = priv->rx_cfg.packet_buffer_size;
len = *(u32 *)value;
if (len > max_copybreak)
@@ -662,7 +676,7 @@ static u32 gve_get_priv_flags(struct net_device *netdev)
struct gve_priv *priv = netdev_priv(netdev);
u32 ret_flags = 0;
- /* Only 1 flag exists currently: report-stats (BIT(O)), so set that flag. */
+ /* Only 1 flag exists currently: report-stats (BIT(0)), so set that flag. */
if (priv->ethtool_flags & BIT(0))
ret_flags |= BIT(0);
return ret_flags;
@@ -700,7 +714,7 @@ static int gve_set_priv_flags(struct net_device *netdev, u32 flags)
memset(priv->stats_report->stats, 0, (tx_stats_num + rx_stats_num) *
sizeof(struct stats));
- del_timer_sync(&priv->stats_report_timer);
+ timer_delete_sync(&priv->stats_report_timer);
}
return 0;
}
@@ -793,9 +807,6 @@ static int gve_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
case ETHTOOL_SRXCLSRLDEL:
err = gve_del_flow_rule(priv, cmd);
break;
- case ETHTOOL_SRXFH:
- err = -EOPNOTSUPP;
- break;
default:
err = -EOPNOTSUPP;
break;
@@ -830,9 +841,6 @@ static int gve_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u
case ETHTOOL_GRXCLSRLALL:
err = gve_get_flow_rule_ids(priv, cmd, (u32 *)rule_locs);
break;
- case ETHTOOL_GRXFH:
- err = -EOPNOTSUPP;
- break;
default:
err = -EOPNOTSUPP;
break;
@@ -855,6 +863,25 @@ static u32 gve_get_rxfh_indir_size(struct net_device *netdev)
return priv->rss_lut_size;
}
+static void gve_get_rss_config_cache(struct gve_priv *priv,
+ struct ethtool_rxfh_param *rxfh)
+{
+ struct gve_rss_config *rss_config = &priv->rss_config;
+
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+
+ if (rxfh->key) {
+ rxfh->key_size = priv->rss_key_size;
+ memcpy(rxfh->key, rss_config->hash_key, priv->rss_key_size);
+ }
+
+ if (rxfh->indir) {
+ rxfh->indir_size = priv->rss_lut_size;
+ memcpy(rxfh->indir, rss_config->hash_lut,
+ priv->rss_lut_size * sizeof(*rxfh->indir));
+ }
+}
+
static int gve_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
struct gve_priv *priv = netdev_priv(netdev);
@@ -862,23 +889,73 @@ static int gve_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rx
if (!priv->rss_key_size || !priv->rss_lut_size)
return -EOPNOTSUPP;
+ if (priv->cache_rss_config) {
+ gve_get_rss_config_cache(priv, rxfh);
+ return 0;
+ }
+
return gve_adminq_query_rss_config(priv, rxfh);
}
+static void gve_set_rss_config_cache(struct gve_priv *priv,
+ struct ethtool_rxfh_param *rxfh)
+{
+ struct gve_rss_config *rss_config = &priv->rss_config;
+
+ if (rxfh->key)
+ memcpy(rss_config->hash_key, rxfh->key, priv->rss_key_size);
+
+ if (rxfh->indir)
+ memcpy(rss_config->hash_lut, rxfh->indir,
+ priv->rss_lut_size * sizeof(*rxfh->indir));
+}
+
static int gve_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct gve_priv *priv = netdev_priv(netdev);
+ int err;
if (!priv->rss_key_size || !priv->rss_lut_size)
return -EOPNOTSUPP;
- return gve_adminq_configure_rss(priv, rxfh);
+ err = gve_adminq_configure_rss(priv, rxfh);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Fail to configure RSS config");
+ return err;
+ }
+
+ if (priv->cache_rss_config)
+ gve_set_rss_config_cache(priv, rxfh);
+
+ return 0;
+}
+
+static int gve_get_ts_info(struct net_device *netdev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ ethtool_op_get_ts_info(netdev, info);
+
+ if (priv->nic_timestamp_supported) {
+ info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ if (priv->ptp)
+ info->phc_index = ptp_clock_index(priv->ptp->clock);
+ }
+
+ return 0;
}
const struct ethtool_ops gve_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
- .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT |
+ ETHTOOL_RING_USE_RX_BUF_LEN,
.get_drvinfo = gve_get_drvinfo,
.get_strings = gve_get_strings,
.get_sset_count = gve_get_sset_count,
@@ -904,5 +981,5 @@ const struct ethtool_ops gve_ethtool_ops = {
.get_priv_flags = gve_get_priv_flags,
.set_priv_flags = gve_set_priv_flags,
.get_link_ksettings = gve_get_link_ksettings,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = gve_get_ts_info,
};
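
With gve_get_ts_info() now advertising SOF_TIMESTAMPING_RX_HARDWARE and SOF_TIMESTAMPING_RAW_HARDWARE plus the HWTSTAMP_FILTER_NONE/ALL RX filters, applications can request NIC receive timestamps through the standard socket interface once hardware timestamping has been enabled on the interface (e.g. via the SIOCSHWTSTAMP ioctl). A minimal userspace sketch, independent of the driver:

/* Userspace sketch: request raw NIC RX timestamps on an open socket.
 * Error handling is intentionally minimal.
 */
#include <linux/net_tstamp.h>
#include <sys/socket.h>

static int enable_rx_hw_timestamps(int fd)
{
	int flags = SOF_TIMESTAMPING_RX_HARDWARE |
		    SOF_TIMESTAMPING_RAW_HARDWARE;

	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
			  &flags, sizeof(flags));
}

The raw hardware time then arrives with each packet as an SCM_TIMESTAMPING control message; the third element (ts[2]) of the attached struct scm_timestamping holds the NIC timestamp, which can be correlated against the PHC exposed through the phc_index reported above.
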
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index e171ca248f9a..a5a2b18d309b 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -4,6 +4,7 @@
* Copyright (C) 2015-2024 Google LLC
*/
+#include <linux/bitmap.h>
#include <linux/bpf.h>
#include <linux/cpumask.h>
#include <linux/etherdevice.h>
@@ -184,6 +185,43 @@ static void gve_free_flow_rule_caches(struct gve_priv *priv)
flow_rules_cache->rules_cache = NULL;
}
+static int gve_alloc_rss_config_cache(struct gve_priv *priv)
+{
+ struct gve_rss_config *rss_config = &priv->rss_config;
+
+ if (!priv->cache_rss_config)
+ return 0;
+
+ rss_config->hash_key = kcalloc(priv->rss_key_size,
+ sizeof(rss_config->hash_key[0]),
+ GFP_KERNEL);
+ if (!rss_config->hash_key)
+ return -ENOMEM;
+
+ rss_config->hash_lut = kcalloc(priv->rss_lut_size,
+ sizeof(rss_config->hash_lut[0]),
+ GFP_KERNEL);
+ if (!rss_config->hash_lut)
+ goto free_rss_key_cache;
+
+ return 0;
+
+free_rss_key_cache:
+ kfree(rss_config->hash_key);
+ rss_config->hash_key = NULL;
+ return -ENOMEM;
+}
+
+static void gve_free_rss_config_cache(struct gve_priv *priv)
+{
+ struct gve_rss_config *rss_config = &priv->rss_config;
+
+ kfree(rss_config->hash_key);
+ kfree(rss_config->hash_lut);
+
+ memset(rss_config, 0, sizeof(*rss_config));
+}
+
static int gve_alloc_counter_array(struct gve_priv *priv)
{
priv->counter_array =
@@ -231,7 +269,8 @@ static void gve_stats_report_schedule(struct gve_priv *priv)
static void gve_stats_report_timer(struct timer_list *t)
{
- struct gve_priv *priv = from_timer(priv, t, stats_report_timer);
+ struct gve_priv *priv = timer_container_of(priv, t,
+ stats_report_timer);
mod_timer(&priv->stats_report_timer,
round_jiffies(jiffies +
@@ -265,7 +304,7 @@ static void gve_free_stats_report(struct gve_priv *priv)
if (!priv->stats_report)
return;
- del_timer_sync(&priv->stats_report_timer);
+ timer_delete_sync(&priv->stats_report_timer);
dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,
priv->stats_report, priv->stats_report_bus);
priv->stats_report = NULL;
@@ -333,6 +372,14 @@ int gve_napi_poll(struct napi_struct *napi, int budget)
if (block->rx) {
work_done = gve_rx_poll(block, budget);
+
+ /* Poll XSK TX as part of RX NAPI. Setup re-poll based on max of
+ * TX and RX work done.
+ */
+ if (priv->xdp_prog)
+ work_done = max_t(int, work_done,
+ gve_xsk_tx_poll(block, budget));
+
reschedule |= work_done == budget;
}
@@ -368,14 +415,24 @@ int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
bool reschedule = false;
int work_done = 0;
- if (block->tx)
- reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
+ if (block->tx) {
+ if (block->tx->q_num < priv->tx_cfg.num_queues)
+ reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
+ else
+ reschedule |= gve_xdp_poll_dqo(block);
+ }
if (!budget)
return 0;
if (block->rx) {
work_done = gve_rx_poll_dqo(block, budget);
+
+ /* Poll XSK TX as part of RX NAPI. Setup re-poll based on if
+ * either datapath has more work to do.
+ */
+ if (priv->xdp_prog)
+ reschedule |= gve_xsk_tx_poll_dqo(block, budget);
reschedule |= work_done == budget;
}
@@ -411,10 +468,19 @@ int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
return work_done;
}
+static const struct cpumask *gve_get_node_mask(struct gve_priv *priv)
+{
+ if (priv->numa_node == NUMA_NO_NODE)
+ return cpu_all_mask;
+ else
+ return cpumask_of_node(priv->numa_node);
+}
+
static int gve_alloc_notify_blocks(struct gve_priv *priv)
{
int num_vecs_requested = priv->num_ntfy_blks + 1;
- unsigned int active_cpus;
+ const struct cpumask *node_mask;
+ unsigned int cur_cpu;
int vecs_enabled;
int i, j;
int err;
@@ -453,8 +519,6 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues)
priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
}
- /* Half the notification blocks go to TX and half to RX */
- active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus());
/* Setup Management Vector - the last vector */
snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "gve-mgmnt@pci:%s",
@@ -483,6 +547,8 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
}
/* Setup the other blocks - the first n-1 vectors */
+ node_mask = gve_get_node_mask(priv);
+ cur_cpu = cpumask_first(node_mask);
for (i = 0; i < priv->num_ntfy_blks; i++) {
struct gve_notify_block *block = &priv->ntfy_blocks[i];
int msix_idx = i;
@@ -499,9 +565,17 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
goto abort_with_some_ntfy_blocks;
}
block->irq = priv->msix_vectors[msix_idx].vector;
- irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
- get_cpu_mask(i % active_cpus));
+ irq_set_affinity_and_hint(block->irq,
+ cpumask_of(cur_cpu));
block->irq_db_index = &priv->irq_db_indices[i].index;
+
+ cur_cpu = cpumask_next(cur_cpu, node_mask);
+ /* Wrap once CPUs in the node have been exhausted, or when
+ * starting RX queue affinities. TX and RX queues of the same
+ * index share affinity.
+ */
+ if (cur_cpu >= nr_cpu_ids || (i + 1) == priv->tx_cfg.max_queues)
+ cur_cpu = cpumask_first(node_mask);
}
return 0;
abort_with_some_ntfy_blocks:
@@ -567,12 +641,18 @@ static int gve_setup_device_resources(struct gve_priv *priv)
err = gve_alloc_flow_rule_caches(priv);
if (err)
return err;
- err = gve_alloc_counter_array(priv);
+ err = gve_alloc_rss_config_cache(priv);
if (err)
goto abort_with_flow_rule_caches;
- err = gve_alloc_notify_blocks(priv);
+ err = gve_alloc_counter_array(priv);
+ if (err)
+ goto abort_with_rss_config_cache;
+ err = gve_init_clock(priv);
if (err)
goto abort_with_counter;
+ err = gve_alloc_notify_blocks(priv);
+ if (err)
+ goto abort_with_clock;
err = gve_alloc_stats_report(priv);
if (err)
goto abort_with_ntfy_blocks;
@@ -603,6 +683,12 @@ static int gve_setup_device_resources(struct gve_priv *priv)
}
}
+ err = gve_init_rss_config(priv, priv->rx_cfg.num_queues);
+ if (err) {
+ dev_err(&priv->pdev->dev, "Failed to init RSS config");
+ goto abort_with_ptype_lut;
+ }
+
err = gve_adminq_report_stats(priv, priv->stats_report_len,
priv->stats_report_bus,
GVE_STATS_REPORT_TIMER_PERIOD);
@@ -619,8 +705,12 @@ abort_with_stats_report:
gve_free_stats_report(priv);
abort_with_ntfy_blocks:
gve_free_notify_blocks(priv);
+abort_with_clock:
+ gve_teardown_clock(priv);
abort_with_counter:
gve_free_counter_array(priv);
+abort_with_rss_config_cache:
+ gve_free_rss_config_cache(priv);
abort_with_flow_rule_caches:
gve_free_flow_rule_caches(priv);
@@ -661,9 +751,11 @@ static void gve_teardown_device_resources(struct gve_priv *priv)
priv->ptype_lut_dqo = NULL;
gve_free_flow_rule_caches(priv);
+ gve_free_rss_config_cache(priv);
gve_free_counter_array(priv);
gve_free_notify_blocks(priv);
gve_free_stats_report(priv);
+ gve_teardown_clock(priv);
gve_clear_device_resources_ok(priv);
}
@@ -738,30 +830,13 @@ static struct gve_queue_page_list *gve_rx_get_qpl(struct gve_priv *priv, int idx
return rx->dqo.qpl;
}
-static int gve_register_xdp_qpls(struct gve_priv *priv)
-{
- int start_id;
- int err;
- int i;
-
- start_id = gve_xdp_tx_start_queue_id(priv);
- for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
- err = gve_register_qpl(priv, gve_tx_get_qpl(priv, i));
- /* This failure will trigger a reset - no need to clean up */
- if (err)
- return err;
- }
- return 0;
-}
-
static int gve_register_qpls(struct gve_priv *priv)
{
int num_tx_qpls, num_rx_qpls;
int err;
int i;
- num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv),
- gve_is_qpl(priv));
+ num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_is_qpl(priv));
num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
for (i = 0; i < num_tx_qpls; i++) {
@@ -779,30 +854,13 @@ static int gve_register_qpls(struct gve_priv *priv)
return 0;
}
-static int gve_unregister_xdp_qpls(struct gve_priv *priv)
-{
- int start_id;
- int err;
- int i;
-
- start_id = gve_xdp_tx_start_queue_id(priv);
- for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
- err = gve_unregister_qpl(priv, gve_tx_get_qpl(priv, i));
- /* This failure will trigger a reset - no need to clean */
- if (err)
- return err;
- }
- return 0;
-}
-
static int gve_unregister_qpls(struct gve_priv *priv)
{
int num_tx_qpls, num_rx_qpls;
int err;
int i;
- num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv),
- gve_is_qpl(priv));
+ num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_is_qpl(priv));
num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
for (i = 0; i < num_tx_qpls; i++) {
@@ -821,27 +879,6 @@ static int gve_unregister_qpls(struct gve_priv *priv)
return 0;
}
-static int gve_create_xdp_rings(struct gve_priv *priv)
-{
- int err;
-
- err = gve_adminq_create_tx_queues(priv,
- gve_xdp_tx_start_queue_id(priv),
- priv->num_xdp_queues);
- if (err) {
- netif_err(priv, drv, priv->dev, "failed to create %d XDP tx queues\n",
- priv->num_xdp_queues);
- /* This failure will trigger a reset - no need to clean
- * up
- */
- return err;
- }
- netif_dbg(priv, drv, priv->dev, "created %d XDP tx queues\n",
- priv->num_xdp_queues);
-
- return 0;
-}
-
static int gve_create_rings(struct gve_priv *priv)
{
int num_tx_queues = gve_num_tx_queues(priv);
@@ -897,7 +934,7 @@ static void init_xdp_sync_stats(struct gve_priv *priv)
int i;
/* Init stats */
- for (i = start_id; i < start_id + priv->num_xdp_queues; i++) {
+ for (i = start_id; i < start_id + priv->tx_cfg.num_xdp_queues; i++) {
int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
u64_stats_init(&priv->tx[i].statss);
@@ -925,19 +962,18 @@ static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
cfg->qcfg = &priv->tx_cfg;
cfg->raw_addressing = !gve_is_qpl(priv);
cfg->ring_size = priv->tx_desc_cnt;
- cfg->start_idx = 0;
- cfg->num_rings = gve_num_tx_queues(priv);
+ cfg->num_xdp_rings = cfg->qcfg->num_xdp_queues;
cfg->tx = priv->tx;
}
-static void gve_tx_stop_rings(struct gve_priv *priv, int start_id, int num_rings)
+static void gve_tx_stop_rings(struct gve_priv *priv, int num_rings)
{
int i;
if (!priv->tx)
return;
- for (i = start_id; i < start_id + num_rings; i++) {
+ for (i = 0; i < num_rings; i++) {
if (gve_is_gqi(priv))
gve_tx_stop_ring_gqi(priv, i);
else
@@ -945,12 +981,11 @@ static void gve_tx_stop_rings(struct gve_priv *priv, int start_id, int num_rings
}
}
-static void gve_tx_start_rings(struct gve_priv *priv, int start_id,
- int num_rings)
+static void gve_tx_start_rings(struct gve_priv *priv, int num_rings)
{
int i;
- for (i = start_id; i < start_id + num_rings; i++) {
+ for (i = 0; i < num_rings; i++) {
if (gve_is_gqi(priv))
gve_tx_start_ring_gqi(priv, i);
else
@@ -958,28 +993,6 @@ static void gve_tx_start_rings(struct gve_priv *priv, int start_id,
}
}
-static int gve_alloc_xdp_rings(struct gve_priv *priv)
-{
- struct gve_tx_alloc_rings_cfg cfg = {0};
- int err = 0;
-
- if (!priv->num_xdp_queues)
- return 0;
-
- gve_tx_get_curr_alloc_cfg(priv, &cfg);
- cfg.start_idx = gve_xdp_tx_start_queue_id(priv);
- cfg.num_rings = priv->num_xdp_queues;
-
- err = gve_tx_alloc_rings_gqi(priv, &cfg);
- if (err)
- return err;
-
- gve_tx_start_rings(priv, cfg.start_idx, cfg.num_rings);
- init_xdp_sync_stats(priv);
-
- return 0;
-}
-
static int gve_queues_mem_alloc(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
@@ -1010,26 +1023,6 @@ free_tx:
return err;
}
-static int gve_destroy_xdp_rings(struct gve_priv *priv)
-{
- int start_id;
- int err;
-
- start_id = gve_xdp_tx_start_queue_id(priv);
- err = gve_adminq_destroy_tx_queues(priv,
- start_id,
- priv->num_xdp_queues);
- if (err) {
- netif_err(priv, drv, priv->dev,
- "failed to destroy XDP queues\n");
- /* This failure will trigger a reset - no need to clean up */
- return err;
- }
- netif_dbg(priv, drv, priv->dev, "destroyed XDP queues\n");
-
- return 0;
-}
-
static int gve_destroy_rings(struct gve_priv *priv)
{
int num_tx_queues = gve_num_tx_queues(priv);
@@ -1054,20 +1047,6 @@ static int gve_destroy_rings(struct gve_priv *priv)
return 0;
}
-static void gve_free_xdp_rings(struct gve_priv *priv)
-{
- struct gve_tx_alloc_rings_cfg cfg = {0};
-
- gve_tx_get_curr_alloc_cfg(priv, &cfg);
- cfg.start_idx = gve_xdp_tx_start_queue_id(priv);
- cfg.num_rings = priv->num_xdp_queues;
-
- if (priv->tx) {
- gve_tx_stop_rings(priv, cfg.start_idx, cfg.num_rings);
- gve_tx_free_rings_gqi(priv, &cfg);
- }
-}
-
static void gve_queues_mem_free(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *tx_cfg,
struct gve_rx_alloc_rings_cfg *rx_cfg)
@@ -1085,7 +1064,7 @@ int gve_alloc_page(struct gve_priv *priv, struct device *dev,
struct page **page, dma_addr_t *dma,
enum dma_data_direction dir, gfp_t gfp_flags)
{
- *page = alloc_page(gfp_flags);
+ *page = alloc_pages_node(priv->numa_node, gfp_flags, 0);
if (!*page) {
priv->page_alloc_fail++;
return -ENOMEM;
@@ -1186,18 +1165,84 @@ static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
static void gve_turndown(struct gve_priv *priv);
static void gve_turnup(struct gve_priv *priv);
+static void gve_unreg_xsk_pool(struct gve_priv *priv, u16 qid)
+{
+ struct gve_rx_ring *rx;
+
+ if (!priv->rx)
+ return;
+
+ rx = &priv->rx[qid];
+ rx->xsk_pool = NULL;
+ if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
+ xdp_rxq_info_unreg_mem_model(&rx->xdp_rxq);
+
+ if (!priv->tx)
+ return;
+ priv->tx[gve_xdp_tx_queue_id(priv, qid)].xsk_pool = NULL;
+}
+
+static int gve_reg_xsk_pool(struct gve_priv *priv, struct net_device *dev,
+ struct xsk_buff_pool *pool, u16 qid)
+{
+ struct gve_rx_ring *rx;
+ u16 tx_qid;
+ int err;
+
+ rx = &priv->rx[qid];
+ err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL, pool);
+ if (err) {
+ gve_unreg_xsk_pool(priv, qid);
+ return err;
+ }
+
+ rx->xsk_pool = pool;
+
+ tx_qid = gve_xdp_tx_queue_id(priv, qid);
+ priv->tx[tx_qid].xsk_pool = pool;
+
+ return 0;
+}
+
+static void gve_unreg_xdp_info(struct gve_priv *priv)
+{
+ int i;
+
+ if (!priv->tx_cfg.num_xdp_queues || !priv->rx)
+ return;
+
+ for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ struct gve_rx_ring *rx = &priv->rx[i];
+
+ if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
+ xdp_rxq_info_unreg(&rx->xdp_rxq);
+
+ gve_unreg_xsk_pool(priv, i);
+ }
+}
+
+static struct xsk_buff_pool *gve_get_xsk_pool(struct gve_priv *priv, int qid)
+{
+ if (!test_bit(qid, priv->xsk_pools))
+ return NULL;
+
+ return xsk_get_pool_from_qid(priv->dev, qid);
+}
+
static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
{
struct napi_struct *napi;
struct gve_rx_ring *rx;
int err = 0;
- int i, j;
- u32 tx_qid;
+ int i;
- if (!priv->num_xdp_queues)
+ if (!priv->tx_cfg.num_xdp_queues)
return 0;
for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ struct xsk_buff_pool *xsk_pool;
+
rx = &priv->rx[i];
napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
@@ -1205,64 +1250,28 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
napi->napi_id);
if (err)
goto err;
- err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
- MEM_TYPE_PAGE_SHARED, NULL);
+
+ xsk_pool = gve_get_xsk_pool(priv, i);
+ if (xsk_pool)
+ err = gve_reg_xsk_pool(priv, dev, xsk_pool, i);
+ else if (gve_is_qpl(priv))
+ err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL);
+ else
+ err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
+ MEM_TYPE_PAGE_POOL,
+ rx->dqo.page_pool);
if (err)
goto err;
- rx->xsk_pool = xsk_get_pool_from_qid(dev, i);
- if (rx->xsk_pool) {
- err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, i,
- napi->napi_id);
- if (err)
- goto err;
- err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq,
- MEM_TYPE_XSK_BUFF_POOL, NULL);
- if (err)
- goto err;
- xsk_pool_set_rxq_info(rx->xsk_pool,
- &rx->xsk_rxq);
- }
- }
-
- for (i = 0; i < priv->num_xdp_queues; i++) {
- tx_qid = gve_xdp_tx_queue_id(priv, i);
- priv->tx[tx_qid].xsk_pool = xsk_get_pool_from_qid(dev, i);
}
return 0;
err:
- for (j = i; j >= 0; j--) {
- rx = &priv->rx[j];
- if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
- xdp_rxq_info_unreg(&rx->xdp_rxq);
- if (xdp_rxq_info_is_reg(&rx->xsk_rxq))
- xdp_rxq_info_unreg(&rx->xsk_rxq);
- }
+ gve_unreg_xdp_info(priv);
return err;
}
-static void gve_unreg_xdp_info(struct gve_priv *priv)
-{
- int i, tx_qid;
-
- if (!priv->num_xdp_queues)
- return;
-
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- struct gve_rx_ring *rx = &priv->rx[i];
-
- xdp_rxq_info_unreg(&rx->xdp_rxq);
- if (rx->xsk_pool) {
- xdp_rxq_info_unreg(&rx->xsk_rxq);
- rx->xsk_pool = NULL;
- }
- }
-
- for (i = 0; i < priv->num_xdp_queues; i++) {
- tx_qid = gve_xdp_tx_queue_id(priv, i);
- priv->tx[tx_qid].xsk_pool = NULL;
- }
-}
static void gve_drain_page_cache(struct gve_priv *priv)
{
@@ -1275,15 +1284,14 @@ static void gve_drain_page_cache(struct gve_priv *priv)
static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg)
{
- cfg->qcfg = &priv->rx_cfg;
+ cfg->qcfg_rx = &priv->rx_cfg;
cfg->qcfg_tx = &priv->tx_cfg;
cfg->raw_addressing = !gve_is_qpl(priv);
cfg->enable_header_split = priv->header_split_enabled;
cfg->ring_size = priv->rx_desc_cnt;
- cfg->packet_buffer_size = gve_is_gqi(priv) ?
- GVE_DEFAULT_RX_BUFFER_SIZE :
- priv->data_buffer_size_dqo;
+ cfg->packet_buffer_size = priv->rx_cfg.packet_buffer_size;
cfg->rx = priv->rx;
+ cfg->xdp = !!cfg->qcfg_tx->num_xdp_queues;
}
void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
@@ -1356,17 +1364,13 @@ static int gve_queues_start(struct gve_priv *priv,
/* Record new configs into priv */
priv->tx_cfg = *tx_alloc_cfg->qcfg;
- priv->rx_cfg = *rx_alloc_cfg->qcfg;
+ priv->tx_cfg.num_xdp_queues = tx_alloc_cfg->num_xdp_rings;
+ priv->rx_cfg = *rx_alloc_cfg->qcfg_rx;
priv->tx_desc_cnt = tx_alloc_cfg->ring_size;
priv->rx_desc_cnt = rx_alloc_cfg->ring_size;
- if (priv->xdp_prog)
- priv->num_xdp_queues = priv->rx_cfg.num_queues;
- else
- priv->num_xdp_queues = 0;
-
- gve_tx_start_rings(priv, 0, tx_alloc_cfg->num_rings);
- gve_rx_start_rings(priv, rx_alloc_cfg->qcfg->num_queues);
+ gve_tx_start_rings(priv, gve_num_tx_queues(priv));
+ gve_rx_start_rings(priv, rx_alloc_cfg->qcfg_rx->num_queues);
gve_init_sync_stats(priv);
err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
@@ -1380,12 +1384,18 @@ static int gve_queues_start(struct gve_priv *priv,
if (err)
goto stop_and_free_rings;
+ if (rx_alloc_cfg->reset_rss) {
+ err = gve_init_rss_config(priv, priv->rx_cfg.num_queues);
+ if (err)
+ goto reset;
+ }
+
err = gve_register_qpls(priv);
if (err)
goto reset;
priv->header_split_enabled = rx_alloc_cfg->enable_header_split;
- priv->data_buffer_size_dqo = rx_alloc_cfg->packet_buffer_size;
+ priv->rx_cfg.packet_buffer_size = rx_alloc_cfg->packet_buffer_size;
err = gve_create_rings(priv);
if (err)
@@ -1412,7 +1422,7 @@ reset:
/* return the original error */
return err;
stop_and_free_rings:
- gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv));
+ gve_tx_stop_rings(priv, gve_num_tx_queues(priv));
gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
gve_queues_mem_remove(priv);
return err;
@@ -1457,11 +1467,11 @@ static int gve_queues_stop(struct gve_priv *priv)
goto err;
gve_clear_device_rings_ok(priv);
}
- del_timer_sync(&priv->stats_report_timer);
+ timer_delete_sync(&priv->stats_report_timer);
gve_unreg_xdp_info(priv);
- gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv));
+ gve_tx_stop_rings(priv, gve_num_tx_queues(priv));
gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
priv->interface_down_cnt++;
@@ -1491,56 +1501,6 @@ static int gve_close(struct net_device *dev)
return 0;
}
-static int gve_remove_xdp_queues(struct gve_priv *priv)
-{
- int err;
-
- err = gve_destroy_xdp_rings(priv);
- if (err)
- return err;
-
- err = gve_unregister_xdp_qpls(priv);
- if (err)
- return err;
-
- gve_unreg_xdp_info(priv);
- gve_free_xdp_rings(priv);
-
- priv->num_xdp_queues = 0;
- return 0;
-}
-
-static int gve_add_xdp_queues(struct gve_priv *priv)
-{
- int err;
-
- priv->num_xdp_queues = priv->rx_cfg.num_queues;
-
- err = gve_alloc_xdp_rings(priv);
- if (err)
- goto err;
-
- err = gve_reg_xdp_info(priv, priv->dev);
- if (err)
- goto free_xdp_rings;
-
- err = gve_register_xdp_qpls(priv);
- if (err)
- goto free_xdp_rings;
-
- err = gve_create_xdp_rings(priv);
- if (err)
- goto free_xdp_rings;
-
- return 0;
-
-free_xdp_rings:
- gve_free_xdp_rings(priv);
-err:
- priv->num_xdp_queues = 0;
- return err;
-}
-
static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
{
if (!gve_get_napi_enabled(priv))
@@ -1558,6 +1518,19 @@ static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
}
}
+static int gve_configure_rings_xdp(struct gve_priv *priv,
+ u16 num_xdp_rings)
+{
+ struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+ struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+
+ gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+ tx_alloc_cfg.num_xdp_rings = num_xdp_rings;
+
+ rx_alloc_cfg.xdp = !!num_xdp_rings;
+ return gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+}
+
static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
@@ -1570,42 +1543,49 @@ static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog,
WRITE_ONCE(priv->xdp_prog, prog);
if (old_prog)
bpf_prog_put(old_prog);
+
+ /* Update priv XDP queue configuration */
+ priv->tx_cfg.num_xdp_queues = priv->xdp_prog ?
+ priv->rx_cfg.num_queues : 0;
return 0;
}
- gve_turndown(priv);
- if (!old_prog && prog) {
- // Allocate XDP TX queues if an XDP program is
- // being installed
- err = gve_add_xdp_queues(priv);
- if (err)
- goto out;
- } else if (old_prog && !prog) {
- // Remove XDP TX queues if an XDP program is
- // being uninstalled
- err = gve_remove_xdp_queues(priv);
- if (err)
- goto out;
- }
+ if (!old_prog && prog)
+ err = gve_configure_rings_xdp(priv, priv->rx_cfg.num_queues);
+ else if (old_prog && !prog)
+ err = gve_configure_rings_xdp(priv, 0);
+
+ if (err)
+ goto out;
+
WRITE_ONCE(priv->xdp_prog, prog);
if (old_prog)
bpf_prog_put(old_prog);
out:
- gve_turnup(priv);
status = ioread32be(&priv->reg_bar0->device_status);
gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
return err;
}
+static int gve_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+
+ if (priv->queue_format == GVE_GQI_QPL_FORMAT)
+ return gve_xdp_xmit_gqi(dev, n, frames, flags);
+ else if (priv->queue_format == GVE_DQO_RDA_FORMAT)
+ return gve_xdp_xmit_dqo(dev, n, frames, flags);
+
+ return -EOPNOTSUPP;
+}
+
static int gve_xsk_pool_enable(struct net_device *dev,
struct xsk_buff_pool *pool,
u16 qid)
{
struct gve_priv *priv = netdev_priv(dev);
- struct napi_struct *napi;
- struct gve_rx_ring *rx;
- int tx_qid;
int err;
if (qid >= priv->rx_cfg.num_queues) {
@@ -1623,34 +1603,31 @@ static int gve_xsk_pool_enable(struct net_device *dev,
if (err)
return err;
- /* If XDP prog is not installed, return */
- if (!priv->xdp_prog)
- return 0;
+ set_bit(qid, priv->xsk_pools);
- rx = &priv->rx[qid];
- napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
- err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, qid, napi->napi_id);
- if (err)
- goto err;
+ /* If XDP prog is not installed or interface is down, return. */
+ if (!priv->xdp_prog || !netif_running(dev))
+ return 0;
- err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq,
- MEM_TYPE_XSK_BUFF_POOL, NULL);
+ err = gve_reg_xsk_pool(priv, dev, pool, qid);
if (err)
- goto err;
-
- xsk_pool_set_rxq_info(pool, &rx->xsk_rxq);
- rx->xsk_pool = pool;
-
- tx_qid = gve_xdp_tx_queue_id(priv, qid);
- priv->tx[tx_qid].xsk_pool = pool;
+ goto err_xsk_pool_dma_mapped;
+ /* Stop and start RDA queues to repost buffers. */
+ if (!gve_is_qpl(priv)) {
+ err = gve_configure_rings_xdp(priv, priv->rx_cfg.num_queues);
+ if (err)
+ goto err_xsk_pool_registered;
+ }
return 0;
-err:
- if (xdp_rxq_info_is_reg(&rx->xsk_rxq))
- xdp_rxq_info_unreg(&rx->xsk_rxq);
+err_xsk_pool_registered:
+ gve_unreg_xsk_pool(priv, qid);
+err_xsk_pool_dma_mapped:
+ clear_bit(qid, priv->xsk_pools);
xsk_pool_dma_unmap(pool,
- DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+ DMA_ATTR_SKIP_CPU_SYNC |
+ DMA_ATTR_WEAK_ORDERING);
return err;
}
@@ -1662,103 +1639,118 @@ static int gve_xsk_pool_disable(struct net_device *dev,
struct napi_struct *napi_tx;
struct xsk_buff_pool *pool;
int tx_qid;
+ int err;
- pool = xsk_get_pool_from_qid(dev, qid);
- if (!pool)
- return -EINVAL;
if (qid >= priv->rx_cfg.num_queues)
return -EINVAL;
- /* If XDP prog is not installed, unmap DMA and return */
- if (!priv->xdp_prog)
- goto done;
+ clear_bit(qid, priv->xsk_pools);
- tx_qid = gve_xdp_tx_queue_id(priv, qid);
- if (!netif_running(dev)) {
- priv->rx[qid].xsk_pool = NULL;
- xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
- priv->tx[tx_qid].xsk_pool = NULL;
- goto done;
+ pool = xsk_get_pool_from_qid(dev, qid);
+ if (pool)
+ xsk_pool_dma_unmap(pool,
+ DMA_ATTR_SKIP_CPU_SYNC |
+ DMA_ATTR_WEAK_ORDERING);
+
+ if (!netif_running(dev) || !priv->tx_cfg.num_xdp_queues)
+ return 0;
+
+ /* Stop and start RDA queues to repost buffers. */
+ if (!gve_is_qpl(priv) && priv->xdp_prog) {
+ err = gve_configure_rings_xdp(priv, priv->rx_cfg.num_queues);
+ if (err)
+ return err;
}
napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi;
napi_disable(napi_rx); /* make sure current rx poll is done */
+ tx_qid = gve_xdp_tx_queue_id(priv, qid);
napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi;
napi_disable(napi_tx); /* make sure current tx poll is done */
- priv->rx[qid].xsk_pool = NULL;
- xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
- priv->tx[tx_qid].xsk_pool = NULL;
+ gve_unreg_xsk_pool(priv, qid);
smp_mb(); /* Make sure it is visible to the workers on datapath */
napi_enable(napi_rx);
- if (gve_rx_work_pending(&priv->rx[qid]))
- napi_schedule(napi_rx);
-
napi_enable(napi_tx);
- if (gve_tx_clean_pending(priv, &priv->tx[tx_qid]))
- napi_schedule(napi_tx);
+ if (gve_is_gqi(priv)) {
+ if (gve_rx_work_pending(&priv->rx[qid]))
+ napi_schedule(napi_rx);
+
+ if (gve_tx_clean_pending(priv, &priv->tx[tx_qid]))
+ napi_schedule(napi_tx);
+ }
-done:
- xsk_pool_dma_unmap(pool,
- DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
return 0;
}
static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
struct gve_priv *priv = netdev_priv(dev);
- int tx_queue_id = gve_xdp_tx_queue_id(priv, queue_id);
+ struct napi_struct *napi;
+
+ if (!gve_get_napi_enabled(priv))
+ return -ENETDOWN;
if (queue_id >= priv->rx_cfg.num_queues || !priv->xdp_prog)
return -EINVAL;
- if (flags & XDP_WAKEUP_TX) {
- struct gve_tx_ring *tx = &priv->tx[tx_queue_id];
- struct napi_struct *napi =
- &priv->ntfy_blocks[tx->ntfy_id].napi;
-
- if (!napi_if_scheduled_mark_missed(napi)) {
- /* Call local_bh_enable to trigger SoftIRQ processing */
- local_bh_disable();
- napi_schedule(napi);
- local_bh_enable();
- }
-
- tx->xdp_xsk_wakeup++;
+ napi = &priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_id)].napi;
+ if (!napi_if_scheduled_mark_missed(napi)) {
+ /* Call local_bh_enable to trigger SoftIRQ processing */
+ local_bh_disable();
+ napi_schedule(napi);
+ local_bh_enable();
}
return 0;
}
-static int verify_xdp_configuration(struct net_device *dev)
+static int gve_verify_xdp_configuration(struct net_device *dev,
+ struct netlink_ext_ack *extack)
{
struct gve_priv *priv = netdev_priv(dev);
+ u16 max_xdp_mtu;
if (dev->features & NETIF_F_LRO) {
- netdev_warn(dev, "XDP is not supported when LRO is on.\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "XDP is not supported when LRO is on.");
return -EOPNOTSUPP;
}
- if (priv->queue_format != GVE_GQI_QPL_FORMAT) {
- netdev_warn(dev, "XDP is not supported in mode %d.\n",
- priv->queue_format);
+ if (priv->header_split_enabled) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "XDP is not supported when header-data split is enabled.");
return -EOPNOTSUPP;
}
- if (dev->mtu > GVE_DEFAULT_RX_BUFFER_SIZE - sizeof(struct ethhdr) - GVE_RX_PAD) {
- netdev_warn(dev, "XDP is not supported for mtu %d.\n",
- dev->mtu);
+ if (priv->rx_cfg.packet_buffer_size != SZ_2K) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "XDP is not supported for Rx buf len %d, only %d supported.",
+ priv->rx_cfg.packet_buffer_size, SZ_2K);
+ return -EOPNOTSUPP;
+ }
+
+ max_xdp_mtu = priv->rx_cfg.packet_buffer_size - sizeof(struct ethhdr);
+ if (priv->queue_format == GVE_GQI_QPL_FORMAT)
+ max_xdp_mtu -= GVE_RX_PAD;
+
+ if (dev->mtu > max_xdp_mtu) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "XDP is not supported for mtu %d.",
+ dev->mtu);
return -EOPNOTSUPP;
}
if (priv->rx_cfg.num_queues != priv->tx_cfg.num_queues ||
(2 * priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)) {
- netdev_warn(dev, "XDP load failed: The number of configured RX queues %d should be equal to the number of configured TX queues %d and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues %d",
- priv->rx_cfg.num_queues,
- priv->tx_cfg.num_queues,
+ netdev_warn(dev,
+ "XDP load failed: The number of configured RX queues %d should be equal to the number of configured TX queues %d and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues %d.",
+ priv->rx_cfg.num_queues, priv->tx_cfg.num_queues,
priv->tx_cfg.max_queues);
+ NL_SET_ERR_MSG_MOD(extack,
+ "XDP load failed: The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues");
return -EINVAL;
}
return 0;
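
As a side note, the MTU ceiling enforced above can be reproduced with a small stand-alone sketch. It is illustration only and not part of this patch; the GVE_RX_PAD value of 2 used below is an assumption rather than the driver's definition, only the "buffer size minus Ethernet header (minus pad on GQI QPL)" arithmetic is taken from the check itself.

#include <stdio.h>

/* Hypothetical model of the XDP MTU bound: the usable MTU is the Rx packet
 * buffer minus the Ethernet header, and the GQI QPL format additionally
 * reserves a small pad (assumed to be 2 bytes here).
 */
#define ETH_HLEN		14
#define ASSUMED_GVE_RX_PAD	2

static unsigned int max_xdp_mtu(unsigned int pkt_buf_size, int gqi_qpl_format)
{
	unsigned int mtu = pkt_buf_size - ETH_HLEN;

	if (gqi_qpl_format)
		mtu -= ASSUMED_GVE_RX_PAD;
	return mtu;
}

int main(void)
{
	printf("DQO RDA, 2K buffer: %u\n", max_xdp_mtu(2048, 0)); /* 2034 */
	printf("GQI QPL, 2K buffer: %u\n", max_xdp_mtu(2048, 1)); /* 2032 */
	return 0;
}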
@@ -1769,7 +1761,7 @@ static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
struct gve_priv *priv = netdev_priv(dev);
int err;
- err = verify_xdp_configuration(dev);
+ err = gve_verify_xdp_configuration(dev, xdp->extack);
if (err)
return err;
switch (xdp->command) {
@@ -1785,6 +1777,26 @@ static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
+int gve_init_rss_config(struct gve_priv *priv, u16 num_queues)
+{
+ struct gve_rss_config *rss_config = &priv->rss_config;
+ struct ethtool_rxfh_param rxfh = {0};
+ u16 i;
+
+ if (!priv->cache_rss_config)
+ return 0;
+
+ for (i = 0; i < priv->rss_lut_size; i++)
+ rss_config->hash_lut[i] =
+ ethtool_rxfh_indir_default(i, num_queues);
+
+ netdev_rss_key_fill(rss_config->hash_key, priv->rss_key_size);
+
+ rxfh.hfunc = ETH_RSS_HASH_TOP;
+
+ return gve_adminq_configure_rss(priv, &rxfh);
+}
+
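For illustration only (not part of this patch), the default spread produced by ethtool_rxfh_indir_default() in gve_init_rss_config() amounts to a round-robin fill of the lookup table. A small userspace sketch of that behaviour, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical model of the default RSS indirection fill: entries are spread
 * round-robin across the active queues, so a queue-count change rebuilds an
 * even distribution.
 */
static uint32_t indir_default(uint32_t index, uint32_t num_queues)
{
	return index % num_queues;
}

int main(void)
{
	const uint32_t lut_size = 8, num_queues = 3;

	for (uint32_t i = 0; i < lut_size; i++)
		printf("lut[%u] = %u\n", i, indir_default(i, num_queues));
	return 0;
}
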
int gve_flow_rules_reset(struct gve_priv *priv)
{
if (!priv->max_flow_rules)
@@ -1799,7 +1811,7 @@ int gve_adjust_config(struct gve_priv *priv,
{
int err;
- /* Allocate resources for the new confiugration */
+ /* Allocate resources for the new configuration */
err = gve_queues_mem_alloc(priv, tx_alloc_cfg, rx_alloc_cfg);
if (err) {
netif_err(priv, drv, priv->dev,
@@ -1832,8 +1844,9 @@ int gve_adjust_config(struct gve_priv *priv,
}
int gve_adjust_queues(struct gve_priv *priv,
- struct gve_queue_config new_rx_config,
- struct gve_queue_config new_tx_config)
+ struct gve_rx_queue_config new_rx_config,
+ struct gve_tx_queue_config new_tx_config,
+ bool reset_rss)
{
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
@@ -1844,14 +1857,19 @@ int gve_adjust_queues(struct gve_priv *priv,
/* Relay the new config from ethtool */
tx_alloc_cfg.qcfg = &new_tx_config;
rx_alloc_cfg.qcfg_tx = &new_tx_config;
- rx_alloc_cfg.qcfg = &new_rx_config;
- tx_alloc_cfg.num_rings = new_tx_config.num_queues;
+ rx_alloc_cfg.qcfg_rx = &new_rx_config;
+ rx_alloc_cfg.reset_rss = reset_rss;
if (netif_running(priv->dev)) {
err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
return err;
}
/* Set the config for the next up. */
+ if (reset_rss) {
+ err = gve_init_rss_config(priv, new_rx_config.num_queues);
+ if (err)
+ return err;
+ }
priv->tx_cfg = new_tx_config;
priv->rx_cfg = new_rx_config;
@@ -1880,7 +1898,7 @@ static void gve_turndown(struct gve_priv *priv)
netif_queue_set_napi(priv->dev, idx,
NETDEV_QUEUE_TYPE_TX, NULL);
- napi_disable(&block->napi);
+ napi_disable_locked(&block->napi);
}
for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@@ -1891,14 +1909,19 @@ static void gve_turndown(struct gve_priv *priv)
netif_queue_set_napi(priv->dev, idx, NETDEV_QUEUE_TYPE_RX,
NULL);
- napi_disable(&block->napi);
+ napi_disable_locked(&block->napi);
}
/* Stop tx queues */
netif_tx_disable(priv->dev);
+ xdp_features_clear_redirect_target_locked(priv->dev);
+
gve_clear_napi_enabled(priv);
gve_clear_report_stats(priv);
+
+ /* Make sure that all traffic is finished processing. */
+ synchronize_net();
}
static void gve_turnup(struct gve_priv *priv)
@@ -1916,7 +1939,7 @@ static void gve_turnup(struct gve_priv *priv)
if (!gve_tx_was_added_to_block(priv, idx))
continue;
- napi_enable(&block->napi);
+ napi_enable_locked(&block->napi);
if (idx < priv->tx_cfg.num_queues)
netif_queue_set_napi(priv->dev, idx,
@@ -1944,7 +1967,7 @@ static void gve_turnup(struct gve_priv *priv)
if (!gve_rx_was_added_to_block(priv, idx))
continue;
- napi_enable(&block->napi);
+ napi_enable_locked(&block->napi);
netif_queue_set_napi(priv->dev, idx, NETDEV_QUEUE_TYPE_RX,
&block->napi);
@@ -1963,6 +1986,9 @@ static void gve_turnup(struct gve_priv *priv)
napi_schedule(&block->napi);
}
+ if (priv->tx_cfg.num_xdp_queues && gve_supports_xdp_xmit(priv))
+ xdp_features_set_redirect_target_locked(priv->dev, false);
+
gve_set_napi_enabled(priv);
}
@@ -1975,72 +2001,104 @@ static void gve_turnup_and_check_status(struct gve_priv *priv)
gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
}
-static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
+static struct gve_notify_block *gve_get_tx_notify_block(struct gve_priv *priv,
+ unsigned int txqueue)
{
- struct gve_notify_block *block;
- struct gve_tx_ring *tx = NULL;
- struct gve_priv *priv;
- u32 last_nic_done;
- u32 current_time;
u32 ntfy_idx;
- netdev_info(dev, "Timeout on tx queue, %d", txqueue);
- priv = netdev_priv(dev);
if (txqueue > priv->tx_cfg.num_queues)
- goto reset;
+ return NULL;
ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue);
if (ntfy_idx >= priv->num_ntfy_blks)
- goto reset;
+ return NULL;
+
+ return &priv->ntfy_blocks[ntfy_idx];
+}
+
+static bool gve_tx_timeout_try_q_kick(struct gve_priv *priv,
+ unsigned int txqueue)
+{
+ struct gve_notify_block *block;
+ u32 current_time;
- block = &priv->ntfy_blocks[ntfy_idx];
- tx = block->tx;
+ block = gve_get_tx_notify_block(priv, txqueue);
+
+ if (!block)
+ return false;
current_time = jiffies_to_msecs(jiffies);
- if (tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time)
- goto reset;
+ if (block->tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time)
+ return false;
- /* Check to see if there are missed completions, which will allow us to
- * kick the queue.
- */
- last_nic_done = gve_tx_load_event_counter(priv, tx);
- if (last_nic_done - tx->done) {
- netdev_info(dev, "Kicking queue %d", txqueue);
- iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
- napi_schedule(&block->napi);
- tx->last_kick_msec = current_time;
- goto out;
- } // Else reset.
+ netdev_info(priv->dev, "Kicking queue %d", txqueue);
+ napi_schedule(&block->napi);
+ block->tx->last_kick_msec = current_time;
+ return true;
+}
-reset:
- gve_schedule_reset(priv);
+static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct gve_notify_block *block;
+ struct gve_priv *priv;
-out:
- if (tx)
- tx->queue_timeout++;
+ netdev_info(dev, "Timeout on tx queue, %d", txqueue);
+ priv = netdev_priv(dev);
+
+ if (!gve_tx_timeout_try_q_kick(priv, txqueue))
+ gve_schedule_reset(priv);
+
+ block = gve_get_tx_notify_block(priv, txqueue);
+ if (block)
+ block->tx->queue_timeout++;
priv->tx_timeo_cnt++;
}
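
The timeout path above now tries a rate-limited queue kick before falling back to a full reset. A minimal userspace sketch of that decision follows; it is not part of the patch, and the gap value is made up (the real MIN_TX_TIMEOUT_GAP comes from the driver headers).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ASSUMED_MIN_TX_TIMEOUT_GAP_MS 1000 /* illustrative only */

struct toy_txq {
	uint64_t last_kick_msec;
};

/* Returns true if the stalled queue may be kicked (NAPI rescheduled);
 * false means the caller should escalate to a device reset.
 */
static bool try_queue_kick(struct toy_txq *q, uint64_t now_msec)
{
	if (q->last_kick_msec + ASSUMED_MIN_TX_TIMEOUT_GAP_MS > now_msec)
		return false;

	q->last_kick_msec = now_msec; /* stands in for napi_schedule() */
	return true;
}

int main(void)
{
	struct toy_txq q = { .last_kick_msec = 0 };

	printf("t=1500ms: %s\n", try_queue_kick(&q, 1500) ? "kick" : "reset");
	printf("t=1800ms: %s\n", try_queue_kick(&q, 1800) ? "kick" : "reset");
	return 0;
}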
-u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hsplit)
+/* Header split is only supported on the DQO RDA queue format. If XDP is enabled,
+ * header split is not allowed.
+ */
+bool gve_header_split_supported(const struct gve_priv *priv)
{
- if (enable_hsplit && priv->max_rx_buffer_size >= GVE_MAX_RX_BUFFER_SIZE)
- return GVE_MAX_RX_BUFFER_SIZE;
- else
- return GVE_DEFAULT_RX_BUFFER_SIZE;
+ return priv->header_buf_size &&
+ priv->queue_format == GVE_DQO_RDA_FORMAT && !priv->xdp_prog;
}
-/* header-split is not supported on non-DQO_RDA yet even if device advertises it */
-bool gve_header_split_supported(const struct gve_priv *priv)
+int gve_set_rx_buf_len_config(struct gve_priv *priv, u32 rx_buf_len,
+ struct netlink_ext_ack *extack,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
- return priv->header_buf_size && priv->queue_format == GVE_DQO_RDA_FORMAT;
+ u32 old_rx_buf_len = rx_alloc_cfg->packet_buffer_size;
+
+ if (rx_buf_len == old_rx_buf_len)
+ return 0;
+
+ /* device options may not always contain support for 4K buffers */
+ if (!gve_is_dqo(priv) || priv->max_rx_buffer_size < SZ_4K) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Modifying Rx buf len is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (priv->xdp_prog && rx_buf_len != SZ_2K) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Rx buf len can only be 2048 when XDP is on");
+ return -EINVAL;
+ }
+
+ if (rx_buf_len != SZ_2K && rx_buf_len != SZ_4K) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Rx buf len can only be 2048 or 4096");
+ return -EINVAL;
+ }
+ rx_alloc_cfg->packet_buffer_size = rx_buf_len;
+
+ return 0;
}
-int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split)
+int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
- struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
- struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
bool enable_hdr_split;
- int err = 0;
if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN)
return 0;
@@ -2058,14 +2116,9 @@ int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split)
if (enable_hdr_split == priv->header_split_enabled)
return 0;
- gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
-
- rx_alloc_cfg.enable_header_split = enable_hdr_split;
- rx_alloc_cfg.packet_buffer_size = gve_get_pkt_buf_size(priv, enable_hdr_split);
+ rx_alloc_cfg->enable_header_split = enable_hdr_split;
- if (netif_running(priv->dev))
- err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
- return err;
+ return 0;
}
static int gve_set_features(struct net_device *netdev,
@@ -2081,6 +2134,12 @@ static int gve_set_features(struct net_device *netdev,
if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) {
netdev->features ^= NETIF_F_LRO;
+ if (priv->xdp_prog && (netdev->features & NETIF_F_LRO)) {
+ netdev_warn(netdev,
+ "XDP is not supported when LRO is on.\n");
+ err = -EOPNOTSUPP;
+ goto revert_features;
+ }
if (netif_running(netdev)) {
err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
if (err)
@@ -2100,6 +2159,42 @@ revert_features:
return err;
}
+static int gve_get_ts_config(struct net_device *dev,
+ struct kernel_hwtstamp_config *kernel_config)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+
+ *kernel_config = priv->ts_config;
+ return 0;
+}
+
+static int gve_set_ts_config(struct net_device *dev,
+ struct kernel_hwtstamp_config *kernel_config,
+ struct netlink_ext_ack *extack)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+
+ if (kernel_config->tx_type != HWTSTAMP_TX_OFF) {
+ NL_SET_ERR_MSG_MOD(extack, "TX timestamping is not supported");
+ return -ERANGE;
+ }
+
+ if (kernel_config->rx_filter != HWTSTAMP_FILTER_NONE) {
+ if (!priv->nic_ts_report) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "RX timestamping is not supported");
+ kernel_config->rx_filter = HWTSTAMP_FILTER_NONE;
+ return -EOPNOTSUPP;
+ }
+
+ kernel_config->rx_filter = HWTSTAMP_FILTER_ALL;
+ }
+
+ priv->ts_config.rx_filter = kernel_config->rx_filter;
+
+ return 0;
+}
+
static const struct net_device_ops gve_netdev_ops = {
.ndo_start_xmit = gve_start_xmit,
.ndo_features_check = gve_features_check,
@@ -2111,6 +2206,8 @@ static const struct net_device_ops gve_netdev_ops = {
.ndo_bpf = gve_xdp,
.ndo_xdp_xmit = gve_xdp_xmit,
.ndo_xsk_wakeup = gve_xsk_wakeup,
+ .ndo_hwtstamp_get = gve_get_ts_config,
+ .ndo_hwtstamp_set = gve_set_ts_config,
};
static void gve_handle_status(struct gve_priv *priv, u32 status)
@@ -2136,7 +2233,9 @@ static void gve_handle_reset(struct gve_priv *priv)
if (gve_get_do_reset(priv)) {
rtnl_lock();
+ netdev_lock(priv->dev);
gve_reset(priv, false);
+ netdev_unlock(priv->dev);
rtnl_unlock();
}
}
@@ -2210,7 +2309,7 @@ void gve_handle_report_stats(struct gve_priv *priv)
};
stats[stats_idx++] = (struct stats) {
.stat_name = cpu_to_be32(RX_BUFFERS_POSTED),
- .value = cpu_to_be64(priv->rx[0].fill_cnt),
+ .value = cpu_to_be64(priv->rx[idx].fill_cnt),
.queue_id = cpu_to_be32(idx),
};
}
@@ -2232,16 +2331,27 @@ static void gve_service_task(struct work_struct *work)
static void gve_set_netdev_xdp_features(struct gve_priv *priv)
{
+ xdp_features_t xdp_features;
+
if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
- priv->dev->xdp_features = NETDEV_XDP_ACT_BASIC;
- priv->dev->xdp_features |= NETDEV_XDP_ACT_REDIRECT;
- priv->dev->xdp_features |= NETDEV_XDP_ACT_NDO_XMIT;
- priv->dev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
+ xdp_features = NETDEV_XDP_ACT_BASIC;
+ xdp_features |= NETDEV_XDP_ACT_REDIRECT;
+ xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
+ } else if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
+ xdp_features = NETDEV_XDP_ACT_BASIC;
+ xdp_features |= NETDEV_XDP_ACT_REDIRECT;
+ xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
} else {
- priv->dev->xdp_features = 0;
+ xdp_features = 0;
}
+
+ xdp_set_features_flag_locked(priv->dev, xdp_features);
}
+static const struct xdp_metadata_ops gve_xdp_metadata_ops = {
+ .xmo_rx_timestamp = gve_xdp_rx_timestamp,
+};
+
static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
{
int num_ntfy;
@@ -2289,7 +2399,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
goto err;
}
- /* Big TCP is only supported on DQ*/
+ /* Big TCP is only supported on DQO */
if (!gve_is_gqi(priv))
netif_set_tso_max_size(priv->dev, GVE_DQO_TX_MAX);
@@ -2299,6 +2409,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
*/
priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
priv->mgmt_msix_idx = priv->num_ntfy_blks;
+ priv->numa_node = dev_to_node(&priv->pdev->dev);
priv->tx_cfg.max_queues =
min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2);
@@ -2313,6 +2424,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues,
priv->rx_cfg.num_queues);
}
+ priv->tx_cfg.num_xdp_queues = 0;
dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n",
priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
@@ -2324,11 +2436,29 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
priv->rx_coalesce_usecs = GVE_RX_IRQ_RATELIMIT_US_DQO;
}
+ priv->ts_config.tx_type = HWTSTAMP_TX_OFF;
+ priv->ts_config.rx_filter = HWTSTAMP_FILTER_NONE;
+
setup_device:
+ priv->xsk_pools = bitmap_zalloc(priv->rx_cfg.max_queues, GFP_KERNEL);
+ if (!priv->xsk_pools) {
+ err = -ENOMEM;
+ goto err;
+ }
+
gve_set_netdev_xdp_features(priv);
+ if (!gve_is_gqi(priv))
+ priv->dev->xdp_metadata_ops = &gve_xdp_metadata_ops;
+
err = gve_setup_device_resources(priv);
- if (!err)
- return 0;
+ if (err)
+ goto err_free_xsk_bitmap;
+
+ return 0;
+
+err_free_xsk_bitmap:
+ bitmap_free(priv->xsk_pools);
+ priv->xsk_pools = NULL;
err:
gve_adminq_free(&priv->pdev->dev, priv);
return err;
@@ -2338,6 +2468,8 @@ static void gve_teardown_priv_resources(struct gve_priv *priv)
{
gve_teardown_device_resources(priv);
gve_adminq_free(&priv->pdev->dev, priv);
+ bitmap_free(priv->xsk_pools);
+ priv->xsk_pools = NULL;
}
static void gve_trigger_reset(struct gve_priv *priv)
@@ -2693,7 +2825,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
priv->service_task_flags = 0x0;
priv->state_flags = 0x0;
priv->ethtool_flags = 0x0;
- priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE;
+ priv->rx_cfg.packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
priv->max_rx_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
gve_set_probe_in_progress(priv);
@@ -2712,6 +2844,9 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto abort_with_wq;
+ if (!gve_is_gqi(priv) && !gve_is_qpl(priv))
+ dev->netmem_tx = true;
+
err = register_netdev(dev);
if (err)
goto abort_with_gve_init;
@@ -2768,7 +2903,10 @@ static void gve_shutdown(struct pci_dev *pdev)
struct gve_priv *priv = netdev_priv(netdev);
bool was_up = netif_running(priv->dev);
+ netif_device_detach(netdev);
+
rtnl_lock();
+ netdev_lock(netdev);
if (was_up && gve_close(priv->dev)) {
/* If the dev was up, attempt to close, if close fails, reset */
gve_reset_and_teardown(priv, was_up);
@@ -2776,6 +2914,7 @@ static void gve_shutdown(struct pci_dev *pdev)
/* If the dev wasn't up or close worked, finish tearing down */
gve_teardown_priv_resources(priv);
}
+ netdev_unlock(netdev);
rtnl_unlock();
}
@@ -2788,6 +2927,7 @@ static int gve_suspend(struct pci_dev *pdev, pm_message_t state)
priv->suspend_cnt++;
rtnl_lock();
+ netdev_lock(netdev);
if (was_up && gve_close(priv->dev)) {
/* If the dev was up, attempt to close, if close fails, reset */
gve_reset_and_teardown(priv, was_up);
@@ -2796,6 +2936,7 @@ static int gve_suspend(struct pci_dev *pdev, pm_message_t state)
gve_teardown_priv_resources(priv);
}
priv->up_before_suspend = was_up;
+ netdev_unlock(netdev);
rtnl_unlock();
return 0;
}
@@ -2808,7 +2949,9 @@ static int gve_resume(struct pci_dev *pdev)
priv->resume_cnt++;
rtnl_lock();
+ netdev_lock(netdev);
err = gve_reset_recovery(priv, priv->up_before_suspend);
+ netdev_unlock(netdev);
rtnl_unlock();
return err;
}
diff --git a/drivers/net/ethernet/google/gve/gve_ptp.c b/drivers/net/ethernet/google/gve/gve_ptp.c
new file mode 100644
index 000000000000..073677d82ee8
--- /dev/null
+++ b/drivers/net/ethernet/google/gve/gve_ptp.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2025 Google LLC
+ */
+
+#include "gve.h"
+#include "gve_adminq.h"
+
+/* Interval to schedule a nic timestamp calibration, 250ms. */
+#define GVE_NIC_TS_SYNC_INTERVAL_MS 250
+
+/* Read the nic timestamp from hardware via the admin queue. */
+int gve_clock_nic_ts_read(struct gve_priv *priv)
+{
+ u64 nic_raw;
+ int err;
+
+ err = gve_adminq_report_nic_ts(priv, priv->nic_ts_report_bus);
+ if (err)
+ return err;
+
+ nic_raw = be64_to_cpu(priv->nic_ts_report->nic_timestamp);
+ WRITE_ONCE(priv->last_sync_nic_counter, nic_raw);
+
+ return 0;
+}
+
+static int gve_ptp_gettimex64(struct ptp_clock_info *info,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ return -EOPNOTSUPP;
+}
+
+static int gve_ptp_settime64(struct ptp_clock_info *info,
+ const struct timespec64 *ts)
+{
+ return -EOPNOTSUPP;
+}
+
+static long gve_ptp_do_aux_work(struct ptp_clock_info *info)
+{
+ const struct gve_ptp *ptp = container_of(info, struct gve_ptp, info);
+ struct gve_priv *priv = ptp->priv;
+ int err;
+
+ if (gve_get_reset_in_progress(priv) || !gve_get_admin_queue_ok(priv))
+ goto out;
+
+ err = gve_clock_nic_ts_read(priv);
+ if (err && net_ratelimit())
+ dev_err(&priv->pdev->dev,
+ "%s read err %d\n", __func__, err);
+
+out:
+ return msecs_to_jiffies(GVE_NIC_TS_SYNC_INTERVAL_MS);
+}
+
+static const struct ptp_clock_info gve_ptp_caps = {
+ .owner = THIS_MODULE,
+ .name = "gve clock",
+ .gettimex64 = gve_ptp_gettimex64,
+ .settime64 = gve_ptp_settime64,
+ .do_aux_work = gve_ptp_do_aux_work,
+};
+
+static int gve_ptp_init(struct gve_priv *priv)
+{
+ struct gve_ptp *ptp;
+ int err;
+
+ if (!priv->nic_timestamp_supported) {
+ dev_dbg(&priv->pdev->dev, "Device does not support PTP\n");
+ return -EOPNOTSUPP;
+ }
+
+ priv->ptp = kzalloc(sizeof(*priv->ptp), GFP_KERNEL);
+ if (!priv->ptp)
+ return -ENOMEM;
+
+ ptp = priv->ptp;
+ ptp->info = gve_ptp_caps;
+ ptp->clock = ptp_clock_register(&ptp->info, &priv->pdev->dev);
+
+ if (IS_ERR(ptp->clock)) {
+ dev_err(&priv->pdev->dev, "PTP clock registration failed\n");
+ err = PTR_ERR(ptp->clock);
+ goto free_ptp;
+ }
+
+ ptp->priv = priv;
+ return 0;
+
+free_ptp:
+ kfree(ptp);
+ priv->ptp = NULL;
+ return err;
+}
+
+static void gve_ptp_release(struct gve_priv *priv)
+{
+ struct gve_ptp *ptp = priv->ptp;
+
+ if (!ptp)
+ return;
+
+ if (ptp->clock)
+ ptp_clock_unregister(ptp->clock);
+
+ kfree(ptp);
+ priv->ptp = NULL;
+}
+
+int gve_init_clock(struct gve_priv *priv)
+{
+ int err;
+
+ if (!priv->nic_timestamp_supported)
+ return 0;
+
+ err = gve_ptp_init(priv);
+ if (err)
+ return err;
+
+ priv->nic_ts_report =
+ dma_alloc_coherent(&priv->pdev->dev,
+ sizeof(struct gve_nic_ts_report),
+ &priv->nic_ts_report_bus,
+ GFP_KERNEL);
+ if (!priv->nic_ts_report) {
+ dev_err(&priv->pdev->dev, "%s dma alloc error\n", __func__);
+ err = -ENOMEM;
+ goto release_ptp;
+ }
+ err = gve_clock_nic_ts_read(priv);
+ if (err) {
+ dev_err(&priv->pdev->dev, "failed to read NIC clock %d\n", err);
+ goto release_nic_ts_report;
+ }
+ ptp_schedule_worker(priv->ptp->clock,
+ msecs_to_jiffies(GVE_NIC_TS_SYNC_INTERVAL_MS));
+
+ return 0;
+
+release_nic_ts_report:
+ dma_free_coherent(&priv->pdev->dev,
+ sizeof(struct gve_nic_ts_report),
+ priv->nic_ts_report, priv->nic_ts_report_bus);
+ priv->nic_ts_report = NULL;
+release_ptp:
+ gve_ptp_release(priv);
+ return err;
+}
+
+void gve_teardown_clock(struct gve_priv *priv)
+{
+ gve_ptp_release(priv);
+
+ if (priv->nic_ts_report) {
+ dma_free_coherent(&priv->pdev->dev,
+ sizeof(struct gve_nic_ts_report),
+ priv->nic_ts_report, priv->nic_ts_report_bus);
+ priv->nic_ts_report = NULL;
+ }
+}
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index acb73d4d0de6..ec424d2f4f57 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -141,12 +141,15 @@ void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}
-static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info,
- dma_addr_t addr, struct page *page, __be64 *slot_addr)
+static void gve_setup_rx_buffer(struct gve_rx_ring *rx,
+ struct gve_rx_slot_page_info *page_info,
+ dma_addr_t addr, struct page *page,
+ __be64 *slot_addr)
{
page_info->page = page;
page_info->page_offset = 0;
page_info->page_address = page_address(page);
+ page_info->buf_size = rx->packet_buffer_size;
*slot_addr = cpu_to_be64(addr);
/* The page already has 1 ref */
page_ref_add(page, INT_MAX - 1);
@@ -171,7 +174,7 @@ static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
return err;
}
- gve_setup_rx_buffer(page_info, dma, page, &data_slot->addr);
+ gve_setup_rx_buffer(rx, page_info, dma, page, &data_slot->addr);
return 0;
}
@@ -189,8 +192,8 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
*/
slots = rx->mask + 1;
- rx->data.page_info = kvzalloc(slots *
- sizeof(*rx->data.page_info), GFP_KERNEL);
+ rx->data.page_info = kvcalloc_node(slots, sizeof(*rx->data.page_info),
+ GFP_KERNEL, priv->numa_node);
if (!rx->data.page_info)
return -ENOMEM;
@@ -199,7 +202,8 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
struct page *page = rx->data.qpl->pages[i];
dma_addr_t addr = i * PAGE_SIZE;
- gve_setup_rx_buffer(&rx->data.page_info[i], addr, page,
+ gve_setup_rx_buffer(rx, &rx->data.page_info[i], addr,
+ page,
&rx->data.data_ring[i].qpl_offset);
continue;
}
@@ -212,7 +216,8 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
if (!rx->data.raw_addressing) {
for (j = 0; j < rx->qpl_copy_pool_mask + 1; j++) {
- struct page *page = alloc_page(GFP_KERNEL);
+ struct page *page = alloc_pages_node(priv->numa_node,
+ GFP_KERNEL, 0);
if (!page) {
err = -ENOMEM;
@@ -222,6 +227,7 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
rx->qpl_copy_pool[j].page = page;
rx->qpl_copy_pool[j].page_offset = 0;
rx->qpl_copy_pool[j].page_address = page_address(page);
+ rx->qpl_copy_pool[j].buf_size = rx->packet_buffer_size;
/* The page already has 1 ref. */
page_ref_add(page, INT_MAX - 1);
@@ -283,6 +289,7 @@ int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
rx->gve = priv;
rx->q_num = idx;
+ rx->packet_buffer_size = cfg->packet_buffer_size;
rx->mask = slots - 1;
rx->data.raw_addressing = cfg->raw_addressing;
@@ -297,10 +304,9 @@ int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
rx->qpl_copy_pool_mask = min_t(u32, U32_MAX, slots * 2) - 1;
rx->qpl_copy_pool_head = 0;
- rx->qpl_copy_pool = kvcalloc(rx->qpl_copy_pool_mask + 1,
- sizeof(rx->qpl_copy_pool[0]),
- GFP_KERNEL);
-
+ rx->qpl_copy_pool = kvcalloc_node(rx->qpl_copy_pool_mask + 1,
+ sizeof(rx->qpl_copy_pool[0]),
+ GFP_KERNEL, priv->numa_node);
if (!rx->qpl_copy_pool) {
err = -ENOMEM;
goto abort_with_slots;
@@ -351,7 +357,6 @@ int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
rx->db_threshold = slots / 2;
gve_rx_init_ring_state_gqi(rx);
- rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
gve_rx_ctx_clear(&rx->ctx);
return 0;
@@ -385,12 +390,12 @@ int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
int err = 0;
int i, j;
- rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
+ rx = kvcalloc(cfg->qcfg_rx->max_queues, sizeof(struct gve_rx_ring),
GFP_KERNEL);
if (!rx)
return -ENOMEM;
- for (i = 0; i < cfg->qcfg->num_queues; i++) {
+ for (i = 0; i < cfg->qcfg_rx->num_queues; i++) {
err = gve_rx_alloc_ring_gqi(priv, cfg, &rx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
@@ -419,7 +424,7 @@ void gve_rx_free_rings_gqi(struct gve_priv *priv,
if (!rx)
return;
- for (i = 0; i < cfg->qcfg->num_queues; i++)
+ for (i = 0; i < cfg->qcfg_rx->num_queues; i++)
gve_rx_free_ring_gqi(priv, &rx[i], cfg);
kvfree(rx);
@@ -590,7 +595,7 @@ static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx,
copy_page_info->pad = page_info->pad;
skb = gve_rx_add_frags(napi, copy_page_info,
- rx->packet_buffer_size, len, ctx);
+ copy_page_info->buf_size, len, ctx);
if (unlikely(!skb))
return NULL;
@@ -630,7 +635,8 @@ gve_rx_qpl(struct device *dev, struct net_device *netdev,
* device.
*/
if (page_info->can_flip) {
- skb = gve_rx_add_frags(napi, page_info, rx->packet_buffer_size, len, ctx);
+ skb = gve_rx_add_frags(napi, page_info, page_info->buf_size,
+ len, ctx);
/* No point in recycling if we didn't get the skb */
if (skb) {
/* Make sure that the page isn't freed. */
@@ -680,7 +686,7 @@ static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
skb = gve_rx_raw_addressing(&priv->pdev->dev, netdev,
page_info, len, napi,
data_slot,
- rx->packet_buffer_size, ctx);
+ page_info->buf_size, ctx);
} else {
skb = gve_rx_qpl(&priv->pdev->dev, netdev, rx,
page_info, len, napi, data_slot);
@@ -855,7 +861,7 @@ static void gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
void *old_data;
int xdp_act;
- xdp_init_buff(&xdp, rx->packet_buffer_size, &rx->xdp_rxq);
+ xdp_init_buff(&xdp, page_info->buf_size, &rx->xdp_rxq);
xdp_prepare_buff(&xdp, page_info->page_address +
page_info->page_offset, GVE_RX_PAD,
len, false);
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 8ac0047f1ada..f1bd8f5d5732 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -8,6 +8,7 @@
#include "gve_dqo.h"
#include "gve_adminq.h"
#include "gve_utils.h"
+#include <linux/bpf.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
@@ -15,6 +16,7 @@
#include <net/ip6_checksum.h>
#include <net/ipv6.h>
#include <net/tcp.h>
+#include <net/xdp_sock_drv.h>
static void gve_rx_free_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
{
@@ -109,10 +111,13 @@ static void gve_rx_reset_ring_dqo(struct gve_priv *priv, int idx)
void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx)
{
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
+ struct gve_rx_ring *rx = &priv->rx[idx];
if (!gve_rx_was_added_to_block(priv, idx))
return;
+ if (rx->dqo.page_pool)
+ page_pool_disable_direct_recycling(rx->dqo.page_pool);
gve_remove_napi(priv, ntfy_idx);
gve_rx_remove_from_block(priv, idx);
gve_rx_reset_ring_dqo(priv, idx);
@@ -145,6 +150,10 @@ void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
gve_free_to_page_pool(rx, bs, false);
else
gve_free_qpl_page_dqo(bs);
+ if (gve_buf_state_is_allocated(rx, bs) && bs->xsk_buff) {
+ xsk_buff_free(bs->xsk_buff);
+ bs->xsk_buff = NULL;
+ }
}
if (rx->dqo.qpl) {
@@ -221,12 +230,26 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
memset(rx, 0, sizeof(*rx));
rx->gve = priv;
rx->q_num = idx;
+ rx->packet_buffer_size = cfg->packet_buffer_size;
+
+ if (cfg->xdp) {
+ rx->packet_buffer_truesize = GVE_XDP_RX_BUFFER_SIZE_DQO;
+ rx->rx_headroom = XDP_PACKET_HEADROOM;
+ } else {
+ rx->packet_buffer_truesize = rx->packet_buffer_size;
+ rx->rx_headroom = 0;
+ }
+
+ /* struct gve_xdp_buff is overlaid on struct xdp_buff_xsk and uses
+ * the 24-byte cb field to store gve-specific data.
+ */
+ XSK_CHECK_PRIV_TYPE(struct gve_xdp_buff);
rx->dqo.num_buf_states = cfg->raw_addressing ? buffer_queue_slots :
gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
- rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
- sizeof(rx->dqo.buf_states[0]),
- GFP_KERNEL);
+ rx->dqo.buf_states = kvcalloc_node(rx->dqo.num_buf_states,
+ sizeof(rx->dqo.buf_states[0]),
+ GFP_KERNEL, priv->numa_node);
if (!rx->dqo.buf_states)
return -ENOMEM;
@@ -251,7 +274,7 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
goto err;
if (cfg->raw_addressing) {
- pool = gve_rx_create_page_pool(priv, rx);
+ pool = gve_rx_create_page_pool(priv, rx, cfg->xdp);
if (IS_ERR(pool))
goto err;
@@ -297,12 +320,12 @@ int gve_rx_alloc_rings_dqo(struct gve_priv *priv,
int err;
int i;
- rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
+ rx = kvcalloc(cfg->qcfg_rx->max_queues, sizeof(struct gve_rx_ring),
GFP_KERNEL);
if (!rx)
return -ENOMEM;
- for (i = 0; i < cfg->qcfg->num_queues; i++) {
+ for (i = 0; i < cfg->qcfg_rx->num_queues; i++) {
err = gve_rx_alloc_ring_dqo(priv, cfg, &rx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
@@ -331,7 +354,7 @@ void gve_rx_free_rings_dqo(struct gve_priv *priv,
if (!rx)
return;
- for (i = 0; i < cfg->qcfg->num_queues; i++)
+ for (i = 0; i < cfg->qcfg_rx->num_queues; i++)
gve_rx_free_ring_dqo(priv, &rx[i], cfg);
kvfree(rx);
@@ -425,6 +448,53 @@ static void gve_rx_skb_hash(struct sk_buff *skb,
skb_set_hash(skb, le32_to_cpu(compl_desc->hash), hash_type);
}
+/* Expand the hardware timestamp to its full 64-bit width and add it to the
+ * skb.
+ *
+ * This algorithm works by using the passed hardware timestamp to generate a
+ * diff relative to the last read of the nic clock. This diff can be positive or
+ * negative, as it is possible that we have read the clock more recently than
+ * the hardware has received this packet. To detect this, we use the high bit of
+ * the diff, and assume that the read is more recent if the high bit is set. In
+ * this case we invert the process.
+ *
+ * Note that this means if the time delta between packet reception and the last
+ * clock read is greater than ~2 seconds, this will provide invalid results.
+ */
+static ktime_t gve_rx_get_hwtstamp(struct gve_priv *gve, u32 hwts)
+{
+ u64 last_read = READ_ONCE(gve->last_sync_nic_counter);
+ u32 low = (u32)last_read;
+ s32 diff = hwts - low;
+
+ return ns_to_ktime(last_read + diff);
+}
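+
The wrap handling above can be sanity-checked with a stand-alone sketch. It is not part of this patch and the names are hypothetical, but it follows the same signed 32-bit diff trick, which stays correct while the packet timestamp lies within roughly +/-2.1 s of the last clock read, comfortably covered by the 250 ms sync interval.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical model of the expansion: combine the 32-bit hardware timestamp
 * (low bits of a nanosecond counter) with the last 64-bit counter value read
 * from the NIC clock, letting the signed difference resolve older vs newer.
 */
static uint64_t expand_hwts(uint64_t last_sync_counter, uint32_t hwts)
{
	uint32_t low = (uint32_t)last_sync_counter;
	int32_t diff = (int32_t)(hwts - low);

	return last_sync_counter + diff;
}

int main(void)
{
	uint64_t last = 0x100000005ULL;

	/* Packet stamped 8 ns before the last clock read. */
	printf("%llu\n", (unsigned long long)expand_hwts(last, 0xfffffffdu));
	/* Packet stamped 5 ns after the last clock read. */
	printf("%llu\n", (unsigned long long)expand_hwts(last, 0x0000000au));
	return 0;
}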
+
+static void gve_rx_skb_hwtstamp(struct gve_rx_ring *rx,
+ const struct gve_rx_compl_desc_dqo *desc)
+{
+ struct sk_buff *skb = rx->ctx.skb_head;
+
+ if (desc->ts_sub_nsecs_low & GVE_DQO_RX_HWTSTAMP_VALID)
+ skb_hwtstamps(skb)->hwtstamp =
+ gve_rx_get_hwtstamp(rx->gve, le32_to_cpu(desc->ts));
+}
+
+int gve_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
+{
+ const struct gve_xdp_buff *ctx = (void *)_ctx;
+
+ if (!ctx->gve->nic_ts_report)
+ return -ENODATA;
+
+ if (!(ctx->compl_desc->ts_sub_nsecs_low & GVE_DQO_RX_HWTSTAMP_VALID))
+ return -ENODATA;
+
+ *timestamp = gve_rx_get_hwtstamp(ctx->gve,
+ le32_to_cpu(ctx->compl_desc->ts));
+ return 0;
+}
+
static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx)
{
if (!rx->ctx.skb_head)
@@ -452,7 +522,7 @@ static int gve_rx_copy_ondemand(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state,
u16 buf_len)
{
- struct page *page = alloc_page(GFP_ATOMIC);
+ struct page *page = alloc_pages_node(rx->gve->numa_node, GFP_ATOMIC, 0);
int num_frags;
if (!page)
@@ -474,6 +544,25 @@ static int gve_rx_copy_ondemand(struct gve_rx_ring *rx,
return 0;
}
+static void gve_skb_add_rx_frag(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state,
+ int num_frags, u16 buf_len)
+{
+ if (rx->dqo.page_pool) {
+ skb_add_rx_frag_netmem(rx->ctx.skb_tail, num_frags,
+ buf_state->page_info.netmem,
+ buf_state->page_info.page_offset +
+ buf_state->page_info.pad, buf_len,
+ buf_state->page_info.buf_size);
+ } else {
+ skb_add_rx_frag(rx->ctx.skb_tail, num_frags,
+ buf_state->page_info.page,
+ buf_state->page_info.page_offset +
+ buf_state->page_info.pad, buf_len,
+ buf_state->page_info.buf_size);
+ }
+}
+
/* Chains multi skbs for single rx packet.
* Returns 0 if buffer is appended, -1 otherwise.
*/
@@ -511,14 +600,178 @@ static int gve_rx_append_frags(struct napi_struct *napi,
if (gve_rx_should_trigger_copy_ondemand(rx))
return gve_rx_copy_ondemand(rx, buf_state, buf_len);
- skb_add_rx_frag(rx->ctx.skb_tail, num_frags,
- buf_state->page_info.page,
- buf_state->page_info.page_offset,
- buf_len, buf_state->page_info.buf_size);
+ gve_skb_add_rx_frag(rx, buf_state, num_frags, buf_len);
gve_reuse_buffer(rx, buf_state);
return 0;
}
+static int gve_xdp_tx_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct xdp_buff *xdp)
+{
+ struct gve_tx_ring *tx;
+ struct xdp_frame *xdpf;
+ u32 tx_qid;
+ int err;
+
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf)) {
+ if (rx->xsk_pool)
+ xsk_buff_free(xdp);
+ return -ENOSPC;
+ }
+
+ tx_qid = gve_xdp_tx_queue_id(priv, rx->q_num);
+ tx = &priv->tx[tx_qid];
+ spin_lock(&tx->dqo_tx.xdp_lock);
+ err = gve_xdp_xmit_one_dqo(priv, tx, xdpf);
+ spin_unlock(&tx->dqo_tx.xdp_lock);
+
+ return err;
+}
+
+static void gve_xsk_done_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct xdp_buff *xdp, struct bpf_prog *xprog,
+ int xdp_act)
+{
+ switch (xdp_act) {
+ case XDP_ABORTED:
+ case XDP_DROP:
+ default:
+ xsk_buff_free(xdp);
+ break;
+ case XDP_TX:
+ if (unlikely(gve_xdp_tx_dqo(priv, rx, xdp)))
+ goto err;
+ break;
+ case XDP_REDIRECT:
+ if (unlikely(xdp_do_redirect(priv->dev, xdp, xprog)))
+ goto err;
+ break;
+ }
+
+ u64_stats_update_begin(&rx->statss);
+ if ((u32)xdp_act < GVE_XDP_ACTIONS)
+ rx->xdp_actions[xdp_act]++;
+ u64_stats_update_end(&rx->statss);
+ return;
+
+err:
+ u64_stats_update_begin(&rx->statss);
+ if (xdp_act == XDP_TX)
+ rx->xdp_tx_errors++;
+ if (xdp_act == XDP_REDIRECT)
+ rx->xdp_redirect_errors++;
+ u64_stats_update_end(&rx->statss);
+}
+
+static void gve_xdp_done_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct xdp_buff *xdp, struct bpf_prog *xprog,
+ int xdp_act,
+ struct gve_rx_buf_state_dqo *buf_state)
+{
+ int err;
+
+ switch (xdp_act) {
+ case XDP_ABORTED:
+ case XDP_DROP:
+ default:
+ gve_free_buffer(rx, buf_state);
+ break;
+ case XDP_TX:
+ err = gve_xdp_tx_dqo(priv, rx, xdp);
+ if (unlikely(err))
+ goto err;
+ gve_reuse_buffer(rx, buf_state);
+ break;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(priv->dev, xdp, xprog);
+ if (unlikely(err))
+ goto err;
+ gve_reuse_buffer(rx, buf_state);
+ break;
+ }
+ u64_stats_update_begin(&rx->statss);
+ if ((u32)xdp_act < GVE_XDP_ACTIONS)
+ rx->xdp_actions[xdp_act]++;
+ u64_stats_update_end(&rx->statss);
+ return;
+err:
+ u64_stats_update_begin(&rx->statss);
+ if (xdp_act == XDP_TX)
+ rx->xdp_tx_errors++;
+ else if (xdp_act == XDP_REDIRECT)
+ rx->xdp_redirect_errors++;
+ u64_stats_update_end(&rx->statss);
+ gve_free_buffer(rx, buf_state);
+ return;
+}
+
+static int gve_rx_xsk_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
+ const struct gve_rx_compl_desc_dqo *compl_desc,
+ struct gve_rx_buf_state_dqo *buf_state,
+ struct bpf_prog *xprog)
+{
+ struct xdp_buff *xdp = buf_state->xsk_buff;
+ int buf_len = compl_desc->packet_len;
+ struct gve_priv *priv = rx->gve;
+ struct gve_xdp_buff *gve_xdp;
+ int xdp_act;
+
+ xdp->data_end = xdp->data + buf_len;
+ xsk_buff_dma_sync_for_cpu(xdp);
+
+ gve_xdp = (void *)xdp;
+ gve_xdp->gve = priv;
+ gve_xdp->compl_desc = compl_desc;
+
+ if (xprog) {
+ xdp_act = bpf_prog_run_xdp(xprog, xdp);
+ buf_len = xdp->data_end - xdp->data;
+ if (xdp_act != XDP_PASS) {
+ gve_xsk_done_dqo(priv, rx, xdp, xprog, xdp_act);
+ gve_free_buf_state(rx, buf_state);
+ return 0;
+ }
+ }
+
+ /* Copy the data to skb */
+ rx->ctx.skb_head = gve_rx_copy_data(priv->dev, napi,
+ xdp->data, buf_len);
+ if (unlikely(!rx->ctx.skb_head)) {
+ xsk_buff_free(xdp);
+ gve_free_buf_state(rx, buf_state);
+ return -ENOMEM;
+ }
+ rx->ctx.skb_tail = rx->ctx.skb_head;
+
+ /* Free the XSK buffer and the buffer state */
+ xsk_buff_free(xdp);
+ gve_free_buf_state(rx, buf_state);
+
+ /* Update stats */
+ u64_stats_update_begin(&rx->statss);
+ rx->xdp_actions[XDP_PASS]++;
+ u64_stats_update_end(&rx->statss);
+ return 0;
+}
+
+static void gve_dma_sync(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state, u16 buf_len)
+{
+ struct gve_rx_slot_page_info *page_info = &buf_state->page_info;
+
+ if (rx->dqo.page_pool) {
+ page_pool_dma_sync_netmem_for_cpu(rx->dqo.page_pool,
+ page_info->netmem,
+ page_info->page_offset,
+ buf_len);
+ } else {
+ dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
+ page_info->page_offset +
+ page_info->pad,
+ buf_len, DMA_FROM_DEVICE);
+ }
+}
+
/* Returns 0 if descriptor is completed successfully.
* Returns -EINVAL if descriptor is invalid.
* Returns -ENOMEM if data cannot be copied to skb.
@@ -533,6 +786,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
const bool hsplit = compl_desc->split_header;
struct gve_rx_buf_state_dqo *buf_state;
struct gve_priv *priv = rx->gve;
+ struct bpf_prog *xprog;
u16 buf_len;
u16 hdr_len;
@@ -556,10 +810,19 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
buf_len = compl_desc->packet_len;
hdr_len = compl_desc->header_len;
+ xprog = READ_ONCE(priv->xdp_prog);
+ if (buf_state->xsk_buff)
+ return gve_rx_xsk_dqo(napi, rx, compl_desc, buf_state, xprog);
+
/* Page might have not been used for awhile and was likely last written
* by a different thread.
*/
- prefetch(buf_state->page_info.page);
+ if (rx->dqo.page_pool) {
+ if (!netmem_is_net_iov(buf_state->page_info.netmem))
+ prefetch(netmem_to_page(buf_state->page_info.netmem));
+ } else {
+ prefetch(buf_state->page_info.page);
+ }
/* Copy the header into the skb in the case of header split */
if (hsplit) {
@@ -584,12 +847,18 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
rx->rx_hsplit_unsplit_pkt += unsplit;
rx->rx_hsplit_bytes += hdr_len;
u64_stats_update_end(&rx->statss);
+ } else if (!rx->ctx.skb_head && rx->dqo.page_pool &&
+ netmem_is_net_iov(buf_state->page_info.netmem)) {
+ /* When header split is disabled, the header lands in the packet
+ * buffer. If the packet buffer is a net_iov, it cannot easily be
+ * mapped into kernel space to access the header needed to process
+ * the packet.
+ */
+ goto error;
}
/* Sync the portion of dma buffer for CPU to read. */
- dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
- buf_state->page_info.page_offset,
- buf_len, DMA_FROM_DEVICE);
+ gve_dma_sync(priv, rx, buf_state, buf_len);
/* Append to current skb if one exists. */
if (rx->ctx.skb_head) {
@@ -600,7 +869,39 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
return 0;
}
- if (eop && buf_len <= priv->rx_copybreak) {
+ if (xprog) {
+ struct gve_xdp_buff gve_xdp;
+ void *old_data;
+ int xdp_act;
+
+ xdp_init_buff(&gve_xdp.xdp, buf_state->page_info.buf_size,
+ &rx->xdp_rxq);
+ xdp_prepare_buff(&gve_xdp.xdp,
+ buf_state->page_info.page_address +
+ buf_state->page_info.page_offset,
+ buf_state->page_info.pad,
+ buf_len, false);
+ gve_xdp.gve = priv;
+ gve_xdp.compl_desc = compl_desc;
+
+ old_data = gve_xdp.xdp.data;
+ xdp_act = bpf_prog_run_xdp(xprog, &gve_xdp.xdp);
+ buf_state->page_info.pad += gve_xdp.xdp.data - old_data;
+ buf_len = gve_xdp.xdp.data_end - gve_xdp.xdp.data;
+ if (xdp_act != XDP_PASS) {
+ gve_xdp_done_dqo(priv, rx, &gve_xdp.xdp, xprog, xdp_act,
+ buf_state);
+ return 0;
+ }
+
+ u64_stats_update_begin(&rx->statss);
+ rx->xdp_actions[XDP_PASS]++;
+ u64_stats_update_end(&rx->statss);
+ }
+
+ if (eop && buf_len <= priv->rx_copybreak &&
+ !(rx->dqo.page_pool &&
+ netmem_is_net_iov(buf_state->page_info.netmem))) {
rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
&buf_state->page_info, buf_len);
if (unlikely(!rx->ctx.skb_head))
@@ -630,9 +931,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
if (rx->dqo.page_pool)
skb_mark_for_recycle(rx->ctx.skb_head);
- skb_add_rx_frag(rx->ctx.skb_head, 0, buf_state->page_info.page,
- buf_state->page_info.page_offset, buf_len,
- buf_state->page_info.buf_size);
+ gve_skb_add_rx_frag(rx, buf_state, 0, buf_len);
gve_reuse_buffer(rx, buf_state);
return 0;
@@ -683,6 +982,9 @@ static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi,
if (feat & NETIF_F_RXCSUM)
gve_rx_skb_csum(rx->ctx.skb_head, desc, ptype);
+ if (rx->gve->ts_config.rx_filter == HWTSTAMP_FILTER_ALL)
+ gve_rx_skb_hwtstamp(rx, desc);
+
/* RSC packets must set gso_size otherwise the TCP stack will complain
* that packets are larger than MTU.
*/
@@ -702,16 +1004,27 @@ static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi,
int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
{
- struct napi_struct *napi = &block->napi;
- netdev_features_t feat = napi->dev->features;
-
- struct gve_rx_ring *rx = block->rx;
- struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq;
-
+ struct gve_rx_compl_queue_dqo *complq;
+ struct napi_struct *napi;
+ netdev_features_t feat;
+ struct gve_rx_ring *rx;
+ struct gve_priv *priv;
+ u64 xdp_redirects;
u32 work_done = 0;
u64 bytes = 0;
+ u64 xdp_txs;
int err;
+ napi = &block->napi;
+ feat = napi->dev->features;
+
+ rx = block->rx;
+ priv = rx->gve;
+ complq = &rx->dqo.complq;
+
+ xdp_redirects = rx->xdp_actions[XDP_REDIRECT];
+ xdp_txs = rx->xdp_actions[XDP_TX];
+
while (work_done < budget) {
struct gve_rx_compl_desc_dqo *compl_desc =
&complq->desc_ring[complq->head];
@@ -785,6 +1098,12 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
rx->ctx.skb_tail = NULL;
}
+ if (xdp_txs != rx->xdp_actions[XDP_TX])
+ gve_xdp_tx_flush_dqo(priv, rx->q_num);
+
+ if (xdp_redirects != rx->xdp_actions[XDP_REDIRECT])
+ xdp_do_flush();
+
gve_rx_post_buffers_dqo(rx);
u64_stats_update_begin(&rx->statss);
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index e7fb7d6d283d..97efc8d27e6f 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -206,7 +206,10 @@ void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx)
return;
gve_remove_napi(priv, ntfy_idx);
- gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
+ if (tx->q_num < priv->tx_cfg.num_queues)
+ gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
+ else
+ gve_clean_xdp_done(priv, tx, priv->tx_desc_cnt);
netdev_tx_reset_queue(tx->netdev_txq);
gve_tx_remove_from_block(priv, idx);
}
@@ -331,27 +334,23 @@ int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg)
{
struct gve_tx_ring *tx = cfg->tx;
+ int total_queues;
int err = 0;
int i, j;
- if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
+ total_queues = cfg->qcfg->num_queues + cfg->num_xdp_rings;
+ if (total_queues > cfg->qcfg->max_queues) {
netif_err(priv, drv, priv->dev,
"Cannot alloc more than the max num of Tx rings\n");
return -EINVAL;
}
- if (cfg->start_idx == 0) {
- tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
- GFP_KERNEL);
- if (!tx)
- return -ENOMEM;
- } else if (!tx) {
- netif_err(priv, drv, priv->dev,
- "Cannot alloc tx rings from a nonzero start idx without tx array\n");
- return -EINVAL;
- }
+ tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
+ GFP_KERNEL);
+ if (!tx)
+ return -ENOMEM;
- for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++) {
+ for (i = 0; i < total_queues; i++) {
err = gve_tx_alloc_ring_gqi(priv, cfg, &tx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
@@ -367,8 +366,7 @@ int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
cleanup:
for (j = 0; j < i; j++)
gve_tx_free_ring_gqi(priv, &tx[j], cfg);
- if (cfg->start_idx == 0)
- kvfree(tx);
+ kvfree(tx);
return err;
}
@@ -381,13 +379,11 @@ void gve_tx_free_rings_gqi(struct gve_priv *priv,
if (!tx)
return;
- for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++)
+ for (i = 0; i < cfg->qcfg->num_queues + cfg->qcfg->num_xdp_queues; i++)
gve_tx_free_ring_gqi(priv, &tx[i], cfg);
- if (cfg->start_idx == 0) {
- kvfree(tx);
- cfg->tx = NULL;
- }
+ kvfree(tx);
+ cfg->tx = NULL;
}
/* gve_tx_avail - Calculates the number of slots available in the ring
@@ -734,7 +730,9 @@ unmap_drop:
gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]);
}
drop:
+ u64_stats_update_begin(&tx->statss);
tx->dropped_pkt++;
+ u64_stats_update_end(&tx->statss);
return 0;
}
@@ -827,18 +825,21 @@ static int gve_tx_fill_xdp(struct gve_priv *priv, struct gve_tx_ring *tx,
return ndescs;
}
-int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
- u32 flags)
+int gve_xdp_xmit_gqi(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
{
struct gve_priv *priv = netdev_priv(dev);
struct gve_tx_ring *tx;
int i, err = 0, qid;
- if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK) || !priv->xdp_prog)
return -EINVAL;
+ if (!gve_get_napi_enabled(priv))
+ return -ENETDOWN;
+
qid = gve_xdp_tx_queue_id(priv,
- smp_processor_id() % priv->num_xdp_queues);
+ smp_processor_id() % priv->tx_cfg.num_xdp_queues);
tx = &priv->tx[qid];
@@ -953,14 +954,10 @@ static int gve_xsk_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
spin_lock(&tx->xdp_lock);
while (sent < budget) {
- if (!gve_can_tx(tx, GVE_TX_START_THRESH))
+ if (!gve_can_tx(tx, GVE_TX_START_THRESH) ||
+ !xsk_tx_peek_desc(tx->xsk_pool, &desc))
goto out;
- if (!xsk_tx_peek_desc(tx->xsk_pool, &desc)) {
- tx->xdp_xsk_done = tx->xdp_xsk_wakeup;
- goto out;
- }
-
data = xsk_buff_raw_get_data(tx->xsk_pool, desc.addr);
nsegs = gve_tx_fill_xdp(priv, tx, data, desc.len, NULL, true);
tx->req += nsegs;
@@ -975,33 +972,41 @@ out:
return sent;
}
+int gve_xsk_tx_poll(struct gve_notify_block *rx_block, int budget)
+{
+ struct gve_rx_ring *rx = rx_block->rx;
+ struct gve_priv *priv = rx->gve;
+ struct gve_tx_ring *tx;
+ int sent = 0;
+
+ tx = &priv->tx[gve_xdp_tx_queue_id(priv, rx->q_num)];
+ if (tx->xsk_pool) {
+ sent = gve_xsk_tx(priv, tx, budget);
+
+ u64_stats_update_begin(&tx->statss);
+ tx->xdp_xsk_sent += sent;
+ u64_stats_update_end(&tx->statss);
+ if (xsk_uses_need_wakeup(tx->xsk_pool))
+ xsk_set_tx_need_wakeup(tx->xsk_pool);
+ }
+
+ return sent;
+}
+
bool gve_xdp_poll(struct gve_notify_block *block, int budget)
{
struct gve_priv *priv = block->priv;
struct gve_tx_ring *tx = block->tx;
u32 nic_done;
- bool repoll;
u32 to_do;
/* Find out how much work there is to be done */
nic_done = gve_tx_load_event_counter(priv, tx);
to_do = min_t(u32, (nic_done - tx->done), budget);
gve_clean_xdp_done(priv, tx, to_do);
- repoll = nic_done != tx->done;
-
- if (tx->xsk_pool) {
- int sent = gve_xsk_tx(priv, tx, budget);
-
- u64_stats_update_begin(&tx->statss);
- tx->xdp_xsk_sent += sent;
- u64_stats_update_end(&tx->statss);
- repoll |= (sent == budget);
- if (xsk_uses_need_wakeup(tx->xsk_pool))
- xsk_set_tx_need_wakeup(tx->xsk_pool);
- }
/* If we still have work we want to repoll */
- return repoll;
+ return nic_done != tx->done;
}
bool gve_tx_poll(struct gve_notify_block *block, int budget)
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index f879426cb552..40b89b3e5a31 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -9,9 +9,11 @@
#include "gve_utils.h"
#include "gve_dqo.h"
#include <net/ip.h>
+#include <linux/bpf.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
+#include <net/xdp_sock_drv.h>
/* Returns true if tx_bufs are available. */
static bool gve_has_free_tx_qpl_bufs(struct gve_tx_ring *tx, int count)
@@ -110,6 +112,14 @@ static bool gve_has_pending_packet(struct gve_tx_ring *tx)
return false;
}
+void gve_xdp_tx_flush_dqo(struct gve_priv *priv, u32 xdp_qid)
+{
+ u32 tx_qid = gve_xdp_tx_queue_id(priv, xdp_qid);
+ struct gve_tx_ring *tx = &priv->tx[tx_qid];
+
+ gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
+}
+
static struct gve_tx_pending_packet_dqo *
gve_alloc_pending_packet(struct gve_tx_ring *tx)
{
@@ -198,7 +208,8 @@ void gve_tx_stop_ring_dqo(struct gve_priv *priv, int idx)
gve_remove_napi(priv, ntfy_idx);
gve_clean_tx_done_dqo(priv, tx, /*napi=*/NULL);
- netdev_tx_reset_queue(tx->netdev_txq);
+ if (tx->netdev_txq)
+ netdev_tx_reset_queue(tx->netdev_txq);
gve_tx_clean_pending_packets(tx);
gve_tx_remove_from_block(priv, idx);
}
@@ -231,6 +242,9 @@ static void gve_tx_free_ring_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
tx->dqo.tx_ring = NULL;
}
+ kvfree(tx->dqo.xsk_reorder_queue);
+ tx->dqo.xsk_reorder_queue = NULL;
+
kvfree(tx->dqo.pending_packets);
tx->dqo.pending_packets = NULL;
@@ -276,7 +290,8 @@ void gve_tx_start_ring_dqo(struct gve_priv *priv, int idx)
gve_tx_add_to_block(priv, idx);
- tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
+ if (idx < priv->tx_cfg.num_queues)
+ tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
gve_add_napi(priv, ntfy_idx, gve_napi_poll_dqo);
}
@@ -295,6 +310,7 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
memset(tx, 0, sizeof(*tx));
tx->q_num = idx;
tx->dev = hdev;
+ spin_lock_init(&tx->dqo_tx.xdp_lock);
atomic_set_release(&tx->dqo_compl.hw_tx_head, 0);
/* Queue sizes must be a power of 2 */
@@ -333,6 +349,17 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
tx->dqo.pending_packets[tx->dqo.num_pending_packets - 1].next = -1;
atomic_set_release(&tx->dqo_compl.free_pending_packets, -1);
+
+ /* Only alloc the XSK reorder queue for XDP queues */
+ if (idx >= cfg->qcfg->num_queues && cfg->num_xdp_rings) {
+ tx->dqo.xsk_reorder_queue =
+ kvcalloc(tx->dqo.complq_mask + 1,
+ sizeof(tx->dqo.xsk_reorder_queue[0]),
+ GFP_KERNEL);
+ if (!tx->dqo.xsk_reorder_queue)
+ goto err;
+ }
+
tx->dqo_compl.miss_completions.head = -1;
tx->dqo_compl.miss_completions.tail = -1;
tx->dqo_compl.timed_out_completions.head = -1;
@@ -379,27 +406,23 @@ int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg)
{
struct gve_tx_ring *tx = cfg->tx;
+ int total_queues;
int err = 0;
int i, j;
- if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
+ total_queues = cfg->qcfg->num_queues + cfg->num_xdp_rings;
+ if (total_queues > cfg->qcfg->max_queues) {
netif_err(priv, drv, priv->dev,
"Cannot alloc more than the max num of Tx rings\n");
return -EINVAL;
}
- if (cfg->start_idx == 0) {
- tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
- GFP_KERNEL);
- if (!tx)
- return -ENOMEM;
- } else if (!tx) {
- netif_err(priv, drv, priv->dev,
- "Cannot alloc tx rings from a nonzero start idx without tx array\n");
- return -EINVAL;
- }
+ tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
+ GFP_KERNEL);
+ if (!tx)
+ return -ENOMEM;
- for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++) {
+ for (i = 0; i < total_queues; i++) {
err = gve_tx_alloc_ring_dqo(priv, cfg, &tx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
@@ -415,8 +438,7 @@ int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
err:
for (j = 0; j < i; j++)
gve_tx_free_ring_dqo(priv, &tx[j], cfg);
- if (cfg->start_idx == 0)
- kvfree(tx);
+ kvfree(tx);
return err;
}
@@ -429,13 +451,11 @@ void gve_tx_free_rings_dqo(struct gve_priv *priv,
if (!tx)
return;
- for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++)
+ for (i = 0; i < cfg->qcfg->num_queues + cfg->qcfg->num_xdp_queues; i++)
gve_tx_free_ring_dqo(priv, &tx[i], cfg);
- if (cfg->start_idx == 0) {
- kvfree(tx);
- cfg->tx = NULL;
- }
+ kvfree(tx);
+ cfg->tx = NULL;
}
/* Returns the number of slots available in the ring */
@@ -446,12 +466,28 @@ static u32 num_avail_tx_slots(const struct gve_tx_ring *tx)
return tx->mask - num_used;
}
+/* Checks if the requested number of slots is available in the ring */
+static bool gve_has_tx_slots_available(struct gve_tx_ring *tx, u32 slots_req)
+{
+ u32 num_avail = num_avail_tx_slots(tx);
+
+ slots_req += GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP;
+
+ if (num_avail >= slots_req)
+ return true;
+
+ /* Update cached TX head pointer */
+ tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);
+
+ return num_avail_tx_slots(tx) >= slots_req;
+}
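+
The helper above follows a common cached-consumer-index pattern: the hot path trusts a stale copy of the hardware head and only pays for the acquire load when that copy says the ring is too full. A small stand-alone model of the idea, not part of this patch and with hypothetical names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical ring: the producer keeps a stale copy of the consumer's head
 * and only refreshes it (acquire load) when the stale copy suggests there is
 * no room left.
 */
struct toy_ring {
	uint32_t mask;			/* ring size - 1 (power of two) */
	uint32_t tail;			/* producer index */
	uint32_t cached_head;		/* producer's stale view of head */
	_Atomic uint32_t hw_head;	/* updated by the completion path */
};

static uint32_t slots_avail(const struct toy_ring *r)
{
	return r->mask - (r->tail - r->cached_head);
}

static bool has_slots(struct toy_ring *r, uint32_t want)
{
	if (slots_avail(r) >= want)
		return true;

	/* Slow path: refresh the cached head and check again. */
	r->cached_head = atomic_load_explicit(&r->hw_head, memory_order_acquire);
	return slots_avail(r) >= want;
}

int main(void)
{
	struct toy_ring r = { .mask = 7, .tail = 7, .cached_head = 0 };

	atomic_store(&r.hw_head, 5);		/* completions happened meanwhile */
	printf("%d\n", has_slots(&r, 3));	/* 1: found after refreshing */
	return 0;
}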
+
static bool gve_has_avail_slots_tx_dqo(struct gve_tx_ring *tx,
int desc_count, int buf_count)
{
return gve_has_pending_packet(tx) &&
- num_avail_tx_slots(tx) >= desc_count &&
- gve_has_free_tx_qpl_bufs(tx, buf_count);
+ gve_has_tx_slots_available(tx, desc_count) &&
+ gve_has_free_tx_qpl_bufs(tx, buf_count);
}
/* Stops the queue if available descriptors is less than 'count'.
@@ -463,12 +499,6 @@ static int gve_maybe_stop_tx_dqo(struct gve_tx_ring *tx,
if (likely(gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count)))
return 0;
- /* Update cached TX head pointer */
- tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);
-
- if (likely(gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count)))
- return 0;
-
/* No space, so stop the queue */
tx->stop_queue++;
netif_tx_stop_queue(tx->netdev_txq);
@@ -479,8 +509,6 @@ static int gve_maybe_stop_tx_dqo(struct gve_tx_ring *tx,
/* After stopping queue, check if we can transmit again in order to
* avoid TOCTOU bug.
*/
- tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);
-
if (likely(!gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count)))
return -EBUSY;
@@ -507,11 +535,9 @@ static void gve_extract_tx_metadata_dqo(const struct sk_buff *skb,
}
static void gve_tx_fill_pkt_desc_dqo(struct gve_tx_ring *tx, u32 *desc_idx,
- struct sk_buff *skb, u32 len, u64 addr,
+ bool enable_csum, u32 len, u64 addr,
s16 compl_tag, bool eop, bool is_gso)
{
- const bool checksum_offload_en = skb->ip_summed == CHECKSUM_PARTIAL;
-
while (len > 0) {
struct gve_tx_pkt_desc_dqo *desc =
&tx->dqo.tx_ring[*desc_idx].pkt;
@@ -522,7 +548,7 @@ static void gve_tx_fill_pkt_desc_dqo(struct gve_tx_ring *tx, u32 *desc_idx,
.buf_addr = cpu_to_le64(addr),
.dtype = GVE_TX_PKT_DESC_DTYPE_DQO,
.end_of_packet = cur_eop,
- .checksum_offload_enable = checksum_offload_en,
+ .checksum_offload_enable = enable_csum,
.compl_tag = cpu_to_le16(compl_tag),
.buf_size = cur_len,
};
@@ -619,6 +645,25 @@ gve_tx_fill_general_ctx_desc(struct gve_tx_general_context_desc_dqo *desc,
};
}
+static void gve_tx_update_tail(struct gve_tx_ring *tx, u32 desc_idx)
+{
+ u32 last_desc_idx = (desc_idx - 1) & tx->mask;
+ u32 last_report_event_interval =
+ (last_desc_idx - tx->dqo_tx.last_re_idx) & tx->mask;
+
+ /* Commit the changes to our state */
+ tx->dqo_tx.tail = desc_idx;
+
+ /* Request a descriptor completion on the last descriptor of the
+ * packet if we are allowed to by the HW enforced interval.
+ */
+
+ if (unlikely(last_report_event_interval >= GVE_TX_MIN_RE_INTERVAL)) {
+ tx->dqo.tx_ring[last_desc_idx].pkt.report_event = true;
+ tx->dqo_tx.last_re_idx = last_desc_idx;
+ }
+}
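The tail-update helper above throttles how often a descriptor completion is requested. A stand-alone model of that throttling follows; it is not part of the patch, and the interval below is assumed (the real GVE_TX_MIN_RE_INTERVAL is defined elsewhere in the driver).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical model: request a completion on a packet's last descriptor only
 * when at least ASSUMED_RE_INTERVAL descriptors have been posted since the
 * previous request.
 */
#define ASSUMED_RE_INTERVAL	32
#define RING_MASK		255

static bool maybe_request_completion(uint32_t last_desc_idx, uint32_t *last_re_idx)
{
	uint32_t gap = (last_desc_idx - *last_re_idx) & RING_MASK;

	if (gap < ASSUMED_RE_INTERVAL)
		return false;

	*last_re_idx = last_desc_idx;	/* models setting pkt.report_event */
	return true;
}

int main(void)
{
	uint32_t last_re = 0;

	printf("%d\n", maybe_request_completion(10, &last_re));	/* 0: too soon */
	printf("%d\n", maybe_request_completion(40, &last_re));	/* 1: interval reached */
	return 0;
}
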
+
static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
struct sk_buff *skb,
struct gve_tx_pending_packet_dqo *pkt,
@@ -626,6 +671,7 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
u32 *desc_idx,
bool is_gso)
{
+ bool enable_csum = skb->ip_summed == CHECKSUM_PARTIAL;
const struct skb_shared_info *shinfo = skb_shinfo(skb);
int i;
@@ -651,7 +697,7 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
++pkt->num_bufs;
- gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr,
+ gve_tx_fill_pkt_desc_dqo(tx, desc_idx, enable_csum, len, addr,
completion_tag,
/*eop=*/shinfo->nr_frags == 0, is_gso);
}
@@ -667,10 +713,11 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
goto err;
dma_unmap_len_set(pkt, len[pkt->num_bufs], len);
- dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
+ netmem_dma_unmap_addr_set(skb_frag_netmem(frag), pkt,
+ dma[pkt->num_bufs], addr);
++pkt->num_bufs;
- gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr,
+ gve_tx_fill_pkt_desc_dqo(tx, desc_idx, enable_csum, len, addr,
completion_tag, is_eop, is_gso);
}
@@ -715,6 +762,7 @@ static int gve_tx_add_skb_copy_dqo(struct gve_tx_ring *tx,
u32 *desc_idx,
bool is_gso)
{
+ bool enable_csum = skb->ip_summed == CHECKSUM_PARTIAL;
u32 copy_offset = 0;
dma_addr_t dma_addr;
u32 copy_len;
@@ -736,7 +784,7 @@ static int gve_tx_add_skb_copy_dqo(struct gve_tx_ring *tx,
copy_offset += copy_len;
dma_sync_single_for_device(tx->dev, dma_addr,
copy_len, DMA_TO_DEVICE);
- gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb,
+ gve_tx_fill_pkt_desc_dqo(tx, desc_idx, enable_csum,
copy_len,
dma_addr,
completion_tag,
@@ -770,7 +818,11 @@ static int gve_tx_add_skb_dqo(struct gve_tx_ring *tx,
s16 completion_tag;
pkt = gve_alloc_pending_packet(tx);
+ if (!pkt)
+ return -ENOMEM;
+
pkt->skb = skb;
+ pkt->type = GVE_TX_PENDING_PACKET_DQO_SKB;
completion_tag = pkt - tx->dqo.pending_packets;
gve_extract_tx_metadata_dqo(skb, &metadata);
@@ -803,24 +855,7 @@ static int gve_tx_add_skb_dqo(struct gve_tx_ring *tx,
tx->dqo_tx.posted_packet_desc_cnt += pkt->num_bufs;
- /* Commit the changes to our state */
- tx->dqo_tx.tail = desc_idx;
-
- /* Request a descriptor completion on the last descriptor of the
- * packet if we are allowed to by the HW enforced interval.
- */
- {
- u32 last_desc_idx = (desc_idx - 1) & tx->mask;
- u32 last_report_event_interval =
- (last_desc_idx - tx->dqo_tx.last_re_idx) & tx->mask;
-
- if (unlikely(last_report_event_interval >=
- GVE_TX_MIN_RE_INTERVAL)) {
- tx->dqo.tx_ring[last_desc_idx].pkt.report_event = true;
- tx->dqo_tx.last_re_idx = last_desc_idx;
- }
- }
-
+ gve_tx_update_tail(tx, desc_idx);
return 0;
err:
@@ -954,9 +989,8 @@ static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx,
/* Metadata + (optional TSO) + data descriptors. */
total_num_descs = 1 + skb_is_gso(skb) + num_buffer_descs;
- if (unlikely(gve_maybe_stop_tx_dqo(tx, total_num_descs +
- GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP,
- num_buffer_descs))) {
+ if (unlikely(gve_maybe_stop_tx_dqo(tx, total_num_descs,
+ num_buffer_descs))) {
return -1;
}
@@ -968,11 +1002,45 @@ static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx,
return 0;
drop:
+ u64_stats_update_begin(&tx->statss);
tx->dropped_pkt++;
+ u64_stats_update_end(&tx->statss);
dev_kfree_skb_any(skb);
return 0;
}
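+
+/* Descriptive note (derived from the code below): completion tags for XSK
+ * packets are recorded in this ring in transmit order and are released back
+ * to the XSK pool only from the head, once the corresponding packet has
+ * reached GVE_PACKET_STATE_XSK_COMPLETE, so xsk_tx_completed() always covers
+ * an in-order prefix of what was sent.
+ */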
+static void gve_xsk_reorder_queue_push_dqo(struct gve_tx_ring *tx,
+ u16 completion_tag)
+{
+ u32 tail = atomic_read(&tx->dqo_tx.xsk_reorder_queue_tail);
+
+ tx->dqo.xsk_reorder_queue[tail] = completion_tag;
+ tail = (tail + 1) & tx->dqo.complq_mask;
+ atomic_set_release(&tx->dqo_tx.xsk_reorder_queue_tail, tail);
+}
+
+static struct gve_tx_pending_packet_dqo *
+gve_xsk_reorder_queue_head(struct gve_tx_ring *tx)
+{
+ u32 head = tx->dqo_compl.xsk_reorder_queue_head;
+
+ if (head == tx->dqo_compl.xsk_reorder_queue_tail) {
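+ /* The cached tail says the queue is empty; re-read the producer
+ * tail with acquire semantics (pairing with the release in the push
+ * path) before concluding that no completion tag is pending.
+ */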
+ tx->dqo_compl.xsk_reorder_queue_tail =
+ atomic_read_acquire(&tx->dqo_tx.xsk_reorder_queue_tail);
+
+ if (head == tx->dqo_compl.xsk_reorder_queue_tail)
+ return NULL;
+ }
+
+ return &tx->dqo.pending_packets[tx->dqo.xsk_reorder_queue[head]];
+}
+
+static void gve_xsk_reorder_queue_pop_dqo(struct gve_tx_ring *tx)
+{
+ tx->dqo_compl.xsk_reorder_queue_head++;
+ tx->dqo_compl.xsk_reorder_queue_head &= tx->dqo.complq_mask;
+}
+
/* Transmit a given skb and ring the doorbell. */
netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev)
{
@@ -996,6 +1064,62 @@ netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
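+
+/* Zero-copy XSK transmit: descriptors are peeked from the pool under
+ * xdp_lock, one packet descriptor is posted per XSK descriptor and its
+ * completion tag is pushed to the reorder queue; the doorbell is rung once
+ * for the whole batch. Returns true when the budget was exhausted or the
+ * ring ran out of slots, so the caller knows to poll again.
+ */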
+static bool gve_xsk_tx_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
+ int budget)
+{
+ struct xsk_buff_pool *pool = tx->xsk_pool;
+ struct xdp_desc desc;
+ bool repoll = false;
+ int sent = 0;
+
+ spin_lock(&tx->dqo_tx.xdp_lock);
+ for (; sent < budget; sent++) {
+ struct gve_tx_pending_packet_dqo *pkt;
+ s16 completion_tag;
+ dma_addr_t addr;
+ u32 desc_idx;
+
+ if (unlikely(!gve_has_avail_slots_tx_dqo(tx, 1, 1))) {
+ repoll = true;
+ break;
+ }
+
+ if (!xsk_tx_peek_desc(pool, &desc))
+ break;
+
+ pkt = gve_alloc_pending_packet(tx);
+ pkt->type = GVE_TX_PENDING_PACKET_DQO_XSK;
+ pkt->num_bufs = 0;
+ completion_tag = pkt - tx->dqo.pending_packets;
+
+ addr = xsk_buff_raw_get_dma(pool, desc.addr);
+ xsk_buff_raw_dma_sync_for_device(pool, addr, desc.len);
+
+ desc_idx = tx->dqo_tx.tail;
+ gve_tx_fill_pkt_desc_dqo(tx, &desc_idx,
+ true, desc.len,
+ addr, completion_tag, true,
+ false);
+ ++pkt->num_bufs;
+ gve_tx_update_tail(tx, desc_idx);
+ tx->dqo_tx.posted_packet_desc_cnt += pkt->num_bufs;
+ gve_xsk_reorder_queue_push_dqo(tx, completion_tag);
+ }
+
+ if (sent) {
+ gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
+ xsk_tx_release(pool);
+ }
+
+ spin_unlock(&tx->dqo_tx.xdp_lock);
+
+ u64_stats_update_begin(&tx->statss);
+ tx->xdp_xsk_sent += sent;
+ u64_stats_update_end(&tx->statss);
+
+ return (sent == budget) || repoll;
+}
+
static void add_to_list(struct gve_tx_ring *tx, struct gve_index_list *list,
struct gve_tx_pending_packet_dqo *pending_packet)
{
@@ -1045,8 +1169,9 @@ static void gve_unmap_packet(struct device *dev,
dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
for (i = 1; i < pkt->num_bufs; i++) {
- dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
- dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE);
+ netmem_dma_unmap_page_attrs(dev, dma_unmap_addr(pkt, dma[i]),
+ dma_unmap_len(pkt, len[i]),
+ DMA_TO_DEVICE, 0);
}
pkt->num_bufs = 0;
}
@@ -1109,16 +1234,35 @@ static void gve_handle_packet_completion(struct gve_priv *priv,
}
}
tx->dqo_tx.completed_packet_desc_cnt += pending_packet->num_bufs;
- if (tx->dqo.qpl)
- gve_free_tx_qpl_bufs(tx, pending_packet);
- else
+
+ switch (pending_packet->type) {
+ case GVE_TX_PENDING_PACKET_DQO_SKB:
+ if (tx->dqo.qpl)
+ gve_free_tx_qpl_bufs(tx, pending_packet);
+ else
+ gve_unmap_packet(tx->dev, pending_packet);
+ (*pkts)++;
+ *bytes += pending_packet->skb->len;
+
+ napi_consume_skb(pending_packet->skb, is_napi);
+ pending_packet->skb = NULL;
+ gve_free_pending_packet(tx, pending_packet);
+ break;
+ case GVE_TX_PENDING_PACKET_DQO_XDP_FRAME:
gve_unmap_packet(tx->dev, pending_packet);
+ (*pkts)++;
+ *bytes += pending_packet->xdpf->len;
- *bytes += pending_packet->skb->len;
- (*pkts)++;
- napi_consume_skb(pending_packet->skb, is_napi);
- pending_packet->skb = NULL;
- gve_free_pending_packet(tx, pending_packet);
+ xdp_return_frame(pending_packet->xdpf);
+ pending_packet->xdpf = NULL;
+ gve_free_pending_packet(tx, pending_packet);
+ break;
+ case GVE_TX_PENDING_PACKET_DQO_XSK:
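+ /* Only mark the packet complete here; it is freed and counted
+ * towards xsk_tx_completed() from the reorder queue in
+ * gve_tx_process_xsk_completions().
+ */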
+ pending_packet->state = GVE_PACKET_STATE_XSK_COMPLETE;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
}
static void gve_handle_miss_completion(struct gve_priv *priv,
@@ -1146,8 +1290,7 @@ static void gve_handle_miss_completion(struct gve_priv *priv,
/* jiffies can wraparound but time comparisons can handle overflows. */
pending_packet->timeout_jiffies =
jiffies +
- msecs_to_jiffies(GVE_REINJECT_COMPL_TIMEOUT *
- MSEC_PER_SEC);
+ secs_to_jiffies(GVE_REINJECT_COMPL_TIMEOUT);
add_to_list(tx, &tx->dqo_compl.miss_completions, pending_packet);
*bytes += pending_packet->skb->len;
@@ -1183,7 +1326,11 @@ static void remove_miss_completions(struct gve_priv *priv,
/* This indicates the packet was dropped. */
dev_kfree_skb_any(pending_packet->skb);
pending_packet->skb = NULL;
+
+ u64_stats_update_begin(&tx->statss);
tx->dropped_pkt++;
+ u64_stats_update_end(&tx->statss);
+
net_err_ratelimited("%s: No reinjection completion was received for: %d.\n",
priv->dev->name,
(int)(pending_packet - tx->dqo.pending_packets));
@@ -1191,8 +1338,7 @@ static void remove_miss_completions(struct gve_priv *priv,
pending_packet->state = GVE_PACKET_STATE_TIMED_OUT_COMPL;
pending_packet->timeout_jiffies =
jiffies +
- msecs_to_jiffies(GVE_DEALLOCATE_COMPL_TIMEOUT *
- MSEC_PER_SEC);
+ secs_to_jiffies(GVE_DEALLOCATE_COMPL_TIMEOUT);
/* Maintain pending packet in another list so the packet can be
* unallocated at a later time.
*/
@@ -1217,8 +1363,34 @@ static void remove_timed_out_completions(struct gve_priv *priv,
remove_from_list(tx, &tx->dqo_compl.timed_out_completions,
pending_packet);
+
+ /* Need to count XSK packets in xsk_tx_completed. */
+ if (pending_packet->type == GVE_TX_PENDING_PACKET_DQO_XSK)
+ pending_packet->state = GVE_PACKET_STATE_XSK_COMPLETE;
+ else
+ gve_free_pending_packet(tx, pending_packet);
+ }
+}
+
+static void gve_tx_process_xsk_completions(struct gve_tx_ring *tx)
+{
+ u32 num_xsks = 0;
+
+ while (true) {
+ struct gve_tx_pending_packet_dqo *pending_packet =
+ gve_xsk_reorder_queue_head(tx);
+
+ if (!pending_packet ||
+ pending_packet->state != GVE_PACKET_STATE_XSK_COMPLETE)
+ break;
+
+ num_xsks++;
+ gve_xsk_reorder_queue_pop_dqo(tx);
gve_free_pending_packet(tx, pending_packet);
}
+
+ if (num_xsks)
+ xsk_tx_completed(tx->xsk_pool, num_xsks);
}
int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
@@ -1291,13 +1463,17 @@ int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
num_descs_cleaned++;
}
- netdev_tx_completed_queue(tx->netdev_txq,
- pkt_compl_pkts + miss_compl_pkts,
- pkt_compl_bytes + miss_compl_bytes);
+ if (tx->netdev_txq)
+ netdev_tx_completed_queue(tx->netdev_txq,
+ pkt_compl_pkts + miss_compl_pkts,
+ pkt_compl_bytes + miss_compl_bytes);
remove_miss_completions(priv, tx);
remove_timed_out_completions(priv, tx);
+ if (tx->xsk_pool)
+ gve_tx_process_xsk_completions(tx);
+
u64_stats_update_begin(&tx->statss);
tx->bytes_done += pkt_compl_bytes + reinject_compl_bytes;
tx->pkt_done += pkt_compl_pkts + reinject_compl_pkts;
@@ -1329,3 +1505,111 @@ bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean)
compl_desc = &tx->dqo.compl_ring[tx->dqo_compl.head];
return compl_desc->generation != tx->dqo_compl.cur_gen_bit;
}
+
+bool gve_xsk_tx_poll_dqo(struct gve_notify_block *rx_block, int budget)
+{
+ struct gve_rx_ring *rx = rx_block->rx;
+ struct gve_priv *priv = rx->gve;
+ struct gve_tx_ring *tx;
+
+ tx = &priv->tx[gve_xdp_tx_queue_id(priv, rx->q_num)];
+ if (tx->xsk_pool)
+ return gve_xsk_tx_dqo(priv, tx, budget);
+
+ return 0;
+}
+
+bool gve_xdp_poll_dqo(struct gve_notify_block *block)
+{
+ struct gve_tx_compl_desc *compl_desc;
+ struct gve_tx_ring *tx = block->tx;
+ struct gve_priv *priv = block->priv;
+
+ gve_clean_tx_done_dqo(priv, tx, &block->napi);
+
+ /* Return true if we still have work. */
+ compl_desc = &tx->dqo.compl_ring[tx->dqo_compl.head];
+ return compl_desc->generation != tx->dqo_compl.cur_gen_bit;
+}
+
+int gve_xdp_xmit_one_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
+ struct xdp_frame *xdpf)
+{
+ struct gve_tx_pending_packet_dqo *pkt;
+ u32 desc_idx = tx->dqo_tx.tail;
+ s16 completion_tag;
+ int num_descs = 1;
+ dma_addr_t addr;
+ int err;
+
+ if (unlikely(!gve_has_tx_slots_available(tx, num_descs)))
+ return -EBUSY;
+
+ pkt = gve_alloc_pending_packet(tx);
+ if (unlikely(!pkt))
+ return -EBUSY;
+
+ pkt->type = GVE_TX_PENDING_PACKET_DQO_XDP_FRAME;
+ pkt->num_bufs = 0;
+ pkt->xdpf = xdpf;
+ completion_tag = pkt - tx->dqo.pending_packets;
+
+ /* Generate Packet Descriptor */
+ addr = dma_map_single(tx->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
+ err = dma_mapping_error(tx->dev, addr);
+ if (unlikely(err))
+ goto err;
+
+ dma_unmap_len_set(pkt, len[pkt->num_bufs], xdpf->len);
+ dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
+ pkt->num_bufs++;
+
+ gve_tx_fill_pkt_desc_dqo(tx, &desc_idx,
+ false, xdpf->len,
+ addr, completion_tag, true,
+ false);
+
+ gve_tx_update_tail(tx, desc_idx);
+ return 0;
+
+err:
+ pkt->xdpf = NULL;
+ pkt->num_bufs = 0;
+ gve_free_pending_packet(tx, pkt);
+ return err;
+}
+
+int gve_xdp_xmit_dqo(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_tx_ring *tx;
+ int i, err = 0, qid;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ qid = gve_xdp_tx_queue_id(priv,
+ smp_processor_id() % priv->tx_cfg.num_xdp_queues);
+
+ tx = &priv->tx[qid];
+
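+ /* An XDP queue can be shared by several CPUs and by the XSK
+ * zero-copy path, so descriptor posting is serialized by xdp_lock.
+ */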
+ spin_lock(&tx->dqo_tx.xdp_lock);
+ for (i = 0; i < n; i++) {
+ err = gve_xdp_xmit_one_dqo(priv, tx, frames[i]);
+ if (err)
+ break;
+ }
+
+ if (flags & XDP_XMIT_FLUSH)
+ gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
+
+ spin_unlock(&tx->dqo_tx.xdp_lock);
+
+ u64_stats_update_begin(&tx->statss);
+ tx->xdp_xmit += n;
+ tx->xdp_xmit_errors += n - i;
+ u64_stats_update_end(&tx->statss);
+
+ return i ? i : err;
+}
diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c
index 30fef100257e..ace9b8698021 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.c
+++ b/drivers/net/ethernet/google/gve/gve_utils.c
@@ -110,13 +110,13 @@ void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
{
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
- netif_napi_add(priv->dev, &block->napi, gve_poll);
- netif_napi_set_irq(&block->napi, block->irq);
+ netif_napi_add_locked(priv->dev, &block->napi, gve_poll);
+ netif_napi_set_irq_locked(&block->napi, block->irq);
}
void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
{
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
- netif_napi_del(&block->napi);
+ netif_napi_del_locked(&block->napi);
}
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 65302c41bfb1..18eca7d12c20 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -148,8 +148,10 @@ config HIBMCGE
tristate "Hisilicon BMC Gigabit Ethernet Device Support"
depends on PCI && PCI_MSI
select PHYLIB
+ select FIXED_PHY
select MOTORCOMM_PHY
select REALTEK_PHY
+ select PAGE_POOL
help
If you wish to compile a kernel for a BMC with HIBMC-xx_gmac
then you should answer Y to this. This makes this driver suitable for use
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/Makefile b/drivers/net/ethernet/hisilicon/hibmcge/Makefile
index ae58ac38c206..d6610ba16855 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/Makefile
+++ b/drivers/net/ethernet/hisilicon/hibmcge/Makefile
@@ -3,6 +3,8 @@
# Makefile for the HISILICON BMC GE network device drivers.
#
+ccflags-y += -I$(src)
obj-$(CONFIG_HIBMCGE) += hibmcge.o
-hibmcge-objs = hbg_main.o hbg_hw.o hbg_mdio.o hbg_irq.o hbg_txrx.o hbg_ethtool.o
+hibmcge-objs = hbg_main.o hbg_hw.o hbg_mdio.o hbg_irq.o hbg_txrx.o hbg_ethtool.o \
+ hbg_debugfs.o hbg_err.o hbg_diagnose.o
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
index 96daf058d387..8e134da3e217 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
@@ -4,8 +4,10 @@
#ifndef __HBG_COMMON_H
#define __HBG_COMMON_H
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
+#include <net/page_pool/helpers.h>
#include "hbg_reg.h"
#define HBG_STATUS_DISABLE 0x0
@@ -16,6 +18,7 @@
#define HBG_PCU_CACHE_LINE_SIZE 32
#define HBG_TX_TIMEOUT_BUF_LEN 1024
#define HBG_RX_DESCR 0x01
+#define HBG_NO_PHY 0xFF
#define HBG_PACKET_HEAD_SIZE ((HBG_RX_SKIP1 + HBG_RX_SKIP2 + \
HBG_RX_DESCR) * HBG_PCU_CACHE_LINE_SIZE)
@@ -33,6 +36,16 @@ enum hbg_tx_state {
enum hbg_nic_state {
HBG_NIC_STATE_EVENT_HANDLING = 0,
+ HBG_NIC_STATE_RESETTING,
+ HBG_NIC_STATE_RESET_FAIL,
+ HBG_NIC_STATE_NEED_RESET, /* trigger a reset in scheduled task */
+ HBG_NIC_STATE_NP_LINK_FAIL,
+};
+
+enum hbg_reset_type {
+ HBG_RESET_TYPE_NONE = 0,
+ HBG_RESET_TYPE_FLR,
+ HBG_RESET_TYPE_FUNCTION,
};
struct hbg_buffer {
@@ -43,6 +56,12 @@ struct hbg_buffer {
dma_addr_t skb_dma;
u32 skb_len;
+ struct page *page;
+ void *page_addr;
+ dma_addr_t page_dma;
+ u32 page_size;
+ u32 page_offset;
+
enum hbg_dir dir;
struct hbg_ring *ring;
struct hbg_priv *priv;
@@ -66,12 +85,14 @@ struct hbg_ring {
struct hbg_priv *priv;
struct napi_struct napi;
char *tout_log_buf; /* tx timeout log buffer */
+ struct page_pool *page_pool; /* only for rx */
};
enum hbg_hw_event_type {
HBG_HW_EVENT_NONE = 0,
HBG_HW_EVENT_INIT, /* driver is loading */
HBG_HW_EVENT_RESET,
+ HBG_HW_EVENT_CORE_RESET,
};
struct hbg_dev_specs {
@@ -84,6 +105,7 @@ struct hbg_dev_specs {
u32 vlan_layers;
u32 max_mtu;
u32 min_mtu;
+ u32 uc_mac_num;
u32 max_frame_len;
u32 rx_buf_size;
@@ -94,14 +116,17 @@ struct hbg_irq_info {
u32 mask;
bool re_enable;
bool need_print;
- u64 count;
+ bool need_reset;
- void (*irq_handle)(struct hbg_priv *priv, struct hbg_irq_info *info);
+ void (*irq_handle)(struct hbg_priv *priv,
+ const struct hbg_irq_info *info);
};
struct hbg_vector {
char name[HBG_VECTOR_NUM][32];
- struct hbg_irq_info *info_array;
+
+ u64 *stats_array;
+ const struct hbg_irq_info *info_array;
u32 info_array_len;
};
@@ -114,6 +139,135 @@ struct hbg_mac {
u32 duplex;
u32 autoneg;
u32 link_status;
+ u32 pause_autoneg;
+};
+
+struct hbg_mac_table_entry {
+ u8 addr[ETH_ALEN];
+};
+
+struct hbg_mac_filter {
+ struct hbg_mac_table_entry *mac_table;
+ u32 table_max_len;
+ bool enabled;
+};
+
+/* saved for restore after reset */
+struct hbg_user_def {
+ struct ethtool_pauseparam pause_param;
+};
+
+struct hbg_stats {
+ u64 rx_desc_drop;
+ u64 rx_desc_l2_err_cnt;
+ u64 rx_desc_pkt_len_err_cnt;
+ u64 rx_desc_l3l4_err_cnt;
+ u64 rx_desc_l3_wrong_head_cnt;
+ u64 rx_desc_l3_csum_err_cnt;
+ u64 rx_desc_l3_len_err_cnt;
+ u64 rx_desc_l3_zero_ttl_cnt;
+ u64 rx_desc_l3_other_cnt;
+ u64 rx_desc_l4_err_cnt;
+ u64 rx_desc_l4_wrong_head_cnt;
+ u64 rx_desc_l4_len_err_cnt;
+ u64 rx_desc_l4_csum_err_cnt;
+ u64 rx_desc_l4_zero_port_num_cnt;
+ u64 rx_desc_l4_other_cnt;
+ u64 rx_desc_frag_cnt;
+ u64 rx_desc_ip_ver_err_cnt;
+ u64 rx_desc_ipv4_pkt_cnt;
+ u64 rx_desc_ipv6_pkt_cnt;
+ u64 rx_desc_no_ip_pkt_cnt;
+ u64 rx_desc_ip_pkt_cnt;
+ u64 rx_desc_tcp_pkt_cnt;
+ u64 rx_desc_udp_pkt_cnt;
+ u64 rx_desc_vlan_pkt_cnt;
+ u64 rx_desc_icmp_pkt_cnt;
+ u64 rx_desc_arp_pkt_cnt;
+ u64 rx_desc_rarp_pkt_cnt;
+ u64 rx_desc_multicast_pkt_cnt;
+ u64 rx_desc_broadcast_pkt_cnt;
+ u64 rx_desc_ipsec_pkt_cnt;
+ u64 rx_desc_ip_opt_pkt_cnt;
+ u64 rx_desc_key_not_match_cnt;
+
+ u64 rx_octets_total_ok_cnt;
+ u64 rx_uc_pkt_cnt;
+ u64 rx_mc_pkt_cnt;
+ u64 rx_bc_pkt_cnt;
+ u64 rx_vlan_pkt_cnt;
+ u64 rx_octets_bad_cnt;
+ u64 rx_octets_total_filt_cnt;
+ u64 rx_filt_pkt_cnt;
+ u64 rx_trans_pkt_cnt;
+ u64 rx_framesize_64;
+ u64 rx_framesize_65_127;
+ u64 rx_framesize_128_255;
+ u64 rx_framesize_256_511;
+ u64 rx_framesize_512_1023;
+ u64 rx_framesize_1024_1518;
+ u64 rx_framesize_bt_1518;
+ u64 rx_fcs_error_cnt;
+ u64 rx_data_error_cnt;
+ u64 rx_align_error_cnt;
+ u64 rx_pause_macctl_frame_cnt;
+ u64 rx_unknown_macctl_frame_cnt;
+ /* crc ok, > max_frm_size, < 2max_frm_size */
+ u64 rx_frame_long_err_cnt;
+ /* crc fail, > max_frm_size, < 2max_frm_size */
+ u64 rx_jabber_err_cnt;
+ /* > 2max_frm_size */
+ u64 rx_frame_very_long_err_cnt;
+ /* < 64byte, >= short_runts_thr */
+ u64 rx_frame_runt_err_cnt;
+ /* < short_runts_thr */
+ u64 rx_frame_short_err_cnt;
+ /* PCU: dropped when the RX FIFO is full. */
+ u64 rx_overflow_cnt;
+ /* GMAC: the count of overflows of the RX FIFO */
+ u64 rx_overrun_cnt;
+ /* PCU: the count of buffer alloc errors in RX */
+ u64 rx_bufrq_err_cnt;
+ /* PCU: the count of write descriptor errors in RX */
+ u64 rx_we_err_cnt;
+ /* GMAC: the count of pkts that contain PAD but length is not 64 */
+ u64 rx_lengthfield_err_cnt;
+ u64 rx_fail_comma_cnt;
+
+ u64 rx_dma_err_cnt;
+ u64 rx_fifo_less_empty_thrsld_cnt;
+
+ u64 tx_octets_total_ok_cnt;
+ u64 tx_uc_pkt_cnt;
+ u64 tx_mc_pkt_cnt;
+ u64 tx_bc_pkt_cnt;
+ u64 tx_vlan_pkt_cnt;
+ u64 tx_octets_bad_cnt;
+ u64 tx_trans_pkt_cnt;
+ u64 tx_pause_frame_cnt;
+ u64 tx_framesize_64;
+ u64 tx_framesize_65_127;
+ u64 tx_framesize_128_255;
+ u64 tx_framesize_256_511;
+ u64 tx_framesize_512_1023;
+ u64 tx_framesize_1024_1518;
+ u64 tx_framesize_bt_1518;
+ /* GMAC: the count of times that frames fail to be transmitted
+ * due to internal errors.
+ */
+ u64 tx_underrun_err_cnt;
+ u64 tx_add_cs_fail_cnt;
+ /* PCU: the count of buffer free errors in TX */
+ u64 tx_bufrl_err_cnt;
+ u64 tx_crc_err_cnt;
+ u64 tx_drop_cnt;
+ u64 tx_excessive_length_drop_cnt;
+
+ u64 tx_timeout_cnt;
+ u64 tx_dma_err_cnt;
+
+ u64 np_link_fail_cnt;
+ u64 reset_fail_cnt;
};
struct hbg_priv {
@@ -126,6 +280,15 @@ struct hbg_priv {
struct hbg_vector vectors;
struct hbg_ring tx_ring;
struct hbg_ring rx_ring;
+ struct hbg_mac_filter filter;
+ enum hbg_reset_type reset_type;
+ struct hbg_user_def user_def;
+ struct hbg_stats stats;
+ unsigned long last_update_stats_time;
+ struct delayed_work service_task;
};
+void hbg_err_reset_task_schedule(struct hbg_priv *priv);
+void hbg_np_link_fail_task_schedule(struct hbg_priv *priv);
+
#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c
new file mode 100644
index 000000000000..01ad82d2f5cc
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+#include <linux/seq_file.h>
+#include <linux/string_choices.h>
+#include "hbg_common.h"
+#include "hbg_debugfs.h"
+#include "hbg_hw.h"
+#include "hbg_irq.h"
+#include "hbg_txrx.h"
+
+static struct dentry *hbg_dbgfs_root;
+
+struct hbg_dbg_info {
+ const char *name;
+ int (*read)(struct seq_file *seq, void *data);
+};
+
+#define state_str_true_false(p, s) str_true_false(test_bit(s, &(p)->state))
+
+static void hbg_dbg_ring(struct hbg_priv *priv, struct hbg_ring *ring,
+ struct seq_file *s)
+{
+ u32 irq_mask = ring->dir == HBG_DIR_TX ? HBG_INT_MSK_TX_B :
+ HBG_INT_MSK_RX_B;
+
+ seq_printf(s, "ring used num: %u\n",
+ hbg_get_queue_used_num(ring));
+ seq_printf(s, "ring max num: %u\n", ring->len);
+ seq_printf(s, "ring head: %u, tail: %u\n", ring->head, ring->tail);
+ seq_printf(s, "fifo used num: %u\n",
+ hbg_hw_get_fifo_used_num(priv, ring->dir));
+ seq_printf(s, "fifo max num: %u\n",
+ hbg_get_spec_fifo_max_num(priv, ring->dir));
+ seq_printf(s, "irq enabled: %s\n",
+ str_true_false(hbg_hw_irq_is_enabled(priv, irq_mask)));
+}
+
+static int hbg_dbg_tx_ring(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ hbg_dbg_ring(priv, &priv->tx_ring, s);
+ return 0;
+}
+
+static int hbg_dbg_rx_ring(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ hbg_dbg_ring(priv, &priv->rx_ring, s);
+ return 0;
+}
+
+static int hbg_dbg_irq_info(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+ const struct hbg_irq_info *info;
+ u32 i;
+
+ for (i = 0; i < priv->vectors.info_array_len; i++) {
+ info = &priv->vectors.info_array[i];
+ seq_printf(s,
+ "%-20s: enabled: %-5s, reset: %-5s, logged: %-5s, count: %llu\n",
+ info->name,
+ str_true_false(hbg_hw_irq_is_enabled(priv,
+ info->mask)),
+ str_true_false(info->need_reset),
+ str_true_false(info->need_print),
+ priv->vectors.stats_array[i]);
+ }
+
+ return 0;
+}
+
+static int hbg_dbg_mac_table(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_mac_filter *filter;
+ u32 i;
+
+ filter = &priv->filter;
+ seq_printf(s, "mac addr max count: %u\n", filter->table_max_len);
+ seq_printf(s, "filter enabled: %s\n", str_true_false(filter->enabled));
+
+ for (i = 0; i < filter->table_max_len; i++) {
+ if (is_zero_ether_addr(filter->mac_table[i].addr))
+ continue;
+
+ seq_printf(s, "[%u] %pM\n", i, filter->mac_table[i].addr);
+ }
+
+ return 0;
+}
+
+static const char * const reset_type_str[] = {"None", "FLR", "Function"};
+
+static int hbg_dbg_nic_state(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+ bool np_link_fail;
+
+ seq_printf(s, "event handling state: %s\n",
+ state_str_true_false(priv, HBG_NIC_STATE_EVENT_HANDLING));
+ seq_printf(s, "resetting state: %s\n",
+ state_str_true_false(priv, HBG_NIC_STATE_RESETTING));
+ seq_printf(s, "reset fail state: %s\n",
+ state_str_true_false(priv, HBG_NIC_STATE_RESET_FAIL));
+ seq_printf(s, "last reset type: %s\n",
+ reset_type_str[priv->reset_type]);
+ seq_printf(s, "need reset state: %s\n",
+ state_str_true_false(priv, HBG_NIC_STATE_NEED_RESET));
+
+ np_link_fail = !hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR,
+ HBG_REG_AN_NEG_STATE_NP_LINK_OK_B);
+ seq_printf(s, "np_link fail state: %s\n", str_true_false(np_link_fail));
+
+ return 0;
+}
+
+static const struct hbg_dbg_info hbg_dbg_infos[] = {
+ { "tx_ring", hbg_dbg_tx_ring },
+ { "rx_ring", hbg_dbg_rx_ring },
+ { "irq_info", hbg_dbg_irq_info },
+ { "mac_table", hbg_dbg_mac_table },
+ { "nic_state", hbg_dbg_nic_state },
+};
+
+static void hbg_debugfs_uninit(void *data)
+{
+ debugfs_remove_recursive((struct dentry *)data);
+}
+
+void hbg_debugfs_init(struct hbg_priv *priv)
+{
+ const char *name = pci_name(priv->pdev);
+ struct device *dev = &priv->pdev->dev;
+ struct dentry *root;
+ u32 i;
+
+ root = debugfs_create_dir(name, hbg_dbgfs_root);
+
+ for (i = 0; i < ARRAY_SIZE(hbg_dbg_infos); i++)
+ debugfs_create_devm_seqfile(dev, hbg_dbg_infos[i].name,
+ root, hbg_dbg_infos[i].read);
+
+ /* Ignore the failure because debugfs is not a key feature. */
+ devm_add_action_or_reset(dev, hbg_debugfs_uninit, root);
+}
+
+void hbg_debugfs_register(void)
+{
+ hbg_dbgfs_root = debugfs_create_dir("hibmcge", NULL);
+}
+
+void hbg_debugfs_unregister(void)
+{
+ debugfs_remove_recursive(hbg_dbgfs_root);
+ hbg_dbgfs_root = NULL;
+}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h
new file mode 100644
index 000000000000..80670d66bbeb
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef __HBG_DEBUGFS_H
+#define __HBG_DEBUGFS_H
+
+void hbg_debugfs_register(void);
+void hbg_debugfs_unregister(void);
+
+void hbg_debugfs_init(struct hbg_priv *priv);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c
new file mode 100644
index 000000000000..c0ce74cf7382
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2025 Hisilicon Limited.
+
+#include <linux/iopoll.h>
+#include <linux/phy.h>
+#include "hbg_common.h"
+#include "hbg_ethtool.h"
+#include "hbg_hw.h"
+#include "hbg_diagnose.h"
+
+#define HBG_MSG_DATA_MAX_NUM 64
+
+struct hbg_diagnose_message {
+ u32 opcode;
+ u32 status;
+ u32 data_num;
+ struct hbg_priv *priv;
+
+ u32 data[HBG_MSG_DATA_MAX_NUM];
+};
+
+#define HBG_HW_PUSH_WAIT_TIMEOUT_US (2 * 1000 * 1000)
+#define HBG_HW_PUSH_WAIT_INTERVAL_US (1 * 1000)
+
+enum hbg_push_cmd {
+ HBG_PUSH_CMD_IRQ = 0,
+ HBG_PUSH_CMD_STATS,
+ HBG_PUSH_CMD_LINK,
+};
+
+struct hbg_push_stats_info {
+ /* id is used to match the name of the current stats item,
+ * and is used for pretty printing on the BMC.
+ */
+ u32 id;
+ u64 offset;
+};
+
+struct hbg_push_irq_info {
+ /* id is used to match the name of the current irq,
+ * and is used for pretty printing on the BMC.
+ */
+ u32 id;
+ u32 mask;
+};
+
+#define HBG_PUSH_IRQ_I(name, id) {id, HBG_INT_MSK_##name##_B}
+static const struct hbg_push_irq_info hbg_push_irq_list[] = {
+ HBG_PUSH_IRQ_I(RX, 0),
+ HBG_PUSH_IRQ_I(TX, 1),
+ HBG_PUSH_IRQ_I(TX_PKT_CPL, 2),
+ HBG_PUSH_IRQ_I(MAC_MII_FIFO_ERR, 3),
+ HBG_PUSH_IRQ_I(MAC_PCS_RX_FIFO_ERR, 4),
+ HBG_PUSH_IRQ_I(MAC_PCS_TX_FIFO_ERR, 5),
+ HBG_PUSH_IRQ_I(MAC_APP_RX_FIFO_ERR, 6),
+ HBG_PUSH_IRQ_I(MAC_APP_TX_FIFO_ERR, 7),
+ HBG_PUSH_IRQ_I(SRAM_PARITY_ERR, 8),
+ HBG_PUSH_IRQ_I(TX_AHB_ERR, 9),
+ HBG_PUSH_IRQ_I(RX_BUF_AVL, 10),
+ HBG_PUSH_IRQ_I(REL_BUF_ERR, 11),
+ HBG_PUSH_IRQ_I(TXCFG_AVL, 12),
+ HBG_PUSH_IRQ_I(TX_DROP, 13),
+ HBG_PUSH_IRQ_I(RX_DROP, 14),
+ HBG_PUSH_IRQ_I(RX_AHB_ERR, 15),
+ HBG_PUSH_IRQ_I(MAC_FIFO_ERR, 16),
+ HBG_PUSH_IRQ_I(RBREQ_ERR, 17),
+ HBG_PUSH_IRQ_I(WE_ERR, 18),
+};
+
+#define HBG_PUSH_STATS_I(name, id) {id, HBG_STATS_FIELD_OFF(name)}
+static const struct hbg_push_stats_info hbg_push_stats_list[] = {
+ HBG_PUSH_STATS_I(rx_desc_drop, 0),
+ HBG_PUSH_STATS_I(rx_desc_l2_err_cnt, 1),
+ HBG_PUSH_STATS_I(rx_desc_pkt_len_err_cnt, 2),
+ HBG_PUSH_STATS_I(rx_desc_l3_wrong_head_cnt, 3),
+ HBG_PUSH_STATS_I(rx_desc_l3_csum_err_cnt, 4),
+ HBG_PUSH_STATS_I(rx_desc_l3_len_err_cnt, 5),
+ HBG_PUSH_STATS_I(rx_desc_l3_zero_ttl_cnt, 6),
+ HBG_PUSH_STATS_I(rx_desc_l3_other_cnt, 7),
+ HBG_PUSH_STATS_I(rx_desc_l4_err_cnt, 8),
+ HBG_PUSH_STATS_I(rx_desc_l4_wrong_head_cnt, 9),
+ HBG_PUSH_STATS_I(rx_desc_l4_len_err_cnt, 10),
+ HBG_PUSH_STATS_I(rx_desc_l4_csum_err_cnt, 11),
+ HBG_PUSH_STATS_I(rx_desc_l4_zero_port_num_cnt, 12),
+ HBG_PUSH_STATS_I(rx_desc_l4_other_cnt, 13),
+ HBG_PUSH_STATS_I(rx_desc_frag_cnt, 14),
+ HBG_PUSH_STATS_I(rx_desc_ip_ver_err_cnt, 15),
+ HBG_PUSH_STATS_I(rx_desc_ipv4_pkt_cnt, 16),
+ HBG_PUSH_STATS_I(rx_desc_ipv6_pkt_cnt, 17),
+ HBG_PUSH_STATS_I(rx_desc_no_ip_pkt_cnt, 18),
+ HBG_PUSH_STATS_I(rx_desc_ip_pkt_cnt, 19),
+ HBG_PUSH_STATS_I(rx_desc_tcp_pkt_cnt, 20),
+ HBG_PUSH_STATS_I(rx_desc_udp_pkt_cnt, 21),
+ HBG_PUSH_STATS_I(rx_desc_vlan_pkt_cnt, 22),
+ HBG_PUSH_STATS_I(rx_desc_icmp_pkt_cnt, 23),
+ HBG_PUSH_STATS_I(rx_desc_arp_pkt_cnt, 24),
+ HBG_PUSH_STATS_I(rx_desc_rarp_pkt_cnt, 25),
+ HBG_PUSH_STATS_I(rx_desc_multicast_pkt_cnt, 26),
+ HBG_PUSH_STATS_I(rx_desc_broadcast_pkt_cnt, 27),
+ HBG_PUSH_STATS_I(rx_desc_ipsec_pkt_cnt, 28),
+ HBG_PUSH_STATS_I(rx_desc_ip_opt_pkt_cnt, 29),
+ HBG_PUSH_STATS_I(rx_desc_key_not_match_cnt, 30),
+ HBG_PUSH_STATS_I(rx_octets_total_ok_cnt, 31),
+ HBG_PUSH_STATS_I(rx_uc_pkt_cnt, 32),
+ HBG_PUSH_STATS_I(rx_mc_pkt_cnt, 33),
+ HBG_PUSH_STATS_I(rx_bc_pkt_cnt, 34),
+ HBG_PUSH_STATS_I(rx_vlan_pkt_cnt, 35),
+ HBG_PUSH_STATS_I(rx_octets_bad_cnt, 36),
+ HBG_PUSH_STATS_I(rx_octets_total_filt_cnt, 37),
+ HBG_PUSH_STATS_I(rx_filt_pkt_cnt, 38),
+ HBG_PUSH_STATS_I(rx_trans_pkt_cnt, 39),
+ HBG_PUSH_STATS_I(rx_framesize_64, 40),
+ HBG_PUSH_STATS_I(rx_framesize_65_127, 41),
+ HBG_PUSH_STATS_I(rx_framesize_128_255, 42),
+ HBG_PUSH_STATS_I(rx_framesize_256_511, 43),
+ HBG_PUSH_STATS_I(rx_framesize_512_1023, 44),
+ HBG_PUSH_STATS_I(rx_framesize_1024_1518, 45),
+ HBG_PUSH_STATS_I(rx_framesize_bt_1518, 46),
+ HBG_PUSH_STATS_I(rx_fcs_error_cnt, 47),
+ HBG_PUSH_STATS_I(rx_data_error_cnt, 48),
+ HBG_PUSH_STATS_I(rx_align_error_cnt, 49),
+ HBG_PUSH_STATS_I(rx_frame_long_err_cnt, 50),
+ HBG_PUSH_STATS_I(rx_jabber_err_cnt, 51),
+ HBG_PUSH_STATS_I(rx_pause_macctl_frame_cnt, 52),
+ HBG_PUSH_STATS_I(rx_unknown_macctl_frame_cnt, 53),
+ HBG_PUSH_STATS_I(rx_frame_very_long_err_cnt, 54),
+ HBG_PUSH_STATS_I(rx_frame_runt_err_cnt, 55),
+ HBG_PUSH_STATS_I(rx_frame_short_err_cnt, 56),
+ HBG_PUSH_STATS_I(rx_overflow_cnt, 57),
+ HBG_PUSH_STATS_I(rx_bufrq_err_cnt, 58),
+ HBG_PUSH_STATS_I(rx_we_err_cnt, 59),
+ HBG_PUSH_STATS_I(rx_overrun_cnt, 60),
+ HBG_PUSH_STATS_I(rx_lengthfield_err_cnt, 61),
+ HBG_PUSH_STATS_I(rx_fail_comma_cnt, 62),
+ HBG_PUSH_STATS_I(rx_dma_err_cnt, 63),
+ HBG_PUSH_STATS_I(rx_fifo_less_empty_thrsld_cnt, 64),
+ HBG_PUSH_STATS_I(tx_octets_total_ok_cnt, 65),
+ HBG_PUSH_STATS_I(tx_uc_pkt_cnt, 66),
+ HBG_PUSH_STATS_I(tx_mc_pkt_cnt, 67),
+ HBG_PUSH_STATS_I(tx_bc_pkt_cnt, 68),
+ HBG_PUSH_STATS_I(tx_vlan_pkt_cnt, 69),
+ HBG_PUSH_STATS_I(tx_octets_bad_cnt, 70),
+ HBG_PUSH_STATS_I(tx_trans_pkt_cnt, 71),
+ HBG_PUSH_STATS_I(tx_pause_frame_cnt, 72),
+ HBG_PUSH_STATS_I(tx_framesize_64, 73),
+ HBG_PUSH_STATS_I(tx_framesize_65_127, 74),
+ HBG_PUSH_STATS_I(tx_framesize_128_255, 75),
+ HBG_PUSH_STATS_I(tx_framesize_256_511, 76),
+ HBG_PUSH_STATS_I(tx_framesize_512_1023, 77),
+ HBG_PUSH_STATS_I(tx_framesize_1024_1518, 78),
+ HBG_PUSH_STATS_I(tx_framesize_bt_1518, 79),
+ HBG_PUSH_STATS_I(tx_underrun_err_cnt, 80),
+ HBG_PUSH_STATS_I(tx_add_cs_fail_cnt, 81),
+ HBG_PUSH_STATS_I(tx_bufrl_err_cnt, 82),
+ HBG_PUSH_STATS_I(tx_crc_err_cnt, 83),
+ HBG_PUSH_STATS_I(tx_drop_cnt, 84),
+ HBG_PUSH_STATS_I(tx_excessive_length_drop_cnt, 85),
+ HBG_PUSH_STATS_I(tx_dma_err_cnt, 86),
+ HBG_PUSH_STATS_I(reset_fail_cnt, 87),
+};
+
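+/* Push one message to the BMC: the data words are written first, then the
+ * header write (with the status field set) starts the transfer. The status
+ * field is polled until the receiver clears it, and the response code held
+ * in the header is returned as a negative errno; ETIMEDOUT is pre-loaded so
+ * a poll timeout is reported as -ETIMEDOUT.
+ */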
+static int hbg_push_msg_send(struct hbg_priv *priv,
+ struct hbg_diagnose_message *msg)
+{
+ u32 header = 0;
+ u32 i;
+
+ if (msg->data_num == 0)
+ return 0;
+
+ for (i = 0; i < msg->data_num && i < HBG_MSG_DATA_MAX_NUM; i++)
+ hbg_reg_write(priv,
+ HBG_REG_MSG_DATA_BASE_ADDR + i * sizeof(u32),
+ msg->data[i]);
+
+ hbg_field_modify(header, HBG_REG_MSG_HEADER_OPCODE_M, msg->opcode);
+ hbg_field_modify(header, HBG_REG_MSG_HEADER_DATA_NUM_M, msg->data_num);
+ hbg_field_modify(header, HBG_REG_MSG_HEADER_RESP_CODE_M, ETIMEDOUT);
+
+ /* start status */
+ hbg_field_modify(header, HBG_REG_MSG_HEADER_STATUS_M, 1);
+
+ /* write header msg to start push */
+ hbg_reg_write(priv, HBG_REG_MSG_HEADER_ADDR, header);
+
+ /* wait done */
+ readl_poll_timeout(priv->io_base + HBG_REG_MSG_HEADER_ADDR, header,
+ !FIELD_GET(HBG_REG_MSG_HEADER_STATUS_M, header),
+ HBG_HW_PUSH_WAIT_INTERVAL_US,
+ HBG_HW_PUSH_WAIT_TIMEOUT_US);
+
+ msg->status = FIELD_GET(HBG_REG_MSG_HEADER_STATUS_M, header);
+ return -(int)FIELD_GET(HBG_REG_MSG_HEADER_RESP_CODE_M, header);
+}
+
+static int hbg_push_data(struct hbg_priv *priv,
+ u32 opcode, u32 *data, u32 data_num)
+{
+ struct hbg_diagnose_message msg = {0};
+ u32 data_left_num;
+ u32 i, j;
+ int ret;
+
+ msg.priv = priv;
+ msg.opcode = opcode;
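+ /* Send the payload in chunks of at most HBG_MSG_DATA_MAX_NUM words. */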
+ for (i = 0; i < data_num / HBG_MSG_DATA_MAX_NUM + 1; i++) {
+ if (i * HBG_MSG_DATA_MAX_NUM >= data_num)
+ break;
+
+ data_left_num = data_num - i * HBG_MSG_DATA_MAX_NUM;
+ for (j = 0; j < data_left_num && j < HBG_MSG_DATA_MAX_NUM; j++)
+ msg.data[j] = data[i * HBG_MSG_DATA_MAX_NUM + j];
+
+ msg.data_num = j;
+ ret = hbg_push_msg_send(priv, &msg);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hbg_push_data_u64(struct hbg_priv *priv, u32 opcode,
+ u64 *data, u32 data_num)
+{
+ /* The length of u64 is twice that of u32,
+ * so the data_num must be multiplied by 2.
+ */
+ return hbg_push_data(priv, opcode, (u32 *)data, data_num * 2);
+}
+
+static u64 hbg_get_irq_stats(struct hbg_vector *vectors, u32 mask)
+{
+ u32 i = 0;
+
+ for (i = 0; i < vectors->info_array_len; i++)
+ if (vectors->info_array[i].mask == mask)
+ return vectors->stats_array[i];
+
+ return 0;
+}
+
+static int hbg_push_irq_cnt(struct hbg_priv *priv)
+{
+ /* An id needs to be added for each data item.
+ * Therefore, the data_num must be multiplied by 2.
+ */
+ u32 data_num = ARRAY_SIZE(hbg_push_irq_list) * 2;
+ struct hbg_vector *vectors = &priv->vectors;
+ const struct hbg_push_irq_info *info;
+ u32 i, j = 0;
+ u64 *data;
+ int ret;
+
+ data = kcalloc(data_num, sizeof(u64), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* An id needs to be added for each data item,
+ * so i advances by 2 on each loop iteration.
+ */
+ for (i = 0; i < data_num; i += 2) {
+ info = &hbg_push_irq_list[j++];
+ data[i] = info->id;
+ data[i + 1] = hbg_get_irq_stats(vectors, info->mask);
+ }
+
+ ret = hbg_push_data_u64(priv, HBG_PUSH_CMD_IRQ, data, data_num);
+ kfree(data);
+ return ret;
+}
+
+static int hbg_push_link_status(struct hbg_priv *priv)
+{
+ u32 link_status[2];
+
+ /* phy link status */
+ link_status[0] = priv->mac.phydev->link;
+ /* mac link status */
+ link_status[1] = hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR,
+ HBG_REG_AN_NEG_STATE_NP_LINK_OK_B);
+
+ return hbg_push_data(priv, HBG_PUSH_CMD_LINK,
+ link_status, ARRAY_SIZE(link_status));
+}
+
+static int hbg_push_stats(struct hbg_priv *priv)
+{
+ /* An id needs to be added for each data item.
+ * Therefore, the data_num must be multiplied by 2.
+ */
+ u64 data_num = ARRAY_SIZE(hbg_push_stats_list) * 2;
+ struct hbg_stats *stats = &priv->stats;
+ const struct hbg_push_stats_info *info;
+ u32 i, j = 0;
+ u64 *data;
+ int ret;
+
+ data = kcalloc(data_num, sizeof(u64), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* An id needs to be added for each data item,
+ * so i advances by 2 on each loop iteration.
+ */
+ for (i = 0; i < data_num; i += 2) {
+ info = &hbg_push_stats_list[j++];
+ data[i] = info->id;
+ data[i + 1] = HBG_STATS_R(stats, info->offset);
+ }
+
+ ret = hbg_push_data_u64(priv, HBG_PUSH_CMD_STATS, data, data_num);
+ kfree(data);
+ return ret;
+}
+
+void hbg_diagnose_message_push(struct hbg_priv *priv)
+{
+ int ret;
+
+ if (test_bit(HBG_NIC_STATE_RESETTING, &priv->state))
+ return;
+
+ /* A push is requested only when this register reads 1. */
+ if (hbg_reg_read(priv, HBG_REG_PUSH_REQ_ADDR) != 1)
+ return;
+
+ ret = hbg_push_irq_cnt(priv);
+ if (ret) {
+ dev_err(&priv->pdev->dev,
+ "failed to push irq cnt, ret = %d\n", ret);
+ goto push_done;
+ }
+
+ ret = hbg_push_link_status(priv);
+ if (ret) {
+ dev_err(&priv->pdev->dev,
+ "failed to push link status, ret = %d\n", ret);
+ goto push_done;
+ }
+
+ ret = hbg_push_stats(priv);
+ if (ret)
+ dev_err(&priv->pdev->dev,
+ "failed to push stats, ret = %d\n", ret);
+
+push_done:
+ hbg_reg_write(priv, HBG_REG_PUSH_REQ_ADDR, 0);
+}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h
new file mode 100644
index 000000000000..ba04c6d8c03d
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2025 Hisilicon Limited. */
+
+#ifndef __HBG_DIAGNOSE_H
+#define __HBG_DIAGNOSE_H
+
+#include "hbg_common.h"
+
+void hbg_diagnose_message_push(struct hbg_priv *priv);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
new file mode 100644
index 000000000000..7234618e8e81
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/rtnetlink.h>
+#include "hbg_common.h"
+#include "hbg_err.h"
+#include "hbg_hw.h"
+
+static void hbg_restore_mac_table(struct hbg_priv *priv)
+{
+ struct hbg_mac_filter *filter = &priv->filter;
+ u64 addr;
+ u32 i;
+
+ for (i = 0; i < filter->table_max_len; i++)
+ if (!is_zero_ether_addr(filter->mac_table[i].addr)) {
+ addr = ether_addr_to_u64(filter->mac_table[i].addr);
+ hbg_hw_set_uc_addr(priv, addr, i);
+ }
+
+ hbg_hw_set_mac_filter_enable(priv, priv->filter.enabled);
+}
+
+static void hbg_restore_user_def_settings(struct hbg_priv *priv)
+{
+ /* The index of host mac is always 0. */
+ u64 rx_pause_addr = ether_addr_to_u64(priv->filter.mac_table[0].addr);
+ struct ethtool_pauseparam *pause_param = &priv->user_def.pause_param;
+
+ hbg_restore_mac_table(priv);
+ hbg_hw_set_mtu(priv, priv->netdev->mtu);
+ hbg_hw_set_pause_enable(priv, pause_param->tx_pause,
+ pause_param->rx_pause);
+ hbg_hw_set_rx_pause_mac_addr(priv, rx_pause_addr);
+}
+
+int hbg_rebuild(struct hbg_priv *priv)
+{
+ int ret;
+
+ ret = hbg_hw_init(priv);
+ if (ret)
+ return ret;
+
+ hbg_restore_user_def_settings(priv);
+ return 0;
+}
+
+static int hbg_reset_prepare(struct hbg_priv *priv, enum hbg_reset_type type)
+{
+ int ret;
+
+ if (test_and_set_bit(HBG_NIC_STATE_RESETTING, &priv->state))
+ return -EBUSY;
+
+ if (netif_running(priv->netdev)) {
+ clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
+ dev_warn(&priv->pdev->dev,
+ "failed to reset because port is up\n");
+ return -EBUSY;
+ }
+
+ netif_device_detach(priv->netdev);
+
+ priv->reset_type = type;
+ clear_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
+ ret = hbg_hw_event_notify(priv, HBG_HW_EVENT_RESET);
+ if (ret) {
+ priv->stats.reset_fail_cnt++;
+ set_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
+ clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
+ }
+
+ return ret;
+}
+
+static int hbg_reset_done(struct hbg_priv *priv, enum hbg_reset_type type)
+{
+ int ret;
+
+ if (!test_bit(HBG_NIC_STATE_RESETTING, &priv->state) ||
+ type != priv->reset_type)
+ return 0;
+
+ ret = hbg_rebuild(priv);
+ if (ret) {
+ priv->stats.reset_fail_cnt++;
+ set_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
+ clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
+ dev_err(&priv->pdev->dev, "failed to rebuild after reset\n");
+ return ret;
+ }
+
+ netif_device_attach(priv->netdev);
+ clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
+
+ dev_info(&priv->pdev->dev, "reset done\n");
+ return ret;
+}
+
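+/* Driver-initiated function-level reset. The FLR variant of the
+ * prepare/done pair is driven by the PCI reset_prepare/reset_done
+ * callbacks below.
+ */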
+int hbg_reset(struct hbg_priv *priv)
+{
+ int ret;
+
+ ret = hbg_reset_prepare(priv, HBG_RESET_TYPE_FUNCTION);
+ if (ret)
+ return ret;
+
+ return hbg_reset_done(priv, HBG_RESET_TYPE_FUNCTION);
+}
+
+void hbg_err_reset(struct hbg_priv *priv)
+{
+ bool running;
+
+ rtnl_lock();
+ running = netif_running(priv->netdev);
+ if (running)
+ dev_close(priv->netdev);
+
+ if (hbg_reset(priv))
+ goto err_unlock;
+
+ if (running)
+ dev_open(priv->netdev, NULL);
+
+err_unlock:
+ rtnl_unlock();
+}
+
+static pci_ers_result_t hbg_pci_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+
+ if (state == pci_channel_io_perm_failure) {
+ netif_device_detach(netdev);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t hbg_pci_err_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+ pci_disable_device(pdev);
+
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev,
+ "failed to re-enable PCI device after reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+
+ hbg_err_reset(priv);
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void hbg_pci_err_reset_prepare(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ hbg_reset_prepare(priv, HBG_RESET_TYPE_FLR);
+}
+
+static void hbg_pci_err_reset_done(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ hbg_reset_done(priv, HBG_RESET_TYPE_FLR);
+}
+
+static const struct pci_error_handlers hbg_pci_err_handler = {
+ .error_detected = hbg_pci_err_detected,
+ .slot_reset = hbg_pci_err_slot_reset,
+ .reset_prepare = hbg_pci_err_reset_prepare,
+ .reset_done = hbg_pci_err_reset_done,
+};
+
+void hbg_set_pci_err_handler(struct pci_driver *pdrv)
+{
+ pdrv->err_handler = &hbg_pci_err_handler;
+}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h
new file mode 100644
index 000000000000..fb9fbe7004e8
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef __HBG_ERR_H
+#define __HBG_ERR_H
+
+#include <linux/pci.h>
+
+void hbg_set_pci_err_handler(struct pci_driver *pdrv);
+int hbg_reset(struct hbg_priv *priv);
+int hbg_rebuild(struct hbg_priv *priv);
+void hbg_err_reset(struct hbg_priv *priv);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
index c3370114aef3..1d62ff913737 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
@@ -3,12 +3,495 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
+#include <linux/rtnetlink.h>
+#include "hbg_common.h"
+#include "hbg_err.h"
#include "hbg_ethtool.h"
+#include "hbg_hw.h"
+
+struct hbg_ethtool_stats {
+ char name[ETH_GSTRING_LEN];
+ unsigned long offset;
+ u32 reg; /* set to 0 if stats is not updated via dump reg */
+};
+
+#define HBG_STATS_I(stats) { #stats, HBG_STATS_FIELD_OFF(stats), 0}
+#define HBG_STATS_REG_I(stats, reg) { #stats, HBG_STATS_FIELD_OFF(stats), reg}
+
+static const struct hbg_ethtool_stats hbg_ethtool_stats_info[] = {
+ HBG_STATS_I(rx_desc_l2_err_cnt),
+ HBG_STATS_I(rx_desc_pkt_len_err_cnt),
+ HBG_STATS_I(rx_desc_l3_wrong_head_cnt),
+ HBG_STATS_I(rx_desc_l3_csum_err_cnt),
+ HBG_STATS_I(rx_desc_l3_len_err_cnt),
+ HBG_STATS_I(rx_desc_l3_zero_ttl_cnt),
+ HBG_STATS_I(rx_desc_l3_other_cnt),
+ HBG_STATS_I(rx_desc_l4_wrong_head_cnt),
+ HBG_STATS_I(rx_desc_l4_len_err_cnt),
+ HBG_STATS_I(rx_desc_l4_csum_err_cnt),
+ HBG_STATS_I(rx_desc_l4_zero_port_num_cnt),
+ HBG_STATS_I(rx_desc_l4_other_cnt),
+ HBG_STATS_I(rx_desc_ip_ver_err_cnt),
+ HBG_STATS_I(rx_desc_ipv4_pkt_cnt),
+ HBG_STATS_I(rx_desc_ipv6_pkt_cnt),
+ HBG_STATS_I(rx_desc_no_ip_pkt_cnt),
+ HBG_STATS_I(rx_desc_ip_pkt_cnt),
+ HBG_STATS_I(rx_desc_tcp_pkt_cnt),
+ HBG_STATS_I(rx_desc_udp_pkt_cnt),
+ HBG_STATS_I(rx_desc_vlan_pkt_cnt),
+ HBG_STATS_I(rx_desc_icmp_pkt_cnt),
+ HBG_STATS_I(rx_desc_arp_pkt_cnt),
+ HBG_STATS_I(rx_desc_rarp_pkt_cnt),
+ HBG_STATS_I(rx_desc_multicast_pkt_cnt),
+ HBG_STATS_I(rx_desc_broadcast_pkt_cnt),
+ HBG_STATS_I(rx_desc_ipsec_pkt_cnt),
+ HBG_STATS_I(rx_desc_ip_opt_pkt_cnt),
+ HBG_STATS_I(rx_desc_key_not_match_cnt),
+
+ HBG_STATS_REG_I(rx_octets_bad_cnt, HBG_REG_RX_OCTETS_BAD_ADDR),
+ HBG_STATS_REG_I(rx_octets_total_filt_cnt,
+ HBG_REG_RX_OCTETS_TOTAL_FILT_ADDR),
+ HBG_STATS_REG_I(rx_uc_pkt_cnt, HBG_REG_RX_UC_PKTS_ADDR),
+ HBG_STATS_REG_I(rx_vlan_pkt_cnt, HBG_REG_RX_TAGGED_ADDR),
+ HBG_STATS_REG_I(rx_filt_pkt_cnt, HBG_REG_RX_FILT_PKT_CNT_ADDR),
+ HBG_STATS_REG_I(rx_data_error_cnt, HBG_REG_RX_DATA_ERR_ADDR),
+ HBG_STATS_REG_I(rx_frame_long_err_cnt, HBG_REG_RX_LONG_ERRORS_ADDR),
+ HBG_STATS_REG_I(rx_jabber_err_cnt, HBG_REG_RX_JABBER_ERRORS_ADDR),
+ HBG_STATS_REG_I(rx_frame_very_long_err_cnt,
+ HBG_REG_RX_VERY_LONG_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_frame_runt_err_cnt, HBG_REG_RX_RUNT_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_frame_short_err_cnt, HBG_REG_RX_SHORT_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_overflow_cnt, HBG_REG_RX_OVER_FLOW_CNT_ADDR),
+ HBG_STATS_REG_I(rx_bufrq_err_cnt, HBG_REG_RX_BUFRQ_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_we_err_cnt, HBG_REG_RX_WE_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_overrun_cnt, HBG_REG_RX_OVERRUN_CNT_ADDR),
+ HBG_STATS_REG_I(rx_lengthfield_err_cnt,
+ HBG_REG_RX_LENGTHFIELD_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_fail_comma_cnt, HBG_REG_RX_FAIL_COMMA_CNT_ADDR),
+ HBG_STATS_I(rx_dma_err_cnt),
+ HBG_STATS_I(rx_fifo_less_empty_thrsld_cnt),
+
+ HBG_STATS_REG_I(tx_uc_pkt_cnt, HBG_REG_TX_UC_PKTS_ADDR),
+ HBG_STATS_REG_I(tx_vlan_pkt_cnt, HBG_REG_TX_TAGGED_ADDR),
+ HBG_STATS_REG_I(tx_octets_bad_cnt, HBG_REG_OCTETS_TRANSMITTED_BAD_ADDR),
+
+ HBG_STATS_REG_I(tx_underrun_err_cnt, HBG_REG_TX_UNDERRUN_ADDR),
+ HBG_STATS_REG_I(tx_add_cs_fail_cnt, HBG_REG_TX_CS_FAIL_CNT_ADDR),
+ HBG_STATS_REG_I(tx_bufrl_err_cnt, HBG_REG_TX_BUFRL_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(tx_crc_err_cnt, HBG_REG_TX_CRC_ERROR_ADDR),
+ HBG_STATS_REG_I(tx_drop_cnt, HBG_REG_TX_DROP_CNT_ADDR),
+ HBG_STATS_REG_I(tx_excessive_length_drop_cnt,
+ HBG_REG_TX_EXCESSIVE_LENGTH_DROP_ADDR),
+ HBG_STATS_I(tx_dma_err_cnt),
+ HBG_STATS_I(tx_timeout_cnt),
+ HBG_STATS_I(reset_fail_cnt),
+};
+
+static const struct hbg_ethtool_stats hbg_ethtool_rmon_stats_info[] = {
+ HBG_STATS_I(rx_desc_frag_cnt),
+ HBG_STATS_REG_I(rx_framesize_64, HBG_REG_RX_PKTS_64OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_65_127,
+ HBG_REG_RX_PKTS_65TO127OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_128_255,
+ HBG_REG_RX_PKTS_128TO255OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_256_511,
+ HBG_REG_RX_PKTS_256TO511OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_512_1023,
+ HBG_REG_RX_PKTS_512TO1023OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_1024_1518,
+ HBG_REG_RX_PKTS_1024TO1518OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_bt_1518,
+ HBG_REG_RX_PKTS_1519TOMAXOCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_64, HBG_REG_TX_PKTS_64OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_65_127,
+ HBG_REG_TX_PKTS_65TO127OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_128_255,
+ HBG_REG_TX_PKTS_128TO255OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_256_511,
+ HBG_REG_TX_PKTS_256TO511OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_512_1023,
+ HBG_REG_TX_PKTS_512TO1023OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_1024_1518,
+ HBG_REG_TX_PKTS_1024TO1518OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_bt_1518,
+ HBG_REG_TX_PKTS_1519TOMAXOCTETS_ADDR),
+};
+
+static const struct hbg_ethtool_stats hbg_ethtool_mac_stats_info[] = {
+ HBG_STATS_REG_I(rx_mc_pkt_cnt, HBG_REG_RX_MC_PKTS_ADDR),
+ HBG_STATS_REG_I(rx_bc_pkt_cnt, HBG_REG_RX_BC_PKTS_ADDR),
+ HBG_STATS_REG_I(rx_align_error_cnt, HBG_REG_RX_ALIGN_ERRORS_ADDR),
+ HBG_STATS_REG_I(rx_octets_total_ok_cnt,
+ HBG_REG_RX_OCTETS_TOTAL_OK_ADDR),
+ HBG_STATS_REG_I(rx_trans_pkt_cnt, HBG_REG_RX_TRANS_PKG_CNT_ADDR),
+ HBG_STATS_REG_I(rx_fcs_error_cnt, HBG_REG_RX_FCS_ERRORS_ADDR),
+ HBG_STATS_REG_I(tx_mc_pkt_cnt, HBG_REG_TX_MC_PKTS_ADDR),
+ HBG_STATS_REG_I(tx_bc_pkt_cnt, HBG_REG_TX_BC_PKTS_ADDR),
+ HBG_STATS_REG_I(tx_octets_total_ok_cnt,
+ HBG_REG_OCTETS_TRANSMITTED_OK_ADDR),
+ HBG_STATS_REG_I(tx_trans_pkt_cnt, HBG_REG_TX_TRANS_PKG_CNT_ADDR),
+};
+
+static const struct hbg_ethtool_stats hbg_ethtool_ctrl_stats_info[] = {
+ HBG_STATS_REG_I(rx_pause_macctl_frame_cnt,
+ HBG_REG_RX_PAUSE_MACCTL_FRAMCOUNTER_ADDR),
+ HBG_STATS_REG_I(tx_pause_frame_cnt, HBG_REG_TX_PAUSE_FRAMES_ADDR),
+ HBG_STATS_REG_I(rx_unknown_macctl_frame_cnt,
+ HBG_REG_RX_UNKNOWN_MACCTL_FRAMCOUNTER_ADDR),
+};
+
+enum hbg_reg_dump_type {
+ HBG_DUMP_REG_TYPE_SPEC = 0,
+ HBG_DUMP_REG_TYPE_MDIO,
+ HBG_DUMP_REG_TYPE_GMAC,
+ HBG_DUMP_REG_TYPE_PCU,
+};
+
+struct hbg_reg_info {
+ u32 type;
+ u32 offset;
+ u32 val;
+};
+
+#define HBG_DUMP_SPEC_I(offset) {HBG_DUMP_REG_TYPE_SPEC, offset, 0}
+#define HBG_DUMP_MDIO_I(offset) {HBG_DUMP_REG_TYPE_MDIO, offset, 0}
+#define HBG_DUMP_GMAC_I(offset) {HBG_DUMP_REG_TYPE_GMAC, offset, 0}
+#define HBG_DUMP_PCU_I(offset) {HBG_DUMP_REG_TYPE_PCU, offset, 0}
+
+static const struct hbg_reg_info hbg_dump_reg_infos[] = {
+ /* dev specs */
+ HBG_DUMP_SPEC_I(HBG_REG_SPEC_VALID_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_EVENT_REQ_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MAC_ID_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_PHY_ID_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MAC_ADDR_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MAC_ADDR_HIGH_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_UC_MAC_NUM_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MDIO_FREQ_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MAX_MTU_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MIN_MTU_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_TX_FIFO_NUM_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_RX_FIFO_NUM_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_VLAN_LAYERS_ADDR),
+
+ /* mdio */
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_COMMAND_ADDR),
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_ADDR_ADDR),
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_WDATA_ADDR),
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_RDATA_ADDR),
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_STA_ADDR),
+
+ /* gmac */
+ HBG_DUMP_GMAC_I(HBG_REG_DUPLEX_TYPE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_FD_FC_TYPE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_FC_TX_TIMER_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_FD_FC_ADDR_LOW_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_FD_FC_ADDR_HIGH_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_MAX_FRAME_SIZE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_PORT_MODE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_PORT_ENABLE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_PAUSE_ENABLE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_AN_NEG_STATE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_TRANSMIT_CTRL_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_REC_FILT_CTRL_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_LINE_LOOP_BACK_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_CF_CRC_STRIP_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_MODE_CHANGE_EN_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_LOOP_REG_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_RECV_CTRL_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_VLAN_CODE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_0_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_0_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_1_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_1_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_2_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_2_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_3_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_3_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_4_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_4_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_5_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_5_ADDR),
+
+ /* pcu */
+ HBG_DUMP_PCU_I(HBG_REG_TX_FIFO_THRSLD_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_FIFO_THRSLD_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CFG_FIFO_THRSLD_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_INTRPT_MSK_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_INTRPT_STAT_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_INTRPT_CLR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_TX_BUS_ERR_ADDR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_BUS_ERR_ADDR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_MAX_FRAME_LEN_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_DEBUG_ST_MCH_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_FIFO_CURR_STATUS_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_FIFO_HIST_STATUS_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_CFF_DATA_NUM_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_TX_PAUSE_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_CFF_ADDR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_BUF_SIZE_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_BUS_CTRL_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_CTRL_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_PKT_MODE_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_DBG_ST0_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_DBG_ST1_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_DBG_ST2_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_BUS_RST_EN_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_TXINT_MSK_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_TXINT_STAT_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_TXINT_CLR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_RXINT_MSK_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_RXINT_STAT_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_RXINT_CLR_ADDR),
+};
+
+static const u32 hbg_dump_type_base_array[] = {
+ [HBG_DUMP_REG_TYPE_SPEC] = 0,
+ [HBG_DUMP_REG_TYPE_MDIO] = HBG_REG_MDIO_BASE,
+ [HBG_DUMP_REG_TYPE_GMAC] = HBG_REG_SGMII_BASE,
+ [HBG_DUMP_REG_TYPE_PCU] = HBG_REG_SGMII_BASE,
+};
+
+static int hbg_ethtool_get_regs_len(struct net_device *netdev)
+{
+ return ARRAY_SIZE(hbg_dump_reg_infos) * sizeof(struct hbg_reg_info);
+}
+
+static void hbg_ethtool_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *data)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_reg_info *info;
+ u32 i, offset = 0;
+
+ regs->version = 0;
+ for (i = 0; i < ARRAY_SIZE(hbg_dump_reg_infos); i++) {
+ info = data + offset;
+
+ *info = hbg_dump_reg_infos[i];
+ info->val = hbg_reg_read(priv, info->offset);
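+ /* Report offsets relative to their register block base. */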
+ info->offset -= hbg_dump_type_base_array[info->type];
+
+ offset += sizeof(*info);
+ }
+}
+
+static void hbg_ethtool_get_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *param)
+{
+ struct hbg_priv *priv = netdev_priv(net_dev);
+
+ param->autoneg = priv->mac.pause_autoneg;
+ hbg_hw_get_pause_enable(priv, &param->tx_pause, &param->rx_pause);
+}
+
+static int hbg_ethtool_set_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *param)
+{
+ struct hbg_priv *priv = netdev_priv(net_dev);
+
+ priv->mac.pause_autoneg = param->autoneg;
+ phy_set_asym_pause(priv->mac.phydev, param->rx_pause, param->tx_pause);
+
+ if (!param->autoneg)
+ hbg_hw_set_pause_enable(priv, param->tx_pause, param->rx_pause);
+
+ priv->user_def.pause_param = *param;
+ return 0;
+}
+
+static int hbg_ethtool_reset(struct net_device *netdev, u32 *flags)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ if (*flags != ETH_RESET_DEDICATED)
+ return -EOPNOTSUPP;
+
+ *flags = 0;
+ return hbg_reset(priv);
+}
+
+static void hbg_update_stats_by_info(struct hbg_priv *priv,
+ const struct hbg_ethtool_stats *info,
+ u32 info_len)
+{
+ const struct hbg_ethtool_stats *stats;
+ u32 i;
+
+ if (test_bit(HBG_NIC_STATE_RESETTING, &priv->state))
+ return;
+
+ for (i = 0; i < info_len; i++) {
+ stats = &info[i];
+ if (!stats->reg)
+ continue;
+
+ HBG_STATS_U(&priv->stats, stats->offset,
+ hbg_reg_read(priv, stats->reg));
+ }
+}
+
+void hbg_update_stats(struct hbg_priv *priv)
+{
+ hbg_update_stats_by_info(priv, hbg_ethtool_stats_info,
+ ARRAY_SIZE(hbg_ethtool_stats_info));
+ hbg_update_stats_by_info(priv, hbg_ethtool_rmon_stats_info,
+ ARRAY_SIZE(hbg_ethtool_rmon_stats_info));
+ hbg_update_stats_by_info(priv, hbg_ethtool_mac_stats_info,
+ ARRAY_SIZE(hbg_ethtool_mac_stats_info));
+ hbg_update_stats_by_info(priv, hbg_ethtool_ctrl_stats_info,
+ ARRAY_SIZE(hbg_ethtool_ctrl_stats_info));
+}
+
+static int hbg_ethtool_get_sset_count(struct net_device *netdev, int stringset)
+{
+ if (stringset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ return ARRAY_SIZE(hbg_ethtool_stats_info);
+}
+
+static void hbg_ethtool_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ u32 i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(hbg_ethtool_stats_info); i++)
+ ethtool_puts(&data, hbg_ethtool_stats_info[i].name);
+}
+
+static void hbg_ethtool_get_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ u32 i;
+
+ hbg_update_stats(priv);
+ for (i = 0; i < ARRAY_SIZE(hbg_ethtool_stats_info); i++)
+ *data++ = HBG_STATS_R(&priv->stats,
+ hbg_ethtool_stats_info[i].offset);
+}
+
+static void hbg_ethtool_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *epstats)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *stats = &priv->stats;
+
+ hbg_update_stats(priv);
+ epstats->rx_pause_frames = stats->rx_pause_macctl_frame_cnt;
+ epstats->tx_pause_frames = stats->tx_pause_frame_cnt;
+}
+
+static void hbg_ethtool_get_eth_mac_stats(struct net_device *netdev,
+ struct ethtool_eth_mac_stats *emstats)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *stats = &priv->stats;
+
+ hbg_update_stats(priv);
+ emstats->FramesTransmittedOK = stats->tx_trans_pkt_cnt;
+ emstats->FramesReceivedOK = stats->rx_trans_pkt_cnt;
+ emstats->FrameCheckSequenceErrors = stats->rx_fcs_error_cnt;
+ emstats->AlignmentErrors = stats->rx_align_error_cnt;
+ emstats->OctetsTransmittedOK = stats->tx_octets_total_ok_cnt;
+ emstats->OctetsReceivedOK = stats->rx_octets_total_ok_cnt;
+
+ emstats->MulticastFramesXmittedOK = stats->tx_mc_pkt_cnt;
+ emstats->BroadcastFramesXmittedOK = stats->tx_bc_pkt_cnt;
+ emstats->MulticastFramesReceivedOK = stats->rx_mc_pkt_cnt;
+ emstats->BroadcastFramesReceivedOK = stats->rx_bc_pkt_cnt;
+ emstats->InRangeLengthErrors = stats->rx_fcs_error_cnt +
+ stats->rx_jabber_err_cnt +
+ stats->rx_unknown_macctl_frame_cnt +
+ stats->rx_bufrq_err_cnt +
+ stats->rx_we_err_cnt;
+ emstats->OutOfRangeLengthField = stats->rx_frame_short_err_cnt +
+ stats->rx_frame_runt_err_cnt +
+ stats->rx_lengthfield_err_cnt +
+ stats->rx_frame_long_err_cnt +
+ stats->rx_frame_very_long_err_cnt;
+ emstats->FrameTooLongErrors = stats->rx_frame_long_err_cnt +
+ stats->rx_frame_very_long_err_cnt;
+}
+
+static void
+hbg_ethtool_get_eth_ctrl_stats(struct net_device *netdev,
+ struct ethtool_eth_ctrl_stats *ecstats)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *s = &priv->stats;
+
+ hbg_update_stats(priv);
+ ecstats->MACControlFramesTransmitted = s->tx_pause_frame_cnt;
+ ecstats->MACControlFramesReceived = s->rx_pause_macctl_frame_cnt;
+ ecstats->UnsupportedOpcodesReceived = s->rx_unknown_macctl_frame_cnt;
+}
+
+static const struct ethtool_rmon_hist_range hbg_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 4095 },
+};
+
+static void
+hbg_ethtool_get_rmon_stats(struct net_device *netdev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *stats = &priv->stats;
+
+ hbg_update_stats(priv);
+ rmon_stats->undersize_pkts = stats->rx_frame_short_err_cnt +
+ stats->rx_frame_runt_err_cnt +
+ stats->rx_lengthfield_err_cnt;
+ rmon_stats->oversize_pkts = stats->rx_frame_long_err_cnt +
+ stats->rx_frame_very_long_err_cnt;
+ rmon_stats->fragments = stats->rx_desc_frag_cnt;
+ rmon_stats->hist[0] = stats->rx_framesize_64;
+ rmon_stats->hist[1] = stats->rx_framesize_65_127;
+ rmon_stats->hist[2] = stats->rx_framesize_128_255;
+ rmon_stats->hist[3] = stats->rx_framesize_256_511;
+ rmon_stats->hist[4] = stats->rx_framesize_512_1023;
+ rmon_stats->hist[5] = stats->rx_framesize_1024_1518;
+ rmon_stats->hist[6] = stats->rx_framesize_bt_1518;
+
+ rmon_stats->hist_tx[0] = stats->tx_framesize_64;
+ rmon_stats->hist_tx[1] = stats->tx_framesize_65_127;
+ rmon_stats->hist_tx[2] = stats->tx_framesize_128_255;
+ rmon_stats->hist_tx[3] = stats->tx_framesize_256_511;
+ rmon_stats->hist_tx[4] = stats->tx_framesize_512_1023;
+ rmon_stats->hist_tx[5] = stats->tx_framesize_1024_1518;
+ rmon_stats->hist_tx[6] = stats->tx_framesize_bt_1518;
+
+ *ranges = hbg_rmon_ranges;
+}
static const struct ethtool_ops hbg_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_regs_len = hbg_ethtool_get_regs_len,
+ .get_regs = hbg_ethtool_get_regs,
+ .get_pauseparam = hbg_ethtool_get_pauseparam,
+ .set_pauseparam = hbg_ethtool_set_pauseparam,
+ .reset = hbg_ethtool_reset,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_sset_count = hbg_ethtool_get_sset_count,
+ .get_strings = hbg_ethtool_get_strings,
+ .get_ethtool_stats = hbg_ethtool_get_stats,
+ .get_pause_stats = hbg_ethtool_get_pause_stats,
+ .get_eth_mac_stats = hbg_ethtool_get_eth_mac_stats,
+ .get_eth_ctrl_stats = hbg_ethtool_get_eth_ctrl_stats,
+ .get_rmon_stats = hbg_ethtool_get_rmon_stats,
};
void hbg_ethtool_set_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h
index 628707ec2686..e173155b146a 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h
@@ -6,6 +6,11 @@
#include <linux/netdevice.h>
+#define HBG_STATS_FIELD_OFF(f) (offsetof(struct hbg_stats, f))
+#define HBG_STATS_R(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
+#define HBG_STATS_U(p, offset, val) (HBG_STATS_R(p, offset) += (val))
+
void hbg_ethtool_set_ops(struct net_device *netdev);
+void hbg_update_stats(struct hbg_priv *priv);
#endif
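
The HBG_STATS_* macros above drive the offset-based statistics tables in hbg_ethtool.c: each table entry pairs a hardware register with an offset into struct hbg_stats, and the update loop accumulates the register value into the field at that offset. A minimal stand-alone sketch of that pattern follows; the field names and register values are made up for illustration and are not the driver's.

#include <stddef.h>
#include <stdint.h>

struct ex_stats {
	uint64_t rx_fcs_error_cnt;
	uint64_t tx_pause_frame_cnt;
};

#define EX_STATS_FIELD_OFF(f)	offsetof(struct ex_stats, f)
#define EX_STATS_R(p, off)	(*(uint64_t *)((uint8_t *)(p) + (off)))
#define EX_STATS_U(p, off, v)	(EX_STATS_R(p, off) += (v))

struct ex_stats_info {
	uint32_t reg;		/* hardware register address (hypothetical) */
	size_t offset;		/* offset of the matching ex_stats field */
};

static const struct ex_stats_info ex_info[] = {
	{ 0x100B0, EX_STATS_FIELD_OFF(rx_fcs_error_cnt) },
	{ 0x1015C, EX_STATS_FIELD_OFF(tx_pause_frame_cnt) },
};

/* accumulate one freshly read register value into its stats field */
static void ex_update(struct ex_stats *s, size_t i, uint32_t reg_val)
{
	EX_STATS_U(s, ex_info[i].offset, reg_val);
}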
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
index 05295c2ad439..d6e8ce8e351a 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
@@ -3,6 +3,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
#include <linux/iopoll.h>
#include <linux/minmax.h>
#include "hbg_common.h"
@@ -11,12 +12,21 @@
#define HBG_HW_EVENT_WAIT_TIMEOUT_US (2 * 1000 * 1000)
#define HBG_HW_EVENT_WAIT_INTERVAL_US (10 * 1000)
+#define HBG_MAC_LINK_WAIT_TIMEOUT_US (500 * 1000)
+#define HBG_MAC_LINK_WAIT_INTERVAL_US (5 * 1000)
/* little endian or big endian.
* ctrl means packet description, data means skb packet data
*/
#define HBG_ENDIAN_CTRL_LE_DATA_BE 0x0
#define HBG_PCU_FRAME_LEN_PLUS 4
+#define HBG_FIFO_TX_FULL_THRSLD 0x3F0
+#define HBG_FIFO_TX_EMPTY_THRSLD 0x1F0
+#define HBG_FIFO_RX_FULL_THRSLD 0x240
+#define HBG_FIFO_RX_EMPTY_THRSLD 0x190
+#define HBG_CFG_FIFO_FULL_THRSLD 0x10
+#define HBG_CFG_FIFO_EMPTY_THRSLD 0x01
+
static bool hbg_hw_spec_is_valid(struct hbg_priv *priv)
{
return hbg_reg_read(priv, HBG_REG_SPEC_VALID_ADDR) &&
@@ -67,6 +77,8 @@ static int hbg_hw_dev_specs_init(struct hbg_priv *priv)
specs->vlan_layers = hbg_reg_read(priv, HBG_REG_VLAN_LAYERS_ADDR);
specs->rx_fifo_num = hbg_reg_read(priv, HBG_REG_RX_FIFO_NUM_ADDR);
specs->tx_fifo_num = hbg_reg_read(priv, HBG_REG_TX_FIFO_NUM_ADDR);
+ specs->uc_mac_num = hbg_reg_read(priv, HBG_REG_UC_MAC_NUM_ADDR);
+
mac_addr = hbg_reg_read64(priv, HBG_REG_MAC_ADDR_ADDR);
u64_to_ether_addr(mac_addr, (u8 *)specs->mac_addr.sa_data);
@@ -135,9 +147,13 @@ void hbg_hw_irq_enable(struct hbg_priv *priv, u32 mask, bool enable)
hbg_reg_write(priv, HBG_REG_CF_INTRPT_MSK_ADDR, value);
}
-void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr)
+void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr, u32 index)
{
- hbg_reg_write64(priv, HBG_REG_STATION_ADDR_LOW_2_ADDR, mac_addr);
+ u32 addr;
+
+ /* each MAC address entry is a u64, so the register stride is 0x8 */
+ addr = HBG_REG_STATION_ADDR_LOW_2_ADDR + (index * 0x8);
+ hbg_reg_write64(priv, addr, mac_addr);
}
static void hbg_hw_set_pcu_max_frame_len(struct hbg_priv *priv,
@@ -161,8 +177,21 @@ static void hbg_hw_set_mac_max_frame_len(struct hbg_priv *priv,
void hbg_hw_set_mtu(struct hbg_priv *priv, u16 mtu)
{
- hbg_hw_set_pcu_max_frame_len(priv, mtu);
- hbg_hw_set_mac_max_frame_len(priv, mtu);
+ /* Setting burst_len BIT(29) to 1 improves TX performance,
+ * but packets are dropped when mtu > 2000.
+ * So clear BIT(29) when mtu > 2000.
+ */
+ u32 burst_len_bit = (mtu > 2000) ? 0 : 1;
+ u32 frame_len;
+
+ frame_len = mtu + VLAN_HLEN * priv->dev_specs.vlan_layers +
+ ETH_HLEN + ETH_FCS_LEN;
+
+ hbg_hw_set_pcu_max_frame_len(priv, frame_len);
+ hbg_hw_set_mac_max_frame_len(priv, frame_len);
+
+ hbg_reg_write_field(priv, HBG_REG_BRUST_LENGTH_ADDR,
+ HBG_REG_BRUST_LENGTH_B, burst_len_bit);
}
void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable)
@@ -201,10 +230,102 @@ void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr)
void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
{
+ u32 link_status;
+ int ret;
+
+ hbg_hw_mac_enable(priv, HBG_STATUS_DISABLE);
+
hbg_reg_write_field(priv, HBG_REG_PORT_MODE_ADDR,
HBG_REG_PORT_MODE_M, speed);
hbg_reg_write_field(priv, HBG_REG_DUPLEX_TYPE_ADDR,
HBG_REG_DUPLEX_B, duplex);
+
+ hbg_hw_event_notify(priv, HBG_HW_EVENT_CORE_RESET);
+
+ hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE);
+
+ if (priv->mac.phy_addr == HBG_NO_PHY)
+ return;
+
+ /* wait for the MAC link to come up */
+ ret = readl_poll_timeout(priv->io_base + HBG_REG_AN_NEG_STATE_ADDR,
+ link_status,
+ FIELD_GET(HBG_REG_AN_NEG_STATE_NP_LINK_OK_B,
+ link_status),
+ HBG_MAC_LINK_WAIT_INTERVAL_US,
+ HBG_MAC_LINK_WAIT_TIMEOUT_US);
+ if (ret)
+ hbg_np_link_fail_task_schedule(priv);
+}
+
+/* only the unicast (uc) filter is supported */
+void hbg_hw_set_mac_filter_enable(struct hbg_priv *priv, u32 enable)
+{
+ hbg_reg_write_field(priv, HBG_REG_REC_FILT_CTRL_ADDR,
+ HBG_REG_REC_FILT_CTRL_UC_MATCH_EN_B, enable);
+
+ /* only uc filter is supported, so set all bits of mc mask reg to 1 */
+ hbg_reg_write64(priv, HBG_REG_STATION_ADDR_LOW_MSK_0, U64_MAX);
+ hbg_reg_write64(priv, HBG_REG_STATION_ADDR_LOW_MSK_1, U64_MAX);
+}
+
+void hbg_hw_set_pause_enable(struct hbg_priv *priv, u32 tx_en, u32 rx_en)
+{
+ hbg_reg_write_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
+ HBG_REG_PAUSE_ENABLE_TX_B, tx_en);
+ hbg_reg_write_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
+ HBG_REG_PAUSE_ENABLE_RX_B, rx_en);
+
+ hbg_reg_write_field(priv, HBG_REG_REC_FILT_CTRL_ADDR,
+ HBG_REG_REC_FILT_CTRL_PAUSE_FRM_PASS_B, rx_en);
+}
+
+void hbg_hw_get_pause_enable(struct hbg_priv *priv, u32 *tx_en, u32 *rx_en)
+{
+ *tx_en = hbg_reg_read_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
+ HBG_REG_PAUSE_ENABLE_TX_B);
+ *rx_en = hbg_reg_read_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
+ HBG_REG_PAUSE_ENABLE_RX_B);
+}
+
+void hbg_hw_set_rx_pause_mac_addr(struct hbg_priv *priv, u64 mac_addr)
+{
+ hbg_reg_write64(priv, HBG_REG_FD_FC_ADDR_LOW_ADDR, mac_addr);
+}
+
+static void hbg_hw_set_fifo_thrsld(struct hbg_priv *priv,
+ u32 full, u32 empty, enum hbg_dir dir)
+{
+ u32 value = 0;
+
+ value |= FIELD_PREP(HBG_REG_FIFO_THRSLD_FULL_M, full);
+ value |= FIELD_PREP(HBG_REG_FIFO_THRSLD_EMPTY_M, empty);
+
+ if (dir & HBG_DIR_TX)
+ hbg_reg_write(priv, HBG_REG_TX_FIFO_THRSLD_ADDR, value);
+
+ if (dir & HBG_DIR_RX)
+ hbg_reg_write(priv, HBG_REG_RX_FIFO_THRSLD_ADDR, value);
+}
+
+static void hbg_hw_set_cfg_fifo_thrsld(struct hbg_priv *priv,
+ u32 full, u32 empty, enum hbg_dir dir)
+{
+ u32 value;
+
+ value = hbg_reg_read(priv, HBG_REG_CFG_FIFO_THRSLD_ADDR);
+
+ if (dir & HBG_DIR_TX) {
+ value |= FIELD_PREP(HBG_REG_CFG_FIFO_THRSLD_TX_FULL_M, full);
+ value |= FIELD_PREP(HBG_REG_CFG_FIFO_THRSLD_TX_EMPTY_M, empty);
+ }
+
+ if (dir & HBG_DIR_RX) {
+ value |= FIELD_PREP(HBG_REG_CFG_FIFO_THRSLD_RX_FULL_M, full);
+ value |= FIELD_PREP(HBG_REG_CFG_FIFO_THRSLD_RX_EMPTY_M, empty);
+ }
+
+ hbg_reg_write(priv, HBG_REG_CFG_FIFO_THRSLD_ADDR, value);
}
static void hbg_hw_init_transmit_ctrl(struct hbg_priv *priv)
@@ -267,5 +388,12 @@ int hbg_hw_init(struct hbg_priv *priv)
hbg_hw_init_rx_control(priv);
hbg_hw_init_transmit_ctrl(priv);
+
+ hbg_hw_set_fifo_thrsld(priv, HBG_FIFO_TX_FULL_THRSLD,
+ HBG_FIFO_TX_EMPTY_THRSLD, HBG_DIR_TX);
+ hbg_hw_set_fifo_thrsld(priv, HBG_FIFO_RX_FULL_THRSLD,
+ HBG_FIFO_RX_EMPTY_THRSLD, HBG_DIR_RX);
+ hbg_hw_set_cfg_fifo_thrsld(priv, HBG_CFG_FIFO_FULL_THRSLD,
+ HBG_CFG_FIFO_EMPTY_THRSLD, HBG_DIR_TX_RX);
return 0;
}
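
For reference, here is a user-space sketch of the sizing arithmetic used by hbg_hw_set_mtu() above. The constants are written out numerically so the sketch stands alone; it is illustrative only, not driver code.

#include <stdint.h>

/* frame_len = mtu + VLAN_HLEN * vlan_layers + ETH_HLEN + ETH_FCS_LEN */
static uint32_t ex_max_frame_len(uint32_t mtu, uint32_t vlan_layers)
{
	return mtu + 4 * vlan_layers + 14 + 4;
}

/* BIT(29) boosts TX throughput but drops frames once mtu exceeds 2000 */
static uint32_t ex_burst_len_bit(uint32_t mtu)
{
	return (mtu > 2000) ? 0 : 1;
}

/* e.g. mtu = 1500 with one VLAN layer -> 1500 + 4 + 14 + 4 = 1522 bytes */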
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h
index 14fb39241c93..a4a049b5121d 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h
@@ -51,9 +51,13 @@ bool hbg_hw_irq_is_enabled(struct hbg_priv *priv, u32 mask);
void hbg_hw_irq_enable(struct hbg_priv *priv, u32 mask, bool enable);
void hbg_hw_set_mtu(struct hbg_priv *priv, u16 mtu);
void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable);
-void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr);
+void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr, u32 index);
u32 hbg_hw_get_fifo_used_num(struct hbg_priv *priv, enum hbg_dir dir);
void hbg_hw_set_tx_desc(struct hbg_priv *priv, struct hbg_tx_desc *tx_desc);
void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr);
+void hbg_hw_set_mac_filter_enable(struct hbg_priv *priv, u32 enable);
+void hbg_hw_set_pause_enable(struct hbg_priv *priv, u32 tx_en, u32 rx_en);
+void hbg_hw_get_pause_enable(struct hbg_priv *priv, u32 *tx_en, u32 *rx_en);
+void hbg_hw_set_rx_pause_mac_addr(struct hbg_priv *priv, u64 mac_addr);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
index 25dd25f096fe..ae4cb35186d8 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
@@ -6,54 +6,66 @@
#include "hbg_hw.h"
static void hbg_irq_handle_err(struct hbg_priv *priv,
- struct hbg_irq_info *irq_info)
+ const struct hbg_irq_info *irq_info)
{
if (irq_info->need_print)
dev_err(&priv->pdev->dev,
"receive error interrupt: %s\n", irq_info->name);
+
+ if (irq_info->need_reset)
+ hbg_err_reset_task_schedule(priv);
}
static void hbg_irq_handle_tx(struct hbg_priv *priv,
- struct hbg_irq_info *irq_info)
+ const struct hbg_irq_info *irq_info)
{
napi_schedule(&priv->tx_ring.napi);
}
static void hbg_irq_handle_rx(struct hbg_priv *priv,
- struct hbg_irq_info *irq_info)
+ const struct hbg_irq_info *irq_info)
{
napi_schedule(&priv->rx_ring.napi);
}
-#define HBG_TXRX_IRQ_I(name, handle) \
- {#name, HBG_INT_MSK_##name##_B, false, false, 0, handle}
-#define HBG_ERR_IRQ_I(name, need_print) \
- {#name, HBG_INT_MSK_##name##_B, true, need_print, 0, hbg_irq_handle_err}
-
-static struct hbg_irq_info hbg_irqs[] = {
- HBG_TXRX_IRQ_I(RX, hbg_irq_handle_rx),
- HBG_TXRX_IRQ_I(TX, hbg_irq_handle_tx),
- HBG_ERR_IRQ_I(MAC_MII_FIFO_ERR, true),
- HBG_ERR_IRQ_I(MAC_PCS_RX_FIFO_ERR, true),
- HBG_ERR_IRQ_I(MAC_PCS_TX_FIFO_ERR, true),
- HBG_ERR_IRQ_I(MAC_APP_RX_FIFO_ERR, true),
- HBG_ERR_IRQ_I(MAC_APP_TX_FIFO_ERR, true),
- HBG_ERR_IRQ_I(SRAM_PARITY_ERR, true),
- HBG_ERR_IRQ_I(TX_AHB_ERR, true),
- HBG_ERR_IRQ_I(RX_BUF_AVL, false),
- HBG_ERR_IRQ_I(REL_BUF_ERR, true),
- HBG_ERR_IRQ_I(TXCFG_AVL, false),
- HBG_ERR_IRQ_I(TX_DROP, false),
- HBG_ERR_IRQ_I(RX_DROP, false),
- HBG_ERR_IRQ_I(RX_AHB_ERR, true),
- HBG_ERR_IRQ_I(MAC_FIFO_ERR, false),
- HBG_ERR_IRQ_I(RBREQ_ERR, false),
- HBG_ERR_IRQ_I(WE_ERR, false),
+static void hbg_irq_handle_rx_buf_val(struct hbg_priv *priv,
+ const struct hbg_irq_info *irq_info)
+{
+ priv->stats.rx_fifo_less_empty_thrsld_cnt++;
+ hbg_hw_irq_enable(priv, irq_info->mask, true);
+}
+
+#define HBG_IRQ_I(name, handle) \
+ {#name, HBG_INT_MSK_##name##_B, false, false, false, handle}
+#define HBG_ERR_IRQ_I(name, need_print, need_reset) \
+ {#name, HBG_INT_MSK_##name##_B, true, need_print, \
+ need_reset, hbg_irq_handle_err}
+
+static const struct hbg_irq_info hbg_irqs[] = {
+ HBG_IRQ_I(RX, hbg_irq_handle_rx),
+ HBG_IRQ_I(TX, hbg_irq_handle_tx),
+ HBG_ERR_IRQ_I(TX_PKT_CPL, true, true),
+ HBG_ERR_IRQ_I(MAC_MII_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(MAC_PCS_RX_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(MAC_PCS_TX_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(MAC_APP_RX_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(MAC_APP_TX_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(SRAM_PARITY_ERR, true, false),
+ HBG_ERR_IRQ_I(TX_AHB_ERR, true, true),
+ HBG_IRQ_I(RX_BUF_AVL, hbg_irq_handle_rx_buf_val),
+ HBG_ERR_IRQ_I(REL_BUF_ERR, true, false),
+ HBG_ERR_IRQ_I(TXCFG_AVL, false, false),
+ HBG_ERR_IRQ_I(TX_DROP, false, false),
+ HBG_ERR_IRQ_I(RX_DROP, false, false),
+ HBG_ERR_IRQ_I(RX_AHB_ERR, true, false),
+ HBG_ERR_IRQ_I(MAC_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(RBREQ_ERR, true, true),
+ HBG_ERR_IRQ_I(WE_ERR, true, true),
};
static irqreturn_t hbg_irq_handle(int irq_num, void *p)
{
- struct hbg_irq_info *info;
+ const struct hbg_irq_info *info;
struct hbg_priv *priv = p;
u32 status;
u32 i;
@@ -68,7 +80,7 @@ static irqreturn_t hbg_irq_handle(int irq_num, void *p)
hbg_hw_irq_enable(priv, info->mask, false);
hbg_hw_irq_clear(priv, info->mask);
- info->count++;
+ priv->vectors.stats_array[i]++;
if (info->irq_handle)
info->irq_handle(priv, info);
@@ -121,6 +133,12 @@ int hbg_irq_init(struct hbg_priv *priv)
irq_names_map[i]);
}
+ vectors->stats_array = devm_kcalloc(&priv->pdev->dev,
+ ARRAY_SIZE(hbg_irqs),
+ sizeof(u64), GFP_KERNEL);
+ if (!vectors->stats_array)
+ return -ENOMEM;
+
vectors->info_array = hbg_irqs;
vectors->info_array_len = ARRAY_SIZE(hbg_irqs);
return 0;
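
The reworked IRQ path keeps the descriptor table const and shared, and moves the per-interrupt counters into a separately allocated array indexed the same way. A stand-alone sketch of that arrangement (the names and masks below are illustrative, not the driver's):

#include <stddef.h>
#include <stdint.h>

struct ex_irq_info {
	const char *name;
	uint32_t mask;
};

static const struct ex_irq_info ex_irqs[] = {
	{ "RX", 1u << 0 },
	{ "TX", 1u << 1 },
};

struct ex_vectors {
	const struct ex_irq_info *info_array;
	uint64_t *stats_array;		/* one counter per info_array entry */
	size_t info_array_len;
};

static void ex_handle(struct ex_vectors *v, uint32_t status)
{
	for (size_t i = 0; i < v->info_array_len; i++)
		if (status & v->info_array[i].mask)
			v->stats_array[i]++;	/* counters live outside the const table */
}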
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
index 75505fb5cc4a..068da2fd1fea 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
@@ -5,18 +5,23 @@
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
+#include <linux/phy.h>
#include "hbg_common.h"
+#include "hbg_diagnose.h"
+#include "hbg_err.h"
#include "hbg_ethtool.h"
#include "hbg_hw.h"
#include "hbg_irq.h"
#include "hbg_mdio.h"
#include "hbg_txrx.h"
+#include "hbg_debugfs.h"
-static void hbg_change_mtu(struct hbg_priv *priv, int new_mtu);
+#define HBG_SUPPORT_FEATURES (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \
+ NETIF_F_RXCSUM)
static void hbg_all_irq_enable(struct hbg_priv *priv, bool enabled)
{
- struct hbg_irq_info *info;
+ const struct hbg_irq_info *info;
u32 i;
for (i = 0; i < priv->vectors.info_array_len; i++) {
@@ -55,11 +60,7 @@ static int hbg_hw_txrx_clear(struct hbg_priv *priv)
return ret;
/* After reset, regs need to be reconfigured */
- hbg_hw_init(priv);
- hbg_hw_set_uc_addr(priv, ether_addr_to_u64(priv->netdev->dev_addr));
- hbg_change_mtu(priv, priv->netdev->mtu);
-
- return 0;
+ return hbg_rebuild(priv);
}
static int hbg_net_stop(struct net_device *netdev)
@@ -74,31 +75,127 @@ static int hbg_net_stop(struct net_device *netdev)
return hbg_hw_txrx_clear(priv);
}
+static void hbg_update_promisc_mode(struct net_device *netdev, bool overflow)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ /* The MAC filter is enabled only when the table has not overflowed
+ * and IFF_PROMISC is not set in netdev->flags.
+ * Otherwise the filter is disabled.
+ */
+ priv->filter.enabled = !(overflow || (netdev->flags & IFF_PROMISC));
+ hbg_hw_set_mac_filter_enable(priv, priv->filter.enabled);
+}
+
+static void hbg_set_mac_to_mac_table(struct hbg_priv *priv,
+ u32 index, const u8 *addr)
+{
+ if (addr) {
+ ether_addr_copy(priv->filter.mac_table[index].addr, addr);
+ hbg_hw_set_uc_addr(priv, ether_addr_to_u64(addr), index);
+ } else {
+ eth_zero_addr(priv->filter.mac_table[index].addr);
+ hbg_hw_set_uc_addr(priv, 0, index);
+ }
+}
+
+static int hbg_get_index_from_mac_table(struct hbg_priv *priv,
+ const u8 *addr, u32 *index)
+{
+ u32 i;
+
+ for (i = 0; i < priv->filter.table_max_len; i++)
+ if (ether_addr_equal(priv->filter.mac_table[i].addr, addr)) {
+ *index = i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int hbg_add_mac_to_filter(struct hbg_priv *priv, const u8 *addr)
+{
+ u32 index;
+
+ /* already exists */
+ if (!hbg_get_index_from_mac_table(priv, addr, &index))
+ return 0;
+
+ for (index = 0; index < priv->filter.table_max_len; index++)
+ if (is_zero_ether_addr(priv->filter.mac_table[index].addr)) {
+ hbg_set_mac_to_mac_table(priv, index, addr);
+ return 0;
+ }
+
+ return -ENOSPC;
+}
+
+static void hbg_del_mac_from_filter(struct hbg_priv *priv, const u8 *addr)
+{
+ u32 index;
+
+ /* does not exist */
+ if (hbg_get_index_from_mac_table(priv, addr, &index))
+ return;
+
+ hbg_set_mac_to_mac_table(priv, index, NULL);
+}
+
+static int hbg_uc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ return hbg_add_mac_to_filter(priv, addr);
+}
+
+static int hbg_uc_unsync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ if (ether_addr_equal(netdev->dev_addr, (u8 *)addr))
+ return 0;
+
+ hbg_del_mac_from_filter(priv, addr);
+ return 0;
+}
+
+static void hbg_net_set_rx_mode(struct net_device *netdev)
+{
+ int ret;
+
+ ret = __dev_uc_sync(netdev, hbg_uc_sync, hbg_uc_unsync);
+
+ /* If ret != 0, overflow has occurred */
+ hbg_update_promisc_mode(netdev, !!ret);
+}
+
static int hbg_net_set_mac_address(struct net_device *netdev, void *addr)
{
struct hbg_priv *priv = netdev_priv(netdev);
u8 *mac_addr;
+ bool exists;
+ u32 index;
mac_addr = ((struct sockaddr *)addr)->sa_data;
if (!is_valid_ether_addr(mac_addr))
return -EADDRNOTAVAIL;
- hbg_hw_set_uc_addr(priv, ether_addr_to_u64(mac_addr));
- dev_addr_set(netdev, mac_addr);
+ /* The host MAC address always uses index 0.
+ * If the new MAC address already exists in the table,
+ * install it at index 0 and clear its previous entry.
+ */
+ exists = !hbg_get_index_from_mac_table(priv, mac_addr, &index);
+ hbg_set_mac_to_mac_table(priv, 0, mac_addr);
+ if (exists)
+ hbg_set_mac_to_mac_table(priv, index, NULL);
+ hbg_hw_set_rx_pause_mac_addr(priv, ether_addr_to_u64(mac_addr));
+ dev_addr_set(netdev, mac_addr);
return 0;
}
-static void hbg_change_mtu(struct hbg_priv *priv, int new_mtu)
-{
- u32 frame_len;
-
- frame_len = new_mtu + VLAN_HLEN * priv->dev_specs.vlan_layers +
- ETH_HLEN + ETH_FCS_LEN;
- hbg_hw_set_mtu(priv, frame_len);
-}
-
static int hbg_net_change_mtu(struct net_device *netdev, int new_mtu)
{
struct hbg_priv *priv = netdev_priv(netdev);
@@ -106,12 +203,12 @@ static int hbg_net_change_mtu(struct net_device *netdev, int new_mtu)
if (netif_running(netdev))
return -EBUSY;
- hbg_change_mtu(priv, new_mtu);
- WRITE_ONCE(netdev->mtu, new_mtu);
-
dev_dbg(&priv->pdev->dev,
"change mtu from %u to %u\n", netdev->mtu, new_mtu);
+ hbg_hw_set_mtu(priv, new_mtu);
+ WRITE_ONCE(netdev->mtu, new_mtu);
+
return 0;
}
@@ -122,6 +219,10 @@ static void hbg_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
char *buf = ring->tout_log_buf;
u32 pos = 0;
+ priv->stats.tx_timeout_cnt++;
+
+ pos += scnprintf(buf + pos, HBG_TX_TIMEOUT_BUF_LEN - pos,
+ "tx_timeout cnt: %llu\n", priv->stats.tx_timeout_cnt);
pos += scnprintf(buf + pos, HBG_TX_TIMEOUT_BUF_LEN - pos,
"ring used num: %u, fifo used num: %u\n",
hbg_get_queue_used_num(ring),
@@ -134,6 +235,39 @@ static void hbg_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
netdev_info(netdev, "%s", buf);
}
+static void hbg_net_get_stats(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *h_stats = &priv->stats;
+
+ hbg_update_stats(priv);
+ dev_get_tstats64(netdev, stats);
+
+ /* fifo empty */
+ stats->tx_fifo_errors += h_stats->tx_drop_cnt;
+
+ stats->tx_dropped += h_stats->tx_excessive_length_drop_cnt +
+ h_stats->tx_drop_cnt;
+ stats->tx_errors += h_stats->tx_add_cs_fail_cnt +
+ h_stats->tx_bufrl_err_cnt +
+ h_stats->tx_underrun_err_cnt +
+ h_stats->tx_crc_err_cnt;
+ stats->rx_errors += h_stats->rx_data_error_cnt;
+ stats->multicast += h_stats->rx_mc_pkt_cnt;
+ stats->rx_dropped += h_stats->rx_desc_drop;
+ stats->rx_length_errors += h_stats->rx_frame_very_long_err_cnt +
+ h_stats->rx_frame_long_err_cnt +
+ h_stats->rx_frame_runt_err_cnt +
+ h_stats->rx_frame_short_err_cnt +
+ h_stats->rx_lengthfield_err_cnt;
+ stats->rx_frame_errors += h_stats->rx_desc_l2_err_cnt +
+ h_stats->rx_desc_l3l4_err_cnt;
+ stats->rx_fifo_errors += h_stats->rx_overflow_cnt +
+ h_stats->rx_overrun_cnt;
+ stats->rx_crc_errors += h_stats->rx_fcs_error_cnt;
+}
+
static const struct net_device_ops hbg_netdev_ops = {
.ndo_open = hbg_net_open,
.ndo_stop = hbg_net_stop,
@@ -142,8 +276,93 @@ static const struct net_device_ops hbg_netdev_ops = {
.ndo_set_mac_address = hbg_net_set_mac_address,
.ndo_change_mtu = hbg_net_change_mtu,
.ndo_tx_timeout = hbg_net_tx_timeout,
+ .ndo_set_rx_mode = hbg_net_set_rx_mode,
+ .ndo_get_stats64 = hbg_net_get_stats,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
};
+static void hbg_service_task(struct work_struct *work)
+{
+ struct hbg_priv *priv = container_of(work, struct hbg_priv,
+ service_task.work);
+
+ if (test_and_clear_bit(HBG_NIC_STATE_NEED_RESET, &priv->state))
+ hbg_err_reset(priv);
+
+ if (test_and_clear_bit(HBG_NIC_STATE_NP_LINK_FAIL, &priv->state))
+ hbg_fix_np_link_fail(priv);
+
+ hbg_diagnose_message_push(priv);
+
+ /* The hardware statistics registers are u32.
+ * To prevent them from overflowing,
+ * the driver dumps the statistics every 30 seconds.
+ */
+ if (time_after(jiffies, priv->last_update_stats_time + 30 * HZ)) {
+ hbg_update_stats(priv);
+ priv->last_update_stats_time = jiffies;
+ }
+
+ schedule_delayed_work(&priv->service_task,
+ msecs_to_jiffies(MSEC_PER_SEC));
+}
+
+void hbg_err_reset_task_schedule(struct hbg_priv *priv)
+{
+ set_bit(HBG_NIC_STATE_NEED_RESET, &priv->state);
+ schedule_delayed_work(&priv->service_task, 0);
+}
+
+void hbg_np_link_fail_task_schedule(struct hbg_priv *priv)
+{
+ set_bit(HBG_NIC_STATE_NP_LINK_FAIL, &priv->state);
+ schedule_delayed_work(&priv->service_task, 0);
+}
+
+static void hbg_cancel_delayed_work_sync(void *data)
+{
+ cancel_delayed_work_sync(data);
+}
+
+static int hbg_delaywork_init(struct hbg_priv *priv)
+{
+ INIT_DELAYED_WORK(&priv->service_task, hbg_service_task);
+ schedule_delayed_work(&priv->service_task, 0);
+ return devm_add_action_or_reset(&priv->pdev->dev,
+ hbg_cancel_delayed_work_sync,
+ &priv->service_task);
+}
+
+static int hbg_mac_filter_init(struct hbg_priv *priv)
+{
+ struct hbg_dev_specs *dev_specs = &priv->dev_specs;
+ struct hbg_mac_filter *filter = &priv->filter;
+ struct hbg_mac_table_entry *tmp_table;
+
+ tmp_table = devm_kcalloc(&priv->pdev->dev, dev_specs->uc_mac_num,
+ sizeof(*tmp_table), GFP_KERNEL);
+ if (!tmp_table)
+ return -ENOMEM;
+
+ filter->mac_table = tmp_table;
+ filter->table_max_len = dev_specs->uc_mac_num;
+ filter->enabled = true;
+
+ hbg_hw_set_mac_filter_enable(priv, filter->enabled);
+ return 0;
+}
+
+static void hbg_init_user_def(struct hbg_priv *priv)
+{
+ struct ethtool_pauseparam *pause_param = &priv->user_def.pause_param;
+
+ priv->mac.pause_autoneg = HBG_STATUS_ENABLE;
+
+ pause_param->autoneg = priv->mac.pause_autoneg;
+ hbg_hw_get_pause_enable(priv, &pause_param->tx_pause,
+ &pause_param->rx_pause);
+}
+
static int hbg_init(struct hbg_priv *priv)
{
int ret;
@@ -160,7 +379,21 @@ static int hbg_init(struct hbg_priv *priv)
if (ret)
return ret;
- return hbg_mdio_init(priv);
+ ret = hbg_mdio_init(priv);
+ if (ret)
+ return ret;
+
+ ret = hbg_mac_filter_init(priv);
+ if (ret)
+ return ret;
+
+ ret = hbg_delaywork_init(priv);
+ if (ret)
+ return ret;
+
+ hbg_debugfs_init(priv);
+ hbg_init_user_def(priv);
+ return 0;
}
static int hbg_pci_init(struct pci_dev *pdev)
@@ -184,7 +417,7 @@ static int hbg_pci_init(struct pci_dev *pdev)
priv->io_base = pcim_iomap_table(pdev)[0];
if (!priv->io_base)
- return dev_err_probe(dev, -ENOMEM, "failed to get io base\n");
+ return -ENOMEM;
pci_set_master(pdev);
return 0;
@@ -216,13 +449,18 @@ static int hbg_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
+ /* set default features */
+ netdev->features |= HBG_SUPPORT_FEATURES;
+ netdev->hw_features |= HBG_SUPPORT_FEATURES;
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
netdev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
netdev->max_mtu = priv->dev_specs.max_mtu;
netdev->min_mtu = priv->dev_specs.min_mtu;
netdev->netdev_ops = &hbg_netdev_ops;
netdev->watchdog_timeo = 5 * HZ;
- hbg_change_mtu(priv, ETH_DATA_LEN);
+ hbg_hw_set_mtu(priv, ETH_DATA_LEN);
hbg_net_set_mac_address(priv->netdev, &priv->dev_specs.mac_addr);
hbg_ethtool_set_ops(netdev);
@@ -234,6 +472,22 @@ static int hbg_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
}
+static void hbg_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+
+ rtnl_lock();
+ if (netif_running(netdev))
+ dev_close(netdev);
+ rtnl_unlock();
+
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ if (system_state == SYSTEM_POWER_OFF)
+ pci_set_power_state(pdev, PCI_D3hot);
+}
+
static const struct pci_device_id hbg_pci_tbl[] = {
{PCI_VDEVICE(HUAWEI, 0x3730), 0},
{ }
@@ -244,8 +498,29 @@ static struct pci_driver hbg_driver = {
.name = "hibmcge",
.id_table = hbg_pci_tbl,
.probe = hbg_probe,
+ .shutdown = hbg_shutdown,
};
-module_pci_driver(hbg_driver);
+
+static int __init hbg_module_init(void)
+{
+ int ret;
+
+ hbg_debugfs_register();
+ hbg_set_pci_err_handler(&hbg_driver);
+ ret = pci_register_driver(&hbg_driver);
+ if (ret)
+ hbg_debugfs_unregister();
+
+ return ret;
+}
+module_init(hbg_module_init);
+
+static void __exit hbg_module_exit(void)
+{
+ pci_unregister_driver(&hbg_driver);
+ hbg_debugfs_unregister();
+}
+module_exit(hbg_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
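
The unicast filter added in hbg_main.c keeps a small linear table: lookups scan every slot, an add reuses the first all-zero slot, and -ENOSPC signals overflow, which in turn forces promiscuous mode. A condensed stand-alone sketch of that table logic (table size and helpers here are assumptions, using plain memcmp/memcpy instead of the kernel's ether_addr helpers):

#include <errno.h>
#include <stdint.h>
#include <string.h>

#define EX_TABLE_LEN	4
static uint8_t ex_table[EX_TABLE_LEN][6];	/* an all-zero entry marks a free slot */

static int ex_find(const uint8_t *addr)
{
	for (int i = 0; i < EX_TABLE_LEN; i++)
		if (!memcmp(ex_table[i], addr, 6))
			return i;
	return -EINVAL;
}

static int ex_add(const uint8_t *addr)
{
	static const uint8_t zero[6];

	if (ex_find(addr) >= 0)
		return 0;			/* already present */

	for (int i = 0; i < EX_TABLE_LEN; i++)
		if (!memcmp(ex_table[i], zero, 6)) {
			memcpy(ex_table[i], addr, 6);
			return 0;
		}
	return -ENOSPC;				/* overflow -> fall back to promiscuous mode */
}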
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
index a3479fba8501..b6f0a2780ea8 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
@@ -2,6 +2,8 @@
// Copyright (c) 2024 Hisilicon Limited.
#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/rtnetlink.h>
#include "hbg_common.h"
#include "hbg_hw.h"
#include "hbg_mdio.h"
@@ -17,6 +19,8 @@
#define HBG_MDIO_OP_TIMEOUT_US (1 * 1000 * 1000)
#define HBG_MDIO_OP_INTERVAL_US (5 * 1000)
+#define HBG_NP_LINK_FAIL_RETRY_TIMES 5
+
static void hbg_mdio_set_command(struct hbg_mac *mac, u32 cmd)
{
hbg_reg_write(HBG_MAC_GET_PRIV(mac), HBG_REG_MDIO_COMMAND_ADDR, cmd);
@@ -114,6 +118,47 @@ static void hbg_mdio_init_hw(struct hbg_priv *priv)
hbg_mdio_set_command(mac, cmd);
}
+static void hbg_flowctrl_cfg(struct hbg_priv *priv)
+{
+ struct phy_device *phydev = priv->mac.phydev;
+ bool rx_pause;
+ bool tx_pause;
+
+ if (!priv->mac.pause_autoneg)
+ return;
+
+ phy_get_pause(phydev, &tx_pause, &rx_pause);
+ hbg_hw_set_pause_enable(priv, tx_pause, rx_pause);
+}
+
+void hbg_fix_np_link_fail(struct hbg_priv *priv)
+{
+ struct device *dev = &priv->pdev->dev;
+
+ rtnl_lock();
+
+ if (priv->stats.np_link_fail_cnt >= HBG_NP_LINK_FAIL_RETRY_TIMES) {
+ dev_err(dev, "failed to fix the MAC link status\n");
+ priv->stats.np_link_fail_cnt = 0;
+ goto unlock;
+ }
+
+ if (!priv->mac.phydev->link)
+ goto unlock;
+
+ priv->stats.np_link_fail_cnt++;
+ dev_err(dev, "failed to link between MAC and PHY, try to fix...\n");
+
+ /* Replace phy_reset() with phy_stop() and phy_start(),
+ * as suggested by Andrew.
+ */
+ hbg_phy_stop(priv);
+ hbg_phy_start(priv);
+
+unlock:
+ rtnl_unlock();
+}
+
static void hbg_phy_adjust_link(struct net_device *netdev)
{
struct hbg_priv *priv = netdev_priv(netdev);
@@ -140,6 +185,7 @@ static void hbg_phy_adjust_link(struct net_device *netdev)
priv->mac.duplex = phydev->duplex;
priv->mac.autoneg = phydev->autoneg;
hbg_hw_adjust_link(priv, speed, phydev->duplex);
+ hbg_flowctrl_cfg(priv);
}
priv->mac.link_status = phydev->link;
@@ -168,6 +214,7 @@ static int hbg_phy_connect(struct hbg_priv *priv)
return ret;
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ phy_support_asym_pause(phydev);
phy_attached_info(phydev);
return 0;
@@ -183,6 +230,39 @@ void hbg_phy_stop(struct hbg_priv *priv)
phy_stop(priv->mac.phydev);
}
+static void hbg_fixed_phy_uninit(void *data)
+{
+ fixed_phy_unregister((struct phy_device *)data);
+}
+
+static int hbg_fixed_phy_init(struct hbg_priv *priv)
+{
+ struct fixed_phy_status hbg_fixed_phy_status = {
+ .link = 1,
+ .speed = SPEED_1000,
+ .duplex = DUPLEX_FULL,
+ .pause = 1,
+ .asym_pause = 1,
+ };
+ struct device *dev = &priv->pdev->dev;
+ struct phy_device *phydev;
+ int ret;
+
+ phydev = fixed_phy_register(&hbg_fixed_phy_status, NULL);
+ if (IS_ERR(phydev)) {
+ dev_err_probe(dev, PTR_ERR(phydev),
+ "failed to register fixed PHY device\n");
+ return PTR_ERR(phydev);
+ }
+
+ ret = devm_add_action_or_reset(dev, hbg_fixed_phy_uninit, phydev);
+ if (ret)
+ return ret;
+
+ priv->mac.phydev = phydev;
+ return hbg_phy_connect(priv);
+}
+
int hbg_mdio_init(struct hbg_priv *priv)
{
struct device *dev = &priv->pdev->dev;
@@ -192,10 +272,12 @@ int hbg_mdio_init(struct hbg_priv *priv)
int ret;
mac->phy_addr = priv->dev_specs.phy_addr;
+ if (mac->phy_addr == HBG_NO_PHY)
+ return hbg_fixed_phy_init(priv);
+
mdio_bus = devm_mdiobus_alloc(dev);
if (!mdio_bus)
- return dev_err_probe(dev, -ENOMEM,
- "failed to alloc MDIO bus\n");
+ return -ENOMEM;
mdio_bus->parent = dev;
mdio_bus->priv = priv;
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h
index febd02a309c7..f3771c1bbd34 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h
@@ -9,4 +9,6 @@
int hbg_mdio_init(struct hbg_priv *priv);
void hbg_phy_start(struct hbg_priv *priv);
void hbg_phy_stop(struct hbg_priv *priv);
+void hbg_fix_np_link_fail(struct hbg_priv *priv);
+
#endif
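
hbg_fix_np_link_fail(), declared above and defined in hbg_mdio.c, bounds its PHY restart attempts with HBG_NP_LINK_FAIL_RETRY_TIMES. A rough stand-alone sketch of that bounded-retry shape (hypothetical names, no locking or logging shown):

#include <stdbool.h>

#define EX_NP_LINK_FAIL_RETRY_TIMES 5

static unsigned int ex_fail_cnt;

static void ex_fix_np_link_fail(bool phy_link_up)
{
	if (ex_fail_cnt >= EX_NP_LINK_FAIL_RETRY_TIMES) {
		ex_fail_cnt = 0;	/* give up; report the failure instead */
		return;
	}

	if (!phy_link_up)
		return;			/* PHY link is down, nothing to fix yet */

	ex_fail_cnt++;
	/* restart the PHY state machine here, e.g. stop then start */
}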
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
index 57d81c6d7633..30b3903c8f2d 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
@@ -10,12 +10,21 @@
#define HBG_REG_MAC_ID_ADDR 0x0008
#define HBG_REG_PHY_ID_ADDR 0x000C
#define HBG_REG_MAC_ADDR_ADDR 0x0010
+#define HBG_REG_MAC_ADDR_HIGH_ADDR 0x0014
+#define HBG_REG_UC_MAC_NUM_ADDR 0x0018
#define HBG_REG_MDIO_FREQ_ADDR 0x0024
#define HBG_REG_MAX_MTU_ADDR 0x0028
#define HBG_REG_MIN_MTU_ADDR 0x002C
#define HBG_REG_TX_FIFO_NUM_ADDR 0x0030
#define HBG_REG_RX_FIFO_NUM_ADDR 0x0034
#define HBG_REG_VLAN_LAYERS_ADDR 0x0038
+#define HBG_REG_PUSH_REQ_ADDR 0x00F0
+#define HBG_REG_MSG_HEADER_ADDR 0x00F4
+#define HBG_REG_MSG_HEADER_OPCODE_M GENMASK(7, 0)
+#define HBG_REG_MSG_HEADER_STATUS_M GENMASK(11, 8)
+#define HBG_REG_MSG_HEADER_DATA_NUM_M GENMASK(19, 12)
+#define HBG_REG_MSG_HEADER_RESP_CODE_M GENMASK(27, 20)
+#define HBG_REG_MSG_DATA_BASE_ADDR 0x0100
/* MDIO */
#define HBG_REG_MDIO_BASE 0x8000
@@ -28,6 +37,7 @@
#define HBG_REG_MDIO_COMMAND_OP_M GENMASK(11, 10)
#define HBG_REG_MDIO_COMMAND_PRTAD_M GENMASK(9, 5)
#define HBG_REG_MDIO_COMMAND_DEVAD_M GENMASK(4, 0)
+#define HBG_REG_MDIO_ADDR_ADDR (HBG_REG_MDIO_BASE + 0x0004)
#define HBG_REG_MDIO_WDATA_ADDR (HBG_REG_MDIO_BASE + 0x0008)
#define HBG_REG_MDIO_WDATA_M GENMASK(15, 0)
#define HBG_REG_MDIO_RDATA_ADDR (HBG_REG_MDIO_BASE + 0x000C)
@@ -36,6 +46,10 @@
/* GMAC */
#define HBG_REG_SGMII_BASE 0x10000
#define HBG_REG_DUPLEX_TYPE_ADDR (HBG_REG_SGMII_BASE + 0x0008)
+#define HBG_REG_FD_FC_TYPE_ADDR (HBG_REG_SGMII_BASE + 0x000C)
+#define HBG_REG_FC_TX_TIMER_ADDR (HBG_REG_SGMII_BASE + 0x001C)
+#define HBG_REG_FD_FC_ADDR_LOW_ADDR (HBG_REG_SGMII_BASE + 0x0020)
+#define HBG_REG_FD_FC_ADDR_HIGH_ADDR (HBG_REG_SGMII_BASE + 0x0024)
#define HBG_REG_DUPLEX_B BIT(0)
#define HBG_REG_MAX_FRAME_SIZE_ADDR (HBG_REG_SGMII_BASE + 0x003C)
#define HBG_REG_PORT_MODE_ADDR (HBG_REG_SGMII_BASE + 0x0040)
@@ -43,20 +57,97 @@
#define HBG_REG_PORT_ENABLE_ADDR (HBG_REG_SGMII_BASE + 0x0044)
#define HBG_REG_PORT_ENABLE_RX_B BIT(1)
#define HBG_REG_PORT_ENABLE_TX_B BIT(2)
+#define HBG_REG_PAUSE_ENABLE_ADDR (HBG_REG_SGMII_BASE + 0x0048)
+#define HBG_REG_PAUSE_ENABLE_RX_B BIT(0)
+#define HBG_REG_PAUSE_ENABLE_TX_B BIT(1)
+#define HBG_REG_AN_NEG_STATE_ADDR (HBG_REG_SGMII_BASE + 0x0058)
+#define HBG_REG_AN_NEG_STATE_NP_LINK_OK_B BIT(15)
#define HBG_REG_TRANSMIT_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x0060)
#define HBG_REG_TRANSMIT_CTRL_PAD_EN_B BIT(7)
#define HBG_REG_TRANSMIT_CTRL_CRC_ADD_B BIT(6)
#define HBG_REG_TRANSMIT_CTRL_AN_EN_B BIT(5)
+#define HBG_REG_REC_FILT_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x0064)
+#define HBG_REG_REC_FILT_CTRL_UC_MATCH_EN_B BIT(0)
+#define HBG_REG_REC_FILT_CTRL_PAUSE_FRM_PASS_B BIT(4)
+#define HBG_REG_RX_OCTETS_TOTAL_OK_ADDR (HBG_REG_SGMII_BASE + 0x0080)
+#define HBG_REG_RX_OCTETS_BAD_ADDR (HBG_REG_SGMII_BASE + 0x0084)
+#define HBG_REG_RX_UC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0088)
+#define HBG_REG_RX_MC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x008C)
+#define HBG_REG_RX_BC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0090)
+#define HBG_REG_RX_PKTS_64OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0094)
+#define HBG_REG_RX_PKTS_65TO127OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0098)
+#define HBG_REG_RX_PKTS_128TO255OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x009C)
+#define HBG_REG_RX_PKTS_256TO511OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00A0)
+#define HBG_REG_RX_PKTS_512TO1023OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00A4)
+#define HBG_REG_RX_PKTS_1024TO1518OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00A8)
+#define HBG_REG_RX_PKTS_1519TOMAXOCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00AC)
+#define HBG_REG_RX_FCS_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00B0)
+#define HBG_REG_RX_TAGGED_ADDR (HBG_REG_SGMII_BASE + 0x00B4)
+#define HBG_REG_RX_DATA_ERR_ADDR (HBG_REG_SGMII_BASE + 0x00B8)
+#define HBG_REG_RX_ALIGN_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00BC)
+#define HBG_REG_RX_LONG_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00C0)
+#define HBG_REG_RX_JABBER_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00C4)
+#define HBG_REG_RX_PAUSE_MACCTL_FRAMCOUNTER_ADDR (HBG_REG_SGMII_BASE + 0x00C8)
+#define HBG_REG_RX_UNKNOWN_MACCTL_FRAMCOUNTER_ADDR (HBG_REG_SGMII_BASE + 0x00CC)
+#define HBG_REG_RX_VERY_LONG_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00D0)
+#define HBG_REG_RX_RUNT_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00D4)
+#define HBG_REG_RX_SHORT_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00D8)
+#define HBG_REG_RX_FILT_PKT_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00E8)
+#define HBG_REG_RX_OCTETS_TOTAL_FILT_ADDR (HBG_REG_SGMII_BASE + 0x00EC)
+#define HBG_REG_OCTETS_TRANSMITTED_OK_ADDR (HBG_REG_SGMII_BASE + 0x0100)
+#define HBG_REG_OCTETS_TRANSMITTED_BAD_ADDR (HBG_REG_SGMII_BASE + 0x0104)
+#define HBG_REG_TX_UC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0108)
+#define HBG_REG_TX_MC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x010C)
+#define HBG_REG_TX_BC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0110)
+#define HBG_REG_TX_PKTS_64OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0114)
+#define HBG_REG_TX_PKTS_65TO127OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0118)
+#define HBG_REG_TX_PKTS_128TO255OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x011C)
+#define HBG_REG_TX_PKTS_256TO511OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0120)
+#define HBG_REG_TX_PKTS_512TO1023OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0124)
+#define HBG_REG_TX_PKTS_1024TO1518OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0128)
+#define HBG_REG_TX_PKTS_1519TOMAXOCTETS_ADDR (HBG_REG_SGMII_BASE + 0x012C)
+#define HBG_REG_TX_EXCESSIVE_LENGTH_DROP_ADDR (HBG_REG_SGMII_BASE + 0x014C)
+#define HBG_REG_TX_UNDERRUN_ADDR (HBG_REG_SGMII_BASE + 0x0150)
+#define HBG_REG_TX_TAGGED_ADDR (HBG_REG_SGMII_BASE + 0x0154)
+#define HBG_REG_TX_CRC_ERROR_ADDR (HBG_REG_SGMII_BASE + 0x0158)
+#define HBG_REG_TX_PAUSE_FRAMES_ADDR (HBG_REG_SGMII_BASE + 0x015C)
+#define HBG_REG_LINE_LOOP_BACK_ADDR (HBG_REG_SGMII_BASE + 0x01A8)
#define HBG_REG_CF_CRC_STRIP_ADDR (HBG_REG_SGMII_BASE + 0x01B0)
#define HBG_REG_CF_CRC_STRIP_B BIT(0)
#define HBG_REG_MODE_CHANGE_EN_ADDR (HBG_REG_SGMII_BASE + 0x01B4)
#define HBG_REG_MODE_CHANGE_EN_B BIT(0)
+#define HBG_REG_LOOP_REG_ADDR (HBG_REG_SGMII_BASE + 0x01DC)
#define HBG_REG_RECV_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x01E0)
#define HBG_REG_RECV_CTRL_STRIP_PAD_EN_B BIT(3)
+#define HBG_REG_VLAN_CODE_ADDR (HBG_REG_SGMII_BASE + 0x01E8)
+#define HBG_REG_RX_OVERRUN_CNT_ADDR (HBG_REG_SGMII_BASE + 0x01EC)
+#define HBG_REG_RX_LENGTHFIELD_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x01F4)
+#define HBG_REG_RX_FAIL_COMMA_CNT_ADDR (HBG_REG_SGMII_BASE + 0x01F8)
+#define HBG_REG_STATION_ADDR_LOW_0_ADDR (HBG_REG_SGMII_BASE + 0x0200)
+#define HBG_REG_STATION_ADDR_HIGH_0_ADDR (HBG_REG_SGMII_BASE + 0x0204)
+#define HBG_REG_STATION_ADDR_LOW_1_ADDR (HBG_REG_SGMII_BASE + 0x0208)
+#define HBG_REG_STATION_ADDR_HIGH_1_ADDR (HBG_REG_SGMII_BASE + 0x020C)
#define HBG_REG_STATION_ADDR_LOW_2_ADDR (HBG_REG_SGMII_BASE + 0x0210)
#define HBG_REG_STATION_ADDR_HIGH_2_ADDR (HBG_REG_SGMII_BASE + 0x0214)
+#define HBG_REG_STATION_ADDR_LOW_3_ADDR (HBG_REG_SGMII_BASE + 0x0218)
+#define HBG_REG_STATION_ADDR_HIGH_3_ADDR (HBG_REG_SGMII_BASE + 0x021C)
+#define HBG_REG_STATION_ADDR_LOW_4_ADDR (HBG_REG_SGMII_BASE + 0x0220)
+#define HBG_REG_STATION_ADDR_HIGH_4_ADDR (HBG_REG_SGMII_BASE + 0x0224)
+#define HBG_REG_STATION_ADDR_LOW_5_ADDR (HBG_REG_SGMII_BASE + 0x0228)
+#define HBG_REG_STATION_ADDR_HIGH_5_ADDR (HBG_REG_SGMII_BASE + 0x022C)
+#define HBG_REG_STATION_ADDR_LOW_MSK_0 (HBG_REG_SGMII_BASE + 0x0230)
+#define HBG_REG_STATION_ADDR_LOW_MSK_1 (HBG_REG_SGMII_BASE + 0x0238)
/* PCU */
+#define HBG_REG_TX_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0420)
+#define HBG_REG_RX_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0424)
+#define HBG_REG_FIFO_THRSLD_FULL_M GENMASK(25, 16)
+#define HBG_REG_FIFO_THRSLD_EMPTY_M GENMASK(9, 0)
+#define HBG_REG_CFG_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0428)
+#define HBG_REG_CFG_FIFO_THRSLD_TX_FULL_M GENMASK(31, 24)
+#define HBG_REG_CFG_FIFO_THRSLD_TX_EMPTY_M GENMASK(23, 16)
+#define HBG_REG_CFG_FIFO_THRSLD_RX_FULL_M GENMASK(15, 8)
+#define HBG_REG_CFG_FIFO_THRSLD_RX_EMPTY_M GENMASK(7, 0)
#define HBG_REG_CF_INTRPT_MSK_ADDR (HBG_REG_SGMII_BASE + 0x042C)
#define HBG_INT_MSK_WE_ERR_B BIT(31)
#define HBG_INT_MSK_RBREQ_ERR_B BIT(30)
@@ -74,20 +165,34 @@
#define HBG_INT_MSK_MAC_PCS_TX_FIFO_ERR_B BIT(17)
#define HBG_INT_MSK_MAC_PCS_RX_FIFO_ERR_B BIT(16)
#define HBG_INT_MSK_MAC_MII_FIFO_ERR_B BIT(15)
+#define HBG_INT_MSK_TX_PKT_CPL_B BIT(14)
#define HBG_INT_MSK_TX_B BIT(1) /* just used in driver */
#define HBG_INT_MSK_RX_B BIT(0) /* just used in driver */
#define HBG_REG_CF_INTRPT_STAT_ADDR (HBG_REG_SGMII_BASE + 0x0434)
#define HBG_REG_CF_INTRPT_CLR_ADDR (HBG_REG_SGMII_BASE + 0x0438)
+#define HBG_REG_TX_BUS_ERR_ADDR_ADDR (HBG_REG_SGMII_BASE + 0x043C)
+#define HBG_REG_RX_BUS_ERR_ADDR_ADDR (HBG_REG_SGMII_BASE + 0x0440)
#define HBG_REG_MAX_FRAME_LEN_ADDR (HBG_REG_SGMII_BASE + 0x0444)
#define HBG_REG_MAX_FRAME_LEN_M GENMASK(15, 0)
+#define HBG_REG_TX_DROP_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0448)
+#define HBG_REG_RX_OVER_FLOW_CNT_ADDR (HBG_REG_SGMII_BASE + 0x044C)
+#define HBG_REG_DEBUG_ST_MCH_ADDR (HBG_REG_SGMII_BASE + 0x0450)
+#define HBG_REG_FIFO_CURR_STATUS_ADDR (HBG_REG_SGMII_BASE + 0x0454)
+#define HBG_REG_FIFO_HIST_STATUS_ADDR (HBG_REG_SGMII_BASE + 0x0458)
#define HBG_REG_CF_CFF_DATA_NUM_ADDR (HBG_REG_SGMII_BASE + 0x045C)
#define HBG_REG_CF_CFF_DATA_NUM_ADDR_TX_M GENMASK(8, 0)
#define HBG_REG_CF_CFF_DATA_NUM_ADDR_RX_M GENMASK(24, 16)
+#define HBG_REG_TX_CS_FAIL_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0460)
+#define HBG_REG_RX_TRANS_PKG_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0464)
+#define HBG_REG_TX_TRANS_PKG_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0468)
+#define HBG_REG_CF_TX_PAUSE_ADDR (HBG_REG_SGMII_BASE + 0x0470)
#define HBG_REG_TX_CFF_ADDR_0_ADDR (HBG_REG_SGMII_BASE + 0x0488)
#define HBG_REG_TX_CFF_ADDR_1_ADDR (HBG_REG_SGMII_BASE + 0x048C)
#define HBG_REG_TX_CFF_ADDR_2_ADDR (HBG_REG_SGMII_BASE + 0x0490)
#define HBG_REG_TX_CFF_ADDR_3_ADDR (HBG_REG_SGMII_BASE + 0x0494)
#define HBG_REG_RX_CFF_ADDR_ADDR (HBG_REG_SGMII_BASE + 0x04A0)
+#define HBG_REG_BRUST_LENGTH_ADDR (HBG_REG_SGMII_BASE + 0x04C4)
+#define HBG_REG_BRUST_LENGTH_B BIT(29)
#define HBG_REG_RX_BUF_SIZE_ADDR (HBG_REG_SGMII_BASE + 0x04E4)
#define HBG_REG_RX_BUF_SIZE_M GENMASK(15, 0)
#define HBG_REG_BUS_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x04E8)
@@ -101,6 +206,13 @@
#define HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE2_M GENMASK(3, 0)
#define HBG_REG_RX_PKT_MODE_ADDR (HBG_REG_SGMII_BASE + 0x04F4)
#define HBG_REG_RX_PKT_MODE_PARSE_MODE_M GENMASK(22, 21)
+#define HBG_REG_RX_BUFRQ_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x058C)
+#define HBG_REG_TX_BUFRL_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0590)
+#define HBG_REG_RX_WE_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0594)
+#define HBG_REG_DBG_ST0_ADDR (HBG_REG_SGMII_BASE + 0x05E4)
+#define HBG_REG_DBG_ST1_ADDR (HBG_REG_SGMII_BASE + 0x05E8)
+#define HBG_REG_DBG_ST2_ADDR (HBG_REG_SGMII_BASE + 0x05EC)
+#define HBG_REG_BUS_RST_EN_ADDR (HBG_REG_SGMII_BASE + 0x0688)
#define HBG_REG_CF_IND_TXINT_MSK_ADDR (HBG_REG_SGMII_BASE + 0x0694)
#define HBG_REG_IND_INTR_MASK_B BIT(0)
#define HBG_REG_CF_IND_TXINT_STAT_ADDR (HBG_REG_SGMII_BASE + 0x0698)
@@ -139,5 +251,52 @@ struct hbg_rx_desc {
};
#define HBG_RX_DESC_W2_PKT_LEN_M GENMASK(31, 16)
+#define HBG_RX_DESC_W2_PORT_NUM_M GENMASK(15, 12)
+#define HBG_RX_DESC_W3_IP_OFFSET_M GENMASK(23, 16)
+#define HBG_RX_DESC_W3_VLAN_M GENMASK(15, 0)
+#define HBG_RX_DESC_W4_IP_TCP_UDP_M GENMASK(31, 30)
+#define HBG_RX_DESC_W4_IPSEC_B BIT(29)
+#define HBG_RX_DESC_W4_IP_VERSION_B BIT(28)
+#define HBG_RX_DESC_W4_L4_ERR_CODE_M GENMASK(26, 23)
+#define HBG_RX_DESC_W4_FRAG_B BIT(22)
+#define HBG_RX_DESC_W4_OPT_B BIT(21)
+#define HBG_RX_DESC_W4_IP_VERSION_ERR_B BIT(20)
+#define HBG_RX_DESC_W4_BRD_CST_B BIT(19)
+#define HBG_RX_DESC_W4_MUL_CST_B BIT(18)
+#define HBG_RX_DESC_W4_ARP_B BIT(17)
+#define HBG_RX_DESC_W4_RARP_B BIT(16)
+#define HBG_RX_DESC_W4_ICMP_B BIT(15)
+#define HBG_RX_DESC_W4_VLAN_FLAG_B BIT(14)
+#define HBG_RX_DESC_W4_DROP_B BIT(13)
+#define HBG_RX_DESC_W4_L3_ERR_CODE_M GENMASK(12, 9)
+#define HBG_RX_DESC_W4_L2_ERR_B BIT(8)
+#define HBG_RX_DESC_W4_IDX_MATCH_B BIT(7)
+#define HBG_RX_DESC_W4_PARSE_MODE_M GENMASK(6, 5)
+#define HBG_RX_DESC_W5_VALID_SIZE_M GENMASK(15, 0)
+
+enum hbg_l3_err_code {
+ HBG_L3_OK = 0,
+ HBG_L3_WRONG_HEAD,
+ HBG_L3_CSUM_ERR,
+ HBG_L3_LEN_ERR,
+ HBG_L3_ZERO_TTL,
+ HBG_L3_RSVD,
+};
+
+enum hbg_l4_err_code {
+ HBG_L4_OK = 0,
+ HBG_L4_WRONG_HEAD,
+ HBG_L4_LEN_ERR,
+ HBG_L4_CSUM_ERR,
+ HBG_L4_ZERO_PORT_NUM,
+ HBG_L4_RSVD,
+};
+
+enum hbg_pkt_type_code {
+ HBG_NO_IP_PKT = 0,
+ HBG_IP_PKT,
+ HBG_TCP_PKT,
+ HBG_UDP_PKT,
+};
#endif
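
The HBG_RX_DESC_W* masks above are consumed with FIELD_GET(), which reduces to masking the word and shifting by the mask's lowest set bit. A stand-alone sketch with made-up descriptor contents (not taken from real hardware):

#include <stdint.h>

#define EX_PKT_LEN_M	0xFFFF0000u	/* like HBG_RX_DESC_W2_PKT_LEN_M */
#define EX_PORT_NUM_M	0x0000F000u	/* like HBG_RX_DESC_W2_PORT_NUM_M */

/* FIELD_GET(mask, word): mask the word, then shift by the mask's low bit */
static uint32_t ex_field_get(uint32_t mask, uint32_t word)
{
	return (word & mask) >> __builtin_ctz(mask);
}

/* word2 = 0x05EA2000 -> packet length 1514 (0x05EA), port number 2 */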
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_trace.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_trace.h
new file mode 100644
index 000000000000..b70fd960da8d
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_trace.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2025 Hisilicon Limited. */
+
+/* This must be outside ifdef _HBG_TRACE_H */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hibmcge
+
+#if !defined(_HBG_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _HBG_TRACE_H_
+
+#include <linux/bitfield.h>
+#include <linux/pci.h>
+#include <linux/tracepoint.h>
+#include <linux/types.h>
+#include "hbg_reg.h"
+
+TRACE_EVENT(hbg_rx_desc,
+ TP_PROTO(struct hbg_priv *priv, u32 index,
+ struct hbg_rx_desc *rx_desc),
+ TP_ARGS(priv, index, rx_desc),
+
+ TP_STRUCT__entry(__field(u32, index)
+ __field(u8, port_num)
+ __field(u8, ip_offset)
+ __field(u8, parse_mode)
+ __field(u8, l4_error_code)
+ __field(u8, l3_error_code)
+ __field(u8, l2_error_code)
+ __field(u16, packet_len)
+ __field(u16, valid_size)
+ __field(u16, vlan)
+ __string(pciname, pci_name(priv->pdev))
+ __string(devname, priv->netdev->name)
+ ),
+
+ TP_fast_assign(__entry->index = index,
+ __entry->packet_len =
+ FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M,
+ rx_desc->word2);
+ __entry->port_num =
+ FIELD_GET(HBG_RX_DESC_W2_PORT_NUM_M,
+ rx_desc->word2);
+ __entry->ip_offset =
+ FIELD_GET(HBG_RX_DESC_W3_IP_OFFSET_M,
+ rx_desc->word3);
+ __entry->vlan =
+ FIELD_GET(HBG_RX_DESC_W3_VLAN_M,
+ rx_desc->word3);
+ __entry->parse_mode =
+ FIELD_GET(HBG_RX_DESC_W4_PARSE_MODE_M,
+ rx_desc->word4);
+ __entry->l4_error_code =
+ FIELD_GET(HBG_RX_DESC_W4_L4_ERR_CODE_M,
+ rx_desc->word4);
+ __entry->l3_error_code =
+ FIELD_GET(HBG_RX_DESC_W4_L3_ERR_CODE_M,
+ rx_desc->word4);
+ __entry->l2_error_code =
+ FIELD_GET(HBG_RX_DESC_W4_L2_ERR_B,
+ rx_desc->word4);
+ __entry->valid_size =
+ FIELD_GET(HBG_RX_DESC_W5_VALID_SIZE_M,
+ rx_desc->word5);
+ __assign_str(pciname);
+ __assign_str(devname);
+ ),
+
+ TP_printk("%s %s index:%u, port num:%u, len:%u, valid size:%u, ip_offset:%u, vlan:0x%04x, parse mode:%u, l4_err:0x%x, l3_err:0x%x, l2_err:0x%x",
+ __get_str(pciname), __get_str(devname), __entry->index,
+ __entry->port_num, __entry->packet_len,
+ __entry->valid_size, __entry->ip_offset, __entry->vlan,
+ __entry->parse_mode, __entry->l4_error_code,
+ __entry->l3_error_code, __entry->l2_error_code
+ )
+);
+
+#endif /* _HBG_TRACE_H_ */
+
+/* This must be outside ifdef _HBG_TRACE_H */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hbg_trace
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
index f4f256a0dfea..a4ea92c31c2f 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
@@ -7,6 +7,9 @@
#include "hbg_reg.h"
#include "hbg_txrx.h"
+#define CREATE_TRACE_POINTS
+#include "hbg_trace.h"
+
#define netdev_get_tx_ring(netdev) \
(&(((struct hbg_priv *)netdev_priv(netdev))->tx_ring))
@@ -28,6 +31,11 @@
typeof(ring) _ring = (ring); \
_ring->p = hbg_queue_next_prt(_ring->p, _ring); })
+#define hbg_get_page_order(ring) ({ \
+ typeof(ring) _ring = (ring); \
+ get_order(hbg_spec_max_frame_len(_ring->priv, _ring->dir)); })
+#define hbg_get_page_size(ring) (PAGE_SIZE << hbg_get_page_order((ring)))
+
#define HBG_TX_STOP_THRS 2
#define HBG_TX_START_THRS (2 * HBG_TX_STOP_THRS)
@@ -38,8 +46,14 @@ static int hbg_dma_map(struct hbg_buffer *buffer)
buffer->skb_dma = dma_map_single(&priv->pdev->dev,
buffer->skb->data, buffer->skb_len,
buffer_to_dma_dir(buffer));
- if (unlikely(dma_mapping_error(&priv->pdev->dev, buffer->skb_dma)))
+ if (unlikely(dma_mapping_error(&priv->pdev->dev, buffer->skb_dma))) {
+ if (buffer->dir == HBG_DIR_RX)
+ priv->stats.rx_dma_err_cnt++;
+ else
+ priv->stats.tx_dma_err_cnt++;
+
return -ENOMEM;
+ }
return 0;
}
@@ -56,6 +70,43 @@ static void hbg_dma_unmap(struct hbg_buffer *buffer)
buffer->skb_dma = 0;
}
+static void hbg_buffer_free_page(struct hbg_buffer *buffer)
+{
+ struct hbg_ring *ring = buffer->ring;
+
+ if (unlikely(!buffer->page))
+ return;
+
+ page_pool_put_full_page(ring->page_pool, buffer->page, false);
+
+ buffer->page = NULL;
+ buffer->page_dma = 0;
+ buffer->page_addr = NULL;
+ buffer->page_size = 0;
+ buffer->page_offset = 0;
+}
+
+static int hbg_buffer_alloc_page(struct hbg_buffer *buffer)
+{
+ struct hbg_ring *ring = buffer->ring;
+ u32 len = hbg_get_page_size(ring);
+ u32 offset;
+
+ if (unlikely(!ring->page_pool))
+ return 0;
+
+ buffer->page = page_pool_dev_alloc_frag(ring->page_pool, &offset, len);
+ if (unlikely(!buffer->page))
+ return -ENOMEM;
+
+ buffer->page_dma = page_pool_get_dma_addr(buffer->page) + offset;
+ buffer->page_addr = page_address(buffer->page) + offset;
+ buffer->page_size = len;
+ buffer->page_offset = offset;
+
+ return 0;
+}
+
static void hbg_init_tx_desc(struct hbg_buffer *buffer,
struct hbg_tx_desc *tx_desc)
{
@@ -129,24 +180,14 @@ static void hbg_buffer_free_skb(struct hbg_buffer *buffer)
buffer->skb = NULL;
}
-static int hbg_buffer_alloc_skb(struct hbg_buffer *buffer)
-{
- u32 len = hbg_spec_max_frame_len(buffer->priv, buffer->dir);
- struct hbg_priv *priv = buffer->priv;
-
- buffer->skb = netdev_alloc_skb(priv->netdev, len);
- if (unlikely(!buffer->skb))
- return -ENOMEM;
-
- buffer->skb_len = len;
- memset(buffer->skb->data, 0, HBG_PACKET_HEAD_SIZE);
- return 0;
-}
-
static void hbg_buffer_free(struct hbg_buffer *buffer)
{
- hbg_dma_unmap(buffer);
- hbg_buffer_free_skb(buffer);
+ if (buffer->skb) {
+ hbg_dma_unmap(buffer);
+ return hbg_buffer_free_skb(buffer);
+ }
+
+ hbg_buffer_free_page(buffer);
}
static int hbg_napi_tx_recycle(struct napi_struct *napi, int budget)
@@ -195,31 +236,217 @@ static int hbg_napi_tx_recycle(struct napi_struct *napi, int budget)
return packet_done;
}
+static bool hbg_rx_check_l3l4_error(struct hbg_priv *priv,
+ struct hbg_rx_desc *desc,
+ struct sk_buff *skb)
+{
+ bool rx_checksum_offload = !!(priv->netdev->features & NETIF_F_RXCSUM);
+
+ skb->ip_summed = rx_checksum_offload ?
+ CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
+
+ if (likely(!FIELD_GET(HBG_RX_DESC_W4_L3_ERR_CODE_M, desc->word4) &&
+ !FIELD_GET(HBG_RX_DESC_W4_L4_ERR_CODE_M, desc->word4)))
+ return true;
+
+ switch (FIELD_GET(HBG_RX_DESC_W4_L3_ERR_CODE_M, desc->word4)) {
+ case HBG_L3_OK:
+ break;
+ case HBG_L3_WRONG_HEAD:
+ priv->stats.rx_desc_l3_wrong_head_cnt++;
+ return false;
+ case HBG_L3_CSUM_ERR:
+ skb->ip_summed = CHECKSUM_NONE;
+ priv->stats.rx_desc_l3_csum_err_cnt++;
+
+ /* Don't drop packets on csum validation failure,
+ * as suggested by Jakub
+ */
+ break;
+ case HBG_L3_LEN_ERR:
+ priv->stats.rx_desc_l3_len_err_cnt++;
+ return false;
+ case HBG_L3_ZERO_TTL:
+ priv->stats.rx_desc_l3_zero_ttl_cnt++;
+ return false;
+ default:
+ priv->stats.rx_desc_l3_other_cnt++;
+ return false;
+ }
+
+ switch (FIELD_GET(HBG_RX_DESC_W4_L4_ERR_CODE_M, desc->word4)) {
+ case HBG_L4_OK:
+ break;
+ case HBG_L4_WRONG_HEAD:
+ priv->stats.rx_desc_l4_wrong_head_cnt++;
+ return false;
+ case HBG_L4_LEN_ERR:
+ priv->stats.rx_desc_l4_len_err_cnt++;
+ return false;
+ case HBG_L4_CSUM_ERR:
+ skb->ip_summed = CHECKSUM_NONE;
+ priv->stats.rx_desc_l4_csum_err_cnt++;
+
+ /* Don't drop packets on csum validation failure,
+ * as suggested by Jakub
+ */
+ break;
+ case HBG_L4_ZERO_PORT_NUM:
+ priv->stats.rx_desc_l4_zero_port_num_cnt++;
+ return false;
+ default:
+ priv->stats.rx_desc_l4_other_cnt++;
+ return false;
+ }
+
+ return true;
+}
+
+static void hbg_update_rx_ip_protocol_stats(struct hbg_priv *priv,
+ struct hbg_rx_desc *desc)
+{
+ if (unlikely(!FIELD_GET(HBG_RX_DESC_W4_IP_TCP_UDP_M, desc->word4))) {
+ priv->stats.rx_desc_no_ip_pkt_cnt++;
+ return;
+ }
+
+ if (unlikely(FIELD_GET(HBG_RX_DESC_W4_IP_VERSION_ERR_B, desc->word4))) {
+ priv->stats.rx_desc_ip_ver_err_cnt++;
+ return;
+ }
+
+ /* 0:ipv4, 1:ipv6 */
+ if (FIELD_GET(HBG_RX_DESC_W4_IP_VERSION_B, desc->word4))
+ priv->stats.rx_desc_ipv6_pkt_cnt++;
+ else
+ priv->stats.rx_desc_ipv4_pkt_cnt++;
+
+ switch (FIELD_GET(HBG_RX_DESC_W4_IP_TCP_UDP_M, desc->word4)) {
+ case HBG_IP_PKT:
+ priv->stats.rx_desc_ip_pkt_cnt++;
+ if (FIELD_GET(HBG_RX_DESC_W4_OPT_B, desc->word4))
+ priv->stats.rx_desc_ip_opt_pkt_cnt++;
+ if (FIELD_GET(HBG_RX_DESC_W4_FRAG_B, desc->word4))
+ priv->stats.rx_desc_frag_cnt++;
+
+ if (FIELD_GET(HBG_RX_DESC_W4_ICMP_B, desc->word4))
+ priv->stats.rx_desc_icmp_pkt_cnt++;
+ else if (FIELD_GET(HBG_RX_DESC_W4_IPSEC_B, desc->word4))
+ priv->stats.rx_desc_ipsec_pkt_cnt++;
+ break;
+ case HBG_TCP_PKT:
+ priv->stats.rx_desc_tcp_pkt_cnt++;
+ break;
+ case HBG_UDP_PKT:
+ priv->stats.rx_desc_udp_pkt_cnt++;
+ break;
+ default:
+ priv->stats.rx_desc_no_ip_pkt_cnt++;
+ break;
+ }
+}
+
+static void hbg_update_rx_protocol_stats(struct hbg_priv *priv,
+ struct hbg_rx_desc *desc)
+{
+ if (unlikely(!FIELD_GET(HBG_RX_DESC_W4_IDX_MATCH_B, desc->word4))) {
+ priv->stats.rx_desc_key_not_match_cnt++;
+ return;
+ }
+
+ if (FIELD_GET(HBG_RX_DESC_W4_BRD_CST_B, desc->word4))
+ priv->stats.rx_desc_broadcast_pkt_cnt++;
+ else if (FIELD_GET(HBG_RX_DESC_W4_MUL_CST_B, desc->word4))
+ priv->stats.rx_desc_multicast_pkt_cnt++;
+
+ if (FIELD_GET(HBG_RX_DESC_W4_VLAN_FLAG_B, desc->word4))
+ priv->stats.rx_desc_vlan_pkt_cnt++;
+
+ if (FIELD_GET(HBG_RX_DESC_W4_ARP_B, desc->word4)) {
+ priv->stats.rx_desc_arp_pkt_cnt++;
+ return;
+ } else if (FIELD_GET(HBG_RX_DESC_W4_RARP_B, desc->word4)) {
+ priv->stats.rx_desc_rarp_pkt_cnt++;
+ return;
+ }
+
+ hbg_update_rx_ip_protocol_stats(priv, desc);
+}
+
+static bool hbg_rx_pkt_check(struct hbg_priv *priv, struct hbg_rx_desc *desc,
+ struct sk_buff *skb)
+{
+ if (unlikely(FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, desc->word2) >
+ priv->dev_specs.max_frame_len)) {
+ priv->stats.rx_desc_pkt_len_err_cnt++;
+ return false;
+ }
+
+ if (unlikely(FIELD_GET(HBG_RX_DESC_W2_PORT_NUM_M, desc->word2) !=
+ priv->dev_specs.mac_id ||
+ FIELD_GET(HBG_RX_DESC_W4_DROP_B, desc->word4))) {
+ priv->stats.rx_desc_drop++;
+ return false;
+ }
+
+ if (unlikely(FIELD_GET(HBG_RX_DESC_W4_L2_ERR_B, desc->word4))) {
+ priv->stats.rx_desc_l2_err_cnt++;
+ return false;
+ }
+
+ if (unlikely(!hbg_rx_check_l3l4_error(priv, desc, skb))) {
+ priv->stats.rx_desc_l3l4_err_cnt++;
+ return false;
+ }
+
+ hbg_update_rx_protocol_stats(priv, desc);
+ return true;
+}
+
static int hbg_rx_fill_one_buffer(struct hbg_priv *priv)
{
struct hbg_ring *ring = &priv->rx_ring;
struct hbg_buffer *buffer;
int ret;
- if (hbg_queue_is_full(ring->ntc, ring->ntu, ring))
+ if (hbg_queue_is_full(ring->ntc, ring->ntu, ring) ||
+ hbg_fifo_is_full(priv, ring->dir))
return 0;
buffer = &ring->queue[ring->ntu];
- ret = hbg_buffer_alloc_skb(buffer);
+ ret = hbg_buffer_alloc_page(buffer);
if (unlikely(ret))
return ret;
- ret = hbg_dma_map(buffer);
- if (unlikely(ret)) {
- hbg_buffer_free_skb(buffer);
- return ret;
- }
+ memset(buffer->page_addr, 0, HBG_PACKET_HEAD_SIZE);
+ dma_sync_single_for_device(&priv->pdev->dev, buffer->page_dma,
+ HBG_PACKET_HEAD_SIZE, DMA_TO_DEVICE);
- hbg_hw_fill_buffer(priv, buffer->skb_dma);
+ hbg_hw_fill_buffer(priv, buffer->page_dma);
hbg_queue_move_next(ntu, ring);
return 0;
}
+static int hbg_rx_fill_buffers(struct hbg_priv *priv)
+{
+ u32 remained = hbg_hw_get_fifo_used_num(priv, HBG_DIR_RX);
+ u32 max_count = priv->dev_specs.rx_fifo_num;
+ u32 refill_count;
+ int ret;
+
+ if (unlikely(remained >= max_count))
+ return 0;
+
+ refill_count = max_count - remained;
+ while (refill_count--) {
+ ret = hbg_rx_fill_one_buffer(priv);
+ if (unlikely(ret))
+ break;
+ }
+
+ return ret;
+}
+
static bool hbg_sync_data_from_hw(struct hbg_priv *priv,
struct hbg_buffer *buffer)
{
@@ -228,13 +455,29 @@ static bool hbg_sync_data_from_hw(struct hbg_priv *priv,
/* make sure HW write desc complete */
dma_rmb();
- dma_sync_single_for_cpu(&priv->pdev->dev, buffer->skb_dma,
- buffer->skb_len, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(&priv->pdev->dev, buffer->page_dma,
+ buffer->page_size, DMA_FROM_DEVICE);
- rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
+ rx_desc = (struct hbg_rx_desc *)buffer->page_addr;
return FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2) != 0;
}
+static int hbg_build_skb(struct hbg_priv *priv,
+ struct hbg_buffer *buffer, u32 pkt_len)
+{
+ net_prefetch(buffer->page_addr);
+
+ buffer->skb = napi_build_skb(buffer->page_addr, buffer->page_size);
+ if (unlikely(!buffer->skb))
+ return -ENOMEM;
+ skb_mark_for_recycle(buffer->skb);
+
+ /* page will be freed together with the skb */
+ buffer->page = NULL;
+
+ return 0;
+}
+
static int hbg_napi_rx_poll(struct napi_struct *napi, int budget)
{
struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi);
@@ -244,29 +487,39 @@ static int hbg_napi_rx_poll(struct napi_struct *napi, int budget)
u32 packet_done = 0;
u32 pkt_len;
+ hbg_rx_fill_buffers(priv);
while (packet_done < budget) {
if (unlikely(hbg_queue_is_empty(ring->ntc, ring->ntu, ring)))
break;
buffer = &ring->queue[ring->ntc];
- if (unlikely(!buffer->skb))
+ if (unlikely(!buffer->page))
goto next_buffer;
if (unlikely(!hbg_sync_data_from_hw(priv, buffer)))
break;
- rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
+ rx_desc = (struct hbg_rx_desc *)buffer->page_addr;
pkt_len = FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2);
+ trace_hbg_rx_desc(priv, ring->ntc, rx_desc);
- hbg_dma_unmap(buffer);
+ if (unlikely(hbg_build_skb(priv, buffer, pkt_len))) {
+ hbg_buffer_free_page(buffer);
+ goto next_buffer;
+ }
+
+ if (unlikely(!hbg_rx_pkt_check(priv, rx_desc, buffer->skb))) {
+ hbg_buffer_free_skb(buffer);
+ goto next_buffer;
+ }
skb_reserve(buffer->skb, HBG_PACKET_HEAD_SIZE + NET_IP_ALIGN);
skb_put(buffer->skb, pkt_len);
buffer->skb->protocol = eth_type_trans(buffer->skb,
priv->netdev);
-
dev_sw_netstats_rx_add(priv->netdev, pkt_len);
napi_gro_receive(napi, buffer->skb);
buffer->skb = NULL;
+ buffer->page = NULL;
next_buffer:
hbg_rx_fill_one_buffer(priv);
@@ -281,6 +534,42 @@ next_buffer:
return packet_done;
}
+static void hbg_ring_page_pool_destroy(struct hbg_ring *ring)
+{
+ if (!ring->page_pool)
+ return;
+
+ page_pool_destroy(ring->page_pool);
+ ring->page_pool = NULL;
+}
+
+static int hbg_ring_page_pool_init(struct hbg_priv *priv, struct hbg_ring *ring)
+{
+ u32 buf_size = hbg_spec_max_frame_len(priv, ring->dir);
+ struct page_pool_params pp_params = {
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .order = hbg_get_page_order(ring),
+ .pool_size = ring->len * buf_size / hbg_get_page_size(ring),
+ .nid = dev_to_node(&priv->pdev->dev),
+ .dev = &priv->pdev->dev,
+ .napi = &ring->napi,
+ .dma_dir = DMA_FROM_DEVICE,
+ .offset = 0,
+ .max_len = hbg_get_page_size(ring),
+ };
+ int ret = 0;
+
+ ring->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(ring->page_pool)) {
+ ret = PTR_ERR(ring->page_pool);
+ dev_err(&priv->pdev->dev,
+ "failed to create page pool, ret = %d\n", ret);
+ ring->page_pool = NULL;
+ }
+
+ return ret;
+}
+
static void hbg_ring_uninit(struct hbg_ring *ring)
{
struct hbg_buffer *buffer;
@@ -299,6 +588,7 @@ static void hbg_ring_uninit(struct hbg_ring *ring)
buffer->priv = NULL;
}
+ hbg_ring_page_pool_destroy(ring);
dma_free_coherent(&ring->priv->pdev->dev,
ring->len * sizeof(*ring->queue),
ring->queue, ring->queue_dma);
@@ -314,8 +604,19 @@ static int hbg_ring_init(struct hbg_priv *priv, struct hbg_ring *ring,
{
struct hbg_buffer *buffer;
u32 i, len;
+ int ret;
len = hbg_get_spec_fifo_max_num(priv, dir) + 1;
+ /* To improve receive performance under heavy load, hbg_napi_rx_poll()
+ * first hands the spare half of the ring buffer to the hardware via
+ * hbg_rx_fill_buffers(), and only then processes the packets already
+ * completed in the original half. This keeps the hardware FIFO fed
+ * while the driver works, minimizing packet loss caused by hardware
+ * overflow as much as possible.
+ */
+ if (dir == HBG_DIR_RX)
+ len += hbg_get_spec_fifo_max_num(priv, dir);
+
ring->queue = dma_alloc_coherent(&priv->pdev->dev,
len * sizeof(*ring->queue),
&ring->queue_dma, GFP_KERNEL);
@@ -337,11 +638,23 @@ static int hbg_ring_init(struct hbg_priv *priv, struct hbg_ring *ring,
ring->ntu = 0;
ring->len = len;
- if (dir == HBG_DIR_TX)
+ if (dir == HBG_DIR_TX) {
netif_napi_add_tx(priv->netdev, &ring->napi, napi_poll);
- else
+ } else {
netif_napi_add(priv->netdev, &ring->napi, napi_poll);
+ ret = hbg_ring_page_pool_init(priv, ring);
+ if (ret) {
+ netif_napi_del(&ring->napi);
+ dma_free_coherent(&ring->priv->pdev->dev,
+ ring->len * sizeof(*ring->queue),
+ ring->queue, ring->queue_dma);
+ ring->queue = NULL;
+ ring->len = 0;
+ return ret;
+ }
+ }
+
napi_enable(&ring->napi);
return 0;
}
@@ -364,21 +677,16 @@ static int hbg_tx_ring_init(struct hbg_priv *priv)
static int hbg_rx_ring_init(struct hbg_priv *priv)
{
int ret;
- u32 i;
ret = hbg_ring_init(priv, &priv->rx_ring, hbg_napi_rx_poll, HBG_DIR_RX);
if (ret)
return ret;
- for (i = 0; i < priv->rx_ring.len - 1; i++) {
- ret = hbg_rx_fill_one_buffer(priv);
- if (ret) {
- hbg_ring_uninit(&priv->rx_ring);
- return ret;
- }
- }
+ ret = hbg_rx_fill_buffers(priv);
+ if (ret)
+ hbg_ring_uninit(&priv->rx_ring);
- return 0;
+ return ret;
}
int hbg_txrx_init(struct hbg_priv *priv)
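The ring-doubling scheme described in the comment above hbg_ring_init() is easier to see in isolation. The following standalone sketch models only the ntu/ntc bookkeeping of "refill one half, then process the other"; it is not part of the patch, it assumes a hardware FIFO depth of four, and the names FIFO_DEPTH, ring_refill() and ring_process() are invented for illustration rather than taken from the driver.

/* Standalone sketch of the "refill one half, process the other" idea.
 * Assumption: a hardware FIFO depth of four; all names are hypothetical.
 */
#include <stdio.h>

#define FIFO_DEPTH	4			/* assumed HW FIFO depth */
#define RING_LEN	(2 * FIFO_DEPTH + 1)	/* extra slot: full != empty */

struct ring {
	int buf[RING_LEN];
	unsigned int ntu;	/* next to use: slots handed to "hardware" */
	unsigned int ntc;	/* next to clean: slots the "driver" processes */
};

static unsigned int ring_used(const struct ring *r)
{
	return (r->ntu + RING_LEN - r->ntc) % RING_LEN;
}

/* Refill first: give the hardware up to FIFO_DEPTH buffers. */
static void ring_refill(struct ring *r, int *next_pkt)
{
	while (ring_used(r) < FIFO_DEPTH) {
		r->buf[r->ntu] = (*next_pkt)++;
		r->ntu = (r->ntu + 1) % RING_LEN;
	}
}

/* Then process: drain whatever has already been completed. */
static void ring_process(struct ring *r)
{
	while (r->ntc != r->ntu) {
		printf("processed packet %d\n", r->buf[r->ntc]);
		r->ntc = (r->ntc + 1) % RING_LEN;
	}
}

int main(void)
{
	struct ring r = { 0 };
	int pkt = 0;

	ring_refill(&r, &pkt);	/* "hardware" now owns one half of the ring */
	ring_process(&r);	/* "driver" drains the completed half */
	ring_refill(&r, &pkt);
	ring_process(&r);
	return 0;
}

The extra slot in RING_LEN mirrors the driver's len = fifo_max + 1 sizing, which is what lets a full ring be distinguished from an empty one.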
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h
index 2883a5899ae2..8b6110599e10 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h
@@ -29,7 +29,12 @@ static inline bool hbg_fifo_is_full(struct hbg_priv *priv, enum hbg_dir dir)
static inline u32 hbg_get_queue_used_num(struct hbg_ring *ring)
{
- return (ring->ntu + ring->len - ring->ntc) % ring->len;
+ u32 len = READ_ONCE(ring->len);
+
+ if (!len)
+ return 0;
+
+ return (READ_ONCE(ring->ntu) + len - READ_ONCE(ring->ntc)) % len;
}
netdev_tx_t hbg_net_start_xmit(struct sk_buff *skb, struct net_device *netdev);
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index a376d4bdf281..18376bcc718a 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -934,8 +934,6 @@ static int hip04_mac_probe(struct platform_device *pdev)
priv->chan = arg.args[1] * RX_DESC_NUM;
priv->group = arg.args[2];
- hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-
/* BQL will try to keep the TX queue as short as possible, but it can't
* be faster than tx_coalesce_usecs, so we need a fast timeout here,
* but also long enough to gather up enough frames to ensure we don't
@@ -944,7 +942,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
*/
priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
priv->tx_coalesce_usecs = 200;
- priv->tx_coalesce_timer.function = tx_done;
+ hrtimer_setup(&priv->tx_coalesce_timer, tx_done, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
priv->map = syscon_node_to_regmap(arg.np);
of_node_put(arg.np);
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index d72657444ef3..2ae34d01fd36 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -512,7 +512,7 @@ struct hnae_ae_ops {
struct net_device_stats *net_stats);
void (*get_stats)(struct hnae_handle *handle, u64 *data);
void (*get_strings)(struct hnae_handle *handle,
- u32 stringset, u8 *data);
+ u32 stringset, u8 **data);
int (*get_sset_count)(struct hnae_handle *handle, int stringset);
void (*update_led_status)(struct hnae_handle *handle);
int (*set_led_id)(struct hnae_handle *handle,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index bc3e406f0139..8ce910f8d0cc 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -730,15 +730,14 @@ static void hns_ae_get_stats(struct hnae_handle *handle, u64 *data)
hns_dsaf_get_stats(vf_cb->dsaf_dev, p, vf_cb->port_index);
}
-static void hns_ae_get_strings(struct hnae_handle *handle,
- u32 stringset, u8 *data)
+static void hns_ae_get_strings(struct hnae_handle *handle, u32 stringset,
+ u8 **data)
{
int port;
int idx;
struct hns_mac_cb *mac_cb;
struct hns_ppe_cb *ppe_cb;
struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
- u8 *p = data;
struct hnae_vf_cb *vf_cb;
assert(handle);
@@ -748,19 +747,14 @@ static void hns_ae_get_strings(struct hnae_handle *handle,
mac_cb = hns_get_mac_cb(handle);
ppe_cb = hns_get_ppe_cb(handle);
- for (idx = 0; idx < handle->q_num; idx++) {
- hns_rcb_get_strings(stringset, p, idx);
- p += ETH_GSTRING_LEN * hns_rcb_get_ring_sset_count(stringset);
- }
-
- hns_ppe_get_strings(ppe_cb, stringset, p);
- p += ETH_GSTRING_LEN * hns_ppe_get_sset_count(stringset);
+ for (idx = 0; idx < handle->q_num; idx++)
+ hns_rcb_get_strings(stringset, data, idx);
- hns_mac_get_strings(mac_cb, stringset, p);
- p += ETH_GSTRING_LEN * hns_mac_get_sset_count(mac_cb, stringset);
+ hns_ppe_get_strings(ppe_cb, stringset, data);
+ hns_mac_get_strings(mac_cb, stringset, data);
if (mac_cb->mac_type == HNAE_PORT_SERVICE)
- hns_dsaf_get_strings(stringset, p, port, dsaf_dev);
+ hns_dsaf_get_strings(stringset, data, port, dsaf_dev);
}
static int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index bdb7afaabdd0..400933ca1a29 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -669,16 +669,15 @@ static void hns_gmac_get_stats(void *mac_drv, u64 *data)
}
}
-static void hns_gmac_get_strings(u32 stringset, u8 *data)
+static void hns_gmac_get_strings(u32 stringset, u8 **data)
{
- u8 *buff = data;
u32 i;
if (stringset != ETH_SS_STATS)
return;
for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++)
- ethtool_puts(&buff, g_gmac_stats_string[i].desc);
+ ethtool_puts(data, g_gmac_stats_string[i].desc);
}
static int hns_gmac_get_sset_count(int stringset)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 5fa9b2eeb929..bc6b269be299 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -1190,8 +1190,7 @@ void hns_mac_get_stats(struct hns_mac_cb *mac_cb, u64 *data)
mac_ctrl_drv->get_ethtool_stats(mac_ctrl_drv, data);
}
-void hns_mac_get_strings(struct hns_mac_cb *mac_cb,
- int stringset, u8 *data)
+void hns_mac_get_strings(struct hns_mac_cb *mac_cb, int stringset, u8 **data)
{
struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index edf0bcf76ac9..630f01cf7a71 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -378,7 +378,7 @@ struct mac_driver {
void (*get_regs)(void *mac_drv, void *data);
int (*get_regs_count)(void);
/* get strings name for ethtool statistic */
- void (*get_strings)(u32 stringset, u8 *data);
+ void (*get_strings)(u32 stringset, u8 **data);
/* get the number of strings*/
int (*get_sset_count)(int stringset);
@@ -445,7 +445,7 @@ int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb,
enum hnae_loop loop, int en);
void hns_mac_update_stats(struct hns_mac_cb *mac_cb);
void hns_mac_get_stats(struct hns_mac_cb *mac_cb, u64 *data);
-void hns_mac_get_strings(struct hns_mac_cb *mac_cb, int stringset, u8 *data);
+void hns_mac_get_strings(struct hns_mac_cb *mac_cb, int stringset, u8 **data);
int hns_mac_get_sset_count(struct hns_mac_cb *mac_cb, int stringset);
void hns_mac_get_regs(struct hns_mac_cb *mac_cb, void *data);
int hns_mac_get_regs_count(struct hns_mac_cb *mac_cb);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index eb60f45a3460..6b6ced37e490 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -2590,55 +2590,34 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
p[i] = 0xdddddddd;
}
-static char *hns_dsaf_get_node_stats_strings(char *data, int node,
- struct dsaf_device *dsaf_dev)
+static void hns_dsaf_get_node_stats_strings(u8 **data, int node,
+ struct dsaf_device *dsaf_dev)
{
- char *buff = data;
- int i;
bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
+ int i;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_pad_drop_pkts", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_manage_pkts", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkts", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkt_id", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pause_frame", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_release_buf_num", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_sbm_drop_pkts", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_crc_false_pkts", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_bp_drop_pkts", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_lookup_rslt_drop_pkts", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_local_rslt_fail_pkts", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_vlan_drop_pkts", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_stp_drop_pkts", node);
- buff += ETH_GSTRING_LEN;
+ ethtool_sprintf(data, "innod%d_pad_drop_pkts", node);
+ ethtool_sprintf(data, "innod%d_manage_pkts", node);
+ ethtool_sprintf(data, "innod%d_rx_pkts", node);
+ ethtool_sprintf(data, "innod%d_rx_pkt_id", node);
+ ethtool_sprintf(data, "innod%d_rx_pause_frame", node);
+ ethtool_sprintf(data, "innod%d_release_buf_num", node);
+ ethtool_sprintf(data, "innod%d_sbm_drop_pkts", node);
+ ethtool_sprintf(data, "innod%d_crc_false_pkts", node);
+ ethtool_sprintf(data, "innod%d_bp_drop_pkts", node);
+ ethtool_sprintf(data, "innod%d_lookup_rslt_drop_pkts", node);
+ ethtool_sprintf(data, "innod%d_local_rslt_fail_pkts", node);
+ ethtool_sprintf(data, "innod%d_vlan_drop_pkts", node);
+ ethtool_sprintf(data, "innod%d_stp_drop_pkts", node);
if (node < DSAF_SERVICE_NW_NUM && !is_ver1) {
for (i = 0; i < DSAF_PRIO_NR; i++) {
- snprintf(buff + 0 * ETH_GSTRING_LEN * DSAF_PRIO_NR,
- ETH_GSTRING_LEN, "inod%d_pfc_prio%d_pkts",
- node, i);
- snprintf(buff + 1 * ETH_GSTRING_LEN * DSAF_PRIO_NR,
- ETH_GSTRING_LEN, "onod%d_pfc_prio%d_pkts",
- node, i);
- buff += ETH_GSTRING_LEN;
+ ethtool_sprintf(data, "inod%d_pfc_prio%d_pkts", node,
+ i);
+ ethtool_sprintf(data, "onod%d_pfc_prio%d_pkts", node,
+ i);
}
- buff += 1 * DSAF_PRIO_NR * ETH_GSTRING_LEN;
}
- snprintf(buff, ETH_GSTRING_LEN, "onnod%d_tx_pkts", node);
- buff += ETH_GSTRING_LEN;
-
- return buff;
+ ethtool_sprintf(data, "onnod%d_tx_pkts", node);
}
static u64 *hns_dsaf_get_node_stats(struct dsaf_device *ddev, u64 *data,
@@ -2720,21 +2699,20 @@ int hns_dsaf_get_sset_count(struct dsaf_device *dsaf_dev, int stringset)
*@port:port index
*@dsaf_dev: dsaf device
*/
-void hns_dsaf_get_strings(int stringset, u8 *data, int port,
+void hns_dsaf_get_strings(int stringset, u8 **data, int port,
struct dsaf_device *dsaf_dev)
{
- char *buff = (char *)data;
int node = port;
if (stringset != ETH_SS_STATS)
return;
/* for ge/xge node info */
- buff = hns_dsaf_get_node_stats_strings(buff, node, dsaf_dev);
+ hns_dsaf_get_node_stats_strings(data, node, dsaf_dev);
/* for ppe node info */
node = port + DSAF_PPE_INODE_BASE;
- (void)hns_dsaf_get_node_stats_strings(buff, node, dsaf_dev);
+ hns_dsaf_get_node_stats_strings(data, node, dsaf_dev);
}
/**
@@ -3041,115 +3019,6 @@ static struct platform_driver g_dsaf_driver = {
module_platform_driver(g_dsaf_driver);
-/**
- * hns_dsaf_roce_reset - reset dsaf and roce
- * @dsaf_fwnode: Pointer to framework node for the dasf
- * @dereset: false - request reset , true - drop reset
- * return 0 - success , negative -fail
- */
-int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
-{
- struct dsaf_device *dsaf_dev;
- struct platform_device *pdev;
- u32 mp;
- u32 sl;
- u32 credit;
- int i;
- static const u32 port_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
- {DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0},
- {DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0},
- {DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0},
- {DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0},
- {DSAF_ROCE_PORT_4, DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1},
- {DSAF_ROCE_PORT_4, DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1},
- {DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1},
- {DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1},
- };
- static const u32 sl_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0},
- {DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2},
- {DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3},
- };
-
- /* find the platform device corresponding to fwnode */
- if (is_of_node(dsaf_fwnode)) {
- pdev = of_find_device_by_node(to_of_node(dsaf_fwnode));
- } else if (is_acpi_device_node(dsaf_fwnode)) {
- pdev = hns_dsaf_find_platform_device(dsaf_fwnode);
- } else {
- pr_err("fwnode is neither OF or ACPI type\n");
- return -EINVAL;
- }
-
- /* check if we were a success in fetching pdev */
- if (!pdev) {
- pr_err("couldn't find platform device for node\n");
- return -ENODEV;
- }
-
- /* retrieve the dsaf_device from the driver data */
- dsaf_dev = dev_get_drvdata(&pdev->dev);
- if (!dsaf_dev) {
- dev_err(&pdev->dev, "dsaf_dev is NULL\n");
- put_device(&pdev->dev);
- return -ENODEV;
- }
-
- /* now, make sure we are running on compatible SoC */
- if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
- dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n",
- dsaf_dev->ae_dev.name);
- put_device(&pdev->dev);
- return -ENODEV;
- }
-
- /* do reset or de-reset according to the flag */
- if (!dereset) {
- /* reset rocee-channels in dsaf and rocee */
- dsaf_dev->misc_op->hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK,
- false);
- dsaf_dev->misc_op->hns_dsaf_roce_srst(dsaf_dev, false);
- } else {
- /* configure dsaf tx roce correspond to port map and sl map */
- mp = dsaf_read_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG);
- for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++)
- dsaf_set_field(mp, 7 << i * 3, i * 3,
- port_map[i][DSAF_ROCE_6PORT_MODE]);
- dsaf_set_field(mp, 3 << i * 3, i * 3, 0);
- dsaf_write_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG, mp);
-
- sl = dsaf_read_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG);
- for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++)
- dsaf_set_field(sl, 3 << i * 2, i * 2,
- sl_map[i][DSAF_ROCE_6PORT_MODE]);
- dsaf_write_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG, sl);
-
- /* de-reset rocee-channels in dsaf and rocee */
- dsaf_dev->misc_op->hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK,
- true);
- msleep(SRST_TIME_INTERVAL);
- dsaf_dev->misc_op->hns_dsaf_roce_srst(dsaf_dev, true);
-
- /* enable dsaf channel rocee credit */
- credit = dsaf_read_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG);
- dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 0);
- dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
-
- dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1);
- dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
- }
-
- put_device(&pdev->dev);
-
- return 0;
-}
-EXPORT_SYMBOL(hns_dsaf_roce_reset);
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HNS DSAF driver");
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 5526a10caac5..653dfbb25d1b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -42,29 +42,6 @@ struct hns_mac_cb;
#define HNS_MAX_WAIT_CNT 10000
-enum dsaf_roce_port_mode {
- DSAF_ROCE_6PORT_MODE,
- DSAF_ROCE_4PORT_MODE,
- DSAF_ROCE_2PORT_MODE,
- DSAF_ROCE_CHAN_MODE_NUM,
-};
-
-enum dsaf_roce_port_num {
- DSAF_ROCE_PORT_0,
- DSAF_ROCE_PORT_1,
- DSAF_ROCE_PORT_2,
- DSAF_ROCE_PORT_3,
- DSAF_ROCE_PORT_4,
- DSAF_ROCE_PORT_5,
-};
-
-enum dsaf_roce_qos_sl {
- DSAF_ROCE_SL_0,
- DSAF_ROCE_SL_1,
- DSAF_ROCE_SL_2,
- DSAF_ROCE_SL_3,
-};
-
#define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HNS_DSAF_IS_DEBUG(dev) ((dev)->dsaf_mode == DSAF_MODE_DISABLE_SP)
@@ -307,9 +284,6 @@ struct dsaf_misc_op {
void (*ge_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
void (*ppe_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
void (*ppe_comm_srst)(struct dsaf_device *dsaf_dev, bool dereset);
- void (*hns_dsaf_srst_chns)(struct dsaf_device *dsaf_dev, u32 msk,
- bool dereset);
- void (*hns_dsaf_roce_srst)(struct dsaf_device *dsaf_dev, bool dereset);
phy_interface_t (*get_phy_if)(struct hns_mac_cb *mac_cb);
int (*get_sfp_prsnt)(struct hns_mac_cb *mac_cb, int *sfp_prsnt);
@@ -442,7 +416,7 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 inode_num);
int hns_dsaf_get_sset_count(struct dsaf_device *dsaf_dev, int stringset);
void hns_dsaf_get_stats(struct dsaf_device *ddev, u64 *data, int port);
-void hns_dsaf_get_strings(int stringset, u8 *data, int port,
+void hns_dsaf_get_strings(int stringset, u8 **data, int port,
struct dsaf_device *dsaf_dev);
void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data);
@@ -463,6 +437,4 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
u8 mac_id, u8 port_num);
int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);
-int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
-
#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 5df19c604d09..91391a49fcea 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -326,69 +326,6 @@ static void hns_dsaf_xge_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
HNS_XGE_RESET_FUNC, port, dereset);
}
-/**
- * hns_dsaf_srst_chns - reset dsaf channels
- * @dsaf_dev: dsaf device struct pointer
- * @msk: xbar channels mask value:
- * @dereset: false - request reset , true - drop reset
- *
- * bit0-5 for xge0-5
- * bit6-11 for ppe0-5
- * bit12-17 for roce0-5
- * bit18-19 for com/dfx
- */
-static void
-hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
-{
- u32 reg_addr;
-
- if (!dereset)
- reg_addr = DSAF_SUB_SC_DSAF_RESET_REQ_REG;
- else
- reg_addr = DSAF_SUB_SC_DSAF_RESET_DREQ_REG;
-
- dsaf_write_sub(dsaf_dev, reg_addr, msk);
-}
-
-/**
- * hns_dsaf_srst_chns_acpi - reset dsaf channels
- * @dsaf_dev: dsaf device struct pointer
- * @msk: xbar channels mask value:
- * @dereset: false - request reset , true - drop reset
- *
- * bit0-5 for xge0-5
- * bit6-11 for ppe0-5
- * bit12-17 for roce0-5
- * bit18-19 for com/dfx
- */
-static void
-hns_dsaf_srst_chns_acpi(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
-{
- hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
- HNS_DSAF_CHN_RESET_FUNC,
- msk, dereset);
-}
-
-static void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool dereset)
-{
- if (!dereset) {
- dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_RESET_REQ_REG, 1);
- } else {
- dsaf_write_sub(dsaf_dev,
- DSAF_SUB_SC_ROCEE_CLK_DIS_REG, 1);
- dsaf_write_sub(dsaf_dev,
- DSAF_SUB_SC_ROCEE_RESET_DREQ_REG, 1);
- msleep(20);
- dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_CLK_EN_REG, 1);
- }
-}
-
-static void hns_dsaf_roce_srst_acpi(struct dsaf_device *dsaf_dev, bool dereset)
-{
- hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
- HNS_ROCE_RESET_FUNC, 0, dereset);
-}
-
static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
bool dereset)
{
@@ -729,8 +666,6 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
misc_op->ge_srst = hns_dsaf_ge_srst_by_port;
misc_op->ppe_srst = hns_ppe_srst_by_port;
misc_op->ppe_comm_srst = hns_ppe_com_srst;
- misc_op->hns_dsaf_srst_chns = hns_dsaf_srst_chns;
- misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst;
misc_op->get_phy_if = hns_mac_get_phy_if;
misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt;
@@ -746,8 +681,6 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
misc_op->ge_srst = hns_dsaf_ge_srst_by_port_acpi;
misc_op->ppe_srst = hns_ppe_srst_by_port_acpi;
misc_op->ppe_comm_srst = hns_ppe_com_srst;
- misc_op->hns_dsaf_srst_chns = hns_dsaf_srst_chns_acpi;
- misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst_acpi;
misc_op->get_phy_if = hns_mac_get_phy_if_acpi;
misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt_acpi;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index a08d1f0a5a16..5013beb4d282 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -457,24 +457,23 @@ int hns_ppe_get_regs_count(void)
* @stringset: string set type
* @data: output string
*/
-void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 *data)
+void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 **data)
{
int index = ppe_cb->index;
- u8 *buff = data;
-
- ethtool_sprintf(&buff, "ppe%d_rx_sw_pkt", index);
- ethtool_sprintf(&buff, "ppe%d_rx_pkt_ok", index);
- ethtool_sprintf(&buff, "ppe%d_rx_drop_pkt_no_bd", index);
- ethtool_sprintf(&buff, "ppe%d_rx_alloc_buf_fail", index);
- ethtool_sprintf(&buff, "ppe%d_rx_alloc_buf_wait", index);
- ethtool_sprintf(&buff, "ppe%d_rx_pkt_drop_no_buf", index);
- ethtool_sprintf(&buff, "ppe%d_rx_pkt_err_fifo_full", index);
-
- ethtool_sprintf(&buff, "ppe%d_tx_bd", index);
- ethtool_sprintf(&buff, "ppe%d_tx_pkt", index);
- ethtool_sprintf(&buff, "ppe%d_tx_pkt_ok", index);
- ethtool_sprintf(&buff, "ppe%d_tx_pkt_err_fifo_empty", index);
- ethtool_sprintf(&buff, "ppe%d_tx_pkt_err_csum_fail", index);
+
+ ethtool_sprintf(data, "ppe%d_rx_sw_pkt", index);
+ ethtool_sprintf(data, "ppe%d_rx_pkt_ok", index);
+ ethtool_sprintf(data, "ppe%d_rx_drop_pkt_no_bd", index);
+ ethtool_sprintf(data, "ppe%d_rx_alloc_buf_fail", index);
+ ethtool_sprintf(data, "ppe%d_rx_alloc_buf_wait", index);
+ ethtool_sprintf(data, "ppe%d_rx_pkt_drop_no_buf", index);
+ ethtool_sprintf(data, "ppe%d_rx_pkt_err_fifo_full", index);
+
+ ethtool_sprintf(data, "ppe%d_tx_bd", index);
+ ethtool_sprintf(data, "ppe%d_tx_pkt", index);
+ ethtool_sprintf(data, "ppe%d_tx_pkt_ok", index);
+ ethtool_sprintf(data, "ppe%d_tx_pkt_err_fifo_empty", index);
+ ethtool_sprintf(data, "ppe%d_tx_pkt_err_csum_fail", index);
}
void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
index 7e00231c1acf..602c8e971fe4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
@@ -109,7 +109,7 @@ int hns_ppe_get_sset_count(int stringset);
int hns_ppe_get_regs_count(void);
void hns_ppe_get_regs(struct hns_ppe_cb *ppe_cb, void *data);
-void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 *data);
+void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 **data);
void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data);
void hns_ppe_set_tso_enable(struct hns_ppe_cb *ppe_cb, u32 value);
void hns_ppe_set_rss_key(struct hns_ppe_cb *ppe_cb,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 93344563a259..635b3a95dd82 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -195,11 +195,6 @@ void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, !!val);
}
-void hns_rcb_start(struct hnae_queue *q, u32 val)
-{
- hns_rcb_ring_enable_hw(q, val);
-}
-
/**
*hns_rcb_common_init_commit_hw - make rcb common init completed
*@rcb_common: rcb common device
@@ -923,44 +918,42 @@ int hns_rcb_get_ring_regs_count(void)
*@data:strings name value
*@index:queue index
*/
-void hns_rcb_get_strings(int stringset, u8 *data, int index)
+void hns_rcb_get_strings(int stringset, u8 **data, int index)
{
- u8 *buff = data;
-
if (stringset != ETH_SS_STATS)
return;
- ethtool_sprintf(&buff, "tx_ring%d_rcb_pkt_num", index);
- ethtool_sprintf(&buff, "tx_ring%d_ppe_tx_pkt_num", index);
- ethtool_sprintf(&buff, "tx_ring%d_ppe_drop_pkt_num", index);
- ethtool_sprintf(&buff, "tx_ring%d_fbd_num", index);
-
- ethtool_sprintf(&buff, "tx_ring%d_pkt_num", index);
- ethtool_sprintf(&buff, "tx_ring%d_bytes", index);
- ethtool_sprintf(&buff, "tx_ring%d_err_cnt", index);
- ethtool_sprintf(&buff, "tx_ring%d_io_err", index);
- ethtool_sprintf(&buff, "tx_ring%d_sw_err", index);
- ethtool_sprintf(&buff, "tx_ring%d_seg_pkt", index);
- ethtool_sprintf(&buff, "tx_ring%d_restart_queue", index);
- ethtool_sprintf(&buff, "tx_ring%d_tx_busy", index);
-
- ethtool_sprintf(&buff, "rx_ring%d_rcb_pkt_num", index);
- ethtool_sprintf(&buff, "rx_ring%d_ppe_pkt_num", index);
- ethtool_sprintf(&buff, "rx_ring%d_ppe_drop_pkt_num", index);
- ethtool_sprintf(&buff, "rx_ring%d_fbd_num", index);
-
- ethtool_sprintf(&buff, "rx_ring%d_pkt_num", index);
- ethtool_sprintf(&buff, "rx_ring%d_bytes", index);
- ethtool_sprintf(&buff, "rx_ring%d_err_cnt", index);
- ethtool_sprintf(&buff, "rx_ring%d_io_err", index);
- ethtool_sprintf(&buff, "rx_ring%d_sw_err", index);
- ethtool_sprintf(&buff, "rx_ring%d_seg_pkt", index);
- ethtool_sprintf(&buff, "rx_ring%d_reuse_pg", index);
- ethtool_sprintf(&buff, "rx_ring%d_len_err", index);
- ethtool_sprintf(&buff, "rx_ring%d_non_vld_desc_err", index);
- ethtool_sprintf(&buff, "rx_ring%d_bd_num_err", index);
- ethtool_sprintf(&buff, "rx_ring%d_l2_err", index);
- ethtool_sprintf(&buff, "rx_ring%d_l3l4csum_err", index);
+ ethtool_sprintf(data, "tx_ring%d_rcb_pkt_num", index);
+ ethtool_sprintf(data, "tx_ring%d_ppe_tx_pkt_num", index);
+ ethtool_sprintf(data, "tx_ring%d_ppe_drop_pkt_num", index);
+ ethtool_sprintf(data, "tx_ring%d_fbd_num", index);
+
+ ethtool_sprintf(data, "tx_ring%d_pkt_num", index);
+ ethtool_sprintf(data, "tx_ring%d_bytes", index);
+ ethtool_sprintf(data, "tx_ring%d_err_cnt", index);
+ ethtool_sprintf(data, "tx_ring%d_io_err", index);
+ ethtool_sprintf(data, "tx_ring%d_sw_err", index);
+ ethtool_sprintf(data, "tx_ring%d_seg_pkt", index);
+ ethtool_sprintf(data, "tx_ring%d_restart_queue", index);
+ ethtool_sprintf(data, "tx_ring%d_tx_busy", index);
+
+ ethtool_sprintf(data, "rx_ring%d_rcb_pkt_num", index);
+ ethtool_sprintf(data, "rx_ring%d_ppe_pkt_num", index);
+ ethtool_sprintf(data, "rx_ring%d_ppe_drop_pkt_num", index);
+ ethtool_sprintf(data, "rx_ring%d_fbd_num", index);
+
+ ethtool_sprintf(data, "rx_ring%d_pkt_num", index);
+ ethtool_sprintf(data, "rx_ring%d_bytes", index);
+ ethtool_sprintf(data, "rx_ring%d_err_cnt", index);
+ ethtool_sprintf(data, "rx_ring%d_io_err", index);
+ ethtool_sprintf(data, "rx_ring%d_sw_err", index);
+ ethtool_sprintf(data, "rx_ring%d_seg_pkt", index);
+ ethtool_sprintf(data, "rx_ring%d_reuse_pg", index);
+ ethtool_sprintf(data, "rx_ring%d_len_err", index);
+ ethtool_sprintf(data, "rx_ring%d_non_vld_desc_err", index);
+ ethtool_sprintf(data, "rx_ring%d_bd_num_err", index);
+ ethtool_sprintf(data, "rx_ring%d_l2_err", index);
+ ethtool_sprintf(data, "rx_ring%d_l3l4csum_err", index);
}
void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
index c1e9b6997853..68f81547dfb4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -116,7 +116,6 @@ int hns_rcb_buf_size2type(u32 buf_size);
int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index);
void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index);
int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common);
-void hns_rcb_start(struct hnae_queue *q, u32 val);
int hns_rcb_get_cfg(struct rcb_common_cb *rcb_common);
void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode,
u16 *max_vfn, u16 *max_q_per_vf);
@@ -157,7 +156,7 @@ int hns_rcb_get_ring_regs_count(void);
void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data);
-void hns_rcb_get_strings(int stringset, u8 *data, int index);
+void hns_rcb_get_strings(int stringset, u8 **data, int index);
void hns_rcb_set_rx_ring_bs(struct hnae_queue *q, u32 buf_size);
void hns_rcb_set_tx_ring_bs(struct hnae_queue *q, u32 buf_size);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index c58833eb4830..dbc44c2c26c2 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -743,16 +743,15 @@ static void hns_xgmac_get_stats(void *mac_drv, u64 *data)
*@stringset: type of values in data
*@data:data for value of string name
*/
-static void hns_xgmac_get_strings(u32 stringset, u8 *data)
+static void hns_xgmac_get_strings(u32 stringset, u8 **data)
{
- u8 *buff = data;
u32 i;
if (stringset != ETH_SS_STATS)
return;
for (i = 0; i < ARRAY_SIZE(g_xgmac_stats_string); i++)
- ethtool_puts(&buff, g_xgmac_stats_string[i].desc);
+ ethtool_puts(data, g_xgmac_stats_string[i].desc);
}
/**
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 42bb341fd80b..e905f10b894e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1402,7 +1402,7 @@ static void hns_nic_net_down(struct net_device *ndev)
if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
return;
- (void)del_timer_sync(&priv->service_timer);
+ (void)timer_delete_sync(&priv->service_timer);
netif_tx_stop_all_queues(ndev);
netif_carrier_off(ndev);
netif_tx_disable(ndev);
@@ -2075,7 +2075,7 @@ static void hns_nic_task_schedule(struct hns_nic_priv *priv)
static void hns_nic_service_timer(struct timer_list *t)
{
- struct hns_nic_priv *priv = from_timer(priv, t, service_timer);
+ struct hns_nic_priv *priv = timer_container_of(priv, t, service_timer);
(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index a5bb306b2cf1..60a586a951a0 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -266,9 +266,9 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
if (err)
goto out;
- err = phy_loopback(phy_dev, true);
+ err = phy_loopback(phy_dev, true, 0);
} else {
- err = phy_loopback(phy_dev, false);
+ err = phy_loopback(phy_dev, false, 0);
if (err)
goto out;
@@ -903,7 +903,6 @@ static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
- u8 *buff = data;
if (!h->dev->ops->get_strings) {
netdev_err(netdev, "h->dev->ops->get_strings is null!\n");
@@ -912,43 +911,43 @@ static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
if (stringset == ETH_SS_TEST) {
if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII)
- ethtool_puts(&buff,
+ ethtool_puts(&data,
hns_nic_test_strs[MAC_INTERNALLOOP_MAC]);
- ethtool_puts(&buff, hns_nic_test_strs[MAC_INTERNALLOOP_SERDES]);
+ ethtool_puts(&data, hns_nic_test_strs[MAC_INTERNALLOOP_SERDES]);
if ((netdev->phydev) && (!netdev->phydev->is_c45))
- ethtool_puts(&buff,
+ ethtool_puts(&data,
hns_nic_test_strs[MAC_INTERNALLOOP_PHY]);
} else {
- ethtool_puts(&buff, "rx_packets");
- ethtool_puts(&buff, "tx_packets");
- ethtool_puts(&buff, "rx_bytes");
- ethtool_puts(&buff, "tx_bytes");
- ethtool_puts(&buff, "rx_errors");
- ethtool_puts(&buff, "tx_errors");
- ethtool_puts(&buff, "rx_dropped");
- ethtool_puts(&buff, "tx_dropped");
- ethtool_puts(&buff, "multicast");
- ethtool_puts(&buff, "collisions");
- ethtool_puts(&buff, "rx_over_errors");
- ethtool_puts(&buff, "rx_crc_errors");
- ethtool_puts(&buff, "rx_frame_errors");
- ethtool_puts(&buff, "rx_fifo_errors");
- ethtool_puts(&buff, "rx_missed_errors");
- ethtool_puts(&buff, "tx_aborted_errors");
- ethtool_puts(&buff, "tx_carrier_errors");
- ethtool_puts(&buff, "tx_fifo_errors");
- ethtool_puts(&buff, "tx_heartbeat_errors");
- ethtool_puts(&buff, "rx_length_errors");
- ethtool_puts(&buff, "tx_window_errors");
- ethtool_puts(&buff, "rx_compressed");
- ethtool_puts(&buff, "tx_compressed");
- ethtool_puts(&buff, "netdev_rx_dropped");
- ethtool_puts(&buff, "netdev_tx_dropped");
-
- ethtool_puts(&buff, "netdev_tx_timeout");
-
- h->dev->ops->get_strings(h, stringset, buff);
+ ethtool_puts(&data, "rx_packets");
+ ethtool_puts(&data, "tx_packets");
+ ethtool_puts(&data, "rx_bytes");
+ ethtool_puts(&data, "tx_bytes");
+ ethtool_puts(&data, "rx_errors");
+ ethtool_puts(&data, "tx_errors");
+ ethtool_puts(&data, "rx_dropped");
+ ethtool_puts(&data, "tx_dropped");
+ ethtool_puts(&data, "multicast");
+ ethtool_puts(&data, "collisions");
+ ethtool_puts(&data, "rx_over_errors");
+ ethtool_puts(&data, "rx_crc_errors");
+ ethtool_puts(&data, "rx_frame_errors");
+ ethtool_puts(&data, "rx_fifo_errors");
+ ethtool_puts(&data, "rx_missed_errors");
+ ethtool_puts(&data, "tx_aborted_errors");
+ ethtool_puts(&data, "tx_carrier_errors");
+ ethtool_puts(&data, "tx_fifo_errors");
+ ethtool_puts(&data, "tx_heartbeat_errors");
+ ethtool_puts(&data, "rx_length_errors");
+ ethtool_puts(&data, "tx_window_errors");
+ ethtool_puts(&data, "rx_compressed");
+ ethtool_puts(&data, "tx_compressed");
+ ethtool_puts(&data, "netdev_rx_dropped");
+ ethtool_puts(&data, "netdev_tx_dropped");
+
+ ethtool_puts(&data, "netdev_tx_timeout");
+
+ h->dev->ops->get_strings(h, stringset, &data);
}
}
@@ -970,7 +969,7 @@ static int hns_get_sset_count(struct net_device *netdev, int stringset)
return -EOPNOTSUPP;
}
if (stringset == ETH_SS_TEST) {
- u32 cnt = (sizeof(hns_nic_test_strs) / ETH_GSTRING_LEN);
+ u32 cnt = ARRAY_SIZE(hns_nic_test_strs);
if (priv->ae_handle->phy_if == PHY_INTERFACE_MODE_XGMII)
cnt--;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 67b0bf310daa..b25fb400f476 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -25,8 +25,11 @@ void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo)
pci_id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
if (!pci_id)
continue;
- if (IS_ENABLED(CONFIG_PCI_IOV))
+ if (IS_ENABLED(CONFIG_PCI_IOV)) {
+ device_lock(&ae_dev->pdev->dev);
pci_disable_sriov(ae_dev->pdev);
+ device_unlock(&ae_dev->pdev->dev);
+ }
}
}
EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare);
@@ -37,6 +40,21 @@ EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare);
*/
static DEFINE_MUTEX(hnae3_common_lock);
+/* ensure drivers are unloaded one at a time */
+static DEFINE_MUTEX(hnae3_unload_lock);
+
+void hnae3_acquire_unload_lock(void)
+{
+ mutex_lock(&hnae3_unload_lock);
+}
+EXPORT_SYMBOL(hnae3_acquire_unload_lock);
+
+void hnae3_release_unload_lock(void)
+{
+ mutex_unlock(&hnae3_unload_lock);
+}
+EXPORT_SYMBOL(hnae3_release_unload_lock);
+
static bool hnae3_client_match(enum hnae3_client_type client_type)
{
if (client_type == HNAE3_CLIENT_KNIC ||
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 27dbe367f3d3..d7c3df1958f3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -339,6 +339,10 @@ enum hnae3_dbg_cmd {
HNAE3_DBG_CMD_UNKNOWN,
};
+#define hnae3_seq_file_to_ae_dev(s) (dev_get_drvdata((s)->private))
+#define hnae3_seq_file_to_handle(s) \
+ (((struct hnae3_ae_dev *)hnae3_seq_file_to_ae_dev(s))->handle)
+
enum hnae3_tc_map_mode {
HNAE3_TC_MAP_MODE_PRIO,
HNAE3_TC_MAP_MODE_DSCP,
@@ -434,8 +438,11 @@ struct hnae3_ae_dev {
u32 dev_version;
DECLARE_BITMAP(caps, HNAE3_DEV_CAPS_MAX_NUM);
void *priv;
+ struct hnae3_handle *handle;
};
+typedef int (*read_func)(struct seq_file *s, void *data);
+
/* This struct defines the operation on the handle.
*
* init_ae_dev(): (mandatory)
@@ -580,8 +587,6 @@ struct hnae3_ae_dev {
* Delete clsflower rule
* cls_flower_active
* Check if any cls flower rule exist
- * dbg_read_cmd
- * Execute debugfs read command.
* set_tx_hwts_info
* Save information for 1588 tx packet
* get_rx_hwts
@@ -594,6 +599,8 @@ struct hnae3_ae_dev {
* Get wake on lan info
* set_wol
* Config wake on lan
+ * dbg_get_read_func
+ * Return the read func for debugfs seq file
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -677,7 +684,7 @@ struct hnae3_ae_ops {
void (*get_mac_stats)(struct hnae3_handle *handle,
struct hns3_mac_stats *mac_stats);
void (*get_strings)(struct hnae3_handle *handle,
- u32 stringset, u8 *data);
+ u32 stringset, u8 **data);
int (*get_sset_count)(struct hnae3_handle *handle, int stringset);
void (*get_regs)(struct hnae3_handle *handle, u32 *version,
@@ -690,9 +697,9 @@ struct hnae3_ae_ops {
int (*set_rss)(struct hnae3_handle *handle, const u32 *indir,
const u8 *key, const u8 hfunc);
int (*set_rss_tuple)(struct hnae3_handle *handle,
- struct ethtool_rxnfc *cmd);
+ const struct ethtool_rxfh_fields *cmd);
int (*get_rss_tuple)(struct hnae3_handle *handle,
- struct ethtool_rxnfc *cmd);
+ struct ethtool_rxfh_fields *cmd);
int (*get_tc_size)(struct hnae3_handle *handle);
@@ -748,8 +755,6 @@ struct hnae3_ae_ops {
void (*enable_fd)(struct hnae3_handle *handle, bool enable);
int (*add_arfs_entry)(struct hnae3_handle *handle, u16 queue_id,
u16 flow_id, struct flow_keys *fkeys);
- int (*dbg_read_cmd)(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
- char *buf, int len);
pci_ers_result_t (*handle_hw_ras_error)(struct hnae3_ae_dev *ae_dev);
bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
bool (*ae_dev_resetting)(struct hnae3_handle *handle);
@@ -796,6 +801,14 @@ struct hnae3_ae_ops {
struct ethtool_wolinfo *wol);
int (*set_wol)(struct hnae3_handle *handle,
struct ethtool_wolinfo *wol);
+ int (*dbg_get_read_func)(struct hnae3_handle *handle,
+ enum hnae3_dbg_cmd cmd,
+ read_func *func);
+ int (*hwtstamp_get)(struct hnae3_handle *handle,
+ struct kernel_hwtstamp_config *config);
+ int (*hwtstamp_set)(struct hnae3_handle *handle,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
};
struct hnae3_dcb_ops {
@@ -916,9 +929,6 @@ struct hnae3_handle {
u8 netdev_flags;
struct dentry *hnae3_dbgfs;
- /* protects concurrent contention between debugfs commands */
- struct mutex dbgfs_lock;
- char **dbgfs_buf;
/* Network interface message level enabled bits */
u32 msg_enable;
@@ -966,4 +976,6 @@ int hnae3_register_client(struct hnae3_client *client);
void hnae3_set_client_init_flag(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev,
unsigned int inited);
+void hnae3_acquire_unload_lock(void);
+void hnae3_release_unload_lock(void);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
index 4ad4e8ab2f1f..37396ca4ecfc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
@@ -348,7 +348,7 @@ static int hclge_comm_cmd_csq_clean(struct hclge_comm_hw *hw)
static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw)
{
u32 head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
- return head == hw->cmq.csq.next_to_use;
+ return head == (u32)hw->cmq.csq.next_to_use;
}
static u32 hclge_get_cmdq_tx_timeout(u16 opcode, u32 tx_timeout)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
index 4e2bb6556b1c..1eca53aaf598 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
@@ -151,7 +151,7 @@ EXPORT_SYMBOL_GPL(hclge_comm_set_rss_hash_key);
int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_hw *hw,
struct hclge_comm_rss_cfg *rss_cfg,
- struct ethtool_rxnfc *nfc)
+ const struct ethtool_rxfh_fields *nfc)
{
struct hclge_comm_rss_input_tuple_cmd *req;
struct hclge_desc desc;
@@ -422,7 +422,7 @@ int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
}
EXPORT_SYMBOL_GPL(hclge_comm_set_rss_algo_key);
-static u8 hclge_comm_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
+static u8 hclge_comm_get_rss_hash_bits(const struct ethtool_rxfh_fields *nfc)
{
u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_COMM_S_PORT_BIT : 0;
@@ -448,7 +448,7 @@ static u8 hclge_comm_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
}
int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg,
- struct ethtool_rxnfc *nfc,
+ const struct ethtool_rxfh_fields *nfc,
struct hnae3_ae_dev *ae_dev,
struct hclge_comm_rss_input_tuple_cmd *req)
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
index cdafa63fe38b..cbc02b50c6e7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
@@ -108,7 +108,7 @@ void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg,
int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
const u8 *key);
int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg,
- struct ethtool_rxnfc *nfc,
+ const struct ethtool_rxfh_fields *nfc,
struct hnae3_ae_dev *ae_dev,
struct hclge_comm_rss_input_tuple_cmd *req);
u64 hclge_comm_convert_rss_tuple(u8 tuple_sets);
@@ -129,5 +129,5 @@ int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg,
int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_hw *hw,
struct hclge_comm_rss_cfg *rss_cfg,
- struct ethtool_rxnfc *nfc);
+ const struct ethtool_rxfh_fields *nfc);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
index 2b31188ff555..f9a3d6fc4416 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
@@ -36,27 +36,22 @@ int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle)
}
EXPORT_SYMBOL_GPL(hclge_comm_tqps_get_sset_count);
-u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
+void hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 **data)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- u8 *buff = data;
u16 i;
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_comm_tqp *tqp =
container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd", tqp->index);
- buff += ETH_GSTRING_LEN;
+ ethtool_sprintf(data, "txq%u_pktnum_rcd", tqp->index);
}
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_comm_tqp *tqp =
container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd", tqp->index);
- buff += ETH_GSTRING_LEN;
+ ethtool_sprintf(data, "rxq%u_pktnum_rcd", tqp->index);
}
-
- return buff;
}
EXPORT_SYMBOL_GPL(hclge_comm_tqps_get_strings);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h
index a46350162ee8..b9ff424c0bc2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h
@@ -32,7 +32,7 @@ struct hclge_comm_tqp {
u64 *hclge_comm_tqps_get_stats(struct hnae3_handle *handle, u64 *data);
int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle);
-u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data);
+void hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 **data);
void hclge_comm_reset_tqp_stats(struct hnae3_handle *handle);
int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
struct hclge_comm_hw *hw);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 841e5af7b2be..4cce4f4ba6b0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -3,6 +3,8 @@
#include <linux/debugfs.h>
#include <linux/device.h>
+#include <linux/seq_file.h>
+#include <linux/string_choices.h>
#include "hnae3.h"
#include "hns3_debugfs.h"
@@ -39,323 +41,279 @@ static struct hns3_dbg_dentry_info hns3_dbg_dentry[] = {
};
static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd);
-static int hns3_dbg_common_file_init(struct hnae3_handle *handle, u32 cmd);
+static int hns3_dbg_common_init_t1(struct hnae3_handle *handle, u32 cmd);
+static int hns3_dbg_common_init_t2(struct hnae3_handle *handle, u32 cmd);
static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
{
.name = "tm_nodes",
.cmd = HNAE3_DBG_CMD_TM_NODES,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tm_priority",
.cmd = HNAE3_DBG_CMD_TM_PRI,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tm_qset",
.cmd = HNAE3_DBG_CMD_TM_QSET,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tm_map",
.cmd = HNAE3_DBG_CMD_TM_MAP,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN_1MB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tm_pg",
.cmd = HNAE3_DBG_CMD_TM_PG,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tm_port",
.cmd = HNAE3_DBG_CMD_TM_PORT,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tc_sch_info",
.cmd = HNAE3_DBG_CMD_TC_SCH_INFO,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "qos_pause_cfg",
.cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "qos_pri_map",
.cmd = HNAE3_DBG_CMD_QOS_PRI_MAP,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "qos_dscp_map",
.cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "qos_buf_cfg",
.cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "dev_info",
.cmd = HNAE3_DBG_CMD_DEV_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t1,
},
{
.name = "tx_bd_queue",
.cmd = HNAE3_DBG_CMD_TX_BD,
.dentry = HNS3_DBG_DENTRY_TX_BD,
- .buf_len = HNS3_DBG_READ_LEN_5MB,
.init = hns3_dbg_bd_file_init,
},
{
.name = "rx_bd_queue",
.cmd = HNAE3_DBG_CMD_RX_BD,
.dentry = HNS3_DBG_DENTRY_RX_BD,
- .buf_len = HNS3_DBG_READ_LEN_4MB,
.init = hns3_dbg_bd_file_init,
},
{
.name = "uc",
.cmd = HNAE3_DBG_CMD_MAC_UC,
.dentry = HNS3_DBG_DENTRY_MAC,
- .buf_len = HNS3_DBG_READ_LEN_128KB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "mc",
.cmd = HNAE3_DBG_CMD_MAC_MC,
.dentry = HNS3_DBG_DENTRY_MAC,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "mng_tbl",
.cmd = HNAE3_DBG_CMD_MNG_TBL,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "loopback",
.cmd = HNAE3_DBG_CMD_LOOPBACK,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "interrupt_info",
.cmd = HNAE3_DBG_CMD_INTERRUPT_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "reset_info",
.cmd = HNAE3_DBG_CMD_RESET_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "imp_info",
.cmd = HNAE3_DBG_CMD_IMP_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "ncl_config",
.cmd = HNAE3_DBG_CMD_NCL_CONFIG,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN_128KB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "mac_tnl_status",
.cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "bios_common",
.cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "ssu",
.cmd = HNAE3_DBG_CMD_REG_SSU,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "igu_egu",
.cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "rpu",
.cmd = HNAE3_DBG_CMD_REG_RPU,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "ncsi",
.cmd = HNAE3_DBG_CMD_REG_NCSI,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "rtc",
.cmd = HNAE3_DBG_CMD_REG_RTC,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "ppp",
.cmd = HNAE3_DBG_CMD_REG_PPP,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "rcb",
.cmd = HNAE3_DBG_CMD_REG_RCB,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "tqp",
.cmd = HNAE3_DBG_CMD_REG_TQP,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN_128KB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "mac",
.cmd = HNAE3_DBG_CMD_REG_MAC,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "dcb",
.cmd = HNAE3_DBG_CMD_REG_DCB,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "queue_map",
.cmd = HNAE3_DBG_CMD_QUEUE_MAP,
.dentry = HNS3_DBG_DENTRY_QUEUE,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t1,
},
{
.name = "rx_queue_info",
.cmd = HNAE3_DBG_CMD_RX_QUEUE_INFO,
.dentry = HNS3_DBG_DENTRY_QUEUE,
- .buf_len = HNS3_DBG_READ_LEN_1MB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t1,
},
{
.name = "tx_queue_info",
.cmd = HNAE3_DBG_CMD_TX_QUEUE_INFO,
.dentry = HNS3_DBG_DENTRY_QUEUE,
- .buf_len = HNS3_DBG_READ_LEN_1MB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t1,
},
{
.name = "fd_tcam",
.cmd = HNAE3_DBG_CMD_FD_TCAM,
.dentry = HNS3_DBG_DENTRY_FD,
- .buf_len = HNS3_DBG_READ_LEN_1MB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "service_task_info",
.cmd = HNAE3_DBG_CMD_SERV_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "vlan_config",
.cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "ptp_info",
.cmd = HNAE3_DBG_CMD_PTP_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "fd_counter",
.cmd = HNAE3_DBG_CMD_FD_COUNTER,
.dentry = HNS3_DBG_DENTRY_FD,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "umv_info",
.cmd = HNAE3_DBG_CMD_UMV_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t2,
},
{
.name = "page_pool_info",
.cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t1,
},
{
.name = "coalesce_info",
.cmd = HNAE3_DBG_CMD_COAL_INFO,
.dentry = HNS3_DBG_DENTRY_COMMON,
- .buf_len = HNS3_DBG_READ_LEN_1MB,
- .init = hns3_dbg_common_file_init,
+ .init = hns3_dbg_common_init_t1,
},
};
@@ -420,71 +378,17 @@ static struct hns3_dbg_cap_info hns3_dbg_cap[] = {
}
};
-static const struct hns3_dbg_item coal_info_items[] = {
- { "VEC_ID", 2 },
- { "ALGO_STATE", 2 },
- { "PROFILE_ID", 2 },
- { "CQE_MODE", 2 },
- { "TUNE_STATE", 2 },
- { "STEPS_LEFT", 2 },
- { "STEPS_RIGHT", 2 },
- { "TIRED", 2 },
- { "SW_GL", 2 },
- { "SW_QL", 2 },
- { "HW_GL", 2 },
- { "HW_QL", 2 },
-};
-
static const char * const dim_cqe_mode_str[] = { "EQE", "CQE" };
static const char * const dim_state_str[] = { "START", "IN_PROG", "APPLY" };
static const char * const
dim_tune_stat_str[] = { "ON_TOP", "TIRED", "RIGHT", "LEFT" };
-static void hns3_dbg_fill_content(char *content, u16 len,
- const struct hns3_dbg_item *items,
- const char **result, u16 size)
-{
-#define HNS3_DBG_LINE_END_LEN 2
- char *pos = content;
- u16 item_len;
- u16 i;
-
- if (!len) {
- return;
- } else if (len <= HNS3_DBG_LINE_END_LEN) {
- *pos++ = '\0';
- return;
- }
-
- memset(content, ' ', len);
- len -= HNS3_DBG_LINE_END_LEN;
-
- for (i = 0; i < size; i++) {
- item_len = strlen(items[i].name) + items[i].interval;
- if (len < item_len)
- break;
-
- if (result) {
- if (item_len < strlen(result[i]))
- break;
- memcpy(pos, result[i], strlen(result[i]));
- } else {
- memcpy(pos, items[i].name, strlen(items[i].name));
- }
- pos += item_len;
- len -= item_len;
- }
- *pos++ = '\n';
- *pos++ = '\0';
-}
-
static void hns3_get_coal_info(struct hns3_enet_tqp_vector *tqp_vector,
- char **result, int i, bool is_tx)
+ struct seq_file *s, int i, bool is_tx)
{
unsigned int gl_offset, ql_offset;
struct hns3_enet_coalesce *coal;
unsigned int reg_val;
- unsigned int j = 0;
struct dim *dim;
bool ql_enable;
@@ -502,191 +406,96 @@ static void hns3_get_coal_info(struct hns3_enet_tqp_vector *tqp_vector,
ql_enable = tqp_vector->rx_group.coal.ql_enable;
}
- sprintf(result[j++], "%d", i);
- sprintf(result[j++], "%s", dim->state < ARRAY_SIZE(dim_state_str) ?
- dim_state_str[dim->state] : "unknown");
- sprintf(result[j++], "%u", dim->profile_ix);
- sprintf(result[j++], "%s", dim->mode < ARRAY_SIZE(dim_cqe_mode_str) ?
- dim_cqe_mode_str[dim->mode] : "unknown");
- sprintf(result[j++], "%s",
- dim->tune_state < ARRAY_SIZE(dim_tune_stat_str) ?
- dim_tune_stat_str[dim->tune_state] : "unknown");
- sprintf(result[j++], "%u", dim->steps_left);
- sprintf(result[j++], "%u", dim->steps_right);
- sprintf(result[j++], "%u", dim->tired);
- sprintf(result[j++], "%u", coal->int_gl);
- sprintf(result[j++], "%u", coal->int_ql);
+ seq_printf(s, "%-8d", i);
+ seq_printf(s, "%-12s", dim->state < ARRAY_SIZE(dim_state_str) ?
+ dim_state_str[dim->state] : "unknown");
+ seq_printf(s, "%-12u", dim->profile_ix);
+ seq_printf(s, "%-10s", dim->mode < ARRAY_SIZE(dim_cqe_mode_str) ?
+ dim_cqe_mode_str[dim->mode] : "unknown");
+ seq_printf(s, "%-12s", dim->tune_state < ARRAY_SIZE(dim_tune_stat_str) ?
+ dim_tune_stat_str[dim->tune_state] : "unknown");
+ seq_printf(s, "%-12u%-13u%-7u%-7u%-7u", dim->steps_left,
+ dim->steps_right, dim->tired, coal->int_gl, coal->int_ql);
reg_val = readl(tqp_vector->mask_addr + gl_offset) &
HNS3_VECTOR_GL_MASK;
- sprintf(result[j++], "%u", reg_val);
+ seq_printf(s, "%-7u", reg_val);
if (ql_enable) {
reg_val = readl(tqp_vector->mask_addr + ql_offset) &
HNS3_VECTOR_QL_MASK;
- sprintf(result[j++], "%u", reg_val);
+ seq_printf(s, "%u\n", reg_val);
} else {
- sprintf(result[j++], "NA");
+ seq_puts(s, "NA\n");
}
}
-static void hns3_dump_coal_info(struct hnae3_handle *h, char *buf, int len,
- int *pos, bool is_tx)
+static void hns3_dump_coal_info(struct seq_file *s, bool is_tx)
{
- char data_str[ARRAY_SIZE(coal_info_items)][HNS3_DBG_DATA_STR_LEN];
- char *result[ARRAY_SIZE(coal_info_items)];
+ struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
struct hns3_enet_tqp_vector *tqp_vector;
struct hns3_nic_priv *priv = h->priv;
- char content[HNS3_DBG_INFO_LEN];
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(coal_info_items); i++)
- result[i] = &data_str[i][0];
+ seq_printf(s, "%s interrupt coalesce info:\n", is_tx ? "tx" : "rx");
- *pos += scnprintf(buf + *pos, len - *pos,
- "%s interrupt coalesce info:\n",
- is_tx ? "tx" : "rx");
- hns3_dbg_fill_content(content, sizeof(content), coal_info_items,
- NULL, ARRAY_SIZE(coal_info_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ seq_puts(s, "VEC_ID ALGO_STATE PROFILE_ID CQE_MODE TUNE_STATE ");
+ seq_puts(s, "STEPS_LEFT STEPS_RIGHT TIRED SW_GL SW_QL ");
+ seq_puts(s, "HW_GL HW_QL\n");
for (i = 0; i < priv->vector_num; i++) {
tqp_vector = &priv->tqp_vector[i];
- hns3_get_coal_info(tqp_vector, result, i, is_tx);
- hns3_dbg_fill_content(content, sizeof(content), coal_info_items,
- (const char **)result,
- ARRAY_SIZE(coal_info_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ hns3_get_coal_info(tqp_vector, s, i, is_tx);
}
}
-static int hns3_dbg_coal_info(struct hnae3_handle *h, char *buf, int len)
+static int hns3_dbg_coal_info(struct seq_file *s, void *data)
{
- int pos = 0;
-
- hns3_dump_coal_info(h, buf, len, &pos, true);
- pos += scnprintf(buf + pos, len - pos, "\n");
- hns3_dump_coal_info(h, buf, len, &pos, false);
+ hns3_dump_coal_info(s, true);
+ seq_puts(s, "\n");
+ hns3_dump_coal_info(s, false);
return 0;
}
-static const struct hns3_dbg_item tx_spare_info_items[] = {
- { "QUEUE_ID", 2 },
- { "COPYBREAK", 2 },
- { "LEN", 7 },
- { "NTU", 4 },
- { "NTC", 4 },
- { "LTC", 4 },
- { "DMA", 17 },
-};
-
-static void hns3_dbg_tx_spare_info(struct hns3_enet_ring *ring, char *buf,
- int len, u32 ring_num, int *pos)
-{
- char data_str[ARRAY_SIZE(tx_spare_info_items)][HNS3_DBG_DATA_STR_LEN];
- struct hns3_tx_spare *tx_spare = ring->tx_spare;
- char *result[ARRAY_SIZE(tx_spare_info_items)];
- char content[HNS3_DBG_INFO_LEN];
- u32 i, j;
-
- if (!tx_spare) {
- *pos += scnprintf(buf + *pos, len - *pos,
- "tx spare buffer is not enabled\n");
- return;
- }
-
- for (i = 0; i < ARRAY_SIZE(tx_spare_info_items); i++)
- result[i] = &data_str[i][0];
-
- *pos += scnprintf(buf + *pos, len - *pos, "tx spare buffer info\n");
- hns3_dbg_fill_content(content, sizeof(content), tx_spare_info_items,
- NULL, ARRAY_SIZE(tx_spare_info_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
-
- for (i = 0; i < ring_num; i++) {
- j = 0;
- sprintf(result[j++], "%u", i);
- sprintf(result[j++], "%u", ring->tx_copybreak);
- sprintf(result[j++], "%u", tx_spare->len);
- sprintf(result[j++], "%u", tx_spare->next_to_use);
- sprintf(result[j++], "%u", tx_spare->next_to_clean);
- sprintf(result[j++], "%u", tx_spare->last_to_clean);
- sprintf(result[j++], "%pad", &tx_spare->dma);
- hns3_dbg_fill_content(content, sizeof(content),
- tx_spare_info_items,
- (const char **)result,
- ARRAY_SIZE(tx_spare_info_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
- }
-}
-
-static const struct hns3_dbg_item rx_queue_info_items[] = {
- { "QUEUE_ID", 2 },
- { "BD_NUM", 2 },
- { "BD_LEN", 2 },
- { "TAIL", 2 },
- { "HEAD", 2 },
- { "FBDNUM", 2 },
- { "PKTNUM", 5 },
- { "COPYBREAK", 2 },
- { "RING_EN", 2 },
- { "RX_RING_EN", 2 },
- { "BASE_ADDR", 10 },
-};
-
static void hns3_dump_rx_queue_info(struct hns3_enet_ring *ring,
- struct hnae3_ae_dev *ae_dev, char **result,
- u32 index)
+ struct seq_file *s, u32 index)
{
+ struct hnae3_ae_dev *ae_dev = hnae3_seq_file_to_ae_dev(s);
+ void __iomem *base = ring->tqp->io_base;
u32 base_add_l, base_add_h;
- u32 j = 0;
-
- sprintf(result[j++], "%u", index);
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_BD_NUM_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_BD_LEN_REG));
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_TAIL_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_HEAD_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_FBDNUM_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_PKTNUM_RECORD_REG));
- sprintf(result[j++], "%u", ring->rx_copybreak);
-
- sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_EN_REG) ? "on" : "off");
+ seq_printf(s, "%-10u", index);
+ seq_printf(s, "%-8u",
+ readl_relaxed(base + HNS3_RING_RX_RING_BD_NUM_REG));
+ seq_printf(s, "%-8u",
+ readl_relaxed(base + HNS3_RING_RX_RING_BD_LEN_REG));
+ seq_printf(s, "%-6u",
+ readl_relaxed(base + HNS3_RING_RX_RING_TAIL_REG));
+ seq_printf(s, "%-6u",
+ readl_relaxed(base + HNS3_RING_RX_RING_HEAD_REG));
+ seq_printf(s, "%-8u",
+ readl_relaxed(base + HNS3_RING_RX_RING_FBDNUM_REG));
+ seq_printf(s, "%-11u", readl_relaxed(base +
+ HNS3_RING_RX_RING_PKTNUM_RECORD_REG));
+ seq_printf(s, "%-11u", ring->rx_copybreak);
+ seq_printf(s, "%-9s",
+ str_on_off(readl_relaxed(base + HNS3_RING_EN_REG)));
if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
- sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_EN_REG) ? "on" : "off");
+ seq_printf(s, "%-12s", str_on_off(readl_relaxed(base +
+ HNS3_RING_RX_EN_REG)));
else
- sprintf(result[j++], "%s", "NA");
+ seq_printf(s, "%-12s", "NA");
- base_add_h = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_BASEADDR_H_REG);
- base_add_l = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_RING_BASEADDR_L_REG);
- sprintf(result[j++], "0x%08x%08x", base_add_h, base_add_l);
+ base_add_h = readl_relaxed(base + HNS3_RING_RX_RING_BASEADDR_H_REG);
+ base_add_l = readl_relaxed(base + HNS3_RING_RX_RING_BASEADDR_L_REG);
+ seq_printf(s, "0x%08x%08x\n", base_add_h, base_add_l);
}
-static int hns3_dbg_rx_queue_info(struct hnae3_handle *h,
- char *buf, int len)
+static int hns3_dbg_rx_queue_info(struct seq_file *s, void *data)
{
- char data_str[ARRAY_SIZE(rx_queue_info_items)][HNS3_DBG_DATA_STR_LEN];
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- char *result[ARRAY_SIZE(rx_queue_info_items)];
+ struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
struct hns3_nic_priv *priv = h->priv;
- char content[HNS3_DBG_INFO_LEN];
struct hns3_enet_ring *ring;
- int pos = 0;
u32 i;
if (!priv->ring) {
@@ -694,12 +503,9 @@ static int hns3_dbg_rx_queue_info(struct hnae3_handle *h,
return -EFAULT;
}
- for (i = 0; i < ARRAY_SIZE(rx_queue_info_items); i++)
- result[i] = &data_str[i][0];
+ seq_puts(s, "QUEUE_ID BD_NUM BD_LEN TAIL HEAD FBDNUM ");
+ seq_puts(s, "PKTNUM COPYBREAK RING_EN RX_RING_EN BASE_ADDR\n");
- hns3_dbg_fill_content(content, sizeof(content), rx_queue_info_items,
- NULL, ARRAY_SIZE(rx_queue_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
for (i = 0; i < h->kinfo.num_tqps; i++) {
/* Each cycle needs to determine whether the instance is reset,
* to prevent reference to invalid memory. And need to ensure
@@ -710,86 +516,51 @@ static int hns3_dbg_rx_queue_info(struct hnae3_handle *h,
return -EPERM;
ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)];
- hns3_dump_rx_queue_info(ring, ae_dev, result, i);
- hns3_dbg_fill_content(content, sizeof(content),
- rx_queue_info_items,
- (const char **)result,
- ARRAY_SIZE(rx_queue_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ hns3_dump_rx_queue_info(ring, s, i);
}
return 0;
}
-static const struct hns3_dbg_item tx_queue_info_items[] = {
- { "QUEUE_ID", 2 },
- { "BD_NUM", 2 },
- { "TC", 2 },
- { "TAIL", 2 },
- { "HEAD", 2 },
- { "FBDNUM", 2 },
- { "OFFSET", 2 },
- { "PKTNUM", 5 },
- { "RING_EN", 2 },
- { "TX_RING_EN", 2 },
- { "BASE_ADDR", 10 },
-};
-
static void hns3_dump_tx_queue_info(struct hns3_enet_ring *ring,
- struct hnae3_ae_dev *ae_dev, char **result,
- u32 index)
+ struct seq_file *s, u32 index)
{
+ struct hnae3_ae_dev *ae_dev = hnae3_seq_file_to_ae_dev(s);
+ void __iomem *base = ring->tqp->io_base;
u32 base_add_l, base_add_h;
- u32 j = 0;
-
- sprintf(result[j++], "%u", index);
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_BD_NUM_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_TC_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_TAIL_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_HEAD_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_FBDNUM_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_OFFSET_REG));
-
- sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_PKTNUM_RECORD_REG));
- sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_EN_REG) ? "on" : "off");
+ seq_printf(s, "%-10u", index);
+ seq_printf(s, "%-8u",
+ readl_relaxed(base + HNS3_RING_TX_RING_BD_NUM_REG));
+ seq_printf(s, "%-4u", readl_relaxed(base + HNS3_RING_TX_RING_TC_REG));
+ seq_printf(s, "%-6u", readl_relaxed(base + HNS3_RING_TX_RING_TAIL_REG));
+ seq_printf(s, "%-6u", readl_relaxed(base + HNS3_RING_TX_RING_HEAD_REG));
+ seq_printf(s, "%-8u",
+ readl_relaxed(base + HNS3_RING_TX_RING_FBDNUM_REG));
+ seq_printf(s, "%-8u",
+ readl_relaxed(base + HNS3_RING_TX_RING_OFFSET_REG));
+ seq_printf(s, "%-11u",
+ readl_relaxed(base + HNS3_RING_TX_RING_PKTNUM_RECORD_REG));
+ seq_printf(s, "%-9s",
+ str_on_off(readl_relaxed(base + HNS3_RING_EN_REG)));
if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
- sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_EN_REG) ? "on" : "off");
+ seq_printf(s, "%-12s",
+ str_on_off(readl_relaxed(base +
+ HNS3_RING_TX_EN_REG)));
else
- sprintf(result[j++], "%s", "NA");
+ seq_printf(s, "%-12s", "NA");
- base_add_h = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_BASEADDR_H_REG);
- base_add_l = readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_RING_BASEADDR_L_REG);
- sprintf(result[j++], "0x%08x%08x", base_add_h, base_add_l);
+ base_add_h = readl_relaxed(base + HNS3_RING_TX_RING_BASEADDR_H_REG);
+ base_add_l = readl_relaxed(base + HNS3_RING_TX_RING_BASEADDR_L_REG);
+ seq_printf(s, "0x%08x%08x\n", base_add_h, base_add_l);
}
-static int hns3_dbg_tx_queue_info(struct hnae3_handle *h,
- char *buf, int len)
+static int hns3_dbg_tx_queue_info(struct seq_file *s, void *data)
{
- char data_str[ARRAY_SIZE(tx_queue_info_items)][HNS3_DBG_DATA_STR_LEN];
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- char *result[ARRAY_SIZE(tx_queue_info_items)];
+ struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
struct hns3_nic_priv *priv = h->priv;
- char content[HNS3_DBG_INFO_LEN];
struct hns3_enet_ring *ring;
- int pos = 0;
u32 i;
if (!priv->ring) {
@@ -797,12 +568,8 @@ static int hns3_dbg_tx_queue_info(struct hnae3_handle *h,
return -EFAULT;
}
- for (i = 0; i < ARRAY_SIZE(tx_queue_info_items); i++)
- result[i] = &data_str[i][0];
-
- hns3_dbg_fill_content(content, sizeof(content), tx_queue_info_items,
- NULL, ARRAY_SIZE(tx_queue_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_puts(s, "QUEUE_ID BD_NUM TC TAIL HEAD FBDNUM OFFSET ");
+ seq_puts(s, "PKTNUM RING_EN TX_RING_EN BASE_ADDR\n");
for (i = 0; i < h->kinfo.num_tqps; i++) {
/* Each cycle needs to determine whether the instance is reset,
@@ -814,339 +581,213 @@ static int hns3_dbg_tx_queue_info(struct hnae3_handle *h,
return -EPERM;
ring = &priv->ring[i];
- hns3_dump_tx_queue_info(ring, ae_dev, result, i);
- hns3_dbg_fill_content(content, sizeof(content),
- tx_queue_info_items,
- (const char **)result,
- ARRAY_SIZE(tx_queue_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ hns3_dump_tx_queue_info(ring, s, i);
}
- hns3_dbg_tx_spare_info(ring, buf, len, h->kinfo.num_tqps, &pos);
-
return 0;
}
-static const struct hns3_dbg_item queue_map_items[] = {
- { "local_queue_id", 2 },
- { "global_queue_id", 2 },
- { "vector_id", 2 },
-};
-
-static int hns3_dbg_queue_map(struct hnae3_handle *h, char *buf, int len)
+static int hns3_dbg_queue_map(struct seq_file *s, void *data)
{
- char data_str[ARRAY_SIZE(queue_map_items)][HNS3_DBG_DATA_STR_LEN];
- char *result[ARRAY_SIZE(queue_map_items)];
+ struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
struct hns3_nic_priv *priv = h->priv;
- char content[HNS3_DBG_INFO_LEN];
- int pos = 0;
- int j;
u32 i;
if (!h->ae_algo->ops->get_global_queue_id)
return -EOPNOTSUPP;
- for (i = 0; i < ARRAY_SIZE(queue_map_items); i++)
- result[i] = &data_str[i][0];
+ seq_puts(s, "local_queue_id global_queue_id vector_id\n");
- hns3_dbg_fill_content(content, sizeof(content), queue_map_items,
- NULL, ARRAY_SIZE(queue_map_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
for (i = 0; i < h->kinfo.num_tqps; i++) {
if (!priv->ring || !priv->ring[i].tqp_vector)
continue;
- j = 0;
- sprintf(result[j++], "%u", i);
- sprintf(result[j++], "%u",
- h->ae_algo->ops->get_global_queue_id(h, i));
- sprintf(result[j++], "%d",
- priv->ring[i].tqp_vector->vector_irq);
- hns3_dbg_fill_content(content, sizeof(content), queue_map_items,
- (const char **)result,
- ARRAY_SIZE(queue_map_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_printf(s, "%-16u%-17u%d\n", i,
+ h->ae_algo->ops->get_global_queue_id(h, i),
+ priv->ring[i].tqp_vector->vector_irq);
}
return 0;
}
-static const struct hns3_dbg_item rx_bd_info_items[] = {
- { "BD_IDX", 3 },
- { "L234_INFO", 2 },
- { "PKT_LEN", 3 },
- { "SIZE", 4 },
- { "RSS_HASH", 4 },
- { "FD_ID", 2 },
- { "VLAN_TAG", 2 },
- { "O_DM_VLAN_ID_FB", 2 },
- { "OT_VLAN_TAG", 2 },
- { "BD_BASE_INFO", 2 },
- { "PTYPE", 2 },
- { "HW_CSUM", 2 },
-};
-
static void hns3_dump_rx_bd_info(struct hns3_nic_priv *priv,
- struct hns3_desc *desc, char **result, int idx)
+ struct hns3_desc *desc, struct seq_file *s,
+ int idx)
{
- unsigned int j = 0;
-
- sprintf(result[j++], "%d", idx);
- sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.l234_info));
- sprintf(result[j++], "%u", le16_to_cpu(desc->rx.pkt_len));
- sprintf(result[j++], "%u", le16_to_cpu(desc->rx.size));
- sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.rss_hash));
- sprintf(result[j++], "%u", le16_to_cpu(desc->rx.fd_id));
- sprintf(result[j++], "%u", le16_to_cpu(desc->rx.vlan_tag));
- sprintf(result[j++], "%u", le16_to_cpu(desc->rx.o_dm_vlan_id_fb));
- sprintf(result[j++], "%u", le16_to_cpu(desc->rx.ot_vlan_tag));
- sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.bd_base_info));
+ seq_printf(s, "%-9d%#-11x%-10u%-8u%#-12x%-7u%-10u%-17u%-13u%#-14x",
+ idx, le32_to_cpu(desc->rx.l234_info),
+ le16_to_cpu(desc->rx.pkt_len), le16_to_cpu(desc->rx.size),
+ le32_to_cpu(desc->rx.rss_hash), le16_to_cpu(desc->rx.fd_id),
+ le16_to_cpu(desc->rx.vlan_tag),
+ le16_to_cpu(desc->rx.o_dm_vlan_id_fb),
+ le16_to_cpu(desc->rx.ot_vlan_tag),
+ le32_to_cpu(desc->rx.bd_base_info));
+
if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) {
u32 ol_info = le32_to_cpu(desc->rx.ol_info);
- sprintf(result[j++], "%5lu", hnae3_get_field(ol_info,
- HNS3_RXD_PTYPE_M,
- HNS3_RXD_PTYPE_S));
- sprintf(result[j++], "%7u", le16_to_cpu(desc->csum));
+ seq_printf(s, "%-7lu%-9u\n",
+ hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
+ HNS3_RXD_PTYPE_S),
+ le16_to_cpu(desc->csum));
} else {
- sprintf(result[j++], "NA");
- sprintf(result[j++], "NA");
+ seq_puts(s, "NA NA\n");
}
}
-static int hns3_dbg_rx_bd_info(struct hns3_dbg_data *d, char *buf, int len)
+static int hns3_dbg_rx_bd_info(struct seq_file *s, void *private)
{
- char data_str[ARRAY_SIZE(rx_bd_info_items)][HNS3_DBG_DATA_STR_LEN];
- struct hns3_nic_priv *priv = d->handle->priv;
- char *result[ARRAY_SIZE(rx_bd_info_items)];
- char content[HNS3_DBG_INFO_LEN];
+ struct hns3_dbg_data *data = s->private;
+ struct hnae3_handle *h = data->handle;
+ struct hns3_nic_priv *priv = h->priv;
struct hns3_enet_ring *ring;
struct hns3_desc *desc;
unsigned int i;
- int pos = 0;
- if (d->qid >= d->handle->kinfo.num_tqps) {
- dev_err(&d->handle->pdev->dev,
- "queue%u is not in use\n", d->qid);
+ if (data->qid >= h->kinfo.num_tqps) {
+ dev_err(&h->pdev->dev, "queue%u is not in use\n", data->qid);
return -EINVAL;
}
- for (i = 0; i < ARRAY_SIZE(rx_bd_info_items); i++)
- result[i] = &data_str[i][0];
-
- pos += scnprintf(buf + pos, len - pos,
- "Queue %u rx bd info:\n", d->qid);
- hns3_dbg_fill_content(content, sizeof(content), rx_bd_info_items,
- NULL, ARRAY_SIZE(rx_bd_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_printf(s, "Queue %u rx bd info:\n", data->qid);
+ seq_puts(s, "BD_IDX L234_INFO PKT_LEN SIZE ");
+ seq_puts(s, "RSS_HASH FD_ID VLAN_TAG O_DM_VLAN_ID_FB ");
+ seq_puts(s, "OT_VLAN_TAG BD_BASE_INFO PTYPE HW_CSUM\n");
- ring = &priv->ring[d->qid + d->handle->kinfo.num_tqps];
+ ring = &priv->ring[data->qid + data->handle->kinfo.num_tqps];
for (i = 0; i < ring->desc_num; i++) {
desc = &ring->desc[i];
- hns3_dump_rx_bd_info(priv, desc, result, i);
- hns3_dbg_fill_content(content, sizeof(content),
- rx_bd_info_items, (const char **)result,
- ARRAY_SIZE(rx_bd_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ hns3_dump_rx_bd_info(priv, desc, s, i);
}
return 0;
}
-static const struct hns3_dbg_item tx_bd_info_items[] = {
- { "BD_IDX", 2 },
- { "ADDRESS", 13 },
- { "VLAN_TAG", 2 },
- { "SIZE", 2 },
- { "T_CS_VLAN_TSO", 2 },
- { "OT_VLAN_TAG", 3 },
- { "TV", 5 },
- { "OLT_VLAN_LEN", 2 },
- { "PAYLEN_OL4CS", 2 },
- { "BD_FE_SC_VLD", 2 },
- { "MSS_HW_CSUM", 0 },
-};
-
-static void hns3_dump_tx_bd_info(struct hns3_desc *desc, char **result, int idx)
+static void hns3_dump_tx_bd_info(struct hns3_desc *desc, struct seq_file *s,
+ int idx)
{
- unsigned int j = 0;
-
- sprintf(result[j++], "%d", idx);
- sprintf(result[j++], "%#llx", le64_to_cpu(desc->addr));
- sprintf(result[j++], "%u", le16_to_cpu(desc->tx.vlan_tag));
- sprintf(result[j++], "%u", le16_to_cpu(desc->tx.send_size));
- sprintf(result[j++], "%#x",
- le32_to_cpu(desc->tx.type_cs_vlan_tso_len));
- sprintf(result[j++], "%u", le16_to_cpu(desc->tx.outer_vlan_tag));
- sprintf(result[j++], "%u", le16_to_cpu(desc->tx.tv));
- sprintf(result[j++], "%u",
- le32_to_cpu(desc->tx.ol_type_vlan_len_msec));
- sprintf(result[j++], "%#x", le32_to_cpu(desc->tx.paylen_ol4cs));
- sprintf(result[j++], "%#x", le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri));
- sprintf(result[j++], "%u", le16_to_cpu(desc->tx.mss_hw_csum));
+ seq_printf(s, "%-8d%#-20llx%-10u%-6u%#-15x%-14u%-7u%-16u%#-14x%#-14x%-11u\n",
+ idx, le64_to_cpu(desc->addr),
+ le16_to_cpu(desc->tx.vlan_tag),
+ le16_to_cpu(desc->tx.send_size),
+ le32_to_cpu(desc->tx.type_cs_vlan_tso_len),
+ le16_to_cpu(desc->tx.outer_vlan_tag),
+ le16_to_cpu(desc->tx.tv),
+ le32_to_cpu(desc->tx.ol_type_vlan_len_msec),
+ le32_to_cpu(desc->tx.paylen_ol4cs),
+ le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri),
+ le16_to_cpu(desc->tx.mss_hw_csum));
}
-static int hns3_dbg_tx_bd_info(struct hns3_dbg_data *d, char *buf, int len)
+static int hns3_dbg_tx_bd_info(struct seq_file *s, void *private)
{
- char data_str[ARRAY_SIZE(tx_bd_info_items)][HNS3_DBG_DATA_STR_LEN];
- struct hns3_nic_priv *priv = d->handle->priv;
- char *result[ARRAY_SIZE(tx_bd_info_items)];
- char content[HNS3_DBG_INFO_LEN];
+ struct hns3_dbg_data *data = s->private;
+ struct hnae3_handle *h = data->handle;
+ struct hns3_nic_priv *priv = h->priv;
struct hns3_enet_ring *ring;
struct hns3_desc *desc;
unsigned int i;
- int pos = 0;
- if (d->qid >= d->handle->kinfo.num_tqps) {
- dev_err(&d->handle->pdev->dev,
- "queue%u is not in use\n", d->qid);
+ if (data->qid >= h->kinfo.num_tqps) {
+ dev_err(&h->pdev->dev, "queue%u is not in use\n", data->qid);
return -EINVAL;
}
- for (i = 0; i < ARRAY_SIZE(tx_bd_info_items); i++)
- result[i] = &data_str[i][0];
+ seq_printf(s, "Queue %u tx bd info:\n", data->qid);
+ seq_puts(s, "BD_IDX ADDRESS VLAN_TAG SIZE ");
+ seq_puts(s, "T_CS_VLAN_TSO OT_VLAN_TAG TV OLT_VLAN_LEN ");
+ seq_puts(s, "PAYLEN_OL4CS BD_FE_SC_VLD MSS_HW_CSUM\n");
- pos += scnprintf(buf + pos, len - pos,
- "Queue %u tx bd info:\n", d->qid);
- hns3_dbg_fill_content(content, sizeof(content), tx_bd_info_items,
- NULL, ARRAY_SIZE(tx_bd_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
-
- ring = &priv->ring[d->qid];
+ ring = &priv->ring[data->qid];
for (i = 0; i < ring->desc_num; i++) {
desc = &ring->desc[i];
- hns3_dump_tx_bd_info(desc, result, i);
- hns3_dbg_fill_content(content, sizeof(content),
- tx_bd_info_items, (const char **)result,
- ARRAY_SIZE(tx_bd_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ hns3_dump_tx_bd_info(desc, s, i);
}
return 0;
}
-static void
-hns3_dbg_dev_caps(struct hnae3_handle *h, char *buf, int len, int *pos)
+static void hns3_dbg_dev_caps(struct hnae3_handle *h, struct seq_file *s)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- const char * const str[] = {"no", "yes"};
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
unsigned long *caps = ae_dev->caps;
u32 i, state;
- *pos += scnprintf(buf + *pos, len - *pos, "dev capability:\n");
+ seq_puts(s, "dev capability:\n");
for (i = 0; i < ARRAY_SIZE(hns3_dbg_cap); i++) {
state = test_bit(hns3_dbg_cap[i].cap_bit, caps);
- *pos += scnprintf(buf + *pos, len - *pos, "%s: %s\n",
- hns3_dbg_cap[i].name, str[state]);
+ seq_printf(s, "%s: %s\n", hns3_dbg_cap[i].name,
+ str_yes_no(state));
}
- *pos += scnprintf(buf + *pos, len - *pos, "\n");
+ seq_puts(s, "\n");
}
-static void
-hns3_dbg_dev_specs(struct hnae3_handle *h, char *buf, int len, int *pos)
+static void hns3_dbg_dev_specs(struct hnae3_handle *h, struct seq_file *s)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
struct hnae3_dev_specs *dev_specs = &ae_dev->dev_specs;
struct hnae3_knic_private_info *kinfo = &h->kinfo;
struct net_device *dev = kinfo->netdev;
- *pos += scnprintf(buf + *pos, len - *pos, "dev_spec:\n");
- *pos += scnprintf(buf + *pos, len - *pos, "MAC entry num: %u\n",
- dev_specs->mac_entry_num);
- *pos += scnprintf(buf + *pos, len - *pos, "MNG entry num: %u\n",
- dev_specs->mng_entry_num);
- *pos += scnprintf(buf + *pos, len - *pos, "MAX non tso bd num: %u\n",
- dev_specs->max_non_tso_bd_num);
- *pos += scnprintf(buf + *pos, len - *pos, "RSS ind tbl size: %u\n",
- dev_specs->rss_ind_tbl_size);
- *pos += scnprintf(buf + *pos, len - *pos, "RSS key size: %u\n",
- dev_specs->rss_key_size);
- *pos += scnprintf(buf + *pos, len - *pos, "RSS size: %u\n",
- kinfo->rss_size);
- *pos += scnprintf(buf + *pos, len - *pos, "Allocated RSS size: %u\n",
- kinfo->req_rss_size);
- *pos += scnprintf(buf + *pos, len - *pos,
- "Task queue pairs numbers: %u\n",
- kinfo->num_tqps);
- *pos += scnprintf(buf + *pos, len - *pos, "RX buffer length: %u\n",
- kinfo->rx_buf_len);
- *pos += scnprintf(buf + *pos, len - *pos, "Desc num per TX queue: %u\n",
- kinfo->num_tx_desc);
- *pos += scnprintf(buf + *pos, len - *pos, "Desc num per RX queue: %u\n",
- kinfo->num_rx_desc);
- *pos += scnprintf(buf + *pos, len - *pos,
- "Total number of enabled TCs: %u\n",
- kinfo->tc_info.num_tc);
- *pos += scnprintf(buf + *pos, len - *pos, "MAX INT QL: %u\n",
- dev_specs->int_ql_max);
- *pos += scnprintf(buf + *pos, len - *pos, "MAX INT GL: %u\n",
- dev_specs->max_int_gl);
- *pos += scnprintf(buf + *pos, len - *pos, "MAX TM RATE: %u\n",
- dev_specs->max_tm_rate);
- *pos += scnprintf(buf + *pos, len - *pos, "MAX QSET number: %u\n",
- dev_specs->max_qset_num);
- *pos += scnprintf(buf + *pos, len - *pos, "umv size: %u\n",
- dev_specs->umv_size);
- *pos += scnprintf(buf + *pos, len - *pos, "mc mac size: %u\n",
- dev_specs->mc_mac_size);
- *pos += scnprintf(buf + *pos, len - *pos, "MAC statistics number: %u\n",
- dev_specs->mac_stats_num);
- *pos += scnprintf(buf + *pos, len - *pos,
- "TX timeout threshold: %d seconds\n",
- dev->watchdog_timeo / HZ);
- *pos += scnprintf(buf + *pos, len - *pos, "Hilink Version: %u\n",
- dev_specs->hilink_version);
+ seq_puts(s, "dev_spec:\n");
+ seq_printf(s, "MAC entry num: %u\n", dev_specs->mac_entry_num);
+ seq_printf(s, "MNG entry num: %u\n", dev_specs->mng_entry_num);
+ seq_printf(s, "MAX non tso bd num: %u\n",
+ dev_specs->max_non_tso_bd_num);
+ seq_printf(s, "RSS ind tbl size: %u\n", dev_specs->rss_ind_tbl_size);
+ seq_printf(s, "RSS key size: %u\n", dev_specs->rss_key_size);
+ seq_printf(s, "RSS size: %u\n", kinfo->rss_size);
+ seq_printf(s, "Allocated RSS size: %u\n", kinfo->req_rss_size);
+ seq_printf(s, "Task queue pairs numbers: %u\n", kinfo->num_tqps);
+ seq_printf(s, "RX buffer length: %u\n", kinfo->rx_buf_len);
+ seq_printf(s, "Desc num per TX queue: %u\n", kinfo->num_tx_desc);
+ seq_printf(s, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
+ seq_printf(s, "Total number of enabled TCs: %u\n",
+ kinfo->tc_info.num_tc);
+ seq_printf(s, "MAX INT QL: %u\n", dev_specs->int_ql_max);
+ seq_printf(s, "MAX INT GL: %u\n", dev_specs->max_int_gl);
+ seq_printf(s, "MAX TM RATE: %u\n", dev_specs->max_tm_rate);
+ seq_printf(s, "MAX QSET number: %u\n", dev_specs->max_qset_num);
+ seq_printf(s, "umv size: %u\n", dev_specs->umv_size);
+ seq_printf(s, "mc mac size: %u\n", dev_specs->mc_mac_size);
+ seq_printf(s, "MAC statistics number: %u\n", dev_specs->mac_stats_num);
+ seq_printf(s, "TX timeout threshold: %d seconds\n",
+ dev->watchdog_timeo / HZ);
+ seq_printf(s, "mac tunnel number: %u\n", dev_specs->tnl_num);
+ seq_printf(s, "Hilink Version: %u\n", dev_specs->hilink_version);
}
-static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len)
+static int hns3_dbg_dev_info(struct seq_file *s, void *data)
{
- int pos = 0;
+ struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
- hns3_dbg_dev_caps(h, buf, len, &pos);
-
- hns3_dbg_dev_specs(h, buf, len, &pos);
+ hns3_dbg_dev_caps(h, s);
+ hns3_dbg_dev_specs(h, s);
return 0;
}
-static const struct hns3_dbg_item page_pool_info_items[] = {
- { "QUEUE_ID", 2 },
- { "ALLOCATE_CNT", 2 },
- { "FREE_CNT", 6 },
- { "POOL_SIZE(PAGE_NUM)", 2 },
- { "ORDER", 2 },
- { "NUMA_ID", 2 },
- { "MAX_LEN", 2 },
-};
-
static void hns3_dump_page_pool_info(struct hns3_enet_ring *ring,
- char **result, u32 index)
+ struct seq_file *s, u32 index)
{
- u32 j = 0;
-
- sprintf(result[j++], "%u", index);
- sprintf(result[j++], "%u",
- READ_ONCE(ring->page_pool->pages_state_hold_cnt));
- sprintf(result[j++], "%d",
- atomic_read(&ring->page_pool->pages_state_release_cnt));
- sprintf(result[j++], "%u", ring->page_pool->p.pool_size);
- sprintf(result[j++], "%u", ring->page_pool->p.order);
- sprintf(result[j++], "%d", ring->page_pool->p.nid);
- sprintf(result[j++], "%uK", ring->page_pool->p.max_len / 1024);
+ seq_printf(s, "%-10u%-14u%-14d%-21u%-7u%-9d%uK\n",
+ index,
+ READ_ONCE(ring->page_pool->pages_state_hold_cnt),
+ atomic_read(&ring->page_pool->pages_state_release_cnt),
+ ring->page_pool->p.pool_size,
+ ring->page_pool->p.order,
+ ring->page_pool->p.nid,
+ ring->page_pool->p.max_len / 1024);
}
-static int
-hns3_dbg_page_pool_info(struct hnae3_handle *h, char *buf, int len)
+static int hns3_dbg_page_pool_info(struct seq_file *s, void *data)
{
- char data_str[ARRAY_SIZE(page_pool_info_items)][HNS3_DBG_DATA_STR_LEN];
- char *result[ARRAY_SIZE(page_pool_info_items)];
+ struct hnae3_handle *h = hnae3_seq_file_to_handle(s);
struct hns3_nic_priv *priv = h->priv;
- char content[HNS3_DBG_INFO_LEN];
struct hns3_enet_ring *ring;
- int pos = 0;
u32 i;
if (!priv->ring) {
@@ -1159,184 +800,50 @@ hns3_dbg_page_pool_info(struct hnae3_handle *h, char *buf, int len)
return -EFAULT;
}
- for (i = 0; i < ARRAY_SIZE(page_pool_info_items); i++)
- result[i] = &data_str[i][0];
+ seq_puts(s, "QUEUE_ID ALLOCATE_CNT FREE_CNT ");
+ seq_puts(s, "POOL_SIZE(PAGE_NUM) ORDER NUMA_ID MAX_LEN\n");
- hns3_dbg_fill_content(content, sizeof(content), page_pool_info_items,
- NULL, ARRAY_SIZE(page_pool_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
for (i = 0; i < h->kinfo.num_tqps; i++) {
if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
return -EPERM;
+
ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)];
- hns3_dump_page_pool_info(ring, result, i);
- hns3_dbg_fill_content(content, sizeof(content),
- page_pool_info_items,
- (const char **)result,
- ARRAY_SIZE(page_pool_info_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ hns3_dump_page_pool_info(ring, s, i);
}
return 0;
}
-static int hns3_dbg_get_cmd_index(struct hns3_dbg_data *dbg_data, u32 *index)
+static int hns3_dbg_bd_info_show(struct seq_file *s, void *private)
{
- u32 i;
-
- for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) {
- if (hns3_dbg_cmd[i].cmd == dbg_data->cmd) {
- *index = i;
- return 0;
- }
- }
-
- dev_err(&dbg_data->handle->pdev->dev, "unknown command(%d)\n",
- dbg_data->cmd);
- return -EINVAL;
-}
-
-static const struct hns3_dbg_func hns3_dbg_cmd_func[] = {
- {
- .cmd = HNAE3_DBG_CMD_QUEUE_MAP,
- .dbg_dump = hns3_dbg_queue_map,
- },
- {
- .cmd = HNAE3_DBG_CMD_DEV_INFO,
- .dbg_dump = hns3_dbg_dev_info,
- },
- {
- .cmd = HNAE3_DBG_CMD_TX_BD,
- .dbg_dump_bd = hns3_dbg_tx_bd_info,
- },
- {
- .cmd = HNAE3_DBG_CMD_RX_BD,
- .dbg_dump_bd = hns3_dbg_rx_bd_info,
- },
- {
- .cmd = HNAE3_DBG_CMD_RX_QUEUE_INFO,
- .dbg_dump = hns3_dbg_rx_queue_info,
- },
- {
- .cmd = HNAE3_DBG_CMD_TX_QUEUE_INFO,
- .dbg_dump = hns3_dbg_tx_queue_info,
- },
- {
- .cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO,
- .dbg_dump = hns3_dbg_page_pool_info,
- },
- {
- .cmd = HNAE3_DBG_CMD_COAL_INFO,
- .dbg_dump = hns3_dbg_coal_info,
- },
-};
-
-static int hns3_dbg_read_cmd(struct hns3_dbg_data *dbg_data,
- enum hnae3_dbg_cmd cmd, char *buf, int len)
-{
- const struct hnae3_ae_ops *ops = dbg_data->handle->ae_algo->ops;
- const struct hns3_dbg_func *cmd_func;
- u32 i;
-
- for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd_func); i++) {
- if (cmd == hns3_dbg_cmd_func[i].cmd) {
- cmd_func = &hns3_dbg_cmd_func[i];
- if (cmd_func->dbg_dump)
- return cmd_func->dbg_dump(dbg_data->handle, buf,
- len);
- else
- return cmd_func->dbg_dump_bd(dbg_data, buf,
- len);
- }
- }
-
- if (!ops->dbg_read_cmd)
- return -EOPNOTSUPP;
-
- return ops->dbg_read_cmd(dbg_data->handle, cmd, buf, len);
-}
-
-static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct hns3_dbg_data *dbg_data = filp->private_data;
- struct hnae3_handle *handle = dbg_data->handle;
- struct hns3_nic_priv *priv = handle->priv;
- ssize_t size = 0;
- char **save_buf;
- char *read_buf;
- u32 index;
- int ret;
-
- ret = hns3_dbg_get_cmd_index(dbg_data, &index);
- if (ret)
- return ret;
-
- mutex_lock(&handle->dbgfs_lock);
- save_buf = &handle->dbgfs_buf[index];
+ struct hns3_dbg_data *data = s->private;
+ struct hnae3_handle *h = data->handle;
+ struct hns3_nic_priv *priv = h->priv;
if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
- test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) {
- ret = -EBUSY;
- goto out;
- }
-
- if (*save_buf) {
- read_buf = *save_buf;
- } else {
- read_buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL);
- if (!read_buf) {
- ret = -ENOMEM;
- goto out;
- }
-
- /* save the buffer addr until the last read operation */
- *save_buf = read_buf;
- }
-
- /* get data ready for the first time to read */
- if (!*ppos) {
- ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
- read_buf, hns3_dbg_cmd[index].buf_len);
- if (ret)
- goto out;
- }
-
- size = simple_read_from_buffer(buffer, count, ppos, read_buf,
- strlen(read_buf));
- if (size > 0) {
- mutex_unlock(&handle->dbgfs_lock);
- return size;
- }
+ test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+ return -EBUSY;
-out:
- /* free the buffer for the last read operation */
- if (*save_buf) {
- kvfree(*save_buf);
- *save_buf = NULL;
- }
+ if (data->cmd == HNAE3_DBG_CMD_TX_BD)
+ return hns3_dbg_tx_bd_info(s, private);
+ else if (data->cmd == HNAE3_DBG_CMD_RX_BD)
+ return hns3_dbg_rx_bd_info(s, private);
- mutex_unlock(&handle->dbgfs_lock);
- return ret;
+ return -EOPNOTSUPP;
}
-
-static const struct file_operations hns3_dbg_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = hns3_dbg_read,
-};
+DEFINE_SHOW_ATTRIBUTE(hns3_dbg_bd_info);
static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd)
{
- struct dentry *entry_dir;
struct hns3_dbg_data *data;
+ struct dentry *entry_dir;
u16 max_queue_num;
unsigned int i;
entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry;
max_queue_num = hns3_get_max_available_channels(handle);
- data = devm_kzalloc(&handle->pdev->dev, max_queue_num * sizeof(*data),
+ data = devm_kcalloc(&handle->pdev->dev, max_queue_num, sizeof(*data),
GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -1349,45 +856,77 @@ static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd)
data[i].qid = i;
sprintf(name, "%s%u", hns3_dbg_cmd[cmd].name, i);
debugfs_create_file(name, 0400, entry_dir, &data[i],
- &hns3_dbg_fops);
+ &hns3_dbg_bd_info_fops);
}
return 0;
}
-static int
-hns3_dbg_common_file_init(struct hnae3_handle *handle, u32 cmd)
+static int hns3_dbg_common_init_t1(struct hnae3_handle *handle, u32 cmd)
{
- struct hns3_dbg_data *data;
+ struct device *dev = &handle->pdev->dev;
struct dentry *entry_dir;
+ read_func func = NULL;
+
+ switch (hns3_dbg_cmd[cmd].cmd) {
+ case HNAE3_DBG_CMD_TX_QUEUE_INFO:
+ func = hns3_dbg_tx_queue_info;
+ break;
+ case HNAE3_DBG_CMD_RX_QUEUE_INFO:
+ func = hns3_dbg_rx_queue_info;
+ break;
+ case HNAE3_DBG_CMD_QUEUE_MAP:
+ func = hns3_dbg_queue_map;
+ break;
+ case HNAE3_DBG_CMD_PAGE_POOL_INFO:
+ func = hns3_dbg_page_pool_info;
+ break;
+ case HNAE3_DBG_CMD_COAL_INFO:
+ func = hns3_dbg_coal_info;
+ break;
+ case HNAE3_DBG_CMD_DEV_INFO:
+ func = hns3_dbg_dev_info;
+ break;
+ default:
+ return -EINVAL;
+ }
- data = devm_kzalloc(&handle->pdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry;
+ debugfs_create_devm_seqfile(dev, hns3_dbg_cmd[cmd].name, entry_dir,
+ func);
+
+ return 0;
+}
+
+static int hns3_dbg_common_init_t2(struct hnae3_handle *handle, u32 cmd)
+{
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
+ struct device *dev = &handle->pdev->dev;
+ struct dentry *entry_dir;
+ read_func func;
+ int ret;
+
+ if (!ops->dbg_get_read_func)
+ return 0;
+
+ ret = ops->dbg_get_read_func(handle, hns3_dbg_cmd[cmd].cmd, &func);
+ if (ret)
+ return ret;
- data->handle = handle;
- data->cmd = hns3_dbg_cmd[cmd].cmd;
entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry;
- debugfs_create_file(hns3_dbg_cmd[cmd].name, 0400, entry_dir,
- data, &hns3_dbg_fops);
+ debugfs_create_devm_seqfile(dev, hns3_dbg_cmd[cmd].name, entry_dir,
+ func);
return 0;
}
int hns3_dbg_init(struct hnae3_handle *handle)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
const char *name = pci_name(handle->pdev);
int ret;
u32 i;
- handle->dbgfs_buf = devm_kcalloc(&handle->pdev->dev,
- ARRAY_SIZE(hns3_dbg_cmd),
- sizeof(*handle->dbgfs_buf),
- GFP_KERNEL);
- if (!handle->dbgfs_buf)
- return -ENOMEM;
-
hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry =
debugfs_create_dir(name, hns3_dbgfs_root);
handle->hnae3_dbgfs = hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry;
@@ -1397,8 +936,6 @@ int hns3_dbg_init(struct hnae3_handle *handle)
debugfs_create_dir(hns3_dbg_dentry[i].name,
handle->hnae3_dbgfs);
- mutex_init(&handle->dbgfs_lock);
-
for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) {
if ((hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_TM_NODES &&
ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) ||
@@ -1427,24 +964,13 @@ int hns3_dbg_init(struct hnae3_handle *handle)
out:
debugfs_remove_recursive(handle->hnae3_dbgfs);
handle->hnae3_dbgfs = NULL;
- mutex_destroy(&handle->dbgfs_lock);
return ret;
}
void hns3_dbg_uninit(struct hnae3_handle *handle)
{
- u32 i;
-
debugfs_remove_recursive(handle->hnae3_dbgfs);
handle->hnae3_dbgfs = NULL;
-
- for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++)
- if (handle->dbgfs_buf[i]) {
- kvfree(handle->dbgfs_buf[i]);
- handle->dbgfs_buf[i] = NULL;
- }
-
- mutex_destroy(&handle->dbgfs_lock);
}
void hns3_dbg_register_debugfs(const char *debugfs_dir_name)
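[Editorial aside, not part of the patch] The hns3_debugfs.c hunks above drop the hand-rolled read buffers and per-handle locking in favour of seq_file-backed debugfs entries. A minimal sketch of that pattern, with hypothetical sample_* names standing in for the per-command helpers:

/*
 * Editorial sketch: seq_file-based debugfs, as adopted above.
 * All "sample_*" identifiers are placeholders, not hns3 symbols.
 */
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/seq_file.h>

struct sample_dev {
        struct device *dev;
        unsigned int reset_cnt;
};

static int sample_status_show(struct seq_file *s, void *data)
{
        /* s->private is the pointer handed to debugfs_create_file() */
        struct sample_dev *sdev = s->private;

        seq_printf(s, "resets: %u\n", sdev->reset_cnt);
        return 0;
}
DEFINE_SHOW_ATTRIBUTE(sample_status);

static int sample_stats_read(struct seq_file *s, void *data)
{
        /* for devm seq files, s->private is the struct device */
        struct device *dev = s->private;

        seq_printf(s, "device: %s\n", dev_name(dev));
        return 0;
}

static void sample_debugfs_init(struct sample_dev *sdev, struct dentry *root)
{
        /* fops generated by DEFINE_SHOW_ATTRIBUTE() above */
        debugfs_create_file("status", 0400, root, sdev, &sample_status_fops);

        /*
         * Device-managed seq file: torn down with the device, so no
         * saved read buffer or dbgfs_lock-style locking is needed --
         * which is why that bookkeeping disappears in the hunks above.
         */
        debugfs_create_devm_seqfile(sdev->dev, "stats", root,
                                    sample_stats_read);
}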
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
index 4a5ef8a90a10..57c9d3fc1b27 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
@@ -6,15 +6,6 @@
#include "hnae3.h"
-#define HNS3_DBG_READ_LEN 65536
-#define HNS3_DBG_READ_LEN_128KB 0x20000
-#define HNS3_DBG_READ_LEN_1MB 0x100000
-#define HNS3_DBG_READ_LEN_4MB 0x400000
-#define HNS3_DBG_READ_LEN_5MB 0x500000
-#define HNS3_DBG_WRITE_LEN 1024
-
-#define HNS3_DBG_DATA_STR_LEN 32
-#define HNS3_DBG_INFO_LEN 256
#define HNS3_DBG_ITEM_NAME_LEN 32
#define HNS3_DBG_FILE_NAME_LEN 16
@@ -49,16 +40,9 @@ struct hns3_dbg_cmd_info {
const char *name;
enum hnae3_dbg_cmd cmd;
enum hns3_dbg_dentry_type dentry;
- u32 buf_len;
int (*init)(struct hnae3_handle *handle, unsigned int cmd);
};
-struct hns3_dbg_func {
- enum hnae3_dbg_cmd cmd;
- int (*dbg_dump)(struct hnae3_handle *handle, char *buf, int len);
- int (*dbg_dump_bd)(struct hns3_dbg_data *data, char *buf, int len);
-};
-
struct hns3_dbg_cap_info {
const char *name;
enum HNAE3_DEV_CAP_BITS cap_bit;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index fbfd3ee5648f..7a0654e2d3dd 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -381,24 +381,6 @@ static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = {
#define HNS3_INVALID_PTYPE \
ARRAY_SIZE(hns3_rx_ptype_tbl)
-static void hns3_dma_map_sync(struct device *dev, unsigned long iova)
-{
- struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- struct iommu_iotlb_gather iotlb_gather;
- size_t granule;
-
- if (!domain || !iommu_is_dma_domain(domain))
- return;
-
- granule = 1 << __ffs(domain->pgsize_bitmap);
- iova = ALIGN_DOWN(iova, granule);
- iotlb_gather.start = iova;
- iotlb_gather.end = iova + granule - 1;
- iotlb_gather.pgsize = granule;
-
- iommu_iotlb_sync(domain, &iotlb_gather);
-}
-
static irqreturn_t hns3_irq_handle(int irq, void *vector)
{
struct hns3_enet_tqp_vector *tqp_vector = vector;
@@ -492,20 +474,14 @@ static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
writel(mask_en, tqp_vector->mask_addr);
}
-static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
+static void hns3_irq_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
napi_enable(&tqp_vector->napi);
enable_irq(tqp_vector->vector_irq);
-
- /* enable vector */
- hns3_mask_vector_irq(tqp_vector, 1);
}
-static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
+static void hns3_irq_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
- /* disable vector */
- hns3_mask_vector_irq(tqp_vector, 0);
-
disable_irq(tqp_vector->vector_irq);
napi_disable(&tqp_vector->napi);
cancel_work_sync(&tqp_vector->rx_group.dim.work);
@@ -572,9 +548,9 @@ void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
struct hns3_nic_priv *priv)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
struct hns3_enet_coalesce *ptx_coal = &priv->tx_coal;
struct hns3_enet_coalesce *prx_coal = &priv->rx_coal;
@@ -726,11 +702,42 @@ static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
return 0;
}
+static void hns3_enable_irqs_and_tqps(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ u16 i;
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_irq_enable(&priv->tqp_vector[i]);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_mask_vector_irq(&priv->tqp_vector[i], 1);
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_enable(h->kinfo.tqp[i]);
+}
+
+static void hns3_disable_irqs_and_tqps(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ u16 i;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_disable(h->kinfo.tqp[i]);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_mask_vector_irq(&priv->tqp_vector[i], 0);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_irq_disable(&priv->tqp_vector[i]);
+}
+
static int hns3_nic_net_up(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
- int i, j;
int ret;
ret = hns3_nic_reset_all_ring(h);
@@ -739,23 +746,13 @@ static int hns3_nic_net_up(struct net_device *netdev)
clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
- /* enable the vectors */
- for (i = 0; i < priv->vector_num; i++)
- hns3_vector_enable(&priv->tqp_vector[i]);
-
- /* enable rcb */
- for (j = 0; j < h->kinfo.num_tqps; j++)
- hns3_tqp_enable(h->kinfo.tqp[j]);
+ hns3_enable_irqs_and_tqps(netdev);
/* start the ae_dev */
ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
if (ret) {
set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
- while (j--)
- hns3_tqp_disable(h->kinfo.tqp[j]);
-
- for (j = i - 1; j >= 0; j--)
- hns3_vector_disable(&priv->tqp_vector[j]);
+ hns3_disable_irqs_and_tqps(netdev);
}
return ret;
@@ -842,17 +839,9 @@ static void hns3_reset_tx_queue(struct hnae3_handle *h)
static void hns3_nic_net_down(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = hns3_get_handle(netdev);
const struct hnae3_ae_ops *ops;
- int i;
-
- /* disable vectors */
- for (i = 0; i < priv->vector_num; i++)
- hns3_vector_disable(&priv->tqp_vector[i]);
- /* disable rcb */
- for (i = 0; i < h->kinfo.num_tqps; i++)
- hns3_tqp_disable(h->kinfo.tqp[i]);
+ hns3_disable_irqs_and_tqps(netdev);
/* stop ae_dev */
ops = priv->ae_handle->ae_algo->ops;
@@ -972,7 +961,7 @@ static void hns3_nic_set_rx_mode(struct net_device *netdev)
void hns3_request_update_promisc_mode(struct hnae3_handle *handle)
{
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
if (ops->request_update_promisc_mode)
ops->request_update_promisc_mode(handle);
@@ -1319,7 +1308,7 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
struct hns3_nic_priv *priv = netdev_priv(skb->dev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
union l4_hdr_info l4;
/* device version above V3(include V3), the hardware can
@@ -1519,7 +1508,7 @@ static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
* VLAN enabled, only one VLAN header is allowed in skb, otherwise it
* will cause RAS error.
*/
- ae_dev = pci_get_drvdata(handle->pdev);
+ ae_dev = hns3_get_ae_dev(handle);
if (unlikely(skb_vlan_tagged_multi(skb) &&
ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
handle->port_base_vlan_state ==
@@ -1705,8 +1694,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, dma_addr_t dma,
#define HNS3_LIKELY_BD_NUM 1
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
- unsigned int frag_buf_num;
- int k, sizeoflast;
+ unsigned int frag_buf_num, k;
+ int sizeoflast;
if (likely(size <= HNS3_MAX_BD_SIZE)) {
desc->addr = cpu_to_le64(dma);
@@ -1746,9 +1735,7 @@ static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv,
unsigned int type)
{
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
- struct hnae3_handle *handle = ring->tqp->handle;
struct device *dev = ring_to_dev(ring);
- struct hnae3_ae_dev *ae_dev;
unsigned int size;
dma_addr_t dma;
@@ -1780,13 +1767,6 @@ static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv,
return -ENOMEM;
}
- /* Add a SYNC command to sync io-pgtale to avoid errors in pgtable
- * prefetch
- */
- ae_dev = hns3_get_ae_dev(handle);
- if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
- hns3_dma_map_sync(dev, dma);
-
desc_cb->priv = priv;
desc_cb->length = size;
desc_cb->dma = dma;
@@ -1887,7 +1867,7 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
unsigned int bd_num, u8 max_non_tso_bd_num)
{
unsigned int tot_len = 0;
- int i;
+ unsigned int i;
for (i = 0; i < max_non_tso_bd_num - 1U; i++)
tot_len += bd_size[i];
@@ -1915,7 +1895,7 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
{
- int i;
+ u32 i;
for (i = 0; i < MAX_SKB_FRAGS; i++)
size[i] = skb_frag_size(&shinfo->frags[i]);
@@ -2130,7 +2110,7 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
*/
if (test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state) && num &&
!ring->pending_buf && num <= HNS3_MAX_PUSH_BD_NUM && doorbell) {
- /* This smp_store_release() pairs with smp_load_aquire() in
+ /* This smp_store_release() pairs with smp_load_acquire() in
* hns3_nic_reclaim_desc(). Ensure that the BD valid bit
* is updated.
*/
@@ -2146,7 +2126,7 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
return;
}
- /* This smp_store_release() pairs with smp_load_aquire() in
+ /* This smp_store_release() pairs with smp_load_acquire() in
* hns3_nic_reclaim_desc(). Ensure that the BD valid bit is updated.
*/
smp_store_release(&ring->last_to_use, ring->next_to_use);
@@ -2231,9 +2211,9 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
u32 nfrag = skb_shinfo(skb)->nr_frags + 1;
struct sg_table *sgt;
- int i, bd_num = 0;
+ int bd_num = 0;
dma_addr_t dma;
- u32 cb_len;
+ u32 cb_len, i;
int nents;
if (skb_has_frag_list(skb))
@@ -2439,6 +2419,35 @@ static int hns3_nic_do_ioctl(struct net_device *netdev,
return h->ae_algo->ops->do_ioctl(h, ifr, cmd);
}
+static int hns3_nic_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ if (!h->ae_algo->ops->hwtstamp_get)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->hwtstamp_get(h, config);
+}
+
+static int hns3_nic_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ if (!h->ae_algo->ops->hwtstamp_set)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->hwtstamp_set(h, config, extack);
+}
+
static int hns3_nic_set_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -2471,7 +2480,7 @@ static int hns3_nic_set_features(struct net_device *netdev,
if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
h->ae_algo->ops->cls_flower_active(h)) {
netdev_err(netdev,
- "there are offloaded TC filters active, cannot disable HW TC offload");
+ "there are offloaded TC filters active, cannot disable HW TC offload\n");
return -EINVAL;
}
@@ -2568,7 +2577,7 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
struct hnae3_handle *handle = priv->ae_handle;
struct rtnl_link_stats64 ring_total_stats;
struct hns3_enet_ring *ring;
- unsigned int idx;
+ int idx;
if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
return;
@@ -2794,7 +2803,7 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
static int hns3_get_timeout_queue(struct net_device *ndev)
{
- int i;
+ unsigned int i;
/* Find the stopped queue the same way the stack does */
for (i = 0; i < ndev->num_tx_queues; i++) {
@@ -2875,7 +2884,7 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = hns3_get_handle(ndev);
struct hns3_enet_ring *tx_ring;
- int timeout_queue;
+ u32 timeout_queue;
timeout_queue = hns3_get_timeout_queue(ndev);
if (timeout_queue >= ndev->num_tx_queues) {
@@ -3068,6 +3077,8 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_set_vf_rate = hns3_nic_set_vf_rate,
.ndo_set_vf_mac = hns3_nic_set_vf_mac,
.ndo_select_queue = hns3_nic_select_queue,
+ .ndo_hwtstamp_get = hns3_nic_hwtstamp_get,
+ .ndo_hwtstamp_set = hns3_nic_hwtstamp_set,
};
bool hns3_is_phys_func(struct pci_dev *pdev)
@@ -3845,7 +3856,7 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
{
__be16 type = skb->protocol;
struct tcphdr *th;
- int depth = 0;
+ u32 depth = 0;
while (eth_type_vlan(type)) {
struct vlan_hdr *vh;
@@ -4771,7 +4782,7 @@ map_ring_fail:
static void hns3_nic_init_coal_cfg(struct hns3_nic_priv *priv)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
@@ -5275,7 +5286,7 @@ static void hns3_info_show(struct hns3_nic_priv *priv)
static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv,
enum dim_cq_period_mode mode, bool is_tx)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
struct hnae3_handle *handle = priv->ae_handle;
int i;
@@ -5313,7 +5324,7 @@ void hns3_cq_period_mode_init(struct hns3_nic_priv *priv,
static void hns3_state_init(struct hnae3_handle *handle)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
struct net_device *netdev = handle->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -5348,6 +5359,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
struct net_device *netdev;
int ret;
+ ae_dev->handle = handle;
+
handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
&max_rss_size);
netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
@@ -5922,8 +5935,6 @@ int hns3_set_channels(struct net_device *netdev,
void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
- struct hnae3_handle *h = priv->ae_handle;
- int i;
if (!if_running)
return;
@@ -5934,11 +5945,7 @@ void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
netif_carrier_off(ndev);
netif_tx_disable(ndev);
- for (i = 0; i < priv->vector_num; i++)
- hns3_vector_disable(&priv->tqp_vector[i]);
-
- for (i = 0; i < h->kinfo.num_tqps; i++)
- hns3_tqp_disable(h->kinfo.tqp[i]);
+ hns3_disable_irqs_and_tqps(ndev);
/* delay ring buffer clearing to hns3_reset_notify_uninit_enet
* during reset process, because driver may not be able
@@ -5954,7 +5961,6 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
- int i;
if (!if_running)
return;
@@ -5970,11 +5976,7 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
- for (i = 0; i < priv->vector_num; i++)
- hns3_vector_enable(&priv->tqp_vector[i]);
-
- for (i = 0; i < h->kinfo.num_tqps; i++)
- hns3_tqp_enable(h->kinfo.tqp[i]);
+ hns3_enable_irqs_and_tqps(ndev);
netif_tx_wake_all_queues(ndev);
@@ -5996,7 +5998,7 @@ static const struct hns3_hw_error_info hns3_hw_err[] = {
static void hns3_process_hw_error(struct hnae3_handle *handle,
enum hnae3_hw_error_type type)
{
- int i;
+ u32 i;
for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) {
if (hns3_hw_err[i].type == type) {
@@ -6023,8 +6025,8 @@ static int __init hns3_init_module(void)
{
int ret;
- pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
- pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
+ pr_debug("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
+ pr_debug("%s: %s\n", hns3_driver_name, hns3_copyright);
client.type = HNAE3_CLIENT_KNIC;
snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s",
@@ -6060,9 +6062,11 @@ module_init(hns3_init_module);
*/
static void __exit hns3_exit_module(void)
{
+ hnae3_acquire_unload_lock();
pci_unregister_driver(&hns3_driver);
hnae3_unregister_client(&client);
hns3_dbg_unregister_debugfs();
+ hnae3_release_unload_lock();
}
module_exit(hns3_exit_module);
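The new hnae3_acquire_unload_lock()/hnae3_release_unload_lock() pair serializes module unload against concurrent hnae3 activity. Their definitions are outside this excerpt; assuming a single global mutex in the hnae3 core, a minimal sketch would be:

/* Sketch only - the real helpers belong to the hnae3 core and may differ. */
static DEFINE_MUTEX(hnae3_unload_lock);

void hnae3_acquire_unload_lock(void)
{
	mutex_lock(&hnae3_unload_lock);
}
EXPORT_SYMBOL(hnae3_acquire_unload_lock);

void hnae3_release_unload_lock(void)
{
	mutex_unlock(&hnae3_unload_lock);
}
EXPORT_SYMBOL(hnae3_release_unload_lock);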
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index caf7a4df8585..933e3527ed82 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -623,7 +623,7 @@ struct hns3_reset_type_map {
enum hnae3_reset_type rst_type;
};
-static inline int ring_space(struct hns3_enet_ring *ring)
+static inline u32 ring_space(struct hns3_enet_ring *ring)
{
/* This smp_load_acquire() pairs with smp_store_release() in
* hns3_nic_reclaim_one_desc called by hns3_clean_tx_ring.
@@ -694,7 +694,7 @@ static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
/* iterator for handling rings in ring group */
#define hns3_for_each_ring(pos, head) \
- for (pos = (head).ring; (pos); pos = (pos)->next)
+ for ((pos) = (head).ring; (pos); (pos) = (pos)->next)
#define hns3_get_handle(ndev) \
(((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)
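Many hunks below replace open-coded pci_get_drvdata(h->pdev) and h->ae_algo->ops chains with hns3_get_ae_dev() and hns3_get_ops(). The accessors themselves are outside this excerpt; judging from the call sites they are thin inline wrappers along these lines (a sketch, not the patch's definitions):

static inline struct hnae3_ae_dev *hns3_get_ae_dev(struct hnae3_handle *h)
{
	return pci_get_drvdata(h->pdev);
}

static inline const struct hnae3_ae_ops *hns3_get_ops(struct hnae3_handle *h)
{
	return h->ae_algo->ops;
}

Centralizing the lookup keeps the pointer chasing in one place instead of repeating it in every ethtool and enet handler.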
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 97eaeec1952b..a5eefa28454c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -3,6 +3,7 @@
#include <linux/etherdevice.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <linux/phy.h>
#include <linux/sfp.h>
@@ -85,7 +86,7 @@ static int hns3_get_sset_count(struct net_device *netdev, int stringset);
static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
int ret;
if (!h->ae_algo->ops->set_loopback ||
@@ -170,7 +171,7 @@ static void hns3_lp_setup_skb(struct sk_buff *skb)
* the purpose of mac or serdes selftest.
*/
handle = hns3_get_handle(ndev);
- ae_dev = pci_get_drvdata(handle->pdev);
+ ae_dev = hns3_get_ae_dev(handle);
if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
ethh->h_dest[5] += HNS3_NIC_LB_DST_MAC_ADDR;
eth_zero_addr(ethh->h_source);
@@ -435,7 +436,7 @@ static void hns3_self_test(struct net_device *ndev,
data[i] = HNS3_NIC_LB_TEST_UNEXECUTED;
if (hns3_nic_resetting(ndev)) {
- netdev_err(ndev, "dev resetting!");
+ netdev_err(ndev, "dev resetting!\n");
goto failure;
}
@@ -488,7 +489,7 @@ static const struct hns3_pflag_desc hns3_priv_flags[HNAE3_PFLAG_MAX] = {
static int hns3_get_sset_count(struct net_device *netdev, int stringset)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- const struct hnae3_ae_ops *ops = h->ae_algo->ops;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(h);
if (!ops->get_sset_count)
return -EOPNOTSUPP;
@@ -509,73 +510,53 @@ static int hns3_get_sset_count(struct net_device *netdev, int stringset)
}
}
-static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
- u32 stat_count, u32 num_tqps, const char *prefix)
+static void hns3_update_strings(u8 **data, const struct hns3_stats *stats,
+ u32 stat_count, u32 num_tqps,
+ const char *prefix)
{
-#define MAX_PREFIX_SIZE (6 + 4)
- u32 size_left;
u32 i, j;
- u32 n1;
- for (i = 0; i < num_tqps; i++) {
- for (j = 0; j < stat_count; j++) {
- data[ETH_GSTRING_LEN - 1] = '\0';
-
- /* first, prepend the prefix string */
- n1 = scnprintf(data, MAX_PREFIX_SIZE, "%s%u_",
- prefix, i);
- size_left = (ETH_GSTRING_LEN - 1) - n1;
-
- /* now, concatenate the stats string to it */
- strncat(data, stats[j].stats_string, size_left);
- data += ETH_GSTRING_LEN;
- }
- }
-
- return data;
+ for (i = 0; i < num_tqps; i++)
+ for (j = 0; j < stat_count; j++)
+ ethtool_sprintf(data, "%s%u_%s", prefix, i,
+ stats[j].stats_string);
}
-static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data)
+static void hns3_get_strings_tqps(struct hnae3_handle *handle, u8 **data)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
const char tx_prefix[] = "txq";
const char rx_prefix[] = "rxq";
/* get strings for Tx */
- data = hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT,
- kinfo->num_tqps, tx_prefix);
+ hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT,
+ kinfo->num_tqps, tx_prefix);
/* get strings for Rx */
- data = hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT,
- kinfo->num_tqps, rx_prefix);
-
- return data;
+ hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT,
+ kinfo->num_tqps, rx_prefix);
}
static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- const struct hnae3_ae_ops *ops = h->ae_algo->ops;
- char *buff = (char *)data;
- int i;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(h);
+ u32 i;
if (!ops->get_strings)
return;
switch (stringset) {
case ETH_SS_STATS:
- buff = hns3_get_strings_tqps(h, buff);
- ops->get_strings(h, stringset, (u8 *)buff);
+ hns3_get_strings_tqps(h, &data);
+ ops->get_strings(h, stringset, &data);
break;
case ETH_SS_TEST:
- ops->get_strings(h, stringset, data);
+ ops->get_strings(h, stringset, &data);
break;
case ETH_SS_PRIV_FLAGS:
- for (i = 0; i < HNS3_PRIV_FLAGS_LEN; i++) {
- snprintf(buff, ETH_GSTRING_LEN, "%s",
- hns3_priv_flags[i].name);
- buff += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < HNS3_PRIV_FLAGS_LEN; i++)
+ ethtool_puts(&data, hns3_priv_flags[i].name);
break;
default:
break;
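ethtool_sprintf() and ethtool_puts() take a u8 ** and advance the caller's cursor by ETH_GSTRING_LEN per call, which is why hns3_get_strings_tqps() now receives u8 **data and why the manual scnprintf/strncat offset bookkeeping above could be dropped. A generic usage sketch (not hns3 code; helper names from linux/ethtool.h on recent kernels):

static void example_fill_stat_strings(u8 *buf)
{
	u8 *p = buf;	/* cursor advanced by each helper call */
	int i;

	for (i = 0; i < 4; i++)
		ethtool_sprintf(&p, "rxq%d_bytes", i);	/* formatted name */

	ethtool_puts(&p, "reset_count");		/* literal name */
}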
@@ -588,7 +569,7 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
struct hns3_nic_priv *nic_priv = handle->priv;
struct hns3_enet_ring *ring;
u8 *stat;
- int i, j;
+ u32 i, j;
/* get stats for Tx */
for (i = 0; i < kinfo->num_tqps; i++) {
@@ -711,7 +692,7 @@ static void hns3_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *param)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
if (!test_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps))
return;
@@ -725,7 +706,7 @@ static int hns3_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *param)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
if (!test_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps))
return -EOPNOTSUPP;
@@ -744,7 +725,7 @@ static int hns3_set_pauseparam(struct net_device *netdev,
static void hns3_get_ksettings(struct hnae3_handle *h,
struct ethtool_link_ksettings *cmd)
{
- const struct hnae3_ae_ops *ops = h->ae_algo->ops;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(h);
/* 1.auto_neg & speed & duplex from cmd */
if (ops->get_ksettings_an_result)
@@ -770,7 +751,7 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
const struct hnae3_ae_ops *ops;
u8 module_type;
u8 media_type;
@@ -813,7 +794,7 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
break;
default:
- netdev_warn(netdev, "Unknown media type");
+ netdev_warn(netdev, "Unknown media type\n");
return 0;
}
@@ -833,7 +814,7 @@ static int hns3_check_ksettings_param(const struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
u8 module_type = HNAE3_MODULE_TYPE_UNKNOWN;
u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
u32 lane_num;
@@ -861,7 +842,7 @@ static int hns3_check_ksettings_param(const struct net_device *netdev,
if (cmd->base.duplex == DUPLEX_HALF &&
media_type != HNAE3_MEDIA_TYPE_COPPER) {
netdev_err(netdev,
- "only copper port supports half duplex!");
+ "only copper port supports half duplex!\n");
return -EINVAL;
}
@@ -880,8 +861,8 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
int ret;
/* Chip don't support this mode. */
@@ -951,7 +932,7 @@ static u32 hns3_get_rss_key_size(struct net_device *netdev)
static u32 hns3_get_rss_indir_size(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
return ae_dev->dev_specs.rss_ind_tbl_size;
}
@@ -973,7 +954,7 @@ static int hns3_set_rss(struct net_device *netdev,
struct netlink_ext_ack *extack)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
if (!h->ae_algo->ops->set_rss)
return -EOPNOTSUPP;
@@ -997,6 +978,16 @@ static int hns3_set_rss(struct net_device *netdev,
rxfh->hfunc);
}
+static int hns3_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->get_rss_tuple)
+ return h->ae_algo->ops->get_rss_tuple(h, cmd);
+ return -EOPNOTSUPP;
+}
+
static int hns3_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *cmd,
u32 *rule_locs)
@@ -1007,10 +998,6 @@ static int hns3_get_rxnfc(struct net_device *netdev,
case ETHTOOL_GRXRINGS:
cmd->data = h->kinfo.num_tqps;
return 0;
- case ETHTOOL_GRXFH:
- if (h->ae_algo->ops->get_rss_tuple)
- return h->ae_algo->ops->get_rss_tuple(h, cmd);
- return -EOPNOTSUPP;
case ETHTOOL_GRXCLSRLCNT:
if (h->ae_algo->ops->get_fd_rule_cnt)
return h->ae_algo->ops->get_fd_rule_cnt(h, cmd);
@@ -1043,8 +1030,8 @@ static int hns3_set_reset(struct net_device *netdev, u32 *flags)
{
enum hnae3_reset_type rst_type = HNAE3_NONE_RESET;
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- const struct hnae3_ae_ops *ops = h->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(h);
const struct hns3_reset_type_map *rst_type_map;
enum ethtool_reset_flags rst_flags;
u32 i, size;
@@ -1208,7 +1195,7 @@ static int hns3_set_tx_push(struct net_device *netdev, u32 tx_push)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
u32 old_state = test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
if (!test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps) && tx_push)
@@ -1218,7 +1205,7 @@ static int hns3_set_tx_push(struct net_device *netdev, u32 tx_push)
return 0;
netdev_dbg(netdev, "Changing tx push from %s to %s\n",
- old_state ? "on" : "off", tx_push ? "on" : "off");
+ str_on_off(old_state), str_on_off(tx_push));
if (tx_push)
set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
@@ -1294,15 +1281,22 @@ static int hns3_set_ringparam(struct net_device *ndev,
return ret;
}
+static int hns3_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->set_rss_tuple)
+ return h->ae_algo->ops->set_rss_tuple(h, cmd);
+ return -EOPNOTSUPP;
+}
+
static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- if (h->ae_algo->ops->set_rss_tuple)
- return h->ae_algo->ops->set_rss_tuple(h, cmd);
- return -EOPNOTSUPP;
case ETHTOOL_SRXCLSRLINS:
if (h->ae_algo->ops->add_fd_entry)
return h->ae_algo->ops->add_fd_entry(h, cmd);
@@ -1319,7 +1313,7 @@ static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
static int hns3_nway_reset(struct net_device *netdev)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
struct phy_device *phy = netdev->phydev;
int autoneg;
@@ -1327,7 +1321,7 @@ static int hns3_nway_reset(struct net_device *netdev)
return 0;
if (hns3_nic_resetting(netdev)) {
- netdev_err(netdev, "dev resetting!");
+ netdev_err(netdev, "dev resetting!\n");
return -EBUSY;
}
@@ -1396,7 +1390,7 @@ static int hns3_check_gl_coalesce_para(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
u32 rx_gl, tx_gl;
if (cmd->rx_coalesce_usecs > ae_dev->dev_specs.max_int_gl) {
@@ -1468,7 +1462,7 @@ static int hns3_check_ql_coalesce_param(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
if ((cmd->tx_max_coalesced_frames || cmd->rx_max_coalesced_frames) &&
!ae_dev->dev_specs.int_ql_max) {
@@ -1492,7 +1486,7 @@ hns3_check_cqe_coalesce_param(struct net_device *netdev,
struct kernel_ethtool_coalesce *kernel_coal)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
if ((kernel_coal->use_cqe_mode_tx || kernel_coal->use_cqe_mode_rx) &&
!hnae3_ae_dev_cq_supported(ae_dev)) {
@@ -1665,11 +1659,12 @@ static void hns3_set_msglevel(struct net_device *netdev, u32 msg_level)
}
static void hns3_get_fec_stats(struct net_device *netdev,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
if (!hnae3_ae_dev_fec_stats_supported(ae_dev) || !ops->get_fec_stats)
return;
@@ -1719,8 +1714,8 @@ static int hns3_get_fecparam(struct net_device *netdev,
struct ethtool_fecparam *fec)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
u8 fec_ability;
u8 fec_mode;
@@ -1744,8 +1739,8 @@ static int hns3_set_fecparam(struct net_device *netdev,
struct ethtool_fecparam *fec)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
u32 fec_mode;
if (!test_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps))
@@ -1766,8 +1761,8 @@ static int hns3_get_module_info(struct net_device *netdev,
#define HNS3_SFF_8636_V1_3 0x03
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
struct hns3_sfp_type sfp_type;
int ret;
@@ -1816,8 +1811,8 @@ static int hns3_get_module_eeprom(struct net_device *netdev,
struct ethtool_eeprom *ee, u8 *data)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 ||
!ops->get_module_eeprom)
@@ -1968,7 +1963,7 @@ static int hns3_set_tunable(struct net_device *netdev,
int i, ret = 0;
if (hns3_nic_resetting(netdev) || !priv->ring) {
- netdev_err(netdev, "failed to set tunable value, dev resetting!");
+ netdev_err(netdev, "failed to set tunable value, dev resetting!\n");
return -EBUSY;
}
@@ -2157,6 +2152,8 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
.get_rxfh_indir_size = hns3_get_rss_indir_size,
.get_rxfh = hns3_get_rss,
.set_rxfh = hns3_set_rss,
+ .get_rxfh_fields = hns3_get_rxfh_fields,
+ .set_rxfh_fields = hns3_set_rxfh_fields,
.get_link_ksettings = hns3_get_link_ksettings,
.get_channels = hns3_get_channels,
.set_channels = hns3_set_channels,
@@ -2194,6 +2191,8 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.get_rxfh_indir_size = hns3_get_rss_indir_size,
.get_rxfh = hns3_get_rss,
.set_rxfh = hns3_set_rss,
+ .get_rxfh_fields = hns3_get_rxfh_fields,
+ .set_rxfh_fields = hns3_set_rxfh_fields,
.get_link_ksettings = hns3_get_link_ksettings,
.set_link_ksettings = hns3_set_link_ksettings,
.nway_reset = hns3_nway_reset,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index debf143e9940..b76d25074e99 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -3,6 +3,7 @@
#include <linux/device.h>
#include <linux/sched/clock.h>
+#include <linux/string_choices.h>
#include "hclge_debugfs.h"
#include "hclge_err.h"
@@ -11,7 +12,9 @@
#include "hclge_tm.h"
#include "hnae3.h"
-static const char * const state_str[] = { "off", "on" };
+#define hclge_seq_file_to_hdev(s) \
+ (((struct hnae3_ae_dev *)hnae3_seq_file_to_ae_dev(s))->priv)
+
static const char * const hclge_mac_state_str[] = {
"TO_ADD", "TO_DEL", "ACTIVE"
};
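hclge_seq_file_to_hdev() leans on hnae3_seq_file_to_ae_dev(), which comes from the hnae3 side of this series and is not visible here. The conversion follows the standard seq_file pattern: each debugfs dump command becomes a show() callback of the form int fn(struct seq_file *s, void *data), and the ae_dev is recovered from the seq_file's private pointer. A hedged sketch of that plumbing, with assumed names and an assumed file name:

/* Assumed accessor: the ae_dev is presumed to be the seq_file private data. */
#define hnae3_seq_file_to_ae_dev(s)	((s)->private)

static int hclge_dbg_sketch_open(struct inode *inode, struct file *file)
{
	/* inode->i_private would be set to the ae_dev when the file is created */
	return single_open(file, hclge_dbg_dump_mac, inode->i_private);
}

static const struct file_operations hclge_dbg_sketch_fops = {
	.owner	 = THIS_MODULE,
	.open	 = hclge_dbg_sketch_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};

/* e.g. debugfs_create_file("mac_info", 0400, parent, ae_dev, &hclge_dbg_sketch_fops); */

single_open() stores its third argument in seq->private, which is exactly what the accessor above reads back, so the per-command show callbacks stay free of file_operations boilerplate.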
@@ -721,48 +724,6 @@ static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
.cmd = HCLGE_OPC_DFX_TQP_REG } },
};
-/* make sure: len(name) + interval >= maxlen(item data) + 2,
- * for example, name = "pkt_num"(len: 7), the prototype of item data is u32,
- * and print as "%u"(maxlen: 10), so the interval should be at least 5.
- */
-static void hclge_dbg_fill_content(char *content, u16 len,
- const struct hclge_dbg_item *items,
- const char **result, u16 size)
-{
-#define HCLGE_DBG_LINE_END_LEN 2
- char *pos = content;
- u16 item_len;
- u16 i;
-
- if (!len) {
- return;
- } else if (len <= HCLGE_DBG_LINE_END_LEN) {
- *pos++ = '\0';
- return;
- }
-
- memset(content, ' ', len);
- len -= HCLGE_DBG_LINE_END_LEN;
-
- for (i = 0; i < size; i++) {
- item_len = strlen(items[i].name) + items[i].interval;
- if (len < item_len)
- break;
-
- if (result) {
- if (item_len < strlen(result[i]))
- break;
- memcpy(pos, result[i], strlen(result[i]));
- } else {
- memcpy(pos, items[i].name, strlen(items[i].name));
- }
- pos += item_len;
- len -= item_len;
- }
- *pos++ = '\n';
- *pos++ = '\0';
-}
-
static char *hclge_dbg_get_func_id_str(char *buf, u8 id)
{
if (id)
@@ -826,14 +787,14 @@ int hclge_dbg_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc_src,
static int
hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
const struct hclge_dbg_reg_type_info *reg_info,
- char *buf, int len, int *pos)
+ struct seq_file *s)
{
const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
+ u32 index, entry, i, cnt, min_num;
struct hclge_desc *desc_src;
- u32 index, entry, i, cnt;
- int bd_num, min_num, ret;
struct hclge_desc *desc;
+ int bd_num, ret;
ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
if (ret)
@@ -846,13 +807,12 @@ hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
for (i = 0, cnt = 0; i < min_num; i++, dfx_message++)
- *pos += scnprintf(buf + *pos, len - *pos, "item%u = %s\n",
- cnt++, dfx_message->message);
+ seq_printf(s, "item%u = %s\n", cnt++, dfx_message->message);
for (i = 0; i < cnt; i++)
- *pos += scnprintf(buf + *pos, len - *pos, "item%u\t", i);
+ seq_printf(s, "item%u\t", i);
- *pos += scnprintf(buf + *pos, len - *pos, "\n");
+ seq_puts(s, "\n");
for (index = 0; index < hdev->vport[0].alloc_tqps; index++) {
dfx_message = reg_info->dfx_msg;
@@ -867,10 +827,9 @@ hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
if (i > 0 && !entry)
desc++;
- *pos += scnprintf(buf + *pos, len - *pos, "%#x\t",
- le32_to_cpu(desc->data[entry]));
+ seq_printf(s, "%#x\t", le32_to_cpu(desc->data[entry]));
}
- *pos += scnprintf(buf + *pos, len - *pos, "\n");
+ seq_puts(s, "\n");
}
kfree(desc_src);
@@ -880,14 +839,14 @@ hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
static int
hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
const struct hclge_dbg_reg_type_info *reg_info,
- char *buf, int len, int *pos)
+ struct seq_file *s)
{
const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
struct hclge_desc *desc_src;
- int bd_num, min_num, ret;
+ int bd_num, min_num, ret, i;
struct hclge_desc *desc;
- u32 entry, i;
+ u32 entry;
ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
if (ret)
@@ -914,9 +873,8 @@ hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
if (!dfx_message->flag)
continue;
- *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
- dfx_message->message,
- le32_to_cpu(desc->data[entry]));
+ seq_printf(s, "%s: %#x\n", dfx_message->message,
+ le32_to_cpu(desc->data[entry]));
}
kfree(desc_src);
@@ -940,8 +898,8 @@ static const struct hclge_dbg_status_dfx_info hclge_dbg_mac_en_status[] = {
{HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, "mac_tx_oversize_truncate_en"}
};
-static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
- int len, int *pos)
+static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev,
+ struct seq_file *s)
{
struct hclge_config_mac_mode_cmd *req;
struct hclge_desc desc;
@@ -962,16 +920,15 @@ static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
for (i = 0; i < ARRAY_SIZE(hclge_dbg_mac_en_status); i++) {
offset = hclge_dbg_mac_en_status[i].offset;
- *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
- hclge_dbg_mac_en_status[i].message,
- hnae3_get_bit(loop_en, offset));
+ seq_printf(s, "%s: %#x\n", hclge_dbg_mac_en_status[i].message,
+ hnae3_get_bit(loop_en, offset));
}
return 0;
}
-static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev, char *buf,
- int len, int *pos)
+static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev,
+ struct seq_file *s)
{
struct hclge_config_max_frm_size_cmd *req;
struct hclge_desc desc;
@@ -988,16 +945,14 @@ static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev, char *buf,
req = (struct hclge_config_max_frm_size_cmd *)desc.data;
- *pos += scnprintf(buf + *pos, len - *pos, "max_frame_size: %u\n",
- le16_to_cpu(req->max_frm_size));
- *pos += scnprintf(buf + *pos, len - *pos, "min_frame_size: %u\n",
- req->min_frm_size);
+ seq_printf(s, "max_frame_size: %u\n", le16_to_cpu(req->max_frm_size));
+ seq_printf(s, "min_frame_size: %u\n", req->min_frm_size);
return 0;
}
-static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev, char *buf,
- int len, int *pos)
+static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev,
+ struct seq_file *s)
{
#define HCLGE_MAC_SPEED_SHIFT 0
#define HCLGE_MAC_SPEED_MASK GENMASK(5, 0)
@@ -1018,33 +973,31 @@ static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev, char *buf,
req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
- *pos += scnprintf(buf + *pos, len - *pos, "speed: %#lx\n",
- hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
- HCLGE_MAC_SPEED_SHIFT));
- *pos += scnprintf(buf + *pos, len - *pos, "duplex: %#x\n",
- hnae3_get_bit(req->speed_dup,
- HCLGE_MAC_DUPLEX_SHIFT));
+ seq_printf(s, "speed: %#lx\n",
+ hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
+ HCLGE_MAC_SPEED_SHIFT));
+ seq_printf(s, "duplex: %#x\n",
+ hnae3_get_bit(req->speed_dup, HCLGE_MAC_DUPLEX_SHIFT));
return 0;
}
-static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_mac(struct seq_file *s, void *data)
{
- int pos = 0;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
int ret;
- ret = hclge_dbg_dump_mac_enable_status(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_mac_enable_status(hdev, s);
if (ret)
return ret;
- ret = hclge_dbg_dump_mac_frame_size(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_mac_frame_size(hdev, s);
if (ret)
return ret;
- return hclge_dbg_dump_mac_speed_duplex(hdev, buf, len, &pos);
+ return hclge_dbg_dump_mac_speed_duplex(hdev, s);
}
-static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
- int *pos)
+static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
@@ -1055,8 +1008,8 @@ static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos,
- "qset_id roce_qset_mask nic_qset_mask qset_shaping_pass qset_bp_status\n");
+ seq_puts(s, "qset_id roce_qset_mask nic_qset_mask ");
+ seq_puts(s, "qset_shaping_pass qset_bp_status\n");
for (qset_id = 0; qset_id < qset_num; qset_id++) {
ret = hclge_dbg_cmd_send(hdev, &desc, qset_id, 1,
HCLGE_OPC_QSET_DFX_STS);
@@ -1065,17 +1018,14 @@ static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
req.bitmap = (u8)le32_to_cpu(desc.data[1]);
- *pos += scnprintf(buf + *pos, len - *pos,
- "%04u %#x %#x %#x %#x\n",
- qset_id, req.bit0, req.bit1, req.bit2,
- req.bit3);
+ seq_printf(s, "%04u %#-16x%#-15x%#-19x%#-x\n",
+ qset_id, req.bit0, req.bit1, req.bit2, req.bit3);
}
return 0;
}
-static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
- int *pos)
+static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
@@ -1086,8 +1036,7 @@ static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos,
- "pri_id pri_mask pri_cshaping_pass pri_pshaping_pass\n");
+ seq_puts(s, "pri_id pri_mask pri_cshaping_pass pri_pshaping_pass\n");
for (pri_id = 0; pri_id < pri_num; pri_id++) {
ret = hclge_dbg_cmd_send(hdev, &desc, pri_id, 1,
HCLGE_OPC_PRI_DFX_STS);
@@ -1096,24 +1045,21 @@ static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
req.bitmap = (u8)le32_to_cpu(desc.data[1]);
- *pos += scnprintf(buf + *pos, len - *pos,
- "%03u %#x %#x %#x\n",
- pri_id, req.bit0, req.bit1, req.bit2);
+ seq_printf(s, "%03u %#-10x%#-19x%#-x\n",
+ pri_id, req.bit0, req.bit1, req.bit2);
}
return 0;
}
-static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
- int *pos)
+static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
u8 pg_id;
int ret;
- *pos += scnprintf(buf + *pos, len - *pos,
- "pg_id pg_mask pg_cshaping_pass pg_pshaping_pass\n");
+ seq_puts(s, "pg_id pg_mask pg_cshaping_pass pg_pshaping_pass\n");
for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
ret = hclge_dbg_cmd_send(hdev, &desc, pg_id, 1,
HCLGE_OPC_PG_DFX_STS);
@@ -1122,47 +1068,41 @@ static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
req.bitmap = (u8)le32_to_cpu(desc.data[1]);
- *pos += scnprintf(buf + *pos, len - *pos,
- "%03u %#x %#x %#x\n",
- pg_id, req.bit0, req.bit1, req.bit2);
+ seq_printf(s, "%03u %#-9x%#-18x%#-x\n",
+ pg_id, req.bit0, req.bit1, req.bit2);
}
return 0;
}
-static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len,
- int *pos)
+static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_desc desc;
u16 nq_id;
int ret;
- *pos += scnprintf(buf + *pos, len - *pos,
- "nq_id sch_nic_queue_cnt sch_roce_queue_cnt\n");
+ seq_puts(s, "nq_id sch_nic_queue_cnt sch_roce_queue_cnt\n");
for (nq_id = 0; nq_id < hdev->num_tqps; nq_id++) {
ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
HCLGE_OPC_SCH_NQ_CNT);
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos, "%04u %#x",
- nq_id, le32_to_cpu(desc.data[1]));
+ seq_printf(s, "%04u %#-19x",
+ nq_id, le32_to_cpu(desc.data[1]));
ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
HCLGE_OPC_SCH_RQ_CNT);
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos,
- " %#x\n",
- le32_to_cpu(desc.data[1]));
+ seq_printf(s, "%#-x\n", le32_to_cpu(desc.data[1]));
}
return 0;
}
-static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
- int *pos)
+static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
@@ -1176,16 +1116,13 @@ static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
req.bitmap = (u8)le32_to_cpu(desc.data[1]);
- *pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n",
- req.bit0);
- *pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n",
- req.bit1);
+ seq_printf(s, "port_mask: %#x\n", req.bit0);
+ seq_printf(s, "port_shaping_pass: %#x\n", req.bit1);
return 0;
}
-static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len,
- int *pos)
+static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_desc desc[2];
u8 port_id = 0;
@@ -1196,32 +1133,23 @@ static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos, "SCH_NIC_NUM: %#x\n",
- le32_to_cpu(desc[0].data[1]));
- *pos += scnprintf(buf + *pos, len - *pos, "SCH_ROCE_NUM: %#x\n",
- le32_to_cpu(desc[0].data[2]));
+ seq_printf(s, "SCH_NIC_NUM: %#x\n", le32_to_cpu(desc[0].data[1]));
+ seq_printf(s, "SCH_ROCE_NUM: %#x\n", le32_to_cpu(desc[0].data[2]));
ret = hclge_dbg_cmd_send(hdev, desc, port_id, 2,
HCLGE_OPC_TM_INTERNAL_STS);
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos, "pri_bp: %#x\n",
- le32_to_cpu(desc[0].data[1]));
- *pos += scnprintf(buf + *pos, len - *pos, "fifo_dfx_info: %#x\n",
- le32_to_cpu(desc[0].data[2]));
- *pos += scnprintf(buf + *pos, len - *pos,
- "sch_roce_fifo_afull_gap: %#x\n",
- le32_to_cpu(desc[0].data[3]));
- *pos += scnprintf(buf + *pos, len - *pos,
- "tx_private_waterline: %#x\n",
- le32_to_cpu(desc[0].data[4]));
- *pos += scnprintf(buf + *pos, len - *pos, "tm_bypass_en: %#x\n",
- le32_to_cpu(desc[0].data[5]));
- *pos += scnprintf(buf + *pos, len - *pos, "SSU_TM_BYPASS_EN: %#x\n",
- le32_to_cpu(desc[1].data[0]));
- *pos += scnprintf(buf + *pos, len - *pos, "SSU_RESERVE_CFG: %#x\n",
- le32_to_cpu(desc[1].data[1]));
+ seq_printf(s, "pri_bp: %#x\n", le32_to_cpu(desc[0].data[1]));
+ seq_printf(s, "fifo_dfx_info: %#x\n", le32_to_cpu(desc[0].data[2]));
+ seq_printf(s, "sch_roce_fifo_afull_gap: %#x\n",
+ le32_to_cpu(desc[0].data[3]));
+ seq_printf(s, "tx_private_waterline: %#x\n",
+ le32_to_cpu(desc[0].data[4]));
+ seq_printf(s, "tm_bypass_en: %#x\n", le32_to_cpu(desc[0].data[5]));
+ seq_printf(s, "SSU_TM_BYPASS_EN: %#x\n", le32_to_cpu(desc[1].data[0]));
+ seq_printf(s, "SSU_RESERVE_CFG: %#x\n", le32_to_cpu(desc[1].data[1]));
if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER)
return 0;
@@ -1231,65 +1159,60 @@ static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
- *pos += scnprintf(buf + *pos, len - *pos, "TC_MAP_SEL: %#x\n",
- le32_to_cpu(desc[0].data[1]));
- *pos += scnprintf(buf + *pos, len - *pos, "IGU_PFC_PRI_EN: %#x\n",
- le32_to_cpu(desc[0].data[2]));
- *pos += scnprintf(buf + *pos, len - *pos, "MAC_PFC_PRI_EN: %#x\n",
- le32_to_cpu(desc[0].data[3]));
- *pos += scnprintf(buf + *pos, len - *pos, "IGU_PRI_MAP_TC_CFG: %#x\n",
- le32_to_cpu(desc[0].data[4]));
- *pos += scnprintf(buf + *pos, len - *pos,
- "IGU_TX_PRI_MAP_TC_CFG: %#x\n",
- le32_to_cpu(desc[0].data[5]));
+ seq_printf(s, "TC_MAP_SEL: %#x\n", le32_to_cpu(desc[0].data[1]));
+ seq_printf(s, "IGU_PFC_PRI_EN: %#x\n", le32_to_cpu(desc[0].data[2]));
+ seq_printf(s, "MAC_PFC_PRI_EN: %#x\n", le32_to_cpu(desc[0].data[3]));
+ seq_printf(s, "IGU_PRI_MAP_TC_CFG: %#x\n",
+ le32_to_cpu(desc[0].data[4]));
+ seq_printf(s, "IGU_TX_PRI_MAP_TC_CFG: %#x\n",
+ le32_to_cpu(desc[0].data[5]));
return 0;
}
-static int hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_dcb(struct seq_file *s, void *data)
{
- int pos = 0;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
int ret;
- ret = hclge_dbg_dump_dcb_qset(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_dcb_qset(hdev, s);
if (ret)
return ret;
- ret = hclge_dbg_dump_dcb_pri(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_dcb_pri(hdev, s);
if (ret)
return ret;
- ret = hclge_dbg_dump_dcb_pg(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_dcb_pg(hdev, s);
if (ret)
return ret;
- ret = hclge_dbg_dump_dcb_queue(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_dcb_queue(hdev, s);
if (ret)
return ret;
- ret = hclge_dbg_dump_dcb_port(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_dcb_port(hdev, s);
if (ret)
return ret;
- return hclge_dbg_dump_dcb_tm(hdev, buf, len, &pos);
+ return hclge_dbg_dump_dcb_tm(hdev, s);
}
-static int hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev,
- enum hnae3_dbg_cmd cmd, char *buf, int len)
+static int hclge_dbg_dump_reg_cmd(enum hnae3_dbg_cmd cmd, struct seq_file *s)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
const struct hclge_dbg_reg_type_info *reg_info;
- int pos = 0, ret = 0;
- int i;
+ int ret = 0;
+ u32 i;
for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
reg_info = &hclge_dbg_reg_info[i];
if (cmd == reg_info->cmd) {
if (cmd == HNAE3_DBG_CMD_REG_TQP)
- return hclge_dbg_dump_reg_tqp(hdev, reg_info,
- buf, len, &pos);
+ return hclge_dbg_dump_reg_tqp(hdev,
+ reg_info, s);
- ret = hclge_dbg_dump_reg_common(hdev, reg_info, buf,
- len, &pos);
+ ret = hclge_dbg_dump_reg_common(hdev, reg_info, s);
if (ret)
break;
}
@@ -1298,12 +1221,57 @@ static int hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev,
return ret;
}
-static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_bios_reg_cmd(struct seq_file *s, void *data)
{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_BIOS_COMMON, s);
+}
+
+static int hclge_dbg_dump_ssu_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_SSU, s);
+}
+
+static int hclge_dbg_dump_igu_egu_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_IGU_EGU, s);
+}
+
+static int hclge_dbg_dump_rpu_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_RPU, s);
+}
+
+static int hclge_dbg_dump_ncsi_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_NCSI, s);
+}
+
+static int hclge_dbg_dump_rtc_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_RTC, s);
+}
+
+static int hclge_dbg_dump_ppp_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_PPP, s);
+}
+
+static int hclge_dbg_dump_rcb_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_RCB, s);
+}
+
+static int hclge_dbg_dump_tqp_reg_cmd(struct seq_file *s, void *data)
+{
+ return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_TQP, s);
+}
+
+static int hclge_dbg_dump_tc(struct seq_file *s, void *data)
+{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_ets_tc_weight_cmd *ets_weight;
+ const char *sch_mode_str;
struct hclge_desc desc;
- char *sch_mode_str;
- int pos = 0;
int ret;
u8 i;
@@ -1323,72 +1291,37 @@ static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len)
ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
- pos += scnprintf(buf + pos, len - pos, "enabled tc number: %u\n",
- hdev->tm_info.num_tc);
- pos += scnprintf(buf + pos, len - pos, "weight_offset: %u\n",
- ets_weight->weight_offset);
+ seq_printf(s, "enabled tc number: %u\n", hdev->tm_info.num_tc);
+ seq_printf(s, "weight_offset: %u\n", ets_weight->weight_offset);
- pos += scnprintf(buf + pos, len - pos, "TC MODE WEIGHT\n");
+ seq_puts(s, "TC MODE WEIGHT\n");
for (i = 0; i < HNAE3_MAX_TC; i++) {
sch_mode_str = ets_weight->tc_weight[i] ? "dwrr" : "sp";
- pos += scnprintf(buf + pos, len - pos, "%u %4s %3u\n",
- i, sch_mode_str, ets_weight->tc_weight[i]);
+ seq_printf(s, "%u %4s %3u\n", i, sch_mode_str,
+ ets_weight->tc_weight[i]);
}
return 0;
}
-static const struct hclge_dbg_item tm_pg_items[] = {
- { "ID", 2 },
- { "PRI_MAP", 2 },
- { "MODE", 2 },
- { "DWRR", 2 },
- { "C_IR_B", 2 },
- { "C_IR_U", 2 },
- { "C_IR_S", 2 },
- { "C_BS_B", 2 },
- { "C_BS_S", 2 },
- { "C_FLAG", 2 },
- { "C_RATE(Mbps)", 2 },
- { "P_IR_B", 2 },
- { "P_IR_U", 2 },
- { "P_IR_S", 2 },
- { "P_BS_B", 2 },
- { "P_BS_S", 2 },
- { "P_FLAG", 2 },
- { "P_RATE(Mbps)", 0 }
-};
-
-static void hclge_dbg_fill_shaper_content(struct hclge_tm_shaper_para *para,
- char **result, u8 *index)
+static void hclge_dbg_fill_shaper_content(struct seq_file *s,
+ struct hclge_tm_shaper_para *para)
{
- sprintf(result[(*index)++], "%3u", para->ir_b);
- sprintf(result[(*index)++], "%3u", para->ir_u);
- sprintf(result[(*index)++], "%3u", para->ir_s);
- sprintf(result[(*index)++], "%3u", para->bs_b);
- sprintf(result[(*index)++], "%3u", para->bs_s);
- sprintf(result[(*index)++], "%3u", para->flag);
- sprintf(result[(*index)++], "%6u", para->rate);
+ seq_printf(s, "%-8u%-8u%-8u%-8u%-8u%-8u%-14u", para->ir_b, para->ir_u,
+ para->ir_s, para->bs_b, para->bs_s, para->flag, para->rate);
}
-static int __hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *data_str,
- char *buf, int len)
+static int hclge_dbg_dump_tm_pg(struct seq_file *s, void *data)
{
struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
- char *result[ARRAY_SIZE(tm_pg_items)], *sch_mode_str;
- u8 pg_id, sch_mode, weight, pri_bit_map, i, j;
- char content[HCLGE_DBG_TM_INFO_LEN];
- int pos = 0;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
+ u8 pg_id, sch_mode, weight, pri_bit_map;
+ const char *sch_mode_str;
int ret;
- for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++) {
- result[i] = data_str;
- data_str += HCLGE_DBG_DATA_STR_LEN;
- }
-
- hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
- NULL, ARRAY_SIZE(tm_pg_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_puts(s, "ID PRI_MAP MODE DWRR C_IR_B C_IR_U C_IR_S C_BS_B ");
+ seq_puts(s, "C_BS_S C_FLAG C_RATE(Mbps) P_IR_B P_IR_U P_IR_S ");
+ seq_puts(s, "P_BS_B P_BS_S P_FLAG P_RATE(Mbps)\n");
for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
ret = hclge_tm_get_pg_to_pri_map(hdev, pg_id, &pri_bit_map);
@@ -1418,68 +1351,41 @@ static int __hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *data_str,
sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
"sp";
- j = 0;
- sprintf(result[j++], "%02u", pg_id);
- sprintf(result[j++], "0x%02x", pri_bit_map);
- sprintf(result[j++], "%4s", sch_mode_str);
- sprintf(result[j++], "%3u", weight);
- hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
- hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
-
- hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
- (const char **)result,
- ARRAY_SIZE(tm_pg_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_printf(s, "%02u 0x%-7x%-6s%-6u", pg_id, pri_bit_map,
+ sch_mode_str, weight);
+ hclge_dbg_fill_shaper_content(s, &c_shaper_para);
+ hclge_dbg_fill_shaper_content(s, &p_shaper_para);
+ seq_puts(s, "\n");
}
return 0;
}
-static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
-{
- char *data_str;
- int ret;
-
- data_str = kcalloc(ARRAY_SIZE(tm_pg_items),
- HCLGE_DBG_DATA_STR_LEN, GFP_KERNEL);
- if (!data_str)
- return -ENOMEM;
-
- ret = __hclge_dbg_dump_tm_pg(hdev, data_str, buf, len);
-
- kfree(data_str);
-
- return ret;
-}
-
-static int hclge_dbg_dump_tm_port(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_tm_port(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_tm_shaper_para shaper_para;
- int pos = 0;
int ret;
ret = hclge_tm_get_port_shaper(hdev, &shaper_para);
if (ret)
return ret;
- pos += scnprintf(buf + pos, len - pos,
- "IR_B IR_U IR_S BS_B BS_S FLAG RATE(Mbps)\n");
- pos += scnprintf(buf + pos, len - pos,
- "%3u %3u %3u %3u %3u %1u %6u\n",
- shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s,
- shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag,
- shaper_para.rate);
+ seq_puts(s, "IR_B IR_U IR_S BS_B BS_S FLAG RATE(Mbps)\n");
+ seq_printf(s, "%3u %3u %3u %3u %3u %1u %6u\n",
+ shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s,
+ shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag,
+ shaper_para.rate);
return 0;
}
static int hclge_dbg_dump_tm_bp_qset_map(struct hclge_dev *hdev, u8 tc_id,
- char *buf, int len)
+ struct seq_file *s)
{
u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM];
struct hclge_bp_to_qs_map_cmd *map;
struct hclge_desc desc;
- int pos = 0;
u8 group_id;
u8 grp_num;
u16 i = 0;
@@ -1505,27 +1411,27 @@ static int hclge_dbg_dump_tm_bp_qset_map(struct hclge_dev *hdev, u8 tc_id,
qset_mapping[group_id] = le32_to_cpu(map->qs_bit_map);
}
- pos += scnprintf(buf + pos, len - pos, "INDEX | TM BP QSET MAPPING:\n");
+ seq_puts(s, "INDEX | TM BP QSET MAPPING:\n");
for (group_id = 0; group_id < grp_num / 8; group_id++) {
- pos += scnprintf(buf + pos, len - pos,
- "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
- group_id * 256, qset_mapping[i + 7],
- qset_mapping[i + 6], qset_mapping[i + 5],
- qset_mapping[i + 4], qset_mapping[i + 3],
- qset_mapping[i + 2], qset_mapping[i + 1],
- qset_mapping[i]);
+ seq_printf(s,
+ "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
+ group_id * 256, qset_mapping[i + 7],
+ qset_mapping[i + 6], qset_mapping[i + 5],
+ qset_mapping[i + 4], qset_mapping[i + 3],
+ qset_mapping[i + 2], qset_mapping[i + 1],
+ qset_mapping[i]);
i += 8;
}
- return pos;
+ return 0;
}
-static int hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_tm_map(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
u16 queue_id;
u16 qset_id;
u8 link_vld;
- int pos = 0;
u8 pri_id;
u8 tc_id;
int ret;
@@ -1544,32 +1450,28 @@ static int hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *buf, int len)
if (ret)
return ret;
- pos += scnprintf(buf + pos, len - pos,
- "QUEUE_ID QSET_ID PRI_ID TC_ID\n");
- pos += scnprintf(buf + pos, len - pos,
- "%04u %4u %3u %2u\n",
- queue_id, qset_id, pri_id, tc_id);
+ seq_puts(s, "QUEUE_ID QSET_ID PRI_ID TC_ID\n");
+ seq_printf(s, "%04u %4u %3u %2u\n",
+ queue_id, qset_id, pri_id, tc_id);
if (!hnae3_dev_dcb_supported(hdev))
continue;
- ret = hclge_dbg_dump_tm_bp_qset_map(hdev, tc_id, buf + pos,
- len - pos);
+ ret = hclge_dbg_dump_tm_bp_qset_map(hdev, tc_id, s);
if (ret < 0)
return ret;
- pos += ret;
- pos += scnprintf(buf + pos, len - pos, "\n");
+ seq_puts(s, "\n");
}
return 0;
}
-static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_tm_nodes(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_tm_nodes_cmd *nodes;
struct hclge_desc desc;
- int pos = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
@@ -1582,65 +1484,36 @@ static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
nodes = (struct hclge_tm_nodes_cmd *)desc.data;
- pos += scnprintf(buf + pos, len - pos, " BASE_ID MAX_NUM\n");
- pos += scnprintf(buf + pos, len - pos, "PG %4u %4u\n",
- nodes->pg_base_id, nodes->pg_num);
- pos += scnprintf(buf + pos, len - pos, "PRI %4u %4u\n",
- nodes->pri_base_id, nodes->pri_num);
- pos += scnprintf(buf + pos, len - pos, "QSET %4u %4u\n",
- le16_to_cpu(nodes->qset_base_id),
- le16_to_cpu(nodes->qset_num));
- pos += scnprintf(buf + pos, len - pos, "QUEUE %4u %4u\n",
- le16_to_cpu(nodes->queue_base_id),
- le16_to_cpu(nodes->queue_num));
+ seq_puts(s, " BASE_ID MAX_NUM\n");
+ seq_printf(s, "PG %4u %4u\n", nodes->pg_base_id,
+ nodes->pg_num);
+ seq_printf(s, "PRI %4u %4u\n", nodes->pri_base_id,
+ nodes->pri_num);
+ seq_printf(s, "QSET %4u %4u\n",
+ le16_to_cpu(nodes->qset_base_id),
+ le16_to_cpu(nodes->qset_num));
+ seq_printf(s, "QUEUE %4u %4u\n",
+ le16_to_cpu(nodes->queue_base_id),
+ le16_to_cpu(nodes->queue_num));
return 0;
}
-static const struct hclge_dbg_item tm_pri_items[] = {
- { "ID", 4 },
- { "MODE", 2 },
- { "DWRR", 2 },
- { "C_IR_B", 2 },
- { "C_IR_U", 2 },
- { "C_IR_S", 2 },
- { "C_BS_B", 2 },
- { "C_BS_S", 2 },
- { "C_FLAG", 2 },
- { "C_RATE(Mbps)", 2 },
- { "P_IR_B", 2 },
- { "P_IR_U", 2 },
- { "P_IR_S", 2 },
- { "P_BS_B", 2 },
- { "P_BS_S", 2 },
- { "P_FLAG", 2 },
- { "P_RATE(Mbps)", 0 }
-};
-
-static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_tm_pri(struct seq_file *s, void *data)
{
struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
- char *result[ARRAY_SIZE(tm_pri_items)], *sch_mode_str;
- char content[HCLGE_DBG_TM_INFO_LEN];
- u8 pri_num, sch_mode, weight, i, j;
- char *data_str;
- int pos, ret;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
+ u8 pri_num, sch_mode, weight, i;
+ const char *sch_mode_str;
+ int ret;
ret = hclge_tm_get_pri_num(hdev, &pri_num);
if (ret)
return ret;
- data_str = kcalloc(ARRAY_SIZE(tm_pri_items), HCLGE_DBG_DATA_STR_LEN,
- GFP_KERNEL);
- if (!data_str)
- return -ENOMEM;
-
- for (i = 0; i < ARRAY_SIZE(tm_pri_items); i++)
- result[i] = &data_str[i * HCLGE_DBG_DATA_STR_LEN];
-
- hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
- NULL, ARRAY_SIZE(tm_pri_items));
- pos = scnprintf(buf, len, "%s", content);
+ seq_puts(s, "ID MODE DWRR C_IR_B C_IR_U C_IR_S C_BS_B ");
+ seq_puts(s, "C_BS_S C_FLAG C_RATE(Mbps) P_IR_B P_IR_U P_IR_S ");
+ seq_puts(s, "P_BS_B P_BS_S P_FLAG P_RATE(Mbps)\n");
for (i = 0; i < pri_num; i++) {
ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode);
@@ -1666,59 +1539,31 @@ static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
"sp";
- j = 0;
- sprintf(result[j++], "%04u", i);
- sprintf(result[j++], "%4s", sch_mode_str);
- sprintf(result[j++], "%3u", weight);
- hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
- hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
- hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
- (const char **)result,
- ARRAY_SIZE(tm_pri_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_printf(s, "%04u %-6s%-6u", i, sch_mode_str, weight);
+ hclge_dbg_fill_shaper_content(s, &c_shaper_para);
+ hclge_dbg_fill_shaper_content(s, &p_shaper_para);
+ seq_puts(s, "\n");
}
out:
- kfree(data_str);
return ret;
}
-static const struct hclge_dbg_item tm_qset_items[] = {
- { "ID", 4 },
- { "MAP_PRI", 2 },
- { "LINK_VLD", 2 },
- { "MODE", 2 },
- { "DWRR", 2 },
- { "IR_B", 2 },
- { "IR_U", 2 },
- { "IR_S", 2 },
- { "BS_B", 2 },
- { "BS_S", 2 },
- { "FLAG", 2 },
- { "RATE(Mbps)", 0 }
-};
-
-static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_tm_qset(struct seq_file *s, void *data)
{
- char data_str[ARRAY_SIZE(tm_qset_items)][HCLGE_DBG_DATA_STR_LEN];
- char *result[ARRAY_SIZE(tm_qset_items)], *sch_mode_str;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
u8 priority, link_vld, sch_mode, weight;
struct hclge_tm_shaper_para shaper_para;
- char content[HCLGE_DBG_TM_INFO_LEN];
+ const char *sch_mode_str;
u16 qset_num, i;
- int ret, pos;
- u8 j;
+ int ret;
ret = hclge_tm_get_qset_num(hdev, &qset_num);
if (ret)
return ret;
- for (i = 0; i < ARRAY_SIZE(tm_qset_items); i++)
- result[i] = &data_str[i][0];
-
- hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
- NULL, ARRAY_SIZE(tm_qset_items));
- pos = scnprintf(buf, len, "%s", content);
+ seq_puts(s, "ID MAP_PRI LINK_VLD MODE DWRR IR_B IR_U IR_S ");
+ seq_puts(s, "BS_B BS_S FLAG RATE(Mbps)\n");
for (i = 0; i < qset_num; i++) {
ret = hclge_tm_get_qset_map_pri(hdev, i, &priority, &link_vld);
@@ -1740,29 +1585,22 @@ static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len)
sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
"sp";
- j = 0;
- sprintf(result[j++], "%04u", i);
- sprintf(result[j++], "%4u", priority);
- sprintf(result[j++], "%4u", link_vld);
- sprintf(result[j++], "%4s", sch_mode_str);
- sprintf(result[j++], "%3u", weight);
- hclge_dbg_fill_shaper_content(&shaper_para, result, &j);
-
- hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
- (const char **)result,
- ARRAY_SIZE(tm_qset_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_printf(s, "%04u %-9u%-10u%-6s%-6u", i, priority, link_vld,
+ sch_mode_str, weight);
+ seq_printf(s, "%-6u%-6u%-6u%-6u%-6u%-6u%-14u\n",
+ shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s,
+ shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag,
+ shaper_para.rate);
}
return 0;
}
-static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_qos_pause_cfg(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_cfg_pause_param_cmd *pause_param;
struct hclge_desc desc;
- int pos = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
@@ -1775,23 +1613,21 @@ static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf,
pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
- pos += scnprintf(buf + pos, len - pos, "pause_trans_gap: 0x%x\n",
- pause_param->pause_trans_gap);
- pos += scnprintf(buf + pos, len - pos, "pause_trans_time: 0x%x\n",
- le16_to_cpu(pause_param->pause_trans_time));
+ seq_printf(s, "pause_trans_gap: 0x%x\n", pause_param->pause_trans_gap);
+ seq_printf(s, "pause_trans_time: 0x%x\n",
+ le16_to_cpu(pause_param->pause_trans_time));
return 0;
}
#define HCLGE_DBG_TC_MASK 0x0F
-static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_qos_pri_map(struct seq_file *s, void *data)
{
#define HCLGE_DBG_TC_BIT_WIDTH 4
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_qos_pri_map_cmd *pri_map;
struct hclge_desc desc;
- int pos = 0;
u8 *pri_tc;
u8 tc, i;
int ret;
@@ -1806,33 +1642,33 @@ static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
- pos += scnprintf(buf + pos, len - pos, "vlan_to_pri: 0x%x\n",
- pri_map->vlan_pri);
- pos += scnprintf(buf + pos, len - pos, "PRI TC\n");
+ seq_printf(s, "vlan_to_pri: 0x%x\n", pri_map->vlan_pri);
+ seq_puts(s, "PRI TC\n");
pri_tc = (u8 *)pri_map;
for (i = 0; i < HNAE3_MAX_TC; i++) {
tc = pri_tc[i >> 1] >> ((i & 1) * HCLGE_DBG_TC_BIT_WIDTH);
tc &= HCLGE_DBG_TC_MASK;
- pos += scnprintf(buf + pos, len - pos, "%u %u\n", i, tc);
+ seq_printf(s, "%u %u\n", i, tc);
}
return 0;
}
-static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_qos_dscp_map(struct seq_file *s, void *data)
{
- struct hnae3_knic_private_info *kinfo = &hdev->vport[0].nic.kinfo;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
+ struct hnae3_knic_private_info *kinfo;
u8 *req0 = (u8 *)desc[0].data;
u8 *req1 = (u8 *)desc[1].data;
u8 dscp_tc[HNAE3_MAX_DSCP];
- int pos, ret;
+ int ret;
u8 i, j;
- pos = scnprintf(buf, len, "tc map mode: %s\n",
- tc_map_mode_str[kinfo->tc_map_mode]);
+ kinfo = &hdev->vport[0].nic.kinfo;
+
+ seq_printf(s, "tc map mode: %s\n", tc_map_mode_str[kinfo->tc_map_mode]);
if (kinfo->tc_map_mode != HNAE3_TC_MAP_MODE_DSCP)
return 0;
@@ -1847,7 +1683,7 @@ static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf,
return ret;
}
- pos += scnprintf(buf + pos, len - pos, "\nDSCP PRIO TC\n");
+ seq_puts(s, "\nDSCP PRIO TC\n");
/* The low 32 dscp setting use bd0, high 32 dscp setting use bd1 */
for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
@@ -1865,18 +1701,17 @@ static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf,
if (kinfo->dscp_prio[i] == HNAE3_PRIO_ID_INVALID)
continue;
- pos += scnprintf(buf + pos, len - pos, " %2u %u %u\n",
- i, kinfo->dscp_prio[i], dscp_tc[i]);
+ seq_printf(s, " %2u %u %u\n", i, kinfo->dscp_prio[i],
+ dscp_tc[i]);
}
return 0;
}
-static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, struct seq_file *s)
{
struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
struct hclge_desc desc;
- int pos = 0;
int i, ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true);
@@ -1889,19 +1724,17 @@ static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len)
tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
- pos += scnprintf(buf + pos, len - pos,
- "tx_packet_buf_tc_%d: 0x%x\n", i,
- le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
+ seq_printf(s, "tx_packet_buf_tc_%d: 0x%x\n", i,
+ le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
- return pos;
+ return 0;
}
-static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev,
+ struct seq_file *s)
{
struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
struct hclge_desc desc;
- int pos = 0;
int i, ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true);
@@ -1912,26 +1745,24 @@ static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev, char *buf,
return ret;
}
- pos += scnprintf(buf + pos, len - pos, "\n");
+ seq_puts(s, "\n");
rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
- pos += scnprintf(buf + pos, len - pos,
- "rx_packet_buf_tc_%d: 0x%x\n", i,
- le16_to_cpu(rx_buf_cmd->buf_num[i]));
+ seq_printf(s, "rx_packet_buf_tc_%d: 0x%x\n", i,
+ le16_to_cpu(rx_buf_cmd->buf_num[i]));
- pos += scnprintf(buf + pos, len - pos, "rx_share_buf: 0x%x\n",
- le16_to_cpu(rx_buf_cmd->shared_buf));
+ seq_printf(s, "rx_share_buf: 0x%x\n",
+ le16_to_cpu(rx_buf_cmd->shared_buf));
- return pos;
+ return 0;
}
-static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev,
+ struct seq_file *s)
{
struct hclge_rx_com_wl *rx_com_wl;
struct hclge_desc desc;
- int pos = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true);
@@ -1943,21 +1774,19 @@ static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev, char *buf,
}
rx_com_wl = (struct hclge_rx_com_wl *)desc.data;
- pos += scnprintf(buf + pos, len - pos, "\n");
- pos += scnprintf(buf + pos, len - pos,
- "rx_com_wl: high: 0x%x, low: 0x%x\n",
- le16_to_cpu(rx_com_wl->com_wl.high),
- le16_to_cpu(rx_com_wl->com_wl.low));
+ seq_puts(s, "\n");
+ seq_printf(s, "rx_com_wl: high: 0x%x, low: 0x%x\n",
+ le16_to_cpu(rx_com_wl->com_wl.high),
+ le16_to_cpu(rx_com_wl->com_wl.low));
- return pos;
+ return 0;
}
-static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev,
+ struct seq_file *s)
{
struct hclge_rx_com_wl *rx_packet_cnt;
struct hclge_desc desc;
- int pos = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true);
@@ -1969,20 +1798,18 @@ static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev, char *buf,
}
rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data;
- pos += scnprintf(buf + pos, len - pos,
- "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
- le16_to_cpu(rx_packet_cnt->com_wl.high),
- le16_to_cpu(rx_packet_cnt->com_wl.low));
+ seq_printf(s, "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
+ le16_to_cpu(rx_packet_cnt->com_wl.high),
+ le16_to_cpu(rx_packet_cnt->com_wl.low));
- return pos;
+ return 0;
}
-static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev,
+ struct seq_file *s)
{
struct hclge_rx_priv_wl_buf *rx_priv_wl;
struct hclge_desc desc[2];
- int pos = 0;
int i, ret;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
@@ -1997,28 +1824,25 @@ static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf,
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
- pos += scnprintf(buf + pos, len - pos,
- "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
- le16_to_cpu(rx_priv_wl->tc_wl[i].high),
- le16_to_cpu(rx_priv_wl->tc_wl[i].low));
+ seq_printf(s, "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
+ le16_to_cpu(rx_priv_wl->tc_wl[i].high),
+ le16_to_cpu(rx_priv_wl->tc_wl[i].low));
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
- pos += scnprintf(buf + pos, len - pos,
- "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
- i + HCLGE_TC_NUM_ONE_DESC,
- le16_to_cpu(rx_priv_wl->tc_wl[i].high),
- le16_to_cpu(rx_priv_wl->tc_wl[i].low));
+ seq_printf(s, "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
+ i + HCLGE_TC_NUM_ONE_DESC,
+ le16_to_cpu(rx_priv_wl->tc_wl[i].high),
+ le16_to_cpu(rx_priv_wl->tc_wl[i].low));
- return pos;
+ return 0;
}
static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev,
- char *buf, int len)
+ struct seq_file *s)
{
struct hclge_rx_com_thrd *rx_com_thrd;
struct hclge_desc desc[2];
- int pos = 0;
int i, ret;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
@@ -2031,86 +1855,75 @@ static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev,
return ret;
}
- pos += scnprintf(buf + pos, len - pos, "\n");
+ seq_puts(s, "\n");
rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
- pos += scnprintf(buf + pos, len - pos,
- "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
- le16_to_cpu(rx_com_thrd->com_thrd[i].high),
- le16_to_cpu(rx_com_thrd->com_thrd[i].low));
+ seq_printf(s, "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
+ le16_to_cpu(rx_com_thrd->com_thrd[i].high),
+ le16_to_cpu(rx_com_thrd->com_thrd[i].low));
rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
- pos += scnprintf(buf + pos, len - pos,
- "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
- i + HCLGE_TC_NUM_ONE_DESC,
- le16_to_cpu(rx_com_thrd->com_thrd[i].high),
- le16_to_cpu(rx_com_thrd->com_thrd[i].low));
+ seq_printf(s, "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
+ i + HCLGE_TC_NUM_ONE_DESC,
+ le16_to_cpu(rx_com_thrd->com_thrd[i].high),
+ le16_to_cpu(rx_com_thrd->com_thrd[i].low));
- return pos;
+ return 0;
}
-static int hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_qos_buf_cfg(struct seq_file *s, void *data)
{
- int pos = 0;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
int ret;
- ret = hclge_dbg_dump_tx_buf_cfg(hdev, buf + pos, len - pos);
+ ret = hclge_dbg_dump_tx_buf_cfg(hdev, s);
if (ret < 0)
return ret;
- pos += ret;
- ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev, buf + pos, len - pos);
+ ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev, s);
if (ret < 0)
return ret;
- pos += ret;
- ret = hclge_dbg_dump_rx_common_wl_cfg(hdev, buf + pos, len - pos);
+ ret = hclge_dbg_dump_rx_common_wl_cfg(hdev, s);
if (ret < 0)
return ret;
- pos += ret;
- ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev, buf + pos, len - pos);
+ ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev, s);
if (ret < 0)
return ret;
- pos += ret;
- pos += scnprintf(buf + pos, len - pos, "\n");
+ seq_puts(s, "\n");
if (!hnae3_dev_dcb_supported(hdev))
return 0;
- ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev, buf + pos, len - pos);
+ ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev, s);
if (ret < 0)
return ret;
- pos += ret;
- ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev, buf + pos,
- len - pos);
+ ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev, s);
if (ret < 0)
return ret;
return 0;
}
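/*
 * A rough sketch of what the hclge_seq_file_to_hdev() helper used by the
 * converted callbacks above is assumed to look like; its definition is not
 * shown in this hunk, and the exact private-data layout is an assumption.
 */
static struct hclge_dev *hclge_seq_file_to_hdev_sketch(struct seq_file *s)
{
	/* assumed: the debugfs core stores the hnae3 handle in s->private */
	struct hnae3_handle *handle = s->private;
	struct hclge_vport *vport = hclge_get_vport(handle);

	return vport->back;
}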
-static int hclge_dbg_dump_mng_table(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_mng_table(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_mac_ethertype_idx_rd_cmd *req0;
struct hclge_desc desc;
u32 msg_egress_port;
- int pos = 0;
int ret, i;
- pos += scnprintf(buf + pos, len - pos,
- "entry mac_addr mask ether ");
- pos += scnprintf(buf + pos, len - pos,
- "mask vlan mask i_map i_dir e_type ");
- pos += scnprintf(buf + pos, len - pos, "pf_id vf_id q_id drop\n");
+ seq_puts(s, "entry mac_addr mask ether ");
+ seq_puts(s, "mask vlan mask i_map i_dir e_type ");
+ seq_puts(s, "pf_id vf_id q_id drop\n");
for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) {
hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD,
true);
- req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data;
+ req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)desc.data;
req0->index = cpu_to_le16(i);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2123,46 +1936,40 @@ static int hclge_dbg_dump_mng_table(struct hclge_dev *hdev, char *buf, int len)
if (!req0->resp_code)
continue;
- pos += scnprintf(buf + pos, len - pos, "%02u %pM ",
- le16_to_cpu(req0->index), req0->mac_addr);
+ seq_printf(s, "%02u %pM ",
+ le16_to_cpu(req0->index), req0->mac_addr);
- pos += scnprintf(buf + pos, len - pos,
- "%x %04x %x %04x ",
- !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
- le16_to_cpu(req0->ethter_type),
- !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
- le16_to_cpu(req0->vlan_tag) &
- HCLGE_DBG_MNG_VLAN_TAG);
+ seq_printf(s, "%x %04x %x %04x ",
+ !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
+ le16_to_cpu(req0->ethter_type),
+ !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
+ le16_to_cpu(req0->vlan_tag) &
+ HCLGE_DBG_MNG_VLAN_TAG);
- pos += scnprintf(buf + pos, len - pos,
- "%x %02x %02x ",
- !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
- req0->i_port_bitmap, req0->i_port_direction);
+ seq_printf(s, "%x %02x %02x ",
+ !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
+ req0->i_port_bitmap, req0->i_port_direction);
msg_egress_port = le16_to_cpu(req0->egress_port);
- pos += scnprintf(buf + pos, len - pos,
- "%x %x %02x %04x %x\n",
- !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
- msg_egress_port & HCLGE_DBG_MNG_PF_ID,
- (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
- le16_to_cpu(req0->egress_queue),
- !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));
+ seq_printf(s, "%x %x %02x %04x %x\n",
+ !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
+ msg_egress_port & HCLGE_DBG_MNG_PF_ID,
+ (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
+ le16_to_cpu(req0->egress_queue),
+ !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));
}
return 0;
}
-#define HCLGE_DBG_TCAM_BUF_SIZE 256
-
static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
- char *tcam_buf,
+ struct seq_file *s,
struct hclge_dbg_tcam_msg tcam_msg)
{
struct hclge_fd_tcam_config_1_cmd *req1;
struct hclge_fd_tcam_config_2_cmd *req2;
struct hclge_fd_tcam_config_3_cmd *req3;
struct hclge_desc desc[3];
- int pos = 0;
int ret, i;
__le32 *req;
@@ -2184,27 +1991,23 @@ static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
if (ret)
return ret;
- pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
- "read result tcam key %s(%u):\n", sel_x ? "x" : "y",
- tcam_msg.loc);
+ seq_printf(s, "read result tcam key %s(%u):\n",
+ sel_x ? "x" : "y", tcam_msg.loc);
/* tcam_data0 ~ tcam_data1 */
req = (__le32 *)req1->tcam_data;
for (i = 0; i < 2; i++)
- pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
- "%08x\n", le32_to_cpu(*req++));
+ seq_printf(s, "%08x\n", le32_to_cpu(*req++));
/* tcam_data2 ~ tcam_data7 */
req = (__le32 *)req2->tcam_data;
for (i = 0; i < 6; i++)
- pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
- "%08x\n", le32_to_cpu(*req++));
+ seq_printf(s, "%08x\n", le32_to_cpu(*req++));
/* tcam_data8 ~ tcam_data12 */
req = (__le32 *)req3->tcam_data;
for (i = 0; i < 5; i++)
- pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
- "%08x\n", le32_to_cpu(*req++));
+ seq_printf(s, "%08x\n", le32_to_cpu(*req++));
return ret;
}
@@ -2228,14 +2031,13 @@ static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs)
return cnt;
}
-static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_fd_tcam(struct seq_file *s, void *data)
{
- u32 rule_num = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_dbg_tcam_msg tcam_msg;
int i, ret, rule_cnt;
u16 *rule_locs;
- char *tcam_buf;
- int pos = 0;
+ u32 rule_num;
if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
dev_err(&hdev->pdev->dev,
@@ -2243,6 +2045,7 @@ static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
return -EOPNOTSUPP;
}
+ rule_num = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
if (!hdev->hclge_fd_rule_num || !rule_num)
return 0;
@@ -2250,12 +2053,6 @@ static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
if (!rule_locs)
return -ENOMEM;
- tcam_buf = kzalloc(HCLGE_DBG_TCAM_BUF_SIZE, GFP_KERNEL);
- if (!tcam_buf) {
- kfree(rule_locs);
- return -ENOMEM;
- }
-
rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs);
if (rule_cnt < 0) {
ret = rule_cnt;
@@ -2269,38 +2066,34 @@ static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
tcam_msg.stage = HCLGE_FD_STAGE_1;
tcam_msg.loc = rule_locs[i];
- ret = hclge_dbg_fd_tcam_read(hdev, true, tcam_buf, tcam_msg);
+ ret = hclge_dbg_fd_tcam_read(hdev, true, s, tcam_msg);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to get fd tcam key x, ret = %d\n", ret);
goto out;
}
- pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
-
- ret = hclge_dbg_fd_tcam_read(hdev, false, tcam_buf, tcam_msg);
+ ret = hclge_dbg_fd_tcam_read(hdev, false, s, tcam_msg);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to get fd tcam key y, ret = %d\n", ret);
goto out;
}
- pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
}
out:
- kfree(tcam_buf);
kfree(rule_locs);
return ret;
}
-static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_fd_counter(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
struct hclge_fd_ad_cnt_read_cmd *req;
char str_id[HCLGE_DBG_ID_LEN];
struct hclge_desc desc;
- int pos = 0;
int ret;
u64 cnt;
u8 i;
@@ -2308,8 +2101,7 @@ static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
- pos += scnprintf(buf + pos, len - pos,
- "func_id\thit_times\n");
+ seq_puts(s, "func_id\thit_times\n");
for (i = 0; i < func_num; i++) {
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_CNT_OP, true);
@@ -2323,8 +2115,7 @@ static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
}
cnt = le64_to_cpu(req->cnt);
hclge_dbg_get_func_id_str(str_id, i);
- pos += scnprintf(buf + pos, len - pos,
- "%s\t%llu\n", str_id, cnt);
+ seq_printf(s, "%s\t%llu\n", str_id, cnt);
}
return 0;
@@ -2375,74 +2166,95 @@ int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len)
return 0;
}
-static int hclge_dbg_dump_serv_info(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_seq_dump_rst_info(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
+ u32 i, offset;
+
+ seq_printf(s, "PF reset count: %u\n", hdev->rst_stats.pf_rst_cnt);
+ seq_printf(s, "FLR reset count: %u\n", hdev->rst_stats.flr_rst_cnt);
+ seq_printf(s, "GLOBAL reset count: %u\n",
+ hdev->rst_stats.global_rst_cnt);
+ seq_printf(s, "IMP reset count: %u\n", hdev->rst_stats.imp_rst_cnt);
+ seq_printf(s, "reset done count: %u\n", hdev->rst_stats.reset_done_cnt);
+ seq_printf(s, "HW reset done count: %u\n",
+ hdev->rst_stats.hw_reset_done_cnt);
+ seq_printf(s, "reset count: %u\n", hdev->rst_stats.reset_cnt);
+ seq_printf(s, "reset fail count: %u\n", hdev->rst_stats.reset_fail_cnt);
+
+ for (i = 0; i < ARRAY_SIZE(hclge_dbg_rst_info); i++) {
+ offset = hclge_dbg_rst_info[i].offset;
+ seq_printf(s, "%s: 0x%x\n",
+ hclge_dbg_rst_info[i].message,
+ hclge_read_dev(&hdev->hw, offset));
+ }
+
+ seq_printf(s, "hdev state: 0x%lx\n", hdev->state);
+
+ return 0;
+}
+
+static int hclge_dbg_dump_serv_info(struct seq_file *s, void *data)
+{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
unsigned long rem_nsec;
- int pos = 0;
u64 lc;
lc = local_clock();
rem_nsec = do_div(lc, HCLGE_BILLION_NANO_SECONDS);
- pos += scnprintf(buf + pos, len - pos, "local_clock: [%5lu.%06lu]\n",
- (unsigned long)lc, rem_nsec / 1000);
- pos += scnprintf(buf + pos, len - pos, "delta: %u(ms)\n",
- jiffies_to_msecs(jiffies - hdev->last_serv_processed));
- pos += scnprintf(buf + pos, len - pos,
- "last_service_task_processed: %lu(jiffies)\n",
- hdev->last_serv_processed);
- pos += scnprintf(buf + pos, len - pos, "last_service_task_cnt: %lu\n",
- hdev->serv_processed_cnt);
+ seq_printf(s, "local_clock: [%5lu.%06lu]\n",
+ (unsigned long)lc, rem_nsec / 1000);
+ seq_printf(s, "delta: %u(ms)\n",
+ jiffies_to_msecs(jiffies - hdev->last_serv_processed));
+ seq_printf(s, "last_service_task_processed: %lu(jiffies)\n",
+ hdev->last_serv_processed);
+ seq_printf(s, "last_service_task_cnt: %lu\n", hdev->serv_processed_cnt);
return 0;
}
-static int hclge_dbg_dump_interrupt(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_interrupt(struct seq_file *s, void *data)
{
- int pos = 0;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
- pos += scnprintf(buf + pos, len - pos, "num_nic_msi: %u\n",
- hdev->num_nic_msi);
- pos += scnprintf(buf + pos, len - pos, "num_roce_msi: %u\n",
- hdev->num_roce_msi);
- pos += scnprintf(buf + pos, len - pos, "num_msi_used: %u\n",
- hdev->num_msi_used);
- pos += scnprintf(buf + pos, len - pos, "num_msi_left: %u\n",
- hdev->num_msi_left);
+ seq_printf(s, "num_nic_msi: %u\n", hdev->num_nic_msi);
+ seq_printf(s, "num_roce_msi: %u\n", hdev->num_roce_msi);
+ seq_printf(s, "num_msi_used: %u\n", hdev->num_msi_used);
+ seq_printf(s, "num_msi_left: %u\n", hdev->num_msi_left);
return 0;
}
-static void hclge_dbg_imp_info_data_print(struct hclge_desc *desc_src,
- char *buf, int len, u32 bd_num)
+static void hclge_dbg_imp_info_data_print(struct seq_file *s,
+ struct hclge_desc *desc_src,
+ u32 bd_num)
{
#define HCLGE_DBG_IMP_INFO_PRINT_OFFSET 0x2
struct hclge_desc *desc_index = desc_src;
u32 offset = 0;
- int pos = 0;
u32 i, j;
- pos += scnprintf(buf + pos, len - pos, "offset | data\n");
+ seq_puts(s, "offset | data\n");
for (i = 0; i < bd_num; i++) {
j = 0;
while (j < HCLGE_DESC_DATA_LEN - 1) {
- pos += scnprintf(buf + pos, len - pos, "0x%04x | ",
- offset);
- pos += scnprintf(buf + pos, len - pos, "0x%08x ",
- le32_to_cpu(desc_index->data[j++]));
- pos += scnprintf(buf + pos, len - pos, "0x%08x\n",
- le32_to_cpu(desc_index->data[j++]));
+ seq_printf(s, "0x%04x | ", offset);
+ seq_printf(s, "0x%08x ",
+ le32_to_cpu(desc_index->data[j++]));
+ seq_printf(s, "0x%08x\n",
+ le32_to_cpu(desc_index->data[j++]));
offset += sizeof(u32) * HCLGE_DBG_IMP_INFO_PRINT_OFFSET;
}
desc_index++;
}
}
-static int
-hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_get_imp_stats_info(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_get_imp_bd_cmd *req;
struct hclge_desc *desc_src;
struct hclge_desc desc;
@@ -2479,7 +2291,7 @@ hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
return ret;
}
- hclge_dbg_imp_info_data_print(desc_src, buf, len, bd_num);
+ hclge_dbg_imp_info_data_print(s, desc_src, bd_num);
kfree(desc_src);
@@ -2490,7 +2302,7 @@ hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
#define HCLGE_MAX_NCL_CONFIG_LENGTH 16384
static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
- char *buf, int len, int *pos)
+ struct seq_file *s)
{
#define HCLGE_CMD_DATA_NUM 6
@@ -2502,9 +2314,8 @@ static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
if (i == 0 && j == 0)
continue;
- *pos += scnprintf(buf + *pos, len - *pos,
- "0x%04x | 0x%08x\n", offset,
- le32_to_cpu(desc[i].data[j]));
+ seq_printf(s, "0x%04x | 0x%08x\n", offset,
+ le32_to_cpu(desc[i].data[j]));
offset += sizeof(u32);
*index -= sizeof(u32);
@@ -2515,19 +2326,18 @@ static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
}
}
-static int
-hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_ncl_config(struct seq_file *s, void *data)
{
#define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD (20 + 24 * 4)
struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
int index = HCLGE_MAX_NCL_CONFIG_LENGTH;
- int pos = 0;
u32 data0;
int ret;
- pos += scnprintf(buf + pos, len - pos, "offset | data\n");
+ seq_puts(s, "offset | data\n");
while (index > 0) {
data0 = HCLGE_MAX_NCL_CONFIG_LENGTH - index;
@@ -2540,27 +2350,26 @@ hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len)
if (ret)
return ret;
- hclge_ncl_config_data_print(desc, &index, buf, len, &pos);
+ hclge_ncl_config_data_print(desc, &index, s);
}
return 0;
}
-static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_loopback(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct phy_device *phydev = hdev->hw.mac.phydev;
struct hclge_config_mac_mode_cmd *req_app;
struct hclge_common_lb_cmd *req_common;
struct hclge_desc desc;
u8 loopback_en;
- int pos = 0;
int ret;
req_app = (struct hclge_config_mac_mode_cmd *)desc.data;
req_common = (struct hclge_common_lb_cmd *)desc.data;
- pos += scnprintf(buf + pos, len - pos, "mac id: %u\n",
- hdev->hw.mac.mac_id);
+ seq_printf(s, "mac id: %u\n", hdev->hw.mac.mac_id);
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2572,8 +2381,7 @@ static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
HCLGE_MAC_APP_LP_B);
- pos += scnprintf(buf + pos, len - pos, "app loopback: %s\n",
- state_str[loopback_en]);
+ seq_printf(s, "app loopback: %s\n", str_on_off(loopback_en));
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2584,24 +2392,22 @@ static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
return ret;
}
- loopback_en = req_common->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
- pos += scnprintf(buf + pos, len - pos, "serdes serial loopback: %s\n",
- state_str[loopback_en]);
+ loopback_en = req_common->enable &
+ HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+ seq_printf(s, "serdes serial loopback: %s\n", str_on_off(loopback_en));
loopback_en = req_common->enable &
- HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0;
- pos += scnprintf(buf + pos, len - pos, "serdes parallel loopback: %s\n",
- state_str[loopback_en]);
+ HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0;
+ seq_printf(s, "serdes parallel loopback: %s\n",
+ str_on_off(loopback_en));
if (phydev) {
loopback_en = phydev->loopback_enabled;
- pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
- state_str[loopback_en]);
+ seq_printf(s, "phy loopback: %s\n", str_on_off(loopback_en));
} else if (hnae3_dev_phy_imp_supported(hdev)) {
loopback_en = req_common->enable &
HCLGE_CMD_GE_PHY_INNER_LOOP_B;
- pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
- state_str[loopback_en]);
+ seq_printf(s, "phy loopback: %s\n", str_on_off(loopback_en));
}
return 0;
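/*
 * For reference (sketch, not taken from this patch): str_on_off(),
 * str_yes_no() and str_enable_disable() used in the converted output come
 * from <linux/string_choices.h> and reduce to ternaries of the form:
 */
static inline const char *str_on_off_sketch(bool v)
{
	return v ? "on" : "off";
}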
@@ -2610,107 +2416,75 @@ static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
/* hclge_dbg_dump_mac_tnl_status: print messages about mac tnl interrupts
 * @s: seq_file to print into; the hclge_dev is looked up from it
 * @data: unused seq_file callback argument
 */
-static int
-hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_mac_tnl_status(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_mac_tnl_stats stats;
unsigned long rem_nsec;
- int pos = 0;
- pos += scnprintf(buf + pos, len - pos,
- "Recently generated mac tnl interruption:\n");
+ seq_puts(s, "Recently generated mac tnl interruption:\n");
while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
- pos += scnprintf(buf + pos, len - pos,
- "[%07lu.%03lu] status = 0x%x\n",
- (unsigned long)stats.time, rem_nsec / 1000,
- stats.status);
+ seq_printf(s, "[%07lu.%03lu] status = 0x%x\n",
+ (unsigned long)stats.time, rem_nsec / 1000,
+ stats.status);
}
return 0;
}
-
-static const struct hclge_dbg_item mac_list_items[] = {
- { "FUNC_ID", 2 },
- { "MAC_ADDR", 12 },
- { "STATE", 2 },
-};
-
-static void hclge_dbg_dump_mac_list(struct hclge_dev *hdev, char *buf, int len,
- bool is_unicast)
+static void hclge_dbg_dump_mac_list(struct seq_file *s, bool is_unicast)
{
- char data_str[ARRAY_SIZE(mac_list_items)][HCLGE_DBG_DATA_STR_LEN];
- char content[HCLGE_DBG_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
- char *result[ARRAY_SIZE(mac_list_items)];
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_mac_node *mac_node, *tmp;
struct hclge_vport *vport;
struct list_head *list;
u32 func_id;
- int pos = 0;
- int i;
- for (i = 0; i < ARRAY_SIZE(mac_list_items); i++)
- result[i] = &data_str[i][0];
-
- pos += scnprintf(buf + pos, len - pos, "%s MAC_LIST:\n",
- is_unicast ? "UC" : "MC");
- hclge_dbg_fill_content(content, sizeof(content), mac_list_items,
- NULL, ARRAY_SIZE(mac_list_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ seq_printf(s, "%s MAC_LIST:\n", is_unicast ? "UC" : "MC");
+ seq_puts(s, "FUNC_ID MAC_ADDR STATE\n");
for (func_id = 0; func_id < hdev->num_alloc_vport; func_id++) {
vport = &hdev->vport[func_id];
list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list;
spin_lock_bh(&vport->mac_list_lock);
list_for_each_entry_safe(mac_node, tmp, list, node) {
- i = 0;
- result[i++] = hclge_dbg_get_func_id_str(str_id,
- func_id);
- sprintf(result[i++], "%pM", mac_node->mac_addr);
- sprintf(result[i++], "%5s",
- hclge_mac_state_str[mac_node->state]);
- hclge_dbg_fill_content(content, sizeof(content),
- mac_list_items,
- (const char **)result,
- ARRAY_SIZE(mac_list_items));
- pos += scnprintf(buf + pos, len - pos, "%s", content);
+ if (func_id)
+ seq_printf(s, "vf%-7u", func_id - 1U);
+ else
+ seq_puts(s, "pf ");
+ seq_printf(s, "%pM ", mac_node->mac_addr);
+ seq_printf(s, "%5s\n",
+ hclge_mac_state_str[mac_node->state]);
}
spin_unlock_bh(&vport->mac_list_lock);
}
}
-static int hclge_dbg_dump_umv_info(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_umv_info(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
u8 func_num = pci_num_vf(hdev->pdev) + 1;
struct hclge_vport *vport;
- int pos = 0;
u8 i;
- pos += scnprintf(buf, len, "num_alloc_vport : %u\n",
- hdev->num_alloc_vport);
- pos += scnprintf(buf + pos, len - pos, "max_umv_size : %u\n",
- hdev->max_umv_size);
- pos += scnprintf(buf + pos, len - pos, "wanted_umv_size : %u\n",
- hdev->wanted_umv_size);
- pos += scnprintf(buf + pos, len - pos, "priv_umv_size : %u\n",
- hdev->priv_umv_size);
+ seq_printf(s, "num_alloc_vport : %u\n", hdev->num_alloc_vport);
+ seq_printf(s, "max_umv_size : %u\n", hdev->max_umv_size);
+ seq_printf(s, "wanted_umv_size : %u\n", hdev->wanted_umv_size);
+ seq_printf(s, "priv_umv_size : %u\n", hdev->priv_umv_size);
mutex_lock(&hdev->vport_lock);
- pos += scnprintf(buf + pos, len - pos, "share_umv_size : %u\n",
- hdev->share_umv_size);
+ seq_printf(s, "share_umv_size : %u\n", hdev->share_umv_size);
for (i = 0; i < func_num; i++) {
vport = &hdev->vport[i];
- pos += scnprintf(buf + pos, len - pos,
- "vport(%u) used_umv_num : %u\n",
- i, vport->used_umv_num);
+ seq_printf(s, "vport(%u) used_umv_num : %u\n",
+ i, vport->used_umv_num);
}
mutex_unlock(&hdev->vport_lock);
- pos += scnprintf(buf + pos, len - pos, "used_mc_mac_num : %u\n",
- hdev->used_mc_mac_num);
+ seq_printf(s, "used_mc_mac_num : %u\n", hdev->used_mc_mac_num);
return 0;
}
@@ -2852,38 +2626,12 @@ static int hclge_get_port_vlan_filter_bypass_state(struct hclge_dev *hdev,
return 0;
}
-static const struct hclge_dbg_item vlan_filter_items[] = {
- { "FUNC_ID", 2 },
- { "I_VF_VLAN_FILTER", 2 },
- { "E_VF_VLAN_FILTER", 2 },
- { "PORT_VLAN_FILTER_BYPASS", 0 }
-};
-
-static const struct hclge_dbg_item vlan_offload_items[] = {
- { "FUNC_ID", 2 },
- { "PVID", 4 },
- { "ACCEPT_TAG1", 2 },
- { "ACCEPT_TAG2", 2 },
- { "ACCEPT_UNTAG1", 2 },
- { "ACCEPT_UNTAG2", 2 },
- { "INSERT_TAG1", 2 },
- { "INSERT_TAG2", 2 },
- { "SHIFT_TAG", 2 },
- { "STRIP_TAG1", 2 },
- { "STRIP_TAG2", 2 },
- { "DROP_TAG1", 2 },
- { "DROP_TAG2", 2 },
- { "PRI_ONLY_TAG1", 2 },
- { "PRI_ONLY_TAG2", 0 }
-};
-
-static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
- int len, int *pos)
+static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev,
+ struct seq_file *s)
{
- char content[HCLGE_DBG_VLAN_FLTR_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
- const char *result[ARRAY_SIZE(vlan_filter_items)];
- u8 i, j, vlan_fe, bypass, ingress, egress;
u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
+ u8 i, vlan_fe, bypass, ingress, egress;
+ char str_id[HCLGE_DBG_ID_LEN];
int ret;
ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_PORT, 0,
@@ -2893,14 +2641,11 @@ static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
- *pos += scnprintf(buf, len, "I_PORT_VLAN_FILTER: %s\n",
- state_str[ingress]);
- *pos += scnprintf(buf + *pos, len - *pos, "E_PORT_VLAN_FILTER: %s\n",
- state_str[egress]);
+ seq_printf(s, "I_PORT_VLAN_FILTER: %s\n", str_on_off(ingress));
+ seq_printf(s, "E_PORT_VLAN_FILTER: %s\n", str_on_off(egress));
- hclge_dbg_fill_content(content, sizeof(content), vlan_filter_items,
- NULL, ARRAY_SIZE(vlan_filter_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ seq_puts(s, "FUNC_ID I_VF_VLAN_FILTER E_VF_VLAN_FILTER ");
+ seq_puts(s, "PORT_VLAN_FILTER_BYPASS\n");
for (i = 0; i < func_num; i++) {
ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_VF, i,
@@ -2913,37 +2658,32 @@ static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
ret = hclge_get_port_vlan_filter_bypass_state(hdev, i, &bypass);
if (ret)
return ret;
- j = 0;
- result[j++] = hclge_dbg_get_func_id_str(str_id, i);
- result[j++] = state_str[ingress];
- result[j++] = state_str[egress];
- result[j++] =
- test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
- hdev->ae_dev->caps) ? state_str[bypass] : "NA";
- hclge_dbg_fill_content(content, sizeof(content),
- vlan_filter_items, result,
- ARRAY_SIZE(vlan_filter_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+
+ seq_printf(s, "%-9s%-18s%-18s%s\n",
+ hclge_dbg_get_func_id_str(str_id, i),
+ str_on_off(ingress), str_on_off(egress),
+ test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
+ hdev->ae_dev->caps) ?
+ str_on_off(bypass) : "NA");
}
- *pos += scnprintf(buf + *pos, len - *pos, "\n");
+ seq_puts(s, "\n");
return 0;
}
-static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf,
- int len, int *pos)
+static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev,
+ struct seq_file *s)
{
- char str_id[HCLGE_DBG_ID_LEN], str_pvid[HCLGE_DBG_ID_LEN];
- const char *result[ARRAY_SIZE(vlan_offload_items)];
- char content[HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN];
u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
struct hclge_dbg_vlan_cfg vlan_cfg;
+ char str_id[HCLGE_DBG_ID_LEN];
int ret;
- u8 i, j;
+ u8 i;
- hclge_dbg_fill_content(content, sizeof(content), vlan_offload_items,
- NULL, ARRAY_SIZE(vlan_offload_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ seq_puts(s, "FUNC_ID PVID ACCEPT_TAG1 ACCEPT_TAG2 ACCEPT_UNTAG1 ");
+ seq_puts(s, "ACCEPT_UNTAG2 INSERT_TAG1 INSERT_TAG2 SHIFT_TAG ");
+ seq_puts(s, "STRIP_TAG1 STRIP_TAG2 DROP_TAG1 DROP_TAG2 ");
+ seq_puts(s, "PRI_ONLY_TAG1 PRI_ONLY_TAG2\n");
for (i = 0; i < func_num; i++) {
ret = hclge_get_vlan_tx_offload_cfg(hdev, i, &vlan_cfg);
@@ -2954,107 +2694,92 @@ static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf,
if (ret)
return ret;
- sprintf(str_pvid, "%u", vlan_cfg.pvid);
- j = 0;
- result[j++] = hclge_dbg_get_func_id_str(str_id, i);
- result[j++] = str_pvid;
- result[j++] = state_str[vlan_cfg.accept_tag1];
- result[j++] = state_str[vlan_cfg.accept_tag2];
- result[j++] = state_str[vlan_cfg.accept_untag1];
- result[j++] = state_str[vlan_cfg.accept_untag2];
- result[j++] = state_str[vlan_cfg.insert_tag1];
- result[j++] = state_str[vlan_cfg.insert_tag2];
- result[j++] = state_str[vlan_cfg.shift_tag];
- result[j++] = state_str[vlan_cfg.strip_tag1];
- result[j++] = state_str[vlan_cfg.strip_tag2];
- result[j++] = state_str[vlan_cfg.drop_tag1];
- result[j++] = state_str[vlan_cfg.drop_tag2];
- result[j++] = state_str[vlan_cfg.pri_only1];
- result[j++] = state_str[vlan_cfg.pri_only2];
-
- hclge_dbg_fill_content(content, sizeof(content),
- vlan_offload_items, result,
- ARRAY_SIZE(vlan_offload_items));
- *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ seq_printf(s, "%-9s", hclge_dbg_get_func_id_str(str_id, i));
+ seq_printf(s, "%-6u", vlan_cfg.pvid);
+ seq_printf(s, "%-13s", str_on_off(vlan_cfg.accept_tag1));
+ seq_printf(s, "%-12s", str_on_off(vlan_cfg.accept_tag2));
+ seq_printf(s, "%-15s", str_on_off(vlan_cfg.accept_untag1));
+ seq_printf(s, "%-15s", str_on_off(vlan_cfg.accept_untag2));
+ seq_printf(s, "%-13s", str_on_off(vlan_cfg.insert_tag1));
+ seq_printf(s, "%-13s", str_on_off(vlan_cfg.insert_tag2));
+ seq_printf(s, "%-11s", str_on_off(vlan_cfg.shift_tag));
+ seq_printf(s, "%-12s", str_on_off(vlan_cfg.strip_tag1));
+ seq_printf(s, "%-12s", str_on_off(vlan_cfg.strip_tag2));
+ seq_printf(s, "%-11s", str_on_off(vlan_cfg.drop_tag1));
+ seq_printf(s, "%-11s", str_on_off(vlan_cfg.drop_tag2));
+ seq_printf(s, "%-15s", str_on_off(vlan_cfg.pri_only1));
+ seq_printf(s, "%s\n", str_on_off(vlan_cfg.pri_only2));
}
return 0;
}
-static int hclge_dbg_dump_vlan_config(struct hclge_dev *hdev, char *buf,
- int len)
+static int hclge_dbg_dump_vlan_config(struct seq_file *s, void *data)
{
- int pos = 0;
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
int ret;
- ret = hclge_dbg_dump_vlan_filter_config(hdev, buf, len, &pos);
+ ret = hclge_dbg_dump_vlan_filter_config(hdev, s);
if (ret)
return ret;
- return hclge_dbg_dump_vlan_offload_config(hdev, buf, len, &pos);
+ return hclge_dbg_dump_vlan_offload_config(hdev, s);
}
-static int hclge_dbg_dump_ptp_info(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_ptp_info(struct seq_file *s, void *data)
{
+ struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
struct hclge_ptp *ptp = hdev->ptp;
u32 sw_cfg = ptp->ptp_cfg;
unsigned int tx_start;
unsigned int last_rx;
- int pos = 0;
u32 hw_cfg;
int ret;
- pos += scnprintf(buf + pos, len - pos, "phc %s's debug info:\n",
- ptp->info.name);
- pos += scnprintf(buf + pos, len - pos, "ptp enable: %s\n",
- test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags) ?
- "yes" : "no");
- pos += scnprintf(buf + pos, len - pos, "ptp tx enable: %s\n",
- test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ?
- "yes" : "no");
- pos += scnprintf(buf + pos, len - pos, "ptp rx enable: %s\n",
- test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags) ?
- "yes" : "no");
+ seq_printf(s, "phc %s's debug info:\n", ptp->info.name);
+ seq_printf(s, "ptp enable: %s\n",
+ str_yes_no(test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags)));
+ seq_printf(s, "ptp tx enable: %s\n",
+ str_yes_no(test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags)));
+ seq_printf(s, "ptp rx enable: %s\n",
+ str_yes_no(test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags)));
last_rx = jiffies_to_msecs(ptp->last_rx);
- pos += scnprintf(buf + pos, len - pos, "last rx time: %lu.%lu\n",
- last_rx / MSEC_PER_SEC, last_rx % MSEC_PER_SEC);
- pos += scnprintf(buf + pos, len - pos, "rx count: %lu\n", ptp->rx_cnt);
+ seq_printf(s, "last rx time: %lu.%lu\n",
+ last_rx / MSEC_PER_SEC, last_rx % MSEC_PER_SEC);
+ seq_printf(s, "rx count: %lu\n", ptp->rx_cnt);
tx_start = jiffies_to_msecs(ptp->tx_start);
- pos += scnprintf(buf + pos, len - pos, "last tx start time: %lu.%lu\n",
- tx_start / MSEC_PER_SEC, tx_start % MSEC_PER_SEC);
- pos += scnprintf(buf + pos, len - pos, "tx count: %lu\n", ptp->tx_cnt);
- pos += scnprintf(buf + pos, len - pos, "tx skipped count: %lu\n",
- ptp->tx_skipped);
- pos += scnprintf(buf + pos, len - pos, "tx timeout count: %lu\n",
- ptp->tx_timeout);
- pos += scnprintf(buf + pos, len - pos, "last tx seqid: %u\n",
- ptp->last_tx_seqid);
+ seq_printf(s, "last tx start time: %lu.%lu\n",
+ tx_start / MSEC_PER_SEC, tx_start % MSEC_PER_SEC);
+ seq_printf(s, "tx count: %lu\n", ptp->tx_cnt);
+ seq_printf(s, "tx skipped count: %lu\n", ptp->tx_skipped);
+ seq_printf(s, "tx timeout count: %lu\n", ptp->tx_timeout);
+ seq_printf(s, "last tx seqid: %u\n", ptp->last_tx_seqid);
+
ret = hclge_ptp_cfg_qry(hdev, &hw_cfg);
if (ret)
return ret;
- pos += scnprintf(buf + pos, len - pos, "sw_cfg: %#x, hw_cfg: %#x\n",
- sw_cfg, hw_cfg);
+ seq_printf(s, "sw_cfg: %#x, hw_cfg: %#x\n", sw_cfg, hw_cfg);
- pos += scnprintf(buf + pos, len - pos, "tx type: %d, rx filter: %d\n",
- ptp->ts_cfg.tx_type, ptp->ts_cfg.rx_filter);
+ seq_printf(s, "tx type: %d, rx filter: %d\n",
+ ptp->ts_cfg.tx_type, ptp->ts_cfg.rx_filter);
return 0;
}
-static int hclge_dbg_dump_mac_uc(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_mac_uc(struct seq_file *s, void *data)
{
- hclge_dbg_dump_mac_list(hdev, buf, len, true);
+ hclge_dbg_dump_mac_list(s, true);
return 0;
}
-static int hclge_dbg_dump_mac_mc(struct hclge_dev *hdev, char *buf, int len)
+static int hclge_dbg_dump_mac_mc(struct seq_file *s, void *data)
{
- hclge_dbg_dump_mac_list(hdev, buf, len, false);
+ hclge_dbg_dump_mac_list(s, false);
return 0;
}
@@ -3062,156 +2787,156 @@ static int hclge_dbg_dump_mac_mc(struct hclge_dev *hdev, char *buf, int len)
static const struct hclge_dbg_func hclge_dbg_cmd_func[] = {
{
.cmd = HNAE3_DBG_CMD_TM_NODES,
- .dbg_dump = hclge_dbg_dump_tm_nodes,
+ .dbg_read_func = hclge_dbg_dump_tm_nodes,
},
{
.cmd = HNAE3_DBG_CMD_TM_PRI,
- .dbg_dump = hclge_dbg_dump_tm_pri,
+ .dbg_read_func = hclge_dbg_dump_tm_pri,
},
{
.cmd = HNAE3_DBG_CMD_TM_QSET,
- .dbg_dump = hclge_dbg_dump_tm_qset,
+ .dbg_read_func = hclge_dbg_dump_tm_qset,
},
{
.cmd = HNAE3_DBG_CMD_TM_MAP,
- .dbg_dump = hclge_dbg_dump_tm_map,
+ .dbg_read_func = hclge_dbg_dump_tm_map,
},
{
.cmd = HNAE3_DBG_CMD_TM_PG,
- .dbg_dump = hclge_dbg_dump_tm_pg,
+ .dbg_read_func = hclge_dbg_dump_tm_pg,
},
{
.cmd = HNAE3_DBG_CMD_TM_PORT,
- .dbg_dump = hclge_dbg_dump_tm_port,
+ .dbg_read_func = hclge_dbg_dump_tm_port,
},
{
.cmd = HNAE3_DBG_CMD_TC_SCH_INFO,
- .dbg_dump = hclge_dbg_dump_tc,
+ .dbg_read_func = hclge_dbg_dump_tc,
},
{
.cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG,
- .dbg_dump = hclge_dbg_dump_qos_pause_cfg,
+ .dbg_read_func = hclge_dbg_dump_qos_pause_cfg,
},
{
.cmd = HNAE3_DBG_CMD_QOS_PRI_MAP,
- .dbg_dump = hclge_dbg_dump_qos_pri_map,
+ .dbg_read_func = hclge_dbg_dump_qos_pri_map,
},
{
.cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
- .dbg_dump = hclge_dbg_dump_qos_dscp_map,
+ .dbg_read_func = hclge_dbg_dump_qos_dscp_map,
},
{
.cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
- .dbg_dump = hclge_dbg_dump_qos_buf_cfg,
+ .dbg_read_func = hclge_dbg_dump_qos_buf_cfg,
},
{
.cmd = HNAE3_DBG_CMD_MAC_UC,
- .dbg_dump = hclge_dbg_dump_mac_uc,
+ .dbg_read_func = hclge_dbg_dump_mac_uc,
},
{
.cmd = HNAE3_DBG_CMD_MAC_MC,
- .dbg_dump = hclge_dbg_dump_mac_mc,
+ .dbg_read_func = hclge_dbg_dump_mac_mc,
},
{
.cmd = HNAE3_DBG_CMD_MNG_TBL,
- .dbg_dump = hclge_dbg_dump_mng_table,
+ .dbg_read_func = hclge_dbg_dump_mng_table,
},
{
.cmd = HNAE3_DBG_CMD_LOOPBACK,
- .dbg_dump = hclge_dbg_dump_loopback,
+ .dbg_read_func = hclge_dbg_dump_loopback,
},
{
.cmd = HNAE3_DBG_CMD_PTP_INFO,
- .dbg_dump = hclge_dbg_dump_ptp_info,
+ .dbg_read_func = hclge_dbg_dump_ptp_info,
},
{
.cmd = HNAE3_DBG_CMD_INTERRUPT_INFO,
- .dbg_dump = hclge_dbg_dump_interrupt,
+ .dbg_read_func = hclge_dbg_dump_interrupt,
},
{
.cmd = HNAE3_DBG_CMD_RESET_INFO,
- .dbg_dump = hclge_dbg_dump_rst_info,
+ .dbg_read_func = hclge_dbg_seq_dump_rst_info,
},
{
.cmd = HNAE3_DBG_CMD_IMP_INFO,
- .dbg_dump = hclge_dbg_get_imp_stats_info,
+ .dbg_read_func = hclge_dbg_get_imp_stats_info,
},
{
.cmd = HNAE3_DBG_CMD_NCL_CONFIG,
- .dbg_dump = hclge_dbg_dump_ncl_config,
+ .dbg_read_func = hclge_dbg_dump_ncl_config,
},
{
.cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_bios_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_SSU,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_ssu_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_igu_egu_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_RPU,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_rpu_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_NCSI,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_ncsi_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_RTC,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_rtc_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_PPP,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_ppp_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_RCB,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_rcb_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_TQP,
- .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ .dbg_read_func = hclge_dbg_dump_tqp_reg_cmd,
},
{
.cmd = HNAE3_DBG_CMD_REG_MAC,
- .dbg_dump = hclge_dbg_dump_mac,
+ .dbg_read_func = hclge_dbg_dump_mac,
},
{
.cmd = HNAE3_DBG_CMD_REG_DCB,
- .dbg_dump = hclge_dbg_dump_dcb,
+ .dbg_read_func = hclge_dbg_dump_dcb,
},
{
.cmd = HNAE3_DBG_CMD_FD_TCAM,
- .dbg_dump = hclge_dbg_dump_fd_tcam,
+ .dbg_read_func = hclge_dbg_dump_fd_tcam,
},
{
.cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS,
- .dbg_dump = hclge_dbg_dump_mac_tnl_status,
+ .dbg_read_func = hclge_dbg_dump_mac_tnl_status,
},
{
.cmd = HNAE3_DBG_CMD_SERV_INFO,
- .dbg_dump = hclge_dbg_dump_serv_info,
+ .dbg_read_func = hclge_dbg_dump_serv_info,
},
{
.cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
- .dbg_dump = hclge_dbg_dump_vlan_config,
+ .dbg_read_func = hclge_dbg_dump_vlan_config,
},
{
.cmd = HNAE3_DBG_CMD_FD_COUNTER,
- .dbg_dump = hclge_dbg_dump_fd_counter,
+ .dbg_read_func = hclge_dbg_dump_fd_counter,
},
{
.cmd = HNAE3_DBG_CMD_UMV_INFO,
- .dbg_dump = hclge_dbg_dump_umv_info,
+ .dbg_read_func = hclge_dbg_dump_umv_info,
},
};
-int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
- char *buf, int len)
+int hclge_dbg_get_read_func(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
+ read_func *func)
{
struct hclge_vport *vport = hclge_get_vport(handle);
const struct hclge_dbg_func *cmd_func;
@@ -3221,11 +2946,8 @@ int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
for (i = 0; i < ARRAY_SIZE(hclge_dbg_cmd_func); i++) {
if (cmd == hclge_dbg_cmd_func[i].cmd) {
cmd_func = &hclge_dbg_cmd_func[i];
- if (cmd_func->dbg_dump)
- return cmd_func->dbg_dump(hdev, buf, len);
- else
- return cmd_func->dbg_dump_reg(hdev, cmd, buf,
- len);
+ *func = cmd_func->dbg_read_func;
+ return 0;
}
}
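/*
 * Sketch of how the new dbg_get_read_func() contract can be consumed by the
 * hns3 debugfs glue (assumptions: read_func matches the seq_file "show"
 * signature int (*)(struct seq_file *, void *), and the hnae3 handle plus
 * command id are recoverable from the file being opened; the real
 * hns3_debugfs.c wiring may differ).
 */
static int hns3_dbg_open_sketch(struct hnae3_handle *handle,
				enum hnae3_dbg_cmd cmd, struct file *filp)
{
	read_func show = NULL;
	int ret;

	ret = handle->ae_algo->ops->dbg_get_read_func(handle, cmd, &show);
	if (ret)
		return ret;

	/* single_open() stores "handle" as the seq_file private data that
	 * helpers like hclge_seq_file_to_hdev() are assumed to start from.
	 */
	return single_open(filp, show, handle);
}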
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
index 2b998cbed826..317f79efd54c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
@@ -92,6 +92,7 @@ struct hclge_dbg_func {
int (*dbg_dump)(struct hclge_dev *hdev, char *buf, int len);
int (*dbg_dump_reg)(struct hclge_dev *hdev, enum hnae3_dbg_cmd cmd,
char *buf, int len);
+ read_func dbg_read_func;
};
struct hclge_dbg_status_dfx_info {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 728f4777e51f..cf8abbe01840 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -490,7 +490,7 @@ static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;
/* This may be called inside atomic sections,
- * so GFP_ATOMIC is more suitalbe here
+ * so GFP_ATOMIC is more suitable here
*/
desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
if (!desc)
@@ -582,7 +582,7 @@ static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
int size, u64 *data)
{
u64 *buf = data;
- u32 i;
+ int i;
for (i = 0; i < size; i++) {
if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
@@ -595,25 +595,21 @@ static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
return buf;
}
-static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
- const struct hclge_comm_stats_str strs[],
- int size, u8 *data)
+static void hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
+ const struct hclge_comm_stats_str strs[],
+ int size, u8 **data)
{
- char *buff = (char *)data;
- u32 i;
+ int i;
if (stringset != ETH_SS_STATS)
- return buff;
+ return;
for (i = 0; i < size; i++) {
if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
continue;
- snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
- buff = buff + ETH_GSTRING_LEN;
+ ethtool_puts(data, strs[i].desc);
}
-
- return (u8 *)buff;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
@@ -718,44 +714,38 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
}
static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
- u8 *data)
+ u8 **data)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- u8 *p = (char *)data;
+ const char *str;
int size;
if (stringset == ETH_SS_STATS) {
size = ARRAY_SIZE(g_mac_stats_string);
- p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
- size, p);
- p = hclge_comm_tqps_get_strings(handle, p);
+ hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
+ size, data);
+ hclge_comm_tqps_get_strings(handle, data);
} else if (stringset == ETH_SS_TEST) {
if (handle->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK) {
- memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL],
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
+ str = hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL];
+ ethtool_puts(data, str);
}
if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
- memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
+ str = hns3_nic_test_strs[HNAE3_LOOP_APP];
+ ethtool_puts(data, str);
}
if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
- memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
+ str = hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES];
+ ethtool_puts(data, str);
}
if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
- memcpy(p,
- hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
+ str = hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES];
+ ethtool_puts(data, str);
}
if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
- memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
+ str = hns3_nic_test_strs[HNAE3_LOOP_PHY];
+ ethtool_puts(data, str);
}
}
}
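/*
 * For reference (approximation, not from this patch): ethtool_puts() wraps
 * the copy-and-advance idiom the removed lines open-coded, roughly:
 */
static inline void ethtool_puts_sketch(u8 **data, const char *str)
{
	strscpy((char *)*data, str, ETH_GSTRING_LEN);
	*data += ETH_GSTRING_LEN;
}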
@@ -2192,8 +2182,8 @@ static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
-static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
- struct hclge_pkt_buf_alloc *buf_alloc)
+static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
{
#define COMPENSATE_BUFFER 0x3C00
#define COMPENSATE_HALF_MPS_NUM 5
@@ -2368,7 +2358,7 @@ static int hclge_common_thrd_config(struct hclge_dev *hdev,
for (i = 0; i < 2; i++) {
hclge_cmd_setup_basic_desc(&desc[i],
HCLGE_OPC_RX_COM_THRD_ALLOC, false);
- req = (struct hclge_rx_com_thrd *)&desc[i].data;
+ req = (struct hclge_rx_com_thrd *)desc[i].data;
/* The first descriptor set the NEXT bit to 1 */
if (i == 0)
@@ -2634,7 +2624,7 @@ int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lan
int ret;
duplex = hclge_check_speed_dup(duplex, speed);
- if (!mac->support_autoneg && mac->speed == speed &&
+ if (!mac->support_autoneg && mac->speed == (u32)speed &&
mac->duplex == duplex && (mac->lane_num == lane_num || lane_num == 0))
return 0;
@@ -2662,7 +2652,7 @@ static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
if (ret)
return ret;
- hdev->hw.mac.req_speed = speed;
+ hdev->hw.mac.req_speed = (u32)speed;
hdev->hw.mac.req_duplex = duplex;
return 0;
@@ -3456,7 +3446,7 @@ static int hclge_tp_port_init(struct hclge_dev *hdev)
static int hclge_update_port_info(struct hclge_dev *hdev)
{
struct hclge_mac *mac = &hdev->hw.mac;
- int speed;
+ u32 speed;
int ret;
/* get the port info from SFP cmd if not copper port */
@@ -3781,7 +3771,7 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
HCLGE_NAME, pci_name(hdev->pdev));
ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
- IRQ_NOAUTOEN, hdev->misc_vector.name, hdev);
+ IRQF_NO_AUTOEN, hdev->misc_vector.name, hdev);
if (ret) {
hclge_free_vector(hdev, 0);
dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
@@ -4513,7 +4503,7 @@ static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
static void hclge_reset_timer(struct timer_list *t)
{
- struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
+ struct hclge_dev *hdev = timer_container_of(hdev, t, reset_timer);
/* if default_reset_request has no value, it means that this reset
* request has already be handled, so just return here
@@ -4882,7 +4872,7 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
}
static int hclge_set_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+ const struct ethtool_rxfh_fields *nfc)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -4900,7 +4890,7 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
}
static int hclge_get_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+ struct ethtool_rxfh_fields *nfc)
{
struct hclge_vport *vport = hclge_get_vport(handle);
u8 tuple_sets;
@@ -6999,7 +6989,7 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
struct hclge_dev *hdev = vport->back;
struct hclge_fd_rule *rule;
struct hlist_node *node2;
- int cnt = 0;
+ u32 cnt = 0;
if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
@@ -7885,7 +7875,7 @@ static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
if (ret)
return ret;
- return phy_loopback(phydev, true);
+ return phy_loopback(phydev, true, 0);
}
static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
@@ -7893,7 +7883,7 @@ static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
{
int ret;
- ret = phy_loopback(phydev, false);
+ ret = phy_loopback(phydev, false, 0);
if (ret)
return ret;
@@ -8010,7 +8000,7 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
ret = hclge_tqp_enable(handle, en);
if (ret)
dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
- en ? "enable" : "disable", ret);
+ str_enable_disable(en), ret);
return ret;
}
@@ -8233,14 +8223,14 @@ static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
word_num = vfid / 32;
bit_num = vfid % 32;
if (clr)
- desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
+ desc[1].data[word_num] &= cpu_to_le32(~(1U << bit_num));
else
desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
} else {
word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
bit_num = vfid % 32;
if (clr)
- desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
+ desc[2].data[word_num] &= cpu_to_le32(~(1U << bit_num));
else
desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
}
@@ -9302,7 +9292,7 @@ static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
static int init_mgr_tbl(struct hclge_dev *hdev)
{
int ret;
- int i;
+ u32 i;
for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
@@ -9439,8 +9429,7 @@ static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
/* this command reads phy id and register at the same time */
fallthrough;
case SIOCGMIIREG:
- data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
- return 0;
+ return hclge_read_phy_reg(hdev, data->reg_num, &data->val_out);
case SIOCSMIIREG:
return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
@@ -9455,15 +9444,8 @@ static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- switch (cmd) {
- case SIOCGHWTSTAMP:
- return hclge_ptp_get_cfg(hdev, ifr);
- case SIOCSHWTSTAMP:
- return hclge_ptp_set_cfg(hdev, ifr);
- default:
- if (!hdev->hw.mac.phydev)
- return hclge_mii_ioctl(hdev, ifr, cmd);
- }
+ if (!hdev->hw.mac.phydev)
+ return hclge_mii_ioctl(hdev, ifr, cmd);
return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
}
@@ -9586,33 +9568,36 @@ static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
return false;
}
-int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
+static int __hclge_enable_vport_vlan_filter(struct hclge_vport *vport,
+ bool request_en)
{
- struct hclge_dev *hdev = vport->back;
bool need_en;
int ret;
- mutex_lock(&hdev->vport_lock);
-
- vport->req_vlan_fltr_en = request_en;
-
need_en = hclge_need_enable_vport_vlan_filter(vport);
- if (need_en == vport->cur_vlan_fltr_en) {
- mutex_unlock(&hdev->vport_lock);
+ if (need_en == vport->cur_vlan_fltr_en)
return 0;
- }
ret = hclge_set_vport_vlan_filter(vport, need_en);
- if (ret) {
- mutex_unlock(&hdev->vport_lock);
+ if (ret)
return ret;
- }
vport->cur_vlan_fltr_en = need_en;
+ return 0;
+}
+
+int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
+{
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ mutex_lock(&hdev->vport_lock);
+ vport->req_vlan_fltr_en = request_en;
+ ret = __hclge_enable_vport_vlan_filter(vport, request_en);
mutex_unlock(&hdev->vport_lock);
- return 0;
+ return ret;
}
static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
@@ -10633,16 +10618,19 @@ static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
&vport->state))
continue;
- ret = hclge_enable_vport_vlan_filter(vport,
- vport->req_vlan_fltr_en);
+ mutex_lock(&hdev->vport_lock);
+ ret = __hclge_enable_vport_vlan_filter(vport,
+ vport->req_vlan_fltr_en);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to sync vlan filter state for vport%u, ret = %d\n",
vport->vport_id, ret);
set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
&vport->state);
+ mutex_unlock(&hdev->vport_lock);
return;
}
+ mutex_unlock(&hdev->vport_lock);
}
}
@@ -10723,7 +10711,7 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
mutex_lock(&hdev->vport_lock);
/* VF's mps must fit within hdev->mps */
- if (vport->vport_id && max_frm_size > hdev->mps) {
+ if (vport->vport_id && (u32)max_frm_size > hdev->mps) {
mutex_unlock(&hdev->vport_lock);
return -EINVAL;
} else if (vport->vport_id) {
@@ -10734,7 +10722,7 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
/* PF's mps must be greater then VF's mps */
for (i = 1; i < hdev->num_alloc_vport; i++)
- if (max_frm_size < hdev->vport[i].mps) {
+ if ((u32)max_frm_size < hdev->vport[i].mps) {
dev_err(&hdev->pdev->dev,
"failed to set pf mtu for less than vport %d, mps = %u.\n",
i, hdev->vport[i].mps);
@@ -11210,9 +11198,9 @@ static void hclge_info_show(struct hclge_dev *hdev)
dev_info(dev, "This is %s PF\n",
hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
dev_info(dev, "DCB %s\n",
- handle->kinfo.tc_info.dcb_ets_active ? "enable" : "disable");
+ str_enable_disable(handle->kinfo.tc_info.dcb_ets_active));
dev_info(dev, "MQPRIO %s\n",
- handle->kinfo.tc_info.mqprio_active ? "enable" : "disable");
+ str_enable_disable(handle->kinfo.tc_info.mqprio_active));
dev_info(dev, "Default tx spare buffer size: %u\n",
hdev->tx_spare_buf_size);
@@ -11224,7 +11212,7 @@ static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
{
struct hnae3_client *client = vport->nic.client;
struct hclge_dev *hdev = ae_dev->priv;
- int rst_cnt = hdev->rst_stats.reset_cnt;
+ u32 rst_cnt = hdev->rst_stats.reset_cnt;
int ret;
ret = client->ops->init_instance(&vport->nic);
@@ -11268,7 +11256,7 @@ static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
{
struct hclge_dev *hdev = ae_dev->priv;
struct hnae3_client *client;
- int rst_cnt;
+ u32 rst_cnt;
int ret;
if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
@@ -11433,7 +11421,7 @@ static int hclge_pci_init(struct hclge_dev *hdev)
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pdev->dev,
- "can't set consistent PCI DMA");
+ "can't set consistent PCI DMA\n");
goto err_disable_device;
}
dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
@@ -11502,7 +11490,7 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
set_bit(HCLGE_STATE_REMOVING, &hdev->state);
if (hdev->reset_timer.function)
- del_timer_sync(&hdev->reset_timer);
+ timer_delete_sync(&hdev->reset_timer);
if (hdev->service_task.work.func)
cancel_delayed_work_sync(&hdev->service_task);
}
@@ -11986,7 +11974,7 @@ static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
if (ret) {
dev_err(&hdev->pdev->dev,
"Set vf %d mac spoof check %s failed, ret=%d\n",
- vf, enable ? "on" : "off", ret);
+ vf, str_on_off(enable), ret);
return ret;
}
@@ -11994,7 +11982,7 @@ static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
if (ret)
dev_err(&hdev->pdev->dev,
"Set vf %d vlan spoof check %s failed, ret=%d\n",
- vf, enable ? "on" : "off", ret);
+ vf, str_on_off(enable), ret);
return ret;
}
@@ -12098,7 +12086,7 @@ static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
int min_tx_rate, int max_tx_rate)
{
if (min_tx_rate != 0 ||
- max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
+ max_tx_rate < 0 || (u32)max_tx_rate > hdev->hw.mac.max_speed) {
dev_err(&hdev->pdev->dev,
"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
@@ -12123,7 +12111,7 @@ static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
if (!vport)
return -EINVAL;
- if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
+ if (!force && (u32)max_tx_rate == vport->vf_info.max_tx_rate)
return 0;
ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
@@ -12874,7 +12862,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_fd_all_rules = hclge_get_all_rules,
.enable_fd = hclge_enable_fd,
.add_arfs_entry = hclge_add_fd_entry_by_arfs,
- .dbg_read_cmd = hclge_dbg_read_cmd,
+ .dbg_get_read_func = hclge_dbg_get_read_func,
.handle_hw_ras_error = hclge_handle_hw_ras_error,
.get_hw_reset_stat = hclge_get_hw_reset_stat,
.ae_dev_resetting = hclge_ae_dev_resetting,
@@ -12905,6 +12893,8 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_dscp_prio = hclge_get_dscp_prio,
.get_wol = hclge_get_wol,
.set_wol = hclge_set_wol,
+ .hwtstamp_get = hclge_ptp_get_cfg,
+ .hwtstamp_set = hclge_ptp_set_cfg,
};
static struct hnae3_ae_algo ae_algo = {
@@ -12914,9 +12904,10 @@ static struct hnae3_ae_algo ae_algo = {
static int __init hclge_init(void)
{
- pr_info("%s is initializing\n", HCLGE_NAME);
+ pr_debug("%s is initializing\n", HCLGE_NAME);
- hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
+ hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0,
+ HCLGE_NAME);
if (!hclge_wq) {
pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
return -ENOMEM;
@@ -12929,9 +12920,11 @@ static int __init hclge_init(void)
static void __exit hclge_exit(void)
{
+ hnae3_acquire_unload_lock();
hnae3_unregister_ae_algo_prepare(&ae_algo);
hnae3_unregister_ae_algo(&ae_algo);
destroy_workqueue(hclge_wq);
+ hnae3_release_unload_lock();
}
module_init(hclge_init);
module_exit(hclge_exit);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index b9fc719880bb..032b472d2368 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -1142,8 +1142,8 @@ int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
int hclge_vport_start(struct hclge_vport *vport);
void hclge_vport_stop(struct hclge_vport *vport);
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
-int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
- char *buf, int len);
+int hclge_dbg_get_read_func(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
+ read_func *func);
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
int hclge_notify_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 59c863306657..c7ff12a6c076 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -749,16 +749,17 @@ static int hclge_get_rss_key(struct hclge_vport *vport,
#define HCLGE_RSS_MBX_RESP_LEN 8
struct hclge_dev *hdev = vport->back;
struct hclge_comm_rss_cfg *rss_cfg;
+ int rss_hash_key_size;
u8 index;
index = mbx_req->msg.data[0];
rss_cfg = &hdev->rss_cfg;
+ rss_hash_key_size = sizeof(rss_cfg->rss_hash_key);
/* Check the query index of rss_hash_key from VF, make sure no
* more than the size of rss_hash_key.
*/
- if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) >
- sizeof(rss_cfg->rss_hash_key)) {
+ if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) > rss_hash_key_size) {
dev_warn(&hdev->pdev->dev,
"failed to get the rss hash key, the index(%u) invalid !\n",
index);
@@ -800,7 +801,7 @@ static void hclge_handle_link_change_event(struct hclge_dev *hdev,
static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
{
- u32 tail = hclge_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG);
+ int tail = hclge_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG);
return tail == hw->hw.cmq.crq.next_to_use;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 80079657afeb..cf881108fa57 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -151,7 +151,7 @@ int hclge_mac_mdio_config(struct hclge_dev *hdev)
mdio_bus->parent = &hdev->pdev->dev;
mdio_bus->priv = hdev;
- mdio_bus->phy_mask = ~(1 << mac->phy_addr);
+ mdio_bus->phy_mask = ~(1U << mac->phy_addr);
ret = mdiobus_register(mdio_bus);
if (ret) {
dev_err(mdio_bus->parent,
@@ -258,7 +258,7 @@ void hclge_mac_start_phy(struct hclge_dev *hdev)
if (!phydev)
return;
- phy_loopback(phydev, false);
+ phy_loopback(phydev, false, 0);
phy_start(phydev);
}
@@ -274,7 +274,7 @@ void hclge_mac_stop_phy(struct hclge_dev *hdev)
phy_stop(phydev);
}
-u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr)
+int hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 *val)
{
struct hclge_phy_reg_cmd *req;
struct hclge_desc desc;
@@ -286,11 +286,14 @@ u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr)
req->reg_addr = cpu_to_le16(reg_addr);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
+ if (ret) {
dev_err(&hdev->pdev->dev,
"failed to read phy reg, ret = %d.\n", ret);
+ return ret;
+ }
- return le16_to_cpu(req->reg_val);
+ *val = le16_to_cpu(req->reg_val);
+ return 0;
}
int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
index 4200d0b6d931..21d434c82475 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
@@ -13,7 +13,7 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle);
void hclge_mac_disconnect_phy(struct hnae3_handle *handle);
void hclge_mac_start_phy(struct hclge_dev *hdev);
void hclge_mac_stop_phy(struct hclge_dev *hdev);
-u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr);
+int hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 *val);
int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index bab16c2191b2..0081c5281455 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -2,6 +2,7 @@
// Copyright (c) 2021 Hisilicon Limited.
#include <linux/skbuff.h>
+#include <linux/string_choices.h>
#include "hclge_main.h"
#include "hnae3.h"
@@ -203,13 +204,17 @@ static int hclge_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
return 0;
}
-int hclge_ptp_get_cfg(struct hclge_dev *hdev, struct ifreq *ifr)
+int hclge_ptp_get_cfg(struct hnae3_handle *handle,
+ struct kernel_hwtstamp_config *config)
{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state))
return -EOPNOTSUPP;
- return copy_to_user(ifr->ifr_data, &hdev->ptp->ts_cfg,
- sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
+ *config = hdev->ptp->ts_cfg;
+ return 0;
}
static int hclge_ptp_int_en(struct hclge_dev *hdev, bool en)
@@ -226,7 +231,7 @@ static int hclge_ptp_int_en(struct hclge_dev *hdev, bool en)
if (ret)
dev_err(&hdev->pdev->dev,
"failed to %s ptp interrupt, ret = %d\n",
- en ? "enable" : "disable", ret);
+ str_enable_disable(en), ret);
return ret;
}
@@ -268,7 +273,7 @@ static int hclge_ptp_cfg(struct hclge_dev *hdev, u32 cfg)
return ret;
}
-static int hclge_ptp_set_tx_mode(struct hwtstamp_config *cfg,
+static int hclge_ptp_set_tx_mode(struct kernel_hwtstamp_config *cfg,
unsigned long *flags, u32 *ptp_cfg)
{
switch (cfg->tx_type) {
@@ -286,7 +291,7 @@ static int hclge_ptp_set_tx_mode(struct hwtstamp_config *cfg,
return 0;
}
-static int hclge_ptp_set_rx_mode(struct hwtstamp_config *cfg,
+static int hclge_ptp_set_rx_mode(struct kernel_hwtstamp_config *cfg,
unsigned long *flags, u32 *ptp_cfg)
{
int rx_filter = cfg->rx_filter;
@@ -331,7 +336,7 @@ static int hclge_ptp_set_rx_mode(struct hwtstamp_config *cfg,
}
static int hclge_ptp_set_ts_mode(struct hclge_dev *hdev,
- struct hwtstamp_config *cfg)
+ struct kernel_hwtstamp_config *cfg)
{
unsigned long flags = hdev->ptp->flags;
u32 ptp_cfg = 0;
@@ -358,9 +363,12 @@ static int hclge_ptp_set_ts_mode(struct hclge_dev *hdev,
return 0;
}
-int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr)
+int hclge_ptp_set_cfg(struct hnae3_handle *handle,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config cfg;
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
int ret;
if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state)) {
@@ -368,16 +376,13 @@ int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr)
return -EOPNOTSUPP;
}
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
-
- ret = hclge_ptp_set_ts_mode(hdev, &cfg);
+ ret = hclge_ptp_set_ts_mode(hdev, config);
if (ret)
return ret;
- hdev->ptp->ts_cfg = cfg;
+ hdev->ptp->ts_cfg = *config;
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
int hclge_ptp_get_ts_info(struct hnae3_handle *handle,
@@ -439,6 +444,13 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
ptp->info.settime64 = hclge_ptp_settime;
ptp->info.n_alarm = 0;
+
+ spin_lock_init(&ptp->lock);
+ ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
+ ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+ ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
+ hdev->ptp = ptp;
+
ptp->clock = ptp_clock_register(&ptp->info, &hdev->pdev->dev);
if (IS_ERR(ptp->clock)) {
dev_err(&hdev->pdev->dev,
@@ -450,12 +462,6 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
return -ENODEV;
}
- spin_lock_init(&ptp->lock);
- ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
- ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
- ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
- hdev->ptp = ptp;
-
return 0;
}
@@ -483,7 +489,7 @@ int hclge_ptp_init(struct hclge_dev *hdev)
ret = hclge_ptp_get_cycle(hdev);
if (ret)
- return ret;
+ goto out;
}
ret = hclge_ptp_int_en(hdev, true);
@@ -495,14 +501,14 @@ int hclge_ptp_init(struct hclge_dev *hdev)
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to init freq, ret = %d\n", ret);
- goto out;
+ goto out_clear_int;
}
ret = hclge_ptp_set_ts_mode(hdev, &hdev->ptp->ts_cfg);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to init ts mode, ret = %d\n", ret);
- goto out;
+ goto out_clear_int;
}
ktime_get_real_ts64(&ts);
@@ -510,7 +516,7 @@ int hclge_ptp_init(struct hclge_dev *hdev)
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to init ts time, ret = %d\n", ret);
- goto out;
+ goto out_clear_int;
}
set_bit(HCLGE_STATE_PTP_EN, &hdev->state);
@@ -518,6 +524,9 @@ int hclge_ptp_init(struct hclge_dev *hdev)
return 0;
+out_clear_int:
+ clear_bit(HCLGE_PTP_FLAG_EN, &hdev->ptp->flags);
+ hclge_ptp_int_en(hdev, false);
out:
hclge_ptp_destroy_clock(hdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
index 63483636c074..0162fa5ac146 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
@@ -25,7 +25,7 @@ struct ifreq;
#define HCLGE_PTP_TIME_SEC_H_MASK GENMASK(15, 0)
#define HCLGE_PTP_TIME_SEC_L_REG 0x54
#define HCLGE_PTP_TIME_NSEC_REG 0x58
-#define HCLGE_PTP_TIME_NSEC_MASK GENMASK(29, 0)
+#define HCLGE_PTP_TIME_NSEC_MASK 0x3fffffffLL
#define HCLGE_PTP_TIME_NSEC_NEG BIT(31)
#define HCLGE_PTP_TIME_SYNC_REG 0x5C
#define HCLGE_PTP_TIME_SYNC_EN BIT(0)
@@ -62,7 +62,7 @@ struct hclge_ptp {
unsigned long flags;
void __iomem *io_base;
struct ptp_clock_info info;
- struct hwtstamp_config ts_cfg;
+ struct kernel_hwtstamp_config ts_cfg;
spinlock_t lock; /* protects ptp registers */
u32 ptp_cfg;
u32 last_tx_seqid;
@@ -133,8 +133,11 @@ bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb);
void hclge_ptp_clean_tx_hwts(struct hclge_dev *hdev);
void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb,
u32 nsec, u32 sec);
-int hclge_ptp_get_cfg(struct hclge_dev *hdev, struct ifreq *ifr);
-int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr);
+int hclge_ptp_get_cfg(struct hnae3_handle *handle,
+ struct kernel_hwtstamp_config *config);
+int hclge_ptp_set_cfg(struct hnae3_handle *handle,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
int hclge_ptp_init(struct hclge_dev *hdev);
void hclge_ptp_uninit(struct hclge_dev *hdev);
int hclge_ptp_get_ts_info(struct hnae3_handle *handle,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 896f1eb172d3..8fcf220a120d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -130,12 +130,10 @@ static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
}
static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
- u8 *data)
+ u8 **data)
{
- u8 *p = (char *)data;
-
if (strset == ETH_SS_STATS)
- p = hclge_comm_tqps_get_strings(handle, p);
+ hclge_comm_tqps_get_strings(handle, data);
}
static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
@@ -608,7 +606,7 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
}
static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+ const struct ethtool_rxfh_fields *nfc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
int ret;
@@ -626,7 +624,7 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
}
static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+ struct ethtool_rxfh_fields *nfc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
u8 tuple_sets;
@@ -1294,9 +1292,8 @@ static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
rtnl_unlock();
}
-static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+static int hclgevf_en_hw_strip_rxvtag_cmd(struct hclgevf_dev *hdev, bool enable)
{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hclge_vf_to_pf_msg send_msg;
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
@@ -1305,6 +1302,19 @@ static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
+static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int ret;
+
+ ret = hclgevf_en_hw_strip_rxvtag_cmd(hdev, enable);
+ if (ret)
+ return ret;
+
+ hdev->rxvtag_strip_en = enable;
+ return 0;
+}
+
static int hclgevf_reset_tqp(struct hnae3_handle *handle)
{
#define HCLGEVF_RESET_ALL_QUEUE_DONE 1U
@@ -2047,7 +2057,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
static void hclgevf_reset_timer(struct timer_list *t)
{
- struct hclgevf_dev *hdev = from_timer(hdev, t, reset_timer);
+ struct hclgevf_dev *hdev = timer_container_of(hdev, t, reset_timer);
hclgevf_clear_event_cause(hdev, HCLGEVF_VECTOR0_EVENT_RST);
hclgevf_reset_task_schedule(hdev);
@@ -2206,12 +2216,13 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
tc_valid, tc_size);
}
-static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
+static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev,
+ bool rxvtag_strip_en)
{
struct hnae3_handle *nic = &hdev->nic;
int ret;
- ret = hclgevf_en_hw_strip_rxvtag(nic, true);
+ ret = hclgevf_en_hw_strip_rxvtag(nic, rxvtag_strip_en);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to enable rx vlan offload, ret = %d\n", ret);
@@ -2315,6 +2326,7 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev)
clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
+ /* timer needs to be initialized before misc irq */
timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
mutex_init(&hdev->mbx_resp.mbx_mutex);
@@ -2453,7 +2465,7 @@ static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
struct hnae3_client *client)
{
struct hclgevf_dev *hdev = ae_dev->priv;
- int rst_cnt = hdev->rst_stats.rst_cnt;
+ u32 rst_cnt = hdev->rst_stats.rst_cnt;
int ret;
ret = client->ops->init_instance(&hdev->nic);
@@ -2613,7 +2625,7 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev)
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (ret) {
- dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
+ dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting\n");
goto err_disable_device;
}
@@ -2880,7 +2892,7 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
if (ret)
return ret;
- ret = hclgevf_init_vlan_config(hdev);
+ ret = hclgevf_init_vlan_config(hdev, hdev->rxvtag_strip_en);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed(%d) to initialize VLAN config\n", ret);
@@ -2995,7 +3007,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
- ret = hclgevf_init_vlan_config(hdev);
+ ret = hclgevf_init_vlan_config(hdev, true);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed(%d) to initialize VLAN config\n", ret);
@@ -3082,11 +3094,7 @@ static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
- struct hnae3_handle *nic = &hdev->nic;
- struct hnae3_knic_private_info *kinfo = &nic->kinfo;
-
- return min_t(u32, hdev->rss_size_max,
- hdev->num_tqps / kinfo->tc_info.num_tc);
+ return min(hdev->rss_size_max, hdev->num_tqps);
}
/**
@@ -3411,8 +3419,10 @@ static int __init hclgevf_init(void)
static void __exit hclgevf_exit(void)
{
+ hnae3_acquire_unload_lock();
hnae3_unregister_ae_algo(&ae_algovf);
destroy_workqueue(hclgevf_wq);
+ hnae3_release_unload_lock();
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index cccef3228461..0208425ab594 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -253,6 +253,7 @@ struct hclgevf_dev {
int *vector_irq;
bool gro_en;
+ bool rxvtag_strip_en;
unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index 85c2a634c8f9..f5c99ca54369 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -159,7 +159,7 @@ static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw)
{
u32 tail = hclgevf_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG);
- return tail == hw->hw.cmq.crq.next_to_use;
+ return tail == (u32)hw->hw.cmq.crq.next_to_use;
}
static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
index 7d9d9dbc7560..9de01e344e27 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
@@ -127,37 +127,38 @@ void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hnae3_queue *tqp;
- int i, j, reg_um;
+ int i, j, reg_num;
u32 *reg = data;
*version = hdev->fw_version;
reg += hclgevf_reg_get_header(reg);
/* fetching per-VF registers values from VF PCIe register space */
- reg_um = ARRAY_SIZE(cmdq_reg_addr_list);
- reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_CMDQ, reg_um, reg);
- for (i = 0; i < reg_um; i++)
+ reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_CMDQ, reg_num, reg);
+ for (i = 0; i < reg_num; i++)
*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
- reg_um = ARRAY_SIZE(common_reg_addr_list);
- reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_COMMON, reg_um, reg);
- for (i = 0; i < reg_um; i++)
+ reg_num = ARRAY_SIZE(common_reg_addr_list);
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_COMMON, reg_num, reg);
+ for (i = 0; i < reg_num; i++)
*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
- reg_um = ARRAY_SIZE(ring_reg_addr_list);
+ reg_num = ARRAY_SIZE(ring_reg_addr_list);
for (j = 0; j < hdev->num_tqps; j++) {
- reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_um, reg);
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_num, reg);
tqp = &hdev->htqp[j].q;
- for (i = 0; i < reg_um; i++)
+ for (i = 0; i < reg_num; i++)
*reg++ = readl_relaxed(tqp->io_base -
HCLGEVF_TQP_REG_OFFSET +
ring_reg_addr_list[i]);
}
- reg_um = ARRAY_SIZE(tqp_intr_reg_addr_list);
+ reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
for (j = 0; j < hdev->num_msi_used - 1; j++) {
- reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_TQP_INTR, reg_um, reg);
- for (i = 0; i < reg_um; i++)
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_TQP_INTR,
+ reg_num, reg);
+ for (i = 0; i < reg_num; i++)
*reg++ = hclgevf_read_dev(&hdev->hw,
tqp_intr_reg_addr_list[i] +
HCLGEVF_RING_INT_REG_OFFSET * j);
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index a1aa6c1f966e..6812be8dc64f 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -640,7 +640,7 @@ static struct platform_driver hns_mdio_driver = {
.driver = {
.name = MDIO_DRV_NAME,
.of_match_table = hns_mdio_match,
- .acpi_match_table = ACPI_PTR(hns_mdio_acpi_match),
+ .acpi_match_table = hns_mdio_acpi_match,
},
};
diff --git a/drivers/net/ethernet/huawei/Kconfig b/drivers/net/ethernet/huawei/Kconfig
index c05fce15eb51..7d0feb1da158 100644
--- a/drivers/net/ethernet/huawei/Kconfig
+++ b/drivers/net/ethernet/huawei/Kconfig
@@ -16,5 +16,6 @@ config NET_VENDOR_HUAWEI
if NET_VENDOR_HUAWEI
source "drivers/net/ethernet/huawei/hinic/Kconfig"
+source "drivers/net/ethernet/huawei/hinic3/Kconfig"
endif # NET_VENDOR_HUAWEI
diff --git a/drivers/net/ethernet/huawei/Makefile b/drivers/net/ethernet/huawei/Makefile
index 2549ad5afe6d..59865b882879 100644
--- a/drivers/net/ethernet/huawei/Makefile
+++ b/drivers/net/ethernet/huawei/Makefile
@@ -4,3 +4,4 @@
#
obj-$(CONFIG_HINIC) += hinic/
+obj-$(CONFIG_HINIC3) += hinic3/
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
index 03e42512a2d5..300bc267a259 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
@@ -443,8 +443,9 @@ int hinic_health_reporters_create(struct hinic_devlink_priv *priv)
struct devlink *devlink = priv_to_devlink(priv);
priv->hw_fault_reporter =
- devlink_health_reporter_create(devlink, &hinic_hw_fault_reporter_ops,
- 0, priv);
+ devlink_health_reporter_create(devlink,
+ &hinic_hw_fault_reporter_ops,
+ priv);
if (IS_ERR(priv->hw_fault_reporter)) {
dev_warn(&priv->hwdev->hwif->pdev->dev, "Failed to create hw fault reporter, err: %ld\n",
PTR_ERR(priv->hw_fault_reporter));
@@ -452,8 +453,9 @@ int hinic_health_reporters_create(struct hinic_devlink_priv *priv)
}
priv->fw_fault_reporter =
- devlink_health_reporter_create(devlink, &hinic_fw_fault_reporter_ops,
- 0, priv);
+ devlink_health_reporter_create(devlink,
+ &hinic_fw_fault_reporter_ops,
+ priv);
if (IS_ERR(priv->fw_fault_reporter)) {
dev_warn(&priv->hwdev->hwif->pdev->dev, "Failed to create fw fault reporter, err: %ld\n",
PTR_ERR(priv->fw_fault_reporter));
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index c559dd4291d3..e9f338e9dbe7 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -919,9 +919,10 @@ static int hinic_set_channels(struct net_device *netdev,
return 0;
}
-static int hinic_get_rss_hash_opts(struct hinic_dev *nic_dev,
- struct ethtool_rxnfc *cmd)
+static int hinic_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
struct hinic_rss_type rss_type = { 0 };
int err;
@@ -964,7 +965,7 @@ static int hinic_get_rss_hash_opts(struct hinic_dev *nic_dev,
return 0;
}
-static int set_l4_rss_hash_ops(struct ethtool_rxnfc *cmd,
+static int set_l4_rss_hash_ops(const struct ethtool_rxfh_fields *cmd,
struct hinic_rss_type *rss_type)
{
u8 rss_l4_en = 0;
@@ -1000,16 +1001,18 @@ static int set_l4_rss_hash_ops(struct ethtool_rxnfc *cmd,
return 0;
}
-static int hinic_set_rss_hash_opts(struct hinic_dev *nic_dev,
- struct ethtool_rxnfc *cmd)
+static int hinic_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
- struct hinic_rss_type *rss_type = &nic_dev->rss_type;
+ struct hinic_dev *nic_dev = netdev_priv(dev);
+ struct hinic_rss_type *rss_type;
int err;
- if (!(nic_dev->flags & HINIC_RSS_ENABLE)) {
- cmd->data = 0;
+ rss_type = &nic_dev->rss_type;
+
+ if (!(nic_dev->flags & HINIC_RSS_ENABLE))
return -EOPNOTSUPP;
- }
/* RSS does not support anything other than hashing
* to queues on src and dst IPs and ports
@@ -1108,26 +1111,6 @@ static int hinic_get_rxnfc(struct net_device *netdev,
case ETHTOOL_GRXRINGS:
cmd->data = nic_dev->num_qps;
break;
- case ETHTOOL_GRXFH:
- err = hinic_get_rss_hash_opts(nic_dev, cmd);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- return err;
-}
-
-static int hinic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- int err = 0;
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- err = hinic_set_rss_hash_opts(nic_dev, cmd);
- break;
default:
err = -EOPNOTSUPP;
break;
@@ -1797,11 +1780,12 @@ static const struct ethtool_ops hinic_ethtool_ops = {
.get_channels = hinic_get_channels,
.set_channels = hinic_set_channels,
.get_rxnfc = hinic_get_rxnfc,
- .set_rxnfc = hinic_set_rxnfc,
.get_rxfh_key_size = hinic_get_rxfh_key_size,
.get_rxfh_indir_size = hinic_get_rxfh_indir_size,
.get_rxfh = hinic_get_rxfh,
.set_rxfh = hinic_set_rxfh,
+ .get_rxfh_fields = hinic_get_rxfh_fields,
+ .set_rxfh_fields = hinic_set_rxfh_fields,
.get_sset_count = hinic_get_sset_count,
.get_ethtool_stats = hinic_get_ethtool_stats,
.get_strings = hinic_get_strings,
@@ -1829,11 +1813,12 @@ static const struct ethtool_ops hinicvf_ethtool_ops = {
.get_channels = hinic_get_channels,
.set_channels = hinic_set_channels,
.get_rxnfc = hinic_get_rxnfc,
- .set_rxnfc = hinic_set_rxnfc,
.get_rxfh_key_size = hinic_get_rxfh_key_size,
.get_rxfh_indir_size = hinic_get_rxfh_indir_size,
.get_rxfh = hinic_get_rxfh,
.set_rxfh = hinic_set_rxfh,
+ .get_rxfh_fields = hinic_get_rxfh_fields,
+ .set_rxfh_fields = hinic_set_rxfh_fields,
.get_sset_count = hinic_get_sset_count,
.get_ethtool_stats = hinic_get_ethtool_stats,
.get_strings = hinic_get_strings,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
index 045c47786a04..28114a59347e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
@@ -605,7 +605,7 @@ static void aeq_elements_init(struct hinic_eq *eq, u32 init_val)
/**
* ceq_elements_init - Initialize all the elements in the ceq
* @eq: the event queue
- * @init_val: value to init with it the elements
+ * @init_val: value to init the elements with
**/
static void ceq_elements_init(struct hinic_eq *eq, u32 init_val)
{
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
index 3f9c31d29215..97c1584dc05b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
@@ -861,7 +861,7 @@ static int send_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
HINIC_MBOX_HEADER_SET(NOT_LAST_SEG, LAST) |
HINIC_MBOX_HEADER_SET(direction, DIRECTION) |
HINIC_MBOX_HEADER_SET(cmd, CMD) |
- /* The vf's offset to it's associated pf */
+ /* The vf's offset to its associated pf */
HINIC_MBOX_HEADER_SET(msg_info->msg_id, MSG_ID) |
HINIC_MBOX_HEADER_SET(msg_info->status, STATUS) |
HINIC_MBOX_HEADER_SET(hinic_global_func_id_hw(hwdev->hwif),
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 890f213da8d1..ae1f523d6841 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -172,6 +172,7 @@ err_init_txq:
hinic_sq_dbgfs_uninit(nic_dev);
devm_kfree(&netdev->dev, nic_dev->txqs);
+ nic_dev->txqs = NULL;
return err;
}
@@ -268,6 +269,7 @@ err_init_rxq:
hinic_rq_dbgfs_uninit(nic_dev);
devm_kfree(&netdev->dev, nic_dev->rxqs);
+ nic_dev->rxqs = NULL;
return err;
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c
index f81a43d2cdfc..486fb0e20bef 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c
@@ -469,7 +469,7 @@ int hinic_set_vlan_fliter(struct hinic_dev *nic_dev, u32 en)
err = HINIC_MGMT_CMD_UNSUPPORTED;
} else if (err || !out_size || vlan_filter.status) {
dev_err(&pdev->dev,
- "Failed to set vlan fliter, err: %d, status: 0x%x, out size: 0x%x\n",
+ "Failed to set vlan filter, err: %d, status: 0x%x, out size: 0x%x\n",
err, vlan_filter.status, out_size);
err = -EINVAL;
}
diff --git a/drivers/net/ethernet/huawei/hinic3/Kconfig b/drivers/net/ethernet/huawei/hinic3/Kconfig
new file mode 100644
index 000000000000..ce4331d1387b
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Huawei driver configuration
+#
+
+config HINIC3
+ tristate "Huawei 3rd generation network adapters (HINIC3) support"
+ # Fields of HW and management structures are little endian and are
+ # currently not converted
+ depends on !CPU_BIG_ENDIAN
+ depends on X86 || ARM64 || COMPILE_TEST
+ depends on PCI_MSI && 64BIT
+ select AUXILIARY_BUS
+ select PAGE_POOL
+ help
+ This driver supports HiNIC 3rd gen Network Adapter (HINIC3).
+ The driver is supported on X86_64 and ARM64 little endian.
+
+ To compile this driver as a module, choose M here.
+ The module will be called hinic3.
diff --git a/drivers/net/ethernet/huawei/hinic3/Makefile b/drivers/net/ethernet/huawei/hinic3/Makefile
new file mode 100644
index 000000000000..c3efa45a6a42
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/Makefile
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+obj-$(CONFIG_HINIC3) += hinic3.o
+
+hinic3-objs := hinic3_cmdq.o \
+ hinic3_common.o \
+ hinic3_eqs.o \
+ hinic3_hw_cfg.o \
+ hinic3_hw_comm.o \
+ hinic3_hwdev.o \
+ hinic3_hwif.o \
+ hinic3_irq.o \
+ hinic3_lld.o \
+ hinic3_main.o \
+ hinic3_mbox.o \
+ hinic3_mgmt.o \
+ hinic3_netdev_ops.o \
+ hinic3_nic_cfg.o \
+ hinic3_nic_io.o \
+ hinic3_queue_common.o \
+ hinic3_rss.o \
+ hinic3_rx.o \
+ hinic3_tx.o \
+ hinic3_wq.o
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c
new file mode 100644
index 000000000000..ef539d1b69a3
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c
@@ -0,0 +1,915 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+
+#include "hinic3_cmdq.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+
+#define CMDQ_BUF_SIZE 2048
+#define CMDQ_WQEBB_SIZE 64
+
+#define CMDQ_CMD_TIMEOUT 5000
+#define CMDQ_ENABLE_WAIT_TIMEOUT 300
+
+#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK GENMASK_ULL(51, 0)
+#define CMDQ_CTXT_EQ_ID_MASK GENMASK_ULL(60, 53)
+#define CMDQ_CTXT_CEQ_ARM_MASK BIT_ULL(61)
+#define CMDQ_CTXT_CEQ_EN_MASK BIT_ULL(62)
+#define CMDQ_CTXT_HW_BUSY_BIT_MASK BIT_ULL(63)
+
+#define CMDQ_CTXT_WQ_BLOCK_PFN_MASK GENMASK_ULL(51, 0)
+#define CMDQ_CTXT_CI_MASK GENMASK_ULL(63, 52)
+#define CMDQ_CTXT_SET(val, member) \
+ FIELD_PREP(CMDQ_CTXT_##member##_MASK, val)
+
+#define CMDQ_WQE_HDR_BUFDESC_LEN_MASK GENMASK(7, 0)
+#define CMDQ_WQE_HDR_COMPLETE_FMT_MASK BIT(15)
+#define CMDQ_WQE_HDR_DATA_FMT_MASK BIT(22)
+#define CMDQ_WQE_HDR_COMPLETE_REQ_MASK BIT(23)
+#define CMDQ_WQE_HDR_COMPLETE_SECT_LEN_MASK GENMASK(28, 27)
+#define CMDQ_WQE_HDR_CTRL_LEN_MASK GENMASK(30, 29)
+#define CMDQ_WQE_HDR_HW_BUSY_BIT_MASK BIT(31)
+#define CMDQ_WQE_HDR_SET(val, member) \
+ FIELD_PREP(CMDQ_WQE_HDR_##member##_MASK, val)
+#define CMDQ_WQE_HDR_GET(val, member) \
+ FIELD_GET(CMDQ_WQE_HDR_##member##_MASK, le32_to_cpu(val))
+
+#define CMDQ_CTRL_PI_MASK GENMASK(15, 0)
+#define CMDQ_CTRL_CMD_MASK GENMASK(23, 16)
+#define CMDQ_CTRL_MOD_MASK GENMASK(28, 24)
+#define CMDQ_CTRL_HW_BUSY_BIT_MASK BIT(31)
+#define CMDQ_CTRL_SET(val, member) \
+ FIELD_PREP(CMDQ_CTRL_##member##_MASK, val)
+#define CMDQ_CTRL_GET(val, member) \
+ FIELD_GET(CMDQ_CTRL_##member##_MASK, val)
+
+#define CMDQ_WQE_ERRCODE_VAL_MASK GENMASK(30, 0)
+#define CMDQ_WQE_ERRCODE_GET(val, member) \
+ FIELD_GET(CMDQ_WQE_ERRCODE_##member##_MASK, le32_to_cpu(val))
+
+#define CMDQ_DB_INFO_HI_PROD_IDX_MASK GENMASK(7, 0)
+#define CMDQ_DB_INFO_SET(val, member) \
+ FIELD_PREP(CMDQ_DB_INFO_##member##_MASK, val)
+
+#define CMDQ_DB_HEAD_QUEUE_TYPE_MASK BIT(23)
+#define CMDQ_DB_HEAD_CMDQ_TYPE_MASK GENMASK(26, 24)
+#define CMDQ_DB_HEAD_SET(val, member) \
+ FIELD_PREP(CMDQ_DB_HEAD_##member##_MASK, val)
+
+#define CMDQ_CEQE_TYPE_MASK GENMASK(2, 0)
+#define CMDQ_CEQE_GET(val, member) \
+ FIELD_GET(CMDQ_CEQE_##member##_MASK, le32_to_cpu(val))
+
+#define CMDQ_WQE_HEADER(wqe) ((struct cmdq_header *)(wqe))
+#define CMDQ_WQE_COMPLETED(ctrl_info) \
+ CMDQ_CTRL_GET(le32_to_cpu(ctrl_info), HW_BUSY_BIT)
+
+#define CMDQ_PFN(addr) ((addr) >> 12)
+
+/* cmdq work queue's chip logical address table is up to 512B */
+#define CMDQ_WQ_CLA_SIZE 512
+
+/* Completion codes: send, direct sync, force stop */
+#define CMDQ_SEND_CMPT_CODE 10
+#define CMDQ_DIRECT_SYNC_CMPT_CODE 11
+#define CMDQ_FORCE_STOP_CMPT_CODE 12
+
+enum cmdq_data_format {
+ CMDQ_DATA_SGE = 0,
+ CMDQ_DATA_DIRECT = 1,
+};
+
+enum cmdq_ctrl_sect_len {
+ CMDQ_CTRL_SECT_LEN = 1,
+ CMDQ_CTRL_DIRECT_SECT_LEN = 2,
+};
+
+enum cmdq_bufdesc_len {
+ CMDQ_BUFDESC_LCMD_LEN = 2,
+ CMDQ_BUFDESC_SCMD_LEN = 3,
+};
+
+enum cmdq_completion_format {
+ CMDQ_COMPLETE_DIRECT = 0,
+ CMDQ_COMPLETE_SGE = 1,
+};
+
+enum cmdq_cmd_type {
+ CMDQ_CMD_DIRECT_RESP,
+ CMDQ_CMD_SGE_RESP,
+};
+
+#define CMDQ_WQE_NUM_WQEBBS 1
+
+static struct cmdq_wqe *cmdq_read_wqe(struct hinic3_wq *wq, u16 *ci)
+{
+ if (hinic3_wq_get_used(wq) == 0)
+ return NULL;
+
+ *ci = wq->cons_idx & wq->idx_mask;
+
+ return get_q_element(&wq->qpages, wq->cons_idx, NULL);
+}
+
+struct hinic3_cmd_buf *hinic3_alloc_cmd_buf(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cmd_buf *cmd_buf;
+ struct hinic3_cmdqs *cmdqs;
+
+ cmdqs = hwdev->cmdqs;
+
+ cmd_buf = kmalloc(sizeof(*cmd_buf), GFP_ATOMIC);
+ if (!cmd_buf)
+ return NULL;
+
+ cmd_buf->buf = dma_pool_alloc(cmdqs->cmd_buf_pool, GFP_ATOMIC,
+ &cmd_buf->dma_addr);
+ if (!cmd_buf->buf) {
+ dev_err(hwdev->dev, "Failed to allocate cmdq cmd buf from the pool\n");
+ goto err_free_cmd_buf;
+ }
+
+ cmd_buf->size = cpu_to_le16(CMDQ_BUF_SIZE);
+ refcount_set(&cmd_buf->ref_cnt, 1);
+
+ return cmd_buf;
+
+err_free_cmd_buf:
+ kfree(cmd_buf);
+
+ return NULL;
+}
+
+void hinic3_free_cmd_buf(struct hinic3_hwdev *hwdev,
+ struct hinic3_cmd_buf *cmd_buf)
+{
+ struct hinic3_cmdqs *cmdqs;
+
+ if (!refcount_dec_and_test(&cmd_buf->ref_cnt))
+ return;
+
+ cmdqs = hwdev->cmdqs;
+
+ dma_pool_free(cmdqs->cmd_buf_pool, cmd_buf->buf, cmd_buf->dma_addr);
+ kfree(cmd_buf);
+}
+
+static void cmdq_clear_cmd_buf(struct hinic3_cmdq_cmd_info *cmd_info,
+ struct hinic3_hwdev *hwdev)
+{
+ if (cmd_info->buf_in) {
+ hinic3_free_cmd_buf(hwdev, cmd_info->buf_in);
+ cmd_info->buf_in = NULL;
+ }
+}
+
+static void clear_wqe_complete_bit(struct hinic3_cmdq *cmdq,
+ struct cmdq_wqe *wqe, u16 ci)
+{
+ struct cmdq_header *hdr = CMDQ_WQE_HEADER(wqe);
+ __le32 header_info = hdr->header_info;
+ enum cmdq_data_format df;
+ struct cmdq_ctrl *ctrl;
+
+ df = CMDQ_WQE_HDR_GET(header_info, DATA_FMT);
+ if (df == CMDQ_DATA_SGE)
+ ctrl = &wqe->wqe_lcmd.ctrl;
+ else
+ ctrl = &wqe->wqe_scmd.ctrl;
+
+ /* clear HW busy bit */
+ ctrl->ctrl_info = 0;
+ cmdq->cmd_infos[ci].cmd_type = HINIC3_CMD_TYPE_NONE;
+ wmb(); /* make sure the wqe is cleared before updating ci */
+ hinic3_wq_put_wqebbs(&cmdq->wq, CMDQ_WQE_NUM_WQEBBS);
+}
+
+static void cmdq_update_cmd_status(struct hinic3_cmdq *cmdq, u16 prod_idx,
+ struct cmdq_wqe *wqe)
+{
+ struct hinic3_cmdq_cmd_info *cmd_info;
+ struct cmdq_wqe_lcmd *wqe_lcmd;
+ __le32 status_info;
+
+ wqe_lcmd = &wqe->wqe_lcmd;
+ cmd_info = &cmdq->cmd_infos[prod_idx];
+ if (cmd_info->errcode) {
+ status_info = wqe_lcmd->status.status_info;
+ *cmd_info->errcode = CMDQ_WQE_ERRCODE_GET(status_info, VAL);
+ }
+
+ if (cmd_info->direct_resp)
+ *cmd_info->direct_resp = wqe_lcmd->completion.resp.direct.val;
+}
+
+static void cmdq_sync_cmd_handler(struct hinic3_cmdq *cmdq,
+ struct cmdq_wqe *wqe, u16 ci)
+{
+ spin_lock(&cmdq->cmdq_lock);
+ cmdq_update_cmd_status(cmdq, ci, wqe);
+ if (cmdq->cmd_infos[ci].cmpt_code) {
+ *cmdq->cmd_infos[ci].cmpt_code = CMDQ_DIRECT_SYNC_CMPT_CODE;
+ cmdq->cmd_infos[ci].cmpt_code = NULL;
+ }
+
+ /* Ensure that completion code has been updated before updating done */
+ smp_wmb();
+ if (cmdq->cmd_infos[ci].done) {
+ complete(cmdq->cmd_infos[ci].done);
+ cmdq->cmd_infos[ci].done = NULL;
+ }
+ spin_unlock(&cmdq->cmdq_lock);
+
+ cmdq_clear_cmd_buf(&cmdq->cmd_infos[ci], cmdq->hwdev);
+ clear_wqe_complete_bit(cmdq, wqe, ci);
+}
+
+void hinic3_cmdq_ceq_handler(struct hinic3_hwdev *hwdev, __le32 ceqe_data)
+{
+ enum hinic3_cmdq_type cmdq_type = CMDQ_CEQE_GET(ceqe_data, TYPE);
+ struct hinic3_cmdqs *cmdqs = hwdev->cmdqs;
+ struct hinic3_cmdq_cmd_info *cmd_info;
+ struct cmdq_wqe_lcmd *wqe_lcmd;
+ struct hinic3_cmdq *cmdq;
+ struct cmdq_wqe *wqe;
+ __le32 ctrl_info;
+ u16 ci;
+
+ if (unlikely(cmdq_type >= ARRAY_SIZE(cmdqs->cmdq)))
+ return;
+
+ cmdq = &cmdqs->cmdq[cmdq_type];
+ while ((wqe = cmdq_read_wqe(&cmdq->wq, &ci)) != NULL) {
+ cmd_info = &cmdq->cmd_infos[ci];
+ switch (cmd_info->cmd_type) {
+ case HINIC3_CMD_TYPE_NONE:
+ return;
+ case HINIC3_CMD_TYPE_TIMEOUT:
+ dev_warn(hwdev->dev, "Cmdq timeout, q_id: %u, ci: %u\n",
+ cmdq_type, ci);
+ fallthrough;
+ case HINIC3_CMD_TYPE_FAKE_TIMEOUT:
+ cmdq_clear_cmd_buf(cmd_info, hwdev);
+ clear_wqe_complete_bit(cmdq, wqe, ci);
+ break;
+ default:
+ /* only the arm bit uses an scmd wqe,
+ * all other wqes are lcmd
+ */
+ wqe_lcmd = &wqe->wqe_lcmd;
+ ctrl_info = wqe_lcmd->ctrl.ctrl_info;
+ if (!CMDQ_WQE_COMPLETED(ctrl_info))
+ return;
+
+ dma_rmb();
+ /* For FORCE_STOP cmd_type, we also need to wait for
+ * the firmware processing to complete to prevent the
+ * firmware from accessing the released cmd_buf
+ */
+ if (cmd_info->cmd_type == HINIC3_CMD_TYPE_FORCE_STOP) {
+ cmdq_clear_cmd_buf(cmd_info, hwdev);
+ clear_wqe_complete_bit(cmdq, wqe, ci);
+ } else {
+ cmdq_sync_cmd_handler(cmdq, wqe, ci);
+ }
+
+ break;
+ }
+ }
+}
+
+static int wait_cmdqs_enable(struct hinic3_cmdqs *cmdqs)
+{
+ unsigned long end;
+
+ end = jiffies + msecs_to_jiffies(CMDQ_ENABLE_WAIT_TIMEOUT);
+ do {
+ if (cmdqs->status & HINIC3_CMDQ_ENABLE)
+ return 0;
+ usleep_range(1000, 2000);
+ } while (time_before(jiffies, end) && !cmdqs->disable_flag);
+
+ cmdqs->disable_flag = 1;
+
+ return -EBUSY;
+}
+
+static void cmdq_set_completion(struct cmdq_completion *complete,
+ struct hinic3_cmd_buf *buf_out)
+{
+ struct hinic3_sge *sge = &complete->resp.sge;
+
+ hinic3_set_sge(sge, buf_out->dma_addr, cpu_to_le32(CMDQ_BUF_SIZE));
+}
+
+static struct cmdq_wqe *cmdq_get_wqe(struct hinic3_wq *wq, u16 *pi)
+{
+ if (!hinic3_wq_free_wqebbs(wq))
+ return NULL;
+
+ return hinic3_wq_get_one_wqebb(wq, pi);
+}
+
+static void cmdq_set_lcmd_bufdesc(struct cmdq_wqe_lcmd *wqe,
+ struct hinic3_cmd_buf *buf_in)
+{
+ hinic3_set_sge(&wqe->buf_desc.sge, buf_in->dma_addr,
+ (__force __le32)buf_in->size);
+}
+
+static void cmdq_set_db(struct hinic3_cmdq *cmdq,
+ enum hinic3_cmdq_type cmdq_type, u16 prod_idx)
+{
+ u8 __iomem *db_base = cmdq->hwdev->cmdqs->cmdqs_db_base;
+ u16 db_ofs = (prod_idx & 0xFF) << 3;
+ struct cmdq_db db;
+
+ db.db_info = cpu_to_le32(CMDQ_DB_INFO_SET(prod_idx >> 8, HI_PROD_IDX));
+ db.db_head = cpu_to_le32(CMDQ_DB_HEAD_SET(1, QUEUE_TYPE) |
+ CMDQ_DB_HEAD_SET(cmdq_type, CMDQ_TYPE));
+ writeq(*(u64 *)&db, db_base + db_ofs);
+}
+
+static void cmdq_wqe_fill(struct cmdq_wqe *hw_wqe,
+ const struct cmdq_wqe *shadow_wqe)
+{
+ const struct cmdq_header *src = (struct cmdq_header *)shadow_wqe;
+ struct cmdq_header *dst = (struct cmdq_header *)hw_wqe;
+ size_t len;
+
+ len = sizeof(struct cmdq_wqe) - sizeof(struct cmdq_header);
+ memcpy(dst + 1, src + 1, len);
+ /* make sure the wqe body is written before updating the header */
+ wmb();
+ WRITE_ONCE(*dst, *src);
+}
+
+static void cmdq_prepare_wqe_ctrl(struct cmdq_wqe *wqe, u8 wrapped,
+ u8 mod, u8 cmd, u16 prod_idx,
+ enum cmdq_completion_format complete_format,
+ enum cmdq_data_format data_format,
+ enum cmdq_bufdesc_len buf_len)
+{
+ struct cmdq_header *hdr = CMDQ_WQE_HEADER(wqe);
+ enum cmdq_ctrl_sect_len ctrl_len;
+ struct cmdq_wqe_lcmd *wqe_lcmd;
+ struct cmdq_wqe_scmd *wqe_scmd;
+ struct cmdq_ctrl *ctrl;
+
+ if (data_format == CMDQ_DATA_SGE) {
+ wqe_lcmd = &wqe->wqe_lcmd;
+ wqe_lcmd->status.status_info = 0;
+ ctrl = &wqe_lcmd->ctrl;
+ ctrl_len = CMDQ_CTRL_SECT_LEN;
+ } else {
+ wqe_scmd = &wqe->wqe_scmd;
+ wqe_scmd->status.status_info = 0;
+ ctrl = &wqe_scmd->ctrl;
+ ctrl_len = CMDQ_CTRL_DIRECT_SECT_LEN;
+ }
+
+ ctrl->ctrl_info =
+ cpu_to_le32(CMDQ_CTRL_SET(prod_idx, PI) |
+ CMDQ_CTRL_SET(cmd, CMD) |
+ CMDQ_CTRL_SET(mod, MOD));
+
+ hdr->header_info =
+ cpu_to_le32(CMDQ_WQE_HDR_SET(buf_len, BUFDESC_LEN) |
+ CMDQ_WQE_HDR_SET(complete_format, COMPLETE_FMT) |
+ CMDQ_WQE_HDR_SET(data_format, DATA_FMT) |
+ CMDQ_WQE_HDR_SET(1, COMPLETE_REQ) |
+ CMDQ_WQE_HDR_SET(3, COMPLETE_SECT_LEN) |
+ CMDQ_WQE_HDR_SET(ctrl_len, CTRL_LEN) |
+ CMDQ_WQE_HDR_SET(wrapped, HW_BUSY_BIT));
+}
+
+static void cmdq_set_lcmd_wqe(struct cmdq_wqe *wqe,
+ enum cmdq_cmd_type cmd_type,
+ struct hinic3_cmd_buf *buf_in,
+ struct hinic3_cmd_buf *buf_out,
+ u8 wrapped, u8 mod, u8 cmd, u16 prod_idx)
+{
+ enum cmdq_completion_format complete_format = CMDQ_COMPLETE_DIRECT;
+ struct cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
+
+ switch (cmd_type) {
+ case CMDQ_CMD_DIRECT_RESP:
+ wqe_lcmd->completion.resp.direct.val = 0;
+ break;
+ case CMDQ_CMD_SGE_RESP:
+ if (buf_out) {
+ complete_format = CMDQ_COMPLETE_SGE;
+ cmdq_set_completion(&wqe_lcmd->completion, buf_out);
+ }
+ break;
+ }
+
+ cmdq_prepare_wqe_ctrl(wqe, wrapped, mod, cmd, prod_idx, complete_format,
+ CMDQ_DATA_SGE, CMDQ_BUFDESC_LCMD_LEN);
+ cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in);
+}
+
+static int hinic3_cmdq_sync_timeout_check(struct hinic3_cmdq *cmdq,
+ struct cmdq_wqe *wqe, u16 pi)
+{
+ struct cmdq_wqe_lcmd *wqe_lcmd;
+ struct cmdq_ctrl *ctrl;
+ __le32 ctrl_info;
+
+ wqe_lcmd = &wqe->wqe_lcmd;
+ ctrl = &wqe_lcmd->ctrl;
+ ctrl_info = ctrl->ctrl_info;
+ if (!CMDQ_WQE_COMPLETED(ctrl_info)) {
+ dev_dbg(cmdq->hwdev->dev, "Cmdq sync command check busy bit not set\n");
+ return -EFAULT;
+ }
+ cmdq_update_cmd_status(cmdq, pi, wqe);
+
+ return 0;
+}
+
+static void clear_cmd_info(struct hinic3_cmdq_cmd_info *cmd_info,
+ const struct hinic3_cmdq_cmd_info *saved_cmd_info)
+{
+ if (cmd_info->errcode == saved_cmd_info->errcode)
+ cmd_info->errcode = NULL;
+
+ if (cmd_info->done == saved_cmd_info->done)
+ cmd_info->done = NULL;
+
+ if (cmd_info->direct_resp == saved_cmd_info->direct_resp)
+ cmd_info->direct_resp = NULL;
+}
+
+static int wait_cmdq_sync_cmd_completion(struct hinic3_cmdq *cmdq,
+ struct hinic3_cmdq_cmd_info *cmd_info,
+ struct hinic3_cmdq_cmd_info *saved_cmd_info,
+ u64 curr_msg_id, u16 curr_prod_idx,
+ struct cmdq_wqe *curr_wqe,
+ u32 timeout)
+{
+ ulong timeo = msecs_to_jiffies(timeout);
+ int err;
+
+ if (wait_for_completion_timeout(saved_cmd_info->done, timeo))
+ return 0;
+
+ spin_lock_bh(&cmdq->cmdq_lock);
+ if (cmd_info->cmpt_code == saved_cmd_info->cmpt_code)
+ cmd_info->cmpt_code = NULL;
+
+ if (*saved_cmd_info->cmpt_code == CMDQ_DIRECT_SYNC_CMPT_CODE) {
+ dev_dbg(cmdq->hwdev->dev, "Cmdq direct sync command has been completed\n");
+ spin_unlock_bh(&cmdq->cmdq_lock);
+ return 0;
+ }
+
+ if (curr_msg_id == cmd_info->cmdq_msg_id) {
+ err = hinic3_cmdq_sync_timeout_check(cmdq, curr_wqe,
+ curr_prod_idx);
+ if (err)
+ cmd_info->cmd_type = HINIC3_CMD_TYPE_TIMEOUT;
+ else
+ cmd_info->cmd_type = HINIC3_CMD_TYPE_FAKE_TIMEOUT;
+ } else {
+ err = -ETIMEDOUT;
+ dev_err(cmdq->hwdev->dev,
+ "Cmdq sync command current msg id mismatch cmd_info msg id\n");
+ }
+
+ clear_cmd_info(cmd_info, saved_cmd_info);
+ spin_unlock_bh(&cmdq->cmdq_lock);
+
+ return err;
+}
+
+static int cmdq_sync_cmd_direct_resp(struct hinic3_cmdq *cmdq, u8 mod, u8 cmd,
+ struct hinic3_cmd_buf *buf_in,
+ __le64 *out_param)
+{
+ struct hinic3_cmdq_cmd_info *cmd_info, saved_cmd_info;
+ int cmpt_code = CMDQ_SEND_CMPT_CODE;
+ struct cmdq_wqe *curr_wqe, wqe = {};
+ struct hinic3_wq *wq = &cmdq->wq;
+ u16 curr_prod_idx, next_prod_idx;
+ struct completion done;
+ u64 curr_msg_id;
+ int errcode;
+ u8 wrapped;
+ int err;
+
+ spin_lock_bh(&cmdq->cmdq_lock);
+ curr_wqe = cmdq_get_wqe(wq, &curr_prod_idx);
+ if (!curr_wqe) {
+ spin_unlock_bh(&cmdq->cmdq_lock);
+ return -EBUSY;
+ }
+
+ wrapped = cmdq->wrapped;
+ next_prod_idx = curr_prod_idx + CMDQ_WQE_NUM_WQEBBS;
+ if (next_prod_idx >= wq->q_depth) {
+ cmdq->wrapped ^= 1;
+ next_prod_idx -= wq->q_depth;
+ }
+
+ cmd_info = &cmdq->cmd_infos[curr_prod_idx];
+ init_completion(&done);
+ refcount_inc(&buf_in->ref_cnt);
+ cmd_info->cmd_type = HINIC3_CMD_TYPE_DIRECT_RESP;
+ cmd_info->done = &done;
+ cmd_info->errcode = &errcode;
+ cmd_info->direct_resp = out_param;
+ cmd_info->cmpt_code = &cmpt_code;
+ cmd_info->buf_in = buf_in;
+ saved_cmd_info = *cmd_info;
+ cmdq_set_lcmd_wqe(&wqe, CMDQ_CMD_DIRECT_RESP, buf_in, NULL,
+ wrapped, mod, cmd, curr_prod_idx);
+
+ cmdq_wqe_fill(curr_wqe, &wqe);
+ (cmd_info->cmdq_msg_id)++;
+ curr_msg_id = cmd_info->cmdq_msg_id;
+ cmdq_set_db(cmdq, HINIC3_CMDQ_SYNC, next_prod_idx);
+ spin_unlock_bh(&cmdq->cmdq_lock);
+
+ err = wait_cmdq_sync_cmd_completion(cmdq, cmd_info, &saved_cmd_info,
+ curr_msg_id, curr_prod_idx,
+ curr_wqe, CMDQ_CMD_TIMEOUT);
+ if (err) {
+ dev_err(cmdq->hwdev->dev,
+ "Cmdq sync command timeout, mod: %u, cmd: %u, prod idx: 0x%x\n",
+ mod, cmd, curr_prod_idx);
+ err = -ETIMEDOUT;
+ }
+
+ if (cmpt_code == CMDQ_FORCE_STOP_CMPT_CODE) {
+ dev_dbg(cmdq->hwdev->dev,
+ "Force stop cmdq cmd, mod: %u, cmd: %u\n", mod, cmd);
+ err = -EAGAIN;
+ }
+
+ smp_rmb(); /* read error code after completion */
+
+ return err ? err : errcode;
+}
+
+int hinic3_cmdq_direct_resp(struct hinic3_hwdev *hwdev, u8 mod, u8 cmd,
+ struct hinic3_cmd_buf *buf_in, __le64 *out_param)
+{
+ struct hinic3_cmdqs *cmdqs;
+ int err;
+
+ cmdqs = hwdev->cmdqs;
+ err = wait_cmdqs_enable(cmdqs);
+ if (err) {
+ dev_err(hwdev->dev, "Cmdq is disabled\n");
+ return err;
+ }
+
+ err = cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC3_CMDQ_SYNC],
+ mod, cmd, buf_in, out_param);
+
+ return err;
+}
+
+static void cmdq_init_queue_ctxt(struct hinic3_hwdev *hwdev, u8 cmdq_id,
+ struct comm_cmdq_ctxt_info *ctxt_info)
+{
+ const struct hinic3_cmdqs *cmdqs;
+ u64 cmdq_first_block_paddr, pfn;
+ const struct hinic3_wq *wq;
+
+ cmdqs = hwdev->cmdqs;
+ wq = &cmdqs->cmdq[cmdq_id].wq;
+ pfn = CMDQ_PFN(hinic3_wq_get_first_wqe_page_addr(wq));
+
+ ctxt_info->curr_wqe_page_pfn =
+ cpu_to_le64(CMDQ_CTXT_SET(1, HW_BUSY_BIT) |
+ CMDQ_CTXT_SET(1, CEQ_EN) |
+ CMDQ_CTXT_SET(1, CEQ_ARM) |
+ CMDQ_CTXT_SET(0, EQ_ID) |
+ CMDQ_CTXT_SET(pfn, CURR_WQE_PAGE_PFN));
+
+ if (!hinic3_wq_is_0_level_cla(wq)) {
+ cmdq_first_block_paddr = cmdqs->wq_block_paddr;
+ pfn = CMDQ_PFN(cmdq_first_block_paddr);
+ }
+
+ ctxt_info->wq_block_pfn = cpu_to_le64(CMDQ_CTXT_SET(wq->cons_idx, CI) |
+ CMDQ_CTXT_SET(pfn, WQ_BLOCK_PFN));
+}
+
+static int init_cmdq(struct hinic3_cmdq *cmdq, struct hinic3_hwdev *hwdev,
+ enum hinic3_cmdq_type q_type)
+{
+ int err;
+
+ cmdq->cmdq_type = q_type;
+ cmdq->wrapped = 1;
+ cmdq->hwdev = hwdev;
+
+ spin_lock_init(&cmdq->cmdq_lock);
+
+ cmdq->cmd_infos = kcalloc(cmdq->wq.q_depth, sizeof(*cmdq->cmd_infos),
+ GFP_KERNEL);
+ if (!cmdq->cmd_infos) {
+ err = -ENOMEM;
+ return err;
+ }
+
+ return 0;
+}
+
+static int hinic3_set_cmdq_ctxt(struct hinic3_hwdev *hwdev, u8 cmdq_id)
+{
+ struct comm_cmd_set_cmdq_ctxt cmdq_ctxt = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ cmdq_init_queue_ctxt(hwdev, cmdq_id, &cmdq_ctxt.ctxt);
+ cmdq_ctxt.func_id = hinic3_global_func_id(hwdev);
+ cmdq_ctxt.cmdq_id = cmdq_id;
+
+ mgmt_msg_params_init_default(&msg_params, &cmdq_ctxt,
+ sizeof(cmdq_ctxt));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_SET_CMDQ_CTXT, &msg_params);
+ if (err || cmdq_ctxt.head.status) {
+ dev_err(hwdev->dev, "Failed to set cmdq ctxt, err: %d, status: 0x%x\n",
+ err, cmdq_ctxt.head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int hinic3_set_cmdq_ctxts(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cmdqs *cmdqs = hwdev->cmdqs;
+ u8 cmdq_type;
+ int err;
+
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
+ err = hinic3_set_cmdq_ctxt(hwdev, cmdq_type);
+ if (err)
+ return err;
+ }
+
+ cmdqs->status |= HINIC3_CMDQ_ENABLE;
+ cmdqs->disable_flag = 0;
+
+ return 0;
+}
+
+static int create_cmdq_wq(struct hinic3_hwdev *hwdev,
+ struct hinic3_cmdqs *cmdqs)
+{
+ u8 cmdq_type;
+ int err;
+
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
+ err = hinic3_wq_create(hwdev, &cmdqs->cmdq[cmdq_type].wq,
+ CMDQ_DEPTH, CMDQ_WQEBB_SIZE);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to create cmdq wq\n");
+ goto err_destroy_wq;
+ }
+ }
+
+ /* With 1-level Chip Logical Address (CLA), all cmdq wq page
+ * addresses must be placed in a single wq block
+ */
+ if (!hinic3_wq_is_0_level_cla(&cmdqs->cmdq[HINIC3_CMDQ_SYNC].wq)) {
+ if (cmdqs->cmdq[HINIC3_CMDQ_SYNC].wq.qpages.num_pages >
+ CMDQ_WQ_CLA_SIZE / sizeof(u64)) {
+ err = -EINVAL;
+ dev_err(hwdev->dev,
+ "Cmdq number of wq pages exceeds limit: %lu\n",
+ CMDQ_WQ_CLA_SIZE / sizeof(u64));
+ goto err_destroy_wq;
+ }
+
+ cmdqs->wq_block_vaddr =
+ dma_alloc_coherent(hwdev->dev, HINIC3_MIN_PAGE_SIZE,
+ &cmdqs->wq_block_paddr, GFP_KERNEL);
+ if (!cmdqs->wq_block_vaddr) {
+ err = -ENOMEM;
+ goto err_destroy_wq;
+ }
+
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++)
+ memcpy((u8 *)cmdqs->wq_block_vaddr +
+ CMDQ_WQ_CLA_SIZE * cmdq_type,
+ cmdqs->cmdq[cmdq_type].wq.wq_block_vaddr,
+ cmdqs->cmdq[cmdq_type].wq.qpages.num_pages *
+ sizeof(__be64));
+ }
+
+ return 0;
+
+err_destroy_wq:
+ while (cmdq_type > 0) {
+ cmdq_type--;
+ hinic3_wq_destroy(hwdev, &cmdqs->cmdq[cmdq_type].wq);
+ }
+
+ return err;
+}
+
+static void destroy_cmdq_wq(struct hinic3_hwdev *hwdev,
+ struct hinic3_cmdqs *cmdqs)
+{
+ u8 cmdq_type;
+
+ if (cmdqs->wq_block_vaddr)
+ dma_free_coherent(hwdev->dev, HINIC3_MIN_PAGE_SIZE,
+ cmdqs->wq_block_vaddr, cmdqs->wq_block_paddr);
+
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++)
+ hinic3_wq_destroy(hwdev, &cmdqs->cmdq[cmdq_type].wq);
+}
+
+static int init_cmdqs(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cmdqs *cmdqs;
+
+ cmdqs = kzalloc(sizeof(*cmdqs), GFP_KERNEL);
+ if (!cmdqs)
+ return -ENOMEM;
+
+ hwdev->cmdqs = cmdqs;
+ cmdqs->hwdev = hwdev;
+ cmdqs->cmdq_num = hwdev->max_cmdq;
+
+ cmdqs->cmd_buf_pool = dma_pool_create("hinic3_cmdq", hwdev->dev,
+ CMDQ_BUF_SIZE, CMDQ_BUF_SIZE, 0);
+ if (!cmdqs->cmd_buf_pool) {
+ dev_err(hwdev->dev, "Failed to create cmdq buffer pool\n");
+ kfree(cmdqs);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void cmdq_flush_sync_cmd(struct hinic3_cmdq_cmd_info *cmd_info)
+{
+ if (cmd_info->cmd_type != HINIC3_CMD_TYPE_DIRECT_RESP)
+ return;
+
+ cmd_info->cmd_type = HINIC3_CMD_TYPE_FORCE_STOP;
+
+ if (cmd_info->cmpt_code &&
+ *cmd_info->cmpt_code == CMDQ_SEND_CMPT_CODE)
+ *cmd_info->cmpt_code = CMDQ_FORCE_STOP_CMPT_CODE;
+
+ if (cmd_info->done) {
+ complete(cmd_info->done);
+ cmd_info->done = NULL;
+ cmd_info->cmpt_code = NULL;
+ cmd_info->direct_resp = NULL;
+ cmd_info->errcode = NULL;
+ }
+}
+
+static void hinic3_cmdq_flush_cmd(struct hinic3_cmdq *cmdq)
+{
+ struct hinic3_cmdq_cmd_info *cmd_info;
+ u16 ci;
+
+ spin_lock_bh(&cmdq->cmdq_lock);
+ while (cmdq_read_wqe(&cmdq->wq, &ci)) {
+ hinic3_wq_put_wqebbs(&cmdq->wq, CMDQ_WQE_NUM_WQEBBS);
+ cmd_info = &cmdq->cmd_infos[ci];
+ if (cmd_info->cmd_type == HINIC3_CMD_TYPE_DIRECT_RESP)
+ cmdq_flush_sync_cmd(cmd_info);
+ }
+ spin_unlock_bh(&cmdq->cmdq_lock);
+}
+
+void hinic3_cmdq_flush_sync_cmd(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cmdq *cmdq;
+ u16 wqe_cnt, wqe_idx, i;
+ struct hinic3_wq *wq;
+
+ cmdq = &hwdev->cmdqs->cmdq[HINIC3_CMDQ_SYNC];
+ spin_lock_bh(&cmdq->cmdq_lock);
+ wq = &cmdq->wq;
+ wqe_cnt = hinic3_wq_get_used(wq);
+ for (i = 0; i < wqe_cnt; i++) {
+ wqe_idx = (wq->cons_idx + i) & wq->idx_mask;
+ cmdq_flush_sync_cmd(cmdq->cmd_infos + wqe_idx);
+ }
+ spin_unlock_bh(&cmdq->cmdq_lock);
+}
+
+static void hinic3_cmdq_reset_all_cmd_buf(struct hinic3_cmdq *cmdq)
+{
+ u16 i;
+
+ for (i = 0; i < cmdq->wq.q_depth; i++)
+ cmdq_clear_cmd_buf(&cmdq->cmd_infos[i], cmdq->hwdev);
+}
+
+int hinic3_reinit_cmdq_ctxts(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cmdqs *cmdqs = hwdev->cmdqs;
+ u8 cmdq_type;
+
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
+ hinic3_cmdq_flush_cmd(&cmdqs->cmdq[cmdq_type]);
+ hinic3_cmdq_reset_all_cmd_buf(&cmdqs->cmdq[cmdq_type]);
+ cmdqs->cmdq[cmdq_type].wrapped = 1;
+ hinic3_wq_reset(&cmdqs->cmdq[cmdq_type].wq);
+ }
+
+ return hinic3_set_cmdq_ctxts(hwdev);
+}
+
+int hinic3_cmdqs_init(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cmdqs *cmdqs;
+ void __iomem *db_base;
+ u8 cmdq_type;
+ int err;
+
+ err = init_cmdqs(hwdev);
+ if (err)
+ goto err_out;
+
+ cmdqs = hwdev->cmdqs;
+ err = create_cmdq_wq(hwdev, cmdqs);
+ if (err)
+ goto err_free_cmdqs;
+
+ err = hinic3_alloc_db_addr(hwdev, &db_base, NULL);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to allocate doorbell address\n");
+ goto err_destroy_cmdq_wq;
+ }
+ cmdqs->cmdqs_db_base = db_base;
+
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
+ err = init_cmdq(&cmdqs->cmdq[cmdq_type], hwdev, cmdq_type);
+ if (err) {
+ dev_err(hwdev->dev,
+ "Failed to initialize cmdq type: %d\n",
+ cmdq_type);
+ goto err_free_cmd_infos;
+ }
+ }
+
+ err = hinic3_set_cmdq_ctxts(hwdev);
+ if (err)
+ goto err_free_cmd_infos;
+
+ return 0;
+
+err_free_cmd_infos:
+ while (cmdq_type > 0) {
+ cmdq_type--;
+ kfree(cmdqs->cmdq[cmdq_type].cmd_infos);
+ }
+
+ hinic3_free_db_addr(hwdev, cmdqs->cmdqs_db_base);
+
+err_destroy_cmdq_wq:
+ destroy_cmdq_wq(hwdev, cmdqs);
+
+err_free_cmdqs:
+ dma_pool_destroy(cmdqs->cmd_buf_pool);
+ kfree(cmdqs);
+
+err_out:
+ return err;
+}
+
+void hinic3_cmdqs_free(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cmdqs *cmdqs = hwdev->cmdqs;
+ u8 cmdq_type;
+
+ cmdqs->status &= ~HINIC3_CMDQ_ENABLE;
+
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
+ hinic3_cmdq_flush_cmd(&cmdqs->cmdq[cmdq_type]);
+ hinic3_cmdq_reset_all_cmd_buf(&cmdqs->cmdq[cmdq_type]);
+ kfree(cmdqs->cmdq[cmdq_type].cmd_infos);
+ }
+
+ hinic3_free_db_addr(hwdev, cmdqs->cmdqs_db_base);
+ destroy_cmdq_wq(hwdev, cmdqs);
+ dma_pool_destroy(cmdqs->cmd_buf_pool);
+ kfree(cmdqs);
+}
+
+bool hinic3_cmdq_idle(struct hinic3_cmdq *cmdq)
+{
+ return hinic3_wq_get_used(&cmdq->wq) == 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.h b/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.h
new file mode 100644
index 000000000000..f99c386a2780
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_CMDQ_H_
+#define _HINIC3_CMDQ_H_
+
+#include <linux/dmapool.h>
+
+#include "hinic3_hw_intf.h"
+#include "hinic3_wq.h"
+
+#define CMDQ_DEPTH 4096
+
+struct cmdq_db {
+ __le32 db_head;
+ __le32 db_info;
+};
+
+/* hw defined cmdq wqe header */
+struct cmdq_header {
+ __le32 header_info;
+ __le32 saved_data;
+};
+
+struct cmdq_lcmd_bufdesc {
+ struct hinic3_sge sge;
+ __le64 rsvd2;
+ __le64 rsvd3;
+};
+
+struct cmdq_status {
+ __le32 status_info;
+};
+
+struct cmdq_ctrl {
+ __le32 ctrl_info;
+};
+
+struct cmdq_direct_resp {
+ __le64 val;
+ __le64 rsvd;
+};
+
+struct cmdq_completion {
+ union {
+ struct hinic3_sge sge;
+ struct cmdq_direct_resp direct;
+ } resp;
+};
+
+struct cmdq_wqe_scmd {
+ struct cmdq_header header;
+ __le64 rsvd3;
+ struct cmdq_status status;
+ struct cmdq_ctrl ctrl;
+ struct cmdq_completion completion;
+ __le32 rsvd10[6];
+};
+
+struct cmdq_wqe_lcmd {
+ struct cmdq_header header;
+ struct cmdq_status status;
+ struct cmdq_ctrl ctrl;
+ struct cmdq_completion completion;
+ struct cmdq_lcmd_bufdesc buf_desc;
+};
+
+struct cmdq_wqe {
+ union {
+ struct cmdq_wqe_scmd wqe_scmd;
+ struct cmdq_wqe_lcmd wqe_lcmd;
+ };
+};
+
+static_assert(sizeof(struct cmdq_wqe) == 64);
+
+enum hinic3_cmdq_type {
+ HINIC3_CMDQ_SYNC = 0,
+ HINIC3_MAX_CMDQ_TYPES = 4
+};
+
+enum hinic3_cmdq_status {
+ HINIC3_CMDQ_ENABLE = BIT(0),
+};
+
+enum hinic3_cmdq_cmd_type {
+ HINIC3_CMD_TYPE_NONE,
+ HINIC3_CMD_TYPE_DIRECT_RESP,
+ HINIC3_CMD_TYPE_FAKE_TIMEOUT,
+ HINIC3_CMD_TYPE_TIMEOUT,
+ HINIC3_CMD_TYPE_FORCE_STOP,
+};
+
+struct hinic3_cmd_buf {
+ void *buf;
+ dma_addr_t dma_addr;
+ __le16 size;
+ refcount_t ref_cnt;
+};
+
+struct hinic3_cmdq_cmd_info {
+ enum hinic3_cmdq_cmd_type cmd_type;
+ struct completion *done;
+ int *errcode;
+ /* completion code */
+ int *cmpt_code;
+ __le64 *direct_resp;
+ u64 cmdq_msg_id;
+ struct hinic3_cmd_buf *buf_in;
+};
+
+struct hinic3_cmdq {
+ struct hinic3_wq wq;
+ enum hinic3_cmdq_type cmdq_type;
+ u8 wrapped;
+ /* synchronize command submission with completions via event queue */
+ spinlock_t cmdq_lock;
+ struct hinic3_cmdq_cmd_info *cmd_infos;
+ struct hinic3_hwdev *hwdev;
+};
+
+struct hinic3_cmdqs {
+ struct hinic3_hwdev *hwdev;
+ struct hinic3_cmdq cmdq[HINIC3_MAX_CMDQ_TYPES];
+ struct dma_pool *cmd_buf_pool;
+ /* doorbell area */
+ u8 __iomem *cmdqs_db_base;
+
+ /* When command queue uses multiple memory pages (1-level CLA), this
+ * block will hold aggregated indirection table for all command queues
+ * of cmdqs. Not used for small cmdq (0-level CLA).
+ */
+ dma_addr_t wq_block_paddr;
+ void *wq_block_vaddr;
+
+ u32 status;
+ u32 disable_flag;
+ u8 cmdq_num;
+};
+
+int hinic3_cmdqs_init(struct hinic3_hwdev *hwdev);
+void hinic3_cmdqs_free(struct hinic3_hwdev *hwdev);
+
+struct hinic3_cmd_buf *hinic3_alloc_cmd_buf(struct hinic3_hwdev *hwdev);
+void hinic3_free_cmd_buf(struct hinic3_hwdev *hwdev,
+ struct hinic3_cmd_buf *cmd_buf);
+void hinic3_cmdq_ceq_handler(struct hinic3_hwdev *hwdev, __le32 ceqe_data);
+
+int hinic3_cmdq_direct_resp(struct hinic3_hwdev *hwdev, u8 mod, u8 cmd,
+ struct hinic3_cmd_buf *buf_in, __le64 *out_param);
+
+void hinic3_cmdq_flush_sync_cmd(struct hinic3_hwdev *hwdev);
+int hinic3_reinit_cmdq_ctxts(struct hinic3_hwdev *hwdev);
+bool hinic3_cmdq_idle(struct hinic3_cmdq *cmdq);
+
+#endif
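
A usage sketch for the API above (hypothetical caller; the module/command ids are made up for illustration): a direct-response command copies its request into a DMA-pool buffer obtained from hinic3_alloc_cmd_buf() and reads the 64-bit result back through out_param.

static int example_send_direct_cmd(struct hinic3_hwdev *hwdev,
                                   const void *req, u16 req_len)
{
        struct hinic3_cmd_buf *buf;
        __le64 out_param;
        int err;

        buf = hinic3_alloc_cmd_buf(hwdev);
        if (!buf)
                return -ENOMEM;

        memcpy(buf->buf, req, req_len);
        buf->size = cpu_to_le16(req_len);

        /* mod/cmd values below are illustrative only */
        err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC, 0, buf,
                                      &out_param);

        hinic3_free_cmd_buf(hwdev, buf);
        return err;
}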
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_common.c b/drivers/net/ethernet/huawei/hinic3/hinic3_common.c
new file mode 100644
index 000000000000..fe4778d152cf
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_common.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+
+#include "hinic3_common.h"
+
+int hinic3_dma_zalloc_coherent_align(struct device *dev, u32 size, u32 align,
+ gfp_t flag,
+ struct hinic3_dma_addr_align *mem_align)
+{
+ dma_addr_t paddr, align_paddr;
+ void *vaddr, *align_vaddr;
+ u32 real_size = size;
+
+ vaddr = dma_alloc_coherent(dev, real_size, &paddr, flag);
+ if (!vaddr)
+ return -ENOMEM;
+
+ align_paddr = ALIGN(paddr, align);
+ if (align_paddr == paddr) {
+ align_vaddr = vaddr;
+ goto out;
+ }
+
+ dma_free_coherent(dev, real_size, vaddr, paddr);
+
+ /* realloc memory for alignment */
+ real_size = size + align;
+ vaddr = dma_alloc_coherent(dev, real_size, &paddr, flag);
+ if (!vaddr)
+ return -ENOMEM;
+
+ align_paddr = ALIGN(paddr, align);
+ align_vaddr = vaddr + (align_paddr - paddr);
+
+out:
+ mem_align->real_size = real_size;
+ mem_align->ori_vaddr = vaddr;
+ mem_align->ori_paddr = paddr;
+ mem_align->align_vaddr = align_vaddr;
+ mem_align->align_paddr = align_paddr;
+
+ return 0;
+}
+
+void hinic3_dma_free_coherent_align(struct device *dev,
+ struct hinic3_dma_addr_align *mem_align)
+{
+ dma_free_coherent(dev, mem_align->real_size,
+ mem_align->ori_vaddr, mem_align->ori_paddr);
+}
+
+int hinic3_wait_for_timeout(void *priv_data, wait_cpl_handler handler,
+ u32 wait_total_ms, u32 wait_once_us)
+{
+ enum hinic3_wait_return ret;
+ int err;
+
+ err = read_poll_timeout(handler, ret, ret == HINIC3_WAIT_PROCESS_CPL,
+ wait_once_us, wait_total_ms * USEC_PER_MSEC,
+ false, priv_data);
+
+ return err;
+}
+
+/* Data provided to/by the cmdq is arranged in structs with little-endian
+ * fields, but every dword (32 bits) must be swapped here because HW swaps it
+ * again when copying it from/to host memory.
+ */
+void hinic3_cmdq_buf_swab32(void *data, int len)
+{
+ swab32_array(data, len / sizeof(u32));
+}
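
A minimal usage sketch (hypothetical caller): the helper keeps both the original and the aligned addresses in struct hinic3_dma_addr_align, so the free path can hand back exactly what dma_alloc_coherent() returned.

static int example_alloc_aligned_block(struct device *dev)
{
        struct hinic3_dma_addr_align mem = {};
        int err;

        /* 2 KB of zeroed coherent memory, aligned to a 4 KB boundary */
        err = hinic3_dma_zalloc_coherent_align(dev, SZ_2K, SZ_4K,
                                               GFP_KERNEL, &mem);
        if (err)
                return err;

        /* program HW with mem.align_paddr, CPU accesses mem.align_vaddr */

        hinic3_dma_free_coherent_align(dev, &mem);
        return 0;
}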
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_common.h b/drivers/net/ethernet/huawei/hinic3/hinic3_common.h
new file mode 100644
index 000000000000..a8fabfae90fb
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_common.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_COMMON_H_
+#define _HINIC3_COMMON_H_
+
+#include <linux/device.h>
+
+#define HINIC3_MIN_PAGE_SIZE 0x1000
+
+struct hinic3_dma_addr_align {
+ u32 real_size;
+
+ void *ori_vaddr;
+ dma_addr_t ori_paddr;
+
+ void *align_vaddr;
+ dma_addr_t align_paddr;
+};
+
+enum hinic3_wait_return {
+ HINIC3_WAIT_PROCESS_CPL = 0,
+ HINIC3_WAIT_PROCESS_WAITING = 1,
+};
+
+struct hinic3_sge {
+ __le32 hi_addr;
+ __le32 lo_addr;
+ __le32 len;
+ __le32 rsvd;
+};
+
+static inline void hinic3_set_sge(struct hinic3_sge *sge, dma_addr_t addr,
+ __le32 len)
+{
+ sge->hi_addr = cpu_to_le32(upper_32_bits(addr));
+ sge->lo_addr = cpu_to_le32(lower_32_bits(addr));
+ sge->len = len;
+ sge->rsvd = 0;
+}
+
+int hinic3_dma_zalloc_coherent_align(struct device *dev, u32 size, u32 align,
+ gfp_t flag,
+ struct hinic3_dma_addr_align *mem_align);
+void hinic3_dma_free_coherent_align(struct device *dev,
+ struct hinic3_dma_addr_align *mem_align);
+
+typedef enum hinic3_wait_return (*wait_cpl_handler)(void *priv_data);
+int hinic3_wait_for_timeout(void *priv_data, wait_cpl_handler handler,
+ u32 wait_total_ms, u32 wait_once_us);
+
+void hinic3_cmdq_buf_swab32(void *data, int len);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_csr.h b/drivers/net/ethernet/huawei/hinic3/hinic3_csr.h
new file mode 100644
index 000000000000..e7417e8efa99
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_csr.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_CSR_H_
+#define _HINIC3_CSR_H_
+
+#define HINIC3_CFG_REGS_FLAG 0x40000000
+#define HINIC3_REGS_FLAG_MASK 0x3FFFFFFF
+
+#define HINIC3_VF_CFG_REG_OFFSET 0x2000
+
+/* HW interface registers */
+#define HINIC3_CSR_FUNC_ATTR0_ADDR (HINIC3_CFG_REGS_FLAG + 0x0)
+#define HINIC3_CSR_FUNC_ATTR1_ADDR (HINIC3_CFG_REGS_FLAG + 0x4)
+#define HINIC3_CSR_FUNC_ATTR2_ADDR (HINIC3_CFG_REGS_FLAG + 0x8)
+#define HINIC3_CSR_FUNC_ATTR3_ADDR (HINIC3_CFG_REGS_FLAG + 0xC)
+#define HINIC3_CSR_FUNC_ATTR4_ADDR (HINIC3_CFG_REGS_FLAG + 0x10)
+#define HINIC3_CSR_FUNC_ATTR5_ADDR (HINIC3_CFG_REGS_FLAG + 0x14)
+#define HINIC3_CSR_FUNC_ATTR6_ADDR (HINIC3_CFG_REGS_FLAG + 0x18)
+
+#define HINIC3_FUNC_CSR_MAILBOX_DATA_OFF 0x80
+#define HINIC3_FUNC_CSR_MAILBOX_CONTROL_OFF (HINIC3_CFG_REGS_FLAG + 0x0100)
+#define HINIC3_FUNC_CSR_MAILBOX_INT_OFF (HINIC3_CFG_REGS_FLAG + 0x0104)
+#define HINIC3_FUNC_CSR_MAILBOX_RESULT_H_OFF (HINIC3_CFG_REGS_FLAG + 0x0108)
+#define HINIC3_FUNC_CSR_MAILBOX_RESULT_L_OFF (HINIC3_CFG_REGS_FLAG + 0x010C)
+
+#define HINIC3_CSR_DMA_ATTR_TBL_ADDR (HINIC3_CFG_REGS_FLAG + 0x380)
+#define HINIC3_CSR_DMA_ATTR_INDIR_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x390)
+
+/* MSI-X registers */
+#define HINIC3_CSR_FUNC_MSI_CLR_WR_ADDR (HINIC3_CFG_REGS_FLAG + 0x58)
+
+#define HINIC3_MSI_CLR_INDIR_RESEND_TIMER_CLR_MASK BIT(0)
+#define HINIC3_MSI_CLR_INDIR_INT_MSK_SET_MASK BIT(1)
+#define HINIC3_MSI_CLR_INDIR_INT_MSK_CLR_MASK BIT(2)
+#define HINIC3_MSI_CLR_INDIR_AUTO_MSK_SET_MASK BIT(3)
+#define HINIC3_MSI_CLR_INDIR_AUTO_MSK_CLR_MASK BIT(4)
+#define HINIC3_MSI_CLR_INDIR_SIMPLE_INDIR_IDX_MASK GENMASK(31, 22)
+#define HINIC3_MSI_CLR_INDIR_SET(val, member) \
+ FIELD_PREP(HINIC3_MSI_CLR_INDIR_##member##_MASK, val)
+
+/* EQ registers */
+#define HINIC3_AEQ_INDIR_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x210)
+#define HINIC3_CEQ_INDIR_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x290)
+
+#define HINIC3_EQ_INDIR_IDX_ADDR(type) \
+ ((type == HINIC3_AEQ) ? HINIC3_AEQ_INDIR_IDX_ADDR : \
+ HINIC3_CEQ_INDIR_IDX_ADDR)
+
+#define HINIC3_AEQ_MTT_OFF_BASE_ADDR (HINIC3_CFG_REGS_FLAG + 0x240)
+#define HINIC3_CEQ_MTT_OFF_BASE_ADDR (HINIC3_CFG_REGS_FLAG + 0x2C0)
+
+#define HINIC3_CSR_EQ_PAGE_OFF_STRIDE 8
+
+#define HINIC3_AEQ_HI_PHYS_ADDR_REG(pg_num) \
+ (HINIC3_AEQ_MTT_OFF_BASE_ADDR + (pg_num) * \
+ HINIC3_CSR_EQ_PAGE_OFF_STRIDE)
+
+#define HINIC3_AEQ_LO_PHYS_ADDR_REG(pg_num) \
+ (HINIC3_AEQ_MTT_OFF_BASE_ADDR + (pg_num) * \
+ HINIC3_CSR_EQ_PAGE_OFF_STRIDE + 4)
+
+#define HINIC3_CEQ_HI_PHYS_ADDR_REG(pg_num) \
+ (HINIC3_CEQ_MTT_OFF_BASE_ADDR + (pg_num) * \
+ HINIC3_CSR_EQ_PAGE_OFF_STRIDE)
+
+#define HINIC3_CEQ_LO_PHYS_ADDR_REG(pg_num) \
+ (HINIC3_CEQ_MTT_OFF_BASE_ADDR + (pg_num) * \
+ HINIC3_CSR_EQ_PAGE_OFF_STRIDE + 4)
+
+#define HINIC3_CSR_AEQ_CTRL_0_ADDR (HINIC3_CFG_REGS_FLAG + 0x200)
+#define HINIC3_CSR_AEQ_CTRL_1_ADDR (HINIC3_CFG_REGS_FLAG + 0x204)
+#define HINIC3_CSR_AEQ_PROD_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x20C)
+#define HINIC3_CSR_AEQ_CI_SIMPLE_INDIR_ADDR (HINIC3_CFG_REGS_FLAG + 0x50)
+
+#define HINIC3_CSR_CEQ_PROD_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x28c)
+#define HINIC3_CSR_CEQ_CI_SIMPLE_INDIR_ADDR (HINIC3_CFG_REGS_FLAG + 0x54)
+
+#endif
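
A sketch of how the FIELD_PREP-based helpers above compose a register value (hypothetical caller; hinic3_hwif_write_reg() is the driver's MMIO write helper): clearing the MSI-X resend timer for entry 5 simply ORs the individual fields together.

static void example_clear_resend_timer(struct hinic3_hwif *hwif)
{
        u32 val;

        val = HINIC3_MSI_CLR_INDIR_SET(1, RESEND_TIMER_CLR) |
              HINIC3_MSI_CLR_INDIR_SET(5, SIMPLE_INDIR_IDX);
        hinic3_hwif_write_reg(hwif, HINIC3_CSR_FUNC_MSI_CLR_WR_ADDR, val);
}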
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c b/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c
new file mode 100644
index 000000000000..01686472985b
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c
@@ -0,0 +1,776 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/delay.h>
+
+#include "hinic3_csr.h"
+#include "hinic3_eqs.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+
+#define AEQ_CTRL_0_INTR_IDX_MASK GENMASK(9, 0)
+#define AEQ_CTRL_0_DMA_ATTR_MASK GENMASK(17, 12)
+#define AEQ_CTRL_0_PCI_INTF_IDX_MASK GENMASK(22, 20)
+#define AEQ_CTRL_0_INTR_MODE_MASK BIT(31)
+#define AEQ_CTRL_0_SET(val, member) \
+ FIELD_PREP(AEQ_CTRL_0_##member##_MASK, val)
+
+#define AEQ_CTRL_1_LEN_MASK GENMASK(20, 0)
+#define AEQ_CTRL_1_ELEM_SIZE_MASK GENMASK(25, 24)
+#define AEQ_CTRL_1_PAGE_SIZE_MASK GENMASK(31, 28)
+#define AEQ_CTRL_1_SET(val, member) \
+ FIELD_PREP(AEQ_CTRL_1_##member##_MASK, val)
+
+#define CEQ_CTRL_0_INTR_IDX_MASK GENMASK(9, 0)
+#define CEQ_CTRL_0_DMA_ATTR_MASK GENMASK(17, 12)
+#define CEQ_CTRL_0_LIMIT_KICK_MASK GENMASK(23, 20)
+#define CEQ_CTRL_0_PCI_INTF_IDX_MASK GENMASK(25, 24)
+#define CEQ_CTRL_0_PAGE_SIZE_MASK GENMASK(30, 27)
+#define CEQ_CTRL_0_INTR_MODE_MASK BIT(31)
+#define CEQ_CTRL_0_SET(val, member) \
+ FIELD_PREP(CEQ_CTRL_0_##member##_MASK, val)
+
+#define CEQ_CTRL_1_LEN_MASK GENMASK(19, 0)
+#define CEQ_CTRL_1_SET(val, member) \
+ FIELD_PREP(CEQ_CTRL_1_##member##_MASK, val)
+
+#define CEQE_TYPE_MASK GENMASK(25, 23)
+#define CEQE_TYPE(type) \
+ FIELD_GET(CEQE_TYPE_MASK, le32_to_cpu(type))
+
+#define CEQE_DATA_MASK GENMASK(25, 0)
+#define CEQE_DATA(data) ((data) & cpu_to_le32(CEQE_DATA_MASK))
+
+#define EQ_ELEM_DESC_TYPE_MASK GENMASK(6, 0)
+#define EQ_ELEM_DESC_SRC_MASK BIT(7)
+#define EQ_ELEM_DESC_SIZE_MASK GENMASK(15, 8)
+#define EQ_ELEM_DESC_WRAPPED_MASK BIT(31)
+#define EQ_ELEM_DESC_GET(val, member) \
+ FIELD_GET(EQ_ELEM_DESC_##member##_MASK, le32_to_cpu(val))
+
+#define EQ_CI_SIMPLE_INDIR_CI_MASK GENMASK(20, 0)
+#define EQ_CI_SIMPLE_INDIR_ARMED_MASK BIT(21)
+#define EQ_CI_SIMPLE_INDIR_AEQ_IDX_MASK GENMASK(31, 30)
+#define EQ_CI_SIMPLE_INDIR_CEQ_IDX_MASK GENMASK(31, 24)
+#define EQ_CI_SIMPLE_INDIR_SET(val, member) \
+ FIELD_PREP(EQ_CI_SIMPLE_INDIR_##member##_MASK, val)
+
+#define EQ_CI_SIMPLE_INDIR_REG_ADDR(eq) \
+ (((eq)->type == HINIC3_AEQ) ? \
+ HINIC3_CSR_AEQ_CI_SIMPLE_INDIR_ADDR : \
+ HINIC3_CSR_CEQ_CI_SIMPLE_INDIR_ADDR)
+
+#define EQ_PROD_IDX_REG_ADDR(eq) \
+ (((eq)->type == HINIC3_AEQ) ? \
+ HINIC3_CSR_AEQ_PROD_IDX_ADDR : HINIC3_CSR_CEQ_PROD_IDX_ADDR)
+
+#define EQ_HI_PHYS_ADDR_REG(type, pg_num) \
+ (((type) == HINIC3_AEQ) ? \
+ HINIC3_AEQ_HI_PHYS_ADDR_REG(pg_num) : \
+ HINIC3_CEQ_HI_PHYS_ADDR_REG(pg_num))
+
+#define EQ_LO_PHYS_ADDR_REG(type, pg_num) \
+ (((type) == HINIC3_AEQ) ? \
+ HINIC3_AEQ_LO_PHYS_ADDR_REG(pg_num) : \
+ HINIC3_CEQ_LO_PHYS_ADDR_REG(pg_num))
+
+#define EQ_MSIX_RESEND_TIMER_CLEAR 1
+
+#define HINIC3_EQ_MAX_PAGES(eq) \
+ ((eq)->type == HINIC3_AEQ ? \
+ HINIC3_AEQ_MAX_PAGES : HINIC3_CEQ_MAX_PAGES)
+
+#define HINIC3_TASK_PROCESS_EQE_LIMIT 1024
+#define HINIC3_EQ_UPDATE_CI_STEP 64
+#define HINIC3_EQS_WQ_NAME "hinic3_eqs"
+
+#define HINIC3_EQ_VALID_SHIFT 31
+#define HINIC3_EQ_WRAPPED(eq) \
+ ((eq)->wrapped << HINIC3_EQ_VALID_SHIFT)
+
+#define HINIC3_EQ_WRAPPED_SHIFT 20
+#define HINIC3_EQ_CONS_IDX(eq) \
+ ((eq)->cons_idx | ((eq)->wrapped << HINIC3_EQ_WRAPPED_SHIFT))
+
+static const struct hinic3_aeq_elem *get_curr_aeq_elem(const struct hinic3_eq *eq)
+{
+ return get_q_element(&eq->qpages, eq->cons_idx, NULL);
+}
+
+static const __be32 *get_curr_ceq_elem(const struct hinic3_eq *eq)
+{
+ return get_q_element(&eq->qpages, eq->cons_idx, NULL);
+}
+
+int hinic3_aeq_register_cb(struct hinic3_hwdev *hwdev,
+ enum hinic3_aeq_type event,
+ hinic3_aeq_event_cb hwe_cb)
+{
+ struct hinic3_aeqs *aeqs;
+
+ aeqs = hwdev->aeqs;
+ aeqs->aeq_cb[event] = hwe_cb;
+ spin_lock_init(&aeqs->aeq_lock);
+
+ return 0;
+}
+
+void hinic3_aeq_unregister_cb(struct hinic3_hwdev *hwdev,
+ enum hinic3_aeq_type event)
+{
+ struct hinic3_aeqs *aeqs;
+
+ aeqs = hwdev->aeqs;
+
+ spin_lock_bh(&aeqs->aeq_lock);
+ aeqs->aeq_cb[event] = NULL;
+ spin_unlock_bh(&aeqs->aeq_lock);
+}
+
+int hinic3_ceq_register_cb(struct hinic3_hwdev *hwdev,
+ enum hinic3_ceq_event event,
+ hinic3_ceq_event_cb callback)
+{
+ struct hinic3_ceqs *ceqs;
+
+ ceqs = hwdev->ceqs;
+ ceqs->ceq_cb[event] = callback;
+ spin_lock_init(&ceqs->ceq_lock);
+
+ return 0;
+}
+
+void hinic3_ceq_unregister_cb(struct hinic3_hwdev *hwdev,
+ enum hinic3_ceq_event event)
+{
+ struct hinic3_ceqs *ceqs;
+
+ ceqs = hwdev->ceqs;
+
+ spin_lock_bh(&ceqs->ceq_lock);
+ ceqs->ceq_cb[event] = NULL;
+ spin_unlock_bh(&ceqs->ceq_lock);
+}
+
+/* Set consumer index in the hw. */
+static void set_eq_cons_idx(struct hinic3_eq *eq, u32 arm_state)
+{
+ u32 addr = EQ_CI_SIMPLE_INDIR_REG_ADDR(eq);
+ u32 eq_wrap_ci, val;
+
+ eq_wrap_ci = HINIC3_EQ_CONS_IDX(eq);
+ val = EQ_CI_SIMPLE_INDIR_SET(arm_state, ARMED);
+ if (eq->type == HINIC3_AEQ) {
+ val = val |
+ EQ_CI_SIMPLE_INDIR_SET(eq_wrap_ci, CI) |
+ EQ_CI_SIMPLE_INDIR_SET(eq->q_id, AEQ_IDX);
+ } else {
+ val = val |
+ EQ_CI_SIMPLE_INDIR_SET(eq_wrap_ci, CI) |
+ EQ_CI_SIMPLE_INDIR_SET(eq->q_id, CEQ_IDX);
+ }
+
+ hinic3_hwif_write_reg(eq->hwdev->hwif, addr, val);
+}
+
+static struct hinic3_ceqs *ceq_to_ceqs(const struct hinic3_eq *eq)
+{
+ return container_of(eq, struct hinic3_ceqs, ceq[eq->q_id]);
+}
+
+static void ceq_event_handler(struct hinic3_ceqs *ceqs, __le32 ceqe)
+{
+ enum hinic3_ceq_event event = CEQE_TYPE(ceqe);
+ struct hinic3_hwdev *hwdev = ceqs->hwdev;
+ __le32 ceqe_data = CEQE_DATA(ceqe);
+
+ if (event >= HINIC3_MAX_CEQ_EVENTS) {
+ dev_warn(hwdev->dev, "Ceq unknown event:%d, ceqe data: 0x%x\n",
+ event, ceqe_data);
+ return;
+ }
+
+ spin_lock_bh(&ceqs->ceq_lock);
+ if (ceqs->ceq_cb[event])
+ ceqs->ceq_cb[event](hwdev, ceqe_data);
+
+ spin_unlock_bh(&ceqs->ceq_lock);
+}
+
+static struct hinic3_aeqs *aeq_to_aeqs(const struct hinic3_eq *eq)
+{
+ return container_of(eq, struct hinic3_aeqs, aeq[eq->q_id]);
+}
+
+static void aeq_event_handler(struct hinic3_aeqs *aeqs, __le32 aeqe,
+ const struct hinic3_aeq_elem *aeqe_pos)
+{
+ struct hinic3_hwdev *hwdev = aeqs->hwdev;
+ u8 data[HINIC3_AEQE_DATA_SIZE], size;
+ enum hinic3_aeq_type event;
+ hinic3_aeq_event_cb hwe_cb;
+
+ if (EQ_ELEM_DESC_GET(aeqe, SRC))
+ return;
+
+ event = EQ_ELEM_DESC_GET(aeqe, TYPE);
+ if (event >= HINIC3_MAX_AEQ_EVENTS) {
+ dev_warn(hwdev->dev, "Aeq unknown event:%d\n", event);
+ return;
+ }
+
+ memcpy(data, aeqe_pos->aeqe_data, HINIC3_AEQE_DATA_SIZE);
+ swab32_array((u32 *)data, HINIC3_AEQE_DATA_SIZE / sizeof(u32));
+ size = EQ_ELEM_DESC_GET(aeqe, SIZE);
+
+ spin_lock_bh(&aeqs->aeq_lock);
+ hwe_cb = aeqs->aeq_cb[event];
+ if (hwe_cb)
+ hwe_cb(aeqs->hwdev, data, size);
+ spin_unlock_bh(&aeqs->aeq_lock);
+}
+
+static int aeq_irq_handler(struct hinic3_eq *eq)
+{
+ const struct hinic3_aeq_elem *aeqe_pos;
+ struct hinic3_aeqs *aeqs;
+ u32 i, eqe_cnt = 0;
+ __le32 aeqe;
+
+ aeqs = aeq_to_aeqs(eq);
+ for (i = 0; i < HINIC3_TASK_PROCESS_EQE_LIMIT; i++) {
+ aeqe_pos = get_curr_aeq_elem(eq);
+ aeqe = (__force __le32)swab32((__force __u32)aeqe_pos->desc);
+ /* HW updates the wrapped bit when it adds an eq element event */
+ if (EQ_ELEM_DESC_GET(aeqe, WRAPPED) == eq->wrapped)
+ return 0;
+
+ /* Prevent speculative reads from element */
+ dma_rmb();
+ aeq_event_handler(aeqs, aeqe, aeqe_pos);
+ eq->cons_idx++;
+ if (eq->cons_idx == eq->eq_len) {
+ eq->cons_idx = 0;
+ eq->wrapped = !eq->wrapped;
+ }
+
+ if (++eqe_cnt >= HINIC3_EQ_UPDATE_CI_STEP) {
+ eqe_cnt = 0;
+ set_eq_cons_idx(eq, HINIC3_EQ_NOT_ARMED);
+ }
+ }
+
+ return -EAGAIN;
+}
+
+static int ceq_irq_handler(struct hinic3_eq *eq)
+{
+ struct hinic3_ceqs *ceqs;
+ u32 eqe_cnt = 0;
+ __be32 ceqe_raw;
+ __le32 ceqe;
+ u32 i;
+
+ ceqs = ceq_to_ceqs(eq);
+ for (i = 0; i < HINIC3_TASK_PROCESS_EQE_LIMIT; i++) {
+ ceqe_raw = *get_curr_ceq_elem(eq);
+ ceqe = (__force __le32)swab32((__force __u32)ceqe_raw);
+
+ /* HW updates the wrapped bit when it adds an eq element event */
+ if (EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped)
+ return 0;
+
+ ceq_event_handler(ceqs, ceqe);
+ eq->cons_idx++;
+ if (eq->cons_idx == eq->eq_len) {
+ eq->cons_idx = 0;
+ eq->wrapped = !eq->wrapped;
+ }
+
+ if (++eqe_cnt >= HINIC3_EQ_UPDATE_CI_STEP) {
+ eqe_cnt = 0;
+ set_eq_cons_idx(eq, HINIC3_EQ_NOT_ARMED);
+ }
+ }
+
+ return -EAGAIN;
+}
+
+static void reschedule_aeq_handler(struct hinic3_eq *eq)
+{
+ struct hinic3_aeqs *aeqs = aeq_to_aeqs(eq);
+
+ queue_work(aeqs->workq, &eq->aeq_work);
+}
+
+static int eq_irq_handler(struct hinic3_eq *eq)
+{
+ int err;
+
+ if (eq->type == HINIC3_AEQ)
+ err = aeq_irq_handler(eq);
+ else
+ err = ceq_irq_handler(eq);
+
+ set_eq_cons_idx(eq, err ? HINIC3_EQ_NOT_ARMED :
+ HINIC3_EQ_ARMED);
+
+ return err;
+}
+
+static void aeq_irq_work(struct work_struct *work)
+{
+ struct hinic3_eq *eq = container_of(work, struct hinic3_eq, aeq_work);
+ int err;
+
+ err = eq_irq_handler(eq);
+ if (err)
+ reschedule_aeq_handler(eq);
+}
+
+static irqreturn_t aeq_interrupt(int irq, void *data)
+{
+ struct workqueue_struct *workq;
+ struct hinic3_eq *aeq = data;
+ struct hinic3_hwdev *hwdev;
+ struct hinic3_aeqs *aeqs;
+
+ aeqs = aeq_to_aeqs(aeq);
+ hwdev = aeq->hwdev;
+
+ /* clear resend timer cnt register */
+ workq = aeqs->workq;
+ hinic3_msix_intr_clear_resend_bit(hwdev, aeq->msix_entry_idx,
+ EQ_MSIX_RESEND_TIMER_CLEAR);
+ queue_work(workq, &aeq->aeq_work);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ceq_interrupt(int irq, void *data)
+{
+ struct hinic3_eq *ceq = data;
+ int err;
+
+ /* clear resend timer counters */
+ hinic3_msix_intr_clear_resend_bit(ceq->hwdev, ceq->msix_entry_idx,
+ EQ_MSIX_RESEND_TIMER_CLEAR);
+ err = eq_irq_handler(ceq);
+ if (err)
+ return IRQ_NONE;
+
+ return IRQ_HANDLED;
+}
+
+static int hinic3_set_ceq_ctrl_reg(struct hinic3_hwdev *hwdev, u16 q_id,
+ u32 ctrl0, u32 ctrl1)
+{
+ struct comm_cmd_set_ceq_ctrl_reg ceq_ctrl = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ ceq_ctrl.func_id = hinic3_global_func_id(hwdev);
+ ceq_ctrl.q_id = q_id;
+ ceq_ctrl.ctrl0 = ctrl0;
+ ceq_ctrl.ctrl1 = ctrl1;
+
+ mgmt_msg_params_init_default(&msg_params, &ceq_ctrl, sizeof(ceq_ctrl));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_SET_CEQ_CTRL_REG, &msg_params);
+ if (err || ceq_ctrl.head.status) {
+ dev_err(hwdev->dev, "Failed to set ceq %u ctrl reg, err: %d status: 0x%x\n",
+ q_id, err, ceq_ctrl.head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int set_eq_ctrls(struct hinic3_eq *eq)
+{
+ struct hinic3_hwif *hwif = eq->hwdev->hwif;
+ struct hinic3_queue_pages *qpages;
+ u8 pci_intf_idx, elem_size;
+ u32 mask, ctrl0, ctrl1;
+ u32 page_size_val;
+ int err;
+
+ qpages = &eq->qpages;
+ page_size_val = ilog2(qpages->page_size / HINIC3_MIN_PAGE_SIZE);
+ pci_intf_idx = hwif->attr.pci_intf_idx;
+
+ if (eq->type == HINIC3_AEQ) {
+ /* set ctrl0 using read-modify-write */
+ mask = AEQ_CTRL_0_INTR_IDX_MASK |
+ AEQ_CTRL_0_DMA_ATTR_MASK |
+ AEQ_CTRL_0_PCI_INTF_IDX_MASK |
+ AEQ_CTRL_0_INTR_MODE_MASK;
+ ctrl0 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_AEQ_CTRL_0_ADDR);
+ ctrl0 = (ctrl0 & ~mask) |
+ AEQ_CTRL_0_SET(eq->msix_entry_idx, INTR_IDX) |
+ AEQ_CTRL_0_SET(0, DMA_ATTR) |
+ AEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) |
+ AEQ_CTRL_0_SET(HINIC3_INTR_MODE_ARMED, INTR_MODE);
+ hinic3_hwif_write_reg(hwif, HINIC3_CSR_AEQ_CTRL_0_ADDR, ctrl0);
+
+ /* HW expects log2(number of 32 byte units). */
+ elem_size = qpages->elem_size_shift - 5;
+ ctrl1 = AEQ_CTRL_1_SET(eq->eq_len, LEN) |
+ AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) |
+ AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
+ hinic3_hwif_write_reg(hwif, HINIC3_CSR_AEQ_CTRL_1_ADDR, ctrl1);
+ } else {
+ ctrl0 = CEQ_CTRL_0_SET(eq->msix_entry_idx, INTR_IDX) |
+ CEQ_CTRL_0_SET(0, DMA_ATTR) |
+ CEQ_CTRL_0_SET(0, LIMIT_KICK) |
+ CEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) |
+ CEQ_CTRL_0_SET(page_size_val, PAGE_SIZE) |
+ CEQ_CTRL_0_SET(HINIC3_INTR_MODE_ARMED, INTR_MODE);
+
+ ctrl1 = CEQ_CTRL_1_SET(eq->eq_len, LEN);
+
+ /* set ceq ctrl reg through mgmt cpu */
+ err = hinic3_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, ctrl0,
+ ctrl1);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void ceq_elements_init(struct hinic3_eq *eq, u32 init_val)
+{
+ __be32 *ceqe;
+ u32 i;
+
+ for (i = 0; i < eq->eq_len; i++) {
+ ceqe = get_q_element(&eq->qpages, i, NULL);
+ *ceqe = cpu_to_be32(init_val);
+ }
+
+ wmb(); /* Ensure cleared ceq elements are visible before HW uses the queue */
+}
+
+static void aeq_elements_init(struct hinic3_eq *eq, u32 init_val)
+{
+ struct hinic3_aeq_elem *aeqe;
+ u32 i;
+
+ for (i = 0; i < eq->eq_len; i++) {
+ aeqe = get_q_element(&eq->qpages, i, NULL);
+ aeqe->desc = cpu_to_be32(init_val);
+ }
+
+ wmb(); /* Ensure cleared aeq elements are visible before HW uses the queue */
+}
+
+static void eq_elements_init(struct hinic3_eq *eq, u32 init_val)
+{
+ if (eq->type == HINIC3_AEQ)
+ aeq_elements_init(eq, init_val);
+ else
+ ceq_elements_init(eq, init_val);
+}
+
+static int alloc_eq_pages(struct hinic3_eq *eq)
+{
+ struct hinic3_hwif *hwif = eq->hwdev->hwif;
+ struct hinic3_queue_pages *qpages;
+ dma_addr_t page_paddr;
+ u32 reg, init_val;
+ u16 pg_idx;
+ int err;
+
+ qpages = &eq->qpages;
+ err = hinic3_queue_pages_alloc(eq->hwdev, qpages, HINIC3_MIN_PAGE_SIZE);
+ if (err)
+ return err;
+
+ for (pg_idx = 0; pg_idx < qpages->num_pages; pg_idx++) {
+ page_paddr = qpages->pages[pg_idx].align_paddr;
+ reg = EQ_HI_PHYS_ADDR_REG(eq->type, pg_idx);
+ hinic3_hwif_write_reg(hwif, reg, upper_32_bits(page_paddr));
+ reg = EQ_LO_PHYS_ADDR_REG(eq->type, pg_idx);
+ hinic3_hwif_write_reg(hwif, reg, lower_32_bits(page_paddr));
+ }
+
+ init_val = HINIC3_EQ_WRAPPED(eq);
+ eq_elements_init(eq, init_val);
+
+ return 0;
+}
+
+static void eq_calc_page_size_and_num(struct hinic3_eq *eq, u32 elem_size)
+{
+ u32 max_pages, min_page_size, page_size, total_size;
+
+ /* No need for complicated arithmetic. All values must be powers of 2, so
+ * multiplications yield powers of 2 and divisions yield powers of 2 without
+ * remainder.
+ */
+ max_pages = HINIC3_EQ_MAX_PAGES(eq);
+ min_page_size = HINIC3_MIN_PAGE_SIZE;
+ total_size = eq->eq_len * elem_size;
+
+ if (total_size <= max_pages * min_page_size)
+ page_size = min_page_size;
+ else
+ page_size = total_size / max_pages;
+
+ hinic3_queue_pages_init(&eq->qpages, eq->eq_len, page_size, elem_size);
+}
+
+static int request_eq_irq(struct hinic3_eq *eq)
+{
+ int err;
+
+ if (eq->type == HINIC3_AEQ) {
+ INIT_WORK(&eq->aeq_work, aeq_irq_work);
+ snprintf(eq->irq_name, sizeof(eq->irq_name),
+ "hinic3_aeq%u@pci:%s", eq->q_id,
+ pci_name(eq->hwdev->pdev));
+ err = request_irq(eq->irq_id, aeq_interrupt, 0,
+ eq->irq_name, eq);
+ } else {
+ snprintf(eq->irq_name, sizeof(eq->irq_name),
+ "hinic3_ceq%u@pci:%s", eq->q_id,
+ pci_name(eq->hwdev->pdev));
+ err = request_threaded_irq(eq->irq_id, NULL, ceq_interrupt,
+ IRQF_ONESHOT, eq->irq_name, eq);
+ }
+
+ return err;
+}
+
+static void reset_eq(struct hinic3_eq *eq)
+{
+ /* clear eq_len to force eqe drop in hardware */
+ if (eq->type == HINIC3_AEQ)
+ hinic3_hwif_write_reg(eq->hwdev->hwif,
+ HINIC3_CSR_AEQ_CTRL_1_ADDR, 0);
+ else
+ hinic3_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);
+
+ hinic3_hwif_write_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);
+}
+
+static int init_eq(struct hinic3_eq *eq, struct hinic3_hwdev *hwdev, u16 q_id,
+ u32 q_len, enum hinic3_eq_type type,
+ struct msix_entry *msix_entry)
+{
+ u32 elem_size;
+ int err;
+
+ eq->hwdev = hwdev;
+ eq->q_id = q_id;
+ eq->type = type;
+ eq->eq_len = q_len;
+
+ /* Indirect access should set q_id first */
+ hinic3_hwif_write_reg(hwdev->hwif, HINIC3_EQ_INDIR_IDX_ADDR(eq->type),
+ eq->q_id);
+
+ reset_eq(eq);
+
+ eq->cons_idx = 0;
+ eq->wrapped = 0;
+
+ elem_size = (type == HINIC3_AEQ) ? HINIC3_AEQE_SIZE : HINIC3_CEQE_SIZE;
+ eq_calc_page_size_and_num(eq, elem_size);
+
+ err = alloc_eq_pages(eq);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to allocate pages for eq\n");
+ return err;
+ }
+
+ eq->msix_entry_idx = msix_entry->entry;
+ eq->irq_id = msix_entry->vector;
+
+ err = set_eq_ctrls(eq);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set ctrls for eq\n");
+ goto err_free_queue_pages;
+ }
+
+ set_eq_cons_idx(eq, HINIC3_EQ_ARMED);
+
+ err = request_eq_irq(eq);
+ if (err) {
+ dev_err(hwdev->dev,
+ "Failed to request irq for the eq, err: %d\n", err);
+ goto err_free_queue_pages;
+ }
+
+ hinic3_set_msix_state(hwdev, eq->msix_entry_idx, HINIC3_MSIX_DISABLE);
+
+ return 0;
+
+err_free_queue_pages:
+ hinic3_queue_pages_free(hwdev, &eq->qpages);
+
+ return err;
+}
+
+static void remove_eq(struct hinic3_eq *eq)
+{
+ hinic3_set_msix_state(eq->hwdev, eq->msix_entry_idx,
+ HINIC3_MSIX_DISABLE);
+ free_irq(eq->irq_id, eq);
+ /* Indirect access should set q_id first */
+ hinic3_hwif_write_reg(eq->hwdev->hwif,
+ HINIC3_EQ_INDIR_IDX_ADDR(eq->type),
+ eq->q_id);
+
+ if (eq->type == HINIC3_AEQ) {
+ disable_work_sync(&eq->aeq_work);
+ /* clear eq_len to stop hw from accessing host memory */
+ hinic3_hwif_write_reg(eq->hwdev->hwif,
+ HINIC3_CSR_AEQ_CTRL_1_ADDR, 0);
+ } else {
+ hinic3_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);
+ }
+
+ /* update consumer index to avoid invalid interrupt */
+ eq->cons_idx = hinic3_hwif_read_reg(eq->hwdev->hwif,
+ EQ_PROD_IDX_REG_ADDR(eq));
+ set_eq_cons_idx(eq, HINIC3_EQ_NOT_ARMED);
+ hinic3_queue_pages_free(eq->hwdev, &eq->qpages);
+}
+
+int hinic3_aeqs_init(struct hinic3_hwdev *hwdev, u16 num_aeqs,
+ struct msix_entry *msix_entries)
+{
+ struct hinic3_aeqs *aeqs;
+ u16 q_id;
+ int err;
+
+ aeqs = kzalloc(sizeof(*aeqs), GFP_KERNEL);
+ if (!aeqs)
+ return -ENOMEM;
+
+ hwdev->aeqs = aeqs;
+ aeqs->hwdev = hwdev;
+ aeqs->num_aeqs = num_aeqs;
+ aeqs->workq = alloc_workqueue(HINIC3_EQS_WQ_NAME, WQ_MEM_RECLAIM,
+ HINIC3_MAX_AEQS);
+ if (!aeqs->workq) {
+ dev_err(hwdev->dev, "Failed to initialize aeq workqueue\n");
+ err = -ENOMEM;
+ goto err_free_aeqs;
+ }
+
+ for (q_id = 0; q_id < num_aeqs; q_id++) {
+ err = init_eq(&aeqs->aeq[q_id], hwdev, q_id,
+ HINIC3_DEFAULT_AEQ_LEN, HINIC3_AEQ,
+ &msix_entries[q_id]);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init aeq %u\n",
+ q_id);
+ goto err_remove_eqs;
+ }
+ }
+ for (q_id = 0; q_id < num_aeqs; q_id++)
+ hinic3_set_msix_state(hwdev, aeqs->aeq[q_id].msix_entry_idx,
+ HINIC3_MSIX_ENABLE);
+
+ return 0;
+
+err_remove_eqs:
+ while (q_id > 0) {
+ q_id--;
+ remove_eq(&aeqs->aeq[q_id]);
+ }
+
+ destroy_workqueue(aeqs->workq);
+
+err_free_aeqs:
+ kfree(aeqs);
+
+ return err;
+}
+
+void hinic3_aeqs_free(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_aeqs *aeqs = hwdev->aeqs;
+ enum hinic3_aeq_type aeq_event;
+ struct hinic3_eq *eq;
+ u16 q_id;
+
+ for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) {
+ eq = aeqs->aeq + q_id;
+ remove_eq(eq);
+ hinic3_free_irq(hwdev, eq->irq_id);
+ }
+
+ for (aeq_event = 0; aeq_event < HINIC3_MAX_AEQ_EVENTS; aeq_event++)
+ hinic3_aeq_unregister_cb(hwdev, aeq_event);
+
+ destroy_workqueue(aeqs->workq);
+
+ kfree(aeqs);
+}
+
+int hinic3_ceqs_init(struct hinic3_hwdev *hwdev, u16 num_ceqs,
+ struct msix_entry *msix_entries)
+{
+ struct hinic3_ceqs *ceqs;
+ u16 q_id;
+ int err;
+
+ ceqs = kzalloc(sizeof(*ceqs), GFP_KERNEL);
+ if (!ceqs)
+ return -ENOMEM;
+
+ hwdev->ceqs = ceqs;
+ ceqs->hwdev = hwdev;
+ ceqs->num_ceqs = num_ceqs;
+
+ for (q_id = 0; q_id < num_ceqs; q_id++) {
+ err = init_eq(&ceqs->ceq[q_id], hwdev, q_id,
+ HINIC3_DEFAULT_CEQ_LEN, HINIC3_CEQ,
+ &msix_entries[q_id]);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init ceq %u\n",
+ q_id);
+ goto err_free_ceqs;
+ }
+ }
+ for (q_id = 0; q_id < num_ceqs; q_id++)
+ hinic3_set_msix_state(hwdev, ceqs->ceq[q_id].msix_entry_idx,
+ HINIC3_MSIX_ENABLE);
+
+ return 0;
+
+err_free_ceqs:
+ while (q_id > 0) {
+ q_id--;
+ remove_eq(&ceqs->ceq[q_id]);
+ }
+
+ kfree(ceqs);
+
+ return err;
+}
+
+void hinic3_ceqs_free(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_ceqs *ceqs = hwdev->ceqs;
+ enum hinic3_ceq_event ceq_event;
+ struct hinic3_eq *eq;
+ u16 q_id;
+
+ for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) {
+ eq = ceqs->ceq + q_id;
+ remove_eq(eq);
+ hinic3_free_irq(hwdev, eq->irq_id);
+ }
+
+ for (ceq_event = 0; ceq_event < HINIC3_MAX_CEQ_EVENTS; ceq_event++)
+ hinic3_ceq_unregister_cb(hwdev, ceq_event);
+
+ kfree(ceqs);
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.h b/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.h
new file mode 100644
index 000000000000..005a6e0745b3
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_EQS_H_
+#define _HINIC3_EQS_H_
+
+#include <linux/interrupt.h>
+
+#include "hinic3_hw_cfg.h"
+#include "hinic3_queue_common.h"
+
+#define HINIC3_MAX_AEQS 4
+#define HINIC3_MAX_CEQS 32
+
+#define HINIC3_AEQ_MAX_PAGES 4
+#define HINIC3_CEQ_MAX_PAGES 8
+
+#define HINIC3_AEQE_SIZE 64
+#define HINIC3_CEQE_SIZE 4
+
+#define HINIC3_AEQE_DESC_SIZE 4
+#define HINIC3_AEQE_DATA_SIZE (HINIC3_AEQE_SIZE - HINIC3_AEQE_DESC_SIZE)
+
+#define HINIC3_DEFAULT_AEQ_LEN 0x10000
+#define HINIC3_DEFAULT_CEQ_LEN 0x10000
+
+#define HINIC3_EQ_IRQ_NAME_LEN 64
+
+#define HINIC3_EQ_USLEEP_LOW_BOUND 900
+#define HINIC3_EQ_USLEEP_HIGH_BOUND 1000
+
+enum hinic3_eq_type {
+ HINIC3_AEQ = 0,
+ HINIC3_CEQ = 1,
+};
+
+enum hinic3_eq_intr_mode {
+ HINIC3_INTR_MODE_ARMED = 0,
+ HINIC3_INTR_MODE_ALWAYS = 1,
+};
+
+enum hinic3_eq_ci_arm_state {
+ HINIC3_EQ_NOT_ARMED = 0,
+ HINIC3_EQ_ARMED = 1,
+};
+
+struct hinic3_eq {
+ struct hinic3_hwdev *hwdev;
+ struct hinic3_queue_pages qpages;
+ u16 q_id;
+ enum hinic3_eq_type type;
+ u32 eq_len;
+ u32 cons_idx;
+ u8 wrapped;
+ u32 irq_id;
+ u16 msix_entry_idx;
+ char irq_name[HINIC3_EQ_IRQ_NAME_LEN];
+ struct work_struct aeq_work;
+};
+
+struct hinic3_aeq_elem {
+ u8 aeqe_data[HINIC3_AEQE_DATA_SIZE];
+ __be32 desc;
+};
+
+enum hinic3_aeq_type {
+ HINIC3_HW_INTER_INT = 0,
+ HINIC3_MBX_FROM_FUNC = 1,
+ HINIC3_MSG_FROM_FW = 2,
+ HINIC3_MAX_AEQ_EVENTS = 6,
+};
+
+typedef void (*hinic3_aeq_event_cb)(struct hinic3_hwdev *hwdev, u8 *data,
+ u8 size);
+
+struct hinic3_aeqs {
+ struct hinic3_hwdev *hwdev;
+ hinic3_aeq_event_cb aeq_cb[HINIC3_MAX_AEQ_EVENTS];
+ struct hinic3_eq aeq[HINIC3_MAX_AEQS];
+ u16 num_aeqs;
+ struct workqueue_struct *workq;
+ /* protects aeq_cb[] against concurrent registration and dispatch */
+ spinlock_t aeq_lock;
+};
+
+enum hinic3_ceq_event {
+ HINIC3_CMDQ = 3,
+ HINIC3_MAX_CEQ_EVENTS = 6,
+};
+
+typedef void (*hinic3_ceq_event_cb)(struct hinic3_hwdev *hwdev,
+ __le32 ceqe_data);
+
+struct hinic3_ceqs {
+ struct hinic3_hwdev *hwdev;
+
+ hinic3_ceq_event_cb ceq_cb[HINIC3_MAX_CEQ_EVENTS];
+
+ struct hinic3_eq ceq[HINIC3_MAX_CEQS];
+ u16 num_ceqs;
+ /* protects ceq_cb[] against concurrent registration and dispatch */
+ spinlock_t ceq_lock;
+};
+
+int hinic3_aeqs_init(struct hinic3_hwdev *hwdev, u16 num_aeqs,
+ struct msix_entry *msix_entries);
+void hinic3_aeqs_free(struct hinic3_hwdev *hwdev);
+int hinic3_aeq_register_cb(struct hinic3_hwdev *hwdev,
+ enum hinic3_aeq_type event,
+ hinic3_aeq_event_cb hwe_cb);
+void hinic3_aeq_unregister_cb(struct hinic3_hwdev *hwdev,
+ enum hinic3_aeq_type event);
+int hinic3_ceqs_init(struct hinic3_hwdev *hwdev, u16 num_ceqs,
+ struct msix_entry *msix_entries);
+void hinic3_ceqs_free(struct hinic3_hwdev *hwdev);
+int hinic3_ceq_register_cb(struct hinic3_hwdev *hwdev,
+ enum hinic3_ceq_event event,
+ hinic3_ceq_event_cb callback);
+void hinic3_ceq_unregister_cb(struct hinic3_hwdev *hwdev,
+ enum hinic3_ceq_event event);
+
+#endif
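
A sketch of the expected registration pattern (hypothetical init path; hinic3_cmdq_ceq_handler() is declared in hinic3_cmdq.h): the cmdq layer hooks its completion handler to the HINIC3_CMDQ event line, and ceq_event_handler() later invokes it under ceq_lock with bottom halves disabled.

static int example_register_cmdq_ceq(struct hinic3_hwdev *hwdev)
{
        return hinic3_ceq_register_cb(hwdev, HINIC3_CMDQ,
                                      hinic3_cmdq_ceq_handler);
}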
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c
new file mode 100644
index 000000000000..7827c1f626db
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/device.h>
+
+#include "hinic3_hw_cfg.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+
+#define HINIC3_CFG_MAX_QP 256
+
+static void hinic3_parse_pub_res_cap(struct hinic3_hwdev *hwdev,
+ struct hinic3_dev_cap *cap,
+ const struct cfg_cmd_dev_cap *dev_cap,
+ enum hinic3_func_type type)
+{
+ cap->port_id = dev_cap->port_id;
+ cap->supp_svcs_bitmap = dev_cap->svc_cap_en;
+}
+
+static void hinic3_parse_l2nic_res_cap(struct hinic3_hwdev *hwdev,
+ struct hinic3_dev_cap *cap,
+ const struct cfg_cmd_dev_cap *dev_cap,
+ enum hinic3_func_type type)
+{
+ struct hinic3_nic_service_cap *nic_svc_cap = &cap->nic_svc_cap;
+
+ nic_svc_cap->max_sqs = min(dev_cap->nic_max_sq_id + 1,
+ HINIC3_CFG_MAX_QP);
+}
+
+static void hinic3_parse_dev_cap(struct hinic3_hwdev *hwdev,
+ const struct cfg_cmd_dev_cap *dev_cap,
+ enum hinic3_func_type type)
+{
+ struct hinic3_dev_cap *cap = &hwdev->cfg_mgmt->cap;
+
+ /* Public resource */
+ hinic3_parse_pub_res_cap(hwdev, cap, dev_cap, type);
+
+ /* L2 NIC resource */
+ if (hinic3_support_nic(hwdev))
+ hinic3_parse_l2nic_res_cap(hwdev, cap, dev_cap, type);
+}
+
+static int get_cap_from_fw(struct hinic3_hwdev *hwdev,
+ enum hinic3_func_type type)
+{
+ struct mgmt_msg_params msg_params = {};
+ struct cfg_cmd_dev_cap dev_cap = {};
+ int err;
+
+ dev_cap.func_id = hinic3_global_func_id(hwdev);
+
+ mgmt_msg_params_init_default(&msg_params, &dev_cap, sizeof(dev_cap));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_CFGM,
+ CFG_CMD_GET_DEV_CAP, &msg_params);
+ if (err || dev_cap.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to get capability from FW, err: %d, status: 0x%x\n",
+ err, dev_cap.head.status);
+ return -EIO;
+ }
+
+ hinic3_parse_dev_cap(hwdev, &dev_cap, type);
+
+ return 0;
+}
+
+static int hinic3_init_irq_info(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cfg_mgmt_info *cfg_mgmt = hwdev->cfg_mgmt;
+ struct hinic3_hwif *hwif = hwdev->hwif;
+ u16 intr_num = hwif->attr.num_irqs;
+ struct hinic3_irq_info *irq_info;
+ u16 intr_needed;
+
+ intr_needed = hwif->attr.msix_flex_en ? (hwif->attr.num_aeqs +
+ hwif->attr.num_ceqs + hwif->attr.num_sq) : intr_num;
+ if (intr_needed > intr_num) {
+ dev_warn(hwdev->dev, "Irq num cfg %d is less than the needed irq num %d msix_flex_en %d\n",
+ intr_num, intr_needed, hwdev->hwif->attr.msix_flex_en);
+ intr_needed = intr_num;
+ }
+
+ irq_info = &cfg_mgmt->irq_info;
+ irq_info->irq = kcalloc(intr_num, sizeof(struct hinic3_irq),
+ GFP_KERNEL);
+ if (!irq_info->irq)
+ return -ENOMEM;
+
+ irq_info->num_irq_hw = intr_needed;
+ mutex_init(&irq_info->irq_mutex);
+
+ return 0;
+}
+
+static int hinic3_init_irq_alloc_info(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cfg_mgmt_info *cfg_mgmt = hwdev->cfg_mgmt;
+ struct hinic3_irq *irq = cfg_mgmt->irq_info.irq;
+ u16 nreq = cfg_mgmt->irq_info.num_irq_hw;
+ struct pci_dev *pdev = hwdev->pdev;
+ int actual_irq;
+ u16 i;
+
+ actual_irq = pci_alloc_irq_vectors(pdev, 2, nreq, PCI_IRQ_MSIX);
+ if (actual_irq < 0) {
+ dev_err(hwdev->dev, "Alloc msix entries with threshold 2 failed. actual_irq: %d\n",
+ actual_irq);
+ return -ENOMEM;
+ }
+
+ nreq = actual_irq;
+ cfg_mgmt->irq_info.num_irq = nreq;
+
+ for (i = 0; i < nreq; ++i) {
+ irq[i].msix_entry_idx = i;
+ irq[i].irq_id = pci_irq_vector(pdev, i);
+ irq[i].allocated = false;
+ }
+
+ return 0;
+}
+
+int hinic3_init_cfg_mgmt(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cfg_mgmt_info *cfg_mgmt;
+ int err;
+
+ cfg_mgmt = kzalloc(sizeof(*cfg_mgmt), GFP_KERNEL);
+ if (!cfg_mgmt)
+ return -ENOMEM;
+
+ hwdev->cfg_mgmt = cfg_mgmt;
+
+ err = hinic3_init_irq_info(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init hinic3_irq_mgmt_info, err: %d\n",
+ err);
+ goto err_free_cfg_mgmt;
+ }
+
+ err = hinic3_init_irq_alloc_info(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init hinic3_irq_info, err: %d\n",
+ err);
+ goto err_free_irq_info;
+ }
+
+ return 0;
+
+err_free_irq_info:
+ kfree(cfg_mgmt->irq_info.irq);
+ cfg_mgmt->irq_info.irq = NULL;
+err_free_cfg_mgmt:
+ kfree(cfg_mgmt);
+
+ return err;
+}
+
+void hinic3_free_cfg_mgmt(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cfg_mgmt_info *cfg_mgmt = hwdev->cfg_mgmt;
+
+ pci_free_irq_vectors(hwdev->pdev);
+ kfree(cfg_mgmt->irq_info.irq);
+ cfg_mgmt->irq_info.irq = NULL;
+ kfree(cfg_mgmt);
+}
+
+int hinic3_alloc_irqs(struct hinic3_hwdev *hwdev, u16 num,
+ struct msix_entry *alloc_arr, u16 *act_num)
+{
+ struct hinic3_irq_info *irq_info;
+ struct hinic3_irq *curr;
+ u16 i, found = 0;
+
+ irq_info = &hwdev->cfg_mgmt->irq_info;
+ mutex_lock(&irq_info->irq_mutex);
+ for (i = 0; i < irq_info->num_irq && found < num; i++) {
+ curr = irq_info->irq + i;
+ if (curr->allocated)
+ continue;
+ curr->allocated = true;
+ alloc_arr[found].vector = curr->irq_id;
+ alloc_arr[found].entry = curr->msix_entry_idx;
+ found++;
+ }
+ mutex_unlock(&irq_info->irq_mutex);
+
+ *act_num = found;
+
+ return found == 0 ? -ENOMEM : 0;
+}
+
+void hinic3_free_irq(struct hinic3_hwdev *hwdev, u32 irq_id)
+{
+ struct hinic3_irq_info *irq_info;
+ struct hinic3_irq *curr;
+ u16 i;
+
+ irq_info = &hwdev->cfg_mgmt->irq_info;
+ mutex_lock(&irq_info->irq_mutex);
+ for (i = 0; i < irq_info->num_irq; i++) {
+ curr = irq_info->irq + i;
+ if (curr->irq_id == irq_id) {
+ curr->allocated = false;
+ break;
+ }
+ }
+ mutex_unlock(&irq_info->irq_mutex);
+}
+
+int hinic3_init_capability(struct hinic3_hwdev *hwdev)
+{
+ return get_cap_from_fw(hwdev, HINIC3_FUNC_TYPE_VF);
+}
+
+bool hinic3_support_nic(struct hinic3_hwdev *hwdev)
+{
+ return hwdev->cfg_mgmt->cap.supp_svcs_bitmap &
+ BIT(HINIC3_SERVICE_T_NIC);
+}
+
+u16 hinic3_func_max_qnum(struct hinic3_hwdev *hwdev)
+{
+ return hwdev->cfg_mgmt->cap.nic_svc_cap.max_sqs;
+}
+
+u8 hinic3_physical_port_id(struct hinic3_hwdev *hwdev)
+{
+ return hwdev->cfg_mgmt->cap.port_id;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h
new file mode 100644
index 000000000000..58806199bf54
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_HW_CFG_H_
+#define _HINIC3_HW_CFG_H_
+
+#include <linux/mutex.h>
+#include <linux/pci.h>
+
+struct hinic3_hwdev;
+
+struct hinic3_irq {
+ u32 irq_id;
+ u16 msix_entry_idx;
+ bool allocated;
+};
+
+struct hinic3_irq_info {
+ struct hinic3_irq *irq;
+ u16 num_irq;
+ /* device max irq number */
+ u16 num_irq_hw;
+ /* protect irq alloc and free */
+ struct mutex irq_mutex;
+};
+
+struct hinic3_nic_service_cap {
+ u16 max_sqs;
+};
+
+/* Device capabilities */
+struct hinic3_dev_cap {
+ /* Bitmasks of services supported by device */
+ u16 supp_svcs_bitmap;
+ /* Physical port */
+ u8 port_id;
+ struct hinic3_nic_service_cap nic_svc_cap;
+};
+
+struct hinic3_cfg_mgmt_info {
+ struct hinic3_irq_info irq_info;
+ struct hinic3_dev_cap cap;
+};
+
+int hinic3_init_cfg_mgmt(struct hinic3_hwdev *hwdev);
+void hinic3_free_cfg_mgmt(struct hinic3_hwdev *hwdev);
+
+int hinic3_alloc_irqs(struct hinic3_hwdev *hwdev, u16 num,
+ struct msix_entry *alloc_arr, u16 *act_num);
+void hinic3_free_irq(struct hinic3_hwdev *hwdev, u32 irq_id);
+
+int hinic3_init_capability(struct hinic3_hwdev *hwdev);
+bool hinic3_support_nic(struct hinic3_hwdev *hwdev);
+u16 hinic3_func_max_qnum(struct hinic3_hwdev *hwdev);
+u8 hinic3_physical_port_id(struct hinic3_hwdev *hwdev);
+
+#endif
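
A sketch of how a consumer draws vectors from the pool built by hinic3_init_irq_alloc_info() (hypothetical caller): hinic3_alloc_irqs() returns up to the requested number of free entries and reports the actual count through act_num.

static int example_alloc_eq_vectors(struct hinic3_hwdev *hwdev)
{
        struct msix_entry entries[2];
        u16 act_num;
        int err, i;

        err = hinic3_alloc_irqs(hwdev, 2, entries, &act_num);
        if (err)
                return err;

        /* act_num may be smaller than requested; entries[i].vector is the
         * Linux IRQ number and entries[i].entry the HW MSI-X index.
         */

        for (i = 0; i < act_num; i++)
                hinic3_free_irq(hwdev, entries[i].vector);

        return 0;
}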
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c
new file mode 100644
index 000000000000..89638813df40
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c
@@ -0,0 +1,426 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/delay.h>
+
+#include "hinic3_cmdq.h"
+#include "hinic3_hw_comm.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+
+int hinic3_set_interrupt_cfg_direct(struct hinic3_hwdev *hwdev,
+ const struct hinic3_interrupt_info *info)
+{
+ struct comm_cmd_cfg_msix_ctrl_reg msix_cfg = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ msix_cfg.func_id = hinic3_global_func_id(hwdev);
+ msix_cfg.msix_index = info->msix_index;
+ msix_cfg.opcode = MGMT_MSG_CMD_OP_SET;
+
+ msix_cfg.lli_credit_cnt = info->lli_credit_limit;
+ msix_cfg.lli_timer_cnt = info->lli_timer_cfg;
+ msix_cfg.pending_cnt = info->pending_limit;
+ msix_cfg.coalesce_timer_cnt = info->coalesc_timer_cfg;
+ msix_cfg.resend_timer_cnt = info->resend_timer_cfg;
+
+ mgmt_msg_params_init_default(&msg_params, &msix_cfg, sizeof(msix_cfg));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_CFG_MSIX_CTRL_REG, &msg_params);
+ if (err || msix_cfg.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set interrupt config, err: %d, status: 0x%x\n",
+ err, msix_cfg.head.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic3_func_reset(struct hinic3_hwdev *hwdev, u16 func_id, u64 reset_flag)
+{
+ struct comm_cmd_func_reset func_reset = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ func_reset.func_id = func_id;
+ func_reset.reset_flag = reset_flag;
+
+ mgmt_msg_params_init_default(&msg_params, &func_reset,
+ sizeof(func_reset));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_FUNC_RESET, &msg_params);
+ if (err || func_reset.head.status) {
+ dev_err(hwdev->dev, "Failed to reset func resources, reset_flag 0x%llx, err: %d, status: 0x%x\n",
+ reset_flag, err, func_reset.head.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int hinic3_comm_features_nego(struct hinic3_hwdev *hwdev, u8 opcode,
+ u64 *s_feature, u16 size)
+{
+ struct comm_cmd_feature_nego feature_nego = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ feature_nego.func_id = hinic3_global_func_id(hwdev);
+ feature_nego.opcode = opcode;
+ if (opcode == MGMT_MSG_CMD_OP_SET)
+ memcpy(feature_nego.s_feature, s_feature,
+ array_size(size, sizeof(u64)));
+
+ mgmt_msg_params_init_default(&msg_params, &feature_nego,
+ sizeof(feature_nego));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_FEATURE_NEGO, &msg_params);
+ if (err || feature_nego.head.status) {
+ dev_err(hwdev->dev, "Failed to negotiate feature, err: %d, status: 0x%x\n",
+ err, feature_nego.head.status);
+ return -EINVAL;
+ }
+
+ if (opcode == MGMT_MSG_CMD_OP_GET)
+ memcpy(s_feature, feature_nego.s_feature,
+ array_size(size, sizeof(u64)));
+
+ return 0;
+}
+
+int hinic3_get_comm_features(struct hinic3_hwdev *hwdev, u64 *s_feature,
+ u16 size)
+{
+ return hinic3_comm_features_nego(hwdev, MGMT_MSG_CMD_OP_GET, s_feature,
+ size);
+}
+
+int hinic3_set_comm_features(struct hinic3_hwdev *hwdev, u64 *s_feature,
+ u16 size)
+{
+ return hinic3_comm_features_nego(hwdev, MGMT_MSG_CMD_OP_SET, s_feature,
+ size);
+}
+
+int hinic3_get_global_attr(struct hinic3_hwdev *hwdev,
+ struct comm_global_attr *attr)
+{
+ struct comm_cmd_get_glb_attr get_attr = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ mgmt_msg_params_init_default(&msg_params, &get_attr, sizeof(get_attr));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_GET_GLOBAL_ATTR, &msg_params);
+ if (err || get_attr.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to get global attribute, err: %d, status: 0x%x\n",
+ err, get_attr.head.status);
+ return -EIO;
+ }
+
+ memcpy(attr, &get_attr.attr, sizeof(*attr));
+
+ return 0;
+}
+
+int hinic3_set_func_svc_used_state(struct hinic3_hwdev *hwdev, u16 svc_type,
+ u8 state)
+{
+ struct comm_cmd_set_func_svc_used_state used_state = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ used_state.func_id = hinic3_global_func_id(hwdev);
+ used_state.svc_type = svc_type;
+ used_state.used_state = state;
+
+ mgmt_msg_params_init_default(&msg_params, &used_state,
+ sizeof(used_state));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_SET_FUNC_SVC_USED_STATE,
+ &msg_params);
+ if (err || used_state.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set func service used state, err: %d, status: 0x%x\n",
+ err, used_state.head.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic3_set_dma_attr_tbl(struct hinic3_hwdev *hwdev, u8 entry_idx, u8 st,
+ u8 at, u8 ph, u8 no_snooping, u8 tph_en)
+{
+ struct comm_cmd_set_dma_attr dma_attr = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ dma_attr.func_id = hinic3_global_func_id(hwdev);
+ dma_attr.entry_idx = entry_idx;
+ dma_attr.st = st;
+ dma_attr.at = at;
+ dma_attr.ph = ph;
+ dma_attr.no_snooping = no_snooping;
+ dma_attr.tph_en = tph_en;
+
+ mgmt_msg_params_init_default(&msg_params, &dma_attr, sizeof(dma_attr));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_SET_DMA_ATTR, &msg_params);
+ if (err || dma_attr.head.status) {
+ dev_err(hwdev->dev, "Failed to set dma attr, err: %d, status: 0x%x\n",
+ err, dma_attr.head.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic3_set_wq_page_size(struct hinic3_hwdev *hwdev, u16 func_idx,
+ u32 page_size)
+{
+ struct comm_cmd_cfg_wq_page_size page_size_info = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ page_size_info.func_id = func_idx;
+ page_size_info.page_size = ilog2(page_size / HINIC3_MIN_PAGE_SIZE);
+ page_size_info.opcode = MGMT_MSG_CMD_OP_SET;
+
+ mgmt_msg_params_init_default(&msg_params, &page_size_info,
+ sizeof(page_size_info));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_CFG_PAGESIZE, &msg_params);
+ if (err || page_size_info.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set wq page size, err: %d, status: 0x%x\n",
+ err, page_size_info.head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic3_set_cmdq_depth(struct hinic3_hwdev *hwdev, u16 cmdq_depth)
+{
+ struct comm_cmd_set_root_ctxt root_ctxt = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ root_ctxt.func_id = hinic3_global_func_id(hwdev);
+
+ root_ctxt.set_cmdq_depth = 1;
+ root_ctxt.cmdq_depth = ilog2(cmdq_depth);
+
+ mgmt_msg_params_init_default(&msg_params, &root_ctxt,
+ sizeof(root_ctxt));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_SET_VAT, &msg_params);
+ if (err || root_ctxt.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set cmdq depth, err: %d, status: 0x%x\n",
+ err, root_ctxt.head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+#define HINIC3_WAIT_CMDQ_IDLE_TIMEOUT 5000
+
+static enum hinic3_wait_return check_cmdq_stop_handler(void *priv_data)
+{
+ struct hinic3_hwdev *hwdev = priv_data;
+ enum hinic3_cmdq_type cmdq_type;
+ struct hinic3_cmdqs *cmdqs;
+
+ cmdqs = hwdev->cmdqs;
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
+ if (!hinic3_cmdq_idle(&cmdqs->cmdq[cmdq_type]))
+ return HINIC3_WAIT_PROCESS_WAITING;
+ }
+
+ return HINIC3_WAIT_PROCESS_CPL;
+}
+
+static int wait_cmdq_stop(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cmdqs *cmdqs = hwdev->cmdqs;
+ enum hinic3_cmdq_type cmdq_type;
+ int err;
+
+ if (!(cmdqs->status & HINIC3_CMDQ_ENABLE))
+ return 0;
+
+ cmdqs->status &= ~HINIC3_CMDQ_ENABLE;
+ err = hinic3_wait_for_timeout(hwdev, check_cmdq_stop_handler,
+ HINIC3_WAIT_CMDQ_IDLE_TIMEOUT,
+ USEC_PER_MSEC);
+
+ if (err)
+ goto err_reenable_cmdq;
+
+ return 0;
+
+err_reenable_cmdq:
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
+ if (!hinic3_cmdq_idle(&cmdqs->cmdq[cmdq_type]))
+ dev_err(hwdev->dev, "Cmdq %d is busy\n", cmdq_type);
+ }
+ cmdqs->status |= HINIC3_CMDQ_ENABLE;
+
+ return err;
+}
+
+int hinic3_func_rx_tx_flush(struct hinic3_hwdev *hwdev)
+{
+ struct comm_cmd_clear_resource clear_db = {};
+ struct comm_cmd_clear_resource clr_res = {};
+ struct hinic3_hwif *hwif = hwdev->hwif;
+ struct mgmt_msg_params msg_params = {};
+ int ret = 0;
+ int err;
+
+ err = wait_cmdq_stop(hwdev);
+ if (err) {
+ dev_warn(hwdev->dev, "CMDQ is still working, CMDQ timeout value is unreasonable\n");
+ ret = err;
+ }
+
+ hinic3_toggle_doorbell(hwif, DISABLE_DOORBELL);
+
+ clear_db.func_id = hwif->attr.func_global_idx;
+ mgmt_msg_params_init_default(&msg_params, &clear_db, sizeof(clear_db));
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_FLUSH_DOORBELL, &msg_params);
+ if (err || clear_db.head.status) {
+ dev_warn(hwdev->dev, "Failed to flush doorbell, err: %d, status: 0x%x\n",
+ err, clear_db.head.status);
+ if (err)
+ ret = err;
+ else
+ ret = -EFAULT;
+ }
+
+ clr_res.func_id = hwif->attr.func_global_idx;
+ msg_params.buf_in = &clr_res;
+ msg_params.in_size = sizeof(clr_res);
+ err = hinic3_send_mbox_to_mgmt_no_ack(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_START_FLUSH,
+ &msg_params);
+ if (err) {
+ dev_warn(hwdev->dev, "Failed to notice flush message, err: %d\n",
+ err);
+ ret = err;
+ }
+
+ hinic3_toggle_doorbell(hwif, ENABLE_DOORBELL);
+
+ err = hinic3_reinit_cmdq_ctxts(hwdev);
+ if (err) {
+ dev_warn(hwdev->dev, "Failed to reinit cmdq\n");
+ ret = err;
+ }
+
+ return ret;
+}
+
+static int get_hw_rx_buf_size_idx(int rx_buf_sz, u16 *buf_sz_idx)
+{
+ /* Supported RX buffer sizes in bytes. Configured by array index. */
+ static const int supported_sizes[16] = {
+ [0] = 32, [1] = 64, [2] = 96, [3] = 128,
+ [4] = 192, [5] = 256, [6] = 384, [7] = 512,
+ [8] = 768, [9] = 1024, [10] = 1536, [11] = 2048,
+ [12] = 3072, [13] = 4096, [14] = 8192, [15] = 16384,
+ };
+ u16 idx;
+
+ /* Scan from biggest to smallest and choose the supported size that is
+ * equal to or smaller than the requested one. With a smaller value HW
+ * under-utilizes posted buffers; with a bigger value HW may overrun them.
+ */
+ idx = ARRAY_SIZE(supported_sizes);
+ while (idx > 0) {
+ idx--;
+ if (supported_sizes[idx] <= rx_buf_sz) {
+ *buf_sz_idx = idx;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+int hinic3_set_root_ctxt(struct hinic3_hwdev *hwdev, u32 rq_depth, u32 sq_depth,
+ int rx_buf_sz)
+{
+ struct comm_cmd_set_root_ctxt root_ctxt = {};
+ struct mgmt_msg_params msg_params = {};
+ u16 buf_sz_idx;
+ int err;
+
+ err = get_hw_rx_buf_size_idx(rx_buf_sz, &buf_sz_idx);
+ if (err)
+ return err;
+
+ root_ctxt.func_id = hinic3_global_func_id(hwdev);
+
+ root_ctxt.set_cmdq_depth = 0;
+ root_ctxt.cmdq_depth = 0;
+
+ root_ctxt.lro_en = 1;
+
+ root_ctxt.rq_depth = ilog2(rq_depth);
+ root_ctxt.rx_buf_sz = buf_sz_idx;
+ root_ctxt.sq_depth = ilog2(sq_depth);
+
+ mgmt_msg_params_init_default(&msg_params, &root_ctxt,
+ sizeof(root_ctxt));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_SET_VAT, &msg_params);
+ if (err || root_ctxt.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set root context, err: %d, status: 0x%x\n",
+ err, root_ctxt.head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic3_clean_root_ctxt(struct hinic3_hwdev *hwdev)
+{
+ struct comm_cmd_set_root_ctxt root_ctxt = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ root_ctxt.func_id = hinic3_global_func_id(hwdev);
+
+ mgmt_msg_params_init_default(&msg_params, &root_ctxt,
+ sizeof(root_ctxt));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_SET_VAT, &msg_params);
+ if (err || root_ctxt.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set root context, err: %d, status: 0x%x\n",
+ err, root_ctxt.head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h
new file mode 100644
index 000000000000..304f5691f0c2
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_HW_COMM_H_
+#define _HINIC3_HW_COMM_H_
+
+#include "hinic3_hw_intf.h"
+
+struct hinic3_hwdev;
+
+#define HINIC3_WQ_PAGE_SIZE_ORDER 8
+
+struct hinic3_interrupt_info {
+ u32 lli_set;
+ u32 interrupt_coalesc_set;
+ u16 msix_index;
+ u8 lli_credit_limit;
+ u8 lli_timer_cfg;
+ u8 pending_limit;
+ u8 coalesc_timer_cfg;
+ u8 resend_timer_cfg;
+};
+
+int hinic3_set_interrupt_cfg_direct(struct hinic3_hwdev *hwdev,
+ const struct hinic3_interrupt_info *info);
+int hinic3_func_reset(struct hinic3_hwdev *hwdev, u16 func_id, u64 reset_flag);
+
+int hinic3_get_comm_features(struct hinic3_hwdev *hwdev, u64 *s_feature,
+ u16 size);
+int hinic3_set_comm_features(struct hinic3_hwdev *hwdev, u64 *s_feature,
+ u16 size);
+int hinic3_get_global_attr(struct hinic3_hwdev *hwdev,
+ struct comm_global_attr *attr);
+int hinic3_set_func_svc_used_state(struct hinic3_hwdev *hwdev, u16 svc_type,
+ u8 state);
+int hinic3_set_dma_attr_tbl(struct hinic3_hwdev *hwdev, u8 entry_idx, u8 st,
+ u8 at, u8 ph, u8 no_snooping, u8 tph_en);
+
+int hinic3_set_wq_page_size(struct hinic3_hwdev *hwdev, u16 func_idx,
+ u32 page_size);
+int hinic3_set_cmdq_depth(struct hinic3_hwdev *hwdev, u16 cmdq_depth);
+int hinic3_func_rx_tx_flush(struct hinic3_hwdev *hwdev);
+int hinic3_set_root_ctxt(struct hinic3_hwdev *hwdev, u32 rq_depth, u32 sq_depth,
+ int rx_buf_sz);
+int hinic3_clean_root_ctxt(struct hinic3_hwdev *hwdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h
new file mode 100644
index 000000000000..623cf2d14cbc
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h
@@ -0,0 +1,264 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_HW_INTF_H_
+#define _HINIC3_HW_INTF_H_
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+#define MGMT_MSG_CMD_OP_SET 1
+#define MGMT_MSG_CMD_OP_GET 0
+
+#define MGMT_STATUS_PF_SET_VF_ALREADY 0x4
+#define MGMT_STATUS_EXIST 0x6
+#define MGMT_STATUS_CMD_UNSUPPORTED 0xFF
+
+#define MGMT_MSG_POLLING_TIMEOUT 0
+
+struct mgmt_msg_head {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+};
+
+struct mgmt_msg_params {
+ const void *buf_in;
+ u32 in_size;
+ void *buf_out;
+ u32 expected_out_size;
+ u32 timeout_ms;
+};
+
+/* CMDQ MODULE_TYPE */
+enum mgmt_mod_type {
+ /* HW communication module */
+ MGMT_MOD_COMM = 0,
+ /* L2NIC module */
+ MGMT_MOD_L2NIC = 1,
+ /* Configuration module */
+ MGMT_MOD_CFGM = 7,
+ MGMT_MOD_HILINK = 14,
+};
+
+static inline void mgmt_msg_params_init_default(struct mgmt_msg_params *msg_params,
+ void *inout_buf, u32 buf_size)
+{
+ msg_params->buf_in = inout_buf;
+ msg_params->buf_out = inout_buf;
+ msg_params->in_size = buf_size;
+ msg_params->expected_out_size = buf_size;
+ msg_params->timeout_ms = 0;
+}
+
+enum cfg_cmd {
+ CFG_CMD_GET_DEV_CAP = 0,
+};
+
+/* Device capabilities, defined by hw */
+struct cfg_cmd_dev_cap {
+ struct mgmt_msg_head head;
+
+ u16 func_id;
+ u16 rsvd1;
+
+ /* Public resources */
+ u8 host_id;
+ u8 ep_id;
+ u8 er_id;
+ u8 port_id;
+
+ u16 host_total_func;
+ u8 host_pf_num;
+ u8 pf_id_start;
+ u16 host_vf_num;
+ u16 vf_id_start;
+ u8 host_oq_id_mask_val;
+ u8 timer_en;
+ u8 host_valid_bitmap;
+ u8 rsvd_host;
+
+ u16 svc_cap_en;
+ u16 max_vf;
+ u8 flexq_en;
+ u8 valid_cos_bitmap;
+ u8 port_cos_valid_bitmap;
+ u8 rsvd2[45];
+
+ /* l2nic */
+ u16 nic_max_sq_id;
+ u16 nic_max_rq_id;
+ u16 nic_default_num_queues;
+
+ u8 rsvd3[250];
+};
+
+/* COMM Commands between Driver to fw */
+enum comm_cmd {
+ /* Commands for clearing FLR and resources */
+ COMM_CMD_FUNC_RESET = 0,
+ COMM_CMD_FEATURE_NEGO = 1,
+ COMM_CMD_FLUSH_DOORBELL = 2,
+ COMM_CMD_START_FLUSH = 3,
+ COMM_CMD_GET_GLOBAL_ATTR = 5,
+ COMM_CMD_SET_FUNC_SVC_USED_STATE = 7,
+
+ /* Driver Configuration Commands */
+ COMM_CMD_SET_CMDQ_CTXT = 20,
+ COMM_CMD_SET_VAT = 21,
+ COMM_CMD_CFG_PAGESIZE = 22,
+ COMM_CMD_CFG_MSIX_CTRL_REG = 23,
+ COMM_CMD_SET_CEQ_CTRL_REG = 24,
+ COMM_CMD_SET_DMA_ATTR = 25,
+};
+
+struct comm_cmd_cfg_msix_ctrl_reg {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u8 opcode;
+ u8 rsvd1;
+ u16 msix_index;
+ u8 pending_cnt;
+ u8 coalesce_timer_cnt;
+ u8 resend_timer_cnt;
+ u8 lli_timer_cnt;
+ u8 lli_credit_cnt;
+ u8 rsvd2[5];
+};
+
+enum comm_func_reset_bits {
+ COMM_FUNC_RESET_BIT_FLUSH = BIT(0),
+ COMM_FUNC_RESET_BIT_MQM = BIT(1),
+ COMM_FUNC_RESET_BIT_SMF = BIT(2),
+ COMM_FUNC_RESET_BIT_PF_BW_CFG = BIT(3),
+
+ COMM_FUNC_RESET_BIT_COMM = BIT(10),
+ /* clear mbox and aeq; the COMM_FUNC_RESET_BIT_COMM bit must also be set */
+ COMM_FUNC_RESET_BIT_COMM_MGMT_CH = BIT(11),
+ /* clear cmdq and ceq; the COMM_FUNC_RESET_BIT_COMM bit must also be set */
+ COMM_FUNC_RESET_BIT_COMM_CMD_CH = BIT(12),
+ COMM_FUNC_RESET_BIT_NIC = BIT(13),
+};
+
+#define COMM_FUNC_RESET_FLAG \
+ (COMM_FUNC_RESET_BIT_COMM | COMM_FUNC_RESET_BIT_COMM_CMD_CH | \
+ COMM_FUNC_RESET_BIT_FLUSH | COMM_FUNC_RESET_BIT_MQM | \
+ COMM_FUNC_RESET_BIT_SMF | COMM_FUNC_RESET_BIT_PF_BW_CFG)
+
+struct comm_cmd_func_reset {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u16 rsvd1[3];
+ u64 reset_flag;
+};
+
+#define COMM_MAX_FEATURE_QWORD 4
+struct comm_cmd_feature_nego {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u8 opcode;
+ u8 rsvd;
+ u64 s_feature[COMM_MAX_FEATURE_QWORD];
+};
+
+struct comm_global_attr {
+ u8 max_host_num;
+ u8 max_pf_num;
+ u16 vf_id_start;
+ /* for api cmd to mgmt cpu */
+ u8 mgmt_host_node_id;
+ u8 cmdq_num;
+ u8 rsvd1[34];
+};
+
+struct comm_cmd_get_glb_attr {
+ struct mgmt_msg_head head;
+ struct comm_global_attr attr;
+};
+
+enum comm_func_svc_type {
+ COMM_FUNC_SVC_T_COMM = 0,
+ COMM_FUNC_SVC_T_NIC = 1,
+};
+
+struct comm_cmd_set_func_svc_used_state {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u16 svc_type;
+ u8 used_state;
+ u8 rsvd[35];
+};
+
+struct comm_cmd_set_dma_attr {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u8 entry_idx;
+ u8 st;
+ u8 at;
+ u8 ph;
+ u8 no_snooping;
+ u8 tph_en;
+ u32 resv1;
+};
+
+struct comm_cmd_set_ceq_ctrl_reg {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u16 q_id;
+ u32 ctrl0;
+ u32 ctrl1;
+ u32 rsvd1;
+};
+
+struct comm_cmd_cfg_wq_page_size {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u8 opcode;
+	/* real_size = 4KB * 2^page_size; valid range 0..20, checked by the driver */
+ u8 page_size;
+ u32 rsvd1;
+};
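+/* Worked example of the page_size encoding above: page_size 0 selects 4 KB
+ * pages, page_size 9 selects 4 KB * 2^9 = 2 MB, and the upper bound of 20
+ * corresponds to 4 GB.
+ */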
+
+struct comm_cmd_set_root_ctxt {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u8 set_cmdq_depth;
+ u8 cmdq_depth;
+ u16 rx_buf_sz;
+ u8 lro_en;
+ u8 rsvd1;
+ u16 sq_depth;
+ u16 rq_depth;
+ u64 rsvd2;
+};
+
+struct comm_cmdq_ctxt_info {
+ __le64 curr_wqe_page_pfn;
+ __le64 wq_block_pfn;
+};
+
+struct comm_cmd_set_cmdq_ctxt {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u8 cmdq_id;
+ u8 rsvd1[5];
+ struct comm_cmdq_ctxt_info ctxt;
+};
+
+struct comm_cmd_clear_resource {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u16 rsvd1[3];
+};
+
+/* Services supported by HW. HW uses these values when delivering events.
+ * HW supports multiple services that are not yet supported by the driver
+ * (e.g. RoCE).
+ */
+enum hinic3_service_type {
+ HINIC3_SERVICE_T_NIC = 0,
+ /* MAX is only used by SW for array sizes. */
+ HINIC3_SERVICE_T_MAX = 1,
+};
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c
new file mode 100644
index 000000000000..95a213133be9
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c
@@ -0,0 +1,557 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include "hinic3_cmdq.h"
+#include "hinic3_csr.h"
+#include "hinic3_eqs.h"
+#include "hinic3_hw_comm.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+#include "hinic3_mgmt.h"
+
+#define HINIC3_PCIE_SNOOP 0
+#define HINIC3_PCIE_TPH_DISABLE 0
+
+#define HINIC3_DMA_ATTR_INDIR_IDX_MASK GENMASK(9, 0)
+#define HINIC3_DMA_ATTR_INDIR_IDX_SET(val, member) \
+ FIELD_PREP(HINIC3_DMA_ATTR_INDIR_##member##_MASK, val)
+
+#define HINIC3_DMA_ATTR_ENTRY_ST_MASK GENMASK(7, 0)
+#define HINIC3_DMA_ATTR_ENTRY_AT_MASK GENMASK(9, 8)
+#define HINIC3_DMA_ATTR_ENTRY_PH_MASK GENMASK(11, 10)
+#define HINIC3_DMA_ATTR_ENTRY_NO_SNOOPING_MASK BIT(12)
+#define HINIC3_DMA_ATTR_ENTRY_TPH_EN_MASK BIT(13)
+#define HINIC3_DMA_ATTR_ENTRY_SET(val, member) \
+ FIELD_PREP(HINIC3_DMA_ATTR_ENTRY_##member##_MASK, val)
+
+#define HINIC3_PCIE_ST_DISABLE 0
+#define HINIC3_PCIE_AT_DISABLE 0
+#define HINIC3_PCIE_PH_DISABLE 0
+#define HINIC3_PCIE_MSIX_ATTR_ENTRY 0
+
+#define HINIC3_DEFAULT_EQ_MSIX_PENDING_LIMIT 0
+#define HINIC3_DEFAULT_EQ_MSIX_COALESC_TIMER_CFG 0xFF
+#define HINIC3_DEFAULT_EQ_MSIX_RESEND_TIMER_CFG 7
+
+#define HINIC3_HWDEV_WQ_NAME "hinic3_hardware"
+#define HINIC3_WQ_MAX_REQ 10
+
+enum hinic3_hwdev_init_state {
+ HINIC3_HWDEV_MBOX_INITED = 2,
+ HINIC3_HWDEV_CMDQ_INITED = 3,
+};
+
+static int hinic3_comm_aeqs_init(struct hinic3_hwdev *hwdev)
+{
+ struct msix_entry aeq_msix_entries[HINIC3_MAX_AEQS];
+ u16 num_aeqs, resp_num_irq, i;
+ int err;
+
+ num_aeqs = hwdev->hwif->attr.num_aeqs;
+ if (num_aeqs > HINIC3_MAX_AEQS) {
+ dev_warn(hwdev->dev, "Adjust aeq num to %d\n",
+ HINIC3_MAX_AEQS);
+ num_aeqs = HINIC3_MAX_AEQS;
+ }
+ err = hinic3_alloc_irqs(hwdev, num_aeqs, aeq_msix_entries,
+ &resp_num_irq);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to alloc aeq irqs, num_aeqs: %u\n",
+ num_aeqs);
+ return err;
+ }
+
+ if (resp_num_irq < num_aeqs) {
+ dev_warn(hwdev->dev, "Adjust aeq num to %u\n",
+ resp_num_irq);
+ num_aeqs = resp_num_irq;
+ }
+
+ err = hinic3_aeqs_init(hwdev, num_aeqs, aeq_msix_entries);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init aeqs\n");
+ goto err_free_irqs;
+ }
+
+ return 0;
+
+err_free_irqs:
+ for (i = 0; i < num_aeqs; i++)
+ hinic3_free_irq(hwdev, aeq_msix_entries[i].vector);
+
+ return err;
+}
+
+static int hinic3_comm_ceqs_init(struct hinic3_hwdev *hwdev)
+{
+ struct msix_entry ceq_msix_entries[HINIC3_MAX_CEQS];
+ u16 num_ceqs, resp_num_irq, i;
+ int err;
+
+ num_ceqs = hwdev->hwif->attr.num_ceqs;
+ if (num_ceqs > HINIC3_MAX_CEQS) {
+ dev_warn(hwdev->dev, "Adjust ceq num to %d\n",
+ HINIC3_MAX_CEQS);
+ num_ceqs = HINIC3_MAX_CEQS;
+ }
+
+ err = hinic3_alloc_irqs(hwdev, num_ceqs, ceq_msix_entries,
+ &resp_num_irq);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to alloc ceq irqs, num_ceqs: %u\n",
+ num_ceqs);
+ return err;
+ }
+
+ if (resp_num_irq < num_ceqs) {
+ dev_warn(hwdev->dev, "Adjust ceq num to %u\n",
+ resp_num_irq);
+ num_ceqs = resp_num_irq;
+ }
+
+ err = hinic3_ceqs_init(hwdev, num_ceqs, ceq_msix_entries);
+ if (err) {
+ dev_err(hwdev->dev,
+ "Failed to init ceqs, err:%d\n", err);
+ goto err_free_irqs;
+ }
+
+ return 0;
+
+err_free_irqs:
+ for (i = 0; i < num_ceqs; i++)
+ hinic3_free_irq(hwdev, ceq_msix_entries[i].vector);
+
+ return err;
+}
+
+static int hinic3_comm_mbox_init(struct hinic3_hwdev *hwdev)
+{
+ int err;
+
+ err = hinic3_init_mbox(hwdev);
+ if (err)
+ return err;
+
+ hinic3_aeq_register_cb(hwdev, HINIC3_MBX_FROM_FUNC,
+ hinic3_mbox_func_aeqe_handler);
+ hinic3_aeq_register_cb(hwdev, HINIC3_MSG_FROM_FW,
+ hinic3_mgmt_msg_aeqe_handler);
+
+ set_bit(HINIC3_HWDEV_MBOX_INITED, &hwdev->func_state);
+
+ return 0;
+}
+
+static void hinic3_comm_mbox_free(struct hinic3_hwdev *hwdev)
+{
+ spin_lock_bh(&hwdev->channel_lock);
+ clear_bit(HINIC3_HWDEV_MBOX_INITED, &hwdev->func_state);
+ spin_unlock_bh(&hwdev->channel_lock);
+ hinic3_aeq_unregister_cb(hwdev, HINIC3_MBX_FROM_FUNC);
+ hinic3_aeq_unregister_cb(hwdev, HINIC3_MSG_FROM_FW);
+ hinic3_free_mbox(hwdev);
+}
+
+static int init_aeqs_msix_attr(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_aeqs *aeqs = hwdev->aeqs;
+ struct hinic3_interrupt_info info = {};
+ struct hinic3_eq *eq;
+ u16 q_id;
+ int err;
+
+ info.interrupt_coalesc_set = 1;
+ info.pending_limit = HINIC3_DEFAULT_EQ_MSIX_PENDING_LIMIT;
+ info.coalesc_timer_cfg = HINIC3_DEFAULT_EQ_MSIX_COALESC_TIMER_CFG;
+ info.resend_timer_cfg = HINIC3_DEFAULT_EQ_MSIX_RESEND_TIMER_CFG;
+
+ for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) {
+ eq = &aeqs->aeq[q_id];
+ info.msix_index = eq->msix_entry_idx;
+ err = hinic3_set_interrupt_cfg_direct(hwdev, &info);
+ if (err) {
+ dev_err(hwdev->dev, "Set msix attr for aeq %d failed\n",
+ q_id);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int init_ceqs_msix_attr(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_ceqs *ceqs = hwdev->ceqs;
+ struct hinic3_interrupt_info info = {};
+ struct hinic3_eq *eq;
+ u16 q_id;
+ int err;
+
+ info.interrupt_coalesc_set = 1;
+ info.pending_limit = HINIC3_DEFAULT_EQ_MSIX_PENDING_LIMIT;
+ info.coalesc_timer_cfg = HINIC3_DEFAULT_EQ_MSIX_COALESC_TIMER_CFG;
+ info.resend_timer_cfg = HINIC3_DEFAULT_EQ_MSIX_RESEND_TIMER_CFG;
+
+ for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) {
+ eq = &ceqs->ceq[q_id];
+ info.msix_index = eq->msix_entry_idx;
+ err = hinic3_set_interrupt_cfg_direct(hwdev, &info);
+ if (err) {
+ dev_err(hwdev->dev, "Set msix attr for ceq %u failed\n",
+ q_id);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int init_basic_mgmt_channel(struct hinic3_hwdev *hwdev)
+{
+ int err;
+
+ err = hinic3_comm_aeqs_init(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init async event queues\n");
+ return err;
+ }
+
+ err = hinic3_comm_mbox_init(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init mailbox\n");
+ goto err_free_comm_aeqs;
+ }
+
+ err = init_aeqs_msix_attr(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init aeqs msix attr\n");
+ goto err_free_comm_mbox;
+ }
+
+ return 0;
+
+err_free_comm_mbox:
+ hinic3_comm_mbox_free(hwdev);
+err_free_comm_aeqs:
+ hinic3_aeqs_free(hwdev);
+
+ return err;
+}
+
+static void free_base_mgmt_channel(struct hinic3_hwdev *hwdev)
+{
+ hinic3_comm_mbox_free(hwdev);
+ hinic3_aeqs_free(hwdev);
+}
+
+static int dma_attr_table_init(struct hinic3_hwdev *hwdev)
+{
+ u32 addr, val, dst_attr;
+
+ /* Indirect access, set entry_idx first */
+ addr = HINIC3_CSR_DMA_ATTR_INDIR_IDX_ADDR;
+ val = hinic3_hwif_read_reg(hwdev->hwif, addr);
+ val &= ~HINIC3_DMA_ATTR_ENTRY_AT_MASK;
+ val |= HINIC3_DMA_ATTR_INDIR_IDX_SET(HINIC3_PCIE_MSIX_ATTR_ENTRY, IDX);
+ hinic3_hwif_write_reg(hwdev->hwif, addr, val);
+
+ addr = HINIC3_CSR_DMA_ATTR_TBL_ADDR;
+ val = hinic3_hwif_read_reg(hwdev->hwif, addr);
+
+ dst_attr = HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_ST_DISABLE, ST) |
+ HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_AT_DISABLE, AT) |
+ HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_PH_DISABLE, PH) |
+ HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_SNOOP, NO_SNOOPING) |
+ HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_TPH_DISABLE, TPH_EN);
+ if (val == dst_attr)
+ return 0;
+
+ return hinic3_set_dma_attr_tbl(hwdev,
+ HINIC3_PCIE_MSIX_ATTR_ENTRY,
+ HINIC3_PCIE_ST_DISABLE,
+ HINIC3_PCIE_AT_DISABLE,
+ HINIC3_PCIE_PH_DISABLE,
+ HINIC3_PCIE_SNOOP,
+ HINIC3_PCIE_TPH_DISABLE);
+}
+
+static int init_basic_attributes(struct hinic3_hwdev *hwdev)
+{
+ struct comm_global_attr glb_attr;
+ int err;
+
+ err = hinic3_func_reset(hwdev, hinic3_global_func_id(hwdev),
+ COMM_FUNC_RESET_FLAG);
+ if (err)
+ return err;
+
+ err = hinic3_get_comm_features(hwdev, hwdev->features,
+ COMM_MAX_FEATURE_QWORD);
+ if (err)
+ return err;
+
+ dev_dbg(hwdev->dev, "Comm hw features: 0x%llx\n", hwdev->features[0]);
+
+ err = hinic3_get_global_attr(hwdev, &glb_attr);
+ if (err)
+ return err;
+
+ err = hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_COMM, 1);
+ if (err)
+ return err;
+
+ err = dma_attr_table_init(hwdev);
+ if (err)
+ return err;
+
+ hwdev->max_cmdq = min(glb_attr.cmdq_num, HINIC3_MAX_CMDQ_TYPES);
+ dev_dbg(hwdev->dev,
+ "global attribute: max_host: 0x%x, max_pf: 0x%x, vf_id_start: 0x%x, mgmt node id: 0x%x, cmdq_num: 0x%x\n",
+ glb_attr.max_host_num, glb_attr.max_pf_num,
+ glb_attr.vf_id_start, glb_attr.mgmt_host_node_id,
+ glb_attr.cmdq_num);
+
+ return 0;
+}
+
+static int hinic3_comm_cmdqs_init(struct hinic3_hwdev *hwdev)
+{
+ int err;
+
+ err = hinic3_cmdqs_init(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init cmd queues\n");
+ return err;
+ }
+
+ hinic3_ceq_register_cb(hwdev, HINIC3_CMDQ, hinic3_cmdq_ceq_handler);
+
+ err = hinic3_set_cmdq_depth(hwdev, CMDQ_DEPTH);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set cmdq depth\n");
+ goto err_free_cmdqs;
+ }
+
+ set_bit(HINIC3_HWDEV_CMDQ_INITED, &hwdev->func_state);
+
+ return 0;
+
+err_free_cmdqs:
+ hinic3_cmdqs_free(hwdev);
+
+ return err;
+}
+
+static void hinic3_comm_cmdqs_free(struct hinic3_hwdev *hwdev)
+{
+ spin_lock_bh(&hwdev->channel_lock);
+ clear_bit(HINIC3_HWDEV_CMDQ_INITED, &hwdev->func_state);
+ spin_unlock_bh(&hwdev->channel_lock);
+
+ hinic3_ceq_unregister_cb(hwdev, HINIC3_CMDQ);
+ hinic3_cmdqs_free(hwdev);
+}
+
+static int init_cmdqs_channel(struct hinic3_hwdev *hwdev)
+{
+ int err;
+
+ err = hinic3_comm_ceqs_init(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init completion event queues\n");
+ return err;
+ }
+
+ err = init_ceqs_msix_attr(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init ceqs msix attr\n");
+ goto err_free_ceqs;
+ }
+
+ hwdev->wq_page_size = HINIC3_MIN_PAGE_SIZE << HINIC3_WQ_PAGE_SIZE_ORDER;
+ err = hinic3_set_wq_page_size(hwdev, hinic3_global_func_id(hwdev),
+ hwdev->wq_page_size);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set wq page size\n");
+ goto err_free_ceqs;
+ }
+
+ err = hinic3_comm_cmdqs_init(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init cmd queues\n");
+ goto err_reset_wq_page_size;
+ }
+
+ return 0;
+
+err_reset_wq_page_size:
+ hinic3_set_wq_page_size(hwdev, hinic3_global_func_id(hwdev),
+ HINIC3_MIN_PAGE_SIZE);
+err_free_ceqs:
+ hinic3_ceqs_free(hwdev);
+
+ return err;
+}
+
+static void hinic3_free_cmdqs_channel(struct hinic3_hwdev *hwdev)
+{
+ hinic3_comm_cmdqs_free(hwdev);
+ hinic3_ceqs_free(hwdev);
+}
+
+static int hinic3_init_comm_ch(struct hinic3_hwdev *hwdev)
+{
+ int err;
+
+ err = init_basic_mgmt_channel(hwdev);
+ if (err)
+ return err;
+
+ err = init_basic_attributes(hwdev);
+ if (err)
+ goto err_free_basic_mgmt_ch;
+
+ err = init_cmdqs_channel(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init cmdq channel\n");
+ goto err_clear_func_svc_used_state;
+ }
+
+ return 0;
+
+err_clear_func_svc_used_state:
+ hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_COMM, 0);
+err_free_basic_mgmt_ch:
+ free_base_mgmt_channel(hwdev);
+
+ return err;
+}
+
+static void hinic3_uninit_comm_ch(struct hinic3_hwdev *hwdev)
+{
+ hinic3_free_cmdqs_channel(hwdev);
+ hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_COMM, 0);
+ free_base_mgmt_channel(hwdev);
+}
+
+static DEFINE_IDA(hinic3_adev_ida);
+
+static int hinic3_adev_idx_alloc(void)
+{
+ return ida_alloc(&hinic3_adev_ida, GFP_KERNEL);
+}
+
+static void hinic3_adev_idx_free(int id)
+{
+ ida_free(&hinic3_adev_ida, id);
+}
+
+int hinic3_init_hwdev(struct pci_dev *pdev)
+{
+ struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev);
+ struct hinic3_hwdev *hwdev;
+ int err;
+
+ hwdev = kzalloc(sizeof(*hwdev), GFP_KERNEL);
+ if (!hwdev)
+ return -ENOMEM;
+
+ pci_adapter->hwdev = hwdev;
+ hwdev->adapter = pci_adapter;
+ hwdev->pdev = pci_adapter->pdev;
+ hwdev->dev = &pci_adapter->pdev->dev;
+ hwdev->func_state = 0;
+ hwdev->dev_id = hinic3_adev_idx_alloc();
+ spin_lock_init(&hwdev->channel_lock);
+
+ err = hinic3_init_hwif(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init hwif\n");
+ goto err_free_hwdev;
+ }
+
+ hwdev->workq = alloc_workqueue(HINIC3_HWDEV_WQ_NAME, WQ_MEM_RECLAIM,
+ HINIC3_WQ_MAX_REQ);
+ if (!hwdev->workq) {
+ dev_err(hwdev->dev, "Failed to alloc hardware workq\n");
+ err = -ENOMEM;
+ goto err_free_hwif;
+ }
+
+ err = hinic3_init_cfg_mgmt(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init config mgmt\n");
+ goto err_destroy_workqueue;
+ }
+
+ err = hinic3_init_comm_ch(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init communication channel\n");
+ goto err_free_cfg_mgmt;
+ }
+
+ err = hinic3_init_capability(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init capability\n");
+ goto err_uninit_comm_ch;
+ }
+
+ err = hinic3_set_comm_features(hwdev, hwdev->features,
+ COMM_MAX_FEATURE_QWORD);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set comm features\n");
+ goto err_uninit_comm_ch;
+ }
+
+ return 0;
+
+err_uninit_comm_ch:
+ hinic3_uninit_comm_ch(hwdev);
+err_free_cfg_mgmt:
+ hinic3_free_cfg_mgmt(hwdev);
+err_destroy_workqueue:
+ destroy_workqueue(hwdev->workq);
+err_free_hwif:
+ hinic3_free_hwif(hwdev);
+err_free_hwdev:
+ pci_adapter->hwdev = NULL;
+ hinic3_adev_idx_free(hwdev->dev_id);
+ kfree(hwdev);
+
+ return err;
+}
+
+void hinic3_free_hwdev(struct hinic3_hwdev *hwdev)
+{
+ u64 drv_features[COMM_MAX_FEATURE_QWORD] = {};
+
+ hinic3_set_comm_features(hwdev, drv_features, COMM_MAX_FEATURE_QWORD);
+ hinic3_func_rx_tx_flush(hwdev);
+ hinic3_uninit_comm_ch(hwdev);
+ hinic3_free_cfg_mgmt(hwdev);
+ destroy_workqueue(hwdev->workq);
+ hinic3_free_hwif(hwdev);
+ hinic3_adev_idx_free(hwdev->dev_id);
+ kfree(hwdev);
+}
+
+void hinic3_set_api_stop(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_mbox *mbox;
+
+ spin_lock_bh(&hwdev->channel_lock);
+ if (test_bit(HINIC3_HWDEV_MBOX_INITED, &hwdev->func_state)) {
+ mbox = hwdev->mbox;
+ spin_lock(&mbox->mbox_lock);
+ if (mbox->event_flag == MBOX_EVENT_START)
+ mbox->event_flag = MBOX_EVENT_TIMEOUT;
+ spin_unlock(&mbox->mbox_lock);
+ }
+
+ if (test_bit(HINIC3_HWDEV_CMDQ_INITED, &hwdev->func_state))
+ hinic3_cmdq_flush_sync_cmd(hwdev);
+
+ spin_unlock_bh(&hwdev->channel_lock);
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h
new file mode 100644
index 000000000000..62e2745e9316
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_HWDEV_H_
+#define _HINIC3_HWDEV_H_
+
+#include <linux/auxiliary_bus.h>
+#include <linux/pci.h>
+
+#include "hinic3_hw_intf.h"
+
+struct hinic3_cmdqs;
+struct hinic3_hwif;
+
+enum hinic3_event_service_type {
+ HINIC3_EVENT_SRV_COMM = 0,
+ HINIC3_EVENT_SRV_NIC = 1
+};
+
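+/* Packs the service id into the upper 16 bits and the event type into the
+ * lower 16 bits so handlers can switch on a single (service, type) key.
+ */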
+#define HINIC3_SRV_EVENT_TYPE(svc, type) (((svc) << 16) | (type))
+
+/* driver-specific data of pci_dev */
+struct hinic3_pcidev {
+ struct pci_dev *pdev;
+ struct hinic3_hwdev *hwdev;
+ /* Auxiliary devices */
+ struct hinic3_adev *hadev[HINIC3_SERVICE_T_MAX];
+
+ void __iomem *cfg_reg_base;
+ void __iomem *intr_reg_base;
+ void __iomem *db_base;
+ u64 db_dwqe_len;
+ u64 db_base_phy;
+
+ /* lock for attach/detach uld */
+ struct mutex pdev_mutex;
+ unsigned long state;
+};
+
+struct hinic3_hwdev {
+ struct hinic3_pcidev *adapter;
+ struct pci_dev *pdev;
+ struct device *dev;
+ int dev_id;
+ struct hinic3_hwif *hwif;
+ struct hinic3_cfg_mgmt_info *cfg_mgmt;
+ struct hinic3_aeqs *aeqs;
+ struct hinic3_ceqs *ceqs;
+ struct hinic3_mbox *mbox;
+ struct hinic3_cmdqs *cmdqs;
+ struct workqueue_struct *workq;
+ /* protect channel init and uninit */
+ spinlock_t channel_lock;
+ u64 features[COMM_MAX_FEATURE_QWORD];
+ u32 wq_page_size;
+ u8 max_cmdq;
+ ulong func_state;
+};
+
+struct hinic3_event_info {
+ /* enum hinic3_event_service_type */
+ u16 service;
+ u16 type;
+ u8 event_data[104];
+};
+
+struct hinic3_adev {
+ struct auxiliary_device adev;
+ struct hinic3_hwdev *hwdev;
+ enum hinic3_service_type svc_type;
+
+ void (*event)(struct auxiliary_device *adev,
+ struct hinic3_event_info *event);
+};
+
+int hinic3_init_hwdev(struct pci_dev *pdev);
+void hinic3_free_hwdev(struct hinic3_hwdev *hwdev);
+
+void hinic3_set_api_stop(struct hinic3_hwdev *hwdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c
new file mode 100644
index 000000000000..f76f140fb6f7
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/io.h>
+
+#include "hinic3_common.h"
+#include "hinic3_csr.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+
+#define HINIC3_HWIF_READY_TIMEOUT 10000
+#define HINIC3_DB_AND_OUTBOUND_EN_TIMEOUT 60000
+#define HINIC3_PCIE_LINK_DOWN 0xFFFFFFFF
+
+/* BAR4/5 is configured as 4MB: 2MB for DB and 2MB for DWQE */
+#define HINIC3_DB_DWQE_SIZE 0x00400000
+
+/* db/dwqe page size: 4K */
+#define HINIC3_DB_PAGE_SIZE 0x00001000
+#define HINIC3_DWQE_OFFSET 0x00000800
+#define HINIC3_DB_MAX_AREAS (HINIC3_DB_DWQE_SIZE / HINIC3_DB_PAGE_SIZE)
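+/* With the 4 MB DB/DWQE window and 4 KB pages above, HINIC3_DB_MAX_AREAS
+ * works out to 0x00400000 / 0x00001000 = 1024 doorbell pages.
+ */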
+
+#define HINIC3_MAX_MSIX_ENTRY 2048
+
+#define HINIC3_AF0_FUNC_GLOBAL_IDX_MASK GENMASK(11, 0)
+#define HINIC3_AF0_P2P_IDX_MASK GENMASK(16, 12)
+#define HINIC3_AF0_PCI_INTF_IDX_MASK GENMASK(19, 17)
+#define HINIC3_AF0_FUNC_TYPE_MASK BIT(28)
+#define HINIC3_AF0_GET(val, member) \
+ FIELD_GET(HINIC3_AF0_##member##_MASK, val)
+
+#define HINIC3_AF1_AEQS_PER_FUNC_MASK GENMASK(9, 8)
+#define HINIC3_AF1_MGMT_INIT_STATUS_MASK BIT(30)
+#define HINIC3_AF1_GET(val, member) \
+ FIELD_GET(HINIC3_AF1_##member##_MASK, val)
+
+#define HINIC3_AF2_CEQS_PER_FUNC_MASK GENMASK(8, 0)
+#define HINIC3_AF2_IRQS_PER_FUNC_MASK GENMASK(26, 16)
+#define HINIC3_AF2_GET(val, member) \
+ FIELD_GET(HINIC3_AF2_##member##_MASK, val)
+
+#define HINIC3_AF4_DOORBELL_CTRL_MASK BIT(0)
+#define HINIC3_AF4_GET(val, member) \
+ FIELD_GET(HINIC3_AF4_##member##_MASK, val)
+#define HINIC3_AF4_SET(val, member) \
+ FIELD_PREP(HINIC3_AF4_##member##_MASK, val)
+
+#define HINIC3_AF5_OUTBOUND_CTRL_MASK BIT(0)
+#define HINIC3_AF5_GET(val, member) \
+ FIELD_GET(HINIC3_AF5_##member##_MASK, val)
+
+#define HINIC3_AF6_PF_STATUS_MASK GENMASK(15, 0)
+#define HINIC3_AF6_FUNC_MAX_SQ_MASK GENMASK(31, 23)
+#define HINIC3_AF6_MSIX_FLEX_EN_MASK BIT(22)
+#define HINIC3_AF6_GET(val, member) \
+ FIELD_GET(HINIC3_AF6_##member##_MASK, val)
+
+#define HINIC3_GET_REG_ADDR(reg) ((reg) & (HINIC3_REGS_FLAG_MASK))
+
+static void __iomem *hinic3_reg_addr(struct hinic3_hwif *hwif, u32 reg)
+{
+ return hwif->cfg_regs_base + HINIC3_GET_REG_ADDR(reg);
+}
+
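+/* The config-space CSRs are big-endian (hence ioread32be()/iowrite32be()
+ * below), so these accessors byte-swap on little-endian hosts.
+ */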
+u32 hinic3_hwif_read_reg(struct hinic3_hwif *hwif, u32 reg)
+{
+ void __iomem *addr = hinic3_reg_addr(hwif, reg);
+
+ return ioread32be(addr);
+}
+
+void hinic3_hwif_write_reg(struct hinic3_hwif *hwif, u32 reg, u32 val)
+{
+ void __iomem *addr = hinic3_reg_addr(hwif, reg);
+
+ iowrite32be(val, addr);
+}
+
+static enum hinic3_wait_return check_hwif_ready_handler(void *priv_data)
+{
+ struct hinic3_hwdev *hwdev = priv_data;
+ u32 attr1;
+
+ attr1 = hinic3_hwif_read_reg(hwdev->hwif, HINIC3_CSR_FUNC_ATTR1_ADDR);
+
+ return HINIC3_AF1_GET(attr1, MGMT_INIT_STATUS) ?
+ HINIC3_WAIT_PROCESS_CPL : HINIC3_WAIT_PROCESS_WAITING;
+}
+
+static int wait_hwif_ready(struct hinic3_hwdev *hwdev)
+{
+ return hinic3_wait_for_timeout(hwdev, check_hwif_ready_handler,
+ HINIC3_HWIF_READY_TIMEOUT,
+ USEC_PER_MSEC);
+}
+
+/* Set attr struct from HW attr values. */
+static void set_hwif_attr(struct hinic3_func_attr *attr, u32 attr0, u32 attr1,
+ u32 attr2, u32 attr3, u32 attr6)
+{
+ attr->func_global_idx = HINIC3_AF0_GET(attr0, FUNC_GLOBAL_IDX);
+ attr->port_to_port_idx = HINIC3_AF0_GET(attr0, P2P_IDX);
+ attr->pci_intf_idx = HINIC3_AF0_GET(attr0, PCI_INTF_IDX);
+ attr->func_type = HINIC3_AF0_GET(attr0, FUNC_TYPE);
+
+ attr->num_aeqs = BIT(HINIC3_AF1_GET(attr1, AEQS_PER_FUNC));
+ attr->num_ceqs = HINIC3_AF2_GET(attr2, CEQS_PER_FUNC);
+ attr->num_irqs = HINIC3_AF2_GET(attr2, IRQS_PER_FUNC);
+ if (attr->num_irqs > HINIC3_MAX_MSIX_ENTRY)
+ attr->num_irqs = HINIC3_MAX_MSIX_ENTRY;
+
+ attr->num_sq = HINIC3_AF6_GET(attr6, FUNC_MAX_SQ);
+ attr->msix_flex_en = HINIC3_AF6_GET(attr6, MSIX_FLEX_EN);
+}
+
+/* Read attributes from HW and set attribute struct. */
+static int init_hwif_attr(struct hinic3_hwdev *hwdev)
+{
+ u32 attr0, attr1, attr2, attr3, attr6;
+ struct hinic3_hwif *hwif;
+
+ hwif = hwdev->hwif;
+ attr0 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR0_ADDR);
+ if (attr0 == HINIC3_PCIE_LINK_DOWN)
+ return -EFAULT;
+
+ attr1 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR1_ADDR);
+ if (attr1 == HINIC3_PCIE_LINK_DOWN)
+ return -EFAULT;
+
+ attr2 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR2_ADDR);
+ if (attr2 == HINIC3_PCIE_LINK_DOWN)
+ return -EFAULT;
+
+ attr3 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR3_ADDR);
+ if (attr3 == HINIC3_PCIE_LINK_DOWN)
+ return -EFAULT;
+
+ attr6 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR6_ADDR);
+ if (attr6 == HINIC3_PCIE_LINK_DOWN)
+ return -EFAULT;
+
+ set_hwif_attr(&hwif->attr, attr0, attr1, attr2, attr3, attr6);
+
+ if (!hwif->attr.num_ceqs) {
+ dev_err(hwdev->dev, "Ceq num cfg in fw is zero\n");
+ return -EFAULT;
+ }
+
+ if (!hwif->attr.num_irqs) {
+ dev_err(hwdev->dev,
+ "Irq num cfg in fw is zero, msix_flex_en %d\n",
+ hwif->attr.msix_flex_en);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static enum hinic3_doorbell_ctrl hinic3_get_doorbell_ctrl_status(struct hinic3_hwif *hwif)
+{
+ u32 attr4 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR4_ADDR);
+
+ return HINIC3_AF4_GET(attr4, DOORBELL_CTRL);
+}
+
+static enum hinic3_outbound_ctrl hinic3_get_outbound_ctrl_status(struct hinic3_hwif *hwif)
+{
+ u32 attr5 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR5_ADDR);
+
+ return HINIC3_AF5_GET(attr5, OUTBOUND_CTRL);
+}
+
+void hinic3_toggle_doorbell(struct hinic3_hwif *hwif,
+ enum hinic3_doorbell_ctrl flag)
+{
+ u32 addr, attr4;
+
+ addr = HINIC3_CSR_FUNC_ATTR4_ADDR;
+ attr4 = hinic3_hwif_read_reg(hwif, addr);
+
+ attr4 &= ~HINIC3_AF4_DOORBELL_CTRL_MASK;
+ attr4 |= HINIC3_AF4_SET(flag, DOORBELL_CTRL);
+
+ hinic3_hwif_write_reg(hwif, addr, attr4);
+}
+
+static int db_area_idx_init(struct hinic3_hwif *hwif, u64 db_base_phy,
+ u8 __iomem *db_base, u64 db_dwqe_len)
+{
+ struct hinic3_db_area *db_area = &hwif->db_area;
+ u32 db_max_areas;
+
+ hwif->db_base_phy = db_base_phy;
+ hwif->db_base = db_base;
+ hwif->db_dwqe_len = db_dwqe_len;
+
+ db_max_areas = db_dwqe_len > HINIC3_DB_DWQE_SIZE ?
+ HINIC3_DB_MAX_AREAS : db_dwqe_len / HINIC3_DB_PAGE_SIZE;
+ db_area->db_bitmap_array = bitmap_zalloc(db_max_areas, GFP_KERNEL);
+ if (!db_area->db_bitmap_array)
+ return -ENOMEM;
+
+ db_area->db_max_areas = db_max_areas;
+ spin_lock_init(&db_area->idx_lock);
+
+ return 0;
+}
+
+static void db_area_idx_free(struct hinic3_db_area *db_area)
+{
+ bitmap_free(db_area->db_bitmap_array);
+}
+
+static int get_db_idx(struct hinic3_hwif *hwif, u32 *idx)
+{
+ struct hinic3_db_area *db_area = &hwif->db_area;
+ u32 pg_idx;
+
+ spin_lock(&db_area->idx_lock);
+ pg_idx = find_first_zero_bit(db_area->db_bitmap_array,
+ db_area->db_max_areas);
+ if (pg_idx == db_area->db_max_areas) {
+ spin_unlock(&db_area->idx_lock);
+ return -ENOMEM;
+ }
+ set_bit(pg_idx, db_area->db_bitmap_array);
+ spin_unlock(&db_area->idx_lock);
+
+ *idx = pg_idx;
+
+ return 0;
+}
+
+static void free_db_idx(struct hinic3_hwif *hwif, u32 idx)
+{
+ struct hinic3_db_area *db_area = &hwif->db_area;
+
+ spin_lock(&db_area->idx_lock);
+ clear_bit(idx, db_area->db_bitmap_array);
+ spin_unlock(&db_area->idx_lock);
+}
+
+void hinic3_free_db_addr(struct hinic3_hwdev *hwdev, const u8 __iomem *db_base)
+{
+ struct hinic3_hwif *hwif;
+ uintptr_t distance;
+ u32 idx;
+
+ hwif = hwdev->hwif;
+ distance = db_base - hwif->db_base;
+ idx = distance / HINIC3_DB_PAGE_SIZE;
+
+ free_db_idx(hwif, idx);
+}
+
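+/* Each allocated doorbell index maps to one HINIC3_DB_PAGE_SIZE (4 KB) page
+ * in the DB BAR; when a DWQE pointer is requested it is returned at
+ * HINIC3_DWQE_OFFSET (0x800) from the doorbell address of that page.
+ */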
+int hinic3_alloc_db_addr(struct hinic3_hwdev *hwdev, void __iomem **db_base,
+ void __iomem **dwqe_base)
+{
+ struct hinic3_hwif *hwif;
+ u8 __iomem *addr;
+ u32 idx;
+ int err;
+
+ hwif = hwdev->hwif;
+
+ err = get_db_idx(hwif, &idx);
+ if (err)
+ return err;
+
+ addr = hwif->db_base + idx * HINIC3_DB_PAGE_SIZE;
+ *db_base = addr;
+
+ if (dwqe_base)
+ *dwqe_base = addr + HINIC3_DWQE_OFFSET;
+
+ return 0;
+}
+
+void hinic3_set_msix_state(struct hinic3_hwdev *hwdev, u16 msix_idx,
+ enum hinic3_msix_state flag)
+{
+ struct hinic3_hwif *hwif;
+ u8 int_msk = 1;
+ u32 mask_bits;
+ u32 addr;
+
+ hwif = hwdev->hwif;
+
+ if (flag)
+ mask_bits = HINIC3_MSI_CLR_INDIR_SET(int_msk, INT_MSK_SET);
+ else
+ mask_bits = HINIC3_MSI_CLR_INDIR_SET(int_msk, INT_MSK_CLR);
+ mask_bits = mask_bits |
+ HINIC3_MSI_CLR_INDIR_SET(msix_idx, SIMPLE_INDIR_IDX);
+
+ addr = HINIC3_CSR_FUNC_MSI_CLR_WR_ADDR;
+ hinic3_hwif_write_reg(hwif, addr, mask_bits);
+}
+
+static void disable_all_msix(struct hinic3_hwdev *hwdev)
+{
+ u16 num_irqs = hwdev->hwif->attr.num_irqs;
+ u16 i;
+
+ for (i = 0; i < num_irqs; i++)
+ hinic3_set_msix_state(hwdev, i, HINIC3_MSIX_DISABLE);
+}
+
+void hinic3_msix_intr_clear_resend_bit(struct hinic3_hwdev *hwdev, u16 msix_idx,
+ u8 clear_resend_en)
+{
+ struct hinic3_hwif *hwif;
+ u32 msix_ctrl, addr;
+
+ hwif = hwdev->hwif;
+
+ msix_ctrl = HINIC3_MSI_CLR_INDIR_SET(msix_idx, SIMPLE_INDIR_IDX) |
+ HINIC3_MSI_CLR_INDIR_SET(clear_resend_en, RESEND_TIMER_CLR);
+
+ addr = HINIC3_CSR_FUNC_MSI_CLR_WR_ADDR;
+ hinic3_hwif_write_reg(hwif, addr, msix_ctrl);
+}
+
+void hinic3_set_msix_auto_mask_state(struct hinic3_hwdev *hwdev, u16 msix_idx,
+ enum hinic3_msix_auto_mask flag)
+{
+ struct hinic3_hwif *hwif;
+ u32 mask_bits;
+ u32 addr;
+
+ hwif = hwdev->hwif;
+
+ if (flag)
+ mask_bits = HINIC3_MSI_CLR_INDIR_SET(1, AUTO_MSK_SET);
+ else
+ mask_bits = HINIC3_MSI_CLR_INDIR_SET(1, AUTO_MSK_CLR);
+
+ mask_bits = mask_bits |
+ HINIC3_MSI_CLR_INDIR_SET(msix_idx, SIMPLE_INDIR_IDX);
+
+ addr = HINIC3_CSR_FUNC_MSI_CLR_WR_ADDR;
+ hinic3_hwif_write_reg(hwif, addr, mask_bits);
+}
+
+static enum hinic3_wait_return check_db_outbound_enable_handler(void *priv_data)
+{
+ enum hinic3_outbound_ctrl outbound_ctrl;
+ struct hinic3_hwif *hwif = priv_data;
+ enum hinic3_doorbell_ctrl db_ctrl;
+
+ db_ctrl = hinic3_get_doorbell_ctrl_status(hwif);
+ outbound_ctrl = hinic3_get_outbound_ctrl_status(hwif);
+ if (outbound_ctrl == ENABLE_OUTBOUND && db_ctrl == ENABLE_DOORBELL)
+ return HINIC3_WAIT_PROCESS_CPL;
+
+ return HINIC3_WAIT_PROCESS_WAITING;
+}
+
+static int wait_until_doorbell_and_outbound_enabled(struct hinic3_hwif *hwif)
+{
+ return hinic3_wait_for_timeout(hwif, check_db_outbound_enable_handler,
+ HINIC3_DB_AND_OUTBOUND_EN_TIMEOUT,
+ USEC_PER_MSEC);
+}
+
+int hinic3_init_hwif(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_pcidev *pci_adapter = hwdev->adapter;
+ struct hinic3_hwif *hwif;
+ u32 attr1, attr4, attr5;
+ int err;
+
+ hwif = kzalloc(sizeof(*hwif), GFP_KERNEL);
+ if (!hwif)
+ return -ENOMEM;
+
+ hwdev->hwif = hwif;
+ hwif->cfg_regs_base = (u8 __iomem *)pci_adapter->cfg_reg_base +
+ HINIC3_VF_CFG_REG_OFFSET;
+
+ err = db_area_idx_init(hwif, pci_adapter->db_base_phy,
+ pci_adapter->db_base,
+ pci_adapter->db_dwqe_len);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init db area.\n");
+ goto err_free_hwif;
+ }
+
+ err = wait_hwif_ready(hwdev);
+ if (err) {
+ attr1 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR1_ADDR);
+ dev_err(hwdev->dev, "Chip status is not ready, attr1:0x%x\n",
+ attr1);
+ goto err_free_db_area_idx;
+ }
+
+ err = init_hwif_attr(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Init hwif attr failed\n");
+ goto err_free_db_area_idx;
+ }
+
+ err = wait_until_doorbell_and_outbound_enabled(hwif);
+ if (err) {
+ attr4 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR4_ADDR);
+ attr5 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR5_ADDR);
+ dev_err(hwdev->dev, "HW doorbell/outbound is disabled, attr4 0x%x attr5 0x%x\n",
+ attr4, attr5);
+ goto err_free_db_area_idx;
+ }
+
+ disable_all_msix(hwdev);
+
+ return 0;
+
+err_free_db_area_idx:
+ db_area_idx_free(&hwif->db_area);
+err_free_hwif:
+ kfree(hwif);
+
+ return err;
+}
+
+void hinic3_free_hwif(struct hinic3_hwdev *hwdev)
+{
+ db_area_idx_free(&hwdev->hwif->db_area);
+ kfree(hwdev->hwif);
+}
+
+u16 hinic3_global_func_id(struct hinic3_hwdev *hwdev)
+{
+ return hwdev->hwif->attr.func_global_idx;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h
new file mode 100644
index 000000000000..c02904e861cc
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_HWIF_H_
+#define _HINIC3_HWIF_H_
+
+#include <linux/build_bug.h>
+#include <linux/spinlock_types.h>
+
+struct hinic3_hwdev;
+
+enum hinic3_func_type {
+ HINIC3_FUNC_TYPE_VF = 1,
+};
+
+struct hinic3_db_area {
+ unsigned long *db_bitmap_array;
+ u32 db_max_areas;
+ /* protect doorbell area alloc and free */
+ spinlock_t idx_lock;
+};
+
+struct hinic3_func_attr {
+ enum hinic3_func_type func_type;
+ u16 func_global_idx;
+ u16 global_vf_id_of_pf;
+ u16 num_irqs;
+ u16 num_sq;
+ u8 port_to_port_idx;
+ u8 pci_intf_idx;
+ u8 ppf_idx;
+ u8 num_aeqs;
+ u8 num_ceqs;
+ u8 msix_flex_en;
+};
+
+static_assert(sizeof(struct hinic3_func_attr) == 20);
+
+struct hinic3_hwif {
+ u8 __iomem *cfg_regs_base;
+ u64 db_base_phy;
+ u64 db_dwqe_len;
+ u8 __iomem *db_base;
+ struct hinic3_db_area db_area;
+ struct hinic3_func_attr attr;
+};
+
+enum hinic3_outbound_ctrl {
+ ENABLE_OUTBOUND = 0x0,
+ DISABLE_OUTBOUND = 0x1,
+};
+
+enum hinic3_doorbell_ctrl {
+ ENABLE_DOORBELL = 0,
+ DISABLE_DOORBELL = 1,
+};
+
+enum hinic3_msix_state {
+ HINIC3_MSIX_ENABLE,
+ HINIC3_MSIX_DISABLE,
+};
+
+enum hinic3_msix_auto_mask {
+ HINIC3_CLR_MSIX_AUTO_MASK,
+ HINIC3_SET_MSIX_AUTO_MASK,
+};
+
+u32 hinic3_hwif_read_reg(struct hinic3_hwif *hwif, u32 reg);
+void hinic3_hwif_write_reg(struct hinic3_hwif *hwif, u32 reg, u32 val);
+
+void hinic3_toggle_doorbell(struct hinic3_hwif *hwif,
+ enum hinic3_doorbell_ctrl flag);
+
+int hinic3_alloc_db_addr(struct hinic3_hwdev *hwdev, void __iomem **db_base,
+ void __iomem **dwqe_base);
+void hinic3_free_db_addr(struct hinic3_hwdev *hwdev, const u8 __iomem *db_base);
+
+int hinic3_init_hwif(struct hinic3_hwdev *hwdev);
+void hinic3_free_hwif(struct hinic3_hwdev *hwdev);
+
+void hinic3_set_msix_state(struct hinic3_hwdev *hwdev, u16 msix_idx,
+ enum hinic3_msix_state flag);
+void hinic3_msix_intr_clear_resend_bit(struct hinic3_hwdev *hwdev, u16 msix_idx,
+ u8 clear_resend_en);
+void hinic3_set_msix_auto_mask_state(struct hinic3_hwdev *hwdev, u16 msix_idx,
+ enum hinic3_msix_auto_mask flag);
+
+u16 hinic3_global_func_id(struct hinic3_hwdev *hwdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
new file mode 100644
index 000000000000..a69b361225e9
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/netdevice.h>
+
+#include "hinic3_hw_comm.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_rx.h"
+#include "hinic3_tx.h"
+
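+/* NAPI poll: Tx completions are always serviced, a zero budget (e.g. netpoll)
+ * skips Rx entirely, and the queue's MSI-X vector is re-enabled only after
+ * napi_complete_done() reports that polling has finished.
+ */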
+static int hinic3_poll(struct napi_struct *napi, int budget)
+{
+ struct hinic3_irq_cfg *irq_cfg =
+ container_of(napi, struct hinic3_irq_cfg, napi);
+ struct hinic3_nic_dev *nic_dev;
+ bool busy = false;
+ int work_done;
+
+ nic_dev = netdev_priv(irq_cfg->netdev);
+
+ busy |= hinic3_tx_poll(irq_cfg->txq, budget);
+
+ if (unlikely(!budget))
+ return 0;
+
+ work_done = hinic3_rx_poll(irq_cfg->rxq, budget);
+ busy |= work_done >= budget;
+
+ if (busy)
+ return budget;
+
+ if (likely(napi_complete_done(napi, work_done)))
+ hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
+ HINIC3_MSIX_ENABLE);
+
+ return work_done;
+}
+
+static void qp_add_napi(struct hinic3_irq_cfg *irq_cfg)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev);
+
+ netif_napi_add(nic_dev->netdev, &irq_cfg->napi, hinic3_poll);
+ netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
+ NETDEV_QUEUE_TYPE_RX, &irq_cfg->napi);
+ netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
+ NETDEV_QUEUE_TYPE_TX, &irq_cfg->napi);
+ napi_enable(&irq_cfg->napi);
+}
+
+static void qp_del_napi(struct hinic3_irq_cfg *irq_cfg)
+{
+ napi_disable(&irq_cfg->napi);
+ netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
+ NETDEV_QUEUE_TYPE_RX, NULL);
+ netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
+ NETDEV_QUEUE_TYPE_TX, NULL);
+ netif_stop_subqueue(irq_cfg->netdev, irq_cfg->irq_id);
+ netif_napi_del(&irq_cfg->napi);
+}
+
+static irqreturn_t qp_irq(int irq, void *data)
+{
+ struct hinic3_irq_cfg *irq_cfg = data;
+ struct hinic3_nic_dev *nic_dev;
+
+ nic_dev = netdev_priv(irq_cfg->netdev);
+ hinic3_msix_intr_clear_resend_bit(nic_dev->hwdev,
+ irq_cfg->msix_entry_idx, 1);
+
+ napi_schedule(&irq_cfg->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int hinic3_request_irq(struct hinic3_irq_cfg *irq_cfg, u16 q_id)
+{
+ struct hinic3_interrupt_info info = {};
+ struct hinic3_nic_dev *nic_dev;
+ struct net_device *netdev;
+ int err;
+
+ netdev = irq_cfg->netdev;
+ nic_dev = netdev_priv(netdev);
+ qp_add_napi(irq_cfg);
+
+ info.msix_index = irq_cfg->msix_entry_idx;
+ info.interrupt_coalesc_set = 1;
+ info.pending_limit = nic_dev->intr_coalesce[q_id].pending_limit;
+ info.coalesc_timer_cfg =
+ nic_dev->intr_coalesce[q_id].coalesce_timer_cfg;
+ info.resend_timer_cfg = nic_dev->intr_coalesce[q_id].resend_timer_cfg;
+ err = hinic3_set_interrupt_cfg_direct(nic_dev->hwdev, &info);
+ if (err) {
+ netdev_err(netdev, "Failed to set RX interrupt coalescing attribute.\n");
+ qp_del_napi(irq_cfg);
+ return err;
+ }
+
+ err = request_irq(irq_cfg->irq_id, qp_irq, 0, irq_cfg->irq_name,
+ irq_cfg);
+ if (err) {
+ qp_del_napi(irq_cfg);
+ return err;
+ }
+
+ irq_set_affinity_hint(irq_cfg->irq_id, &irq_cfg->affinity_mask);
+
+ return 0;
+}
+
+static void hinic3_release_irq(struct hinic3_irq_cfg *irq_cfg)
+{
+ irq_set_affinity_hint(irq_cfg->irq_id, NULL);
+ free_irq(irq_cfg->irq_id, irq_cfg);
+}
+
+int hinic3_qps_irq_init(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct pci_dev *pdev = nic_dev->pdev;
+ struct hinic3_irq_cfg *irq_cfg;
+ struct msix_entry *msix_entry;
+ u32 local_cpu;
+ u16 q_id;
+ int err;
+
+ for (q_id = 0; q_id < nic_dev->q_params.num_qps; q_id++) {
+ msix_entry = &nic_dev->qps_msix_entries[q_id];
+ irq_cfg = &nic_dev->q_params.irq_cfg[q_id];
+
+ irq_cfg->irq_id = msix_entry->vector;
+ irq_cfg->msix_entry_idx = msix_entry->entry;
+ irq_cfg->netdev = netdev;
+ irq_cfg->txq = &nic_dev->txqs[q_id];
+ irq_cfg->rxq = &nic_dev->rxqs[q_id];
+ nic_dev->rxqs[q_id].irq_cfg = irq_cfg;
+
+ local_cpu = cpumask_local_spread(q_id, dev_to_node(&pdev->dev));
+ cpumask_set_cpu(local_cpu, &irq_cfg->affinity_mask);
+
+ snprintf(irq_cfg->irq_name, sizeof(irq_cfg->irq_name),
+ "%s_qp%u", netdev->name, q_id);
+
+ err = hinic3_request_irq(irq_cfg, q_id);
+ if (err) {
+ netdev_err(netdev, "Failed to request Rx irq\n");
+ goto err_release_irqs;
+ }
+
+ hinic3_set_msix_auto_mask_state(nic_dev->hwdev,
+ irq_cfg->msix_entry_idx,
+ HINIC3_SET_MSIX_AUTO_MASK);
+ hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
+ HINIC3_MSIX_ENABLE);
+ }
+
+ return 0;
+
+err_release_irqs:
+ while (q_id > 0) {
+ q_id--;
+ irq_cfg = &nic_dev->q_params.irq_cfg[q_id];
+ qp_del_napi(irq_cfg);
+ hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
+ HINIC3_MSIX_DISABLE);
+ hinic3_set_msix_auto_mask_state(nic_dev->hwdev,
+ irq_cfg->msix_entry_idx,
+ HINIC3_CLR_MSIX_AUTO_MASK);
+ hinic3_release_irq(irq_cfg);
+ }
+
+ return err;
+}
+
+void hinic3_qps_irq_uninit(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_irq_cfg *irq_cfg;
+ u16 q_id;
+
+ for (q_id = 0; q_id < nic_dev->q_params.num_qps; q_id++) {
+ irq_cfg = &nic_dev->q_params.irq_cfg[q_id];
+ qp_del_napi(irq_cfg);
+ hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
+ HINIC3_MSIX_DISABLE);
+ hinic3_set_msix_auto_mask_state(nic_dev->hwdev,
+ irq_cfg->msix_entry_idx,
+ HINIC3_CLR_MSIX_AUTO_MASK);
+ hinic3_release_irq(irq_cfg);
+ }
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c
new file mode 100644
index 000000000000..3db8241a3b0c
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+
+#include "hinic3_hw_cfg.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_lld.h"
+#include "hinic3_mgmt.h"
+#include "hinic3_pci_id_tbl.h"
+
+#define HINIC3_VF_PCI_CFG_REG_BAR 0
+#define HINIC3_PCI_INTR_REG_BAR 2
+#define HINIC3_PCI_DB_BAR 4
+
+#define HINIC3_EVENT_POLL_SLEEP_US 1000
+#define HINIC3_EVENT_POLL_TIMEOUT_US 10000000
+
+static struct hinic3_adev_device {
+ const char *name;
+} hinic3_adev_devices[HINIC3_SERVICE_T_MAX] = {
+ [HINIC3_SERVICE_T_NIC] = {
+ .name = "nic",
+ },
+};
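+/* The auxiliary device match name is "<module>.<name>", so the "nic" entry
+ * above is expected to pair with the "hinic3.nic" id in the NIC auxiliary
+ * driver's id_table.
+ */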
+
+static bool hinic3_adev_svc_supported(struct hinic3_hwdev *hwdev,
+ enum hinic3_service_type svc_type)
+{
+ switch (svc_type) {
+ case HINIC3_SERVICE_T_NIC:
+ return hinic3_support_nic(hwdev);
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static void hinic3_comm_adev_release(struct device *dev)
+{
+ struct hinic3_adev *hadev = container_of(dev, struct hinic3_adev,
+ adev.dev);
+
+ kfree(hadev);
+}
+
+static struct hinic3_adev *hinic3_add_one_adev(struct hinic3_hwdev *hwdev,
+ enum hinic3_service_type svc_type)
+{
+ struct hinic3_adev *hadev;
+ const char *svc_name;
+ int ret;
+
+ hadev = kzalloc(sizeof(*hadev), GFP_KERNEL);
+ if (!hadev)
+ return NULL;
+
+ svc_name = hinic3_adev_devices[svc_type].name;
+ hadev->adev.name = svc_name;
+ hadev->adev.id = hwdev->dev_id;
+ hadev->adev.dev.parent = hwdev->dev;
+ hadev->adev.dev.release = hinic3_comm_adev_release;
+ hadev->svc_type = svc_type;
+ hadev->hwdev = hwdev;
+
+ ret = auxiliary_device_init(&hadev->adev);
+ if (ret) {
+		dev_err(hwdev->dev, "failed to init adev %s %u\n",
+			svc_name, hwdev->dev_id);
+ kfree(hadev);
+ return NULL;
+ }
+
+ ret = auxiliary_device_add(&hadev->adev);
+ if (ret) {
+ dev_err(hwdev->dev, "failed to add adev %s %u\n",
+ svc_name, hwdev->dev_id);
+ auxiliary_device_uninit(&hadev->adev);
+ return NULL;
+ }
+
+ return hadev;
+}
+
+static void hinic3_del_one_adev(struct hinic3_hwdev *hwdev,
+ enum hinic3_service_type svc_type)
+{
+ struct hinic3_pcidev *pci_adapter = hwdev->adapter;
+ struct hinic3_adev *hadev;
+ int timeout;
+ bool state;
+
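+	/* Claim this service's bit in pci_adapter->state before tearing the
+	 * auxiliary device down; a concurrent holder (presumably event
+	 * delivery) is waited out, and on timeout the bit is intentionally
+	 * left set.
+	 */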
+ timeout = read_poll_timeout(test_and_set_bit, state, !state,
+ HINIC3_EVENT_POLL_SLEEP_US,
+ HINIC3_EVENT_POLL_TIMEOUT_US,
+ false, svc_type, &pci_adapter->state);
+
+ hadev = pci_adapter->hadev[svc_type];
+ auxiliary_device_delete(&hadev->adev);
+ auxiliary_device_uninit(&hadev->adev);
+ pci_adapter->hadev[svc_type] = NULL;
+ if (!timeout)
+ clear_bit(svc_type, &pci_adapter->state);
+}
+
+static int hinic3_attach_aux_devices(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_pcidev *pci_adapter = hwdev->adapter;
+ enum hinic3_service_type svc_type;
+
+ mutex_lock(&pci_adapter->pdev_mutex);
+
+ for (svc_type = 0; svc_type < HINIC3_SERVICE_T_MAX; svc_type++) {
+ if (!hinic3_adev_svc_supported(hwdev, svc_type))
+ continue;
+
+ pci_adapter->hadev[svc_type] = hinic3_add_one_adev(hwdev,
+ svc_type);
+ if (!pci_adapter->hadev[svc_type])
+ goto err_del_adevs;
+ }
+ mutex_unlock(&pci_adapter->pdev_mutex);
+
+ return 0;
+
+err_del_adevs:
+ while (svc_type > 0) {
+ svc_type--;
+ if (pci_adapter->hadev[svc_type]) {
+ hinic3_del_one_adev(hwdev, svc_type);
+ pci_adapter->hadev[svc_type] = NULL;
+ }
+ }
+ mutex_unlock(&pci_adapter->pdev_mutex);
+
+ return -ENOMEM;
+}
+
+static void hinic3_detach_aux_devices(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_pcidev *pci_adapter = hwdev->adapter;
+ int i;
+
+ mutex_lock(&pci_adapter->pdev_mutex);
+ for (i = 0; i < ARRAY_SIZE(hinic3_adev_devices); i++) {
+ if (pci_adapter->hadev[i])
+ hinic3_del_one_adev(hwdev, i);
+ }
+ mutex_unlock(&pci_adapter->pdev_mutex);
+}
+
+struct hinic3_hwdev *hinic3_adev_get_hwdev(struct auxiliary_device *adev)
+{
+ struct hinic3_adev *hadev;
+
+ hadev = container_of(adev, struct hinic3_adev, adev);
+
+ return hadev->hwdev;
+}
+
+void hinic3_adev_event_register(struct auxiliary_device *adev,
+ void (*event_handler)(struct auxiliary_device *adev,
+ struct hinic3_event_info *event))
+{
+ struct hinic3_adev *hadev;
+
+ hadev = container_of(adev, struct hinic3_adev, adev);
+ hadev->event = event_handler;
+}
+
+void hinic3_adev_event_unregister(struct auxiliary_device *adev)
+{
+ struct hinic3_adev *hadev;
+
+ hadev = container_of(adev, struct hinic3_adev, adev);
+ hadev->event = NULL;
+}
+
+static int hinic3_mapping_bar(struct pci_dev *pdev,
+ struct hinic3_pcidev *pci_adapter)
+{
+ pci_adapter->cfg_reg_base = pci_ioremap_bar(pdev,
+ HINIC3_VF_PCI_CFG_REG_BAR);
+ if (!pci_adapter->cfg_reg_base) {
+ dev_err(&pdev->dev, "Failed to map configuration regs\n");
+ return -ENOMEM;
+ }
+
+ pci_adapter->intr_reg_base = pci_ioremap_bar(pdev,
+ HINIC3_PCI_INTR_REG_BAR);
+ if (!pci_adapter->intr_reg_base) {
+ dev_err(&pdev->dev, "Failed to map interrupt regs\n");
+ goto err_unmap_cfg_reg_base;
+ }
+
+ pci_adapter->db_base_phy = pci_resource_start(pdev, HINIC3_PCI_DB_BAR);
+ pci_adapter->db_dwqe_len = pci_resource_len(pdev, HINIC3_PCI_DB_BAR);
+ pci_adapter->db_base = pci_ioremap_bar(pdev, HINIC3_PCI_DB_BAR);
+ if (!pci_adapter->db_base) {
+ dev_err(&pdev->dev, "Failed to map doorbell regs\n");
+ goto err_unmap_intr_reg_base;
+ }
+
+ return 0;
+
+err_unmap_intr_reg_base:
+ iounmap(pci_adapter->intr_reg_base);
+
+err_unmap_cfg_reg_base:
+ iounmap(pci_adapter->cfg_reg_base);
+
+ return -ENOMEM;
+}
+
+static void hinic3_unmapping_bar(struct hinic3_pcidev *pci_adapter)
+{
+ iounmap(pci_adapter->db_base);
+ iounmap(pci_adapter->intr_reg_base);
+ iounmap(pci_adapter->cfg_reg_base);
+}
+
+static int hinic3_pci_init(struct pci_dev *pdev)
+{
+ struct hinic3_pcidev *pci_adapter;
+ int err;
+
+ pci_adapter = kzalloc(sizeof(*pci_adapter), GFP_KERNEL);
+ if (!pci_adapter)
+ return -ENOMEM;
+
+ pci_adapter->pdev = pdev;
+ mutex_init(&pci_adapter->pdev_mutex);
+
+ pci_set_drvdata(pdev, pci_adapter);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ goto err_free_pci_adapter;
+ }
+
+ err = pci_request_regions(pdev, HINIC3_NIC_DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to request regions\n");
+ goto err_disable_device;
+ }
+
+ pci_set_master(pdev);
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set DMA mask\n");
+ goto err_release_regions;
+ }
+
+ return 0;
+
+err_release_regions:
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+
+err_disable_device:
+ pci_disable_device(pdev);
+
+err_free_pci_adapter:
+ pci_set_drvdata(pdev, NULL);
+ mutex_destroy(&pci_adapter->pdev_mutex);
+ kfree(pci_adapter);
+
+ return err;
+}
+
+static void hinic3_pci_uninit(struct pci_dev *pdev)
+{
+ struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ mutex_destroy(&pci_adapter->pdev_mutex);
+ kfree(pci_adapter);
+}
+
+static int hinic3_func_init(struct pci_dev *pdev,
+ struct hinic3_pcidev *pci_adapter)
+{
+ int err;
+
+ err = hinic3_init_hwdev(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize hardware device\n");
+ return err;
+ }
+
+ err = hinic3_attach_aux_devices(pci_adapter->hwdev);
+ if (err)
+ goto err_free_hwdev;
+
+ return 0;
+
+err_free_hwdev:
+ hinic3_free_hwdev(pci_adapter->hwdev);
+
+ return err;
+}
+
+static void hinic3_func_uninit(struct pci_dev *pdev)
+{
+ struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+ hinic3_flush_mgmt_workq(pci_adapter->hwdev);
+ hinic3_detach_aux_devices(pci_adapter->hwdev);
+ hinic3_free_hwdev(pci_adapter->hwdev);
+}
+
+static int hinic3_probe_func(struct hinic3_pcidev *pci_adapter)
+{
+ struct pci_dev *pdev = pci_adapter->pdev;
+ int err;
+
+ err = hinic3_mapping_bar(pdev, pci_adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to map bar\n");
+ goto err_out;
+ }
+
+ err = hinic3_func_init(pdev, pci_adapter);
+ if (err)
+ goto err_unmap_bar;
+
+ return 0;
+
+err_unmap_bar:
+ hinic3_unmapping_bar(pci_adapter);
+
+err_out:
+ dev_err(&pdev->dev, "PCIe device probe function failed\n");
+
+ return err;
+}
+
+static void hinic3_remove_func(struct hinic3_pcidev *pci_adapter)
+{
+ struct pci_dev *pdev = pci_adapter->pdev;
+
+ hinic3_func_uninit(pdev);
+ hinic3_unmapping_bar(pci_adapter);
+}
+
+static int hinic3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct hinic3_pcidev *pci_adapter;
+ int err;
+
+ err = hinic3_pci_init(pdev);
+ if (err)
+ goto err_out;
+
+ pci_adapter = pci_get_drvdata(pdev);
+ err = hinic3_probe_func(pci_adapter);
+ if (err)
+ goto err_uninit_pci;
+
+ return 0;
+
+err_uninit_pci:
+ hinic3_pci_uninit(pdev);
+
+err_out:
+ dev_err(&pdev->dev, "PCIe device probe failed\n");
+
+ return err;
+}
+
+static void hinic3_remove(struct pci_dev *pdev)
+{
+ struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+ hinic3_remove_func(pci_adapter);
+ hinic3_pci_uninit(pdev);
+}
+
+static const struct pci_device_id hinic3_pci_table[] = {
+ {PCI_VDEVICE(HUAWEI, PCI_DEV_ID_HINIC3_VF), 0},
+	{0, 0}
+};
+
+MODULE_DEVICE_TABLE(pci, hinic3_pci_table);
+
+static void hinic3_shutdown(struct pci_dev *pdev)
+{
+ struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+ pci_disable_device(pdev);
+
+ if (pci_adapter)
+ hinic3_set_api_stop(pci_adapter->hwdev);
+}
+
+static struct pci_driver hinic3_driver = {
+ .name = HINIC3_NIC_DRV_NAME,
+ .id_table = hinic3_pci_table,
+ .probe = hinic3_probe,
+ .remove = hinic3_remove,
+ .shutdown = hinic3_shutdown,
+ .sriov_configure = pci_sriov_configure_simple
+};
+
+int hinic3_lld_init(void)
+{
+ return pci_register_driver(&hinic3_driver);
+}
+
+void hinic3_lld_exit(void)
+{
+ pci_unregister_driver(&hinic3_driver);
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_lld.h b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.h
new file mode 100644
index 000000000000..322b44803476
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_LLD_H_
+#define _HINIC3_LLD_H_
+
+#include <linux/auxiliary_bus.h>
+
+struct hinic3_event_info;
+
+#define HINIC3_NIC_DRV_NAME "hinic3"
+
+int hinic3_lld_init(void);
+void hinic3_lld_exit(void);
+void hinic3_adev_event_register(struct auxiliary_device *adev,
+ void (*event_handler)(struct auxiliary_device *adev,
+ struct hinic3_event_info *event));
+void hinic3_adev_event_unregister(struct auxiliary_device *adev);
+struct hinic3_hwdev *hinic3_adev_get_hwdev(struct auxiliary_device *adev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
new file mode 100644
index 000000000000..6d87d4d895ba
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
@@ -0,0 +1,409 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+
+#include "hinic3_common.h"
+#include "hinic3_hw_comm.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_lld.h"
+#include "hinic3_nic_cfg.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_nic_io.h"
+#include "hinic3_rss.h"
+#include "hinic3_rx.h"
+#include "hinic3_tx.h"
+
+#define HINIC3_NIC_DRV_DESC "Intelligent Network Interface Card Driver"
+
+#define HINIC3_RX_BUF_LEN 2048
+#define HINIC3_LRO_REPLENISH_THLD 256
+#define HINIC3_NIC_DEV_WQ_NAME "hinic3_nic_dev_wq"
+
+#define HINIC3_SQ_DEPTH 1024
+#define HINIC3_RQ_DEPTH 1024
+
+#define HINIC3_DEFAULT_TXRX_MSIX_PENDING_LIMIT 2
+#define HINIC3_DEFAULT_TXRX_MSIX_COALESC_TIMER_CFG 25
+#define HINIC3_DEFAULT_TXRX_MSIX_RESEND_TIMER_CFG 7
+
+static void init_intr_coal_param(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_intr_coal_info *info;
+ u16 i;
+
+ for (i = 0; i < nic_dev->max_qps; i++) {
+ info = &nic_dev->intr_coalesce[i];
+ info->pending_limit = HINIC3_DEFAULT_TXRX_MSIX_PENDING_LIMIT;
+ info->coalesce_timer_cfg = HINIC3_DEFAULT_TXRX_MSIX_COALESC_TIMER_CFG;
+ info->resend_timer_cfg = HINIC3_DEFAULT_TXRX_MSIX_RESEND_TIMER_CFG;
+ }
+}
+
+static int hinic3_init_intr_coalesce(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ nic_dev->intr_coalesce = kcalloc(nic_dev->max_qps,
+ sizeof(*nic_dev->intr_coalesce),
+ GFP_KERNEL);
+
+ if (!nic_dev->intr_coalesce)
+ return -ENOMEM;
+
+ init_intr_coal_param(netdev);
+
+ return 0;
+}
+
+static void hinic3_free_intr_coalesce(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ kfree(nic_dev->intr_coalesce);
+}
+
+static int hinic3_alloc_txrxqs(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ int err;
+
+ err = hinic3_alloc_txqs(netdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to alloc txqs\n");
+ return err;
+ }
+
+ err = hinic3_alloc_rxqs(netdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to alloc rxqs\n");
+ goto err_free_txqs;
+ }
+
+ err = hinic3_init_intr_coalesce(netdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init_intr_coalesce\n");
+ goto err_free_rxqs;
+ }
+
+ return 0;
+
+err_free_rxqs:
+ hinic3_free_rxqs(netdev);
+
+err_free_txqs:
+ hinic3_free_txqs(netdev);
+
+ return err;
+}
+
+static void hinic3_free_txrxqs(struct net_device *netdev)
+{
+ hinic3_free_intr_coalesce(netdev);
+ hinic3_free_rxqs(netdev);
+ hinic3_free_txqs(netdev);
+}
+
+static int hinic3_init_nic_dev(struct net_device *netdev,
+ struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct pci_dev *pdev = hwdev->pdev;
+
+ nic_dev->netdev = netdev;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ nic_dev->hwdev = hwdev;
+ nic_dev->pdev = pdev;
+
+ nic_dev->rx_buf_len = HINIC3_RX_BUF_LEN;
+ nic_dev->lro_replenish_thld = HINIC3_LRO_REPLENISH_THLD;
+ nic_dev->nic_svc_cap = hwdev->cfg_mgmt->cap.nic_svc_cap;
+
+ return 0;
+}
+
+static int hinic3_sw_init(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ int err;
+
+ nic_dev->q_params.sq_depth = HINIC3_SQ_DEPTH;
+ nic_dev->q_params.rq_depth = HINIC3_RQ_DEPTH;
+
+ hinic3_try_to_enable_rss(netdev);
+
+	/* The VF driver always uses a random MAC address. During VM migration
+	 * to a new device, the new device should learn the VM's old MAC rather
+	 * than provide its own. The product design assumes that every VF is
+	 * susceptible to migration, so the device avoids offering a MAC
+	 * address to VFs.
+	 */
+ eth_hw_addr_random(netdev);
+ err = hinic3_set_mac(hwdev, netdev->dev_addr, 0,
+ hinic3_global_func_id(hwdev));
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set default MAC\n");
+ goto err_clear_rss_config;
+ }
+
+ err = hinic3_alloc_txrxqs(netdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to alloc qps\n");
+ goto err_del_mac;
+ }
+
+ return 0;
+
+err_del_mac:
+ hinic3_del_mac(hwdev, netdev->dev_addr, 0,
+ hinic3_global_func_id(hwdev));
+err_clear_rss_config:
+ hinic3_clear_rss_config(netdev);
+
+ return err;
+}
+
+static void hinic3_sw_uninit(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ hinic3_free_txrxqs(netdev);
+ hinic3_del_mac(nic_dev->hwdev, netdev->dev_addr, 0,
+ hinic3_global_func_id(nic_dev->hwdev));
+ hinic3_clear_rss_config(netdev);
+}
+
+static void hinic3_assign_netdev_ops(struct net_device *netdev)
+{
+ hinic3_set_netdev_ops(netdev);
+}
+
+static void netdev_feature_init(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ netdev_features_t cso_fts = 0;
+ netdev_features_t tso_fts = 0;
+ netdev_features_t dft_fts;
+
+ dft_fts = NETIF_F_SG | NETIF_F_HIGHDMA;
+ if (hinic3_test_support(nic_dev, HINIC3_NIC_F_CSUM))
+ cso_fts |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
+ if (hinic3_test_support(nic_dev, HINIC3_NIC_F_SCTP_CRC))
+ cso_fts |= NETIF_F_SCTP_CRC;
+ if (hinic3_test_support(nic_dev, HINIC3_NIC_F_TSO))
+ tso_fts |= NETIF_F_TSO | NETIF_F_TSO6;
+
+ netdev->features |= dft_fts | cso_fts | tso_fts;
+}
+
+static int hinic3_set_default_hw_feature(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ int err;
+
+ err = hinic3_set_nic_feature_to_hw(nic_dev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set nic features\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static void hinic3_link_status_change(struct net_device *netdev,
+ bool link_status_up)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ if (link_status_up) {
+ if (netif_carrier_ok(netdev))
+ return;
+
+ nic_dev->link_status_up = true;
+ netif_carrier_on(netdev);
+ netdev_dbg(netdev, "Link is up\n");
+ } else {
+ if (!netif_carrier_ok(netdev))
+ return;
+
+ nic_dev->link_status_up = false;
+ netif_carrier_off(netdev);
+ netdev_dbg(netdev, "Link is down\n");
+ }
+}
+
+static void hinic3_nic_event(struct auxiliary_device *adev,
+ struct hinic3_event_info *event)
+{
+ struct hinic3_nic_dev *nic_dev = dev_get_drvdata(&adev->dev);
+ struct net_device *netdev;
+
+ netdev = nic_dev->netdev;
+
+ switch (HINIC3_SRV_EVENT_TYPE(event->service, event->type)) {
+ case HINIC3_SRV_EVENT_TYPE(HINIC3_EVENT_SRV_NIC,
+ HINIC3_NIC_EVENT_LINK_UP):
+ hinic3_link_status_change(netdev, true);
+ break;
+ case HINIC3_SRV_EVENT_TYPE(HINIC3_EVENT_SRV_NIC,
+ HINIC3_NIC_EVENT_LINK_DOWN):
+ hinic3_link_status_change(netdev, false);
+ break;
+ default:
+ break;
+ }
+}
+
+static int hinic3_nic_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct hinic3_hwdev *hwdev = hinic3_adev_get_hwdev(adev);
+ struct pci_dev *pdev = hwdev->pdev;
+ struct hinic3_nic_dev *nic_dev;
+ struct net_device *netdev;
+ u16 max_qps, glb_func_id;
+ int err;
+
+ if (!hinic3_support_nic(hwdev)) {
+ dev_dbg(&adev->dev, "HW doesn't support nic\n");
+ return 0;
+ }
+
+ hinic3_adev_event_register(adev, hinic3_nic_event);
+
+ glb_func_id = hinic3_global_func_id(hwdev);
+ err = hinic3_func_reset(hwdev, glb_func_id, COMM_FUNC_RESET_BIT_NIC);
+ if (err) {
+ dev_err(&adev->dev, "Failed to reset function\n");
+ goto err_unregister_adev_event;
+ }
+
+ max_qps = hinic3_func_max_qnum(hwdev);
+ netdev = alloc_etherdev_mq(sizeof(*nic_dev), max_qps);
+ if (!netdev) {
+ dev_err(&adev->dev, "Failed to allocate netdev\n");
+ err = -ENOMEM;
+ goto err_unregister_adev_event;
+ }
+
+ nic_dev = netdev_priv(netdev);
+ dev_set_drvdata(&adev->dev, nic_dev);
+ err = hinic3_init_nic_dev(netdev, hwdev);
+ if (err)
+ goto err_free_netdev;
+
+ err = hinic3_init_nic_io(nic_dev);
+ if (err)
+ goto err_free_netdev;
+
+ err = hinic3_sw_init(netdev);
+ if (err)
+ goto err_free_nic_io;
+
+ hinic3_assign_netdev_ops(netdev);
+
+ netdev_feature_init(netdev);
+ err = hinic3_set_default_hw_feature(netdev);
+ if (err)
+ goto err_uninit_sw;
+
+ netif_carrier_off(netdev);
+
+ err = register_netdev(netdev);
+ if (err)
+ goto err_uninit_nic_feature;
+
+ return 0;
+
+err_uninit_nic_feature:
+ hinic3_update_nic_feature(nic_dev, 0);
+ hinic3_set_nic_feature_to_hw(nic_dev);
+
+err_uninit_sw:
+ hinic3_sw_uninit(netdev);
+
+err_free_nic_io:
+ hinic3_free_nic_io(nic_dev);
+
+err_free_netdev:
+ free_netdev(netdev);
+
+err_unregister_adev_event:
+ hinic3_adev_event_unregister(adev);
+ dev_err(&pdev->dev, "NIC service probe failed\n");
+
+ return err;
+}
+
+static void hinic3_nic_remove(struct auxiliary_device *adev)
+{
+ struct hinic3_nic_dev *nic_dev = dev_get_drvdata(&adev->dev);
+ struct net_device *netdev;
+
+ if (!hinic3_support_nic(nic_dev->hwdev))
+ return;
+
+ netdev = nic_dev->netdev;
+ unregister_netdev(netdev);
+
+ hinic3_update_nic_feature(nic_dev, 0);
+ hinic3_set_nic_feature_to_hw(nic_dev);
+ hinic3_sw_uninit(netdev);
+
+ hinic3_free_nic_io(nic_dev);
+
+ free_netdev(netdev);
+}
+
+static const struct auxiliary_device_id hinic3_nic_id_table[] = {
+ {
+ .name = HINIC3_NIC_DRV_NAME ".nic",
+ },
+ {}
+};
+
+static struct auxiliary_driver hinic3_nic_driver = {
+ .probe = hinic3_nic_probe,
+ .remove = hinic3_nic_remove,
+ .name = "nic",
+ .id_table = hinic3_nic_id_table,
+};
+
+static __init int hinic3_nic_lld_init(void)
+{
+ int err;
+
+ err = hinic3_lld_init();
+ if (err)
+ return err;
+
+ err = auxiliary_driver_register(&hinic3_nic_driver);
+ if (err) {
+ hinic3_lld_exit();
+ return err;
+ }
+
+ return 0;
+}
+
+static __exit void hinic3_nic_lld_exit(void)
+{
+ auxiliary_driver_unregister(&hinic3_nic_driver);
+
+ hinic3_lld_exit();
+}
+
+module_init(hinic3_nic_lld_init);
+module_exit(hinic3_nic_lld_exit);
+
+MODULE_AUTHOR("Huawei Technologies CO., Ltd");
+MODULE_DESCRIPTION(HINIC3_NIC_DRV_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c
new file mode 100644
index 000000000000..cf67e26acece
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c
@@ -0,0 +1,860 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/dma-mapping.h>
+
+#include "hinic3_common.h"
+#include "hinic3_csr.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+
+#define MBOX_INT_DST_AEQN_MASK GENMASK(11, 10)
+#define MBOX_INT_SRC_RESP_AEQN_MASK GENMASK(13, 12)
+#define MBOX_INT_STAT_DMA_MASK GENMASK(19, 14)
+/* TX size, expressed in 4-byte units */
+#define MBOX_INT_TX_SIZE_MASK GENMASK(24, 20)
+/* SO_RO == strong order, relaxed order */
+#define MBOX_INT_STAT_DMA_SO_RO_MASK GENMASK(26, 25)
+#define MBOX_INT_WB_EN_MASK BIT(28)
+#define MBOX_INT_SET(val, field) \
+ FIELD_PREP(MBOX_INT_##field##_MASK, val)
+
+#define MBOX_CTRL_TRIGGER_AEQE_MASK BIT(0)
+#define MBOX_CTRL_TX_STATUS_MASK BIT(1)
+#define MBOX_CTRL_DST_FUNC_MASK GENMASK(28, 16)
+#define MBOX_CTRL_SET(val, field) \
+ FIELD_PREP(MBOX_CTRL_##field##_MASK, val)
+
+#define MBOX_MSG_POLLING_TIMEOUT_MS 8000 /* send message segment timeout */
+#define MBOX_COMP_POLLING_TIMEOUT_MS 40000 /* wait for response timeout */
+
+#define MBOX_MAX_BUF_SZ 2048
+#define MBOX_HEADER_SZ 8
+
+/* MBOX size is 64B, 8B for mbox_header, 8B reserved */
+#define MBOX_SEG_LEN 48
+#define MBOX_SEG_LEN_ALIGN 4
+#define MBOX_WB_STATUS_LEN 16
+
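+/* A message may span multiple segments; seq ids run from MBOX_SEQ_ID_START_VAL
+ * to MBOX_SEQ_ID_MAX_VAL and the final segment carries whatever is left of the
+ * MBOX_MAX_BUF_SZ message buffer.
+ */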
+#define MBOX_SEQ_ID_START_VAL 0
+#define MBOX_SEQ_ID_MAX_VAL 42
+#define MBOX_LAST_SEG_MAX_LEN \
+ (MBOX_MAX_BUF_SZ - MBOX_SEQ_ID_MAX_VAL * MBOX_SEG_LEN)
+
+/* mbox write back status is 16B, only the first 4B are used */
+#define MBOX_WB_STATUS_ERRCODE_MASK 0xFFFF
+#define MBOX_WB_STATUS_MASK 0xFF
+#define MBOX_WB_ERROR_CODE_MASK 0xFF00
+#define MBOX_WB_STATUS_FINISHED_SUCCESS 0xFF
+#define MBOX_WB_STATUS_NOT_FINISHED 0x00
+
+#define MBOX_STATUS_FINISHED(wb) \
+ (FIELD_GET(MBOX_WB_STATUS_MASK, (wb)) != MBOX_WB_STATUS_NOT_FINISHED)
+#define MBOX_STATUS_SUCCESS(wb) \
+ (FIELD_GET(MBOX_WB_STATUS_MASK, (wb)) == \
+ MBOX_WB_STATUS_FINISHED_SUCCESS)
+#define MBOX_STATUS_ERRCODE(wb) \
+ ((wb) & MBOX_WB_ERROR_CODE_MASK)
+
+#define MBOX_DMA_MSG_QUEUE_DEPTH 32
+#define MBOX_AREA(hwif) \
+ ((hwif)->cfg_regs_base + HINIC3_FUNC_CSR_MAILBOX_DATA_OFF)
+
+#define MBOX_MQ_CI_OFFSET \
+ (HINIC3_CFG_REGS_FLAG + HINIC3_FUNC_CSR_MAILBOX_DATA_OFF + \
+ MBOX_HEADER_SZ + MBOX_SEG_LEN)
+
+#define MBOX_MQ_SYNC_CI_MASK GENMASK(7, 0)
+#define MBOX_MQ_ASYNC_CI_MASK GENMASK(15, 8)
+#define MBOX_MQ_CI_GET(val, field) \
+ FIELD_GET(MBOX_MQ_##field##_CI_MASK, val)
+
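+/* destination function id used to address the management CPU */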
+#define MBOX_MGMT_FUNC_ID 0x1FFF
+#define MBOX_COMM_F_MBOX_SEGMENT BIT(3)
+
+static u8 *get_mbox_body_from_hdr(u8 *header)
+{
+ return header + MBOX_HEADER_SZ;
+}
+
+static struct hinic3_msg_desc *get_mbox_msg_desc(struct hinic3_mbox *mbox,
+ enum mbox_msg_direction_type dir,
+ u16 src_func_id)
+{
+ struct hinic3_msg_channel *msg_ch;
+
+ msg_ch = (src_func_id == MBOX_MGMT_FUNC_ID) ?
+ &mbox->mgmt_msg : mbox->func_msg;
+
+ return (dir == MBOX_MSG_SEND) ?
+ &msg_ch->recv_msg : &msg_ch->resp_msg;
+}
+
+static void resp_mbox_handler(struct hinic3_mbox *mbox,
+ const struct hinic3_msg_desc *msg_desc)
+{
+ spin_lock(&mbox->mbox_lock);
+ if (msg_desc->msg_info.msg_id == mbox->send_msg_id &&
+ mbox->event_flag == MBOX_EVENT_START)
+ mbox->event_flag = MBOX_EVENT_SUCCESS;
+ spin_unlock(&mbox->mbox_lock);
+}
+
+static bool mbox_segment_valid(struct hinic3_mbox *mbox,
+ struct hinic3_msg_desc *msg_desc,
+ __le64 mbox_header)
+{
+ u8 seq_id, seg_len, msg_id, mod;
+ __le16 src_func_idx, cmd;
+
+ seq_id = MBOX_MSG_HEADER_GET(mbox_header, SEQID);
+ seg_len = MBOX_MSG_HEADER_GET(mbox_header, SEG_LEN);
+ msg_id = MBOX_MSG_HEADER_GET(mbox_header, MSG_ID);
+ mod = MBOX_MSG_HEADER_GET(mbox_header, MODULE);
+ cmd = cpu_to_le16(MBOX_MSG_HEADER_GET(mbox_header, CMD));
+ src_func_idx = cpu_to_le16(MBOX_MSG_HEADER_GET(mbox_header,
+ SRC_GLB_FUNC_IDX));
+
+ if (seq_id > MBOX_SEQ_ID_MAX_VAL || seg_len > MBOX_SEG_LEN ||
+ (seq_id == MBOX_SEQ_ID_MAX_VAL && seg_len > MBOX_LAST_SEG_MAX_LEN))
+ goto err_seg;
+
+ if (seq_id == 0) {
+ msg_desc->seq_id = seq_id;
+ msg_desc->msg_info.msg_id = msg_id;
+ msg_desc->mod = mod;
+ msg_desc->cmd = cmd;
+ } else {
+ if (seq_id != msg_desc->seq_id + 1 ||
+ msg_id != msg_desc->msg_info.msg_id ||
+ mod != msg_desc->mod || cmd != msg_desc->cmd)
+ goto err_seg;
+
+ msg_desc->seq_id = seq_id;
+ }
+
+ return true;
+
+err_seg:
+ dev_err(mbox->hwdev->dev,
+ "Mailbox segment check failed, src func id: 0x%x, front seg info: seq id: 0x%x, msg id: 0x%x, mod: 0x%x, cmd: 0x%x\n",
+ src_func_idx, msg_desc->seq_id, msg_desc->msg_info.msg_id,
+ msg_desc->mod, msg_desc->cmd);
+ dev_err(mbox->hwdev->dev,
+ "Current seg info: seg len: 0x%x, seq id: 0x%x, msg id: 0x%x, mod: 0x%x, cmd: 0x%x\n",
+ seg_len, seq_id, msg_id, mod, cmd);
+
+ return false;
+}
+
+static void recv_mbox_handler(struct hinic3_mbox *mbox,
+ u8 *header, struct hinic3_msg_desc *msg_desc)
+{
+ __le64 mbox_header = *((__force __le64 *)header);
+ u8 *mbox_body = get_mbox_body_from_hdr(header);
+ u8 seq_id, seg_len;
+ int pos;
+
+ if (!mbox_segment_valid(mbox, msg_desc, mbox_header)) {
+ msg_desc->seq_id = MBOX_SEQ_ID_MAX_VAL;
+ return;
+ }
+
+ seq_id = MBOX_MSG_HEADER_GET(mbox_header, SEQID);
+ seg_len = MBOX_MSG_HEADER_GET(mbox_header, SEG_LEN);
+
+ pos = seq_id * MBOX_SEG_LEN;
+ memcpy(msg_desc->msg + pos, mbox_body, seg_len);
+
+ if (!MBOX_MSG_HEADER_GET(mbox_header, LAST))
+ return;
+
+ msg_desc->msg_len = cpu_to_le16(MBOX_MSG_HEADER_GET(mbox_header,
+ MSG_LEN));
+ msg_desc->msg_info.status = MBOX_MSG_HEADER_GET(mbox_header, STATUS);
+
+ if (MBOX_MSG_HEADER_GET(mbox_header, DIRECTION) == MBOX_MSG_RESP)
+ resp_mbox_handler(mbox, msg_desc);
+}
+
+void hinic3_mbox_func_aeqe_handler(struct hinic3_hwdev *hwdev, u8 *header,
+ u8 size)
+{
+ __le64 mbox_header = *((__force __le64 *)header);
+ enum mbox_msg_direction_type dir;
+ struct hinic3_msg_desc *msg_desc;
+ struct hinic3_mbox *mbox;
+ u16 src_func_id;
+
+ mbox = hwdev->mbox;
+ dir = MBOX_MSG_HEADER_GET(mbox_header, DIRECTION);
+ src_func_id = MBOX_MSG_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);
+ msg_desc = get_mbox_msg_desc(mbox, dir, src_func_id);
+ recv_mbox_handler(mbox, header, msg_desc);
+}
+
+static int init_mbox_dma_queue(struct hinic3_hwdev *hwdev,
+ struct mbox_dma_queue *mq)
+{
+ u32 size;
+
+ mq->depth = MBOX_DMA_MSG_QUEUE_DEPTH;
+ mq->prod_idx = 0;
+ mq->cons_idx = 0;
+
+ size = mq->depth * MBOX_MAX_BUF_SZ;
+ mq->dma_buf_vaddr = dma_alloc_coherent(hwdev->dev, size,
+ &mq->dma_buf_paddr,
+ GFP_KERNEL);
+ if (!mq->dma_buf_vaddr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void uninit_mbox_dma_queue(struct hinic3_hwdev *hwdev,
+ struct mbox_dma_queue *mq)
+{
+ dma_free_coherent(hwdev->dev, mq->depth * MBOX_MAX_BUF_SZ,
+ mq->dma_buf_vaddr, mq->dma_buf_paddr);
+}
+
+static int hinic3_init_mbox_dma_queue(struct hinic3_mbox *mbox)
+{
+ u32 val;
+ int err;
+
+ err = init_mbox_dma_queue(mbox->hwdev, &mbox->sync_msg_queue);
+ if (err)
+ return err;
+
+ err = init_mbox_dma_queue(mbox->hwdev, &mbox->async_msg_queue);
+ if (err) {
+ uninit_mbox_dma_queue(mbox->hwdev, &mbox->sync_msg_queue);
+ return err;
+ }
+
+ val = hinic3_hwif_read_reg(mbox->hwdev->hwif, MBOX_MQ_CI_OFFSET);
+ val &= ~MBOX_MQ_SYNC_CI_MASK;
+ val &= ~MBOX_MQ_ASYNC_CI_MASK;
+ hinic3_hwif_write_reg(mbox->hwdev->hwif, MBOX_MQ_CI_OFFSET, val);
+
+ return 0;
+}
+
+static void hinic3_uninit_mbox_dma_queue(struct hinic3_mbox *mbox)
+{
+ uninit_mbox_dma_queue(mbox->hwdev, &mbox->sync_msg_queue);
+ uninit_mbox_dma_queue(mbox->hwdev, &mbox->async_msg_queue);
+}
+
+static int alloc_mbox_msg_channel(struct hinic3_msg_channel *msg_ch)
+{
+ msg_ch->resp_msg.msg = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
+ if (!msg_ch->resp_msg.msg)
+ return -ENOMEM;
+
+ msg_ch->recv_msg.msg = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
+ if (!msg_ch->recv_msg.msg) {
+ kfree(msg_ch->resp_msg.msg);
+ return -ENOMEM;
+ }
+
+ msg_ch->resp_msg.seq_id = MBOX_SEQ_ID_MAX_VAL;
+ msg_ch->recv_msg.seq_id = MBOX_SEQ_ID_MAX_VAL;
+
+ return 0;
+}
+
+static void free_mbox_msg_channel(struct hinic3_msg_channel *msg_ch)
+{
+ kfree(msg_ch->recv_msg.msg);
+ kfree(msg_ch->resp_msg.msg);
+}
+
+static int init_mgmt_msg_channel(struct hinic3_mbox *mbox)
+{
+ int err;
+
+ err = alloc_mbox_msg_channel(&mbox->mgmt_msg);
+ if (err) {
+ dev_err(mbox->hwdev->dev, "Failed to alloc mgmt message channel\n");
+ return err;
+ }
+
+ err = hinic3_init_mbox_dma_queue(mbox);
+ if (err) {
+ dev_err(mbox->hwdev->dev, "Failed to init mbox dma queue\n");
+ free_mbox_msg_channel(&mbox->mgmt_msg);
+ return err;
+ }
+
+ return 0;
+}
+
+static void uninit_mgmt_msg_channel(struct hinic3_mbox *mbox)
+{
+ hinic3_uninit_mbox_dma_queue(mbox);
+ free_mbox_msg_channel(&mbox->mgmt_msg);
+}
+
+static int hinic3_init_func_mbox_msg_channel(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_mbox *mbox;
+ int err;
+
+ mbox = hwdev->mbox;
+ mbox->func_msg = kzalloc(sizeof(*mbox->func_msg), GFP_KERNEL);
+ if (!mbox->func_msg)
+ return -ENOMEM;
+
+ err = alloc_mbox_msg_channel(mbox->func_msg);
+ if (err)
+ goto err_free_func_msg;
+
+ return 0;
+
+err_free_func_msg:
+ kfree(mbox->func_msg);
+ mbox->func_msg = NULL;
+
+ return err;
+}
+
+static void hinic3_uninit_func_mbox_msg_channel(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_mbox *mbox = hwdev->mbox;
+
+ free_mbox_msg_channel(mbox->func_msg);
+ kfree(mbox->func_msg);
+ mbox->func_msg = NULL;
+}
+
+static void prepare_send_mbox(struct hinic3_mbox *mbox)
+{
+ struct hinic3_send_mbox *send_mbox = &mbox->send_mbox;
+
+ send_mbox->data = MBOX_AREA(mbox->hwdev->hwif);
+}
+
+static int alloc_mbox_wb_status(struct hinic3_mbox *mbox)
+{
+ struct hinic3_send_mbox *send_mbox = &mbox->send_mbox;
+ struct hinic3_hwdev *hwdev = mbox->hwdev;
+ u32 addr_h, addr_l;
+
+ send_mbox->wb_vaddr = dma_alloc_coherent(hwdev->dev,
+ MBOX_WB_STATUS_LEN,
+ &send_mbox->wb_paddr,
+ GFP_KERNEL);
+ if (!send_mbox->wb_vaddr)
+ return -ENOMEM;
+
+ addr_h = upper_32_bits(send_mbox->wb_paddr);
+ addr_l = lower_32_bits(send_mbox->wb_paddr);
+ hinic3_hwif_write_reg(hwdev->hwif, HINIC3_FUNC_CSR_MAILBOX_RESULT_H_OFF,
+ addr_h);
+ hinic3_hwif_write_reg(hwdev->hwif, HINIC3_FUNC_CSR_MAILBOX_RESULT_L_OFF,
+ addr_l);
+
+ return 0;
+}
+
+static void free_mbox_wb_status(struct hinic3_mbox *mbox)
+{
+ struct hinic3_send_mbox *send_mbox = &mbox->send_mbox;
+ struct hinic3_hwdev *hwdev = mbox->hwdev;
+
+ hinic3_hwif_write_reg(hwdev->hwif, HINIC3_FUNC_CSR_MAILBOX_RESULT_H_OFF,
+ 0);
+ hinic3_hwif_write_reg(hwdev->hwif, HINIC3_FUNC_CSR_MAILBOX_RESULT_L_OFF,
+ 0);
+
+ dma_free_coherent(hwdev->dev, MBOX_WB_STATUS_LEN,
+ send_mbox->wb_vaddr, send_mbox->wb_paddr);
+}
+
+static int hinic3_mbox_pre_init(struct hinic3_hwdev *hwdev,
+ struct hinic3_mbox *mbox)
+{
+ mbox->hwdev = hwdev;
+ mutex_init(&mbox->mbox_send_lock);
+ spin_lock_init(&mbox->mbox_lock);
+
+ mbox->workq = create_singlethread_workqueue(HINIC3_MBOX_WQ_NAME);
+ if (!mbox->workq) {
+ dev_err(hwdev->dev, "Failed to initialize MBOX workqueue\n");
+ return -ENOMEM;
+ }
+ hwdev->mbox = mbox;
+
+ return 0;
+}
+
+int hinic3_init_mbox(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_mbox *mbox;
+ int err;
+
+ mbox = kzalloc(sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ err = hinic3_mbox_pre_init(hwdev, mbox);
+ if (err)
+ goto err_free_mbox;
+
+ err = init_mgmt_msg_channel(mbox);
+ if (err)
+ goto err_destroy_workqueue;
+
+ err = hinic3_init_func_mbox_msg_channel(hwdev);
+ if (err)
+ goto err_uninit_mgmt_msg_ch;
+
+ err = alloc_mbox_wb_status(mbox);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to alloc mbox write back status\n");
+ goto err_uninit_func_mbox_msg_ch;
+ }
+
+ prepare_send_mbox(mbox);
+
+ return 0;
+
+err_uninit_func_mbox_msg_ch:
+ hinic3_uninit_func_mbox_msg_channel(hwdev);
+
+err_uninit_mgmt_msg_ch:
+ uninit_mgmt_msg_channel(mbox);
+
+err_destroy_workqueue:
+ destroy_workqueue(mbox->workq);
+
+err_free_mbox:
+ kfree(mbox);
+
+ return err;
+}
+
+void hinic3_free_mbox(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_mbox *mbox = hwdev->mbox;
+
+ destroy_workqueue(mbox->workq);
+ free_mbox_wb_status(mbox);
+ hinic3_uninit_func_mbox_msg_channel(hwdev);
+ uninit_mgmt_msg_channel(mbox);
+ kfree(mbox);
+}
+
+#define MBOX_DMA_MSG_INIT_XOR_VAL 0x5a5a5a5a
+#define MBOX_XOR_DATA_ALIGN 4
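+/* Dword XOR checksum over the DMA message contents, seeded with the init
+ * value; the result is carried in the xor field of struct mbox_dma_msg.
+ */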
+static u32 mbox_dma_msg_xor(u32 *data, u32 msg_len)
+{
+ u32 xor = MBOX_DMA_MSG_INIT_XOR_VAL;
+ u32 dw_len = msg_len / sizeof(u32);
+ u32 i;
+
+ for (i = 0; i < dw_len; i++)
+ xor ^= data[i];
+
+ return xor;
+}
+
+#define MBOX_MQ_ID_MASK(mq, idx) ((idx) & ((mq)->depth - 1))
+
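+/* One slot is kept empty; the queue is full when the producer index would
+ * catch up with the consumer index.
+ */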
+static bool is_msg_queue_full(struct mbox_dma_queue *mq)
+{
+ return MBOX_MQ_ID_MASK(mq, (mq)->prod_idx + 1) ==
+ MBOX_MQ_ID_MASK(mq, (mq)->cons_idx);
+}
+
+static int mbox_prepare_dma_entry(struct hinic3_mbox *mbox,
+ struct mbox_dma_queue *mq,
+ struct mbox_dma_msg *dma_msg,
+ const void *msg, u32 msg_len)
+{
+ u64 dma_addr, offset;
+ void *dma_vaddr;
+
+ if (is_msg_queue_full(mq)) {
+ dev_err(mbox->hwdev->dev, "Mbox sync message queue is busy, pi: %u, ci: %u\n",
+ mq->prod_idx, MBOX_MQ_ID_MASK(mq, mq->cons_idx));
+ return -EBUSY;
+ }
+
+ /* copy data to DMA buffer */
+ offset = mq->prod_idx * MBOX_MAX_BUF_SZ;
+ dma_vaddr = (u8 *)mq->dma_buf_vaddr + offset;
+ memcpy(dma_vaddr, msg, msg_len);
+ dma_addr = mq->dma_buf_paddr + offset;
+ dma_msg->dma_addr_high = cpu_to_le32(upper_32_bits(dma_addr));
+ dma_msg->dma_addr_low = cpu_to_le32(lower_32_bits(dma_addr));
+ dma_msg->msg_len = cpu_to_le32(msg_len);
+ /* The firmware reads the message in 4-byte aligned chunks. */
+ dma_msg->xor = cpu_to_le32(mbox_dma_msg_xor(dma_vaddr,
+ ALIGN(msg_len, MBOX_XOR_DATA_ALIGN)));
+ mq->prod_idx++;
+ mq->prod_idx = MBOX_MQ_ID_MASK(mq, mq->prod_idx);
+
+ return 0;
+}
+
+static int mbox_prepare_dma_msg(struct hinic3_mbox *mbox,
+ enum mbox_msg_ack_type ack_type,
+ struct mbox_dma_msg *dma_msg, const void *msg,
+ u32 msg_len)
+{
+ struct mbox_dma_queue *mq;
+ u32 val;
+
+ val = hinic3_hwif_read_reg(mbox->hwdev->hwif, MBOX_MQ_CI_OFFSET);
+ if (ack_type == MBOX_MSG_ACK) {
+ mq = &mbox->sync_msg_queue;
+ mq->cons_idx = MBOX_MQ_CI_GET(val, SYNC);
+ } else {
+ mq = &mbox->async_msg_queue;
+ mq->cons_idx = MBOX_MQ_CI_GET(val, ASYNC);
+ }
+
+ return mbox_prepare_dma_entry(mbox, mq, dma_msg, msg, msg_len);
+}
+
+static void clear_mbox_status(struct hinic3_send_mbox *mbox)
+{
+ __be64 *wb_status = mbox->wb_vaddr;
+
+ *wb_status = 0;
+ /* ensure the write back status is cleared before the message is sent */
+ wmb();
+}
+
+static void mbox_dword_write(const void *src, void __iomem *dst, u32 count)
+{
+ const __le32 *src32 = src;
+ u32 __iomem *dst32 = dst;
+ u32 i;
+
+ /* Data written to the mbox is arranged in structs with little endian
+ * fields, but when written to HW every dword (32 bits) must be swapped
+ * since the HW will swap it again.
+ */
+ for (i = 0; i < count; i++)
+ __raw_writel(swab32((__force __u32)src32[i]), dst32 + i);
+}
+
+static void mbox_copy_header(struct hinic3_hwdev *hwdev,
+ struct hinic3_send_mbox *mbox, __le64 *header)
+{
+ mbox_dword_write(header, mbox->data, MBOX_HEADER_SZ / sizeof(__le32));
+}
+
+static void mbox_copy_send_data(struct hinic3_hwdev *hwdev,
+ struct hinic3_send_mbox *mbox, void *seg,
+ u32 seg_len)
+{
+ u32 __iomem *dst = (u32 __iomem *)(mbox->data + MBOX_HEADER_SZ);
+ u32 count, leftover, last_dword;
+ const __le32 *src = seg;
+
+ count = seg_len / sizeof(u32);
+ leftover = seg_len % sizeof(u32);
+ if (count > 0)
+ mbox_dword_write(src, dst, count);
+
+ if (leftover > 0) {
+ last_dword = 0;
+ memcpy(&last_dword, src + count, leftover);
+ mbox_dword_write(&last_dword, dst + count, 1);
+ }
+}
+
+static void write_mbox_msg_attr(struct hinic3_mbox *mbox,
+ u16 dst_func, u16 dst_aeqn, u32 seg_len)
+{
+ struct hinic3_hwif *hwif = mbox->hwdev->hwif;
+ u32 mbox_int, mbox_ctrl, tx_size;
+
+ tx_size = ALIGN(seg_len + MBOX_HEADER_SZ, MBOX_SEG_LEN_ALIGN) >> 2;
+
+ mbox_int = MBOX_INT_SET(dst_aeqn, DST_AEQN) |
+ MBOX_INT_SET(0, STAT_DMA) |
+ MBOX_INT_SET(tx_size, TX_SIZE) |
+ MBOX_INT_SET(0, STAT_DMA_SO_RO) |
+ MBOX_INT_SET(1, WB_EN);
+
+ mbox_ctrl = MBOX_CTRL_SET(1, TX_STATUS) |
+ MBOX_CTRL_SET(0, TRIGGER_AEQE) |
+ MBOX_CTRL_SET(dst_func, DST_FUNC);
+
+ hinic3_hwif_write_reg(hwif, HINIC3_FUNC_CSR_MAILBOX_INT_OFF, mbox_int);
+ hinic3_hwif_write_reg(hwif, HINIC3_FUNC_CSR_MAILBOX_CONTROL_OFF,
+ mbox_ctrl);
+}
+
+static u16 get_mbox_status(const struct hinic3_send_mbox *mbox)
+{
+ __be64 *wb_status = mbox->wb_vaddr;
+ u64 wb_val;
+
+ wb_val = be64_to_cpu(*wb_status);
+ /* ensure the write back status read completes before it is checked */
+ rmb();
+
+ return wb_val & MBOX_WB_STATUS_ERRCODE_MASK;
+}
+
+static enum hinic3_wait_return check_mbox_wb_status(void *priv_data)
+{
+ struct hinic3_mbox *mbox = priv_data;
+ u16 wb_status;
+
+ wb_status = get_mbox_status(&mbox->send_mbox);
+
+ return MBOX_STATUS_FINISHED(wb_status) ?
+ HINIC3_WAIT_PROCESS_CPL : HINIC3_WAIT_PROCESS_WAITING;
+}
+
+static int send_mbox_seg(struct hinic3_mbox *mbox, __le64 header,
+ u16 dst_func, void *seg, u32 seg_len, void *msg_info)
+{
+ struct hinic3_send_mbox *send_mbox = &mbox->send_mbox;
+ struct hinic3_hwdev *hwdev = mbox->hwdev;
+ u8 num_aeqs = hwdev->hwif->attr.num_aeqs;
+ enum mbox_msg_direction_type dir;
+ u16 dst_aeqn, wb_status, errcode;
+ int err;
+
+ /* mbox to mgmt cpu, hardware doesn't care about dst aeq id */
+ if (num_aeqs > MBOX_MSG_AEQ_FOR_MBOX) {
+ dir = MBOX_MSG_HEADER_GET(header, DIRECTION);
+ dst_aeqn = (dir == MBOX_MSG_SEND) ?
+ MBOX_MSG_AEQ_FOR_EVENT : MBOX_MSG_AEQ_FOR_MBOX;
+ } else {
+ dst_aeqn = 0;
+ }
+
+ clear_mbox_status(send_mbox);
+ mbox_copy_header(hwdev, send_mbox, &header);
+ mbox_copy_send_data(hwdev, send_mbox, seg, seg_len);
+ write_mbox_msg_attr(mbox, dst_func, dst_aeqn, seg_len);
+
+ err = hinic3_wait_for_timeout(mbox, check_mbox_wb_status,
+ MBOX_MSG_POLLING_TIMEOUT_MS,
+ USEC_PER_MSEC);
+ wb_status = get_mbox_status(send_mbox);
+ if (err) {
+ dev_err(hwdev->dev, "Send mailbox segment timeout, wb status: 0x%x\n",
+ wb_status);
+ return err;
+ }
+
+ if (!MBOX_STATUS_SUCCESS(wb_status)) {
+ dev_err(hwdev->dev,
+ "Send mailbox segment to function %u error, wb status: 0x%x\n",
+ dst_func, wb_status);
+ errcode = MBOX_STATUS_ERRCODE(wb_status);
+ return errcode ? errcode : -EFAULT;
+ }
+
+ return 0;
+}
+
+static int send_mbox_msg(struct hinic3_mbox *mbox, u8 mod, u16 cmd,
+ const void *msg, u32 msg_len, u16 dst_func,
+ enum mbox_msg_direction_type direction,
+ enum mbox_msg_ack_type ack_type,
+ struct mbox_msg_info *msg_info)
+{
+ enum mbox_msg_data_type data_type = MBOX_MSG_DATA_INLINE;
+ struct hinic3_hwdev *hwdev = mbox->hwdev;
+ struct mbox_dma_msg dma_msg;
+ u32 seg_len = MBOX_SEG_LEN;
+ __le64 header = 0;
+ u32 seq_id = 0;
+ u16 rsp_aeq_id;
+ u8 *msg_seg;
+ int err = 0;
+ u32 left;
+
+ if (hwdev->hwif->attr.num_aeqs > MBOX_MSG_AEQ_FOR_MBOX)
+ rsp_aeq_id = MBOX_MSG_AEQ_FOR_MBOX;
+ else
+ rsp_aeq_id = 0;
+
+ if (dst_func == MBOX_MGMT_FUNC_ID &&
+ !(hwdev->features[0] & MBOX_COMM_F_MBOX_SEGMENT)) {
+ err = mbox_prepare_dma_msg(mbox, ack_type, &dma_msg,
+ msg, msg_len);
+ if (err)
+ goto err_send;
+
+ msg = &dma_msg;
+ msg_len = sizeof(dma_msg);
+ data_type = MBOX_MSG_DATA_DMA;
+ }
+
+ msg_seg = (u8 *)msg;
+ left = msg_len;
+
+ header = cpu_to_le64(MBOX_MSG_HEADER_SET(msg_len, MSG_LEN) |
+ MBOX_MSG_HEADER_SET(mod, MODULE) |
+ MBOX_MSG_HEADER_SET(seg_len, SEG_LEN) |
+ MBOX_MSG_HEADER_SET(ack_type, NO_ACK) |
+ MBOX_MSG_HEADER_SET(data_type, DATA_TYPE) |
+ MBOX_MSG_HEADER_SET(MBOX_SEQ_ID_START_VAL, SEQID) |
+ MBOX_MSG_HEADER_SET(direction, DIRECTION) |
+ MBOX_MSG_HEADER_SET(cmd, CMD) |
+ MBOX_MSG_HEADER_SET(msg_info->msg_id, MSG_ID) |
+ MBOX_MSG_HEADER_SET(rsp_aeq_id, AEQ_ID) |
+ MBOX_MSG_HEADER_SET(MBOX_MSG_FROM_MBOX, SOURCE) |
+ MBOX_MSG_HEADER_SET(!!msg_info->status, STATUS));
+
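+ /* Send the message MBOX_SEG_LEN bytes at a time; the final segment gets
+ * its real length and the LAST bit set in the header.
+ */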
+ while (!(MBOX_MSG_HEADER_GET(header, LAST))) {
+ if (left <= MBOX_SEG_LEN) {
+ header &= cpu_to_le64(~MBOX_MSG_HEADER_SEG_LEN_MASK);
+ header |=
+ cpu_to_le64(MBOX_MSG_HEADER_SET(left, SEG_LEN) |
+ MBOX_MSG_HEADER_SET(1, LAST));
+ seg_len = left;
+ }
+
+ err = send_mbox_seg(mbox, header, dst_func, msg_seg,
+ seg_len, msg_info);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to send mbox seg, seq_id=0x%llx\n",
+ MBOX_MSG_HEADER_GET(header, SEQID));
+ goto err_send;
+ }
+
+ left -= MBOX_SEG_LEN;
+ msg_seg += MBOX_SEG_LEN;
+ seq_id++;
+ header &= cpu_to_le64(~MBOX_MSG_HEADER_SEG_LEN_MASK);
+ header |= cpu_to_le64(MBOX_MSG_HEADER_SET(seq_id, SEQID));
+ }
+
+err_send:
+ return err;
+}
+
+static void set_mbox_to_func_event(struct hinic3_mbox *mbox,
+ enum mbox_event_state event_flag)
+{
+ spin_lock(&mbox->mbox_lock);
+ mbox->event_flag = event_flag;
+ spin_unlock(&mbox->mbox_lock);
+}
+
+static enum hinic3_wait_return check_mbox_msg_finish(void *priv_data)
+{
+ struct hinic3_mbox *mbox = priv_data;
+
+ return (mbox->event_flag == MBOX_EVENT_SUCCESS) ?
+ HINIC3_WAIT_PROCESS_CPL : HINIC3_WAIT_PROCESS_WAITING;
+}
+
+static int wait_mbox_msg_completion(struct hinic3_mbox *mbox,
+ u32 timeout)
+{
+ u32 wait_time;
+ int err;
+
+ wait_time = (timeout != 0) ? timeout : MBOX_COMP_POLLING_TIMEOUT_MS;
+ err = hinic3_wait_for_timeout(mbox, check_mbox_msg_finish,
+ wait_time, USEC_PER_MSEC);
+ if (err) {
+ set_mbox_to_func_event(mbox, MBOX_EVENT_TIMEOUT);
+ return err;
+ }
+ set_mbox_to_func_event(mbox, MBOX_EVENT_END);
+
+ return 0;
+}
+
+int hinic3_send_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
+ const struct mgmt_msg_params *msg_params)
+{
+ struct hinic3_mbox *mbox = hwdev->mbox;
+ struct mbox_msg_info msg_info = {};
+ struct hinic3_msg_desc *msg_desc;
+ u32 msg_len;
+ int err;
+
+ /* expect response message */
+ msg_desc = get_mbox_msg_desc(mbox, MBOX_MSG_RESP, MBOX_MGMT_FUNC_ID);
+ mutex_lock(&mbox->mbox_send_lock);
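+ /* msg_id is a 4-bit header field, so ids wrap around at 16 */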
+ msg_info.msg_id = (mbox->send_msg_id + 1) & 0xF;
+ mbox->send_msg_id = msg_info.msg_id;
+ set_mbox_to_func_event(mbox, MBOX_EVENT_START);
+
+ err = send_mbox_msg(mbox, mod, cmd, msg_params->buf_in,
+ msg_params->in_size, MBOX_MGMT_FUNC_ID,
+ MBOX_MSG_SEND, MBOX_MSG_ACK, &msg_info);
+ if (err) {
+ dev_err(hwdev->dev, "Send mailbox mod %u, cmd %u failed, msg_id: %u, err: %d\n",
+ mod, cmd, msg_info.msg_id, err);
+ set_mbox_to_func_event(mbox, MBOX_EVENT_FAIL);
+ goto err_send;
+ }
+
+ if (wait_mbox_msg_completion(mbox, msg_params->timeout_ms)) {
+ dev_err(hwdev->dev,
+ "Send mbox msg timeout, msg_id: %u\n", msg_info.msg_id);
+ err = -ETIMEDOUT;
+ goto err_send;
+ }
+
+ if (mod != msg_desc->mod || cmd != le16_to_cpu(msg_desc->cmd)) {
+ dev_err(hwdev->dev,
+ "Invalid response mbox message, mod: 0x%x, cmd: 0x%x, expect mod: 0x%x, cmd: 0x%x\n",
+ msg_desc->mod, le16_to_cpu(msg_desc->cmd), mod, cmd);
+ err = -EFAULT;
+ goto err_send;
+ }
+
+ if (msg_desc->msg_info.status) {
+ err = msg_desc->msg_info.status;
+ goto err_send;
+ }
+
+ if (msg_params->buf_out) {
+ msg_len = le16_to_cpu(msg_desc->msg_len);
+ if (msg_len != msg_params->expected_out_size) {
+ dev_err(hwdev->dev,
+ "Invalid response mbox message length: %u for mod %d cmd %u, expected length: %u\n",
+ msg_len, mod, cmd,
+ msg_params->expected_out_size);
+ err = -EFAULT;
+ goto err_send;
+ }
+
+ memcpy(msg_params->buf_out, msg_desc->msg, msg_len);
+ }
+
+err_send:
+ mutex_unlock(&mbox->mbox_send_lock);
+
+ return err;
+}
+
+int hinic3_send_mbox_to_mgmt_no_ack(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
+ const struct mgmt_msg_params *msg_params)
+{
+ struct hinic3_mbox *mbox = hwdev->mbox;
+ struct mbox_msg_info msg_info = {};
+ int err;
+
+ mutex_lock(&mbox->mbox_send_lock);
+ err = send_mbox_msg(mbox, mod, cmd, msg_params->buf_in,
+ msg_params->in_size, MBOX_MGMT_FUNC_ID,
+ MBOX_MSG_SEND, MBOX_MSG_NO_ACK, &msg_info);
+ if (err)
+ dev_err(hwdev->dev, "Send mailbox no ack failed\n");
+
+ mutex_unlock(&mbox->mbox_send_lock);
+
+ return err;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h
new file mode 100644
index 000000000000..e71629e95086
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_MBOX_H_
+#define _HINIC3_MBOX_H_
+
+#include <linux/bitfield.h>
+#include <linux/mutex.h>
+
+struct hinic3_hwdev;
+struct mgmt_msg_params;
+
+#define MBOX_MSG_HEADER_SRC_GLB_FUNC_IDX_MASK GENMASK_ULL(12, 0)
+#define MBOX_MSG_HEADER_STATUS_MASK BIT_ULL(13)
+#define MBOX_MSG_HEADER_SOURCE_MASK BIT_ULL(15)
+#define MBOX_MSG_HEADER_AEQ_ID_MASK GENMASK_ULL(17, 16)
+#define MBOX_MSG_HEADER_MSG_ID_MASK GENMASK_ULL(21, 18)
+#define MBOX_MSG_HEADER_CMD_MASK GENMASK_ULL(31, 22)
+#define MBOX_MSG_HEADER_MSG_LEN_MASK GENMASK_ULL(42, 32)
+#define MBOX_MSG_HEADER_MODULE_MASK GENMASK_ULL(47, 43)
+#define MBOX_MSG_HEADER_SEG_LEN_MASK GENMASK_ULL(53, 48)
+#define MBOX_MSG_HEADER_NO_ACK_MASK BIT_ULL(54)
+#define MBOX_MSG_HEADER_DATA_TYPE_MASK BIT_ULL(55)
+#define MBOX_MSG_HEADER_SEQID_MASK GENMASK_ULL(61, 56)
+#define MBOX_MSG_HEADER_LAST_MASK BIT_ULL(62)
+#define MBOX_MSG_HEADER_DIRECTION_MASK BIT_ULL(63)
+
+#define MBOX_MSG_HEADER_SET(val, member) \
+ FIELD_PREP(MBOX_MSG_HEADER_##member##_MASK, val)
+#define MBOX_MSG_HEADER_GET(val, member) \
+ FIELD_GET(MBOX_MSG_HEADER_##member##_MASK, le64_to_cpu(val))
+
+/* Identifies if a segment belongs to a message or to a response. A VF is only
+ * expected to send messages and receive responses. A PF driver could receive
+ * messages and send responses.
+ */
+enum mbox_msg_direction_type {
+ MBOX_MSG_SEND = 0,
+ MBOX_MSG_RESP = 1,
+};
+
+/* Indicates if mbox message expects a response (ack) or not */
+enum mbox_msg_ack_type {
+ MBOX_MSG_ACK = 0,
+ MBOX_MSG_NO_ACK = 1,
+};
+
+enum mbox_msg_data_type {
+ MBOX_MSG_DATA_INLINE = 0,
+ MBOX_MSG_DATA_DMA = 1,
+};
+
+enum mbox_msg_src_type {
+ MBOX_MSG_FROM_MBOX = 1,
+};
+
+enum mbox_msg_aeq_type {
+ MBOX_MSG_AEQ_FOR_EVENT = 0,
+ MBOX_MSG_AEQ_FOR_MBOX = 1,
+};
+
+#define HINIC3_MBOX_WQ_NAME "hinic3_mbox"
+
+struct mbox_msg_info {
+ u8 msg_id;
+ u8 status;
+};
+
+struct hinic3_msg_desc {
+ u8 *msg;
+ __le16 msg_len;
+ u8 seq_id;
+ u8 mod;
+ __le16 cmd;
+ struct mbox_msg_info msg_info;
+};
+
+struct hinic3_msg_channel {
+ struct hinic3_msg_desc resp_msg;
+ struct hinic3_msg_desc recv_msg;
+};
+
+struct hinic3_send_mbox {
+ u8 __iomem *data;
+ void *wb_vaddr;
+ dma_addr_t wb_paddr;
+};
+
+enum mbox_event_state {
+ MBOX_EVENT_START = 0,
+ MBOX_EVENT_FAIL = 1,
+ MBOX_EVENT_SUCCESS = 2,
+ MBOX_EVENT_TIMEOUT = 3,
+ MBOX_EVENT_END = 4,
+};
+
+struct mbox_dma_msg {
+ __le32 xor;
+ __le32 dma_addr_high;
+ __le32 dma_addr_low;
+ __le32 msg_len;
+ __le64 rsvd;
+};
+
+struct mbox_dma_queue {
+ void *dma_buf_vaddr;
+ dma_addr_t dma_buf_paddr;
+ u16 depth;
+ u16 prod_idx;
+ u16 cons_idx;
+};
+
+struct hinic3_mbox {
+ struct hinic3_hwdev *hwdev;
+ /* lock for send mbox message and ack message */
+ struct mutex mbox_send_lock;
+ struct hinic3_send_mbox send_mbox;
+ struct mbox_dma_queue sync_msg_queue;
+ struct mbox_dma_queue async_msg_queue;
+ struct workqueue_struct *workq;
+ /* message channel between the driver and the MGMT CPU */
+ struct hinic3_msg_channel mgmt_msg;
+ /* message channel from VF to PF */
+ struct hinic3_msg_channel *func_msg;
+ u8 send_msg_id;
+ enum mbox_event_state event_flag;
+ /* lock for mbox event flag */
+ spinlock_t mbox_lock;
+};
+
+void hinic3_mbox_func_aeqe_handler(struct hinic3_hwdev *hwdev, u8 *header,
+ u8 size);
+int hinic3_init_mbox(struct hinic3_hwdev *hwdev);
+void hinic3_free_mbox(struct hinic3_hwdev *hwdev);
+
+int hinic3_send_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
+ const struct mgmt_msg_params *msg_params);
+int hinic3_send_mbox_to_mgmt_no_ack(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
+ const struct mgmt_msg_params *msg_params);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c
new file mode 100644
index 000000000000..c38d10cd7fac
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include "hinic3_eqs.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_mbox.h"
+#include "hinic3_mgmt.h"
+
+void hinic3_flush_mgmt_workq(struct hinic3_hwdev *hwdev)
+{
+ if (hwdev->aeqs)
+ flush_workqueue(hwdev->aeqs->workq);
+}
+
+void hinic3_mgmt_msg_aeqe_handler(struct hinic3_hwdev *hwdev, u8 *header,
+ u8 size)
+{
+ if (MBOX_MSG_HEADER_GET(*(__force __le64 *)header, SOURCE) ==
+ MBOX_MSG_FROM_MBOX)
+ hinic3_mbox_func_aeqe_handler(hwdev, header, size);
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h
new file mode 100644
index 000000000000..bbef3b32a6ec
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_MGMT_H_
+#define _HINIC3_MGMT_H_
+
+#include <linux/types.h>
+
+struct hinic3_hwdev;
+
+void hinic3_flush_mgmt_workq(struct hinic3_hwdev *hwdev);
+void hinic3_mgmt_msg_aeqe_handler(struct hinic3_hwdev *hwdev,
+ u8 *header, u8 size);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h
new file mode 100644
index 000000000000..6cc0345c39e4
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_MGMT_INTERFACE_H_
+#define _HINIC3_MGMT_INTERFACE_H_
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/if_ether.h>
+
+#include "hinic3_hw_intf.h"
+
+struct l2nic_cmd_feature_nego {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u8 opcode;
+ u8 rsvd;
+ u64 s_feature[4];
+};
+
+enum l2nic_func_tbl_cfg_bitmap {
+ L2NIC_FUNC_TBL_CFG_INIT = 0,
+ L2NIC_FUNC_TBL_CFG_RX_BUF_SIZE = 1,
+ L2NIC_FUNC_TBL_CFG_MTU = 2,
+};
+
+struct l2nic_func_tbl_cfg {
+ u16 rx_wqe_buf_size;
+ u16 mtu;
+ u32 rsvd[9];
+};
+
+struct l2nic_cmd_set_func_tbl {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u16 rsvd;
+ u32 cfg_bitmap;
+ struct l2nic_func_tbl_cfg tbl_cfg;
+};
+
+struct l2nic_cmd_set_mac {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u16 vlan_id;
+ u16 rsvd1;
+ u8 mac[ETH_ALEN];
+};
+
+struct l2nic_cmd_update_mac {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u16 vlan_id;
+ u16 rsvd1;
+ u8 old_mac[ETH_ALEN];
+ u16 rsvd2;
+ u8 new_mac[ETH_ALEN];
+};
+
+struct l2nic_cmd_set_ci_attr {
+ struct mgmt_msg_head msg_head;
+ u16 func_idx;
+ u8 dma_attr_off;
+ u8 pending_limit;
+ u8 coalescing_time;
+ u8 intr_en;
+ u16 intr_idx;
+ u32 l2nic_sqn;
+ u32 rsvd;
+ u64 ci_addr;
+};
+
+struct l2nic_cmd_clear_qp_resource {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u16 rsvd1;
+};
+
+struct l2nic_cmd_force_pkt_drop {
+ struct mgmt_msg_head msg_head;
+ u8 port;
+ u8 rsvd1[3];
+};
+
+struct l2nic_cmd_set_vport_state {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u16 rsvd1;
+ /* 0--disable, 1--enable */
+ u8 state;
+ u8 rsvd2[3];
+};
+
+struct l2nic_cmd_set_dcb_state {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ /* 0 - get dcb state, 1 - set dcb state */
+ u8 op_code;
+ /* 0 - disable, 1 - enable dcb */
+ u8 state;
+ /* 0 - disable, 1 - enable dcb */
+ u8 port_state;
+ u8 rsvd[7];
+};
+
+#define L2NIC_RSS_TYPE_VALID_MASK BIT(23)
+#define L2NIC_RSS_TYPE_TCP_IPV6_EXT_MASK BIT(24)
+#define L2NIC_RSS_TYPE_IPV6_EXT_MASK BIT(25)
+#define L2NIC_RSS_TYPE_TCP_IPV6_MASK BIT(26)
+#define L2NIC_RSS_TYPE_IPV6_MASK BIT(27)
+#define L2NIC_RSS_TYPE_TCP_IPV4_MASK BIT(28)
+#define L2NIC_RSS_TYPE_IPV4_MASK BIT(29)
+#define L2NIC_RSS_TYPE_UDP_IPV6_MASK BIT(30)
+#define L2NIC_RSS_TYPE_UDP_IPV4_MASK BIT(31)
+#define L2NIC_RSS_TYPE_SET(val, member) \
+ FIELD_PREP(L2NIC_RSS_TYPE_##member##_MASK, val)
+#define L2NIC_RSS_TYPE_GET(val, member) \
+ FIELD_GET(L2NIC_RSS_TYPE_##member##_MASK, val)
+
+#define L2NIC_RSS_INDIR_SIZE 256
+#define L2NIC_RSS_KEY_SIZE 40
+
+/* IEEE 802.1Qaz std */
+#define L2NIC_DCB_COS_MAX 0x8
+
+struct l2nic_cmd_set_rss_ctx_tbl {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u16 rsvd1;
+ u32 context;
+};
+
+struct l2nic_cmd_cfg_rss_engine {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u8 opcode;
+ u8 hash_engine;
+ u8 rsvd1[4];
+};
+
+struct l2nic_cmd_cfg_rss_hash_key {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u8 opcode;
+ u8 rsvd1;
+ u8 key[L2NIC_RSS_KEY_SIZE];
+};
+
+struct l2nic_cmd_cfg_rss {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u8 rss_en;
+ u8 rq_priority_number;
+ u8 prio_tc[L2NIC_DCB_COS_MAX];
+ u16 num_qps;
+ u16 rsvd1;
+};
+
+/* Commands from the NIC driver to the firmware */
+enum l2nic_cmd {
+ /* FUNC CFG */
+ L2NIC_CMD_SET_FUNC_TBL = 5,
+ L2NIC_CMD_SET_VPORT_ENABLE = 6,
+ L2NIC_CMD_SET_SQ_CI_ATTR = 8,
+ L2NIC_CMD_CLEAR_QP_RESOURCE = 11,
+ L2NIC_CMD_FEATURE_NEGO = 15,
+ L2NIC_CMD_SET_MAC = 21,
+ L2NIC_CMD_DEL_MAC = 22,
+ L2NIC_CMD_UPDATE_MAC = 23,
+ L2NIC_CMD_CFG_RSS = 60,
+ L2NIC_CMD_CFG_RSS_HASH_KEY = 63,
+ L2NIC_CMD_CFG_RSS_HASH_ENGINE = 64,
+ L2NIC_CMD_SET_RSS_CTX_TBL = 65,
+ L2NIC_CMD_QOS_DCB_STATE = 110,
+ L2NIC_CMD_FORCE_PKT_DROP = 113,
+ L2NIC_CMD_MAX = 256,
+};
+
+struct l2nic_cmd_rss_set_indir_tbl {
+ __le32 rsvd[4];
+ __le16 entry[L2NIC_RSS_INDIR_SIZE];
+};
+
+/* NIC CMDQ MODE */
+enum l2nic_ucode_cmd {
+ L2NIC_UCODE_CMD_MODIFY_QUEUE_CTX = 0,
+ L2NIC_UCODE_CMD_CLEAN_QUEUE_CTX = 1,
+ L2NIC_UCODE_CMD_SET_RSS_INDIR_TBL = 4,
+};
+
+/* hilink mac group command */
+enum mag_cmd {
+ MAG_CMD_GET_LINK_STATUS = 7,
+};
+
+/* firmware also uses this cmd to report link events to the driver */
+struct mag_cmd_get_link_status {
+ struct mgmt_msg_head head;
+ u8 port_id;
+ /* 0:link down 1:link up */
+ u8 status;
+ u8 rsvd0[2];
+};
+
+enum hinic3_nic_feature_cap {
+ HINIC3_NIC_F_CSUM = BIT(0),
+ HINIC3_NIC_F_SCTP_CRC = BIT(1),
+ HINIC3_NIC_F_TSO = BIT(2),
+ HINIC3_NIC_F_LRO = BIT(3),
+ HINIC3_NIC_F_UFO = BIT(4),
+ HINIC3_NIC_F_RSS = BIT(5),
+ HINIC3_NIC_F_RX_VLAN_FILTER = BIT(6),
+ HINIC3_NIC_F_RX_VLAN_STRIP = BIT(7),
+ HINIC3_NIC_F_TX_VLAN_INSERT = BIT(8),
+ HINIC3_NIC_F_VXLAN_OFFLOAD = BIT(9),
+ HINIC3_NIC_F_FDIR = BIT(11),
+ HINIC3_NIC_F_PROMISC = BIT(12),
+ HINIC3_NIC_F_ALLMULTI = BIT(13),
+ HINIC3_NIC_F_RATE_LIMIT = BIT(16),
+};
+
+#define HINIC3_NIC_F_ALL_MASK 0x33bff
+#define HINIC3_NIC_DRV_DEFAULT_FEATURE 0x3f03f
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
new file mode 100644
index 000000000000..bbf22811a029
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
@@ -0,0 +1,496 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+
+#include "hinic3_hwif.h"
+#include "hinic3_nic_cfg.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_nic_io.h"
+#include "hinic3_rss.h"
+#include "hinic3_rx.h"
+#include "hinic3_tx.h"
+
+/* try to modify the number of IRQs to the target number and return
+ * the actual number of IRQs obtained.
+ */
+static u16 hinic3_qp_irq_change(struct net_device *netdev,
+ u16 dst_num_qp_irq)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct msix_entry *qps_msix_entries;
+ u16 resp_irq_num, irq_num_gap, i;
+ u16 idx;
+ int err;
+
+ qps_msix_entries = nic_dev->qps_msix_entries;
+ if (dst_num_qp_irq > nic_dev->num_qp_irq) {
+ irq_num_gap = dst_num_qp_irq - nic_dev->num_qp_irq;
+ err = hinic3_alloc_irqs(nic_dev->hwdev, irq_num_gap,
+ &qps_msix_entries[nic_dev->num_qp_irq],
+ &resp_irq_num);
+ if (err) {
+ netdev_err(netdev, "Failed to alloc irqs\n");
+ return nic_dev->num_qp_irq;
+ }
+
+ nic_dev->num_qp_irq += resp_irq_num;
+ } else if (dst_num_qp_irq < nic_dev->num_qp_irq) {
+ irq_num_gap = nic_dev->num_qp_irq - dst_num_qp_irq;
+ for (i = 0; i < irq_num_gap; i++) {
+ idx = (nic_dev->num_qp_irq - i) - 1;
+ hinic3_free_irq(nic_dev->hwdev,
+ qps_msix_entries[idx].vector);
+ qps_msix_entries[idx].vector = 0;
+ qps_msix_entries[idx].entry = 0;
+ }
+ nic_dev->num_qp_irq = dst_num_qp_irq;
+ }
+
+ return nic_dev->num_qp_irq;
+}
+
+static void hinic3_config_num_qps(struct net_device *netdev,
+ struct hinic3_dyna_txrxq_params *q_params)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 alloc_num_irq, cur_num_irq;
+ u16 dst_num_irq;
+
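+ /* Without RSS, all traffic is handled by a single qp */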
+ if (!test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags))
+ q_params->num_qps = 1;
+
+ if (nic_dev->num_qp_irq >= q_params->num_qps)
+ goto out;
+
+ cur_num_irq = nic_dev->num_qp_irq;
+
+ alloc_num_irq = hinic3_qp_irq_change(netdev, q_params->num_qps);
+ if (alloc_num_irq < q_params->num_qps) {
+ q_params->num_qps = alloc_num_irq;
+ netdev_warn(netdev, "Can not get enough irqs, adjust num_qps to %u\n",
+ q_params->num_qps);
+
+ /* The current irq may be in use, we must keep it */
+ dst_num_irq = max_t(u16, cur_num_irq, q_params->num_qps);
+ hinic3_qp_irq_change(netdev, dst_num_irq);
+ }
+
+out:
+ netdev_dbg(netdev, "Final num_qps: %u\n", q_params->num_qps);
+}
+
+static int hinic3_setup_num_qps(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ nic_dev->num_qp_irq = 0;
+
+ nic_dev->qps_msix_entries = kcalloc(nic_dev->max_qps,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!nic_dev->qps_msix_entries)
+ return -ENOMEM;
+
+ hinic3_config_num_qps(netdev, &nic_dev->q_params);
+
+ return 0;
+}
+
+static void hinic3_destroy_num_qps(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 i;
+
+ for (i = 0; i < nic_dev->num_qp_irq; i++)
+ hinic3_free_irq(nic_dev->hwdev,
+ nic_dev->qps_msix_entries[i].vector);
+
+ kfree(nic_dev->qps_msix_entries);
+}
+
+static int hinic3_alloc_txrxq_resources(struct net_device *netdev,
+ struct hinic3_dyna_txrxq_params *q_params)
+{
+ int err;
+
+ q_params->txqs_res = kcalloc(q_params->num_qps,
+ sizeof(*q_params->txqs_res), GFP_KERNEL);
+ if (!q_params->txqs_res)
+ return -ENOMEM;
+
+ q_params->rxqs_res = kcalloc(q_params->num_qps,
+ sizeof(*q_params->rxqs_res), GFP_KERNEL);
+ if (!q_params->rxqs_res) {
+ err = -ENOMEM;
+ goto err_free_txqs_res_arr;
+ }
+
+ q_params->irq_cfg = kcalloc(q_params->num_qps,
+ sizeof(*q_params->irq_cfg), GFP_KERNEL);
+ if (!q_params->irq_cfg) {
+ err = -ENOMEM;
+ goto err_free_rxqs_res_arr;
+ }
+
+ err = hinic3_alloc_txqs_res(netdev, q_params->num_qps,
+ q_params->sq_depth, q_params->txqs_res);
+ if (err) {
+ netdev_err(netdev, "Failed to alloc txqs resource\n");
+ goto err_free_irq_cfg;
+ }
+
+ err = hinic3_alloc_rxqs_res(netdev, q_params->num_qps,
+ q_params->rq_depth, q_params->rxqs_res);
+ if (err) {
+ netdev_err(netdev, "Failed to alloc rxqs resource\n");
+ goto err_free_txqs_res;
+ }
+
+ return 0;
+
+err_free_txqs_res:
+ hinic3_free_txqs_res(netdev, q_params->num_qps, q_params->sq_depth,
+ q_params->txqs_res);
+err_free_irq_cfg:
+ kfree(q_params->irq_cfg);
+ q_params->irq_cfg = NULL;
+err_free_rxqs_res_arr:
+ kfree(q_params->rxqs_res);
+ q_params->rxqs_res = NULL;
+err_free_txqs_res_arr:
+ kfree(q_params->txqs_res);
+ q_params->txqs_res = NULL;
+
+ return err;
+}
+
+static void hinic3_free_txrxq_resources(struct net_device *netdev,
+ struct hinic3_dyna_txrxq_params *q_params)
+{
+ hinic3_free_rxqs_res(netdev, q_params->num_qps, q_params->rq_depth,
+ q_params->rxqs_res);
+ hinic3_free_txqs_res(netdev, q_params->num_qps, q_params->sq_depth,
+ q_params->txqs_res);
+
+ kfree(q_params->irq_cfg);
+ q_params->irq_cfg = NULL;
+
+ kfree(q_params->rxqs_res);
+ q_params->rxqs_res = NULL;
+
+ kfree(q_params->txqs_res);
+ q_params->txqs_res = NULL;
+}
+
+static int hinic3_configure_txrxqs(struct net_device *netdev,
+ struct hinic3_dyna_txrxq_params *q_params)
+{
+ int err;
+
+ err = hinic3_configure_txqs(netdev, q_params->num_qps,
+ q_params->sq_depth, q_params->txqs_res);
+ if (err) {
+ netdev_err(netdev, "Failed to configure txqs\n");
+ return err;
+ }
+
+ err = hinic3_configure_rxqs(netdev, q_params->num_qps,
+ q_params->rq_depth, q_params->rxqs_res);
+ if (err) {
+ netdev_err(netdev, "Failed to configure rxqs\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int hinic3_configure(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ netdev->min_mtu = HINIC3_MIN_MTU_SIZE;
+ netdev->max_mtu = HINIC3_MAX_JUMBO_FRAME_SIZE;
+ err = hinic3_set_port_mtu(netdev, netdev->mtu);
+ if (err) {
+ netdev_err(netdev, "Failed to set mtu\n");
+ return err;
+ }
+
+ /* Ensure DCB is disabled */
+ hinic3_sync_dcb_state(nic_dev->hwdev, 1, 0);
+
+ if (test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags)) {
+ err = hinic3_rss_init(netdev);
+ if (err) {
+ netdev_err(netdev, "Failed to init rss\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void hinic3_remove_configure(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ if (test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags))
+ hinic3_rss_uninit(netdev);
+}
+
+static int hinic3_alloc_channel_resources(struct net_device *netdev,
+ struct hinic3_dyna_qp_params *qp_params,
+ struct hinic3_dyna_txrxq_params *trxq_params)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ qp_params->num_qps = trxq_params->num_qps;
+ qp_params->sq_depth = trxq_params->sq_depth;
+ qp_params->rq_depth = trxq_params->rq_depth;
+
+ err = hinic3_alloc_qps(nic_dev, qp_params);
+ if (err) {
+ netdev_err(netdev, "Failed to alloc qps\n");
+ return err;
+ }
+
+ err = hinic3_alloc_txrxq_resources(netdev, trxq_params);
+ if (err) {
+ netdev_err(netdev, "Failed to alloc txrxq resources\n");
+ hinic3_free_qps(nic_dev, qp_params);
+ return err;
+ }
+
+ return 0;
+}
+
+static void hinic3_free_channel_resources(struct net_device *netdev,
+ struct hinic3_dyna_qp_params *qp_params,
+ struct hinic3_dyna_txrxq_params *trxq_params)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ hinic3_free_txrxq_resources(netdev, trxq_params);
+ hinic3_free_qps(nic_dev, qp_params);
+}
+
+static int hinic3_open_channel(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ err = hinic3_init_qp_ctxts(nic_dev);
+ if (err) {
+ netdev_err(netdev, "Failed to init qps\n");
+ return err;
+ }
+
+ err = hinic3_configure_txrxqs(netdev, &nic_dev->q_params);
+ if (err) {
+ netdev_err(netdev, "Failed to configure txrxqs\n");
+ goto err_free_qp_ctxts;
+ }
+
+ err = hinic3_qps_irq_init(netdev);
+ if (err) {
+ netdev_err(netdev, "Failed to init txrxq irq\n");
+ goto err_free_qp_ctxts;
+ }
+
+ err = hinic3_configure(netdev);
+ if (err) {
+ netdev_err(netdev, "Failed to configure device resources\n");
+ goto err_uninit_qps_irq;
+ }
+
+ return 0;
+
+err_uninit_qps_irq:
+ hinic3_qps_irq_uninit(netdev);
+err_free_qp_ctxts:
+ hinic3_free_qp_ctxts(nic_dev);
+
+ return err;
+}
+
+static void hinic3_close_channel(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ hinic3_remove_configure(netdev);
+ hinic3_qps_irq_uninit(netdev);
+ hinic3_free_qp_ctxts(nic_dev);
+}
+
+static int hinic3_vport_up(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ bool link_status_up;
+ u16 glb_func_id;
+ int err;
+
+ glb_func_id = hinic3_global_func_id(nic_dev->hwdev);
+ err = hinic3_set_vport_enable(nic_dev->hwdev, glb_func_id, true);
+ if (err) {
+ netdev_err(netdev, "Failed to enable vport\n");
+ goto err_flush_qps_res;
+ }
+
+ err = netif_set_real_num_queues(netdev, nic_dev->q_params.num_qps,
+ nic_dev->q_params.num_qps);
+ if (err) {
+ netdev_err(netdev, "Failed to set real number of queues\n");
+ goto err_flush_qps_res;
+ }
+ netif_tx_start_all_queues(netdev);
+
+ err = hinic3_get_link_status(nic_dev->hwdev, &link_status_up);
+ if (!err && link_status_up)
+ netif_carrier_on(netdev);
+
+ return 0;
+
+err_flush_qps_res:
+ hinic3_flush_qps_res(nic_dev->hwdev);
+ /* wait to guarantee that no packets will be sent to host */
+ msleep(100);
+
+ return err;
+}
+
+static void hinic3_vport_down(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 glb_func_id;
+
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ glb_func_id = hinic3_global_func_id(nic_dev->hwdev);
+ hinic3_set_vport_enable(nic_dev->hwdev, glb_func_id, false);
+
+ hinic3_flush_txqs(netdev);
+ /* wait to guarantee that no packets will be sent to host */
+ msleep(100);
+ hinic3_flush_qps_res(nic_dev->hwdev);
+}
+
+static int hinic3_open(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_dyna_qp_params qp_params;
+ int err;
+
+ err = hinic3_init_nicio_res(nic_dev);
+ if (err) {
+ netdev_err(netdev, "Failed to init nicio resources\n");
+ return err;
+ }
+
+ err = hinic3_setup_num_qps(netdev);
+ if (err) {
+ netdev_err(netdev, "Failed to setup num_qps\n");
+ goto err_free_nicio_res;
+ }
+
+ err = hinic3_alloc_channel_resources(netdev, &qp_params,
+ &nic_dev->q_params);
+ if (err)
+ goto err_destroy_num_qps;
+
+ hinic3_init_qps(nic_dev, &qp_params);
+
+ err = hinic3_open_channel(netdev);
+ if (err)
+ goto err_uninit_qps;
+
+ err = hinic3_vport_up(netdev);
+ if (err)
+ goto err_close_channel;
+
+ return 0;
+
+err_close_channel:
+ hinic3_close_channel(netdev);
+err_uninit_qps:
+ hinic3_uninit_qps(nic_dev, &qp_params);
+ hinic3_free_channel_resources(netdev, &qp_params, &nic_dev->q_params);
+err_destroy_num_qps:
+ hinic3_destroy_num_qps(netdev);
+err_free_nicio_res:
+ hinic3_free_nicio_res(nic_dev);
+
+ return err;
+}
+
+static int hinic3_close(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_dyna_qp_params qp_params;
+
+ hinic3_vport_down(netdev);
+ hinic3_close_channel(netdev);
+ hinic3_uninit_qps(nic_dev, &qp_params);
+ hinic3_free_channel_resources(netdev, &qp_params, &nic_dev->q_params);
+
+ return 0;
+}
+
+static int hinic3_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ int err;
+
+ err = hinic3_set_port_mtu(netdev, new_mtu);
+ if (err) {
+ netdev_err(netdev, "Failed to change port mtu to %d\n",
+ new_mtu);
+ return err;
+ }
+
+ netdev_dbg(netdev, "Change mtu from %u to %d\n", netdev->mtu, new_mtu);
+ WRITE_ONCE(netdev->mtu, new_mtu);
+
+ return 0;
+}
+
+static int hinic3_set_mac_addr(struct net_device *netdev, void *addr)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct sockaddr *saddr = addr;
+ int err;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (ether_addr_equal(netdev->dev_addr, saddr->sa_data))
+ return 0;
+
+ err = hinic3_update_mac(nic_dev->hwdev, netdev->dev_addr,
+ saddr->sa_data, 0,
+ hinic3_global_func_id(nic_dev->hwdev));
+
+ if (err)
+ return err;
+
+ eth_hw_addr_set(netdev, saddr->sa_data);
+
+ return 0;
+}
+
+static const struct net_device_ops hinic3_netdev_ops = {
+ .ndo_open = hinic3_open,
+ .ndo_stop = hinic3_close,
+ .ndo_change_mtu = hinic3_change_mtu,
+ .ndo_set_mac_address = hinic3_set_mac_addr,
+ .ndo_start_xmit = hinic3_xmit_frame,
+};
+
+void hinic3_set_netdev_ops(struct net_device *netdev)
+{
+ netdev->netdev_ops = &hinic3_netdev_ops;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
new file mode 100644
index 000000000000..979f47ca77f9
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/if_vlan.h>
+
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+#include "hinic3_nic_cfg.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_nic_io.h"
+
+static int hinic3_feature_nego(struct hinic3_hwdev *hwdev, u8 opcode,
+ u64 *s_feature, u16 size)
+{
+ struct l2nic_cmd_feature_nego feature_nego = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ feature_nego.func_id = hinic3_global_func_id(hwdev);
+ feature_nego.opcode = opcode;
+ if (opcode == MGMT_MSG_CMD_OP_SET)
+ memcpy(feature_nego.s_feature, s_feature, size * sizeof(u64));
+
+ mgmt_msg_params_init_default(&msg_params, &feature_nego,
+ sizeof(feature_nego));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_FEATURE_NEGO, &msg_params);
+ if (err || feature_nego.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to negotiate nic feature, err:%d, status: 0x%x\n",
+ err, feature_nego.msg_head.status);
+ return -EIO;
+ }
+
+ if (opcode == MGMT_MSG_CMD_OP_GET)
+ memcpy(s_feature, feature_nego.s_feature, size * sizeof(u64));
+
+ return 0;
+}
+
+int hinic3_get_nic_feature_from_hw(struct hinic3_nic_dev *nic_dev)
+{
+ return hinic3_feature_nego(nic_dev->hwdev, MGMT_MSG_CMD_OP_GET,
+ &nic_dev->nic_io->feature_cap, 1);
+}
+
+int hinic3_set_nic_feature_to_hw(struct hinic3_nic_dev *nic_dev)
+{
+ return hinic3_feature_nego(nic_dev->hwdev, MGMT_MSG_CMD_OP_SET,
+ &nic_dev->nic_io->feature_cap, 1);
+}
+
+bool hinic3_test_support(struct hinic3_nic_dev *nic_dev,
+ enum hinic3_nic_feature_cap feature_bits)
+{
+ return (nic_dev->nic_io->feature_cap & feature_bits) == feature_bits;
+}
+
+void hinic3_update_nic_feature(struct hinic3_nic_dev *nic_dev, u64 feature_cap)
+{
+ nic_dev->nic_io->feature_cap = feature_cap;
+}
+
+static int hinic3_set_function_table(struct hinic3_hwdev *hwdev, u32 cfg_bitmap,
+ const struct l2nic_func_tbl_cfg *cfg)
+{
+ struct l2nic_cmd_set_func_tbl cmd_func_tbl = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ cmd_func_tbl.func_id = hinic3_global_func_id(hwdev);
+ cmd_func_tbl.cfg_bitmap = cfg_bitmap;
+ cmd_func_tbl.tbl_cfg = *cfg;
+
+ mgmt_msg_params_init_default(&msg_params, &cmd_func_tbl,
+ sizeof(cmd_func_tbl));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_SET_FUNC_TBL, &msg_params);
+ if (err || cmd_func_tbl.msg_head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set func table, bitmap: 0x%x, err: %d, status: 0x%x\n",
+ cfg_bitmap, err, cmd_func_tbl.msg_head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic3_init_function_table(struct hinic3_nic_dev *nic_dev)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct l2nic_func_tbl_cfg func_tbl_cfg = {};
+ u32 cfg_bitmap;
+
+ func_tbl_cfg.mtu = 0x3FFF; /* default, max mtu */
+ func_tbl_cfg.rx_wqe_buf_size = nic_io->rx_buf_len;
+
+ cfg_bitmap = BIT(L2NIC_FUNC_TBL_CFG_INIT) |
+ BIT(L2NIC_FUNC_TBL_CFG_MTU) |
+ BIT(L2NIC_FUNC_TBL_CFG_RX_BUF_SIZE);
+
+ return hinic3_set_function_table(nic_dev->hwdev, cfg_bitmap,
+ &func_tbl_cfg);
+}
+
+int hinic3_set_port_mtu(struct net_device *netdev, u16 new_mtu)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct l2nic_func_tbl_cfg func_tbl_cfg = {};
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+
+ func_tbl_cfg.mtu = new_mtu;
+
+ return hinic3_set_function_table(hwdev, BIT(L2NIC_FUNC_TBL_CFG_MTU),
+ &func_tbl_cfg);
+}
+
+static int hinic3_check_mac_info(struct hinic3_hwdev *hwdev, u8 status,
+ u16 vlan_id)
+{
+ if ((status && status != MGMT_STATUS_EXIST) ||
+ ((vlan_id & BIT(15)) && status == MGMT_STATUS_EXIST)) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic3_set_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
+ u16 func_id)
+{
+ struct l2nic_cmd_set_mac mac_info = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ if ((vlan_id & HINIC3_VLAN_ID_MASK) >= VLAN_N_VID) {
+ dev_err(hwdev->dev, "Invalid VLAN number: %d\n",
+ (vlan_id & HINIC3_VLAN_ID_MASK));
+ return -EINVAL;
+ }
+
+ mac_info.func_id = func_id;
+ mac_info.vlan_id = vlan_id;
+ ether_addr_copy(mac_info.mac, mac_addr);
+
+ mgmt_msg_params_init_default(&msg_params, &mac_info, sizeof(mac_info));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_SET_MAC, &msg_params);
+ if (err || hinic3_check_mac_info(hwdev, mac_info.msg_head.status,
+ mac_info.vlan_id)) {
+ dev_err(hwdev->dev,
+ "Failed to set MAC, err: %d, status: 0x%x\n",
+ err, mac_info.msg_head.status);
+ return -EIO;
+ }
+
+ if (mac_info.msg_head.status == MGMT_STATUS_PF_SET_VF_ALREADY) {
+ dev_warn(hwdev->dev, "PF has already set VF mac, Ignore set operation\n");
+ return 0;
+ }
+
+ if (mac_info.msg_head.status == MGMT_STATUS_EXIST) {
+ dev_warn(hwdev->dev, "MAC is repeated. Ignore update operation\n");
+ return 0;
+ }
+
+ return 0;
+}
+
+int hinic3_del_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
+ u16 func_id)
+{
+ struct l2nic_cmd_set_mac mac_info = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ if ((vlan_id & HINIC3_VLAN_ID_MASK) >= VLAN_N_VID) {
+ dev_err(hwdev->dev, "Invalid VLAN number: %d\n",
+ (vlan_id & HINIC3_VLAN_ID_MASK));
+ return -EINVAL;
+ }
+
+ mac_info.func_id = func_id;
+ mac_info.vlan_id = vlan_id;
+ ether_addr_copy(mac_info.mac, mac_addr);
+
+ mgmt_msg_params_init_default(&msg_params, &mac_info, sizeof(mac_info));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_DEL_MAC, &msg_params);
+ if (err) {
+ dev_err(hwdev->dev,
+ "Failed to delete MAC, err: %d, status: 0x%x\n",
+ err, mac_info.msg_head.status);
+ return err;
+ }
+
+ return 0;
+}
+
+int hinic3_update_mac(struct hinic3_hwdev *hwdev, const u8 *old_mac,
+ u8 *new_mac, u16 vlan_id, u16 func_id)
+{
+ struct l2nic_cmd_update_mac mac_info = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ if ((vlan_id & HINIC3_VLAN_ID_MASK) >= VLAN_N_VID) {
+ dev_err(hwdev->dev, "Invalid VLAN number: %d\n",
+ (vlan_id & HINIC3_VLAN_ID_MASK));
+ return -EINVAL;
+ }
+
+ mac_info.func_id = func_id;
+ mac_info.vlan_id = vlan_id;
+ ether_addr_copy(mac_info.old_mac, old_mac);
+ ether_addr_copy(mac_info.new_mac, new_mac);
+
+ mgmt_msg_params_init_default(&msg_params, &mac_info, sizeof(mac_info));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_UPDATE_MAC, &msg_params);
+ if (err || hinic3_check_mac_info(hwdev, mac_info.msg_head.status,
+ mac_info.vlan_id)) {
+ dev_err(hwdev->dev,
+ "Failed to update MAC, err: %d, status: 0x%x\n",
+ err, mac_info.msg_head.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic3_set_ci_table(struct hinic3_hwdev *hwdev, struct hinic3_sq_attr *attr)
+{
+ struct l2nic_cmd_set_ci_attr cons_idx_attr = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ cons_idx_attr.func_idx = hinic3_global_func_id(hwdev);
+ cons_idx_attr.dma_attr_off = attr->dma_attr_off;
+ cons_idx_attr.pending_limit = attr->pending_limit;
+ cons_idx_attr.coalescing_time = attr->coalescing_time;
+
+ if (attr->intr_en) {
+ cons_idx_attr.intr_en = attr->intr_en;
+ cons_idx_attr.intr_idx = attr->intr_idx;
+ }
+
+ cons_idx_attr.l2nic_sqn = attr->l2nic_sqn;
+ cons_idx_attr.ci_addr = attr->ci_dma_base;
+
+ mgmt_msg_params_init_default(&msg_params, &cons_idx_attr,
+ sizeof(cons_idx_attr));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_SET_SQ_CI_ATTR, &msg_params);
+ if (err || cons_idx_attr.msg_head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set ci attribute table, err: %d, status: 0x%x\n",
+ err, cons_idx_attr.msg_head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic3_flush_qps_res(struct hinic3_hwdev *hwdev)
+{
+ struct l2nic_cmd_clear_qp_resource sq_res = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ sq_res.func_id = hinic3_global_func_id(hwdev);
+
+ mgmt_msg_params_init_default(&msg_params, &sq_res, sizeof(sq_res));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_CLEAR_QP_RESOURCE,
+ &msg_params);
+ if (err || sq_res.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to clear sq resources, err: %d, status: 0x%x\n",
+ err, sq_res.msg_head.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic3_force_drop_tx_pkt(struct hinic3_hwdev *hwdev)
+{
+ struct l2nic_cmd_force_pkt_drop pkt_drop = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ pkt_drop.port = hinic3_physical_port_id(hwdev);
+
+ mgmt_msg_params_init_default(&msg_params, &pkt_drop, sizeof(pkt_drop));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_FORCE_PKT_DROP, &msg_params);
+ if ((pkt_drop.msg_head.status != MGMT_STATUS_CMD_UNSUPPORTED &&
+ pkt_drop.msg_head.status) || err) {
+ dev_err(hwdev->dev,
+ "Failed to set force tx packets drop, err: %d, status: 0x%x\n",
+ err, pkt_drop.msg_head.status);
+ return -EFAULT;
+ }
+
+ return pkt_drop.msg_head.status;
+}
+
+int hinic3_sync_dcb_state(struct hinic3_hwdev *hwdev, u8 op_code, u8 state)
+{
+ struct l2nic_cmd_set_dcb_state dcb_state = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ dcb_state.op_code = op_code;
+ dcb_state.state = state;
+ dcb_state.func_id = hinic3_global_func_id(hwdev);
+
+ mgmt_msg_params_init_default(&msg_params, &dcb_state,
+ sizeof(dcb_state));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_QOS_DCB_STATE, &msg_params);
+ if (err || dcb_state.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set dcb state, err: %d, status: 0x%x\n",
+ err, dcb_state.head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic3_get_link_status(struct hinic3_hwdev *hwdev, bool *link_status_up)
+{
+ struct mag_cmd_get_link_status get_link = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ get_link.port_id = hinic3_physical_port_id(hwdev);
+
+ mgmt_msg_params_init_default(&msg_params, &get_link, sizeof(get_link));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_HILINK,
+ MAG_CMD_GET_LINK_STATUS, &msg_params);
+ if (err || get_link.head.status) {
+ dev_err(hwdev->dev, "Failed to get link state, err: %d, status: 0x%x\n",
+ err, get_link.head.status);
+ return -EIO;
+ }
+
+ *link_status_up = !!get_link.status;
+
+ return 0;
+}
+
+int hinic3_set_vport_enable(struct hinic3_hwdev *hwdev, u16 func_id,
+ bool enable)
+{
+ struct l2nic_cmd_set_vport_state en_state = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ en_state.func_id = func_id;
+ en_state.state = enable ? 1 : 0;
+
+ mgmt_msg_params_init_default(&msg_params, &en_state, sizeof(en_state));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_SET_VPORT_ENABLE, &msg_params);
+ if (err || en_state.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to set vport state, err: %d, status: 0x%x\n",
+ err, en_state.msg_head.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h
new file mode 100644
index 000000000000..b83b567fa542
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_NIC_CFG_H_
+#define _HINIC3_NIC_CFG_H_
+
+#include <linux/types.h>
+
+#include "hinic3_hw_intf.h"
+#include "hinic3_mgmt_interface.h"
+
+struct hinic3_hwdev;
+struct hinic3_nic_dev;
+
+#define HINIC3_MIN_MTU_SIZE 256
+#define HINIC3_MAX_JUMBO_FRAME_SIZE 9600
+
+#define HINIC3_VLAN_ID_MASK 0x7FFF
+
+enum hinic3_nic_event_type {
+ HINIC3_NIC_EVENT_LINK_DOWN = 0,
+ HINIC3_NIC_EVENT_LINK_UP = 1,
+};
+
+struct hinic3_sq_attr {
+ u8 dma_attr_off;
+ u8 pending_limit;
+ u8 coalescing_time;
+ u8 intr_en;
+ u16 intr_idx;
+ u32 l2nic_sqn;
+ u64 ci_dma_base;
+};
+
+int hinic3_get_nic_feature_from_hw(struct hinic3_nic_dev *nic_dev);
+int hinic3_set_nic_feature_to_hw(struct hinic3_nic_dev *nic_dev);
+bool hinic3_test_support(struct hinic3_nic_dev *nic_dev,
+ enum hinic3_nic_feature_cap feature_bits);
+void hinic3_update_nic_feature(struct hinic3_nic_dev *nic_dev, u64 feature_cap);
+
+int hinic3_init_function_table(struct hinic3_nic_dev *nic_dev);
+int hinic3_set_port_mtu(struct net_device *netdev, u16 new_mtu);
+
+int hinic3_set_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
+ u16 func_id);
+int hinic3_del_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
+ u16 func_id);
+int hinic3_update_mac(struct hinic3_hwdev *hwdev, const u8 *old_mac,
+ u8 *new_mac, u16 vlan_id, u16 func_id);
+
+int hinic3_set_ci_table(struct hinic3_hwdev *hwdev,
+ struct hinic3_sq_attr *attr);
+int hinic3_flush_qps_res(struct hinic3_hwdev *hwdev);
+int hinic3_force_drop_tx_pkt(struct hinic3_hwdev *hwdev);
+
+int hinic3_sync_dcb_state(struct hinic3_hwdev *hwdev, u8 op_code, u8 state);
+int hinic3_get_link_status(struct hinic3_hwdev *hwdev, bool *link_status_up);
+int hinic3_set_vport_enable(struct hinic3_hwdev *hwdev, u16 func_id,
+ bool enable);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
new file mode 100644
index 000000000000..5ba83261616c
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_NIC_DEV_H_
+#define _HINIC3_NIC_DEV_H_
+
+#include <linux/netdevice.h>
+
+#include "hinic3_hw_cfg.h"
+#include "hinic3_mgmt_interface.h"
+
+enum hinic3_flags {
+ HINIC3_RSS_ENABLE,
+};
+
+enum hinic3_rss_hash_type {
+ HINIC3_RSS_HASH_ENGINE_TYPE_XOR = 0,
+ HINIC3_RSS_HASH_ENGINE_TYPE_TOEP = 1,
+};
+
+struct hinic3_rss_type {
+ u8 tcp_ipv6_ext;
+ u8 ipv6_ext;
+ u8 tcp_ipv6;
+ u8 ipv6;
+ u8 tcp_ipv4;
+ u8 ipv4;
+ u8 udp_ipv6;
+ u8 udp_ipv4;
+};
+
+struct hinic3_irq_cfg {
+ struct net_device *netdev;
+ u16 msix_entry_idx;
+ /* provided by OS */
+ u32 irq_id;
+ char irq_name[IFNAMSIZ + 16];
+ struct napi_struct napi;
+ cpumask_t affinity_mask;
+ struct hinic3_txq *txq;
+ struct hinic3_rxq *rxq;
+};
+
+struct hinic3_dyna_txrxq_params {
+ u16 num_qps;
+ u32 sq_depth;
+ u32 rq_depth;
+
+ struct hinic3_dyna_txq_res *txqs_res;
+ struct hinic3_dyna_rxq_res *rxqs_res;
+ struct hinic3_irq_cfg *irq_cfg;
+};
+
+struct hinic3_intr_coal_info {
+ u8 pending_limit;
+ u8 coalesce_timer_cfg;
+ u8 resend_timer_cfg;
+};
+
+struct hinic3_nic_dev {
+ struct pci_dev *pdev;
+ struct net_device *netdev;
+ struct hinic3_hwdev *hwdev;
+ struct hinic3_nic_io *nic_io;
+
+ u16 max_qps;
+ u16 rx_buf_len;
+ u32 lro_replenish_thld;
+ unsigned long flags;
+ struct hinic3_nic_service_cap nic_svc_cap;
+
+ struct hinic3_dyna_txrxq_params q_params;
+ struct hinic3_txq *txqs;
+ struct hinic3_rxq *rxqs;
+
+ enum hinic3_rss_hash_type rss_hash_type;
+ struct hinic3_rss_type rss_type;
+ u8 *rss_hkey;
+ u16 *rss_indir;
+
+ u16 num_qp_irq;
+ struct msix_entry *qps_msix_entries;
+
+ struct hinic3_intr_coal_info *intr_coalesce;
+
+ bool link_status_up;
+};
+
+void hinic3_set_netdev_ops(struct net_device *netdev);
+int hinic3_qps_irq_init(struct net_device *netdev);
+void hinic3_qps_irq_uninit(struct net_device *netdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
new file mode 100644
index 000000000000..d86cd1ba4605
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
@@ -0,0 +1,885 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include "hinic3_cmdq.h"
+#include "hinic3_hw_comm.h"
+#include "hinic3_hw_intf.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_nic_cfg.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_nic_io.h"
+
+#define HINIC3_DEFAULT_TX_CI_PENDING_LIMIT 1
+#define HINIC3_DEFAULT_TX_CI_COALESCING_TIME 1
+#define HINIC3_DEFAULT_DROP_THD_ON (0xFFFF)
+#define HINIC3_DEFAULT_DROP_THD_OFF 0
+
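+/* Each queue's consumer index gets its own 64-byte slot so that HW updates to
+ * one queue's CI do not share a cache line with another queue's.
+ */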
+#define HINIC3_CI_Q_ADDR_SIZE (64)
+
+#define HINIC3_CI_TABLE_SIZE(num_qps) \
+ (ALIGN((num_qps) * HINIC3_CI_Q_ADDR_SIZE, HINIC3_MIN_PAGE_SIZE))
+
+#define HINIC3_CI_VADDR(base_addr, q_id) \
+ ((u8 *)(base_addr) + (q_id) * HINIC3_CI_Q_ADDR_SIZE)
+
+#define HINIC3_CI_PADDR(base_paddr, q_id) \
+ ((base_paddr) + (q_id) * HINIC3_CI_Q_ADDR_SIZE)
+
+#define SQ_WQ_PREFETCH_MAX 1
+#define SQ_WQ_PREFETCH_MIN 1
+#define SQ_WQ_PREFETCH_THRESHOLD 16
+
+#define RQ_WQ_PREFETCH_MAX 4
+#define RQ_WQ_PREFETCH_MIN 1
+#define RQ_WQ_PREFETCH_THRESHOLD 256
+
+/* Number of contexts that fit in one command buffer: (2048 - 8) / 64 */
+#define HINIC3_Q_CTXT_MAX 31
+
+enum hinic3_qp_ctxt_type {
+ HINIC3_QP_CTXT_TYPE_SQ = 0,
+ HINIC3_QP_CTXT_TYPE_RQ = 1,
+};
+
+struct hinic3_qp_ctxt_hdr {
+ __le16 num_queues;
+ __le16 queue_type;
+ __le16 start_qid;
+ __le16 rsvd;
+};
+
+struct hinic3_sq_ctxt {
+ __le32 ci_pi;
+ __le32 drop_mode_sp;
+ __le32 wq_pfn_hi_owner;
+ __le32 wq_pfn_lo;
+
+ __le32 rsvd0;
+ __le32 pkt_drop_thd;
+ __le32 global_sq_id;
+ __le32 vlan_ceq_attr;
+
+ __le32 pref_cache;
+ __le32 pref_ci_owner;
+ __le32 pref_wq_pfn_hi_ci;
+ __le32 pref_wq_pfn_lo;
+
+ __le32 rsvd8;
+ __le32 rsvd9;
+ __le32 wq_block_pfn_hi;
+ __le32 wq_block_pfn_lo;
+};
+
+struct hinic3_rq_ctxt {
+ __le32 ci_pi;
+ __le32 ceq_attr;
+ __le32 wq_pfn_hi_type_owner;
+ __le32 wq_pfn_lo;
+
+ __le32 rsvd[3];
+ __le32 cqe_sge_len;
+
+ __le32 pref_cache;
+ __le32 pref_ci_owner;
+ __le32 pref_wq_pfn_hi_ci;
+ __le32 pref_wq_pfn_lo;
+
+ __le32 pi_paddr_hi;
+ __le32 pi_paddr_lo;
+ __le32 wq_block_pfn_hi;
+ __le32 wq_block_pfn_lo;
+};
+
+struct hinic3_sq_ctxt_block {
+ struct hinic3_qp_ctxt_hdr cmdq_hdr;
+ struct hinic3_sq_ctxt sq_ctxt[HINIC3_Q_CTXT_MAX];
+};
+
+struct hinic3_rq_ctxt_block {
+ struct hinic3_qp_ctxt_hdr cmdq_hdr;
+ struct hinic3_rq_ctxt rq_ctxt[HINIC3_Q_CTXT_MAX];
+};
+
+struct hinic3_clean_queue_ctxt {
+ struct hinic3_qp_ctxt_hdr cmdq_hdr;
+ __le32 rsvd;
+};
+
+#define SQ_CTXT_SIZE(num_sqs) \
+ (sizeof(struct hinic3_qp_ctxt_hdr) + \
+ (num_sqs) * sizeof(struct hinic3_sq_ctxt))
+
+#define RQ_CTXT_SIZE(num_rqs) \
+ (sizeof(struct hinic3_qp_ctxt_hdr) + \
+ (num_rqs) * sizeof(struct hinic3_rq_ctxt))
+
+#define SQ_CTXT_PREF_CI_HI_SHIFT 12
+#define SQ_CTXT_PREF_CI_HI(val) ((val) >> SQ_CTXT_PREF_CI_HI_SHIFT)
+
+#define SQ_CTXT_PI_IDX_MASK GENMASK(15, 0)
+#define SQ_CTXT_CI_IDX_MASK GENMASK(31, 16)
+#define SQ_CTXT_CI_PI_SET(val, member) \
+ FIELD_PREP(SQ_CTXT_##member##_MASK, val)
+
+#define SQ_CTXT_MODE_SP_FLAG_MASK BIT(0)
+#define SQ_CTXT_MODE_PKT_DROP_MASK BIT(1)
+#define SQ_CTXT_MODE_SET(val, member) \
+ FIELD_PREP(SQ_CTXT_MODE_##member##_MASK, val)
+
+#define SQ_CTXT_WQ_PAGE_HI_PFN_MASK GENMASK(19, 0)
+#define SQ_CTXT_WQ_PAGE_OWNER_MASK BIT(23)
+#define SQ_CTXT_WQ_PAGE_SET(val, member) \
+ FIELD_PREP(SQ_CTXT_WQ_PAGE_##member##_MASK, val)
+
+#define SQ_CTXT_PKT_DROP_THD_ON_MASK GENMASK(15, 0)
+#define SQ_CTXT_PKT_DROP_THD_OFF_MASK GENMASK(31, 16)
+#define SQ_CTXT_PKT_DROP_THD_SET(val, member) \
+ FIELD_PREP(SQ_CTXT_PKT_DROP_##member##_MASK, val)
+
+#define SQ_CTXT_GLOBAL_SQ_ID_MASK GENMASK(12, 0)
+#define SQ_CTXT_GLOBAL_QUEUE_ID_SET(val, member) \
+ FIELD_PREP(SQ_CTXT_##member##_MASK, val)
+
+#define SQ_CTXT_VLAN_INSERT_MODE_MASK GENMASK(20, 19)
+#define SQ_CTXT_VLAN_CEQ_EN_MASK BIT(23)
+#define SQ_CTXT_VLAN_CEQ_SET(val, member) \
+ FIELD_PREP(SQ_CTXT_VLAN_##member##_MASK, val)
+
+#define SQ_CTXT_PREF_CACHE_THRESHOLD_MASK GENMASK(13, 0)
+#define SQ_CTXT_PREF_CACHE_MAX_MASK GENMASK(24, 14)
+#define SQ_CTXT_PREF_CACHE_MIN_MASK GENMASK(31, 25)
+
+#define SQ_CTXT_PREF_CI_HI_MASK GENMASK(3, 0)
+#define SQ_CTXT_PREF_OWNER_MASK BIT(4)
+
+#define SQ_CTXT_PREF_WQ_PFN_HI_MASK GENMASK(19, 0)
+#define SQ_CTXT_PREF_CI_LOW_MASK GENMASK(31, 20)
+#define SQ_CTXT_PREF_SET(val, member) \
+ FIELD_PREP(SQ_CTXT_PREF_##member##_MASK, val)
+
+#define SQ_CTXT_WQ_BLOCK_PFN_HI_MASK GENMASK(22, 0)
+#define SQ_CTXT_WQ_BLOCK_SET(val, member) \
+ FIELD_PREP(SQ_CTXT_WQ_BLOCK_##member##_MASK, val)
+
+#define RQ_CTXT_PI_IDX_MASK GENMASK(15, 0)
+#define RQ_CTXT_CI_IDX_MASK GENMASK(31, 16)
+#define RQ_CTXT_CI_PI_SET(val, member) \
+ FIELD_PREP(RQ_CTXT_##member##_MASK, val)
+
+#define RQ_CTXT_CEQ_ATTR_INTR_MASK GENMASK(30, 21)
+#define RQ_CTXT_CEQ_ATTR_EN_MASK BIT(31)
+#define RQ_CTXT_CEQ_ATTR_SET(val, member) \
+ FIELD_PREP(RQ_CTXT_CEQ_ATTR_##member##_MASK, val)
+
+#define RQ_CTXT_WQ_PAGE_HI_PFN_MASK GENMASK(19, 0)
+#define RQ_CTXT_WQ_PAGE_WQE_TYPE_MASK GENMASK(29, 28)
+#define RQ_CTXT_WQ_PAGE_OWNER_MASK BIT(31)
+#define RQ_CTXT_WQ_PAGE_SET(val, member) \
+ FIELD_PREP(RQ_CTXT_WQ_PAGE_##member##_MASK, val)
+
+#define RQ_CTXT_CQE_LEN_MASK GENMASK(29, 28)
+#define RQ_CTXT_CQE_LEN_SET(val, member) \
+ FIELD_PREP(RQ_CTXT_##member##_MASK, val)
+
+#define RQ_CTXT_PREF_CACHE_THRESHOLD_MASK GENMASK(13, 0)
+#define RQ_CTXT_PREF_CACHE_MAX_MASK GENMASK(24, 14)
+#define RQ_CTXT_PREF_CACHE_MIN_MASK GENMASK(31, 25)
+
+#define RQ_CTXT_PREF_CI_HI_MASK GENMASK(3, 0)
+#define RQ_CTXT_PREF_OWNER_MASK BIT(4)
+
+#define RQ_CTXT_PREF_WQ_PFN_HI_MASK GENMASK(19, 0)
+#define RQ_CTXT_PREF_CI_LOW_MASK GENMASK(31, 20)
+#define RQ_CTXT_PREF_SET(val, member) \
+ FIELD_PREP(RQ_CTXT_PREF_##member##_MASK, val)
+
+#define RQ_CTXT_WQ_BLOCK_PFN_HI_MASK GENMASK(22, 0)
+#define RQ_CTXT_WQ_BLOCK_SET(val, member) \
+ FIELD_PREP(RQ_CTXT_WQ_BLOCK_##member##_MASK, val)
+
+#define WQ_PAGE_PFN_SHIFT 12
+#define WQ_BLOCK_PFN_SHIFT 9
+#define WQ_PAGE_PFN(page_addr) ((page_addr) >> WQ_PAGE_PFN_SHIFT)
+#define WQ_BLOCK_PFN(page_addr) ((page_addr) >> WQ_BLOCK_PFN_SHIFT)
+
+int hinic3_init_nic_io(struct hinic3_nic_dev *nic_dev)
+{
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic3_nic_io *nic_io;
+ int err;
+
+ nic_io = kzalloc(sizeof(*nic_io), GFP_KERNEL);
+ if (!nic_io)
+ return -ENOMEM;
+
+ nic_dev->nic_io = nic_io;
+
+ err = hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_NIC, 1);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set function svc used state\n");
+ goto err_free_nicio;
+ }
+
+ err = hinic3_init_function_table(nic_dev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init function table\n");
+ goto err_clear_func_svc_used_state;
+ }
+
+ nic_io->rx_buf_len = nic_dev->rx_buf_len;
+
+ err = hinic3_get_nic_feature_from_hw(nic_dev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to get nic features\n");
+ goto err_clear_func_svc_used_state;
+ }
+
+ nic_io->feature_cap &= HINIC3_NIC_F_ALL_MASK;
+ nic_io->feature_cap &= HINIC3_NIC_DRV_DEFAULT_FEATURE;
+ dev_dbg(hwdev->dev, "nic features: 0x%llx\n\n", nic_io->feature_cap);
+
+ return 0;
+
+err_clear_func_svc_used_state:
+ hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_NIC, 0);
+err_free_nicio:
+ nic_dev->nic_io = NULL;
+ kfree(nic_io);
+
+ return err;
+}
+
+void hinic3_free_nic_io(struct hinic3_nic_dev *nic_dev)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+
+ hinic3_set_func_svc_used_state(nic_dev->hwdev, COMM_FUNC_SVC_T_NIC, 0);
+ nic_dev->nic_io = NULL;
+ kfree(nic_io);
+}
+
+int hinic3_init_nicio_res(struct hinic3_nic_dev *nic_dev)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ void __iomem *db_base;
+ int err;
+
+ nic_io->max_qps = hinic3_func_max_qnum(hwdev);
+
+ err = hinic3_alloc_db_addr(hwdev, &db_base, NULL);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to allocate doorbell for sqs\n");
+ return err;
+ }
+ nic_io->sqs_db_addr = db_base;
+
+ err = hinic3_alloc_db_addr(hwdev, &db_base, NULL);
+ if (err) {
+ hinic3_free_db_addr(hwdev, nic_io->sqs_db_addr);
+ dev_err(hwdev->dev, "Failed to allocate doorbell for rqs\n");
+ return err;
+ }
+ nic_io->rqs_db_addr = db_base;
+
+ nic_io->ci_vaddr_base =
+ dma_alloc_coherent(hwdev->dev,
+ HINIC3_CI_TABLE_SIZE(nic_io->max_qps),
+ &nic_io->ci_dma_base,
+ GFP_KERNEL);
+ if (!nic_io->ci_vaddr_base) {
+ hinic3_free_db_addr(hwdev, nic_io->sqs_db_addr);
+ hinic3_free_db_addr(hwdev, nic_io->rqs_db_addr);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void hinic3_free_nicio_res(struct hinic3_nic_dev *nic_dev)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+
+ dma_free_coherent(hwdev->dev,
+ HINIC3_CI_TABLE_SIZE(nic_io->max_qps),
+ nic_io->ci_vaddr_base, nic_io->ci_dma_base);
+
+ hinic3_free_db_addr(hwdev, nic_io->sqs_db_addr);
+ hinic3_free_db_addr(hwdev, nic_io->rqs_db_addr);
+}
+
+static int hinic3_create_sq(struct hinic3_hwdev *hwdev,
+ struct hinic3_io_queue *sq,
+ u16 q_id, u32 sq_depth, u16 sq_msix_idx)
+{
+ int err;
+
+ /* sq used & hardware request init 1 */
+ sq->owner = 1;
+
+ sq->q_id = q_id;
+ sq->msix_entry_idx = sq_msix_idx;
+
+ err = hinic3_wq_create(hwdev, &sq->wq, sq_depth,
+ BIT(HINIC3_SQ_WQEBB_SHIFT));
+ if (err) {
+ dev_err(hwdev->dev, "Failed to create tx queue %u wq\n",
+ q_id);
+ return err;
+ }
+
+ return 0;
+}
+
+static int hinic3_create_rq(struct hinic3_hwdev *hwdev,
+ struct hinic3_io_queue *rq,
+ u16 q_id, u32 rq_depth, u16 rq_msix_idx)
+{
+ int err;
+
+ rq->q_id = q_id;
+ rq->msix_entry_idx = rq_msix_idx;
+
+ err = hinic3_wq_create(hwdev, &rq->wq, rq_depth,
+ BIT(HINIC3_RQ_WQEBB_SHIFT +
+ HINIC3_NORMAL_RQ_WQE));
+ if (err) {
+ dev_err(hwdev->dev, "Failed to create rx queue %u wq\n",
+ q_id);
+ return err;
+ }
+
+ return 0;
+}
+
+static int hinic3_create_qp(struct hinic3_hwdev *hwdev,
+ struct hinic3_io_queue *sq,
+ struct hinic3_io_queue *rq, u16 q_id, u32 sq_depth,
+ u32 rq_depth, u16 qp_msix_idx)
+{
+ int err;
+
+ err = hinic3_create_sq(hwdev, sq, q_id, sq_depth, qp_msix_idx);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to create sq, qid: %u\n",
+ q_id);
+ return err;
+ }
+
+ err = hinic3_create_rq(hwdev, rq, q_id, rq_depth, qp_msix_idx);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to create rq, qid: %u\n",
+ q_id);
+ goto err_destroy_sq_wq;
+ }
+
+ return 0;
+
+err_destroy_sq_wq:
+ hinic3_wq_destroy(hwdev, &sq->wq);
+
+ return err;
+}
+
+static void hinic3_destroy_qp(struct hinic3_hwdev *hwdev,
+ struct hinic3_io_queue *sq,
+ struct hinic3_io_queue *rq)
+{
+ hinic3_wq_destroy(hwdev, &sq->wq);
+ hinic3_wq_destroy(hwdev, &rq->wq);
+}
+
+int hinic3_alloc_qps(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_dyna_qp_params *qp_params)
+{
+ struct msix_entry *qps_msix_entries = nic_dev->qps_msix_entries;
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic3_io_queue *sqs;
+ struct hinic3_io_queue *rqs;
+ u16 q_id;
+ int err;
+
+ if (qp_params->num_qps > nic_io->max_qps || !qp_params->num_qps)
+ return -EINVAL;
+
+ sqs = kcalloc(qp_params->num_qps, sizeof(*sqs), GFP_KERNEL);
+ if (!sqs) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ rqs = kcalloc(qp_params->num_qps, sizeof(*rqs), GFP_KERNEL);
+ if (!rqs) {
+ err = -ENOMEM;
+ goto err_free_sqs;
+ }
+
+ for (q_id = 0; q_id < qp_params->num_qps; q_id++) {
+ err = hinic3_create_qp(hwdev, &sqs[q_id], &rqs[q_id], q_id,
+ qp_params->sq_depth, qp_params->rq_depth,
+ qps_msix_entries[q_id].entry);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to allocate qp %u, err: %d\n",
+ q_id, err);
+ goto err_destroy_qp;
+ }
+ }
+
+ qp_params->sqs = sqs;
+ qp_params->rqs = rqs;
+
+ return 0;
+
+err_destroy_qp:
+ while (q_id > 0) {
+ q_id--;
+ hinic3_destroy_qp(hwdev, &sqs[q_id], &rqs[q_id]);
+ }
+ kfree(rqs);
+err_free_sqs:
+ kfree(sqs);
+err_out:
+ return err;
+}
+
+void hinic3_free_qps(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_dyna_qp_params *qp_params)
+{
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ u16 q_id;
+
+ for (q_id = 0; q_id < qp_params->num_qps; q_id++)
+ hinic3_destroy_qp(hwdev, &qp_params->sqs[q_id],
+ &qp_params->rqs[q_id]);
+
+ kfree(qp_params->sqs);
+ kfree(qp_params->rqs);
+}
+
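+/* Attach the allocated queues to nic_io, point every SQ at its consumer index
+ * slot, and assign the SQ/RQ doorbell addresses.
+ */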
+void hinic3_init_qps(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_dyna_qp_params *qp_params)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct hinic3_io_queue *sqs = qp_params->sqs;
+ struct hinic3_io_queue *rqs = qp_params->rqs;
+ u16 q_id;
+
+ nic_io->num_qps = qp_params->num_qps;
+ nic_io->sq = qp_params->sqs;
+ nic_io->rq = qp_params->rqs;
+ for (q_id = 0; q_id < nic_io->num_qps; q_id++) {
+ sqs[q_id].cons_idx_addr =
+ (u16 *)HINIC3_CI_VADDR(nic_io->ci_vaddr_base, q_id);
+ /* clear ci value */
+ WRITE_ONCE(*sqs[q_id].cons_idx_addr, 0);
+
+ sqs[q_id].db_addr = nic_io->sqs_db_addr;
+ rqs[q_id].db_addr = nic_io->rqs_db_addr;
+ }
+}
+
+void hinic3_uninit_qps(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_dyna_qp_params *qp_params)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+
+ qp_params->sqs = nic_io->sq;
+ qp_params->rqs = nic_io->rq;
+ qp_params->num_qps = nic_io->num_qps;
+}
+
+static void hinic3_qp_prepare_cmdq_header(struct hinic3_qp_ctxt_hdr *qp_ctxt_hdr,
+ enum hinic3_qp_ctxt_type ctxt_type,
+ u16 num_queues, u16 q_id)
+{
+ qp_ctxt_hdr->queue_type = cpu_to_le16(ctxt_type);
+ qp_ctxt_hdr->num_queues = cpu_to_le16(num_queues);
+ qp_ctxt_hdr->start_qid = cpu_to_le16(q_id);
+ qp_ctxt_hdr->rsvd = 0;
+}
+
+static void hinic3_sq_prepare_ctxt(struct hinic3_io_queue *sq, u16 sq_id,
+ struct hinic3_sq_ctxt *sq_ctxt)
+{
+ u64 wq_page_addr, wq_page_pfn, wq_block_pfn;
+ u32 wq_block_pfn_hi, wq_block_pfn_lo;
+ u32 wq_page_pfn_hi, wq_page_pfn_lo;
+ u16 pi_start, ci_start;
+
+ ci_start = hinic3_get_sq_local_ci(sq);
+ pi_start = hinic3_get_sq_local_pi(sq);
+
+ wq_page_addr = hinic3_wq_get_first_wqe_page_addr(&sq->wq);
+
+ wq_page_pfn = WQ_PAGE_PFN(wq_page_addr);
+ wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
+ wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
+
+ wq_block_pfn = WQ_BLOCK_PFN(sq->wq.wq_block_paddr);
+ wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
+ wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
+
+ sq_ctxt->ci_pi =
+ cpu_to_le32(SQ_CTXT_CI_PI_SET(ci_start, CI_IDX) |
+ SQ_CTXT_CI_PI_SET(pi_start, PI_IDX));
+
+ sq_ctxt->drop_mode_sp =
+ cpu_to_le32(SQ_CTXT_MODE_SET(0, SP_FLAG) |
+ SQ_CTXT_MODE_SET(0, PKT_DROP));
+
+ sq_ctxt->wq_pfn_hi_owner =
+ cpu_to_le32(SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
+ SQ_CTXT_WQ_PAGE_SET(1, OWNER));
+
+ sq_ctxt->wq_pfn_lo = cpu_to_le32(wq_page_pfn_lo);
+
+ sq_ctxt->pkt_drop_thd =
+ cpu_to_le32(SQ_CTXT_PKT_DROP_THD_SET(HINIC3_DEFAULT_DROP_THD_ON, THD_ON) |
+ SQ_CTXT_PKT_DROP_THD_SET(HINIC3_DEFAULT_DROP_THD_OFF, THD_OFF));
+
+ sq_ctxt->global_sq_id =
+ cpu_to_le32(SQ_CTXT_GLOBAL_QUEUE_ID_SET((u32)sq_id,
+ GLOBAL_SQ_ID));
+
+ /* enable insert c-vlan by default */
+ sq_ctxt->vlan_ceq_attr =
+ cpu_to_le32(SQ_CTXT_VLAN_CEQ_SET(0, CEQ_EN) |
+ SQ_CTXT_VLAN_CEQ_SET(1, INSERT_MODE));
+
+ sq_ctxt->rsvd0 = 0;
+
+ sq_ctxt->pref_cache =
+ cpu_to_le32(SQ_CTXT_PREF_SET(SQ_WQ_PREFETCH_MIN, CACHE_MIN) |
+ SQ_CTXT_PREF_SET(SQ_WQ_PREFETCH_MAX, CACHE_MAX) |
+ SQ_CTXT_PREF_SET(SQ_WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD));
+
+ sq_ctxt->pref_ci_owner =
+ cpu_to_le32(SQ_CTXT_PREF_SET(SQ_CTXT_PREF_CI_HI(ci_start), CI_HI) |
+ SQ_CTXT_PREF_SET(1, OWNER));
+
+ sq_ctxt->pref_wq_pfn_hi_ci =
+ cpu_to_le32(SQ_CTXT_PREF_SET(ci_start, CI_LOW) |
+ SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI));
+
+ sq_ctxt->pref_wq_pfn_lo = cpu_to_le32(wq_page_pfn_lo);
+
+ sq_ctxt->wq_block_pfn_hi =
+ cpu_to_le32(SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI));
+
+ sq_ctxt->wq_block_pfn_lo = cpu_to_le32(wq_block_pfn_lo);
+}
+
+static void hinic3_rq_prepare_ctxt_get_wq_info(struct hinic3_io_queue *rq,
+ u32 *wq_page_pfn_hi,
+ u32 *wq_page_pfn_lo,
+ u32 *wq_block_pfn_hi,
+ u32 *wq_block_pfn_lo)
+{
+ u64 wq_page_addr, wq_page_pfn, wq_block_pfn;
+
+ wq_page_addr = hinic3_wq_get_first_wqe_page_addr(&rq->wq);
+
+ wq_page_pfn = WQ_PAGE_PFN(wq_page_addr);
+ *wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
+ *wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
+
+ wq_block_pfn = WQ_BLOCK_PFN(rq->wq.wq_block_paddr);
+ *wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
+ *wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
+}
+
+static void hinic3_rq_prepare_ctxt(struct hinic3_io_queue *rq,
+ struct hinic3_rq_ctxt *rq_ctxt)
+{
+ u32 wq_block_pfn_hi, wq_block_pfn_lo;
+ u32 wq_page_pfn_hi, wq_page_pfn_lo;
+ u16 pi_start, ci_start;
+
+ ci_start = (rq->wq.cons_idx & rq->wq.idx_mask) << HINIC3_NORMAL_RQ_WQE;
+ pi_start = (rq->wq.prod_idx & rq->wq.idx_mask) << HINIC3_NORMAL_RQ_WQE;
+
+ hinic3_rq_prepare_ctxt_get_wq_info(rq, &wq_page_pfn_hi, &wq_page_pfn_lo,
+ &wq_block_pfn_hi, &wq_block_pfn_lo);
+
+ rq_ctxt->ci_pi =
+ cpu_to_le32(RQ_CTXT_CI_PI_SET(ci_start, CI_IDX) |
+ RQ_CTXT_CI_PI_SET(pi_start, PI_IDX));
+
+ rq_ctxt->ceq_attr =
+ cpu_to_le32(RQ_CTXT_CEQ_ATTR_SET(0, EN) |
+ RQ_CTXT_CEQ_ATTR_SET(rq->msix_entry_idx, INTR));
+
+ rq_ctxt->wq_pfn_hi_type_owner =
+ cpu_to_le32(RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
+ RQ_CTXT_WQ_PAGE_SET(1, OWNER));
+
+ /* use 16Byte WQE */
+ rq_ctxt->wq_pfn_hi_type_owner |=
+ cpu_to_le32(RQ_CTXT_WQ_PAGE_SET(2, WQE_TYPE));
+ rq_ctxt->cqe_sge_len = cpu_to_le32(RQ_CTXT_CQE_LEN_SET(1, CQE_LEN));
+
+ rq_ctxt->wq_pfn_lo = cpu_to_le32(wq_page_pfn_lo);
+
+ rq_ctxt->pref_cache =
+ cpu_to_le32(RQ_CTXT_PREF_SET(RQ_WQ_PREFETCH_MIN, CACHE_MIN) |
+ RQ_CTXT_PREF_SET(RQ_WQ_PREFETCH_MAX, CACHE_MAX) |
+ RQ_CTXT_PREF_SET(RQ_WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD));
+
+ rq_ctxt->pref_ci_owner =
+ cpu_to_le32(RQ_CTXT_PREF_SET(SQ_CTXT_PREF_CI_HI(ci_start), CI_HI) |
+ RQ_CTXT_PREF_SET(1, OWNER));
+
+ rq_ctxt->pref_wq_pfn_hi_ci =
+ cpu_to_le32(RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI) |
+ RQ_CTXT_PREF_SET(ci_start, CI_LOW));
+
+ rq_ctxt->pref_wq_pfn_lo = cpu_to_le32(wq_page_pfn_lo);
+
+ rq_ctxt->wq_block_pfn_hi =
+ cpu_to_le32(RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI));
+
+ rq_ctxt->wq_block_pfn_lo = cpu_to_le32(wq_block_pfn_lo);
+}
+
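+/* Push SQ contexts to HW through the command queue, batching at most
+ * HINIC3_Q_CTXT_MAX contexts per command buffer.
+ */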
+static int init_sq_ctxts(struct hinic3_nic_dev *nic_dev)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic3_sq_ctxt_block *sq_ctxt_block;
+ u16 q_id, curr_id, max_ctxts, i;
+ struct hinic3_sq_ctxt *sq_ctxt;
+ struct hinic3_cmd_buf *cmd_buf;
+ struct hinic3_io_queue *sq;
+ __le64 out_param;
+ int err = 0;
+
+ cmd_buf = hinic3_alloc_cmd_buf(hwdev);
+ if (!cmd_buf) {
+ dev_err(hwdev->dev, "Failed to allocate cmd buf\n");
+ return -ENOMEM;
+ }
+
+ q_id = 0;
+ while (q_id < nic_io->num_qps) {
+ sq_ctxt_block = cmd_buf->buf;
+ sq_ctxt = sq_ctxt_block->sq_ctxt;
+
+ max_ctxts = (nic_io->num_qps - q_id) > HINIC3_Q_CTXT_MAX ?
+ HINIC3_Q_CTXT_MAX : (nic_io->num_qps - q_id);
+
+ hinic3_qp_prepare_cmdq_header(&sq_ctxt_block->cmdq_hdr,
+ HINIC3_QP_CTXT_TYPE_SQ, max_ctxts,
+ q_id);
+
+ for (i = 0; i < max_ctxts; i++) {
+ curr_id = q_id + i;
+ sq = &nic_io->sq[curr_id];
+ hinic3_sq_prepare_ctxt(sq, curr_id, &sq_ctxt[i]);
+ }
+
+ hinic3_cmdq_buf_swab32(sq_ctxt_block, sizeof(*sq_ctxt_block));
+
+ cmd_buf->size = cpu_to_le16(SQ_CTXT_SIZE(max_ctxts));
+ err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_UCODE_CMD_MODIFY_QUEUE_CTX,
+ cmd_buf, &out_param);
+ if (err || out_param) {
+ dev_err(hwdev->dev, "Failed to set SQ ctxts, err: %d, out_param: 0x%llx\n",
+ err, out_param);
+ err = -EFAULT;
+ break;
+ }
+
+ q_id += max_ctxts;
+ }
+
+ hinic3_free_cmd_buf(hwdev, cmd_buf);
+
+ return err;
+}
+
+static int init_rq_ctxts(struct hinic3_nic_dev *nic_dev)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic3_rq_ctxt_block *rq_ctxt_block;
+ u16 q_id, curr_id, max_ctxts, i;
+ struct hinic3_rq_ctxt *rq_ctxt;
+ struct hinic3_cmd_buf *cmd_buf;
+ struct hinic3_io_queue *rq;
+ __le64 out_param;
+ int err = 0;
+
+ cmd_buf = hinic3_alloc_cmd_buf(hwdev);
+ if (!cmd_buf) {
+ dev_err(hwdev->dev, "Failed to allocate cmd buf\n");
+ return -ENOMEM;
+ }
+
+ q_id = 0;
+ while (q_id < nic_io->num_qps) {
+ rq_ctxt_block = cmd_buf->buf;
+ rq_ctxt = rq_ctxt_block->rq_ctxt;
+
+ max_ctxts = (nic_io->num_qps - q_id) > HINIC3_Q_CTXT_MAX ?
+ HINIC3_Q_CTXT_MAX : (nic_io->num_qps - q_id);
+
+ hinic3_qp_prepare_cmdq_header(&rq_ctxt_block->cmdq_hdr,
+ HINIC3_QP_CTXT_TYPE_RQ, max_ctxts,
+ q_id);
+
+ for (i = 0; i < max_ctxts; i++) {
+ curr_id = q_id + i;
+ rq = &nic_io->rq[curr_id];
+ hinic3_rq_prepare_ctxt(rq, &rq_ctxt[i]);
+ }
+
+ hinic3_cmdq_buf_swab32(rq_ctxt_block, sizeof(*rq_ctxt_block));
+
+ cmd_buf->size = cpu_to_le16(RQ_CTXT_SIZE(max_ctxts));
+
+ err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_UCODE_CMD_MODIFY_QUEUE_CTX,
+ cmd_buf, &out_param);
+ if (err || out_param) {
+ dev_err(hwdev->dev, "Failed to set RQ ctxts, err: %d, out_param: 0x%llx\n",
+ err, out_param);
+ err = -EFAULT;
+ break;
+ }
+
+ q_id += max_ctxts;
+ }
+
+ hinic3_free_cmd_buf(hwdev, cmd_buf);
+
+ return err;
+}
+
+static int init_qp_ctxts(struct hinic3_nic_dev *nic_dev)
+{
+ int err;
+
+ err = init_sq_ctxts(nic_dev);
+ if (err)
+ return err;
+
+ err = init_rq_ctxts(nic_dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int clean_queue_offload_ctxt(struct hinic3_nic_dev *nic_dev,
+ enum hinic3_qp_ctxt_type ctxt_type)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic3_clean_queue_ctxt *ctxt_block;
+ struct hinic3_cmd_buf *cmd_buf;
+ __le64 out_param;
+ int err;
+
+ cmd_buf = hinic3_alloc_cmd_buf(hwdev);
+ if (!cmd_buf) {
+ dev_err(hwdev->dev, "Failed to allocate cmd buf\n");
+ return -ENOMEM;
+ }
+
+ ctxt_block = cmd_buf->buf;
+ ctxt_block->cmdq_hdr.num_queues = cpu_to_le16(nic_io->max_qps);
+ ctxt_block->cmdq_hdr.queue_type = cpu_to_le16(ctxt_type);
+ ctxt_block->cmdq_hdr.start_qid = 0;
+ ctxt_block->cmdq_hdr.rsvd = 0;
+ ctxt_block->rsvd = 0;
+
+ hinic3_cmdq_buf_swab32(ctxt_block, sizeof(*ctxt_block));
+
+ cmd_buf->size = cpu_to_le16(sizeof(*ctxt_block));
+
+ err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_UCODE_CMD_CLEAN_QUEUE_CTX,
+ cmd_buf, &out_param);
+ if (err || out_param) {
+ dev_err(hwdev->dev, "Failed to clean queue offload ctxts, err: %d,out_param: 0x%llx\n",
+ err, out_param);
+
+ err = -EFAULT;
+ }
+
+ hinic3_free_cmd_buf(hwdev, cmd_buf);
+
+ return err;
+}
+
+static int clean_qp_offload_ctxt(struct hinic3_nic_dev *nic_dev)
+{
+ /* clean LRO/TSO context space */
+ return clean_queue_offload_ctxt(nic_dev, HINIC3_QP_CTXT_TYPE_SQ) ||
+ clean_queue_offload_ctxt(nic_dev, HINIC3_QP_CTXT_TYPE_RQ);
+}
+
+/* Initialize QP contexts, set SQ CI attributes and arm all SQs */
+int hinic3_init_qp_ctxts(struct hinic3_nic_dev *nic_dev)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic3_sq_attr sq_attr;
+ u32 rq_depth;
+ u16 q_id;
+ int err;
+
+ err = init_qp_ctxts(nic_dev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init QP ctxts\n");
+ return err;
+ }
+
+ /* clean LRO/TSO context space */
+ err = clean_qp_offload_ctxt(nic_dev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to clean qp offload ctxts\n");
+ return err;
+ }
+
+ rq_depth = nic_io->rq[0].wq.q_depth << HINIC3_NORMAL_RQ_WQE;
+
+ err = hinic3_set_root_ctxt(hwdev, rq_depth, nic_io->sq[0].wq.q_depth,
+ nic_io->rx_buf_len);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set root context\n");
+ return err;
+ }
+
+ for (q_id = 0; q_id < nic_io->num_qps; q_id++) {
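+ /* ci_dma_base is passed to the CI attribute table shifted right
+ * by 2, i.e. in 4-byte units.
+ */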
+ sq_attr.ci_dma_base =
+ HINIC3_CI_PADDR(nic_io->ci_dma_base, q_id) >> 0x2;
+ sq_attr.pending_limit = HINIC3_DEFAULT_TX_CI_PENDING_LIMIT;
+ sq_attr.coalescing_time = HINIC3_DEFAULT_TX_CI_COALESCING_TIME;
+ sq_attr.intr_en = 1;
+ sq_attr.intr_idx = nic_io->sq[q_id].msix_entry_idx;
+ sq_attr.l2nic_sqn = q_id;
+ sq_attr.dma_attr_off = 0;
+ err = hinic3_set_ci_table(hwdev, &sq_attr);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set ci table\n");
+ goto err_clean_root_ctxt;
+ }
+ }
+
+ return 0;
+
+err_clean_root_ctxt:
+ hinic3_clean_root_ctxt(hwdev);
+
+ return err;
+}
+
+void hinic3_free_qp_ctxts(struct hinic3_nic_dev *nic_dev)
+{
+ hinic3_clean_root_ctxt(nic_dev->hwdev);
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h
new file mode 100644
index 000000000000..12eefabcf1db
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_NIC_IO_H_
+#define _HINIC3_NIC_IO_H_
+
+#include <linux/bitfield.h>
+
+#include "hinic3_wq.h"
+
+struct hinic3_nic_dev;
+
+#define HINIC3_SQ_WQEBB_SHIFT 4
+#define HINIC3_RQ_WQEBB_SHIFT 3
+#define HINIC3_SQ_WQEBB_SIZE BIT(HINIC3_SQ_WQEBB_SHIFT)
+
+/* ******************** RQ_CTRL ******************** */
+enum hinic3_rq_wqe_type {
+ HINIC3_NORMAL_RQ_WQE = 1,
+};
+
+/* ******************** SQ_CTRL ******************** */
+#define HINIC3_TX_MSS_DEFAULT 0x3E00
+#define HINIC3_TX_MSS_MIN 0x50
+#define HINIC3_MAX_SQ_SGE 18
+
+struct hinic3_io_queue {
+ struct hinic3_wq wq;
+ u8 owner;
+ u16 q_id;
+ u16 msix_entry_idx;
+ u8 __iomem *db_addr;
+ u16 *cons_idx_addr;
+} ____cacheline_aligned;
+
+static inline u16 hinic3_get_sq_local_ci(const struct hinic3_io_queue *sq)
+{
+ const struct hinic3_wq *wq = &sq->wq;
+
+ return wq->cons_idx & wq->idx_mask;
+}
+
+static inline u16 hinic3_get_sq_local_pi(const struct hinic3_io_queue *sq)
+{
+ const struct hinic3_wq *wq = &sq->wq;
+
+ return wq->prod_idx & wq->idx_mask;
+}
+
+static inline u16 hinic3_get_sq_hw_ci(const struct hinic3_io_queue *sq)
+{
+ const struct hinic3_wq *wq = &sq->wq;
+
+ return READ_ONCE(*sq->cons_idx_addr) & wq->idx_mask;
+}
+
+/* ******************** DB INFO ******************** */
+#define DB_INFO_QID_MASK GENMASK(12, 0)
+#define DB_INFO_CFLAG_MASK BIT(23)
+#define DB_INFO_COS_MASK GENMASK(26, 24)
+#define DB_INFO_TYPE_MASK GENMASK(31, 27)
+#define DB_INFO_SET(val, member) \
+ FIELD_PREP(DB_INFO_##member##_MASK, val)
+
+#define DB_PI_LOW_MASK 0xFFU
+#define DB_PI_HIGH_MASK 0xFFU
+#define DB_PI_HI_SHIFT 8
+#define DB_PI_LOW(pi) ((pi) & DB_PI_LOW_MASK)
+#define DB_PI_HIGH(pi) (((pi) >> DB_PI_HI_SHIFT) & DB_PI_HIGH_MASK)
+#define DB_ADDR(q, pi) ((u64 __iomem *)((q)->db_addr) + DB_PI_LOW(pi))
+#define DB_SRC_TYPE 1
+
+/* CFLAG_DATA_PATH */
+#define DB_CFLAG_DP_SQ 0
+#define DB_CFLAG_DP_RQ 1
+
+struct hinic3_nic_db {
+ __le32 db_info;
+ __le32 pi_hi;
+};
+
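+/* Ring a queue doorbell: the low 8 bits of the producer index select the
+ * doorbell address, the high bits are carried in the doorbell payload.
+ */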
+static inline void hinic3_write_db(struct hinic3_io_queue *queue, int cos,
+ u8 cflag, u16 pi)
+{
+ struct hinic3_nic_db db;
+
+ db.db_info =
+ cpu_to_le32(DB_INFO_SET(DB_SRC_TYPE, TYPE) |
+ DB_INFO_SET(cflag, CFLAG) |
+ DB_INFO_SET(cos, COS) |
+ DB_INFO_SET(queue->q_id, QID));
+ db.pi_hi = cpu_to_le32(DB_PI_HIGH(pi));
+
+ writeq(*((u64 *)&db), DB_ADDR(queue, pi));
+}
+
+struct hinic3_dyna_qp_params {
+ u16 num_qps;
+ u32 sq_depth;
+ u32 rq_depth;
+
+ struct hinic3_io_queue *sqs;
+ struct hinic3_io_queue *rqs;
+};
+
+struct hinic3_nic_io {
+ struct hinic3_io_queue *sq;
+ struct hinic3_io_queue *rq;
+
+ u16 num_qps;
+ u16 max_qps;
+
+ /* Base address for consumer index of all tx queues. Each queue is
+ * given a full cache line to hold its consumer index. HW updates
+ * current consumer index as it consumes tx WQEs.
+ */
+ void *ci_vaddr_base;
+ dma_addr_t ci_dma_base;
+
+ u8 __iomem *sqs_db_addr;
+ u8 __iomem *rqs_db_addr;
+
+ u16 rx_buf_len;
+ u64 feature_cap;
+};
+
+int hinic3_init_nic_io(struct hinic3_nic_dev *nic_dev);
+void hinic3_free_nic_io(struct hinic3_nic_dev *nic_dev);
+
+int hinic3_init_nicio_res(struct hinic3_nic_dev *nic_dev);
+void hinic3_free_nicio_res(struct hinic3_nic_dev *nic_dev);
+
+int hinic3_alloc_qps(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_dyna_qp_params *qp_params);
+void hinic3_free_qps(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_dyna_qp_params *qp_params);
+void hinic3_init_qps(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_dyna_qp_params *qp_params);
+void hinic3_uninit_qps(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_dyna_qp_params *qp_params);
+
+int hinic3_init_qp_ctxts(struct hinic3_nic_dev *nic_dev);
+void hinic3_free_qp_ctxts(struct hinic3_nic_dev *nic_dev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_pci_id_tbl.h b/drivers/net/ethernet/huawei/hinic3/hinic3_pci_id_tbl.h
new file mode 100644
index 000000000000..86c88d0bb4bd
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_pci_id_tbl.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_PCI_ID_TBL_H_
+#define _HINIC3_PCI_ID_TBL_H_
+
+#define PCI_DEV_ID_HINIC3_VF 0x375F
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c b/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c
new file mode 100644
index 000000000000..fab9011de9ad
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/device.h>
+
+#include "hinic3_hwdev.h"
+#include "hinic3_queue_common.h"
+
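+/* Compute the page/element geometry of a queue. Element size and elements per
+ * page are assumed to be powers of two so indexing can use shifts and masks.
+ */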
+void hinic3_queue_pages_init(struct hinic3_queue_pages *qpages, u32 q_depth,
+ u32 page_size, u32 elem_size)
+{
+ u32 elem_per_page;
+
+ elem_per_page = min(page_size / elem_size, q_depth);
+
+ qpages->pages = NULL;
+ qpages->page_size = page_size;
+ qpages->num_pages = max(q_depth / elem_per_page, 1);
+ qpages->elem_size_shift = ilog2(elem_size);
+ qpages->elem_per_pg_shift = ilog2(elem_per_page);
+}
+
+static void __queue_pages_free(struct hinic3_hwdev *hwdev,
+ struct hinic3_queue_pages *qpages, u32 pg_cnt)
+{
+ while (pg_cnt > 0) {
+ pg_cnt--;
+ hinic3_dma_free_coherent_align(hwdev->dev,
+ qpages->pages + pg_cnt);
+ }
+ kfree(qpages->pages);
+ qpages->pages = NULL;
+}
+
+void hinic3_queue_pages_free(struct hinic3_hwdev *hwdev,
+ struct hinic3_queue_pages *qpages)
+{
+ __queue_pages_free(hwdev, qpages, qpages->num_pages);
+}
+
+int hinic3_queue_pages_alloc(struct hinic3_hwdev *hwdev,
+ struct hinic3_queue_pages *qpages, u32 align)
+{
+ u32 pg_idx;
+ int err;
+
+ qpages->pages = kcalloc(qpages->num_pages, sizeof(qpages->pages[0]),
+ GFP_KERNEL);
+ if (!qpages->pages)
+ return -ENOMEM;
+
+ if (align == 0)
+ align = qpages->page_size;
+
+ for (pg_idx = 0; pg_idx < qpages->num_pages; pg_idx++) {
+ err = hinic3_dma_zalloc_coherent_align(hwdev->dev,
+ qpages->page_size,
+ align,
+ GFP_KERNEL,
+ qpages->pages + pg_idx);
+ if (err) {
+ __queue_pages_free(hwdev, qpages, pg_idx);
+ return err;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h b/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h
new file mode 100644
index 000000000000..ec4cae0a0929
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_QUEUE_COMMON_H_
+#define _HINIC3_QUEUE_COMMON_H_
+
+#include <linux/types.h>
+
+#include "hinic3_common.h"
+
+struct hinic3_hwdev;
+
+struct hinic3_queue_pages {
+ /* Array of DMA-able pages that actually holds the queue entries. */
+ struct hinic3_dma_addr_align *pages;
+ /* Page size in bytes. */
+ u32 page_size;
+ /* Number of pages, must be power of 2. */
+ u16 num_pages;
+ u8 elem_size_shift;
+ u8 elem_per_pg_shift;
+};
+
+void hinic3_queue_pages_init(struct hinic3_queue_pages *qpages, u32 q_depth,
+ u32 page_size, u32 elem_size);
+int hinic3_queue_pages_alloc(struct hinic3_hwdev *hwdev,
+ struct hinic3_queue_pages *qpages, u32 align);
+void hinic3_queue_pages_free(struct hinic3_hwdev *hwdev,
+ struct hinic3_queue_pages *qpages);
+
+/* Get pointer to queue entry at the specified index. Index does not have to be
+ * masked to queue depth, only least significant bits will be used. Also
+ * provides remaining elements in same page (including the first one) in case
+ * caller needs multiple entries.
+ */
+static inline void *get_q_element(const struct hinic3_queue_pages *qpages,
+ u32 idx, u32 *remaining_in_page)
+{
+ const struct hinic3_dma_addr_align *page;
+ u32 page_idx, elem_idx, elem_per_pg, ofs;
+ u8 shift;
+
+ shift = qpages->elem_per_pg_shift;
+ page_idx = (idx >> shift) & (qpages->num_pages - 1);
+ elem_per_pg = 1 << shift;
+ elem_idx = idx & (elem_per_pg - 1);
+ if (remaining_in_page)
+ *remaining_in_page = elem_per_pg - elem_idx;
+ ofs = elem_idx << qpages->elem_size_shift;
+ page = qpages->pages + page_idx;
+ return (char *)page->align_vaddr + ofs;
+}
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rss.c b/drivers/net/ethernet/huawei/hinic3/hinic3_rss.c
new file mode 100644
index 000000000000..4ff1b2f79838
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rss.c
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/ethtool.h>
+
+#include "hinic3_cmdq.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+#include "hinic3_nic_cfg.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_rss.h"
+
+static void hinic3_fillout_indir_tbl(struct net_device *netdev, u16 *indir)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 i, num_qps;
+
+ num_qps = nic_dev->q_params.num_qps;
+ for (i = 0; i < L2NIC_RSS_INDIR_SIZE; i++)
+ indir[i] = ethtool_rxfh_indir_default(i, num_qps);
+}
+
+static int hinic3_rss_cfg(struct hinic3_hwdev *hwdev, u8 rss_en, u16 num_qps)
+{
+ struct mgmt_msg_params msg_params = {};
+ struct l2nic_cmd_cfg_rss rss_cfg = {};
+ int err;
+
+ rss_cfg.func_id = hinic3_global_func_id(hwdev);
+ rss_cfg.rss_en = rss_en;
+ rss_cfg.rq_priority_number = 0;
+ rss_cfg.num_qps = num_qps;
+
+ mgmt_msg_params_init_default(&msg_params, &rss_cfg, sizeof(rss_cfg));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_CFG_RSS, &msg_params);
+ if (err || rss_cfg.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to set rss cfg, err: %d, status: 0x%x\n",
+ err, rss_cfg.msg_head.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void hinic3_init_rss_parameters(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ nic_dev->rss_hash_type = HINIC3_RSS_HASH_ENGINE_TYPE_XOR;
+ nic_dev->rss_type.tcp_ipv6_ext = 1;
+ nic_dev->rss_type.ipv6_ext = 1;
+ nic_dev->rss_type.tcp_ipv6 = 1;
+ nic_dev->rss_type.ipv6 = 1;
+ nic_dev->rss_type.tcp_ipv4 = 1;
+ nic_dev->rss_type.ipv4 = 1;
+ nic_dev->rss_type.udp_ipv6 = 1;
+ nic_dev->rss_type.udp_ipv4 = 1;
+}
+
+static void decide_num_qps(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ unsigned int dev_cpus;
+
+ dev_cpus = netif_get_num_default_rss_queues();
+ nic_dev->q_params.num_qps = min(dev_cpus, nic_dev->max_qps);
+}
+
+static int alloc_rss_resource(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ nic_dev->rss_hkey = kmalloc_array(L2NIC_RSS_KEY_SIZE,
+ sizeof(nic_dev->rss_hkey[0]),
+ GFP_KERNEL);
+ if (!nic_dev->rss_hkey)
+ return -ENOMEM;
+
+ netdev_rss_key_fill(nic_dev->rss_hkey, L2NIC_RSS_KEY_SIZE);
+
+ nic_dev->rss_indir = kcalloc(L2NIC_RSS_INDIR_SIZE, sizeof(u16),
+ GFP_KERNEL);
+ if (!nic_dev->rss_indir) {
+ kfree(nic_dev->rss_hkey);
+ nic_dev->rss_hkey = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int hinic3_rss_set_indir_tbl(struct hinic3_hwdev *hwdev,
+ const u16 *indir_table)
+{
+ struct l2nic_cmd_rss_set_indir_tbl *indir_tbl;
+ struct hinic3_cmd_buf *cmd_buf;
+ __le64 out_param;
+ int err;
+ u32 i;
+
+ cmd_buf = hinic3_alloc_cmd_buf(hwdev);
+ if (!cmd_buf) {
+ dev_err(hwdev->dev, "Failed to allocate cmd buf\n");
+ return -ENOMEM;
+ }
+
+ cmd_buf->size = cpu_to_le16(sizeof(struct l2nic_cmd_rss_set_indir_tbl));
+ indir_tbl = cmd_buf->buf;
+ memset(indir_tbl, 0, sizeof(*indir_tbl));
+
+ for (i = 0; i < L2NIC_RSS_INDIR_SIZE; i++)
+ indir_tbl->entry[i] = cpu_to_le16(indir_table[i]);
+
+ hinic3_cmdq_buf_swab32(indir_tbl, sizeof(*indir_tbl));
+
+ err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_UCODE_CMD_SET_RSS_INDIR_TBL,
+ cmd_buf, &out_param);
+ if (err || out_param) {
+ dev_err(hwdev->dev, "Failed to set rss indir table\n");
+ err = -EFAULT;
+ }
+
+ hinic3_free_cmd_buf(hwdev, cmd_buf);
+
+ return err;
+}
+
+static int hinic3_set_rss_type(struct hinic3_hwdev *hwdev,
+ struct hinic3_rss_type rss_type)
+{
+ struct l2nic_cmd_set_rss_ctx_tbl ctx_tbl = {};
+ struct mgmt_msg_params msg_params = {};
+ u32 ctx;
+ int err;
+
+ ctx_tbl.func_id = hinic3_global_func_id(hwdev);
+ ctx = L2NIC_RSS_TYPE_SET(1, VALID) |
+ L2NIC_RSS_TYPE_SET(rss_type.ipv4, IPV4) |
+ L2NIC_RSS_TYPE_SET(rss_type.ipv6, IPV6) |
+ L2NIC_RSS_TYPE_SET(rss_type.ipv6_ext, IPV6_EXT) |
+ L2NIC_RSS_TYPE_SET(rss_type.tcp_ipv4, TCP_IPV4) |
+ L2NIC_RSS_TYPE_SET(rss_type.tcp_ipv6, TCP_IPV6) |
+ L2NIC_RSS_TYPE_SET(rss_type.tcp_ipv6_ext, TCP_IPV6_EXT) |
+ L2NIC_RSS_TYPE_SET(rss_type.udp_ipv4, UDP_IPV4) |
+ L2NIC_RSS_TYPE_SET(rss_type.udp_ipv6, UDP_IPV6);
+ ctx_tbl.context = ctx;
+
+ mgmt_msg_params_init_default(&msg_params, &ctx_tbl, sizeof(ctx_tbl));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_SET_RSS_CTX_TBL, &msg_params);
+
+ if (ctx_tbl.msg_head.status == MGMT_STATUS_CMD_UNSUPPORTED) {
+ return MGMT_STATUS_CMD_UNSUPPORTED;
+ } else if (err || ctx_tbl.msg_head.status) {
+ dev_err(hwdev->dev, "mgmt Failed to set rss context offload, err: %d, status: 0x%x\n",
+ err, ctx_tbl.msg_head.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hinic3_rss_cfg_hash_type(struct hinic3_hwdev *hwdev, u8 opcode,
+ enum hinic3_rss_hash_type *type)
+{
+ struct l2nic_cmd_cfg_rss_engine hash_type_cmd = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ hash_type_cmd.func_id = hinic3_global_func_id(hwdev);
+ hash_type_cmd.opcode = opcode;
+
+ if (opcode == MGMT_MSG_CMD_OP_SET)
+ hash_type_cmd.hash_engine = *type;
+
+ mgmt_msg_params_init_default(&msg_params, &hash_type_cmd,
+ sizeof(hash_type_cmd));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_CFG_RSS_HASH_ENGINE,
+ &msg_params);
+ if (err || hash_type_cmd.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to %s hash engine, err: %d, status: 0x%x\n",
+ opcode == MGMT_MSG_CMD_OP_SET ? "set" : "get",
+ err, hash_type_cmd.msg_head.status);
+ return -EIO;
+ }
+
+ if (opcode == MGMT_MSG_CMD_OP_GET)
+ *type = hash_type_cmd.hash_engine;
+
+ return 0;
+}
+
+static int hinic3_rss_set_hash_type(struct hinic3_hwdev *hwdev,
+ enum hinic3_rss_hash_type type)
+{
+ return hinic3_rss_cfg_hash_type(hwdev, MGMT_MSG_CMD_OP_SET, &type);
+}
+
+static int hinic3_config_rss_hw_resource(struct net_device *netdev,
+ u16 *indir_tbl)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ err = hinic3_rss_set_indir_tbl(nic_dev->hwdev, indir_tbl);
+ if (err)
+ return err;
+
+ err = hinic3_set_rss_type(nic_dev->hwdev, nic_dev->rss_type);
+ if (err)
+ return err;
+
+ return hinic3_rss_set_hash_type(nic_dev->hwdev, nic_dev->rss_hash_type);
+}
+
+static int hinic3_rss_cfg_hash_key(struct hinic3_hwdev *hwdev, u8 opcode,
+ u8 *key)
+{
+ struct l2nic_cmd_cfg_rss_hash_key hash_key = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ hash_key.func_id = hinic3_global_func_id(hwdev);
+ hash_key.opcode = opcode;
+
+ if (opcode == MGMT_MSG_CMD_OP_SET)
+ memcpy(hash_key.key, key, L2NIC_RSS_KEY_SIZE);
+
+ mgmt_msg_params_init_default(&msg_params, &hash_key, sizeof(hash_key));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_CFG_RSS_HASH_KEY, &msg_params);
+ if (err || hash_key.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to %s hash key, err: %d, status: 0x%x\n",
+ opcode == MGMT_MSG_CMD_OP_SET ? "set" : "get",
+ err, hash_key.msg_head.status);
+ return -EINVAL;
+ }
+
+ if (opcode == MGMT_MSG_CMD_OP_GET)
+ memcpy(key, hash_key.key, L2NIC_RSS_KEY_SIZE);
+
+ return 0;
+}
+
+static int hinic3_rss_set_hash_key(struct hinic3_hwdev *hwdev, u8 *key)
+{
+ return hinic3_rss_cfg_hash_key(hwdev, MGMT_MSG_CMD_OP_SET, key);
+}
+
+static int hinic3_set_hw_rss_parameters(struct net_device *netdev, u8 rss_en)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ err = hinic3_rss_set_hash_key(nic_dev->hwdev, nic_dev->rss_hkey);
+ if (err)
+ return err;
+
+ hinic3_fillout_indir_tbl(netdev, nic_dev->rss_indir);
+
+ err = hinic3_config_rss_hw_resource(netdev, nic_dev->rss_indir);
+ if (err)
+ return err;
+
+ err = hinic3_rss_cfg(nic_dev->hwdev, rss_en, nic_dev->q_params.num_qps);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int hinic3_rss_init(struct net_device *netdev)
+{
+ return hinic3_set_hw_rss_parameters(netdev, 1);
+}
+
+void hinic3_rss_uninit(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ hinic3_rss_cfg(nic_dev->hwdev, 0, 0);
+}
+
+void hinic3_clear_rss_config(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ kfree(nic_dev->rss_hkey);
+ nic_dev->rss_hkey = NULL;
+
+ kfree(nic_dev->rss_indir);
+ nic_dev->rss_indir = NULL;
+}
+
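+/* Enable RSS only when the device exposes more than one queue pair and
+ * advertises the RSS feature; otherwise fall back to a single queue pair.
+ */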
+void hinic3_try_to_enable_rss(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ int err;
+
+ nic_dev->max_qps = hinic3_func_max_qnum(hwdev);
+ if (nic_dev->max_qps <= 1 ||
+ !hinic3_test_support(nic_dev, HINIC3_NIC_F_RSS))
+ goto err_reset_q_params;
+
+ err = alloc_rss_resource(netdev);
+ if (err) {
+ nic_dev->max_qps = 1;
+ goto err_reset_q_params;
+ }
+
+ set_bit(HINIC3_RSS_ENABLE, &nic_dev->flags);
+ decide_num_qps(netdev);
+ hinic3_init_rss_parameters(netdev);
+ err = hinic3_set_hw_rss_parameters(netdev, 0);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set hardware rss parameters\n");
+ hinic3_clear_rss_config(netdev);
+ nic_dev->max_qps = 1;
+ goto err_reset_q_params;
+ }
+
+ return;
+
+err_reset_q_params:
+ clear_bit(HINIC3_RSS_ENABLE, &nic_dev->flags);
+ nic_dev->q_params.num_qps = nic_dev->max_qps;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rss.h b/drivers/net/ethernet/huawei/hinic3/hinic3_rss.h
new file mode 100644
index 000000000000..78d82c2aca06
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rss.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_RSS_H_
+#define _HINIC3_RSS_H_
+
+#include <linux/netdevice.h>
+
+int hinic3_rss_init(struct net_device *netdev);
+void hinic3_rss_uninit(struct net_device *netdev);
+void hinic3_try_to_enable_rss(struct net_device *netdev);
+void hinic3_clear_rss_config(struct net_device *netdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
new file mode 100644
index 000000000000..16c00c3bb1ed
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/netdevice.h>
+#include <net/gro.h>
+#include <net/page_pool/helpers.h>
+
+#include "hinic3_hwdev.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_nic_io.h"
+#include "hinic3_rx.h"
+
+#define HINIC3_RX_HDR_SIZE 256
+#define HINIC3_RX_BUFFER_WRITE 16
+
+#define HINIC3_RX_TCP_PKT 0x3
+#define HINIC3_RX_UDP_PKT 0x4
+#define HINIC3_RX_SCTP_PKT 0x7
+
+#define HINIC3_RX_IPV4_PKT 0
+#define HINIC3_RX_IPV6_PKT 1
+#define HINIC3_RX_INVALID_IP_TYPE 2
+
+#define HINIC3_RX_PKT_FORMAT_NON_TUNNEL 0
+#define HINIC3_RX_PKT_FORMAT_VXLAN 1
+
+#define HINIC3_LRO_PKT_HDR_LEN_IPV4 66
+#define HINIC3_LRO_PKT_HDR_LEN_IPV6 86
+#define HINIC3_LRO_PKT_HDR_LEN(cqe) \
+ (RQ_CQE_OFFOLAD_TYPE_GET((cqe)->offload_type, IP_TYPE) == \
+ HINIC3_RX_IPV6_PKT ? HINIC3_LRO_PKT_HDR_LEN_IPV6 : \
+ HINIC3_LRO_PKT_HDR_LEN_IPV4)
+
+int hinic3_alloc_rxqs(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct pci_dev *pdev = nic_dev->pdev;
+ u16 num_rxqs = nic_dev->max_qps;
+ struct hinic3_rxq *rxq;
+ u16 q_id;
+
+ nic_dev->rxqs = kcalloc(num_rxqs, sizeof(*nic_dev->rxqs), GFP_KERNEL);
+ if (!nic_dev->rxqs)
+ return -ENOMEM;
+
+ for (q_id = 0; q_id < num_rxqs; q_id++) {
+ rxq = &nic_dev->rxqs[q_id];
+ rxq->netdev = netdev;
+ rxq->dev = &pdev->dev;
+ rxq->q_id = q_id;
+ rxq->buf_len = nic_dev->rx_buf_len;
+ rxq->buf_len_shift = ilog2(nic_dev->rx_buf_len);
+ rxq->q_depth = nic_dev->q_params.rq_depth;
+ rxq->q_mask = nic_dev->q_params.rq_depth - 1;
+ }
+
+ return 0;
+}
+
+void hinic3_free_rxqs(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ kfree(nic_dev->rxqs);
+}
+
+static int rx_alloc_mapped_page(struct page_pool *page_pool,
+ struct hinic3_rx_info *rx_info, u16 buf_len)
+{
+ struct page *page;
+ u32 page_offset;
+
+ if (likely(rx_info->page))
+ return 0;
+
+ page = page_pool_dev_alloc_frag(page_pool, &page_offset, buf_len);
+ if (unlikely(!page))
+ return -ENOMEM;
+
+ rx_info->page = page;
+ rx_info->page_offset = page_offset;
+
+ return 0;
+}
+
+/* Associate fixed completion element to every wqe in the rq. Every rq wqe will
+ * always post completion to the same place.
+ */
+static void rq_associate_cqes(struct hinic3_rxq *rxq)
+{
+ struct hinic3_queue_pages *qpages;
+ struct hinic3_rq_wqe *rq_wqe;
+ dma_addr_t cqe_dma;
+ u32 i;
+
+ qpages = &rxq->rq->wq.qpages;
+
+ for (i = 0; i < rxq->q_depth; i++) {
+ rq_wqe = get_q_element(qpages, i, NULL);
+ cqe_dma = rxq->cqe_start_paddr +
+ i * sizeof(struct hinic3_rq_cqe);
+ rq_wqe->cqe_hi_addr = cpu_to_le32(upper_32_bits(cqe_dma));
+ rq_wqe->cqe_lo_addr = cpu_to_le32(lower_32_bits(cqe_dma));
+ }
+}
+
+static void rq_wqe_buf_set(struct hinic3_io_queue *rq, u32 wqe_idx,
+ dma_addr_t dma_addr, u16 len)
+{
+ struct hinic3_rq_wqe *rq_wqe;
+
+ rq_wqe = get_q_element(&rq->wq.qpages, wqe_idx, NULL);
+ rq_wqe->buf_hi_addr = cpu_to_le32(upper_32_bits(dma_addr));
+ rq_wqe->buf_lo_addr = cpu_to_le32(lower_32_bits(dma_addr));
+}
+
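+/* Refill free RQ WQEs with freshly mapped pages and ring the RQ doorbell for
+ * the buffers that were posted. Returns the number of buffers posted.
+ */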
+static u32 hinic3_rx_fill_buffers(struct hinic3_rxq *rxq)
+{
+ u32 i, free_wqebbs = rxq->delta - 1;
+ struct hinic3_rx_info *rx_info;
+ dma_addr_t dma_addr;
+ int err;
+
+ for (i = 0; i < free_wqebbs; i++) {
+ rx_info = &rxq->rx_info[rxq->next_to_update];
+
+ err = rx_alloc_mapped_page(rxq->page_pool, rx_info,
+ rxq->buf_len);
+ if (unlikely(err))
+ break;
+
+ dma_addr = page_pool_get_dma_addr(rx_info->page) +
+ rx_info->page_offset;
+ rq_wqe_buf_set(rxq->rq, rxq->next_to_update, dma_addr,
+ rxq->buf_len);
+ rxq->next_to_update = (rxq->next_to_update + 1) & rxq->q_mask;
+ }
+
+ if (likely(i)) {
+ hinic3_write_db(rxq->rq, rxq->q_id & 3, DB_CFLAG_DP_RQ,
+ rxq->next_to_update << HINIC3_NORMAL_RQ_WQE);
+ rxq->delta -= i;
+ rxq->next_to_alloc = rxq->next_to_update;
+ }
+
+ return i;
+}
+
+static u32 hinic3_alloc_rx_buffers(struct hinic3_dyna_rxq_res *rqres,
+ u32 rq_depth, u16 buf_len)
+{
+ u32 free_wqebbs = rq_depth - 1;
+ u32 idx;
+ int err;
+
+ for (idx = 0; idx < free_wqebbs; idx++) {
+ err = rx_alloc_mapped_page(rqres->page_pool,
+ &rqres->rx_info[idx], buf_len);
+ if (err)
+ break;
+ }
+
+ return idx;
+}
+
+static void hinic3_free_rx_buffers(struct hinic3_dyna_rxq_res *rqres,
+ u32 q_depth)
+{
+ struct hinic3_rx_info *rx_info;
+ u32 i;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < q_depth; i++) {
+ rx_info = &rqres->rx_info[i];
+
+ if (rx_info->page) {
+ page_pool_put_full_page(rqres->page_pool,
+ rx_info->page, false);
+ rx_info->page = NULL;
+ }
+ }
+}
+
+static void hinic3_add_rx_frag(struct hinic3_rxq *rxq,
+ struct hinic3_rx_info *rx_info,
+ struct sk_buff *skb, u32 size)
+{
+ struct page *page;
+ u8 *va;
+
+ page = rx_info->page;
+ va = (u8 *)page_address(page) + rx_info->page_offset;
+ net_prefetch(va);
+
+ page_pool_dma_sync_for_cpu(rxq->page_pool, page, rx_info->page_offset,
+ rxq->buf_len);
+
+ if (size <= HINIC3_RX_HDR_SIZE && !skb_is_nonlinear(skb)) {
+ memcpy(__skb_put(skb, size), va,
+ ALIGN(size, sizeof(long)));
+ page_pool_put_full_page(rxq->page_pool, page, false);
+
+ return;
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rx_info->page_offset, size, rxq->buf_len);
+ skb_mark_for_recycle(skb);
+}
+
+static void packaging_skb(struct hinic3_rxq *rxq, struct sk_buff *skb,
+ u32 sge_num, u32 pkt_len)
+{
+ struct hinic3_rx_info *rx_info;
+ u32 temp_pkt_len = pkt_len;
+ u32 temp_sge_num = sge_num;
+ u32 sw_ci;
+ u32 size;
+
+ sw_ci = rxq->cons_idx & rxq->q_mask;
+ while (temp_sge_num) {
+ rx_info = &rxq->rx_info[sw_ci];
+ sw_ci = (sw_ci + 1) & rxq->q_mask;
+ if (unlikely(temp_pkt_len > rxq->buf_len)) {
+ size = rxq->buf_len;
+ temp_pkt_len -= rxq->buf_len;
+ } else {
+ size = temp_pkt_len;
+ }
+
+ hinic3_add_rx_frag(rxq, rx_info, skb, size);
+
+ /* clear contents of buffer_info */
+ rx_info->page = NULL;
+ temp_sge_num--;
+ }
+}
+
+static u32 hinic3_get_sge_num(struct hinic3_rxq *rxq, u32 pkt_len)
+{
+ u32 sge_num;
+
+ sge_num = pkt_len >> rxq->buf_len_shift;
+ sge_num += (pkt_len & (rxq->buf_len - 1)) ? 1 : 0;
+
+ return sge_num;
+}
+
+static struct sk_buff *hinic3_fetch_rx_buffer(struct hinic3_rxq *rxq,
+ u32 pkt_len)
+{
+ struct sk_buff *skb;
+ u32 sge_num;
+
+ skb = napi_alloc_skb(&rxq->irq_cfg->napi, HINIC3_RX_HDR_SIZE);
+ if (unlikely(!skb))
+ return NULL;
+
+ sge_num = hinic3_get_sge_num(rxq, pkt_len);
+
+ net_prefetchw(skb->data);
+ packaging_skb(rxq, skb, sge_num, pkt_len);
+
+ rxq->cons_idx += sge_num;
+ rxq->delta += sge_num;
+
+ return skb;
+}
+
+static void hinic3_pull_tail(struct sk_buff *skb)
+{
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+ unsigned int pull_len;
+ unsigned char *va;
+
+ va = skb_frag_address(frag);
+
+ /* we need the header to contain the greater of either ETH_HLEN or
+ * 60 bytes if the skb->len is less than 60 for skb_pad.
+ */
+ pull_len = eth_get_headlen(skb->dev, va, HINIC3_RX_HDR_SIZE);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+
+ /* update all of the pointers */
+ skb_frag_size_sub(frag, pull_len);
+ skb_frag_off_add(frag, pull_len);
+
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+}
+
+static void hinic3_rx_csum(struct hinic3_rxq *rxq, u32 offload_type,
+ u32 status, struct sk_buff *skb)
+{
+ u32 pkt_fmt = RQ_CQE_OFFOLAD_TYPE_GET(offload_type, TUNNEL_PKT_FORMAT);
+ u32 pkt_type = RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_TYPE);
+ u32 ip_type = RQ_CQE_OFFOLAD_TYPE_GET(offload_type, IP_TYPE);
+ u32 csum_err = RQ_CQE_STATUS_GET(status, CSUM_ERR);
+ struct net_device *netdev = rxq->netdev;
+
+ if (!(netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ if (unlikely(csum_err)) {
+ /* pkt type is recognized by HW, and csum is wrong */
+ skb->ip_summed = CHECKSUM_NONE;
+ return;
+ }
+
+ if (ip_type == HINIC3_RX_INVALID_IP_TYPE ||
+ !(pkt_fmt == HINIC3_RX_PKT_FORMAT_NON_TUNNEL ||
+ pkt_fmt == HINIC3_RX_PKT_FORMAT_VXLAN)) {
+ skb->ip_summed = CHECKSUM_NONE;
+ return;
+ }
+
+ switch (pkt_type) {
+ case HINIC3_RX_TCP_PKT:
+ case HINIC3_RX_UDP_PKT:
+ case HINIC3_RX_SCTP_PKT:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ break;
+ default:
+ skb->ip_summed = CHECKSUM_NONE;
+ break;
+ }
+}
+
+static void hinic3_lro_set_gso_params(struct sk_buff *skb, u16 num_lro)
+{
+ struct ethhdr *eth = (struct ethhdr *)(skb->data);
+ __be16 proto;
+
+ proto = __vlan_get_protocol(skb, eth->h_proto, NULL);
+
+ skb_shinfo(skb)->gso_size = DIV_ROUND_UP(skb->len - skb_headlen(skb),
+ num_lro);
+ skb_shinfo(skb)->gso_type = proto == htons(ETH_P_IP) ?
+ SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
+ skb_shinfo(skb)->gso_segs = num_lro;
+}
+
+static int recv_one_pkt(struct hinic3_rxq *rxq, struct hinic3_rq_cqe *rx_cqe,
+ u32 pkt_len, u32 vlan_len, u32 status)
+{
+ struct net_device *netdev = rxq->netdev;
+ struct sk_buff *skb;
+ u32 offload_type;
+ u16 num_lro;
+
+ skb = hinic3_fetch_rx_buffer(rxq, pkt_len);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ /* place header in linear portion of buffer */
+ if (skb_is_nonlinear(skb))
+ hinic3_pull_tail(skb);
+
+ offload_type = le32_to_cpu(rx_cqe->offload_type);
+ hinic3_rx_csum(rxq, offload_type, status, skb);
+
+ num_lro = RQ_CQE_STATUS_GET(status, NUM_LRO);
+ if (num_lro)
+ hinic3_lro_set_gso_params(skb, num_lro);
+
+ skb_record_rx_queue(skb, rxq->q_id);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (skb_has_frag_list(skb)) {
+ napi_gro_flush(&rxq->irq_cfg->napi, false);
+ netif_receive_skb(skb);
+ } else {
+ napi_gro_receive(&rxq->irq_cfg->napi, skb);
+ }
+
+ return 0;
+}
+
+int hinic3_alloc_rxqs_res(struct net_device *netdev, u16 num_rq,
+ u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res)
+{
+ u64 cqe_mem_size = sizeof(struct hinic3_rq_cqe) * rq_depth;
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct page_pool_params pp_params = {};
+ struct hinic3_dyna_rxq_res *rqres;
+ u32 pkt_idx;
+ int idx;
+
+ for (idx = 0; idx < num_rq; idx++) {
+ rqres = &rxqs_res[idx];
+ rqres->rx_info = kcalloc(rq_depth, sizeof(*rqres->rx_info),
+ GFP_KERNEL);
+ if (!rqres->rx_info)
+ goto err_free_rqres;
+
+ rqres->cqe_start_vaddr =
+ dma_alloc_coherent(&nic_dev->pdev->dev, cqe_mem_size,
+ &rqres->cqe_start_paddr, GFP_KERNEL);
+ if (!rqres->cqe_start_vaddr) {
+ netdev_err(netdev, "Failed to alloc rxq%d rx cqe\n",
+ idx);
+ goto err_free_rx_info;
+ }
+
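+ /* Size the page pool to back a full ring of rx_buf_len buffers */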
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.pool_size = rq_depth * nic_dev->rx_buf_len /
+ PAGE_SIZE;
+ pp_params.nid = dev_to_node(&nic_dev->pdev->dev);
+ pp_params.dev = &nic_dev->pdev->dev;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+ pp_params.max_len = PAGE_SIZE;
+ rqres->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rqres->page_pool)) {
+ netdev_err(netdev, "Failed to create rxq%d page pool\n",
+ idx);
+ goto err_free_cqe;
+ }
+
+ pkt_idx = hinic3_alloc_rx_buffers(rqres, rq_depth,
+ nic_dev->rx_buf_len);
+ if (!pkt_idx) {
+ netdev_err(netdev, "Failed to alloc rxq%d rx buffers\n",
+ idx);
+ goto err_destroy_page_pool;
+ }
+ rqres->next_to_alloc = pkt_idx;
+ }
+
+ return 0;
+
+err_destroy_page_pool:
+ page_pool_destroy(rqres->page_pool);
+err_free_cqe:
+ dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size,
+ rqres->cqe_start_vaddr,
+ rqres->cqe_start_paddr);
+err_free_rx_info:
+ kfree(rqres->rx_info);
+err_free_rqres:
+ hinic3_free_rxqs_res(netdev, idx, rq_depth, rxqs_res);
+
+ return -ENOMEM;
+}
+
+void hinic3_free_rxqs_res(struct net_device *netdev, u16 num_rq,
+ u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res)
+{
+ u64 cqe_mem_size = sizeof(struct hinic3_rq_cqe) * rq_depth;
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_dyna_rxq_res *rqres;
+ int idx;
+
+ for (idx = 0; idx < num_rq; idx++) {
+ rqres = &rxqs_res[idx];
+
+ hinic3_free_rx_buffers(rqres, rq_depth);
+ page_pool_destroy(rqres->page_pool);
+ dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size,
+ rqres->cqe_start_vaddr,
+ rqres->cqe_start_paddr);
+ kfree(rqres->rx_info);
+ }
+}
+
+int hinic3_configure_rxqs(struct net_device *netdev, u16 num_rq,
+ u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_dyna_rxq_res *rqres;
+ struct msix_entry *msix_entry;
+ struct hinic3_rxq *rxq;
+ u16 q_id;
+ u32 pkts;
+
+ for (q_id = 0; q_id < num_rq; q_id++) {
+ rxq = &nic_dev->rxqs[q_id];
+ rqres = &rxqs_res[q_id];
+ msix_entry = &nic_dev->qps_msix_entries[q_id];
+
+ rxq->irq_id = msix_entry->vector;
+ rxq->msix_entry_idx = msix_entry->entry;
+ rxq->next_to_update = 0;
+ rxq->next_to_alloc = rqres->next_to_alloc;
+ rxq->q_depth = rq_depth;
+ rxq->delta = rxq->q_depth;
+ rxq->q_mask = rxq->q_depth - 1;
+ rxq->cons_idx = 0;
+
+ rxq->cqe_arr = rqres->cqe_start_vaddr;
+ rxq->cqe_start_paddr = rqres->cqe_start_paddr;
+ rxq->rx_info = rqres->rx_info;
+ rxq->page_pool = rqres->page_pool;
+
+ rxq->rq = &nic_dev->nic_io->rq[rxq->q_id];
+
+ rq_associate_cqes(rxq);
+
+ pkts = hinic3_rx_fill_buffers(rxq);
+ if (!pkts) {
+ netdev_err(netdev, "Failed to fill Rx buffer\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(rxq->netdev);
+ u32 sw_ci, status, pkt_len, vlan_len;
+ struct hinic3_rq_cqe *rx_cqe;
+ u32 num_wqe = 0;
+ int nr_pkts = 0;
+ u16 num_lro;
+
+ while (likely(nr_pkts < budget)) {
+ sw_ci = rxq->cons_idx & rxq->q_mask;
+ rx_cqe = rxq->cqe_arr + sw_ci;
+ status = le32_to_cpu(rx_cqe->status);
+ if (!RQ_CQE_STATUS_GET(status, RXDONE))
+ break;
+
+ /* make sure we read rx_done before packet length */
+ rmb();
+
+ vlan_len = le32_to_cpu(rx_cqe->vlan_len);
+ pkt_len = RQ_CQE_SGE_GET(vlan_len, LEN);
+ if (recv_one_pkt(rxq, rx_cqe, pkt_len, vlan_len, status))
+ break;
+
+ nr_pkts++;
+ num_lro = RQ_CQE_STATUS_GET(status, NUM_LRO);
+ if (num_lro)
+ num_wqe += hinic3_get_sge_num(rxq, pkt_len);
+
+ rx_cqe->status = 0;
+
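+ /* Stop polling early once LRO packets have consumed enough wqes,
+ * so the ring can be replenished promptly.
+ */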
+ if (num_wqe >= nic_dev->lro_replenish_thld)
+ break;
+ }
+
+ if (rxq->delta >= HINIC3_RX_BUFFER_WRITE)
+ hinic3_rx_fill_buffers(rxq);
+
+ return nr_pkts;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
new file mode 100644
index 000000000000..44ae841a3648
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_RX_H_
+#define _HINIC3_RX_H_
+
+#include <linux/bitfield.h>
+#include <linux/netdevice.h>
+
+#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_MASK GENMASK(4, 0)
+#define RQ_CQE_OFFOLAD_TYPE_IP_TYPE_MASK GENMASK(6, 5)
+#define RQ_CQE_OFFOLAD_TYPE_TUNNEL_PKT_FORMAT_MASK GENMASK(11, 8)
+#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_MASK BIT(21)
+#define RQ_CQE_OFFOLAD_TYPE_GET(val, member) \
+ FIELD_GET(RQ_CQE_OFFOLAD_TYPE_##member##_MASK, val)
+
+#define RQ_CQE_SGE_VLAN_MASK GENMASK(15, 0)
+#define RQ_CQE_SGE_LEN_MASK GENMASK(31, 16)
+#define RQ_CQE_SGE_GET(val, member) \
+ FIELD_GET(RQ_CQE_SGE_##member##_MASK, val)
+
+#define RQ_CQE_STATUS_CSUM_ERR_MASK GENMASK(15, 0)
+#define RQ_CQE_STATUS_NUM_LRO_MASK GENMASK(23, 16)
+#define RQ_CQE_STATUS_RXDONE_MASK BIT(31)
+#define RQ_CQE_STATUS_GET(val, member) \
+ FIELD_GET(RQ_CQE_STATUS_##member##_MASK, val)
+
+/* RX Completion information that is provided by HW for a specific RX WQE */
+struct hinic3_rq_cqe {
+ __le32 status;
+ __le32 vlan_len;
+ __le32 offload_type;
+ __le32 rsvd3;
+ __le32 rsvd4;
+ __le32 rsvd5;
+ __le32 rsvd6;
+ __le32 pkt_info;
+};
+
+struct hinic3_rq_wqe {
+ __le32 buf_hi_addr;
+ __le32 buf_lo_addr;
+ __le32 cqe_hi_addr;
+ __le32 cqe_lo_addr;
+};
+
+struct hinic3_rx_info {
+ struct page *page;
+ u32 page_offset;
+};
+
+struct hinic3_rxq {
+ struct net_device *netdev;
+
+ u16 q_id;
+ u32 q_depth;
+ u32 q_mask;
+
+ u16 buf_len;
+ u32 buf_len_shift;
+
+ u32 cons_idx;
+ u32 delta;
+
+ u32 irq_id;
+ u16 msix_entry_idx;
+
+ /* cqe_arr and rx_info are arrays of rq_depth elements. Each element is
+ * statically associated (by index) to a specific rq_wqe.
+ */
+ struct hinic3_rq_cqe *cqe_arr;
+ struct hinic3_rx_info *rx_info;
+ struct page_pool *page_pool;
+
+ struct hinic3_io_queue *rq;
+
+ struct hinic3_irq_cfg *irq_cfg;
+ u16 next_to_alloc;
+ u16 next_to_update;
+ struct device *dev; /* device for DMA mapping */
+
+ dma_addr_t cqe_start_paddr;
+} ____cacheline_aligned;
+
+struct hinic3_dyna_rxq_res {
+ u16 next_to_alloc;
+ struct hinic3_rx_info *rx_info;
+ dma_addr_t cqe_start_paddr;
+ void *cqe_start_vaddr;
+ struct page_pool *page_pool;
+};
+
+int hinic3_alloc_rxqs(struct net_device *netdev);
+void hinic3_free_rxqs(struct net_device *netdev);
+
+int hinic3_alloc_rxqs_res(struct net_device *netdev, u16 num_rq,
+ u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res);
+void hinic3_free_rxqs_res(struct net_device *netdev, u16 num_rq,
+ u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res);
+int hinic3_configure_rxqs(struct net_device *netdev, u16 num_rq,
+ u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res);
+int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
new file mode 100644
index 000000000000..92c43c05e3f2
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
@@ -0,0 +1,779 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/if_vlan.h>
+#include <linux/iopoll.h>
+#include <net/ip6_checksum.h>
+#include <net/ipv6.h>
+#include <net/netdev_queues.h>
+
+#include "hinic3_hwdev.h"
+#include "hinic3_nic_cfg.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_nic_io.h"
+#include "hinic3_tx.h"
+#include "hinic3_wq.h"
+
+#define MIN_SKB_LEN 32
+
+int hinic3_alloc_txqs(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ u16 q_id, num_txqs = nic_dev->max_qps;
+ struct pci_dev *pdev = nic_dev->pdev;
+ struct hinic3_txq *txq;
+
+ if (!num_txqs) {
+ dev_err(hwdev->dev, "Cannot allocate zero size txqs\n");
+ return -EINVAL;
+ }
+
+ nic_dev->txqs = kcalloc(num_txqs, sizeof(*nic_dev->txqs), GFP_KERNEL);
+ if (!nic_dev->txqs)
+ return -ENOMEM;
+
+ for (q_id = 0; q_id < num_txqs; q_id++) {
+ txq = &nic_dev->txqs[q_id];
+ txq->netdev = netdev;
+ txq->q_id = q_id;
+ txq->q_depth = nic_dev->q_params.sq_depth;
+ txq->q_mask = nic_dev->q_params.sq_depth - 1;
+ txq->dev = &pdev->dev;
+ }
+
+ return 0;
+}
+
+void hinic3_free_txqs(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ kfree(nic_dev->txqs);
+}
+
+static void hinic3_set_buf_desc(struct hinic3_sq_bufdesc *buf_descs,
+ dma_addr_t addr, u32 len)
+{
+ buf_descs->hi_addr = cpu_to_le32(upper_32_bits(addr));
+ buf_descs->lo_addr = cpu_to_le32(lower_32_bits(addr));
+ buf_descs->len = cpu_to_le32(len);
+}
+
+static int hinic3_tx_map_skb(struct net_device *netdev, struct sk_buff *skb,
+ struct hinic3_txq *txq,
+ struct hinic3_tx_info *tx_info,
+ struct hinic3_sq_wqe_combo *wqe_combo)
+{
+ struct hinic3_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0;
+ struct hinic3_sq_bufdesc *buf_desc = wqe_combo->bds_head;
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_dma_info *dma_info = tx_info->dma_info;
+ struct pci_dev *pdev = nic_dev->pdev;
+ skb_frag_t *frag;
+ u32 i, idx;
+ int err;
+
+ dma_info[0].dma = dma_map_single(&pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, dma_info[0].dma))
+ return -EFAULT;
+
+ dma_info[0].len = skb_headlen(skb);
+
+ wqe_desc->hi_addr = cpu_to_le32(upper_32_bits(dma_info[0].dma));
+ wqe_desc->lo_addr = cpu_to_le32(lower_32_bits(dma_info[0].dma));
+
+ wqe_desc->ctrl_len = cpu_to_le32(dma_info[0].len);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &(skb_shinfo(skb)->frags[i]);
+ if (unlikely(i == wqe_combo->first_bds_num))
+ buf_desc = wqe_combo->bds_sec2;
+
+ idx = i + 1;
+ dma_info[idx].dma = skb_frag_dma_map(&pdev->dev, frag, 0,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, dma_info[idx].dma)) {
+ err = -EFAULT;
+ goto err_unmap_page;
+ }
+ dma_info[idx].len = skb_frag_size(frag);
+
+ hinic3_set_buf_desc(buf_desc, dma_info[idx].dma,
+ dma_info[idx].len);
+ buf_desc++;
+ }
+
+ return 0;
+
+err_unmap_page:
+ while (idx > 1) {
+ idx--;
+ dma_unmap_page(&pdev->dev, dma_info[idx].dma,
+ dma_info[idx].len, DMA_TO_DEVICE);
+ }
+ dma_unmap_single(&pdev->dev, dma_info[0].dma, dma_info[0].len,
+ DMA_TO_DEVICE);
+
+ return err;
+}
+
+static void hinic3_tx_unmap_skb(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct hinic3_dma_info *dma_info)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct pci_dev *pdev = nic_dev->pdev;
+ int i;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags;) {
+ i++;
+ dma_unmap_page(&pdev->dev,
+ dma_info[i].dma,
+ dma_info[i].len, DMA_TO_DEVICE);
+ }
+
+ dma_unmap_single(&pdev->dev, dma_info[0].dma,
+ dma_info[0].len, DMA_TO_DEVICE);
+}
+
+static void free_all_tx_skbs(struct net_device *netdev, u32 sq_depth,
+ struct hinic3_tx_info *tx_info_arr)
+{
+ struct hinic3_tx_info *tx_info;
+ u32 idx;
+
+ for (idx = 0; idx < sq_depth; idx++) {
+ tx_info = &tx_info_arr[idx];
+ if (tx_info->skb) {
+ hinic3_tx_unmap_skb(netdev, tx_info->skb,
+ tx_info->dma_info);
+ dev_kfree_skb_any(tx_info->skb);
+ tx_info->skb = NULL;
+ }
+ }
+}
+
+union hinic3_ip {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+};
+
+union hinic3_l4 {
+ struct tcphdr *tcp;
+ struct udphdr *udp;
+ unsigned char *hdr;
+};
+
+enum hinic3_l3_type {
+ HINIC3_L3_UNKNOWN = 0,
+ HINIC3_L3_IP6_PKT = 1,
+ HINIC3_L3_IP4_PKT_NO_CSUM = 2,
+ HINIC3_L3_IP4_PKT_CSUM = 3,
+};
+
+enum hinic3_l4_offload_type {
+ HINIC3_L4_OFFLOAD_DISABLE = 0,
+ HINIC3_L4_OFFLOAD_TCP = 1,
+ HINIC3_L4_OFFLOAD_STCP = 2,
+ HINIC3_L4_OFFLOAD_UDP = 3,
+};
+
+/* initialize l4 offset and offload */
+static void get_inner_l4_info(struct sk_buff *skb, union hinic3_l4 *l4,
+ u8 l4_proto, u32 *offset,
+ enum hinic3_l4_offload_type *l4_offload)
+{
+ switch (l4_proto) {
+ case IPPROTO_TCP:
+ *l4_offload = HINIC3_L4_OFFLOAD_TCP;
+ /* To be consistent with TSO, the payload offset begins at the payload */
+ *offset = (l4->tcp->doff << TCP_HDR_DATA_OFF_UNIT_SHIFT) +
+ TRANSPORT_OFFSET(l4->hdr, skb);
+ break;
+
+ case IPPROTO_UDP:
+ *l4_offload = HINIC3_L4_OFFLOAD_UDP;
+ *offset = TRANSPORT_OFFSET(l4->hdr, skb);
+ break;
+ default:
+ *l4_offload = HINIC3_L4_OFFLOAD_DISABLE;
+ *offset = 0;
+ }
+}
+
+static int hinic3_tx_csum(struct hinic3_txq *txq, struct hinic3_sq_task *task,
+ struct sk_buff *skb)
+{
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (skb->encapsulation) {
+ union hinic3_ip ip;
+ u8 l4_proto;
+
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1,
+ TUNNEL_FLAG));
+
+ ip.hdr = skb_network_header(skb);
+ if (ip.v4->version == 4) {
+ l4_proto = ip.v4->protocol;
+ } else if (ip.v4->version == 6) {
+ union hinic3_l4 l4;
+ unsigned char *exthdr;
+ __be16 frag_off;
+
+ exthdr = ip.hdr + sizeof(*ip.v6);
+ l4_proto = ip.v6->nexthdr;
+ l4.hdr = skb_transport_header(skb);
+ if (l4.hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto, &frag_off);
+ } else {
+ l4_proto = IPPROTO_RAW;
+ }
+
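+ /* Only VXLAN tunnels (UDP destination port 4789) support csum offload */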
+ if (l4_proto != IPPROTO_UDP ||
+ ((struct udphdr *)skb_transport_header(skb))->dest !=
+ VXLAN_OFFLOAD_PORT_LE) {
+ /* Unsupported tunnel packet, disable csum offload */
+ skb_checksum_help(skb);
+ return 0;
+ }
+ }
+
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1, INNER_L4_EN));
+
+ return 1;
+}
+
+static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic3_ip *ip,
+ union hinic3_l4 *l4,
+ enum hinic3_l3_type *l3_type, u8 *l4_proto)
+{
+ unsigned char *exthdr;
+ __be16 frag_off;
+
+ if (ip->v4->version == 4) {
+ *l3_type = HINIC3_L3_IP4_PKT_CSUM;
+ *l4_proto = ip->v4->protocol;
+ } else if (ip->v4->version == 6) {
+ *l3_type = HINIC3_L3_IP6_PKT;
+ exthdr = ip->hdr + sizeof(*ip->v6);
+ *l4_proto = ip->v6->nexthdr;
+ if (exthdr != l4->hdr) {
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ l4_proto, &frag_off);
+ }
+ } else {
+ *l3_type = HINIC3_L3_UNKNOWN;
+ *l4_proto = 0;
+ }
+}
+
+static void hinic3_set_tso_info(struct hinic3_sq_task *task, __le32 *queue_info,
+ enum hinic3_l4_offload_type l4_offload,
+ u32 offset, u32 mss)
+{
+ if (l4_offload == HINIC3_L4_OFFLOAD_TCP) {
+ *queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(1, TSO));
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1,
+ INNER_L4_EN));
+ } else if (l4_offload == HINIC3_L4_OFFLOAD_UDP) {
+ *queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(1, UFO));
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1,
+ INNER_L4_EN));
+ }
+
+ /* enable L3 calculation */
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1, INNER_L3_EN));
+
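+ /* The payload offset is programmed in 2-byte units */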
+ *queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(offset >> 1, PLDOFF));
+
+ /* set MSS value */
+ *queue_info &= cpu_to_le32(~SQ_CTRL_QUEUE_INFO_MSS_MASK);
+ *queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(mss, MSS));
+}
+
+static __sum16 csum_magic(union hinic3_ip *ip, unsigned short proto)
+{
+ return (ip->v4->version == 4) ?
+ csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) :
+ csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0);
+}
+
+static int hinic3_tso(struct hinic3_sq_task *task, __le32 *queue_info,
+ struct sk_buff *skb)
+{
+ enum hinic3_l4_offload_type l4_offload;
+ enum hinic3_l3_type l3_type;
+ union hinic3_ip ip;
+ union hinic3_l4 l4;
+ u8 l4_proto;
+ u32 offset;
+ int err;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
+
+ if (skb->encapsulation) {
+ u32 gso_type = skb_shinfo(skb)->gso_type;
+ /* L3 checksum is always enabled */
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1, OUT_L3_EN));
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1,
+ TUNNEL_FLAG));
+
+ l4.hdr = skb_transport_header(skb);
+ ip.hdr = skb_network_header(skb);
+
+ if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+ l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP);
+ task->pkt_info0 |=
+ cpu_to_le32(SQ_TASK_INFO0_SET(1, OUT_L4_EN));
+ }
+
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+ } else {
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+ }
+
+ get_inner_l3_l4_type(skb, &ip, &l4, &l3_type, &l4_proto);
+
+ if (l4_proto == IPPROTO_TCP)
+ l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP);
+
+ get_inner_l4_info(skb, &l4, l4_proto, &offset, &l4_offload);
+
+ hinic3_set_tso_info(task, queue_info, l4_offload, offset,
+ skb_shinfo(skb)->gso_size);
+
+ return 1;
+}
+
+static void hinic3_set_vlan_tx_offload(struct hinic3_sq_task *task,
+ u16 vlan_tag, u8 vlan_tpid)
+{
+ /* vlan_tpid: 0=select TPID0 in IPSU, 1=select TPID1 in IPSU
+ * 2=select TPID2 in IPSU, 3=select TPID3 in IPSU,
+ * 4=select TPID4 in IPSU
+ */
+ task->vlan_offload =
+ cpu_to_le32(SQ_TASK_INFO3_SET(vlan_tag, VLAN_TAG) |
+ SQ_TASK_INFO3_SET(vlan_tpid, VLAN_TPID) |
+ SQ_TASK_INFO3_SET(1, VLAN_TAG_VALID));
+}
+
+static u32 hinic3_tx_offload(struct sk_buff *skb, struct hinic3_sq_task *task,
+ __le32 *queue_info, struct hinic3_txq *txq)
+{
+ u32 offload = 0;
+ int tso_cs_en;
+
+ task->pkt_info0 = 0;
+ task->ip_identify = 0;
+ task->rsvd = 0;
+ task->vlan_offload = 0;
+
+ tso_cs_en = hinic3_tso(task, queue_info, skb);
+ if (tso_cs_en < 0) {
+ offload = HINIC3_TX_OFFLOAD_INVALID;
+ return offload;
+ } else if (tso_cs_en) {
+ offload |= HINIC3_TX_OFFLOAD_TSO;
+ } else {
+ tso_cs_en = hinic3_tx_csum(txq, task, skb);
+ if (tso_cs_en)
+ offload |= HINIC3_TX_OFFLOAD_CSUM;
+ }
+
+#define VLAN_INSERT_MODE_MAX 5
+ if (unlikely(skb_vlan_tag_present(skb))) {
+ /* select vlan insert mode by qid, default 802.1Q Tag type */
+ hinic3_set_vlan_tx_offload(task, skb_vlan_tag_get(skb),
+ txq->q_id % VLAN_INSERT_MODE_MAX);
+ offload |= HINIC3_TX_OFFLOAD_VLAN;
+ }
+
+ if (unlikely(SQ_CTRL_QUEUE_INFO_GET(*queue_info, PLDOFF) >
+ SQ_CTRL_MAX_PLDOFF)) {
+ offload = HINIC3_TX_OFFLOAD_INVALID;
+ return offload;
+ }
+
+ return offload;
+}
+
+static u16 hinic3_get_and_update_sq_owner(struct hinic3_io_queue *sq,
+ u16 curr_pi, u16 wqebb_cnt)
+{
+ u16 owner = sq->owner;
+
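+ /* The owner bit toggles every time the producer index wraps around the queue */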
+ if (unlikely(curr_pi + wqebb_cnt >= sq->wq.q_depth))
+ sq->owner = !sq->owner;
+
+ return owner;
+}
+
+static u16 hinic3_set_wqe_combo(struct hinic3_txq *txq,
+ struct hinic3_sq_wqe_combo *wqe_combo,
+ u32 offload, u16 num_sge, u16 *curr_pi)
+{
+ struct hinic3_sq_bufdesc *first_part_wqebbs, *second_part_wqebbs;
+ u16 first_part_wqebbs_num, tmp_pi;
+
+ wqe_combo->ctrl_bd0 = hinic3_wq_get_one_wqebb(&txq->sq->wq, curr_pi);
+ if (!offload && num_sge == 1) {
+ wqe_combo->wqe_type = SQ_WQE_COMPACT_TYPE;
+ return hinic3_get_and_update_sq_owner(txq->sq, *curr_pi, 1);
+ }
+
+ wqe_combo->wqe_type = SQ_WQE_EXTENDED_TYPE;
+
+ if (offload) {
+ wqe_combo->task = hinic3_wq_get_one_wqebb(&txq->sq->wq,
+ &tmp_pi);
+ wqe_combo->task_type = SQ_WQE_TASKSECT_16BYTES;
+ } else {
+ wqe_combo->task_type = SQ_WQE_TASKSECT_46BITS;
+ }
+
+ if (num_sge > 1) {
+ /* first wqebb contains bd0, and the bd size is equal to the sq wqebb
+ * size, so we use (num_sge - 1) as the wanted wqebb_cnt
+ */
+ hinic3_wq_get_multi_wqebbs(&txq->sq->wq, num_sge - 1, &tmp_pi,
+ &first_part_wqebbs,
+ &second_part_wqebbs,
+ &first_part_wqebbs_num);
+ wqe_combo->bds_head = first_part_wqebbs;
+ wqe_combo->bds_sec2 = second_part_wqebbs;
+ wqe_combo->first_bds_num = first_part_wqebbs_num;
+ }
+
+ return hinic3_get_and_update_sq_owner(txq->sq, *curr_pi,
+ num_sge + !!offload);
+}
+
+static void hinic3_prepare_sq_ctrl(struct hinic3_sq_wqe_combo *wqe_combo,
+ __le32 queue_info, int nr_descs, u16 owner)
+{
+ struct hinic3_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0;
+
+ if (wqe_combo->wqe_type == SQ_WQE_COMPACT_TYPE) {
+ wqe_desc->ctrl_len |=
+ cpu_to_le32(SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
+ SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) |
+ SQ_CTRL_SET(owner, OWNER));
+
+ /* compact wqe queue_info will be transferred to the chip */
+ wqe_desc->queue_info = 0;
+ return;
+ }
+
+ wqe_desc->ctrl_len |=
+ cpu_to_le32(SQ_CTRL_SET(nr_descs, BUFDESC_NUM) |
+ SQ_CTRL_SET(wqe_combo->task_type, TASKSECT_LEN) |
+ SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
+ SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) |
+ SQ_CTRL_SET(owner, OWNER));
+
+ wqe_desc->queue_info = queue_info;
+ wqe_desc->queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(1, UC));
+
+ if (!SQ_CTRL_QUEUE_INFO_GET(wqe_desc->queue_info, MSS)) {
+ wqe_desc->queue_info |=
+ cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_DEFAULT, MSS));
+ } else if (SQ_CTRL_QUEUE_INFO_GET(wqe_desc->queue_info, MSS) <
+ HINIC3_TX_MSS_MIN) {
+ /* mss should not be less than 80 */
+ wqe_desc->queue_info &=
+ cpu_to_le32(~SQ_CTRL_QUEUE_INFO_MSS_MASK);
+ wqe_desc->queue_info |=
+ cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_MIN, MSS));
+ }
+}
+
+static netdev_tx_t hinic3_send_one_skb(struct sk_buff *skb,
+ struct net_device *netdev,
+ struct hinic3_txq *txq)
+{
+ struct hinic3_sq_wqe_combo wqe_combo = {};
+ struct hinic3_tx_info *tx_info;
+ struct hinic3_sq_task task;
+ u16 wqebb_cnt, num_sge;
+ __le32 queue_info = 0;
+ u16 saved_wq_prod_idx;
+ u16 owner, pi = 0;
+ u8 saved_sq_owner;
+ u32 offload;
+ int err;
+
+ if (unlikely(skb->len < MIN_SKB_LEN)) {
+ if (skb_pad(skb, MIN_SKB_LEN - skb->len))
+ goto err_out;
+
+ skb->len = MIN_SKB_LEN;
+ }
+
+ num_sge = skb_shinfo(skb)->nr_frags + 1;
+ /* assume normal wqe format + 1 wqebb for task info */
+ wqebb_cnt = num_sge + 1;
+
+ if (unlikely(hinic3_wq_free_wqebbs(&txq->sq->wq) < wqebb_cnt)) {
+ if (likely(wqebb_cnt > txq->tx_stop_thrs))
+ txq->tx_stop_thrs = min(wqebb_cnt, txq->tx_start_thrs);
+
+ netif_subqueue_try_stop(netdev, txq->sq->q_id,
+ hinic3_wq_free_wqebbs(&txq->sq->wq),
+ txq->tx_start_thrs);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ offload = hinic3_tx_offload(skb, &task, &queue_info, txq);
+ if (unlikely(offload == HINIC3_TX_OFFLOAD_INVALID)) {
+ goto err_drop_pkt;
+ } else if (!offload) {
+ wqebb_cnt -= 1;
+ if (unlikely(num_sge == 1 &&
+ skb->len > HINIC3_COMPACT_WQEE_SKB_MAX_LEN))
+ goto err_drop_pkt;
+ }
+
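+ /* Save the producer state so the WQE allocation can be rolled back
+ * if DMA mapping fails below.
+ */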
+ saved_wq_prod_idx = txq->sq->wq.prod_idx;
+ saved_sq_owner = txq->sq->owner;
+
+ owner = hinic3_set_wqe_combo(txq, &wqe_combo, offload, num_sge, &pi);
+ if (offload)
+ *wqe_combo.task = task;
+
+ tx_info = &txq->tx_info[pi];
+ tx_info->skb = skb;
+ tx_info->wqebb_cnt = wqebb_cnt;
+
+ err = hinic3_tx_map_skb(netdev, skb, txq, tx_info, &wqe_combo);
+ if (err) {
+ /* Rollback work queue to reclaim the wqebb we did not use */
+ txq->sq->wq.prod_idx = saved_wq_prod_idx;
+ txq->sq->owner = saved_sq_owner;
+ goto err_drop_pkt;
+ }
+
+ netif_subqueue_sent(netdev, txq->sq->q_id, skb->len);
+ netif_subqueue_maybe_stop(netdev, txq->sq->q_id,
+ hinic3_wq_free_wqebbs(&txq->sq->wq),
+ txq->tx_stop_thrs,
+ txq->tx_start_thrs);
+
+ hinic3_prepare_sq_ctrl(&wqe_combo, queue_info, num_sge, owner);
+ hinic3_write_db(txq->sq, 0, DB_CFLAG_DP_SQ,
+ hinic3_get_sq_local_pi(txq->sq));
+
+ return NETDEV_TX_OK;
+
+err_drop_pkt:
+ dev_kfree_skb_any(skb);
+
+err_out:
+ return NETDEV_TX_OK;
+}
+
+netdev_tx_t hinic3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 q_id = skb_get_queue_mapping(skb);
+
+ if (unlikely(!netif_carrier_ok(netdev)))
+ goto err_drop_pkt;
+
+ if (unlikely(q_id >= nic_dev->q_params.num_qps))
+ goto err_drop_pkt;
+
+ return hinic3_send_one_skb(skb, netdev, &nic_dev->txqs[q_id]);
+
+err_drop_pkt:
+ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static bool is_hw_complete_sq_process(struct hinic3_io_queue *sq)
+{
+ u16 sw_pi, hw_ci;
+
+ sw_pi = hinic3_get_sq_local_pi(sq);
+ hw_ci = hinic3_get_sq_hw_ci(sq);
+
+ return sw_pi == hw_ci;
+}
+
+#define HINIC3_FLUSH_QUEUE_POLL_SLEEP_US 10000
+#define HINIC3_FLUSH_QUEUE_POLL_TIMEOUT_US 10000000
+static int hinic3_stop_sq(struct hinic3_txq *txq)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(txq->netdev);
+ int err, rc;
+
+ err = read_poll_timeout(hinic3_force_drop_tx_pkt, rc,
+ is_hw_complete_sq_process(txq->sq) || rc,
+ HINIC3_FLUSH_QUEUE_POLL_SLEEP_US,
+ HINIC3_FLUSH_QUEUE_POLL_TIMEOUT_US,
+ true, nic_dev->hwdev);
+ if (rc)
+ return rc;
+ else
+ return err;
+}
+
+/* packet transmission should be stopped before calling this function */
+void hinic3_flush_txqs(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 qid;
+ int err;
+
+ for (qid = 0; qid < nic_dev->q_params.num_qps; qid++) {
+ err = hinic3_stop_sq(&nic_dev->txqs[qid]);
+ netdev_tx_reset_subqueue(netdev, qid);
+ if (err)
+ netdev_err(netdev, "Failed to stop sq%u\n", qid);
+ }
+}
+
+#define HINIC3_BDS_PER_SQ_WQEBB \
+ (HINIC3_SQ_WQEBB_SIZE / sizeof(struct hinic3_sq_bufdesc))
+
+int hinic3_alloc_txqs_res(struct net_device *netdev, u16 num_sq,
+ u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res)
+{
+ struct hinic3_dyna_txq_res *tqres;
+ int idx;
+
+ for (idx = 0; idx < num_sq; idx++) {
+ tqres = &txqs_res[idx];
+
+ tqres->tx_info = kcalloc(sq_depth, sizeof(*tqres->tx_info),
+ GFP_KERNEL);
+ if (!tqres->tx_info)
+ goto err_free_tqres;
+
+ tqres->bds = kcalloc(sq_depth * HINIC3_BDS_PER_SQ_WQEBB +
+ HINIC3_MAX_SQ_SGE, sizeof(*tqres->bds),
+ GFP_KERNEL);
+ if (!tqres->bds) {
+ kfree(tqres->tx_info);
+ goto err_free_tqres;
+ }
+ }
+
+ return 0;
+
+err_free_tqres:
+ while (idx > 0) {
+ idx--;
+ tqres = &txqs_res[idx];
+
+ kfree(tqres->bds);
+ kfree(tqres->tx_info);
+ }
+
+ return -ENOMEM;
+}
+
+void hinic3_free_txqs_res(struct net_device *netdev, u16 num_sq,
+ u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res)
+{
+ struct hinic3_dyna_txq_res *tqres;
+ int idx;
+
+ for (idx = 0; idx < num_sq; idx++) {
+ tqres = &txqs_res[idx];
+
+ free_all_tx_skbs(netdev, sq_depth, tqres->tx_info);
+ kfree(tqres->bds);
+ kfree(tqres->tx_info);
+ }
+}
+
+int hinic3_configure_txqs(struct net_device *netdev, u16 num_sq,
+ u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_dyna_txq_res *tqres;
+ struct hinic3_txq *txq;
+ u16 q_id;
+ u32 idx;
+
+ for (q_id = 0; q_id < num_sq; q_id++) {
+ txq = &nic_dev->txqs[q_id];
+ tqres = &txqs_res[q_id];
+
+ txq->q_depth = sq_depth;
+ txq->q_mask = sq_depth - 1;
+
+ txq->tx_stop_thrs = min(HINIC3_DEFAULT_STOP_THRS,
+ sq_depth / 20);
+ txq->tx_start_thrs = min(HINIC3_DEFAULT_START_THRS,
+ sq_depth / 10);
+
+ txq->tx_info = tqres->tx_info;
+ for (idx = 0; idx < sq_depth; idx++)
+ txq->tx_info[idx].dma_info =
+ &tqres->bds[idx * HINIC3_BDS_PER_SQ_WQEBB];
+
+ txq->sq = &nic_dev->nic_io->sq[q_id];
+ }
+
+ return 0;
+}
+
+bool hinic3_tx_poll(struct hinic3_txq *txq, int budget)
+{
+ struct net_device *netdev = txq->netdev;
+ u16 hw_ci, sw_ci, q_id = txq->sq->q_id;
+ struct hinic3_tx_info *tx_info;
+ unsigned int bytes_compl = 0;
+ unsigned int pkts = 0;
+ u16 wqebb_cnt = 0;
+
+ hw_ci = hinic3_get_sq_hw_ci(txq->sq);
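+ /* Ensure the HW consumer index is read before the completions it covers */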
+ dma_rmb();
+ sw_ci = hinic3_get_sq_local_ci(txq->sq);
+
+ do {
+ tx_info = &txq->tx_info[sw_ci];
+
+ /* Did all wqebb of this wqe complete? */
+ if (hw_ci == sw_ci ||
+ ((hw_ci - sw_ci) & txq->q_mask) < tx_info->wqebb_cnt)
+ break;
+
+ sw_ci = (sw_ci + tx_info->wqebb_cnt) & txq->q_mask;
+ net_prefetch(&txq->tx_info[sw_ci]);
+
+ wqebb_cnt += tx_info->wqebb_cnt;
+ bytes_compl += tx_info->skb->len;
+ pkts++;
+
+ hinic3_tx_unmap_skb(netdev, tx_info->skb, tx_info->dma_info);
+ napi_consume_skb(tx_info->skb, budget);
+ tx_info->skb = NULL;
+ } while (likely(pkts < HINIC3_TX_POLL_WEIGHT));
+
+ hinic3_wq_put_wqebbs(&txq->sq->wq, wqebb_cnt);
+
+ netif_subqueue_completed_wake(netdev, q_id, pkts, bytes_compl,
+ hinic3_wq_free_wqebbs(&txq->sq->wq),
+ txq->tx_start_thrs);
+
+ return pkts == HINIC3_TX_POLL_WEIGHT;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h
new file mode 100644
index 000000000000..7e1b872ba752
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_TX_H_
+#define _HINIC3_TX_H_
+
+#include <linux/bitops.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/netdevice.h>
+#include <net/checksum.h>
+
+#define VXLAN_OFFLOAD_PORT_LE cpu_to_be16(4789)
+#define TCP_HDR_DATA_OFF_UNIT_SHIFT 2
+#define TRANSPORT_OFFSET(l4_hdr, skb) ((l4_hdr) - (skb)->data)
+
+#define HINIC3_COMPACT_WQEE_SKB_MAX_LEN 16383
+#define HINIC3_TX_POLL_WEIGHT 64
+#define HINIC3_DEFAULT_STOP_THRS 6
+#define HINIC3_DEFAULT_START_THRS 24
+
+enum sq_wqe_data_format {
+ SQ_NORMAL_WQE = 0,
+};
+
+enum sq_wqe_ec_type {
+ SQ_WQE_COMPACT_TYPE = 0,
+ SQ_WQE_EXTENDED_TYPE = 1,
+};
+
+enum sq_wqe_tasksect_len_type {
+ SQ_WQE_TASKSECT_46BITS = 0,
+ SQ_WQE_TASKSECT_16BYTES = 1,
+};
+
+enum hinic3_tx_offload_type {
+ HINIC3_TX_OFFLOAD_TSO = BIT(0),
+ HINIC3_TX_OFFLOAD_CSUM = BIT(1),
+ HINIC3_TX_OFFLOAD_VLAN = BIT(2),
+ HINIC3_TX_OFFLOAD_INVALID = BIT(3),
+ HINIC3_TX_OFFLOAD_ESP = BIT(4),
+};
+
+#define SQ_CTRL_BUFDESC_NUM_MASK GENMASK(26, 19)
+#define SQ_CTRL_TASKSECT_LEN_MASK BIT(27)
+#define SQ_CTRL_DATA_FORMAT_MASK BIT(28)
+#define SQ_CTRL_EXTENDED_MASK BIT(30)
+#define SQ_CTRL_OWNER_MASK BIT(31)
+#define SQ_CTRL_SET(val, member) \
+ FIELD_PREP(SQ_CTRL_##member##_MASK, val)
+
+#define SQ_CTRL_QUEUE_INFO_PLDOFF_MASK GENMASK(9, 2)
+#define SQ_CTRL_QUEUE_INFO_UFO_MASK BIT(10)
+#define SQ_CTRL_QUEUE_INFO_TSO_MASK BIT(11)
+#define SQ_CTRL_QUEUE_INFO_MSS_MASK GENMASK(26, 13)
+#define SQ_CTRL_QUEUE_INFO_UC_MASK BIT(28)
+
+#define SQ_CTRL_QUEUE_INFO_SET(val, member) \
+ FIELD_PREP(SQ_CTRL_QUEUE_INFO_##member##_MASK, val)
+#define SQ_CTRL_QUEUE_INFO_GET(val, member) \
+ FIELD_GET(SQ_CTRL_QUEUE_INFO_##member##_MASK, le32_to_cpu(val))
+
+#define SQ_CTRL_MAX_PLDOFF 221
+
+#define SQ_TASK_INFO0_TUNNEL_FLAG_MASK BIT(19)
+#define SQ_TASK_INFO0_INNER_L4_EN_MASK BIT(24)
+#define SQ_TASK_INFO0_INNER_L3_EN_MASK BIT(25)
+#define SQ_TASK_INFO0_OUT_L4_EN_MASK BIT(27)
+#define SQ_TASK_INFO0_OUT_L3_EN_MASK BIT(28)
+#define SQ_TASK_INFO0_SET(val, member) \
+ FIELD_PREP(SQ_TASK_INFO0_##member##_MASK, val)
+
+#define SQ_TASK_INFO3_VLAN_TAG_MASK GENMASK(15, 0)
+#define SQ_TASK_INFO3_VLAN_TPID_MASK GENMASK(18, 16)
+#define SQ_TASK_INFO3_VLAN_TAG_VALID_MASK BIT(19)
+#define SQ_TASK_INFO3_SET(val, member) \
+ FIELD_PREP(SQ_TASK_INFO3_##member##_MASK, val)
+
+struct hinic3_sq_wqe_desc {
+ __le32 ctrl_len;
+ __le32 queue_info;
+ __le32 hi_addr;
+ __le32 lo_addr;
+};
+
+struct hinic3_sq_task {
+ __le32 pkt_info0;
+ __le32 ip_identify;
+ __le32 rsvd;
+ __le32 vlan_offload;
+};
+
+struct hinic3_sq_wqe_combo {
+ struct hinic3_sq_wqe_desc *ctrl_bd0;
+ struct hinic3_sq_task *task;
+ struct hinic3_sq_bufdesc *bds_head;
+ struct hinic3_sq_bufdesc *bds_sec2;
+ u16 first_bds_num;
+ u32 wqe_type;
+ u32 task_type;
+};
+
+struct hinic3_dma_info {
+ dma_addr_t dma;
+ u32 len;
+};
+
+struct hinic3_tx_info {
+ struct sk_buff *skb;
+ u16 wqebb_cnt;
+ struct hinic3_dma_info *dma_info;
+};
+
+struct hinic3_txq {
+ struct net_device *netdev;
+ struct device *dev;
+
+ u16 q_id;
+ u16 tx_stop_thrs;
+ u16 tx_start_thrs;
+ u32 q_mask;
+ u32 q_depth;
+
+ struct hinic3_tx_info *tx_info;
+ struct hinic3_io_queue *sq;
+} ____cacheline_aligned;
+
+struct hinic3_dyna_txq_res {
+ struct hinic3_tx_info *tx_info;
+ struct hinic3_dma_info *bds;
+};
+
+int hinic3_alloc_txqs(struct net_device *netdev);
+void hinic3_free_txqs(struct net_device *netdev);
+
+int hinic3_alloc_txqs_res(struct net_device *netdev, u16 num_sq,
+ u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res);
+void hinic3_free_txqs_res(struct net_device *netdev, u16 num_sq,
+ u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res);
+int hinic3_configure_txqs(struct net_device *netdev, u16 num_sq,
+ u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res);
+
+netdev_tx_t hinic3_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+bool hinic3_tx_poll(struct hinic3_txq *txq, int budget);
+void hinic3_flush_txqs(struct net_device *netdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c
new file mode 100644
index 000000000000..bc3ffdc25cf6
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/dma-mapping.h>
+
+#include "hinic3_hwdev.h"
+#include "hinic3_wq.h"
+
+#define WQ_MIN_DEPTH 64
+#define WQ_MAX_DEPTH 65536
+#define WQ_PAGE_ADDR_SIZE sizeof(u64)
+#define WQ_MAX_NUM_PAGES (HINIC3_MIN_PAGE_SIZE / WQ_PAGE_ADDR_SIZE)
+
+static int wq_init_wq_block(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq)
+{
+ struct hinic3_queue_pages *qpages = &wq->qpages;
+ int i;
+
+ if (hinic3_wq_is_0_level_cla(wq)) {
+ wq->wq_block_paddr = qpages->pages[0].align_paddr;
+ wq->wq_block_vaddr = qpages->pages[0].align_vaddr;
+
+ return 0;
+ }
+
+ if (wq->qpages.num_pages > WQ_MAX_NUM_PAGES) {
+ dev_err(hwdev->dev, "wq num_pages exceeds limit: %lu\n",
+ WQ_MAX_NUM_PAGES);
+ return -EFAULT;
+ }
+
+ wq->wq_block_vaddr = dma_alloc_coherent(hwdev->dev,
+ HINIC3_MIN_PAGE_SIZE,
+ &wq->wq_block_paddr,
+ GFP_KERNEL);
+ if (!wq->wq_block_vaddr)
+ return -ENOMEM;
+
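+ /* Fill the level-1 CLA indirection table with the big-endian
+ * physical address of each WQ page.
+ */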
+ for (i = 0; i < qpages->num_pages; i++)
+ wq->wq_block_vaddr[i] = cpu_to_be64(qpages->pages[i].align_paddr);
+
+ return 0;
+}
+
+static int wq_alloc_pages(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq)
+{
+ int err;
+
+ err = hinic3_queue_pages_alloc(hwdev, &wq->qpages, 0);
+ if (err)
+ return err;
+
+ err = wq_init_wq_block(hwdev, wq);
+ if (err) {
+ hinic3_queue_pages_free(hwdev, &wq->qpages);
+ return err;
+ }
+
+ return 0;
+}
+
+static void wq_free_pages(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq)
+{
+ if (!hinic3_wq_is_0_level_cla(wq))
+ dma_free_coherent(hwdev->dev,
+ HINIC3_MIN_PAGE_SIZE,
+ wq->wq_block_vaddr,
+ wq->wq_block_paddr);
+
+ hinic3_queue_pages_free(hwdev, &wq->qpages);
+}
+
+int hinic3_wq_create(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq,
+ u32 q_depth, u16 wqebb_size)
+{
+ u32 wq_page_size;
+
+ if (q_depth < WQ_MIN_DEPTH || q_depth > WQ_MAX_DEPTH ||
+ !is_power_of_2(q_depth) || !is_power_of_2(wqebb_size)) {
+ dev_err(hwdev->dev, "Invalid WQ: q_depth %u, wqebb_size %u\n",
+ q_depth, wqebb_size);
+ return -EINVAL;
+ }
+
+ wq_page_size = ALIGN(hwdev->wq_page_size, HINIC3_MIN_PAGE_SIZE);
+
+ memset(wq, 0, sizeof(*wq));
+ wq->q_depth = q_depth;
+ wq->idx_mask = q_depth - 1;
+
+ hinic3_queue_pages_init(&wq->qpages, q_depth, wq_page_size, wqebb_size);
+
+ return wq_alloc_pages(hwdev, wq);
+}
+
+void hinic3_wq_destroy(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq)
+{
+ wq_free_pages(hwdev, wq);
+}
+
+void hinic3_wq_reset(struct hinic3_wq *wq)
+{
+ struct hinic3_queue_pages *qpages = &wq->qpages;
+ u16 pg_idx;
+
+ wq->cons_idx = 0;
+ wq->prod_idx = 0;
+
+ for (pg_idx = 0; pg_idx < qpages->num_pages; pg_idx++)
+ memset(qpages->pages[pg_idx].align_vaddr, 0, qpages->page_size);
+}
+
+void hinic3_wq_get_multi_wqebbs(struct hinic3_wq *wq,
+ u16 num_wqebbs, u16 *prod_idx,
+ struct hinic3_sq_bufdesc **first_part_wqebbs,
+ struct hinic3_sq_bufdesc **second_part_wqebbs,
+ u16 *first_part_wqebbs_num)
+{
+ u32 idx, remaining;
+
+ idx = wq->prod_idx & wq->idx_mask;
+ wq->prod_idx += num_wqebbs;
+ *prod_idx = idx;
+ *first_part_wqebbs = get_q_element(&wq->qpages, idx, &remaining);
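+ /* If the requested wqebbs cross a queue page boundary, split them
+ * into two contiguous parts.
+ */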
+ if (likely(remaining >= num_wqebbs)) {
+ *first_part_wqebbs_num = num_wqebbs;
+ *second_part_wqebbs = NULL;
+ } else {
+ *first_part_wqebbs_num = remaining;
+ idx += remaining;
+ *second_part_wqebbs = get_q_element(&wq->qpages, idx, NULL);
+ }
+}
+
+bool hinic3_wq_is_0_level_cla(const struct hinic3_wq *wq)
+{
+ return wq->qpages.num_pages == 1;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h
new file mode 100644
index 000000000000..9b3f012bec80
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_WQ_H_
+#define _HINIC3_WQ_H_
+
+#include <linux/io.h>
+
+#include "hinic3_queue_common.h"
+
+struct hinic3_sq_bufdesc {
+ /* 31-bit length, L2NIC only uses length[17:0] */
+ __le32 len;
+ __le32 rsvd;
+ __le32 hi_addr;
+ __le32 lo_addr;
+};
+
+/* Work queue is used to submit elements (tx, rx, cmd) to hw.
+ * Driver is the producer that advances prod_idx. cons_idx is advanced when
+ * HW reports completions of previously submitted elements.
+ */
+struct hinic3_wq {
+ struct hinic3_queue_pages qpages;
+ /* Unmasked producer/consumer indices that advance until natural
+ * integer overflow, regardless of queue depth.
+ */
+ u16 cons_idx;
+ u16 prod_idx;
+
+ u32 q_depth;
+ u16 idx_mask;
+
+ /* Work Queue (logical WQEBB array) is mapped to hw via Chip Logical
+ * Address (CLA) using 1 of 2 levels:
+ * level 0 - direct mapping of single wq page
+ * level 1 - indirect mapping of multiple pages via additional page
+ * table.
+ * When wq uses level 1, wq_block will hold the allocated indirection
+ * table.
+ */
+ dma_addr_t wq_block_paddr;
+ __be64 *wq_block_vaddr;
+} ____cacheline_aligned;
+
+/* Get number of elements in work queue that are in-use. */
+static inline u16 hinic3_wq_get_used(const struct hinic3_wq *wq)
+{
+ return READ_ONCE(wq->prod_idx) - READ_ONCE(wq->cons_idx);
+}
+
+static inline u16 hinic3_wq_free_wqebbs(struct hinic3_wq *wq)
+{
+ /* Don't allow queue to become completely full, report (free - 1). */
+ return wq->q_depth - hinic3_wq_get_used(wq) - 1;
+}
+
+static inline void *hinic3_wq_get_one_wqebb(struct hinic3_wq *wq, u16 *pi)
+{
+ *pi = wq->prod_idx & wq->idx_mask;
+ wq->prod_idx++;
+
+ return get_q_element(&wq->qpages, *pi, NULL);
+}
+
+static inline void hinic3_wq_put_wqebbs(struct hinic3_wq *wq, u16 num_wqebbs)
+{
+ wq->cons_idx += num_wqebbs;
+}
+
+static inline u64 hinic3_wq_get_first_wqe_page_addr(const struct hinic3_wq *wq)
+{
+ return wq->qpages.pages[0].align_paddr;
+}
+
+int hinic3_wq_create(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq,
+ u32 q_depth, u16 wqebb_size);
+void hinic3_wq_destroy(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq);
+void hinic3_wq_reset(struct hinic3_wq *wq);
+void hinic3_wq_get_multi_wqebbs(struct hinic3_wq *wq,
+ u16 num_wqebbs, u16 *prod_idx,
+ struct hinic3_sq_bufdesc **first_part_wqebbs,
+ struct hinic3_sq_bufdesc **second_part_wqebbs,
+ u16 *first_part_wqebbs_num);
+bool hinic3_wq_is_0_level_cla(const struct hinic3_wq *wq);
+
+#endif
diff --git a/drivers/net/ethernet/ibm/Kconfig b/drivers/net/ethernet/ibm/Kconfig
index c0c112d95b89..4f4b23465c47 100644
--- a/drivers/net/ethernet/ibm/Kconfig
+++ b/drivers/net/ethernet/ibm/Kconfig
@@ -27,6 +27,19 @@ config IBMVETH
To compile this driver as a module, choose M here. The module will
be called ibmveth.
+config IBMVETH_KUNIT_TEST
+ bool "KUnit test for IBM LAN Virtual Ethernet support" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ depends on KUNIT=y && IBMVETH=y
+ default KUNIT_ALL_TESTS
+ help
+ This builds unit tests for the IBM LAN Virtual Ethernet driver.
+
+ For more information on KUnit and unit tests in general, please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
source "drivers/net/ethernet/ibm/emac/Kconfig"
config EHEA
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 25b8a3556004..417dfa18daae 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2554,17 +2554,12 @@ static int emac_dt_mdio_probe(struct emac_instance *dev)
struct mii_bus *bus;
int res;
- mii_np = of_get_child_by_name(dev->ofdev->dev.of_node, "mdio");
+ mii_np = of_get_available_child_by_name(dev->ofdev->dev.of_node, "mdio");
if (!mii_np) {
dev_err(&dev->ofdev->dev, "no mdio definition found.");
return -ENODEV;
}
- if (!of_device_is_available(mii_np)) {
- res = -ENODEV;
- goto put_node;
- }
-
bus = devm_mdiobus_alloc(&dev->ofdev->dev);
if (!bus) {
res = -ENOMEM;
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index b619a3ec245b..6f0821f1e798 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -39,8 +39,6 @@
#include "ibmveth.h"
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
-static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
- bool reuse);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
static struct kobj_type ktype_veth_pool;
@@ -213,94 +211,169 @@ static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
struct ibmveth_buff_pool *pool)
{
- u32 i;
- u32 count = pool->size - atomic_read(&pool->available);
- u32 buffers_added = 0;
- struct sk_buff *skb;
- unsigned int free_index, index;
- u64 correlator;
+ union ibmveth_buf_desc descs[IBMVETH_MAX_RX_PER_HCALL] = {0};
+ u32 remaining = pool->size - atomic_read(&pool->available);
+ u64 correlators[IBMVETH_MAX_RX_PER_HCALL] = {0};
unsigned long lpar_rc;
+ u32 buffers_added = 0;
+ u32 i, filled, batch;
+ struct vio_dev *vdev;
dma_addr_t dma_addr;
+ struct device *dev;
+ u32 index;
+
+ vdev = adapter->vdev;
+ dev = &vdev->dev;
mb();
- for (i = 0; i < count; ++i) {
- union ibmveth_buf_desc desc;
+ batch = adapter->rx_buffers_per_hcall;
+
+ while (remaining > 0) {
+ unsigned int free_index = pool->consumer_index;
+
+ /* Fill a batch of descriptors */
+ for (filled = 0; filled < min(remaining, batch); filled++) {
+ index = pool->free_map[free_index];
+ if (WARN_ON(index == IBM_VETH_INVALID_MAP)) {
+ adapter->replenish_add_buff_failure++;
+ netdev_info(adapter->netdev,
+ "Invalid map index %u, reset\n",
+ index);
+ schedule_work(&adapter->work);
+ break;
+ }
- free_index = pool->consumer_index;
- index = pool->free_map[free_index];
- skb = NULL;
+ if (!pool->skbuff[index]) {
+ struct sk_buff *skb = NULL;
- BUG_ON(index == IBM_VETH_INVALID_MAP);
+ skb = netdev_alloc_skb(adapter->netdev,
+ pool->buff_size);
+ if (!skb) {
+ adapter->replenish_no_mem++;
+ adapter->replenish_add_buff_failure++;
+ break;
+ }
- /* are we allocating a new buffer or recycling an old one */
- if (pool->skbuff[index])
- goto reuse;
+ dma_addr = dma_map_single(dev, skb->data,
+ pool->buff_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_kfree_skb_any(skb);
+ adapter->replenish_add_buff_failure++;
+ break;
+ }
- skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
+ pool->dma_addr[index] = dma_addr;
+ pool->skbuff[index] = skb;
+ } else {
+ /* re-use case */
+ dma_addr = pool->dma_addr[index];
+ }
- if (!skb) {
- netdev_dbg(adapter->netdev,
- "replenish: unable to allocate skb\n");
- adapter->replenish_no_mem++;
- break;
- }
+ if (rx_flush) {
+ unsigned int len;
- dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
- pool->buff_size, DMA_FROM_DEVICE);
+ len = adapter->netdev->mtu + IBMVETH_BUFF_OH;
+ len = min(pool->buff_size, len);
+ ibmveth_flush_buffer(pool->skbuff[index]->data,
+ len);
+ }
- if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
- goto failure;
+ descs[filled].fields.flags_len = IBMVETH_BUF_VALID |
+ pool->buff_size;
+ descs[filled].fields.address = dma_addr;
- pool->dma_addr[index] = dma_addr;
- pool->skbuff[index] = skb;
+ correlators[filled] = ((u64)pool->index << 32) | index;
+ *(u64 *)pool->skbuff[index]->data = correlators[filled];
- if (rx_flush) {
- unsigned int len = min(pool->buff_size,
- adapter->netdev->mtu +
- IBMVETH_BUFF_OH);
- ibmveth_flush_buffer(skb->data, len);
+ free_index++;
+ if (free_index >= pool->size)
+ free_index = 0;
}
-reuse:
- dma_addr = pool->dma_addr[index];
- desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
- desc.fields.address = dma_addr;
-
- correlator = ((u64)pool->index << 32) | index;
- *(u64 *)pool->skbuff[index]->data = correlator;
- lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
- desc.desc);
+ if (!filled)
+ break;
+ /* single buffer case */
+ if (filled == 1)
+ lpar_rc = h_add_logical_lan_buffer(vdev->unit_address,
+ descs[0].desc);
+ else
+ /* Multi-buffer hcall */
+ lpar_rc = h_add_logical_lan_buffers(vdev->unit_address,
+ descs[0].desc,
+ descs[1].desc,
+ descs[2].desc,
+ descs[3].desc,
+ descs[4].desc,
+ descs[5].desc,
+ descs[6].desc,
+ descs[7].desc);
if (lpar_rc != H_SUCCESS) {
- netdev_warn(adapter->netdev,
- "%sadd_logical_lan failed %lu\n",
- skb ? "" : "When recycling: ", lpar_rc);
- goto failure;
+ dev_warn_ratelimited(dev,
+ "RX h_add_logical_lan failed: filled=%u, rc=%lu, batch=%u\n",
+ filled, lpar_rc, batch);
+ goto hcall_failure;
}
- pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
- pool->consumer_index++;
- if (pool->consumer_index >= pool->size)
- pool->consumer_index = 0;
+ /* Only update pool state after hcall succeeds */
+ for (i = 0; i < filled; i++) {
+ free_index = pool->consumer_index;
+ pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
- buffers_added++;
- adapter->replenish_add_buff_success++;
- }
+ pool->consumer_index++;
+ if (pool->consumer_index >= pool->size)
+ pool->consumer_index = 0;
+ }
- mb();
- atomic_add(buffers_added, &(pool->available));
- return;
+ buffers_added += filled;
+ adapter->replenish_add_buff_success += filled;
+ remaining -= filled;
-failure:
+ memset(&descs, 0, sizeof(descs));
+ memset(&correlators, 0, sizeof(correlators));
+ continue;
- if (dma_addr && !dma_mapping_error(&adapter->vdev->dev, dma_addr))
- dma_unmap_single(&adapter->vdev->dev,
- pool->dma_addr[index], pool->buff_size,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(pool->skbuff[index]);
- pool->skbuff[index] = NULL;
- adapter->replenish_add_buff_failure++;
+hcall_failure:
+ for (i = 0; i < filled; i++) {
+ index = correlators[i] & 0xffffffffUL;
+ dma_addr = pool->dma_addr[index];
+
+ if (pool->skbuff[index]) {
+ if (dma_addr &&
+ !dma_mapping_error(dev, dma_addr))
+ dma_unmap_single(dev, dma_addr,
+ pool->buff_size,
+ DMA_FROM_DEVICE);
+
+ dev_kfree_skb_any(pool->skbuff[index]);
+ pool->skbuff[index] = NULL;
+ }
+ }
+ adapter->replenish_add_buff_failure += filled;
+
+ /*
+ * If the multi rx buffers hcall is no longer supported by FW,
+ * e.g. in the case of Live Partition Migration
+ */
+ if (batch > 1 && lpar_rc == H_FUNCTION) {
+ /*
+ * Instead of retrying each buffer individually here, just set
+ * the max rx buffers per hcall to 1; the buffers will be
+ * replenished the next time ibmveth_replenish_buffer_pool() is
+ * called, using the single-buffer case
+ */
+ netdev_info(adapter->netdev,
+ "RX Multi buffers not supported by FW, rc=%lu\n",
+ lpar_rc);
+ adapter->rx_buffers_per_hcall = 1;
+ netdev_info(adapter->netdev,
+ "Next rx replesh will fall back to single-buffer hcall\n");
+ }
+ break;
+ }
mb();
atomic_add(buffers_added, &(pool->available));
@@ -370,20 +443,36 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
}
}
-/* remove a buffer from a pool */
-static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
- u64 correlator, bool reuse)
+/**
+ * ibmveth_remove_buffer_from_pool - remove a buffer from a pool
+ * @adapter: adapter instance
+ * @correlator: identifies pool and index
+ * @reuse: whether to reuse buffer
+ *
+ * Return:
+ * * %0 - success
+ * * %-EINVAL - correlator maps to pool or index out of range
+ * * %-EFAULT - pool and index map to null skb
+ */
+static int ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
+ u64 correlator, bool reuse)
{
unsigned int pool = correlator >> 32;
unsigned int index = correlator & 0xffffffffUL;
unsigned int free_index;
struct sk_buff *skb;
- BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
- BUG_ON(index >= adapter->rx_buff_pool[pool].size);
+ if (WARN_ON(pool >= IBMVETH_NUM_BUFF_POOLS) ||
+ WARN_ON(index >= adapter->rx_buff_pool[pool].size)) {
+ schedule_work(&adapter->work);
+ return -EINVAL;
+ }
skb = adapter->rx_buff_pool[pool].skbuff[index];
- BUG_ON(skb == NULL);
+ if (WARN_ON(!skb)) {
+ schedule_work(&adapter->work);
+ return -EFAULT;
+ }
/* if we are going to reuse the buffer then keep the pointers around
* but mark index as available. replenish will see the skb pointer and
@@ -411,6 +500,8 @@ static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
mb();
atomic_dec(&(adapter->rx_buff_pool[pool].available));
+
+ return 0;
}
/* get the current buffer on the rx queue */
@@ -420,24 +511,44 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada
unsigned int pool = correlator >> 32;
unsigned int index = correlator & 0xffffffffUL;
- BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
- BUG_ON(index >= adapter->rx_buff_pool[pool].size);
+ if (WARN_ON(pool >= IBMVETH_NUM_BUFF_POOLS) ||
+ WARN_ON(index >= adapter->rx_buff_pool[pool].size)) {
+ schedule_work(&adapter->work);
+ return NULL;
+ }
return adapter->rx_buff_pool[pool].skbuff[index];
}
-static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
- bool reuse)
+/**
+ * ibmveth_rxq_harvest_buffer - Harvest buffer from pool
+ *
+ * @adapter: pointer to adapter
+ * @reuse: whether to reuse buffer
+ *
+ * Context: called from ibmveth_poll
+ *
+ * Return:
+ * * %0 - success
+ * * other - non-zero return from ibmveth_remove_buffer_from_pool
+ */
+static int ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
+ bool reuse)
{
u64 cor;
+ int rc;
cor = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
- ibmveth_remove_buffer_from_pool(adapter, cor, reuse);
+ rc = ibmveth_remove_buffer_from_pool(adapter, cor, reuse);
+ if (unlikely(rc))
+ return rc;
if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
adapter->rx_queue.index = 0;
adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
}
+
+ return 0;
}
static void ibmveth_free_tx_ltb(struct ibmveth_adapter *adapter, int idx)
@@ -709,6 +820,35 @@ static int ibmveth_close(struct net_device *netdev)
return 0;
}
+/**
+ * ibmveth_reset - Handle scheduled reset work
+ *
+ * @w: pointer to work_struct embedded in adapter structure
+ *
+ * Context: This routine acquires rtnl_mutex and disables its NAPI through
+ * ibmveth_close. It can't be called directly in a context that has
+ * already acquired rtnl_mutex or disabled its NAPI, or directly from
+ * a poll routine.
+ *
+ * Return: void
+ */
+static void ibmveth_reset(struct work_struct *w)
+{
+ struct ibmveth_adapter *adapter = container_of(w, struct ibmveth_adapter, work);
+ struct net_device *netdev = adapter->netdev;
+
+ netdev_dbg(netdev, "reset starting\n");
+
+ rtnl_lock();
+
+ dev_close(adapter->netdev);
+ dev_open(adapter->netdev, NULL);
+
+ rtnl_unlock();
+
+ netdev_dbg(netdev, "reset complete\n");
+}
+
static int ibmveth_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd)
{
@@ -1324,7 +1464,8 @@ restart_poll:
wmb(); /* suggested by larson1 */
adapter->rx_invalid_buffer++;
netdev_dbg(netdev, "recycling invalid buffer\n");
- ibmveth_rxq_harvest_buffer(adapter, true);
+ if (unlikely(ibmveth_rxq_harvest_buffer(adapter, true)))
+ break;
} else {
struct sk_buff *skb, *new_skb;
int length = ibmveth_rxq_frame_length(adapter);
@@ -1334,6 +1475,8 @@ restart_poll:
__sum16 iph_check = 0;
skb = ibmveth_rxq_get_buffer(adapter);
+ if (unlikely(!skb))
+ break;
/* if the large packet bit is set in the rx queue
* descriptor, the mss will be written by PHYP eight
@@ -1357,10 +1500,12 @@ restart_poll:
if (rx_flush)
ibmveth_flush_buffer(skb->data,
length + offset);
- ibmveth_rxq_harvest_buffer(adapter, true);
+ if (unlikely(ibmveth_rxq_harvest_buffer(adapter, true)))
+ break;
skb = new_skb;
} else {
- ibmveth_rxq_harvest_buffer(adapter, false);
+ if (unlikely(ibmveth_rxq_harvest_buffer(adapter, false)))
+ break;
skb_reserve(skb, offset);
}
@@ -1407,7 +1552,10 @@ restart_poll:
* then check once more to make sure we are done.
*/
lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE);
- BUG_ON(lpar_rc != H_SUCCESS);
+ if (WARN_ON(lpar_rc != H_SUCCESS)) {
+ schedule_work(&adapter->work);
+ goto out;
+ }
if (ibmveth_rxq_pending_buffer(adapter) && napi_schedule(napi)) {
lpar_rc = h_vio_signal(adapter->vdev->unit_address,
@@ -1428,7 +1576,7 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
if (napi_schedule_prep(&adapter->napi)) {
lpar_rc = h_vio_signal(adapter->vdev->unit_address,
VIO_IRQ_DISABLE);
- BUG_ON(lpar_rc != H_SUCCESS);
+ WARN_ON(lpar_rc != H_SUCCESS);
__napi_schedule(&adapter->napi);
}
return IRQ_HANDLED;
@@ -1670,6 +1818,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->vdev = dev;
adapter->netdev = netdev;
+ INIT_WORK(&adapter->work, ibmveth_reset);
adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
ibmveth_init_link_settings(netdev);
@@ -1705,6 +1854,19 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
netdev->features |= NETIF_F_FRAGLIST;
}
+ if (ret == H_SUCCESS &&
+ (ret_attr & IBMVETH_ILLAN_RX_MULTI_BUFF_SUPPORT)) {
+ adapter->rx_buffers_per_hcall = IBMVETH_MAX_RX_PER_HCALL;
+ netdev_dbg(netdev,
+ "RX Multi-buffer hcall supported by FW, batch set to %u\n",
+ adapter->rx_buffers_per_hcall);
+ } else {
+ adapter->rx_buffers_per_hcall = 1;
+ netdev_dbg(netdev,
+ "RX Single-buffer hcall mode, batch set to %u\n",
+ adapter->rx_buffers_per_hcall);
+ }
+
netdev->min_mtu = IBMVETH_MIN_MTU;
netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH;
@@ -1762,6 +1924,8 @@ static void ibmveth_remove(struct vio_dev *dev)
struct ibmveth_adapter *adapter = netdev_priv(netdev);
int i;
+ cancel_work_sync(&adapter->work);
+
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
kobject_put(&adapter->rx_buff_pool[i].kobj);
@@ -1791,6 +1955,26 @@ static ssize_t veth_pool_show(struct kobject *kobj,
return 0;
}
+/**
+ * veth_pool_store - sysfs store handler for pool attributes
+ * @kobj: kobject embedded in pool
+ * @attr: attribute being changed
+ * @buf: value being stored
+ * @count: length of @buf in bytes
+ *
+ * Stores new value in pool attribute. Verifies the range of the new value for
+ * size and buff_size. Verifies that at least one pool remains available to
+ * receive MTU-sized packets.
+ *
+ * Context: Process context.
+ * Takes and releases rtnl_mutex to ensure correct ordering of close
+ * and open calls.
+ * Return:
+ * * %-EPERM - Not allowed to disable all MTU-sized buffer pools
+ * * %-EINVAL - New pool size or buffer size is out of range
+ * * count - Return count for success
+ * * other - Return value from a failed ibmveth_open call
+ */
static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
@@ -1800,24 +1984,30 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
struct net_device *netdev = dev_get_drvdata(kobj_to_dev(kobj->parent));
struct ibmveth_adapter *adapter = netdev_priv(netdev);
long value = simple_strtol(buf, NULL, 10);
+ bool change = false;
+ u32 newbuff_size;
+ u32 oldbuff_size;
+ int newactive;
+ int oldactive;
+ u32 newsize;
+ u32 oldsize;
long rc;
+ rtnl_lock();
+
+ oldbuff_size = pool->buff_size;
+ oldactive = pool->active;
+ oldsize = pool->size;
+
+ newbuff_size = oldbuff_size;
+ newactive = oldactive;
+ newsize = oldsize;
+
if (attr == &veth_active_attr) {
- if (value && !pool->active) {
- if (netif_running(netdev)) {
- if (ibmveth_alloc_buffer_pool(pool)) {
- netdev_err(netdev,
- "unable to alloc pool\n");
- return -ENOMEM;
- }
- pool->active = 1;
- ibmveth_close(netdev);
- if ((rc = ibmveth_open(netdev)))
- return rc;
- } else {
- pool->active = 1;
- }
- } else if (!value && pool->active) {
+ if (value && !oldactive) {
+ newactive = 1;
+ change = true;
+ } else if (!value && oldactive) {
int mtu = netdev->mtu + IBMVETH_BUFF_OH;
int i;
/* Make sure there is a buffer pool with buffers that
@@ -1833,48 +2023,60 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
if (i == IBMVETH_NUM_BUFF_POOLS) {
netdev_err(netdev, "no active pool >= MTU\n");
- return -EPERM;
+ rc = -EPERM;
+ goto unlock_err;
}
- if (netif_running(netdev)) {
- ibmveth_close(netdev);
- pool->active = 0;
- if ((rc = ibmveth_open(netdev)))
- return rc;
- }
- pool->active = 0;
+ newactive = 0;
+ change = true;
}
} else if (attr == &veth_num_attr) {
if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
- return -EINVAL;
- } else {
- if (netif_running(netdev)) {
- ibmveth_close(netdev);
- pool->size = value;
- if ((rc = ibmveth_open(netdev)))
- return rc;
- } else {
- pool->size = value;
- }
+ rc = -EINVAL;
+ goto unlock_err;
+ }
+ if (value != oldsize) {
+ newsize = value;
+ change = true;
}
} else if (attr == &veth_size_attr) {
if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
- return -EINVAL;
- } else {
- if (netif_running(netdev)) {
- ibmveth_close(netdev);
- pool->buff_size = value;
- if ((rc = ibmveth_open(netdev)))
- return rc;
- } else {
- pool->buff_size = value;
+ rc = -EINVAL;
+ goto unlock_err;
+ }
+ if (value != oldbuff_size) {
+ newbuff_size = value;
+ change = true;
+ }
+ }
+
+ if (change) {
+ if (netif_running(netdev))
+ ibmveth_close(netdev);
+
+ pool->active = newactive;
+ pool->buff_size = newbuff_size;
+ pool->size = newsize;
+
+ if (netif_running(netdev)) {
+ rc = ibmveth_open(netdev);
+ if (rc) {
+ pool->active = oldactive;
+ pool->buff_size = oldbuff_size;
+ pool->size = oldsize;
+ goto unlock_err;
}
}
}
+ rtnl_unlock();
/* kick the interrupt handler to allocate/deallocate pools */
ibmveth_interrupt(netdev->irq, netdev);
return count;
+
+unlock_err:
+ rtnl_unlock();
+ return rc;
}
@@ -1947,3 +2149,132 @@ static void __exit ibmveth_module_exit(void)
module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);
+
+#ifdef CONFIG_IBMVETH_KUNIT_TEST
+#include <kunit/test.h>
+
+/**
+ * ibmveth_reset_kunit - reset routine for running in KUnit environment
+ *
+ * @w: pointer to work_struct embedded in adapter structure
+ *
+ * Context: Called in the KUnit environment. Does nothing.
+ *
+ * Return: void
+ */
+static void ibmveth_reset_kunit(struct work_struct *w)
+{
+ netdev_dbg(NULL, "reset_kunit starting\n");
+ netdev_dbg(NULL, "reset_kunit complete\n");
+}
+
+/**
+ * ibmveth_remove_buffer_from_pool_test - unit test for some of
+ * ibmveth_remove_buffer_from_pool
+ * @test: pointer to kunit structure
+ *
+ * Tests the error returns from ibmveth_remove_buffer_from_pool.
+ * ibmveth_remove_buffer_from_pool also calls WARN_ON, so dmesg should be
+ * checked to see that these warnings happened.
+ *
+ * Return: void
+ */
+static void ibmveth_remove_buffer_from_pool_test(struct kunit *test)
+{
+ struct ibmveth_adapter *adapter = kunit_kzalloc(test, sizeof(*adapter), GFP_KERNEL);
+ struct ibmveth_buff_pool *pool;
+ u64 correlator;
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter);
+
+ INIT_WORK(&adapter->work, ibmveth_reset_kunit);
+
+ /* Set sane values for buffer pools */
+ for (int i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+ ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
+ pool_count[i], pool_size[i],
+ pool_active[i]);
+
+ pool = &adapter->rx_buff_pool[0];
+ pool->skbuff = kunit_kcalloc(test, pool->size, sizeof(void *), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pool->skbuff);
+
+ correlator = ((u64)IBMVETH_NUM_BUFF_POOLS << 32) | 0;
+ KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, false));
+ KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, true));
+
+ correlator = ((u64)0 << 32) | adapter->rx_buff_pool[0].size;
+ KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, false));
+ KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, true));
+
+ correlator = (u64)0 | 0;
+ pool->skbuff[0] = NULL;
+ KUNIT_EXPECT_EQ(test, -EFAULT, ibmveth_remove_buffer_from_pool(adapter, correlator, false));
+ KUNIT_EXPECT_EQ(test, -EFAULT, ibmveth_remove_buffer_from_pool(adapter, correlator, true));
+
+ flush_work(&adapter->work);
+}
+
+/**
+ * ibmveth_rxq_get_buffer_test - unit test for ibmveth_rxq_get_buffer
+ * @test: pointer to kunit structure
+ *
+ * Tests ibmveth_rxq_get_buffer. ibmveth_rxq_get_buffer also calls WARN_ON for
+ * the NULL returns, so dmesg should be checked to see that these warnings
+ * happened.
+ *
+ * Return: void
+ */
+static void ibmveth_rxq_get_buffer_test(struct kunit *test)
+{
+ struct ibmveth_adapter *adapter = kunit_kzalloc(test, sizeof(*adapter), GFP_KERNEL);
+ struct sk_buff *skb = kunit_kzalloc(test, sizeof(*skb), GFP_KERNEL);
+ struct ibmveth_buff_pool *pool;
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
+
+ INIT_WORK(&adapter->work, ibmveth_reset_kunit);
+
+ adapter->rx_queue.queue_len = 1;
+ adapter->rx_queue.index = 0;
+ adapter->rx_queue.queue_addr = kunit_kzalloc(test, sizeof(struct ibmveth_rx_q_entry),
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter->rx_queue.queue_addr);
+
+ /* Set sane values for buffer pools */
+ for (int i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+ ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
+ pool_count[i], pool_size[i],
+ pool_active[i]);
+
+ pool = &adapter->rx_buff_pool[0];
+ pool->skbuff = kunit_kcalloc(test, pool->size, sizeof(void *), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pool->skbuff);
+
+ adapter->rx_queue.queue_addr[0].correlator = (u64)IBMVETH_NUM_BUFF_POOLS << 32 | 0;
+ KUNIT_EXPECT_PTR_EQ(test, NULL, ibmveth_rxq_get_buffer(adapter));
+
+ adapter->rx_queue.queue_addr[0].correlator = (u64)0 << 32 | adapter->rx_buff_pool[0].size;
+ KUNIT_EXPECT_PTR_EQ(test, NULL, ibmveth_rxq_get_buffer(adapter));
+
+ pool->skbuff[0] = skb;
+ adapter->rx_queue.queue_addr[0].correlator = (u64)0 << 32 | 0;
+ KUNIT_EXPECT_PTR_EQ(test, skb, ibmveth_rxq_get_buffer(adapter));
+
+ flush_work(&adapter->work);
+}
+
+static struct kunit_case ibmveth_test_cases[] = {
+ KUNIT_CASE(ibmveth_remove_buffer_from_pool_test),
+ KUNIT_CASE(ibmveth_rxq_get_buffer_test),
+ {}
+};
+
+static struct kunit_suite ibmveth_test_suite = {
+ .name = "ibmveth-kunit-test",
+ .test_cases = ibmveth_test_cases,
+};
+
+kunit_test_suite(ibmveth_test_suite);
+#endif
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 8468e2c59d7a..068f99df133e 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -28,6 +28,7 @@
#define IbmVethMcastRemoveFilter 0x2UL
#define IbmVethMcastClearFilterTable 0x3UL
+#define IBMVETH_ILLAN_RX_MULTI_BUFF_SUPPORT 0x0000000000040000UL
#define IBMVETH_ILLAN_LRG_SR_ENABLED 0x0000000000010000UL
#define IBMVETH_ILLAN_LRG_SND_SUPPORT 0x0000000000008000UL
#define IBMVETH_ILLAN_PADDED_PKT_CSUM 0x0000000000002000UL
@@ -46,6 +47,24 @@
#define h_add_logical_lan_buffer(ua, buf) \
plpar_hcall_norets(H_ADD_LOGICAL_LAN_BUFFER, ua, buf)
+static inline long h_add_logical_lan_buffers(unsigned long unit_address,
+ unsigned long desc1,
+ unsigned long desc2,
+ unsigned long desc3,
+ unsigned long desc4,
+ unsigned long desc5,
+ unsigned long desc6,
+ unsigned long desc7,
+ unsigned long desc8)
+{
+ unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+
+ return plpar_hcall9(H_ADD_LOGICAL_LAN_BUFFERS,
+ retbuf, unit_address,
+ desc1, desc2, desc3, desc4,
+ desc5, desc6, desc7, desc8);
+}
+
/* FW allows us to send 6 descriptors but we only use one so mark
* the other 5 as unused (0)
*/
@@ -101,6 +120,7 @@ static inline long h_illan_attributes(unsigned long unit_address,
#define IBMVETH_MAX_TX_BUF_SIZE (1024 * 64)
#define IBMVETH_MAX_QUEUES 16U
#define IBMVETH_DEFAULT_QUEUES 8U
+#define IBMVETH_MAX_RX_PER_HCALL 8U
static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
static int pool_count[] = { 256, 512, 256, 256, 256 };
@@ -134,38 +154,40 @@ struct ibmveth_rx_q {
};
struct ibmveth_adapter {
- struct vio_dev *vdev;
- struct net_device *netdev;
- struct napi_struct napi;
- unsigned int mcastFilterSize;
- void * buffer_list_addr;
- void * filter_list_addr;
- void *tx_ltb_ptr[IBMVETH_MAX_QUEUES];
- unsigned int tx_ltb_size;
- dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES];
- dma_addr_t buffer_list_dma;
- dma_addr_t filter_list_dma;
- struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
- struct ibmveth_rx_q rx_queue;
- int rx_csum;
- int large_send;
- bool is_active_trunk;
-
- u64 fw_ipv6_csum_support;
- u64 fw_ipv4_csum_support;
- u64 fw_large_send_support;
- /* adapter specific stats */
- u64 replenish_task_cycles;
- u64 replenish_no_mem;
- u64 replenish_add_buff_failure;
- u64 replenish_add_buff_success;
- u64 rx_invalid_buffer;
- u64 rx_no_buffer;
- u64 tx_map_failed;
- u64 tx_send_failed;
- u64 tx_large_packets;
- u64 rx_large_packets;
- /* Ethtool settings */
+ struct vio_dev *vdev;
+ struct net_device *netdev;
+ struct napi_struct napi;
+ struct work_struct work;
+ unsigned int mcastFilterSize;
+ void *buffer_list_addr;
+ void *filter_list_addr;
+ void *tx_ltb_ptr[IBMVETH_MAX_QUEUES];
+ unsigned int tx_ltb_size;
+ dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES];
+ dma_addr_t buffer_list_dma;
+ dma_addr_t filter_list_dma;
+ struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
+ struct ibmveth_rx_q rx_queue;
+ int rx_csum;
+ int large_send;
+ bool is_active_trunk;
+ unsigned int rx_buffers_per_hcall;
+
+ u64 fw_ipv6_csum_support;
+ u64 fw_ipv4_csum_support;
+ u64 fw_large_send_support;
+ /* adapter specific stats */
+ u64 replenish_task_cycles;
+ u64 replenish_no_mem;
+ u64 replenish_add_buff_failure;
+ u64 replenish_add_buff_success;
+ u64 rx_invalid_buffer;
+ u64 rx_no_buffer;
+ u64 tx_map_failed;
+ u64 tx_send_failed;
+ u64 tx_large_packets;
+ u64 rx_large_packets;
+ /* Ethtool settings */
u8 duplex;
u32 speed;
};
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index e95ae0d39948..3808148c1fc7 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -234,11 +234,17 @@ static int ibmvnic_set_queue_affinity(struct ibmvnic_sub_crq_queue *queue,
(*stragglers)--;
}
/* atomic write is safer than writing bit by bit directly */
- for (i = 0; i < stride; i++) {
- cpumask_set_cpu(*cpu, mask);
- *cpu = cpumask_next_wrap(*cpu, cpu_online_mask,
- nr_cpu_ids, false);
+ for_each_online_cpu_wrap(i, *cpu) {
+ if (!stride--) {
+ /* For the next queue we start from the first
+ * unused CPU in this queue
+ */
+ *cpu = i;
+ break;
+ }
+ cpumask_set_cpu(i, mask);
}
+
/* set queue affinity mask */
cpumask_copy(queue->affinity_mask, mask);
rc = irq_set_affinity_and_hint(queue->irq, queue->affinity_mask);
@@ -256,7 +262,7 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
int num_rxqs = adapter->num_active_rx_scrqs, i_rxqs = 0;
int num_txqs = adapter->num_active_tx_scrqs, i_txqs = 0;
int total_queues, stride, stragglers, i;
- unsigned int num_cpu, cpu;
+ unsigned int num_cpu, cpu = 0;
bool is_rx_queue;
int rc = 0;
@@ -274,8 +280,6 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
stride = max_t(int, num_cpu / total_queues, 1);
/* number of leftover cpu's */
stragglers = num_cpu >= total_queues ? num_cpu % total_queues : 0;
- /* next available cpu to assign irq to */
- cpu = cpumask_next(-1, cpu_online_mask);
for (i = 0; i < total_queues; i++) {
is_rx_queue = false;
@@ -752,6 +756,17 @@ static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
adapter->rx_pool[i].active = 0;
}
+static void ibmvnic_set_safe_max_ind_descs(struct ibmvnic_adapter *adapter)
+{
+ if (adapter->cur_max_ind_descs > IBMVNIC_SAFE_IND_DESC) {
+ netdev_info(adapter->netdev,
+ "set max ind descs from %u to safe limit %u\n",
+ adapter->cur_max_ind_descs,
+ IBMVNIC_SAFE_IND_DESC);
+ adapter->cur_max_ind_descs = IBMVNIC_SAFE_IND_DESC;
+ }
+}
+
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
struct ibmvnic_rx_pool *pool)
{
@@ -839,7 +854,7 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
/* if send_subcrq_indirect queue is full, flush to VIOS */
- if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
+ if (ind_bufp->index == adapter->cur_max_ind_descs ||
i == count - 1) {
lpar_rc =
send_subcrq_indirect(adapter, handle,
@@ -858,6 +873,14 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
failure:
if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
+
+ /* Detect platform limit H_PARAMETER */
+ if (lpar_rc == H_PARAMETER)
+ ibmvnic_set_safe_max_ind_descs(adapter);
+
+	/* For all error cases, temporarily drop only this batch.
+	 * Rely on TCP/IP retransmissions to retry and recover.
+ */
for (i = ind_bufp->index - 1; i >= 0; --i) {
struct ibmvnic_rx_buff *rx_buff;
@@ -2308,8 +2331,6 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
tx_pool->num_buffers - 1 :
tx_pool->consumer_index - 1;
tx_buff = &tx_pool->tx_buff[index];
- adapter->netdev->stats.tx_packets--;
- adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
adapter->tx_stats_buffers[queue_num].batched_packets--;
adapter->tx_stats_buffers[queue_num].bytes -=
tx_buff->skb->len;
@@ -2379,16 +2400,28 @@ static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
rc = send_subcrq_direct(adapter, handle,
(u64 *)ind_bufp->indir_arr);
- if (rc)
+ if (rc) {
+ dev_err_ratelimited(&adapter->vdev->dev,
+ "tx_flush failed, rc=%u (%llu entries dma=%pad handle=%llx)\n",
+ rc, entries, &dma_addr, handle);
+ /* Detect platform limit H_PARAMETER */
+ if (rc == H_PARAMETER)
+ ibmvnic_set_safe_max_ind_descs(adapter);
+
+		/* For all error cases, temporarily drop only this batch.
+		 * Rely on TCP/IP retransmissions to retry and recover.
+ */
ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
- else
+ } else {
ind_bufp->index = 0;
+ }
return rc;
}
static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ u32 cur_max_ind_descs = adapter->cur_max_ind_descs;
int queue_num = skb_get_queue_mapping(skb);
u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
struct device *dev = &adapter->vdev->dev;
@@ -2408,6 +2441,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
dma_addr_t data_dma_addr;
struct netdev_queue *txq;
unsigned long lpar_rc;
+ unsigned int skblen;
union sub_crq tx_crq;
unsigned int offset;
bool use_scrq_send_direct = false;
@@ -2522,6 +2556,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_buff->skb = skb;
tx_buff->index = bufidx;
tx_buff->pool_index = queue_num;
+ skblen = skb->len;
memset(&tx_crq, 0, sizeof(tx_crq));
tx_crq.v1.first = IBMVNIC_CRQ_CMD;
@@ -2586,7 +2621,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_crq.v1.n_crq_elem = num_entries;
tx_buff->num_entries = num_entries;
/* flush buffer if current entry can not fit */
- if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
+ if (num_entries + ind_bufp->index > cur_max_ind_descs) {
lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
if (lpar_rc != H_SUCCESS)
goto tx_flush_err;
@@ -2599,7 +2634,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
ind_bufp->index += num_entries;
if (__netdev_tx_sent_queue(txq, skb->len,
netdev_xmit_more() &&
- ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
+ ind_bufp->index < cur_max_ind_descs)) {
lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
if (lpar_rc != H_SUCCESS)
goto tx_err;
@@ -2614,7 +2649,7 @@ early_exit:
netif_stop_subqueue(netdev, queue_num);
}
- tx_bytes += skb->len;
+ tx_bytes += skblen;
txq_trans_cond_update(txq);
ret = NETDEV_TX_OK;
goto out;
@@ -2641,9 +2676,6 @@ tx_err:
}
out:
rcu_read_unlock();
- netdev->stats.tx_dropped += tx_dropped;
- netdev->stats.tx_bytes += tx_bytes;
- netdev->stats.tx_packets += tx_bpackets + tx_dpackets;
adapter->tx_send_failed += tx_send_failed;
adapter->tx_map_failed += tx_map_failed;
adapter->tx_stats_buffers[queue_num].batched_packets += tx_bpackets;
@@ -3446,6 +3478,25 @@ err:
return -ret;
}
+static void ibmvnic_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ stats->rx_packets += adapter->rx_stats_buffers[i].packets;
+ stats->rx_bytes += adapter->rx_stats_buffers[i].bytes;
+ }
+
+ for (i = 0; i < adapter->req_tx_queues; i++) {
+ stats->tx_packets += adapter->tx_stats_buffers[i].batched_packets;
+ stats->tx_packets += adapter->tx_stats_buffers[i].direct_packets;
+ stats->tx_bytes += adapter->tx_stats_buffers[i].bytes;
+ stats->tx_dropped += adapter->tx_stats_buffers[i].dropped_packets;
+ }
+}
+
static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ibmvnic_adapter *adapter = netdev_priv(dev);
@@ -3561,8 +3612,6 @@ restart_poll:
length = skb->len;
napi_gro_receive(napi, skb); /* send it up */
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += length;
adapter->rx_stats_buffers[scrq_num].packets++;
adapter->rx_stats_buffers[scrq_num].bytes += length;
frames_processed++;
@@ -3672,6 +3721,7 @@ static const struct net_device_ops ibmvnic_netdev_ops = {
.ndo_set_rx_mode = ibmvnic_set_multi,
.ndo_set_mac_address = ibmvnic_set_mac,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_get_stats64 = ibmvnic_get_stats64,
.ndo_tx_timeout = ibmvnic_tx_timeout,
.ndo_change_mtu = ibmvnic_change_mtu,
.ndo_features_check = ibmvnic_features_check,
@@ -3987,7 +4037,7 @@ static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
}
dma_free_coherent(dev,
- IBMVNIC_IND_ARR_SZ,
+ IBMVNIC_IND_MAX_ARR_SZ,
scrq->ind_buf.indir_arr,
scrq->ind_buf.indir_dma);
@@ -4044,7 +4094,7 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
scrq->ind_buf.indir_arr =
dma_alloc_coherent(dev,
- IBMVNIC_IND_ARR_SZ,
+ IBMVNIC_IND_MAX_ARR_SZ,
&scrq->ind_buf.indir_dma,
GFP_KERNEL);
@@ -4827,6 +4877,18 @@ static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
strscpy(vlcd->name, adapter->netdev->name, len);
}
+static void ibmvnic_print_hex_dump(struct net_device *dev, void *buf,
+ size_t len)
+{
+ unsigned char hex_str[16 * 3];
+
+ for (size_t i = 0; i < len; i += 16) {
+ hex_dump_to_buffer((unsigned char *)buf + i, len - i, 16, 8,
+ hex_str, sizeof(hex_str), false);
+ netdev_dbg(dev, "%s\n", hex_str);
+ }
+}
+
static int send_login(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
@@ -4937,10 +4999,8 @@ static int send_login(struct ibmvnic_adapter *adapter)
vnic_add_client_data(adapter, vlcd);
netdev_dbg(adapter->netdev, "Login Buffer:\n");
- for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
- netdev_dbg(adapter->netdev, "%016lx\n",
- ((unsigned long *)(adapter->login_buf))[i]);
- }
+ ibmvnic_print_hex_dump(adapter->netdev, adapter->login_buf,
+ adapter->login_buf_sz);
memset(&crq, 0, sizeof(crq));
crq.login.first = IBMVNIC_CRQ_CMD;
@@ -5317,15 +5377,13 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
- int i;
dma_unmap_single(dev, adapter->ip_offload_tok,
sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
- for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
- netdev_dbg(adapter->netdev, "%016lx\n",
- ((unsigned long *)(buf))[i]);
+ ibmvnic_print_hex_dump(adapter->netdev, buf,
+ sizeof(adapter->ip_offload_buf));
netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
@@ -5556,10 +5614,8 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
netdev->mtu = adapter->req_mtu - ETH_HLEN;
netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
- for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
- netdev_dbg(adapter->netdev, "%016lx\n",
- ((unsigned long *)(adapter->login_rsp_buf))[i]);
- }
+ ibmvnic_print_hex_dump(netdev, adapter->login_rsp_buf,
+ adapter->login_rsp_buf_sz);
/* Sanity checks */
if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
@@ -6344,6 +6400,19 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
rc = reset_sub_crq_queues(adapter);
}
} else {
+ if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
+ /* After an LPM, reset the max number of indirect
+ * subcrq descriptors per H_SEND_SUB_CRQ_INDIRECT
+			 * hcall to the default max (e.g. POWER8 -> POWER10)
+ *
+ * If the new destination platform does not support
+			 * the higher limit max (e.g. POWER10 -> POWER8 LPM)
+ * H_PARAMETER will trigger automatic fallback to the
+ * safe minimum limit.
+ */
+ adapter->cur_max_ind_descs = IBMVNIC_MAX_IND_DESCS;
+ }
+
rc = init_sub_crqs(adapter);
}
@@ -6495,6 +6564,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->wait_for_reset = false;
adapter->last_reset_time = jiffies;
+ adapter->cur_max_ind_descs = IBMVNIC_MAX_IND_DESCS;
rc = register_netdev(netdev);
if (rc) {
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index a189038d88df..480dc587078f 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -29,8 +29,9 @@
#define IBMVNIC_BUFFS_PER_POOL 100
#define IBMVNIC_MAX_QUEUES 16
#define IBMVNIC_MAX_QUEUE_SZ 4096
-#define IBMVNIC_MAX_IND_DESCS 16
-#define IBMVNIC_IND_ARR_SZ (IBMVNIC_MAX_IND_DESCS * 32)
+#define IBMVNIC_MAX_IND_DESCS 128
+#define IBMVNIC_SAFE_IND_DESC 16
+#define IBMVNIC_IND_MAX_ARR_SZ (IBMVNIC_MAX_IND_DESCS * 32)
#define IBMVNIC_TSO_BUF_SZ 65536
#define IBMVNIC_TSO_BUFS 64
@@ -211,7 +212,6 @@ struct ibmvnic_statistics {
u8 reserved[72];
} __packed __aligned(8);
-#define NUM_TX_STATS 3
struct ibmvnic_tx_queue_stats {
u64 batched_packets;
u64 direct_packets;
@@ -219,13 +219,18 @@ struct ibmvnic_tx_queue_stats {
u64 dropped_packets;
};
-#define NUM_RX_STATS 3
+#define NUM_TX_STATS \
+ (sizeof(struct ibmvnic_tx_queue_stats) / sizeof(u64))
+
struct ibmvnic_rx_queue_stats {
u64 packets;
u64 bytes;
u64 interrupts;
};
+#define NUM_RX_STATS \
+ (sizeof(struct ibmvnic_rx_queue_stats) / sizeof(u64))
+
struct ibmvnic_acl_buffer {
__be32 len;
__be32 version;
@@ -926,6 +931,7 @@ struct ibmvnic_adapter {
struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
dma_addr_t ip_offload_ctrl_tok;
u32 msg_enable;
+ u32 cur_max_ind_descs;
/* Vital Product Data (VPD) */
struct ibmvnic_vpd *vpd;
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 20bc40eec487..288fa8ce53af 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -146,7 +146,10 @@ config IXGBE
tristate "Intel(R) 10GbE PCI Express adapters support"
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
+ select LIBIE_FWLOG if DEBUG_FS
select MDIO
+ select NET_DEVLINK
+ select PLDMFW
select PHYLIB
help
This driver supports Intel(R) 10GbE PCI Express family of
@@ -229,6 +232,7 @@ config I40E
depends on PCI
select AUXILIARY_BUS
select LIBIE
+ select LIBIE_ADMINQ
select NET_DEVLINK
help
This driver supports Intel(R) Ethernet Controller XL710 Family of
@@ -258,12 +262,14 @@ config I40E_DCB
config IAVF
tristate
select LIBIE
+ select LIBIE_ADMINQ
select NET_SHAPER
config I40EVF
tristate "Intel(R) Ethernet Adaptive Virtual Function support"
select IAVF
depends on PCI_MSI
+ depends on PTP_1588_CLOCK_OPTIONAL
help
This driver supports virtual functions for Intel XL710,
X710, X722, XXV710, and all devices advertising support for
@@ -290,8 +296,12 @@ config ICE
depends on GNSS || GNSS = n
select AUXILIARY_BUS
select DIMLIB
+ select LIBETH_XDP
select LIBIE
+ select LIBIE_ADMINQ
+ select LIBIE_FWLOG if DEBUG_FS
select NET_DEVLINK
+ select PACKING
select PLDMFW
select DPLL
help
@@ -335,7 +345,7 @@ config ICE_SWITCHDEV
config ICE_HWTS
bool "Support HW cross-timestamp on platforms with PTM support"
default y
- depends on ICE && X86
+ depends on ICE && X86 && PCIE_PTM
help
Say Y to enable hardware supported cross-timestamping on platforms
with PCIe PTM support. The cross-timestamp is available through
@@ -365,6 +375,7 @@ config IGC
default n
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
+ depends on ETHTOOL_NETLINK
help
This driver supports Intel(R) Ethernet Controller I225-LM/I225-V
family of adapters.
diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile
index 04c844ef4964..9a37dc76aef0 100644
--- a/drivers/net/ethernet/intel/Makefile
+++ b/drivers/net/ethernet/intel/Makefile
@@ -4,7 +4,7 @@
#
obj-$(CONFIG_LIBETH) += libeth/
-obj-$(CONFIG_LIBIE) += libie/
+obj-y += libie/
obj-$(CONFIG_E100) += e100.o
obj-$(CONFIG_E1000) += e1000/
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 3a5bbda235cb..5c56c1edd492 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1682,7 +1682,7 @@ static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
static void e100_watchdog(struct timer_list *t)
{
- struct nic *nic = from_timer(nic, t, watchdog);
+ struct nic *nic = timer_container_of(nic, t, watchdog);
struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
u32 speed;
@@ -2293,7 +2293,7 @@ static int e100_up(struct nic *nic)
return 0;
err_no_irq:
- del_timer_sync(&nic->watchdog);
+ timer_delete_sync(&nic->watchdog);
err_clean_cbs:
e100_clean_cbs(nic);
err_rx_clean_list:
@@ -2308,7 +2308,7 @@ static void e100_down(struct nic *nic)
netif_stop_queue(nic->netdev);
e100_hw_reset(nic);
free_irq(nic->pdev->irq, nic->netdev);
- del_timer_sync(&nic->watchdog);
+ timer_delete_sync(&nic->watchdog);
netif_carrier_off(nic->netdev);
e100_clean_cbs(nic);
e100_rx_clean_list(nic);
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 75f3fd1d8d6e..ea6ccf4b728b 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -116,7 +116,7 @@ struct e1000_adapter;
#define E1000_MASTER_SLAVE e1000_ms_hw_default
#endif
-#define E1000_MNG_VLAN_NONE (-1)
+#define E1000_MNG_VLAN_NONE 0xFFFF
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index d06d29c6c037..726365c567ef 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -806,7 +806,7 @@ static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
}
/* If Checksum is not Correct return error else test passed */
- if ((checksum != (u16)EEPROM_SUM) && !(*data))
+ if (checksum != EEPROM_SUM && !(*data))
*data = 2;
return *data;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index f9328f2e669f..0e5de52b1067 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -3970,7 +3970,7 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
return E1000_SUCCESS;
#endif
- if (checksum == (u16)EEPROM_SUM)
+ if (checksum == EEPROM_SUM)
return E1000_SUCCESS;
else {
e_dbg("EEPROM Checksum Invalid\n");
@@ -3997,7 +3997,7 @@ s32 e1000_update_eeprom_checksum(struct e1000_hw *hw)
}
checksum += eeprom_data;
}
- checksum = (u16)EEPROM_SUM - checksum;
+ checksum = EEPROM_SUM - checksum;
if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
e_dbg("EEPROM Write Error\n");
return -E1000_ERR_EEPROM;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 4de9b156b2be..292389aceb2d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -313,8 +313,7 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
} else {
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
}
- if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
- (vid != old_vid) &&
+ if (old_vid != E1000_MNG_VLAN_NONE && vid != old_vid &&
!test_bit(old_vid, adapter->active_vlans))
e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
old_vid);
@@ -477,10 +476,6 @@ static void e1000_down_and_stop(struct e1000_adapter *adapter)
cancel_delayed_work_sync(&adapter->phy_info_task);
cancel_delayed_work_sync(&adapter->fifo_stall_task);
-
- /* Only kill reset task if adapter is not resetting */
- if (!test_bit(__E1000_RESETTING, &adapter->flags))
- cancel_work_sync(&adapter->reset_task);
}
void e1000_down(struct e1000_adapter *adapter)
@@ -1266,6 +1261,10 @@ static void e1000_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
+ /* Only kill reset task if adapter is not resetting */
+ if (!test_bit(__E1000_RESETTING, &adapter->flags))
+ cancel_work_sync(&adapter->reset_task);
+
e1000_phy_hw_reset(hw);
kfree(adapter->tx_ring);
@@ -3509,7 +3508,9 @@ static void e1000_reset_task(struct work_struct *work)
container_of(work, struct e1000_adapter, reset_task);
e_err(drv, "Reset adapter\n");
+ rtnl_lock();
e1000_reinit_locked(adapter);
+ rtnl_unlock();
}
/**
@@ -5074,7 +5075,9 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
usleep_range(10000, 20000);
WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
+ rtnl_lock();
e1000_down(adapter);
+ rtnl_unlock();
}
status = er32(STATUS);
@@ -5235,16 +5238,20 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
+ rtnl_lock();
netif_device_detach(netdev);
- if (state == pci_channel_io_perm_failure)
+ if (state == pci_channel_io_perm_failure) {
+ rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
+ }
if (netif_running(netdev))
e1000_down(adapter);
if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
pci_disable_device(pdev);
+ rtnl_unlock();
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 5e2cfa73f889..ba331899d186 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -638,6 +638,9 @@
/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
#define NVM_SUM 0xBABA
+/* Uninitialized ("empty") checksum word value */
+#define NVM_CHECKSUM_UNINITIALIZED 0xFFFF
+
/* PBA (printed board assembly) number words */
#define NVM_PBA_OFFSET_0 8
#define NVM_PBA_OFFSET_1 9
@@ -803,4 +806,7 @@
/* SerDes Control */
#define E1000_GEN_POLL_TIMEOUT 640
+#define E1000_FEXTNVM12_PHYPD_CTRL_MASK 0x00C00000
+#define E1000_FEXTNVM12_PHYPD_CTRL_P1 0x00800000
+
#endif /* _E1000_DEFINES_H_ */
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index ba9c19e6994c..aa08f397988e 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -64,7 +64,7 @@ struct e1000_info;
#define AUTO_ALL_MODES 0
#define E1000_EEPROM_APME 0x0400
-#define E1000_MNG_VLAN_NONE (-1)
+#define E1000_MNG_VLAN_NONE 0xFFFF
#define DEFAULT_JUMBO 9234
@@ -319,7 +319,7 @@ struct e1000_adapter {
u16 tx_ring_count;
u16 rx_ring_count;
- struct hwtstamp_config hwtstamp_config;
+ struct kernel_hwtstamp_config hwtstamp_config;
struct delayed_work systim_overflow_work;
struct sk_buff *tx_hwtstamp_skb;
unsigned long tx_hwtstamp_start;
@@ -461,6 +461,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
#define FLAG2_CHECK_RX_HWTSTAMP BIT(13)
#define FLAG2_CHECK_SYSTIM_OVERFLOW BIT(14)
#define FLAG2_ENABLE_S0IX_FLOWS BIT(15)
+#define FLAG2_DISABLE_K1 BIT(16)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 9364bc2b4eb1..7b1ac90b3de4 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -26,6 +26,8 @@ struct e1000_stats {
static const char e1000e_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define E1000E_PRIV_FLAGS_S0IX_ENABLED BIT(0)
"s0ix-enabled",
+#define E1000E_PRIV_FLAGS_DISABLE_K1 BIT(1)
+ "disable-k1",
};
#define E1000E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(e1000e_priv_flags_strings)
@@ -550,11 +552,11 @@ static int e1000_set_eeprom(struct net_device *netdev,
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u16 *eeprom_buff;
- void *ptr;
- int max_len;
+ int ret_val = 0;
+ size_t max_len;
int first_word;
int last_word;
- int ret_val = 0;
+ void *ptr;
u16 i;
if (eeprom->len == 0)
@@ -959,7 +961,7 @@ static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
}
/* If Checksum is not Correct return error else test passed */
- if ((checksum != (u16)NVM_SUM) && !(*data))
+ if (checksum != NVM_SUM && !(*data))
*data = 2;
return *data;
@@ -2096,54 +2098,47 @@ static void e1000_get_strings(struct net_device __always_unused *netdev,
}
}
-static int e1000_get_rxnfc(struct net_device *netdev,
- struct ethtool_rxnfc *info,
- u32 __always_unused *rule_locs)
+static int e1000_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *info)
{
- info->data = 0;
-
- switch (info->cmd) {
- case ETHTOOL_GRXFH: {
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u32 mrqc;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 mrqc;
- mrqc = er32(MRQC);
+ info->data = 0;
- if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK))
- return 0;
+ mrqc = er32(MRQC);
- switch (info->flow_type) {
- case TCP_V4_FLOW:
- if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
- info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- fallthrough;
- case UDP_V4_FLOW:
- case SCTP_V4_FLOW:
- case AH_ESP_V4_FLOW:
- case IPV4_FLOW:
- if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
- info->data |= RXH_IP_SRC | RXH_IP_DST;
- break;
- case TCP_V6_FLOW:
- if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
- info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- fallthrough;
- case UDP_V6_FLOW:
- case SCTP_V6_FLOW:
- case AH_ESP_V6_FLOW:
- case IPV6_FLOW:
- if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
- info->data |= RXH_IP_SRC | RXH_IP_DST;
- break;
- default:
- break;
- }
+ if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK))
return 0;
- }
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case IPV4_FLOW:
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case IPV6_FLOW:
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
default:
- return -EOPNOTSUPP;
+ break;
}
+ return 0;
}
static int e1000e_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
@@ -2304,26 +2299,59 @@ static u32 e1000e_get_priv_flags(struct net_device *netdev)
if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
priv_flags |= E1000E_PRIV_FLAGS_S0IX_ENABLED;
+ if (adapter->flags2 & FLAG2_DISABLE_K1)
+ priv_flags |= E1000E_PRIV_FLAGS_DISABLE_K1;
+
return priv_flags;
}
static int e1000e_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
unsigned int flags2 = adapter->flags2;
+ unsigned int changed;
- flags2 &= ~FLAG2_ENABLE_S0IX_FLOWS;
- if (priv_flags & E1000E_PRIV_FLAGS_S0IX_ENABLED) {
- struct e1000_hw *hw = &adapter->hw;
+ flags2 &= ~(FLAG2_ENABLE_S0IX_FLOWS | FLAG2_DISABLE_K1);
- if (hw->mac.type < e1000_pch_cnp)
+ if (priv_flags & E1000E_PRIV_FLAGS_S0IX_ENABLED) {
+ if (hw->mac.type < e1000_pch_cnp) {
+ e_err("S0ix is not supported on this device\n");
return -EINVAL;
+ }
+
flags2 |= FLAG2_ENABLE_S0IX_FLOWS;
}
- if (flags2 != adapter->flags2)
+ if (priv_flags & E1000E_PRIV_FLAGS_DISABLE_K1) {
+ if (hw->mac.type < e1000_ich8lan) {
+ e_err("Disabling K1 is not supported on this device\n");
+ return -EINVAL;
+ }
+
+ flags2 |= FLAG2_DISABLE_K1;
+ }
+
+ changed = adapter->flags2 ^ flags2;
+ if (changed)
adapter->flags2 = flags2;
+ if (changed & FLAG2_DISABLE_K1) {
+ /* reset the hardware to apply the changes */
+ while (test_and_set_bit(__E1000_RESETTING,
+ &adapter->state))
+ usleep_range(1000, 2000);
+
+ if (netif_running(adapter->netdev)) {
+ e1000e_down(adapter, true);
+ e1000e_up(adapter);
+ } else {
+ e1000e_reset(adapter);
+ }
+
+ clear_bit(__E1000_RESETTING, &adapter->state);
+ }
+
return 0;
}
@@ -2352,7 +2380,7 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.get_sset_count = e1000e_get_sset_count,
.get_coalesce = e1000_get_coalesce,
.set_coalesce = e1000_set_coalesce,
- .get_rxnfc = e1000_get_rxnfc,
+ .get_rxfh_fields = e1000_get_rxfh_fields,
.get_ts_info = e1000e_get_ts_info,
.get_eee = e1000e_get_eee,
.set_eee = e1000e_set_eee,
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index ce227b56cf72..0ff8688ac3b8 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -286,6 +286,52 @@ static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
}
/**
+ * e1000_reconfigure_k1_params - reconfigure Kumeran K1 parameters.
+ * @hw: pointer to the HW structure
+ *
+ * By default K1 is enabled after MAC reset, so this function only
+ * disables it.
+ *
+ * Context: PHY semaphore must be held by caller.
+ * Return: 0 on success, negative on failure
+ */
+static s32 e1000_reconfigure_k1_params(struct e1000_hw *hw)
+{
+ u16 phy_timeout;
+ u32 fextnvm12;
+ s32 ret_val;
+
+ if (hw->mac.type < e1000_pch_mtp) {
+ if (hw->adapter->flags2 & FLAG2_DISABLE_K1)
+ return e1000_configure_k1_ich8lan(hw, false);
+ return 0;
+ }
+
+ /* Change Kumeran K1 power down state from P0s to P1 */
+ fextnvm12 = er32(FEXTNVM12);
+ fextnvm12 &= ~E1000_FEXTNVM12_PHYPD_CTRL_MASK;
+ fextnvm12 |= E1000_FEXTNVM12_PHYPD_CTRL_P1;
+ ew32(FEXTNVM12, fextnvm12);
+
+	/* Wait for the interface to settle */
+ usleep_range(1000, 1100);
+ if (hw->adapter->flags2 & FLAG2_DISABLE_K1)
+ return e1000_configure_k1_ich8lan(hw, false);
+
+ /* Change K1 exit timeout */
+ ret_val = e1e_rphy_locked(hw, I217_PHY_TIMEOUTS_REG,
+ &phy_timeout);
+ if (ret_val)
+ return ret_val;
+
+ phy_timeout &= ~I217_PHY_TIMEOUTS_K1_EXIT_TO_MASK;
+ phy_timeout |= 0xF00;
+
+ return e1e_wphy_locked(hw, I217_PHY_TIMEOUTS_REG,
+ phy_timeout);
+}
+
+/**
* e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
* @hw: pointer to the HW structure
*
@@ -327,15 +373,22 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
* LANPHYPC Value bit to force the interconnect to PCIe mode.
*/
switch (hw->mac.type) {
+ case e1000_pch_mtp:
+ case e1000_pch_lnp:
+ case e1000_pch_ptp:
+ case e1000_pch_nvp:
+ /* At this point the PHY might be inaccessible so don't
+ * propagate the failure
+ */
+ if (e1000_reconfigure_k1_params(hw))
+ e_dbg("Failed to reconfigure K1 parameters\n");
+
+ fallthrough;
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
- case e1000_pch_mtp:
- case e1000_pch_lnp:
- case e1000_pch_ptp:
- case e1000_pch_nvp:
if (e1000_phy_is_accessible_pchlan(hw))
break;
@@ -419,8 +472,20 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
* the PHY is in.
*/
ret_val = hw->phy.ops.check_reset_block(hw);
- if (ret_val)
+ if (ret_val) {
e_err("ME blocked access to PHY after reset\n");
+ goto out;
+ }
+
+ if (hw->mac.type >= e1000_pch_mtp) {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val) {
+ e_err("Failed to reconfigure K1 parameters\n");
+ goto out;
+ }
+ ret_val = e1000_reconfigure_k1_params(hw);
+ hw->phy.ops.release(hw);
+ }
}
out:
@@ -1205,12 +1270,10 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
if (ret_val)
goto out;
- if (hw->mac.type != e1000_pch_mtp) {
- ret_val = e1000e_force_smbus(hw);
- if (ret_val) {
- e_dbg("Failed to force SMBUS: %d\n", ret_val);
- goto release;
- }
+ ret_val = e1000e_force_smbus(hw);
+ if (ret_val) {
+ e_dbg("Failed to force SMBUS: %d\n", ret_val);
+ goto release;
}
/* Si workaround for ULP entry flow on i127/rev6 h/w. Enable
@@ -1273,13 +1336,6 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
}
release:
- if (hw->mac.type == e1000_pch_mtp) {
- ret_val = e1000e_force_smbus(hw);
- if (ret_val)
- e_dbg("Failed to force SMBUS over MTL system: %d\n",
- ret_val);
- }
-
hw->phy.ops.release(hw);
out:
if (ret_val)
@@ -4225,6 +4281,8 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
ret_val = e1000e_update_nvm_checksum(hw);
if (ret_val)
return ret_val;
+ } else if (hw->mac.type == e1000_pch_tgp) {
+ return 0;
}
}
@@ -4897,6 +4955,16 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
u16 i;
e1000_initialize_hw_bits_ich8lan(hw);
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_reconfigure_k1_params(hw);
+ hw->phy.ops.release(hw);
+ if (ret_val) {
+		e_dbg("Failed to reconfigure K1 parameters\n");
+ return ret_val;
+ }
/* Initialize identification LED */
ret_val = mac->ops.id_led_init(hw);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 2504b11c3169..5feb589a9b5f 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -219,6 +219,10 @@
#define I217_PLL_CLOCK_GATE_REG PHY_REG(772, 28)
#define I217_PLL_CLOCK_GATE_MASK 0x07FF
+/* PHY Timeouts */
+#define I217_PHY_TIMEOUTS_REG PHY_REG(770, 21)
+#define I217_PHY_TIMEOUTS_K1_EXIT_TO_MASK 0x0FC0
+
#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */
/* Inband Control */
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index d7df2a0ed629..44249dd91bd6 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -331,8 +331,21 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
}
/* replace the entire MTA table */
- for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+ for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) {
E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
+
+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
+ /*
+ * Do not queue up too many posted writes to prevent
+ * increased latency for other devices on the
+ * interconnect. Flush after each 8th posted write,
+ * to keep additional execution time low while still
+ * preventing increased latency.
+ */
+ if (!(i % 8) && i)
+ e1e_flush();
+ }
+ }
e1e_flush();
}
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 286155efcedf..ddbe2f7d8112 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -2761,7 +2761,7 @@ static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
ew32(RCTL, rctl);
- if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
+ if (adapter->mng_vlan_id != E1000_MNG_VLAN_NONE) {
e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
adapter->mng_vlan_id);
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
@@ -2828,7 +2828,7 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
adapter->mng_vlan_id = vid;
}
- if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
+ if (old_vid != E1000_MNG_VLAN_NONE && vid != old_vid)
e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), old_vid);
}
@@ -3534,9 +3534,6 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
- case e1000_pch_mtp:
- case e1000_pch_lnp:
- case e1000_pch_ptp:
case e1000_pch_nvp:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 24MHz frequency */
@@ -3552,6 +3549,17 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
adapter->cc.shift = shift;
}
break;
+ case e1000_pch_mtp:
+ case e1000_pch_lnp:
+ case e1000_pch_ptp:
+ /* System firmware can misreport this value, so set it to a
+ * stable 38400KHz frequency.
+ */
+ incperiod = INCPERIOD_38400KHZ;
+ incvalue = INCVALUE_38400KHZ;
+ shift = INCVALUE_SHIFT_38400KHZ;
+ adapter->cc.shift = shift;
+ break;
case e1000_82574:
case e1000_82583:
/* Stable 25MHz frequency */
@@ -3574,6 +3582,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
* e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable
* @adapter: board private structure
* @config: timestamp configuration
+ * @extack: netlink extended ACK for error report
*
* Outgoing time stamping can be enabled and disabled. Play nice and
* disable it when requested, although it shouldn't cause any overhead
@@ -3587,7 +3596,8 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
* exception of "all V2 events regardless of level 2 or 4".
**/
static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct e1000_hw *hw = &adapter->hw;
u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
@@ -3598,8 +3608,10 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
bool is_l2 = false;
u32 regval;
- if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
+ if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) {
+ NL_SET_ERR_MSG(extack, "No HW timestamp support");
return -EINVAL;
+ }
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
@@ -3608,6 +3620,7 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
case HWTSTAMP_TX_ON:
break;
default:
+ NL_SET_ERR_MSG(extack, "Unsupported TX HW timestamp type");
return -ERANGE;
}
@@ -3681,6 +3694,7 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
config->rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
+ NL_SET_ERR_MSG(extack, "Unsupported RX HW timestamp filter");
return -ERANGE;
}
@@ -3693,7 +3707,8 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
ew32(TSYNCTXCTL, regval);
if ((er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) !=
(regval & E1000_TSYNCTXCTL_ENABLED)) {
- e_err("Timesync Tx Control register not set as expected\n");
+ NL_SET_ERR_MSG(extack,
+ "Timesync Tx Control register not set as expected");
return -EAGAIN;
}
@@ -3706,7 +3721,8 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
E1000_TSYNCRXCTL_TYPE_MASK)) !=
(regval & (E1000_TSYNCRXCTL_ENABLED |
E1000_TSYNCRXCTL_TYPE_MASK))) {
- e_err("Timesync Rx Control register not set as expected\n");
+ NL_SET_ERR_MSG(extack,
+ "Timesync Rx Control register not set as expected");
return -EAGAIN;
}
@@ -3901,6 +3917,7 @@ static void e1000e_systim_reset(struct e1000_adapter *adapter)
{
struct ptp_clock_info *info = &adapter->ptp_clock_info;
struct e1000_hw *hw = &adapter->hw;
+ struct netlink_ext_ack extack = {};
unsigned long flags;
u32 timinca;
s32 ret_val;
@@ -3932,7 +3949,12 @@ static void e1000e_systim_reset(struct e1000_adapter *adapter)
spin_unlock_irqrestore(&adapter->systim_lock, flags);
/* restore the previous hwtstamp configuration settings */
- e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config);
+ ret_val = e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config,
+ &extack);
+ if (ret_val) {
+ if (extack._msg)
+ e_err("%s\n", extack._msg);
+ }
}
/**
@@ -4287,8 +4309,8 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset)
napi_synchronize(&adapter->napi);
- del_timer_sync(&adapter->watchdog_timer);
- del_timer_sync(&adapter->phy_info_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->phy_info_timer);
spin_lock(&adapter->stats64_lock);
e1000e_update_stats(adapter);
@@ -4414,7 +4436,7 @@ u64 e1000e_read_systim(struct e1000_adapter *adapter,
* e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
* @cc: cyclecounter structure
**/
-static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc)
+static u64 e1000e_cyclecounter_read(struct cyclecounter *cc)
{
struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
cc);
@@ -4839,7 +4861,8 @@ static void e1000e_update_phy_task(struct work_struct *work)
**/
static void e1000_update_phy_info(struct timer_list *t)
{
- struct e1000_adapter *adapter = from_timer(adapter, t, phy_info_timer);
+ struct e1000_adapter *adapter = timer_container_of(adapter, t,
+ phy_info_timer);
if (test_bit(__E1000_DOWN, &adapter->state))
return;
@@ -5175,7 +5198,8 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
**/
static void e1000_watchdog(struct timer_list *t)
{
- struct e1000_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+ struct e1000_adapter *adapter = timer_container_of(adapter, t,
+ watchdog_timer);
/* Do the rest outside of interrupt context */
schedule_work(&adapter->watchdog_task);
@@ -6079,8 +6103,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
-static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
- int cmd)
+static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct mii_ioctl_data *data = if_mii(ifr);
@@ -6140,7 +6163,8 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
/**
* e1000e_hwtstamp_set - control hardware time stamping
* @netdev: network interface device structure
- * @ifr: interface request
+ * @config: timestamp configuration
+ * @extack: netlink extended ACK report
*
* Outgoing time stamping can be enabled and disabled. Play nice and
* disable it when requested, although it shouldn't cause any overhead
@@ -6153,20 +6177,18 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
* specified. Matching the kind of event packet is not supported, with the
* exception of "all V2 events regardless of level 2 or 4".
**/
-static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
+static int e1000e_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- struct hwtstamp_config config;
int ret_val;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- ret_val = e1000e_config_hwtstamp(adapter, &config);
+ ret_val = e1000e_config_hwtstamp(adapter, config, extack);
if (ret_val)
return ret_val;
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
@@ -6178,38 +6200,23 @@ static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
* by hardware so notify the caller the requested packets plus
* some others are time stamped.
*/
- config.rx_filter = HWTSTAMP_FILTER_SOME;
+ config->rx_filter = HWTSTAMP_FILTER_SOME;
break;
default:
break;
}
- return copy_to_user(ifr->ifr_data, &config,
- sizeof(config)) ? -EFAULT : 0;
+ return 0;
}
-static int e1000e_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+static int e1000e_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *kernel_config)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
- sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0;
-}
+ *kernel_config = adapter->hwtstamp_config;
-static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
- case SIOCGMIIPHY:
- case SIOCGMIIREG:
- case SIOCSMIIREG:
- return e1000_mii_ioctl(netdev, ifr, cmd);
- case SIOCSHWTSTAMP:
- return e1000e_hwtstamp_set(netdev, ifr);
- case SIOCGHWTSTAMP:
- return e1000e_hwtstamp_get(netdev, ifr);
- default:
- return -EOPNOTSUPP;
- }
+ return 0;
}
static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
@@ -7188,7 +7195,6 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
"Cannot re-enable PCI device after reset.\n");
result = PCI_ERS_RESULT_DISCONNECT;
} else {
- pdev->state_saved = true;
pci_restore_state(pdev);
pci_set_master(pdev);
@@ -7346,9 +7352,11 @@ static const struct net_device_ops e1000e_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = e1000_netpoll,
#endif
- .ndo_set_features = e1000_set_features,
- .ndo_fix_features = e1000_fix_features,
+ .ndo_set_features = e1000_set_features,
+ .ndo_fix_features = e1000_fix_features,
.ndo_features_check = passthru_features_check,
+ .ndo_hwtstamp_get = e1000e_hwtstamp_get,
+ .ndo_hwtstamp_set = e1000e_hwtstamp_set,
};
/**
@@ -7666,6 +7674,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* init PTP hardware clock */
e1000e_ptp_init(adapter);
+ if (hw->mac.type >= e1000_pch_mtp)
+ adapter->flags2 |= FLAG2_DISABLE_K1;
+
/* reset the hardware with the new settings */
e1000e_reset(adapter);
@@ -7741,8 +7752,8 @@ static void e1000_remove(struct pci_dev *pdev)
* from being rescheduled.
*/
set_bit(__E1000_DOWN, &adapter->state);
- del_timer_sync(&adapter->watchdog_timer);
- del_timer_sync(&adapter->phy_info_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->phy_info_timer);
cancel_work_sync(&adapter->reset_task);
cancel_work_sync(&adapter->watchdog_task);
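
The hunks above convert e1000e's hardware timestamping from the SIOCSHWTSTAMP/SIOCGHWTSTAMP branches of the private ioctl handler to the ndo_hwtstamp_set()/ndo_hwtstamp_get() callbacks, which receive an already-copied struct kernel_hwtstamp_config plus a netlink extack instead of a raw struct ifreq. Userspace is unaffected; as a reference point, here is a minimal sketch of the legacy ioctl request that the core still accepts and translates into these callbacks (the interface name "eth0" is only an assumption):

/* Minimal userspace sketch: request hardware RX timestamping via the
 * legacy SIOCSHWTSTAMP ioctl. The kernel core unpacks this into a
 * struct kernel_hwtstamp_config before calling ndo_hwtstamp_set().
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");
		close(fd);
		return 1;
	}

	/* The driver may widen the filter; e1000e reports FILTER_SOME here. */
	printf("rx_filter granted: %d\n", cfg.rx_filter);
	close(fd);
	return 0;
}
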
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index e609f4df86f4..4bde1c9de1b9 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -558,7 +558,13 @@ s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
checksum += nvm_data;
}
- if (checksum != (u16)NVM_SUM) {
+ if (hw->mac.type == e1000_pch_tgp &&
+ nvm_data == NVM_CHECKSUM_UNINITIALIZED) {
+ e_dbg("Uninitialized NVM Checksum on TGP platform - ignoring\n");
+ return 0;
+ }
+
+ if (checksum != NVM_SUM) {
e_dbg("NVM Checksum Invalid\n");
return -E1000_ERR_NVM;
}
@@ -588,7 +594,7 @@ s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
}
checksum += nvm_data;
}
- checksum = (u16)NVM_SUM - checksum;
+ checksum = NVM_SUM - checksum;
ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
if (ret_val)
e_dbg("NVM Write Error while updating checksum.\n");
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 89d57dd911dc..ec39e35f3857 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -229,14 +229,11 @@ static void e1000e_systim_overflow_work(struct work_struct *work)
systim_overflow_work.work);
struct e1000_hw *hw = &adapter->hw;
struct timespec64 ts;
- u64 ns;
/* Update the timecounter */
- ns = timecounter_read(&adapter->tc);
+ ts = ns_to_timespec64(timecounter_read(&adapter->tc));
- ts = ns_to_timespec64(ns);
- e_dbg("SYSTIM overflow check at %lld.%09lu\n",
- (long long) ts.tv_sec, ts.tv_nsec);
+ e_dbg("SYSTIM overflow check at %ptSp\n", &ts);
schedule_delayed_work(&adapter->systim_overflow_work,
E1000_SYSTIM_OVERFLOW_PERIOD);
@@ -295,15 +292,17 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
- case e1000_pch_mtp:
- case e1000_pch_lnp:
- case e1000_pch_ptp:
case e1000_pch_nvp:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)
adapter->ptp_clock_info.max_adj = MAX_PPB_24MHZ;
else
adapter->ptp_clock_info.max_adj = MAX_PPB_38400KHZ;
break;
+ case e1000_pch_mtp:
+ case e1000_pch_lnp:
+ case e1000_pch_ptp:
+ adapter->ptp_clock_info.max_adj = MAX_PPB_38400KHZ;
+ break;
case e1000_82574:
case e1000_82583:
adapter->ptp_clock_info.max_adj = MAX_PPB_25MHZ;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index 6119a4108838..65a2816142d9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -189,13 +189,14 @@ struct fm10k_q_vector {
struct fm10k_ring_container rx, tx;
struct napi_struct napi;
+ struct rcu_head rcu; /* to avoid race with update stats on free */
+
cpumask_t affinity_mask;
char name[IFNAMSIZ + 9];
#ifdef CONFIG_DEBUG_FS
struct dentry *dbg_q_vector;
#endif /* CONFIG_DEBUG_FS */
- struct rcu_head rcu; /* to avoid race with update stats on free */
/* for dynamic allocation of rings associated with this q_vector */
struct fm10k_ring ring[] ____cacheline_internodealigned_in_smp;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
index f51a63fca513..1f919a50c765 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
@@ -447,17 +447,16 @@ void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
/**
* fm10k_unbind_hw_stats_q - Unbind the queue counters from their queues
* @q: pointer to the ring of hardware statistics queue
- * @idx: index pointing to the start of the ring iteration
* @count: number of queues to iterate over
*
* Function invalidates the index values for the queues so any updates that
* may have happened are ignored and the base for the queue stats is reset.
**/
-void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count)
+void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 count)
{
u32 i;
- for (i = 0; i < count; i++, idx++, q++) {
+ for (i = 0; i < count; i++, q++) {
q->rx_stats_idx = 0;
q->tx_stats_idx = 0;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.h b/drivers/net/ethernet/intel/fm10k/fm10k_common.h
index 4c48fb73b3e7..13fca6a91a01 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.h
@@ -43,6 +43,6 @@ u32 fm10k_read_hw_stats_32b(struct fm10k_hw *hw, u32 addr,
void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
u32 idx, u32 count);
#define fm10k_unbind_hw_stats_32b(s) ((s)->base_h = 0)
-void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count);
+void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 count);
s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready);
#endif /* _FM10K_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 1bc5b6c0b897..76e42abca965 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -560,7 +560,7 @@ static int fm10k_set_ringparam(struct net_device *netdev,
/* allocate temporary buffer to store rings in */
i = max_t(int, interface->num_tx_queues, interface->num_rx_queues);
- temp_ring = vmalloc(array_size(i, sizeof(struct fm10k_ring)));
+ temp_ring = vmalloc_array(i, sizeof(struct fm10k_ring));
if (!temp_ring) {
err = -ENOMEM;
@@ -691,9 +691,11 @@ static int fm10k_set_coalesce(struct net_device *dev,
return 0;
}
-static int fm10k_get_rss_hash_opts(struct fm10k_intfc *interface,
- struct ethtool_rxnfc *cmd)
+static int fm10k_get_rssh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct fm10k_intfc *interface = netdev_priv(dev);
+
cmd->data = 0;
/* Report default options for RSS on fm10k */
@@ -732,30 +734,18 @@ static int fm10k_get_rss_hash_opts(struct fm10k_intfc *interface,
return 0;
}
-static int fm10k_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
- u32 __always_unused *rule_locs)
+static u32 fm10k_get_rx_ring_count(struct net_device *dev)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = interface->num_rx_queues;
- ret = 0;
- break;
- case ETHTOOL_GRXFH:
- ret = fm10k_get_rss_hash_opts(interface, cmd);
- break;
- default:
- break;
- }
- return ret;
+ return interface->num_rx_queues;
}
-static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface,
- struct ethtool_rxnfc *nfc)
+static int fm10k_set_rssh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct fm10k_intfc *interface = netdev_priv(dev);
int rss_ipv4_udp = test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
interface->flags);
int rss_ipv6_udp = test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
@@ -871,22 +861,6 @@ static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface,
return 0;
}
-static int fm10k_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
-{
- struct fm10k_intfc *interface = netdev_priv(dev);
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = fm10k_set_rss_hash_opt(interface, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
static int fm10k_mbx_test(struct fm10k_intfc *interface, u64 *data)
{
struct fm10k_hw *hw = &interface->hw;
@@ -1175,8 +1149,7 @@ static const struct ethtool_ops fm10k_ethtool_ops = {
.set_ringparam = fm10k_set_ringparam,
.get_coalesce = fm10k_get_coalesce,
.set_coalesce = fm10k_set_coalesce,
- .get_rxnfc = fm10k_get_rxnfc,
- .set_rxnfc = fm10k_set_rxnfc,
+ .get_rx_ring_count = fm10k_get_rx_ring_count,
.get_regs = fm10k_get_regs,
.get_regs_len = fm10k_get_regs_len,
.self_test = fm10k_self_test,
@@ -1186,6 +1159,8 @@ static const struct ethtool_ops fm10k_ethtool_ops = {
.get_rxfh_key_size = fm10k_get_rssrk_size,
.get_rxfh = fm10k_get_rssh,
.set_rxfh = fm10k_set_rssh,
+ .get_rxfh_fields = fm10k_get_rssh_fields,
+ .set_rxfh_fields = fm10k_set_rssh_fields,
.get_channels = fm10k_get_channels,
.set_channels = fm10k_set_channels,
.get_ts_info = ethtool_op_get_ts_info,
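
These ethtool hunks split fm10k's get_rxnfc()/set_rxnfc() switch statements into the dedicated get_rx_ring_count() and get_rxfh_fields()/set_rxfh_fields() callbacks; the legacy ETHTOOL_GRXFH/ETHTOOL_SRXFH ioctls keep working and are routed to the new hooks by the ethtool core. A minimal userspace sketch of such a query (the interface name "eth0" is only an assumption):

/* Minimal userspace sketch: ask which header fields feed the RSS hash
 * for TCP/IPv4 via the legacy ETHTOOL_GRXFH ioctl.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc = {
		.cmd = ETHTOOL_GRXFH,
		.flow_type = TCP_V4_FLOW,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GRXFH");
		close(fd);
		return 1;
	}

	printf("hash on src/dst IP: %s, L4 ports: %s\n",
	       (nfc.data & (RXH_IP_SRC | RXH_IP_DST)) ? "yes" : "no",
	       (nfc.data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) ? "yes" : "no");
	close(fd);
	return 0;
}
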
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 142f07ca8bc0..b8c15b837fda 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -37,7 +37,7 @@ static int __init fm10k_init_module(void)
pr_info("%s\n", fm10k_copyright);
/* create driver workqueue */
- fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
+ fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_PERCPU, 0,
fm10k_driver_name);
if (!fm10k_workqueue)
return -ENOMEM;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 92de609b7218..d75b8a50413d 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -199,8 +199,8 @@ static void fm10k_start_service_event(struct fm10k_intfc *interface)
**/
static void fm10k_service_timer(struct timer_list *t)
{
- struct fm10k_intfc *interface = from_timer(interface, t,
- service_timer);
+ struct fm10k_intfc *interface = timer_container_of(interface, t,
+ service_timer);
/* Reset the timer */
mod_timer(&interface->service_timer, (HZ * 2) + jiffies);
@@ -2245,7 +2245,7 @@ static void fm10k_remove(struct pci_dev *pdev)
struct fm10k_intfc *interface = pci_get_drvdata(pdev);
struct net_device *netdev = interface->netdev;
- del_timer_sync(&interface->service_timer);
+ timer_delete_sync(&interface->service_timer);
fm10k_stop_service_event(interface);
fm10k_stop_macvlan_task(interface);
@@ -2423,12 +2423,6 @@ static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev)
} else {
pci_set_master(pdev);
pci_restore_state(pdev);
-
- /* After second error pci->state_saved is false, this
- * resets it so EEH doesn't break.
- */
- pci_save_state(pdev);
-
pci_wake_from_d3(pdev, false);
result = PCI_ERS_RESULT_RECOVERED;
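
The fm10k_service_timer() change above is part of the tree-wide rename of from_timer() to timer_container_of() (and del_timer_sync() to timer_delete_sync()); the helper recovers the structure that embeds the timer from the pointer handed to the callback. A standalone sketch of the underlying container_of() arithmetic, with made-up struct names:

/* Standalone sketch of the container_of pattern behind
 * timer_container_of(): the callback receives a pointer to the embedded
 * timer member and derives the enclosing structure from it.
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_timer {
	unsigned long expires;
};

struct fake_intfc {
	int id;
	struct fake_timer service_timer;
};

static void service_timer_cb(struct fake_timer *t)
{
	struct fake_intfc *interface =
		container_of(t, struct fake_intfc, service_timer);

	printf("timer fired for interface %d\n", interface->id);
}

int main(void)
{
	struct fake_intfc intfc = { .id = 42 };

	service_timer_cb(&intfc.service_timer);
	return 0;
}
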
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 98861cc6df7c..3394645a18fe 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1180,126 +1180,6 @@ s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid)
}
/**
- * fm10k_iov_msg_mac_vlan_pf - Message handler for MAC/VLAN request from VF
- * @hw: Pointer to hardware structure
- * @results: Pointer array to message, results[0] is pointer to message
- * @mbx: Pointer to mailbox information structure
- *
- * This function is a default handler for MAC/VLAN requests from the VF.
- * The assumption is that in this case it is acceptable to just directly
- * hand off the message from the VF to the underlying shared code.
- **/
-s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
-{
- struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
- u8 mac[ETH_ALEN];
- u32 *result;
- int err = 0;
- bool set;
- u16 vlan;
- u32 vid;
-
- /* we shouldn't be updating rules on a disabled interface */
- if (!FM10K_VF_FLAG_ENABLED(vf_info))
- err = FM10K_ERR_PARAM;
-
- if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) {
- result = results[FM10K_MAC_VLAN_MSG_VLAN];
-
- /* record VLAN id requested */
- err = fm10k_tlv_attr_get_u32(result, &vid);
- if (err)
- return err;
-
- set = !(vid & FM10K_VLAN_CLEAR);
- vid &= ~FM10K_VLAN_CLEAR;
-
- /* if the length field has been set, this is a multi-bit
- * update request. For multi-bit requests, simply disallow
- * them when the pf_vid has been set. In this case, the PF
- * should have already cleared the VLAN_TABLE, and if we
- * allowed them, it could allow a rogue VF to receive traffic
- * on a VLAN it was not assigned. In the single-bit case, we
- * need to modify requests for VLAN 0 to use the default PF or
- * SW vid when assigned.
- */
-
- if (vid >> 16) {
- /* prevent multi-bit requests when PF has
- * administratively set the VLAN for this VF
- */
- if (vf_info->pf_vid)
- return FM10K_ERR_PARAM;
- } else {
- err = fm10k_iov_select_vid(vf_info, (u16)vid);
- if (err < 0)
- return err;
-
- vid = err;
- }
-
- /* update VSI info for VF in regards to VLAN table */
- err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
- }
-
- if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) {
- result = results[FM10K_MAC_VLAN_MSG_MAC];
-
- /* record unicast MAC address requested */
- err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
- if (err)
- return err;
-
- /* block attempts to set MAC for a locked device */
- if (is_valid_ether_addr(vf_info->mac) &&
- !ether_addr_equal(mac, vf_info->mac))
- return FM10K_ERR_PARAM;
-
- set = !(vlan & FM10K_VLAN_CLEAR);
- vlan &= ~FM10K_VLAN_CLEAR;
-
- err = fm10k_iov_select_vid(vf_info, vlan);
- if (err < 0)
- return err;
-
- vlan = (u16)err;
-
- /* notify switch of request for new unicast address */
- err = hw->mac.ops.update_uc_addr(hw, vf_info->glort,
- mac, vlan, set, 0);
- }
-
- if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) {
- result = results[FM10K_MAC_VLAN_MSG_MULTICAST];
-
- /* record multicast MAC address requested */
- err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
- if (err)
- return err;
-
- /* verify that the VF is allowed to request multicast */
- if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED))
- return FM10K_ERR_PARAM;
-
- set = !(vlan & FM10K_VLAN_CLEAR);
- vlan &= ~FM10K_VLAN_CLEAR;
-
- err = fm10k_iov_select_vid(vf_info, vlan);
- if (err < 0)
- return err;
-
- vlan = (u16)err;
-
- /* notify switch of request for new multicast address */
- err = hw->mac.ops.update_mc_addr(hw, vf_info->glort,
- mac, vlan, set);
- }
-
- return err;
-}
-
-/**
* fm10k_iov_supported_xcast_mode_pf - Determine best match for xcast mode
* @vf_info: VF info structure containing capability flags
* @mode: Requested xcast mode
@@ -1509,7 +1389,7 @@ static void fm10k_rebind_hw_stats_pf(struct fm10k_hw *hw,
fm10k_unbind_hw_stats_32b(&stats->nodesc_drop);
/* Unbind Queue Statistics */
- fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues);
+ fm10k_unbind_hw_stats_q(stats->q, hw->mac.max_queues);
/* Reinitialize bases for all stats */
fm10k_update_hw_stats_pf(hw, stats);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
index 8e814df709d2..ad3696893cb1 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
@@ -99,8 +99,6 @@ extern const struct fm10k_tlv_attr fm10k_err_msg_attr[];
s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid);
s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
-s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *, u32 **,
- struct fm10k_mbx_info *);
s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *, u32 **,
struct fm10k_mbx_info *);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
index 7fb1961f2921..6861a0bdc14e 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
@@ -465,7 +465,7 @@ static void fm10k_rebind_hw_stats_vf(struct fm10k_hw *hw,
struct fm10k_hw_stats *stats)
{
/* Unbind Queue Statistics */
- fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues);
+ fm10k_unbind_hw_stats_q(stats->q, hw->mac.max_queues);
/* Reinitialize bases for all stats */
fm10k_update_hw_stats_vf(hw, stats);
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 2089a0e172bf..d2d03db2acec 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -88,6 +88,7 @@ enum i40e_state {
__I40E_SERVICE_SCHED,
__I40E_ADMINQ_EVENT_PENDING,
__I40E_MDD_EVENT_PENDING,
+ __I40E_MDD_VF_PRINT_PENDING,
__I40E_VFLR_EVENT_PENDING,
__I40E_RESET_RECOVERY_PENDING,
__I40E_TIMEOUT_RECOVERY_PENDING,
@@ -191,6 +192,7 @@ enum i40e_pf_flags {
*/
I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA,
I40E_FLAG_VF_VLAN_PRUNING_ENA,
+ I40E_FLAG_MDD_AUTO_RESET_VF,
I40E_PF_FLAGS_NBITS, /* must be last */
};
@@ -546,6 +548,7 @@ struct i40e_pf {
u16 empr_count; /* EMP reset count */
u16 pfr_count; /* PF reset count */
u16 sw_int_count; /* SW interrupt count */
+ u32 link_down_events;
struct mutex switch_mutex;
u16 lan_vsi; /* our default LAN VSI */
@@ -571,8 +574,12 @@ struct i40e_pf {
struct i40e_vf *vf;
int num_alloc_vfs; /* actual number of VFs allocated */
u32 vf_aq_requests;
+ /* If set to non-zero, the device uses this value
+ * as maximum number of MAC filters per VF.
+ */
+ u32 max_mac_per_vf;
u32 arq_overflows; /* Not fatal, possibly indicative of problems */
-
+ struct ratelimit_state mdd_message_rate_limit;
/* DCBx/DCBNL capability for PF that indicates
* whether DCBx is managed by firmware or host
* based agent (LLDPAD). Also, indicates what
@@ -658,7 +665,7 @@ struct i40e_pf {
struct ptp_clock_info ptp_caps;
struct sk_buff *ptp_tx_skb;
unsigned long ptp_tx_start;
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
struct timespec64 ptp_prev_hw_time;
struct work_struct ptp_extts0_work;
ktime_t ptp_reset_start;
@@ -755,6 +762,7 @@ enum i40e_filter_state {
I40E_FILTER_ACTIVE, /* Added to switch by FW */
I40E_FILTER_FAILED, /* Rejected by FW */
I40E_FILTER_REMOVE, /* To be removed */
+ I40E_FILTER_NEW_SYNC, /* New, not sent yet, is in i40e_sync_vsi_filters() */
/* There is no 'removed' state; the filter struct is freed */
};
struct i40e_mac_filter {
@@ -942,6 +950,7 @@ struct i40e_q_vector {
u16 reg_idx; /* register index of the interrupt */
struct napi_struct napi;
+ struct rcu_head rcu; /* to avoid race with update stats on free */
struct i40e_ring_container rx;
struct i40e_ring_container tx;
@@ -952,7 +961,6 @@ struct i40e_q_vector {
cpumask_t affinity_mask;
struct irq_affinity_notify affinity_notify;
- struct rcu_head rcu; /* to avoid race with update stats on free */
char name[I40E_INT_NAME_STR_LEN];
bool arm_wb_state;
bool in_busy_poll;
@@ -1188,7 +1196,6 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
struct i40e_fdir_filter *input, bool add);
void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
u32 i40e_get_current_fd_count(struct i40e_pf *pf);
-u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf);
u32 i40e_get_current_atr_cnt(struct i40e_pf *pf);
u32 i40e_get_global_fd_count(struct i40e_pf *pf);
bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
@@ -1196,7 +1203,6 @@ void i40e_set_ethtool_ops(struct net_device *netdev);
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
const u8 *macaddr, s16 vlan);
void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f);
-void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan);
int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
u16 uplink, u32 param1);
@@ -1276,7 +1282,8 @@ struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
const u8 *macaddr);
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr);
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
-int i40e_count_filters(struct i40e_vsi *vsi);
+int i40e_count_all_filters(struct i40e_vsi *vsi);
+int i40e_count_active_filters(struct i40e_vsi *vsi);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
static inline bool i40e_is_sw_dcb(struct i40e_pf *pf)
@@ -1301,8 +1308,11 @@ void i40e_ptp_tx_hang(struct i40e_pf *pf);
void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf);
void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index);
void i40e_ptp_set_increment(struct i40e_pf *pf);
-int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
-int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
+int i40e_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int i40e_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void i40e_ptp_save_hw_time(struct i40e_pf *pf);
void i40e_ptp_restore_hw_time(struct i40e_pf *pf);
void i40e_ptp_init(struct i40e_pf *pf);
@@ -1312,7 +1322,6 @@ int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset);
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
int i40e_get_partition_bw_setting(struct i40e_pf *pf);
int i40e_set_partition_bw_setting(struct i40e_pf *pf);
-int i40e_commit_partition_bw_setting(struct i40e_pf *pf);
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);
void i40e_set_fec_in_flags(u8 fec_cfg, unsigned long *flags);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index f73f5930fc58..096ec46bb619 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -18,7 +18,7 @@ static int i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
(hw->aq.num_asq_entries *
- sizeof(struct i40e_aq_desc)),
+ sizeof(struct libie_aq_desc)),
I40E_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
return ret_code;
@@ -44,7 +44,7 @@ static int i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
(hw->aq.num_arq_entries *
- sizeof(struct i40e_aq_desc)),
+ sizeof(struct libie_aq_desc)),
I40E_ADMINQ_DESC_ALIGNMENT);
return ret_code;
@@ -80,7 +80,7 @@ static void i40e_free_adminq_arq(struct i40e_hw *hw)
**/
static int i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
- struct i40e_aq_desc *desc;
+ struct libie_aq_desc *desc;
struct i40e_dma_mem *bi;
int ret_code;
int i;
@@ -108,9 +108,9 @@ static int i40e_alloc_arq_bufs(struct i40e_hw *hw)
/* now configure the descriptors for use */
desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
- desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_BUF);
if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
- desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
desc->opcode = 0;
/* This is in accordance with Admin queue design, there is no
* register for buffer size configuration
@@ -119,12 +119,12 @@ static int i40e_alloc_arq_bufs(struct i40e_hw *hw)
desc->retval = 0;
desc->cookie_high = 0;
desc->cookie_low = 0;
- desc->params.external.addr_high =
+ desc->params.generic.addr_high =
cpu_to_le32(upper_32_bits(bi->pa));
- desc->params.external.addr_low =
+ desc->params.generic.addr_low =
cpu_to_le32(lower_32_bits(bi->pa));
- desc->params.external.param0 = 0;
- desc->params.external.param1 = 0;
+ desc->params.generic.param0 = 0;
+ desc->params.generic.param1 = 0;
}
alloc_arq_bufs:
@@ -691,8 +691,8 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
struct i40e_adminq_ring *asq = &(hw->aq.asq);
struct i40e_asq_cmd_details *details;
u16 ntc = asq->next_to_clean;
- struct i40e_aq_desc desc_cb;
- struct i40e_aq_desc *desc;
+ struct libie_aq_desc desc_cb;
+ struct libie_aq_desc *desc;
desc = I40E_ADMINQ_DESC(*asq, ntc);
details = I40E_ADMINQ_DETAILS(*asq, ntc);
@@ -750,7 +750,7 @@ static bool i40e_asq_done(struct i40e_hw *hw)
**/
static int
i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
- struct i40e_aq_desc *desc,
+ struct libie_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
@@ -758,7 +758,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
{
struct i40e_dma_mem *dma_buff = NULL;
struct i40e_asq_cmd_details *details;
- struct i40e_aq_desc *desc_on_ring;
+ struct libie_aq_desc *desc_on_ring;
bool cmd_completed = false;
u16 retval = 0;
int status = 0;
@@ -771,7 +771,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
goto asq_send_command_error;
}
- hw->aq.asq_last_status = I40E_AQ_RC_OK;
+ hw->aq.asq_last_status = LIBIE_AQ_RC_OK;
val = rd32(hw, I40E_PF_ATQH);
if (val >= hw->aq.num_asq_entries) {
@@ -851,9 +851,9 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
/* Update the address values in the desc with the pa value
* for respective buffer
*/
- desc_on_ring->params.external.addr_high =
+ desc_on_ring->params.generic.addr_high =
cpu_to_le32(upper_32_bits(dma_buff->pa));
- desc_on_ring->params.external.addr_low =
+ desc_on_ring->params.generic.addr_low =
cpu_to_le32(lower_32_bits(dma_buff->pa));
}
@@ -905,13 +905,13 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
retval &= 0xff;
}
cmd_completed = true;
- if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
+ if ((enum libie_aq_err)retval == LIBIE_AQ_RC_OK)
status = 0;
- else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
+ else if ((enum libie_aq_err)retval == LIBIE_AQ_RC_EBUSY)
status = -EBUSY;
else
status = -EIO;
- hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
+ hw->aq.asq_last_status = (enum libie_aq_err)retval;
}
i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
@@ -954,7 +954,7 @@ asq_send_command_error:
**/
int
i40e_asq_send_command_atomic(struct i40e_hw *hw,
- struct i40e_aq_desc *desc,
+ struct libie_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
@@ -972,7 +972,7 @@ i40e_asq_send_command_atomic(struct i40e_hw *hw,
}
int
-i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+i40e_asq_send_command(struct i40e_hw *hw, struct libie_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details)
{
@@ -996,12 +996,12 @@ i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
**/
int
i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
- struct i40e_aq_desc *desc,
+ struct libie_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
bool is_atomic_context,
- enum i40e_admin_queue_err *aq_status)
+ enum libie_aq_err *aq_status)
{
int status;
@@ -1016,16 +1016,6 @@ i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
return status;
}
-int
-i40e_asq_send_command_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc,
- void *buff, /* can be NULL */ u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details,
- enum i40e_admin_queue_err *aq_status)
-{
- return i40e_asq_send_command_atomic_v2(hw, desc, buff, buff_size,
- cmd_details, true, aq_status);
-}
-
/**
* i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
* @desc: pointer to the temp descriptor (non DMA mem)
@@ -1033,13 +1023,13 @@ i40e_asq_send_command_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc,
*
* Fill the desc with default values
**/
-void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+void i40e_fill_default_direct_cmd_desc(struct libie_aq_desc *desc,
u16 opcode)
{
/* zero out the desc */
- memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ memset((void *)desc, 0, sizeof(struct libie_aq_desc));
desc->opcode = cpu_to_le16(opcode);
- desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_SI);
}
/**
@@ -1057,7 +1047,7 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
u16 *pending)
{
u16 ntc = hw->aq.arq.next_to_clean;
- struct i40e_aq_desc *desc;
+ struct libie_aq_desc *desc;
struct i40e_dma_mem *bi;
int ret_code = 0;
u16 desc_idx;
@@ -1091,9 +1081,9 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
desc_idx = ntc;
hw->aq.arq_last_status =
- (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
+ (enum libie_aq_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags);
- if (flags & I40E_AQ_FLAG_ERR) {
+ if (flags & LIBIE_AQ_FLAG_ERR) {
ret_code = -EIO;
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
@@ -1117,14 +1107,14 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
* size
*/
bi = &hw->aq.arq.r.arq_bi[ntc];
- memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ memset((void *)desc, 0, sizeof(struct libie_aq_desc));
- desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_BUF);
if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
- desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
desc->datalen = cpu_to_le16((u16)bi->size);
- desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
- desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
+ desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
/* set tail = the last cleaned desc index. */
wr32(hw, I40E_PF_ARQT, ntc);
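
The admin-queue hunks above swap struct i40e_aq_desc and its error codes for the shared libie_aq_desc/libie_aq_err definitions, but the way a 64-bit DMA buffer address is programmed into the descriptor's two 32-bit address words is unchanged. A standalone sketch of that split, using an illustrative descriptor layout rather than the real libie structure:

/* Standalone sketch of the addr_high/addr_low programming pattern.
 * The layout below is illustrative only; the helpers mimic the kernel's
 * upper_32_bits()/lower_32_bits().
 */
#include <stdio.h>
#include <stdint.h>

#define upper_32_bits(n) ((uint32_t)((n) >> 32))
#define lower_32_bits(n) ((uint32_t)(n))

struct fake_aq_desc {
	uint16_t flags;
	uint16_t opcode;
	uint16_t datalen;
	uint16_t retval;
	uint32_t cookie_high;
	uint32_t cookie_low;
	struct {
		uint32_t param0;
		uint32_t param1;
		uint32_t addr_high;
		uint32_t addr_low;
	} params;
};

int main(void)
{
	uint64_t dma_pa = 0x0000001234567000ULL; /* made-up bus address */
	struct fake_aq_desc desc = { .datalen = 4096 };

	/* Same split the driver does before handing the descriptor to HW;
	 * the real code also converts to little endian with cpu_to_le32().
	 */
	desc.params.addr_high = upper_32_bits(dma_pa);
	desc.params.addr_low  = lower_32_bits(dma_pa);

	printf("addr_high=0x%08X addr_low=0x%08X\n",
	       desc.params.addr_high, desc.params.addr_low);
	return 0;
}
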
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 55b5bb884d73..1be97a3a86ce 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -9,7 +9,7 @@
#include "i40e_adminq_cmd.h"
#define I40E_ADMINQ_DESC(R, i) \
- (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
+ (&(((struct libie_aq_desc *)((R).desc_buf.va))[i]))
#define I40E_ADMINQ_DESC_ALIGNMENT 4096
@@ -39,7 +39,7 @@ struct i40e_asq_cmd_details {
u16 flags_dis;
bool async;
bool postpone;
- struct i40e_aq_desc *wb_desc;
+ struct libie_aq_desc *wb_desc;
};
#define I40E_ADMINQ_DETAILS(R, i) \
@@ -47,7 +47,7 @@ struct i40e_asq_cmd_details {
/* ARQ event information */
struct i40e_arq_event_info {
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
u16 msg_len;
u16 buf_len;
u8 *msg_buf;
@@ -72,8 +72,8 @@ struct i40e_adminq_info {
struct mutex arq_mutex; /* Receive queue lock */
/* last status values on send and receive queues */
- enum i40e_admin_queue_err asq_last_status;
- enum i40e_admin_queue_err arq_last_status;
+ enum libie_aq_err asq_last_status;
+ enum libie_aq_err arq_last_status;
};
/**
@@ -119,7 +119,7 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
#define I40E_AQ_LARGE_BUF 512
#define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */
-void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+void i40e_fill_default_direct_cmd_desc(struct libie_aq_desc *desc,
u16 opcode);
#endif /* _I40E_ADMINQ_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index c8f35d4de271..cc02a85ad42b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -4,6 +4,8 @@
#ifndef _I40E_ADMINQ_CMD_H_
#define _I40E_ADMINQ_CMD_H_
+#include <linux/net/intel/libie/adminq.h>
+
#include <linux/bits.h>
#include <linux/types.h>
@@ -30,75 +32,6 @@
/* API version 1.10 for X722 devices adds ability to request FEC encoding */
#define I40E_MINOR_VER_FW_REQUEST_FEC_X722 0x000A
-struct i40e_aq_desc {
- __le16 flags;
- __le16 opcode;
- __le16 datalen;
- __le16 retval;
- __le32 cookie_high;
- __le32 cookie_low;
- union {
- struct {
- __le32 param0;
- __le32 param1;
- __le32 param2;
- __le32 param3;
- } internal;
- struct {
- __le32 param0;
- __le32 param1;
- __le32 addr_high;
- __le32 addr_low;
- } external;
- u8 raw[16];
- } params;
-};
-
-/* Flags sub-structure
- * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
- * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
- */
-
-/* command flags and offsets*/
-#define I40E_AQ_FLAG_ERR_SHIFT 2
-#define I40E_AQ_FLAG_LB_SHIFT 9
-#define I40E_AQ_FLAG_RD_SHIFT 10
-#define I40E_AQ_FLAG_BUF_SHIFT 12
-#define I40E_AQ_FLAG_SI_SHIFT 13
-
-#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
-#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
-#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
-#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
-#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
-
-/* error codes */
-enum i40e_admin_queue_err {
- I40E_AQ_RC_OK = 0, /* success */
- I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
- I40E_AQ_RC_ENOENT = 2, /* No such element */
- I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
- I40E_AQ_RC_EINTR = 4, /* operation interrupted */
- I40E_AQ_RC_EIO = 5, /* I/O error */
- I40E_AQ_RC_ENXIO = 6, /* No such resource */
- I40E_AQ_RC_E2BIG = 7, /* Arg too long */
- I40E_AQ_RC_EAGAIN = 8, /* Try again */
- I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
- I40E_AQ_RC_EACCES = 10, /* Permission denied */
- I40E_AQ_RC_EFAULT = 11, /* Bad address */
- I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
- I40E_AQ_RC_EEXIST = 13, /* object already exists */
- I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
- I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
- I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
- I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
- I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
- I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
- I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
- I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
- I40E_AQ_RC_EFBIG = 22, /* File too large */
-};
-
/* Admin Queue command opcodes */
enum i40e_admin_queue_opc {
/* aq commands */
@@ -320,21 +253,6 @@ struct i40e_aqc_get_version {
__le16 api_minor;
};
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
-
-/* Send driver version (indirect 0x0002) */
-struct i40e_aqc_driver_version {
- u8 driver_major_ver;
- u8 driver_minor_ver;
- u8 driver_build_ver;
- u8 driver_subbuild_ver;
- u8 reserved[4];
- __le32 address_high;
- __le32 address_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
-
/* Queue Shutdown (direct 0x0003) */
struct i40e_aqc_queue_shutdown {
__le32 driver_unloading;
@@ -352,75 +270,6 @@ struct i40e_aqc_set_pf_context {
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
-/* Request resource ownership (direct 0x0008)
- * Release resource ownership (direct 0x0009)
- */
-struct i40e_aqc_request_resource {
- __le16 resource_id;
- __le16 access_type;
- __le32 timeout;
- __le32 resource_number;
- u8 reserved[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
-
-/* Get function capabilities (indirect 0x000A)
- * Get device capabilities (indirect 0x000B)
- */
-struct i40e_aqc_list_capabilites {
- u8 command_flags;
- u8 pf_index;
- u8 reserved[2];
- __le32 count;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
-
-struct i40e_aqc_list_capabilities_element_resp {
- __le16 id;
- u8 major_rev;
- u8 minor_rev;
- __le32 number;
- __le32 logical_id;
- __le32 phys_id;
- u8 reserved[16];
-};
-
-/* list of caps */
-
-#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001
-#define I40E_AQ_CAP_ID_MNG_MODE 0x0002
-#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003
-#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004
-#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
-#define I40E_AQ_CAP_ID_SRIOV 0x0012
-#define I40E_AQ_CAP_ID_VF 0x0013
-#define I40E_AQ_CAP_ID_VMDQ 0x0014
-#define I40E_AQ_CAP_ID_8021QBG 0x0015
-#define I40E_AQ_CAP_ID_8021QBR 0x0016
-#define I40E_AQ_CAP_ID_VSI 0x0017
-#define I40E_AQ_CAP_ID_DCB 0x0018
-#define I40E_AQ_CAP_ID_FCOE 0x0021
-#define I40E_AQ_CAP_ID_ISCSI 0x0022
-#define I40E_AQ_CAP_ID_RSS 0x0040
-#define I40E_AQ_CAP_ID_RXQ 0x0041
-#define I40E_AQ_CAP_ID_TXQ 0x0042
-#define I40E_AQ_CAP_ID_MSIX 0x0043
-#define I40E_AQ_CAP_ID_VF_MSIX 0x0044
-#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045
-#define I40E_AQ_CAP_ID_1588 0x0046
-#define I40E_AQ_CAP_ID_IWARP 0x0051
-#define I40E_AQ_CAP_ID_LED 0x0061
-#define I40E_AQ_CAP_ID_SDP 0x0062
-#define I40E_AQ_CAP_ID_MDIO 0x0063
-#define I40E_AQ_CAP_ID_WSR_PROT 0x0064
-#define I40E_AQ_CAP_ID_NVM_MGMT 0x0080
-#define I40E_AQ_CAP_ID_FLEX10 0x00F1
-#define I40E_AQ_CAP_ID_CEM 0x00F2
-
/* Set CPPM Configuration (direct 0x0103) */
struct i40e_aqc_cppm_configuration {
__le16 command_flags;
@@ -1712,6 +1561,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
struct i40e_aq_set_mac_config {
__le16 max_frame_size;
u8 params;
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN BIT(2)
u8 tx_timer_priority; /* bitmap */
__le16 tx_timer_value;
__le16 fc_refresh_threshold;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 59263551c383..518bc738ea3b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -359,8 +359,8 @@ static void i40e_client_add_instance(struct i40e_pf *pf)
if (i40e_client_get_params(vsi, &cdev->lan_info.params))
goto free_cdev;
- mac = list_first_entry(&cdev->lan_info.netdev->dev_addrs.list,
- struct netdev_hw_addr, list);
+ mac = list_first_entry_or_null(&cdev->lan_info.netdev->dev_addrs.list,
+ struct netdev_hw_addr, list);
if (mac)
ether_addr_copy(cdev->lan_info.lanmac, mac->addr);
else
@@ -682,9 +682,7 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
if (err) {
dev_info(&pf->pdev->dev,
"couldn't get PF vsi config, err %pe aq_err %s\n",
- ERR_PTR(err),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
return -ENOENT;
}
@@ -711,8 +709,7 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
dev_info(&pf->pdev->dev,
"update VSI ctxt for PE failed, err %pe aq_err %s\n",
ERR_PTR(err),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
}
}
return err;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index e8031f1a9b4f..59f5c1e810eb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -69,66 +69,6 @@ int i40e_set_mac_type(struct i40e_hw *hw)
}
/**
- * i40e_aq_str - convert AQ err code to a string
- * @hw: pointer to the HW structure
- * @aq_err: the AQ error code to convert
- **/
-const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
-{
- switch (aq_err) {
- case I40E_AQ_RC_OK:
- return "OK";
- case I40E_AQ_RC_EPERM:
- return "I40E_AQ_RC_EPERM";
- case I40E_AQ_RC_ENOENT:
- return "I40E_AQ_RC_ENOENT";
- case I40E_AQ_RC_ESRCH:
- return "I40E_AQ_RC_ESRCH";
- case I40E_AQ_RC_EINTR:
- return "I40E_AQ_RC_EINTR";
- case I40E_AQ_RC_EIO:
- return "I40E_AQ_RC_EIO";
- case I40E_AQ_RC_ENXIO:
- return "I40E_AQ_RC_ENXIO";
- case I40E_AQ_RC_E2BIG:
- return "I40E_AQ_RC_E2BIG";
- case I40E_AQ_RC_EAGAIN:
- return "I40E_AQ_RC_EAGAIN";
- case I40E_AQ_RC_ENOMEM:
- return "I40E_AQ_RC_ENOMEM";
- case I40E_AQ_RC_EACCES:
- return "I40E_AQ_RC_EACCES";
- case I40E_AQ_RC_EFAULT:
- return "I40E_AQ_RC_EFAULT";
- case I40E_AQ_RC_EBUSY:
- return "I40E_AQ_RC_EBUSY";
- case I40E_AQ_RC_EEXIST:
- return "I40E_AQ_RC_EEXIST";
- case I40E_AQ_RC_EINVAL:
- return "I40E_AQ_RC_EINVAL";
- case I40E_AQ_RC_ENOTTY:
- return "I40E_AQ_RC_ENOTTY";
- case I40E_AQ_RC_ENOSPC:
- return "I40E_AQ_RC_ENOSPC";
- case I40E_AQ_RC_ENOSYS:
- return "I40E_AQ_RC_ENOSYS";
- case I40E_AQ_RC_ERANGE:
- return "I40E_AQ_RC_ERANGE";
- case I40E_AQ_RC_EFLUSHED:
- return "I40E_AQ_RC_EFLUSHED";
- case I40E_AQ_RC_BAD_ADDR:
- return "I40E_AQ_RC_BAD_ADDR";
- case I40E_AQ_RC_EMODE:
- return "I40E_AQ_RC_EMODE";
- case I40E_AQ_RC_EFBIG:
- return "I40E_AQ_RC_EFBIG";
- }
-
- snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
- return hw->err_str;
-}
-
-/**
* i40e_debug_aq
* @hw: debug mask related to admin queue
* @mask: debug mask
@@ -141,7 +81,7 @@ const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
void *buffer, u16 buf_len)
{
- struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
+ struct libie_aq_desc *aq_desc = (struct libie_aq_desc *)desc;
u32 effective_mask = hw->debug_mask & mask;
char prefix[27];
u16 len;
@@ -164,12 +104,12 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
le32_to_cpu(aq_desc->cookie_low));
i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
"\tparam (0,1) 0x%08X 0x%08X\n",
- le32_to_cpu(aq_desc->params.internal.param0),
- le32_to_cpu(aq_desc->params.internal.param1));
+ le32_to_cpu(aq_desc->params.generic.param0),
+ le32_to_cpu(aq_desc->params.generic.param1));
i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
"\taddr (h,l) 0x%08X 0x%08X\n",
- le32_to_cpu(aq_desc->params.external.addr_high),
- le32_to_cpu(aq_desc->params.external.addr_low));
+ le32_to_cpu(aq_desc->params.generic.addr_high),
+ le32_to_cpu(aq_desc->params.generic.addr_low));
if (buffer && buf_len != 0 && len != 0 &&
(effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
@@ -214,14 +154,14 @@ bool i40e_check_asq_alive(struct i40e_hw *hw)
int i40e_aq_queue_shutdown(struct i40e_hw *hw,
bool unloading)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_queue_shutdown *cmd =
- (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+ struct i40e_aqc_queue_shutdown *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_queue_shutdown);
+ cmd = libie_aq_raw(&desc);
if (unloading)
cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
@@ -245,9 +185,8 @@ static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
u8 *lut, u16 lut_size,
bool set)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_get_set_rss_lut *cmd_resp =
- (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+ struct i40e_aqc_get_set_rss_lut *cmd_resp;
+ struct libie_aq_desc desc;
int status;
u16 flags;
@@ -258,9 +197,10 @@ static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_rss_lut);
+ cmd_resp = libie_aq_raw(&desc);
/* Indirect command */
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_RD);
vsi_id = FIELD_PREP(I40E_AQC_SET_RSS_LUT_VSI_ID_MASK, vsi_id) |
FIELD_PREP(I40E_AQC_SET_RSS_LUT_VSI_VALID, 1);
@@ -326,10 +266,9 @@ static int i40e_aq_get_set_rss_key(struct i40e_hw *hw,
struct i40e_aqc_get_set_rss_key_data *key,
bool set)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_get_set_rss_key *cmd_resp =
- (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+ struct i40e_aqc_get_set_rss_key *cmd_resp;
+ struct libie_aq_desc desc;
int status;
if (set)
@@ -339,9 +278,10 @@ static int i40e_aq_get_set_rss_key(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_rss_key);
+ cmd_resp = libie_aq_raw(&desc);
/* Indirect command */
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_RD);
vsi_id = FIELD_PREP(I40E_AQC_SET_RSS_KEY_VSI_ID_MASK, vsi_id) |
FIELD_PREP(I40E_AQC_SET_RSS_KEY_VSI_VALID, 1);
@@ -439,13 +379,13 @@ i40e_aq_mac_address_read(struct i40e_hw *hw,
struct i40e_aqc_mac_address_read_data *addrs,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_mac_address_read *cmd_data =
- (struct i40e_aqc_mac_address_read *)&desc.params.raw;
+ struct i40e_aqc_mac_address_read *cmd_data;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
- desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
+ cmd_data = libie_aq_raw(&desc);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_BUF);
status = i40e_asq_send_command(hw, &desc, addrs,
sizeof(*addrs), cmd_details);
@@ -465,13 +405,13 @@ int i40e_aq_mac_address_write(struct i40e_hw *hw,
u16 flags, u8 *mac_addr,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_mac_address_write *cmd_data =
- (struct i40e_aqc_mac_address_write *)&desc.params.raw;
+ struct i40e_aqc_mac_address_write *cmd_data;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_mac_address_write);
+ cmd_data = libie_aq_raw(&desc);
cmd_data->command_flags = cpu_to_le16(flags);
cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
@@ -817,10 +757,11 @@ int i40e_pf_reset(struct i40e_hw *hw)
void i40e_clear_hw(struct i40e_hw *hw)
{
u32 num_queues, base_queue;
- u32 num_pf_int;
- u32 num_vf_int;
+ s32 num_pf_int;
+ s32 num_vf_int;
u32 num_vfs;
- u32 i, j;
+ s32 i;
+ u32 j;
u32 val;
u32 eol = 0x7ff;
@@ -1060,7 +1001,7 @@ i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
{
u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
if (!abilities)
@@ -1070,36 +1011,36 @@ i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_phy_abilities);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
if (abilities_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
if (qualified_modules)
- desc.params.external.param0 |=
+ desc.params.generic.param0 |=
cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
if (report_init)
- desc.params.external.param0 |=
+ desc.params.generic.param0 |=
cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
status = i40e_asq_send_command(hw, &desc, abilities,
abilities_size, cmd_details);
switch (hw->aq.asq_last_status) {
- case I40E_AQ_RC_EIO:
+ case LIBIE_AQ_RC_EIO:
status = -EIO;
break;
- case I40E_AQ_RC_EAGAIN:
+ case LIBIE_AQ_RC_EAGAIN:
usleep_range(1000, 2000);
total_delay++;
status = -EIO;
break;
- /* also covers I40E_AQ_RC_OK */
+ /* also covers LIBIE_AQ_RC_OK */
default:
break;
}
- } while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
+ } while ((hw->aq.asq_last_status == LIBIE_AQ_RC_EAGAIN) &&
(total_delay < max_delay));
if (status)
@@ -1136,9 +1077,8 @@ int i40e_aq_set_phy_config(struct i40e_hw *hw,
struct i40e_aq_set_phy_config *config,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aq_set_phy_config *cmd =
- (struct i40e_aq_set_phy_config *)&desc.params.raw;
+ struct i40e_aq_set_phy_config *cmd;
+ struct libie_aq_desc desc;
int status;
if (!config)
@@ -1147,6 +1087,7 @@ int i40e_aq_set_phy_config(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_config);
+ cmd = libie_aq_raw(&desc);
*cmd = *config;
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -1249,6 +1190,40 @@ int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
}
/**
+ * i40e_aq_set_mac_config - Configure MAC settings
+ * @hw: pointer to the hw struct
+ * @max_frame_size: Maximum Frame Size to be supported by the port
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set MAC configuration (0x0603). Note that max_frame_size must be greater
+ * than zero.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+int i40e_aq_set_mac_config(struct i40e_hw *hw, u16 max_frame_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_set_mac_config *cmd;
+ struct libie_aq_desc desc;
+
+ cmd = libie_aq_raw(&desc);
+
+ if (max_frame_size == 0)
+ return -EINVAL;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_mac_config);
+
+ cmd->max_frame_size = cpu_to_le16(max_frame_size);
+ cmd->params = I40E_AQ_SET_MAC_CONFIG_CRC_EN;
+
+#define I40E_AQ_SET_MAC_CONFIG_FC_DEFAULT_THRESHOLD 0x7FFF
+ cmd->fc_refresh_threshold =
+ cpu_to_le16(I40E_AQ_SET_MAC_CONFIG_FC_DEFAULT_THRESHOLD);
+
+ return i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+}
+
+/**
* i40e_aq_clear_pxe_mode
* @hw: pointer to the hw struct
* @cmd_details: pointer to command details structure or NULL
@@ -1258,14 +1233,14 @@ int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_clear_pxe *cmd =
- (struct i40e_aqc_clear_pxe *)&desc.params.raw;
+ struct i40e_aqc_clear_pxe *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_clear_pxe_mode);
+ cmd = libie_aq_raw(&desc);
cmd->rx_cnt = 0x2;
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -1287,14 +1262,14 @@ int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
bool enable_link,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_link_restart_an *cmd =
- (struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
+ struct i40e_aqc_set_link_restart_an *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_link_restart_an);
+ cmd = libie_aq_raw(&desc);
cmd->command = I40E_AQ_PHY_RESTART_AN;
if (enable_link)
cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
@@ -1319,16 +1294,16 @@ int i40e_aq_get_link_info(struct i40e_hw *hw,
bool enable_lse, struct i40e_link_status *link,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_get_link_status *resp =
- (struct i40e_aqc_get_link_status *)&desc.params.raw;
struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+ struct i40e_aqc_get_link_status *resp;
+ struct libie_aq_desc desc;
bool tx_pause, rx_pause;
u16 command_flags;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
+ resp = libie_aq_raw(&desc);
if (enable_lse)
command_flags = I40E_AQ_LSE_ENABLE;
else
@@ -1414,14 +1389,14 @@ int i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
u16 mask,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_phy_int_mask *cmd =
- (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
+ struct i40e_aqc_set_phy_int_mask *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_int_mask);
+ cmd = libie_aq_raw(&desc);
cmd->event_mask = cpu_to_le16(mask);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -1440,11 +1415,11 @@ int i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
int i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_lb_mode *cmd =
- (struct i40e_aqc_set_lb_mode *)&desc.params.raw;
+ struct i40e_aqc_set_lb_mode *cmd;
+ struct libie_aq_desc desc;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_lb_modes);
+ cmd = libie_aq_raw(&desc);
if (ena_lpbk) {
if (hw->nvm.version <= I40E_LEGACY_LOOPBACK_NVM_VER)
cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL_LEGACY);
@@ -1466,14 +1441,14 @@ int i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk,
int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_phy_debug *cmd =
- (struct i40e_aqc_set_phy_debug *)&desc.params.raw;
+ struct i40e_aqc_set_phy_debug *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_debug);
+ cmd = libie_aq_raw(&desc);
cmd->command_flags = cmd_flags;
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -1493,23 +1468,22 @@ int i40e_aq_add_vsi(struct i40e_hw *hw,
struct i40e_vsi_context *vsi_ctx,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_get_update_vsi *cmd =
- (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
- struct i40e_aqc_add_get_update_vsi_completion *resp =
- (struct i40e_aqc_add_get_update_vsi_completion *)
- &desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp;
+ struct i40e_aqc_add_get_update_vsi *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_vsi);
+ resp = libie_aq_raw(&desc);
+ cmd = libie_aq_raw(&desc);
cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid);
cmd->connection_type = vsi_ctx->connection_type;
cmd->vf_id = vsi_ctx->vf_num;
cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info),
@@ -1537,15 +1511,14 @@ int i40e_aq_set_default_vsi(struct i40e_hw *hw,
u16 seid,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)
- &desc.params.raw;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
+ cmd = libie_aq_raw(&desc);
cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
cmd->seid = cpu_to_le16(seid);
@@ -1565,15 +1538,14 @@ int i40e_aq_clear_default_vsi(struct i40e_hw *hw,
u16 seid,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)
- &desc.params.raw;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
+ cmd = libie_aq_raw(&desc);
cmd->promiscuous_flags = cpu_to_le16(0);
cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
cmd->seid = cpu_to_le16(seid);
@@ -1596,15 +1568,15 @@ int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details,
bool rx_only_promisc)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd;
+ struct libie_aq_desc desc;
u16 flags = 0;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
+ cmd = libie_aq_raw(&desc);
if (set) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
if (rx_only_promisc && i40e_is_aq_api_ver_ge(hw, 1, 5))
@@ -1635,15 +1607,15 @@ int i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
u16 seid, bool set,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd;
+ struct libie_aq_desc desc;
u16 flags = 0;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
+ cmd = libie_aq_raw(&desc);
if (set)
flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
@@ -1670,15 +1642,15 @@ int i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
u16 vid,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd;
+ struct libie_aq_desc desc;
u16 flags = 0;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
+ cmd = libie_aq_raw(&desc);
if (enable)
flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
@@ -1706,15 +1678,15 @@ int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
u16 vid,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd;
+ struct libie_aq_desc desc;
u16 flags = 0;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
+ cmd = libie_aq_raw(&desc);
if (enable) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
if (i40e_is_aq_api_ver_ge(hw, 1, 5))
@@ -1747,9 +1719,8 @@ int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
u16 seid, bool enable, u16 vid,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd;
+ struct libie_aq_desc desc;
u16 flags = 0;
int status;
@@ -1759,6 +1730,7 @@ int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
if (enable)
flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;
+ cmd = libie_aq_raw(&desc);
cmd->promiscuous_flags = cpu_to_le16(flags);
cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
cmd->seid = cpu_to_le16(seid);
@@ -1782,14 +1754,14 @@ int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
u16 seid, bool set_filter,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
+ cmd = libie_aq_raw(&desc);
if (set_filter)
cmd->promiscuous_flags
|= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
@@ -1805,37 +1777,6 @@ int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
}
/**
- * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
- * @hw: pointer to the hw struct
- * @seid: vsi number
- * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
- * @cmd_details: pointer to command details structure or NULL
- **/
-int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
- u16 seid, bool enable,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- u16 flags = 0;
- int status;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_set_vsi_promiscuous_modes);
- if (enable)
- flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
-
- cmd->promiscuous_flags = cpu_to_le16(flags);
- cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN);
- cmd->seid = cpu_to_le16(seid);
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- return status;
-}
-
-/**
* i40e_aq_get_vsi_params - get VSI configuration info
* @hw: pointer to the hw struct
* @vsi_ctx: pointer to a vsi context struct
@@ -1845,20 +1786,19 @@ int i40e_aq_get_vsi_params(struct i40e_hw *hw,
struct i40e_vsi_context *vsi_ctx,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_get_update_vsi *cmd =
- (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
- struct i40e_aqc_add_get_update_vsi_completion *resp =
- (struct i40e_aqc_add_get_update_vsi_completion *)
- &desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp;
+ struct i40e_aqc_add_get_update_vsi *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_vsi_parameters);
+ resp = libie_aq_raw(&desc);
+ cmd = libie_aq_raw(&desc);
cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info), NULL);
@@ -1887,19 +1827,18 @@ int i40e_aq_update_vsi_params(struct i40e_hw *hw,
struct i40e_vsi_context *vsi_ctx,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_get_update_vsi *cmd =
- (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
- struct i40e_aqc_add_get_update_vsi_completion *resp =
- (struct i40e_aqc_add_get_update_vsi_completion *)
- &desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp;
+ struct i40e_aqc_add_get_update_vsi *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_update_vsi_parameters);
+ resp = libie_aq_raw(&desc);
+ cmd = libie_aq_raw(&desc);
cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info),
@@ -1926,16 +1865,16 @@ int i40e_aq_get_switch_config(struct i40e_hw *hw,
u16 buf_size, u16 *start_seid,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_switch_seid *scfg =
- (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ struct i40e_aqc_switch_seid *scfg;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_switch_config);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ scfg = libie_aq_raw(&desc);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
scfg->seid = cpu_to_le16(*start_seid);
status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
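
The admin-queue flag constants are renamed in lockstep (I40E_AQ_FLAG_BUF/RD/LB/SI become their LIBIE_AQ_FLAG_* counterparts) while the indirect-buffer logic stays the same: BUF is set for any command with an attached buffer, RD is added when the buffer carries data toward the firmware, and LB is added when the buffer exceeds I40E_AQ_LARGE_BUF. A hypothetical helper, not part of this patch, that captures the recurring pattern under those assumptions:

	/* hypothetical helper, for illustration only */
	static void i40e_aq_set_buf_flags(struct libie_aq_desc *desc,
					  u16 buf_size, bool rd)
	{
		u16 flags = LIBIE_AQ_FLAG_BUF;

		if (rd)
			flags |= LIBIE_AQ_FLAG_RD;
		if (buf_size > I40E_AQ_LARGE_BUF)
			flags |= LIBIE_AQ_FLAG_LB;

		desc->flags |= cpu_to_le16(flags);
	}
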
@@ -1960,13 +1899,13 @@ int i40e_aq_set_switch_config(struct i40e_hw *hw,
u16 valid_flags, u8 mode,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_switch_config *scfg =
- (struct i40e_aqc_set_switch_config *)&desc.params.raw;
+ struct i40e_aqc_set_switch_config *scfg;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_switch_config);
+ scfg = libie_aq_raw(&desc);
scfg->flags = cpu_to_le16(flags);
scfg->valid_flags = cpu_to_le16(valid_flags);
scfg->mode = mode;
@@ -1998,11 +1937,11 @@ int i40e_aq_get_firmware_version(struct i40e_hw *hw,
u16 *api_major_version, u16 *api_minor_version,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_get_version *resp =
- (struct i40e_aqc_get_version *)&desc.params.raw;
+ struct i40e_aqc_get_version *resp;
+ struct libie_aq_desc desc;
int status;
+ resp = libie_aq_raw(&desc);
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -2035,22 +1974,22 @@ int i40e_aq_send_driver_version(struct i40e_hw *hw,
struct i40e_driver_version *dv,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_driver_version *cmd =
- (struct i40e_aqc_driver_version *)&desc.params.raw;
+ struct libie_aqc_driver_ver *cmd;
+ struct libie_aq_desc desc;
int status;
u16 len;
if (dv == NULL)
return -EINVAL;
+ cmd = libie_aq_raw(&desc);
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
- desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
- cmd->driver_major_ver = dv->major_version;
- cmd->driver_minor_ver = dv->minor_version;
- cmd->driver_build_ver = dv->build_version;
- cmd->driver_subbuild_ver = dv->subbuild_version;
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD);
+ cmd->major_ver = dv->major_version;
+ cmd->minor_ver = dv->minor_version;
+ cmd->build_ver = dv->build_version;
+ cmd->subbuild_ver = dv->subbuild_version;
len = 0;
while (len < sizeof(dv->driver_string) &&
@@ -2150,11 +2089,9 @@ int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
bool enable_stats,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_veb *cmd =
- (struct i40e_aqc_add_veb *)&desc.params.raw;
- struct i40e_aqc_add_veb_completion *resp =
- (struct i40e_aqc_add_veb_completion *)&desc.params.raw;
+ struct i40e_aqc_add_veb_completion *resp;
+ struct i40e_aqc_add_veb *cmd;
+ struct libie_aq_desc desc;
u16 veb_flags = 0;
int status;
@@ -2162,6 +2099,8 @@ int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
if (!!uplink_seid != !!downlink_seid)
return -EINVAL;
+ resp = libie_aq_raw(&desc);
+ cmd = libie_aq_raw(&desc);
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
cmd->uplink_seid = cpu_to_le16(uplink_seid);
@@ -2208,15 +2147,14 @@ int i40e_aq_get_veb_parameters(struct i40e_hw *hw,
u16 *vebs_used, u16 *vebs_free,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
- (struct i40e_aqc_get_veb_parameters_completion *)
- &desc.params.raw;
+ struct i40e_aqc_get_veb_parameters_completion *cmd_resp;
+ struct libie_aq_desc desc;
int status;
if (veb_seid == 0)
return -EINVAL;
+ cmd_resp = libie_aq_raw(&desc);
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_veb_parameters);
cmd_resp->seid = cpu_to_le16(veb_seid);
@@ -2258,10 +2196,9 @@ get_veb_exit:
**/
static u16
i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list,
- struct i40e_aq_desc *desc, u16 count, u16 seid)
+ struct libie_aq_desc *desc, u16 count, u16 seid)
{
- struct i40e_aqc_macvlan *cmd =
- (struct i40e_aqc_macvlan *)&desc->params.raw;
+ struct i40e_aqc_macvlan *cmd = libie_aq_raw(desc);
u16 buf_size;
int i;
@@ -2279,9 +2216,9 @@ i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list,
mv_list[i].flags |=
cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
- desc->flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc->flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
if (buf_size > I40E_AQ_LARGE_BUF)
- desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc->flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
return buf_size;
}
@@ -2301,7 +2238,7 @@ i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_add_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
u16 buf_size;
if (count == 0 || !mv_list || !hw)
@@ -2332,9 +2269,9 @@ int
i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_add_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details,
- enum i40e_admin_queue_err *aq_status)
+ enum libie_aq_err *aq_status)
{
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
u16 buf_size;
if (count == 0 || !mv_list || !hw)
@@ -2361,9 +2298,8 @@ i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_macvlan *cmd =
- (struct i40e_aqc_macvlan *)&desc.params.raw;
+ struct i40e_aqc_macvlan *cmd;
+ struct libie_aq_desc desc;
u16 buf_size;
int status;
@@ -2374,14 +2310,15 @@ i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
/* prep the rest of the request */
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
+ cmd = libie_aq_raw(&desc);
cmd->num_addresses = cpu_to_le16(count);
cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
cmd->seid[1] = 0;
cmd->seid[2] = 0;
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
status = i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
cmd_details, true);
@@ -2408,10 +2345,10 @@ int
i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details,
- enum i40e_admin_queue_err *aq_status)
+ enum libie_aq_err *aq_status)
{
struct i40e_aqc_macvlan *cmd;
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
u16 buf_size;
if (count == 0 || !mv_list || !hw)
@@ -2421,151 +2358,21 @@ i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
/* prep the rest of the request */
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
- cmd = (struct i40e_aqc_macvlan *)&desc.params.raw;
+ cmd = libie_aq_raw(&desc);
cmd->num_addresses = cpu_to_le16(count);
cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
cmd->seid[1] = 0;
cmd->seid[2] = 0;
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size,
cmd_details, true, aq_status);
}
/**
- * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule
- * @hw: pointer to the hw struct
- * @opcode: AQ opcode for add or delete mirror rule
- * @sw_seid: Switch SEID (to which rule refers)
- * @rule_type: Rule Type (ingress/egress/VLAN)
- * @id: Destination VSI SEID or Rule ID
- * @count: length of the list
- * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
- * @cmd_details: pointer to command details structure or NULL
- * @rule_id: Rule ID returned from FW
- * @rules_used: Number of rules used in internal switch
- * @rules_free: Number of rules free in internal switch
- *
- * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
- * VEBs/VEPA elements only
- **/
-static int i40e_mirrorrule_op(struct i40e_hw *hw,
- u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
- u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rule_id, u16 *rules_used, u16 *rules_free)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_delete_mirror_rule *cmd =
- (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
- struct i40e_aqc_add_delete_mirror_rule_completion *resp =
- (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
- u16 buf_size;
- int status;
-
- buf_size = count * sizeof(*mr_list);
-
- /* prep the rest of the request */
- i40e_fill_default_direct_cmd_desc(&desc, opcode);
- cmd->seid = cpu_to_le16(sw_seid);
- cmd->rule_type = cpu_to_le16(rule_type &
- I40E_AQC_MIRROR_RULE_TYPE_MASK);
- cmd->num_entries = cpu_to_le16(count);
- /* Dest VSI for add, rule_id for delete */
- cmd->destination = cpu_to_le16(id);
- if (mr_list) {
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
- I40E_AQ_FLAG_RD));
- if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
- }
-
- status = i40e_asq_send_command(hw, &desc, mr_list, buf_size,
- cmd_details);
- if (!status ||
- hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) {
- if (rule_id)
- *rule_id = le16_to_cpu(resp->rule_id);
- if (rules_used)
- *rules_used = le16_to_cpu(resp->mirror_rules_used);
- if (rules_free)
- *rules_free = le16_to_cpu(resp->mirror_rules_free);
- }
- return status;
-}
-
-/**
- * i40e_aq_add_mirrorrule - add a mirror rule
- * @hw: pointer to the hw struct
- * @sw_seid: Switch SEID (to which rule refers)
- * @rule_type: Rule Type (ingress/egress/VLAN)
- * @dest_vsi: SEID of VSI to which packets will be mirrored
- * @count: length of the list
- * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
- * @cmd_details: pointer to command details structure or NULL
- * @rule_id: Rule ID returned from FW
- * @rules_used: Number of rules used in internal switch
- * @rules_free: Number of rules free in internal switch
- *
- * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
- **/
-int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 dest_vsi, u16 count,
- __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rule_id, u16 *rules_used, u16 *rules_free)
-{
- if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
- rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
- if (count == 0 || !mr_list)
- return -EINVAL;
- }
-
- return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid,
- rule_type, dest_vsi, count, mr_list,
- cmd_details, rule_id, rules_used, rules_free);
-}
-
-/**
- * i40e_aq_delete_mirrorrule - delete a mirror rule
- * @hw: pointer to the hw struct
- * @sw_seid: Switch SEID (to which rule refers)
- * @rule_type: Rule Type (ingress/egress/VLAN)
- * @count: length of the list
- * @rule_id: Rule ID that is returned in the receive desc as part of
- * add_mirrorrule.
- * @mr_list: list of mirrored VLAN IDs to be removed
- * @cmd_details: pointer to command details structure or NULL
- * @rules_used: Number of rules used in internal switch
- * @rules_free: Number of rules free in internal switch
- *
- * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
- **/
-int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 rule_id, u16 count,
- __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rules_used, u16 *rules_free)
-{
- /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */
- if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
- /* count and mr_list shall be valid for rule_type INGRESS VLAN
- * mirroring. For other rule_type, count and rule_type should
- * not matter.
- */
- if (count == 0 || !mr_list)
- return -EINVAL;
- }
-
- return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid,
- rule_type, rule_id, count, mr_list,
- cmd_details, NULL, rules_used, rules_free);
-}
-
-/**
* i40e_aq_send_msg_to_vf
* @hw: pointer to the hardware structure
* @vfid: VF id to send msg
@@ -2581,21 +2388,21 @@ int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_pf_vf_message *cmd =
- (struct i40e_aqc_pf_vf_message *)&desc.params.raw;
+ struct i40e_aqc_pf_vf_message *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
+ cmd = libie_aq_raw(&desc);
cmd->id = cpu_to_le32(vfid);
desc.cookie_high = cpu_to_le32(v_opcode);
desc.cookie_low = cpu_to_le32(v_retval);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_SI);
if (msglen) {
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
- I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF |
+ LIBIE_AQ_FLAG_RD));
if (msglen > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(msglen);
}
status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
@@ -2616,9 +2423,8 @@ int i40e_aq_debug_read_register(struct i40e_hw *hw,
u32 reg_addr, u64 *reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_debug_reg_read_write *cmd_resp =
- (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
+ struct i40e_aqc_debug_reg_read_write *cmd_resp;
+ struct libie_aq_desc desc;
int status;
if (reg_val == NULL)
@@ -2626,6 +2432,7 @@ int i40e_aq_debug_read_register(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
+ cmd_resp = libie_aq_raw(&desc);
cmd_resp->address = cpu_to_le32(reg_addr);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -2651,13 +2458,13 @@ int i40e_aq_debug_write_register(struct i40e_hw *hw,
u32 reg_addr, u64 reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_debug_reg_read_write *cmd =
- (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
+ struct i40e_aqc_debug_reg_read_write *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
+ cmd = libie_aq_raw(&desc);
cmd->address = cpu_to_le32(reg_addr);
cmd->value_high = cpu_to_le32((u32)(reg_val >> 32));
cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF));
@@ -2684,16 +2491,16 @@ int i40e_aq_request_resource(struct i40e_hw *hw,
u8 sdp_number, u64 *timeout,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_request_resource *cmd_resp =
- (struct i40e_aqc_request_resource *)&desc.params.raw;
+ struct libie_aqc_req_res *cmd_resp;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
- cmd_resp->resource_id = cpu_to_le16(resource);
+ cmd_resp = libie_aq_raw(&desc);
+ cmd_resp->res_id = cpu_to_le16(resource);
cmd_resp->access_type = cpu_to_le16(access);
- cmd_resp->resource_number = cpu_to_le32(sdp_number);
+ cmd_resp->res_number = cpu_to_le32(sdp_number);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
/* The completion specifies the maximum time in ms that the driver
@@ -2702,7 +2509,7 @@ int i40e_aq_request_resource(struct i40e_hw *hw,
* busy return value and the timeout field indicates the maximum time
* the current owner of the resource has to free it.
*/
- if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
+ if (!status || hw->aq.asq_last_status == LIBIE_AQ_RC_EBUSY)
*timeout = le32_to_cpu(cmd_resp->timeout);
return status;
@@ -2722,15 +2529,15 @@ int i40e_aq_release_resource(struct i40e_hw *hw,
u8 sdp_number,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_request_resource *cmd =
- (struct i40e_aqc_request_resource *)&desc.params.raw;
+ struct libie_aqc_req_res *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
- cmd->resource_id = cpu_to_le16(resource);
- cmd->resource_number = cpu_to_le32(sdp_number);
+ cmd = libie_aq_raw(&desc);
+ cmd->res_id = cpu_to_le16(resource);
+ cmd->res_number = cpu_to_le32(sdp_number);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -2754,9 +2561,8 @@ int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
bool last_command,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_nvm_update *cmd =
- (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ struct i40e_aqc_nvm_update *cmd;
+ struct libie_aq_desc desc;
int status;
/* In offset the highest byte must be zeroed. */
@@ -2767,6 +2573,7 @@ int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);
+ cmd = libie_aq_raw(&desc);
/* If this is the last command in a series, set the proper flag. */
if (last_command)
cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
@@ -2774,9 +2581,9 @@ int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
cmd->offset = cpu_to_le32(offset);
cmd->length = cpu_to_le16(length);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
if (length > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
@@ -2799,9 +2606,8 @@ int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, bool last_command,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_nvm_update *cmd =
- (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ struct i40e_aqc_nvm_update *cmd;
+ struct libie_aq_desc desc;
int status;
/* In offset the highest byte must be zeroed. */
@@ -2812,6 +2618,7 @@ int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase);
+ cmd = libie_aq_raw(&desc);
/* If this is the last command in a series, set the proper flag. */
if (last_command)
cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
@@ -2838,7 +2645,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
u32 cap_count,
enum i40e_admin_queue_opc list_type_opc)
{
- struct i40e_aqc_list_capabilities_element_resp *cap;
+ struct libie_aqc_list_caps_elem *cap;
u32 valid_functions, num_functions;
u32 number, logical_id, phys_id;
struct i40e_hw_capabilities *p;
@@ -2847,7 +2654,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
int status;
u32 i = 0;
- cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
+ cap = (struct libie_aqc_list_caps_elem *)buff;
if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
p = &hw->dev_caps;
@@ -2857,17 +2664,17 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
return;
for (i = 0; i < cap_count; i++, cap++) {
- id = le16_to_cpu(cap->id);
+ id = le16_to_cpu(cap->cap);
number = le32_to_cpu(cap->number);
logical_id = le32_to_cpu(cap->logical_id);
phys_id = le32_to_cpu(cap->phys_id);
- major_rev = cap->major_rev;
+ major_rev = cap->major_ver;
switch (id) {
- case I40E_AQ_CAP_ID_SWITCH_MODE:
+ case LIBIE_AQC_CAPS_SWITCH_MODE:
p->switch_mode = number;
break;
- case I40E_AQ_CAP_ID_MNG_MODE:
+ case LIBIE_AQC_CAPS_MNG_MODE:
p->management_mode = number;
if (major_rev > 1) {
p->mng_protocols_over_mctp = logical_id;
@@ -2878,76 +2685,76 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
p->mng_protocols_over_mctp = 0;
}
break;
- case I40E_AQ_CAP_ID_NPAR_ACTIVE:
+ case LIBIE_AQC_CAPS_NPAR_ACTIVE:
p->npar_enable = number;
break;
- case I40E_AQ_CAP_ID_OS2BMC_CAP:
+ case LIBIE_AQC_CAPS_OS2BMC_CAP:
p->os2bmc = number;
break;
- case I40E_AQ_CAP_ID_FUNCTIONS_VALID:
+ case LIBIE_AQC_CAPS_VALID_FUNCTIONS:
p->valid_functions = number;
break;
- case I40E_AQ_CAP_ID_SRIOV:
+ case LIBIE_AQC_CAPS_SRIOV:
if (number == 1)
p->sr_iov_1_1 = true;
break;
- case I40E_AQ_CAP_ID_VF:
+ case LIBIE_AQC_CAPS_VF:
p->num_vfs = number;
p->vf_base_id = logical_id;
break;
- case I40E_AQ_CAP_ID_VMDQ:
+ case LIBIE_AQC_CAPS_VMDQ:
if (number == 1)
p->vmdq = true;
break;
- case I40E_AQ_CAP_ID_8021QBG:
+ case LIBIE_AQC_CAPS_8021QBG:
if (number == 1)
p->evb_802_1_qbg = true;
break;
- case I40E_AQ_CAP_ID_8021QBR:
+ case LIBIE_AQC_CAPS_8021QBR:
if (number == 1)
p->evb_802_1_qbh = true;
break;
- case I40E_AQ_CAP_ID_VSI:
+ case LIBIE_AQC_CAPS_VSI:
p->num_vsis = number;
break;
- case I40E_AQ_CAP_ID_DCB:
+ case LIBIE_AQC_CAPS_DCB:
if (number == 1) {
p->dcb = true;
p->enabled_tcmap = logical_id;
p->maxtc = phys_id;
}
break;
- case I40E_AQ_CAP_ID_FCOE:
+ case LIBIE_AQC_CAPS_FCOE:
if (number == 1)
p->fcoe = true;
break;
- case I40E_AQ_CAP_ID_ISCSI:
+ case LIBIE_AQC_CAPS_ISCSI:
if (number == 1)
p->iscsi = true;
break;
- case I40E_AQ_CAP_ID_RSS:
+ case LIBIE_AQC_CAPS_RSS:
p->rss = true;
p->rss_table_size = number;
p->rss_table_entry_width = logical_id;
break;
- case I40E_AQ_CAP_ID_RXQ:
+ case LIBIE_AQC_CAPS_RXQS:
p->num_rx_qp = number;
p->base_queue = phys_id;
break;
- case I40E_AQ_CAP_ID_TXQ:
+ case LIBIE_AQC_CAPS_TXQS:
p->num_tx_qp = number;
p->base_queue = phys_id;
break;
- case I40E_AQ_CAP_ID_MSIX:
+ case LIBIE_AQC_CAPS_MSIX:
p->num_msix_vectors = number;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: MSIX vector count = %d\n",
p->num_msix_vectors);
break;
- case I40E_AQ_CAP_ID_VF_MSIX:
+ case LIBIE_AQC_CAPS_VF_MSIX:
p->num_msix_vectors_vf = number;
break;
- case I40E_AQ_CAP_ID_FLEX10:
+ case LIBIE_AQC_CAPS_FLEX10:
if (major_rev == 1) {
if (number == 1) {
p->flex10_enable = true;
@@ -2963,42 +2770,42 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
p->flex10_mode = logical_id;
p->flex10_status = phys_id;
break;
- case I40E_AQ_CAP_ID_CEM:
+ case LIBIE_AQC_CAPS_CEM:
if (number == 1)
p->mgmt_cem = true;
break;
- case I40E_AQ_CAP_ID_IWARP:
+ case LIBIE_AQC_CAPS_RDMA:
if (number == 1)
p->iwarp = true;
break;
- case I40E_AQ_CAP_ID_LED:
+ case LIBIE_AQC_CAPS_LED:
if (phys_id < I40E_HW_CAP_MAX_GPIO)
p->led[phys_id] = true;
break;
- case I40E_AQ_CAP_ID_SDP:
+ case LIBIE_AQC_CAPS_SDP:
if (phys_id < I40E_HW_CAP_MAX_GPIO)
p->sdp[phys_id] = true;
break;
- case I40E_AQ_CAP_ID_MDIO:
+ case LIBIE_AQC_CAPS_MDIO:
if (number == 1) {
p->mdio_port_num = phys_id;
p->mdio_port_mode = logical_id;
}
break;
- case I40E_AQ_CAP_ID_1588:
+ case LIBIE_AQC_CAPS_1588:
if (number == 1)
p->ieee_1588 = true;
break;
- case I40E_AQ_CAP_ID_FLOW_DIRECTOR:
+ case LIBIE_AQC_CAPS_FD:
p->fd = true;
p->fd_filters_guaranteed = number;
p->fd_filters_best_effort = logical_id;
break;
- case I40E_AQ_CAP_ID_WSR_PROT:
+ case LIBIE_AQC_CAPS_WSR_PROT:
p->wr_csr_prot = (u64)number;
p->wr_csr_prot |= (u64)logical_id << 32;
break;
- case I40E_AQ_CAP_ID_NVM_MGMT:
+ case LIBIE_AQC_CAPS_NVM_MGMT:
if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
p->sec_rev_disabled = true;
if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
@@ -3090,11 +2897,11 @@ int i40e_aq_discover_capabilities(struct i40e_hw *hw,
enum i40e_admin_queue_opc list_type_opc,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aqc_list_capabilites *cmd;
- struct i40e_aq_desc desc;
+ struct libie_aqc_list_caps *cmd;
+ struct libie_aq_desc desc;
int status = 0;
- cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
+ cmd = libie_aq_raw(&desc);
if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
@@ -3104,9 +2911,9 @@ int i40e_aq_discover_capabilities(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
*data_size = le16_to_cpu(desc.datalen);
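
The capability elements returned by the discover-capabilities command are now read through struct libie_aqc_list_caps_elem, whose cap/number/logical_id/phys_id/major_ver fields take over from the old i40e element layout. A stripped-down sketch of the new parsing loop, assuming only the fields and constants visible in these hunks (the full i40e_parse_discover_capabilities() handles the complete capability set):

	struct libie_aqc_list_caps_elem *cap = buff;
	struct i40e_hw_capabilities *p = &hw->dev_caps;
	u32 i;

	for (i = 0; i < cap_count; i++, cap++) {
		switch (le16_to_cpu(cap->cap)) {
		case LIBIE_AQC_CAPS_MSIX:
			/* number carries the MSI-X vector count */
			p->num_msix_vectors = le32_to_cpu(cap->number);
			break;
		default:
			break;
		}
	}
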
@@ -3139,9 +2946,8 @@ int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
bool last_command, u8 preservation_flags,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_nvm_update *cmd =
- (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ struct i40e_aqc_nvm_update *cmd;
+ struct libie_aq_desc desc;
int status;
/* In offset the highest byte must be zeroed. */
@@ -3152,6 +2958,7 @@ int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
+ cmd = libie_aq_raw(&desc);
/* If this is the last command in a series, set the proper flag. */
if (last_command)
cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
@@ -3169,9 +2976,9 @@ int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
cmd->offset = cpu_to_le32(offset);
cmd->length = cpu_to_le16(length);
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
if (length > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
@@ -3180,41 +2987,6 @@ i40e_aq_update_nvm_exit:
}
/**
- * i40e_aq_rearrange_nvm
- * @hw: pointer to the hw struct
- * @rearrange_nvm: defines direction of rearrangement
- * @cmd_details: pointer to command details structure or NULL
- *
- * Rearrange NVM structure, available only for transition FW
- **/
-int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
- u8 rearrange_nvm,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aqc_nvm_update *cmd;
- struct i40e_aq_desc desc;
- int status;
-
- cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
-
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
-
- rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT |
- I40E_AQ_NVM_REARRANGE_TO_STRUCT);
-
- if (!rearrange_nvm) {
- status = -EINVAL;
- goto i40e_aq_rearrange_nvm_exit;
- }
-
- cmd->command_flags |= rearrange_nvm;
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-i40e_aq_rearrange_nvm_exit:
- return status;
-}
-
-/**
* i40e_aq_get_lldp_mib
* @hw: pointer to the hw struct
* @bridge_type: type of bridge requested
@@ -3232,11 +3004,9 @@ int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
u16 *local_len, u16 *remote_len,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_lldp_get_mib *cmd =
- (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
- struct i40e_aqc_lldp_get_mib *resp =
- (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+ struct i40e_aqc_lldp_get_mib *resp;
+ struct i40e_aqc_lldp_get_mib *cmd;
+ struct libie_aq_desc desc;
int status;
if (buff_size == 0 || !buff)
@@ -3244,16 +3014,18 @@ int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
/* Indirect Command */
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
+ resp = libie_aq_raw(&desc);
+ cmd = libie_aq_raw(&desc);
cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
cmd->type |= FIELD_PREP(I40E_AQ_LLDP_BRIDGE_TYPE_MASK, bridge_type);
desc.datalen = cpu_to_le16(buff_size);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
if (!status) {
@@ -3282,19 +3054,19 @@ i40e_aq_set_lldp_mib(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aqc_lldp_set_local_mib *cmd;
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
+ cmd = libie_aq_raw(&desc);
if (buff_size == 0 || !buff)
return -EINVAL;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_lldp_set_local_mib);
/* Indirect Command */
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(buff_size);
cmd->type = mib_type;
@@ -3319,13 +3091,13 @@ int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
bool enable_update,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_lldp_update_mib *cmd =
- (struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
+ struct i40e_aqc_lldp_update_mib *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
+ cmd = libie_aq_raw(&desc);
if (!enable_update)
cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;
@@ -3335,44 +3107,6 @@ int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
}
/**
- * i40e_aq_restore_lldp
- * @hw: pointer to the hw struct
- * @setting: pointer to factory setting variable or NULL
- * @restore: True if factory settings should be restored
- * @cmd_details: pointer to command details structure or NULL
- *
- * Restore LLDP Agent factory settings if @restore set to True. In other case
- * only returns factory setting in AQ response.
- **/
-int
-i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_lldp_restore *cmd =
- (struct i40e_aqc_lldp_restore *)&desc.params.raw;
- int status;
-
- if (!test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps)) {
- i40e_debug(hw, I40E_DEBUG_ALL,
- "Restore LLDP not supported by current FW version.\n");
- return -ENODEV;
- }
-
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);
-
- if (restore)
- cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE;
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- if (setting)
- *setting = cmd->command & 1;
-
- return status;
-}
-
-/**
* i40e_aq_stop_lldp
* @hw: pointer to the hw struct
* @shutdown_agent: True if LLDP Agent needs to be Shutdown
@@ -3385,13 +3119,13 @@ int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
bool persist,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_lldp_stop *cmd =
- (struct i40e_aqc_lldp_stop *)&desc.params.raw;
+ struct i40e_aqc_lldp_stop *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
+ cmd = libie_aq_raw(&desc);
if (shutdown_agent)
cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
@@ -3419,13 +3153,13 @@ int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_lldp_start *cmd =
- (struct i40e_aqc_lldp_start *)&desc.params.raw;
+ struct i40e_aqc_lldp_start *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
+ cmd = libie_aq_raw(&desc);
cmd->command = I40E_AQ_LLDP_AGENT_START;
if (persist) {
@@ -3452,9 +3186,8 @@ int
i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_dcb_parameters *cmd =
- (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
+ struct i40e_aqc_set_dcb_parameters *cmd;
+ struct libie_aq_desc desc;
int status;
if (!test_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, hw->caps))
@@ -3463,6 +3196,7 @@ i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_dcb_parameters);
+ cmd = libie_aq_raw(&desc);
if (dcb_enable) {
cmd->valid_flags = I40E_DCB_VALID;
cmd->command = I40E_AQ_DCB_SET_AGENT;
@@ -3485,7 +3219,7 @@ int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
void *buff, u16 buff_size,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
if (buff_size == 0 || !buff)
@@ -3493,7 +3227,7 @@ int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size,
cmd_details);
@@ -3517,15 +3251,15 @@ int i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
u8 *filter_index,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_udp_tunnel *cmd =
- (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
- struct i40e_aqc_del_udp_tunnel_completion *resp =
- (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
+ struct i40e_aqc_del_udp_tunnel_completion *resp;
+ struct i40e_aqc_add_udp_tunnel *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
+ resp = libie_aq_raw(&desc);
+ cmd = libie_aq_raw(&desc);
cmd->udp_port = cpu_to_le16(udp_port);
cmd->protocol_type = protocol_index;
@@ -3546,13 +3280,13 @@ int i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_remove_udp_tunnel *cmd =
- (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
+ struct i40e_aqc_remove_udp_tunnel *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
+ cmd = libie_aq_raw(&desc);
cmd->index = index;
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -3571,9 +3305,8 @@ int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_switch_seid *cmd =
- (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ struct i40e_aqc_switch_seid *cmd;
+ struct libie_aq_desc desc;
int status;
if (seid == 0)
@@ -3581,6 +3314,7 @@ int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
+ cmd = libie_aq_raw(&desc);
cmd->seid = cpu_to_le16(seid);
status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
@@ -3601,7 +3335,7 @@ int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
int i40e_aq_dcb_updated(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
@@ -3627,9 +3361,8 @@ static int i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
enum i40e_admin_queue_opc opcode,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_tx_sched_ind *cmd =
- (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
+ struct i40e_aqc_tx_sched_ind *cmd;
+ struct libie_aq_desc desc;
int status;
bool cmd_param_flag = false;
@@ -3656,12 +3389,13 @@ static int i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
i40e_fill_default_direct_cmd_desc(&desc, opcode);
+ cmd = libie_aq_raw(&desc);
/* Indirect command */
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
if (cmd_param_flag)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_RD);
if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(buff_size);
@@ -3684,14 +3418,14 @@ int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
u16 seid, u16 credit, u8 max_credit,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_configure_vsi_bw_limit *cmd =
- (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
+ struct i40e_aqc_configure_vsi_bw_limit *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_configure_vsi_bw_limit);
+ cmd = libie_aq_raw(&desc);
cmd->vsi_seid = cpu_to_le16(seid);
cmd->credit = cpu_to_le16(credit);
cmd->max_credit = max_credit;
@@ -4019,18 +3753,16 @@ int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
struct i40e_control_filter_stats *stats,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_remove_control_packet_filter *cmd =
- (struct i40e_aqc_add_remove_control_packet_filter *)
- &desc.params.raw;
- struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
- (struct i40e_aqc_add_remove_control_packet_filter_completion *)
- &desc.params.raw;
+ struct i40e_aqc_add_remove_control_packet_filter_completion *resp;
+ struct i40e_aqc_add_remove_control_packet_filter *cmd;
+ struct libie_aq_desc desc;
int status;
if (vsi_seid == 0)
return -EINVAL;
+ resp = libie_aq_raw(&desc);
+ cmd = libie_aq_raw(&desc);
if (is_add) {
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_control_packet_filter);
@@ -4098,15 +3830,15 @@ static int i40e_aq_alternate_read(struct i40e_hw *hw,
u32 reg_addr0, u32 *reg_val0,
u32 reg_addr1, u32 *reg_val1)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_alternate_write *cmd_resp =
- (struct i40e_aqc_alternate_write *)&desc.params.raw;
+ struct i40e_aqc_alternate_write *cmd_resp;
+ struct libie_aq_desc desc;
int status;
if (!reg_val0)
return -EINVAL;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
+ cmd_resp = libie_aq_raw(&desc);
cmd_resp->address0 = cpu_to_le32(reg_addr0);
cmd_resp->address1 = cpu_to_le32(reg_addr1);
@@ -4134,10 +3866,10 @@ int i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aqc_tx_sched_ind *cmd;
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
+ cmd = libie_aq_raw(&desc);
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx);
cmd->vsi_seid = cpu_to_le16(seid);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -4155,7 +3887,7 @@ int i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
int i40e_aq_resume_port_tx(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
@@ -4232,11 +3964,9 @@ int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
u8 *ret_next_table, u32 *ret_next_index,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_debug_dump_internals *cmd =
- (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
- struct i40e_aqc_debug_dump_internals *resp =
- (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+ struct i40e_aqc_debug_dump_internals *resp;
+ struct i40e_aqc_debug_dump_internals *cmd;
+ struct libie_aq_desc desc;
int status;
if (buff_size == 0 || !buff)
@@ -4244,10 +3974,12 @@ int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_debug_dump_internals);
+ resp = libie_aq_raw(&desc);
+ cmd = libie_aq_raw(&desc);
/* Indirect Command */
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
cmd->cluster_id = cluster_id;
cmd->table_id = table_id;
@@ -4324,18 +4056,18 @@ i40e_aq_configure_partition_bw(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
u16 bwd_size = sizeof(*bw_data);
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_configure_partition_bw);
/* Indirect command */
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_RD);
if (bwd_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(bwd_size);
@@ -4570,84 +4302,6 @@ phy_write_end:
}
/**
- * i40e_write_phy_register
- * @hw: pointer to the HW structure
- * @page: registers page number
- * @reg: register address in the page
- * @phy_addr: PHY address on MDIO interface
- * @value: PHY register value
- *
- * Writes value to specified PHY register
- **/
-int i40e_write_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 value)
-{
- int status;
-
- switch (hw->device_id) {
- case I40E_DEV_ID_1G_BASE_T_X722:
- status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
- value);
- break;
- case I40E_DEV_ID_1G_BASE_T_BC:
- case I40E_DEV_ID_5G_BASE_T_BC:
- case I40E_DEV_ID_10G_BASE_T:
- case I40E_DEV_ID_10G_BASE_T4:
- case I40E_DEV_ID_10G_BASE_T_BC:
- case I40E_DEV_ID_10G_BASE_T_X722:
- case I40E_DEV_ID_25G_B:
- case I40E_DEV_ID_25G_SFP28:
- status = i40e_write_phy_register_clause45(hw, page, reg,
- phy_addr, value);
- break;
- default:
- status = -EIO;
- break;
- }
-
- return status;
-}
-
-/**
- * i40e_read_phy_register
- * @hw: pointer to the HW structure
- * @page: registers page number
- * @reg: register address in the page
- * @phy_addr: PHY address on MDIO interface
- * @value: PHY register value
- *
- * Reads specified PHY register value
- **/
-int i40e_read_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 *value)
-{
- int status;
-
- switch (hw->device_id) {
- case I40E_DEV_ID_1G_BASE_T_X722:
- status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
- value);
- break;
- case I40E_DEV_ID_1G_BASE_T_BC:
- case I40E_DEV_ID_5G_BASE_T_BC:
- case I40E_DEV_ID_10G_BASE_T:
- case I40E_DEV_ID_10G_BASE_T4:
- case I40E_DEV_ID_10G_BASE_T_BC:
- case I40E_DEV_ID_10G_BASE_T_X722:
- case I40E_DEV_ID_25G_B:
- case I40E_DEV_ID_25G_SFP28:
- status = i40e_read_phy_register_clause45(hw, page, reg,
- phy_addr, value);
- break;
- default:
- status = -EIO;
- break;
- }
-
- return status;
-}
-
-/**
* i40e_get_phy_address
* @hw: pointer to the HW structure
* @dev_num: PHY port num that address we want
@@ -4663,80 +4317,6 @@ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
}
/**
- * i40e_blink_phy_link_led
- * @hw: pointer to the HW structure
- * @time: time how long led will blinks in secs
- * @interval: gap between LED on and off in msecs
- *
- * Blinks PHY link LED
- **/
-int i40e_blink_phy_link_led(struct i40e_hw *hw,
- u32 time, u32 interval)
-{
- u16 led_addr = I40E_PHY_LED_PROV_REG_1;
- u16 gpio_led_port;
- u8 phy_addr = 0;
- int status = 0;
- u16 led_ctl;
- u8 port_num;
- u16 led_reg;
- u32 i;
-
- i = rd32(hw, I40E_PFGEN_PORTNUM);
- port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
- phy_addr = i40e_get_phy_address(hw, port_num);
-
- for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
- led_addr++) {
- status = i40e_read_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- &led_reg);
- if (status)
- goto phy_blinking_end;
- led_ctl = led_reg;
- if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
- led_reg = 0;
- status = i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- led_reg);
- if (status)
- goto phy_blinking_end;
- break;
- }
- }
-
- if (time > 0 && interval > 0) {
- for (i = 0; i < time * 1000; i += interval) {
- status = i40e_read_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, &led_reg);
- if (status)
- goto restore_config;
- if (led_reg & I40E_PHY_LED_MANUAL_ON)
- led_reg = 0;
- else
- led_reg = I40E_PHY_LED_MANUAL_ON;
- status = i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_reg);
- if (status)
- goto restore_config;
- msleep(interval);
- }
- }
-
-restore_config:
- status = i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_ctl);
-
-phy_blinking_end:
- return status;
-}
-
-/**
* i40e_led_get_reg - read LED register
* @hw: pointer to the HW structure
* @led_addr: LED register address
@@ -4919,9 +4499,8 @@ int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
u32 reg_addr, u32 *reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
- (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
+ struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp;
+ struct libie_aq_desc desc;
int status;
if (!reg_val)
@@ -4929,6 +4508,7 @@ int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);
+ cmd_resp = libie_aq_raw(&desc);
cmd_resp->address = cpu_to_le32(reg_addr);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -4957,7 +4537,7 @@ u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
if (!use_register) {
do_retry:
status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
- if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
+ if (hw->aq.asq_last_status == LIBIE_AQ_RC_EAGAIN && retry) {
usleep_range(1000, 2000);
retry--;
goto do_retry;
@@ -4985,13 +4565,13 @@ int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_rx_ctl_reg_read_write *cmd =
- (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
+ struct i40e_aqc_rx_ctl_reg_read_write *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
+ cmd = libie_aq_raw(&desc);
cmd->address = cpu_to_le32(reg_addr);
cmd->value = cpu_to_le32(reg_val);
@@ -5019,7 +4599,7 @@ void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
do_retry:
status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
reg_val, NULL);
- if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
+ if (hw->aq.asq_last_status == LIBIE_AQ_RC_EAGAIN && retry) {
usleep_range(1000, 2000);
retry--;
goto do_retry;
@@ -5078,14 +4658,14 @@ int i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_phy_register_access *cmd =
- (struct i40e_aqc_phy_register_access *)&desc.params.raw;
+ struct i40e_aqc_phy_register_access *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_register);
+ cmd = libie_aq_raw(&desc);
cmd->phy_interface = phy_select;
cmd->dev_address = dev_addr;
cmd->reg_address = cpu_to_le32(reg_addr);
@@ -5123,14 +4703,14 @@ int i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
u32 reg_addr, u32 *reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_phy_register_access *cmd =
- (struct i40e_aqc_phy_register_access *)&desc.params.raw;
+ struct i40e_aqc_phy_register_access *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_phy_register);
+ cmd = libie_aq_raw(&desc);
cmd->phy_interface = phy_select;
cmd->dev_address = dev_addr;
cmd->reg_address = cpu_to_le32(reg_addr);
@@ -5162,19 +4742,18 @@ int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
u32 *error_offset, u32 *error_info,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_write_personalization_profile *cmd =
- (struct i40e_aqc_write_personalization_profile *)
- &desc.params.raw;
+ struct i40e_aqc_write_personalization_profile *cmd;
struct i40e_aqc_write_ddp_resp *resp;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_write_personalization_profile);
- desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
+ cmd = libie_aq_raw(&desc);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD);
if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(buff_size);
@@ -5182,7 +4761,7 @@ int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
if (!status) {
- resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
+ resp = libie_aq_raw(&desc);
if (error_offset)
*error_offset = le32_to_cpu(resp->error_offset);
if (error_info)
@@ -5204,17 +4783,17 @@ int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
u16 buff_size, u8 flags,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_get_applied_profiles *cmd =
- (struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
+ struct i40e_aqc_get_applied_profiles *cmd;
+ struct libie_aq_desc desc;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_personalization_profile_list);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ cmd = libie_aq_raw(&desc);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(buff_size);
cmd->flags = flags;
@@ -5269,39 +4848,6 @@ i40e_find_segment_in_package(u32 segment_type,
(struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
/**
- * i40e_find_section_in_profile
- * @section_type: the section type to search for (i.e., SECTION_TYPE_NOTE)
- * @profile: pointer to the i40e segment header to be searched
- *
- * This function searches i40e segment for a particular section type. On
- * success it returns a pointer to the section header, otherwise it will
- * return NULL.
- **/
-struct i40e_profile_section_header *
-i40e_find_section_in_profile(u32 section_type,
- struct i40e_profile_segment *profile)
-{
- struct i40e_profile_section_header *sec;
- struct i40e_section_table *sec_tbl;
- u32 sec_off;
- u32 i;
-
- if (profile->header.type != SEGMENT_TYPE_I40E)
- return NULL;
-
- I40E_SECTION_TABLE(profile, sec_tbl);
-
- for (i = 0; i < sec_tbl->section_count; i++) {
- sec_off = sec_tbl->section_offset[i];
- sec = I40E_SECTION_HEADER(profile, sec_off);
- if (sec->section.type == section_type)
- return sec;
- }
-
- return NULL;
-}
-
-/**
* i40e_ddp_exec_aq_section - Execute generic AQ for DDP
* @hw: pointer to the hw struct
* @aq: command buffer containing all data to execute AQ
@@ -5309,7 +4855,7 @@ i40e_find_section_in_profile(u32 section_type,
static int i40e_ddp_exec_aq_section(struct i40e_hw *hw,
struct i40e_profile_aq_section *aq)
{
- struct i40e_aq_desc desc;
+ struct libie_aq_desc desc;
u8 *msg = NULL;
u16 msglen;
int status;
@@ -5320,10 +4866,10 @@ static int i40e_ddp_exec_aq_section(struct i40e_hw *hw,
msglen = aq->datalen;
if (msglen) {
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
- I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF |
+ LIBIE_AQ_FLAG_RD));
if (msglen > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(msglen);
msg = &aq->data[0];
}
@@ -5524,45 +5070,6 @@ i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
}
/**
- * i40e_add_pinfo_to_list
- * @hw: pointer to the hardware structure
- * @profile: pointer to the profile segment of the package
- * @profile_info_sec: buffer for information section
- * @track_id: package tracking id
- *
- * Register a profile to the list of loaded profiles.
- */
-int
-i40e_add_pinfo_to_list(struct i40e_hw *hw,
- struct i40e_profile_segment *profile,
- u8 *profile_info_sec, u32 track_id)
-{
- struct i40e_profile_section_header *sec = NULL;
- struct i40e_profile_info *pinfo;
- u32 offset = 0, info = 0;
- int status = 0;
-
- sec = (struct i40e_profile_section_header *)profile_info_sec;
- sec->tbl_size = 1;
- sec->data_end = sizeof(struct i40e_profile_section_header) +
- sizeof(struct i40e_profile_info);
- sec->section.type = SECTION_TYPE_INFO;
- sec->section.offset = sizeof(struct i40e_profile_section_header);
- sec->section.size = sizeof(struct i40e_profile_info);
- pinfo = (struct i40e_profile_info *)(profile_info_sec +
- sec->section.offset);
- pinfo->track_id = track_id;
- pinfo->version = profile->version;
- pinfo->op = I40E_DDP_ADD_TRACKID;
- memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
-
- status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
- track_id, &offset, &info, NULL);
-
- return status;
-}
-
-/**
* i40e_aq_add_cloud_filters
* @hw: pointer to the hardware structure
* @seid: VSI seid to add cloud filters from
@@ -5579,18 +5086,18 @@ i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_data *filters,
u8 filter_count)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_remove_cloud_filters *cmd =
- (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ struct i40e_aqc_add_remove_cloud_filters *cmd;
+ struct libie_aq_desc desc;
u16 buff_len;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_cloud_filters);
+ cmd = libie_aq_raw(&desc);
buff_len = filter_count * sizeof(*filters);
desc.datalen = cpu_to_le16(buff_len);
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
cmd->num_filters = filter_count;
cmd->seid = cpu_to_le16(seid);
@@ -5616,9 +5123,8 @@ i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_bb *filters,
u8 filter_count)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_remove_cloud_filters *cmd =
- (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ struct i40e_aqc_add_remove_cloud_filters *cmd;
+ struct libie_aq_desc desc;
u16 buff_len;
int status;
int i;
@@ -5626,9 +5132,10 @@ i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_cloud_filters);
+ cmd = libie_aq_raw(&desc);
buff_len = filter_count * sizeof(*filters);
desc.datalen = cpu_to_le16(buff_len);
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
cmd->num_filters = filter_count;
cmd->seid = cpu_to_le16(seid);
cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
@@ -5672,18 +5179,18 @@ i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_data *filters,
u8 filter_count)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_remove_cloud_filters *cmd =
- (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ struct i40e_aqc_add_remove_cloud_filters *cmd;
+ struct libie_aq_desc desc;
u16 buff_len;
int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_remove_cloud_filters);
+ cmd = libie_aq_raw(&desc);
buff_len = filter_count * sizeof(*filters);
desc.datalen = cpu_to_le16(buff_len);
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
cmd->num_filters = filter_count;
cmd->seid = cpu_to_le16(seid);
@@ -5709,9 +5216,8 @@ i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_bb *filters,
u8 filter_count)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_remove_cloud_filters *cmd =
- (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ struct i40e_aqc_add_remove_cloud_filters *cmd;
+ struct libie_aq_desc desc;
u16 buff_len;
int status;
int i;
@@ -5719,9 +5225,10 @@ i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_remove_cloud_filters);
+ cmd = libie_aq_raw(&desc);
buff_len = filter_count * sizeof(*filters);
desc.datalen = cpu_to_le16(buff_len);
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD));
cmd->num_filters = filter_count;
cmd->seid = cpu_to_le16(seid);
cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
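
The hunks above repeat one mechanical conversion: the descriptor becomes struct libie_aq_desc, the opcode-specific command struct is obtained through libie_aq_raw() instead of a cast from desc.params.raw, and the I40E_AQ_FLAG_* bits become LIBIE_AQ_FLAG_*. The stand-alone C sketch below mirrors that flow with simplified placeholder types, flag bits and opcode values; it is not the real libie admin-queue API, only an illustration of the pattern the patch applies to each AQ command.

/*
 * Minimal user-space sketch of the converted admin-queue pattern.  The
 * struct layout, the libie_aq_raw() helper and the numeric values below
 * are simplified placeholders, not the real libie definitions.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LIBIE_AQ_FLAG_BUF	0x1000	/* placeholder bit values */
#define LIBIE_AQ_FLAG_RD	0x0400
#define LIBIE_AQ_FLAG_LB	0x0200
#define I40E_AQ_LARGE_BUF	512
#define cpu_to_le16(x)		((uint16_t)(x))	/* little-endian host assumed */

struct libie_aq_desc {			/* trimmed stand-in */
	uint16_t flags;
	uint16_t opcode;
	uint16_t datalen;
	uint8_t  params[16];		/* opcode-specific command area */
};

/* stand-in for libie_aq_raw(): the command struct lives in params[] */
static void *libie_aq_raw(struct libie_aq_desc *desc)
{
	return desc->params;
}

struct add_remove_cloud_filters {	/* trimmed stand-in */
	uint8_t  num_filters;
	uint16_t seid;
};

static void fill_direct_cmd_desc(struct libie_aq_desc *desc, uint16_t opcode)
{
	memset(desc, 0, sizeof(*desc));
	desc->opcode = opcode;
}

int main(void)
{
	struct add_remove_cloud_filters *cmd;
	struct libie_aq_desc desc;
	uint16_t buff_len = 24 * 32;		/* 24 filters of 32 bytes each */

	fill_direct_cmd_desc(&desc, 0x025c);	/* opcode value is illustrative */
	cmd = libie_aq_raw(&desc);		/* replaces the old params.raw cast */

	desc.datalen = cpu_to_le16(buff_len);
	desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD);
	if (buff_len > I40E_AQ_LARGE_BUF)	/* large buffers also set LB */
		desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);

	cmd->num_filters = 24;
	cmd->seid = cpu_to_le16(5);

	printf("flags=0x%04x datalen=%u filters=%u\n",
	       (unsigned int)desc.flags, (unsigned int)desc.datalen,
	       (unsigned int)cmd->num_filters);
	return 0;
}
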
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 8db1eb0c1768..9e0c9597aeb9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -750,7 +750,7 @@ static int i40e_get_ieee_dcb_config(struct i40e_hw *hw)
I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
&hw->remote_dcbx_config);
/* Don't treat ENOENT as an error for Remote MIBs */
- if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ if (hw->aq.asq_last_status == LIBIE_AQ_RC_ENOENT)
ret = 0;
out:
@@ -799,7 +799,7 @@ int i40e_get_dcb_config(struct i40e_hw *hw)
}
/* CEE mode not enabled try querying IEEE data */
- if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ if (hw->aq.asq_last_status == LIBIE_AQ_RC_ENOENT)
return i40e_get_ieee_dcb_config(hw);
if (ret)
@@ -816,7 +816,7 @@ int i40e_get_dcb_config(struct i40e_hw *hw)
I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
&hw->remote_dcbx_config);
/* Don't treat ENOENT as an error for Remote MIBs */
- if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ if (hw->aq.asq_last_status == LIBIE_AQ_RC_ENOENT)
ret = 0;
out:
@@ -925,11 +925,11 @@ i40e_get_fw_lldp_status(struct i40e_hw *hw,
if (!ret) {
*lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED;
- } else if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) {
+ } else if (hw->aq.asq_last_status == LIBIE_AQ_RC_ENOENT) {
/* MIB is not available yet but the agent is running */
*lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED;
ret = 0;
- } else if (hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
+ } else if (hw->aq.asq_last_status == LIBIE_AQ_RC_EPERM) {
*lldp_status = I40E_GET_FW_LLDP_STATUS_DISABLED;
ret = 0;
}
@@ -1491,19 +1491,6 @@ void i40e_dcb_hw_set_num_tc(struct i40e_hw *hw, u8 num_tc)
}
/**
- * i40e_dcb_hw_get_num_tc
- * @hw: pointer to the hw struct
- *
- * Returns number of traffic classes configured in HW
- **/
-u8 i40e_dcb_hw_get_num_tc(struct i40e_hw *hw)
-{
- u32 reg = rd32(hw, I40E_PRTDCB_GENC);
-
- return FIELD_GET(I40E_PRTDCB_GENC_NUMTC_MASK, reg);
-}
-
-/**
* i40e_dcb_hw_rx_ets_bw_config
* @hw: pointer to the hw struct
* @bw_share: Bandwidth share indexed per traffic class
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index d76497566e40..d5662c639c41 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -253,7 +253,6 @@ void i40e_dcb_hw_rx_cmd_monitor_config(struct i40e_hw *hw,
void i40e_dcb_hw_pfc_config(struct i40e_hw *hw,
u8 pfc_en, u8 *prio_tc);
void i40e_dcb_hw_set_num_tc(struct i40e_hw *hw, u8 num_tc);
-u8 i40e_dcb_hw_get_num_tc(struct i40e_hw *hw);
void i40e_dcb_hw_rx_ets_bw_config(struct i40e_hw *hw, u8 *bw_share,
u8 *mode, u8 *prio_type);
void i40e_dcb_hw_rx_up2tc_config(struct i40e_hw *hw, u8 *prio_tc);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 8aa43aefe84c..a2ccf4c5e30b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -136,7 +136,7 @@ static int i40e_dcbnl_ieee_setets(struct net_device *netdev,
dev_info(&pf->pdev->dev,
"Failed setting DCB ETS configuration err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -175,7 +175,7 @@ static int i40e_dcbnl_ieee_setpfc(struct net_device *netdev,
dev_info(&pf->pdev->dev,
"Failed setting DCB PFC configuration err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -226,7 +226,7 @@ static int i40e_dcbnl_ieee_setapp(struct net_device *netdev,
dev_info(&pf->pdev->dev,
"Failed setting DCB configuration err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -291,7 +291,7 @@ static int i40e_dcbnl_ieee_delapp(struct net_device *netdev,
dev_info(&pf->pdev->dev,
"Failed setting DCB configuration err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -EINVAL;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index abf624d770e6..c17b5d290f0a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -40,48 +40,6 @@ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
* setup, adding or removing filters, or other things. Many of
* these will be useful for some forms of unit testing.
**************************************************************/
-static char i40e_dbg_command_buf[256] = "";
-
-/**
- * i40e_dbg_command_read - read for command datum
- * @filp: the opened file
- * @buffer: where to write the data for the user to read
- * @count: the size of the user's buffer
- * @ppos: file position offset
- **/
-static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct i40e_pf *pf = filp->private_data;
- struct i40e_vsi *main_vsi;
- int bytes_not_copied;
- int buf_size = 256;
- char *buf;
- int len;
-
- /* don't allow partial reads */
- if (*ppos != 0)
- return 0;
- if (count < buf_size)
- return -ENOSPC;
-
- buf = kzalloc(buf_size, GFP_KERNEL);
- if (!buf)
- return -ENOSPC;
-
- main_vsi = i40e_pf_get_main_vsi(pf);
- len = snprintf(buf, buf_size, "%s: %s\n", main_vsi->netdev->name,
- i40e_dbg_command_buf);
-
- bytes_not_copied = copy_to_user(buffer, buf, len);
- kfree(buf);
-
- if (bytes_not_copied)
- return -EFAULT;
-
- *ppos = len;
- return len;
-}
static char *i40e_filter_state_string[] = {
"INVALID",
@@ -89,6 +47,7 @@ static char *i40e_filter_state_string[] = {
"ACTIVE",
"FAILED",
"REMOVE",
+ "NEW_SYNC",
};
/**
@@ -488,7 +447,7 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n");
ring = &(hw->aq.asq);
for (i = 0; i < ring->count; i++) {
- struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
+ struct libie_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
dev_info(&pf->pdev->dev,
" at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
@@ -501,7 +460,7 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n");
ring = &(hw->aq.arq);
for (i = 0; i < ring->count; i++) {
- struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
+ struct libie_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
dev_info(&pf->pdev->dev,
" ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
@@ -721,7 +680,7 @@ static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id)
dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n",
vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs);
dev_info(&pf->pdev->dev, " num MDD=%lld\n",
- vf->num_mdd_events);
+ vf->mdd_tx_events.count + vf->mdd_rx_events.count);
} else {
dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id);
}
@@ -1267,10 +1226,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats port\n");
}
} else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {
- struct i40e_aq_desc *desc;
+ struct libie_aq_desc *desc;
int ret;
- desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
if (!desc)
goto command_write_done;
cnt = sscanf(&cmd_buf[11],
@@ -1278,10 +1237,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
&desc->flags,
&desc->opcode, &desc->datalen, &desc->retval,
&desc->cookie_high, &desc->cookie_low,
- &desc->params.internal.param0,
- &desc->params.internal.param1,
- &desc->params.internal.param2,
- &desc->params.internal.param3);
+ &desc->params.generic.param0,
+ &desc->params.generic.param1,
+ &desc->params.generic.addr_high,
+ &desc->params.generic.addr_low);
if (cnt != 10) {
dev_info(&pf->pdev->dev,
"send aq_cmd: bad command string, cnt=%d\n",
@@ -1306,19 +1265,19 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
"AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
desc->flags, desc->opcode, desc->datalen, desc->retval,
desc->cookie_high, desc->cookie_low,
- desc->params.internal.param0,
- desc->params.internal.param1,
- desc->params.internal.param2,
- desc->params.internal.param3);
+ desc->params.generic.param0,
+ desc->params.generic.param1,
+ desc->params.generic.addr_high,
+ desc->params.generic.addr_low);
kfree(desc);
desc = NULL;
} else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) {
- struct i40e_aq_desc *desc;
+ struct libie_aq_desc *desc;
u16 buffer_len;
u8 *buff;
int ret;
- desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
if (!desc)
goto command_write_done;
cnt = sscanf(&cmd_buf[20],
@@ -1326,10 +1285,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
&desc->flags,
&desc->opcode, &desc->datalen, &desc->retval,
&desc->cookie_high, &desc->cookie_low,
- &desc->params.internal.param0,
- &desc->params.internal.param1,
- &desc->params.internal.param2,
- &desc->params.internal.param3,
+ &desc->params.generic.param0,
+ &desc->params.generic.param1,
+ &desc->params.generic.addr_high,
+ &desc->params.generic.addr_low,
&buffer_len);
if (cnt != 11) {
dev_info(&pf->pdev->dev,
@@ -1349,7 +1308,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
desc = NULL;
goto command_write_done;
}
- desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc->flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
ret = i40e_asq_send_command(&pf->hw, desc, buff,
buffer_len, NULL);
if (!ret) {
@@ -1367,10 +1326,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
"AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
desc->flags, desc->opcode, desc->datalen, desc->retval,
desc->cookie_high, desc->cookie_low,
- desc->params.internal.param0,
- desc->params.internal.param1,
- desc->params.internal.param2,
- desc->params.internal.param3);
+ desc->params.generic.param0,
+ desc->params.generic.param1,
+ desc->params.generic.addr_high,
+ desc->params.generic.addr_low);
print_hex_dump(KERN_INFO, "AQ buffer WB: ",
DUMP_PREFIX_OFFSET, 16, 1,
buff, buffer_len, true);
@@ -1620,7 +1579,6 @@ command_write_done:
static const struct file_operations i40e_dbg_command_fops = {
.owner = THIS_MODULE,
.open = simple_open,
- .read = i40e_dbg_command_read,
.write = i40e_dbg_command_write,
};
@@ -1629,48 +1587,6 @@ static const struct file_operations i40e_dbg_command_fops = {
* The netdev_ops entry in debugfs is for giving the driver commands
* to be executed from the netdev operations.
**************************************************************/
-static char i40e_dbg_netdev_ops_buf[256] = "";
-
-/**
- * i40e_dbg_netdev_ops_read - read for netdev_ops datum
- * @filp: the opened file
- * @buffer: where to write the data for the user to read
- * @count: the size of the user's buffer
- * @ppos: file position offset
- **/
-static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct i40e_pf *pf = filp->private_data;
- struct i40e_vsi *main_vsi;
- int bytes_not_copied;
- int buf_size = 256;
- char *buf;
- int len;
-
- /* don't allow partal reads */
- if (*ppos != 0)
- return 0;
- if (count < buf_size)
- return -ENOSPC;
-
- buf = kzalloc(buf_size, GFP_KERNEL);
- if (!buf)
- return -ENOSPC;
-
- main_vsi = i40e_pf_get_main_vsi(pf);
- len = snprintf(buf, buf_size, "%s: %s\n", main_vsi->netdev->name,
- i40e_dbg_netdev_ops_buf);
-
- bytes_not_copied = copy_to_user(buffer, buf, len);
- kfree(buf);
-
- if (bytes_not_copied)
- return -EFAULT;
-
- *ppos = len;
- return len;
-}
/**
* i40e_dbg_netdev_ops_write - write into netdev_ops datum
@@ -1684,35 +1600,36 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
size_t count, loff_t *ppos)
{
struct i40e_pf *pf = filp->private_data;
+ char *cmd_buf, *buf_tmp;
int bytes_not_copied;
struct i40e_vsi *vsi;
- char *buf_tmp;
int vsi_seid;
int i, cnt;
/* don't allow partial writes */
if (*ppos != 0)
return 0;
- if (count >= sizeof(i40e_dbg_netdev_ops_buf))
- return -ENOSPC;
- memset(i40e_dbg_netdev_ops_buf, 0, sizeof(i40e_dbg_netdev_ops_buf));
- bytes_not_copied = copy_from_user(i40e_dbg_netdev_ops_buf,
- buffer, count);
- if (bytes_not_copied)
+ cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!cmd_buf)
+ return count;
+ bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
+ if (bytes_not_copied) {
+ kfree(cmd_buf);
return -EFAULT;
- i40e_dbg_netdev_ops_buf[count] = '\0';
+ }
+ cmd_buf[count] = '\0';
- buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n');
+ buf_tmp = strchr(cmd_buf, '\n');
if (buf_tmp) {
*buf_tmp = '\0';
- count = buf_tmp - i40e_dbg_netdev_ops_buf + 1;
+ count = buf_tmp - cmd_buf + 1;
}
- if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
+ if (strncmp(cmd_buf, "change_mtu", 10) == 0) {
int mtu;
- cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i",
+ cnt = sscanf(&cmd_buf[11], "%i %i",
&vsi_seid, &mtu);
if (cnt != 2) {
dev_info(&pf->pdev->dev, "change_mtu <vsi_seid> <mtu>\n");
@@ -1734,8 +1651,8 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
}
- } else if (strncmp(i40e_dbg_netdev_ops_buf, "set_rx_mode", 11) == 0) {
- cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
+ } else if (strncmp(cmd_buf, "set_rx_mode", 11) == 0) {
+ cnt = sscanf(&cmd_buf[11], "%i", &vsi_seid);
if (cnt != 1) {
dev_info(&pf->pdev->dev, "set_rx_mode <vsi_seid>\n");
goto netdev_ops_write_done;
@@ -1755,8 +1672,8 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
}
- } else if (strncmp(i40e_dbg_netdev_ops_buf, "napi", 4) == 0) {
- cnt = sscanf(&i40e_dbg_netdev_ops_buf[4], "%i", &vsi_seid);
+ } else if (strncmp(cmd_buf, "napi", 4) == 0) {
+ cnt = sscanf(&cmd_buf[4], "%i", &vsi_seid);
if (cnt != 1) {
dev_info(&pf->pdev->dev, "napi <vsi_seid>\n");
goto netdev_ops_write_done;
@@ -1774,21 +1691,20 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
dev_info(&pf->pdev->dev, "napi called\n");
}
} else {
- dev_info(&pf->pdev->dev, "unknown command '%s'\n",
- i40e_dbg_netdev_ops_buf);
+ dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf);
dev_info(&pf->pdev->dev, "available commands\n");
dev_info(&pf->pdev->dev, " change_mtu <vsi_seid> <mtu>\n");
dev_info(&pf->pdev->dev, " set_rx_mode <vsi_seid>\n");
dev_info(&pf->pdev->dev, " napi <vsi_seid>\n");
}
netdev_ops_write_done:
+ kfree(cmd_buf);
return count;
}
static const struct file_operations i40e_dbg_netdev_ops_fops = {
.owner = THIS_MODULE,
.open = simple_open,
- .read = i40e_dbg_netdev_ops_read,
.write = i40e_dbg_netdev_ops_write,
};
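
The debugfs rework above drops the fixed 256-byte static buffers (and their read handlers) and allocates a per-write buffer sized to the incoming data instead. The small user-space analog below shows the shape of the new write path: allocate count + 1 bytes, copy, NUL-terminate, truncate at the first newline, parse, and free on every exit. malloc/memcpy stand in for kzalloc/copy_from_user, the parsing is condensed to a single sscanf, and nothing here is kernel API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* User-space analog of the reworked netdev_ops write handler. */
static long fake_netdev_ops_write(const char *user_buf, size_t count)
{
	char *cmd_buf, *nl;
	int vsi_seid, mtu;

	cmd_buf = calloc(1, count + 1);		/* kzalloc(count + 1, ...) */
	if (!cmd_buf)
		return count;			/* mirrors the driver's behaviour */

	memcpy(cmd_buf, user_buf, count);	/* copy_from_user() analog */
	cmd_buf[count] = '\0';

	nl = strchr(cmd_buf, '\n');		/* only honour the first line */
	if (nl) {
		*nl = '\0';
		count = nl - cmd_buf + 1;
	}

	if (sscanf(cmd_buf, "change_mtu %i %i", &vsi_seid, &mtu) == 2)
		printf("change_mtu: vsi %d -> mtu %d\n", vsi_seid, mtu);
	else
		printf("unknown command '%s'\n", cmd_buf);

	free(cmd_buf);				/* freed on every exit path */
	return count;
}

int main(void)
{
	const char cmd[] = "change_mtu 388 9000\nignored";

	return fake_netdev_ops_write(cmd, sizeof(cmd) - 1) > 0 ? 0 : 1;
}
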
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devlink.c b/drivers/net/ethernet/intel/i40e/i40e_devlink.c
index cc4e9e2addb7..229179ccc131 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devlink.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_devlink.c
@@ -5,6 +5,42 @@
#include "i40e.h"
#include "i40e_devlink.h"
+static int i40e_max_mac_per_vf_set(struct devlink *devlink,
+ u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct i40e_pf *pf = devlink_priv(devlink);
+
+ if (pf->num_alloc_vfs > 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot change max_mac_per_vf while SR-IOV is enabled");
+ return -EBUSY;
+ }
+
+ pf->max_mac_per_vf = ctx->val.vu32;
+ return 0;
+}
+
+static int i40e_max_mac_per_vf_get(struct devlink *devlink,
+ u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct i40e_pf *pf = devlink_priv(devlink);
+
+ ctx->val.vu32 = pf->max_mac_per_vf;
+ return 0;
+}
+
+static const struct devlink_param i40e_dl_params[] = {
+ DEVLINK_PARAM_GENERIC(MAX_MAC_PER_VF,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ i40e_max_mac_per_vf_get,
+ i40e_max_mac_per_vf_set,
+ NULL),
+};
+
static void i40e_info_get_dsn(struct i40e_pf *pf, char *buf, size_t len)
{
u8 dsn[8];
@@ -165,7 +201,18 @@ void i40e_free_pf(struct i40e_pf *pf)
**/
void i40e_devlink_register(struct i40e_pf *pf)
{
- devlink_register(priv_to_devlink(pf));
+ struct devlink *dl = priv_to_devlink(pf);
+ struct device *dev = &pf->pdev->dev;
+ int err;
+
+ err = devlink_params_register(dl, i40e_dl_params,
+ ARRAY_SIZE(i40e_dl_params));
+ if (err)
+ dev_err(dev,
+ "devlink params register failed with error %d", err);
+
+ devlink_register(dl);
+
}
/**
@@ -176,7 +223,11 @@ void i40e_devlink_register(struct i40e_pf *pf)
**/
void i40e_devlink_unregister(struct i40e_pf *pf)
{
- devlink_unregister(priv_to_devlink(pf));
+ struct devlink *dl = priv_to_devlink(pf);
+
+ devlink_unregister(dl);
+ devlink_params_unregister(dl, i40e_dl_params,
+ ARRAY_SIZE(i40e_dl_params));
}
/**
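
The devlink additions above introduce a runtime max_mac_per_vf parameter whose set handler refuses changes while VFs are already allocated. The sketch below reproduces just that guard in plain C with a made-up struct fake_pf; the devlink plumbing itself (param registration, extack reporting) is deliberately omitted.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the PF state touched by the new parameter; the field
 * names mirror the patch but nothing else here is kernel API. */
struct fake_pf {
	int      num_alloc_vfs;
	uint32_t max_mac_per_vf;
};

static int max_mac_per_vf_set(struct fake_pf *pf, uint32_t val)
{
	if (pf->num_alloc_vfs > 0)	/* refuse changes while SR-IOV VFs exist */
		return -EBUSY;

	pf->max_mac_per_vf = val;
	return 0;
}

int main(void)
{
	struct fake_pf pf = { .num_alloc_vfs = 0, .max_mac_per_vf = 0 };

	printf("set to 8 -> %d\n", max_mac_per_vf_set(&pf, 8));	/* 0 */
	pf.num_alloc_vfs = 4;
	printf("set to 16 -> %d\n", max_mac_per_vf_set(&pf, 16));	/* -EBUSY */
	printf("value is %u\n", pf.max_mac_per_vf);			/* still 8 */
	return 0;
}
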
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index f2506511bbff..f2c2646ea298 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -3,6 +3,7 @@
/* ethtool support for i40e */
+#include <linux/net/intel/libie/pctype.h>
#include "i40e_devids.h"
#include "i40e_diag.h"
#include "i40e_txrx_common.h"
@@ -459,6 +460,8 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
I40E_PRIV_FLAG("base-r-fec", I40E_FLAG_BASE_R_FEC, 0),
I40E_PRIV_FLAG("vf-vlan-pruning",
I40E_FLAG_VF_VLAN_PRUNING_ENA, 0),
+ I40E_PRIV_FLAG("mdd-auto-reset-vf",
+ I40E_FLAG_MDD_AUTO_RESET_VF, 0),
};
#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)
@@ -1459,7 +1462,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
netdev_info(netdev,
"Set phy config failed, err %pe aq_err %s\n",
ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
err = -EAGAIN;
goto done;
}
@@ -1469,7 +1472,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
netdev_dbg(netdev,
"Updating link info failed with err %pe aq_err %s\n",
ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
} else {
netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
@@ -1517,7 +1520,7 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
netdev_info(netdev,
"Set phy config failed, err %pe aq_err %s\n",
ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
err = -EAGAIN;
goto done;
}
@@ -1531,7 +1534,7 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
netdev_dbg(netdev,
"Updating link info failed with err %pe aq_err %s\n",
ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
}
done:
@@ -1638,7 +1641,7 @@ static int i40e_nway_reset(struct net_device *netdev)
if (ret) {
netdev_info(netdev, "link restart failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
return -EIO;
}
@@ -1755,19 +1758,19 @@ static int i40e_set_pauseparam(struct net_device *netdev,
if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %pe aq_err %s\n",
ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
err = -EAGAIN;
}
if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
netdev_info(netdev, "Set fc failed on the set_phy_config call with err %pe aq_err %s\n",
ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
err = -EAGAIN;
}
if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
netdev_info(netdev, "Set fc failed on the get_link_info call with err %pe aq_err %s\n",
ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
err = -EAGAIN;
}
@@ -1915,13 +1918,13 @@ static int i40e_get_eeprom(struct net_device *netdev,
ret_val = i40e_aq_read_nvm(hw, 0x0, offset, len,
(u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
last, NULL);
- if (ret_val && hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
+ if (ret_val && hw->aq.asq_last_status == LIBIE_AQ_RC_EPERM) {
dev_info(&pf->pdev->dev,
"read NVM failed, invalid offset 0x%x\n",
offset);
break;
} else if (ret_val &&
- hw->aq.asq_last_status == I40E_AQ_RC_EACCES) {
+ hw->aq.asq_last_status == LIBIE_AQ_RC_EACCES) {
dev_info(&pf->pdev->dev,
"read NVM failed, access, offset 0x%x\n",
offset);
@@ -2747,6 +2750,15 @@ skip_ol_tests:
netif_info(pf, drv, netdev, "testing failed\n");
}
+static void i40e_get_link_ext_stats(struct net_device *netdev,
+ struct ethtool_link_ext_stats *stats)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ stats->link_down_events = pf->link_down_events;
+}
+
static void i40e_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
@@ -3127,15 +3139,12 @@ static int i40e_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
return __i40e_set_coalesce(netdev, ec, queue);
}
-/**
- * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
- * @pf: pointer to the physical function struct
- * @cmd: ethtool rxnfc command
- *
- * Returns Success if the flow is supported, else Invalid Input.
- **/
-static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
+static int i40e_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
u8 flow_pctype = 0;
u64 i_set = 0;
@@ -3144,16 +3153,16 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
switch (cmd->flow_type) {
case TCP_V4_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ flow_pctype = LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP;
break;
case UDP_V4_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ flow_pctype = LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP;
break;
case TCP_V6_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ flow_pctype = LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP;
break;
case UDP_V6_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ flow_pctype = LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP;
break;
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
@@ -3410,28 +3419,28 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
switch (rule->flow_type) {
case SCTP_V4_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP;
break;
case TCP_V4_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP;
break;
case UDP_V4_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP;
break;
case SCTP_V6_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP;
break;
case TCP_V6_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP;
break;
case UDP_V6_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP;
break;
case IP_USER_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER;
break;
case IPV6_USER_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER;
break;
default:
/* If we have stored a filter with a flow type not listed here
@@ -3513,6 +3522,20 @@ no_input_set:
}
/**
+ * i40e_get_rx_ring_count - get RX ring count
+ * @netdev: network interface device structure
+ *
+ * Return: number of RX rings.
+ **/
+static u32 i40e_get_rx_ring_count(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ return vsi->rss_size;
+}
+
+/**
* i40e_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -3529,13 +3552,6 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = vsi->rss_size;
- ret = 0;
- break;
- case ETHTOOL_GRXFH:
- ret = i40e_get_rss_hash_opts(pf, cmd);
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = pf->fdir_pf_active_filters;
/* report total rule count */
@@ -3564,7 +3580,7 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
* Returns value of bits to be set per user request
**/
static u64 i40e_get_rss_hash_bits(struct i40e_hw *hw,
- struct ethtool_rxnfc *nfc,
+ const struct ethtool_rxfh_fields *nfc,
u64 i_setc)
{
u64 i_set = i_setc;
@@ -3609,15 +3625,13 @@ static u64 i40e_get_rss_hash_bits(struct i40e_hw *hw,
}
#define FLOW_PCTYPES_SIZE 64
-/**
- * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
- * @pf: pointer to the physical function struct
- * @nfc: ethtool rxnfc command
- *
- * Returns Success if the flow input set is supported.
- **/
-static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
+static int i40e_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
@@ -3641,40 +3655,40 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
switch (nfc->flow_type) {
case TCP_V4_FLOW:
- set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes);
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes);
if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
pf->hw.caps))
- set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK,
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK,
flow_pctypes);
break;
case TCP_V6_FLOW:
- set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP, flow_pctypes);
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP, flow_pctypes);
if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
pf->hw.caps))
- set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK,
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK,
flow_pctypes);
break;
case UDP_V4_FLOW:
- set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_UDP, flow_pctypes);
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP, flow_pctypes);
if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
pf->hw.caps)) {
- set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP,
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP,
flow_pctypes);
- set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP,
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP,
flow_pctypes);
}
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
+ hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4);
break;
case UDP_V6_FLOW:
- set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_UDP, flow_pctypes);
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP, flow_pctypes);
if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
pf->hw.caps)) {
- set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP,
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP,
flow_pctypes);
- set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP,
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP,
flow_pctypes);
}
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
+ hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6);
break;
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
@@ -3683,7 +3697,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
if ((nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3))
return -EINVAL;
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+ hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER);
break;
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
@@ -3692,15 +3706,15 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
if ((nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3))
return -EINVAL;
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+ hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER);
break;
case IPV4_FLOW:
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
+ hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4);
break;
case IPV6_FLOW:
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
+ hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6);
break;
default:
return -EINVAL;
@@ -4310,36 +4324,36 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
switch (fsp->flow_type & ~FLOW_EXT) {
case SCTP_V4_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP;
fdir_filter_count = &pf->fd_sctp4_filter_cnt;
break;
case TCP_V4_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP;
fdir_filter_count = &pf->fd_tcp4_filter_cnt;
break;
case UDP_V4_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP;
fdir_filter_count = &pf->fd_udp4_filter_cnt;
break;
case SCTP_V6_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP;
fdir_filter_count = &pf->fd_sctp6_filter_cnt;
break;
case TCP_V6_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP;
fdir_filter_count = &pf->fd_tcp6_filter_cnt;
break;
case UDP_V6_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP;
fdir_filter_count = &pf->fd_udp6_filter_cnt;
break;
case IP_USER_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER;
fdir_filter_count = &pf->fd_ip4_filter_cnt;
flex_l3 = true;
break;
case IPV6_USER_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER;
fdir_filter_count = &pf->fd_ip6_filter_cnt;
flex_l3 = true;
break;
@@ -4675,8 +4689,8 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
* separate support, we'll always assume and enforce that the two flow
* types must have matching input sets.
*/
- if (index == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER)
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
+ if (index == LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER)
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_FRAG_IPV4,
new_mask);
/* Add the new offset and update table, if necessary */
@@ -4952,13 +4966,9 @@ static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
- struct i40e_pf *pf = vsi->back;
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = i40e_set_rss_hash_opt(pf, cmd);
- break;
case ETHTOOL_SRXCLSRLINS:
ret = i40e_add_fdir_ethtool(vsi, cmd);
break;
@@ -5249,9 +5259,9 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
DECLARE_BITMAP(orig_flags, I40E_PF_FLAGS_NBITS);
DECLARE_BITMAP(new_flags, I40E_PF_FLAGS_NBITS);
struct i40e_netdev_priv *np = netdev_priv(dev);
- enum i40e_admin_queue_err adq_err;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
+ enum libie_aq_err adq_err;
u32 reset_needed = 0;
int status;
u32 i, j;
@@ -5299,7 +5309,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
}
flags_complete:
- bitmap_xor(changed_flags, pf->flags, orig_flags, I40E_PF_FLAGS_NBITS);
+ bitmap_xor(changed_flags, new_flags, orig_flags, I40E_PF_FLAGS_NBITS);
if (test_bit(I40E_FLAG_FW_LLDP_DIS, changed_flags))
reset_needed = I40E_PF_RESET_AND_REBUILD_FLAG;
@@ -5371,12 +5381,11 @@ flags_complete:
valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
ret = i40e_aq_set_switch_config(&pf->hw, sw_flags, valid_flags,
0, NULL);
- if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
+ if (ret && pf->hw.aq.asq_last_status != LIBIE_AQ_RC_ESRCH) {
dev_info(&pf->pdev->dev,
"couldn't set switch config bits, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
/* not a fatal problem, just keep going */
}
}
@@ -5438,16 +5447,16 @@ flags_complete:
if (status) {
adq_err = pf->hw.aq.asq_last_status;
switch (adq_err) {
- case I40E_AQ_RC_EEXIST:
+ case LIBIE_AQ_RC_EEXIST:
dev_warn(&pf->pdev->dev,
"FW LLDP agent is already running\n");
reset_needed = 0;
break;
- case I40E_AQ_RC_EPERM:
+ case LIBIE_AQ_RC_EPERM:
dev_warn(&pf->pdev->dev,
"Device configuration forbids SW from starting the LLDP agent.\n");
return -EINVAL;
- case I40E_AQ_RC_EAGAIN:
+ case LIBIE_AQ_RC_EAGAIN:
dev_warn(&pf->pdev->dev,
"Stop FW LLDP agent command is still being processed, please try again in a second.\n");
return -EBUSY;
@@ -5455,8 +5464,7 @@ flags_complete:
dev_warn(&pf->pdev->dev,
"Starting FW LLDP agent failed: error: %pe, %s\n",
ERR_PTR(status),
- i40e_aq_str(&pf->hw,
- adq_err));
+ libie_aq_str(adq_err));
return -EINVAL;
}
}
@@ -5807,6 +5815,7 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.get_regs = i40e_get_regs,
.nway_reset = i40e_nway_reset,
.get_link = ethtool_op_get_link,
+ .get_link_ext_stats = i40e_get_link_ext_stats,
.get_wol = i40e_get_wol,
.set_wol = i40e_set_wol,
.set_eeprom = i40e_set_eeprom,
@@ -5820,6 +5829,7 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.set_msglevel = i40e_set_msglevel,
.get_rxnfc = i40e_get_rxnfc,
.set_rxnfc = i40e_set_rxnfc,
+ .get_rx_ring_count = i40e_get_rx_ring_count,
.self_test = i40e_diag_test,
.get_strings = i40e_get_strings,
.get_eee = i40e_get_eee,
@@ -5833,6 +5843,8 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.get_rxfh_indir_size = i40e_get_rxfh_indir_size,
.get_rxfh = i40e_get_rxfh,
.set_rxfh = i40e_set_rxfh,
+ .get_rxfh_fields = i40e_get_rxfh_fields,
+ .set_rxfh_fields = i40e_set_rxfh_fields,
.get_channels = i40e_get_channels,
.set_channels = i40e_set_channels,
.get_module_info = i40e_get_module_info,
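
On the ethtool side, the RSS hash-field handlers now build the hardware's hash-enable ("hena") bitmask from LIBIE_FILTER_PCTYPE_* values, one bit per packet classifier type for the requested flow. The fragment below shows that accumulation pattern with placeholder pctype numbers; the real values come from linux/net/intel/libie/pctype.h and are not reproduced here.

#include <stdint.h>
#include <stdio.h>

/* Placeholder pctype values, not the real libie enum. */
enum {
	LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
	LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
	LIBIE_FILTER_PCTYPE_FRAG_IPV4     = 36,
};

#define BIT_ULL(n)	(1ULL << (n))

/* Enabling RSS for UDP/IPv4 also covers fragmented IPv4, as in the patch. */
static uint64_t hena_enable_udp4(uint64_t hena)
{
	hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP);
	hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4);
	return hena;
}

int main(void)
{
	uint64_t hena = BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP);

	hena = hena_enable_udp4(hena);
	printf("hena = 0x%016llx\n", (unsigned long long)hena);
	return 0;
}
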
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 25295ae370b2..d8192aa23254 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -3,6 +3,7 @@
#include <generated/utsrelease.h>
#include <linux/crash_dump.h>
+#include <linux/net/intel/libie/pctype.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <linux/module.h>
@@ -99,7 +100,8 @@ module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
-MODULE_IMPORT_NS(LIBIE);
+MODULE_IMPORT_NS("LIBIE");
+MODULE_IMPORT_NS("LIBIE_ADMINQ");
MODULE_LICENSE("GPL v2");
static struct workqueue_struct *i40e_wq;
@@ -1241,12 +1243,30 @@ void i40e_update_stats(struct i40e_vsi *vsi)
}
/**
- * i40e_count_filters - counts VSI mac filters
+ * i40e_count_all_filters - counts VSI MAC filters
* @vsi: the VSI to be searched
*
- * Returns count of mac filters
- **/
-int i40e_count_filters(struct i40e_vsi *vsi)
+ * Return: count of MAC filters in any state.
+ */
+int i40e_count_all_filters(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+ int bkt, cnt = 0;
+
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
+ cnt++;
+
+ return cnt;
+}
+
+/**
+ * i40e_count_active_filters - counts active VSI MAC filters
+ * @vsi: the VSI to be searched
+ *
+ * Return: count of active MAC filters.
+ */
+int i40e_count_active_filters(struct i40e_vsi *vsi)
{
struct i40e_mac_filter *f;
struct hlist_node *h;
@@ -1255,6 +1275,7 @@ int i40e_count_filters(struct i40e_vsi *vsi)
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
if (f->state == I40E_FILTER_NEW ||
+ f->state == I40E_FILTER_NEW_SYNC ||
f->state == I40E_FILTER_ACTIVE)
++cnt;
}
@@ -1441,6 +1462,8 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
new->f = add_head;
new->state = add_head->state;
+ if (add_head->state == I40E_FILTER_NEW)
+ add_head->state = I40E_FILTER_NEW_SYNC;
/* Add the new filter to the tmp list */
hlist_add_head(&new->hlist, tmp_add_list);
@@ -1550,6 +1573,8 @@ static int i40e_correct_vf_mac_vlan_filters(struct i40e_vsi *vsi,
return -ENOMEM;
new_mac->f = add_head;
new_mac->state = add_head->state;
+ if (add_head->state == I40E_FILTER_NEW)
+ add_head->state = I40E_FILTER_NEW_SYNC;
/* Add the new filter to the tmp list */
hlist_add_head(&new_mac->hlist, tmp_add_list);
@@ -1661,9 +1686,8 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
* @vsi: VSI to remove from
* @f: the filter to remove from the list
*
- * This function should be called instead of i40e_del_filter only if you know
- * the exact filter you will remove already, such as via i40e_find_filter or
- * i40e_find_mac.
+ * This function requires you've found the exact filter you will remove
+ * already, such as via i40e_find_filter or i40e_find_mac.
*
* NOTE: This function is expected to be called with mac_filter_hash_lock
* being held.
@@ -1693,29 +1717,6 @@ void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
}
/**
- * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
- * @vsi: the VSI to be searched
- * @macaddr: the MAC address
- * @vlan: the VLAN
- *
- * NOTE: This function is expected to be called with mac_filter_hash_lock
- * being held.
- * ANOTHER NOTE: This function MUST be called from within the context of
- * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
- * instead of list_for_each_entry().
- **/
-void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
-{
- struct i40e_mac_filter *f;
-
- if (!vsi || !macaddr)
- return;
-
- f = i40e_find_filter(vsi, macaddr, vlan);
- __i40e_del_filter(vsi, f);
-}
-
-/**
* i40e_add_mac_filter - Add a MAC filter for all active VLANs
* @vsi: the VSI to be searched
* @macaddr: the mac address to be filtered
@@ -1832,7 +1833,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
if (ret)
netdev_info(netdev, "Ignoring error from firmware on LAA update, status %pe, AQ ret %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
}
/* schedule our worker thread which will take care of
@@ -1864,7 +1865,7 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
dev_info(&pf->pdev->dev,
"Cannot set RSS key, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
return ret;
}
}
@@ -1876,7 +1877,7 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
dev_info(&pf->pdev->dev,
"Cannot set RSS lut, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
return ret;
}
}
@@ -2358,19 +2359,18 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
int num_del, int *retval)
{
struct i40e_hw *hw = &vsi->back->hw;
- enum i40e_admin_queue_err aq_status;
+ enum libie_aq_err aq_status;
int aq_ret;
aq_ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, list, num_del, NULL,
&aq_status);
/* Explicitly ignore and do not report when firmware returns ENOENT */
- if (aq_ret && !(aq_status == I40E_AQ_RC_ENOENT)) {
+ if (aq_ret && !(aq_status == LIBIE_AQ_RC_ENOENT)) {
*retval = -EIO;
dev_info(&vsi->back->pdev->dev,
"ignoring delete macvlan error on %s, err %pe, aq_err %s\n",
- vsi_name, ERR_PTR(aq_ret),
- i40e_aq_str(hw, aq_status));
+ vsi_name, ERR_PTR(aq_ret), libie_aq_str(aq_status));
}
}
@@ -2393,7 +2393,7 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
int num_add)
{
struct i40e_hw *hw = &vsi->back->hw;
- enum i40e_admin_queue_err aq_status;
+ enum libie_aq_err aq_status;
int fcnt;
i40e_aq_add_macvlan_v2(hw, vsi->seid, list, num_add, NULL, &aq_status);
@@ -2404,19 +2404,17 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
dev_warn(&vsi->back->pdev->dev,
"Error %s adding RX filters on %s, promiscuous mode forced on\n",
- i40e_aq_str(hw, aq_status), vsi_name);
+ libie_aq_str(aq_status), vsi_name);
} else if (vsi->type == I40E_VSI_SRIOV ||
vsi->type == I40E_VSI_VMDQ1 ||
vsi->type == I40E_VSI_VMDQ2) {
dev_warn(&vsi->back->pdev->dev,
"Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
- i40e_aq_str(hw, aq_status), vsi_name,
- vsi_name);
+ libie_aq_str(aq_status), vsi_name, vsi_name);
} else {
dev_warn(&vsi->back->pdev->dev,
"Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
- i40e_aq_str(hw, aq_status), vsi_name,
- vsi->type);
+ libie_aq_str(aq_status), vsi_name, vsi->type);
}
}
}
@@ -2437,7 +2435,8 @@ static int
i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
struct i40e_mac_filter *f)
{
- bool enable = f->state == I40E_FILTER_NEW;
+ bool enable = f->state == I40E_FILTER_NEW ||
+ f->state == I40E_FILTER_NEW_SYNC;
struct i40e_hw *hw = &vsi->back->hw;
int aq_ret;
@@ -2458,8 +2457,7 @@ i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
dev_warn(&vsi->back->pdev->dev,
"Error %s, forcing overflow promiscuous on %s\n",
- i40e_aq_str(hw, hw->aq.asq_last_status),
- vsi_name);
+ libie_aq_str(hw->aq.asq_last_status), vsi_name);
}
return aq_ret;
@@ -2500,7 +2498,7 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
dev_info(&pf->pdev->dev,
"Set default VSI failed, err %pe, aq_err %s\n",
ERR_PTR(aq_ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
}
} else {
aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
@@ -2512,7 +2510,7 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
dev_info(&pf->pdev->dev,
"set unicast promisc failed, err %pe, aq_err %s\n",
ERR_PTR(aq_ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
}
aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
hw,
@@ -2522,7 +2520,7 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
dev_info(&pf->pdev->dev,
"set multicast promisc failed, err %pe, aq_err %s\n",
ERR_PTR(aq_ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
}
}
@@ -2611,6 +2609,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
/* Add it to the hash list */
hlist_add_head(&new->hlist, &tmp_add_list);
+ f->state = I40E_FILTER_NEW_SYNC;
}
/* Count the number of active (current and new) VLAN
@@ -2762,7 +2761,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
spin_lock_bh(&vsi->mac_filter_hash_lock);
hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
/* Only update the state if we're still NEW */
- if (new->f->state == I40E_FILTER_NEW)
+ if (new->f->state == I40E_FILTER_NEW ||
+ new->f->state == I40E_FILTER_NEW_SYNC)
new->f->state = new->state;
hlist_del(&new->hlist);
netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
@@ -2828,7 +2828,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
"set multi promisc failed on %s, err %pe aq_err %s\n",
vsi_name,
ERR_PTR(aq_ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
} else {
dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
cur_multipromisc ? "entering" : "leaving");
@@ -2849,7 +2849,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
cur_promisc ? "on" : "off",
vsi_name,
ERR_PTR(aq_ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
}
}
out:
@@ -2970,27 +2970,6 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
- * i40e_ioctl - Access the hwtstamp interface
- * @netdev: network interface device structure
- * @ifr: interface request data
- * @cmd: ioctl command
- **/
-int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_pf *pf = np->vsi->back;
-
- switch (cmd) {
- case SIOCGHWTSTAMP:
- return i40e_ptp_get_ts_config(pf, ifr);
- case SIOCSHWTSTAMP:
- return i40e_ptp_set_ts_config(pf, ifr);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-/**
* i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
* @vsi: the vsi being adjusted
**/
@@ -3019,8 +2998,7 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
dev_info(&vsi->back->pdev->dev,
"update vlan stripping failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&vsi->back->hw,
- vsi->back->hw.aq.asq_last_status));
+ libie_aq_str(vsi->back->hw.aq.asq_last_status));
}
}
@@ -3054,8 +3032,7 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
dev_info(&vsi->back->pdev->dev,
"update vlan stripping failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&vsi->back->hw,
- vsi->back->hw.aq.asq_last_status));
+ libie_aq_str(vsi->back->hw.aq.asq_last_status));
}
}
@@ -3299,8 +3276,7 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
dev_info(&vsi->back->pdev->dev,
"add pvid failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&vsi->back->hw,
- vsi->back->hw.aq.asq_last_status));
+ libie_aq_str(vsi->back->hw.aq.asq_last_status));
return -ENOENT;
}
@@ -4198,7 +4174,7 @@ free_queue_irqs:
irq_num = pf->msix_entries[base + vector].vector;
irq_set_affinity_notifier(irq_num, NULL);
irq_update_affinity_hint(irq_num, NULL);
- free_irq(irq_num, &vsi->q_vectors[vector]);
+ free_irq(irq_num, vsi->q_vectors[vector]);
}
return err;
}
@@ -5570,7 +5546,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
dev_info(&pf->pdev->dev,
"couldn't get PF vsi bw config, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -5581,7 +5557,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
dev_info(&pf->pdev->dev,
"couldn't get PF vsi ets bw config, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -5771,7 +5747,7 @@ int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset)
if (ret) {
dev_info(&pf->pdev->dev, "Update vsi config failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
return ret;
}
/* update the local VSI info with updated queue map */
@@ -5827,7 +5803,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
dev_info(&pf->pdev->dev,
"Failed querying vsi bw info, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
goto out;
}
if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
@@ -5894,7 +5870,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
dev_info(&pf->pdev->dev,
"Update vsi tc config failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
goto out;
}
/* update the local VSI info with updated queue map */
@@ -5907,7 +5883,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
dev_info(&pf->pdev->dev,
"Failed updating vsi bw info, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
goto out;
}
@@ -6021,7 +5997,7 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
dev_err(&pf->pdev->dev,
"Failed set tx rate (%llu Mbps) for vsi->seid %u, err %pe aq_err %s\n",
max_tx_rate, seid, ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return ret;
}
@@ -6033,8 +6009,8 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
**/
static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
{
- enum i40e_admin_queue_err last_aq_status;
struct i40e_cloud_filter *cfilter;
+ enum libie_aq_err last_aq_status;
struct i40e_channel *ch, *ch_tmp;
struct i40e_pf *pf = vsi->back;
struct hlist_node *node;
@@ -6097,7 +6073,7 @@ static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
dev_info(&pf->pdev->dev,
"Failed to delete cloud filter, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw, last_aq_status));
+ libie_aq_str(last_aq_status));
kfree(cfilter);
}
@@ -6232,7 +6208,7 @@ static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
dev_info(&pf->pdev->dev,
"Cannot set RSS lut, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
kfree(lut);
return ret;
}
@@ -6331,8 +6307,7 @@ static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
dev_info(&pf->pdev->dev,
"add new vsi failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -ENOENT;
}
@@ -6575,12 +6550,10 @@ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
pf->last_sw_conf_valid_flags,
mode, NULL);
- if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
+ if (ret && hw->aq.asq_last_status != LIBIE_AQ_RC_ESRCH)
dev_err(&pf->pdev->dev,
"couldn't set switch config bits, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(hw,
- hw->aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(hw->aq.asq_last_status));
return ret;
}
@@ -6779,8 +6752,7 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
if (ret) {
dev_info(&pf->pdev->dev,
"VEB bw config failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
goto out;
}
@@ -6789,8 +6761,7 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
if (ret) {
dev_info(&pf->pdev->dev,
"Failed getting veb bw config, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
}
out:
@@ -6871,7 +6842,7 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
dev_info(&pf->pdev->dev,
"Resume Port Tx failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
/* Schedule PF reset to recover */
set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
i40e_service_event_schedule(pf);
@@ -6895,8 +6866,7 @@ static int i40e_suspend_port_tx(struct i40e_pf *pf)
if (ret) {
dev_info(&pf->pdev->dev,
"Suspend Port Tx failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
/* Schedule PF reset to recover */
set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
i40e_service_event_schedule(pf);
@@ -6935,8 +6905,7 @@ static int i40e_hw_set_dcb_config(struct i40e_pf *pf,
if (ret) {
dev_info(&pf->pdev->dev,
"Set DCB Config failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
goto out;
}
@@ -7052,8 +7021,7 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
if (ret) {
dev_info(&pf->pdev->dev,
"Modify Port ETS failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
goto out;
}
@@ -7092,8 +7060,7 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
if (ret) {
dev_info(&pf->pdev->dev,
"DCB Updated failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
goto out;
}
@@ -7176,8 +7143,7 @@ int i40e_dcb_sw_default_config(struct i40e_pf *pf)
if (err) {
dev_info(&pf->pdev->dev,
"Enable Port ETS failed, err %pe aq_err %s\n",
- ERR_PTR(err),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
err = -ENOENT;
goto out;
}
@@ -7250,14 +7216,13 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
dev_dbg(&pf->pdev->dev,
"DCBX offload is supported for this PF.\n");
}
- } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
+ } else if (pf->hw.aq.asq_last_status == LIBIE_AQ_RC_EPERM) {
dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
set_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags);
} else {
dev_info(&pf->pdev->dev,
"Query for DCB configuration failed, err %pe aq_err %s\n",
- ERR_PTR(err),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
}
out:
@@ -7513,8 +7478,7 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
if (err) {
dev_err(&pf->pdev->dev,
"failed to get phy cap., ret = %pe last_status = %s\n",
- ERR_PTR(err),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(hw->aq.asq_last_status));
return err;
}
speed = abilities.link_speed;
@@ -7525,8 +7489,7 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
if (err) {
dev_err(&pf->pdev->dev,
"failed to get phy cap., ret = %pe last_status = %s\n",
- ERR_PTR(err),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(hw->aq.asq_last_status));
return err;
}
@@ -7570,8 +7533,7 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
if (err) {
dev_err(&pf->pdev->dev,
"set phy config ret = %pe last_status = %s\n",
- ERR_PTR(err),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
return err;
}
@@ -7911,8 +7873,7 @@ static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
}
dev_info(&pf->pdev->dev,
"Error adding mac filter on macvlan err %pe, aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(hw, aq_err));
+ ERR_PTR(ret), libie_aq_str(aq_err));
netdev_err(vdev, "L2fwd offload disabled to L2 filter error\n");
}
@@ -7984,8 +7945,7 @@ static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
if (ret) {
dev_info(&pf->pdev->dev,
"Update vsi tc config failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(hw->aq.asq_last_status));
return ret;
}
/* update the local VSI info with updated queue map */
@@ -8200,8 +8160,7 @@ static void i40e_fwd_del(struct net_device *netdev, void *vdev)
} else {
dev_info(&pf->pdev->dev,
"Error deleting mac filter on macvlan err %pe, aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(hw, aq_err));
+ ERR_PTR(ret), libie_aq_str(aq_err));
}
break;
}
@@ -9204,47 +9163,47 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
i40e_reset_fdir_filter_cnt(pf);
/* Reprogram the default input set for TCP/IPv4 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
/* Reprogram the default input set for TCP/IPv6 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP,
I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
/* Reprogram the default input set for UDP/IPv4 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
/* Reprogram the default input set for UDP/IPv6 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP,
I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
/* Reprogram the default input set for SCTP/IPv4 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
/* Reprogram the default input set for SCTP/IPv6 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP,
I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
/* Reprogram the default input set for Other/IPv4 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_FRAG_IPV4,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
/* Reprogram the default input set for Other/IPv6 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV6,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_FRAG_IPV6,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
}
@@ -9455,8 +9414,7 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
static int i40e_handle_lldp_event(struct i40e_pf *pf,
struct i40e_arq_event_info *e)
{
- struct i40e_aqc_lldp_get_mib *mib =
- (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
+ struct i40e_aqc_lldp_get_mib *mib = libie_aq_raw(&e->desc);
struct i40e_hw *hw = &pf->hw;
struct i40e_dcbx_config tmp_dcbx_cfg;
bool need_reconfig = false;
@@ -9513,8 +9471,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
dev_info(&pf->pdev->dev,
"Failed querying DCB configuration data from firmware, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
}
goto exit;
}
@@ -9595,8 +9552,7 @@ void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
struct i40e_arq_event_info *e)
{
- struct i40e_aqc_lan_overflow *data =
- (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
+ struct i40e_aqc_lan_overflow *data = libie_aq_raw(&e->desc);
u32 queue = le32_to_cpu(data->prtdcb_rupto);
u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
struct i40e_hw *hw = &pf->hw;
@@ -9621,19 +9577,6 @@ static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
}
/**
- * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
- * @pf: board private structure
- **/
-u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
-{
- u32 val, fcnt_prog;
-
- val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
- fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
- return fcnt_prog;
-}
-
-/**
* i40e_get_current_fd_count - Get total FD filters programmed for this PF
* @pf: board private structure
**/
@@ -9685,7 +9628,7 @@ static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
* settings. It is safe to restore the default input set
* because there are no active TCPv4 filter rules.
*/
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
@@ -9988,6 +9931,9 @@ static void i40e_link_event(struct i40e_pf *pf)
new_link == netif_carrier_ok(vsi->netdev)))
return;
+ if (!new_link && old_link)
+ pf->link_down_events++;
+
i40e_print_link_message(vsi, new_link);
/* Notify the base of the switch tree connected to
@@ -10126,8 +10072,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
static void i40e_handle_link_event(struct i40e_pf *pf,
struct i40e_arq_event_info *e)
{
- struct i40e_aqc_get_link_status *status =
- (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
+ struct i40e_aqc_get_link_status *status = libie_aq_raw(&e->desc);
/* Do a new status request to re-enable LSE reporting
* and load new status information into the hw struct
@@ -10335,8 +10280,7 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't get PF vsi config, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
return;
}
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
@@ -10347,8 +10291,7 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
if (ret) {
dev_info(&pf->pdev->dev,
"update vsi switch failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
}
}
@@ -10371,8 +10314,7 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't get PF vsi config, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
return;
}
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
@@ -10383,8 +10325,7 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
if (ret) {
dev_info(&pf->pdev->dev,
"update vsi switch failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
}
}
@@ -10499,12 +10440,12 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
static int i40e_get_capabilities(struct i40e_pf *pf,
enum i40e_admin_queue_opc list_type)
{
- struct i40e_aqc_list_capabilities_element_resp *cap_buf;
+ struct libie_aqc_list_caps_elem *cap_buf;
u16 data_size;
int buf_len;
int err;
- buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
+ buf_len = 40 * sizeof(struct libie_aqc_list_caps_elem);
do {
cap_buf = kzalloc(buf_len, GFP_KERNEL);
if (!cap_buf)
@@ -10517,15 +10458,14 @@ static int i40e_get_capabilities(struct i40e_pf *pf,
/* data loaded, buffer no longer needed */
kfree(cap_buf);
- if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
+ if (pf->hw.aq.asq_last_status == LIBIE_AQ_RC_ENOMEM) {
/* retry with a larger buffer */
buf_len = data_size;
- } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
+ } else if (pf->hw.aq.asq_last_status != LIBIE_AQ_RC_OK || err) {
dev_info(&pf->pdev->dev,
"capability discovery failed, err %pe aq_err %s\n",
ERR_PTR(err),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -ENODEV;
}
} while (err);
@@ -10662,8 +10602,7 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
dev_dbg(&pf->pdev->dev,
"Failed to rebuild cloud filter, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return ret;
}
}
@@ -10904,8 +10843,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
ret = i40e_init_adminq(&pf->hw);
if (ret) {
dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
goto clear_recovery;
}
i40e_get_oem_version(&pf->hw);
@@ -11016,8 +10954,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
if (ret)
dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
/* Rebuild the VSIs and VEBs that existed before reset.
* They are still in our local switch element arrays, so only
@@ -11115,8 +11052,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
if (ret)
dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
}
/* reinit the misc interrupt */
if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
@@ -11147,8 +11083,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
dev_warn(&pf->pdev->dev,
"Failed to restore promiscuous setting: %s, err %pe aq_err %s\n",
pf->cur_promisc ? "on" : "off",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
i40e_reset_all_vfs(pf, true);
@@ -11209,6 +11144,67 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
}
/**
+ * i40e_print_vf_mdd_event - print VF Tx/Rx malicious driver detect event
+ * @pf: board private structure
+ * @vf: pointer to the VF structure
+ * @is_tx: true - for Tx event, false - for Rx
+ */
+static void i40e_print_vf_mdd_event(struct i40e_pf *pf, struct i40e_vf *vf,
+ bool is_tx)
+{
+ dev_err(&pf->pdev->dev, is_tx ?
+ "%lld Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pm. mdd-auto-reset-vfs=%s\n" :
+ "%lld Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pm. mdd-auto-reset-vfs=%s\n",
+ is_tx ? vf->mdd_tx_events.count : vf->mdd_rx_events.count,
+ pf->hw.pf_id,
+ vf->vf_id,
+ vf->default_lan_addr.addr,
+ str_on_off(test_bit(I40E_FLAG_MDD_AUTO_RESET_VF, pf->flags)));
+}
+
+/**
+ * i40e_print_vfs_mdd_events - print VFs malicious driver detect event
+ * @pf: pointer to the PF structure
+ *
+ * Called from i40e_handle_mdd_event to rate limit and print VFs MDD events.
+ */
+static void i40e_print_vfs_mdd_events(struct i40e_pf *pf)
+{
+ unsigned int i;
+
+ /* check that there are pending MDD events to print */
+ if (!test_and_clear_bit(__I40E_MDD_VF_PRINT_PENDING, pf->state))
+ return;
+
+ if (!__ratelimit(&pf->mdd_message_rate_limit))
+ return;
+
+ for (i = 0; i < pf->num_alloc_vfs; i++) {
+ struct i40e_vf *vf = &pf->vf[i];
+ bool is_printed = false;
+
+ /* only print Rx MDD event message if there are new events */
+ if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
+ vf->mdd_rx_events.last_printed = vf->mdd_rx_events.count;
+ i40e_print_vf_mdd_event(pf, vf, false);
+ is_printed = true;
+ }
+
+ /* only print Tx MDD event message if there are new events */
+ if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
+ vf->mdd_tx_events.last_printed = vf->mdd_tx_events.count;
+ i40e_print_vf_mdd_event(pf, vf, true);
+ is_printed = true;
+ }
+
+ if (is_printed && !test_bit(I40E_FLAG_MDD_AUTO_RESET_VF, pf->flags))
+ dev_info(&pf->pdev->dev,
+ "Use PF Control I/F to re-enable the VF #%d\n",
+ i);
+ }
+}
+
+/**
* i40e_handle_mdd_event
* @pf: pointer to the PF structure
*
@@ -11222,8 +11218,13 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
u32 reg;
int i;
- if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
+ if (!test_and_clear_bit(__I40E_MDD_EVENT_PENDING, pf->state)) {
+ /* Since the VF MDD event logging is rate limited, check if
+ * there are pending MDD events.
+ */
+ i40e_print_vfs_mdd_events(pf);
return;
+ }
/* find what triggered the MDD event */
reg = rd32(hw, I40E_GL_MDET_TX);
@@ -11267,36 +11268,48 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
/* see if one of the VFs needs its hand slapped */
for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
+ bool is_mdd_on_tx = false;
+ bool is_mdd_on_rx = false;
+
vf = &(pf->vf[i]);
reg = rd32(hw, I40E_VP_MDET_TX(i));
if (reg & I40E_VP_MDET_TX_VALID_MASK) {
+ set_bit(__I40E_MDD_VF_PRINT_PENDING, pf->state);
wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
- vf->num_mdd_events++;
- dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
- i);
- dev_info(&pf->pdev->dev,
- "Use PF Control I/F to re-enable the VF\n");
+ vf->mdd_tx_events.count++;
set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
+ is_mdd_on_tx = true;
}
reg = rd32(hw, I40E_VP_MDET_RX(i));
if (reg & I40E_VP_MDET_RX_VALID_MASK) {
+ set_bit(__I40E_MDD_VF_PRINT_PENDING, pf->state);
wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
- vf->num_mdd_events++;
- dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
- i);
- dev_info(&pf->pdev->dev,
- "Use PF Control I/F to re-enable the VF\n");
+ vf->mdd_rx_events.count++;
set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
+ is_mdd_on_rx = true;
+ }
+
+ if ((is_mdd_on_tx || is_mdd_on_rx) &&
+ test_bit(I40E_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
+ /* VF MDD event counters will be cleared by
+ * reset, so print the event prior to reset.
+ */
+ if (is_mdd_on_rx)
+ i40e_print_vf_mdd_event(pf, vf, false);
+ if (is_mdd_on_tx)
+ i40e_print_vf_mdd_event(pf, vf, true);
+
+ i40e_vc_reset_vf(vf, true);
}
}
- /* re-enable mdd interrupt cause */
- clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
reg = rd32(hw, I40E_PFINT_ICR0_ENA);
reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
wr32(hw, I40E_PFINT_ICR0_ENA, reg);
i40e_flush(hw);
+
+ i40e_print_vfs_mdd_events(pf);
}
/**
@@ -11363,7 +11376,7 @@ static void i40e_service_task(struct work_struct *work)
**/
static void i40e_service_timer(struct timer_list *t)
{
- struct i40e_pf *pf = from_timer(pf, t, service_timer);
+ struct i40e_pf *pf = timer_container_of(pf, t, service_timer);
mod_timer(&pf->service_timer,
round_jiffies(jiffies + pf->service_timer_period));
@@ -12276,8 +12289,7 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
dev_info(&pf->pdev->dev,
"Cannot get RSS key, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return ret;
}
}
@@ -12290,8 +12302,7 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
dev_info(&pf->pdev->dev,
"Cannot get RSS lut, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return ret;
}
}
@@ -12458,7 +12469,7 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
- hena |= i40e_pf_get_default_rss_hena(pf);
+ hena |= i40e_pf_get_default_rss_hashcfg(pf);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
@@ -12606,89 +12617,6 @@ int i40e_set_partition_bw_setting(struct i40e_pf *pf)
}
/**
- * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
- * @pf: board private structure
- **/
-int i40e_commit_partition_bw_setting(struct i40e_pf *pf)
-{
- /* Commit temporary BW setting to permanent NVM image */
- enum i40e_admin_queue_err last_aq_status;
- u16 nvm_word;
- int ret;
-
- if (pf->hw.partition_id != 1) {
- dev_info(&pf->pdev->dev,
- "Commit BW only works on partition 1! This is partition %d",
- pf->hw.partition_id);
- ret = -EOPNOTSUPP;
- goto bw_commit_out;
- }
-
- /* Acquire NVM for read access */
- ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
- last_aq_status = pf->hw.aq.asq_last_status;
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Cannot acquire NVM for read access, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, last_aq_status));
- goto bw_commit_out;
- }
-
- /* Read word 0x10 of NVM - SW compatibility word 1 */
- ret = i40e_aq_read_nvm(&pf->hw,
- I40E_SR_NVM_CONTROL_WORD,
- 0x10, sizeof(nvm_word), &nvm_word,
- false, NULL);
- /* Save off last admin queue command status before releasing
- * the NVM
- */
- last_aq_status = pf->hw.aq.asq_last_status;
- i40e_release_nvm(&pf->hw);
- if (ret) {
- dev_info(&pf->pdev->dev, "NVM read error, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, last_aq_status));
- goto bw_commit_out;
- }
-
- /* Wait a bit for NVM release to complete */
- msleep(50);
-
- /* Acquire NVM for write access */
- ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
- last_aq_status = pf->hw.aq.asq_last_status;
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Cannot acquire NVM for write access, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, last_aq_status));
- goto bw_commit_out;
- }
- /* Write it back out unchanged to initiate update NVM,
- * which will force a write of the shadow (alt) RAM to
- * the NVM - thus storing the bandwidth values permanently.
- */
- ret = i40e_aq_update_nvm(&pf->hw,
- I40E_SR_NVM_CONTROL_WORD,
- 0x10, sizeof(nvm_word),
- &nvm_word, true, 0, NULL);
- /* Save off last admin queue command status before releasing
- * the NVM
- */
- last_aq_status = pf->hw.aq.asq_last_status;
- i40e_release_nvm(&pf->hw);
- if (ret)
- dev_info(&pf->pdev->dev,
- "BW settings NOT SAVED, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, last_aq_status));
-bw_commit_out:
-
- return ret;
-}
-
-/**
* i40e_is_total_port_shutdown_enabled - read NVM and return value
* if total port shutdown feature is enabled for this PF
* @pf: board private structure
@@ -13035,8 +12963,7 @@ static int i40e_udp_tunnel_set_port(struct net_device *netdev,
NULL);
if (ret) {
netdev_info(netdev, "add UDP port failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(hw->aq.asq_last_status));
return -EIO;
}
@@ -13055,8 +12982,7 @@ static int i40e_udp_tunnel_unset_port(struct net_device *netdev,
ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);
if (ret) {
netdev_info(netdev, "delete UDP port failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(hw->aq.asq_last_status));
return -EIO;
}
@@ -13087,12 +13013,13 @@ static int i40e_get_phys_port_id(struct net_device *netdev,
* @addr: the MAC address entry being added
* @vid: VLAN ID
* @flags: instructions from stack about fdb operation
+ * @notified: whether notification was emitted
* @extack: netlink extended ack, unused currently
*/
static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
- u16 flags,
+ u16 flags, bool *notified,
struct netlink_ext_ack *extack)
{
struct i40e_netdev_priv *np = netdev_priv(dev);
@@ -13655,7 +13582,6 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = i40e_set_mac,
.ndo_change_mtu = i40e_change_mtu,
- .ndo_eth_ioctl = i40e_ioctl,
.ndo_tx_timeout = i40e_tx_timeout,
.ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
@@ -13683,6 +13609,8 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_xsk_wakeup = i40e_xsk_wakeup,
.ndo_dfwd_add_station = i40e_fwd_add,
.ndo_dfwd_del_station = i40e_fwd_del,
+ .ndo_hwtstamp_get = i40e_ptp_hwtstamp_get,
+ .ndo_hwtstamp_set = i40e_ptp_hwtstamp_set,
};
/**
@@ -13944,8 +13872,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
dev_info(&pf->pdev->dev,
"couldn't get PF vsi config, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
return -ENOENT;
}
vsi->info = ctxt.info;
@@ -13974,8 +13901,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
dev_info(&pf->pdev->dev,
"update vsi failed, err %d aq_err %s\n",
ret,
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
ret = -ENOENT;
goto err;
}
@@ -13994,8 +13920,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
dev_info(&pf->pdev->dev,
"update vsi failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
ret = -ENOENT;
goto err;
}
@@ -14018,8 +13943,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
"failed to configure TCs for main VSI tc_map 0x%08x, err %pe aq_err %s\n",
enabled_tc,
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
}
}
break;
@@ -14113,8 +14037,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
dev_info(&vsi->back->pdev->dev,
"add vsi failed, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
ret = -ENOENT;
goto err;
}
@@ -14144,8 +14067,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't get vsi bw info, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
/* VSI is already added so not tearing that up */
ret = 0;
}
@@ -14593,8 +14515,7 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
if (ret) {
dev_info(&pf->pdev->dev,
"query veb bw config failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(hw->aq.asq_last_status));
goto out;
}
@@ -14603,8 +14524,7 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
if (ret) {
dev_info(&pf->pdev->dev,
"query veb bw ets config failed, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(hw->aq.asq_last_status));
goto out;
}
@@ -14792,8 +14712,7 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't add VEB, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
return -EPERM;
}
@@ -14803,16 +14722,14 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't get VEB statistics idx, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
return -EPERM;
}
ret = i40e_veb_get_bw_info(veb);
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't get VEB bw info, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
return -ENOENT;
}
@@ -15007,9 +14924,7 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
if (ret) {
dev_info(&pf->pdev->dev,
"get switch config failed err %d aq_err %s\n",
- ret,
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ ret, libie_aq_str(pf->hw.aq.asq_last_status));
kfree(aq_buf);
return -ENOENT;
}
@@ -15054,8 +14969,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't fetch switch config, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(ret), libie_aq_str(pf->hw.aq.asq_last_status));
return ret;
}
i40e_pf_reset_stats(pf);
@@ -15078,12 +14992,11 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
NULL);
- if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
+ if (ret && pf->hw.aq.asq_last_status != LIBIE_AQ_RC_ESRCH) {
dev_info(&pf->pdev->dev,
"couldn't set switch config bits, err %pe aq_err %s\n",
ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
/* not a fatal problem, just keep going */
}
pf->last_sw_conf_valid_flags = valid_flags;
@@ -15924,7 +15837,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port;
pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port;
- pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared;
pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS;
pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN |
@@ -15986,8 +15898,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
if (err)
dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n",
- ERR_PTR(err),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
+
+ /* VF MDD event logs are rate limited to one second intervals */
+ ratelimit_state_init(&pf->mdd_message_rate_limit, 1 * HZ, 1);
/* Reconfigure hardware for allowing smaller MSS in the case
* of TSO, so that we avoid the MDD being fired and causing
@@ -16006,8 +15920,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n",
ERR_PTR(err),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
}
/* The main driver is (mostly) up and happy. We need to set this state
* before setting up the misc vector or we get a race and the vector
@@ -16138,8 +16051,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
if (err)
dev_dbg(&pf->pdev->dev, "get requested speeds ret = %pe last_status = %s\n",
- ERR_PTR(err),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
/* set the FEC config due to the board capabilities */
@@ -16149,16 +16061,19 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
if (err)
dev_dbg(&pf->pdev->dev, "get supported phy types ret = %pe last_status = %s\n",
- ERR_PTR(err),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
- /* make sure the MFS hasn't been set lower than the default */
#define MAX_FRAME_SIZE_DEFAULT 0x2600
- val = FIELD_GET(I40E_PRTGL_SAH_MFS_MASK,
- rd32(&pf->hw, I40E_PRTGL_SAH));
- if (val < MAX_FRAME_SIZE_DEFAULT)
- dev_warn(&pdev->dev, "MFS for port %x (%d) has been set below the default (%d)\n",
- pf->hw.port, val, MAX_FRAME_SIZE_DEFAULT);
+
+ err = i40e_aq_set_mac_config(hw, MAX_FRAME_SIZE_DEFAULT, NULL);
+ if (err)
+ dev_warn(&pdev->dev, "set mac config ret = %pe last_status = %s\n",
+ ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
+
+ /* Make sure the MFS is set to the expected value */
+ val = rd32(hw, I40E_PRTGL_SAH);
+ FIELD_MODIFY(I40E_PRTGL_SAH_MFS_MASK, &val, MAX_FRAME_SIZE_DEFAULT);
+ wr32(hw, I40E_PRTGL_SAH, val);
/* Add a filter to drop all Flow control frames from any VSI from being
* transmitted. By doing so we stop a malicious VF from sending out
@@ -16412,7 +16327,7 @@ static int i40e_io_suspend(struct i40e_pf *pf)
set_bit(__I40E_DOWN, pf->state);
/* Ensure service task will not be running */
- del_timer_sync(&pf->service_timer);
+ timer_delete_sync(&pf->service_timer);
cancel_work_sync(&pf->service_task);
/* Client close must be called explicitly here because the timer
@@ -16540,7 +16455,6 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
} else {
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
pci_wake_from_d3(pdev, false);
reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
@@ -16611,7 +16525,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
set_bit(__I40E_SUSPENDED, pf->state);
set_bit(__I40E_DOWN, pf->state);
- del_timer_sync(&pf->service_timer);
+ timer_delete_sync(&pf->service_timer);
cancel_work_sync(&pf->service_task);
i40e_cloud_filter_exit(pf);
i40e_fdir_teardown(pf);
@@ -16720,7 +16634,7 @@ static int __init i40e_init_module(void)
* since we need to be able to guarantee forward progress even under
* memory pressure.
*/
- i40e_wq = alloc_workqueue("%s", 0, 0, i40e_driver_name);
+ i40e_wq = alloc_workqueue("%s", WQ_PERCPU, 0, i40e_driver_name);
if (!i40e_wq) {
pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
return -ENOMEM;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 7f0936f4e05e..ed3c54e36be3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -997,7 +997,7 @@ static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
u8 *bytes, int *perrno)
{
struct i40e_asq_cmd_details cmd_details;
- struct i40e_aq_desc *aq_desc;
+ struct libie_aq_desc *aq_desc;
u32 buff_size = 0;
u8 *buff = NULL;
u32 aq_desc_len;
@@ -1011,7 +1011,7 @@ static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
memset(&cmd_details, 0, sizeof(cmd_details));
cmd_details.wb_desc = &hw->nvm_wb_desc;
- aq_desc_len = sizeof(struct i40e_aq_desc);
+ aq_desc_len = sizeof(struct libie_aq_desc);
memset(&hw->nvm_wb_desc, 0, aq_desc_len);
/* get the aq descriptor */
@@ -1022,7 +1022,7 @@ static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
*perrno = -EINVAL;
return -EINVAL;
}
- aq_desc = (struct i40e_aq_desc *)bytes;
+ aq_desc = (struct libie_aq_desc *)bytes;
/* if data buffer needed, make sure it's ready */
aq_data_len = cmd->data_size - aq_desc_len;
@@ -1053,7 +1053,7 @@ static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_NVM,
"%s err %pe aq_err %s\n",
__func__, ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
return status;
}
@@ -1087,7 +1087,7 @@ static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
- aq_desc_len = sizeof(struct i40e_aq_desc);
+ aq_desc_len = sizeof(struct libie_aq_desc);
aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);
/* check offset range */
@@ -1154,7 +1154,7 @@ static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
- aq_desc_len = sizeof(struct i40e_aq_desc);
+ aq_desc_len = sizeof(struct libie_aq_desc);
aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen);
/* check copylength range */
@@ -1442,7 +1442,7 @@ retry:
* so here we try to reacquire the semaphore then retry the write.
* We only do one retry, then give up.
*/
- if (status && hw->aq.asq_last_status == I40E_AQ_RC_EBUSY &&
+ if (status && hw->aq.asq_last_status == LIBIE_AQ_RC_EBUSY &&
!retry_attempt) {
u32 old_asq_status = hw->aq.asq_last_status;
int old_status = status;
@@ -1628,9 +1628,9 @@ void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
* @desc: AdminQ descriptor
**/
void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
- struct i40e_aq_desc *desc)
+ struct libie_aq_desc *desc)
{
- u32 aq_desc_len = sizeof(struct i40e_aq_desc);
+ u32 aq_desc_len = sizeof(struct libie_aq_desc);
if (opcode == hw->nvm_wait_opcode) {
memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 5a0699ca7ce5..26bb7bffe361 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -23,29 +23,22 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
struct i40e_arq_event_info *e,
u16 *events_pending);
int
-i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+i40e_asq_send_command(struct i40e_hw *hw, struct libie_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
int
-i40e_asq_send_command_v2(struct i40e_hw *hw,
- struct i40e_aq_desc *desc,
- void *buff, /* can be NULL */
- u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details,
- enum i40e_admin_queue_err *aq_status);
-int
-i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+i40e_asq_send_command_atomic(struct i40e_hw *hw, struct libie_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
bool is_atomic_context);
int
i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
- struct i40e_aq_desc *desc,
+ struct libie_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
bool is_atomic_context,
- enum i40e_admin_queue_err *aq_status);
+ enum libie_aq_err *aq_status);
/* debug function for adminq */
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
@@ -53,7 +46,6 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
bool i40e_check_asq_alive(struct i40e_hw *hw);
int i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
-const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
int i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
bool pf_lut, u8 *lut, u16 lut_size);
@@ -72,8 +64,6 @@ int i40e_led_set_phy(struct i40e_hw *hw, bool on,
u16 led_addr, u32 mode);
int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
u16 *val);
-int i40e_blink_phy_link_led(struct i40e_hw *hw,
- u32 time, u32 interval);
/* admin send queue commands */
@@ -108,6 +98,8 @@ int i40e_aq_set_mac_loopback(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_mac_config(struct i40e_hw *hw, u16 max_frame_size,
+ struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
@@ -141,9 +133,6 @@ int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
u16 seid, bool enable, u16 vid,
struct i40e_asq_cmd_details *cmd_details);
-int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
- u16 seid, bool enable,
- struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_get_vsi_params(struct i40e_hw *hw,
struct i40e_vsi_context *vsi_ctx,
struct i40e_asq_cmd_details *cmd_details);
@@ -167,7 +156,7 @@ int
i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_add_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details,
- enum i40e_admin_queue_err *aq_status);
+ enum libie_aq_err *aq_status);
int i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details);
@@ -175,15 +164,7 @@ int
i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details,
- enum i40e_admin_queue_err *aq_status);
-int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rule_id, u16 *rules_used, u16 *rules_free);
-int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rules_used, u16 *rules_free);
+ enum libie_aq_err *aq_status);
int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
@@ -220,9 +201,6 @@ int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, void *data,
bool last_command, u8 preservation_flags,
struct i40e_asq_cmd_details *cmd_details);
-int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
- u8 rearrange_nvm,
- struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
u8 mib_type, void *buff, u16 buff_size,
u16 *local_len, u16 *remote_len,
@@ -234,9 +212,6 @@ i40e_aq_set_lldp_mib(struct i40e_hw *hw,
int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
bool enable_update,
struct i40e_asq_cmd_details *cmd_details);
-int
-i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
- struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
bool persist,
struct i40e_asq_cmd_details *cmd_details);
@@ -365,7 +340,7 @@ int i40e_nvmupd_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *errno);
void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
- struct i40e_aq_desc *desc);
+ struct libie_aq_desc *desc);
void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
@@ -458,13 +433,7 @@ int i40e_read_phy_register_clause45(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 *value);
int i40e_write_phy_register_clause45(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 value);
-int i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
- u8 phy_addr, u16 *value);
-int i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
- u8 phy_addr, u16 value);
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
-int i40e_blink_phy_link_led(struct i40e_hw *hw,
- u32 time, u32 interval);
int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
u16 buff_size, u32 track_id,
u32 *error_offset, u32 *error_info,
@@ -477,20 +446,12 @@ int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
struct i40e_generic_seg_header *
i40e_find_segment_in_package(u32 segment_type,
struct i40e_package_header *pkg_header);
-struct i40e_profile_section_header *
-i40e_find_section_in_profile(u32 section_type,
- struct i40e_profile_segment *profile);
int
i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
u32 track_id);
int
i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
u32 track_id);
-int
-i40e_add_pinfo_to_list(struct i40e_hw *hw,
- struct i40e_profile_segment *profile,
- u8 *profile_info_sec, u32 track_id);
-
/* i40e_ddp */
int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index b72a4b5d76b9..33535418178b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -550,7 +550,7 @@ static int i40e_ptp_enable_pin(struct i40e_pf *pf, unsigned int chan,
pins.gpio_4 = pf->ptp_pins->gpio_4;
/* To turn on the pin - find the corresponding one based on
- * the given index. To to turn the function off - find
+ * the given index. To turn the function off - find
* which pin had it assigned. Don't use ptp_find_pin here
* because it tries to lock the pincfg_mux which is locked by
* ptp_pin_store() that calls here.
@@ -912,23 +912,26 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
}
/**
- * i40e_ptp_get_ts_config - ioctl interface to read the HW timestamping
- * @pf: Board private structure
- * @ifr: ioctl data
+ * i40e_ptp_hwtstamp_get - interface to read the HW timestamping
+ * @netdev: Network device structure
+ * @config: Timestamping configuration structure
*
* Obtain the current hardware timestamping settings as requested. To do this,
* keep a shadow copy of the timestamp settings rather than attempting to
* deconstruct them from the registers.
**/
-int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
+int i40e_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config *config = &pf->tstamp_config;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags))
return -EOPNOTSUPP;
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
- -EFAULT : 0;
+ *config = pf->tstamp_config;
+
+ return 0;
}
/**
@@ -1167,7 +1170,7 @@ int i40e_ptp_alloc_pins(struct i40e_pf *pf)
* more broad if the specific filter is not directly supported.
**/
static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
struct i40e_hw *hw = &pf->hw;
u32 tsyntype, regval;
@@ -1290,9 +1293,10 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
}
/**
- * i40e_ptp_set_ts_config - ioctl interface to control the HW timestamping
- * @pf: Board private structure
- * @ifr: ioctl data
+ * i40e_ptp_hwtstamp_set - interface to control the HW timestamping
+ * @netdev: Network device structure
+ * @config: Timestamping configuration structure
+ * @extack: Netlink extended ack structure for error reporting
*
* Respond to the user filter requests and make the appropriate hardware
* changes here. The XL710 cannot support splitting of the Tx/Rx timestamping
@@ -1303,26 +1307,25 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
* as the user receives the timestamps they care about and the user is notified
* the filter has been broadened.
**/
-int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
+int i40e_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
int err;
if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags))
return -EOPNOTSUPP;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- err = i40e_ptp_set_timestamp_mode(pf, &config);
+ err = i40e_ptp_set_timestamp_mode(pf, config);
if (err)
return err;
/* save these settings for future reference */
- pf->tstamp_config = config;
+ pf->tstamp_config = *config;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index c006f716a3bd..cc0b9efc2637 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/bpf_trace.h>
+#include <linux/net/intel/libie/pctype.h>
#include <linux/net/intel/libie/rx.h>
#include <linux/prefetch.h>
#include <linux/sctp.h>
@@ -397,12 +398,12 @@ static int i40e_add_del_fdir_udp(struct i40e_vsi *vsi,
ret = i40e_prepare_fdir_filter
(pf, fd_data, add, raw_packet,
I40E_UDPIP_DUMMY_PACKET_LEN,
- I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP);
else
ret = i40e_prepare_fdir_filter
(pf, fd_data, add, raw_packet,
I40E_UDPIP6_DUMMY_PACKET_LEN,
- I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP);
if (ret) {
kfree(raw_packet);
@@ -444,12 +445,12 @@ static int i40e_add_del_fdir_tcp(struct i40e_vsi *vsi,
ret = i40e_prepare_fdir_filter
(pf, fd_data, add, raw_packet,
I40E_TCPIP_DUMMY_PACKET_LEN,
- I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP);
else
ret = i40e_prepare_fdir_filter
(pf, fd_data, add, raw_packet,
I40E_TCPIP6_DUMMY_PACKET_LEN,
- I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP);
if (ret) {
kfree(raw_packet);
@@ -499,12 +500,12 @@ static int i40e_add_del_fdir_sctp(struct i40e_vsi *vsi,
ret = i40e_prepare_fdir_filter
(pf, fd_data, add, raw_packet,
I40E_SCTPIP_DUMMY_PACKET_LEN,
- I40E_FILTER_PCTYPE_NONF_IPV4_SCTP);
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP);
else
ret = i40e_prepare_fdir_filter
(pf, fd_data, add, raw_packet,
I40E_SCTPIP6_DUMMY_PACKET_LEN,
- I40E_FILTER_PCTYPE_NONF_IPV6_SCTP);
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP);
if (ret) {
kfree(raw_packet);
@@ -543,11 +544,11 @@ static int i40e_add_del_fdir_ip(struct i40e_vsi *vsi,
int i;
if (ipv4) {
- iter_start = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
- iter_end = I40E_FILTER_PCTYPE_FRAG_IPV4;
+ iter_start = LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ iter_end = LIBIE_FILTER_PCTYPE_FRAG_IPV4;
} else {
- iter_start = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
- iter_end = I40E_FILTER_PCTYPE_FRAG_IPV6;
+ iter_start = LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER;
+ iter_end = LIBIE_FILTER_PCTYPE_FRAG_IPV6;
}
for (i = iter_start; i <= iter_end; i++) {
@@ -947,9 +948,6 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
if (!eop_desc)
break;
- /* prevent any other reads prior to eop_desc */
- smp_rmb();
-
i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
/* we have caught up to head, no work left to do */
if (tx_head == tx_desc)
@@ -2150,10 +2148,10 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
sizeof(skb_frag_t) * nr_frags);
- xdp_update_skb_shared_info(skb, skinfo->nr_frags + nr_frags,
- sinfo->xdp_frags_size,
- nr_frags * xdp->frame_sz,
- xdp_buff_is_frag_pfmemalloc(xdp));
+ xdp_update_skb_frags_info(skb, skinfo->nr_frags + nr_frags,
+ sinfo->xdp_frags_size,
+ nr_frags * xdp->frame_sz,
+ xdp_buff_get_skb_flags(xdp));
/* First buffer has already been processed, so bump ntc */
if (++rx_ring->next_to_clean == rx_ring->count)
@@ -2205,10 +2203,9 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
skb_metadata_set(skb, metasize);
if (unlikely(xdp_buff_has_frags(xdp))) {
- xdp_update_skb_shared_info(skb, nr_frags,
- sinfo->xdp_frags_size,
- nr_frags * xdp->frame_sz,
- xdp_buff_is_frag_pfmemalloc(xdp));
+ xdp_update_skb_frags_info(skb, nr_frags, sinfo->xdp_frags_size,
+ nr_frags * xdp->frame_sz,
+ xdp_buff_get_skb_flags(xdp));
i40e_process_rx_buffs(rx_ring, I40E_XDP_PASS, xdp);
} else {
@@ -2948,9 +2945,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
flex_ptype = FIELD_PREP(I40E_TXD_FLTR_QW0_QINDEX_MASK,
tx_ring->queue_index);
flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
- (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
+ (LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP <<
I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
- (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
+ (LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP <<
I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 7c26c9a2bf65..1e5fd63d47f4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -4,6 +4,7 @@
#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_
+#include <linux/net/intel/libie/pctype.h>
#include <net/xdp.h>
#include "i40e_type.h"
@@ -71,30 +72,30 @@ enum i40e_dyn_idx {
#define I40E_SW_ITR I40E_IDX_ITR2
/* Supported RSS offloads */
-#define I40E_DEFAULT_RSS_HENA ( \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
- BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
-
-#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
-
-#define i40e_pf_get_default_rss_hena(pf) \
+#define I40E_DEFAULT_RSS_HASHCFG ( \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_L2_PAYLOAD))
+
+#define I40E_DEFAULT_RSS_HASHCFG_EXPANDED (I40E_DEFAULT_RSS_HASHCFG | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
+
+#define i40e_pf_get_default_rss_hashcfg(pf) \
(test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, (pf)->hw.caps) ? \
- I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
+ I40E_DEFAULT_RSS_HASHCFG_EXPANDED : I40E_DEFAULT_RSS_HASHCFG)
/* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256 256
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 28568e126850..ed8bbdb586da 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -24,7 +24,7 @@
/* forward declaration */
struct i40e_hw;
-typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
+typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct libie_aq_desc *);
/* Data type manipulation macros. */
@@ -555,8 +555,8 @@ struct i40e_hw {
/* state of nvm update process */
enum i40e_nvmupd_state nvmupd_state;
- struct i40e_aq_desc nvm_wb_desc;
- struct i40e_aq_desc nvm_aq_event_desc;
+ struct libie_aq_desc nvm_wb_desc;
+ struct libie_aq_desc nvm_aq_event_desc;
struct i40e_virt_mem nvm_buff;
bool nvm_release_on_done;
u16 nvm_wait_opcode;
@@ -929,38 +929,6 @@ struct i40e_filter_program_desc {
#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \
I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
-/* Packet Classifier Types for filters */
-enum i40e_filter_pctype {
- /* Note: Values 0-28 are reserved for future use.
- * Value 29, 30, 32 are not supported on XL710 and X710.
- */
- I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
- I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
- I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
- I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
- I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
- I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
- I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
- I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
- /* Note: Values 37-38 are reserved for future use.
- * Value 39, 40, 42 are not supported on XL710 and X710.
- */
- I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
- I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
- I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
- I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
- I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
- I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
- I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
- I40E_FILTER_PCTYPE_FRAG_IPV6 = 46,
- /* Note: Value 47 is reserved for future use */
- I40E_FILTER_PCTYPE_FCOE_OX = 48,
- I40E_FILTER_PCTYPE_FCOE_RX = 49,
- I40E_FILTER_PCTYPE_FCOE_OTHER = 50,
- /* Note: Values 51-62 are reserved for future use */
- I40E_FILTER_PCTYPE_L2_PAYLOAD = 63,
-};
-
enum i40e_filter_program_desc_dest {
I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0,
I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index dfa785e39458..8b30a3accd31 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -216,7 +216,7 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
* @notify_vf: notify vf about reset or not
* Reset VF handler.
**/
-static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
+void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
{
struct i40e_pf *pf = vf->pf;
int i;
@@ -448,7 +448,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
(qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
(pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
- (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
+ FIELD_PREP(I40E_QINT_RQCTL_ITR_INDX_MASK, itr_idx);
wr32(hw, reg_idx, reg);
}
@@ -653,6 +653,13 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
/* only set the required fields */
tx_ctx.base = info->dma_ring_addr / 128;
+
+ /* ring_len has to be a multiple of 8 */
+ if (!IS_ALIGNED(info->ring_len, 8) ||
+ info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) {
+ ret = -EINVAL;
+ goto error_context;
+ }
tx_ctx.qlen = info->ring_len;
tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
tx_ctx.rdylist_act = 0;
@@ -716,6 +723,13 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
/* only set the required fields */
rx_ctx.base = info->dma_ring_addr / 128;
+
+ /* ring_len has to be a multiple of 32 */
+ if (!IS_ALIGNED(info->ring_len, 32) ||
+ info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) {
+ ret = -EINVAL;
+ goto error_param;
+ }
rx_ctx.qlen = info->ring_len;
if (info->splithdr_enabled) {
@@ -812,7 +826,7 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
}
if (!idx) {
- u64 hena = i40e_pf_get_default_rss_hena(pf);
+ u64 hashcfg = i40e_pf_get_default_rss_hashcfg(pf);
u8 broadcast[ETH_ALEN];
vf->lan_vsi_idx = vsi->idx;
@@ -841,8 +855,9 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
dev_info(&pf->pdev->dev,
"Could not allocate VF broadcast filter\n");
spin_unlock_bh(&vsi->mac_filter_hash_lock);
- wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
- wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
+ wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hashcfg);
+ wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id),
+ (u32)(hashcfg >> 32));
/* program mac filter only for VF VSI */
ret = i40e_sync_vsi_filters(vsi);
if (ret)
@@ -1289,9 +1304,8 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
dev_err(&pf->pdev->dev,
"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
- vf->vf_id,
- ERR_PTR(aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
+ vf->vf_id, ERR_PTR(aq_ret),
+ libie_aq_str(aq_err));
return aq_ret;
}
@@ -1305,9 +1319,8 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
dev_err(&pf->pdev->dev,
"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
- vf->vf_id,
- ERR_PTR(aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
+ vf->vf_id, ERR_PTR(aq_ret),
+ libie_aq_str(aq_err));
}
return aq_ret;
@@ -1322,9 +1335,8 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
dev_err(&pf->pdev->dev,
"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
- vf->vf_id,
- ERR_PTR(aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
+ vf->vf_id, ERR_PTR(aq_ret),
+ libie_aq_str(aq_err));
if (!aq_tmp)
aq_tmp = aq_ret;
@@ -1338,9 +1350,8 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
dev_err(&pf->pdev->dev,
"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
- vf->vf_id,
- ERR_PTR(aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
+ vf->vf_id, ERR_PTR(aq_ret),
+ libie_aq_str(aq_err));
if (!aq_tmp)
aq_tmp = aq_ret;
@@ -1453,6 +1464,7 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
* functions that may still be running at this point.
*/
clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
+ clear_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states);
/* In the case of a VFLR, the HW has already reset the VF and we
* just need to clean up, so don't hit the VFRTRIG register.
@@ -1546,8 +1558,8 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
* @vf: pointer to the VF structure
* @flr: VFLR was issued or not
*
- * Returns true if the VF is in reset, resets successfully, or resets
- * are disabled and false otherwise.
+ * Return: True if reset was performed successfully or if resets are disabled.
+ * False if reset is already in progress.
**/
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
@@ -1566,7 +1578,7 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
/* If VF is being reset already we don't need to continue. */
if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
- return true;
+ return false;
i40e_trigger_vf_reset(vf, flr);
@@ -2119,7 +2131,10 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
size_t len = 0;
int ret;
- if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
+ i40e_sync_vf_state(vf, I40E_VF_STATE_INIT);
+
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) ||
+ test_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states)) {
aq_ret = -EINVAL;
goto err;
}
@@ -2222,6 +2237,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
vf->default_lan_addr.addr);
}
set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
+ set_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states);
err:
/* send the response back to the VF */
@@ -2384,7 +2400,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
}
if (vf->adq_enabled) {
- if (idx >= ARRAY_SIZE(vf->ch)) {
+ if (idx >= vf->num_tc) {
aq_ret = -ENODEV;
goto error_param;
}
@@ -2405,7 +2421,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
* to its appropriate VSIs based on TC mapping
*/
if (vf->adq_enabled) {
- if (idx >= ARRAY_SIZE(vf->ch)) {
+ if (idx >= vf->num_tc) {
aq_ret = -ENODEV;
goto error_param;
}
@@ -2455,8 +2471,10 @@ static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
u16 vsi_queue_id, queue_id;
for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
- if (vf->adq_enabled) {
- vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
+ u16 idx = vsi_queue_id / I40E_MAX_VF_VSI;
+
+ if (vf->adq_enabled && idx < vf->num_tc) {
+ vsi_id = vf->ch[idx].vsi_id;
queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
} else {
queue_id = vsi_queue_id;
@@ -2844,24 +2862,6 @@ error_param:
(u8 *)&stats, sizeof(stats));
}
-/**
- * i40e_can_vf_change_mac
- * @vf: pointer to the VF info
- *
- * Return true if the VF is allowed to change its MAC filters, false otherwise
- */
-static bool i40e_can_vf_change_mac(struct i40e_vf *vf)
-{
- /* If the VF MAC address has been set administratively (via the
- * ndo_set_vf_mac command), then deny permission to the VF to
- * add/delete unicast MAC addresses, unless the VF is trusted
- */
- if (vf->pf_set_mac && !vf->trusted)
- return false;
-
- return true;
-}
-
#define I40E_MAX_MACVLAN_PER_HW 3072
#define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \
(num_ports))
@@ -2900,8 +2900,10 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
struct i40e_hw *hw = &pf->hw;
- int mac2add_cnt = 0;
- int i;
+ int i, mac_add_max, mac_add_cnt = 0;
+ bool vf_trusted;
+
+ vf_trusted = test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
for (i = 0; i < al->num_elements; i++) {
struct i40e_mac_filter *f;
@@ -2921,9 +2923,8 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
* The VF may request to set the MAC address filter already
* assigned to it so do not return an error in that case.
*/
- if (!i40e_can_vf_change_mac(vf) &&
- !is_multicast_ether_addr(addr) &&
- !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
+ if (!vf_trusted && !is_multicast_ether_addr(addr) &&
+ vf->pf_set_mac && !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
dev_err(&pf->pdev->dev,
"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
return -EPERM;
@@ -2932,31 +2933,50 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
/*count filters that really will be added*/
f = i40e_find_mac(vsi, addr);
if (!f)
- ++mac2add_cnt;
+ ++mac_add_cnt;
}
+ /* Determine the maximum number of MAC addresses this VF may use.
+ *
+ * - For untrusted VFs: use a fixed small limit.
+ *
+ * - For trusted VFs: the limit is calculated by dividing the total
+ *   MAC filter pool across all VFs/ports.
+ *
+ * - The user can override this with the devlink param "max_mac_per_vf".
+ *   If set, its value is used as a strict cap for both trusted and
+ *   untrusted VFs.
+ *   Note: even when overridden, this is a theoretical maximum; hardware
+ *   may reject additional MACs if the absolute HW limit is reached.
+ */
+ if (!vf_trusted)
+ mac_add_max = I40E_VC_MAX_MAC_ADDR_PER_VF;
+ else
+ mac_add_max = I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs, hw->num_ports);
+
+ if (pf->max_mac_per_vf > 0)
+ mac_add_max = pf->max_mac_per_vf;
- /* If this VF is not privileged, then we can't add more than a limited
- * number of addresses. Check to make sure that the additions do not
- * push us over the limit.
+ /* A VF can replace all of its filters in one step; in that case
+ * mac_add_max filters will be added as active and another mac_add_max
+ * will be in a to-be-removed state. Account for that.
*/
- if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
- if ((i40e_count_filters(vsi) + mac2add_cnt) >
- I40E_VC_MAX_MAC_ADDR_PER_VF) {
+ if ((i40e_count_active_filters(vsi) + mac_add_cnt) > mac_add_max ||
+ (i40e_count_all_filters(vsi) + mac_add_cnt) > 2 * mac_add_max) {
+ if (pf->max_mac_per_vf == mac_add_max && mac_add_max > 0) {
dev_err(&pf->pdev->dev,
- "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
+ "Cannot add more MAC addresses: VF reached its maximum allowed limit (%d)\n",
+ mac_add_max);
return -EPERM;
}
- /* If this VF is trusted, it can use more resources than untrusted.
- * However to ensure that every trusted VF has appropriate number of
- * resources, divide whole pool of resources per port and then across
- * all VFs.
- */
- } else {
- if ((i40e_count_filters(vsi) + mac2add_cnt) >
- I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs,
- hw->num_ports)) {
+ if (!vf_trusted) {
+ dev_err(&pf->pdev->dev,
+ "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
+ return -EPERM;
+ } else {
dev_err(&pf->pdev->dev,
- "Cannot add more MAC addresses, trusted VF exhausted it's resources\n");
+ "Cannot add more MAC addresses: trusted VF reached its maximum allowed limit (%d)\n",
+ mac_add_max);
return -EPERM;
}
}
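
A condensed restatement (sketch only; the helper name is illustrative) of the limit selection described in the comment above, using the same macros and fields as the hunk:

static int example_mac_add_max(struct i40e_pf *pf, bool vf_trusted)
{
	int max = vf_trusted ?
		  I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs,
						     pf->hw.num_ports) :
		  I40E_VC_MAX_MAC_ADDR_PER_VF;

	/* a non-zero devlink "max_mac_per_vf" caps trusted and untrusted VFs alike */
	return pf->max_mac_per_vf > 0 ? pf->max_mac_per_vf : max;
}
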
@@ -3137,10 +3157,10 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
const u8 *addr = al->list[i].addr;
/* Allow to delete VF primary MAC only if it was not set
- * administratively by PF or if VF is trusted.
+ * administratively by PF.
*/
if (ether_addr_equal(addr, vf->default_lan_addr.addr)) {
- if (i40e_can_vf_change_mac(vf))
+ if (!vf->pf_set_mac)
was_unimac_deleted = true;
else
continue;
@@ -3447,15 +3467,15 @@ err:
}
/**
- * i40e_vc_get_rss_hena
+ * i40e_vc_get_rss_hashcfg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
*
- * Return the RSS HENA bits allowed by the hardware
+ * Return the RSS Hash configuration bits allowed by the hardware
**/
-static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
+static int i40e_vc_get_rss_hashcfg(struct i40e_vf *vf, u8 *msg)
{
- struct virtchnl_rss_hena *vrh = NULL;
+ struct virtchnl_rss_hashcfg *vrh = NULL;
struct i40e_pf *pf = vf->pf;
int aq_ret = 0;
int len = 0;
@@ -3464,7 +3484,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
aq_ret = -EINVAL;
goto err;
}
- len = sizeof(struct virtchnl_rss_hena);
+ len = sizeof(struct virtchnl_rss_hashcfg);
vrh = kzalloc(len, GFP_KERNEL);
if (!vrh) {
@@ -3472,26 +3492,26 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
len = 0;
goto err;
}
- vrh->hena = i40e_pf_get_default_rss_hena(pf);
+ vrh->hashcfg = i40e_pf_get_default_rss_hashcfg(pf);
err:
/* send the response back to the VF */
- aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
+ aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS,
aq_ret, (u8 *)vrh, len);
kfree(vrh);
return aq_ret;
}
/**
- * i40e_vc_set_rss_hena
+ * i40e_vc_set_rss_hashcfg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
*
- * Set the RSS HENA bits for the VF
+ * Set the RSS Hash configuration bits for the VF
**/
-static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
+static int i40e_vc_set_rss_hashcfg(struct i40e_vf *vf, u8 *msg)
{
- struct virtchnl_rss_hena *vrh =
- (struct virtchnl_rss_hena *)msg;
+ struct virtchnl_rss_hashcfg *vrh =
+ (struct virtchnl_rss_hashcfg *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
int aq_ret = 0;
@@ -3500,13 +3520,14 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
aq_ret = -EINVAL;
goto err;
}
- i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id),
+ (u32)vrh->hashcfg);
i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
- (u32)(vrh->hena >> 32));
+ (u32)(vrh->hashcfg >> 32));
/* send the response to the VF */
err:
- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HASHCFG, aq_ret);
}
/**
@@ -3589,7 +3610,7 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf,
/* action_meta is TC number here to which the filter is applied */
if (!tc_filter->action_meta ||
- tc_filter->action_meta > vf->num_tc) {
+ tc_filter->action_meta >= vf->num_tc) {
dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
vf->vf_id, tc_filter->action_meta);
goto err;
@@ -3746,8 +3767,7 @@ static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
dev_err(&pf->pdev->dev,
"VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
vf->vf_id, ERR_PTR(ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
hlist_del(&cfilter->cloud_node);
kfree(cfilter);
@@ -3849,7 +3869,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
dev_err(&pf->pdev->dev,
"VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
vf->vf_id, ERR_PTR(ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
goto err;
}
@@ -3887,6 +3907,8 @@ err:
aq_ret);
}
+#define I40E_MAX_VF_CLOUD_FILTER 0xFF00
+
/**
* i40e_vc_add_cloud_filter
* @vf: pointer to the VF info
@@ -3926,6 +3948,14 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
goto err_out;
}
+ if (vf->num_cloud_filters >= I40E_MAX_VF_CLOUD_FILTER) {
+ dev_warn(&pf->pdev->dev,
+ "VF %d: Max number of filters reached, can't apply cloud filter\n",
+ vf->vf_id);
+ aq_ret = -ENOSPC;
+ goto err_out;
+ }
+
cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
if (!cfilter) {
aq_ret = -ENOMEM;
@@ -3985,7 +4015,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
dev_err(&pf->pdev->dev,
"VF %d: Failed to add cloud filter, err %pe aq_err %s\n",
vf->vf_id, ERR_PTR(aq_ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ libie_aq_str(pf->hw.aq.asq_last_status));
goto err_free;
}
@@ -4253,11 +4283,11 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
case VIRTCHNL_OP_CONFIG_RSS_LUT:
ret = i40e_vc_config_rss_lut(vf, msg);
break;
- case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
- ret = i40e_vc_get_rss_hena(vf, msg);
+ case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS:
+ ret = i40e_vc_get_rss_hashcfg(vf, msg);
break;
- case VIRTCHNL_OP_SET_RSS_HENA:
- ret = i40e_vc_set_rss_hena(vf, msg);
+ case VIRTCHNL_OP_SET_RSS_HASHCFG:
+ ret = i40e_vc_set_rss_hashcfg(vf, msg);
break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
ret = i40e_vc_enable_vlan_stripping(vf, msg);
@@ -4328,7 +4358,10 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
if (reg & BIT(bit_idx))
/* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
- i40e_reset_vf(vf, true);
+ if (!i40e_reset_vf(vf, true)) {
+ /* At least one VF did not finish resetting, retry next time */
+ set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
+ }
}
return 0;
@@ -4770,6 +4803,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
unsigned long q_map;
struct i40e_vf *vf;
int abs_vf_id;
+ int old_link;
int ret = 0;
int tmp;
@@ -4788,6 +4822,17 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
vf = &pf->vf[vf_id];
abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+ /* skip VF link state change if requested state is already set */
+ if (!vf->link_forced)
+ old_link = IFLA_VF_LINK_STATE_AUTO;
+ else if (vf->link_up)
+ old_link = IFLA_VF_LINK_STATE_ENABLE;
+ else
+ old_link = IFLA_VF_LINK_STATE_DISABLE;
+
+ if (link == old_link)
+ goto error_out;
+
pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
pfe.severity = PF_EVENT_SEVERITY_INFO;
@@ -5003,7 +5048,7 @@ int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
vf_stats->broadcast = stats->rx_broadcast;
vf_stats->multicast = stats->rx_multicast;
vf_stats->rx_dropped = stats->rx_discards + stats->rx_discards_other;
- vf_stats->tx_dropped = stats->tx_discards;
+ vf_stats->tx_dropped = stats->tx_errors;
return 0;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 66f95e2f3146..f558b45725c8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -41,7 +41,8 @@ enum i40e_vf_states {
I40E_VF_STATE_MC_PROMISC,
I40E_VF_STATE_UC_PROMISC,
I40E_VF_STATE_PRE_ENABLE,
- I40E_VF_STATE_RESETTING
+ I40E_VF_STATE_RESETTING,
+ I40E_VF_STATE_RESOURCES_LOADED,
};
/* VF capabilities */
@@ -64,6 +65,12 @@ struct i40evf_channel {
u64 max_tx_rate; /* bandwidth rate allocation for VSIs */
};
+struct i40e_mdd_vf_events {
+ u64 count; /* total count of Rx|Tx events */
+ /* count number of the last printed event */
+ u64 last_printed;
+};
+
/* VF information structure */
struct i40e_vf {
struct i40e_pf *pf;
@@ -92,7 +99,9 @@ struct i40e_vf {
u8 num_queue_pairs; /* num of qps assigned to VF vsis */
u8 num_req_queues; /* num of requested qps */
- u64 num_mdd_events; /* num of mdd events detected */
+ /* num of mdd tx and rx events detected */
+ struct i40e_mdd_vf_events mdd_rx_events;
+ struct i40e_mdd_vf_events mdd_tx_events;
unsigned long vf_caps; /* vf's adv. capabilities */
unsigned long vf_states; /* vf's runtime states */
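
A hedged sketch of how the count/last_printed pair added above can gate log output so a message is only emitted when new MDD events have accumulated since the previous print; the helper and its wording are illustrative, not taken from the patch:

static void example_report_mdd(struct device *dev, u16 vf_id, const char *dir,
			       struct i40e_mdd_vf_events *ev)
{
	if (ev->count == ev->last_printed)
		return;		/* nothing new since the last message */

	dev_warn(dev, "%llu %s MDD events detected on VF %u\n",
		 ev->count, dir, vf_id);
	ev->last_printed = ev->count;
}
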
@@ -120,6 +129,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs);
int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen);
int i40e_vc_process_vflr_event(struct i40e_pf *pf);
+void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf);
bool i40e_reset_vf(struct i40e_vf *vf, bool flr);
bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr);
void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 4e885df789ef..9f47388eaba5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2018 Intel Corporation. */
#include <linux/bpf_trace.h>
+#include <linux/unroll.h>
#include <net/xdp_sock_drv.h>
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"
@@ -395,32 +396,6 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
WARN_ON_ONCE(1);
}
-static int
-i40e_add_xsk_frag(struct i40e_ring *rx_ring, struct xdp_buff *first,
- struct xdp_buff *xdp, const unsigned int size)
-{
- struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);
-
- if (!xdp_buff_has_frags(first)) {
- sinfo->nr_frags = 0;
- sinfo->xdp_frags_size = 0;
- xdp_buff_set_frags_flag(first);
- }
-
- if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
- xsk_buff_free(first);
- return -ENOMEM;
- }
-
- __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
- virt_to_page(xdp->data_hard_start),
- XDP_PACKET_HEADROOM, size);
- sinfo->xdp_frags_size += size;
- xsk_buff_add_frag(xdp);
-
- return 0;
-}
-
/**
* i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
* @rx_ring: Rx ring
@@ -486,8 +461,10 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
if (!first)
first = bi;
- else if (i40e_add_xsk_frag(rx_ring, first, bi, size))
+ else if (!xsk_buff_add_frag(first, bi)) {
+ xsk_buff_free(first);
break;
+ }
if (++next_to_process == count)
next_to_process = 0;
@@ -553,7 +530,8 @@ static void i40e_xmit_pkt_batch(struct i40e_ring *xdp_ring, struct xdp_desc *des
dma_addr_t dma;
u32 i;
- loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
+ unrolled_count(PKTS_PER_BATCH)
+ for (i = 0; i < PKTS_PER_BATCH; i++) {
u32 cmd = I40E_TX_DESC_CMD_ICRC | xsk_is_eop_desc(&desc[i]);
dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
index ef156fad52f2..dd16351a7af8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
@@ -6,7 +6,7 @@
#include <linux/types.h>
-/* This value should match the pragma in the loop_unrolled_for
+/* This value should match the pragma in the unrolled_count()
* macro. Why 4? It is strictly empirical. It seems to be a good
* compromise between the advantage of having simultaneous outstanding
* reads to the DMA array that can hide each others latency and the
@@ -14,14 +14,6 @@
*/
#define PKTS_PER_BATCH 4
-#ifdef __clang__
-#define loop_unrolled_for _Pragma("clang loop unroll_count(4)") for
-#elif __GNUC__ >= 8
-#define loop_unrolled_for _Pragma("GCC unroll 4") for
-#else
-#define loop_unrolled_for for
-#endif
-
struct i40e_ring;
struct i40e_vsi;
struct net_device;
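
The removed loop_unrolled_for above is replaced by unrolled_count() from <linux/unroll.h>. A sketch of how such an annotation can be built from the same pragmas, modeled on the removed macro (assumption: the in-tree macro may differ in detail; names here are illustrative):

#define __example_pragma(x)		_Pragma(#x)

#ifdef __clang__
#define example_unrolled_count(n)	__example_pragma(clang loop unroll_count(n))
#elif __GNUC__ >= 8
#define example_unrolled_count(n)	__example_pragma(GCC unroll n)
#else
#define example_unrolled_count(n)	/* no unrolling hint available */
#endif

/* placed immediately before a for-loop, as in the i40e_xmit_pkt_batch() change above */
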
diff --git a/drivers/net/ethernet/intel/iavf/Makefile b/drivers/net/ethernet/intel/iavf/Makefile
index 356ac9faa5bf..e13720a728ff 100644
--- a/drivers/net/ethernet/intel/iavf/Makefile
+++ b/drivers/net/ethernet/intel/iavf/Makefile
@@ -13,3 +13,5 @@ obj-$(CONFIG_IAVF) += iavf.o
iavf-y := iavf_main.o iavf_ethtool.o iavf_virtchnl.o iavf_fdir.o \
iavf_adv_rss.o iavf_txrx.o iavf_common.o iavf_adminq.o
+
+iavf-$(CONFIG_PTP_1588_CLOCK) += iavf_ptp.o
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 532a0a595fe8..a87e0c6d4017 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -41,6 +41,7 @@
#include "iavf_txrx.h"
#include "iavf_fdir.h"
#include "iavf_adv_rss.h"
+#include "iavf_types.h"
#include <linux/bitmap.h>
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
@@ -82,7 +83,7 @@ struct iavf_vsi {
#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
-#define IAVF_RX_DESC(R, i) (&(((union iavf_32byte_rx_desc *)((R)->desc))[i]))
+#define IAVF_RX_DESC(R, i) (&(((struct iavf_rx_desc *)((R)->desc))[i]))
#define IAVF_TX_DESC(R, i) (&(((struct iavf_tx_desc *)((R)->desc))[i]))
#define IAVF_TX_CTXTDESC(R, i) \
(&(((struct iavf_tx_context_desc *)((R)->desc))[i]))
@@ -113,8 +114,6 @@ struct iavf_q_vector {
u16 reg_idx; /* register index of the interrupt */
char name[IFNAMSIZ + 15];
bool arm_wb_state;
- cpumask_t affinity_mask;
- struct irq_affinity_notify affinity_notify;
};
/* Helper macros to switch between ints/sec and what the register uses.
@@ -267,10 +266,10 @@ struct iavf_adapter {
struct list_head vlan_filter_list;
int num_vlan_filters;
struct list_head mac_filter_list;
- struct mutex crit_lock;
/* Lock to protect accesses to MAC and VLAN lists */
spinlock_t mac_vlan_list_lock;
char misc_vector_name[IFNAMSIZ + 9];
+ u8 rxdid;
int num_active_queues;
int num_req_queues;
@@ -314,8 +313,8 @@ struct iavf_adapter {
#define IAVF_FLAG_AQ_CONFIGURE_RSS BIT_ULL(9) /* direct AQ config */
#define IAVF_FLAG_AQ_GET_CONFIG BIT_ULL(10)
/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */
-#define IAVF_FLAG_AQ_GET_HENA BIT_ULL(11)
-#define IAVF_FLAG_AQ_SET_HENA BIT_ULL(12)
+#define IAVF_FLAG_AQ_GET_RSS_HASHCFG BIT_ULL(11)
+#define IAVF_FLAG_AQ_SET_RSS_HASHCFG BIT_ULL(12)
#define IAVF_FLAG_AQ_SET_RSS_KEY BIT_ULL(13)
#define IAVF_FLAG_AQ_SET_RSS_LUT BIT_ULL(14)
#define IAVF_FLAG_AQ_SET_RSS_HFUNC BIT_ULL(15)
@@ -343,6 +342,17 @@ struct iavf_adapter {
#define IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW BIT_ULL(39)
#define IAVF_FLAG_AQ_CFG_QUEUES_QUANTA_SIZE BIT_ULL(40)
#define IAVF_FLAG_AQ_GET_QOS_CAPS BIT_ULL(41)
+#define IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS BIT_ULL(42)
+#define IAVF_FLAG_AQ_GET_PTP_CAPS BIT_ULL(43)
+#define IAVF_FLAG_AQ_SEND_PTP_CMD BIT_ULL(44)
+
+ /* AQ messages that must be sent after IAVF_FLAG_AQ_GET_CONFIG, in
+ * order to negotiate extended capabilities.
+ */
+#define IAVF_FLAG_AQ_EXTENDED_CAPS \
+ (IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS | \
+ IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS | \
+ IAVF_FLAG_AQ_GET_PTP_CAPS)
/* flags for processing extended capability messages during
* __IAVF_INIT_EXTENDED_CAPS. Each capability exchange requires
@@ -354,10 +364,18 @@ struct iavf_adapter {
u64 extended_caps;
#define IAVF_EXTENDED_CAP_SEND_VLAN_V2 BIT_ULL(0)
#define IAVF_EXTENDED_CAP_RECV_VLAN_V2 BIT_ULL(1)
+#define IAVF_EXTENDED_CAP_SEND_RXDID BIT_ULL(2)
+#define IAVF_EXTENDED_CAP_RECV_RXDID BIT_ULL(3)
+#define IAVF_EXTENDED_CAP_SEND_PTP BIT_ULL(4)
+#define IAVF_EXTENDED_CAP_RECV_PTP BIT_ULL(5)
#define IAVF_EXTENDED_CAPS \
(IAVF_EXTENDED_CAP_SEND_VLAN_V2 | \
- IAVF_EXTENDED_CAP_RECV_VLAN_V2)
+ IAVF_EXTENDED_CAP_RECV_VLAN_V2 | \
+ IAVF_EXTENDED_CAP_SEND_RXDID | \
+ IAVF_EXTENDED_CAP_RECV_RXDID | \
+ IAVF_EXTENDED_CAP_SEND_PTP | \
+ IAVF_EXTENDED_CAP_RECV_PTP)
/* Lock to prevent possible clobbering of
* current_netdev_promisc_flags
@@ -417,12 +435,18 @@ struct iavf_adapter {
VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF)
#define QOS_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_OFFLOAD_QOS)
+#define IAVF_RXDID_ALLOWED(a) \
+ ((a)->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+#define IAVF_PTP_ALLOWED(a) \
+ ((a)->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_PTP)
struct virtchnl_vf_resource *vf_res; /* incl. all VSIs */
struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
struct virtchnl_version_info pf_version;
#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \
((_a)->pf_version.minor == 1))
struct virtchnl_vlan_caps vlan_v2_caps;
+ u64 supp_rxdids;
+ struct iavf_ptp ptp;
u16 msg_enable;
struct iavf_eth_stats current_stats;
struct virtchnl_qos_cap_list *qos_caps;
@@ -430,7 +454,7 @@ struct iavf_adapter {
u32 aq_wait_count;
/* RSS stuff */
enum virtchnl_rss_algorithm hfunc;
- u64 hena;
+ u64 rss_hashcfg;
u16 rss_key_size;
u16 rss_lut_size;
u8 *rss_key;
@@ -555,6 +579,10 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter);
int iavf_get_vf_config(struct iavf_adapter *adapter);
int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter);
int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter);
+int iavf_send_vf_supported_rxdids_msg(struct iavf_adapter *adapter);
+int iavf_get_vf_supported_rxdids(struct iavf_adapter *adapter);
+int iavf_send_vf_ptp_caps_msg(struct iavf_adapter *adapter);
+int iavf_get_vf_ptp_caps(struct iavf_adapter *adapter);
void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter);
u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter);
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush);
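
The SEND_/RECV_ flag pairs defined earlier in this header drive a two-phase exchange: one pass issues the request, a later pass consumes the PF's reply. A hedged sketch of that pattern for the RXDID capability, using the prototypes added just above (assumption: a zero return means success; the real state machine in iavf_main.c is more involved):

static void example_negotiate_rxdids(struct iavf_adapter *adapter)
{
	if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_RXDID) {
		if (!iavf_send_vf_supported_rxdids_msg(adapter))
			adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_RXDID;
		return;		/* wait for the PF's response */
	}

	if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_RXDID) {
		if (!iavf_get_vf_supported_rxdids(adapter))
			adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_RXDID;
	}
}
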
@@ -570,8 +598,8 @@ void iavf_set_promiscuous(struct iavf_adapter *adapter);
bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter);
void iavf_request_stats(struct iavf_adapter *adapter);
int iavf_request_reset(struct iavf_adapter *adapter);
-void iavf_get_hena(struct iavf_adapter *adapter);
-void iavf_set_hena(struct iavf_adapter *adapter);
+void iavf_get_rss_hashcfg(struct iavf_adapter *adapter);
+void iavf_set_rss_hashcfg(struct iavf_adapter *adapter);
void iavf_set_rss_key(struct iavf_adapter *adapter);
void iavf_set_rss_lut(struct iavf_adapter *adapter);
void iavf_set_rss_hfunc(struct iavf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.c b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
index 82fcd18ad660..6937b7dd44cb 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
@@ -18,7 +18,7 @@ static enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
iavf_mem_atq_ring,
(hw->aq.num_asq_entries *
- sizeof(struct iavf_aq_desc)),
+ sizeof(struct libie_aq_desc)),
IAVF_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
return ret_code;
@@ -45,7 +45,7 @@ static enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw)
ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
iavf_mem_arq_ring,
(hw->aq.num_arq_entries *
- sizeof(struct iavf_aq_desc)),
+ sizeof(struct libie_aq_desc)),
IAVF_ADMINQ_DESC_ALIGNMENT);
return ret_code;
@@ -81,7 +81,7 @@ static void iavf_free_adminq_arq(struct iavf_hw *hw)
**/
static enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
{
- struct iavf_aq_desc *desc;
+ struct libie_aq_desc *desc;
struct iavf_dma_mem *bi;
enum iavf_status ret_code;
int i;
@@ -111,9 +111,9 @@ static enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
/* now configure the descriptors for use */
desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);
- desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_BUF);
if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
- desc->flags |= cpu_to_le16(IAVF_AQ_FLAG_LB);
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
desc->opcode = 0;
/* This is in accordance with Admin queue design, there is no
* register for buffer size configuration
@@ -122,12 +122,12 @@ static enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
desc->retval = 0;
desc->cookie_high = 0;
desc->cookie_low = 0;
- desc->params.external.addr_high =
+ desc->params.generic.addr_high =
cpu_to_le32(upper_32_bits(bi->pa));
- desc->params.external.addr_low =
+ desc->params.generic.addr_low =
cpu_to_le32(lower_32_bits(bi->pa));
- desc->params.external.param0 = 0;
- desc->params.external.param1 = 0;
+ desc->params.generic.param0 = 0;
+ desc->params.generic.param1 = 0;
}
alloc_arq_bufs:
@@ -558,8 +558,8 @@ static u16 iavf_clean_asq(struct iavf_hw *hw)
struct iavf_adminq_ring *asq = &hw->aq.asq;
struct iavf_asq_cmd_details *details;
u16 ntc = asq->next_to_clean;
- struct iavf_aq_desc desc_cb;
- struct iavf_aq_desc *desc;
+ struct libie_aq_desc desc_cb;
+ struct libie_aq_desc *desc;
desc = IAVF_ADMINQ_DESC(*asq, ntc);
details = IAVF_ADMINQ_DETAILS(*asq, ntc);
@@ -573,7 +573,7 @@ static u16 iavf_clean_asq(struct iavf_hw *hw)
desc_cb = *desc;
cb_func(hw, &desc_cb);
}
- memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
+ memset((void *)desc, 0, sizeof(struct libie_aq_desc));
memset((void *)details, 0,
sizeof(struct iavf_asq_cmd_details));
ntc++;
@@ -615,14 +615,14 @@ bool iavf_asq_done(struct iavf_hw *hw)
* queue. It runs the queue, cleans the queue, etc
**/
enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
- struct iavf_aq_desc *desc,
+ struct libie_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct iavf_asq_cmd_details *cmd_details)
{
struct iavf_dma_mem *dma_buff = NULL;
struct iavf_asq_cmd_details *details;
- struct iavf_aq_desc *desc_on_ring;
+ struct libie_aq_desc *desc_on_ring;
bool cmd_completed = false;
enum iavf_status status = 0;
u16 retval = 0;
@@ -637,7 +637,7 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
goto asq_send_command_error;
}
- hw->aq.asq_last_status = IAVF_AQ_RC_OK;
+ hw->aq.asq_last_status = LIBIE_AQ_RC_OK;
val = rd32(hw, IAVF_VF_ATQH1);
if (val >= hw->aq.num_asq_entries) {
@@ -717,9 +717,9 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
/* Update the address values in the desc with the pa value
* for respective buffer
*/
- desc_on_ring->params.external.addr_high =
+ desc_on_ring->params.generic.addr_high =
cpu_to_le32(upper_32_bits(dma_buff->pa));
- desc_on_ring->params.external.addr_low =
+ desc_on_ring->params.generic.addr_low =
cpu_to_le32(lower_32_bits(dma_buff->pa));
}
@@ -766,13 +766,13 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
retval &= 0xff;
}
cmd_completed = true;
- if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK)
+ if ((enum libie_aq_err)retval == LIBIE_AQ_RC_OK)
status = 0;
- else if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_EBUSY)
+ else if ((enum libie_aq_err)retval == LIBIE_AQ_RC_EBUSY)
status = IAVF_ERR_NOT_READY;
else
status = IAVF_ERR_ADMIN_QUEUE_ERROR;
- hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval;
+ hw->aq.asq_last_status = (enum libie_aq_err)retval;
}
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
@@ -809,12 +809,12 @@ asq_send_command_error:
*
* Fill the desc with default values
**/
-void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc, u16 opcode)
+void iavf_fill_default_direct_cmd_desc(struct libie_aq_desc *desc, u16 opcode)
{
/* zero out the desc */
- memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
+ memset((void *)desc, 0, sizeof(struct libie_aq_desc));
desc->opcode = cpu_to_le16(opcode);
- desc->flags = cpu_to_le16(IAVF_AQ_FLAG_SI);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_SI);
}
/**
@@ -832,7 +832,7 @@ enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
u16 *pending)
{
u16 ntc = hw->aq.arq.next_to_clean;
- struct iavf_aq_desc *desc;
+ struct libie_aq_desc *desc;
enum iavf_status ret_code = 0;
struct iavf_dma_mem *bi;
u16 desc_idx;
@@ -866,9 +866,9 @@ enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
desc_idx = ntc;
hw->aq.arq_last_status =
- (enum iavf_admin_queue_err)le16_to_cpu(desc->retval);
+ (enum libie_aq_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags);
- if (flags & IAVF_AQ_FLAG_ERR) {
+ if (flags & LIBIE_AQ_FLAG_ERR) {
ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
iavf_debug(hw,
IAVF_DEBUG_AQ_MESSAGE,
@@ -892,14 +892,14 @@ enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
* size
*/
bi = &hw->aq.arq.r.arq_bi[ntc];
- memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
+ memset((void *)desc, 0, sizeof(struct libie_aq_desc));
- desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_BUF);
if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
- desc->flags |= cpu_to_le16(IAVF_AQ_FLAG_LB);
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
desc->datalen = cpu_to_le16((u16)bi->size);
- desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
- desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
+ desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
/* set tail = the last cleaned desc index. */
wr32(hw, IAVF_VF_ARQT1, ntc);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.h b/drivers/net/ethernet/intel/iavf/iavf_adminq.h
index 406506f64bdd..bbf5c4b3a2ae 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.h
@@ -9,7 +9,7 @@
#include "iavf_adminq_cmd.h"
#define IAVF_ADMINQ_DESC(R, i) \
- (&(((struct iavf_aq_desc *)((R).desc_buf.va))[i]))
+ (&(((struct libie_aq_desc *)((R).desc_buf.va))[i]))
#define IAVF_ADMINQ_DESC_ALIGNMENT 4096
@@ -39,7 +39,7 @@ struct iavf_asq_cmd_details {
u16 flags_dis;
bool async;
bool postpone;
- struct iavf_aq_desc *wb_desc;
+ struct libie_aq_desc *wb_desc;
};
#define IAVF_ADMINQ_DETAILS(R, i) \
@@ -47,7 +47,7 @@ struct iavf_asq_cmd_details {
/* ARQ event information */
struct iavf_arq_event_info {
- struct iavf_aq_desc desc;
+ struct libie_aq_desc desc;
u16 msg_len;
u16 buf_len;
u8 *msg_buf;
@@ -72,8 +72,8 @@ struct iavf_adminq_info {
struct mutex arq_mutex; /* Receive queue lock */
/* last status values on send and receive queues */
- enum iavf_admin_queue_err asq_last_status;
- enum iavf_admin_queue_err arq_last_status;
+ enum libie_aq_err asq_last_status;
+ enum libie_aq_err arq_last_status;
};
/**
@@ -123,6 +123,6 @@ static inline int iavf_aq_rc_to_posix(int aq_ret, int aq_rc)
#define IAVF_AQ_LARGE_BUF 512
#define IAVF_ASQ_CMD_TIMEOUT 250000 /* usecs */
-void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc, u16 opcode);
+void iavf_fill_default_direct_cmd_desc(struct libie_aq_desc *desc, u16 opcode);
#endif /* _IAVF_ADMINQ_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h b/drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h
index bc512308557b..0482c9ce9b9c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h
@@ -4,6 +4,8 @@
#ifndef _IAVF_ADMINQ_CMD_H_
#define _IAVF_ADMINQ_CMD_H_
+#include <linux/net/intel/libie/adminq.h>
+
/* This header file defines the iavf Admin Queue commands and is shared between
* iavf Firmware and Software.
*
@@ -21,87 +23,6 @@
/* API version 1.7 implements additional link and PHY-specific APIs */
#define IAVF_MINOR_VER_GET_LINK_INFO_XL710 0x0007
-struct iavf_aq_desc {
- __le16 flags;
- __le16 opcode;
- __le16 datalen;
- __le16 retval;
- __le32 cookie_high;
- __le32 cookie_low;
- union {
- struct {
- __le32 param0;
- __le32 param1;
- __le32 param2;
- __le32 param3;
- } internal;
- struct {
- __le32 param0;
- __le32 param1;
- __le32 addr_high;
- __le32 addr_low;
- } external;
- u8 raw[16];
- } params;
-};
-
-/* Flags sub-structure
- * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
- * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
- */
-
-/* command flags and offsets*/
-#define IAVF_AQ_FLAG_DD_SHIFT 0
-#define IAVF_AQ_FLAG_CMP_SHIFT 1
-#define IAVF_AQ_FLAG_ERR_SHIFT 2
-#define IAVF_AQ_FLAG_VFE_SHIFT 3
-#define IAVF_AQ_FLAG_LB_SHIFT 9
-#define IAVF_AQ_FLAG_RD_SHIFT 10
-#define IAVF_AQ_FLAG_VFC_SHIFT 11
-#define IAVF_AQ_FLAG_BUF_SHIFT 12
-#define IAVF_AQ_FLAG_SI_SHIFT 13
-#define IAVF_AQ_FLAG_EI_SHIFT 14
-#define IAVF_AQ_FLAG_FE_SHIFT 15
-
-#define IAVF_AQ_FLAG_DD BIT(IAVF_AQ_FLAG_DD_SHIFT) /* 0x1 */
-#define IAVF_AQ_FLAG_CMP BIT(IAVF_AQ_FLAG_CMP_SHIFT) /* 0x2 */
-#define IAVF_AQ_FLAG_ERR BIT(IAVF_AQ_FLAG_ERR_SHIFT) /* 0x4 */
-#define IAVF_AQ_FLAG_VFE BIT(IAVF_AQ_FLAG_VFE_SHIFT) /* 0x8 */
-#define IAVF_AQ_FLAG_LB BIT(IAVF_AQ_FLAG_LB_SHIFT) /* 0x200 */
-#define IAVF_AQ_FLAG_RD BIT(IAVF_AQ_FLAG_RD_SHIFT) /* 0x400 */
-#define IAVF_AQ_FLAG_VFC BIT(IAVF_AQ_FLAG_VFC_SHIFT) /* 0x800 */
-#define IAVF_AQ_FLAG_BUF BIT(IAVF_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
-#define IAVF_AQ_FLAG_SI BIT(IAVF_AQ_FLAG_SI_SHIFT) /* 0x2000 */
-#define IAVF_AQ_FLAG_EI BIT(IAVF_AQ_FLAG_EI_SHIFT) /* 0x4000 */
-#define IAVF_AQ_FLAG_FE BIT(IAVF_AQ_FLAG_FE_SHIFT) /* 0x8000 */
-
-/* error codes */
-enum iavf_admin_queue_err {
- IAVF_AQ_RC_OK = 0, /* success */
- IAVF_AQ_RC_EPERM = 1, /* Operation not permitted */
- IAVF_AQ_RC_ENOENT = 2, /* No such element */
- IAVF_AQ_RC_ESRCH = 3, /* Bad opcode */
- IAVF_AQ_RC_EINTR = 4, /* operation interrupted */
- IAVF_AQ_RC_EIO = 5, /* I/O error */
- IAVF_AQ_RC_ENXIO = 6, /* No such resource */
- IAVF_AQ_RC_E2BIG = 7, /* Arg too long */
- IAVF_AQ_RC_EAGAIN = 8, /* Try again */
- IAVF_AQ_RC_ENOMEM = 9, /* Out of memory */
- IAVF_AQ_RC_EACCES = 10, /* Permission denied */
- IAVF_AQ_RC_EFAULT = 11, /* Bad address */
- IAVF_AQ_RC_EBUSY = 12, /* Device or resource busy */
- IAVF_AQ_RC_EEXIST = 13, /* object already exists */
- IAVF_AQ_RC_EINVAL = 14, /* Invalid argument */
- IAVF_AQ_RC_ENOTTY = 15, /* Not a typewriter */
- IAVF_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
- IAVF_AQ_RC_ENOSYS = 17, /* Function not implemented */
- IAVF_AQ_RC_ERANGE = 18, /* Parameter out of range */
- IAVF_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
- IAVF_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
- IAVF_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
- IAVF_AQ_RC_EFBIG = 22, /* File too large */
-};
-
/* Admin Queue command opcodes */
enum iavf_admin_queue_opc {
/* aq commands */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c
index a9e1da35e248..4d12dfe1b481 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c
@@ -91,6 +91,55 @@ iavf_fill_adv_rss_sctp_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds)
}
/**
+ * iavf_fill_adv_rss_gtp_hdr - Fill GTP-related RSS protocol headers
+ * @proto_hdrs: pointer to the virtchnl protocol headers structure to populate
+ * @packet_hdrs: bitmask of packet header types to configure
+ * @hash_flds: RSS hash field configuration
+ *
+ * This function populates the virtchnl protocol header structure with
+ * appropriate GTP-related header types based on the specified packet_hdrs.
+ * It supports GTPC, GTPU with extension headers, and uplink/downlink PDU
+ * types. For certain GTPU types, it also appends an IPv4 header to enable
+ * hashing on the destination IP address.
+ *
+ * Return: 0 on success or -EOPNOTSUPP if the packet_hdrs value is unsupported.
+ */
+static int
+iavf_fill_adv_rss_gtp_hdr(struct virtchnl_proto_hdrs *proto_hdrs,
+ u32 packet_hdrs, u64 hash_flds)
+{
+ struct virtchnl_proto_hdr *hdr;
+
+ hdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1];
+
+ switch (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_GTP) {
+ case IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC_TEID:
+ case IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC:
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPC);
+ break;
+ case IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_EH:
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
+ break;
+ case IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_UP:
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_UP);
+ hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ iavf_fill_adv_rss_ip4_hdr(hdr, IAVF_ADV_RSS_HASH_FLD_IPV4_DA);
+ break;
+ case IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_DWN:
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_DWN);
+ fallthrough;
+ case IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_IP:
+ hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ iavf_fill_adv_rss_ip4_hdr(hdr, IAVF_ADV_RSS_HASH_FLD_IPV4_DA);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
* iavf_fill_adv_rss_cfg_msg - fill the RSS configuration into virtchnl message
* @rss_cfg: the virtchnl message to be filled with RSS configuration setting
* @packet_hdrs: the RSS configuration protocol header types
@@ -103,6 +152,8 @@ int
iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg,
u32 packet_hdrs, u64 hash_flds, bool symm)
{
+ const u32 packet_l3_hdrs = packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_L3;
+ const u32 packet_l4_hdrs = packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_L4;
struct virtchnl_proto_hdrs *proto_hdrs = &rss_cfg->proto_hdrs;
struct virtchnl_proto_hdr *hdr;
@@ -113,31 +164,41 @@ iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg,
proto_hdrs->tunnel_level = 0; /* always outer layer */
- hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
- switch (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_L3) {
- case IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4:
- iavf_fill_adv_rss_ip4_hdr(hdr, hash_flds);
- break;
- case IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6:
- iavf_fill_adv_rss_ip6_hdr(hdr, hash_flds);
- break;
- default:
- return -EINVAL;
+ if (packet_l3_hdrs) {
+ hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ switch (packet_l3_hdrs) {
+ case IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4:
+ iavf_fill_adv_rss_ip4_hdr(hdr, hash_flds);
+ break;
+ case IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6:
+ iavf_fill_adv_rss_ip6_hdr(hdr, hash_flds);
+ break;
+ default:
+ return -EINVAL;
+ }
}
- hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
- switch (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_L4) {
- case IAVF_ADV_RSS_FLOW_SEG_HDR_TCP:
- iavf_fill_adv_rss_tcp_hdr(hdr, hash_flds);
- break;
- case IAVF_ADV_RSS_FLOW_SEG_HDR_UDP:
- iavf_fill_adv_rss_udp_hdr(hdr, hash_flds);
- break;
- case IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP:
- iavf_fill_adv_rss_sctp_hdr(hdr, hash_flds);
- break;
- default:
- return -EINVAL;
+ if (packet_l4_hdrs) {
+ hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ switch (packet_l4_hdrs) {
+ case IAVF_ADV_RSS_FLOW_SEG_HDR_TCP:
+ iavf_fill_adv_rss_tcp_hdr(hdr, hash_flds);
+ break;
+ case IAVF_ADV_RSS_FLOW_SEG_HDR_UDP:
+ iavf_fill_adv_rss_udp_hdr(hdr, hash_flds);
+ break;
+ case IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP:
+ iavf_fill_adv_rss_sctp_hdr(hdr, hash_flds);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_GTP) {
+ hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ if (iavf_fill_adv_rss_gtp_hdr(proto_hdrs, packet_hdrs, hash_flds))
+ return -EINVAL;
}
return 0;
@@ -186,6 +247,8 @@ iavf_print_adv_rss_cfg(struct iavf_adapter *adapter, struct iavf_adv_rss *rss,
proto = "UDP";
else if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP)
proto = "SCTP";
+ else if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_GTP)
+ proto = "GTP";
else
return;
@@ -211,6 +274,16 @@ iavf_print_adv_rss_cfg(struct iavf_adapter *adapter, struct iavf_adv_rss *rss,
IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT |
IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT))
strcat(hash_opt, "dst port,");
+ if (hash_flds & IAVF_ADV_RSS_HASH_FLD_GTPC_TEID)
+ strcat(hash_opt, "gtp-c,");
+ if (hash_flds & IAVF_ADV_RSS_HASH_FLD_GTPU_IP_TEID)
+ strcat(hash_opt, "gtp-u ip,");
+ if (hash_flds & IAVF_ADV_RSS_HASH_FLD_GTPU_EH_TEID)
+ strcat(hash_opt, "gtp-u ext,");
+ if (hash_flds & IAVF_ADV_RSS_HASH_FLD_GTPU_UP_TEID)
+ strcat(hash_opt, "gtp-u ul,");
+ if (hash_flds & IAVF_ADV_RSS_HASH_FLD_GTPU_DWN_TEID)
+ strcat(hash_opt, "gtp-u dl,");
if (!action)
action = "";
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h
index e31eb2afebea..74cc9e0d528c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h
@@ -22,6 +22,12 @@ enum iavf_adv_rss_flow_seg_hdr {
IAVF_ADV_RSS_FLOW_SEG_HDR_TCP = 0x00000004,
IAVF_ADV_RSS_FLOW_SEG_HDR_UDP = 0x00000008,
IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP = 0x00000010,
+ IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC = 0x00000400,
+ IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC_TEID = 0x00000800,
+ IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_IP = 0x00001000,
+ IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_EH = 0x00002000,
+ IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_DWN = 0x00004000,
+ IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_UP = 0x00008000,
};
#define IAVF_ADV_RSS_FLOW_SEG_HDR_L3 \
@@ -33,6 +39,14 @@ enum iavf_adv_rss_flow_seg_hdr {
IAVF_ADV_RSS_FLOW_SEG_HDR_UDP | \
IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP)
+#define IAVF_ADV_RSS_FLOW_SEG_HDR_GTP \
+ (IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC | \
+ IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC_TEID | \
+ IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_IP | \
+ IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_EH | \
+ IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_DWN | \
+ IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_UP)
+
enum iavf_adv_rss_flow_field {
/* L3 */
IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV4_SA,
@@ -46,6 +60,17 @@ enum iavf_adv_rss_flow_field {
IAVF_ADV_RSS_FLOW_FIELD_IDX_UDP_DST_PORT,
IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_SRC_PORT,
IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_DST_PORT,
+ /* GTPC_TEID */
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPC_TEID,
+ /* GTPU_IP */
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_IP_TEID,
+ /* GTPU_EH */
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_EH_TEID,
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_EH_QFI,
+ /* GTPU_UP */
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_UP_TEID,
+ /* GTPU_DWN */
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_DWN_TEID,
/* The total number of enums must not exceed 64 */
IAVF_ADV_RSS_FLOW_FIELD_IDX_MAX
@@ -72,6 +97,12 @@ enum iavf_adv_rss_flow_field {
BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_SRC_PORT)
#define IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT \
BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_DST_PORT)
+#define IAVF_ADV_RSS_HASH_FLD_GTPC_TEID BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPC_TEID)
+#define IAVF_ADV_RSS_HASH_FLD_GTPU_IP_TEID BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_IP_TEID)
+#define IAVF_ADV_RSS_HASH_FLD_GTPU_EH_TEID BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_EH_TEID)
+#define IAVF_ADV_RSS_HASH_FLD_GTPU_UP_TEID BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_UP_TEID)
+#define IAVF_ADV_RSS_HASH_FLD_GTPU_DWN_TEID \
+ BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_DWN_TEID)
/* bookkeeping of advanced RSS configuration */
struct iavf_adv_rss {
diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c
index aa751ce3425b..614a886bca99 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_common.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_common.c
@@ -8,66 +8,6 @@
#include "iavf_prototype.h"
/**
- * iavf_aq_str - convert AQ err code to a string
- * @hw: pointer to the HW structure
- * @aq_err: the AQ error code to convert
- **/
-const char *iavf_aq_str(struct iavf_hw *hw, enum iavf_admin_queue_err aq_err)
-{
- switch (aq_err) {
- case IAVF_AQ_RC_OK:
- return "OK";
- case IAVF_AQ_RC_EPERM:
- return "IAVF_AQ_RC_EPERM";
- case IAVF_AQ_RC_ENOENT:
- return "IAVF_AQ_RC_ENOENT";
- case IAVF_AQ_RC_ESRCH:
- return "IAVF_AQ_RC_ESRCH";
- case IAVF_AQ_RC_EINTR:
- return "IAVF_AQ_RC_EINTR";
- case IAVF_AQ_RC_EIO:
- return "IAVF_AQ_RC_EIO";
- case IAVF_AQ_RC_ENXIO:
- return "IAVF_AQ_RC_ENXIO";
- case IAVF_AQ_RC_E2BIG:
- return "IAVF_AQ_RC_E2BIG";
- case IAVF_AQ_RC_EAGAIN:
- return "IAVF_AQ_RC_EAGAIN";
- case IAVF_AQ_RC_ENOMEM:
- return "IAVF_AQ_RC_ENOMEM";
- case IAVF_AQ_RC_EACCES:
- return "IAVF_AQ_RC_EACCES";
- case IAVF_AQ_RC_EFAULT:
- return "IAVF_AQ_RC_EFAULT";
- case IAVF_AQ_RC_EBUSY:
- return "IAVF_AQ_RC_EBUSY";
- case IAVF_AQ_RC_EEXIST:
- return "IAVF_AQ_RC_EEXIST";
- case IAVF_AQ_RC_EINVAL:
- return "IAVF_AQ_RC_EINVAL";
- case IAVF_AQ_RC_ENOTTY:
- return "IAVF_AQ_RC_ENOTTY";
- case IAVF_AQ_RC_ENOSPC:
- return "IAVF_AQ_RC_ENOSPC";
- case IAVF_AQ_RC_ENOSYS:
- return "IAVF_AQ_RC_ENOSYS";
- case IAVF_AQ_RC_ERANGE:
- return "IAVF_AQ_RC_ERANGE";
- case IAVF_AQ_RC_EFLUSHED:
- return "IAVF_AQ_RC_EFLUSHED";
- case IAVF_AQ_RC_BAD_ADDR:
- return "IAVF_AQ_RC_BAD_ADDR";
- case IAVF_AQ_RC_EMODE:
- return "IAVF_AQ_RC_EMODE";
- case IAVF_AQ_RC_EFBIG:
- return "IAVF_AQ_RC_EFBIG";
- }
-
- snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
- return hw->err_str;
-}
-
-/**
* iavf_stat_str - convert status err code to a string
* @hw: pointer to the HW structure
* @stat_err: the status error code to convert
@@ -228,7 +168,7 @@ const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err)
void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask, void *desc,
void *buffer, u16 buf_len)
{
- struct iavf_aq_desc *aq_desc = (struct iavf_aq_desc *)desc;
+ struct libie_aq_desc *aq_desc = (struct libie_aq_desc *)desc;
u8 *buf = (u8 *)buffer;
if ((!(mask & hw->debug_mask)) || !desc)
@@ -244,11 +184,11 @@ void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask, void *desc,
le32_to_cpu(aq_desc->cookie_high),
le32_to_cpu(aq_desc->cookie_low));
iavf_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
- le32_to_cpu(aq_desc->params.internal.param0),
- le32_to_cpu(aq_desc->params.internal.param1));
+ le32_to_cpu(aq_desc->params.generic.param0),
+ le32_to_cpu(aq_desc->params.generic.param1));
iavf_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
- le32_to_cpu(aq_desc->params.external.addr_high),
- le32_to_cpu(aq_desc->params.external.addr_low));
+ le32_to_cpu(aq_desc->params.generic.addr_high),
+ le32_to_cpu(aq_desc->params.generic.addr_low));
if (buffer && aq_desc->datalen) {
u16 len = le16_to_cpu(aq_desc->datalen);
@@ -297,11 +237,11 @@ bool iavf_check_asq_alive(struct iavf_hw *hw)
**/
enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading)
{
- struct iavf_aq_desc desc;
- struct iavf_aqc_queue_shutdown *cmd =
- (struct iavf_aqc_queue_shutdown *)&desc.params.raw;
+ struct iavf_aqc_queue_shutdown *cmd;
+ struct libie_aq_desc desc;
enum iavf_status status;
+ cmd = libie_aq_raw(&desc);
iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_queue_shutdown);
if (unloading)
@@ -327,12 +267,13 @@ static enum iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw,
u8 *lut, u16 lut_size,
bool set)
{
+ struct iavf_aqc_get_set_rss_lut *cmd_resp;
+ struct libie_aq_desc desc;
enum iavf_status status;
- struct iavf_aq_desc desc;
- struct iavf_aqc_get_set_rss_lut *cmd_resp =
- (struct iavf_aqc_get_set_rss_lut *)&desc.params.raw;
u16 flags;
+ cmd_resp = libie_aq_raw(&desc);
+
if (set)
iavf_fill_default_direct_cmd_desc(&desc,
iavf_aqc_opc_set_rss_lut);
@@ -341,8 +282,8 @@ static enum iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw,
iavf_aqc_opc_get_rss_lut);
/* Indirect command */
- desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_BUF);
- desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_RD);
vsi_id = FIELD_PREP(IAVF_AQC_SET_RSS_LUT_VSI_ID_MASK, vsi_id) |
FIELD_PREP(IAVF_AQC_SET_RSS_LUT_VSI_VALID, 1);
@@ -392,11 +333,12 @@ iavf_status iavf_aq_get_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
struct iavf_aqc_get_set_rss_key_data *key,
bool set)
{
- enum iavf_status status;
- struct iavf_aq_desc desc;
- struct iavf_aqc_get_set_rss_key *cmd_resp =
- (struct iavf_aqc_get_set_rss_key *)&desc.params.raw;
u16 key_size = sizeof(struct iavf_aqc_get_set_rss_key_data);
+ struct iavf_aqc_get_set_rss_key *cmd_resp;
+ struct libie_aq_desc desc;
+ enum iavf_status status;
+
+ cmd_resp = libie_aq_raw(&desc);
if (set)
iavf_fill_default_direct_cmd_desc(&desc,
@@ -406,8 +348,8 @@ iavf_status iavf_aq_get_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
iavf_aqc_opc_get_rss_key);
/* Indirect command */
- desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_BUF);
- desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_RD);
vsi_id = FIELD_PREP(IAVF_AQC_SET_RSS_KEY_VSI_ID_MASK, vsi_id) |
FIELD_PREP(IAVF_AQC_SET_RSS_KEY_VSI_VALID, 1);
@@ -452,18 +394,18 @@ enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
struct iavf_asq_cmd_details *cmd_details)
{
struct iavf_asq_cmd_details details;
- struct iavf_aq_desc desc;
+ struct libie_aq_desc desc;
enum iavf_status status;
iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_send_msg_to_pf);
- desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_SI);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_SI);
desc.cookie_high = cpu_to_le32(v_opcode);
desc.cookie_low = cpu_to_le32(v_retval);
if (msglen) {
- desc.flags |= cpu_to_le16((u16)(IAVF_AQ_FLAG_BUF
- | IAVF_AQ_FLAG_RD));
+ desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF
+ | LIBIE_AQ_FLAG_RD));
if (msglen > IAVF_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(msglen);
}
if (!cmd_details) {
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 74a1e9fe1821..2cc21289a707 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -4,6 +4,8 @@
#include <linux/bitfield.h>
#include <linux/uaccess.h>
+#include <net/netdev_lock.h>
+
/* ethtool support for iavf */
#include "iavf.h"
@@ -1256,9 +1258,10 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
{
struct ethtool_rx_flow_spec *fsp = &cmd->fs;
struct iavf_fdir_fltr *fltr;
- int count = 50;
int err;
+ netdev_assert_locked(adapter->netdev);
+
if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
return -EOPNOTSUPP;
@@ -1277,14 +1280,6 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
if (!fltr)
return -ENOMEM;
- while (!mutex_trylock(&adapter->crit_lock)) {
- if (--count == 0) {
- kfree(fltr);
- return -EINVAL;
- }
- udelay(1);
- }
-
err = iavf_add_fdir_fltr_info(adapter, fsp, fltr);
if (!err)
err = iavf_fdir_add_fltr(adapter, fltr);
@@ -1292,7 +1287,6 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
if (err)
kfree(fltr);
- mutex_unlock(&adapter->crit_lock);
return err;
}
@@ -1313,14 +1307,7 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
return iavf_fdir_del_fltr(adapter, false, fsp->location);
}
-/**
- * iavf_adv_rss_parse_hdrs - parses headers from RSS hash input
- * @cmd: ethtool rxnfc command
- *
- * This function parses the rxnfc command and returns intended
- * header types for RSS configuration
- */
-static u32 iavf_adv_rss_parse_hdrs(struct ethtool_rxnfc *cmd)
+static u32 iavf_adv_rss_parse_hdrs(const struct ethtool_rxfh_fields *cmd)
{
u32 hdrs = IAVF_ADV_RSS_FLOW_SEG_HDR_NONE;
@@ -1349,6 +1336,56 @@ static u32 iavf_adv_rss_parse_hdrs(struct ethtool_rxnfc *cmd)
hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP |
IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
break;
+ case GTPU_V4_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_IP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPC_V4_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPC_TEID_V4_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC_TEID |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPU_EH_V4_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_EH |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPU_UL_V4_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_UP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPU_DL_V4_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_DWN |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPU_V6_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_IP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPC_V6_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPC_TEID_V6_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC_TEID |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPU_EH_V6_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_EH |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPU_UL_V6_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_UP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPU_DL_V6_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_DWN |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
+ break;
default:
break;
}
@@ -1356,15 +1393,8 @@ static u32 iavf_adv_rss_parse_hdrs(struct ethtool_rxnfc *cmd)
return hdrs;
}
-/**
- * iavf_adv_rss_parse_hash_flds - parses hash fields from RSS hash input
- * @cmd: ethtool rxnfc command
- * @symm: true if Symmetric Topelitz is set
- *
- * This function parses the rxnfc command and returns intended hash fields for
- * RSS configuration
- */
-static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd, bool symm)
+static u64
+iavf_adv_rss_parse_hash_flds(const struct ethtool_rxfh_fields *cmd, bool symm)
{
u64 hfld = IAVF_ADV_RSS_HASH_INVALID;
@@ -1373,6 +1403,12 @@ static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd, bool symm)
case TCP_V4_FLOW:
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
+ case GTPU_V4_FLOW:
+ case GTPC_V4_FLOW:
+ case GTPC_TEID_V4_FLOW:
+ case GTPU_EH_V4_FLOW:
+ case GTPU_UL_V4_FLOW:
+ case GTPU_DL_V4_FLOW:
if (cmd->data & RXH_IP_SRC)
hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_SA;
if (cmd->data & RXH_IP_DST)
@@ -1381,6 +1417,12 @@ static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd, bool symm)
case TCP_V6_FLOW:
case UDP_V6_FLOW:
case SCTP_V6_FLOW:
+ case GTPU_V6_FLOW:
+ case GTPC_V6_FLOW:
+ case GTPC_TEID_V6_FLOW:
+ case GTPU_EH_V6_FLOW:
+ case GTPU_UL_V6_FLOW:
+ case GTPU_DL_V6_FLOW:
if (cmd->data & RXH_IP_SRC)
hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_SA;
if (cmd->data & RXH_IP_DST)
@@ -1402,6 +1444,7 @@ static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd, bool symm)
break;
case UDP_V4_FLOW:
case UDP_V6_FLOW:
+ case GTPC_V4_FLOW:
if (cmd->data & RXH_L4_B_0_1)
hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT;
if (cmd->data & RXH_L4_B_2_3)
@@ -1418,28 +1461,51 @@ static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd, bool symm)
break;
}
}
+ if (cmd->data & RXH_GTP_TEID) {
+ switch (cmd->flow_type) {
+ case GTPC_TEID_V4_FLOW:
+ case GTPC_TEID_V6_FLOW:
+ hfld |= IAVF_ADV_RSS_HASH_FLD_GTPC_TEID;
+ break;
+ case GTPU_V4_FLOW:
+ case GTPU_V6_FLOW:
+ hfld |= IAVF_ADV_RSS_HASH_FLD_GTPU_IP_TEID;
+ break;
+ case GTPU_EH_V4_FLOW:
+ case GTPU_EH_V6_FLOW:
+ hfld |= IAVF_ADV_RSS_HASH_FLD_GTPU_EH_TEID;
+ break;
+ case GTPU_UL_V4_FLOW:
+ case GTPU_UL_V6_FLOW:
+ hfld |= IAVF_ADV_RSS_HASH_FLD_GTPU_UP_TEID;
+ break;
+ case GTPU_DL_V4_FLOW:
+ case GTPU_DL_V6_FLOW:
+ hfld |= IAVF_ADV_RSS_HASH_FLD_GTPU_DWN_TEID;
+ break;
+ default:
+ break;
+ }
+ }
return hfld;
}
-/**
- * iavf_set_adv_rss_hash_opt - Enable/Disable flow types for RSS hash
- * @adapter: pointer to the VF adapter structure
- * @cmd: ethtool rxnfc command
- *
- * Returns Success if the flow input set is supported.
- */
static int
-iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+iavf_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
struct iavf_adv_rss *rss_old, *rss_new;
bool rss_new_add = false;
- int count = 50, err = 0;
bool symm = false;
u64 hash_flds;
+ int err = 0;
u32 hdrs;
+ netdev_assert_locked(adapter->netdev);
+
if (!ADV_RSS_SUPPORT(adapter))
return -EOPNOTSUPP;
@@ -1463,15 +1529,6 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
return -EINVAL;
}
- while (!mutex_trylock(&adapter->crit_lock)) {
- if (--count == 0) {
- kfree(rss_new);
- return -EINVAL;
- }
-
- udelay(1);
- }
-
spin_lock_bh(&adapter->adv_rss_lock);
rss_old = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs);
if (rss_old) {
@@ -1500,25 +1557,16 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
if (!err)
iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
- mutex_unlock(&adapter->crit_lock);
-
if (!rss_new_add)
kfree(rss_new);
return err;
}
-/**
- * iavf_get_adv_rss_hash_opt - Retrieve hash fields for a given flow-type
- * @adapter: pointer to the VF adapter structure
- * @cmd: ethtool rxnfc command
- *
- * Returns Success if the flow input set is supported.
- */
static int
-iavf_get_adv_rss_hash_opt(struct iavf_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+iavf_get_rxfh_fields(struct net_device *netdev, struct ethtool_rxfh_fields *cmd)
{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
struct iavf_adv_rss *rss;
u64 hash_flds;
u32 hdrs;
@@ -1583,9 +1631,6 @@ static int iavf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
case ETHTOOL_SRXCLSRLDEL:
ret = iavf_del_fdir_ethtool(adapter, cmd);
break;
- case ETHTOOL_SRXFH:
- ret = iavf_set_adv_rss_hash_opt(adapter, cmd);
- break;
default:
break;
}
@@ -1594,6 +1639,19 @@ static int iavf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
}
/**
+ * iavf_get_rx_ring_count - get RX ring count
+ * @netdev: network interface device structure
+ *
+ * Return: number of RX rings.
+ **/
+static u32 iavf_get_rx_ring_count(struct net_device *netdev)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->num_active_queues;
+}
+
+/**
* iavf_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -1608,10 +1666,6 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = adapter->num_active_queues;
- ret = 0;
- break;
case ETHTOOL_GRXCLSRLCNT:
if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
break;
@@ -1627,9 +1681,6 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
case ETHTOOL_GRXCLSRLALL:
ret = iavf_get_fdir_fltr_ids(adapter, cmd, (u32 *)rule_locs);
break;
- case ETHTOOL_GRXFH:
- ret = iavf_get_adv_rss_hash_opt(adapter, cmd);
- break;
default:
break;
}
@@ -1808,7 +1859,7 @@ static int iavf_set_rxfh(struct net_device *netdev,
static const struct ethtool_ops iavf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE,
- .cap_rss_sym_xor_supported = true,
+ .supported_input_xfrm = RXH_XFRM_SYM_XOR,
.get_drvinfo = iavf_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = iavf_get_ringparam,
@@ -1824,9 +1875,12 @@ static const struct ethtool_ops iavf_ethtool_ops = {
.set_per_queue_coalesce = iavf_set_per_queue_coalesce,
.set_rxnfc = iavf_set_rxnfc,
.get_rxnfc = iavf_get_rxnfc,
+ .get_rx_ring_count = iavf_get_rx_ring_count,
.get_rxfh_indir_size = iavf_get_rxfh_indir_size,
.get_rxfh = iavf_get_rxfh,
.set_rxfh = iavf_set_rxfh,
+ .get_rxfh_fields = iavf_get_rxfh_fields,
+ .set_rxfh_fields = iavf_set_rxfh_fields,
.get_channels = iavf_get_channels,
.set_channels = iavf_set_channels,
.get_rxfh_key_size = iavf_get_rxfh_key_size,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 12ef160425aa..c2fbe443ef85 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -2,8 +2,10 @@
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/net/intel/libie/rx.h>
+#include <net/netdev_lock.h>
#include "iavf.h"
+#include "iavf_ptp.h"
#include "iavf_prototype.h"
/* All iavf tracepoints are defined by the include below, which must
* be included exactly once across the whole kernel with
@@ -46,8 +48,9 @@ MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);
MODULE_ALIAS("i40evf");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
-MODULE_IMPORT_NS(LIBETH);
-MODULE_IMPORT_NS(LIBIE);
+MODULE_IMPORT_NS("LIBETH");
+MODULE_IMPORT_NS("LIBIE");
+MODULE_IMPORT_NS("LIBIE_ADMINQ");
MODULE_LICENSE("GPL v2");
static const struct net_device_ops iavf_netdev_ops;
@@ -526,33 +529,6 @@ static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
}
/**
- * iavf_irq_affinity_notify - Callback for affinity changes
- * @notify: context as to what irq was changed
- * @mask: the new affinity mask
- *
- * This is a callback function used by the irq_set_affinity_notifier function
- * so that we may register to receive changes to the irq affinity masks.
- **/
-static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
-{
- struct iavf_q_vector *q_vector =
- container_of(notify, struct iavf_q_vector, affinity_notify);
-
- cpumask_copy(&q_vector->affinity_mask, mask);
-}
-
-/**
- * iavf_irq_affinity_release - Callback for affinity notifier release
- * @ref: internal core kernel usage
- *
- * This is a callback function used by the irq_set_affinity_notifier function
- * to inform the current notification subscriber that they will no longer
- * receive notifications.
- **/
-static void iavf_irq_affinity_release(struct kref *ref) {}
-
-/**
* iavf_request_traffic_irqs - Initialize MSI-X interrupts
* @adapter: board private structure
* @basename: device basename
@@ -566,7 +542,6 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
unsigned int vector, q_vectors;
unsigned int rx_int_idx = 0, tx_int_idx = 0;
int irq_num, err;
- int cpu;
iavf_irq_disable(adapter);
/* Decrement for Other and TCP Timer vectors */
@@ -601,17 +576,6 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
"Request_irq failed, error: %d\n", err);
goto free_queue_irqs;
}
- /* register for affinity change notifications */
- q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
- q_vector->affinity_notify.release =
- iavf_irq_affinity_release;
- irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
- /* Spread the IRQ affinity hints across online CPUs. Note that
- * get_cpu_mask returns a mask with a permanent lifetime so
- * it's safe to use as a hint for irq_update_affinity_hint.
- */
- cpu = cpumask_local_spread(q_vector->v_idx, -1);
- irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
}
return 0;
@@ -620,8 +584,6 @@ free_queue_irqs:
while (vector) {
vector--;
irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
- irq_set_affinity_notifier(irq_num, NULL);
- irq_update_affinity_hint(irq_num, NULL);
free_irq(irq_num, &adapter->q_vectors[vector]);
}
return err;
@@ -663,6 +625,7 @@ static int iavf_request_misc_irq(struct iavf_adapter *adapter)
**/
static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
{
+ struct iavf_q_vector *q_vector;
int vector, irq_num, q_vectors;
if (!adapter->msix_entries)
@@ -671,10 +634,10 @@ static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
q_vectors = adapter->num_msix_vectors - NONQ_VECS;
for (vector = 0; vector < q_vectors; vector++) {
+ q_vector = &adapter->q_vectors[vector];
+ netif_napi_set_irq_locked(&q_vector->napi, -1);
irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
- irq_set_affinity_notifier(irq_num, NULL);
- irq_update_affinity_hint(irq_num, NULL);
- free_irq(irq_num, &adapter->q_vectors[vector]);
+ free_irq(irq_num, q_vector);
}
}
@@ -710,6 +673,47 @@ static void iavf_configure_tx(struct iavf_adapter *adapter)
}
/**
+ * iavf_select_rx_desc_format - Select Rx descriptor format
+ * @adapter: adapter private structure
+ *
+ * Select what Rx descriptor format based on availability and enabled
+ * features.
+ *
+ * Return: the desired RXDID to select for a given Rx queue, as defined by
+ * enum virtchnl_rxdid_format.
+ */
+static u8 iavf_select_rx_desc_format(const struct iavf_adapter *adapter)
+{
+ u64 rxdids = adapter->supp_rxdids;
+
+ /* If we did not negotiate VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC, we must
+ * stick with the default value of the legacy 32 byte format.
+ */
+ if (!IAVF_RXDID_ALLOWED(adapter))
+ return VIRTCHNL_RXDID_1_32B_BASE;
+
+ /* Rx timestamping requires the use of flexible NIC descriptors */
+ if (iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_RX_TSTAMP)) {
+ if (rxdids & BIT(VIRTCHNL_RXDID_2_FLEX_SQ_NIC))
+ return VIRTCHNL_RXDID_2_FLEX_SQ_NIC;
+
+ pci_warn(adapter->pdev,
+ "Unable to negotiate flexible descriptor format\n");
+ }
+
+ /* Warn if the PF does not list support for the default legacy
+ * descriptor format. This shouldn't happen, as this is the format
+ * used if VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC is not supported. It is
+ * likely caused by a bug in the PF implementation failing to indicate
+ * support for the format.
+ */
+ if (!(rxdids & VIRTCHNL_RXDID_1_32B_BASE_M))
+ netdev_warn(adapter->netdev, "PF does not list support for default Rx descriptor format\n");
+
+ return VIRTCHNL_RXDID_1_32B_BASE;
+}
+
+/**
* iavf_configure_rx - Configure Receive Unit after Reset
* @adapter: board private structure
*
@@ -719,8 +723,12 @@ static void iavf_configure_rx(struct iavf_adapter *adapter)
{
struct iavf_hw *hw = &adapter->hw;
- for (u32 i = 0; i < adapter->num_active_queues; i++)
+ adapter->rxdid = iavf_select_rx_desc_format(adapter);
+
+ for (u32 i = 0; i < adapter->num_active_queues; i++) {
adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
+ adapter->rx_rings[i].rxdid = adapter->rxdid;
+ }
}
/**
@@ -773,6 +781,11 @@ iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
f->state = IAVF_VLAN_ADD;
adapter->num_vlan_filters++;
iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
+ } else if (f->state == IAVF_VLAN_REMOVE) {
+		/* IAVF_VLAN_REMOVE means the VLAN has not been removed yet,
+		 * so it is enough to just change the state back here.
+ */
+ f->state = IAVF_VLAN_ACTIVE;
}
clearout:
@@ -793,8 +806,18 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
f = iavf_find_vlan(adapter, vlan);
if (f) {
- f->state = IAVF_VLAN_REMOVE;
- iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_VLAN_FILTER);
+ /* IAVF_ADD_VLAN means that VLAN wasn't even added yet.
+ * Remove it from the list.
+ */
+ if (f->state == IAVF_VLAN_ADD) {
+ list_del(&f->list);
+ kfree(f);
+ adapter->num_vlan_filters--;
+ } else {
+ f->state = IAVF_VLAN_REMOVE;
+ iavf_schedule_aq_request(adapter,
+ IAVF_FLAG_AQ_DEL_VLAN_FILTER);
+ }
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
@@ -1180,7 +1203,7 @@ static void iavf_napi_enable_all(struct iavf_adapter *adapter)
q_vector = &adapter->q_vectors[q_idx];
napi = &q_vector->napi;
- napi_enable(napi);
+ napi_enable_locked(napi);
}
}
@@ -1196,7 +1219,7 @@ static void iavf_napi_disable_all(struct iavf_adapter *adapter)
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
q_vector = &adapter->q_vectors[q_idx];
- napi_disable(&q_vector->napi);
+ napi_disable_locked(&q_vector->napi);
}
}
@@ -1225,11 +1248,11 @@ static void iavf_configure(struct iavf_adapter *adapter)
/**
* iavf_up_complete - Finish the last steps of bringing up a connection
* @adapter: board private structure
- *
- * Expects to be called while holding crit_lock.
- **/
+ */
static void iavf_up_complete(struct iavf_adapter *adapter)
{
+ netdev_assert_locked(adapter->netdev);
+
iavf_change_state(adapter, __IAVF_RUNNING);
clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
@@ -1348,13 +1371,13 @@ static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter)
/**
* iavf_down - Shutdown the connection processing
* @adapter: board private structure
- *
- * Expects to be called while holding crit_lock.
- **/
+ */
void iavf_down(struct iavf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ netdev_assert_locked(netdev);
+
if (adapter->state <= __IAVF_DOWN_PENDING)
return;
@@ -1672,7 +1695,7 @@ static int iavf_config_rss_aq(struct iavf_adapter *adapter)
if (status) {
dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
iavf_stat_str(hw, status),
- iavf_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
return iavf_status_to_errno(status);
}
@@ -1682,7 +1705,7 @@ static int iavf_config_rss_aq(struct iavf_adapter *adapter)
if (status) {
dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
iavf_stat_str(hw, status),
- iavf_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
return iavf_status_to_errno(status);
}
@@ -1761,12 +1784,13 @@ static int iavf_init_rss(struct iavf_adapter *adapter)
/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
if (adapter->vf_res->vf_cap_flags &
VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
- adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
+ adapter->rss_hashcfg =
+ IAVF_DEFAULT_RSS_HASHCFG_EXPANDED;
else
- adapter->hena = IAVF_DEFAULT_RSS_HENA;
+ adapter->rss_hashcfg = IAVF_DEFAULT_RSS_HASHCFG;
- wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
- wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
+ wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->rss_hashcfg);
+ wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->rss_hashcfg >> 32));
}
iavf_fill_rss_lut(adapter);
@@ -1784,7 +1808,7 @@ static int iavf_init_rss(struct iavf_adapter *adapter)
**/
static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
{
- int q_idx = 0, num_q_vectors;
+ int q_idx = 0, num_q_vectors, irq_num;
struct iavf_q_vector *q_vector;
num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
@@ -1794,14 +1818,15 @@ static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
return -ENOMEM;
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+ irq_num = adapter->msix_entries[q_idx + NONQ_VECS].vector;
q_vector = &adapter->q_vectors[q_idx];
q_vector->adapter = adapter;
q_vector->vsi = &adapter->vsi;
q_vector->v_idx = q_idx;
q_vector->reg_idx = q_idx;
- cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
- netif_napi_add(adapter->netdev, &q_vector->napi,
- iavf_napi_poll);
+ netif_napi_add_config_locked(adapter->netdev, &q_vector->napi,
+ iavf_napi_poll, q_idx);
+ netif_napi_set_irq_locked(&q_vector->napi, irq_num);
}
return 0;
@@ -1827,7 +1852,7 @@ static void iavf_free_q_vectors(struct iavf_adapter *adapter)
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];
- netif_napi_del(&q_vector->napi);
+ netif_napi_del_locked(&q_vector->napi);
}
kfree(adapter->q_vectors);
adapter->q_vectors = NULL;
@@ -1963,21 +1988,21 @@ err:
* iavf_finish_config - do all netdev work that needs RTNL
* @work: our work_struct
*
- * Do work that needs both RTNL and crit_lock.
- **/
+ * Do work that needs RTNL.
+ */
static void iavf_finish_config(struct work_struct *work)
{
struct iavf_adapter *adapter;
+ bool netdev_released = false;
int pairs, err;
adapter = container_of(work, struct iavf_adapter, finish_config);
/* Always take RTNL first to prevent circular lock dependency;
- * The dev->lock is needed to update the queue number
+ * the dev->lock (== netdev lock) is needed to update the queue number.
*/
rtnl_lock();
- mutex_lock(&adapter->netdev->lock);
- mutex_lock(&adapter->crit_lock);
+ netdev_lock(adapter->netdev);
if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
adapter->netdev->reg_state == NETREG_REGISTERED &&
@@ -1988,26 +2013,33 @@ static void iavf_finish_config(struct work_struct *work)
switch (adapter->state) {
case __IAVF_DOWN:
+ /* Set the real number of queues when reset occurs while
+ * state == __IAVF_DOWN
+ */
+ pairs = adapter->num_active_queues;
+ netif_set_real_num_rx_queues(adapter->netdev, pairs);
+ netif_set_real_num_tx_queues(adapter->netdev, pairs);
+
if (adapter->netdev->reg_state != NETREG_REGISTERED) {
+ netdev_unlock(adapter->netdev);
+ netdev_released = true;
err = register_netdevice(adapter->netdev);
if (err) {
dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n",
err);
/* go back and try again.*/
+ netdev_lock(adapter->netdev);
iavf_free_rss(adapter);
iavf_free_misc_irq(adapter);
iavf_reset_interrupt_capability(adapter);
iavf_change_state(adapter,
__IAVF_INIT_CONFIG_ADAPTER);
+ netdev_unlock(adapter->netdev);
goto out;
}
}
-
- /* Set the real number of queues when reset occurs while
- * state == __IAVF_DOWN
- */
- fallthrough;
+ break;
case __IAVF_RUNNING:
pairs = adapter->num_active_queues;
netif_set_real_num_rx_queues(adapter->netdev, pairs);
@@ -2019,8 +2051,8 @@ static void iavf_finish_config(struct work_struct *work)
}
out:
- mutex_unlock(&adapter->crit_lock);
- mutex_unlock(&adapter->netdev->lock);
+ if (!netdev_released)
+ netdev_unlock(adapter->netdev);
rtnl_unlock();
}
@@ -2049,6 +2081,10 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
return iavf_send_vf_config_msg(adapter);
if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS)
return iavf_send_vf_offload_vlan_v2_msg(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS)
+ return iavf_send_vf_supported_rxdids_msg(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_GET_PTP_CAPS)
+ return iavf_send_vf_ptp_caps_msg(adapter);
if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
iavf_disable_queues(adapter);
return 0;
@@ -2122,12 +2158,12 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
return 0;
}
- if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
- iavf_get_hena(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_GET_RSS_HASHCFG) {
+ iavf_get_rss_hashcfg(adapter);
return 0;
}
- if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
- iavf_set_hena(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_HASHCFG) {
+ iavf_set_rss_hashcfg(adapter);
return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
@@ -2213,7 +2249,10 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021AD);
return 0;
}
-
+ if (adapter->aq_required & IAVF_FLAG_AQ_SEND_PTP_CMD) {
+ iavf_virtchnl_send_ptp_cmd(adapter);
+ return IAVF_SUCCESS;
+ }
if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
iavf_request_stats(adapter);
return 0;
@@ -2578,6 +2617,112 @@ err:
}
/**
+ * iavf_init_send_supported_rxdids - part of querying for supported RXDID
+ * formats
+ * @adapter: board private structure
+ *
+ * Function processes send of the request for supported RXDIDs to the PF.
+ * Must clear IAVF_EXTENDED_CAP_RECV_RXDID if the message is not sent, e.g.
+ * due to the PF not negotiating VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC.
+ */
+static void iavf_init_send_supported_rxdids(struct iavf_adapter *adapter)
+{
+ int ret;
+
+ ret = iavf_send_vf_supported_rxdids_msg(adapter);
+ if (ret == -EOPNOTSUPP) {
+ /* PF does not support VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC. In this
+ * case, we did not send the capability exchange message and
+ * do not expect a response.
+ */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_RXDID;
+ }
+
+ /* We sent the message, so move on to the next step */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_RXDID;
+}
+
+/**
+ * iavf_init_recv_supported_rxdids - part of querying for supported RXDID
+ * formats
+ * @adapter: board private structure
+ *
+ * Function processes receipt of the supported RXDIDs message from the PF.
+ **/
+static void iavf_init_recv_supported_rxdids(struct iavf_adapter *adapter)
+{
+ int ret;
+
+ memset(&adapter->supp_rxdids, 0, sizeof(adapter->supp_rxdids));
+
+ ret = iavf_get_vf_supported_rxdids(adapter);
+ if (ret)
+ goto err;
+
+ /* We've processed the PF response to the
+ * VIRTCHNL_OP_GET_SUPPORTED_RXDIDS message we sent previously.
+ */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_RXDID;
+ return;
+
+err:
+ /* We didn't receive a reply. Make sure we try sending again when
+ * __IAVF_INIT_FAILED attempts to recover.
+ */
+ adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_RXDID;
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
+}
+
+/**
+ * iavf_init_send_ptp_caps - part of querying for extended PTP capabilities
+ * @adapter: board private structure
+ *
+ * Function processes send of the request for 1588 PTP capabilities to the PF.
+ * Must clear IAVF_EXTENDED_CAP_RECV_PTP if the message is not sent, e.g.
+ * due to the PF not negotiating VIRTCHNL_VF_PTP_CAP.
+ */
+static void iavf_init_send_ptp_caps(struct iavf_adapter *adapter)
+{
+ if (iavf_send_vf_ptp_caps_msg(adapter) == -EOPNOTSUPP) {
+ /* PF does not support VIRTCHNL_VF_PTP_CAP. In this case, we
+ * did not send the capability exchange message and do not
+ * expect a response.
+ */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_PTP;
+ }
+
+ /* We sent the message, so move on to the next step */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_PTP;
+}
+
+/**
+ * iavf_init_recv_ptp_caps - part of querying for supported PTP capabilities
+ * @adapter: board private structure
+ *
+ * Function processes receipt of the PTP capabilities supported on this VF.
+ **/
+static void iavf_init_recv_ptp_caps(struct iavf_adapter *adapter)
+{
+ memset(&adapter->ptp.hw_caps, 0, sizeof(adapter->ptp.hw_caps));
+
+ if (iavf_get_vf_ptp_caps(adapter))
+ goto err;
+
+ /* We've processed the PF response to the VIRTCHNL_OP_1588_PTP_GET_CAPS
+ * message we sent previously.
+ */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_PTP;
+ return;
+
+err:
+ /* We didn't receive a reply. Make sure we try sending again when
+ * __IAVF_INIT_FAILED attempts to recover.
+ */
+ adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_PTP;
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
+}
+
+/**
* iavf_init_process_extended_caps - Part of driver startup
* @adapter: board private structure
*
@@ -2601,6 +2746,24 @@ static void iavf_init_process_extended_caps(struct iavf_adapter *adapter)
return;
}
+ /* Process capability exchange for RXDID formats */
+ if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_RXDID) {
+ iavf_init_send_supported_rxdids(adapter);
+ return;
+ } else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_RXDID) {
+ iavf_init_recv_supported_rxdids(adapter);
+ return;
+ }
+
+ /* Process capability exchange for PTP features */
+ if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_PTP) {
+ iavf_init_send_ptp_caps(adapter);
+ return;
+ } else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_PTP) {
+ iavf_init_recv_ptp_caps(adapter);
+ return;
+ }
+
/* When we reach here, no further extended capabilities exchanges are
* necessary, so we finally transition into __IAVF_INIT_CONFIG_ADAPTER
*/
@@ -2692,6 +2855,9 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
if (QOS_ALLOWED(adapter))
adapter->aq_required |= IAVF_FLAG_AQ_GET_QOS_CAPS;
+ /* Setup initial PTP configuration */
+ iavf_ptp_init(adapter);
+
iavf_schedule_finish_config(adapter);
return;
@@ -2704,24 +2870,15 @@ err:
iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
-/**
- * iavf_watchdog_task - Periodic call-back task
- * @work: pointer to work_struct
- **/
-static void iavf_watchdog_task(struct work_struct *work)
+static const int IAVF_NO_RESCHED = -1;
+
+/* return: msec delay for requeueing itself */
+static int iavf_watchdog_step(struct iavf_adapter *adapter)
{
- struct iavf_adapter *adapter = container_of(work,
- struct iavf_adapter,
- watchdog_task.work);
struct iavf_hw *hw = &adapter->hw;
u32 reg_val;
- if (!mutex_trylock(&adapter->crit_lock)) {
- if (adapter->state == __IAVF_REMOVE)
- return;
-
- goto restart_watchdog;
- }
+ netdev_assert_locked(adapter->netdev);
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
iavf_change_state(adapter, __IAVF_COMM_FAILED);
@@ -2729,34 +2886,19 @@ static void iavf_watchdog_task(struct work_struct *work)
switch (adapter->state) {
case __IAVF_STARTUP:
iavf_startup(adapter);
- mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(30));
- return;
+ return 30;
case __IAVF_INIT_VERSION_CHECK:
iavf_init_version_check(adapter);
- mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(30));
- return;
+ return 30;
case __IAVF_INIT_GET_RESOURCES:
iavf_init_get_resources(adapter);
- mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(1));
- return;
+ return 1;
case __IAVF_INIT_EXTENDED_CAPS:
iavf_init_process_extended_caps(adapter);
- mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(1));
- return;
+ return 1;
case __IAVF_INIT_CONFIG_ADAPTER:
iavf_init_config_adapter(adapter);
- mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(1));
- return;
+ return 1;
case __IAVF_INIT_FAILED:
if (test_bit(__IAVF_IN_REMOVE_TASK,
&adapter->crit_section)) {
@@ -2764,24 +2906,18 @@ static void iavf_watchdog_task(struct work_struct *work)
* watchdog task, iavf_remove should handle this state
* as it can loop forever
*/
- mutex_unlock(&adapter->crit_lock);
- return;
+ return IAVF_NO_RESCHED;
}
if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
dev_err(&adapter->pdev->dev,
"Failed to communicate with PF; waiting before retry\n");
adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
iavf_shutdown_adminq(hw);
- mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(adapter->wq,
- &adapter->watchdog_task, (5 * HZ));
- return;
+ return 5000;
}
/* Try again from failed step*/
iavf_change_state(adapter, adapter->last_state);
- mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ);
- return;
+ return 1000;
case __IAVF_COMM_FAILED:
if (test_bit(__IAVF_IN_REMOVE_TASK,
&adapter->crit_section)) {
@@ -2791,8 +2927,7 @@ static void iavf_watchdog_task(struct work_struct *work)
*/
iavf_change_state(adapter, __IAVF_INIT_FAILED);
adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
- mutex_unlock(&adapter->crit_lock);
- return;
+ return IAVF_NO_RESCHED;
}
reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
@@ -2810,16 +2945,9 @@ static void iavf_watchdog_task(struct work_struct *work)
}
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
- mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(adapter->wq,
- &adapter->watchdog_task,
- msecs_to_jiffies(10));
- return;
+ return 10;
case __IAVF_RESETTING:
- mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- HZ * 2);
- return;
+ return 2000;
case __IAVF_DOWN:
case __IAVF_DOWN_PENDING:
case __IAVF_TESTING:
@@ -2846,8 +2974,7 @@ static void iavf_watchdog_task(struct work_struct *work)
break;
case __IAVF_REMOVE:
default:
- mutex_unlock(&adapter->crit_lock);
- return;
+ return IAVF_NO_RESCHED;
}
/* check for hw reset */
@@ -2857,22 +2984,29 @@ static void iavf_watchdog_task(struct work_struct *work)
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING);
- mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(adapter->wq,
- &adapter->watchdog_task, HZ * 2);
- return;
}
- mutex_unlock(&adapter->crit_lock);
-restart_watchdog:
+ return adapter->aq_required ? 20 : 2000;
+}
+
+static void iavf_watchdog_task(struct work_struct *work)
+{
+ struct iavf_adapter *adapter = container_of(work,
+ struct iavf_adapter,
+ watchdog_task.work);
+ struct net_device *netdev = adapter->netdev;
+ int msec_delay;
+
+ netdev_lock(netdev);
+ msec_delay = iavf_watchdog_step(adapter);
+ /* note that we schedule a different task */
if (adapter->state >= __IAVF_DOWN)
queue_work(adapter->wq, &adapter->adminq_task);
- if (adapter->aq_required)
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(20));
- else
+
+ if (msec_delay != IAVF_NO_RESCHED)
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- HZ * 2);
+ msecs_to_jiffies(msec_delay));
+ netdev_unlock(netdev);
}
/**
@@ -2880,14 +3014,15 @@ restart_watchdog:
* @adapter: board private structure
*
* Set communication failed flag and free all resources.
- * NOTE: This function is expected to be called with crit_lock being held.
- **/
+ */
static void iavf_disable_vf(struct iavf_adapter *adapter)
{
struct iavf_mac_filter *f, *ftmp;
struct iavf_vlan_filter *fv, *fvtmp;
struct iavf_cloud_filter *cf, *cftmp;
+ netdev_assert_locked(adapter->netdev);
+
adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
/* We don't use netif_running() because it may be true prior to
@@ -2987,17 +3122,7 @@ static void iavf_reset_task(struct work_struct *work)
int i = 0, err;
bool running;
- /* When device is being removed it doesn't make sense to run the reset
- * task, just return in such a case.
- */
- mutex_lock(&netdev->lock);
- if (!mutex_trylock(&adapter->crit_lock)) {
- if (adapter->state != __IAVF_REMOVE)
- queue_work(adapter->wq, &adapter->reset_task);
-
- mutex_unlock(&netdev->lock);
- return;
- }
+ netdev_lock(netdev);
iavf_misc_irq_disable(adapter);
if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
@@ -3042,12 +3167,22 @@ static void iavf_reset_task(struct work_struct *work)
dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
reg_val);
iavf_disable_vf(adapter);
- mutex_unlock(&adapter->crit_lock);
- mutex_unlock(&netdev->lock);
+ netdev_unlock(netdev);
return; /* Do not attempt to reinit. It's dead, Jim. */
}
continue_reset:
+ /* If we are still early in the state machine, just restart. */
+ if (adapter->state <= __IAVF_INIT_FAILED) {
+ iavf_shutdown_adminq(hw);
+ iavf_change_state(adapter, __IAVF_STARTUP);
+ iavf_startup(adapter);
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+ msecs_to_jiffies(30));
+ netdev_unlock(netdev);
+ return;
+ }
+
/* We don't use netif_running() because it may be true prior to
* ndo_open() returning, so we can't assume it means all our open
* tasks have finished, since we're not holding the rtnl_lock here.
@@ -3099,15 +3234,18 @@ continue_reset:
}
adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
- /* always set since VIRTCHNL_OP_GET_VF_RESOURCES has not been
- * sent/received yet, so VLAN_V2_ALLOWED() cannot is not reliable here,
- * however the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS won't be sent until
- * VIRTCHNL_OP_GET_VF_RESOURCES and VIRTCHNL_VF_OFFLOAD_VLAN_V2 have
- * been successfully sent and negotiated
- */
- adapter->aq_required |= IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;
adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
+ /* Certain capabilities require an extended negotiation process using
+ * extra messages that must be processed after getting the VF
+ * configuration. The related checks such as VLAN_V2_ALLOWED() are not
+ * reliable here, since the configuration has not yet been negotiated.
+ *
+	 * Always set these flags, since the related VIRTCHNL messages won't
+ * be sent until after VIRTCHNL_OP_GET_VF_RESOURCES.
+ */
+ adapter->aq_required |= IAVF_FLAG_AQ_EXTENDED_CAPS;
+
spin_lock_bh(&adapter->mac_vlan_list_lock);
/* Delete filter for the current MAC address, it could have
@@ -3183,8 +3321,7 @@ continue_reset:
adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
wake_up(&adapter->reset_waitqueue);
- mutex_unlock(&adapter->crit_lock);
- mutex_unlock(&netdev->lock);
+ netdev_unlock(netdev);
return;
reset_err:
@@ -3194,8 +3331,7 @@ reset_err:
}
iavf_disable_vf(adapter);
- mutex_unlock(&adapter->crit_lock);
- mutex_unlock(&netdev->lock);
+ netdev_unlock(netdev);
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
}
@@ -3207,6 +3343,7 @@ static void iavf_adminq_task(struct work_struct *work)
{
struct iavf_adapter *adapter =
container_of(work, struct iavf_adapter, adminq_task);
+ struct net_device *netdev = adapter->netdev;
struct iavf_hw *hw = &adapter->hw;
struct iavf_arq_event_info event;
enum virtchnl_ops v_op;
@@ -3214,13 +3351,7 @@ static void iavf_adminq_task(struct work_struct *work)
u32 val, oldval;
u16 pending;
- if (!mutex_trylock(&adapter->crit_lock)) {
- if (adapter->state == __IAVF_REMOVE)
- return;
-
- queue_work(adapter->wq, &adapter->adminq_task);
- goto out;
- }
+ netdev_lock(netdev);
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
goto unlock;
@@ -3287,8 +3418,7 @@ static void iavf_adminq_task(struct work_struct *work)
freedom:
kfree(event.msg_buf);
unlock:
- mutex_unlock(&adapter->crit_lock);
-out:
+ netdev_unlock(netdev);
/* re-enable Admin queue interrupt cause */
iavf_misc_irq_enable(adapter);
}
@@ -3667,10 +3797,8 @@ exit:
if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
return 0;
- mutex_lock(&netdev->lock);
netif_set_real_num_rx_queues(netdev, total_qps);
netif_set_real_num_tx_queues(netdev, total_qps);
- mutex_unlock(&netdev->lock);
return ret;
}
@@ -3983,8 +4111,8 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
struct flow_cls_offload *cls_flower)
{
int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
- struct iavf_cloud_filter *filter = NULL;
- int err = -EINVAL, count = 50;
+ struct iavf_cloud_filter *filter;
+ int err;
if (tc < 0) {
dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
@@ -3994,17 +4122,10 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
filter = kzalloc(sizeof(*filter), GFP_KERNEL);
if (!filter)
return -ENOMEM;
-
- while (!mutex_trylock(&adapter->crit_lock)) {
- if (--count == 0) {
- kfree(filter);
- return err;
- }
- udelay(1);
- }
-
filter->cookie = cls_flower->cookie;
+ netdev_lock(adapter->netdev);
+
/* bail out here if filter already exists */
spin_lock_bh(&adapter->cloud_filter_list_lock);
if (iavf_find_cf(adapter, &cls_flower->cookie)) {
@@ -4038,7 +4159,7 @@ err:
if (err)
kfree(filter);
- mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(adapter->netdev);
return err;
}
@@ -4335,33 +4456,20 @@ static int iavf_open(struct net_device *netdev)
struct iavf_adapter *adapter = netdev_priv(netdev);
int err;
+ netdev_assert_locked(netdev);
+
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
return -EIO;
}
- while (!mutex_trylock(&adapter->crit_lock)) {
- /* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
- * is already taken and iavf_open is called from an upper
- * device's notifier reacting on NETDEV_REGISTER event.
- * We have to leave here to avoid dead lock.
- */
- if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
- return -EBUSY;
-
- usleep_range(500, 1000);
- }
-
- if (adapter->state != __IAVF_DOWN) {
- err = -EBUSY;
- goto err_unlock;
- }
+ if (adapter->state != __IAVF_DOWN)
+ return -EBUSY;
if (adapter->state == __IAVF_RUNNING &&
!test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) {
dev_dbg(&adapter->pdev->dev, "VF is already open.\n");
- err = 0;
- goto err_unlock;
+ return 0;
}
/* allocate transmit descriptors */
@@ -4380,9 +4488,7 @@ static int iavf_open(struct net_device *netdev)
goto err_req_irq;
spin_lock_bh(&adapter->mac_vlan_list_lock);
-
iavf_add_filter(adapter, adapter->hw.mac.addr);
-
spin_unlock_bh(&adapter->mac_vlan_list_lock);
/* Restore filters that were removed with IFF_DOWN */
@@ -4395,8 +4501,6 @@ static int iavf_open(struct net_device *netdev)
iavf_irq_enable(adapter, true);
- mutex_unlock(&adapter->crit_lock);
-
return 0;
err_req_irq:
@@ -4406,8 +4510,6 @@ err_setup_rx:
iavf_free_all_rx_resources(adapter);
err_setup_tx:
iavf_free_all_tx_resources(adapter);
-err_unlock:
- mutex_unlock(&adapter->crit_lock);
return err;
}
@@ -4429,12 +4531,10 @@ static int iavf_close(struct net_device *netdev)
u64 aq_to_restore;
int status;
- mutex_lock(&adapter->crit_lock);
+ netdev_assert_locked(netdev);
- if (adapter->state <= __IAVF_DOWN_PENDING) {
- mutex_unlock(&adapter->crit_lock);
+ if (adapter->state <= __IAVF_DOWN_PENDING)
return 0;
- }
set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
/* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before
@@ -4465,7 +4565,7 @@ static int iavf_close(struct net_device *netdev)
iavf_change_state(adapter, __IAVF_DOWN_PENDING);
iavf_free_traffic_irqs(adapter);
- mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
/* We explicitly don't free resources here because the hardware is
* still active and can DMA into memory. Resources are cleared in
@@ -4483,10 +4583,10 @@ static int iavf_close(struct net_device *netdev)
msecs_to_jiffies(500));
if (!status)
netdev_warn(netdev, "Device resources not yet released\n");
+ netdev_lock(netdev);
- mutex_lock(&adapter->crit_lock);
adapter->aq_required |= aq_to_restore;
- mutex_unlock(&adapter->crit_lock);
+
return 0;
}
@@ -4948,6 +5048,25 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev,
return iavf_fix_strip_features(adapter, features);
}
+static int iavf_hwstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+
+ *config = adapter->ptp.hwtstamp_config;
+
+ return 0;
+}
+
+static int iavf_hwstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+
+ return iavf_ptp_set_ts_config(adapter, config, extack);
+}
+
static int
iavf_verify_shaper(struct net_shaper_binding *binding,
const struct net_shaper *shaper,
@@ -4976,15 +5095,16 @@ iavf_shaper_set(struct net_shaper_binding *binding,
struct iavf_adapter *adapter = netdev_priv(binding->netdev);
const struct net_shaper_handle *handle = &shaper->handle;
struct iavf_ring *tx_ring;
- int ret = 0;
+ int ret;
+
+ netdev_assert_locked(adapter->netdev);
- mutex_lock(&adapter->crit_lock);
if (handle->id >= adapter->num_active_queues)
- goto unlock;
+ return 0;
ret = iavf_verify_shaper(binding, shaper, extack);
if (ret)
- goto unlock;
+ return ret;
tx_ring = &adapter->tx_rings[handle->id];
@@ -4994,9 +5114,7 @@ iavf_shaper_set(struct net_shaper_binding *binding,
adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
-unlock:
- mutex_unlock(&adapter->crit_lock);
- return ret;
+ return 0;
}
static int iavf_shaper_del(struct net_shaper_binding *binding,
@@ -5006,9 +5124,10 @@ static int iavf_shaper_del(struct net_shaper_binding *binding,
struct iavf_adapter *adapter = netdev_priv(binding->netdev);
struct iavf_ring *tx_ring;
- mutex_lock(&adapter->crit_lock);
+ netdev_assert_locked(adapter->netdev);
+
if (handle->id >= adapter->num_active_queues)
- goto unlock;
+ return 0;
tx_ring = &adapter->tx_rings[handle->id];
tx_ring->q_shaper.bw_min = 0;
@@ -5017,8 +5136,6 @@ static int iavf_shaper_del(struct net_shaper_binding *binding,
adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
-unlock:
- mutex_unlock(&adapter->crit_lock);
return 0;
}
@@ -5056,6 +5173,8 @@ static const struct net_device_ops iavf_netdev_ops = {
.ndo_set_features = iavf_set_features,
.ndo_setup_tc = iavf_setup_tc,
.net_shaper_ops = &iavf_shaper_ops,
+ .ndo_hwtstamp_get = iavf_hwstamp_get,
+ .ndo_hwtstamp_set = iavf_hwstamp_set,
};
/**
@@ -5231,6 +5350,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_alloc_etherdev;
}
+ netif_set_affinity_auto(netdev);
SET_NETDEV_DEV(netdev, &pdev->dev);
pci_set_drvdata(pdev, netdev);
@@ -5277,10 +5397,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_alloc_qos_cap;
}
- /* set up the locks for the AQ, do this only once in probe
- * and destroy them only once in remove
- */
- mutex_init(&adapter->crit_lock);
mutex_init(&hw->aq.asq_mutex);
mutex_init(&hw->aq.arq_mutex);
@@ -5310,6 +5426,10 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Setup the wait queue for indicating virtchannel events */
init_waitqueue_head(&adapter->vc_waitqueue);
+ INIT_LIST_HEAD(&adapter->ptp.aq_cmds);
+ init_waitqueue_head(&adapter->ptp.phc_time_waitqueue);
+ mutex_init(&adapter->ptp.aq_cmd_lock);
+
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
/* Initialization goes on in the work. Do not add more of it below. */
@@ -5339,20 +5459,24 @@ static int iavf_suspend(struct device *dev_d)
{
struct net_device *netdev = dev_get_drvdata(dev_d);
struct iavf_adapter *adapter = netdev_priv(netdev);
+ bool running;
netif_device_detach(netdev);
- mutex_lock(&adapter->crit_lock);
-
- if (netif_running(netdev)) {
+ running = netif_running(netdev);
+ if (running)
rtnl_lock();
+ netdev_lock(netdev);
+
+ if (running)
iavf_down(adapter);
- rtnl_unlock();
- }
+
iavf_free_misc_irq(adapter);
iavf_reset_interrupt_capability(adapter);
- mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
+ if (running)
+ rtnl_unlock();
return 0;
}
@@ -5367,7 +5491,7 @@ static int iavf_resume(struct device *dev_d)
{
struct pci_dev *pdev = to_pci_dev(dev_d);
struct iavf_adapter *adapter;
- u32 err;
+ int err;
adapter = iavf_pdev_to_adapter(pdev);
@@ -5429,20 +5553,20 @@ static void iavf_remove(struct pci_dev *pdev)
* There are flows where register/unregister netdev may race.
*/
while (1) {
- mutex_lock(&adapter->crit_lock);
+ netdev_lock(netdev);
if (adapter->state == __IAVF_RUNNING ||
adapter->state == __IAVF_DOWN ||
adapter->state == __IAVF_INIT_FAILED) {
- mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
break;
}
/* Simply return if we already went through iavf_shutdown */
if (adapter->state == __IAVF_REMOVE) {
- mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
return;
}
- mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
usleep_range(500, 1000);
}
cancel_delayed_work_sync(&adapter->watchdog_task);
@@ -5451,7 +5575,7 @@ static void iavf_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
- mutex_lock(&adapter->crit_lock);
+ netdev_lock(netdev);
dev_info(&adapter->pdev->dev, "Removing device\n");
iavf_change_state(adapter, __IAVF_REMOVE);
@@ -5463,11 +5587,15 @@ static void iavf_remove(struct pci_dev *pdev)
msleep(50);
}
+ iavf_ptp_release(adapter);
+
iavf_misc_irq_disable(adapter);
/* Shut down all the garbage mashers on the detention level */
+ netdev_unlock(netdev);
cancel_work_sync(&adapter->reset_task);
cancel_delayed_work_sync(&adapter->watchdog_task);
cancel_work_sync(&adapter->adminq_task);
+ netdev_lock(netdev);
adapter->aq_required = 0;
adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
@@ -5485,8 +5613,7 @@ static void iavf_remove(struct pci_dev *pdev)
/* destroy the locks only once, here */
mutex_destroy(&hw->aq.arq_mutex);
mutex_destroy(&hw->aq.asq_mutex);
- mutex_unlock(&adapter->crit_lock);
- mutex_destroy(&adapter->crit_lock);
+ netdev_unlock(netdev);
iounmap(hw->hw_addr);
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_prototype.h b/drivers/net/ethernet/intel/iavf/iavf_prototype.h
index cac9d1a35a52..7f9f9dbf959a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_prototype.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_prototype.h
@@ -22,7 +22,7 @@ enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
struct iavf_arq_event_info *e,
u16 *events_pending);
enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
- struct iavf_aq_desc *desc,
+ struct libie_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct iavf_asq_cmd_details *cmd_details);
@@ -34,7 +34,6 @@ void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask,
bool iavf_check_asq_alive(struct iavf_hw *hw);
enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading);
-const char *iavf_aq_str(struct iavf_hw *hw, enum iavf_admin_queue_err aq_err);
const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err);
enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 seid,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ptp.c b/drivers/net/ethernet/intel/iavf/iavf_ptp.c
new file mode 100644
index 000000000000..9cbd8c154031
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_ptp.c
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024 Intel Corporation. */
+
+#include "iavf.h"
+#include "iavf_ptp.h"
+
+#define iavf_clock_to_adapter(info) \
+ container_of_const(info, struct iavf_adapter, ptp.info)
+
+/**
+ * iavf_ptp_disable_rx_tstamp - Disable timestamping in Rx rings
+ * @adapter: private adapter structure
+ *
+ * Disable timestamp reporting for all Rx rings.
+ */
+static void iavf_ptp_disable_rx_tstamp(struct iavf_adapter *adapter)
+{
+ for (u32 i = 0; i < adapter->num_active_queues; i++)
+ adapter->rx_rings[i].flags &= ~IAVF_TXRX_FLAGS_HW_TSTAMP;
+}
+
+/**
+ * iavf_ptp_enable_rx_tstamp - Enable timestamping in Rx rings
+ * @adapter: private adapter structure
+ *
+ * Enable timestamp reporting for all Rx rings.
+ */
+static void iavf_ptp_enable_rx_tstamp(struct iavf_adapter *adapter)
+{
+ for (u32 i = 0; i < adapter->num_active_queues; i++)
+ adapter->rx_rings[i].flags |= IAVF_TXRX_FLAGS_HW_TSTAMP;
+}
+
+/**
+ * iavf_ptp_set_timestamp_mode - Set device timestamping mode
+ * @adapter: private adapter structure
+ * @config: pointer to kernel_hwtstamp_config
+ *
+ * Set the timestamping mode requested from the userspace.
+ *
+ * Note: this function always translates Rx timestamp requests for any packet
+ * category into HWTSTAMP_FILTER_ALL.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int iavf_ptp_set_timestamp_mode(struct iavf_adapter *adapter,
+ struct kernel_hwtstamp_config *config)
+{
+ /* Reserved for future extensions. */
+ if (config->flags)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ break;
+ case HWTSTAMP_TX_ON:
+ return -EOPNOTSUPP;
+ default:
+ return -ERANGE;
+ }
+
+ if (config->rx_filter == HWTSTAMP_FILTER_NONE) {
+ iavf_ptp_disable_rx_tstamp(adapter);
+ return 0;
+ } else if (config->rx_filter > HWTSTAMP_FILTER_NTP_ALL) {
+ return -ERANGE;
+ } else if (!(iavf_ptp_cap_supported(adapter,
+ VIRTCHNL_1588_PTP_CAP_RX_TSTAMP))) {
+ return -EOPNOTSUPP;
+ }
+
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ iavf_ptp_enable_rx_tstamp(adapter);
+
+ return 0;
+}
+
+/**
+ * iavf_ptp_set_ts_config - Set timestamping configuration
+ * @adapter: private adapter structure
+ * @config: pointer to kernel_hwtstamp_config structure
+ * @extack: pointer to netlink_ext_ack structure
+ *
+ * Program the requested timestamping configuration to the device.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int iavf_ptp_set_ts_config(struct iavf_adapter *adapter,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = iavf_ptp_set_timestamp_mode(adapter, config);
+ if (err)
+ return err;
+
+ /* Save successful settings for future reference */
+ adapter->ptp.hwtstamp_config = *config;
+
+ return 0;
+}
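
As noted in iavf_ptp_set_timestamp_mode() above, any specific Rx filter is upgraded to HWTSTAMP_FILTER_ALL. A minimal userspace sketch (not part of this patch, interface name hypothetical) of exercising this path through the standard SIOCSHWTSTAMP ioctl:

/* Sketch only: request Rx timestamping of PTP event packets and observe the
 * driver reporting HWTSTAMP_FILTER_ALL back. The interface name is
 * hypothetical.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_OFF,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	strncpy(ifr.ifr_name, "enp1s0v0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr))
		perror("SIOCSHWTSTAMP");
	else
		printf("rx_filter now %d (HWTSTAMP_FILTER_ALL is %d)\n",
		       cfg.rx_filter, HWTSTAMP_FILTER_ALL);
	close(fd);
	return 0;
}
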
+
+/**
+ * iavf_ptp_cap_supported - Check if a PTP capability is supported
+ * @adapter: private adapter structure
+ * @cap: the capability bitmask to check
+ *
+ * Return: true if every capability set in cap is also set in the enabled
+ * capabilities reported by the PF, false otherwise.
+ */
+bool iavf_ptp_cap_supported(const struct iavf_adapter *adapter, u32 cap)
+{
+ if (!IAVF_PTP_ALLOWED(adapter))
+ return false;
+
+ /* Only return true if every bit in cap is set in hw_caps.caps */
+ return (adapter->ptp.hw_caps.caps & cap) == cap;
+}
+
+/**
+ * iavf_allocate_ptp_cmd - Allocate a PTP command message structure
+ * @v_opcode: the virtchnl opcode
+ * @msglen: length in bytes of the associated virtchnl structure
+ *
+ * Allocates a PTP command message and pre-fills it with the provided message
+ * length and opcode.
+ *
+ * Return: allocated PTP command.
+ */
+static struct iavf_ptp_aq_cmd *iavf_allocate_ptp_cmd(enum virtchnl_ops v_opcode,
+ u16 msglen)
+{
+ struct iavf_ptp_aq_cmd *cmd;
+
+ cmd = kzalloc(struct_size(cmd, msg, msglen), GFP_KERNEL);
+ if (!cmd)
+ return NULL;
+
+ cmd->v_opcode = v_opcode;
+ cmd->msglen = msglen;
+
+ return cmd;
+}
+
+/**
+ * iavf_queue_ptp_cmd - Queue PTP command for sending over virtchnl
+ * @adapter: private adapter structure
+ * @cmd: the command structure to send
+ *
+ * Queue the given command structure into the PTP virtchnl command queue to
+ * send to the PF.
+ */
+static void iavf_queue_ptp_cmd(struct iavf_adapter *adapter,
+ struct iavf_ptp_aq_cmd *cmd)
+{
+ mutex_lock(&adapter->ptp.aq_cmd_lock);
+ list_add_tail(&cmd->list, &adapter->ptp.aq_cmds);
+ mutex_unlock(&adapter->ptp.aq_cmd_lock);
+
+ adapter->aq_required |= IAVF_FLAG_AQ_SEND_PTP_CMD;
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+}
+
+/**
+ * iavf_send_phc_read - Send request to read PHC time
+ * @adapter: private adapter structure
+ *
+ * Send a request to obtain the PTP hardware clock time. This allocates the
+ * VIRTCHNL_OP_1588_PTP_GET_TIME message and queues it up to send to
+ * indirectly read the PHC time.
+ *
+ * This function does not wait for the reply from the PF.
+ *
+ * Return: 0 if success, error code otherwise.
+ */
+static int iavf_send_phc_read(struct iavf_adapter *adapter)
+{
+ struct iavf_ptp_aq_cmd *cmd;
+
+ if (!adapter->ptp.clock)
+ return -EOPNOTSUPP;
+
+ cmd = iavf_allocate_ptp_cmd(VIRTCHNL_OP_1588_PTP_GET_TIME,
+ sizeof(struct virtchnl_phc_time));
+ if (!cmd)
+ return -ENOMEM;
+
+ iavf_queue_ptp_cmd(adapter, cmd);
+
+ return 0;
+}
+
+/**
+ * iavf_read_phc_indirect - Indirectly read the PHC time via virtchnl
+ * @adapter: private adapter structure
+ * @ts: storage for the timestamp value
+ * @sts: system timestamp values before and after the read
+ *
+ * Used when the device does not have direct register access to the PHC time.
+ * Indirectly reads the time via the VIRTCHNL_OP_1588_PTP_GET_TIME, and waits
+ * for the reply from the PF.
+ *
+ * Based on some simple measurements using ftrace and phc2sys, this clock
+ * access method has about a ~110 usec latency even when the system is not
+ * under load. In order to achieve acceptable results when using phc2sys with
+ * the indirect clock access method, it is recommended to use more
+ * conservative proportional and integral constants with the P/I servo.
+ *
+ * Return: 0 if success, error code otherwise.
+ */
+static int iavf_read_phc_indirect(struct iavf_adapter *adapter,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ long ret;
+ int err;
+
+ adapter->ptp.phc_time_ready = false;
+
+ ptp_read_system_prets(sts);
+
+ err = iavf_send_phc_read(adapter);
+ if (err)
+ return err;
+
+ ret = wait_event_interruptible_timeout(adapter->ptp.phc_time_waitqueue,
+ adapter->ptp.phc_time_ready,
+ HZ);
+
+ ptp_read_system_postts(sts);
+
+ if (ret < 0)
+ return ret;
+ else if (!ret)
+ return -EBUSY;
+
+ *ts = ns_to_timespec64(adapter->ptp.cached_phc_time);
+
+ return 0;
+}
+
+static int iavf_ptp_gettimex64(struct ptp_clock_info *info,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct iavf_adapter *adapter = iavf_clock_to_adapter(info);
+
+ if (!adapter->ptp.clock)
+ return -EOPNOTSUPP;
+
+ return iavf_read_phc_indirect(adapter, ts, sts);
+}
+
+static int iavf_ptp_settime64(struct ptp_clock_info *info,
+ const struct timespec64 *ts)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * iavf_ptp_cache_phc_time - Cache PHC time for performing timestamp extension
+ * @adapter: private adapter structure
+ *
+ * Periodically cache the PHC time in order to allow for timestamp extension.
+ * This is required because the Tx and Rx timestamps only contain 32bits of
+ * nanoseconds. Timestamp extension allows calculating the corrected 64bit
+ * timestamp. This algorithm relies on the cached time being within ~1 second
+ * of the timestamp.
+ */
+static void iavf_ptp_cache_phc_time(struct iavf_adapter *adapter)
+{
+ if (!time_is_before_jiffies(adapter->ptp.cached_phc_updated + HZ))
+ return;
+
+ /* The response from virtchnl will store the time into
+ * cached_phc_time.
+ */
+ iavf_send_phc_read(adapter);
+}
+
+/**
+ * iavf_ptp_do_aux_work - Perform periodic work required for PTP support
+ * @info: PTP clock info structure
+ *
+ * Handler to take care of periodic work required for PTP operation. This
+ * includes the following tasks:
+ *
+ * 1) updating cached_phc_time
+ *
+ * cached_phc_time is used by the Tx and Rx timestamp flows in order to
+ * perform timestamp extension, by carefully comparing the timestamp
+ * 32bit nanosecond timestamps and determining the corrected 64bit
+ * timestamp value to report to userspace. This algorithm only works if
+ * the cached_phc_time is within ~1 second of the Tx or Rx timestamp
+ * event. This task periodically reads the PHC time and stores it, to
+ * ensure that timestamp extension operates correctly.
+ *
+ * Returns: time in jiffies until the periodic task should be re-scheduled.
+ */
+static long iavf_ptp_do_aux_work(struct ptp_clock_info *info)
+{
+ struct iavf_adapter *adapter = iavf_clock_to_adapter(info);
+
+ iavf_ptp_cache_phc_time(adapter);
+
+ /* Check work about twice a second */
+ return msecs_to_jiffies(500);
+}
+
+/**
+ * iavf_ptp_register_clock - Register a new PTP for userspace
+ * @adapter: private adapter structure
+ *
+ * Allocate and register a new PTP clock device if necessary.
+ *
+ * Return: 0 if success, error otherwise.
+ */
+static int iavf_ptp_register_clock(struct iavf_adapter *adapter)
+{
+ struct ptp_clock_info *ptp_info = &adapter->ptp.info;
+ struct device *dev = &adapter->pdev->dev;
+ struct ptp_clock *clock;
+
+ snprintf(ptp_info->name, sizeof(ptp_info->name), "%s-%s-clk",
+ KBUILD_MODNAME, dev_name(dev));
+ ptp_info->owner = THIS_MODULE;
+ ptp_info->gettimex64 = iavf_ptp_gettimex64;
+ ptp_info->settime64 = iavf_ptp_settime64;
+ ptp_info->do_aux_work = iavf_ptp_do_aux_work;
+
+ clock = ptp_clock_register(ptp_info, dev);
+ if (IS_ERR(clock))
+ return PTR_ERR(clock);
+
+ adapter->ptp.clock = clock;
+
+ dev_dbg(&adapter->pdev->dev, "PTP clock %s registered\n",
+ adapter->ptp.info.name);
+
+ return 0;
+}
+
+/**
+ * iavf_ptp_init - Initialize PTP support if capability was negotiated
+ * @adapter: private adapter structure
+ *
+ * Initialize PTP functionality, based on the capabilities that the PF has
+ * enabled for this VF.
+ */
+void iavf_ptp_init(struct iavf_adapter *adapter)
+{
+ int err;
+
+ if (!iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_READ_PHC)) {
+ pci_notice(adapter->pdev,
+ "Device does not have PTP clock support\n");
+ return;
+ }
+
+ err = iavf_ptp_register_clock(adapter);
+ if (err) {
+ pci_err(adapter->pdev,
+			"Failed to register PTP clock device (%pe)\n",
+ ERR_PTR(err));
+ return;
+ }
+
+ for (int i = 0; i < adapter->num_active_queues; i++) {
+ struct iavf_ring *rx_ring = &adapter->rx_rings[i];
+
+ rx_ring->ptp = &adapter->ptp;
+ }
+
+ ptp_schedule_worker(adapter->ptp.clock, 0);
+}
+
+/**
+ * iavf_ptp_release - Disable PTP support
+ * @adapter: private adapter structure
+ *
+ * Release all PTP resources that were previously initialized.
+ */
+void iavf_ptp_release(struct iavf_adapter *adapter)
+{
+ struct iavf_ptp_aq_cmd *cmd, *tmp;
+
+ if (!adapter->ptp.clock)
+ return;
+
+ pci_dbg(adapter->pdev, "removing PTP clock %s\n",
+ adapter->ptp.info.name);
+ ptp_clock_unregister(adapter->ptp.clock);
+ adapter->ptp.clock = NULL;
+
+ /* Cancel any remaining uncompleted PTP clock commands */
+ mutex_lock(&adapter->ptp.aq_cmd_lock);
+ list_for_each_entry_safe(cmd, tmp, &adapter->ptp.aq_cmds, list) {
+ list_del(&cmd->list);
+ kfree(cmd);
+ }
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
+ mutex_unlock(&adapter->ptp.aq_cmd_lock);
+
+ adapter->ptp.hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ iavf_ptp_disable_rx_tstamp(adapter);
+}
+
+/**
+ * iavf_ptp_process_caps - Handle change in PTP capabilities
+ * @adapter: private adapter structure
+ *
+ * Handle any state changes necessary due to change in PTP capabilities, such
+ * as after a device reset or change in configuration from the PF.
+ */
+void iavf_ptp_process_caps(struct iavf_adapter *adapter)
+{
+ bool phc = iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_READ_PHC);
+
+ /* Check if the device gained or lost necessary access to support the
+ * PTP hardware clock. If so, driver must respond appropriately by
+ * creating or destroying the PTP clock device.
+ */
+ if (adapter->ptp.clock && !phc)
+ iavf_ptp_release(adapter);
+ else if (!adapter->ptp.clock && phc)
+ iavf_ptp_init(adapter);
+
+ /* Check if the device lost access to Rx timestamp incoming packets */
+ if (!iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_RX_TSTAMP)) {
+ adapter->ptp.hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ iavf_ptp_disable_rx_tstamp(adapter);
+ }
+}
+
+/**
+ * iavf_ptp_extend_32b_timestamp - Convert a 32b nanoseconds timestamp to 64b
+ * nanoseconds
+ * @cached_phc_time: recently cached copy of PHC time
+ * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
+ *
+ * Hardware captures timestamps which contain only 32 bits of nominal
+ * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
+ *
+ * Extend the 32bit nanosecond timestamp using the following algorithm and
+ * assumptions:
+ *
+ * 1) have a recently cached copy of the PHC time
+ * 2) assume that the in_tstamp was captured 2^31 nanoseconds (~2.1
+ * seconds) before or after the PHC time was captured.
+ * 3) calculate the delta between the cached time and the timestamp
+ * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
+ * captured after the PHC time. In this case, the full timestamp is just
+ * the cached PHC time plus the delta.
+ * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
+ * timestamp was captured *before* the PHC time, i.e. because the PHC
+ * cache was updated after the timestamp was captured by hardware. In this
+ * case, the full timestamp is the cached time minus the inverse delta.
+ *
+ * This algorithm works even if the PHC time was updated after a Tx timestamp
+ * was requested, but before the Tx timestamp event was reported from
+ * hardware.
+ *
+ * This calculation primarily relies on keeping the cached PHC time up to
+ * date. If the timestamp was captured more than 2^31 nanoseconds after the
+ * PHC time, it is possible that the lower 32bits of PHC time have
+ * overflowed more than once, and we might generate an incorrect timestamp.
+ *
+ * This is prevented by (a) periodically updating the cached PHC time once
+ * a second, and (b) discarding any Tx timestamp packet if it has waited for
+ * a timestamp for more than one second.
+ *
+ * Return: extended timestamp (to 64b).
+ */
+u64 iavf_ptp_extend_32b_timestamp(u64 cached_phc_time, u32 in_tstamp)
+{
+ u32 low = lower_32_bits(cached_phc_time);
+ u32 delta = in_tstamp - low;
+ u64 ns;
+
+ /* Do not assume that the in_tstamp is always more recent than the
+ * cached PHC time. If the delta is large, it indicates that the
+ * in_tstamp was taken in the past, and should be converted
+ * forward.
+ */
+ if (delta > S32_MAX)
+ ns = cached_phc_time - (low - in_tstamp);
+ else
+ ns = cached_phc_time + delta;
+
+ return ns;
+}
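
A standalone sketch of the same extension arithmetic in plain C, using made-up PHC and timestamp values to show both the "captured after" and the "captured before" (wrapped low word) cases:

#include <stdint.h>
#include <stdio.h>

/* Mirrors iavf_ptp_extend_32b_timestamp(): extend a 32-bit timestamp
 * against a cached 64-bit PHC time, assuming the two are within ~2.1
 * seconds of each other.
 */
static uint64_t extend_32b(uint64_t cached_phc_time, uint32_t in_tstamp)
{
	uint32_t low = (uint32_t)cached_phc_time;
	uint32_t delta = in_tstamp - low;

	if (delta > INT32_MAX)
		return cached_phc_time - (uint32_t)(low - in_tstamp);

	return cached_phc_time + delta;
}

int main(void)
{
	/* timestamp taken 100 ns after the cached PHC time */
	printf("%llu\n", (unsigned long long)extend_32b(0x100000000ULL, 100));
	/* timestamp taken 32 ns before the cached PHC time (low word wrapped) */
	printf("%llu\n", (unsigned long long)extend_32b(0x100000010ULL, 0xfffffff0u));
	return 0;
}
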
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ptp.h b/drivers/net/ethernet/intel/iavf/iavf_ptp.h
new file mode 100644
index 000000000000..783b8f287cd9
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_ptp.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Intel Corporation. */
+
+#ifndef _IAVF_PTP_H_
+#define _IAVF_PTP_H_
+
+#include "iavf_types.h"
+
+/* bit indicating whether a 40bit timestamp is valid */
+#define IAVF_PTP_40B_TSTAMP_VALID BIT(24)
+
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+void iavf_ptp_init(struct iavf_adapter *adapter);
+void iavf_ptp_release(struct iavf_adapter *adapter);
+void iavf_ptp_process_caps(struct iavf_adapter *adapter);
+bool iavf_ptp_cap_supported(const struct iavf_adapter *adapter, u32 cap);
+void iavf_virtchnl_send_ptp_cmd(struct iavf_adapter *adapter);
+int iavf_ptp_set_ts_config(struct iavf_adapter *adapter,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+u64 iavf_ptp_extend_32b_timestamp(u64 cached_phc_time, u32 in_tstamp);
+#else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+static inline void iavf_ptp_init(struct iavf_adapter *adapter) { }
+static inline void iavf_ptp_release(struct iavf_adapter *adapter) { }
+static inline void iavf_ptp_process_caps(struct iavf_adapter *adapter) { }
+static inline bool iavf_ptp_cap_supported(const struct iavf_adapter *adapter,
+ u32 cap)
+{
+ return false;
+}
+
+static inline void iavf_virtchnl_send_ptp_cmd(struct iavf_adapter *adapter) { }
+static inline int iavf_ptp_set_ts_config(struct iavf_adapter *adapter,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ return -1;
+}
+
+static inline u64 iavf_ptp_extend_32b_timestamp(u64 cached_phc_time,
+ u32 in_tstamp)
+{
+ return 0;
+}
+
+#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+#endif /* _IAVF_PTP_H_ */
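
A hypothetical call site illustrating why the empty inline stubs exist: common driver paths call the PTP hooks unconditionally, and the stubs compile away when CONFIG_PTP_1588_CLOCK is disabled (the function name below is made up):

/* hypothetical caller in common init code -- no #ifdef needed */
static void example_enable_features(struct iavf_adapter *adapter)
{
	/* no-op stub when PTP support is compiled out */
	iavf_ptp_init(adapter);

	if (iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_READ_PHC))
		dev_info(&adapter->pdev->dev, "PHC read access granted by PF\n");
}
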
diff --git a/drivers/net/ethernet/intel/iavf/iavf_trace.h b/drivers/net/ethernet/intel/iavf/iavf_trace.h
index 62212011c807..c5e4d1823886 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_trace.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_trace.h
@@ -112,7 +112,7 @@ DECLARE_EVENT_CLASS(
iavf_rx_template,
TP_PROTO(struct iavf_ring *ring,
- union iavf_32byte_rx_desc *desc,
+ struct iavf_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb),
@@ -140,7 +140,7 @@ DECLARE_EVENT_CLASS(
DEFINE_EVENT(
iavf_rx_template, iavf_clean_rx_irq,
TP_PROTO(struct iavf_ring *ring,
- union iavf_32byte_rx_desc *desc,
+ struct iavf_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb));
@@ -148,7 +148,7 @@ DEFINE_EVENT(
DEFINE_EVENT(
iavf_rx_template, iavf_clean_rx_irq_rx,
TP_PROTO(struct iavf_ring *ring,
- union iavf_32byte_rx_desc *desc,
+ struct iavf_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb));
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 26b424fd6718..363c42bf3dcf 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -8,6 +8,26 @@
#include "iavf.h"
#include "iavf_trace.h"
#include "iavf_prototype.h"
+#include "iavf_ptp.h"
+
+/**
+ * iavf_is_descriptor_done - tests DD bit in Rx descriptor
+ * @qw1: quad word 1 from descriptor to get Descriptor Done field from
+ * @flex: is the descriptor flex or legacy
+ *
+ * This function tests the descriptor done bit in the specified descriptor.
+ * Because there are two types of descriptors (legacy and flex), the @flex
+ * parameter is used to distinguish between them.
+ *
+ * Return: true or false based on the state of the DD bit in the Rx descriptor.
+ */
+static bool iavf_is_descriptor_done(u64 qw1, bool flex)
+{
+ if (flex)
+ return FIELD_GET(IAVF_RXD_FLEX_DD_M, qw1);
+ else
+ return FIELD_GET(IAVF_RXD_LEGACY_DD_M, qw1);
+}
static __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
u32 td_tag)
@@ -703,7 +723,7 @@ static void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
for (u32 i = rx_ring->next_to_clean; i != rx_ring->next_to_use; ) {
const struct libeth_fqe *rx_fqes = &rx_ring->rx_fqes[i];
- page_pool_put_full_page(rx_ring->pp, rx_fqes->page, false);
+ libeth_rx_recycle_slow(rx_fqes->netmem);
if (unlikely(++i == rx_ring->count))
i = 0;
@@ -766,7 +786,7 @@ int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
u64_stats_init(&rx_ring->syncp);
/* Round up to nearest 4K */
- rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc);
+ rx_ring->size = rx_ring->count * sizeof(struct iavf_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
rx_ring->desc = dma_alloc_coherent(fq.pp->p.dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
@@ -845,7 +865,7 @@ bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
.count = rx_ring->count,
};
u16 ntu = rx_ring->next_to_use;
- union iavf_rx_desc *rx_desc;
+ struct iavf_rx_desc *rx_desc;
/* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count)
@@ -863,7 +883,7 @@ bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
- rx_desc->read.pkt_addr = cpu_to_le64(addr);
+ rx_desc->qw0 = cpu_to_le64(addr);
rx_desc++;
ntu++;
@@ -873,7 +893,7 @@ bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
}
/* clear the status bits for the next_to_use descriptor */
- rx_desc->wb.qword1.status_error_len = 0;
+ rx_desc->qw1 = 0;
cleaned_count--;
} while (cleaned_count);
@@ -896,60 +916,43 @@ no_buffers:
}
/**
- * iavf_rx_checksum - Indicate in skb if hw indicated a good cksum
+ * iavf_rx_csum - Indicate in skb if hw indicated a good checksum
* @vsi: the VSI we care about
* @skb: skb currently being received and modified
- * @rx_desc: the receive descriptor
+ * @decoded_pt: decoded ptype information
+ * @csum_bits: decoded Rx descriptor information
**/
-static void iavf_rx_checksum(struct iavf_vsi *vsi,
- struct sk_buff *skb,
- union iavf_rx_desc *rx_desc)
+static void iavf_rx_csum(const struct iavf_vsi *vsi, struct sk_buff *skb,
+ struct libeth_rx_pt decoded_pt,
+ struct libeth_rx_csum csum_bits)
{
- struct libeth_rx_pt decoded;
- u32 rx_error, rx_status;
bool ipv4, ipv6;
- u8 ptype;
- u64 qword;
skb->ip_summed = CHECKSUM_NONE;
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword);
-
- decoded = libie_rx_pt_parse(ptype);
- if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded))
- return;
-
- rx_error = FIELD_GET(IAVF_RXD_QW1_ERROR_MASK, qword);
- rx_status = FIELD_GET(IAVF_RXD_QW1_STATUS_MASK, qword);
-
/* did the hardware decode the packet and checksum? */
- if (!(rx_status & BIT(IAVF_RX_DESC_STATUS_L3L4P_SHIFT)))
+ if (unlikely(!csum_bits.l3l4p))
return;
- ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
- ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
+ ipv4 = libeth_rx_pt_get_ip_ver(decoded_pt) == LIBETH_RX_PT_OUTER_IPV4;
+ ipv6 = libeth_rx_pt_get_ip_ver(decoded_pt) == LIBETH_RX_PT_OUTER_IPV6;
- if (ipv4 &&
- (rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) |
- BIT(IAVF_RX_DESC_ERROR_EIPE_SHIFT))))
+ if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe)))
goto checksum_fail;
/* likely incorrect csum if alternate IP extension headers found */
- if (ipv6 &&
- rx_status & BIT(IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT))
- /* don't increment checksum err here, non-fatal err */
+ if (unlikely(ipv6 && csum_bits.ipv6exadd))
return;
/* there was some L4 error, count error and punt packet to the stack */
- if (rx_error & BIT(IAVF_RX_DESC_ERROR_L4E_SHIFT))
+ if (unlikely(csum_bits.l4e))
goto checksum_fail;
/* handle packets that were not able to be checksummed due
* to arrival speed, in this case the stack can compute
* the csum.
*/
- if (rx_error & BIT(IAVF_RX_DESC_ERROR_PPRS_SHIFT))
+ if (unlikely(csum_bits.pprs))
return;
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -960,52 +963,196 @@ checksum_fail:
}
/**
- * iavf_rx_hash - set the hash value in the skb
+ * iavf_legacy_rx_csum - Indicate in skb if hw indicated a good checksum
+ * @vsi: the VSI we care about
+ * @qw1: quad word 1
+ * @decoded_pt: decoded packet type
+ *
+ * This function only operates on the VIRTCHNL_RXDID_1_32B_BASE legacy 32byte
+ * descriptor writeback format.
+ *
+ * Return: decoded checksum bits.
+ **/
+static struct libeth_rx_csum
+iavf_legacy_rx_csum(const struct iavf_vsi *vsi, u64 qw1,
+ const struct libeth_rx_pt decoded_pt)
+{
+ struct libeth_rx_csum csum_bits = {};
+
+ if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded_pt))
+ return csum_bits;
+
+ csum_bits.ipe = FIELD_GET(IAVF_RXD_LEGACY_IPE_M, qw1);
+ csum_bits.eipe = FIELD_GET(IAVF_RXD_LEGACY_EIPE_M, qw1);
+ csum_bits.l4e = FIELD_GET(IAVF_RXD_LEGACY_L4E_M, qw1);
+ csum_bits.pprs = FIELD_GET(IAVF_RXD_LEGACY_PPRS_M, qw1);
+ csum_bits.l3l4p = FIELD_GET(IAVF_RXD_LEGACY_L3L4P_M, qw1);
+ csum_bits.ipv6exadd = FIELD_GET(IAVF_RXD_LEGACY_IPV6EXADD_M, qw1);
+
+ return csum_bits;
+}
+
+/**
+ * iavf_flex_rx_csum - Indicate in skb if hw indicated a good checksum
+ * @vsi: the VSI we care about
+ * @qw1: quad word 1
+ * @decoded_pt: decoded packet type
+ *
+ * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
+ *
+ * Return: decoded checksum bits.
+ **/
+static struct libeth_rx_csum
+iavf_flex_rx_csum(const struct iavf_vsi *vsi, u64 qw1,
+ const struct libeth_rx_pt decoded_pt)
+{
+ struct libeth_rx_csum csum_bits = {};
+
+ if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded_pt))
+ return csum_bits;
+
+ csum_bits.ipe = FIELD_GET(IAVF_RXD_FLEX_XSUM_IPE_M, qw1);
+ csum_bits.eipe = FIELD_GET(IAVF_RXD_FLEX_XSUM_EIPE_M, qw1);
+ csum_bits.l4e = FIELD_GET(IAVF_RXD_FLEX_XSUM_L4E_M, qw1);
+ csum_bits.eudpe = FIELD_GET(IAVF_RXD_FLEX_XSUM_EUDPE_M, qw1);
+ csum_bits.l3l4p = FIELD_GET(IAVF_RXD_FLEX_L3L4P_M, qw1);
+ csum_bits.ipv6exadd = FIELD_GET(IAVF_RXD_FLEX_IPV6EXADD_M, qw1);
+ csum_bits.nat = FIELD_GET(IAVF_RXD_FLEX_NAT_M, qw1);
+
+ return csum_bits;
+}
+
+/**
+ * iavf_legacy_rx_hash - set the hash value in the skb
+ * @ring: descriptor ring
+ * @qw0: quad word 0
+ * @qw1: quad word 1
+ * @skb: skb currently being received and modified
+ * @decoded_pt: decoded packet type
+ *
+ * This function only operates on the VIRTCHNL_RXDID_1_32B_BASE legacy 32byte
+ * descriptor writeback format.
+ **/
+static void iavf_legacy_rx_hash(const struct iavf_ring *ring, __le64 qw0,
+ __le64 qw1, struct sk_buff *skb,
+ const struct libeth_rx_pt decoded_pt)
+{
+ const __le64 rss_mask = cpu_to_le64(IAVF_RXD_LEGACY_FLTSTAT_M);
+ u32 hash;
+
+ if (!libeth_rx_pt_has_hash(ring->netdev, decoded_pt))
+ return;
+
+ if ((qw1 & rss_mask) == rss_mask) {
+ hash = le64_get_bits(qw0, IAVF_RXD_LEGACY_RSS_M);
+ libeth_rx_pt_set_hash(skb, hash, decoded_pt);
+ }
+}
+
+/**
+ * iavf_flex_rx_hash - set the hash value in the skb
* @ring: descriptor ring
- * @rx_desc: specific descriptor
+ * @qw1: quad word 1
* @skb: skb currently being received and modified
- * @rx_ptype: Rx packet type
+ * @decoded_pt: decoded packet type
+ *
+ * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
**/
-static void iavf_rx_hash(struct iavf_ring *ring,
- union iavf_rx_desc *rx_desc,
- struct sk_buff *skb,
- u8 rx_ptype)
+static void iavf_flex_rx_hash(const struct iavf_ring *ring, __le64 qw1,
+ struct sk_buff *skb,
+ const struct libeth_rx_pt decoded_pt)
{
- struct libeth_rx_pt decoded;
+ bool rss_valid;
u32 hash;
- const __le64 rss_mask =
- cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
- IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);
- decoded = libie_rx_pt_parse(rx_ptype);
- if (!libeth_rx_pt_has_hash(ring->netdev, decoded))
+ if (!libeth_rx_pt_has_hash(ring->netdev, decoded_pt))
return;
- if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
- hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
- libeth_rx_pt_set_hash(skb, hash, decoded);
+ rss_valid = le64_get_bits(qw1, IAVF_RXD_FLEX_RSS_VALID_M);
+ if (rss_valid) {
+ hash = le64_get_bits(qw1, IAVF_RXD_FLEX_RSS_HASH_M);
+ libeth_rx_pt_set_hash(skb, hash, decoded_pt);
}
}
/**
+ * iavf_flex_rx_tstamp - Capture Rx timestamp from the descriptor
+ * @rx_ring: descriptor ring
+ * @qw2: quad word 2 of descriptor
+ * @qw3: quad word 3 of descriptor
+ * @skb: skb currently being received
+ *
+ * Read the Rx timestamp value from the descriptor and pass it to the stack.
+ *
+ * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
+ */
+static void iavf_flex_rx_tstamp(const struct iavf_ring *rx_ring, __le64 qw2,
+ __le64 qw3, struct sk_buff *skb)
+{
+ u32 tstamp;
+ u64 ns;
+
+ /* Skip processing if timestamps aren't enabled */
+ if (!(rx_ring->flags & IAVF_TXRX_FLAGS_HW_TSTAMP))
+ return;
+
+ /* Check if this Rx descriptor has a valid timestamp */
+ if (!le64_get_bits(qw2, IAVF_PTP_40B_TSTAMP_VALID))
+ return;
+
+ /* the ts_low field only contains the valid bit and sub-nanosecond
+ * precision, so we don't need to extract it.
+ */
+ tstamp = le64_get_bits(qw3, IAVF_RXD_FLEX_QW3_TSTAMP_HIGH_M);
+
+ ns = iavf_ptp_extend_32b_timestamp(rx_ring->ptp->cached_phc_time,
+ tstamp);
+
+ *skb_hwtstamps(skb) = (struct skb_shared_hwtstamps) {
+ .hwtstamp = ns_to_ktime(ns),
+ };
+}
+
+/**
* iavf_process_skb_fields - Populate skb header fields from Rx descriptor
* @rx_ring: rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being populated
- * @rx_ptype: the packet type decoded by hardware
+ * @ptype: the packet type decoded by hardware
+ * @flex: is the descriptor flex or legacy
*
* This function checks the ring, descriptor, and packet information in
* order to populate the hash, checksum, VLAN, protocol, and
* other fields within the skb.
**/
-static void
-iavf_process_skb_fields(struct iavf_ring *rx_ring,
- union iavf_rx_desc *rx_desc, struct sk_buff *skb,
- u8 rx_ptype)
+static void iavf_process_skb_fields(const struct iavf_ring *rx_ring,
+ const struct iavf_rx_desc *rx_desc,
+ struct sk_buff *skb, u32 ptype,
+ bool flex)
{
- iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
-
- iavf_rx_checksum(rx_ring->vsi, skb, rx_desc);
+ struct libeth_rx_csum csum_bits;
+ struct libeth_rx_pt decoded_pt;
+ __le64 qw0 = rx_desc->qw0;
+ __le64 qw1 = rx_desc->qw1;
+ __le64 qw2 = rx_desc->qw2;
+ __le64 qw3 = rx_desc->qw3;
+
+ decoded_pt = libie_rx_pt_parse(ptype);
+
+ if (flex) {
+ iavf_flex_rx_hash(rx_ring, qw1, skb, decoded_pt);
+ iavf_flex_rx_tstamp(rx_ring, qw2, qw3, skb);
+ csum_bits = iavf_flex_rx_csum(rx_ring->vsi, le64_to_cpu(qw1),
+ decoded_pt);
+ } else {
+ iavf_legacy_rx_hash(rx_ring, qw0, qw1, skb, decoded_pt);
+ csum_bits = iavf_legacy_rx_csum(rx_ring->vsi, le64_to_cpu(qw1),
+ decoded_pt);
+ }
+ iavf_rx_csum(rx_ring->vsi, skb, decoded_pt, csum_bits);
skb_record_rx_queue(skb, rx_ring->queue_index);
@@ -1050,10 +1197,11 @@ static void iavf_add_rx_frag(struct sk_buff *skb,
const struct libeth_fqe *rx_buffer,
unsigned int size)
{
- u32 hr = rx_buffer->page->pp->p.offset;
+ u32 hr = netmem_get_pp(rx_buffer->netmem)->p.offset;
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
- rx_buffer->offset + hr, size, rx_buffer->truesize);
+ skb_add_rx_frag_netmem(skb, skb_shinfo(skb)->nr_frags,
+ rx_buffer->netmem, rx_buffer->offset + hr,
+ size, rx_buffer->truesize);
}
/**
@@ -1067,12 +1215,13 @@ static void iavf_add_rx_frag(struct sk_buff *skb,
static struct sk_buff *iavf_build_skb(const struct libeth_fqe *rx_buffer,
unsigned int size)
{
- u32 hr = rx_buffer->page->pp->p.offset;
+ struct page *buf_page = __netmem_to_page(rx_buffer->netmem);
+ u32 hr = pp_page_to_nmdesc(buf_page)->pp->p.offset;
struct sk_buff *skb;
void *va;
/* prefetch first cache line of first page */
- va = page_address(rx_buffer->page) + rx_buffer->offset;
+ va = page_address(buf_page) + rx_buffer->offset;
net_prefetch(va + hr);
/* build an skb around the page buffer */
@@ -1092,8 +1241,7 @@ static struct sk_buff *iavf_build_skb(const struct libeth_fqe *rx_buffer,
/**
* iavf_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed
- * @rx_desc: Rx descriptor for current buffer
- * @skb: Current socket buffer containing buffer in progress
+ * @fields: Rx descriptor extracted fields
*
* This function updates next to clean. If the buffer is an EOP buffer
* this function exits returning false, otherwise it will place the
@@ -1101,8 +1249,7 @@ static struct sk_buff *iavf_build_skb(const struct libeth_fqe *rx_buffer,
* that this is in fact a non-EOP buffer.
**/
static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
- union iavf_rx_desc *rx_desc,
- struct sk_buff *skb)
+ struct libeth_rqe_info fields)
{
u32 ntc = rx_ring->next_to_clean + 1;
@@ -1113,8 +1260,7 @@ static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
prefetch(IAVF_RX_DESC(rx_ring, ntc));
/* if we are the last buffer then there is nothing else to do */
-#define IAVF_RXD_EOF BIT(IAVF_RX_DESC_STATUS_EOF_SHIFT)
- if (likely(iavf_test_staterr(rx_desc, IAVF_RXD_EOF)))
+ if (likely(fields.eop))
return false;
rx_ring->rx_stats.non_eop_descs++;
@@ -1123,6 +1269,109 @@ static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
}
/**
+ * iavf_extract_legacy_rx_fields - Extract fields from the Rx descriptor
+ * @rx_ring: rx descriptor ring
+ * @rx_desc: the descriptor to process
+ *
+ * Decode the Rx descriptor and extract relevant information including the
+ * size, VLAN tag, Rx packet type, end of packet field and RXE field value.
+ *
+ * This function only operates on the VIRTCHNL_RXDID_1_32B_BASE legacy 32byte
+ * descriptor writeback format.
+ *
+ * Return: fields extracted from the Rx descriptor.
+ */
+static struct libeth_rqe_info
+iavf_extract_legacy_rx_fields(const struct iavf_ring *rx_ring,
+ const struct iavf_rx_desc *rx_desc)
+{
+ u64 qw0 = le64_to_cpu(rx_desc->qw0);
+ u64 qw1 = le64_to_cpu(rx_desc->qw1);
+ u64 qw2 = le64_to_cpu(rx_desc->qw2);
+ struct libeth_rqe_info fields;
+ bool l2tag1p, l2tag2p;
+
+ fields.eop = FIELD_GET(IAVF_RXD_LEGACY_EOP_M, qw1);
+ fields.len = FIELD_GET(IAVF_RXD_LEGACY_LENGTH_M, qw1);
+
+ if (!fields.eop)
+ return fields;
+
+ fields.rxe = FIELD_GET(IAVF_RXD_LEGACY_RXE_M, qw1);
+ fields.ptype = FIELD_GET(IAVF_RXD_LEGACY_PTYPE_M, qw1);
+ fields.vlan = 0;
+
+ if (rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
+ l2tag1p = FIELD_GET(IAVF_RXD_LEGACY_L2TAG1P_M, qw1);
+ if (l2tag1p)
+ fields.vlan = FIELD_GET(IAVF_RXD_LEGACY_L2TAG1_M, qw0);
+ } else if (rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
+ l2tag2p = FIELD_GET(IAVF_RXD_LEGACY_L2TAG2P_M, qw2);
+ if (l2tag2p)
+ fields.vlan = FIELD_GET(IAVF_RXD_LEGACY_L2TAG2_M, qw2);
+ }
+
+ return fields;
+}
+
+/**
+ * iavf_extract_flex_rx_fields - Extract fields from the Rx descriptor
+ * @rx_ring: rx descriptor ring
+ * @rx_desc: the descriptor to process
+ *
+ * Decode the Rx descriptor and extract relevant information including the
+ * size, VLAN tag, Rx packet type, end of packet field and RXE field value.
+ *
+ * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
+ *
+ * Return: fields extracted from the Rx descriptor.
+ */
+static struct libeth_rqe_info
+iavf_extract_flex_rx_fields(const struct iavf_ring *rx_ring,
+ const struct iavf_rx_desc *rx_desc)
+{
+ struct libeth_rqe_info fields = {};
+ u64 qw0 = le64_to_cpu(rx_desc->qw0);
+ u64 qw1 = le64_to_cpu(rx_desc->qw1);
+ u64 qw2 = le64_to_cpu(rx_desc->qw2);
+ bool l2tag1p, l2tag2p;
+
+ fields.eop = FIELD_GET(IAVF_RXD_FLEX_EOP_M, qw1);
+ fields.len = FIELD_GET(IAVF_RXD_FLEX_PKT_LEN_M, qw0);
+
+ if (!fields.eop)
+ return fields;
+
+ fields.rxe = FIELD_GET(IAVF_RXD_FLEX_RXE_M, qw1);
+ fields.ptype = FIELD_GET(IAVF_RXD_FLEX_PTYPE_M, qw0);
+ fields.vlan = 0;
+
+ if (rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
+ l2tag1p = FIELD_GET(IAVF_RXD_FLEX_L2TAG1P_M, qw1);
+ if (l2tag1p)
+ fields.vlan = FIELD_GET(IAVF_RXD_FLEX_L2TAG1_M, qw1);
+ } else if (rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
+ l2tag2p = FIELD_GET(IAVF_RXD_FLEX_L2TAG2P_M, qw2);
+ if (l2tag2p)
+ fields.vlan = FIELD_GET(IAVF_RXD_FLEX_L2TAG2_2_M, qw2);
+ }
+
+ return fields;
+}
+
+static struct libeth_rqe_info
+iavf_extract_rx_fields(const struct iavf_ring *rx_ring,
+ const struct iavf_rx_desc *rx_desc,
+ bool flex)
+{
+ if (flex)
+ return iavf_extract_flex_rx_fields(rx_ring, rx_desc);
+ else
+ return iavf_extract_legacy_rx_fields(rx_ring, rx_desc);
+}
+
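As a rough self-check (fabricated descriptor bits, hypothetical helper), the legacy branch of this dispatch reduces to FIELD_GET() over the masks defined in iavf_type.h:

/* hypothetical self-check: build a legacy qw1 and decode it again */
static void example_check_legacy_decode(void)
{
	u64 qw1 = IAVF_RXD_LEGACY_DD_M | IAVF_RXD_LEGACY_EOP_M |
		  FIELD_PREP(IAVF_RXD_LEGACY_LENGTH_M, 128);

	/* DD and EOP are single bits, the length is a 14-bit field */
	WARN_ON(!FIELD_GET(IAVF_RXD_LEGACY_DD_M, qw1));
	WARN_ON(!FIELD_GET(IAVF_RXD_LEGACY_EOP_M, qw1));
	WARN_ON(FIELD_GET(IAVF_RXD_LEGACY_LENGTH_M, qw1) != 128);
}
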
+/**
* iavf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
@@ -1136,18 +1385,17 @@ static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
**/
static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
{
+ bool flex = rx_ring->rxdid == VIRTCHNL_RXDID_2_FLEX_SQ_NIC;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
struct sk_buff *skb = rx_ring->skb;
u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring);
bool failure = false;
while (likely(total_rx_packets < (unsigned int)budget)) {
+ struct libeth_rqe_info fields;
struct libeth_fqe *rx_buffer;
- union iavf_rx_desc *rx_desc;
- unsigned int size;
- u16 vlan_tag = 0;
- u8 rx_ptype;
- u64 qword;
+ struct iavf_rx_desc *rx_desc;
+ u64 qw1;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IAVF_RX_BUFFER_WRITE) {
@@ -1158,35 +1406,32 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
rx_desc = IAVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
- /* status_error_len will always be zero for unused descriptors
- * because it's cleared in cleanup, and overlaps with hdr_addr
- * which is always zero because packet split isn't used, if the
- * hardware wrote DD then the length will be non-zero
- */
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-
/* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc until we have
* verified the descriptor has been written back.
*/
dma_rmb();
-#define IAVF_RXD_DD BIT(IAVF_RX_DESC_STATUS_DD_SHIFT)
- if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD))
+
+ qw1 = le64_to_cpu(rx_desc->qw1);
+		/* If the DD field (descriptor done) is unset, the other
+		 * fields are not valid
+ */
+ if (!iavf_is_descriptor_done(qw1, flex))
break;
- size = FIELD_GET(IAVF_RXD_QW1_LENGTH_PBUF_MASK, qword);
+ fields = iavf_extract_rx_fields(rx_ring, rx_desc, flex);
iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
rx_buffer = &rx_ring->rx_fqes[rx_ring->next_to_clean];
- if (!libeth_rx_sync_for_cpu(rx_buffer, size))
+ if (!libeth_rx_sync_for_cpu(rx_buffer, fields.len))
goto skip_data;
/* retrieve a buffer from the ring */
if (skb)
- iavf_add_rx_frag(skb, rx_buffer, size);
+ iavf_add_rx_frag(skb, rx_buffer, fields.len);
else
- skb = iavf_build_skb(rx_buffer, size);
+ skb = iavf_build_skb(rx_buffer, fields.len);
/* exit if we failed to retrieve a buffer */
if (!skb) {
@@ -1197,15 +1442,14 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
skip_data:
cleaned_count++;
- if (iavf_is_non_eop(rx_ring, rx_desc, skb) || unlikely(!skb))
+ if (iavf_is_non_eop(rx_ring, fields) || unlikely(!skb))
continue;
- /* ERR_MASK will only have valid bits if EOP set, and
- * what we are doing here is actually checking
- * IAVF_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
- * the error field
+		/* The RXE field in the descriptor indicates MAC errors (such
+		 * as CRC, alignment, oversize, undersize or length errors).
+		 * If it is set, drop the packet.
*/
- if (unlikely(iavf_test_staterr(rx_desc, BIT(IAVF_RXD_QW1_ERROR_SHIFT)))) {
+ if (unlikely(fields.rxe)) {
dev_kfree_skb_any(skb);
skb = NULL;
continue;
@@ -1219,22 +1463,11 @@ skip_data:
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword);
-
/* populate checksum, VLAN, and protocol */
- iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
-
- if (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT) &&
- rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1)
- vlan_tag = le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1);
- if (rx_desc->wb.qword2.ext_status &
- cpu_to_le16(BIT(IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) &&
- rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2)
- vlan_tag = le16_to_cpu(rx_desc->wb.qword2.l2tag2_2);
+ iavf_process_skb_fields(rx_ring, rx_desc, skb, fields.ptype, flex);
iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
- iavf_receive_skb(rx_ring, skb, vlan_tag);
+ iavf_receive_skb(rx_ring, skb, fields.vlan);
skb = NULL;
/* update budget accounting */
@@ -1417,7 +1650,8 @@ int iavf_napi_poll(struct napi_struct *napi, int budget)
* continue to poll, otherwise we must stop polling so the
* interrupt can move to the correct cpu.
*/
- if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
+ if (!cpumask_test_cpu(cpu_id,
+ &q_vector->napi.config->affinity_mask)) {
/* Tell napi that we are done polling */
napi_complete_done(napi, work_done);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
index f97c702c0802..df49b0b1d54a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
@@ -4,6 +4,8 @@
#ifndef _IAVF_TXRX_H_
#define _IAVF_TXRX_H_
+#include <linux/net/intel/libie/pctype.h>
+
/* Interrupt Throttling and Rate Limiting Goodies */
#define IAVF_DEFAULT_IRQ_WORK 256
@@ -59,45 +61,26 @@ enum iavf_dyn_idx_t {
#define IAVF_PE_ITR IAVF_IDX_ITR2
/* Supported RSS offloads */
-#define IAVF_DEFAULT_RSS_HENA ( \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_UDP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV4) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_UDP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV6) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_L2_PAYLOAD))
-
-#define IAVF_DEFAULT_RSS_HENA_EXPANDED (IAVF_DEFAULT_RSS_HENA | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
-
-#define iavf_rx_desc iavf_32byte_rx_desc
-
-/**
- * iavf_test_staterr - tests bits in Rx descriptor status and error fields
- * @rx_desc: pointer to receive descriptor (in le64 format)
- * @stat_err_bits: value to mask
- *
- * This function does some fast chicanery in order to return the
- * value of the mask which is really only used for boolean tests.
- * The status_error_len doesn't need to be shifted because it begins
- * at offset zero.
- */
-static inline bool iavf_test_staterr(union iavf_rx_desc *rx_desc,
- const u64 stat_err_bits)
-{
- return !!(rx_desc->wb.qword1.status_error_len &
- cpu_to_le64(stat_err_bits));
-}
+#define IAVF_DEFAULT_RSS_HASHCFG ( \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_L2_PAYLOAD))
+
+#define IAVF_DEFAULT_RSS_HASHCFG_EXPANDED (IAVF_DEFAULT_RSS_HASHCFG | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IAVF_RX_INCREMENT(r, i) \
@@ -262,6 +245,8 @@ struct iavf_ring {
u16 next_to_use;
u16 next_to_clean;
+ u16 rxdid; /* Rx descriptor format */
+
u16 flags;
#define IAVF_TXR_FLAGS_WB_ON_ITR BIT(0)
#define IAVF_TXR_FLAGS_ARM_WB BIT(1)
@@ -269,6 +254,7 @@ struct iavf_ring {
#define IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(3)
#define IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2 BIT(4)
#define IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2 BIT(5)
+#define IAVF_TXRX_FLAGS_HW_TSTAMP BIT(6)
/* stats structs */
struct iavf_queue_stats stats;
@@ -295,6 +281,8 @@ struct iavf_ring {
* for this ring.
*/
+ struct iavf_ptp *ptp;
+
u32 rx_buf_len;
struct net_shaper q_shaper;
bool q_shaper_update;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_type.h b/drivers/net/ethernet/intel/iavf/iavf_type.h
index f6b09e57abce..1d8cf29cb65a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_type.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_type.h
@@ -19,7 +19,7 @@
/* forward declaration */
struct iavf_hw;
-typedef void (*IAVF_ADMINQ_CALLBACK)(struct iavf_hw *, struct iavf_aq_desc *);
+typedef void (*IAVF_ADMINQ_CALLBACK)(struct iavf_hw *, struct libie_aq_desc *);
/* Data type manipulation macros. */
@@ -178,110 +178,116 @@ struct iavf_hw {
char err_str[16];
};
-/* RX Descriptors */
-union iavf_16byte_rx_desc {
- struct {
- __le64 pkt_addr; /* Packet buffer address */
- __le64 hdr_addr; /* Header buffer address */
- } read;
- struct {
- struct {
- struct {
- union {
- __le16 mirroring_status;
- __le16 fcoe_ctx_id;
- } mirr_fcoe;
- __le16 l2tag1;
- } lo_dword;
- union {
- __le32 rss; /* RSS Hash */
- __le32 fd_id; /* Flow director filter id */
- __le32 fcoe_param; /* FCoE DDP Context id */
- } hi_dword;
- } qword0;
- struct {
- /* ext status/error/pktype/length */
- __le64 status_error_len;
- } qword1;
- } wb; /* writeback */
-};
-
-union iavf_32byte_rx_desc {
- struct {
- __le64 pkt_addr; /* Packet buffer address */
- __le64 hdr_addr; /* Header buffer address */
- /* bit 0 of hdr_buffer_addr is DD bit */
- __le64 rsvd1;
- __le64 rsvd2;
- } read;
- struct {
- struct {
- struct {
- union {
- __le16 mirroring_status;
- __le16 fcoe_ctx_id;
- } mirr_fcoe;
- __le16 l2tag1;
- } lo_dword;
- union {
- __le32 rss; /* RSS Hash */
- __le32 fcoe_param; /* FCoE DDP Context id */
- /* Flow director filter id in case of
- * Programming status desc WB
- */
- __le32 fd_id;
- } hi_dword;
- } qword0;
- struct {
- /* status/error/pktype/length */
- __le64 status_error_len;
- } qword1;
- struct {
- __le16 ext_status; /* extended status */
- __le16 rsvd;
- __le16 l2tag2_1;
- __le16 l2tag2_2;
- } qword2;
- struct {
- union {
- __le32 flex_bytes_lo;
- __le32 pe_status;
- } lo_dword;
- union {
- __le32 flex_bytes_hi;
- __le32 fd_id;
- } hi_dword;
- } qword3;
- } wb; /* writeback */
-};
-
-enum iavf_rx_desc_status_bits {
- /* Note: These are predefined bit offsets */
- IAVF_RX_DESC_STATUS_DD_SHIFT = 0,
- IAVF_RX_DESC_STATUS_EOF_SHIFT = 1,
- IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
- IAVF_RX_DESC_STATUS_L3L4P_SHIFT = 3,
- IAVF_RX_DESC_STATUS_CRCP_SHIFT = 4,
- IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
- IAVF_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
- /* Note: Bit 8 is reserved in X710 and XL710 */
- IAVF_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
- IAVF_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
- IAVF_RX_DESC_STATUS_FLM_SHIFT = 11,
- IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
- IAVF_RX_DESC_STATUS_LPBK_SHIFT = 14,
- IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
- IAVF_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
- /* Note: For non-tunnel packets INT_UDP_0 is the right status for
- * UDP header
- */
- IAVF_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
- IAVF_RX_DESC_STATUS_LAST /* this entry must be last!!! */
-};
-
-#define IAVF_RXD_QW1_STATUS_SHIFT 0
-#define IAVF_RXD_QW1_STATUS_MASK ((BIT(IAVF_RX_DESC_STATUS_LAST) - 1) \
- << IAVF_RXD_QW1_STATUS_SHIFT)
+/**
+ * struct iavf_rx_desc - Receive descriptor (both legacy and flexible)
+ * @qw0: quad word 0 fields:
+ * Legacy: Descriptor Type; Mirror ID; L2TAG1P (S-TAG); Filter Status
+ * Flex: Descriptor Type; Mirror ID; UMBCAST; Packet Type; Flexible Flags
+ * Section 0; Packet Length; Header Length; Split Header Flag;
+ * Flexible Flags section 1 / Extended Status
+ * @qw1: quad word 1 fields:
+ * Legacy: Status Field; Error Field; Packet Type; Packet Length (packet,
+ * header, Split Header Flag)
+ * Flex: Status / Error 0 Field; L2TAG1P (S-TAG); Flexible Metadata
+ * Container #0; Flexible Metadata Container #1
+ * @qw2: quad word 2 fields:
+ * Legacy: Extended Status; 1st L2TAG2P (C-TAG); 2nd L2TAG2P (C-TAG)
+ * Flex: Status / Error 1 Field; Flexible Flags section 2; Timestamp Low;
+ * 1st L2TAG2 (C-TAG); 2nd L2TAG2 (C-TAG)
+ * @qw3: quad word 3 fields:
+ * Legacy: FD Filter ID / Flexible Bytes
+ * Flex: Flexible Metadata Container #2; Flexible Metadata Container #3;
+ * Flexible Metadata Container #4 / Timestamp High 0; Flexible
+ * Metadata Container #5 / Timestamp High 1;
+ */
+struct iavf_rx_desc {
+ aligned_le64 qw0;
+/* The hash signature (RSS) */
+#define IAVF_RXD_LEGACY_RSS_M GENMASK_ULL(63, 32)
+/* Stripped C-TAG VLAN from the receive packet */
+#define IAVF_RXD_LEGACY_L2TAG1_M GENMASK_ULL(33, 16)
+/* Packet type */
+#define IAVF_RXD_FLEX_PTYPE_M GENMASK_ULL(25, 16)
+/* Packet length */
+#define IAVF_RXD_FLEX_PKT_LEN_M GENMASK_ULL(45, 32)
+
+ aligned_le64 qw1;
+/* Descriptor done indication flag. */
+#define IAVF_RXD_LEGACY_DD_M BIT(0)
+/* End of packet. Set to 1 if this descriptor is the last one of the packet */
+#define IAVF_RXD_LEGACY_EOP_M BIT(1)
+/* L2 TAG 1 presence indication */
+#define IAVF_RXD_LEGACY_L2TAG1P_M BIT(2)
+/* Detectable L3 and L4 integrity check is processed by the HW */
+#define IAVF_RXD_LEGACY_L3L4P_M BIT(3)
+/* Set when an IPv6 packet contains a Destination Options Header or a Routing
+ * Header.
+ */
+#define IAVF_RXD_LEGACY_IPV6EXADD_M BIT(15)
+/* Receive MAC Errors: CRC; Alignment; Oversize; Undersize; Length error */
+#define IAVF_RXD_LEGACY_RXE_M BIT(19)
+/* Checksum reports:
+ * - IPE: IP checksum error
+ * - L4E: L4 integrity error
+ * - EIPE: External IP header (tunneled packets)
+ */
+#define IAVF_RXD_LEGACY_IPE_M BIT(22)
+#define IAVF_RXD_LEGACY_L4E_M BIT(23)
+#define IAVF_RXD_LEGACY_EIPE_M BIT(24)
+/* Set for packets that skip checksum calculation in pre-parser */
+#define IAVF_RXD_LEGACY_PPRS_M BIT(26)
+/* Indicates the content in the Filter Status field */
+#define IAVF_RXD_LEGACY_FLTSTAT_M GENMASK_ULL(13, 12)
+/* Packet type */
+#define IAVF_RXD_LEGACY_PTYPE_M GENMASK_ULL(37, 30)
+/* Packet length */
+#define IAVF_RXD_LEGACY_LENGTH_M GENMASK_ULL(51, 38)
+/* Descriptor done indication flag */
+#define IAVF_RXD_FLEX_DD_M BIT(0)
+/* End of packet. Set to 1 if this descriptor is the last one of the packet */
+#define IAVF_RXD_FLEX_EOP_M BIT(1)
+/* Detectable L3 and L4 integrity check is processed by the HW */
+#define IAVF_RXD_FLEX_L3L4P_M BIT(3)
+/* Checksum reports:
+ * - IPE: IP checksum error
+ * - L4E: L4 integrity error
+ * - EIPE: External IP header (tunneled packets)
+ * - EUDPE: External UDP checksum error (tunneled packets)
+ */
+#define IAVF_RXD_FLEX_XSUM_IPE_M BIT(4)
+#define IAVF_RXD_FLEX_XSUM_L4E_M BIT(5)
+#define IAVF_RXD_FLEX_XSUM_EIPE_M BIT(6)
+#define IAVF_RXD_FLEX_XSUM_EUDPE_M BIT(7)
+/* Set when an IPv6 packet contains a Destination Options Header or a Routing
+ * Header.
+ */
+#define IAVF_RXD_FLEX_IPV6EXADD_M BIT(9)
+/* Receive MAC Errors: CRC; Alignment; Oversize; Undersize; Length error */
+#define IAVF_RXD_FLEX_RXE_M BIT(10)
+/* Indicates that the RSS/HASH result is valid */
+#define IAVF_RXD_FLEX_RSS_VALID_M BIT(12)
+/* L2 TAG 1 presence indication */
+#define IAVF_RXD_FLEX_L2TAG1P_M BIT(13)
+/* Stripped L2 Tag from the receive packet */
+#define IAVF_RXD_FLEX_L2TAG1_M GENMASK_ULL(31, 16)
+/* The hash signature (RSS) */
+#define IAVF_RXD_FLEX_RSS_HASH_M GENMASK_ULL(63, 32)
+
+ aligned_le64 qw2;
+/* L2 Tag 2 Presence */
+#define IAVF_RXD_LEGACY_L2TAG2P_M BIT(0)
+/* Stripped S-TAG VLAN from the receive packet */
+#define IAVF_RXD_LEGACY_L2TAG2_M GENMASK_ULL(63, 32)
+/* Stripped S-TAG VLAN from the receive packet */
+#define IAVF_RXD_FLEX_L2TAG2_2_M GENMASK_ULL(63, 48)
+/* The packet is a UDP tunneled packet */
+#define IAVF_RXD_FLEX_NAT_M BIT(4)
+/* L2 Tag 2 Presence */
+#define IAVF_RXD_FLEX_L2TAG2P_M BIT(11)
+ aligned_le64 qw3;
+#define IAVF_RXD_FLEX_QW3_TSTAMP_HIGH_M GENMASK_ULL(63, 32)
+} __aligned(4 * sizeof(__le64));
+static_assert(sizeof(struct iavf_rx_desc) == 32);
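
A small sketch (hypothetical helper name) of how this unified layout is consumed: convert a quad word once with le64_to_cpu() and apply the per-format masks with FIELD_GET(), rather than walking the old nested unions:

/* hypothetical helper: packet length for either writeback format */
static inline u32 example_rx_desc_pkt_len(const struct iavf_rx_desc *desc,
					  bool flex)
{
	if (flex)
		return FIELD_GET(IAVF_RXD_FLEX_PKT_LEN_M,
				 le64_to_cpu(desc->qw0));

	return FIELD_GET(IAVF_RXD_LEGACY_LENGTH_M, le64_to_cpu(desc->qw1));
}
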
#define IAVF_RXD_QW1_STATUS_TSYNINDX_SHIFT IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT
#define IAVF_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
@@ -298,22 +304,6 @@ enum iavf_rx_desc_fltstat_values {
IAVF_RX_DESC_FLTSTAT_RSS_HASH = 3,
};
-#define IAVF_RXD_QW1_ERROR_SHIFT 19
-#define IAVF_RXD_QW1_ERROR_MASK (0xFFUL << IAVF_RXD_QW1_ERROR_SHIFT)
-
-enum iavf_rx_desc_error_bits {
- /* Note: These are predefined bit offsets */
- IAVF_RX_DESC_ERROR_RXE_SHIFT = 0,
- IAVF_RX_DESC_ERROR_RECIPE_SHIFT = 1,
- IAVF_RX_DESC_ERROR_HBO_SHIFT = 2,
- IAVF_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */
- IAVF_RX_DESC_ERROR_IPE_SHIFT = 3,
- IAVF_RX_DESC_ERROR_L4E_SHIFT = 4,
- IAVF_RX_DESC_ERROR_EIPE_SHIFT = 5,
- IAVF_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
- IAVF_RX_DESC_ERROR_PPRS_SHIFT = 7
-};
-
enum iavf_rx_desc_error_l3l4e_fcoe_masks {
IAVF_RX_DESC_ERROR_L3L4E_NONE = 0,
IAVF_RX_DESC_ERROR_L3L4E_PROT = 1,
@@ -322,13 +312,6 @@ enum iavf_rx_desc_error_l3l4e_fcoe_masks {
IAVF_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4
};
-#define IAVF_RXD_QW1_PTYPE_SHIFT 30
-#define IAVF_RXD_QW1_PTYPE_MASK (0xFFULL << IAVF_RXD_QW1_PTYPE_SHIFT)
-
-#define IAVF_RXD_QW1_LENGTH_PBUF_SHIFT 38
-#define IAVF_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
- IAVF_RXD_QW1_LENGTH_PBUF_SHIFT)
-
#define IAVF_RXD_QW1_LENGTH_HBUF_SHIFT 52
#define IAVF_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \
IAVF_RXD_QW1_LENGTH_HBUF_SHIFT)
@@ -347,6 +330,8 @@ enum iavf_rx_desc_ext_status_bits {
IAVF_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
};
+#define IAVF_RX_DESC_EXT_STATUS_L2TAG2P_M BIT(IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)
+
enum iavf_rx_desc_pe_status_bits {
/* Note: These are predefined bit offsets */
IAVF_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */
@@ -478,38 +463,6 @@ enum iavf_tx_ctx_desc_cmd_bits {
IAVF_TX_CTX_DESC_SWPE = 0x40
};
-/* Packet Classifier Types for filters */
-enum iavf_filter_pctype {
- /* Note: Values 0-28 are reserved for future use.
- * Value 29, 30, 32 are not supported on XL710 and X710.
- */
- IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
- IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
- IAVF_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
- IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
- IAVF_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
- IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
- IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
- IAVF_FILTER_PCTYPE_FRAG_IPV4 = 36,
- /* Note: Values 37-38 are reserved for future use.
- * Value 39, 40, 42 are not supported on XL710 and X710.
- */
- IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
- IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
- IAVF_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
- IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
- IAVF_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
- IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
- IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
- IAVF_FILTER_PCTYPE_FRAG_IPV6 = 46,
- /* Note: Value 47 is reserved for future use */
- IAVF_FILTER_PCTYPE_FCOE_OX = 48,
- IAVF_FILTER_PCTYPE_FCOE_RX = 49,
- IAVF_FILTER_PCTYPE_FCOE_OTHER = 50,
- /* Note: Values 51-62 are reserved for future use */
- IAVF_FILTER_PCTYPE_L2_PAYLOAD = 63,
-};
-
#define IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT 30
#define IAVF_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \
IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_types.h b/drivers/net/ethernet/intel/iavf/iavf_types.h
new file mode 100644
index 000000000000..a095855122bf
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_types.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Intel Corporation. */
+
+#ifndef _IAVF_TYPES_H_
+#define _IAVF_TYPES_H_
+
+#include "iavf_types.h"
+
+#include <linux/avf/virtchnl.h>
+#include <linux/ptp_clock_kernel.h>
+
+/* structure used to queue PTP commands for processing */
+struct iavf_ptp_aq_cmd {
+ struct list_head list;
+ enum virtchnl_ops v_opcode:16;
+ u16 msglen;
+ u8 msg[] __counted_by(msglen);
+};
+
+struct iavf_ptp {
+ wait_queue_head_t phc_time_waitqueue;
+ struct virtchnl_ptp_caps hw_caps;
+ struct ptp_clock_info info;
+ struct ptp_clock *clock;
+ struct list_head aq_cmds;
+ u64 cached_phc_time;
+ unsigned long cached_phc_updated;
+ /* Lock protecting access to the AQ command list */
+ struct mutex aq_cmd_lock;
+ struct kernel_hwtstamp_config hwtstamp_config;
+ bool phc_time_ready:1;
+};
+
+#endif /* _IAVF_TYPES_H_ */
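
A minimal sketch of how a command could be queued onto aq_cmds; the function name and flag handling below are assumptions based only on the fields above and on iavf_virtchnl_send_ptp_cmd() later in this series:

/* hypothetical enqueue path for one PTP virtchnl command */
static int example_queue_ptp_cmd(struct iavf_adapter *adapter,
				 enum virtchnl_ops v_opcode,
				 const void *msg, u16 msglen)
{
	struct iavf_ptp_aq_cmd *cmd;

	cmd = kzalloc(struct_size(cmd, msg, msglen), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	cmd->v_opcode = v_opcode;
	cmd->msglen = msglen;
	memcpy(cmd->msg, msg, msglen);

	mutex_lock(&adapter->ptp.aq_cmd_lock);
	list_add_tail(&cmd->list, &adapter->ptp.aq_cmds);
	mutex_unlock(&adapter->ptp.aq_cmd_lock);

	/* signal that iavf_virtchnl_send_ptp_cmd() has work to do */
	adapter->aq_required |= IAVF_FLAG_AQ_SEND_PTP_CMD;

	return 0;
}
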
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 15d388b431c5..88156082a41d 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -4,6 +4,7 @@
#include <linux/net/intel/libie/rx.h>
#include "iavf.h"
+#include "iavf_ptp.h"
#include "iavf_prototype.h"
/**
@@ -28,7 +29,7 @@ static int iavf_send_pf_msg(struct iavf_adapter *adapter,
if (status)
dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, status %s, aq_err %s\n",
op, iavf_stat_str(hw, status),
- iavf_aq_str(hw, hw->aq.asq_last_status));
+ libie_aq_str(hw->aq.asq_last_status));
return iavf_status_to_errno(status);
}
@@ -78,6 +79,23 @@ iavf_poll_virtchnl_msg(struct iavf_hw *hw, struct iavf_arq_event_info *event,
return iavf_status_to_errno(status);
received_op =
(enum virtchnl_ops)le32_to_cpu(event->desc.cookie_high);
+
+ if (received_op == VIRTCHNL_OP_EVENT) {
+ struct iavf_adapter *adapter = hw->back;
+ struct virtchnl_pf_event *vpe =
+ (struct virtchnl_pf_event *)event->msg_buf;
+
+ if (vpe->event != VIRTCHNL_EVENT_RESET_IMPENDING)
+ continue;
+
+ dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n");
+ if (!(adapter->flags & IAVF_FLAG_RESET_PENDING))
+ iavf_schedule_reset(adapter,
+ IAVF_FLAG_RESET_PENDING);
+
+ return -EIO;
+ }
+
if (op_to_poll == received_op)
break;
}
@@ -144,9 +162,11 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
VIRTCHNL_VF_OFFLOAD_ENCAP |
VIRTCHNL_VF_OFFLOAD_TC_U32 |
VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
VIRTCHNL_VF_OFFLOAD_CRC |
VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
+ VIRTCHNL_VF_CAP_PTP |
VIRTCHNL_VF_OFFLOAD_ADQ |
VIRTCHNL_VF_OFFLOAD_USO |
VIRTCHNL_VF_OFFLOAD_FDIR_PF |
@@ -177,6 +197,54 @@ int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter)
NULL, 0);
}
+int iavf_send_vf_supported_rxdids_msg(struct iavf_adapter *adapter)
+{
+ adapter->aq_required &= ~IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS;
+
+ if (!IAVF_RXDID_ALLOWED(adapter))
+ return -EOPNOTSUPP;
+
+ adapter->current_op = VIRTCHNL_OP_GET_SUPPORTED_RXDIDS;
+
+ return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
+ NULL, 0);
+}
+
+/**
+ * iavf_send_vf_ptp_caps_msg - Send request for PTP capabilities
+ * @adapter: private adapter structure
+ *
+ * Send the VIRTCHNL_OP_1588_PTP_GET_CAPS command to the PF to request the PTP
+ * capabilities available to this device. This includes the following
+ * potential access:
+ *
+ * * READ_PHC - access to read the PTP hardware clock time
+ * * RX_TSTAMP - access to request Rx timestamps on all received packets
+ *
+ * The PF will reply with the same opcode and a filled out copy of the
+ * virtchnl_ptp_caps structure which defines the specifics of which features
+ * are accessible to this device.
+ *
+ * Return: 0 if success, error code otherwise.
+ */
+int iavf_send_vf_ptp_caps_msg(struct iavf_adapter *adapter)
+{
+ struct virtchnl_ptp_caps hw_caps = {
+ .caps = VIRTCHNL_1588_PTP_CAP_READ_PHC |
+ VIRTCHNL_1588_PTP_CAP_RX_TSTAMP
+ };
+
+ adapter->aq_required &= ~IAVF_FLAG_AQ_GET_PTP_CAPS;
+
+ if (!IAVF_PTP_ALLOWED(adapter))
+ return -EOPNOTSUPP;
+
+ adapter->current_op = VIRTCHNL_OP_1588_PTP_GET_CAPS;
+
+ return iavf_send_pf_msg(adapter, VIRTCHNL_OP_1588_PTP_GET_CAPS,
+ (u8 *)&hw_caps, sizeof(hw_caps));
+}
+
/**
* iavf_validate_num_queues
* @adapter: adapter structure
@@ -263,6 +331,40 @@ int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
return err;
}
+int iavf_get_vf_supported_rxdids(struct iavf_adapter *adapter)
+{
+ struct iavf_arq_event_info event;
+ u64 rxdids;
+ int err;
+
+ event.msg_buf = (u8 *)&rxdids;
+ event.buf_len = sizeof(rxdids);
+
+ err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
+ VIRTCHNL_OP_GET_SUPPORTED_RXDIDS);
+ if (!err)
+ adapter->supp_rxdids = rxdids;
+
+ return err;
+}
+
+int iavf_get_vf_ptp_caps(struct iavf_adapter *adapter)
+{
+ struct virtchnl_ptp_caps caps = {};
+ struct iavf_arq_event_info event;
+ int err;
+
+ event.msg_buf = (u8 *)&caps;
+ event.buf_len = sizeof(caps);
+
+ err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
+ VIRTCHNL_OP_1588_PTP_GET_CAPS);
+ if (!err)
+ adapter->ptp.hw_caps = caps;
+
+ return err;
+}
+
/**
* iavf_configure_queues
* @adapter: adapter structure
@@ -275,6 +377,7 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
int pairs = adapter->num_active_queues;
struct virtchnl_queue_pair_info *vqpi;
u32 i, max_frame;
+ u8 rx_flags = 0;
size_t len;
max_frame = LIBIE_MAX_RX_FRM_LEN(adapter->rx_rings->pp->p.offset);
@@ -292,6 +395,9 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
if (!vqci)
return;
+ if (iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_RX_TSTAMP))
+ rx_flags |= VIRTCHNL_PTP_RX_TSTAMP;
+
vqci->vsi_id = adapter->vsi_res->vsi_id;
vqci->num_queue_pairs = pairs;
vqpi = vqci->qpair;
@@ -309,9 +415,12 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
vqpi->rxq.max_pkt_size = max_frame;
vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
+ if (IAVF_RXDID_ALLOWED(adapter))
+ vqpi->rxq.rxdid = adapter->rxdid;
if (CRC_OFFLOAD_ALLOWED(adapter))
vqpi->rxq.crc_disable = !!(adapter->netdev->features &
NETIF_F_RXFCS);
+ vqpi->rxq.flags = rx_flags;
vqpi++;
}
@@ -684,7 +793,8 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
len = virtchnl_struct_size(vvfl, vlan_id, count);
if (len > IAVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
+ dev_info(&adapter->pdev->dev,
+ "virtchnl: Too many VLAN add (v1) requests; splitting into multiple messages to PF\n");
while (len > IAVF_MAX_AQ_BUF_SIZE)
len = virtchnl_struct_size(vvfl, vlan_id,
--count);
@@ -729,7 +839,8 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
len = virtchnl_struct_size(vvfl_v2, filters, count);
if (len > IAVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
+ dev_info(&adapter->pdev->dev,
+ "virtchnl: Too many VLAN add (v2) requests; splitting into multiple messages to PF\n");
while (len > IAVF_MAX_AQ_BUF_SIZE)
len = virtchnl_struct_size(vvfl_v2, filters,
--count);
@@ -832,7 +943,8 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
len = virtchnl_struct_size(vvfl, vlan_id, count);
if (len > IAVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
+ dev_info(&adapter->pdev->dev,
+ "virtchnl: Too many VLAN delete (v1) requests; splitting into multiple messages to PF\n");
while (len > IAVF_MAX_AQ_BUF_SIZE)
len = virtchnl_struct_size(vvfl, vlan_id,
--count);
@@ -878,7 +990,8 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
len = virtchnl_struct_size(vvfl_v2, filters, count);
if (len > IAVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
+ dev_info(&adapter->pdev->dev,
+ "virtchnl: Too many VLAN delete (v2) requests; splitting into multiple messages to PF\n");
while (len > IAVF_MAX_AQ_BUF_SIZE)
len = virtchnl_struct_size(vvfl_v2, filters,
--count);
@@ -1036,12 +1149,12 @@ void iavf_request_stats(struct iavf_adapter *adapter)
}
/**
- * iavf_get_hena
+ * iavf_get_rss_hashcfg
* @adapter: adapter structure
*
- * Request hash enable capabilities from PF
+ * Request RSS Hash enable bits from PF
**/
-void iavf_get_hena(struct iavf_adapter *adapter)
+void iavf_get_rss_hashcfg(struct iavf_adapter *adapter)
{
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -1049,20 +1162,20 @@ void iavf_get_hena(struct iavf_adapter *adapter)
adapter->current_op);
return;
}
- adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS;
- adapter->aq_required &= ~IAVF_FLAG_AQ_GET_HENA;
- iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS, NULL, 0);
+ adapter->current_op = VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_GET_RSS_HASHCFG;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, NULL, 0);
}
/**
- * iavf_set_hena
+ * iavf_set_rss_hashcfg
* @adapter: adapter structure
*
* Request the PF to set our RSS hash capabilities
**/
-void iavf_set_hena(struct iavf_adapter *adapter)
+void iavf_set_rss_hashcfg(struct iavf_adapter *adapter)
{
- struct virtchnl_rss_hena vrh;
+ struct virtchnl_rss_hashcfg vrh;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -1070,10 +1183,10 @@ void iavf_set_hena(struct iavf_adapter *adapter)
adapter->current_op);
return;
}
- vrh.hena = adapter->hena;
- adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA;
- adapter->aq_required &= ~IAVF_FLAG_AQ_SET_HENA;
- iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA, (u8 *)&vrh,
+ vrh.hashcfg = adapter->rss_hashcfg;
+ adapter->current_op = VIRTCHNL_OP_SET_RSS_HASHCFG;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_HASHCFG;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HASHCFG, (u8 *)&vrh,
sizeof(vrh));
}
@@ -1402,6 +1515,67 @@ void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2);
}
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+/**
+ * iavf_virtchnl_send_ptp_cmd - Send one queued PTP command
+ * @adapter: adapter private structure
+ *
+ * De-queue one PTP command request and send the command message to the PF.
+ * Clear IAVF_FLAG_AQ_SEND_PTP_CMD if no more messages are left to send.
+ */
+void iavf_virtchnl_send_ptp_cmd(struct iavf_adapter *adapter)
+{
+ struct iavf_ptp_aq_cmd *cmd;
+ int err;
+
+ if (!adapter->ptp.clock) {
+ /* This shouldn't be possible to hit, since no messages should
+ * be queued if PTP is not initialized.
+ */
+ pci_err(adapter->pdev, "PTP is not initialized\n");
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
+ return;
+ }
+
+ mutex_lock(&adapter->ptp.aq_cmd_lock);
+ cmd = list_first_entry_or_null(&adapter->ptp.aq_cmds,
+ struct iavf_ptp_aq_cmd, list);
+ if (!cmd) {
+ /* no further PTP messages to send */
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
+ goto out_unlock;
+ }
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ pci_err(adapter->pdev,
+ "Cannot send PTP command %d, command %d pending\n",
+ cmd->v_opcode, adapter->current_op);
+ goto out_unlock;
+ }
+
+ err = iavf_send_pf_msg(adapter, cmd->v_opcode, cmd->msg, cmd->msglen);
+ if (!err) {
+ /* Command was sent without errors, so we can remove it from
+ * the list and discard it.
+ */
+ list_del(&cmd->list);
+ kfree(cmd);
+ } else {
+ /* We failed to send the command, try again next cycle */
+ pci_err(adapter->pdev, "Failed to send PTP command %d\n",
+ cmd->v_opcode);
+ }
+
+ if (list_empty(&adapter->ptp.aq_cmds))
+ /* no further PTP messages to send */
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
+
+out_unlock:
+ mutex_unlock(&adapter->ptp.aq_cmd_lock);
+}
+#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+
/**
* iavf_print_link_message - print link up or down
* @adapter: adapter structure
@@ -2098,6 +2272,37 @@ static void iavf_activate_fdir_filters(struct iavf_adapter *adapter)
}
/**
+ * iavf_virtchnl_ptp_get_time - Respond to VIRTCHNL_OP_1588_PTP_GET_TIME
+ * @adapter: private adapter structure
+ * @data: the message from the PF
+ * @len: length of the message from the PF
+ *
+ * Handle the VIRTCHNL_OP_1588_PTP_GET_TIME message from the PF. This message
+ * is sent by the PF in response to a VF request that used the same opcode.
+ * Extract the 64bit nanoseconds time from the message and store it in
+ * cached_phc_time. Then, notify any thread that is waiting for the update via
+ * the wait queue.
+ */
+static void iavf_virtchnl_ptp_get_time(struct iavf_adapter *adapter,
+ void *data, u16 len)
+{
+ struct virtchnl_phc_time *msg = data;
+
+ if (len != sizeof(*msg)) {
+ dev_err_once(&adapter->pdev->dev,
+ "Invalid VIRTCHNL_OP_1588_PTP_GET_TIME from PF. Got size %u, expected %zu\n",
+ len, sizeof(*msg));
+ return;
+ }
+
+ adapter->ptp.cached_phc_time = msg->time;
+ adapter->ptp.cached_phc_updated = jiffies;
+ adapter->ptp.phc_time_ready = true;
+
+ wake_up(&adapter->ptp.phc_time_waitqueue);
+}
+
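A rough sketch of the requesting side that this handler wakes up; the function shape and the one-second timeout are assumptions, only the waitqueue, phc_time_ready and cached_phc_time fields come from this series:

/* hypothetical requester side of VIRTCHNL_OP_1588_PTP_GET_TIME */
static int example_read_cached_phc(struct iavf_adapter *adapter, u64 *time)
{
	long ret;

	adapter->ptp.phc_time_ready = false;
	/* ... queue and send VIRTCHNL_OP_1588_PTP_GET_TIME to the PF here ... */

	ret = wait_event_interruptible_timeout(adapter->ptp.phc_time_waitqueue,
					       adapter->ptp.phc_time_ready,
					       HZ);
	if (ret == 0)
		return -EBUSY;		/* PF did not answer within one second */
	if (ret < 0)
		return ret;		/* interrupted by a signal */

	*time = adapter->ptp.cached_phc_time;

	return 0;
}
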
+/**
* iavf_virtchnl_completion
* @adapter: adapter structure
* @v_opcode: opcode sent by PF
@@ -2509,6 +2714,25 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
aq_required;
}
break;
+ case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
+ if (msglen != sizeof(u64))
+ return;
+
+ adapter->supp_rxdids = *(u64 *)msg;
+
+ break;
+ case VIRTCHNL_OP_1588_PTP_GET_CAPS:
+ if (msglen != sizeof(adapter->ptp.hw_caps))
+ return;
+
+ adapter->ptp.hw_caps = *(struct virtchnl_ptp_caps *)msg;
+
+ /* process any state change needed due to new capabilities */
+ iavf_ptp_process_caps(adapter);
+ break;
+ case VIRTCHNL_OP_1588_PTP_GET_TIME:
+ iavf_virtchnl_ptp_get_time(adapter, msg, msglen);
+ break;
case VIRTCHNL_OP_ENABLE_QUEUES:
/* enable transmits */
iavf_irq_enable(adapter, true);
@@ -2532,11 +2756,12 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
if (v_opcode != adapter->current_op)
return;
break;
- case VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
- struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;
+ case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS: {
+ struct virtchnl_rss_hashcfg *vrh =
+ (struct virtchnl_rss_hashcfg *)msg;
if (msglen == sizeof(*vrh))
- adapter->hena = vrh->hena;
+ adapter->rss_hashcfg = vrh->hashcfg;
else
dev_warn(&adapter->pdev->dev,
"Invalid message %d from PF\n", v_opcode);
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 3307d551f431..5b2c666496e7 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -32,7 +32,8 @@ ice-y := ice_main.o \
ice_parser_rt.o \
ice_idc.o \
devlink/devlink.o \
- devlink/devlink_port.o \
+ devlink/health.o \
+ devlink/port.o \
ice_sf_eth.o \
ice_sf_vsi_vlan_ops.o \
ice_ddp.o \
@@ -41,18 +42,19 @@ ice-y := ice_main.o \
ice_ethtool.o \
ice_repr.o \
ice_tc_lib.o \
- ice_fwlog.o \
ice_debugfs.o \
ice_adapter.o
ice-$(CONFIG_PCI_IOV) += \
ice_sriov.o \
- ice_virtchnl.o \
- ice_virtchnl_allowlist.o \
- ice_virtchnl_fdir.o \
+ virt/allowlist.o \
+ virt/fdir.o \
+ virt/queues.o \
+ virt/virtchnl.o \
+ virt/rss.o \
ice_vf_mbx.o \
ice_vf_vsi_vlan_ops.o \
ice_vf_lib.o
-ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o ice_dpll.o
+ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o ice_dpll.o ice_tspll.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index 415445cefdb2..d88b7f3fd1f9 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -6,7 +6,7 @@
#include "ice.h"
#include "ice_lib.h"
#include "devlink.h"
-#include "devlink_port.h"
+#include "port.h"
#include "ice_eswitch.h"
#include "ice_fw_update.h"
#include "ice_dcb_lib.h"
@@ -293,7 +293,7 @@ static int ice_devlink_info_get(struct devlink *devlink,
err = ice_discover_dev_caps(hw, &ctx->dev_caps);
if (err) {
dev_dbg(dev, "Failed to discover device capabilities, status %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Unable to discover device capabilities");
goto out_free_ctx;
}
@@ -302,7 +302,7 @@ static int ice_devlink_info_get(struct devlink *devlink,
err = ice_get_inactive_orom_ver(hw, &ctx->pending_orom);
if (err) {
dev_dbg(dev, "Unable to read inactive Option ROM version data, status %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
/* disable display of pending Option ROM */
ctx->dev_caps.common_cap.nvm_update_pending_orom = false;
@@ -313,7 +313,7 @@ static int ice_devlink_info_get(struct devlink *devlink,
err = ice_get_inactive_nvm_ver(hw, &ctx->pending_nvm);
if (err) {
dev_dbg(dev, "Unable to read inactive NVM version data, status %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
/* disable display of pending Option ROM */
ctx->dev_caps.common_cap.nvm_update_pending_nvm = false;
@@ -324,7 +324,7 @@ static int ice_devlink_info_get(struct devlink *devlink,
err = ice_get_inactive_netlist_ver(hw, &ctx->pending_netlist);
if (err) {
dev_dbg(dev, "Unable to read inactive Netlist version data, status %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
/* disable display of pending Option ROM */
ctx->dev_caps.common_cap.nvm_update_pending_netlist = false;
@@ -368,14 +368,18 @@ static int ice_devlink_info_get(struct devlink *devlink,
}
break;
case ICE_VERSION_RUNNING:
- err = devlink_info_version_running_put(req, key, ctx->buf);
+ err = devlink_info_version_running_put_ext(req, key,
+ ctx->buf,
+ DEVLINK_INFO_VERSION_TYPE_COMPONENT);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to set running version");
goto out_free_ctx;
}
break;
case ICE_VERSION_STORED:
- err = devlink_info_version_stored_put(req, key, ctx->buf);
+ err = devlink_info_version_stored_put_ext(req, key,
+ ctx->buf,
+ DEVLINK_INFO_VERSION_TYPE_COMPONENT);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to set stored version");
goto out_free_ctx;
@@ -436,7 +440,7 @@ ice_devlink_reload_empr_start(struct ice_pf *pf,
err = ice_aq_nvm_update_empr(hw);
if (err) {
dev_err(dev, "Failed to trigger EMP device reset to reload firmware, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to trigger EMP device reset to reload firmware");
return err;
}
@@ -455,6 +459,7 @@ static void ice_devlink_reinit_down(struct ice_pf *pf)
rtnl_lock();
ice_vsi_decfg(ice_get_main_vsi(pf));
rtnl_unlock();
+ ice_deinit_pf(pf);
ice_deinit_dev(pf);
}
@@ -605,11 +610,13 @@ exit_release_res:
* @devlink: pointer to the devlink instance
* @id: the parameter ID to set
* @ctx: context to store the parameter value
+ * @extack: netlink extended ACK structure
*
* Return: zero on success and negative value on failure.
*/
static int ice_devlink_tx_sched_layers_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
int err;
@@ -977,6 +984,9 @@ static int ice_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv
/* preallocate memory for ice_sched_node */
node = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
*priv = node;
return 0;
@@ -1198,6 +1208,25 @@ static int ice_devlink_set_parent(struct devlink_rate *devlink_rate,
return status;
}
+static void ice_set_min_max_msix(struct ice_pf *pf)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ union devlink_param_value val;
+ int err;
+
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
+ &val);
+ if (!err)
+ pf->msix.min = val.vu32;
+
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
+ &val);
+ if (!err)
+ pf->msix.max = val.vu32;
+}
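These are the generic devlink driverinit parameters registered later in this file; a typical workflow (hypothetical PCI address) to resize the PF MSI-X range would be:

	devlink dev param set pci/0000:01:00.0 name msix_vec_per_pf_min value 2 cmode driverinit
	devlink dev param set pci/0000:01:00.0 name msix_vec_per_pf_max value 64 cmode driverinit
	devlink dev reload pci/0000:01:00.0

ice_set_min_max_msix() picks the new values up on the reload path in ice_devlink_reinit_up() below.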
+
/**
* ice_devlink_reinit_up - do reinit of the given PF
* @pf: pointer to the PF struct
@@ -1205,11 +1234,28 @@ static int ice_devlink_set_parent(struct devlink_rate *devlink_rate,
static int ice_devlink_reinit_up(struct ice_pf *pf)
{
struct ice_vsi *vsi = ice_get_main_vsi(pf);
+ struct device *dev = ice_pf_to_dev(pf);
+ bool need_dev_deinit = false;
int err;
+ err = ice_init_hw(&pf->hw);
+ if (err) {
+ dev_err(dev, "ice_init_hw failed: %d\n", err);
+ return err;
+ }
+
+ /* load MSI-X values */
+ ice_set_min_max_msix(pf);
+
err = ice_init_dev(pf);
if (err)
- return err;
+ goto unroll_hw_init;
+
+ err = ice_init_pf(pf);
+ if (err) {
+ dev_err(dev, "ice_init_pf failed: %d\n", err);
+ goto unroll_dev_init;
+ }
vsi->flags = ICE_VSI_FLAG_INIT;
@@ -1217,7 +1263,7 @@ static int ice_devlink_reinit_up(struct ice_pf *pf)
err = ice_vsi_cfg(vsi);
rtnl_unlock();
if (err)
- goto err_vsi_cfg;
+ goto unroll_pf_init;
/* No need to take devl_lock, it's already taken by devlink API */
err = ice_load(pf);
@@ -1230,8 +1276,14 @@ err_load:
rtnl_lock();
ice_vsi_decfg(vsi);
rtnl_unlock();
-err_vsi_cfg:
- ice_deinit_dev(pf);
+unroll_pf_init:
+ ice_deinit_pf(pf);
+unroll_dev_init:
+ need_dev_deinit = true;
+unroll_hw_init:
+ ice_deinit_hw(&pf->hw);
+ if (need_dev_deinit)
+ ice_deinit_dev(pf);
return err;
}
@@ -1299,11 +1351,17 @@ static const struct devlink_ops ice_sf_devlink_ops;
static int
ice_devlink_enable_roce_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
+ struct iidc_rdma_core_dev_info *cdev;
+
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
- ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? true : false;
+ ctx->val.vbool = !!(cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2);
return 0;
}
@@ -1313,19 +1371,24 @@ static int ice_devlink_enable_roce_set(struct devlink *devlink, u32 id,
struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
+ struct iidc_rdma_core_dev_info *cdev;
bool roce_ena = ctx->val.vbool;
int ret;
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
+
if (!roce_ena) {
ice_unplug_aux_dev(pf);
- pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
+ cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
return 0;
}
- pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
+ cdev->rdma_protocol |= IIDC_RDMA_PROTOCOL_ROCEV2;
ret = ice_plug_aux_dev(pf);
if (ret)
- pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
+ cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
return ret;
}
@@ -1336,11 +1399,16 @@ ice_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
+ struct iidc_rdma_core_dev_info *cdev;
+
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
return -EOPNOTSUPP;
- if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP) {
+ if (cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_IWARP) {
NL_SET_ERR_MSG_MOD(extack, "iWARP is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
return -EOPNOTSUPP;
}
@@ -1350,11 +1418,17 @@ ice_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
static int
ice_devlink_enable_iw_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
+ struct iidc_rdma_core_dev_info *cdev;
- ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP;
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
+
+ ctx->val.vbool = !!(cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_IWARP);
return 0;
}
@@ -1364,19 +1438,24 @@ static int ice_devlink_enable_iw_set(struct devlink *devlink, u32 id,
struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
+ struct iidc_rdma_core_dev_info *cdev;
bool iw_ena = ctx->val.vbool;
int ret;
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
+
if (!iw_ena) {
ice_unplug_aux_dev(pf);
- pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP;
+ cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_IWARP;
return 0;
}
- pf->rdma_mode |= IIDC_RDMA_PROTOCOL_IWARP;
+ cdev->rdma_protocol |= IIDC_RDMA_PROTOCOL_IWARP;
ret = ice_plug_aux_dev(pf);
if (ret)
- pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP;
+ cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_IWARP;
return ret;
}
@@ -1391,7 +1470,7 @@ ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id,
if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
return -EOPNOTSUPP;
- if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2) {
+ if (pf->cdev_info->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2) {
NL_SET_ERR_MSG_MOD(extack, "RoCEv2 is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
return -EOPNOTSUPP;
}
@@ -1447,11 +1526,13 @@ static int ice_devlink_local_fwd_str_to_mode(const char *mode_str)
* @devlink: Pointer to the devlink instance.
* @id: The parameter ID to set.
* @ctx: Context to store the parameter value.
+ * @extack: netlink extended ACK structure
*
* Return: Zero.
*/
static int ice_devlink_local_fwd_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
struct ice_port_info *pi;
@@ -1518,6 +1599,43 @@ static int ice_devlink_local_fwd_validate(struct devlink *devlink, u32 id,
return 0;
}
+static int
+ice_devlink_msix_max_pf_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ if (val.vu32 > pf->hw.func_caps.common_cap.num_msix_vectors)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+ice_devlink_msix_min_pf_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ if (val.vu32 < ICE_MIN_MSIX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ice_devlink_enable_rdma_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ bool new_state = val.vbool;
+
+ if (new_state && !test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
enum ice_param_id {
ICE_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
@@ -1533,6 +1651,17 @@ static const struct devlink_param ice_dvl_rdma_params[] = {
ice_devlink_enable_iw_get,
ice_devlink_enable_iw_set,
ice_devlink_enable_iw_validate),
+ DEVLINK_PARAM_GENERIC(ENABLE_RDMA, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, ice_devlink_enable_rdma_validate),
+};
+
+static const struct devlink_param ice_dvl_msix_params[] = {
+ DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MAX,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, ice_devlink_msix_max_pf_validate),
+ DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MIN,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, ice_devlink_msix_min_pf_validate),
};
static const struct devlink_param ice_dvl_sched_params[] = {
@@ -1636,6 +1765,7 @@ void ice_devlink_unregister(struct ice_pf *pf)
int ice_devlink_register_params(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
+ union devlink_param_value value;
struct ice_hw *hw = &pf->hw;
int status;
@@ -1644,10 +1774,39 @@ int ice_devlink_register_params(struct ice_pf *pf)
if (status)
return status;
+ status = devl_params_register(devlink, ice_dvl_msix_params,
+ ARRAY_SIZE(ice_dvl_msix_params));
+ if (status)
+ goto unregister_rdma_params;
+
if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
status = devl_params_register(devlink, ice_dvl_sched_params,
ARRAY_SIZE(ice_dvl_sched_params));
+ if (status)
+ goto unregister_msix_params;
+
+ value.vu32 = pf->msix.max;
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
+ value);
+ value.vu32 = pf->msix.min;
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
+ value);
+
+ value.vbool = test_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
+ value);
+ return 0;
+
+unregister_msix_params:
+ devl_params_unregister(devlink, ice_dvl_msix_params,
+ ARRAY_SIZE(ice_dvl_msix_params));
+unregister_rdma_params:
+ devl_params_unregister(devlink, ice_dvl_rdma_params,
+ ARRAY_SIZE(ice_dvl_rdma_params));
return status;
}
@@ -1658,6 +1817,8 @@ void ice_devlink_unregister_params(struct ice_pf *pf)
devl_params_unregister(devlink, ice_dvl_rdma_params,
ARRAY_SIZE(ice_dvl_rdma_params));
+ devl_params_unregister(devlink, ice_dvl_msix_params,
+ ARRAY_SIZE(ice_dvl_msix_params));
if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
devl_params_unregister(devlink, ice_dvl_sched_params,
diff --git a/drivers/net/ethernet/intel/ice/devlink/health.c b/drivers/net/ethernet/intel/ice/devlink/health.c
new file mode 100644
index 000000000000..8e9a8a8178d4
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/devlink/health.c
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_adminq_cmd.h" /* for enum ice_aqc_health_status_elem */
+#include "health.h"
+
+#define ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, obj, name) \
+ devlink_fmsg_put(fmsg, #name, (obj)->name)
+
+#define ICE_HEALTH_STATUS_DATA_SIZE 2
+
+struct ice_health_status {
+ enum ice_aqc_health_status code;
+ const char *description;
+ const char *solution;
+ const char *data_label[ICE_HEALTH_STATUS_DATA_SIZE];
+};
+
+/*
+ * In addition to the health status codes provided below, the firmware might
+ * generate Health Status Codes that are not pertinent to the end-user.
+ * For instance, Health Code 0x1002 is triggered when the command fails.
+ * Such codes should be disregarded by the end-user.
+ * The lookup table below must be kept sorted by code.
+ */
+
+static const char ice_common_port_solutions[] =
+ "Check your cable connection. Change or replace the module or cable. Manually set speed and duplex.";
+static const char ice_port_number_label[] = "Port Number";
+static const char ice_update_nvm_solution[] = "Update to the latest NVM image.";
+
+static const struct ice_health_status ice_health_status_lookup[] = {
+ {ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT, "An unsupported module was detected.",
+ ice_common_port_solutions, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_TYPE, "Module type is not supported.",
+ "Change or replace the module or cable.", {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_QUAL, "Module is not qualified.",
+ ice_common_port_solutions, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_COMM,
+ "Device cannot communicate with the module.",
+ "Check your cable connection. Change or replace the module or cable. Manually set speed and duplex.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_CONFLICT, "Unresolved module conflict.",
+ "Manually set speed/duplex or change the port option. If the problem persists, use a cable/module that is found in the supported modules and cables list for this device.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_NOT_PRESENT, "Module is not present.",
+ "Check that the module is inserted correctly. If the problem persists, use a cable/module that is found in the supported modules and cables list for this device.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED, "Underutilized module.",
+ "Change or replace the module or cable. Change the port option.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT, "An unsupported module was detected.",
+ ice_common_port_solutions, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_INVALID_LINK_CFG, "Invalid link configuration.",
+ NULL, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_PORT_ACCESS, "Port hardware access error.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_PORT_UNREACHABLE, "A port is unreachable.",
+ "Change the port option. Update to the latest NVM image."},
+ {ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED, "Port speed is limited due to module.",
+ "Change the module or configure the port option to match the current module speed. Change the port option.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_PARALLEL_FAULT,
+ "All configured link modes were attempted but failed to establish link. The device will restart the process to establish link.",
+ "Check link partner connection and configuration.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED,
+ "Port speed is limited by PHY capabilities.",
+ "Change the module to align to port option.", {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_NETLIST_TOPO, "LOM topology netlist is corrupted.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_NETLIST, "Unrecoverable netlist error.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_TOPO_CONFLICT, "Port topology conflict.",
+ "Change the port option. Update to the latest NVM image."},
+ {ICE_AQC_HEALTH_STATUS_ERR_LINK_HW_ACCESS, "Unrecoverable hardware access error.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_LINK_RUNTIME, "Unrecoverable runtime error.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_DNL_INIT, "Link management engine failed to initialize.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_PHY_FW_LOAD,
+ "Failed to load the firmware image in the external PHY.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_INFO_RECOVERY, "The device is in firmware recovery mode.",
+ ice_update_nvm_solution, {"Extended Error"}},
+ {ICE_AQC_HEALTH_STATUS_ERR_FLASH_ACCESS, "The flash chip cannot be accessed.",
+ "If issue persists, call customer support.", {"Access Type"}},
+ {ICE_AQC_HEALTH_STATUS_ERR_NVM_AUTH, "NVM authentication failed.",
+ ice_update_nvm_solution},
+ {ICE_AQC_HEALTH_STATUS_ERR_OROM_AUTH, "Option ROM authentication failed.",
+ ice_update_nvm_solution},
+ {ICE_AQC_HEALTH_STATUS_ERR_DDP_AUTH, "DDP package authentication failed.",
+ "Update to latest base driver and DDP package."},
+ {ICE_AQC_HEALTH_STATUS_ERR_NVM_COMPAT, "NVM image is incompatible.",
+ ice_update_nvm_solution},
+ {ICE_AQC_HEALTH_STATUS_ERR_OROM_COMPAT, "Option ROM is incompatible.",
+ ice_update_nvm_solution, {"Expected PCI Device ID", "Expected Module ID"}},
+ {ICE_AQC_HEALTH_STATUS_ERR_DCB_MIB,
+ "Supplied MIB file is invalid. DCB reverted to default configuration.",
+ "Disable FW-LLDP and check DCBx system configuration.",
+ {ice_port_number_label, "MIB ID"}},
+};
+
+static int ice_health_status_lookup_compare(const void *a, const void *b)
+{
+ return ((struct ice_health_status *)a)->code - ((struct ice_health_status *)b)->code;
+}
+
+static const struct ice_health_status *ice_get_health_status(u16 code)
+{
+ struct ice_health_status key = { .code = code };
+
+ return bsearch(&key, ice_health_status_lookup, ARRAY_SIZE(ice_health_status_lookup),
+ sizeof(struct ice_health_status), ice_health_status_lookup_compare);
+}
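Because ice_get_health_status() uses bsearch(), the lookup table above must stay sorted by code; a hypothetical debug-only check (not part of this patch) could assert that invariant once at probe time:

	static void ice_health_check_lookup_sorted(void)
	{
		for (size_t i = 1; i < ARRAY_SIZE(ice_health_status_lookup); i++)
			WARN_ON_ONCE(ice_health_status_lookup[i - 1].code >=
				     ice_health_status_lookup[i].code);
	}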
+
+static void ice_describe_status_code(struct devlink_fmsg *fmsg,
+ struct ice_aqc_health_status_elem *hse)
+{
+ static const char *const aux_label[] = { "Aux Data 1", "Aux Data 2" };
+ const struct ice_health_status *health_code;
+ u32 internal_data[2];
+ u16 status_code;
+
+ status_code = le16_to_cpu(hse->health_status_code);
+
+ devlink_fmsg_put(fmsg, "Syndrome", status_code);
+ if (status_code) {
+ internal_data[0] = le32_to_cpu(hse->internal_data1);
+ internal_data[1] = le32_to_cpu(hse->internal_data2);
+
+ health_code = ice_get_health_status(status_code);
+ if (!health_code)
+ return;
+
+ devlink_fmsg_string_pair_put(fmsg, "Description", health_code->description);
+ if (health_code->solution)
+ devlink_fmsg_string_pair_put(fmsg, "Possible Solution",
+ health_code->solution);
+
+ for (size_t i = 0; i < ICE_HEALTH_STATUS_DATA_SIZE; i++) {
+ if (internal_data[i] != ICE_AQC_HEALTH_STATUS_UNDEFINED_DATA)
+ devlink_fmsg_u32_pair_put(fmsg,
+ health_code->data_label[i] ?
+ health_code->data_label[i] :
+ aux_label[i],
+ internal_data[i]);
+ }
+ }
+}
+
+static int
+ice_port_reporter_diagnose(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_health_reporter_priv(reporter);
+
+ ice_describe_status_code(fmsg, &pf->health_reporters.port_status);
+ return 0;
+}
+
+static int
+ice_port_reporter_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg,
+ void *priv_ctx, struct netlink_ext_ack __always_unused *extack)
+{
+ struct ice_pf *pf = devlink_health_reporter_priv(reporter);
+
+ ice_describe_status_code(fmsg, &pf->health_reporters.port_status);
+ return 0;
+}
+
+static int
+ice_fw_reporter_diagnose(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_health_reporter_priv(reporter);
+
+ ice_describe_status_code(fmsg, &pf->health_reporters.fw_status);
+ return 0;
+}
+
+static int
+ice_fw_reporter_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg,
+ void *priv_ctx, struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_health_reporter_priv(reporter);
+
+ ice_describe_status_code(fmsg, &pf->health_reporters.fw_status);
+ return 0;
+}
+
+static void ice_config_health_events(struct ice_pf *pf, bool enable)
+{
+ u8 enable_bits = 0;
+ int ret;
+
+ if (enable)
+ enable_bits = ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK |
+ ICE_AQC_HEALTH_STATUS_SET_GLOBAL_MASK;
+
+ ret = ice_aq_set_health_status_cfg(&pf->hw, enable_bits);
+ if (ret)
+ dev_err(ice_pf_to_dev(pf), "Failed to %s firmware health events, err %d aq_err %s\n",
+ str_enable_disable(enable), ret,
+ libie_aq_str(pf->hw.adminq.sq_last_status));
+}
+
+/**
+ * ice_process_health_status_event - Process the health status event from FW
+ * @pf: pointer to the PF structure
+ * @event: event structure containing the Health Status Event opcode
+ *
+ * Decode the Health Status Events and print the associated messages
+ */
+void ice_process_health_status_event(struct ice_pf *pf, struct ice_rq_event_info *event)
+{
+ const struct ice_aqc_health_status_elem *health_info;
+ const struct ice_aqc_get_health_status *cmd;
+ u16 count;
+
+ health_info = (struct ice_aqc_health_status_elem *)event->msg_buf;
+ cmd = libie_aq_raw(&event->desc);
+ count = le16_to_cpu(cmd->health_status_count);
+
+ if (count > (event->buf_len / sizeof(*health_info))) {
+ dev_err(ice_pf_to_dev(pf), "Received a health status event with invalid element count\n");
+ return;
+ }
+
+ for (size_t i = 0; i < count; i++) {
+ const struct ice_health_status *health_code;
+ u16 status_code;
+
+ status_code = le16_to_cpu(health_info->health_status_code);
+ health_code = ice_get_health_status(status_code);
+
+ if (health_code) {
+ switch (le16_to_cpu(health_info->event_source)) {
+ case ICE_AQC_HEALTH_STATUS_GLOBAL:
+ pf->health_reporters.fw_status = *health_info;
+ devlink_health_report(pf->health_reporters.fw,
+ "FW syndrome reported", NULL);
+ break;
+ case ICE_AQC_HEALTH_STATUS_PF:
+ case ICE_AQC_HEALTH_STATUS_PORT:
+ pf->health_reporters.port_status = *health_info;
+ devlink_health_report(pf->health_reporters.port,
+ "Port syndrome reported", NULL);
+ break;
+ default:
+ dev_err(ice_pf_to_dev(pf), "Health code with unknown source\n");
+ }
+ } else {
+ u32 data1, data2;
+ u16 source;
+
+ source = le16_to_cpu(health_info->event_source);
+ data1 = le32_to_cpu(health_info->internal_data1);
+ data2 = le32_to_cpu(health_info->internal_data2);
+ dev_dbg(ice_pf_to_dev(pf),
+ "Received internal health status code 0x%08x, source: 0x%08x, data1: 0x%08x, data2: 0x%08x",
+ status_code, source, data1, data2);
+ }
+ health_info++;
+ }
+}
+
+/**
+ * ice_devlink_health_report - boilerplate to call given @reporter
+ *
+ * @reporter: devlink health reporter to call, do nothing on NULL
+ * @msg: message to pass up, "event name" is fine
+ * @priv_ctx: typically some event struct
+ */
+static void ice_devlink_health_report(struct devlink_health_reporter *reporter,
+ const char *msg, void *priv_ctx)
+{
+ if (!reporter)
+ return;
+
+ /* We do not do auto-recovery, so the return value of the function below
+ * is always 0 and is intentionally ignored.
+ */
+ devlink_health_report(reporter, msg, priv_ctx);
+}
+
+struct ice_mdd_event {
+ enum ice_mdd_src src;
+ u16 vf_num;
+ u16 queue;
+ u8 pf_num;
+ u8 event;
+};
+
+static const char *ice_mdd_src_to_str(enum ice_mdd_src src)
+{
+ switch (src) {
+ case ICE_MDD_SRC_TX_PQM:
+ return "tx_pqm";
+ case ICE_MDD_SRC_TX_TCLAN:
+ return "tx_tclan";
+ case ICE_MDD_SRC_TX_TDPU:
+ return "tx_tdpu";
+ case ICE_MDD_SRC_RX:
+ return "rx";
+ default:
+ return "invalid";
+ }
+}
+
+static int
+ice_mdd_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_mdd_event *mdd_event = priv_ctx;
+ const char *src;
+
+ if (!mdd_event)
+ return 0;
+
+ src = ice_mdd_src_to_str(mdd_event->src);
+
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_put(fmsg, "src", src);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, pf_num);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, vf_num);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, event);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, queue);
+ devlink_fmsg_obj_nest_end(fmsg);
+
+ return 0;
+}
+
+/**
+ * ice_report_mdd_event - Report an MDD event through devlink health
+ * @pf: the PF device structure
+ * @src: the HW block that was the source of this MDD event
+ * @pf_num: the pf_num on which the MDD event occurred
+ * @vf_num: the vf_num on which the MDD event occurred
+ * @event: the event type of the MDD event
+ * @queue: the queue on which the MDD event occurred
+ *
+ * Report an MDD event that has occurred on this PF.
+ */
+void ice_report_mdd_event(struct ice_pf *pf, enum ice_mdd_src src, u8 pf_num,
+ u16 vf_num, u8 event, u16 queue)
+{
+ struct ice_mdd_event ev = {
+ .src = src,
+ .pf_num = pf_num,
+ .vf_num = vf_num,
+ .event = event,
+ .queue = queue,
+ };
+
+ ice_devlink_health_report(pf->health_reporters.mdd, "MDD event", &ev);
+}
+
+/**
+ * ice_fmsg_put_ptr - put hex value of pointer into fmsg
+ *
+ * @fmsg: devlink fmsg under construction
+ * @name: name to pass
+ * @ptr: 64 bit value to print as hex and put into fmsg
+ */
+static void ice_fmsg_put_ptr(struct devlink_fmsg *fmsg, const char *name,
+ void *ptr)
+{
+ char buf[sizeof(ptr) * 3];
+
+ sprintf(buf, "%p", ptr);
+ devlink_fmsg_put(fmsg, name, buf);
+}
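The buffer is sized as sizeof(ptr) * 3, i.e. 24 bytes on a 64-bit kernel, which comfortably covers the at most 16 hex characters plus terminating NUL that "%p" produces for a (hashed) pointer. A caller later in this file uses it as, for example:

	ice_fmsg_put_ptr(fmsg, "desc-ptr", event->tx_ring->desc);
	/* emits a string pair such as "desc-ptr": "00000000a1b2c3d4" (value assumed) */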
+
+struct ice_tx_hang_event {
+ u32 head;
+ u32 intr;
+ u16 vsi_num;
+ u16 queue;
+ u16 next_to_clean;
+ u16 next_to_use;
+ struct ice_tx_ring *tx_ring;
+};
+
+static int ice_tx_hang_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_tx_hang_event *event = priv_ctx;
+ struct sk_buff *skb;
+
+ if (!event)
+ return 0;
+
+ skb = event->tx_ring->tx_buf->skb;
+ devlink_fmsg_obj_nest_start(fmsg);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, head);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, intr);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, vsi_num);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, queue);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, next_to_clean);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, next_to_use);
+ devlink_fmsg_put(fmsg, "irq-mapping", event->tx_ring->q_vector->name);
+ ice_fmsg_put_ptr(fmsg, "desc-ptr", event->tx_ring->desc);
+ ice_fmsg_put_ptr(fmsg, "dma-ptr", (void *)(long)event->tx_ring->dma);
+ ice_fmsg_put_ptr(fmsg, "skb-ptr", skb);
+ devlink_fmsg_binary_pair_put(fmsg, "desc", event->tx_ring->desc,
+ event->tx_ring->count * sizeof(struct ice_tx_desc));
+ devlink_fmsg_dump_skb(fmsg, skb);
+ devlink_fmsg_obj_nest_end(fmsg);
+
+ return 0;
+}
+
+void ice_prep_tx_hang_report(struct ice_pf *pf, struct ice_tx_ring *tx_ring,
+ u16 vsi_num, u32 head, u32 intr)
+{
+ struct ice_health_tx_hang_buf *buf = &pf->health_reporters.tx_hang_buf;
+
+ buf->tx_ring = tx_ring;
+ buf->vsi_num = vsi_num;
+ buf->head = head;
+ buf->intr = intr;
+}
+
+void ice_report_tx_hang(struct ice_pf *pf)
+{
+ struct ice_health_tx_hang_buf *buf = &pf->health_reporters.tx_hang_buf;
+ struct ice_tx_ring *tx_ring = buf->tx_ring;
+
+ struct ice_tx_hang_event ev = {
+ .head = buf->head,
+ .intr = buf->intr,
+ .vsi_num = buf->vsi_num,
+ .queue = tx_ring->q_index,
+ .next_to_clean = tx_ring->next_to_clean,
+ .next_to_use = tx_ring->next_to_use,
+ .tx_ring = tx_ring,
+ };
+
+ ice_devlink_health_report(pf->health_reporters.tx_hang, "Tx hang", &ev);
+}
+
+static struct devlink_health_reporter *
+ice_init_devlink_rep(struct ice_pf *pf,
+ const struct devlink_health_reporter_ops *ops)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ struct devlink_health_reporter *rep;
+
+ rep = devl_health_reporter_create(devlink, ops, pf);
+ if (IS_ERR(rep)) {
+ struct device *dev = ice_pf_to_dev(pf);
+
+ dev_err(dev, "failed to create devlink %s health report er",
+ ops->name);
+ return NULL;
+ }
+ return rep;
+}
+
+#define ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field) \
+ ._field = ice_##_name##_reporter_##_field,
+
+#define ICE_DEFINE_HEALTH_REPORTER_OPS_1(_name, _field1) \
+ static const struct devlink_health_reporter_ops ice_##_name##_reporter_ops = { \
+ .name = #_name, \
+ ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field1) \
+ }
+
+#define ICE_DEFINE_HEALTH_REPORTER_OPS_2(_name, _field1, _field2) \
+ static const struct devlink_health_reporter_ops ice_##_name##_reporter_ops = { \
+ .name = #_name, \
+ ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field1) \
+ ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field2) \
+ }
+
+ICE_DEFINE_HEALTH_REPORTER_OPS_1(mdd, dump);
+ICE_DEFINE_HEALTH_REPORTER_OPS_1(tx_hang, dump);
+ICE_DEFINE_HEALTH_REPORTER_OPS_2(fw, dump, diagnose);
+ICE_DEFINE_HEALTH_REPORTER_OPS_2(port, dump, diagnose);
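For reference, ICE_DEFINE_HEALTH_REPORTER_OPS_2(fw, dump, diagnose) expands to roughly:

	static const struct devlink_health_reporter_ops ice_fw_reporter_ops = {
		.name = "fw",
		.dump = ice_fw_reporter_dump,
		.diagnose = ice_fw_reporter_diagnose,
	};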
+
+/**
+ * ice_health_init - allocate and init all ice devlink health reporters and
+ * accompanying data
+ *
+ * @pf: PF struct
+ */
+void ice_health_init(struct ice_pf *pf)
+{
+ struct ice_health *reps = &pf->health_reporters;
+
+ reps->mdd = ice_init_devlink_rep(pf, &ice_mdd_reporter_ops);
+ reps->tx_hang = ice_init_devlink_rep(pf, &ice_tx_hang_reporter_ops);
+
+ if (ice_is_fw_health_report_supported(&pf->hw)) {
+ reps->fw = ice_init_devlink_rep(pf, &ice_fw_reporter_ops);
+ reps->port = ice_init_devlink_rep(pf, &ice_port_reporter_ops);
+ ice_config_health_events(pf, true);
+ }
+}
+
+/**
+ * ice_deinit_devl_reporter - destroy given devlink health reporter
+ * @reporter: reporter to destroy
+ */
+static void ice_deinit_devl_reporter(struct devlink_health_reporter *reporter)
+{
+ if (reporter)
+ devl_health_reporter_destroy(reporter);
+}
+
+/**
+ * ice_health_deinit - deallocate all ice devlink health reporters and
+ * accompanying data
+ *
+ * @pf: PF struct
+ */
+void ice_health_deinit(struct ice_pf *pf)
+{
+ ice_deinit_devl_reporter(pf->health_reporters.mdd);
+ ice_deinit_devl_reporter(pf->health_reporters.tx_hang);
+ if (ice_is_fw_health_report_supported(&pf->hw)) {
+ ice_deinit_devl_reporter(pf->health_reporters.fw);
+ ice_deinit_devl_reporter(pf->health_reporters.port);
+ ice_config_health_events(pf, false);
+ }
+}
+
+static
+void ice_health_assign_healthy_state(struct devlink_health_reporter *reporter)
+{
+ if (reporter)
+ devlink_health_reporter_state_update(reporter,
+ DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
+}
+
+/**
+ * ice_health_clear - clear devlink health issues after a reset
+ * @pf: the PF device structure
+ *
+ * Mark the PF in healthy state again after a reset has completed.
+ */
+void ice_health_clear(struct ice_pf *pf)
+{
+ ice_health_assign_healthy_state(pf->health_reporters.mdd);
+ ice_health_assign_healthy_state(pf->health_reporters.tx_hang);
+}
diff --git a/drivers/net/ethernet/intel/ice/devlink/health.h b/drivers/net/ethernet/intel/ice/devlink/health.h
new file mode 100644
index 000000000000..5edfc4d2adce
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/devlink/health.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024, Intel Corporation. */
+
+#ifndef _HEALTH_H_
+#define _HEALTH_H_
+
+#include <linux/types.h>
+
+/**
+ * DOC: health.h
+ *
+ * This header file contains everything needed by the devlink health
+ * mechanism in the ice driver.
+ */
+
+struct ice_aqc_health_status_elem;
+struct ice_pf;
+struct ice_tx_ring;
+struct ice_rq_event_info;
+
+enum ice_mdd_src {
+ ICE_MDD_SRC_TX_PQM,
+ ICE_MDD_SRC_TX_TCLAN,
+ ICE_MDD_SRC_TX_TDPU,
+ ICE_MDD_SRC_RX,
+};
+
+/**
+ * struct ice_health - stores ice devlink health reporters and accompanying data
+ * @fw: devlink health reporter for FW Health Status events
+ * @mdd: devlink health reporter for MDD detection event
+ * @port: devlink health reporter for Port Health Status events
+ * @tx_hang: devlink health reporter for tx_hang event
+ * @tx_hang_buf: pre-allocated place to put info for Tx hang reporter from
+ * non-sleeping context
+ * @tx_ring: ring that the hang occurred on
+ * @head: descriptor head
+ * @intr: interrupt register value
+ * @vsi_num: VSI owning the queue that the hang occurred on
+ * @fw_status: buffer for last received FW Status event
+ * @port_status: buffer for last received Port Status event
+ */
+struct ice_health {
+ struct devlink_health_reporter *fw;
+ struct devlink_health_reporter *mdd;
+ struct devlink_health_reporter *port;
+ struct devlink_health_reporter *tx_hang;
+ struct_group_tagged(ice_health_tx_hang_buf, tx_hang_buf,
+ struct ice_tx_ring *tx_ring;
+ u32 head;
+ u32 intr;
+ u16 vsi_num;
+ );
+ struct ice_aqc_health_status_elem fw_status;
+ struct ice_aqc_health_status_elem port_status;
+};
+
+void ice_process_health_status_event(struct ice_pf *pf,
+ struct ice_rq_event_info *event);
+
+void ice_health_init(struct ice_pf *pf);
+void ice_health_deinit(struct ice_pf *pf);
+void ice_health_clear(struct ice_pf *pf);
+
+void ice_prep_tx_hang_report(struct ice_pf *pf, struct ice_tx_ring *tx_ring,
+ u16 vsi_num, u32 head, u32 intr);
+void ice_report_mdd_event(struct ice_pf *pf, enum ice_mdd_src src, u8 pf_num,
+ u16 vf_num, u8 event, u16 queue);
+void ice_report_tx_hang(struct ice_pf *pf);
+
+#endif /* _HEALTH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c b/drivers/net/ethernet/intel/ice/devlink/port.c
index c6779d9dffff..63fb36fc4b3d 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
+++ b/drivers/net/ethernet/intel/ice/devlink/port.c
@@ -5,7 +5,7 @@
#include "ice.h"
#include "devlink.h"
-#include "devlink_port.h"
+#include "port.h"
#include "ice_lib.h"
#include "ice_fltr.h"
@@ -30,6 +30,8 @@ static const char *ice_devlink_port_opt_speed_str(u8 speed)
return "10";
case ICE_AQC_PORT_OPT_MAX_LANE_25G:
return "25";
+ case ICE_AQC_PORT_OPT_MAX_LANE_40G:
+ return "40";
case ICE_AQC_PORT_OPT_MAX_LANE_50G:
return "50";
case ICE_AQC_PORT_OPT_MAX_LANE_100G:
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h b/drivers/net/ethernet/intel/ice/devlink/port.h
index d60efc340945..e89ddd60eeac 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h
+++ b/drivers/net/ethernet/intel/ice/devlink/port.h
@@ -11,7 +11,7 @@
* struct ice_dynamic_port - Track dynamically added devlink port instance
* @hw_addr: the HW address for this port
* @active: true if the port has been activated
- * @attached: true it the prot is attached
+ * @attached: true if the port is attached
* @devlink_port: the associated devlink port structure
* @pf: pointer to the PF private structure
* @vsi: the VSI associated with this port
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 680a81961ba1..147aaee192a7 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -67,6 +67,7 @@
#include "ice_sriov.h"
#include "ice_vf_mbx.h"
#include "ice_ptp.h"
+#include "ice_tspll.h"
#include "ice_fdir.h"
#include "ice_xsk.h"
#include "ice_arfs.h"
@@ -78,11 +79,16 @@
#include "ice_irq.h"
#include "ice_dpll.h"
#include "ice_adapter.h"
+#include "devlink/health.h"
#define ICE_BAR0 0
#define ICE_REQ_DESC_MULTIPLE 32
#define ICE_MIN_NUM_DESC 64
-#define ICE_MAX_NUM_DESC 8160
+#define ICE_MAX_NUM_DESC_E810 8160
+#define ICE_MAX_NUM_DESC_E830 8096
+#define ICE_MAX_NUM_DESC_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? \
+ ICE_MAX_NUM_DESC_E830 : \
+ ICE_MAX_NUM_DESC_E810)
#define ICE_DFLT_MIN_RX_DESC 512
#define ICE_DFLT_NUM_TX_DESC 256
#define ICE_DFLT_NUM_RX_DESC 2048
@@ -96,9 +102,6 @@
#define ICE_MIN_LAN_OICR_MSIX 1
#define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
#define ICE_FDIR_MSIX 2
-#define ICE_RDMA_NUM_AEQ_MSIX 4
-#define ICE_MIN_RDMA_MSIX 2
-#define ICE_ESWITCH_MSIX 1
#define ICE_NO_VSI 0xffff
#define ICE_VSI_MAP_CONTIG 0
#define ICE_VSI_MAP_SCATTER 1
@@ -181,11 +184,9 @@
#define ice_for_each_chnl_tc(i) \
for ((i) = ICE_CHNL_START_TC; (i) < ICE_CHNL_MAX_TC; (i)++)
-#define ICE_UCAST_PROMISC_BITS (ICE_PROMISC_UCAST_TX | ICE_PROMISC_UCAST_RX)
+#define ICE_UCAST_PROMISC_BITS ICE_PROMISC_UCAST_RX
-#define ICE_UCAST_VLAN_PROMISC_BITS (ICE_PROMISC_UCAST_TX | \
- ICE_PROMISC_UCAST_RX | \
- ICE_PROMISC_VLAN_TX | \
+#define ICE_UCAST_VLAN_PROMISC_BITS (ICE_PROMISC_UCAST_RX | \
ICE_PROMISC_VLAN_RX)
#define ICE_MCAST_PROMISC_BITS (ICE_PROMISC_MCAST_TX | ICE_PROMISC_MCAST_RX)
@@ -197,16 +198,17 @@
#define ice_pf_to_dev(pf) (&((pf)->pdev->dev))
-#define ice_pf_src_tmr_owned(pf) ((pf)->hw.func_caps.ts_func_info.src_tmr_owned)
-
enum ice_feature {
ICE_F_DSCP,
ICE_F_PHY_RCLK,
ICE_F_SMA_CTRL,
ICE_F_CGU,
ICE_F_GNSS,
+ ICE_F_TXTIME,
+ ICE_F_GCS,
ICE_F_ROCE_LAG,
ICE_F_SRIOV_LAG,
+ ICE_F_SRIOV_AA_LAG,
ICE_F_MBX_LIMIT,
ICE_F_MAX
};
@@ -349,6 +351,7 @@ struct ice_vsi {
u16 num_q_vectors;
/* tell if only dynamic irq allocation is allowed */
bool irq_dyn_alloc;
+ bool hsplit:1;
u16 vsi_num; /* HW (absolute) index of this VSI */
u16 idx; /* software index in pf->vsi[] */
@@ -372,6 +375,8 @@ struct ice_vsi {
spinlock_t arfs_lock; /* protects aRFS hash table and filter state */
atomic_t *arfs_last_fltr_id;
+ u16 max_frame;
+
struct ice_aqc_vsi_props info; /* VSI properties */
struct ice_vsi_vlan_info vlan_info; /* vlan config to be restored */
@@ -404,7 +409,6 @@ struct ice_vsi {
u16 req_rxq; /* User requested Rx queues */
u16 num_rx_desc;
u16 num_tx_desc;
- u16 qset_handle[ICE_MAX_TRAFFIC_CLASS];
struct ice_tc_cfg tc_cfg;
struct bpf_prog *xdp_prog;
struct ice_tx_ring **xdp_rings; /* XDP ring array */
@@ -479,9 +483,6 @@ struct ice_q_vector {
struct ice_ring_container rx;
struct ice_ring_container tx;
- cpumask_t affinity_mask;
- struct irq_affinity_notify affinity_notify;
-
struct ice_channel *ch;
char name[ICE_INT_NAME_STR_LEN];
@@ -511,16 +512,17 @@ enum ice_pf_flags {
ICE_FLAG_MOD_POWER_UNSUPPORTED,
ICE_FLAG_PHY_FW_LOAD_FAILED,
ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
- ICE_FLAG_LEGACY_RX,
ICE_FLAG_VF_TRUE_PROMISC_ENA,
ICE_FLAG_MDD_AUTO_RESET_VF,
ICE_FLAG_VF_VLAN_PRUNING,
ICE_FLAG_LINK_LENIENT_MODE_ENA,
ICE_FLAG_PLUG_AUX_DEV,
ICE_FLAG_UNPLUG_AUX_DEV,
+ ICE_FLAG_AUX_DEV_CREATED,
ICE_FLAG_MTU_CHANGED,
ICE_FLAG_GNSS, /* GNSS successfully initialized */
ICE_FLAG_DPLL, /* SyncE/PTP dplls initialized */
+ ICE_FLAG_LLDP_AQ_FLTR,
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -543,6 +545,14 @@ struct ice_agg_node {
u8 valid;
};
+struct ice_pf_msix {
+ u32 cur;
+ u32 min;
+ u32 max;
+ u32 total;
+ u32 rest;
+};
+
struct ice_pf {
struct pci_dev *pdev;
struct ice_adapter *adapter;
@@ -555,15 +565,8 @@ struct ice_pf {
struct devlink_port devlink_port;
/* OS reserved IRQ details */
- struct msix_entry *msix_entries;
struct ice_irq_tracker irq_tracker;
- /* First MSIX vector used by SR-IOV VFs. Calculated by subtracting the
- * number of MSIX vectors needed for all SR-IOV VFs from the number of
- * MSIX vectors allowed on this PF.
- */
- u16 sriov_base_vector;
- unsigned long *sriov_irq_bm; /* bitmap to track irq usage */
- u16 sriov_irq_size; /* size of the irq_bm bitmap */
+ struct ice_virt_irq_tracker virt_irq_tracker;
u16 ctrl_vsi_idx; /* control VSI index in pf->vsi array */
@@ -572,9 +575,6 @@ struct ice_pf {
struct ice_sw *first_sw; /* first switch created by firmware */
u16 eswitch_mode; /* current mode of eswitch */
struct dentry *ice_debugfs_pf;
- struct dentry *ice_debugfs_pf_fwlog;
- /* keep track of all the dentrys for FW log modules */
- struct dentry **ice_debugfs_pf_fwlog_modules;
struct ice_vfs vfs;
DECLARE_BITMAP(features, ICE_F_MAX);
DECLARE_BITMAP(state, ICE_STATE_NBITS);
@@ -582,6 +582,7 @@ struct ice_pf {
DECLARE_BITMAP(misc_thread, ICE_MISC_THREAD_NBITS);
unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */
unsigned long *avail_rxqs; /* bitmap to track PF Rx queue usage */
+ unsigned long *txtime_txqs; /* bitmap to track PF Tx Time queue */
unsigned long serv_tmr_period;
unsigned long serv_tmr_prev;
struct timer_list serv_tmr;
@@ -596,7 +597,6 @@ struct ice_pf {
struct gnss_serial *gnss_serial;
struct gnss_device *gnss_dev;
u16 num_rdma_msix; /* Total MSIX vectors for RDMA driver */
- u16 rdma_base_vector;
/* spinlock to protect the AdminQ wait list */
spinlock_t aq_wait_lock;
@@ -613,7 +613,7 @@ struct ice_pf {
struct msi_map ll_ts_irq; /* LL_TS interrupt MSIX vector */
u16 max_pf_txqs; /* Total Tx queues PF wide */
u16 max_pf_rxqs; /* Total Rx queues PF wide */
- u16 num_lan_msix; /* Total MSIX vectors for base driver */
+ struct ice_pf_msix msix;
u16 num_lan_tx; /* num LAN Tx queues setup */
u16 num_lan_rx; /* num LAN Rx queues setup */
u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */
@@ -622,6 +622,7 @@ struct ice_pf {
u16 globr_count; /* Global reset count */
u16 empr_count; /* EMP reset count */
u16 pfr_count; /* PF reset count */
+ u32 link_down_events;
u8 wol_ena : 1; /* software state of WoL */
u32 wakeup_reason; /* last wakeup reason */
@@ -629,14 +630,12 @@ struct ice_pf {
struct ice_hw_port_stats stats_prev;
struct ice_hw hw;
u8 stat_prev_loaded:1; /* has previous stats been loaded */
- u8 rdma_mode;
u16 dcbx_cap;
u32 tx_timeout_count;
unsigned long tx_timeout_last_recovery;
u32 tx_timeout_recovery_level;
char int_name[ICE_INT_NAME_STR_LEN];
char int_name_ll_ts[ICE_INT_NAME_STR_LEN];
- struct auxiliary_device *adev;
int aux_idx;
u32 sw_int_count;
/* count of tc_flower filters specific to channel (aka where filter
@@ -667,6 +666,8 @@ struct ice_pf {
struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
struct ice_dplls dplls;
struct device *hwmon_dev;
+ struct ice_health health_reporters;
+ struct iidc_rdma_core_dev_info *cdev_info;
u8 num_quanta_prof_used;
};
@@ -756,6 +757,31 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
}
/**
+ * ice_is_txtime_ena - check if Tx Time is enabled on the Tx ring
+ * @ring: pointer to Tx ring
+ *
+ * Return: true if the Tx ring has Tx Time enabled, false otherwise.
+ */
+static inline bool ice_is_txtime_ena(const struct ice_tx_ring *ring)
+{
+ struct ice_vsi *vsi = ring->vsi;
+ struct ice_pf *pf = vsi->back;
+
+ return test_bit(ring->q_index, pf->txtime_txqs);
+}
+
+/**
+ * ice_is_txtime_cfg - check if Tx Time is configured on the Tx ring
+ * @ring: pointer to Tx ring
+ *
+ * Return: true if the Tx ring is configured for Tx Time, false otherwise.
+ */
+static inline bool ice_is_txtime_cfg(const struct ice_tx_ring *ring)
+{
+ return !!(ring->flags & ICE_TX_FLAGS_TXTIME);
+}
+
+/**
* ice_get_xp_from_qid - get ZC XSK buffer pool bound to a queue ID
* @vsi: pointer to VSI
* @qid: index of a queue to look at XSK buff pool presence
@@ -912,11 +938,10 @@ static inline bool ice_is_adq_active(struct ice_pf *pf)
return false;
}
-void ice_debugfs_fwlog_init(struct ice_pf *pf);
+int ice_debugfs_pf_init(struct ice_pf *pf);
void ice_debugfs_pf_deinit(struct ice_pf *pf);
void ice_debugfs_init(void);
void ice_debugfs_exit(void);
-void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module);
bool netif_is_ice(const struct net_device *dev);
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
@@ -966,7 +991,6 @@ int ice_plug_aux_dev(struct ice_pf *pf);
void ice_unplug_aux_dev(struct ice_pf *pf);
int ice_init_rdma(struct ice_pf *pf);
void ice_deinit_rdma(struct ice_pf *pf);
-const char *ice_aq_str(enum ice_aq_err aq_err);
bool ice_is_wol_supported(struct ice_hw *hw);
void ice_fdir_del_all_fltrs(struct ice_vsi *vsi);
int
@@ -1007,11 +1031,15 @@ int ice_open(struct net_device *netdev);
int ice_open_internal(struct net_device *netdev);
int ice_stop(struct net_device *netdev);
void ice_service_task_schedule(struct ice_pf *pf);
+void ice_start_service_task(struct ice_pf *pf);
int ice_load(struct ice_pf *pf);
void ice_unload(struct ice_pf *pf);
void ice_adv_lnk_speed_maps_init(void);
+void ice_init_dev_hw(struct ice_pf *pf);
int ice_init_dev(struct ice_pf *pf);
void ice_deinit_dev(struct ice_pf *pf);
+int ice_init_pf(struct ice_pf *pf);
+void ice_deinit_pf(struct ice_pf *pf);
int ice_change_mtu(struct net_device *netdev, int new_mtu);
void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue);
int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp);
@@ -1047,10 +1075,63 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}
-static inline enum ice_phy_model ice_get_phy_model(const struct ice_hw *hw)
+extern const struct xdp_metadata_ops ice_xdp_md_ops;
+
+/**
+ * ice_is_dual - Check if given config is multi-NAC
+ * @hw: pointer to HW structure
+ *
+ * Return: true if the device is running in multi-NAC (Network
+ * Acceleration Complex) configuration variant, false otherwise
+ * (always false for non-E825 devices).
+ */
+static inline bool ice_is_dual(struct ice_hw *hw)
+{
+ return hw->mac_type == ICE_MAC_GENERIC_3K_E825 &&
+ (hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_DUAL_M);
+}
+
+/**
+ * ice_is_primary - Check if given device belongs to the primary complex
+ * @hw: pointer to HW structure
+ *
+ * Check if given PF/HW is running on primary complex in multi-NAC
+ * configuration.
+ *
+ * Return: true if the device is on the primary complex, false otherwise (always true
+ * for non-E825 devices).
+ */
+static inline bool ice_is_primary(struct ice_hw *hw)
{
- return hw->ptp.phy_model;
+ return hw->mac_type != ICE_MAC_GENERIC_3K_E825 ||
+ !ice_is_dual(hw) ||
+ (hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M);
}
-extern const struct xdp_metadata_ops ice_xdp_md_ops;
+/**
+ * ice_pf_src_tmr_owned - Check if a primary timer is owned by PF
+ * @pf: pointer to PF structure
+ *
+ * Return: true if PF owns primary timer, false otherwise.
+ */
+static inline bool ice_pf_src_tmr_owned(struct ice_pf *pf)
+{
+ return pf->hw.func_caps.ts_func_info.src_tmr_owned &&
+ ice_is_primary(&pf->hw);
+}
+
+/**
+ * ice_get_primary_hw - Get pointer to primary ice_hw structure
+ * @pf: pointer to PF structure
+ *
+ * Return: A pointer to ice_hw structure with access to timesync
+ * register space.
+ */
+static inline struct ice_hw *ice_get_primary_hw(struct ice_pf *pf)
+{
+ if (!pf->adapter->ctrl_pf)
+ return &pf->hw;
+ else
+ return &pf->adapter->ctrl_pf->hw;
+}
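A short usage sketch for the multi-NAC helpers above (the caller names are assumptions, not part of this hunk): clock ownership is only claimed by the PF that owns the source timer on the primary complex, and timesync register access always goes through the primary NAC's HW instance.

	struct ice_hw *hw = ice_get_primary_hw(pf);	/* GLTSYN registers live on the primary NAC */

	if (ice_pf_src_tmr_owned(pf))
		init_clock_owner(pf);			/* hypothetical owner-only init */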
#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c
index 01a08cfd0090..0a8a48cd4bce 100644
--- a/drivers/net/ethernet/intel/ice/ice_adapter.c
+++ b/drivers/net/ethernet/intel/ice/ice_adapter.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright Red Hat
-#include <linux/bitfield.h>
#include <linux/cleanup.h>
#include <linux/mutex.h>
#include <linux/pci.h>
@@ -14,32 +13,45 @@
static DEFINE_XARRAY(ice_adapters);
static DEFINE_MUTEX(ice_adapters_mutex);
-/* PCI bus number is 8 bits. Slot is 5 bits. Domain can have the rest. */
-#define INDEX_FIELD_DOMAIN GENMASK(BITS_PER_LONG - 1, 13)
-#define INDEX_FIELD_DEV GENMASK(31, 16)
-#define INDEX_FIELD_BUS GENMASK(12, 5)
-#define INDEX_FIELD_SLOT GENMASK(4, 0)
+#define ICE_ADAPTER_FIXED_INDEX BIT_ULL(63)
-static unsigned long ice_adapter_index(const struct pci_dev *pdev)
-{
- unsigned int domain = pci_domain_nr(pdev->bus);
-
- WARN_ON(domain > FIELD_MAX(INDEX_FIELD_DOMAIN));
+#define ICE_ADAPTER_INDEX_E825C \
+ (ICE_DEV_ID_E825C_BACKPLANE | ICE_ADAPTER_FIXED_INDEX)
+static u64 ice_adapter_index(struct pci_dev *pdev)
+{
switch (pdev->device) {
case ICE_DEV_ID_E825C_BACKPLANE:
case ICE_DEV_ID_E825C_QSFP:
case ICE_DEV_ID_E825C_SFP:
case ICE_DEV_ID_E825C_SGMII:
- return FIELD_PREP(INDEX_FIELD_DEV, pdev->device);
+ /* E825C devices have multiple NACs which are connected to the
+ * same clock source, and which must share the same
+ * ice_adapter structure. We can't use the serial number since
+ * each NAC has its own NVM generated with its own unique
+ * Device Serial Number. Instead, rely on the embedded nature
+ * of the E825C devices, and use a fixed index. This relies on
+ * the fact that all E825C physical functions in a given
+ * system are part of the same overall device.
+ */
+ return ICE_ADAPTER_INDEX_E825C;
default:
- return FIELD_PREP(INDEX_FIELD_DOMAIN, domain) |
- FIELD_PREP(INDEX_FIELD_BUS, pdev->bus->number) |
- FIELD_PREP(INDEX_FIELD_SLOT, PCI_SLOT(pdev->devfn));
+ return pci_get_dsn(pdev) & ~ICE_ADAPTER_FIXED_INDEX;
}
}
-static struct ice_adapter *ice_adapter_new(void)
+static unsigned long ice_adapter_xa_index(struct pci_dev *pdev)
+{
+ u64 index = ice_adapter_index(pdev);
+
+#if BITS_PER_LONG == 64
+ return index;
+#else
+ return (u32)index ^ (u32)(index >> 32);
+#endif
+}
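A worked example of the 32-bit fold (values assumed): a DSN-derived index of 0x123456789abcdef0 becomes 0x12345678 ^ 0x9abcdef0 = 0x88888888 as the xarray key, while adapter->index keeps the full 64-bit value so that ice_adapter_get() can still WARN if two distinct adapters ever fold to the same key.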
+
+static struct ice_adapter *ice_adapter_new(struct pci_dev *pdev)
{
struct ice_adapter *adapter;
@@ -47,7 +59,9 @@ static struct ice_adapter *ice_adapter_new(void)
if (!adapter)
return NULL;
+ adapter->index = ice_adapter_index(pdev);
spin_lock_init(&adapter->ptp_gltsyn_time_lock);
+ spin_lock_init(&adapter->txq_ctx_lock);
refcount_set(&adapter->refcount, 1);
mutex_init(&adapter->ports.lock);
@@ -77,25 +91,29 @@ static void ice_adapter_free(struct ice_adapter *adapter)
* Return: Pointer to ice_adapter on success.
* ERR_PTR() on error. -ENOMEM is the only possible error.
*/
-struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev)
+struct ice_adapter *ice_adapter_get(struct pci_dev *pdev)
{
- unsigned long index = ice_adapter_index(pdev);
struct ice_adapter *adapter;
+ unsigned long index;
int err;
+ index = ice_adapter_xa_index(pdev);
scoped_guard(mutex, &ice_adapters_mutex) {
- err = xa_insert(&ice_adapters, index, NULL, GFP_KERNEL);
- if (err == -EBUSY) {
- adapter = xa_load(&ice_adapters, index);
+ adapter = xa_load(&ice_adapters, index);
+ if (adapter) {
refcount_inc(&adapter->refcount);
+ WARN_ON_ONCE(adapter->index != ice_adapter_index(pdev));
return adapter;
}
+ err = xa_reserve(&ice_adapters, index, GFP_KERNEL);
if (err)
return ERR_PTR(err);
- adapter = ice_adapter_new();
- if (!adapter)
+ adapter = ice_adapter_new(pdev);
+ if (!adapter) {
+ xa_release(&ice_adapters, index);
return ERR_PTR(-ENOMEM);
+ }
xa_store(&ice_adapters, index, adapter, GFP_KERNEL);
}
return adapter;
@@ -110,11 +128,12 @@ struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev)
*
* Context: Process, may sleep.
*/
-void ice_adapter_put(const struct pci_dev *pdev)
+void ice_adapter_put(struct pci_dev *pdev)
{
- unsigned long index = ice_adapter_index(pdev);
struct ice_adapter *adapter;
+ unsigned long index;
+ index = ice_adapter_xa_index(pdev);
scoped_guard(mutex, &ice_adapters_mutex) {
adapter = xa_load(&ice_adapters, index);
if (WARN_ON(!adapter))
diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.h b/drivers/net/ethernet/intel/ice/ice_adapter.h
index e233225848b3..e95266c7f20b 100644
--- a/drivers/net/ethernet/intel/ice/ice_adapter.h
+++ b/drivers/net/ethernet/intel/ice/ice_adapter.h
@@ -27,22 +27,27 @@ struct ice_port_list {
/**
* struct ice_adapter - PCI adapter resources shared across PFs
+ * @refcount: Reference count. struct ice_pf objects hold the references.
* @ptp_gltsyn_time_lock: Spinlock protecting access to the GLTSYN_TIME
* register of the PTP clock.
- * @refcount: Reference count. struct ice_pf objects hold the references.
+ * @txq_ctx_lock: Spinlock protecting access to the GLCOMM_QTX_CNTX_CTL register
* @ctrl_pf: Control PF of the adapter
* @ports: Ports list
+ * @index: 64-bit index cached for collision detection on 32-bit systems
*/
struct ice_adapter {
refcount_t refcount;
/* For access to the GLTSYN_TIME register */
spinlock_t ptp_gltsyn_time_lock;
+ /* For access to GLCOMM_QTX_CNTX_CTL register */
+ spinlock_t txq_ctx_lock;
struct ice_pf *ctrl_pf;
struct ice_port_list ports;
+ u64 index;
};
-struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev);
-void ice_adapter_put(const struct pci_dev *pdev);
+struct ice_adapter *ice_adapter_get(struct pci_dev *pdev);
+void ice_adapter_put(struct pci_dev *pdev);
#endif /* _ICE_ADAPTER_H */
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 1f01f3501d6b..859e9c66f3e7 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -4,6 +4,8 @@
#ifndef _ICE_ADMINQ_CMD_H_
#define _ICE_ADMINQ_CMD_H_
+#include <linux/net/intel/libie/adminq.h>
+
/* This header file defines the Admin Queue commands, error codes and
* descriptor format. It is shared between Firmware and Software.
*/
@@ -12,37 +14,28 @@
#define ICE_AQC_TOPO_MAX_LEVEL_NUM 0x9
#define ICE_AQ_SET_MAC_FRAME_SIZE_MAX 9728
-struct ice_aqc_generic {
- __le32 param0;
- __le32 param1;
- __le32 addr_high;
- __le32 addr_low;
-};
+#define ICE_RXQ_CTX_SIZE_DWORDS 8
+#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
-/* Get version (direct 0x0001) */
-struct ice_aqc_get_ver {
- __le32 rom_ver;
- __le32 fw_build;
- u8 fw_branch;
- u8 fw_major;
- u8 fw_minor;
- u8 fw_patch;
- u8 api_branch;
- u8 api_major;
- u8 api_minor;
- u8 api_patch;
-};
-
-/* Send driver version (indirect 0x0002) */
-struct ice_aqc_driver_ver {
- u8 major_ver;
- u8 minor_ver;
- u8 build_ver;
- u8 subbuild_ver;
- u8 reserved[4];
- __le32 addr_high;
- __le32 addr_low;
-};
+typedef struct __packed { u8 buf[ICE_RXQ_CTX_SZ]; } ice_rxq_ctx_buf_t;
+
+/* The Tx queue context is 40 bytes, and includes some internal state. The
+ * Admin Queue buffers don't include the internal state, so only include the
+ * first 22 bytes of the context.
+ */
+#define ICE_TXQ_CTX_SZ 22
+
+typedef struct __packed { u8 buf[ICE_TXQ_CTX_SZ]; } ice_txq_ctx_buf_t;
+
+#define ICE_TXQ_CTX_FULL_SIZE_DWORDS 10
+#define ICE_TXQ_CTX_FULL_SZ \
+ (ICE_TXQ_CTX_FULL_SIZE_DWORDS * sizeof(u32))
+
+typedef struct __packed { u8 buf[ICE_TXQ_CTX_FULL_SZ]; } ice_txq_ctx_buf_full_t;
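In other words, the full internal Tx queue context is 10 dwords = 40 bytes, of which only the first 22 bytes are exchanged through Admin Queue buffers; the remaining 18 bytes of internal state stay with firmware/hardware.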
+
+#define ICE_TXTIME_CTX_SZ 25
+
+typedef struct __packed { u8 buf[ICE_TXTIME_CTX_SZ]; } ice_txtime_ctx_buf_t;
/* Queue Shutdown (direct 0x0003) */
struct ice_aqc_q_shutdown {
@@ -51,94 +44,6 @@ struct ice_aqc_q_shutdown {
u8 reserved[15];
};
-/* Request resource ownership (direct 0x0008)
- * Release resource ownership (direct 0x0009)
- */
-struct ice_aqc_req_res {
- __le16 res_id;
-#define ICE_AQC_RES_ID_NVM 1
-#define ICE_AQC_RES_ID_SDP 2
-#define ICE_AQC_RES_ID_CHNG_LOCK 3
-#define ICE_AQC_RES_ID_GLBL_LOCK 4
- __le16 access_type;
-#define ICE_AQC_RES_ACCESS_READ 1
-#define ICE_AQC_RES_ACCESS_WRITE 2
-
- /* Upon successful completion, FW writes this value and driver is
- * expected to release resource before timeout. This value is provided
- * in milliseconds.
- */
- __le32 timeout;
-#define ICE_AQ_RES_NVM_READ_DFLT_TIMEOUT_MS 3000
-#define ICE_AQ_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000
-#define ICE_AQ_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000
-#define ICE_AQ_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000
- /* For SDP: pin ID of the SDP */
- __le32 res_number;
- /* Status is only used for ICE_AQC_RES_ID_GLBL_LOCK */
- __le16 status;
-#define ICE_AQ_RES_GLBL_SUCCESS 0
-#define ICE_AQ_RES_GLBL_IN_PROG 1
-#define ICE_AQ_RES_GLBL_DONE 2
- u8 reserved[2];
-};
-
-/* Get function capabilities (indirect 0x000A)
- * Get device capabilities (indirect 0x000B)
- */
-struct ice_aqc_list_caps {
- u8 cmd_flags;
- u8 pf_index;
- u8 reserved[2];
- __le32 count;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-/* Device/Function buffer entry, repeated per reported capability */
-struct ice_aqc_list_caps_elem {
- __le16 cap;
-#define ICE_AQC_CAPS_VALID_FUNCTIONS 0x0005
-#define ICE_AQC_CAPS_SRIOV 0x0012
-#define ICE_AQC_CAPS_VF 0x0013
-#define ICE_AQC_CAPS_VSI 0x0017
-#define ICE_AQC_CAPS_DCB 0x0018
-#define ICE_AQC_CAPS_RSS 0x0040
-#define ICE_AQC_CAPS_RXQS 0x0041
-#define ICE_AQC_CAPS_TXQS 0x0042
-#define ICE_AQC_CAPS_MSIX 0x0043
-#define ICE_AQC_CAPS_FD 0x0045
-#define ICE_AQC_CAPS_1588 0x0046
-#define ICE_AQC_CAPS_MAX_MTU 0x0047
-#define ICE_AQC_CAPS_NVM_VER 0x0048
-#define ICE_AQC_CAPS_PENDING_NVM_VER 0x0049
-#define ICE_AQC_CAPS_OROM_VER 0x004A
-#define ICE_AQC_CAPS_PENDING_OROM_VER 0x004B
-#define ICE_AQC_CAPS_NET_VER 0x004C
-#define ICE_AQC_CAPS_PENDING_NET_VER 0x004D
-#define ICE_AQC_CAPS_RDMA 0x0051
-#define ICE_AQC_CAPS_SENSOR_READING 0x0067
-#define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076
-#define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
-#define ICE_AQC_CAPS_NVM_MGMT 0x0080
-#define ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE 0x0085
-#define ICE_AQC_CAPS_NAC_TOPOLOGY 0x0087
-#define ICE_AQC_CAPS_FW_LAG_SUPPORT 0x0092
-#define ICE_AQC_BIT_ROCEV2_LAG 0x01
-#define ICE_AQC_BIT_SRIOV_LAG 0x02
-
- u8 major_ver;
- u8 minor_ver;
- /* Number of resources described by this capability */
- __le32 number;
- /* Only meaningful for some types of resources */
- __le32 logical_id;
- /* Only meaningful for some types of resources */
- __le32 phys_id;
- __le64 rsvd1;
- __le64 rsvd2;
-};
-
/* Manage MAC address, read command - indirect (0x0107)
* This struct is also used for the response
*/
@@ -1491,7 +1396,23 @@ struct ice_aqc_dnl_equa_param {
#define ICE_AQC_RX_EQU_POST1 (0x12 << ICE_AQC_RX_EQU_SHIFT)
#define ICE_AQC_RX_EQU_BFLF (0x13 << ICE_AQC_RX_EQU_SHIFT)
#define ICE_AQC_RX_EQU_BFHF (0x14 << ICE_AQC_RX_EQU_SHIFT)
-#define ICE_AQC_RX_EQU_DRATE (0x15 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_CTLE_GAINHF (0x20 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_CTLE_GAINLF (0x21 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_CTLE_GAINDC (0x22 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_CTLE_BW (0x23 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_GAIN (0x30 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_GAIN2 (0x31 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_2 (0x32 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_3 (0x33 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_4 (0x34 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_5 (0x35 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_6 (0x36 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_7 (0x37 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_8 (0x38 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_9 (0x39 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_10 (0x3A << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_11 (0x3B << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_12 (0x3C << ICE_AQC_RX_EQU_SHIFT)
#define ICE_AQC_TX_EQU_PRE1 0x0
#define ICE_AQC_TX_EQU_PRE3 0x3
#define ICE_AQC_TX_EQU_ATTEN 0x4
@@ -1648,6 +1569,8 @@ struct ice_aqc_get_port_options_elem {
#define ICE_AQC_PORT_OPT_MAX_LANE_25G 5
#define ICE_AQC_PORT_OPT_MAX_LANE_50G 6
#define ICE_AQC_PORT_OPT_MAX_LANE_100G 7
+#define ICE_AQC_PORT_OPT_MAX_LANE_200G 8
+#define ICE_AQC_PORT_OPT_MAX_LANE_40G 9
u8 global_scid[2];
u8 phy_scid[2];
@@ -1790,6 +1713,7 @@ struct ice_aqc_nvm_pass_comp_tbl {
#define ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED 0x0
#define ICE_AQ_NVM_PASS_COMP_CAN_MAY_BE_UPDATEABLE 0x1
#define ICE_AQ_NVM_PASS_COMP_CAN_NOT_BE_UPDATED 0x2
+#define ICE_AQ_NVM_PASS_COMP_PARTIAL_CHECK 0x3
u8 component_response_code; /* Response only */
#define ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED_CODE 0x0
#define ICE_AQ_NVM_PASS_COMP_STAMP_IDENTICAL_CODE 0x1
@@ -2067,10 +1991,10 @@ struct ice_aqc_add_txqs_perq {
__le16 txq_id;
u8 rsvd[2];
__le32 q_teid;
- u8 txq_ctx[22];
+ ice_txq_ctx_buf_t txq_ctx;
u8 rsvd2[2];
struct ice_aqc_txsched_elem info;
-};
+} __packed;
/* The format of the command buffer for Add Tx LAN Queues (0x0C30)
* is an array of the following structs. Please note that the length of
@@ -2140,6 +2064,10 @@ struct ice_aqc_cfg_txqs {
#define ICE_AQC_Q_CFG_SRC_PRT_M 0x7
#define ICE_AQC_Q_CFG_DST_PRT_S 3
#define ICE_AQC_Q_CFG_DST_PRT_M (0x7 << ICE_AQC_Q_CFG_DST_PRT_S)
+#define ICE_AQC_Q_CFG_MODE_M GENMASK(7, 6)
+#define ICE_AQC_Q_CFG_MODE_SAME_PF 0x0
+#define ICE_AQC_Q_CFG_MODE_GIVE_OWN 0x1
+#define ICE_AQC_Q_CFG_MODE_KEEP_OWN 0x2
u8 time_out;
#define ICE_AQC_Q_CFG_TIMEOUT_S 2
#define ICE_AQC_Q_CFG_TIMEOUT_M (0x1F << ICE_AQC_Q_CFG_TIMEOUT_S)
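ICE_AQC_Q_CFG_MODE_M is a two-bit field (bits 7:6) selecting how queue ownership moves during a Configure Tx Queues command. A hedged sketch of composing that field with FIELD_PREP(); which command byte it lands in is assumed here purely for illustration.

/* Hedged sketch: composing the ownership-mode field with FIELD_PREP()
 * from <linux/bitfield.h>. mode_flags is a hypothetical local.
 */
u8 mode_flags = FIELD_PREP(ICE_AQC_Q_CFG_MODE_M, ICE_AQC_Q_CFG_MODE_KEEP_OWN);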
@@ -2193,6 +2121,34 @@ struct ice_aqc_add_rdma_qset_data {
struct ice_aqc_add_tx_rdma_qset_entry rdma_qsets[];
};
+/* Set Tx Time LAN Queue (indirect 0x0C35) */
+struct ice_aqc_set_txtimeqs {
+ __le16 q_id;
+ __le16 q_amount;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* This is the descriptor of each queue entry for the Set Tx Time Queue
+ * command (0x0C35). Only used within struct ice_aqc_set_txtime_qgrp.
+ */
+struct ice_aqc_set_txtimeqs_perq {
+ u8 reserved[4];
+ ice_txtime_ctx_buf_t txtime_ctx;
+ u8 reserved1[3];
+};
+
+/* The format of the command buffer for Set Tx Time Queue (0x0C35)
+ * is an array of the following structs. Please note that the length of
+ * each struct ice_aqc_set_txtime_qgrp is variable due to the variable
+ * number of queues in each group!
+ */
+struct ice_aqc_set_txtime_qgrp {
+ u8 reserved[8];
+ struct ice_aqc_set_txtimeqs_perq txtimeqs[];
+};
+
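Because struct ice_aqc_set_txtime_qgrp ends in a flexible array, buffers for it are sized at run time. A hedged sketch of sizing one for several queues with struct_size(); for the single-queue case the driver later uses an on-stack DEFINE_RAW_FLEX() buffer instead (see ice_cfg_tstamp() in ice_base.c).

/* Hedged sketch: sizing a Set Tx Time Queue buffer for num_queues entries.
 * num_queues is a hypothetical local; struct_size() accounts for the
 * flexible txtimeqs[] array.
 */
struct ice_aqc_set_txtime_qgrp *qg_buf;
size_t buf_len = struct_size(qg_buf, txtimeqs, num_queues);

qg_buf = kzalloc(buf_len, GFP_KERNEL);
if (!qg_buf)
	return -ENOMEM;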
/* Download Package (indirect 0x0C40) */
/* Also used for Update Package (indirect 0x0C41 and 0x0C42) */
struct ice_aqc_download_pkg {
@@ -2247,6 +2203,24 @@ struct ice_aqc_get_pkg_info_resp {
struct ice_aqc_get_pkg_info pkg_info[];
};
+#define ICE_CGU_INPUT_PHASE_OFFSET_BYTES 6
+
+struct ice_cgu_input_measure {
+ u8 phase_offset[ICE_CGU_INPUT_PHASE_OFFSET_BYTES];
+ __le32 freq;
+} __packed __aligned(sizeof(__le16));
+
+#define ICE_AQC_GET_CGU_IN_MEAS_DPLL_IDX_M ICE_M(0xf, 0)
+
+/* Get CGU input measure command response data structure (indirect 0x0C59) */
+struct ice_aqc_get_cgu_input_measure {
+ u8 dpll_idx_opt;
+ u8 length;
+ u8 rsvd[6];
+};
+
+#define ICE_AQC_GET_CGU_MAX_PHASE_ADJ GENMASK(30, 0)
+
/* Get CGU abilities command response data structure (indirect 0x0C61) */
struct ice_aqc_get_cgu_abilities {
u8 num_inputs;
@@ -2261,6 +2235,8 @@ struct ice_aqc_get_cgu_abilities {
u8 rsvd[3];
};
+#define ICE_AQC_CGU_IN_CFG_FLG2_REFSYNC_EN BIT(7)
+
/* Set CGU input config (direct 0x0C62) */
struct ice_aqc_set_cgu_input_config {
u8 input_idx;
@@ -2455,225 +2431,85 @@ struct ice_aqc_event_lan_overflow {
u8 reserved[8];
};
-enum ice_aqc_fw_logging_mod {
- ICE_AQC_FW_LOG_ID_GENERAL = 0,
- ICE_AQC_FW_LOG_ID_CTRL,
- ICE_AQC_FW_LOG_ID_LINK,
- ICE_AQC_FW_LOG_ID_LINK_TOPO,
- ICE_AQC_FW_LOG_ID_DNL,
- ICE_AQC_FW_LOG_ID_I2C,
- ICE_AQC_FW_LOG_ID_SDP,
- ICE_AQC_FW_LOG_ID_MDIO,
- ICE_AQC_FW_LOG_ID_ADMINQ,
- ICE_AQC_FW_LOG_ID_HDMA,
- ICE_AQC_FW_LOG_ID_LLDP,
- ICE_AQC_FW_LOG_ID_DCBX,
- ICE_AQC_FW_LOG_ID_DCB,
- ICE_AQC_FW_LOG_ID_XLR,
- ICE_AQC_FW_LOG_ID_NVM,
- ICE_AQC_FW_LOG_ID_AUTH,
- ICE_AQC_FW_LOG_ID_VPD,
- ICE_AQC_FW_LOG_ID_IOSF,
- ICE_AQC_FW_LOG_ID_PARSER,
- ICE_AQC_FW_LOG_ID_SW,
- ICE_AQC_FW_LOG_ID_SCHEDULER,
- ICE_AQC_FW_LOG_ID_TXQ,
- ICE_AQC_FW_LOG_ID_RSVD,
- ICE_AQC_FW_LOG_ID_POST,
- ICE_AQC_FW_LOG_ID_WATCHDOG,
- ICE_AQC_FW_LOG_ID_TASK_DISPATCH,
- ICE_AQC_FW_LOG_ID_MNG,
- ICE_AQC_FW_LOG_ID_SYNCE,
- ICE_AQC_FW_LOG_ID_HEALTH,
- ICE_AQC_FW_LOG_ID_TSDRV,
- ICE_AQC_FW_LOG_ID_PFREG,
- ICE_AQC_FW_LOG_ID_MDLVER,
- ICE_AQC_FW_LOG_ID_MAX,
-};
-
-/* Set FW Logging configuration (indirect 0xFF30)
- * Register for FW Logging (indirect 0xFF31)
- * Query FW Logging (indirect 0xFF32)
- * FW Log Event (indirect 0xFF33)
- */
-struct ice_aqc_fw_log {
- u8 cmd_flags;
-#define ICE_AQC_FW_LOG_CONF_UART_EN BIT(0)
-#define ICE_AQC_FW_LOG_CONF_AQ_EN BIT(1)
-#define ICE_AQC_FW_LOG_QUERY_REGISTERED BIT(2)
-#define ICE_AQC_FW_LOG_CONF_SET_VALID BIT(3)
-#define ICE_AQC_FW_LOG_AQ_REGISTER BIT(0)
-#define ICE_AQC_FW_LOG_AQ_QUERY BIT(2)
-
- u8 rsp_flag;
- __le16 fw_rt_msb;
- union {
- struct {
- __le32 fw_rt_lsb;
- } sync;
- struct {
- __le16 log_resolution;
-#define ICE_AQC_FW_LOG_MIN_RESOLUTION (1)
-#define ICE_AQC_FW_LOG_MAX_RESOLUTION (128)
-
- __le16 mdl_cnt;
- } cfg;
- } ops;
+enum ice_aqc_health_status_mask {
+ ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK = BIT(0),
+ ICE_AQC_HEALTH_STATUS_SET_ALL_PF_MASK = BIT(1),
+ ICE_AQC_HEALTH_STATUS_SET_GLOBAL_MASK = BIT(2),
+};
+
+/* Set Health Status (direct 0xFF20) */
+struct ice_aqc_set_health_status_cfg {
+ u8 event_source;
+ u8 reserved[15];
+};
+
+enum ice_aqc_health_status {
+ ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT = 0x101,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_TYPE = 0x102,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_QUAL = 0x103,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_COMM = 0x104,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_CONFLICT = 0x105,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_NOT_PRESENT = 0x106,
+ ICE_AQC_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED = 0x107,
+ ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT = 0x108,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_DIAGNOSTIC_FEATURE = 0x109,
+ ICE_AQC_HEALTH_STATUS_ERR_INVALID_LINK_CFG = 0x10B,
+ ICE_AQC_HEALTH_STATUS_ERR_PORT_ACCESS = 0x10C,
+ ICE_AQC_HEALTH_STATUS_ERR_PORT_UNREACHABLE = 0x10D,
+ ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED = 0x10F,
+ ICE_AQC_HEALTH_STATUS_ERR_PARALLEL_FAULT = 0x110,
+ ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED = 0x111,
+ ICE_AQC_HEALTH_STATUS_ERR_NETLIST_TOPO = 0x112,
+ ICE_AQC_HEALTH_STATUS_ERR_NETLIST = 0x113,
+ ICE_AQC_HEALTH_STATUS_ERR_TOPO_CONFLICT = 0x114,
+ ICE_AQC_HEALTH_STATUS_ERR_LINK_HW_ACCESS = 0x115,
+ ICE_AQC_HEALTH_STATUS_ERR_LINK_RUNTIME = 0x116,
+ ICE_AQC_HEALTH_STATUS_ERR_DNL_INIT = 0x117,
+ ICE_AQC_HEALTH_STATUS_ERR_PHY_NVM_PROG = 0x120,
+ ICE_AQC_HEALTH_STATUS_ERR_PHY_FW_LOAD = 0x121,
+ ICE_AQC_HEALTH_STATUS_INFO_RECOVERY = 0x500,
+ ICE_AQC_HEALTH_STATUS_ERR_FLASH_ACCESS = 0x501,
+ ICE_AQC_HEALTH_STATUS_ERR_NVM_AUTH = 0x502,
+ ICE_AQC_HEALTH_STATUS_ERR_OROM_AUTH = 0x503,
+ ICE_AQC_HEALTH_STATUS_ERR_DDP_AUTH = 0x504,
+ ICE_AQC_HEALTH_STATUS_ERR_NVM_COMPAT = 0x505,
+ ICE_AQC_HEALTH_STATUS_ERR_OROM_COMPAT = 0x506,
+ ICE_AQC_HEALTH_STATUS_ERR_NVM_SEC_VIOLATION = 0x507,
+ ICE_AQC_HEALTH_STATUS_ERR_OROM_SEC_VIOLATION = 0x508,
+ ICE_AQC_HEALTH_STATUS_ERR_DCB_MIB = 0x509,
+ ICE_AQC_HEALTH_STATUS_ERR_MNG_TIMEOUT = 0x50A,
+ ICE_AQC_HEALTH_STATUS_ERR_BMC_RESET = 0x50B,
+ ICE_AQC_HEALTH_STATUS_ERR_LAST_MNG_FAIL = 0x50C,
+ ICE_AQC_HEALTH_STATUS_ERR_RESOURCE_ALLOC_FAIL = 0x50D,
+ ICE_AQC_HEALTH_STATUS_ERR_FW_LOOP = 0x1000,
+ ICE_AQC_HEALTH_STATUS_ERR_FW_PFR_FAIL = 0x1001,
+ ICE_AQC_HEALTH_STATUS_ERR_LAST_FAIL_AQ = 0x1002,
+};
+
+/* Get Health Status (indirect 0xFF22) */
+struct ice_aqc_get_health_status {
+ __le16 health_status_count;
+ u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
};
-/* Response Buffer for:
- * Set Firmware Logging Configuration (0xFF30)
- * Query FW Logging (0xFF32)
- */
-struct ice_aqc_fw_log_cfg_resp {
- __le16 module_identifier;
- u8 log_level;
- u8 rsvd0;
+enum ice_aqc_health_status_scope {
+ ICE_AQC_HEALTH_STATUS_PF = 0x1,
+ ICE_AQC_HEALTH_STATUS_PORT = 0x2,
+ ICE_AQC_HEALTH_STATUS_GLOBAL = 0x3,
};
-/**
- * struct ice_aq_desc - Admin Queue (AQ) descriptor
- * @flags: ICE_AQ_FLAG_* flags
- * @opcode: AQ command opcode
- * @datalen: length in bytes of indirect/external data buffer
- * @retval: return value from firmware
- * @cookie_high: opaque data high-half
- * @cookie_low: opaque data low-half
- * @params: command-specific parameters
- *
- * Descriptor format for commands the driver posts on the Admin Transmit Queue
- * (ATQ). The firmware writes back onto the command descriptor and returns
- * the result of the command. Asynchronous events that are not an immediate
- * result of the command are written to the Admin Receive Queue (ARQ) using
- * the same descriptor format. Descriptors are in little-endian notation with
- * 32-bit words.
+#define ICE_AQC_HEALTH_STATUS_UNDEFINED_DATA 0xDEADBEEF
+
+/* Get Health Status event buffer entry (0xFF22),
+ * repeated per reported health status.
*/
-struct ice_aq_desc {
- __le16 flags;
- __le16 opcode;
- __le16 datalen;
- __le16 retval;
- __le32 cookie_high;
- __le32 cookie_low;
- union {
- u8 raw[16];
- struct ice_aqc_generic generic;
- struct ice_aqc_get_ver get_ver;
- struct ice_aqc_driver_ver driver_ver;
- struct ice_aqc_q_shutdown q_shutdown;
- struct ice_aqc_req_res res_owner;
- struct ice_aqc_manage_mac_read mac_read;
- struct ice_aqc_manage_mac_write mac_write;
- struct ice_aqc_clear_pxe clear_pxe;
- struct ice_aqc_list_caps get_cap;
- struct ice_aqc_get_phy_caps get_phy;
- struct ice_aqc_set_phy_cfg set_phy;
- struct ice_aqc_restart_an restart_an;
- struct ice_aqc_set_phy_rec_clk_out set_phy_rec_clk_out;
- struct ice_aqc_get_phy_rec_clk_out get_phy_rec_clk_out;
- struct ice_aqc_get_sensor_reading get_sensor_reading;
- struct ice_aqc_get_sensor_reading_resp get_sensor_reading_resp;
- struct ice_aqc_gpio read_write_gpio;
- struct ice_aqc_sff_eeprom read_write_sff_param;
- struct ice_aqc_set_port_id_led set_port_id_led;
- struct ice_aqc_get_port_options get_port_options;
- struct ice_aqc_set_port_option set_port_option;
- struct ice_aqc_get_sw_cfg get_sw_conf;
- struct ice_aqc_set_port_params set_port_params;
- struct ice_aqc_sw_rules sw_rules;
- struct ice_aqc_add_get_recipe add_get_recipe;
- struct ice_aqc_recipe_to_profile recipe_to_profile;
- struct ice_aqc_get_topo get_topo;
- struct ice_aqc_sched_elem_cmd sched_elem_cmd;
- struct ice_aqc_query_txsched_res query_sched_res;
- struct ice_aqc_query_port_ets port_ets;
- struct ice_aqc_rl_profile rl_profile;
- struct ice_aqc_nvm nvm;
- struct ice_aqc_nvm_checksum nvm_checksum;
- struct ice_aqc_nvm_pkg_data pkg_data;
- struct ice_aqc_nvm_pass_comp_tbl pass_comp_tbl;
- struct ice_aqc_pf_vf_msg virt;
- struct ice_aqc_set_query_pfc_mode set_query_pfc_mode;
- struct ice_aqc_lldp_get_mib lldp_get_mib;
- struct ice_aqc_lldp_set_mib_change lldp_set_event;
- struct ice_aqc_lldp_stop lldp_stop;
- struct ice_aqc_lldp_start lldp_start;
- struct ice_aqc_lldp_set_local_mib lldp_set_mib;
- struct ice_aqc_lldp_stop_start_specific_agent lldp_agent_ctrl;
- struct ice_aqc_lldp_filter_ctrl lldp_filter_ctrl;
- struct ice_aqc_get_set_rss_lut get_set_rss_lut;
- struct ice_aqc_get_set_rss_key get_set_rss_key;
- struct ice_aqc_neigh_dev_req neigh_dev;
- struct ice_aqc_add_txqs add_txqs;
- struct ice_aqc_dis_txqs dis_txqs;
- struct ice_aqc_cfg_txqs cfg_txqs;
- struct ice_aqc_add_rdma_qset add_rdma_qset;
- struct ice_aqc_add_get_update_free_vsi vsi_cmd;
- struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;
- struct ice_aqc_download_pkg download_pkg;
- struct ice_aqc_set_cgu_input_config set_cgu_input_config;
- struct ice_aqc_get_cgu_input_config get_cgu_input_config;
- struct ice_aqc_set_cgu_output_config set_cgu_output_config;
- struct ice_aqc_get_cgu_output_config get_cgu_output_config;
- struct ice_aqc_get_cgu_dpll_status get_cgu_dpll_status;
- struct ice_aqc_set_cgu_dpll_config set_cgu_dpll_config;
- struct ice_aqc_set_cgu_ref_prio set_cgu_ref_prio;
- struct ice_aqc_get_cgu_ref_prio get_cgu_ref_prio;
- struct ice_aqc_get_cgu_info get_cgu_info;
- struct ice_aqc_driver_shared_params drv_shared_params;
- struct ice_aqc_fw_log fw_log;
- struct ice_aqc_set_mac_lb set_mac_lb;
- struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
- struct ice_aqc_set_mac_cfg set_mac_cfg;
- struct ice_aqc_set_event_mask set_event_mask;
- struct ice_aqc_get_link_status get_link_status;
- struct ice_aqc_event_lan_overflow lan_overflow;
- struct ice_aqc_get_link_topo get_link_topo;
- struct ice_aqc_dnl_call_command dnl_call;
- struct ice_aqc_i2c read_write_i2c;
- struct ice_aqc_read_i2c_resp read_i2c_resp;
- struct ice_aqc_get_set_tx_topo get_set_tx_topo;
- } params;
-};
-
-/* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */
-#define ICE_AQ_LG_BUF 512
-
-#define ICE_AQ_FLAG_DD_S 0
-#define ICE_AQ_FLAG_CMP_S 1
-#define ICE_AQ_FLAG_ERR_S 2
-#define ICE_AQ_FLAG_LB_S 9
-#define ICE_AQ_FLAG_RD_S 10
-#define ICE_AQ_FLAG_BUF_S 12
-#define ICE_AQ_FLAG_SI_S 13
-
-#define ICE_AQ_FLAG_DD BIT(ICE_AQ_FLAG_DD_S) /* 0x1 */
-#define ICE_AQ_FLAG_CMP BIT(ICE_AQ_FLAG_CMP_S) /* 0x2 */
-#define ICE_AQ_FLAG_ERR BIT(ICE_AQ_FLAG_ERR_S) /* 0x4 */
-#define ICE_AQ_FLAG_LB BIT(ICE_AQ_FLAG_LB_S) /* 0x200 */
-#define ICE_AQ_FLAG_RD BIT(ICE_AQ_FLAG_RD_S) /* 0x400 */
-#define ICE_AQ_FLAG_BUF BIT(ICE_AQ_FLAG_BUF_S) /* 0x1000 */
-#define ICE_AQ_FLAG_SI BIT(ICE_AQ_FLAG_SI_S) /* 0x2000 */
-
-/* error codes */
-enum ice_aq_err {
- ICE_AQ_RC_OK = 0, /* Success */
- ICE_AQ_RC_EPERM = 1, /* Operation not permitted */
- ICE_AQ_RC_ENOENT = 2, /* No such element */
- ICE_AQ_RC_ENOMEM = 9, /* Out of memory */
- ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */
- ICE_AQ_RC_EEXIST = 13, /* Object already exists */
- ICE_AQ_RC_EINVAL = 14, /* Invalid argument */
- ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */
- ICE_AQ_RC_ENOSYS = 17, /* Function not implemented */
- ICE_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
- ICE_AQ_RC_ENOSEC = 24, /* Missing security manifest */
- ICE_AQ_RC_EBADSIG = 25, /* Bad RSA signature */
- ICE_AQ_RC_ESVN = 26, /* SVN number prohibits this package */
- ICE_AQ_RC_EBADMAN = 27, /* Manifest hash mismatch */
- ICE_AQ_RC_EBADBUF = 28, /* Buffer hash mismatches manifest */
+struct ice_aqc_health_status_elem {
+ __le16 health_status_code;
+ __le16 event_source;
+ __le32 internal_data1;
+ __le32 internal_data2;
};
/* Admin Queue command opcodes */
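Each Get Health Status (0xFF22) response carries health_status_count entries of struct ice_aqc_health_status_elem in the indirect buffer. A hedged sketch of walking such a buffer; buf and count stand in for the DMA buffer and the element count reported by firmware.

/* Hedged sketch: iterating Get Health Status response entries. */
struct ice_aqc_health_status_elem *elem = buf;
u16 i;

for (i = 0; i < count; i++, elem++)
	pr_debug("health status 0x%04x, source 0x%04x\n",
		 le16_to_cpu(elem->health_status_code),
		 le16_to_cpu(elem->event_source));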
@@ -2810,6 +2646,9 @@ enum ice_adminq_opc {
ice_aqc_opc_cfg_txqs = 0x0C32,
ice_aqc_opc_add_rdma_qset = 0x0C33,
+ /* Tx Time queue commands */
+ ice_aqc_opc_set_txtimeqs = 0x0C35,
+
/* package commands */
ice_aqc_opc_download_pkg = 0x0C40,
ice_aqc_opc_upload_section = 0x0C41,
@@ -2817,6 +2656,7 @@ enum ice_adminq_opc {
ice_aqc_opc_get_pkg_info_list = 0x0C43,
/* 1588/SyncE commands/events */
+ ice_aqc_opc_get_cgu_input_measure = 0x0C59,
ice_aqc_opc_get_cgu_abilities = 0x0C61,
ice_aqc_opc_set_cgu_input_config = 0x0C62,
ice_aqc_opc_get_cgu_input_config = 0x0C63,
@@ -2833,6 +2673,10 @@ enum ice_adminq_opc {
/* Standalone Commands/Events */
ice_aqc_opc_event_lan_overflow = 0x1001,
+ /* System Diagnostic commands */
+ ice_aqc_opc_set_health_status_cfg = 0xFF20,
+ ice_aqc_opc_get_health_status = 0xFF22,
+
/* FW Logging Commands */
ice_aqc_opc_fw_logs_config = 0xFF30,
ice_aqc_opc_fw_logs_register = 0xFF31,
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
index 7cee365cc7d1..1f7834c03550 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -378,6 +378,50 @@ ice_arfs_is_perfect_flow_set(struct ice_hw *hw, __be16 l3_proto, u8 l4_proto)
}
/**
+ * ice_arfs_cmp - Check if aRFS filter matches this flow.
+ * @fltr_info: filter info of the saved ARFS entry.
+ * @fk: flow dissector keys.
+ * @n_proto: One of htons(ETH_P_IP) or htons(ETH_P_IPV6).
+ * @ip_proto: One of IPPROTO_TCP or IPPROTO_UDP.
+ *
+ * Since this function assumes limited values for n_proto and ip_proto, it
+ * is meant to be called only from ice_rx_flow_steer().
+ *
+ * Return:
+ * * true - fltr_info refers to the same flow as fk.
+ * * false - fltr_info and fk refer to different flows.
+ */
+static bool
+ice_arfs_cmp(const struct ice_fdir_fltr *fltr_info, const struct flow_keys *fk,
+ __be16 n_proto, u8 ip_proto)
+{
+ /* Determine if the filter is for IPv4 or IPv6 based on flow_type,
+ * which is one of ICE_FLTR_PTYPE_NONF_IPV{4,6}_{TCP,UDP}.
+ */
+ bool is_v4 = fltr_info->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
+ fltr_info->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+
+	/* The following checks are ordered with the quickest and most
+	 * discriminative fields first, to fail early.
+ */
+ if (is_v4)
+ return n_proto == htons(ETH_P_IP) &&
+ fltr_info->ip.v4.src_port == fk->ports.src &&
+ fltr_info->ip.v4.dst_port == fk->ports.dst &&
+ fltr_info->ip.v4.src_ip == fk->addrs.v4addrs.src &&
+ fltr_info->ip.v4.dst_ip == fk->addrs.v4addrs.dst &&
+ fltr_info->ip.v4.proto == ip_proto;
+
+ return fltr_info->ip.v6.src_port == fk->ports.src &&
+ fltr_info->ip.v6.dst_port == fk->ports.dst &&
+ fltr_info->ip.v6.proto == ip_proto &&
+ !memcmp(&fltr_info->ip.v6.src_ip, &fk->addrs.v6addrs.src,
+ sizeof(struct in6_addr)) &&
+ !memcmp(&fltr_info->ip.v6.dst_ip, &fk->addrs.v6addrs.dst,
+ sizeof(struct in6_addr));
+}
+
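ice_arfs_cmp() assumes its caller has already dissected the skb and narrowed n_proto/ip_proto to the supported values. A hedged sketch of that caller-side preparation, mirroring what ice_rx_flow_steer() does before walking the aRFS list.

/* Hedged sketch: the flow-key setup ice_arfs_cmp() expects from its caller. */
struct flow_keys fk;

if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
	return -EPROTONOSUPPORT;

n_proto = fk.basic.n_proto;	/* htons(ETH_P_IP) or htons(ETH_P_IPV6) */
ip_proto = fk.basic.ip_proto;	/* IPPROTO_TCP or IPPROTO_UDP */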
+/**
* ice_rx_flow_steer - steer the Rx flow to where application is being run
* @netdev: ptr to the netdev being adjusted
* @skb: buffer with required header information
@@ -448,6 +492,10 @@ ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
continue;
fltr_info = &arfs_entry->fltr_info;
+
+ if (!ice_arfs_cmp(fltr_info, &fk, n_proto, ip_proto))
+ continue;
+
ret = fltr_info->fltr_id;
if (fltr_info->q_index == rxq_idx ||
@@ -511,7 +559,7 @@ void ice_init_arfs(struct ice_vsi *vsi)
struct hlist_head *arfs_fltr_list;
unsigned int i;
- if (!vsi || vsi->type != ICE_VSI_PF)
+ if (!vsi || vsi->type != ICE_VSI_PF || ice_is_arfs_active(vsi))
return;
arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list),
@@ -571,25 +619,6 @@ void ice_clear_arfs(struct ice_vsi *vsi)
}
/**
- * ice_free_cpu_rx_rmap - free setup CPU reverse map
- * @vsi: the VSI to be forwarded to
- */
-void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
-{
- struct net_device *netdev;
-
- if (!vsi || vsi->type != ICE_VSI_PF)
- return;
-
- netdev = vsi->netdev;
- if (!netdev || !netdev->rx_cpu_rmap)
- return;
-
- free_irq_cpu_rmap(netdev->rx_cpu_rmap);
- netdev->rx_cpu_rmap = NULL;
-}
-
-/**
* ice_set_cpu_rx_rmap - setup CPU reverse map for each queue
* @vsi: the VSI to be forwarded to
*/
@@ -597,7 +626,6 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
{
struct net_device *netdev;
struct ice_pf *pf;
- int i;
if (!vsi || vsi->type != ICE_VSI_PF)
return 0;
@@ -610,18 +638,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
netdev_dbg(netdev, "Setup CPU RMAP: vsi type 0x%x, ifname %s, q_vectors %d\n",
vsi->type, netdev->name, vsi->num_q_vectors);
- netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(vsi->num_q_vectors);
- if (unlikely(!netdev->rx_cpu_rmap))
- return -EINVAL;
-
- ice_for_each_q_vector(vsi, i)
- if (irq_cpu_rmap_add(netdev->rx_cpu_rmap,
- vsi->q_vectors[i]->irq.virq)) {
- ice_free_cpu_rx_rmap(vsi);
- return -EINVAL;
- }
-
- return 0;
+ return netif_enable_cpu_rmap(netdev, vsi->num_q_vectors);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.h b/drivers/net/ethernet/intel/ice/ice_arfs.h
index 9669ad9bf7b5..9706293128c3 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.h
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.h
@@ -45,7 +45,6 @@ int
ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
u16 rxq_idx, u32 flow_id);
void ice_clear_arfs(struct ice_vsi *vsi);
-void ice_free_cpu_rx_rmap(struct ice_vsi *vsi);
void ice_init_arfs(struct ice_vsi *vsi);
void ice_sync_arfs_fltrs(struct ice_pf *pf);
int ice_set_cpu_rx_rmap(struct ice_vsi *vsi);
@@ -56,7 +55,6 @@ ice_is_arfs_using_perfect_flow(struct ice_hw *hw,
enum ice_fltr_ptype flow_type);
#else
static inline void ice_clear_arfs(struct ice_vsi *vsi) { }
-static inline void ice_free_cpu_rx_rmap(struct ice_vsi *vsi) { }
static inline void ice_init_arfs(struct ice_vsi *vsi) { }
static inline void ice_sync_arfs_fltrs(struct ice_pf *pf) { }
static inline void ice_remove_arfs(struct ice_pf *pf) { }
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 3a8e156d7d86..eadb1e3d12b3 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019, Intel Corporation. */
#include <net/xdp_sock_drv.h>
+#include <linux/net/intel/libie/rx.h>
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
@@ -147,16 +148,13 @@ skip_alloc:
q_vector->reg_idx = q_vector->irq.index;
q_vector->vf_reg_idx = q_vector->irq.index;
- /* only set affinity_mask if the CPU is online */
- if (cpu_online(v_idx))
- cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
-
/* This will not be called in the driver load path because the netdev
 * will not be created yet. All other cases will register the NAPI
* handler here (i.e. resume, reset/rebuild, etc.)
*/
if (vsi->netdev)
- netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll);
+ netif_napi_add_config(vsi->netdev, &q_vector->napi,
+ ice_napi_poll, v_idx);
out:
/* tie q_vector and VSI together */
@@ -245,7 +243,8 @@ static void ice_cfg_itr_gran(struct ice_hw *hw)
* @ring: ring to get the absolute queue index
* @tc: traffic class number
*/
-static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc)
+static u16
+ice_calc_txq_handle(const struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc)
{
WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");
@@ -253,7 +252,7 @@ static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8
return ring->q_index - ring->ch->base_q;
/* Idea here for calculation is that we subtract the number of queue
- * count from TC that ring belongs to from it's absolute queue index
+ * count from TC that ring belongs to from its absolute queue index
* and as a result we get the queue's index within TC.
*/
return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset;
@@ -275,35 +274,26 @@ static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring)
if (test_and_set_bit(ICE_TX_XPS_INIT_DONE, ring->xps_state))
return;
- netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask,
+ netif_set_xps_queue(ring->netdev,
+ &ring->q_vector->napi.config->affinity_mask,
ring->q_index);
}
/**
- * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
- * @ring: The Tx ring to configure
- * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
- * @pf_q: queue index in the PF space
+ * ice_set_txq_ctx_vmvf - set queue context VM/VF type and number by VSI type
+ * @ring: the Tx ring to configure
+ * @vmvf_type: VM/VF type
+ * @vmvf_num: VM/VF number
*
- * Configure the Tx descriptor ring in TLAN context.
+ * Return: 0 on success and a negative value on error.
*/
-static void
-ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
+static int
+ice_set_txq_ctx_vmvf(struct ice_tx_ring *ring, u8 *vmvf_type, u16 *vmvf_num)
{
struct ice_vsi *vsi = ring->vsi;
- struct ice_hw *hw = &vsi->back->hw;
-
- tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
-
- tlan_ctx->port_num = vsi->port_info->lport;
-
- /* Transmit Queue Length */
- tlan_ctx->qlen = ring->count;
-
- ice_set_cgd_num(tlan_ctx, ring->dcb_tc);
+ struct ice_hw *hw;
- /* PF number */
- tlan_ctx->pf_num = hw->pf_id;
+ hw = &vsi->back->hw;
/* queue belongs to a specific VSI type
* VF / VM index should be programmed per vmvf_type setting:
@@ -316,21 +306,60 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf
case ICE_VSI_CTRL:
case ICE_VSI_PF:
if (ring->ch)
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
+ *vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
else
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
+ *vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
break;
case ICE_VSI_VF:
/* Firmware expects vmvf_num to be absolute VF ID */
- tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
+ *vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
+ *vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
break;
case ICE_VSI_SF:
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
+ *vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
break;
default:
- return;
+ dev_info(ice_pf_to_dev(vsi->back),
+ "Unable to set VMVF type for VSI type %d\n",
+ vsi->type);
+ return -EINVAL;
}
+ return 0;
+}
+
+/**
+ * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
+ * @ring: the Tx ring to configure
+ * @tlan_ctx: pointer to the Tx LAN queue context structure to be initialized
+ * @pf_q: queue index in the PF space
+ *
+ * Configure the Tx descriptor ring in TLAN context.
+ *
+ * Return: 0 on success and a negative value on error.
+ */
+static int
+ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
+{
+ struct ice_vsi *vsi = ring->vsi;
+ struct ice_hw *hw;
+ int err;
+
+ hw = &vsi->back->hw;
+ tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
+ tlan_ctx->port_num = vsi->port_info->lport;
+
+ /* Transmit Queue Length */
+ tlan_ctx->qlen = ring->count;
+
+ ice_set_cgd_num(tlan_ctx, ring->dcb_tc);
+
+ /* PF number */
+ tlan_ctx->pf_num = hw->pf_id;
+
+ err = ice_set_txq_ctx_vmvf(ring, &tlan_ctx->vmvf_type,
+ &tlan_ctx->vmvf_num);
+ if (err)
+ return err;
/* make sure the context is associated with the right VSI */
if (ring->ch)
@@ -357,22 +386,83 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf
* 1: Legacy Host Interface
*/
tlan_ctx->legacy_int = ICE_TX_LEGACY;
+
+ return 0;
}
/**
- * ice_rx_offset - Return expected offset into page to access data
- * @rx_ring: Ring we are requesting offset of
+ * ice_setup_txtime_ctx - setup a struct ice_txtime_ctx instance
+ * @ring: the tstamp ring to configure
+ * @txtime_ctx: pointer to the Tx time queue context structure to be initialized
*
- * Returns the offset value for ring into the data buffer.
+ * Return: 0 on success and a negative value on error.
*/
-static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
+static int
+ice_setup_txtime_ctx(const struct ice_tstamp_ring *ring,
+ struct ice_txtime_ctx *txtime_ctx)
{
- if (ice_ring_uses_build_skb(rx_ring))
- return ICE_SKB_PAD;
+ struct ice_tx_ring *tx_ring = ring->tx_ring;
+ struct ice_vsi *vsi = tx_ring->vsi;
+ struct ice_hw *hw = &vsi->back->hw;
+ int err;
+
+ txtime_ctx->base = ring->dma >> ICE_TXTIME_CTX_BASE_S;
+
+ /* Tx time Queue Length */
+ txtime_ctx->qlen = ring->count;
+ txtime_ctx->txtime_ena_q = 1;
+
+ /* PF number */
+ txtime_ctx->pf_num = hw->pf_id;
+
+ err = ice_set_txq_ctx_vmvf(tx_ring, &txtime_ctx->vmvf_type,
+ &txtime_ctx->vmvf_num);
+ if (err)
+ return err;
+
+ /* make sure the context is associated with the right VSI */
+ if (tx_ring->ch)
+ txtime_ctx->src_vsi = tx_ring->ch->vsi_num;
+ else
+ txtime_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
+
+ txtime_ctx->ts_res = ICE_TXTIME_CTX_RESOLUTION_128NS;
+ txtime_ctx->drbell_mode_32 = ICE_TXTIME_CTX_DRBELL_MODE_32;
+ txtime_ctx->ts_fetch_prof_id = ICE_TXTIME_CTX_FETCH_PROF_ID_0;
+
return 0;
}
/**
+ * ice_calc_ts_ring_count - calculate the number of Tx time stamp descriptors
+ * @tx_ring: Tx ring to calculate the count for
+ *
+ * Return: the number of Tx time stamp descriptors.
+ */
+u16 ice_calc_ts_ring_count(struct ice_tx_ring *tx_ring)
+{
+ u16 prof = ICE_TXTIME_CTX_FETCH_PROF_ID_0;
+ struct ice_vsi *vsi = tx_ring->vsi;
+ struct ice_hw *hw = &vsi->back->hw;
+ u16 max_fetch_desc = 0, fetch, i;
+ u32 reg;
+
+ for (i = 0; i < ICE_TXTIME_FETCH_PROFILE_CNT; i++) {
+ reg = rd32(hw, E830_GLTXTIME_FETCH_PROFILE(prof, 0));
+ fetch = FIELD_GET(E830_GLTXTIME_FETCH_PROFILE_FETCH_TS_DESC_M,
+ reg);
+ max_fetch_desc = max(fetch, max_fetch_desc);
+ }
+
+ if (!max_fetch_desc)
+ max_fetch_desc = ICE_TXTIME_FETCH_TS_DESC_DFLT;
+
+ max_fetch_desc = ALIGN(max_fetch_desc, ICE_REQ_DESC_MULTIPLE);
+
+ return tx_ring->count + max_fetch_desc;
+}
+
+/**
* ice_setup_rx_ctx - Configure a receive ring context
* @ring: The Rx ring to configure
*
@@ -434,8 +524,29 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
else
rlan_ctx.l2tsel = 1;
- rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
- rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
+ if (ring->hdr_pp) {
+ rlan_ctx.hbuf = ring->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
+ rlan_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
+
+ /*
+ * If the frame is TCP/UDP/SCTP, it will be split by the
+ * payload.
+ * If not, but it's an IPv4/IPv6 frame, it will be split by
+ * the IP header.
+ * If not IP, it will be split by the Ethernet header.
+ *
+ * In any case, the header buffer will never be left empty.
+ */
+ rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_L2 |
+ ICE_RLAN_RX_HSPLIT_0_SPLIT_IP |
+ ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP |
+ ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP;
+ } else {
+ rlan_ctx.hbuf = 0;
+ rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
+ rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
+ }
+
rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
/* This controls whether VLAN is stripped from inner headers
@@ -447,12 +558,15 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
/* Max packet size for this queue - must not be set to a larger value
* than 5 x DBUF
*/
- rlan_ctx.rxmax = min_t(u32, ring->max_frame,
+ rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
ICE_MAX_CHAINED_RX_BUFS * ring->rx_buf_len);
/* Rx queue threshold in units of 64 */
rlan_ctx.lrxqthresh = 1;
+ /* Enable descriptor prefetch */
+ rlan_ctx.prefena = 1;
+
/* PF acts as uplink for switchdev; set flex descriptor with src_vsi
* metadata and flags to allow redirecting to PR netdev
*/
@@ -469,9 +583,6 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
*/
if (vsi->type != ICE_VSI_VF)
ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
- else
- ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3,
- false);
/* Absolute queue number out of 2K needs to be passed */
err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
@@ -484,14 +595,6 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
if (vsi->type == ICE_VSI_VF)
return 0;
- /* configure Rx buffer alignment */
- if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
- ice_clear_ring_build_skb_ena(ring);
- else
- ice_set_ring_build_skb_ena(ring);
-
- ring->rx_offset = ice_rx_offset(ring);
-
/* init queue specific tail register */
ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
writel(0, ring->tail);
@@ -499,36 +602,51 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
return 0;
}
-static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring)
+static int ice_rxq_pp_create(struct ice_rx_ring *rq)
{
- void *ctx_ptr = &ring->pkt_ctx;
- struct xsk_cb_desc desc = {};
-
- XSK_CHECK_PRIV_TYPE(struct ice_xdp_buff);
- desc.src = &ctx_ptr;
- desc.off = offsetof(struct ice_xdp_buff, pkt_ctx) -
- sizeof(struct xdp_buff);
- desc.bytes = sizeof(ctx_ptr);
- xsk_pool_fill_cb(ring->xsk_pool, &desc);
-}
+ struct libeth_fq fq = {
+ .count = rq->count,
+ .nid = NUMA_NO_NODE,
+ .hsplit = rq->vsi->hsplit,
+ .xdp = ice_is_xdp_ena_vsi(rq->vsi),
+ .buf_len = LIBIE_MAX_RX_BUF_LEN,
+ };
+ int err;
-/**
- * ice_get_frame_sz - calculate xdp_buff::frame_sz
- * @rx_ring: the ring being configured
- *
- * Return frame size based on underlying PAGE_SIZE
- */
-static unsigned int ice_get_frame_sz(struct ice_rx_ring *rx_ring)
-{
- unsigned int frame_sz;
+ err = libeth_rx_fq_create(&fq, &rq->q_vector->napi);
+ if (err)
+ return err;
+
+ rq->pp = fq.pp;
+ rq->rx_fqes = fq.fqes;
+ rq->truesize = fq.truesize;
+ rq->rx_buf_len = fq.buf_len;
+
+ if (!fq.hsplit)
+ return 0;
+
+ fq = (struct libeth_fq){
+ .count = rq->count,
+ .type = LIBETH_FQE_HDR,
+ .nid = NUMA_NO_NODE,
+ .xdp = ice_is_xdp_ena_vsi(rq->vsi),
+ };
-#if (PAGE_SIZE >= 8192)
- frame_sz = rx_ring->rx_buf_len;
-#else
- frame_sz = ice_rx_pg_size(rx_ring) / 2;
-#endif
+ err = libeth_rx_fq_create(&fq, &rq->q_vector->napi);
+ if (err)
+ goto destroy;
- return frame_sz;
+ rq->hdr_pp = fq.pp;
+ rq->hdr_fqes = fq.fqes;
+ rq->hdr_truesize = fq.truesize;
+ rq->rx_hdr_len = fq.buf_len;
+
+ return 0;
+
+destroy:
+ ice_rxq_pp_destroy(rq);
+
+ return err;
}
/**
@@ -540,7 +658,8 @@ static unsigned int ice_get_frame_sz(struct ice_rx_ring *rx_ring)
static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
{
struct device *dev = ice_pf_to_dev(ring->vsi->back);
- u32 num_bufs = ICE_RX_DESC_UNUSED(ring);
+ u32 num_bufs = ICE_DESC_UNUSED(ring);
+ u32 rx_buf_len;
int err;
if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF) {
@@ -554,15 +673,19 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
}
ice_rx_xsk_pool(ring);
+ err = ice_realloc_rx_xdp_bufs(ring, ring->xsk_pool);
+ if (err)
+ return err;
+
if (ring->xsk_pool) {
xdp_rxq_info_unreg(&ring->xdp_rxq);
- ring->rx_buf_len =
+ rx_buf_len =
xsk_pool_get_rx_frame_size(ring->xsk_pool);
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index,
ring->q_vector->napi.napi_id,
- ring->rx_buf_len);
+ rx_buf_len);
if (err)
return err;
err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
@@ -571,36 +694,33 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
if (err)
return err;
xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
- ice_xsk_pool_fill_cb(ring);
dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->q_index);
} else {
+ err = ice_rxq_pp_create(ring);
+ if (err)
+ return err;
+
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index,
ring->q_vector->napi.napi_id,
ring->rx_buf_len);
if (err)
- return err;
+ goto err_destroy_fq;
}
-
- err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
- MEM_TYPE_PAGE_SHARED,
- NULL);
- if (err)
- return err;
+ xdp_rxq_info_attach_page_pool(&ring->xdp_rxq,
+ ring->pp);
}
}
- xdp_init_buff(&ring->xdp, ice_get_frame_sz(ring), &ring->xdp_rxq);
ring->xdp.data = NULL;
- ring->xdp_ext.pkt_ctx = &ring->pkt_ctx;
err = ice_setup_rx_ctx(ring);
if (err) {
dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
ring->q_index, err);
- return err;
+ goto err_destroy_fq;
}
if (ring->xsk_pool) {
@@ -625,9 +745,20 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
return 0;
}
- ice_alloc_rx_bufs(ring, num_bufs);
+ if (ring->vsi->type == ICE_VSI_CTRL)
+ ice_init_ctrl_rx_descs(ring, num_bufs);
+ else
+ err = ice_alloc_rx_bufs(ring, num_bufs);
+
+ if (err)
+ goto err_destroy_fq;
return 0;
+
+err_destroy_fq:
+ ice_rxq_pp_destroy(ring);
+
+ return err;
}
int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
@@ -648,18 +779,10 @@ int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
*/
static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi, struct ice_rx_ring *ring)
{
- if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
- ring->max_frame = ICE_MAX_FRAME_LEGACY_RX;
- ring->rx_buf_len = ICE_RXBUF_1664;
-#if (PAGE_SIZE < 8192)
- } else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
- (vsi->netdev->mtu <= ETH_DATA_LEN)) {
- ring->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
- ring->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
-#endif
+ if (!vsi->netdev) {
+ vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
} else {
- ring->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
- ring->rx_buf_len = ICE_RXBUF_3072;
+ vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
}
}
@@ -797,13 +920,11 @@ int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
return 0;
err_out:
- while (v_idx--)
- ice_free_q_vector(vsi, v_idx);
- dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
- vsi->num_q_vectors, vsi->vsi_num, err);
- vsi->num_q_vectors = 0;
- return err;
+ dev_info(dev, "Failed to allocate %d q_vectors for VSI %d, new value %d",
+ vsi->num_q_vectors, vsi->vsi_num, v_idx);
+ vsi->num_q_vectors = v_idx;
+ return v_idx ? 0 : err;
}
/**
@@ -883,13 +1004,49 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
}
/**
+ * ice_cfg_tstamp - Configure Tx time stamp queue
+ * @tx_ring: Tx ring to be configured with timestamping
+ *
+ * Return: 0 on success and a negative value on error.
+ */
+static int
+ice_cfg_tstamp(struct ice_tx_ring *tx_ring)
+{
+ DEFINE_RAW_FLEX(struct ice_aqc_set_txtime_qgrp, txtime_qg_buf,
+ txtimeqs, 1);
+ u8 txtime_buf_len = struct_size(txtime_qg_buf, txtimeqs, 1);
+ struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+ struct ice_txtime_ctx txtime_ctx = {};
+ struct ice_vsi *vsi = tx_ring->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u16 pf_q = tx_ring->reg_idx;
+ int err;
+
+ err = ice_setup_txtime_ctx(tstamp_ring, &txtime_ctx);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "Failed to setup Tx time queue context for queue %d, error: %d\n",
+ pf_q, err);
+ return err;
+ }
+ ice_pack_txtime_ctx(&txtime_ctx,
+ &txtime_qg_buf->txtimeqs[0].txtime_ctx);
+
+ tstamp_ring->tail = hw->hw_addr + E830_GLQTX_TXTIME_DBELL_LSB(pf_q);
+ return ice_aq_set_txtimeq(hw, pf_q, 1, txtime_qg_buf,
+ txtime_buf_len, NULL);
+}
+
+/**
* ice_vsi_cfg_txq - Configure single Tx queue
* @vsi: the VSI that queue belongs to
* @ring: Tx ring to be configured
* @qg_buf: queue group buffer
+ *
+ * Return: 0 on success and a negative value on error.
*/
static int
-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
+ice_vsi_cfg_txq(const struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_aqc_add_tx_qgrp *qg_buf)
{
u8 buf_len = struct_size(qg_buf, txqs, 1);
@@ -898,19 +1055,23 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_channel *ch = ring->ch;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
+ u32 pf_q, vsi_idx;
int status;
- u16 pf_q;
u8 tc;
/* Configure XPS */
ice_cfg_xps_tx_ring(ring);
pf_q = ring->reg_idx;
- ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
+ status = ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
+ if (status) {
+ dev_err(ice_pf_to_dev(pf), "Failed to setup Tx context for queue %d, error: %d\n",
+ pf_q, status);
+ return status;
+ }
/* copy context contents into the qg_buf */
qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
- ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
- ice_tlan_ctx_info);
+ ice_pack_txq_ctx(&tlan_ctx, &qg_buf->txqs[0].txq_ctx);
/* init queue specific tail reg. It is referred as
* transmit comm scheduler queue doorbell.
@@ -927,14 +1088,15 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
*/
ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);
- if (ch)
- status = ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0,
- ring->q_handle, 1, qg_buf, buf_len,
- NULL);
- else
- status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
- ring->q_handle, 1, qg_buf, buf_len,
- NULL);
+ if (ch) {
+ tc = 0;
+ vsi_idx = ch->ch_vsi->idx;
+ } else {
+ vsi_idx = vsi->idx;
+ }
+
+ status = ice_ena_vsi_txq(vsi->port_info, vsi_idx, tc, ring->q_handle,
+ 1, qg_buf, buf_len, NULL);
if (status) {
dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n",
status);
@@ -949,7 +1111,32 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
if (pf_q == le16_to_cpu(txq->txq_id))
ring->txq_teid = le32_to_cpu(txq->q_teid);
+ if (ice_is_txtime_ena(ring)) {
+ status = ice_alloc_setup_tstamp_ring(ring);
+ if (status) {
+ dev_err(ice_pf_to_dev(pf),
+ "Failed to allocate Tx timestamp ring, error: %d\n",
+ status);
+ goto err_setup_tstamp;
+ }
+
+ status = ice_cfg_tstamp(ring);
+ if (status) {
+ dev_err(ice_pf_to_dev(pf), "Failed to set Tx Time queue context, error: %d\n",
+ status);
+ goto err_cfg_tstamp;
+ }
+ }
return 0;
+
+err_cfg_tstamp:
+ ice_free_tx_tstamp_ring(ring);
+err_setup_tstamp:
+ ice_dis_vsi_txq(vsi->port_info, vsi_idx, tc, 1, &ring->q_handle,
+ &ring->reg_idx, &ring->txq_teid, ICE_NO_RESET,
+ tlan_ctx.vmvf_num, NULL);
+
+ return status;
}
int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
@@ -1208,3 +1395,148 @@ ice_fill_txq_meta(const struct ice_vsi *vsi, struct ice_tx_ring *ring,
txq_meta->tc = tc;
}
}
+
+/**
+ * ice_qp_reset_stats - Resets all stats for rings of given index
+ * @vsi: VSI that contains rings of interest
+ * @q_idx: ring index in array
+ */
+static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
+{
+ struct ice_vsi_stats *vsi_stat;
+ struct ice_pf *pf;
+
+ pf = vsi->back;
+ if (!pf->vsi_stats)
+ return;
+
+ vsi_stat = pf->vsi_stats[vsi->idx];
+ if (!vsi_stat)
+ return;
+
+ memset(&vsi_stat->rx_ring_stats[q_idx]->rx_stats, 0,
+ sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
+ memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
+ sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));
+ if (vsi->xdp_rings)
+ memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
+ sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
+}
+
+/**
+ * ice_qp_clean_rings - Cleans all the rings of a given index
+ * @vsi: VSI that contains rings of interest
+ * @q_idx: ring index in array
+ */
+static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
+{
+ ice_clean_tx_ring(vsi->tx_rings[q_idx]);
+ if (vsi->xdp_rings)
+ ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
+ ice_clean_rx_ring(vsi->rx_rings[q_idx]);
+}
+
+/**
+ * ice_qp_dis - Disables a queue pair
+ * @vsi: VSI of interest
+ * @q_idx: ring index in array
+ *
+ * Returns 0 on success, negative on failure.
+ */
+int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+{
+ struct ice_txq_meta txq_meta = { };
+ struct ice_q_vector *q_vector;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
+ int fail = 0;
+ int err;
+
+ if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
+ return -EINVAL;
+
+ tx_ring = vsi->tx_rings[q_idx];
+ rx_ring = vsi->rx_rings[q_idx];
+ q_vector = rx_ring->q_vector;
+
+ synchronize_net();
+ netif_carrier_off(vsi->netdev);
+ netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+
+ ice_qvec_dis_irq(vsi, rx_ring, q_vector);
+ ice_qvec_toggle_napi(vsi, q_vector, false);
+
+ ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
+ if (!fail)
+ fail = err;
+ if (vsi->xdp_rings) {
+ struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
+
+ memset(&txq_meta, 0, sizeof(txq_meta));
+ ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
+ &txq_meta);
+ if (!fail)
+ fail = err;
+ }
+
+ ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, false);
+ ice_qp_clean_rings(vsi, q_idx);
+ ice_qp_reset_stats(vsi, q_idx);
+
+ return fail;
+}
+
+/**
+ * ice_qp_ena - Enables a queue pair
+ * @vsi: VSI of interest
+ * @q_idx: ring index in array
+ *
+ * Returns 0 on success, negative on failure.
+ */
+int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
+{
+ struct ice_q_vector *q_vector;
+ int fail = 0;
+ bool link_up;
+ int err;
+
+ err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
+ if (!fail)
+ fail = err;
+
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
+
+ err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx);
+ if (!fail)
+ fail = err;
+ ice_set_ring_xdp(xdp_ring);
+ ice_tx_xsk_pool(vsi, q_idx);
+ }
+
+ err = ice_vsi_cfg_single_rxq(vsi, q_idx);
+ if (!fail)
+ fail = err;
+
+ q_vector = vsi->rx_rings[q_idx]->q_vector;
+ ice_qvec_cfg_msix(vsi, q_vector, q_idx);
+
+ err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
+ if (!fail)
+ fail = err;
+
+ ice_qvec_toggle_napi(vsi, q_vector, true);
+ ice_qvec_ena_irq(vsi, q_vector);
+
+ /* make sure NAPI sees updated ice_{t,x}_ring::xsk_pool */
+ synchronize_net();
+ ice_get_link_status(vsi->port_info, &link_up);
+ if (link_up) {
+ netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+ netif_carrier_on(vsi->netdev);
+ }
+
+ return fail;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h
index b711bc921928..d28294247599 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.h
+++ b/drivers/net/ethernet/intel/ice/ice_base.h
@@ -32,4 +32,7 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
void
ice_fill_txq_meta(const struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta);
+int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx);
+int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx);
+u16 ice_calc_ts_ring_count(struct ice_tx_ring *tx_ring);
#endif /* _ICE_BASE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_cgu_regs.h b/drivers/net/ethernet/intel/ice/ice_cgu_regs.h
deleted file mode 100644
index 10d9d74f3545..000000000000
--- a/drivers/net/ethernet/intel/ice/ice_cgu_regs.h
+++ /dev/null
@@ -1,181 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2018-2021, Intel Corporation. */
-
-#ifndef _ICE_CGU_REGS_H_
-#define _ICE_CGU_REGS_H_
-
-#define NAC_CGU_DWORD9 0x24
-union nac_cgu_dword9 {
- struct {
- u32 time_ref_freq_sel : 3;
- u32 clk_eref1_en : 1;
- u32 clk_eref0_en : 1;
- u32 time_ref_en : 1;
- u32 time_sync_en : 1;
- u32 one_pps_out_en : 1;
- u32 clk_ref_synce_en : 1;
- u32 clk_synce1_en : 1;
- u32 clk_synce0_en : 1;
- u32 net_clk_ref1_en : 1;
- u32 net_clk_ref0_en : 1;
- u32 clk_synce1_amp : 2;
- u32 misc6 : 1;
- u32 clk_synce0_amp : 2;
- u32 one_pps_out_amp : 2;
- u32 misc24 : 12;
- };
- u32 val;
-};
-
-#define NAC_CGU_DWORD16_E825C 0x40
-union nac_cgu_dword16_e825c {
- struct {
- u32 synce_remndr : 6;
- u32 synce_phlmt_en : 1;
- u32 misc13 : 17;
- u32 tspll_ck_refclkfreq : 8;
- };
- u32 val;
-};
-
-#define NAC_CGU_DWORD19 0x4c
-union nac_cgu_dword19 {
- struct {
- u32 tspll_fbdiv_intgr : 8;
- u32 fdpll_ulck_thr : 5;
- u32 misc15 : 3;
- u32 tspll_ndivratio : 4;
- u32 tspll_iref_ndivratio : 3;
- u32 misc19 : 1;
- u32 japll_ndivratio : 4;
- u32 japll_iref_ndivratio : 3;
- u32 misc27 : 1;
- };
- u32 val;
-};
-
-#define NAC_CGU_DWORD22 0x58
-union nac_cgu_dword22 {
- struct {
- u32 fdpll_frac_div_out_nc : 2;
- u32 fdpll_lock_int_for : 1;
- u32 synce_hdov_int_for : 1;
- u32 synce_lock_int_for : 1;
- u32 fdpll_phlead_slip_nc : 1;
- u32 fdpll_acc1_ovfl_nc : 1;
- u32 fdpll_acc2_ovfl_nc : 1;
- u32 synce_status_nc : 6;
- u32 fdpll_acc1f_ovfl : 1;
- u32 misc18 : 1;
- u32 fdpllclk_div : 4;
- u32 time1588clk_div : 4;
- u32 synceclk_div : 4;
- u32 synceclk_sel_div2 : 1;
- u32 fdpllclk_sel_div2 : 1;
- u32 time1588clk_sel_div2 : 1;
- u32 misc3 : 1;
- };
- u32 val;
-};
-
-#define NAC_CGU_DWORD23_E825C 0x5C
-union nac_cgu_dword23_e825c {
- struct {
- u32 cgupll_fbdiv_intgr : 10;
- u32 ux56pll_fbdiv_intgr : 10;
- u32 misc20 : 4;
- u32 ts_pll_enable : 1;
- u32 time_sync_tspll_align_sel : 1;
- u32 ext_synce_sel : 1;
- u32 ref1588_ck_div : 4;
- u32 time_ref_sel : 1;
-
- };
- u32 val;
-};
-
-#define NAC_CGU_DWORD24 0x60
-union nac_cgu_dword24 {
- struct {
- u32 tspll_fbdiv_frac : 22;
- u32 misc20 : 2;
- u32 ts_pll_enable : 1;
- u32 time_sync_tspll_align_sel : 1;
- u32 ext_synce_sel : 1;
- u32 ref1588_ck_div : 4;
- u32 time_ref_sel : 1;
- };
- u32 val;
-};
-
-#define TSPLL_CNTR_BIST_SETTINGS 0x344
-union tspll_cntr_bist_settings {
- struct {
- u32 i_irefgen_settling_time_cntr_7_0 : 8;
- u32 i_irefgen_settling_time_ro_standby_1_0 : 2;
- u32 reserved195 : 5;
- u32 i_plllock_sel_0 : 1;
- u32 i_plllock_sel_1 : 1;
- u32 i_plllock_cnt_6_0 : 7;
- u32 i_plllock_cnt_10_7 : 4;
- u32 reserved200 : 4;
- };
- u32 val;
-};
-
-#define TSPLL_RO_BWM_LF 0x370
-union tspll_ro_bwm_lf {
- struct {
- u32 bw_freqov_high_cri_7_0 : 8;
- u32 bw_freqov_high_cri_9_8 : 2;
- u32 biascaldone_cri : 1;
- u32 plllock_gain_tran_cri : 1;
- u32 plllock_true_lock_cri : 1;
- u32 pllunlock_flag_cri : 1;
- u32 afcerr_cri : 1;
- u32 afcdone_cri : 1;
- u32 feedfwrdgain_cal_cri_7_0 : 8;
- u32 m2fbdivmod_cri_7_0 : 8;
- };
- u32 val;
-};
-
-#define TSPLL_RO_LOCK_E825C 0x3f0
-union tspll_ro_lock_e825c {
- struct {
- u32 bw_freqov_high_cri_7_0 : 8;
- u32 bw_freqov_high_cri_9_8 : 2;
- u32 reserved455 : 1;
- u32 plllock_gain_tran_cri : 1;
- u32 plllock_true_lock_cri : 1;
- u32 pllunlock_flag_cri : 1;
- u32 afcerr_cri : 1;
- u32 afcdone_cri : 1;
- u32 feedfwrdgain_cal_cri_7_0 : 8;
- u32 reserved462 : 8;
- };
- u32 val;
-};
-
-#define TSPLL_BW_TDC_E825C 0x31c
-union tspll_bw_tdc_e825c {
- struct {
- u32 i_tdc_offset_lock_1_0 : 2;
- u32 i_bbthresh1_2_0 : 3;
- u32 i_bbthresh2_2_0 : 3;
- u32 i_tdcsel_1_0 : 2;
- u32 i_tdcovccorr_en_h : 1;
- u32 i_divretimeren : 1;
- u32 i_bw_ampmeas_window : 1;
- u32 i_bw_lowerbound_2_0 : 3;
- u32 i_bw_upperbound_2_0 : 3;
- u32 i_bw_mode_1_0 : 2;
- u32 i_ft_mode_sel_2_0 : 3;
- u32 i_bwphase_4_0 : 5;
- u32 i_plllock_sel_1_0 : 2;
- u32 i_afc_divratio : 1;
- };
- u32 val;
-};
-
-#endif /* _ICE_CGU_REGS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index b22e71dc59d4..046bc9c65c51 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -6,6 +6,7 @@
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
#include "ice_ptp_hw.h"
+#include <linux/packing.h>
#define ICE_PF_RESET_WAIT_COUNT 300
#define ICE_MAX_NETLIST_SIZE 10
@@ -170,6 +171,15 @@ static int ice_set_mac_type(struct ice_hw *hw)
case ICE_DEV_ID_E830_XXV_QSFP:
case ICE_DEV_ID_E830C_SFP:
case ICE_DEV_ID_E830_XXV_SFP:
+ case ICE_DEV_ID_E835CC_BACKPLANE:
+ case ICE_DEV_ID_E835CC_QSFP56:
+ case ICE_DEV_ID_E835CC_SFP:
+ case ICE_DEV_ID_E835C_BACKPLANE:
+ case ICE_DEV_ID_E835C_QSFP:
+ case ICE_DEV_ID_E835C_SFP:
+ case ICE_DEV_ID_E835_L_BACKPLANE:
+ case ICE_DEV_ID_E835_L_QSFP:
+ case ICE_DEV_ID_E835_L_SFP:
hw->mac_type = ICE_MAC_E830;
break;
default:
@@ -185,7 +195,7 @@ static int ice_set_mac_type(struct ice_hw *hw)
* ice_is_generic_mac - check if device's mac_type is generic
* @hw: pointer to the hardware structure
*
- * Return: true if mac_type is generic (with SBQ support), false if not
+ * Return: true if mac_type is ICE_MAC_GENERIC*, false otherwise.
*/
bool ice_is_generic_mac(struct ice_hw *hw)
{
@@ -194,117 +204,39 @@ bool ice_is_generic_mac(struct ice_hw *hw)
}
/**
- * ice_is_e810
- * @hw: pointer to the hardware structure
- *
- * returns true if the device is E810 based, false if not.
- */
-bool ice_is_e810(struct ice_hw *hw)
-{
- return hw->mac_type == ICE_MAC_E810;
-}
-
-/**
- * ice_is_e810t
- * @hw: pointer to the hardware structure
+ * ice_is_pf_c827 - check if pf contains c827 phy
+ * @hw: pointer to the hw struct
*
- * returns true if the device is E810T based, false if not.
+ * Return: true if the device has c827 phy.
*/
-bool ice_is_e810t(struct ice_hw *hw)
+static bool ice_is_pf_c827(struct ice_hw *hw)
{
- switch (hw->device_id) {
- case ICE_DEV_ID_E810C_SFP:
- switch (hw->subsystem_device_id) {
- case ICE_SUBDEV_ID_E810T:
- case ICE_SUBDEV_ID_E810T2:
- case ICE_SUBDEV_ID_E810T3:
- case ICE_SUBDEV_ID_E810T4:
- case ICE_SUBDEV_ID_E810T6:
- case ICE_SUBDEV_ID_E810T7:
- return true;
- }
- break;
- case ICE_DEV_ID_E810C_QSFP:
- switch (hw->subsystem_device_id) {
- case ICE_SUBDEV_ID_E810T2:
- case ICE_SUBDEV_ID_E810T3:
- case ICE_SUBDEV_ID_E810T5:
- return true;
- }
- break;
- default:
- break;
- }
-
- return false;
-}
+ struct ice_aqc_get_link_topo cmd = {};
+ u8 node_part_number;
+ u16 node_handle;
+ int status;
-/**
- * ice_is_e822 - Check if a device is E822 family device
- * @hw: pointer to the hardware structure
- *
- * Return: true if the device is E822 based, false if not.
- */
-bool ice_is_e822(struct ice_hw *hw)
-{
- switch (hw->device_id) {
- case ICE_DEV_ID_E822C_BACKPLANE:
- case ICE_DEV_ID_E822C_QSFP:
- case ICE_DEV_ID_E822C_SFP:
- case ICE_DEV_ID_E822C_10G_BASE_T:
- case ICE_DEV_ID_E822C_SGMII:
- case ICE_DEV_ID_E822L_BACKPLANE:
- case ICE_DEV_ID_E822L_SFP:
- case ICE_DEV_ID_E822L_10G_BASE_T:
- case ICE_DEV_ID_E822L_SGMII:
- return true;
- default:
+ if (hw->mac_type != ICE_MAC_E810)
return false;
- }
-}
-/**
- * ice_is_e823
- * @hw: pointer to the hardware structure
- *
- * returns true if the device is E823-L or E823-C based, false if not.
- */
-bool ice_is_e823(struct ice_hw *hw)
-{
- switch (hw->device_id) {
- case ICE_DEV_ID_E823L_BACKPLANE:
- case ICE_DEV_ID_E823L_SFP:
- case ICE_DEV_ID_E823L_10G_BASE_T:
- case ICE_DEV_ID_E823L_1GBE:
- case ICE_DEV_ID_E823L_QSFP:
- case ICE_DEV_ID_E823C_BACKPLANE:
- case ICE_DEV_ID_E823C_QSFP:
- case ICE_DEV_ID_E823C_SFP:
- case ICE_DEV_ID_E823C_10G_BASE_T:
- case ICE_DEV_ID_E823C_SGMII:
+ if (hw->device_id != ICE_DEV_ID_E810C_QSFP)
return true;
- default:
+
+ cmd.addr.topo_params.node_type_ctx =
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) |
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT);
+ cmd.addr.topo_params.index = 0;
+
+ status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
+ &node_handle);
+
+ if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
return false;
- }
-}
-/**
- * ice_is_e825c - Check if a device is E825C family device
- * @hw: pointer to the hardware structure
- *
- * Return: true if the device is E825-C based, false if not.
- */
-bool ice_is_e825c(struct ice_hw *hw)
-{
- switch (hw->device_id) {
- case ICE_DEV_ID_E825C_BACKPLANE:
- case ICE_DEV_ID_E825C_QSFP:
- case ICE_DEV_ID_E825C_SFP:
- case ICE_DEV_ID_E825C_SGMII:
+ if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE)
return true;
- default:
- return false;
- }
+
+ return false;
}
/**
@@ -316,7 +248,7 @@ bool ice_is_e825c(struct ice_hw *hw)
*/
int ice_clear_pf_cfg(struct ice_hw *hw)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
@@ -344,12 +276,12 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
{
struct ice_aqc_manage_mac_read_resp *resp;
struct ice_aqc_manage_mac_read *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
u16 flags;
u8 i;
- cmd = &desc.params.mac_read;
+ cmd = libie_aq_raw(&desc);
if (buf_size < sizeof(*resp))
return -EINVAL;
@@ -398,12 +330,12 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
{
struct ice_aqc_get_phy_caps *cmd;
u16 pcaps_size = sizeof(*pcaps);
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
const char *prefix;
struct ice_hw *hw;
int status;
- cmd = &desc.params.get_phy;
+ cmd = libie_aq_raw(&desc);
if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
return -EINVAL;
@@ -492,9 +424,9 @@ ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
struct ice_sq_cd *cd)
{
struct ice_aqc_get_link_topo *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.get_link_topo;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
@@ -522,19 +454,20 @@ int
ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
u8 *node_part_number, u16 *node_handle)
{
- struct ice_aq_desc desc;
+ struct ice_aqc_get_link_topo *resp;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
- desc.params.get_link_topo = *cmd;
+ resp = libie_aq_raw(&desc);
+ *resp = *cmd;
if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
return -EINTR;
if (node_handle)
- *node_handle =
- le16_to_cpu(desc.params.get_link_topo.addr.handle);
+ *node_handle = le16_to_cpu(resp->addr.handle);
if (node_part_number)
- *node_part_number = desc.params.get_link_topo.node_part_num;
+ *node_part_number = resp->node_part_num;
return 0;
}
@@ -542,7 +475,8 @@ ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
/**
* ice_find_netlist_node
* @hw: pointer to the hw struct
- * @node_type_ctx: type of netlist node to look for
+ * @node_type: type of netlist node to look for
+ * @ctx: context of the search
* @node_part_number: node part number to look for
* @node_handle: output parameter if node found - optional
*
@@ -552,10 +486,12 @@ ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
* valid if the function returns zero, and should be ignored on any non-zero
* return value.
*
- * Returns: 0 if the node is found, -ENOENT if no handle was found, and
- * a negative error code on failure to access the AQ.
+ * Return:
+ * * 0 if the node is found,
+ * * -ENOENT if no handle was found,
+ * * negative error code on failure to access the AQ.
*/
-static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx,
+static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type, u8 ctx,
u8 node_part_number, u16 *node_handle)
{
u8 idx;
@@ -566,8 +502,8 @@ static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx,
int status;
cmd.addr.topo_params.node_type_ctx =
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M,
- node_type_ctx);
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, node_type) |
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ctx);
cmd.addr.topo_params.index = idx;
status = ice_aq_get_netlist_node(hw, &cmd,
@@ -754,8 +690,8 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *li_old, *li;
enum ice_media_type *hw_media_type;
struct ice_fc_info *hw_fc_info;
+ struct libie_aq_desc desc;
bool tx_pause, rx_pause;
- struct ice_aq_desc desc;
struct ice_hw *hw;
u16 cmd_flags;
int status;
@@ -770,7 +706,7 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
- resp = &desc.params.get_link_status;
+ resp = libie_aq_raw(&desc);
resp->cmd_flags = cpu_to_le16(cmd_flags);
resp->lport_num = pi->lport;
@@ -899,9 +835,9 @@ int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
struct ice_aqc_set_mac_cfg *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.set_mac_cfg;
+ cmd = libie_aq_raw(&desc);
if (max_frame_size == 0)
return -EINVAL;
@@ -1022,6 +958,64 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
}
/**
+ * ice_wait_for_fw - wait for full FW readiness
+ * @hw: pointer to the hardware structure
+ * @timeout: milliseconds that can elapse before timing out
+ *
+ * Return: 0 on success, -ETIMEDOUT on timeout.
+ */
+static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
+{
+ int fw_loading;
+ u32 elapsed = 0;
+
+ while (elapsed <= timeout) {
+ fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
+
+		/* firmware has not finished loading yet, keep waiting */
+ if (fw_loading) {
+ elapsed += 100;
+ msleep(100);
+ continue;
+ }
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
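+
+/* Note: the loop above samples GL_MNG_FWSM in 100 ms steps, so the effective
+ * wait is rounded up to the next 100 ms boundary beyond the requested timeout.
+ */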
+
+static int __fwlog_send_cmd(void *priv, struct libie_aq_desc *desc, void *buf,
+ u16 size)
+{
+ struct ice_hw *hw = priv;
+
+ return ice_aq_send_cmd(hw, desc, buf, size, NULL);
+}
+
+static int __fwlog_init(struct ice_hw *hw)
+{
+ struct ice_pf *pf = hw->back;
+ struct libie_fwlog_api api = {
+ .pdev = pf->pdev,
+ .send_cmd = __fwlog_send_cmd,
+ .priv = hw,
+ };
+ int err;
+
+ /* only support fw log commands on PF 0 */
+ if (hw->bus.func)
+ return -EINVAL;
+
+ err = ice_debugfs_pf_init(pf);
+ if (err)
+ return err;
+
+ api.debugfs_root = pf->ice_debugfs_pf;
+
+ return libie_fwlog_init(&hw->fwlog, &api);
+}
+
+/**
* ice_init_hw - main hardware initialization routine
* @hw: pointer to the hardware structure
*/
@@ -1049,7 +1043,7 @@ int ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_cqinit;
- status = ice_fwlog_init(hw);
+ status = __fwlog_init(hw);
if (status)
ice_debug(hw, ICE_DBG_FW_LOG, "Error initializing FW logging: %d\n",
status);
@@ -1167,11 +1161,27 @@ int ice_init_hw(struct ice_hw *hw)
status = ice_init_hw_tbls(hw);
if (status)
goto err_unroll_fltr_mgmt_struct;
+
+ ice_init_dev_hw(hw->back);
+
mutex_init(&hw->tnl_lock);
ice_init_chk_recipe_reuse_support(hw);
- return 0;
+	/* Some cards require longer initialization times because they need to
+	 * load FW from an external source. This can take up to half a minute.
+	 */
+ if (ice_is_pf_c827(hw)) {
+ status = ice_wait_for_fw(hw, 30000);
+ if (status) {
+			dev_err(ice_hw_to_dev(hw), "ice_wait_for_fw timed out\n");
+ goto err_unroll_fltr_mgmt_struct;
+ }
+ }
+ hw->lane_num = ice_get_phy_lane_number(hw);
+
+ return 0;
err_unroll_fltr_mgmt_struct:
ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
@@ -1183,6 +1193,16 @@ err_unroll_cqinit:
return status;
}
+static void __fwlog_deinit(struct ice_hw *hw)
+{
+ /* only support fw log commands on PF 0 */
+ if (hw->bus.func)
+ return;
+
+ ice_debugfs_pf_deinit(hw->back);
+ libie_fwlog_deinit(&hw->fwlog);
+}
+
/**
* ice_deinit_hw - unroll initialization operations done by ice_init_hw
* @hw: pointer to the hardware structure
@@ -1201,8 +1221,7 @@ void ice_deinit_hw(struct ice_hw *hw)
ice_free_seg(hw);
ice_free_hw_tbls(hw);
mutex_destroy(&hw->tnl_lock);
-
- ice_fwlog_deinit(hw);
+ __fwlog_deinit(hw);
ice_destroy_all_ctrlq(hw);
/* Clear VSI contexts if not already cleared */
@@ -1357,39 +1376,51 @@ int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
}
/**
- * ice_copy_rxq_ctx_to_hw
+ * ice_copy_rxq_ctx_to_hw - Copy packed Rx queue context to HW registers
* @hw: pointer to the hardware structure
- * @ice_rxq_ctx: pointer to the rxq context
+ * @rxq_ctx: pointer to the packed Rx queue context
* @rxq_index: the index of the Rx queue
- *
- * Copies rxq context from dense structure to HW register space
*/
-static int
-ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
+static void ice_copy_rxq_ctx_to_hw(struct ice_hw *hw,
+ const ice_rxq_ctx_buf_t *rxq_ctx,
+ u32 rxq_index)
{
- u8 i;
+ /* Copy each dword separately to HW */
+ for (int i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
+ u32 ctx = ((const u32 *)rxq_ctx)[i];
- if (!ice_rxq_ctx)
- return -EINVAL;
+ wr32(hw, QRX_CONTEXT(i, rxq_index), ctx);
- if (rxq_index > QRX_CTRL_MAX_INDEX)
- return -EINVAL;
+ ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, ctx);
+ }
+}
- /* Copy each dword separately to HW */
- for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
- wr32(hw, QRX_CONTEXT(i, rxq_index),
- *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
+/**
+ * ice_copy_rxq_ctx_from_hw - Copy packed Rx Queue context from HW registers
+ * @hw: pointer to the hardware structure
+ * @rxq_ctx: pointer to the packed Rx queue context
+ * @rxq_index: the index of the Rx queue
+ */
+static void ice_copy_rxq_ctx_from_hw(struct ice_hw *hw,
+ ice_rxq_ctx_buf_t *rxq_ctx,
+ u32 rxq_index)
+{
+ u32 *ctx = (u32 *)rxq_ctx;
- ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
- *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
- }
+ /* Copy each dword separately from HW */
+ for (int i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++, ctx++) {
+ *ctx = rd32(hw, QRX_CONTEXT(i, rxq_index));
- return 0;
+ ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, *ctx);
+ }
}
+#define ICE_CTX_STORE(struct_name, struct_field, width, lsb) \
+ PACKED_FIELD((lsb) + (width) - 1, (lsb), struct struct_name, struct_field)
+
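+/* For illustration: with the definition above,
+ *	ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0)
+ * expands to
+ *	PACKED_FIELD(12, 0, struct ice_rlan_ctx, head)
+ * i.e. the "head" member occupies bits 12..0 of the packed buffer.
+ */
+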
/* LAN Rx Queue Context */
-static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
- /* Field Width LSB */
+static const struct packed_field_u8 ice_rlan_ctx_fields[] = {
+ /* Field Width LSB */
ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
@@ -1410,35 +1441,90 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
- { 0 }
};
/**
- * ice_write_rxq_ctx
+ * ice_pack_rxq_ctx - Pack Rx queue context into a HW buffer
+ * @ctx: the Rx queue context to pack
+ * @buf: the HW buffer to pack into
+ *
+ * Pack the Rx queue context from the CPU-friendly unpacked buffer into its
+ * bit-packed HW layout.
+ */
+static void ice_pack_rxq_ctx(const struct ice_rlan_ctx *ctx,
+ ice_rxq_ctx_buf_t *buf)
+{
+ pack_fields(buf, sizeof(*buf), ctx, ice_rlan_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
+
+/**
+ * ice_unpack_rxq_ctx - Unpack Rx queue context from a HW buffer
+ * @buf: the HW buffer to unpack from
+ * @ctx: the Rx queue context to unpack
+ *
+ * Unpack the Rx queue context from the HW buffer into the CPU-friendly
+ * structure.
+ */
+static void ice_unpack_rxq_ctx(const ice_rxq_ctx_buf_t *buf,
+ struct ice_rlan_ctx *ctx)
+{
+ unpack_fields(buf, sizeof(*buf), ctx, ice_rlan_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
+
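+/* A minimal round-trip sketch (illustrative only, not called anywhere in this
+ * file), assuming every member referenced by ice_rlan_ctx_fields fits its
+ * declared width:
+ *
+ *	struct ice_rlan_ctx in = { .head = 5, .cpuid = 3 }, out = {};
+ *	ice_rxq_ctx_buf_t pkt = {};
+ *
+ *	ice_pack_rxq_ctx(&in, &pkt);
+ *	ice_unpack_rxq_ctx(&pkt, &out);
+ *
+ * after which out.head == 5 and out.cpuid == 3 are expected to hold.
+ */
+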
+/**
+ * ice_write_rxq_ctx - Write Rx Queue context to hardware
* @hw: pointer to the hardware structure
- * @rlan_ctx: pointer to the rxq context
+ * @rlan_ctx: pointer to the unpacked Rx queue context
* @rxq_index: the index of the Rx queue
*
- * Converts rxq context from sparse to dense structure and then writes
- * it to HW register space and enables the hardware to prefetch descriptors
- * instead of only fetching them on demand
+ * Pack the sparse Rx Queue context into dense hardware format and write it
+ * into the HW register space.
+ *
+ * Return: 0 on success, or -EINVAL if the Rx queue index is invalid.
*/
int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index)
{
- u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
+ ice_rxq_ctx_buf_t buf = {};
+
+ if (rxq_index > QRX_CTRL_MAX_INDEX)
+ return -EINVAL;
+
+ ice_pack_rxq_ctx(rlan_ctx, &buf);
+ ice_copy_rxq_ctx_to_hw(hw, &buf, rxq_index);
+
+ return 0;
+}
+
+/**
+ * ice_read_rxq_ctx - Read Rx queue context from HW
+ * @hw: pointer to the hardware structure
+ * @rlan_ctx: pointer to the Rx queue context
+ * @rxq_index: the index of the Rx queue
+ *
+ * Read the Rx queue context from the hardware registers, and unpack it into
+ * the sparse Rx queue context structure.
+ *
+ * Return: 0 on success, or -EINVAL if the Rx queue index is invalid.
+ */
+int ice_read_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
+ u32 rxq_index)
+{
+ ice_rxq_ctx_buf_t buf = {};
- if (!rlan_ctx)
+ if (rxq_index > QRX_CTRL_MAX_INDEX)
return -EINVAL;
- rlan_ctx->prefena = 1;
+ ice_copy_rxq_ctx_from_hw(hw, &buf, rxq_index);
+ ice_unpack_rxq_ctx(&buf, rlan_ctx);
- ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
- return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
+ return 0;
}
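+
+/* Usage sketch (hypothetical caller, error handling elided): read the current
+ * context of queue 0, force descriptor prefetch on, and write it back:
+ *
+ *	struct ice_rlan_ctx ctx = {};
+ *
+ *	ice_read_rxq_ctx(hw, &ctx, 0);
+ *	ctx.prefena = 1;
+ *	ice_write_rxq_ctx(hw, &ctx, 0);
+ */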
/* LAN Tx Queue Context */
-const struct ice_ctx_ele ice_tlan_ctx_info[] = {
+static const struct packed_field_u8 ice_tlan_ctx_fields[] = {
/* Field Width LSB */
ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
@@ -1467,10 +1553,227 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = {
ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
- ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171),
- { 0 }
};
+/**
+ * ice_pack_txq_ctx - Pack Tx queue context into Admin Queue buffer
+ * @ctx: the Tx queue context to pack
+ * @buf: the Admin Queue HW buffer to pack into
+ *
+ * Pack the Tx queue context from the CPU-friendly unpacked buffer into its
+ * bit-packed Admin Queue layout.
+ */
+void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf)
+{
+ pack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
+
+/**
+ * ice_pack_txq_ctx_full - Pack Tx queue context into a HW buffer
+ * @ctx: the Tx queue context to pack
+ * @buf: the HW buffer to pack into
+ *
+ * Pack the Tx queue context from the CPU-friendly unpacked buffer into its
+ * bit-packed HW layout, including the internal data portion.
+ */
+static void ice_pack_txq_ctx_full(const struct ice_tlan_ctx *ctx,
+ ice_txq_ctx_buf_full_t *buf)
+{
+ pack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
+
+/**
+ * ice_unpack_txq_ctx_full - Unpack Tx queue context from a HW buffer
+ * @buf: the HW buffer to unpack from
+ * @ctx: the Tx queue context to unpack
+ *
+ * Unpack the Tx queue context from the HW buffer (including the full internal
+ * state) into the CPU-friendly structure.
+ */
+static void ice_unpack_txq_ctx_full(const ice_txq_ctx_buf_full_t *buf,
+ struct ice_tlan_ctx *ctx)
+{
+ unpack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
+
+/**
+ * ice_copy_txq_ctx_from_hw - Copy Tx Queue context from HW registers
+ * @hw: pointer to the hardware structure
+ * @txq_ctx: pointer to the packed Tx queue context, including internal state
+ * @txq_index: the index of the Tx queue
+ *
+ * Copy Tx Queue context from HW register space to dense structure
+ */
+static void ice_copy_txq_ctx_from_hw(struct ice_hw *hw,
+ ice_txq_ctx_buf_full_t *txq_ctx,
+ u32 txq_index)
+{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
+ u32 *ctx = (u32 *)txq_ctx;
+ u32 txq_base, reg;
+
+ /* Get Tx queue base within card space */
+ txq_base = rd32(hw, PFLAN_TX_QALLOC(hw->pf_id));
+ txq_base = FIELD_GET(PFLAN_TX_QALLOC_FIRSTQ_M, txq_base);
+
+ reg = FIELD_PREP(GLCOMM_QTX_CNTX_CTL_CMD_M,
+ GLCOMM_QTX_CNTX_CTL_CMD_READ) |
+ FIELD_PREP(GLCOMM_QTX_CNTX_CTL_QUEUE_ID_M,
+ txq_base + txq_index) |
+ GLCOMM_QTX_CNTX_CTL_CMD_EXEC_M;
+
+ /* Prevent other PFs on the same adapter from accessing the Tx queue
+ * context interface concurrently.
+ */
+ spin_lock(&pf->adapter->txq_ctx_lock);
+
+ wr32(hw, GLCOMM_QTX_CNTX_CTL, reg);
+ ice_flush(hw);
+
+ /* Copy each dword separately from HW */
+ for (int i = 0; i < ICE_TXQ_CTX_FULL_SIZE_DWORDS; i++, ctx++) {
+ *ctx = rd32(hw, GLCOMM_QTX_CNTX_DATA(i));
+
+ ice_debug(hw, ICE_DBG_QCTX, "qtxdata[%d]: %08X\n", i, *ctx);
+ }
+
+ spin_unlock(&pf->adapter->txq_ctx_lock);
+}
+
+/**
+ * ice_copy_txq_ctx_to_hw - Copy Tx Queue context into HW registers
+ * @hw: pointer to the hardware structure
+ * @txq_ctx: pointer to the packed Tx queue context, including internal state
+ * @txq_index: the index of the Tx queue
+ */
+static void ice_copy_txq_ctx_to_hw(struct ice_hw *hw,
+ const ice_txq_ctx_buf_full_t *txq_ctx,
+ u32 txq_index)
+{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
+ u32 txq_base, reg;
+
+ /* Get Tx queue base within card space */
+ txq_base = rd32(hw, PFLAN_TX_QALLOC(hw->pf_id));
+ txq_base = FIELD_GET(PFLAN_TX_QALLOC_FIRSTQ_M, txq_base);
+
+ reg = FIELD_PREP(GLCOMM_QTX_CNTX_CTL_CMD_M,
+ GLCOMM_QTX_CNTX_CTL_CMD_WRITE_NO_DYN) |
+ FIELD_PREP(GLCOMM_QTX_CNTX_CTL_QUEUE_ID_M,
+ txq_base + txq_index) |
+ GLCOMM_QTX_CNTX_CTL_CMD_EXEC_M;
+
+ /* Prevent other PFs on the same adapter from accessing the Tx queue
+ * context interface concurrently.
+ */
+ spin_lock(&pf->adapter->txq_ctx_lock);
+
+ /* Copy each dword separately to HW */
+ for (int i = 0; i < ICE_TXQ_CTX_FULL_SIZE_DWORDS; i++) {
+ u32 ctx = ((const u32 *)txq_ctx)[i];
+
+ wr32(hw, GLCOMM_QTX_CNTX_DATA(i), ctx);
+
+ ice_debug(hw, ICE_DBG_QCTX, "qtxdata[%d]: %08X\n", i, ctx);
+ }
+
+ wr32(hw, GLCOMM_QTX_CNTX_CTL, reg);
+ ice_flush(hw);
+
+ spin_unlock(&pf->adapter->txq_ctx_lock);
+}
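+
+/* Note the ordering difference between the two helpers above: a read issues
+ * the control command first and then drains GLCOMM_QTX_CNTX_DATA, while a
+ * write fills the data registers first and only then kicks the control
+ * command.
+ */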
+
+/**
+ * ice_read_txq_ctx - Read Tx queue context from HW
+ * @hw: pointer to the hardware structure
+ * @tlan_ctx: pointer to the Tx queue context
+ * @txq_index: the index of the Tx queue
+ *
+ * Read the Tx queue context from the HW registers, then unpack it into the
+ * ice_tlan_ctx structure for use.
+ *
+ * Return: 0 on success, or -EINVAL on an invalid Tx queue index.
+ */
+int ice_read_txq_ctx(struct ice_hw *hw, struct ice_tlan_ctx *tlan_ctx,
+ u32 txq_index)
+{
+ ice_txq_ctx_buf_full_t buf = {};
+
+ if (txq_index > QTX_COMM_HEAD_MAX_INDEX)
+ return -EINVAL;
+
+ ice_copy_txq_ctx_from_hw(hw, &buf, txq_index);
+ ice_unpack_txq_ctx_full(&buf, tlan_ctx);
+
+ return 0;
+}
+
+/**
+ * ice_write_txq_ctx - Write Tx queue context to HW
+ * @hw: pointer to the hardware structure
+ * @tlan_ctx: pointer to the Tx queue context
+ * @txq_index: the index of the Tx queue
+ *
+ * Pack the Tx queue context into the dense HW layout, then write it into the
+ * HW registers.
+ *
+ * Return: 0 on success, or -EINVAL on an invalid Tx queue index.
+ */
+int ice_write_txq_ctx(struct ice_hw *hw, struct ice_tlan_ctx *tlan_ctx,
+ u32 txq_index)
+{
+ ice_txq_ctx_buf_full_t buf = {};
+
+ if (txq_index > QTX_COMM_HEAD_MAX_INDEX)
+ return -EINVAL;
+
+ ice_pack_txq_ctx_full(tlan_ctx, &buf);
+ ice_copy_txq_ctx_to_hw(hw, &buf, txq_index);
+
+ return 0;
+}
+
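+/* Sketch of a possible caller (hypothetical, not part of this file): preserve
+ * a Tx queue context across a reconfiguration by reading, adjusting the
+ * unpacked fields, and writing back:
+ *
+ *	struct ice_tlan_ctx tctx = {};
+ *
+ *	if (!ice_read_txq_ctx(hw, &tctx, txq_index))
+ *		ice_write_txq_ctx(hw, &tctx, txq_index);
+ */
+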
+/* Tx time Queue Context */
+static const struct packed_field_u8 ice_txtime_ctx_fields[] = {
+ /* Field Width LSB */
+ ICE_CTX_STORE(ice_txtime_ctx, base, 57, 0),
+ ICE_CTX_STORE(ice_txtime_ctx, pf_num, 3, 57),
+ ICE_CTX_STORE(ice_txtime_ctx, vmvf_num, 10, 60),
+ ICE_CTX_STORE(ice_txtime_ctx, vmvf_type, 2, 70),
+ ICE_CTX_STORE(ice_txtime_ctx, src_vsi, 10, 72),
+ ICE_CTX_STORE(ice_txtime_ctx, cpuid, 8, 82),
+ ICE_CTX_STORE(ice_txtime_ctx, tphrd_desc, 1, 90),
+ ICE_CTX_STORE(ice_txtime_ctx, qlen, 13, 91),
+ ICE_CTX_STORE(ice_txtime_ctx, timer_num, 1, 104),
+ ICE_CTX_STORE(ice_txtime_ctx, txtime_ena_q, 1, 105),
+ ICE_CTX_STORE(ice_txtime_ctx, drbell_mode_32, 1, 106),
+ ICE_CTX_STORE(ice_txtime_ctx, ts_res, 4, 107),
+ ICE_CTX_STORE(ice_txtime_ctx, ts_round_type, 2, 111),
+ ICE_CTX_STORE(ice_txtime_ctx, ts_pacing_slot, 3, 113),
+ ICE_CTX_STORE(ice_txtime_ctx, merging_ena, 1, 116),
+ ICE_CTX_STORE(ice_txtime_ctx, ts_fetch_prof_id, 4, 117),
+ ICE_CTX_STORE(ice_txtime_ctx, ts_fetch_cache_line_aln_thld, 4, 121),
+ ICE_CTX_STORE(ice_txtime_ctx, tx_pipe_delay_mode, 1, 125),
+};
+
+/**
+ * ice_pack_txtime_ctx - pack Tx time queue context into a HW buffer
+ * @ctx: the Tx time queue context to pack
+ * @buf: the HW buffer to pack into
+ *
+ * Pack the Tx time queue context from the CPU-friendly unpacked buffer into
+ * its bit-packed HW layout.
+ */
+void ice_pack_txtime_ctx(const struct ice_txtime_ctx *ctx,
+ ice_txtime_ctx_buf_t *buf)
+{
+ pack_fields(buf, sizeof(*buf), ctx, ice_txtime_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
+
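+/* Sketch only: a Tx time queue setup path (outside this hunk) is expected to
+ * fill struct ice_txtime_ctx and pack it into the Admin Queue buffer before
+ * issuing the corresponding command, roughly:
+ *
+ *	struct ice_txtime_ctx ttc = { .qlen = 512, .txtime_ena_q = 1 };
+ *	ice_txtime_ctx_buf_t ttbuf = {};
+ *
+ *	ice_pack_txtime_ctx(&ttc, &ttbuf);
+ */
+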
/* Sideband Queue command wrappers */
/**
@@ -1486,7 +1789,7 @@ ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
return ice_sq_send_cmd(hw, ice_get_sbq(hw),
- (struct ice_aq_desc *)desc, buf, buf_size, cd);
+ (struct libie_aq_desc *)desc, buf, buf_size, cd);
}
/**
@@ -1571,10 +1874,10 @@ static bool ice_should_retry_sq_send_cmd(u16 opcode)
*/
static int
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
- struct ice_aq_desc *desc, void *buf, u16 buf_size,
+ struct libie_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc_cpy;
+ struct libie_aq_desc desc_cpy;
bool is_cmd_for_retry;
u8 idx = 0;
u16 opcode;
@@ -1595,7 +1898,7 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
if (!is_cmd_for_retry || !status ||
- hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
+ hw->adminq.sq_last_status != LIBIE_AQ_RC_EBUSY)
break;
memcpy(desc, &desc_cpy, sizeof(desc_cpy));
@@ -1618,10 +1921,10 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
* Helper function to send FW Admin Queue commands to the FW Admin Queue.
*/
int
-ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
+ice_aq_send_cmd(struct ice_hw *hw, struct libie_aq_desc *desc, void *buf,
u16 buf_size, struct ice_sq_cd *cd)
{
- struct ice_aqc_req_res *cmd = &desc->params.res_owner;
+ struct libie_aqc_req_res *cmd = libie_aq_raw(desc);
bool lock_acquired = false;
int status;
@@ -1652,7 +1955,7 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
case ice_aqc_opc_get_recipe_to_profile:
break;
case ice_aqc_opc_release_res:
- if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
+ if (le16_to_cpu(cmd->res_id) == LIBIE_AQC_RES_ID_GLBL_LOCK)
break;
fallthrough;
default:
@@ -1677,8 +1980,8 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
*/
int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
- struct ice_aqc_get_ver *resp;
- struct ice_aq_desc desc;
+ struct libie_aqc_get_ver *resp;
+ struct libie_aq_desc desc;
int status;
resp = &desc.params.get_ver;
@@ -1714,8 +2017,8 @@ int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
struct ice_sq_cd *cd)
{
- struct ice_aqc_driver_ver *cmd;
- struct ice_aq_desc desc;
+ struct libie_aqc_driver_ver *cmd;
+ struct libie_aq_desc desc;
u16 len;
cmd = &desc.params.driver_ver;
@@ -1725,7 +2028,7 @@ ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
cmd->major_ver = dv->major_ver;
cmd->minor_ver = dv->minor_ver;
cmd->build_ver = dv->build_ver;
@@ -1750,9 +2053,9 @@ ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
struct ice_aqc_q_shutdown *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.q_shutdown;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
@@ -1793,8 +2096,8 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
struct ice_sq_cd *cd)
{
- struct ice_aqc_req_res *cmd_resp;
- struct ice_aq_desc desc;
+ struct libie_aqc_req_res *cmd_resp;
+ struct libie_aq_desc desc;
int status;
cmd_resp = &desc.params.res_owner;
@@ -1816,20 +2119,20 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
/* Global config lock response utilizes an additional status field.
*
* If the Global config lock resource is held by some other driver, the
- * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
+ * command completes with LIBIE_AQ_RES_GLBL_IN_PROG in the status field
* and the timeout field indicates the maximum time the current owner
* of the resource has to free it.
*/
if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
- if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
+ if (le16_to_cpu(cmd_resp->status) == LIBIE_AQ_RES_GLBL_SUCCESS) {
*timeout = le32_to_cpu(cmd_resp->timeout);
return 0;
} else if (le16_to_cpu(cmd_resp->status) ==
- ICE_AQ_RES_GLBL_IN_PROG) {
+ LIBIE_AQ_RES_GLBL_IN_PROG) {
*timeout = le32_to_cpu(cmd_resp->timeout);
return -EIO;
} else if (le16_to_cpu(cmd_resp->status) ==
- ICE_AQ_RES_GLBL_DONE) {
+ LIBIE_AQ_RES_GLBL_DONE) {
return -EALREADY;
}
@@ -1842,7 +2145,7 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
* with a busy return value and the timeout field indicates the maximum
* time the current owner of the resource has to free it.
*/
- if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
+ if (!status || hw->adminq.sq_last_status == LIBIE_AQ_RC_EBUSY)
*timeout = le32_to_cpu(cmd_resp->timeout);
return status;
@@ -1861,8 +2164,8 @@ static int
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
struct ice_sq_cd *cd)
{
- struct ice_aqc_req_res *cmd;
- struct ice_aq_desc desc;
+ struct libie_aqc_req_res *cmd;
+ struct libie_aq_desc desc;
cmd = &desc.params.res_owner;
@@ -1971,16 +2274,16 @@ int ice_aq_alloc_free_res(struct ice_hw *hw,
enum ice_adminq_opc opc)
{
struct ice_aqc_alloc_free_res_cmd *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.sw_res_ctrl;
+ cmd = libie_aq_raw(&desc);
if (!buf || buf_size < flex_array_size(buf, elem, 1))
return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, opc);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
cmd->num_entries = cpu_to_le16(1);
@@ -2094,7 +2397,7 @@ static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
*/
static bool
ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
- struct ice_aqc_list_caps_elem *elem, const char *prefix)
+ struct libie_aqc_list_caps_elem *elem, const char *prefix)
{
u32 logical_id = le32_to_cpu(elem->logical_id);
u32 phys_id = le32_to_cpu(elem->phys_id);
@@ -2103,17 +2406,17 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
bool found = true;
switch (cap) {
- case ICE_AQC_CAPS_VALID_FUNCTIONS:
+ case LIBIE_AQC_CAPS_VALID_FUNCTIONS:
caps->valid_functions = number;
ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
caps->valid_functions);
break;
- case ICE_AQC_CAPS_SRIOV:
+ case LIBIE_AQC_CAPS_SRIOV:
caps->sr_iov_1_1 = (number == 1);
ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
caps->sr_iov_1_1);
break;
- case ICE_AQC_CAPS_DCB:
+ case LIBIE_AQC_CAPS_DCB:
caps->dcb = (number == 1);
caps->active_tc_bitmap = logical_id;
caps->maxtc = phys_id;
@@ -2122,7 +2425,7 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
caps->active_tc_bitmap);
ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
break;
- case ICE_AQC_CAPS_RSS:
+ case LIBIE_AQC_CAPS_RSS:
caps->rss_table_size = number;
caps->rss_table_entry_width = logical_id;
ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
@@ -2130,7 +2433,7 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
caps->rss_table_entry_width);
break;
- case ICE_AQC_CAPS_RXQS:
+ case LIBIE_AQC_CAPS_RXQS:
caps->num_rxq = number;
caps->rxq_first_id = phys_id;
ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
@@ -2138,7 +2441,7 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
caps->rxq_first_id);
break;
- case ICE_AQC_CAPS_TXQS:
+ case LIBIE_AQC_CAPS_TXQS:
caps->num_txq = number;
caps->txq_first_id = phys_id;
ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
@@ -2146,7 +2449,7 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
caps->txq_first_id);
break;
- case ICE_AQC_CAPS_MSIX:
+ case LIBIE_AQC_CAPS_MSIX:
caps->num_msix_vectors = number;
caps->msix_vector_first_id = phys_id;
ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
@@ -2154,55 +2457,59 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
caps->msix_vector_first_id);
break;
- case ICE_AQC_CAPS_PENDING_NVM_VER:
+ case LIBIE_AQC_CAPS_PENDING_NVM_VER:
caps->nvm_update_pending_nvm = true;
ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix);
break;
- case ICE_AQC_CAPS_PENDING_OROM_VER:
+ case LIBIE_AQC_CAPS_PENDING_OROM_VER:
caps->nvm_update_pending_orom = true;
ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix);
break;
- case ICE_AQC_CAPS_PENDING_NET_VER:
+ case LIBIE_AQC_CAPS_PENDING_NET_VER:
caps->nvm_update_pending_netlist = true;
ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix);
break;
- case ICE_AQC_CAPS_NVM_MGMT:
+ case LIBIE_AQC_CAPS_NVM_MGMT:
caps->nvm_unified_update =
(number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
true : false;
ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
caps->nvm_unified_update);
break;
- case ICE_AQC_CAPS_RDMA:
- caps->rdma = (number == 1);
+ case LIBIE_AQC_CAPS_RDMA:
+ if (IS_ENABLED(CONFIG_INFINIBAND_IRDMA))
+ caps->rdma = (number == 1);
ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma);
break;
- case ICE_AQC_CAPS_MAX_MTU:
+ case LIBIE_AQC_CAPS_MAX_MTU:
caps->max_mtu = number;
ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
prefix, caps->max_mtu);
break;
- case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
+ case LIBIE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
caps->pcie_reset_avoidance = (number > 0);
ice_debug(hw, ICE_DBG_INIT,
"%s: pcie_reset_avoidance = %d\n", prefix,
caps->pcie_reset_avoidance);
break;
- case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
+ case LIBIE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
caps->reset_restrict_support = (number == 1);
ice_debug(hw, ICE_DBG_INIT,
"%s: reset_restrict_support = %d\n", prefix,
caps->reset_restrict_support);
break;
- case ICE_AQC_CAPS_FW_LAG_SUPPORT:
- caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG);
+ case LIBIE_AQC_CAPS_FW_LAG_SUPPORT:
+ caps->roce_lag = number & LIBIE_AQC_BIT_ROCEV2_LAG;
ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n",
prefix, caps->roce_lag);
- caps->sriov_lag = !!(number & ICE_AQC_BIT_SRIOV_LAG);
+ caps->sriov_lag = number & LIBIE_AQC_BIT_SRIOV_LAG;
ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n",
prefix, caps->sriov_lag);
+ caps->sriov_aa_lag = number & LIBIE_AQC_BIT_SRIOV_AA_LAG;
+ ice_debug(hw, ICE_DBG_INIT, "%s: sriov_aa_lag = %u\n",
+ prefix, caps->sriov_aa_lag);
break;
- case ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE:
+ case LIBIE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE:
caps->tx_sched_topo_comp_mode_en = (number == 1);
break;
default:
@@ -2256,7 +2563,7 @@ ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
*/
static void
ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
u32 logical_id = le32_to_cpu(cap->logical_id);
u32 number = le32_to_cpu(cap->number);
@@ -2279,7 +2586,7 @@ ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
*/
static void
ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
@@ -2298,7 +2605,7 @@ ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
*/
static void
ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
struct ice_ts_func_info *info = &func_p->ts_func_info;
u32 number = le32_to_cpu(cap->number);
@@ -2311,16 +2618,16 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
- if (!ice_is_e825c(hw)) {
+ if (hw->mac_type != ICE_MAC_GENERIC_3K_E825) {
info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number);
info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
} else {
- info->clk_freq = ICE_TIME_REF_FREQ_156_250;
- info->clk_src = ICE_CLK_SRC_TCXO;
+ info->clk_freq = ICE_TSPLL_FREQ_156_250;
+ info->clk_src = ICE_CLK_SRC_TIME_REF;
}
- if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) {
- info->time_ref = (enum ice_time_ref_freq)info->clk_freq;
+ if (info->clk_freq < NUM_ICE_TSPLL_FREQ) {
+ info->time_ref = (enum ice_tspll_freq)info->clk_freq;
} else {
/* Unknown clock frequency, so assume a (probably incorrect)
* default to avoid out-of-bounds look ups of frequency
@@ -2328,7 +2635,7 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
*/
ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n",
info->clk_freq);
- info->time_ref = ICE_TIME_REF_FREQ_25_000;
+ info->time_ref = ICE_TSPLL_FREQ_25_000;
}
ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
@@ -2397,7 +2704,7 @@ static void
ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
void *buf, u32 cap_count)
{
- struct ice_aqc_list_caps_elem *cap_resp;
+ struct libie_aqc_list_caps_elem *cap_resp;
u32 i;
cap_resp = buf;
@@ -2412,16 +2719,16 @@ ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
&cap_resp[i], "func caps");
switch (cap) {
- case ICE_AQC_CAPS_VF:
+ case LIBIE_AQC_CAPS_VF:
ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_VSI:
+ case LIBIE_AQC_CAPS_VSI:
ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_1588:
+ case LIBIE_AQC_CAPS_1588:
ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_FD:
+ case LIBIE_AQC_CAPS_FD:
ice_parse_fdir_func_caps(hw, func_p);
break;
default:
@@ -2465,7 +2772,7 @@ static int ice_func_id_to_logical_id(u32 active_function_bitmap, u8 pf_id)
*/
static void
ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
u32 number = le32_to_cpu(cap->number);
@@ -2486,7 +2793,7 @@ ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
*/
static void
ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
u32 number = le32_to_cpu(cap->number);
@@ -2505,7 +2812,7 @@ ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
*/
static void
ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
u32 number = le32_to_cpu(cap->number);
@@ -2524,7 +2831,7 @@ ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
*/
static void
ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
u32 logical_id = le32_to_cpu(cap->logical_id);
@@ -2544,6 +2851,7 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0);
info->ts_ll_int_read = ((number & ICE_TS_LL_TX_TS_INT_READ_M) != 0);
+ info->ll_phy_tmr_update = ((number & ICE_TS_LL_PHY_TMR_UPDATE_M) != 0);
info->ena_ports = logical_id;
info->tmr_own_map = phys_id;
@@ -2566,6 +2874,8 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
info->ts_ll_read);
ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_int_read = %u\n",
info->ts_ll_int_read);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: ll_phy_tmr_update = %u\n",
+ info->ll_phy_tmr_update);
ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
info->ena_ports);
ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
@@ -2582,7 +2892,7 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
*/
static void
ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
u32 number = le32_to_cpu(cap->number);
@@ -2602,7 +2912,7 @@ ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
*/
static void
ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
dev_p->supported_sensors = le32_to_cpu(cap->number);
@@ -2621,7 +2931,7 @@ ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
*/
static void ice_parse_nac_topo_dev_caps(struct ice_hw *hw,
struct ice_hw_dev_caps *dev_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
dev_p->nac_topo.mode = le32_to_cpu(cap->number);
dev_p->nac_topo.id = le32_to_cpu(cap->phys_id) & ICE_NAC_TOPO_ID_M;
@@ -2657,7 +2967,7 @@ static void
ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
void *buf, u32 cap_count)
{
- struct ice_aqc_list_caps_elem *cap_resp;
+ struct libie_aqc_list_caps_elem *cap_resp;
u32 i;
cap_resp = buf;
@@ -2672,25 +2982,25 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
&cap_resp[i], "dev caps");
switch (cap) {
- case ICE_AQC_CAPS_VALID_FUNCTIONS:
+ case LIBIE_AQC_CAPS_VALID_FUNCTIONS:
ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_VF:
+ case LIBIE_AQC_CAPS_VF:
ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_VSI:
+ case LIBIE_AQC_CAPS_VSI:
ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_1588:
+ case LIBIE_AQC_CAPS_1588:
ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_FD:
+ case LIBIE_AQC_CAPS_FD:
ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_SENSOR_READING:
+ case LIBIE_AQC_CAPS_SENSOR_READING:
ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_NAC_TOPOLOGY:
+ case LIBIE_AQC_CAPS_NAC_TOPOLOGY:
ice_parse_nac_topo_dev_caps(hw, dev_p, &cap_resp[i]);
break;
default:
@@ -2706,40 +3016,6 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
}
/**
- * ice_is_pf_c827 - check if pf contains c827 phy
- * @hw: pointer to the hw struct
- */
-bool ice_is_pf_c827(struct ice_hw *hw)
-{
- struct ice_aqc_get_link_topo cmd = {};
- u8 node_part_number;
- u16 node_handle;
- int status;
-
- if (hw->mac_type != ICE_MAC_E810)
- return false;
-
- if (hw->device_id != ICE_DEV_ID_E810C_QSFP)
- return true;
-
- cmd.addr.topo_params.node_type_ctx =
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) |
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT);
- cmd.addr.topo_params.index = 0;
-
- status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
- &node_handle);
-
- if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
- return false;
-
- if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE)
- return true;
-
- return false;
-}
-
-/**
* ice_is_phy_rclk_in_netlist
* @hw: pointer to the hw struct
*
@@ -2747,9 +3023,11 @@ bool ice_is_pf_c827(struct ice_hw *hw)
*/
bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw)
{
- if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY,
+ ICE_AQC_LINK_TOPO_NODE_CTX_PORT,
ICE_AQC_GET_LINK_TOPO_NODE_NR_C827, NULL) &&
- ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY,
+ ICE_AQC_LINK_TOPO_NODE_CTX_PORT,
ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY, NULL))
return false;
@@ -2765,6 +3043,7 @@ bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw)
bool ice_is_clock_mux_in_netlist(struct ice_hw *hw)
{
if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX,
+ ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL,
ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX,
NULL))
return false;
@@ -2785,12 +3064,14 @@ bool ice_is_clock_mux_in_netlist(struct ice_hw *hw)
bool ice_is_cgu_in_netlist(struct ice_hw *hw)
{
if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL,
ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032,
NULL)) {
hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032;
return true;
} else if (!ice_find_netlist_node(hw,
ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL,
ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384,
NULL)) {
hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384;
@@ -2809,6 +3090,7 @@ bool ice_is_cgu_in_netlist(struct ice_hw *hw)
bool ice_is_gps_in_netlist(struct ice_hw *hw)
{
if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_GPS,
+ ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL,
ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS, NULL))
return false;
@@ -2838,8 +3120,8 @@ int
ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
- struct ice_aqc_list_caps *cmd;
- struct ice_aq_desc desc;
+ struct libie_aqc_list_caps *cmd;
+ struct libie_aq_desc desc;
int status;
cmd = &desc.params.get_cap;
@@ -2880,7 +3162,7 @@ ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
* device will return, we can simply send a 4KB buffer, the maximum
* possible size that firmware can return.
*/
- cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
+ cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct libie_aqc_list_caps_elem);
status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
ice_aqc_opc_list_dev_caps, NULL);
@@ -2914,7 +3196,7 @@ ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
* device will return, we can simply send a 4KB buffer, the maximum
* possible size that firmware can return.
*/
- cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
+ cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct libie_aqc_list_caps_elem);
status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
ice_aqc_opc_list_func_caps, NULL);
@@ -3023,9 +3305,9 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd)
{
struct ice_aqc_manage_mac_write *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.mac_write;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
cmd->flags = flags;
@@ -3042,10 +3324,12 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
*/
static int ice_aq_clear_pxe_mode(struct ice_hw *hw)
{
- struct ice_aq_desc desc;
+ struct ice_aqc_clear_pxe *cmd;
+ struct libie_aq_desc desc;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
- desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
+ cmd->rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
@@ -3078,10 +3362,10 @@ ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan,
{
struct ice_aqc_set_port_params *cmd;
struct ice_hw *hw = pi->hw;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 cmd_flags = 0;
- cmd = &desc.params.set_port_params;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
if (double_vlan)
@@ -3108,6 +3392,7 @@ bool ice_is_100m_speed_supported(struct ice_hw *hw)
case ICE_DEV_ID_E822L_SGMII:
case ICE_DEV_ID_E823L_1GBE:
case ICE_DEV_ID_E823C_SGMII:
+ case ICE_DEV_ID_E825C_SGMII:
return true;
default:
return false;
@@ -3318,7 +3603,8 @@ int
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct ice_aqc_set_phy_cfg *cmd;
+ struct libie_aq_desc desc;
int status;
if (!cfg)
@@ -3333,8 +3619,9 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
}
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
- desc.params.set_phy.lport_num = pi->lport;
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ cmd = libie_aq_raw(&desc);
+ cmd->lport_num = pi->lport;
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
@@ -3350,7 +3637,7 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
cfg->link_fec_opt);
status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
- if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
+ if (hw->adminq.sq_last_status == LIBIE_AQ_RC_EMODE)
status = 0;
if (!status)
@@ -3407,17 +3694,17 @@ int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code,
{
struct ice_aqc_dnl_call_command *cmd;
struct ice_aqc_dnl_call buf = {};
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int err;
buf.sto.txrx_equa_reqs.data_in = cpu_to_le16(data_in);
buf.sto.txrx_equa_reqs.op_code_serdes_sel =
cpu_to_le16(op_code | (serdes_num & 0xF));
- cmd = &desc.params.dnl_call;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dnl_call);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF |
- ICE_AQ_FLAG_RD |
- ICE_AQ_FLAG_SI);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_BUF |
+ LIBIE_AQ_FLAG_RD |
+ LIBIE_AQ_FLAG_SI);
desc.datalen = cpu_to_le16(sizeof(struct ice_aqc_dnl_call));
cmd->activity_id = cpu_to_le16(ICE_AQC_ACT_ID_DNL);
@@ -3455,7 +3742,7 @@ static const u32 fec_reg[][ICE_FEC_MAX] = {
int ice_aq_get_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
enum ice_fec_stats_types fec_type, u32 *output)
{
- u16 flag = (ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF | ICE_AQ_FLAG_SI);
+ u16 flag = (LIBIE_AQ_FLAG_RD | LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_SI);
struct ice_sbq_msg_input msg = {};
u32 receiver_id, reg_offset;
int err;
@@ -3475,7 +3762,7 @@ int ice_aq_get_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
msg.msg_addr_low = lower_16_bits(reg_offset);
msg.msg_addr_high = receiver_id;
msg.opcode = ice_sbq_msg_rd;
- msg.dest_dev = rmn_0;
+ msg.dest_dev = ice_sbq_dev_phy_0;
err = ice_sbq_rw_reg(hw, &msg, flag);
if (err)
@@ -3878,9 +4165,9 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd)
{
struct ice_aqc_restart_an *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.restart_an;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
@@ -3908,9 +4195,9 @@ ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
struct ice_sq_cd *cd)
{
struct ice_aqc_set_event_mask *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.set_event_mask;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
@@ -3932,9 +4219,9 @@ int
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
{
struct ice_aqc_set_mac_lb *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.set_mac_lb;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
if (ena_lpbk)
@@ -3957,9 +4244,9 @@ ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
{
struct ice_aqc_set_port_id_led *cmd;
struct ice_hw *hw = pi->hw;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.set_port_id_led;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
@@ -3995,7 +4282,7 @@ ice_aq_get_port_options(struct ice_hw *hw,
u8 *pending_option_idx, bool *pending_option_valid)
{
struct ice_aqc_get_port_options *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
u8 i;
@@ -4003,7 +4290,7 @@ ice_aq_get_port_options(struct ice_hw *hw,
if (*option_count < ICE_AQC_PORT_OPT_COUNT_M)
return -EINVAL;
- cmd = &desc.params.get_port_options;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);
if (lport_valid)
@@ -4069,12 +4356,12 @@ ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
u8 new_option)
{
struct ice_aqc_set_port_option *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
if (new_option > ICE_AQC_PORT_OPT_COUNT_M)
return -EINVAL;
- cmd = &desc.params.set_port_option;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option);
if (lport_valid)
@@ -4087,6 +4374,68 @@ ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
}
/**
+ * ice_get_phy_lane_number - Get PHY lane number for current adapter
+ * @hw: pointer to the hw struct
+ *
+ * Return: PHY lane number on success, negative error code otherwise.
+ */
+int ice_get_phy_lane_number(struct ice_hw *hw)
+{
+ struct ice_aqc_get_port_options_elem *options;
+ unsigned int lport = 0;
+ unsigned int lane;
+ int err;
+
+	/* E82X devices do not have sequential IDs; the lane number is the
+	 * PF ID. The E825 variant with an external PHY (0x579F) is an
+	 * exception, but it also has a 1:1 pf_id -> lane_number mapping.
+	 */
+ if (hw->mac_type == ICE_MAC_GENERIC ||
+ hw->device_id == ICE_DEV_ID_E825C_SGMII)
+ return hw->pf_id;
+
+ options = kcalloc(ICE_AQC_PORT_OPT_MAX, sizeof(*options), GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ for (lane = 0; lane < ICE_MAX_PORT_PER_PCI_DEV; lane++) {
+ u8 options_count = ICE_AQC_PORT_OPT_MAX;
+ u8 speed, active_idx, pending_idx;
+ bool active_valid, pending_valid;
+
+ err = ice_aq_get_port_options(hw, options, &options_count, lane,
+ true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+ if (err)
+ goto err;
+
+ if (!active_valid)
+ continue;
+
+ speed = options[active_idx].max_lane_speed;
+ /* If we don't get speed for this lane, it's unoccupied */
+ if (speed > ICE_AQC_PORT_OPT_MAX_LANE_40G)
+ continue;
+
+ if (hw->pf_id == lport) {
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825 &&
+ ice_is_dual(hw) && !ice_is_primary(hw))
+ lane += ICE_PORTS_PER_QUAD;
+ kfree(options);
+ return lane;
+ }
+ lport++;
+ }
+
+ /* PHY lane not found */
+ err = -ENXIO;
+err:
+ kfree(options);
+ return err;
+}
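+
+/* Note: ice_init_hw() caches the result in hw->lane_num (see the init path
+ * earlier in this patch), so most users are expected to read hw->lane_num
+ * rather than call this helper again.
+ */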
+
+/**
* ice_aq_sff_eeprom
* @hw: pointer to the HW struct
* @lport: bits [7:0] = logical port, bit [8] = logical port valid
@@ -4107,7 +4456,7 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
bool write, struct ice_sq_cd *cd)
{
struct ice_aqc_sff_eeprom *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 i2c_bus_addr;
int status;
@@ -4115,8 +4464,8 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
- cmd = &desc.params.read_write_sff_param;
- desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
+ cmd = libie_aq_raw(&desc);
+ desc.flags = cpu_to_le16(LIBIE_AQ_FLAG_RD);
cmd->lport_num = (u8)(lport & 0xff);
cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
i2c_bus_addr = FIELD_PREP(ICE_AQC_SFF_I2CBUS_7BIT_M, bus_addr >> 1) |
@@ -4176,7 +4525,7 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw,
struct ice_aqc_get_set_rss_lut *desc_params;
enum ice_aqc_lut_flags flags;
enum ice_lut_size lut_size;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u8 *lut = params->lut;
@@ -4192,9 +4541,9 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw,
opcode = set ? ice_aqc_opc_set_rss_lut : ice_aqc_opc_get_rss_lut;
ice_fill_dflt_direct_cmd_desc(&desc, opcode);
if (set)
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
- desc_params = &desc.params.get_set_rss_lut;
+ desc_params = libie_aq_raw(&desc);
vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID);
@@ -4249,16 +4598,16 @@ __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
{
struct ice_aqc_get_set_rss_key *desc_params;
u16 key_size = sizeof(*key);
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
if (set) {
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
} else {
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
}
- desc_params = &desc.params.get_set_rss_key;
+ desc_params = libie_aq_raw(&desc);
desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID);
return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
@@ -4330,10 +4679,10 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
{
struct ice_aqc_add_tx_qgrp *list;
struct ice_aqc_add_txqs *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 i, sum_size = 0;
- cmd = &desc.params.add_txqs;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
@@ -4352,7 +4701,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
if (buf_size != sum_size)
return -EINVAL;
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
cmd->num_qgrps = num_qgrps;
@@ -4379,12 +4728,12 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
{
struct ice_aqc_dis_txq_item *item;
struct ice_aqc_dis_txqs *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 vmvf_and_timeout;
u16 i, sz = 0;
int status;
- cmd = &desc.params.dis_txqs;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
/* qg_list can be NULL only in VM/VF reset flow */
@@ -4425,7 +4774,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
/* set RD bit to indicate that command buffer is provided by the driver
* and it needs to be read by the firmware
*/
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
for (i = 0, item = qg_list; i < num_qgrps; i++) {
u16 item_size = struct_size(item, q_id, item->num_qs);
@@ -4457,40 +4806,42 @@ do_aq:
}
/**
- * ice_aq_cfg_lan_txq
+ * ice_aq_cfg_lan_txq - send AQ command 0x0C32 to FW
* @hw: pointer to the hardware structure
* @buf: buffer for command
* @buf_size: size of buffer in bytes
* @num_qs: number of queues being configured
* @oldport: origination lport
* @newport: destination lport
+ * @mode: cmd_type for move to use
* @cd: pointer to command details structure or NULL
*
* Move/Configure LAN Tx queue (0x0C32)
*
- * There is a better AQ command to use for moving nodes, so only coding
- * this one for configuring the node.
+ * Return: 0 on success, negative error code on failure.
*/
int
ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
u16 buf_size, u16 num_qs, u8 oldport, u8 newport,
- struct ice_sq_cd *cd)
+ u8 mode, struct ice_sq_cd *cd)
{
struct ice_aqc_cfg_txqs *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.cfg_txqs;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_txqs);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
if (!buf)
return -EINVAL;
- cmd->cmd_type = ICE_AQC_Q_CFG_TC_CHNG;
+ cmd->cmd_type = mode;
cmd->num_qs = num_qs;
cmd->port_num_chng = (oldport & ICE_AQC_Q_CFG_SRC_PRT_M);
cmd->port_num_chng |= FIELD_PREP(ICE_AQC_Q_CFG_DST_PRT_M, newport);
+ cmd->port_num_chng |= FIELD_PREP(ICE_AQC_Q_CFG_MODE_M,
+ ICE_AQC_Q_CFG_MODE_KEEP_OWN);
cmd->time_out = FIELD_PREP(ICE_AQC_Q_CFG_TIMEOUT_M, 5);
cmd->blocked_cgds = 0;
@@ -4518,10 +4869,10 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
{
struct ice_aqc_add_rdma_qset_data *list;
struct ice_aqc_add_rdma_qset *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 i, sum_size = 0;
- cmd = &desc.params.add_rdma_qset;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);
@@ -4539,213 +4890,54 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
if (buf_size != sum_size)
return -EINVAL;
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
cmd->num_qset_grps = num_qset_grps;
return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);
}
-/* End of FW Admin Queue command wrappers */
-
-/**
- * ice_pack_ctx_byte - write a byte to a packed context structure
- * @src_ctx: unpacked source context structure
- * @dest_ctx: packed destination context data
- * @ce_info: context element description
- */
-static void ice_pack_ctx_byte(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
-{
- u8 src_byte, dest_byte, mask;
- u8 *from, *dest;
- u16 shift_width;
-
- /* copy from the next struct field */
- from = src_ctx + ce_info->offset;
-
- /* prepare the bits and mask */
- shift_width = ce_info->lsb % 8;
- mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
-
- src_byte = *from;
- src_byte <<= shift_width;
- src_byte &= mask;
-
- /* get the current bits from the target bit string */
- dest = dest_ctx + (ce_info->lsb / 8);
-
- memcpy(&dest_byte, dest, sizeof(dest_byte));
-
- dest_byte &= ~mask; /* get the bits not changing */
- dest_byte |= src_byte; /* add in the new bits */
-
- /* put it all back */
- memcpy(dest, &dest_byte, sizeof(dest_byte));
-}
-
-/**
- * ice_pack_ctx_word - write a word to a packed context structure
- * @src_ctx: unpacked source context structure
- * @dest_ctx: packed destination context data
- * @ce_info: context element description
- */
-static void ice_pack_ctx_word(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
-{
- u16 src_word, mask;
- __le16 dest_word;
- u8 *from, *dest;
- u16 shift_width;
-
- /* copy from the next struct field */
- from = src_ctx + ce_info->offset;
-
- /* prepare the bits and mask */
- shift_width = ce_info->lsb % 8;
- mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
-
- /* don't swizzle the bits until after the mask because the mask bits
- * will be in a different bit position on big endian machines
- */
- src_word = *(u16 *)from;
- src_word <<= shift_width;
- src_word &= mask;
-
- /* get the current bits from the target bit string */
- dest = dest_ctx + (ce_info->lsb / 8);
-
- memcpy(&dest_word, dest, sizeof(dest_word));
-
- dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */
- dest_word |= cpu_to_le16(src_word); /* add in the new bits */
-
- /* put it all back */
- memcpy(dest, &dest_word, sizeof(dest_word));
-}
-
-/**
- * ice_pack_ctx_dword - write a dword to a packed context structure
- * @src_ctx: unpacked source context structure
- * @dest_ctx: packed destination context data
- * @ce_info: context element description
- */
-static void ice_pack_ctx_dword(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
-{
- u32 src_dword, mask;
- __le32 dest_dword;
- u8 *from, *dest;
- u16 shift_width;
-
- /* copy from the next struct field */
- from = src_ctx + ce_info->offset;
-
- /* prepare the bits and mask */
- shift_width = ce_info->lsb % 8;
- mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
-
- /* don't swizzle the bits until after the mask because the mask bits
- * will be in a different bit position on big endian machines
- */
- src_dword = *(u32 *)from;
- src_dword <<= shift_width;
- src_dword &= mask;
-
- /* get the current bits from the target bit string */
- dest = dest_ctx + (ce_info->lsb / 8);
-
- memcpy(&dest_dword, dest, sizeof(dest_dword));
-
- dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */
- dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */
-
- /* put it all back */
- memcpy(dest, &dest_dword, sizeof(dest_dword));
-}
-
/**
- * ice_pack_ctx_qword - write a qword to a packed context structure
- * @src_ctx: unpacked source context structure
- * @dest_ctx: packed destination context data
- * @ce_info: context element description
+ * ice_aq_set_txtimeq - set Tx time queues
+ * @hw: pointer to the hardware structure
+ * @txtimeq: first Tx time queue id to configure
+ * @q_count: number of queues to configure
+ * @txtime_qg: queue group to be set
+ * @buf_size: size of buffer for indirect command
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set Tx Time queue (0x0C35)
+ * Return: 0 on success or negative value on failure.
*/
-static void ice_pack_ctx_qword(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
+int
+ice_aq_set_txtimeq(struct ice_hw *hw, u16 txtimeq, u8 q_count,
+ struct ice_aqc_set_txtime_qgrp *txtime_qg, u16 buf_size,
+ struct ice_sq_cd *cd)
{
- u64 src_qword, mask;
- __le64 dest_qword;
- u8 *from, *dest;
- u16 shift_width;
+ struct ice_aqc_set_txtimeqs *cmd;
+ struct libie_aq_desc desc;
+ u16 size;
- /* copy from the next struct field */
- from = src_ctx + ce_info->offset;
-
- /* prepare the bits and mask */
- shift_width = ce_info->lsb % 8;
- mask = GENMASK_ULL(ce_info->width - 1 + shift_width, shift_width);
+ if (!txtime_qg || txtimeq > ICE_TXTIME_MAX_QUEUE ||
+ q_count < 1 || q_count > ICE_SET_TXTIME_MAX_Q_AMOUNT)
+ return -EINVAL;
- /* don't swizzle the bits until after the mask because the mask bits
- * will be in a different bit position on big endian machines
- */
- src_qword = *(u64 *)from;
- src_qword <<= shift_width;
- src_qword &= mask;
+ size = struct_size(txtime_qg, txtimeqs, q_count);
+ if (buf_size != size)
+ return -EINVAL;
- /* get the current bits from the target bit string */
- dest = dest_ctx + (ce_info->lsb / 8);
+ cmd = libie_aq_raw(&desc);
- memcpy(&dest_qword, dest, sizeof(dest_qword));
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_txtimeqs);
- dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */
- dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
- /* put it all back */
- memcpy(dest, &dest_qword, sizeof(dest_qword));
+ cmd->q_id = cpu_to_le16(txtimeq);
+ cmd->q_amount = cpu_to_le16(q_count);
+ return ice_aq_send_cmd(hw, &desc, txtime_qg, buf_size, cd);
}
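/*
 * Illustrative sketch, not part of the patch: a caller is expected to size
 * the indirect buffer with struct_size() so that buf_size matches the check
 * above. The allocation and single-queue scope here are hypothetical.
 */
static int ice_example_set_one_txtime_q(struct ice_hw *hw, u16 first_q)
{
	struct ice_aqc_set_txtime_qgrp *qg;
	u16 size = struct_size(qg, txtimeqs, 1);
	int err;

	qg = kzalloc(size, GFP_KERNEL);
	if (!qg)
		return -ENOMEM;

	/* fill qg->txtimeqs[0] with the packed queue context before sending */
	err = ice_aq_set_txtimeq(hw, first_q, 1, qg, size, NULL);

	kfree(qg);
	return err;
}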
-/**
- * ice_set_ctx - set context bits in packed structure
- * @hw: pointer to the hardware structure
- * @src_ctx: pointer to a generic non-packed context structure
- * @dest_ctx: pointer to memory for the packed structure
- * @ce_info: List of Rx context elements
- */
-int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
-{
- int f;
-
- for (f = 0; ce_info[f].width; f++) {
- /* We have to deal with each element of the FW response
- * using the correct size so that we are correct regardless
- * of the endianness of the machine.
- */
- if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
- ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
- f, ce_info[f].width, ce_info[f].size_of);
- continue;
- }
- switch (ce_info[f].size_of) {
- case sizeof(u8):
- ice_pack_ctx_byte(src_ctx, dest_ctx, &ce_info[f]);
- break;
- case sizeof(u16):
- ice_pack_ctx_word(src_ctx, dest_ctx, &ce_info[f]);
- break;
- case sizeof(u32):
- ice_pack_ctx_dword(src_ctx, dest_ctx, &ce_info[f]);
- break;
- case sizeof(u64):
- ice_pack_ctx_qword(src_ctx, dest_ctx, &ce_info[f]);
- break;
- default:
- return -EINVAL;
- }
- }
-
- return 0;
-}
+/* End of FW Admin Queue command wrappers */
/**
* ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
@@ -5156,6 +5348,32 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
}
/**
+ * ice_aq_get_cgu_input_pin_measure - get input pin signal measurements
+ * @hw: pointer to the HW struct
+ * @dpll_idx: index of dpll to be measured
+ * @meas: array to be filled with results
+ * @meas_num: maximum number of results the array can hold
+ *
+ * Get CGU measurements (0x0C59) of phase and frequency offsets for input
+ * pins on given dpll.
+ *
+ * Return: 0 on success or negative value on failure.
+ */
+int ice_aq_get_cgu_input_pin_measure(struct ice_hw *hw, u8 dpll_idx,
+ struct ice_cgu_input_measure *meas,
+ u16 meas_num)
+{
+ struct ice_aqc_get_cgu_input_measure *cmd;
+ struct libie_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_measure);
+ cmd = libie_aq_raw(&desc);
+ cmd->dpll_idx_opt = dpll_idx & ICE_AQC_GET_CGU_IN_MEAS_DPLL_IDX_M;
+
+ return ice_aq_send_cmd(hw, &desc, meas, meas_num * sizeof(*meas), NULL);
+}
+
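/*
 * Illustrative sketch, not part of the patch: reading phase/frequency offset
 * measurements for dpll 0 into a caller-provided array. The array length of
 * eight is an arbitrary example value, not a hardware limit.
 */
static int ice_example_get_pin_measurements(struct ice_hw *hw)
{
	struct ice_cgu_input_measure meas[8] = {};

	return ice_aq_get_cgu_input_pin_measure(hw, 0, meas, ARRAY_SIZE(meas));
}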
+/**
* ice_aq_get_cgu_abilities - get cgu abilities
* @hw: pointer to the HW struct
* @abilities: CGU abilities
@@ -5167,7 +5385,7 @@ int
ice_aq_get_cgu_abilities(struct ice_hw *hw,
struct ice_aqc_get_cgu_abilities *abilities)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_abilities);
return ice_aq_send_cmd(hw, &desc, abilities, sizeof(*abilities), NULL);
@@ -5190,10 +5408,10 @@ ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2,
u32 freq, s32 phase_delay)
{
struct ice_aqc_set_cgu_input_config *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_input_config);
- cmd = &desc.params.set_cgu_input_config;
+ cmd = libie_aq_raw(&desc);
cmd->input_idx = input_idx;
cmd->flags1 = flags1;
cmd->flags2 = flags2;
@@ -5222,11 +5440,11 @@ ice_aq_get_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 *status, u8 *type,
u8 *flags1, u8 *flags2, u32 *freq, s32 *phase_delay)
{
struct ice_aqc_get_cgu_input_config *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int ret;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_config);
- cmd = &desc.params.get_cgu_input_config;
+ cmd = libie_aq_raw(&desc);
cmd->input_idx = input_idx;
ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
@@ -5265,10 +5483,10 @@ ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags,
u8 src_sel, u32 freq, s32 phase_delay)
{
struct ice_aqc_set_cgu_output_config *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_output_config);
- cmd = &desc.params.set_cgu_output_config;
+ cmd = libie_aq_raw(&desc);
cmd->output_idx = output_idx;
cmd->flags = flags;
cmd->src_sel = src_sel;
@@ -5295,11 +5513,11 @@ ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 *flags,
u8 *src_sel, u32 *freq, u32 *src_freq)
{
struct ice_aqc_get_cgu_output_config *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int ret;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_output_config);
- cmd = &desc.params.get_cgu_output_config;
+ cmd = libie_aq_raw(&desc);
cmd->output_idx = output_idx;
ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
@@ -5336,11 +5554,11 @@ ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state,
u8 *eec_mode)
{
struct ice_aqc_get_cgu_dpll_status *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_dpll_status);
- cmd = &desc.params.get_cgu_dpll_status;
+ cmd = libie_aq_raw(&desc);
cmd->dpll_num = dpll_num;
status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
@@ -5374,10 +5592,10 @@ ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state,
u8 config, u8 eec_mode)
{
struct ice_aqc_set_cgu_dpll_config *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_dpll_config);
- cmd = &desc.params.set_cgu_dpll_config;
+ cmd = libie_aq_raw(&desc);
cmd->dpll_num = dpll_num;
cmd->ref_state = ref_state;
cmd->config = config;
@@ -5401,10 +5619,10 @@ ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
u8 ref_priority)
{
struct ice_aqc_set_cgu_ref_prio *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_ref_prio);
- cmd = &desc.params.set_cgu_ref_prio;
+ cmd = libie_aq_raw(&desc);
cmd->dpll_num = dpll_num;
cmd->ref_idx = ref_idx;
cmd->ref_priority = ref_priority;
@@ -5427,11 +5645,11 @@ ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
u8 *ref_prio)
{
struct ice_aqc_get_cgu_ref_prio *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_ref_prio);
- cmd = &desc.params.get_cgu_ref_prio;
+ cmd = libie_aq_raw(&desc);
cmd->dpll_num = dpll_num;
cmd->ref_idx = ref_idx;
@@ -5457,11 +5675,11 @@ ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver,
u32 *cgu_fw_ver)
{
struct ice_aqc_get_cgu_info *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_info);
- cmd = &desc.params.get_cgu_info;
+ cmd = libie_aq_raw(&desc);
status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
if (!status) {
@@ -5488,11 +5706,11 @@ ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable,
u32 *freq)
{
struct ice_aqc_set_phy_rec_clk_out *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_rec_clk_out);
- cmd = &desc.params.set_phy_rec_clk_out;
+ cmd = libie_aq_raw(&desc);
cmd->phy_output = phy_output;
cmd->port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT;
cmd->flags = enable & ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN;
@@ -5521,11 +5739,11 @@ ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num,
u8 *flags, u16 *node_handle)
{
struct ice_aqc_get_phy_rec_clk_out *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_rec_clk_out);
- cmd = &desc.params.get_phy_rec_clk_out;
+ cmd = libie_aq_raw(&desc);
cmd->phy_output = *phy_output;
status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
@@ -5553,11 +5771,11 @@ int ice_aq_get_sensor_reading(struct ice_hw *hw,
struct ice_aqc_get_sensor_reading_resp *data)
{
struct ice_aqc_get_sensor_reading *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sensor_reading);
- cmd = &desc.params.get_sensor_reading;
+ cmd = libie_aq_raw(&desc);
#define ICE_INTERNAL_TEMP_SENSOR_FORMAT 0
#define ICE_INTERNAL_TEMP_SENSOR 0
cmd->sensor = ICE_INTERNAL_TEMP_SENSOR;
@@ -5565,7 +5783,7 @@ int ice_aq_get_sensor_reading(struct ice_hw *hw,
status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
if (!status)
- memcpy(data, &desc.params.get_sensor_reading_resp,
+ memcpy(data, &desc.params.raw,
sizeof(*data));
return status;
@@ -5762,13 +5980,13 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, u8 *data,
struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc = { 0 };
+ struct libie_aq_desc desc = { 0 };
struct ice_aqc_i2c *cmd;
u8 data_size;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
- cmd = &desc.params.read_write_i2c;
+ cmd = libie_aq_raw(&desc);
if (!data)
return -EINVAL;
@@ -5785,7 +6003,7 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
struct ice_aqc_read_i2c_resp *resp;
u8 i;
- resp = &desc.params.read_i2c_resp;
+ resp = libie_aq_raw(&desc);
for (i = 0; i < data_size; i++) {
*data = resp->i2c_data[i];
data++;
@@ -5817,12 +6035,12 @@ ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, const u8 *data,
struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc = { 0 };
+ struct libie_aq_desc desc = { 0 };
struct ice_aqc_i2c *cmd;
u8 data_size;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
- cmd = &desc.params.read_write_i2c;
+ cmd = libie_aq_raw(&desc);
data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);
@@ -5841,6 +6059,95 @@ ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
}
/**
+ * ice_get_pca9575_handle - find and return the PCA9575 controller
+ * @hw: pointer to the hw struct
+ * @pca9575_handle: GPIO controller's handle
+ *
+ * Find and return the GPIO controller's handle in the netlist.
+ * Once found, the handle is cached in the hw structure and subsequent calls
+ * return the cached value.
+ *
+ * Return: 0 on success, -ENXIO when there's no PCA9575 present.
+ */
+int ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
+{
+ struct ice_aqc_get_link_topo *cmd;
+ struct libie_aq_desc desc;
+ int err;
+ u8 idx;
+
+ /* If handle was read previously return cached value */
+ if (hw->io_expander_handle) {
+ *pca9575_handle = hw->io_expander_handle;
+ return 0;
+ }
+
+#define SW_PCA9575_SFP_TOPO_IDX 2
+#define SW_PCA9575_QSFP_TOPO_IDX 1
+
+ /* Check if the SW IO expander controlling SMA exists in the netlist. */
+ if (hw->device_id == ICE_DEV_ID_E810C_SFP)
+ idx = SW_PCA9575_SFP_TOPO_IDX;
+ else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
+ idx = SW_PCA9575_QSFP_TOPO_IDX;
+ else
+ return -ENXIO;
+
+	/* If the handle was not cached, read it from the netlist */
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
+ cmd = libie_aq_raw(&desc);
+ cmd->addr.topo_params.node_type_ctx =
+ ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL;
+ cmd->addr.topo_params.index = idx;
+
+ err = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (err)
+ return -ENXIO;
+
+ /* Verify if we found the right IO expander type */
+ if (cmd->node_part_num != ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
+ return -ENXIO;
+
+ /* If present save the handle and return it */
+ hw->io_expander_handle =
+ le16_to_cpu(cmd->addr.handle);
+ *pca9575_handle = hw->io_expander_handle;
+
+ return 0;
+}
+
+/**
+ * ice_read_pca9575_reg - read the register from the PCA9575 controller
+ * @hw: pointer to the hw struct
+ * @offset: GPIO controller register offset
+ * @data: pointer to data to be read from the GPIO controller
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data)
+{
+ struct ice_aqc_link_topo_addr link_topo;
+ __le16 addr;
+ u16 handle;
+ int err;
+
+ memset(&link_topo, 0, sizeof(link_topo));
+
+ err = ice_get_pca9575_handle(hw, &handle);
+ if (err)
+ return err;
+
+ link_topo.handle = cpu_to_le16(handle);
+ link_topo.topo_params.node_type_ctx =
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
+ ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
+
+ addr = cpu_to_le16((u16)offset);
+
+ return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
+}
+
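/*
 * Illustrative sketch, not part of the patch: because ice_get_pca9575_handle()
 * caches the handle, repeated register reads only walk the netlist once. The
 * register offset passed in is arbitrary.
 */
static int ice_example_dump_pca9575_reg(struct ice_hw *hw, u8 offset)
{
	u8 val;
	int err;

	err = ice_read_pca9575_reg(hw, offset, &val);
	if (err)
		return err;

	ice_debug(hw, ICE_DBG_PTP, "PCA9575 reg 0x%02x = 0x%02x\n", offset, val);
	return 0;
}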
+/**
* ice_aq_set_gpio
* @hw: pointer to the hw struct
* @gpio_ctrl_handle: GPIO controller node handle
@@ -5854,11 +6161,11 @@ int
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
struct ice_sq_cd *cd)
{
+ struct libie_aq_desc desc;
struct ice_aqc_gpio *cmd;
- struct ice_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
- cmd = &desc.params.read_write_gpio;
+ cmd = libie_aq_raw(&desc);
cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
cmd->gpio_num = pin_idx;
cmd->gpio_val = value ? 1 : 0;
@@ -5881,12 +6188,12 @@ int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
bool *value, struct ice_sq_cd *cd)
{
+ struct libie_aq_desc desc;
struct ice_aqc_gpio *cmd;
- struct ice_aq_desc desc;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
- cmd = &desc.params.read_write_gpio;
+ cmd = libie_aq_raw(&desc);
cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
cmd->gpio_num = pin_idx;
@@ -6023,6 +6330,44 @@ bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
}
/**
+ * ice_is_fw_health_report_supported - checks if firmware supports health events
+ * @hw: pointer to the hardware structure
+ *
+ * Return: true if firmware supports health status reports,
+ * false otherwise
+ */
+bool ice_is_fw_health_report_supported(struct ice_hw *hw)
+{
+ return ice_is_fw_api_min_ver(hw, ICE_FW_API_HEALTH_REPORT_MAJ,
+ ICE_FW_API_HEALTH_REPORT_MIN,
+ ICE_FW_API_HEALTH_REPORT_PATCH);
+}
+
+/**
+ * ice_aq_set_health_status_cfg - Configure FW health events
+ * @hw: pointer to the HW struct
+ * @event_source: type of diagnostic events to enable
+ *
+ * Configure the health status event types that the firmware will send to this
+ * PF. The supported event types are: PF-specific, all PFs, and global.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int ice_aq_set_health_status_cfg(struct ice_hw *hw, u8 event_source)
+{
+ struct ice_aqc_set_health_status_cfg *cmd;
+ struct libie_aq_desc desc;
+
+ cmd = libie_aq_raw(&desc);
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_health_status_cfg);
+
+ cmd->event_source = event_source;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
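/*
 * Illustrative sketch, not part of the patch: enable health reports only when
 * the running firmware advertises support. The event_source mask constants
 * are defined elsewhere in the driver and simply passed through here.
 */
static int ice_example_enable_health_reports(struct ice_hw *hw, u8 event_source)
{
	if (!ice_is_fw_health_report_supported(hw))
		return 0;

	return ice_aq_set_health_status_cfg(hw, event_source);
}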
+/**
* ice_aq_set_lldp_mib - Set the LLDP MIB
* @hw: pointer to the HW struct
* @mib_type: Local, Remote or both Local and Remote MIBs
@@ -6037,16 +6382,16 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_set_local_mib *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.lldp_set_mib;
+ cmd = libie_aq_raw(&desc);
if (buf_size == 0 || !buf)
return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
- desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_RD);
desc.datalen = cpu_to_le16(buf_size);
cmd->type = mib_type;
@@ -6072,16 +6417,22 @@ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
/**
* ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
* @hw: pointer to HW struct
- * @vsi_num: absolute HW index for VSI
+ * @vsi: VSI to add the filter to
* @add: boolean for if adding or removing a filter
+ *
+ * Return: 0 on success, -EOPNOTSUPP if the operation cannot be performed
+ * with this HW or VSI, otherwise an error corresponding to
+ * the AQ transaction result.
*/
-int
-ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
+int ice_lldp_fltr_add_remove(struct ice_hw *hw, struct ice_vsi *vsi, bool add)
{
struct ice_aqc_lldp_filter_ctrl *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.lldp_filter_ctrl;
+ if (vsi->type != ICE_VSI_PF || !ice_fw_supports_lldp_fltr_ctrl(hw))
+ return -EOPNOTSUPP;
+
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
@@ -6090,7 +6441,7 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
else
cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
- cmd->vsi_num = cpu_to_le16(vsi_num);
+ cmd->vsi_num = cpu_to_le16(vsi->vsi_num);
return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
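/*
 * Illustrative sketch, not part of the patch: with the VSI-based signature the
 * caller no longer pre-checks firmware support or VSI type and can treat
 * -EOPNOTSUPP as a soft failure.
 */
static void ice_example_toggle_lldp_fltr(struct ice_hw *hw,
					 struct ice_vsi *vsi, bool ena)
{
	int err = ice_lldp_fltr_add_remove(hw, vsi, ena);

	if (err && err != -EOPNOTSUPP)
		ice_debug(hw, ICE_DBG_AQ_MSG, "LLDP Rx filter %s failed: %d\n",
			  ena ? "add" : "remove", err);
}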
@@ -6101,7 +6452,7 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
*/
int ice_lldp_execute_pending_mib(struct ice_hw *hw)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_execute_pending_mib);
@@ -6157,3 +6508,86 @@ u32 ice_get_link_speed(u16 index)
return ice_aq_to_link_speed[index];
}
+
+/**
+ * ice_get_dest_cgu - get destination CGU dev for given HW
+ * @hw: pointer to the HW struct
+ *
+ * Get CGU client id for CGU register read/write operations.
+ *
+ * Return: CGU device id to use in SBQ transactions.
+ */
+static enum ice_sbq_dev_id ice_get_dest_cgu(struct ice_hw *hw)
+{
+	/* On dual complex E825 devices only complex 0 has a functional CGU
+	 * powering all the PHYs.
+	 * The SBQ destination device cgu points to the CGU on the current
+	 * complex, so to reach the primary CGU from the secondary complex the
+	 * driver must use cgu_peer as the destination device.
+	 */
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825 && ice_is_dual(hw) &&
+ !ice_is_primary(hw))
+ return ice_sbq_dev_cgu_peer;
+ return ice_sbq_dev_cgu;
+}
+
+/**
+ * ice_read_cgu_reg - Read a CGU register
+ * @hw: Pointer to the HW struct
+ * @addr: Register address to read
+ * @val: Storage for register value read
+ *
+ * Read the contents of a register of the Clock Generation Unit. Only
+ * applicable to E82X devices.
+ *
+ * Return: 0 on success, a negative error code if reading from the CGU fails.
+ */
+int ice_read_cgu_reg(struct ice_hw *hw, u32 addr, u32 *val)
+{
+ struct ice_sbq_msg_input cgu_msg = {
+ .dest_dev = ice_get_dest_cgu(hw),
+ .opcode = ice_sbq_msg_rd,
+ .msg_addr_low = addr
+ };
+ int err;
+
+ err = ice_sbq_rw_reg(hw, &cgu_msg, LIBIE_AQ_FLAG_RD);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n",
+ addr, err);
+ return err;
+ }
+
+ *val = cgu_msg.data;
+
+ return 0;
+}
+
+/**
+ * ice_write_cgu_reg - Write a CGU register
+ * @hw: Pointer to the HW struct
+ * @addr: Register address to write
+ * @val: Value to write into the register
+ *
+ * Write the specified value to a register of the Clock Generation Unit. Only
+ * applicable to E82X devices.
+ *
+ * Return: 0 on success, a negative error code if writing to the CGU fails.
+ */
+int ice_write_cgu_reg(struct ice_hw *hw, u32 addr, u32 val)
+{
+ struct ice_sbq_msg_input cgu_msg = {
+ .dest_dev = ice_get_dest_cgu(hw),
+ .opcode = ice_sbq_msg_wr,
+ .msg_addr_low = addr,
+ .data = val
+ };
+ int err;
+
+ err = ice_sbq_rw_reg(hw, &cgu_msg, LIBIE_AQ_FLAG_RD);
+ if (err)
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n",
+ addr, err);
+
+ return err;
+}
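/*
 * Illustrative sketch, not part of the patch: a read-modify-write of a CGU
 * register. The peer-complex routing is resolved inside the helpers, so the
 * same sequence works on either complex of a dual E825. ICE_CGU_R9 and its
 * bit come from the definitions added to ice_common.h below.
 */
static int ice_example_enable_one_pps_out(struct ice_hw *hw)
{
	u32 val;
	int err;

	err = ice_read_cgu_reg(hw, ICE_CGU_R9, &val);
	if (err)
		return err;

	val |= ICE_CGU_R9_ONE_PPS_OUT_EN;

	return ice_write_cgu_reg(hw, ICE_CGU_R9, val);
}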
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 27208a60cece..e700ac0dc347 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -39,6 +39,47 @@
#define FEC_RECEIVER_ID_PCS0 (0x33 << FEC_RECV_ID_SHIFT)
#define FEC_RECEIVER_ID_PCS1 (0x34 << FEC_RECV_ID_SHIFT)
+#define ICE_CGU_R9 0x24
+#define ICE_CGU_R9_TIME_REF_FREQ_SEL GENMASK(2, 0)
+#define ICE_CGU_R9_CLK_EREF0_EN BIT(4)
+#define ICE_CGU_R9_TIME_REF_EN BIT(5)
+#define ICE_CGU_R9_TIME_SYNC_EN BIT(6)
+#define ICE_CGU_R9_ONE_PPS_OUT_EN BIT(7)
+#define ICE_CGU_R9_ONE_PPS_OUT_AMP GENMASK(19, 18)
+
+#define ICE_CGU_R16 0x40
+#define ICE_CGU_R16_TSPLL_CK_REFCLKFREQ GENMASK(31, 24)
+
+#define ICE_CGU_R19 0x4C
+#define ICE_CGU_R19_TSPLL_FBDIV_INTGR_E82X GENMASK(7, 0)
+#define ICE_CGU_R19_TSPLL_FBDIV_INTGR_E825 GENMASK(9, 0)
+#define ICE_CGU_R19_TSPLL_NDIVRATIO GENMASK(19, 16)
+
+#define ICE_CGU_R22 0x58
+#define ICE_CGU_R22_TIME1588CLK_DIV GENMASK(23, 20)
+#define ICE_CGU_R22_TIME1588CLK_DIV2 BIT(30)
+
+#define ICE_CGU_R23 0x5C
+#define ICE_CGU_R24 0x60
+#define ICE_CGU_R24_FBDIV_FRAC GENMASK(21, 0)
+#define ICE_CGU_R23_R24_TSPLL_ENABLE BIT(24)
+#define ICE_CGU_R23_R24_REF1588_CK_DIV GENMASK(30, 27)
+#define ICE_CGU_R23_R24_TIME_REF_SEL BIT(31)
+
+#define ICE_CGU_BW_TDC 0x31C
+#define ICE_CGU_BW_TDC_PLLLOCK_SEL GENMASK(30, 29)
+
+#define ICE_CGU_RO_LOCK 0x3F0
+#define ICE_CGU_RO_LOCK_TRUE_LOCK BIT(12)
+#define ICE_CGU_RO_LOCK_UNLOCK BIT(13)
+
+#define ICE_CGU_CNTR_BIST 0x344
+#define ICE_CGU_CNTR_BIST_PLLLOCK_SEL_0 BIT(15)
+#define ICE_CGU_CNTR_BIST_PLLLOCK_SEL_1 BIT(16)
+
+#define ICE_CGU_RO_BWM_LF 0x370
+#define ICE_CGU_RO_BWM_LF_TRUE_LOCK BIT(12)
+
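/*
 * Illustrative sketch, not part of the patch: evaluating TSPLL lock from the
 * read-only lock register defined above, using one register read for both the
 * true-lock and unlock indications.
 */
static int ice_example_tspll_locked(struct ice_hw *hw, bool *locked)
{
	u32 val;
	int err;

	err = ice_read_cgu_reg(hw, ICE_CGU_RO_LOCK, &val);
	if (err)
		return err;

	*locked = (val & ICE_CGU_RO_LOCK_TRUE_LOCK) &&
		  !(val & ICE_CGU_RO_LOCK_UNLOCK);

	return 0;
}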
int ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
int ice_check_reset(struct ice_hw *hw);
@@ -68,7 +109,7 @@ bool ice_is_sbq_supported(struct ice_hw *hw);
struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw);
int
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
- struct ice_aq_desc *desc, void *buf, u16 buf_size,
+ struct libie_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
void ice_clear_pxe_mode(struct ice_hw *hw);
int ice_get_caps(struct ice_hw *hw);
@@ -77,6 +118,12 @@ void ice_set_safe_mode_caps(struct ice_hw *hw);
int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index);
+int ice_read_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
+ u32 rxq_index);
+int ice_read_txq_ctx(struct ice_hw *hw, struct ice_tlan_ctx *tlan_ctx,
+ u32 txq_index);
+int ice_write_txq_ctx(struct ice_hw *hw, struct ice_tlan_ctx *tlan_ctx,
+ u32 txq_index);
int
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params);
@@ -91,15 +138,14 @@ ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
-void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
-extern const struct ice_ctx_ele ice_tlan_ctx_info[];
-int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info);
+void ice_fill_dflt_direct_cmd_desc(struct libie_aq_desc *desc, u16 opcode);
+
+void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf);
extern struct mutex ice_global_cfg_lock_sw;
int
-ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc,
+ice_aq_send_cmd(struct ice_hw *hw, struct libie_aq_desc *desc,
void *buf, u16 buf_size, struct ice_sq_cd *cd);
int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd);
@@ -113,7 +159,6 @@ int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_sq_cd *cd);
-bool ice_is_pf_c827(struct ice_hw *hw);
bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw);
bool ice_is_clock_mux_in_netlist(struct ice_hw *hw);
bool ice_is_cgu_in_netlist(struct ice_hw *hw);
@@ -133,7 +178,6 @@ int
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
bool ice_is_generic_mac(struct ice_hw *hw);
-bool ice_is_e810(struct ice_hw *hw);
int ice_clear_pf_cfg(struct ice_hw *hw);
int
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
@@ -143,6 +187,8 @@ int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
struct ice_port_info *pi);
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps);
+bool ice_is_fw_health_report_supported(struct ice_hw *hw);
+int ice_aq_set_health_status_cfg(struct ice_hw *hw, u8 event_source);
int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code,
u8 serdes_num, int *output);
int
@@ -193,6 +239,7 @@ ice_aq_get_port_options(struct ice_hw *hw,
int
ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
u8 new_option);
+int ice_get_phy_lane_number(struct ice_hw *hw);
int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
@@ -223,12 +270,21 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
int
ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
u16 buf_size, u16 num_qs, u8 oldport, u8 newport,
- struct ice_sq_cd *cd);
+ u8 mode, struct ice_sq_cd *cd);
int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
+int
+ice_aq_set_txtimeq(struct ice_hw *hw, u16 txtimeq, u8 q_count,
+ struct ice_aqc_set_txtime_qgrp *txtime_qg,
+ u16 buf_size, struct ice_sq_cd *cd);
+void ice_pack_txtime_ctx(const struct ice_txtime_ctx *ctx,
+ ice_txtime_ctx_buf_t *buf);
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flag);
+int ice_aq_get_cgu_input_pin_measure(struct ice_hw *hw, u8 dpll_idx,
+ struct ice_cgu_input_measure *meas,
+ u16 meas_num);
int
ice_aq_get_cgu_abilities(struct ice_hw *hw,
struct ice_aqc_get_cgu_abilities *abilities);
@@ -275,10 +331,6 @@ ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
-bool ice_is_e810t(struct ice_hw *hw);
-bool ice_is_e822(struct ice_hw *hw);
-bool ice_is_e823(struct ice_hw *hw);
-bool ice_is_e825c(struct ice_hw *hw);
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
@@ -294,8 +346,7 @@ int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw);
-int
-ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add);
+int ice_lldp_fltr_add_remove(struct ice_hw *hw, struct ice_vsi *vsi, bool add);
int ice_lldp_execute_pending_mib(struct ice_hw *hw);
int
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
@@ -305,5 +356,9 @@ int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, const u8 *data,
struct ice_sq_cd *cd);
+int ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle);
+int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data);
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw);
+int ice_read_cgu_reg(struct ice_hw *hw, u32 addr, u32 *val);
+int ice_write_cgu_reg(struct ice_hw *hw, u32 addr, u32 val);
#endif /* _ICE_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index e3959ad442a2..dcb837cadd18 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -90,7 +90,7 @@ bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
static int
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);
+ size_t size = cq->num_sq_entries * sizeof(struct libie_aq_desc);
cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
&cq->sq.desc_buf.pa,
@@ -110,7 +110,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
static int
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);
+ size_t size = cq->num_rq_entries * sizeof(struct libie_aq_desc);
cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
&cq->rq.desc_buf.pa,
@@ -159,7 +159,7 @@ ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
/* allocate the mapped buffers */
for (i = 0; i < cq->num_rq_entries; i++) {
- struct ice_aq_desc *desc;
+ struct libie_aq_desc *desc;
struct ice_dma_mem *bi;
bi = &cq->rq.r.rq_bi[i];
@@ -173,9 +173,9 @@ ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
/* now configure the descriptors for use */
desc = ICE_CTL_Q_DESC(cq->rq, i);
- desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
- if (cq->rq_buf_size > ICE_AQ_LG_BUF)
- desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_BUF);
+ if (cq->rq_buf_size > LIBIE_AQ_LG_BUF)
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
desc->opcode = 0;
/* This is in accordance with control queue design, there is no
* register for buffer size configuration
@@ -858,7 +858,7 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
struct ice_ctl_q_ring *sq = &cq->sq;
u16 ntc = sq->next_to_clean;
- struct ice_aq_desc *desc;
+ struct libie_aq_desc *desc;
desc = ICE_CTL_Q_DESC(*sq, ntc);
@@ -912,7 +912,7 @@ static const char *ice_ctl_q_str(enum ice_ctl_q qtype)
static void ice_debug_cq(struct ice_hw *hw, struct ice_ctl_q_info *cq,
void *desc, void *buf, u16 buf_len, bool response)
{
- struct ice_aq_desc *cq_desc = desc;
+ struct libie_aq_desc *cq_desc = desc;
u16 datalen, flags;
if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) &&
@@ -939,7 +939,8 @@ static void ice_debug_cq(struct ice_hw *hw, struct ice_ctl_q_info *cq,
* by the DD and/or CMP flag set or a command with the RD flag set.
*/
if (buf && cq_desc->datalen &&
- (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP | ICE_AQ_FLAG_RD))) {
+ (flags & (LIBIE_AQ_FLAG_DD | LIBIE_AQ_FLAG_CMP |
+ LIBIE_AQ_FLAG_RD))) {
char prefix[] = KBUILD_MODNAME " 0x12341234 0x12341234 ";
sprintf(prefix, KBUILD_MODNAME " 0x%08X 0x%08X ",
@@ -992,11 +993,11 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
*/
int
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
- struct ice_aq_desc *desc, void *buf, u16 buf_size,
+ struct libie_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_dma_mem *dma_buf = NULL;
- struct ice_aq_desc *desc_on_ring;
+ struct libie_aq_desc *desc_on_ring;
bool cmd_completed = false;
int status = 0;
u16 retval = 0;
@@ -1007,7 +1008,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
return -EBUSY;
mutex_lock(&cq->sq_lock);
- cq->sq_last_status = ICE_AQ_RC_OK;
+ cq->sq_last_status = LIBIE_AQ_RC_OK;
if (!cq->sq.count) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
@@ -1028,9 +1029,9 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
goto sq_send_command_error;
}
- desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
- if (buf_size > ICE_AQ_LG_BUF)
- desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_BUF);
+ if (buf_size > LIBIE_AQ_LG_BUF)
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
}
val = rd32(hw, cq->sq.head);
@@ -1112,9 +1113,9 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
retval &= 0xff;
}
cmd_completed = true;
- if (!status && retval != ICE_AQ_RC_OK)
+ if (!status && retval != LIBIE_AQ_RC_OK)
status = -EIO;
- cq->sq_last_status = (enum ice_aq_err)retval;
+ cq->sq_last_status = (enum libie_aq_err)retval;
}
ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");
@@ -1149,12 +1150,12 @@ sq_send_command_error:
*
* Fill the desc with default values
*/
-void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
+void ice_fill_dflt_direct_cmd_desc(struct libie_aq_desc *desc, u16 opcode)
{
/* zero out the desc */
memset(desc, 0, sizeof(*desc));
desc->opcode = cpu_to_le16(opcode);
- desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_SI);
}
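/*
 * Illustrative sketch, not part of the patch: the wrapper pattern after the
 * conversion. Command-specific structures are no longer reached through
 * desc.params.<member>; they are overlaid on the descriptor's raw parameter
 * area via libie_aq_raw(). The opcode below stands in for any direct command.
 */
static int ice_example_send_direct_cmd(struct ice_hw *hw, u16 opcode)
{
	struct libie_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, opcode);
	/* cmd = libie_aq_raw(&desc); then fill command-specific fields */

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}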
/**
@@ -1172,9 +1173,9 @@ int
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_rq_event_info *e, u16 *pending)
{
+ enum libie_aq_err rq_last_status;
u16 ntc = cq->rq.next_to_clean;
- enum ice_aq_err rq_last_status;
- struct ice_aq_desc *desc;
+ struct libie_aq_desc *desc;
struct ice_dma_mem *bi;
int ret_code = 0;
u16 desc_idx;
@@ -1207,9 +1208,9 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
desc = ICE_CTL_Q_DESC(cq->rq, ntc);
desc_idx = ntc;
- rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
+ rq_last_status = (enum libie_aq_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags);
- if (flags & ICE_AQ_FLAG_ERR) {
+ if (flags & LIBIE_AQ_FLAG_ERR) {
ret_code = -EIO;
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
le16_to_cpu(desc->opcode), rq_last_status);
@@ -1230,9 +1231,9 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
bi = &cq->rq.r.rq_bi[ntc];
memset(desc, 0, sizeof(*desc));
- desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
- if (cq->rq_buf_size > ICE_AQ_LG_BUF)
- desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_BUF);
+ if (cq->rq_buf_size > LIBIE_AQ_LG_BUF)
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
desc->datalen = cpu_to_le16(bi->size);
desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index ca97b7365a1b..788040dd662e 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -12,7 +12,7 @@
#define ICE_SBQ_MAX_BUF_LEN 512
#define ICE_CTL_Q_DESC(R, i) \
- (&(((struct ice_aq_desc *)((R).desc_buf.va))[i]))
+ (&(((struct libie_aq_desc *)((R).desc_buf.va))[i]))
#define ICE_CTL_Q_DESC_UNUSED(R) \
((u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
@@ -76,12 +76,12 @@ struct ice_ctl_q_ring {
/* sq transaction details */
struct ice_sq_cd {
- struct ice_aq_desc *wb_desc;
+ struct libie_aq_desc *wb_desc;
};
/* rq event information */
struct ice_rq_event_info {
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 msg_len;
u16 buf_len;
u8 *msg_buf;
@@ -96,7 +96,7 @@ struct ice_ctl_q_info {
u16 num_sq_entries; /* send queue depth */
u16 rq_buf_size; /* receive queue buffer size */
u16 sq_buf_size; /* send queue buffer size */
- enum ice_aq_err sq_last_status; /* last status on send queue */
+ enum libie_aq_err sq_last_status; /* last status on send queue */
struct mutex sq_lock; /* Send queue lock */
struct mutex rq_lock; /* Receive queue lock */
};
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 74418c445cc4..abea84f14658 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -24,10 +24,10 @@ ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_get_mib *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.lldp_get_mib;
+ cmd = libie_aq_raw(&desc);
if (buf_size == 0 || !buf)
return -EINVAL;
@@ -64,9 +64,9 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_set_mib_change *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.lldp_set_event;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_mib_change);
@@ -95,9 +95,9 @@ ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_stop *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.lldp_stop;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_stop);
@@ -121,9 +121,9 @@ ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
int ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_start *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.lldp_start;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_start);
@@ -677,11 +677,11 @@ ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
bool *dcbx_agent_status, struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_stop_start_specific_agent *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 opcode;
int status;
- cmd = &desc.params.lldp_agent_ctrl;
+ cmd = libie_aq_raw(&desc);
opcode = ice_aqc_opc_lldp_stop_start_specific_agent;
@@ -714,7 +714,7 @@ ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
struct ice_aqc_get_cee_dcb_cfg_resp *buff,
struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cee_dcb_cfg);
@@ -733,13 +733,13 @@ ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd)
{
struct ice_aqc_set_query_pfc_mode *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
if (pfc_mode > ICE_AQC_PFC_DSCP_BASED_PFC)
return -EINVAL;
- cmd = &desc.params.set_query_pfc_mode;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_pfc_mode);
@@ -914,7 +914,7 @@ static int ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, dcbx_cfg);
/* Don't treat ENOENT as an error for Remote MIBs */
- if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
+ if (pi->hw->adminq.sq_last_status == LIBIE_AQ_RC_ENOENT)
ret = 0;
out:
@@ -941,7 +941,7 @@ int ice_get_dcb_cfg(struct ice_port_info *pi)
/* CEE mode */
ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_CEE);
ice_cee_to_dcb_cfg(&cee_cfg, pi);
- } else if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) {
+ } else if (pi->hw->adminq.sq_last_status == LIBIE_AQ_RC_ENOENT) {
/* CEE mode not enabled try querying IEEE data */
dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE;
@@ -965,7 +965,7 @@ void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi,
struct ice_aqc_lldp_get_mib *mib;
u8 change_type, dcbx_mode;
- mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
+ mib = libie_aq_raw(&event->desc);
change_type = FIELD_GET(ICE_AQ_LLDP_MIB_TYPE_M, mib->type);
if (change_type == ICE_AQ_LLDP_MIB_REMOTE)
@@ -1288,7 +1288,7 @@ ice_add_dscp_up_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
tlv->ouisubtype = htonl(ouisubtype);
/* bytes 0 - 63 - IPv4 DSCP2UP LUT */
- for (i = 0; i < ICE_DSCP_NUM_VAL; i++) {
+ for (i = 0; i < DSCP_MAX; i++) {
/* IPv4 mapping */
buf[i] = dcbcfg->dscp_map[i];
/* IPv6 mapping */
@@ -1537,12 +1537,12 @@ ice_aq_query_port_ets(struct ice_port_info *pi,
struct ice_sq_cd *cd)
{
struct ice_aqc_query_port_ets *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
if (!pi)
return -EINVAL;
- cmd = &desc.params.port_ets;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_port_ets);
cmd->port_teid = pi->root->info.node_teid;
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index a7c510832824..9fc8681cc58e 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -352,8 +352,8 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
struct ice_aqc_port_ets_elem buf = { 0 };
struct ice_dcbx_cfg *old_cfg, *curr_cfg;
struct device *dev = ice_pf_to_dev(pf);
+ struct iidc_rdma_event *event;
int ret = ICE_DCB_NO_HW_CHG;
- struct iidc_event *event;
struct ice_vsi *pf_vsi;
curr_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
@@ -405,7 +405,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
goto free_cfg;
}
- set_bit(IIDC_EVENT_BEFORE_TC_CHANGE, event->type);
+ set_bit(IIDC_RDMA_EVENT_BEFORE_TC_CHANGE, event->type);
ice_send_event_to_aux(pf, event);
kfree(event);
@@ -740,7 +740,9 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked)
{
struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
- struct iidc_event *event;
+ struct iidc_rdma_priv_dev_info *privd;
+ struct iidc_rdma_core_dev_info *cdev;
+ struct iidc_rdma_event *event;
u8 tc_map = 0;
int v, ret;
@@ -783,13 +785,17 @@ void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked)
if (vsi->type == ICE_VSI_PF)
ice_dcbnl_set_all(vsi);
}
- if (!locked) {
+
+ cdev = pf->cdev_info;
+ if (cdev && !locked) {
+ privd = cdev->iidc_priv;
+ ice_setup_dcb_qos_info(pf, &privd->qos_info);
/* Notify the AUX drivers that TC change is finished */
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (!event)
return;
- set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type);
+ set_bit(IIDC_RDMA_EVENT_AFTER_TC_CHANGE, event->type);
ice_send_event_to_aux(pf, event);
kfree(event);
}
@@ -846,7 +852,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
goto dcb_init_err;
}
- ice_cfg_sw_lldp(pf_vsi, false, true);
+ ice_cfg_sw_rx_lldp(pf, true);
pf->dcbx_cap = ice_dcb_get_mode(port_info, true);
return 0;
@@ -945,6 +951,37 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
}
/**
+ * ice_setup_dcb_qos_info - Setup DCB QoS information
+ * @pf: ptr to ice_pf
+ * @qos_info: QoS param instance
+ */
+void ice_setup_dcb_qos_info(struct ice_pf *pf, struct iidc_rdma_qos_params *qos_info)
+{
+ struct ice_dcbx_cfg *dcbx_cfg;
+ unsigned int i;
+ u32 up2tc;
+
+ if (!pf || !qos_info)
+ return;
+
+ dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+ up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);
+
+ qos_info->num_tc = ice_dcb_get_num_tc(dcbx_cfg);
+
+ for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
+ qos_info->up2tc[i] = (up2tc >> (i * 3)) & 0x7;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ qos_info->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i];
+
+ qos_info->pfc_mode = dcbx_cfg->pfc_mode;
+ if (qos_info->pfc_mode == IIDC_DSCP_PFC_MODE)
+ for (i = 0; i < DSCP_MAX; i++)
+ qos_info->dscp_map[i] = dcbx_cfg->dscp_map[i];
+}
+
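/*
 * Illustrative sketch, not part of the patch: PRTDCB_TUP2TC packs one 3-bit
 * TC value per user priority, which is what the (up2tc >> (i * 3)) & 0x7
 * expression above unpacks.
 */
static u8 ice_example_up_to_tc(u32 up2tc, unsigned int up)
{
	return (up2tc >> (up * 3)) & 0x7;
}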
+/**
* ice_dcb_is_mib_change_pending - Check if MIB change is pending
* @state: MIB change state
*/
@@ -983,7 +1020,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
}
pi = pf->hw.port_info;
- mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
+ mib = libie_aq_raw(&event->desc);
/* Ignore if event is not for Nearest Bridge */
mib_type = FIELD_GET(ICE_AQ_LLDP_BRID_TYPE_M, mib->type);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index 800879a88c5e..da9ba814b4e8 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -31,6 +31,9 @@ void
ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
struct ice_tx_buf *first);
void
+ice_setup_dcb_qos_info(struct ice_pf *pf,
+ struct iidc_rdma_qos_params *qos_info);
+void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct ice_rq_event_info *event);
/**
@@ -134,5 +137,11 @@ static inline void ice_update_dcb_stats(struct ice_pf *pf) { }
static inline void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_rq_event_info *event) { }
static inline void ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, u8 dcb_tc) { }
+static inline void
+ice_setup_dcb_qos_info(struct ice_pf *pf, struct iidc_rdma_qos_params *qos_info)
+{
+ qos_info->num_tc = 1;
+ qos_info->tc_info[0].rel_bw = 100;
+}
#endif /* CONFIG_DCB */
#endif /* _ICE_DCB_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index 6d50b90a7359..a10c1c8d8697 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -754,7 +754,7 @@ static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app)
if (!ice_is_feature_supported(pf, ICE_F_DSCP))
return -EOPNOTSUPP;
- if (app->protocol >= ICE_DSCP_NUM_VAL) {
+ if (app->protocol >= DSCP_MAX) {
netdev_err(netdev, "DSCP value 0x%04X out of range\n",
app->protocol);
return -EINVAL;
@@ -931,7 +931,7 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
/* if the last DSCP mapping just got deleted, need to switch
* to L2 VLAN QoS mode
*/
- if (bitmap_empty(new_cfg->dscp_mapped, ICE_DSCP_NUM_VAL) &&
+ if (bitmap_empty(new_cfg->dscp_mapped, DSCP_MAX) &&
new_cfg->pfc_mode == ICE_QOS_MODE_DSCP) {
ret = ice_aq_set_pfc_mode(&pf->hw,
ICE_AQC_PFC_VLAN_BASED_PFC,
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index 272fd823a825..3b2d9c436979 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -1101,16 +1101,16 @@ struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
return &bld->buf;
}
-static enum ice_ddp_state ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err)
+static enum ice_ddp_state ice_map_aq_err_to_ddp_state(enum libie_aq_err aq_err)
{
switch (aq_err) {
- case ICE_AQ_RC_ENOSEC:
- case ICE_AQ_RC_EBADSIG:
+ case LIBIE_AQ_RC_ENOSEC:
+ case LIBIE_AQ_RC_EBADSIG:
return ICE_DDP_PKG_FILE_SIGNATURE_INVALID;
- case ICE_AQ_RC_ESVN:
+ case LIBIE_AQ_RC_ESVN:
return ICE_DDP_PKG_FILE_REVISION_TOO_LOW;
- case ICE_AQ_RC_EBADMAN:
- case ICE_AQ_RC_EBADBUF:
+ case LIBIE_AQ_RC_EBADMAN:
+ case LIBIE_AQ_RC_EBADBUF:
return ICE_DDP_PKG_LOAD_ERROR;
default:
return ICE_DDP_PKG_ERR;
@@ -1180,7 +1180,7 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u32 *error_info, struct ice_sq_cd *cd)
{
struct ice_aqc_download_pkg *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
if (error_offset)
@@ -1188,9 +1188,9 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
if (error_info)
*error_info = 0;
- cmd = &desc.params.download_pkg;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
if (last_buf)
cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
@@ -1211,6 +1211,132 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
}
/**
+ * ice_is_buffer_metadata - determine if package buffer is a metadata buffer
+ * @buf: pointer to buffer header
+ * Return: true if the given @buf is a metadata buffer, false otherwise.
+ */
+static bool ice_is_buffer_metadata(struct ice_buf_hdr *buf)
+{
+ return le32_to_cpu(buf->section_entry[0].type) & ICE_METADATA_BUF;
+}
+
+/**
+ * struct ice_ddp_send_ctx - sending context of current DDP segment
+ * @hw: pointer to the hardware struct
+ *
+ * Keeps current sending state (header, error) for the purpose of proper "last"
+ * bit setting in ice_aq_download_pkg(). Use via calls to ice_ddp_send_hunk().
+ */
+struct ice_ddp_send_ctx {
+ struct ice_hw *hw;
+/* private: only for ice_ddp_send_hunk() */
+ struct ice_buf_hdr *hdr;
+ int err;
+};
+
+static void ice_ddp_send_ctx_set_err(struct ice_ddp_send_ctx *ctx, int err)
+{
+ ctx->err = err;
+}
+
+/**
+ * ice_ddp_send_hunk - send one hunk of data to FW
+ * @ctx: current segment sending context
+ * @hunk: next hunk to send, size is always ICE_PKG_BUF_SIZE
+ *
+ * Send the next hunk of data to FW, retrying if needed.
+ *
+ * Note: must be called once more with a NULL @hunk to finish up; that call
+ * sets the "last" bit of the AQ request. After it returns, @ctx.hdr is
+ * cleared while @hw remains valid.
+ *
+ * Return: %ICE_DDP_PKG_SUCCESS if there were no problems; a sticky @err
+ * otherwise.
+ */
+static enum ice_ddp_state ice_ddp_send_hunk(struct ice_ddp_send_ctx *ctx,
+ struct ice_buf_hdr *hunk)
+{
+ struct ice_buf_hdr *prev_hunk = ctx->hdr;
+ struct ice_hw *hw = ctx->hw;
+ bool prev_was_last = !hunk;
+ enum libie_aq_err aq_err;
+ u32 offset, info;
+ int attempt, err;
+
+ if (ctx->err)
+ return ctx->err;
+
+ ctx->hdr = hunk;
+ if (!prev_hunk)
+ return ICE_DDP_PKG_SUCCESS; /* no problem so far */
+
+ for (attempt = 0; attempt < 5; attempt++) {
+ if (attempt)
+ msleep(20);
+
+ err = ice_aq_download_pkg(hw, prev_hunk, ICE_PKG_BUF_SIZE,
+ prev_was_last, &offset, &info, NULL);
+
+ aq_err = hw->adminq.sq_last_status;
+ if (aq_err != LIBIE_AQ_RC_ENOSEC &&
+ aq_err != LIBIE_AQ_RC_EBADSIG)
+ break;
+ }
+
+ if (err) {
+ ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
+ err, offset, info);
+ ctx->err = ice_map_aq_err_to_ddp_state(aq_err);
+ } else if (attempt) {
+ dev_dbg(ice_hw_to_dev(hw),
+ "ice_aq_download_pkg number of retries: %d\n", attempt);
+ }
+
+ return ctx->err;
+}
+
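/*
 * Illustrative sketch, not part of the patch: the expected calling pattern.
 * Each buffer is handed to ice_ddp_send_hunk(), and one final NULL call lets
 * the helper send the previously queued buffer with the "last" bit set.
 */
static enum ice_ddp_state
ice_example_send_all_hunks(struct ice_ddp_send_ctx *ctx,
			   struct ice_buf_hdr **hunks, u32 count)
{
	enum ice_ddp_state err;
	u32 i;

	for (i = 0; i < count; i++) {
		err = ice_ddp_send_hunk(ctx, hunks[i]);
		if (err)
			return err;
	}

	return ice_ddp_send_hunk(ctx, NULL);
}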
+/**
+ * ice_dwnld_cfg_bufs_no_lock
+ * @ctx: context of the current buffers section to send
+ * @bufs: pointer to an array of buffers
+ * @start: buffer index of first buffer to download
+ * @count: the number of buffers to download
+ *
+ * Downloads package configuration buffers to the firmware. Metadata buffers
+ * are skipped, and the first metadata buffer found indicates that the rest
+ * of the buffers are all metadata buffers.
+ */
+static enum ice_ddp_state
+ice_dwnld_cfg_bufs_no_lock(struct ice_ddp_send_ctx *ctx, struct ice_buf *bufs,
+ u32 start, u32 count)
+{
+ struct ice_buf_hdr *bh;
+ enum ice_ddp_state err;
+
+ if (!bufs || !count) {
+ ice_ddp_send_ctx_set_err(ctx, ICE_DDP_PKG_ERR);
+ return ICE_DDP_PKG_ERR;
+ }
+
+ bufs += start;
+
+ for (int i = 0; i < count; i++, bufs++) {
+ bh = (struct ice_buf_hdr *)bufs;
+ /* Metadata buffers should not be sent to FW,
+ * their presence means "we are done here".
+ */
+ if (ice_is_buffer_metadata(bh))
+ break;
+
+ err = ice_ddp_send_hunk(ctx, bh);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
* ice_get_pkg_seg_by_idx
* @pkg_hdr: pointer to the package header to be searched
* @idx: index of segment
@@ -1270,136 +1396,20 @@ ice_is_signing_seg_type_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx,
}
/**
- * ice_is_buffer_metadata - determine if package buffer is a metadata buffer
- * @buf: pointer to buffer header
- */
-static bool ice_is_buffer_metadata(struct ice_buf_hdr *buf)
-{
- if (le32_to_cpu(buf->section_entry[0].type) & ICE_METADATA_BUF)
- return true;
-
- return false;
-}
-
-/**
- * ice_is_last_download_buffer
- * @buf: pointer to current buffer header
- * @idx: index of the buffer in the current sequence
- * @count: the buffer count in the current sequence
- *
- * Note: this routine should only be called if the buffer is not the last buffer
- */
-static bool
-ice_is_last_download_buffer(struct ice_buf_hdr *buf, u32 idx, u32 count)
-{
- struct ice_buf *next_buf;
-
- if ((idx + 1) == count)
- return true;
-
- /* A set metadata flag in the next buffer will signal that the current
- * buffer will be the last buffer downloaded
- */
- next_buf = ((struct ice_buf *)buf) + 1;
-
- return ice_is_buffer_metadata((struct ice_buf_hdr *)next_buf);
-}
-
-/**
- * ice_dwnld_cfg_bufs_no_lock
- * @hw: pointer to the hardware structure
- * @bufs: pointer to an array of buffers
- * @start: buffer index of first buffer to download
- * @count: the number of buffers to download
- * @indicate_last: if true, then set last buffer flag on last buffer download
- *
- * Downloads package configuration buffers to the firmware. Metadata buffers
- * are skipped, and the first metadata buffer found indicates that the rest
- * of the buffers are all metadata buffers.
- */
-static enum ice_ddp_state
-ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start,
- u32 count, bool indicate_last)
-{
- enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
- struct ice_buf_hdr *bh;
- enum ice_aq_err err;
- u32 offset, info, i;
-
- if (!bufs || !count)
- return ICE_DDP_PKG_ERR;
-
- /* If the first buffer's first section has its metadata bit set
- * then there are no buffers to be downloaded, and the operation is
- * considered a success.
- */
- bh = (struct ice_buf_hdr *)(bufs + start);
- if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
- return ICE_DDP_PKG_SUCCESS;
-
- for (i = 0; i < count; i++) {
- bool last = false;
- int try_cnt = 0;
- int status;
-
- bh = (struct ice_buf_hdr *)(bufs + start + i);
-
- if (indicate_last)
- last = ice_is_last_download_buffer(bh, i, count);
-
- while (1) {
- status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE,
- last, &offset, &info,
- NULL);
- if (hw->adminq.sq_last_status != ICE_AQ_RC_ENOSEC &&
- hw->adminq.sq_last_status != ICE_AQ_RC_EBADSIG)
- break;
-
- try_cnt++;
-
- if (try_cnt == 5)
- break;
-
- msleep(20);
- }
-
- if (try_cnt)
- dev_dbg(ice_hw_to_dev(hw),
- "ice_aq_download_pkg number of retries: %d\n",
- try_cnt);
-
- /* Save AQ status from download package */
- if (status) {
- ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
- status, offset, info);
- err = hw->adminq.sq_last_status;
- state = ice_map_aq_err_to_ddp_state(err);
- break;
- }
-
- if (last)
- break;
- }
-
- return state;
-}
-
-/**
* ice_download_pkg_sig_seg - download a signature segment
- * @hw: pointer to the hardware structure
+ * @ctx: context of the current buffers section to send
* @seg: pointer to signature segment
*/
static enum ice_ddp_state
-ice_download_pkg_sig_seg(struct ice_hw *hw, struct ice_sign_seg *seg)
+ice_download_pkg_sig_seg(struct ice_ddp_send_ctx *ctx, struct ice_sign_seg *seg)
{
- return ice_dwnld_cfg_bufs_no_lock(hw, seg->buf_tbl.buf_array, 0,
- le32_to_cpu(seg->buf_tbl.buf_count),
- false);
+ return ice_dwnld_cfg_bufs_no_lock(ctx, seg->buf_tbl.buf_array, 0,
+ le32_to_cpu(seg->buf_tbl.buf_count));
}
/**
* ice_download_pkg_config_seg - download a config segment
- * @hw: pointer to the hardware structure
+ * @ctx: context of the current buffers section to send
* @pkg_hdr: pointer to package header
* @idx: segment index
* @start: starting buffer
@@ -1408,8 +1418,9 @@ ice_download_pkg_sig_seg(struct ice_hw *hw, struct ice_sign_seg *seg)
* Note: idx must reference a ICE segment
*/
static enum ice_ddp_state
-ice_download_pkg_config_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
- u32 idx, u32 start, u32 count)
+ice_download_pkg_config_seg(struct ice_ddp_send_ctx *ctx,
+ struct ice_pkg_hdr *pkg_hdr, u32 idx, u32 start,
+ u32 count)
{
struct ice_buf_table *bufs;
struct ice_seg *seg;
@@ -1425,46 +1436,56 @@ ice_download_pkg_config_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
if (start >= buf_count || start + count > buf_count)
return ICE_DDP_PKG_ERR;
- return ice_dwnld_cfg_bufs_no_lock(hw, bufs->buf_array, start, count,
- true);
+ return ice_dwnld_cfg_bufs_no_lock(ctx, bufs->buf_array, start, count);
+}
+
+static bool ice_is_last_sign_seg(u32 flags)
+{
+ return !(flags & ICE_SIGN_SEG_FLAGS_VALID) || /* behavior prior to valid */
+ (flags & ICE_SIGN_SEG_FLAGS_LAST);
}
/**
* ice_dwnld_sign_and_cfg_segs - download a signing segment and config segment
- * @hw: pointer to the hardware structure
+ * @ctx: context of the current buffers section to send
* @pkg_hdr: pointer to package header
* @idx: segment index (must be a signature segment)
*
* Note: idx must reference a signature segment
*/
static enum ice_ddp_state
-ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
- u32 idx)
+ice_dwnld_sign_and_cfg_segs(struct ice_ddp_send_ctx *ctx,
+ struct ice_pkg_hdr *pkg_hdr, u32 idx)
{
+ u32 conf_idx, start, count, flags;
enum ice_ddp_state state;
struct ice_sign_seg *seg;
- u32 conf_idx;
- u32 start;
- u32 count;
seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx);
if (!seg) {
state = ICE_DDP_PKG_ERR;
- goto exit;
+ ice_ddp_send_ctx_set_err(ctx, state);
+ return state;
}
count = le32_to_cpu(seg->signed_buf_count);
- state = ice_download_pkg_sig_seg(hw, seg);
+ state = ice_download_pkg_sig_seg(ctx, seg);
if (state || !count)
- goto exit;
+ return state;
conf_idx = le32_to_cpu(seg->signed_seg_idx);
start = le32_to_cpu(seg->signed_buf_start);
- state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start,
+ state = ice_download_pkg_config_seg(ctx, pkg_hdr, conf_idx, start,
count);
-exit:
+ /* finish up by sending last hunk with "last" flag set if requested by
+ * DDP content
+ */
+ flags = le32_to_cpu(seg->flags);
+ if (ice_is_last_sign_seg(flags))
+ state = ice_ddp_send_hunk(ctx, NULL);
+
return state;
}
@@ -1517,8 +1538,9 @@ ice_post_dwnld_pkg_actions(struct ice_hw *hw)
static enum ice_ddp_state
ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
- enum ice_aq_err aq_err = hw->adminq.sq_last_status;
+ enum libie_aq_err aq_err = hw->adminq.sq_last_status;
enum ice_ddp_state state = ICE_DDP_PKG_ERR;
+ struct ice_ddp_send_ctx ctx = { .hw = hw };
int status;
u32 i;
@@ -1539,7 +1561,7 @@ ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
hw->pkg_sign_type))
continue;
- state = ice_dwnld_sign_and_cfg_segs(hw, pkg_hdr, i);
+ state = ice_dwnld_sign_and_cfg_segs(&ctx, pkg_hdr, i);
if (state)
break;
}
@@ -1564,6 +1586,7 @@ ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
static enum ice_ddp_state
ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
+ struct ice_ddp_send_ctx ctx = { .hw = hw };
enum ice_ddp_state state;
struct ice_buf_hdr *bh;
int status;
@@ -1576,7 +1599,7 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
* considered a success.
*/
bh = (struct ice_buf_hdr *)bufs;
- if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
+ if (ice_is_buffer_metadata(bh))
return ICE_DDP_PKG_SUCCESS;
status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
@@ -1586,7 +1609,9 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
}
- state = ice_dwnld_cfg_bufs_no_lock(hw, bufs, 0, count, true);
+ ice_dwnld_cfg_bufs_no_lock(&ctx, bufs, 0, count);
+ /* finish up by sending last hunk with "last" flag set */
+ state = ice_ddp_send_hunk(&ctx, NULL);
if (!state)
state = ice_post_dwnld_pkg_actions(hw);
@@ -1663,7 +1688,7 @@ static int ice_aq_get_pkg_info_list(struct ice_hw *hw,
struct ice_aqc_get_pkg_info_resp *pkg_info,
u16 buf_size, struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
@@ -1687,7 +1712,7 @@ static int ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u32 *error_info, struct ice_sq_cd *cd)
{
struct ice_aqc_download_pkg *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
if (error_offset)
@@ -1695,9 +1720,9 @@ static int ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
if (error_info)
*error_info = 0;
- cmd = &desc.params.download_pkg;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
if (last_buf)
cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
@@ -1729,10 +1754,10 @@ static int ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u16 buf_size, struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
}
@@ -2277,6 +2302,8 @@ enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf,
return ICE_DDP_PKG_ERR;
buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);
+ if (!buf_copy)
+ return ICE_DDP_PKG_ERR;
state = ice_init_pkg(hw, buf_copy, len);
if (!ice_is_init_pkg_successful(state)) {
@@ -2309,10 +2336,10 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
struct ice_sq_cd *cd, u8 *flags, bool set)
{
struct ice_aqc_get_set_tx_topo *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.get_set_tx_topo;
+ cmd = libie_aq_raw(&desc);
if (set) {
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_tx_topo);
cmd->set_flags = ICE_AQC_TX_TOPO_FLAGS_ISSUED;
@@ -2321,22 +2348,22 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM |
ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW;
- if (ice_is_e825c(hw))
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
} else {
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo);
cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM;
- }
- if (!ice_is_e825c(hw))
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ if (hw->mac_type == ICE_MAC_E810 ||
+ hw->mac_type == ICE_MAC_GENERIC)
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
+ }
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
if (status)
return status;
/* read the return flag values (first byte) for get operation */
if (!set && flags)
- *flags = desc.params.get_set_tx_topo.set_flags;
+ *flags = cmd->set_flags;
return 0;
}
@@ -2350,7 +2377,13 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
* The function will apply the new Tx topology from the package buffer
* if available.
*
- * Return: zero when update was successful, negative values otherwise.
+ * Return:
+ * * 0 - Successfully applied topology configuration.
+ * * -EBUSY - Failed to acquire global configuration lock.
+ * * -EEXIST - Topology configuration has already been applied.
+ * * -EIO - Unable to apply topology configuration.
+ * * -ENODEV - Failed to re-initialize device after applying configuration.
+ * * Other negative error codes indicate unexpected failures.
*/
int ice_cfg_tx_topo(struct ice_hw *hw, const void *buf, u32 len)
{
@@ -2383,7 +2416,7 @@ int ice_cfg_tx_topo(struct ice_hw *hw, const void *buf, u32 len)
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Get current topology is failed\n");
- return status;
+ return -EIO;
}
/* Is default topology already applied ? */
@@ -2470,31 +2503,45 @@ update_topo:
ICE_GLOBAL_CFG_LOCK_TIMEOUT);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to acquire global lock\n");
- return status;
+ return -EBUSY;
}
/* Check if reset was triggered already. */
reg = rd32(hw, GLGEN_RSTAT);
if (reg & GLGEN_RSTAT_DEVSTATE_M) {
- /* Reset is in progress, re-init the HW again */
ice_debug(hw, ICE_DBG_INIT, "Reset is in progress. Layer topology might be applied already\n");
ice_check_reset(hw);
- return 0;
+ /* Reset is in progress, re-init the HW again */
+ goto reinit_hw;
}
/* Set new topology */
status = ice_get_set_tx_topo(hw, new_topo, size, NULL, NULL, true);
if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Failed setting Tx topology\n");
- return status;
+ ice_debug(hw, ICE_DBG_INIT, "Failed to set Tx topology, status %pe\n",
+ ERR_PTR(status));
+ /* only report -EIO here as the caller checks the error value
+ * and reports an informational error message informing that
+ * the driver failed to program Tx topology.
+ */
+ status = -EIO;
}
- /* New topology is updated, delay 1 second before issuing the CORER */
+ /* Even if Tx topology config failed, we need to CORE reset here to
+ * clear the global configuration lock. Delay 1 second to allow
+ * hardware to settle then issue a CORER
+ */
msleep(1000);
ice_reset(hw, ICE_RESET_CORER);
- /* CORER will clear the global lock, so no explicit call
- * required for release.
- */
+ ice_check_reset(hw);
+
+reinit_hw:
+ /* Since we triggered a CORER, re-initialize hardware */
+ ice_deinit_hw(hw);
+ if (ice_init_hw(hw)) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to re-init hardware after setting Tx topology\n");
+ return -ENODEV;
+ }
- return 0;
+ return status;
}
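For illustration only (not part of the patch; the function name is made up), a sketch of a caller consuming the return codes documented above; the driver's real probe-path caller lives elsewhere.

static void example_apply_tx_topo(struct ice_hw *hw, const void *pkg, u32 len)
{
	int err = ice_cfg_tx_topo(hw, pkg, len);

	switch (err) {
	case 0:		/* new topology applied, HW was re-initialized */
	case -EEXIST:	/* requested topology already in place */
		break;
	case -EBUSY:	/* global config lock unavailable, retry later */
	case -EIO:	/* FW rejected the topology, default layout stays */
	case -ENODEV:	/* re-init after CORER failed, device unusable */
	default:	/* other unexpected failures */
		break;
	}
}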
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.h b/drivers/net/ethernet/intel/ice/ice_ddp.h
index 79551da2a4b0..8a2d57fc5dae 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.h
@@ -181,7 +181,10 @@ struct ice_sign_seg {
__le32 signed_seg_idx;
__le32 signed_buf_start;
__le32 signed_buf_count;
-#define ICE_SIGN_SEG_RESERVED_COUNT 44
+#define ICE_SIGN_SEG_FLAGS_VALID 0x80000000
+#define ICE_SIGN_SEG_FLAGS_LAST 0x00000001
+ __le32 flags;
+#define ICE_SIGN_SEG_RESERVED_COUNT 40
u8 reserved[ICE_SIGN_SEG_RESERVED_COUNT];
struct ice_buf_table buf_tbl;
};
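The new flags word is carved out of the formerly reserved area, so the ice_sign_seg layout size is unchanged: the 44 previously reserved bytes become 4 bytes of flags plus 40 reserved bytes. A hypothetical compile-time check of that invariant (not part of the patch):

static_assert(sizeof(__le32) + ICE_SIGN_SEG_RESERVED_COUNT == 44,
	      "flags must be carved out of the former reserved bytes");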
diff --git a/drivers/net/ethernet/intel/ice/ice_debugfs.c b/drivers/net/ethernet/intel/ice/ice_debugfs.c
index 9fc0fd95a13d..f450250fc827 100644
--- a/drivers/net/ethernet/intel/ice/ice_debugfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_debugfs.c
@@ -1,647 +1,20 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022, Intel Corporation. */
-#include <linux/fs.h>
#include <linux/debugfs.h>
-#include <linux/random.h>
-#include <linux/vmalloc.h>
#include "ice.h"
static struct dentry *ice_debugfs_root;
-/* create a define that has an extra module that doesn't really exist. this
- * is so we can add a module 'all' to easily enable/disable all the modules
- */
-#define ICE_NR_FW_LOG_MODULES (ICE_AQC_FW_LOG_ID_MAX + 1)
-
-/* the ordering in this array is important. it matches the ordering of the
- * values in the FW so the index is the same value as in ice_aqc_fw_logging_mod
- */
-static const char * const ice_fwlog_module_string[] = {
- "general",
- "ctrl",
- "link",
- "link_topo",
- "dnl",
- "i2c",
- "sdp",
- "mdio",
- "adminq",
- "hdma",
- "lldp",
- "dcbx",
- "dcb",
- "xlr",
- "nvm",
- "auth",
- "vpd",
- "iosf",
- "parser",
- "sw",
- "scheduler",
- "txq",
- "rsvd",
- "post",
- "watchdog",
- "task_dispatch",
- "mng",
- "synce",
- "health",
- "tsdrv",
- "pfreg",
- "mdlver",
- "all",
-};
-
-/* the ordering in this array is important. it matches the ordering of the
- * values in the FW so the index is the same value as in ice_fwlog_level
- */
-static const char * const ice_fwlog_level_string[] = {
- "none",
- "error",
- "warning",
- "normal",
- "verbose",
-};
-
-static const char * const ice_fwlog_log_size[] = {
- "128K",
- "256K",
- "512K",
- "1M",
- "2M",
-};
-
-/**
- * ice_fwlog_print_module_cfg - print current FW logging module configuration
- * @hw: pointer to the HW structure
- * @module: module to print
- * @s: the seq file to put data into
- */
-static void
-ice_fwlog_print_module_cfg(struct ice_hw *hw, int module, struct seq_file *s)
-{
- struct ice_fwlog_cfg *cfg = &hw->fwlog_cfg;
- struct ice_fwlog_module_entry *entry;
-
- if (module != ICE_AQC_FW_LOG_ID_MAX) {
- entry = &cfg->module_entries[module];
-
- seq_printf(s, "\tModule: %s, Log Level: %s\n",
- ice_fwlog_module_string[entry->module_id],
- ice_fwlog_level_string[entry->log_level]);
- } else {
- int i;
-
- for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
- entry = &cfg->module_entries[i];
-
- seq_printf(s, "\tModule: %s, Log Level: %s\n",
- ice_fwlog_module_string[entry->module_id],
- ice_fwlog_level_string[entry->log_level]);
- }
- }
-}
-
-static int ice_find_module_by_dentry(struct ice_pf *pf, struct dentry *d)
-{
- int i, module;
-
- module = -1;
- /* find the module based on the dentry */
- for (i = 0; i < ICE_NR_FW_LOG_MODULES; i++) {
- if (d == pf->ice_debugfs_pf_fwlog_modules[i]) {
- module = i;
- break;
- }
- }
-
- return module;
-}
-
-/**
- * ice_debugfs_module_show - read from 'module' file
- * @s: the opened file
- * @v: pointer to the offset
- */
-static int ice_debugfs_module_show(struct seq_file *s, void *v)
-{
- const struct file *filp = s->file;
- struct dentry *dentry;
- struct ice_pf *pf;
- int module;
-
- dentry = file_dentry(filp);
- pf = s->private;
-
- module = ice_find_module_by_dentry(pf, dentry);
- if (module < 0) {
- dev_info(ice_pf_to_dev(pf), "unknown module\n");
- return -EINVAL;
- }
-
- ice_fwlog_print_module_cfg(&pf->hw, module, s);
-
- return 0;
-}
-
-static int ice_debugfs_module_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, ice_debugfs_module_show, inode->i_private);
-}
-
-/**
- * ice_debugfs_module_write - write into 'module' file
- * @filp: the opened file
- * @buf: where to find the user's data
- * @count: the length of the user's data
- * @ppos: file position offset
- */
-static ssize_t
-ice_debugfs_module_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct ice_pf *pf = file_inode(filp)->i_private;
- struct dentry *dentry = file_dentry(filp);
- struct device *dev = ice_pf_to_dev(pf);
- char user_val[16], *cmd_buf;
- int module, log_level, cnt;
-
- /* don't allow partial writes or invalid input */
- if (*ppos != 0 || count > 8)
- return -EINVAL;
-
- cmd_buf = memdup_user_nul(buf, count);
- if (IS_ERR(cmd_buf))
- return PTR_ERR(cmd_buf);
-
- module = ice_find_module_by_dentry(pf, dentry);
- if (module < 0) {
- dev_info(dev, "unknown module\n");
- return -EINVAL;
- }
-
- cnt = sscanf(cmd_buf, "%s", user_val);
- if (cnt != 1)
- return -EINVAL;
-
- log_level = sysfs_match_string(ice_fwlog_level_string, user_val);
- if (log_level < 0) {
- dev_info(dev, "unknown log level '%s'\n", user_val);
- return -EINVAL;
- }
-
- if (module != ICE_AQC_FW_LOG_ID_MAX) {
- ice_pf_fwlog_update_module(pf, log_level, module);
- } else {
- /* the module 'all' is a shortcut so that we can set
- * all of the modules to the same level quickly
- */
- int i;
-
- for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++)
- ice_pf_fwlog_update_module(pf, log_level, i);
- }
-
- return count;
-}
-
-static const struct file_operations ice_debugfs_module_fops = {
- .owner = THIS_MODULE,
- .open = ice_debugfs_module_open,
- .read = seq_read,
- .release = single_release,
- .write = ice_debugfs_module_write,
-};
-
-/**
- * ice_debugfs_nr_messages_read - read from 'nr_messages' file
- * @filp: the opened file
- * @buffer: where to write the data for the user to read
- * @count: the size of the user's buffer
- * @ppos: file position offset
- */
-static ssize_t ice_debugfs_nr_messages_read(struct file *filp,
- char __user *buffer, size_t count,
- loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct ice_hw *hw = &pf->hw;
- char buff[32] = {};
-
- snprintf(buff, sizeof(buff), "%d\n",
- hw->fwlog_cfg.log_resolution);
-
- return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
-}
-
-/**
- * ice_debugfs_nr_messages_write - write into 'nr_messages' file
- * @filp: the opened file
- * @buf: where to find the user's data
- * @count: the length of the user's data
- * @ppos: file position offset
- */
-static ssize_t
-ice_debugfs_nr_messages_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- char user_val[8], *cmd_buf;
- s16 nr_messages;
- ssize_t ret;
-
- /* don't allow partial writes or invalid input */
- if (*ppos != 0 || count > 4)
- return -EINVAL;
-
- cmd_buf = memdup_user_nul(buf, count);
- if (IS_ERR(cmd_buf))
- return PTR_ERR(cmd_buf);
-
- ret = sscanf(cmd_buf, "%s", user_val);
- if (ret != 1)
- return -EINVAL;
-
- ret = kstrtos16(user_val, 0, &nr_messages);
- if (ret)
- return ret;
-
- if (nr_messages < ICE_AQC_FW_LOG_MIN_RESOLUTION ||
- nr_messages > ICE_AQC_FW_LOG_MAX_RESOLUTION) {
- dev_err(dev, "Invalid FW log number of messages %d, value must be between %d - %d\n",
- nr_messages, ICE_AQC_FW_LOG_MIN_RESOLUTION,
- ICE_AQC_FW_LOG_MAX_RESOLUTION);
- return -EINVAL;
- }
-
- hw->fwlog_cfg.log_resolution = nr_messages;
-
- return count;
-}
-
-static const struct file_operations ice_debugfs_nr_messages_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = ice_debugfs_nr_messages_read,
- .write = ice_debugfs_nr_messages_write,
-};
-
-/**
- * ice_debugfs_enable_read - read from 'enable' file
- * @filp: the opened file
- * @buffer: where to write the data for the user to read
- * @count: the size of the user's buffer
- * @ppos: file position offset
- */
-static ssize_t ice_debugfs_enable_read(struct file *filp,
- char __user *buffer, size_t count,
- loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct ice_hw *hw = &pf->hw;
- char buff[32] = {};
-
- snprintf(buff, sizeof(buff), "%u\n",
- (u16)(hw->fwlog_cfg.options &
- ICE_FWLOG_OPTION_IS_REGISTERED) >> 3);
-
- return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
-}
-
-/**
- * ice_debugfs_enable_write - write into 'enable' file
- * @filp: the opened file
- * @buf: where to find the user's data
- * @count: the length of the user's data
- * @ppos: file position offset
- */
-static ssize_t
-ice_debugfs_enable_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct ice_hw *hw = &pf->hw;
- char user_val[8], *cmd_buf;
- bool enable;
- ssize_t ret;
-
- /* don't allow partial writes or invalid input */
- if (*ppos != 0 || count > 2)
- return -EINVAL;
-
- cmd_buf = memdup_user_nul(buf, count);
- if (IS_ERR(cmd_buf))
- return PTR_ERR(cmd_buf);
-
- ret = sscanf(cmd_buf, "%s", user_val);
- if (ret != 1)
- return -EINVAL;
-
- ret = kstrtobool(user_val, &enable);
- if (ret)
- goto enable_write_error;
-
- if (enable)
- hw->fwlog_cfg.options |= ICE_FWLOG_OPTION_ARQ_ENA;
- else
- hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_ARQ_ENA;
-
- ret = ice_fwlog_set(hw, &hw->fwlog_cfg);
- if (ret)
- goto enable_write_error;
-
- if (enable)
- ret = ice_fwlog_register(hw);
- else
- ret = ice_fwlog_unregister(hw);
-
- if (ret)
- goto enable_write_error;
-
- /* if we get here, nothing went wrong; return count since we didn't
- * really write anything
- */
- ret = (ssize_t)count;
-
-enable_write_error:
- /* This function always consumes all of the written input, or produces
- * an error. Check and enforce this. Otherwise, the write operation
- * won't complete properly.
- */
- if (WARN_ON(ret != (ssize_t)count && ret >= 0))
- ret = -EIO;
-
- return ret;
-}
-
-static const struct file_operations ice_debugfs_enable_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = ice_debugfs_enable_read,
- .write = ice_debugfs_enable_write,
-};
-
-/**
- * ice_debugfs_log_size_read - read from 'log_size' file
- * @filp: the opened file
- * @buffer: where to write the data for the user to read
- * @count: the size of the user's buffer
- * @ppos: file position offset
- */
-static ssize_t ice_debugfs_log_size_read(struct file *filp,
- char __user *buffer, size_t count,
- loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct ice_hw *hw = &pf->hw;
- char buff[32] = {};
- int index;
-
- index = hw->fwlog_ring.index;
- snprintf(buff, sizeof(buff), "%s\n", ice_fwlog_log_size[index]);
-
- return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
-}
-
-/**
- * ice_debugfs_log_size_write - write into 'log_size' file
- * @filp: the opened file
- * @buf: where to find the user's data
- * @count: the length of the user's data
- * @ppos: file position offset
- */
-static ssize_t
-ice_debugfs_log_size_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- char user_val[8], *cmd_buf;
- ssize_t ret;
- int index;
-
- /* don't allow partial writes or invalid input */
- if (*ppos != 0 || count > 5)
- return -EINVAL;
-
- cmd_buf = memdup_user_nul(buf, count);
- if (IS_ERR(cmd_buf))
- return PTR_ERR(cmd_buf);
-
- ret = sscanf(cmd_buf, "%s", user_val);
- if (ret != 1)
- return -EINVAL;
-
- index = sysfs_match_string(ice_fwlog_log_size, user_val);
- if (index < 0) {
- dev_info(dev, "Invalid log size '%s'. The value must be one of 128K, 256K, 512K, 1M, 2M\n",
- user_val);
- ret = -EINVAL;
- goto log_size_write_error;
- } else if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED) {
- dev_info(dev, "FW logging is currently running. Please disable FW logging to change log_size\n");
- ret = -EINVAL;
- goto log_size_write_error;
- }
-
- /* free all the buffers and the tracking info and resize */
- ice_fwlog_realloc_rings(hw, index);
-
- /* if we get here, nothing went wrong; return count since we didn't
- * really write anything
- */
- ret = (ssize_t)count;
-
-log_size_write_error:
- /* This function always consumes all of the written input, or produces
- * an error. Check and enforce this. Otherwise, the write operation
- * won't complete properly.
- */
- if (WARN_ON(ret != (ssize_t)count && ret >= 0))
- ret = -EIO;
-
- return ret;
-}
-
-static const struct file_operations ice_debugfs_log_size_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = ice_debugfs_log_size_read,
- .write = ice_debugfs_log_size_write,
-};
-
-/**
- * ice_debugfs_data_read - read from 'data' file
- * @filp: the opened file
- * @buffer: where to write the data for the user to read
- * @count: the size of the user's buffer
- * @ppos: file position offset
- */
-static ssize_t ice_debugfs_data_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct ice_hw *hw = &pf->hw;
- int data_copied = 0;
- bool done = false;
-
- if (ice_fwlog_ring_empty(&hw->fwlog_ring))
- return 0;
-
- while (!ice_fwlog_ring_empty(&hw->fwlog_ring) && !done) {
- struct ice_fwlog_data *log;
- u16 cur_buf_len;
-
- log = &hw->fwlog_ring.rings[hw->fwlog_ring.head];
- cur_buf_len = log->data_size;
- if (cur_buf_len >= count) {
- done = true;
- continue;
- }
-
- if (copy_to_user(buffer, log->data, cur_buf_len)) {
- /* if there is an error then bail and return whatever
- * the driver has copied so far
- */
- done = true;
- continue;
- }
-
- data_copied += cur_buf_len;
- buffer += cur_buf_len;
- count -= cur_buf_len;
- *ppos += cur_buf_len;
- ice_fwlog_ring_increment(&hw->fwlog_ring.head,
- hw->fwlog_ring.size);
- }
-
- return data_copied;
-}
-
-/**
- * ice_debugfs_data_write - write into 'data' file
- * @filp: the opened file
- * @buf: where to find the user's data
- * @count: the length of the user's data
- * @ppos: file position offset
- */
-static ssize_t
-ice_debugfs_data_write(struct file *filp, const char __user *buf, size_t count,
- loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- ssize_t ret;
-
- /* don't allow partial writes */
- if (*ppos != 0)
- return 0;
-
- /* any value is allowed to clear the buffer so no need to even look at
- * what the value is
- */
- if (!(hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED)) {
- hw->fwlog_ring.head = 0;
- hw->fwlog_ring.tail = 0;
- } else {
- dev_info(dev, "Can't clear FW log data while FW log running\n");
- ret = -EINVAL;
- goto nr_buffs_write_error;
- }
-
- /* if we get here, nothing went wrong; return count since we didn't
- * really write anything
- */
- ret = (ssize_t)count;
-
-nr_buffs_write_error:
- /* This function always consumes all of the written input, or produces
- * an error. Check and enforce this. Otherwise, the write operation
- * won't complete properly.
- */
- if (WARN_ON(ret != (ssize_t)count && ret >= 0))
- ret = -EIO;
-
- return ret;
-}
-
-static const struct file_operations ice_debugfs_data_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = ice_debugfs_data_read,
- .write = ice_debugfs_data_write,
-};
-
-/**
- * ice_debugfs_fwlog_init - setup the debugfs directory
- * @pf: the ice that is starting up
- */
-void ice_debugfs_fwlog_init(struct ice_pf *pf)
+int ice_debugfs_pf_init(struct ice_pf *pf)
{
const char *name = pci_name(pf->pdev);
- struct dentry *fw_modules_dir;
- struct dentry **fw_modules;
- int i;
-
- /* only support fw log commands on PF 0 */
- if (pf->hw.bus.func)
- return;
-
- /* allocate space for this first because if it fails then we don't
- * need to unwind
- */
- fw_modules = kcalloc(ICE_NR_FW_LOG_MODULES, sizeof(*fw_modules),
- GFP_KERNEL);
- if (!fw_modules)
- return;
pf->ice_debugfs_pf = debugfs_create_dir(name, ice_debugfs_root);
if (IS_ERR(pf->ice_debugfs_pf))
- goto err_create_module_files;
-
- pf->ice_debugfs_pf_fwlog = debugfs_create_dir("fwlog",
- pf->ice_debugfs_pf);
- if (IS_ERR(pf->ice_debugfs_pf))
- goto err_create_module_files;
+ return PTR_ERR(pf->ice_debugfs_pf);
- fw_modules_dir = debugfs_create_dir("modules",
- pf->ice_debugfs_pf_fwlog);
- if (IS_ERR(fw_modules_dir))
- goto err_create_module_files;
-
- for (i = 0; i < ICE_NR_FW_LOG_MODULES; i++) {
- fw_modules[i] = debugfs_create_file(ice_fwlog_module_string[i],
- 0600, fw_modules_dir, pf,
- &ice_debugfs_module_fops);
- if (IS_ERR(fw_modules[i]))
- goto err_create_module_files;
- }
-
- debugfs_create_file("nr_messages", 0600,
- pf->ice_debugfs_pf_fwlog, pf,
- &ice_debugfs_nr_messages_fops);
-
- pf->ice_debugfs_pf_fwlog_modules = fw_modules;
-
- debugfs_create_file("enable", 0600, pf->ice_debugfs_pf_fwlog,
- pf, &ice_debugfs_enable_fops);
-
- debugfs_create_file("log_size", 0600, pf->ice_debugfs_pf_fwlog,
- pf, &ice_debugfs_log_size_fops);
-
- debugfs_create_file("data", 0600, pf->ice_debugfs_pf_fwlog,
- pf, &ice_debugfs_data_fops);
-
- return;
-
-err_create_module_files:
- debugfs_remove_recursive(pf->ice_debugfs_pf_fwlog);
- kfree(fw_modules);
+ return 0;
}
/**
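With the fwlog files removed, ice_debugfs_pf_init() only creates the per-PF directory. For illustration only (not part of the patch; the file name and fops below are made up), a sketch of how a feature would add its own entry under it:

static int example_add_pf_debugfs_file(struct ice_pf *pf)
{
	static const struct file_operations example_fops = {
		.owner = THIS_MODULE,
		.open = simple_open,
	};
	int err = ice_debugfs_pf_init(pf);

	if (err)
		return err;

	debugfs_create_file("example", 0600, pf->ice_debugfs_pf, pf,
			    &example_fops);
	return 0;
}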
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index 34fd604132f5..bd4e66df0372 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -6,6 +6,24 @@
/* Device IDs */
#define ICE_DEV_ID_E822_SI_DFLT 0x1888
+/* Intel(R) Ethernet Controller E835-CC for backplane */
+#define ICE_DEV_ID_E835CC_BACKPLANE 0x1248
+/* Intel(R) Ethernet Controller E835-CC for QSFP */
+#define ICE_DEV_ID_E835CC_QSFP56 0x1249
+/* Intel(R) Ethernet Controller E835-CC for SFP */
+#define ICE_DEV_ID_E835CC_SFP 0x124A
+/* Intel(R) Ethernet Controller E835-C for backplane */
+#define ICE_DEV_ID_E835C_BACKPLANE 0x1261
+/* Intel(R) Ethernet Controller E835-C for QSFP */
+#define ICE_DEV_ID_E835C_QSFP 0x1262
+/* Intel(R) Ethernet Controller E835-C for SFP */
+#define ICE_DEV_ID_E835C_SFP 0x1263
+/* Intel(R) Ethernet Controller E835-L for backplane */
+#define ICE_DEV_ID_E835_L_BACKPLANE 0x1265
+/* Intel(R) Ethernet Controller E835-L for QSFP */
+#define ICE_DEV_ID_E835_L_QSFP 0x1266
+/* Intel(R) Ethernet Controller E835-L for SFP */
+#define ICE_DEV_ID_E835_L_SFP 0x1267
/* Intel(R) Ethernet Connection E823-L for backplane */
#define ICE_DEV_ID_E823L_BACKPLANE 0x124C
/* Intel(R) Ethernet Connection E823-L for SFP */
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
index d5ad6d84007c..53b54e395a2e 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -11,6 +11,43 @@
#define ICE_DPLL_RCLK_NUM_PER_PF 1
#define ICE_DPLL_PIN_ESYNC_PULSE_HIGH_PERCENT 25
#define ICE_DPLL_PIN_GEN_RCLK_FREQ 1953125
+#define ICE_DPLL_PIN_PRIO_OUTPUT 0xff
+#define ICE_DPLL_INPUT_REF_NUM 10
+#define ICE_DPLL_PHASE_OFFSET_PERIOD 2
+#define ICE_DPLL_SW_PIN_INPUT_BASE_SFP 4
+#define ICE_DPLL_SW_PIN_INPUT_BASE_QSFP 6
+#define ICE_DPLL_SW_PIN_OUTPUT_BASE 0
+
+#define ICE_DPLL_PIN_SW_INPUT_ABS(in_idx) \
+ (ICE_DPLL_SW_PIN_INPUT_BASE_SFP + (in_idx))
+
+#define ICE_DPLL_PIN_SW_1_INPUT_ABS_IDX \
+ (ICE_DPLL_PIN_SW_INPUT_ABS(ICE_DPLL_PIN_SW_1_IDX))
+
+#define ICE_DPLL_PIN_SW_2_INPUT_ABS_IDX \
+ (ICE_DPLL_PIN_SW_INPUT_ABS(ICE_DPLL_PIN_SW_2_IDX))
+
+#define ICE_DPLL_PIN_SW_OUTPUT_ABS(out_idx) \
+ (ICE_DPLL_SW_PIN_OUTPUT_BASE + (out_idx))
+
+#define ICE_DPLL_PIN_SW_1_OUTPUT_ABS_IDX \
+ (ICE_DPLL_PIN_SW_OUTPUT_ABS(ICE_DPLL_PIN_SW_1_IDX))
+
+#define ICE_DPLL_PIN_SW_2_OUTPUT_ABS_IDX \
+ (ICE_DPLL_PIN_SW_OUTPUT_ABS(ICE_DPLL_PIN_SW_2_IDX))
+
+#define ICE_SR_PFA_DPLL_DEFAULTS 0x152
+#define ICE_DPLL_PFA_REF_SYNC_TYPE 0x2420
+#define ICE_DPLL_PFA_REF_SYNC_TYPE2 0x2424
+#define ICE_DPLL_PFA_END 0xFFFF
+#define ICE_DPLL_PFA_HEADER_LEN 4
+#define ICE_DPLL_PFA_ENTRY_LEN 3
+#define ICE_DPLL_PFA_MAILBOX_REF_SYNC_PIN_S 4
+#define ICE_DPLL_PFA_MASK_OFFSET 1
+#define ICE_DPLL_PFA_VALUE_OFFSET 2
+
+#define ICE_DPLL_E810C_SFP_NC_PINS 2
+#define ICE_DPLL_E810C_SFP_NC_START 4
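A worked example of the index math above, assuming ICE_DPLL_PIN_SW_1_IDX and ICE_DPLL_PIN_SW_2_IDX are 0 and 1 (both are defined outside this hunk):

	ICE_DPLL_PIN_SW_1_INPUT_ABS_IDX  = 4 + 0 = 4	(FW index 6 on E810-C QSFP, see ice_dpll_is_sw_pin())
	ICE_DPLL_PIN_SW_2_INPUT_ABS_IDX  = 4 + 1 = 5	(FW index 7 on E810-C QSFP)
	ICE_DPLL_PIN_SW_1_OUTPUT_ABS_IDX = 0 + 0 = 0
	ICE_DPLL_PIN_SW_2_OUTPUT_ABS_IDX = 0 + 1 = 1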
/**
* enum ice_dpll_pin_type - enumerate ice pin types:
@@ -18,25 +55,61 @@
* @ICE_DPLL_PIN_TYPE_INPUT: input pin
* @ICE_DPLL_PIN_TYPE_OUTPUT: output pin
* @ICE_DPLL_PIN_TYPE_RCLK_INPUT: recovery clock input pin
+ * @ICE_DPLL_PIN_TYPE_SOFTWARE: software controlled SMA/U.FL pins
*/
enum ice_dpll_pin_type {
ICE_DPLL_PIN_INVALID,
ICE_DPLL_PIN_TYPE_INPUT,
ICE_DPLL_PIN_TYPE_OUTPUT,
ICE_DPLL_PIN_TYPE_RCLK_INPUT,
+ ICE_DPLL_PIN_TYPE_SOFTWARE,
};
static const char * const pin_type_name[] = {
[ICE_DPLL_PIN_TYPE_INPUT] = "input",
[ICE_DPLL_PIN_TYPE_OUTPUT] = "output",
[ICE_DPLL_PIN_TYPE_RCLK_INPUT] = "rclk-input",
+ [ICE_DPLL_PIN_TYPE_SOFTWARE] = "software",
};
+static const char * const ice_dpll_sw_pin_sma[] = { "SMA1", "SMA2" };
+static const char * const ice_dpll_sw_pin_ufl[] = { "U.FL1", "U.FL2" };
+
static const struct dpll_pin_frequency ice_esync_range[] = {
DPLL_PIN_FREQUENCY_RANGE(0, DPLL_PIN_FREQUENCY_1_HZ),
};
/**
+ * ice_dpll_is_sw_pin - check if given pin shall be controlled by SW
+ * @pf: private board structure
+ * @index: index of a pin as understood by FW
+ * @input: true for input, false for output
+ *
+ * Check if the pin shall be controlled by SW, instead of providing raw access
+ * for pin control. On E810 NICs with a dpll there is additional MUX-related
+ * logic between the SMA/U.FL pins/connectors and the dpll device, so it is
+ * best to give the user access through a series of wrapper functions: from the
+ * user's perspective they convey a single functionality rather than separate
+ * pins.
+ *
+ * Return:
+ * * true - pin controlled by SW
+ * * false - pin not controlled by SW
+ */
+static bool ice_dpll_is_sw_pin(struct ice_pf *pf, u8 index, bool input)
+{
+ if (input && pf->hw.device_id == ICE_DEV_ID_E810C_QSFP)
+ index -= ICE_DPLL_SW_PIN_INPUT_BASE_QSFP -
+ ICE_DPLL_SW_PIN_INPUT_BASE_SFP;
+
+ if ((input && (index == ICE_DPLL_PIN_SW_1_INPUT_ABS_IDX ||
+ index == ICE_DPLL_PIN_SW_2_INPUT_ABS_IDX)) ||
+ (!input && (index == ICE_DPLL_PIN_SW_1_OUTPUT_ABS_IDX ||
+ index == ICE_DPLL_PIN_SW_2_OUTPUT_ABS_IDX)))
+ return true;
+ return false;
+}
+
+/**
* ice_dpll_is_reset - check if reset is in progress
* @pf: private board structure
* @extack: error reporting
@@ -95,9 +168,9 @@ ice_dpll_pin_freq_set(struct ice_pf *pf, struct ice_dpll_pin *pin,
}
if (ret) {
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to set pin freq:%u on pin:%u\n",
+ "err:%d %s failed to set pin freq:%u on pin:%u",
ret,
- ice_aq_str(pf->hw.adminq.sq_last_status),
+ libie_aq_str(pf->hw.adminq.sq_last_status),
freq, pin->idx);
return ret;
}
@@ -280,6 +353,87 @@ ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv,
}
/**
+ * ice_dpll_sw_pin_frequency_set - callback to set frequency of SW pin
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: pointer to dpll
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @frequency: frequency to be set
+ * @extack: error reporting
+ *
+ * Calls the set frequency command for the corresponding active input/output
+ * pin.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - pin not active or frequency could not be set in hw
+ */
+static int
+ice_dpll_sw_pin_frequency_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 frequency, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *sma = pin_priv;
+ int ret;
+
+ if (!sma->active) {
+ NL_SET_ERR_MSG(extack, "pin is not active");
+ return -EINVAL;
+ }
+ if (sma->direction == DPLL_PIN_DIRECTION_INPUT)
+ ret = ice_dpll_input_frequency_set(NULL, sma->input, dpll,
+ dpll_priv, frequency,
+ extack);
+ else
+ ret = ice_dpll_output_frequency_set(NULL, sma->output, dpll,
+ dpll_priv, frequency,
+ extack);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_sw_pin_frequency_get - callback for get frequency of SW pin
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: pointer to dpll
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @frequency: on success holds pin's frequency
+ * @extack: error reporting
+ *
+ * Calls get frequency command for corresponding active input/output.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error pin not active or couldn't get from hw
+ */
+static int
+ice_dpll_sw_pin_frequency_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 *frequency, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *sma = pin_priv;
+ int ret;
+
+ if (!sma->active) {
+ *frequency = 0;
+ return 0;
+ }
+ if (sma->direction == DPLL_PIN_DIRECTION_INPUT) {
+ ret = ice_dpll_input_frequency_get(NULL, sma->input, dpll,
+ dpll_priv, frequency,
+ extack);
+ } else {
+ ret = ice_dpll_output_frequency_get(NULL, sma->output, dpll,
+ dpll_priv, frequency,
+ extack);
+ }
+
+ return ret;
+}
+
+/**
* ice_dpll_pin_enable - enable a pin on dplls
* @hw: board private hw structure
* @pin: pointer to a pin
@@ -322,8 +476,8 @@ ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin,
}
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to enable %s pin:%u\n",
- ret, ice_aq_str(hw->adminq.sq_last_status),
+ "err:%d %s failed to enable %s pin:%u",
+ ret, libie_aq_str(hw->adminq.sq_last_status),
pin_type_name[pin_type], pin->idx);
return ret;
@@ -367,14 +521,75 @@ ice_dpll_pin_disable(struct ice_hw *hw, struct ice_dpll_pin *pin,
}
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to disable %s pin:%u\n",
- ret, ice_aq_str(hw->adminq.sq_last_status),
+ "err:%d %s failed to disable %s pin:%u",
+ ret, libie_aq_str(hw->adminq.sq_last_status),
pin_type_name[pin_type], pin->idx);
return ret;
}
/**
+ * ice_dpll_sw_pins_update - update status of all SW pins
+ * @pf: private board struct
+ *
+ * Determine the current values of the pin struct fields (direction/active)
+ * and update them for all the SW controlled pins.
+ *
+ * Context: Call with pf->dplls.lock held
+ * Return:
+ * * 0 - OK
+ * * negative - error
+ */
+static int
+ice_dpll_sw_pins_update(struct ice_pf *pf)
+{
+ struct ice_dplls *d = &pf->dplls;
+ struct ice_dpll_pin *p;
+ u8 data = 0;
+ int ret;
+
+ ret = ice_read_sma_ctrl(&pf->hw, &data);
+ if (ret)
+ return ret;
+ /* no change since last check */
+ if (d->sma_data == data)
+ return 0;
+
+ /*
+	 * SMA1/U.FL1 and SMA2/U.FL2 use different bit schemes to decide
+	 * their direction and whether they are active
+ */
+ p = &d->sma[ICE_DPLL_PIN_SW_1_IDX];
+ p->active = true;
+ p->direction = DPLL_PIN_DIRECTION_INPUT;
+ if (data & ICE_SMA1_DIR_EN) {
+ p->direction = DPLL_PIN_DIRECTION_OUTPUT;
+ if (data & ICE_SMA1_TX_EN)
+ p->active = false;
+ }
+
+ p = &d->sma[ICE_DPLL_PIN_SW_2_IDX];
+ p->active = true;
+ p->direction = DPLL_PIN_DIRECTION_INPUT;
+ if ((data & ICE_SMA2_INACTIVE_MASK) == ICE_SMA2_INACTIVE_MASK)
+ p->active = false;
+ else if (data & ICE_SMA2_DIR_EN)
+ p->direction = DPLL_PIN_DIRECTION_OUTPUT;
+
+ p = &d->ufl[ICE_DPLL_PIN_SW_1_IDX];
+ if (!(data & (ICE_SMA1_DIR_EN | ICE_SMA1_TX_EN)))
+ p->active = true;
+ else
+ p->active = false;
+
+ p = &d->ufl[ICE_DPLL_PIN_SW_2_IDX];
+ p->active = (data & ICE_SMA2_DIR_EN) && !(data & ICE_SMA2_UFL2_RX_DIS);
+ d->sma_data = data;
+
+ return 0;
+}
+
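A worked decode of the bit scheme above (the mask values are defined outside this hunk): if a read returns only ICE_SMA1_DIR_EN set, SMA1 becomes an active output (DIR_EN set, TX_EN clear), U.FL1 is inactive (SMA1_DIR_EN set), SMA2 remains an active input (no SMA2 bits set), and U.FL2 is inactive (SMA2_DIR_EN clear).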
+/**
* ice_dpll_pin_state_update - update pin's state
* @pf: private board struct
* @pin: structure with pin attributes to be updated
@@ -471,6 +686,11 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
DPLL_PIN_STATE_DISCONNECTED;
}
break;
+ case ICE_DPLL_PIN_TYPE_SOFTWARE:
+ ret = ice_dpll_sw_pins_update(pf);
+ if (ret)
+ goto err;
+ break;
default:
return -EINVAL;
}
@@ -479,15 +699,15 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
err:
if (extack)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to update %s pin:%u\n",
+ "err:%d %s failed to update %s pin:%u",
ret,
- ice_aq_str(pf->hw.adminq.sq_last_status),
+ libie_aq_str(pf->hw.adminq.sq_last_status),
pin_type_name[pin_type], pin->idx);
else
dev_err_ratelimited(ice_pf_to_dev(pf),
"err:%d %s failed to update %s pin:%u\n",
ret,
- ice_aq_str(pf->hw.adminq.sq_last_status),
+ libie_aq_str(pf->hw.adminq.sq_last_status),
pin_type_name[pin_type], pin->idx);
return ret;
}
@@ -518,9 +738,9 @@ ice_dpll_hw_input_prio_set(struct ice_pf *pf, struct ice_dpll *dpll,
(u8)prio);
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to set pin prio:%u on pin:%u\n",
+ "err:%d %s failed to set pin prio:%u on pin:%u",
ret,
- ice_aq_str(pf->hw.adminq.sq_last_status),
+ libie_aq_str(pf->hw.adminq.sq_last_status),
prio, pin->idx);
else
dpll->input_prio[pin->idx] = prio;
@@ -588,6 +808,67 @@ static int ice_dpll_mode_get(const struct dpll_device *dpll, void *dpll_priv,
}
/**
+ * ice_dpll_phase_offset_monitor_set - set phase offset monitor state
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: feature state to be set
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Enable/disable phase offset monitor feature of dpll.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return: 0 - success
+ */
+static int ice_dpll_phase_offset_monitor_set(const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_feature_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ mutex_lock(&pf->dplls.lock);
+ if (state == DPLL_FEATURE_STATE_ENABLE)
+ d->phase_offset_monitor_period = ICE_DPLL_PHASE_OFFSET_PERIOD;
+ else
+ d->phase_offset_monitor_period = 0;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_phase_offset_monitor_get - get phase offset monitor state
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: on success holds current state of phase offset monitor
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Provides the current state of the phase offset
+ * monitor feature on the dpll device.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return: 0 - success
+ */
+static int ice_dpll_phase_offset_monitor_get(const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_feature_state *state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ mutex_lock(&pf->dplls.lock);
+ if (d->phase_offset_monitor_period)
+ *state = DPLL_FEATURE_STATE_ENABLE;
+ else
+ *state = DPLL_FEATURE_STATE_DISABLE;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
* ice_dpll_pin_state_set - set pin's state on dpll
* @pin: pointer to a pin
* @pin_priv: private data pointer passed on pin registration
@@ -793,6 +1074,270 @@ ice_dpll_input_state_get(const struct dpll_pin *pin, void *pin_priv,
}
/**
+ * ice_dpll_sma_direction_set - set direction of SMA pin
+ * @p: pointer to a pin
+ * @direction: requested direction of the pin
+ * @extack: error reporting
+ *
+ * Wrapper for dpll subsystem callback. Set direction of a SMA pin.
+ *
+ * Context: Call with pf->dplls.lock held
+ * Return:
+ * * 0 - success
+ * * negative - failed to set the direction
+ */
+static int ice_dpll_sma_direction_set(struct ice_dpll_pin *p,
+ enum dpll_pin_direction direction,
+ struct netlink_ext_ack *extack)
+{
+ u8 data;
+ int ret;
+
+ if (p->direction == direction && p->active)
+ return 0;
+ ret = ice_read_sma_ctrl(&p->pf->hw, &data);
+ if (ret)
+ return ret;
+
+ switch (p->idx) {
+ case ICE_DPLL_PIN_SW_1_IDX:
+ data &= ~ICE_SMA1_MASK;
+ if (direction == DPLL_PIN_DIRECTION_OUTPUT)
+ data |= ICE_SMA1_DIR_EN;
+ break;
+ case ICE_DPLL_PIN_SW_2_IDX:
+ if (direction == DPLL_PIN_DIRECTION_INPUT) {
+ data &= ~ICE_SMA2_DIR_EN;
+ } else {
+ data &= ~ICE_SMA2_TX_EN;
+ data |= ICE_SMA2_DIR_EN;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ ret = ice_write_sma_ctrl(&p->pf->hw, data);
+ if (!ret)
+ ret = ice_dpll_pin_state_update(p->pf, p,
+ ICE_DPLL_PIN_TYPE_SOFTWARE,
+ extack);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_ufl_pin_state_set - set U.FL pin state on dpll device
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: requested state of the pin
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Set the state of a pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_ufl_pin_state_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv, *target;
+ struct ice_dpll *d = dpll_priv;
+ enum ice_dpll_pin_type type;
+ struct ice_pf *pf = p->pf;
+ struct ice_hw *hw;
+ bool enable;
+ u8 data;
+ int ret;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
+ mutex_lock(&pf->dplls.lock);
+ hw = &pf->hw;
+ ret = ice_read_sma_ctrl(hw, &data);
+ if (ret)
+ goto unlock;
+
+ ret = -EINVAL;
+ switch (p->idx) {
+ case ICE_DPLL_PIN_SW_1_IDX:
+ if (state == DPLL_PIN_STATE_CONNECTED) {
+ data &= ~ICE_SMA1_MASK;
+ enable = true;
+ } else if (state == DPLL_PIN_STATE_DISCONNECTED) {
+ data |= ICE_SMA1_TX_EN;
+ enable = false;
+ } else {
+ goto unlock;
+ }
+ target = p->output;
+ type = ICE_DPLL_PIN_TYPE_OUTPUT;
+ break;
+ case ICE_DPLL_PIN_SW_2_IDX:
+ if (state == DPLL_PIN_STATE_SELECTABLE) {
+ data |= ICE_SMA2_DIR_EN;
+ data &= ~ICE_SMA2_UFL2_RX_DIS;
+ enable = true;
+ } else if (state == DPLL_PIN_STATE_DISCONNECTED) {
+ data |= ICE_SMA2_UFL2_RX_DIS;
+ enable = false;
+ } else {
+ goto unlock;
+ }
+ target = p->input;
+ type = ICE_DPLL_PIN_TYPE_INPUT;
+ break;
+ default:
+ goto unlock;
+ }
+
+ ret = ice_write_sma_ctrl(hw, data);
+ if (ret)
+ goto unlock;
+ ret = ice_dpll_pin_state_update(pf, p, ICE_DPLL_PIN_TYPE_SOFTWARE,
+ extack);
+ if (ret)
+ goto unlock;
+
+ if (enable)
+ ret = ice_dpll_pin_enable(hw, target, d->dpll_idx, type, extack);
+ else
+ ret = ice_dpll_pin_disable(hw, target, type, extack);
+ if (!ret)
+ ret = ice_dpll_pin_state_update(pf, target, type, extack);
+
+unlock:
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_sw_pin_state_get - get SW pin state
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: on success holds state of the pin
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Check state of a SW pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_pin_state_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = p->pf;
+ int ret = 0;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+ if (!p->active) {
+ *state = DPLL_PIN_STATE_DISCONNECTED;
+ goto unlock;
+ }
+
+ if (p->direction == DPLL_PIN_DIRECTION_INPUT) {
+ ret = ice_dpll_pin_state_update(pf, p->input,
+ ICE_DPLL_PIN_TYPE_INPUT,
+ extack);
+ if (ret)
+ goto unlock;
+ *state = p->input->state[d->dpll_idx];
+ } else {
+ ret = ice_dpll_pin_state_update(pf, p->output,
+ ICE_DPLL_PIN_TYPE_OUTPUT,
+ extack);
+ if (ret)
+ goto unlock;
+ *state = p->output->state[d->dpll_idx];
+ }
+unlock:
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_sma_pin_state_set - set SMA pin state on dpll device
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: requested state of the pin
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Set state of a pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failed to set state
+ */
+static int
+ice_dpll_sma_pin_state_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *sma = pin_priv, *target;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = sma->pf;
+ enum ice_dpll_pin_type type;
+ bool enable;
+ int ret;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
+ mutex_lock(&pf->dplls.lock);
+ if (!sma->active) {
+ ret = ice_dpll_sma_direction_set(sma, sma->direction, extack);
+ if (ret)
+ goto unlock;
+ }
+ if (sma->direction == DPLL_PIN_DIRECTION_INPUT) {
+ enable = state == DPLL_PIN_STATE_SELECTABLE;
+ target = sma->input;
+ type = ICE_DPLL_PIN_TYPE_INPUT;
+ } else {
+ enable = state == DPLL_PIN_STATE_CONNECTED;
+ target = sma->output;
+ type = ICE_DPLL_PIN_TYPE_OUTPUT;
+ }
+
+ if (enable)
+ ret = ice_dpll_pin_enable(&pf->hw, target, d->dpll_idx, type,
+ extack);
+ else
+ ret = ice_dpll_pin_disable(&pf->hw, target, type, extack);
+ if (!ret)
+ ret = ice_dpll_pin_state_update(pf, target, type, extack);
+
+unlock:
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
* ice_dpll_input_prio_get - get dpll's input prio
* @pin: pointer to a pin
* @pin_priv: private data pointer passed on pin registration
@@ -860,6 +1405,47 @@ ice_dpll_input_prio_set(const struct dpll_pin *pin, void *pin_priv,
return ret;
}
+static int
+ice_dpll_sw_input_prio_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u32 *prio, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ mutex_lock(&pf->dplls.lock);
+ if (p->input && p->direction == DPLL_PIN_DIRECTION_INPUT)
+ *prio = d->input_prio[p->input->idx];
+ else
+ *prio = ICE_DPLL_PIN_PRIO_OUTPUT;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+static int
+ice_dpll_sw_input_prio_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u32 prio, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+ int ret;
+
+ if (!p->input || p->direction != DPLL_PIN_DIRECTION_INPUT)
+ return -EINVAL;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
+ mutex_lock(&pf->dplls.lock);
+ ret = ice_dpll_hw_input_prio_set(pf, d, p->input, prio, extack);
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
/**
* ice_dpll_input_direction - callback for get input pin direction
* @pin: pointer to a pin
@@ -911,6 +1497,76 @@ ice_dpll_output_direction(const struct dpll_pin *pin, void *pin_priv,
}
/**
+ * ice_dpll_pin_sma_direction_set - callback for set SMA pin direction
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @direction: requested pin direction
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for setting direction of a SMA pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_pin_sma_direction_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_direction direction,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_pf *pf = p->pf;
+ int ret;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
+ mutex_lock(&pf->dplls.lock);
+ ret = ice_dpll_sma_direction_set(p, direction, extack);
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_pin_sw_direction_get - callback for get SW pin direction
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @direction: on success holds pin direction
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting direction of a SW pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_pin_sw_direction_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_direction *direction,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_pf *pf = p->pf;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+ *direction = p->direction;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
* ice_dpll_pin_phase_adjust_get - callback for get pin phase adjust value
* @pin: pointer to a pin
* @pin_priv: private data pointer passed on pin registration
@@ -1004,9 +1660,9 @@ ice_dpll_pin_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
mutex_unlock(&pf->dplls.lock);
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to set pin phase_adjust:%d for pin:%u on dpll:%u\n",
+ "err:%d %s failed to set pin phase_adjust:%d for pin:%u on dpll:%u",
ret,
- ice_aq_str(pf->hw.adminq.sq_last_status),
+ libie_aq_str(pf->hw.adminq.sq_last_status),
phase_adjust, p->idx, d->dpll_idx);
return ret;
@@ -1024,7 +1680,7 @@ ice_dpll_pin_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
* Dpll subsystem callback. Wraps a handler for setting phase adjust on input
* pin.
*
- * Context: Calls a function which acquires pf->dplls.lock
+ * Context: Calls a function which acquires and releases pf->dplls.lock
* Return:
* * 0 - success
* * negative - error
@@ -1068,6 +1724,82 @@ ice_dpll_output_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
ICE_DPLL_PIN_TYPE_OUTPUT);
}
+/**
+ * ice_dpll_sw_phase_adjust_get - callback for get SW pin phase adjust
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @phase_adjust: on success holds phase adjust value
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Wraps a handler for getting phase adjust on sw
+ * pin.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_phase_adjust_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s32 *phase_adjust,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+
+ if (p->direction == DPLL_PIN_DIRECTION_INPUT)
+ return ice_dpll_pin_phase_adjust_get(p->input->pin, p->input,
+ dpll, dpll_priv,
+ phase_adjust, extack);
+ else
+ return ice_dpll_pin_phase_adjust_get(p->output->pin, p->output,
+ dpll, dpll_priv,
+ phase_adjust, extack);
+}
+
+/**
+ * ice_dpll_sw_phase_adjust_set - callback for set SW pin phase adjust value
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @phase_adjust: phase_adjust to be set
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Wraps a handler for setting phase adjust on a SW
+ * pin.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s32 phase_adjust,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+
+ if (!p->active) {
+ NL_SET_ERR_MSG(extack, "pin is not active");
+ return -EINVAL;
+ }
+ if (p->direction == DPLL_PIN_DIRECTION_INPUT)
+ return ice_dpll_pin_phase_adjust_set(p->input->pin, p->input,
+ dpll, dpll_priv,
+ phase_adjust, extack,
+ ICE_DPLL_PIN_TYPE_INPUT);
+ else
+ return ice_dpll_pin_phase_adjust_set(p->output->pin, p->output,
+ dpll, dpll_priv,
+ phase_adjust, extack,
+ ICE_DPLL_PIN_TYPE_OUTPUT);
+}
+
#define ICE_DPLL_PHASE_OFFSET_DIVIDER 100
#define ICE_DPLL_PHASE_OFFSET_FACTOR \
(DPLL_PHASE_OFFSET_DIVIDER / ICE_DPLL_PHASE_OFFSET_DIVIDER)
@@ -1093,12 +1825,16 @@ ice_dpll_phase_offset_get(const struct dpll_pin *pin, void *pin_priv,
const struct dpll_device *dpll, void *dpll_priv,
s64 *phase_offset, struct netlink_ext_ack *extack)
{
+ struct ice_dpll_pin *p = pin_priv;
struct ice_dpll *d = dpll_priv;
struct ice_pf *pf = d->pf;
mutex_lock(&pf->dplls.lock);
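+ /* The active input reports the dpll-measured offset; other pins report
+ * the monitored per-pin value when phase offset monitoring is enabled.
+ */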
- if (d->active_input == pin)
+ if (d->active_input == pin || (p->input &&
+ d->active_input == p->input->pin))
*phase_offset = d->phase_offset * ICE_DPLL_PHASE_OFFSET_FACTOR;
+ else if (d->phase_offset_monitor_period)
+ *phase_offset = p->phase_offset * ICE_DPLL_PHASE_OFFSET_FACTOR;
else
*phase_offset = 0;
mutex_unlock(&pf->dplls.lock);
@@ -1315,6 +2051,219 @@ ice_dpll_input_esync_get(const struct dpll_pin *pin, void *pin_priv,
}
/**
+ * ice_dpll_sw_esync_set - callback for setting embedded sync on SW pin
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @freq: requested embedded sync frequency
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for setting embedded sync frequency value
+ * on SW pin.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_esync_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 freq, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+
+ if (!p->active) {
+ NL_SET_ERR_MSG(extack, "pin is not active");
+ return -EINVAL;
+ }
+ if (p->direction == DPLL_PIN_DIRECTION_INPUT)
+ return ice_dpll_input_esync_set(p->input->pin, p->input, dpll,
+ dpll_priv, freq, extack);
+ else
+ return ice_dpll_output_esync_set(p->output->pin, p->output,
+ dpll, dpll_priv, freq, extack);
+}
+
+/**
+ * ice_dpll_sw_esync_get - callback for getting embedded sync on SW pin
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @esync: on success holds embedded sync frequency and properties
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting embedded sync frequency value
+ * of SW pin.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_esync_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ struct dpll_pin_esync *esync,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+
+ if (p->direction == DPLL_PIN_DIRECTION_INPUT)
+ return ice_dpll_input_esync_get(p->input->pin, p->input, dpll,
+ dpll_priv, esync, extack);
+ else
+ return ice_dpll_output_esync_get(p->output->pin, p->output,
+ dpll, dpll_priv, esync,
+ extack);
+}
+
+/**
+ * ice_dpll_input_ref_sync_set - callback for setting reference sync feature
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @ref_pin: pin pointer for reference sync pair
+ * @ref_pin_priv: private data pointer of ref_pin
+ * @state: requested state for reference sync for pin pair
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for setting the reference sync feature
+ * for an input pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_input_ref_sync_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *ref_pin, void *ref_pin_priv,
+ const enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_pf *pf = p->pf;
+ u8 flags_en = 0;
+ int ret;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+
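+ /* Preserve the pin's input-enable flag; set the ref-sync enable bit
+ * only when connecting the pair.
+ */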
+ if (p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN)
+ flags_en = ICE_AQC_SET_CGU_IN_CFG_FLG2_INPUT_EN;
+ if (state == DPLL_PIN_STATE_CONNECTED)
+ flags_en |= ICE_AQC_CGU_IN_CFG_FLG2_REFSYNC_EN;
+ ret = ice_aq_set_input_pin_cfg(&pf->hw, p->idx, 0, flags_en, 0, 0);
+ if (!ret)
+ ret = ice_dpll_pin_state_update(pf, p, ICE_DPLL_PIN_TYPE_INPUT,
+ extack);
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_input_ref_sync_get - callback for getting reference sync config
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @ref_pin: pin pointer for reference sync pair
+ * @ref_pin_priv: private data pointer of ref_pin
+ * @state: on success holds reference sync state for pin pair
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting the reference sync state
+ * of an input pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_input_ref_sync_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *ref_pin, void *ref_pin_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_pf *pf = p->pf;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+ if (p->flags[0] & ICE_AQC_CGU_IN_CFG_FLG2_REFSYNC_EN)
+ *state = DPLL_PIN_STATE_CONNECTED;
+ else
+ *state = DPLL_PIN_STATE_DISCONNECTED;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_sw_input_ref_sync_set - callback for setting reference sync feature
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @ref_pin: pin pointer for reference sync pair
+ * @ref_pin_priv: private data pointer of ref_pin
+ * @state: requested state for reference sync for pin pair
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for setting reference sync
+ * feature for input pins.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_input_ref_sync_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *ref_pin,
+ void *ref_pin_priv,
+ const enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+
+ return ice_dpll_input_ref_sync_set(pin, p->input, ref_pin, ref_pin_priv,
+ state, extack);
+}
+
+/**
+ * ice_dpll_sw_input_ref_sync_get - callback for getting reference sync config
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @ref_pin: pin pointer for reference sync pair
+ * @ref_pin_priv: private data pointer of ref_pin
+ * @state: on success holds reference sync state for pin pair
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting the reference sync state of
+ * input pins.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_input_ref_sync_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *ref_pin,
+ void *ref_pin_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+
+ return ice_dpll_input_ref_sync_get(pin, p->input, ref_pin, ref_pin_priv,
+ state, extack);
+}
+
+/**
* ice_dpll_rclk_state_on_pin_set - set a state on rclk pin
* @pin: pointer to a pin
* @pin_priv: private data pointer passed on pin registration
@@ -1362,9 +2311,9 @@ ice_dpll_rclk_state_on_pin_set(const struct dpll_pin *pin, void *pin_priv,
&p->freq);
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to set pin state:%u for pin:%u on parent:%u\n",
+ "err:%d %s failed to set pin state:%u for pin:%u on parent:%u",
ret,
- ice_aq_str(pf->hw.adminq.sq_last_status),
+ libie_aq_str(pf->hw.adminq.sq_last_status),
state, p->idx, parent->idx);
unlock:
mutex_unlock(&pf->dplls.lock);
@@ -1427,6 +2376,37 @@ static const struct dpll_pin_ops ice_dpll_rclk_ops = {
.direction_get = ice_dpll_input_direction,
};
+static const struct dpll_pin_ops ice_dpll_pin_sma_ops = {
+ .state_on_dpll_set = ice_dpll_sma_pin_state_set,
+ .state_on_dpll_get = ice_dpll_sw_pin_state_get,
+ .direction_get = ice_dpll_pin_sw_direction_get,
+ .direction_set = ice_dpll_pin_sma_direction_set,
+ .prio_get = ice_dpll_sw_input_prio_get,
+ .prio_set = ice_dpll_sw_input_prio_set,
+ .frequency_get = ice_dpll_sw_pin_frequency_get,
+ .frequency_set = ice_dpll_sw_pin_frequency_set,
+ .phase_adjust_get = ice_dpll_sw_phase_adjust_get,
+ .phase_adjust_set = ice_dpll_sw_phase_adjust_set,
+ .phase_offset_get = ice_dpll_phase_offset_get,
+ .esync_set = ice_dpll_sw_esync_set,
+ .esync_get = ice_dpll_sw_esync_get,
+ .ref_sync_set = ice_dpll_sw_input_ref_sync_set,
+ .ref_sync_get = ice_dpll_sw_input_ref_sync_get,
+};
+
+static const struct dpll_pin_ops ice_dpll_pin_ufl_ops = {
+ .state_on_dpll_set = ice_dpll_ufl_pin_state_set,
+ .state_on_dpll_get = ice_dpll_sw_pin_state_get,
+ .direction_get = ice_dpll_pin_sw_direction_get,
+ .frequency_get = ice_dpll_sw_pin_frequency_get,
+ .frequency_set = ice_dpll_sw_pin_frequency_set,
+ .esync_set = ice_dpll_sw_esync_set,
+ .esync_get = ice_dpll_sw_esync_get,
+ .phase_adjust_get = ice_dpll_sw_phase_adjust_get,
+ .phase_adjust_set = ice_dpll_sw_phase_adjust_set,
+ .phase_offset_get = ice_dpll_phase_offset_get,
+};
+
static const struct dpll_pin_ops ice_dpll_input_ops = {
.frequency_get = ice_dpll_input_frequency_get,
.frequency_set = ice_dpll_input_frequency_set,
@@ -1440,6 +2420,8 @@ static const struct dpll_pin_ops ice_dpll_input_ops = {
.phase_offset_get = ice_dpll_phase_offset_get,
.esync_set = ice_dpll_input_esync_set,
.esync_get = ice_dpll_input_esync_get,
+ .ref_sync_set = ice_dpll_input_ref_sync_set,
+ .ref_sync_get = ice_dpll_input_ref_sync_get,
};
static const struct dpll_pin_ops ice_dpll_output_ops = {
@@ -1459,6 +2441,13 @@ static const struct dpll_device_ops ice_dpll_ops = {
.mode_get = ice_dpll_mode_get,
};
+static const struct dpll_device_ops ice_dpll_pom_ops = {
+ .lock_status_get = ice_dpll_lock_status_get,
+ .mode_get = ice_dpll_mode_get,
+ .phase_offset_monitor_set = ice_dpll_phase_offset_monitor_set,
+ .phase_offset_monitor_get = ice_dpll_phase_offset_monitor_get,
+};
+
/**
* ice_generate_clock_id - generates unique clock_id for registering dpll.
* @pf: board private structure
@@ -1504,6 +2493,110 @@ static void ice_dpll_notify_changes(struct ice_dpll *d)
}
/**
+ * ice_dpll_is_pps_phase_monitor - check if dpll is capable of phase offset monitoring
+ * @pf: pf private structure
+ *
+ * Check if the firmware supports the admin command that provides phase offset
+ * monitoring on all input pins of the PPS dpll.
+ *
+ * Returns:
+ * * true - PPS dpll phase offset monitoring is supported
+ * * false - PPS dpll phase offset monitoring is not supported
+ */
+static bool ice_dpll_is_pps_phase_monitor(struct ice_pf *pf)
+{
+ struct ice_cgu_input_measure meas[ICE_DPLL_INPUT_REF_NUM];
+ int ret = ice_aq_get_cgu_input_pin_measure(&pf->hw, DPLL_TYPE_PPS, meas,
+ ARRAY_SIZE(meas));
+
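+ /* Firmware without support for the input pin measure command returns
+ * LIBIE_AQ_RC_ESRCH; any other outcome means the feature is available.
+ */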
+ if (ret && pf->hw.adminq.sq_last_status == LIBIE_AQ_RC_ESRCH)
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_dpll_pins_notify_mask - notify dpll subsystem about bulk pin changes
+ * @pins: array of ice_dpll_pin pointers registered within dpll subsystem
+ * @pin_num: number of pins
+ * @phase_offset_ntf_mask: bitmask of pin indexes to notify
+ *
+ * Iterate over the array of pins and send a dpll subsystem pin change
+ * notification for each pin whose index is set in the bitmask.
+ *
+ * Context: Must be called without holding pf->dplls.lock.
+ */
+static void ice_dpll_pins_notify_mask(struct ice_dpll_pin *pins,
+ u8 pin_num,
+ u32 phase_offset_ntf_mask)
+{
+ int i = 0;
+
+ for (i = 0; i < pin_num; i++)
+ if (phase_offset_ntf_mask & (1 << i))
+ dpll_pin_change_ntf(pins[i].pin);
+}
+
+/**
+ * ice_dpll_pps_update_phase_offsets - update phase offset measurements
+ * @pf: pf private structure
+ * @phase_offset_pins_updated: returns mask of updated input pin indexes
+ *
+ * Read phase offset measurements for the PPS dpll device and store the values
+ * in the input pins array. On success, phase_offset_pins_updated holds a
+ * bitmask of the updated input pin indexes; those pins shall be notified.
+ *
+ * Context: Shall be called with pf->dplls.lock being locked.
+ * Returns:
+ * * 0 - success or no data available
+ * * negative - AQ failure
+ */
+static int ice_dpll_pps_update_phase_offsets(struct ice_pf *pf,
+ u32 *phase_offset_pins_updated)
+{
+ struct ice_cgu_input_measure meas[ICE_DPLL_INPUT_REF_NUM];
+ struct ice_dpll_pin *p;
+ s64 phase_offset, tmp;
+ int i, j, ret;
+
+ *phase_offset_pins_updated = 0;
+ ret = ice_aq_get_cgu_input_pin_measure(&pf->hw, DPLL_TYPE_PPS, meas,
+ ARRAY_SIZE(meas));
+ if (ret && pf->hw.adminq.sq_last_status == LIBIE_AQ_RC_EAGAIN) {
+ return 0;
+ } else if (ret) {
+ dev_err(ice_pf_to_dev(pf),
+ "failed to get input pin measurements dpll=%d, ret=%d %s\n",
+ DPLL_TYPE_PPS, ret,
+ libie_aq_str(pf->hw.adminq.sq_last_status));
+ return ret;
+ }
+ for (i = 0; i < pf->dplls.num_inputs; i++) {
+ p = &pf->dplls.inputs[i];
+ phase_offset = 0;
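+ /* Assemble the per-byte measurement into a 48-bit two's-complement
+ * value, honouring host endianness, then sign-extend it below.
+ */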
+ for (j = 0; j < ICE_CGU_INPUT_PHASE_OFFSET_BYTES; j++) {
+ tmp = meas[i].phase_offset[j];
+#ifdef __LITTLE_ENDIAN
+ phase_offset += tmp << 8 * j;
+#else
+ phase_offset += tmp << 8 *
+ (ICE_CGU_INPUT_PHASE_OFFSET_BYTES - 1 - j);
+#endif
+ }
+ phase_offset = sign_extend64(phase_offset, 47);
+ if (p->phase_offset != phase_offset) {
+ dev_dbg(ice_pf_to_dev(pf),
+ "phase offset changed for pin:%d old:%llx, new:%llx\n",
+ p->idx, p->phase_offset, phase_offset);
+ p->phase_offset = phase_offset;
+ *phase_offset_pins_updated |= (1 << i);
+ }
+ }
+
+ return 0;
+}
+
+/**
* ice_dpll_update_state - update dpll state
* @pf: pf private structure
* @d: pointer to queried dpll device
@@ -1534,7 +2627,7 @@ ice_dpll_update_state(struct ice_pf *pf, struct ice_dpll *d, bool init)
dev_err(ice_pf_to_dev(pf),
"update dpll=%d state failed, ret=%d %s\n",
d->dpll_idx, ret,
- ice_aq_str(pf->hw.adminq.sq_last_status));
+ libie_aq_str(pf->hw.adminq.sq_last_status));
return ret;
}
if (init) {
@@ -1589,14 +2682,19 @@ static void ice_dpll_periodic_work(struct kthread_work *work)
struct ice_pf *pf = container_of(d, struct ice_pf, dplls);
struct ice_dpll *de = &pf->dplls.eec;
struct ice_dpll *dp = &pf->dplls.pps;
+ u32 phase_offset_ntf = 0;
int ret = 0;
if (ice_is_reset_in_progress(pf->state))
goto resched;
mutex_lock(&pf->dplls.lock);
+ d->periodic_counter++;
ret = ice_dpll_update_state(pf, de, false);
if (!ret)
ret = ice_dpll_update_state(pf, dp, false);
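+ /* Phase offsets are refreshed every phase_offset_monitor_period runs
+ * of this worker, which itself runs twice a second.
+ */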
+ if (!ret && dp->phase_offset_monitor_period &&
+ d->periodic_counter % dp->phase_offset_monitor_period == 0)
+ ret = ice_dpll_pps_update_phase_offsets(pf, &phase_offset_ntf);
if (ret) {
d->cgu_state_acq_err_num++;
/* stop rescheduling this worker */
@@ -1611,6 +2709,9 @@ static void ice_dpll_periodic_work(struct kthread_work *work)
mutex_unlock(&pf->dplls.lock);
ice_dpll_notify_changes(de);
ice_dpll_notify_changes(dp);
+ if (phase_offset_ntf)
+ ice_dpll_pins_notify_mask(d->inputs, d->num_inputs,
+ phase_offset_ntf);
resched:
/* Run twice a second or reschedule if update failed */
@@ -1620,6 +2721,88 @@ resched:
}
/**
+ * ice_dpll_init_ref_sync_inputs - initialize reference sync pin pairs
+ * @pf: pf private structure
+ *
+ * Read the DPLL defaults TLV from the PFA and initialize reference sync pin
+ * pairs in the dpll subsystem.
+ *
+ * Return:
+ * * 0 - success or nothing to do (no ref-sync TLVs present)
+ * * negative - AQ failure
+ */
+static int ice_dpll_init_ref_sync_inputs(struct ice_pf *pf)
+{
+ struct ice_dpll_pin *inputs = pf->dplls.inputs;
+ struct ice_hw *hw = &pf->hw;
+ u16 addr, len, end, hdr;
+ int ret;
+
+ ret = ice_get_pfa_module_tlv(hw, &hdr, &len, ICE_SR_PFA_DPLL_DEFAULTS);
+ if (ret) {
+ dev_err(ice_pf_to_dev(pf),
+ "Failed to read PFA dpll defaults TLV ret=%d\n", ret);
+ return ret;
+ }
+ end = hdr + len;
+
+ for (addr = hdr + ICE_DPLL_PFA_HEADER_LEN; addr < end;
+ addr += ICE_DPLL_PFA_ENTRY_LEN) {
+ unsigned long bit, ul_mask, offset;
+ u16 pin, mask, buf;
+ bool valid = false;
+
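+ /* Each entry starts with a type word; ref-sync entries additionally
+ * carry a mask word of paired pins and a value word with the base pin.
+ */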
+ ret = ice_read_sr_word(hw, addr, &buf);
+ if (ret)
+ return ret;
+
+ switch (buf) {
+ case ICE_DPLL_PFA_REF_SYNC_TYPE:
+ case ICE_DPLL_PFA_REF_SYNC_TYPE2:
+ {
+ u16 mask_addr = addr + ICE_DPLL_PFA_MASK_OFFSET;
+ u16 val_addr = addr + ICE_DPLL_PFA_VALUE_OFFSET;
+
+ ret = ice_read_sr_word(hw, mask_addr, &mask);
+ if (ret)
+ return ret;
+ ret = ice_read_sr_word(hw, val_addr, &pin);
+ if (ret)
+ return ret;
+ if (buf == ICE_DPLL_PFA_REF_SYNC_TYPE)
+ pin >>= ICE_DPLL_PFA_MAILBOX_REF_SYNC_PIN_S;
+ valid = true;
+ break;
+ }
+ case ICE_DPLL_PFA_END:
+ addr = end;
+ break;
+ default:
+ continue;
+ }
+ if (!valid)
+ continue;
+
+ ul_mask = mask;
+ offset = 0;
+ for_each_set_bit(bit, &ul_mask, BITS_PER_TYPE(u16)) {
+ int i, j;
+
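+ /* On E810C SFP boards, pins above the NC range are remapped: shift
+ * both the base pin and its paired bit down by the NC pin count.
+ */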
+ if (hw->device_id == ICE_DEV_ID_E810C_SFP &&
+ pin > ICE_DPLL_E810C_SFP_NC_START)
+ offset = -ICE_DPLL_E810C_SFP_NC_PINS;
+ i = pin + offset;
+ j = bit + offset;
+ if (i < 0 || j < 0)
+ return -ERANGE;
+ inputs[i].ref_sync = j;
+ }
+ }
+
+ return 0;
+}
+
+/**
* ice_dpll_release_pins - release pins resources from dpll subsystem
* @pins: pointer to pins array
* @count: number of pins
@@ -1689,7 +2872,38 @@ ice_dpll_unregister_pins(struct dpll_device *dpll, struct ice_dpll_pin *pins,
int i;
for (i = 0; i < count; i++)
- dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]);
+ if (!pins[i].hidden)
+ dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]);
+}
+
+/**
+ * ice_dpll_pin_ref_sync_register - register reference sync pins
+ * @pins: pointer to pins array
+ * @count: number of pins
+ *
+ * Register reference sync pin pairs with the dpll subsystem.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - registration failure reason
+ */
+static int
+ice_dpll_pin_ref_sync_register(struct ice_dpll_pin *pins, int count)
+{
+ int ret, i;
+
+ for (i = 0; i < count; i++) {
+ if (!pins[i].hidden && pins[i].ref_sync) {
+ int j = pins[i].ref_sync;
+
+ ret = dpll_pin_ref_sync_pair_add(pins[i].pin,
+ pins[j].pin);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
}
/**
@@ -1712,16 +2926,19 @@ ice_dpll_register_pins(struct dpll_device *dpll, struct ice_dpll_pin *pins,
int ret, i;
for (i = 0; i < count; i++) {
- ret = dpll_pin_register(dpll, pins[i].pin, ops, &pins[i]);
- if (ret)
- goto unregister_pins;
+ if (!pins[i].hidden) {
+ ret = dpll_pin_register(dpll, pins[i].pin, ops, &pins[i]);
+ if (ret)
+ goto unregister_pins;
+ }
}
return 0;
unregister_pins:
while (--i >= 0)
- dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]);
+ if (!pins[i].hidden)
+ dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]);
return ret;
}
@@ -1909,6 +3126,18 @@ static void ice_dpll_deinit_pins(struct ice_pf *pf, bool cgu)
ice_dpll_unregister_pins(de->dpll, outputs,
&ice_dpll_output_ops, num_outputs);
ice_dpll_release_pins(outputs, num_outputs);
+ if (!pf->dplls.generic) {
+ ice_dpll_deinit_direct_pins(cgu, pf->dplls.ufl,
+ ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_ufl_ops,
+ pf->dplls.pps.dpll,
+ pf->dplls.eec.dpll);
+ ice_dpll_deinit_direct_pins(cgu, pf->dplls.sma,
+ ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_sma_ops,
+ pf->dplls.pps.dpll,
+ pf->dplls.eec.dpll);
+ }
}
}
@@ -1926,8 +3155,7 @@ static void ice_dpll_deinit_pins(struct ice_pf *pf, bool cgu)
*/
static int ice_dpll_init_pins(struct ice_pf *pf, bool cgu)
{
- u32 rclk_idx;
- int ret;
+ int ret, count;
ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.inputs, 0,
pf->dplls.num_inputs,
@@ -1935,23 +3163,64 @@ static int ice_dpll_init_pins(struct ice_pf *pf, bool cgu)
pf->dplls.eec.dpll, pf->dplls.pps.dpll);
if (ret)
return ret;
+ count = pf->dplls.num_inputs;
if (cgu) {
ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.outputs,
- pf->dplls.num_inputs,
+ count,
pf->dplls.num_outputs,
&ice_dpll_output_ops,
pf->dplls.eec.dpll,
pf->dplls.pps.dpll);
if (ret)
goto deinit_inputs;
+ count += pf->dplls.num_outputs;
+ if (!pf->dplls.generic) {
+ ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.sma,
+ count,
+ ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_sma_ops,
+ pf->dplls.eec.dpll,
+ pf->dplls.pps.dpll);
+ if (ret)
+ goto deinit_outputs;
+ count += ICE_DPLL_PIN_SW_NUM;
+ ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.ufl,
+ count,
+ ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_ufl_ops,
+ pf->dplls.eec.dpll,
+ pf->dplls.pps.dpll);
+ if (ret)
+ goto deinit_sma;
+ count += ICE_DPLL_PIN_SW_NUM;
+ }
+ ret = ice_dpll_pin_ref_sync_register(pf->dplls.inputs,
+ pf->dplls.num_inputs);
+ if (ret)
+ goto deinit_ufl;
+ ret = ice_dpll_pin_ref_sync_register(pf->dplls.sma,
+ ICE_DPLL_PIN_SW_NUM);
+ if (ret)
+ goto deinit_ufl;
+ } else {
+ count += pf->dplls.num_outputs + 2 * ICE_DPLL_PIN_SW_NUM;
}
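+ /* The recovered clock pin index follows all input, output and SW pins,
+ * further offset by the PF id.
+ */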
- rclk_idx = pf->dplls.num_inputs + pf->dplls.num_outputs + pf->hw.pf_id;
- ret = ice_dpll_init_rclk_pins(pf, &pf->dplls.rclk, rclk_idx,
+ ret = ice_dpll_init_rclk_pins(pf, &pf->dplls.rclk, count + pf->hw.pf_id,
&ice_dpll_rclk_ops);
if (ret)
- goto deinit_outputs;
+ goto deinit_ufl;
return 0;
+deinit_ufl:
+ ice_dpll_deinit_direct_pins(cgu, pf->dplls.ufl,
+ ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_ufl_ops,
+ pf->dplls.pps.dpll, pf->dplls.eec.dpll);
+deinit_sma:
+ ice_dpll_deinit_direct_pins(cgu, pf->dplls.sma,
+ ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_sma_ops,
+ pf->dplls.pps.dpll, pf->dplls.eec.dpll);
deinit_outputs:
ice_dpll_deinit_direct_pins(cgu, pf->dplls.outputs,
pf->dplls.num_outputs,
@@ -1977,7 +3246,7 @@ static void
ice_dpll_deinit_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu)
{
if (cgu)
- dpll_device_unregister(d->dpll, &ice_dpll_ops, d);
+ dpll_device_unregister(d->dpll, d->ops, d);
dpll_device_put(d->dpll);
}
@@ -2011,12 +3280,17 @@ ice_dpll_init_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu,
}
d->pf = pf;
if (cgu) {
+ const struct dpll_device_ops *ops = &ice_dpll_ops;
+
+ if (type == DPLL_TYPE_PPS && ice_dpll_is_pps_phase_monitor(pf))
+ ops = &ice_dpll_pom_ops;
ice_dpll_update_state(pf, d, true);
- ret = dpll_device_register(d->dpll, type, &ice_dpll_ops, d);
+ ret = dpll_device_register(d->dpll, type, ops, d);
if (ret) {
dpll_device_put(d->dpll);
return ret;
}
+ d->ops = ops;
}
return 0;
@@ -2053,7 +3327,7 @@ static int ice_dpll_init_worker(struct ice_pf *pf)
struct kthread_worker *kworker;
kthread_init_delayed_work(&d->work, ice_dpll_periodic_work);
- kworker = kthread_create_worker(0, "ice-dplls-%s",
+ kworker = kthread_run_worker(0, "ice-dplls-%s",
dev_name(ice_pf_to_dev(pf)));
if (IS_ERR(kworker))
return PTR_ERR(kworker);
@@ -2065,6 +3339,18 @@ static int ice_dpll_init_worker(struct ice_pf *pf)
}
/**
+ * ice_dpll_phase_range_set - initialize phase adjust range helper
+ * @range: pointer to phase adjust range struct to be initialized
+ * @phase_adj: a value to be used as min(-)/max(+) boundary
+ */
+static void ice_dpll_phase_range_set(struct dpll_pin_phase_adjust_range *range,
+ u32 phase_adj)
+{
+ range->min = -phase_adj;
+ range->max = phase_adj;
+}
+
+/**
* ice_dpll_init_info_pins_generic - initializes generic pins info
* @pf: board private structure
* @input: if input pins initialized
@@ -2105,8 +3391,8 @@ static int ice_dpll_init_info_pins_generic(struct ice_pf *pf, bool input)
for (i = 0; i < pin_num; i++) {
pins[i].idx = i;
pins[i].prop.board_label = labels[i];
- pins[i].prop.phase_range.min = phase_adj_max;
- pins[i].prop.phase_range.max = -phase_adj_max;
+ ice_dpll_phase_range_set(&pins[i].prop.phase_range,
+ phase_adj_max);
pins[i].prop.capabilities = cap;
pins[i].pf = pf;
ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL);
@@ -2152,6 +3438,7 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
struct ice_hw *hw = &pf->hw;
struct ice_dpll_pin *pins;
unsigned long caps;
+ u32 phase_adj_max;
u8 freq_supp_num;
bool input;
@@ -2159,18 +3446,22 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
case ICE_DPLL_PIN_TYPE_INPUT:
pins = pf->dplls.inputs;
num_pins = pf->dplls.num_inputs;
+ phase_adj_max = pf->dplls.input_phase_adj_max;
input = true;
break;
case ICE_DPLL_PIN_TYPE_OUTPUT:
pins = pf->dplls.outputs;
num_pins = pf->dplls.num_outputs;
+ phase_adj_max = pf->dplls.output_phase_adj_max;
input = false;
break;
default:
return -EINVAL;
}
- if (num_pins != ice_cgu_get_num_pins(hw, input))
+ if (num_pins != ice_cgu_get_num_pins(hw, input)) {
+ pf->dplls.generic = true;
return ice_dpll_init_info_pins_generic(pf, input);
+ }
for (i = 0; i < num_pins; i++) {
caps = 0;
@@ -2188,19 +3479,17 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
return ret;
caps |= (DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE |
DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE);
- pins[i].prop.phase_range.min =
- pf->dplls.input_phase_adj_max;
- pins[i].prop.phase_range.max =
- -pf->dplls.input_phase_adj_max;
+ if (ice_dpll_is_sw_pin(pf, i, true))
+ pins[i].hidden = true;
} else {
- pins[i].prop.phase_range.min =
- pf->dplls.output_phase_adj_max;
- pins[i].prop.phase_range.max =
- -pf->dplls.output_phase_adj_max;
ret = ice_cgu_get_output_pin_state_caps(hw, i, &caps);
if (ret)
return ret;
+ if (ice_dpll_is_sw_pin(pf, i, false))
+ pins[i].hidden = true;
}
+ ice_dpll_phase_range_set(&pins[i].prop.phase_range,
+ phase_adj_max);
pins[i].prop.capabilities = caps;
ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL);
if (ret)
@@ -2210,6 +3499,8 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
pins[i].prop.freq_supported_num = freq_supp_num;
pins[i].pf = pf;
}
+ if (input)
+ ret = ice_dpll_init_ref_sync_inputs(pf);
return ret;
}
@@ -2237,6 +3528,91 @@ static int ice_dpll_init_info_rclk_pin(struct ice_pf *pf)
}
/**
+ * ice_dpll_init_info_sw_pins - initializes software controlled pin information
+ * @pf: board private structure
+ *
+ * Initialize information for software controlled pins and cache it in
+ * pf->dplls.sma and pf->dplls.ufl.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - init failure reason
+ */
+static int ice_dpll_init_info_sw_pins(struct ice_pf *pf)
+{
+ u8 freq_supp_num, pin_abs_idx, input_idx_offset = 0;
+ struct ice_dplls *d = &pf->dplls;
+ struct ice_dpll_pin *pin;
+ u32 phase_adj_max, caps;
+ int i, ret;
+
+ if (pf->hw.device_id == ICE_DEV_ID_E810C_QSFP)
+ input_idx_offset = ICE_E810_RCLK_PINS_NUM;
+ phase_adj_max = max(d->input_phase_adj_max, d->output_phase_adj_max);
+ caps = DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
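+ /* Each SMA pin wraps both an input and an output HW pin, hence it is
+ * registered as a single, direction-changeable SW pin.
+ */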
+ for (i = 0; i < ICE_DPLL_PIN_SW_NUM; i++) {
+ pin = &d->sma[i];
+ pin->idx = i;
+ pin->prop.type = DPLL_PIN_TYPE_EXT;
+ pin_abs_idx = ICE_DPLL_PIN_SW_INPUT_ABS(i) + input_idx_offset;
+ pin->prop.freq_supported =
+ ice_cgu_get_pin_freq_supp(&pf->hw, pin_abs_idx,
+ true, &freq_supp_num);
+ pin->prop.freq_supported_num = freq_supp_num;
+ pin->prop.capabilities =
+ (DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE |
+ DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE |
+ caps);
+ pin->pf = pf;
+ pin->prop.board_label = ice_dpll_sw_pin_sma[i];
+ pin->input = &d->inputs[pin_abs_idx];
+ if (pin->input->ref_sync)
+ pin->ref_sync = pin->input->ref_sync - pin_abs_idx;
+ pin->output = &d->outputs[ICE_DPLL_PIN_SW_OUTPUT_ABS(i)];
+ ice_dpll_phase_range_set(&pin->prop.phase_range, phase_adj_max);
+ }
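+ /* U.FL pins have a fixed direction: U.FL1 wraps only an output HW pin,
+ * U.FL2 wraps only an input HW pin.
+ */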
+ for (i = 0; i < ICE_DPLL_PIN_SW_NUM; i++) {
+ pin = &d->ufl[i];
+ pin->idx = i;
+ pin->prop.type = DPLL_PIN_TYPE_EXT;
+ pin->prop.capabilities = caps;
+ pin->pf = pf;
+ pin->prop.board_label = ice_dpll_sw_pin_ufl[i];
+ if (i == ICE_DPLL_PIN_SW_1_IDX) {
+ pin->direction = DPLL_PIN_DIRECTION_OUTPUT;
+ pin_abs_idx = ICE_DPLL_PIN_SW_OUTPUT_ABS(i);
+ pin->prop.freq_supported =
+ ice_cgu_get_pin_freq_supp(&pf->hw, pin_abs_idx,
+ false,
+ &freq_supp_num);
+ pin->prop.freq_supported_num = freq_supp_num;
+ pin->input = NULL;
+ pin->output = &d->outputs[pin_abs_idx];
+ } else if (i == ICE_DPLL_PIN_SW_2_IDX) {
+ pin->direction = DPLL_PIN_DIRECTION_INPUT;
+ pin_abs_idx = ICE_DPLL_PIN_SW_INPUT_ABS(i) +
+ input_idx_offset;
+ pin->output = NULL;
+ pin->input = &d->inputs[pin_abs_idx];
+ pin->prop.freq_supported =
+ ice_cgu_get_pin_freq_supp(&pf->hw, pin_abs_idx,
+ true, &freq_supp_num);
+ pin->prop.freq_supported_num = freq_supp_num;
+ pin->prop.capabilities =
+ (DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE |
+ caps);
+ }
+ ice_dpll_phase_range_set(&pin->prop.phase_range, phase_adj_max);
+ }
+ ret = ice_dpll_pin_state_update(pf, pin, ICE_DPLL_PIN_TYPE_SOFTWARE,
+ NULL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
* ice_dpll_init_pins_info - init pins info wrapper
* @pf: board private structure
* @pin_type: type of pins being initialized
@@ -2256,6 +3632,8 @@ ice_dpll_init_pins_info(struct ice_pf *pf, enum ice_dpll_pin_type pin_type)
return ice_dpll_init_info_direct_pins(pf, pin_type);
case ICE_DPLL_PIN_TYPE_RCLK_INPUT:
return ice_dpll_init_info_rclk_pin(pf);
+ case ICE_DPLL_PIN_TYPE_SOFTWARE:
+ return ice_dpll_init_info_sw_pins(pf);
default:
return -EINVAL;
}
@@ -2300,7 +3678,7 @@ static int ice_dpll_init_info(struct ice_pf *pf, bool cgu)
if (ret) {
dev_err(ice_pf_to_dev(pf),
"err:%d %s failed to read cgu abilities\n",
- ret, ice_aq_str(hw->adminq.sq_last_status));
+ ret, libie_aq_str(hw->adminq.sq_last_status));
return ret;
}
@@ -2308,8 +3686,10 @@ static int ice_dpll_init_info(struct ice_pf *pf, bool cgu)
dp->dpll_idx = abilities.pps_dpll_idx;
d->num_inputs = abilities.num_inputs;
d->num_outputs = abilities.num_outputs;
- d->input_phase_adj_max = le32_to_cpu(abilities.max_in_phase_adj);
- d->output_phase_adj_max = le32_to_cpu(abilities.max_out_phase_adj);
+ d->input_phase_adj_max = le32_to_cpu(abilities.max_in_phase_adj) &
+ ICE_AQC_GET_CGU_MAX_PHASE_ADJ;
+ d->output_phase_adj_max = le32_to_cpu(abilities.max_out_phase_adj) &
+ ICE_AQC_GET_CGU_MAX_PHASE_ADJ;
alloc_size = sizeof(*d->inputs) * d->num_inputs;
d->inputs = kzalloc(alloc_size, GFP_KERNEL);
@@ -2340,6 +3720,9 @@ static int ice_dpll_init_info(struct ice_pf *pf, bool cgu)
ret = ice_dpll_init_pins_info(pf, ICE_DPLL_PIN_TYPE_OUTPUT);
if (ret)
goto deinit_info;
+ ret = ice_dpll_init_pins_info(pf, ICE_DPLL_PIN_TYPE_SOFTWARE);
+ if (ret)
+ goto deinit_info;
}
ret = ice_get_cgu_rclk_pin_info(&pf->hw, &d->base_rclk_idx,
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.h b/drivers/net/ethernet/intel/ice/ice_dpll.h
index c320f1bf7d6d..c0da03384ce9 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.h
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.h
@@ -8,6 +8,18 @@
#define ICE_DPLL_RCLK_NUM_MAX 4
+/**
+ * enum ice_dpll_pin_sw - enumerate ice software pin indices:
+ * @ICE_DPLL_PIN_SW_1_IDX: index of first SW pin
+ * @ICE_DPLL_PIN_SW_2_IDX: index of second SW pin
+ * @ICE_DPLL_PIN_SW_NUM: number of SW pins in pair
+ */
+enum ice_dpll_pin_sw {
+ ICE_DPLL_PIN_SW_1_IDX,
+ ICE_DPLL_PIN_SW_2_IDX,
+ ICE_DPLL_PIN_SW_NUM
+};
+
/** ice_dpll_pin - store info about pins
* @pin: dpll pin structure
* @pf: pointer to pf, which has registered the dpll_pin
@@ -19,6 +31,8 @@
* @prop: pin properties
* @freq: current frequency of a pin
* @phase_adjust: current phase adjust value
+ * @input: underlying input HW pin wrapped by a SW pin, if any
+ * @output: underlying output HW pin wrapped by a SW pin, if any
+ * @direction: current direction of a SW pin
+ * @phase_offset: monitored phase offset value
+ * @ref_sync: id of the pin paired for reference sync
+ * @active: whether a SW pin is currently active
+ * @hidden: whether the pin is hidden from the dpll subsystem
*/
struct ice_dpll_pin {
struct dpll_pin *pin;
@@ -31,7 +45,14 @@ struct ice_dpll_pin {
struct dpll_pin_properties prop;
u32 freq;
s32 phase_adjust;
+ struct ice_dpll_pin *input;
+ struct ice_dpll_pin *output;
+ enum dpll_pin_direction direction;
+ s64 phase_offset;
u8 status;
+ u8 ref_sync;
+ bool active;
+ bool hidden;
};
/** ice_dpll - store info required for DPLL control
@@ -47,8 +68,10 @@ struct ice_dpll_pin {
* @input_prio: priorities of each input
* @dpll_state: current dpll sync state
* @prev_dpll_state: last dpll sync state
+ * @phase_offset_monitor_period: period between phase offset monitor reads
* @active_input: pointer to active input pin
* @prev_input: pointer to previous active input pin
+ * @ops: holds the registered ops
*/
struct ice_dpll {
struct dpll_device *dpll;
@@ -64,8 +87,10 @@ struct ice_dpll {
enum dpll_lock_status dpll_state;
enum dpll_lock_status prev_dpll_state;
enum dpll_mode mode;
+ u32 phase_offset_monitor_period;
struct dpll_pin *active_input;
struct dpll_pin *prev_input;
+ const struct dpll_device_ops *ops;
};
/** ice_dplls - store info required for CCU (clock controlling unit)
@@ -84,6 +109,7 @@ struct ice_dpll {
* @clock_id: clock_id of dplls
* @input_phase_adj_max: max phase adjust value for an input pins
* @output_phase_adj_max: max phase adjust value for an output pins
+ * @sma: software controlled SMA pins
+ * @ufl: software controlled U.FL pins
+ * @sma_data: cached SMA control register value
+ * @periodic_counter: counter of periodic work executions
+ * @generic: true if generic (fallback) pin info is in use
*/
struct ice_dplls {
struct kthread_worker *kworker;
@@ -93,14 +119,19 @@ struct ice_dplls {
struct ice_dpll pps;
struct ice_dpll_pin *inputs;
struct ice_dpll_pin *outputs;
+ struct ice_dpll_pin sma[ICE_DPLL_PIN_SW_NUM];
+ struct ice_dpll_pin ufl[ICE_DPLL_PIN_SW_NUM];
struct ice_dpll_pin rclk;
u8 num_inputs;
u8 num_outputs;
- int cgu_state_acq_err_num;
+ u8 sma_data;
u8 base_rclk_idx;
+ int cgu_state_acq_err_num;
u64 clock_id;
s32 input_phase_adj_max;
s32 output_phase_adj_max;
+ u32 periodic_counter;
+ bool generic;
};
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index c0b3e70a7ea3..2e4f0969035f 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -29,6 +29,7 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
return -ENODEV;
ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);
+ ice_vsi_cfg_sw_lldp(uplink_vsi, true, false);
netif_addr_lock_bh(netdev);
__dev_uc_unsync(netdev, NULL);
@@ -38,8 +39,7 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
if (ice_vsi_add_vlan_zero(uplink_vsi))
goto err_vlan_zero;
- if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true,
- ICE_FLTR_RX))
+ if (ice_set_dflt_vsi(uplink_vsi))
goto err_def_rx;
if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true,
@@ -50,9 +50,6 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
if (vlan_ops->dis_rx_filtering(uplink_vsi))
goto err_vlan_filtering;
- if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
- goto err_override_uplink;
-
if (ice_vsi_update_local_lb(uplink_vsi, true))
goto err_override_local_lb;
@@ -64,8 +61,6 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
err_up:
ice_vsi_update_local_lb(uplink_vsi, false);
err_override_local_lb:
- ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
-err_override_uplink:
vlan_ops->ena_rx_filtering(uplink_vsi);
err_vlan_filtering:
ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
@@ -251,6 +246,10 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb,
u64 cd_cmd, dst_vsi;
if (!dst) {
+ struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
+
+ if (unlikely(eth->h_proto == htons(ETH_P_LLDP)))
+ return;
cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S;
off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX);
} else {
@@ -276,7 +275,6 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
ice_vsi_update_local_lb(uplink_vsi, false);
- ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
vlan_ops->ena_rx_filtering(uplink_vsi);
ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
ICE_FLTR_TX);
@@ -285,6 +283,7 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
ice_fltr_add_mac_and_broadcast(uplink_vsi,
uplink_vsi->port_info->mac.perm_addr,
ICE_FWD_TO_VSI);
+ ice_vsi_cfg_sw_lldp(uplink_vsi, true, true);
}
/**
@@ -509,10 +508,14 @@ err_create_repr:
*/
int ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf)
{
- struct ice_repr *repr = ice_repr_create_vf(vf);
struct devlink *devlink = priv_to_devlink(pf);
+ struct ice_repr *repr;
int err;
+ if (!ice_is_eswitch_mode_switchdev(pf))
+ return 0;
+
+ repr = ice_repr_create_vf(vf);
if (IS_ERR(repr))
return PTR_ERR(repr);
@@ -552,13 +555,14 @@ int ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf)
static void ice_eswitch_detach(struct ice_pf *pf, struct ice_repr *repr)
{
ice_eswitch_stop_reprs(pf);
+ repr->ops.rem(repr);
+
xa_erase(&pf->eswitch.reprs, repr->id);
if (xa_empty(&pf->eswitch.reprs))
ice_eswitch_disable_switchdev(pf);
ice_eswitch_release_repr(pf, repr);
- repr->ops.rem(repr);
ice_repr_destroy(repr);
if (xa_empty(&pf->eswitch.reprs)) {
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
index ac7db100e2cd..5c7dcf21b222 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -5,7 +5,7 @@
#define _ICE_ESWITCH_H_
#include <net/devlink.h>
-#include "devlink/devlink_port.h"
+#include "devlink/port.h"
#ifdef CONFIG_ICE_SWITCHDEV
void ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf);
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 2924ac61300d..969d4f8f9c02 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -10,6 +10,7 @@
#include "ice_lib.h"
#include "ice_dcb_lib.h"
#include <net/dcbnl.h>
+#include <net/libeth/rx.h>
struct ice_stats {
char stat_string[ETH_GSTRING_LEN];
@@ -340,7 +341,6 @@ static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
ICE_FLAG_VF_TRUE_PROMISC_ENA),
ICE_PRIV_FLAG("mdd-auto-reset-vf", ICE_FLAG_MDD_AUTO_RESET_VF),
ICE_PRIV_FLAG("vf-vlan-pruning", ICE_FLAG_VF_VLAN_PRUNING),
- ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX),
};
#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)
@@ -667,7 +667,8 @@ static int ice_get_port_topology(struct ice_hw *hw, u8 lport,
if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_100G)
port_topology->serdes_lane_count = 4;
- else if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_50G)
+ else if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_50G ||
+ max_speed == ICE_AQC_PORT_OPT_MAX_LANE_40G)
port_topology->serdes_lane_count = 2;
else
port_topology->serdes_lane_count = 1;
@@ -693,75 +694,52 @@ static int ice_get_port_topology(struct ice_hw *hw, u8 lport,
static int ice_get_tx_rx_equa(struct ice_hw *hw, u8 serdes_num,
struct ice_serdes_equalization_to_ethtool *ptr)
{
+ static const int tx = ICE_AQC_OP_CODE_TX_EQU;
+ static const int rx = ICE_AQC_OP_CODE_RX_EQU;
+ struct {
+ int data_in;
+ int opcode;
+ int *out;
+ } aq_params[] = {
+ { ICE_AQC_TX_EQU_PRE1, tx, &ptr->tx_equ_pre1 },
+ { ICE_AQC_TX_EQU_PRE3, tx, &ptr->tx_equ_pre3 },
+ { ICE_AQC_TX_EQU_ATTEN, tx, &ptr->tx_equ_atten },
+ { ICE_AQC_TX_EQU_POST1, tx, &ptr->tx_equ_post1 },
+ { ICE_AQC_TX_EQU_PRE2, tx, &ptr->tx_equ_pre2 },
+ { ICE_AQC_RX_EQU_PRE2, rx, &ptr->rx_equ_pre2 },
+ { ICE_AQC_RX_EQU_PRE1, rx, &ptr->rx_equ_pre1 },
+ { ICE_AQC_RX_EQU_POST1, rx, &ptr->rx_equ_post1 },
+ { ICE_AQC_RX_EQU_BFLF, rx, &ptr->rx_equ_bflf },
+ { ICE_AQC_RX_EQU_BFHF, rx, &ptr->rx_equ_bfhf },
+ { ICE_AQC_RX_EQU_CTLE_GAINHF, rx, &ptr->rx_equ_ctle_gainhf },
+ { ICE_AQC_RX_EQU_CTLE_GAINLF, rx, &ptr->rx_equ_ctle_gainlf },
+ { ICE_AQC_RX_EQU_CTLE_GAINDC, rx, &ptr->rx_equ_ctle_gaindc },
+ { ICE_AQC_RX_EQU_CTLE_BW, rx, &ptr->rx_equ_ctle_bw },
+ { ICE_AQC_RX_EQU_DFE_GAIN, rx, &ptr->rx_equ_dfe_gain },
+ { ICE_AQC_RX_EQU_DFE_GAIN2, rx, &ptr->rx_equ_dfe_gain_2 },
+ { ICE_AQC_RX_EQU_DFE_2, rx, &ptr->rx_equ_dfe_2 },
+ { ICE_AQC_RX_EQU_DFE_3, rx, &ptr->rx_equ_dfe_3 },
+ { ICE_AQC_RX_EQU_DFE_4, rx, &ptr->rx_equ_dfe_4 },
+ { ICE_AQC_RX_EQU_DFE_5, rx, &ptr->rx_equ_dfe_5 },
+ { ICE_AQC_RX_EQU_DFE_6, rx, &ptr->rx_equ_dfe_6 },
+ { ICE_AQC_RX_EQU_DFE_7, rx, &ptr->rx_equ_dfe_7 },
+ { ICE_AQC_RX_EQU_DFE_8, rx, &ptr->rx_equ_dfe_8 },
+ { ICE_AQC_RX_EQU_DFE_9, rx, &ptr->rx_equ_dfe_9 },
+ { ICE_AQC_RX_EQU_DFE_10, rx, &ptr->rx_equ_dfe_10 },
+ { ICE_AQC_RX_EQU_DFE_11, rx, &ptr->rx_equ_dfe_11 },
+ { ICE_AQC_RX_EQU_DFE_12, rx, &ptr->rx_equ_dfe_12 },
+ };
int err;
- err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_PRE1,
- ICE_AQC_OP_CODE_TX_EQU, serdes_num,
- &ptr->tx_equalization_pre1);
- if (err)
- return err;
-
- err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_PRE3,
- ICE_AQC_OP_CODE_TX_EQU, serdes_num,
- &ptr->tx_equalization_pre3);
- if (err)
- return err;
-
- err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_ATTEN,
- ICE_AQC_OP_CODE_TX_EQU, serdes_num,
- &ptr->tx_equalization_atten);
- if (err)
- return err;
-
- err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_POST1,
- ICE_AQC_OP_CODE_TX_EQU, serdes_num,
- &ptr->tx_equalization_post1);
- if (err)
- return err;
-
- err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_PRE2,
- ICE_AQC_OP_CODE_TX_EQU, serdes_num,
- &ptr->tx_equalization_pre2);
- if (err)
- return err;
-
- err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_PRE2,
- ICE_AQC_OP_CODE_RX_EQU, serdes_num,
- &ptr->rx_equalization_pre2);
- if (err)
- return err;
-
- err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_PRE1,
- ICE_AQC_OP_CODE_RX_EQU, serdes_num,
- &ptr->rx_equalization_pre1);
- if (err)
- return err;
-
- err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_POST1,
- ICE_AQC_OP_CODE_RX_EQU, serdes_num,
- &ptr->rx_equalization_post1);
- if (err)
- return err;
-
- err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_BFLF,
- ICE_AQC_OP_CODE_RX_EQU, serdes_num,
- &ptr->rx_equalization_bflf);
- if (err)
- return err;
-
- err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_BFHF,
- ICE_AQC_OP_CODE_RX_EQU, serdes_num,
- &ptr->rx_equalization_bfhf);
- if (err)
- return err;
-
- err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_DRATE,
- ICE_AQC_OP_CODE_RX_EQU, serdes_num,
- &ptr->rx_equalization_drate);
- if (err)
- return err;
+ for (int i = 0; i < ARRAY_SIZE(aq_params); i++) {
+ err = ice_aq_get_phy_equalization(hw, aq_params[i].data_in,
+ aq_params[i].opcode,
+ serdes_num, aq_params[i].out);
+ if (err)
+ break;
+ }
- return 0;
+ return err;
}
/**
@@ -816,8 +794,7 @@ static int ice_get_extended_regs(struct net_device *netdev, void *p)
static void
ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_hw *hw = &pf->hw;
u32 *regs_buf = (u32 *)p;
unsigned int i;
@@ -832,8 +809,7 @@ ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
static u32 ice_get_msglevel(struct net_device *netdev)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
#ifndef CONFIG_DYNAMIC_DEBUG
if (pf->hw.debug_mask)
@@ -846,8 +822,7 @@ static u32 ice_get_msglevel(struct net_device *netdev)
static void ice_set_msglevel(struct net_device *netdev, u32 data)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
#ifndef CONFIG_DYNAMIC_DEBUG
if (ICE_DBG_USER & data)
@@ -859,10 +834,17 @@ static void ice_set_msglevel(struct net_device *netdev, u32 data)
#endif /* !CONFIG_DYNAMIC_DEBUG */
}
+static void ice_get_link_ext_stats(struct net_device *netdev,
+ struct ethtool_link_ext_stats *stats)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+ stats->link_down_events = pf->link_down_events;
+}
+
static int ice_get_eeprom_len(struct net_device *netdev)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
return (int)pf->hw.flash.flash_size;
}
@@ -871,9 +853,7 @@ static int
ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
u8 *bytes)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_hw *hw = &pf->hw;
struct device *dev;
int ret;
@@ -892,7 +872,7 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
ret = ice_acquire_nvm(hw, ICE_RES_READ);
if (ret) {
dev_err(dev, "ice_acquire_nvm failed, err %d aq_err %s\n",
- ret, ice_aq_str(hw->adminq.sq_last_status));
+ ret, libie_aq_str(hw->adminq.sq_last_status));
goto out;
}
@@ -900,7 +880,7 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
false);
if (ret) {
dev_err(dev, "ice_read_flat_nvm failed, err %d aq_err %s\n",
- ret, ice_aq_str(hw->adminq.sq_last_status));
+ ret, libie_aq_str(hw->adminq.sq_last_status));
goto release;
}
@@ -972,8 +952,7 @@ static u64 ice_link_test(struct net_device *netdev)
*/
static u64 ice_eeprom_test(struct net_device *netdev)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
netdev_info(netdev, "EEPROM test\n");
return !!(ice_nvm_validate_checksum(&pf->hw));
@@ -1252,8 +1231,9 @@ static int ice_diag_send(struct ice_tx_ring *tx_ring, u8 *data, u16 size)
*/
static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring)
{
- struct ice_rx_buf *rx_buf;
+ struct libeth_fqe *rx_buf;
int valid_frames, i;
+ struct page *page;
u8 *received_buf;
valid_frames = 0;
@@ -1268,8 +1248,10 @@ static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring)
cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)))))
continue;
- rx_buf = &rx_ring->rx_buf[i];
- received_buf = page_address(rx_buf->page) + rx_buf->page_offset;
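+ /* With libeth page pool buffers the payload starts at the page pool
+ * headroom (pp->p.offset) plus the buffer offset within the page.
+ */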
+ rx_buf = &rx_ring->rx_fqes[i];
+ page = __netmem_to_page(rx_buf->netmem);
+ received_buf = page_address(page) + rx_buf->offset +
+ page->pp->p.offset;
if (ice_lbtest_check_frame(received_buf))
valid_frames++;
@@ -1287,9 +1269,8 @@ static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring)
*/
static u64 ice_loopback_test(struct net_device *netdev)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *orig_vsi = np->vsi, *test_vsi;
- struct ice_pf *pf = orig_vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_vsi *test_vsi;
u8 *tx_frame __free(kfree) = NULL;
u8 broadcast[ETH_ALEN], ret = 0;
int num_frames, valid_frames;
@@ -1378,8 +1359,7 @@ lbtest_vsi_close:
*/
static u64 ice_intr_test(struct net_device *netdev)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
u16 swic_old = pf->sw_int_count;
netdev_info(netdev, "interrupt test\n");
@@ -1407,9 +1387,8 @@ static void
ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
u64 *data)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
bool if_running = netif_running(netdev);
- struct ice_pf *pf = np->vsi->back;
struct device *dev;
dev = ice_pf_to_dev(pf);
@@ -1733,9 +1712,7 @@ static int ice_nway_reset(struct net_device *netdev)
*/
static u32 ice_get_priv_flags(struct net_device *netdev)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
u32 i, ret_flags = 0;
for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
@@ -1841,7 +1818,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
/* Remove rule to direct LLDP packets to default VSI.
* The FW LLDP engine will now be consuming them.
*/
- ice_cfg_sw_lldp(vsi, false, false);
+ ice_cfg_sw_rx_lldp(vsi->back, false);
/* AQ command to start FW LLDP agent will return an
* error if the agent is already started
@@ -1882,10 +1859,6 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
ice_nway_reset(netdev);
}
}
- if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) {
- /* down and up VSI so that changes of Rx cfg are reflected. */
- ice_down_up(vsi);
- }
/* don't allow modification of this flag when a single VF is in
* promiscuous mode because it's not supported
*/
@@ -2811,14 +2784,7 @@ done:
return err;
}
-/**
- * ice_parse_hdrs - parses headers from RSS hash input
- * @nfc: ethtool rxnfc command
- *
- * This function parses the rxnfc command and returns intended
- * header types for RSS configuration
- */
-static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc)
+static u32 ice_parse_hdrs(const struct ethtool_rxfh_fields *nfc)
{
u32 hdrs = ICE_FLOW_SEG_HDR_NONE;
@@ -2883,15 +2849,7 @@ static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc)
return hdrs;
}
-/**
- * ice_parse_hash_flds - parses hash fields from RSS hash input
- * @nfc: ethtool rxnfc command
- * @symm: true if Symmetric Topelitz is set
- *
- * This function parses the rxnfc command and returns intended
- * hash fields for RSS configuration
- */
-static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc, bool symm)
+static u64 ice_parse_hash_flds(const struct ethtool_rxfh_fields *nfc, bool symm)
{
u64 hfld = ICE_HASH_INVALID;
@@ -2988,16 +2946,13 @@ static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc, bool symm)
return hfld;
}
-/**
- * ice_set_rss_hash_opt - Enable/Disable flow types for RSS hash
- * @vsi: the VSI being configured
- * @nfc: ethtool rxnfc command
- *
- * Returns Success if the flow input set is supported.
- */
static int
-ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
+ice_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_rss_hash_cfg cfg;
struct device *dev;
@@ -3043,14 +2998,11 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
return 0;
}
-/**
- * ice_get_rss_hash_opt - Retrieve hash fields for a given flow-type
- * @vsi: the VSI being configured
- * @nfc: ethtool rxnfc command
- */
-static void
-ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
+static int
+ice_get_rxfh_fields(struct net_device *netdev, struct ethtool_rxfh_fields *nfc)
{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct device *dev;
u64 hash_flds;
@@ -3063,21 +3015,21 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
if (ice_is_safe_mode(pf)) {
dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
vsi->vsi_num);
- return;
+ return 0;
}
hdrs = ice_parse_hdrs(nfc);
if (hdrs == ICE_FLOW_SEG_HDR_NONE) {
dev_dbg(dev, "Header type is not valid, vsi num = %d\n",
vsi->vsi_num);
- return;
+ return 0;
}
hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs, &symm);
if (hash_flds == ICE_HASH_INVALID) {
dev_dbg(dev, "No hash fields found for the given header type, vsi num = %d\n",
vsi->vsi_num);
- return;
+ return 0;
}
if (hash_flds & ICE_FLOW_HASH_FLD_IPV4_SA ||
@@ -3104,6 +3056,8 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
hash_flds & ICE_FLOW_HASH_FLD_GTPU_UP_TEID ||
hash_flds & ICE_FLOW_HASH_FLD_GTPU_DWN_TEID)
nfc->data |= (u64)RXH_GTP_TEID;
+
+ return 0;
}
/**
@@ -3123,8 +3077,6 @@ static int ice_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
return ice_add_fdir_ethtool(vsi, cmd);
case ETHTOOL_SRXCLSRLDEL:
return ice_del_fdir_ethtool(vsi, cmd);
- case ETHTOOL_SRXFH:
- return ice_set_rss_hash_opt(vsi, cmd);
default:
break;
}
@@ -3132,6 +3084,20 @@ static int ice_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
}
/**
+ * ice_get_rx_ring_count - get RX ring count
+ * @netdev: network interface device structure
+ *
+ * Return: number of RX rings.
+ */
+static u32 ice_get_rx_ring_count(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+
+ return vsi->rss_size;
+}
+
+/**
* ice_get_rxnfc - command to get Rx flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -3151,10 +3117,6 @@ ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
hw = &vsi->back->hw;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = vsi->rss_size;
- ret = 0;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = hw->fdir_active_fltr;
/* report total rule count */
@@ -3167,10 +3129,6 @@ ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
case ETHTOOL_GRXCLSRLALL:
ret = ice_get_fdir_fltr_ids(hw, cmd, (u32 *)rule_locs);
break;
- case ETHTOOL_GRXFH:
- ice_get_rss_hash_opt(vsi, cmd);
- ret = 0;
- break;
default:
break;
}
@@ -3185,9 +3143,11 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
+ struct ice_hw *hw;
- ring->rx_max_pending = ICE_MAX_NUM_DESC;
- ring->tx_max_pending = ICE_MAX_NUM_DESC;
+ hw = &vsi->back->hw;
+ ring->rx_max_pending = ICE_MAX_NUM_DESC_BY_MAC(hw);
+ ring->tx_max_pending = ICE_MAX_NUM_DESC_BY_MAC(hw);
if (vsi->tx_rings && vsi->rx_rings) {
ring->rx_pending = vsi->rx_rings[0]->count;
ring->tx_pending = vsi->tx_rings[0]->count;
@@ -3201,6 +3161,10 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
ring->rx_jumbo_max_pending = 0;
ring->rx_mini_pending = 0;
ring->rx_jumbo_pending = 0;
+
+ kernel_ring->tcp_data_split = vsi->hsplit ?
+ ETHTOOL_TCP_DATA_SPLIT_ENABLED :
+ ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}
static int
@@ -3215,15 +3179,17 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int i, timeout = 50, err = 0;
+ struct ice_hw *hw = &pf->hw;
u16 new_rx_cnt, new_tx_cnt;
+ bool hsplit;
- if (ring->tx_pending > ICE_MAX_NUM_DESC ||
+ if (ring->tx_pending > ICE_MAX_NUM_DESC_BY_MAC(hw) ||
ring->tx_pending < ICE_MIN_NUM_DESC ||
- ring->rx_pending > ICE_MAX_NUM_DESC ||
+ ring->rx_pending > ICE_MAX_NUM_DESC_BY_MAC(hw) ||
ring->rx_pending < ICE_MIN_NUM_DESC) {
netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
ring->tx_pending, ring->rx_pending,
- ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC,
+ ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC_BY_MAC(hw),
ICE_REQ_DESC_MULTIPLE);
return -EINVAL;
}
@@ -3241,9 +3207,12 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n",
new_rx_cnt);
+ hsplit = kernel_ring->tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED;
+
/* if nothing to do return success */
if (new_tx_cnt == vsi->tx_rings[0]->count &&
- new_rx_cnt == vsi->rx_rings[0]->count) {
+ new_rx_cnt == vsi->rx_rings[0]->count &&
+ hsplit == vsi->hsplit) {
netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
return 0;
}
@@ -3273,6 +3242,8 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
vsi->xdp_rings[i]->count = new_tx_cnt;
vsi->num_tx_desc = (u16)new_tx_cnt;
vsi->num_rx_desc = (u16)new_rx_cnt;
+ vsi->hsplit = hsplit;
+
netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n");
goto done;
}
@@ -3296,6 +3267,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
tx_rings[i].count = new_tx_cnt;
tx_rings[i].desc = NULL;
tx_rings[i].tx_buf = NULL;
+ tx_rings[i].tstamp_ring = NULL;
tx_rings[i].tx_tstamps = &pf->ptp.port.tx;
err = ice_setup_tx_ring(&tx_rings[i]);
if (err) {
@@ -3355,7 +3327,8 @@ process_rx:
rx_rings[i].count = new_rx_cnt;
rx_rings[i].cached_phctime = pf->ptp.cached_phc_time;
rx_rings[i].desc = NULL;
- rx_rings[i].rx_buf = NULL;
+ rx_rings[i].xdp_buf = NULL;
+
/* this is to allow wr32 to have something to write to
* during early allocation of Rx buffers
*/
@@ -3364,10 +3337,6 @@ process_rx:
err = ice_setup_rx_ring(&rx_rings[i]);
if (err)
goto rx_unwind;
-
- /* allocate Rx buffers */
- err = ice_alloc_rx_bufs(&rx_rings[i],
- ICE_RX_DESC_UNUSED(&rx_rings[i]));
rx_unwind:
if (err) {
while (i) {
@@ -3381,6 +3350,8 @@ rx_unwind:
}
process_link:
+ vsi->hsplit = hsplit;
+
/* Bring interface down, copy in the new ring info, then restore the
* interface. if VSI is up, bring it down and then back up
*/
@@ -3580,15 +3551,15 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) {
netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
} else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) {
netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
} else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) {
netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
}
@@ -3630,11 +3601,10 @@ static int
ice_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- u32 rss_context = rxfh->rss_context;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
u16 qcount, offset;
- int err, num_tc, i;
+ int err, i;
u8 *lut;
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
@@ -3642,24 +3612,8 @@ ice_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
return -EOPNOTSUPP;
}
- if (rss_context && !ice_is_adq_active(pf)) {
- netdev_err(netdev, "RSS context cannot be non-zero when ADQ is not configured.\n");
- return -EINVAL;
- }
-
- qcount = vsi->mqprio_qopt.qopt.count[rss_context];
- offset = vsi->mqprio_qopt.qopt.offset[rss_context];
-
- if (rss_context && ice_is_adq_active(pf)) {
- num_tc = vsi->mqprio_qopt.qopt.num_tc;
- if (rss_context >= num_tc) {
- netdev_err(netdev, "RSS context:%d > num_tc:%d\n",
- rss_context, num_tc);
- return -EINVAL;
- }
- /* Use channel VSI of given TC */
- vsi = vsi->tc_map_vsi[rss_context];
- }
+ qcount = vsi->mqprio_qopt.qopt.count[0];
+ offset = vsi->mqprio_qopt.qopt.offset[0];
rxfh->hfunc = ETH_RSS_HASH_TOP;
if (vsi->rss_hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ)
@@ -3719,9 +3673,6 @@ ice_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (rxfh->rss_context)
- return -EOPNOTSUPP;
-
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
/* RSS not supported return error here */
netdev_warn(netdev, "RSS is not configured on this VSI!\n");
@@ -3811,8 +3762,7 @@ ice_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
*/
static int ice_get_max_txq(struct ice_pf *pf)
{
- return min3(pf->num_lan_msix, (u16)num_online_cpus(),
- (u16)pf->hw.func_caps.common_cap.num_txq);
+ return min(num_online_cpus(), pf->hw.func_caps.common_cap.num_txq);
}
/**
@@ -3821,8 +3771,7 @@ static int ice_get_max_txq(struct ice_pf *pf)
*/
static int ice_get_max_rxq(struct ice_pf *pf)
{
- return min3(pf->num_lan_msix, (u16)num_online_cpus(),
- (u16)pf->hw.func_caps.common_cap.num_rxq);
+ return min(num_online_cpus(), pf->hw.func_caps.common_cap.num_rxq);
}
/**
@@ -3840,8 +3789,7 @@ static u32 ice_get_combined_cnt(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, q_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
- if (q_vector->rx.rx_ring && q_vector->tx.tx_ring)
- combined++;
+ combined += min(q_vector->num_ring_tx, q_vector->num_ring_rx);
}
return combined;
@@ -3922,7 +3870,7 @@ static int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size)
err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
if (err)
dev_err(dev, "Cannot set RSS lut, err %d aq_err %s\n", err,
- ice_aq_str(hw->adminq.sq_last_status));
+ libie_aq_str(hw->adminq.sq_last_status));
kfree(lut);
return err;
@@ -3990,11 +3938,11 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
return -EINVAL;
}
- if (pf->adev) {
+ if (pf->cdev_info && pf->cdev_info->adev) {
mutex_lock(&pf->adev_mutex);
- device_lock(&pf->adev->dev);
+ device_lock(&pf->cdev_info->adev->dev);
locked = true;
- if (pf->adev->dev.driver) {
+ if (pf->cdev_info->adev->dev.driver) {
netdev_err(dev, "Cannot change channels when RDMA is active\n");
ret = -EBUSY;
goto adev_unlock;
@@ -4013,7 +3961,7 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
adev_unlock:
if (locked) {
- device_unlock(&pf->adev->dev);
+ device_unlock(&pf->cdev_info->adev->dev);
mutex_unlock(&pf->adev_mutex);
}
return ret;
@@ -4474,9 +4422,7 @@ static int
ice_get_module_info(struct net_device *netdev,
struct ethtool_modinfo *modinfo)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_hw *hw = &pf->hw;
u8 sff8472_comp = 0;
u8 sff8472_swap = 0;
@@ -4548,12 +4494,10 @@ static int
ice_get_module_eeprom(struct net_device *netdev,
struct ethtool_eeprom *ee, u8 *data)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
#define SFF_READ_BLOCK_SIZE 8
u8 value[SFF_READ_BLOCK_SIZE] = { 0 };
u8 addr = ICE_I2C_EEPROM_DEV_ADDR;
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
bool is_sfp = false;
unsigned int i, j;
@@ -4681,10 +4625,12 @@ static int ice_get_port_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
* ice_get_fec_stats - returns FEC correctable, uncorrectable stats per netdev
* @netdev: network interface device structure
* @fec_stats: buffer to hold FEC statistics for given port
+ * @hist: buffer to put FEC histogram statistics for given port
*
*/
static void ice_get_fec_stats(struct net_device *netdev,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_port_topology port_topology;
@@ -4716,6 +4662,98 @@ static void ice_get_fec_stats(struct net_device *netdev,
pi->lport, err);
}
+static void ice_get_eth_mac_stats(struct net_device *netdev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_hw_port_stats *ps = &pf->stats;
+
+ mac_stats->FramesTransmittedOK = ps->eth.tx_unicast +
+ ps->eth.tx_multicast +
+ ps->eth.tx_broadcast;
+ mac_stats->FramesReceivedOK = ps->eth.rx_unicast +
+ ps->eth.rx_multicast +
+ ps->eth.rx_broadcast;
+ mac_stats->FrameCheckSequenceErrors = ps->crc_errors;
+ mac_stats->OctetsTransmittedOK = ps->eth.tx_bytes;
+ mac_stats->OctetsReceivedOK = ps->eth.rx_bytes;
+ mac_stats->MulticastFramesXmittedOK = ps->eth.tx_multicast;
+ mac_stats->BroadcastFramesXmittedOK = ps->eth.tx_broadcast;
+ mac_stats->MulticastFramesReceivedOK = ps->eth.rx_multicast;
+ mac_stats->BroadcastFramesReceivedOK = ps->eth.rx_broadcast;
+ mac_stats->InRangeLengthErrors = ps->rx_len_errors;
+ mac_stats->FrameTooLongErrors = ps->rx_oversize;
+}
+
+static void ice_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_hw_port_stats *ps = &pf->stats;
+
+ pause_stats->tx_pause_frames = ps->link_xon_tx + ps->link_xoff_tx;
+ pause_stats->rx_pause_frames = ps->link_xon_rx + ps->link_xoff_rx;
+}
+
+static const struct ethtool_rmon_hist_range ice_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1522 },
+ { 1523, 9522 },
+ {}
+};
+
+static void ice_get_rmon_stats(struct net_device *netdev,
+ struct ethtool_rmon_stats *rmon,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_hw_port_stats *ps = &pf->stats;
+
+ rmon->undersize_pkts = ps->rx_undersize;
+ rmon->oversize_pkts = ps->rx_oversize;
+ rmon->fragments = ps->rx_fragments;
+ rmon->jabbers = ps->rx_jabber;
+
+ rmon->hist[0] = ps->rx_size_64;
+ rmon->hist[1] = ps->rx_size_127;
+ rmon->hist[2] = ps->rx_size_255;
+ rmon->hist[3] = ps->rx_size_511;
+ rmon->hist[4] = ps->rx_size_1023;
+ rmon->hist[5] = ps->rx_size_1522;
+ rmon->hist[6] = ps->rx_size_big;
+
+ rmon->hist_tx[0] = ps->tx_size_64;
+ rmon->hist_tx[1] = ps->tx_size_127;
+ rmon->hist_tx[2] = ps->tx_size_255;
+ rmon->hist_tx[3] = ps->tx_size_511;
+ rmon->hist_tx[4] = ps->tx_size_1023;
+ rmon->hist_tx[5] = ps->tx_size_1522;
+ rmon->hist_tx[6] = ps->tx_size_big;
+
+ *ranges = ice_rmon_ranges;
+}
+
+/**
+ * ice_get_ts_stats - provide timestamping stats
+ * @netdev: the netdevice pointer from ethtool
+ * @ts_stats: the ethtool data structure to fill in
+ */
+static void ice_get_ts_stats(struct net_device *netdev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_ptp *ptp = &pf->ptp;
+
+ ts_stats->pkts = ptp->tx_hwtstamp_good;
+ ts_stats->err = ptp->tx_hwtstamp_skipped +
+ ptp->tx_hwtstamp_flushed +
+ ptp->tx_hwtstamp_discarded;
+ ts_stats->lost = ptp->tx_hwtstamp_timeouts;
+}
+
#define ICE_ETHTOOL_PFR (ETH_RESET_IRQ | ETH_RESET_DMA | \
ETH_RESET_FILTER | ETH_RESET_OFFLOAD)
@@ -4737,8 +4775,7 @@ static void ice_get_fec_stats(struct net_device *netdev,
*/
static int ice_ethtool_reset(struct net_device *dev, u32 *flags)
{
- struct ice_netdev_priv *np = netdev_priv(dev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(dev);
enum ice_reset_req reset;
switch (*flags) {
@@ -4792,15 +4829,18 @@ static int ice_repr_ethtool_reset(struct net_device *dev, u32 *flags)
}
static const struct ethtool_ops ice_ethtool_ops = {
- .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE |
ETHTOOL_COALESCE_RX_USECS_HIGH,
- .cap_rss_sym_xor_supported = true,
- .rxfh_per_ctx_key = true,
+ .supported_input_xfrm = RXH_XFRM_SYM_XOR,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
.get_fec_stats = ice_get_fec_stats,
+ .get_eth_mac_stats = ice_get_eth_mac_stats,
+ .get_pause_stats = ice_get_pause_stats,
+ .get_rmon_stats = ice_get_rmon_stats,
+ .get_ts_stats = ice_get_ts_stats,
.get_drvinfo = ice_get_drvinfo,
.get_regs_len = ice_get_regs_len,
.get_regs = ice_get_regs,
@@ -4810,6 +4850,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
.set_msglevel = ice_set_msglevel,
.self_test = ice_self_test,
.get_link = ethtool_op_get_link,
+ .get_link_ext_stats = ice_get_link_ext_stats,
.get_eeprom_len = ice_get_eeprom_len,
.get_eeprom = ice_get_eeprom,
.get_coalesce = ice_get_coalesce,
@@ -4822,6 +4863,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
.get_sset_count = ice_get_sset_count,
.get_rxnfc = ice_get_rxnfc,
.set_rxnfc = ice_set_rxnfc,
+ .get_rx_ring_count = ice_get_rx_ring_count,
.get_ringparam = ice_get_ringparam,
.set_ringparam = ice_set_ringparam,
.nway_reset = ice_nway_reset,
@@ -4832,6 +4874,8 @@ static const struct ethtool_ops ice_ethtool_ops = {
.get_rxfh_indir_size = ice_get_rxfh_indir_size,
.get_rxfh = ice_get_rxfh,
.set_rxfh = ice_set_rxfh,
+ .get_rxfh_fields = ice_get_rxfh_fields,
+ .set_rxfh_fields = ice_set_rxfh_fields,
.get_channels = ice_get_channels,
.set_channels = ice_set_channels,
.get_ts_info = ice_get_ts_info,
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.h b/drivers/net/ethernet/intel/ice/ice_ethtool.h
index 9acccae38625..23b2cfbc9684 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.h
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.h
@@ -10,17 +10,33 @@ struct ice_phy_type_to_ethtool {
};
struct ice_serdes_equalization_to_ethtool {
- int rx_equalization_pre2;
- int rx_equalization_pre1;
- int rx_equalization_post1;
- int rx_equalization_bflf;
- int rx_equalization_bfhf;
- int rx_equalization_drate;
- int tx_equalization_pre1;
- int tx_equalization_pre3;
- int tx_equalization_atten;
- int tx_equalization_post1;
- int tx_equalization_pre2;
+ int rx_equ_pre2;
+ int rx_equ_pre1;
+ int rx_equ_post1;
+ int rx_equ_bflf;
+ int rx_equ_bfhf;
+ int rx_equ_ctle_gainhf;
+ int rx_equ_ctle_gainlf;
+ int rx_equ_ctle_gaindc;
+ int rx_equ_ctle_bw;
+ int rx_equ_dfe_gain;
+ int rx_equ_dfe_gain_2;
+ int rx_equ_dfe_2;
+ int rx_equ_dfe_3;
+ int rx_equ_dfe_4;
+ int rx_equ_dfe_5;
+ int rx_equ_dfe_6;
+ int rx_equ_dfe_7;
+ int rx_equ_dfe_8;
+ int rx_equ_dfe_9;
+ int rx_equ_dfe_10;
+ int rx_equ_dfe_11;
+ int rx_equ_dfe_12;
+ int tx_equ_pre1;
+ int tx_equ_pre3;
+ int tx_equ_atten;
+ int tx_equ_post1;
+ int tx_equ_pre2;
};
struct ice_regdump_to_ethtool {
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index 5412eff8ef23..aceec184e89b 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -1605,22 +1605,19 @@ void ice_fdir_replay_fltrs(struct ice_pf *pf)
*/
int ice_fdir_create_dflt_rules(struct ice_pf *pf)
{
+ static const enum ice_fltr_ptype dflt_rules[] = {
+ ICE_FLTR_PTYPE_NONF_IPV4_TCP, ICE_FLTR_PTYPE_NONF_IPV4_UDP,
+ ICE_FLTR_PTYPE_NONF_IPV6_TCP, ICE_FLTR_PTYPE_NONF_IPV6_UDP,
+ };
int err;
/* Create perfect TCP and UDP rules in hardware. */
- err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_TCP);
- if (err)
- return err;
-
- err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_UDP);
- if (err)
- return err;
+ for (int i = 0; i < ARRAY_SIZE(dflt_rules); i++) {
+ err = ice_create_init_fdir_rule(pf, dflt_rules[i]);
- err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_TCP);
- if (err)
- return err;
-
- err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_UDP);
+ if (err)
+ break;
+ }
return err;
}
@@ -1830,11 +1827,12 @@ static int
ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
struct ice_fdir_fltr *input)
{
- u16 dest_vsi, q_index = 0;
+ s16 q_index = ICE_FDIR_NO_QUEUE_IDX;
u16 orig_q_index = 0;
struct ice_pf *pf;
struct ice_hw *hw;
int flow_type;
+ u16 dest_vsi;
u8 dest_ctl;
if (!vsi || !fsp || !input)
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c
index 26b357c0ae15..b29fbdec9442 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.c
@@ -1121,7 +1121,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
* ice_fdir_has_frag - does flow type have 2 ptypes
* @flow: flow ptype
*
- * returns true is there is a fragment packet for this ptype
+ * Return: true if there is a fragment packet for this ptype
*/
bool ice_fdir_has_frag(enum ice_fltr_ptype flow)
{
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h
index ab5b118daa2d..820023c0271f 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.h
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.h
@@ -53,6 +53,8 @@
*/
#define ICE_FDIR_IPV4_PKT_FLAG_MF 0x20
+#define ICE_FDIR_NO_QUEUE_IDX -1
+
enum ice_fltr_prgm_desc_dest {
ICE_FLTR_PRGM_DESC_DEST_DROP_PKT,
ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX,
@@ -186,7 +188,7 @@ struct ice_fdir_fltr {
u16 flex_fltr;
/* filter control */
- u16 q_index;
+ s16 q_index;
u16 orig_q_index;
u16 dest_vsi;
u8 dest_ctl;
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index ed95072ca6e3..c0dbec369366 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -574,9 +574,7 @@ ice_destroy_tunnel_end:
int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
unsigned int idx, struct udp_tunnel_info *ti)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
enum ice_tunnel_type tnl_type;
int status;
u16 index;
@@ -598,9 +596,7 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
unsigned int idx, struct udp_tunnel_info *ti)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
enum ice_tunnel_type tnl_type;
int status;
@@ -1479,7 +1475,7 @@ static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)
per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;
hw->blk[blk].masks.count = per_pf;
- hw->blk[blk].masks.first = hw->pf_id * per_pf;
+ hw->blk[blk].masks.first = hw->logical_pf_id * per_pf;
memset(hw->blk[blk].masks.masks, 0, sizeof(hw->blk[blk].masks.masks));
@@ -3043,16 +3039,16 @@ ice_disable_fd_swap(struct ice_hw *hw, u8 prof_id)
* the ID value used here.
*/
int
-ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
- const struct ice_ptype_attributes *attr, u16 attr_cnt,
- struct ice_fv_word *es, u16 *masks, bool symm, bool fd_swap)
+ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id,
+ unsigned long *ptypes, const struct ice_ptype_attributes *attr,
+ u16 attr_cnt, struct ice_fv_word *es, u16 *masks, bool symm,
+ bool fd_swap)
{
- u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
struct ice_prof_map *prof;
- u8 byte = 0;
- u8 prof_id;
int status;
+ u8 prof_id;
+ u16 ptype;
bitmap_zero(ptgs_used, ICE_XLT1_CNT);
@@ -3102,57 +3098,35 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
prof->context = 0;
/* build list of ptgs */
- while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
- u8 bit;
+ for_each_set_bit(ptype, ptypes, ICE_FLOW_PTYPE_MAX) {
+ u8 ptg;
- if (!ptypes[byte]) {
- bytes--;
- byte++;
+ /* The package should place all ptypes in a non-zero
+ * PTG, so the following call should never fail.
+ */
+ if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
continue;
- }
-
- /* Examine 8 bits per byte */
- for_each_set_bit(bit, (unsigned long *)&ptypes[byte],
- BITS_PER_BYTE) {
- u16 ptype;
- u8 ptg;
- ptype = byte * BITS_PER_BYTE + bit;
-
- /* The package should place all ptypes in a non-zero
- * PTG, so the following call should never fail.
- */
- if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
- continue;
+ /* If PTG is already added, skip and continue */
+ if (test_bit(ptg, ptgs_used))
+ continue;
- /* If PTG is already added, skip and continue */
- if (test_bit(ptg, ptgs_used))
- continue;
+ set_bit(ptg, ptgs_used);
+		/* Check to see if there are any attributes for this ptype,
+		 * and add them if found.
+		 */
+ status = ice_add_prof_attrib(prof, ptg, ptype, attr, attr_cnt);
+ if (status == -ENOSPC)
+ break;
+ if (status) {
+			/* This is simply a ptype/PTG with no attribute */
+ prof->ptg[prof->ptg_cnt] = ptg;
+ prof->attr[prof->ptg_cnt].flags = 0;
+ prof->attr[prof->ptg_cnt].mask = 0;
- __set_bit(ptg, ptgs_used);
- /* Check to see there are any attributes for
- * this PTYPE, and add them if found.
- */
- status = ice_add_prof_attrib(prof, ptg, ptype,
- attr, attr_cnt);
- if (status == -ENOSPC)
+ if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
break;
- if (status) {
- /* This is simple a PTYPE/PTG with no
- * attribute
- */
- prof->ptg[prof->ptg_cnt] = ptg;
- prof->attr[prof->ptg_cnt].flags = 0;
- prof->attr[prof->ptg_cnt].mask = 0;
-
- if (++prof->ptg_cnt >=
- ICE_MAX_PTG_PER_PROFILE)
- break;
- }
}
-
- bytes--;
- byte++;
}
list_add(&prof->list, &hw->blk[blk].es.prof_map);
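The rewrite above drops the old byte-by-byte walk of a u8 ptypes[] array in favour of a single for_each_set_bit() pass over an unsigned long bitmap. A minimal userspace sketch of that bitmap walk, with next_set_bit() as a local stand-in for the kernel iterator and the PTYPE_MAX value and example bit positions chosen purely for illustration:

#include <stdio.h>

#define PTYPE_MAX	128	/* stand-in for ICE_FLOW_PTYPE_MAX, not the real value */
#define BITS_PER_LONG	(8 * (unsigned int)sizeof(unsigned long))

/* Mark ptype 'nr' in the bitmap, like set_bit() in the kernel. */
static void set_ptype(unsigned long *map, unsigned int nr)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

/* Return the next set bit at or after 'start', or 'nbits' if none is left. */
static unsigned int next_set_bit(const unsigned long *map, unsigned int nbits,
				 unsigned int start)
{
	for (unsigned int i = start; i < nbits; i++)
		if (map[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
			return i;
	return nbits;
}

int main(void)
{
	unsigned long ptypes[PTYPE_MAX / BITS_PER_LONG + 1] = { 0 };

	set_ptype(ptypes, 3);
	set_ptype(ptypes, 69);

	/* Same walk as: for_each_set_bit(ptype, ptypes, ICE_FLOW_PTYPE_MAX) */
	for (unsigned int ptype = next_set_bit(ptypes, PTYPE_MAX, 0);
	     ptype < PTYPE_MAX;
	     ptype = next_set_bit(ptypes, PTYPE_MAX, ptype + 1))
		printf("ptype %u is set\n", ptype);

	return 0;
}

Either form visits exactly the set ptypes; the bitmap version just avoids re-deriving the ptype index from separate byte and bit offsets.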
@@ -3604,6 +3578,19 @@ ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
}
/**
+ * ice_set_tcam_flags - set TCAM flag don't care mask
+ * @mask: mask for flags
+ * @dc_mask: pointer to the don't care mask
+ */
+static void ice_set_tcam_flags(u16 mask, u8 dc_mask[ICE_TCAM_KEY_VAL_SZ])
+{
+ u16 inverted_mask = ~mask;
+
+ /* flags are lowest u16 */
+ put_unaligned_le16(inverted_mask, dc_mask);
+}
+
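ice_set_tcam_flags() above encodes the caller's flag mask as a TCAM don't-care pattern: bits that must match stay 0 in the value written out, every other bit becomes don't-care, and only the lowest u16 of the key carries flags. A standalone sketch of that encoding, with put_unaligned_le16() open-coded and the mask value chosen only as an example:

#include <stdio.h>
#include <stdint.h>

#define TCAM_KEY_VAL_SZ 5	/* stand-in for ICE_TCAM_KEY_VAL_SZ */

/* Store a 16-bit value little-endian into an unaligned byte buffer. */
static void put_le16(uint16_t val, uint8_t *dst)
{
	dst[0] = val & 0xff;
	dst[1] = val >> 8;
}

/* Flags occupy the lowest u16 of the key; a 1 bit in dc means "don't care". */
static void set_tcam_flags(uint16_t mask, uint8_t dc[TCAM_KEY_VAL_SZ])
{
	put_le16((uint16_t)~mask, dc);
}

int main(void)
{
	uint8_t dc[TCAM_KEY_VAL_SZ] = { 0 };
	uint16_t mask = 0x0030;	/* example: match only flag bits 4 and 5 */

	set_tcam_flags(mask, dc);
	printf("dc[0]=0x%02x dc[1]=0x%02x\n", dc[0], dc[1]);
	/* prints dc[0]=0xcf dc[1]=0xff: bits 4-5 must match, the rest are ignored */
	return 0;
}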
+/**
* ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list
* @hw: pointer to the HW struct
* @idx: the index of the TCAM entry to remove
@@ -3673,6 +3660,9 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
if (!p)
return -ENOMEM;
+ /* set don't care masks for TCAM flags */
+ ice_set_tcam_flags(tcam->attr.mask, dc_msk);
+
status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
tcam->ptg, vsig, 0, tcam->attr.flags,
vl_msk, dc_msk, nm_msk);
@@ -3699,6 +3689,34 @@ err_ice_prof_tcam_ena_dis:
}
/**
+ * ice_ptg_attr_in_use - determine if PTG and attribute pair is in use
+ * @ptg_attr: pointer to the PTG and attribute pair to check
+ * @ptgs_used: bitmap that denotes which PTGs are in use
+ * @attr_used: array of PTG and attributes pairs already used
+ * @attr_cnt: count of entries in the attr_used array
+ *
+ * Return: true if the PTG and attribute pair is in use, false otherwise.
+ */
+static bool
+ice_ptg_attr_in_use(struct ice_tcam_inf *ptg_attr, unsigned long *ptgs_used,
+ struct ice_tcam_inf *attr_used[], u16 attr_cnt)
+{
+ u16 i;
+
+ if (!test_bit(ptg_attr->ptg, ptgs_used))
+ return false;
+
+ /* the PTG is used, so now look for correct attributes */
+ for (i = 0; i < attr_cnt; i++)
+ if (attr_used[i]->ptg == ptg_attr->ptg &&
+ attr_used[i]->attr.flags == ptg_attr->attr.flags &&
+ attr_used[i]->attr.mask == ptg_attr->attr.mask)
+ return true;
+
+ return false;
+}
+
+/**
* ice_adj_prof_priorities - adjust profile based on priorities
* @hw: pointer to the HW struct
* @blk: hardware block
@@ -3710,10 +3728,16 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
struct list_head *chg)
{
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
+ struct ice_tcam_inf **attr_used;
struct ice_vsig_prof *t;
- int status;
+ u16 attr_used_cnt = 0;
+ int status = 0;
u16 idx;
+ attr_used = kcalloc(ICE_MAX_PTG_ATTRS, sizeof(*attr_used), GFP_KERNEL);
+ if (!attr_used)
+ return -ENOMEM;
+
bitmap_zero(ptgs_used, ICE_XLT1_CNT);
idx = vsig & ICE_VSIG_IDX_M;
@@ -3731,11 +3755,15 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
u16 i;
for (i = 0; i < t->tcam_count; i++) {
+ bool used;
+
/* Scan the priorities from newest to oldest.
* Make sure that the newest profiles take priority.
*/
- if (test_bit(t->tcam[i].ptg, ptgs_used) &&
- t->tcam[i].in_use) {
+ used = ice_ptg_attr_in_use(&t->tcam[i], ptgs_used,
+ attr_used, attr_used_cnt);
+
+ if (used && t->tcam[i].in_use) {
/* need to mark this PTG as never match, as it
* was already in use and therefore duplicate
* (and lower priority)
@@ -3745,9 +3773,8 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
&t->tcam[i],
chg);
if (status)
- return status;
- } else if (!test_bit(t->tcam[i].ptg, ptgs_used) &&
- !t->tcam[i].in_use) {
+ goto free_attr_used;
+ } else if (!used && !t->tcam[i].in_use) {
 				/* need to enable this PTG, as it is not in use
* and not enabled (highest priority)
*/
@@ -3756,15 +3783,21 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
&t->tcam[i],
chg);
if (status)
- return status;
+ goto free_attr_used;
}
/* keep track of used ptgs */
- __set_bit(t->tcam[i].ptg, ptgs_used);
+ set_bit(t->tcam[i].ptg, ptgs_used);
+ if (attr_used_cnt < ICE_MAX_PTG_ATTRS)
+ attr_used[attr_used_cnt++] = &t->tcam[i];
+ else
+ ice_debug(hw, ICE_DBG_INIT, "Warn: ICE_MAX_PTG_ATTRS exceeded\n");
}
}
- return 0;
+free_attr_used:
+ kfree(attr_used);
+ return status;
}
/**
@@ -3847,11 +3880,15 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
p->vsig = vsig;
p->tcam_idx = t->tcam[i].tcam_idx;
+ /* set don't care masks for TCAM flags */
+ ice_set_tcam_flags(t->tcam[i].attr.mask, dc_msk);
+
/* write the TCAM entry */
status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
t->tcam[i].prof_id,
- t->tcam[i].ptg, vsig, 0, 0,
- vl_msk, dc_msk, nm_msk);
+ t->tcam[i].ptg, vsig, 0,
+ t->tcam[i].attr.flags, vl_msk,
+ dc_msk, nm_msk);
if (status) {
devm_kfree(ice_hw_to_dev(hw), p);
goto err_ice_add_prof_id_vsig;
@@ -4165,9 +4202,6 @@ ice_flow_assoc_fdir_prof(struct ice_hw *hw, enum ice_block blk,
u16 vsi_num;
int status;
- if (blk != ICE_BLK_FD)
- return -EINVAL;
-
vsi_num = ice_get_hw_vsi_num(hw, dest_vsi);
status = ice_add_prof_id_flow(hw, blk, vsi_num, hdl);
if (status) {
@@ -4176,6 +4210,9 @@ ice_flow_assoc_fdir_prof(struct ice_hw *hw, enum ice_block blk,
return status;
}
+ if (blk != ICE_BLK_FD)
+ return 0;
+
vsi_num = ice_get_hw_vsi_num(hw, fdir_vsi);
status = ice_add_prof_id_flow(hw, blk, vsi_num, hdl);
if (status) {
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index 28b0897adf32..ee5d9f9c9d53 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -39,9 +39,10 @@ bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype);
/* XLT2/VSI group functions */
int
-ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
- const struct ice_ptype_attributes *attr, u16 attr_cnt,
- struct ice_fv_word *es, u16 *masks, bool symm, bool fd_swap);
+ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id,
+ unsigned long *ptypes, const struct ice_ptype_attributes *attr,
+ u16 attr_cnt, struct ice_fv_word *es, u16 *masks, bool symm,
+ bool fd_swap);
struct ice_prof_map *
ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id);
int
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index 817beca591e0..80c9e7c749c2 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -187,6 +187,7 @@ struct ice_prof_map {
};
#define ICE_INVALID_TCAM 0xFFFF
+#define ICE_MAX_PTG_ATTRS 1024
struct ice_tcam_inf {
u16 tcam_idx;
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index d97b751052f2..c9b6d0a84bd1 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -5,6 +5,38 @@
#include "ice_flow.h"
#include <net/gre.h>
+/* Size of known protocol header fields */
+#define ICE_FLOW_FLD_SZ_ETH_TYPE 2
+#define ICE_FLOW_FLD_SZ_VLAN 2
+#define ICE_FLOW_FLD_SZ_IPV4_ADDR 4
+#define ICE_FLOW_FLD_SZ_IPV6_ADDR 16
+#define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
+#define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
+#define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
+#define ICE_FLOW_FLD_SZ_IPV4_ID 2
+#define ICE_FLOW_FLD_SZ_IPV6_ID 4
+#define ICE_FLOW_FLD_SZ_IP_CHKSUM 2
+#define ICE_FLOW_FLD_SZ_TCP_CHKSUM 2
+#define ICE_FLOW_FLD_SZ_UDP_CHKSUM 2
+#define ICE_FLOW_FLD_SZ_SCTP_CHKSUM 4
+#define ICE_FLOW_FLD_SZ_IP_DSCP 1
+#define ICE_FLOW_FLD_SZ_IP_TTL 1
+#define ICE_FLOW_FLD_SZ_IP_PROT 1
+#define ICE_FLOW_FLD_SZ_PORT 2
+#define ICE_FLOW_FLD_SZ_TCP_FLAGS 1
+#define ICE_FLOW_FLD_SZ_ICMP_TYPE 1
+#define ICE_FLOW_FLD_SZ_ICMP_CODE 1
+#define ICE_FLOW_FLD_SZ_ARP_OPER 2
+#define ICE_FLOW_FLD_SZ_GRE_KEYID 4
+#define ICE_FLOW_FLD_SZ_GTP_TEID 4
+#define ICE_FLOW_FLD_SZ_GTP_QFI 2
+#define ICE_FLOW_FLD_SZ_PFCP_SEID 8
+#define ICE_FLOW_FLD_SZ_ESP_SPI 4
+#define ICE_FLOW_FLD_SZ_AH_SPI 4
+#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI 4
+#define ICE_FLOW_FLD_SZ_L2TPV2_SESS_ID 2
+#define ICE_FLOW_FLD_SZ_L2TPV2_LEN_SESS_ID 2
+
/* Describe properties of a protocol header field */
struct ice_flow_field_info {
enum ice_flow_seg_hdr hdr;
@@ -20,6 +52,7 @@ struct ice_flow_field_info {
.mask = 0, \
}
+/* QFI: 6-bit field in GTP-U PDU Session Container (3GPP TS 38.415) */
#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
.hdr = _hdr, \
.off = (_offset_bytes) * BITS_PER_BYTE, \
@@ -61,7 +94,33 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
/* ICE_FLOW_FIELD_IDX_IPV6_SA */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, sizeof(struct in6_addr)),
/* ICE_FLOW_FIELD_IDX_IPV6_DA */
- ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, sizeof(struct in6_addr)),
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV4_CHKSUM */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 10, ICE_FLOW_FLD_SZ_IP_CHKSUM),
+ /* ICE_FLOW_FIELD_IDX_IPV4_FRAG */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
+ ICE_FLOW_FLD_SZ_IPV4_ID),
+ /* ICE_FLOW_FIELD_IDX_IPV6_FRAG */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
+ ICE_FLOW_FLD_SZ_IPV6_ID),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
+ ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
+ ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
+ ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
+ ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
+ ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
+ ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
/* Transport */
/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, sizeof(__be16)),
@@ -76,7 +135,14 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)),
/* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
- ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, 1),
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
+ /* ICE_FLOW_FIELD_IDX_TCP_CHKSUM */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 16, ICE_FLOW_FLD_SZ_TCP_CHKSUM),
+ /* ICE_FLOW_FIELD_IDX_UDP_CHKSUM */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 6, ICE_FLOW_FLD_SZ_UDP_CHKSUM),
+ /* ICE_FLOW_FIELD_IDX_SCTP_CHKSUM */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 8,
+ ICE_FLOW_FLD_SZ_SCTP_CHKSUM),
/* ARP */
/* ICE_FLOW_FIELD_IDX_ARP_SIP */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, sizeof(struct in_addr)),
@@ -108,9 +174,17 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22, sizeof(__be16),
0x3f00),
/* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
- ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12, sizeof(__be32)),
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
+ ICE_FLOW_FLD_SZ_GTP_TEID),
+ /* ICE_FLOW_FIELD_IDX_GTPU_UP_QFI */
+ ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_UP, 22,
+ ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
/* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
- ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12, sizeof(__be32)),
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
+ ICE_FLOW_FLD_SZ_GTP_TEID),
+ /* ICE_FLOW_FIELD_IDX_GTPU_DWN_QFI */
+ ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_DWN, 22,
+ ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
/* PPPoE */
/* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2, sizeof(__be16)),
@@ -128,7 +202,16 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4, sizeof(__be32)),
/* NAT_T_ESP */
/* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
- ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8, sizeof(__be32)),
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
+ ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
+ /* L2TPV2 */
+ /* ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV2, 12,
+ ICE_FLOW_FLD_SZ_L2TPV2_SESS_ID),
+ /* L2TPV2_LEN */
+ /* ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV2, 14,
+ ICE_FLOW_FLD_SZ_L2TPV2_LEN_SESS_ID),
};
/* Bitmaps indicating relevant packet types for a particular protocol header
@@ -137,9 +220,9 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
*/
static const u32 ice_ptypes_mac_ofos[] = {
0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
- 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
- 0x00400000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x0000077E, 0x000003FF, 0x00000000, 0x00000000,
+ 0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00000707,
+ 0xFFFFF000, 0x000003FF, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -162,10 +245,10 @@ static const u32 ice_ptypes_macvlan_il[] = {
* include IPv4 other PTYPEs
*/
static const u32 ice_ptypes_ipv4_ofos[] = {
- 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
+ 0x1D800000, 0xBFBF7800, 0x000001DF, 0x00000000,
0x00000000, 0x00000155, 0x00000000, 0x00000000,
- 0x00000000, 0x000FC000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x000FC000, 0x000002A0, 0x00000000,
+ 0x00015000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -176,10 +259,10 @@ static const u32 ice_ptypes_ipv4_ofos[] = {
* IPv4 other PTYPEs
*/
static const u32 ice_ptypes_ipv4_ofos_all[] = {
- 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
+ 0x1D800000, 0x27BF7800, 0x00000000, 0x00000000,
0x00000000, 0x00000155, 0x00000000, 0x00000000,
- 0x00000000, 0x000FC000, 0x83E0F800, 0x00000101,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
+ 0x3FFD5000, 0x00000000, 0x02FBEFBC, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -191,7 +274,7 @@ static const u32 ice_ptypes_ipv4_il[] = {
0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
0x0000000E, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x001FF800, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0xC0FC0000, 0x0000000F, 0xBC0BC0BC, 0x00000BC0,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -202,10 +285,10 @@ static const u32 ice_ptypes_ipv4_il[] = {
* include IPv6 other PTYPEs
*/
static const u32 ice_ptypes_ipv6_ofos[] = {
- 0x00000000, 0x00000000, 0x77000000, 0x10002000,
+ 0x00000000, 0x00000000, 0x76000000, 0x10002000,
0x00000000, 0x000002AA, 0x00000000, 0x00000000,
- 0x00000000, 0x03F00000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x03F00000, 0x00000540, 0x00000000,
+ 0x0002A000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -216,10 +299,10 @@ static const u32 ice_ptypes_ipv6_ofos[] = {
* IPv6 other PTYPEs
*/
static const u32 ice_ptypes_ipv6_ofos_all[] = {
- 0x00000000, 0x00000000, 0x77000000, 0x10002000,
- 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
- 0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x76000000, 0xFEFDE000,
+ 0x0000077E, 0x000002AA, 0x00000000, 0x00000000,
+ 0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
+ 0xC002A000, 0x000003FF, 0xBC000000, 0x0002FBEF,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -231,7 +314,7 @@ static const u32 ice_ptypes_ipv6_il[] = {
0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
0x00000770, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x3F000000, 0x000003F0, 0x02F02F00, 0x0002F02F,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -304,8 +387,8 @@ static const u32 ice_ptypes_ipv6_il_no_l4[] = {
static const u32 ice_ptypes_udp_il[] = {
0x81000000, 0x20204040, 0x04000010, 0x80810102,
0x00000040, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00410000, 0x90842000, 0x00000007,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00410000, 0x908427E0, 0x00000007,
+ 0x0413F000, 0x00000041, 0x10410410, 0x00004104,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -317,7 +400,7 @@ static const u32 ice_ptypes_tcp_il[] = {
0x04000000, 0x80810102, 0x10000040, 0x02040408,
0x00000102, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00820000, 0x21084000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x08200000, 0x00000082, 0x20820820, 0x00008208,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -329,7 +412,7 @@ static const u32 ice_ptypes_sctp_il[] = {
0x08000000, 0x01020204, 0x20000081, 0x04080810,
0x00000204, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x01040000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x10400000, 0x00000104, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -353,7 +436,7 @@ static const u32 ice_ptypes_icmp_il[] = {
0x00000000, 0x02040408, 0x40000102, 0x08101020,
0x00000408, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x42108000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x20800000, 0x00000208, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -365,7 +448,7 @@ static const u32 ice_ptypes_gre_of[] = {
0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
0x0000017E, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0xBEFBEFBC, 0x0002FBEF,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -374,7 +457,7 @@ static const u32 ice_ptypes_gre_of[] = {
/* Packet types for packets with an Innermost/Last MAC header */
static const u32 ice_ptypes_mac_il[] = {
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x20000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -388,7 +471,7 @@ static const u32 ice_ptypes_mac_il[] = {
static const u32 ice_ptypes_gtpc[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000180, 0x00000000,
+ 0x00000000, 0x00000000, 0x000001E0, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -1421,7 +1504,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
}
/* Add a HW profile for this flow profile */
- status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
+ status = ice_add_prof(hw, blk, prof_id, params->ptypes,
params->attr, params->attr_cnt, params->es,
params->mask, symm, true);
if (status) {
@@ -1617,7 +1700,7 @@ ice_flow_set_parser_prof(struct ice_hw *hw, u16 dest_vsi, u16 fdir_vsi,
break;
}
- status = ice_add_prof(hw, blk, id, (u8 *)prof->ptypes,
+ status = ice_add_prof(hw, blk, id, prof->ptypes,
params->attr, params->attr_cnt,
params->es, params->mask, false, false);
if (status)
@@ -2325,6 +2408,130 @@ static void ice_rss_set_symm(struct ice_hw *hw, struct ice_flow_prof *prof)
}
/**
+ * ice_rss_cfg_raw_symm - Configure symmetric RSS for a raw parser profile
+ * @hw: device HW
+ * @prof: parser profile describing extracted FV (field vector) entries
+ * @prof_id: RSS profile identifier used to program symmetry registers
+ *
+ * The routine scans the parser profile's FV entries and looks for
+ * direction-sensitive pairs (L3 src/dst, L4 src/dst). When a pair is found,
+ * it programs XOR-based symmetry so that flows hash identically regardless
+ * of packet direction. This preserves CPU affinity for the same 5-tuple.
+ *
+ * Notes:
+ * - The size of each logical field (IPv4/IPv6 address, L4 port) is expressed
+ * in units of ICE_FLOW_FV_EXTRACT_SZ so we can step across fv[] correctly.
+ * - We guard against out-of-bounds access before looking at fv[i + len].
+ */
+static void ice_rss_cfg_raw_symm(struct ice_hw *hw,
+ const struct ice_parser_profile *prof,
+ u64 prof_id)
+{
+ for (size_t i = 0; i < prof->fv_num; i++) {
+ u8 proto_id = prof->fv[i].proto_id;
+ u16 src_off = 0, dst_off = 0;
+ size_t src_idx, dst_idx;
+ bool is_matched = false;
+ unsigned int len = 0;
+
+ switch (proto_id) {
+ /* IPv4 address pairs (outer/inner variants) */
+ case ICE_PROT_IPV4_OF_OR_S:
+ case ICE_PROT_IPV4_IL:
+ case ICE_PROT_IPV4_IL_IL:
+ len = ICE_FLOW_FLD_SZ_IPV4_ADDR /
+ ICE_FLOW_FV_EXTRACT_SZ;
+ src_off = ICE_FLOW_FIELD_IPV4_SRC_OFFSET;
+ dst_off = ICE_FLOW_FIELD_IPV4_DST_OFFSET;
+ break;
+
+ /* IPv6 address pairs (outer/inner variants) */
+ case ICE_PROT_IPV6_OF_OR_S:
+ case ICE_PROT_IPV6_IL:
+ case ICE_PROT_IPV6_IL_IL:
+ len = ICE_FLOW_FLD_SZ_IPV6_ADDR /
+ ICE_FLOW_FV_EXTRACT_SZ;
+ src_off = ICE_FLOW_FIELD_IPV6_SRC_OFFSET;
+ dst_off = ICE_FLOW_FIELD_IPV6_DST_OFFSET;
+ break;
+
+ /* L4 port pairs (TCP/UDP/SCTP) */
+ case ICE_PROT_TCP_IL:
+ case ICE_PROT_UDP_IL_OR_S:
+ case ICE_PROT_SCTP_IL:
+ len = ICE_FLOW_FLD_SZ_PORT / ICE_FLOW_FV_EXTRACT_SZ;
+ src_off = ICE_FLOW_FIELD_SRC_PORT_OFFSET;
+ dst_off = ICE_FLOW_FIELD_DST_PORT_OFFSET;
+ break;
+
+ default:
+ continue;
+ }
+
+ /* Bounds check before accessing fv[i + len]. */
+ if (i + len >= prof->fv_num)
+ continue;
+
+ /* Verify src/dst pairing for this protocol id. */
+ is_matched = prof->fv[i].offset == src_off &&
+ prof->fv[i + len].proto_id == proto_id &&
+ prof->fv[i + len].offset == dst_off;
+ if (!is_matched)
+ continue;
+
+ /* Program XOR symmetry for this field pair. */
+ src_idx = i;
+ dst_idx = i + len;
+
+ ice_rss_config_xor(hw, prof_id, src_idx, dst_idx, len);
+
+ /* Skip over the pair we just handled; the loop's ++i advances
+		 * one more element, hence the --i below.
+ */
+ i += (2 * len);
+ /* not strictly needed; keeps static analyzers happy */
+ if (i == 0)
+ break;
+ --i;
+ }
+}
+
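The property ice_rss_cfg_raw_symm() is after can be shown in isolation: once the src/dst members of a pair are XOR-folded before hashing, the result no longer depends on packet direction, so both halves of a flow land in the same RSS bucket. A toy sketch of that idea; the hash below is a placeholder, not the Toeplitz function the hardware actually uses:

#include <stdio.h>
#include <stdint.h>

/* Toy FNV-style hash over a 4-tuple; real hardware hashes the key with Toeplitz. */
static uint32_t toy_hash(uint32_t a, uint32_t b, uint16_t c, uint16_t d)
{
	uint32_t h = 2166136261u;

	h = (h ^ a) * 16777619u;
	h = (h ^ b) * 16777619u;
	h = (h ^ c) * 16777619u;
	h = (h ^ d) * 16777619u;
	return h;
}

/* Symmetric variant: fold the src/dst pairs with XOR before hashing. */
static uint32_t sym_hash(uint32_t sip, uint32_t dip, uint16_t sp, uint16_t dp)
{
	return toy_hash(sip ^ dip, 0, sp ^ dp, 0);
}

int main(void)
{
	uint32_t fwd = sym_hash(0x0a000001, 0x0a000002, 1234, 80);
	uint32_t rev = sym_hash(0x0a000002, 0x0a000001, 80, 1234);

	/* Both directions of the same flow produce the same hash. */
	printf("fwd=0x%08x rev=0x%08x same=%d\n", fwd, rev, fwd == rev);
	return 0;
}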
+/* Max registers index per packet profile */
+#define ICE_SYMM_REG_INDEX_MAX 6
+
+/**
+ * ice_rss_update_raw_symm - update symmetric hash configuration
+ * for raw pattern
+ * @hw: pointer to the hardware structure
+ * @cfg: configure parameters for raw pattern
+ * @id: profile tracking ID
+ *
+ * Update symmetric hash configuration for raw pattern if required.
+ * Otherwise only clear to default.
+ */
+void
+ice_rss_update_raw_symm(struct ice_hw *hw,
+ struct ice_rss_raw_cfg *cfg, u64 id)
+{
+ struct ice_prof_map *map;
+ u8 prof_id, m;
+
+ mutex_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
+ map = ice_search_prof_id(hw, ICE_BLK_RSS, id);
+ if (map)
+ prof_id = map->prof_id;
+ mutex_unlock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
+ if (!map)
+ return;
+ /* clear to default */
+ for (m = 0; m < ICE_SYMM_REG_INDEX_MAX; m++)
+ wr32(hw, GLQF_HSYMM(prof_id, m), 0);
+
+ if (cfg->symm)
+ ice_rss_cfg_raw_symm(hw, &cfg->prof, prof_id);
+}
+
+/**
* ice_add_rss_cfg_sync - add an RSS configuration
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
@@ -2573,38 +2780,38 @@ ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
* convert its values to their appropriate flow L3, L4 values.
*/
#define ICE_FLOW_AVF_RSS_IPV4_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4))
+ (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4))
#define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP))
+ (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP))
#define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP))
+ (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP))
#define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \
(ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \
- ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP))
+ ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP))
#define ICE_FLOW_AVF_RSS_IPV6_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6))
+ (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6))
#define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP))
+ (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP))
#define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP))
+ (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP))
#define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \
(ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \
- ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP))
+ ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP))
/**
* ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
* @hw: pointer to the hardware structure
* @vsi: VF's VSI
- * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
+ * @avf_hash: hash bit fields (LIBIE_FILTER_PCTYPE_*) to configure
*
* This function will take the hash bitmap provided by the AVF driver via a
* message, convert it to ICE-compatible values, and configure RSS flow
@@ -2621,8 +2828,7 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, u64 avf_hash)
return -EINVAL;
vsi_handle = vsi->idx;
- if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
- !ice_is_vsi_valid(hw, vsi_handle))
+ if (!avf_hash || !ice_is_vsi_valid(hw, vsi_handle))
return -EINVAL;
/* Make sure no unsupported bits are specified */
@@ -2658,11 +2864,11 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, u64 avf_hash)
ICE_FLOW_HASH_UDP_PORT;
hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS;
} else if (hash_flds &
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) {
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP)) {
rss_hash = ICE_FLOW_HASH_IPV4 |
ICE_FLOW_HASH_SCTP_PORT;
hash_flds &=
- ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP);
+ ~BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP);
}
} else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) {
if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) {
@@ -2679,11 +2885,11 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, u64 avf_hash)
ICE_FLOW_HASH_UDP_PORT;
hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS;
} else if (hash_flds &
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) {
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP)) {
rss_hash = ICE_FLOW_HASH_IPV6 |
ICE_FLOW_HASH_SCTP_PORT;
hash_flds &=
- ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP);
+ ~BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP);
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index 6cb7bb879c98..6c6cdc8addb1 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -4,6 +4,8 @@
#ifndef _ICE_FLOW_H_
#define _ICE_FLOW_H_
+#include <linux/net/intel/libie/pctype.h>
+
#include "ice_flex_type.h"
#include "ice_parser.h"
@@ -20,6 +22,15 @@
#define ICE_FLOW_HASH_IPV6 \
(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | \
BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA))
+#define ICE_FLOW_HASH_IPV6_PRE32 \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA) | \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA))
+#define ICE_FLOW_HASH_IPV6_PRE48 \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA) | \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA))
+#define ICE_FLOW_HASH_IPV6_PRE64 \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA) | \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA))
#define ICE_FLOW_HASH_TCP_PORT \
(BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | \
BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT))
@@ -38,6 +49,33 @@
#define ICE_HASH_SCTP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT)
#define ICE_HASH_SCTP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT)
+#define ICE_HASH_TCP_IPV6_PRE32 \
+ (ICE_FLOW_HASH_IPV6_PRE32 | ICE_FLOW_HASH_TCP_PORT)
+#define ICE_HASH_UDP_IPV6_PRE32 \
+ (ICE_FLOW_HASH_IPV6_PRE32 | ICE_FLOW_HASH_UDP_PORT)
+#define ICE_HASH_SCTP_IPV6_PRE32 \
+ (ICE_FLOW_HASH_IPV6_PRE32 | ICE_FLOW_HASH_SCTP_PORT)
+#define ICE_HASH_TCP_IPV6_PRE48 \
+ (ICE_FLOW_HASH_IPV6_PRE48 | ICE_FLOW_HASH_TCP_PORT)
+#define ICE_HASH_UDP_IPV6_PRE48 \
+ (ICE_FLOW_HASH_IPV6_PRE48 | ICE_FLOW_HASH_UDP_PORT)
+#define ICE_HASH_SCTP_IPV6_PRE48 \
+ (ICE_FLOW_HASH_IPV6_PRE48 | ICE_FLOW_HASH_SCTP_PORT)
+#define ICE_HASH_TCP_IPV6_PRE64 \
+ (ICE_FLOW_HASH_IPV6_PRE64 | ICE_FLOW_HASH_TCP_PORT)
+#define ICE_HASH_UDP_IPV6_PRE64 \
+ (ICE_FLOW_HASH_IPV6_PRE64 | ICE_FLOW_HASH_UDP_PORT)
+#define ICE_HASH_SCTP_IPV6_PRE64 \
+ (ICE_FLOW_HASH_IPV6_PRE64 | ICE_FLOW_HASH_SCTP_PORT)
+
+#define ICE_FLOW_HASH_GTP_TEID \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID))
+
+#define ICE_FLOW_HASH_GTP_IPV4_TEID \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_TEID)
+#define ICE_FLOW_HASH_GTP_IPV6_TEID \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_TEID)
+
#define ICE_FLOW_HASH_GTP_C_TEID \
(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID))
@@ -126,6 +164,23 @@
#define ICE_FLOW_HASH_NAT_T_ESP_IPV6_SPI \
(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_NAT_T_ESP_SPI)
+#define ICE_FLOW_HASH_L2TPV2_SESS_ID \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID))
+#define ICE_FLOW_HASH_L2TPV2_SESS_ID_ETH \
+ (ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_L2TPV2_SESS_ID)
+
+#define ICE_FLOW_HASH_L2TPV2_LEN_SESS_ID \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID))
+#define ICE_FLOW_HASH_L2TPV2_LEN_SESS_ID_ETH \
+ (ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_L2TPV2_LEN_SESS_ID)
+
+#define ICE_FLOW_FIELD_IPV4_SRC_OFFSET 12
+#define ICE_FLOW_FIELD_IPV4_DST_OFFSET 16
+#define ICE_FLOW_FIELD_IPV6_SRC_OFFSET 8
+#define ICE_FLOW_FIELD_IPV6_DST_OFFSET 24
+#define ICE_FLOW_FIELD_SRC_PORT_OFFSET 0
+#define ICE_FLOW_FIELD_DST_PORT_OFFSET 2
+
/* Protocol header fields within a packet segment. A segment consists of one or
* more protocol headers that make up a logical group of protocol headers. Each
* logical group of protocol headers encapsulates or is encapsulated using/by
@@ -158,10 +213,13 @@ enum ice_flow_seg_hdr {
ICE_FLOW_SEG_HDR_AH = 0x00200000,
ICE_FLOW_SEG_HDR_NAT_T_ESP = 0x00400000,
ICE_FLOW_SEG_HDR_ETH_NON_IP = 0x00800000,
+ ICE_FLOW_SEG_HDR_GTPU_NON_IP = 0x01000000,
+ ICE_FLOW_SEG_HDR_L2TPV2 = 0x10000000,
/* The following is an additive bit for ICE_FLOW_SEG_HDR_IPV4 and
- * ICE_FLOW_SEG_HDR_IPV6 which include the IPV4 other PTYPEs
+ * ICE_FLOW_SEG_HDR_IPV6.
*/
- ICE_FLOW_SEG_HDR_IPV_OTHER = 0x20000000,
+ ICE_FLOW_SEG_HDR_IPV_FRAG = 0x40000000,
+ ICE_FLOW_SEG_HDR_IPV_OTHER = 0x80000000,
};
/* These segments all have the same PTYPES, but are otherwise distinguished by
@@ -198,6 +256,15 @@ enum ice_flow_field {
ICE_FLOW_FIELD_IDX_IPV4_DA,
ICE_FLOW_FIELD_IDX_IPV6_SA,
ICE_FLOW_FIELD_IDX_IPV6_DA,
+ ICE_FLOW_FIELD_IDX_IPV4_CHKSUM,
+ ICE_FLOW_FIELD_IDX_IPV4_ID,
+ ICE_FLOW_FIELD_IDX_IPV6_ID,
+ ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA,
+ ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA,
+ ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA,
+ ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA,
+ ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA,
+ ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA,
/* L4 */
ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
@@ -206,6 +273,9 @@ enum ice_flow_field {
ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
ICE_FLOW_FIELD_IDX_TCP_FLAGS,
+ ICE_FLOW_FIELD_IDX_TCP_CHKSUM,
+ ICE_FLOW_FIELD_IDX_UDP_CHKSUM,
+ ICE_FLOW_FIELD_IDX_SCTP_CHKSUM,
/* ARP */
ICE_FLOW_FIELD_IDX_ARP_SIP,
ICE_FLOW_FIELD_IDX_ARP_DIP,
@@ -226,13 +296,13 @@ enum ice_flow_field {
ICE_FLOW_FIELD_IDX_GTPU_EH_QFI,
/* GTPU_UP */
ICE_FLOW_FIELD_IDX_GTPU_UP_TEID,
+ ICE_FLOW_FIELD_IDX_GTPU_UP_QFI,
/* GTPU_DWN */
ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID,
- /* PPPoE */
+ ICE_FLOW_FIELD_IDX_GTPU_DWN_QFI,
ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID,
/* PFCP */
ICE_FLOW_FIELD_IDX_PFCP_SEID,
- /* L2TPv3 */
ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID,
/* ESP */
ICE_FLOW_FIELD_IDX_ESP_SPI,
@@ -240,10 +310,16 @@ enum ice_flow_field {
ICE_FLOW_FIELD_IDX_AH_SPI,
/* NAT_T ESP */
ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
+ /* L2TPV2 SESSION ID*/
+ ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID,
+ /* L2TPV2_LEN SESSION ID */
+ ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID,
/* The total number of enums must not exceed 64 */
ICE_FLOW_FIELD_IDX_MAX
};
+static_assert(ICE_FLOW_FIELD_IDX_MAX <= 64, "The total number of enums must not exceed 64");
+
#define ICE_FLOW_HASH_FLD_IPV4_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)
#define ICE_FLOW_HASH_FLD_IPV6_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)
#define ICE_FLOW_HASH_FLD_IPV4_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)
@@ -264,57 +340,27 @@ enum ice_flow_field {
#define ICE_FLOW_HASH_FLD_GTPU_DWN_TEID \
BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID)
-/* Flow headers and fields for AVF support */
-enum ice_flow_avf_hdr_field {
- /* Values 0 - 28 are reserved for future use */
- ICE_AVF_FLOW_FIELD_INVALID = 0,
- ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP = 29,
- ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP,
- ICE_AVF_FLOW_FIELD_IPV4_UDP,
- ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK,
- ICE_AVF_FLOW_FIELD_IPV4_TCP,
- ICE_AVF_FLOW_FIELD_IPV4_SCTP,
- ICE_AVF_FLOW_FIELD_IPV4_OTHER,
- ICE_AVF_FLOW_FIELD_FRAG_IPV4,
- /* Values 37-38 are reserved */
- ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP = 39,
- ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP,
- ICE_AVF_FLOW_FIELD_IPV6_UDP,
- ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK,
- ICE_AVF_FLOW_FIELD_IPV6_TCP,
- ICE_AVF_FLOW_FIELD_IPV6_SCTP,
- ICE_AVF_FLOW_FIELD_IPV6_OTHER,
- ICE_AVF_FLOW_FIELD_FRAG_IPV6,
- ICE_AVF_FLOW_FIELD_RSVD47,
- ICE_AVF_FLOW_FIELD_FCOE_OX,
- ICE_AVF_FLOW_FIELD_FCOE_RX,
- ICE_AVF_FLOW_FIELD_FCOE_OTHER,
- /* Values 51-62 are reserved */
- ICE_AVF_FLOW_FIELD_L2_PAYLOAD = 63,
- ICE_AVF_FLOW_FIELD_MAX
-};
-
/* Supported RSS offloads. This macro is defined to support
- * VIRTCHNL_OP_GET_RSS_HENA_CAPS ops. PF driver sends the RSS hardware
+ * VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS ops. PF driver sends the RSS hardware
* capabilities to the caller of this ops.
*/
-#define ICE_DEFAULT_RSS_HENA ( \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))
+#define ICE_DEFAULT_RSS_HASHCFG ( \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
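ICE_DEFAULT_RSS_HASHCFG is a plain 64-bit bitmap indexed by packet-classifier type; the bit positions line up with the numbering of the removed ice_flow_avf_hdr_field enum above (IPV4_UDP = 31, IPV4_TCP = 33, and so on). A small sketch of decoding such a bitmap; the PCTYPE_* values are copied from that removed enum and are an assumption as far as the new libie header is concerned:

#include <stdio.h>
#include <stdint.h>

/* Subset of pctype numbers, per the old ICE_AVF_FLOW_FIELD_* values. */
enum pctype {
	PCTYPE_NONF_IPV4_UDP   = 31,
	PCTYPE_NONF_IPV4_TCP   = 33,
	PCTYPE_NONF_IPV4_SCTP  = 34,
	PCTYPE_NONF_IPV4_OTHER = 35,
	PCTYPE_FRAG_IPV4       = 36,
};

#define BIT_ULL(n)	(1ULL << (n))

int main(void)
{
	/* IPv4 portion of the default hash configuration bitmap. */
	uint64_t hashcfg = BIT_ULL(PCTYPE_NONF_IPV4_UDP) |
			   BIT_ULL(PCTYPE_NONF_IPV4_TCP) |
			   BIT_ULL(PCTYPE_NONF_IPV4_SCTP) |
			   BIT_ULL(PCTYPE_NONF_IPV4_OTHER) |
			   BIT_ULL(PCTYPE_FRAG_IPV4);

	for (int pctype = 0; pctype < 64; pctype++)
		if (hashcfg & BIT_ULL(pctype))
			printf("pctype %d has RSS hashing enabled\n", pctype);
	return 0;
}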
enum ice_rss_cfg_hdr_type {
ICE_RSS_OUTER_HEADERS, /* take outer headers as inputset. */
@@ -324,6 +370,10 @@ enum ice_rss_cfg_hdr_type {
/* take inner headers as inputset for packet with outer ipv6. */
ICE_RSS_INNER_HEADERS_W_OUTER_IPV6,
/* take outer headers first then inner headers as inputset */
+ /* take inner as inputset for GTPoGRE with outer IPv4 + GRE. */
+ ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE,
+ /* take inner as inputset for GTPoGRE with outer IPv6 + GRE. */
+ ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE,
ICE_RSS_ANY_HEADERS
};
@@ -434,6 +484,12 @@ struct ice_flow_prof {
bool symm; /* Symmetric Hash for RSS */
};
+struct ice_rss_raw_cfg {
+ struct ice_parser_profile prof;
+ bool raw_ena;
+ bool symm;
+};
+
struct ice_rss_cfg {
struct list_head l_entry;
/* bitmap of VSIs added to the RSS entry */
@@ -472,4 +528,6 @@ int ice_add_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi,
int ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
const struct ice_rss_hash_cfg *cfg);
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs, bool *symm);
+void ice_rss_update_raw_symm(struct ice_hw *hw,
+ struct ice_rss_raw_cfg *cfg, u64 id);
#endif /* _ICE_FLOW_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
index 2702a0da5c3e..973a13d3d92a 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.c
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -6,6 +6,7 @@
#include <linux/crc32.h>
#include <linux/pldmfw.h>
#include "ice.h"
+#include "ice_lib.h"
#include "ice_fw_update.h"
struct ice_fwu_priv {
@@ -67,7 +68,7 @@ ice_send_package_data(struct pldmfw *context, const u8 *data, u16 length)
if (status) {
dev_err(dev, "Failed to send record package data to firmware, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to record package data to firmware");
return -EIO;
}
@@ -125,6 +126,10 @@ ice_check_component_response(struct ice_pf *pf, u16 id, u8 response, u8 code,
case ICE_AQ_NVM_PASS_COMP_CAN_NOT_BE_UPDATED:
dev_info(dev, "firmware has rejected updating %s\n", component);
break;
+ case ICE_AQ_NVM_PASS_COMP_PARTIAL_CHECK:
+ if (ice_is_recovery_mode(&pf->hw))
+ return 0;
+ break;
}
switch (code) {
@@ -252,7 +257,7 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon
if (status) {
dev_err(dev, "Failed to transfer component table to firmware, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to transfer component table to firmware");
return -EIO;
}
@@ -294,7 +299,8 @@ int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
struct device *dev = ice_pf_to_dev(pf);
struct ice_aq_task task = {};
struct ice_hw *hw = &pf->hw;
- struct ice_aq_desc *desc;
+ struct libie_aq_desc *desc;
+ struct ice_aqc_nvm *cmd;
u32 completion_offset;
int err;
@@ -308,7 +314,7 @@ int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
if (err) {
dev_err(dev, "Failed to flash module 0x%02x with block of size %u at offset %u, err %d aq_err %s\n",
module, block_size, offset, err,
- ice_aq_str(hw->adminq.sq_last_status));
+ libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to program flash module");
return -EIO;
}
@@ -328,11 +334,12 @@ int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
}
desc = &task.event.desc;
- completion_module = le16_to_cpu(desc->params.nvm.module_typeid);
+ cmd = libie_aq_raw(desc);
+ completion_module = le16_to_cpu(cmd->module_typeid);
completion_retval = le16_to_cpu(desc->retval);
- completion_offset = le16_to_cpu(desc->params.nvm.offset_low);
- completion_offset |= desc->params.nvm.offset_high << 16;
+ completion_offset = le16_to_cpu(cmd->offset_low);
+ completion_offset |= cmd->offset_high << 16;
if (completion_module != module) {
dev_err(dev, "Unexpected module_typeid in write completion: got 0x%x, expected 0x%x\n",
@@ -351,7 +358,7 @@ int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
if (completion_retval) {
dev_err(dev, "Firmware failed to flash module 0x%02x with block of size %u at offset %u, err %s\n",
module, block_size, offset,
- ice_aq_str((enum ice_aq_err)completion_retval));
+ libie_aq_str((enum libie_aq_err)completion_retval));
NL_SET_ERR_MSG_MOD(extack, "Firmware failed to program flash module");
return -EIO;
}
@@ -364,7 +371,7 @@ int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
*/
if (reset_level && last_cmd && module == ICE_SR_1ST_NVM_BANK_PTR) {
if (hw->dev_caps.common_cap.pcie_reset_avoidance) {
- *reset_level = desc->params.nvm.cmd_flags &
+ *reset_level = cmd->cmd_flags &
ICE_AQC_NVM_RESET_LVL_M;
dev_dbg(dev, "Firmware reported required reset level as %u\n",
*reset_level);
@@ -482,7 +489,8 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
struct device *dev = ice_pf_to_dev(pf);
struct ice_aq_task task = {};
struct ice_hw *hw = &pf->hw;
- struct ice_aq_desc *desc;
+ struct libie_aq_desc *desc;
+ struct ice_aqc_nvm *cmd;
struct devlink *devlink;
int err;
@@ -498,7 +506,7 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
if (err) {
dev_err(dev, "Failed to erase %s (module 0x%02x), err %d aq_err %s\n",
component, module, err,
- ice_aq_str(hw->adminq.sq_last_status));
+ libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to erase flash module");
err = -EIO;
goto out_notify_devlink;
@@ -513,7 +521,8 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
}
desc = &task.event.desc;
- completion_module = le16_to_cpu(desc->params.nvm.module_typeid);
+ cmd = libie_aq_raw(desc);
+ completion_module = le16_to_cpu(cmd->module_typeid);
completion_retval = le16_to_cpu(desc->retval);
if (completion_module != module) {
@@ -525,9 +534,9 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
}
if (completion_retval) {
- dev_err(dev, "Firmware failed to erase %s (module 0x02%x), aq_err %s\n",
+ dev_err(dev, "Firmware failed to erase %s (module 0x%02x), aq_err %s\n",
component, module,
- ice_aq_str((enum ice_aq_err)completion_retval));
+ libie_aq_str((enum libie_aq_err)completion_retval));
NL_SET_ERR_MSG_MOD(extack, "Firmware failed to erase flash");
err = -EIO;
goto out_notify_devlink;
@@ -574,7 +583,7 @@ ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
err = ice_nvm_write_activate(hw, activate_flags, &response_flags);
if (err) {
dev_err(dev, "Failed to switch active flash banks, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to switch active flash banks");
return -EIO;
}
@@ -606,7 +615,7 @@ ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
completion_retval = le16_to_cpu(task.event.desc.retval);
if (completion_retval) {
dev_err(dev, "Firmware failed to switch active flash banks aq_err %s\n",
- ice_aq_str((enum ice_aq_err)completion_retval));
+ libie_aq_str((enum libie_aq_err)completion_retval));
NL_SET_ERR_MSG_MOD(extack, "Firmware failed to switch active flash banks");
return -EIO;
}
@@ -944,7 +953,7 @@ ice_cancel_pending_update(struct ice_pf *pf, const char *component,
err = ice_acquire_nvm(hw, ICE_RES_WRITE);
if (err) {
dev_err(dev, "Failed to acquire device flash lock, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock");
return err;
}
@@ -1004,13 +1013,20 @@ int ice_devlink_flash_update(struct devlink *devlink,
return -EOPNOTSUPP;
}
- if (!hw->dev_caps.common_cap.nvm_unified_update) {
+ if (!hw->dev_caps.common_cap.nvm_unified_update && !ice_is_recovery_mode(hw)) {
NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update");
return -EOPNOTSUPP;
}
memset(&priv, 0, sizeof(priv));
+ if (params->component && strcmp(params->component, "fw.mgmt") == 0) {
+ priv.context.mode = PLDMFW_UPDATE_MODE_SINGLE_COMPONENT;
+ priv.context.component_identifier = NVM_COMP_ID_NVM;
+ } else if (params->component) {
+ return -EOPNOTSUPP;
+ }
+
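A short usage note, hedged: with the check above, only the management firmware component may be requested explicitly; any other component name fails with -EOPNOTSUPP.

/* Hypothetical userspace invocation (PCI address and file path are
 * placeholders), using devlink's optional component argument:
 *   devlink dev flash pci/0000:18:00.0 file intel/ice/fw.bin component fw.mgmt
 */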
/* the E822 device needs a slightly different ops */
if (hw->mac_type == ICE_MAC_GENERIC)
priv.context.ops = &ice_fwu_ops_e822;
@@ -1030,7 +1046,7 @@ int ice_devlink_flash_update(struct devlink *devlink,
err = ice_acquire_nvm(hw, ICE_RES_WRITE);
if (err) {
dev_err(dev, "Failed to acquire device flash lock, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock");
return err;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_fwlog.c b/drivers/net/ethernet/intel/ice/ice_fwlog.c
deleted file mode 100644
index 4fd15387a7e5..000000000000
--- a/drivers/net/ethernet/intel/ice/ice_fwlog.c
+++ /dev/null
@@ -1,472 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2022, Intel Corporation. */
-
-#include <linux/vmalloc.h>
-#include "ice.h"
-#include "ice_common.h"
-#include "ice_fwlog.h"
-
-bool ice_fwlog_ring_full(struct ice_fwlog_ring *rings)
-{
- u16 head, tail;
-
- head = rings->head;
- tail = rings->tail;
-
- if (head < tail && (tail - head == (rings->size - 1)))
- return true;
- else if (head > tail && (tail == (head - 1)))
- return true;
-
- return false;
-}
-
-bool ice_fwlog_ring_empty(struct ice_fwlog_ring *rings)
-{
- return rings->head == rings->tail;
-}
-
-void ice_fwlog_ring_increment(u16 *item, u16 size)
-{
- *item = (*item + 1) & (size - 1);
-}
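The removed helper relies on the ring size being a power of two, so the mask (size - 1) implements the wraparound. An illustrative stand-alone equivalent (not taken from the driver):

static inline u16 ring_next(u16 item, u16 size)
{
	/* correct only when size is a power of two, e.g. 256 -> mask 0xff */
	return (item + 1) & (size - 1);
}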
-
-static int ice_fwlog_alloc_ring_buffs(struct ice_fwlog_ring *rings)
-{
- int i, nr_bytes;
- u8 *mem;
-
- nr_bytes = rings->size * ICE_AQ_MAX_BUF_LEN;
- mem = vzalloc(nr_bytes);
- if (!mem)
- return -ENOMEM;
-
- for (i = 0; i < rings->size; i++) {
- struct ice_fwlog_data *ring = &rings->rings[i];
-
- ring->data_size = ICE_AQ_MAX_BUF_LEN;
- ring->data = mem;
- mem += ICE_AQ_MAX_BUF_LEN;
- }
-
- return 0;
-}
-
-static void ice_fwlog_free_ring_buffs(struct ice_fwlog_ring *rings)
-{
- int i;
-
- for (i = 0; i < rings->size; i++) {
- struct ice_fwlog_data *ring = &rings->rings[i];
-
- /* the first ring is the base memory for the whole range so
- * free it
- */
- if (!i)
- vfree(ring->data);
-
- ring->data = NULL;
- ring->data_size = 0;
- }
-}
-
-#define ICE_FWLOG_INDEX_TO_BYTES(n) ((128 * 1024) << (n))
-/**
- * ice_fwlog_realloc_rings - reallocate the FW log rings
- * @hw: pointer to the HW structure
- * @index: the new index to use to allocate memory for the log data
- *
- */
-void ice_fwlog_realloc_rings(struct ice_hw *hw, int index)
-{
- struct ice_fwlog_ring ring;
- int status, ring_size;
-
- /* convert the number of bytes into a number of 4K buffers. externally
- * the driver presents the interface to the FW log data as a number of
- * bytes because that's easy for users to understand. internally the
- * driver uses a ring of buffers because the driver doesn't know where
- * the beginning and end of any line of log data is so the driver has
- * to overwrite data as complete blocks. when the data is returned to
- * the user the driver knows that the data is correct and the FW log
- * can be correctly parsed by the tools
- */
- ring_size = ICE_FWLOG_INDEX_TO_BYTES(index) / ICE_AQ_MAX_BUF_LEN;
- if (ring_size == hw->fwlog_ring.size)
- return;
-
- /* allocate space for the new rings and buffers then release the
- * old rings and buffers. that way if we don't have enough
- * memory then we at least have what we had before
- */
- ring.rings = kcalloc(ring_size, sizeof(*ring.rings), GFP_KERNEL);
- if (!ring.rings)
- return;
-
- ring.size = ring_size;
-
- status = ice_fwlog_alloc_ring_buffs(&ring);
- if (status) {
- dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log ring data buffers\n");
- ice_fwlog_free_ring_buffs(&ring);
- kfree(ring.rings);
- return;
- }
-
- ice_fwlog_free_ring_buffs(&hw->fwlog_ring);
- kfree(hw->fwlog_ring.rings);
-
- hw->fwlog_ring.rings = ring.rings;
- hw->fwlog_ring.size = ring.size;
- hw->fwlog_ring.index = index;
- hw->fwlog_ring.head = 0;
- hw->fwlog_ring.tail = 0;
-}
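Worked example for the byte-to-buffer conversion above, assuming ICE_AQ_MAX_BUF_LEN is 4096 bytes (its usual value; not shown in this diff):

/* ICE_FWLOG_INDEX_TO_BYTES(n) = (128 KiB) << n, so:
 *   index 0 ->  128 KiB / 4 KiB =  32 buffers
 *   index 3 ->    1 MiB / 4 KiB = 256 buffers (ICE_FWLOG_RING_SIZE_DFLT)
 *   index 4 ->    2 MiB / 4 KiB = 512 buffers (ICE_FWLOG_RING_SIZE_MAX)
 */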
-
-/**
- * ice_fwlog_init - Initialize FW logging configuration
- * @hw: pointer to the HW structure
- *
- * This function should be called on driver initialization during
- * ice_init_hw().
- */
-int ice_fwlog_init(struct ice_hw *hw)
-{
- /* only support fw log commands on PF 0 */
- if (hw->bus.func)
- return -EINVAL;
-
- ice_fwlog_set_supported(hw);
-
- if (ice_fwlog_supported(hw)) {
- int status;
-
- /* read the current config from the FW and store it */
- status = ice_fwlog_get(hw, &hw->fwlog_cfg);
- if (status)
- return status;
-
- hw->fwlog_ring.rings = kcalloc(ICE_FWLOG_RING_SIZE_DFLT,
- sizeof(*hw->fwlog_ring.rings),
- GFP_KERNEL);
- if (!hw->fwlog_ring.rings) {
- dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log rings\n");
- return -ENOMEM;
- }
-
- hw->fwlog_ring.size = ICE_FWLOG_RING_SIZE_DFLT;
- hw->fwlog_ring.index = ICE_FWLOG_RING_SIZE_INDEX_DFLT;
-
- status = ice_fwlog_alloc_ring_buffs(&hw->fwlog_ring);
- if (status) {
- dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log ring data buffers\n");
- ice_fwlog_free_ring_buffs(&hw->fwlog_ring);
- kfree(hw->fwlog_ring.rings);
- return status;
- }
-
- ice_debugfs_fwlog_init(hw->back);
- } else {
- dev_warn(ice_hw_to_dev(hw), "FW logging is not supported in this NVM image. Please update the NVM to get FW log support\n");
- }
-
- return 0;
-}
-
-/**
- * ice_fwlog_deinit - unroll FW logging configuration
- * @hw: pointer to the HW structure
- *
- * This function should be called in ice_deinit_hw().
- */
-void ice_fwlog_deinit(struct ice_hw *hw)
-{
- struct ice_pf *pf = hw->back;
- int status;
-
- /* only support fw log commands on PF 0 */
- if (hw->bus.func)
- return;
-
- ice_debugfs_pf_deinit(hw->back);
-
- /* make sure FW logging is disabled to not put the FW in a weird state
- * for the next driver load
- */
- hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_ARQ_ENA;
- status = ice_fwlog_set(hw, &hw->fwlog_cfg);
- if (status)
- dev_warn(ice_hw_to_dev(hw), "Unable to turn off FW logging, status: %d\n",
- status);
-
- kfree(pf->ice_debugfs_pf_fwlog_modules);
-
- pf->ice_debugfs_pf_fwlog_modules = NULL;
-
- status = ice_fwlog_unregister(hw);
- if (status)
- dev_warn(ice_hw_to_dev(hw), "Unable to unregister FW logging, status: %d\n",
- status);
-
- if (hw->fwlog_ring.rings) {
- ice_fwlog_free_ring_buffs(&hw->fwlog_ring);
- kfree(hw->fwlog_ring.rings);
- }
-}
-
-/**
- * ice_fwlog_supported - Cached for whether FW supports FW logging or not
- * @hw: pointer to the HW structure
- *
- * This will always return false if called before ice_init_hw(), so it must be
- * called after ice_init_hw().
- */
-bool ice_fwlog_supported(struct ice_hw *hw)
-{
- return hw->fwlog_supported;
-}
-
-/**
- * ice_aq_fwlog_set - Set FW logging configuration AQ command (0xFF30)
- * @hw: pointer to the HW structure
- * @entries: entries to configure
- * @num_entries: number of @entries
- * @options: options from ice_fwlog_cfg->options structure
- * @log_resolution: logging resolution
- */
-static int
-ice_aq_fwlog_set(struct ice_hw *hw, struct ice_fwlog_module_entry *entries,
- u16 num_entries, u16 options, u16 log_resolution)
-{
- struct ice_aqc_fw_log_cfg_resp *fw_modules;
- struct ice_aqc_fw_log *cmd;
- struct ice_aq_desc desc;
- int status;
- int i;
-
- fw_modules = kcalloc(num_entries, sizeof(*fw_modules), GFP_KERNEL);
- if (!fw_modules)
- return -ENOMEM;
-
- for (i = 0; i < num_entries; i++) {
- fw_modules[i].module_identifier =
- cpu_to_le16(entries[i].module_id);
- fw_modules[i].log_level = entries[i].log_level;
- }
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_config);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
-
- cmd = &desc.params.fw_log;
-
- cmd->cmd_flags = ICE_AQC_FW_LOG_CONF_SET_VALID;
- cmd->ops.cfg.log_resolution = cpu_to_le16(log_resolution);
- cmd->ops.cfg.mdl_cnt = cpu_to_le16(num_entries);
-
- if (options & ICE_FWLOG_OPTION_ARQ_ENA)
- cmd->cmd_flags |= ICE_AQC_FW_LOG_CONF_AQ_EN;
- if (options & ICE_FWLOG_OPTION_UART_ENA)
- cmd->cmd_flags |= ICE_AQC_FW_LOG_CONF_UART_EN;
-
- status = ice_aq_send_cmd(hw, &desc, fw_modules,
- sizeof(*fw_modules) * num_entries,
- NULL);
-
- kfree(fw_modules);
-
- return status;
-}
-
-/**
- * ice_fwlog_set - Set the firmware logging settings
- * @hw: pointer to the HW structure
- * @cfg: config used to set firmware logging
- *
- * This function should be called whenever the driver needs to set the firmware
- * logging configuration. It can be called on initialization, reset, or during
- * runtime.
- *
- * If the PF wishes to receive FW logging then it must register via
- * ice_fwlog_register. Note, that ice_fwlog_register does not need to be called
- * for init.
- */
-int ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
-{
- if (!ice_fwlog_supported(hw))
- return -EOPNOTSUPP;
-
- return ice_aq_fwlog_set(hw, cfg->module_entries,
- ICE_AQC_FW_LOG_ID_MAX, cfg->options,
- cfg->log_resolution);
-}
-
-/**
- * ice_aq_fwlog_get - Get the current firmware logging configuration (0xFF32)
- * @hw: pointer to the HW structure
- * @cfg: firmware logging configuration to populate
- */
-static int ice_aq_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
-{
- struct ice_aqc_fw_log_cfg_resp *fw_modules;
- struct ice_aqc_fw_log *cmd;
- struct ice_aq_desc desc;
- u16 module_id_cnt;
- int status;
- void *buf;
- int i;
-
- memset(cfg, 0, sizeof(*cfg));
-
- buf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_query);
- cmd = &desc.params.fw_log;
-
- cmd->cmd_flags = ICE_AQC_FW_LOG_AQ_QUERY;
-
- status = ice_aq_send_cmd(hw, &desc, buf, ICE_AQ_MAX_BUF_LEN, NULL);
- if (status) {
- ice_debug(hw, ICE_DBG_FW_LOG, "Failed to get FW log configuration\n");
- goto status_out;
- }
-
- module_id_cnt = le16_to_cpu(cmd->ops.cfg.mdl_cnt);
- if (module_id_cnt < ICE_AQC_FW_LOG_ID_MAX) {
- ice_debug(hw, ICE_DBG_FW_LOG, "FW returned less than the expected number of FW log module IDs\n");
- } else if (module_id_cnt > ICE_AQC_FW_LOG_ID_MAX) {
- ice_debug(hw, ICE_DBG_FW_LOG, "FW returned more than expected number of FW log module IDs, setting module_id_cnt to software expected max %u\n",
- ICE_AQC_FW_LOG_ID_MAX);
- module_id_cnt = ICE_AQC_FW_LOG_ID_MAX;
- }
-
- cfg->log_resolution = le16_to_cpu(cmd->ops.cfg.log_resolution);
- if (cmd->cmd_flags & ICE_AQC_FW_LOG_CONF_AQ_EN)
- cfg->options |= ICE_FWLOG_OPTION_ARQ_ENA;
- if (cmd->cmd_flags & ICE_AQC_FW_LOG_CONF_UART_EN)
- cfg->options |= ICE_FWLOG_OPTION_UART_ENA;
- if (cmd->cmd_flags & ICE_AQC_FW_LOG_QUERY_REGISTERED)
- cfg->options |= ICE_FWLOG_OPTION_IS_REGISTERED;
-
- fw_modules = (struct ice_aqc_fw_log_cfg_resp *)buf;
-
- for (i = 0; i < module_id_cnt; i++) {
- struct ice_aqc_fw_log_cfg_resp *fw_module = &fw_modules[i];
-
- cfg->module_entries[i].module_id =
- le16_to_cpu(fw_module->module_identifier);
- cfg->module_entries[i].log_level = fw_module->log_level;
- }
-
-status_out:
- kfree(buf);
- return status;
-}
-
-/**
- * ice_fwlog_get - Get the firmware logging settings
- * @hw: pointer to the HW structure
- * @cfg: config to populate based on current firmware logging settings
- */
-int ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
-{
- if (!ice_fwlog_supported(hw))
- return -EOPNOTSUPP;
-
- return ice_aq_fwlog_get(hw, cfg);
-}
-
-/**
- * ice_aq_fwlog_register - Register PF for firmware logging events (0xFF31)
- * @hw: pointer to the HW structure
- * @reg: true to register and false to unregister
- */
-static int ice_aq_fwlog_register(struct ice_hw *hw, bool reg)
-{
- struct ice_aq_desc desc;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_register);
-
- if (reg)
- desc.params.fw_log.cmd_flags = ICE_AQC_FW_LOG_AQ_REGISTER;
-
- return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
-}
-
-/**
- * ice_fwlog_register - Register the PF for firmware logging
- * @hw: pointer to the HW structure
- *
- * After this call the PF will start to receive firmware logging based on the
- * configuration set in ice_fwlog_set.
- */
-int ice_fwlog_register(struct ice_hw *hw)
-{
- int status;
-
- if (!ice_fwlog_supported(hw))
- return -EOPNOTSUPP;
-
- status = ice_aq_fwlog_register(hw, true);
- if (status)
- ice_debug(hw, ICE_DBG_FW_LOG, "Failed to register for firmware logging events over ARQ\n");
- else
- hw->fwlog_cfg.options |= ICE_FWLOG_OPTION_IS_REGISTERED;
-
- return status;
-}
-
-/**
- * ice_fwlog_unregister - Unregister the PF from firmware logging
- * @hw: pointer to the HW structure
- */
-int ice_fwlog_unregister(struct ice_hw *hw)
-{
- int status;
-
- if (!ice_fwlog_supported(hw))
- return -EOPNOTSUPP;
-
- status = ice_aq_fwlog_register(hw, false);
- if (status)
- ice_debug(hw, ICE_DBG_FW_LOG, "Failed to unregister from firmware logging events over ARQ\n");
- else
- hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_IS_REGISTERED;
-
- return status;
-}
-
-/**
- * ice_fwlog_set_supported - Set if FW logging is supported by FW
- * @hw: pointer to the HW struct
- *
- * If FW returns success to the ice_aq_fwlog_get call then it supports FW
- * logging, else it doesn't. Set the fwlog_supported flag accordingly.
- *
- * This function is only meant to be called during driver init to determine if
- * the FW support FW logging.
- */
-void ice_fwlog_set_supported(struct ice_hw *hw)
-{
- struct ice_fwlog_cfg *cfg;
- int status;
-
- hw->fwlog_supported = false;
-
- cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
- if (!cfg)
- return;
-
- /* don't call ice_fwlog_get() because that would check to see if FW
- * logging is supported which is what the driver is determining now
- */
- status = ice_aq_fwlog_get(hw, cfg);
- if (status)
- ice_debug(hw, ICE_DBG_FW_LOG, "ice_aq_fwlog_get failed, FW logging is not supported on this version of FW, status %d\n",
- status);
- else
- hw->fwlog_supported = true;
-
- kfree(cfg);
-}
diff --git a/drivers/net/ethernet/intel/ice/ice_fwlog.h b/drivers/net/ethernet/intel/ice/ice_fwlog.h
deleted file mode 100644
index 287e71fa4b86..000000000000
--- a/drivers/net/ethernet/intel/ice/ice_fwlog.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2022, Intel Corporation. */
-
-#ifndef _ICE_FWLOG_H_
-#define _ICE_FWLOG_H_
-#include "ice_adminq_cmd.h"
-
-struct ice_hw;
-
-/* Only a single log level should be set and all log levels under the set value
- * are enabled, e.g. if log level is set to ICE_FWLOG_LEVEL_VERBOSE, then all
- * other log levels are included (except ICE_FWLOG_LEVEL_NONE)
- */
-enum ice_fwlog_level {
- ICE_FWLOG_LEVEL_NONE = 0,
- ICE_FWLOG_LEVEL_ERROR = 1,
- ICE_FWLOG_LEVEL_WARNING = 2,
- ICE_FWLOG_LEVEL_NORMAL = 3,
- ICE_FWLOG_LEVEL_VERBOSE = 4,
- ICE_FWLOG_LEVEL_INVALID, /* all values >= this entry are invalid */
-};
-
-struct ice_fwlog_module_entry {
- /* module ID for the corresponding firmware logging event */
- u16 module_id;
- /* verbosity level for the module_id */
- u8 log_level;
-};
-
-struct ice_fwlog_cfg {
- /* list of modules for configuring log level */
- struct ice_fwlog_module_entry module_entries[ICE_AQC_FW_LOG_ID_MAX];
- /* options used to configure firmware logging */
- u16 options;
-#define ICE_FWLOG_OPTION_ARQ_ENA BIT(0)
-#define ICE_FWLOG_OPTION_UART_ENA BIT(1)
- /* set before calling ice_fwlog_init() so the PF registers for firmware
- * logging on initialization
- */
-#define ICE_FWLOG_OPTION_REGISTER_ON_INIT BIT(2)
- /* set in the ice_fwlog_get() response if the PF is registered for FW
- * logging events over ARQ
- */
-#define ICE_FWLOG_OPTION_IS_REGISTERED BIT(3)
-
- /* minimum number of log events sent per Admin Receive Queue event */
- u16 log_resolution;
-};
-
-struct ice_fwlog_data {
- u16 data_size;
- u8 *data;
-};
-
-struct ice_fwlog_ring {
- struct ice_fwlog_data *rings;
- u16 index;
- u16 size;
- u16 head;
- u16 tail;
-};
-
-#define ICE_FWLOG_RING_SIZE_INDEX_DFLT 3
-#define ICE_FWLOG_RING_SIZE_DFLT 256
-#define ICE_FWLOG_RING_SIZE_MAX 512
-
-bool ice_fwlog_ring_full(struct ice_fwlog_ring *rings);
-bool ice_fwlog_ring_empty(struct ice_fwlog_ring *rings);
-void ice_fwlog_ring_increment(u16 *item, u16 size);
-void ice_fwlog_set_supported(struct ice_hw *hw);
-bool ice_fwlog_supported(struct ice_hw *hw);
-int ice_fwlog_init(struct ice_hw *hw);
-void ice_fwlog_deinit(struct ice_hw *hw);
-int ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
-int ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
-int ice_fwlog_register(struct ice_hw *hw);
-int ice_fwlog_unregister(struct ice_hw *hw);
-void ice_fwlog_realloc_rings(struct ice_hw *hw, int index);
-#endif /* _ICE_FWLOG_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c
index f02e8ca55375..6b26290452d4 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.c
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.c
@@ -182,7 +182,7 @@ static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf)
pf->gnss_serial = gnss;
kthread_init_delayed_work(&gnss->read_work, ice_gnss_read);
- kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev));
+ kworker = kthread_run_worker(0, "ice-gnss-%s", dev_name(dev));
if (IS_ERR(kworker)) {
kfree(gnss);
return NULL;
@@ -381,32 +381,23 @@ void ice_gnss_exit(struct ice_pf *pf)
}
/**
- * ice_gnss_is_gps_present - Check if GPS HW is present
+ * ice_gnss_is_module_present - Check if GNSS HW is present
* @hw: pointer to HW struct
+ *
+ * Return: true when GNSS is present, false otherwise.
*/
-bool ice_gnss_is_gps_present(struct ice_hw *hw)
+bool ice_gnss_is_module_present(struct ice_hw *hw)
{
- if (!hw->func_caps.ts_func_info.src_tmr_owned)
- return false;
+ int err;
+ u8 data;
- if (!ice_is_gps_in_netlist(hw))
+ if (!hw->func_caps.ts_func_info.src_tmr_owned ||
+ !ice_is_gps_in_netlist(hw))
return false;
-#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
- if (ice_is_e810t(hw)) {
- int err;
- u8 data;
-
- err = ice_read_pca9575_reg(hw, ICE_PCA9575_P0_IN, &data);
- if (err || !!(data & ICE_P0_GNSS_PRSNT_N))
- return false;
- } else {
- return false;
- }
-#else
- if (!ice_is_e810t(hw))
+ err = ice_read_pca9575_reg(hw, ICE_PCA9575_P0_IN, &data);
+ if (err || !!(data & ICE_P0_GNSS_PRSNT_N))
return false;
-#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
return true;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.h b/drivers/net/ethernet/intel/ice/ice_gnss.h
index 75e567ad7059..15daf603ed7b 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.h
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.h
@@ -37,11 +37,11 @@ struct gnss_serial {
#if IS_ENABLED(CONFIG_GNSS)
void ice_gnss_init(struct ice_pf *pf);
void ice_gnss_exit(struct ice_pf *pf);
-bool ice_gnss_is_gps_present(struct ice_hw *hw);
+bool ice_gnss_is_module_present(struct ice_hw *hw);
#else
static inline void ice_gnss_init(struct ice_pf *pf) { }
static inline void ice_gnss_exit(struct ice_pf *pf) { }
-static inline bool ice_gnss_is_gps_present(struct ice_hw *hw)
+static inline bool ice_gnss_is_module_present(struct ice_hw *hw)
{
return false;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index dc88aea9f473..082ad33c53dc 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -16,8 +16,10 @@
#define GLCOMM_QUANTA_PROF_MAX_DESC_M ICE_M(0x3F, 24)
#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4))
#define QTX_COMM_HEAD(_DBQM) (0x000E0000 + ((_DBQM) * 4))
+#define QTX_COMM_HEAD_MAX_INDEX 16383
#define QTX_COMM_HEAD_HEAD_S 0
#define QTX_COMM_HEAD_HEAD_M ICE_M(0x1FFF, 0)
+#define E830_GLQTX_TXTIME_DBELL_LSB(_DBQM) (0x002E0000 + ((_DBQM) * 8))
#define PF_FW_ARQBAH 0x00080180
#define PF_FW_ARQBAL 0x00080080
#define PF_FW_ARQH 0x00080380
@@ -272,6 +274,8 @@
#define VPINT_ALLOC_PCI_VALID_M BIT(31)
#define VPINT_MBX_CTL(_VSI) (0x0016A000 + ((_VSI) * 4))
#define VPINT_MBX_CTL_CAUSE_ENA_M BIT(30)
+#define PFLAN_TX_QALLOC(_PF) (0x001D2580 + ((_PF) * 4))
+#define PFLAN_TX_QALLOC_FIRSTQ_M GENMASK(13, 0)
#define GLLAN_RCTL_0 0x002941F8
#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4))
#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4))
@@ -376,6 +380,15 @@
#define GLNVM_ULD_POR_DONE_1_M BIT(8)
#define GLNVM_ULD_PCIER_DONE_2_M BIT(9)
#define GLNVM_ULD_PE_DONE_M BIT(10)
+#define GLCOMM_QTX_CNTX_CTL 0x002D2DC8
+#define GLCOMM_QTX_CNTX_CTL_QUEUE_ID_M GENMASK(13, 0)
+#define GLCOMM_QTX_CNTX_CTL_CMD_M GENMASK(18, 16)
+#define GLCOMM_QTX_CNTX_CTL_CMD_READ 0
+#define GLCOMM_QTX_CNTX_CTL_CMD_WRITE 1
+#define GLCOMM_QTX_CNTX_CTL_CMD_RESET 3
+#define GLCOMM_QTX_CNTX_CTL_CMD_WRITE_NO_DYN 4
+#define GLCOMM_QTX_CNTX_CTL_CMD_EXEC_M BIT(19)
+#define GLCOMM_QTX_CNTX_DATA(_i) (0x002D2D40 + ((_i) * 4))
#define GLPCI_CNF2 0x000BE004
#define GLPCI_CNF2_CACHELINE_SIZE_M BIT(1)
#define PF_FUNC_RID 0x0009E880
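As a sketch of how the new GLCOMM_QTX_CNTX_CTL fields compose, a Tx queue context read request could be built with FIELD_PREP from <linux/bitfield.h>. The register semantics, and the hw/txq_id locals, are assumptions for illustration and not part of this patch:

u32 ctl = FIELD_PREP(GLCOMM_QTX_CNTX_CTL_QUEUE_ID_M, txq_id) |
	  FIELD_PREP(GLCOMM_QTX_CNTX_CTL_CMD_M, GLCOMM_QTX_CNTX_CTL_CMD_READ) |
	  GLCOMM_QTX_CNTX_CTL_CMD_EXEC_M;

wr32(hw, GLCOMM_QTX_CNTX_CTL, ctl);
/* queue context words would then be read back from GLCOMM_QTX_CNTX_DATA(i) */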
@@ -541,12 +554,26 @@
#define PFPM_WUS_MAG_M BIT(1)
#define PFPM_WUS_MNG_M BIT(3)
#define PFPM_WUS_FW_RST_WK_M BIT(31)
+#define E830_PRTMAC_TS_TX_MEM_VALID_H 0x001E2020
+#define E830_PRTMAC_TS_TX_MEM_VALID_L 0x001E2000
#define E830_PRTMAC_CL01_PS_QNT 0x001E32A0
#define E830_PRTMAC_CL01_PS_QNT_CL0_M GENMASK(15, 0)
#define E830_PRTMAC_CL01_QNT_THR 0x001E3320
#define E830_PRTMAC_CL01_QNT_THR_CL0_M GENMASK(15, 0)
+#define E830_PRTTSYN_TXTIME_H(_i) (0x001E5800 + ((_i) * 32))
+#define E830_PRTTSYN_TXTIME_L(_i) (0x001E5000 + ((_i) * 32))
+#define E830_GLPTM_ART_CTL 0x00088B50
+#define E830_GLPTM_ART_CTL_ACTIVE_M BIT(0)
+#define E830_GLPTM_ART_TIME_H 0x00088B54
+#define E830_GLPTM_ART_TIME_L 0x00088B58
+#define E830_GLTSYN_PTMTIME_H(_i) (0x00088B48 + ((_i) * 4))
+#define E830_GLTSYN_PTMTIME_L(_i) (0x00088B40 + ((_i) * 4))
+#define E830_PFPTM_SEM 0x00088B00
+#define E830_PFPTM_SEM_BUSY_M BIT(0)
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
+#define E830_GLTXTIME_FETCH_PROFILE(_i, _j) (0x002D3500 + ((_i) * 4 + (_j) * 64))
+#define E830_GLTXTIME_FETCH_PROFILE_FETCH_TS_DESC_M ICE_M(0x1FF, 0)
#define E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH 0x00234000
#define E830_MBX_VF_DEC_TRIG(_VF) (0x00233800 + (_VF) * 4)
#define E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT(_VF) (0x00233000 + (_VF) * 4)
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
index 145b27f2a4ce..420d45c2558b 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -9,22 +9,25 @@
static DEFINE_XARRAY_ALLOC1(ice_aux_id);
/**
- * ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct
- * @pf: pointer to PF struct
+ * ice_get_auxiliary_drv - retrieve iidc_rdma_core_auxiliary_drv struct
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
*
* This function has to be called with a device_lock on the
- * pf->adev.dev to avoid race conditions.
+ * cdev->adev.dev to avoid race conditions.
+ *
+ * Return: pointer to the matched auxiliary driver struct
*/
-static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf)
+static struct iidc_rdma_core_auxiliary_drv *
+ice_get_auxiliary_drv(struct iidc_rdma_core_dev_info *cdev)
{
struct auxiliary_device *adev;
- adev = pf->adev;
+ adev = cdev->adev;
if (!adev || !adev->dev.driver)
return NULL;
- return container_of(adev->dev.driver, struct iidc_auxiliary_drv,
- adrv.driver);
+ return container_of(adev->dev.driver,
+ struct iidc_rdma_core_auxiliary_drv, adrv.driver);
}
/**
@@ -32,44 +35,54 @@ static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf)
* @pf: pointer to PF struct
* @event: event struct
*/
-void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
+void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_rdma_event *event)
{
- struct iidc_auxiliary_drv *iadrv;
+ struct iidc_rdma_core_auxiliary_drv *iadrv;
+ struct iidc_rdma_core_dev_info *cdev;
if (WARN_ON_ONCE(!in_task()))
return;
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return;
+
mutex_lock(&pf->adev_mutex);
- if (!pf->adev)
+ if (!cdev->adev)
goto finish;
- device_lock(&pf->adev->dev);
- iadrv = ice_get_auxiliary_drv(pf);
+ device_lock(&cdev->adev->dev);
+ iadrv = ice_get_auxiliary_drv(cdev);
if (iadrv && iadrv->event_handler)
- iadrv->event_handler(pf, event);
- device_unlock(&pf->adev->dev);
+ iadrv->event_handler(cdev, event);
+ device_unlock(&cdev->adev->dev);
finish:
mutex_unlock(&pf->adev_mutex);
}
/**
* ice_add_rdma_qset - Add Leaf Node for RDMA Qset
- * @pf: PF struct
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
* @qset: Resource to be allocated
+ *
+ * Return: Zero on success or error code encountered
*/
-int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
+int ice_add_rdma_qset(struct iidc_rdma_core_dev_info *cdev,
+ struct iidc_rdma_qset_params *qset)
{
u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS];
struct ice_vsi *vsi;
struct device *dev;
+ struct ice_pf *pf;
u32 qset_teid;
u16 qs_handle;
int status;
int i;
- if (WARN_ON(!pf || !qset))
+ if (WARN_ON(!cdev || !qset))
return -EINVAL;
+ pf = pci_get_drvdata(cdev->pdev);
dev = ice_pf_to_dev(pf);
if (!ice_is_rdma_ena(pf))
@@ -100,7 +113,6 @@ int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
dev_err(dev, "Failed VSI RDMA Qset enable\n");
return status;
}
- vsi->qset_handle[qset->tc] = qset->qs_handle;
qset->teid = qset_teid;
return 0;
@@ -109,18 +121,23 @@ EXPORT_SYMBOL_GPL(ice_add_rdma_qset);
/**
* ice_del_rdma_qset - Delete leaf node for RDMA Qset
- * @pf: PF struct
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
* @qset: Resource to be freed
+ *
+ * Return: Zero on success, error code on failure
*/
-int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
+int ice_del_rdma_qset(struct iidc_rdma_core_dev_info *cdev,
+ struct iidc_rdma_qset_params *qset)
{
struct ice_vsi *vsi;
+ struct ice_pf *pf;
u32 teid;
u16 q_id;
- if (WARN_ON(!pf || !qset))
+ if (WARN_ON(!cdev || !qset))
return -EINVAL;
+ pf = pci_get_drvdata(cdev->pdev);
vsi = ice_find_vsi(pf, qset->vport_id);
if (!vsi) {
dev_err(ice_pf_to_dev(pf), "RDMA Invalid VSI\n");
@@ -130,36 +147,36 @@ int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
q_id = qset->qs_handle;
teid = qset->teid;
- vsi->qset_handle[qset->tc] = 0;
-
return ice_dis_vsi_rdma_qset(vsi->port_info, 1, &teid, &q_id);
}
EXPORT_SYMBOL_GPL(ice_del_rdma_qset);
/**
* ice_rdma_request_reset - accept request from RDMA to perform a reset
- * @pf: struct for PF
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
* @reset_type: type of reset
+ *
+ * Return: Zero on success, error code on failure
*/
-int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type)
+int ice_rdma_request_reset(struct iidc_rdma_core_dev_info *cdev,
+ enum iidc_rdma_reset_type reset_type)
{
enum ice_reset_req reset;
+ struct ice_pf *pf;
- if (WARN_ON(!pf))
+ if (WARN_ON(!cdev))
return -EINVAL;
+ pf = pci_get_drvdata(cdev->pdev);
+
switch (reset_type) {
- case IIDC_PFR:
+ case IIDC_FUNC_RESET:
reset = ICE_RESET_PFR;
break;
- case IIDC_CORER:
+ case IIDC_DEV_RESET:
reset = ICE_RESET_CORER;
break;
- case IIDC_GLOBR:
- reset = ICE_RESET_GLOBR;
- break;
default:
- dev_err(ice_pf_to_dev(pf), "incorrect reset request\n");
return -EINVAL;
}
@@ -169,18 +186,23 @@ EXPORT_SYMBOL_GPL(ice_rdma_request_reset);
/**
* ice_rdma_update_vsi_filter - update main VSI filters for RDMA
- * @pf: pointer to struct for PF
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
* @vsi_id: VSI HW idx to update filter on
* @enable: bool whether to enable or disable filters
+ *
+ * Return: Zero on success, error code on failure
*/
-int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable)
+int ice_rdma_update_vsi_filter(struct iidc_rdma_core_dev_info *cdev,
+ u16 vsi_id, bool enable)
{
struct ice_vsi *vsi;
+ struct ice_pf *pf;
int status;
- if (WARN_ON(!pf))
+ if (WARN_ON(!cdev))
return -EINVAL;
+ pf = pci_get_drvdata(cdev->pdev);
vsi = ice_find_vsi(pf, vsi_id);
if (!vsi)
return -EINVAL;
@@ -201,88 +223,54 @@ int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable)
EXPORT_SYMBOL_GPL(ice_rdma_update_vsi_filter);
/**
- * ice_get_qos_params - parse QoS params for RDMA consumption
- * @pf: pointer to PF struct
- * @qos: set of QoS values
- */
-void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos)
-{
- struct ice_dcbx_cfg *dcbx_cfg;
- unsigned int i;
- u32 up2tc;
-
- dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
- up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);
-
- qos->num_tc = ice_dcb_get_num_tc(dcbx_cfg);
- for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
- qos->up2tc[i] = (up2tc >> (i * 3)) & 0x7;
-
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- qos->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i];
-
- qos->pfc_mode = dcbx_cfg->pfc_mode;
- if (qos->pfc_mode == IIDC_DSCP_PFC_MODE)
- for (i = 0; i < IIDC_MAX_DSCP_MAPPING; i++)
- qos->dscp_map[i] = dcbx_cfg->dscp_map[i];
-}
-EXPORT_SYMBOL_GPL(ice_get_qos_params);
-
-/**
- * ice_alloc_rdma_qvectors - Allocate vector resources for RDMA driver
- * @pf: board private structure to initialize
+ * ice_alloc_rdma_qvector - alloc vector resources reserved for RDMA driver
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
+ * @entry: MSI-X entry to be removed
+ *
+ * Return: Zero on success, error code on failure
*/
-static int ice_alloc_rdma_qvectors(struct ice_pf *pf)
+int ice_alloc_rdma_qvector(struct iidc_rdma_core_dev_info *cdev,
+ struct msix_entry *entry)
{
- if (ice_is_rdma_ena(pf)) {
- int i;
-
- pf->msix_entries = kcalloc(pf->num_rdma_msix,
- sizeof(*pf->msix_entries),
- GFP_KERNEL);
- if (!pf->msix_entries)
- return -ENOMEM;
+ struct msi_map map;
+ struct ice_pf *pf;
- /* RDMA is the only user of pf->msix_entries array */
- pf->rdma_base_vector = 0;
+ if (WARN_ON(!cdev))
+ return -EINVAL;
- for (i = 0; i < pf->num_rdma_msix; i++) {
- struct msix_entry *entry = &pf->msix_entries[i];
- struct msi_map map;
+ pf = pci_get_drvdata(cdev->pdev);
+ map = ice_alloc_irq(pf, true);
+ if (map.index < 0)
+ return -ENOMEM;
- map = ice_alloc_irq(pf, false);
- if (map.index < 0)
- break;
+ entry->entry = map.index;
+ entry->vector = map.virq;
- entry->entry = map.index;
- entry->vector = map.virq;
- }
- }
return 0;
}
+EXPORT_SYMBOL_GPL(ice_alloc_rdma_qvector);
/**
* ice_free_rdma_qvector - free vector resources reserved for RDMA driver
- * @pf: board private structure to initialize
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
+ * @entry: MSI-X entry to be removed
*/
-static void ice_free_rdma_qvector(struct ice_pf *pf)
+void ice_free_rdma_qvector(struct iidc_rdma_core_dev_info *cdev,
+ struct msix_entry *entry)
{
- int i;
+ struct msi_map map;
+ struct ice_pf *pf;
- if (!pf->msix_entries)
+ if (WARN_ON(!cdev || !entry))
return;
- for (i = 0; i < pf->num_rdma_msix; i++) {
- struct msi_map map;
-
- map.index = pf->msix_entries[i].entry;
- map.virq = pf->msix_entries[i].vector;
- ice_free_irq(pf, map);
- }
+ pf = pci_get_drvdata(cdev->pdev);
- kfree(pf->msix_entries);
- pf->msix_entries = NULL;
+ map.index = entry->entry;
+ map.virq = entry->vector;
+ ice_free_irq(pf, map);
}
+EXPORT_SYMBOL_GPL(ice_free_rdma_qvector);
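A hedged sketch of how an RDMA auxiliary driver might pair the two exported helpers; the surrounding error handling and the request_irq() details are assumptions, not part of this patch:

struct msix_entry entry;
int err;

err = ice_alloc_rdma_qvector(cdev, &entry);
if (err)
	return err;

/* entry.vector is the Linux IRQ number, entry.entry the PF-space index */
/* ... request_irq(entry.vector, ...) ... */

ice_free_rdma_qvector(cdev, &entry);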
/**
* ice_adev_release - function to be mapped to AUX dev's release op
@@ -290,19 +278,23 @@ static void ice_free_rdma_qvector(struct ice_pf *pf)
*/
static void ice_adev_release(struct device *dev)
{
- struct iidc_auxiliary_dev *iadev;
+ struct iidc_rdma_core_auxiliary_dev *iadev;
- iadev = container_of(dev, struct iidc_auxiliary_dev, adev.dev);
+ iadev = container_of(dev, struct iidc_rdma_core_auxiliary_dev,
+ adev.dev);
kfree(iadev);
}
/**
* ice_plug_aux_dev - allocate and register AUX device
* @pf: pointer to pf struct
+ *
+ * Return: Zero on success, error code on failure
*/
int ice_plug_aux_dev(struct ice_pf *pf)
{
- struct iidc_auxiliary_dev *iadev;
+ struct iidc_rdma_core_auxiliary_dev *iadev;
+ struct iidc_rdma_core_dev_info *cdev;
struct auxiliary_device *adev;
int ret;
@@ -312,17 +304,22 @@ int ice_plug_aux_dev(struct ice_pf *pf)
if (!ice_is_rdma_ena(pf))
return 0;
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
+
iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
if (!iadev)
return -ENOMEM;
adev = &iadev->adev;
- iadev->pf = pf;
+ iadev->cdev_info = cdev;
adev->id = pf->aux_idx;
adev->dev.release = ice_adev_release;
adev->dev.parent = &pf->pdev->dev;
- adev->name = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? "roce" : "iwarp";
+ adev->name = cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2 ?
+ "roce" : "iwarp";
ret = auxiliary_device_init(adev);
if (ret) {
@@ -337,8 +334,9 @@ int ice_plug_aux_dev(struct ice_pf *pf)
}
mutex_lock(&pf->adev_mutex);
- pf->adev = adev;
+ cdev->adev = adev;
mutex_unlock(&pf->adev_mutex);
+ set_bit(ICE_FLAG_AUX_DEV_CREATED, pf->flags);
return 0;
}
@@ -350,15 +348,16 @@ void ice_unplug_aux_dev(struct ice_pf *pf)
{
struct auxiliary_device *adev;
+ if (!test_and_clear_bit(ICE_FLAG_AUX_DEV_CREATED, pf->flags))
+ return;
+
mutex_lock(&pf->adev_mutex);
- adev = pf->adev;
- pf->adev = NULL;
+ adev = pf->cdev_info->adev;
+ pf->cdev_info->adev = NULL;
mutex_unlock(&pf->adev_mutex);
- if (adev) {
- auxiliary_device_delete(adev);
- auxiliary_device_uninit(adev);
- }
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
}
/**
@@ -367,7 +366,9 @@ void ice_unplug_aux_dev(struct ice_pf *pf)
*/
int ice_init_rdma(struct ice_pf *pf)
{
+ struct iidc_rdma_priv_dev_info *privd;
struct device *dev = &pf->pdev->dev;
+ struct iidc_rdma_core_dev_info *cdev;
int ret;
if (!ice_is_rdma_ena(pf)) {
@@ -375,30 +376,50 @@ int ice_init_rdma(struct ice_pf *pf)
return 0;
}
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return -ENOMEM;
+
+ pf->cdev_info = cdev;
+
+ privd = kzalloc(sizeof(*privd), GFP_KERNEL);
+ if (!privd) {
+ ret = -ENOMEM;
+ goto err_privd_alloc;
+ }
+
+ privd->pf_id = pf->hw.pf_id;
ret = xa_alloc(&ice_aux_id, &pf->aux_idx, NULL, XA_LIMIT(1, INT_MAX),
GFP_KERNEL);
if (ret) {
dev_err(dev, "Failed to allocate device ID for AUX driver\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_alloc_xa;
}
- /* Reserve vector resources */
- ret = ice_alloc_rdma_qvectors(pf);
- if (ret < 0) {
- dev_err(dev, "failed to reserve vectors for RDMA\n");
- goto err_reserve_rdma_qvector;
- }
- pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
+ cdev->iidc_priv = privd;
+ privd->netdev = pf->vsi[0]->netdev;
+
+ privd->hw_addr = (u8 __iomem *)pf->hw.hw_addr;
+ cdev->pdev = pf->pdev;
+ privd->vport_id = pf->vsi[0]->vsi_num;
+
+ pf->cdev_info->rdma_protocol |= IIDC_RDMA_PROTOCOL_ROCEV2;
+ ice_setup_dcb_qos_info(pf, &privd->qos_info);
ret = ice_plug_aux_dev(pf);
if (ret)
goto err_plug_aux_dev;
return 0;
err_plug_aux_dev:
- ice_free_rdma_qvector(pf);
-err_reserve_rdma_qvector:
- pf->adev = NULL;
+ pf->cdev_info->adev = NULL;
xa_erase(&ice_aux_id, pf->aux_idx);
+err_alloc_xa:
+ kfree(privd);
+err_privd_alloc:
+ kfree(cdev);
+ pf->cdev_info = NULL;
+
return ret;
}
@@ -412,6 +433,8 @@ void ice_deinit_rdma(struct ice_pf *pf)
return;
ice_unplug_aux_dev(pf);
- ice_free_rdma_qvector(pf);
xa_erase(&ice_aux_id, pf->aux_idx);
+ kfree(pf->cdev_info->iidc_priv);
+ kfree(pf->cdev_info);
+ pf->cdev_info = NULL;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_idc_int.h b/drivers/net/ethernet/intel/ice/ice_idc_int.h
index 4b0c86757df9..17dbfcfb6a2a 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc_int.h
+++ b/drivers/net/ethernet/intel/ice/ice_idc_int.h
@@ -4,10 +4,11 @@
#ifndef _ICE_IDC_INT_H_
#define _ICE_IDC_INT_H_
-#include <linux/net/intel/iidc.h>
+#include <linux/net/intel/iidc_rdma.h>
+#include <linux/net/intel/iidc_rdma_ice.h>
struct ice_pf;
-void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event);
+void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_rdma_event *event);
#endif /* !_ICE_IDC_INT_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_irq.c b/drivers/net/ethernet/intel/ice/ice_irq.c
index ad82ff7d1995..30801fd375f0 100644
--- a/drivers/net/ethernet/intel/ice/ice_irq.c
+++ b/drivers/net/ethernet/intel/ice/ice_irq.c
@@ -20,6 +20,19 @@ ice_init_irq_tracker(struct ice_pf *pf, unsigned int max_vectors,
xa_init_flags(&pf->irq_tracker.entries, XA_FLAGS_ALLOC);
}
+static int
+ice_init_virt_irq_tracker(struct ice_pf *pf, u32 base, u32 num_entries)
+{
+ pf->virt_irq_tracker.bm = bitmap_zalloc(num_entries, GFP_KERNEL);
+ if (!pf->virt_irq_tracker.bm)
+ return -ENOMEM;
+
+ pf->virt_irq_tracker.num_entries = num_entries;
+ pf->virt_irq_tracker.base = base;
+
+ return 0;
+}
+
/**
* ice_deinit_irq_tracker - free xarray tracker
* @pf: board private structure
@@ -29,6 +42,11 @@ static void ice_deinit_irq_tracker(struct ice_pf *pf)
xa_destroy(&pf->irq_tracker.entries);
}
+static void ice_deinit_virt_irq_tracker(struct ice_pf *pf)
+{
+ bitmap_free(pf->virt_irq_tracker.bm);
+}
+
/**
* ice_free_irq_res - free a block of resources
* @pf: board private structure
@@ -45,7 +63,7 @@ static void ice_free_irq_res(struct ice_pf *pf, u16 index)
/**
* ice_get_irq_res - get an interrupt resource
* @pf: board private structure
- * @dyn_only: force entry to be dynamically allocated
+ * @dyn_allowed: allow entry to be dynamically allocated
*
* Allocate new irq entry in the free slot of the tracker. Since xarray
* is used, always allocate new entry at the lowest possible index. Set
@@ -53,11 +71,12 @@ static void ice_free_irq_res(struct ice_pf *pf, u16 index)
*
* Returns allocated irq entry or NULL on failure.
*/
-static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only)
+static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf,
+ bool dyn_allowed)
{
- struct xa_limit limit = { .max = pf->irq_tracker.num_entries,
+ struct xa_limit limit = { .max = pf->irq_tracker.num_entries - 1,
.min = 0 };
- unsigned int num_static = pf->irq_tracker.num_static;
+ unsigned int num_static = pf->irq_tracker.num_static - 1;
struct ice_irq_entry *entry;
unsigned int index;
int ret;
@@ -66,9 +85,9 @@ static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only)
if (!entry)
return NULL;
- /* skip preallocated entries if the caller says so */
- if (dyn_only)
- limit.min = num_static;
+ /* restrict to preallocated entries if the caller says so */
+ if (!dyn_allowed)
+ limit.max = num_static;
ret = xa_alloc(&pf->irq_tracker.entries, &index, entry, limit,
GFP_KERNEL);
@@ -78,161 +97,18 @@ static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only)
entry = NULL;
} else {
entry->index = index;
- entry->dynamic = index >= num_static;
+ entry->dynamic = index > num_static;
}
return entry;
}
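Worked example of the new limit handling, assuming 64 preallocated entries at init:

/* irq_tracker.num_static == 64, so the local num_static is 63:
 *   dyn_allowed == false -> xa_alloc() limit [0, 63], entries never dynamic
 *   dyn_allowed == true  -> xa_alloc() limit [0, num_entries - 1],
 *                           entry->dynamic set for any index >= 64
 */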
-/**
- * ice_reduce_msix_usage - Reduce usage of MSI-X vectors
- * @pf: board private structure
- * @v_remain: number of remaining MSI-X vectors to be distributed
- *
- * Reduce the usage of MSI-X vectors when entire request cannot be fulfilled.
- * pf->num_lan_msix and pf->num_rdma_msix values are set based on number of
- * remaining vectors.
- */
-static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain)
-{
- int v_rdma;
-
- if (!ice_is_rdma_ena(pf)) {
- pf->num_lan_msix = v_remain;
- return;
- }
-
- /* RDMA needs at least 1 interrupt in addition to AEQ MSIX */
- v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
-
- if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) {
- dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n");
- clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
-
- pf->num_rdma_msix = 0;
- pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
- } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
- (v_remain - v_rdma < v_rdma)) {
- /* Support minimum RDMA and give remaining vectors to LAN MSIX
- */
- pf->num_rdma_msix = ICE_MIN_RDMA_MSIX;
- pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX;
- } else {
- /* Split remaining MSIX with RDMA after accounting for AEQ MSIX
- */
- pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
- ICE_RDMA_NUM_AEQ_MSIX;
- pf->num_lan_msix = v_remain - pf->num_rdma_msix;
- }
-}
-
-/**
- * ice_ena_msix_range - Request a range of MSIX vectors from the OS
- * @pf: board private structure
- *
- * Compute the number of MSIX vectors wanted and request from the OS. Adjust
- * device usage if there are not enough vectors. Return the number of vectors
- * reserved or negative on failure.
- */
-static int ice_ena_msix_range(struct ice_pf *pf)
+#define ICE_RDMA_AEQ_MSIX 1
+static int ice_get_default_msix_amount(struct ice_pf *pf)
{
- int num_cpus, hw_num_msix, v_other, v_wanted, v_actual;
- struct device *dev = ice_pf_to_dev(pf);
- int err;
-
- hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors;
- num_cpus = num_online_cpus();
-
- /* LAN miscellaneous handler */
- v_other = ICE_MIN_LAN_OICR_MSIX;
-
- /* Flow Director */
- if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
- v_other += ICE_FDIR_MSIX;
-
- /* switchdev */
- v_other += ICE_ESWITCH_MSIX;
-
- v_wanted = v_other;
-
- /* LAN traffic */
- pf->num_lan_msix = num_cpus;
- v_wanted += pf->num_lan_msix;
-
- /* RDMA auxiliary driver */
- if (ice_is_rdma_ena(pf)) {
- pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
- v_wanted += pf->num_rdma_msix;
- }
-
- if (v_wanted > hw_num_msix) {
- int v_remain;
-
- dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n",
- v_wanted, hw_num_msix);
-
- if (hw_num_msix < ICE_MIN_MSIX) {
- err = -ERANGE;
- goto exit_err;
- }
-
- v_remain = hw_num_msix - v_other;
- if (v_remain < ICE_MIN_LAN_TXRX_MSIX) {
- v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX;
- v_remain = ICE_MIN_LAN_TXRX_MSIX;
- }
-
- ice_reduce_msix_usage(pf, v_remain);
- v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other;
-
- dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n",
- pf->num_lan_msix);
- if (ice_is_rdma_ena(pf))
- dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n",
- pf->num_rdma_msix);
- }
-
- /* actually reserve the vectors */
- v_actual = pci_alloc_irq_vectors(pf->pdev, ICE_MIN_MSIX, v_wanted,
- PCI_IRQ_MSIX);
- if (v_actual < 0) {
- dev_err(dev, "unable to reserve MSI-X vectors\n");
- err = v_actual;
- goto exit_err;
- }
-
- if (v_actual < v_wanted) {
- dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
- v_wanted, v_actual);
-
- if (v_actual < ICE_MIN_MSIX) {
- /* error if we can't get minimum vectors */
- pci_free_irq_vectors(pf->pdev);
- err = -ERANGE;
- goto exit_err;
- } else {
- int v_remain = v_actual - v_other;
-
- if (v_remain < ICE_MIN_LAN_TXRX_MSIX)
- v_remain = ICE_MIN_LAN_TXRX_MSIX;
-
- ice_reduce_msix_usage(pf, v_remain);
-
- dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
- pf->num_lan_msix);
-
- if (ice_is_rdma_ena(pf))
- dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
- pf->num_rdma_msix);
- }
- }
-
- return v_actual;
-
-exit_err:
- pf->num_rdma_msix = 0;
- pf->num_lan_msix = 0;
- return err;
+ return ICE_MIN_LAN_OICR_MSIX + num_online_cpus() +
+ (test_bit(ICE_FLAG_FD_ENA, pf->flags) ? ICE_FDIR_MSIX : 0) +
+ (ice_is_rdma_ena(pf) ? num_online_cpus() + ICE_RDMA_AEQ_MSIX : 0);
}
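Worked example for the default vector budget, assuming ICE_MIN_LAN_OICR_MSIX == 1 and ICE_FDIR_MSIX == 2 (their definitions are not shown in this diff):

/* 8 online CPUs, flow director enabled, RDMA enabled:
 *   1 (OICR) + 8 (LAN) + 2 (FDIR) + (8 + 1) (RDMA queues + AEQ) = 20 vectors
 * ice_init_interrupt_scheme() below clamps this to the device's
 * num_msix_vectors capability.
 */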
/**
@@ -243,6 +119,7 @@ void ice_clear_interrupt_scheme(struct ice_pf *pf)
{
pci_free_irq_vectors(pf->pdev);
ice_deinit_irq_tracker(pf);
+ ice_deinit_virt_irq_tracker(pf);
}
/**
@@ -252,27 +129,38 @@ void ice_clear_interrupt_scheme(struct ice_pf *pf)
int ice_init_interrupt_scheme(struct ice_pf *pf)
{
int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
- int vectors, max_vectors;
+ int vectors;
- vectors = ice_ena_msix_range(pf);
+ /* load default PF MSI-X range */
+ if (!pf->msix.min)
+ pf->msix.min = ICE_MIN_MSIX;
- if (vectors < 0)
- return -ENOMEM;
+ if (!pf->msix.max)
+ pf->msix.max = min(total_vectors,
+ ice_get_default_msix_amount(pf));
+
+ pf->msix.total = total_vectors;
+ pf->msix.rest = total_vectors - pf->msix.max;
if (pci_msix_can_alloc_dyn(pf->pdev))
- max_vectors = total_vectors;
+ vectors = pf->msix.min;
else
- max_vectors = vectors;
+ vectors = pf->msix.max;
+
+ vectors = pci_alloc_irq_vectors(pf->pdev, pf->msix.min, vectors,
+ PCI_IRQ_MSIX);
+ if (vectors < 0)
+ return vectors;
- ice_init_irq_tracker(pf, max_vectors, vectors);
+ ice_init_irq_tracker(pf, pf->msix.max, vectors);
- return 0;
+ return ice_init_virt_irq_tracker(pf, pf->msix.max, pf->msix.rest);
}
/**
* ice_alloc_irq - Allocate new interrupt vector
* @pf: board private structure
- * @dyn_only: force dynamic allocation of the interrupt
+ * @dyn_allowed: allow dynamic allocation of the interrupt
*
* Allocate new interrupt vector for a given owner id.
* return struct msi_map with interrupt details and track
@@ -285,27 +173,22 @@ int ice_init_interrupt_scheme(struct ice_pf *pf)
* interrupt will be allocated with pci_msix_alloc_irq_at.
*
* Some callers may only support dynamically allocated interrupts.
- * This is indicated with dyn_only flag.
+ * This is indicated with dyn_allowed flag.
*
* On failure, return map with negative .index. The caller
* is expected to check returned map index.
*
*/
-struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_only)
+struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_allowed)
{
- int sriov_base_vector = pf->sriov_base_vector;
struct msi_map map = { .index = -ENOENT };
struct device *dev = ice_pf_to_dev(pf);
struct ice_irq_entry *entry;
- entry = ice_get_irq_res(pf, dyn_only);
+ entry = ice_get_irq_res(pf, dyn_allowed);
if (!entry)
return map;
- /* fail if we're about to violate SRIOV vectors space */
- if (sriov_base_vector && entry->index >= sriov_base_vector)
- goto exit_free_res;
-
if (pci_msix_can_alloc_dyn(pf->pdev) && entry->dynamic) {
map = pci_msix_alloc_irq_at(pf->pdev, entry->index, NULL);
if (map.index < 0)
@@ -353,26 +236,40 @@ void ice_free_irq(struct ice_pf *pf, struct msi_map map)
}
/**
- * ice_get_max_used_msix_vector - Get the max used interrupt vector
- * @pf: board private structure
+ * ice_virt_get_irqs - get irqs for SR-IOV use case
+ * @pf: pointer to PF structure
+ * @needed: number of irqs to get
*
- * Return index of maximum used interrupt vectors with respect to the
- * beginning of the MSIX table. Take into account that some interrupts
- * may have been dynamically allocated after MSIX was initially enabled.
+ * This returns the first MSI-X vector index in PF space that is used by this
+ * VF. This index is used when accessing PF relative registers such as
+ * GLINT_VECT2FUNC and GLINT_DYN_CTL.
+ * This will always be the OICR index in the AVF driver, so any functionality
+ * using vf->first_vector_idx for queue configuration should account for this
+ * offset.
*/
-int ice_get_max_used_msix_vector(struct ice_pf *pf)
+int ice_virt_get_irqs(struct ice_pf *pf, u32 needed)
{
- unsigned long start, index, max_idx;
- void *entry;
+ int res = bitmap_find_next_zero_area(pf->virt_irq_tracker.bm,
+ pf->virt_irq_tracker.num_entries,
+ 0, needed, 0);
- /* Treat all preallocated interrupts as used */
- start = pf->irq_tracker.num_static;
- max_idx = start - 1;
+ if (res >= pf->virt_irq_tracker.num_entries)
+ return -ENOENT;
- xa_for_each_start(&pf->irq_tracker.entries, index, entry, start) {
- if (index > max_idx)
- max_idx = index;
- }
+ bitmap_set(pf->virt_irq_tracker.bm, res, needed);
+
+ /* conversion from number in bitmap to global irq index */
+ return res + pf->virt_irq_tracker.base;
+}
- return max_idx;
+/**
+ * ice_virt_free_irqs - free irqs used by the VF
+ * @pf: pointer to PF structure
+ * @index: first index to be freed
+ * @irqs: number of irqs to free
+ */
+void ice_virt_free_irqs(struct ice_pf *pf, u32 index, u32 irqs)
+{
+ bitmap_clear(pf->virt_irq_tracker.bm, index - pf->virt_irq_tracker.base,
+ irqs);
}
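A minimal caller sketch pairing the two helpers above (vf_msix and the rest of the VF setup are assumptions): ice_virt_get_irqs() returns a PF-space vector index, and the same index is later handed back to ice_virt_free_irqs(), which subtracts virt_irq_tracker.base to recover the bitmap offset.

int first_irq = ice_virt_get_irqs(pf, vf_msix);

if (first_irq < 0)
	return first_irq;

/* ... program GLINT_VECT2FUNC/GLINT_DYN_CTL relative to first_irq ... */

ice_virt_free_irqs(pf, first_irq, vf_msix);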
diff --git a/drivers/net/ethernet/intel/ice/ice_irq.h b/drivers/net/ethernet/intel/ice/ice_irq.h
index f35efc08575e..b2f9dbafd57e 100644
--- a/drivers/net/ethernet/intel/ice/ice_irq.h
+++ b/drivers/net/ethernet/intel/ice/ice_irq.h
@@ -15,11 +15,22 @@ struct ice_irq_tracker {
u16 num_static; /* preallocated entries */
};
+struct ice_virt_irq_tracker {
+ unsigned long *bm; /* bitmap to track irq usage */
+ u32 num_entries;
+ /* First MSIX vector used by SR-IOV VFs. Calculated by subtracting the
+ * number of MSIX vectors needed for all SR-IOV VFs from the number of
+ * MSIX vectors allowed on this PF.
+ */
+ u32 base;
+};
+
int ice_init_interrupt_scheme(struct ice_pf *pf);
void ice_clear_interrupt_scheme(struct ice_pf *pf);
struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_only);
void ice_free_irq(struct ice_pf *pf, struct msi_map map);
-int ice_get_max_used_msix_vector(struct ice_pf *pf);
+int ice_virt_get_irqs(struct ice_pf *pf, u32 needed);
+void ice_virt_free_irqs(struct ice_pf *pf, u32 index, u32 irqs);
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 1ccb572ce285..d2576d606e10 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -10,12 +10,17 @@
#define ICE_LAG_RES_SHARED BIT(14)
#define ICE_LAG_RES_VALID BIT(15)
-#define LACP_TRAIN_PKT_LEN 16
-static const u8 lacp_train_pkt[LACP_TRAIN_PKT_LEN] = { 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0,
- 0x88, 0x09, 0, 0 };
+#define ICE_TRAIN_PKT_LEN 16
+static const u8 lacp_train_pkt[ICE_TRAIN_PKT_LEN] = { 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0x88, 0x09, 0, 0 };
+static const u8 act_act_train_pkt[ICE_TRAIN_PKT_LEN] = { 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0 };
#define ICE_RECIPE_LEN 64
+#define ICE_LAG_SRIOV_CP_RECIPE 10
+
static const u8 ice_dflt_vsi_rcp[ICE_RECIPE_LEN] = {
0x05, 0, 0, 0, 0x20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0x85, 0, 0x01, 0, 0, 0, 0xff, 0xff, 0x08, 0, 0, 0, 0, 0, 0, 0,
@@ -46,10 +51,10 @@ static void ice_lag_set_primary(struct ice_lag *lag)
}
/**
- * ice_lag_set_backup - set PF LAG state to Backup
+ * ice_lag_set_bkup - set PF LAG state to Backup
* @lag: LAG info struct
*/
-static void ice_lag_set_backup(struct ice_lag *lag)
+static void ice_lag_set_bkup(struct ice_lag *lag)
{
struct ice_pf *pf = lag->pf;
@@ -99,6 +104,28 @@ static bool netif_is_same_ice(struct ice_pf *pf, struct net_device *netdev)
}
/**
+ * ice_lag_config_eswitch - configure eswitch to work with LAG
+ * @lag: lag info struct
+ * @netdev: active network interface device struct
+ *
+ * Updates all port representors in eswitch to use @netdev for Tx.
+ *
+ * Configures the netdev to keep dst metadata (also used in representor Tx).
+ * This is required for an uplink without switchdev mode configured.
+ */
+static void ice_lag_config_eswitch(struct ice_lag *lag,
+ struct net_device *netdev)
+{
+ struct ice_repr *repr;
+ unsigned long id;
+
+ xa_for_each(&lag->pf->eswitch.reprs, id, repr)
+ repr->dst->u.port_info.lower_dev = netdev;
+
+ netif_keep_dst(netdev);
+}
+
+/**
* ice_netdev_to_lag - return pointer to associated lag struct from netdev
* @netdev: pointer to net_device struct to query
*/
@@ -210,13 +237,12 @@ ice_lag_cfg_fltr(struct ice_lag *lag, u32 act, u16 recipe_id, u16 *rule_idx,
u8 direction, bool add)
{
struct ice_sw_rule_lkup_rx_tx *s_rule;
+ struct ice_hw *hw = &lag->pf->hw;
u16 s_rule_sz, vsi_num;
- struct ice_hw *hw;
u8 *eth_hdr;
u32 opc;
int err;
- hw = &lag->pf->hw;
vsi_num = ice_get_hw_vsi_num(hw, 0);
s_rule_sz = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule);
@@ -314,26 +340,15 @@ ice_lag_cfg_drop_fltr(struct ice_lag *lag, bool add)
}
/**
- * ice_lag_cfg_pf_fltrs - set filters up for new active port
+ * ice_lag_cfg_pf_fltrs_act_bkup - set filters up for new active port
* @lag: local interfaces lag struct
- * @ptr: opaque data containing notifier event
+ * @bonding_info: netdev event bonding info
*/
static void
-ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr)
+ice_lag_cfg_pf_fltrs_act_bkup(struct ice_lag *lag,
+ struct netdev_bonding_info *bonding_info)
{
- struct netdev_notifier_bonding_info *info;
- struct netdev_bonding_info *bonding_info;
- struct net_device *event_netdev;
- struct device *dev;
-
- event_netdev = netdev_notifier_info_to_dev(ptr);
- /* not for this netdev */
- if (event_netdev != lag->netdev)
- return;
-
- info = (struct netdev_notifier_bonding_info *)ptr;
- bonding_info = &info->bonding_info;
- dev = ice_pf_to_dev(lag->pf);
+ struct device *dev = ice_pf_to_dev(lag->pf);
/* interface not active - remove old default VSI rule */
if (bonding_info->slave.state && lag->pf_rx_rule_id) {
@@ -354,6 +369,105 @@ ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr)
}
/**
+ * ice_lag_cfg_lp_fltr - configure lport filters
+ * @lag: local interface's lag struct
+ * @add: add or remove rule
+ * @cp: control packet only or general PF lport rule
+ */
+static void
+ice_lag_cfg_lp_fltr(struct ice_lag *lag, bool add, bool cp)
+{
+ struct ice_sw_rule_lkup_rx_tx *s_rule;
+ struct ice_vsi *vsi = lag->pf->vsi[0];
+ u16 buf_len, opc;
+
+ buf_len = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule, ICE_TRAIN_PKT_LEN);
+ s_rule = kzalloc(buf_len, GFP_KERNEL);
+ if (!s_rule) {
+ netdev_warn(lag->netdev, "-ENOMEM error configuring CP filter\n");
+ return;
+ }
+
+ if (add) {
+ if (cp) {
+ s_rule->recipe_id =
+ cpu_to_le16(ICE_LAG_SRIOV_CP_RECIPE);
+ memcpy(s_rule->hdr_data, lacp_train_pkt,
+ ICE_TRAIN_PKT_LEN);
+ } else {
+ s_rule->recipe_id = cpu_to_le16(lag->act_act_recipe);
+ memcpy(s_rule->hdr_data, act_act_train_pkt,
+ ICE_TRAIN_PKT_LEN);
+ }
+
+ s_rule->src = cpu_to_le16(vsi->port_info->lport);
+ s_rule->act = cpu_to_le32(ICE_FWD_TO_VSI |
+ ICE_SINGLE_ACT_LAN_ENABLE |
+ ICE_SINGLE_ACT_VALID_BIT |
+ FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M,
+ vsi->vsi_num));
+ s_rule->hdr_len = cpu_to_le16(ICE_TRAIN_PKT_LEN);
+ s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
+ opc = ice_aqc_opc_add_sw_rules;
+ } else {
+ opc = ice_aqc_opc_remove_sw_rules;
+ if (cp)
+ s_rule->index = cpu_to_le16(lag->cp_rule_idx);
+ else
+ s_rule->index = cpu_to_le16(lag->act_act_rule_idx);
+ }
+ if (ice_aq_sw_rules(&lag->pf->hw, s_rule, buf_len, 1, opc, NULL)) {
+ netdev_warn(lag->netdev, "Error %s %s rule for aggregate\n",
+ add ? "ADDING" : "REMOVING",
+ cp ? "CONTROL PACKET" : "LPORT");
+ goto err_cp_free;
+ }
+
+ if (add) {
+ if (cp)
+ lag->cp_rule_idx = le16_to_cpu(s_rule->index);
+ else
+ lag->act_act_rule_idx = le16_to_cpu(s_rule->index);
+ } else {
+ if (cp)
+ lag->cp_rule_idx = 0;
+ else
+ lag->act_act_rule_idx = 0;
+ }
+
+err_cp_free:
+ kfree(s_rule);
+}
+
+/**
+ * ice_lag_cfg_pf_fltrs - set filters up for PF traffic
+ * @lag: local interfaces lag struct
+ * @ptr: opaque data containing notifier event
+ */
+static void
+ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr)
+{
+ struct netdev_notifier_bonding_info *info = ptr;
+ struct netdev_bonding_info *bonding_info;
+ struct net_device *event_netdev;
+
+ event_netdev = netdev_notifier_info_to_dev(ptr);
+ if (event_netdev != lag->netdev)
+ return;
+
+ bonding_info = &info->bonding_info;
+
+ if (lag->bond_aa) {
+ if (lag->need_fltr_cfg) {
+ ice_lag_cfg_lp_fltr(lag, true, false);
+ lag->need_fltr_cfg = false;
+ }
+ } else {
+ ice_lag_cfg_pf_fltrs_act_bkup(lag, bonding_info);
+ }
+}
+
+/**
* ice_display_lag_info - print LAG info
* @lag: LAG info struct
*/
@@ -402,12 +516,11 @@ static u16
ice_lag_qbuf_recfg(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *qbuf,
u16 vsi_num, u16 numq, u8 tc)
{
+ struct ice_pf *pf = hw->back;
struct ice_q_ctx *q_ctx;
u16 qid, count = 0;
- struct ice_pf *pf;
int i;
- pf = hw->back;
for (i = 0; i < numq; i++) {
q_ctx = ice_get_lan_q_ctx(hw, vsi_num, tc, i);
if (!q_ctx) {
@@ -577,7 +690,7 @@ ice_lag_move_vf_node_tc(struct ice_lag *lag, u8 oldport, u8 newport,
}
if (ice_aq_cfg_lan_txq(&lag->pf->hw, qbuf, qbuf_size, valq, oldport,
- newport, NULL)) {
+ newport, ICE_AQC_Q_CFG_TC_CHNG, NULL)) {
dev_warn(dev, "Failure to configure queues for LAG failover\n");
goto qbuf_err;
}
@@ -677,54 +790,6 @@ ice_lag_move_single_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport,
}
/**
- * ice_lag_move_new_vf_nodes - Move Tx scheduling nodes for a VF if required
- * @vf: the VF to move Tx nodes for
- *
- * Called just after configuring new VF queues. Check whether the VF Tx
- * scheduling nodes need to be updated to fail over to the active port. If so,
- * move them now.
- */
-void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
-{
- struct ice_lag_netdev_list ndlist;
- u8 pri_port, act_port;
- struct ice_lag *lag;
- struct ice_vsi *vsi;
- struct ice_pf *pf;
-
- vsi = ice_get_vf_vsi(vf);
-
- if (WARN_ON(!vsi))
- return;
-
- if (WARN_ON(vsi->type != ICE_VSI_VF))
- return;
-
- pf = vf->pf;
- lag = pf->lag;
-
- mutex_lock(&pf->lag_mutex);
- if (!lag->bonded)
- goto new_vf_unlock;
-
- pri_port = pf->hw.port_info->lport;
- act_port = lag->active_port;
-
- if (lag->upper_netdev)
- ice_lag_build_netdev_list(lag, &ndlist);
-
- if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) &&
- lag->bonded && lag->primary && pri_port != act_port &&
- !list_empty(lag->netdev_head))
- ice_lag_move_single_vf_nodes(lag, pri_port, act_port, vsi->idx);
-
- ice_lag_destroy_netdev_list(lag, &ndlist);
-
-new_vf_unlock:
- mutex_unlock(&pf->lag_mutex);
-}
-
-/**
* ice_lag_move_vf_nodes - move Tx scheduling nodes for all VFs to new port
* @lag: lag info struct
* @oldport: lport of previous interface
@@ -767,59 +832,60 @@ void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt)
ice_lag_destroy_netdev_list(lag, &ndlist);
}
-#define ICE_LAG_SRIOV_CP_RECIPE 10
-#define ICE_LAG_SRIOV_TRAIN_PKT_LEN 16
-
/**
- * ice_lag_cfg_cp_fltr - configure filter for control packets
- * @lag: local interface's lag struct
- * @add: add or remove rule
+ * ice_lag_prepare_vf_reset - helper to adjust vf lag for reset
+ * @lag: lag struct for interface that owns VF
+ *
+ * Context: must be called with the lag_mutex lock held.
+ *
+ * Return: active lport value or ICE_LAG_INVALID_PORT if nothing moved.
*/
-static void
-ice_lag_cfg_cp_fltr(struct ice_lag *lag, bool add)
+u8 ice_lag_prepare_vf_reset(struct ice_lag *lag)
{
- struct ice_sw_rule_lkup_rx_tx *s_rule = NULL;
- struct ice_vsi *vsi;
- u16 buf_len, opc;
-
- vsi = lag->pf->vsi[0];
-
- buf_len = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule,
- ICE_LAG_SRIOV_TRAIN_PKT_LEN);
- s_rule = kzalloc(buf_len, GFP_KERNEL);
- if (!s_rule) {
- netdev_warn(lag->netdev, "-ENOMEM error configuring CP filter\n");
- return;
- }
-
- if (add) {
- s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
- s_rule->recipe_id = cpu_to_le16(ICE_LAG_SRIOV_CP_RECIPE);
- s_rule->src = cpu_to_le16(vsi->port_info->lport);
- s_rule->act = cpu_to_le32(ICE_FWD_TO_VSI |
- ICE_SINGLE_ACT_LAN_ENABLE |
- ICE_SINGLE_ACT_VALID_BIT |
- FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, vsi->vsi_num));
- s_rule->hdr_len = cpu_to_le16(ICE_LAG_SRIOV_TRAIN_PKT_LEN);
- memcpy(s_rule->hdr_data, lacp_train_pkt, LACP_TRAIN_PKT_LEN);
- opc = ice_aqc_opc_add_sw_rules;
- } else {
- opc = ice_aqc_opc_remove_sw_rules;
- s_rule->index = cpu_to_le16(lag->cp_rule_idx);
- }
- if (ice_aq_sw_rules(&lag->pf->hw, s_rule, buf_len, 1, opc, NULL)) {
- netdev_warn(lag->netdev, "Error %s CP rule for fail-over\n",
- add ? "ADDING" : "REMOVING");
- goto cp_free;
+ u8 pri_prt, act_prt;
+
+ if (lag && lag->bonded && lag->primary && lag->upper_netdev) {
+ if (!lag->bond_aa) {
+ pri_prt = lag->pf->hw.port_info->lport;
+ act_prt = lag->active_port;
+ if (act_prt != pri_prt &&
+ act_prt != ICE_LAG_INVALID_PORT) {
+ ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
+ return act_prt;
+ }
+ } else {
+ if (lag->port_bitmap & ICE_LAGS_M) {
+ lag->port_bitmap &= ~ICE_LAGS_M;
+ ice_lag_aa_failover(lag, ICE_LAGP_IDX, NULL);
+ lag->port_bitmap |= ICE_LAGS_M;
+ }
+ }
}
- if (add)
- lag->cp_rule_idx = le16_to_cpu(s_rule->index);
- else
- lag->cp_rule_idx = 0;
+ return ICE_LAG_INVALID_PORT;
+}
-cp_free:
- kfree(s_rule);
+/**
+ * ice_lag_complete_vf_reset - helper for lag after reset
+ * @lag: lag struct for primary interface
+ * @act_prt: which port should be active for lag
+ *
+ * Context: must be called while holding the lag_mutex.
+ */
+void ice_lag_complete_vf_reset(struct ice_lag *lag, u8 act_prt)
+{
+ u8 pri_prt;
+
+ if (lag && lag->bonded && lag->primary) {
+ if (!lag->bond_aa) {
+ pri_prt = lag->pf->hw.port_info->lport;
+ if (act_prt != ICE_LAG_INVALID_PORT)
+ ice_lag_move_vf_nodes_cfg(lag, pri_prt,
+ act_prt);
+ } else {
+ ice_lag_aa_failover(lag, ICE_LAGS_IDX, NULL);
+ }
+ }
}
/**
@@ -831,13 +897,12 @@ cp_free:
*/
static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
{
- struct netdev_notifier_bonding_info *info;
+ struct netdev_notifier_bonding_info *info = ptr;
struct netdev_bonding_info *bonding_info;
struct net_device *event_netdev;
const char *lag_netdev_name;
event_netdev = netdev_notifier_info_to_dev(ptr);
- info = ptr;
lag_netdev_name = netdev_name(lag->netdev);
bonding_info = &info->bonding_info;
@@ -855,7 +920,7 @@ static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
}
if (bonding_info->slave.state)
- ice_lag_set_backup(lag);
+ ice_lag_set_bkup(lag);
else
ice_lag_set_primary(lag);
@@ -864,6 +929,295 @@ lag_out:
}
/**
+ * ice_lag_aa_qbuf_recfg - fill a single queue buffer for recfg cmd
+ * @hw: HW struct that contains the queue context
+ * @qbuf: pointer to single queue buffer
+ * @vsi_num: index of the VF VSI in PF space
+ * @qnum: queue index
+ *
+ * Return: Zero on success, error code on failure.
+ */
+static int
+ice_lag_aa_qbuf_recfg(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *qbuf,
+ u16 vsi_num, int qnum)
+{
+ struct ice_pf *pf = hw->back;
+ struct ice_q_ctx *q_ctx;
+ u16 q_id;
+
+ q_ctx = ice_get_lan_q_ctx(hw, vsi_num, 0, qnum);
+ if (!q_ctx) {
+ dev_dbg(ice_hw_to_dev(hw), "LAG queue %d no Q context\n", qnum);
+ return -ENOENT;
+ }
+
+ if (q_ctx->q_teid == ICE_INVAL_TEID) {
+ dev_dbg(ice_hw_to_dev(hw), "LAG queue %d INVAL TEID\n", qnum);
+ return -EINVAL;
+ }
+
+ if (q_ctx->q_handle == ICE_INVAL_Q_HANDLE) {
+ dev_dbg(ice_hw_to_dev(hw), "LAG queue %d INVAL Q HANDLE\n", qnum);
+ return -EINVAL;
+ }
+
+ q_id = pf->vsi[vsi_num]->txq_map[q_ctx->q_handle];
+ qbuf->queue_info[0].q_handle = cpu_to_le16(q_id);
+ qbuf->queue_info[0].tc = 0;
+ qbuf->queue_info[0].q_teid = cpu_to_le32(q_ctx->q_teid);
+
+ return 0;
+}
+
+/**
+ * ice_lag_aa_move_vf_qs - Move some/all VF queues to destination
+ * @lag: primary interface's lag struct
+ * @dest: index of destination port
+ * @vsi_num: index of VF VSI in PF space
+ * @all: if true move all queues to destination
+ * @odd: VF wide q indicator for odd/even
+ * @e_pf: PF struct for the event interface
+ *
+ * The "all" parameter controls whether we are splitting the queues
+ * between the two interfaces or moving them all to the destination
+ * interface.
+ */
+static void ice_lag_aa_move_vf_qs(struct ice_lag *lag, u8 dest, u16 vsi_num,
+ bool all, bool *odd, struct ice_pf *e_pf)
+{
+ DEFINE_RAW_FLEX(struct ice_aqc_cfg_txqs_buf, qbuf, queue_info, 1);
+ struct ice_hw *old_hw, *new_hw, *pri_hw, *sec_hw;
+ struct device *dev = ice_pf_to_dev(lag->pf);
+ struct ice_vsi_ctx *pv_ctx, *sv_ctx;
+ struct ice_lag_netdev_list ndlist;
+ u16 num_q, qbuf_size, sec_vsi_num;
+ u8 pri_lport, sec_lport;
+ u32 pvf_teid, svf_teid;
+ u16 vf_id;
+
+ vf_id = lag->pf->vsi[vsi_num]->vf->vf_id;
+ /* If sec_vf[] not defined, then no second interface to share with */
+ if (lag->sec_vf[vf_id])
+ sec_vsi_num = lag->sec_vf[vf_id]->idx;
+ else
+ return;
+
+ pri_lport = lag->bond_lport_pri;
+ sec_lport = lag->bond_lport_sec;
+
+ if (pri_lport == ICE_LAG_INVALID_PORT ||
+ sec_lport == ICE_LAG_INVALID_PORT)
+ return;
+
+ if (!e_pf)
+ ice_lag_build_netdev_list(lag, &ndlist);
+
+ pri_hw = &lag->pf->hw;
+ if (e_pf && lag->pf != e_pf)
+ sec_hw = &e_pf->hw;
+ else
+ sec_hw = ice_lag_find_hw_by_lport(lag, sec_lport);
+
+ if (!pri_hw || !sec_hw)
+ return;
+
+ if (dest == ICE_LAGP_IDX) {
+ struct ice_vsi *vsi;
+
+ vsi = ice_get_main_vsi(lag->pf);
+ if (!vsi)
+ return;
+
+ old_hw = sec_hw;
+ new_hw = pri_hw;
+ ice_lag_config_eswitch(lag, vsi->netdev);
+ } else {
+ struct ice_pf *sec_pf = sec_hw->back;
+ struct ice_vsi *vsi;
+
+ vsi = ice_get_main_vsi(sec_pf);
+ if (!vsi)
+ return;
+
+ old_hw = pri_hw;
+ new_hw = sec_hw;
+ ice_lag_config_eswitch(lag, vsi->netdev);
+ }
+
+ pv_ctx = ice_get_vsi_ctx(pri_hw, vsi_num);
+ if (!pv_ctx) {
+ dev_warn(dev, "Unable to locate primary VSI %d context for LAG failover\n",
+ vsi_num);
+ return;
+ }
+
+ sv_ctx = ice_get_vsi_ctx(sec_hw, sec_vsi_num);
+ if (!sv_ctx) {
+ dev_warn(dev, "Unable to locate secondary VSI %d context for LAG failover\n",
+ vsi_num);
+ return;
+ }
+
+ num_q = pv_ctx->num_lan_q_entries[0];
+ qbuf_size = __struct_size(qbuf);
+
+ /* Suspend traffic for primary VSI VF */
+ pvf_teid = le32_to_cpu(pv_ctx->sched.vsi_node[0]->info.node_teid);
+ ice_sched_suspend_resume_elems(pri_hw, 1, &pvf_teid, true);
+
+ /* Suspend traffic for secondary VSI VF */
+ svf_teid = le32_to_cpu(sv_ctx->sched.vsi_node[0]->info.node_teid);
+ ice_sched_suspend_resume_elems(sec_hw, 1, &svf_teid, true);
+
+ for (int i = 0; i < num_q; i++) {
+ struct ice_sched_node *n_prt, *q_node, *parent;
+ struct ice_port_info *pi, *new_pi;
+ struct ice_vsi_ctx *src_ctx;
+ struct ice_sched_node *p;
+ struct ice_q_ctx *q_ctx;
+ u16 dst_vsi_num;
+
+ pi = old_hw->port_info;
+ new_pi = new_hw->port_info;
+
+ *odd = !(*odd);
+ if ((dest == ICE_LAGP_IDX && *odd && !all) ||
+ (dest == ICE_LAGS_IDX && !(*odd) && !all) ||
+ lag->q_home[vf_id][i] == dest)
+ continue;
+
+ if (dest == ICE_LAGP_IDX)
+ dst_vsi_num = vsi_num;
+ else
+ dst_vsi_num = sec_vsi_num;
+
+ n_prt = ice_sched_get_free_qparent(new_hw->port_info,
+ dst_vsi_num, 0,
+ ICE_SCHED_NODE_OWNER_LAN);
+ if (!n_prt)
+ continue;
+
+ q_ctx = ice_get_lan_q_ctx(pri_hw, vsi_num, 0, i);
+ if (!q_ctx)
+ continue;
+
+ if (dest == ICE_LAGP_IDX)
+ src_ctx = sv_ctx;
+ else
+ src_ctx = pv_ctx;
+
+ q_node = ice_sched_find_node_by_teid(src_ctx->sched.vsi_node[0],
+ q_ctx->q_teid);
+ if (!q_node)
+ continue;
+
+ qbuf->src_parent_teid = q_node->info.parent_teid;
+ qbuf->dst_parent_teid = n_prt->info.node_teid;
+
+ /* Move the node in the HW/FW */
+ if (ice_lag_aa_qbuf_recfg(pri_hw, qbuf, vsi_num, i))
+ continue;
+
+ if (dest == ICE_LAGP_IDX)
+ ice_aq_cfg_lan_txq(pri_hw, qbuf, qbuf_size, 1,
+ sec_lport, pri_lport,
+ ICE_AQC_Q_CFG_MOVE_TC_CHNG,
+ NULL);
+ else
+ ice_aq_cfg_lan_txq(pri_hw, qbuf, qbuf_size, 1,
+ pri_lport, sec_lport,
+ ICE_AQC_Q_CFG_MOVE_TC_CHNG,
+ NULL);
+
+ /* Move the node in the SW */
+ parent = q_node->parent;
+ if (!parent)
+ continue;
+
+ for (int n = 0; n < parent->num_children; n++) {
+ int j;
+
+ if (parent->children[n] != q_node)
+ continue;
+
+ for (j = n + 1; j < parent->num_children;
+ j++) {
+ parent->children[j - 1] =
+ parent->children[j];
+ }
+ parent->children[j] = NULL;
+ parent->num_children--;
+ break;
+ }
+
+ p = pi->sib_head[0][q_node->tx_sched_layer];
+ while (p) {
+ if (p->sibling == q_node) {
+ p->sibling = q_node->sibling;
+ break;
+ }
+ p = p->sibling;
+ }
+
+ if (pi->sib_head[0][q_node->tx_sched_layer] == q_node)
+ pi->sib_head[0][q_node->tx_sched_layer] =
+ q_node->sibling;
+
+ q_node->parent = n_prt;
+ q_node->info.parent_teid = n_prt->info.node_teid;
+ q_node->sibling = NULL;
+ p = new_pi->sib_head[0][q_node->tx_sched_layer];
+ if (p) {
+ while (p) {
+ if (!p->sibling) {
+ p->sibling = q_node;
+ break;
+ }
+ p = p->sibling;
+ }
+ } else {
+ new_pi->sib_head[0][q_node->tx_sched_layer] =
+ q_node;
+ }
+
+ n_prt->children[n_prt->num_children++] = q_node;
+ lag->q_home[vf_id][i] = dest;
+ }
+
+ ice_sched_suspend_resume_elems(pri_hw, 1, &pvf_teid, false);
+ ice_sched_suspend_resume_elems(sec_hw, 1, &svf_teid, false);
+
+ if (!e_pf)
+ ice_lag_destroy_netdev_list(lag, &ndlist);
+}
+
+/**
+ * ice_lag_aa_failover - move VF queues in A/A mode
+ * @lag: primary lag struct
+ * @dest: index of destination port
+ * @e_pf: PF struct for event port
+ */
+void ice_lag_aa_failover(struct ice_lag *lag, u8 dest, struct ice_pf *e_pf)
+{
+ bool odd = true, all = false;
+ int i;
+
+ /* Primary can be a target if down (cleanup), but secondary can't */
+ if (dest == ICE_LAGS_IDX && !(lag->port_bitmap & ICE_LAGS_M))
+ return;
+
+ /* Move all queues to a destination if only one port is active,
+ * or no ports are active and dest is primary.
+ */
+ if ((lag->port_bitmap ^ (ICE_LAGP_M | ICE_LAGS_M)) ||
+ (!lag->port_bitmap && dest == ICE_LAGP_IDX))
+ all = true;
+
+ ice_for_each_vsi(lag->pf, i)
+ if (lag->pf->vsi[i] && lag->pf->vsi[i]->type == ICE_VSI_VF)
+ ice_lag_aa_move_vf_qs(lag, dest, i, all, &odd, e_pf);
+}
+
+/**
* ice_lag_reclaim_vf_tc - move scheduling nodes back to primary interface
* @lag: primary interface lag struct
* @src_hw: HW struct current node location
@@ -879,13 +1233,12 @@ ice_lag_reclaim_vf_tc(struct ice_lag *lag, struct ice_hw *src_hw, u16 vsi_num,
u16 numq, valq, num_moved, qbuf_size;
u16 buf_size = __struct_size(buf);
struct ice_aqc_cfg_txqs_buf *qbuf;
+ struct ice_hw *hw = &lag->pf->hw;
struct ice_sched_node *n_prt;
__le32 teid, parent_teid;
struct ice_vsi_ctx *ctx;
- struct ice_hw *hw;
u32 tmp_teid;
- hw = &lag->pf->hw;
ctx = ice_get_vsi_ctx(hw, vsi_num);
if (!ctx) {
dev_warn(dev, "Unable to locate VSI context for LAG reclaim\n");
@@ -926,7 +1279,7 @@ ice_lag_reclaim_vf_tc(struct ice_lag *lag, struct ice_hw *src_hw, u16 vsi_num,
if (ice_aq_cfg_lan_txq(hw, qbuf, qbuf_size, numq,
src_hw->port_info->lport, hw->port_info->lport,
- NULL)) {
+ ICE_AQC_Q_CFG_TC_CHNG, NULL)) {
dev_warn(dev, "Failure to configure queues for LAG failover\n");
goto reclaim_qerr;
}
@@ -997,14 +1350,15 @@ static void ice_lag_link(struct ice_lag *lag)
lag->bonded = true;
lag->role = ICE_LAG_UNSET;
+ lag->need_fltr_cfg = true;
netdev_info(lag->netdev, "Shared SR-IOV resources in bond are active\n");
}
/**
- * ice_lag_unlink - handle unlink event
+ * ice_lag_act_bkup_unlink - handle unlink event for A/B bond
* @lag: LAG info struct
*/
-static void ice_lag_unlink(struct ice_lag *lag)
+static void ice_lag_act_bkup_unlink(struct ice_lag *lag)
{
u8 pri_port, act_port, loc_port;
struct ice_pf *pf = lag->pf;
@@ -1021,6 +1375,9 @@ static void ice_lag_unlink(struct ice_lag *lag)
ice_lag_move_vf_nodes(lag, act_port, pri_port);
lag->primary = false;
lag->active_port = ICE_LAG_INVALID_PORT;
+
+ /* Config primary's eswitch back to normal operation. */
+ ice_lag_config_eswitch(lag, lag->netdev);
} else {
struct ice_lag *primary_lag;
@@ -1037,10 +1394,32 @@ static void ice_lag_unlink(struct ice_lag *lag)
}
}
}
+}
- lag->bonded = false;
- lag->role = ICE_LAG_NONE;
- lag->upper_netdev = NULL;
+/**
+ * ice_lag_aa_unlink - handle unlink event for Active-Active bond
+ * @lag: LAG info struct
+ */
+static void ice_lag_aa_unlink(struct ice_lag *lag)
+{
+ struct ice_lag *pri_lag;
+
+ if (lag->primary) {
+ pri_lag = lag;
+ lag->port_bitmap &= ~ICE_LAGP_M;
+ } else {
+ pri_lag = ice_lag_find_primary(lag);
+ if (pri_lag)
+ pri_lag->port_bitmap &= ICE_LAGS_M;
+ }
+
+ if (pri_lag) {
+ ice_lag_aa_failover(pri_lag, ICE_LAGP_IDX, lag->pf);
+ if (lag->primary)
+ pri_lag->bond_lport_pri = ICE_LAG_INVALID_PORT;
+ else
+ pri_lag->bond_lport_sec = ICE_LAG_INVALID_PORT;
+ }
}
/**
@@ -1056,10 +1435,20 @@ static void ice_lag_link_unlink(struct ice_lag *lag, void *ptr)
if (netdev != lag->netdev)
return;
- if (info->linking)
+ if (info->linking) {
ice_lag_link(lag);
- else
- ice_lag_unlink(lag);
+ } else {
+ if (lag->bond_aa)
+ ice_lag_aa_unlink(lag);
+ else
+ ice_lag_act_bkup_unlink(lag);
+
+ lag->bonded = false;
+ lag->role = ICE_LAG_NONE;
+ lag->upper_netdev = NULL;
+ lag->bond_aa = false;
+ lag->need_fltr_cfg = false;
+ }
}
/**
@@ -1077,7 +1466,7 @@ ice_lag_set_swid(u16 primary_swid, struct ice_lag *local_lag,
{
struct ice_aqc_alloc_free_res_elem *buf;
struct ice_aqc_set_port_params *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 buf_len, swid;
int status, i;
@@ -1125,7 +1514,7 @@ ice_lag_set_swid(u16 primary_swid, struct ice_lag *local_lag,
else
swid = local_lag->pf->hw.port_info->sw_id;
- cmd = &desc.params.set_port_params;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
cmd->swid = cpu_to_le16(ICE_AQC_PORT_SWID_VALID | swid);
@@ -1157,11 +1546,8 @@ ice_lag_set_swid(u16 primary_swid, struct ice_lag *local_lag,
*/
static void ice_lag_primary_swid(struct ice_lag *lag, bool link)
{
- struct ice_hw *hw;
- u16 swid;
-
- hw = &lag->pf->hw;
- swid = hw->port_info->sw_id;
+ struct ice_hw *hw = &lag->pf->hw;
+ u16 swid = hw->port_info->sw_id;
if (ice_share_res(hw, ICE_AQC_RES_TYPE_SWID, link, swid))
dev_warn(ice_pf_to_dev(lag->pf), "Failure to set primary interface shared status\n");
@@ -1174,12 +1560,10 @@ static void ice_lag_primary_swid(struct ice_lag *lag, bool link)
*/
static void ice_lag_add_prune_list(struct ice_lag *lag, struct ice_pf *event_pf)
{
- u16 num_vsi, rule_buf_sz, vsi_list_id, event_vsi_num, prim_vsi_idx;
- struct ice_sw_rule_vsi_list *s_rule = NULL;
+ u16 rule_buf_sz, vsi_list_id, event_vsi_num, prim_vsi_idx, num_vsi = 1;
+ struct ice_sw_rule_vsi_list *s_rule;
struct device *dev;
- num_vsi = 1;
-
dev = ice_pf_to_dev(lag->pf);
event_vsi_num = event_pf->vsi[0]->vsi_num;
prim_vsi_idx = lag->pf->vsi[0]->idx;
@@ -1215,12 +1599,10 @@ static void ice_lag_add_prune_list(struct ice_lag *lag, struct ice_pf *event_pf)
*/
static void ice_lag_del_prune_list(struct ice_lag *lag, struct ice_pf *event_pf)
{
- u16 num_vsi, vsi_num, vsi_idx, rule_buf_sz, vsi_list_id;
- struct ice_sw_rule_vsi_list *s_rule = NULL;
+ u16 vsi_num, vsi_idx, rule_buf_sz, vsi_list_id, num_vsi = 1;
+ struct ice_sw_rule_vsi_list *s_rule;
struct device *dev;
- num_vsi = 1;
-
dev = ice_pf_to_dev(lag->pf);
vsi_num = event_pf->vsi[0]->vsi_num;
vsi_idx = lag->pf->vsi[0]->idx;
@@ -1268,6 +1650,11 @@ static void ice_lag_init_feature_support_flag(struct ice_pf *pf)
ice_set_feature_support(pf, ICE_F_SRIOV_LAG);
else
ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
+
+ if (caps->sriov_aa_lag && ice_pkg_has_lport_extract(&pf->hw))
+ ice_set_feature_support(pf, ICE_F_SRIOV_AA_LAG);
+ else
+ ice_clear_feature_support(pf, ICE_F_SRIOV_AA_LAG);
}
/**
@@ -1277,11 +1664,10 @@ static void ice_lag_init_feature_support_flag(struct ice_pf *pf)
*/
static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
{
- struct netdev_notifier_changeupper_info *info;
+ struct netdev_notifier_changeupper_info *info = ptr;
struct ice_lag *primary_lag;
struct net_device *netdev;
- info = ptr;
netdev = netdev_notifier_info_to_dev(ptr);
/* not for this netdev */
@@ -1296,25 +1682,47 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
*/
if (!primary_lag) {
lag->primary = true;
+ if (!ice_is_switchdev_running(lag->pf))
+ return;
+
/* Configure primary's SWID to be shared */
ice_lag_primary_swid(lag, true);
primary_lag = lag;
+ lag->bond_lport_pri = lag->pf->hw.port_info->lport;
+ lag->bond_lport_sec = ICE_LAG_INVALID_PORT;
+ lag->port_bitmap = 0;
} else {
u16 swid;
+ if (!ice_is_switchdev_running(primary_lag->pf))
+ return;
+
swid = primary_lag->pf->hw.port_info->sw_id;
ice_lag_set_swid(swid, lag, true);
ice_lag_add_prune_list(primary_lag, lag->pf);
- ice_lag_cfg_drop_fltr(lag, true);
+ primary_lag->bond_lport_sec =
+ lag->pf->hw.port_info->lport;
}
/* add filter for primary control packets */
- ice_lag_cfg_cp_fltr(lag, true);
+ ice_lag_cfg_lp_fltr(lag, true, true);
} else {
if (!primary_lag && lag->primary)
primary_lag = lag;
+ if (primary_lag) {
+ for (int i = 0; i < ICE_MAX_SRIOV_VFS; i++) {
+ if (primary_lag->sec_vf[i]) {
+ ice_vsi_release(primary_lag->sec_vf[i]);
+ primary_lag->sec_vf[i] = NULL;
+ }
+ }
+ }
+
if (!lag->primary) {
ice_lag_set_swid(0, lag, false);
+ if (primary_lag)
+ primary_lag->bond_lport_sec =
+ ICE_LAG_INVALID_PORT;
} else {
if (primary_lag && lag->primary) {
ice_lag_primary_swid(lag, false);
@@ -1322,7 +1730,7 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
}
}
/* remove filter for control packets */
- ice_lag_cfg_cp_fltr(lag, false);
+ ice_lag_cfg_lp_fltr(lag, false, !lag->bond_aa);
}
}
@@ -1335,7 +1743,7 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
*/
static void ice_lag_monitor_link(struct ice_lag *lag, void *ptr)
{
- struct netdev_notifier_changeupper_info *info;
+ struct netdev_notifier_changeupper_info *info = ptr;
struct ice_hw *prim_hw, *active_hw;
struct net_device *event_netdev;
struct ice_pf *pf;
@@ -1348,19 +1756,34 @@ static void ice_lag_monitor_link(struct ice_lag *lag, void *ptr)
if (!netif_is_same_ice(lag->pf, event_netdev))
return;
+ if (info->upper_dev != lag->upper_netdev)
+ return;
+
+ if (info->linking)
+ return;
+
pf = lag->pf;
prim_hw = &pf->hw;
prim_port = prim_hw->port_info->lport;
- info = (struct netdev_notifier_changeupper_info *)ptr;
- if (info->upper_dev != lag->upper_netdev)
- return;
-
- if (!info->linking) {
- /* Since there are only two interfaces allowed in SRIOV+LAG, if
- * one port is leaving, then nodes need to be on primary
- * interface.
- */
+ /* Since there are only two interfaces allowed in SRIOV+LAG, if
+ * one port is leaving, then nodes need to be on primary
+ * interface.
+ */
+ if (lag->bond_aa) {
+ struct ice_netdev_priv *e_ndp;
+ struct ice_pf *e_pf;
+
+ e_ndp = netdev_priv(event_netdev);
+ e_pf = e_ndp->vsi->back;
+
+ if (lag->bond_lport_pri != ICE_LAG_INVALID_PORT &&
+ lag->port_bitmap & ICE_LAGS_M) {
+ lag->port_bitmap &= ~ICE_LAGS_M;
+ ice_lag_aa_failover(lag, ICE_LAGP_IDX, e_pf);
+ lag->bond_lport_sec = ICE_LAG_INVALID_PORT;
+ }
+ } else {
if (prim_port != lag->active_port &&
lag->active_port != ICE_LAG_INVALID_PORT) {
active_hw = ice_lag_find_hw_by_lport(lag,
@@ -1372,45 +1795,32 @@ static void ice_lag_monitor_link(struct ice_lag *lag, void *ptr)
}
/**
- * ice_lag_monitor_active - main PF keep track of which port is active
+ * ice_lag_monitor_act_bkup - keep track of which port is active in A/B LAG
* @lag: lag info struct
- * @ptr: opaque data containing notifier event
+ * @b_info: bonding info
+ * @event_netdev: net_device for target netdev
*
* This function is for the primary PF to monitor changes in which port is
* active and handle changes for SRIOV VF functionality
*/
-static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
+static void ice_lag_monitor_act_bkup(struct ice_lag *lag,
+ struct netdev_bonding_info *b_info,
+ struct net_device *event_netdev)
{
- struct net_device *event_netdev, *event_upper;
- struct netdev_notifier_bonding_info *info;
- struct netdev_bonding_info *bonding_info;
struct ice_netdev_priv *event_np;
struct ice_pf *pf, *event_pf;
u8 prim_port, event_port;
- if (!lag->primary)
- return;
-
pf = lag->pf;
if (!pf)
return;
- event_netdev = netdev_notifier_info_to_dev(ptr);
- rcu_read_lock();
- event_upper = netdev_master_upper_dev_get_rcu(event_netdev);
- rcu_read_unlock();
- if (!netif_is_ice(event_netdev) || event_upper != lag->upper_netdev)
- return;
-
event_np = netdev_priv(event_netdev);
event_pf = event_np->vsi->back;
event_port = event_pf->hw.port_info->lport;
prim_port = pf->hw.port_info->lport;
- info = (struct netdev_notifier_bonding_info *)ptr;
- bonding_info = &info->bonding_info;
-
- if (!bonding_info->slave.state) {
+ if (!b_info->slave.state) {
/* if no port is currently active, then nodes and filters exist
* on primary port, check if we need to move them
*/
@@ -1419,6 +1829,7 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
ice_lag_move_vf_nodes(lag, prim_port,
event_port);
lag->active_port = event_port;
+ ice_lag_config_eswitch(lag, event_netdev);
return;
}
@@ -1428,6 +1839,7 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
/* new active port */
ice_lag_move_vf_nodes(lag, lag->active_port, event_port);
lag->active_port = event_port;
+ ice_lag_config_eswitch(lag, event_netdev);
} else {
/* port not set as currently active (e.g. new active port
* has already claimed the nodes and filters
@@ -1445,6 +1857,128 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
}
/**
+ * ice_lag_aa_clear_spoof - adjust the placeholder VSI spoofing for A/A LAG
+ * @vsi: placeholder VSI to adjust
+ */
+static void ice_lag_aa_clear_spoof(struct ice_vsi *vsi)
+{
+ ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
+}
+
+/**
+ * ice_lag_monitor_act_act - Keep track of active ports in A/A LAG
+ * @lag: lag struct for primary interface
+ * @b_info: bonding_info for event
+ * @event_netdev: net_device for target netdev
+ */
+static void ice_lag_monitor_act_act(struct ice_lag *lag,
+ struct netdev_bonding_info *b_info,
+ struct net_device *event_netdev)
+{
+ struct ice_netdev_priv *event_np;
+ u8 prim_port, event_port;
+ struct ice_pf *event_pf;
+
+ event_np = netdev_priv(event_netdev);
+ event_pf = event_np->vsi->back;
+ event_port = event_pf->hw.port_info->lport;
+ prim_port = lag->pf->hw.port_info->lport;
+
+ if (b_info->slave.link == BOND_LINK_UP) {
+ /* Port is coming up */
+ if (prim_port == event_port) {
+ /* Processing event for primary interface */
+ if (lag->bond_lport_pri == ICE_LAG_INVALID_PORT)
+ return;
+
+ if (!(lag->port_bitmap & ICE_LAGP_M)) {
+ /* Primary port was not marked up before, move
+ * some|all VF queues to it and mark as up
+ */
+ lag->port_bitmap |= ICE_LAGP_M;
+ ice_lag_aa_failover(lag, ICE_LAGP_IDX, event_pf);
+ }
+ } else {
+ if (lag->bond_lport_sec == ICE_LAG_INVALID_PORT)
+ return;
+
+ /* Create placeholder VSIs on secondary PF.
+ * The placeholder is necessary so that we have
+ * an element that represents the VF on the secondary
+ * interface's scheduling tree. This will be a tree
+ * root for scheduling nodes when they are moved to
+ * the secondary interface.
+ */
+ if (!lag->sec_vf[0]) {
+ struct ice_vsi_cfg_params params = {};
+ struct ice_vsi *nvsi;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ params.type = ICE_VSI_VF;
+ params.port_info = event_pf->hw.port_info;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ ice_for_each_vf(lag->pf, bkt, vf) {
+ params.vf = vf;
+ nvsi = ice_vsi_setup(event_pf,
+ &params);
+ ice_lag_aa_clear_spoof(nvsi);
+ lag->sec_vf[vf->vf_id] = nvsi;
+ }
+ }
+
+ if (!(lag->port_bitmap & ICE_LAGS_M)) {
+ /* Secondary port was not marked up before,
+ * move some|all VF queues to it and mark as up
+ */
+ lag->port_bitmap |= ICE_LAGS_M;
+ ice_lag_aa_failover(lag, ICE_LAGS_IDX, event_pf);
+ }
+ }
+ } else {
+ /* Port is going down */
+ if (prim_port == event_port) {
+ lag->port_bitmap &= ~ICE_LAGP_M;
+ ice_lag_aa_failover(lag, ICE_LAGS_IDX, event_pf);
+ } else {
+ lag->port_bitmap &= ~ICE_LAGS_M;
+ ice_lag_aa_failover(lag, ICE_LAGP_IDX, event_pf);
+ }
+ }
+}
+
+/**
+ * ice_lag_monitor_info - Calls relevant A/A or A/B monitoring function
+ * @lag: lag info struct
+ * @ptr: opaque data containing notifier event
+ *
+ * This function is for the primary PF to monitor changes in which port is
+ * active and handle changes for SRIOV VF functionality
+ */
+static void ice_lag_monitor_info(struct ice_lag *lag, void *ptr)
+{
+ struct netdev_notifier_bonding_info *info = ptr;
+ struct net_device *event_netdev, *event_upper;
+ struct netdev_bonding_info *bonding_info;
+
+ if (!lag->primary)
+ return;
+
+ event_netdev = netdev_notifier_info_to_dev(ptr);
+ bonding_info = &info->bonding_info;
+ rcu_read_lock();
+ event_upper = netdev_master_upper_dev_get_rcu(event_netdev);
+ rcu_read_unlock();
+ if (!netif_is_ice(event_netdev) || event_upper != lag->upper_netdev)
+ return;
+
+ if (lag->bond_aa)
+ ice_lag_monitor_act_act(lag, bonding_info, event_netdev);
+ else
+ ice_lag_monitor_act_bkup(lag, bonding_info, event_netdev);
+}
+/**
* ice_lag_chk_comp - evaluate bonded interface for feature support
* @lag: lag info struct
* @ptr: opaque data for netdev event info
@@ -1452,13 +1986,21 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
static bool
ice_lag_chk_comp(struct ice_lag *lag, void *ptr)
{
+ struct netdev_notifier_bonding_info *info = ptr;
struct net_device *event_netdev, *event_upper;
- struct netdev_notifier_bonding_info *info;
struct netdev_bonding_info *bonding_info;
struct list_head *tmp;
struct device *dev;
int count = 0;
+ /* All members need to know whether the bond is A/A or A/B */
+ bonding_info = &info->bonding_info;
+ lag->bond_mode = bonding_info->master.bond_mode;
+ if (lag->bond_mode != BOND_MODE_ACTIVEBACKUP)
+ lag->bond_aa = true;
+ else
+ lag->bond_aa = false;
+
if (!lag->primary)
return true;
@@ -1479,13 +2021,9 @@ ice_lag_chk_comp(struct ice_lag *lag, void *ptr)
return false;
}
- info = (struct netdev_notifier_bonding_info *)ptr;
- bonding_info = &info->bonding_info;
- lag->bond_mode = bonding_info->master.bond_mode;
- if (lag->bond_mode != BOND_MODE_ACTIVEBACKUP) {
- dev_info(dev, "Bond Mode not ACTIVE-BACKUP - VF LAG disabled\n");
+ if (lag->bond_aa && !ice_is_feature_supported(lag->pf,
+ ICE_F_SRIOV_AA_LAG))
return false;
- }
list_for_each(tmp, lag->netdev_head) {
struct ice_dcbx_cfg *dcb_cfg, *peer_dcb_cfg;
@@ -1589,10 +2127,9 @@ ice_lag_unregister(struct ice_lag *lag, struct net_device *event_netdev)
static void
ice_lag_monitor_rdma(struct ice_lag *lag, void *ptr)
{
- struct netdev_notifier_changeupper_info *info;
+ struct netdev_notifier_changeupper_info *info = ptr;
struct net_device *netdev;
- info = ptr;
netdev = netdev_notifier_info_to_dev(ptr);
if (netdev != lag->netdev)
@@ -1640,12 +2177,29 @@ static void ice_lag_chk_disabled_bond(struct ice_lag *lag, void *ptr)
*/
static void ice_lag_disable_sriov_bond(struct ice_lag *lag)
{
- struct ice_netdev_priv *np;
- struct ice_pf *pf;
+ struct ice_pf *pf = ice_netdev_to_pf(lag->netdev);
- np = netdev_priv(lag->netdev);
- pf = np->vsi->back;
ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
+ ice_clear_feature_support(pf, ICE_F_SRIOV_AA_LAG);
+}
+
+/**
+ * ice_lag_preset_drop_fltr - preset drop filter for A/B bonds
+ * @lag: local lag struct
+ * @ptr: opaque data containing event
+ *
+ * Sets the initial drop filter for the secondary interface in an
+ * active-backup bond.
+ */
+static void ice_lag_preset_drop_fltr(struct ice_lag *lag, void *ptr)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+
+ if (netdev != lag->netdev || lag->primary || !lag->need_fltr_cfg)
+ return;
+
+ ice_lag_cfg_drop_fltr(lag, true);
+ lag->need_fltr_cfg = false;
}
/**
@@ -1686,10 +2240,12 @@ static void ice_lag_process_event(struct work_struct *work)
ice_lag_unregister(lag_work->lag, netdev);
goto lag_cleanup;
}
- ice_lag_monitor_active(lag_work->lag,
- &lag_work->info.bonding_info);
ice_lag_cfg_pf_fltrs(lag_work->lag,
&lag_work->info.bonding_info);
+ ice_lag_preset_drop_fltr(lag_work->lag,
+ &lag_work->info.bonding_info);
+ ice_lag_monitor_info(lag_work->lag,
+ &lag_work->info.bonding_info);
}
ice_lag_info_event(lag_work->lag, &lag_work->info.bonding_info);
break;
@@ -1762,9 +2318,8 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
lag_work->lag = lag;
lag_work->event = event;
if (event == NETDEV_CHANGEUPPER) {
- struct netdev_notifier_changeupper_info *info;
+ struct netdev_notifier_changeupper_info *info = ptr;
- info = ptr;
upper_netdev = info->upper_dev;
} else {
upper_netdev = netdev_master_upper_dev_get(netdev);
@@ -1814,10 +2369,8 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
*/
static int ice_register_lag_handler(struct ice_lag *lag)
{
+ struct notifier_block *notif_blk = &lag->notif_block;
struct device *dev = ice_pf_to_dev(lag->pf);
- struct notifier_block *notif_blk;
-
- notif_blk = &lag->notif_block;
if (!notif_blk->notifier_call) {
notif_blk->notifier_call = ice_lag_event_handler;
@@ -1837,10 +2390,9 @@ static int ice_register_lag_handler(struct ice_lag *lag)
*/
static void ice_unregister_lag_handler(struct ice_lag *lag)
{
+ struct notifier_block *notif_blk = &lag->notif_block;
struct device *dev = ice_pf_to_dev(lag->pf);
- struct notifier_block *notif_blk;
- notif_blk = &lag->notif_block;
if (notif_blk->notifier_call) {
unregister_netdevice_notifier(notif_blk);
dev_dbg(dev, "LAG event handler unregistered\n");
@@ -1902,13 +2454,12 @@ ice_lag_move_vf_nodes_tc_sync(struct ice_lag *lag, struct ice_hw *dest_hw,
u16 numq, valq, num_moved, qbuf_size;
u16 buf_size = __struct_size(buf);
struct ice_aqc_cfg_txqs_buf *qbuf;
+ struct ice_hw *hw = &lag->pf->hw;
struct ice_sched_node *n_prt;
__le32 teid, parent_teid;
struct ice_vsi_ctx *ctx;
- struct ice_hw *hw;
u32 tmp_teid;
- hw = &lag->pf->hw;
ctx = ice_get_vsi_ctx(hw, vsi_num);
if (!ctx) {
dev_warn(dev, "LAG rebuild failed after reset due to VSI Context failure\n");
@@ -1945,7 +2496,8 @@ ice_lag_move_vf_nodes_tc_sync(struct ice_lag *lag, struct ice_hw *dest_hw,
}
if (ice_aq_cfg_lan_txq(hw, qbuf, qbuf_size, numq, hw->port_info->lport,
- dest_hw->port_info->lport, NULL)) {
+ dest_hw->port_info->lport,
+ ICE_AQC_Q_CFG_TC_CHNG, NULL)) {
dev_warn(dev, "Failure to configure queues for LAG reset rebuild\n");
goto sync_qerr;
}
@@ -2041,9 +2593,13 @@ int ice_init_lag(struct ice_pf *pf)
lag->netdev = vsi->netdev;
lag->role = ICE_LAG_NONE;
lag->active_port = ICE_LAG_INVALID_PORT;
+ lag->port_bitmap = 0x0;
lag->bonded = false;
+ lag->bond_aa = false;
+ lag->need_fltr_cfg = false;
lag->upper_netdev = NULL;
lag->notif_block.notifier_call = NULL;
+ memset(lag->sec_vf, 0, sizeof(lag->sec_vf));
err = ice_register_lag_handler(lag);
if (err) {
@@ -2061,6 +2617,11 @@ int ice_init_lag(struct ice_pf *pf)
if (err)
goto free_rcp_res;
+ err = ice_create_lag_recipe(&pf->hw, &lag->act_act_recipe,
+ ice_lport_rcp, 1);
+ if (err)
+ goto free_lport_res;
+
/* associate recipes to profiles */
for (n = 0; n < ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER; n++) {
err = ice_aq_get_recipe_to_profile(&pf->hw, n,
@@ -2070,7 +2631,8 @@ int ice_init_lag(struct ice_pf *pf)
if (recipe_bits & BIT(ICE_SW_LKUP_DFLT)) {
recipe_bits |= BIT(lag->pf_recipe) |
- BIT(lag->lport_recipe);
+ BIT(lag->lport_recipe) |
+ BIT(lag->act_act_recipe);
ice_aq_map_recipe_to_profile(&pf->hw, n,
recipe_bits, NULL);
}
@@ -2081,9 +2643,13 @@ int ice_init_lag(struct ice_pf *pf)
dev_dbg(dev, "INIT LAG complete\n");
return 0;
+free_lport_res:
+ ice_free_hw_res(&pf->hw, ICE_AQC_RES_TYPE_RECIPE, 1,
+ &lag->lport_recipe);
+
free_rcp_res:
ice_free_hw_res(&pf->hw, ICE_AQC_RES_TYPE_RECIPE, 1,
- &pf->lag->pf_recipe);
+ &lag->pf_recipe);
lag_error:
kfree(lag);
pf->lag = NULL;
@@ -2099,9 +2665,7 @@ lag_error:
*/
void ice_deinit_lag(struct ice_pf *pf)
{
- struct ice_lag *lag;
-
- lag = pf->lag;
+ struct ice_lag *lag = pf->lag;
if (!lag)
return;
@@ -2170,11 +2734,15 @@ void ice_lag_rebuild(struct ice_pf *pf)
ice_lag_move_vf_nodes_sync(prim_lag, &pf->hw);
}
- ice_lag_cfg_cp_fltr(lag, true);
+ if (!lag->bond_aa) {
+ ice_lag_cfg_lp_fltr(lag, true, true);
+ if (lag->pf_rx_rule_id)
+ if (ice_lag_cfg_dflt_fltr(lag, true))
+ dev_err(ice_pf_to_dev(pf), "Error adding default VSI rule in rebuild\n");
+ } else {
+ ice_lag_cfg_lp_fltr(lag, true, false);
+ }
- if (lag->pf_rx_rule_id)
- if (ice_lag_cfg_dflt_fltr(lag, true))
- dev_err(ice_pf_to_dev(pf), "Error adding default VSI rule in rebuild\n");
ice_clear_rdma_cap(pf);
lag_rebuild_out:
@@ -2193,7 +2761,8 @@ bool ice_lag_is_switchdev_running(struct ice_pf *pf)
struct ice_lag *lag = pf->lag;
struct net_device *tmp_nd;
- if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) || !lag)
+ if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) ||
+ !lag || !lag->upper_netdev)
return false;
rcu_read_lock();
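
Since the hunks above add ice_lag_prepare_vf_reset() and ice_lag_complete_vf_reset() but their caller is outside this excerpt, here is a hedged sketch of the usage pattern implied by their kernel-doc: save the active port, rebuild the VF, then restore, all under lag_mutex. The function name example_vf_rebuild_with_lag() and the placement of the rebuild step are assumptions, not part of the patch.

/* Illustrative only; assumes the driver's internal "ice.h" for struct ice_pf
 * and the LAG helpers declared in ice_lag.h.
 */
static void example_vf_rebuild_with_lag(struct ice_pf *pf)
{
	u8 act_prt;

	mutex_lock(&pf->lag_mutex);
	act_prt = ice_lag_prepare_vf_reset(pf->lag);

	/* ... rebuild the VF VSI / reconfigure queues here ... */

	ice_lag_complete_vf_reset(pf->lag, act_prt);
	mutex_unlock(&pf->lag_mutex);
}
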
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
index bab2c83142a1..f77ebcd61042 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.h
+++ b/drivers/net/ethernet/intel/ice/ice_lag.h
@@ -14,7 +14,11 @@ enum ice_lag_role {
ICE_LAG_UNSET
};
-#define ICE_LAG_INVALID_PORT 0xFF
+#define ICE_LAG_INVALID_PORT 0xFF
+#define ICE_LAGP_IDX 0
+#define ICE_LAGS_IDX 1
+#define ICE_LAGP_M 0x1
+#define ICE_LAGS_M 0x2
#define ICE_LAG_RESET_RETRIES 5
#define ICE_SW_DEFAULT_PROFILE 0
@@ -41,12 +45,26 @@ struct ice_lag {
u8 active_port; /* lport value for the current active port */
u8 bonded:1; /* currently bonded */
u8 primary:1; /* this is primary */
+ u8 bond_aa:1; /* is this bond active-active */
+ u8 need_fltr_cfg:1; /* fltrs for A/A bond still need to be made */
+ u8 port_bitmap:2; /* bitmap of active ports */
+ u8 bond_lport_pri; /* lport value for primary PF */
+ u8 bond_lport_sec; /* lport value for secondary PF */
+
+ /* q_home keeps track of which interface the q is currently on */
+ u8 q_home[ICE_MAX_SRIOV_VFS][ICE_MAX_RSS_QS_PER_VF];
+
+ /* placeholder VSI for hanging VF queues from on secondary interface */
+ struct ice_vsi *sec_vf[ICE_MAX_SRIOV_VFS];
+
u16 pf_recipe;
u16 lport_recipe;
+ u16 act_act_recipe;
u16 pf_rx_rule_id;
u16 pf_tx_rule_id;
u16 cp_rule_idx;
u16 lport_rule_idx;
+ u16 act_act_rule_idx;
u8 role;
};
@@ -64,10 +82,12 @@ struct ice_lag_work {
} info;
};
-void ice_lag_move_new_vf_nodes(struct ice_vf *vf);
+void ice_lag_aa_failover(struct ice_lag *lag, u8 dest, struct ice_pf *e_pf);
int ice_init_lag(struct ice_pf *pf);
void ice_deinit_lag(struct ice_pf *pf);
void ice_lag_rebuild(struct ice_pf *pf);
bool ice_lag_is_switchdev_running(struct ice_pf *pf);
void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt);
+u8 ice_lag_prepare_vf_reset(struct ice_lag *lag);
+void ice_lag_complete_vf_reset(struct ice_lag *lag, u8 act_prt);
#endif /* _ICE_LAG_H_ */
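
A short, hedged restatement of how the new two-bit port_bitmap is consumed: ice_lag_aa_failover() in the ice_lag.c hunk above splits queues only when both ICE_LAGP_M and ICE_LAGS_M are set, and otherwise collapses everything onto the destination port. The helper below is not driver code, just the same condition isolated for readability; it assumes the macros from the header above and kernel types.

#include <linux/types.h>

/* Illustrative helper, mirroring the "all" decision in ice_lag_aa_failover() */
static bool example_lag_move_all(u8 port_bitmap, bool dest_is_primary)
{
	/* Split only when both ports are up (bitmap == ICE_LAGP_M | ICE_LAGS_M);
	 * with a single port up, or no ports up and primary as destination,
	 * move every queue to the destination.
	 */
	return (port_bitmap ^ (ICE_LAGP_M | ICE_LAGS_M)) ||
	       (!port_bitmap && dest_is_primary);
}
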
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 611577ebc29d..185672c7e17d 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -229,7 +229,7 @@ struct ice_32b_rx_flex_desc_nic {
__le16 status_error1;
u8 flexi_flags2;
u8 ts_low;
- __le16 l2tag2_1st;
+ __le16 raw_csum;
__le16 l2tag2_2nd;
/* Qword 3 */
@@ -342,6 +342,9 @@ enum ice_flg64_bits {
/* for ice_32byte_rx_flex_desc.pkt_length member */
#define ICE_RX_FLX_DESC_PKT_LEN_M (0x3FFF) /* 14-bits */
+/* ice_32byte_rx_flex_desc::hdr_len_sph_flex_flags1 */
+#define ICE_RX_FLEX_DESC_HDR_LEN_M GENMASK(10, 0)
+
enum ice_rx_flex_desc_status_error_0_bits {
/* Note: These are predefined bit offsets */
ICE_RX_FLEX_DESC_STATUS0_DD_S = 0,
@@ -371,29 +374,21 @@ enum ice_rx_flex_desc_status_error_1_bits {
ICE_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
};
-#define ICE_RXQ_CTX_SIZE_DWORDS 8
-#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
#define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS 22
#define ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS 5
#define GLTCLAN_CQ_CNTX(i, CQ) (GLTCLAN_CQ_CNTX0(CQ) + ((i) * 0x0800))
-/* RLAN Rx queue context data
- *
- * The sizes of the variables may be larger than needed due to crossing byte
- * boundaries. If we do not have the width of the variable set to the correct
- * size then we could end up shifting bits off the top of the variable when the
- * variable is at the top of a byte and crosses over into the next byte.
- */
+/* RLAN Rx queue context data */
struct ice_rlan_ctx {
u16 head;
- u16 cpuid; /* bigger than needed, see above for reason */
+ u8 cpuid;
#define ICE_RLAN_BASE_S 7
u64 base;
u16 qlen;
#define ICE_RLAN_CTX_DBUF_S 7
- u16 dbuf; /* bigger than needed, see above for reason */
+ u8 dbuf;
#define ICE_RLAN_CTX_HBUF_S 6
- u16 hbuf; /* bigger than needed, see above for reason */
+ u8 hbuf;
u8 dtype;
u8 dsize;
u8 crcstrip;
@@ -401,29 +396,15 @@ struct ice_rlan_ctx {
u8 hsplit_0;
u8 hsplit_1;
u8 showiv;
- u32 rxmax; /* bigger than needed, see above for reason */
+ u16 rxmax;
u8 tphrdesc_ena;
u8 tphwdesc_ena;
u8 tphdata_ena;
u8 tphhead_ena;
- u16 lrxqthresh; /* bigger than needed, see above for reason */
+ u8 lrxqthresh;
u8 prefena; /* NOTE: normally must be set to 1 at init */
};
-struct ice_ctx_ele {
- u16 offset;
- u16 size_of;
- u16 width;
- u16 lsb;
-};
-
-#define ICE_CTX_STORE(_struct, _ele, _width, _lsb) { \
- .offset = offsetof(struct _struct, _ele), \
- .size_of = sizeof_field(struct _struct, _ele), \
- .width = _width, \
- .lsb = _lsb, \
-}
-
/* for hsplit_0 field of Rx RLAN context */
enum ice_rlan_ctx_rx_hsplit_0 {
ICE_RLAN_RX_HSPLIT_0_NO_SPLIT = 0,
@@ -500,10 +481,15 @@ enum ice_tx_desc_len_fields {
struct ice_tx_ctx_desc {
__le32 tunneling_params;
__le16 l2tag2;
- __le16 rsvd;
+ __le16 gcs;
__le64 qw1;
};
+#define ICE_TX_GCS_DESC_START_M GENMASK(7, 0)
+#define ICE_TX_GCS_DESC_OFFSET_M GENMASK(11, 8)
+#define ICE_TX_GCS_DESC_TYPE_M GENMASK(14, 12)
+#define ICE_TX_GCS_DESC_CSUM_PSH 1
+
#define ICE_TXD_CTX_QW1_CMD_S 4
#define ICE_TXD_CTX_QW1_CMD_M (0x7FUL << ICE_TXD_CTX_QW1_CMD_S)
@@ -551,18 +537,12 @@ enum ice_tx_ctx_desc_eipt_offload {
#define ICE_LAN_TXQ_MAX_QGRPS 127
#define ICE_LAN_TXQ_MAX_QDIS 1023
-/* Tx queue context data
- *
- * The sizes of the variables may be larger than needed due to crossing byte
- * boundaries. If we do not have the width of the variable set to the correct
- * size then we could end up shifting bits off the top of the variable when the
- * variable is at the top of a byte and crosses over into the next byte.
- */
+/* Tx queue context data */
struct ice_tlan_ctx {
#define ICE_TLAN_CTX_BASE_S 7
u64 base; /* base is defined in 128-byte units */
u8 port_num;
- u16 cgd_num; /* bigger than needed, see above for reason */
+ u8 cgd_num;
u8 pf_num;
u16 vmvf_num;
u8 vmvf_type;
@@ -573,7 +553,7 @@ struct ice_tlan_ctx {
u8 tsyn_ena;
u8 internal_usage_flag;
u8 alt_vlan;
- u16 cpuid; /* bigger than needed, see above for reason */
+ u8 cpuid;
u8 wb_mode;
u8 tphrd_desc;
u8 tphrd;
@@ -582,7 +562,7 @@ struct ice_tlan_ctx {
u16 qnum_in_func;
u8 itr_notification_mode;
u8 adjust_prof_id;
- u32 qlen; /* bigger than needed, see above for reason */
+ u16 qlen;
u8 quanta_prof_idx;
u8 tso_ena;
u16 tso_qnum;
@@ -590,7 +570,47 @@ struct ice_tlan_ctx {
u8 drop_ena;
u8 cache_prof_idx;
u8 pkt_shaper_prof_idx;
- u8 int_q_state; /* width not needed - internal - DO NOT WRITE!!! */
+};
+
+#define ICE_TXTIME_TX_DESC_IDX_M GENMASK(12, 0)
+#define ICE_TXTIME_STAMP_M GENMASK(31, 13)
+
+/* Tx time stamp descriptor */
+struct ice_ts_desc {
+ __le32 tx_desc_idx_tstamp;
+};
+
+#define ICE_TS_DESC(R, i) (&(((struct ice_ts_desc *)((R)->desc))[i]))
+
+#define ICE_TXTIME_MAX_QUEUE 2047
+#define ICE_SET_TXTIME_MAX_Q_AMOUNT 127
+#define ICE_TXTIME_FETCH_TS_DESC_DFLT 8
+#define ICE_TXTIME_FETCH_PROFILE_CNT 16
+
+/* Tx Time queue context data */
+struct ice_txtime_ctx {
+#define ICE_TXTIME_CTX_BASE_S 7
+ u64 base; /* base is defined in 128-byte units */
+ u8 pf_num;
+ u16 vmvf_num;
+ u8 vmvf_type;
+ u16 src_vsi;
+ u8 cpuid;
+ u8 tphrd_desc;
+ u16 qlen;
+ u8 timer_num;
+ u8 txtime_ena_q;
+ u8 drbell_mode_32;
+#define ICE_TXTIME_CTX_DRBELL_MODE_32 1
+ u8 ts_res;
+#define ICE_TXTIME_CTX_RESOLUTION_128NS 7
+ u8 ts_round_type;
+ u8 ts_pacing_slot;
+#define ICE_TXTIME_CTX_FETCH_PROF_ID_0 0
+ u8 merging_ena;
+ u8 ts_fetch_prof_id;
+ u8 ts_fetch_cache_line_aln_thld;
+ u8 tx_pipe_delay_mode;
};
#endif /* _ICE_LAN_TX_RX_H_ */
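
The Tx time stamp descriptor added above is a single little-endian 32-bit word split by ICE_TXTIME_TX_DESC_IDX_M and ICE_TXTIME_STAMP_M. As a hedged illustration (not taken from the patch; the helper name is invented), packing it with the usual bitfield helpers would look roughly like this:

#include <linux/bitfield.h>
#include <linux/types.h>

/* Illustrative packing of ice_ts_desc::tx_desc_idx_tstamp: the low 13 bits
 * carry the Tx descriptor index, the upper 19 bits the time stamp value.
 */
static __le32 example_pack_ts_desc(u16 tx_desc_idx, u32 tstamp)
{
	u32 val;

	val = FIELD_PREP(ICE_TXTIME_TX_DESC_IDX_M, tx_desc_idx) |
	      FIELD_PREP(ICE_TXTIME_STAMP_M, tstamp);

	return cpu_to_le32(val);
}
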
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index d4e74f96a8ad..15621707fbf8 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -157,6 +157,16 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
}
}
+static u16 ice_get_rxq_count(struct ice_pf *pf)
+{
+ return min(ice_get_avail_rxq_count(pf), num_online_cpus());
+}
+
+static u16 ice_get_txq_count(struct ice_pf *pf)
+{
+ return min(ice_get_avail_txq_count(pf), num_online_cpus());
+}
+
/**
* ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
* @vsi: the VSI being configured
@@ -178,9 +188,7 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
vsi->alloc_txq = vsi->req_txq;
vsi->num_txq = vsi->req_txq;
} else {
- vsi->alloc_txq = min3(pf->num_lan_msix,
- ice_get_avail_txq_count(pf),
- (u16)num_online_cpus());
+ vsi->alloc_txq = ice_get_txq_count(pf);
}
pf->num_lan_tx = vsi->alloc_txq;
@@ -193,17 +201,13 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
vsi->alloc_rxq = vsi->req_rxq;
vsi->num_rxq = vsi->req_rxq;
} else {
- vsi->alloc_rxq = min3(pf->num_lan_msix,
- ice_get_avail_rxq_count(pf),
- (u16)num_online_cpus());
+ vsi->alloc_rxq = ice_get_rxq_count(pf);
}
}
pf->num_lan_rx = vsi->alloc_rxq;
- vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
- max_t(int, vsi->alloc_rxq,
- vsi->alloc_txq));
+ vsi->num_q_vectors = max(vsi->alloc_rxq, vsi->alloc_txq);
break;
case ICE_VSI_SF:
vsi->alloc_txq = 1;
@@ -480,8 +484,7 @@ static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data)
if (!q_vector->tx.tx_ring)
return IRQ_HANDLED;
-#define FDIR_RX_DESC_CLEAN_BUDGET 64
- ice_clean_rx_irq(q_vector->rx.rx_ring, FDIR_RX_DESC_CLEAN_BUDGET);
+ ice_clean_ctrl_rx_irq(q_vector->rx.rx_ring);
ice_clean_ctrl_tx_irq(q_vector->tx.tx_ring);
return IRQ_HANDLED;
@@ -567,6 +570,8 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
return -ENOMEM;
}
+ vsi->irq_dyn_alloc = pci_msix_can_alloc_dyn(vsi->back->pdev);
+
switch (vsi->type) {
case ICE_VSI_PF:
case ICE_VSI_SF:
@@ -827,7 +832,13 @@ bool ice_is_safe_mode(struct ice_pf *pf)
*/
bool ice_is_rdma_ena(struct ice_pf *pf)
{
- return test_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+ union devlink_param_value value;
+ int err;
+
+ err = devl_param_driverinit_value_get(priv_to_devlink(pf),
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
+ &value);
+ return err ? test_bit(ICE_FLAG_RDMA_ENA, pf->flags) : value.vbool;
}
/**
@@ -1173,12 +1184,11 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
static void
ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
- struct ice_pf *pf = vsi->back;
u16 qcount, qmap;
u8 offset = 0;
int pow;
- qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix);
+ qcount = vsi->num_rxq;
pow = order_base_2(qcount);
qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset);
@@ -1417,9 +1427,12 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->reg_idx = vsi->rxq_map[i];
ring->vsi = vsi;
ring->netdev = vsi->netdev;
- ring->dev = dev;
ring->count = vsi->num_rx_desc;
ring->cached_phctime = pf->ptp.cached_phc_time;
+
+ if (ice_is_feature_supported(pf, ICE_F_GCS))
+ ring->flags |= ICE_RX_FLAGS_RING_GCS;
+
WRITE_ONCE(vsi->rx_rings[i], ring);
}
@@ -1564,7 +1577,7 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
return;
}
- status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HENA);
+ status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HASHCFG);
if (status)
dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
vsi->vsi_num, status);
@@ -1700,6 +1713,12 @@ bool ice_pf_state_is_nominal(struct ice_pf *pf)
return true;
}
+#define ICE_FW_MODE_REC_M BIT(1)
+bool ice_is_recovery_mode(struct ice_hw *hw)
+{
+ return rd32(hw, GL_MNG_FWSM) & ICE_FW_MODE_REC_M;
+}
+
/**
* ice_update_eth_stats - Update VSI-specific ethernet statistics counters
* @vsi: the VSI to be updated
@@ -1758,9 +1777,8 @@ void ice_update_eth_stats(struct ice_vsi *vsi)
* @prio: priority for the RXDID for this queue
* @ena_ts: true to enable timestamp and false to disable timestamp
*/
-void
-ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
- bool ena_ts)
+void ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
+ bool ena_ts)
{
int regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
@@ -2045,12 +2063,15 @@ static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
}
/**
- * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
+ * ice_vsi_cfg_sw_lldp - Config switch rules for LLDP packet handling
* @vsi: the VSI being configured
* @tx: bool to determine Tx or Rx rule
* @create: bool to determine create or remove Rule
+ *
+ * Adding an ethtype Tx rule to the uplink VSI results in it being applied
+ * to the whole port, so LLDP transmission for VFs will be blocked too.
*/
-void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
+void ice_vsi_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
{
int (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag,
enum ice_sw_fwd_act_type act);
@@ -2065,19 +2086,59 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX,
ICE_DROP_PACKET);
} else {
- if (ice_fw_supports_lldp_fltr_ctrl(&pf->hw)) {
- status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num,
- create);
- } else {
+ if (!test_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags)) {
status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX,
ICE_FWD_TO_VSI);
+ if (!status || !create)
+ goto report;
+
+ dev_info(dev,
+ "Failed to add generic LLDP Rx filter on VSI %i error: %d, falling back to specialized AQ control\n",
+ vsi->vsi_num, status);
}
+
+ status = ice_lldp_fltr_add_remove(&pf->hw, vsi, create);
+ if (!status)
+ set_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags);
+
}
+report:
if (status)
- dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n",
- create ? "adding" : "removing", tx ? "TX" : "RX",
- vsi->vsi_num, status);
+ dev_warn(dev, "Failed to %s %s LLDP rule on VSI %i error: %d\n",
+ create ? "add" : "remove", tx ? "Tx" : "Rx",
+ vsi->vsi_num, status);
+}
+
+/**
+ * ice_cfg_sw_rx_lldp - Enable/disable software handling of LLDP
+ * @pf: the PF being configured
+ * @enable: enable or disable
+ *
+ * Configure switch rules to enable/disable LLDP handling by software
+ * across PF.
+ */
+void ice_cfg_sw_rx_lldp(struct ice_pf *pf, bool enable)
+{
+ struct ice_vsi *vsi;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ vsi = ice_get_main_vsi(pf);
+ ice_vsi_cfg_sw_lldp(vsi, false, enable);
+
+ if (!test_bit(ICE_FLAG_SRIOV_ENA, pf->flags))
+ return;
+
+ ice_for_each_vf(pf, bkt, vf) {
+ vsi = ice_get_vf_vsi(vf);
+
+ if (WARN_ON(!vsi))
+ continue;
+
+ if (ice_vf_is_lldp_ena(vf))
+ ice_vsi_cfg_sw_lldp(vsi, false, enable);
+ }
}
/**
@@ -2508,7 +2569,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params)
if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) {
ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
ICE_DROP_PACKET);
- ice_cfg_sw_lldp(vsi, true, true);
+ ice_vsi_cfg_sw_lldp(vsi, true, true);
}
if (!vsi->agg_node)
@@ -2576,7 +2637,6 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
return;
vsi->irqs_ready = false;
- ice_free_cpu_rx_rmap(vsi);
ice_for_each_q_vector(vsi, i) {
int irq_num;
@@ -2589,12 +2649,6 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
vsi->q_vectors[i]->num_ring_rx))
continue;
- /* clear the affinity notifier in the IRQ descriptor */
- if (!IS_ENABLED(CONFIG_RFS_ACCEL))
- irq_set_affinity_notifier(irq_num, NULL);
-
- /* clear the affinity_hint in the IRQ descriptor */
- irq_update_affinity_hint(irq_num, NULL);
synchronize_irq(irq_num);
devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]);
}
@@ -2714,7 +2768,6 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
* @vsi: VSI pointer
*
* Associate queue[s] with napi for all vectors.
- * The caller must hold rtnl_lock.
*/
void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
{
@@ -2724,6 +2777,7 @@ void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
if (!netdev)
return;
+ ASSERT_RTNL();
ice_for_each_rxq(vsi, q_idx)
netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX,
&vsi->rx_rings[q_idx]->q_vector->napi);
@@ -2744,16 +2798,23 @@ void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
* @vsi: VSI pointer
*
* Clear the association between all VSI queues queue[s] and napi.
- * The caller must hold rtnl_lock.
*/
void ice_vsi_clear_napi_queues(struct ice_vsi *vsi)
{
struct net_device *netdev = vsi->netdev;
- int q_idx;
+ int q_idx, v_idx;
if (!netdev)
return;
+ ASSERT_RTNL();
+ /* Clear the NAPI's interrupt number */
+ ice_for_each_q_vector(vsi, v_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+
+ netif_napi_set_irq(&q_vector->napi, -1);
+ }
+
ice_for_each_txq(vsi, q_idx)
netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX, NULL);
@@ -2777,8 +2838,10 @@ void ice_napi_add(struct ice_vsi *vsi)
return;
ice_for_each_q_vector(vsi, v_idx)
- netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
- ice_napi_poll);
+ netif_napi_add_config(vsi->netdev,
+ &vsi->q_vectors[v_idx]->napi,
+ ice_napi_poll,
+ v_idx);
}
/**
@@ -2803,9 +2866,11 @@ int ice_vsi_release(struct ice_vsi *vsi)
/* The Rx rule will only exist to remove if the LLDP FW
* engine is currently stopped
*/
- if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF &&
- !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
- ice_cfg_sw_lldp(vsi, false, false);
+ if (!ice_is_safe_mode(pf) &&
+ !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags) &&
+ (vsi->type == ICE_VSI_PF || (vsi->type == ICE_VSI_VF &&
+ ice_vf_is_lldp_ena(vsi->vf))))
+ ice_vsi_cfg_sw_lldp(vsi, false, false);
ice_vsi_decfg(vsi);
@@ -3133,7 +3198,7 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
if (!netdev)
return;
- /* CHNL VSI doesn't have it's own netdev, hence, no netdev_tc */
+ /* CHNL VSI doesn't have its own netdev, hence, no netdev_tc */
if (vsi->type == ICE_VSI_CHNL)
return;
@@ -3670,20 +3735,20 @@ int ice_set_link(struct ice_vsi *vsi, bool ena)
status = ice_aq_set_link_restart_an(pi, ena, NULL);
- /* if link is owned by manageability, FW will return ICE_AQ_RC_EMODE.
+ /* if link is owned by manageability, FW will return LIBIE_AQ_RC_EMODE.
* this is not a fatal error, so print a warning message and return
* a success code. Return an error if FW returns an error code other
- * than ICE_AQ_RC_EMODE
+ * than LIBIE_AQ_RC_EMODE
*/
if (status == -EIO) {
- if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
+ if (hw->adminq.sq_last_status == LIBIE_AQ_RC_EMODE)
dev_dbg(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n",
(ena ? "ON" : "OFF"), status,
- ice_aq_str(hw->adminq.sq_last_status));
+ libie_aq_str(hw->adminq.sq_last_status));
} else if (status) {
dev_err(dev, "can't set link to %s, err %d aq_err %s\n",
(ena ? "ON" : "OFF"), status,
- ice_aq_str(hw->adminq.sq_last_status));
+ libie_aq_str(hw->adminq.sq_last_status));
return status;
}
@@ -3874,15 +3939,18 @@ void ice_init_feature_support(struct ice_pf *pf)
ice_set_feature_support(pf, ICE_F_CGU);
if (ice_is_clock_mux_in_netlist(&pf->hw))
ice_set_feature_support(pf, ICE_F_SMA_CTRL);
- if (ice_gnss_is_gps_present(&pf->hw))
+ if (ice_gnss_is_module_present(&pf->hw))
ice_set_feature_support(pf, ICE_F_GNSS);
break;
default:
break;
}
- if (pf->hw.mac_type == ICE_MAC_E830)
+ if (pf->hw.mac_type == ICE_MAC_E830) {
ice_set_feature_support(pf, ICE_F_MBX_LIMIT);
+ ice_set_feature_support(pf, ICE_F_GCS);
+ ice_set_feature_support(pf, ICE_F_TXTIME);
+ }
}
/**
@@ -3929,24 +3997,6 @@ void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx)
}
/**
- * ice_vsi_ctx_set_allow_override - allow destination override on VSI
- * @ctx: pointer to VSI ctx structure
- */
-void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx)
-{
- ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
-}
-
-/**
- * ice_vsi_ctx_clear_allow_override - turn off destination override on VSI
- * @ctx: pointer to VSI ctx structure
- */
-void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx)
-{
- ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
-}
-
-/**
* ice_vsi_update_local_lb - update sw block in VSI with local loopback bit
* @vsi: pointer to VSI structure
* @set: set or unset the bit
@@ -3970,3 +4020,38 @@ ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set)
vsi->info = ctx.info;
return 0;
}
+
+/**
+ * ice_vsi_update_l2tsel - update l2tsel field for all Rx rings on this VSI
+ * @vsi: VSI used to update l2tsel on
+ * @l2tsel: l2tsel setting requested
+ *
+ * Use the l2tsel setting to update all of the Rx queue context bits for l2tsel.
+ * This will modify which descriptor field the first offloaded VLAN will be
+ * stripped into.
+ */
+void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ u32 l2tsel_bit;
+ int i;
+
+ if (l2tsel == ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND)
+ l2tsel_bit = 0;
+ else
+ l2tsel_bit = BIT(ICE_L2TSEL_BIT_OFFSET);
+
+ for (i = 0; i < vsi->alloc_rxq; i++) {
+ u16 pfq = vsi->rxq_map[i];
+ u32 qrx_context_offset;
+ u32 regval;
+
+ qrx_context_offset =
+ QRX_CONTEXT(ICE_L2TSEL_QRX_CONTEXT_REG_IDX, pfq);
+
+ regval = rd32(hw, qrx_context_offset);
+ regval &= ~BIT(ICE_L2TSEL_BIT_OFFSET);
+ regval |= l2tsel_bit;
+ wr32(hw, qrx_context_offset, regval);
+ }
+}
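Editor's note: a short sketch (assumption, not part of the patch) of a caller choosing the descriptor field used for the first stripped VLAN tag via the new ice_vsi_update_l2tsel() helper. In outer-VLAN setups the outer tag is typically requested in L2TAG2, otherwise L2TAG1; the helper below is illustrative only.

/* Hypothetical caller selecting the VLAN strip target for a VSI. */
static void example_set_vlan_strip_target(struct ice_vsi *vsi, bool outer_to_l2tag2)
{
	enum ice_l2tsel l2tsel = outer_to_l2tag2 ?
		ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND :
		ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1;

	/* Performs a read-modify-write of the l2tsel bit in every Rx queue
	 * context of the VSI, as implemented above.
	 */
	ice_vsi_update_l2tsel(vsi, l2tsel);
}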
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 10d6fc479a32..2cb1eb98b9da 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -11,6 +11,13 @@
#define ICE_VSI_FLAG_INIT BIT(0)
#define ICE_VSI_FLAG_NO_INIT 0
+#define ICE_L2TSEL_QRX_CONTEXT_REG_IDX 3
+#define ICE_L2TSEL_BIT_OFFSET 23
+enum ice_l2tsel {
+ ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND,
+ ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1,
+};
+
const char *ice_vsi_type_str(enum ice_vsi_type vsi_type);
bool ice_pf_state_is_nominal(struct ice_pf *pf);
@@ -29,7 +36,8 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);
-void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
+void ice_vsi_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
+void ice_cfg_sw_rx_lldp(struct ice_pf *pf, bool enable);
int ice_set_link(struct ice_vsi *vsi, bool ena);
@@ -90,6 +98,7 @@ void ice_set_q_vector_intrl(struct ice_q_vector *q_vector);
bool ice_is_safe_mode(struct ice_pf *pf);
bool ice_is_rdma_ena(struct ice_pf *pf);
+bool ice_is_recovery_mode(struct ice_hw *hw);
bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi);
bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi);
int ice_set_dflt_vsi(struct ice_vsi *vsi);
@@ -104,10 +113,6 @@ ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *))
void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx);
void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx);
-
-void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx);
-
-void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx);
int ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set);
int ice_vsi_add_vlan_zero(struct ice_vsi *vsi);
int ice_vsi_del_vlan_zero(struct ice_vsi *vsi);
@@ -118,4 +123,5 @@ void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f);
void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f);
void ice_init_feature_support(struct ice_pf *pf);
bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi);
+void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel);
#endif /* !_ICE_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index a6f586f9bfd1..4bb68e7a00f5 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -14,7 +14,7 @@
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "devlink/devlink.h"
-#include "devlink/devlink_port.h"
+#include "devlink/port.h"
#include "ice_sf_eth.h"
#include "ice_hwmon.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
@@ -37,7 +37,11 @@ static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
#define ICE_DDP_PKG_FILE ICE_DDP_PKG_PATH "ice.pkg"
MODULE_DESCRIPTION(DRV_SUMMARY);
-MODULE_IMPORT_NS(LIBIE);
+MODULE_IMPORT_NS("LIBETH");
+MODULE_IMPORT_NS("LIBETH_XDP");
+MODULE_IMPORT_NS("LIBIE");
+MODULE_IMPORT_NS("LIBIE_ADMINQ");
+MODULE_IMPORT_NS("LIBIE_FWLOG");
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
@@ -379,7 +383,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
* should go into promiscuous mode. There should be some
* space reserved for promiscuous filters.
*/
- if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
+ if (hw->adminq.sq_last_status == LIBIE_AQ_RC_ENOSPC &&
!test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
vsi->state)) {
promisc_forced_on = true;
@@ -1119,7 +1123,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
if (status)
dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
pi->lport, status,
- ice_aq_str(pi->hw->adminq.sq_last_status));
+ libie_aq_str(pi->hw->adminq.sq_last_status));
ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
@@ -1144,7 +1148,10 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
if (link_up == old_link && link_speed == old_link_speed)
return 0;
- ice_ptp_link_change(pf, pf->hw.pf_id, link_up);
+ if (!link_up && old_link)
+ pf->link_down_events++;
+
+ ice_ptp_link_change(pf, link_up);
if (ice_is_dcb_active(pf)) {
if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
@@ -1247,32 +1254,6 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
}
/**
- * ice_get_fwlog_data - copy the FW log data from ARQ event
- * @pf: PF that the FW log event is associated with
- * @event: event structure containing FW log data
- */
-static void
-ice_get_fwlog_data(struct ice_pf *pf, struct ice_rq_event_info *event)
-{
- struct ice_fwlog_data *fwlog;
- struct ice_hw *hw = &pf->hw;
-
- fwlog = &hw->fwlog_ring.rings[hw->fwlog_ring.tail];
-
- memset(fwlog->data, 0, PAGE_SIZE);
- fwlog->data_size = le16_to_cpu(event->desc.datalen);
-
- memcpy(fwlog->data, event->msg_buf, fwlog->data_size);
- ice_fwlog_ring_increment(&hw->fwlog_ring.tail, hw->fwlog_ring.size);
-
- if (ice_fwlog_ring_full(&hw->fwlog_ring)) {
- /* the rings are full so bump the head to create room */
- ice_fwlog_ring_increment(&hw->fwlog_ring.head,
- hw->fwlog_ring.size);
- }
-}
-
-/**
* ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
* @pf: pointer to the PF private structure
* @task: intermediate helper storage and identifier for waiting
@@ -1562,11 +1543,15 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
}
break;
case ice_aqc_opc_fw_logs_event:
- ice_get_fwlog_data(pf, &event);
+ libie_get_fwlog_data(&hw->fwlog, event.msg_buf,
+ le16_to_cpu(event.desc.datalen));
break;
case ice_aqc_opc_lldp_set_mib_change:
ice_dcb_process_lldp_set_mib_change(pf, &event);
break;
+ case ice_aqc_opc_get_health_status:
+ ice_process_health_status_event(pf, &event);
+ break;
default:
dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
qtype, opcode);
@@ -1714,7 +1699,7 @@ static int ice_service_task_stop(struct ice_pf *pf)
ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);
if (pf->serv_tmr.function)
- del_timer_sync(&pf->serv_tmr);
+ timer_delete_sync(&pf->serv_tmr);
if (pf->serv_task.func)
cancel_work_sync(&pf->serv_task);
@@ -1740,7 +1725,7 @@ static void ice_service_task_restart(struct ice_pf *pf)
*/
static void ice_service_timer(struct timer_list *t)
{
- struct ice_pf *pf = from_timer(pf, t, serv_tmr);
+ struct ice_pf *pf = timer_container_of(pf, t, serv_tmr);
mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
ice_service_task_schedule(pf);
@@ -1816,6 +1801,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
+ ice_report_mdd_event(pf, ICE_MDD_SRC_TX_PQM, pf_num, vf_num,
+ event, queue);
wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
}
@@ -1829,6 +1816,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
+ ice_report_mdd_event(pf, ICE_MDD_SRC_TX_TCLAN, pf_num, vf_num,
+ event, queue);
wr32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw), U32_MAX);
}
@@ -1842,6 +1831,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_rx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
+ ice_report_mdd_event(pf, ICE_MDD_SRC_RX, pf_num, vf_num, event,
+ queue);
wr32(hw, GL_MDET_RX, 0xffffffff);
}
@@ -2355,6 +2346,18 @@ static void ice_check_media_subtask(struct ice_pf *pf)
}
}
+static void ice_service_task_recovery_mode(struct work_struct *work)
+{
+ struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
+
+ set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
+ ice_clean_adminq_subtask(pf);
+
+ ice_service_task_complete(pf);
+
+ mod_timer(&pf->serv_tmr, jiffies + msecs_to_jiffies(100));
+}
+
/**
* ice_service_task - manage and run subtasks
* @work: pointer to work_struct contained by the PF struct
@@ -2364,9 +2367,11 @@ static void ice_service_task(struct work_struct *work)
struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
unsigned long start_time = jiffies;
- /* subtasks */
+ if (pf->health_reporters.tx_hang_buf.tx_ring) {
+ ice_report_tx_hang(pf);
+ pf->health_reporters.tx_hang_buf.tx_ring = NULL;
+ }
- /* process reset requests first */
ice_reset_subtask(pf);
/* bail if a reset/recovery cycle is pending or rebuild failed */
@@ -2378,11 +2383,11 @@ static void ice_service_task(struct work_struct *work)
}
if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
- struct iidc_event *event;
+ struct iidc_rdma_event *event;
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (event) {
- set_bit(IIDC_EVENT_CRIT_ERR, event->type);
+ set_bit(IIDC_RDMA_EVENT_CRIT_ERR, event->type);
/* report the entire OICR value to AUX driver */
swap(event->reg, pf->oicr_err_reg);
ice_send_event_to_aux(pf, event);
@@ -2401,11 +2406,11 @@ static void ice_service_task(struct work_struct *work)
ice_plug_aux_dev(pf);
if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
- struct iidc_event *event;
+ struct iidc_rdma_event *event;
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (event) {
- set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
+ set_bit(IIDC_RDMA_EVENT_AFTER_MTU_CHANGE, event->type);
ice_send_event_to_aux(pf, event);
kfree(event);
}
@@ -2505,34 +2510,6 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
}
/**
- * ice_irq_affinity_notify - Callback for affinity changes
- * @notify: context as to what irq was changed
- * @mask: the new affinity mask
- *
- * This is a callback function used by the irq_set_affinity_notifier function
- * so that we may register to receive changes to the irq affinity masks.
- */
-static void
-ice_irq_affinity_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
-{
- struct ice_q_vector *q_vector =
- container_of(notify, struct ice_q_vector, affinity_notify);
-
- cpumask_copy(&q_vector->affinity_mask, mask);
-}
-
-/**
- * ice_irq_affinity_release - Callback for affinity notifier release
- * @ref: internal core kernel usage
- *
- * This is a callback function used by the irq_set_affinity_notifier function
- * to inform the current notification subscriber that they will no longer
- * receive notifications.
- */
-static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
-
-/**
* ice_vsi_ena_irq - Enable IRQ for the given VSI
* @vsi: the VSI being configured
*/
@@ -2595,19 +2572,6 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
err);
goto free_q_irqs;
}
-
- /* register for affinity change notifications */
- if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
- struct irq_affinity_notify *affinity_notify;
-
- affinity_notify = &q_vector->affinity_notify;
- affinity_notify->notify = ice_irq_affinity_notify;
- affinity_notify->release = ice_irq_affinity_release;
- irq_set_affinity_notifier(irq_num, affinity_notify);
- }
-
- /* assign the mask for this irq */
- irq_update_affinity_hint(irq_num, &q_vector->affinity_mask);
}
err = ice_set_cpu_rx_rmap(vsi);
@@ -2623,9 +2587,6 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
free_q_irqs:
while (vector--) {
irq_num = vsi->q_vectors[vector]->irq.virq;
- if (!IS_ENABLED(CONFIG_RFS_ACCEL))
- irq_set_affinity_notifier(irq_num, NULL);
- irq_update_affinity_hint(irq_num, NULL);
devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
}
return err;
@@ -2762,6 +2723,27 @@ void ice_map_xdp_rings(struct ice_vsi *vsi)
}
/**
+ * ice_unmap_xdp_rings - Unmap XDP rings from interrupt vectors
+ * @vsi: the VSI with XDP rings being unmapped
+ */
+static void ice_unmap_xdp_rings(struct ice_vsi *vsi)
+{
+ int v_idx;
+
+ ice_for_each_q_vector(vsi, v_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+ struct ice_tx_ring *ring;
+
+ ice_for_each_tx_ring(ring, q_vector->tx)
+ if (!ring->tx_buf || !ice_ring_is_xdp(ring))
+ break;
+
+ /* restore the value of last node prior to XDP setup */
+ q_vector->tx.tx_ring = ring;
+ }
+}
+
+/**
* ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
* @vsi: VSI to bring up Tx rings used by XDP
* @prog: bpf program that will be assigned to VSI
@@ -2824,7 +2806,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
if (status) {
dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
status);
- goto clear_xdp_rings;
+ goto unmap_xdp_rings;
}
/* assign the prog only when it's not already present on VSI;
@@ -2840,6 +2822,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
ice_vsi_assign_bpf_prog(vsi, prog);
return 0;
+unmap_xdp_rings:
+ ice_unmap_xdp_rings(vsi);
clear_xdp_rings:
ice_for_each_xdp_txq(vsi, i)
if (vsi->xdp_rings[i]) {
@@ -2856,6 +2840,8 @@ err_map_xdp:
mutex_unlock(&pf->avail_q_mutex);
devm_kfree(dev, vsi->xdp_rings);
+ vsi->xdp_rings = NULL;
+
return -ENOMEM;
}
@@ -2871,7 +2857,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_pf *pf = vsi->back;
- int i, v_idx;
+ int i;
/* q_vectors are freed in reset path so there's no point in detaching
* rings
@@ -2879,17 +2865,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
if (cfg_type == ICE_XDP_CFG_PART)
goto free_qmap;
- ice_for_each_q_vector(vsi, v_idx) {
- struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
- struct ice_tx_ring *ring;
-
- ice_for_each_tx_ring(ring, q_vector->tx)
- if (!ring->tx_buf || !ice_ring_is_xdp(ring))
- break;
-
- /* restore the value of last node prior to XDP setup */
- q_vector->tx.tx_ring = ring;
- }
+ ice_unmap_xdp_rings(vsi);
free_qmap:
mutex_lock(&pf->avail_q_mutex);
@@ -2983,10 +2959,7 @@ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
*/
static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
{
- if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
- return ICE_RXBUF_1664;
- else
- return ICE_RXBUF_3072;
+ return ICE_RXBUF_3072;
}
/**
@@ -3034,28 +3007,24 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
if (xdp_ring_err) {
NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
+ goto resume_if;
} else {
xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
ICE_XDP_CFG_FULL);
- if (xdp_ring_err)
+ if (xdp_ring_err) {
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
+ goto resume_if;
+ }
}
xdp_features_set_redirect_target(vsi->netdev, true);
- /* reallocate Rx queues that are used for zero-copy */
- xdp_ring_err = ice_realloc_zc_buf(vsi, true);
- if (xdp_ring_err)
- NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
xdp_features_clear_redirect_target(vsi->netdev);
xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
- /* reallocate Rx queues that were used for zero-copy */
- xdp_ring_err = ice_realloc_zc_buf(vsi, false);
- if (xdp_ring_err)
- NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
}
+resume_if:
if (if_running)
ret = ice_up(vsi);
@@ -3174,12 +3143,14 @@ static irqreturn_t ice_ll_ts_intr(int __always_unused irq, void *data)
hw = &pf->hw;
tx = &pf->ptp.port.tx;
spin_lock_irqsave(&tx->lock, flags);
- ice_ptp_complete_tx_single_tstamp(tx);
+ if (tx->init) {
+ ice_ptp_complete_tx_single_tstamp(tx);
- idx = find_next_bit_wrap(tx->in_use, tx->len,
- tx->last_ll_ts_idx_read + 1);
- if (idx != tx->len)
- ice_ptp_req_tx_single_tstamp(tx, idx);
+ idx = find_next_bit_wrap(tx->in_use, tx->len,
+ tx->last_ll_ts_idx_read + 1);
+ if (idx != tx->len)
+ ice_ptp_req_tx_single_tstamp(tx, idx);
+ }
spin_unlock_irqrestore(&tx->lock, flags);
val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
@@ -3281,22 +3252,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
if (oicr & PFINT_OICR_TSYN_TX_M) {
ena_mask &= ~PFINT_OICR_TSYN_TX_M;
- if (ice_pf_state_is_nominal(pf) &&
- pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) {
- struct ice_ptp_tx *tx = &pf->ptp.port.tx;
- unsigned long flags;
- u8 idx;
-
- spin_lock_irqsave(&tx->lock, flags);
- idx = find_next_bit_wrap(tx->in_use, tx->len,
- tx->last_ll_ts_idx_read + 1);
- if (idx != tx->len)
- ice_ptp_req_tx_single_tstamp(tx, idx);
- spin_unlock_irqrestore(&tx->lock, flags);
- } else if (ice_ptp_pf_handles_tx_interrupt(pf)) {
- set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
- ret = IRQ_WAKE_THREAD;
- }
+
+ ret = ice_ptp_ts_irq(pf);
}
if (oicr & PFINT_OICR_TSYN_EVNT_M) {
@@ -3666,6 +3623,15 @@ void ice_set_netdev_features(struct net_device *netdev)
*/
netdev->hw_features |= NETIF_F_RXFCS;
+ /* Allow core to manage IRQs affinity */
+ netif_set_affinity_auto(netdev);
+
+ /* Mutual exclusivity for TSO and GCS is enforced by the set features
+ * ndo callback.
+ */
+ if (ice_is_feature_supported(pf, ICE_F_GCS))
+ netdev->hw_features |= NETIF_F_HW_CSUM;
+
netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE);
}
@@ -3974,9 +3940,10 @@ u16 ice_get_avail_rxq_count(struct ice_pf *pf)
 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
* @pf: board private structure to initialize
*/
-static void ice_deinit_pf(struct ice_pf *pf)
+void ice_deinit_pf(struct ice_pf *pf)
{
- ice_service_task_stop(pf);
+ /* note that we also unroll here on ice_init_pf() failure */
+
mutex_destroy(&pf->lag_mutex);
mutex_destroy(&pf->adev_mutex);
mutex_destroy(&pf->sw_mutex);
@@ -3994,9 +3961,17 @@ static void ice_deinit_pf(struct ice_pf *pf)
pf->avail_rxqs = NULL;
}
+ if (pf->txtime_txqs) {
+ bitmap_free(pf->txtime_txqs);
+ pf->txtime_txqs = NULL;
+ }
+
if (pf->ptp.clock)
ptp_clock_unregister(pf->ptp.clock);
+ if (!xa_empty(&pf->irq_tracker.entries))
+ ice_free_irq_msix_misc(pf);
+
xa_destroy(&pf->dyn_ports);
xa_destroy(&pf->sf_nums);
}
@@ -4043,21 +4018,32 @@ static void ice_set_pf_caps(struct ice_pf *pf)
}
clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
- if (func_caps->common_cap.ieee_1588 &&
- !(pf->hw.mac_type == ICE_MAC_E830))
+ if (func_caps->common_cap.ieee_1588)
set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
pf->max_pf_txqs = func_caps->common_cap.num_txq;
pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
}
+void ice_start_service_task(struct ice_pf *pf)
+{
+ timer_setup(&pf->serv_tmr, ice_service_timer, 0);
+ pf->serv_tmr_period = HZ;
+ INIT_WORK(&pf->serv_task, ice_service_task);
+ clear_bit(ICE_SERVICE_SCHED, pf->state);
+}
+
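Editor's note: a brief sketch (not from the patch) of how the split-out service-task helpers are meant to be sequenced. ice_start_service_task() only prepares the timer and work item; scheduling is re-enabled later once the device is ready. The wrapper below is illustrative only.

/* Hypothetical bring-up order for the service task after this split. */
static void example_service_task_bringup(struct ice_pf *pf)
{
	ice_start_service_task(pf);	/* timer_setup() + INIT_WORK(), not armed yet */

	/* ... remaining device initialization ... */

	ice_service_task_restart(pf);	/* re-enable and schedule the service task */
}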
/**
* ice_init_pf - Initialize general software structures (struct ice_pf)
* @pf: board private structure to initialize
+ * Return: 0 on success, negative errno otherwise.
*/
-static int ice_init_pf(struct ice_pf *pf)
+int ice_init_pf(struct ice_pf *pf)
{
- ice_set_pf_caps(pf);
+ struct udp_tunnel_nic_info *udp_tunnel_nic = &pf->hw.udp_tunnel_nic;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ int err = -ENOMEM;
mutex_init(&pf->sw_mutex);
mutex_init(&pf->tc_mutex);
@@ -4070,23 +4056,7 @@ static int ice_init_pf(struct ice_pf *pf)
init_waitqueue_head(&pf->reset_wait_queue);
- /* setup service timer and periodic service task */
- timer_setup(&pf->serv_tmr, ice_service_timer, 0);
- pf->serv_tmr_period = HZ;
- INIT_WORK(&pf->serv_task, ice_service_task);
- clear_bit(ICE_SERVICE_SCHED, pf->state);
-
mutex_init(&pf->avail_q_mutex);
- pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
- if (!pf->avail_txqs)
- return -ENOMEM;
-
- pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
- if (!pf->avail_rxqs) {
- bitmap_free(pf->avail_txqs);
- pf->avail_txqs = NULL;
- return -ENOMEM;
- }
mutex_init(&pf->vfs.table_lock);
hash_init(pf->vfs.table);
@@ -4099,7 +4069,36 @@ static int ice_init_pf(struct ice_pf *pf)
xa_init(&pf->dyn_ports);
xa_init(&pf->sf_nums);
+ pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
+ pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
+ pf->txtime_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
+ if (!pf->avail_txqs || !pf->avail_rxqs || !pf->txtime_txqs)
+ goto undo_init;
+
+ udp_tunnel_nic->set_port = ice_udp_tunnel_set_port;
+ udp_tunnel_nic->unset_port = ice_udp_tunnel_unset_port;
+ udp_tunnel_nic->shared = &hw->udp_tunnel_shared;
+ udp_tunnel_nic->tables[0].n_entries = hw->tnl.valid_count[TNL_VXLAN];
+ udp_tunnel_nic->tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN;
+ udp_tunnel_nic->tables[1].n_entries = hw->tnl.valid_count[TNL_GENEVE];
+ udp_tunnel_nic->tables[1].tunnel_types = UDP_TUNNEL_TYPE_GENEVE;
+
+ /* In case of MSIX we are going to set up the misc vector right here
+ * to handle admin queue events etc. In case of legacy and MSI,
+ * the misc functionality and queue processing are combined in
+ * the same vector and that gets set up at open.
+ */
+ err = ice_req_irq_msix_misc(pf);
+ if (err) {
+ dev_err(dev, "setup of misc vector failed: %d\n", err);
+ goto undo_init;
+ }
+
return 0;
+undo_init:
+ /* deinit handles half-initialized pf just fine */
+ ice_deinit_pf(pf);
+ return err;
}
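Editor's note: a small sketch (assumption, not part of the patch) illustrating the error-handling contract of the reworked ice_init_pf(): on failure it unwinds itself via ice_deinit_pf(), which tolerates a half-initialized PF, so callers only pair it with an explicit ice_deinit_pf() when a later step fails. The follow-up step below is a placeholder.

static int example_later_init_step(struct ice_pf *pf)
{
	return 0;	/* placeholder for whatever init step follows */
}

static int example_pf_bringup(struct ice_pf *pf)
{
	int err;

	err = ice_init_pf(pf);
	if (err)
		return err;	/* ice_init_pf() already cleaned up after itself */

	err = example_later_init_step(pf);
	if (err) {
		ice_deinit_pf(pf);	/* explicit unwind once ice_init_pf() succeeded */
		return err;
	}

	return 0;
}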
/**
@@ -4229,7 +4228,7 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
} else {
vsi->info.sec_flags = ctxt->info.sec_flags;
vsi->info.sw_flags2 = ctxt->info.sw_flags2;
@@ -4540,21 +4539,55 @@ ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware)
dev_info(dev, "Tx scheduling layers switching feature disabled\n");
else
dev_info(dev, "Tx scheduling layers switching feature enabled\n");
- /* if there was a change in topology ice_cfg_tx_topo triggered
- * a CORER and we need to re-init hw
+ return 0;
+ } else if (err == -ENODEV) {
+ /* If we failed to re-initialize the device, we can no longer
+ * continue loading.
*/
- ice_deinit_hw(hw);
- err = ice_init_hw(hw);
-
+ dev_warn(dev, "Failed to initialize hardware after applying Tx scheduling configuration.\n");
return err;
} else if (err == -EIO) {
dev_info(dev, "DDP package does not support Tx scheduling layers switching feature - please update to the latest DDP package and try again\n");
+ return 0;
+ } else if (err == -EEXIST) {
+ return 0;
}
+ /* Do not treat this as a fatal error. */
+ dev_info(dev, "Failed to apply Tx scheduling configuration, err %pe\n",
+ ERR_PTR(err));
return 0;
}
/**
+ * ice_init_supported_rxdids - Initialize supported Rx descriptor IDs
+ * @hw: pointer to the hardware structure
+ * @pf: pointer to pf structure
+ *
+ * The pf->supported_rxdids bitmap is used to indicate to VFs which descriptor
+ * formats the PF hardware supports. The exact list of supported RXDIDs
+ * depends on the loaded DDP package. The IDs can be determined by reading the
+ * GLFLXP_RXDID_FLAGS register after the DDP package is loaded.
+ *
+ * Note that the legacy 32-byte RXDID 0 is always supported but is not listed
+ * in the DDP package. The 16-byte legacy descriptor is never supported by
+ * VFs.
+ */
+static void ice_init_supported_rxdids(struct ice_hw *hw, struct ice_pf *pf)
+{
+ pf->supported_rxdids = BIT(ICE_RXDID_LEGACY_1);
+
+ for (int i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
+ u32 regval;
+
+ regval = rd32(hw, GLFLXP_RXDID_FLAGS(i, 0));
+ if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
+ & GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
+ pf->supported_rxdids |= BIT(i);
+ }
+}
+
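Editor's note: an illustrative sketch (not part of the patch) of how the supported_rxdids bitmap built above might later be consulted, for example when validating a VF-requested flexible Rx descriptor ID. The function name and calling context are assumptions.

/* Hypothetical check against the bitmap populated by ice_init_supported_rxdids(). */
static bool example_rxdid_is_supported(struct ice_pf *pf, u32 rxdid)
{
	if (rxdid >= ICE_FLEX_DESC_RXDID_MAX_NUM)
		return false;

	return !!(pf->supported_rxdids & BIT(rxdid));
}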
+/**
* ice_init_ddp_config - DDP related configuration
* @hw: pointer to the hardware structure
* @pf: pointer to pf structure
@@ -4588,6 +4621,9 @@ static int ice_init_ddp_config(struct ice_hw *hw, struct ice_pf *pf)
ice_load_pkg(firmware, pf);
release_firmware(firmware);
+ /* Initialize the supported Rx descriptor IDs after loading DDP */
+ ice_init_supported_rxdids(hw, pf);
+
return 0;
}
@@ -4619,19 +4655,6 @@ static void ice_print_wake_reason(struct ice_pf *pf)
}
/**
- * ice_pf_fwlog_update_module - update 1 module
- * @pf: pointer to the PF struct
- * @log_level: log_level to use for the @module
- * @module: module to update
- */
-void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module)
-{
- struct ice_hw *hw = &pf->hw;
-
- hw->fwlog_cfg.module_entries[module].log_level = log_level;
-}
-
-/**
* ice_register_netdev - register netdev
* @vsi: pointer to the VSI struct
*/
@@ -4710,55 +4733,11 @@ static void ice_decfg_netdev(struct ice_vsi *vsi)
vsi->netdev = NULL;
}
-/**
- * ice_wait_for_fw - wait for full FW readiness
- * @hw: pointer to the hardware structure
- * @timeout: milliseconds that can elapse before timing out
- */
-static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
+void ice_init_dev_hw(struct ice_pf *pf)
{
- int fw_loading;
- u32 elapsed = 0;
-
- while (elapsed <= timeout) {
- fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
-
- /* firmware was not yet loaded, we have to wait more */
- if (fw_loading) {
- elapsed += 100;
- msleep(100);
- continue;
- }
- return 0;
- }
-
- return -ETIMEDOUT;
-}
-
-int ice_init_dev(struct ice_pf *pf)
-{
- struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
int err;
- err = ice_init_hw(hw);
- if (err) {
- dev_err(dev, "ice_init_hw failed: %d\n", err);
- return err;
- }
-
- /* Some cards require longer initialization times
- * due to necessity of loading FW from an external source.
- * This can take even half a minute.
- */
- if (ice_is_pf_c827(hw)) {
- err = ice_wait_for_fw(hw, 30000);
- if (err) {
- dev_err(dev, "ice_wait_for_fw timed out");
- return err;
- }
- }
-
ice_init_feature_support(pf);
err = ice_init_ddp_config(hw, pf);
@@ -4775,64 +4754,28 @@ int ice_init_dev(struct ice_pf *pf)
*/
ice_set_safe_mode_caps(hw);
}
+}
- err = ice_init_pf(pf);
- if (err) {
- dev_err(dev, "ice_init_pf failed: %d\n", err);
- goto err_init_pf;
- }
-
- pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
- pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
- pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
- pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
- if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
- pf->hw.udp_tunnel_nic.tables[0].n_entries =
- pf->hw.tnl.valid_count[TNL_VXLAN];
- pf->hw.udp_tunnel_nic.tables[0].tunnel_types =
- UDP_TUNNEL_TYPE_VXLAN;
- }
- if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
- pf->hw.udp_tunnel_nic.tables[1].n_entries =
- pf->hw.tnl.valid_count[TNL_GENEVE];
- pf->hw.udp_tunnel_nic.tables[1].tunnel_types =
- UDP_TUNNEL_TYPE_GENEVE;
- }
+int ice_init_dev(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+ ice_set_pf_caps(pf);
err = ice_init_interrupt_scheme(pf);
if (err) {
dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
- err = -EIO;
- goto err_init_interrupt_scheme;
+ return -EIO;
}
- /* In case of MSIX we are going to setup the misc vector right here
- * to handle admin queue events etc. In case of legacy and MSI
- * the misc functionality and queue processing is combined in
- * the same vector and that gets setup at open.
- */
- err = ice_req_irq_msix_misc(pf);
- if (err) {
- dev_err(dev, "setup of misc vector failed: %d\n", err);
- goto err_req_irq_msix_misc;
- }
+ ice_start_service_task(pf);
return 0;
-
-err_req_irq_msix_misc:
- ice_clear_interrupt_scheme(pf);
-err_init_interrupt_scheme:
- ice_deinit_pf(pf);
-err_init_pf:
- ice_deinit_hw(hw);
- return err;
}
void ice_deinit_dev(struct ice_pf *pf)
{
- ice_free_irq_msix_misc(pf);
- ice_deinit_pf(pf);
- ice_deinit_hw(&pf->hw);
+ ice_service_task_stop(pf);
/* Service task is already stopped, so call reset directly. */
ice_reset(&pf->hw, ICE_RESET_PFR);
@@ -5057,12 +5000,14 @@ static int ice_init_devlink(struct ice_pf *pf)
ice_devlink_init_regions(pf);
ice_devlink_register(pf);
+ ice_health_init(pf);
return 0;
}
static void ice_deinit_devlink(struct ice_pf *pf)
{
+ ice_health_deinit(pf);
ice_devlink_unregister(pf);
ice_devlink_destroy_regions(pf);
ice_devlink_unregister_params(pf);
@@ -5070,15 +5015,24 @@ static void ice_deinit_devlink(struct ice_pf *pf)
static int ice_init(struct ice_pf *pf)
{
+ struct device *dev = ice_pf_to_dev(pf);
int err;
- err = ice_init_dev(pf);
- if (err)
+ err = ice_init_pf(pf);
+ if (err) {
+ dev_err(dev, "ice_init_pf failed: %d\n", err);
return err;
+ }
+
+ if (pf->hw.mac_type == ICE_MAC_E830) {
+ err = pci_enable_ptm(pf->pdev, NULL);
+ if (err)
+ dev_dbg(dev, "PCIe PTM not supported by PCIe bus/controller\n");
+ }
err = ice_alloc_vsis(pf);
if (err)
- goto err_alloc_vsis;
+ goto unroll_pf_init;
err = ice_init_pf_sw(pf);
if (err)
@@ -5115,8 +5069,8 @@ err_init_link:
ice_deinit_pf_sw(pf);
err_init_pf_sw:
ice_dealloc_vsis(pf);
-err_alloc_vsis:
- ice_deinit_dev(pf);
+unroll_pf_init:
+ ice_deinit_pf(pf);
return err;
}
@@ -5127,7 +5081,7 @@ static void ice_deinit(struct ice_pf *pf)
ice_deinit_pf_sw(pf);
ice_dealloc_vsis(pf);
- ice_deinit_dev(pf);
+ ice_deinit_pf(pf);
}
/**
@@ -5175,11 +5129,12 @@ int ice_load(struct ice_pf *pf)
ice_napi_add(vsi);
+ ice_init_features(pf);
+
err = ice_init_rdma(pf);
if (err)
goto err_init_rdma;
- ice_init_features(pf);
ice_service_task_restart(pf);
clear_bit(ICE_DOWN, pf->state);
@@ -5187,6 +5142,7 @@ int ice_load(struct ice_pf *pf)
return 0;
err_init_rdma:
+ ice_deinit_features(pf);
ice_tc_indir_block_unregister(vsi);
err_tc_indir_block_register:
ice_unregister_netdev(vsi);
@@ -5210,14 +5166,44 @@ void ice_unload(struct ice_pf *pf)
devl_assert_locked(priv_to_devlink(pf));
- ice_deinit_features(pf);
ice_deinit_rdma(pf);
+ ice_deinit_features(pf);
ice_tc_indir_block_unregister(vsi);
ice_unregister_netdev(vsi);
ice_devlink_destroy_pf_port(pf);
ice_decfg_netdev(vsi);
}
+static int ice_probe_recovery_mode(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ dev_err(dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode\n");
+
+ INIT_HLIST_HEAD(&pf->aq_wait_list);
+ spin_lock_init(&pf->aq_wait_lock);
+ init_waitqueue_head(&pf->aq_wait_queue);
+
+ timer_setup(&pf->serv_tmr, ice_service_timer, 0);
+ pf->serv_tmr_period = HZ;
+ INIT_WORK(&pf->serv_task, ice_service_task_recovery_mode);
+ clear_bit(ICE_SERVICE_SCHED, pf->state);
+ err = ice_create_all_ctrlq(&pf->hw);
+ if (err)
+ return err;
+
+ scoped_guard(devl, priv_to_devlink(pf)) {
+ err = ice_init_devlink(pf);
+ if (err)
+ return err;
+ }
+
+ ice_service_task_restart(pf);
+
+ return 0;
+}
+
/**
* ice_probe - Device initialization routine
* @pdev: PCI device information struct
@@ -5229,6 +5215,7 @@ static int
ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
{
struct device *dev = &pdev->dev;
+ bool need_dev_deinit = false;
struct ice_adapter *adapter;
struct ice_pf *pf;
struct ice_hw *hw;
@@ -5281,13 +5268,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
}
pci_set_master(pdev);
-
- adapter = ice_adapter_get(pdev);
- if (IS_ERR(adapter))
- return PTR_ERR(adapter);
-
pf->pdev = pdev;
- pf->adapter = adapter;
pci_set_drvdata(pdev, pf);
set_bit(ICE_DOWN, pf->state);
/* Disable service task until DOWN bit is cleared */
@@ -5315,29 +5296,55 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
hw->debug_mask = debug;
#endif
+ if (ice_is_recovery_mode(hw))
+ return ice_probe_recovery_mode(pf);
+
+ err = ice_init_hw(hw);
+ if (err) {
+ dev_err(dev, "ice_init_hw failed: %d\n", err);
+ return err;
+ }
+
+ adapter = ice_adapter_get(pdev);
+ if (IS_ERR(adapter)) {
+ err = PTR_ERR(adapter);
+ goto unroll_hw_init;
+ }
+ pf->adapter = adapter;
+
+ err = ice_init_dev(pf);
+ if (err)
+ goto unroll_adapter;
+
err = ice_init(pf);
if (err)
- goto err_init;
+ goto unroll_dev_init;
devl_lock(priv_to_devlink(pf));
err = ice_load(pf);
if (err)
- goto err_load;
+ goto unroll_init;
err = ice_init_devlink(pf);
if (err)
- goto err_init_devlink;
+ goto unroll_load;
devl_unlock(priv_to_devlink(pf));
return 0;
-err_init_devlink:
+unroll_load:
ice_unload(pf);
-err_load:
+unroll_init:
devl_unlock(priv_to_devlink(pf));
ice_deinit(pf);
-err_init:
+unroll_dev_init:
+ need_dev_deinit = true;
+unroll_adapter:
ice_adapter_put(pdev);
+unroll_hw_init:
+ ice_deinit_hw(hw);
+ if (need_dev_deinit)
+ ice_deinit_dev(pf);
return err;
}
@@ -5399,7 +5406,7 @@ static void ice_setup_mc_magic_wake(struct ice_pf *pf)
status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
if (status)
dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
}
/**
@@ -5417,6 +5424,14 @@ static void ice_remove(struct pci_dev *pdev)
msleep(100);
}
+ if (ice_is_recovery_mode(&pf->hw)) {
+ ice_service_task_stop(pf);
+ scoped_guard(devl, priv_to_devlink(pf)) {
+ ice_deinit_devlink(pf);
+ }
+ return;
+ }
+
if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
set_bit(ICE_VF_RESETS_DISABLED, pf->state);
ice_free_vfs(pf);
@@ -5424,10 +5439,6 @@ static void ice_remove(struct pci_dev *pdev)
ice_hwmon_exit(pf);
- ice_service_task_stop(pf);
- ice_aq_cancel_waiting_tasks(pf);
- set_bit(ICE_DOWN, pf->state);
-
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
@@ -5445,6 +5456,11 @@ static void ice_remove(struct pci_dev *pdev)
ice_set_wake(pf);
ice_adapter_put(pdev);
+ ice_deinit_hw(&pf->hw);
+
+ ice_deinit_dev(pf);
+ ice_aq_cancel_waiting_tasks(pf);
+ set_bit(ICE_DOWN, pf->state);
}
/**
@@ -5637,7 +5653,6 @@ static int ice_resume(struct device *dev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- pci_save_state(pdev);
if (!pci_device_is_present(pdev))
return -ENODEV;
@@ -5737,7 +5752,6 @@ static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
} else {
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
pci_wake_from_d3(pdev, false);
/* Check for life */
@@ -5857,6 +5871,15 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_QSFP), },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_SFP), },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_SFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835CC_BACKPLANE), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835CC_QSFP56), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835CC_SFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835C_BACKPLANE), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835C_QSFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835C_SFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835_L_BACKPLANE), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835_L_QSFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835_L_SFP), },
/* required last entry */
{}
};
@@ -5900,7 +5923,7 @@ static int __init ice_module_init(void)
ice_adv_lnk_speed_maps_init();
- ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
+ ice_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, KBUILD_MODNAME);
if (!ice_wq) {
pr_err("Failed to create workqueue\n");
return status;
@@ -6125,12 +6148,14 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
* @addr: the MAC address entry being added
* @vid: VLAN ID
* @flags: instructions from stack about fdb operation
+ * @notified: whether notification was emitted
* @extack: netlink extended ack
*/
static int
ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
struct net_device *dev, const unsigned char *addr, u16 vid,
- u16 flags, struct netlink_ext_ack __always_unused *extack)
+ u16 flags, bool *notified,
+ struct netlink_ext_ack __always_unused *extack)
{
int err;
@@ -6164,12 +6189,14 @@ ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
* @dev: the net device pointer
* @addr: the MAC address entry being added
* @vid: VLAN ID
+ * @notified: whether notification was emitted
* @extack: netlink extended ack
*/
static int
ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
struct net_device *dev, const unsigned char *addr,
- __always_unused u16 vid, struct netlink_ext_ack *extack)
+ __always_unused u16 vid, bool *notified,
+ struct netlink_ext_ack *extack)
{
int err;
@@ -6373,10 +6400,12 @@ ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
int err = 0;
/* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
- * if either bit is set
+ * if either bit is set. In switchdev mode Rx filtering should never be
+ * enabled.
*/
- if (features &
- (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
+ if ((features &
+ (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) &&
+ !ice_is_eswitch_mode_switchdev(vsi->back))
err = vlan_ops->ena_rx_filtering(vsi);
else
err = vlan_ops->dis_rx_filtering(vsi);
@@ -6530,6 +6559,18 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
if (changed & NETIF_F_LOOPBACK)
ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
+ /* Due to E830 hardware limitations, TSO (NETIF_F_ALL_TSO) with GCS
+ * (NETIF_F_HW_CSUM) is not supported.
+ */
+ if (ice_is_feature_supported(pf, ICE_F_GCS) &&
+ ((features & NETIF_F_HW_CSUM) && (features & NETIF_F_ALL_TSO))) {
+ if (netdev->features & NETIF_F_HW_CSUM)
+ dev_err(ice_pf_to_dev(pf), "To enable TSO, you must first disable HW checksum.\n");
+ else
+ dev_err(ice_pf_to_dev(pf), "To enable HW checksum, you must first disable TSO.\n");
+ return -EIO;
+ }
+
return ret;
}
@@ -6753,7 +6794,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
ice_print_link_msg(vsi, true);
netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev);
- ice_ptp_link_change(pf, pf->hw.pf_id, true);
+ ice_ptp_link_change(pf, true);
}
/* Perform an initial read of the statistics registers now to
@@ -7085,6 +7126,9 @@ void ice_update_pf_stats(struct ice_pf *pf)
&prev_ps->mac_remote_faults,
&cur_ps->mac_remote_faults);
+ ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
+ &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
+
ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
&prev_ps->rx_undersize, &cur_ps->rx_undersize);
@@ -7223,7 +7267,7 @@ int ice_down(struct ice_vsi *vsi)
if (vsi->netdev) {
vlan_err = ice_vsi_del_vlan_zero(vsi);
- ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
+ ice_ptp_link_change(vsi->back, false);
netif_carrier_off(vsi->netdev);
netif_tx_disable(vsi->netdev);
}
@@ -7445,7 +7489,8 @@ int ice_vsi_open(struct ice_vsi *vsi)
if (err)
goto err_setup_rx;
- ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
+ if (bitmap_empty(pf->txtime_txqs, pf->max_pf_txqs))
+ ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_SF) {
/* Notify the stack of the actual queue counts. */
@@ -7756,6 +7801,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
/* if we get here, reset flow is successful */
clear_bit(ICE_RESET_FAILED, pf->state);
+ ice_health_clear(pf);
+
ice_plug_aux_dev(pf);
if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
ice_lag_rebuild(pf);
@@ -7806,12 +7853,6 @@ int ice_change_mtu(struct net_device *netdev, int new_mtu)
frame_size - ICE_ETH_PKT_HDR_PAD);
return -EINVAL;
}
- } else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) {
- if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) {
- netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n",
- ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD);
- return -EINVAL;
- }
}
/* if a reset is in progress, wait for some time for it to complete */
@@ -7842,69 +7883,6 @@ int ice_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
- * ice_eth_ioctl - Access the hwtstamp interface
- * @netdev: network interface device structure
- * @ifr: interface request data
- * @cmd: ioctl command
- */
-static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
-
- switch (cmd) {
- case SIOCGHWTSTAMP:
- return ice_ptp_get_ts_config(pf, ifr);
- case SIOCSHWTSTAMP:
- return ice_ptp_set_ts_config(pf, ifr);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-/**
- * ice_aq_str - convert AQ err code to a string
- * @aq_err: the AQ error code to convert
- */
-const char *ice_aq_str(enum ice_aq_err aq_err)
-{
- switch (aq_err) {
- case ICE_AQ_RC_OK:
- return "OK";
- case ICE_AQ_RC_EPERM:
- return "ICE_AQ_RC_EPERM";
- case ICE_AQ_RC_ENOENT:
- return "ICE_AQ_RC_ENOENT";
- case ICE_AQ_RC_ENOMEM:
- return "ICE_AQ_RC_ENOMEM";
- case ICE_AQ_RC_EBUSY:
- return "ICE_AQ_RC_EBUSY";
- case ICE_AQ_RC_EEXIST:
- return "ICE_AQ_RC_EEXIST";
- case ICE_AQ_RC_EINVAL:
- return "ICE_AQ_RC_EINVAL";
- case ICE_AQ_RC_ENOSPC:
- return "ICE_AQ_RC_ENOSPC";
- case ICE_AQ_RC_ENOSYS:
- return "ICE_AQ_RC_ENOSYS";
- case ICE_AQ_RC_EMODE:
- return "ICE_AQ_RC_EMODE";
- case ICE_AQ_RC_ENOSEC:
- return "ICE_AQ_RC_ENOSEC";
- case ICE_AQ_RC_EBADSIG:
- return "ICE_AQ_RC_EBADSIG";
- case ICE_AQ_RC_ESVN:
- return "ICE_AQ_RC_ESVN";
- case ICE_AQ_RC_EBADMAN:
- return "ICE_AQ_RC_EBADMAN";
- case ICE_AQ_RC_EBADBUF:
- return "ICE_AQ_RC_EBADBUF";
- }
-
- return "ICE_AQ_RC_UNKNOWN";
-}
-
-/**
* ice_set_rss_lut - Set RSS LUT
* @vsi: Pointer to VSI structure
* @lut: Lookup table
@@ -7929,7 +7907,7 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
status = ice_aq_set_rss_lut(hw, &params);
if (status)
dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
return status;
}
@@ -7952,7 +7930,7 @@ int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
if (status)
dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
return status;
}
@@ -7982,7 +7960,7 @@ int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
status = ice_aq_get_rss_lut(hw, &params);
if (status)
dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
return status;
}
@@ -8005,7 +7983,7 @@ int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
if (status)
dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
return status;
}
@@ -8078,9 +8056,7 @@ static int
ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev, u32 filter_mask, int nlflags)
{
- struct ice_netdev_priv *np = netdev_priv(dev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(dev);
u16 bmode;
bmode = pf->first_sw->bridge_mode;
@@ -8122,7 +8098,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (ret) {
dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
- bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
+ bmode, ret, libie_aq_str(hw->adminq.sq_last_status));
goto out;
}
/* Update sw flags for bookkeeping */
@@ -8150,8 +8126,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
u16 __always_unused flags,
struct netlink_ext_ack __always_unused *extack)
{
- struct ice_netdev_priv *np = netdev_priv(dev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(dev);
struct nlattr *attr, *br_spec;
struct ice_hw *hw = &pf->hw;
struct ice_sw *pf_sw;
@@ -8190,7 +8165,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
if (err) {
netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
mode, err,
- ice_aq_str(hw->adminq.sq_last_status));
+ libie_aq_str(hw->adminq.sq_last_status));
/* revert hw->evb_veb */
hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
return err;
@@ -8246,16 +8221,18 @@ void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
if (tx_ring) {
struct ice_hw *hw = &pf->hw;
- u32 head, val = 0;
+ u32 head, intr = 0;
head = FIELD_GET(QTX_COMM_HEAD_HEAD_M,
rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])));
/* Read interrupt register */
- val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
+ intr = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
vsi->vsi_num, txqueue, tx_ring->next_to_clean,
- head, tx_ring->next_to_use, val);
+ head, tx_ring->next_to_use, intr);
+
+ ice_prep_tx_hang_report(pf, tx_ring, vsi->vsi_num, head, intr);
}
pf->tx_timeout_last_recovery = jiffies;
@@ -8289,11 +8266,16 @@ void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
* @np: net device to configure
* @filter_dev: device on which filter is added
* @cls_flower: offload data
+ * @ingress: if the rule is added to an ingress block
+ *
+ * Return: 0 if the flower was successfully added or deleted,
+ * negative error code otherwise.
*/
static int
ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
struct net_device *filter_dev,
- struct flow_cls_offload *cls_flower)
+ struct flow_cls_offload *cls_flower,
+ bool ingress)
{
struct ice_vsi *vsi = np->vsi;
@@ -8302,7 +8284,7 @@ ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
switch (cls_flower->command) {
case FLOW_CLS_REPLACE:
- return ice_add_cls_flower(filter_dev, vsi, cls_flower);
+ return ice_add_cls_flower(filter_dev, vsi, cls_flower, ingress);
case FLOW_CLS_DESTROY:
return ice_del_cls_flower(vsi, cls_flower);
default:
@@ -8311,20 +8293,46 @@ ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
}
/**
- * ice_setup_tc_block_cb - callback handler registered for TC block
+ * ice_setup_tc_block_cb_ingress - callback handler for ingress TC block
* @type: TC SETUP type
* @type_data: TC flower offload data that contains user input
* @cb_priv: netdev private data
+ *
+ * Return: 0 if the setup was successful, negative error code otherwise.
*/
static int
-ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+ice_setup_tc_block_cb_ingress(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
{
struct ice_netdev_priv *np = cb_priv;
switch (type) {
case TC_SETUP_CLSFLOWER:
return ice_setup_tc_cls_flower(np, np->vsi->netdev,
- type_data);
+ type_data, true);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * ice_setup_tc_block_cb_egress - callback handler for egress TC block
+ * @type: TC SETUP type
+ * @type_data: TC flower offload data that contains user input
+ * @cb_priv: netdev private data
+ *
+ * Return: 0 if the setup was successful, negative error code otherwise.
+ */
+static int
+ice_setup_tc_block_cb_egress(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct ice_netdev_priv *np = cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return ice_setup_tc_cls_flower(np, np->vsi->netdev,
+ type_data, false);
default:
return -EOPNOTSUPP;
}
@@ -9077,7 +9085,7 @@ static int ice_create_q_channels(struct ice_vsi *vsi)
list_add_tail(&ch->list, &vsi->ch_list);
vsi->tc_map_vsi[i] = ch->ch_vsi;
dev_dbg(ice_pf_to_dev(pf),
- "successfully created channel: VSI %pK\n", ch->ch_vsi);
+ "successfully created channel: VSI %p\n", ch->ch_vsi);
}
return 0;
@@ -9262,6 +9270,96 @@ exit:
return ret;
}
+/**
+ * ice_cfg_txtime - configure Tx Time for the Tx ring
+ * @tx_ring: pointer to the Tx ring structure
+ *
+ * Return: 0 on success, negative value on failure.
+ */
+static int ice_cfg_txtime(struct ice_tx_ring *tx_ring)
+{
+ int err, timeout = 50;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ struct ice_pf *pf;
+ u32 queue;
+
+ if (!tx_ring)
+ return -EINVAL;
+
+ vsi = tx_ring->vsi;
+ pf = vsi->back;
+ while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+ usleep_range(1000, 2000);
+ }
+
+ queue = tx_ring->q_index;
+ dev = ice_pf_to_dev(pf);
+
+ /* Ignore return value, and always attempt to enable queue. */
+ ice_qp_dis(vsi, queue);
+
+ err = ice_qp_ena(vsi, queue);
+ if (err)
+ dev_err(dev, "Failed to enable Tx queue %d for TxTime configuration\n",
+ queue);
+
+ clear_bit(ICE_CFG_BUSY, pf->state);
+ return err;
+}
+
+/**
+ * ice_offload_txtime - enable or disable Earliest TxTime First (ETF) offload
+ * @netdev: network interface device structure
+ * @qopt_off: etf queue option offload from the skb to set
+ *
+ * Return: 0 on success, negative value on failure.
+ */
+static int ice_offload_txtime(struct net_device *netdev,
+ void *qopt_off)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+ struct tc_etf_qopt_offload *qopt;
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_tx_ring *tx_ring;
+ int ret = 0;
+
+ if (!ice_is_feature_supported(pf, ICE_F_TXTIME))
+ return -EOPNOTSUPP;
+
+ qopt = qopt_off;
+ if (!qopt_off || qopt->queue < 0 || qopt->queue >= vsi->num_txq)
+ return -EINVAL;
+
+ if (qopt->enable)
+ set_bit(qopt->queue, pf->txtime_txqs);
+ else
+ clear_bit(qopt->queue, pf->txtime_txqs);
+
+ if (netif_running(vsi->netdev)) {
+ tx_ring = vsi->tx_rings[qopt->queue];
+ ret = ice_cfg_txtime(tx_ring);
+ if (ret)
+ goto err;
+ }
+
+ netdev_info(netdev, "%s TxTime on queue: %i\n",
+ str_enable_disable(qopt->enable), qopt->queue);
+ return 0;
+
+err:
+ netdev_err(netdev, "Failed to %s TxTime on queue: %i\n",
+ str_enable_disable(qopt->enable), qopt->queue);
+
+ if (qopt->enable)
+ clear_bit(qopt->queue, pf->txtime_txqs);
+ return ret;
+}
+
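Editor's note: an illustrative sketch (not part of the patch) of the request shape that reaches ice_offload_txtime() when an offloaded ETF qdisc is installed on a queue. struct tc_etf_qopt_offload comes from the core TC offload API; the wrapper and its values are assumptions for illustration only.

/* Hypothetical direct invocation mirroring what the TC layer passes down. */
static int example_enable_txtime(struct net_device *netdev, int queue)
{
	struct tc_etf_qopt_offload qopt = {
		.enable = true,	/* request TxTime offload on this queue */
		.queue	= queue,
	};

	/* Normally dispatched via .ndo_setup_tc(TC_SETUP_QDISC_ETF), see below. */
	return ice_offload_txtime(netdev, &qopt);
}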
static LIST_HEAD(ice_block_cb_list);
static int
@@ -9269,27 +9367,45 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ enum flow_block_binder_type binder_type;
+ struct iidc_rdma_core_dev_info *cdev;
struct ice_pf *pf = np->vsi->back;
+ flow_setup_cb_t *flower_handler;
bool locked = false;
int err;
switch (type) {
case TC_SETUP_BLOCK:
+ binder_type =
+ ((struct flow_block_offload *)type_data)->binder_type;
+
+ switch (binder_type) {
+ case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
+ flower_handler = ice_setup_tc_block_cb_ingress;
+ break;
+ case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
+ flower_handler = ice_setup_tc_block_cb_egress;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
return flow_block_cb_setup_simple(type_data,
&ice_block_cb_list,
- ice_setup_tc_block_cb,
- np, np, true);
+ flower_handler,
+ np, np, false);
case TC_SETUP_QDISC_MQPRIO:
if (ice_is_eswitch_mode_switchdev(pf)) {
netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n");
return -EOPNOTSUPP;
}
- if (pf->adev) {
+ cdev = pf->cdev_info;
+ if (cdev && cdev->adev) {
mutex_lock(&pf->adev_mutex);
- device_lock(&pf->adev->dev);
+ device_lock(&cdev->adev->dev);
locked = true;
- if (pf->adev->dev.driver) {
+ if (cdev->adev->dev.driver) {
netdev_err(netdev, "Cannot change qdisc when RDMA is active\n");
err = -EBUSY;
goto adev_unlock;
@@ -9303,10 +9419,12 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
adev_unlock:
if (locked) {
- device_unlock(&pf->adev->dev);
+ device_unlock(&cdev->adev->dev);
mutex_unlock(&pf->adev_mutex);
}
return err;
+ case TC_SETUP_QDISC_ETF:
+ return ice_offload_txtime(netdev, type_data);
default:
return -EOPNOTSUPP;
}
@@ -9339,7 +9457,7 @@ ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
case TC_SETUP_CLSFLOWER:
return ice_setup_tc_cls_flower(np, priv->netdev,
(struct flow_cls_offload *)
- type_data);
+ type_data, false);
default:
return -EOPNOTSUPP;
}
@@ -9442,8 +9560,7 @@ ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
*/
int ice_open(struct net_device *netdev)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
if (ice_is_reset_in_progress(pf->state)) {
netdev_err(netdev, "can't open net device while reset is in progress");
@@ -9646,7 +9763,6 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_change_mtu = ice_change_mtu,
.ndo_get_stats64 = ice_get_stats64,
.ndo_set_tx_maxrate = ice_set_tx_maxrate,
- .ndo_eth_ioctl = ice_eth_ioctl,
.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
.ndo_set_vf_mac = ice_set_vf_mac,
.ndo_get_vf_config = ice_get_vf_cfg,
@@ -9670,4 +9786,6 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_bpf = ice_xdp,
.ndo_xdp_xmit = ice_xdp_xmit,
.ndo_xsk_wakeup = ice_xsk_wakeup,
+ .ndo_hwtstamp_get = ice_ptp_hwtstamp_get,
+ .ndo_hwtstamp_set = ice_ptp_hwtstamp_set,
};
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 59e8879ac059..7e187a804dfa 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -22,10 +22,10 @@ int ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
u16 length, void *data, bool last_command,
bool read_shadow_ram, struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
struct ice_aqc_nvm *cmd;
- cmd = &desc.params.nvm;
+ cmd = libie_aq_raw(&desc);
if (offset > ICE_AQC_NVM_MAX_OFFSET)
return -EINVAL;
@@ -125,10 +125,10 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
u16 length, void *data, bool last_command, u8 command_flags,
struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
struct ice_aqc_nvm *cmd;
- cmd = &desc.params.nvm;
+ cmd = libie_aq_raw(&desc);
/* In offset the highest byte must be zeroed. */
if (offset & 0xFF000000)
@@ -146,7 +146,7 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
cmd->offset_high = (offset >> 16) & 0xFF;
cmd->length = cpu_to_le16(length);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
return ice_aq_send_cmd(hw, &desc, data, length, cd);
}
@@ -161,10 +161,10 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
*/
int ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
struct ice_aqc_nvm *cmd;
- cmd = &desc.params.nvm;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_erase);
@@ -869,7 +869,7 @@ static int ice_discover_flash_size(struct ice_hw *hw)
status = ice_read_flat_nvm(hw, offset, &len, &data, false);
if (status == -EIO &&
- hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) {
+ hw->adminq.sq_last_status == LIBIE_AQ_RC_EINVAL) {
ice_debug(hw, ICE_DBG_NVM, "%s: New upper bound of %u bytes\n",
__func__, offset);
status = 0;
@@ -1182,14 +1182,14 @@ int ice_init_nvm(struct ice_hw *hw)
int ice_nvm_validate_checksum(struct ice_hw *hw)
{
struct ice_aqc_nvm_checksum *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (status)
return status;
- cmd = &desc.params.nvm_checksum;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_checksum);
cmd->flags = ICE_AQC_NVM_CHECKSUM_VERIFY;
@@ -1226,11 +1226,11 @@ int ice_nvm_validate_checksum(struct ice_hw *hw)
*/
int ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags)
{
+ struct libie_aq_desc desc;
struct ice_aqc_nvm *cmd;
- struct ice_aq_desc desc;
int err;
- cmd = &desc.params.nvm;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate);
cmd->cmd_flags = (u8)(cmd_flags & 0xFF);
@@ -1252,7 +1252,7 @@ int ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags)
*/
int ice_aq_nvm_update_empr(struct ice_hw *hw)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_update_empr);
@@ -1278,15 +1278,15 @@ ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
u16 length, struct ice_sq_cd *cd)
{
struct ice_aqc_nvm_pkg_data *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
if (length != 0 && !data)
return -EINVAL;
- cmd = &desc.params.pkg_data;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_pkg_data);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
if (del_pkg_data_flag)
cmd->cmd_flags |= ICE_AQC_NVM_PKG_DELETE;
@@ -1316,17 +1316,17 @@ ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length,
u8 *comp_response_code, struct ice_sq_cd *cd)
{
struct ice_aqc_nvm_pass_comp_tbl *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
if (!data || !comp_response || !comp_response_code)
return -EINVAL;
- cmd = &desc.params.pass_comp_tbl;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc,
ice_aqc_opc_nvm_pass_component_tbl);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
cmd->transfer_flag = transfer_flag;
status = ice_aq_send_cmd(hw, &desc, data, length, cd);
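Every conversion in this file follows the same pattern: the admin queue descriptor becomes the generic struct libie_aq_desc and the command-specific layout is overlaid on its raw parameter bytes rather than addressed through a named union member such as desc.params.nvm. A minimal sketch of the pattern, assuming libie_aq_raw() returns a pointer to the descriptor's raw parameter area and with offset standing in for the caller-supplied module offset:

	struct libie_aq_desc desc;
	struct ice_aqc_nvm *cmd;

	/* Overlay the NVM command layout on the descriptor's raw params */
	cmd = libie_aq_raw(&desc);
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm);
	cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
	cmd->offset_high = (offset >> 16) & 0xFF;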
diff --git a/drivers/net/ethernet/intel/ice/ice_parser.h b/drivers/net/ethernet/intel/ice/ice_parser.h
index 6509d807627c..4f56d53d56b9 100644
--- a/drivers/net/ethernet/intel/ice/ice_parser.h
+++ b/drivers/net/ethernet/intel/ice/ice_parser.h
@@ -257,7 +257,6 @@ ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *table, int size,
/*** ICE_SID_RXPARSER_BOOST_TCAM and ICE_SID_LBL_RXPARSER_TMEM sections ***/
#define ICE_BST_TCAM_TABLE_SIZE 256
#define ICE_BST_TCAM_KEY_SIZE 20
-#define ICE_BST_KEY_TCAM_SIZE 19
/* Boost TCAM item */
struct ice_bst_tcam_item {
@@ -401,7 +400,6 @@ u16 ice_xlt_kb_flag_get(struct ice_xlt_kb *kb, u64 pkt_flag);
#define ICE_PARSER_GPR_NUM 128
#define ICE_PARSER_FLG_NUM 64
#define ICE_PARSER_ERR_NUM 16
-#define ICE_BST_KEY_SIZE 10
#define ICE_MARKER_ID_SIZE 9
#define ICE_MARKER_MAX_SIZE \
(ICE_MARKER_ID_SIZE * BITS_PER_BYTE - 1)
@@ -431,13 +429,13 @@ struct ice_parser_rt {
u8 pkt_buf[ICE_PARSER_MAX_PKT_LEN + ICE_PARSER_PKT_REV];
u16 pkt_len;
u16 po;
- u8 bst_key[ICE_BST_KEY_SIZE];
+ u8 bst_key[ICE_BST_TCAM_KEY_SIZE];
struct ice_pg_cam_key pg_key;
+ u8 pg_prio;
struct ice_alu *alu0;
struct ice_alu *alu1;
struct ice_alu *alu2;
struct ice_pg_cam_action *action;
- u8 pg_prio;
struct ice_gpr_pu pu;
u8 markers[ICE_MARKER_ID_SIZE];
bool protocols[ICE_PO_PAIR_SIZE];
diff --git a/drivers/net/ethernet/intel/ice/ice_parser_rt.c b/drivers/net/ethernet/intel/ice/ice_parser_rt.c
index dedf5e854e4b..3995d662e050 100644
--- a/drivers/net/ethernet/intel/ice/ice_parser_rt.c
+++ b/drivers/net/ethernet/intel/ice/ice_parser_rt.c
@@ -125,22 +125,20 @@ static void ice_bst_key_init(struct ice_parser_rt *rt,
else
key[idd] = imem->b_kb.prio;
- idd = ICE_BST_KEY_TCAM_SIZE - 1;
+ idd = ICE_BST_TCAM_KEY_SIZE - 2;
for (i = idd; i >= 0; i--) {
int j;
j = ho + idd - i;
if (j < ICE_PARSER_MAX_PKT_LEN)
- key[i] = rt->pkt_buf[ho + idd - i];
+ key[i] = rt->pkt_buf[j];
else
key[i] = 0;
}
- ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generated Boost TCAM Key:\n");
- ice_debug(rt->psr->hw, ICE_DBG_PARSER, "%02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
- key[0], key[1], key[2], key[3], key[4],
- key[5], key[6], key[7], key[8], key[9]);
- ice_debug(rt->psr->hw, ICE_DBG_PARSER, "\n");
+ ice_debug_array_w_prefix(rt->psr->hw, ICE_DBG_PARSER,
+ KBUILD_MODNAME ": Generated Boost TCAM Key",
+ key, ICE_BST_TCAM_KEY_SIZE);
}
static u16 ice_bit_rev_u16(u16 v, int len)
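Worked example of the index change above, reading from the hunk: with ICE_BST_TCAM_KEY_SIZE equal to 20, idd becomes 18, so the loop fills key[18] = pkt_buf[ho], key[17] = pkt_buf[ho + 1], ..., key[0] = pkt_buf[ho + 18], while the priority/TSR byte written just before the loop lands in key[19]. Previously the loop was sized off the stale 19-byte ICE_BST_KEY_TCAM_SIZE define even though bst_key was only ICE_BST_KEY_SIZE (10) bytes long, which is why both defines are dropped from ice_parser.h and the array is resized to ICE_BST_TCAM_KEY_SIZE.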
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index 7c09ea0f03ba..725167d557a8 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -82,26 +82,46 @@ enum ice_sw_tunnel_type {
enum ice_prot_id {
ICE_PROT_ID_INVAL = 0,
ICE_PROT_MAC_OF_OR_S = 1,
+ ICE_PROT_MAC_O2 = 2,
ICE_PROT_MAC_IL = 4,
+ ICE_PROT_MAC_IN_MAC = 7,
ICE_PROT_ETYPE_OL = 9,
ICE_PROT_ETYPE_IL = 10,
+ ICE_PROT_PAY = 15,
+ ICE_PROT_EVLAN_O = 16,
+ ICE_PROT_VLAN_O = 17,
+ ICE_PROT_VLAN_IF = 18,
+ ICE_PROT_MPLS_OL_MINUS_1 = 27,
+ ICE_PROT_MPLS_OL_OR_OS = 28,
+ ICE_PROT_MPLS_IL = 29,
ICE_PROT_IPV4_OF_OR_S = 32,
ICE_PROT_IPV4_IL = 33,
+ ICE_PROT_IPV4_IL_IL = 34,
ICE_PROT_IPV6_OF_OR_S = 40,
ICE_PROT_IPV6_IL = 41,
+ ICE_PROT_IPV6_IL_IL = 42,
+ ICE_PROT_IPV6_NEXT_PROTO = 43,
+ ICE_PROT_IPV6_FRAG = 47,
ICE_PROT_TCP_IL = 49,
ICE_PROT_UDP_OF = 52,
ICE_PROT_UDP_IL_OR_S = 53,
ICE_PROT_GRE_OF = 64,
+ ICE_PROT_NSH_F = 84,
ICE_PROT_ESP_F = 88,
ICE_PROT_ESP_2 = 89,
ICE_PROT_SCTP_IL = 96,
ICE_PROT_ICMP_IL = 98,
ICE_PROT_ICMPV6_IL = 100,
+ ICE_PROT_VRRP_F = 101,
+ ICE_PROT_OSPF = 102,
ICE_PROT_PPPOE = 103,
ICE_PROT_L2TPV3 = 104,
+ ICE_PROT_ATAOE_OF = 114,
+ ICE_PROT_CTRL_OF = 116,
+ ICE_PROT_LLDP_OF = 117,
ICE_PROT_ARP_OF = 118,
+ ICE_PROT_EAPOL_OF = 120,
ICE_PROT_META_ID = 255, /* when offset == metadata */
ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */
};
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index a999fface272..4c8d20f2d2c0 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -4,7 +4,6 @@
#include "ice.h"
#include "ice_lib.h"
#include "ice_trace.h"
-#include "ice_cgu_regs.h"
static const char ice_pin_names[][64] = {
"SDP0",
@@ -16,45 +15,43 @@ static const char ice_pin_names[][64] = {
};
static const struct ice_ptp_pin_desc ice_pin_desc_e82x[] = {
- /* name, gpio */
- { TIME_SYNC, { 4, -1 }},
- { ONE_PPS, { -1, 5 }},
+ /* name, gpio, delay */
+ { TIME_SYNC, { 4, -1 }, { 0, 0 }},
+ { ONE_PPS, { -1, 5 }, { 0, 11 }},
};
static const struct ice_ptp_pin_desc ice_pin_desc_e825c[] = {
- /* name, gpio */
- { SDP0, { 0, 0 }},
- { SDP1, { 1, 1 }},
- { SDP2, { 2, 2 }},
- { SDP3, { 3, 3 }},
- { TIME_SYNC, { 4, -1 }},
- { ONE_PPS, { -1, 5 }},
+ /* name, gpio, delay */
+ { SDP0, { 0, 0 }, { 15, 14 }},
+ { SDP1, { 1, 1 }, { 15, 14 }},
+ { SDP2, { 2, 2 }, { 15, 14 }},
+ { SDP3, { 3, 3 }, { 15, 14 }},
+ { TIME_SYNC, { 4, -1 }, { 11, 0 }},
+ { ONE_PPS, { -1, 5 }, { 0, 9 }},
};
static const struct ice_ptp_pin_desc ice_pin_desc_e810[] = {
- /* name, gpio */
- { SDP0, { 0, 0 }},
- { SDP1, { 1, 1 }},
- { SDP2, { 2, 2 }},
- { SDP3, { 3, 3 }},
- { ONE_PPS, { -1, 5 }},
+ /* name, gpio, delay */
+ { SDP0, { 0, 0 }, { 0, 1 }},
+ { SDP1, { 1, 1 }, { 0, 1 }},
+ { SDP2, { 2, 2 }, { 0, 1 }},
+ { SDP3, { 3, 3 }, { 0, 1 }},
+ { ONE_PPS, { -1, 5 }, { 0, 1 }},
};
-static const char ice_pin_names_nvm[][64] = {
- "GNSS",
- "SMA1",
- "U.FL1",
- "SMA2",
- "U.FL2",
+static const char ice_pin_names_dpll[][64] = {
+ "SDP20",
+ "SDP21",
+ "SDP22",
+ "SDP23",
};
-static const struct ice_ptp_pin_desc ice_pin_desc_e810_sma[] = {
- /* name, gpio */
- { GNSS, { 1, -1 }},
- { SMA1, { 1, 0 }},
- { UFL1, { -1, 0 }},
- { SMA2, { 3, 2 }},
- { UFL2, { 3, -1 }},
+static const struct ice_ptp_pin_desc ice_pin_desc_dpll[] = {
+ /* name, gpio, delay */
+ { SDP0, { -1, 0 }, { 0, 1 }},
+ { SDP1, { 1, -1 }, { 0, 0 }},
+ { SDP2, { -1, 2 }, { 0, 1 }},
+ { SDP3, { 3, -1 }, { 0, 0 }},
};
static struct ice_pf *ice_get_ctrl_pf(struct ice_pf *pf)
@@ -93,101 +90,6 @@ static int ice_ptp_find_pin_idx(struct ice_pf *pf, enum ptp_pin_function func,
}
/**
- * ice_ptp_update_sma_data - update SMA pins data according to pins setup
- * @pf: Board private structure
- * @sma_pins: parsed SMA pins status
- * @data: SMA data to update
- */
-static void ice_ptp_update_sma_data(struct ice_pf *pf, unsigned int sma_pins[],
- u8 *data)
-{
- const char *state1, *state2;
-
- /* Set the right state based on the desired configuration.
- * When bit is set, functionality is disabled.
- */
- *data &= ~ICE_ALL_SMA_MASK;
- if (!sma_pins[UFL1 - 1]) {
- if (sma_pins[SMA1 - 1] == PTP_PF_EXTTS) {
- state1 = "SMA1 Rx, U.FL1 disabled";
- *data |= ICE_SMA1_TX_EN;
- } else if (sma_pins[SMA1 - 1] == PTP_PF_PEROUT) {
- state1 = "SMA1 Tx U.FL1 disabled";
- *data |= ICE_SMA1_DIR_EN;
- } else {
- state1 = "SMA1 disabled, U.FL1 disabled";
- *data |= ICE_SMA1_MASK;
- }
- } else {
- /* U.FL1 Tx will always enable SMA1 Rx */
- state1 = "SMA1 Rx, U.FL1 Tx";
- }
-
- if (!sma_pins[UFL2 - 1]) {
- if (sma_pins[SMA2 - 1] == PTP_PF_EXTTS) {
- state2 = "SMA2 Rx, U.FL2 disabled";
- *data |= ICE_SMA2_TX_EN | ICE_SMA2_UFL2_RX_DIS;
- } else if (sma_pins[SMA2 - 1] == PTP_PF_PEROUT) {
- state2 = "SMA2 Tx, U.FL2 disabled";
- *data |= ICE_SMA2_DIR_EN | ICE_SMA2_UFL2_RX_DIS;
- } else {
- state2 = "SMA2 disabled, U.FL2 disabled";
- *data |= ICE_SMA2_MASK;
- }
- } else {
- if (!sma_pins[SMA2 - 1]) {
- state2 = "SMA2 disabled, U.FL2 Rx";
- *data |= ICE_SMA2_DIR_EN | ICE_SMA2_TX_EN;
- } else {
- state2 = "SMA2 Tx, U.FL2 Rx";
- *data |= ICE_SMA2_DIR_EN;
- }
- }
-
- dev_dbg(ice_pf_to_dev(pf), "%s, %s\n", state1, state2);
-}
-
-/**
- * ice_ptp_set_sma_cfg - set the configuration of the SMA control logic
- * @pf: Board private structure
- *
- * Return: 0 on success, negative error code otherwise
- */
-static int ice_ptp_set_sma_cfg(struct ice_pf *pf)
-{
- const struct ice_ptp_pin_desc *ice_pins = pf->ptp.ice_pin_desc;
- struct ptp_pin_desc *pins = pf->ptp.pin_desc;
- unsigned int sma_pins[ICE_SMA_PINS_NUM] = {};
- int err;
- u8 data;
-
- /* Read initial pin state value */
- err = ice_read_sma_ctrl(&pf->hw, &data);
- if (err)
- return err;
-
- /* Get SMA/U.FL pins states */
- for (int i = 0; i < pf->ptp.info.n_pins; i++)
- if (pins[i].func) {
- int name_idx = ice_pins[i].name_idx;
-
- switch (name_idx) {
- case SMA1:
- case UFL1:
- case SMA2:
- case UFL2:
- sma_pins[name_idx - 1] = pins[i].func;
- break;
- default:
- continue;
- }
- }
-
- ice_ptp_update_sma_data(pf, sma_pins, &data);
- return ice_write_sma_ctrl(&pf->hw, data);
-}
-
-/**
* ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device
* @pf: Board private structure
*
@@ -298,18 +200,30 @@ void ice_ptp_restore_timestamp_mode(struct ice_pf *pf)
* @sts: Optional parameter for holding a pair of system timestamps from
* the system clock. Will be ignored if NULL is given.
*/
-static u64
-ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
+u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf,
+ struct ptp_system_timestamp *sts)
{
struct ice_hw *hw = &pf->hw;
u32 hi, lo, lo2;
u8 tmr_idx;
+ if (!ice_is_primary(hw))
+ hw = ice_get_primary_hw(pf);
+
tmr_idx = ice_get_ptp_src_clock_index(hw);
guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
/* Read the system timestamp pre PHC read */
ptp_read_system_prets(sts);
+ if (hw->mac_type == ICE_MAC_E830) {
+ u64 clk_time = rd64(hw, E830_GLTSYN_TIME_L(tmr_idx));
+
+ /* Read the system timestamp post PHC read */
+ ptp_read_system_postts(sts);
+
+ return clk_time;
+ }
+
lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
/* Read the system timestamp post PHC read */
@@ -464,7 +378,9 @@ ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
*/
void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
{
+ struct ice_e810_params *params;
struct ice_ptp_port *ptp_port;
+ unsigned long flags;
struct sk_buff *skb;
struct ice_pf *pf;
@@ -473,6 +389,7 @@ void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
ptp_port = container_of(tx, struct ice_ptp_port, tx);
pf = ptp_port_to_pf(ptp_port);
+ params = &pf->hw.ptp.phy.e810;
/* Drop packets which have waited for more than 2 seconds */
if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
@@ -489,11 +406,17 @@ void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
+ spin_lock_irqsave(&params->atqbal_wq.lock, flags);
+
+ params->atqbal_flags |= ATQBAL_FLAGS_INTR_IN_PROGRESS;
+
/* Write TS index to read to the PF register so the FW can read it */
- wr32(&pf->hw, PF_SB_ATQBAL,
- TS_LL_READ_TS_INTR | FIELD_PREP(TS_LL_READ_TS_IDX, idx) |
- TS_LL_READ_TS);
+ wr32(&pf->hw, REG_LL_PROXY_H,
+ REG_LL_PROXY_H_TS_INTR_ENA | FIELD_PREP(REG_LL_PROXY_H_TS_IDX, idx) |
+ REG_LL_PROXY_H_EXEC);
tx->last_ll_ts_idx_read = idx;
+
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
}
/**
@@ -504,35 +427,52 @@ void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
{
struct skb_shared_hwtstamps shhwtstamps = {};
u8 idx = tx->last_ll_ts_idx_read;
+ struct ice_e810_params *params;
struct ice_ptp_port *ptp_port;
u64 raw_tstamp, tstamp;
bool drop_ts = false;
struct sk_buff *skb;
+ unsigned long flags;
+ struct device *dev;
struct ice_pf *pf;
- u32 val;
+ u32 reg_ll_high;
if (!tx->init || tx->last_ll_ts_idx_read < 0)
return;
ptp_port = container_of(tx, struct ice_ptp_port, tx);
pf = ptp_port_to_pf(ptp_port);
+ dev = ice_pf_to_dev(pf);
+ params = &pf->hw.ptp.phy.e810;
ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
- val = rd32(&pf->hw, PF_SB_ATQBAL);
+ spin_lock_irqsave(&params->atqbal_wq.lock, flags);
+
+ if (!(params->atqbal_flags & ATQBAL_FLAGS_INTR_IN_PROGRESS))
+ dev_dbg(dev, "%s: low latency interrupt request not in progress?\n",
+ __func__);
+
+ /* Read the low 32 bit value */
+ raw_tstamp = rd32(&pf->hw, REG_LL_PROXY_L);
+ /* Read the status together with high TS part */
+ reg_ll_high = rd32(&pf->hw, REG_LL_PROXY_H);
+
+ /* Wake up threads waiting on low latency interface */
+ params->atqbal_flags &= ~ATQBAL_FLAGS_INTR_IN_PROGRESS;
+
+ wake_up_locked(&params->atqbal_wq);
+
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
/* When the bit is cleared, the TS is ready in the register */
- if (val & TS_LL_READ_TS) {
+ if (reg_ll_high & REG_LL_PROXY_H_EXEC) {
dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready");
return;
}
/* High 8 bit value of the TS is on the bits 16:23 */
- raw_tstamp = FIELD_GET(TS_LL_READ_TS_HIGH, val);
- raw_tstamp <<= 32;
-
- /* Read the low 32 bit value */
- raw_tstamp |= (u64)rd32(&pf->hw, PF_SB_ATQBAH);
+ raw_tstamp |= ((u64)FIELD_GET(REG_LL_PROXY_H_TS_HIGH, reg_ll_high)) << 32;
/* Devices using this interface always verify the timestamp differs
* relative to the last cached timestamp value.
@@ -560,6 +500,9 @@ void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
if (tstamp) {
shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
ice_trace(tx_tstamp_complete, skb, idx);
+
+ /* Count the number of Tx timestamps that succeeded */
+ pf->ptp.tx_hwtstamp_good++;
}
skb_tstamp_tx(skb, &shhwtstamps);
@@ -618,6 +561,7 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
{
struct ice_ptp_port *ptp_port;
unsigned long flags;
+ u32 tstamp_good = 0;
struct ice_pf *pf;
struct ice_hw *hw;
u64 tstamp_ready;
@@ -718,11 +662,16 @@ skip_ts_read:
if (tstamp) {
shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
ice_trace(tx_tstamp_complete, skb, idx);
+
+ /* Count the number of Tx timestamps that succeeded */
+ tstamp_good++;
}
skb_tstamp_tx(skb, &shhwtstamps);
dev_kfree_skb_any(skb);
}
+
+ pf->ptp.tx_hwtstamp_good += tstamp_good;
}
/**
@@ -946,28 +895,6 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
}
/**
- * ice_ptp_init_tx_eth56g - Initialize tracking for Tx timestamps
- * @pf: Board private structure
- * @tx: the Tx tracking structure to initialize
- * @port: the port this structure tracks
- *
- * Initialize the Tx timestamp tracker for this port. ETH56G PHYs
- * have independent memory blocks for all ports.
- *
- * Return: 0 for success, -ENOMEM when failed to allocate Tx tracker
- */
-static int ice_ptp_init_tx_eth56g(struct ice_pf *pf, struct ice_ptp_tx *tx,
- u8 port)
-{
- tx->block = port;
- tx->offset = 0;
- tx->len = INDEX_PER_PORT_ETH56G;
- tx->has_ready_bitmap = 1;
-
- return ice_ptp_alloc_tx_tracker(tx);
-}
-
-/**
* ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps
* @pf: Board private structure
* @tx: the Tx tracking structure to initialize
@@ -977,9 +904,11 @@ static int ice_ptp_init_tx_eth56g(struct ice_pf *pf, struct ice_ptp_tx *tx,
* the timestamp block is shared for all ports in the same quad. To avoid
* ports using the same timestamp index, logically break the block of
* registers into chunks based on the port number.
+ *
+ * Return: 0 on success, -ENOMEM when out of memory
*/
-static int
-ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
+static int ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx,
+ u8 port)
{
tx->block = ICE_GET_QUAD_NUM(port);
tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
@@ -990,24 +919,27 @@ ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
}
/**
- * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
+ * ice_ptp_init_tx - Initialize tracking for Tx timestamps
* @pf: Board private structure
* @tx: the Tx tracking structure to initialize
+ * @port: the port this structure tracks
*
- * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
- * port has its own block of timestamps, independent of the other ports.
+ * Initialize the Tx timestamp tracker for this PF. For all PHYs except E82X,
+ * each port has its own block of timestamps, independent of the other ports.
+ *
+ * Return: 0 on success, -ENOMEM when out of memory
*/
-static int
-ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
+static int ice_ptp_init_tx(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
{
- tx->block = pf->hw.port_info->lport;
+ tx->block = port;
tx->offset = 0;
- tx->len = INDEX_PER_PORT_E810;
+ tx->len = INDEX_PER_PORT;
+
/* The E810 PHY does not provide a timestamp ready bitmap. Instead,
* verify new timestamps against cached copy of the last read
* timestamp.
*/
- tx->has_ready_bitmap = 0;
+ tx->has_ready_bitmap = pf->hw.mac_type != ICE_MAC_E810;
return ice_ptp_alloc_tx_tracker(tx);
}
@@ -1292,20 +1224,21 @@ ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
struct ice_hw *hw = &pf->hw;
int err;
- if (ice_is_e810(hw))
- return 0;
-
mutex_lock(&ptp_port->ps_lock);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_stop_phy_timer_eth56g(hw, port, true);
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ err = 0;
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
err = ice_stop_phy_timer_e82x(hw, port, true);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_stop_phy_timer_eth56g(hw, port, true);
+ break;
default:
err = -ENODEV;
}
@@ -1335,19 +1268,17 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
unsigned long flags;
int err;
- if (ice_is_e810(hw))
- return 0;
-
if (!ptp_port->link_up)
return ice_ptp_port_phy_stop(ptp_port);
mutex_lock(&ptp_port->ps_lock);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_start_phy_timer_eth56g(hw, port);
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ err = 0;
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
/* Start the PHY timer in Vernier mode */
kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
@@ -1372,6 +1303,9 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work,
0);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_start_phy_timer_eth56g(hw, port);
+ break;
default:
err = -ENODEV;
}
@@ -1388,10 +1322,9 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
/**
* ice_ptp_link_change - Reconfigure PTP after link status change
* @pf: Board private structure
- * @port: Port for which the PHY start is set
* @linkup: Link is up or down
*/
-void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
+void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
{
struct ice_ptp_port *ptp_port;
struct ice_hw *hw = &pf->hw;
@@ -1399,14 +1332,7 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
if (pf->ptp.state != ICE_PTP_READY)
return;
- if (WARN_ON_ONCE(port >= hw->ptp.num_lports))
- return;
-
ptp_port = &pf->ptp.port;
- if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
- port *= 2;
- if (WARN_ON_ONCE(ptp_port->port_num != port))
- return;
/* Update cached link status for this port immediately */
ptp_port->link_up = linkup;
@@ -1414,12 +1340,14 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
/* Skip HW writes if reset is in progress */
if (pf->hw.reset_ongoing)
return;
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_E810:
- /* Do not reconfigure E810 PHY */
+
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ /* Do not reconfigure E810 or E830 PHY */
return;
- case ICE_PHY_ETH56G:
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
ice_ptp_port_phy_restart(ptp_port);
return;
default:
@@ -1447,46 +1375,45 @@ static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
ice_ptp_reset_ts_memory(hw);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G: {
- int port;
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ return 0;
+ case ICE_MAC_GENERIC: {
+ int quad;
- for (port = 0; port < hw->ptp.num_lports; port++) {
+ for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
+ quad++) {
int err;
- err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
+ err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
if (err) {
- dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
- port, err);
+ dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
+ quad, err);
return err;
}
}
return 0;
}
- case ICE_PHY_E82X: {
- int quad;
+ case ICE_MAC_GENERIC_3K_E825: {
+ int port;
- for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
- quad++) {
+ for (port = 0; port < hw->ptp.num_lports; port++) {
int err;
- err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
+ err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
if (err) {
- dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
- quad, err);
+ dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
+ port, err);
return err;
}
}
return 0;
}
- case ICE_PHY_E810:
- return 0;
- case ICE_PHY_UNSUP:
+ case ICE_MAC_UNKNOWN:
default:
- dev_warn(dev, "%s: Unexpected PHY model %d\n", __func__,
- ice_get_phy_model(hw));
return -EOPNOTSUPP;
}
}
@@ -1566,18 +1493,29 @@ void ice_ptp_extts_event(struct ice_pf *pf)
* Event is defined in GLTSYN_EVNT_0 register
*/
for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
+ int pin_desc_idx;
+
/* Check if channel is enabled */
- if (pf->ptp.ext_ts_irq & (1 << chan)) {
- lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
- hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
- event.timestamp = (((u64)hi) << 32) | lo;
- event.type = PTP_CLOCK_EXTTS;
- event.index = chan;
-
- /* Fire event */
- ptp_clock_event(pf->ptp.clock, &event);
- pf->ptp.ext_ts_irq &= ~(1 << chan);
+ if (!(pf->ptp.ext_ts_irq & (1 << chan)))
+ continue;
+
+ lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
+ hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
+ event.timestamp = (u64)hi << 32 | lo;
+
+ /* Add delay compensation */
+ pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan);
+ if (pin_desc_idx >= 0) {
+ const struct ice_ptp_pin_desc *desc;
+
+ desc = &pf->ptp.ice_pin_desc[pin_desc_idx];
+ event.timestamp -= desc->delay[0];
}
+
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = chan;
+ pf->ptp.ext_ts_irq &= ~(1 << chan);
+ ptp_clock_event(pf->ptp.clock, &event);
}
}
@@ -1600,14 +1538,6 @@ static int ice_ptp_cfg_extts(struct ice_pf *pf, struct ptp_extts_request *rq,
int pin_desc_idx;
u8 tmr_idx;
- /* Reject requests with unsupported flags */
-
- if (rq->flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
chan = rq->index;
@@ -1711,11 +1641,11 @@ static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
/* 0. Reset mode & out_en in AUX_OUT */
wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);
- if (ice_is_e825c(hw)) {
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) {
int err;
/* Enable/disable CGU 1PPS output for E825C */
- err = ice_cgu_cfg_pps_out(hw, !!period);
+ err = ice_tspll_cfg_pps_out_e825c(hw, !!period);
if (err)
return err;
}
@@ -1754,6 +1684,7 @@ static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
8 + chan + (tmr_idx * 4));
wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
+ ice_flush(hw);
return 0;
}
@@ -1772,19 +1703,17 @@ static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
int on)
{
+ unsigned int gpio_pin, prop_delay_ns;
u64 clk, period, start, phase;
struct ice_hw *hw = &pf->hw;
- unsigned int gpio_pin;
int pin_desc_idx;
- if (rq->flags & ~PTP_PEROUT_PHASE)
- return -EOPNOTSUPP;
-
pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_PEROUT, rq->index);
if (pin_desc_idx < 0)
return -EIO;
gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[1];
+ prop_delay_ns = pf->ptp.ice_pin_desc[pin_desc_idx].delay[1];
period = rq->period.sec * NSEC_PER_SEC + rq->period.nsec;
/* If we're disabling the output or period is 0, clear out CLKO and TGT
@@ -1794,7 +1723,7 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
return ice_ptp_write_perout(hw, rq->index, gpio_pin, 0, 0);
if (strncmp(pf->ptp.pin_desc[pin_desc_idx].name, "1PPS", 64) == 0 &&
- period != NSEC_PER_SEC && hw->ptp.phy_model == ICE_PHY_E82X) {
+ period != NSEC_PER_SEC && hw->mac_type == ICE_MAC_GENERIC) {
dev_err(ice_pf_to_dev(pf), "1PPS pin supports only 1 s period\n");
return -EOPNOTSUPP;
}
@@ -1813,14 +1742,15 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
div64_u64_rem(start, period, &phase);
/* If we have only phase or start time is in the past, start the timer
- * at the next multiple of period, maintaining phase.
+ * at the next multiple of the period, maintaining phase, and at least
+ * 0.5 seconds from now so we have time to write it to HW.
*/
- clk = ice_ptp_read_src_clk_reg(pf, NULL);
- if (rq->flags & PTP_PEROUT_PHASE || start <= clk - ice_prop_delay(hw))
+ clk = ice_ptp_read_src_clk_reg(pf, NULL) + NSEC_PER_MSEC * 500;
+ if (rq->flags & PTP_PEROUT_PHASE || start <= clk - prop_delay_ns)
start = div64_u64(clk + period - 1, period) * period + phase;
/* Compensate for propagation delay from the generator to the pin. */
- start -= ice_prop_delay(hw);
+ start -= prop_delay_ns;
return ice_ptp_write_perout(hw, rq->index, gpio_pin, start, period);
}
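Worked example of the start-time rounding above: if the source clock reads 10.3 s when the request arrives, period is 1 s and phase is 0.25 s, then clk becomes 10.8 s after the 0.5 s margin, div64_u64(clk + period - 1, period) rounds up to 11, and the target start is 11 s + 0.25 s = 11.25 s, from which the pin's prop_delay_ns is subtracted before the value is written to hardware.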
@@ -1860,63 +1790,6 @@ static void ice_ptp_enable_all_perout(struct ice_pf *pf)
}
/**
- * ice_ptp_disable_shared_pin - Disable enabled pin that shares GPIO
- * @pf: Board private structure
- * @pin: Pin index
- * @func: Assigned function
- *
- * Return: 0 on success, negative error code otherwise
- */
-static int ice_ptp_disable_shared_pin(struct ice_pf *pf, unsigned int pin,
- enum ptp_pin_function func)
-{
- unsigned int gpio_pin;
-
- switch (func) {
- case PTP_PF_PEROUT:
- gpio_pin = pf->ptp.ice_pin_desc[pin].gpio[1];
- break;
- case PTP_PF_EXTTS:
- gpio_pin = pf->ptp.ice_pin_desc[pin].gpio[0];
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) {
- struct ptp_pin_desc *pin_desc = &pf->ptp.pin_desc[i];
- unsigned int chan = pin_desc->chan;
-
- /* Skip pin idx from the request */
- if (i == pin)
- continue;
-
- if (pin_desc->func == PTP_PF_PEROUT &&
- pf->ptp.ice_pin_desc[i].gpio[1] == gpio_pin) {
- pf->ptp.perout_rqs[chan].period.sec = 0;
- pf->ptp.perout_rqs[chan].period.nsec = 0;
- pin_desc->func = PTP_PF_NONE;
- pin_desc->chan = 0;
- dev_dbg(ice_pf_to_dev(pf), "Disabling pin %u with shared output GPIO pin %u\n",
- i, gpio_pin);
- return ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[chan],
- false);
- } else if (pf->ptp.pin_desc->func == PTP_PF_EXTTS &&
- pf->ptp.ice_pin_desc[i].gpio[0] == gpio_pin) {
- pf->ptp.extts_rqs[chan].flags &= ~PTP_ENABLE_FEATURE;
- pin_desc->func = PTP_PF_NONE;
- pin_desc->chan = 0;
- dev_dbg(ice_pf_to_dev(pf), "Disabling pin %u with shared input GPIO pin %u\n",
- i, gpio_pin);
- return ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[chan],
- false);
- }
- }
-
- return 0;
-}
-
-/**
* ice_verify_pin - verify if pin supports requested pin function
* @info: the driver's PTP info structure
* @pin: Pin index
@@ -1950,14 +1823,6 @@ static int ice_verify_pin(struct ptp_clock_info *info, unsigned int pin,
return -EOPNOTSUPP;
}
- /* On adapters with SMA_CTRL disable other pins that share same GPIO */
- if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
- ice_ptp_disable_shared_pin(pf, pin, func);
- pf->ptp.pin_desc[pin].func = func;
- pf->ptp.pin_desc[pin].chan = chan;
- return ice_ptp_set_sma_cfg(pf);
- }
-
return 0;
}
@@ -2048,7 +1913,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
/* For Vernier mode on E82X, we need to recalibrate after new settime.
* Start with marking timestamps as invalid.
*/
- if (ice_get_phy_model(hw) == ICE_PHY_E82X) {
+ if (hw->mac_type == ICE_MAC_GENERIC) {
err = ice_ptp_clear_phy_offset_ready_e82x(hw);
if (err)
dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n");
@@ -2072,7 +1937,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
ice_ptp_enable_all_perout(pf);
/* Recalibrate and re-enable timestamp blocks for E822/E823 */
- if (ice_get_phy_model(hw) == ICE_PHY_E82X)
+ if (hw->mac_type == ICE_MAC_GENERIC)
ice_ptp_restart_all_phy(pf);
exit:
if (err) {
@@ -2150,93 +2015,158 @@ static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
return 0;
}
+/**
+ * struct ice_crosststamp_cfg - Device cross timestamp configuration
+ * @lock_reg: The hardware semaphore lock to use
+ * @lock_busy: Bit in the semaphore lock indicating the lock is busy
+ * @ctl_reg: The hardware register to request cross timestamp
+ * @ctl_active: Bit in the control register to request cross timestamp
+ * @art_time_l: Lower 32-bits of ART system time
+ * @art_time_h: Upper 32-bits of ART system time
+ * @dev_time_l: Lower 32-bits of device time (per timer index)
+ * @dev_time_h: Upper 32-bits of device time (per timer index)
+ */
+struct ice_crosststamp_cfg {
+ /* HW semaphore lock register */
+ u32 lock_reg;
+ u32 lock_busy;
+
+ /* Capture control register */
+ u32 ctl_reg;
+ u32 ctl_active;
+
+ /* Time storage */
+ u32 art_time_l;
+ u32 art_time_h;
+ u32 dev_time_l[2];
+ u32 dev_time_h[2];
+};
+
+static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e82x = {
+ .lock_reg = PFHH_SEM,
+ .lock_busy = PFHH_SEM_BUSY_M,
+ .ctl_reg = GLHH_ART_CTL,
+ .ctl_active = GLHH_ART_CTL_ACTIVE_M,
+ .art_time_l = GLHH_ART_TIME_L,
+ .art_time_h = GLHH_ART_TIME_H,
+ .dev_time_l[0] = GLTSYN_HHTIME_L(0),
+ .dev_time_h[0] = GLTSYN_HHTIME_H(0),
+ .dev_time_l[1] = GLTSYN_HHTIME_L(1),
+ .dev_time_h[1] = GLTSYN_HHTIME_H(1),
+};
+
#ifdef CONFIG_ICE_HWTS
+static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e830 = {
+ .lock_reg = E830_PFPTM_SEM,
+ .lock_busy = E830_PFPTM_SEM_BUSY_M,
+ .ctl_reg = E830_GLPTM_ART_CTL,
+ .ctl_active = E830_GLPTM_ART_CTL_ACTIVE_M,
+ .art_time_l = E830_GLPTM_ART_TIME_L,
+ .art_time_h = E830_GLPTM_ART_TIME_H,
+ .dev_time_l[0] = E830_GLTSYN_PTMTIME_L(0),
+ .dev_time_h[0] = E830_GLTSYN_PTMTIME_H(0),
+ .dev_time_l[1] = E830_GLTSYN_PTMTIME_L(1),
+ .dev_time_h[1] = E830_GLTSYN_PTMTIME_H(1),
+};
+
+#endif /* CONFIG_ICE_HWTS */
+/**
+ * struct ice_crosststamp_ctx - Device cross timestamp context
+ * @snapshot: snapshot of system clocks for historic interpolation
+ * @pf: pointer to the PF private structure
+ * @cfg: pointer to hardware configuration for cross timestamp
+ */
+struct ice_crosststamp_ctx {
+ struct system_time_snapshot snapshot;
+ struct ice_pf *pf;
+ const struct ice_crosststamp_cfg *cfg;
+};
+
/**
- * ice_ptp_get_syncdevicetime - Get the cross time stamp info
+ * ice_capture_crosststamp - Capture a device/system cross timestamp
* @device: Current device time
* @system: System counter value read synchronously with device time
- * @ctx: Context provided by timekeeping code
+ * @__ctx: Context passed from ice_ptp_getcrosststamp
*
* Read device and system (ART) clock simultaneously and return the corrected
* clock values in ns.
+ *
+ * Return: zero on success, or a negative error code on failure.
*/
-static int
-ice_ptp_get_syncdevicetime(ktime_t *device,
- struct system_counterval_t *system,
- void *ctx)
+static int ice_capture_crosststamp(ktime_t *device,
+ struct system_counterval_t *system,
+ void *__ctx)
{
- struct ice_pf *pf = (struct ice_pf *)ctx;
- struct ice_hw *hw = &pf->hw;
- u32 hh_lock, hh_art_ctl;
- int i;
+ struct ice_crosststamp_ctx *ctx = __ctx;
+ const struct ice_crosststamp_cfg *cfg;
+ u32 lock, ctl, ts_lo, ts_hi, tmr_idx;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ int err;
+ u64 ts;
-#define MAX_HH_HW_LOCK_TRIES 5
-#define MAX_HH_CTL_LOCK_TRIES 100
+ cfg = ctx->cfg;
+ pf = ctx->pf;
+ hw = &pf->hw;
- for (i = 0; i < MAX_HH_HW_LOCK_TRIES; i++) {
- /* Get the HW lock */
- hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
- if (hh_lock & PFHH_SEM_BUSY_M) {
- usleep_range(10000, 15000);
- continue;
- }
- break;
- }
- if (hh_lock & PFHH_SEM_BUSY_M) {
- dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n");
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
+ if (tmr_idx > 1)
+ return -EINVAL;
+
+ /* Poll until we obtain the cross-timestamp hardware semaphore */
+ err = rd32_poll_timeout(hw, cfg->lock_reg, lock,
+ !(lock & cfg->lock_busy),
+ 10 * USEC_PER_MSEC, 50 * USEC_PER_MSEC);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "PTP failed to get cross timestamp lock\n");
return -EBUSY;
}
+ /* Snapshot system time for historic interpolation */
+ ktime_get_snapshot(&ctx->snapshot);
+
/* Program cmd to master timer */
ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
/* Start the ART and device clock sync sequence */
- hh_art_ctl = rd32(hw, GLHH_ART_CTL);
- hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M;
- wr32(hw, GLHH_ART_CTL, hh_art_ctl);
-
- for (i = 0; i < MAX_HH_CTL_LOCK_TRIES; i++) {
- /* Wait for sync to complete */
- hh_art_ctl = rd32(hw, GLHH_ART_CTL);
- if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) {
- udelay(1);
- continue;
- } else {
- u32 hh_ts_lo, hh_ts_hi, tmr_idx;
- u64 hh_ts;
-
- tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
- /* Read ART time */
- hh_ts_lo = rd32(hw, GLHH_ART_TIME_L);
- hh_ts_hi = rd32(hw, GLHH_ART_TIME_H);
- hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
- system->cycles = hh_ts;
- system->cs_id = CSID_X86_ART;
- /* Read Device source clock time */
- hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx));
- hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx));
- hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
- *device = ns_to_ktime(hh_ts);
- break;
- }
- }
+ ctl = rd32(hw, cfg->ctl_reg);
+ ctl |= cfg->ctl_active;
+ wr32(hw, cfg->ctl_reg, ctl);
+ /* Poll until hardware completes the capture */
+ err = rd32_poll_timeout(hw, cfg->ctl_reg, ctl, !(ctl & cfg->ctl_active),
+ 5, 20 * USEC_PER_MSEC);
+ if (err)
+ goto err_timeout;
+
+ /* Read ART system time */
+ ts_lo = rd32(hw, cfg->art_time_l);
+ ts_hi = rd32(hw, cfg->art_time_h);
+ ts = ((u64)ts_hi << 32) | ts_lo;
+ system->cycles = ts;
+ system->cs_id = CSID_X86_ART;
+ system->use_nsecs = true;
+
+ /* Read Device source clock time */
+ ts_lo = rd32(hw, cfg->dev_time_l[tmr_idx]);
+ ts_hi = rd32(hw, cfg->dev_time_h[tmr_idx]);
+ ts = ((u64)ts_hi << 32) | ts_lo;
+ *device = ns_to_ktime(ts);
+
+err_timeout:
/* Clear the master timer */
ice_ptp_src_cmd(hw, ICE_PTP_NOP);
/* Release HW lock */
- hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
- hh_lock = hh_lock & ~PFHH_SEM_BUSY_M;
- wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock);
-
- if (i == MAX_HH_CTL_LOCK_TRIES)
- return -ETIMEDOUT;
+ lock = rd32(hw, cfg->lock_reg);
+ lock &= ~cfg->lock_busy;
+ wr32(hw, cfg->lock_reg, lock);
- return 0;
+ return err;
}
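rd32_poll_timeout() above is assumed to be a thin wrapper around the generic read_poll_timeout() helper from <linux/iopoll.h>; a minimal sketch of such a wrapper (the actual ice definition may differ):

#include <linux/iopoll.h>

#define rd32_poll_timeout(hw, addr, val, cond, sleep_us, timeout_us) \
	read_poll_timeout(rd32, val, cond, sleep_us, timeout_us, false, \
			  (hw), (addr))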
/**
- * ice_ptp_getcrosststamp_e82x - Capture a device cross timestamp
+ * ice_ptp_getcrosststamp - Capture a device cross timestamp
* @info: the driver's PTP info structure
* @cts: The memory to fill the cross timestamp info
*
@@ -2244,41 +2174,55 @@ ice_ptp_get_syncdevicetime(ktime_t *device,
* clock. Fill the cross timestamp information and report it back to the
* caller.
*
- * This is only valid for E822 and E823 devices which have support for
- * generating the cross timestamp via PCIe PTM.
- *
* In order to correctly correlate the ART timestamp back to the TSC time, the
* CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
+ *
+ * Return: zero on success, or a negative error code on failure.
*/
-static int
-ice_ptp_getcrosststamp_e82x(struct ptp_clock_info *info,
- struct system_device_crosststamp *cts)
+static int ice_ptp_getcrosststamp(struct ptp_clock_info *info,
+ struct system_device_crosststamp *cts)
{
struct ice_pf *pf = ptp_info_to_pf(info);
+ struct ice_crosststamp_ctx ctx = {
+ .pf = pf,
+ };
+
+ switch (pf->hw.mac_type) {
+ case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
+ ctx.cfg = &ice_crosststamp_cfg_e82x;
+ break;
+#ifdef CONFIG_ICE_HWTS
+ case ICE_MAC_E830:
+ ctx.cfg = &ice_crosststamp_cfg_e830;
+ break;
+#endif /* CONFIG_ICE_HWTS */
+ default:
+ return -EOPNOTSUPP;
+ }
- return get_device_system_crosststamp(ice_ptp_get_syncdevicetime,
- pf, NULL, cts);
+ return get_device_system_crosststamp(ice_capture_crosststamp, &ctx,
+ &ctx.snapshot, cts);
}
-#endif /* CONFIG_ICE_HWTS */
/**
- * ice_ptp_get_ts_config - ioctl interface to read the timestamping config
- * @pf: Board private structure
- * @ifr: ioctl data
+ * ice_ptp_hwtstamp_get - interface to read the timestamping config
+ * @netdev: Pointer to network interface device structure
+ * @config: Timestamping configuration structure
*
* Copy the timestamping config to user buffer
*/
-int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
+int ice_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config *config;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
if (pf->ptp.state != ICE_PTP_READY)
return -EIO;
- config = &pf->ptp.tstamp_config;
+ *config = pf->ptp.tstamp_config;
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
- -EFAULT : 0;
+ return 0;
}
/**
@@ -2286,8 +2230,8 @@ int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
* @pf: Board private structure
* @config: hwtstamp settings requested or saved
*/
-static int
-ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
+static int ice_ptp_set_timestamp_mode(struct ice_pf *pf,
+ struct kernel_hwtstamp_config *config)
{
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
@@ -2331,32 +2275,31 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
}
/**
- * ice_ptp_set_ts_config - ioctl interface to control the timestamping
- * @pf: Board private structure
- * @ifr: ioctl data
+ * ice_ptp_hwtstamp_set - interface to control the timestamping
+ * @netdev: Pointer to network interface device structure
+ * @config: Timestamping configuration structure
+ * @extack: Netlink extended ack structure for error reporting
*
* Get the user config and store it
*/
-int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
+int ice_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
int err;
if (pf->ptp.state != ICE_PTP_READY)
return -EAGAIN;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- err = ice_ptp_set_timestamp_mode(pf, &config);
+ err = ice_ptp_set_timestamp_mode(pf, config);
if (err)
return err;
/* Return the actual configuration set */
- config = pf->ptp.tstamp_config;
+ *config = pf->ptp.tstamp_config;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
/**
@@ -2402,14 +2345,14 @@ static void ice_ptp_setup_pin_cfg(struct ice_pf *pf)
for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) {
const struct ice_ptp_pin_desc *desc = &pf->ptp.ice_pin_desc[i];
struct ptp_pin_desc *pin = &pf->ptp.pin_desc[i];
- const char *name = NULL;
+ const char *name;
if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
name = ice_pin_names[desc->name_idx];
- else if (desc->name_idx != GPIO_NA)
- name = ice_pin_names_nvm[desc->name_idx];
- if (name)
- strscpy(pin->name, name, sizeof(pin->name));
+ else
+ name = ice_pin_names_dpll[desc->name_idx];
+
+ strscpy(pin->name, name, sizeof(pin->name));
pin->index = i;
}
@@ -2421,8 +2364,8 @@ static void ice_ptp_setup_pin_cfg(struct ice_pf *pf)
* ice_ptp_disable_pins - Disable PTP pins
* @pf: pointer to the PF structure
*
- * Disable the OS access to the SMA pins. Called to clear out the OS
- * indications of pin support when we fail to setup the SMA control register.
+ * Disable the OS access to the pins. Called to clear out the OS
+ * indications of pin support when we fail to set up the pin array.
*/
static void ice_ptp_disable_pins(struct ice_pf *pf)
{
@@ -2463,40 +2406,30 @@ static int ice_ptp_parse_sdp_entries(struct ice_pf *pf, __le16 *entries,
for (i = 0; i < num_entries; i++) {
u16 entry = le16_to_cpu(entries[i]);
DECLARE_BITMAP(bitmap, GPIO_NA);
- unsigned int bitmap_idx;
+ unsigned int idx;
bool dir;
u16 gpio;
*bitmap = FIELD_GET(ICE_AQC_NVM_SDP_AC_PIN_M, entry);
+
+ /* Check if entry's pin bitmap is valid. */
+ if (bitmap_empty(bitmap, GPIO_NA))
+ continue;
+
dir = !!FIELD_GET(ICE_AQC_NVM_SDP_AC_DIR_M, entry);
gpio = FIELD_GET(ICE_AQC_NVM_SDP_AC_SDP_NUM_M, entry);
- for_each_set_bit(bitmap_idx, bitmap, GPIO_NA + 1) {
- unsigned int idx;
-
- /* Check if entry's pin bit is valid */
- if (bitmap_idx >= NUM_PTP_PINS_NVM &&
- bitmap_idx != GPIO_NA)
- continue;
- /* Check if pin already exists */
- for (idx = 0; idx < ICE_N_PINS_MAX; idx++)
- if (pins[idx].name_idx == bitmap_idx)
- break;
-
- if (idx == ICE_N_PINS_MAX) {
- /* Pin not found, setup its entry and name */
- idx = n_pins++;
- pins[idx].name_idx = bitmap_idx;
- if (bitmap_idx == GPIO_NA)
- strscpy(pf->ptp.pin_desc[idx].name,
- ice_pin_names[gpio],
- sizeof(pf->ptp.pin_desc[idx]
- .name));
- }
+ for (idx = 0; idx < ICE_N_PINS_MAX; idx++) {
+ if (pins[idx].name_idx == gpio)
+ break;
+ }
- /* Setup in/out GPIO number */
- pins[idx].gpio[dir] = gpio;
+ if (idx == ICE_N_PINS_MAX) {
+ /* Pin not found, setup its entry and name */
+ idx = n_pins++;
+ pins[idx].name_idx = gpio;
}
+ pins[idx].gpio[dir] = gpio;
}
for (i = 0; i < n_pins; i++) {
@@ -2520,18 +2453,14 @@ static int ice_ptp_parse_sdp_entries(struct ice_pf *pf, __le16 *entries,
*/
static void ice_ptp_set_funcs_e82x(struct ice_pf *pf)
{
-#ifdef CONFIG_ICE_HWTS
- if (boot_cpu_has(X86_FEATURE_ART) &&
- boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
- pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp_e82x;
+ pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;
-#endif /* CONFIG_ICE_HWTS */
- if (ice_is_e825c(&pf->hw)) {
+ if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825) {
pf->ptp.ice_pin_desc = ice_pin_desc_e825c;
- pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e825c);
+ pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e825c);
} else {
pf->ptp.ice_pin_desc = ice_pin_desc_e82x;
- pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e82x);
+ pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e82x);
}
ice_ptp_setup_pin_cfg(pf);
}
@@ -2557,15 +2486,13 @@ static void ice_ptp_set_funcs_e810(struct ice_pf *pf)
if (err) {
/* SDP section does not exist in NVM or is corrupted */
if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
- ptp->ice_pin_desc = ice_pin_desc_e810_sma;
- ptp->info.n_pins =
- ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810_sma);
+ ptp->ice_pin_desc = ice_pin_desc_dpll;
+ ptp->info.n_pins = ARRAY_SIZE(ice_pin_desc_dpll);
} else {
pf->ptp.ice_pin_desc = ice_pin_desc_e810;
- pf->ptp.info.n_pins =
- ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810);
- err = 0;
+ pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e810);
}
+ err = 0;
} else {
desc = devm_kcalloc(ice_pf_to_dev(pf), ICE_N_PINS_MAX,
sizeof(struct ice_ptp_pin_desc),
@@ -2583,8 +2510,6 @@ static void ice_ptp_set_funcs_e810(struct ice_pf *pf)
ptp->info.pin_config = ptp->pin_desc;
ice_ptp_setup_pin_cfg(pf);
- if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
- err = ice_ptp_set_sma_cfg(pf);
err:
if (err) {
devm_kfree(ice_pf_to_dev(pf), desc);
@@ -2593,6 +2518,28 @@ err:
}
/**
+ * ice_ptp_set_funcs_e830 - Set specialized functions for E830 support
+ * @pf: Board private structure
+ *
+ * Assign functions to the PTP capabilities structure for E830 devices.
+ * Functions which operate across all device families should be set directly
+ * in ice_ptp_set_caps. Only add functions here which are distinct for E830
+ * devices.
+ */
+static void ice_ptp_set_funcs_e830(struct ice_pf *pf)
+{
+#ifdef CONFIG_ICE_HWTS
+ if (pcie_ptm_enabled(pf->pdev) && boot_cpu_has(X86_FEATURE_ART))
+ pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;
+
+#endif /* CONFIG_ICE_HWTS */
+ /* Rest of the config is the same as base E810 */
+ pf->ptp.ice_pin_desc = ice_pin_desc_e810;
+ pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e810);
+ ice_ptp_setup_pin_cfg(pf);
+}
+
+/**
* ice_ptp_set_caps - Set PTP capabilities
* @pf: Board private structure
*/
@@ -2614,10 +2561,25 @@ static void ice_ptp_set_caps(struct ice_pf *pf)
info->enable = ice_ptp_gpio_enable;
info->verify = ice_verify_pin;
- if (ice_is_e810(&pf->hw))
+ info->supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
+ info->supported_perout_flags = PTP_PEROUT_PHASE;
+
+ switch (pf->hw.mac_type) {
+ case ICE_MAC_E810:
ice_ptp_set_funcs_e810(pf);
- else
+ return;
+ case ICE_MAC_E830:
+ ice_ptp_set_funcs_e830(pf);
+ return;
+ case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
ice_ptp_set_funcs_e82x(pf);
+ return;
+ default:
+ return;
+ }
}
/**
@@ -2728,6 +2690,68 @@ enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
}
/**
+ * ice_ptp_ts_irq - Process the PTP Tx timestamps in IRQ context
+ * @pf: Board private structure
+ *
+ * Return: IRQ_WAKE_THREAD if Tx timestamp read has to be handled in the bottom
+ * half of the interrupt and IRQ_HANDLED otherwise.
+ */
+irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ /* E810 capable of low latency timestamping with interrupt can
+ * request a single timestamp in the top half and wait for
+ * a second LL TS interrupt from the FW when it's ready.
+ */
+ if (hw->dev_caps.ts_dev_info.ts_ll_int_read) {
+ struct ice_ptp_tx *tx = &pf->ptp.port.tx;
+ u8 idx, last;
+
+ if (!ice_pf_state_is_nominal(pf))
+ return IRQ_HANDLED;
+
+ spin_lock(&tx->lock);
+ if (tx->init) {
+ last = tx->last_ll_ts_idx_read + 1;
+ idx = find_next_bit_wrap(tx->in_use, tx->len,
+ last);
+ if (idx != tx->len)
+ ice_ptp_req_tx_single_tstamp(tx, idx);
+ }
+ spin_unlock(&tx->lock);
+
+ return IRQ_HANDLED;
+ }
+ fallthrough; /* non-LL_TS E810 */
+ case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
+ /* All other devices process timestamps in the bottom half due
+ * to sleeping or polling.
+ */
+ if (!ice_ptp_pf_handles_tx_interrupt(pf))
+ return IRQ_HANDLED;
+
+ set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
+ return IRQ_WAKE_THREAD;
+ case ICE_MAC_E830:
+ /* E830 can read timestamps in the top half using rd32() */
+ if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
+ /* Process outstanding Tx timestamps. If there
+ * is more work, re-arm the interrupt to trigger again.
+ */
+ wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
+ ice_flush(hw);
+ }
+ return IRQ_HANDLED;
+ default:
+ return IRQ_HANDLED;
+ }
+}
+
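A minimal sketch of the caller side, showing how the hard-IRQ handler consumes the return value so timestamp work either completes in the top half or is deferred to the threaded handler (the handler name is illustrative; PFINT_OICR and PFINT_OICR_TSYN_TX_M are the registers used above):

static irqreturn_t example_misc_intr(int __always_unused irq, void *data)
{
	struct ice_pf *pf = data;
	irqreturn_t ret = IRQ_HANDLED;
	u32 oicr;

	/* Read the interrupt cause register */
	oicr = rd32(&pf->hw, PFINT_OICR);

	if (oicr & PFINT_OICR_TSYN_TX_M)
		ret = ice_ptp_ts_irq(pf);	/* IRQ_HANDLED or IRQ_WAKE_THREAD */

	return ret;
}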
+/**
* ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timestamp interrupt
* @pf: Board private structure
*
@@ -2747,7 +2771,7 @@ static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
bool trigger_oicr = false;
unsigned int i;
- if (ice_is_e810(hw))
+ if (!pf->ptp.port.tx.has_ready_bitmap)
return;
if (!ice_pf_src_tmr_owned(pf))
@@ -2794,6 +2818,32 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
}
/**
+ * ice_ptp_prepare_rebuild_sec - Prepare second NAC for PTP reset or rebuild
+ * @pf: Board private structure
+ * @rebuild: rebuild if true, prepare if false
+ * @reset_type: the reset type being performed
+ */
+static void ice_ptp_prepare_rebuild_sec(struct ice_pf *pf, bool rebuild,
+ enum ice_reset_req reset_type)
+{
+ struct list_head *entry;
+
+ list_for_each(entry, &pf->adapter->ports.ports) {
+ struct ice_ptp_port *port = list_entry(entry,
+ struct ice_ptp_port,
+ list_node);
+ struct ice_pf *peer_pf = ptp_port_to_pf(port);
+
+ if (!ice_is_primary(&peer_pf->hw)) {
+ if (rebuild)
+ ice_ptp_rebuild(peer_pf, reset_type);
+ else
+ ice_ptp_prepare_for_reset(peer_pf, reset_type);
+ }
+ }
+}
+
+/**
* ice_ptp_prepare_for_reset - Prepare PTP for reset
* @pf: Board private structure
* @reset_type: the reset type being performed
@@ -2801,6 +2851,7 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
struct ice_ptp *ptp = &pf->ptp;
+ struct ice_hw *hw = &pf->hw;
u8 src_tmr;
if (ptp->state != ICE_PTP_READY)
@@ -2816,6 +2867,9 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
if (reset_type == ICE_RESET_PFR)
return;
+ if (ice_pf_src_tmr_owned(pf) && hw->mac_type == ICE_MAC_GENERIC_3K_E825)
+ ice_ptp_prepare_rebuild_sec(pf, false, reset_type);
+
ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
/* Disable periodic outputs */
@@ -2849,6 +2903,10 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf)
if (err)
return err;
+ err = ice_tspll_init(hw);
+ if (err)
+ return err;
+
/* Acquire the global hardware lock */
if (!ice_ptp_lock(hw)) {
err = -EBUSY;
@@ -2882,14 +2940,12 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf)
*/
ice_ptp_flush_all_tx_tracker(pf);
- if (!ice_is_e810(hw)) {
- /* Enable quad interrupts */
- err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
- if (err)
- return err;
+ /* Enable quad interrupts */
+ err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
+ if (err)
+ return err;
- ice_ptp_restart_all_phy(pf);
- }
+ ice_ptp_restart_all_phy(pf);
/* Re-enable all periodic outputs and external timestamp events */
ice_ptp_enable_all_perout(pf);
@@ -2939,12 +2995,6 @@ err:
dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
}
-static bool ice_is_primary(struct ice_hw *hw)
-{
- return ice_is_e825c(hw) && ice_is_dual(hw) ?
- !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) : true;
-}
-
static int ice_ptp_setup_adapter(struct ice_pf *pf)
{
if (!ice_pf_src_tmr_owned(pf) || !ice_is_primary(&pf->hw))
@@ -2960,7 +3010,7 @@ static int ice_ptp_setup_pf(struct ice_pf *pf)
struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
struct ice_ptp *ptp = &pf->ptp;
- if (WARN_ON(!ctrl_ptp) || ice_get_phy_model(&pf->hw) == ICE_PHY_UNSUP)
+ if (WARN_ON(!ctrl_ptp) || pf->hw.mac_type == ICE_MAC_UNKNOWN)
return -ENODEV;
INIT_LIST_HEAD(&ptp->port.list_node);
@@ -2977,7 +3027,7 @@ static void ice_ptp_cleanup_pf(struct ice_pf *pf)
{
struct ice_ptp *ptp = &pf->ptp;
- if (ice_get_phy_model(&pf->hw) != ICE_PHY_UNSUP) {
+ if (pf->hw.mac_type != ICE_MAC_UNKNOWN) {
mutex_lock(&pf->adapter->ports.lock);
list_del(&ptp->port.list_node);
mutex_unlock(&pf->adapter->ports.lock);
@@ -3024,6 +3074,13 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
return err;
}
+ err = ice_tspll_init(hw);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "Failed to initialize CGU, status %d\n",
+ err);
+ return err;
+ }
+
/* Acquire the global hardware lock */
if (!ice_ptp_lock(hw)) {
err = -EBUSY;
@@ -3080,7 +3137,7 @@ static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
/* Allocate a kworker for handling work required for the ports
* connected to the PTP hardware clock.
*/
- kworker = kthread_create_worker(0, "ice-ptp-%s",
+ kworker = kthread_run_worker(0, "ice-ptp-%s",
dev_name(ice_pf_to_dev(pf)));
if (IS_ERR(kworker))
return PTR_ERR(kworker);
@@ -3097,6 +3154,8 @@ static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
* ice_ptp_init_port - Initialize PTP port structure
* @pf: Board private structure
* @ptp_port: PTP port structure
+ *
+ * Return: 0 on success, -ENODEV on invalid MAC type, -ENOMEM on failed alloc.
*/
static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
{
@@ -3104,16 +3163,14 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
mutex_init(&ptp_port->ps_lock);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_ptp_init_tx_eth56g(pf, &ptp_port->tx,
- ptp_port->port_num);
- case ICE_PHY_E810:
- return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
- case ICE_PHY_E82X:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_ptp_init_tx(pf, &ptp_port->tx, ptp_port->port_num);
+ case ICE_MAC_GENERIC:
kthread_init_delayed_work(&ptp_port->ov_work,
ice_ptp_wait_for_offsets);
-
return ice_ptp_init_tx_e82x(pf, &ptp_port->tx,
ptp_port->port_num);
default:
@@ -3132,8 +3189,8 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
*/
static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
{
- switch (ice_get_phy_model(&pf->hw)) {
- case ICE_PHY_E82X:
+ switch (pf->hw.mac_type) {
+ case ICE_MAC_GENERIC:
/* E822 based PHY has the clock owner process the interrupt
* for all ports.
*/
@@ -3168,6 +3225,12 @@ void ice_ptp_init(struct ice_pf *pf)
ptp->state = ICE_PTP_INITIALIZING;
+ if (hw->lane_num < 0) {
+ err = hw->lane_num;
+ goto err_exit;
+ }
+ ptp->port.port_num = hw->lane_num;
+
ice_ptp_init_hw(hw);
ice_ptp_init_tx_interrupt_mode(pf);
@@ -3188,13 +3251,9 @@ void ice_ptp_init(struct ice_pf *pf)
if (err)
goto err_exit;
- ptp->port.port_num = hw->pf_id;
- if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
- ptp->port.port_num = hw->pf_id * 2;
-
err = ice_ptp_init_port(pf, &ptp->port);
if (err)
- goto err_exit;
+ goto err_clean_pf;
/* Start the PHY timestamping block */
ice_ptp_reset_phy_timestamping(pf);
@@ -3211,13 +3270,19 @@ void ice_ptp_init(struct ice_pf *pf)
dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
return;
+err_clean_pf:
+ mutex_destroy(&ptp->port.ps_lock);
+ ice_ptp_cleanup_pf(pf);
err_exit:
/* If we registered a PTP clock, release it */
if (pf->ptp.clock) {
ptp_clock_unregister(ptp->clock);
pf->ptp.clock = NULL;
}
- ptp->state = ICE_PTP_ERROR;
+ /* Keep ICE_PTP_UNINIT state to avoid ambiguity at driver unload
+ * and to avoid releasing resources twice.
+ */
+ ptp->state = ICE_PTP_UNINIT;
dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
}
@@ -3230,9 +3295,19 @@ err_exit:
*/
void ice_ptp_release(struct ice_pf *pf)
{
- if (pf->ptp.state != ICE_PTP_READY)
+ if (pf->ptp.state == ICE_PTP_UNINIT)
return;
+ if (pf->ptp.state != ICE_PTP_READY) {
+ mutex_destroy(&pf->ptp.port.ps_lock);
+ ice_ptp_cleanup_pf(pf);
+ if (pf->ptp.clock) {
+ ptp_clock_unregister(pf->ptp.clock);
+ pf->ptp.clock = NULL;
+ }
+ return;
+ }
+
pf->ptp.state = ICE_PTP_UNINIT;
/* Disable timestamping for both Tx and Rx */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 824e73b677a4..27016aac4f1e 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -128,8 +128,7 @@ struct ice_ptp_tx {
/* Quad and port information for initializing timestamp blocks */
#define INDEX_PER_QUAD 64
#define INDEX_PER_PORT_E82X 16
-#define INDEX_PER_PORT_E810 64
-#define INDEX_PER_PORT_ETH56G 64
+#define INDEX_PER_PORT 64
/**
* struct ice_ptp_port - data used to initialize an external port for PTP
@@ -203,14 +202,12 @@ enum ice_ptp_pin_nvm {
/* Pin definitions for PTP */
#define ICE_N_PINS_MAX 6
-#define ICE_SMA_PINS_NUM 4
-#define ICE_PIN_DESC_ARR_LEN(_arr) (sizeof(_arr) / \
- sizeof(struct ice_ptp_pin_desc))
/**
* struct ice_ptp_pin_desc - hardware pin description data
* @name_idx: index of the name of pin in ice_pin_names
* @gpio: the associated GPIO input and output pins
+ * @delay: input and output signal delays in nanoseconds
*
* Structure describing a PTP-capable GPIO pin that extends ptp_pin_desc array
* for the device. Device families have separate sets of available pins with
@@ -219,6 +216,7 @@ enum ice_ptp_pin_nvm {
struct ice_ptp_pin_desc {
int name_idx;
int gpio[2];
+ unsigned int delay[2];
};
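
For illustration only, a descriptor table entry using the extended layout could look like the sketch below; the pin names, GPIO numbers and delays are made up, only the field order follows the structure above.

	static const struct ice_ptp_pin_desc example_pins[] = {
		/* name_idx, gpio { in, out }, delay { in, out } in ns - example data */
		{ 0, { 1, 5 }, { 0, 11 } },
		{ 1, { 2, 6 }, { 0, 12 } },
	};
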
/**
@@ -239,6 +237,7 @@ struct ice_ptp_pin_desc {
* @clock: pointer to registered PTP clock device
* @tstamp_config: hardware timestamping configuration
* @reset_time: kernel time after clock stop on reset
+ * @tx_hwtstamp_good: number of completed Tx timestamp requests
* @tx_hwtstamp_skipped: number of Tx time stamp requests skipped
* @tx_hwtstamp_timeouts: number of Tx skbs discarded with no time stamp
* @tx_hwtstamp_flushed: number of Tx skbs flushed due to interface closed
@@ -261,8 +260,9 @@ struct ice_ptp {
struct ptp_extts_request extts_rqs[GLTSYN_EVNT_H_IDX_MAX];
struct ptp_clock_info info;
struct ptp_clock *clock;
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
u64 reset_time;
+ u64 tx_hwtstamp_good;
u32 tx_hwtstamp_skipped;
u32 tx_hwtstamp_timeouts;
u32 tx_hwtstamp_flushed;
@@ -293,8 +293,11 @@ struct ice_ptp {
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
int ice_ptp_clock_index(struct ice_pf *pf);
struct ice_pf;
-int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr);
-int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr);
+int ice_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int ice_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void ice_ptp_restore_timestamp_mode(struct ice_pf *pf);
void ice_ptp_extts_event(struct ice_pf *pf);
@@ -302,6 +305,9 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx);
void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx);
enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf);
+irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf);
+u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf,
+ struct ptp_system_timestamp *sts);
u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
const struct ice_pkt_ctx *pkt_ctx);
@@ -310,14 +316,18 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf,
enum ice_reset_req reset_type);
void ice_ptp_init(struct ice_pf *pf);
void ice_ptp_release(struct ice_pf *pf);
-void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup);
+void ice_ptp_link_change(struct ice_pf *pf, bool linkup);
#else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
-static inline int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
+
+static inline int ice_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
return -EOPNOTSUPP;
}
-static inline int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
+static inline int ice_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
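
The new prototypes match the kernel's ndo_hwtstamp_get()/ndo_hwtstamp_set() callback signatures, so the natural wiring is through net_device_ops rather than the legacy SIOCSHWTSTAMP ioctl path. A minimal sketch, assuming a hypothetical ice_example_netdev_ops table (the driver's real netdev ops are not part of this hunk):

	static const struct net_device_ops ice_example_netdev_ops = {
		/* ... other callbacks ... */
		.ndo_hwtstamp_get = ice_ptp_hwtstamp_get,
		.ndo_hwtstamp_set = ice_ptp_hwtstamp_set,
	};
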
@@ -340,6 +350,17 @@ static inline bool ice_ptp_process_ts(struct ice_pf *pf)
return true;
}
+static inline irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf)
+{
+ return IRQ_HANDLED;
+}
+
+static inline u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf,
+ struct ptp_system_timestamp *sts)
+{
+ return 0;
+}
+
static inline u64
ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
const struct ice_pkt_ctx *pkt_ctx)
@@ -358,7 +379,7 @@ static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf,
}
static inline void ice_ptp_init(struct ice_pf *pf) { }
static inline void ice_ptp_release(struct ice_pf *pf) { }
-static inline void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
+static inline void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
{
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
index 585ce200c60f..19dddd9b53dd 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
@@ -10,70 +10,25 @@
/* Constants defined for the PTP 1588 clock hardware. */
const struct ice_phy_reg_info_eth56g eth56g_phy_res[NUM_ETH56G_PHY_RES] = {
- /* ETH56G_PHY_REG_PTP */
- {
- /* base_addr */
- {
- 0x092000,
- 0x126000,
- 0x1BA000,
- 0x24E000,
- 0x2E2000,
- },
- /* step */
- 0x98,
+ [ETH56G_PHY_REG_PTP] = {
+ .base_addr = 0x092000,
+ .step = 0x98,
},
- /* ETH56G_PHY_MEM_PTP */
- {
- /* base_addr */
- {
- 0x093000,
- 0x127000,
- 0x1BB000,
- 0x24F000,
- 0x2E3000,
- },
- /* step */
- 0x200,
+ [ETH56G_PHY_MEM_PTP] = {
+ .base_addr = 0x093000,
+ .step = 0x200,
},
- /* ETH56G_PHY_REG_XPCS */
- {
- /* base_addr */
- {
- 0x000000,
- 0x009400,
- 0x128000,
- 0x1BC000,
- 0x250000,
- },
- /* step */
- 0x21000,
+ [ETH56G_PHY_REG_XPCS] = {
+ .base_addr = 0x000000,
+ .step = 0x21000,
},
- /* ETH56G_PHY_REG_MAC */
- {
- /* base_addr */
- {
- 0x085000,
- 0x119000,
- 0x1AD000,
- 0x241000,
- 0x2D5000,
- },
- /* step */
- 0x1000,
+ [ETH56G_PHY_REG_MAC] = {
+ .base_addr = 0x085000,
+ .step = 0x1000,
},
- /* ETH56G_PHY_REG_GPCS */
- {
- /* base_addr */
- {
- 0x084000,
- 0x118000,
- 0x1AC000,
- 0x240000,
- 0x2D4000,
- },
- /* step */
- 0x400,
+ [ETH56G_PHY_REG_GPCS] = {
+ .base_addr = 0x084000,
+ .step = 0x400,
},
};
@@ -131,7 +86,7 @@ struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD] = {
.rx_offset = {
.serdes = 0xffffeb27, /* -10.42424 */
.no_fec = 0xffffcccd, /* -25.6 */
- .fc = 0xfffe0014, /* -255.96 */
+ .fc = 0xfffc557b, /* -469.26 */
.sfd = 0x4a4, /* 2.32 */
.bs_ds = 0x32 /* 0.0969697 */
}
@@ -326,7 +281,7 @@ struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD] = {
/* struct ice_time_ref_info_e82x
*
- * E822 hardware can use different sources as the reference for the PTP
+ * E82X hardware can use different sources as the reference for the PTP
* hardware clock. Each clock has different characteristics such as a slightly
* different frequency, etc.
*
@@ -334,226 +289,53 @@ struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD] = {
* reference. See the struct ice_time_ref_info_e82x for information about the
* meaning of each constant.
*/
-const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
- /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
+const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TSPLL_FREQ] = {
+ /* ICE_TSPLL_FREQ_25_000 -> 25 MHz */
{
/* pll_freq */
823437500, /* 823.4375 MHz PLL */
/* nominal_incval */
0x136e44fabULL,
- /* pps_delay */
- 11,
},
- /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
+ /* ICE_TSPLL_FREQ_122_880 -> 122.88 MHz */
{
/* pll_freq */
783360000, /* 783.36 MHz */
/* nominal_incval */
0x146cc2177ULL,
- /* pps_delay */
- 12,
},
- /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
+ /* ICE_TSPLL_FREQ_125_000 -> 125 MHz */
{
/* pll_freq */
796875000, /* 796.875 MHz */
/* nominal_incval */
0x141414141ULL,
- /* pps_delay */
- 12,
},
- /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
+ /* ICE_TSPLL_FREQ_153_600 -> 153.6 MHz */
{
/* pll_freq */
816000000, /* 816 MHz */
/* nominal_incval */
0x139b9b9baULL,
- /* pps_delay */
- 12,
},
- /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
+ /* ICE_TSPLL_FREQ_156_250 -> 156.25 MHz */
{
/* pll_freq */
830078125, /* 830.078125 MHz */
/* nominal_incval */
0x134679aceULL,
- /* pps_delay */
- 11,
},
- /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
+ /* ICE_TSPLL_FREQ_245_760 -> 245.76 MHz */
{
/* pll_freq */
783360000, /* 783.36 MHz */
/* nominal_incval */
0x146cc2177ULL,
- /* pps_delay */
- 12,
- },
-};
-
-const struct ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
- /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
- {
- /* refclk_pre_div */
- 1,
- /* feedback_div */
- 197,
- /* frac_n_div */
- 2621440,
- /* post_pll_div */
- 6,
- },
-
- /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
- {
- /* refclk_pre_div */
- 5,
- /* feedback_div */
- 223,
- /* frac_n_div */
- 524288,
- /* post_pll_div */
- 7,
- },
-
- /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
- {
- /* refclk_pre_div */
- 5,
- /* feedback_div */
- 223,
- /* frac_n_div */
- 524288,
- /* post_pll_div */
- 7,
- },
-
- /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
- {
- /* refclk_pre_div */
- 5,
- /* feedback_div */
- 159,
- /* frac_n_div */
- 1572864,
- /* post_pll_div */
- 6,
- },
-
- /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
- {
- /* refclk_pre_div */
- 5,
- /* feedback_div */
- 159,
- /* frac_n_div */
- 1572864,
- /* post_pll_div */
- 6,
- },
-
- /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
- {
- /* refclk_pre_div */
- 10,
- /* feedback_div */
- 223,
- /* frac_n_div */
- 524288,
- /* post_pll_div */
- 7,
- },
-};
-
-const
-struct ice_cgu_pll_params_e825c e825c_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
- /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
- {
- /* tspll_ck_refclkfreq */
- 0x19,
- /* tspll_ndivratio */
- 1,
- /* tspll_fbdiv_intgr */
- 320,
- /* tspll_fbdiv_frac */
- 0,
- /* ref1588_ck_div */
- 0,
- },
-
- /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
- {
- /* tspll_ck_refclkfreq */
- 0x29,
- /* tspll_ndivratio */
- 3,
- /* tspll_fbdiv_intgr */
- 195,
- /* tspll_fbdiv_frac */
- 1342177280UL,
- /* ref1588_ck_div */
- 0,
- },
-
- /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
- {
- /* tspll_ck_refclkfreq */
- 0x3E,
- /* tspll_ndivratio */
- 2,
- /* tspll_fbdiv_intgr */
- 128,
- /* tspll_fbdiv_frac */
- 0,
- /* ref1588_ck_div */
- 0,
- },
-
- /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
- {
- /* tspll_ck_refclkfreq */
- 0x33,
- /* tspll_ndivratio */
- 3,
- /* tspll_fbdiv_intgr */
- 156,
- /* tspll_fbdiv_frac */
- 1073741824UL,
- /* ref1588_ck_div */
- 0,
- },
-
- /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
- {
- /* tspll_ck_refclkfreq */
- 0x1F,
- /* tspll_ndivratio */
- 5,
- /* tspll_fbdiv_intgr */
- 256,
- /* tspll_fbdiv_frac */
- 0,
- /* ref1588_ck_div */
- 0,
- },
-
- /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
- {
- /* tspll_ck_refclkfreq */
- 0x52,
- /* tspll_ndivratio */
- 3,
- /* tspll_fbdiv_intgr */
- 97,
- /* tspll_fbdiv_frac */
- 2818572288UL,
- /* ref1588_ck_div */
- 0,
},
};
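
A quick way to sanity-check the table above: nominal_incval is just the PLL period expressed as 32.32 fixed-point nanoseconds, so it can be recomputed from pll_freq. The helper below is only an illustration of that relationship; adjust_by_scaled_ppm() is the generic kernel helper an .adjfine callback would typically use to scale it afterwards.

	#include <linux/math64.h>
	#include <linux/ptp_clock_kernel.h>

	/* PLL period in nanoseconds as a 32.32 fixed-point value */
	static u64 example_nominal_incval(u64 pll_freq_hz)
	{
		return DIV64_U64_ROUND_CLOSEST(1000000000ULL << 32, pll_freq_hz);
	}

	/*
	 * example_nominal_incval(823437500) == 0x136E44FAB  (25 MHz entry above)
	 * example_nominal_incval(796875000) == 0x141414141  (125 MHz entry above)
	 *
	 * A frequency adjustment then scales the nominal value, e.g.:
	 *	incval = adjust_by_scaled_ppm(example_nominal_incval(823437500),
	 *				      scaled_ppm);
	 */
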
@@ -761,9 +543,9 @@ const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD] = {
/* rx_desk_rsgb_par */
644531250, /* 644.53125 MHz Reed Solomon gearbox */
/* tx_desk_rsgb_pcs */
- 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ 390625000, /* 390.625 MHz Reed Solomon gearbox */
/* rx_desk_rsgb_pcs */
- 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ 390625000, /* 390.625 MHz Reed Solomon gearbox */
/* tx_fixed_delay */
1620,
/* pmd_adj_divisor */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index dfd49732bd5b..35680dbe4a7f 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -6,7 +6,6 @@
#include "ice_common.h"
#include "ice_ptp_hw.h"
#include "ice_ptp_consts.h"
-#include "ice_cgu_regs.h"
static struct dpll_pin_frequency ice_cgu_pin_freq_common[] = {
DPLL_PIN_FREQUENCY_1PPS,
@@ -150,7 +149,7 @@ static const struct ice_cgu_pin_desc ice_e823_zl_cgu_outputs[] = {
* | 8 bit s | | 32 bits |
* +---------------+ +---------------+
*
- * The increment value is added to the GLSTYN_TIME_R and GLSTYN_TIME_L
+ * The increment value is added to the GLTSYN_TIME_R and GLTSYN_TIME_L
* registers every clock source tick. Depending on the specific device
* configuration, the clock source frequency could be one of a number of
* values.
@@ -226,547 +225,6 @@ static u64 ice_ptp_read_src_incval(struct ice_hw *hw)
}
/**
- * ice_read_cgu_reg_e82x - Read a CGU register
- * @hw: pointer to the HW struct
- * @addr: Register address to read
- * @val: storage for register value read
- *
- * Read the contents of a register of the Clock Generation Unit. Only
- * applicable to E822 devices.
- *
- * Return: 0 on success, other error codes when failed to read from CGU
- */
-static int ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val)
-{
- struct ice_sbq_msg_input cgu_msg = {
- .opcode = ice_sbq_msg_rd,
- .dest_dev = cgu,
- .msg_addr_low = addr
- };
- int err;
-
- err = ice_sbq_rw_reg(hw, &cgu_msg, ICE_AQ_FLAG_RD);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n",
- addr, err);
- return err;
- }
-
- *val = cgu_msg.data;
-
- return 0;
-}
-
-/**
- * ice_write_cgu_reg_e82x - Write a CGU register
- * @hw: pointer to the HW struct
- * @addr: Register address to write
- * @val: value to write into the register
- *
- * Write the specified value to a register of the Clock Generation Unit. Only
- * applicable to E822 devices.
- *
- * Return: 0 on success, other error codes when failed to write to CGU
- */
-static int ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val)
-{
- struct ice_sbq_msg_input cgu_msg = {
- .opcode = ice_sbq_msg_wr,
- .dest_dev = cgu,
- .msg_addr_low = addr,
- .data = val
- };
- int err;
-
- err = ice_sbq_rw_reg(hw, &cgu_msg, ICE_AQ_FLAG_RD);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n",
- addr, err);
- return err;
- }
-
- return err;
-}
-
-/**
- * ice_clk_freq_str - Convert time_ref_freq to string
- * @clk_freq: Clock frequency
- *
- * Return: specified TIME_REF clock frequency converted to a string
- */
-static const char *ice_clk_freq_str(enum ice_time_ref_freq clk_freq)
-{
- switch (clk_freq) {
- case ICE_TIME_REF_FREQ_25_000:
- return "25 MHz";
- case ICE_TIME_REF_FREQ_122_880:
- return "122.88 MHz";
- case ICE_TIME_REF_FREQ_125_000:
- return "125 MHz";
- case ICE_TIME_REF_FREQ_153_600:
- return "153.6 MHz";
- case ICE_TIME_REF_FREQ_156_250:
- return "156.25 MHz";
- case ICE_TIME_REF_FREQ_245_760:
- return "245.76 MHz";
- default:
- return "Unknown";
- }
-}
-
-/**
- * ice_clk_src_str - Convert time_ref_src to string
- * @clk_src: Clock source
- *
- * Return: specified clock source converted to its string name
- */
-static const char *ice_clk_src_str(enum ice_clk_src clk_src)
-{
- switch (clk_src) {
- case ICE_CLK_SRC_TCXO:
- return "TCXO";
- case ICE_CLK_SRC_TIME_REF:
- return "TIME_REF";
- default:
- return "Unknown";
- }
-}
-
-/**
- * ice_cfg_cgu_pll_e82x - Configure the Clock Generation Unit
- * @hw: pointer to the HW struct
- * @clk_freq: Clock frequency to program
- * @clk_src: Clock source to select (TIME_REF, or TCXO)
- *
- * Configure the Clock Generation Unit with the desired clock frequency and
- * time reference, enabling the PLL which drives the PTP hardware clock.
- *
- * Return:
- * * %0 - success
- * * %-EINVAL - input parameters are incorrect
- * * %-EBUSY - failed to lock TS PLL
- * * %other - CGU read/write failure
- */
-static int ice_cfg_cgu_pll_e82x(struct ice_hw *hw,
- enum ice_time_ref_freq clk_freq,
- enum ice_clk_src clk_src)
-{
- union tspll_ro_bwm_lf bwm_lf;
- union nac_cgu_dword19 dw19;
- union nac_cgu_dword22 dw22;
- union nac_cgu_dword24 dw24;
- union nac_cgu_dword9 dw9;
- int err;
-
- if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
- dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
- clk_freq);
- return -EINVAL;
- }
-
- if (clk_src >= NUM_ICE_CLK_SRC) {
- dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
- clk_src);
- return -EINVAL;
- }
-
- if (clk_src == ICE_CLK_SRC_TCXO &&
- clk_freq != ICE_TIME_REF_FREQ_25_000) {
- dev_warn(ice_hw_to_dev(hw),
- "TCXO only supports 25 MHz frequency\n");
- return -EINVAL;
- }
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
- if (err)
- return err;
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
- if (err)
- return err;
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
- if (err)
- return err;
-
- /* Log the current clock configuration */
- ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.ts_pll_enable ? "enabled" : "disabled",
- ice_clk_src_str(dw24.time_ref_sel),
- ice_clk_freq_str(dw9.time_ref_freq_sel),
- bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked");
-
- /* Disable the PLL before changing the clock source or frequency */
- if (dw24.ts_pll_enable) {
- dw24.ts_pll_enable = 0;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
- if (err)
- return err;
- }
-
- /* Set the frequency */
- dw9.time_ref_freq_sel = clk_freq;
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
- if (err)
- return err;
-
- /* Configure the TS PLL feedback divisor */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val);
- if (err)
- return err;
-
- dw19.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
- dw19.tspll_ndivratio = 1;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val);
- if (err)
- return err;
-
- /* Configure the TS PLL post divisor */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val);
- if (err)
- return err;
-
- dw22.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
- dw22.time1588clk_sel_div2 = 0;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val);
- if (err)
- return err;
-
- /* Configure the TS PLL pre divisor and clock source */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
- if (err)
- return err;
-
- dw24.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div;
- dw24.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
- dw24.time_ref_sel = clk_src;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
- if (err)
- return err;
-
- /* Finally, enable the PLL */
- dw24.ts_pll_enable = 1;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
- if (err)
- return err;
-
- /* Wait to verify if the PLL locks */
- usleep_range(1000, 5000);
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
- if (err)
- return err;
-
- if (!bwm_lf.plllock_true_lock_cri) {
- dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
- return -EBUSY;
- }
-
- /* Log the current clock configuration */
- ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.ts_pll_enable ? "enabled" : "disabled",
- ice_clk_src_str(dw24.time_ref_sel),
- ice_clk_freq_str(dw9.time_ref_freq_sel),
- bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked");
-
- return 0;
-}
-
-/**
- * ice_cfg_cgu_pll_e825c - Configure the Clock Generation Unit for E825-C
- * @hw: pointer to the HW struct
- * @clk_freq: Clock frequency to program
- * @clk_src: Clock source to select (TIME_REF, or TCXO)
- *
- * Configure the Clock Generation Unit with the desired clock frequency and
- * time reference, enabling the PLL which drives the PTP hardware clock.
- *
- * Return:
- * * %0 - success
- * * %-EINVAL - input parameters are incorrect
- * * %-EBUSY - failed to lock TS PLL
- * * %other - CGU read/write failure
- */
-static int ice_cfg_cgu_pll_e825c(struct ice_hw *hw,
- enum ice_time_ref_freq clk_freq,
- enum ice_clk_src clk_src)
-{
- union tspll_ro_lock_e825c ro_lock;
- union nac_cgu_dword16_e825c dw16;
- union nac_cgu_dword23_e825c dw23;
- union nac_cgu_dword19 dw19;
- union nac_cgu_dword22 dw22;
- union nac_cgu_dword24 dw24;
- union nac_cgu_dword9 dw9;
- int err;
-
- if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
- dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
- clk_freq);
- return -EINVAL;
- }
-
- if (clk_src >= NUM_ICE_CLK_SRC) {
- dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
- clk_src);
- return -EINVAL;
- }
-
- if (clk_src == ICE_CLK_SRC_TCXO &&
- clk_freq != ICE_TIME_REF_FREQ_156_250) {
- dev_warn(ice_hw_to_dev(hw),
- "TCXO only supports 156.25 MHz frequency\n");
- return -EINVAL;
- }
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
- if (err)
- return err;
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
- if (err)
- return err;
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD16_E825C, &dw16.val);
- if (err)
- return err;
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, &dw23.val);
- if (err)
- return err;
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_LOCK_E825C, &ro_lock.val);
- if (err)
- return err;
-
- /* Log the current clock configuration */
- ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.ts_pll_enable ? "enabled" : "disabled",
- ice_clk_src_str(dw23.time_ref_sel),
- ice_clk_freq_str(dw9.time_ref_freq_sel),
- ro_lock.plllock_true_lock_cri ? "locked" : "unlocked");
-
- /* Disable the PLL before changing the clock source or frequency */
- if (dw23.ts_pll_enable) {
- dw23.ts_pll_enable = 0;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C,
- dw23.val);
- if (err)
- return err;
- }
-
- /* Set the frequency */
- dw9.time_ref_freq_sel = clk_freq;
-
- /* Enable the correct receiver */
- if (clk_src == ICE_CLK_SRC_TCXO) {
- dw9.time_ref_en = 0;
- dw9.clk_eref0_en = 1;
- } else {
- dw9.time_ref_en = 1;
- dw9.clk_eref0_en = 0;
- }
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
- if (err)
- return err;
-
- /* Choose the referenced frequency */
- dw16.tspll_ck_refclkfreq =
- e825c_cgu_params[clk_freq].tspll_ck_refclkfreq;
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD16_E825C, dw16.val);
- if (err)
- return err;
-
- /* Configure the TS PLL feedback divisor */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val);
- if (err)
- return err;
-
- dw19.tspll_fbdiv_intgr =
- e825c_cgu_params[clk_freq].tspll_fbdiv_intgr;
- dw19.tspll_ndivratio =
- e825c_cgu_params[clk_freq].tspll_ndivratio;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val);
- if (err)
- return err;
-
- /* Configure the TS PLL post divisor */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val);
- if (err)
- return err;
-
- /* These two are constant for E825C */
- dw22.time1588clk_div = 5;
- dw22.time1588clk_sel_div2 = 0;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val);
- if (err)
- return err;
-
- /* Configure the TS PLL pre divisor and clock source */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, &dw23.val);
- if (err)
- return err;
-
- dw23.ref1588_ck_div =
- e825c_cgu_params[clk_freq].ref1588_ck_div;
- dw23.time_ref_sel = clk_src;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, dw23.val);
- if (err)
- return err;
-
- dw24.tspll_fbdiv_frac =
- e825c_cgu_params[clk_freq].tspll_fbdiv_frac;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
- if (err)
- return err;
-
- /* Finally, enable the PLL */
- dw23.ts_pll_enable = 1;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, dw23.val);
- if (err)
- return err;
-
- /* Wait to verify if the PLL locks */
- usleep_range(1000, 5000);
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_LOCK_E825C, &ro_lock.val);
- if (err)
- return err;
-
- if (!ro_lock.plllock_true_lock_cri) {
- dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
- return -EBUSY;
- }
-
- /* Log the current clock configuration */
- ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.ts_pll_enable ? "enabled" : "disabled",
- ice_clk_src_str(dw23.time_ref_sel),
- ice_clk_freq_str(dw9.time_ref_freq_sel),
- ro_lock.plllock_true_lock_cri ? "locked" : "unlocked");
-
- return 0;
-}
-
-#define ICE_ONE_PPS_OUT_AMP_MAX 3
-
-/**
- * ice_cgu_cfg_pps_out - Configure 1PPS output from CGU
- * @hw: pointer to the HW struct
- * @enable: true to enable 1PPS output, false to disable it
- *
- * Return: 0 on success, other negative error code when CGU read/write failed
- */
-int ice_cgu_cfg_pps_out(struct ice_hw *hw, bool enable)
-{
- union nac_cgu_dword9 dw9;
- int err;
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
- if (err)
- return err;
-
- dw9.one_pps_out_en = enable;
- dw9.one_pps_out_amp = enable * ICE_ONE_PPS_OUT_AMP_MAX;
- return ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
-}
-
-/**
- * ice_cfg_cgu_pll_dis_sticky_bits_e82x - disable TS PLL sticky bits
- * @hw: pointer to the HW struct
- *
- * Configure the Clock Generation Unit TS PLL sticky bits so they don't latch on
- * losing TS PLL lock, but always show current state.
- *
- * Return: 0 on success, other error codes when failed to read/write CGU
- */
-static int ice_cfg_cgu_pll_dis_sticky_bits_e82x(struct ice_hw *hw)
-{
- union tspll_cntr_bist_settings cntr_bist;
- int err;
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
- &cntr_bist.val);
- if (err)
- return err;
-
- /* Disable sticky lock detection so lock err reported is accurate */
- cntr_bist.i_plllock_sel_0 = 0;
- cntr_bist.i_plllock_sel_1 = 0;
-
- return ice_write_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
- cntr_bist.val);
-}
-
-/**
- * ice_cfg_cgu_pll_dis_sticky_bits_e825c - disable TS PLL sticky bits for E825-C
- * @hw: pointer to the HW struct
- *
- * Configure the Clock Generation Unit TS PLL sticky bits so they don't latch on
- * losing TS PLL lock, but always show current state.
- *
- * Return: 0 on success, other error codes when failed to read/write CGU
- */
-static int ice_cfg_cgu_pll_dis_sticky_bits_e825c(struct ice_hw *hw)
-{
- union tspll_bw_tdc_e825c bw_tdc;
- int err;
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_BW_TDC_E825C, &bw_tdc.val);
- if (err)
- return err;
-
- bw_tdc.i_plllock_sel_1_0 = 0;
-
- return ice_write_cgu_reg_e82x(hw, TSPLL_BW_TDC_E825C, bw_tdc.val);
-}
-
-/**
- * ice_init_cgu_e82x - Initialize CGU with settings from firmware
- * @hw: pointer to the HW structure
- *
- * Initialize the Clock Generation Unit of the E822 device.
- *
- * Return: 0 on success, other error codes when failed to read/write/cfg CGU
- */
-static int ice_init_cgu_e82x(struct ice_hw *hw)
-{
- struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
- int err;
-
- /* Disable sticky lock detection so lock err reported is accurate */
- if (ice_is_e825c(hw))
- err = ice_cfg_cgu_pll_dis_sticky_bits_e825c(hw);
- else
- err = ice_cfg_cgu_pll_dis_sticky_bits_e82x(hw);
- if (err)
- return err;
-
- /* Configure the CGU PLL using the parameters from the function
- * capabilities.
- */
- if (ice_is_e825c(hw))
- err = ice_cfg_cgu_pll_e825c(hw, ts_info->time_ref,
- (enum ice_clk_src)ts_info->clk_src);
- else
- err = ice_cfg_cgu_pll_e82x(hw, ts_info->time_ref,
- (enum ice_clk_src)ts_info->clk_src);
-
- return err;
-}
-
-/**
* ice_ptp_tmr_cmd_to_src_reg - Convert to source timer command value
* @hw: pointer to HW struct
* @cmd: Timer command
@@ -827,8 +285,9 @@ static u32 ice_ptp_tmr_cmd_to_port_reg(struct ice_hw *hw,
/* Certain hardware families share the same register values for the
* port register and source timer register.
*/
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
return ice_ptp_tmr_cmd_to_src_reg(hw, cmd) & TS_CMD_MASK_E810;
default:
break;
@@ -873,8 +332,12 @@ static u32 ice_ptp_tmr_cmd_to_port_reg(struct ice_hw *hw,
*/
void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
u32 cmd_val = ice_ptp_tmr_cmd_to_src_reg(hw, cmd);
+ if (!ice_is_primary(hw))
+ hw = ice_get_primary_hw(pf);
+
wr32(hw, GLTSYN_CMD, cmd_val);
}
@@ -890,41 +353,78 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
{
struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
+ if (!ice_is_primary(hw))
+ hw = ice_get_primary_hw(pf);
+
guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD);
ice_flush(hw);
}
+/**
+ * ice_ptp_cfg_sync_delay - Configure PHC to PHY synchronization delay
+ * @hw: pointer to HW struct
+ * @delay: delay between PHC and PHY SYNC command execution in nanoseconds
+ */
+static void ice_ptp_cfg_sync_delay(const struct ice_hw *hw, u32 delay)
+{
+ wr32(hw, GLTSYN_SYNC_DLAY, delay);
+ ice_flush(hw);
+}
+
/* 56G PHY device functions
*
* The following functions operate on devices with the ETH 56G PHY.
*/
/**
+ * ice_ptp_get_dest_dev_e825 - get destination PHY for given port number
+ * @hw: pointer to the HW struct
+ * @port: destination port
+ *
+ * Return: destination sideband queue PHY device.
+ */
+static enum ice_sbq_dev_id ice_ptp_get_dest_dev_e825(struct ice_hw *hw,
+ u8 port)
+{
+ u8 curr_phy, tgt_phy;
+
+ tgt_phy = port >= hw->ptp.ports_per_phy;
+ curr_phy = hw->lane_num >= hw->ptp.ports_per_phy;
+ /* In the driver, lanes 4..7 are in fact 0..3 on a second PHY.
+ * On a single complex E825C, PHY 0 is always destination device phy_0
+ * and PHY 1 is phy_0_peer.
+ * On dual complex E825C, device phy_0 points to PHY on a current
+ * complex and phy_0_peer to PHY on a different complex.
+ */
+ if ((!ice_is_dual(hw) && tgt_phy == 1) ||
+ (ice_is_dual(hw) && tgt_phy != curr_phy))
+ return ice_sbq_dev_phy_0_peer;
+ else
+ return ice_sbq_dev_phy_0;
+}
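
For the usual ports_per_phy == 4 layout, the helper above boils down to the following mapping (derived from the logic above, shown only for illustration; phy_0 and phy_0_peer abbreviate ice_sbq_dev_phy_0 and ice_sbq_dev_phy_0_peer):

	/*
	 *                              ports 0..3      ports 4..7
	 * single complex:              phy_0           phy_0_peer
	 * dual complex, lane 0..3:     phy_0           phy_0_peer
	 * dual complex, lane 4..7:     phy_0_peer      phy_0
	 */
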
+
+/**
* ice_write_phy_eth56g - Write a PHY port register
* @hw: pointer to the HW struct
- * @phy_idx: PHY index
+ * @port: destination port
* @addr: PHY register address
* @val: Value to write
*
* Return: 0 on success, other error codes when failed to write to PHY
*/
-static int ice_write_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
- u32 val)
+static int ice_write_phy_eth56g(struct ice_hw *hw, u8 port, u32 addr, u32 val)
{
- struct ice_sbq_msg_input phy_msg;
+ struct ice_sbq_msg_input msg = {
+ .dest_dev = ice_ptp_get_dest_dev_e825(hw, port),
+ .opcode = ice_sbq_msg_wr,
+ .msg_addr_low = lower_16_bits(addr),
+ .msg_addr_high = upper_16_bits(addr),
+ .data = val
+ };
int err;
- phy_msg.opcode = ice_sbq_msg_wr;
-
- phy_msg.msg_addr_low = lower_16_bits(addr);
- phy_msg.msg_addr_high = upper_16_bits(addr);
-
- phy_msg.data = val;
- phy_msg.dest_dev = hw->ptp.phy.eth56g.phy_addr[phy_idx];
-
- err = ice_sbq_rw_reg(hw, &phy_msg, ICE_AQ_FLAG_RD);
-
+ err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err)
ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n",
err);
@@ -935,41 +435,36 @@ static int ice_write_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
/**
* ice_read_phy_eth56g - Read a PHY port register
* @hw: pointer to the HW struct
- * @phy_idx: PHY index
+ * @port: destination port
* @addr: PHY register address
* @val: Value to read
*
* Return: 0 on success, other error codes when failed to read from PHY
*/
-static int ice_read_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
- u32 *val)
+static int ice_read_phy_eth56g(struct ice_hw *hw, u8 port, u32 addr, u32 *val)
{
- struct ice_sbq_msg_input phy_msg;
+ struct ice_sbq_msg_input msg = {
+ .dest_dev = ice_ptp_get_dest_dev_e825(hw, port),
+ .opcode = ice_sbq_msg_rd,
+ .msg_addr_low = lower_16_bits(addr),
+ .msg_addr_high = upper_16_bits(addr)
+ };
int err;
- phy_msg.opcode = ice_sbq_msg_rd;
-
- phy_msg.msg_addr_low = lower_16_bits(addr);
- phy_msg.msg_addr_high = upper_16_bits(addr);
-
- phy_msg.data = 0;
- phy_msg.dest_dev = hw->ptp.phy.eth56g.phy_addr[phy_idx];
-
- err = ice_sbq_rw_reg(hw, &phy_msg, ICE_AQ_FLAG_RD);
- if (err) {
+ err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
+ if (err)
ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n",
err);
- return err;
- }
-
- *val = phy_msg.data;
+ else
+ *val = msg.data;
- return 0;
+ return err;
}
/**
* ice_phy_res_address_eth56g - Calculate a PHY port register address
- * @port: Port number to be written
+ * @hw: pointer to the HW struct
+ * @lane: Lane number
* @res_type: resource type (register/memory)
* @offset: Offset from PHY port register base
* @addr: The result address
@@ -978,17 +473,19 @@ static int ice_read_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
* * %0 - success
* * %EINVAL - invalid port number or resource type
*/
-static int ice_phy_res_address_eth56g(u8 port, enum eth56g_res_type res_type,
- u32 offset, u32 *addr)
+static int ice_phy_res_address_eth56g(struct ice_hw *hw, u8 lane,
+ enum eth56g_res_type res_type,
+ u32 offset,
+ u32 *addr)
{
- u8 lane = port % ICE_PORTS_PER_QUAD;
- u8 phy = ICE_GET_QUAD_NUM(port);
-
if (res_type >= NUM_ETH56G_PHY_RES)
return -EINVAL;
- *addr = eth56g_phy_res[res_type].base[phy] +
+ /* Lanes 4..7 are in fact 0..3 on a second PHY */
+ lane %= hw->ptp.ports_per_phy;
+ *addr = eth56g_phy_res[res_type].base_addr +
lane * eth56g_phy_res[res_type].step + offset;
+
return 0;
}
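
As a worked example of the calculation above, take the ETH56G_PHY_REG_PTP resource from ice_ptp_consts.h (base_addr 0x092000, step 0x98) and assume ptp.ports_per_phy == 4: port 6 reduces to lane 2, so a register at offset 0x10 resolves to 0x092000 + 2 * 0x98 + 0x10 = 0x092140, while the choice between the local PHY and its peer is made separately by ice_ptp_get_dest_dev_e825().
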
@@ -1008,19 +505,17 @@ static int ice_phy_res_address_eth56g(u8 port, enum eth56g_res_type res_type,
static int ice_write_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
u32 val, enum eth56g_res_type res_type)
{
- u8 phy_port = port % hw->ptp.ports_per_phy;
- u8 phy_idx = port / hw->ptp.ports_per_phy;
u32 addr;
int err;
if (port >= hw->ptp.num_lports)
return -EINVAL;
- err = ice_phy_res_address_eth56g(phy_port, res_type, offset, &addr);
+ err = ice_phy_res_address_eth56g(hw, port, res_type, offset, &addr);
if (err)
return err;
- return ice_write_phy_eth56g(hw, phy_idx, addr, val);
+ return ice_write_phy_eth56g(hw, port, addr, val);
}
/**
@@ -1039,19 +534,17 @@ static int ice_write_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
static int ice_read_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
u32 *val, enum eth56g_res_type res_type)
{
- u8 phy_port = port % hw->ptp.ports_per_phy;
- u8 phy_idx = port / hw->ptp.ports_per_phy;
u32 addr;
int err;
if (port >= hw->ptp.num_lports)
return -EINVAL;
- err = ice_phy_res_address_eth56g(phy_port, res_type, offset, &addr);
+ err = ice_phy_res_address_eth56g(hw, port, res_type, offset, &addr);
if (err)
return err;
- return ice_read_phy_eth56g(hw, phy_idx, addr, val);
+ return ice_read_phy_eth56g(hw, port, addr, val);
}
/**
@@ -1201,6 +694,56 @@ static int ice_write_port_mem_eth56g(struct ice_hw *hw, u8 port, u16 offset,
}
/**
+ * ice_write_quad_ptp_reg_eth56g - Write a PHY quad register
+ * @hw: pointer to the HW struct
+ * @offset: PHY register offset
+ * @port: Port number
+ * @val: Value to write
+ *
+ * Return:
+ * * %0 - success
+ * * %EIO - invalid port number or resource type
+ * * %other - failed to write to PHY
+ */
+static int ice_write_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
+ u32 offset, u32 val)
+{
+ u32 addr;
+
+ if (port >= hw->ptp.num_lports)
+ return -EIO;
+
+ addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base_addr + offset;
+
+ return ice_write_phy_eth56g(hw, port, addr, val);
+}
+
+/**
+ * ice_read_quad_ptp_reg_eth56g - Read a PHY quad register
+ * @hw: pointer to the HW struct
+ * @offset: PHY register offset
+ * @port: Port number
+ * @val: Value to read
+ *
+ * Return:
+ * * %0 - success
+ * * %EIO - invalid port number or resource type
+ * * %other - failed to read from PHY
+ */
+static int ice_read_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
+ u32 offset, u32 *val)
+{
+ u32 addr;
+
+ if (port >= hw->ptp.num_lports)
+ return -EIO;
+
+ addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base_addr + offset;
+
+ return ice_read_phy_eth56g(hw, port, addr, val);
+}
+
+/**
* ice_is_64b_phy_reg_eth56g - Check if this is a 64bit PHY register
* @low_addr: the low address to check
* @high_addr: on return, contains the high address of the 64bit register
@@ -1518,8 +1061,8 @@ static int ice_read_ptp_tstamp_eth56g(struct ice_hw *hw, u8 port, u8 idx,
* lower 8 bits in the low register, and the upper 32 bits in the high
* register.
*/
- *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M);
-
+ *tstamp = FIELD_PREP(PHY_40B_HIGH_M, hi) |
+ FIELD_PREP(PHY_40B_LOW_M, lo);
return 0;
}
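
Assuming PHY_40B_LOW_M covers bits 7:0 and PHY_40B_HIGH_M bits 39:8 (the masks live in ice_ptp_hw.h and are not part of this hunk), the FIELD_PREP pair above is equivalent to the plain shift-and-mask form:

	/* 40-bit Tx timestamp: 32 bits from the high register, 8 from the low */
	*tstamp = ((u64)hi << 8) | (lo & 0xff);
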
@@ -1918,7 +1461,6 @@ ice_phy_get_speed_eth56g(struct ice_link_status *li)
*/
static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
{
- u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
u32 val;
int err;
@@ -1933,8 +1475,8 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
switch (ice_phy_get_speed_eth56g(&hw->port_info->phy.link_info)) {
case ICE_ETH56G_LNK_SPD_1G:
case ICE_ETH56G_LNK_SPD_2_5G:
- err = ice_read_ptp_reg_eth56g(hw, port_blk,
- PHY_GPCS_CONFIG_REG0, &val);
+ err = ice_read_quad_ptp_reg_eth56g(hw, port,
+ PHY_GPCS_CONFIG_REG0, &val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read PHY_GPCS_CONFIG_REG0, status: %d",
err);
@@ -1945,8 +1487,8 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
val |= FIELD_PREP(PHY_GPCS_CONFIG_REG0_TX_THR_M,
ICE_ETH56G_NOMINAL_TX_THRESH);
- err = ice_write_ptp_reg_eth56g(hw, port_blk,
- PHY_GPCS_CONFIG_REG0, val);
+ err = ice_write_quad_ptp_reg_eth56g(hw, port,
+ PHY_GPCS_CONFIG_REG0, val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_GPCS_CONFIG_REG0, status: %d",
err);
@@ -1987,50 +1529,47 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
*/
int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port)
{
- u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
- u8 blk_port = port & (ICE_PORTS_PER_QUAD - 1);
+ u8 quad_lane = port % ICE_PORTS_PER_QUAD;
+ u32 addr, val, peer_delay;
bool enable, sfd_ena;
- u32 val, peer_delay;
int err;
enable = hw->ptp.phy.eth56g.onestep_ena;
peer_delay = hw->ptp.phy.eth56g.peer_delay;
sfd_ena = hw->ptp.phy.eth56g.sfd_ena;
- /* PHY_PTP_1STEP_CONFIG */
- err = ice_read_ptp_reg_eth56g(hw, port_blk, PHY_PTP_1STEP_CONFIG, &val);
+ addr = PHY_PTP_1STEP_CONFIG;
+ err = ice_read_quad_ptp_reg_eth56g(hw, port, addr, &val);
if (err)
return err;
if (enable)
- val |= blk_port;
+ val |= BIT(quad_lane);
else
- val &= ~blk_port;
+ val &= ~BIT(quad_lane);
val &= ~(PHY_PTP_1STEP_T1S_UP64_M | PHY_PTP_1STEP_T1S_DELTA_M);
- err = ice_write_ptp_reg_eth56g(hw, port_blk, PHY_PTP_1STEP_CONFIG, val);
+ err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
if (err)
return err;
- /* PHY_PTP_1STEP_PEER_DELAY */
+ addr = PHY_PTP_1STEP_PEER_DELAY(quad_lane);
val = FIELD_PREP(PHY_PTP_1STEP_PD_DELAY_M, peer_delay);
if (peer_delay)
val |= PHY_PTP_1STEP_PD_ADD_PD_M;
val |= PHY_PTP_1STEP_PD_DLY_V_M;
- err = ice_write_ptp_reg_eth56g(hw, port_blk,
- PHY_PTP_1STEP_PEER_DELAY(blk_port), val);
+ err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
if (err)
return err;
val &= ~PHY_PTP_1STEP_PD_DLY_V_M;
- err = ice_write_ptp_reg_eth56g(hw, port_blk,
- PHY_PTP_1STEP_PEER_DELAY(blk_port), val);
+ err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
if (err)
return err;
- /* PHY_MAC_XIF_MODE */
- err = ice_read_mac_reg_eth56g(hw, port, PHY_MAC_XIF_MODE, &val);
+ addr = PHY_MAC_XIF_MODE;
+ err = ice_read_mac_reg_eth56g(hw, port, addr, &val);
if (err)
return err;
@@ -2050,7 +1589,7 @@ int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port)
FIELD_PREP(PHY_MAC_XIF_TS_BIN_MODE_M, enable) |
FIELD_PREP(PHY_MAC_XIF_TS_SFD_ENA_M, sfd_ena);
- return ice_write_mac_reg_eth56g(hw, port, PHY_MAC_XIF_MODE, val);
+ return ice_write_mac_reg_eth56g(hw, port, addr, val);
}
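
To make the lane arithmetic above concrete: with ICE_PORTS_PER_QUAD == 4, port 6 yields quad_lane 2, so its one-step enable is bit 2 of PHY_PTP_1STEP_CONFIG and its peer delay goes through PHY_PTP_1STEP_PEER_DELAY(2); previously the same registers were addressed indirectly via the port_blk/blk_port pair.
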
/**
@@ -2092,21 +1631,22 @@ static u32 ice_ptp_calc_bitslip_eth56g(struct ice_hw *hw, u8 port, u32 bs,
bool fc, bool rs,
enum ice_eth56g_link_spd spd)
{
- u8 port_offset = port & (ICE_PORTS_PER_QUAD - 1);
- u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
u32 bitslip;
int err;
if (!bs || rs)
return 0;
- if (spd == ICE_ETH56G_LNK_SPD_1G || spd == ICE_ETH56G_LNK_SPD_2_5G)
+ if (spd == ICE_ETH56G_LNK_SPD_1G || spd == ICE_ETH56G_LNK_SPD_2_5G) {
err = ice_read_gpcs_reg_eth56g(hw, port, PHY_GPCS_BITSLIP,
&bitslip);
- else
- err = ice_read_ptp_reg_eth56g(hw, port_blk,
- PHY_REG_SD_BIT_SLIP(port_offset),
- &bitslip);
+ } else {
+ u8 quad_lane = port % ICE_PORTS_PER_QUAD;
+ u32 addr;
+
+ addr = PHY_REG_SD_BIT_SLIP(quad_lane);
+ err = ice_read_quad_ptp_reg_eth56g(hw, port, addr, &bitslip);
+ }
if (err)
return 0;
@@ -2350,6 +1890,7 @@ int ice_phy_cfg_intr_eth56g(struct ice_hw *hw, u8 port, bool ena, u8 threshold)
static int ice_read_phy_and_phc_time_eth56g(struct ice_hw *hw, u8 port,
u64 *phy_time, u64 *phc_time)
{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
u64 tx_time, rx_time;
u32 zo, lo;
u8 tmr_idx;
@@ -2369,8 +1910,13 @@ static int ice_read_phy_and_phc_time_eth56g(struct ice_hw *hw, u8 port,
ice_ptp_exec_tmr_cmd(hw);
/* Read the captured PHC time from the shadow time registers */
- zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
- lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx));
+ if (ice_is_primary(hw)) {
+ zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
+ lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx));
+ } else {
+ zo = rd32(ice_get_primary_hw(pf), GLTSYN_SHTIME_0(tmr_idx));
+ lo = rd32(ice_get_primary_hw(pf), GLTSYN_SHTIME_L(tmr_idx));
+ }
*phc_time = (u64)lo << 32 | zo;
/* Read the captured PHY time from the PHY shadow registers */
@@ -2507,6 +2053,7 @@ int ice_stop_phy_timer_eth56g(struct ice_hw *hw, u8 port, bool soft_reset)
*/
int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port)
{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
u32 lo, hi;
u64 incval;
u8 tmr_idx;
@@ -2532,8 +2079,13 @@ int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port)
if (err)
return err;
- lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
- hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
+ if (ice_is_primary(hw)) {
+ lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
+ hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
+ } else {
+ lo = rd32(ice_get_primary_hw(pf), GLTSYN_INCVAL_L(tmr_idx));
+ hi = rd32(ice_get_primary_hw(pf), GLTSYN_INCVAL_H(tmr_idx));
+ }
incval = (u64)hi << 32 | lo;
err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_REG_TIMETUS_L, incval);
@@ -2564,42 +2116,6 @@ int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port)
}
/**
- * ice_sb_access_ena_eth56g - Enable SB devices (PHY and others) access
- * @hw: pointer to HW struct
- * @enable: Enable or disable access
- *
- * Enable sideband devices (PHY and others) access.
- */
-static void ice_sb_access_ena_eth56g(struct ice_hw *hw, bool enable)
-{
- u32 val = rd32(hw, PF_SB_REM_DEV_CTL);
-
- if (enable)
- val |= BIT(eth56g_phy_0) | BIT(cgu) | BIT(eth56g_phy_1);
- else
- val &= ~(BIT(eth56g_phy_0) | BIT(cgu) | BIT(eth56g_phy_1));
-
- wr32(hw, PF_SB_REM_DEV_CTL, val);
-}
-
-/**
- * ice_ptp_init_phc_eth56g - Perform E82X specific PHC initialization
- * @hw: pointer to HW struct
- *
- * Perform PHC initialization steps specific to E82X devices.
- *
- * Return:
- * * %0 - success
- * * %other - failed to initialize CGU
- */
-static int ice_ptp_init_phc_eth56g(struct ice_hw *hw)
-{
- ice_sb_access_ena_eth56g(hw, true);
- /* Initialize the Clock Generation Unit */
- return ice_init_cgu_e82x(hw);
-}
-
-/**
* ice_ptp_read_tx_hwtstamp_status_eth56g - Get TX timestamp status
* @hw: pointer to the HW struct
* @ts_status: the timestamp mask pointer
@@ -2666,59 +2182,21 @@ static int ice_get_phy_tx_tstamp_ready_eth56g(struct ice_hw *hw, u8 port,
}
/**
- * ice_is_muxed_topo - detect breakout 2x50G topology for E825C
- * @hw: pointer to the HW struct
- *
- * Return: true if it's 2x50 breakout topology, false otherwise
- */
-static bool ice_is_muxed_topo(struct ice_hw *hw)
-{
- u8 link_topo;
- bool mux;
- u32 val;
-
- val = rd32(hw, GLGEN_SWITCH_MODE_CONFIG);
- mux = FIELD_GET(GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M, val);
- val = rd32(hw, GLGEN_MAC_LINK_TOPO);
- link_topo = FIELD_GET(GLGEN_MAC_LINK_TOPO_LINK_TOPO_M, val);
-
- return (mux && link_topo == ICE_LINK_TOPO_UP_TO_2_LINKS);
-}
-
-/**
- * ice_ptp_init_phy_e825c - initialize PHY parameters
+ * ice_ptp_init_phy_e825 - initialize PHY parameters
* @hw: pointer to the HW struct
*/
-static void ice_ptp_init_phy_e825c(struct ice_hw *hw)
+static void ice_ptp_init_phy_e825(struct ice_hw *hw)
{
struct ice_ptp_hw *ptp = &hw->ptp;
struct ice_eth56g_params *params;
- u8 phy;
- ptp->phy_model = ICE_PHY_ETH56G;
params = &ptp->phy.eth56g;
params->onestep_ena = false;
params->peer_delay = 0;
params->sfd_ena = false;
- params->phy_addr[0] = eth56g_phy_0;
- params->phy_addr[1] = eth56g_phy_1;
params->num_phys = 2;
ptp->ports_per_phy = 4;
ptp->num_lports = params->num_phys * ptp->ports_per_phy;
-
- ice_sb_access_ena_eth56g(hw, true);
- for (phy = 0; phy < params->num_phys; phy++) {
- u32 phy_rev;
- int err;
-
- err = ice_read_phy_eth56g(hw, phy, PHY_REG_REVISION, &phy_rev);
- if (err || phy_rev != PHY_REVISION_ETH56G) {
- ptp->phy_model = ICE_PHY_UNSUP;
- return;
- }
- }
-
- ptp->is_2x50g_muxed_topo = ice_is_muxed_topo(hw);
}
/* E822 family functions
@@ -2737,10 +2215,9 @@ static void ice_fill_phy_msg_e82x(struct ice_hw *hw,
struct ice_sbq_msg_input *msg, u8 port,
u16 offset)
{
- int phy_port, phy, quadtype;
+ int phy_port, quadtype;
phy_port = port % hw->ptp.ports_per_phy;
- phy = port / hw->ptp.ports_per_phy;
quadtype = ICE_GET_QUAD_NUM(port) %
ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy);
@@ -2752,12 +2229,7 @@ static void ice_fill_phy_msg_e82x(struct ice_hw *hw,
msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port);
}
- if (phy == 0)
- msg->dest_dev = rmn_0;
- else if (phy == 1)
- msg->dest_dev = rmn_1;
- else
- msg->dest_dev = rmn_2;
+ msg->dest_dev = ice_sbq_dev_phy_0;
}
/**
@@ -2876,7 +2348,7 @@ ice_read_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
ice_fill_phy_msg_e82x(hw, &msg, port, offset);
msg.opcode = ice_sbq_msg_rd;
- err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
+ err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -2954,7 +2426,7 @@ ice_write_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 val)
msg.opcode = ice_sbq_msg_wr;
msg.data = val;
- err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
+ err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -3080,7 +2552,7 @@ static int ice_fill_quad_msg_e82x(struct ice_hw *hw,
if (quad >= ICE_GET_QUAD_NUM(hw->ptp.num_lports))
return -EINVAL;
- msg->dest_dev = rmn_0;
+ msg->dest_dev = ice_sbq_dev_phy_0;
if (!(quad % ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy)))
addr = Q_0_BASE + offset;
@@ -3115,7 +2587,7 @@ ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
msg.opcode = ice_sbq_msg_rd;
- err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
+ err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -3150,7 +2622,7 @@ ice_write_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
msg.opcode = ice_sbq_msg_wr;
msg.data = val;
- err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
+ err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -3199,7 +2671,8 @@ ice_read_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
* lower 8 bits in the low register, and the upper 32 bits in the high
* register.
*/
- *tstamp = FIELD_PREP(TS_PHY_HIGH_M, hi) | FIELD_PREP(TS_PHY_LOW_M, lo);
+ *tstamp = FIELD_PREP(PHY_40B_HIGH_M, hi) |
+ FIELD_PREP(PHY_40B_LOW_M, lo);
return 0;
}
@@ -3301,7 +2774,6 @@ static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
*/
static int ice_ptp_init_phc_e82x(struct ice_hw *hw)
{
- int err;
u32 val;
/* Enable reading switch and PHY registers over the sideband queue */
@@ -3311,11 +2783,6 @@ static int ice_ptp_init_phc_e82x(struct ice_hw *hw)
val |= (PF_SB_REM_DEV_CTL_SWITCH_READ | PF_SB_REM_DEV_CTL_PHY0);
wr32(hw, PF_SB_REM_DEV_CTL, val);
- /* Initialize the Clock Generation Unit */
- err = ice_init_cgu_e82x(hw);
- if (err)
- return err;
-
/* Set window length for all the ports */
return ice_ptp_set_vernier_wl(hw);
}
@@ -4772,7 +4239,6 @@ int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold)
*/
static void ice_ptp_init_phy_e82x(struct ice_ptp_hw *ptp)
{
- ptp->phy_model = ICE_PHY_E82X;
ptp->num_lports = 8;
ptp->ports_per_phy = 8;
}
@@ -4799,9 +4265,9 @@ static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
msg.msg_addr_low = lower_16_bits(addr);
msg.msg_addr_high = upper_16_bits(addr);
msg.opcode = ice_sbq_msg_rd;
- msg.dest_dev = rmn_0;
+ msg.dest_dev = ice_sbq_dev_phy_0;
- err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
+ err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -4829,10 +4295,10 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
msg.msg_addr_low = lower_16_bits(addr);
msg.msg_addr_high = upper_16_bits(addr);
msg.opcode = ice_sbq_msg_wr;
- msg.dest_dev = rmn_0;
+ msg.dest_dev = ice_sbq_dev_phy_0;
msg.data = val;
- err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
+ err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -4856,33 +4322,46 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
static int
ice_read_phy_tstamp_ll_e810(struct ice_hw *hw, u8 idx, u8 *hi, u32 *lo)
{
+ struct ice_e810_params *params = &hw->ptp.phy.e810;
+ unsigned long flags;
u32 val;
- u8 i;
+ int err;
+
+ spin_lock_irqsave(&params->atqbal_wq.lock, flags);
+
+ /* Wait for any pending in-progress low latency interrupt */
+ err = wait_event_interruptible_locked_irq(params->atqbal_wq,
+ !(params->atqbal_flags &
+ ATQBAL_FLAGS_INTR_IN_PROGRESS));
+ if (err) {
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
+ return err;
+ }
/* Write TS index to read to the PF register so the FW can read it */
- val = FIELD_PREP(TS_LL_READ_TS_IDX, idx) | TS_LL_READ_TS;
- wr32(hw, PF_SB_ATQBAL, val);
+ val = FIELD_PREP(REG_LL_PROXY_H_TS_IDX, idx) | REG_LL_PROXY_H_EXEC;
+ wr32(hw, REG_LL_PROXY_H, val);
/* Read the register repeatedly until the FW provides us the TS */
- for (i = TS_LL_READ_RETRIES; i > 0; i--) {
- val = rd32(hw, PF_SB_ATQBAL);
+ err = read_poll_timeout_atomic(rd32, val,
+ !FIELD_GET(REG_LL_PROXY_H_EXEC, val), 10,
+ REG_LL_PROXY_H_TIMEOUT_US, false, hw,
+ REG_LL_PROXY_H);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n");
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
+ return err;
+ }
- /* When the bit is cleared, the TS is ready in the register */
- if (!(FIELD_GET(TS_LL_READ_TS, val))) {
- /* High 8 bit value of the TS is on the bits 16:23 */
- *hi = FIELD_GET(TS_LL_READ_TS_HIGH, val);
+ /* High 8 bit value of the TS is on the bits 16:23 */
+ *hi = FIELD_GET(REG_LL_PROXY_H_TS_HIGH, val);
- /* Read the low 32 bit value and set the TS valid bit */
- *lo = rd32(hw, PF_SB_ATQBAH) | TS_VALID;
- return 0;
- }
+ /* Read the low 32 bit value and set the TS valid bit */
+ *lo = rd32(hw, REG_LL_PROXY_L) | TS_VALID;
- udelay(10);
- }
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
- /* FW failed to provide the TS in time */
- ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n");
- return -EINVAL;
+ return 0;
}
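
Stripped of the ice specifics, the low latency proxy access added here is a simple request/poll handshake: write the request with REG_LL_PROXY_H_EXEC set, poll the same register until firmware clears EXEC, then pick up the reply. A minimal sketch, under the assumption that the caller already holds params->atqbal_wq.lock and no low latency interrupt is in flight (request and reply are placeholder variables):

	wr32(hw, REG_LL_PROXY_H, request | REG_LL_PROXY_H_EXEC);
	err = read_poll_timeout_atomic(rd32, val,
				       !FIELD_GET(REG_LL_PROXY_H_EXEC, val),
				       10, REG_LL_PROXY_H_TIMEOUT_US, false,
				       hw, REG_LL_PROXY_H);
	if (!err)
		reply = rd32(hw, REG_LL_PROXY_L);
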
/**
@@ -4953,7 +4432,8 @@ ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
/* For E810 devices, the timestamp is reported with the lower 32 bits
* in the low register, and the upper 8 bits in the high register.
*/
- *tstamp = ((u64)hi) << TS_HIGH_S | ((u64)lo & TS_LOW_M);
+ *tstamp = FIELD_PREP(PHY_EXT_40B_HIGH_M, hi) |
+ FIELD_PREP(PHY_EXT_40B_LOW_M, lo);
return 0;
}
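
Note that this external layout is the mirror image of the internal PHY format handled earlier in the patch: here the low register holds bits 31:0 and the high register bits 39:32, so (assuming the PHY_EXT_40B_* masks are defined accordingly) the composition is simply:

	*tstamp = ((u64)hi << 32) | lo;
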
@@ -5016,8 +4496,7 @@ static int ice_ptp_init_phc_e810(struct ice_hw *hw)
u8 tmr_idx;
int err;
- /* Ensure synchronization delay is zero */
- wr32(hw, GLTSYN_SYNC_DLAY, 0);
+ ice_ptp_cfg_sync_delay(hw, ICE_E810_E830_SYNC_DELAY);
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx),
@@ -5065,6 +4544,55 @@ static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
}
/**
+ * ice_ptp_prep_phy_adj_ll_e810 - Prep PHY ports for a time adjustment
+ * @hw: pointer to HW struct
+ * @adj: adjustment value to program
+ *
+ * Use the low latency firmware interface to program PHY time adjustment to
+ * all PHY ports.
+ *
+ * Return: 0 on success, -EBUSY on timeout
+ */
+static int ice_ptp_prep_phy_adj_ll_e810(struct ice_hw *hw, s32 adj)
+{
+ const u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ struct ice_e810_params *params = &hw->ptp.phy.e810;
+ u32 val;
+ int err;
+
+ spin_lock_irq(&params->atqbal_wq.lock);
+
+ /* Wait for any pending in-progress low latency interrupt */
+ err = wait_event_interruptible_locked_irq(params->atqbal_wq,
+ !(params->atqbal_flags &
+ ATQBAL_FLAGS_INTR_IN_PROGRESS));
+ if (err) {
+ spin_unlock_irq(&params->atqbal_wq.lock);
+ return err;
+ }
+
+ wr32(hw, REG_LL_PROXY_L, adj);
+ val = FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_CMD_M, REG_LL_PROXY_H_PHY_TMR_CMD_ADJ) |
+ FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_IDX_M, tmr_idx) | REG_LL_PROXY_H_EXEC;
+ wr32(hw, REG_LL_PROXY_H, val);
+
+ /* Read the register repeatedly until the FW indicates completion */
+ err = read_poll_timeout_atomic(rd32, val,
+ !FIELD_GET(REG_LL_PROXY_H_EXEC, val),
+ 10, REG_LL_PROXY_H_TIMEOUT_US, false, hw,
+ REG_LL_PROXY_H);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY timer adjustment using low latency interface\n");
+ spin_unlock_irq(&params->atqbal_wq.lock);
+ return err;
+ }
+
+ spin_unlock_irq(&params->atqbal_wq.lock);
+
+ return 0;
+}
+
+/**
* ice_ptp_prep_phy_adj_e810 - Prep PHY port for a time adjustment
* @hw: pointer to HW struct
* @adj: adjustment value to program
@@ -5082,6 +4610,9 @@ static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
u8 tmr_idx;
int err;
+ if (hw->dev_caps.ts_dev_info.ll_phy_tmr_update)
+ return ice_ptp_prep_phy_adj_ll_e810(hw, adj);
+
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
/* Adjustments are represented as signed 2's complement values in
@@ -5105,6 +4636,56 @@ static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
}
/**
+ * ice_ptp_prep_phy_incval_ll_e810 - Prep PHY ports increment value change
+ * @hw: pointer to HW struct
+ * @incval: The new 40bit increment value to prepare
+ *
+ * Use the low latency firmware interface to program PHY time increment value
+ * for all PHY ports.
+ *
+ * Return: 0 on success, -EBUSY on timeout
+ */
+static int ice_ptp_prep_phy_incval_ll_e810(struct ice_hw *hw, u64 incval)
+{
+ const u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ struct ice_e810_params *params = &hw->ptp.phy.e810;
+ u32 val;
+ int err;
+
+ spin_lock_irq(&params->atqbal_wq.lock);
+
+ /* Wait for any pending in-progress low latency interrupt */
+ err = wait_event_interruptible_locked_irq(params->atqbal_wq,
+ !(params->atqbal_flags &
+ ATQBAL_FLAGS_INTR_IN_PROGRESS));
+ if (err) {
+ spin_unlock_irq(&params->atqbal_wq.lock);
+ return err;
+ }
+
+ wr32(hw, REG_LL_PROXY_L, lower_32_bits(incval));
+ val = FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_CMD_M, REG_LL_PROXY_H_PHY_TMR_CMD_FREQ) |
+ FIELD_PREP(REG_LL_PROXY_H_TS_HIGH, (u8)upper_32_bits(incval)) |
+ FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_IDX_M, tmr_idx) | REG_LL_PROXY_H_EXEC;
+ wr32(hw, REG_LL_PROXY_H, val);
+
+ /* Read the register repeatedly until the FW indicates completion */
+ err = read_poll_timeout_atomic(rd32, val,
+ !FIELD_GET(REG_LL_PROXY_H_EXEC, val),
+ 10, REG_LL_PROXY_H_TIMEOUT_US, false, hw,
+ REG_LL_PROXY_H);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY timer increment using low latency interface\n");
+ spin_unlock_irq(&params->atqbal_wq.lock);
+ return err;
+ }
+
+ spin_unlock_irq(&params->atqbal_wq.lock);
+
+ return 0;
+}
+
+/**
* ice_ptp_prep_phy_incval_e810 - Prep PHY port increment value change
* @hw: pointer to HW struct
* @incval: The new 40bit increment value to prepare
@@ -5119,6 +4700,9 @@ static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
u8 tmr_idx;
int err;
+ if (hw->dev_caps.ts_dev_info.ll_phy_tmr_update)
+ return ice_ptp_prep_phy_incval_ll_e810(hw, incval);
+
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
low = lower_32_bits(incval);
high = upper_32_bits(incval);
@@ -5178,68 +4762,6 @@ ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready)
*/
/**
- * ice_get_pca9575_handle
- * @hw: pointer to the hw struct
- * @pca9575_handle: GPIO controller's handle
- *
- * Find and return the GPIO controller's handle in the netlist.
- * When found - the value will be cached in the hw structure and following calls
- * will return cached value
- */
-static int
-ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
-{
- struct ice_aqc_get_link_topo *cmd;
- struct ice_aq_desc desc;
- int status;
- u8 idx;
-
- /* If handle was read previously return cached value */
- if (hw->io_expander_handle) {
- *pca9575_handle = hw->io_expander_handle;
- return 0;
- }
-
- /* If handle was not detected read it from the netlist */
- cmd = &desc.params.get_link_topo;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
-
- /* Set node type to GPIO controller */
- cmd->addr.topo_params.node_type_ctx =
- (ICE_AQC_LINK_TOPO_NODE_TYPE_M &
- ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL);
-
-#define SW_PCA9575_SFP_TOPO_IDX 2
-#define SW_PCA9575_QSFP_TOPO_IDX 1
-
- /* Check if the SW IO expander controlling SMA exists in the netlist. */
- if (hw->device_id == ICE_DEV_ID_E810C_SFP)
- idx = SW_PCA9575_SFP_TOPO_IDX;
- else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
- idx = SW_PCA9575_QSFP_TOPO_IDX;
- else
- return -EOPNOTSUPP;
-
- cmd->addr.topo_params.index = idx;
-
- status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
- if (status)
- return -EOPNOTSUPP;
-
- /* Verify if we found the right IO expander type */
- if (desc.params.get_link_topo.node_part_num !=
- ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
- return -EOPNOTSUPP;
-
- /* If present save the handle and return it */
- hw->io_expander_handle =
- le16_to_cpu(desc.params.get_link_topo.addr.handle);
- *pca9575_handle = hw->io_expander_handle;
-
- return 0;
-}
-
-/**
* ice_read_sma_ctrl
* @hw: pointer to the hw struct
* @data: pointer to data to be read from the GPIO controller
@@ -5304,37 +4826,6 @@ int ice_write_sma_ctrl(struct ice_hw *hw, u8 data)
}
/**
- * ice_read_pca9575_reg
- * @hw: pointer to the hw struct
- * @offset: GPIO controller register offset
- * @data: pointer to data to be read from the GPIO controller
- *
- * Read the register from the GPIO controller
- */
-int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data)
-{
- struct ice_aqc_link_topo_addr link_topo;
- __le16 addr;
- u16 handle;
- int err;
-
- memset(&link_topo, 0, sizeof(link_topo));
-
- err = ice_get_pca9575_handle(hw, &handle);
- if (err)
- return err;
-
- link_topo.handle = cpu_to_le16(handle);
- link_topo.topo_params.node_type_ctx =
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
- ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
-
- addr = cpu_to_le16((u16)offset);
-
- return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
-}
-
-/**
* ice_ptp_read_sdp_ac - read SDP available connections section from NVM
* @hw: pointer to the HW struct
* @entries: returns the SDP available connections section from NVM
@@ -5400,16 +4891,138 @@ exit:
*/
static void ice_ptp_init_phy_e810(struct ice_ptp_hw *ptp)
{
- ptp->phy_model = ICE_PHY_E810;
+ ptp->num_lports = 8;
+ ptp->ports_per_phy = 4;
+
+ init_waitqueue_head(&ptp->phy.e810.atqbal_wq);
+}
+
+/* E830 functions
+ *
+ * The following functions operate on the E830 series devices.
+ *
+ */
+
+/**
+ * ice_ptp_init_phc_e830 - Perform E830 specific PHC initialization
+ * @hw: pointer to HW struct
+ *
+ * Perform E830-specific PTP hardware clock initialization steps.
+ */
+static void ice_ptp_init_phc_e830(const struct ice_hw *hw)
+{
+ ice_ptp_cfg_sync_delay(hw, ICE_E810_E830_SYNC_DELAY);
+}
+
+/**
+ * ice_ptp_write_direct_incval_e830 - Prep PHY port increment value change
+ * @hw: pointer to HW struct
+ * @incval: The new 40bit increment value to prepare
+ *
+ * Prepare the PHY port for a new increment value by programming the PHC
+ * GLTSYN_INCVAL_L and GLTSYN_INCVAL_H registers. The actual change is
+ * completed by FW automatically.
+ */
+static void ice_ptp_write_direct_incval_e830(const struct ice_hw *hw,
+ u64 incval)
+{
+ u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
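+	/* Program the PHC increment value registers directly; FW completes
+	 * the change automatically, so no follow-up timer command is needed.
+	 */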
+ wr32(hw, GLTSYN_INCVAL_L(tmr_idx), lower_32_bits(incval));
+ wr32(hw, GLTSYN_INCVAL_H(tmr_idx), upper_32_bits(incval));
+}
+
+/**
+ * ice_ptp_write_direct_phc_time_e830 - Prepare PHY port with initial time
+ * @hw: Board private structure
+ * @time: Time to initialize the PHY port clock to
+ *
+ * Program the PHC GLTSYN_TIME registers directly with the initial time. On
+ * E830 the hardware applies the new time automatically, so no separate
+ * ICE_PTP_INIT_TIME command is required.
+ *
+ * The time value is the upper 32 bits of the PHY timer, usually in units of
+ * nominal nanoseconds.
+ */
+static void ice_ptp_write_direct_phc_time_e830(const struct ice_hw *hw,
+ u64 time)
+{
+ u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
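+	/* Clear GLTSYN_TIME_0 and program the lower and upper 32 bits of the
+	 * new time; E830 applies it without a separate ICE_PTP_INIT_TIME
+	 * command.
+	 */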
+ wr32(hw, GLTSYN_TIME_0(tmr_idx), 0);
+ wr32(hw, GLTSYN_TIME_L(tmr_idx), lower_32_bits(time));
+ wr32(hw, GLTSYN_TIME_H(tmr_idx), upper_32_bits(time));
+}
+
+/**
+ * ice_ptp_port_cmd_e830 - Prepare all external PHYs for a timer command
+ * @hw: pointer to HW struct
+ * @cmd: Command to be sent to the port
+ *
+ * Prepare the external PHYs connected to this device for a timer sync
+ * command.
+ *
+ * Return: 0 on success, negative error code when PHY write failed
+ */
+static int ice_ptp_port_cmd_e830(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+{
+ u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd);
+
+ return ice_write_phy_reg_e810(hw, E830_ETH_GLTSYN_CMD, val);
+}
+
+/**
+ * ice_read_phy_tstamp_e830 - Read a PHY timestamp out of the external PHY
+ * @hw: pointer to the HW struct
+ * @idx: the timestamp index to read
+ * @tstamp: on return, the 40bit timestamp value
+ *
+ * Read a 40bit timestamp value out of the timestamp block of the external PHY
+ * on the E830 device.
+ */
+static void ice_read_phy_tstamp_e830(const struct ice_hw *hw, u8 idx,
+ u64 *tstamp)
+{
+ u32 hi, lo;
+
+ hi = rd32(hw, E830_PRTTSYN_TXTIME_H(idx));
+ lo = rd32(hw, E830_PRTTSYN_TXTIME_L(idx));
+
+ /* For E830 devices, the timestamp is reported with the lower 32 bits
+ * in the low register, and the upper 8 bits in the high register.
+ */
+ *tstamp = FIELD_PREP(PHY_EXT_40B_HIGH_M, hi) |
+ FIELD_PREP(PHY_EXT_40B_LOW_M, lo);
+}
+
+/**
+ * ice_get_phy_tx_tstamp_ready_e830 - Read Tx memory status register
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to read
+ * @tstamp_ready: contents of the Tx memory status register
+ */
+static void ice_get_phy_tx_tstamp_ready_e830(const struct ice_hw *hw, u8 port,
+ u64 *tstamp_ready)
+{
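+	/* Combine the high and low 32-bit halves of the Tx timestamp memory
+	 * status register into a single 64-bit ready bitmap.
+	 */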
+ *tstamp_ready = rd32(hw, E830_PRTMAC_TS_TX_MEM_VALID_H);
+ *tstamp_ready <<= 32;
+ *tstamp_ready |= rd32(hw, E830_PRTMAC_TS_TX_MEM_VALID_L);
+}
+
+/**
+ * ice_ptp_init_phy_e830 - initialize PHY parameters
+ * @ptp: pointer to the PTP HW struct
+ */
+static void ice_ptp_init_phy_e830(struct ice_ptp_hw *ptp)
+{
ptp->num_lports = 8;
ptp->ports_per_phy = 4;
}
/* Device agnostic functions
*
- * The following functions implement shared behavior common to both E822 and
- * E810 devices, possibly calling a device specific implementation where
- * necessary.
+ * The following functions implement shared behavior common to all devices,
+ * possibly calling a device specific implementation where necessary.
*/
/**
@@ -5472,14 +5085,22 @@ void ice_ptp_init_hw(struct ice_hw *hw)
{
struct ice_ptp_hw *ptp = &hw->ptp;
- if (ice_is_e822(hw) || ice_is_e823(hw))
- ice_ptp_init_phy_e82x(ptp);
- else if (ice_is_e810(hw))
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
ice_ptp_init_phy_e810(ptp);
- else if (ice_is_e825c(hw))
- ice_ptp_init_phy_e825c(hw);
- else
- ptp->phy_model = ICE_PHY_UNSUP;
+ break;
+ case ICE_MAC_E830:
+ ice_ptp_init_phy_e830(ptp);
+ break;
+ case ICE_MAC_GENERIC:
+ ice_ptp_init_phy_e82x(ptp);
+ break;
+ case ICE_MAC_GENERIC_3K_E825:
+ ice_ptp_init_phy_e825(hw);
+ break;
+ default:
+ return;
+ }
}
/**
@@ -5500,11 +5121,11 @@ void ice_ptp_init_hw(struct ice_hw *hw)
static int ice_ptp_write_port_cmd(struct ice_hw *hw, u8 port,
enum ice_ptp_tmr_cmd cmd)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_ptp_write_port_cmd_eth56g(hw, port, cmd);
- case ICE_PHY_E82X:
+ switch (hw->mac_type) {
+ case ICE_MAC_GENERIC:
return ice_ptp_write_port_cmd_e82x(hw, port, cmd);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_ptp_write_port_cmd_eth56g(hw, port, cmd);
default:
return -EOPNOTSUPP;
}
@@ -5565,9 +5186,11 @@ static int ice_ptp_port_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
u32 port;
/* PHY models which can program all ports simultaneously */
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_ptp_port_cmd_e810(hw, cmd);
+ case ICE_MAC_E830:
+ return ice_ptp_port_cmd_e830(hw, cmd);
default:
break;
}
@@ -5638,23 +5261,29 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time)
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
/* Source timers */
+	/* For E830 we don't need to use shadow registers, it's automatic */
+ if (hw->mac_type == ICE_MAC_E830) {
+ ice_ptp_write_direct_phc_time_e830(hw, time);
+ return 0;
+ }
+
wr32(hw, GLTSYN_SHTIME_L(tmr_idx), lower_32_bits(time));
wr32(hw, GLTSYN_SHTIME_H(tmr_idx), upper_32_bits(time));
wr32(hw, GLTSYN_SHTIME_0(tmr_idx), 0);
/* PHY timers */
/* Fill Rx and Tx ports and send msg to PHY */
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_ptp_prep_phy_time_eth56g(hw,
- (u32)(time & 0xFFFFFFFF));
- break;
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
err = ice_ptp_prep_phy_time_e82x(hw, time & 0xFFFFFFFF);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_ptp_prep_phy_time_eth56g(hw,
+ (u32)(time & 0xFFFFFFFF));
+ break;
default:
err = -EOPNOTSUPP;
}
@@ -5686,20 +5315,26 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+	/* For E830 we don't need to use shadow registers, it's automatic */
+ if (hw->mac_type == ICE_MAC_E830) {
+ ice_ptp_write_direct_incval_e830(hw, incval);
+ return 0;
+ }
+
/* Shadow Adjust */
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_ptp_prep_phy_incval_eth56g(hw, incval);
- break;
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
err = ice_ptp_prep_phy_incval_e810(hw, incval);
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
err = ice_ptp_prep_phy_incval_e82x(hw, incval);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_ptp_prep_phy_incval_eth56g(hw, incval);
+ break;
default:
err = -EOPNOTSUPP;
}
@@ -5759,16 +5394,19 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_ptp_prep_phy_adj_eth56g(hw, adj);
- break;
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
err = ice_ptp_prep_phy_adj_e810(hw, adj);
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_E830:
+ /* E830 sync PHYs automatically after setting GLTSYN_SHADJ */
+ return 0;
+ case ICE_MAC_GENERIC:
err = ice_ptp_prep_phy_adj_e82x(hw, adj);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_ptp_prep_phy_adj_eth56g(hw, adj);
+ break;
default:
err = -EOPNOTSUPP;
}
@@ -5792,13 +5430,16 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
*/
int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_read_ptp_tstamp_eth56g(hw, block, idx, tstamp);
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
- case ICE_PHY_E82X:
+ case ICE_MAC_E830:
+ ice_read_phy_tstamp_e830(hw, idx, tstamp);
+ return 0;
+ case ICE_MAC_GENERIC:
return ice_read_phy_tstamp_e82x(hw, block, idx, tstamp);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_read_ptp_tstamp_eth56g(hw, block, idx, tstamp);
default:
return -EOPNOTSUPP;
}
@@ -5822,13 +5463,13 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
*/
int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_clear_ptp_tstamp_eth56g(hw, block, idx);
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_clear_phy_tstamp_e810(hw, block, idx);
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
return ice_clear_phy_tstamp_e82x(hw, block, idx);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_clear_ptp_tstamp_eth56g(hw, block, idx);
default:
return -EOPNOTSUPP;
}
@@ -5885,14 +5526,14 @@ static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx)
*/
void ice_ptp_reset_ts_memory(struct ice_hw *hw)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- ice_ptp_reset_ts_memory_eth56g(hw);
- break;
- case ICE_PHY_E82X:
+ switch (hw->mac_type) {
+ case ICE_MAC_GENERIC:
ice_ptp_reset_ts_memory_e82x(hw);
break;
- case ICE_PHY_E810:
+ case ICE_MAC_GENERIC_3K_E825:
+ ice_ptp_reset_ts_memory_eth56g(hw);
+ break;
+ case ICE_MAC_E810:
default:
return;
}
@@ -5914,13 +5555,16 @@ int ice_ptp_init_phc(struct ice_hw *hw)
/* Clear event err indications for auxiliary pins */
(void)rd32(hw, GLTSYN_STAT(src_idx));
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_ptp_init_phc_eth56g(hw);
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_ptp_init_phc_e810(hw);
- case ICE_PHY_E82X:
+ case ICE_MAC_E830:
+ ice_ptp_init_phc_e830(hw);
+ return 0;
+ case ICE_MAC_GENERIC:
return ice_ptp_init_phc_e82x(hw);
+ case ICE_MAC_GENERIC_3K_E825:
+ return 0;
default:
return -EOPNOTSUPP;
}
@@ -5939,17 +5583,19 @@ int ice_ptp_init_phc(struct ice_hw *hw)
*/
int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_get_phy_tx_tstamp_ready_eth56g(hw, block,
- tstamp_ready);
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_get_phy_tx_tstamp_ready_e810(hw, block,
tstamp_ready);
- case ICE_PHY_E82X:
+ case ICE_MAC_E830:
+ ice_get_phy_tx_tstamp_ready_e830(hw, block, tstamp_ready);
+ return 0;
+ case ICE_MAC_GENERIC:
return ice_get_phy_tx_tstamp_ready_e82x(hw, block,
tstamp_ready);
- break;
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_get_phy_tx_tstamp_ready_eth56g(hw, block,
+ tstamp_ready);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 47af7c5c79b8..5896b346e579 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -65,14 +65,14 @@ enum ice_eth56g_link_spd {
/**
* struct ice_phy_reg_info_eth56g - ETH56G PHY register parameters
- * @base: base address for each PHY block
+ * @base_addr: base address for each PHY block
* @step: step between PHY lanes
*
* Characteristic information for the various PHY register parameters in the
* ETH56G devices
*/
struct ice_phy_reg_info_eth56g {
- u32 base[NUM_ETH56G_PHY_RES];
+ u32 base_addr;
u32 step;
};
@@ -80,7 +80,6 @@ struct ice_phy_reg_info_eth56g {
* struct ice_time_ref_info_e82x
* @pll_freq: Frequency of PLL that drives timer ticks in Hz
* @nominal_incval: increment to generate nanoseconds in GLTSYN_TIME_L
- * @pps_delay: propagation delay of the PPS output signal
*
* Characteristic information for the various TIME_REF sources possible in the
* E822 devices
@@ -88,7 +87,6 @@ struct ice_phy_reg_info_eth56g {
struct ice_time_ref_info_e82x {
u64 pll_freq;
u64 nominal_incval;
- u8 pps_delay;
};
/**
@@ -196,23 +194,6 @@ struct ice_eth56g_mac_reg_cfg {
extern
const struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD];
-/**
- * struct ice_cgu_pll_params_e82x - E82X CGU parameters
- * @refclk_pre_div: Reference clock pre-divisor
- * @feedback_div: Feedback divisor
- * @frac_n_div: Fractional divisor
- * @post_pll_div: Post PLL divisor
- *
- * Clock Generation Unit parameters used to program the PLL based on the
- * selected TIME_REF frequency.
- */
-struct ice_cgu_pll_params_e82x {
- u32 refclk_pre_div;
- u32 feedback_div;
- u32 frac_n_div;
- u32 post_pll_div;
-};
-
#define E810C_QSFP_C827_0_HANDLE 2
#define E810C_QSFP_C827_1_HANDLE 3
enum ice_e810_c827_idx {
@@ -284,31 +265,6 @@ struct ice_cgu_pin_desc {
struct dpll_pin_frequency *freq_supp;
};
-extern const struct
-ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ];
-
-/**
- * struct ice_cgu_pll_params_e825c - E825C CGU parameters
- * @tspll_ck_refclkfreq: tspll_ck_refclkfreq selection
- * @tspll_ndivratio: ndiv ratio that goes directly to the pll
- * @tspll_fbdiv_intgr: TS PLL integer feedback divide
- * @tspll_fbdiv_frac: TS PLL fractional feedback divide
- * @ref1588_ck_div: clock divider for tspll ref
- *
- * Clock Generation Unit parameters used to program the PLL based on the
- * selected TIME_REF/TCXO frequency.
- */
-struct ice_cgu_pll_params_e825c {
- u32 tspll_ck_refclkfreq;
- u32 tspll_ndivratio;
- u32 tspll_fbdiv_intgr;
- u32 tspll_fbdiv_frac;
- u32 ref1588_ck_div;
-};
-
-extern const struct
-ice_cgu_pll_params_e825c e825c_cgu_params[NUM_ICE_TIME_REF_FREQ];
-
#define E810C_QSFP_C827_0_HANDLE 2
#define E810C_QSFP_C827_1_HANDLE 3
@@ -316,7 +272,7 @@ ice_cgu_pll_params_e825c e825c_cgu_params[NUM_ICE_TIME_REF_FREQ];
extern const struct ice_phy_reg_info_eth56g eth56g_phy_res[NUM_ETH56G_PHY_RES];
/* Table of constants related to possible TIME_REF sources */
-extern const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ];
+extern const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TSPLL_FREQ];
/* Table of constants for Vernier calibration on E822 */
extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD];
@@ -326,12 +282,10 @@ extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD];
*/
#define ICE_E810_PLL_FREQ 812500000
#define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL
-#define ICE_E810_OUT_PROP_DELAY_NS 1
-#define ICE_E825C_OUT_PROP_DELAY_NS 11
+#define ICE_E810_E830_SYNC_DELAY 0
/* Device agnostic functions */
u8 ice_get_ptp_src_clock_index(struct ice_hw *hw);
-int ice_cgu_cfg_pps_out(struct ice_hw *hw, bool enable);
bool ice_ptp_lock(struct ice_hw *hw);
void ice_ptp_unlock(struct ice_hw *hw);
void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd);
@@ -360,7 +314,8 @@ void ice_ptp_reset_ts_memory_quad_e82x(struct ice_hw *hw, u8 quad);
*
* Returns the current TIME_REF from the capabilities structure.
*/
-static inline enum ice_time_ref_freq ice_e82x_time_ref(const struct ice_hw *hw)
+
+static inline enum ice_tspll_freq ice_e82x_time_ref(const struct ice_hw *hw)
return hw->func_caps.ts_func_info.time_ref;
}
@@ -374,26 +329,21 @@ static inline enum ice_time_ref_freq ice_e82x_time_ref(const struct ice_hw *hw)
* change, such as an update to the CGU registers.
*/
static inline void
-ice_set_e82x_time_ref(struct ice_hw *hw, enum ice_time_ref_freq time_ref)
+ice_set_e82x_time_ref(struct ice_hw *hw, enum ice_tspll_freq time_ref)
{
hw->func_caps.ts_func_info.time_ref = time_ref;
}
-static inline u64 ice_e82x_pll_freq(enum ice_time_ref_freq time_ref)
+static inline u64 ice_e82x_pll_freq(enum ice_tspll_freq time_ref)
{
return e82x_time_ref[time_ref].pll_freq;
}
-static inline u64 ice_e82x_nominal_incval(enum ice_time_ref_freq time_ref)
+static inline u64 ice_e82x_nominal_incval(enum ice_tspll_freq time_ref)
{
return e82x_time_ref[time_ref].nominal_incval;
}
-static inline u64 ice_e82x_pps_delay(enum ice_time_ref_freq time_ref)
-{
- return e82x_time_ref[time_ref].pps_delay;
-}
-
/* E822 Vernier calibration functions */
int ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset);
int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port);
@@ -404,7 +354,6 @@ int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold);
/* E810 family functions */
int ice_read_sma_ctrl(struct ice_hw *hw, u8 *data);
int ice_write_sma_ctrl(struct ice_hw *hw, u8 data);
-int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data);
int ice_ptp_read_sdp_ac(struct ice_hw *hw, __le16 *entries, uint *num_entries);
int ice_cgu_get_num_pins(struct ice_hw *hw, bool input);
enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input);
@@ -432,20 +381,6 @@ int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port);
#define ICE_ETH56G_NOMINAL_THRESH4 0x7777
#define ICE_ETH56G_NOMINAL_TX_THRESH 0x6
-static inline u64 ice_prop_delay(const struct ice_hw *hw)
-{
- switch (hw->ptp.phy_model) {
- case ICE_PHY_ETH56G:
- return ICE_E825C_OUT_PROP_DELAY_NS;
- case ICE_PHY_E810:
- return ICE_E810_OUT_PROP_DELAY_NS;
- case ICE_PHY_E82X:
- return ice_e82x_pps_delay(ice_e82x_time_ref(hw));
- default:
- return 0;
- }
-}
-
/**
* ice_get_base_incval - Get base clock increment value
* @hw: pointer to the HW struct
@@ -454,23 +389,19 @@ static inline u64 ice_prop_delay(const struct ice_hw *hw)
*/
static inline u64 ice_get_base_incval(struct ice_hw *hw)
{
- switch (hw->ptp.phy_model) {
- case ICE_PHY_ETH56G:
- return ICE_ETH56G_NOMINAL_INCVAL;
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
return ICE_PTP_NOMINAL_INCVAL_E810;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
return ice_e82x_nominal_incval(ice_e82x_time_ref(hw));
+ case ICE_MAC_GENERIC_3K_E825:
+ return ICE_ETH56G_NOMINAL_INCVAL;
default:
return 0;
}
}
-static inline bool ice_is_dual(struct ice_hw *hw)
-{
- return !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_DUAL_M);
-}
-
#define PFTSYN_SEM_BYTES 4
#define ICE_PTP_CLOCK_INDEX_0 0x00
@@ -673,28 +604,41 @@ static inline bool ice_is_dual(struct ice_hw *hw)
/* E810 timer command register */
#define E810_ETH_GLTSYN_CMD 0x03000344
+/* E830 timer command register */
+#define E830_ETH_GLTSYN_CMD 0x00088814
+
+/* E830 PHC time register */
+#define E830_GLTSYN_TIME_L(_tmr_idx) (0x0008A000 + 0x1000 * (_tmr_idx))
+
/* Source timer incval macros */
#define INCVAL_HIGH_M 0xFF
-/* Timestamp block macros */
+/* PHY 40b register macros */
+#define PHY_EXT_40B_LOW_M GENMASK(31, 0)
+#define PHY_EXT_40B_HIGH_M GENMASK_ULL(39, 32)
+#define PHY_40B_LOW_M GENMASK(7, 0)
+#define PHY_40B_HIGH_M GENMASK_ULL(39, 8)
#define TS_VALID BIT(0)
#define TS_LOW_M 0xFFFFFFFF
#define TS_HIGH_M 0xFF
#define TS_HIGH_S 32
-#define TS_PHY_LOW_M 0xFF
-#define TS_PHY_HIGH_M 0xFFFFFFFF
-#define TS_PHY_HIGH_S 8
-
#define BYTES_PER_IDX_ADDR_L_U 8
#define BYTES_PER_IDX_ADDR_L 4
/* Tx timestamp low latency read definitions */
-#define TS_LL_READ_RETRIES 200
-#define TS_LL_READ_TS_HIGH GENMASK(23, 16)
-#define TS_LL_READ_TS_IDX GENMASK(29, 24)
-#define TS_LL_READ_TS_INTR BIT(30)
-#define TS_LL_READ_TS BIT(31)
+#define REG_LL_PROXY_H_TIMEOUT_US 2000
+#define REG_LL_PROXY_H_PHY_TMR_CMD_M GENMASK(7, 6)
+#define REG_LL_PROXY_H_PHY_TMR_CMD_ADJ 0x1
+#define REG_LL_PROXY_H_PHY_TMR_CMD_FREQ 0x2
+#define REG_LL_PROXY_H_TS_HIGH GENMASK(23, 16)
+#define REG_LL_PROXY_H_PHY_TMR_IDX_M BIT(24)
+#define REG_LL_PROXY_H_TS_IDX GENMASK(29, 24)
+#define REG_LL_PROXY_H_TS_INTR_ENA BIT(30)
+#define REG_LL_PROXY_H_EXEC BIT(31)
+
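+/* The low latency proxy registers are aliases of the PF sideband ATQ base
+ * address registers (REG_LL_PROXY_L maps to PF_SB_ATQBAH and REG_LL_PROXY_H
+ * to PF_SB_ATQBAL).
+ */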
+#define REG_LL_PROXY_L PF_SB_ATQBAH
+#define REG_LL_PROXY_H PF_SB_ATQBAL
/* Internal PHY timestamp address */
#define TS_L(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U))
@@ -718,6 +662,7 @@ static inline bool ice_is_dual(struct ice_hw *hw)
#define ICE_SMA1_MASK (ICE_SMA1_DIR_EN | ICE_SMA1_TX_EN)
#define ICE_SMA2_MASK (ICE_SMA2_UFL2_RX_DIS | ICE_SMA2_DIR_EN | \
ICE_SMA2_TX_EN)
+#define ICE_SMA2_INACTIVE_MASK (ICE_SMA2_DIR_EN | ICE_SMA2_TX_EN)
#define ICE_ALL_SMA_MASK (ICE_SMA1_MASK | ICE_SMA2_MASK)
#define ICE_SMA_MIN_BIT 3
@@ -789,36 +734,19 @@ static inline bool ice_is_dual(struct ice_hw *hw)
#define PHY_MAC_XIF_TS_SFD_ENA_M ICE_M(0x1, 20)
#define PHY_MAC_XIF_GMII_TS_SEL_M ICE_M(0x1, 21)
-/* GPCS config register */
-#define PHY_GPCS_CONFIG_REG0 0x268
-#define PHY_GPCS_CONFIG_REG0_TX_THR_M ICE_M(0xF, 24)
-#define PHY_GPCS_BITSLIP 0x5C
-
#define PHY_TS_INT_CONFIG_THRESHOLD_M ICE_M(0x3F, 0)
#define PHY_TS_INT_CONFIG_ENA_M BIT(6)
-/* 1-step PTP config */
-#define PHY_PTP_1STEP_CONFIG 0x270
-#define PHY_PTP_1STEP_T1S_UP64_M ICE_M(0xF, 4)
-#define PHY_PTP_1STEP_T1S_DELTA_M ICE_M(0xF, 8)
-#define PHY_PTP_1STEP_PEER_DELAY(_port) (0x274 + 4 * (_port))
-#define PHY_PTP_1STEP_PD_ADD_PD_M ICE_M(0x1, 0)
-#define PHY_PTP_1STEP_PD_DELAY_M ICE_M(0x3fffffff, 1)
-#define PHY_PTP_1STEP_PD_DLY_V_M ICE_M(0x1, 31)
-
/* Macros to derive offsets for TimeStampLow and TimeStampHigh */
#define PHY_TSTAMP_L(x) (((x) * 8) + 0)
#define PHY_TSTAMP_U(x) (((x) * 8) + 4)
-#define PHY_REG_REVISION 0x85000
-
#define PHY_REG_DESKEW_0 0x94
#define PHY_REG_DESKEW_0_RLEVEL GENMASK(6, 0)
#define PHY_REG_DESKEW_0_RLEVEL_FRAC GENMASK(9, 7)
#define PHY_REG_DESKEW_0_RLEVEL_FRAC_W 3
#define PHY_REG_DESKEW_0_VALID GENMASK(10, 10)
-#define PHY_REG_GPCS_BITSLIP 0x5C
#define PHY_REG_SD_BIT_SLIP(_port_offset) (0x29C + 4 * (_port_offset))
#define PHY_REVISION_ETH56G 0x10200
#define PHY_VENDOR_TXLANE_THRESH 0x2000C
@@ -838,7 +766,21 @@ static inline bool ice_is_dual(struct ice_hw *hw)
#define PHY_MAC_BLOCKTIME 0x50
#define PHY_MAC_MARKERTIME 0x54
#define PHY_MAC_TX_OFFSET 0x58
+#define PHY_GPCS_BITSLIP 0x5C
#define PHY_PTP_INT_STATUS 0x7FD140
+/* ETH56G registers shared per quad */
+/* GPCS config register */
+#define PHY_GPCS_CONFIG_REG0 0x268
+#define PHY_GPCS_CONFIG_REG0_TX_THR_M GENMASK(27, 24)
+/* 1-step PTP config */
+#define PHY_PTP_1STEP_CONFIG 0x270
+#define PHY_PTP_1STEP_T1S_UP64_M GENMASK(7, 4)
+#define PHY_PTP_1STEP_T1S_DELTA_M GENMASK(11, 8)
+#define PHY_PTP_1STEP_PEER_DELAY(_quad_lane) (0x274 + 4 * (_quad_lane))
+#define PHY_PTP_1STEP_PD_ADD_PD_M BIT(0)
+#define PHY_PTP_1STEP_PD_DELAY_M GENMASK(30, 1)
+#define PHY_PTP_1STEP_PD_DLY_V_M BIT(31)
+
#endif /* _ICE_PTP_HW_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index 970a99a52bf1..cb08746556a6 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -4,7 +4,7 @@
#include "ice.h"
#include "ice_eswitch.h"
#include "devlink/devlink.h"
-#include "devlink/devlink_port.h"
+#include "devlink/port.h"
#include "ice_sriov.h"
#include "ice_tc_lib.h"
#include "ice_dcb_lib.h"
@@ -219,7 +219,8 @@ ice_repr_setup_tc_cls_flower(struct ice_repr *repr,
{
switch (flower->command) {
case FLOW_CLS_REPLACE:
- return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower);
+ return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower,
+ true);
case FLOW_CLS_DESTROY:
return ice_del_cls_flower(repr->src_vsi, flower);
default:
@@ -336,6 +337,7 @@ void ice_repr_destroy(struct ice_repr *repr)
static void ice_repr_rem_vf(struct ice_repr *repr)
{
ice_eswitch_decfg_vsi(repr->src_vsi, repr->parent_mac);
+ ice_pass_vf_tx_lldp(repr->src_vsi, true);
unregister_netdev(repr->netdev);
ice_devlink_destroy_vf_port(repr->vf);
ice_virtchnl_set_dflt_ops(repr->vf);
@@ -417,6 +419,10 @@ static int ice_repr_add_vf(struct ice_repr *repr)
if (err)
goto err_netdev;
+ err = ice_drop_vf_tx_lldp(repr->src_vsi, true);
+ if (err)
+ goto err_drop_lldp;
+
err = ice_eswitch_cfg_vsi(repr->src_vsi, repr->parent_mac);
if (err)
goto err_cfg_vsi;
@@ -429,6 +435,8 @@ static int ice_repr_add_vf(struct ice_repr *repr)
return 0;
err_cfg_vsi:
+ ice_pass_vf_tx_lldp(repr->src_vsi, true);
+err_drop_lldp:
unregister_netdev(repr->netdev);
err_netdev:
ice_devlink_destroy_vf_port(vf);
diff --git a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
index 3b0054faf70c..21bb861febbf 100644
--- a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
@@ -46,13 +46,11 @@ struct ice_sbq_evt_desc {
u8 data[24];
};
-enum ice_sbq_msg_dev {
- eth56g_phy_0 = 0x02,
- rmn_0 = 0x02,
- rmn_1 = 0x03,
- rmn_2 = 0x04,
- cgu = 0x06,
- eth56g_phy_1 = 0x0D,
+enum ice_sbq_dev_id {
+ ice_sbq_dev_phy_0 = 0x02,
+ ice_sbq_dev_cgu = 0x06,
+ ice_sbq_dev_phy_0_peer = 0x0D,
+ ice_sbq_dev_cgu_peer = 0x0F,
};
enum ice_sbq_msg_opcode {
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 6ca13c5dcb14..fff0c1afdb41 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -85,6 +85,27 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
}
/**
+ * ice_sched_find_next_vsi_node - find the next node for a given VSI
+ * @vsi_node: VSI support node to start search with
+ *
+ * Return: Next VSI support node, or NULL.
+ *
+ * The function returns a pointer to the next node from the VSI layer
+ * assigned to the given VSI, or NULL if there is no such node.
+ */
+static struct ice_sched_node *
+ice_sched_find_next_vsi_node(struct ice_sched_node *vsi_node)
+{
+ unsigned int vsi_handle = vsi_node->vsi_handle;
+
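+	/* Walk the sibling list at the VSI layer until another support node
+	 * carrying the same VSI handle is found.
+	 */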
+ while ((vsi_node = vsi_node->sibling) != NULL)
+ if (vsi_node->vsi_handle == vsi_handle)
+ break;
+
+ return vsi_node;
+}
+
+/**
* ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
* @hw: pointer to the HW struct
* @cmd_opc: cmd opcode
@@ -102,13 +123,13 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
u16 *elems_resp, struct ice_sq_cd *cd)
{
struct ice_aqc_sched_elem_cmd *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.sched_elem_cmd;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
cmd->num_elem_req = cpu_to_le16(elems_req);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
if (!status && elems_resp)
*elems_resp = le16_to_cpu(cmd->num_elem_resp);
@@ -371,10 +392,10 @@ ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
u8 *num_branches, struct ice_sq_cd *cd)
{
struct ice_aqc_get_topo *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.get_topo;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
cmd->port_num = lport;
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
@@ -497,7 +518,7 @@ ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
struct ice_aqc_query_txsched_res_resp *buf,
struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
@@ -662,13 +683,13 @@ ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
{
struct ice_aqc_rl_profile *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.rl_profile;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, opcode);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
cmd->num_profiles = cpu_to_le16(num_profiles);
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
if (!status && num_processed)
@@ -1084,8 +1105,10 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
if (parent->num_children < max_child_nodes) {
new_num_nodes = max_child_nodes - parent->num_children;
} else {
- /* This parent is full, try the next sibling */
- parent = parent->sibling;
+ /* This parent is full,
+ * try the next available sibling.
+ */
+ parent = ice_sched_find_next_vsi_node(parent);
/* Don't modify the first node TEID memory if the
* first node was added already in the above call.
* Instead send some temp memory for all other
@@ -1528,12 +1551,23 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
/* get the first queue group node from VSI sub-tree */
qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
while (qgrp_node) {
+ struct ice_sched_node *next_vsi_node;
+
/* make sure the qgroup node is part of the VSI subtree */
if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
if (qgrp_node->num_children < max_children &&
qgrp_node->owner == owner)
break;
qgrp_node = qgrp_node->sibling;
+ if (qgrp_node)
+ continue;
+
+ next_vsi_node = ice_sched_find_next_vsi_node(vsi_node);
+ if (!next_vsi_node)
+ break;
+
+ vsi_node = next_vsi_node;
+ qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
}
/* Select the best queue group */
@@ -1604,16 +1638,16 @@ ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
/**
* ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
* @hw: pointer to the HW struct
- * @num_qs: number of queues
+ * @num_new_qs: number of new queues that will be added to the tree
* @num_nodes: num nodes array
*
* This function calculates the number of VSI child nodes based on the
* number of queues.
*/
static void
-ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
+ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_new_qs, u16 *num_nodes)
{
- u16 num = num_qs;
+ u16 num = num_new_qs;
u8 i, qgl, vsil;
qgl = ice_sched_get_qgrp_layer(hw);
@@ -1779,7 +1813,11 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
if (!parent)
return -EIO;
- if (i == vsil)
+		/* Assign the VSI handle only to newly added VSI support
+		 * nodes; do not modify the handle of nodes that already
+		 * existed (i.e. when no new node was added to the tree).
+		 */
+ if (i == vsil && num_added)
parent->vsi_handle = vsi_handle;
}
@@ -1813,6 +1851,41 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
}
/**
+ * ice_sched_recalc_vsi_support_nodes - recalculate VSI support nodes count
+ * @hw: pointer to the HW struct
+ * @vsi_node: pointer to the leftmost VSI node that needs to be extended
+ * @new_numqs: new number of queues that has to be handled by the VSI
+ * @new_num_nodes: pointer to nodes count table to modify the VSI layer entry
+ *
+ * This function recalculates the number of VSI support nodes that need to
+ * be added when more Tx queues are added for a given VSI.
+ * The number of new VSI support nodes that shall be added will be saved
+ * to the @new_num_nodes table for the VSI layer.
+ */
+static void
+ice_sched_recalc_vsi_support_nodes(struct ice_hw *hw,
+ struct ice_sched_node *vsi_node,
+ unsigned int new_numqs, u16 *new_num_nodes)
+{
+ u32 vsi_nodes_cnt = 1;
+ u32 max_queue_cnt = 1;
+ u32 qgl, vsil;
+
+ qgl = ice_sched_get_qgrp_layer(hw);
+ vsil = ice_sched_get_vsi_layer(hw);
+
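+	/* A single VSI support node can serve at most the product of the
+	 * children fan-out of every layer from the VSI layer down to the
+	 * queue group layer.
+	 */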
+ for (u32 i = vsil; i <= qgl; i++)
+ max_queue_cnt *= hw->max_children[i];
+
+ while ((vsi_node = ice_sched_find_next_vsi_node(vsi_node)) != NULL)
+ vsi_nodes_cnt++;
+
+ if (new_numqs > (max_queue_cnt * vsi_nodes_cnt))
+ new_num_nodes[vsil] = DIV_ROUND_UP(new_numqs, max_queue_cnt) -
+ vsi_nodes_cnt;
+}
+
+/**
* ice_sched_update_vsi_child_nodes - update VSI child nodes
* @pi: port information structure
* @vsi_handle: software VSI handle
@@ -1863,15 +1936,25 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
return status;
}
- if (new_numqs)
- ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
- /* Keep the max number of queue configuration all the time. Update the
- * tree only if number of queues > previous number of queues. This may
+ ice_sched_recalc_vsi_support_nodes(hw, vsi_node,
+ new_numqs, new_num_nodes);
+ ice_sched_calc_vsi_child_nodes(hw, new_numqs - prev_numqs,
+ new_num_nodes);
+
+ /* Never decrease the number of queues in the tree. Update the tree
+ * only if number of queues > previous number of queues. This may
* leave some extra nodes in the tree if number of queues < previous
* number but that wouldn't harm anything. Removing those extra nodes
* may complicate the code if those nodes are part of SRL or
* individually rate limited.
+ * Also, add the required VSI support nodes if the existing ones cannot
+ * handle the requested new number of queues.
*/
+ status = ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
+ new_num_nodes);
+ if (status)
+ return status;
+
status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
new_num_nodes, owner);
if (status)
@@ -2013,6 +2096,58 @@ static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
}
/**
+ * ice_sched_rm_vsi_subtree - remove all nodes assigned to a given VSI
+ * @pi: port information structure
+ * @vsi_node: pointer to the leftmost node of the VSI to be removed
+ * @owner: LAN or RDMA
+ * @tc: TC number
+ *
+ * Return: 0 on success, or -EBUSY if the VSI still has leaf nodes in the TC.
+ *
+ * This function removes all the VSI support nodes associated with a given VSI
+ * and its LAN or RDMA children nodes from the scheduler tree.
+ */
+static int
+ice_sched_rm_vsi_subtree(struct ice_port_info *pi,
+ struct ice_sched_node *vsi_node, u8 owner, u8 tc)
+{
+ u16 vsi_handle = vsi_node->vsi_handle;
+ bool all_vsi_nodes_removed = true;
+ int j = 0;
+
+ while (vsi_node) {
+ struct ice_sched_node *next_vsi_node;
+
+ if (ice_sched_is_leaf_node_present(vsi_node)) {
+ ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", tc);
+ return -EBUSY;
+ }
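+		/* Free the children owned by @owner; freeing a node compacts
+		 * the children array, so the index is only advanced past
+		 * children that are kept.
+		 */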
+ while (j < vsi_node->num_children) {
+ if (vsi_node->children[j]->owner == owner)
+ ice_free_sched_node(pi, vsi_node->children[j]);
+ else
+ j++;
+ }
+
+ next_vsi_node = ice_sched_find_next_vsi_node(vsi_node);
+
+ /* remove the VSI if it has no children */
+ if (!vsi_node->num_children)
+ ice_free_sched_node(pi, vsi_node);
+ else
+ all_vsi_nodes_removed = false;
+
+ vsi_node = next_vsi_node;
+ }
+
+ /* clean up aggregator related VSI info if any */
+ if (all_vsi_nodes_removed)
+ ice_sched_rm_agg_vsi_info(pi, vsi_handle);
+
+ return 0;
+}
+
+/**
* ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
* @pi: port information structure
* @vsi_handle: software VSI handle
@@ -2038,7 +2173,6 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
ice_for_each_traffic_class(i) {
struct ice_sched_node *vsi_node, *tc_node;
- u8 j = 0;
tc_node = ice_sched_get_tc_node(pi, i);
if (!tc_node)
@@ -2048,31 +2182,12 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
if (!vsi_node)
continue;
- if (ice_sched_is_leaf_node_present(vsi_node)) {
- ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i);
- status = -EBUSY;
+ status = ice_sched_rm_vsi_subtree(pi, vsi_node, owner, i);
+ if (status)
goto exit_sched_rm_vsi_cfg;
- }
- while (j < vsi_node->num_children) {
- if (vsi_node->children[j]->owner == owner) {
- ice_free_sched_node(pi, vsi_node->children[j]);
- /* reset the counter again since the num
- * children will be updated after node removal
- */
- j = 0;
- } else {
- j++;
- }
- }
- /* remove the VSI if it has no children */
- if (!vsi_node->num_children) {
- ice_free_sched_node(pi, vsi_node);
- vsi_ctx->sched.vsi_node[i] = NULL;
+ vsi_ctx->sched.vsi_node[i] = NULL;
- /* clean up aggregator related VSI info if any */
- ice_sched_rm_agg_vsi_info(pi, vsi_handle);
- }
if (owner == ICE_SCHED_NODE_OWNER_LAN)
vsi_ctx->sched.max_lanq[i] = 0;
else
diff --git a/drivers/net/ethernet/intel/ice/ice_sf_eth.c b/drivers/net/ethernet/intel/ice/ice_sf_eth.c
index 75d7147e1c01..1a2c94375ca7 100644
--- a/drivers/net/ethernet/intel/ice/ice_sf_eth.c
+++ b/drivers/net/ethernet/intel/ice/ice_sf_eth.c
@@ -5,8 +5,8 @@
#include "ice_txrx.h"
#include "ice_fltr.h"
#include "ice_sf_eth.h"
-#include "devlink/devlink_port.h"
#include "devlink/devlink.h"
+#include "devlink/port.h"
static const struct net_device_ops ice_sf_netdev_ops = {
.ndo_open = ice_open,
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index b83f99c01d91..6b1126ddb561 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -9,7 +9,7 @@
#include "ice_dcb_lib.h"
#include "ice_flow.h"
#include "ice_eswitch.h"
-#include "ice_virtchnl_allowlist.h"
+#include "virt/allowlist.h"
#include "ice_flex_pipe.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_vlan.h"
@@ -36,6 +36,7 @@ static void ice_free_vf_entries(struct ice_pf *pf)
hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
hash_del_rcu(&vf->entry);
+ ice_deinitialize_vf_entry(vf);
ice_put_vf(vf);
}
}
@@ -62,6 +63,7 @@ static void ice_free_vf_res(struct ice_vf *vf)
if (vf->lan_vsi_idx != ICE_NO_VSI) {
ice_vf_vsi_release(vf);
vf->num_mac = 0;
+ vf->num_mac_lldp = 0;
}
last_vector_idx = vf->first_vector_idx + vf->num_msix - 1;
@@ -123,27 +125,6 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
}
/**
- * ice_sriov_free_msix_res - Reset/free any used MSIX resources
- * @pf: pointer to the PF structure
- *
- * Since no MSIX entries are taken from the pf->irq_tracker then just clear
- * the pf->sriov_base_vector.
- *
- * Returns 0 on success, and -EINVAL on error.
- */
-static int ice_sriov_free_msix_res(struct ice_pf *pf)
-{
- if (!pf)
- return -EINVAL;
-
- bitmap_free(pf->sriov_irq_bm);
- pf->sriov_irq_size = 0;
- pf->sriov_base_vector = 0;
-
- return 0;
-}
-
-/**
* ice_free_vfs - Free all VFs
* @pf: pointer to the PF structure
*/
@@ -177,6 +158,7 @@ void ice_free_vfs(struct ice_pf *pf)
ice_eswitch_detach_vf(pf, vf);
ice_dis_vf_qs(vf);
+ ice_virt_free_irqs(pf, vf->first_vector_idx, vf->num_msix);
if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
/* disable VF qp mappings and set VF disable state */
@@ -193,16 +175,9 @@ void ice_free_vfs(struct ice_pf *pf)
wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
}
- /* clear malicious info since the VF is getting released */
- if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
- list_del(&vf->mbx_info.list_entry);
-
mutex_unlock(&vf->cfg_lock);
}
- if (ice_sriov_free_msix_res(pf))
- dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
-
vfs->num_qps_per = 0;
ice_free_vf_entries(pf);
@@ -372,40 +347,6 @@ void ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
}
/**
- * ice_sriov_set_msix_res - Set any used MSIX resources
- * @pf: pointer to PF structure
- * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
- *
- * This function allows SR-IOV resources to be taken from the end of the PF's
- * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
- * just set the pf->sriov_base_vector and return success.
- *
- * If there are not enough resources available, return an error. This should
- * always be caught by ice_set_per_vf_res().
- *
- * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
- * in the PF's space available for SR-IOV.
- */
-static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
-{
- u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
- int vectors_used = ice_get_max_used_msix_vector(pf);
- int sriov_base_vector;
-
- sriov_base_vector = total_vectors - num_msix_needed;
-
- /* make sure we only grab irq_tracker entries from the list end and
- * that we have enough available MSIX vectors
- */
- if (sriov_base_vector < vectors_used)
- return -EINVAL;
-
- pf->sriov_base_vector = sriov_base_vector;
-
- return 0;
-}
-
-/**
* ice_set_per_vf_res - check if vectors and queues are available
* @pf: pointer to the PF structure
* @num_vfs: the number of SR-IOV VFs being configured
@@ -429,11 +370,9 @@ static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
*/
static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
{
- int vectors_used = ice_get_max_used_msix_vector(pf);
u16 num_msix_per_vf, num_txq, num_rxq, avail_qs;
int msix_avail_per_vf, msix_avail_for_sriov;
struct device *dev = ice_pf_to_dev(pf);
- int err;
lockdep_assert_held(&pf->vfs.table_lock);
@@ -441,8 +380,7 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
return -EINVAL;
/* determine MSI-X resources per VF */
- msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
- vectors_used;
+ msix_avail_for_sriov = pf->virt_irq_tracker.num_entries;
msix_avail_per_vf = msix_avail_for_sriov / num_vfs;
if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
@@ -481,13 +419,6 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
return -ENOSPC;
}
- err = ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs);
- if (err) {
- dev_err(dev, "Unable to set MSI-X resources for %d VFs, err %d\n",
- num_vfs, err);
- return err;
- }
-
/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq);
pf->vfs.num_msix_per = num_msix_per_vf;
@@ -498,52 +429,6 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
}
/**
- * ice_sriov_get_irqs - get irqs for SR-IOV usacase
- * @pf: pointer to PF structure
- * @needed: number of irqs to get
- *
- * This returns the first MSI-X vector index in PF space that is used by this
- * VF. This index is used when accessing PF relative registers such as
- * GLINT_VECT2FUNC and GLINT_DYN_CTL.
- * This will always be the OICR index in the AVF driver so any functionality
- * using vf->first_vector_idx for queue configuration_id: id of VF which will
- * use this irqs
- *
- * Only SRIOV specific vectors are tracked in sriov_irq_bm. SRIOV vectors are
- * allocated from the end of global irq index. First bit in sriov_irq_bm means
- * last irq index etc. It simplifies extension of SRIOV vectors.
- * They will be always located from sriov_base_vector to the last irq
- * index. While increasing/decreasing sriov_base_vector can be moved.
- */
-static int ice_sriov_get_irqs(struct ice_pf *pf, u16 needed)
-{
- int res = bitmap_find_next_zero_area(pf->sriov_irq_bm,
- pf->sriov_irq_size, 0, needed, 0);
- /* conversion from number in bitmap to global irq index */
- int index = pf->sriov_irq_size - res - needed;
-
- if (res >= pf->sriov_irq_size || index < pf->sriov_base_vector)
- return -ENOENT;
-
- bitmap_set(pf->sriov_irq_bm, res, needed);
- return index;
-}
-
-/**
- * ice_sriov_free_irqs - free irqs used by the VF
- * @pf: pointer to PF structure
- * @vf: pointer to VF structure
- */
-static void ice_sriov_free_irqs(struct ice_pf *pf, struct ice_vf *vf)
-{
- /* Move back from first vector index to first index in bitmap */
- int bm_i = pf->sriov_irq_size - vf->first_vector_idx - vf->num_msix;
-
- bitmap_clear(pf->sriov_irq_bm, bm_i, vf->num_msix);
- vf->first_vector_idx = 0;
-}
-
-/**
* ice_init_vf_vsi_res - initialize/setup VF VSI resources
* @vf: VF to initialize/setup the VSI for
*
@@ -556,7 +441,7 @@ static int ice_init_vf_vsi_res(struct ice_vf *vf)
struct ice_vsi *vsi;
int err;
- vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
+ vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
if (vf->first_vector_idx < 0)
return -ENOMEM;
@@ -856,16 +741,10 @@ err_free_entries:
*/
static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
{
- int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
int ret;
- pf->sriov_irq_bm = bitmap_zalloc(total_vectors, GFP_KERNEL);
- if (!pf->sriov_irq_bm)
- return -ENOMEM;
- pf->sriov_irq_size = total_vectors;
-
/* Disable global interrupt 0 so we don't try to handle the VFLR. */
wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
@@ -918,7 +797,6 @@ err_unroll_intr:
/* rearm interrupts here */
ice_irq_dynamic_ena(hw, NULL, NULL);
clear_bit(ICE_OICR_INTR_DIS, pf->state);
- bitmap_free(pf->sriov_irq_bm);
return ret;
}
@@ -992,16 +870,7 @@ u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev)
{
struct ice_pf *pf = pci_get_drvdata(pdev);
- return pf->sriov_irq_size - ice_get_max_used_msix_vector(pf);
-}
-
-static int ice_sriov_move_base_vector(struct ice_pf *pf, int move)
-{
- if (pf->sriov_base_vector - move < ice_get_max_used_msix_vector(pf))
- return -ENOMEM;
-
- pf->sriov_base_vector -= move;
- return 0;
+ return pf->virt_irq_tracker.num_entries;
}
static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id)
@@ -1020,7 +889,8 @@ static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id)
continue;
ice_dis_vf_mappings(tmp_vf);
- ice_sriov_free_irqs(pf, tmp_vf);
+ ice_virt_free_irqs(pf, tmp_vf->first_vector_idx,
+ tmp_vf->num_msix);
vf_ids[to_remap] = tmp_vf->vf_id;
to_remap += 1;
@@ -1032,7 +902,7 @@ static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id)
continue;
tmp_vf->first_vector_idx =
- ice_sriov_get_irqs(pf, tmp_vf->num_msix);
+ ice_virt_get_irqs(pf, tmp_vf->num_msix);
/* there is no need to rebuild VSI as we are only changing the
* vector indexes not amount of MSI-X or queues
*/
@@ -1063,7 +933,6 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
bool needs_rebuild = false;
struct ice_vsi *vsi;
struct ice_vf *vf;
- int id;
if (!ice_get_num_vfs(pf))
return -ENOENT;
@@ -1082,17 +951,7 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
if (msix_vec_count < ICE_MIN_INTR_PER_VF)
return -EINVAL;
- /* Transition of PCI VF function number to function_id */
- for (id = 0; id < pci_num_vf(pdev); id++) {
- if (vf_dev->devfn == pci_iov_virtfn_devfn(pdev, id))
- break;
- }
-
- if (id == pci_num_vf(pdev))
- return -ENOENT;
-
- vf = ice_get_vf_by_id(pf, id);
-
+ vf = ice_get_vf_by_dev(pf, vf_dev);
if (!vf)
return -ENOENT;
@@ -1102,23 +961,24 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
return -ENOENT;
}
- prev_msix = vf->num_msix;
- prev_queues = vf->num_vf_qs;
-
- if (ice_sriov_move_base_vector(pf, msix_vec_count - prev_msix)) {
+ /* No need to rebuild if we're setting to the same value */
+ if (msix_vec_count == vf->num_msix) {
ice_put_vf(vf);
- return -ENOSPC;
+ return 0;
}
+ prev_msix = vf->num_msix;
+ prev_queues = vf->num_vf_qs;
+
ice_dis_vf_mappings(vf);
- ice_sriov_free_irqs(pf, vf);
+ ice_virt_free_irqs(pf, vf->first_vector_idx, vf->num_msix);
/* Remap all VFs beside the one is now configured */
ice_sriov_remap_vectors(pf, vf->vf_id);
vf->num_msix = msix_vec_count;
vf->num_vf_qs = queues;
- vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
+ vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
if (vf->first_vector_idx < 0)
goto unroll;
@@ -1147,7 +1007,8 @@ unroll:
vf->num_msix = prev_msix;
vf->num_vf_qs = prev_queues;
- vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
+
+ vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
if (vf->first_vector_idx < 0) {
ice_put_vf(vf);
return -EINVAL;
@@ -1300,10 +1161,12 @@ static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
void
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
+ struct ice_aqc_event_lan_overflow *cmd;
u32 gldcb_rtctq, queue;
struct ice_vf *vf;
- gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
+ cmd = libie_aq_raw(&event->desc);
+ gldcb_rtctq = le32_to_cpu(cmd->prtdcb_ruptq);
dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
/* event returns device global Rx queue number */
@@ -1327,8 +1190,7 @@ ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
*/
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_vsi *vf_vsi;
struct device *dev;
struct ice_vf *vf;
@@ -1537,6 +1399,9 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
mutex_lock(&vf->cfg_lock);
+ while (!trusted && vf->num_mac_lldp)
+ ice_vf_update_mac_lldp_num(vf, ice_get_vf_vsi(vf), false);
+
vf->trusted = trusted;
ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h
index 96549ca5c52c..6c4fad09a527 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.h
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.h
@@ -3,9 +3,9 @@
#ifndef _ICE_SRIOV_H_
#define _ICE_SRIOV_H_
-#include "ice_virtchnl_fdir.h"
+#include "virt/fdir.h"
#include "ice_vf_lib.h"
-#include "ice_virtchnl.h"
+#include "virt/virtchnl.h"
/* Static VF transaction/status register def */
#define VF_DEVICE_STATUS 0xAA
@@ -64,6 +64,7 @@ bool
ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto);
u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev);
int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count);
+int ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id);
#else /* CONFIG_PCI_IOV */
static inline void ice_process_vflr_event(struct ice_pf *pf) { }
static inline void ice_free_vfs(struct ice_pf *pf) { }
@@ -164,5 +165,11 @@ ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
{
return -EOPNOTSUPP;
}
+
+static inline int ice_vf_vsi_dis_single_txq(struct ice_vf *vf,
+ struct ice_vsi *vsi, u16 q_id)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_SRIOV_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 0e740342e294..84848f0123e7 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -1511,11 +1511,11 @@ ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
struct ice_sq_cd *cd)
{
struct ice_aqc_get_sw_cfg *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
- cmd = &desc.params.get_sw_conf;
+ cmd = libie_aq_raw(&desc);
cmd->element = cpu_to_le16(*req_desc);
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
@@ -1541,11 +1541,11 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
{
struct ice_aqc_add_update_free_vsi_resp *res;
struct ice_aqc_add_get_update_free_vsi *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.vsi_cmd;
- res = &desc.params.add_update_free_vsi_res;
+ cmd = libie_aq_raw(&desc);
+ res = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
@@ -1556,7 +1556,7 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info), cd);
@@ -1585,11 +1585,11 @@ ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
{
struct ice_aqc_add_update_free_vsi_resp *resp;
struct ice_aqc_add_get_update_free_vsi *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.vsi_cmd;
- resp = &desc.params.add_update_free_vsi_res;
+ cmd = libie_aq_raw(&desc);
+ resp = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
@@ -1620,17 +1620,17 @@ ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
{
struct ice_aqc_add_update_free_vsi_resp *resp;
struct ice_aqc_add_get_update_free_vsi *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.vsi_cmd;
- resp = &desc.params.add_update_free_vsi_res;
+ cmd = libie_aq_raw(&desc);
+ resp = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info), cd);
@@ -1944,7 +1944,8 @@ int
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct ice_aqc_sw_rules *cmd;
+ struct libie_aq_desc desc;
int status;
if (opc != ice_aqc_opc_add_sw_rules &&
@@ -1953,13 +1954,13 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, opc);
+ cmd = libie_aq_raw(&desc);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
- desc.params.sw_rules.num_rules_fltr_entry_index =
- cpu_to_le16(num_rules);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
+ cmd->num_rules_fltr_entry_index = cpu_to_le16(num_rules);
status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
if (opc != ice_aqc_opc_add_sw_rules &&
- hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
+ hw->adminq.sq_last_status == LIBIE_AQ_RC_ENOENT)
status = -ENOENT;
if (!status) {
@@ -1989,14 +1990,14 @@ ice_aq_add_recipe(struct ice_hw *hw,
u16 num_recipes, struct ice_sq_cd *cd)
{
struct ice_aqc_add_get_recipe *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 buf_size;
- cmd = &desc.params.add_get_recipe;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
cmd->num_sub_recipes = cpu_to_le16(num_recipes);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
buf_size = num_recipes * sizeof(*s_recipe_list);
@@ -2026,14 +2027,14 @@ ice_aq_get_recipe(struct ice_hw *hw,
u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
{
struct ice_aqc_add_get_recipe *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 buf_size;
int status;
if (*num_recipes != ICE_MAX_NUM_RECIPES)
return -EINVAL;
- cmd = &desc.params.add_get_recipe;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
cmd->return_index = cpu_to_le16(recipe_root);
@@ -2118,9 +2119,9 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 r_assoc,
struct ice_sq_cd *cd)
{
struct ice_aqc_recipe_to_profile *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.recipe_to_profile;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
cmd->profile_id = cpu_to_le16(profile_id);
/* Set the recipe ID bit in the bitmask to let the device know which
@@ -2144,10 +2145,10 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc,
struct ice_sq_cd *cd)
{
struct ice_aqc_recipe_to_profile *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.recipe_to_profile;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
cmd->profile_id = cpu_to_le16(profile_id);
@@ -3146,7 +3147,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
u16 vsi_handle_arr[2];
/* A rule already exists with the new VSI being added */
- if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
+ if (cur_fltr->vsi_handle == new_fltr->vsi_handle)
return -EEXIST;
vsi_handle_arr[0] = cur_fltr->vsi_handle;
@@ -4784,7 +4785,8 @@ ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
*/
if (found && recp[i].tun_type == rinfo->tun_type &&
recp[i].need_pass_l2 == rinfo->need_pass_l2 &&
- recp[i].allow_pass_l2 == rinfo->allow_pass_l2)
+ recp[i].allow_pass_l2 == rinfo->allow_pass_l2 &&
+ recp[i].priority == rinfo->priority)
return i; /* Return the recipe ID */
}
}
@@ -5977,7 +5979,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,
/* A rule already exists with the new VSI being added */
if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
- return 0;
+ return -EEXIST;
/* Update the previously created VSI list set with
* the new VSI ID passed in
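The ice_switch.c hunks above all follow one mechanical pattern: the driver-private struct ice_aq_desc becomes the generic struct libie_aq_desc, and typed command/response pointers are obtained through libie_aq_raw() instead of named members of the descriptor's params union. A minimal sketch of the converted pattern, a fragment using only identifiers that already appear in the hunks (error handling omitted):

        struct ice_aqc_add_get_recipe *cmd;
        struct libie_aq_desc desc;

        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);

        /* typed view of the raw parameter area, replacing desc.params.add_get_recipe */
        cmd = libie_aq_raw(&desc);
        cmd->num_sub_recipes = cpu_to_le16(num_recipes);

        /* flag names move to the LIBIE_ prefix as well */
        desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);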
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index ea39b999a0d0..fb9ea7f8ef44 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -12,14 +12,11 @@
/**
* ice_tc_count_lkups - determine lookup count for switch filter
* @flags: TC-flower flags
- * @headers: Pointer to TC flower filter header structure
* @fltr: Pointer to outer TC filter structure
*
- * Determine lookup count based on TC flower input for switch filter.
+ * Return: lookup count based on TC flower input for a switch filter.
*/
-static int
-ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
- struct ice_tc_flower_fltr *fltr)
+static int ice_tc_count_lkups(u32 flags, struct ice_tc_flower_fltr *fltr)
{
int lkups_cnt = 1; /* 0th lookup is metadata */
@@ -684,26 +681,26 @@ static int ice_tc_setup_action(struct net_device *filter_dev,
fltr->action.fltr_act = action;
if (ice_is_port_repr_netdev(filter_dev) &&
- ice_is_port_repr_netdev(target_dev)) {
+ ice_is_port_repr_netdev(target_dev) &&
+ fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
repr = ice_netdev_to_repr(target_dev);
fltr->dest_vsi = repr->src_vsi;
- fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
} else if (ice_is_port_repr_netdev(filter_dev) &&
- ice_tc_is_dev_uplink(target_dev)) {
+ ice_tc_is_dev_uplink(target_dev) &&
+ fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
repr = ice_netdev_to_repr(filter_dev);
fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi;
- fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
} else if (ice_tc_is_dev_uplink(filter_dev) &&
- ice_is_port_repr_netdev(target_dev)) {
+ ice_is_port_repr_netdev(target_dev) &&
+ fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
repr = ice_netdev_to_repr(target_dev);
fltr->dest_vsi = repr->src_vsi;
- fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
} else {
NL_SET_ERR_MSG_MOD(fltr->extack,
- "Unsupported netdevice in switchdev mode");
+ "The action is not supported for this netdevice");
return -EINVAL;
}
@@ -716,13 +713,11 @@ ice_tc_setup_drop_action(struct net_device *filter_dev,
{
fltr->action.fltr_act = ICE_DROP_PACKET;
- if (ice_is_port_repr_netdev(filter_dev)) {
- fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
- } else if (ice_tc_is_dev_uplink(filter_dev)) {
- fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
- } else {
+ if (!ice_tc_is_dev_uplink(filter_dev) &&
+ !(ice_is_port_repr_netdev(filter_dev) &&
+ fltr->direction == ICE_ESWITCH_FLTR_INGRESS)) {
NL_SET_ERR_MSG_MOD(fltr->extack,
- "Unsupported netdevice in switchdev mode");
+ "The action is not supported for this netdevice");
return -EINVAL;
}
@@ -767,10 +762,157 @@ static int ice_eswitch_tc_parse_action(struct net_device *filter_dev,
return 0;
}
+static bool ice_is_fltr_lldp(struct ice_tc_flower_fltr *fltr)
+{
+ return fltr->outer_headers.l2_key.n_proto == htons(ETH_P_LLDP);
+}
+
+static bool ice_is_fltr_pf_tx_lldp(struct ice_tc_flower_fltr *fltr)
+{
+ struct ice_vsi *vsi = fltr->src_vsi, *uplink;
+
+ if (!ice_is_switchdev_running(vsi->back))
+ return false;
+
+ uplink = vsi->back->eswitch.uplink_vsi;
+ return vsi == uplink && fltr->action.fltr_act == ICE_DROP_PACKET &&
+ ice_is_fltr_lldp(fltr) &&
+ fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
+ fltr->flags == ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
+}
+
+static bool ice_is_fltr_vf_tx_lldp(struct ice_tc_flower_fltr *fltr)
+{
+ struct ice_vsi *vsi = fltr->src_vsi, *uplink;
+
+ uplink = vsi->back->eswitch.uplink_vsi;
+ return fltr->src_vsi->type == ICE_VSI_VF && ice_is_fltr_lldp(fltr) &&
+ fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
+ fltr->dest_vsi == uplink;
+}
+
+static struct ice_tc_flower_fltr *
+ice_find_pf_tx_lldp_fltr(struct ice_pf *pf)
+{
+ struct ice_tc_flower_fltr *fltr;
+
+ hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node)
+ if (ice_is_fltr_pf_tx_lldp(fltr))
+ return fltr;
+
+ return NULL;
+}
+
+static bool ice_any_vf_lldp_tx_ena(struct ice_pf *pf)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ ice_for_each_vf(pf, bkt, vf)
+ if (vf->lldp_tx_ena)
+ return true;
+
+ return false;
+}
+
+int ice_pass_vf_tx_lldp(struct ice_vsi *vsi, bool deinit)
+{
+ struct ice_rule_query_data remove_entry = {
+ .rid = vsi->vf->lldp_recipe_id,
+ .rule_id = vsi->vf->lldp_rule_id,
+ .vsi_handle = vsi->idx,
+ };
+ struct ice_pf *pf = vsi->back;
+ int err;
+
+ if (vsi->vf->lldp_tx_ena)
+ return 0;
+
+ if (!deinit && !ice_find_pf_tx_lldp_fltr(vsi->back))
+ return -EINVAL;
+
+ if (!deinit && ice_any_vf_lldp_tx_ena(pf))
+ return -EINVAL;
+
+ err = ice_rem_adv_rule_by_id(&pf->hw, &remove_entry);
+ if (!err)
+ vsi->vf->lldp_tx_ena = true;
+
+ return err;
+}
+
+int ice_drop_vf_tx_lldp(struct ice_vsi *vsi, bool init)
+{
+ struct ice_rule_query_data rule_added;
+ struct ice_adv_rule_info rinfo = {
+ .priority = 7,
+ .src_vsi = vsi->idx,
+ .sw_act = {
+ .src = vsi->idx,
+ .flag = ICE_FLTR_TX,
+ .fltr_act = ICE_DROP_PACKET,
+ .vsi_handle = vsi->idx,
+ },
+ .flags_info.act_valid = true,
+ };
+ struct ice_adv_lkup_elem list[3];
+ struct ice_pf *pf = vsi->back;
+ int err;
+
+ if (!init && !vsi->vf->lldp_tx_ena)
+ return 0;
+
+ memset(list, 0, sizeof(list));
+ ice_rule_add_direction_metadata(&list[0]);
+ ice_rule_add_src_vsi_metadata(&list[1]);
+ list[2].type = ICE_ETYPE_OL;
+ list[2].h_u.ethertype.ethtype_id = htons(ETH_P_LLDP);
+ list[2].m_u.ethertype.ethtype_id = htons(0xFFFF);
+
+ err = ice_add_adv_rule(&pf->hw, list, ARRAY_SIZE(list), &rinfo,
+ &rule_added);
+ if (err) {
+ dev_err(&pf->pdev->dev,
+ "Failed to add an LLDP rule to VSI 0x%X: %d\n",
+ vsi->idx, err);
+ } else {
+ vsi->vf->lldp_recipe_id = rule_added.rid;
+ vsi->vf->lldp_rule_id = rule_added.rule_id;
+ vsi->vf->lldp_tx_ena = false;
+ }
+
+ return err;
+}
+
+static void ice_handle_add_pf_lldp_drop_rule(struct ice_vsi *vsi)
+{
+ struct ice_tc_flower_fltr *fltr;
+ struct ice_pf *pf = vsi->back;
+
+ hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node) {
+ if (!ice_is_fltr_vf_tx_lldp(fltr))
+ continue;
+ ice_pass_vf_tx_lldp(fltr->src_vsi, true);
+ break;
+ }
+}
+
+static void ice_handle_del_pf_lldp_drop_rule(struct ice_pf *pf)
+{
+ int i;
+
+ /* Make the VF LLDP fwd to uplink rule dormant */
+ ice_for_each_vsi(pf, i) {
+ struct ice_vsi *vf_vsi = pf->vsi[i];
+
+ if (vf_vsi && vf_vsi->type == ICE_VSI_VF)
+ ice_drop_vf_tx_lldp(vf_vsi, false);
+ }
+}
+
static int
ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
- struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
struct ice_adv_rule_info rule_info = { 0 };
struct ice_rule_query_data rule_added;
struct ice_hw *hw = &vsi->back->hw;
@@ -785,7 +927,10 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
return -EOPNOTSUPP;
}
- lkups_cnt = ice_tc_count_lkups(flags, headers, fltr);
+ if (ice_is_fltr_vf_tx_lldp(fltr))
+ return ice_pass_vf_tx_lldp(vsi, false);
+
+ lkups_cnt = ice_tc_count_lkups(flags, fltr);
list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
if (!list)
return -ENOMEM;
@@ -814,6 +959,11 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
rule_info.sw_act.src = hw->pf_id;
rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
} else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
+ !fltr->dest_vsi && vsi == vsi->back->eswitch.uplink_vsi) {
+ /* PF to Uplink */
+ rule_info.sw_act.flag |= ICE_FLTR_TX;
+ rule_info.sw_act.src = vsi->idx;
+ } else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
fltr->dest_vsi == vsi->back->eswitch.uplink_vsi) {
/* VF to Uplink */
rule_info.sw_act.flag |= ICE_FLTR_TX;
@@ -846,11 +996,17 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exist");
ret = -EINVAL;
goto exit;
+ } else if (ret == -ENOSPC) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter: insufficient space available.");
+ goto exit;
} else if (ret) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error");
goto exit;
}
+ if (ice_is_fltr_pf_tx_lldp(fltr))
+ ice_handle_add_pf_lldp_drop_rule(vsi);
+
/* store the output params, which are needed later for removing
* advanced switch filter
*/
@@ -985,7 +1141,6 @@ static int
ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
struct ice_tc_flower_fltr *tc_fltr)
{
- struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
struct ice_adv_rule_info rule_info = {0};
struct ice_rule_query_data rule_added;
struct ice_adv_lkup_elem *list;
@@ -1021,7 +1176,7 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
return PTR_ERR(dest_vsi);
}
- lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr);
+ lkups_cnt = ice_tc_count_lkups(flags, tc_fltr);
list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
if (!list)
return -ENOMEM;
@@ -1056,8 +1211,13 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
tc_fltr->action.fwd.q.hw_queue, lkups_cnt);
break;
case ICE_DROP_PACKET:
- rule_info.sw_act.flag |= ICE_FLTR_RX;
- rule_info.sw_act.src = hw->pf_id;
+ if (tc_fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
+ rule_info.sw_act.flag |= ICE_FLTR_TX;
+ rule_info.sw_act.src = vsi->idx;
+ } else {
+ rule_info.sw_act.flag |= ICE_FLTR_RX;
+ rule_info.sw_act.src = hw->pf_id;
+ }
rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
break;
default:
@@ -1071,6 +1231,10 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
"Unable to add filter because it already exist");
ret = -EINVAL;
goto exit;
+ } else if (ret == -ENOSPC) {
+ NL_SET_ERR_MSG_MOD(tc_fltr->extack,
+ "Unable to add filter: insufficient space available.");
+ goto exit;
} else if (ret) {
NL_SET_ERR_MSG_MOD(tc_fltr->extack,
"Unable to add filter due to error");
@@ -1463,11 +1627,16 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
* @filter_dev: Pointer to device on which filter is being added
* @f: Pointer to struct flow_cls_offload
* @fltr: Pointer to filter structure
+ * @ingress: true if the rule is added to an ingress block
+ *
+ * Return: 0 if the flower was parsed successfully, -EINVAL if the flower
+ * cannot be parsed, -EOPNOTSUPP if such a filter cannot be configured
+ * for the given VSI.
*/
static int
ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
struct flow_cls_offload *f,
- struct ice_tc_flower_fltr *fltr)
+ struct ice_tc_flower_fltr *fltr, bool ingress)
{
struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
@@ -1551,6 +1720,20 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
}
+ if (!ingress) {
+ bool switchdev =
+ ice_is_eswitch_mode_switchdev(vsi->back);
+
+ if (switchdev != (n_proto_key == ETH_P_LLDP)) {
+ NL_SET_ERR_MSG_FMT_MOD(fltr->extack,
+ "%sLLDP filtering is not supported on egress in %s mode",
+ switchdev ? "Non-" : "",
+ switchdev ? "switchdev" :
+ "legacy");
+ return -EOPNOTSUPP;
+ }
+ }
+
headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
headers->l2_mask.n_proto = cpu_to_be16(n_proto_mask);
headers->l3_key.ip_proto = match.key->ip_proto;
@@ -1726,6 +1909,14 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
return -EINVAL;
}
}
+
+ /* Ingress filter on representor results in an egress filter in HW
+ * and vice versa
+ */
+ ingress = ice_is_port_repr_netdev(filter_dev) ? !ingress : ingress;
+ fltr->direction = ingress ? ICE_ESWITCH_FLTR_INGRESS :
+ ICE_ESWITCH_FLTR_EGRESS;
+
return 0;
}
@@ -1939,6 +2130,12 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
struct ice_pf *pf = vsi->back;
int err;
+ if (ice_is_fltr_pf_tx_lldp(fltr))
+ ice_handle_del_pf_lldp_drop_rule(pf);
+
+ if (ice_is_fltr_vf_tx_lldp(fltr))
+ return ice_drop_vf_tx_lldp(vsi, false);
+
rule_rem.rid = fltr->rid;
rule_rem.rule_id = fltr->rule_id;
rule_rem.vsi_handle = fltr->dest_vsi_handle;
@@ -1975,14 +2172,18 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
* @vsi: Pointer to VSI
* @f: Pointer to flower offload structure
* @__fltr: Pointer to struct ice_tc_flower_fltr
+ * @ingress: true if the rule is added to an ingress block
*
* This function parses TC-flower input fields, parses action,
* and adds a filter.
+ *
+ * Return: 0 if the filter was successfully added,
+ * negative error code otherwise.
*/
static int
ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
struct flow_cls_offload *f,
- struct ice_tc_flower_fltr **__fltr)
+ struct ice_tc_flower_fltr **__fltr, bool ingress)
{
struct ice_tc_flower_fltr *fltr;
int err;
@@ -1999,7 +2200,7 @@ ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
fltr->src_vsi = vsi;
INIT_HLIST_NODE(&fltr->tc_flower_node);
- err = ice_parse_cls_flower(netdev, vsi, f, fltr);
+ err = ice_parse_cls_flower(netdev, vsi, f, fltr, ingress);
if (err < 0)
goto err;
@@ -2042,10 +2243,13 @@ ice_find_tc_flower_fltr(struct ice_pf *pf, unsigned long cookie)
* @netdev: Pointer to filter device
* @vsi: Pointer to VSI
* @cls_flower: Pointer to flower offload structure
+ * @ingress: true if the rule is added to an ingress block
+ *
+ * Return: 0 if the flower was successfully added,
+ * negative error code otherwise.
*/
-int
-ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
- struct flow_cls_offload *cls_flower)
+int ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower, bool ingress)
{
struct netlink_ext_ack *extack = cls_flower->common.extack;
struct net_device *vsi_netdev = vsi->netdev;
@@ -2080,7 +2284,7 @@ ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
}
/* prep and add TC-flower filter in HW */
- err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr);
+ err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr, ingress);
if (err)
return err;
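The new bool ingress now threads from ice_add_cls_flower() down to ice_parse_cls_flower(), which derives fltr->direction from the block the rule was attached to (inverted for port representors) instead of having the action-parsing code set it. The call-site change is not part of this excerpt; a hedged sketch of how the flag would presumably be derived when the TC block is bound (the binder-type check and the variable bo, a struct flow_block_offload pointer, are assumptions, not shown in this patch):

        /* Sketch only -- not part of this patch: record the direction at bind time */
        bool ingress = bo->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;

        /* ... then pass it per classifier rule */
        err = ice_add_cls_flower(netdev, vsi, cls_flower, ingress);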
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
index d84f153517ec..8a3ab2f22af9 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
@@ -211,13 +211,14 @@ static inline int ice_chnl_dmac_fltr_cnt(struct ice_pf *pf)
}
struct ice_vsi *ice_locate_vsi_using_queue(struct ice_vsi *vsi, int queue);
-int
-ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
- struct flow_cls_offload *cls_flower);
-int
-ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower);
+int ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower, bool ingress);
+int ice_del_cls_flower(struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower);
void ice_replay_tc_fltrs(struct ice_pf *pf);
bool ice_is_tunnel_supported(struct net_device *dev);
+int ice_drop_vf_tx_lldp(struct ice_vsi *vsi, bool init);
+int ice_pass_vf_tx_lldp(struct ice_vsi *vsi, bool deinit);
static inline bool ice_is_forward_action(enum ice_sw_fwd_act_type fltr_act)
{
diff --git a/drivers/net/ethernet/intel/ice/ice_trace.h b/drivers/net/ethernet/intel/ice/ice_trace.h
index 07aab6e130cd..4f35ef8d6b29 100644
--- a/drivers/net/ethernet/intel/ice/ice_trace.h
+++ b/drivers/net/ethernet/intel/ice/ice_trace.h
@@ -130,7 +130,7 @@ DECLARE_EVENT_CLASS(ice_tx_template,
__entry->buf = buf;
__assign_str(devname);),
- TP_printk("netdev: %s ring: %pK desc: %pK buf %pK", __get_str(devname),
+ TP_printk("netdev: %s ring: %p desc: %p buf %p", __get_str(devname),
__entry->ring, __entry->desc, __entry->buf)
);
@@ -158,7 +158,7 @@ DECLARE_EVENT_CLASS(ice_rx_template,
__entry->desc = desc;
__assign_str(devname);),
- TP_printk("netdev: %s ring: %pK desc: %pK", __get_str(devname),
+ TP_printk("netdev: %s ring: %p desc: %p", __get_str(devname),
__entry->ring, __entry->desc)
);
DEFINE_EVENT(ice_rx_template, ice_clean_rx_irq,
@@ -182,7 +182,7 @@ DECLARE_EVENT_CLASS(ice_rx_indicate_template,
__entry->skb = skb;
__assign_str(devname);),
- TP_printk("netdev: %s ring: %pK desc: %pK skb %pK", __get_str(devname),
+ TP_printk("netdev: %s ring: %p desc: %p skb %p", __get_str(devname),
__entry->ring, __entry->desc, __entry->skb)
);
@@ -205,7 +205,7 @@ DECLARE_EVENT_CLASS(ice_xmit_template,
__entry->skb = skb;
__assign_str(devname);),
- TP_printk("netdev: %s skb: %pK ring: %pK", __get_str(devname),
+ TP_printk("netdev: %s skb: %p ring: %p", __get_str(devname),
__entry->skb, __entry->ring)
);
@@ -228,7 +228,7 @@ DECLARE_EVENT_CLASS(ice_tx_tstamp_template,
TP_fast_assign(__entry->skb = skb;
__entry->idx = idx;),
- TP_printk("skb %pK idx %d",
+ TP_printk("skb %p idx %d",
__entry->skb, __entry->idx)
);
#define DEFINE_TX_TSTAMP_OP_EVENT(name) \
diff --git a/drivers/net/ethernet/intel/ice/ice_tspll.c b/drivers/net/ethernet/intel/ice/ice_tspll.c
new file mode 100644
index 000000000000..66320a4ab86f
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_tspll.c
@@ -0,0 +1,626 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_lib.h"
+#include "ice_ptp_hw.h"
+
+static const struct
+ice_tspll_params_e82x e82x_tspll_params[NUM_ICE_TSPLL_FREQ] = {
+ [ICE_TSPLL_FREQ_25_000] = {
+ .refclk_pre_div = 1,
+ .post_pll_div = 6,
+ .feedback_div = 197,
+ .frac_n_div = 2621440,
+ },
+ [ICE_TSPLL_FREQ_122_880] = {
+ .refclk_pre_div = 5,
+ .post_pll_div = 7,
+ .feedback_div = 223,
+ .frac_n_div = 524288
+ },
+ [ICE_TSPLL_FREQ_125_000] = {
+ .refclk_pre_div = 5,
+ .post_pll_div = 7,
+ .feedback_div = 223,
+ .frac_n_div = 524288
+ },
+ [ICE_TSPLL_FREQ_153_600] = {
+ .refclk_pre_div = 5,
+ .post_pll_div = 6,
+ .feedback_div = 159,
+ .frac_n_div = 1572864
+ },
+ [ICE_TSPLL_FREQ_156_250] = {
+ .refclk_pre_div = 5,
+ .post_pll_div = 6,
+ .feedback_div = 159,
+ .frac_n_div = 1572864
+ },
+ [ICE_TSPLL_FREQ_245_760] = {
+ .refclk_pre_div = 10,
+ .post_pll_div = 7,
+ .feedback_div = 223,
+ .frac_n_div = 524288
+ },
+};
+
+/**
+ * ice_tspll_clk_freq_str - Convert time_ref_freq to string
+ * @clk_freq: Clock frequency
+ *
+ * Return: specified TIME_REF clock frequency converted to a string.
+ */
+static const char *ice_tspll_clk_freq_str(enum ice_tspll_freq clk_freq)
+{
+ switch (clk_freq) {
+ case ICE_TSPLL_FREQ_25_000:
+ return "25 MHz";
+ case ICE_TSPLL_FREQ_122_880:
+ return "122.88 MHz";
+ case ICE_TSPLL_FREQ_125_000:
+ return "125 MHz";
+ case ICE_TSPLL_FREQ_153_600:
+ return "153.6 MHz";
+ case ICE_TSPLL_FREQ_156_250:
+ return "156.25 MHz";
+ case ICE_TSPLL_FREQ_245_760:
+ return "245.76 MHz";
+ default:
+ return "Unknown";
+ }
+}
+
+/**
+ * ice_tspll_default_freq - Return default frequency for a MAC type
+ * @mac_type: MAC type
+ *
+ * Return: default TSPLL frequency for a supported MAC type, -ERANGE otherwise.
+ */
+static enum ice_tspll_freq ice_tspll_default_freq(enum ice_mac_type mac_type)
+{
+ switch (mac_type) {
+ case ICE_MAC_GENERIC:
+ return ICE_TSPLL_FREQ_25_000;
+ case ICE_MAC_GENERIC_3K_E825:
+ return ICE_TSPLL_FREQ_156_250;
+ default:
+ return -ERANGE;
+ }
+}
+
+/**
+ * ice_tspll_check_params - Check if TSPLL params are correct
+ * @hw: Pointer to the HW struct
+ * @clk_freq: Clock frequency to program
+ * @clk_src: Clock source to select (TIME_REF or TCXO)
+ *
+ * Return: true if TSPLL params are correct, false otherwise.
+ */
+static bool ice_tspll_check_params(struct ice_hw *hw,
+ enum ice_tspll_freq clk_freq,
+ enum ice_clk_src clk_src)
+{
+ if (clk_freq >= NUM_ICE_TSPLL_FREQ) {
+ dev_warn(ice_hw_to_dev(hw), "Invalid TSPLL frequency %u\n",
+ clk_freq);
+ return false;
+ }
+
+ if (clk_src >= NUM_ICE_CLK_SRC) {
+ dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
+ clk_src);
+ return false;
+ }
+
+ if ((hw->mac_type == ICE_MAC_GENERIC_3K_E825 ||
+ clk_src == ICE_CLK_SRC_TCXO) &&
+ clk_freq != ice_tspll_default_freq(hw->mac_type)) {
+ dev_warn(ice_hw_to_dev(hw), "Unsupported frequency for this clock source\n");
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ice_tspll_clk_src_str - Convert time_ref_src to string
+ * @clk_src: Clock source
+ *
+ * Return: specified clock source converted to its string name
+ */
+static const char *ice_tspll_clk_src_str(enum ice_clk_src clk_src)
+{
+ switch (clk_src) {
+ case ICE_CLK_SRC_TCXO:
+ return "TCXO";
+ case ICE_CLK_SRC_TIME_REF:
+ return "TIME_REF";
+ default:
+ return "Unknown";
+ }
+}
+
+/**
+ * ice_tspll_log_cfg - Log current/new TSPLL configuration
+ * @hw: Pointer to the HW struct
+ * @enable: CGU enabled/disabled
+ * @clk_src: Current clock source
+ * @tspll_freq: Current clock frequency
+ * @lock: CGU lock status
+ * @new_cfg: true if this is a new config
+ */
+static void ice_tspll_log_cfg(struct ice_hw *hw, bool enable, u8 clk_src,
+ u8 tspll_freq, bool lock, bool new_cfg)
+{
+ dev_dbg(ice_hw_to_dev(hw),
+ "%s TSPLL configuration -- %s, src %s, freq %s, PLL %s\n",
+ new_cfg ? "New" : "Current", str_enabled_disabled(enable),
+ ice_tspll_clk_src_str((enum ice_clk_src)clk_src),
+ ice_tspll_clk_freq_str((enum ice_tspll_freq)tspll_freq),
+ lock ? "locked" : "unlocked");
+}
+
+/**
+ * ice_tspll_cfg_e82x - Configure the Clock Generation Unit TSPLL
+ * @hw: Pointer to the HW struct
+ * @clk_freq: Clock frequency to program
+ * @clk_src: Clock source to select (TIME_REF, or TCXO)
+ *
+ * Configure the Clock Generation Unit with the desired clock frequency and
+ * time reference, enabling the PLL which drives the PTP hardware clock.
+ *
+ * Return:
+ * * %0 - success
+ * * %-EINVAL - input parameters are incorrect
+ * * %-EBUSY - failed to lock TSPLL
+ * * %other - CGU read/write failure
+ */
+static int ice_tspll_cfg_e82x(struct ice_hw *hw, enum ice_tspll_freq clk_freq,
+ enum ice_clk_src clk_src)
+{
+ u32 val, r9, r24;
+ int err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R9, &r9);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R24, &r24);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_RO_BWM_LF, &val);
+ if (err)
+ return err;
+
+ ice_tspll_log_cfg(hw, !!FIELD_GET(ICE_CGU_R23_R24_TSPLL_ENABLE, r24),
+ FIELD_GET(ICE_CGU_R23_R24_TIME_REF_SEL, r24),
+ FIELD_GET(ICE_CGU_R9_TIME_REF_FREQ_SEL, r9),
+ !!FIELD_GET(ICE_CGU_RO_BWM_LF_TRUE_LOCK, val),
+ false);
+
+ /* Disable the PLL before changing the clock source or frequency */
+ if (FIELD_GET(ICE_CGU_R23_R24_TSPLL_ENABLE, r24)) {
+ r24 &= ~ICE_CGU_R23_R24_TSPLL_ENABLE;
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R24, r24);
+ if (err)
+ return err;
+ }
+
+ /* Set the frequency */
+ r9 &= ~ICE_CGU_R9_TIME_REF_FREQ_SEL;
+ r9 |= FIELD_PREP(ICE_CGU_R9_TIME_REF_FREQ_SEL, clk_freq);
+ err = ice_write_cgu_reg(hw, ICE_CGU_R9, r9);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL feedback divisor */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R19, &val);
+ if (err)
+ return err;
+
+ val &= ~(ICE_CGU_R19_TSPLL_FBDIV_INTGR_E82X | ICE_CGU_R19_TSPLL_NDIVRATIO);
+ val |= FIELD_PREP(ICE_CGU_R19_TSPLL_FBDIV_INTGR_E82X,
+ e82x_tspll_params[clk_freq].feedback_div);
+ val |= FIELD_PREP(ICE_CGU_R19_TSPLL_NDIVRATIO, 1);
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R19, val);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL post divisor */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R22, &val);
+ if (err)
+ return err;
+
+ val &= ~(ICE_CGU_R22_TIME1588CLK_DIV |
+ ICE_CGU_R22_TIME1588CLK_DIV2);
+ val |= FIELD_PREP(ICE_CGU_R22_TIME1588CLK_DIV,
+ e82x_tspll_params[clk_freq].post_pll_div);
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R22, val);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL pre divisor and clock source */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R24, &r24);
+ if (err)
+ return err;
+
+ r24 &= ~(ICE_CGU_R23_R24_REF1588_CK_DIV | ICE_CGU_R24_FBDIV_FRAC |
+ ICE_CGU_R23_R24_TIME_REF_SEL);
+ r24 |= FIELD_PREP(ICE_CGU_R23_R24_REF1588_CK_DIV,
+ e82x_tspll_params[clk_freq].refclk_pre_div);
+ r24 |= FIELD_PREP(ICE_CGU_R24_FBDIV_FRAC,
+ e82x_tspll_params[clk_freq].frac_n_div);
+ r24 |= FIELD_PREP(ICE_CGU_R23_R24_TIME_REF_SEL, clk_src);
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R24, r24);
+ if (err)
+ return err;
+
+ /* Wait to ensure everything is stable */
+ usleep_range(10, 20);
+
+ /* Finally, enable the PLL */
+ r24 |= ICE_CGU_R23_R24_TSPLL_ENABLE;
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R24, r24);
+ if (err)
+ return err;
+
+ /* Wait at least 1 ms to verify if the PLL locks */
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_RO_BWM_LF, &val);
+ if (err)
+ return err;
+
+ if (!(val & ICE_CGU_RO_BWM_LF_TRUE_LOCK)) {
+ dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
+ return -EBUSY;
+ }
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R9, &r9);
+ if (err)
+ return err;
+ err = ice_read_cgu_reg(hw, ICE_CGU_R24, &r24);
+ if (err)
+ return err;
+
+ ice_tspll_log_cfg(hw, !!FIELD_GET(ICE_CGU_R23_R24_TSPLL_ENABLE, r24),
+ FIELD_GET(ICE_CGU_R23_R24_TIME_REF_SEL, r24),
+ FIELD_GET(ICE_CGU_R9_TIME_REF_FREQ_SEL, r9),
+ true, true);
+
+ return 0;
+}
+
+/**
+ * ice_tspll_dis_sticky_bits_e82x - disable TSPLL sticky bits
+ * @hw: Pointer to the HW struct
+ *
+ * Configure the Clock Generation Unit TSPLL sticky bits so they don't latch on
+ * losing TSPLL lock, but always show current state.
+ *
+ * Return: 0 on success, other error codes if reading or writing the CGU fails.
+ */
+static int ice_tspll_dis_sticky_bits_e82x(struct ice_hw *hw)
+{
+ u32 val;
+ int err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_CNTR_BIST, &val);
+ if (err)
+ return err;
+
+ val &= ~(ICE_CGU_CNTR_BIST_PLLLOCK_SEL_0 |
+ ICE_CGU_CNTR_BIST_PLLLOCK_SEL_1);
+
+ return ice_write_cgu_reg(hw, ICE_CGU_CNTR_BIST, val);
+}
+
+/**
+ * ice_tspll_cfg_e825c - Configure the TSPLL for E825-C
+ * @hw: Pointer to the HW struct
+ * @clk_freq: Clock frequency to program
+ * @clk_src: Clock source to select (TIME_REF, or TCXO)
+ *
+ * Configure the Clock Generation Unit with the desired clock frequency and
+ * time reference, enabling the PLL which drives the PTP hardware clock.
+ *
+ * Return:
+ * * %0 - success
+ * * %-EINVAL - input parameters are incorrect
+ * * %-EBUSY - failed to lock TSPLL
+ * * %other - CGU read/write failure
+ */
+static int ice_tspll_cfg_e825c(struct ice_hw *hw, enum ice_tspll_freq clk_freq,
+ enum ice_clk_src clk_src)
+{
+ u32 val, r9, r23;
+ int err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R9, &r9);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R23, &r23);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_RO_LOCK, &val);
+ if (err)
+ return err;
+
+ ice_tspll_log_cfg(hw, !!FIELD_GET(ICE_CGU_R23_R24_TSPLL_ENABLE, r23),
+ FIELD_GET(ICE_CGU_R23_R24_TIME_REF_SEL, r23),
+ FIELD_GET(ICE_CGU_R9_TIME_REF_FREQ_SEL, r9),
+ !!FIELD_GET(ICE_CGU_RO_LOCK_TRUE_LOCK, val),
+ false);
+
+ /* Disable the PLL before changing the clock source or frequency */
+ if (FIELD_GET(ICE_CGU_R23_R24_TSPLL_ENABLE, r23)) {
+ r23 &= ~ICE_CGU_R23_R24_TSPLL_ENABLE;
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R23, r23);
+ if (err)
+ return err;
+ }
+
+ if (FIELD_GET(ICE_CGU_R9_TIME_SYNC_EN, r9)) {
+ r9 &= ~ICE_CGU_R9_TIME_SYNC_EN;
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R9, r9);
+ if (err)
+ return err;
+ }
+
+ /* Set the frequency and enable the correct receiver */
+ r9 &= ~(ICE_CGU_R9_TIME_REF_FREQ_SEL | ICE_CGU_R9_CLK_EREF0_EN |
+ ICE_CGU_R9_TIME_REF_EN);
+ r9 |= FIELD_PREP(ICE_CGU_R9_TIME_REF_FREQ_SEL, clk_freq);
+ if (clk_src == ICE_CLK_SRC_TCXO)
+ r9 |= ICE_CGU_R9_CLK_EREF0_EN;
+ else
+ r9 |= ICE_CGU_R9_TIME_REF_EN;
+ r9 |= ICE_CGU_R9_TIME_SYNC_EN;
+ err = ice_write_cgu_reg(hw, ICE_CGU_R9, r9);
+ if (err)
+ return err;
+
+ /* Choose the referenced frequency */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R16, &val);
+ if (err)
+ return err;
+ val &= ~ICE_CGU_R16_TSPLL_CK_REFCLKFREQ;
+ val |= FIELD_PREP(ICE_CGU_R16_TSPLL_CK_REFCLKFREQ,
+ ICE_TSPLL_CK_REFCLKFREQ_E825);
+ err = ice_write_cgu_reg(hw, ICE_CGU_R16, val);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL feedback divisor */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R19, &val);
+ if (err)
+ return err;
+
+ val &= ~(ICE_CGU_R19_TSPLL_FBDIV_INTGR_E825 |
+ ICE_CGU_R19_TSPLL_NDIVRATIO);
+ val |= FIELD_PREP(ICE_CGU_R19_TSPLL_FBDIV_INTGR_E825,
+ ICE_TSPLL_FBDIV_INTGR_E825);
+ val |= FIELD_PREP(ICE_CGU_R19_TSPLL_NDIVRATIO,
+ ICE_TSPLL_NDIVRATIO_E825);
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R19, val);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL post divisor; these two are constant */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R22, &val);
+ if (err)
+ return err;
+
+ val &= ~(ICE_CGU_R22_TIME1588CLK_DIV |
+ ICE_CGU_R22_TIME1588CLK_DIV2);
+ val |= FIELD_PREP(ICE_CGU_R22_TIME1588CLK_DIV, 5);
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R22, val);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL pre divisor (constant) and clock source */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R23, &r23);
+ if (err)
+ return err;
+
+ r23 &= ~(ICE_CGU_R23_R24_REF1588_CK_DIV | ICE_CGU_R23_R24_TIME_REF_SEL);
+ r23 |= FIELD_PREP(ICE_CGU_R23_R24_TIME_REF_SEL, clk_src);
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R23, r23);
+ if (err)
+ return err;
+
+ /* Clear the R24 register. */
+ err = ice_write_cgu_reg(hw, ICE_CGU_R24, 0);
+ if (err)
+ return err;
+
+ /* Wait to ensure everything is stable */
+ usleep_range(10, 20);
+
+ /* Finally, enable the PLL */
+ r23 |= ICE_CGU_R23_R24_TSPLL_ENABLE;
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R23, r23);
+ if (err)
+ return err;
+
+ /* Wait at least 1 ms to verify if the PLL locks */
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_RO_LOCK, &val);
+ if (err)
+ return err;
+
+ if (!(val & ICE_CGU_RO_LOCK_TRUE_LOCK)) {
+ dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
+ return -EBUSY;
+ }
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R9, &r9);
+ if (err)
+ return err;
+ err = ice_read_cgu_reg(hw, ICE_CGU_R23, &r23);
+ if (err)
+ return err;
+
+ ice_tspll_log_cfg(hw, !!FIELD_GET(ICE_CGU_R23_R24_TSPLL_ENABLE, r23),
+ FIELD_GET(ICE_CGU_R23_R24_TIME_REF_SEL, r23),
+ FIELD_GET(ICE_CGU_R9_TIME_REF_FREQ_SEL, r9),
+ true, true);
+
+ return 0;
+}
+
+/**
+ * ice_tspll_dis_sticky_bits_e825c - disable TSPLL sticky bits for E825-C
+ * @hw: Pointer to the HW struct
+ *
+ * Configure the Clock Generation Unit TSPLL sticky bits so they don't latch on
+ * losing TSPLL lock, but always show current state.
+ *
+ * Return: 0 on success, other error codes if reading or writing the CGU fails.
+ */
+static int ice_tspll_dis_sticky_bits_e825c(struct ice_hw *hw)
+{
+ u32 val;
+ int err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_BW_TDC, &val);
+ if (err)
+ return err;
+
+ val &= ~ICE_CGU_BW_TDC_PLLLOCK_SEL;
+
+ return ice_write_cgu_reg(hw, ICE_CGU_BW_TDC, val);
+}
+
+/**
+ * ice_tspll_cfg_pps_out_e825c - Enable/disable 1PPS output and set amplitude
+ * @hw: pointer to the HW struct
+ * @enable: true to enable 1PPS output, false to disable it
+ *
+ * Return: 0 on success, other negative error code if a CGU read/write fails.
+ */
+int ice_tspll_cfg_pps_out_e825c(struct ice_hw *hw, bool enable)
+{
+ u32 val;
+ int err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R9, &val);
+ if (err)
+ return err;
+
+ val &= ~(ICE_CGU_R9_ONE_PPS_OUT_EN | ICE_CGU_R9_ONE_PPS_OUT_AMP);
+ val |= FIELD_PREP(ICE_CGU_R9_ONE_PPS_OUT_EN, enable) |
+ ICE_CGU_R9_ONE_PPS_OUT_AMP;
+
+ return ice_write_cgu_reg(hw, ICE_CGU_R9, val);
+}
+
+/**
+ * ice_tspll_cfg - Configure the Clock Generation Unit TSPLL
+ * @hw: Pointer to the HW struct
+ * @clk_freq: Clock frequency to program
+ * @clk_src: Clock source to select (TIME_REF, or TCXO)
+ *
+ * Configure the Clock Generation Unit with the desired clock frequency and
+ * time reference, enabling the TSPLL which drives the PTP hardware clock.
+ *
+ * Return: 0 on success, -ERANGE on unsupported MAC type, other negative error
+ * codes if configuring the CGU fails.
+ */
+static int ice_tspll_cfg(struct ice_hw *hw, enum ice_tspll_freq clk_freq,
+ enum ice_clk_src clk_src)
+{
+ switch (hw->mac_type) {
+ case ICE_MAC_GENERIC:
+ return ice_tspll_cfg_e82x(hw, clk_freq, clk_src);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_tspll_cfg_e825c(hw, clk_freq, clk_src);
+ default:
+ return -ERANGE;
+ }
+}
+
+/**
+ * ice_tspll_dis_sticky_bits - disable TSPLL sticky bits
+ * @hw: Pointer to the HW struct
+ *
+ * Configure the Clock Generation Unit TSPLL sticky bits so they don't latch on
+ * losing TSPLL lock, but always show current state.
+ *
+ * Return: 0 on success, -ERANGE on unsupported MAC type.
+ */
+static int ice_tspll_dis_sticky_bits(struct ice_hw *hw)
+{
+ switch (hw->mac_type) {
+ case ICE_MAC_GENERIC:
+ return ice_tspll_dis_sticky_bits_e82x(hw);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_tspll_dis_sticky_bits_e825c(hw);
+ default:
+ return -ERANGE;
+ }
+}
+
+/**
+ * ice_tspll_init - Initialize TSPLL with settings from firmware
+ * @hw: Pointer to the HW structure
+ *
+ * Initialize the Clock Generation Unit of the E82X/E825 device.
+ *
+ * Return: 0 on success, other error codes if CGU read/write/configuration fails.
+ */
+int ice_tspll_init(struct ice_hw *hw)
+{
+ struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
+ enum ice_tspll_freq tspll_freq;
+ enum ice_clk_src clk_src;
+ int err;
+
+ /* Only E822, E823 and E825 products support TSPLL */
+ if (hw->mac_type != ICE_MAC_GENERIC &&
+ hw->mac_type != ICE_MAC_GENERIC_3K_E825)
+ return 0;
+
+ tspll_freq = (enum ice_tspll_freq)ts_info->time_ref;
+ clk_src = (enum ice_clk_src)ts_info->clk_src;
+ if (!ice_tspll_check_params(hw, tspll_freq, clk_src))
+ return -EINVAL;
+
+ /* Disable sticky lock detection so lock status reported is accurate */
+ err = ice_tspll_dis_sticky_bits(hw);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL using the parameters from the function
+ * capabilities.
+ */
+ err = ice_tspll_cfg(hw, tspll_freq, clk_src);
+ if (err) {
+ dev_warn(ice_hw_to_dev(hw), "Failed to lock TSPLL to predefined frequency. Retrying with fallback frequency.\n");
+
+ /* Try to lock to internal TCXO as a fallback. */
+ tspll_freq = ice_tspll_default_freq(hw->mac_type);
+ clk_src = ICE_CLK_SRC_TCXO;
+ err = ice_tspll_cfg(hw, tspll_freq, clk_src);
+ if (err)
+ dev_warn(ice_hw_to_dev(hw), "Failed to lock TSPLL to fallback frequency.\n");
+ }
+
+ return err;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_tspll.h b/drivers/net/ethernet/intel/ice/ice_tspll.h
new file mode 100644
index 000000000000..c0b1232cc07c
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_tspll.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025, Intel Corporation. */
+
+#ifndef _ICE_TSPLL_H_
+#define _ICE_TSPLL_H_
+
+/**
+ * struct ice_tspll_params_e82x - E82X TSPLL parameters
+ * @refclk_pre_div: Reference clock pre-divisor
+ * @post_pll_div: Post PLL divisor
+ * @feedback_div: Feedback divisor
+ * @frac_n_div: Fractional divisor
+ *
+ * Clock Generation Unit parameters used to program the PLL based on the
+ * selected TIME_REF/TCXO frequency.
+ */
+struct ice_tspll_params_e82x {
+ u8 refclk_pre_div;
+ u8 post_pll_div;
+ u8 feedback_div;
+ u32 frac_n_div;
+};
+
+#define ICE_TSPLL_CK_REFCLKFREQ_E825 0x1F
+#define ICE_TSPLL_NDIVRATIO_E825 5
+#define ICE_TSPLL_FBDIV_INTGR_E825 256
+
+int ice_tspll_cfg_pps_out_e825c(struct ice_hw *hw, bool enable);
+int ice_tspll_init(struct ice_hw *hw);
+
+#endif /* _ICE_TSPLL_H_ */
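Besides the 1PPS helper, ice_tspll_init() is the only entry point the new header exports. Its caller is not part of this excerpt; a hedged sketch of how the PTP bring-up path would presumably invoke it (ice_hw_to_dev() is an existing driver helper also used in ice_tspll.c above; the placement of the call is an assumption):

        int err;

        /* Assumed call site: once during PTP/clock bring-up for E82X/E825 parts */
        err = ice_tspll_init(hw);
        if (err)
                dev_err(ice_hw_to_dev(hw), "TSPLL initialization failed: %d\n", err);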
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 5d2d7736fd5f..ad76768a4232 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -7,6 +7,8 @@
#include <linux/netdevice.h>
#include <linux/prefetch.h>
#include <linux/bpf_trace.h>
+#include <linux/net/intel/libie/rx.h>
+#include <net/libeth/xdp.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/xdp.h>
@@ -20,7 +22,6 @@
#define ICE_RX_HDR_SIZE 256
-#define FDIR_DESC_RXDID 0x40
#define ICE_FDIR_CLEAN_DELAY 10
/**
@@ -112,7 +113,7 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
static void
ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
{
- if (dma_unmap_len(tx_buf, len))
+ if (tx_buf->type != ICE_TX_BUF_XDP_TX && dma_unmap_len(tx_buf, len))
dma_unmap_page(ring->dev,
dma_unmap_addr(tx_buf, dma),
dma_unmap_len(tx_buf, len),
@@ -126,7 +127,7 @@ ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
dev_kfree_skb_any(tx_buf->skb);
break;
case ICE_TX_BUF_XDP_TX:
- page_frag_free(tx_buf->raw_buf);
+ libeth_xdp_return_va(tx_buf->raw_buf, false);
break;
case ICE_TX_BUF_XDP_XMIT:
xdp_return_frame(tx_buf->xdpf);
@@ -145,6 +146,56 @@ static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring)
}
/**
+ * ice_clean_tstamp_ring - clean time stamp ring
+ * @tx_ring: Tx ring to clean the Time Stamp ring for
+ */
+static void ice_clean_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+ u32 size;
+
+ if (!tstamp_ring->desc)
+ return;
+
+ size = ALIGN(tstamp_ring->count * sizeof(struct ice_ts_desc),
+ PAGE_SIZE);
+ memset(tstamp_ring->desc, 0, size);
+ tstamp_ring->next_to_use = 0;
+}
+
+/**
+ * ice_free_tstamp_ring - free time stamp resources per queue
+ * @tx_ring: Tx ring to free the Time Stamp ring for
+ */
+void ice_free_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+ u32 size;
+
+ if (!tstamp_ring->desc)
+ return;
+
+ ice_clean_tstamp_ring(tx_ring);
+ size = ALIGN(tstamp_ring->count * sizeof(struct ice_ts_desc),
+ PAGE_SIZE);
+ dmam_free_coherent(tx_ring->dev, size, tstamp_ring->desc,
+ tstamp_ring->dma);
+ tstamp_ring->desc = NULL;
+}
+
+/**
+ * ice_free_tx_tstamp_ring - free time stamp resources per Tx ring
+ * @tx_ring: Tx ring to free the Time Stamp ring for
+ */
+void ice_free_tx_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ ice_free_tstamp_ring(tx_ring);
+ kfree_rcu(tx_ring->tstamp_ring, rcu);
+ tx_ring->tstamp_ring = NULL;
+ tx_ring->flags &= ~ICE_TX_FLAGS_TXTIME;
+}
+
+/**
* ice_clean_tx_ring - Free any empty Tx buffers
* @tx_ring: ring to be cleaned
*/
@@ -182,6 +233,9 @@ tx_skip_free:
/* cleanup Tx queue statistics */
netdev_tx_reset_queue(txring_txq(tx_ring));
+
+ if (ice_is_txtime_cfg(tx_ring))
+ ice_free_tx_tstamp_ring(tx_ring);
}
/**
@@ -333,6 +387,84 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
}
/**
+ * ice_alloc_tstamp_ring - allocate the Time Stamp ring
+ * @tx_ring: Tx ring to allocate the Time Stamp ring for
+ *
+ * Return: 0 on success, negative on error
+ */
+static int ice_alloc_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ struct ice_tstamp_ring *tstamp_ring;
+
+ /* allocate with kzalloc(), free with kfree_rcu() */
+ tstamp_ring = kzalloc(sizeof(*tstamp_ring), GFP_KERNEL);
+ if (!tstamp_ring)
+ return -ENOMEM;
+
+ tstamp_ring->tx_ring = tx_ring;
+ tx_ring->tstamp_ring = tstamp_ring;
+ tstamp_ring->desc = NULL;
+ tstamp_ring->count = ice_calc_ts_ring_count(tx_ring);
+ tx_ring->flags |= ICE_TX_FLAGS_TXTIME;
+ return 0;
+}
+
+/**
+ * ice_setup_tstamp_ring - allocate the Time Stamp ring
+ * @tx_ring: Tx ring to set up the Time Stamp ring for
+ *
+ * Return: 0 on success, negative on error
+ */
+static int ice_setup_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+ struct device *dev = tx_ring->dev;
+ u32 size;
+
+ /* round up to nearest page */
+ size = ALIGN(tstamp_ring->count * sizeof(struct ice_ts_desc),
+ PAGE_SIZE);
+ tstamp_ring->desc = dmam_alloc_coherent(dev, size, &tstamp_ring->dma,
+ GFP_KERNEL);
+ if (!tstamp_ring->desc) {
+ dev_err(dev, "Unable to allocate memory for Time stamp Ring, size=%d\n",
+ size);
+ return -ENOMEM;
+ }
+
+ tstamp_ring->next_to_use = 0;
+ return 0;
+}
+
+/**
+ * ice_alloc_setup_tstamp_ring - Allocate and setup the Time Stamp ring
+ * @tx_ring: Tx ring to allocate and setup the Time Stamp ring for
+ *
+ * Return: 0 on success, negative on error
+ */
+int ice_alloc_setup_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ struct device *dev = tx_ring->dev;
+ int err;
+
+ err = ice_alloc_tstamp_ring(tx_ring);
+ if (err) {
+ dev_err(dev, "Unable to allocate Time stamp ring for Tx ring %d\n",
+ tx_ring->q_index);
+ return err;
+ }
+
+ err = ice_setup_tstamp_ring(tx_ring);
+ if (err) {
+ dev_err(dev, "Unable to setup Time stamp ring for Tx ring %d\n",
+ tx_ring->q_index);
+ ice_free_tx_tstamp_ring(tx_ring);
+ return err;
+ }
+ return 0;
+}
+
+/**
* ice_setup_tx_ring - Allocate the Tx descriptors
* @tx_ring: the Tx ring to set up
*
@@ -376,61 +508,67 @@ err:
return -ENOMEM;
}
+void ice_rxq_pp_destroy(struct ice_rx_ring *rq)
+{
+ struct libeth_fq fq = {
+ .fqes = rq->rx_fqes,
+ .pp = rq->pp,
+ };
+
+ libeth_rx_fq_destroy(&fq);
+ rq->rx_fqes = NULL;
+ rq->pp = NULL;
+
+ if (!rq->hdr_pp)
+ return;
+
+ fq.fqes = rq->hdr_fqes;
+ fq.pp = rq->hdr_pp;
+
+ libeth_rx_fq_destroy(&fq);
+ rq->hdr_fqes = NULL;
+ rq->hdr_pp = NULL;
+}
+
/**
* ice_clean_rx_ring - Free Rx buffers
* @rx_ring: ring to be cleaned
*/
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
- struct xdp_buff *xdp = &rx_ring->xdp;
- struct device *dev = rx_ring->dev;
u32 size;
- u16 i;
-
- /* ring already cleared, nothing to do */
- if (!rx_ring->rx_buf)
- return;
if (rx_ring->xsk_pool) {
ice_xsk_clean_rx_ring(rx_ring);
goto rx_skip_free;
}
- if (xdp->data) {
- xdp_return_buff(xdp);
- xdp->data = NULL;
- }
+ /* ring already cleared, nothing to do */
+ if (!rx_ring->rx_fqes)
+ return;
+
+ libeth_xdp_return_stash(&rx_ring->xdp);
/* Free all the Rx ring sk_buffs */
- for (i = 0; i < rx_ring->count; i++) {
- struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
+ for (u32 i = rx_ring->next_to_clean; i != rx_ring->next_to_use; ) {
+ libeth_rx_recycle_slow(rx_ring->rx_fqes[i].netmem);
- if (!rx_buf->page)
- continue;
+ if (rx_ring->hdr_pp)
+ libeth_rx_recycle_slow(rx_ring->hdr_fqes[i].netmem);
- /* Invalidate cache lines that may have been written to by
- * device so that we avoid corrupting memory.
- */
- dma_sync_single_range_for_cpu(dev, rx_buf->dma,
- rx_buf->page_offset,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
-
- /* free resources associated with mapping */
- dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
- __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
-
- rx_buf->page = NULL;
- rx_buf->page_offset = 0;
+ if (unlikely(++i == rx_ring->count))
+ i = 0;
}
-rx_skip_free:
- if (rx_ring->xsk_pool)
- memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf)));
- else
- memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));
+ if (rx_ring->vsi->type == ICE_VSI_PF &&
+ xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) {
+ xdp_rxq_info_detach_mem_model(&rx_ring->xdp_rxq);
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+ }
+ ice_rxq_pp_destroy(rx_ring);
+
+rx_skip_free:
/* Zero out the descriptor ring */
size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
PAGE_SIZE);
@@ -438,7 +576,6 @@ rx_skip_free:
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
- rx_ring->first_desc = 0;
rx_ring->next_to_use = 0;
}
@@ -450,26 +587,20 @@ rx_skip_free:
*/
void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
{
+ struct device *dev = ice_pf_to_dev(rx_ring->vsi->back);
u32 size;
ice_clean_rx_ring(rx_ring);
- if (rx_ring->vsi->type == ICE_VSI_PF)
- if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
- xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
WRITE_ONCE(rx_ring->xdp_prog, NULL);
if (rx_ring->xsk_pool) {
kfree(rx_ring->xdp_buf);
rx_ring->xdp_buf = NULL;
- } else {
- kfree(rx_ring->rx_buf);
- rx_ring->rx_buf = NULL;
}
if (rx_ring->desc) {
size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
PAGE_SIZE);
- dmam_free_coherent(rx_ring->dev, size,
- rx_ring->desc, rx_ring->dma);
+ dmam_free_coherent(dev, size, rx_ring->desc, rx_ring->dma);
rx_ring->desc = NULL;
}
}
@@ -482,19 +613,9 @@ void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
*/
int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
{
- struct device *dev = rx_ring->dev;
+ struct device *dev = ice_pf_to_dev(rx_ring->vsi->back);
u32 size;
- if (!dev)
- return -ENOMEM;
-
- /* warn if we are about to overwrite the pointer */
- WARN_ON(rx_ring->rx_buf);
- rx_ring->rx_buf =
- kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
- if (!rx_ring->rx_buf)
- return -ENOMEM;
-
/* round up to nearest page */
size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
PAGE_SIZE);
@@ -503,22 +624,16 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
if (!rx_ring->desc) {
dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
size);
- goto err;
+ return -ENOMEM;
}
rx_ring->next_to_use = 0;
rx_ring->next_to_clean = 0;
- rx_ring->first_desc = 0;
if (ice_is_xdp_ena_vsi(rx_ring->vsi))
WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
return 0;
-
-err:
- kfree(rx_ring->rx_buf);
- rx_ring->rx_buf = NULL;
- return -ENOMEM;
}
/**
@@ -527,15 +642,14 @@ err:
* @xdp: xdp_buff used as input to the XDP program
* @xdp_prog: XDP program to run
* @xdp_ring: ring to be used for XDP_TX action
- * @rx_buf: Rx buffer to store the XDP action
* @eop_desc: Last descriptor in packet to read metadata from
*
* Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
*/
-static void
-ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+static u32
+ice_run_xdp(struct ice_rx_ring *rx_ring, struct libeth_xdp_buff *xdp,
struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
- struct ice_rx_buf *rx_buf, union ice_32b_rx_flex_desc *eop_desc)
+ union ice_32b_rx_flex_desc *eop_desc)
{
unsigned int ret = ICE_XDP_PASS;
u32 act;
@@ -543,23 +657,23 @@ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
if (!xdp_prog)
goto exit;
- ice_xdp_meta_set_desc(xdp, eop_desc);
+ xdp->desc = eop_desc;
- act = bpf_prog_run_xdp(xdp_prog, xdp);
+ act = bpf_prog_run_xdp(xdp_prog, &xdp->base);
switch (act) {
case XDP_PASS:
break;
case XDP_TX:
if (static_branch_unlikely(&ice_xdp_locking_key))
spin_lock(&xdp_ring->tx_lock);
- ret = __ice_xmit_xdp_ring(xdp, xdp_ring, false);
+ ret = __ice_xmit_xdp_ring(&xdp->base, xdp_ring, false);
if (static_branch_unlikely(&ice_xdp_locking_key))
spin_unlock(&xdp_ring->tx_lock);
if (ret == ICE_XDP_CONSUMED)
goto out_failure;
break;
case XDP_REDIRECT:
- if (xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))
+ if (xdp_do_redirect(rx_ring->netdev, &xdp->base, xdp_prog))
goto out_failure;
ret = ICE_XDP_REDIR;
break;
@@ -571,10 +685,12 @@ out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_DROP:
+ libeth_xdp_return_buff(xdp);
ret = ICE_XDP_CONSUMED;
}
+
exit:
- ice_set_rx_bufs_act(xdp, rx_ring, ret);
+ return ret;
}
/**
@@ -661,50 +777,34 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
}
/**
- * ice_alloc_mapped_page - recycle or make a new page
- * @rx_ring: ring to use
- * @bi: rx_buf struct to modify
- *
- * Returns true if the page was successfully allocated or
- * reused.
+ * ice_init_ctrl_rx_descs - Initialize Rx descriptors for the control VSI.
+ * @rx_ring: ring to init descriptors on
+ * @count: number of descriptors to initialize
*/
-static bool
-ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
+void ice_init_ctrl_rx_descs(struct ice_rx_ring *rx_ring, u32 count)
{
- struct page *page = bi->page;
- dma_addr_t dma;
-
- /* since we are recycling buffers we should seldom need to alloc */
- if (likely(page))
- return true;
+ union ice_32b_rx_flex_desc *rx_desc;
+ u32 ntu = rx_ring->next_to_use;
- /* alloc new page for storage */
- page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
- if (unlikely(!page)) {
- rx_ring->ring_stats->rx_stats.alloc_page_failed++;
- return false;
- }
+ if (!count)
+ return;
- /* map page for use */
- dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
+ rx_desc = ICE_RX_DESC(rx_ring, ntu);
- /* if mapping failed free memory back to system since
- * there isn't much point in holding memory we can't use
- */
- if (dma_mapping_error(rx_ring->dev, dma)) {
- __free_pages(page, ice_rx_pg_order(rx_ring));
- rx_ring->ring_stats->rx_stats.alloc_page_failed++;
- return false;
- }
+ do {
+ rx_desc++;
+ ntu++;
+ if (unlikely(ntu == rx_ring->count)) {
+ rx_desc = ICE_RX_DESC(rx_ring, 0);
+ ntu = 0;
+ }
- bi->dma = dma;
- bi->page = page;
- bi->page_offset = rx_ring->rx_offset;
- page_ref_add(page, USHRT_MAX - 1);
- bi->pagecnt_bias = USHRT_MAX;
+ rx_desc->wb.status_error0 = 0;
+ count--;
+ } while (count);
- return true;
+ if (rx_ring->next_to_use != ntu)
+ ice_release_rx_desc(rx_ring, ntu);
}
/**
@@ -722,41 +822,60 @@ ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
*/
bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count)
{
+ const struct libeth_fq_fp hdr_fq = {
+ .pp = rx_ring->hdr_pp,
+ .fqes = rx_ring->hdr_fqes,
+ .truesize = rx_ring->hdr_truesize,
+ .count = rx_ring->count,
+ };
+ const struct libeth_fq_fp fq = {
+ .pp = rx_ring->pp,
+ .fqes = rx_ring->rx_fqes,
+ .truesize = rx_ring->truesize,
+ .count = rx_ring->count,
+ };
union ice_32b_rx_flex_desc *rx_desc;
u16 ntu = rx_ring->next_to_use;
- struct ice_rx_buf *bi;
/* do nothing if no valid netdev defined */
- if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
- !cleaned_count)
+ if (!rx_ring->netdev || !cleaned_count)
return false;
/* get the Rx descriptor and buffer based on next_to_use */
rx_desc = ICE_RX_DESC(rx_ring, ntu);
- bi = &rx_ring->rx_buf[ntu];
do {
- /* if we fail here, we have work remaining */
- if (!ice_alloc_mapped_page(rx_ring, bi))
- break;
+ dma_addr_t addr;
- /* sync the buffer for use by the device */
- dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
- bi->page_offset,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
+ addr = libeth_rx_alloc(&fq, ntu);
+ if (addr == DMA_MAPPING_ERROR) {
+ rx_ring->ring_stats->rx_stats.alloc_page_failed++;
+ break;
+ }
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
+ rx_desc->read.pkt_addr = cpu_to_le64(addr);
+
+ if (!hdr_fq.pp)
+ goto next;
+
+ addr = libeth_rx_alloc(&hdr_fq, ntu);
+ if (addr == DMA_MAPPING_ERROR) {
+ rx_ring->ring_stats->rx_stats.alloc_page_failed++;
+
+ libeth_rx_recycle_slow(fq.fqes[ntu].netmem);
+ break;
+ }
+
+ rx_desc->read.hdr_addr = cpu_to_le64(addr);
+next:
rx_desc++;
- bi++;
ntu++;
if (unlikely(ntu == rx_ring->count)) {
rx_desc = ICE_RX_DESC(rx_ring, 0);
- bi = rx_ring->rx_buf;
ntu = 0;
}
@@ -773,334 +892,41 @@ bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count)
}
/**
- * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
- * @rx_buf: Rx buffer to adjust
- * @size: Size of adjustment
- *
- * Update the offset within page so that Rx buf will be ready to be reused.
- * For systems with PAGE_SIZE < 8192 this function will flip the page offset
- * so the second half of page assigned to Rx buffer will be used, otherwise
- * the offset is moved by "size" bytes
- */
-static void
-ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
-{
-#if (PAGE_SIZE < 8192)
- /* flip page offset to other buffer */
- rx_buf->page_offset ^= size;
-#else
- /* move offset up to the next cache line */
- rx_buf->page_offset += size;
-#endif
-}
-
-/**
- * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
- * @rx_buf: buffer containing the page
- *
- * If page is reusable, we have a green light for calling ice_reuse_rx_page,
- * which will assign the current buffer to the buffer that next_to_alloc is
- * pointing to; otherwise, the DMA mapping needs to be destroyed and
- * page freed
- */
-static bool
-ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
-{
- unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
- struct page *page = rx_buf->page;
-
- /* avoid re-using remote and pfmemalloc pages */
- if (!dev_page_is_reusable(page))
- return false;
-
- /* if we are only owner of page we can reuse it */
- if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1))
- return false;
-#if (PAGE_SIZE >= 8192)
-#define ICE_LAST_OFFSET \
- (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_3072)
- if (rx_buf->page_offset > ICE_LAST_OFFSET)
- return false;
-#endif /* PAGE_SIZE >= 8192) */
-
- /* If we have drained the page fragment pool we need to update
- * the pagecnt_bias and page count so that we fully restock the
- * number of references the driver holds.
- */
- if (unlikely(pagecnt_bias == 1)) {
- page_ref_add(page, USHRT_MAX - 1);
- rx_buf->pagecnt_bias = USHRT_MAX;
- }
-
- return true;
-}
-
-/**
- * ice_add_xdp_frag - Add contents of Rx buffer to xdp buf as a frag
- * @rx_ring: Rx descriptor ring to transact packets on
- * @xdp: xdp buff to place the data into
- * @rx_buf: buffer containing page to add
- * @size: packet length from rx_desc
- *
- * This function will add the data contained in rx_buf->page to the xdp buf.
- * It will just attach the page as a frag.
- */
-static int
-ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
- struct ice_rx_buf *rx_buf, const unsigned int size)
-{
- struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
-
- if (!size)
- return 0;
-
- if (!xdp_buff_has_frags(xdp)) {
- sinfo->nr_frags = 0;
- sinfo->xdp_frags_size = 0;
- xdp_buff_set_frags_flag(xdp);
- }
-
- if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
- ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
- return -ENOMEM;
- }
-
- __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
- rx_buf->page_offset, size);
- sinfo->xdp_frags_size += size;
- /* remember frag count before XDP prog execution; bpf_xdp_adjust_tail()
- * can pop off frags but driver has to handle it on its own
- */
- rx_ring->nr_frags = sinfo->nr_frags;
-
- if (page_is_pfmemalloc(rx_buf->page))
- xdp_buff_set_frag_pfmemalloc(xdp);
-
- return 0;
-}
-
-/**
- * ice_reuse_rx_page - page flip buffer and store it back on the ring
- * @rx_ring: Rx descriptor ring to store buffers on
- * @old_buf: donor buffer to have page reused
- *
- * Synchronizes page for reuse by the adapter
- */
-static void
-ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
-{
- u16 nta = rx_ring->next_to_alloc;
- struct ice_rx_buf *new_buf;
-
- new_buf = &rx_ring->rx_buf[nta];
-
- /* update, and store next to alloc */
- nta++;
- rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
- /* Transfer page from old buffer to new buffer.
- * Move each member individually to avoid possible store
- * forwarding stalls and unnecessary copy of skb.
- */
- new_buf->dma = old_buf->dma;
- new_buf->page = old_buf->page;
- new_buf->page_offset = old_buf->page_offset;
- new_buf->pagecnt_bias = old_buf->pagecnt_bias;
-}
-
-/**
- * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
- * @rx_ring: Rx descriptor ring to transact packets on
- * @size: size of buffer to add to skb
- * @ntc: index of next to clean element
- *
- * This function will pull an Rx buffer from the ring and synchronize it
- * for use by the CPU.
- */
-static struct ice_rx_buf *
-ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
- const unsigned int ntc)
-{
- struct ice_rx_buf *rx_buf;
-
- rx_buf = &rx_ring->rx_buf[ntc];
- rx_buf->pgcnt = page_count(rx_buf->page);
- prefetchw(rx_buf->page);
-
- if (!size)
- return rx_buf;
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
- rx_buf->page_offset, size,
- DMA_FROM_DEVICE);
-
- /* We have pulled a buffer for use, so decrement pagecnt_bias */
- rx_buf->pagecnt_bias--;
-
- return rx_buf;
-}
-
-/**
- * ice_build_skb - Build skb around an existing buffer
- * @rx_ring: Rx descriptor ring to transact packets on
- * @xdp: xdp_buff pointing to the data
- *
- * This function builds an skb around an existing XDP buffer, taking care
- * to set up the skb correctly and avoid any memcpy overhead. Driver has
- * already combined frags (if any) to skb_shared_info.
- */
-static struct sk_buff *
-ice_build_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
-{
- u8 metasize = xdp->data - xdp->data_meta;
- struct skb_shared_info *sinfo = NULL;
- unsigned int nr_frags;
- struct sk_buff *skb;
-
- if (unlikely(xdp_buff_has_frags(xdp))) {
- sinfo = xdp_get_shared_info_from_buff(xdp);
- nr_frags = sinfo->nr_frags;
- }
-
- /* Prefetch first cache line of first page. If xdp->data_meta
- * is unused, this points exactly as xdp->data, otherwise we
- * likely have a consumer accessing first few bytes of meta
- * data, and then actual data.
- */
- net_prefetch(xdp->data_meta);
- /* build an skb around the page buffer */
- skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
- if (unlikely(!skb))
- return NULL;
-
- /* must to record Rx queue, otherwise OS features such as
- * symmetric queue won't work
- */
- skb_record_rx_queue(skb, rx_ring->q_index);
-
- /* update pointers within the skb to store the data */
- skb_reserve(skb, xdp->data - xdp->data_hard_start);
- __skb_put(skb, xdp->data_end - xdp->data);
- if (metasize)
- skb_metadata_set(skb, metasize);
-
- if (unlikely(xdp_buff_has_frags(xdp)))
- xdp_update_skb_shared_info(skb, nr_frags,
- sinfo->xdp_frags_size,
- nr_frags * xdp->frame_sz,
- xdp_buff_is_frag_pfmemalloc(xdp));
-
- return skb;
-}
-
-/**
- * ice_construct_skb - Allocate skb and populate it
- * @rx_ring: Rx descriptor ring to transact packets on
- * @xdp: xdp_buff pointing to the data
+ * ice_clean_ctrl_rx_irq - Clean descriptors from flow director Rx ring
+ * @rx_ring: Rx descriptor ring for ctrl_vsi to transact packets on
*
- * This function allocates an skb. It then populates it with the page
- * data from the current receive descriptor, taking care to set up the
- * skb correctly.
+ * This function cleans Rx descriptors from the ctrl_vsi Rx ring used
+ * to set flow director rules on VFs.
*/
-static struct sk_buff *
-ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
+void ice_clean_ctrl_rx_irq(struct ice_rx_ring *rx_ring)
{
- unsigned int size = xdp->data_end - xdp->data;
- struct skb_shared_info *sinfo = NULL;
- struct ice_rx_buf *rx_buf;
- unsigned int nr_frags = 0;
- unsigned int headlen;
- struct sk_buff *skb;
-
- /* prefetch first cache line of first page */
- net_prefetch(xdp->data);
-
- if (unlikely(xdp_buff_has_frags(xdp))) {
- sinfo = xdp_get_shared_info_from_buff(xdp);
- nr_frags = sinfo->nr_frags;
- }
-
- /* allocate a skb to store the frags */
- skb = napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE);
- if (unlikely(!skb))
- return NULL;
-
- rx_buf = &rx_ring->rx_buf[rx_ring->first_desc];
- skb_record_rx_queue(skb, rx_ring->q_index);
- /* Determine available headroom for copy */
- headlen = size;
- if (headlen > ICE_RX_HDR_SIZE)
- headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
-
- /* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
- sizeof(long)));
-
- /* if we exhaust the linear part then add what is left as a frag */
- size -= headlen;
- if (size) {
- /* besides adding here a partial frag, we are going to add
- * frags from xdp_buff, make sure there is enough space for
- * them
- */
- if (unlikely(nr_frags >= MAX_SKB_FRAGS - 1)) {
- dev_kfree_skb(skb);
- return NULL;
- }
- skb_add_rx_frag(skb, 0, rx_buf->page,
- rx_buf->page_offset + headlen, size,
- xdp->frame_sz);
- } else {
- /* buffer is unused, change the act that should be taken later
- * on; data was copied onto skb's linear part so there's no
- * need for adjusting page offset and we can reuse this buffer
- * as-is
- */
- rx_buf->act = ICE_SKB_CONSUMED;
- }
+ u32 ntc = rx_ring->next_to_clean;
+ unsigned int total_rx_pkts = 0;
+ u32 cnt = rx_ring->count;
- if (unlikely(xdp_buff_has_frags(xdp))) {
- struct skb_shared_info *skinfo = skb_shinfo(skb);
+ while (likely(total_rx_pkts < ICE_DFLT_IRQ_WORK)) {
+ struct ice_vsi *ctrl_vsi = rx_ring->vsi;
+ union ice_32b_rx_flex_desc *rx_desc;
+ u16 stat_err_bits;
- memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
- sizeof(skb_frag_t) * nr_frags);
+ rx_desc = ICE_RX_DESC(rx_ring, ntc);
- xdp_update_skb_shared_info(skb, skinfo->nr_frags + nr_frags,
- sinfo->xdp_frags_size,
- nr_frags * xdp->frame_sz,
- xdp_buff_is_frag_pfmemalloc(xdp));
- }
+ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
+ if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
+ break;
- return skb;
-}
+ dma_rmb();
-/**
- * ice_put_rx_buf - Clean up used buffer and either recycle or free
- * @rx_ring: Rx descriptor ring to transact packets on
- * @rx_buf: Rx buffer to pull data from
- *
- * This function will clean up the contents of the rx_buf. It will either
- * recycle the buffer or unmap it and free the associated resources.
- */
-static void
-ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
-{
- if (!rx_buf)
- return;
+ if (ctrl_vsi->vf)
+ ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
- if (ice_can_reuse_rx_page(rx_buf)) {
- /* hand second half of page back to the ring */
- ice_reuse_rx_page(rx_ring, rx_buf);
- } else {
- /* we are not reusing the buffer so unmap it */
- dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
- ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
- ICE_RX_DMA_ATTR);
- __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
+ if (++ntc == cnt)
+ ntc = 0;
+ total_rx_pkts++;
}
- /* clear contents of buffer_info */
- rx_buf->page = NULL;
+ rx_ring->next_to_clean = ntc;
+ ice_init_ctrl_rx_descs(rx_ring, ICE_DESC_UNUSED(rx_ring));
}
/**
@@ -1115,20 +941,19 @@ ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
*
* Returns amount of work completed
*/
-int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
- unsigned int offset = rx_ring->rx_offset;
- struct xdp_buff *xdp = &rx_ring->xdp;
- u32 cached_ntc = rx_ring->first_desc;
struct ice_tx_ring *xdp_ring = NULL;
struct bpf_prog *xdp_prog = NULL;
u32 ntc = rx_ring->next_to_clean;
+ LIBETH_XDP_ONSTACK_BUFF(xdp);
+ u32 cached_ntu, xdp_verdict;
u32 cnt = rx_ring->count;
u32 xdp_xmit = 0;
- u32 cached_ntu;
bool failure;
- u32 first;
+
+ libeth_xdp_init_buff(xdp, &rx_ring->xdp, &rx_ring->xdp_rxq);
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
if (xdp_prog) {
@@ -1139,19 +964,21 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
/* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
- struct ice_rx_buf *rx_buf;
+ struct libeth_fqe *rx_buf;
struct sk_buff *skb;
unsigned int size;
u16 stat_err_bits;
u16 vlan_tci;
+ bool rxe;
/* get the Rx desc from Rx ring based on 'next_to_clean' */
rx_desc = ICE_RX_DESC(rx_ring, ntc);
- /* status_error_len will always be zero for unused descriptors
- * because it's cleared in cleanup, and overlaps with hdr_addr
- * which is always zero because packet split isn't used, if the
- * hardware wrote DD then it will be non-zero
+ /*
+ * The DD bit will always be zero for unused descriptors
+ * because it's cleared in cleanup or when setting the DMA
+ * address of the header buffer, which never uses the DD bit.
+ * If the hardware wrote the descriptor, it will be non-zero.
*/
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
@@ -1164,85 +991,66 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
dma_rmb();
ice_trace(clean_rx_irq, rx_ring, rx_desc);
- if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
- struct ice_vsi *ctrl_vsi = rx_ring->vsi;
-
- if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
- ctrl_vsi->vf)
- ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
- if (++ntc == cnt)
- ntc = 0;
- rx_ring->first_desc = ntc;
- continue;
- }
+ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_HBO_S) |
+ BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
+ rxe = ice_test_staterr(rx_desc->wb.status_error0,
+ stat_err_bits);
+
+ if (!rx_ring->hdr_pp)
+ goto payload;
+
+ size = le16_get_bits(rx_desc->wb.hdr_len_sph_flex_flags1,
+ ICE_RX_FLEX_DESC_HDR_LEN_M);
+ if (unlikely(rxe))
+ size = 0;
+
+ rx_buf = &rx_ring->hdr_fqes[ntc];
+ libeth_xdp_process_buff(xdp, rx_buf, size);
+ rx_buf->netmem = 0;
+
+payload:
size = le16_to_cpu(rx_desc->wb.pkt_len) &
ICE_RX_FLX_DESC_PKT_LEN_M;
+ if (unlikely(rxe))
+ size = 0;
/* retrieve a buffer from the ring */
- rx_buf = ice_get_rx_buf(rx_ring, size, ntc);
-
- if (!xdp->data) {
- void *hard_start;
+ rx_buf = &rx_ring->rx_fqes[ntc];
+ libeth_xdp_process_buff(xdp, rx_buf, size);
- hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
- offset;
- xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
- xdp_buff_clear_frags_flag(xdp);
- } else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
- break;
- }
if (++ntc == cnt)
ntc = 0;
/* skip if it is NOP desc */
- if (ice_is_non_eop(rx_ring, rx_desc))
+ if (ice_is_non_eop(rx_ring, rx_desc) || unlikely(!xdp->data))
continue;
- ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf, rx_desc);
- if (rx_buf->act == ICE_XDP_PASS)
+ xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc);
+ if (xdp_verdict == ICE_XDP_PASS)
goto construct_skb;
- total_rx_bytes += xdp_get_buff_len(xdp);
+
+ if (xdp_verdict & (ICE_XDP_TX | ICE_XDP_REDIR))
+ xdp_xmit |= xdp_verdict;
+ total_rx_bytes += xdp_get_buff_len(&xdp->base);
total_rx_pkts++;
xdp->data = NULL;
- rx_ring->first_desc = ntc;
- rx_ring->nr_frags = 0;
continue;
+
construct_skb:
- if (likely(ice_ring_uses_build_skb(rx_ring)))
- skb = ice_build_skb(rx_ring, xdp);
- else
- skb = ice_construct_skb(rx_ring, xdp);
- /* exit if we failed to retrieve a buffer */
- if (!skb) {
- rx_ring->ring_stats->rx_stats.alloc_page_failed++;
- rx_buf->act = ICE_XDP_CONSUMED;
- if (unlikely(xdp_buff_has_frags(xdp)))
- ice_set_rx_bufs_act(xdp, rx_ring,
- ICE_XDP_CONSUMED);
- xdp->data = NULL;
- rx_ring->first_desc = ntc;
- rx_ring->nr_frags = 0;
- break;
- }
+ skb = xdp_build_skb_from_buff(&xdp->base);
xdp->data = NULL;
- rx_ring->first_desc = ntc;
- rx_ring->nr_frags = 0;
- stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
- if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
- stat_err_bits))) {
- dev_kfree_skb_any(skb);
+ /* exit if we failed to retrieve a buffer */
+ if (!skb) {
+ libeth_xdp_return_buff_slow(xdp);
+ rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
continue;
}
vlan_tci = ice_get_vlan_tci(rx_desc);
- /* pad the skb if needed, to make a valid ethernet frame */
- if (eth_skb_pad(skb))
- continue;
-
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
@@ -1257,30 +1065,15 @@ construct_skb:
total_rx_pkts++;
}
- first = rx_ring->first_desc;
- while (cached_ntc != first) {
- struct ice_rx_buf *buf = &rx_ring->rx_buf[cached_ntc];
-
- if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) {
- ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
- xdp_xmit |= buf->act;
- } else if (buf->act & ICE_XDP_CONSUMED) {
- buf->pagecnt_bias++;
- } else if (buf->act == ICE_XDP_PASS) {
- ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
- }
-
- ice_put_rx_buf(rx_ring, buf);
- if (++cached_ntc >= cnt)
- cached_ntc = 0;
- }
rx_ring->next_to_clean = ntc;
/* return up to cleaned_count buffers to hardware */
- failure = ice_alloc_rx_bufs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring));
+ failure = ice_alloc_rx_bufs(rx_ring, ICE_DESC_UNUSED(rx_ring));
if (xdp_xmit)
ice_finalize_xdp_rx(xdp_ring, xdp_xmit, cached_ntu);
+ libeth_xdp_save_buff(&rx_ring->xdp, xdp);
+
if (rx_ring->ring_stats)
ice_update_rx_ring_stats(rx_ring, total_rx_pkts,
total_rx_bytes);
@@ -1722,10 +1515,46 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
/* notify HW of packet */
kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount,
netdev_xmit_more());
- if (kick)
- /* notify HW of packet */
- writel(i, tx_ring->tail);
+ if (!kick)
+ return;
+ if (ice_is_txtime_cfg(tx_ring)) {
+ struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+ u32 tstamp_count = tstamp_ring->count;
+ u32 j = tstamp_ring->next_to_use;
+ struct ice_ts_desc *ts_desc;
+ struct timespec64 ts;
+ u32 tstamp;
+
+ ts = ktime_to_timespec64(first->skb->tstamp);
+ tstamp = ts.tv_nsec >> ICE_TXTIME_CTX_RESOLUTION_128NS;
+
+ ts_desc = ICE_TS_DESC(tstamp_ring, j);
+ ts_desc->tx_desc_idx_tstamp = ice_build_tstamp_desc(i, tstamp);
+
+ j++;
+ if (j == tstamp_count) {
+ u32 fetch = tstamp_count - tx_ring->count;
+
+ j = 0;
+
+ /* To prevent an MDD when wrapping the tstamp ring,
+ * create additional TS descriptors equal to the fetch
+ * TS descriptor count. HW merges TS descriptors that
+ * carry the same timestamp value into a single
+ * descriptor.
+ */
+ for (; j < fetch; j++) {
+ ts_desc = ICE_TS_DESC(tstamp_ring, j);
+ ts_desc->tx_desc_idx_tstamp =
+ ice_build_tstamp_desc(i, tstamp);
+ }
+ }
+ tstamp_ring->next_to_use = j;
+ writel_relaxed(j, tstamp_ring->tail);
+ } else {
+ writel_relaxed(i, tx_ring->tail);
+ }
return;
dma_error:
@@ -1753,6 +1582,7 @@ dma_error:
static
int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
+ const struct ice_tx_ring *tx_ring = off->tx_ring;
u32 l4_len = 0, l3_len = 0, l2_len = 0;
struct sk_buff *skb = first->skb;
union {
@@ -1902,6 +1732,30 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
l3_len = l4.hdr - ip.hdr;
offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
+ if ((tx_ring->netdev->features & NETIF_F_HW_CSUM) &&
+ !(first->tx_flags & ICE_TX_FLAGS_TSO) &&
+ !skb_csum_is_sctp(skb)) {
+ /* Set GCS */
+ u16 csum_start = (skb->csum_start - skb->mac_header) / 2;
+ u16 csum_offset = skb->csum_offset / 2;
+ u16 gcs_params;
+
+ gcs_params = FIELD_PREP(ICE_TX_GCS_DESC_START_M, csum_start) |
+ FIELD_PREP(ICE_TX_GCS_DESC_OFFSET_M, csum_offset) |
+ FIELD_PREP(ICE_TX_GCS_DESC_TYPE_M,
+ ICE_TX_GCS_DESC_CSUM_PSH);
+
+ /* Unlike legacy HW checksums, GCS requires a context
+ * descriptor.
+ */
+ off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX;
+ off->cd_gcs_params = gcs_params;
+ /* Fill out CSO info in data descriptors */
+ off->td_offset |= offset;
+ off->td_cmd |= cmd;
+ return 1;
+ }
+
/* Enable L4 checksum offloads */
switch (l4_proto) {
case IPPROTO_TCP:
@@ -2359,17 +2213,20 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
eth = (struct ethhdr *)skb_mac_header(skb);
- if (unlikely((skb->priority == TC_PRIO_CONTROL ||
- eth->h_proto == htons(ETH_P_LLDP)) &&
- vsi->type == ICE_VSI_PF &&
- vsi->port_info->qos_cfg.is_sw_lldp))
+
+ if ((ice_is_switchdev_running(vsi->back) ||
+ ice_lag_is_switchdev_running(vsi->back)) &&
+ vsi->type != ICE_VSI_SF)
+ ice_eswitch_set_target_vsi(skb, &offload);
+ else if (unlikely((skb->priority == TC_PRIO_CONTROL ||
+ eth->h_proto == htons(ETH_P_LLDP)) &&
+ vsi->type == ICE_VSI_PF &&
+ vsi->port_info->qos_cfg.is_sw_lldp))
offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
ICE_TX_CTX_DESC_SWTCH_UPLINK <<
ICE_TXD_CTX_QW1_CMD_S);
ice_tstamp(tx_ring, skb, first, &offload);
- if (ice_is_switchdev_running(vsi->back) && vsi->type != ICE_VSI_SF)
- ice_eswitch_set_target_vsi(skb, &offload);
if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
struct ice_tx_ctx_desc *cdesc;
@@ -2383,7 +2240,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
/* setup context descriptor */
cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
- cdesc->rsvd = cpu_to_le16(0);
+ cdesc->gcs = cpu_to_le16(offload.cd_gcs_params);
cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
}
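
A worked example may help with the new GCS (generic checksum) Tx path added to ice_tx_csum() above: the hardware wants the checksum start and the checksum field offset expressed in 16-bit words rather than bytes, which is why both values are divided by two before being packed into the context descriptor. The standalone sketch below is not part of the patch; the mask values and the field_prep() helper are illustrative stand-ins for the real ICE_TX_GCS_DESC_* definitions and FIELD_PREP().

/*
 * Standalone sketch, not part of the patch. Mask layout is assumed for
 * illustration only; the real ICE_TX_GCS_DESC_* masks live in the ice
 * register headers.
 */
#include <stdint.h>
#include <stdio.h>

#define GCS_START_M	0x007f	/* assumed: checksum start, 2-byte words */
#define GCS_OFFSET_M	0x3f80	/* assumed: checksum offset, 2-byte words */
#define GCS_TYPE_M	0xc000	/* assumed: checksum type */
#define GCS_CSUM_PSH	1	/* assumed: "insert checksum" type */

static uint16_t field_prep(uint16_t mask, uint16_t val)
{
	return (uint16_t)((val << __builtin_ctz(mask)) & mask);
}

int main(void)
{
	/* TCP over IPv4: 14-byte Ethernet header + 20-byte IP header */
	unsigned int csum_start_bytes  = 14 + 20;	/* start of transport header */
	unsigned int csum_offset_bytes = 16;		/* TCP checksum field offset */

	/* hardware counts in 16-bit words, hence the divide by two */
	uint16_t start  = csum_start_bytes / 2;		/* 17 */
	uint16_t offset = csum_offset_bytes / 2;	/* 8 */

	uint16_t gcs = field_prep(GCS_START_M, start) |
		       field_prep(GCS_OFFSET_M, offset) |
		       field_prep(GCS_TYPE_M, GCS_CSUM_PSH);

	printf("gcs_params = 0x%04x (start=%u words, offset=%u words)\n",
	       gcs, start, offset);
	return 0;
}

For a plain TCP/IPv4 frame the checksum start therefore lands at word 17 and the checksum field at word 8 of the transport header, matching the divide-by-two in the driver code above.
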
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index cb347c852ba9..e440c55d9e9f 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -4,6 +4,8 @@
#ifndef _ICE_TXRX_H_
#define _ICE_TXRX_H_
+#include <net/libeth/types.h>
+
#include "ice_type.h"
#define ICE_DFLT_IRQ_WORK 256
@@ -27,72 +29,6 @@
#define ICE_MAX_TXQ_PER_TXQG 128
-/* Attempt to maximize the headroom available for incoming frames. We use a 2K
- * buffer for MTUs <= 1500 and need 1536/1534 to store the data for the frame.
- * This leaves us with 512 bytes of room. From that we need to deduct the
- * space needed for the shared info and the padding needed to IP align the
- * frame.
- *
- * Note: For cache line sizes 256 or larger this value is going to end
- * up negative. In these cases we should fall back to the legacy
- * receive path.
- */
-#if (PAGE_SIZE < 8192)
-#define ICE_2K_TOO_SMALL_WITH_PADDING \
- ((unsigned int)(NET_SKB_PAD + ICE_RXBUF_1536) > \
- SKB_WITH_OVERHEAD(ICE_RXBUF_2048))
-
-/**
- * ice_compute_pad - compute the padding
- * @rx_buf_len: buffer length
- *
- * Figure out the size of half page based on given buffer length and
- * then subtract the skb_shared_info followed by subtraction of the
- * actual buffer length; this in turn results in the actual space that
- * is left for padding usage
- */
-static inline int ice_compute_pad(int rx_buf_len)
-{
- int half_page_size;
-
- half_page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
- return SKB_WITH_OVERHEAD(half_page_size) - rx_buf_len;
-}
-
-/**
- * ice_skb_pad - determine the padding that we can supply
- *
- * Figure out the right Rx buffer size and based on that calculate the
- * padding
- */
-static inline int ice_skb_pad(void)
-{
- int rx_buf_len;
-
- /* If a 2K buffer cannot handle a standard Ethernet frame then
- * optimize padding for a 3K buffer instead of a 1.5K buffer.
- *
- * For a 3K buffer we need to add enough padding to allow for
- * tailroom due to NET_IP_ALIGN possibly shifting us out of
- * cache-line alignment.
- */
- if (ICE_2K_TOO_SMALL_WITH_PADDING)
- rx_buf_len = ICE_RXBUF_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
- else
- rx_buf_len = ICE_RXBUF_1536;
-
- /* if needed make room for NET_IP_ALIGN */
- rx_buf_len -= NET_IP_ALIGN;
-
- return ice_compute_pad(rx_buf_len);
-}
-
-#define ICE_SKB_PAD ice_skb_pad()
-#else
-#define ICE_2K_TOO_SMALL_WITH_PADDING false
-#define ICE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
-#endif
-
/* We are assuming that the cache line is always 64 Bytes here for ice.
* In order to make sure that is a correct assumption there is a check in probe
* to print a warning if the read from GLPCI_CNF2 tells us that the cache line
@@ -112,10 +48,6 @@ static inline int ice_skb_pad(void)
(u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1)
-#define ICE_RX_DESC_UNUSED(R) \
- ((((R)->first_desc > (R)->next_to_use) ? 0 : (R)->count) + \
- (R)->first_desc - (R)->next_to_use - 1)
-
#define ICE_RING_QUARTER(R) ((R)->count >> 2)
#define ICE_TX_FLAGS_TSO BIT(0)
@@ -193,18 +125,10 @@ struct ice_tx_offload_params {
u32 td_l2tag1;
u32 cd_tunnel_params;
u16 cd_l2tag2;
+ u16 cd_gcs_params;
u8 header_len;
};
-struct ice_rx_buf {
- dma_addr_t dma;
- struct page *page;
- unsigned int page_offset;
- unsigned int pgcnt;
- unsigned int act;
- unsigned int pagecnt_bias;
-};
-
struct ice_q_stats {
u64 pkts;
u64 bytes;
@@ -262,15 +186,6 @@ struct ice_pkt_ctx {
__be16 vlan_proto;
};
-struct ice_xdp_buff {
- struct xdp_buff xdp_buff;
- const union ice_32b_rx_flex_desc *eop_desc;
- const struct ice_pkt_ctx *pkt_ctx;
-};
-
-/* Required for compatibility with xdp_buffs from xsk_pool */
-static_assert(offsetof(struct ice_xdp_buff, xdp_buff) == 0);
-
/* indices into GLINT_ITR registers */
#define ICE_RX_ITR ICE_IDX_ITR0
#define ICE_TX_ITR ICE_IDX_ITR1
@@ -310,10 +225,20 @@ enum ice_dynamic_itr {
#define ICE_TX_LEGACY 1
/* descriptor ring, associated with a VSI */
+struct ice_tstamp_ring {
+ struct ice_tx_ring *tx_ring; /* Backreference to associated Tx ring */
+ dma_addr_t dma; /* physical address of ring */
+ struct rcu_head rcu; /* to avoid race on free */
+ u8 __iomem *tail;
+ void *desc;
+ u16 next_to_use;
+ u16 count;
+} ____cacheline_internodealigned_in_smp;
+
struct ice_rx_ring {
/* CL1 - 1st cacheline starts here */
void *desc; /* Descriptor ring memory */
- struct device *dev; /* Used for DMA mapping */
+ struct page_pool *pp;
struct net_device *netdev; /* netdev ring maps to */
struct ice_vsi *vsi; /* Backreference to associated VSI */
struct ice_q_vector *q_vector; /* Backreference to associated vector */
@@ -325,14 +250,19 @@ struct ice_rx_ring {
u16 next_to_alloc;
union {
- struct ice_rx_buf *rx_buf;
+ struct libeth_fqe *rx_fqes;
struct xdp_buff **xdp_buf;
};
+
/* CL2 - 2nd cacheline starts here */
+ struct libeth_fqe *hdr_fqes;
+ struct page_pool *hdr_pp;
+
union {
- struct ice_xdp_buff xdp_ext;
- struct xdp_buff xdp;
+ struct libeth_xdp_buff_stash xdp;
+ struct libeth_xdp_buff *xsk;
};
+
/* CL3 - 3rd cacheline starts here */
union {
struct ice_pkt_ctx pkt_ctx;
@@ -342,12 +272,13 @@ struct ice_rx_ring {
};
};
struct bpf_prog *xdp_prog;
- u16 rx_offset;
/* used in interrupt processing */
u16 next_to_use;
u16 next_to_clean;
- u16 first_desc;
+
+ u32 hdr_truesize;
+ u32 truesize;
/* stats structs */
struct ice_ring_stats *ring_stats;
@@ -358,15 +289,14 @@ struct ice_rx_ring {
struct ice_tx_ring *xdp_ring;
struct ice_rx_ring *next; /* pointer to next ring in q_vector */
struct xsk_buff_pool *xsk_pool;
- u32 nr_frags;
- u16 max_frame;
+ u16 rx_hdr_len;
u16 rx_buf_len;
dma_addr_t dma; /* physical address of ring */
u8 dcb_tc; /* Traffic class of ring */
u8 ptp_rx;
-#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
#define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(2)
#define ICE_RX_FLAGS_MULTIDEV BIT(3)
+#define ICE_RX_FLAGS_RING_GCS BIT(4)
u8 flags;
/* CL5 - 5th cacheline starts here */
struct xdp_rxq_info xdp_rxq;
@@ -402,29 +332,16 @@ struct ice_tx_ring {
spinlock_t tx_lock;
u32 txq_teid; /* Added Tx queue TEID */
/* CL4 - 4th cacheline starts here */
+ struct ice_tstamp_ring *tstamp_ring;
#define ICE_TX_FLAGS_RING_XDP BIT(0)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG1 BIT(1)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2)
+#define ICE_TX_FLAGS_TXTIME BIT(3)
u8 flags;
u8 dcb_tc; /* Traffic class of ring */
u16 quanta_prof_id;
} ____cacheline_internodealigned_in_smp;
-static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring)
-{
- return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB);
-}
-
-static inline void ice_set_ring_build_skb_ena(struct ice_rx_ring *ring)
-{
- ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB;
-}
-
-static inline void ice_clear_ring_build_skb_ena(struct ice_rx_ring *ring)
-{
- ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB;
-}
-
static inline bool ice_ring_ch_enabled(struct ice_tx_ring *ring)
{
return !!ring->ch;
@@ -479,17 +396,13 @@ struct ice_coalesce_stored {
static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring)
{
-#if (PAGE_SIZE < 8192)
- if (ring->rx_buf_len > (PAGE_SIZE / 2))
- return 1;
-#endif
return 0;
}
-#define ice_rx_pg_size(_ring) (PAGE_SIZE << ice_rx_pg_order(_ring))
-
union ice_32b_rx_flex_desc;
+void ice_init_ctrl_rx_descs(struct ice_rx_ring *rx_ring, u32 num_descs);
+void ice_rxq_pp_destroy(struct ice_rx_ring *rq);
bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, unsigned int cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
u16
@@ -499,12 +412,15 @@ void ice_clean_tx_ring(struct ice_tx_ring *tx_ring);
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring);
int ice_setup_tx_ring(struct ice_tx_ring *tx_ring);
int ice_setup_rx_ring(struct ice_rx_ring *rx_ring);
+int ice_alloc_setup_tstamp_ring(struct ice_tx_ring *tx_ring);
void ice_free_tx_ring(struct ice_tx_ring *tx_ring);
void ice_free_rx_ring(struct ice_rx_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
u8 *raw_packet);
-int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget);
void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring);
+void ice_clean_ctrl_rx_irq(struct ice_rx_ring *rx_ring);
+void ice_free_tx_tstamp_ring(struct ice_tx_ring *tx_ring);
+void ice_free_tstamp_ring(struct ice_tx_ring *tx_ring);
#endif /* _ICE_TXRX_H_ */
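
The refill paths above now use the Tx-style ICE_DESC_UNUSED() macro for Rx as well (ICE_RX_DESC_UNUSED and the first_desc field are removed). As a reminder of what that wraparound arithmetic yields, here is a small standalone sketch, not part of the patch, with the ring fields reduced to plain integers.

/*
 * Standalone sketch of the ICE_DESC_UNUSED() arithmetic: the refillable
 * slots are the gap between next_to_clean and next_to_use, minus one slot
 * that stays empty so the two indices are never ambiguous when they meet.
 */
#include <stdio.h>

struct ring {
	unsigned int count;
	unsigned int next_to_use;	/* next slot SW will hand to HW */
	unsigned int next_to_clean;	/* next slot SW will process */
};

static unsigned int desc_unused(const struct ring *r)
{
	return (r->next_to_clean > r->next_to_use ? 0 : r->count) +
	       r->next_to_clean - r->next_to_use - 1;
}

int main(void)
{
	struct ring a = { .count = 512, .next_to_use = 100, .next_to_clean = 100 };
	struct ring b = { .count = 512, .next_to_use = 500, .next_to_clean = 20 };

	printf("%u\n", desc_unused(&a));	/* 511: empty ring, one slot reserved */
	printf("%u\n", desc_unused(&b));	/* 31: 12 slots to wrap + 20 - 1 */
	return 0;
}
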
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 2719f0e20933..956da38d63b0 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -3,6 +3,7 @@
#include <linux/filter.h>
#include <linux/net/intel/libie/rx.h>
+#include <net/libeth/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_eswitch.h"
@@ -81,6 +82,23 @@ ice_rx_hash_to_skb(const struct ice_rx_ring *rx_ring,
}
/**
+ * ice_rx_gcs - Set generic checksum in skb
+ * @skb: skb currently being received and modified
+ * @rx_desc: receive descriptor
+ */
+static void ice_rx_gcs(struct sk_buff *skb,
+ const union ice_32b_rx_flex_desc *rx_desc)
+{
+ const struct ice_32b_rx_flex_desc_nic *desc;
+ u16 csum;
+
+ desc = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ csum = (__force u16)desc->raw_csum;
+ skb->csum = csum_unfold((__force __sum16)swab16(csum));
+}
+
+/**
* ice_rx_csum - Indicate in skb if checksum is good
* @ring: the ring we care about
* @skb: skb currently being received and modified
@@ -107,6 +125,15 @@ ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);
+ if ((ring->flags & ICE_RX_FLAGS_RING_GCS) &&
+ rx_desc->wb.rxdid == ICE_RXDID_FLEX_NIC &&
+ (decoded.inner_prot == LIBETH_RX_PT_INNER_TCP ||
+ decoded.inner_prot == LIBETH_RX_PT_INNER_UDP ||
+ decoded.inner_prot == LIBETH_RX_PT_INNER_ICMP)) {
+ ice_rx_gcs(skb, rx_desc);
+ return;
+ }
+
/* check if HW has decoded the packet and checksum */
if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
return;
@@ -204,9 +231,12 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring,
if (ice_is_port_repr_netdev(netdev))
ice_repr_inc_rx_stats(netdev, skb->len);
+
+ /* __skb_push() is needed because xdp_build_skb_from_buff()
+ * calls eth_type_trans()
+ */
+ __skb_push(skb, ETH_HLEN);
skb->protocol = eth_type_trans(skb, netdev);
- } else {
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
ice_rx_csum(rx_ring, skb, rx_desc, ptype);
@@ -244,19 +274,18 @@ static void
ice_clean_xdp_tx_buf(struct device *dev, struct ice_tx_buf *tx_buf,
struct xdp_frame_bulk *bq)
{
- dma_unmap_single(dev, dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buf, len, 0);
-
switch (tx_buf->type) {
case ICE_TX_BUF_XDP_TX:
- page_frag_free(tx_buf->raw_buf);
+ libeth_xdp_return_va(tx_buf->raw_buf, true);
break;
case ICE_TX_BUF_XDP_XMIT:
+ dma_unmap_single(dev, dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
xdp_return_frame_bulk(tx_buf->xdpf, bq);
break;
}
+ dma_unmap_len_set(tx_buf, len, 0);
tx_buf->type = ICE_TX_BUF_EMPTY;
}
@@ -351,9 +380,11 @@ int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring,
struct ice_tx_buf *tx_buf;
u32 cnt = xdp_ring->count;
void *data = xdp->data;
+ struct page *page;
u32 nr_frags = 0;
u32 free_space;
u32 frag = 0;
+ u32 offset;
free_space = ICE_DESC_UNUSED(xdp_ring);
if (free_space < ICE_RING_QUARTER(xdp_ring))
@@ -373,24 +404,28 @@ int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring,
tx_head = &xdp_ring->tx_buf[ntu];
tx_buf = tx_head;
+ page = virt_to_page(data);
+ offset = offset_in_page(xdp->data);
+
for (;;) {
dma_addr_t dma;
- dma = dma_map_single(dev, data, size, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma))
- goto dma_unmap;
-
- /* record length, and DMA address */
- dma_unmap_len_set(tx_buf, len, size);
- dma_unmap_addr_set(tx_buf, dma, dma);
-
if (frame) {
+ dma = dma_map_single(dev, data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto dma_unmap;
tx_buf->type = ICE_TX_BUF_FRAG;
} else {
+ dma = page_pool_get_dma_addr(page) + offset;
+ dma_sync_single_for_device(dev, dma, size, DMA_BIDIRECTIONAL);
tx_buf->type = ICE_TX_BUF_XDP_TX;
tx_buf->raw_buf = data;
}
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buf, len, size);
+ dma_unmap_addr_set(tx_buf, dma, dma);
+
tx_desc->buf_addr = cpu_to_le64(dma);
tx_desc->cmd_type_offset_bsz = ice_build_ctob(0, 0, size, 0);
@@ -404,6 +439,8 @@ int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring,
tx_desc = ICE_TX_DESC(xdp_ring, ntu);
tx_buf = &xdp_ring->tx_buf[ntu];
+ page = skb_frag_page(&sinfo->frags[frag]);
+ offset = skb_frag_off(&sinfo->frags[frag]);
data = skb_frag_address(&sinfo->frags[frag]);
size = skb_frag_size(&sinfo->frags[frag]);
frag++;
@@ -488,10 +525,13 @@ void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res,
*/
static int ice_xdp_rx_hw_ts(const struct xdp_md *ctx, u64 *ts_ns)
{
- const struct ice_xdp_buff *xdp_ext = (void *)ctx;
+ const struct libeth_xdp_buff *xdp_ext = (void *)ctx;
+ struct ice_rx_ring *rx_ring;
+
+ rx_ring = libeth_xdp_buff_to_rq(xdp_ext, typeof(*rx_ring), xdp_rxq);
- *ts_ns = ice_ptp_get_rx_hwts(xdp_ext->eop_desc,
- xdp_ext->pkt_ctx);
+ *ts_ns = ice_ptp_get_rx_hwts(xdp_ext->desc,
+ &rx_ring->pkt_ctx);
if (!*ts_ns)
return -ENODATA;
@@ -519,10 +559,10 @@ ice_xdp_rx_hash_type(const union ice_32b_rx_flex_desc *eop_desc)
static int ice_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
enum xdp_rss_hash_type *rss_type)
{
- const struct ice_xdp_buff *xdp_ext = (void *)ctx;
+ const struct libeth_xdp_buff *xdp_ext = (void *)ctx;
- *hash = ice_get_rx_hash(xdp_ext->eop_desc);
- *rss_type = ice_xdp_rx_hash_type(xdp_ext->eop_desc);
+ *hash = ice_get_rx_hash(xdp_ext->desc);
+ *rss_type = ice_xdp_rx_hash_type(xdp_ext->desc);
if (!likely(*hash))
return -ENODATA;
@@ -541,13 +581,16 @@ static int ice_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
static int ice_xdp_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto,
u16 *vlan_tci)
{
- const struct ice_xdp_buff *xdp_ext = (void *)ctx;
+ const struct libeth_xdp_buff *xdp_ext = (void *)ctx;
+ struct ice_rx_ring *rx_ring;
+
+ rx_ring = libeth_xdp_buff_to_rq(xdp_ext, typeof(*rx_ring), xdp_rxq);
- *vlan_proto = xdp_ext->pkt_ctx->vlan_proto;
+ *vlan_proto = rx_ring->pkt_ctx.vlan_proto;
if (!*vlan_proto)
return -ENODATA;
- *vlan_tci = ice_get_vlan_tci(xdp_ext->eop_desc);
+ *vlan_tci = ice_get_vlan_tci(xdp_ext->desc);
if (!*vlan_tci)
return -ENODATA;
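
The XDP metadata kfuncs above no longer carry an ice_pkt_ctx pointer in the buffer; instead they recover the Rx ring from the xdp_rxq_info the buffer already points at. Assuming libeth_xdp_buff_to_rq() is a container_of()-style lookup (its definition is not part of this patch), the standalone sketch below illustrates the pattern with stand-in types.

/*
 * Standalone sketch, not part of the patch: recovering the ring from its
 * embedded xdp_rxq_info member, which removes the need for a per-buffer
 * pkt_ctx back-pointer.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct xdp_rxq_info { int queue_index; };

struct rx_ring {
	int pkt_ctx;			/* stand-in for struct ice_pkt_ctx */
	struct xdp_rxq_info xdp_rxq;	/* embedded, as in struct ice_rx_ring */
};

int main(void)
{
	struct rx_ring ring = { .pkt_ctx = 42, .xdp_rxq = { .queue_index = 3 } };
	struct xdp_rxq_info *rxq = &ring.xdp_rxq;	/* what the xdp_buff carries */

	struct rx_ring *found = container_of(rxq, struct rx_ring, xdp_rxq);

	printf("queue %d, pkt_ctx %d\n", found->xdp_rxq.queue_index, found->pkt_ctx);
	return 0;
}
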
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index 79f960c6680d..6a3f10f7a53f 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -6,49 +6,6 @@
#include "ice.h"
/**
- * ice_set_rx_bufs_act - propagate Rx buffer action to frags
- * @xdp: XDP buffer representing frame (linear and frags part)
- * @rx_ring: Rx ring struct
- * act: action to store onto Rx buffers related to XDP buffer parts
- *
- * Set action that should be taken before putting Rx buffer from first frag
- * to the last.
- */
-static inline void
-ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring,
- const unsigned int act)
-{
- u32 sinfo_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
- u32 nr_frags = rx_ring->nr_frags + 1;
- u32 idx = rx_ring->first_desc;
- u32 cnt = rx_ring->count;
- struct ice_rx_buf *buf;
-
- for (int i = 0; i < nr_frags; i++) {
- buf = &rx_ring->rx_buf[idx];
- buf->act = act;
-
- if (++idx == cnt)
- idx = 0;
- }
-
- /* adjust pagecnt_bias on frags freed by XDP prog */
- if (sinfo_frags < rx_ring->nr_frags && act == ICE_XDP_CONSUMED) {
- u32 delta = rx_ring->nr_frags - sinfo_frags;
-
- while (delta) {
- if (idx == 0)
- idx = cnt - 1;
- else
- idx--;
- buf = &rx_ring->rx_buf[idx];
- buf->pagecnt_bias--;
- delta--;
- }
- }
-}
-
-/**
* ice_test_staterr - tests bits in Rx descriptor status and error fields
* @status_err_n: Rx descriptor status_error0 or status_error1 bits
* @stat_err_bits: value to mask
@@ -97,6 +54,20 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
}
/**
+ * ice_build_tstamp_desc - build Tx time stamp descriptor
+ * @tx_desc: Tx LAN descriptor index
+ * @tstamp: time stamp
+ *
+ * Return: Tx time stamp descriptor
+ */
+static inline __le32
+ice_build_tstamp_desc(u16 tx_desc, u32 tstamp)
+{
+ return cpu_to_le32(FIELD_PREP(ICE_TXTIME_TX_DESC_IDX_M, tx_desc) |
+ FIELD_PREP(ICE_TXTIME_STAMP_M, tstamp));
+}
+
+/**
* ice_get_vlan_tci - get VLAN TCI from Rx flex descriptor
* @rx_desc: Rx 32b flex descriptor with RXDID=2
*
@@ -164,13 +135,4 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring,
void
ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tci);
-static inline void
-ice_xdp_meta_set_desc(struct xdp_buff *xdp,
- union ice_32b_rx_flex_desc *eop_desc)
-{
- struct ice_xdp_buff *xdp_ext = container_of(xdp, struct ice_xdp_buff,
- xdp_buff);
-
- xdp_ext->eop_desc = eop_desc;
-}
#endif /* !_ICE_TXRX_LIB_H_ */
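
ice_build_tstamp_desc() above packs a Tx descriptor index and a launch time into one 32-bit TxTime descriptor word, with the time quantized to 128 ns units by the shift done in ice_tx_map(). The standalone sketch below is not part of the patch; the mask layout and the assumption that ICE_TXTIME_CTX_RESOLUTION_128NS is a shift of 7 (128 ns = 2^7 ns) are illustrative guesses.

/*
 * Standalone sketch, not part of the patch: assumed stand-ins for
 * ICE_TXTIME_TX_DESC_IDX_M / ICE_TXTIME_STAMP_M and the 128 ns resolution.
 */
#include <stdint.h>
#include <stdio.h>

#define TS_DESC_IDX_M	0x00001fffu	/* assumed: Tx descriptor index */
#define TS_STAMP_M	0xffffe000u	/* assumed: launch time, 128 ns units */
#define RES_128NS_SHIFT	7		/* assumed: 128 ns = 2^7 ns */

static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	uint32_t tx_desc_idx = 42;
	uint64_t tv_nsec = 1000000;			/* 1 ms into the second */
	uint32_t tstamp = tv_nsec >> RES_128NS_SHIFT;	/* 7812 units of 128 ns */

	uint32_t desc = field_prep(TS_DESC_IDX_M, tx_desc_idx) |
			field_prep(TS_STAMP_M, tstamp);

	printf("ts desc word = 0x%08x (idx %u, tstamp %u)\n",
	       desc, tx_desc_idx, tstamp);
	return 0;
}
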
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index adb168860711..6a2ec8389a8f 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -17,7 +17,9 @@
#include "ice_protocol_type.h"
#include "ice_sbq_cmd.h"
#include "ice_vlan_mode.h"
-#include "ice_fwlog.h"
+#include <linux/net/intel/libie/fwlog.h>
+#include <linux/wait.h>
+#include <net/dscp.h>
static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
{
@@ -291,8 +293,10 @@ struct ice_hw_common_caps {
u8 dcb;
u8 ieee_1588;
u8 rdma;
- u8 roce_lag;
- u8 sriov_lag;
+
+ bool roce_lag;
+ bool sriov_lag;
+ bool sriov_aa_lag;
bool nvm_update_pending_nvm;
bool nvm_update_pending_orom;
@@ -324,17 +328,17 @@ struct ice_hw_common_caps {
#define ICE_TS_TMR_IDX_ASSOC_M BIT(24)
/* TIME_REF clock rate specification */
-enum ice_time_ref_freq {
- ICE_TIME_REF_FREQ_25_000 = 0,
- ICE_TIME_REF_FREQ_122_880 = 1,
- ICE_TIME_REF_FREQ_125_000 = 2,
- ICE_TIME_REF_FREQ_153_600 = 3,
- ICE_TIME_REF_FREQ_156_250 = 4,
- ICE_TIME_REF_FREQ_245_760 = 5,
+enum ice_tspll_freq {
+ ICE_TSPLL_FREQ_25_000 = 0,
+ ICE_TSPLL_FREQ_122_880 = 1,
+ ICE_TSPLL_FREQ_125_000 = 2,
+ ICE_TSPLL_FREQ_153_600 = 3,
+ ICE_TSPLL_FREQ_156_250 = 4,
+ ICE_TSPLL_FREQ_245_760 = 5,
- NUM_ICE_TIME_REF_FREQ,
+ NUM_ICE_TSPLL_FREQ,
- ICE_TIME_REF_FREQ_INVALID = -1,
+ ICE_TSPLL_FREQ_INVALID = -1,
};
/* Clock source specification */
@@ -347,7 +351,7 @@ enum ice_clk_src {
struct ice_ts_func_info {
/* Function specific info */
- enum ice_time_ref_freq time_ref;
+ enum ice_tspll_freq time_ref;
u8 clk_freq;
u8 clk_src;
u8 tmr_index_assoc;
@@ -368,6 +372,7 @@ struct ice_ts_func_info {
#define ICE_TS_TMR1_ENA_M BIT(26)
#define ICE_TS_LL_TX_TS_READ_M BIT(28)
#define ICE_TS_LL_TX_TS_INT_READ_M BIT(29)
+#define ICE_TS_LL_PHY_TMR_UPDATE_M BIT(30)
struct ice_ts_dev_info {
/* Device specific info */
@@ -382,6 +387,7 @@ struct ice_ts_dev_info {
u8 tmr1_ena;
u8 ts_ll_read;
u8 ts_ll_int_read;
+ u8 ll_phy_tmr_update;
};
#define ICE_NAC_TOPO_PRIMARY_M BIT(0)
@@ -692,7 +698,6 @@ struct ice_dcb_app_priority_table {
#define ICE_MAX_USER_PRIORITY 8
#define ICE_DCBX_MAX_APPS 64
-#define ICE_DSCP_NUM_VAL 64
#define ICE_LLDPDU_SIZE 1500
#define ICE_TLV_STATUS_OPER 0x1
#define ICE_TLV_STATUS_SYNC 0x2
@@ -715,9 +720,9 @@ struct ice_dcbx_cfg {
u8 pfc_mode;
struct ice_dcb_app_priority_table app[ICE_DCBX_MAX_APPS];
/* when DSCP mapping defined by user set its bit to 1 */
- DECLARE_BITMAP(dscp_mapped, ICE_DSCP_NUM_VAL);
+ DECLARE_BITMAP(dscp_mapped, DSCP_MAX);
/* array holding DSCP -> UP/TC values for DSCP L3 QoS mode */
- u8 dscp_map[ICE_DSCP_NUM_VAL];
+ u8 dscp_map[DSCP_MAX];
u8 dcbx_mode;
#define ICE_DCBX_MODE_CEE 0x1
#define ICE_DCBX_MODE_IEEE 0x2
@@ -848,26 +853,26 @@ struct ice_mbx_data {
#define ICE_PORTS_PER_QUAD 4
#define ICE_GET_QUAD_NUM(port) ((port) / ICE_PORTS_PER_QUAD)
+#define ATQBAL_FLAGS_INTR_IN_PROGRESS BIT(0)
+
+struct ice_e810_params {
+ /* The wait queue lock also protects the low latency interface */
+ wait_queue_head_t atqbal_wq;
+ unsigned int atqbal_flags;
+};
+
struct ice_eth56g_params {
u8 num_phys;
- u8 phy_addr[2];
bool onestep_ena;
bool sfd_ena;
u32 peer_delay;
};
union ice_phy_params {
+ struct ice_e810_params e810;
struct ice_eth56g_params eth56g;
};
-/* PHY model */
-enum ice_phy_model {
- ICE_PHY_UNSUP = -1,
- ICE_PHY_E810 = 1,
- ICE_PHY_E82X,
- ICE_PHY_ETH56G,
-};
-
/* Global Link Topology */
enum ice_global_link_topo {
ICE_LINK_TOPO_UP_TO_2_LINKS,
@@ -877,11 +882,9 @@ enum ice_global_link_topo {
};
struct ice_ptp_hw {
- enum ice_phy_model phy_model;
union ice_phy_params phy;
u8 num_lports;
u8 ports_per_phy;
- bool is_2x50g_muxed_topo;
};
/* Port hardware description */
@@ -945,9 +948,7 @@ struct ice_hw {
u8 fw_patch; /* firmware patch version */
u32 fw_build; /* firmware build number */
- struct ice_fwlog_cfg fwlog_cfg;
- bool fwlog_supported; /* does hardware support FW logging? */
- struct ice_fwlog_ring fwlog_ring;
+ struct libie_fwlog fwlog;
/* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL
* register. Used for determining the ITR/INTRL granularity during
@@ -969,6 +970,7 @@ struct ice_hw {
u8 intrl_gran;
struct ice_ptp_hw ptp;
+ s8 lane_num;
/* Active package version (currently active) */
struct ice_pkg_ver active_pkg_ver;
@@ -1061,6 +1063,7 @@ struct ice_hw_port_stats {
u64 error_bytes; /* errbc */
u64 mac_local_faults; /* mlfc */
u64 mac_remote_faults; /* mrfc */
+ u64 rx_len_errors; /* rlec */
u64 link_xon_rx; /* lxonrxc */
u64 link_xoff_rx; /* lxoffrxc */
u64 link_xon_tx; /* lxontxc */
@@ -1216,4 +1219,9 @@ struct ice_aq_get_set_rss_lut_params {
#define ICE_FW_API_REPORT_DFLT_CFG_MIN 7
#define ICE_FW_API_REPORT_DFLT_CFG_PATCH 3
+/* AQ API version for Health Status support */
+#define ICE_FW_API_HEALTH_REPORT_MAJ 1
+#define ICE_FW_API_HEALTH_REPORT_MIN 7
+#define ICE_FW_API_HEALTH_REPORT_PATCH 6
+
#endif /* _ICE_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index c7c0c2f50c26..de9e81ccee66 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -5,7 +5,7 @@
#include "ice.h"
#include "ice_lib.h"
#include "ice_fltr.h"
-#include "ice_virtchnl_allowlist.h"
+#include "virt/allowlist.h"
/* Public functions which may be accessed by all driver files */
@@ -226,6 +226,7 @@ static void ice_vf_clear_counters(struct ice_vf *vf)
vsi->num_vlan = 0;
vf->num_mac = 0;
+ vf->num_mac_lldp = 0;
memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
}
@@ -858,16 +859,13 @@ static void ice_notify_vf_reset(struct ice_vf *vf)
int ice_reset_vf(struct ice_vf *vf, u32 flags)
{
struct ice_pf *pf = vf->pf;
- struct ice_lag *lag;
struct ice_vsi *vsi;
- u8 act_prt, pri_prt;
struct device *dev;
int err = 0;
+ u8 act_prt;
bool rsd;
dev = ice_pf_to_dev(pf);
- act_prt = ICE_LAG_INVALID_PORT;
- pri_prt = pf->hw.port_info->lport;
if (flags & ICE_VF_RESET_NOTIFY)
ice_notify_vf_reset(vf);
@@ -883,16 +881,8 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
else
lockdep_assert_held(&vf->cfg_lock);
- lag = pf->lag;
mutex_lock(&pf->lag_mutex);
- if (lag && lag->bonded && lag->primary) {
- act_prt = lag->active_port;
- if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
- lag->upper_netdev)
- ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
- else
- act_prt = ICE_LAG_INVALID_PORT;
- }
+ act_prt = ice_lag_prepare_vf_reset(pf->lag);
if (ice_is_vf_disabled(vf)) {
vsi = ice_get_vf_vsi(vf);
@@ -978,9 +968,7 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
ice_reset_vf_mbx_cnt(vf);
out_unlock:
- if (lag && lag->bonded && lag->primary &&
- act_prt != ICE_LAG_INVALID_PORT)
- ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+ ice_lag_complete_vf_reset(pf->lag, act_prt);
mutex_unlock(&pf->lag_mutex);
if (flags & ICE_VF_RESET_LOCK)
@@ -1021,6 +1009,9 @@ void ice_initialize_vf_entry(struct ice_vf *vf)
vf->num_msix = vfs->num_msix_per;
vf->num_vf_qs = vfs->num_qps_per;
+ /* set default RSS hash configuration */
+ vf->rss_hashcfg = ICE_DEFAULT_RSS_HASHCFG;
+
/* ctrl_vsi_idx will be set to a valid value only when iAVF
* creates its first fdir rule.
*/
@@ -1036,6 +1027,14 @@ void ice_initialize_vf_entry(struct ice_vf *vf)
mutex_init(&vf->cfg_lock);
}
+void ice_deinitialize_vf_entry(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+
+ if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
+ list_del(&vf->mbx_info.list_entry);
+}
+
/**
* ice_dis_vf_qs - Disable the VF queues
* @vf: pointer to the VF structure
@@ -1393,3 +1392,28 @@ struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi)
rcu_read_unlock();
return ctrl_vsi;
}
+
+/**
+ * ice_vf_update_mac_lldp_num - update the VF's number of LLDP addresses
+ * @vf: a VF to add the address to
+ * @vsi: the corresponding VSI
+ * @incr: is the rule added or removed
+ */
+void ice_vf_update_mac_lldp_num(struct ice_vf *vf, struct ice_vsi *vsi,
+ bool incr)
+{
+ bool lldp_by_fw = test_bit(ICE_FLAG_FW_LLDP_AGENT, vsi->back->flags);
+ bool was_ena = ice_vf_is_lldp_ena(vf) && !lldp_by_fw;
+ bool is_ena;
+
+ if (WARN_ON(!vsi)) {
+ vf->num_mac_lldp = 0;
+ return;
+ }
+
+ vf->num_mac_lldp += incr ? 1 : -1;
+ is_ena = ice_vf_is_lldp_ena(vf) && !lldp_by_fw;
+
+ if (was_ena != is_ena)
+ ice_vsi_cfg_sw_lldp(vsi, false, is_ena);
+}
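
ice_vf_update_mac_lldp_num() above only reprograms the software LLDP rule when the derived enable state actually flips, where that state is "the VF holds at least one LLDP MAC, is trusted, and FW LLDP is off". A minimal standalone sketch of that transition logic, not part of the patch, with the VSI reprogramming reduced to a print:

#include <stdbool.h>
#include <stdio.h>

struct vf {
	unsigned int num_mac_lldp;
	bool trusted;
};

/* mirrors ice_vf_is_lldp_ena() combined with the FW LLDP check */
static bool lldp_ena(const struct vf *vf, bool lldp_by_fw)
{
	return vf->num_mac_lldp && vf->trusted && !lldp_by_fw;
}

static void update_mac_lldp_num(struct vf *vf, bool lldp_by_fw, bool incr)
{
	bool was_ena = lldp_ena(vf, lldp_by_fw);

	vf->num_mac_lldp += incr ? 1 : -1;

	/* reprogram only on an actual enable/disable transition */
	if (was_ena != lldp_ena(vf, lldp_by_fw))
		printf("reprogram SW LLDP rule: %s\n",
		       lldp_ena(vf, lldp_by_fw) ? "enable" : "disable");
}

int main(void)
{
	struct vf vf = { .num_mac_lldp = 0, .trusted = true };

	update_mac_lldp_num(&vf, false, true);	/* 0 -> 1: enable */
	update_mac_lldp_num(&vf, false, true);	/* 1 -> 2: no change */
	update_mac_lldp_num(&vf, false, false);	/* 2 -> 1: no change */
	update_mac_lldp_num(&vf, false, false);	/* 1 -> 0: disable */
	return 0;
}
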
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index 4261fe1c2bcd..7a9c75d1d07c 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -13,7 +13,7 @@
#include <linux/avf/virtchnl.h>
#include "ice_type.h"
#include "ice_flow.h"
-#include "ice_virtchnl_fdir.h"
+#include "virt/fdir.h"
#include "ice_vsi_vlan_ops.h"
#define ICE_MAX_SRIOV_VFS 256
@@ -53,6 +53,46 @@ struct ice_mdd_vf_events {
u16 last_printed;
};
+enum ice_hash_ip_ctx_type {
+ ICE_HASH_IP_CTX_IP = 0,
+ ICE_HASH_IP_CTX_IP_ESP,
+ ICE_HASH_IP_CTX_IP_UDP_ESP,
+ ICE_HASH_IP_CTX_IP_AH,
+ ICE_HASH_IP_CTX_IP_PFCP,
+ ICE_HASH_IP_CTX_IP_UDP,
+ ICE_HASH_IP_CTX_IP_TCP,
+ ICE_HASH_IP_CTX_IP_SCTP,
+ ICE_HASH_IP_CTX_MAX,
+};
+
+struct ice_vf_hash_ip_ctx {
+ struct ice_rss_hash_cfg ctx[ICE_HASH_IP_CTX_MAX];
+};
+
+enum ice_hash_gtpu_ctx_type {
+ ICE_HASH_GTPU_CTX_EH_IP = 0,
+ ICE_HASH_GTPU_CTX_EH_IP_UDP,
+ ICE_HASH_GTPU_CTX_EH_IP_TCP,
+ ICE_HASH_GTPU_CTX_UP_IP,
+ ICE_HASH_GTPU_CTX_UP_IP_UDP,
+ ICE_HASH_GTPU_CTX_UP_IP_TCP,
+ ICE_HASH_GTPU_CTX_DW_IP,
+ ICE_HASH_GTPU_CTX_DW_IP_UDP,
+ ICE_HASH_GTPU_CTX_DW_IP_TCP,
+ ICE_HASH_GTPU_CTX_MAX,
+};
+
+struct ice_vf_hash_gtpu_ctx {
+ struct ice_rss_hash_cfg ctx[ICE_HASH_GTPU_CTX_MAX];
+};
+
+struct ice_vf_hash_ctx {
+ struct ice_vf_hash_ip_ctx v4;
+ struct ice_vf_hash_ip_ctx v6;
+ struct ice_vf_hash_gtpu_ctx ipv4;
+ struct ice_vf_hash_gtpu_ctx ipv6;
+};
+
/* Structure to store fdir fv entry */
struct ice_fdir_prof_info {
struct ice_parser_profile prof;
@@ -66,6 +106,12 @@ struct ice_vf_qs_bw {
u8 tc;
};
+/* Structure to store RSS field vector entry */
+struct ice_rss_prof_info {
+ struct ice_parser_profile prof;
+ bool symm;
+};
+
/* VF operations */
struct ice_vf_ops {
enum ice_disq_rst_src reset_type;
@@ -106,8 +152,9 @@ struct ice_vf {
u16 ctrl_vsi_idx;
struct ice_vf_fdir fdir;
struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS];
- /* first vector index of this VF in the PF space */
- int first_vector_idx;
+ struct ice_rss_prof_info rss_prof_info[ICE_MAX_PTGS];
+ struct ice_vf_hash_ctx hash_ctx;
+ u64 rss_hashcfg; /* RSS hash configuration */
struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
struct virtchnl_version_info vf_ver;
u32 driver_caps; /* reported by VF driver */
@@ -124,13 +171,22 @@ struct ice_vf {
u8 spoofchk:1;
u8 link_forced:1;
u8 link_up:1; /* only valid if VF link is forced */
+ u8 lldp_tx_ena:1;
+
+ u16 num_msix; /* num of MSI-X configured on this VF */
+
+ u32 ptp_caps;
+
unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */
unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */
+ /* first vector index of this VF in the PF space */
+ int first_vector_idx;
DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
unsigned long vf_caps; /* VF's adv. capabilities */
u8 num_req_qs; /* num of queue pairs requested by VF */
u16 num_mac;
+ u16 num_mac_lldp;
u16 num_vf_qs; /* num of queue configured per VF */
u8 vlan_strip_ena; /* Outer and Inner VLAN strip enable */
#define ICE_INNER_VLAN_STRIP_ENA BIT(0)
@@ -146,7 +202,9 @@ struct ice_vf {
/* devlink port data */
struct devlink_port devlink_port;
- u16 num_msix; /* num of MSI-X configured on this VF */
+ u16 lldp_recipe_id;
+ u16 lldp_rule_id;
+
struct ice_vf_qs_bw qs_bw[ICE_MAX_RSS_QS_PER_VF];
};
@@ -177,6 +235,11 @@ static inline u16 ice_vf_get_port_vlan_tpid(struct ice_vf *vf)
return vf->port_vlan_info.tpid;
}
+static inline bool ice_vf_is_lldp_ena(struct ice_vf *vf)
+{
+ return vf->num_mac_lldp && vf->trusted;
+}
+
/* VF Hash Table access functions
*
* These functions provide abstraction for interacting with the VF hash table.
@@ -224,6 +287,18 @@ static inline u16 ice_vf_get_port_vlan_tpid(struct ice_vf *vf)
#ifdef CONFIG_PCI_IOV
struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id);
+
+static inline struct ice_vf *ice_get_vf_by_dev(struct ice_pf *pf,
+ struct pci_dev *vf_dev)
+{
+ int vf_id = pci_iov_vf_id(vf_dev);
+
+ if (vf_id < 0)
+ return NULL;
+
+ return ice_get_vf_by_id(pf, pci_iov_vf_id(vf_dev));
+}
+
void ice_put_vf(struct ice_vf *vf);
bool ice_has_vfs(struct ice_pf *pf);
u16 ice_get_num_vfs(struct ice_pf *pf);
@@ -242,12 +317,20 @@ ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m);
int ice_reset_vf(struct ice_vf *vf, u32 flags);
void ice_reset_all_vfs(struct ice_pf *pf);
struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi);
+void ice_vf_update_mac_lldp_num(struct ice_vf *vf, struct ice_vsi *vsi,
+ bool incr);
#else /* CONFIG_PCI_IOV */
static inline struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
{
return NULL;
}
+static inline struct ice_vf *ice_get_vf_by_dev(struct ice_pf *pf,
+ struct pci_dev *vf_dev)
+{
+ return NULL;
+}
+
static inline void ice_put_vf(struct ice_vf *vf)
{
}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
index 0c7e77c0a09f..5392b0404986 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
@@ -24,6 +24,7 @@
#endif
void ice_initialize_vf_entry(struct ice_vf *vf);
+void ice_deinitialize_vf_entry(struct ice_vf *vf);
void ice_dis_vf_qs(struct ice_vf *vf);
int ice_check_vf_init(struct ice_vf *vf);
enum virtchnl_status_code ice_err_to_virt_err(int err);
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
index 75c8113e58ee..7798a5d4bc9d 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
@@ -23,18 +23,18 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
u8 *msg, u16 msglen, struct ice_sq_cd *cd)
{
struct ice_aqc_pf_vf_msg *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);
- cmd = &desc.params.virt;
+ cmd = libie_aq_raw(&desc);
cmd->id = cpu_to_le32(vfid);
desc.cookie_high = cpu_to_le32(v_opcode);
desc.cookie_low = cpu_to_le32(v_retval);
if (msglen)
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_vlan_mode.c b/drivers/net/ethernet/intel/ice/ice_vlan_mode.c
index 1279c1ffe31c..fb526cb84776 100644
--- a/drivers/net/ethernet/intel/ice/ice_vlan_mode.c
+++ b/drivers/net/ethernet/intel/ice/ice_vlan_mode.c
@@ -63,7 +63,7 @@ static int
ice_aq_get_vlan_mode(struct ice_hw *hw,
struct ice_aqc_get_vlan_mode *get_params)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
if (!get_params)
return -EINVAL;
@@ -275,7 +275,7 @@ ice_aq_set_vlan_mode(struct ice_hw *hw,
struct ice_aqc_set_vlan_mode *set_params)
{
u8 rdma_packet, mng_vlan_prot_id;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
if (!set_params)
return -EINVAL;
@@ -295,7 +295,7 @@ ice_aq_set_vlan_mode(struct ice_hw *hw,
ice_fill_dflt_direct_cmd_desc(&desc,
ice_aqc_opc_set_vlan_mode_parameters);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
return ice_aq_send_cmd(hw, &desc, set_params, sizeof(*set_params),
NULL);
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
index 5291f2888ef8..ada78f83b3ac 100644
--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
@@ -113,7 +113,7 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err) {
dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
goto out;
}
@@ -169,7 +169,7 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err) {
dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %d aq_err %s\n",
- ena, err, ice_aq_str(hw->adminq.sq_last_status));
+ ena, err, libie_aq_str(hw->adminq.sq_last_status));
goto out;
}
@@ -258,7 +258,7 @@ static int __ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, u16 pvid_info)
ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (ret) {
dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %s\n",
- ret, ice_aq_str(hw->adminq.sq_last_status));
+ ret, libie_aq_str(hw->adminq.sq_last_status));
goto out;
}
@@ -306,7 +306,7 @@ int ice_vsi_clear_inner_port_vlan(struct ice_vsi *vsi)
ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (ret)
dev_err(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %s\n",
- ret, ice_aq_str(hw->adminq.sq_last_status));
+ ret, libie_aq_str(hw->adminq.sq_last_status));
kfree(ctxt);
return ret;
@@ -353,7 +353,7 @@ static int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
if (status) {
netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %s\n",
ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status,
- ice_aq_str(pf->hw.adminq.sq_last_status));
+ libie_aq_str(pf->hw.adminq.sq_last_status));
goto err_out;
}
@@ -497,7 +497,7 @@ int ice_vsi_ena_outer_stripping(struct ice_vsi *vsi, u16 tpid)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err)
dev_err(ice_pf_to_dev(vsi->back), "update VSI for enabling outer VLAN stripping failed, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
else
vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
@@ -544,7 +544,7 @@ int ice_vsi_dis_outer_stripping(struct ice_vsi *vsi)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err)
dev_err(ice_pf_to_dev(vsi->back), "update VSI for disabling outer VLAN stripping failed, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
else
vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
@@ -604,7 +604,7 @@ int ice_vsi_ena_outer_insertion(struct ice_vsi *vsi, u16 tpid)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err)
dev_err(ice_pf_to_dev(vsi->back), "update VSI for enabling outer VLAN insertion failed, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
else
vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
@@ -654,7 +654,7 @@ int ice_vsi_dis_outer_insertion(struct ice_vsi *vsi)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err)
dev_err(ice_pf_to_dev(vsi->back), "update VSI for disabling outer VLAN insertion failed, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
else
vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
@@ -720,7 +720,7 @@ __ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, u16 vlan_info, u16 tpid)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err) {
dev_err(ice_pf_to_dev(vsi->back), "update VSI for setting outer port based VLAN failed, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
} else {
vsi->info.port_based_outer_vlan = ctxt->info.port_based_outer_vlan;
vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
@@ -782,7 +782,7 @@ int ice_vsi_clear_outer_port_vlan(struct ice_vsi *vsi)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err)
dev_err(ice_pf_to_dev(vsi->back), "update VSI for clearing outer port based VLAN failed, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
kfree(ctxt);
return err;
@@ -830,7 +830,7 @@ int ice_vsi_clear_port_vlan(struct ice_vsi *vsi)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err) {
dev_err(ice_pf_to_dev(vsi->back), "update VSI for clearing port based VLAN failed, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
} else {
vsi->info.port_based_outer_vlan =
ctxt->info.port_based_outer_vlan;
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 334ae945d640..989ff1fd9110 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -2,6 +2,8 @@
/* Copyright (c) 2019, Intel Corporation. */
#include <linux/bpf_trace.h>
+#include <linux/unroll.h>
+#include <net/libeth/xdp.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>
#include "ice.h"
@@ -18,52 +20,12 @@ static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx)
}
/**
- * ice_qp_reset_stats - Resets all stats for rings of given index
- * @vsi: VSI that contains rings of interest
- * @q_idx: ring index in array
- */
-static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
-{
- struct ice_vsi_stats *vsi_stat;
- struct ice_pf *pf;
-
- pf = vsi->back;
- if (!pf->vsi_stats)
- return;
-
- vsi_stat = pf->vsi_stats[vsi->idx];
- if (!vsi_stat)
- return;
-
- memset(&vsi_stat->rx_ring_stats[q_idx]->rx_stats, 0,
- sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
- memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
- sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));
- if (vsi->xdp_rings)
- memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
- sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
-}
-
-/**
- * ice_qp_clean_rings - Cleans all the rings of a given index
- * @vsi: VSI that contains rings of interest
- * @q_idx: ring index in array
- */
-static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
-{
- ice_clean_tx_ring(vsi->tx_rings[q_idx]);
- if (vsi->xdp_rings)
- ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
- ice_clean_rx_ring(vsi->rx_rings[q_idx]);
-}
-
-/**
* ice_qvec_toggle_napi - Enables/disables NAPI for a given q_vector
* @vsi: VSI that has netdev
* @q_vector: q_vector that has NAPI context
* @enable: true for enable, false for disable
*/
-static void
+void
ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
bool enable)
{
@@ -82,7 +44,7 @@ ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
* @rx_ring: Rx ring that will have its IRQ disabled
* @q_vector: queue vector
*/
-static void
+void
ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
struct ice_q_vector *q_vector)
{
@@ -112,7 +74,7 @@ ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
* @q_vector: queue vector
* @qid: queue index
*/
-static void
+void
ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector, u16 qid)
{
u16 reg_idx = q_vector->reg_idx;
@@ -142,7 +104,7 @@ ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector, u16 qid)
* @vsi: the VSI that contains queue vector
* @q_vector: queue vector
*/
-static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
+void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
@@ -153,111 +115,6 @@ static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
}
/**
- * ice_qp_dis - Disables a queue pair
- * @vsi: VSI of interest
- * @q_idx: ring index in array
- *
- * Returns 0 on success, negative on failure.
- */
-static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
-{
- struct ice_txq_meta txq_meta = { };
- struct ice_q_vector *q_vector;
- struct ice_tx_ring *tx_ring;
- struct ice_rx_ring *rx_ring;
- int fail = 0;
- int err;
-
- if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
- return -EINVAL;
-
- tx_ring = vsi->tx_rings[q_idx];
- rx_ring = vsi->rx_rings[q_idx];
- q_vector = rx_ring->q_vector;
-
- synchronize_net();
- netif_carrier_off(vsi->netdev);
- netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
-
- ice_qvec_dis_irq(vsi, rx_ring, q_vector);
- ice_qvec_toggle_napi(vsi, q_vector, false);
-
- ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
- err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
- if (!fail)
- fail = err;
- if (vsi->xdp_rings) {
- struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
-
- memset(&txq_meta, 0, sizeof(txq_meta));
- ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
- err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
- &txq_meta);
- if (!fail)
- fail = err;
- }
-
- ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, false);
- ice_qp_clean_rings(vsi, q_idx);
- ice_qp_reset_stats(vsi, q_idx);
-
- return fail;
-}
-
-/**
- * ice_qp_ena - Enables a queue pair
- * @vsi: VSI of interest
- * @q_idx: ring index in array
- *
- * Returns 0 on success, negative on failure.
- */
-static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
-{
- struct ice_q_vector *q_vector;
- int fail = 0;
- bool link_up;
- int err;
-
- err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
- if (!fail)
- fail = err;
-
- if (ice_is_xdp_ena_vsi(vsi)) {
- struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
-
- err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx);
- if (!fail)
- fail = err;
- ice_set_ring_xdp(xdp_ring);
- ice_tx_xsk_pool(vsi, q_idx);
- }
-
- err = ice_vsi_cfg_single_rxq(vsi, q_idx);
- if (!fail)
- fail = err;
-
- q_vector = vsi->rx_rings[q_idx]->q_vector;
- ice_qvec_cfg_msix(vsi, q_vector, q_idx);
-
- err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
- if (!fail)
- fail = err;
-
- ice_qvec_toggle_napi(vsi, q_vector, true);
- ice_qvec_ena_irq(vsi, q_vector);
-
- /* make sure NAPI sees updated ice_{t,x}_ring::xsk_pool */
- synchronize_net();
- ice_get_link_status(vsi->port_info, &link_up);
- if (link_up) {
- netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
- netif_carrier_on(vsi->netdev);
- }
-
- return fail;
-}
-
-/**
* ice_xsk_pool_disable - disable a buffer pool region
* @vsi: Current VSI
* @qid: queue ID
@@ -313,50 +170,18 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
* If allocation was successful, substitute buffer with allocated one.
* Returns 0 on success, negative on failure
*/
-static int
+int
ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
{
- size_t elem_size = pool_present ? sizeof(*rx_ring->xdp_buf) :
- sizeof(*rx_ring->rx_buf);
- void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);
-
- if (!sw_ring)
- return -ENOMEM;
-
if (pool_present) {
- kfree(rx_ring->rx_buf);
- rx_ring->rx_buf = NULL;
- rx_ring->xdp_buf = sw_ring;
+ rx_ring->xdp_buf = kcalloc(rx_ring->count,
+ sizeof(*rx_ring->xdp_buf),
+ GFP_KERNEL);
+ if (!rx_ring->xdp_buf)
+ return -ENOMEM;
} else {
kfree(rx_ring->xdp_buf);
rx_ring->xdp_buf = NULL;
- rx_ring->rx_buf = sw_ring;
- }
-
- return 0;
-}
-
-/**
- * ice_realloc_zc_buf - reallocate XDP ZC queue pairs
- * @vsi: Current VSI
- * @zc: is zero copy set
- *
- * Reallocate buffer for rx_rings that might be used by XSK.
- * XDP requires more memory, than rx_buf provides.
- * Returns 0 on success, negative on failure
- */
-int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
-{
- struct ice_rx_ring *rx_ring;
- uint i;
-
- ice_for_each_rxq(vsi, i) {
- rx_ring = vsi->rx_rings[i];
- if (!rx_ring->xsk_pool)
- continue;
-
- if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
- return -ENOMEM;
}
return 0;
@@ -372,6 +197,7 @@ int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
*/
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
+ struct ice_rx_ring *rx_ring = vsi->rx_rings[qid];
bool if_running, pool_present = !!pool;
int ret = 0, pool_failure = 0;
@@ -385,8 +211,6 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
ice_is_xdp_ena_vsi(vsi);
if (if_running) {
- struct ice_rx_ring *rx_ring = vsi->rx_rings[qid];
-
ret = ice_qp_dis(vsi, qid);
if (ret) {
netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
@@ -447,11 +271,6 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
rx_desc->read.pkt_addr = cpu_to_le64(dma);
rx_desc->wb.status_error0 = 0;
- /* Put private info that changes on a per-packet basis
- * into xdp_buff_xsk->cb.
- */
- ice_xdp_meta_set_desc(*xdp, rx_desc);
-
rx_desc++;
xdp++;
}
@@ -537,69 +356,6 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
}
/**
- * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
- * @rx_ring: Rx ring
- * @xdp: Pointer to XDP buffer
- *
- * This function allocates a new skb from a zero-copy Rx buffer.
- *
- * Returns the skb on success, NULL on failure.
- */
-static struct sk_buff *
-ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
-{
- unsigned int totalsize = xdp->data_end - xdp->data_meta;
- unsigned int metasize = xdp->data - xdp->data_meta;
- struct skb_shared_info *sinfo = NULL;
- struct sk_buff *skb;
- u32 nr_frags = 0;
-
- if (unlikely(xdp_buff_has_frags(xdp))) {
- sinfo = xdp_get_shared_info_from_buff(xdp);
- nr_frags = sinfo->nr_frags;
- }
- net_prefetch(xdp->data_meta);
-
- skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize);
- if (unlikely(!skb))
- return NULL;
-
- memcpy(__skb_put(skb, totalsize), xdp->data_meta,
- ALIGN(totalsize, sizeof(long)));
-
- if (metasize) {
- skb_metadata_set(skb, metasize);
- __skb_pull(skb, metasize);
- }
-
- if (likely(!xdp_buff_has_frags(xdp)))
- goto out;
-
- for (int i = 0; i < nr_frags; i++) {
- struct skb_shared_info *skinfo = skb_shinfo(skb);
- skb_frag_t *frag = &sinfo->frags[i];
- struct page *page;
- void *addr;
-
- page = dev_alloc_page();
- if (!page) {
- dev_kfree_skb(skb);
- return NULL;
- }
- addr = page_to_virt(page);
-
- memcpy(addr, skb_frag_page(frag), skb_frag_size(frag));
-
- __skb_fill_page_desc_noacc(skinfo, skinfo->nr_frags++,
- addr, 0, skb_frag_size(frag));
- }
-
-out:
- xsk_buff_free(xdp);
- return skb;
-}
-
-/**
* ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
* @xdp_ring: XDP Tx ring
* @xsk_pool: AF_XDP buffer pool pointer
@@ -801,35 +557,6 @@ out_failure:
return result;
}
-static int
-ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
- struct xdp_buff *xdp, const unsigned int size)
-{
- struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);
-
- if (!size)
- return 0;
-
- if (!xdp_buff_has_frags(first)) {
- sinfo->nr_frags = 0;
- sinfo->xdp_frags_size = 0;
- xdp_buff_set_frags_flag(first);
- }
-
- if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
- xsk_buff_free(first);
- return -ENOMEM;
- }
-
- __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
- virt_to_page(xdp->data_hard_start),
- XDP_PACKET_HEADROOM, size);
- sinfo->xdp_frags_size += size;
- xsk_buff_add_frag(xdp);
-
- return 0;
-}
-
/**
* ice_clean_rx_irq_zc - consumes packets from the hardware ring
* @rx_ring: AF_XDP Rx ring
@@ -842,10 +569,10 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
struct xsk_buff_pool *xsk_pool,
int budget)
{
+ struct xdp_buff *first = (struct xdp_buff *)rx_ring->xsk;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u32 ntc = rx_ring->next_to_clean;
u32 ntu = rx_ring->next_to_use;
- struct xdp_buff *first = NULL;
struct ice_tx_ring *xdp_ring;
unsigned int xdp_xmit = 0;
struct bpf_prog *xdp_prog;
@@ -859,9 +586,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
xdp_ring = rx_ring->xdp_ring;
- if (ntc != rx_ring->first_desc)
- first = *ice_xdp_buf(rx_ring, rx_ring->first_desc);
-
while (likely(total_rx_packets < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
unsigned int size, xdp_res = 0;
@@ -895,16 +619,19 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
if (!first) {
first = xdp;
- } else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) {
- break;
+ } else if (likely(size) && !xsk_buff_add_frag(first, xdp)) {
+ xsk_buff_free(first);
+ first = NULL;
}
if (++ntc == cnt)
ntc = 0;
- if (ice_is_non_eop(rx_ring, rx_desc))
+ if (ice_is_non_eop(rx_ring, rx_desc) || unlikely(!first))
continue;
+ ((struct libeth_xdp_buff *)first)->desc = rx_desc;
+
xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring,
xsk_pool);
if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) {
@@ -912,7 +639,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
} else if (xdp_res == ICE_XDP_EXIT) {
failure = true;
first = NULL;
- rx_ring->first_desc = ntc;
break;
} else if (xdp_res == ICE_XDP_CONSUMED) {
xsk_buff_free(first);
@@ -924,24 +650,20 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
total_rx_packets++;
first = NULL;
- rx_ring->first_desc = ntc;
continue;
construct_skb:
/* XDP_PASS path */
- skb = ice_construct_skb_zc(rx_ring, first);
+ skb = xdp_build_skb_from_zc(first);
if (!skb) {
+ xsk_buff_free(first);
+ first = NULL;
+
rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
- break;
+ continue;
}
first = NULL;
- rx_ring->first_desc = ntc;
-
- if (eth_skb_pad(skb)) {
- skb = NULL;
- continue;
- }
total_rx_bytes += skb->len;
total_rx_packets++;
@@ -953,7 +675,9 @@ construct_skb:
}
rx_ring->next_to_clean = ntc;
- entries_to_alloc = ICE_RX_DESC_UNUSED(rx_ring);
+ rx_ring->xsk = (struct libeth_xdp_buff *)first;
+
+ entries_to_alloc = ICE_DESC_UNUSED(rx_ring);
if (entries_to_alloc > ICE_RING_QUARTER(rx_ring))
failure |= !ice_alloc_rx_bufs_zc(rx_ring, xsk_pool,
entries_to_alloc);
@@ -1017,7 +741,8 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring,
struct ice_tx_desc *tx_desc;
u32 i;
- loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
+ unrolled_count(PKTS_PER_BATCH)
+ for (i = 0; i < PKTS_PER_BATCH; i++) {
dma_addr_t dma;
dma = xsk_buff_raw_get_dma(xsk_pool, descs[i].addr);
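For context, the unrolled_count() annotation from <linux/unroll.h> replaces the driver-local loop_unrolled_for macro deleted from ice_xsk.h just below. A user-space approximation of the same compiler hint compiles stand-alone; the pragmas mirror the macro being removed, the hard-coded count of 8 matches PKTS_PER_BATCH, and the exact expansion of the kernel helper is assumed, not quoted:

/* Ask the compiler to unroll a fixed-trip-count loop, as the removed
 * loop_unrolled_for macro did. The count is hard-coded to 8 here, just
 * like in the original macro.
 */
#include <stdio.h>

#define PKTS_PER_BATCH 8

#ifdef __clang__
#define unrolled_count_example(n) _Pragma("clang loop unroll_count(8)")
#elif defined(__GNUC__)
#define unrolled_count_example(n) _Pragma("GCC unroll 8")
#else
#define unrolled_count_example(n)
#endif

int main(void)
{
	unsigned int sum = 0, i;

	unrolled_count_example(PKTS_PER_BATCH)
	for (i = 0; i < PKTS_PER_BATCH; i++)
		sum += i;	/* stand-in for the per-descriptor work */

	printf("%u\n", sum);
	return 0;
}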
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index 45adeb513253..5275fcedc9e1 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -7,14 +7,6 @@
#define PKTS_PER_BATCH 8
-#ifdef __clang__
-#define loop_unrolled_for _Pragma("clang loop unroll_count(8)") for
-#elif __GNUC__ >= 8
-#define loop_unrolled_for _Pragma("GCC unroll 8") for
-#else
-#define loop_unrolled_for for
-#endif
-
struct ice_vsi;
#ifdef CONFIG_XDP_SOCKETS
@@ -30,7 +22,14 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool);
-int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
+int ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present);
+void ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
+ u16 qid);
+void ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
+ bool enable);
+void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector);
+void ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
+ struct ice_q_vector *q_vector);
#else
static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring,
struct xsk_buff_pool __always_unused *xsk_pool)
@@ -78,10 +77,25 @@ static inline void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring) { }
static inline void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) { }
static inline int
-ice_realloc_zc_buf(struct ice_vsi __always_unused *vsi,
- bool __always_unused zc)
+ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring,
+ bool __always_unused pool_present)
{
return 0;
}
+
+static inline void
+ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
+ u16 qid) { }
+
+static inline void
+ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
+ bool enable) { }
+
+static inline void
+ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector) { }
+
+static inline void
+ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
+ struct ice_q_vector *q_vector) { }
#endif /* CONFIG_XDP_SOCKETS */
#endif /* !_ICE_XSK_H_ */
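Since ice_qvec_dis_irq(), ice_qvec_toggle_napi(), ice_qvec_cfg_msix() and ice_qvec_ena_irq() are now exported for callers outside ice_xsk.c, here is a stand-alone trace of the per-queue-pair ordering they were used in by the ice_qp_dis()/ice_qp_ena() helpers deleted earlier in this patch. The stubs only print the steps; they are not driver code:

/* Sketch of the queue-pair teardown/bring-up ordering previously
 * open-coded in ice_qp_dis()/ice_qp_ena() (removed above).
 */
#include <stdio.h>

static void step(const char *what) { printf("%s\n", what); }

static void qp_disable(void)
{
	step("ice_qvec_dis_irq");		/* mask the pair's vector */
	step("ice_qvec_toggle_napi off");
	step("stop Tx (and XDP Tx) ring, disable Rx ring");
}

static void qp_enable(void)
{
	step("configure Tx/XDP/Rx queue");
	step("ice_qvec_cfg_msix");		/* re-point ITRs and cause registers */
	step("enable Rx ring");
	step("ice_qvec_toggle_napi on");
	step("ice_qvec_ena_irq");
}

int main(void)
{
	qp_disable();
	qp_enable();
	return 0;
}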
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/virt/allowlist.c
index c105a82ee136..a07efec19c45 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
+++ b/drivers/net/ethernet/intel/ice/virt/allowlist.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */
-#include "ice_virtchnl_allowlist.h"
+#include "allowlist.h"
/* Purpose of this file is to share functionality to allowlist or denylist
* opcodes used in PF <-> VF communication. Group of opcodes:
@@ -65,7 +65,7 @@ static const u32 vlan_v2_allowlist_opcodes[] = {
/* VIRTCHNL_VF_OFFLOAD_RSS_PF */
static const u32 rss_pf_allowlist_opcodes[] = {
VIRTCHNL_OP_CONFIG_RSS_KEY, VIRTCHNL_OP_CONFIG_RSS_LUT,
- VIRTCHNL_OP_GET_RSS_HENA_CAPS, VIRTCHNL_OP_SET_RSS_HENA,
+ VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, VIRTCHNL_OP_SET_RSS_HASHCFG,
VIRTCHNL_OP_CONFIG_RSS_HFUNC,
};
@@ -84,6 +84,12 @@ static const u32 fdir_pf_allowlist_opcodes[] = {
VIRTCHNL_OP_ADD_FDIR_FILTER, VIRTCHNL_OP_DEL_FDIR_FILTER,
};
+/* VIRTCHNL_VF_CAP_PTP */
+static const u32 ptp_allowlist_opcodes[] = {
+ VIRTCHNL_OP_1588_PTP_GET_CAPS,
+ VIRTCHNL_OP_1588_PTP_GET_TIME,
+};
+
static const u32 tc_allowlist_opcodes[] = {
VIRTCHNL_OP_GET_QOS_CAPS, VIRTCHNL_OP_CONFIG_QUEUE_BW,
VIRTCHNL_OP_CONFIG_QUANTA,
@@ -110,6 +116,7 @@ static const struct allowlist_opcode_info allowlist_opcodes[] = {
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_FDIR_PF, fdir_pf_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_VLAN_V2, vlan_v2_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_QOS, tc_allowlist_opcodes),
+ ALLOW_ITEM(VIRTCHNL_VF_CAP_PTP, ptp_allowlist_opcodes),
};
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.h b/drivers/net/ethernet/intel/ice/virt/allowlist.h
index d3ae86ded219..d3ae86ded219 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.h
+++ b/drivers/net/ethernet/intel/ice/virt/allowlist.h
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/virt/fdir.c
index 14e3f0f89c78..ae83c3914e29 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/virt/fdir.c
@@ -832,21 +832,27 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf,
struct virtchnl_proto_hdrs *proto,
struct virtchnl_fdir_fltr_conf *conf)
{
- u8 *pkt_buf, *msk_buf __free(kfree);
+ u8 *pkt_buf, *msk_buf __free(kfree) = NULL;
struct ice_parser_result rslt;
struct ice_pf *pf = vf->pf;
+ u16 pkt_len, udp_port = 0;
struct ice_parser *psr;
int status = -ENOMEM;
struct ice_hw *hw;
- u16 udp_port = 0;
- pkt_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL);
- msk_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL);
+ pkt_len = proto->raw.pkt_len;
+
+ if (!pkt_len || pkt_len > VIRTCHNL_MAX_SIZE_RAW_PACKET)
+ return -EINVAL;
+
+ pkt_buf = kzalloc(pkt_len, GFP_KERNEL);
+ msk_buf = kzalloc(pkt_len, GFP_KERNEL);
+
if (!pkt_buf || !msk_buf)
goto err_mem_alloc;
- memcpy(pkt_buf, proto->raw.spec, proto->raw.pkt_len);
- memcpy(msk_buf, proto->raw.mask, proto->raw.pkt_len);
+ memcpy(pkt_buf, proto->raw.spec, pkt_len);
+ memcpy(msk_buf, proto->raw.mask, pkt_len);
hw = &pf->hw;
@@ -862,7 +868,7 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf,
if (ice_get_open_tunnel_port(hw, &udp_port, TNL_VXLAN))
ice_parser_vxlan_tunnel_set(psr, udp_port, true);
- status = ice_parser_run(psr, pkt_buf, proto->raw.pkt_len, &rslt);
+ status = ice_parser_run(psr, pkt_buf, pkt_len, &rslt);
if (status)
goto err_parser_destroy;
@@ -876,7 +882,7 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf,
}
status = ice_parser_profile_init(&rslt, pkt_buf, msk_buf,
- proto->raw.pkt_len, ICE_BLK_FD,
+ pkt_len, ICE_BLK_FD,
conf->prof);
if (status)
goto err_parser_profile_init;
@@ -885,7 +891,7 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf,
ice_parser_profile_dump(hw, conf->prof);
/* Store raw flow info into @conf */
- conf->pkt_len = proto->raw.pkt_len;
+ conf->pkt_len = pkt_len;
conf->pkt_buf = pkt_buf;
conf->parser_ena = true;
@@ -1444,7 +1450,8 @@ err_free_pkt:
*/
static void ice_vf_fdir_timer(struct timer_list *t)
{
- struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr);
+ struct ice_vf_fdir_ctx *ctx_irq = timer_container_of(ctx_irq, t,
+ rx_tmr);
struct ice_vf_fdir_ctx *ctx_done;
struct ice_vf_fdir *fdir;
unsigned long flags;
@@ -1515,7 +1522,7 @@ ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc));
spin_unlock_irqrestore(&fdir->ctx_lock, flags);
- ret = del_timer(&ctx_irq->rx_tmr);
+ ret = timer_delete(&ctx_irq->rx_tmr);
if (!ret)
dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id);
@@ -1910,7 +1917,7 @@ static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq;
unsigned long flags;
- del_timer(&ctx->rx_tmr);
+ timer_delete(&ctx->rx_tmr);
spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
@@ -2091,6 +2098,11 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
pf = vf->pf;
dev = ice_pf_to_dev(pf);
vf_vsi = ice_get_vf_vsi(vf);
+ if (!vf_vsi) {
+		dev_err(dev, "Cannot get FDIR vf_vsi for VF %u\n", vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err_exit;
+ }
#define ICE_VF_MAX_FDIR_FILTERS 128
if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) ||
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h b/drivers/net/ethernet/intel/ice/virt/fdir.h
index ac6dcab454b4..ac6dcab454b4 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
+++ b/drivers/net/ethernet/intel/ice/virt/fdir.h
diff --git a/drivers/net/ethernet/intel/ice/virt/queues.c b/drivers/net/ethernet/intel/ice/virt/queues.c
new file mode 100644
index 000000000000..f73d5a3e83d4
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/virt/queues.c
@@ -0,0 +1,975 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022, Intel Corporation. */
+
+#include "virtchnl.h"
+#include "queues.h"
+#include "ice_vf_lib_private.h"
+#include "ice.h"
+#include "ice_base.h"
+#include "ice_lib.h"
+
+/**
+ * ice_vc_get_max_frame_size - get max frame size allowed for VF
+ * @vf: VF used to determine max frame size
+ *
+ * Max frame size is determined based on the current port's max frame size and
+ * whether a port VLAN is configured on this VF. The VF is not aware whether
+ * it's in a port VLAN so the PF needs to account for this in max frame size
+ * checks and sending the max frame size to the VF.
+ */
+u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
+{
+ struct ice_port_info *pi = ice_vf_get_port_info(vf);
+ u16 max_frame_size;
+
+ max_frame_size = pi->phy.link_info.max_frame_size;
+
+ if (ice_vf_is_port_vlan_ena(vf))
+ max_frame_size -= VLAN_HLEN;
+
+ return max_frame_size;
+}
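As a rough illustration of the accounting above, the VF-visible frame limit shrinks by one VLAN tag when a port VLAN is active. The 9728-byte port limit is an example value, not taken from this patch; VLAN_HLEN is the standard 4-byte 802.1Q tag length:

/* Stand-alone sketch of the max-frame accounting done by
 * ice_vc_get_max_frame_size(); example values only.
 */
#include <stdio.h>

#define EXAMPLE_VLAN_HLEN 4	/* 802.1Q tag length */

static unsigned short vf_max_frame(unsigned short port_max_frame, int port_vlan_ena)
{
	/* the PF inserts the port VLAN tag, so the VF loses that headroom */
	return port_vlan_ena ? port_max_frame - EXAMPLE_VLAN_HLEN : port_max_frame;
}

int main(void)
{
	printf("%u\n", vf_max_frame(9728, 1));	/* 9724 */
	return 0;
}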
+
+/**
+ * ice_vc_isvalid_q_id - check for a valid queue ID
+ * @vsi: VSI to check queue ID against
+ * @qid: VSI relative queue ID
+ *
+ * Check that the queue ID is valid for the given VSI.
+ */
+static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u16 qid)
+{
+ /* allocated Tx and Rx queues should be always equal for VF VSI */
+ return qid < vsi->alloc_txq;
+}
+
+/**
+ * ice_vc_isvalid_ring_len - check for a valid ring count
+ * @ring_len: length of ring
+ *
+ * Check that the ring count is either zero or a multiple of
+ * ICE_REQ_DESC_MULTIPLE within the supported descriptor range.
+ */
+static bool ice_vc_isvalid_ring_len(u16 ring_len)
+{
+ return ring_len == 0 ||
+ (ring_len >= ICE_MIN_NUM_DESC &&
+ ring_len <= ICE_MAX_NUM_DESC_E810 &&
+ !(ring_len % ICE_REQ_DESC_MULTIPLE));
+}
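A user-space version of the same predicate is shown below. The 64, 8160 and 32 bounds are placeholders standing in for ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC_E810 and ICE_REQ_DESC_MULTIPLE; they are assumptions for illustration, not values verified against the driver headers:

/* Sketch of the ring-length validity rule used above. */
#include <stdbool.h>
#include <stdio.h>

static bool ring_len_valid(unsigned short ring_len)
{
	return ring_len == 0 ||
	       (ring_len >= 64 && ring_len <= 8160 && !(ring_len % 32));
}

int main(void)
{
	printf("%d %d %d\n",
	       ring_len_valid(512),	/* ok */
	       ring_len_valid(0),	/* "keep default" is allowed */
	       ring_len_valid(100));	/* not a multiple of 32 */
	return 0;
}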
+
+/**
+ * ice_vf_cfg_qs_bw - Configure per queue bandwidth
+ * @vf: pointer to the VF info
+ * @num_queues: number of queues to be configured
+ *
+ * Configure per queue bandwidth.
+ *
+ * Return: 0 on success or negative error value.
+ */
+static int ice_vf_cfg_qs_bw(struct ice_vf *vf, u16 num_queues)
+{
+ struct ice_hw *hw = &vf->pf->hw;
+ struct ice_vsi *vsi;
+ int ret;
+ u16 i;
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ return -EINVAL;
+
+ for (i = 0; i < num_queues; i++) {
+ u32 p_rate, min_rate;
+ u8 tc;
+
+ p_rate = vf->qs_bw[i].peak;
+ min_rate = vf->qs_bw[i].committed;
+ tc = vf->qs_bw[i].tc;
+ if (p_rate)
+ ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc,
+ vf->qs_bw[i].queue_id,
+ ICE_MAX_BW, p_rate);
+ else
+ ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc,
+ vf->qs_bw[i].queue_id,
+ ICE_MAX_BW);
+ if (ret)
+ return ret;
+
+ if (min_rate)
+ ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc,
+ vf->qs_bw[i].queue_id,
+ ICE_MIN_BW, min_rate);
+ else
+ ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc,
+ vf->qs_bw[i].queue_id,
+ ICE_MIN_BW);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vf_cfg_q_quanta_profile - Configure quanta profile
+ * @vf: pointer to the VF info
+ * @quanta_prof_idx: pointer to the quanta profile index
+ * @quanta_size: quanta size to be set
+ *
+ * This function chooses an available quanta profile and configures the
+ * register. The quanta profiles are divided evenly among the device's
+ * functions and are then available to the specific PF and its VFs. The first
+ * profile for each PF is a reserved default profile; only the quanta size of
+ * the remaining, unused profiles can be modified.
+ *
+ * Return: 0 on success or negative error value.
+ */
+static int ice_vf_cfg_q_quanta_profile(struct ice_vf *vf, u16 quanta_size,
+ u16 *quanta_prof_idx)
+{
+ const u16 n_desc = calc_quanta_desc(quanta_size);
+ struct ice_hw *hw = &vf->pf->hw;
+ const u16 n_cmd = 2 * n_desc;
+ struct ice_pf *pf = vf->pf;
+ u16 per_pf, begin_id;
+ u8 n_used;
+ u32 reg;
+
+ begin_id = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) / hw->dev_caps.num_funcs *
+ hw->logical_pf_id;
+
+ if (quanta_size == ICE_DFLT_QUANTA) {
+ *quanta_prof_idx = begin_id;
+ } else {
+ per_pf = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) /
+ hw->dev_caps.num_funcs;
+ n_used = pf->num_quanta_prof_used;
+ if (n_used < per_pf) {
+ *quanta_prof_idx = begin_id + 1 + n_used;
+ pf->num_quanta_prof_used++;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ reg = FIELD_PREP(GLCOMM_QUANTA_PROF_QUANTA_SIZE_M, quanta_size) |
+ FIELD_PREP(GLCOMM_QUANTA_PROF_MAX_CMD_M, n_cmd) |
+ FIELD_PREP(GLCOMM_QUANTA_PROF_MAX_DESC_M, n_desc);
+ wr32(hw, GLCOMM_QUANTA_PROF(*quanta_prof_idx), reg);
+
+ return 0;
+}
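The profile-index partitioning above is simple integer arithmetic: each function gets an equal, contiguous slice of profile indices, and the first index of the slice is the reserved default. A stand-alone sketch, where MAX_INDEX = 15 is an illustrative assumption rather than a value checked against the register spec:

/* Sketch of the quanta-profile index partitioning. */
#include <stdio.h>

#define EXAMPLE_QUANTA_PROF_MAX_INDEX 15

static unsigned int pf_first_profile(unsigned int num_funcs, unsigned int pf_id)
{
	/* each function owns an equal slice of the profile index space */
	return (EXAMPLE_QUANTA_PROF_MAX_INDEX + 1) / num_funcs * pf_id;
}

int main(void)
{
	/* with 8 functions, PF 3 owns profiles 6..7; index 6 is its default */
	printf("%u\n", pf_first_profile(8, 3));
	return 0;
}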
+
+/**
+ * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
+ * @vqs: virtchnl_queue_select structure containing bitmaps to validate
+ *
+ * Return true on successful validation, else false
+ */
+static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
+{
+ if ((!vqs->rx_queues && !vqs->tx_queues) ||
+ vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
+ vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
+ return false;
+
+ return true;
+}
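The check above rejects a request that selects no queues at all, or that sets a bit at or beyond the per-VF queue limit. A stand-alone version, where 16 stands in for ICE_MAX_RSS_QS_PER_VF (an assumed value):

/* Sketch of the Rx/Tx queue bitmap validation. */
#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_MAX_QS_PER_VF 16

static bool vqs_bitmaps_valid(unsigned int rxq, unsigned int txq)
{
	if ((!rxq && !txq) ||
	    rxq >= (1u << EXAMPLE_MAX_QS_PER_VF) ||
	    txq >= (1u << EXAMPLE_MAX_QS_PER_VF))
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", vqs_bitmaps_valid(0x0003, 0x0003));	/* queues 0-1: ok */
	printf("%d\n", vqs_bitmaps_valid(0, 0));		/* nothing selected */
	printf("%d\n", vqs_bitmaps_valid(1u << 16, 0));		/* out of range */
	return 0;
}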
+
+/**
+ * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
+ * @vsi: VSI of the VF to configure
+ * @q_idx: VF queue index used to determine the queue in the PF's space
+ */
+void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ u32 pfq = vsi->txq_map[q_idx];
+ u32 reg;
+
+ reg = rd32(hw, QINT_TQCTL(pfq));
+
+ /* MSI-X index 0 in the VF's space is always for the OICR, which means
+ * this is most likely a poll mode VF driver, so don't enable an
+ * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
+ */
+ if (!(reg & QINT_TQCTL_MSIX_INDX_M))
+ return;
+
+ wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
+}
+
+/**
+ * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
+ * @vsi: VSI of the VF to configure
+ * @q_idx: VF queue index used to determine the queue in the PF's space
+ */
+void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ u32 pfq = vsi->rxq_map[q_idx];
+ u32 reg;
+
+ reg = rd32(hw, QINT_RQCTL(pfq));
+
+ /* MSI-X index 0 in the VF's space is always for the OICR, which means
+ * this is most likely a poll mode VF driver, so don't enable an
+ * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
+ */
+ if (!(reg & QINT_RQCTL_MSIX_INDX_M))
+ return;
+
+ wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
+}
+
+/**
+ * ice_vc_ena_qs_msg - enable all or specific queue(s)
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Called from the VF to enable all or specific queue(s).
+ */
+int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
+ struct ice_vsi *vsi;
+ unsigned long q_map;
+ u16 vf_q_id;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_validate_vqs_bitmaps(vqs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Enable only Rx rings, Tx rings were enabled by the FW when the
+ * Tx queue group list was configured and the context bits were
+ * programmed using ice_vsi_cfg_txqs
+ */
+ q_map = vqs->rx_queues;
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+		/* Skip queue if already enabled */
+ if (test_bit(vf_q_id, vf->rxq_ena))
+ continue;
+
+ if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
+ vf_q_id, vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
+ set_bit(vf_q_id, vf->rxq_ena);
+ }
+
+ q_map = vqs->tx_queues;
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+		/* Skip queue if already enabled */
+ if (test_bit(vf_q_id, vf->txq_ena))
+ continue;
+
+ ice_vf_ena_txq_interrupt(vsi, vf_q_id);
+ set_bit(vf_q_id, vf->txq_ena);
+ }
+
+ /* Set flag to indicate that queues are enabled */
+ if (v_ret == VIRTCHNL_STATUS_SUCCESS)
+ set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vf_vsi_dis_single_txq - disable a single Tx queue
+ * @vf: VF to disable queue for
+ * @vsi: VSI for the VF
+ * @q_id: VF relative (0-based) queue ID
+ *
+ * Attempt to disable the Tx queue passed in. If the Tx queue was successfully
+ * disabled then clear q_id bit in the enabled queues bitmap and return
+ * success. Otherwise return error.
+ */
+int ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
+{
+ struct ice_txq_meta txq_meta = { 0 };
+ struct ice_tx_ring *ring;
+ int err;
+
+ if (!test_bit(q_id, vf->txq_ena))
+ dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
+ q_id, vsi->vsi_num);
+
+ ring = vsi->tx_rings[q_id];
+ if (!ring)
+ return -EINVAL;
+
+ ice_fill_txq_meta(vsi, ring, &txq_meta);
+
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, &txq_meta);
+ if (err) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
+ q_id, vsi->vsi_num);
+ return err;
+ }
+
+ /* Clear enabled queues flag */
+ clear_bit(q_id, vf->txq_ena);
+
+ return 0;
+}
+
+/**
+ * ice_vc_dis_qs_msg - disable all or specific queue(s)
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Called from the VF to disable all or specific queue(s).
+ */
+int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
+ struct ice_vsi *vsi;
+ unsigned long q_map;
+ u16 vf_q_id;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
+ !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_validate_vqs_bitmaps(vqs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vqs->tx_queues) {
+ q_map = vqs->tx_queues;
+
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (ice_vf_vsi_dis_single_txq(vf, vsi, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+ }
+ }
+
+ q_map = vqs->rx_queues;
+ /* speed up Rx queue disable by batching them if possible */
+ if (q_map &&
+ bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
+ if (ice_vsi_stop_all_rx_rings(vsi)) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
+ vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
+ } else if (q_map) {
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Skip queue if not enabled */
+ if (!test_bit(vf_q_id, vf->rxq_ena))
+ continue;
+
+ if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
+ true)) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
+ vf_q_id, vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Clear enabled queues flag */
+ clear_bit(vf_q_id, vf->rxq_ena);
+ }
+ }
+
+	/* Clear the queues-enabled state flag if no queues remain enabled */
+ if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
+ clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
+ NULL, 0);
+}
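The Rx disable path above takes a fast path: when the VF asks to disable exactly the set of Rx queues that are currently enabled (compared with bitmap_equal() over ICE_MAX_RSS_QS_PER_VF bits), all rings are stopped with one batched call instead of one ring at a time. A trivial stand-alone sketch of that decision:

/* Sketch of the batched-vs-per-ring Rx disable decision. */
#include <stdbool.h>
#include <stdio.h>

static bool use_batched_rx_disable(unsigned long requested, unsigned long enabled)
{
	return requested && requested == enabled;
}

int main(void)
{
	printf("%d\n", use_batched_rx_disable(0x000f, 0x000f));	/* batch */
	printf("%d\n", use_batched_rx_disable(0x0003, 0x000f));	/* per ring */
	return 0;
}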
+
+/**
+ * ice_cfg_interrupt - configure the IRQ to queue map
+ * @vf: pointer to the VF info
+ * @vsi: the VSI being configured
+ * @map: vector map for mapping vectors to queues
+ * @q_vector: structure for interrupt vector
+ */
+static enum virtchnl_status_code
+ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi,
+ struct virtchnl_vector_map *map,
+ struct ice_q_vector *q_vector)
+{
+ u16 vsi_q_id, vsi_q_id_idx;
+ unsigned long qmap;
+
+ q_vector->num_ring_rx = 0;
+ q_vector->num_ring_tx = 0;
+
+ qmap = map->rxq_map;
+ for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
+ vsi_q_id = vsi_q_id_idx;
+
+ if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
+ return VIRTCHNL_STATUS_ERR_PARAM;
+
+ q_vector->num_ring_rx++;
+ q_vector->rx.itr_idx = map->rxitr_idx;
+ vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
+ ice_cfg_rxq_interrupt(vsi, vsi_q_id,
+ q_vector->vf_reg_idx,
+ q_vector->rx.itr_idx);
+ }
+
+ qmap = map->txq_map;
+ for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
+ vsi_q_id = vsi_q_id_idx;
+
+ if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
+ return VIRTCHNL_STATUS_ERR_PARAM;
+
+ q_vector->num_ring_tx++;
+ q_vector->tx.itr_idx = map->txitr_idx;
+ vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
+ ice_cfg_txq_interrupt(vsi, vsi_q_id,
+ q_vector->vf_reg_idx,
+ q_vector->tx.itr_idx);
+ }
+
+ return VIRTCHNL_STATUS_SUCCESS;
+}
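Each virtchnl vector_map entry simply attaches every VSI-relative queue whose bit is set in rxq_map/txq_map to the given vector, so the resulting ring counts are the population counts of the two bitmaps. A stand-alone illustration:

/* Sketch of how one vector_map entry translates into ring counts. */
#include <stdio.h>

int main(void)
{
	unsigned long rxq_map = 0x5;	/* Rx queues 0 and 2 on this vector */
	unsigned long txq_map = 0x5;	/* Tx queues 0 and 2 on this vector */

	printf("num_ring_rx=%d num_ring_tx=%d\n",
	       __builtin_popcountl(rxq_map),
	       __builtin_popcountl(txq_map));
	return 0;
}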
+
+/**
+ * ice_vc_cfg_irq_map_msg - configure the IRQ to queue map
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Called from the VF to configure the IRQ to queue map.
+ */
+int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ u16 num_q_vectors_mapped, vsi_id, vector_id;
+ struct virtchnl_irq_map_info *irqmap_info;
+ struct virtchnl_vector_map *map;
+ struct ice_vsi *vsi;
+ int i;
+
+ irqmap_info = (struct virtchnl_irq_map_info *)msg;
+ num_q_vectors_mapped = irqmap_info->num_vectors;
+
+ /* Check to make sure number of VF vectors mapped is not greater than
+ * number of VF vectors originally allocated, and check that
+ * there is actually at least a single VF queue vector mapped
+ */
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+ vf->num_msix < num_q_vectors_mapped ||
+ !num_q_vectors_mapped) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < num_q_vectors_mapped; i++) {
+ struct ice_q_vector *q_vector;
+
+ map = &irqmap_info->vecmap[i];
+
+ vector_id = map->vector_id;
+ vsi_id = map->vsi_id;
+ /* vector_id is always 0-based for each VF, and can never be
+ * larger than or equal to the max allowed interrupts per VF
+ */
+ if (!(vector_id < vf->num_msix) ||
+ !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
+ (!vector_id && (map->rxq_map || map->txq_map))) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* No need to map VF miscellaneous or rogue vector */
+ if (!vector_id)
+ continue;
+
+		/* Subtract the non-queue vector from the vector_id passed by
+		 * the VF to get the actual VSI queue vector array index
+		 */
+ q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
+ if (!q_vector) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+		/* look out for an invalid queue index */
+ v_ret = ice_cfg_interrupt(vf, vsi, map, q_vector);
+ if (v_ret)
+ goto error_param;
+ }
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_cfg_q_bw - Configure per queue bandwidth
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer which holds the command descriptor
+ *
+ * Configure VF queues bandwidth.
+ *
+ * Return: 0 on success or negative error value.
+ */
+int ice_vc_cfg_q_bw(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_queues_bw_cfg *qbw =
+ (struct virtchnl_queues_bw_cfg *)msg;
+ struct ice_vsi *vsi;
+ u16 i;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+ !ice_vc_isvalid_vsi_id(vf, qbw->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (qbw->num_queues > ICE_MAX_RSS_QS_PER_VF ||
+ qbw->num_queues > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
+ dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
+ vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ for (i = 0; i < qbw->num_queues; i++) {
+ if (qbw->cfg[i].shaper.peak != 0 && vf->max_tx_rate != 0 &&
+ qbw->cfg[i].shaper.peak > vf->max_tx_rate) {
+ dev_warn(ice_pf_to_dev(vf->pf), "The maximum queue %d rate limit configuration may not take effect because the maximum TX rate for VF-%d is %d\n",
+ qbw->cfg[i].queue_id, vf->vf_id,
+ vf->max_tx_rate);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+ if (qbw->cfg[i].shaper.committed != 0 && vf->min_tx_rate != 0 &&
+ qbw->cfg[i].shaper.committed < vf->min_tx_rate) {
+ dev_warn(ice_pf_to_dev(vf->pf), "The minimum queue %d rate limit configuration may not take effect because the minimum TX rate for VF-%d is %d\n",
+ qbw->cfg[i].queue_id, vf->vf_id,
+ vf->min_tx_rate);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+ if (qbw->cfg[i].queue_id > vf->num_vf_qs) {
+ dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure invalid queue_id\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+ if (qbw->cfg[i].tc >= ICE_MAX_TRAFFIC_CLASS) {
+ dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure a traffic class higher than allowed\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+ }
+
+ for (i = 0; i < qbw->num_queues; i++) {
+ vf->qs_bw[i].queue_id = qbw->cfg[i].queue_id;
+ vf->qs_bw[i].peak = qbw->cfg[i].shaper.peak;
+ vf->qs_bw[i].committed = qbw->cfg[i].shaper.committed;
+ vf->qs_bw[i].tc = qbw->cfg[i].tc;
+ }
+
+ if (ice_vf_cfg_qs_bw(vf, qbw->num_queues))
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+
+err:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUEUE_BW,
+ v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_cfg_q_quanta - Configure per queue quanta
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer which holds the command descriptor
+ *
+ * Configure VF queues quanta.
+ *
+ * Return: 0 on success or negative error value.
+ */
+int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg)
+{
+ u16 quanta_prof_id, quanta_size, start_qid, num_queues, end_qid, i;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_quanta_cfg *qquanta =
+ (struct virtchnl_quanta_cfg *)msg;
+ struct ice_vsi *vsi;
+ int ret;
+
+ start_qid = qquanta->queue_select.start_queue_id;
+ num_queues = qquanta->queue_select.num_queues;
+
+ if (check_add_overflow(start_qid, num_queues, &end_qid)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (end_qid > ICE_MAX_RSS_QS_PER_VF ||
+ end_qid > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
+ dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
+ vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ quanta_size = qquanta->quanta_size;
+ if (quanta_size > ICE_MAX_QUANTA_SIZE ||
+ quanta_size < ICE_MIN_QUANTA_SIZE) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (quanta_size % 64) {
+		dev_err(ice_pf_to_dev(vf->pf), "quanta size must be a multiple of 64\n");
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ ret = ice_vf_cfg_q_quanta_profile(vf, quanta_size,
+ &quanta_prof_id);
+ if (ret) {
+ v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ goto err;
+ }
+
+ for (i = start_qid; i < end_qid; i++)
+ vsi->tx_rings[i]->quanta_prof_id = quanta_prof_id;
+
+err:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUANTA,
+ v_ret, NULL, 0);
+}
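The validation above boils down to an overflow-checked queue range plus a quanta size that is a multiple of 64 within the supported bounds. A user-space sketch, using __builtin_add_overflow() as the analogue of the kernel's check_add_overflow(); the 64..4096 size bounds are assumptions standing in for ICE_MIN_QUANTA_SIZE/ICE_MAX_QUANTA_SIZE:

/* Sketch of the quanta request validation. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool quanta_req_valid(uint16_t start_qid, uint16_t num_queues,
			     uint16_t quanta_size, uint16_t vf_num_qs)
{
	uint16_t end_qid;

	if (__builtin_add_overflow(start_qid, num_queues, &end_qid))
		return false;			/* u16 wrap-around */
	if (end_qid > vf_num_qs)
		return false;			/* past the VF's queues */
	if (quanta_size < 64 || quanta_size > 4096 || quanta_size % 64)
		return false;			/* must be a multiple of 64 */
	return true;
}

int main(void)
{
	printf("%d\n", quanta_req_valid(0, 4, 1024, 16));	/* ok */
	printf("%d\n", quanta_req_valid(65535, 2, 1024, 16));	/* wraps */
	return 0;
}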
+
+/**
+ * ice_vc_cfg_qs_msg - configure the Rx/Tx queues
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Called from the VF to configure the Rx/Tx queues.
+ */
+int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_vsi_queue_config_info *qci =
+ (struct virtchnl_vsi_queue_config_info *)msg;
+ struct virtchnl_queue_pair_info *qpi;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ int i = -1, q_idx;
+ bool ena_ts;
+ u8 act_prt;
+
+ mutex_lock(&pf->lag_mutex);
+ act_prt = ice_lag_prepare_vf_reset(pf->lag);
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ goto error_param;
+
+ if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id))
+ goto error_param;
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ goto error_param;
+
+ if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
+ qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
+ dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
+ vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
+ goto error_param;
+ }
+
+ for (i = 0; i < qci->num_queue_pairs; i++) {
+ if (!qci->qpair[i].rxq.crc_disable)
+ continue;
+
+ if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_CRC) ||
+ vf->vlan_strip_ena)
+ goto error_param;
+ }
+
+ for (i = 0; i < qci->num_queue_pairs; i++) {
+ qpi = &qci->qpair[i];
+ if (qpi->txq.vsi_id != qci->vsi_id ||
+ qpi->rxq.vsi_id != qci->vsi_id ||
+ qpi->rxq.queue_id != qpi->txq.queue_id ||
+ qpi->txq.headwb_enabled ||
+ !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
+ !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
+ !ice_vc_isvalid_q_id(vsi, qpi->txq.queue_id)) {
+ goto error_param;
+ }
+
+ q_idx = qpi->rxq.queue_id;
+
+ /* make sure selected "q_idx" is in valid range of queues
+ * for selected "vsi"
+ */
+ if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
+ goto error_param;
+ }
+
+ /* copy Tx queue info from VF into VSI */
+ if (qpi->txq.ring_len > 0) {
+ vsi->tx_rings[q_idx]->dma = qpi->txq.dma_ring_addr;
+ vsi->tx_rings[q_idx]->count = qpi->txq.ring_len;
+
+ /* Disable any existing queue first */
+ if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx))
+ goto error_param;
+
+ /* Configure a queue with the requested settings */
+ if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
+ dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n",
+ vf->vf_id, q_idx);
+ goto error_param;
+ }
+ }
+
+ /* copy Rx queue info from VF into VSI */
+ if (qpi->rxq.ring_len > 0) {
+ u16 max_frame_size = ice_vc_get_max_frame_size(vf);
+ struct ice_rx_ring *ring = vsi->rx_rings[q_idx];
+ u32 rxdid;
+
+ ring->dma = qpi->rxq.dma_ring_addr;
+ ring->count = qpi->rxq.ring_len;
+
+ if (qpi->rxq.crc_disable)
+ ring->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
+ else
+ ring->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
+
+ if (qpi->rxq.databuffer_size != 0 &&
+ (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
+ qpi->rxq.databuffer_size < 1024))
+ goto error_param;
+
+ ring->rx_buf_len = qpi->rxq.databuffer_size;
+
+ if (qpi->rxq.max_pkt_size > max_frame_size ||
+ qpi->rxq.max_pkt_size < 64)
+ goto error_param;
+
+ vsi->max_frame = qpi->rxq.max_pkt_size;
+ /* add space for the port VLAN since the VF driver is
+ * not expected to account for it in the MTU
+ * calculation
+ */
+ if (ice_vf_is_port_vlan_ena(vf))
+ vsi->max_frame += VLAN_HLEN;
+
+ if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
+ dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n",
+ vf->vf_id, q_idx);
+ goto error_param;
+ }
+
+			/* If Rx flex desc is supported, select RXDID for Rx
+			 * queues. Otherwise, use the legacy 32-byte descriptor
+			 * format. The legacy 16-byte descriptor is not
+			 * supported; if that RXDID is selected, return an
+			 * error.
+			 */
+ if (vf->driver_caps &
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
+ rxdid = qpi->rxq.rxdid;
+ if (!(BIT(rxdid) & pf->supported_rxdids))
+ goto error_param;
+ } else {
+ rxdid = ICE_RXDID_LEGACY_1;
+ }
+
+ ena_ts = ((vf->driver_caps &
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) &&
+ (vf->driver_caps & VIRTCHNL_VF_CAP_PTP) &&
+ (qpi->rxq.flags & VIRTCHNL_PTP_RX_TSTAMP));
+
+ ice_write_qrxflxp_cntxt(&vsi->back->hw,
+ vsi->rxq_map[q_idx], rxdid,
+ ICE_RXDID_PRIO, ena_ts);
+ }
+ }
+
+ ice_lag_complete_vf_reset(pf->lag, act_prt);
+ mutex_unlock(&pf->lag_mutex);
+
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+error_param:
+ /* disable whatever we can */
+ for (; i >= 0; i--) {
+ if (ice_vsi_ctrl_one_rx_ring(vsi, false, i, true))
+ dev_err(ice_pf_to_dev(pf), "VF-%d could not disable RX queue %d\n",
+ vf->vf_id, i);
+ if (ice_vf_vsi_dis_single_txq(vf, vsi, i))
+ dev_err(ice_pf_to_dev(pf), "VF-%d could not disable TX queue %d\n",
+ vf->vf_id, i);
+ }
+
+ ice_lag_complete_vf_reset(pf->lag, act_prt);
+ mutex_unlock(&pf->lag_mutex);
+
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ VIRTCHNL_STATUS_ERR_PARAM, NULL, 0);
+}
+
+/**
+ * ice_vc_request_qs_msg - handle a VF request for a different queue count
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * VFs get a default number of queues but can use this message to request a
+ * different number. If the request is successful, the PF will reset the VF and
+ * return 0. If unsuccessful, the PF will send a message informing the VF of
+ * the number of available queue pairs via the virtchnl message response.
+ */
+int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vf_res_request *vfres =
+ (struct virtchnl_vf_res_request *)msg;
+ u16 req_queues = vfres->num_queue_pairs;
+ struct ice_pf *pf = vf->pf;
+ u16 max_allowed_vf_queues;
+ u16 tx_rx_queue_left;
+ struct device *dev;
+ u16 cur_queues;
+
+ dev = ice_pf_to_dev(pf);
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ cur_queues = vf->num_vf_qs;
+ tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
+ ice_get_avail_rxq_count(pf));
+ max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
+ if (!req_queues) {
+ dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
+ vf->vf_id);
+ } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
+ dev_err(dev, "VF %d tried to request more than %d queues.\n",
+ vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
+ vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
+ } else if (req_queues > cur_queues &&
+ req_queues - cur_queues > tx_rx_queue_left) {
+ dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
+ vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
+ vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
+ ICE_MAX_RSS_QS_PER_VF);
+ } else {
+ /* request is successful, then reset VF */
+ vf->num_req_qs = req_queues;
+ ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
+ dev_info(dev, "VF %d granted request of %u queues.\n",
+ vf->vf_id, req_queues);
+ return 0;
+ }
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
+ v_ret, (u8 *)vfres, sizeof(*vfres));
+}
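The grant/clamp logic above can be summarized as: ignore a zero request, cap at the per-VF limit, and otherwise grant the request only if the extra queues fit into what is still available on the PF. A stand-alone sketch returning the value the PF would report back in num_queue_pairs; 16 stands in for ICE_MAX_RSS_QS_PER_VF (assumed):

/* Sketch of the queue-request negotiation. */
#include <stdio.h>

#define EXAMPLE_MAX_QS_PER_VF 16

static unsigned short clamp_queue_request(unsigned short req, unsigned short cur,
					  unsigned short avail)
{
	unsigned short max_allowed = avail + cur;

	if (!req)
		return req;				/* logged, echoed back unchanged */
	if (req > EXAMPLE_MAX_QS_PER_VF)
		return EXAMPLE_MAX_QS_PER_VF;		/* cap at the per-VF limit */
	if (req > cur && req - cur > avail)
		return max_allowed < EXAMPLE_MAX_QS_PER_VF ?
		       max_allowed : EXAMPLE_MAX_QS_PER_VF;
	return req;					/* grantable: VF will be reset */
}

int main(void)
{
	printf("%u\n", clamp_queue_request(12, 4, 6));	/* only 10 can be offered */
	printf("%u\n", clamp_queue_request(8, 4, 6));	/* 8 granted after reset */
	return 0;
}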
+
diff --git a/drivers/net/ethernet/intel/ice/virt/queues.h b/drivers/net/ethernet/intel/ice/virt/queues.h
new file mode 100644
index 000000000000..c4a792cecea1
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/virt/queues.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2022, Intel Corporation. */
+
+#ifndef _ICE_VIRT_QUEUES_H_
+#define _ICE_VIRT_QUEUES_H_
+
+#include <linux/types.h>
+
+struct ice_vf;
+
+u16 ice_vc_get_max_frame_size(struct ice_vf *vf);
+int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg);
+int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg);
+int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg);
+int ice_vc_cfg_q_bw(struct ice_vf *vf, u8 *msg);
+int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg);
+int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg);
+int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg);
+
+#endif /* _ICE_VIRT_QUEUES_H_ */
diff --git a/drivers/net/ethernet/intel/ice/virt/rss.c b/drivers/net/ethernet/intel/ice/virt/rss.c
new file mode 100644
index 000000000000..085e69ec0cfc
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/virt/rss.c
@@ -0,0 +1,1922 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022, Intel Corporation. */
+
+#include "rss.h"
+#include "ice_vf_lib_private.h"
+#include "ice.h"
+
+#define FIELD_SELECTOR(proto_hdr_field) \
+ BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
+
+struct ice_vc_hdr_match_type {
+ u32 vc_hdr; /* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */
+ u32 ice_hdr; /* ice headers (ICE_FLOW_SEG_HDR_XXX) */
+};
+
+static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = {
+ {VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE},
+ {VIRTCHNL_PROTO_HDR_ETH, ICE_FLOW_SEG_HDR_ETH},
+ {VIRTCHNL_PROTO_HDR_S_VLAN, ICE_FLOW_SEG_HDR_VLAN},
+ {VIRTCHNL_PROTO_HDR_C_VLAN, ICE_FLOW_SEG_HDR_VLAN},
+ {VIRTCHNL_PROTO_HDR_IPV4, ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER},
+ {VIRTCHNL_PROTO_HDR_IPV6, ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER},
+ {VIRTCHNL_PROTO_HDR_TCP, ICE_FLOW_SEG_HDR_TCP},
+ {VIRTCHNL_PROTO_HDR_UDP, ICE_FLOW_SEG_HDR_UDP},
+ {VIRTCHNL_PROTO_HDR_SCTP, ICE_FLOW_SEG_HDR_SCTP},
+ {VIRTCHNL_PROTO_HDR_PPPOE, ICE_FLOW_SEG_HDR_PPPOE},
+ {VIRTCHNL_PROTO_HDR_GTPU_IP, ICE_FLOW_SEG_HDR_GTPU_IP},
+ {VIRTCHNL_PROTO_HDR_GTPU_EH, ICE_FLOW_SEG_HDR_GTPU_EH},
+ {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
+ ICE_FLOW_SEG_HDR_GTPU_DWN},
+ {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
+ ICE_FLOW_SEG_HDR_GTPU_UP},
+ {VIRTCHNL_PROTO_HDR_L2TPV3, ICE_FLOW_SEG_HDR_L2TPV3},
+ {VIRTCHNL_PROTO_HDR_ESP, ICE_FLOW_SEG_HDR_ESP},
+ {VIRTCHNL_PROTO_HDR_AH, ICE_FLOW_SEG_HDR_AH},
+ {VIRTCHNL_PROTO_HDR_PFCP, ICE_FLOW_SEG_HDR_PFCP_SESSION},
+ {VIRTCHNL_PROTO_HDR_GTPC, ICE_FLOW_SEG_HDR_GTPC},
+ {VIRTCHNL_PROTO_HDR_L2TPV2, ICE_FLOW_SEG_HDR_L2TPV2},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG, ICE_FLOW_SEG_HDR_IPV_FRAG},
+ {VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG, ICE_FLOW_SEG_HDR_IPV_FRAG},
+ {VIRTCHNL_PROTO_HDR_GRE, ICE_FLOW_SEG_HDR_GRE},
+};
+
+struct ice_vc_hash_field_match_type {
+ u32 vc_hdr; /* virtchnl headers
+ * (VIRTCHNL_PROTO_HDR_XXX)
+ */
+ u32 vc_hash_field; /* virtchnl hash fields selector
+ * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX))
+ */
+ u64 ice_hash_field; /* ice hash fields
+ * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX))
+ */
+};
+
+static const struct
+ice_vc_hash_field_match_type ice_vc_hash_field_list[] = {
+ {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
+ {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
+ {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
+ ICE_FLOW_HASH_ETH},
+ {VIRTCHNL_PROTO_HDR_ETH,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)},
+ {VIRTCHNL_PROTO_HDR_S_VLAN,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)},
+ {VIRTCHNL_PROTO_HDR_C_VLAN,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
+ ICE_FLOW_HASH_IPV4},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID)},
+ {VIRTCHNL_PROTO_HDR_IPV4,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
+ ICE_FLOW_HASH_IPV4},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
+ ICE_FLOW_HASH_IPV6},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_ID)},
+ {VIRTCHNL_PROTO_HDR_IPV6,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST),
+ ICE_FLOW_HASH_IPV6_PRE64},
+ {VIRTCHNL_PROTO_HDR_IPV6,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA)},
+ {VIRTCHNL_PROTO_HDR_IPV6,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA)},
+ {VIRTCHNL_PROTO_HDR_IPV6,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ ICE_FLOW_HASH_IPV6_PRE64 |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
+ ICE_FLOW_HASH_TCP_PORT},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_CHKSUM),
+ ICE_FLOW_HASH_TCP_PORT |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
+ ICE_FLOW_HASH_UDP_PORT},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_CHKSUM),
+ ICE_FLOW_HASH_UDP_PORT |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
+ ICE_FLOW_HASH_SCTP_PORT},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_CHKSUM),
+ ICE_FLOW_HASH_SCTP_PORT |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_PPPOE,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)},
+ {VIRTCHNL_PROTO_HDR_GTPU_IP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)},
+ {VIRTCHNL_PROTO_HDR_L2TPV3,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)},
+ {VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)},
+ {VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)},
+ {VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)},
+ {VIRTCHNL_PROTO_HDR_GTPC,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPC_TEID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID)},
+ {VIRTCHNL_PROTO_HDR_L2TPV2,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV2_SESS_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID)},
+ {VIRTCHNL_PROTO_HDR_L2TPV2,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV2_LEN_SESS_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID)},
+};
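Editorial note: the table above is matched on the exact field-selector combination, not on individual bits, so a request for SRC|DST only hits the combined entry. A minimal standalone sketch of that exact-match lookup, using made-up selector and hash-bit values rather than the real VIRTCHNL_*/ICE_FLOW_* definitions:

#include <stdint.h>
#include <stdio.h>

/* stand-in selector bits and hash values (assumptions, for illustration only) */
#define SEL_IPV4_SRC 0x1u
#define SEL_IPV4_DST 0x2u

struct hash_field_match {
	uint32_t hdr;
	uint32_t selector;	/* must equal the request exactly */
	uint64_t hash_field;
};

static const struct hash_field_match table[] = {
	{ 4, SEL_IPV4_SRC,                1ULL << 0 },
	{ 4, SEL_IPV4_DST,                1ULL << 1 },
	{ 4, SEL_IPV4_SRC | SEL_IPV4_DST, (1ULL << 0) | (1ULL << 1) },
};

static uint64_t lookup(uint32_t hdr, uint32_t selector)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].hdr == hdr && table[i].selector == selector)
			return table[i].hash_field;
	return 0;	/* no exact match */
}

int main(void)
{
	/* SRC|DST matches only the combined entry, yielding both hash bits */
	printf("0x%llx\n",
	       (unsigned long long)lookup(4, SEL_IPV4_SRC | SEL_IPV4_DST));
	return 0;
}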
+
+static int
+ice_vc_rss_hash_update(struct ice_hw *hw, struct ice_vsi *vsi, u8 hash_type)
+{
+ struct ice_vsi_ctx *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ /* clear previous hash_type */
+ ctx->info.q_opt_rss = vsi->info.q_opt_rss &
+ ~ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
+	/* hash_type is passed in as ICE_AQ_VSI_Q_OPT_RSS_<XOR|TPLZ|SYM_TPLZ> */
+ ctx->info.q_opt_rss |= FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M,
+ hash_type);
+
+ /* Preserve existing queueing option setting */
+ ctx->info.q_opt_tc = vsi->info.q_opt_tc;
+ ctx->info.q_opt_flags = vsi->info.q_opt_flags;
+
+ ctx->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
+
+ ret = ice_update_vsi(hw, vsi->idx, ctx, NULL);
+ if (ret) {
+ dev_err(ice_hw_to_dev(hw), "update VSI for RSS failed, err %d aq_err %s\n",
+ ret, libie_aq_str(hw->adminq.sq_last_status));
+ } else {
+ vsi->info.q_opt_rss = ctx->info.q_opt_rss;
+ }
+
+ kfree(ctx);
+
+ return ret;
+}
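Editorial note: the update above rewrites only the hash-type bits of q_opt_rss and leaves the other queueing-option bits untouched. A small standalone sketch of that read-modify-write, with a made-up field layout standing in for ICE_AQ_VSI_Q_OPT_RSS_HASH_M:

#include <stdint.h>
#include <stdio.h>

/* stand-in layout: bits 7:6 hold the RSS hash type (assumption) */
#define Q_OPT_RSS_HASH_MASK  0xC0u
#define Q_OPT_RSS_HASH_SHIFT 6

static uint8_t rss_hash_update(uint8_t q_opt_rss, uint8_t hash_type)
{
	/* clear the previous hash type, keep every other bit */
	q_opt_rss &= (uint8_t)~Q_OPT_RSS_HASH_MASK;
	/* insert the new hash type into the field (FIELD_PREP equivalent) */
	q_opt_rss |= (uint8_t)((hash_type << Q_OPT_RSS_HASH_SHIFT) &
			       Q_OPT_RSS_HASH_MASK);
	return q_opt_rss;
}

int main(void)
{
	/* 0x3f has unrelated low bits set; they survive the update -> 0xbf */
	printf("0x%02x\n", rss_hash_update(0x3f, 0x2));
	return 0;
}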
+
+/**
+ * ice_vc_validate_pattern
+ * @vf: pointer to the VF info
+ * @proto: virtchnl protocol headers
+ *
+ * Validate whether the pattern is supported.
+ *
+ * Return: true on success, false on error.
+ */
+bool
+ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto)
+{
+ bool is_ipv4 = false;
+ bool is_ipv6 = false;
+ bool is_udp = false;
+ u16 ptype = -1;
+ int i = 0;
+
+ while (i < proto->count &&
+ proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) {
+ switch (proto->proto_hdr[i].type) {
+ case VIRTCHNL_PROTO_HDR_ETH:
+ ptype = ICE_PTYPE_MAC_PAY;
+ break;
+ case VIRTCHNL_PROTO_HDR_IPV4:
+ ptype = ICE_PTYPE_IPV4_PAY;
+ is_ipv4 = true;
+ break;
+ case VIRTCHNL_PROTO_HDR_IPV6:
+ ptype = ICE_PTYPE_IPV6_PAY;
+ is_ipv6 = true;
+ break;
+ case VIRTCHNL_PROTO_HDR_UDP:
+ if (is_ipv4)
+ ptype = ICE_PTYPE_IPV4_UDP_PAY;
+ else if (is_ipv6)
+ ptype = ICE_PTYPE_IPV6_UDP_PAY;
+ is_udp = true;
+ break;
+ case VIRTCHNL_PROTO_HDR_TCP:
+ if (is_ipv4)
+ ptype = ICE_PTYPE_IPV4_TCP_PAY;
+ else if (is_ipv6)
+ ptype = ICE_PTYPE_IPV6_TCP_PAY;
+ break;
+ case VIRTCHNL_PROTO_HDR_SCTP:
+ if (is_ipv4)
+ ptype = ICE_PTYPE_IPV4_SCTP_PAY;
+ else if (is_ipv6)
+ ptype = ICE_PTYPE_IPV6_SCTP_PAY;
+ break;
+ case VIRTCHNL_PROTO_HDR_GTPU_IP:
+ case VIRTCHNL_PROTO_HDR_GTPU_EH:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_GTPU;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_GTPU;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_L2TPV3:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_L2TPV3;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_L2TPV3;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_ESP:
+ if (is_ipv4)
+ ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP :
+ ICE_MAC_IPV4_ESP;
+ else if (is_ipv6)
+ ptype = is_udp ? ICE_MAC_IPV6_NAT_T_ESP :
+ ICE_MAC_IPV6_ESP;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_AH:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_AH;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_AH;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_PFCP:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_PFCP_SESSION;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_PFCP_SESSION;
+ goto out;
+ default:
+ break;
+ }
+ i++;
+ }
+
+out:
+ return ice_hw_ptype_ena(&vf->pf->hw, ptype);
+}
+
+/**
+ * ice_vc_parse_rss_cfg - parse hash fields and headers from a virtchnl RSS cfg
+ * @hw: pointer to the hardware
+ * @rss_cfg: pointer to the virtchnl RSS cfg
+ * @hash_cfg: pointer to the HW hash configuration
+ *
+ * This function parses the virtchnl RSS cfg into the intended hash fields and
+ * the intended headers for the RSS configuration.
+ *
+ * Return: true if all the protocol headers and hash fields in the RSS cfg
+ * could be parsed, else false.
+ */
+static bool ice_vc_parse_rss_cfg(struct ice_hw *hw,
+ struct virtchnl_rss_cfg *rss_cfg,
+ struct ice_rss_hash_cfg *hash_cfg)
+{
+ const struct ice_vc_hash_field_match_type *hf_list;
+ const struct ice_vc_hdr_match_type *hdr_list;
+ int i, hf_list_len, hdr_list_len;
+ bool outer_ipv4 = false;
+ bool outer_ipv6 = false;
+ bool inner_hdr = false;
+ bool has_gre = false;
+
+ u32 *addl_hdrs = &hash_cfg->addl_hdrs;
+ u64 *hash_flds = &hash_cfg->hash_flds;
+
+ /* set outer layer RSS as default */
+ hash_cfg->hdr_type = ICE_RSS_OUTER_HEADERS;
+
+ if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
+ hash_cfg->symm = true;
+ else
+ hash_cfg->symm = false;
+
+ hf_list = ice_vc_hash_field_list;
+ hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list);
+ hdr_list = ice_vc_hdr_list;
+ hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list);
+
+ for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
+ struct virtchnl_proto_hdr *proto_hdr =
+ &rss_cfg->proto_hdrs.proto_hdr[i];
+ u32 hdr_found = 0;
+ int j;
+
+ /* Find matched ice headers according to virtchnl headers.
+ * Also figure out the outer type of GTPU headers.
+ */
+ for (j = 0; j < hdr_list_len; j++) {
+ struct ice_vc_hdr_match_type hdr_map = hdr_list[j];
+
+ if (proto_hdr->type == hdr_map.vc_hdr)
+ hdr_found = hdr_map.ice_hdr;
+ }
+
+ if (!hdr_found)
+ return false;
+
+ /* Find matched ice hash fields according to
+ * virtchnl hash fields.
+ */
+ for (j = 0; j < hf_list_len; j++) {
+ struct ice_vc_hash_field_match_type hf_map = hf_list[j];
+
+ if (proto_hdr->type == hf_map.vc_hdr &&
+ proto_hdr->field_selector == hf_map.vc_hash_field) {
+ *hash_flds |= hf_map.ice_hash_field;
+ break;
+ }
+ }
+
+ if (proto_hdr->type == VIRTCHNL_PROTO_HDR_IPV4 && !inner_hdr)
+ outer_ipv4 = true;
+ else if (proto_hdr->type == VIRTCHNL_PROTO_HDR_IPV6 &&
+ !inner_hdr)
+ outer_ipv6 = true;
+		/* for GRE and L2TPv2, take the inner header as the input set
+		 * if no field is selected from the outer headers.
+		 * for GTPU, take the inner header and the GTPU TEID as the
+		 * input set.
+		 */
+ else if ((proto_hdr->type == VIRTCHNL_PROTO_HDR_GTPU_IP ||
+ proto_hdr->type == VIRTCHNL_PROTO_HDR_GTPU_EH ||
+ proto_hdr->type == VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN ||
+ proto_hdr->type ==
+ VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP) ||
+ ((proto_hdr->type == VIRTCHNL_PROTO_HDR_L2TPV2 ||
+ proto_hdr->type == VIRTCHNL_PROTO_HDR_GRE) &&
+ *hash_flds == 0)) {
+ /* set inner_hdr flag, and clean up outer header */
+ inner_hdr = true;
+
+ /* clear outer headers */
+ *addl_hdrs = 0;
+
+ if (outer_ipv4 && outer_ipv6)
+ return false;
+
+ if (outer_ipv4)
+ hash_cfg->hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
+ else if (outer_ipv6)
+ hash_cfg->hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
+ else
+ hash_cfg->hdr_type = ICE_RSS_INNER_HEADERS;
+
+ if (has_gre && outer_ipv4)
+ hash_cfg->hdr_type =
+ ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE;
+ if (has_gre && outer_ipv6)
+ hash_cfg->hdr_type =
+ ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE;
+
+ if (proto_hdr->type == VIRTCHNL_PROTO_HDR_GRE)
+ has_gre = true;
+ }
+
+ *addl_hdrs |= hdr_found;
+
+ /* refine hash hdrs and fields for IP fragment */
+ if (VIRTCHNL_TEST_PROTO_HDR_FIELD(proto_hdr,
+ VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID) &&
+ proto_hdr->type == VIRTCHNL_PROTO_HDR_IPV4_FRAG) {
+ *addl_hdrs |= ICE_FLOW_SEG_HDR_IPV_FRAG;
+ *addl_hdrs &= ~(ICE_FLOW_SEG_HDR_IPV_OTHER);
+ *hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID);
+ VIRTCHNL_DEL_PROTO_HDR_FIELD(proto_hdr,
+ VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID);
+ }
+ if (VIRTCHNL_TEST_PROTO_HDR_FIELD(proto_hdr,
+ VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID) &&
+ proto_hdr->type == VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG) {
+ *addl_hdrs |= ICE_FLOW_SEG_HDR_IPV_FRAG;
+ *addl_hdrs &= ~(ICE_FLOW_SEG_HDR_IPV_OTHER);
+ *hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_ID);
+ VIRTCHNL_DEL_PROTO_HDR_FIELD(proto_hdr,
+ VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID);
+ }
+ }
+
+	/* refine the GTPU header if we take the outer header as the input
+	 * set for a GTPU flow with no inner IP.
+	 */
+ if (hash_cfg->hdr_type == ICE_RSS_OUTER_HEADERS &&
+ *addl_hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
+ *addl_hdrs &= ~(ICE_FLOW_SEG_HDR_GTPU_IP);
+ *addl_hdrs |= ICE_FLOW_SEG_HDR_GTPU_NON_IP;
+ }
+
+ /* refine hash field for esp and nat-t-esp. */
+ if ((*addl_hdrs & ICE_FLOW_SEG_HDR_UDP) &&
+ (*addl_hdrs & ICE_FLOW_SEG_HDR_ESP)) {
+ *addl_hdrs &= ~(ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_UDP);
+ *addl_hdrs |= ICE_FLOW_SEG_HDR_NAT_T_ESP;
+ *hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI));
+ *hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI);
+ }
+
+ /* refine hash hdrs for L4 udp/tcp/sctp. */
+ if (*addl_hdrs & (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP |
+ ICE_FLOW_SEG_HDR_SCTP) &&
+ *addl_hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)
+ *addl_hdrs &= ~ICE_FLOW_SEG_HDR_IPV_OTHER;
+
+ return true;
+}
+
+/**
+ * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced
+ * RSS offloads
+ * @caps: VF driver negotiated capabilities
+ *
+ * Return true if VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set,
+ * else return false
+ */
+static bool ice_vf_adv_rss_offload_ena(u32 caps)
+{
+ return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF);
+}
+
+/**
+ * ice_is_hash_cfg_valid - Check whether an RSS hash context is valid
+ * @cfg: RSS hash configuration to test
+ *
+ * Return: true if both @cfg->hash_flds and @cfg->addl_hdrs are non-zero; false otherwise.
+ */
+static bool ice_is_hash_cfg_valid(struct ice_rss_hash_cfg *cfg)
+{
+ return cfg->hash_flds && cfg->addl_hdrs;
+}
+
+/**
+ * ice_hash_cfg_reset - Reset an RSS hash context
+ * @cfg: RSS hash configuration to reset
+ *
+ * Reset fields of @cfg that store the active rule information.
+ */
+static void ice_hash_cfg_reset(struct ice_rss_hash_cfg *cfg)
+{
+ cfg->hash_flds = 0;
+ cfg->addl_hdrs = 0;
+ cfg->hdr_type = ICE_RSS_OUTER_HEADERS;
+ cfg->symm = 0;
+}
+
+/**
+ * ice_hash_cfg_record - Record an RSS hash context
+ * @ctx: destination (global) RSS hash configuration
+ * @cfg: source RSS hash configuration to record
+ *
+ * Copy the active rule information from @cfg into @ctx.
+ */
+static void ice_hash_cfg_record(struct ice_rss_hash_cfg *ctx,
+ struct ice_rss_hash_cfg *cfg)
+{
+ ctx->hash_flds = cfg->hash_flds;
+ ctx->addl_hdrs = cfg->addl_hdrs;
+ ctx->hdr_type = cfg->hdr_type;
+ ctx->symm = cfg->symm;
+}
+
+/**
+ * ice_hash_moveout - Delete an RSS configuration (keep context)
+ * @vf: VF pointer
+ * @cfg: RSS hash configuration
+ *
+ * Return: 0 on success (including when already absent); -ENOENT if @cfg is
+ * invalid or VSI is missing; -EBUSY on hardware removal failure.
+ */
+static int
+ice_hash_moveout(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ struct ice_hw *hw = &vf->pf->hw;
+ int ret;
+
+ if (!ice_is_hash_cfg_valid(cfg) || !vsi)
+ return -ENOENT;
+
+ ret = ice_rem_rss_cfg(hw, vsi->idx, cfg);
+ if (ret && ret != -ENOENT) {
+ dev_err(dev, "ice_rem_rss_cfg failed for VF %d, VSI %d, error:%d\n",
+ vf->vf_id, vf->lan_vsi_idx, ret);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_hash_moveback - Add an RSS hash configuration for a VF
+ * @vf: VF pointer
+ * @cfg: RSS hash configuration to apply
+ *
+ * Add @cfg to @vf if the context is valid and VSI exists; programs HW.
+ *
+ * Return:
+ * * 0 on success
+ * * -ENOENT if @cfg is invalid or VSI is missing
+ * * -EBUSY if hardware programming fails
+ */
+static int
+ice_hash_moveback(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ struct ice_hw *hw = &vf->pf->hw;
+ int ret;
+
+ if (!ice_is_hash_cfg_valid(cfg) || !vsi)
+ return -ENOENT;
+
+ ret = ice_add_rss_cfg(hw, vsi, cfg);
+ if (ret) {
+ dev_err(dev, "ice_add_rss_cfg failed for VF %d, VSI %d, error:%d\n",
+ vf->vf_id, vf->lan_vsi_idx, ret);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_hash_remove - remove an RSS configuration
+ * @vf: pointer to the VF info
+ * @cfg: pointer to the RSS hash configuration
+ *
+ * This function deletes an RSS hash configuration and also deletes the
+ * hash context that stores the rule info.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int
+ice_hash_remove(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
+{
+ int ret;
+
+ ret = ice_hash_moveout(vf, cfg);
+ if (ret && ret != -ENOENT)
+ return ret;
+
+ ice_hash_cfg_reset(cfg);
+
+ return 0;
+}
+
+struct ice_gtpu_ctx_action {
+ u32 ctx_idx;
+ const u32 *remove_list;
+ int remove_count;
+ const u32 *moveout_list;
+ int moveout_count;
+};
+
+/**
+ * ice_add_rss_cfg_pre_gtpu - Pre-process the GTPU RSS configuration
+ * @vf: pointer to the VF info
+ * @ctx: pointer to the context of the GTPU hash
+ * @ctx_idx: index of the hash context
+ *
+ * Pre-processes the GTPU hash configuration before adding a new
+ * hash context. It removes or reorders existing hash configurations that may
+ * conflict with the new one. For example, if a GTPU_UP or GTPU_DWN rule is
+ * configured after a GTPU_EH rule, the GTPU_EH hash will be matched first due
+ * to TCAM write and match order (top-down). In such cases, the GTPU_EH rule
+ * must be moved after the GTPU_UP/DWN rule. Conversely, if a GTPU_EH rule is
+ * configured after a GTPU_UP/DWN rule, the UP/DWN rules should be removed to
+ * avoid conflict.
+ *
+ * Return: 0 on success or a negative error code on failure
+ */
+static int ice_add_rss_cfg_pre_gtpu(struct ice_vf *vf,
+ struct ice_vf_hash_gtpu_ctx *ctx,
+ u32 ctx_idx)
+{
+ int ret, i;
+
+ static const u32 remove_eh_ip[] = {
+ ICE_HASH_GTPU_CTX_EH_IP_UDP, ICE_HASH_GTPU_CTX_EH_IP_TCP,
+ ICE_HASH_GTPU_CTX_UP_IP, ICE_HASH_GTPU_CTX_UP_IP_UDP,
+ ICE_HASH_GTPU_CTX_UP_IP_TCP, ICE_HASH_GTPU_CTX_DW_IP,
+ ICE_HASH_GTPU_CTX_DW_IP_UDP, ICE_HASH_GTPU_CTX_DW_IP_TCP,
+ };
+
+ static const u32 remove_eh_ip_udp[] = {
+ ICE_HASH_GTPU_CTX_UP_IP_UDP,
+ ICE_HASH_GTPU_CTX_DW_IP_UDP,
+ };
+ static const u32 moveout_eh_ip_udp[] = {
+ ICE_HASH_GTPU_CTX_UP_IP,
+ ICE_HASH_GTPU_CTX_UP_IP_TCP,
+ ICE_HASH_GTPU_CTX_DW_IP,
+ ICE_HASH_GTPU_CTX_DW_IP_TCP,
+ };
+
+ static const u32 remove_eh_ip_tcp[] = {
+ ICE_HASH_GTPU_CTX_UP_IP_TCP,
+ ICE_HASH_GTPU_CTX_DW_IP_TCP,
+ };
+ static const u32 moveout_eh_ip_tcp[] = {
+ ICE_HASH_GTPU_CTX_UP_IP,
+ ICE_HASH_GTPU_CTX_UP_IP_UDP,
+ ICE_HASH_GTPU_CTX_DW_IP,
+ ICE_HASH_GTPU_CTX_DW_IP_UDP,
+ };
+
+ static const u32 remove_up_ip[] = {
+ ICE_HASH_GTPU_CTX_UP_IP_UDP,
+ ICE_HASH_GTPU_CTX_UP_IP_TCP,
+ };
+ static const u32 moveout_up_ip[] = {
+ ICE_HASH_GTPU_CTX_EH_IP,
+ ICE_HASH_GTPU_CTX_EH_IP_UDP,
+ ICE_HASH_GTPU_CTX_EH_IP_TCP,
+ };
+
+ static const u32 moveout_up_ip_udp_tcp[] = {
+ ICE_HASH_GTPU_CTX_EH_IP,
+ ICE_HASH_GTPU_CTX_EH_IP_UDP,
+ ICE_HASH_GTPU_CTX_EH_IP_TCP,
+ };
+
+ static const u32 remove_dw_ip[] = {
+ ICE_HASH_GTPU_CTX_DW_IP_UDP,
+ ICE_HASH_GTPU_CTX_DW_IP_TCP,
+ };
+ static const u32 moveout_dw_ip[] = {
+ ICE_HASH_GTPU_CTX_EH_IP,
+ ICE_HASH_GTPU_CTX_EH_IP_UDP,
+ ICE_HASH_GTPU_CTX_EH_IP_TCP,
+ };
+
+ static const struct ice_gtpu_ctx_action actions[] = {
+ { ICE_HASH_GTPU_CTX_EH_IP, remove_eh_ip,
+ ARRAY_SIZE(remove_eh_ip), NULL, 0 },
+ { ICE_HASH_GTPU_CTX_EH_IP_UDP, remove_eh_ip_udp,
+ ARRAY_SIZE(remove_eh_ip_udp), moveout_eh_ip_udp,
+ ARRAY_SIZE(moveout_eh_ip_udp) },
+ { ICE_HASH_GTPU_CTX_EH_IP_TCP, remove_eh_ip_tcp,
+ ARRAY_SIZE(remove_eh_ip_tcp), moveout_eh_ip_tcp,
+ ARRAY_SIZE(moveout_eh_ip_tcp) },
+ { ICE_HASH_GTPU_CTX_UP_IP, remove_up_ip,
+ ARRAY_SIZE(remove_up_ip), moveout_up_ip,
+ ARRAY_SIZE(moveout_up_ip) },
+ { ICE_HASH_GTPU_CTX_UP_IP_UDP, NULL, 0, moveout_up_ip_udp_tcp,
+ ARRAY_SIZE(moveout_up_ip_udp_tcp) },
+ { ICE_HASH_GTPU_CTX_UP_IP_TCP, NULL, 0, moveout_up_ip_udp_tcp,
+ ARRAY_SIZE(moveout_up_ip_udp_tcp) },
+ { ICE_HASH_GTPU_CTX_DW_IP, remove_dw_ip,
+ ARRAY_SIZE(remove_dw_ip), moveout_dw_ip,
+ ARRAY_SIZE(moveout_dw_ip) },
+ { ICE_HASH_GTPU_CTX_DW_IP_UDP, NULL, 0, moveout_dw_ip,
+ ARRAY_SIZE(moveout_dw_ip) },
+ { ICE_HASH_GTPU_CTX_DW_IP_TCP, NULL, 0, moveout_dw_ip,
+ ARRAY_SIZE(moveout_dw_ip) },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(actions); i++) {
+ if (actions[i].ctx_idx != ctx_idx)
+ continue;
+
+ if (actions[i].remove_list) {
+ for (int j = 0; j < actions[i].remove_count; j++) {
+ u16 rm = actions[i].remove_list[j];
+
+ ret = ice_hash_remove(vf, &ctx->ctx[rm]);
+ if (ret && ret != -ENOENT)
+ return ret;
+ }
+ }
+
+ if (actions[i].moveout_list) {
+ for (int j = 0; j < actions[i].moveout_count; j++) {
+ u16 mv = actions[i].moveout_list[j];
+
+ ret = ice_hash_moveout(vf, &ctx->ctx[mv]);
+ if (ret && ret != -ENOENT)
+ return ret;
+ }
+ }
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_add_rss_cfg_pre_ip - Pre-process IP-layer RSS configuration
+ * @vf: VF pointer
+ * @ctx: IP L4 hash context (ESP/UDP-ESP/AH/PFCP and UDP/TCP/SCTP)
+ *
+ * Remove covered/recorded IP RSS configurations prior to adding a new one.
+ *
+ * Return: 0 on success; negative error code on failure.
+ */
+static int
+ice_add_rss_cfg_pre_ip(struct ice_vf *vf, struct ice_vf_hash_ip_ctx *ctx)
+{
+ int i, ret;
+
+ for (i = 1; i < ICE_HASH_IP_CTX_MAX; i++)
+ if (ice_is_hash_cfg_valid(&ctx->ctx[i])) {
+ ret = ice_hash_remove(vf, &ctx->ctx[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_calc_gtpu_ctx_idx - Calculate GTPU hash context index
+ * @hdrs: Bitmask of protocol headers prefixed with ICE_FLOW_SEG_HDR_*
+ *
+ * Determine the GTPU hash context index based on the combination of
+ * encapsulation headers (GTPU_EH, GTPU_UP, GTPU_DWN) and transport
+ * protocols (UDP, TCP) within IPv4 or IPv6 flows.
+ *
+ * Return: A valid context index (0-8) if the header combination is supported,
+ * or ICE_HASH_GTPU_CTX_MAX if the combination is invalid.
+ */
+static enum ice_hash_gtpu_ctx_type ice_calc_gtpu_ctx_idx(u32 hdrs)
+{
+ u32 eh_idx, ip_idx;
+
+ if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH)
+ eh_idx = 0;
+ else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP)
+ eh_idx = 1;
+ else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN)
+ eh_idx = 2;
+ else
+ return ICE_HASH_GTPU_CTX_MAX;
+
+ ip_idx = 0;
+ if (hdrs & ICE_FLOW_SEG_HDR_UDP)
+ ip_idx = 1;
+ else if (hdrs & ICE_FLOW_SEG_HDR_TCP)
+ ip_idx = 2;
+
+ if (hdrs & (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6))
+ return eh_idx * 3 + ip_idx;
+ else
+ return ICE_HASH_GTPU_CTX_MAX;
+}
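Editorial note: as a worked example of the eh_idx * 3 + ip_idx arithmetic above, GTPU_EH selects row 0, GTPU_UP row 1, GTPU_DWN row 2, and the transport protocol picks the column. A standalone sketch with made-up header bits (not the driver's ICE_FLOW_SEG_HDR_* values):

#include <stdio.h>

/* stand-in segment-header bits (assumptions, for illustration only) */
#define HDR_GTPU_EH  0x01u
#define HDR_GTPU_UP  0x02u
#define HDR_GTPU_DWN 0x04u
#define HDR_UDP      0x08u
#define HDR_TCP      0x10u
#define HDR_IPV4     0x20u
#define CTX_MAX      9u

static unsigned int calc_gtpu_ctx_idx(unsigned int hdrs)
{
	unsigned int eh_idx, ip_idx = 0;

	if (hdrs & HDR_GTPU_EH)
		eh_idx = 0;
	else if (hdrs & HDR_GTPU_UP)
		eh_idx = 1;
	else if (hdrs & HDR_GTPU_DWN)
		eh_idx = 2;
	else
		return CTX_MAX;

	if (hdrs & HDR_UDP)
		ip_idx = 1;
	else if (hdrs & HDR_TCP)
		ip_idx = 2;

	return (hdrs & HDR_IPV4) ? eh_idx * 3 + ip_idx : CTX_MAX;
}

int main(void)
{
	/* GTPU_EH + IPv4 + TCP -> 0 * 3 + 2 = 2 (EH_IP_TCP slot) */
	printf("%u\n", calc_gtpu_ctx_idx(HDR_GTPU_EH | HDR_IPV4 | HDR_TCP));
	/* GTPU_DWN + IPv4 + UDP -> 2 * 3 + 1 = 7 (DW_IP_UDP slot) */
	printf("%u\n", calc_gtpu_ctx_idx(HDR_GTPU_DWN | HDR_IPV4 | HDR_UDP));
	return 0;
}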
+
+/**
+ * ice_map_ip_ctx_idx - map the index of the IP L4 hash context
+ * @hdrs: protocol headers prefix with ICE_FLOW_SEG_HDR_XXX.
+ *
+ * The IP L4 hash context uses the index to classify IPv4/IPv6 flows with
+ * ESP/UDP_ESP/AH/PFCP and non-tunnel UDP/TCP/SCTP.
+ * This function maps the index based on the protocol headers.
+ *
+ * Return: The mapped IP context index on success, or ICE_HASH_IP_CTX_MAX
+ * if no matching context is found.
+ */
+static u8 ice_map_ip_ctx_idx(u32 hdrs)
+{
+ u8 i;
+
+ static struct {
+ u32 hdrs;
+ u8 ctx_idx;
+ } ip_ctx_idx_map[] = {
+ { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER |
+ ICE_FLOW_SEG_HDR_ESP,
+ ICE_HASH_IP_CTX_IP_ESP },
+ { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER |
+ ICE_FLOW_SEG_HDR_NAT_T_ESP,
+ ICE_HASH_IP_CTX_IP_UDP_ESP },
+ { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER |
+ ICE_FLOW_SEG_HDR_AH,
+ ICE_HASH_IP_CTX_IP_AH },
+ { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER |
+ ICE_FLOW_SEG_HDR_PFCP_SESSION,
+ ICE_HASH_IP_CTX_IP_PFCP },
+ { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
+ ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP,
+ ICE_HASH_IP_CTX_IP_UDP },
+ { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
+ ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP,
+ ICE_HASH_IP_CTX_IP_TCP },
+ { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
+ ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_SCTP,
+ ICE_HASH_IP_CTX_IP_SCTP },
+ { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
+ ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER,
+ ICE_HASH_IP_CTX_IP },
+ { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER |
+ ICE_FLOW_SEG_HDR_ESP,
+ ICE_HASH_IP_CTX_IP_ESP },
+ { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER |
+ ICE_FLOW_SEG_HDR_NAT_T_ESP,
+ ICE_HASH_IP_CTX_IP_UDP_ESP },
+ { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER |
+ ICE_FLOW_SEG_HDR_AH,
+ ICE_HASH_IP_CTX_IP_AH },
+ { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER |
+ ICE_FLOW_SEG_HDR_PFCP_SESSION,
+ ICE_HASH_IP_CTX_IP_PFCP },
+ { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
+ ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_UDP,
+ ICE_HASH_IP_CTX_IP_UDP },
+ { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
+ ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_TCP,
+ ICE_HASH_IP_CTX_IP_TCP },
+ { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
+ ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_SCTP,
+ ICE_HASH_IP_CTX_IP_SCTP },
+ { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
+ ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER,
+ ICE_HASH_IP_CTX_IP },
+ /* the remaining mappings are used for default RSS */
+ { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP,
+ ICE_HASH_IP_CTX_IP_UDP },
+ { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP,
+ ICE_HASH_IP_CTX_IP_TCP },
+ { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_SCTP,
+ ICE_HASH_IP_CTX_IP_SCTP },
+ { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER,
+ ICE_HASH_IP_CTX_IP },
+ { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_UDP,
+ ICE_HASH_IP_CTX_IP_UDP },
+ { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_TCP,
+ ICE_HASH_IP_CTX_IP_TCP },
+ { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_SCTP,
+ ICE_HASH_IP_CTX_IP_SCTP },
+ { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER,
+ ICE_HASH_IP_CTX_IP },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(ip_ctx_idx_map); i++) {
+ if (hdrs == ip_ctx_idx_map[i].hdrs)
+ return ip_ctx_idx_map[i].ctx_idx;
+ }
+
+ return ICE_HASH_IP_CTX_MAX;
+}
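Editorial note: the lookup above requires hdrs to equal a table entry exactly, so any extra segment bit (for example a GTPU flag) makes it fall through to ICE_HASH_IP_CTX_MAX and the IP-context bookkeeping is skipped. A standalone sketch of that behaviour, with made-up header bits and context indices:

#include <stdio.h>

/* stand-in segment-header bits and context indices (assumptions only) */
#define HDR_IPV4     0x1u
#define HDR_UDP      0x2u
#define HDR_GTPU_IP  0x4u
#define IP_CTX_MAX   7u

static unsigned int map_ip_ctx_idx(unsigned int hdrs)
{
	static const struct { unsigned int hdrs, idx; } map[] = {
		{ HDR_IPV4 | HDR_UDP, 1 },	/* stand-in IP_CTX_IP_UDP slot */
	};

	for (unsigned int i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (hdrs == map[i].hdrs)	/* exact match only */
			return map[i].idx;
	return IP_CTX_MAX;
}

int main(void)
{
	printf("%u\n", map_ip_ctx_idx(HDR_IPV4 | HDR_UDP));			/* 1 */
	printf("%u\n", map_ip_ctx_idx(HDR_IPV4 | HDR_UDP | HDR_GTPU_IP));	/* 7 */
	return 0;
}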
+
+/**
+ * ice_add_rss_cfg_pre - Prepare RSS configuration context for a VF
+ * @vf: pointer to the VF structure
+ * @cfg: pointer to the RSS hash configuration
+ *
+ * Prepare the RSS hash context for a given VF based on the additional
+ * protocol headers specified in @cfg. This includes pre-configuration
+ * for IP and GTPU-based flows.
+ *
+ * If the configuration matches a known IP context, the function sets up
+ * the appropriate IP hash context. If the configuration includes GTPU
+ * headers, it prepares the GTPU-specific context accordingly.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int
+ice_add_rss_cfg_pre(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
+{
+ u32 ice_gtpu_ctx_idx = ice_calc_gtpu_ctx_idx(cfg->addl_hdrs);
+ u8 ip_ctx_idx = ice_map_ip_ctx_idx(cfg->addl_hdrs);
+
+ if (ip_ctx_idx == ICE_HASH_IP_CTX_IP) {
+ int ret = 0;
+
+ if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
+ ret = ice_add_rss_cfg_pre_ip(vf, &vf->hash_ctx.v4);
+ else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
+ ret = ice_add_rss_cfg_pre_ip(vf, &vf->hash_ctx.v6);
+
+ if (ret)
+ return ret;
+ }
+
+ if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4) {
+ return ice_add_rss_cfg_pre_gtpu(vf, &vf->hash_ctx.ipv4,
+ ice_gtpu_ctx_idx);
+ } else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6) {
+ return ice_add_rss_cfg_pre_gtpu(vf, &vf->hash_ctx.ipv6,
+ ice_gtpu_ctx_idx);
+ }
+
+ return 0;
+}
+
+/**
+ * ice_add_rss_cfg_post_gtpu - Post-process GTPU RSS configuration
+ * @vf: pointer to the VF info
+ * @ctx: pointer to the context of the GTPU hash
+ * @cfg: pointer to the RSS hash configuration
+ * @ctx_idx: index of the hash context
+ *
+ * Post-processes the GTPU hash configuration after a new hash
+ * context has been successfully added. It updates the context with the new
+ * configuration and restores any previously removed hash contexts that need
+ * to be re-applied. This ensures proper TCAM rule ordering and avoids
+ * conflicts between overlapping GTPU rules.
+ *
+ * Return: 0 on success or a negative error code on failure
+ */
+static int ice_add_rss_cfg_post_gtpu(struct ice_vf *vf,
+ struct ice_vf_hash_gtpu_ctx *ctx,
+ struct ice_rss_hash_cfg *cfg, u32 ctx_idx)
+{
+ /* GTPU hash moveback lookup table indexed by context ID.
+ * Each entry is a bitmap indicating which contexts need moveback
+ * operations when the corresponding context index is processed.
+ */
+ static const unsigned long
+ ice_gtpu_moveback_tbl[ICE_HASH_GTPU_CTX_MAX] = {
+ [ICE_HASH_GTPU_CTX_EH_IP] = 0,
+ [ICE_HASH_GTPU_CTX_EH_IP_UDP] =
+ BIT(ICE_HASH_GTPU_CTX_UP_IP) |
+ BIT(ICE_HASH_GTPU_CTX_UP_IP_TCP) |
+ BIT(ICE_HASH_GTPU_CTX_DW_IP) |
+ BIT(ICE_HASH_GTPU_CTX_DW_IP_TCP),
+ [ICE_HASH_GTPU_CTX_EH_IP_TCP] =
+ BIT(ICE_HASH_GTPU_CTX_UP_IP) |
+ BIT(ICE_HASH_GTPU_CTX_UP_IP_UDP) |
+ BIT(ICE_HASH_GTPU_CTX_DW_IP) |
+ BIT(ICE_HASH_GTPU_CTX_DW_IP_UDP),
+ [ICE_HASH_GTPU_CTX_UP_IP] =
+ BIT(ICE_HASH_GTPU_CTX_EH_IP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
+ [ICE_HASH_GTPU_CTX_UP_IP_UDP] =
+ BIT(ICE_HASH_GTPU_CTX_EH_IP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
+ [ICE_HASH_GTPU_CTX_UP_IP_TCP] =
+ BIT(ICE_HASH_GTPU_CTX_EH_IP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
+ [ICE_HASH_GTPU_CTX_DW_IP] =
+ BIT(ICE_HASH_GTPU_CTX_EH_IP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
+ [ICE_HASH_GTPU_CTX_DW_IP_UDP] =
+ BIT(ICE_HASH_GTPU_CTX_EH_IP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
+ [ICE_HASH_GTPU_CTX_DW_IP_TCP] =
+ BIT(ICE_HASH_GTPU_CTX_EH_IP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
+ };
+ unsigned long moveback_mask;
+ int ret;
+ int i;
+
+ if (unlikely(ctx_idx >= ICE_HASH_GTPU_CTX_MAX))
+ return 0;
+
+ ctx->ctx[ctx_idx].addl_hdrs = cfg->addl_hdrs;
+ ctx->ctx[ctx_idx].hash_flds = cfg->hash_flds;
+ ctx->ctx[ctx_idx].hdr_type = cfg->hdr_type;
+ ctx->ctx[ctx_idx].symm = cfg->symm;
+
+ moveback_mask = ice_gtpu_moveback_tbl[ctx_idx];
+ for_each_set_bit(i, &moveback_mask, ICE_HASH_GTPU_CTX_MAX) {
+ ret = ice_hash_moveback(vf, &ctx->ctx[i]);
+ if (ret && ret != -ENOENT)
+ return ret;
+ }
+
+ return 0;
+}
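Editorial note: the moveback table above encodes, per context index, which sibling contexts must be re-applied; for_each_set_bit() then walks those context IDs. A small standalone sketch of that bitmap walk (the bit positions are just illustrative context IDs, not the driver's enum values):

#include <stdio.h>

#define CTX_MAX 9

int main(void)
{
	/* stand-in moveback mask: contexts 0, 1 and 2 need re-adding */
	unsigned long mask = (1UL << 0) | (1UL << 1) | (1UL << 2);

	/* equivalent of for_each_set_bit(i, &mask, CTX_MAX) */
	for (int i = 0; i < CTX_MAX; i++) {
		if (!(mask & (1UL << i)))
			continue;
		printf("moveback ctx %d\n", i);	/* would call ice_hash_moveback() */
	}
	return 0;
}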
+
+static int
+ice_add_rss_cfg_post(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
+{
+ u32 ice_gtpu_ctx_idx = ice_calc_gtpu_ctx_idx(cfg->addl_hdrs);
+ u8 ip_ctx_idx = ice_map_ip_ctx_idx(cfg->addl_hdrs);
+
+ if (ip_ctx_idx && ip_ctx_idx < ICE_HASH_IP_CTX_MAX) {
+ if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
+ ice_hash_cfg_record(&vf->hash_ctx.v4.ctx[ip_ctx_idx], cfg);
+ else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
+ ice_hash_cfg_record(&vf->hash_ctx.v6.ctx[ip_ctx_idx], cfg);
+ }
+
+ if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4) {
+ return ice_add_rss_cfg_post_gtpu(vf, &vf->hash_ctx.ipv4,
+ cfg, ice_gtpu_ctx_idx);
+ } else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6) {
+ return ice_add_rss_cfg_post_gtpu(vf, &vf->hash_ctx.ipv6,
+ cfg, ice_gtpu_ctx_idx);
+ }
+
+ return 0;
+}
+
+/**
+ * ice_rem_rss_cfg_post - post-process the RSS configuration
+ * @vf: pointer to the VF info
+ * @cfg: pointer to the RSS hash configuration
+ *
+ * Post-process the RSS hash configuration after deleting a hash config,
+ * e.g. reset the hash context for the GTPU hash.
+ */
+static void
+ice_rem_rss_cfg_post(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
+{
+ u32 ice_gtpu_ctx_idx = ice_calc_gtpu_ctx_idx(cfg->addl_hdrs);
+ u8 ip_ctx_idx = ice_map_ip_ctx_idx(cfg->addl_hdrs);
+
+ if (ip_ctx_idx && ip_ctx_idx < ICE_HASH_IP_CTX_MAX) {
+ if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
+ ice_hash_cfg_reset(&vf->hash_ctx.v4.ctx[ip_ctx_idx]);
+ else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
+ ice_hash_cfg_reset(&vf->hash_ctx.v6.ctx[ip_ctx_idx]);
+ }
+
+ if (ice_gtpu_ctx_idx >= ICE_HASH_GTPU_CTX_MAX)
+ return;
+
+ if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
+ ice_hash_cfg_reset(&vf->hash_ctx.ipv4.ctx[ice_gtpu_ctx_idx]);
+ else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
+ ice_hash_cfg_reset(&vf->hash_ctx.ipv6.ctx[ice_gtpu_ctx_idx]);
+}
+
+/**
+ * ice_rem_rss_cfg_wrap - Wrapper for deleting an RSS configuration
+ * @vf: pointer to the VF info
+ * @cfg: pointer to the RSS hash configuration
+ *
+ * Wrapper function to delete a flow profile based on an RSS configuration,
+ * and also post-process the hash context based on the rollback mechanism
+ * that handles rule conflicts introduced by ice_add_rss_cfg_wrap.
+ *
+ * Return: 0 on success; negative error code on failure.
+ */
+static int
+ice_rem_rss_cfg_wrap(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ struct ice_hw *hw = &vf->pf->hw;
+ int ret;
+
+ ret = ice_rem_rss_cfg(hw, vsi->idx, cfg);
+	/* We just ignore -ENOENT, because if two configurations share the
+	 * same profile, removing one of them actually removes both, since
+	 * the profile is deleted.
+ */
+ if (ret && ret != -ENOENT) {
+ dev_err(dev, "ice_rem_rss_cfg failed for VF %d, VSI %d, error:%d\n",
+ vf->vf_id, vf->lan_vsi_idx, ret);
+ return ret;
+ }
+
+ ice_rem_rss_cfg_post(vf, cfg);
+
+ return 0;
+}
+
+/**
+ * ice_add_rss_cfg_wrap - Wrapper for adding an RSS configuration
+ * @vf: pointer to the VF info
+ * @cfg: pointer to the RSS hash configuration
+ *
+ * Add a flow profile based on an RSS configuration. Use a rollback
+ * mechanism to handle rule conflicts caused by the top-down TCAM write
+ * sequence.
+ *
+ * Return: 0 on success; negative error code on failure.
+ */
+static int
+ice_add_rss_cfg_wrap(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ struct ice_hw *hw = &vf->pf->hw;
+ int ret;
+
+ if (ice_add_rss_cfg_pre(vf, cfg))
+ return -EINVAL;
+
+ ret = ice_add_rss_cfg(hw, vsi, cfg);
+ if (ret) {
+ dev_err(dev, "ice_add_rss_cfg failed for VF %d, VSI %d, error:%d\n",
+ vf->vf_id, vf->lan_vsi_idx, ret);
+ return ret;
+ }
+
+ if (ice_add_rss_cfg_post(vf, cfg))
+ ret = -EINVAL;
+
+ return ret;
+}
+
+/**
+ * ice_parse_raw_rss_pattern - Parse raw pattern spec and mask for RSS
+ * @vf: pointer to the VF info
+ * @proto: pointer to the virtchnl protocol header
+ * @raw_cfg: pointer to the RSS raw pattern configuration
+ *
+ * Parser function to get spec and mask from virtchnl message, and parse
+ * them to get the corresponding profile and offset. The profile is used
+ * to add RSS configuration.
+ *
+ * Return: 0 on success; negative error code on failure.
+ */
+static int
+ice_parse_raw_rss_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto,
+ struct ice_rss_raw_cfg *raw_cfg)
+{
+ struct ice_parser_result pkt_parsed;
+ struct ice_hw *hw = &vf->pf->hw;
+ struct ice_parser_profile prof;
+ struct ice_parser *psr;
+ u8 *pkt_buf, *msk_buf;
+ u16 pkt_len;
+ int ret = 0;
+
+ pkt_len = proto->raw.pkt_len;
+ if (!pkt_len)
+ return -EINVAL;
+ if (pkt_len > VIRTCHNL_MAX_SIZE_RAW_PACKET)
+ pkt_len = VIRTCHNL_MAX_SIZE_RAW_PACKET;
+
+ pkt_buf = kzalloc(pkt_len, GFP_KERNEL);
+ msk_buf = kzalloc(pkt_len, GFP_KERNEL);
+ if (!pkt_buf || !msk_buf) {
+ ret = -ENOMEM;
+ goto free_alloc;
+ }
+
+ memcpy(pkt_buf, proto->raw.spec, pkt_len);
+ memcpy(msk_buf, proto->raw.mask, pkt_len);
+
+ psr = ice_parser_create(hw);
+ if (IS_ERR(psr)) {
+ ret = PTR_ERR(psr);
+ goto free_alloc;
+ }
+
+ ret = ice_parser_run(psr, pkt_buf, pkt_len, &pkt_parsed);
+ if (ret)
+ goto parser_destroy;
+
+ ret = ice_parser_profile_init(&pkt_parsed, pkt_buf, msk_buf,
+ pkt_len, ICE_BLK_RSS, &prof);
+ if (ret)
+ goto parser_destroy;
+
+ memcpy(&raw_cfg->prof, &prof, sizeof(prof));
+
+parser_destroy:
+ ice_parser_destroy(psr);
+free_alloc:
+ kfree(pkt_buf);
+ kfree(msk_buf);
+ return ret;
+}
+
+/**
+ * ice_add_raw_rss_cfg - add RSS configuration for raw pattern
+ * @vf: pointer to the VF info
+ * @cfg: pointer to the RSS raw pattern configuration
+ *
+ * This function adds the RSS configuration for a raw pattern. It checks
+ * whether the current profile matches; if not, it removes the old one and
+ * adds the new profile to HW directly. The symmetric hash configuration is
+ * updated as well.
+ *
+ * Return: 0 on success; negative error code on failure.
+ */
+static int
+ice_add_raw_rss_cfg(struct ice_vf *vf, struct ice_rss_raw_cfg *cfg)
+{
+ struct ice_parser_profile *prof = &cfg->prof;
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_rss_prof_info *rss_prof;
+ struct ice_hw *hw = &vf->pf->hw;
+ int i, ptg, ret = 0;
+ u16 vsi_handle;
+ u64 id;
+
+ vsi_handle = vf->lan_vsi_idx;
+ id = find_first_bit(prof->ptypes, ICE_FLOW_PTYPE_MAX);
+
+ ptg = hw->blk[ICE_BLK_RSS].xlt1.t[id];
+ rss_prof = &vf->rss_prof_info[ptg];
+
+ /* check if ptg already has a profile */
+ if (rss_prof->prof.fv_num) {
+ for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
+ if (rss_prof->prof.fv[i].proto_id !=
+ prof->fv[i].proto_id ||
+ rss_prof->prof.fv[i].offset !=
+ prof->fv[i].offset)
+ break;
+ }
+
+ /* current profile is matched, check symmetric hash */
+ if (i == ICE_MAX_FV_WORDS) {
+ if (rss_prof->symm != cfg->symm)
+ goto update_symm;
+ return ret;
+ }
+
+ /* current profile is not matched, remove it */
+		ret = ice_rem_prof_id_flow(hw, ICE_BLK_RSS,
+					   ice_get_hw_vsi_num(hw, vsi_handle),
+					   id);
+ if (ret) {
+ dev_err(dev, "remove RSS flow failed\n");
+ return ret;
+ }
+
+ ret = ice_rem_prof(hw, ICE_BLK_RSS, id);
+ if (ret) {
+ dev_err(dev, "remove RSS profile failed\n");
+ return ret;
+ }
+ }
+
+ /* add new profile */
+ ret = ice_flow_set_parser_prof(hw, vsi_handle, 0, prof, ICE_BLK_RSS);
+ if (ret) {
+ dev_err(dev, "HW profile add failed\n");
+ return ret;
+ }
+
+ memcpy(&rss_prof->prof, prof, sizeof(struct ice_parser_profile));
+
+update_symm:
+ rss_prof->symm = cfg->symm;
+ ice_rss_update_raw_symm(hw, cfg, id);
+ return ret;
+}
+
+/**
+ * ice_rem_raw_rss_cfg - remove RSS configuration for raw pattern
+ * @vf: pointer to the VF info
+ * @cfg: pointer to the RSS raw pattern configuration
+ *
+ * This function removes the RSS configuration for a raw pattern. It first
+ * checks whether the VSI group has already been removed; if not, it removes
+ * the profile.
+ *
+ * Return: 0 on success; negative error code on failure.
+ */
+static int
+ice_rem_raw_rss_cfg(struct ice_vf *vf, struct ice_rss_raw_cfg *cfg)
+{
+ struct ice_parser_profile *prof = &cfg->prof;
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_hw *hw = &vf->pf->hw;
+ int ptg, ret = 0;
+ u16 vsig, vsi;
+ u64 id;
+
+ id = find_first_bit(prof->ptypes, ICE_FLOW_PTYPE_MAX);
+
+ ptg = hw->blk[ICE_BLK_RSS].xlt1.t[id];
+
+ memset(&vf->rss_prof_info[ptg], 0,
+ sizeof(struct ice_rss_prof_info));
+
+ /* check if vsig is already removed */
+ vsi = ice_get_hw_vsi_num(hw, vf->lan_vsi_idx);
+ if (vsi >= ICE_MAX_VSI) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ vsig = hw->blk[ICE_BLK_RSS].xlt2.vsis[vsi].vsig;
+ if (vsig) {
+ ret = ice_rem_prof_id_flow(hw, ICE_BLK_RSS, vsi, id);
+ if (ret)
+ goto err;
+
+ ret = ice_rem_prof(hw, ICE_BLK_RSS, id);
+ if (ret)
+ goto err;
+ }
+
+ return ret;
+
+err:
+ dev_err(dev, "HW profile remove failed\n");
+ return ret;
+}
+
+/**
+ * ice_vc_handle_rss_cfg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the message buffer
+ * @add: add an RSS config if true, otherwise delete an RSS config
+ *
+ * This function adds/deletes an RSS config.
+ */
+int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
+{
+ u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG;
+ struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_hw *hw = &vf->pf->hw;
+ struct ice_vsi *vsi;
+ u8 hash_type;
+ bool symm;
+ int ret;
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ goto error_param;
+ }
+
+ if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) {
+ dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS ||
+ rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC ||
+ rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) {
+ dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
+ hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR :
+ ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
+
+ ret = ice_vc_rss_hash_update(hw, vsi, hash_type);
+ if (ret)
+ v_ret = ice_err_to_virt_err(ret);
+ goto error_param;
+ }
+
+ hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ :
+ ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
+ ret = ice_vc_rss_hash_update(hw, vsi, hash_type);
+ if (ret) {
+ v_ret = ice_err_to_virt_err(ret);
+ goto error_param;
+ }
+
+ symm = rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
+ /* Configure RSS hash for raw pattern */
+ if (rss_cfg->proto_hdrs.tunnel_level == 0 &&
+ rss_cfg->proto_hdrs.count == 0) {
+ struct ice_rss_raw_cfg raw_cfg;
+
+ if (ice_parse_raw_rss_pattern(vf, &rss_cfg->proto_hdrs,
+ &raw_cfg)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (add) {
+ raw_cfg.symm = symm;
+ if (ice_add_raw_rss_cfg(vf, &raw_cfg))
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ } else {
+ if (ice_rem_raw_rss_cfg(vf, &raw_cfg))
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ }
+ } else {
+ struct ice_rss_hash_cfg cfg;
+
+		/* Only check for the non-raw pattern case */
+ if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+ cfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
+ cfg.hash_flds = ICE_HASH_INVALID;
+ cfg.hdr_type = ICE_RSS_ANY_HEADERS;
+
+ if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &cfg)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (add) {
+ cfg.symm = symm;
+ if (ice_add_rss_cfg_wrap(vf, &cfg))
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ } else {
+ if (ice_rem_rss_cfg_wrap(vf, &cfg))
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ }
+ }
+
+error_param:
+ return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_config_rss_key
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS key
+ */
+int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_rss_key *vrk =
+ (struct virtchnl_rss_key *)msg;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (ice_set_rss_key(vsi, vrk->key))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_config_rss_lut
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS LUT
+ */
+int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vrl->lut_entries != ICE_LUT_VSI_SIZE) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (ice_set_rss_lut(vsi, vrl->lut, ICE_LUT_VSI_SIZE))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_config_rss_hfunc
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS Hash function
+ */
+int ice_vc_config_rss_hfunc(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_hfunc *vrh = (struct virtchnl_rss_hfunc *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vrh->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vrh->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
+ hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ;
+
+ if (ice_set_rss_hfunc(vsi, hfunc))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_HFUNC, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_get_rss_hashcfg - return the RSS Hash configuration
+ * @vf: pointer to the VF info
+ */
+int ice_vc_get_rss_hashcfg(struct ice_vf *vf)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_rss_hashcfg *vrh = NULL;
+ int len = 0, ret;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ dev_err(ice_pf_to_dev(vf->pf), "RSS not supported by PF\n");
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ len = sizeof(struct virtchnl_rss_hashcfg);
+ vrh = kzalloc(len, GFP_KERNEL);
+ if (!vrh) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ len = 0;
+ goto err;
+ }
+
+ vrh->hashcfg = ICE_DEFAULT_RSS_HASHCFG;
+err:
+ /* send the response back to the VF */
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, v_ret,
+ (u8 *)vrh, len);
+ kfree(vrh);
+ return ret;
+}
+
+/**
+ * ice_vc_set_rss_hashcfg - set RSS Hash configuration bits for the VF
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ */
+int ice_vc_set_rss_hashcfg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_hashcfg *vrh = (struct virtchnl_rss_hashcfg *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ int status;
+
+ dev = ice_pf_to_dev(pf);
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ dev_err(dev, "RSS not supported by PF\n");
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+	/* clear all previously programmed RSS configuration to allow VF
+	 * drivers to customize the RSS configuration and/or completely
+	 * disable RSS
+ */
+ status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
+ if (status && !vrh->hashcfg) {
+ /* only report failure to clear the current RSS configuration if
+ * that was clearly the VF's intention (i.e. vrh->hashcfg = 0)
+ */
+ v_ret = ice_err_to_virt_err(status);
+ goto err;
+ } else if (status) {
+ /* allow the VF to update the RSS configuration even on failure
+		 * to clear the current RSS configuration in an attempt to keep
+ * RSS in a working state
+ */
+ dev_warn(dev, "Failed to clear the RSS configuration for VF %u\n",
+ vf->vf_id);
+ }
+
+ if (vrh->hashcfg) {
+ status = ice_add_avf_rss_cfg(&pf->hw, vsi, vrh->hashcfg);
+ v_ret = ice_err_to_virt_err(status);
+ }
+
+ /* save the requested VF configuration */
+ if (!v_ret)
+ vf->rss_hashcfg = vrh->hashcfg;
+
+ /* send the response to the VF */
+err:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_RSS_HASHCFG, v_ret,
+ NULL, 0);
+}
+
diff --git a/drivers/net/ethernet/intel/ice/virt/rss.h b/drivers/net/ethernet/intel/ice/virt/rss.h
new file mode 100644
index 000000000000..784d4c43ce8b
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/virt/rss.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2022, Intel Corporation. */
+
+#ifndef _ICE_VIRT_RSS_H_
+#define _ICE_VIRT_RSS_H_
+
+#include <linux/types.h>
+
+struct ice_vf;
+
+int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add);
+int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg);
+int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg);
+int ice_vc_config_rss_hfunc(struct ice_vf *vf, u8 *msg);
+int ice_vc_get_rss_hashcfg(struct ice_vf *vf);
+int ice_vc_set_rss_hashcfg(struct ice_vf *vf, u8 *msg);
+
+#endif /* _ICE_VIRT_RSS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/virt/virtchnl.c
index aa2080747714..f3f921134379 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/virt/virtchnl.c
@@ -1,170 +1,20 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022, Intel Corporation. */
-#include "ice_virtchnl.h"
+#include "virtchnl.h"
+#include "queues.h"
+#include "rss.h"
#include "ice_vf_lib_private.h"
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
-#include "ice_virtchnl_allowlist.h"
+#include "allowlist.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_vlan.h"
#include "ice_flex_pipe.h"
#include "ice_dcb_lib.h"
-#define FIELD_SELECTOR(proto_hdr_field) \
- BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
-
-struct ice_vc_hdr_match_type {
- u32 vc_hdr; /* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */
- u32 ice_hdr; /* ice headers (ICE_FLOW_SEG_HDR_XXX) */
-};
-
-static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = {
- {VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE},
- {VIRTCHNL_PROTO_HDR_ETH, ICE_FLOW_SEG_HDR_ETH},
- {VIRTCHNL_PROTO_HDR_S_VLAN, ICE_FLOW_SEG_HDR_VLAN},
- {VIRTCHNL_PROTO_HDR_C_VLAN, ICE_FLOW_SEG_HDR_VLAN},
- {VIRTCHNL_PROTO_HDR_IPV4, ICE_FLOW_SEG_HDR_IPV4 |
- ICE_FLOW_SEG_HDR_IPV_OTHER},
- {VIRTCHNL_PROTO_HDR_IPV6, ICE_FLOW_SEG_HDR_IPV6 |
- ICE_FLOW_SEG_HDR_IPV_OTHER},
- {VIRTCHNL_PROTO_HDR_TCP, ICE_FLOW_SEG_HDR_TCP},
- {VIRTCHNL_PROTO_HDR_UDP, ICE_FLOW_SEG_HDR_UDP},
- {VIRTCHNL_PROTO_HDR_SCTP, ICE_FLOW_SEG_HDR_SCTP},
- {VIRTCHNL_PROTO_HDR_PPPOE, ICE_FLOW_SEG_HDR_PPPOE},
- {VIRTCHNL_PROTO_HDR_GTPU_IP, ICE_FLOW_SEG_HDR_GTPU_IP},
- {VIRTCHNL_PROTO_HDR_GTPU_EH, ICE_FLOW_SEG_HDR_GTPU_EH},
- {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
- ICE_FLOW_SEG_HDR_GTPU_DWN},
- {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
- ICE_FLOW_SEG_HDR_GTPU_UP},
- {VIRTCHNL_PROTO_HDR_L2TPV3, ICE_FLOW_SEG_HDR_L2TPV3},
- {VIRTCHNL_PROTO_HDR_ESP, ICE_FLOW_SEG_HDR_ESP},
- {VIRTCHNL_PROTO_HDR_AH, ICE_FLOW_SEG_HDR_AH},
- {VIRTCHNL_PROTO_HDR_PFCP, ICE_FLOW_SEG_HDR_PFCP_SESSION},
-};
-
-struct ice_vc_hash_field_match_type {
- u32 vc_hdr; /* virtchnl headers
- * (VIRTCHNL_PROTO_HDR_XXX)
- */
- u32 vc_hash_field; /* virtchnl hash fields selector
- * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX))
- */
- u64 ice_hash_field; /* ice hash fields
- * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX))
- */
-};
-
-static const struct
-ice_vc_hash_field_match_type ice_vc_hash_field_list[] = {
- {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
- BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
- {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
- BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
- {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
- ICE_FLOW_HASH_ETH},
- {VIRTCHNL_PROTO_HDR_ETH,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE),
- BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)},
- {VIRTCHNL_PROTO_HDR_S_VLAN,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)},
- {VIRTCHNL_PROTO_HDR_C_VLAN,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
- ICE_FLOW_HASH_IPV4},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
- ICE_FLOW_HASH_IPV6},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
- ICE_FLOW_HASH_TCP_PORT},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
- ICE_FLOW_HASH_UDP_PORT},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
- ICE_FLOW_HASH_SCTP_PORT},
- {VIRTCHNL_PROTO_HDR_PPPOE,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)},
- {VIRTCHNL_PROTO_HDR_GTPU_IP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)},
- {VIRTCHNL_PROTO_HDR_L2TPV3,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)},
- {VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI),
- BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)},
- {VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI),
- BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)},
- {VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)},
-};
-
/**
* ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
* @pf: pointer to the PF structure
@@ -304,10 +154,10 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
msg, msglen, NULL);
- if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
+ if (aq_ret && pf->hw.mailboxq.sq_last_status != LIBIE_AQ_RC_ENOSYS) {
dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %s\n",
vf->vf_id, aq_ret,
- ice_aq_str(pf->hw.mailboxq.sq_last_status));
+ libie_aq_str(pf->hw.mailboxq.sq_last_status));
return -EIO;
}
@@ -338,28 +188,6 @@ static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
}
/**
- * ice_vc_get_max_frame_size - get max frame size allowed for VF
- * @vf: VF used to determine max frame size
- *
- * Max frame size is determined based on the current port's max frame size and
- * whether a port VLAN is configured on this VF. The VF is not aware whether
- * it's in a port VLAN, so the PF needs to account for this both in max frame
- * size checks and in the max frame size it reports to the VF.
- */
-static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
-{
- struct ice_port_info *pi = ice_vf_get_port_info(vf);
- u16 max_frame_size;
-
- max_frame_size = pi->phy.link_info.max_frame_size;
-
- if (ice_vf_is_port_vlan_ena(vf))
- max_frame_size -= VLAN_HLEN;
-
- return max_frame_size;
-}
-
-/**
* ice_vc_get_vlan_caps
* @hw: pointer to the hw
* @vf: pointer to the VF info
@@ -498,6 +326,9 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_QOS)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_QOS;
+ if (vf->driver_caps & VIRTCHNL_VF_CAP_PTP)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_PTP;
+
vfres->num_vsis = 1;
/* Tx and Rx queue are equal for VF */
vfres->num_queue_pairs = vsi->num_txq;
@@ -556,488 +387,6 @@ bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
}
/**
- * ice_vc_isvalid_q_id
- * @vsi: VSI to check queue ID against
- * @qid: VSI relative queue ID
- *
- * check for the valid queue ID
- */
-static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u8 qid)
-{
-	/* allocated Tx and Rx queues should always be equal for a VF VSI */
- return qid < vsi->alloc_txq;
-}
-
-/**
- * ice_vc_isvalid_ring_len
- * @ring_len: length of ring
- *
- * check for the valid ring count, should be multiple of ICE_REQ_DESC_MULTIPLE
- * or zero
- */
-static bool ice_vc_isvalid_ring_len(u16 ring_len)
-{
- return ring_len == 0 ||
- (ring_len >= ICE_MIN_NUM_DESC &&
- ring_len <= ICE_MAX_NUM_DESC &&
- !(ring_len % ICE_REQ_DESC_MULTIPLE));
-}
-
-/**
- * ice_vc_validate_pattern
- * @vf: pointer to the VF info
- * @proto: virtchnl protocol headers
- *
- * validate the pattern is supported or not.
- *
- * Return: true on success, false on error.
- */
-bool
-ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto)
-{
- bool is_ipv4 = false;
- bool is_ipv6 = false;
- bool is_udp = false;
- u16 ptype = -1;
- int i = 0;
-
- while (i < proto->count &&
- proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) {
- switch (proto->proto_hdr[i].type) {
- case VIRTCHNL_PROTO_HDR_ETH:
- ptype = ICE_PTYPE_MAC_PAY;
- break;
- case VIRTCHNL_PROTO_HDR_IPV4:
- ptype = ICE_PTYPE_IPV4_PAY;
- is_ipv4 = true;
- break;
- case VIRTCHNL_PROTO_HDR_IPV6:
- ptype = ICE_PTYPE_IPV6_PAY;
- is_ipv6 = true;
- break;
- case VIRTCHNL_PROTO_HDR_UDP:
- if (is_ipv4)
- ptype = ICE_PTYPE_IPV4_UDP_PAY;
- else if (is_ipv6)
- ptype = ICE_PTYPE_IPV6_UDP_PAY;
- is_udp = true;
- break;
- case VIRTCHNL_PROTO_HDR_TCP:
- if (is_ipv4)
- ptype = ICE_PTYPE_IPV4_TCP_PAY;
- else if (is_ipv6)
- ptype = ICE_PTYPE_IPV6_TCP_PAY;
- break;
- case VIRTCHNL_PROTO_HDR_SCTP:
- if (is_ipv4)
- ptype = ICE_PTYPE_IPV4_SCTP_PAY;
- else if (is_ipv6)
- ptype = ICE_PTYPE_IPV6_SCTP_PAY;
- break;
- case VIRTCHNL_PROTO_HDR_GTPU_IP:
- case VIRTCHNL_PROTO_HDR_GTPU_EH:
- if (is_ipv4)
- ptype = ICE_MAC_IPV4_GTPU;
- else if (is_ipv6)
- ptype = ICE_MAC_IPV6_GTPU;
- goto out;
- case VIRTCHNL_PROTO_HDR_L2TPV3:
- if (is_ipv4)
- ptype = ICE_MAC_IPV4_L2TPV3;
- else if (is_ipv6)
- ptype = ICE_MAC_IPV6_L2TPV3;
- goto out;
- case VIRTCHNL_PROTO_HDR_ESP:
- if (is_ipv4)
- ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP :
- ICE_MAC_IPV4_ESP;
- else if (is_ipv6)
- ptype = is_udp ? ICE_MAC_IPV6_NAT_T_ESP :
- ICE_MAC_IPV6_ESP;
- goto out;
- case VIRTCHNL_PROTO_HDR_AH:
- if (is_ipv4)
- ptype = ICE_MAC_IPV4_AH;
- else if (is_ipv6)
- ptype = ICE_MAC_IPV6_AH;
- goto out;
- case VIRTCHNL_PROTO_HDR_PFCP:
- if (is_ipv4)
- ptype = ICE_MAC_IPV4_PFCP_SESSION;
- else if (is_ipv6)
- ptype = ICE_MAC_IPV6_PFCP_SESSION;
- goto out;
- default:
- break;
- }
- i++;
- }
-
-out:
- return ice_hw_ptype_ena(&vf->pf->hw, ptype);
-}
-
-/**
- * ice_vc_parse_rss_cfg - parses hash fields and headers from
- * a specific virtchnl RSS cfg
- * @hw: pointer to the hardware
- * @rss_cfg: pointer to the virtchnl RSS cfg
- * @hash_cfg: pointer to the HW hash configuration
- *
- * Return true if all the protocol header and hash fields in the RSS cfg could
- * be parsed, else return false
- *
- * This function parses the virtchnl RSS cfg to be the intended
- * hash fields and the intended header for RSS configuration
- */
-static bool ice_vc_parse_rss_cfg(struct ice_hw *hw,
- struct virtchnl_rss_cfg *rss_cfg,
- struct ice_rss_hash_cfg *hash_cfg)
-{
- const struct ice_vc_hash_field_match_type *hf_list;
- const struct ice_vc_hdr_match_type *hdr_list;
- int i, hf_list_len, hdr_list_len;
- u32 *addl_hdrs = &hash_cfg->addl_hdrs;
- u64 *hash_flds = &hash_cfg->hash_flds;
-
- /* set outer layer RSS as default */
- hash_cfg->hdr_type = ICE_RSS_OUTER_HEADERS;
-
- if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
- hash_cfg->symm = true;
- else
- hash_cfg->symm = false;
-
- hf_list = ice_vc_hash_field_list;
- hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list);
- hdr_list = ice_vc_hdr_list;
- hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list);
-
- for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
- struct virtchnl_proto_hdr *proto_hdr =
- &rss_cfg->proto_hdrs.proto_hdr[i];
- bool hdr_found = false;
- int j;
-
- /* Find matched ice headers according to virtchnl headers. */
- for (j = 0; j < hdr_list_len; j++) {
- struct ice_vc_hdr_match_type hdr_map = hdr_list[j];
-
- if (proto_hdr->type == hdr_map.vc_hdr) {
- *addl_hdrs |= hdr_map.ice_hdr;
- hdr_found = true;
- }
- }
-
- if (!hdr_found)
- return false;
-
- /* Find matched ice hash fields according to
- * virtchnl hash fields.
- */
- for (j = 0; j < hf_list_len; j++) {
- struct ice_vc_hash_field_match_type hf_map = hf_list[j];
-
- if (proto_hdr->type == hf_map.vc_hdr &&
- proto_hdr->field_selector == hf_map.vc_hash_field) {
- *hash_flds |= hf_map.ice_hash_field;
- break;
- }
- }
- }
-
- return true;
-}
-
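
The two lookup tables above drive a straightforward translate-and-accumulate loop: every virtchnl protocol header must have a matching ice header flag (OR'd into addl_hdrs), and a header whose field selector exactly matches a table entry contributes its ice hash bits to hash_flds. A minimal standalone sketch of that pattern, using made-up IDs and bit values rather than the real VIRTCHNL_ and ICE_ constants:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's match tables. */
struct hdr_map { uint32_t vc_hdr; uint64_t ice_hdr; };
struct field_map { uint32_t vc_hdr; uint32_t vc_sel; uint64_t ice_flds; };

enum { VC_IPV4 = 1, VC_TCP = 2 };

static const struct hdr_map hdr_list[] = {
	{ VC_IPV4, 0x01 },
	{ VC_TCP,  0x02 },
};

static const struct field_map field_list[] = {
	{ VC_IPV4, 0x3 /* SRC | DST */,           0x0c },
	{ VC_TCP,  0x3 /* SRC_PORT | DST_PORT */, 0x30 },
};

static bool parse_cfg(const uint32_t *hdr, const uint32_t *sel, unsigned int n,
		      uint64_t *addl_hdrs, uint64_t *hash_flds)
{
	for (unsigned int i = 0; i < n; i++) {
		bool found = false;
		unsigned int j;

		/* every header must be known, or the whole cfg is rejected */
		for (j = 0; j < sizeof(hdr_list) / sizeof(hdr_list[0]); j++) {
			if (hdr[i] == hdr_list[j].vc_hdr) {
				*addl_hdrs |= hdr_list[j].ice_hdr;
				found = true;
			}
		}
		if (!found)
			return false;

		/* hash bits are added only on an exact field-selector match */
		for (j = 0; j < sizeof(field_list) / sizeof(field_list[0]); j++) {
			if (hdr[i] == field_list[j].vc_hdr &&
			    sel[i] == field_list[j].vc_sel) {
				*hash_flds |= field_list[j].ice_flds;
				break;
			}
		}
	}
	return true;
}

int main(void)
{
	uint32_t hdr[] = { VC_IPV4, VC_TCP }, sel[] = { 0x3, 0x3 };
	uint64_t addl_hdrs = 0, hash_flds = 0;

	if (parse_cfg(hdr, sel, 2, &addl_hdrs, &hash_flds))
		printf("hdrs=%#llx flds=%#llx\n",	/* hdrs=0x3 flds=0x3c */
		       (unsigned long long)addl_hdrs,
		       (unsigned long long)hash_flds);
	return 0;
}
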
-/**
- * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced
- * RSS offloads
- * @caps: VF driver negotiated capabilities
- *
- * Return true if VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set,
- * else return false
- */
-static bool ice_vf_adv_rss_offload_ena(u32 caps)
-{
- return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF);
-}
-
-/**
- * ice_vc_handle_rss_cfg
- * @vf: pointer to the VF info
- * @msg: pointer to the message buffer
- * @add: add a RSS config if true, otherwise delete a RSS config
- *
- * This function adds/deletes a RSS config
- */
-static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
-{
- u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG;
- struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg;
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_hw *hw = &vf->pf->hw;
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n",
- vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
- goto error_param;
- }
-
- if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) {
- dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n",
- vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS ||
- rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC ||
- rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) {
- dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n",
- vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
- struct ice_vsi_ctx *ctx;
- u8 lut_type, hash_type;
- int status;
-
- lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
- hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR :
- ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx) {
- v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
- goto error_param;
- }
-
- ctx->info.q_opt_rss =
- FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) |
- FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type);
-
- /* Preserve existing queueing option setting */
- ctx->info.q_opt_rss |= (vsi->info.q_opt_rss &
- ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M);
- ctx->info.q_opt_tc = vsi->info.q_opt_tc;
- ctx->info.q_opt_flags = vsi->info.q_opt_rss;
-
- ctx->info.valid_sections =
- cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
-
- status = ice_update_vsi(hw, vsi->idx, ctx, NULL);
- if (status) {
- dev_err(dev, "update VSI for RSS failed, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- } else {
- vsi->info.q_opt_rss = ctx->info.q_opt_rss;
- }
-
- kfree(ctx);
- } else {
- struct ice_rss_hash_cfg cfg;
-
-		/* Only check for the non-raw pattern case */
- if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
- cfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
- cfg.hash_flds = ICE_HASH_INVALID;
- cfg.hdr_type = ICE_RSS_ANY_HEADERS;
-
- if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &cfg)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (add) {
- if (ice_add_rss_cfg(hw, vsi, &cfg)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n",
- vsi->vsi_num, v_ret);
- }
- } else {
- int status;
-
- status = ice_rem_rss_cfg(hw, vsi->idx, &cfg);
- /* We just ignore -ENOENT, because if two configurations
-			 * share the same profile, removing one of them actually
- * removes both, since the profile is deleted.
- */
- if (status && status != -ENOENT) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n",
- vf->vf_id, status);
- }
- }
- }
-
-error_param:
- return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0);
-}
-
-/**
- * ice_vc_config_rss_key
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * Configure the VF's RSS key
- */
-static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_rss_key *vrk =
- (struct virtchnl_rss_key *)msg;
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (ice_set_rss_key(vsi, vrk->key))
- v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
-error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_vc_config_rss_lut
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * Configure the VF's RSS LUT
- */
-static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
-{
- struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (vrl->lut_entries != ICE_LUT_VSI_SIZE) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (ice_set_rss_lut(vsi, vrl->lut, ICE_LUT_VSI_SIZE))
- v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
-error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_vc_config_rss_hfunc
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * Configure the VF's RSS Hash function
- */
-static int ice_vc_config_rss_hfunc(struct ice_vf *vf, u8 *msg)
-{
- struct virtchnl_rss_hfunc *vrh = (struct virtchnl_rss_hfunc *)msg;
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vrh->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (vrh->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
- hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ;
-
- if (ice_set_rss_hfunc(vsi, hfunc))
- v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
-error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_HFUNC, v_ret,
- NULL, 0);
-}
-
-/**
* ice_vc_get_qos_caps - Get current QoS caps from PF
* @vf: pointer to the VF info
*
@@ -1119,110 +468,6 @@ err:
}
/**
- * ice_vf_cfg_qs_bw - Configure per queue bandwidth
- * @vf: pointer to the VF info
- * @num_queues: number of queues to be configured
- *
- * Configure per queue bandwidth.
- *
- * Return: 0 on success or negative error value.
- */
-static int ice_vf_cfg_qs_bw(struct ice_vf *vf, u16 num_queues)
-{
- struct ice_hw *hw = &vf->pf->hw;
- struct ice_vsi *vsi;
- int ret;
- u16 i;
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi)
- return -EINVAL;
-
- for (i = 0; i < num_queues; i++) {
- u32 p_rate, min_rate;
- u8 tc;
-
- p_rate = vf->qs_bw[i].peak;
- min_rate = vf->qs_bw[i].committed;
- tc = vf->qs_bw[i].tc;
- if (p_rate)
- ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc,
- vf->qs_bw[i].queue_id,
- ICE_MAX_BW, p_rate);
- else
- ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc,
- vf->qs_bw[i].queue_id,
- ICE_MAX_BW);
- if (ret)
- return ret;
-
- if (min_rate)
- ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc,
- vf->qs_bw[i].queue_id,
- ICE_MIN_BW, min_rate);
- else
- ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc,
- vf->qs_bw[i].queue_id,
- ICE_MIN_BW);
-
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-/**
- * ice_vf_cfg_q_quanta_profile - Configure quanta profile
- * @vf: pointer to the VF info
- * @quanta_prof_idx: pointer to the quanta profile index
- * @quanta_size: quanta size to be set
- *
- * This function chooses available quanta profile and configures the register.
- * The quanta profile is evenly divided by the number of device ports, and then
- * available to the specific PF and VFs. The first profile for each PF is a
- * reserved default profile. Only the quanta size of the remaining unused
- * profiles can be modified.
- *
- * Return: 0 on success or negative error value.
- */
-static int ice_vf_cfg_q_quanta_profile(struct ice_vf *vf, u16 quanta_size,
- u16 *quanta_prof_idx)
-{
- const u16 n_desc = calc_quanta_desc(quanta_size);
- struct ice_hw *hw = &vf->pf->hw;
- const u16 n_cmd = 2 * n_desc;
- struct ice_pf *pf = vf->pf;
- u16 per_pf, begin_id;
- u8 n_used;
- u32 reg;
-
- begin_id = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) / hw->dev_caps.num_funcs *
- hw->logical_pf_id;
-
- if (quanta_size == ICE_DFLT_QUANTA) {
- *quanta_prof_idx = begin_id;
- } else {
- per_pf = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) /
- hw->dev_caps.num_funcs;
- n_used = pf->num_quanta_prof_used;
- if (n_used < per_pf) {
- *quanta_prof_idx = begin_id + 1 + n_used;
- pf->num_quanta_prof_used++;
- } else {
- return -EINVAL;
- }
- }
-
- reg = FIELD_PREP(GLCOMM_QUANTA_PROF_QUANTA_SIZE_M, quanta_size) |
- FIELD_PREP(GLCOMM_QUANTA_PROF_MAX_CMD_M, n_cmd) |
- FIELD_PREP(GLCOMM_QUANTA_PROF_MAX_DESC_M, n_desc);
- wr32(hw, GLCOMM_QUANTA_PROF(*quanta_prof_idx), reg);
-
- return 0;
-}
-
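
A back-of-the-envelope sketch of the profile-window arithmetic described above, with purely hypothetical numbers (16 global quanta profiles, 8 functions) standing in for GLCOMM_QUANTA_PROF_MAX_INDEX and hw->dev_caps.num_funcs:

#include <stdio.h>

int main(void)
{
	unsigned int max_index = 15;	/* hypothetical GLCOMM_QUANTA_PROF_MAX_INDEX */
	unsigned int num_funcs = 8;	/* hypothetical function count */
	unsigned int per_pf = (max_index + 1) / num_funcs;	/* 2 profiles per PF */

	for (unsigned int pf = 0; pf < num_funcs; pf++) {
		unsigned int begin_id = per_pf * pf;

		/* begin_id is the reserved default profile for this PF; the
		 * remaining profiles in the window can be handed out one at a
		 * time for custom quanta sizes.
		 */
		printf("PF %u: default profile %u, custom %u..%u\n",
		       pf, begin_id, begin_id + 1, begin_id + per_pf - 1);
	}
	return 0;
}
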
-/**
* ice_vc_cfg_promiscuous_mode_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -1404,744 +649,6 @@ error_param:
}
/**
- * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
- * @vqs: virtchnl_queue_select structure containing bitmaps to validate
- *
- * Return true on successful validation, else false
- */
-static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
-{
- if ((!vqs->rx_queues && !vqs->tx_queues) ||
- vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
- vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
- return false;
-
- return true;
-}
-
-/**
- * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
- * @vsi: VSI of the VF to configure
- * @q_idx: VF queue index used to determine the queue in the PF's space
- */
-static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
-{
- struct ice_hw *hw = &vsi->back->hw;
- u32 pfq = vsi->txq_map[q_idx];
- u32 reg;
-
- reg = rd32(hw, QINT_TQCTL(pfq));
-
- /* MSI-X index 0 in the VF's space is always for the OICR, which means
- * this is most likely a poll mode VF driver, so don't enable an
- * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
- */
- if (!(reg & QINT_TQCTL_MSIX_INDX_M))
- return;
-
- wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
-}
-
-/**
- * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
- * @vsi: VSI of the VF to configure
- * @q_idx: VF queue index used to determine the queue in the PF's space
- */
-static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
-{
- struct ice_hw *hw = &vsi->back->hw;
- u32 pfq = vsi->rxq_map[q_idx];
- u32 reg;
-
- reg = rd32(hw, QINT_RQCTL(pfq));
-
- /* MSI-X index 0 in the VF's space is always for the OICR, which means
- * this is most likely a poll mode VF driver, so don't enable an
- * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
- */
- if (!(reg & QINT_RQCTL_MSIX_INDX_M))
- return;
-
- wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
-}
-
-/**
- * ice_vc_ena_qs_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to enable all or specific queue(s)
- */
-static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_queue_select *vqs =
- (struct virtchnl_queue_select *)msg;
- struct ice_vsi *vsi;
- unsigned long q_map;
- u16 vf_q_id;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_validate_vqs_bitmaps(vqs)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Enable only Rx rings, Tx rings were enabled by the FW when the
- * Tx queue group list was configured and the context bits were
- * programmed using ice_vsi_cfg_txqs
- */
- q_map = vqs->rx_queues;
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Skip queue if enabled */
- if (test_bit(vf_q_id, vf->rxq_ena))
- continue;
-
- if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
- vf_q_id, vsi->vsi_num);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
- set_bit(vf_q_id, vf->rxq_ena);
- }
-
- q_map = vqs->tx_queues;
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Skip queue if enabled */
- if (test_bit(vf_q_id, vf->txq_ena))
- continue;
-
- ice_vf_ena_txq_interrupt(vsi, vf_q_id);
- set_bit(vf_q_id, vf->txq_ena);
- }
-
- /* Set flag to indicate that queues are enabled */
- if (v_ret == VIRTCHNL_STATUS_SUCCESS)
- set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
-
-error_param:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_vf_vsi_dis_single_txq - disable a single Tx queue
- * @vf: VF to disable queue for
- * @vsi: VSI for the VF
- * @q_id: VF relative (0-based) queue ID
- *
- * Attempt to disable the Tx queue passed in. If the Tx queue was successfully
- * disabled then clear q_id bit in the enabled queues bitmap and return
- * success. Otherwise return error.
- */
-static int
-ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
-{
- struct ice_txq_meta txq_meta = { 0 };
- struct ice_tx_ring *ring;
- int err;
-
- if (!test_bit(q_id, vf->txq_ena))
- dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
- q_id, vsi->vsi_num);
-
- ring = vsi->tx_rings[q_id];
- if (!ring)
- return -EINVAL;
-
- ice_fill_txq_meta(vsi, ring, &txq_meta);
-
- err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, &txq_meta);
- if (err) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
- q_id, vsi->vsi_num);
- return err;
- }
-
- /* Clear enabled queues flag */
- clear_bit(q_id, vf->txq_ena);
-
- return 0;
-}
-
-/**
- * ice_vc_dis_qs_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to disable all or specific queue(s)
- */
-static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_queue_select *vqs =
- (struct virtchnl_queue_select *)msg;
- struct ice_vsi *vsi;
- unsigned long q_map;
- u16 vf_q_id;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
- !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_validate_vqs_bitmaps(vqs)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (vqs->tx_queues) {
- q_map = vqs->tx_queues;
-
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (ice_vf_vsi_dis_single_txq(vf, vsi, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
- }
- }
-
- q_map = vqs->rx_queues;
- /* speed up Rx queue disable by batching them if possible */
- if (q_map &&
- bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
- if (ice_vsi_stop_all_rx_rings(vsi)) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
- vsi->vsi_num);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
- } else if (q_map) {
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Skip queue if not enabled */
- if (!test_bit(vf_q_id, vf->rxq_ena))
- continue;
-
- if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
- true)) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
- vf_q_id, vsi->vsi_num);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Clear enabled queues flag */
- clear_bit(vf_q_id, vf->rxq_ena);
- }
- }
-
- /* Clear enabled queues flag */
- if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
- clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
-
-error_param:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_cfg_interrupt
- * @vf: pointer to the VF info
- * @vsi: the VSI being configured
- * @map: vector map for mapping vectors to queues
- * @q_vector: structure for interrupt vector
- * configure the IRQ to queue map
- */
-static enum virtchnl_status_code
-ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi,
- struct virtchnl_vector_map *map,
- struct ice_q_vector *q_vector)
-{
- u16 vsi_q_id, vsi_q_id_idx;
- unsigned long qmap;
-
- q_vector->num_ring_rx = 0;
- q_vector->num_ring_tx = 0;
-
- qmap = map->rxq_map;
- for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
- vsi_q_id = vsi_q_id_idx;
-
- if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
- return VIRTCHNL_STATUS_ERR_PARAM;
-
- q_vector->num_ring_rx++;
- q_vector->rx.itr_idx = map->rxitr_idx;
- vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
- ice_cfg_rxq_interrupt(vsi, vsi_q_id,
- q_vector->vf_reg_idx,
- q_vector->rx.itr_idx);
- }
-
- qmap = map->txq_map;
- for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
- vsi_q_id = vsi_q_id_idx;
-
- if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
- return VIRTCHNL_STATUS_ERR_PARAM;
-
- q_vector->num_ring_tx++;
- q_vector->tx.itr_idx = map->txitr_idx;
- vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
- ice_cfg_txq_interrupt(vsi, vsi_q_id,
- q_vector->vf_reg_idx,
- q_vector->tx.itr_idx);
- }
-
- return VIRTCHNL_STATUS_SUCCESS;
-}
-
-/**
- * ice_vc_cfg_irq_map_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to configure the IRQ to queue map
- */
-static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- u16 num_q_vectors_mapped, vsi_id, vector_id;
- struct virtchnl_irq_map_info *irqmap_info;
- struct virtchnl_vector_map *map;
- struct ice_vsi *vsi;
- int i;
-
- irqmap_info = (struct virtchnl_irq_map_info *)msg;
- num_q_vectors_mapped = irqmap_info->num_vectors;
-
- /* Check to make sure number of VF vectors mapped is not greater than
- * number of VF vectors originally allocated, and check that
- * there is actually at least a single VF queue vector mapped
- */
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
- vf->num_msix < num_q_vectors_mapped ||
- !num_q_vectors_mapped) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- for (i = 0; i < num_q_vectors_mapped; i++) {
- struct ice_q_vector *q_vector;
-
- map = &irqmap_info->vecmap[i];
-
- vector_id = map->vector_id;
- vsi_id = map->vsi_id;
- /* vector_id is always 0-based for each VF, and can never be
- * larger than or equal to the max allowed interrupts per VF
- */
- if (!(vector_id < vf->num_msix) ||
- !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
- (!vector_id && (map->rxq_map || map->txq_map))) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* No need to map VF miscellaneous or rogue vector */
- if (!vector_id)
- continue;
-
-		/* Subtract the non-queue vectors from the vector_id passed by the VF
-		 * to get the actual VSI queue vector array index
- */
- q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
- if (!q_vector) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
-		/* look out for an invalid queue index */
- v_ret = ice_cfg_interrupt(vf, vsi, map, q_vector);
- if (v_ret)
- goto error_param;
- }
-
-error_param:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_vc_cfg_q_bw - Configure per queue bandwidth
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer which holds the command descriptor
- *
- * Configure VF queues bandwidth.
- *
- * Return: 0 on success or negative error value.
- */
-static int ice_vc_cfg_q_bw(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_queues_bw_cfg *qbw =
- (struct virtchnl_queues_bw_cfg *)msg;
- struct ice_vsi *vsi;
- u16 i;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
- !ice_vc_isvalid_vsi_id(vf, qbw->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- if (qbw->num_queues > ICE_MAX_RSS_QS_PER_VF ||
- qbw->num_queues > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
- dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
- vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- for (i = 0; i < qbw->num_queues; i++) {
- if (qbw->cfg[i].shaper.peak != 0 && vf->max_tx_rate != 0 &&
- qbw->cfg[i].shaper.peak > vf->max_tx_rate)
- dev_warn(ice_pf_to_dev(vf->pf), "The maximum queue %d rate limit configuration may not take effect because the maximum TX rate for VF-%d is %d\n",
- qbw->cfg[i].queue_id, vf->vf_id,
- vf->max_tx_rate);
- if (qbw->cfg[i].shaper.committed != 0 && vf->min_tx_rate != 0 &&
- qbw->cfg[i].shaper.committed < vf->min_tx_rate)
- dev_warn(ice_pf_to_dev(vf->pf), "The minimum queue %d rate limit configuration may not take effect because the minimum TX rate for VF-%d is %d\n",
- qbw->cfg[i].queue_id, vf->vf_id,
-				 vf->min_tx_rate);
- }
-
- for (i = 0; i < qbw->num_queues; i++) {
- vf->qs_bw[i].queue_id = qbw->cfg[i].queue_id;
- vf->qs_bw[i].peak = qbw->cfg[i].shaper.peak;
- vf->qs_bw[i].committed = qbw->cfg[i].shaper.committed;
- vf->qs_bw[i].tc = qbw->cfg[i].tc;
- }
-
- if (ice_vf_cfg_qs_bw(vf, qbw->num_queues))
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-
-err:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUEUE_BW,
- v_ret, NULL, 0);
-}
-
-/**
- * ice_vc_cfg_q_quanta - Configure per queue quanta
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer which holds the command descriptor
- *
- * Configure VF queues quanta.
- *
- * Return: 0 on success or negative error value.
- */
-static int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- u16 quanta_prof_id, quanta_size, start_qid, end_qid, i;
- struct virtchnl_quanta_cfg *qquanta =
- (struct virtchnl_quanta_cfg *)msg;
- struct ice_vsi *vsi;
- int ret;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- end_qid = qquanta->queue_select.start_queue_id +
- qquanta->queue_select.num_queues;
- if (end_qid > ICE_MAX_RSS_QS_PER_VF ||
- end_qid > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
- dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
- vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- quanta_size = qquanta->quanta_size;
- if (quanta_size > ICE_MAX_QUANTA_SIZE ||
- quanta_size < ICE_MIN_QUANTA_SIZE) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- if (quanta_size % 64) {
-		dev_err(ice_pf_to_dev(vf->pf), "quanta size should be a multiple of 64\n");
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- ret = ice_vf_cfg_q_quanta_profile(vf, quanta_size,
- &quanta_prof_id);
- if (ret) {
- v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
- goto err;
- }
-
- start_qid = qquanta->queue_select.start_queue_id;
- for (i = start_qid; i < end_qid; i++)
- vsi->tx_rings[i]->quanta_prof_id = quanta_prof_id;
-
-err:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUANTA,
- v_ret, NULL, 0);
-}
-
-/**
- * ice_vc_cfg_qs_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to configure the Rx/Tx queues
- */
-static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
-{
- struct virtchnl_vsi_queue_config_info *qci =
- (struct virtchnl_vsi_queue_config_info *)msg;
- struct virtchnl_queue_pair_info *qpi;
- struct ice_pf *pf = vf->pf;
- struct ice_lag *lag;
- struct ice_vsi *vsi;
- u8 act_prt, pri_prt;
- int i = -1, q_idx;
-
- lag = pf->lag;
- mutex_lock(&pf->lag_mutex);
- act_prt = ICE_LAG_INVALID_PORT;
- pri_prt = pf->hw.port_info->lport;
- if (lag && lag->bonded && lag->primary) {
- act_prt = lag->active_port;
- if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
- lag->upper_netdev)
- ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
- else
- act_prt = ICE_LAG_INVALID_PORT;
- }
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
- goto error_param;
-
- if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id))
- goto error_param;
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi)
- goto error_param;
-
- if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
- qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
- dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
- vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
- goto error_param;
- }
-
- for (i = 0; i < qci->num_queue_pairs; i++) {
- if (!qci->qpair[i].rxq.crc_disable)
- continue;
-
- if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_CRC) ||
- vf->vlan_strip_ena)
- goto error_param;
- }
-
- for (i = 0; i < qci->num_queue_pairs; i++) {
- qpi = &qci->qpair[i];
- if (qpi->txq.vsi_id != qci->vsi_id ||
- qpi->rxq.vsi_id != qci->vsi_id ||
- qpi->rxq.queue_id != qpi->txq.queue_id ||
- qpi->txq.headwb_enabled ||
- !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
- !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
- !ice_vc_isvalid_q_id(vsi, qpi->txq.queue_id)) {
- goto error_param;
- }
-
- q_idx = qpi->rxq.queue_id;
-
- /* make sure selected "q_idx" is in valid range of queues
- * for selected "vsi"
- */
- if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
- goto error_param;
- }
-
- /* copy Tx queue info from VF into VSI */
- if (qpi->txq.ring_len > 0) {
- vsi->tx_rings[q_idx]->dma = qpi->txq.dma_ring_addr;
- vsi->tx_rings[q_idx]->count = qpi->txq.ring_len;
-
- /* Disable any existing queue first */
- if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx))
- goto error_param;
-
- /* Configure a queue with the requested settings */
- if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
- dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n",
- vf->vf_id, q_idx);
- goto error_param;
- }
- }
-
- /* copy Rx queue info from VF into VSI */
- if (qpi->rxq.ring_len > 0) {
- u16 max_frame_size = ice_vc_get_max_frame_size(vf);
- struct ice_rx_ring *ring = vsi->rx_rings[q_idx];
- u32 rxdid;
-
- ring->dma = qpi->rxq.dma_ring_addr;
- ring->count = qpi->rxq.ring_len;
-
- if (qpi->rxq.crc_disable)
- ring->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
- else
- ring->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
-
- if (qpi->rxq.databuffer_size != 0 &&
- (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
- qpi->rxq.databuffer_size < 1024))
- goto error_param;
- ring->rx_buf_len = qpi->rxq.databuffer_size;
- if (qpi->rxq.max_pkt_size > max_frame_size ||
- qpi->rxq.max_pkt_size < 64)
- goto error_param;
-
- ring->max_frame = qpi->rxq.max_pkt_size;
- /* add space for the port VLAN since the VF driver is
- * not expected to account for it in the MTU
- * calculation
- */
- if (ice_vf_is_port_vlan_ena(vf))
- ring->max_frame += VLAN_HLEN;
-
- if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
- dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n",
- vf->vf_id, q_idx);
- goto error_param;
- }
-
- /* If Rx flex desc is supported, select RXDID for Rx
-			 * queues. Otherwise, use the legacy 32-byte descriptor
-			 * format. The legacy 16-byte descriptor is not supported;
-			 * if that RXDID is selected, return an error.
- */
- if (vf->driver_caps &
- VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
- rxdid = qpi->rxq.rxdid;
- if (!(BIT(rxdid) & pf->supported_rxdids))
- goto error_param;
- } else {
- rxdid = ICE_RXDID_LEGACY_1;
- }
-
- ice_write_qrxflxp_cntxt(&vsi->back->hw,
- vsi->rxq_map[q_idx],
- rxdid, 0x03, false);
- }
- }
-
- if (lag && lag->bonded && lag->primary &&
- act_prt != ICE_LAG_INVALID_PORT)
- ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
- mutex_unlock(&pf->lag_mutex);
-
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
- VIRTCHNL_STATUS_SUCCESS, NULL, 0);
-error_param:
- /* disable whatever we can */
- for (; i >= 0; i--) {
- if (ice_vsi_ctrl_one_rx_ring(vsi, false, i, true))
- dev_err(ice_pf_to_dev(pf), "VF-%d could not disable RX queue %d\n",
- vf->vf_id, i);
- if (ice_vf_vsi_dis_single_txq(vf, vsi, i))
- dev_err(ice_pf_to_dev(pf), "VF-%d could not disable TX queue %d\n",
- vf->vf_id, i);
- }
-
- if (lag && lag->bonded && lag->primary &&
- act_prt != ICE_LAG_INVALID_PORT)
- ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
- mutex_unlock(&pf->lag_mutex);
-
- ice_lag_move_new_vf_nodes(vf);
-
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
- VIRTCHNL_STATUS_ERR_PARAM, NULL, 0);
-}
-
-/**
* ice_can_vf_change_mac
* @vf: pointer to the VF info
*
@@ -2234,6 +741,51 @@ ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
}
/**
+ * ice_is_mc_lldp_eth_addr - check if the given MAC is a multicast LLDP address
+ * @mac: address to check
+ *
+ * Return: true if the address is one of the three possible LLDP multicast
+ * addresses, false otherwise.
+ */
+static bool ice_is_mc_lldp_eth_addr(const u8 *mac)
+{
+ const u8 lldp_mac_base[] = {0x01, 0x80, 0xc2, 0x00, 0x00};
+
+ if (memcmp(mac, lldp_mac_base, sizeof(lldp_mac_base)))
+ return false;
+
+ return (mac[5] == 0x0e || mac[5] == 0x03 || mac[5] == 0x00);
+}
+
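
A standalone restatement of the check (illustrative only, not part of the patch); the three accepted destinations are 01:80:c2:00:00:00, 01:80:c2:00:00:03 and 01:80:c2:00:00:0e, i.e. the nearest-customer-bridge, nearest-non-TPMR-bridge and nearest-bridge LLDP groups:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool is_mc_lldp(const unsigned char mac[6])
{
	const unsigned char base[] = { 0x01, 0x80, 0xc2, 0x00, 0x00 };

	/* same prefix-compare-then-last-octet logic as the helper above */
	if (memcmp(mac, base, sizeof(base)))
		return false;
	return mac[5] == 0x0e || mac[5] == 0x03 || mac[5] == 0x00;
}

int main(void)
{
	const unsigned char nearest_bridge[6] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e };
	const unsigned char other_mcast[6]    = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	/* prints "1 0": only the LLDP group addresses match */
	printf("%d %d\n", is_mc_lldp(nearest_bridge), is_mc_lldp(other_mcast));
	return 0;
}
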
+/**
+ * ice_vc_can_add_mac - check if the VF is allowed to add a given MAC
+ * @vf: a VF to add the address to
+ * @mac: address to check
+ *
+ * Return: true if the VF is allowed to add such MAC address, false otherwise.
+ */
+static bool ice_vc_can_add_mac(const struct ice_vf *vf, const u8 *mac)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+
+ if (is_unicast_ether_addr(mac) &&
+ !ice_can_vf_change_mac((struct ice_vf *)vf)) {
+ dev_err(dev,
+ "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
+ return false;
+ }
+
+ if (!vf->trusted && ice_is_mc_lldp_eth_addr(mac)) {
+ dev_warn(dev,
+ "An untrusted VF %u is attempting to configure an LLDP multicast address\n",
+ vf->vf_id);
+ return false;
+ }
+
+ return true;
+}
+
+/**
* ice_vc_add_mac_addr - attempt to add the MAC address passed in
* @vf: pointer to the VF info
* @vsi: pointer to the VF's VSI
@@ -2251,10 +803,8 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
if (ether_addr_equal(mac_addr, vf->dev_lan_addr))
return 0;
- if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
- dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
+ if (!ice_vc_can_add_mac(vf, mac_addr))
return -EPERM;
- }
ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
if (ret == -EEXIST) {
@@ -2269,6 +819,8 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
return ret;
} else {
vf->num_mac++;
+ if (ice_is_mc_lldp_eth_addr(mac_addr))
+ ice_vf_update_mac_lldp_num(vf, vsi, true);
}
ice_vfhw_mac_add(vf, vc_ether_addr);
@@ -2363,6 +915,8 @@ ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
ice_vfhw_mac_del(vf, vc_ether_addr);
vf->num_mac--;
+ if (ice_is_mc_lldp_eth_addr(mac_addr))
+ ice_vf_update_mac_lldp_num(vf, vsi, false);
return 0;
}
@@ -2468,66 +1022,6 @@ static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
}
/**
- * ice_vc_request_qs_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * VFs get a default number of queues but can use this message to request a
- * different number. If the request is successful, PF will reset the VF and
- * return 0. If unsuccessful, the PF will inform the VF of the number of
- * available queue pairs in the virtchnl message response.
- */
-static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_vf_res_request *vfres =
- (struct virtchnl_vf_res_request *)msg;
- u16 req_queues = vfres->num_queue_pairs;
- struct ice_pf *pf = vf->pf;
- u16 max_allowed_vf_queues;
- u16 tx_rx_queue_left;
- struct device *dev;
- u16 cur_queues;
-
- dev = ice_pf_to_dev(pf);
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- cur_queues = vf->num_vf_qs;
- tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
- ice_get_avail_rxq_count(pf));
- max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
- if (!req_queues) {
- dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
- vf->vf_id);
- } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
- dev_err(dev, "VF %d tried to request more than %d queues.\n",
- vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
- vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
- } else if (req_queues > cur_queues &&
- req_queues - cur_queues > tx_rx_queue_left) {
- dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
- vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
- vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
- ICE_MAX_RSS_QS_PER_VF);
- } else {
- /* request is successful, then reset VF */
- vf->num_req_qs = req_queues;
- ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
- dev_info(dev, "VF %d granted request of %u queues.\n",
- vf->vf_id, req_queues);
- return 0;
- }
-
-error_param:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
- v_ret, (u8 *)vfres, sizeof(*vfres));
-}
-
-/**
* ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
* @caps: VF driver negotiated capabilities
*
@@ -2542,7 +1036,7 @@ static bool ice_vf_vlan_offload_ena(u32 caps)
* ice_is_vlan_promisc_allowed - check if VLAN promiscuous config is allowed
* @vf: VF used to determine if VLAN promiscuous config is allowed
*/
-static bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
+bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
{
if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
@@ -2554,17 +1048,27 @@ static bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
/**
* ice_vf_ena_vlan_promisc - Enable Tx/Rx VLAN promiscuous for the VLAN
+ * @vf: VF to enable VLAN promisc on
* @vsi: VF's VSI used to enable VLAN promiscuous mode
* @vlan: VLAN used to enable VLAN promiscuous
*
* This function should only be called if VLAN promiscuous mode is allowed,
* which can be determined via ice_is_vlan_promisc_allowed().
*/
-static int ice_vf_ena_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
+int ice_vf_ena_vlan_promisc(struct ice_vf *vf, struct ice_vsi *vsi,
+ struct ice_vlan *vlan)
{
- u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
+ u8 promisc_m = 0;
int status;
+ if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
+ promisc_m |= ICE_UCAST_VLAN_PROMISC_BITS;
+ if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
+ promisc_m |= ICE_MCAST_VLAN_PROMISC_BITS;
+
+ if (!promisc_m)
+ return 0;
+
status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
vlan->vid);
if (status && status != -EEXIST)
@@ -2583,7 +1087,7 @@ static int ice_vf_ena_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
*/
static int ice_vf_dis_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
{
- u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
+ u8 promisc_m = ICE_UCAST_VLAN_PROMISC_BITS | ICE_MCAST_VLAN_PROMISC_BITS;
int status;
status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
@@ -2738,7 +1242,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
goto error_param;
}
} else if (vlan_promisc) {
- status = ice_vf_ena_vlan_promisc(vsi, &vlan);
+ status = ice_vf_ena_vlan_promisc(vf, vsi, &vlan);
if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
@@ -2910,108 +1414,6 @@ error_param:
}
/**
- * ice_vc_get_rss_hena - return the RSS HENA bits allowed by the hardware
- * @vf: pointer to the VF info
- */
-static int ice_vc_get_rss_hena(struct ice_vf *vf)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_rss_hena *vrh = NULL;
- int len = 0, ret;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- dev_err(ice_pf_to_dev(vf->pf), "RSS not supported by PF\n");
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- len = sizeof(struct virtchnl_rss_hena);
- vrh = kzalloc(len, GFP_KERNEL);
- if (!vrh) {
- v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
- len = 0;
- goto err;
- }
-
- vrh->hena = ICE_DEFAULT_RSS_HENA;
-err:
- /* send the response back to the VF */
- ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS, v_ret,
- (u8 *)vrh, len);
- kfree(vrh);
- return ret;
-}
-
-/**
- * ice_vc_set_rss_hena - set RSS HENA bits for the VF
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- */
-static int ice_vc_set_rss_hena(struct ice_vf *vf, u8 *msg)
-{
- struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
- struct device *dev;
- int status;
-
- dev = ice_pf_to_dev(pf);
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
- dev_err(dev, "RSS not supported by PF\n");
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- /* clear all previously programmed RSS configuration to allow VF drivers
- * the ability to customize the RSS configuration and/or completely
- * disable RSS
- */
- status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
- if (status && !vrh->hena) {
- /* only report failure to clear the current RSS configuration if
- * that was clearly the VF's intention (i.e. vrh->hena = 0)
- */
- v_ret = ice_err_to_virt_err(status);
- goto err;
- } else if (status) {
- /* allow the VF to update the RSS configuration even on failure
-		 * to clear the current RSS configuration in an attempt to keep
- * RSS in a working state
- */
- dev_warn(dev, "Failed to clear the RSS configuration for VF %u\n",
- vf->vf_id);
- }
-
- if (vrh->hena) {
- status = ice_add_avf_rss_cfg(&pf->hw, vsi, vrh->hena);
- v_ret = ice_err_to_virt_err(status);
- }
-
- /* send the response to the VF */
-err:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, v_ret,
- NULL, 0);
-}
-
-/**
* ice_vc_query_rxdid - query RXDID supported by DDP package
* @vf: pointer to VF info
*
@@ -3021,12 +1423,8 @@ err:
static int ice_vc_query_rxdid(struct ice_vf *vf)
{
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_supported_rxdids *rxdid = NULL;
- struct ice_hw *hw = &vf->pf->hw;
struct ice_pf *pf = vf->pf;
- int len = 0;
- int ret, i;
- u32 regval;
+ u64 rxdid;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -3038,35 +1436,11 @@ static int ice_vc_query_rxdid(struct ice_vf *vf)
goto err;
}
- len = sizeof(struct virtchnl_supported_rxdids);
- rxdid = kzalloc(len, GFP_KERNEL);
- if (!rxdid) {
- v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
- len = 0;
- goto err;
- }
-
-	/* RXDIDs supported by the DDP package can be read from the register
-	 * to get the supported RXDID bitmap. But the legacy 32-byte RXDID
-	 * is not listed in the DDP package, so add it to the bitmap manually.
-	 * The legacy 16-byte descriptor is not supported.
- */
- rxdid->supported_rxdids |= BIT(ICE_RXDID_LEGACY_1);
-
- for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
- regval = rd32(hw, GLFLXP_RXDID_FLAGS(i, 0));
- if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
- & GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
- rxdid->supported_rxdids |= BIT(i);
- }
-
- pf->supported_rxdids = rxdid->supported_rxdids;
+ rxdid = pf->supported_rxdids;
err:
- ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
- v_ret, (u8 *)rxdid, len);
- kfree(rxdid);
- return ret;
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
+ v_ret, (u8 *)&rxdid, sizeof(rxdid));
}
/**
@@ -3575,7 +1949,7 @@ ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
return err;
if (vlan_promisc) {
- err = ice_vf_ena_vlan_promisc(vsi, &vlan);
+ err = ice_vf_ena_vlan_promisc(vf, vsi, &vlan);
if (err)
return err;
}
@@ -3603,7 +1977,8 @@ ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
*/
if (!ice_is_dvm_ena(&vsi->back->hw)) {
if (vlan_promisc) {
- err = ice_vf_ena_vlan_promisc(vsi, &vlan);
+ err = ice_vf_ena_vlan_promisc(vf, vsi,
+ &vlan);
if (err)
return err;
}
@@ -3794,48 +2169,6 @@ ice_vc_ena_vlan_offload(struct ice_vsi *vsi,
return 0;
}
-#define ICE_L2TSEL_QRX_CONTEXT_REG_IDX 3
-#define ICE_L2TSEL_BIT_OFFSET 23
-enum ice_l2tsel {
- ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND,
- ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1,
-};
-
-/**
- * ice_vsi_update_l2tsel - update l2tsel field for all Rx rings on this VSI
- * @vsi: VSI used to update l2tsel on
- * @l2tsel: l2tsel setting requested
- *
- * Use the l2tsel setting to update all of the Rx queue context bits for l2tsel.
- * This will modify which descriptor field the first offloaded VLAN will be
- * stripped into.
- */
-static void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel)
-{
- struct ice_hw *hw = &vsi->back->hw;
- u32 l2tsel_bit;
- int i;
-
- if (l2tsel == ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND)
- l2tsel_bit = 0;
- else
- l2tsel_bit = BIT(ICE_L2TSEL_BIT_OFFSET);
-
- for (i = 0; i < vsi->alloc_rxq; i++) {
- u16 pfq = vsi->rxq_map[i];
- u32 qrx_context_offset;
- u32 regval;
-
- qrx_context_offset =
- QRX_CONTEXT(ICE_L2TSEL_QRX_CONTEXT_REG_IDX, pfq);
-
- regval = rd32(hw, qrx_context_offset);
- regval &= ~BIT(ICE_L2TSEL_BIT_OFFSET);
- regval |= l2tsel_bit;
- wr32(hw, qrx_context_offset, regval);
- }
-}
-
/**
* ice_vc_ena_vlan_stripping_v2_msg
* @vf: VF the message was received from
@@ -4109,6 +2442,59 @@ out:
v_ret, NULL, 0);
}
+static int ice_vc_get_ptp_cap(struct ice_vf *vf,
+ const struct virtchnl_ptp_caps *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ u32 caps = VIRTCHNL_1588_PTP_CAP_RX_TSTAMP |
+ VIRTCHNL_1588_PTP_CAP_READ_PHC;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ goto err;
+
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+
+ if (msg->caps & caps)
+ vf->ptp_caps = caps;
+
+err:
+ /* send the response back to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_1588_PTP_GET_CAPS, v_ret,
+ (u8 *)&vf->ptp_caps,
+ sizeof(struct virtchnl_ptp_caps));
+}
+
+static int ice_vc_get_phc_time(struct ice_vf *vf)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ struct virtchnl_phc_time *phc_time = NULL;
+ struct ice_pf *pf = vf->pf;
+ u32 len = 0;
+ int ret;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ goto err;
+
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+
+ phc_time = kzalloc(sizeof(*phc_time), GFP_KERNEL);
+ if (!phc_time) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ goto err;
+ }
+
+ len = sizeof(*phc_time);
+
+ phc_time->time = ice_ptp_read_src_clk_reg(pf, NULL);
+
+err:
+ /* send the response back to the VF */
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_1588_PTP_GET_TIME, v_ret,
+ (u8 *)phc_time, len);
+ kfree(phc_time);
+ return ret;
+}
+
static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
.get_ver_msg = ice_vc_get_ver_msg,
.get_vf_res_msg = ice_vc_get_vf_res_msg,
@@ -4128,8 +2514,8 @@ static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
.add_vlan_msg = ice_vc_add_vlan_msg,
.remove_vlan_msg = ice_vc_remove_vlan_msg,
.query_rxdid = ice_vc_query_rxdid,
- .get_rss_hena = ice_vc_get_rss_hena,
- .set_rss_hena_msg = ice_vc_set_rss_hena,
+ .get_rss_hashcfg = ice_vc_get_rss_hashcfg,
+ .set_rss_hashcfg = ice_vc_set_rss_hashcfg,
.ena_vlan_stripping = ice_vc_ena_vlan_stripping,
.dis_vlan_stripping = ice_vc_dis_vlan_stripping,
.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
@@ -4145,6 +2531,11 @@ static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
.get_qos_caps = ice_vc_get_qos_caps,
.cfg_q_bw = ice_vc_cfg_q_bw,
.cfg_q_quanta = ice_vc_cfg_q_quanta,
+ .get_ptp_cap = ice_vc_get_ptp_cap,
+ .get_phc_time = ice_vc_get_phc_time,
+ /* If you add a new op here please make sure to add it to
+ * ice_virtchnl_repr_ops as well.
+ */
};
/**
@@ -4202,7 +2593,6 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
}
ice_vfhw_mac_add(vf, &al->list[i]);
- vf->num_mac++;
break;
}
@@ -4261,8 +2651,8 @@ static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
.add_vlan_msg = ice_vc_add_vlan_msg,
.remove_vlan_msg = ice_vc_remove_vlan_msg,
.query_rxdid = ice_vc_query_rxdid,
- .get_rss_hena = ice_vc_get_rss_hena,
- .set_rss_hena_msg = ice_vc_set_rss_hena,
+ .get_rss_hashcfg = ice_vc_get_rss_hashcfg,
+ .set_rss_hashcfg = ice_vc_set_rss_hashcfg,
.ena_vlan_stripping = ice_vc_ena_vlan_stripping,
.dis_vlan_stripping = ice_vc_dis_vlan_stripping,
.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
@@ -4275,6 +2665,11 @@ static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
.dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
.ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
.dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
+ .get_qos_caps = ice_vc_get_qos_caps,
+ .cfg_q_bw = ice_vc_cfg_q_bw,
+ .cfg_q_quanta = ice_vc_cfg_q_quanta,
+ .get_ptp_cap = ice_vc_get_ptp_cap,
+ .get_phc_time = ice_vc_get_phc_time,
};
/**
@@ -4458,11 +2853,11 @@ error_handler:
case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
err = ops->query_rxdid(vf);
break;
- case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
- err = ops->get_rss_hena(vf);
+ case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS:
+ err = ops->get_rss_hashcfg(vf);
break;
- case VIRTCHNL_OP_SET_RSS_HENA:
- err = ops->set_rss_hena_msg(vf, msg);
+ case VIRTCHNL_OP_SET_RSS_HASHCFG:
+ err = ops->set_rss_hashcfg(vf, msg);
break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
err = ops->ena_vlan_stripping(vf);
@@ -4512,6 +2907,12 @@ error_handler:
case VIRTCHNL_OP_CONFIG_QUANTA:
err = ops->cfg_q_quanta(vf, msg);
break;
+ case VIRTCHNL_OP_1588_PTP_GET_CAPS:
+ err = ops->get_ptp_cap(vf, (const void *)msg);
+ break;
+ case VIRTCHNL_OP_1588_PTP_GET_TIME:
+ err = ops->get_phc_time(vf);
+ break;
case VIRTCHNL_OP_UNKNOWN:
default:
dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/virt/virtchnl.h
index 0c629aef9baf..71bb456e2d71 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h
+++ b/drivers/net/ethernet/intel/ice/virt/virtchnl.h
@@ -26,6 +26,9 @@
#define ICE_MAX_MACADDR_PER_VF 18
#define ICE_FLEX_DESC_RXDID_MAX_NUM 64
+/* Priority to be compared against previous priority from the pipe */
+#define ICE_RXDID_PRIO 0x03
+
/* VFs only get a single VSI. For ice hardware, the VF does not need to know
* its VSI index. However, the virtchnl interface requires a VSI number,
* mainly due to legacy hardware.
@@ -54,8 +57,8 @@ struct ice_virtchnl_ops {
int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg);
int (*remove_vlan_msg)(struct ice_vf *vf, u8 *msg);
int (*query_rxdid)(struct ice_vf *vf);
- int (*get_rss_hena)(struct ice_vf *vf);
- int (*set_rss_hena_msg)(struct ice_vf *vf, u8 *msg);
+ int (*get_rss_hashcfg)(struct ice_vf *vf);
+ int (*set_rss_hashcfg)(struct ice_vf *vf, u8 *msg);
int (*ena_vlan_stripping)(struct ice_vf *vf);
int (*dis_vlan_stripping)(struct ice_vf *vf);
int (*handle_rss_cfg_msg)(struct ice_vf *vf, u8 *msg, bool add);
@@ -72,6 +75,9 @@ struct ice_virtchnl_ops {
int (*cfg_q_tc_map)(struct ice_vf *vf, u8 *msg);
int (*cfg_q_bw)(struct ice_vf *vf, u8 *msg);
int (*cfg_q_quanta)(struct ice_vf *vf, u8 *msg);
+ int (*get_ptp_cap)(struct ice_vf *vf,
+ const struct virtchnl_ptp_caps *msg);
+ int (*get_phc_time)(struct ice_vf *vf);
};
#ifdef CONFIG_PCI_IOV
@@ -86,12 +92,31 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id);
void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
struct ice_mbx_data *mbxdata);
+void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx);
+void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx);
+int ice_vf_ena_vlan_promisc(struct ice_vf *vf, struct ice_vsi *vsi,
+ struct ice_vlan *vlan);
+bool ice_is_vlan_promisc_allowed(struct ice_vf *vf);
#else /* CONFIG_PCI_IOV */
static inline void ice_virtchnl_set_dflt_ops(struct ice_vf *vf) { }
static inline void ice_virtchnl_set_repr_ops(struct ice_vf *vf) { }
static inline void ice_vc_notify_vf_link_state(struct ice_vf *vf) { }
static inline void ice_vc_notify_link_state(struct ice_pf *pf) { }
static inline void ice_vc_notify_reset(struct ice_pf *pf) { }
+static inline void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx) { }
+static inline void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx) { }
+
+static inline int ice_vf_ena_vlan_promisc(struct ice_vf *vf,
+ struct ice_vsi *vsi,
+ struct ice_vlan *vlan)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
+{
+ return false;
+}
static inline int
ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
diff --git a/drivers/net/ethernet/intel/idpf/Kconfig b/drivers/net/ethernet/intel/idpf/Kconfig
index 1addd663acad..adab2154125b 100644
--- a/drivers/net/ethernet/intel/idpf/Kconfig
+++ b/drivers/net/ethernet/intel/idpf/Kconfig
@@ -4,8 +4,9 @@
config IDPF
tristate "Intel(R) Infrastructure Data Path Function Support"
depends on PCI_MSI
+ depends on PTP_1588_CLOCK_OPTIONAL
select DIMLIB
- select LIBETH
+ select LIBETH_XDP
help
This driver supports Intel(R) Infrastructure Data Path Function
devices.
diff --git a/drivers/net/ethernet/intel/idpf/Makefile b/drivers/net/ethernet/intel/idpf/Makefile
index 2ce01a0b5898..651ddee942bd 100644
--- a/drivers/net/ethernet/intel/idpf/Makefile
+++ b/drivers/net/ethernet/intel/idpf/Makefile
@@ -10,6 +10,7 @@ idpf-y := \
idpf_controlq_setup.o \
idpf_dev.o \
idpf_ethtool.o \
+ idpf_idc.o \
idpf_lib.o \
idpf_main.o \
idpf_txrx.o \
@@ -17,3 +18,9 @@ idpf-y := \
idpf_vf_dev.o
idpf-$(CONFIG_IDPF_SINGLEQ) += idpf_singleq_txrx.o
+
+idpf-$(CONFIG_PTP_1588_CLOCK) += idpf_ptp.o
+idpf-$(CONFIG_PTP_1588_CLOCK) += idpf_virtchnl_ptp.o
+
+idpf-y += xdp.o
+idpf-y += xsk.o
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index 2c31ad87587a..8cfc68cbfa06 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -12,12 +12,16 @@ struct idpf_vport_max_q;
#include <net/pkt_sched.h>
#include <linux/aer.h>
#include <linux/etherdevice.h>
+#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/bitfield.h>
#include <linux/sctp.h>
#include <linux/ethtool_netlink.h>
#include <net/gro.h>
+#include <linux/net/intel/iidc_rdma.h>
+#include <linux/net/intel/iidc_rdma_idpf.h>
+
#include "virtchnl2.h"
#include "idpf_txrx.h"
#include "idpf_controlq.h"
@@ -36,6 +40,7 @@ struct idpf_vport_max_q;
#define IDPF_NUM_CHUNKS_PER_MSG(struct_sz, chunk_sz) \
((IDPF_CTLQ_MAX_BUF_LEN - (struct_sz)) / (chunk_sz))
+#define IDPF_WAIT_FOR_MARKER_TIMEO 500
#define IDPF_MAX_WAIT 500
/* available message levels */
@@ -126,14 +131,12 @@ enum idpf_cap_field {
/**
* enum idpf_vport_state - Current vport state
- * @__IDPF_VPORT_DOWN: Vport is down
- * @__IDPF_VPORT_UP: Vport is up
- * @__IDPF_VPORT_STATE_LAST: Must be last, number of states
+ * @IDPF_VPORT_UP: Vport is up
+ * @IDPF_VPORT_STATE_NBITS: Must be last, number of states
*/
enum idpf_vport_state {
- __IDPF_VPORT_DOWN,
- __IDPF_VPORT_UP,
- __IDPF_VPORT_STATE_LAST,
+ IDPF_VPORT_UP,
+ IDPF_VPORT_STATE_NBITS
};
/**
@@ -141,7 +144,10 @@ enum idpf_vport_state {
* @adapter: Adapter back pointer
* @vport: Vport back pointer
* @vport_id: Vport identifier
+ * @link_speed_mbps: Link speed in mbps
* @vport_idx: Relative vport index
+ * @max_tx_hdr_size: Max header length hardware can support
+ * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
* @state: See enum idpf_vport_state
* @netstats: Packet and byte stats
* @stats_lock: Lock to protect stats update
@@ -150,8 +156,11 @@ struct idpf_netdev_priv {
struct idpf_adapter *adapter;
struct idpf_vport *vport;
u32 vport_id;
+ u32 link_speed_mbps;
u16 vport_idx;
- enum idpf_vport_state state;
+ u16 max_tx_hdr_size;
+ u16 tx_max_bufs;
+ DECLARE_BITMAP(state, IDPF_VPORT_STATE_NBITS);
struct rtnl_link_stats64 netstats;
spinlock_t stats_lock;
};
@@ -187,22 +196,38 @@ struct idpf_vport_max_q {
* @mb_intr_reg_init: Mailbox interrupt register initialization
* @reset_reg_init: Reset register initialization
* @trigger_reset: Trigger a reset to occur
+ * @ptp_reg_init: PTP register initialization
*/
struct idpf_reg_ops {
- void (*ctlq_reg_init)(struct idpf_ctlq_create_info *cq);
+ void (*ctlq_reg_init)(struct idpf_adapter *adapter,
+ struct idpf_ctlq_create_info *cq);
int (*intr_reg_init)(struct idpf_vport *vport);
void (*mb_intr_reg_init)(struct idpf_adapter *adapter);
void (*reset_reg_init)(struct idpf_adapter *adapter);
void (*trigger_reset)(struct idpf_adapter *adapter,
enum idpf_flags trig_cause);
+ void (*ptp_reg_init)(const struct idpf_adapter *adapter);
};
+#define IDPF_MMIO_REG_NUM_STATIC 2
+#define IDPF_PF_MBX_REGION_SZ 4096
+#define IDPF_PF_RSTAT_REGION_SZ 2048
+#define IDPF_VF_MBX_REGION_SZ 10240
+#define IDPF_VF_RSTAT_REGION_SZ 2048
+
/**
* struct idpf_dev_ops - Device specific operations
* @reg_ops: Register operations
+ * @idc_init: IDC initialization
+ * @static_reg_info: array of mailbox and rstat register info
*/
struct idpf_dev_ops {
struct idpf_reg_ops reg_ops;
+
+ int (*idc_init)(struct idpf_adapter *adapter);
+
+ /* static_reg_info[0] is mailbox region, static_reg_info[1] is rstat */
+ struct resource static_reg_info[IDPF_MMIO_REG_NUM_STATIC];
};
/**
@@ -222,16 +247,28 @@ enum idpf_vport_reset_cause {
/**
* enum idpf_vport_flags - Vport flags
* @IDPF_VPORT_DEL_QUEUES: To send delete queues message
- * @IDPF_VPORT_SW_MARKER: Indicate TX pipe drain software marker packets
- * processing is done
* @IDPF_VPORT_FLAGS_NBITS: Must be last
*/
enum idpf_vport_flags {
IDPF_VPORT_DEL_QUEUES,
- IDPF_VPORT_SW_MARKER,
IDPF_VPORT_FLAGS_NBITS,
};
+/**
+ * struct idpf_tstamp_stats - Tx timestamp statistics
+ * @stats_sync: See struct u64_stats_sync
+ * @packets: Number of packets successfully timestamped by the hardware
+ * @discarded: Number of Tx skbs discarded because the cached PHC time was
+ * too old to correctly extend the timestamp
+ * @flushed: Number of Tx skbs flushed because the interface was closed
+ */
+struct idpf_tstamp_stats {
+ struct u64_stats_sync stats_sync;
+ u64_stats_t packets;
+ u64_stats_t discarded;
+ u64_stats_t flushed;
+};
+
struct idpf_port_stats {
struct u64_stats_sync stats_sync;
u64_stats_t rx_hw_csum_err;
@@ -245,6 +282,12 @@ struct idpf_port_stats {
struct virtchnl2_vport_stats vport_stats;
};
+struct idpf_fsteer_fltr {
+ struct list_head list;
+ u32 loc;
+ u32 q_index;
+};
+
/**
* struct idpf_vport - Handle for netdevices and queue resources
* @num_txq: Number of allocated TX queues
@@ -257,6 +300,10 @@ struct idpf_port_stats {
* @txq_model: Split queue or single queue queuing model
* @txqs: Used only in hotpath to get to the right queue very fast
* @crc_enable: Enable CRC insertion offload
+ * @xdpsq_share: whether XDPSQ sharing is enabled
+ * @num_xdp_txq: number of XDPSQs
+ * @xdp_txq_offset: index of the first XDPSQ (== number of regular SQs)
+ * @xdp_prog: installed XDP program
* @num_rxq: Number of allocated RX queues
* @num_bufq: Number of allocated buffer queues
* @rxq_desc_count: RX queue descriptor count. *MUST* have enough descriptors
@@ -269,6 +316,7 @@ struct idpf_port_stats {
* group will yield total number of RX queues.
* @rxq_model: Splitq queue or single queue queuing model
* @rx_ptype_lkup: Lookup table for ptypes on RX
+ * @vdev_info: IDC vport device info pointer
* @adapter: back pointer to associated adapter
* @netdev: Associated net_device. Each vport should have one and only one
* associated netdev.
@@ -281,14 +329,19 @@ struct idpf_port_stats {
* @num_q_vectors: Number of IRQ vectors allocated
* @q_vectors: Array of queue vectors
* @q_vector_idxs: Starting index of queue vectors
+ * @noirq_dyn_ctl: register to enable/disable the vector for NOIRQ queues
+ * @noirq_dyn_ctl_ena: value to write to the above to enable it
+ * @noirq_v_idx: ID of the NOIRQ vector
* @max_mtu: device given max possible MTU
* @default_mac_addr: device will give a default MAC to use
* @rx_itr_profile: RX profiles for Dynamic Interrupt Moderation
* @tx_itr_profile: TX profiles for Dynamic Interrupt Moderation
* @port_stats: per port csum, header split, and other offload stats
* @link_up: True if link is up
- * @link_speed_mbps: Link speed in mbps
- * @sw_marker_wq: workqueue for marker packets
+ * @tx_tstamp_caps: Capabilities negotiated for Tx timestamping
+ * @tstamp_config: The Tx tstamp config
+ * @tstamp_task: Tx timestamping task
+ * @tstamp_stats: Tx timestamping statistics
*/
struct idpf_vport {
u16 num_txq;
@@ -302,6 +355,11 @@ struct idpf_vport {
struct idpf_tx_queue **txqs;
bool crc_enable;
+ bool xdpsq_share;
+ u16 num_xdp_txq;
+ u16 xdp_txq_offset;
+ struct bpf_prog *xdp_prog;
+
u16 num_rxq;
u16 num_bufq;
u32 rxq_desc_count;
@@ -312,6 +370,8 @@ struct idpf_vport {
u32 rxq_model;
struct libeth_rx_pt *rx_ptype_lkup;
+ struct iidc_rdma_vport_dev_info *vdev_info;
+
struct idpf_adapter *adapter;
struct net_device *netdev;
DECLARE_BITMAP(flags, IDPF_VPORT_FLAGS_NBITS);
@@ -324,6 +384,11 @@ struct idpf_vport {
u16 num_q_vectors;
struct idpf_q_vector *q_vectors;
u16 *q_vector_idxs;
+
+ void __iomem *noirq_dyn_ctl;
+ u32 noirq_dyn_ctl_ena;
+ u16 noirq_v_idx;
+
u16 max_mtu;
u8 default_mac_addr[ETH_ALEN];
u16 rx_itr_profile[IDPF_DIM_PROFILE_SLOTS];
@@ -331,9 +396,11 @@ struct idpf_vport {
struct idpf_port_stats port_stats;
bool link_up;
- u32 link_speed_mbps;
- wait_queue_head_t sw_marker_wq;
+ struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
+ struct kernel_hwtstamp_config tstamp_config;
+ struct work_struct tstamp_task;
+ struct idpf_tstamp_stats tstamp_stats;
};
/**
@@ -368,28 +435,53 @@ struct idpf_rss_data {
};
/**
+ * struct idpf_q_coalesce - User defined coalescing configuration values for
+ * a single queue.
+ * @tx_intr_mode: Dynamic TX ITR or not
+ * @rx_intr_mode: Dynamic RX ITR or not
+ * @tx_coalesce_usecs: TX interrupt throttling rate
+ * @rx_coalesce_usecs: RX interrupt throttling rate
+ *
+ * Used to restore user coalescing configuration after a reset.
+ */
+struct idpf_q_coalesce {
+ u32 tx_intr_mode;
+ u32 rx_intr_mode;
+ u32 tx_coalesce_usecs;
+ u32 rx_coalesce_usecs;
+};
+
+/**
* struct idpf_vport_user_config_data - User defined configuration values for
* each vport.
* @rss_data: See struct idpf_rss_data
+ * @q_coalesce: Array of per queue coalescing data
* @num_req_tx_qs: Number of user requested TX queues through ethtool
* @num_req_rx_qs: Number of user requested RX queues through ethtool
* @num_req_txq_desc: Number of user requested TX queue descriptors through
* ethtool
* @num_req_rxq_desc: Number of user requested RX queue descriptors through
* ethtool
+ * @xdp_prog: requested XDP program to install
* @user_flags: User toggled config flags
* @mac_filter_list: List of MAC filters
+ * @num_fsteer_fltrs: number of flow steering filters
+ * @flow_steer_list: list of flow steering filters
*
* Used to restore configuration after a reset as the vport will get wiped.
*/
struct idpf_vport_user_config_data {
struct idpf_rss_data rss_data;
+ struct idpf_q_coalesce *q_coalesce;
u16 num_req_tx_qs;
u16 num_req_rx_qs;
u32 num_req_txq_desc;
u32 num_req_rxq_desc;
+ struct bpf_prog *xdp_prog;
DECLARE_BITMAP(user_flags, __IDPF_USER_FLAGS_NBITS);
struct list_head mac_filter_list;
+ u32 num_fsteer_fltrs;
+ struct list_head flow_steer_list;
};
/**
@@ -478,6 +570,13 @@ struct idpf_vport_config {
struct idpf_vc_xn_manager;
+#define idpf_for_each_vport(adapter, iter) \
+ for (struct idpf_vport **__##iter = &(adapter)->vports[0], \
+ *iter = (adapter)->max_vports ? *__##iter : NULL; \
+ iter; \
+ iter = (++__##iter) < &(adapter)->vports[(adapter)->max_vports] ? \
+ *__##iter : NULL)
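+
+/* Illustrative usage (a sketch, not taken from this patch); iteration yields
+ * each vport pointer in order and stops at the array bound or at the first
+ * NULL slot:
+ *
+ *	struct idpf_vport *vport;
+ *
+ *	idpf_for_each_vport(adapter, vport)
+ *		dev_dbg(&adapter->pdev->dev, "vport id %u\n", vport->vport_id);
+ */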
+
/**
* struct idpf_adapter - Device data struct generated on probe
* @pdev: PCI device struct given on probe
@@ -489,10 +588,11 @@ struct idpf_vc_xn_manager;
* @flags: See enum idpf_flags
* @reset_reg: See struct idpf_reset_reg
* @hw: Device access data
- * @num_req_msix: Requested number of MSIX vectors
* @num_avail_msix: Available number of MSIX vectors
* @num_msix_entries: Number of entries in MSIX table
* @msix_entries: MSIX table
+ * @num_rdma_msix_entries: Available number of MSIX vectors for RDMA
+ * @rdma_msix_entries: RDMA MSIX table
* @req_vec_chunks: Requested vector chunk data
* @mb_vector: Mailbox vector data
* @vector_stack: Stack to store the msix vector indexes
@@ -521,6 +621,7 @@ struct idpf_vc_xn_manager;
* @caps: Negotiated capabilities with device
* @vcxn_mngr: Virtchnl transaction manager
* @dev_ops: See idpf_dev_ops
+ * @cdev_info: IDC core device info pointer
* @num_vfs: Number of allocated VFs through sysfs. PF does not directly talk
* to VFs but is used to initialize them
* @crc_enable: Enable CRC insertion offload
@@ -530,6 +631,7 @@ struct idpf_vc_xn_manager;
* @vector_lock: Lock to protect vector distribution
* @queue_lock: Lock to protect queue distribution
* @vc_buf_lock: Lock to protect virtchnl buffer
+ * @ptp: Storage for PTP-related data
*/
struct idpf_adapter {
struct pci_dev *pdev;
@@ -542,10 +644,11 @@ struct idpf_adapter {
DECLARE_BITMAP(flags, IDPF_FLAGS_NBITS);
struct idpf_reset_reg reset_reg;
struct idpf_hw hw;
- u16 num_req_msix;
u16 num_avail_msix;
u16 num_msix_entries;
struct msix_entry *msix_entries;
+ u16 num_rdma_msix_entries;
+ struct msix_entry *rdma_msix_entries;
struct virtchnl2_alloc_vectors *req_vec_chunks;
struct idpf_q_vector mb_vector;
struct idpf_vector_lifo vector_stack;
@@ -578,6 +681,7 @@ struct idpf_adapter {
struct idpf_vc_xn_manager *vcxn_mngr;
struct idpf_dev_ops dev_ops;
+ struct iidc_rdma_core_dev_info *cdev_info;
int num_vfs;
bool crc_enable;
bool req_tx_splitq;
@@ -587,6 +691,8 @@ struct idpf_adapter {
struct mutex vector_lock;
struct mutex queue_lock;
struct mutex vc_buf_lock;
+
+ struct idpf_ptp *ptp;
};
/**
@@ -601,6 +707,11 @@ static inline int idpf_is_queue_model_split(u16 q_model)
q_model == VIRTCHNL2_QUEUE_MODEL_SPLIT;
}
+static inline bool idpf_xdp_enabled(const struct idpf_vport *vport)
+{
+ return vport->adapter && vport->xdp_prog;
+}
+
#define idpf_is_cap_ena(adapter, field, flag) \
idpf_is_capability_ena(adapter, false, field, flag)
#define idpf_is_cap_ena_all(adapter, field, flag) \
@@ -609,17 +720,26 @@ static inline int idpf_is_queue_model_split(u16 q_model)
bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
enum idpf_cap_field field, u64 flag);
+/**
+ * idpf_is_rdma_cap_ena - Determine if RDMA is supported
+ * @adapter: private data struct
+ *
+ * Return: true if RDMA capability is enabled, false otherwise
+ */
+static inline bool idpf_is_rdma_cap_ena(struct idpf_adapter *adapter)
+{
+ return idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_RDMA);
+}
+
#define IDPF_CAP_RSS (\
- VIRTCHNL2_CAP_RSS_IPV4_TCP |\
- VIRTCHNL2_CAP_RSS_IPV4_TCP |\
- VIRTCHNL2_CAP_RSS_IPV4_UDP |\
- VIRTCHNL2_CAP_RSS_IPV4_SCTP |\
- VIRTCHNL2_CAP_RSS_IPV4_OTHER |\
- VIRTCHNL2_CAP_RSS_IPV6_TCP |\
- VIRTCHNL2_CAP_RSS_IPV6_TCP |\
- VIRTCHNL2_CAP_RSS_IPV6_UDP |\
- VIRTCHNL2_CAP_RSS_IPV6_SCTP |\
- VIRTCHNL2_CAP_RSS_IPV6_OTHER)
+ VIRTCHNL2_FLOW_IPV4_TCP |\
+ VIRTCHNL2_FLOW_IPV4_UDP |\
+ VIRTCHNL2_FLOW_IPV4_SCTP |\
+ VIRTCHNL2_FLOW_IPV4_OTHER |\
+ VIRTCHNL2_FLOW_IPV6_TCP |\
+ VIRTCHNL2_FLOW_IPV6_UDP |\
+ VIRTCHNL2_FLOW_IPV6_SCTP |\
+ VIRTCHNL2_FLOW_IPV6_OTHER)
#define IDPF_CAP_RSC (\
VIRTCHNL2_CAP_RSC_IPV4_TCP |\
@@ -629,13 +749,13 @@ bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 |\
VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6)
-#define IDPF_CAP_RX_CSUM_L4V4 (\
- VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP |\
- VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP)
+#define IDPF_CAP_TX_CSUM_L4V4 (\
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP |\
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP)
-#define IDPF_CAP_RX_CSUM_L4V6 (\
- VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |\
- VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP)
+#define IDPF_CAP_TX_CSUM_L4V6 (\
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP |\
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP)
#define IDPF_CAP_RX_CSUM (\
VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 |\
@@ -644,11 +764,9 @@ bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |\
VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP)
-#define IDPF_CAP_SCTP_CSUM (\
+#define IDPF_CAP_TX_SCTP_CSUM (\
VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP |\
- VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP |\
- VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP |\
- VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP)
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP)
#define IDPF_CAP_TUNNEL_TX_CSUM (\
VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL |\
@@ -664,6 +782,17 @@ static inline u16 idpf_get_reserved_vecs(struct idpf_adapter *adapter)
}
/**
+ * idpf_get_reserved_rdma_vecs - Get reserved RDMA vectors
+ * @adapter: private data struct
+ *
+ * Return: number of vectors reserved for RDMA
+ */
+static inline u16 idpf_get_reserved_rdma_vecs(struct idpf_adapter *adapter)
+{
+ return le16_to_cpu(adapter->caps.num_rdma_allocated_vectors);
+}
+
+/**
* idpf_get_default_vports - Get default number of vports
* @adapter: private data struct
*/
@@ -702,6 +831,34 @@ static inline u8 idpf_get_min_tx_pkt_len(struct idpf_adapter *adapter)
}
/**
+ * idpf_get_mbx_reg_addr - Get BAR0 mailbox register address
+ * @adapter: private data struct
+ * @reg_offset: register offset value
+ *
+ * Return: BAR0 mailbox register address based on register offset.
+ */
+static inline void __iomem *idpf_get_mbx_reg_addr(struct idpf_adapter *adapter,
+ resource_size_t reg_offset)
+{
+ return adapter->hw.mbx.vaddr + reg_offset;
+}
+
+/**
+ * idpf_get_rstat_reg_addr - Get BAR0 rstat register address
+ * @adapter: private data struct
+ * @reg_offset: register offset value
+ *
+ * Return: BAR0 rstat register address based on register offset.
+ */
+static inline void __iomem *idpf_get_rstat_reg_addr(struct idpf_adapter *adapter,
+ resource_size_t reg_offset)
+{
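+ /* reg_offset is an absolute BAR0 offset; make it relative to the start
+ * of the rstat region before adding the region's mapped base address.
+ */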
+ reg_offset -= adapter->dev_ops.static_reg_info[1].start;
+
+ return adapter->hw.rstat.vaddr + reg_offset;
+}
+
+/**
* idpf_get_reg_addr - Get BAR0 register address
* @adapter: private data struct
* @reg_offset: register offset value
@@ -711,7 +868,30 @@ static inline u8 idpf_get_min_tx_pkt_len(struct idpf_adapter *adapter)
static inline void __iomem *idpf_get_reg_addr(struct idpf_adapter *adapter,
resource_size_t reg_offset)
{
- return (void __iomem *)(adapter->hw.hw_addr + reg_offset);
+ struct idpf_hw *hw = &adapter->hw;
+
+ for (int i = 0; i < hw->num_lan_regs; i++) {
+ struct idpf_mmio_reg *region = &hw->lan_regs[i];
+
+ if (reg_offset >= region->addr_start &&
+ reg_offset < (region->addr_start + region->addr_len)) {
+ /* Convert the offset so that it is relative to the
+ * start of the region. Then add the base address of
+ * the region to get the final address.
+ */
+ reg_offset -= region->addr_start;
+
+ return region->vaddr + reg_offset;
+ }
+ }
+
+ /* It's impossible to hit this case with offsets from the CP. But if we
+ * do for any other reason, the kernel will panic on that register
+ * access. Might as well do it here to make it clear what's happening.
+ */
+ BUG();
+
+ return NULL;
}
/**
@@ -725,7 +905,7 @@ static inline bool idpf_is_reset_detected(struct idpf_adapter *adapter)
if (!adapter->hw.arq)
return true;
- return !(readl(idpf_get_reg_addr(adapter, adapter->hw.arq->reg.len)) &
+ return !(readl(idpf_get_mbx_reg_addr(adapter, adapter->hw.arq->reg.len)) &
adapter->hw.arq->reg.len_mask);
}
@@ -811,6 +991,13 @@ static inline void idpf_vport_ctrl_unlock(struct net_device *netdev)
mutex_unlock(&np->adapter->vport_ctrl_lock);
}
+static inline bool idpf_vport_ctrl_is_locked(struct net_device *netdev)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+
+ return mutex_is_locked(&np->adapter->vport_ctrl_lock);
+}
+
void idpf_statistics_task(struct work_struct *work);
void idpf_init_task(struct work_struct *work);
void idpf_service_task(struct work_struct *work);
@@ -834,5 +1021,16 @@ int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs);
u8 idpf_vport_get_hsplit(const struct idpf_vport *vport);
bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val);
-
+int idpf_idc_init(struct idpf_adapter *adapter);
+int idpf_idc_init_aux_core_dev(struct idpf_adapter *adapter,
+ enum iidc_function_type ftype);
+void idpf_idc_deinit_core_aux_device(struct iidc_rdma_core_dev_info *cdev_info);
+void idpf_idc_deinit_vport_aux_device(struct iidc_rdma_vport_dev_info *vdev_info);
+void idpf_idc_issue_reset_event(struct iidc_rdma_core_dev_info *cdev_info);
+void idpf_idc_vdev_mtu_event(struct iidc_rdma_vport_dev_info *vdev_info,
+ enum iidc_rdma_event_type event_type);
+
+int idpf_add_del_fsteer_filters(struct idpf_adapter *adapter,
+ struct virtchnl2_flow_rule_add_del *rule,
+ enum virtchnl2_op opcode);
#endif /* !_IDPF_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.c b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
index 4849590a5591..67894eda2d29 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_controlq.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
@@ -36,19 +36,19 @@ static void idpf_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
{
/* Update tail to post pre-allocated buffers for rx queues */
if (is_rxq)
- wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1));
+ idpf_mbx_wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1));
/* For non-Mailbox control queues only TAIL need to be set */
if (cq->q_id != -1)
return;
/* Clear Head for both send or receive */
- wr32(hw, cq->reg.head, 0);
+ idpf_mbx_wr32(hw, cq->reg.head, 0);
/* set starting point */
- wr32(hw, cq->reg.bal, lower_32_bits(cq->desc_ring.pa));
- wr32(hw, cq->reg.bah, upper_32_bits(cq->desc_ring.pa));
- wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
+ idpf_mbx_wr32(hw, cq->reg.bal, lower_32_bits(cq->desc_ring.pa));
+ idpf_mbx_wr32(hw, cq->reg.bah, upper_32_bits(cq->desc_ring.pa));
+ idpf_mbx_wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
}
/**
@@ -96,7 +96,7 @@ static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
*/
static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
- mutex_lock(&cq->cq_lock);
+ spin_lock(&cq->cq_lock);
/* free ring buffers and the ring itself */
idpf_ctlq_dealloc_ring_res(hw, cq);
@@ -104,8 +104,7 @@ static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
/* Set ring_size to 0 to indicate uninitialized queue */
cq->ring_size = 0;
- mutex_unlock(&cq->cq_lock);
- mutex_destroy(&cq->cq_lock);
+ spin_unlock(&cq->cq_lock);
}
/**
@@ -173,7 +172,7 @@ int idpf_ctlq_add(struct idpf_hw *hw,
idpf_ctlq_init_regs(hw, cq, is_rxq);
- mutex_init(&cq->cq_lock);
+ spin_lock_init(&cq->cq_lock);
list_add(&cq->cq_list, &hw->cq_list_head);
@@ -272,7 +271,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
int err = 0;
int i;
- mutex_lock(&cq->cq_lock);
+ spin_lock(&cq->cq_lock);
/* Ensure there are enough descriptors to send all messages */
num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
@@ -329,10 +328,10 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
*/
dma_wmb();
- wr32(hw, cq->reg.tail, cq->next_to_use);
+ idpf_mbx_wr32(hw, cq->reg.tail, cq->next_to_use);
err_unlock:
- mutex_unlock(&cq->cq_lock);
+ spin_unlock(&cq->cq_lock);
return err;
}
@@ -364,7 +363,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
if (*clean_count > cq->ring_size)
return -EBADR;
- mutex_lock(&cq->cq_lock);
+ spin_lock(&cq->cq_lock);
ntc = cq->next_to_clean;
@@ -376,6 +375,9 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
if (!(le16_to_cpu(desc->flags) & IDPF_CTLQ_FLAG_DD))
break;
+ /* Ensure no other fields are read until DD flag is checked */
+ dma_rmb();
+
/* strip off FW internal code */
desc_err = le16_to_cpu(desc->ret_val) & 0xff;
@@ -394,7 +396,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
cq->next_to_clean = ntc;
- mutex_unlock(&cq->cq_lock);
+ spin_unlock(&cq->cq_lock);
/* Return number of descriptors actually cleaned */
*clean_count = i;
@@ -432,7 +434,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
if (*buff_count > 0)
buffs_avail = true;
- mutex_lock(&cq->cq_lock);
+ spin_lock(&cq->cq_lock);
if (tbp >= cq->ring_size)
tbp = 0;
@@ -518,10 +520,10 @@ post_buffs_out:
dma_wmb();
- wr32(hw, cq->reg.tail, cq->next_to_post);
+ idpf_mbx_wr32(hw, cq->reg.tail, cq->next_to_post);
}
- mutex_unlock(&cq->cq_lock);
+ spin_unlock(&cq->cq_lock);
/* return the number of buffers that were not posted */
*buff_count = *buff_count - i;
@@ -549,7 +551,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
u16 i;
/* take the lock before we start messing with the ring */
- mutex_lock(&cq->cq_lock);
+ spin_lock(&cq->cq_lock);
ntc = cq->next_to_clean;
@@ -563,6 +565,9 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
if (!(flags & IDPF_CTLQ_FLAG_DD))
break;
+ /* Ensure no other fields are read until DD flag is checked */
+ dma_rmb();
+
q_msg[i].vmvf_type = (flags &
(IDPF_CTLQ_FLAG_FTYPE_VM |
IDPF_CTLQ_FLAG_FTYPE_PF)) >>
@@ -608,7 +613,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
cq->next_to_clean = ntc;
- mutex_unlock(&cq->cq_lock);
+ spin_unlock(&cq->cq_lock);
*num_q_msg = i;
if (*num_q_msg == 0)
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.h b/drivers/net/ethernet/intel/idpf/idpf_controlq.h
index c1aba09e9856..de4ece40c2ff 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_controlq.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.h
@@ -94,12 +94,26 @@ struct idpf_mbxq_desc {
u32 pf_vf_id; /* used by CP when sending to PF */
};
+/* Max number of MMIO regions not including the mailbox and rstat regions in
+ * the fallback case when the whole BAR is mapped.
+ */
+#define IDPF_MMIO_MAP_FALLBACK_MAX_REMAINING 3
+
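+/* A single mapped MMIO region within BAR0: the kernel virtual address of the
+ * mapping plus the BAR-relative start offset and length it covers.
+ */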
+struct idpf_mmio_reg {
+ void __iomem *vaddr;
+ resource_size_t addr_start;
+ resource_size_t addr_len;
+};
+
/* Define the driver hardware struct to replace other control structs as needed
* Align to ctlq_hw_info
*/
struct idpf_hw {
- void __iomem *hw_addr;
- resource_size_t hw_addr_len;
+ struct idpf_mmio_reg mbx;
+ struct idpf_mmio_reg rstat;
+ /* Array of remaining LAN BAR regions */
+ int num_lan_regs;
+ struct idpf_mmio_reg *lan_regs;
struct idpf_adapter *back;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
index e8e046ef2f0d..3414c5f9a831 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
@@ -99,7 +99,7 @@ struct idpf_ctlq_info {
enum idpf_ctlq_type cq_type;
int q_id;
- struct mutex cq_lock; /* control queue lock */
+ spinlock_t cq_lock; /* control queue lock */
/* used for interrupt processing */
u16 next_to_use;
u16 next_to_clean;
@@ -123,9 +123,12 @@ struct idpf_ctlq_info {
/**
* enum idpf_mbx_opc - PF/VF mailbox commands
* @idpf_mbq_opc_send_msg_to_cp: used by PF or VF to send a message to its CP
+ * @idpf_mbq_opc_send_msg_to_peer_drv: used by PF or VF to send a message to
+ * any peer driver
*/
enum idpf_mbx_opc {
idpf_mbq_opc_send_msg_to_cp = 0x0801,
+ idpf_mbq_opc_send_msg_to_peer_drv = 0x0804,
};
/* API supported for control queue management */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c
index 6c913a703df6..3a04a6bd0d7c 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c
@@ -4,15 +4,19 @@
#include "idpf.h"
#include "idpf_lan_pf_regs.h"
#include "idpf_virtchnl.h"
+#include "idpf_ptp.h"
#define IDPF_PF_ITR_IDX_SPACING 0x4
/**
* idpf_ctlq_reg_init - initialize default mailbox registers
+ * @adapter: adapter structure
* @cq: pointer to the array of create control queues
*/
-static void idpf_ctlq_reg_init(struct idpf_ctlq_create_info *cq)
+static void idpf_ctlq_reg_init(struct idpf_adapter *adapter,
+ struct idpf_ctlq_create_info *cq)
{
+ resource_size_t mbx_start = adapter->dev_ops.static_reg_info[0].start;
int i;
for (i = 0; i < IDPF_NUM_DFLT_MBX_Q; i++) {
@@ -21,22 +25,22 @@ static void idpf_ctlq_reg_init(struct idpf_ctlq_create_info *cq)
switch (ccq->type) {
case IDPF_CTLQ_TYPE_MAILBOX_TX:
/* set head and tail registers in our local struct */
- ccq->reg.head = PF_FW_ATQH;
- ccq->reg.tail = PF_FW_ATQT;
- ccq->reg.len = PF_FW_ATQLEN;
- ccq->reg.bah = PF_FW_ATQBAH;
- ccq->reg.bal = PF_FW_ATQBAL;
+ ccq->reg.head = PF_FW_ATQH - mbx_start;
+ ccq->reg.tail = PF_FW_ATQT - mbx_start;
+ ccq->reg.len = PF_FW_ATQLEN - mbx_start;
+ ccq->reg.bah = PF_FW_ATQBAH - mbx_start;
+ ccq->reg.bal = PF_FW_ATQBAL - mbx_start;
ccq->reg.len_mask = PF_FW_ATQLEN_ATQLEN_M;
ccq->reg.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M;
ccq->reg.head_mask = PF_FW_ATQH_ATQH_M;
break;
case IDPF_CTLQ_TYPE_MAILBOX_RX:
/* set head and tail registers in our local struct */
- ccq->reg.head = PF_FW_ARQH;
- ccq->reg.tail = PF_FW_ARQT;
- ccq->reg.len = PF_FW_ARQLEN;
- ccq->reg.bah = PF_FW_ARQBAH;
- ccq->reg.bal = PF_FW_ARQBAL;
+ ccq->reg.head = PF_FW_ARQH - mbx_start;
+ ccq->reg.tail = PF_FW_ARQT - mbx_start;
+ ccq->reg.len = PF_FW_ARQLEN - mbx_start;
+ ccq->reg.bah = PF_FW_ARQBAH - mbx_start;
+ ccq->reg.bal = PF_FW_ARQBAL - mbx_start;
ccq->reg.len_mask = PF_FW_ARQLEN_ARQLEN_M;
ccq->reg.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M;
ccq->reg.head_mask = PF_FW_ARQH_ARQH_M;
@@ -73,7 +77,7 @@ static int idpf_intr_reg_init(struct idpf_vport *vport)
int num_vecs = vport->num_q_vectors;
struct idpf_vec_regs *reg_vals;
int num_regs, i, err = 0;
- u32 rx_itr, tx_itr;
+ u32 rx_itr, tx_itr, val;
u16 total_vecs;
total_vecs = idpf_get_reserved_vecs(vport->adapter);
@@ -101,6 +105,9 @@ static int idpf_intr_reg_init(struct idpf_vport *vport)
intr->dyn_ctl_itridx_s = PF_GLINT_DYN_CTL_ITR_INDX_S;
intr->dyn_ctl_intrvl_s = PF_GLINT_DYN_CTL_INTERVAL_S;
intr->dyn_ctl_wb_on_itr_m = PF_GLINT_DYN_CTL_WB_ON_ITR_M;
+ intr->dyn_ctl_swint_trig_m = PF_GLINT_DYN_CTL_SWINT_TRIG_M;
+ intr->dyn_ctl_sw_itridx_ena_m =
+ PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing,
IDPF_PF_ITR_IDX_SPACING);
@@ -114,6 +121,15 @@ static int idpf_intr_reg_init(struct idpf_vport *vport)
intr->tx_itr = idpf_get_reg_addr(adapter, tx_itr);
}
+ /* Data vector for NOIRQ queues */
+
+ val = reg_vals[vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC].dyn_ctl_reg;
+ vport->noirq_dyn_ctl = idpf_get_reg_addr(adapter, val);
+
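+ /* Enable value derived from the flag names below: request write-back on
+ * ITR with the interrupt kept masked and no ITR index update.
+ */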
+ val = PF_GLINT_DYN_CTL_WB_ON_ITR_M | PF_GLINT_DYN_CTL_INTENA_MSK_M |
+ FIELD_PREP(PF_GLINT_DYN_CTL_ITR_INDX_M, IDPF_NO_ITR_UPDATE_IDX);
+ vport->noirq_dyn_ctl_ena = val;
+
free_reg_vals:
kfree(reg_vals);
@@ -126,7 +142,7 @@ free_reg_vals:
*/
static void idpf_reset_reg_init(struct idpf_adapter *adapter)
{
- adapter->reset_reg.rstat = idpf_get_reg_addr(adapter, PFGEN_RSTAT);
+ adapter->reset_reg.rstat = idpf_get_rstat_reg_addr(adapter, PFGEN_RSTAT);
adapter->reset_reg.rstat_m = PFGEN_RSTAT_PFR_STATE_M;
}
@@ -140,9 +156,32 @@ static void idpf_trigger_reset(struct idpf_adapter *adapter,
{
u32 reset_reg;
- reset_reg = readl(idpf_get_reg_addr(adapter, PFGEN_CTRL));
+ reset_reg = readl(idpf_get_rstat_reg_addr(adapter, PFGEN_CTRL));
writel(reset_reg | PFGEN_CTRL_PFSWR,
- idpf_get_reg_addr(adapter, PFGEN_CTRL));
+ idpf_get_rstat_reg_addr(adapter, PFGEN_CTRL));
+}
+
+/**
+ * idpf_ptp_reg_init - Initialize required registers
+ * @adapter: Driver specific private structure
+ *
+ * Set the bits required for enabling shtime and cmd execution
+ */
+static void idpf_ptp_reg_init(const struct idpf_adapter *adapter)
+{
+ adapter->ptp->cmd.shtime_enable_mask = PF_GLTSYN_CMD_SYNC_SHTIME_EN_M;
+ adapter->ptp->cmd.exec_cmd_mask = PF_GLTSYN_CMD_SYNC_EXEC_CMD_M;
+}
+
+/**
+ * idpf_idc_register - register for IDC callbacks
+ * @adapter: Driver specific private structure
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_idc_register(struct idpf_adapter *adapter)
+{
+ return idpf_idc_init_aux_core_dev(adapter, IIDC_FUNCTION_TYPE_PF);
}
/**
@@ -156,6 +195,7 @@ static void idpf_reg_ops_init(struct idpf_adapter *adapter)
adapter->dev_ops.reg_ops.mb_intr_reg_init = idpf_mb_intr_reg_init;
adapter->dev_ops.reg_ops.reset_reg_init = idpf_reset_reg_init;
adapter->dev_ops.reg_ops.trigger_reset = idpf_trigger_reset;
+ adapter->dev_ops.reg_ops.ptp_reg_init = idpf_ptp_reg_init;
}
/**
@@ -165,4 +205,11 @@ static void idpf_reg_ops_init(struct idpf_adapter *adapter)
void idpf_dev_ops_init(struct idpf_adapter *adapter)
{
idpf_reg_ops_init(adapter);
+
+ adapter->dev_ops.idc_init = idpf_idc_register;
+
+ resource_set_range(&adapter->dev_ops.static_reg_info[0],
+ PF_FW_BASE, IDPF_PF_MBX_REGION_SZ);
+ resource_set_range(&adapter->dev_ops.static_reg_info[1],
+ PFGEN_RTRIG, IDPF_PF_RSTAT_REGION_SZ);
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
index 3806ddd3ce4a..2589e124e41c 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -2,6 +2,27 @@
/* Copyright (C) 2023 Intel Corporation */
#include "idpf.h"
+#include "idpf_ptp.h"
+#include "idpf_virtchnl.h"
+
+/**
+ * idpf_get_rx_ring_count - get RX ring count
+ * @netdev: network interface device structure
+ *
+ * Return: number of RX rings.
+ */
+static u32 idpf_get_rx_ring_count(struct net_device *netdev)
+{
+ struct idpf_vport *vport;
+ u32 num_rxq;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+ num_rxq = vport->num_rxq;
+ idpf_vport_ctrl_unlock(netdev);
+
+ return num_rxq;
+}
/**
* idpf_get_rxnfc - command to get RX flow classification rules
@@ -12,26 +33,309 @@
* Returns Success if the command is supported.
*/
static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
- u32 __always_unused *rule_locs)
+ u32 *rule_locs)
{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_user_config_data *user_config;
+ struct idpf_fsteer_fltr *f;
struct idpf_vport *vport;
+ unsigned int cnt = 0;
+ int err = 0;
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
+ user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = vport->num_rxq;
- idpf_vport_ctrl_unlock(netdev);
-
- return 0;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = user_config->num_fsteer_fltrs;
+ cmd->data = idpf_fsteer_max_rules(vport);
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ err = -EINVAL;
+ list_for_each_entry(f, &user_config->flow_steer_list, list)
+ if (f->loc == cmd->fs.location) {
+ cmd->fs.ring_cookie = f->q_index;
+ err = 0;
+ break;
+ }
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ cmd->data = idpf_fsteer_max_rules(vport);
+ list_for_each_entry(f, &user_config->flow_steer_list, list) {
+ if (cnt == cmd->rule_cnt) {
+ err = -EMSGSIZE;
+ break;
+ }
+ rule_locs[cnt] = f->loc;
+ cnt++;
+ }
+ if (!err)
+ cmd->rule_cnt = user_config->num_fsteer_fltrs;
+ break;
default:
break;
}
idpf_vport_ctrl_unlock(netdev);
- return -EOPNOTSUPP;
+ return err;
+}
+
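+/* The helpers below fill the virtchnl2 protocol header spec/mask pairs used
+ * by flow steering rules: proto_hdr[0] describes the L3 (IPv4) header and
+ * proto_hdr[1] the L4 (TCP or UDP) header.
+ */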
+static void idpf_fsteer_fill_ipv4(struct virtchnl2_proto_hdrs *hdrs,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ struct iphdr *iph;
+
+ hdrs->proto_hdr[0].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_IPV4);
+
+ iph = (struct iphdr *)hdrs->proto_hdr[0].buffer_spec;
+ iph->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
+ iph->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
+
+ iph = (struct iphdr *)hdrs->proto_hdr[0].buffer_mask;
+ iph->saddr = fsp->m_u.tcp_ip4_spec.ip4src;
+ iph->daddr = fsp->m_u.tcp_ip4_spec.ip4dst;
+}
+
+static void idpf_fsteer_fill_udp(struct virtchnl2_proto_hdrs *hdrs,
+ struct ethtool_rx_flow_spec *fsp,
+ bool v4)
+{
+ struct udphdr *udph, *udpm;
+
+ hdrs->proto_hdr[1].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_UDP);
+
+ udph = (struct udphdr *)hdrs->proto_hdr[1].buffer_spec;
+ udpm = (struct udphdr *)hdrs->proto_hdr[1].buffer_mask;
+
+ if (v4) {
+ udph->source = fsp->h_u.udp_ip4_spec.psrc;
+ udph->dest = fsp->h_u.udp_ip4_spec.pdst;
+ udpm->source = fsp->m_u.udp_ip4_spec.psrc;
+ udpm->dest = fsp->m_u.udp_ip4_spec.pdst;
+ } else {
+ udph->source = fsp->h_u.udp_ip6_spec.psrc;
+ udph->dest = fsp->h_u.udp_ip6_spec.pdst;
+ udpm->source = fsp->m_u.udp_ip6_spec.psrc;
+ udpm->dest = fsp->m_u.udp_ip6_spec.pdst;
+ }
+}
+
+static void idpf_fsteer_fill_tcp(struct virtchnl2_proto_hdrs *hdrs,
+ struct ethtool_rx_flow_spec *fsp,
+ bool v4)
+{
+ struct tcphdr *tcph, *tcpm;
+
+ hdrs->proto_hdr[1].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_TCP);
+
+ tcph = (struct tcphdr *)hdrs->proto_hdr[1].buffer_spec;
+ tcpm = (struct tcphdr *)hdrs->proto_hdr[1].buffer_mask;
+
+ if (v4) {
+ tcph->source = fsp->h_u.tcp_ip4_spec.psrc;
+ tcph->dest = fsp->h_u.tcp_ip4_spec.pdst;
+ tcpm->source = fsp->m_u.tcp_ip4_spec.psrc;
+ tcpm->dest = fsp->m_u.tcp_ip4_spec.pdst;
+ } else {
+ tcph->source = fsp->h_u.tcp_ip6_spec.psrc;
+ tcph->dest = fsp->h_u.tcp_ip6_spec.pdst;
+ tcpm->source = fsp->m_u.tcp_ip6_spec.psrc;
+ tcpm->dest = fsp->m_u.tcp_ip6_spec.pdst;
+ }
+}
+
+/**
+ * idpf_add_flow_steer - add a Flow Steering filter
+ * @netdev: network interface device structure
+ * @cmd: command to add Flow Steering filter
+ *
+ * Return: 0 on success and negative values for failure
+ */
+static int idpf_add_flow_steer(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct idpf_fsteer_fltr *fltr, *parent = NULL, *f;
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_user_config_data *user_config;
+ struct ethtool_rx_flow_spec *fsp = &cmd->fs;
+ struct virtchnl2_flow_rule_add_del *rule;
+ struct idpf_vport_config *vport_config;
+ struct virtchnl2_rule_action_set *acts;
+ struct virtchnl2_flow_rule_info *info;
+ struct virtchnl2_proto_hdrs *hdrs;
+ struct idpf_vport *vport;
+ u32 flow_type, q_index;
+ u16 num_rxq;
+ int err;
+
+ vport = idpf_netdev_to_vport(netdev);
+ vport_config = vport->adapter->vport_config[np->vport_idx];
+ user_config = &vport_config->user_config;
+ num_rxq = user_config->num_req_rx_qs;
+
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+ if (flow_type != fsp->flow_type)
+ return -EINVAL;
+
+ if (!idpf_sideband_action_ena(vport, fsp) ||
+ !idpf_sideband_flow_type_ena(vport, flow_type))
+ return -EOPNOTSUPP;
+
+ if (user_config->num_fsteer_fltrs > idpf_fsteer_max_rules(vport))
+ return -ENOSPC;
+
+ q_index = fsp->ring_cookie;
+ if (q_index >= num_rxq)
+ return -EINVAL;
+
+ rule = kzalloc(struct_size(rule, rule_info, 1), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ rule->vport_id = cpu_to_le32(vport->vport_id);
+ rule->count = cpu_to_le32(1);
+ info = &rule->rule_info[0];
+ info->rule_id = cpu_to_le32(fsp->location);
+
+ hdrs = &info->rule_cfg.proto_hdrs;
+ hdrs->tunnel_level = 0;
+ hdrs->count = cpu_to_le32(2);
+
+ acts = &info->rule_cfg.action_set;
+ acts->count = cpu_to_le32(1);
+ acts->actions[0].action_type = cpu_to_le32(VIRTCHNL2_ACTION_QUEUE);
+ acts->actions[0].act_conf.q_id = cpu_to_le32(q_index);
+
+ switch (flow_type) {
+ case UDP_V4_FLOW:
+ idpf_fsteer_fill_ipv4(hdrs, fsp);
+ idpf_fsteer_fill_udp(hdrs, fsp, true);
+ break;
+ case TCP_V4_FLOW:
+ idpf_fsteer_fill_ipv4(hdrs, fsp);
+ idpf_fsteer_fill_tcp(hdrs, fsp, true);
+ break;
+ default:
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = idpf_add_del_fsteer_filters(vport->adapter, rule,
+ VIRTCHNL2_OP_ADD_FLOW_RULE);
+ if (err)
+ goto out;
+
+ if (info->status != cpu_to_le32(VIRTCHNL2_FLOW_RULE_SUCCESS)) {
+ err = -EIO;
+ goto out;
+ }
+
+ fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
+ if (!fltr) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ fltr->loc = fsp->location;
+ fltr->q_index = q_index;
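+ /* Keep the filter list sorted by rule location: find the last entry
+ * with a lower location and insert the new filter right after it.
+ */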
+ list_for_each_entry(f, &user_config->flow_steer_list, list) {
+ if (f->loc >= fltr->loc)
+ break;
+ parent = f;
+ }
+
+ if (parent)
+ list_add(&fltr->list, &parent->list);
+ else
+ list_add(&fltr->list, &user_config->flow_steer_list);
+
+ user_config->num_fsteer_fltrs++;
+
+out:
+ kfree(rule);
+ return err;
+}
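+
+/* Illustrative user-space usage via the standard ethtool N-tuple interface
+ * (not part of this patch; actual support depends on the negotiated sideband
+ * capabilities):
+ *
+ *	ethtool -N <iface> flow-type tcp4 dst-port 80 action 2 loc 1
+ *	ethtool -N <iface> delete 1
+ */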
+
+/**
+ * idpf_del_flow_steer - delete a Flow Steering filter
+ * @netdev: network interface device structure
+ * @cmd: command to delete Flow Steering filter
+ *
+ * Return: 0 on success and negative values for failure
+ */
+static int idpf_del_flow_steer(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_user_config_data *user_config;
+ struct ethtool_rx_flow_spec *fsp = &cmd->fs;
+ struct virtchnl2_flow_rule_add_del *rule;
+ struct idpf_vport_config *vport_config;
+ struct virtchnl2_flow_rule_info *info;
+ struct idpf_fsteer_fltr *f, *iter;
+ struct idpf_vport *vport;
+ int err;
+
+ vport = idpf_netdev_to_vport(netdev);
+ vport_config = vport->adapter->vport_config[np->vport_idx];
+ user_config = &vport_config->user_config;
+
+ if (!idpf_sideband_action_ena(vport, fsp))
+ return -EOPNOTSUPP;
+
+ rule = kzalloc(struct_size(rule, rule_info, 1), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ rule->vport_id = cpu_to_le32(vport->vport_id);
+ rule->count = cpu_to_le32(1);
+ info = &rule->rule_info[0];
+ info->rule_id = cpu_to_le32(fsp->location);
+
+ err = idpf_add_del_fsteer_filters(vport->adapter, rule,
+ VIRTCHNL2_OP_DEL_FLOW_RULE);
+ if (err)
+ goto out;
+
+ if (info->status != cpu_to_le32(VIRTCHNL2_FLOW_RULE_SUCCESS)) {
+ err = -EIO;
+ goto out;
+ }
+
+ list_for_each_entry_safe(f, iter,
+ &user_config->flow_steer_list, list) {
+ if (f->loc == fsp->location) {
+ list_del(&f->list);
+ kfree(f);
+ user_config->num_fsteer_fltrs--;
+ goto out;
+ }
+ }
+ err = -EINVAL;
+
+out:
+ kfree(rule);
+ return err;
+}
+
+static int idpf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ int ret = -EOPNOTSUPP;
+
+ idpf_vport_ctrl_lock(netdev);
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ ret = idpf_add_flow_steer(netdev, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = idpf_del_flow_steer(netdev, cmd);
+ break;
+ default:
+ break;
+ }
+
+ idpf_vport_ctrl_unlock(netdev);
+ return ret;
}
/**
@@ -46,7 +350,7 @@ static u32 idpf_get_rxfh_key_size(struct net_device *netdev)
struct idpf_vport_user_config_data *user_config;
if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
- return -EOPNOTSUPP;
+ return 0;
user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
@@ -65,7 +369,7 @@ static u32 idpf_get_rxfh_indir_size(struct net_device *netdev)
struct idpf_vport_user_config_data *user_config;
if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
- return -EOPNOTSUPP;
+ return 0;
user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
@@ -98,7 +402,7 @@ static int idpf_get_rxfh(struct net_device *netdev,
}
rss_data = &adapter->vport_config[np->vport_idx]->user_config.rss_data;
- if (np->state != __IDPF_VPORT_UP)
+ if (!test_bit(IDPF_VPORT_UP, np->state))
goto unlock_mutex;
rxfh->hfunc = ETH_RSS_HASH_TOP;
@@ -148,7 +452,7 @@ static int idpf_set_rxfh(struct net_device *netdev,
}
rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
- if (np->state != __IDPF_VPORT_UP)
+ if (!test_bit(IDPF_VPORT_UP, np->state))
goto unlock_mutex;
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
@@ -879,7 +1183,7 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
- if (np->state != __IDPF_VPORT_UP) {
+ if (!test_bit(IDPF_VPORT_UP, np->state)) {
idpf_vport_ctrl_unlock(netdev);
return;
@@ -957,8 +1261,8 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
*
* returns pointer to rx vector
*/
-static struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
- int q_num)
+struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
+ u32 q_num)
{
int q_grp, q_idx;
@@ -978,8 +1282,8 @@ static struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
*
* returns pointer to tx vector
*/
-static struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
- int q_num)
+struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
+ u32 q_num)
{
int q_grp;
@@ -1031,7 +1335,7 @@ static int idpf_get_q_coalesce(struct net_device *netdev,
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
- if (np->state != __IDPF_VPORT_UP)
+ if (!test_bit(IDPF_VPORT_UP, np->state))
goto unlock_mutex;
if (q_num >= vport->num_rxq && q_num >= vport->num_txq) {
@@ -1089,12 +1393,14 @@ static int idpf_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
/**
* __idpf_set_q_coalesce - set ITR values for specific queue
* @ec: ethtool structure from user to update ITR settings
+ * @q_coal: per queue coalesce settings
* @qv: queue vector for which itr values has to be set
* @is_rxq: is queue type rx
*
* Returns 0 on success, negative otherwise.
*/
static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec,
+ struct idpf_q_coalesce *q_coal,
struct idpf_q_vector *qv, bool is_rxq)
{
u32 use_adaptive_coalesce, coalesce_usecs;
@@ -1138,20 +1444,25 @@ static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec,
if (is_rxq) {
qv->rx_itr_value = coalesce_usecs;
+ q_coal->rx_coalesce_usecs = coalesce_usecs;
if (use_adaptive_coalesce) {
qv->rx_intr_mode = IDPF_ITR_DYNAMIC;
+ q_coal->rx_intr_mode = IDPF_ITR_DYNAMIC;
} else {
qv->rx_intr_mode = !IDPF_ITR_DYNAMIC;
- idpf_vport_intr_write_itr(qv, qv->rx_itr_value,
- false);
+ q_coal->rx_intr_mode = !IDPF_ITR_DYNAMIC;
+ idpf_vport_intr_write_itr(qv, coalesce_usecs, false);
}
} else {
qv->tx_itr_value = coalesce_usecs;
+ q_coal->tx_coalesce_usecs = coalesce_usecs;
if (use_adaptive_coalesce) {
qv->tx_intr_mode = IDPF_ITR_DYNAMIC;
+ q_coal->tx_intr_mode = IDPF_ITR_DYNAMIC;
} else {
qv->tx_intr_mode = !IDPF_ITR_DYNAMIC;
- idpf_vport_intr_write_itr(qv, qv->tx_itr_value, true);
+ q_coal->tx_intr_mode = !IDPF_ITR_DYNAMIC;
+ idpf_vport_intr_write_itr(qv, coalesce_usecs, true);
}
}
@@ -1164,6 +1475,7 @@ static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec,
/**
* idpf_set_q_coalesce - set ITR values for specific queue
* @vport: vport associated to the queue that need updating
+ * @q_coal: per queue coalesce settings
* @ec: coalesce settings to program the device with
* @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
* @is_rxq: is queue type rx
@@ -1171,6 +1483,7 @@ static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec,
* Return 0 on success, and negative on failure
*/
static int idpf_set_q_coalesce(const struct idpf_vport *vport,
+ struct idpf_q_coalesce *q_coal,
const struct ethtool_coalesce *ec,
int q_num, bool is_rxq)
{
@@ -1179,7 +1492,7 @@ static int idpf_set_q_coalesce(const struct idpf_vport *vport,
qv = is_rxq ? idpf_find_rxq_vec(vport, q_num) :
idpf_find_txq_vec(vport, q_num);
- if (qv && __idpf_set_q_coalesce(ec, qv, is_rxq))
+ if (qv && __idpf_set_q_coalesce(ec, q_coal, qv, is_rxq))
return -EINVAL;
return 0;
@@ -1200,23 +1513,29 @@ static int idpf_set_coalesce(struct net_device *netdev,
struct netlink_ext_ack *extack)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_user_config_data *user_config;
+ struct idpf_q_coalesce *q_coal;
struct idpf_vport *vport;
int i, err = 0;
+ user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
+
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
- if (np->state != __IDPF_VPORT_UP)
+ if (!test_bit(IDPF_VPORT_UP, np->state))
goto unlock_mutex;
for (i = 0; i < vport->num_txq; i++) {
- err = idpf_set_q_coalesce(vport, ec, i, false);
+ q_coal = &user_config->q_coalesce[i];
+ err = idpf_set_q_coalesce(vport, q_coal, ec, i, false);
if (err)
goto unlock_mutex;
}
for (i = 0; i < vport->num_rxq; i++) {
- err = idpf_set_q_coalesce(vport, ec, i, true);
+ q_coal = &user_config->q_coalesce[i];
+ err = idpf_set_q_coalesce(vport, q_coal, ec, i, true);
if (err)
goto unlock_mutex;
}
@@ -1238,20 +1557,25 @@ unlock_mutex:
static int idpf_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
struct ethtool_coalesce *ec)
{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_user_config_data *user_config;
+ struct idpf_q_coalesce *q_coal;
struct idpf_vport *vport;
int err;
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
+ user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
+ q_coal = &user_config->q_coalesce[q_num];
- err = idpf_set_q_coalesce(vport, ec, q_num, false);
+ err = idpf_set_q_coalesce(vport, q_coal, ec, q_num, false);
if (err) {
idpf_vport_ctrl_unlock(netdev);
return err;
}
- err = idpf_set_q_coalesce(vport, ec, q_num, true);
+ err = idpf_set_q_coalesce(vport, q_coal, ec, q_num, true);
idpf_vport_ctrl_unlock(netdev);
@@ -1296,27 +1620,142 @@ static void idpf_set_msglevel(struct net_device *netdev, u32 data)
static int idpf_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
- struct idpf_vport *vport;
-
- idpf_vport_ctrl_lock(netdev);
- vport = idpf_netdev_to_vport(netdev);
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
ethtool_link_ksettings_zero_link_mode(cmd, supported);
cmd->base.autoneg = AUTONEG_DISABLE;
cmd->base.port = PORT_NONE;
- if (vport->link_up) {
+ if (netif_carrier_ok(netdev)) {
cmd->base.duplex = DUPLEX_FULL;
- cmd->base.speed = vport->link_speed_mbps;
+ cmd->base.speed = np->link_speed_mbps;
} else {
cmd->base.duplex = DUPLEX_UNKNOWN;
cmd->base.speed = SPEED_UNKNOWN;
}
- idpf_vport_ctrl_unlock(netdev);
-
return 0;
}
+/**
+ * idpf_get_timestamp_filters - Get the supported timestamping mode
+ * @vport: Virtual port structure
+ * @info: ethtool timestamping info structure
+ *
+ * Get the Tx/Rx timestamp filters.
+ */
+static void idpf_get_timestamp_filters(const struct idpf_vport *vport,
+ struct kernel_ethtool_ts_info *info)
+{
+ info->so_timestamping = SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+
+ if (!vport->tx_tstamp_caps ||
+ vport->adapter->ptp->tx_tstamp_access == IDPF_PTP_NONE)
+ return;
+
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE;
+
+ info->tx_types |= BIT(HWTSTAMP_TX_ON);
+}
+
+/**
+ * idpf_get_ts_info - Get device PHC association
+ * @netdev: network interface device structure
+ * @info: ethtool timestamping info structure
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_get_ts_info(struct net_device *netdev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport *vport;
+ int err = 0;
+
+ if (!mutex_trylock(&np->adapter->vport_ctrl_lock))
+ return -EBUSY;
+
+ vport = idpf_netdev_to_vport(netdev);
+
+ if (!vport->adapter->ptp) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ if (idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PTP) &&
+ vport->adapter->ptp->clock) {
+ info->phc_index = ptp_clock_index(vport->adapter->ptp->clock);
+ idpf_get_timestamp_filters(vport, info);
+ } else {
+ pci_dbg(vport->adapter->pdev, "PTP clock not detected\n");
+ err = ethtool_op_get_ts_info(netdev, info);
+ }
+
+unlock:
+ mutex_unlock(&np->adapter->vport_ctrl_lock);
+
+ return err;
+}
+
+/**
+ * idpf_get_ts_stats - Collect HW tstamping statistics
+ * @netdev: network interface device structure
+ * @ts_stats: HW timestamping stats structure
+ *
+ * Collect HW timestamping statistics, including the number of packets
+ * successfully timestamped by hardware, packets discarded due to illegal
+ * values, packets flushed while releasing PTP, and packets skipped due to
+ * the lack of a free index.
+ */
+static void idpf_get_ts_stats(struct net_device *netdev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport *vport;
+ unsigned int start;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+ do {
+ start = u64_stats_fetch_begin(&vport->tstamp_stats.stats_sync);
+ ts_stats->pkts = u64_stats_read(&vport->tstamp_stats.packets);
+ ts_stats->lost = u64_stats_read(&vport->tstamp_stats.flushed);
+ ts_stats->err = u64_stats_read(&vport->tstamp_stats.discarded);
+ } while (u64_stats_fetch_retry(&vport->tstamp_stats.stats_sync, start));
+
+ if (!test_bit(IDPF_VPORT_UP, np->state))
+ goto exit;
+
+ for (u16 i = 0; i < vport->num_txq_grp; i++) {
+ struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+
+ for (u16 j = 0; j < txq_grp->num_txq; j++) {
+ struct idpf_tx_queue *txq = txq_grp->txqs[j];
+ struct idpf_tx_queue_stats *stats;
+ u64 ts;
+
+ if (!txq)
+ continue;
+
+ stats = &txq->q_stats;
+ do {
+ start = u64_stats_fetch_begin(&txq->stats_sync);
+
+ ts = u64_stats_read(&stats->tstamp_skipped);
+ } while (u64_stats_fetch_retry(&txq->stats_sync,
+ start));
+
+ ts_stats->lost += ts;
+ }
+ }
+
+exit:
+ idpf_vport_ctrl_unlock(netdev);
+}
+
static const struct ethtool_ops idpf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE,
@@ -1333,6 +1772,8 @@ static const struct ethtool_ops idpf_ethtool_ops = {
.get_sset_count = idpf_get_sset_count,
.get_channels = idpf_get_channels,
.get_rxnfc = idpf_get_rxnfc,
+ .set_rxnfc = idpf_set_rxnfc,
+ .get_rx_ring_count = idpf_get_rx_ring_count,
.get_rxfh_key_size = idpf_get_rxfh_key_size,
.get_rxfh_indir_size = idpf_get_rxfh_indir_size,
.get_rxfh = idpf_get_rxfh,
@@ -1341,6 +1782,8 @@ static const struct ethtool_ops idpf_ethtool_ops = {
.get_ringparam = idpf_get_ringparam,
.set_ringparam = idpf_set_ringparam,
.get_link_ksettings = idpf_get_link_ksettings,
+ .get_ts_info = idpf_get_ts_info,
+ .get_ts_stats = idpf_get_ts_stats,
};
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_idc.c b/drivers/net/ethernet/intel/idpf/idpf_idc.c
new file mode 100644
index 000000000000..7e20a07e98e5
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_idc.c
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2025 Intel Corporation */
+
+#include <linux/export.h>
+
+#include "idpf.h"
+#include "idpf_virtchnl.h"
+
+static DEFINE_IDA(idpf_idc_ida);
+
+#define IDPF_IDC_MAX_ADEV_NAME_LEN 15
+
+/**
+ * idpf_idc_init - Called to initialize IDC
+ * @adapter: driver private data structure
+ *
+ * Return: 0 on success or cap not enabled, error code on failure.
+ */
+int idpf_idc_init(struct idpf_adapter *adapter)
+{
+ int err;
+
+ if (!idpf_is_rdma_cap_ena(adapter) ||
+ !adapter->dev_ops.idc_init)
+ return 0;
+
+ err = adapter->dev_ops.idc_init(adapter);
+ if (err)
+ dev_err(&adapter->pdev->dev, "failed to initialize idc: %d\n",
+ err);
+
+ return err;
+}
+
+/**
+ * idpf_vport_adev_release - function to be mapped to aux dev's release op
+ * @dev: pointer to device to free
+ */
+static void idpf_vport_adev_release(struct device *dev)
+{
+ struct iidc_rdma_vport_auxiliary_dev *iadev;
+
+ iadev = container_of(dev, struct iidc_rdma_vport_auxiliary_dev, adev.dev);
+ kfree(iadev);
+ iadev = NULL;
+}
+
+/**
+ * idpf_plug_vport_aux_dev - allocate and register a vport Auxiliary device
+ * @cdev_info: IDC core device info pointer
+ * @vdev_info: IDC vport device info pointer
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_plug_vport_aux_dev(struct iidc_rdma_core_dev_info *cdev_info,
+ struct iidc_rdma_vport_dev_info *vdev_info)
+{
+ struct iidc_rdma_vport_auxiliary_dev *iadev;
+ char name[IDPF_IDC_MAX_ADEV_NAME_LEN];
+ struct auxiliary_device *adev;
+ int ret;
+
+ iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
+ if (!iadev)
+ return -ENOMEM;
+
+ adev = &iadev->adev;
+ vdev_info->adev = &iadev->adev;
+ iadev->vdev_info = vdev_info;
+
+ ret = ida_alloc(&idpf_idc_ida, GFP_KERNEL);
+ if (ret < 0) {
+ pr_err("failed to allocate unique device ID for Auxiliary driver\n");
+ goto err_ida_alloc;
+ }
+ adev->id = ret;
+ adev->dev.release = idpf_vport_adev_release;
+ adev->dev.parent = &cdev_info->pdev->dev;
+ sprintf(name, "%04x.rdma.vdev", cdev_info->pdev->vendor);
+ adev->name = name;
+
+ ret = auxiliary_device_init(adev);
+ if (ret)
+ goto err_aux_dev_init;
+
+ ret = auxiliary_device_add(adev);
+ if (ret)
+ goto err_aux_dev_add;
+
+ return 0;
+
+err_aux_dev_add:
+ auxiliary_device_uninit(adev);
+err_aux_dev_init:
+ ida_free(&idpf_idc_ida, adev->id);
+err_ida_alloc:
+ vdev_info->adev = NULL;
+ kfree(iadev);
+
+ return ret;
+}
+
+/**
+ * idpf_idc_init_aux_vport_dev - initialize vport Auxiliary Device(s)
+ * @vport: virtual port data struct
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_idc_init_aux_vport_dev(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct iidc_rdma_vport_dev_info *vdev_info;
+ struct iidc_rdma_core_dev_info *cdev_info;
+ struct virtchnl2_create_vport *vport_msg;
+ int err;
+
+ vport_msg = (struct virtchnl2_create_vport *)
+ adapter->vport_params_recvd[vport->idx];
+
+ if (!(le16_to_cpu(vport_msg->vport_flags) & VIRTCHNL2_VPORT_ENABLE_RDMA))
+ return 0;
+
+ vport->vdev_info = kzalloc(sizeof(*vdev_info), GFP_KERNEL);
+ if (!vport->vdev_info)
+ return -ENOMEM;
+
+ cdev_info = vport->adapter->cdev_info;
+
+ vdev_info = vport->vdev_info;
+ vdev_info->vport_id = vport->vport_id;
+ vdev_info->netdev = vport->netdev;
+ vdev_info->core_adev = cdev_info->adev;
+
+ err = idpf_plug_vport_aux_dev(cdev_info, vdev_info);
+ if (err) {
+ vport->vdev_info = NULL;
+ kfree(vdev_info);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * idpf_idc_vdev_mtu_event - Function to handle IDC vport mtu change events
+ * @vdev_info: IDC vport device info pointer
+ * @event_type: type of event to pass to handler
+ */
+void idpf_idc_vdev_mtu_event(struct iidc_rdma_vport_dev_info *vdev_info,
+ enum iidc_rdma_event_type event_type)
+{
+ struct iidc_rdma_vport_auxiliary_drv *iadrv;
+ struct iidc_rdma_event event = { };
+ struct auxiliary_device *adev;
+
+ if (!vdev_info)
+ /* RDMA is not enabled */
+ return;
+
+ set_bit(event_type, event.type);
+
+ device_lock(&vdev_info->adev->dev);
+ adev = vdev_info->adev;
+ if (!adev || !adev->dev.driver)
+ goto unlock;
+ iadrv = container_of(adev->dev.driver,
+ struct iidc_rdma_vport_auxiliary_drv,
+ adrv.driver);
+ if (iadrv->event_handler)
+ iadrv->event_handler(vdev_info, &event);
+unlock:
+ device_unlock(&vdev_info->adev->dev);
+}
+
+/**
+ * idpf_core_adev_release - function to be mapped to aux dev's release op
+ * @dev: pointer to device to free
+ */
+static void idpf_core_adev_release(struct device *dev)
+{
+ struct iidc_rdma_core_auxiliary_dev *iadev;
+
+ iadev = container_of(dev, struct iidc_rdma_core_auxiliary_dev, adev.dev);
+ kfree(iadev);
+ iadev = NULL;
+}
+
+/**
+ * idpf_plug_core_aux_dev - allocate and register an Auxiliary device
+ * @cdev_info: IDC core device info pointer
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_plug_core_aux_dev(struct iidc_rdma_core_dev_info *cdev_info)
+{
+ struct iidc_rdma_core_auxiliary_dev *iadev;
+ char name[IDPF_IDC_MAX_ADEV_NAME_LEN];
+ struct auxiliary_device *adev;
+ int ret;
+
+ iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
+ if (!iadev)
+ return -ENOMEM;
+
+ adev = &iadev->adev;
+ cdev_info->adev = adev;
+ iadev->cdev_info = cdev_info;
+
+ ret = ida_alloc(&idpf_idc_ida, GFP_KERNEL);
+ if (ret < 0) {
+ pr_err("failed to allocate unique device ID for Auxiliary driver\n");
+ goto err_ida_alloc;
+ }
+ adev->id = ret;
+ adev->dev.release = idpf_core_adev_release;
+ adev->dev.parent = &cdev_info->pdev->dev;
+ sprintf(name, "%04x.rdma.core", cdev_info->pdev->vendor);
+ adev->name = name;
+
+ ret = auxiliary_device_init(adev);
+ if (ret)
+ goto err_aux_dev_init;
+
+ ret = auxiliary_device_add(adev);
+ if (ret)
+ goto err_aux_dev_add;
+
+ return 0;
+
+err_aux_dev_add:
+ auxiliary_device_uninit(adev);
+err_aux_dev_init:
+ ida_free(&idpf_idc_ida, adev->id);
+err_ida_alloc:
+ cdev_info->adev = NULL;
+ kfree(iadev);
+
+ return ret;
+}
+
+/**
+ * idpf_unplug_aux_dev - unregister and free an Auxiliary device
+ * @adev: auxiliary device struct
+ */
+static void idpf_unplug_aux_dev(struct auxiliary_device *adev)
+{
+ if (!adev)
+ return;
+
+ ida_free(&idpf_idc_ida, adev->id);
+
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+}
+
+/**
+ * idpf_idc_issue_reset_event - Function to handle reset IDC event
+ * @cdev_info: IDC core device info pointer
+ */
+void idpf_idc_issue_reset_event(struct iidc_rdma_core_dev_info *cdev_info)
+{
+ enum iidc_rdma_event_type event_type = IIDC_RDMA_EVENT_WARN_RESET;
+ struct iidc_rdma_core_auxiliary_drv *iadrv;
+ struct iidc_rdma_event event = { };
+ struct auxiliary_device *adev;
+
+ if (!cdev_info)
+ /* RDMA is not enabled */
+ return;
+
+ set_bit(event_type, event.type);
+
+ device_lock(&cdev_info->adev->dev);
+
+ adev = cdev_info->adev;
+ if (!adev || !adev->dev.driver)
+ goto unlock;
+
+ iadrv = container_of(adev->dev.driver,
+ struct iidc_rdma_core_auxiliary_drv,
+ adrv.driver);
+ if (iadrv->event_handler)
+ iadrv->event_handler(cdev_info, &event);
+unlock:
+ device_unlock(&cdev_info->adev->dev);
+}
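A hedged sketch of the receiving end: an RDMA core auxiliary driver supplies an event_handler in its iidc_rdma_core_auxiliary_drv so the dispatch above has something to call. Everything named example_* is an assumption for illustration, not part of this patch.

static void example_core_event_handler(struct iidc_rdma_core_dev_info *cdev_info,
				       struct iidc_rdma_event *event)
{
	/* idpf_idc_issue_reset_event() sets IIDC_RDMA_EVENT_WARN_RESET */
	if (test_bit(IIDC_RDMA_EVENT_WARN_RESET, event->type))
		dev_warn(&cdev_info->pdev->dev, "core device reset pending\n");
}

static struct iidc_rdma_core_auxiliary_drv example_core_adrv = {
	/* .adrv.probe/.remove omitted from this sketch */
	.event_handler = example_core_event_handler,
};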
+
+/**
+ * idpf_idc_vport_dev_up - called when CORE is ready for vport aux devs
+ * @adapter: private data struct
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_idc_vport_dev_up(struct idpf_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_alloc_vports; i++) {
+ struct idpf_vport *vport = adapter->vports[i];
+
+ if (!vport)
+ continue;
+
+ if (!vport->vdev_info)
+ err = idpf_idc_init_aux_vport_dev(vport);
+ else
+ err = idpf_plug_vport_aux_dev(vport->adapter->cdev_info,
+ vport->vdev_info);
+ }
+
+ return err;
+}
+
+/**
+ * idpf_idc_vport_dev_down - called when CORE is leaving vport aux dev support state
+ * @adapter: private data struct
+ */
+static void idpf_idc_vport_dev_down(struct idpf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_alloc_vports; i++) {
+ struct idpf_vport *vport = adapter->vports[i];
+
+ if (!vport)
+ continue;
+
+ idpf_unplug_aux_dev(vport->vdev_info->adev);
+ vport->vdev_info->adev = NULL;
+ }
+}
+
+/**
+ * idpf_idc_vport_dev_ctrl - Called by an Auxiliary Driver
+ * @cdev_info: IDC core device info pointer
+ * @up: RDMA core driver status
+ *
+ * This callback function is accessed by an Auxiliary Driver to indicate
+ * whether core driver is ready to support vport driver load or if vport
+ * drivers need to be taken down.
+ *
+ * Return: 0 on success or error code on failure.
+ */
+int idpf_idc_vport_dev_ctrl(struct iidc_rdma_core_dev_info *cdev_info, bool up)
+{
+ struct idpf_adapter *adapter = pci_get_drvdata(cdev_info->pdev);
+
+ if (up)
+ return idpf_idc_vport_dev_up(adapter);
+
+ idpf_idc_vport_dev_down(adapter);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(idpf_idc_vport_dev_ctrl);
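For illustration only, this is roughly how the exported helper above is expected to be used from the RDMA core auxiliary driver's probe once it is ready for vport devices; example_core_probe() and its surroundings are assumptions, not part of this patch.

static int example_core_probe(struct auxiliary_device *adev,
			      const struct auxiliary_device_id *id)
{
	struct iidc_rdma_core_auxiliary_dev *iadev;

	iadev = container_of(adev, struct iidc_rdma_core_auxiliary_dev, adev);

	/* ... RDMA-side setup would go here ... */

	/* Ask idpf to create/plug the per-vport auxiliary devices */
	return idpf_idc_vport_dev_ctrl(iadev->cdev_info, true);
}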
+
+/**
+ * idpf_idc_request_reset - Called by an Auxiliary Driver
+ * @cdev_info: IDC core device info pointer
+ * @reset_type: function, core or other
+ *
+ * This callback function is accessed by an Auxiliary Driver to request a reset
+ * on the Auxiliary Device.
+ *
+ * Return: 0 on success or error code on failure.
+ */
+int idpf_idc_request_reset(struct iidc_rdma_core_dev_info *cdev_info,
+ enum iidc_rdma_reset_type __always_unused reset_type)
+{
+ struct idpf_adapter *adapter = pci_get_drvdata(cdev_info->pdev);
+
+ if (!idpf_is_reset_in_prog(adapter)) {
+ set_bit(IDPF_HR_FUNC_RESET, adapter->flags);
+ queue_delayed_work(adapter->vc_event_wq,
+ &adapter->vc_event_task,
+ msecs_to_jiffies(10));
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(idpf_idc_request_reset);
+
+/**
+ * idpf_idc_init_msix_data - initialize MSIX data for the cdev_info structure
+ * @adapter: driver private data structure
+ */
+static void
+idpf_idc_init_msix_data(struct idpf_adapter *adapter)
+{
+ struct iidc_rdma_core_dev_info *cdev_info;
+ struct iidc_rdma_priv_dev_info *privd;
+
+ if (!adapter->rdma_msix_entries)
+ return;
+
+ cdev_info = adapter->cdev_info;
+ privd = cdev_info->iidc_priv;
+
+ privd->msix_entries = adapter->rdma_msix_entries;
+ privd->msix_count = adapter->num_rdma_msix_entries;
+}
+
+/**
+ * idpf_idc_init_aux_core_dev - initialize Auxiliary Device(s)
+ * @adapter: driver private data structure
+ * @ftype: PF or VF
+ *
+ * Return: 0 on success or error code on failure.
+ */
+int idpf_idc_init_aux_core_dev(struct idpf_adapter *adapter,
+ enum iidc_function_type ftype)
+{
+ struct iidc_rdma_core_dev_info *cdev_info;
+ struct iidc_rdma_priv_dev_info *privd;
+ int err, i;
+
+ adapter->cdev_info = kzalloc(sizeof(*cdev_info), GFP_KERNEL);
+ if (!adapter->cdev_info)
+ return -ENOMEM;
+ cdev_info = adapter->cdev_info;
+
+ privd = kzalloc(sizeof(*privd), GFP_KERNEL);
+ if (!privd) {
+ err = -ENOMEM;
+ goto err_privd_alloc;
+ }
+
+ cdev_info->iidc_priv = privd;
+ cdev_info->pdev = adapter->pdev;
+ cdev_info->rdma_protocol = IIDC_RDMA_PROTOCOL_ROCEV2;
+ privd->ftype = ftype;
+
+ privd->mapped_mem_regions =
+ kcalloc(adapter->hw.num_lan_regs,
+ sizeof(struct iidc_rdma_lan_mapped_mem_region),
+ GFP_KERNEL);
+ if (!privd->mapped_mem_regions) {
+ err = -ENOMEM;
+ goto err_plug_aux_dev;
+ }
+
+ privd->num_memory_regions = cpu_to_le16(adapter->hw.num_lan_regs);
+ for (i = 0; i < adapter->hw.num_lan_regs; i++) {
+ privd->mapped_mem_regions[i].region_addr =
+ adapter->hw.lan_regs[i].vaddr;
+ privd->mapped_mem_regions[i].size =
+ cpu_to_le64(adapter->hw.lan_regs[i].addr_len);
+ privd->mapped_mem_regions[i].start_offset =
+ cpu_to_le64(adapter->hw.lan_regs[i].addr_start);
+ }
+
+ idpf_idc_init_msix_data(adapter);
+
+ err = idpf_plug_core_aux_dev(cdev_info);
+ if (err)
+ goto err_free_mem_regions;
+
+ return 0;
+
+err_free_mem_regions:
+ kfree(privd->mapped_mem_regions);
+ privd->mapped_mem_regions = NULL;
+err_plug_aux_dev:
+ kfree(privd);
+err_privd_alloc:
+ kfree(cdev_info);
+ adapter->cdev_info = NULL;
+
+ return err;
+}
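A minimal sketch of how an auxiliary driver would bind to the core device created above: the auxiliary bus matches the parent module name plus the adev name, which here is built from the PCI vendor ID (0x8086 for Intel). The table below only illustrates that naming and is an assumption, not code from this series.

static const struct auxiliary_device_id example_core_id_table[] = {
	/* "<parent module>.<adev->name>" => "idpf.8086.rdma.core" */
	{ .name = "idpf.8086.rdma.core" },
	{ }
};
MODULE_DEVICE_TABLE(auxiliary, example_core_id_table);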
+
+/**
+ * idpf_idc_deinit_core_aux_device - de-initialize Auxiliary Device(s)
+ * @cdev_info: IDC core device info pointer
+ */
+void idpf_idc_deinit_core_aux_device(struct iidc_rdma_core_dev_info *cdev_info)
+{
+ struct iidc_rdma_priv_dev_info *privd;
+
+ if (!cdev_info)
+ return;
+
+ idpf_unplug_aux_dev(cdev_info->adev);
+
+ privd = cdev_info->iidc_priv;
+ kfree(privd->mapped_mem_regions);
+ kfree(privd);
+ kfree(cdev_info);
+}
+
+/**
+ * idpf_idc_deinit_vport_aux_device - de-initialize Auxiliary Device(s)
+ * @vdev_info: IDC vport device info pointer
+ */
+void idpf_idc_deinit_vport_aux_device(struct iidc_rdma_vport_dev_info *vdev_info)
+{
+ if (!vdev_info)
+ return;
+
+ idpf_unplug_aux_dev(vdev_info->adev);
+
+ kfree(vdev_info);
+}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h b/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h
index 24edb8a6ec2e..cc9aa2b6a14a 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h
@@ -53,6 +53,10 @@
#define PF_FW_ATQH_ATQH_M GENMASK(9, 0)
#define PF_FW_ATQT (PF_FW_BASE + 0x24)
+/* Timesync registers */
+#define PF_GLTSYN_CMD_SYNC_EXEC_CMD_M GENMASK(1, 0)
+#define PF_GLTSYN_CMD_SYNC_SHTIME_EN_M BIT(2)
+
/* Interrupts */
#define PF_GLINT_BASE 0x08900000
#define PF_GLINT_DYN_CTL(_INT) (PF_GLINT_BASE + ((_INT) * 0x1000))
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
index 8c7f8ef8f1a1..20d5af64e750 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
@@ -186,13 +186,17 @@ struct idpf_base_tx_desc {
__le64 qw1; /* type_cmd_offset_bsz_l2tag1 */
}; /* read used with buffer queues */
-struct idpf_splitq_tx_compl_desc {
+struct idpf_splitq_4b_tx_compl_desc {
/* qid=[10:0] comptype=[13:11] rsvd=[14] gen=[15] */
__le16 qid_comptype_gen;
union {
__le16 q_head; /* Queue head */
__le16 compl_tag; /* Completion tag */
} q_head_compl_tag;
+}; /* writeback used with completion queues */
+
+struct idpf_splitq_tx_compl_desc {
+ struct idpf_splitq_4b_tx_compl_desc common;
u8 ts[3];
u8 rsvd; /* Reserved */
}; /* writeback used with completion queues */
@@ -282,7 +286,18 @@ struct idpf_flex_tx_tso_ctx_qw {
u8 flex;
};
-struct idpf_flex_tx_ctx_desc {
+union idpf_flex_tx_ctx_desc {
+ /* DTYPE = IDPF_TX_DESC_DTYPE_CTX (0x01) */
+ struct {
+ __le64 qw0;
+#define IDPF_TX_CTX_L2TAG2_M GENMASK_ULL(47, 32)
+ __le64 qw1;
+#define IDPF_TX_CTX_DTYPE_M GENMASK_ULL(3, 0)
+#define IDPF_TX_CTX_CMD_M GENMASK_ULL(15, 4)
+#define IDPF_TX_CTX_TSYN_REG_M GENMASK_ULL(47, 30)
+#define IDPF_TX_CTX_MSS_M	GENMASK_ULL(63, 50)
+ } tsyn;
+
/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX (0x05) */
struct {
struct idpf_flex_tx_tso_ctx_qw qw0;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 4f20343e49a9..7a7e101afeb6 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -3,6 +3,9 @@
#include "idpf.h"
#include "idpf_virtchnl.h"
+#include "idpf_ptp.h"
+#include "xdp.h"
+#include "xsk.h"
static const struct net_device_ops idpf_netdev_ops;
@@ -87,6 +90,8 @@ void idpf_intr_rel(struct idpf_adapter *adapter)
idpf_deinit_vector_stack(adapter);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
+ kfree(adapter->rdma_msix_entries);
+ adapter->rdma_msix_entries = NULL;
}
/**
@@ -144,22 +149,6 @@ static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter)
}
/**
- * idpf_set_mb_vec_id - Set vector index for mailbox
- * @adapter: adapter structure to access the vector chunks
- *
- * The first vector id in the requested vector chunks from the CP is for
- * the mailbox
- */
-static void idpf_set_mb_vec_id(struct idpf_adapter *adapter)
-{
- if (adapter->req_vec_chunks)
- adapter->mb_vector.v_idx =
- le16_to_cpu(adapter->caps.mailbox_vector_id);
- else
- adapter->mb_vector.v_idx = 0;
-}
-
-/**
* idpf_mb_intr_init - Initialize the mailbox interrupt
* @adapter: adapter structure to store the mailbox vector
*/
@@ -314,13 +303,33 @@ rel_lock:
*/
int idpf_intr_req(struct idpf_adapter *adapter)
{
+ u16 num_lan_vecs, min_lan_vecs, num_rdma_vecs = 0, min_rdma_vecs = 0;
u16 default_vports = idpf_get_default_vports(adapter);
int num_q_vecs, total_vecs, num_vec_ids;
- int min_vectors, v_actual, err;
+ int min_vectors, actual_vecs, err;
unsigned int vector;
u16 *vecids;
+ int i;
total_vecs = idpf_get_reserved_vecs(adapter);
+ num_lan_vecs = total_vecs;
+ if (idpf_is_rdma_cap_ena(adapter)) {
+ num_rdma_vecs = idpf_get_reserved_rdma_vecs(adapter);
+ min_rdma_vecs = IDPF_MIN_RDMA_VEC;
+
+ if (!num_rdma_vecs) {
+ /* If idpf_get_reserved_rdma_vecs is 0, vectors are
+ * pulled from the LAN pool.
+ */
+ num_rdma_vecs = min_rdma_vecs;
+ } else if (num_rdma_vecs < min_rdma_vecs) {
+ dev_err(&adapter->pdev->dev,
+ "Not enough vectors reserved for RDMA (min: %u, current: %u)\n",
+ min_rdma_vecs, num_rdma_vecs);
+ return -EINVAL;
+ }
+ }
+
num_q_vecs = total_vecs - IDPF_MBX_Q_VEC;
err = idpf_send_alloc_vectors_msg(adapter, num_q_vecs);
@@ -331,52 +340,76 @@ int idpf_intr_req(struct idpf_adapter *adapter)
return -EAGAIN;
}
- min_vectors = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports;
- v_actual = pci_alloc_irq_vectors(adapter->pdev, min_vectors,
- total_vecs, PCI_IRQ_MSIX);
- if (v_actual < min_vectors) {
- dev_err(&adapter->pdev->dev, "Failed to allocate MSIX vectors: %d\n",
- v_actual);
- err = -EAGAIN;
+ min_lan_vecs = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports;
+ min_vectors = min_lan_vecs + min_rdma_vecs;
+ actual_vecs = pci_alloc_irq_vectors(adapter->pdev, min_vectors,
+ total_vecs, PCI_IRQ_MSIX);
+ if (actual_vecs < 0) {
+ dev_err(&adapter->pdev->dev, "Failed to allocate minimum MSIX vectors required: %d\n",
+ min_vectors);
+ err = actual_vecs;
goto send_dealloc_vecs;
}
- adapter->msix_entries = kcalloc(v_actual, sizeof(struct msix_entry),
- GFP_KERNEL);
+ if (idpf_is_rdma_cap_ena(adapter)) {
+ if (actual_vecs < total_vecs) {
+ dev_warn(&adapter->pdev->dev,
+ "Warning: %d vectors requested, only %d available. Defaulting to minimum (%d) for RDMA and remaining for LAN.\n",
+ total_vecs, actual_vecs, IDPF_MIN_RDMA_VEC);
+ num_rdma_vecs = IDPF_MIN_RDMA_VEC;
+ }
+
+ adapter->rdma_msix_entries = kcalloc(num_rdma_vecs,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!adapter->rdma_msix_entries) {
+ err = -ENOMEM;
+ goto free_irq;
+ }
+ }
+ num_lan_vecs = actual_vecs - num_rdma_vecs;
+ adapter->msix_entries = kcalloc(num_lan_vecs, sizeof(struct msix_entry),
+ GFP_KERNEL);
if (!adapter->msix_entries) {
err = -ENOMEM;
- goto free_irq;
+ goto free_rdma_msix;
}
- idpf_set_mb_vec_id(adapter);
+ adapter->mb_vector.v_idx = le16_to_cpu(adapter->caps.mailbox_vector_id);
- vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
+ vecids = kcalloc(actual_vecs, sizeof(u16), GFP_KERNEL);
if (!vecids) {
err = -ENOMEM;
goto free_msix;
}
- num_vec_ids = idpf_get_vec_ids(adapter, vecids, total_vecs,
+ num_vec_ids = idpf_get_vec_ids(adapter, vecids, actual_vecs,
&adapter->req_vec_chunks->vchunks);
- if (num_vec_ids < v_actual) {
+ if (num_vec_ids < actual_vecs) {
err = -EINVAL;
goto free_vecids;
}
- for (vector = 0; vector < v_actual; vector++) {
+ for (vector = 0; vector < num_lan_vecs; vector++) {
adapter->msix_entries[vector].entry = vecids[vector];
adapter->msix_entries[vector].vector =
pci_irq_vector(adapter->pdev, vector);
}
+ for (i = 0; i < num_rdma_vecs; vector++, i++) {
+ adapter->rdma_msix_entries[i].entry = vecids[vector];
+ adapter->rdma_msix_entries[i].vector =
+ pci_irq_vector(adapter->pdev, vector);
+ }
- adapter->num_req_msix = total_vecs;
- adapter->num_msix_entries = v_actual;
/* 'num_avail_msix' is used to distribute excess vectors to the vports
* after considering the minimum vectors required per each default
* vport
*/
- adapter->num_avail_msix = v_actual - min_vectors;
+ adapter->num_avail_msix = num_lan_vecs - min_lan_vecs;
+ adapter->num_msix_entries = num_lan_vecs;
+ if (idpf_is_rdma_cap_ena(adapter))
+ adapter->num_rdma_msix_entries = num_rdma_vecs;
/* Fill MSIX vector lifo stack with vector indexes */
err = idpf_init_vector_stack(adapter);
@@ -398,6 +431,9 @@ free_vecids:
free_msix:
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
+free_rdma_msix:
+ kfree(adapter->rdma_msix_entries);
+ adapter->rdma_msix_entries = NULL;
free_irq:
pci_free_irq_vectors(adapter->pdev);
send_dealloc_vecs:
@@ -483,7 +519,7 @@ static int idpf_del_mac_filter(struct idpf_vport *vport,
}
spin_unlock_bh(&vport_config->mac_filter_list_lock);
- if (np->state == __IDPF_VPORT_UP) {
+ if (test_bit(IDPF_VPORT_UP, np->state)) {
int err;
err = idpf_add_del_mac_filters(vport, np, false, async);
@@ -554,7 +590,7 @@ static int idpf_add_mac_filter(struct idpf_vport *vport,
if (err)
return err;
- if (np->state == __IDPF_VPORT_UP)
+ if (test_bit(IDPF_VPORT_UP, np->state))
err = idpf_add_del_mac_filters(vport, np, true, async);
return err;
@@ -703,8 +739,10 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
struct idpf_vport_config *vport_config;
+ netdev_features_t other_offloads = 0;
+ netdev_features_t csum_offloads = 0;
+ netdev_features_t tso_offloads = 0;
netdev_features_t dflt_features;
- netdev_features_t offloads = 0;
struct idpf_netdev_priv *np;
struct net_device *netdev;
u16 idx = vport->idx;
@@ -721,6 +759,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
np->vport = vport;
np->vport_idx = vport->idx;
np->vport_id = vport->vport_id;
+ np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter);
vport->netdev = netdev;
return idpf_init_mac_addr(vport, netdev);
@@ -738,6 +777,8 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
np->adapter = adapter;
np->vport_idx = vport->idx;
np->vport_id = vport->vport_id;
+ np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter);
+ np->tx_max_bufs = idpf_get_max_tx_bufs(adapter);
spin_lock_init(&np->stats_lock);
@@ -766,54 +807,40 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
dflt_features |= NETIF_F_RXHASH;
- if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V4))
- dflt_features |= NETIF_F_IP_CSUM;
- if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V6))
- dflt_features |= NETIF_F_IPV6_CSUM;
+ if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS,
+ VIRTCHNL2_CAP_FLOW_STEER) &&
+ idpf_vport_is_cap_ena(vport, VIRTCHNL2_VPORT_SIDEBAND_FLOW_STEER))
+ dflt_features |= NETIF_F_NTUPLE;
+ if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V4))
+ csum_offloads |= NETIF_F_IP_CSUM;
+ if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V6))
+ csum_offloads |= NETIF_F_IPV6_CSUM;
if (idpf_is_cap_ena(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM))
- dflt_features |= NETIF_F_RXCSUM;
- if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_SCTP_CSUM))
- dflt_features |= NETIF_F_SCTP_CRC;
+ csum_offloads |= NETIF_F_RXCSUM;
+ if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_SCTP_CSUM))
+ csum_offloads |= NETIF_F_SCTP_CRC;
if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV4_TCP))
- dflt_features |= NETIF_F_TSO;
+ tso_offloads |= NETIF_F_TSO;
if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV6_TCP))
- dflt_features |= NETIF_F_TSO6;
+ tso_offloads |= NETIF_F_TSO6;
if (idpf_is_cap_ena_all(adapter, IDPF_SEG_CAPS,
VIRTCHNL2_CAP_SEG_IPV4_UDP |
VIRTCHNL2_CAP_SEG_IPV6_UDP))
- dflt_features |= NETIF_F_GSO_UDP_L4;
+ tso_offloads |= NETIF_F_GSO_UDP_L4;
if (idpf_is_cap_ena_all(adapter, IDPF_RSC_CAPS, IDPF_CAP_RSC))
- offloads |= NETIF_F_GRO_HW;
- /* advertise to stack only if offloads for encapsulated packets is
- * supported
- */
- if (idpf_is_cap_ena(vport->adapter, IDPF_SEG_CAPS,
- VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL)) {
- offloads |= NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_GRE |
- NETIF_F_GSO_GRE_CSUM |
- NETIF_F_GSO_PARTIAL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM |
- NETIF_F_GSO_IPXIP4 |
- NETIF_F_GSO_IPXIP6 |
- 0;
-
- if (!idpf_is_cap_ena_all(vport->adapter, IDPF_CSUM_CAPS,
- IDPF_CAP_TUNNEL_TX_CSUM))
- netdev->gso_partial_features |=
- NETIF_F_GSO_UDP_TUNNEL_CSUM;
-
- netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
- offloads |= NETIF_F_TSO_MANGLEID;
- }
+ other_offloads |= NETIF_F_GRO_HW;
if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LOOPBACK))
- offloads |= NETIF_F_LOOPBACK;
+ other_offloads |= NETIF_F_LOOPBACK;
+
+ netdev->features |= dflt_features | csum_offloads | tso_offloads;
+ netdev->hw_features |= netdev->features | other_offloads;
+ netdev->vlan_features |= netdev->features | other_offloads;
+ netdev->hw_enc_features |= dflt_features | other_offloads;
+ idpf_xdp_set_features(vport);
- netdev->features |= dflt_features;
- netdev->hw_features |= dflt_features | offloads;
- netdev->hw_enc_features |= dflt_features | offloads;
idpf_set_ethtool_ops(netdev);
+ netif_set_affinity_auto(netdev);
SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
/* carrier off on init to avoid Tx hangs */
@@ -861,14 +888,18 @@ static void idpf_remove_features(struct idpf_vport *vport)
/**
* idpf_vport_stop - Disable a vport
* @vport: vport to disable
+ * @rtnl: whether to take RTNL lock
*/
-static void idpf_vport_stop(struct idpf_vport *vport)
+static void idpf_vport_stop(struct idpf_vport *vport, bool rtnl)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
- if (np->state <= __IDPF_VPORT_DOWN)
+ if (!test_bit(IDPF_VPORT_UP, np->state))
return;
+ if (rtnl)
+ rtnl_lock();
+
netif_carrier_off(vport->netdev);
netif_tx_disable(vport->netdev);
@@ -887,9 +918,13 @@ static void idpf_vport_stop(struct idpf_vport *vport)
vport->link_up = false;
idpf_vport_intr_deinit(vport);
+ idpf_xdp_rxq_info_deinit_all(vport);
idpf_vport_queues_rel(vport);
idpf_vport_intr_rel(vport);
- np->state = __IDPF_VPORT_DOWN;
+ clear_bit(IDPF_VPORT_UP, np->state);
+
+ if (rtnl)
+ rtnl_unlock();
}
/**
@@ -913,7 +948,7 @@ static int idpf_stop(struct net_device *netdev)
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
- idpf_vport_stop(vport);
+ idpf_vport_stop(vport, false);
idpf_vport_ctrl_unlock(netdev);
@@ -927,15 +962,19 @@ static int idpf_stop(struct net_device *netdev)
static void idpf_decfg_netdev(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
+ u16 idx = vport->idx;
kfree(vport->rx_ptype_lkup);
vport->rx_ptype_lkup = NULL;
- unregister_netdev(vport->netdev);
- free_netdev(vport->netdev);
+ if (test_and_clear_bit(IDPF_VPORT_REG_NETDEV,
+ adapter->vport_config[idx]->flags)) {
+ unregister_netdev(vport->netdev);
+ free_netdev(vport->netdev);
+ }
vport->netdev = NULL;
- adapter->netdevs[vport->idx] = NULL;
+ adapter->netdevs[idx] = NULL;
}
/**
@@ -999,8 +1038,10 @@ static void idpf_vport_dealloc(struct idpf_vport *vport)
struct idpf_adapter *adapter = vport->adapter;
unsigned int i = vport->idx;
+ idpf_idc_deinit_vport_aux_device(vport->vdev_info);
+
idpf_deinit_mac_addr(vport);
- idpf_vport_stop(vport);
+ idpf_vport_stop(vport, true);
if (!test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
idpf_decfg_netdev(vport);
@@ -1106,8 +1147,10 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
if (!vport)
return vport;
+ num_max_q = max(max_q->max_txq, max_q->max_rxq) + IDPF_RESERVED_VECS;
if (!adapter->vport_config[idx]) {
struct idpf_vport_config *vport_config;
+ struct idpf_q_coalesce *q_coal;
vport_config = kzalloc(sizeof(*vport_config), GFP_KERNEL);
if (!vport_config) {
@@ -1116,6 +1159,21 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
return NULL;
}
+ q_coal = kcalloc(num_max_q, sizeof(*q_coal), GFP_KERNEL);
+ if (!q_coal) {
+ kfree(vport_config);
+ kfree(vport);
+
+ return NULL;
+ }
+ for (int i = 0; i < num_max_q; i++) {
+ q_coal[i].tx_intr_mode = IDPF_ITR_DYNAMIC;
+ q_coal[i].tx_coalesce_usecs = IDPF_ITR_TX_DEF;
+ q_coal[i].rx_intr_mode = IDPF_ITR_DYNAMIC;
+ q_coal[i].rx_coalesce_usecs = IDPF_ITR_RX_DEF;
+ }
+ vport_config->user_config.q_coalesce = q_coal;
+
adapter->vport_config[idx] = vport_config;
}
@@ -1125,13 +1183,10 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
vport->default_vport = adapter->num_alloc_vports <
idpf_get_default_vports(adapter);
- num_max_q = max(max_q->max_txq, max_q->max_rxq);
vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL);
- if (!vport->q_vector_idxs) {
- kfree(vport);
+ if (!vport->q_vector_idxs)
+ goto free_vport;
- return NULL;
- }
idpf_vport_init(vport, max_q);
/* This alloc is done separate from the LUT because it's not strictly
@@ -1141,11 +1196,9 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
*/
rss_data = &adapter->vport_config[idx]->user_config.rss_data;
rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL);
- if (!rss_data->rss_key) {
- kfree(vport);
+ if (!rss_data->rss_key)
+ goto free_vector_idxs;
- return NULL;
- }
/* Initialize default rss key */
netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size);
@@ -1158,6 +1211,13 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
adapter->next_vport = idpf_get_free_slot(adapter);
return vport;
+
+free_vector_idxs:
+ kfree(vport->q_vector_idxs);
+free_vport:
+ kfree(vport);
+
+ return NULL;
}
/**
@@ -1261,13 +1321,13 @@ static void idpf_restore_features(struct idpf_vport *vport)
*/
static int idpf_set_real_num_queues(struct idpf_vport *vport)
{
- int err;
+ int err, txq = vport->num_txq - vport->num_xdp_txq;
err = netif_set_real_num_rx_queues(vport->netdev, vport->num_rxq);
if (err)
return err;
- return netif_set_real_num_tx_queues(vport->netdev, vport->num_txq);
+ return netif_set_real_num_tx_queues(vport->netdev, txq);
}
/**
@@ -1285,7 +1345,7 @@ static int idpf_up_complete(struct idpf_vport *vport)
netif_tx_start_all_queues(vport->netdev);
}
- np->state = __IDPF_VPORT_UP;
+ set_bit(IDPF_VPORT_UP, np->state);
return 0;
}
@@ -1322,17 +1382,21 @@ static void idpf_rx_init_buf_tail(struct idpf_vport *vport)
/**
* idpf_vport_open - Bring up a vport
* @vport: vport to bring up
+ * @rtnl: whether to take RTNL lock
*/
-static int idpf_vport_open(struct idpf_vport *vport)
+static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
struct idpf_adapter *adapter = vport->adapter;
struct idpf_vport_config *vport_config;
int err;
- if (np->state != __IDPF_VPORT_DOWN)
+ if (test_bit(IDPF_VPORT_UP, np->state))
return -EBUSY;
+ if (rtnl)
+ rtnl_lock();
+
/* we do not allow interface up just yet */
netif_carrier_off(vport->netdev);
@@ -1340,7 +1404,7 @@ static int idpf_vport_open(struct idpf_vport *vport)
if (err) {
dev_err(&adapter->pdev->dev, "Failed to allocate interrupts for vport %u: %d\n",
vport->vport_id, err);
- return err;
+ goto err_rtnl_unlock;
}
err = idpf_vport_queues_alloc(vport);
@@ -1361,35 +1425,44 @@ static int idpf_vport_open(struct idpf_vport *vport)
goto queues_rel;
}
- err = idpf_rx_bufs_init_all(vport);
+ err = idpf_queue_reg_init(vport);
if (err) {
- dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n",
+ dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
vport->vport_id, err);
goto queues_rel;
}
- err = idpf_queue_reg_init(vport);
+ err = idpf_rx_bufs_init_all(vport);
if (err) {
- dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
+ dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n",
vport->vport_id, err);
goto queues_rel;
}
idpf_rx_init_buf_tail(vport);
+
+ err = idpf_xdp_rxq_info_init_all(vport);
+ if (err) {
+ netdev_err(vport->netdev,
+ "Failed to initialize XDP RxQ info for vport %u: %pe\n",
+ vport->vport_id, ERR_PTR(err));
+ goto intr_deinit;
+ }
+
idpf_vport_intr_ena(vport);
err = idpf_send_config_queues_msg(vport);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to configure queues for vport %u, %d\n",
vport->vport_id, err);
- goto intr_deinit;
+ goto rxq_deinit;
}
err = idpf_send_map_unmap_queue_vector_msg(vport, true);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to map queue vectors for vport %u: %d\n",
vport->vport_id, err);
- goto intr_deinit;
+ goto rxq_deinit;
}
err = idpf_send_enable_queues_msg(vport);
@@ -1427,6 +1500,9 @@ static int idpf_vport_open(struct idpf_vport *vport)
goto deinit_rss;
}
+ if (rtnl)
+ rtnl_unlock();
+
return 0;
deinit_rss:
@@ -1437,6 +1513,8 @@ disable_queues:
idpf_send_disable_queues_msg(vport);
unmap_queue_vectors:
idpf_send_map_unmap_queue_vector_msg(vport, false);
+rxq_deinit:
+ idpf_xdp_rxq_info_deinit_all(vport);
intr_deinit:
idpf_vport_intr_deinit(vport);
queues_rel:
@@ -1444,6 +1522,10 @@ queues_rel:
intr_rel:
idpf_vport_intr_rel(vport);
+err_rtnl_unlock:
+ if (rtnl)
+ rtnl_unlock();
+
return err;
}
@@ -1500,11 +1582,10 @@ void idpf_init_task(struct work_struct *work)
index = vport->idx;
vport_config = adapter->vport_config[index];
- init_waitqueue_head(&vport->sw_marker_wq);
-
spin_lock_init(&vport_config->mac_filter_list_lock);
INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list);
+ INIT_LIST_HEAD(&vport_config->user_config.flow_steer_list);
err = idpf_check_supported_desc_ids(vport);
if (err) {
@@ -1521,9 +1602,9 @@ void idpf_init_task(struct work_struct *work)
/* Once state is put into DOWN, driver is ready for dev_open */
np = netdev_priv(vport->netdev);
- np->state = __IDPF_VPORT_DOWN;
+ clear_bit(IDPF_VPORT_UP, np->state);
if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED, vport_config->flags))
- idpf_vport_open(vport);
+ idpf_vport_open(vport, true);
/* Spawn and return 'idpf_init_task' work queue until all the
* default vports are created
@@ -1536,13 +1617,22 @@ void idpf_init_task(struct work_struct *work)
}
for (index = 0; index < adapter->max_vports; index++) {
- if (adapter->netdevs[index] &&
- !test_bit(IDPF_VPORT_REG_NETDEV,
- adapter->vport_config[index]->flags)) {
- register_netdev(adapter->netdevs[index]);
- set_bit(IDPF_VPORT_REG_NETDEV,
- adapter->vport_config[index]->flags);
+ struct net_device *netdev = adapter->netdevs[index];
+ struct idpf_vport_config *vport_config;
+
+ vport_config = adapter->vport_config[index];
+
+ if (!netdev ||
+ test_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags))
+ continue;
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register netdev for vport %d: %pe\n",
+ index, ERR_PTR(err));
+ continue;
}
+ set_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags);
}
/* As all the required vports are created, clear the reset flag
@@ -1711,7 +1801,7 @@ static void idpf_set_vport_state(struct idpf_adapter *adapter)
continue;
np = netdev_priv(adapter->netdevs[i]);
- if (np->state == __IDPF_VPORT_UP)
+ if (test_bit(IDPF_VPORT_UP, np->state))
set_bit(IDPF_VPORT_UP_REQUESTED,
adapter->vport_config[i]->flags);
}
@@ -1753,6 +1843,8 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter)
} else if (test_and_clear_bit(IDPF_HR_FUNC_RESET, adapter->flags)) {
bool is_reset = idpf_is_reset_detected(adapter);
+ idpf_idc_issue_reset_event(adapter->cdev_info);
+
idpf_set_vport_state(adapter);
idpf_vc_core_deinit(adapter);
if (!is_reset)
@@ -1786,6 +1878,7 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter)
*/
err = idpf_vc_core_init(adapter);
if (err) {
+ cancel_delayed_work_sync(&adapter->mbx_task);
idpf_deinit_dflt_mbx(adapter);
goto unlock_mutex;
}
@@ -1799,6 +1892,10 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter)
unlock_mutex:
mutex_unlock(&adapter->vport_ctrl_lock);
+ /* Wait until all vports are created to init RDMA CORE AUX */
+ if (!err)
+ err = idpf_idc_init(adapter);
+
return err;
}
@@ -1815,11 +1912,19 @@ void idpf_vc_event_task(struct work_struct *work)
if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
return;
- if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags) ||
- test_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
- set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
- idpf_init_hard_reset(adapter);
- }
+ if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags))
+ goto func_reset;
+
+ if (test_bit(IDPF_HR_DRV_LOAD, adapter->flags))
+ goto drv_load;
+
+ return;
+
+func_reset:
+ idpf_vc_xn_shutdown(adapter->vcxn_mngr);
+drv_load:
+ set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
+ idpf_init_hard_reset(adapter);
}
/**
@@ -1834,7 +1939,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
enum idpf_vport_reset_cause reset_cause)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
- enum idpf_vport_state current_state = np->state;
+ bool vport_is_up = test_bit(IDPF_VPORT_UP, np->state);
struct idpf_adapter *adapter = vport->adapter;
struct idpf_vport *new_vport;
int err;
@@ -1860,7 +1965,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
* mess with. Nothing below should use those variables from new_vport
* and should instead always refer to them in vport if they need to.
*/
- memcpy(new_vport, vport, offsetof(struct idpf_vport, link_speed_mbps));
+ memcpy(new_vport, vport, offsetof(struct idpf_vport, link_up));
/* Adjust resource parameters prior to reallocating resources */
switch (reset_cause) {
@@ -1874,6 +1979,9 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
idpf_vport_calc_num_q_desc(new_vport);
break;
case IDPF_SR_MTU_CHANGE:
+ idpf_idc_vdev_mtu_event(vport->vdev_info,
+ IIDC_RDMA_EVENT_BEFORE_MTU_CHANGE);
+ break;
case IDPF_SR_RSC_CHANGE:
break;
default:
@@ -1882,11 +1990,11 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
goto free_vport;
}
- if (current_state <= __IDPF_VPORT_DOWN) {
+ if (!vport_is_up) {
idpf_send_delete_queues_msg(vport);
} else {
set_bit(IDPF_VPORT_DEL_QUEUES, vport->flags);
- idpf_vport_stop(vport);
+ idpf_vport_stop(vport, false);
}
idpf_deinit_rss(vport);
@@ -1906,7 +2014,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
/* Same comment as above regarding avoiding copying the wait_queues and
* mutexes applies here. We do not want to mess with those if possible.
*/
- memcpy(vport, new_vport, offsetof(struct idpf_vport, link_speed_mbps));
+ memcpy(vport, new_vport, offsetof(struct idpf_vport, link_up));
if (reset_cause == IDPF_SR_Q_CHANGE)
idpf_vport_alloc_vec_indexes(vport);
@@ -1915,24 +2023,26 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
if (err)
goto err_open;
- if (current_state == __IDPF_VPORT_UP)
- err = idpf_vport_open(vport);
+ if (vport_is_up)
+ err = idpf_vport_open(vport, false);
- kfree(new_vport);
-
- return err;
+ goto free_vport;
err_reset:
idpf_send_add_queues_msg(vport, vport->num_txq, vport->num_complq,
vport->num_rxq, vport->num_bufq);
err_open:
- if (current_state == __IDPF_VPORT_UP)
- idpf_vport_open(vport);
+ if (vport_is_up)
+ idpf_vport_open(vport, false);
free_vport:
kfree(new_vport);
+ if (reset_cause == IDPF_SR_MTU_CHANGE)
+ idpf_idc_vdev_mtu_event(vport->vdev_info,
+ IIDC_RDMA_EVENT_AFTER_MTU_CHANGE);
+
return err;
}
@@ -2158,8 +2268,13 @@ static int idpf_open(struct net_device *netdev)
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
- err = idpf_vport_open(vport);
+ err = idpf_set_real_num_queues(vport);
+ if (err)
+ goto unlock;
+ err = idpf_vport_open(vport, false);
+
+unlock:
idpf_vport_ctrl_unlock(netdev);
return err;
@@ -2190,6 +2305,92 @@ static int idpf_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
+ * idpf_chk_tso_segment - Check skb is not using too many buffers
+ * @skb: send buffer
+ * @max_bufs: maximum number of buffers
+ *
+ * For TSO we need to count the TSO header and segment payload separately. As
+ * such we need to check cases where we have max_bufs-1 fragments or more as we
+ * can potentially require max_bufs+1 DMA transactions, 1 for the TSO header, 1
+ * for the segment payload in the first descriptor, and another max_bufs-1 for
+ * the fragments.
+ *
+ * Returns true if the packet needs to be software segmented by core stack.
+ */
+static bool idpf_chk_tso_segment(const struct sk_buff *skb,
+ unsigned int max_bufs)
+{
+ const struct skb_shared_info *shinfo = skb_shinfo(skb);
+ const skb_frag_t *frag, *stale;
+ int nr_frags, sum;
+
+ /* no need to check if number of frags is less than max_bufs - 1 */
+ nr_frags = shinfo->nr_frags;
+ if (nr_frags < (max_bufs - 1))
+ return false;
+
+ /* We need to walk through the list and validate that each group
+ * of max_bufs-2 fragments totals at least gso_size.
+ */
+ nr_frags -= max_bufs - 2;
+ frag = &shinfo->frags[0];
+
+ /* Initialize size to the negative value of gso_size minus 1. We use
+ * this as the worst case scenario in which the frag ahead of us only
+ * provides one byte which is why we are limited to max_bufs-2
+ * descriptors for a single transmit as the header and previous
+ * fragment are already consuming 2 descriptors.
+ */
+ sum = 1 - shinfo->gso_size;
+
+ /* Add size of frags 0 through 4 to create our initial sum */
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+
+ /* Walk through fragments adding latest fragment, testing it, and
+ * then removing stale fragments from the sum.
+ */
+ for (stale = &shinfo->frags[0];; stale++) {
+ int stale_size = skb_frag_size(stale);
+
+ sum += skb_frag_size(frag++);
+
+ /* The stale fragment may present us with a smaller
+ * descriptor than the actual fragment size. To account
+ * for that we need to remove all the data on the front and
+ * figure out what the remainder would be in the last
+ * descriptor associated with the fragment.
+ */
+ if (stale_size > IDPF_TX_MAX_DESC_DATA) {
+ int align_pad = -(skb_frag_off(stale)) &
+ (IDPF_TX_MAX_READ_REQ_SIZE - 1);
+
+ sum -= align_pad;
+ stale_size -= align_pad;
+
+ do {
+ sum -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
+ stale_size -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
+ } while (stale_size > IDPF_TX_MAX_DESC_DATA);
+ }
+
+ /* if sum is negative we failed to make sufficient progress */
+ if (sum < 0)
+ return true;
+
+ if (!nr_frags--)
+ break;
+
+ sum -= stale_size;
+ }
+
+ return false;
+}
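As a rough illustration of the check above (numbers invented): with max_bufs = 8 and gso_size = 2000, take a TSO skb with ten fragments whose first seven hold only 100 bytes each. The running sum starts at 1 - 2000 + 5 * 100 = -1499, and adding the next 100-byte fragment still leaves it negative, so the function returns true and the stack software-segments the skb, since one segment's payload plus header would otherwise need more descriptors than max_bufs allows.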
+
+/**
* idpf_features_check - Validate packet conforms to limits
* @skb: skb buffer
* @netdev: This port's netdev
@@ -2199,8 +2400,8 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb,
struct net_device *netdev,
netdev_features_t features)
{
- struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
- struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ u16 max_tx_hdr_size = np->max_tx_hdr_size;
size_t len;
/* No point in doing any of this if neither checksum nor GSO are
@@ -2210,12 +2411,15 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb,
if (skb->ip_summed != CHECKSUM_PARTIAL)
return features;
- /* We cannot support GSO if the MSS is going to be less than
- * 88 bytes. If it is then we need to drop support for GSO.
- */
- if (skb_is_gso(skb) &&
- (skb_shinfo(skb)->gso_size < IDPF_TX_TSO_MIN_MSS))
- features &= ~NETIF_F_GSO_MASK;
+ if (skb_is_gso(skb)) {
+ /* We cannot support GSO if the MSS is going to be less than
+ * 88 bytes. If it is then we need to drop support for GSO.
+ */
+ if (skb_shinfo(skb)->gso_size < IDPF_TX_TSO_MIN_MSS)
+ features &= ~NETIF_F_GSO_MASK;
+ else if (idpf_chk_tso_segment(skb, np->tx_max_bufs))
+ features &= ~NETIF_F_GSO_MASK;
+ }
/* Ensure MACLEN is <= 126 bytes (63 words) and not an odd size */
len = skb_network_offset(skb);
@@ -2223,7 +2427,7 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb,
goto unsupported;
len = skb_network_header_len(skb);
- if (unlikely(len > idpf_get_max_tx_hdr_size(adapter)))
+ if (unlikely(len > max_tx_hdr_size))
goto unsupported;
if (!skb->encapsulation)
@@ -2236,7 +2440,7 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb,
/* IPLEN can support at most 127 dwords */
len = skb_inner_network_header_len(skb);
- if (unlikely(len > idpf_get_max_tx_hdr_size(adapter)))
+ if (unlikely(len > max_tx_hdr_size))
goto unsupported;
/* No need to validate L4LEN as TCP is the only protocol with a
@@ -2262,6 +2466,7 @@ static int idpf_set_mac(struct net_device *netdev, void *p)
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_vport_config *vport_config;
struct sockaddr *addr = p;
+ u8 old_mac_addr[ETH_ALEN];
struct idpf_vport *vport;
int err = 0;
@@ -2285,17 +2490,19 @@ static int idpf_set_mac(struct net_device *netdev, void *p)
if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
goto unlock_mutex;
+ ether_addr_copy(old_mac_addr, vport->default_mac_addr);
+ ether_addr_copy(vport->default_mac_addr, addr->sa_data);
vport_config = vport->adapter->vport_config[vport->idx];
err = idpf_add_mac_filter(vport, np, addr->sa_data, false);
if (err) {
__idpf_del_mac_filter(vport_config, addr->sa_data);
+ ether_addr_copy(vport->default_mac_addr, netdev->dev_addr);
goto unlock_mutex;
}
- if (is_valid_ether_addr(vport->default_mac_addr))
- idpf_del_mac_filter(vport, np, vport->default_mac_addr, false);
+ if (is_valid_ether_addr(old_mac_addr))
+ __idpf_del_mac_filter(vport_config, old_mac_addr);
- ether_addr_copy(vport->default_mac_addr, addr->sa_data);
eth_hw_addr_set(netdev, addr->sa_data);
unlock_mutex:
@@ -2315,8 +2522,12 @@ void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size)
struct idpf_adapter *adapter = hw->back;
size_t sz = ALIGN(size, 4096);
- mem->va = dma_alloc_coherent(&adapter->pdev->dev, sz,
- &mem->pa, GFP_KERNEL);
+	/* The control queue resources are freed under a spinlock, so contiguous
+	 * pages avoid IOMMU remapping and the use of vmap() (and vunmap() in
+	 * the dma_free_*() path).
+ */
+ mem->va = dma_alloc_attrs(&adapter->pdev->dev, sz, &mem->pa,
+ GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS);
mem->size = sz;
return mem->va;
@@ -2331,13 +2542,67 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
{
struct idpf_adapter *adapter = hw->back;
- dma_free_coherent(&adapter->pdev->dev, mem->size,
- mem->va, mem->pa);
+ dma_free_attrs(&adapter->pdev->dev, mem->size,
+ mem->va, mem->pa, DMA_ATTR_FORCE_CONTIGUOUS);
mem->size = 0;
mem->va = NULL;
mem->pa = 0;
}
+static int idpf_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct idpf_vport *vport;
+ int err;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ if (!vport->link_up) {
+ idpf_vport_ctrl_unlock(netdev);
+ return -EPERM;
+ }
+
+ if (!idpf_ptp_is_vport_tx_tstamp_ena(vport) &&
+ !idpf_ptp_is_vport_rx_tstamp_ena(vport)) {
+ idpf_vport_ctrl_unlock(netdev);
+ return -EOPNOTSUPP;
+ }
+
+ err = idpf_ptp_set_timestamp_mode(vport, config);
+
+ idpf_vport_ctrl_unlock(netdev);
+
+ return err;
+}
+
+static int idpf_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct idpf_vport *vport;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ if (!vport->link_up) {
+ idpf_vport_ctrl_unlock(netdev);
+ return -EPERM;
+ }
+
+ if (!idpf_ptp_is_vport_tx_tstamp_ena(vport) &&
+ !idpf_ptp_is_vport_rx_tstamp_ena(vport)) {
+ idpf_vport_ctrl_unlock(netdev);
+ return 0;
+ }
+
+ *config = vport->tstamp_config;
+
+ idpf_vport_ctrl_unlock(netdev);
+
+ return 0;
+}
+
static const struct net_device_ops idpf_netdev_ops = {
.ndo_open = idpf_open,
.ndo_stop = idpf_stop,
@@ -2350,4 +2615,9 @@ static const struct net_device_ops idpf_netdev_ops = {
.ndo_get_stats64 = idpf_get_stats64,
.ndo_set_features = idpf_set_features,
.ndo_tx_timeout = idpf_tx_timeout,
+ .ndo_hwtstamp_get = idpf_hwtstamp_get,
+ .ndo_hwtstamp_set = idpf_hwtstamp_set,
+ .ndo_bpf = idpf_xdp,
+ .ndo_xdp_xmit = idpf_xdp_xmit,
+ .ndo_xsk_wakeup = idpf_xsk_wakeup,
};
diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c
index db476b3314c8..de5d722cc21d 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_main.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_main.c
@@ -3,15 +3,94 @@
#include "idpf.h"
#include "idpf_devids.h"
+#include "idpf_lan_vf_regs.h"
#include "idpf_virtchnl.h"
#define DRV_SUMMARY "Intel(R) Infrastructure Data Path Function Linux Driver"
+#define IDPF_NETWORK_ETHERNET_PROGIF 0x01
+#define IDPF_CLASS_NETWORK_ETHERNET_PROGIF \
+ (PCI_CLASS_NETWORK_ETHERNET << 8 | IDPF_NETWORK_ETHERNET_PROGIF)
+#define IDPF_VF_TEST_VAL 0xfeed0000u
+
MODULE_DESCRIPTION(DRV_SUMMARY);
-MODULE_IMPORT_NS(LIBETH);
+MODULE_IMPORT_NS("LIBETH");
+MODULE_IMPORT_NS("LIBETH_XDP");
MODULE_LICENSE("GPL");
/**
+ * idpf_get_device_type - Helper to find if it is a VF or PF device
+ * @pdev: PCI device information struct
+ *
+ * Return: PF/VF device ID or -%errno on failure.
+ */
+static int idpf_get_device_type(struct pci_dev *pdev)
+{
+ void __iomem *addr;
+ int ret;
+
+ addr = ioremap(pci_resource_start(pdev, 0) + VF_ARQBAL, 4);
+ if (!addr) {
+ pci_err(pdev, "Failed to allocate BAR0 mbx region\n");
+ return -EIO;
+ }
+
+ writel(IDPF_VF_TEST_VAL, addr);
+ if (readl(addr) == IDPF_VF_TEST_VAL)
+ ret = IDPF_DEV_ID_VF;
+ else
+ ret = IDPF_DEV_ID_PF;
+
+ iounmap(addr);
+
+ return ret;
+}
+
+/**
+ * idpf_dev_init - Initialize device specific parameters
+ * @adapter: adapter to initialize
+ * @ent: entry in idpf_pci_tbl
+ *
+ * Return: %0 on success, -%errno on failure.
+ */
+static int idpf_dev_init(struct idpf_adapter *adapter,
+ const struct pci_device_id *ent)
+{
+ int ret;
+
+ if (ent->class == IDPF_CLASS_NETWORK_ETHERNET_PROGIF) {
+ ret = idpf_get_device_type(adapter->pdev);
+ switch (ret) {
+ case IDPF_DEV_ID_VF:
+ idpf_vf_dev_ops_init(adapter);
+ adapter->crc_enable = true;
+ break;
+ case IDPF_DEV_ID_PF:
+ idpf_dev_ops_init(adapter);
+ break;
+ default:
+ return ret;
+ }
+
+ return 0;
+ }
+
+ switch (ent->device) {
+ case IDPF_DEV_ID_PF:
+ idpf_dev_ops_init(adapter);
+ break;
+ case IDPF_DEV_ID_VF:
+ idpf_vf_dev_ops_init(adapter);
+ adapter->crc_enable = true;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/**
* idpf_remove - Device removal routine
* @pdev: PCI device information struct
*/
@@ -62,6 +141,9 @@ destroy_wqs:
destroy_workqueue(adapter->vc_event_wq);
for (i = 0; i < adapter->max_vports; i++) {
+ if (!adapter->vport_config[i])
+ continue;
+ kfree(adapter->vport_config[i]->user_config.q_coalesce);
kfree(adapter->vport_config[i]);
adapter->vport_config[i] = NULL;
}
@@ -87,7 +169,12 @@ destroy_wqs:
*/
static void idpf_shutdown(struct pci_dev *pdev)
{
- idpf_remove(pdev);
+ struct idpf_adapter *adapter = pci_get_drvdata(pdev);
+
+ cancel_delayed_work_sync(&adapter->serv_task);
+ cancel_delayed_work_sync(&adapter->vc_event_task);
+ idpf_vc_core_deinit(adapter);
+ idpf_deinit_dflt_mbx(adapter);
if (system_state == SYSTEM_POWER_OFF)
pci_set_power_state(pdev, PCI_D3hot);
@@ -101,15 +188,37 @@ static void idpf_shutdown(struct pci_dev *pdev)
*/
static int idpf_cfg_hw(struct idpf_adapter *adapter)
{
+ resource_size_t res_start, mbx_start, rstat_start;
struct pci_dev *pdev = adapter->pdev;
struct idpf_hw *hw = &adapter->hw;
+ struct device *dev = &pdev->dev;
+ long len;
- hw->hw_addr = pcim_iomap_table(pdev)[0];
- if (!hw->hw_addr) {
- pci_err(pdev, "failed to allocate PCI iomap table\n");
+ res_start = pci_resource_start(pdev, 0);
+
+ /* Map mailbox space for virtchnl communication */
+ mbx_start = res_start + adapter->dev_ops.static_reg_info[0].start;
+ len = resource_size(&adapter->dev_ops.static_reg_info[0]);
+ hw->mbx.vaddr = devm_ioremap(dev, mbx_start, len);
+ if (!hw->mbx.vaddr) {
+ pci_err(pdev, "failed to allocate BAR0 mbx region\n");
return -ENOMEM;
}
+ hw->mbx.addr_start = adapter->dev_ops.static_reg_info[0].start;
+ hw->mbx.addr_len = len;
+
+ /* Map rstat space for resets */
+ rstat_start = res_start + adapter->dev_ops.static_reg_info[1].start;
+ len = resource_size(&adapter->dev_ops.static_reg_info[1]);
+ hw->rstat.vaddr = devm_ioremap(dev, rstat_start, len);
+ if (!hw->rstat.vaddr) {
+ pci_err(pdev, "failed to allocate BAR0 rstat region\n");
+
+ return -ENOMEM;
+ }
+ hw->rstat.addr_start = adapter->dev_ops.static_reg_info[1].start;
+ hw->rstat.addr_len = len;
hw->back = adapter;
@@ -136,33 +245,22 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->req_tx_splitq = true;
adapter->req_rx_splitq = true;
- switch (ent->device) {
- case IDPF_DEV_ID_PF:
- idpf_dev_ops_init(adapter);
- break;
- case IDPF_DEV_ID_VF:
- idpf_vf_dev_ops_init(adapter);
- adapter->crc_enable = true;
- break;
- default:
- err = -ENODEV;
- dev_err(&pdev->dev, "Unexpected dev ID 0x%x in idpf probe\n",
- ent->device);
- goto err_free;
- }
-
adapter->pdev = pdev;
err = pcim_enable_device(pdev);
if (err)
goto err_free;
- err = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ err = pcim_request_region(pdev, 0, pci_name(pdev));
if (err) {
- pci_err(pdev, "pcim_iomap_regions failed %pe\n", ERR_PTR(err));
+ pci_err(pdev, "pcim_request_region failed %pe\n", ERR_PTR(err));
goto err_free;
}
+ err = pci_enable_ptm(pdev, NULL);
+ if (err)
+ pci_dbg(pdev, "PCIe PTM is not supported by PCIe bus/controller\n");
+
/* set up for high or low dma */
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (err) {
@@ -174,7 +272,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
pci_set_drvdata(pdev, adapter);
- adapter->init_wq = alloc_workqueue("%s-%s-init", 0, 0,
+ adapter->init_wq = alloc_workqueue("%s-%s-init",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
dev_driver_string(dev),
dev_name(dev));
if (!adapter->init_wq) {
@@ -183,7 +282,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_free;
}
- adapter->serv_wq = alloc_workqueue("%s-%s-service", 0, 0,
+ adapter->serv_wq = alloc_workqueue("%s-%s-service",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
dev_driver_string(dev),
dev_name(dev));
if (!adapter->serv_wq) {
@@ -192,8 +292,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_serv_wq_alloc;
}
- adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", 0, 0,
- dev_driver_string(dev),
+ adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", WQ_UNBOUND | WQ_HIGHPRI,
+ 0, dev_driver_string(dev),
dev_name(dev));
if (!adapter->mbx_wq) {
dev_err(dev, "Failed to allocate mailbox workqueue\n");
@@ -201,7 +301,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_mbx_wq_alloc;
}
- adapter->stats_wq = alloc_workqueue("%s-%s-stats", 0, 0,
+ adapter->stats_wq = alloc_workqueue("%s-%s-stats",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
dev_driver_string(dev),
dev_name(dev));
if (!adapter->stats_wq) {
@@ -210,7 +311,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_stats_wq_alloc;
}
- adapter->vc_event_wq = alloc_workqueue("%s-%s-vc_event", 0, 0,
+ adapter->vc_event_wq = alloc_workqueue("%s-%s-vc_event",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
dev_driver_string(dev),
dev_name(dev));
if (!adapter->vc_event_wq) {
@@ -222,11 +324,18 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* setup msglvl */
adapter->msg_enable = netif_msg_init(-1, IDPF_AVAIL_NETIF_M);
+ err = idpf_dev_init(adapter, ent);
+ if (err) {
+ dev_err(&pdev->dev, "Unexpected dev ID 0x%x in idpf probe\n",
+ ent->device);
+ goto destroy_vc_event_wq;
+ }
+
err = idpf_cfg_hw(adapter);
if (err) {
dev_err(dev, "Failed to configure HW structure for adapter: %d\n",
err);
- goto err_cfg_hw;
+ goto destroy_vc_event_wq;
}
mutex_init(&adapter->vport_ctrl_lock);
@@ -247,7 +356,7 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
-err_cfg_hw:
+destroy_vc_event_wq:
destroy_workqueue(adapter->vc_event_wq);
err_vc_event_wq_alloc:
destroy_workqueue(adapter->stats_wq);
@@ -267,6 +376,7 @@ err_free:
static const struct pci_device_id idpf_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, IDPF_DEV_ID_PF)},
{ PCI_VDEVICE(INTEL, IDPF_DEV_ID_VF)},
+ { PCI_DEVICE_CLASS(IDPF_CLASS_NETWORK_ETHERNET_PROGIF, ~0)},
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(pci, idpf_pci_tbl);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_mem.h b/drivers/net/ethernet/intel/idpf/idpf_mem.h
index b21a04fccf0f..2aaabdc02dd2 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_mem.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_mem.h
@@ -12,9 +12,9 @@ struct idpf_dma_mem {
size_t size;
};
-#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
-#define rd32(a, reg) readl((a)->hw_addr + (reg))
-#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
-#define rd64(a, reg) readq((a)->hw_addr + (reg))
+#define idpf_mbx_wr32(a, reg, value) writel((value), ((a)->mbx.vaddr + (reg)))
+#define idpf_mbx_rd32(a, reg) readl((a)->mbx.vaddr + (reg))
+#define idpf_mbx_wr64(a, reg, value) writeq((value), ((a)->mbx.vaddr + (reg)))
+#define idpf_mbx_rd64(a, reg) readq((a)->mbx.vaddr + (reg))
#endif /* _IDPF_MEM_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
new file mode 100644
index 000000000000..3e1052d070cf
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
@@ -0,0 +1,1021 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2024 Intel Corporation */
+
+#include "idpf.h"
+#include "idpf_ptp.h"
+
+/**
+ * idpf_ptp_get_access - Determine the access type of a PTP feature
+ * @adapter: Driver specific private structure
+ * @direct: Capability that indicates the direct access
+ * @mailbox: Capability that indicates the mailbox access
+ *
+ * Return: the type of supported access for the PTP feature.
+ */
+static enum idpf_ptp_access
+idpf_ptp_get_access(const struct idpf_adapter *adapter, u32 direct, u32 mailbox)
+{
+ if (adapter->ptp->caps & direct)
+ return IDPF_PTP_DIRECT;
+ else if (adapter->ptp->caps & mailbox)
+ return IDPF_PTP_MAILBOX;
+ else
+ return IDPF_PTP_NONE;
+}
+
+/**
+ * idpf_ptp_get_features_access - Determine the access type of PTP features
+ * @adapter: Driver specific private structure
+ *
+ * Fill the adapter structure with the supported access type for each PTP
+ * feature.
+ */
+void idpf_ptp_get_features_access(const struct idpf_adapter *adapter)
+{
+ struct idpf_ptp *ptp = adapter->ptp;
+ u32 direct, mailbox;
+
+ /* Get the device clock time */
+ direct = VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME;
+ mailbox = VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB;
+ ptp->get_dev_clk_time_access = idpf_ptp_get_access(adapter,
+ direct,
+ mailbox);
+
+ /* Get the cross timestamp */
+ direct = VIRTCHNL2_CAP_PTP_GET_CROSS_TIME;
+ mailbox = VIRTCHNL2_CAP_PTP_GET_CROSS_TIME_MB;
+ ptp->get_cross_tstamp_access = idpf_ptp_get_access(adapter,
+ direct,
+ mailbox);
+
+ /* Set the device clock time */
+ direct = VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME;
+	mailbox = VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB;
+ ptp->set_dev_clk_time_access = idpf_ptp_get_access(adapter,
+ direct,
+ mailbox);
+
+ /* Adjust the device clock time */
+ direct = VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK;
+ mailbox = VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB;
+ ptp->adj_dev_clk_time_access = idpf_ptp_get_access(adapter,
+ direct,
+ mailbox);
+
+ /* Tx timestamping */
+ direct = VIRTCHNL2_CAP_PTP_TX_TSTAMPS;
+ mailbox = VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB;
+ ptp->tx_tstamp_access = idpf_ptp_get_access(adapter,
+ direct,
+ mailbox);
+}
+
+/**
+ * idpf_ptp_enable_shtime - Enable shadow time and execute a command
+ * @adapter: Driver specific private structure
+ */
+static void idpf_ptp_enable_shtime(struct idpf_adapter *adapter)
+{
+ u32 shtime_enable, exec_cmd;
+
+ /* Get offsets */
+ shtime_enable = adapter->ptp->cmd.shtime_enable_mask;
+ exec_cmd = adapter->ptp->cmd.exec_cmd_mask;
+
+ /* Set the shtime en and the sync field */
+ writel(shtime_enable, adapter->ptp->dev_clk_regs.cmd_sync);
+ writel(exec_cmd | shtime_enable, adapter->ptp->dev_clk_regs.cmd_sync);
+}
+
+/**
+ * idpf_ptp_read_src_clk_reg_direct - Read directly the main timer value
+ * @adapter: Driver specific private structure
+ * @sts: Optional parameter for holding a pair of system timestamps from
+ * the system clock. Will be ignored when NULL is given.
+ *
+ * Return: the device clock time.
+ */
+static u64 idpf_ptp_read_src_clk_reg_direct(struct idpf_adapter *adapter,
+ struct ptp_system_timestamp *sts)
+{
+ struct idpf_ptp *ptp = adapter->ptp;
+ u32 hi, lo;
+
+ spin_lock(&ptp->read_dev_clk_lock);
+
+ /* Read the system timestamp pre PHC read */
+ ptp_read_system_prets(sts);
+
+ idpf_ptp_enable_shtime(adapter);
+
+ /* Read the system timestamp post PHC read */
+ ptp_read_system_postts(sts);
+
+ lo = readl(ptp->dev_clk_regs.dev_clk_ns_l);
+ hi = readl(ptp->dev_clk_regs.dev_clk_ns_h);
+
+ spin_unlock(&ptp->read_dev_clk_lock);
+
+ return ((u64)hi << 32) | lo;
+}
+
+/**
+ * idpf_ptp_read_src_clk_reg_mailbox - Read the main timer value through mailbox
+ * @adapter: Driver specific private structure
+ * @sts: Optional parameter for holding a pair of system timestamps from
+ * the system clock. Will be ignored when NULL is given.
+ * @src_clk: Returned main timer value, in nanoseconds
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_read_src_clk_reg_mailbox(struct idpf_adapter *adapter,
+ struct ptp_system_timestamp *sts,
+ u64 *src_clk)
+{
+ struct idpf_ptp_dev_timers clk_time;
+ int err;
+
+ /* Read the system timestamp pre PHC read */
+ ptp_read_system_prets(sts);
+
+ err = idpf_ptp_get_dev_clk_time(adapter, &clk_time);
+ if (err)
+ return err;
+
+ /* Read the system timestamp post PHC read */
+ ptp_read_system_postts(sts);
+
+ *src_clk = clk_time.dev_clk_time_ns;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_read_src_clk_reg - Read the main timer value
+ * @adapter: Driver specific private structure
+ * @src_clk: Returned main timer value, in nanoseconds
+ * @sts: Optional parameter for holding a pair of system timestamps from
+ * the system clock. Will be ignored if NULL is given.
+ *
+ * Return: 0 on success (the device clock time is stored in @src_clk), -errno
+ * otherwise.
+ */
+static int idpf_ptp_read_src_clk_reg(struct idpf_adapter *adapter, u64 *src_clk,
+ struct ptp_system_timestamp *sts)
+{
+ switch (adapter->ptp->get_dev_clk_time_access) {
+ case IDPF_PTP_NONE:
+ return -EOPNOTSUPP;
+ case IDPF_PTP_MAILBOX:
+ return idpf_ptp_read_src_clk_reg_mailbox(adapter, sts, src_clk);
+ case IDPF_PTP_DIRECT:
+ *src_clk = idpf_ptp_read_src_clk_reg_direct(adapter, sts);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_ARM_ARCH_TIMER) || IS_ENABLED(CONFIG_X86)
+/**
+ * idpf_ptp_get_sync_device_time_direct - Get the cross time stamp values
+ * directly
+ * @adapter: Driver specific private structure
+ * @dev_time: 64bit main timer value
+ * @sys_time: 64bit system time value
+ */
+static void idpf_ptp_get_sync_device_time_direct(struct idpf_adapter *adapter,
+ u64 *dev_time, u64 *sys_time)
+{
+ u32 dev_time_lo, dev_time_hi, sys_time_lo, sys_time_hi;
+ struct idpf_ptp *ptp = adapter->ptp;
+
+ spin_lock(&ptp->read_dev_clk_lock);
+
+ idpf_ptp_enable_shtime(adapter);
+
+ dev_time_lo = readl(ptp->dev_clk_regs.dev_clk_ns_l);
+ dev_time_hi = readl(ptp->dev_clk_regs.dev_clk_ns_h);
+
+ sys_time_lo = readl(ptp->dev_clk_regs.sys_time_ns_l);
+ sys_time_hi = readl(ptp->dev_clk_regs.sys_time_ns_h);
+
+ spin_unlock(&ptp->read_dev_clk_lock);
+
+ *dev_time = (u64)dev_time_hi << 32 | dev_time_lo;
+ *sys_time = (u64)sys_time_hi << 32 | sys_time_lo;
+}
+
+/**
+ * idpf_ptp_get_sync_device_time_mailbox - Get the cross time stamp values
+ * through mailbox
+ * @adapter: Driver specific private structure
+ * @dev_time: 64bit main timer value expressed in nanoseconds
+ * @sys_time: 64bit system time value expressed in nanoseconds
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_get_sync_device_time_mailbox(struct idpf_adapter *adapter,
+ u64 *dev_time, u64 *sys_time)
+{
+ struct idpf_ptp_dev_timers cross_time;
+ int err;
+
+ err = idpf_ptp_get_cross_time(adapter, &cross_time);
+ if (err)
+ return err;
+
+ *dev_time = cross_time.dev_clk_time_ns;
+ *sys_time = cross_time.sys_time_ns;
+
+ return err;
+}
+
+/**
+ * idpf_ptp_get_sync_device_time - Get the cross time stamp info
+ * @device: Current device time
+ * @system: System counter value read synchronously with device time
+ * @ctx: Context provided by timekeeping code
+ *
+ * The device and system clock times are read simultaneously.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_get_sync_device_time(ktime_t *device,
+ struct system_counterval_t *system,
+ void *ctx)
+{
+ struct idpf_adapter *adapter = ctx;
+ u64 ns_time_dev, ns_time_sys;
+ int err;
+
+ switch (adapter->ptp->get_cross_tstamp_access) {
+ case IDPF_PTP_NONE:
+ return -EOPNOTSUPP;
+ case IDPF_PTP_DIRECT:
+ idpf_ptp_get_sync_device_time_direct(adapter, &ns_time_dev,
+ &ns_time_sys);
+ break;
+ case IDPF_PTP_MAILBOX:
+ err = idpf_ptp_get_sync_device_time_mailbox(adapter,
+ &ns_time_dev,
+ &ns_time_sys);
+ if (err)
+ return err;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ *device = ns_to_ktime(ns_time_dev);
+
+ system->cs_id = IS_ENABLED(CONFIG_X86) ? CSID_X86_ART
+ : CSID_ARM_ARCH_COUNTER;
+ system->cycles = ns_time_sys;
+ system->use_nsecs = true;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_get_crosststamp - Capture a device cross timestamp
+ * @info: the driver's PTP info structure
+ * @cts: The memory to fill the cross timestamp info
+ *
+ * Capture a cross timestamp between the system time and the device PTP hardware
+ * clock.
+ *
+ * Return: 0 on success (@cts is filled with the captured cross timestamp),
+ * -errno on failure.
+ */
+static int idpf_ptp_get_crosststamp(struct ptp_clock_info *info,
+ struct system_device_crosststamp *cts)
+{
+ struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info);
+
+ return get_device_system_crosststamp(idpf_ptp_get_sync_device_time,
+ adapter, NULL, cts);
+}
+#endif /* CONFIG_ARM_ARCH_TIMER || CONFIG_X86 */
+
+/**
+ * idpf_ptp_gettimex64 - Get the time of the clock
+ * @info: the driver's PTP info structure
+ * @ts: timespec64 structure to hold the current time value
+ * @sts: Optional parameter for holding a pair of system timestamps from
+ * the system clock. Will be ignored if NULL is given.
+ *
+ * Return: 0 on success, with the device clock value converted into the
+ * timespec struct @ts, -errno otherwise.
+ */
+static int idpf_ptp_gettimex64(struct ptp_clock_info *info,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info);
+ u64 time_ns;
+ int err;
+
+ err = idpf_ptp_read_src_clk_reg(adapter, &time_ns, sts);
+ if (err)
+ return -EACCES;
+
+ *ts = ns_to_timespec64(time_ns);
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_update_phctime_rxq_grp - Update the cached PHC time for a given Rx
+ * queue group.
+ * @grp: receive queue group in which Rx timestamp is enabled
+ * @split: Indicates whether the queue model is split or single queue
+ * @systime: Cached system time
+ */
+static void
+idpf_ptp_update_phctime_rxq_grp(const struct idpf_rxq_group *grp, bool split,
+ u64 systime)
+{
+ struct idpf_rx_queue *rxq;
+ u16 i;
+
+ if (!split) {
+ for (i = 0; i < grp->singleq.num_rxq; i++) {
+ rxq = grp->singleq.rxqs[i];
+ if (rxq)
+ WRITE_ONCE(rxq->cached_phc_time, systime);
+ }
+ } else {
+ for (i = 0; i < grp->splitq.num_rxq_sets; i++) {
+ rxq = &grp->splitq.rxq_sets[i]->rxq;
+ if (rxq)
+ WRITE_ONCE(rxq->cached_phc_time, systime);
+ }
+ }
+}
+
+/**
+ * idpf_ptp_update_cached_phctime - Update the cached PHC time values
+ * @adapter: Driver specific private structure
+ *
+ * This function updates the system time values which are cached in the adapter
+ * structure and the Rx queues.
+ *
+ * This function must be called periodically to ensure that the cached value
+ * is never more than 2 seconds old.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_update_cached_phctime(struct idpf_adapter *adapter)
+{
+ u64 systime;
+ int err;
+
+ err = idpf_ptp_read_src_clk_reg(adapter, &systime, NULL);
+ if (err)
+ return -EACCES;
+
+ /* Update the cached PHC time stored in the adapter structure.
+ * These values are used to extend Tx timestamp values to 64 bit
+ * expected by the stack.
+ */
+ WRITE_ONCE(adapter->ptp->cached_phc_time, systime);
+ WRITE_ONCE(adapter->ptp->cached_phc_jiffies, jiffies);
+
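+	/* Propagate the refreshed PHC time to the Rx queues of all vports so
+	 * that Rx timestamp extension uses a recent reference value.
+	 */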
+ idpf_for_each_vport(adapter, vport) {
+ bool split;
+
+ if (!vport || !vport->rxq_grps)
+ continue;
+
+ split = idpf_is_queue_model_split(vport->rxq_model);
+
+ for (u16 i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+
+ idpf_ptp_update_phctime_rxq_grp(grp, split, systime);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_settime64 - Set the time of the clock
+ * @info: the driver's PTP info structure
+ * @ts: timespec64 structure that holds the new time value
+ *
+ * Set the device clock to the user input value. The conversion from timespec
+ * to ns happens in the write function.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_settime64(struct ptp_clock_info *info,
+ const struct timespec64 *ts)
+{
+ struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info);
+ enum idpf_ptp_access access;
+ int err;
+ u64 ns;
+
+ access = adapter->ptp->set_dev_clk_time_access;
+ if (access != IDPF_PTP_MAILBOX)
+ return -EOPNOTSUPP;
+
+ ns = timespec64_to_ns(ts);
+
+ err = idpf_ptp_set_dev_clk_time(adapter, ns);
+ if (err) {
+ pci_err(adapter->pdev, "Failed to set the time, err: %pe\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ err = idpf_ptp_update_cached_phctime(adapter);
+ if (err)
+ pci_warn(adapter->pdev,
+ "Unable to immediately update cached PHC time\n");
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment
+ * @info: the driver's PTP info structure
+ * @delta: Offset in nanoseconds to adjust the time by
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
+{
+ struct timespec64 now, then;
+ int err;
+
+ err = idpf_ptp_gettimex64(info, &now, NULL);
+ if (err)
+ return err;
+
+ then = ns_to_timespec64(delta);
+ now = timespec64_add(now, then);
+
+ return idpf_ptp_settime64(info, &now);
+}
+
+/**
+ * idpf_ptp_adjtime - Adjust the time of the clock by the indicated delta
+ * @info: the driver's PTP info structure
+ * @delta: Offset in nanoseconds to adjust the time by
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
+{
+ struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info);
+ enum idpf_ptp_access access;
+ int err;
+
+ access = adapter->ptp->adj_dev_clk_time_access;
+ if (access != IDPF_PTP_MAILBOX)
+ return -EOPNOTSUPP;
+
+ /* Hardware only supports atomic adjustments using signed 32-bit
+ * integers. For any adjustment outside this range, perform
+ * a non-atomic get->adjust->set flow.
+ */
+ if (delta > S32_MAX || delta < S32_MIN)
+ return idpf_ptp_adjtime_nonatomic(info, delta);
+
+ err = idpf_ptp_adj_dev_clk_time(adapter, delta);
+ if (err) {
+ pci_err(adapter->pdev, "Failed to adjust the clock with delta %lld err: %pe\n",
+ delta, ERR_PTR(err));
+ return err;
+ }
+
+ err = idpf_ptp_update_cached_phctime(adapter);
+ if (err)
+ pci_warn(adapter->pdev,
+ "Unable to immediately update cached PHC time\n");
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_adjfine - Adjust clock increment rate
+ * @info: the driver's PTP info structure
+ * @scaled_ppm: Parts per million with 16-bit fractional field
+ *
+ * Adjust the frequency of the clock by the indicated scaled ppm from the
+ * base frequency.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
+{
+ struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info);
+ enum idpf_ptp_access access;
+ u64 incval, diff;
+ int err;
+
+ access = adapter->ptp->adj_dev_clk_time_access;
+ if (access != IDPF_PTP_MAILBOX)
+ return -EOPNOTSUPP;
+
+ incval = adapter->ptp->base_incval;
+
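+	/* adjust_by_scaled_ppm() scales the base increment value by
+	 * scaled_ppm (ppm with a 16-bit fractional part) and returns the
+	 * new increment value to be written to hardware.
+	 */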
+ diff = adjust_by_scaled_ppm(incval, scaled_ppm);
+ err = idpf_ptp_adj_dev_clk_fine(adapter, diff);
+ if (err)
+ pci_err(adapter->pdev, "Failed to adjust clock increment rate for scaled ppm %ld %pe\n",
+ scaled_ppm, ERR_PTR(err));
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_verify_pin - Verify if pin supports requested pin function
+ * @info: the driver's PTP info structure
+ * @pin: Pin index
+ * @func: Assigned function
+ * @chan: Assigned channel
+ *
+ * Return: -EOPNOTSUPP as not supported yet.
+ */
+static int idpf_ptp_verify_pin(struct ptp_clock_info *info, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * idpf_ptp_gpio_enable - Enable/disable ancillary features of PHC
+ * @info: the driver's PTP info structure
+ * @rq: The requested feature to change
+ * @on: Enable/disable flag
+ *
+ * Return: -EOPNOTSUPP as not supported yet.
+ */
+static int idpf_ptp_gpio_enable(struct ptp_clock_info *info,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * idpf_ptp_tstamp_extend_32b_to_64b - Convert a 32b nanoseconds Tx or Rx
+ * timestamp value to 64b.
+ * @cached_phc_time: recently cached copy of PHC time
+ * @in_timestamp: Ingress/egress 32b nanoseconds timestamp value
+ *
+ * Hardware captures timestamps which contain only 32 bits of nominal
+ * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
+ *
+ * Return: Tx timestamp value extended to 64 bits based on cached PHC time.
+ */
+u64 idpf_ptp_tstamp_extend_32b_to_64b(u64 cached_phc_time, u32 in_timestamp)
+{
+ u32 delta, phc_time_lo;
+ u64 ns;
+
+ /* Extract the lower 32 bits of the PHC time */
+ phc_time_lo = (u32)cached_phc_time;
+
+ /* Calculate the delta between the lower 32bits of the cached PHC
+ * time and the in_timestamp value.
+ */
+ delta = in_timestamp - phc_time_lo;
+
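+	/* Example with hypothetical values: cached_phc_time = 0x100000010 and
+	 * in_timestamp = 0x00000008 give delta = 0xfffffff8 (> U32_MAX / 2),
+	 * so the reverse path below yields 0x100000008, i.e. a timestamp
+	 * slightly in the past relative to the cached PHC time.
+	 */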
+ if (delta > U32_MAX / 2) {
+ /* Reverse the delta calculation here */
+ delta = phc_time_lo - in_timestamp;
+ ns = cached_phc_time - delta;
+ } else {
+ ns = cached_phc_time + delta;
+ }
+
+ return ns;
+}
+
+/**
+ * idpf_ptp_extend_ts - Convert a 40b timestamp to 64b nanoseconds
+ * @vport: Virtual port structure
+ * @in_tstamp: Ingress/egress timestamp value
+ *
+ * It is assumed that the caller verifies the timestamp is valid prior to
+ * calling this function.
+ *
+ * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
+ * time stored in the device private PTP structure as the basis for timestamp
+ * extension.
+ *
+ * Return: Tx timestamp value extended to 64 bits.
+ */
+u64 idpf_ptp_extend_ts(struct idpf_vport *vport, u64 in_tstamp)
+{
+ struct idpf_ptp *ptp = vport->adapter->ptp;
+ unsigned long discard_time;
+
+ discard_time = ptp->cached_phc_jiffies + 2 * HZ;
+
+ if (time_is_before_jiffies(discard_time)) {
+ u64_stats_update_begin(&vport->tstamp_stats.stats_sync);
+ u64_stats_inc(&vport->tstamp_stats.discarded);
+ u64_stats_update_end(&vport->tstamp_stats.stats_sync);
+
+ return 0;
+ }
+
+ return idpf_ptp_tstamp_extend_32b_to_64b(ptp->cached_phc_time,
+ lower_32_bits(in_tstamp));
+}
+
+/**
+ * idpf_ptp_request_ts - Request an available Tx timestamp index
+ * @tx_q: Transmit queue on which the Tx timestamp is requested
+ * @skb: The SKB to associate with this timestamp request
+ * @idx: Index of the Tx timestamp latch
+ *
+ * Request a Tx timestamp index, negotiated during PTP init, that will be set
+ * into the Tx descriptor.
+ *
+ * Return: 0 and the index that can be provided to Tx descriptor on success,
+ * -errno otherwise.
+ */
+int idpf_ptp_request_ts(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
+ u32 *idx)
+{
+ struct idpf_ptp_tx_tstamp *ptp_tx_tstamp;
+ struct list_head *head;
+
+ /* Get the index from the free latches list */
+ spin_lock(&tx_q->cached_tstamp_caps->latches_lock);
+
+ head = &tx_q->cached_tstamp_caps->latches_free;
+ if (list_empty(head)) {
+ spin_unlock(&tx_q->cached_tstamp_caps->latches_lock);
+ return -ENOBUFS;
+ }
+
+ ptp_tx_tstamp = list_first_entry(head, struct idpf_ptp_tx_tstamp,
+ list_member);
+ list_del(&ptp_tx_tstamp->list_member);
+
+ ptp_tx_tstamp->skb = skb_get(skb);
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ /* Move the element to the used latches list */
+ list_add(&ptp_tx_tstamp->list_member,
+ &tx_q->cached_tstamp_caps->latches_in_use);
+ spin_unlock(&tx_q->cached_tstamp_caps->latches_lock);
+
+ *idx = ptp_tx_tstamp->idx;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_set_rx_tstamp - Enable or disable Rx timestamping
+ * @vport: Virtual port structure
+ * @rx_filter: Receive timestamp filter
+ */
+static void idpf_ptp_set_rx_tstamp(struct idpf_vport *vport, int rx_filter)
+{
+ bool enable = true, splitq;
+
+ splitq = idpf_is_queue_model_split(vport->rxq_model);
+
+ if (rx_filter == HWTSTAMP_FILTER_NONE) {
+ enable = false;
+ vport->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ } else {
+ vport->tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
+ }
+
+ for (u16 i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+ struct idpf_rx_queue *rx_queue;
+ u16 j, num_rxq;
+
+ if (splitq)
+ num_rxq = grp->splitq.num_rxq_sets;
+ else
+ num_rxq = grp->singleq.num_rxq;
+
+ for (j = 0; j < num_rxq; j++) {
+ if (splitq)
+ rx_queue = &grp->splitq.rxq_sets[j]->rxq;
+ else
+ rx_queue = grp->singleq.rxqs[j];
+
+ if (enable)
+ idpf_queue_set(PTP, rx_queue);
+ else
+ idpf_queue_clear(PTP, rx_queue);
+ }
+ }
+}
+
+/**
+ * idpf_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
+ * @vport: Virtual port structure
+ * @config: Hwtstamp settings requested or saved
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_set_timestamp_mode(struct idpf_vport *vport,
+ struct kernel_hwtstamp_config *config)
+{
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ break;
+ case HWTSTAMP_TX_ON:
+ if (!idpf_ptp_is_vport_tx_tstamp_ena(vport))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ vport->tstamp_config.tx_type = config->tx_type;
+ idpf_ptp_set_rx_tstamp(vport, config->rx_filter);
+ *config = vport->tstamp_config;
+
+ return 0;
+}
+
+/**
+ * idpf_tstamp_task - Delayed task to handle Tx tstamps
+ * @work: work_struct handle
+ */
+void idpf_tstamp_task(struct work_struct *work)
+{
+ struct idpf_vport *vport;
+
+ vport = container_of(work, struct idpf_vport, tstamp_task);
+
+ idpf_ptp_get_tx_tstamp(vport);
+}
+
+/**
+ * idpf_ptp_do_aux_work - Do PTP periodic work
+ * @info: Driver's PTP info structure
+ *
+ * Return: Number of jiffies until the next run of the periodic work.
+ */
+static long idpf_ptp_do_aux_work(struct ptp_clock_info *info)
+{
+ struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info);
+
+ idpf_ptp_update_cached_phctime(adapter);
+
+ return msecs_to_jiffies(500);
+}
+
+/**
+ * idpf_ptp_set_caps - Set PTP capabilities
+ * @adapter: Driver specific private structure
+ *
+ * This function sets the PTP functions.
+ */
+static void idpf_ptp_set_caps(const struct idpf_adapter *adapter)
+{
+ struct ptp_clock_info *info = &adapter->ptp->info;
+
+ snprintf(info->name, sizeof(info->name), "%s-%s-clk",
+ KBUILD_MODNAME, pci_name(adapter->pdev));
+
+ info->owner = THIS_MODULE;
+ info->max_adj = adapter->ptp->max_adj;
+ info->gettimex64 = idpf_ptp_gettimex64;
+ info->settime64 = idpf_ptp_settime64;
+ info->adjfine = idpf_ptp_adjfine;
+ info->adjtime = idpf_ptp_adjtime;
+ info->verify = idpf_ptp_verify_pin;
+ info->enable = idpf_ptp_gpio_enable;
+ info->do_aux_work = idpf_ptp_do_aux_work;
+#if IS_ENABLED(CONFIG_ARM_ARCH_TIMER)
+ info->getcrosststamp = idpf_ptp_get_crosststamp;
+#elif IS_ENABLED(CONFIG_X86)
+ if (pcie_ptm_enabled(adapter->pdev) &&
+ boot_cpu_has(X86_FEATURE_ART) &&
+ boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
+ info->getcrosststamp = idpf_ptp_get_crosststamp;
+#endif /* CONFIG_ARM_ARCH_TIMER || CONFIG_X86 */
+}
+
+/**
+ * idpf_ptp_create_clock - Create PTP clock device for userspace
+ * @adapter: Driver specific private structure
+ *
+ * This function creates a new PTP clock device.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_create_clock(const struct idpf_adapter *adapter)
+{
+ struct ptp_clock *clock;
+
+ idpf_ptp_set_caps(adapter);
+
+ /* Attempt to register the clock before enabling the hardware. */
+ clock = ptp_clock_register(&adapter->ptp->info,
+ &adapter->pdev->dev);
+ if (IS_ERR(clock)) {
+ pci_err(adapter->pdev, "PTP clock creation failed: %pe\n",
+ clock);
+ return PTR_ERR(clock);
+ }
+
+ adapter->ptp->clock = clock;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_release_vport_tstamp - Release the Tx timestamp trackers for a
+ * given vport.
+ * @vport: Virtual port structure
+ *
+ * Remove the queues and delete the lists that track Tx timestamp entries for
+ * a given vport.
+ */
+static void idpf_ptp_release_vport_tstamp(struct idpf_vport *vport)
+{
+ struct idpf_ptp_tx_tstamp *ptp_tx_tstamp, *tmp;
+ struct list_head *head;
+
+ cancel_work_sync(&vport->tstamp_task);
+
+ /* Remove list with free latches */
+ spin_lock_bh(&vport->tx_tstamp_caps->latches_lock);
+
+ head = &vport->tx_tstamp_caps->latches_free;
+ list_for_each_entry_safe(ptp_tx_tstamp, tmp, head, list_member) {
+ list_del(&ptp_tx_tstamp->list_member);
+ kfree(ptp_tx_tstamp);
+ }
+
+ /* Remove list with latches in use */
+ head = &vport->tx_tstamp_caps->latches_in_use;
+ u64_stats_update_begin(&vport->tstamp_stats.stats_sync);
+ list_for_each_entry_safe(ptp_tx_tstamp, tmp, head, list_member) {
+ u64_stats_inc(&vport->tstamp_stats.flushed);
+
+ list_del(&ptp_tx_tstamp->list_member);
+ if (ptp_tx_tstamp->skb)
+ consume_skb(ptp_tx_tstamp->skb);
+
+ kfree(ptp_tx_tstamp);
+ }
+ u64_stats_update_end(&vport->tstamp_stats.stats_sync);
+
+ spin_unlock_bh(&vport->tx_tstamp_caps->latches_lock);
+
+ kfree(vport->tx_tstamp_caps);
+ vport->tx_tstamp_caps = NULL;
+}
+
+/**
+ * idpf_ptp_release_tstamp - Release the Tx timestamps trackers
+ * @adapter: Driver specific private structure
+ *
+ * Remove the queues and delete the lists that track Tx timestamp entries.
+ */
+static void idpf_ptp_release_tstamp(struct idpf_adapter *adapter)
+{
+ idpf_for_each_vport(adapter, vport) {
+ if (!idpf_ptp_is_vport_tx_tstamp_ena(vport))
+ continue;
+
+ idpf_ptp_release_vport_tstamp(vport);
+ }
+}
+
+/**
+ * idpf_ptp_get_txq_tstamp_capability - Verify the timestamping capability
+ * for a given Tx queue.
+ * @txq: Transmit queue
+ *
+ * Since performing timestamp flows requires reading the device clock value
+ * and support from the Control Plane, the function checks both factors and
+ * reports whether timestamping is supported.
+ *
+ * Return: true if the timestamping is supported, false otherwise.
+ */
+bool idpf_ptp_get_txq_tstamp_capability(struct idpf_tx_queue *txq)
+{
+ if (!txq || !txq->cached_tstamp_caps)
+ return false;
+ else if (txq->cached_tstamp_caps->access)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * idpf_ptp_init - Initialize PTP hardware clock support
+ * @adapter: Driver specific private structure
+ *
+ * Set up the device for interacting with the PTP hardware clock for all
+ * functions. Function will allocate and register a ptp_clock with the
+ * PTP_1588_CLOCK infrastructure.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_init(struct idpf_adapter *adapter)
+{
+ struct timespec64 ts;
+ int err;
+
+ if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PTP)) {
+ pci_dbg(adapter->pdev, "PTP capability is not detected\n");
+ return -EOPNOTSUPP;
+ }
+
+ adapter->ptp = kzalloc(sizeof(*adapter->ptp), GFP_KERNEL);
+ if (!adapter->ptp)
+ return -ENOMEM;
+
+ /* add a back pointer to adapter */
+ adapter->ptp->adapter = adapter;
+
+ if (adapter->dev_ops.reg_ops.ptp_reg_init)
+ adapter->dev_ops.reg_ops.ptp_reg_init(adapter);
+
+ err = idpf_ptp_get_caps(adapter);
+ if (err) {
+ pci_err(adapter->pdev, "Failed to get PTP caps err %d\n", err);
+ goto free_ptp;
+ }
+
+ err = idpf_ptp_create_clock(adapter);
+ if (err)
+ goto free_ptp;
+
+ if (adapter->ptp->get_dev_clk_time_access != IDPF_PTP_NONE)
+ ptp_schedule_worker(adapter->ptp->clock, 0);
+
+ /* Write the default increment time value if the clock adjustments
+ * are enabled.
+ */
+ if (adapter->ptp->adj_dev_clk_time_access != IDPF_PTP_NONE) {
+ err = idpf_ptp_adj_dev_clk_fine(adapter,
+ adapter->ptp->base_incval);
+ if (err)
+ goto remove_clock;
+ }
+
+ /* Write the initial time value if the set time operation is enabled */
+ if (adapter->ptp->set_dev_clk_time_access != IDPF_PTP_NONE) {
+ ts = ktime_to_timespec64(ktime_get_real());
+ err = idpf_ptp_settime64(&adapter->ptp->info, &ts);
+ if (err)
+ goto remove_clock;
+ }
+
+ spin_lock_init(&adapter->ptp->read_dev_clk_lock);
+
+ pci_dbg(adapter->pdev, "PTP init successful\n");
+
+ return 0;
+
+remove_clock:
+ if (adapter->ptp->get_dev_clk_time_access != IDPF_PTP_NONE)
+ ptp_cancel_worker_sync(adapter->ptp->clock);
+
+ ptp_clock_unregister(adapter->ptp->clock);
+ adapter->ptp->clock = NULL;
+
+free_ptp:
+ kfree(adapter->ptp);
+ adapter->ptp = NULL;
+
+ return err;
+}
+
+/**
+ * idpf_ptp_release - Clear PTP hardware clock support
+ * @adapter: Driver specific private structure
+ */
+void idpf_ptp_release(struct idpf_adapter *adapter)
+{
+ struct idpf_ptp *ptp = adapter->ptp;
+
+ if (!ptp)
+ return;
+
+ if (ptp->tx_tstamp_access != IDPF_PTP_NONE &&
+ ptp->get_dev_clk_time_access != IDPF_PTP_NONE)
+ idpf_ptp_release_tstamp(adapter);
+
+ if (ptp->clock) {
+ if (adapter->ptp->get_dev_clk_time_access != IDPF_PTP_NONE)
+ ptp_cancel_worker_sync(adapter->ptp->clock);
+
+ ptp_clock_unregister(ptp->clock);
+ }
+
+ kfree(ptp);
+ adapter->ptp = NULL;
+}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.h b/drivers/net/ethernet/intel/idpf/idpf_ptp.h
new file mode 100644
index 000000000000..785da03e4cf5
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.h
@@ -0,0 +1,379 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef _IDPF_PTP_H
+#define _IDPF_PTP_H
+
+#include <linux/ptp_clock_kernel.h>
+
+/**
+ * struct idpf_ptp_cmd - PTP command masks
+ * @exec_cmd_mask: mask to trigger command execution
+ * @shtime_enable_mask: mask to enable shadow time
+ */
+struct idpf_ptp_cmd {
+ u32 exec_cmd_mask;
+ u32 shtime_enable_mask;
+};
+
+/**
+ * struct idpf_ptp_dev_clk_regs - PTP device registers
+ * @dev_clk_ns_l: low part of the device clock register
+ * @dev_clk_ns_h: high part of the device clock register
+ * @phy_clk_ns_l: low part of the PHY clock register
+ * @phy_clk_ns_h: high part of the PHY clock register
+ * @sys_time_ns_l: low part of the system time register
+ * @sys_time_ns_h: high part of the system time register
+ * @incval_l: low part of the increment value register
+ * @incval_h: high part of the increment value register
+ * @shadj_l: low part of the shadow adjust register
+ * @shadj_h: high part of the shadow adjust register
+ * @phy_incval_l: low part of the PHY increment value register
+ * @phy_incval_h: high part of the PHY increment value register
+ * @phy_shadj_l: low part of the PHY shadow adjust register
+ * @phy_shadj_h: high part of the PHY shadow adjust register
+ * @cmd: PTP command register
+ * @phy_cmd: PHY command register
+ * @cmd_sync: PTP command synchronization register
+ */
+struct idpf_ptp_dev_clk_regs {
+ /* Main clock */
+ void __iomem *dev_clk_ns_l;
+ void __iomem *dev_clk_ns_h;
+
+ /* PHY timer */
+ void __iomem *phy_clk_ns_l;
+ void __iomem *phy_clk_ns_h;
+
+ /* System time */
+ void __iomem *sys_time_ns_l;
+ void __iomem *sys_time_ns_h;
+
+ /* Main timer adjustments */
+ void __iomem *incval_l;
+ void __iomem *incval_h;
+ void __iomem *shadj_l;
+ void __iomem *shadj_h;
+
+ /* PHY timer adjustments */
+ void __iomem *phy_incval_l;
+ void __iomem *phy_incval_h;
+ void __iomem *phy_shadj_l;
+ void __iomem *phy_shadj_h;
+
+ /* Command */
+ void __iomem *cmd;
+ void __iomem *phy_cmd;
+ void __iomem *cmd_sync;
+};
+
+/**
+ * enum idpf_ptp_access - the type of access to PTP operations
+ * @IDPF_PTP_NONE: no access
+ * @IDPF_PTP_DIRECT: direct access through BAR registers
+ * @IDPF_PTP_MAILBOX: access through mailbox messages
+ */
+enum idpf_ptp_access {
+ IDPF_PTP_NONE = 0,
+ IDPF_PTP_DIRECT,
+ IDPF_PTP_MAILBOX,
+};
+
+/**
+ * struct idpf_ptp_secondary_mbx - PTP secondary mailbox
+ * @peer_mbx_q_id: PTP mailbox queue ID
+ * @peer_id: Peer ID for PTP Device Control daemon
+ * @valid: indicates whether secondary mailbox is supported by the Control
+ * Plane
+ */
+struct idpf_ptp_secondary_mbx {
+ u16 peer_mbx_q_id;
+ u16 peer_id;
+ bool valid:1;
+};
+
+/**
+ * enum idpf_ptp_tx_tstamp_state - Tx timestamp states
+ * @IDPF_PTP_FREE: Tx timestamp index free to use
+ * @IDPF_PTP_REQUEST: Tx timestamp index set to the Tx descriptor
+ * @IDPF_PTP_READ_VALUE: Tx timestamp value ready to be read
+ */
+enum idpf_ptp_tx_tstamp_state {
+ IDPF_PTP_FREE,
+ IDPF_PTP_REQUEST,
+ IDPF_PTP_READ_VALUE,
+};
+
+/**
+ * struct idpf_ptp_tx_tstamp_status - Parameters to track Tx timestamp
+ * @skb: the pointer to the SKB that received the completion tag
+ * @state: the state of the Tx timestamp
+ */
+struct idpf_ptp_tx_tstamp_status {
+ struct sk_buff *skb;
+ enum idpf_ptp_tx_tstamp_state state;
+};
+
+/**
+ * struct idpf_ptp_tx_tstamp - Parameters for Tx timestamping
+ * @list_member: the list member structure
+ * @tx_latch_reg_offset_l: Tx tstamp latch low register offset
+ * @tx_latch_reg_offset_h: Tx tstamp latch high register offset
+ * @skb: the pointer to the SKB for this timestamp request
+ * @tstamp: the Tx tstamp value
+ * @idx: the index of the Tx tstamp
+ */
+struct idpf_ptp_tx_tstamp {
+ struct list_head list_member;
+ u32 tx_latch_reg_offset_l;
+ u32 tx_latch_reg_offset_h;
+ struct sk_buff *skb;
+ u64 tstamp;
+ u32 idx;
+};
+
+/**
+ * struct idpf_ptp_vport_tx_tstamp_caps - Tx timestamp capabilities
+ * @vport_id: the vport id
+ * @num_entries: the number of negotiated Tx timestamp entries
+ * @tstamp_ns_lo_bit: first bit for nanosecond part of the timestamp
+ * @latches_lock: the lock to the lists of free/used timestamp indexes
+ * @status_lock: the lock to the status tracker
+ * @access: indicates whether Tx timestamp access is available
+ * @latches_free: the list of the free Tx timestamps latches
+ * @latches_in_use: the list of the used Tx timestamps latches
+ * @tx_tstamp_status: Tx tstamp status tracker
+ */
+struct idpf_ptp_vport_tx_tstamp_caps {
+ u32 vport_id;
+ u16 num_entries;
+ u16 tstamp_ns_lo_bit;
+ spinlock_t latches_lock;
+ spinlock_t status_lock;
+ bool access:1;
+ struct list_head latches_free;
+ struct list_head latches_in_use;
+ struct idpf_ptp_tx_tstamp_status tx_tstamp_status[];
+};
+
+/**
+ * struct idpf_ptp - PTP parameters
+ * @info: structure defining PTP hardware capabilities
+ * @clock: pointer to registered PTP clock device
+ * @adapter: back pointer to the adapter
+ * @base_incval: base increment value of the PTP clock
+ * @max_adj: maximum adjustment of the PTP clock
+ * @cmd: HW specific command masks
+ * @cached_phc_time: a cached copy of the PHC time for timestamp extension
+ * @cached_phc_jiffies: jiffies when cached_phc_time was last updated
+ * @dev_clk_regs: the set of registers to access the device clock
+ * @caps: PTP capabilities negotiated with the Control Plane
+ * @get_dev_clk_time_access: access type for getting the device clock time
+ * @get_cross_tstamp_access: access type for the cross timestamping
+ * @set_dev_clk_time_access: access type for setting the device clock time
+ * @adj_dev_clk_time_access: access type for adjusting the device clock
+ * @tx_tstamp_access: access type for the Tx timestamp value read
+ * @rsv: reserved bits
+ * @secondary_mbx: parameters for using dedicated PTP mailbox
+ * @read_dev_clk_lock: spinlock protecting access to the device clock read
+ * operation executed by the HW latch
+ */
+struct idpf_ptp {
+ struct ptp_clock_info info;
+ struct ptp_clock *clock;
+ struct idpf_adapter *adapter;
+ u64 base_incval;
+ u64 max_adj;
+ struct idpf_ptp_cmd cmd;
+ u64 cached_phc_time;
+ unsigned long cached_phc_jiffies;
+ struct idpf_ptp_dev_clk_regs dev_clk_regs;
+ u32 caps;
+ enum idpf_ptp_access get_dev_clk_time_access:2;
+ enum idpf_ptp_access get_cross_tstamp_access:2;
+ enum idpf_ptp_access set_dev_clk_time_access:2;
+ enum idpf_ptp_access adj_dev_clk_time_access:2;
+ enum idpf_ptp_access tx_tstamp_access:2;
+ u8 rsv;
+ struct idpf_ptp_secondary_mbx secondary_mbx;
+ spinlock_t read_dev_clk_lock;
+};
+
+/**
+ * idpf_ptp_info_to_adapter - get driver adapter struct from ptp_clock_info
+ * @info: pointer to ptp_clock_info struct
+ *
+ * Return: pointer to the corresponding adapter struct
+ */
+static inline struct idpf_adapter *
+idpf_ptp_info_to_adapter(const struct ptp_clock_info *info)
+{
+ const struct idpf_ptp *ptp = container_of_const(info, struct idpf_ptp,
+ info);
+ return ptp->adapter;
+}
+
+/**
+ * struct idpf_ptp_dev_timers - System time and device time values
+ * @sys_time_ns: system time value expressed in nanoseconds
+ * @dev_clk_time_ns: device clock time value expressed in nanoseconds
+ */
+struct idpf_ptp_dev_timers {
+ u64 sys_time_ns;
+ u64 dev_clk_time_ns;
+};
+
+/**
+ * idpf_ptp_is_vport_tx_tstamp_ena - Verify the Tx timestamping enablement for
+ * a given vport.
+ * @vport: Virtual port structure
+ *
+ * Tx timestamp capabilities are negotiated with the Control Plane only if the
+ * device clock value can be read, the Tx timestamp access type is different
+ * from NONE, and the PTP clock for the adapter is created. When all those
+ * conditions are satisfied, the Tx timestamp feature is enabled and
+ * tx_tstamp_caps is allocated and filled.
+ *
+ * Return: true if the Tx timestamping is enabled, false otherwise.
+ */
+static inline bool idpf_ptp_is_vport_tx_tstamp_ena(struct idpf_vport *vport)
+{
+ if (!vport->tx_tstamp_caps)
+ return false;
+ else
+ return true;
+}
+
+/**
+ * idpf_ptp_is_vport_rx_tstamp_ena - Verify the Rx timestamping enablement for
+ * a given vport.
+ * @vport: Virtual port structure
+ *
+ * Rx timestamp feature is enabled if the PTP clock for the adapter is created
+ * and it is possible to read the value of the device clock. The second
+ * assumption comes from the need to extend the Rx timestamp value to 64 bit
+ * based on the current device clock time.
+ *
+ * Return: true if the Rx timestamping is enabled, false otherwise.
+ */
+static inline bool idpf_ptp_is_vport_rx_tstamp_ena(struct idpf_vport *vport)
+{
+ if (!vport->adapter->ptp ||
+ vport->adapter->ptp->get_dev_clk_time_access == IDPF_PTP_NONE)
+ return false;
+ else
+ return true;
+}
+
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+int idpf_ptp_init(struct idpf_adapter *adapter);
+void idpf_ptp_release(struct idpf_adapter *adapter);
+int idpf_ptp_get_caps(struct idpf_adapter *adapter);
+void idpf_ptp_get_features_access(const struct idpf_adapter *adapter);
+bool idpf_ptp_get_txq_tstamp_capability(struct idpf_tx_queue *txq);
+int idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter,
+ struct idpf_ptp_dev_timers *dev_clk_time);
+int idpf_ptp_get_cross_time(struct idpf_adapter *adapter,
+ struct idpf_ptp_dev_timers *cross_time);
+int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter, u64 time);
+int idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter, u64 incval);
+int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter, s64 delta);
+int idpf_ptp_get_vport_tstamps_caps(struct idpf_vport *vport);
+int idpf_ptp_get_tx_tstamp(struct idpf_vport *vport);
+int idpf_ptp_set_timestamp_mode(struct idpf_vport *vport,
+ struct kernel_hwtstamp_config *config);
+u64 idpf_ptp_extend_ts(struct idpf_vport *vport, u64 in_tstamp);
+u64 idpf_ptp_tstamp_extend_32b_to_64b(u64 cached_phc_time, u32 in_timestamp);
+int idpf_ptp_request_ts(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
+ u32 *idx);
+void idpf_tstamp_task(struct work_struct *work);
+#else /* CONFIG_PTP_1588_CLOCK */
+static inline int idpf_ptp_init(struct idpf_adapter *adapter)
+{
+ return 0;
+}
+
+static inline void idpf_ptp_release(struct idpf_adapter *adapter) { }
+
+static inline int idpf_ptp_get_caps(struct idpf_adapter *adapter)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void
+idpf_ptp_get_features_access(const struct idpf_adapter *adapter) { }
+
+static inline bool
+idpf_ptp_get_txq_tstamp_capability(struct idpf_tx_queue *txq)
+{
+ return false;
+}
+
+static inline int
+idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter,
+ struct idpf_ptp_dev_timers *dev_clk_time)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+idpf_ptp_get_cross_time(struct idpf_adapter *adapter,
+ struct idpf_ptp_dev_timers *cross_time)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter,
+ u64 time)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter,
+ u64 incval)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter,
+ s64 delta)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int idpf_ptp_get_vport_tstamps_caps(struct idpf_vport *vport)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int idpf_ptp_get_tx_tstamp(struct idpf_vport *vport)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+idpf_ptp_set_timestamp_mode(struct idpf_vport *vport,
+ struct kernel_hwtstamp_config *config)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline u64 idpf_ptp_extend_ts(struct idpf_vport *vport, u64 in_tstamp)
+{
+ return 0;
+}
+
+static inline u64 idpf_ptp_tstamp_extend_32b_to_64b(u64 cached_phc_time,
+ u32 in_timestamp)
+{
+ return 0;
+}
+
+static inline int idpf_ptp_request_ts(struct idpf_tx_queue *tx_q,
+ struct sk_buff *skb, u32 *idx)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void idpf_tstamp_task(struct work_struct *work) { }
+#endif /* CONFIG_PTP_1588_CLOCK */
+#endif /* _IDPF_PTP_H */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index dfd7cf1d9aa0..e3ddf18dcbf5 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -1,8 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */
-#include <net/libeth/rx.h>
-#include <net/libeth/tx.h>
+#include <net/libeth/xdp.h>
#include "idpf.h"
@@ -180,6 +179,58 @@ static int idpf_tx_singleq_csum(struct sk_buff *skb,
}
/**
+ * idpf_tx_singleq_dma_map_error - handle TX DMA map errors
+ * @txq: queue to send buffer on
+ * @skb: send buffer
+ * @first: original first buffer info buffer for packet
+ * @idx: starting point on ring to unwind
+ */
+static void idpf_tx_singleq_dma_map_error(struct idpf_tx_queue *txq,
+ struct sk_buff *skb,
+ struct idpf_tx_buf *first, u16 idx)
+{
+ struct libeth_sq_napi_stats ss = { };
+ struct libeth_cq_pp cp = {
+ .dev = txq->dev,
+ .ss = &ss,
+ };
+
+ u64_stats_update_begin(&txq->stats_sync);
+ u64_stats_inc(&txq->q_stats.dma_map_errs);
+ u64_stats_update_end(&txq->stats_sync);
+
+ /* clear dma mappings for failed tx_buf map */
+ for (;;) {
+ struct idpf_tx_buf *tx_buf;
+
+ tx_buf = &txq->tx_buf[idx];
+ libeth_tx_complete(tx_buf, &cp);
+ if (tx_buf == first)
+ break;
+ if (idx == 0)
+ idx = txq->desc_count;
+ idx--;
+ }
+
+ if (skb_is_gso(skb)) {
+ union idpf_tx_flex_desc *tx_desc;
+
+ /* If we failed a DMA mapping for a TSO packet, we will have
+ * used one additional descriptor for a context
+ * descriptor. Reset that here.
+ */
+ tx_desc = &txq->flex_tx[idx];
+ memset(tx_desc, 0, sizeof(*tx_desc));
+ if (idx == 0)
+ idx = txq->desc_count;
+ idx--;
+ }
+
+ /* Update tail in case netdev_xmit_more was previously true */
+ idpf_tx_buf_hw_update(txq, idx, false);
+}
+
+/**
* idpf_tx_singleq_map - Build the Tx base descriptor
* @tx_q: queue to send buffer on
* @first: first buffer info buffer to use
@@ -219,8 +270,9 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
- if (dma_mapping_error(tx_q->dev, dma))
- return idpf_tx_dma_map_error(tx_q, skb, first, i);
+ if (unlikely(dma_mapping_error(tx_q->dev, dma)))
+ return idpf_tx_singleq_dma_map_error(tx_q, skb,
+ first, i);
/* record length, and DMA address */
dma_unmap_len_set(tx_buf, len, size);
@@ -362,17 +414,18 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
{
struct idpf_tx_offload_params offload = { };
struct idpf_tx_buf *first;
- unsigned int count;
+ u32 count, buf_count = 1;
+ int csum, tso, needed;
__be16 protocol;
- int csum, tso;
- count = idpf_tx_desc_count_required(tx_q, skb);
+ count = idpf_tx_res_count_required(tx_q, skb, &buf_count);
if (unlikely(!count))
return idpf_tx_drop_skb(tx_q, skb);
- if (idpf_tx_maybe_stop_common(tx_q,
- count + IDPF_TX_DESCS_PER_CACHE_LINE +
- IDPF_TX_DESCS_FOR_CTX)) {
+ needed = count + IDPF_TX_DESCS_PER_CACHE_LINE + IDPF_TX_DESCS_FOR_CTX;
+ if (!netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
+ IDPF_DESC_UNUSED(tx_q),
+ needed, needed)) {
idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
u64_stats_update_begin(&tx_q->stats_sync);
@@ -517,7 +570,7 @@ fetch_next_txq_desc:
np = netdev_priv(tx_q->netdev);
nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
- dont_wake = np->state != __IDPF_VPORT_UP ||
+ dont_wake = !test_bit(IDPF_VPORT_UP, np->state) ||
!netif_carrier_ok(tx_q->netdev);
__netif_txq_completed_wake(nq, ss.packets, ss.bytes,
IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
@@ -595,13 +648,13 @@ static bool idpf_rx_singleq_is_non_eop(const union virtchnl2_rx_desc *rx_desc)
*/
static void idpf_rx_singleq_csum(struct idpf_rx_queue *rxq,
struct sk_buff *skb,
- struct idpf_rx_csum_decoded csum_bits,
+ struct libeth_rx_csum csum_bits,
struct libeth_rx_pt decoded)
{
bool ipv4, ipv6;
/* check if Rx checksum is enabled */
- if (!libeth_rx_pt_has_checksum(rxq->netdev, decoded))
+ if (!libeth_rx_pt_has_checksum(rxq->xdp_rxq.dev, decoded))
return;
/* check if HW has decoded the packet and checksum */
@@ -661,10 +714,10 @@ checksum_fail:
*
* Return: parsed checksum status.
**/
-static struct idpf_rx_csum_decoded
+static struct libeth_rx_csum
idpf_rx_singleq_base_csum(const union virtchnl2_rx_desc *rx_desc)
{
- struct idpf_rx_csum_decoded csum_bits = { };
+ struct libeth_rx_csum csum_bits = { };
u32 rx_error, rx_status;
u64 qword;
@@ -696,10 +749,10 @@ idpf_rx_singleq_base_csum(const union virtchnl2_rx_desc *rx_desc)
*
* Return: parsed checksum status.
**/
-static struct idpf_rx_csum_decoded
+static struct libeth_rx_csum
idpf_rx_singleq_flex_csum(const union virtchnl2_rx_desc *rx_desc)
{
- struct idpf_rx_csum_decoded csum_bits = { };
+ struct libeth_rx_csum csum_bits = { };
u16 rx_status0, rx_status1;
rx_status0 = le16_to_cpu(rx_desc->flex_nic_wb.status_error0);
@@ -740,7 +793,7 @@ static void idpf_rx_singleq_base_hash(struct idpf_rx_queue *rx_q,
{
u64 mask, qw1;
- if (!libeth_rx_pt_has_hash(rx_q->netdev, decoded))
+ if (!libeth_rx_pt_has_hash(rx_q->xdp_rxq.dev, decoded))
return;
mask = VIRTCHNL2_RX_BASE_DESC_FLTSTAT_RSS_HASH_M;
@@ -768,7 +821,7 @@ static void idpf_rx_singleq_flex_hash(struct idpf_rx_queue *rx_q,
const union virtchnl2_rx_desc *rx_desc,
struct libeth_rx_pt decoded)
{
- if (!libeth_rx_pt_has_hash(rx_q->netdev, decoded))
+ if (!libeth_rx_pt_has_hash(rx_q->xdp_rxq.dev, decoded))
return;
if (FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_RSS_VALID_M,
@@ -780,7 +833,7 @@ static void idpf_rx_singleq_flex_hash(struct idpf_rx_queue *rx_q,
}
/**
- * idpf_rx_singleq_process_skb_fields - Populate skb header fields from Rx
+ * __idpf_rx_singleq_process_skb_fields - Populate skb header fields from Rx
* descriptor
* @rx_q: Rx ring being processed
* @skb: pointer to current skb being populated
@@ -792,16 +845,13 @@ static void idpf_rx_singleq_flex_hash(struct idpf_rx_queue *rx_q,
* other fields within the skb.
*/
static void
-idpf_rx_singleq_process_skb_fields(struct idpf_rx_queue *rx_q,
- struct sk_buff *skb,
- const union virtchnl2_rx_desc *rx_desc,
- u16 ptype)
+__idpf_rx_singleq_process_skb_fields(struct idpf_rx_queue *rx_q,
+ struct sk_buff *skb,
+ const union virtchnl2_rx_desc *rx_desc,
+ u16 ptype)
{
struct libeth_rx_pt decoded = rx_q->rx_ptype_lkup[ptype];
- struct idpf_rx_csum_decoded csum_bits;
-
- /* modifies the skb - consumes the enet header */
- skb->protocol = eth_type_trans(skb, rx_q->netdev);
+ struct libeth_rx_csum csum_bits;
/* Check if we're using base mode descriptor IDs */
if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M) {
@@ -813,7 +863,6 @@ idpf_rx_singleq_process_skb_fields(struct idpf_rx_queue *rx_q,
}
idpf_rx_singleq_csum(rx_q, skb, csum_bits, decoded);
- skb_record_rx_queue(skb, rx_q->idx);
}
/**
@@ -900,14 +949,14 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rx_q,
*/
static void
idpf_rx_singleq_extract_base_fields(const union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_extracted *fields)
+ struct libeth_rqe_info *fields)
{
u64 qword;
qword = le64_to_cpu(rx_desc->base_wb.qword1.status_error_ptype_len);
- fields->size = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M, qword);
- fields->rx_ptype = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M, qword);
+ fields->len = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M, qword);
+ fields->ptype = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M, qword);
}
/**
@@ -923,12 +972,12 @@ idpf_rx_singleq_extract_base_fields(const union virtchnl2_rx_desc *rx_desc,
*/
static void
idpf_rx_singleq_extract_flex_fields(const union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_extracted *fields)
+ struct libeth_rqe_info *fields)
{
- fields->size = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M,
- le16_to_cpu(rx_desc->flex_nic_wb.pkt_len));
- fields->rx_ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PTYPE_M,
- le16_to_cpu(rx_desc->flex_nic_wb.ptype_flex_flags0));
+ fields->len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M,
+ le16_to_cpu(rx_desc->flex_nic_wb.pkt_len));
+ fields->ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PTYPE_M,
+ le16_to_cpu(rx_desc->flex_nic_wb.ptype_flex_flags0));
}
/**
@@ -941,7 +990,7 @@ idpf_rx_singleq_extract_flex_fields(const union virtchnl2_rx_desc *rx_desc,
static void
idpf_rx_singleq_extract_fields(const struct idpf_rx_queue *rx_q,
const union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_extracted *fields)
+ struct libeth_rqe_info *fields)
{
if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M)
idpf_rx_singleq_extract_base_fields(rx_desc, fields);
@@ -949,6 +998,32 @@ idpf_rx_singleq_extract_fields(const struct idpf_rx_queue *rx_q,
idpf_rx_singleq_extract_flex_fields(rx_desc, fields);
}
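+/**
+ * idpf_rx_singleq_process_skb_fields - Populate skb fields from an Rx buffer
+ * @skb: pointer to the skb being populated
+ * @xdp: XDP buffer carrying the Rx descriptor for this frame
+ * @rs: NAPI poll Rx stats
+ *
+ * Extract the packet type from the descriptor and fill the checksum, hash and
+ * other skb fields.
+ *
+ * Return: true, as the fields are always populated.
+ */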
+static bool
+idpf_rx_singleq_process_skb_fields(struct sk_buff *skb,
+ const struct libeth_xdp_buff *xdp,
+ struct libeth_rq_napi_stats *rs)
+{
+ struct libeth_rqe_info fields;
+ struct idpf_rx_queue *rxq;
+
+ rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);
+
+ idpf_rx_singleq_extract_fields(rxq, xdp->desc, &fields);
+ __idpf_rx_singleq_process_skb_fields(rxq, skb, xdp->desc,
+ fields.ptype);
+
+ return true;
+}
+
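+/**
+ * idpf_xdp_run_pass - Build an skb for a received buffer and pass it up
+ * @xdp: XDP buffer to convert
+ * @napi: NAPI instance owning the Rx queue
+ * @rs: NAPI poll Rx stats to update
+ * @desc: Rx descriptor backing the buffer
+ */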
+static void idpf_xdp_run_pass(struct libeth_xdp_buff *xdp,
+ struct napi_struct *napi,
+ struct libeth_rq_napi_stats *rs,
+ const union virtchnl2_rx_desc *desc)
+{
+ libeth_xdp_run_pass(xdp, NULL, napi, rs, desc, NULL,
+ idpf_rx_singleq_process_skb_fields);
+}
+
/**
* idpf_rx_singleq_clean - Reclaim resources after receive completes
* @rx_q: rx queue to clean
@@ -958,15 +1033,16 @@ idpf_rx_singleq_extract_fields(const struct idpf_rx_queue *rx_q,
*/
static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget)
{
- unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
- struct sk_buff *skb = rx_q->skb;
+ struct libeth_rq_napi_stats rs = { };
u16 ntc = rx_q->next_to_clean;
+ LIBETH_XDP_ONSTACK_BUFF(xdp);
u16 cleaned_count = 0;
- bool failure = false;
+
+ libeth_xdp_init_buff(xdp, &rx_q->xdp, &rx_q->xdp_rxq);
/* Process Rx packets bounded by budget */
- while (likely(total_rx_pkts < (unsigned int)budget)) {
- struct idpf_rx_extracted fields = { };
+ while (likely(rs.packets < budget)) {
+ struct libeth_rqe_info fields = { };
union virtchnl2_rx_desc *rx_desc;
struct idpf_rx_buf *rx_buf;
@@ -992,73 +1068,41 @@ static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget)
idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields);
rx_buf = &rx_q->rx_buf[ntc];
- if (!libeth_rx_sync_for_cpu(rx_buf, fields.size))
- goto skip_data;
-
- if (skb)
- idpf_rx_add_frag(rx_buf, skb, fields.size);
- else
- skb = idpf_rx_build_skb(rx_buf, fields.size);
-
- /* exit if we failed to retrieve a buffer */
- if (!skb)
- break;
-
-skip_data:
- rx_buf->page = NULL;
+ libeth_xdp_process_buff(xdp, rx_buf, fields.len);
+ rx_buf->netmem = 0;
IDPF_SINGLEQ_BUMP_RING_IDX(rx_q, ntc);
cleaned_count++;
/* skip if it is non EOP desc */
- if (idpf_rx_singleq_is_non_eop(rx_desc) || unlikely(!skb))
+ if (idpf_rx_singleq_is_non_eop(rx_desc) ||
+ unlikely(!xdp->data))
continue;
#define IDPF_RXD_ERR_S FIELD_PREP(VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M, \
VIRTCHNL2_RX_BASE_DESC_ERROR_RXE_M)
if (unlikely(idpf_rx_singleq_test_staterr(rx_desc,
IDPF_RXD_ERR_S))) {
- dev_kfree_skb_any(skb);
- skb = NULL;
+ libeth_xdp_return_buff_slow(xdp);
continue;
}
- /* pad skb if needed (to make valid ethernet frame) */
- if (eth_skb_pad(skb)) {
- skb = NULL;
- continue;
- }
-
- /* probably a little skewed due to removing CRC */
- total_rx_bytes += skb->len;
-
- /* protocol */
- idpf_rx_singleq_process_skb_fields(rx_q, skb,
- rx_desc, fields.rx_ptype);
-
- /* send completed skb up the stack */
- napi_gro_receive(rx_q->pp->p.napi, skb);
- skb = NULL;
-
- /* update budget accounting */
- total_rx_pkts++;
+ idpf_xdp_run_pass(xdp, rx_q->pp->p.napi, &rs, rx_desc);
}
- rx_q->skb = skb;
-
rx_q->next_to_clean = ntc;
+ libeth_xdp_save_buff(&rx_q->xdp, xdp);
page_pool_nid_changed(rx_q->pp, numa_mem_id());
if (cleaned_count)
- failure = idpf_rx_singleq_buf_hw_alloc_all(rx_q, cleaned_count);
+ idpf_rx_singleq_buf_hw_alloc_all(rx_q, cleaned_count);
u64_stats_update_begin(&rx_q->stats_sync);
- u64_stats_add(&rx_q->q_stats.packets, total_rx_pkts);
- u64_stats_add(&rx_q->q_stats.bytes, total_rx_bytes);
+ u64_stats_add(&rx_q->q_stats.packets, rs.packets);
+ u64_stats_add(&rx_q->q_stats.bytes, rs.bytes);
u64_stats_update_end(&rx_q->stats_sync);
- /* guarantee a trip back through this routine if there was a failure */
- return failure ? budget : (int)total_rx_pkts;
+ return rs.packets;
}
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index da2a5becf62f..1d91c56f7469 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -1,51 +1,36 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */
-#include <net/libeth/rx.h>
-#include <net/libeth/tx.h>
-
#include "idpf.h"
+#include "idpf_ptp.h"
#include "idpf_virtchnl.h"
+#include "xdp.h"
+#include "xsk.h"
-struct idpf_tx_stash {
- struct hlist_node hlist;
- struct libeth_sqe buf;
-};
-
-#define idpf_tx_buf_compl_tag(buf) (*(u32 *)&(buf)->priv)
+#define idpf_tx_buf_next(buf) (*(u32 *)&(buf)->priv)
LIBETH_SQE_CHECK_PRIV(u32);
-static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
- unsigned int count);
-
/**
- * idpf_buf_lifo_push - push a buffer pointer onto stack
- * @stack: pointer to stack struct
- * @buf: pointer to buf to push
+ * idpf_chk_linearize - Check if skb exceeds max descriptors per packet
+ * @skb: send buffer
+ * @max_bufs: maximum scatter gather buffers for single packet
+ * @count: number of buffers this packet needs
*
- * Returns 0 on success, negative on failure
- **/
-static int idpf_buf_lifo_push(struct idpf_buf_lifo *stack,
- struct idpf_tx_stash *buf)
+ * Make sure we don't exceed maximum scatter gather buffers for a single
+ * packet.
+ * The TSO case has been handled earlier in idpf_features_check().
+ */
+static bool idpf_chk_linearize(const struct sk_buff *skb,
+ unsigned int max_bufs,
+ unsigned int count)
{
- if (unlikely(stack->top == stack->size))
- return -ENOSPC;
-
- stack->bufs[stack->top++] = buf;
-
- return 0;
-}
+ if (likely(count <= max_bufs))
+ return false;
-/**
- * idpf_buf_lifo_pop - pop a buffer pointer from stack
- * @stack: pointer to stack struct
- **/
-static struct idpf_tx_stash *idpf_buf_lifo_pop(struct idpf_buf_lifo *stack)
-{
- if (unlikely(!stack->top))
- return NULL;
+ if (skb_is_gso(skb))
+ return false;
- return stack->bufs[--stack->top];
+ return true;
}
/**
@@ -69,59 +54,42 @@ void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
}
}
-/**
- * idpf_tx_buf_rel_all - Free any empty Tx buffers
- * @txq: queue to be cleaned
- */
-static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
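+/**
+ * idpf_tx_buf_clean - Release all buffers from the Tx buffer pool
+ * @txq: queue to be cleaned
+ */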
+static void idpf_tx_buf_clean(struct idpf_tx_queue *txq)
{
struct libeth_sq_napi_stats ss = { };
- struct idpf_buf_lifo *buf_stack;
- struct idpf_tx_stash *stash;
+ struct xdp_frame_bulk bq;
struct libeth_cq_pp cp = {
.dev = txq->dev,
+ .bq = &bq,
.ss = &ss,
};
- struct hlist_node *tmp;
- u32 i, tag;
- /* Buffers already cleared, nothing to do */
- if (!txq->tx_buf)
- return;
+ xdp_frame_bulk_init(&bq);
/* Free all the Tx buffer sk_buffs */
- for (i = 0; i < txq->desc_count; i++)
- libeth_tx_complete(&txq->tx_buf[i], &cp);
+ for (u32 i = 0; i < txq->buf_pool_size; i++)
+ libeth_tx_complete_any(&txq->tx_buf[i], &cp);
- kfree(txq->tx_buf);
- txq->tx_buf = NULL;
-
- if (!idpf_queue_has(FLOW_SCH_EN, txq))
- return;
+ xdp_flush_frame_bulk(&bq);
+}
- buf_stack = &txq->stash->buf_stack;
- if (!buf_stack->bufs)
+/**
+ * idpf_tx_buf_rel_all - Free any empty Tx buffers
+ * @txq: queue to be cleaned
+ */
+static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
+{
+ /* Buffers already cleared, nothing to do */
+ if (!txq->tx_buf)
return;
- /*
- * If a Tx timeout occurred, there are potentially still bufs in the
- * hash table, free them here.
- */
- hash_for_each_safe(txq->stash->sched_buf_hash, tag, tmp, stash,
- hlist) {
- if (!stash)
- continue;
-
- libeth_tx_complete(&stash->buf, &cp);
- hash_del(&stash->hlist);
- idpf_buf_lifo_push(buf_stack, stash);
- }
-
- for (i = 0; i < buf_stack->size; i++)
- kfree(buf_stack->bufs[i]);
+ if (idpf_queue_has(XSK, txq))
+ idpf_xsksq_clean(txq);
+ else
+ idpf_tx_buf_clean(txq);
- kfree(buf_stack->bufs);
- buf_stack->bufs = NULL;
+ kfree(txq->tx_buf);
+ txq->tx_buf = NULL;
}
/**
@@ -132,12 +100,24 @@ static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
*/
static void idpf_tx_desc_rel(struct idpf_tx_queue *txq)
{
+ bool xdp = idpf_queue_has(XDP, txq);
+
+ if (xdp)
+ libeth_xdpsq_deinit_timer(txq->timer);
+
idpf_tx_buf_rel_all(txq);
- netdev_tx_reset_subqueue(txq->netdev, txq->idx);
+
+ if (!xdp)
+ netdev_tx_reset_subqueue(txq->netdev, txq->idx);
+
+ idpf_xsk_clear_queue(txq, VIRTCHNL2_QUEUE_TYPE_TX);
if (!txq->desc_ring)
return;
+ if (!xdp && txq->refillq)
+ kfree(txq->refillq->ring);
+
dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma);
txq->desc_ring = NULL;
txq->next_to_use = 0;
@@ -152,12 +132,14 @@ static void idpf_tx_desc_rel(struct idpf_tx_queue *txq)
*/
static void idpf_compl_desc_rel(struct idpf_compl_queue *complq)
{
- if (!complq->comp)
+ idpf_xsk_clear_queue(complq, VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
+
+ if (!complq->desc_ring)
return;
dma_free_coherent(complq->netdev->dev.parent, complq->size,
- complq->comp, complq->dma);
- complq->comp = NULL;
+ complq->desc_ring, complq->dma);
+ complq->desc_ring = NULL;
complq->next_to_use = 0;
complq->next_to_clean = 0;
}
@@ -194,41 +176,18 @@ static void idpf_tx_desc_rel_all(struct idpf_vport *vport)
*/
static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q)
{
- struct idpf_buf_lifo *buf_stack;
- int buf_size;
- int i;
-
/* Allocate book keeping buffers only. Buffers to be supplied to HW
* are allocated by kernel network stack and received as part of skb
*/
- buf_size = sizeof(struct idpf_tx_buf) * tx_q->desc_count;
- tx_q->tx_buf = kzalloc(buf_size, GFP_KERNEL);
+ if (idpf_queue_has(FLOW_SCH_EN, tx_q))
+ tx_q->buf_pool_size = U16_MAX;
+ else
+ tx_q->buf_pool_size = tx_q->desc_count;
+ tx_q->tx_buf = kcalloc(tx_q->buf_pool_size, sizeof(*tx_q->tx_buf),
+ GFP_KERNEL);
if (!tx_q->tx_buf)
return -ENOMEM;
- if (!idpf_queue_has(FLOW_SCH_EN, tx_q))
- return 0;
-
- buf_stack = &tx_q->stash->buf_stack;
-
- /* Initialize tx buf stack for out-of-order completions if
- * flow scheduling offload is enabled
- */
- buf_stack->bufs = kcalloc(tx_q->desc_count, sizeof(*buf_stack->bufs),
- GFP_KERNEL);
- if (!buf_stack->bufs)
- return -ENOMEM;
-
- buf_stack->size = tx_q->desc_count;
- buf_stack->top = tx_q->desc_count;
-
- for (i = 0; i < tx_q->desc_count; i++) {
- buf_stack->bufs[i] = kzalloc(sizeof(*buf_stack->bufs[i]),
- GFP_KERNEL);
- if (!buf_stack->bufs[i])
- return -ENOMEM;
- }
-
return 0;
}
@@ -243,6 +202,7 @@ static int idpf_tx_desc_alloc(const struct idpf_vport *vport,
struct idpf_tx_queue *tx_q)
{
struct device *dev = tx_q->dev;
+ struct idpf_sw_queue *refillq;
int err;
err = idpf_tx_buf_alloc_all(tx_q);
@@ -266,6 +226,33 @@ static int idpf_tx_desc_alloc(const struct idpf_vport *vport,
tx_q->next_to_clean = 0;
idpf_queue_set(GEN_CHK, tx_q);
+ idpf_xsk_setup_queue(vport, tx_q, VIRTCHNL2_QUEUE_TYPE_TX);
+
+ if (!idpf_queue_has(FLOW_SCH_EN, tx_q))
+ return 0;
+
+ refillq = tx_q->refillq;
+ refillq->desc_count = tx_q->buf_pool_size;
+ refillq->ring = kcalloc(refillq->desc_count, sizeof(u32),
+ GFP_KERNEL);
+ if (!refillq->ring) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ for (unsigned int i = 0; i < refillq->desc_count; i++)
+ refillq->ring[i] =
+ FIELD_PREP(IDPF_RFL_BI_BUFID_M, i) |
+ FIELD_PREP(IDPF_RFL_BI_GEN_M,
+ idpf_queue_has(GEN_CHK, refillq));
+
+ /* Go ahead and flip the GEN bit since this counts as filling
+ * up the ring, i.e. the ring has already wrapped.
+ */
+ idpf_queue_change(GEN_CHK, refillq);
+
+ tx_q->last_re = tx_q->desc_count - IDPF_TX_SPLITQ_RE_MIN_GAP;
+
return 0;
err_alloc:
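The refill queue initialized in this hunk is a ring of u32 entries, each packing a buffer ID plus a software generation bit; pre-filling every slot and then flipping GEN_CHK is what later lets the consumer tell freshly reposted slots from stale ones. A compact, self-contained sketch of the idea, assuming a made-up bit layout in place of IDPF_RFL_BI_BUFID_M / IDPF_RFL_BI_GEN_M:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BUFID_MASK 0x7fffffffu /* assumed: low bits carry the buffer ID */
#define GEN_BIT    0x80000000u /* assumed: top bit carries the generation */

int main(void)
{
	uint32_t ring[4];
	bool wr_gen = true; /* producer generation, like GEN_CHK */
	bool rd_gen = true; /* consumer generation, like RFL_GEN_CHK */

	/* Pre-fill every slot with its own index, tagged with the current gen. */
	for (uint32_t i = 0; i < 4; i++)
		ring[i] = (i & BUFID_MASK) | (wr_gen ? GEN_BIT : 0);

	/* The ring now counts as full (one whole wrap), so flip the producer
	 * generation: the next slots it rewrites carry the opposite gen.
	 */
	wr_gen = !wr_gen;

	/* Consumer side: a slot is usable only while its gen matches rd_gen. */
	for (uint32_t i = 0; i < 4; i++) {
		uint32_t desc = ring[i];
		bool valid = rd_gen == !!(desc & GEN_BIT);

		printf("slot %u: buf_id %u, %s\n", i,
		       desc & BUFID_MASK, valid ? "valid" : "stale");
	}

	return 0;
}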
@@ -284,18 +271,25 @@ err_alloc:
static int idpf_compl_desc_alloc(const struct idpf_vport *vport,
struct idpf_compl_queue *complq)
{
- complq->size = array_size(complq->desc_count, sizeof(*complq->comp));
+ u32 desc_size;
- complq->comp = dma_alloc_coherent(complq->netdev->dev.parent,
- complq->size, &complq->dma,
- GFP_KERNEL);
- if (!complq->comp)
+ desc_size = idpf_queue_has(FLOW_SCH_EN, complq) ?
+ sizeof(*complq->comp) : sizeof(*complq->comp_4b);
+ complq->size = array_size(complq->desc_count, desc_size);
+
+ complq->desc_ring = dma_alloc_coherent(complq->netdev->dev.parent,
+ complq->size, &complq->dma,
+ GFP_KERNEL);
+ if (!complq->desc_ring)
return -ENOMEM;
complq->next_to_use = 0;
complq->next_to_clean = 0;
idpf_queue_set(GEN_CHK, complq);
+ idpf_xsk_setup_queue(vport, complq,
+ VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
+
return 0;
}
@@ -316,8 +310,6 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
for (i = 0; i < vport->num_txq_grp; i++) {
for (j = 0; j < vport->txq_grps[i].num_txq; j++) {
struct idpf_tx_queue *txq = vport->txq_grps[i].txqs[j];
- u8 gen_bits = 0;
- u16 bufidx_mask;
err = idpf_tx_desc_alloc(vport, txq);
if (err) {
@@ -326,34 +318,6 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
i);
goto err_out;
}
-
- if (!idpf_is_queue_model_split(vport->txq_model))
- continue;
-
- txq->compl_tag_cur_gen = 0;
-
- /* Determine the number of bits in the bufid
- * mask and add one to get the start of the
- * generation bits
- */
- bufidx_mask = txq->desc_count - 1;
- while (bufidx_mask >> 1) {
- txq->compl_tag_gen_s++;
- bufidx_mask = bufidx_mask >> 1;
- }
- txq->compl_tag_gen_s++;
-
- gen_bits = IDPF_TX_SPLITQ_COMPL_TAG_WIDTH -
- txq->compl_tag_gen_s;
- txq->compl_tag_gen_max = GETMAXVAL(gen_bits);
-
- /* Set bufid mask based on location of first
- * gen bit; it cannot simply be the descriptor
- * ring size-1 since we can have size values
- * where not all of those bits are set.
- */
- txq->compl_tag_bufid_m =
- GETMAXVAL(txq->compl_tag_gen_s);
}
if (!idpf_is_queue_model_split(vport->txq_model))
@@ -382,12 +346,12 @@ err_out:
*/
static void idpf_rx_page_rel(struct libeth_fqe *rx_buf)
{
- if (unlikely(!rx_buf->page))
+ if (unlikely(!rx_buf->netmem))
return;
- page_pool_put_full_page(rx_buf->page->pp, rx_buf->page, false);
+ libeth_rx_recycle_slow(rx_buf->netmem);
- rx_buf->page = NULL;
+ rx_buf->netmem = 0;
rx_buf->offset = 0;
}
@@ -425,6 +389,11 @@ static void idpf_rx_buf_rel_bufq(struct idpf_buf_queue *bufq)
if (!bufq->buf)
return;
+ if (idpf_queue_has(XSK, bufq)) {
+ idpf_xskfq_rel(bufq);
+ return;
+ }
+
/* Free all the bufs allocated and given to hw on Rx queue */
for (u32 i = 0; i < bufq->desc_count; i++)
idpf_rx_page_rel(&bufq->buf[i]);
@@ -473,14 +442,14 @@ static void idpf_rx_desc_rel(struct idpf_rx_queue *rxq, struct device *dev,
if (!rxq)
return;
- if (rxq->skb) {
- dev_kfree_skb_any(rxq->skb);
- rxq->skb = NULL;
- }
+ if (!idpf_queue_has(XSK, rxq))
+ libeth_xdp_return_stash(&rxq->xdp);
if (!idpf_is_queue_model_split(model))
idpf_rx_buf_rel_all(rxq);
+ idpf_xsk_clear_queue(rxq, VIRTCHNL2_QUEUE_TYPE_RX);
+
rxq->next_to_alloc = 0;
rxq->next_to_clean = 0;
rxq->next_to_use = 0;
@@ -503,6 +472,7 @@ static void idpf_rx_desc_rel_bufq(struct idpf_buf_queue *bufq,
return;
idpf_rx_buf_rel_bufq(bufq);
+ idpf_xsk_clear_queue(bufq, VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
bufq->next_to_alloc = 0;
bufq->next_to_clean = 0;
@@ -585,6 +555,7 @@ static int idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq)
struct libeth_fq fq = {
.count = bufq->desc_count,
.type = LIBETH_FQE_HDR,
+ .xdp = idpf_xdp_enabled(bufq->q_vector->vport),
.nid = idpf_q_vector_to_mem(bufq->q_vector),
};
int ret;
@@ -602,18 +573,18 @@ static int idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq)
}
/**
- * idpf_rx_post_buf_refill - Post buffer id to refill queue
+ * idpf_post_buf_refill - Post buffer id to refill queue
* @refillq: refill queue to post to
* @buf_id: buffer id to post
*/
-static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
+static void idpf_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
{
u32 nta = refillq->next_to_use;
/* store the buffer ID and the SW maintained GEN bit to the refillq */
refillq->ring[nta] =
- FIELD_PREP(IDPF_RX_BI_BUFID_M, buf_id) |
- FIELD_PREP(IDPF_RX_BI_GEN_M,
+ FIELD_PREP(IDPF_RFL_BI_BUFID_M, buf_id) |
+ FIELD_PREP(IDPF_RFL_BI_GEN_M,
idpf_queue_has(GEN_CHK, refillq));
if (unlikely(++nta == refillq->desc_count)) {
@@ -784,10 +755,14 @@ static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq,
.count = bufq->desc_count,
.type = type,
.hsplit = idpf_queue_has(HSPLIT_EN, bufq),
+ .xdp = idpf_xdp_enabled(bufq->q_vector->vport),
.nid = idpf_q_vector_to_mem(bufq->q_vector),
};
int ret;
+ if (idpf_queue_has(XSK, bufq))
+ return idpf_xskfq_init(bufq);
+
ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi);
if (ret)
return ret;
@@ -811,6 +786,8 @@ int idpf_rx_bufs_init_all(struct idpf_vport *vport)
bool split = idpf_is_queue_model_split(vport->rxq_model);
int i, j, err;
+ idpf_xdp_copy_prog_to_rqs(vport, vport->xdp_prog);
+
for (i = 0; i < vport->num_rxq_grp; i++) {
struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
u32 truesize = 0;
@@ -881,6 +858,8 @@ static int idpf_rx_desc_alloc(const struct idpf_vport *vport,
rxq->next_to_use = 0;
idpf_queue_set(GEN_CHK, rxq);
+ idpf_xsk_setup_queue(vport, rxq, VIRTCHNL2_QUEUE_TYPE_RX);
+
return 0;
}
@@ -906,9 +885,10 @@ static int idpf_bufq_desc_alloc(const struct idpf_vport *vport,
bufq->next_to_alloc = 0;
bufq->next_to_clean = 0;
bufq->next_to_use = 0;
-
idpf_queue_set(GEN_CHK, bufq);
+ idpf_xsk_setup_queue(vport, bufq, VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
+
return 0;
}
@@ -942,8 +922,8 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
err = idpf_rx_desc_alloc(vport, q);
if (err) {
pci_err(vport->adapter->pdev,
- "Memory allocation for Rx Queue %u failed\n",
- i);
+ "Memory allocation for Rx queue %u from queue group %u failed\n",
+ j, i);
goto err_out;
}
}
@@ -959,8 +939,8 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
err = idpf_bufq_desc_alloc(vport, q);
if (err) {
pci_err(vport->adapter->pdev,
- "Memory allocation for Rx Buffer Queue %u failed\n",
- i);
+ "Memory allocation for Rx Buffer Queue %u from queue group %u failed\n",
+ j, i);
goto err_out;
}
}
@@ -974,6 +954,341 @@ err_out:
return err;
}
+static int idpf_init_queue_set(const struct idpf_queue_set *qs)
+{
+ const struct idpf_vport *vport = qs->vport;
+ bool splitq;
+ int err;
+
+ splitq = idpf_is_queue_model_split(vport->rxq_model);
+
+ for (u32 i = 0; i < qs->num; i++) {
+ const struct idpf_queue_ptr *q = &qs->qs[i];
+ struct idpf_buf_queue *bufq;
+
+ switch (q->type) {
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ err = idpf_rx_desc_alloc(vport, q->rxq);
+ if (err)
+ break;
+
+ err = idpf_xdp_rxq_info_init(q->rxq);
+ if (err)
+ break;
+
+ if (!splitq)
+ err = idpf_rx_bufs_init_singleq(q->rxq);
+
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ bufq = q->bufq;
+
+ err = idpf_bufq_desc_alloc(vport, bufq);
+ if (err)
+ break;
+
+ for (u32 j = 0; j < bufq->q_vector->num_bufq; j++) {
+ struct idpf_buf_queue * const *bufqs;
+ enum libeth_fqe_type type;
+ u32 ts;
+
+ bufqs = bufq->q_vector->bufq;
+ if (bufqs[j] != bufq)
+ continue;
+
+ if (j) {
+ type = LIBETH_FQE_SHORT;
+ ts = bufqs[j - 1]->truesize >> 1;
+ } else {
+ type = LIBETH_FQE_MTU;
+ ts = 0;
+ }
+
+ bufq->truesize = ts;
+
+ err = idpf_rx_bufs_init(bufq, type);
+ break;
+ }
+
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ err = idpf_tx_desc_alloc(vport, q->txq);
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ err = idpf_compl_desc_alloc(vport, q->complq);
+ break;
+ default:
+ continue;
+ }
+
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void idpf_clean_queue_set(const struct idpf_queue_set *qs)
+{
+ const struct idpf_vport *vport = qs->vport;
+ struct device *dev = vport->netdev->dev.parent;
+
+ for (u32 i = 0; i < qs->num; i++) {
+ const struct idpf_queue_ptr *q = &qs->qs[i];
+
+ switch (q->type) {
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ idpf_xdp_rxq_info_deinit(q->rxq, vport->rxq_model);
+ idpf_rx_desc_rel(q->rxq, dev, vport->rxq_model);
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ idpf_rx_desc_rel_bufq(q->bufq, dev);
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ idpf_tx_desc_rel(q->txq);
+
+ if (idpf_queue_has(XDP, q->txq)) {
+ q->txq->pending = 0;
+ q->txq->xdp_tx = 0;
+ } else {
+ q->txq->txq_grp->num_completions_pending = 0;
+ }
+
+ writel(q->txq->next_to_use, q->txq->tail);
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ idpf_compl_desc_rel(q->complq);
+ q->complq->num_completions = 0;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void idpf_qvec_ena_irq(struct idpf_q_vector *qv)
+{
+ if (qv->num_txq) {
+ u32 itr;
+
+ if (IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode))
+ itr = qv->vport->tx_itr_profile[qv->tx_dim.profile_ix];
+ else
+ itr = qv->tx_itr_value;
+
+ idpf_vport_intr_write_itr(qv, itr, true);
+ }
+
+ if (qv->num_rxq) {
+ u32 itr;
+
+ if (IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode))
+ itr = qv->vport->rx_itr_profile[qv->rx_dim.profile_ix];
+ else
+ itr = qv->rx_itr_value;
+
+ idpf_vport_intr_write_itr(qv, itr, false);
+ }
+
+ if (qv->num_txq || qv->num_rxq)
+ idpf_vport_intr_update_itr_ena_irq(qv);
+}
+
+/**
+ * idpf_vector_to_queue_set - create a queue set for a given queue vector
+ * @qv: queue vector corresponding to the queue pair
+ *
+ * Returns a pointer to a dynamically allocated array of pointers to all
+ * queues associated with the given queue vector (@qv).
+ * Note that the caller is responsible for freeing the memory allocated by
+ * this function using kfree().
+ *
+ * Return: &idpf_queue_set on success, %NULL in case of error.
+ */
+static struct idpf_queue_set *
+idpf_vector_to_queue_set(struct idpf_q_vector *qv)
+{
+ bool xdp = qv->vport->xdp_txq_offset && !qv->num_xsksq;
+ struct idpf_vport *vport = qv->vport;
+ struct idpf_queue_set *qs;
+ u32 num;
+
+ num = qv->num_rxq + qv->num_bufq + qv->num_txq + qv->num_complq;
+ num += xdp ? qv->num_rxq * 2 : qv->num_xsksq * 2;
+ if (!num)
+ return NULL;
+
+ qs = idpf_alloc_queue_set(vport, num);
+ if (!qs)
+ return NULL;
+
+ num = 0;
+
+ for (u32 i = 0; i < qv->num_bufq; i++) {
+ qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ qs->qs[num++].bufq = qv->bufq[i];
+ }
+
+ for (u32 i = 0; i < qv->num_rxq; i++) {
+ qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_RX;
+ qs->qs[num++].rxq = qv->rx[i];
+ }
+
+ for (u32 i = 0; i < qv->num_txq; i++) {
+ qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ qs->qs[num++].txq = qv->tx[i];
+ }
+
+ for (u32 i = 0; i < qv->num_complq; i++) {
+ qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ qs->qs[num++].complq = qv->complq[i];
+ }
+
+ if (!vport->xdp_txq_offset)
+ goto finalize;
+
+ if (xdp) {
+ for (u32 i = 0; i < qv->num_rxq; i++) {
+ u32 idx = vport->xdp_txq_offset + qv->rx[i]->idx;
+
+ qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ qs->qs[num++].txq = vport->txqs[idx];
+
+ qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ qs->qs[num++].complq = vport->txqs[idx]->complq;
+ }
+ } else {
+ for (u32 i = 0; i < qv->num_xsksq; i++) {
+ qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ qs->qs[num++].txq = qv->xsksq[i];
+
+ qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ qs->qs[num++].complq = qv->xsksq[i]->complq;
+ }
+ }
+
+finalize:
+ if (num != qs->num) {
+ kfree(qs);
+ return NULL;
+ }
+
+ return qs;
+}
+
+static int idpf_qp_enable(const struct idpf_queue_set *qs, u32 qid)
+{
+ struct idpf_vport *vport = qs->vport;
+ struct idpf_q_vector *q_vector;
+ int err;
+
+ q_vector = idpf_find_rxq_vec(vport, qid);
+
+ err = idpf_init_queue_set(qs);
+ if (err) {
+ netdev_err(vport->netdev, "Could not initialize queues in pair %u: %pe\n",
+ qid, ERR_PTR(err));
+ return err;
+ }
+
+ if (!vport->xdp_txq_offset)
+ goto config;
+
+ q_vector->xsksq = kcalloc(DIV_ROUND_UP(vport->num_rxq_grp,
+ vport->num_q_vectors),
+ sizeof(*q_vector->xsksq), GFP_KERNEL);
+ if (!q_vector->xsksq)
+ return -ENOMEM;
+
+ for (u32 i = 0; i < qs->num; i++) {
+ const struct idpf_queue_ptr *q = &qs->qs[i];
+
+ if (q->type != VIRTCHNL2_QUEUE_TYPE_TX)
+ continue;
+
+ if (!idpf_queue_has(XSK, q->txq))
+ continue;
+
+ idpf_xsk_init_wakeup(q_vector);
+
+ q->txq->q_vector = q_vector;
+ q_vector->xsksq[q_vector->num_xsksq++] = q->txq;
+ }
+
+config:
+ err = idpf_send_config_queue_set_msg(qs);
+ if (err) {
+ netdev_err(vport->netdev, "Could not configure queues in pair %u: %pe\n",
+ qid, ERR_PTR(err));
+ return err;
+ }
+
+ err = idpf_send_enable_queue_set_msg(qs);
+ if (err) {
+ netdev_err(vport->netdev, "Could not enable queues in pair %u: %pe\n",
+ qid, ERR_PTR(err));
+ return err;
+ }
+
+ napi_enable(&q_vector->napi);
+ idpf_qvec_ena_irq(q_vector);
+
+ netif_start_subqueue(vport->netdev, qid);
+
+ return 0;
+}
+
+static int idpf_qp_disable(const struct idpf_queue_set *qs, u32 qid)
+{
+ struct idpf_vport *vport = qs->vport;
+ struct idpf_q_vector *q_vector;
+ int err;
+
+ q_vector = idpf_find_rxq_vec(vport, qid);
+ netif_stop_subqueue(vport->netdev, qid);
+
+ writel(0, q_vector->intr_reg.dyn_ctl);
+ napi_disable(&q_vector->napi);
+
+ err = idpf_send_disable_queue_set_msg(qs);
+ if (err) {
+ netdev_err(vport->netdev, "Could not disable queues in pair %u: %pe\n",
+ qid, ERR_PTR(err));
+ return err;
+ }
+
+ idpf_clean_queue_set(qs);
+
+ kfree(q_vector->xsksq);
+ q_vector->num_xsksq = 0;
+
+ return 0;
+}
+
+/**
+ * idpf_qp_switch - enable or disable queues associated with queue pair
+ * @vport: vport to switch the pair for
+ * @qid: index of the queue pair to switch
+ * @en: whether to enable or disable the pair
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int idpf_qp_switch(struct idpf_vport *vport, u32 qid, bool en)
+{
+ struct idpf_q_vector *q_vector = idpf_find_rxq_vec(vport, qid);
+ struct idpf_queue_set *qs __free(kfree) = NULL;
+
+ if (idpf_find_txq_vec(vport, qid) != q_vector)
+ return -EINVAL;
+
+ qs = idpf_vector_to_queue_set(q_vector);
+ if (!qs)
+ return -ENOMEM;
+
+ return en ? idpf_qp_enable(qs, qid) : idpf_qp_disable(qs, qid);
+}
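idpf_qp_switch() gives callers a single entry point to take one Rx/Tx queue pair down and bring it back up, which is the kind of restart an XSK pool attach/detach path needs. A hedged sketch of how such a caller might look; idpf_qp_switch() is defined above, but this wrapper, its name, and its call site are illustrative assumptions only:

/*
 * Illustrative only: a hypothetical helper showing the intended call pattern.
 * Neither this function nor its caller exists in the patch.
 */
static int idpf_restart_queue_pair(struct idpf_vport *vport, u32 qid)
{
	int err;

	/* Quiesce the pair: stop the subqueue, mask the IRQ, free the rings. */
	err = idpf_qp_switch(vport, qid, false);
	if (err)
		return err;

	/* ... swap the XSK pool / XDP program for this queue here ... */

	/* Re-allocate the rings, reconfigure the HW and restart the pair. */
	return idpf_qp_switch(vport, qid, true);
}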
+
/**
* idpf_txq_group_rel - Release all resources for txq groups
* @vport: vport to release txq groups on
@@ -994,6 +1309,11 @@ static void idpf_txq_group_rel(struct idpf_vport *vport)
struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
for (j = 0; j < txq_grp->num_txq; j++) {
+ if (flow_sch_en) {
+ kfree(txq_grp->txqs[j]->refillq);
+ txq_grp->txqs[j]->refillq = NULL;
+ }
+
kfree(txq_grp->txqs[j]);
txq_grp->txqs[j] = NULL;
}
@@ -1003,9 +1323,6 @@ static void idpf_txq_group_rel(struct idpf_vport *vport)
kfree(txq_grp->complq);
txq_grp->complq = NULL;
-
- if (flow_sch_en)
- kfree(txq_grp->stashes);
}
kfree(vport->txq_grps);
vport->txq_grps = NULL;
@@ -1087,8 +1404,12 @@ static void idpf_vport_queue_grp_rel_all(struct idpf_vport *vport)
*/
void idpf_vport_queues_rel(struct idpf_vport *vport)
{
+ idpf_xdp_copy_prog_to_rqs(vport, NULL);
+
idpf_tx_desc_rel_all(vport);
idpf_rx_desc_rel_all(vport);
+
+ idpf_xdpsqs_put(vport);
idpf_vport_queue_grp_rel_all(vport);
kfree(vport->txqs);
@@ -1107,6 +1428,8 @@ void idpf_vport_queues_rel(struct idpf_vport *vport)
*/
static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
{
+ struct idpf_ptp_vport_tx_tstamp_caps *caps = vport->tx_tstamp_caps;
+ struct work_struct *tstamp_task = &vport->tstamp_task;
int i, j, k = 0;
vport->txqs = kcalloc(vport->num_txq, sizeof(*vport->txqs),
@@ -1121,6 +1444,12 @@ static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
for (j = 0; j < tx_grp->num_txq; j++, k++) {
vport->txqs[k] = tx_grp->txqs[j];
vport->txqs[k]->idx = k;
+
+ if (!caps)
+ continue;
+
+ vport->txqs[k]->cached_tstamp_caps = caps;
+ vport->txqs[k]->tstamp_task = tstamp_task;
}
}
@@ -1154,6 +1483,18 @@ void idpf_vport_init_num_qs(struct idpf_vport *vport,
if (idpf_is_queue_model_split(vport->rxq_model))
vport->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq);
+ vport->xdp_prog = config_data->xdp_prog;
+ if (idpf_xdp_enabled(vport)) {
+ vport->xdp_txq_offset = config_data->num_req_tx_qs;
+ vport->num_xdp_txq = le16_to_cpu(vport_msg->num_tx_q) -
+ vport->xdp_txq_offset;
+ vport->xdpsq_share = libeth_xdpsq_shared(vport->num_xdp_txq);
+ } else {
+ vport->xdp_txq_offset = 0;
+ vport->num_xdp_txq = 0;
+ vport->xdpsq_share = false;
+ }
+
/* Adjust number of buffer queues per Rx queue group. */
if (!idpf_is_queue_model_split(vport->rxq_model)) {
vport->num_bufqs_per_qgrp = 0;
@@ -1225,22 +1566,17 @@ int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx,
int dflt_splitq_txq_grps = 0, dflt_singleq_txqs = 0;
int dflt_splitq_rxq_grps = 0, dflt_singleq_rxqs = 0;
u16 num_req_tx_qs = 0, num_req_rx_qs = 0;
+ struct idpf_vport_user_config_data *user;
struct idpf_vport_config *vport_config;
u16 num_txq_grps, num_rxq_grps;
- u32 num_qs;
+ u32 num_qs, num_xdpsq;
vport_config = adapter->vport_config[vport_idx];
if (vport_config) {
num_req_tx_qs = vport_config->user_config.num_req_tx_qs;
num_req_rx_qs = vport_config->user_config.num_req_rx_qs;
} else {
- int num_cpus;
-
- /* Restrict num of queues to cpus online as a default
- * configuration to give best performance. User can always
- * override to a max number of queues via ethtool.
- */
- num_cpus = num_online_cpus();
+ u32 num_cpus = netif_get_num_default_rss_queues();
dflt_splitq_txq_grps = min_t(int, max_q->max_txq, num_cpus);
dflt_singleq_txqs = min_t(int, max_q->max_txq, num_cpus);
@@ -1275,6 +1611,24 @@ int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx,
vport_msg->num_rx_bufq = 0;
}
+ if (!vport_config)
+ return 0;
+
+ user = &vport_config->user_config;
+ user->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q);
+ user->num_req_tx_qs = le16_to_cpu(vport_msg->num_tx_q);
+
+ if (vport_config->user_config.xdp_prog)
+ num_xdpsq = libeth_xdpsq_num(user->num_req_rx_qs,
+ user->num_req_tx_qs,
+ vport_config->max_q.max_txq);
+ else
+ num_xdpsq = 0;
+
+ vport_msg->num_tx_q = cpu_to_le16(user->num_req_tx_qs + num_xdpsq);
+ if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->txq_model)))
+ vport_msg->num_tx_complq = vport_msg->num_tx_q;
+
return 0;
}
@@ -1324,14 +1678,13 @@ static void idpf_vport_calc_numq_per_grp(struct idpf_vport *vport,
static void idpf_rxq_set_descids(const struct idpf_vport *vport,
struct idpf_rx_queue *q)
{
- if (idpf_is_queue_model_split(vport->rxq_model)) {
- q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
- } else {
- if (vport->base_rxd)
- q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M;
- else
- q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
- }
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ return;
+
+ if (vport->base_rxd)
+ q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M;
+ else
+ q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
}
/**
@@ -1358,7 +1711,6 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
for (i = 0; i < vport->num_txq_grp; i++) {
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
struct idpf_adapter *adapter = vport->adapter;
- struct idpf_txq_stash *stashes;
int j;
tx_qgrp->vport = vport;
@@ -1371,15 +1723,6 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
goto err_alloc;
}
- if (split && flow_sch_en) {
- stashes = kcalloc(num_txq, sizeof(*stashes),
- GFP_KERNEL);
- if (!stashes)
- goto err_alloc;
-
- tx_qgrp->stashes = stashes;
- }
-
for (j = 0; j < tx_qgrp->num_txq; j++) {
struct idpf_tx_queue *q = tx_qgrp->txqs[j];
@@ -1389,6 +1732,7 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter);
q->netdev = vport->netdev;
q->txq_grp = tx_qgrp;
+ q->rel_q_id = j;
if (!split) {
q->clean_budget = vport->compln_clean_budget;
@@ -1399,12 +1743,14 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
if (!flow_sch_en)
continue;
- if (split) {
- q->stash = &stashes[j];
- hash_init(q->stash->sched_buf_hash);
- }
-
idpf_queue_set(FLOW_SCH_EN, q);
+
+ q->refillq = kzalloc(sizeof(*q->refillq), GFP_KERNEL);
+ if (!q->refillq)
+ goto err_alloc;
+
+ idpf_queue_set(GEN_CHK, q->refillq);
+ idpf_queue_set(RFL_GEN_CHK, q->refillq);
}
if (!split)
@@ -1547,7 +1893,6 @@ skip_splitq_rx_init:
setup_rxq:
q->desc_count = vport->rxq_desc_count;
q->rx_ptype_lkup = vport->rx_ptype_lkup;
- q->netdev = vport->netdev;
q->bufq_sets = rx_qgrp->splitq.bufq_sets;
q->idx = (i * num_rxq) + j;
q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
@@ -1608,15 +1953,19 @@ int idpf_vport_queues_alloc(struct idpf_vport *vport)
if (err)
goto err_out;
- err = idpf_tx_desc_alloc_all(vport);
+ err = idpf_vport_init_fast_path_txqs(vport);
if (err)
goto err_out;
- err = idpf_rx_desc_alloc_all(vport);
+ err = idpf_xdpsqs_get(vport);
if (err)
goto err_out;
- err = idpf_vport_init_fast_path_txqs(vport);
+ err = idpf_tx_desc_alloc_all(vport);
+ if (err)
+ goto err_out;
+
+ err = idpf_rx_desc_alloc_all(vport);
if (err)
goto err_out;
@@ -1629,105 +1978,37 @@ err_out:
}
/**
- * idpf_tx_handle_sw_marker - Handle queue marker packet
- * @tx_q: tx queue to handle software marker
+ * idpf_tx_read_tstamp - schedule work to read a Tx timestamp value
+ * @txq: queue to read the timestamp from
+ * @skb: socket buffer to provide Tx timestamp value
+ *
+ * Schedule work to read the Tx timestamp value generated once the packet has
+ * been transmitted.
*/
-static void idpf_tx_handle_sw_marker(struct idpf_tx_queue *tx_q)
+static void idpf_tx_read_tstamp(struct idpf_tx_queue *txq, struct sk_buff *skb)
{
- struct idpf_netdev_priv *priv = netdev_priv(tx_q->netdev);
- struct idpf_vport *vport = priv->vport;
- int i;
-
- idpf_queue_clear(SW_MARKER, tx_q);
- /* Hardware must write marker packets to all queues associated with
- * completion queues. So check if all queues received marker packets
- */
- for (i = 0; i < vport->num_txq; i++)
- /* If we're still waiting on any other TXQ marker completions,
- * just return now since we cannot wake up the marker_wq yet.
- */
- if (idpf_queue_has(SW_MARKER, vport->txqs[i]))
- return;
+ struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
+ struct idpf_ptp_tx_tstamp_status *tx_tstamp_status;
- /* Drain complete */
- set_bit(IDPF_VPORT_SW_MARKER, vport->flags);
- wake_up(&vport->sw_marker_wq);
-}
-
-/**
- * idpf_tx_clean_stashed_bufs - clean bufs that were stored for
- * out of order completions
- * @txq: queue to clean
- * @compl_tag: completion tag of packet to clean (from completion descriptor)
- * @cleaned: pointer to stats struct to track cleaned packets/bytes
- * @budget: Used to determine if we are in netpoll
- */
-static void idpf_tx_clean_stashed_bufs(struct idpf_tx_queue *txq,
- u16 compl_tag,
- struct libeth_sq_napi_stats *cleaned,
- int budget)
-{
- struct idpf_tx_stash *stash;
- struct hlist_node *tmp_buf;
- struct libeth_cq_pp cp = {
- .dev = txq->dev,
- .ss = cleaned,
- .napi = budget,
- };
+ tx_tstamp_caps = txq->cached_tstamp_caps;
+ spin_lock_bh(&tx_tstamp_caps->status_lock);
- /* Buffer completion */
- hash_for_each_possible_safe(txq->stash->sched_buf_hash, stash, tmp_buf,
- hlist, compl_tag) {
- if (unlikely(idpf_tx_buf_compl_tag(&stash->buf) != compl_tag))
+ for (u32 i = 0; i < tx_tstamp_caps->num_entries; i++) {
+ tx_tstamp_status = &tx_tstamp_caps->tx_tstamp_status[i];
+ if (tx_tstamp_status->state != IDPF_PTP_FREE)
continue;
- hash_del(&stash->hlist);
- libeth_tx_complete(&stash->buf, &cp);
-
- /* Push shadow buf back onto stack */
- idpf_buf_lifo_push(&txq->stash->buf_stack, stash);
- }
-}
+ tx_tstamp_status->skb = skb;
+ tx_tstamp_status->state = IDPF_PTP_REQUEST;
-/**
- * idpf_stash_flow_sch_buffers - store buffer parameters info to be freed at a
- * later time (only relevant for flow scheduling mode)
- * @txq: Tx queue to clean
- * @tx_buf: buffer to store
- */
-static int idpf_stash_flow_sch_buffers(struct idpf_tx_queue *txq,
- struct idpf_tx_buf *tx_buf)
-{
- struct idpf_tx_stash *stash;
-
- if (unlikely(tx_buf->type <= LIBETH_SQE_CTX))
- return 0;
-
- stash = idpf_buf_lifo_pop(&txq->stash->buf_stack);
- if (unlikely(!stash)) {
- net_err_ratelimited("%s: No out-of-order TX buffers left!\n",
- netdev_name(txq->netdev));
-
- return -ENOMEM;
+ /* Fetch timestamp from completion descriptor through
+ * virtchnl msg to report to stack.
+ */
+ queue_work(system_unbound_wq, txq->tstamp_task);
+ break;
}
- /* Store buffer params in shadow buffer */
- stash->buf.skb = tx_buf->skb;
- stash->buf.bytes = tx_buf->bytes;
- stash->buf.packets = tx_buf->packets;
- stash->buf.type = tx_buf->type;
- stash->buf.nr_frags = tx_buf->nr_frags;
- dma_unmap_addr_set(&stash->buf, dma, dma_unmap_addr(tx_buf, dma));
- dma_unmap_len_set(&stash->buf, len, dma_unmap_len(tx_buf, len));
- idpf_tx_buf_compl_tag(&stash->buf) = idpf_tx_buf_compl_tag(tx_buf);
-
- /* Add buffer to buf_hash table to be freed later */
- hash_add(txq->stash->sched_buf_hash, &stash->hlist,
- idpf_tx_buf_compl_tag(&stash->buf));
-
- tx_buf->type = LIBETH_SQE_EMPTY;
-
- return 0;
+ spin_unlock_bh(&tx_tstamp_caps->status_lock);
}
#define idpf_tx_splitq_clean_bump_ntc(txq, ntc, desc, buf) \
@@ -1757,14 +2038,8 @@ do { \
* Separate packet completion events will be reported on the completion queue,
* and the buffers will be cleaned separately. The stats are not updated from
* this function when using flow-based scheduling.
- *
- * Furthermore, in flow scheduling mode, check to make sure there are enough
- * reserve buffers to stash the packet. If there are not, return early, which
- * will leave next_to_clean pointing to the packet that failed to be stashed.
- *
- * Return: false in the scenario above, true otherwise.
*/
-static bool idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
+static void idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
int napi_budget,
struct libeth_sq_napi_stats *cleaned,
bool descs_only)
@@ -1778,7 +2053,12 @@ static bool idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
.napi = napi_budget,
};
struct idpf_tx_buf *tx_buf;
- bool clean_complete = true;
+
+ if (descs_only) {
+ /* Bump ring index to mark as cleaned. */
+ tx_q->next_to_clean = end;
+ return;
+ }
tx_desc = &tx_q->flex_tx[ntc];
next_pending_desc = &tx_q->flex_tx[end];
@@ -1798,132 +2078,61 @@ static bool idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
break;
eop_idx = tx_buf->rs_idx;
+ libeth_tx_complete(tx_buf, &cp);
- if (descs_only) {
- if (IDPF_TX_BUF_RSV_UNUSED(tx_q) < tx_buf->nr_frags) {
- clean_complete = false;
- goto tx_splitq_clean_out;
- }
-
- idpf_stash_flow_sch_buffers(tx_q, tx_buf);
+ /* unmap remaining buffers */
+ while (ntc != eop_idx) {
+ idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
+ tx_desc, tx_buf);
- while (ntc != eop_idx) {
- idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
- tx_desc, tx_buf);
- idpf_stash_flow_sch_buffers(tx_q, tx_buf);
- }
- } else {
+ /* unmap any remaining paged data */
libeth_tx_complete(tx_buf, &cp);
-
- /* unmap remaining buffers */
- while (ntc != eop_idx) {
- idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
- tx_desc, tx_buf);
-
- /* unmap any remaining paged data */
- libeth_tx_complete(tx_buf, &cp);
- }
}
fetch_next_txq_desc:
idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, tx_desc, tx_buf);
}
-tx_splitq_clean_out:
tx_q->next_to_clean = ntc;
-
- return clean_complete;
}
-#define idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, buf) \
-do { \
- (buf)++; \
- (ntc)++; \
- if (unlikely((ntc) == (txq)->desc_count)) { \
- buf = (txq)->tx_buf; \
- ntc = 0; \
- } \
-} while (0)
-
/**
- * idpf_tx_clean_buf_ring - clean flow scheduling TX queue buffers
+ * idpf_tx_clean_bufs - clean flow scheduling TX queue buffers
* @txq: queue to clean
- * @compl_tag: completion tag of packet to clean (from completion descriptor)
+ * @buf_id: packet's starting buffer ID, from completion descriptor
* @cleaned: pointer to stats struct to track cleaned packets/bytes
* @budget: Used to determine if we are in netpoll
*
- * Cleans all buffers associated with the input completion tag either from the
- * TX buffer ring or from the hash table if the buffers were previously
- * stashed. Returns the byte/segment count for the cleaned packet associated
- * this completion tag.
+ * Clean all buffers associated with the packet starting at @buf_id. The
+ * byte/segment counts for the cleaned packet are accumulated into @cleaned.
*/
-static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag,
- struct libeth_sq_napi_stats *cleaned,
- int budget)
+static void idpf_tx_clean_bufs(struct idpf_tx_queue *txq, u32 buf_id,
+ struct libeth_sq_napi_stats *cleaned,
+ int budget)
{
- u16 idx = compl_tag & txq->compl_tag_bufid_m;
struct idpf_tx_buf *tx_buf = NULL;
struct libeth_cq_pp cp = {
.dev = txq->dev,
.ss = cleaned,
.napi = budget,
};
- u16 ntc, orig_idx = idx;
- tx_buf = &txq->tx_buf[idx];
+ tx_buf = &txq->tx_buf[buf_id];
+ if (tx_buf->type == LIBETH_SQE_SKB) {
+ if (skb_shinfo(tx_buf->skb)->tx_flags & SKBTX_IN_PROGRESS)
+ idpf_tx_read_tstamp(txq, tx_buf->skb);
- if (unlikely(tx_buf->type <= LIBETH_SQE_CTX ||
- idpf_tx_buf_compl_tag(tx_buf) != compl_tag))
- return false;
-
- if (tx_buf->type == LIBETH_SQE_SKB)
libeth_tx_complete(tx_buf, &cp);
+ idpf_post_buf_refill(txq->refillq, buf_id);
+ }
- idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
+ while (idpf_tx_buf_next(tx_buf) != IDPF_TXBUF_NULL) {
+ buf_id = idpf_tx_buf_next(tx_buf);
- while (idpf_tx_buf_compl_tag(tx_buf) == compl_tag) {
+ tx_buf = &txq->tx_buf[buf_id];
libeth_tx_complete(tx_buf, &cp);
- idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
+ idpf_post_buf_refill(txq->refillq, buf_id);
}
-
- /*
- * It's possible the packet we just cleaned was an out of order
- * completion, which means we can stash the buffers starting from
- * the original next_to_clean and reuse the descriptors. We need
- * to compare the descriptor ring next_to_clean packet's "first" buffer
- * to the "first" buffer of the packet we just cleaned to determine if
- * this is the case. Howevever, next_to_clean can point to either a
- * reserved buffer that corresponds to a context descriptor used for the
- * next_to_clean packet (TSO packet) or the "first" buffer (single
- * packet). The orig_idx from the packet we just cleaned will always
- * point to the "first" buffer. If next_to_clean points to a reserved
- * buffer, let's bump ntc once and start the comparison from there.
- */
- ntc = txq->next_to_clean;
- tx_buf = &txq->tx_buf[ntc];
-
- if (tx_buf->type == LIBETH_SQE_CTX)
- idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, tx_buf);
-
- /*
- * If ntc still points to a different "first" buffer, clean the
- * descriptor ring and stash all of the buffers for later cleaning. If
- * we cannot stash all of the buffers, next_to_clean will point to the
- * "first" buffer of the packet that could not be stashed and cleaning
- * will start there next time.
- */
- if (unlikely(tx_buf != &txq->tx_buf[orig_idx] &&
- !idpf_tx_splitq_clean(txq, orig_idx, budget, cleaned,
- true)))
- return true;
-
- /*
- * Otherwise, update next_to_clean to reflect the cleaning that was
- * done above.
- */
- txq->next_to_clean = idx;
-
- return true;
}
/**
@@ -1942,22 +2151,17 @@ static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq,
struct libeth_sq_napi_stats *cleaned,
int budget)
{
- u16 compl_tag;
+ /* RS completion contains queue head for queue based scheduling or
+ * completion tag for flow based scheduling.
+ */
+ u16 rs_compl_val = le16_to_cpu(desc->common.q_head_compl_tag.q_head);
if (!idpf_queue_has(FLOW_SCH_EN, txq)) {
- u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head);
-
- idpf_tx_splitq_clean(txq, head, budget, cleaned, false);
+ idpf_tx_splitq_clean(txq, rs_compl_val, budget, cleaned, false);
return;
}
- compl_tag = le16_to_cpu(desc->q_head_compl_tag.compl_tag);
-
- /* If we didn't clean anything on the ring, this packet must be
- * in the hash table. Go clean it there.
- */
- if (!idpf_tx_clean_buf_ring(txq, compl_tag, cleaned, budget))
- idpf_tx_clean_stashed_bufs(txq, compl_tag, cleaned, budget);
+ idpf_tx_clean_bufs(txq, rs_compl_val, cleaned, budget);
}
/**
@@ -1985,19 +2189,19 @@ static bool idpf_tx_clean_complq(struct idpf_compl_queue *complq, int budget,
do {
struct libeth_sq_napi_stats cleaned_stats = { };
struct idpf_tx_queue *tx_q;
+ __le16 hw_head;
int rel_tx_qid;
- u16 hw_head;
u8 ctype; /* completion type */
u16 gen;
/* if the descriptor isn't done, no work yet to do */
- gen = le16_get_bits(tx_desc->qid_comptype_gen,
+ gen = le16_get_bits(tx_desc->common.qid_comptype_gen,
IDPF_TXD_COMPLQ_GEN_M);
if (idpf_queue_has(GEN_CHK, complq) != gen)
break;
/* Find necessary info of TX queue to clean buffers */
- rel_tx_qid = le16_get_bits(tx_desc->qid_comptype_gen,
+ rel_tx_qid = le16_get_bits(tx_desc->common.qid_comptype_gen,
IDPF_TXD_COMPLQ_QID_M);
if (rel_tx_qid >= complq->txq_grp->num_txq ||
!complq->txq_grp->txqs[rel_tx_qid]) {
@@ -2007,22 +2211,19 @@ static bool idpf_tx_clean_complq(struct idpf_compl_queue *complq, int budget,
tx_q = complq->txq_grp->txqs[rel_tx_qid];
/* Determine completion type */
- ctype = le16_get_bits(tx_desc->qid_comptype_gen,
+ ctype = le16_get_bits(tx_desc->common.qid_comptype_gen,
IDPF_TXD_COMPLQ_COMPL_TYPE_M);
switch (ctype) {
case IDPF_TXD_COMPLT_RE:
- hw_head = le16_to_cpu(tx_desc->q_head_compl_tag.q_head);
+ hw_head = tx_desc->common.q_head_compl_tag.q_head;
- idpf_tx_splitq_clean(tx_q, hw_head, budget,
- &cleaned_stats, true);
+ idpf_tx_splitq_clean(tx_q, le16_to_cpu(hw_head),
+ budget, &cleaned_stats, true);
break;
case IDPF_TXD_COMPLT_RS:
idpf_tx_handle_rs_completion(tx_q, tx_desc,
&cleaned_stats, budget);
break;
- case IDPF_TXD_COMPLT_SW_MARKER:
- idpf_tx_handle_sw_marker(tx_q);
- break;
default:
netdev_err(tx_q->netdev,
"Unknown TX completion type: %d\n", ctype);
@@ -2074,8 +2275,7 @@ fetch_next_desc:
/* Update BQL */
nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
- dont_wake = !complq_ok || IDPF_TX_BUF_RSV_LOW(tx_q) ||
- np->state != __IDPF_VPORT_UP ||
+ dont_wake = !complq_ok || !test_bit(IDPF_VPORT_UP, np->state) ||
!netif_carrier_ok(tx_q->netdev);
/* Check if the TXQ needs to and can be restarted */
__netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes,
@@ -2096,6 +2296,69 @@ fetch_next_desc:
}
/**
+ * idpf_wait_for_sw_marker_completion - wait for SW marker of disabled Tx queue
+ * @txq: disabled Tx queue
+ *
+ * When Tx queue is requested for disabling, the CP sends a special completion
+ * descriptor called "SW marker", meaning the queue is ready to be destroyed.
+ * If, for some reason, the marker is not received within 500 ms, stop
+ * polling so that the driver does not hang.
+ */
+void idpf_wait_for_sw_marker_completion(const struct idpf_tx_queue *txq)
+{
+ struct idpf_compl_queue *complq;
+ unsigned long timeout;
+ bool flow, gen_flag;
+ u32 ntc;
+
+ if (!idpf_queue_has(SW_MARKER, txq))
+ return;
+
+ complq = idpf_queue_has(XDP, txq) ? txq->complq : txq->txq_grp->complq;
+ ntc = complq->next_to_clean;
+
+ flow = idpf_queue_has(FLOW_SCH_EN, complq);
+ gen_flag = idpf_queue_has(GEN_CHK, complq);
+
+ timeout = jiffies + msecs_to_jiffies(IDPF_WAIT_FOR_MARKER_TIMEO);
+
+ do {
+ struct idpf_splitq_4b_tx_compl_desc *tx_desc;
+ struct idpf_tx_queue *target;
+ u32 ctype_gen, id;
+
+ tx_desc = flow ? &complq->comp[ntc].common :
+ &complq->comp_4b[ntc];
+ ctype_gen = le16_to_cpu(tx_desc->qid_comptype_gen);
+
+ if (!!(ctype_gen & IDPF_TXD_COMPLQ_GEN_M) != gen_flag) {
+ usleep_range(500, 1000);
+ continue;
+ }
+
+ if (FIELD_GET(IDPF_TXD_COMPLQ_COMPL_TYPE_M, ctype_gen) !=
+ IDPF_TXD_COMPLT_SW_MARKER)
+ goto next;
+
+ id = FIELD_GET(IDPF_TXD_COMPLQ_QID_M, ctype_gen);
+ target = complq->txq_grp->txqs[id];
+
+ idpf_queue_clear(SW_MARKER, target);
+ if (target == txq)
+ break;
+
+next:
+ if (unlikely(++ntc == complq->desc_count)) {
+ ntc = 0;
+ gen_flag = !gen_flag;
+ }
+ } while (time_before(jiffies, timeout));
+
+ idpf_queue_assign(GEN_CHK, complq, gen_flag);
+ complq->next_to_clean = ntc;
+}
+
+/**
* idpf_tx_splitq_build_ctb - populate command tag and size for queue
* based scheduling descriptors
* @desc: descriptor to populate
@@ -2127,44 +2390,52 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
struct idpf_tx_splitq_params *params,
u16 td_cmd, u16 size)
{
- desc->flow.qw1.cmd_dtype = (u16)params->dtype | td_cmd;
+ *(u32 *)&desc->flow.qw1.cmd_dtype = (u8)(params->dtype | td_cmd);
desc->flow.qw1.rxr_bufsize = cpu_to_le16((u16)size);
desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag);
}
/**
+ * idpf_txq_has_room - check if enough Tx splitq resources are available
+ * @tx_q: the queue to be checked
+ * @descs_needed: number of descriptors required for this packet
+ * @bufs_needed: number of Tx buffers required for this packet
+ *
+ * Return: 0 if no room available, 1 otherwise
+ */
+static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 descs_needed,
+ u32 bufs_needed)
+{
+ if (IDPF_DESC_UNUSED(tx_q) < descs_needed ||
+ IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
+ IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq) ||
+ idpf_tx_splitq_get_free_bufs(tx_q->refillq) < bufs_needed)
+ return 0;
+ return 1;
+}
+
+/**
* idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
* @tx_q: the queue to be checked
* @descs_needed: number of descriptors required for this packet
+ * @bufs_needed: number of buffers needed for this packet
*
- * Returns 0 if stop is not needed
+ * Return: 0 if stop is not needed
*/
static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
- unsigned int descs_needed)
+ u32 descs_needed,
+ u32 bufs_needed)
{
- if (idpf_tx_maybe_stop_common(tx_q, descs_needed))
- goto out;
-
- /* If there are too many outstanding completions expected on the
- * completion queue, stop the TX queue to give the device some time to
- * catch up
+ /* Since we have multiple resources to check for splitq, our
+ * start/stop thresholds become a boolean check instead of a count
+ * threshold.
*/
- if (unlikely(IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
- IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq)))
- goto splitq_stop;
-
- /* Also check for available book keeping buffers; if we are low, stop
- * the queue to wait for more completions
- */
- if (unlikely(IDPF_TX_BUF_RSV_LOW(tx_q)))
- goto splitq_stop;
-
- return 0;
-
-splitq_stop:
- netif_stop_subqueue(tx_q->netdev, tx_q->idx);
+ if (netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
+ idpf_txq_has_room(tx_q, descs_needed,
+ bufs_needed),
+ 1, 1))
+ return 0;
-out:
u64_stats_update_begin(&tx_q->stats_sync);
u64_stats_inc(&tx_q->q_stats.q_busy);
u64_stats_update_end(&tx_q->stats_sync);
@@ -2190,12 +2461,6 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
tx_q->next_to_use = val;
- if (idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED)) {
- u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_inc(&tx_q->q_stats.q_busy);
- u64_stats_update_end(&tx_q->stats_sync);
- }
-
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -2209,14 +2474,16 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
}
/**
- * idpf_tx_desc_count_required - calculate number of Tx descriptors needed
+ * idpf_tx_res_count_required - get number of Tx resources needed for this pkt
* @txq: queue to send buffer on
* @skb: send buffer
+ * @bufs_needed: (output) number of buffers needed for this skb.
*
- * Returns number of data descriptors needed for this skb.
+ * Return: number of data descriptors needed for this skb; the number of Tx
+ * buffers is returned via @bufs_needed.
*/
-unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
- struct sk_buff *skb)
+unsigned int idpf_tx_res_count_required(struct idpf_tx_queue *txq,
+ struct sk_buff *skb,
+ u32 *bufs_needed)
{
const struct skb_shared_info *shinfo;
unsigned int count = 0, i;
@@ -2227,6 +2494,7 @@ unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
return count;
shinfo = skb_shinfo(skb);
+ *bufs_needed += shinfo->nr_frags;
for (i = 0; i < shinfo->nr_frags; i++) {
unsigned int size;
@@ -2256,71 +2524,89 @@ unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
}
/**
- * idpf_tx_dma_map_error - handle TX DMA map errors
- * @txq: queue to send buffer on
- * @skb: send buffer
- * @first: original first buffer info buffer for packet
- * @idx: starting point on ring to unwind
+ * idpf_tx_splitq_bump_ntu - adjust NTU and generation
+ * @txq: the tx ring to wrap
+ * @ntu: ring index to bump
*/
-void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
- struct idpf_tx_buf *first, u16 idx)
+static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_tx_queue *txq, u16 ntu)
{
- struct libeth_sq_napi_stats ss = { };
- struct libeth_cq_pp cp = {
- .dev = txq->dev,
- .ss = &ss,
- };
+ ntu++;
- u64_stats_update_begin(&txq->stats_sync);
- u64_stats_inc(&txq->q_stats.dma_map_errs);
- u64_stats_update_end(&txq->stats_sync);
+ if (ntu == txq->desc_count)
+ ntu = 0;
- /* clear dma mappings for failed tx_buf map */
- for (;;) {
- struct idpf_tx_buf *tx_buf;
+ return ntu;
+}
- tx_buf = &txq->tx_buf[idx];
- libeth_tx_complete(tx_buf, &cp);
- if (tx_buf == first)
- break;
- if (idx == 0)
- idx = txq->desc_count;
- idx--;
- }
+/**
+ * idpf_tx_get_free_buf_id - get a free buffer ID from the refill queue
+ * @refillq: refill queue to get buffer ID from
+ * @buf_id: (output) the buffer ID popped from the refill queue
+ *
+ * Return: true if a buffer ID was found, false if not
+ */
+static bool idpf_tx_get_free_buf_id(struct idpf_sw_queue *refillq,
+ u32 *buf_id)
+{
+ u32 ntc = refillq->next_to_clean;
+ u32 refill_desc;
- if (skb_is_gso(skb)) {
- union idpf_tx_flex_desc *tx_desc;
+ refill_desc = refillq->ring[ntc];
- /* If we failed a DMA mapping for a TSO packet, we will have
- * used one additional descriptor for a context
- * descriptor. Reset that here.
- */
- tx_desc = &txq->flex_tx[idx];
- memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc));
- if (idx == 0)
- idx = txq->desc_count;
- idx--;
+ if (unlikely(idpf_queue_has(RFL_GEN_CHK, refillq) !=
+ !!(refill_desc & IDPF_RFL_BI_GEN_M)))
+ return false;
+
+ *buf_id = FIELD_GET(IDPF_RFL_BI_BUFID_M, refill_desc);
+
+ if (unlikely(++ntc == refillq->desc_count)) {
+ idpf_queue_change(RFL_GEN_CHK, refillq);
+ ntc = 0;
}
- /* Update tail in case netdev_xmit_more was previously true */
- idpf_tx_buf_hw_update(txq, idx, false);
+ refillq->next_to_clean = ntc;
+
+ return true;
}
/**
- * idpf_tx_splitq_bump_ntu - adjust NTU and generation
- * @txq: the tx ring to wrap
- * @ntu: ring index to bump
+ * idpf_tx_splitq_pkt_err_unmap - Unmap buffers and bump tail in case of error
+ * @txq: Tx queue to unwind
+ * @params: pointer to splitq params struct
+ * @first: starting buffer for packet to unmap
*/
-static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_tx_queue *txq, u16 ntu)
+static void idpf_tx_splitq_pkt_err_unmap(struct idpf_tx_queue *txq,
+ struct idpf_tx_splitq_params *params,
+ struct idpf_tx_buf *first)
{
- ntu++;
+ struct idpf_sw_queue *refillq = txq->refillq;
+ struct libeth_sq_napi_stats ss = { };
+ struct idpf_tx_buf *tx_buf = first;
+ struct libeth_cq_pp cp = {
+ .dev = txq->dev,
+ .ss = &ss,
+ };
- if (ntu == txq->desc_count) {
- ntu = 0;
- txq->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(txq);
+ u64_stats_update_begin(&txq->stats_sync);
+ u64_stats_inc(&txq->q_stats.dma_map_errs);
+ u64_stats_update_end(&txq->stats_sync);
+
+ libeth_tx_complete(tx_buf, &cp);
+ while (idpf_tx_buf_next(tx_buf) != IDPF_TXBUF_NULL) {
+ tx_buf = &txq->tx_buf[idpf_tx_buf_next(tx_buf)];
+ libeth_tx_complete(tx_buf, &cp);
}
- return ntu;
+ /* Update tail in case netdev_xmit_more was previously true. */
+ idpf_tx_buf_hw_update(txq, params->prev_ntu, false);
+
+ if (!refillq)
+ return;
+
+ /* Restore refillq state to avoid leaking tags. */
+ if (params->prev_refill_gen != idpf_queue_has(RFL_GEN_CHK, refillq))
+ idpf_queue_change(RFL_GEN_CHK, refillq);
+ refillq->next_to_clean = params->prev_refill_ntc;
}
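The error path above, together with the prev_ntu / prev_refill_* fields saved in idpf_tx_splitq_frame(), implements a simple transaction: snapshot the producer state before building a packet and restore it wholesale on failure so no descriptors or buffer IDs leak. A standalone sketch of that pattern with invented names:

#include <stdbool.h>
#include <stdio.h>

struct txq {
	unsigned int ntu;        /* descriptor ring next_to_use */
	unsigned int refill_ntc; /* refill queue next_to_clean */
};

/* try_map() stands in for DMA mapping; fail the third fragment to show rollback. */
static bool try_map(unsigned int frag)
{
	return frag != 2;
}

int main(void)
{
	struct txq q = { .ntu = 10, .refill_ntc = 4 };
	struct txq snap = q; /* snapshot before touching the rings */

	for (unsigned int frag = 0; frag < 5; frag++) {
		if (!try_map(frag)) {
			q = snap; /* nothing leaked, nothing half-posted */
			break;
		}
		q.ntu++;        /* consumed one descriptor */
		q.refill_ntc++; /* consumed one free buffer ID */
	}

	printf("ntu=%u refill_ntc=%u\n", q.ntu, q.refill_ntc); /* back to 10 and 4 */
	return 0;
}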
/**
@@ -2344,6 +2630,7 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
struct netdev_queue *nq;
struct sk_buff *skb;
skb_frag_t *frag;
+ u32 next_buf_id;
u16 td_cmd = 0;
dma_addr_t dma;
@@ -2361,17 +2648,16 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
tx_buf = first;
first->nr_frags = 0;
- params->compl_tag =
- (tx_q->compl_tag_cur_gen << tx_q->compl_tag_gen_s) | i;
-
for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
- if (dma_mapping_error(tx_q->dev, dma))
- return idpf_tx_dma_map_error(tx_q, skb, first, i);
+ if (unlikely(dma_mapping_error(tx_q->dev, dma))) {
+ idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL;
+ return idpf_tx_splitq_pkt_err_unmap(tx_q, params,
+ first);
+ }
first->nr_frags++;
- idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag;
tx_buf->type = LIBETH_SQE_FRAG;
/* record length, and DMA address */
@@ -2427,28 +2713,12 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
max_data);
if (unlikely(++i == tx_q->desc_count)) {
- tx_buf = tx_q->tx_buf;
tx_desc = &tx_q->flex_tx[0];
i = 0;
- tx_q->compl_tag_cur_gen =
- IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
} else {
- tx_buf++;
tx_desc++;
}
- /* Since this packet has a buffer that is going to span
- * multiple descriptors, it's going to leave holes in
- * to the TX buffer ring. To ensure these holes do not
- * cause issues in the cleaning routines, we will clear
- * them of any stale data and assign them the same
- * completion tag as the current packet. Then when the
- * packet is being cleaned, the cleaning routines will
- * simply pass over these holes and finish cleaning the
- * rest of the packet.
- */
- tx_buf->type = LIBETH_SQE_EMPTY;
-
/* Adjust the DMA offset and the remaining size of the
* fragment. On the first iteration of this loop,
* max_data will be >= 12K and <= 16K-1. On any
@@ -2473,15 +2743,25 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
if (unlikely(++i == tx_q->desc_count)) {
- tx_buf = tx_q->tx_buf;
tx_desc = &tx_q->flex_tx[0];
i = 0;
- tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
} else {
- tx_buf++;
tx_desc++;
}
+ if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
+ if (unlikely(!idpf_tx_get_free_buf_id(tx_q->refillq,
+ &next_buf_id))) {
+ idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL;
+ return idpf_tx_splitq_pkt_err_unmap(tx_q, params,
+ first);
+ }
+ } else {
+ next_buf_id = i;
+ }
+ idpf_tx_buf_next(tx_buf) = next_buf_id;
+ tx_buf = &tx_q->tx_buf[next_buf_id];
+
size = skb_frag_size(frag);
data_len -= size;
@@ -2496,6 +2776,7 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
/* write last descriptor with RS and EOP bits */
first->rs_idx = i;
+ idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL;
td_cmd |= params->eop_cmd;
idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
i = idpf_tx_splitq_bump_ntu(tx_q, i);
@@ -2585,111 +2866,6 @@ int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off)
return 1;
}
-/**
- * __idpf_chk_linearize - Check skb is not using too many buffers
- * @skb: send buffer
- * @max_bufs: maximum number of buffers
- *
- * For TSO we need to count the TSO header and segment payload separately. As
- * such we need to check cases where we have max_bufs-1 fragments or more as we
- * can potentially require max_bufs+1 DMA transactions, 1 for the TSO header, 1
- * for the segment payload in the first descriptor, and another max_buf-1 for
- * the fragments.
- */
-static bool __idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs)
-{
- const struct skb_shared_info *shinfo = skb_shinfo(skb);
- const skb_frag_t *frag, *stale;
- int nr_frags, sum;
-
- /* no need to check if number of frags is less than max_bufs - 1 */
- nr_frags = shinfo->nr_frags;
- if (nr_frags < (max_bufs - 1))
- return false;
-
- /* We need to walk through the list and validate that each group
- * of max_bufs-2 fragments totals at least gso_size.
- */
- nr_frags -= max_bufs - 2;
- frag = &shinfo->frags[0];
-
- /* Initialize size to the negative value of gso_size minus 1. We use
- * this as the worst case scenario in which the frag ahead of us only
- * provides one byte which is why we are limited to max_bufs-2
- * descriptors for a single transmit as the header and previous
- * fragment are already consuming 2 descriptors.
- */
- sum = 1 - shinfo->gso_size;
-
- /* Add size of frags 0 through 4 to create our initial sum */
- sum += skb_frag_size(frag++);
- sum += skb_frag_size(frag++);
- sum += skb_frag_size(frag++);
- sum += skb_frag_size(frag++);
- sum += skb_frag_size(frag++);
-
- /* Walk through fragments adding latest fragment, testing it, and
- * then removing stale fragments from the sum.
- */
- for (stale = &shinfo->frags[0];; stale++) {
- int stale_size = skb_frag_size(stale);
-
- sum += skb_frag_size(frag++);
-
- /* The stale fragment may present us with a smaller
- * descriptor than the actual fragment size. To account
- * for that we need to remove all the data on the front and
- * figure out what the remainder would be in the last
- * descriptor associated with the fragment.
- */
- if (stale_size > IDPF_TX_MAX_DESC_DATA) {
- int align_pad = -(skb_frag_off(stale)) &
- (IDPF_TX_MAX_READ_REQ_SIZE - 1);
-
- sum -= align_pad;
- stale_size -= align_pad;
-
- do {
- sum -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
- stale_size -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
- } while (stale_size > IDPF_TX_MAX_DESC_DATA);
- }
-
- /* if sum is negative we failed to make sufficient progress */
- if (sum < 0)
- return true;
-
- if (!nr_frags--)
- break;
-
- sum -= stale_size;
- }
-
- return false;
-}
-
-/**
- * idpf_chk_linearize - Check if skb exceeds max descriptors per packet
- * @skb: send buffer
- * @max_bufs: maximum scatter gather buffers for single packet
- * @count: number of buffers this packet needs
- *
- * Make sure we don't exceed maximum scatter gather buffers for a single
- * packet. We have to do some special checking around the boundary (max_bufs-1)
- * if TSO is on since we need count the TSO header and payload separately.
- * E.g.: a packet with 7 fragments can require 9 DMA transactions; 1 for TSO
- * header, 1 for segment payload, and then 7 for the fragments.
- */
-static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
- unsigned int count)
-{
- if (likely(count < max_bufs))
- return false;
- if (skb_is_gso(skb))
- return __idpf_chk_linearize(skb, max_bufs);
-
- return count > max_bufs;
-}
/**
* idpf_tx_splitq_get_ctx_desc - grab next desc and update buffer ring
@@ -2698,14 +2874,12 @@ static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
* Since the TX buffer rings mimics the descriptor ring, update the tx buffer
* ring entry to reflect that this index is a context descriptor
*/
-static struct idpf_flex_tx_ctx_desc *
+static union idpf_flex_tx_ctx_desc *
idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq)
{
- struct idpf_flex_tx_ctx_desc *desc;
+ union idpf_flex_tx_ctx_desc *desc;
int i = txq->next_to_use;
- txq->tx_buf[i].type = LIBETH_SQE_CTX;
-
/* grab the next descriptor */
desc = &txq->flex_ctx[i];
txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i);
@@ -2731,6 +2905,88 @@ netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb)
return NETDEV_TX_OK;
}
+#if (IS_ENABLED(CONFIG_PTP_1588_CLOCK))
+/**
+ * idpf_tx_tstamp - set up context descriptor for hardware timestamp
+ * @tx_q: queue to send buffer on
+ * @skb: pointer to the SKB we're sending
+ * @off: pointer to the offload struct
+ *
+ * Return: index of the reserved Tx timestamp latch on success, negative
+ * otherwise.
+ */
+static int idpf_tx_tstamp(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
+ struct idpf_tx_offload_params *off)
+{
+ int err, idx;
+
+ /* only timestamp the outbound packet if the user has requested it */
+ if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
+ return -1;
+
+ if (!idpf_ptp_get_txq_tstamp_capability(tx_q))
+ return -1;
+
+ /* Tx timestamps cannot be sampled when doing TSO */
+ if (off->tx_flags & IDPF_TX_FLAGS_TSO)
+ return -1;
+
+ /* Grab an open timestamp slot */
+ err = idpf_ptp_request_ts(tx_q, skb, &idx);
+ if (err) {
+ u64_stats_update_begin(&tx_q->stats_sync);
+ u64_stats_inc(&tx_q->q_stats.tstamp_skipped);
+ u64_stats_update_end(&tx_q->stats_sync);
+
+ return -1;
+ }
+
+ off->tx_flags |= IDPF_TX_FLAGS_TSYN;
+
+ return idx;
+}
+
+/**
+ * idpf_tx_set_tstamp_desc - Set the Tx descriptor fields needed to generate
+ * PHY Tx timestamp
+ * @ctx_desc: Context descriptor
+ * @idx: Index of the Tx timestamp latch
+ */
+static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc,
+ u32 idx)
+{
+ ctx_desc->tsyn.qw1 = le64_encode_bits(IDPF_TX_DESC_DTYPE_CTX,
+ IDPF_TX_CTX_DTYPE_M) |
+ le64_encode_bits(IDPF_TX_CTX_DESC_TSYN,
+ IDPF_TX_CTX_CMD_M) |
+ le64_encode_bits(idx, IDPF_TX_CTX_TSYN_REG_M);
+}
+#else /* CONFIG_PTP_1588_CLOCK */
+static int idpf_tx_tstamp(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
+ struct idpf_tx_offload_params *off)
+{
+ return -1;
+}
+
+static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc,
+ u32 idx)
+{ }
+#endif /* CONFIG_PTP_1588_CLOCK */
+
+/**
+ * idpf_tx_splitq_need_re - check whether RE bit needs to be set
+ * @tx_q: pointer to Tx queue
+ *
+ * Return: true if RE bit needs to be set, false otherwise
+ */
+static bool idpf_tx_splitq_need_re(struct idpf_tx_queue *tx_q)
+{
+ int gap = tx_q->next_to_use - tx_q->last_re;
+
+ gap += (gap < 0) ? tx_q->desc_count : 0;
+
+ return gap >= IDPF_TX_SPLITQ_RE_MIN_GAP;
+}
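For reference, a quick sketch of the wraparound arithmetic idpf_tx_splitq_need_re() relies on; the values below are illustrative only, not taken from this patch:

/* Illustrative only: RE gap computation with a wrapped next_to_use. */
static bool re_gap_example(void)
{
	int desc_count = 512;			/* assumed ring size */
	int last_re = 500;			/* RE last set near the ring end */
	int next_to_use = 40;			/* producer has already wrapped */
	int gap = next_to_use - last_re;	/* -460 before correction */

	if (gap < 0)
		gap += desc_count;		/* 52 descriptors since the last RE */

	return gap >= IDPF_TX_SPLITQ_RE_MIN_GAP;	/* 52 < 64: no RE yet */
}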
+
/**
* idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors
* @skb: send buffer
@@ -2741,12 +2997,16 @@ netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb)
static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
struct idpf_tx_queue *tx_q)
{
- struct idpf_tx_splitq_params tx_params = { };
+ struct idpf_tx_splitq_params tx_params = {
+ .prev_ntu = tx_q->next_to_use,
+ };
+ union idpf_flex_tx_ctx_desc *ctx_desc;
struct idpf_tx_buf *first;
- unsigned int count;
- int tso;
+ u32 count, buf_count = 1;
+ int tso, idx;
+ u32 buf_id;
- count = idpf_tx_desc_count_required(tx_q, skb);
+ count = idpf_tx_res_count_required(tx_q, skb, &buf_count);
if (unlikely(!count))
return idpf_tx_drop_skb(tx_q, skb);
@@ -2756,7 +3016,7 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
/* Check for splitq specific TX resources */
count += (IDPF_TX_DESCS_PER_CACHE_LINE + tso);
- if (idpf_tx_maybe_stop_splitq(tx_q, count)) {
+ if (idpf_tx_maybe_stop_splitq(tx_q, count, buf_count)) {
idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
return NETDEV_TX_BUSY;
@@ -2764,8 +3024,7 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
if (tso) {
/* If tso is needed, set up context desc */
- struct idpf_flex_tx_ctx_desc *ctx_desc =
- idpf_tx_splitq_get_ctx_desc(tx_q);
+ ctx_desc = idpf_tx_splitq_get_ctx_desc(tx_q);
ctx_desc->tso.qw1.cmd_dtype =
cpu_to_le16(IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX |
@@ -2783,36 +3042,53 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
u64_stats_update_end(&tx_q->stats_sync);
}
- /* record the location of the first descriptor for this packet */
- first = &tx_q->tx_buf[tx_q->next_to_use];
- first->skb = skb;
-
- if (tso) {
- first->packets = tx_params.offload.tso_segs;
- first->bytes = skb->len +
- ((first->packets - 1) * tx_params.offload.tso_hdr_len);
- } else {
- first->packets = 1;
- first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
+ idx = idpf_tx_tstamp(tx_q, skb, &tx_params.offload);
+ if (idx != -1) {
+ ctx_desc = idpf_tx_splitq_get_ctx_desc(tx_q);
+ idpf_tx_set_tstamp_desc(ctx_desc, idx);
}
if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
+ struct idpf_sw_queue *refillq = tx_q->refillq;
+
+ /* Save refillq state in case of a packet rollback. Otherwise,
+ * the tags will be leaked since they will be popped from the
+ * refillq but never reposted during cleaning.
+ */
+ tx_params.prev_refill_gen =
+ idpf_queue_has(RFL_GEN_CHK, refillq);
+ tx_params.prev_refill_ntc = refillq->next_to_clean;
+
+ if (unlikely(!idpf_tx_get_free_buf_id(tx_q->refillq,
+ &buf_id))) {
+ if (tx_params.prev_refill_gen !=
+ idpf_queue_has(RFL_GEN_CHK, refillq))
+ idpf_queue_change(RFL_GEN_CHK, refillq);
+ refillq->next_to_clean = tx_params.prev_refill_ntc;
+
+ tx_q->next_to_use = tx_params.prev_ntu;
+ return idpf_tx_drop_skb(tx_q, skb);
+ }
+ tx_params.compl_tag = buf_id;
+
tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE;
tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP;
- /* Set the RE bit to catch any packets that may have not been
- * stashed during RS completion cleaning. MIN_GAP is set to
- * MIN_RING size to ensure it will be set at least once each
- * time around the ring.
+ /* Set the RE bit to periodically "clean" the descriptor ring.
+ * MIN_GAP is set to MIN_RING size to ensure it will be set at
+ * least once each time around the ring.
*/
- if (!(tx_q->next_to_use % IDPF_TX_SPLITQ_RE_MIN_GAP)) {
+ if (idpf_tx_splitq_need_re(tx_q)) {
tx_params.eop_cmd |= IDPF_TXD_FLEX_FLOW_CMD_RE;
tx_q->txq_grp->num_completions_pending++;
+ tx_q->last_re = tx_q->next_to_use;
}
if (skb->ip_summed == CHECKSUM_PARTIAL)
tx_params.offload.td_cmd |= IDPF_TXD_FLEX_FLOW_CMD_CS_EN;
} else {
+ buf_id = tx_q->next_to_use;
+
tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2;
tx_params.eop_cmd = IDPF_TXD_LAST_DESC_CMD;
@@ -2820,6 +3096,18 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
tx_params.offload.td_cmd |= IDPF_TX_FLEX_DESC_CMD_CS_EN;
}
+ first = &tx_q->tx_buf[buf_id];
+ first->skb = skb;
+
+ if (tso) {
+ first->packets = tx_params.offload.tso_segs;
+ first->bytes = skb->len +
+ ((first->packets - 1) * tx_params.offload.tso_hdr_len);
+ } else {
+ first->packets = 1;
+ first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
+ }
+
idpf_tx_splitq_map(tx_q, &tx_params, first);
return NETDEV_TX_OK;
@@ -2834,10 +3122,11 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
*/
netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev)
{
- struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
+ const struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
struct idpf_tx_queue *tx_q;
- if (unlikely(skb_get_queue_mapping(skb) >= vport->num_txq)) {
+ if (unlikely(skb_get_queue_mapping(skb) >=
+ vport->num_txq - vport->num_xdp_txq)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -2874,7 +3163,7 @@ idpf_rx_hash(const struct idpf_rx_queue *rxq, struct sk_buff *skb,
{
u32 hash;
- if (!libeth_rx_pt_has_hash(rxq->netdev, decoded))
+ if (!libeth_rx_pt_has_hash(rxq->xdp_rxq.dev, decoded))
return;
hash = le16_to_cpu(rx_desc->hash1) |
@@ -2894,13 +3183,13 @@ idpf_rx_hash(const struct idpf_rx_queue *rxq, struct sk_buff *skb,
* skb->protocol must be set before this function is called
*/
static void idpf_rx_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb,
- struct idpf_rx_csum_decoded csum_bits,
+ struct libeth_rx_csum csum_bits,
struct libeth_rx_pt decoded)
{
bool ipv4, ipv6;
/* check if Rx checksum is enabled */
- if (!libeth_rx_pt_has_checksum(rxq->netdev, decoded))
+ if (!libeth_rx_pt_has_checksum(rxq->xdp_rxq.dev, decoded))
return;
/* check if HW has decoded the packet and checksum */
@@ -2922,7 +3211,7 @@ static void idpf_rx_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb,
if (unlikely(csum_bits.l4e))
goto checksum_fail;
- if (csum_bits.raw_csum_inv ||
+ if (!csum_bits.raw_csum_valid ||
decoded.inner_prot == LIBETH_RX_PT_INNER_SCTP) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
return;
@@ -2945,10 +3234,10 @@ checksum_fail:
*
* Return: parsed checksum status.
**/
-static struct idpf_rx_csum_decoded
+static struct libeth_rx_csum
idpf_rx_splitq_extract_csum_bits(const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
{
- struct idpf_rx_csum_decoded csum = { };
+ struct libeth_rx_csum csum = { };
u8 qword0, qword1;
qword0 = rx_desc->status_err0_qw0;
@@ -2964,9 +3253,9 @@ idpf_rx_splitq_extract_csum_bits(const struct virtchnl2_rx_flex_desc_adv_nic_3 *
qword1);
csum.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M,
qword0);
- csum.raw_csum_inv =
- le16_get_bits(rx_desc->ptype_err_fflags0,
- VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M);
+ csum.raw_csum_valid =
+ !le16_get_bits(rx_desc->ptype_err_fflags0,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M);
csum.raw_csum = le16_to_cpu(rx_desc->misc.raw_cs);
return csum;
@@ -3007,14 +3296,11 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
return -EINVAL;
rsc_segments = DIV_ROUND_UP(skb->data_len, rsc_seg_len);
- if (unlikely(rsc_segments == 1))
- return 0;
NAPI_GRO_CB(skb)->count = rsc_segments;
skb_shinfo(skb)->gso_size = rsc_seg_len;
skb_reset_network_header(skb);
- len = skb->len - skb_transport_offset(skb);
if (ipv4) {
struct iphdr *ipv4h = ip_hdr(skb);
@@ -3023,6 +3309,7 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
/* Reset and set transport header offset in skb */
skb_set_transport_header(skb, sizeof(struct iphdr));
+ len = skb->len - skb_transport_offset(skb);
/* Compute the TCP pseudo header checksum*/
tcp_hdr(skb)->check =
@@ -3032,6 +3319,7 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+ len = skb->len - skb_transport_offset(skb);
tcp_hdr(skb)->check =
~tcp_v6_check(len, &ipv6h->saddr, &ipv6h->daddr, 0);
}
@@ -3046,7 +3334,34 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
}
/**
- * idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor
+ * idpf_rx_hwtstamp - check for an RX timestamp and pass it up the stack
+ * @rxq: pointer to the rx queue that receives the timestamp
+ * @rx_desc: pointer to rx descriptor containing timestamp
+ * @skb: skb to put timestamp in
+ */
+static void
+idpf_rx_hwtstamp(const struct idpf_rx_queue *rxq,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
+ struct sk_buff *skb)
+{
+ u64 cached_time, ts_ns;
+ u32 ts_high;
+
+ if (!(rx_desc->ts_low & VIRTCHNL2_RX_FLEX_TSTAMP_VALID))
+ return;
+
+ cached_time = READ_ONCE(rxq->cached_phc_time);
+
+ ts_high = le32_to_cpu(rx_desc->ts_high);
+ ts_ns = idpf_ptp_tstamp_extend_32b_to_64b(cached_time, ts_high);
+
+ *skb_hwtstamps(skb) = (struct skb_shared_hwtstamps) {
+ .hwtstamp = ns_to_ktime(ts_ns),
+ };
+}
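idpf_ptp_tstamp_extend_32b_to_64b() itself is not part of this hunk. As a hedged sketch of the usual extension scheme (assuming the Rx sample is newer than the cached PHC time and the cache is refreshed well within one 32-bit rollover period), the helper effectively splices the 32-bit hardware value into the cached time and corrects for rollover:

/* Hedged sketch only -- not the driver's actual helper. */
static u64 rx_tstamp_extend_sketch(u64 cached_phc_time, u32 ts_high)
{
	u32 phc_low = lower_32_bits(cached_phc_time);
	u64 ns = (cached_phc_time & GENMASK_ULL(63, 32)) | ts_high;

	/* the low 32 bits rolled over after the cache was refreshed */
	if (ts_high < phc_low)
		ns += BIT_ULL(32);

	return ns;
}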
+
+/**
+ * __idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor
* @rxq: Rx descriptor ring packet is being transacted on
* @skb: pointer to current skb being populated
* @rx_desc: Receive descriptor
@@ -3056,10 +3371,10 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
* other fields within the skb.
*/
static int
-idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
- const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
+__idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
{
- struct idpf_rx_csum_decoded csum_bits;
+ struct libeth_rx_csum csum_bits;
struct libeth_rx_pt decoded;
u16 rx_ptype;
@@ -3070,7 +3385,8 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
/* process RSS/hash */
idpf_rx_hash(rxq, skb, rx_desc, decoded);
- skb->protocol = eth_type_trans(skb, rxq->netdev);
+ if (idpf_queue_has(PTP, rxq))
+ idpf_rx_hwtstamp(rxq, rx_desc, skb);
if (le16_get_bits(rx_desc->hdrlen_flags,
VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M))
@@ -3079,30 +3395,27 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
csum_bits = idpf_rx_splitq_extract_csum_bits(rx_desc);
idpf_rx_csum(rxq, skb, csum_bits, decoded);
- skb_record_rx_queue(skb, rxq->idx);
-
return 0;
}
-/**
- * idpf_rx_add_frag - Add contents of Rx buffer to sk_buff as a frag
- * @rx_buf: buffer containing page to add
- * @skb: sk_buff to place the data into
- * @size: packet length from rx_desc
- *
- * This function will add the data contained in rx_buf->page to the skb.
- * It will just attach the page as a frag to the skb.
- * The function will then update the page offset.
- */
-void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
- unsigned int size)
+bool idpf_rx_process_skb_fields(struct sk_buff *skb,
+ const struct libeth_xdp_buff *xdp,
+ struct libeth_rq_napi_stats *rs)
{
- u32 hr = rx_buf->page->pp->p.offset;
+ struct idpf_rx_queue *rxq;
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
- rx_buf->offset + hr, size, rx_buf->truesize);
+ rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);
+
+ return !__idpf_rx_process_skb_fields(rxq, skb, xdp->desc);
}
+LIBETH_XDP_DEFINE_START();
+LIBETH_XDP_DEFINE_RUN(static idpf_xdp_run_pass, idpf_xdp_run_prog,
+ idpf_xdp_tx_flush_bulk, idpf_rx_process_skb_fields);
+LIBETH_XDP_DEFINE_FINALIZE(static idpf_xdp_finalize_rx, idpf_xdp_tx_flush_bulk,
+ idpf_xdp_tx_finalize);
+LIBETH_XDP_DEFINE_END();
+
/**
* idpf_rx_hsplit_wa - handle header buffer overflows and split errors
* @hdr: Rx buffer for the headers
@@ -3123,51 +3436,28 @@ static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr,
struct libeth_fqe *buf, u32 data_len)
{
u32 copy = data_len <= L1_CACHE_BYTES ? data_len : ETH_HLEN;
+ struct page *hdr_page, *buf_page;
const void *src;
void *dst;
- if (!libeth_rx_sync_for_cpu(buf, copy))
+ if (unlikely(netmem_is_net_iov(buf->netmem)) ||
+ !libeth_rx_sync_for_cpu(buf, copy))
return 0;
- dst = page_address(hdr->page) + hdr->offset + hdr->page->pp->p.offset;
- src = page_address(buf->page) + buf->offset + buf->page->pp->p.offset;
- memcpy(dst, src, LARGEST_ALIGN(copy));
+ hdr_page = __netmem_to_page(hdr->netmem);
+ buf_page = __netmem_to_page(buf->netmem);
+ dst = page_address(hdr_page) + hdr->offset +
+ pp_page_to_nmdesc(hdr_page)->pp->p.offset;
+ src = page_address(buf_page) + buf->offset +
+ pp_page_to_nmdesc(buf_page)->pp->p.offset;
+ memcpy(dst, src, LARGEST_ALIGN(copy));
buf->offset += copy;
return copy;
}
/**
- * idpf_rx_build_skb - Allocate skb and populate it from header buffer
- * @buf: Rx buffer to pull data from
- * @size: the length of the packet
- *
- * This function allocates an skb. It then populates it with the page data from
- * the current receive descriptor, taking care to set up the skb correctly.
- */
-struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size)
-{
- u32 hr = buf->page->pp->p.offset;
- struct sk_buff *skb;
- void *va;
-
- va = page_address(buf->page) + buf->offset;
- prefetch(va + hr);
-
- skb = napi_build_skb(va, buf->truesize);
- if (unlikely(!skb))
- return NULL;
-
- skb_mark_for_recycle(skb);
-
- skb_reserve(skb, hr);
- __skb_put(skb, size);
-
- return skb;
-}
-
-/**
* idpf_rx_splitq_test_staterr - tests bits in Rx descriptor
* status and error fields
* @stat_err_field: field from descriptor to test bits in
@@ -3208,13 +3498,18 @@ static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_de
*/
static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
{
- int total_rx_bytes = 0, total_rx_pkts = 0;
struct idpf_buf_queue *rx_bufq = NULL;
- struct sk_buff *skb = rxq->skb;
+ struct libeth_rq_napi_stats rs = { };
u16 ntc = rxq->next_to_clean;
+ LIBETH_XDP_ONSTACK_BUFF(xdp);
+ LIBETH_XDP_ONSTACK_BULK(bq);
+
+ libeth_xdp_tx_init_bulk(&bq, rxq->xdp_prog, rxq->xdp_rxq.dev,
+ rxq->xdpsqs, rxq->num_xdp_txq);
+ libeth_xdp_init_buff(xdp, &rxq->xdp, &rxq->xdp_rxq);
/* Process Rx packets bounded by budget */
- while (likely(total_rx_pkts < budget)) {
+ while (likely(rs.packets < budget)) {
struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
struct libeth_fqe *hdr, *rx_buf = NULL;
struct idpf_sw_queue *refillq = NULL;
@@ -3228,18 +3523,14 @@ static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
/* get the Rx desc from Rx queue based on 'next_to_clean' */
rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb;
- /* This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc
- */
- dma_rmb();
-
/* if the descriptor isn't done, no work yet to do */
gen_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M);
-
if (idpf_queue_has(GEN_CHK, rxq) != gen_id)
break;
+ dma_rmb();
+
rxdid = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M,
rx_desc->rxdid_ucast);
if (rxdid != VIRTCHNL2_RXDID_2_FLEX_SPLITQ) {
@@ -3284,84 +3575,47 @@ static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
hdr = &rx_bufq->hdr_buf[buf_id];
- if (unlikely(!hdr_len && !skb)) {
+ if (unlikely(!hdr_len && !xdp->data)) {
hdr_len = idpf_rx_hsplit_wa(hdr, rx_buf, pkt_len);
- pkt_len -= hdr_len;
+ /* If failed, drop both buffers by setting len to 0 */
+ pkt_len -= hdr_len ? : pkt_len;
u64_stats_update_begin(&rxq->stats_sync);
u64_stats_inc(&rxq->q_stats.hsplit_buf_ovf);
u64_stats_update_end(&rxq->stats_sync);
}
- if (libeth_rx_sync_for_cpu(hdr, hdr_len)) {
- skb = idpf_rx_build_skb(hdr, hdr_len);
- if (!skb)
- break;
+ if (libeth_xdp_process_buff(xdp, hdr, hdr_len))
+ rs.hsplit++;
- u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_inc(&rxq->q_stats.hsplit_pkts);
- u64_stats_update_end(&rxq->stats_sync);
- }
-
- hdr->page = NULL;
+ hdr->netmem = 0;
payload:
- if (!libeth_rx_sync_for_cpu(rx_buf, pkt_len))
- goto skip_data;
-
- if (skb)
- idpf_rx_add_frag(rx_buf, skb, pkt_len);
- else
- skb = idpf_rx_build_skb(rx_buf, pkt_len);
-
- /* exit if we failed to retrieve a buffer */
- if (!skb)
- break;
-
-skip_data:
- rx_buf->page = NULL;
+ libeth_xdp_process_buff(xdp, rx_buf, pkt_len);
+ rx_buf->netmem = 0;
- idpf_rx_post_buf_refill(refillq, buf_id);
+ idpf_post_buf_refill(refillq, buf_id);
IDPF_RX_BUMP_NTC(rxq, ntc);
/* skip if it is non EOP desc */
- if (!idpf_rx_splitq_is_eop(rx_desc) || unlikely(!skb))
+ if (!idpf_rx_splitq_is_eop(rx_desc) || unlikely(!xdp->data))
continue;
- /* pad skb if needed (to make valid ethernet frame) */
- if (eth_skb_pad(skb)) {
- skb = NULL;
- continue;
- }
-
- /* probably a little skewed due to removing CRC */
- total_rx_bytes += skb->len;
-
- /* protocol */
- if (unlikely(idpf_rx_process_skb_fields(rxq, skb, rx_desc))) {
- dev_kfree_skb_any(skb);
- skb = NULL;
- continue;
- }
-
- /* send completed skb up the stack */
- napi_gro_receive(rxq->napi, skb);
- skb = NULL;
-
- /* update budget accounting */
- total_rx_pkts++;
+ idpf_xdp_run_pass(xdp, &bq, rxq->napi, &rs, rx_desc);
}
+ idpf_xdp_finalize_rx(&bq);
+
rxq->next_to_clean = ntc;
+ libeth_xdp_save_buff(&rxq->xdp, xdp);
- rxq->skb = skb;
u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_add(&rxq->q_stats.packets, total_rx_pkts);
- u64_stats_add(&rxq->q_stats.bytes, total_rx_bytes);
+ u64_stats_add(&rxq->q_stats.packets, rs.packets);
+ u64_stats_add(&rxq->q_stats.bytes, rs.bytes);
+ u64_stats_add(&rxq->q_stats.hsplit_pkts, rs.hsplit);
u64_stats_update_end(&rxq->stats_sync);
- /* guarantee a trip back through this routine if there was a failure */
- return total_rx_pkts;
+ return rs.packets;
}
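The reordering above (checking the GEN bit before issuing dma_rmb()) is the usual descriptor-ownership handshake; a minimal sketch using the same descriptor layout as this hunk:

/* Sketch of the ownership check used in the clean loop above. */
static bool rx_desc_is_done(const struct virtchnl2_rx_flex_desc_adv_nic_3 *desc,
			    bool sw_gen)
{
	bool hw_gen = le16_get_bits(desc->pktlen_gen_bufq_id,
				    VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M);

	if (hw_gen != sw_gen)
		return false;	/* HW has not written this descriptor back */

	dma_rmb();		/* order reads of the remaining fields */
	return true;
}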
/**
@@ -3429,10 +3683,10 @@ static void idpf_rx_clean_refillq(struct idpf_buf_queue *bufq,
bool failure;
if (idpf_queue_has(RFL_GEN_CHK, refillq) !=
- !!(refill_desc & IDPF_RX_BI_GEN_M))
+ !!(refill_desc & IDPF_RFL_BI_GEN_M))
break;
- buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc);
+ buf_id = FIELD_GET(IDPF_RFL_BI_BUFID_M, refill_desc);
failure = idpf_rx_update_bufq_desc(bufq, buf_id, buf_desc);
if (failure)
break;
@@ -3504,7 +3758,7 @@ static irqreturn_t idpf_vport_intr_clean_queues(int __always_unused irq,
struct idpf_q_vector *q_vector = (struct idpf_q_vector *)data;
q_vector->total_events++;
- napi_schedule(&q_vector->napi);
+ napi_schedule_irqoff(&q_vector->napi);
return IRQ_HANDLED;
}
@@ -3545,6 +3799,8 @@ void idpf_vport_intr_rel(struct idpf_vport *vport)
for (u32 v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
+ kfree(q_vector->xsksq);
+ q_vector->xsksq = NULL;
kfree(q_vector->complq);
q_vector->complq = NULL;
kfree(q_vector->bufq);
@@ -3553,14 +3809,26 @@ void idpf_vport_intr_rel(struct idpf_vport *vport)
q_vector->tx = NULL;
kfree(q_vector->rx);
q_vector->rx = NULL;
-
- free_cpumask_var(q_vector->affinity_mask);
}
kfree(vport->q_vectors);
vport->q_vectors = NULL;
}
+static void idpf_q_vector_set_napi(struct idpf_q_vector *q_vector, bool link)
+{
+ struct napi_struct *napi = link ? &q_vector->napi : NULL;
+ struct net_device *dev = q_vector->vport->netdev;
+
+ for (u32 i = 0; i < q_vector->num_rxq; i++)
+ netif_queue_set_napi(dev, q_vector->rx[i]->idx,
+ NETDEV_QUEUE_TYPE_RX, napi);
+
+ for (u32 i = 0; i < q_vector->num_txq; i++)
+ netif_queue_set_napi(dev, q_vector->tx[i]->idx,
+ NETDEV_QUEUE_TYPE_TX, napi);
+}
+
/**
* idpf_vport_intr_rel_irq - Free the IRQ association with the OS
* @vport: main vport structure
@@ -3581,8 +3849,7 @@ static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
vidx = vport->q_vector_idxs[vector];
irq_num = adapter->msix_entries[vidx].vector;
- /* clear the affinity_mask in the IRQ descriptor */
- irq_set_affinity_hint(irq_num, NULL);
+ idpf_q_vector_set_napi(q_vector, false);
kfree(free_irq(irq_num, q_vector));
}
}
@@ -3596,6 +3863,8 @@ static void idpf_vport_intr_dis_irq_all(struct idpf_vport *vport)
struct idpf_q_vector *q_vector = vport->q_vectors;
int q_idx;
+ writel(0, vport->noirq_dyn_ctl);
+
for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++)
writel(0, q_vector[q_idx].intr_reg.dyn_ctl);
}
@@ -3603,21 +3872,31 @@ static void idpf_vport_intr_dis_irq_all(struct idpf_vport *vport)
/**
* idpf_vport_intr_buildreg_itr - Enable default interrupt generation settings
* @q_vector: pointer to q_vector
- * @type: itr index
- * @itr: itr value
*/
-static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector,
- const int type, u16 itr)
+static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector)
{
- u32 itr_val;
+ u32 itr_val = q_vector->intr_reg.dyn_ctl_intena_m;
+ int type = IDPF_NO_ITR_UPDATE_IDX;
+ u16 itr = 0;
+
+ if (q_vector->wb_on_itr) {
+ /*
+ * Trigger a software interrupt when exiting wb_on_itr, to make
+ * sure we catch any pending write backs that might have been
+ * missed due to interrupt state transition.
+ */
+ itr_val |= q_vector->intr_reg.dyn_ctl_swint_trig_m |
+ q_vector->intr_reg.dyn_ctl_sw_itridx_ena_m;
+ type = IDPF_SW_ITR_UPDATE_IDX;
+ itr = IDPF_ITR_20K;
+ }
itr &= IDPF_ITR_MASK;
/* Don't clear PBA because that can cause lost interrupts that
* came in while we were cleaning/polling
*/
- itr_val = q_vector->intr_reg.dyn_ctl_intena_m |
- (type << q_vector->intr_reg.dyn_ctl_itridx_s) |
- (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1));
+ itr_val |= (type << q_vector->intr_reg.dyn_ctl_itridx_s) |
+ (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1));
return itr_val;
}
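For reference, a hedged sketch of the value this function composes when leaving wb_on_itr, built from the same intr_reg fields and ITR defines used above (the actual field offsets come from the per-device register layout, not from this patch):

/* Sketch only: dyn_ctl value for the wb_on_itr exit path. */
static u32 dyn_ctl_value_sketch(const struct idpf_intr_reg *reg)
{
	u32 type = IDPF_SW_ITR_UPDATE_IDX;
	u16 itr = IDPF_ITR_20K & IDPF_ITR_MASK;
	u32 val;

	val = reg->dyn_ctl_intena_m |		/* re-enable the interrupt */
	      reg->dyn_ctl_swint_trig_m |	/* fire a SW-triggered interrupt */
	      reg->dyn_ctl_sw_itridx_ena_m;	/* ...on the SW ITR index */

	return val | (type << reg->dyn_ctl_itridx_s) |
	       ((u32)itr << (reg->dyn_ctl_intrvl_s - 1));
}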
@@ -3715,9 +3994,8 @@ void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector)
/* net_dim() updates ITR out-of-band using a work item */
idpf_net_dim(q_vector);
+ intval = idpf_vport_intr_buildreg_itr(q_vector);
q_vector->wb_on_itr = false;
- intval = idpf_vport_intr_buildreg_itr(q_vector,
- IDPF_NO_ITR_UPDATE_IDX, 0);
writel(intval, q_vector->intr_reg.dyn_ctl);
}
@@ -3761,8 +4039,8 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
"Request_irq failed, error: %d\n", err);
goto free_q_irqs;
}
- /* assign the mask for this irq */
- irq_set_affinity_hint(irq_num, q_vector->affinity_mask);
+
+ idpf_q_vector_set_napi(q_vector, true);
}
return 0;
@@ -3830,6 +4108,8 @@ static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport)
if (qv->num_txq || qv->num_rxq)
idpf_vport_intr_update_itr_ena_irq(qv);
}
+
+ writel(vport->noirq_dyn_ctl_ena, vport->noirq_dyn_ctl);
}
/**
@@ -3979,7 +4259,9 @@ static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
struct idpf_rx_queue *rxq = q_vec->rx[i];
int pkts_cleaned_per_q;
- pkts_cleaned_per_q = idpf_rx_splitq_clean(rxq, budget_per_q);
+ pkts_cleaned_per_q = idpf_queue_has(XSK, rxq) ?
+ idpf_xskrq_poll(rxq, budget_per_q) :
+ idpf_rx_splitq_clean(rxq, budget_per_q);
/* if we clean as many as budgeted, we must not be done */
if (pkts_cleaned_per_q >= budget_per_q)
clean_complete = false;
@@ -3989,8 +4271,10 @@ static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
nid = numa_mem_id();
- for (i = 0; i < q_vec->num_bufq; i++)
- idpf_rx_clean_refillq_all(q_vec->bufq[i], nid);
+ for (i = 0; i < q_vec->num_bufq; i++) {
+ if (!idpf_queue_has(XSK, q_vec->bufq[i]))
+ idpf_rx_clean_refillq_all(q_vec->bufq[i], nid);
+ }
return clean_complete;
}
@@ -4004,7 +4288,7 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
{
struct idpf_q_vector *q_vector =
container_of(napi, struct idpf_q_vector, napi);
- bool clean_complete;
+ bool clean_complete = true;
int work_done = 0;
/* Handle case where we are called by netpoll with a budget of 0 */
@@ -4014,8 +4298,13 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
return 0;
}
- clean_complete = idpf_rx_splitq_clean_all(q_vector, budget, &work_done);
- clean_complete &= idpf_tx_splitq_clean_all(q_vector, budget, &work_done);
+ for (u32 i = 0; i < q_vector->num_xsksq; i++)
+ clean_complete &= idpf_xsk_xmit(q_vector->xsksq[i]);
+
+ clean_complete &= idpf_tx_splitq_clean_all(q_vector, budget,
+ &work_done);
+ clean_complete &= idpf_rx_splitq_clean_all(q_vector, budget,
+ &work_done);
/* If work not completed, return budget and polling will return */
if (!clean_complete) {
@@ -4028,20 +4317,12 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
/* Exit the polling mode, but don't re-enable interrupts if stack might
* poll us due to busy-polling
*/
- if (likely(napi_complete_done(napi, work_done)))
+ if (napi_complete_done(napi, work_done))
idpf_vport_intr_update_itr_ena_irq(q_vector);
else
idpf_vport_intr_set_wb_on_itr(q_vector);
- /* Switch to poll mode in the tear-down path after sending disable
- * queues virtchnl message, as the interrupts will be disabled after
- * that
- */
- if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE,
- q_vector->tx[0])))
- return budget;
- else
- return work_done;
+ return work_done;
}
/**
@@ -4052,8 +4333,8 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
*/
static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
{
+ u16 num_txq_grp = vport->num_txq_grp - vport->num_xdp_txq;
bool split = idpf_is_queue_model_split(vport->rxq_model);
- u16 num_txq_grp = vport->num_txq_grp;
struct idpf_rxq_group *rx_qgrp;
struct idpf_txq_group *tx_qgrp;
u32 i, qv_idx, q_index;
@@ -4129,6 +4410,21 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
qv_idx++;
}
+
+ for (i = 0; i < vport->num_xdp_txq; i++) {
+ struct idpf_tx_queue *xdpsq;
+ struct idpf_q_vector *qv;
+
+ xdpsq = vport->txqs[vport->xdp_txq_offset + i];
+ if (!idpf_queue_has(XSK, xdpsq))
+ continue;
+
+ qv = idpf_find_rxq_vec(vport, i);
+ idpf_xsk_init_wakeup(qv);
+
+ xdpsq->q_vector = qv;
+ qv->xsksq[qv->num_xsksq++] = xdpsq;
+ }
}
/**
@@ -4149,6 +4445,8 @@ static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
for (i = 0; i < vport->num_q_vectors; i++)
vport->q_vectors[i].v_idx = vport->q_vector_idxs[i];
+ vport->noirq_v_idx = vport->q_vector_idxs[i];
+
return 0;
}
@@ -4162,6 +4460,8 @@ static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
for (i = 0; i < vport->num_q_vectors; i++)
vport->q_vectors[i].v_idx = vecids[vport->q_vector_idxs[i]];
+ vport->noirq_v_idx = vecids[vport->q_vector_idxs[i]];
+
kfree(vecids);
return 0;
@@ -4174,7 +4474,8 @@ static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
{
int (*napi_poll)(struct napi_struct *napi, int budget);
- u16 v_idx;
+ u16 v_idx, qv_idx;
+ int irq_num;
if (idpf_is_queue_model_split(vport->txq_model))
napi_poll = idpf_vport_splitq_napi_poll;
@@ -4183,12 +4484,12 @@ static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
+ qv_idx = vport->q_vector_idxs[v_idx];
+ irq_num = vport->adapter->msix_entries[qv_idx].vector;
- netif_napi_add(vport->netdev, &q_vector->napi, napi_poll);
-
- /* only set affinity_mask if the CPU is online */
- if (cpu_online(v_idx))
- cpumask_set_cpu(v_idx, q_vector->affinity_mask);
+ netif_napi_add_config(vport->netdev, &q_vector->napi,
+ napi_poll, v_idx);
+ netif_napi_set_irq(&q_vector->napi, irq_num);
}
}
@@ -4202,9 +4503,13 @@ static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
int idpf_vport_intr_alloc(struct idpf_vport *vport)
{
u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector;
+ struct idpf_vport_user_config_data *user_config;
struct idpf_q_vector *q_vector;
+ struct idpf_q_coalesce *q_coal;
u32 complqs_per_vector, v_idx;
+ u16 idx = vport->idx;
+ user_config = &vport->adapter->vport_config[idx]->user_config;
vport->q_vectors = kcalloc(vport->num_q_vectors,
sizeof(struct idpf_q_vector), GFP_KERNEL);
if (!vport->q_vectors)
@@ -4222,19 +4527,17 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
q_vector = &vport->q_vectors[v_idx];
+ q_coal = &user_config->q_coalesce[v_idx];
q_vector->vport = vport;
- q_vector->tx_itr_value = IDPF_ITR_TX_DEF;
- q_vector->tx_intr_mode = IDPF_ITR_DYNAMIC;
+ q_vector->tx_itr_value = q_coal->tx_coalesce_usecs;
+ q_vector->tx_intr_mode = q_coal->tx_intr_mode;
q_vector->tx_itr_idx = VIRTCHNL2_ITR_IDX_1;
- q_vector->rx_itr_value = IDPF_ITR_RX_DEF;
- q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC;
+ q_vector->rx_itr_value = q_coal->rx_coalesce_usecs;
+ q_vector->rx_intr_mode = q_coal->rx_intr_mode;
q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0;
- if (!zalloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL))
- goto error;
-
q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx),
GFP_KERNEL);
if (!q_vector->tx)
@@ -4259,6 +4562,15 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
GFP_KERNEL);
if (!q_vector->complq)
goto error;
+
+ if (!vport->xdp_txq_offset)
+ continue;
+
+ q_vector->xsksq = kcalloc(rxqs_per_vector,
+ sizeof(*q_vector->xsksq),
+ GFP_KERNEL);
+ if (!q_vector->xsksq)
+ goto error;
}
return 0;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 9c1fe84108ed..75b977094741 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -7,8 +7,10 @@
#include <linux/dim.h>
#include <net/libeth/cache.h>
-#include <net/tcp.h>
+#include <net/libeth/types.h>
#include <net/netdev_queues.h>
+#include <net/tcp.h>
+#include <net/xdp.h>
#include "idpf_lan_txrx.h"
#include "virtchnl2_lan_desc.h"
@@ -57,6 +59,9 @@
/* Default vector sharing */
#define IDPF_MBX_Q_VEC 1
#define IDPF_MIN_Q_VEC 1
+#define IDPF_MIN_RDMA_VEC 2
+/* Data vector for NOIRQ queues */
+#define IDPF_RESERVED_VECS 1
#define IDPF_DFLT_TX_Q_DESC_COUNT 512
#define IDPF_DFLT_TX_COMPLQ_DESC_COUNT 512
@@ -107,8 +112,8 @@ do { \
*/
#define IDPF_TX_SPLITQ_RE_MIN_GAP 64
-#define IDPF_RX_BI_GEN_M BIT(16)
-#define IDPF_RX_BI_BUFID_M GENMASK(15, 0)
+#define IDPF_RFL_BI_GEN_M BIT(16)
+#define IDPF_RFL_BI_BUFID_M GENMASK(15, 0)
#define IDPF_RXD_EOF_SPLITQ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M
#define IDPF_RXD_EOF_SINGLEQ VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M
@@ -117,10 +122,6 @@ do { \
((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
(txq)->next_to_clean - (txq)->next_to_use - 1)
-#define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->stash->buf_stack.top)
-#define IDPF_TX_BUF_RSV_LOW(txq) (IDPF_TX_BUF_RSV_UNUSED(txq) < \
- (txq)->desc_count >> 2)
-
#define IDPF_TX_COMPLQ_OVERFLOW_THRESH(txcq) ((txcq)->desc_count >> 1)
/* Determine the absolute number of completions pending, i.e. the number of
* completions that are expected to arrive on the TX completion queue.
@@ -130,11 +131,7 @@ do { \
0 : U32_MAX) + \
(txq)->num_completions_pending - (txq)->complq->num_completions)
-#define IDPF_TX_SPLITQ_COMPL_TAG_WIDTH 16
-/* Adjust the generation for the completion tag and wrap if necessary */
-#define IDPF_TX_ADJ_COMPL_TAG_GEN(txq) \
- ((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ? \
- 0 : (txq)->compl_tag_cur_gen)
+#define IDPF_TXBUF_NULL U32_MAX
#define IDPF_TXD_LAST_DESC_CMD (IDPF_TX_DESC_CMD_EOP | IDPF_TX_DESC_CMD_RS)
@@ -142,6 +139,9 @@ do { \
#define IDPF_TX_FLAGS_IPV4 BIT(1)
#define IDPF_TX_FLAGS_IPV6 BIT(2)
#define IDPF_TX_FLAGS_TUNNEL BIT(3)
+#define IDPF_TX_FLAGS_TSYN BIT(4)
+
+struct libeth_rq_napi_stats;
union idpf_tx_flex_desc {
struct idpf_flex_tx_desc q; /* queue based scheduling */
@@ -151,18 +151,6 @@ union idpf_tx_flex_desc {
#define idpf_tx_buf libeth_sqe
/**
- * struct idpf_buf_lifo - LIFO for managing OOO completions
- * @top: Used to know how many buffers are left
- * @size: Total size of LIFO
- * @bufs: Backing array
- */
-struct idpf_buf_lifo {
- u16 top;
- u16 size;
- struct idpf_tx_stash **bufs;
-};
-
-/**
* struct idpf_tx_offload_params - Offload parameters for a given packet
* @tx_flags: Feature flags enabled for this packet
* @hdr_offsets: Offset parameter for single queue model
@@ -194,6 +182,9 @@ struct idpf_tx_offload_params {
* @compl_tag: Associated tag for completion
* @td_tag: Descriptor tunneling tag
* @offload: Offload parameters
+ * @prev_ntu: stored TxQ next_to_use in case of rollback
+ * @prev_refill_ntc: stored refillq next_to_clean in case of packet rollback
+ * @prev_refill_gen: stored refillq generation bit in case of packet rollback
*/
struct idpf_tx_splitq_params {
enum idpf_tx_desc_dtype_value dtype;
@@ -204,6 +195,10 @@ struct idpf_tx_splitq_params {
};
struct idpf_tx_offload_params offload;
+
+ u16 prev_ntu;
+ u16 prev_refill_ntc;
+ bool prev_refill_gen;
};
enum idpf_tx_ctx_desc_eipt_offload {
@@ -213,25 +208,6 @@ enum idpf_tx_ctx_desc_eipt_offload {
IDPF_TX_CTX_EXT_IP_IPV4 = 0x3
};
-/* Checksum offload bits decoded from the receive descriptor. */
-struct idpf_rx_csum_decoded {
- u32 l3l4p : 1;
- u32 ipe : 1;
- u32 eipe : 1;
- u32 eudpe : 1;
- u32 ipv6exadd : 1;
- u32 l4e : 1;
- u32 pprs : 1;
- u32 nat : 1;
- u32 raw_csum_inv : 1;
- u32 raw_csum : 16;
-};
-
-struct idpf_rx_extracted {
- unsigned int size;
- u16 rx_ptype;
-};
-
#define IDPF_TX_COMPLQ_CLEAN_BUDGET 256
#define IDPF_TX_MIN_PKT_LEN 17
#define IDPF_TX_DESCS_FOR_SKB_DATA_PTR 1
@@ -305,9 +281,13 @@ struct idpf_ptype_state {
* bit and Q_RFL_GEN is the SW bit.
* @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling
* @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions
- * @__IDPF_Q_POLL_MODE: Enable poll mode
* @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode
* @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq)
+ * @__IDPF_Q_PTP: indicates whether the Rx timestamping is enabled for the
+ * queue
+ * @__IDPF_Q_NOIRQ: queue is polling-driven and has no interrupt
+ * @__IDPF_Q_XDP: this is an XDP queue
+ * @__IDPF_Q_XSK: the queue has an XSk pool installed
* @__IDPF_Q_FLAGS_NBITS: Must be last
*/
enum idpf_queue_flags_t {
@@ -315,9 +295,12 @@ enum idpf_queue_flags_t {
__IDPF_Q_RFL_GEN_CHK,
__IDPF_Q_FLOW_SCH_EN,
__IDPF_Q_SW_MARKER,
- __IDPF_Q_POLL_MODE,
__IDPF_Q_CRC_EN,
__IDPF_Q_HSPLIT_EN,
+ __IDPF_Q_PTP,
+ __IDPF_Q_NOIRQ,
+ __IDPF_Q_XDP,
+ __IDPF_Q_XSK,
__IDPF_Q_FLAGS_NBITS,
};
@@ -354,6 +337,8 @@ struct idpf_vec_regs {
* @dyn_ctl_itridx_m: Mask for ITR index
* @dyn_ctl_intrvl_s: Register bit offset for ITR interval
* @dyn_ctl_wb_on_itr_m: Mask for WB on ITR feature
+ * @dyn_ctl_sw_itridx_ena_m: Mask for SW ITR index
+ * @dyn_ctl_swint_trig_m: Mask for dyn_ctl SW triggered interrupt enable
* @rx_itr: RX ITR register
* @tx_itr: TX ITR register
* @icr_ena: Interrupt cause register offset
@@ -367,6 +352,8 @@ struct idpf_intr_reg {
u32 dyn_ctl_itridx_m;
u32 dyn_ctl_intrvl_s;
u32 dyn_ctl_wb_on_itr_m;
+ u32 dyn_ctl_sw_itridx_ena_m;
+ u32 dyn_ctl_swint_trig_m;
void __iomem *rx_itr;
void __iomem *tx_itr;
void __iomem *icr_ena;
@@ -380,14 +367,17 @@ struct idpf_intr_reg {
* @num_txq: Number of TX queues
* @num_bufq: Number of buffer queues
* @num_complq: number of completion queues
+ * @num_xsksq: number of XSk send queues
* @rx: Array of RX queues to service
* @tx: Array of TX queues to service
* @bufq: Array of buffer queues to service
* @complq: array of completion queues
+ * @xsksq: array of XSk send queues
* @intr_reg: See struct idpf_intr_reg
- * @napi: napi handler
+ * @csd: XSk wakeup CSD
* @total_events: Number of interrupts processed
* @wb_on_itr: whether WB on ITR is enabled
+ * @napi: napi handler
* @tx_dim: Data for TX net_dim algorithm
* @tx_itr_value: TX interrupt throttling rate
* @tx_intr_mode: Dynamic ITR or not
@@ -397,7 +387,6 @@ struct idpf_intr_reg {
* @rx_intr_mode: Dynamic ITR or not
* @rx_itr_idx: RX ITR index
* @v_idx: Vector index
- * @affinity_mask: CPU affinity mask
*/
struct idpf_q_vector {
__cacheline_group_begin_aligned(read_mostly);
@@ -407,19 +396,24 @@ struct idpf_q_vector {
u16 num_txq;
u16 num_bufq;
u16 num_complq;
+ u16 num_xsksq;
struct idpf_rx_queue **rx;
struct idpf_tx_queue **tx;
struct idpf_buf_queue **bufq;
struct idpf_compl_queue **complq;
+ struct idpf_tx_queue **xsksq;
struct idpf_intr_reg intr_reg;
__cacheline_group_end_aligned(read_mostly);
__cacheline_group_begin_aligned(read_write);
- struct napi_struct napi;
+ call_single_data_t csd;
+
u16 total_events;
bool wb_on_itr;
+ struct napi_struct napi;
+
struct dim tx_dim;
u16 tx_itr_value;
bool tx_intr_mode;
@@ -434,13 +428,12 @@ struct idpf_q_vector {
__cacheline_group_begin_aligned(cold);
u16 v_idx;
- cpumask_var_t affinity_mask;
__cacheline_group_end_aligned(cold);
};
-libeth_cacheline_set_assert(struct idpf_q_vector, 112,
- 24 + sizeof(struct napi_struct) +
+libeth_cacheline_set_assert(struct idpf_q_vector, 136,
+ 56 + sizeof(struct napi_struct) +
2 * sizeof(struct dim),
- 8 + sizeof(cpumask_var_t));
+ 8);
struct idpf_rx_queue_stats {
u64_stats_t packets;
@@ -460,6 +453,7 @@ struct idpf_tx_queue_stats {
u64_stats_t q_busy;
u64_stats_t skb_drops;
u64_stats_t dma_map_errs;
+ u64_stats_t tstamp_skipped;
};
#define IDPF_ITR_DYNAMIC 1
@@ -471,44 +465,41 @@ struct idpf_tx_queue_stats {
#define IDPF_ITR_IS_DYNAMIC(itr_mode) (itr_mode)
#define IDPF_ITR_TX_DEF IDPF_ITR_20K
#define IDPF_ITR_RX_DEF IDPF_ITR_20K
+/* Index used for 'SW ITR' update in DYN_CTL register */
+#define IDPF_SW_ITR_UPDATE_IDX 2
/* Index used for 'No ITR' update in DYN_CTL register */
#define IDPF_NO_ITR_UPDATE_IDX 3
#define IDPF_ITR_IDX_SPACING(spacing, dflt) (spacing ? spacing : dflt)
#define IDPF_DIM_DEFAULT_PROFILE_IX 1
/**
- * struct idpf_txq_stash - Tx buffer stash for Flow-based scheduling mode
- * @buf_stack: Stack of empty buffers to store buffer info for out of order
- * buffer completions. See struct idpf_buf_lifo
- * @sched_buf_hash: Hash table to store buffers
- */
-struct idpf_txq_stash {
- struct idpf_buf_lifo buf_stack;
- DECLARE_HASHTABLE(sched_buf_hash, 12);
-} ____cacheline_aligned;
-
-/**
* struct idpf_rx_queue - software structure representing a receive queue
* @rx: universal receive descriptor array
* @single_buf: buffer descriptor array in singleq
* @desc_ring: virtual descriptor ring address
* @bufq_sets: Pointer to the array of buffer queues in splitq mode
* @napi: NAPI instance corresponding to this queue (splitq)
+ * @xdp_prog: attached XDP program
* @rx_buf: See struct &libeth_fqe
* @pp: Page pool pointer in singleq mode
- * @netdev: &net_device corresponding to this queue
* @tail: Tail offset. Used for both queue models single and split.
* @flags: See enum idpf_queue_flags_t
* @idx: For RX queue, it is used to index to total RX queue across groups and
* used for skb reporting.
* @desc_count: Number of descriptors
+ * @num_xdp_txq: total number of XDP Tx queues
+ * @xdpsqs: shortcut for XDP Tx queues array
* @rxdids: Supported RX descriptor ids
+ * @truesize: data buffer truesize in singleq
* @rx_ptype_lkup: LUT of Rx ptypes
+ * @xdp_rxq: XDP queue info
* @next_to_use: Next descriptor to use
* @next_to_clean: Next descriptor to clean
* @next_to_alloc: RX buffer to allocate at
- * @skb: Pointer to the skb
- * @truesize: data buffer truesize in singleq
+ * @xdp: XDP buffer with the current frame
+ * @xsk: current XDP buffer in XSk mode
+ * @pool: XSk pool if installed
+ * @cached_phc_time: Cached PHC time for the Rx queue
* @stats_sync: See struct u64_stats_sync
* @q_stats: See union idpf_rx_queue_stats
* @q_id: Queue id
@@ -532,30 +523,45 @@ struct idpf_rx_queue {
struct {
struct idpf_bufq_set *bufq_sets;
struct napi_struct *napi;
+ struct bpf_prog __rcu *xdp_prog;
};
struct {
struct libeth_fqe *rx_buf;
struct page_pool *pp;
+ void __iomem *tail;
};
};
- struct net_device *netdev;
- void __iomem *tail;
DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
u16 idx;
u16 desc_count;
- u32 rxdids;
+ u32 num_xdp_txq;
+ union {
+ struct idpf_tx_queue **xdpsqs;
+ struct {
+ u32 rxdids;
+ u32 truesize;
+ };
+ };
const struct libeth_rx_pt *rx_ptype_lkup;
+
+ struct xdp_rxq_info xdp_rxq;
__cacheline_group_end_aligned(read_mostly);
__cacheline_group_begin_aligned(read_write);
- u16 next_to_use;
- u16 next_to_clean;
- u16 next_to_alloc;
+ u32 next_to_use;
+ u32 next_to_clean;
+ u32 next_to_alloc;
- struct sk_buff *skb;
- u32 truesize;
+ union {
+ struct libeth_xdp_buff_stash xdp;
+ struct {
+ struct libeth_xdp_buff *xsk;
+ struct xsk_buff_pool *pool;
+ };
+ };
+ u64 cached_phc_time;
struct u64_stats_sync stats_sync;
struct idpf_rx_queue_stats q_stats;
@@ -574,8 +580,11 @@ struct idpf_rx_queue {
u16 rx_max_pkt_size;
__cacheline_group_end_aligned(cold);
};
-libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
- 80 + sizeof(struct u64_stats_sync),
+libeth_cacheline_set_assert(struct idpf_rx_queue,
+ ALIGN(64, __alignof(struct xdp_rxq_info)) +
+ sizeof(struct xdp_rxq_info),
+ 96 + offsetof(struct idpf_rx_queue, q_stats) -
+ offsetofend(struct idpf_rx_queue, cached_phc_time),
32);
/**
@@ -587,36 +596,21 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
* @desc_ring: virtual descriptor ring address
* @tx_buf: See struct idpf_tx_buf
* @txq_grp: See struct idpf_txq_group
+ * @complq: corresponding completion queue in XDP mode
* @dev: Device back pointer for DMA mapping
+ * @pool: corresponding XSk pool if installed
* @tail: Tail offset. Used for both queue models single and split
* @flags: See enum idpf_queue_flags_t
* @idx: For TX queue, it is used as index to map between TX queue group and
* hot path TX pointers stored in vport. Used in both singleq/splitq.
* @desc_count: Number of descriptors
* @tx_min_pkt_len: Min supported packet length
- * @compl_tag_gen_s: Completion tag generation bit
- * The format of the completion tag will change based on the TXQ
- * descriptor ring size so that we can maintain roughly the same level
- * of "uniqueness" across all descriptor sizes. For example, if the
- * TXQ descriptor ring size is 64 (the minimum size supported), the
- * completion tag will be formatted as below:
- * 15 6 5 0
- * --------------------------------
- * | GEN=0-1023 |IDX = 0-63|
- * --------------------------------
- *
- * This gives us 64*1024 = 65536 possible unique values. Similarly, if
- * the TXQ descriptor ring size is 8160 (the maximum size supported),
- * the completion tag will be formatted as below:
- * 15 13 12 0
- * --------------------------------
- * |GEN | IDX = 0-8159 |
- * --------------------------------
- *
- * This gives us 8*8160 = 65280 possible unique values.
+ * @thresh: XDP queue cleaning threshold
* @netdev: &net_device corresponding to this queue
* @next_to_use: Next descriptor to use
* @next_to_clean: Next descriptor to clean
+ * @last_re: last descriptor index that RE bit was set
+ * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
* @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on
* the TX completion queue, it can be for any TXQ associated
* with that completion queue. This means we can clean up to
@@ -627,17 +621,21 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
* only once at the end of the cleaning routine.
* @clean_budget: singleq only, queue cleaning budget
* @cleaned_pkts: Number of packets cleaned for the above said case
- * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
- * @stash: Tx buffer stash for Flow-based scheduling mode
- * @compl_tag_bufid_m: Completion tag buffer id mask
- * @compl_tag_cur_gen: Used to keep track of current completion tag generation
- * @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset
+ * @refillq: Pointer to refill queue
+ * @pending: number of pending descriptors to send in QB
+ * @xdp_tx: number of pending &xdp_buff or &xdp_frame buffers
+ * @timer: timer for XDP Tx queue cleanup
+ * @xdp_lock: lock for XDP Tx queues sharing
+ * @cached_tstamp_caps: Tx timestamp capabilities negotiated with the CP
+ * @tstamp_task: Work that handles Tx timestamp read
* @stats_sync: See struct u64_stats_sync
* @q_stats: See union idpf_tx_queue_stats
* @q_id: Queue id
* @size: Length of descriptor ring in bytes
* @dma: Physical address of ring
* @q_vector: Backreference to associated vector
+ * @buf_pool_size: Total number of idpf_tx_buf
+ * @rel_q_id: relative virtchnl queue index
*/
struct idpf_tx_queue {
__cacheline_group_begin_aligned(read_mostly);
@@ -645,41 +643,61 @@ struct idpf_tx_queue {
struct idpf_base_tx_desc *base_tx;
struct idpf_base_tx_ctx_desc *base_ctx;
union idpf_tx_flex_desc *flex_tx;
- struct idpf_flex_tx_ctx_desc *flex_ctx;
+ union idpf_flex_tx_ctx_desc *flex_ctx;
void *desc_ring;
};
struct libeth_sqe *tx_buf;
- struct idpf_txq_group *txq_grp;
- struct device *dev;
+ union {
+ struct idpf_txq_group *txq_grp;
+ struct idpf_compl_queue *complq;
+ };
+ union {
+ struct device *dev;
+ struct xsk_buff_pool *pool;
+ };
void __iomem *tail;
DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
u16 idx;
u16 desc_count;
- u16 tx_min_pkt_len;
- u16 compl_tag_gen_s;
+ union {
+ u16 tx_min_pkt_len;
+ u32 thresh;
+ };
struct net_device *netdev;
__cacheline_group_end_aligned(read_mostly);
__cacheline_group_begin_aligned(read_write);
- u16 next_to_use;
- u16 next_to_clean;
+ u32 next_to_use;
+ u32 next_to_clean;
union {
- u32 cleaned_bytes;
- u32 clean_budget;
- };
- u16 cleaned_pkts;
+ struct {
+ u16 last_re;
+ u16 tx_max_bufs;
+
+ union {
+ u32 cleaned_bytes;
+ u32 clean_budget;
+ };
+ u16 cleaned_pkts;
- u16 tx_max_bufs;
- struct idpf_txq_stash *stash;
+ struct idpf_sw_queue *refillq;
+ };
+ struct {
+ u32 pending;
+ u32 xdp_tx;
+
+ struct libeth_xdpsq_timer *timer;
+ struct libeth_xdpsq_lock xdp_lock;
+ };
+ };
- u16 compl_tag_bufid_m;
- u16 compl_tag_cur_gen;
- u16 compl_tag_gen_max;
+ struct idpf_ptp_vport_tx_tstamp_caps *cached_tstamp_caps;
+ struct work_struct *tstamp_task;
struct u64_stats_sync stats_sync;
struct idpf_tx_queue_stats q_stats;
@@ -691,25 +709,36 @@ struct idpf_tx_queue {
dma_addr_t dma;
struct idpf_q_vector *q_vector;
+
+ u32 buf_pool_size;
+ u32 rel_q_id;
__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
- 88 + sizeof(struct u64_stats_sync),
- 24);
+ 104 +
+ offsetof(struct idpf_tx_queue, cached_tstamp_caps) -
+ offsetofend(struct idpf_tx_queue, timer) +
+ offsetof(struct idpf_tx_queue, q_stats) -
+ offsetofend(struct idpf_tx_queue, tstamp_task),
+ 32);
/**
* struct idpf_buf_queue - software structure representing a buffer queue
* @split_buf: buffer descriptor array
- * @hdr_buf: &libeth_fqe for header buffers
- * @hdr_pp: &page_pool for header buffers
* @buf: &libeth_fqe for data buffers
* @pp: &page_pool for data buffers
+ * @xsk_buf: &xdp_buff for XSk Rx buffers
+ * @pool: &xsk_buff_pool on XSk queues
+ * @hdr_buf: &libeth_fqe for header buffers
+ * @hdr_pp: &page_pool for header buffers
* @tail: Tail offset
* @flags: See enum idpf_queue_flags_t
* @desc_count: Number of descriptors
+ * @thresh: refill threshold in XSk
* @next_to_use: Next descriptor to use
* @next_to_clean: Next descriptor to clean
* @next_to_alloc: RX buffer to allocate at
+ * @pending: number of buffers to refill (XSk)
* @hdr_truesize: truesize for buffer headers
* @truesize: truesize for data buffers
* @q_id: Queue id
@@ -723,14 +752,24 @@ libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
struct idpf_buf_queue {
__cacheline_group_begin_aligned(read_mostly);
struct virtchnl2_splitq_rx_buf_desc *split_buf;
+ union {
+ struct {
+ struct libeth_fqe *buf;
+ struct page_pool *pp;
+ };
+ struct {
+ struct libeth_xdp_buff **xsk_buf;
+ struct xsk_buff_pool *pool;
+ };
+ };
struct libeth_fqe *hdr_buf;
struct page_pool *hdr_pp;
- struct libeth_fqe *buf;
- struct page_pool *pp;
void __iomem *tail;
DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
u32 desc_count;
+
+ u32 thresh;
__cacheline_group_end_aligned(read_mostly);
__cacheline_group_begin_aligned(read_write);
@@ -738,6 +777,7 @@ struct idpf_buf_queue {
u32 next_to_clean;
u32 next_to_alloc;
+ u32 pending;
u32 hdr_truesize;
u32 truesize;
__cacheline_group_end_aligned(read_write);
@@ -758,7 +798,9 @@ libeth_cacheline_set_assert(struct idpf_buf_queue, 64, 24, 32);
/**
* struct idpf_compl_queue - software structure representing a completion queue
- * @comp: completion descriptor array
+ * @comp: 8-byte completion descriptor array
+ * @comp_4b: 4-byte completion descriptor array
+ * @desc_ring: virtual descriptor ring address
* @txq_grp: See struct idpf_txq_group
* @flags: See enum idpf_queue_flags_t
* @desc_count: Number of descriptors
@@ -778,7 +820,12 @@ libeth_cacheline_set_assert(struct idpf_buf_queue, 64, 24, 32);
*/
struct idpf_compl_queue {
__cacheline_group_begin_aligned(read_mostly);
- struct idpf_splitq_tx_compl_desc *comp;
+ union {
+ struct idpf_splitq_tx_compl_desc *comp;
+ struct idpf_splitq_4b_tx_compl_desc *comp_4b;
+
+ void *desc_ring;
+ };
struct idpf_txq_group *txq_grp;
DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
@@ -905,7 +952,6 @@ struct idpf_rxq_group {
* @vport: Vport back pointer
* @num_txq: Number of TX queues associated
* @txqs: Array of TX queue pointers
- * @stashes: array of OOO stashes for the queues
* @complq: Associated completion queue pointer, split queue only
* @num_completions_pending: Total number of completions pending for the
* completion queue, accumulated for all TX queues
@@ -920,7 +966,6 @@ struct idpf_txq_group {
u16 num_txq;
struct idpf_tx_queue *txqs[IDPF_LARGE_MAX_Q];
- struct idpf_txq_stash *stashes;
struct idpf_compl_queue *complq;
@@ -934,7 +979,7 @@ static inline int idpf_q_vector_to_mem(const struct idpf_q_vector *q_vector)
if (!q_vector)
return NUMA_NO_NODE;
- cpu = cpumask_first(q_vector->affinity_mask);
+ cpu = cpumask_first(&q_vector->napi.config->affinity_mask);
return cpu < nr_cpu_ids ? cpu_to_mem(cpu) : NUMA_NO_NODE;
}
@@ -1013,6 +1058,17 @@ static inline void idpf_vport_intr_set_wb_on_itr(struct idpf_q_vector *q_vector)
reg->dyn_ctl);
}
+/**
+ * idpf_tx_splitq_get_free_bufs - get number of free buf_ids in refillq
+ * @refillq: pointer to refillq containing buf_ids
+ */
+static inline u32 idpf_tx_splitq_get_free_bufs(struct idpf_sw_queue *refillq)
+{
+ return (refillq->next_to_use > refillq->next_to_clean ?
+ 0 : refillq->desc_count) +
+ refillq->next_to_use - refillq->next_to_clean - 1;
+}
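A short worked example of the free-entry arithmetic above (values illustrative; one slot is always kept unused so a full ring can be told apart from an empty one):

/* Illustrative only: refillq free-entry count for two ring states. */
static u32 refillq_free_example(u32 ntu, u32 ntc, u32 desc_count)
{
	/* wrapped: ntu = 10,  ntc = 500, count = 512 -> 512 + 10 - 500 - 1 = 21
	 * linear:  ntu = 300, ntc = 100, count = 512 -> 300 - 100 - 1 = 199
	 */
	return (ntu > ntc ? 0 : desc_count) + ntu - ntc - 1;
}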
+
int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
void idpf_vport_init_num_qs(struct idpf_vport *vport,
struct virtchnl2_create_vport *vport_msg);
@@ -1033,31 +1089,30 @@ int idpf_config_rss(struct idpf_vport *vport);
int idpf_init_rss(struct idpf_vport *vport);
void idpf_deinit_rss(struct idpf_vport *vport);
int idpf_rx_bufs_init_all(struct idpf_vport *vport);
-void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
- unsigned int size);
-struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size);
+
+struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
+ u32 q_num);
+struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
+ u32 q_num);
+int idpf_qp_switch(struct idpf_vport *vport, u32 qid, bool en);
+
void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
bool xmit_more);
unsigned int idpf_size_to_txd_count(unsigned int size);
netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb);
-void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
- struct idpf_tx_buf *first, u16 ring_idx);
-unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
- struct sk_buff *skb);
+unsigned int idpf_tx_res_count_required(struct idpf_tx_queue *txq,
+ struct sk_buff *skb, u32 *buf_count);
void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
struct idpf_tx_queue *tx_q);
netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev);
bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
u16 cleaned_count);
+bool idpf_rx_process_skb_fields(struct sk_buff *skb,
+ const struct libeth_xdp_buff *xdp,
+ struct libeth_rq_napi_stats *rs);
int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
-static inline bool idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q,
- u32 needed)
-{
- return !netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
- IDPF_DESC_UNUSED(tx_q),
- needed, needed);
-}
+void idpf_wait_for_sw_marker_completion(const struct idpf_tx_queue *txq);
#endif /* !_IDPF_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
index aad62e270ae4..4cc58c83688c 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
@@ -9,10 +9,13 @@
/**
* idpf_vf_ctlq_reg_init - initialize default mailbox registers
+ * @adapter: adapter structure
* @cq: pointer to the array of create control queues
*/
-static void idpf_vf_ctlq_reg_init(struct idpf_ctlq_create_info *cq)
+static void idpf_vf_ctlq_reg_init(struct idpf_adapter *adapter,
+ struct idpf_ctlq_create_info *cq)
{
+ resource_size_t mbx_start = adapter->dev_ops.static_reg_info[0].start;
int i;
for (i = 0; i < IDPF_NUM_DFLT_MBX_Q; i++) {
@@ -21,22 +24,22 @@ static void idpf_vf_ctlq_reg_init(struct idpf_ctlq_create_info *cq)
switch (ccq->type) {
case IDPF_CTLQ_TYPE_MAILBOX_TX:
/* set head and tail registers in our local struct */
- ccq->reg.head = VF_ATQH;
- ccq->reg.tail = VF_ATQT;
- ccq->reg.len = VF_ATQLEN;
- ccq->reg.bah = VF_ATQBAH;
- ccq->reg.bal = VF_ATQBAL;
+ ccq->reg.head = VF_ATQH - mbx_start;
+ ccq->reg.tail = VF_ATQT - mbx_start;
+ ccq->reg.len = VF_ATQLEN - mbx_start;
+ ccq->reg.bah = VF_ATQBAH - mbx_start;
+ ccq->reg.bal = VF_ATQBAL - mbx_start;
ccq->reg.len_mask = VF_ATQLEN_ATQLEN_M;
ccq->reg.len_ena_mask = VF_ATQLEN_ATQENABLE_M;
ccq->reg.head_mask = VF_ATQH_ATQH_M;
break;
case IDPF_CTLQ_TYPE_MAILBOX_RX:
/* set head and tail registers in our local struct */
- ccq->reg.head = VF_ARQH;
- ccq->reg.tail = VF_ARQT;
- ccq->reg.len = VF_ARQLEN;
- ccq->reg.bah = VF_ARQBAH;
- ccq->reg.bal = VF_ARQBAL;
+ ccq->reg.head = VF_ARQH - mbx_start;
+ ccq->reg.tail = VF_ARQT - mbx_start;
+ ccq->reg.len = VF_ARQLEN - mbx_start;
+ ccq->reg.bah = VF_ARQBAH - mbx_start;
+ ccq->reg.bal = VF_ARQBAL - mbx_start;
ccq->reg.len_mask = VF_ARQLEN_ARQLEN_M;
ccq->reg.len_ena_mask = VF_ARQLEN_ARQENABLE_M;
ccq->reg.head_mask = VF_ARQH_ARQH_M;
@@ -73,7 +76,7 @@ static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
int num_vecs = vport->num_q_vectors;
struct idpf_vec_regs *reg_vals;
int num_regs, i, err = 0;
- u32 rx_itr, tx_itr;
+ u32 rx_itr, tx_itr, val;
u16 total_vecs;
total_vecs = idpf_get_reserved_vecs(vport->adapter);
@@ -101,6 +104,9 @@ static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
intr->dyn_ctl_itridx_s = VF_INT_DYN_CTLN_ITR_INDX_S;
intr->dyn_ctl_intrvl_s = VF_INT_DYN_CTLN_INTERVAL_S;
intr->dyn_ctl_wb_on_itr_m = VF_INT_DYN_CTLN_WB_ON_ITR_M;
+ intr->dyn_ctl_swint_trig_m = VF_INT_DYN_CTLN_SWINT_TRIG_M;
+ intr->dyn_ctl_sw_itridx_ena_m =
+ VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_M;
spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing,
IDPF_VF_ITR_IDX_SPACING);
@@ -114,6 +120,15 @@ static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
intr->tx_itr = idpf_get_reg_addr(adapter, tx_itr);
}
+ /* Data vector for NOIRQ queues */
+
+ val = reg_vals[vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC].dyn_ctl_reg;
+ vport->noirq_dyn_ctl = idpf_get_reg_addr(adapter, val);
+
+ val = VF_INT_DYN_CTLN_WB_ON_ITR_M | VF_INT_DYN_CTLN_INTENA_MSK_M |
+ FIELD_PREP(VF_INT_DYN_CTLN_ITR_INDX_M, IDPF_NO_ITR_UPDATE_IDX);
+ vport->noirq_dyn_ctl_ena = val;
+
free_reg_vals:
kfree(reg_vals);
@@ -126,7 +141,7 @@ free_reg_vals:
*/
static void idpf_vf_reset_reg_init(struct idpf_adapter *adapter)
{
- adapter->reset_reg.rstat = idpf_get_reg_addr(adapter, VFGEN_RSTAT);
+ adapter->reset_reg.rstat = idpf_get_rstat_reg_addr(adapter, VFGEN_RSTAT);
adapter->reset_reg.rstat_m = VFGEN_RSTAT_VFR_STATE_M;
}
@@ -145,6 +160,17 @@ static void idpf_vf_trigger_reset(struct idpf_adapter *adapter,
}
/**
+ * idpf_idc_vf_register - register for IDC callbacks
+ * @adapter: Driver specific private structure
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_idc_vf_register(struct idpf_adapter *adapter)
+{
+ return idpf_idc_init_aux_core_dev(adapter, IIDC_FUNCTION_TYPE_VF);
+}
+
+/**
* idpf_vf_reg_ops_init - Initialize register API function pointers
* @adapter: Driver specific private structure
*/
@@ -164,4 +190,11 @@ static void idpf_vf_reg_ops_init(struct idpf_adapter *adapter)
void idpf_vf_dev_ops_init(struct idpf_adapter *adapter)
{
idpf_vf_reg_ops_init(adapter);
+
+ adapter->dev_ops.idc_init = idpf_idc_vf_register;
+
+ resource_set_range(&adapter->dev_ops.static_reg_info[0],
+ VF_BASE, IDPF_VF_MBX_REGION_SZ);
+ resource_set_range(&adapter->dev_ops.static_reg_info[1],
+ VFGEN_RSTAT, IDPF_VF_RSTAT_REGION_SZ);
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index 15c00a01f1c0..44cd4b466c48 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -1,92 +1,12 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */
+#include <linux/export.h>
#include <net/libeth/rx.h>
#include "idpf.h"
#include "idpf_virtchnl.h"
-
-#define IDPF_VC_XN_MIN_TIMEOUT_MSEC 2000
-#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC (60 * 1000)
-#define IDPF_VC_XN_IDX_M GENMASK(7, 0)
-#define IDPF_VC_XN_SALT_M GENMASK(15, 8)
-#define IDPF_VC_XN_RING_LEN U8_MAX
-
-/**
- * enum idpf_vc_xn_state - Virtchnl transaction status
- * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used
- * @IDPF_VC_XN_WAITING: expecting a reply, not yet received
- * @IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received,
- * buffer updated
- * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there
- * was an error, buffer not updated
- * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down
- * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the
- * return context; a callback may be provided to handle
- * return
- */
-enum idpf_vc_xn_state {
- IDPF_VC_XN_IDLE = 1,
- IDPF_VC_XN_WAITING,
- IDPF_VC_XN_COMPLETED_SUCCESS,
- IDPF_VC_XN_COMPLETED_FAILED,
- IDPF_VC_XN_SHUTDOWN,
- IDPF_VC_XN_ASYNC,
-};
-
-struct idpf_vc_xn;
-/* Callback for asynchronous messages */
-typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *,
- const struct idpf_ctlq_msg *);
-
-/**
- * struct idpf_vc_xn - Data structure representing virtchnl transactions
- * @completed: virtchnl event loop uses that to signal when a reply is
- * available, uses kernel completion API
- * @state: virtchnl event loop stores the data below, protected by the
- * completion's lock.
- * @reply_sz: Original size of reply, may be > reply_buf.iov_len; it will be
- * truncated on its way to the receiver thread according to
- * reply_buf.iov_len.
- * @reply: Reference to the buffer(s) where the reply data should be written
- * to. May be 0-length (then NULL address permitted) if the reply data
- * should be ignored.
- * @async_handler: if sent asynchronously, a callback can be provided to handle
- * the reply when it's received
- * @vc_op: corresponding opcode sent with this transaction
- * @idx: index used as retrieval on reply receive, used for cookie
- * @salt: changed every message to make unique, used for cookie
- */
-struct idpf_vc_xn {
- struct completion completed;
- enum idpf_vc_xn_state state;
- size_t reply_sz;
- struct kvec reply;
- async_vc_cb async_handler;
- u32 vc_op;
- u8 idx;
- u8 salt;
-};
-
-/**
- * struct idpf_vc_xn_params - Parameters for executing transaction
- * @send_buf: kvec for send buffer
- * @recv_buf: kvec for recv buffer, may be NULL, must then have zero length
- * @timeout_ms: timeout to wait for reply
- * @async: send message asynchronously, will not wait on completion
- * @async_handler: If sent asynchronously, optional callback handler. The user
- * must be careful when using async handlers as the memory for
- * the recv_buf _cannot_ be on stack if this is async.
- * @vc_op: virtchnl op to send
- */
-struct idpf_vc_xn_params {
- struct kvec send_buf;
- struct kvec recv_buf;
- int timeout_ms;
- bool async;
- async_vc_cb async_handler;
- u32 vc_op;
-};
+#include "idpf_ptp.h"
/**
* struct idpf_vc_xn_manager - Manager for tracking transactions
@@ -141,14 +61,14 @@ static void idpf_handle_event_link(struct idpf_adapter *adapter,
}
np = netdev_priv(vport->netdev);
- vport->link_speed_mbps = le32_to_cpu(v2e->link_speed);
+ np->link_speed_mbps = le32_to_cpu(v2e->link_speed);
if (vport->link_up == v2e->link_status)
return;
vport->link_up = v2e->link_status;
- if (np->state != __IDPF_VPORT_UP)
+ if (!test_bit(IDPF_VPORT_UP, np->state))
return;
if (vport->link_up) {
@@ -235,6 +155,55 @@ err_kfree:
return err;
}
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+/**
+ * idpf_ptp_is_mb_msg - Check if the message is PTP-related
+ * @op: virtchnl opcode
+ *
+ * Return: true if msg is PTP-related, false otherwise.
+ */
+static bool idpf_ptp_is_mb_msg(u32 op)
+{
+ switch (op) {
+ case VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME:
+ case VIRTCHNL2_OP_PTP_GET_CROSS_TIME:
+ case VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME:
+ case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE:
+ case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME:
+ case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS:
+ case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * idpf_prepare_ptp_mb_msg - Prepare PTP related message
+ *
+ * @adapter: Driver specific private structure
+ * @op: virtchnl opcode
+ * @ctlq_msg: Corresponding control queue message
+ */
+static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
+ struct idpf_ctlq_msg *ctlq_msg)
+{
+ /* If the message is PTP-related and the secondary mailbox is available,
+ * send the message through the secondary mailbox.
+ */
+ if (!idpf_ptp_is_mb_msg(op) || !adapter->ptp->secondary_mbx.valid)
+ return;
+
+ ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_peer_drv;
+ ctlq_msg->func_id = adapter->ptp->secondary_mbx.peer_mbx_q_id;
+ ctlq_msg->host_id = adapter->ptp->secondary_mbx.peer_id;
+}
+#else /* !CONFIG_PTP_1588_CLOCK */
+static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
+ struct idpf_ctlq_msg *ctlq_msg)
+{ }
+#endif /* CONFIG_PTP_1588_CLOCK */
+
/**
* idpf_send_mb_msg - Send message over mailbox
* @adapter: Driver specific private structure
@@ -278,6 +247,9 @@ int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_cp;
ctlq_msg->func_id = 0;
+
+ idpf_prepare_ptp_mb_msg(adapter, op, ctlq_msg);
+
ctlq_msg->data_len = msg_size;
ctlq_msg->cookie.mbx.chnl_opcode = op;
ctlq_msg->cookie.mbx.chnl_retval = 0;
@@ -376,7 +348,7 @@ static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr)
* All waiting threads will be woken-up and their transaction aborted. Further
* operations on that object will fail.
*/
-static void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
+void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
{
int i;
@@ -449,8 +421,8 @@ static void idpf_vc_xn_push_free(struct idpf_vc_xn_manager *vcxn_mngr,
* >= @recv_buf.iov_len, but we never overflow @@recv_buf_iov_base). < 0 for
* error.
*/
-static ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
- const struct idpf_vc_xn_params *params)
+ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
+ const struct idpf_vc_xn_params *params)
{
const struct kvec *send_buf = &params->send_buf;
struct idpf_vc_xn *xn;
@@ -517,8 +489,10 @@ static ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
retval = -ENXIO;
goto only_unlock;
case IDPF_VC_XN_WAITING:
- dev_notice_ratelimited(&adapter->pdev->dev, "Transaction timed-out (op %d, %dms)\n",
- params->vc_op, params->timeout_ms);
+ dev_notice_ratelimited(&adapter->pdev->dev,
+ "Transaction timed-out (op:%d cookie:%04x vc_op:%d salt:%02x timeout:%dms)\n",
+ params->vc_op, cookie, xn->vc_op,
+ xn->salt, params->timeout_ms);
retval = -ETIME;
break;
case IDPF_VC_XN_COMPLETED_SUCCESS:
@@ -612,14 +586,16 @@ idpf_vc_xn_forward_reply(struct idpf_adapter *adapter,
return -EINVAL;
}
xn = &adapter->vcxn_mngr->ring[xn_idx];
+ idpf_vc_xn_lock(xn);
salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info);
if (xn->salt != salt) {
- dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (%02x != %02x)\n",
- xn->salt, salt);
+ dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (exp:%d@%02x(%d) != got:%d@%02x)\n",
+ xn->vc_op, xn->salt, xn->state,
+ ctlq_msg->cookie.mbx.chnl_opcode, salt);
+ idpf_vc_xn_unlock(xn);
return -EINVAL;
}
- idpf_vc_xn_lock(xn);
switch (xn->state) {
case IDPF_VC_XN_WAITING:
/* success */
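
As an aside on the cookie referenced by the timeout and salt-mismatch messages above: going by the IDPF_VC_XN_IDX_M (GENMASK(7, 0)) and IDPF_VC_XN_SALT_M (GENMASK(15, 8)) masks removed from this file, the 16-bit mailbox cookie is assumed to carry the transaction ring index in its low byte and the per-message salt in its high byte. A minimal encode/decode sketch with hypothetical names:

#include <linux/bitfield.h>

#define EXAMPLE_XN_IDX_M	GENMASK(7, 0)	/* mirrors IDPF_VC_XN_IDX_M */
#define EXAMPLE_XN_SALT_M	GENMASK(15, 8)	/* mirrors IDPF_VC_XN_SALT_M */

static u16 example_make_cookie(u8 idx, u8 salt)
{
	return FIELD_PREP(EXAMPLE_XN_SALT_M, salt) |
	       FIELD_PREP(EXAMPLE_XN_IDX_M, idx);
}

static void example_parse_cookie(u16 cookie, u8 *idx, u8 *salt)
{
	*idx = FIELD_GET(EXAMPLE_XN_IDX_M, cookie);
	*salt = FIELD_GET(EXAMPLE_XN_SALT_M, cookie);
}
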
@@ -726,9 +702,9 @@ int idpf_recv_mb_msg(struct idpf_adapter *adapter)
/* If post failed clear the only buffer we supplied */
if (post_err) {
if (dma_mem)
- dmam_free_coherent(&adapter->pdev->dev,
- dma_mem->size, dma_mem->va,
- dma_mem->pa);
+ dma_free_coherent(&adapter->pdev->dev,
+ dma_mem->size, dma_mem->va,
+ dma_mem->pa);
break;
}
@@ -740,34 +716,145 @@ int idpf_recv_mb_msg(struct idpf_adapter *adapter)
return err;
}
+struct idpf_chunked_msg_params {
+ u32 (*prepare_msg)(const struct idpf_vport *vport,
+ void *buf, const void *pos,
+ u32 num);
+
+ const void *chunks;
+ u32 num_chunks;
+
+ u32 chunk_sz;
+ u32 config_sz;
+
+ u32 vc_op;
+};
+
+struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_vport *vport, u32 num)
+{
+ struct idpf_queue_set *qp;
+
+ qp = kzalloc(struct_size(qp, qs, num), GFP_KERNEL);
+ if (!qp)
+ return NULL;
+
+ qp->vport = vport;
+ qp->num = num;
+
+ return qp;
+}
+
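
A usage sketch for the queue-set helper above, assuming only the struct idpf_queue_set layout visible in this patch (vport, num, and a qs[] array whose entries pair a queue type with a queue pointer); the queue arguments are placeholders and the resulting set would be handed to one of the *_queue_set_msg() helpers added further down.

static int example_build_tx_pair(struct idpf_vport *vport,
				 struct idpf_tx_queue *txq,
				 struct idpf_compl_queue *complq)
{
	struct idpf_queue_set *qs __free(kfree) = NULL;

	qs = idpf_alloc_queue_set(vport, 2);
	if (!qs)
		return -ENOMEM;

	qs->qs[0].type = VIRTCHNL2_QUEUE_TYPE_TX;
	qs->qs[0].txq = txq;
	qs->qs[1].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
	qs->qs[1].complq = complq;

	/* e.g. pass @qs to idpf_send_config_queue_set_msg() here */
	return 0;
}
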
/**
- * idpf_wait_for_marker_event - wait for software marker response
+ * idpf_send_chunked_msg - send VC message consisting of chunks
* @vport: virtual port data structure
+ * @params: message params
*
- * Returns 0 success, negative on failure.
- **/
-static int idpf_wait_for_marker_event(struct idpf_vport *vport)
+ * Helper function that splits the chunk array from @params into as many
+ * mailbox messages as needed and sends them one by one.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_send_chunked_msg(struct idpf_vport *vport,
+ const struct idpf_chunked_msg_params *params)
{
- int event;
- int i;
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = params->vc_op,
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ const void *pos = params->chunks;
+ u32 num_chunks, num_msgs, buf_sz;
+ void *buf __free(kfree) = NULL;
+ u32 totqs = params->num_chunks;
+
+ num_chunks = min(IDPF_NUM_CHUNKS_PER_MSG(params->config_sz,
+ params->chunk_sz), totqs);
+ num_msgs = DIV_ROUND_UP(totqs, num_chunks);
- for (i = 0; i < vport->num_txq; i++)
- idpf_queue_set(SW_MARKER, vport->txqs[i]);
+ buf_sz = params->config_sz + num_chunks * params->chunk_sz;
+ buf = kzalloc(buf_sz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- event = wait_event_timeout(vport->sw_marker_wq,
- test_and_clear_bit(IDPF_VPORT_SW_MARKER,
- vport->flags),
- msecs_to_jiffies(500));
+ xn_params.send_buf.iov_base = buf;
- for (i = 0; i < vport->num_txq; i++)
- idpf_queue_clear(POLL_MODE, vport->txqs[i]);
+ for (u32 i = 0; i < num_msgs; i++) {
+ ssize_t reply_sz;
- if (event)
- return 0;
+ memset(buf, 0, buf_sz);
+ xn_params.send_buf.iov_len = buf_sz;
+
+ if (params->prepare_msg(vport, buf, pos, num_chunks) != buf_sz)
+ return -EINVAL;
+
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+
+ pos += num_chunks * params->chunk_sz;
+ totqs -= num_chunks;
+
+ num_chunks = min(num_chunks, totqs);
+ buf_sz = params->config_sz + num_chunks * params->chunk_sz;
+ }
+
+ return 0;
+}
+
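
To make the chunking arithmetic above concrete, here is an illustrative helper assuming IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz) evaluates to how many chunk_sz entries fit in one mailbox buffer after the fixed config_sz header; the 4 KiB buffer size in the comment is an assumption for the example, not taken from this patch.

/* buf_len is a hypothetical mailbox buffer size, e.g. 4096 */
static u32 example_num_mbx_msgs(u32 buf_len, u32 config_sz, u32 chunk_sz,
				u32 total_chunks)
{
	u32 per_msg = min((buf_len - config_sz) / chunk_sz, total_chunks);

	/* e.g. 300 chunks of 32 B with a 64 B header in a 4 KiB buffer:
	 * per_msg = min(126, 300) = 126, so three messages are sent.
	 */
	return DIV_ROUND_UP(total_chunks, per_msg);
}
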
+/**
+ * idpf_wait_for_marker_event_set - wait for software marker response for
+ * selected Tx queues
+ * @qs: set of the Tx queues
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_wait_for_marker_event_set(const struct idpf_queue_set *qs)
+{
+ struct idpf_tx_queue *txq;
+ bool markers_rcvd = true;
+
+ for (u32 i = 0; i < qs->num; i++) {
+ switch (qs->qs[i].type) {
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ txq = qs->qs[i].txq;
+
+ idpf_queue_set(SW_MARKER, txq);
+ idpf_wait_for_sw_marker_completion(txq);
+ markers_rcvd &= !idpf_queue_has(SW_MARKER, txq);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (!markers_rcvd) {
+ netdev_warn(qs->vport->netdev,
+ "Failed to receive marker packets\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/**
+ * idpf_wait_for_marker_event - wait for software marker response
+ * @vport: virtual port data structure
+ *
+ * Return: 0 on success, negative on failure.
+ **/
+static int idpf_wait_for_marker_event(struct idpf_vport *vport)
+{
+ struct idpf_queue_set *qs __free(kfree) = NULL;
+
+ qs = idpf_alloc_queue_set(vport, vport->num_txq);
+ if (!qs)
+ return -ENOMEM;
- dev_warn(&vport->adapter->pdev->dev, "Failed to receive marker packets\n");
+ for (u32 i = 0; i < qs->num; i++) {
+ qs->qs[i].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ qs->qs[i].txq = vport->txqs[i];
+ }
- return -ETIMEDOUT;
+ return idpf_wait_for_marker_event_set(qs);
}
/**
@@ -874,14 +961,14 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL);
caps.rss_caps =
- cpu_to_le64(VIRTCHNL2_CAP_RSS_IPV4_TCP |
- VIRTCHNL2_CAP_RSS_IPV4_UDP |
- VIRTCHNL2_CAP_RSS_IPV4_SCTP |
- VIRTCHNL2_CAP_RSS_IPV4_OTHER |
- VIRTCHNL2_CAP_RSS_IPV6_TCP |
- VIRTCHNL2_CAP_RSS_IPV6_UDP |
- VIRTCHNL2_CAP_RSS_IPV6_SCTP |
- VIRTCHNL2_CAP_RSS_IPV6_OTHER);
+ cpu_to_le64(VIRTCHNL2_FLOW_IPV4_TCP |
+ VIRTCHNL2_FLOW_IPV4_UDP |
+ VIRTCHNL2_FLOW_IPV4_SCTP |
+ VIRTCHNL2_FLOW_IPV4_OTHER |
+ VIRTCHNL2_FLOW_IPV6_TCP |
+ VIRTCHNL2_FLOW_IPV6_UDP |
+ VIRTCHNL2_FLOW_IPV6_SCTP |
+ VIRTCHNL2_FLOW_IPV6_OTHER);
caps.hsplit_caps =
cpu_to_le32(VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 |
@@ -893,10 +980,13 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
caps.other_caps =
cpu_to_le64(VIRTCHNL2_CAP_SRIOV |
+ VIRTCHNL2_CAP_RDMA |
+ VIRTCHNL2_CAP_LAN_MEMORY_REGIONS |
VIRTCHNL2_CAP_MACFILTER |
VIRTCHNL2_CAP_SPLITQ_QSCHED |
VIRTCHNL2_CAP_PROMISC |
- VIRTCHNL2_CAP_LOOPBACK);
+ VIRTCHNL2_CAP_LOOPBACK |
+ VIRTCHNL2_CAP_PTP);
xn_params.vc_op = VIRTCHNL2_OP_GET_CAPS;
xn_params.send_buf.iov_base = &caps;
@@ -915,6 +1005,163 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
}
/**
+ * idpf_send_get_lan_memory_regions - Send virtchnl get LAN memory regions msg
+ * @adapter: Driver specific private struct
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_send_get_lan_memory_regions(struct idpf_adapter *adapter)
+{
+ struct virtchnl2_get_lan_memory_regions *rcvd_regions __free(kfree);
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_GET_LAN_MEMORY_REGIONS,
+ .recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN,
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ int num_regions, size;
+ struct idpf_hw *hw;
+ ssize_t reply_sz;
+ int err = 0;
+
+ rcvd_regions = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!rcvd_regions)
+ return -ENOMEM;
+
+ xn_params.recv_buf.iov_base = rcvd_regions;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+
+ num_regions = le16_to_cpu(rcvd_regions->num_memory_regions);
+ size = struct_size(rcvd_regions, mem_reg, num_regions);
+ if (reply_sz < size)
+ return -EIO;
+
+ if (size > IDPF_CTLQ_MAX_BUF_LEN)
+ return -EINVAL;
+
+ hw = &adapter->hw;
+ hw->lan_regs = kcalloc(num_regions, sizeof(*hw->lan_regs), GFP_KERNEL);
+ if (!hw->lan_regs)
+ return -ENOMEM;
+
+ for (int i = 0; i < num_regions; i++) {
+ hw->lan_regs[i].addr_len =
+ le64_to_cpu(rcvd_regions->mem_reg[i].size);
+ hw->lan_regs[i].addr_start =
+ le64_to_cpu(rcvd_regions->mem_reg[i].start_offset);
+ }
+ hw->num_lan_regs = num_regions;
+
+ return err;
+}
+
+/**
+ * idpf_calc_remaining_mmio_regs - calculate MMIO regions outside mbx and rstat
+ * @adapter: Driver specific private structure
+ *
+ * Called when VIRTCHNL2_CAP_LAN_MEMORY_REGIONS is not supported. This will
+ * calculate the offsets and sizes for the regions before, in between, and
+ * after the mailbox and rstat MMIO mappings.
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_calc_remaining_mmio_regs(struct idpf_adapter *adapter)
+{
+ struct resource *rstat_reg = &adapter->dev_ops.static_reg_info[1];
+ struct resource *mbx_reg = &adapter->dev_ops.static_reg_info[0];
+ struct idpf_hw *hw = &adapter->hw;
+
+ hw->num_lan_regs = IDPF_MMIO_MAP_FALLBACK_MAX_REMAINING;
+ hw->lan_regs = kcalloc(hw->num_lan_regs, sizeof(*hw->lan_regs),
+ GFP_KERNEL);
+ if (!hw->lan_regs)
+ return -ENOMEM;
+
+ /* Region preceding mailbox */
+ hw->lan_regs[0].addr_start = 0;
+ hw->lan_regs[0].addr_len = mbx_reg->start;
+ /* Region between mailbox and rstat */
+ hw->lan_regs[1].addr_start = mbx_reg->end + 1;
+ hw->lan_regs[1].addr_len = rstat_reg->start -
+ hw->lan_regs[1].addr_start;
+ /* Region after rstat */
+ hw->lan_regs[2].addr_start = rstat_reg->end + 1;
+ hw->lan_regs[2].addr_len = pci_resource_len(adapter->pdev, 0) -
+ hw->lan_regs[2].addr_start;
+
+ return 0;
+}
+
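
A worked example of the fallback carving above, with purely illustrative offsets: if the mailbox window were [0x4000, 0x4fff], the rstat window [0x8000, 0x80ff] and BAR0 64 KiB, the three remaining regions would be [0x0, 0x4000), [0x5000, 0x8000) and [0x8100, 0x10000).

static void example_fallback_regions(void)
{
	/* hypothetical windows; the real ones come from static_reg_info[] */
	struct resource mbx = DEFINE_RES_MEM(0x4000, 0x1000);
	struct resource rstat = DEFINE_RES_MEM(0x8000, 0x100);
	resource_size_t bar_len = SZ_64K;

	pr_info("region 0: [0x0 - 0x%llx)\n", (u64)mbx.start);
	pr_info("region 1: [0x%llx - 0x%llx)\n",
		(u64)(mbx.end + 1), (u64)rstat.start);
	pr_info("region 2: [0x%llx - 0x%llx)\n",
		(u64)(rstat.end + 1), (u64)bar_len);
}
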
+/**
+ * idpf_map_lan_mmio_regs - map remaining LAN BAR regions
+ * @adapter: Driver specific private structure
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_map_lan_mmio_regs(struct idpf_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct idpf_hw *hw = &adapter->hw;
+ resource_size_t res_start;
+
+ res_start = pci_resource_start(pdev, 0);
+
+ for (int i = 0; i < hw->num_lan_regs; i++) {
+ resource_size_t start;
+ long len;
+
+ len = hw->lan_regs[i].addr_len;
+ if (!len)
+ continue;
+ start = hw->lan_regs[i].addr_start + res_start;
+
+ hw->lan_regs[i].vaddr = devm_ioremap(&pdev->dev, start, len);
+ if (!hw->lan_regs[i].vaddr) {
+ pci_err(pdev, "failed to allocate BAR0 region\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
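
Once the windows are mapped, a register offset has to be resolved against the region that contains it; the patch relies on idpf_get_reg_addr()/idpf_get_rstat_reg_addr() for that (not shown here). A minimal lookup sketch over the lan_regs[] array built above, using only the fields visible in this patch:

static void __iomem *example_reg_addr(const struct idpf_hw *hw,
				      resource_size_t off)
{
	for (int i = 0; i < hw->num_lan_regs; i++) {
		resource_size_t start = hw->lan_regs[i].addr_start;
		resource_size_t len = hw->lan_regs[i].addr_len;

		if (off >= start && off < start + len)
			return hw->lan_regs[i].vaddr + (off - start);
	}

	return NULL;
}
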
+/**
+ * idpf_add_del_fsteer_filters - Send virtchnl add/del Flow Steering message
+ * @adapter: adapter info struct
+ * @rule: Flow steering rule to add/delete
+ * @opcode: VIRTCHNL2_OP_ADD_FLOW_RULE to add filter, or
+ * VIRTCHNL2_OP_DEL_FLOW_RULE to delete. All other values are invalid.
+ *
+ * Send ADD/DELETE flow steering virtchnl message and receive the result.
+ *
+ * Return: 0 on success, negative on failure.
+ */
+int idpf_add_del_fsteer_filters(struct idpf_adapter *adapter,
+ struct virtchnl2_flow_rule_add_del *rule,
+ enum virtchnl2_op opcode)
+{
+ int rule_count = le32_to_cpu(rule->count);
+ struct idpf_vc_xn_params xn_params = {};
+ ssize_t reply_sz;
+
+ if (opcode != VIRTCHNL2_OP_ADD_FLOW_RULE &&
+ opcode != VIRTCHNL2_OP_DEL_FLOW_RULE)
+ return -EINVAL;
+
+ xn_params.vc_op = opcode;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.async = false;
+ xn_params.send_buf.iov_base = rule;
+ xn_params.send_buf.iov_len = struct_size(rule, rule_info, rule_count);
+ xn_params.recv_buf.iov_base = rule;
+ xn_params.recv_buf.iov_len = struct_size(rule, rule_info, rule_count);
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ return reply_sz < 0 ? reply_sz : 0;
+}
+
+/**
* idpf_vport_alloc_max_qs - Allocate max queues for a vport
* @adapter: Driver specific private structure
* @max_q: vport max queue structure
@@ -925,21 +1172,35 @@ int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
struct virtchnl2_get_capabilities *caps = &adapter->caps;
u16 default_vports = idpf_get_default_vports(adapter);
- int max_rx_q, max_tx_q;
+ u32 max_rx_q, max_tx_q, max_buf_q, max_compl_q;
mutex_lock(&adapter->queue_lock);
+ /* Caps are device-wide. Give each vport an equal piece */
max_rx_q = le16_to_cpu(caps->max_rx_q) / default_vports;
max_tx_q = le16_to_cpu(caps->max_tx_q) / default_vports;
- if (adapter->num_alloc_vports < default_vports) {
- max_q->max_rxq = min_t(u16, max_rx_q, IDPF_MAX_Q);
- max_q->max_txq = min_t(u16, max_tx_q, IDPF_MAX_Q);
- } else {
- max_q->max_rxq = IDPF_MIN_Q;
- max_q->max_txq = IDPF_MIN_Q;
+ max_buf_q = le16_to_cpu(caps->max_rx_bufq) / default_vports;
+ max_compl_q = le16_to_cpu(caps->max_tx_complq) / default_vports;
+
+ if (adapter->num_alloc_vports >= default_vports) {
+ max_rx_q = IDPF_MIN_Q;
+ max_tx_q = IDPF_MIN_Q;
}
- max_q->max_bufq = max_q->max_rxq * IDPF_MAX_BUFQS_PER_RXQ_GRP;
- max_q->max_complq = max_q->max_txq;
+
+ /*
+ * Harmonize the numbers. The current implementation always creates
+ * `IDPF_MAX_BUFQS_PER_RXQ_GRP` buffer queues for each Rx queue and
+ * one completion queue for each Tx queue for best performance.
+	 * If fewer buffer or completion queues are available, cap the number
+ * of the corresponding Rx/Tx queues.
+ */
+ max_rx_q = min(max_rx_q, max_buf_q / IDPF_MAX_BUFQS_PER_RXQ_GRP);
+ max_tx_q = min(max_tx_q, max_compl_q);
+
+ max_q->max_rxq = max_rx_q;
+ max_q->max_txq = max_tx_q;
+ max_q->max_bufq = max_rx_q * IDPF_MAX_BUFQS_PER_RXQ_GRP;
+ max_q->max_complq = max_tx_q;
if (avail_queues->avail_rxq < max_q->max_rxq ||
avail_queues->avail_txq < max_q->max_txq ||
@@ -1370,7 +1631,7 @@ int idpf_send_destroy_vport_msg(struct idpf_vport *vport)
xn_params.vc_op = VIRTCHNL2_OP_DESTROY_VPORT;
xn_params.send_buf.iov_base = &v_id;
xn_params.send_buf.iov_len = sizeof(v_id);
- xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
return reply_sz < 0 ? reply_sz : 0;
@@ -1418,236 +1679,368 @@ int idpf_send_disable_vport_msg(struct idpf_vport *vport)
xn_params.vc_op = VIRTCHNL2_OP_DISABLE_VPORT;
xn_params.send_buf.iov_base = &v_id;
xn_params.send_buf.iov_len = sizeof(v_id);
- xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
return reply_sz < 0 ? reply_sz : 0;
}
/**
- * idpf_send_config_tx_queues_msg - Send virtchnl config tx queues message
+ * idpf_fill_txq_config_chunk - fill chunk describing the Tx queue
+ * @vport: virtual port data structure
+ * @q: Tx queue to be inserted into VC chunk
+ * @qi: pointer to the buffer containing the VC chunk
+ */
+static void idpf_fill_txq_config_chunk(const struct idpf_vport *vport,
+ const struct idpf_tx_queue *q,
+ struct virtchnl2_txq_info *qi)
+{
+ u32 val;
+
+ qi->queue_id = cpu_to_le32(q->q_id);
+ qi->model = cpu_to_le16(vport->txq_model);
+ qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
+ qi->ring_len = cpu_to_le16(q->desc_count);
+ qi->dma_ring_addr = cpu_to_le64(q->dma);
+ qi->relative_queue_id = cpu_to_le16(q->rel_q_id);
+
+ if (!idpf_is_queue_model_split(vport->txq_model)) {
+ qi->sched_mode = cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
+ return;
+ }
+
+ if (idpf_queue_has(XDP, q))
+ val = q->complq->q_id;
+ else
+ val = q->txq_grp->complq->q_id;
+
+ qi->tx_compl_queue_id = cpu_to_le16(val);
+
+ if (idpf_queue_has(FLOW_SCH_EN, q))
+ val = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+ else
+ val = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+
+ qi->sched_mode = cpu_to_le16(val);
+}
+
+/**
+ * idpf_fill_complq_config_chunk - fill chunk describing the completion queue
* @vport: virtual port data structure
+ * @q: completion queue to be inserted into VC chunk
+ * @qi: pointer to the buffer containing the VC chunk
+ */
+static void idpf_fill_complq_config_chunk(const struct idpf_vport *vport,
+ const struct idpf_compl_queue *q,
+ struct virtchnl2_txq_info *qi)
+{
+ u32 val;
+
+ qi->queue_id = cpu_to_le32(q->q_id);
+ qi->model = cpu_to_le16(vport->txq_model);
+ qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
+ qi->ring_len = cpu_to_le16(q->desc_count);
+ qi->dma_ring_addr = cpu_to_le64(q->dma);
+
+ if (idpf_queue_has(FLOW_SCH_EN, q))
+ val = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+ else
+ val = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+
+ qi->sched_mode = cpu_to_le16(val);
+}
+
+/**
+ * idpf_prepare_cfg_txqs_msg - prepare message to configure selected Tx queues
+ * @vport: virtual port data structure
+ * @buf: buffer containing the message
+ * @pos: pointer to the first chunk describing the Tx queue
+ * @num_chunks: number of chunks in the message
*
- * Send config tx queues virtchnl message. Returns 0 on success, negative on
- * failure.
+ * Helper function for preparing the message describing configuration of
+ * Tx queues.
+ *
+ * Return: the total size of the prepared message.
*/
-static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
+static u32 idpf_prepare_cfg_txqs_msg(const struct idpf_vport *vport,
+ void *buf, const void *pos,
+ u32 num_chunks)
+{
+ struct virtchnl2_config_tx_queues *ctq = buf;
+
+ ctq->vport_id = cpu_to_le32(vport->vport_id);
+ ctq->num_qinfo = cpu_to_le16(num_chunks);
+ memcpy(ctq->qinfo, pos, num_chunks * sizeof(*ctq->qinfo));
+
+ return struct_size(ctq, qinfo, num_chunks);
+}
+
+/**
+ * idpf_send_config_tx_queue_set_msg - send virtchnl config Tx queues
+ * message for selected queues
+ * @qs: set of the Tx queues to configure
+ *
+ * Send config queues virtchnl message for queues contained in the @qs array.
+ * The @qs array can contain Tx queues (or completion queues) only.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_send_config_tx_queue_set_msg(const struct idpf_queue_set *qs)
{
- struct virtchnl2_config_tx_queues *ctq __free(kfree) = NULL;
struct virtchnl2_txq_info *qi __free(kfree) = NULL;
- struct idpf_vc_xn_params xn_params = {};
- u32 config_sz, chunk_sz, buf_sz;
- int totqs, num_msgs, num_chunks;
- ssize_t reply_sz;
- int i, k = 0;
+ struct idpf_chunked_msg_params params = {
+ .vc_op = VIRTCHNL2_OP_CONFIG_TX_QUEUES,
+ .prepare_msg = idpf_prepare_cfg_txqs_msg,
+ .config_sz = sizeof(struct virtchnl2_config_tx_queues),
+ .chunk_sz = sizeof(*qi),
+ };
- totqs = vport->num_txq + vport->num_complq;
- qi = kcalloc(totqs, sizeof(struct virtchnl2_txq_info), GFP_KERNEL);
+ qi = kcalloc(qs->num, sizeof(*qi), GFP_KERNEL);
if (!qi)
return -ENOMEM;
- /* Populate the queue info buffer with all queue context info */
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
- int j, sched_mode;
-
- for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
- qi[k].queue_id =
- cpu_to_le32(tx_qgrp->txqs[j]->q_id);
- qi[k].model =
- cpu_to_le16(vport->txq_model);
- qi[k].type =
- cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
- qi[k].ring_len =
- cpu_to_le16(tx_qgrp->txqs[j]->desc_count);
- qi[k].dma_ring_addr =
- cpu_to_le64(tx_qgrp->txqs[j]->dma);
- if (idpf_is_queue_model_split(vport->txq_model)) {
- struct idpf_tx_queue *q = tx_qgrp->txqs[j];
-
- qi[k].tx_compl_queue_id =
- cpu_to_le16(tx_qgrp->complq->q_id);
- qi[k].relative_queue_id = cpu_to_le16(j);
-
- if (idpf_queue_has(FLOW_SCH_EN, q))
- qi[k].sched_mode =
- cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_FLOW);
- else
- qi[k].sched_mode =
- cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
- } else {
- qi[k].sched_mode =
- cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
- }
- }
+ params.chunks = qi;
- if (!idpf_is_queue_model_split(vport->txq_model))
- continue;
+ for (u32 i = 0; i < qs->num; i++) {
+ if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_TX)
+ idpf_fill_txq_config_chunk(qs->vport, qs->qs[i].txq,
+ &qi[params.num_chunks++]);
+ else if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION)
+ idpf_fill_complq_config_chunk(qs->vport,
+ qs->qs[i].complq,
+ &qi[params.num_chunks++]);
+ }
- qi[k].queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
- qi[k].model = cpu_to_le16(vport->txq_model);
- qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
- qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count);
- qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma);
+ return idpf_send_chunked_msg(qs->vport, &params);
+}
- if (idpf_queue_has(FLOW_SCH_EN, tx_qgrp->complq))
- sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
- else
- sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
- qi[k].sched_mode = cpu_to_le16(sched_mode);
+/**
+ * idpf_send_config_tx_queues_msg - send virtchnl config Tx queues message
+ * @vport: virtual port data structure
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
+{
+ struct idpf_queue_set *qs __free(kfree) = NULL;
+ u32 totqs = vport->num_txq + vport->num_complq;
+ u32 k = 0;
+
+ qs = idpf_alloc_queue_set(vport, totqs);
+ if (!qs)
+ return -ENOMEM;
+
+ /* Populate the queue info buffer with all queue context info */
+ for (u32 i = 0; i < vport->num_txq_grp; i++) {
+ const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+
+ for (u32 j = 0; j < tx_qgrp->num_txq; j++) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ qs->qs[k++].txq = tx_qgrp->txqs[j];
+ }
- k++;
+ if (idpf_is_queue_model_split(vport->txq_model)) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ qs->qs[k++].complq = tx_qgrp->complq;
+ }
}
/* Make sure accounting agrees */
if (k != totqs)
return -EINVAL;
- /* Chunk up the queue contexts into multiple messages to avoid
- * sending a control queue message buffer that is too large
- */
- config_sz = sizeof(struct virtchnl2_config_tx_queues);
- chunk_sz = sizeof(struct virtchnl2_txq_info);
+ return idpf_send_config_tx_queue_set_msg(qs);
+}
- num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
- totqs);
- num_msgs = DIV_ROUND_UP(totqs, num_chunks);
+/**
+ * idpf_fill_rxq_config_chunk - fill chunk describing the Rx queue
+ * @vport: virtual port data structure
+ * @q: Rx queue to be inserted into VC chunk
+ * @qi: pointer to the buffer containing the VC chunk
+ */
+static void idpf_fill_rxq_config_chunk(const struct idpf_vport *vport,
+ struct idpf_rx_queue *q,
+ struct virtchnl2_rxq_info *qi)
+{
+ const struct idpf_bufq_set *sets;
+
+ qi->queue_id = cpu_to_le32(q->q_id);
+ qi->model = cpu_to_le16(vport->rxq_model);
+ qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
+ qi->ring_len = cpu_to_le16(q->desc_count);
+ qi->dma_ring_addr = cpu_to_le64(q->dma);
+ qi->max_pkt_size = cpu_to_le32(q->rx_max_pkt_size);
+ qi->rx_buffer_low_watermark = cpu_to_le16(q->rx_buffer_low_watermark);
+ qi->qflags = cpu_to_le16(VIRTCHNL2_RX_DESC_SIZE_32BYTE);
+ if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
+ qi->qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
+
+ if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ qi->data_buffer_size = cpu_to_le32(q->rx_buf_size);
+ qi->desc_ids = cpu_to_le64(q->rxdids);
- buf_sz = struct_size(ctq, qinfo, num_chunks);
- ctq = kzalloc(buf_sz, GFP_KERNEL);
- if (!ctq)
- return -ENOMEM;
+ return;
+ }
- xn_params.vc_op = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
- xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ sets = q->bufq_sets;
- for (i = 0, k = 0; i < num_msgs; i++) {
- memset(ctq, 0, buf_sz);
- ctq->vport_id = cpu_to_le32(vport->vport_id);
- ctq->num_qinfo = cpu_to_le16(num_chunks);
- memcpy(ctq->qinfo, &qi[k], chunk_sz * num_chunks);
+ /*
+ * In splitq mode, RxQ buffer size should be set to that of the first
+ * buffer queue associated with this RxQ.
+ */
+ q->rx_buf_size = sets[0].bufq.rx_buf_size;
+ qi->data_buffer_size = cpu_to_le32(q->rx_buf_size);
- xn_params.send_buf.iov_base = ctq;
- xn_params.send_buf.iov_len = buf_sz;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- if (reply_sz < 0)
- return reply_sz;
+ qi->rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id);
+ if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) {
+ qi->bufq2_ena = IDPF_BUFQ2_ENA;
+ qi->rx_bufq2_id = cpu_to_le16(sets[1].bufq.q_id);
+ }
- k += num_chunks;
- totqs -= num_chunks;
- num_chunks = min(num_chunks, totqs);
- /* Recalculate buffer size */
- buf_sz = struct_size(ctq, qinfo, num_chunks);
+ q->rx_hbuf_size = sets[0].bufq.rx_hbuf_size;
+
+ if (idpf_queue_has(HSPLIT_EN, q)) {
+ qi->qflags |= cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT);
+ qi->hdr_buffer_size = cpu_to_le16(q->rx_hbuf_size);
}
- return 0;
+ qi->desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
}
/**
- * idpf_send_config_rx_queues_msg - Send virtchnl config rx queues message
+ * idpf_fill_bufq_config_chunk - fill chunk describing the buffer queue
* @vport: virtual port data structure
+ * @q: buffer queue to be inserted into VC chunk
+ * @qi: pointer to the buffer containing the VC chunk
+ */
+static void idpf_fill_bufq_config_chunk(const struct idpf_vport *vport,
+ const struct idpf_buf_queue *q,
+ struct virtchnl2_rxq_info *qi)
+{
+ qi->queue_id = cpu_to_le32(q->q_id);
+ qi->model = cpu_to_le16(vport->rxq_model);
+ qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
+ qi->ring_len = cpu_to_le16(q->desc_count);
+ qi->dma_ring_addr = cpu_to_le64(q->dma);
+ qi->data_buffer_size = cpu_to_le32(q->rx_buf_size);
+ qi->rx_buffer_low_watermark = cpu_to_le16(q->rx_buffer_low_watermark);
+ qi->desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
+ qi->buffer_notif_stride = IDPF_RX_BUF_STRIDE;
+ if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
+ qi->qflags = cpu_to_le16(VIRTCHNL2_RXQ_RSC);
+
+ if (idpf_queue_has(HSPLIT_EN, q)) {
+ qi->qflags |= cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT);
+ qi->hdr_buffer_size = cpu_to_le16(q->rx_hbuf_size);
+ }
+}
+
+/**
+ * idpf_prepare_cfg_rxqs_msg - prepare message to configure selected Rx queues
+ * @vport: virtual port data structure
+ * @buf: buffer containing the message
+ * @pos: pointer to the first chunk describing the Rx queue
+ * @num_chunks: number of chunks in the message
*
- * Send config rx queues virtchnl message. Returns 0 on success, negative on
- * failure.
+ * Helper function for preparing the message describing configuration of
+ * Rx queues.
+ *
+ * Return: the total size of the prepared message.
*/
-static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
+static u32 idpf_prepare_cfg_rxqs_msg(const struct idpf_vport *vport,
+ void *buf, const void *pos,
+ u32 num_chunks)
+{
+ struct virtchnl2_config_rx_queues *crq = buf;
+
+ crq->vport_id = cpu_to_le32(vport->vport_id);
+ crq->num_qinfo = cpu_to_le16(num_chunks);
+ memcpy(crq->qinfo, pos, num_chunks * sizeof(*crq->qinfo));
+
+ return struct_size(crq, qinfo, num_chunks);
+}
+
+/**
+ * idpf_send_config_rx_queue_set_msg - send virtchnl config Rx queues message
+ * for selected queues.
+ * @qs: set of the Rx queues to configure
+ *
+ * Send config queues virtchnl message for queues contained in the @qs array.
+ * The @qs array can contain Rx queues (or buffer queues) only.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_send_config_rx_queue_set_msg(const struct idpf_queue_set *qs)
{
- struct virtchnl2_config_rx_queues *crq __free(kfree) = NULL;
struct virtchnl2_rxq_info *qi __free(kfree) = NULL;
- struct idpf_vc_xn_params xn_params = {};
- u32 config_sz, chunk_sz, buf_sz;
- int totqs, num_msgs, num_chunks;
- ssize_t reply_sz;
- int i, k = 0;
+ struct idpf_chunked_msg_params params = {
+ .vc_op = VIRTCHNL2_OP_CONFIG_RX_QUEUES,
+ .prepare_msg = idpf_prepare_cfg_rxqs_msg,
+ .config_sz = sizeof(struct virtchnl2_config_rx_queues),
+ .chunk_sz = sizeof(*qi),
+ };
- totqs = vport->num_rxq + vport->num_bufq;
- qi = kcalloc(totqs, sizeof(struct virtchnl2_rxq_info), GFP_KERNEL);
+ qi = kcalloc(qs->num, sizeof(*qi), GFP_KERNEL);
if (!qi)
return -ENOMEM;
- /* Populate the queue info buffer with all queue context info */
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
- u16 num_rxq;
- int j;
-
- if (!idpf_is_queue_model_split(vport->rxq_model))
- goto setup_rxqs;
-
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
- struct idpf_buf_queue *bufq =
- &rx_qgrp->splitq.bufq_sets[j].bufq;
-
- qi[k].queue_id = cpu_to_le32(bufq->q_id);
- qi[k].model = cpu_to_le16(vport->rxq_model);
- qi[k].type =
- cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
- qi[k].desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
- qi[k].ring_len = cpu_to_le16(bufq->desc_count);
- qi[k].dma_ring_addr = cpu_to_le64(bufq->dma);
- qi[k].data_buffer_size = cpu_to_le32(bufq->rx_buf_size);
- qi[k].buffer_notif_stride = IDPF_RX_BUF_STRIDE;
- qi[k].rx_buffer_low_watermark =
- cpu_to_le16(bufq->rx_buffer_low_watermark);
- if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
- qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
- }
+ params.chunks = qi;
-setup_rxqs:
- if (idpf_is_queue_model_split(vport->rxq_model))
- num_rxq = rx_qgrp->splitq.num_rxq_sets;
- else
- num_rxq = rx_qgrp->singleq.num_rxq;
+ for (u32 i = 0; i < qs->num; i++) {
+ if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_RX)
+ idpf_fill_rxq_config_chunk(qs->vport, qs->qs[i].rxq,
+ &qi[params.num_chunks++]);
+ else if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_RX_BUFFER)
+ idpf_fill_bufq_config_chunk(qs->vport, qs->qs[i].bufq,
+ &qi[params.num_chunks++]);
+ }
- for (j = 0; j < num_rxq; j++, k++) {
- const struct idpf_bufq_set *sets;
- struct idpf_rx_queue *rxq;
+ return idpf_send_chunked_msg(qs->vport, &params);
+}
- if (!idpf_is_queue_model_split(vport->rxq_model)) {
- rxq = rx_qgrp->singleq.rxqs[j];
- goto common_qi_fields;
- }
+/**
+ * idpf_send_config_rx_queues_msg - send virtchnl config Rx queues message
+ * @vport: virtual port data structure
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
+{
+ bool splitq = idpf_is_queue_model_split(vport->rxq_model);
+ struct idpf_queue_set *qs __free(kfree) = NULL;
+ u32 totqs = vport->num_rxq + vport->num_bufq;
+ u32 k = 0;
- rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
- sets = rxq->bufq_sets;
+ qs = idpf_alloc_queue_set(vport, totqs);
+ if (!qs)
+ return -ENOMEM;
- /* In splitq mode, RXQ buffer size should be
- * set to that of the first buffer queue
- * associated with this RXQ.
- */
- rxq->rx_buf_size = sets[0].bufq.rx_buf_size;
+ /* Populate the queue info buffer with all queue context info */
+ for (u32 i = 0; i < vport->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u32 num_rxq;
- qi[k].rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id);
- if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) {
- qi[k].bufq2_ena = IDPF_BUFQ2_ENA;
- qi[k].rx_bufq2_id =
- cpu_to_le16(sets[1].bufq.q_id);
- }
- qi[k].rx_buffer_low_watermark =
- cpu_to_le16(rxq->rx_buffer_low_watermark);
- if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
- qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
-
- rxq->rx_hbuf_size = sets[0].bufq.rx_hbuf_size;
-
- if (idpf_queue_has(HSPLIT_EN, rxq)) {
- qi[k].qflags |=
- cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT);
- qi[k].hdr_buffer_size =
- cpu_to_le16(rxq->rx_hbuf_size);
- }
+ if (!splitq) {
+ num_rxq = rx_qgrp->singleq.num_rxq;
+ goto rxq;
+ }
+
+ for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ qs->qs[k++].bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
+ }
+
+ num_rxq = rx_qgrp->splitq.num_rxq_sets;
-common_qi_fields:
- qi[k].queue_id = cpu_to_le32(rxq->q_id);
- qi[k].model = cpu_to_le16(vport->rxq_model);
- qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
- qi[k].ring_len = cpu_to_le16(rxq->desc_count);
- qi[k].dma_ring_addr = cpu_to_le64(rxq->dma);
- qi[k].max_pkt_size = cpu_to_le32(rxq->rx_max_pkt_size);
- qi[k].data_buffer_size = cpu_to_le32(rxq->rx_buf_size);
- qi[k].qflags |=
- cpu_to_le16(VIRTCHNL2_RX_DESC_SIZE_32BYTE);
- qi[k].desc_ids = cpu_to_le64(rxq->rxdids);
+rxq:
+ for (u32 j = 0; j < num_rxq; j++) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX;
+
+ if (splitq)
+ qs->qs[k++].rxq =
+ &rx_qgrp->splitq.rxq_sets[j]->rxq;
+ else
+ qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j];
}
}
@@ -1655,317 +2048,395 @@ common_qi_fields:
if (k != totqs)
return -EINVAL;
- /* Chunk up the queue contexts into multiple messages to avoid
- * sending a control queue message buffer that is too large
- */
- config_sz = sizeof(struct virtchnl2_config_rx_queues);
- chunk_sz = sizeof(struct virtchnl2_rxq_info);
+ return idpf_send_config_rx_queue_set_msg(qs);
+}
- num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
- totqs);
- num_msgs = DIV_ROUND_UP(totqs, num_chunks);
+/**
+ * idpf_prepare_ena_dis_qs_msg - prepare message to enable/disable selected
+ * queues
+ * @vport: virtual port data structure
+ * @buf: buffer containing the message
+ * @pos: pointer to the first chunk describing the queue
+ * @num_chunks: number of chunks in the message
+ *
+ * Helper function for preparing the message describing queues to be enabled
+ * or disabled.
+ *
+ * Return: the total size of the prepared message.
+ */
+static u32 idpf_prepare_ena_dis_qs_msg(const struct idpf_vport *vport,
+ void *buf, const void *pos,
+ u32 num_chunks)
+{
+ struct virtchnl2_del_ena_dis_queues *eq = buf;
+
+ eq->vport_id = cpu_to_le32(vport->vport_id);
+ eq->chunks.num_chunks = cpu_to_le16(num_chunks);
+ memcpy(eq->chunks.chunks, pos,
+ num_chunks * sizeof(*eq->chunks.chunks));
+
+ return struct_size(eq, chunks.chunks, num_chunks);
+}
+
+/**
+ * idpf_send_ena_dis_queue_set_msg - send virtchnl enable or disable queues
+ * message for selected queues
+ * @qs: set of the queues to enable or disable
+ * @en: whether to enable or disable queues
+ *
+ * Send enable or disable queues virtchnl message for queues contained
+ * in the @qs array.
+ * The @qs array can contain pointers to both Rx and Tx queues.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_send_ena_dis_queue_set_msg(const struct idpf_queue_set *qs,
+ bool en)
+{
+ struct virtchnl2_queue_chunk *qc __free(kfree) = NULL;
+ struct idpf_chunked_msg_params params = {
+ .vc_op = en ? VIRTCHNL2_OP_ENABLE_QUEUES :
+ VIRTCHNL2_OP_DISABLE_QUEUES,
+ .prepare_msg = idpf_prepare_ena_dis_qs_msg,
+ .config_sz = sizeof(struct virtchnl2_del_ena_dis_queues),
+ .chunk_sz = sizeof(*qc),
+ .num_chunks = qs->num,
+ };
- buf_sz = struct_size(crq, qinfo, num_chunks);
- crq = kzalloc(buf_sz, GFP_KERNEL);
- if (!crq)
+ qc = kcalloc(qs->num, sizeof(*qc), GFP_KERNEL);
+ if (!qc)
return -ENOMEM;
- xn_params.vc_op = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
- xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ params.chunks = qc;
- for (i = 0, k = 0; i < num_msgs; i++) {
- memset(crq, 0, buf_sz);
- crq->vport_id = cpu_to_le32(vport->vport_id);
- crq->num_qinfo = cpu_to_le16(num_chunks);
- memcpy(crq->qinfo, &qi[k], chunk_sz * num_chunks);
+ for (u32 i = 0; i < qs->num; i++) {
+ const struct idpf_queue_ptr *q = &qs->qs[i];
+ u32 qid;
- xn_params.send_buf.iov_base = crq;
- xn_params.send_buf.iov_len = buf_sz;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- if (reply_sz < 0)
- return reply_sz;
+ qc[i].type = cpu_to_le32(q->type);
+ qc[i].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
- k += num_chunks;
- totqs -= num_chunks;
- num_chunks = min(num_chunks, totqs);
- /* Recalculate buffer size */
- buf_sz = struct_size(crq, qinfo, num_chunks);
+ switch (q->type) {
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ qid = q->rxq->q_id;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ qid = q->txq->q_id;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ qid = q->bufq->q_id;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ qid = q->complq->q_id;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ qc[i].start_queue_id = cpu_to_le32(qid);
}
- return 0;
+ return idpf_send_chunked_msg(qs->vport, &params);
}
/**
- * idpf_send_ena_dis_queues_msg - Send virtchnl enable or disable
- * queues message
+ * idpf_send_ena_dis_queues_msg - send virtchnl enable or disable queues
+ * message
* @vport: virtual port data structure
- * @ena: if true enable, false disable
+ * @en: whether to enable or disable queues
*
- * Send enable or disable queues virtchnl message. Returns 0 on success,
- * negative on failure.
+ * Return: 0 on success, -errno on failure.
*/
-static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena)
+static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool en)
{
- struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
- struct virtchnl2_queue_chunk *qc __free(kfree) = NULL;
- u32 num_msgs, num_chunks, num_txq, num_rxq, num_q;
- struct idpf_vc_xn_params xn_params = {};
- struct virtchnl2_queue_chunks *qcs;
- u32 config_sz, chunk_sz, buf_sz;
- ssize_t reply_sz;
- int i, j, k = 0;
+ struct idpf_queue_set *qs __free(kfree) = NULL;
+ u32 num_txq, num_q, k = 0;
+ bool split;
num_txq = vport->num_txq + vport->num_complq;
- num_rxq = vport->num_rxq + vport->num_bufq;
- num_q = num_txq + num_rxq;
- buf_sz = sizeof(struct virtchnl2_queue_chunk) * num_q;
- qc = kzalloc(buf_sz, GFP_KERNEL);
- if (!qc)
+ num_q = num_txq + vport->num_rxq + vport->num_bufq;
+
+ qs = idpf_alloc_queue_set(vport, num_q);
+ if (!qs)
return -ENOMEM;
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ split = idpf_is_queue_model_split(vport->txq_model);
- for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
- qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
- qc[k].start_queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
- qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
- }
- }
- if (vport->num_txq != k)
- return -EINVAL;
+ for (u32 i = 0; i < vport->num_txq_grp; i++) {
+ const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
- if (!idpf_is_queue_model_split(vport->txq_model))
- goto setup_rx;
+ for (u32 j = 0; j < tx_qgrp->num_txq; j++) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ qs->qs[k++].txq = tx_qgrp->txqs[j];
+ }
- for (i = 0; i < vport->num_txq_grp; i++, k++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ if (!split)
+ continue;
- qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
- qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
- qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ qs->qs[k++].complq = tx_qgrp->complq;
}
- if (vport->num_complq != (k - vport->num_txq))
+
+ if (k != num_txq)
return -EINVAL;
-setup_rx:
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ split = idpf_is_queue_model_split(vport->rxq_model);
- if (idpf_is_queue_model_split(vport->rxq_model))
+ for (u32 i = 0; i < vport->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u32 num_rxq;
+
+ if (split)
num_rxq = rx_qgrp->splitq.num_rxq_sets;
else
num_rxq = rx_qgrp->singleq.num_rxq;
- for (j = 0; j < num_rxq; j++, k++) {
- if (idpf_is_queue_model_split(vport->rxq_model)) {
- qc[k].start_queue_id =
- cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_id);
- qc[k].type =
- cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
- } else {
- qc[k].start_queue_id =
- cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_id);
- qc[k].type =
- cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
- }
- qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
- }
- }
- if (vport->num_rxq != k - (vport->num_txq + vport->num_complq))
- return -EINVAL;
-
- if (!idpf_is_queue_model_split(vport->rxq_model))
- goto send_msg;
+ for (u32 j = 0; j < num_rxq; j++) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX;
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ if (split)
+ qs->qs[k++].rxq =
+ &rx_qgrp->splitq.rxq_sets[j]->rxq;
+ else
+ qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j];
+ }
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
- const struct idpf_buf_queue *q;
+ if (!split)
+ continue;
- q = &rx_qgrp->splitq.bufq_sets[j].bufq;
- qc[k].type =
- cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
- qc[k].start_queue_id = cpu_to_le32(q->q_id);
- qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
+ for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ qs->qs[k++].bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
}
}
- if (vport->num_bufq != k - (vport->num_txq +
- vport->num_complq +
- vport->num_rxq))
+
+ if (k != num_q)
return -EINVAL;
-send_msg:
- /* Chunk up the queue info into multiple messages */
- config_sz = sizeof(struct virtchnl2_del_ena_dis_queues);
- chunk_sz = sizeof(struct virtchnl2_queue_chunk);
+ return idpf_send_ena_dis_queue_set_msg(qs, en);
+}
+
+/**
+ * idpf_prep_map_unmap_queue_set_vector_msg - prepare message to map or unmap
+ * queue set to the interrupt vector
+ * @vport: virtual port data structure
+ * @buf: buffer containing the message
+ * @pos: pointer to the first chunk describing the vector mapping
+ * @num_chunks: number of chunks in the message
+ *
+ * Helper function for preparing the message describing the mapping of queues
+ * to q_vectors.
+ *
+ * Return: the total size of the prepared message.
+ */
+static u32
+idpf_prep_map_unmap_queue_set_vector_msg(const struct idpf_vport *vport,
+ void *buf, const void *pos,
+ u32 num_chunks)
+{
+ struct virtchnl2_queue_vector_maps *vqvm = buf;
- num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
- num_q);
- num_msgs = DIV_ROUND_UP(num_q, num_chunks);
+ vqvm->vport_id = cpu_to_le32(vport->vport_id);
+ vqvm->num_qv_maps = cpu_to_le16(num_chunks);
+ memcpy(vqvm->qv_maps, pos, num_chunks * sizeof(*vqvm->qv_maps));
- buf_sz = struct_size(eq, chunks.chunks, num_chunks);
- eq = kzalloc(buf_sz, GFP_KERNEL);
- if (!eq)
+ return struct_size(vqvm, qv_maps, num_chunks);
+}
+
+/**
+ * idpf_send_map_unmap_queue_set_vector_msg - send virtchnl map or unmap
+ * queue set vector message
+ * @qs: set of the queues to map or unmap
+ * @map: true for map and false for unmap
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int
+idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs,
+ bool map)
+{
+ struct virtchnl2_queue_vector *vqv __free(kfree) = NULL;
+ struct idpf_chunked_msg_params params = {
+ .vc_op = map ? VIRTCHNL2_OP_MAP_QUEUE_VECTOR :
+ VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR,
+ .prepare_msg = idpf_prep_map_unmap_queue_set_vector_msg,
+ .config_sz = sizeof(struct virtchnl2_queue_vector_maps),
+ .chunk_sz = sizeof(*vqv),
+ .num_chunks = qs->num,
+ };
+ bool split;
+
+ vqv = kcalloc(qs->num, sizeof(*vqv), GFP_KERNEL);
+ if (!vqv)
return -ENOMEM;
- if (ena) {
- xn_params.vc_op = VIRTCHNL2_OP_ENABLE_QUEUES;
- xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
- } else {
- xn_params.vc_op = VIRTCHNL2_OP_DISABLE_QUEUES;
- xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
- }
+ params.chunks = vqv;
- for (i = 0, k = 0; i < num_msgs; i++) {
- memset(eq, 0, buf_sz);
- eq->vport_id = cpu_to_le32(vport->vport_id);
- eq->chunks.num_chunks = cpu_to_le16(num_chunks);
- qcs = &eq->chunks;
- memcpy(qcs->chunks, &qc[k], chunk_sz * num_chunks);
+ split = idpf_is_queue_model_split(qs->vport->txq_model);
- xn_params.send_buf.iov_base = eq;
- xn_params.send_buf.iov_len = buf_sz;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- if (reply_sz < 0)
- return reply_sz;
+ for (u32 i = 0; i < qs->num; i++) {
+ const struct idpf_queue_ptr *q = &qs->qs[i];
+ const struct idpf_q_vector *vec;
+ u32 qid, v_idx, itr_idx;
+
+ vqv[i].queue_type = cpu_to_le32(q->type);
- k += num_chunks;
- num_q -= num_chunks;
- num_chunks = min(num_chunks, num_q);
- /* Recalculate buffer size */
- buf_sz = struct_size(eq, chunks.chunks, num_chunks);
+ switch (q->type) {
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ qid = q->rxq->q_id;
+
+ if (idpf_queue_has(NOIRQ, q->rxq))
+ vec = NULL;
+ else
+ vec = q->rxq->q_vector;
+
+ if (vec) {
+ v_idx = vec->v_idx;
+ itr_idx = vec->rx_itr_idx;
+ } else {
+ v_idx = qs->vport->noirq_v_idx;
+ itr_idx = VIRTCHNL2_ITR_IDX_0;
+ }
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ qid = q->txq->q_id;
+
+ if (idpf_queue_has(NOIRQ, q->txq))
+ vec = NULL;
+ else if (idpf_queue_has(XDP, q->txq))
+ vec = q->txq->complq->q_vector;
+ else if (split)
+ vec = q->txq->txq_grp->complq->q_vector;
+ else
+ vec = q->txq->q_vector;
+
+ if (vec) {
+ v_idx = vec->v_idx;
+ itr_idx = vec->tx_itr_idx;
+ } else {
+ v_idx = qs->vport->noirq_v_idx;
+ itr_idx = VIRTCHNL2_ITR_IDX_1;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ vqv[i].queue_id = cpu_to_le32(qid);
+ vqv[i].vector_id = cpu_to_le16(v_idx);
+ vqv[i].itr_idx = cpu_to_le32(itr_idx);
}
- return 0;
+ return idpf_send_chunked_msg(qs->vport, &params);
}
/**
- * idpf_send_map_unmap_queue_vector_msg - Send virtchnl map or unmap queue
- * vector message
+ * idpf_send_map_unmap_queue_vector_msg - send virtchnl map or unmap queue
+ * vector message
* @vport: virtual port data structure
* @map: true for map and false for unmap
*
- * Send map or unmap queue vector virtchnl message. Returns 0 on success,
- * negative on failure.
+ * Return: 0 on success, -errno on failure.
*/
int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
{
- struct virtchnl2_queue_vector_maps *vqvm __free(kfree) = NULL;
- struct virtchnl2_queue_vector *vqv __free(kfree) = NULL;
- struct idpf_vc_xn_params xn_params = {};
- u32 config_sz, chunk_sz, buf_sz;
- u32 num_msgs, num_chunks, num_q;
- ssize_t reply_sz;
- int i, j, k = 0;
+ struct idpf_queue_set *qs __free(kfree) = NULL;
+ u32 num_q = vport->num_txq + vport->num_rxq;
+ u32 k = 0;
- num_q = vport->num_txq + vport->num_rxq;
-
- buf_sz = sizeof(struct virtchnl2_queue_vector) * num_q;
- vqv = kzalloc(buf_sz, GFP_KERNEL);
- if (!vqv)
+ qs = idpf_alloc_queue_set(vport, num_q);
+ if (!qs)
return -ENOMEM;
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ for (u32 i = 0; i < vport->num_txq_grp; i++) {
+ const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
- for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
- vqv[k].queue_type =
- cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
- vqv[k].queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
-
- if (idpf_is_queue_model_split(vport->txq_model)) {
- vqv[k].vector_id =
- cpu_to_le16(tx_qgrp->complq->q_vector->v_idx);
- vqv[k].itr_idx =
- cpu_to_le32(tx_qgrp->complq->q_vector->tx_itr_idx);
- } else {
- vqv[k].vector_id =
- cpu_to_le16(tx_qgrp->txqs[j]->q_vector->v_idx);
- vqv[k].itr_idx =
- cpu_to_le32(tx_qgrp->txqs[j]->q_vector->tx_itr_idx);
- }
+ for (u32 j = 0; j < tx_qgrp->num_txq; j++) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ qs->qs[k++].txq = tx_qgrp->txqs[j];
}
}
- if (vport->num_txq != k)
+ if (k != vport->num_txq)
return -EINVAL;
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
- u16 num_rxq;
+ for (u32 i = 0; i < vport->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u32 num_rxq;
if (idpf_is_queue_model_split(vport->rxq_model))
num_rxq = rx_qgrp->splitq.num_rxq_sets;
else
num_rxq = rx_qgrp->singleq.num_rxq;
- for (j = 0; j < num_rxq; j++, k++) {
- struct idpf_rx_queue *rxq;
+ for (u32 j = 0; j < num_rxq; j++) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX;
if (idpf_is_queue_model_split(vport->rxq_model))
- rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
+ qs->qs[k++].rxq =
+ &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
- rxq = rx_qgrp->singleq.rxqs[j];
-
- vqv[k].queue_type =
- cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
- vqv[k].queue_id = cpu_to_le32(rxq->q_id);
- vqv[k].vector_id = cpu_to_le16(rxq->q_vector->v_idx);
- vqv[k].itr_idx = cpu_to_le32(rxq->q_vector->rx_itr_idx);
+ qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j];
}
}
- if (idpf_is_queue_model_split(vport->txq_model)) {
- if (vport->num_rxq != k - vport->num_complq)
- return -EINVAL;
- } else {
- if (vport->num_rxq != k - vport->num_txq)
- return -EINVAL;
- }
+ if (k != num_q)
+ return -EINVAL;
- /* Chunk up the vector info into multiple messages */
- config_sz = sizeof(struct virtchnl2_queue_vector_maps);
- chunk_sz = sizeof(struct virtchnl2_queue_vector);
+ return idpf_send_map_unmap_queue_set_vector_msg(qs, map);
+}
- num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
- num_q);
- num_msgs = DIV_ROUND_UP(num_q, num_chunks);
+/**
+ * idpf_send_enable_queue_set_msg - send enable queues virtchnl message for
+ * selected queues
+ * @qs: set of the queues
+ *
+ * Send enable queues virtchnl message for queues contained in the @qs array.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int idpf_send_enable_queue_set_msg(const struct idpf_queue_set *qs)
+{
+ return idpf_send_ena_dis_queue_set_msg(qs, true);
+}
- buf_sz = struct_size(vqvm, qv_maps, num_chunks);
- vqvm = kzalloc(buf_sz, GFP_KERNEL);
- if (!vqvm)
- return -ENOMEM;
+/**
+ * idpf_send_disable_queue_set_msg - send disable queues virtchnl message for
+ * selected queues
+ * @qs: set of the queues
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int idpf_send_disable_queue_set_msg(const struct idpf_queue_set *qs)
+{
+ int err;
- if (map) {
- xn_params.vc_op = VIRTCHNL2_OP_MAP_QUEUE_VECTOR;
- xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
- } else {
- xn_params.vc_op = VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR;
- xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
- }
+ err = idpf_send_ena_dis_queue_set_msg(qs, false);
+ if (err)
+ return err;
- for (i = 0, k = 0; i < num_msgs; i++) {
- memset(vqvm, 0, buf_sz);
- xn_params.send_buf.iov_base = vqvm;
- xn_params.send_buf.iov_len = buf_sz;
- vqvm->vport_id = cpu_to_le32(vport->vport_id);
- vqvm->num_qv_maps = cpu_to_le16(num_chunks);
- memcpy(vqvm->qv_maps, &vqv[k], chunk_sz * num_chunks);
+ return idpf_wait_for_marker_event_set(qs);
+}
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- if (reply_sz < 0)
- return reply_sz;
+/**
+ * idpf_send_config_queue_set_msg - send virtchnl config queues message for
+ * selected queues
+ * @qs: set of the queues
+ *
+ * Send config queues virtchnl message for queues contained in the @qs array.
+ * The @qs array can contain both Rx and Tx queues.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int idpf_send_config_queue_set_msg(const struct idpf_queue_set *qs)
+{
+ int err;
- k += num_chunks;
- num_q -= num_chunks;
- num_chunks = min(num_chunks, num_q);
- /* Recalculate buffer size */
- buf_sz = struct_size(vqvm, qv_maps, num_chunks);
- }
+ err = idpf_send_config_tx_queue_set_msg(qs);
+ if (err)
+ return err;
- return 0;
+ return idpf_send_config_rx_queue_set_msg(qs);
}
/**
@@ -1989,24 +2460,12 @@ int idpf_send_enable_queues_msg(struct idpf_vport *vport)
*/
int idpf_send_disable_queues_msg(struct idpf_vport *vport)
{
- int err, i;
+ int err;
err = idpf_send_ena_dis_queues_msg(vport, false);
if (err)
return err;
- /* switch to poll mode as interrupts will be disabled after disable
- * queues virtchnl message is sent
- */
- for (i = 0; i < vport->num_txq; i++)
- idpf_queue_set(POLL_MODE, vport->txqs[i]);
-
- /* schedule the napi to receive all the marker packets */
- local_bh_disable();
- for (i = 0; i < vport->num_q_vectors; i++)
- napi_schedule(&vport->q_vectors[i].napi);
- local_bh_enable();
-
return idpf_wait_for_marker_event(vport);
}
@@ -2071,7 +2530,7 @@ int idpf_send_delete_queues_msg(struct idpf_vport *vport)
num_chunks);
xn_params.vc_op = VIRTCHNL2_OP_DEL_QUEUES;
- xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
xn_params.send_buf.iov_base = eq;
xn_params.send_buf.iov_len = buf_size;
reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
@@ -2235,7 +2694,7 @@ int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter)
xn_params.vc_op = VIRTCHNL2_OP_DEALLOC_VECTORS;
xn_params.send_buf.iov_base = vcs;
xn_params.send_buf.iov_len = buf_size;
- xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
if (reply_sz < 0)
return reply_sz;
@@ -2296,7 +2755,7 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
/* Don't send get_stats message if the link is down */
- if (np->state <= __IDPF_VPORT_DOWN)
+ if (!test_bit(IDPF_VPORT_UP, np->state))
return 0;
stats_msg.vport_id = cpu_to_le32(vport->vport_id);
@@ -2825,7 +3284,7 @@ int idpf_init_dflt_mbx(struct idpf_adapter *adapter)
struct idpf_hw *hw = &adapter->hw;
int err;
- adapter->dev_ops.reg_ops.ctlq_reg_init(ctlq_info);
+ adapter->dev_ops.reg_ops.ctlq_reg_init(adapter, ctlq_info);
err = idpf_ctlq_init(hw, IDPF_NUM_DFLT_MBX_Q, ctlq_info);
if (err)
@@ -2985,6 +3444,30 @@ restart:
msleep(task_delay);
}
+ if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LAN_MEMORY_REGIONS)) {
+ err = idpf_send_get_lan_memory_regions(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to get LAN memory regions: %d\n",
+ err);
+ return -EINVAL;
+ }
+ } else {
+ /* Fallback to mapping the remaining regions of the entire BAR */
+ err = idpf_calc_remaining_mmio_regs(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to allocate BAR0 region(s): %d\n",
+ err);
+ return -ENOMEM;
+ }
+ }
+
+ err = idpf_map_lan_mmio_regs(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to map BAR0 region(s): %d\n",
+ err);
+ return -ENOMEM;
+ }
+
pci_sriov_set_totalvfs(adapter->pdev, idpf_get_max_vfs(adapter));
num_max_vports = idpf_get_max_vports(adapter);
adapter->max_vports = num_max_vports;
@@ -3025,6 +3508,11 @@ restart:
goto err_intr_req;
}
+ err = idpf_ptp_init(adapter);
+ if (err)
+ pci_err(adapter->pdev, "PTP init failed, err=%pe\n",
+ ERR_PTR(err));
+
idpf_init_avail_queues(adapter);
/* Skew the delay for init tasks for each function based on fn number
@@ -3063,7 +3551,6 @@ init_failed:
adapter->state = __IDPF_VER_CHECK;
if (adapter->vcxn_mngr)
idpf_vc_xn_shutdown(adapter->vcxn_mngr);
- idpf_deinit_dflt_mbx(adapter);
set_bit(IDPF_HR_DRV_LOAD, adapter->flags);
queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task,
msecs_to_jiffies(task_delay));
@@ -3078,12 +3565,23 @@ init_failed:
*/
void idpf_vc_core_deinit(struct idpf_adapter *adapter)
{
+ bool remove_in_prog;
+
if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags))
return;
+ /* Avoid transaction timeouts when called during reset */
+ remove_in_prog = test_bit(IDPF_REMOVE_IN_PROG, adapter->flags);
+ if (!remove_in_prog)
+ idpf_vc_xn_shutdown(adapter->vcxn_mngr);
+
+ idpf_ptp_release(adapter);
idpf_deinit_task(adapter);
+ idpf_idc_deinit_core_aux_device(adapter->cdev_info);
idpf_intr_rel(adapter);
- idpf_vc_xn_shutdown(adapter->vcxn_mngr);
+
+ if (remove_in_prog)
+ idpf_vc_xn_shutdown(adapter->vcxn_mngr);
cancel_delayed_work_sync(&adapter->serv_task);
cancel_delayed_work_sync(&adapter->mbx_task);
@@ -3110,9 +3608,17 @@ int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
{
struct idpf_vector_info vec_info;
int num_alloc_vecs;
+ u32 req;
vec_info.num_curr_vecs = vport->num_q_vectors;
- vec_info.num_req_vecs = max(vport->num_txq, vport->num_rxq);
+ if (vec_info.num_curr_vecs)
+ vec_info.num_curr_vecs += IDPF_RESERVED_VECS;
+
+ /* XDPSQs are all bound to the NOIRQ vector from IDPF_RESERVED_VECS */
+ req = max(vport->num_txq - vport->num_xdp_txq, vport->num_rxq) +
+ IDPF_RESERVED_VECS;
+ vec_info.num_req_vecs = req;
+
vec_info.default_vport = vport->default_vport;
vec_info.index = vport->idx;
@@ -3125,7 +3631,7 @@ int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
return -EINVAL;
}
- vport->num_q_vectors = num_alloc_vecs;
+ vport->num_q_vectors = num_alloc_vecs - IDPF_RESERVED_VECS;
return 0;
}
@@ -3146,6 +3652,7 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
u16 rx_itr[] = {2, 8, 32, 96, 128};
struct idpf_rss_data *rss_data;
u16 idx = vport->idx;
+ int err;
vport_config = adapter->vport_config[idx];
rss_data = &vport_config->user_config.rss_data;
@@ -3180,6 +3687,18 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
idpf_vport_alloc_vec_indexes(vport);
vport->crc_enable = adapter->crc_enable;
+
+ if (!(vport_msg->vport_flags &
+ cpu_to_le16(VIRTCHNL2_VPORT_UPLINK_PORT)))
+ return;
+
+ err = idpf_ptp_get_vport_tstamps_caps(vport);
+ if (err) {
+ pci_dbg(vport->adapter->pdev, "Tx timestamping not supported\n");
+ return;
+ }
+
+ INIT_WORK(&vport->tstamp_task, idpf_tstamp_task);
}
/**
@@ -3490,6 +4009,79 @@ bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
}
/**
+ * idpf_vport_is_cap_ena - Check if vport capability is enabled
+ * @vport: Private data struct
+ * @flag: flag(s) to check
+ *
+ * Return: true if the capability is supported, false otherwise
+ */
+bool idpf_vport_is_cap_ena(struct idpf_vport *vport, u16 flag)
+{
+ struct virtchnl2_create_vport *vport_msg;
+
+ vport_msg = vport->adapter->vport_params_recvd[vport->idx];
+
+ return !!(le16_to_cpu(vport_msg->vport_flags) & flag);
+}
+
+/**
+ * idpf_sideband_flow_type_ena - Check if steering is enabled for flow type
+ * @vport: Private data struct
+ * @flow_type: flow type to check (from ethtool.h)
+ *
+ * Return: true if sideband filters are allowed for @flow_type, false otherwise
+ */
+bool idpf_sideband_flow_type_ena(struct idpf_vport *vport, u32 flow_type)
+{
+ struct virtchnl2_create_vport *vport_msg;
+ __le64 caps;
+
+ vport_msg = vport->adapter->vport_params_recvd[vport->idx];
+ caps = vport_msg->sideband_flow_caps;
+
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ return !!(caps & cpu_to_le64(VIRTCHNL2_FLOW_IPV4_TCP));
+ case UDP_V4_FLOW:
+ return !!(caps & cpu_to_le64(VIRTCHNL2_FLOW_IPV4_UDP));
+ default:
+ return false;
+ }
+}
+
+/**
+ * idpf_sideband_action_ena - Check if steering is enabled for action
+ * @vport: Private data struct
+ * @fsp: flow spec
+ *
+ * Return: true if sideband filters are allowed for @fsp, false otherwise
+ */
+bool idpf_sideband_action_ena(struct idpf_vport *vport,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ struct virtchnl2_create_vport *vport_msg;
+ unsigned int supp_actions;
+
+ vport_msg = vport->adapter->vport_params_recvd[vport->idx];
+ supp_actions = le32_to_cpu(vport_msg->sideband_flow_actions);
+
+ /* Actions Drop/Wake are not supported */
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC ||
+ fsp->ring_cookie == RX_CLS_FLOW_WAKE)
+ return false;
+
+ return !!(supp_actions & VIRTCHNL2_ACTION_QUEUE);
+}
+
+unsigned int idpf_fsteer_max_rules(struct idpf_vport *vport)
+{
+ struct virtchnl2_create_vport *vport_msg;
+
+ vport_msg = vport->adapter->vport_params_recvd[vport->idx];
+ return le32_to_cpu(vport_msg->flow_steer_max_rules);
+}
+
+/**
* idpf_get_vport_id: Get vport id
* @vport: virtual port structure
*
@@ -3504,6 +4096,16 @@ u32 idpf_get_vport_id(struct idpf_vport *vport)
return le32_to_cpu(vport_msg->vport_id);
}
+static void idpf_set_mac_type(struct idpf_vport *vport,
+ struct virtchnl2_mac_addr *mac_addr)
+{
+ bool is_primary;
+
+ is_primary = ether_addr_equal(vport->default_mac_addr, mac_addr->addr);
+ mac_addr->type = is_primary ? VIRTCHNL2_MAC_ADDR_PRIMARY :
+ VIRTCHNL2_MAC_ADDR_EXTRA;
+}
+
/**
* idpf_mac_filter_async_handler - Async callback for mac filters
* @adapter: private data struct
@@ -3633,6 +4235,7 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
list) {
if (add && f->add) {
ether_addr_copy(mac_addr[i].addr, f->macaddr);
+ idpf_set_mac_type(vport, &mac_addr[i]);
i++;
f->add = false;
if (i == total_filters)
@@ -3640,6 +4243,7 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
}
if (!add && f->remove) {
ether_addr_copy(mac_addr[i].addr, f->macaddr);
+ idpf_set_mac_type(vport, &mac_addr[i]);
i++;
f->remove = false;
if (i == total_filters)
@@ -3725,3 +4329,42 @@ int idpf_set_promiscuous(struct idpf_adapter *adapter,
return reply_sz < 0 ? reply_sz : 0;
}
+
+/**
+ * idpf_idc_rdma_vc_send_sync - virtchnl send callback for IDC registered drivers
+ * @cdev_info: IDC core device info pointer
+ * @send_msg: message to send
+ * @msg_size: size of message to send
+ * @recv_msg: message to populate on reception of response
+ * @recv_len: length of message copied into recv_msg or 0 on error
+ *
+ * Return: 0 on success or error code on failure.
+ */
+int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info,
+ u8 *send_msg, u16 msg_size,
+ u8 *recv_msg, u16 *recv_len)
+{
+ struct idpf_adapter *adapter = pci_get_drvdata(cdev_info->pdev);
+ struct idpf_vc_xn_params xn_params = { };
+ ssize_t reply_sz;
+ u16 recv_size;
+
+ if (!recv_msg || !recv_len || msg_size > IDPF_CTLQ_MAX_BUF_LEN)
+ return -EINVAL;
+
+ recv_size = min_t(u16, *recv_len, IDPF_CTLQ_MAX_BUF_LEN);
+ *recv_len = 0;
+ xn_params.vc_op = VIRTCHNL2_OP_RDMA;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = send_msg;
+ xn_params.send_buf.iov_len = msg_size;
+ xn_params.recv_buf.iov_base = recv_msg;
+ xn_params.recv_buf.iov_len = recv_size;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ *recv_len = reply_sz;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(idpf_idc_rdma_vc_send_sync);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
index 83da5d8da56b..eac3d15daa42 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
@@ -4,6 +4,89 @@
#ifndef _IDPF_VIRTCHNL_H_
#define _IDPF_VIRTCHNL_H_
+#include "virtchnl2.h"
+
+#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC (60 * 1000)
+#define IDPF_VC_XN_IDX_M GENMASK(7, 0)
+#define IDPF_VC_XN_SALT_M GENMASK(15, 8)
+#define IDPF_VC_XN_RING_LEN U8_MAX
+
+/**
+ * enum idpf_vc_xn_state - Virtchnl transaction status
+ * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used
+ * @IDPF_VC_XN_WAITING: expecting a reply, not yet received
+ * @IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received, buffer
+ * updated
+ * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there
+ * was an error, buffer not updated
+ * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down
+ * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the
+ * return context; a callback may be provided to handle
+ * return
+ */
+enum idpf_vc_xn_state {
+ IDPF_VC_XN_IDLE = 1,
+ IDPF_VC_XN_WAITING,
+ IDPF_VC_XN_COMPLETED_SUCCESS,
+ IDPF_VC_XN_COMPLETED_FAILED,
+ IDPF_VC_XN_SHUTDOWN,
+ IDPF_VC_XN_ASYNC,
+};
+
+struct idpf_vc_xn;
+/* Callback for asynchronous messages */
+typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *,
+ const struct idpf_ctlq_msg *);
+
+/**
+ * struct idpf_vc_xn - Data structure representing virtchnl transactions
+ * @completed: virtchnl event loop uses that to signal when a reply is
+ * available, uses kernel completion API
+ * @state: virtchnl event loop stores the data below, protected by the
+ * completion's lock.
+ * @reply_sz: Original size of reply, may be > reply.iov_len; it will be
+ *	      truncated on its way to the receiver thread according to
+ *	      reply.iov_len.
+ * @reply: Reference to the buffer(s) where the reply data should be written
+ * to. May be 0-length (then NULL address permitted) if the reply data
+ * should be ignored.
+ * @async_handler: if sent asynchronously, a callback can be provided to handle
+ * the reply when it's received
+ * @vc_op: corresponding opcode sent with this transaction
+ * @idx: index used as retrieval on reply receive, used for cookie
+ * @salt: changed every message to make unique, used for cookie
+ */
+struct idpf_vc_xn {
+ struct completion completed;
+ enum idpf_vc_xn_state state;
+ size_t reply_sz;
+ struct kvec reply;
+ async_vc_cb async_handler;
+ u32 vc_op;
+ u8 idx;
+ u8 salt;
+};
+
+/**
+ * struct idpf_vc_xn_params - Parameters for executing transaction
+ * @send_buf: kvec for send buffer
+ * @recv_buf: kvec for recv buffer, may be NULL, must then have zero length
+ * @timeout_ms: timeout to wait for reply
+ * @async: send message asynchronously, will not wait on completion
+ * @async_handler: If sent asynchronously, optional callback handler. The user
+ * must be careful when using async handlers as the memory for
+ * the recv_buf _cannot_ be on stack if this is async.
+ * @vc_op: virtchnl op to send
+ */
+struct idpf_vc_xn_params {
+ struct kvec send_buf;
+ struct kvec recv_buf;
+ int timeout_ms;
+ bool async;
+ async_vc_cb async_handler;
+ u32 vc_op;
+};
+
struct idpf_adapter;
struct idpf_netdev_priv;
struct idpf_vec_regs;
@@ -11,6 +94,8 @@ struct idpf_vport;
struct idpf_vport_max_q;
struct idpf_vport_user_config_data;
+ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
+ const struct idpf_vc_xn_params *params);
int idpf_init_dflt_mbx(struct idpf_adapter *adapter);
void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter);
int idpf_vc_core_init(struct idpf_adapter *adapter);
@@ -21,10 +106,43 @@ int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
int idpf_queue_reg_init(struct idpf_vport *vport);
int idpf_vport_queue_ids_init(struct idpf_vport *vport);
+bool idpf_vport_is_cap_ena(struct idpf_vport *vport, u16 flag);
+bool idpf_sideband_flow_type_ena(struct idpf_vport *vport, u32 flow_type);
+bool idpf_sideband_action_ena(struct idpf_vport *vport,
+ struct ethtool_rx_flow_spec *fsp);
+unsigned int idpf_fsteer_max_rules(struct idpf_vport *vport);
+
int idpf_recv_mb_msg(struct idpf_adapter *adapter);
int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
u16 msg_size, u8 *msg, u16 cookie);
+struct idpf_queue_ptr {
+ enum virtchnl2_queue_type type;
+ union {
+ struct idpf_rx_queue *rxq;
+ struct idpf_tx_queue *txq;
+ struct idpf_buf_queue *bufq;
+ struct idpf_compl_queue *complq;
+ };
+};
+
+struct idpf_queue_set {
+ struct idpf_vport *vport;
+
+ u32 num;
+ struct idpf_queue_ptr qs[] __counted_by(num);
+};
+
+struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_vport *vport, u32 num);
+
+int idpf_send_enable_queue_set_msg(const struct idpf_queue_set *qs);
+int idpf_send_disable_queue_set_msg(const struct idpf_queue_set *qs);
+int idpf_send_config_queue_set_msg(const struct idpf_queue_set *qs);
+
+int idpf_send_disable_queues_msg(struct idpf_vport *vport);
+int idpf_send_config_queues_msg(struct idpf_vport *vport);
+int idpf_send_enable_queues_msg(struct idpf_vport *vport);
+
void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q);
u32 idpf_get_vport_id(struct idpf_vport *vport);
int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
@@ -41,9 +159,6 @@ void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
u16 num_complq, u16 num_rx_q, u16 num_rx_bufq);
int idpf_send_delete_queues_msg(struct idpf_vport *vport);
-int idpf_send_enable_queues_msg(struct idpf_vport *vport);
-int idpf_send_disable_queues_msg(struct idpf_vport *vport);
-int idpf_send_config_queues_msg(struct idpf_vport *vport);
int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport);
int idpf_get_vec_ids(struct idpf_adapter *adapter,
@@ -66,5 +181,9 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport);
int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get);
int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get);
+void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr);
+int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info,
+ u8 *send_msg, u16 msg_size,
+ u8 *recv_msg, u16 *recv_len);
#endif /* _IDPF_VIRTCHNL_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c
new file mode 100644
index 000000000000..61cedb6f2854
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c
@@ -0,0 +1,673 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2024 Intel Corporation */
+
+#include "idpf.h"
+#include "idpf_ptp.h"
+#include "idpf_virtchnl.h"
+
+/**
+ * idpf_ptp_get_caps - Send virtchnl get ptp capabilities message
+ * @adapter: Driver specific private structure
+ *
+ * Send virtchnl get PTP capabilities message.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int idpf_ptp_get_caps(struct idpf_adapter *adapter)
+{
+ struct virtchnl2_ptp_get_caps *recv_ptp_caps_msg __free(kfree) = NULL;
+ struct virtchnl2_ptp_get_caps send_ptp_caps_msg = {
+ .caps = cpu_to_le32(VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME |
+ VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB |
+ VIRTCHNL2_CAP_PTP_GET_CROSS_TIME |
+ VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB |
+ VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB |
+ VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB)
+ };
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_GET_CAPS,
+ .send_buf.iov_base = &send_ptp_caps_msg,
+ .send_buf.iov_len = sizeof(send_ptp_caps_msg),
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ struct virtchnl2_ptp_cross_time_reg_offsets cross_tstamp_offsets;
+ struct virtchnl2_ptp_clk_adj_reg_offsets clk_adj_offsets;
+ struct virtchnl2_ptp_clk_reg_offsets clock_offsets;
+ struct idpf_ptp_secondary_mbx *scnd_mbx;
+ struct idpf_ptp *ptp = adapter->ptp;
+ enum idpf_ptp_access access_type;
+ u32 temp_offset;
+ int reply_sz;
+
+ recv_ptp_caps_msg = kzalloc(sizeof(struct virtchnl2_ptp_get_caps),
+ GFP_KERNEL);
+ if (!recv_ptp_caps_msg)
+ return -ENOMEM;
+
+ xn_params.recv_buf.iov_base = recv_ptp_caps_msg;
+ xn_params.recv_buf.iov_len = sizeof(*recv_ptp_caps_msg);
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ else if (reply_sz != sizeof(*recv_ptp_caps_msg))
+ return -EIO;
+
+ ptp->caps = le32_to_cpu(recv_ptp_caps_msg->caps);
+ ptp->base_incval = le64_to_cpu(recv_ptp_caps_msg->base_incval);
+ ptp->max_adj = le32_to_cpu(recv_ptp_caps_msg->max_adj);
+
+ scnd_mbx = &ptp->secondary_mbx;
+ scnd_mbx->peer_mbx_q_id = le16_to_cpu(recv_ptp_caps_msg->peer_mbx_q_id);
+
+	/* If peer_mbx_q_id holds the invalid value (0xffff), the secondary
+ * mailbox is not supported.
+ */
+ scnd_mbx->valid = scnd_mbx->peer_mbx_q_id != 0xffff;
+ if (scnd_mbx->valid)
+ scnd_mbx->peer_id = recv_ptp_caps_msg->peer_id;
+
+ /* Determine the access type for the PTP features */
+ idpf_ptp_get_features_access(adapter);
+
+ access_type = ptp->get_dev_clk_time_access;
+ if (access_type != IDPF_PTP_DIRECT)
+ goto cross_tstamp;
+
+ clock_offsets = recv_ptp_caps_msg->clk_offsets;
+
+ temp_offset = le32_to_cpu(clock_offsets.dev_clk_ns_l);
+ ptp->dev_clk_regs.dev_clk_ns_l = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(clock_offsets.dev_clk_ns_h);
+ ptp->dev_clk_regs.dev_clk_ns_h = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(clock_offsets.phy_clk_ns_l);
+ ptp->dev_clk_regs.phy_clk_ns_l = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(clock_offsets.phy_clk_ns_h);
+ ptp->dev_clk_regs.phy_clk_ns_h = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(clock_offsets.cmd_sync_trigger);
+ ptp->dev_clk_regs.cmd_sync = idpf_get_reg_addr(adapter, temp_offset);
+
+cross_tstamp:
+ access_type = ptp->get_cross_tstamp_access;
+ if (access_type != IDPF_PTP_DIRECT)
+ goto discipline_clock;
+
+ cross_tstamp_offsets = recv_ptp_caps_msg->cross_time_offsets;
+
+ temp_offset = le32_to_cpu(cross_tstamp_offsets.sys_time_ns_l);
+ ptp->dev_clk_regs.sys_time_ns_l = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(cross_tstamp_offsets.sys_time_ns_h);
+ ptp->dev_clk_regs.sys_time_ns_h = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(cross_tstamp_offsets.cmd_sync_trigger);
+ ptp->dev_clk_regs.cmd_sync = idpf_get_reg_addr(adapter, temp_offset);
+
+discipline_clock:
+ access_type = ptp->adj_dev_clk_time_access;
+ if (access_type != IDPF_PTP_DIRECT)
+ return 0;
+
+ clk_adj_offsets = recv_ptp_caps_msg->clk_adj_offsets;
+
+ /* Device clock offsets */
+ temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_cmd_type);
+ ptp->dev_clk_regs.cmd = idpf_get_reg_addr(adapter, temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_incval_l);
+ ptp->dev_clk_regs.incval_l = idpf_get_reg_addr(adapter, temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_incval_h);
+ ptp->dev_clk_regs.incval_h = idpf_get_reg_addr(adapter, temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_shadj_l);
+ ptp->dev_clk_regs.shadj_l = idpf_get_reg_addr(adapter, temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_shadj_h);
+ ptp->dev_clk_regs.shadj_h = idpf_get_reg_addr(adapter, temp_offset);
+
+ /* PHY clock offsets */
+ temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_cmd_type);
+ ptp->dev_clk_regs.phy_cmd = idpf_get_reg_addr(adapter, temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_incval_l);
+ ptp->dev_clk_regs.phy_incval_l = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_incval_h);
+ ptp->dev_clk_regs.phy_incval_h = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_shadj_l);
+ ptp->dev_clk_regs.phy_shadj_l = idpf_get_reg_addr(adapter, temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_shadj_h);
+ ptp->dev_clk_regs.phy_shadj_h = idpf_get_reg_addr(adapter, temp_offset);
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_get_dev_clk_time - Send virtchnl get device clk time message
+ * @adapter: Driver specific private structure
+ * @dev_clk_time: Pointer to the device clock structure where the value is set
+ *
+ * Send virtchnl get time message to get the time of the clock.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter,
+ struct idpf_ptp_dev_timers *dev_clk_time)
+{
+ struct virtchnl2_ptp_get_dev_clk_time get_dev_clk_time_msg;
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME,
+ .send_buf.iov_base = &get_dev_clk_time_msg,
+ .send_buf.iov_len = sizeof(get_dev_clk_time_msg),
+ .recv_buf.iov_base = &get_dev_clk_time_msg,
+ .recv_buf.iov_len = sizeof(get_dev_clk_time_msg),
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ int reply_sz;
+ u64 dev_time;
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz != sizeof(get_dev_clk_time_msg))
+ return -EIO;
+
+ dev_time = le64_to_cpu(get_dev_clk_time_msg.dev_time_ns);
+ dev_clk_time->dev_clk_time_ns = dev_time;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_get_cross_time - Send virtchnl get cross time message
+ * @adapter: Driver specific private structure
+ * @cross_time: Pointer to the device clock structure where the value is set
+ *
+ * Send virtchnl get cross time message to get the time of the clock and the
+ * system time.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_get_cross_time(struct idpf_adapter *adapter,
+ struct idpf_ptp_dev_timers *cross_time)
+{
+ struct virtchnl2_ptp_get_cross_time cross_time_msg;
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_GET_CROSS_TIME,
+ .send_buf.iov_base = &cross_time_msg,
+ .send_buf.iov_len = sizeof(cross_time_msg),
+ .recv_buf.iov_base = &cross_time_msg,
+ .recv_buf.iov_len = sizeof(cross_time_msg),
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ int reply_sz;
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz != sizeof(cross_time_msg))
+ return -EIO;
+
+ cross_time->dev_clk_time_ns = le64_to_cpu(cross_time_msg.dev_time_ns);
+ cross_time->sys_time_ns = le64_to_cpu(cross_time_msg.sys_time_ns);
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_set_dev_clk_time - Send virtchnl set device time message
+ * @adapter: Driver specific private structure
+ * @time: New time value
+ *
+ * Send virtchnl set time message to set the time of the clock.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter, u64 time)
+{
+ struct virtchnl2_ptp_set_dev_clk_time set_dev_clk_time_msg = {
+ .dev_time_ns = cpu_to_le64(time),
+ };
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME,
+ .send_buf.iov_base = &set_dev_clk_time_msg,
+ .send_buf.iov_len = sizeof(set_dev_clk_time_msg),
+ .recv_buf.iov_base = &set_dev_clk_time_msg,
+ .recv_buf.iov_len = sizeof(set_dev_clk_time_msg),
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ int reply_sz;
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz != sizeof(set_dev_clk_time_msg))
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_adj_dev_clk_time - Send virtchnl adj device clock time message
+ * @adapter: Driver specific private structure
+ * @delta: Offset in nanoseconds to adjust the time by
+ *
+ * Send virtchnl adj time message to adjust the clock by the indicated delta.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter, s64 delta)
+{
+ struct virtchnl2_ptp_adj_dev_clk_time adj_dev_clk_time_msg = {
+ .delta = cpu_to_le64(delta),
+ };
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME,
+ .send_buf.iov_base = &adj_dev_clk_time_msg,
+ .send_buf.iov_len = sizeof(adj_dev_clk_time_msg),
+ .recv_buf.iov_base = &adj_dev_clk_time_msg,
+ .recv_buf.iov_len = sizeof(adj_dev_clk_time_msg),
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ int reply_sz;
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz != sizeof(adj_dev_clk_time_msg))
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_adj_dev_clk_fine - Send virtchnl adj time message
+ * @adapter: Driver specific private structure
+ * @incval: Source timer increment value per clock cycle
+ *
+ * Send virtchnl adj fine message to adjust the frequency of the clock by
+ * incval.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter, u64 incval)
+{
+ struct virtchnl2_ptp_adj_dev_clk_fine adj_dev_clk_fine_msg = {
+ .incval = cpu_to_le64(incval),
+ };
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE,
+ .send_buf.iov_base = &adj_dev_clk_fine_msg,
+ .send_buf.iov_len = sizeof(adj_dev_clk_fine_msg),
+ .recv_buf.iov_base = &adj_dev_clk_fine_msg,
+ .recv_buf.iov_len = sizeof(adj_dev_clk_fine_msg),
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ int reply_sz;
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz != sizeof(adj_dev_clk_fine_msg))
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_get_vport_tstamps_caps - Send virtchnl to get tstamps caps for vport
+ * @vport: Virtual port structure
+ *
+ * Send virtchnl get vport tstamps caps message to receive the set of tstamp
+ * capabilities per vport.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_get_vport_tstamps_caps(struct idpf_vport *vport)
+{
+ struct virtchnl2_ptp_get_vport_tx_tstamp_caps send_tx_tstamp_caps;
+ struct virtchnl2_ptp_get_vport_tx_tstamp_caps *rcv_tx_tstamp_caps;
+ struct virtchnl2_ptp_tx_tstamp_latch_caps tx_tstamp_latch_caps;
+ struct idpf_ptp_vport_tx_tstamp_caps *tstamp_caps;
+ struct idpf_ptp_tx_tstamp *ptp_tx_tstamp, *tmp;
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS,
+ .send_buf.iov_base = &send_tx_tstamp_caps,
+ .send_buf.iov_len = sizeof(send_tx_tstamp_caps),
+ .recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN,
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ enum idpf_ptp_access tstamp_access, get_dev_clk_access;
+ struct idpf_ptp *ptp = vport->adapter->ptp;
+ struct list_head *head;
+ int err = 0, reply_sz;
+ u16 num_latches;
+ u32 size;
+
+ if (!ptp)
+ return -EOPNOTSUPP;
+
+ tstamp_access = ptp->tx_tstamp_access;
+ get_dev_clk_access = ptp->get_dev_clk_time_access;
+ if (tstamp_access == IDPF_PTP_NONE ||
+ get_dev_clk_access == IDPF_PTP_NONE)
+ return -EOPNOTSUPP;
+
+ rcv_tx_tstamp_caps = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!rcv_tx_tstamp_caps)
+ return -ENOMEM;
+
+ send_tx_tstamp_caps.vport_id = cpu_to_le32(vport->vport_id);
+ xn_params.recv_buf.iov_base = rcv_tx_tstamp_caps;
+
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0) {
+ err = reply_sz;
+ goto get_tstamp_caps_out;
+ }
+
+ num_latches = le16_to_cpu(rcv_tx_tstamp_caps->num_latches);
+ size = struct_size(rcv_tx_tstamp_caps, tstamp_latches, num_latches);
+ if (reply_sz != size) {
+ err = -EIO;
+ goto get_tstamp_caps_out;
+ }
+
+ size = struct_size(tstamp_caps, tx_tstamp_status, num_latches);
+ tstamp_caps = kzalloc(size, GFP_KERNEL);
+ if (!tstamp_caps) {
+ err = -ENOMEM;
+ goto get_tstamp_caps_out;
+ }
+
+ tstamp_caps->access = true;
+ tstamp_caps->num_entries = num_latches;
+
+ INIT_LIST_HEAD(&tstamp_caps->latches_in_use);
+ INIT_LIST_HEAD(&tstamp_caps->latches_free);
+
+ spin_lock_init(&tstamp_caps->latches_lock);
+ spin_lock_init(&tstamp_caps->status_lock);
+
+ tstamp_caps->tstamp_ns_lo_bit = rcv_tx_tstamp_caps->tstamp_ns_lo_bit;
+
+ for (u16 i = 0; i < tstamp_caps->num_entries; i++) {
+ __le32 offset_l, offset_h;
+
+ ptp_tx_tstamp = kzalloc(sizeof(*ptp_tx_tstamp), GFP_KERNEL);
+ if (!ptp_tx_tstamp) {
+ err = -ENOMEM;
+ goto err_free_ptp_tx_stamp_list;
+ }
+
+ tx_tstamp_latch_caps = rcv_tx_tstamp_caps->tstamp_latches[i];
+
+ if (tstamp_access != IDPF_PTP_DIRECT)
+ goto skip_offsets;
+
+ offset_l = tx_tstamp_latch_caps.tx_latch_reg_offset_l;
+ offset_h = tx_tstamp_latch_caps.tx_latch_reg_offset_h;
+ ptp_tx_tstamp->tx_latch_reg_offset_l = le32_to_cpu(offset_l);
+ ptp_tx_tstamp->tx_latch_reg_offset_h = le32_to_cpu(offset_h);
+
+skip_offsets:
+ ptp_tx_tstamp->idx = tx_tstamp_latch_caps.index;
+
+ list_add(&ptp_tx_tstamp->list_member,
+ &tstamp_caps->latches_free);
+
+ tstamp_caps->tx_tstamp_status[i].state = IDPF_PTP_FREE;
+ }
+
+ vport->tx_tstamp_caps = tstamp_caps;
+ kfree(rcv_tx_tstamp_caps);
+
+ return 0;
+
+err_free_ptp_tx_stamp_list:
+ head = &tstamp_caps->latches_free;
+ list_for_each_entry_safe(ptp_tx_tstamp, tmp, head, list_member) {
+ list_del(&ptp_tx_tstamp->list_member);
+ kfree(ptp_tx_tstamp);
+ }
+
+ kfree(tstamp_caps);
+get_tstamp_caps_out:
+ kfree(rcv_tx_tstamp_caps);
+
+ return err;
+}
+
+/**
+ * idpf_ptp_update_tstamp_tracker - Update the Tx timestamp tracker based on
+ * the skb compatibility.
+ * @caps: Tx timestamp capabilities that monitor the latch status
+ * @skb: skb for which the tstamp value is returned through virtchnl message
+ * @current_state: Current state of the Tx timestamp latch
+ * @expected_state: Expected state of the Tx timestamp latch
+ *
+ * Find a proper skb tracker for which the Tx timestamp is received and change
+ * the state to expected value.
+ *
+ * Return: true if the tracker has been found and updated, false otherwise.
+ */
+static bool
+idpf_ptp_update_tstamp_tracker(struct idpf_ptp_vport_tx_tstamp_caps *caps,
+ struct sk_buff *skb,
+ enum idpf_ptp_tx_tstamp_state current_state,
+ enum idpf_ptp_tx_tstamp_state expected_state)
+{
+ bool updated = false;
+
+ spin_lock(&caps->status_lock);
+ for (u16 i = 0; i < caps->num_entries; i++) {
+ struct idpf_ptp_tx_tstamp_status *status;
+
+ status = &caps->tx_tstamp_status[i];
+
+ if (skb == status->skb && status->state == current_state) {
+ status->state = expected_state;
+ updated = true;
+ break;
+ }
+ }
+ spin_unlock(&caps->status_lock);
+
+ return updated;
+}
+
+/**
+ * idpf_ptp_get_tstamp_value - Get the Tx timestamp value and provide it
+ * back to the skb.
+ * @vport: Virtual port structure
+ * @tstamp_latch: Tx timestamp latch structure fulfilled by the Control Plane
+ * @ptp_tx_tstamp: Tx timestamp latch to add to the free list
+ *
+ * Read the value of the Tx timestamp for a given latch received from the
+ * Control Plane, extend it to 64 bits and provide it back to the skb.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int
+idpf_ptp_get_tstamp_value(struct idpf_vport *vport,
+ struct virtchnl2_ptp_tx_tstamp_latch *tstamp_latch,
+ struct idpf_ptp_tx_tstamp *ptp_tx_tstamp)
+{
+ struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
+ struct skb_shared_hwtstamps shhwtstamps;
+ bool state_upd = false;
+ u8 tstamp_ns_lo_bit;
+ u64 tstamp;
+
+ tx_tstamp_caps = vport->tx_tstamp_caps;
+ tstamp_ns_lo_bit = tx_tstamp_caps->tstamp_ns_lo_bit;
+
+ ptp_tx_tstamp->tstamp = le64_to_cpu(tstamp_latch->tstamp);
+ ptp_tx_tstamp->tstamp >>= tstamp_ns_lo_bit;
+
+ state_upd = idpf_ptp_update_tstamp_tracker(tx_tstamp_caps,
+ ptp_tx_tstamp->skb,
+ IDPF_PTP_READ_VALUE,
+ IDPF_PTP_FREE);
+ if (!state_upd)
+ return -EINVAL;
+
+ tstamp = idpf_ptp_extend_ts(vport, ptp_tx_tstamp->tstamp);
+ shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
+ skb_tstamp_tx(ptp_tx_tstamp->skb, &shhwtstamps);
+ consume_skb(ptp_tx_tstamp->skb);
+ ptp_tx_tstamp->skb = NULL;
+
+ list_add(&ptp_tx_tstamp->list_member,
+ &tx_tstamp_caps->latches_free);
+
+ u64_stats_update_begin(&vport->tstamp_stats.stats_sync);
+ u64_stats_inc(&vport->tstamp_stats.packets);
+ u64_stats_update_end(&vport->tstamp_stats.stats_sync);
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_get_tx_tstamp_async_handler - Async callback for getting Tx tstamps
+ * @adapter: Driver specific private structure
+ * @xn: transaction for message
+ * @ctlq_msg: received message
+ *
+ * Read the Tx timestamp values from a received message and provide them
+ * directly to the skb. The number of timestamps to read is specified by
+ * the virtchnl message.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int
+idpf_ptp_get_tx_tstamp_async_handler(struct idpf_adapter *adapter,
+ struct idpf_vc_xn *xn,
+ const struct idpf_ctlq_msg *ctlq_msg)
+{
+ struct virtchnl2_ptp_get_vport_tx_tstamp_latches *recv_tx_tstamp_msg;
+ struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
+ struct virtchnl2_ptp_tx_tstamp_latch tstamp_latch;
+ struct idpf_ptp_tx_tstamp *tx_tstamp, *tmp;
+ struct idpf_vport *tstamp_vport = NULL;
+ struct list_head *head;
+ u16 num_latches;
+ u32 vport_id;
+ int err = 0;
+
+ recv_tx_tstamp_msg = ctlq_msg->ctx.indirect.payload->va;
+ vport_id = le32_to_cpu(recv_tx_tstamp_msg->vport_id);
+
+ idpf_for_each_vport(adapter, vport) {
+ if (!vport)
+ continue;
+
+ if (vport->vport_id == vport_id) {
+ tstamp_vport = vport;
+ break;
+ }
+ }
+
+ if (!tstamp_vport || !tstamp_vport->tx_tstamp_caps)
+ return -EINVAL;
+
+ tx_tstamp_caps = tstamp_vport->tx_tstamp_caps;
+ num_latches = le16_to_cpu(recv_tx_tstamp_msg->num_latches);
+
+ spin_lock_bh(&tx_tstamp_caps->latches_lock);
+ head = &tx_tstamp_caps->latches_in_use;
+
+ for (u16 i = 0; i < num_latches; i++) {
+ tstamp_latch = recv_tx_tstamp_msg->tstamp_latches[i];
+
+ if (!tstamp_latch.valid)
+ continue;
+
+ if (list_empty(head)) {
+ err = -ENOBUFS;
+ goto unlock;
+ }
+
+ list_for_each_entry_safe(tx_tstamp, tmp, head, list_member) {
+ if (tstamp_latch.index == tx_tstamp->idx) {
+ list_del(&tx_tstamp->list_member);
+ err = idpf_ptp_get_tstamp_value(tstamp_vport,
+ &tstamp_latch,
+ tx_tstamp);
+ if (err)
+ goto unlock;
+
+ break;
+ }
+ }
+ }
+
+unlock:
+ spin_unlock_bh(&tx_tstamp_caps->latches_lock);
+
+ return err;
+}
+
+/**
+ * idpf_ptp_get_tx_tstamp - Send virtchnl get Tx timestamp latches message
+ * @vport: Virtual port structure
+ *
+ * Send virtchnl get Tx tstamp message to read the value of the HW timestamp.
+ * The message contains a list of indexes set in the Tx descriptors.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_get_tx_tstamp(struct idpf_vport *vport)
+{
+ struct virtchnl2_ptp_get_vport_tx_tstamp_latches *send_tx_tstamp_msg;
+ struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP,
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ .async = true,
+ .async_handler = idpf_ptp_get_tx_tstamp_async_handler,
+ };
+ struct idpf_ptp_tx_tstamp *ptp_tx_tstamp;
+ int reply_sz, size, msg_size;
+ struct list_head *head;
+ bool state_upd;
+ u16 id = 0;
+
+ tx_tstamp_caps = vport->tx_tstamp_caps;
+ head = &tx_tstamp_caps->latches_in_use;
+
+ size = struct_size(send_tx_tstamp_msg, tstamp_latches,
+ tx_tstamp_caps->num_entries);
+ send_tx_tstamp_msg = kzalloc(size, GFP_KERNEL);
+ if (!send_tx_tstamp_msg)
+ return -ENOMEM;
+
+ spin_lock_bh(&tx_tstamp_caps->latches_lock);
+ list_for_each_entry(ptp_tx_tstamp, head, list_member) {
+ u8 idx;
+
+ state_upd = idpf_ptp_update_tstamp_tracker(tx_tstamp_caps,
+ ptp_tx_tstamp->skb,
+ IDPF_PTP_REQUEST,
+ IDPF_PTP_READ_VALUE);
+ if (!state_upd)
+ continue;
+
+ idx = ptp_tx_tstamp->idx;
+ send_tx_tstamp_msg->tstamp_latches[id].index = idx;
+ id++;
+ }
+ spin_unlock_bh(&tx_tstamp_caps->latches_lock);
+
+ msg_size = struct_size(send_tx_tstamp_msg, tstamp_latches, id);
+ send_tx_tstamp_msg->vport_id = cpu_to_le32(vport->vport_id);
+ send_tx_tstamp_msg->num_latches = cpu_to_le16(id);
+ xn_params.send_buf.iov_base = send_tx_tstamp_msg;
+ xn_params.send_buf.iov_len = msg_size;
+
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ kfree(send_tx_tstamp_msg);
+
+ return min(reply_sz, 0);
+}
diff --git a/drivers/net/ethernet/intel/idpf/virtchnl2.h b/drivers/net/ethernet/intel/idpf/virtchnl2.h
index 63deb120359c..02ae447cc24a 100644
--- a/drivers/net/ethernet/intel/idpf/virtchnl2.h
+++ b/drivers/net/ethernet/intel/idpf/virtchnl2.h
@@ -62,12 +62,28 @@ enum virtchnl2_op {
VIRTCHNL2_OP_GET_PTYPE_INFO = 526,
/* Opcode 527 and 528 are reserved for VIRTCHNL2_OP_GET_PTYPE_ID and
* VIRTCHNL2_OP_GET_PTYPE_INFO_RAW.
- * Opcodes 529, 530, 531, 532 and 533 are reserved.
*/
+ VIRTCHNL2_OP_RDMA = 529,
+ /* Opcodes 530 through 533 are reserved. */
VIRTCHNL2_OP_LOOPBACK = 534,
VIRTCHNL2_OP_ADD_MAC_ADDR = 535,
VIRTCHNL2_OP_DEL_MAC_ADDR = 536,
VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE = 537,
+
+ /* TimeSync opcodes */
+ VIRTCHNL2_OP_PTP_GET_CAPS = 541,
+ VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP = 542,
+ VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME = 543,
+ VIRTCHNL2_OP_PTP_GET_CROSS_TIME = 544,
+ VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME = 545,
+ VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE = 546,
+ VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME = 547,
+ VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS = 548,
+ VIRTCHNL2_OP_GET_LAN_MEMORY_REGIONS = 549,
+ /* Opcode 550 is reserved */
+ VIRTCHNL2_OP_ADD_FLOW_RULE = 551,
+ VIRTCHNL2_OP_GET_FLOW_RULE = 552,
+ VIRTCHNL2_OP_DEL_FLOW_RULE = 553,
};
/**
@@ -141,22 +157,22 @@ enum virtchnl2_cap_seg {
VIRTCHNL2_CAP_SEG_TX_DOUBLE_TUNNEL = BIT(8),
};
-/* Receive Side Scaling Flow type capability flags */
-enum virtchnl2_cap_rss {
- VIRTCHNL2_CAP_RSS_IPV4_TCP = BIT(0),
- VIRTCHNL2_CAP_RSS_IPV4_UDP = BIT(1),
- VIRTCHNL2_CAP_RSS_IPV4_SCTP = BIT(2),
- VIRTCHNL2_CAP_RSS_IPV4_OTHER = BIT(3),
- VIRTCHNL2_CAP_RSS_IPV6_TCP = BIT(4),
- VIRTCHNL2_CAP_RSS_IPV6_UDP = BIT(5),
- VIRTCHNL2_CAP_RSS_IPV6_SCTP = BIT(6),
- VIRTCHNL2_CAP_RSS_IPV6_OTHER = BIT(7),
- VIRTCHNL2_CAP_RSS_IPV4_AH = BIT(8),
- VIRTCHNL2_CAP_RSS_IPV4_ESP = BIT(9),
- VIRTCHNL2_CAP_RSS_IPV4_AH_ESP = BIT(10),
- VIRTCHNL2_CAP_RSS_IPV6_AH = BIT(11),
- VIRTCHNL2_CAP_RSS_IPV6_ESP = BIT(12),
- VIRTCHNL2_CAP_RSS_IPV6_AH_ESP = BIT(13),
+/* Receive Side Scaling and Flow Steering Flow type capability flags */
+enum virtchnl2_flow_types {
+ VIRTCHNL2_FLOW_IPV4_TCP = BIT(0),
+ VIRTCHNL2_FLOW_IPV4_UDP = BIT(1),
+ VIRTCHNL2_FLOW_IPV4_SCTP = BIT(2),
+ VIRTCHNL2_FLOW_IPV4_OTHER = BIT(3),
+ VIRTCHNL2_FLOW_IPV6_TCP = BIT(4),
+ VIRTCHNL2_FLOW_IPV6_UDP = BIT(5),
+ VIRTCHNL2_FLOW_IPV6_SCTP = BIT(6),
+ VIRTCHNL2_FLOW_IPV6_OTHER = BIT(7),
+ VIRTCHNL2_FLOW_IPV4_AH = BIT(8),
+ VIRTCHNL2_FLOW_IPV4_ESP = BIT(9),
+ VIRTCHNL2_FLOW_IPV4_AH_ESP = BIT(10),
+ VIRTCHNL2_FLOW_IPV6_AH = BIT(11),
+ VIRTCHNL2_FLOW_IPV6_ESP = BIT(12),
+ VIRTCHNL2_FLOW_IPV6_AH_ESP = BIT(13),
};
/* Header split capability flags */
@@ -182,8 +198,9 @@ enum virtchnl2_cap_other {
VIRTCHNL2_CAP_RDMA = BIT_ULL(0),
VIRTCHNL2_CAP_SRIOV = BIT_ULL(1),
VIRTCHNL2_CAP_MACFILTER = BIT_ULL(2),
- VIRTCHNL2_CAP_FLOW_DIRECTOR = BIT_ULL(3),
- /* Queue based scheduling using split queue model */
+ /* Other capability 3 is available
+ * Queue based scheduling using split queue model
+ */
VIRTCHNL2_CAP_SPLITQ_QSCHED = BIT_ULL(4),
VIRTCHNL2_CAP_CRC = BIT_ULL(5),
VIRTCHNL2_CAP_ADQ = BIT_ULL(6),
@@ -197,16 +214,37 @@ enum virtchnl2_cap_other {
/* EDT: Earliest Departure Time capability used for Timing Wheel */
VIRTCHNL2_CAP_EDT = BIT_ULL(14),
VIRTCHNL2_CAP_ADV_RSS = BIT_ULL(15),
- VIRTCHNL2_CAP_FDIR = BIT_ULL(16),
+ /* Other capability 16 is available */
VIRTCHNL2_CAP_RX_FLEX_DESC = BIT_ULL(17),
VIRTCHNL2_CAP_PTYPE = BIT_ULL(18),
VIRTCHNL2_CAP_LOOPBACK = BIT_ULL(19),
/* Other capability 20 is reserved */
+ VIRTCHNL2_CAP_FLOW_STEER = BIT_ULL(21),
+ VIRTCHNL2_CAP_LAN_MEMORY_REGIONS = BIT_ULL(22),
/* this must be the last capability */
VIRTCHNL2_CAP_OEM = BIT_ULL(63),
};
+/**
+ * enum virtchnl2_action_types - Available actions for sideband flow steering
+ * @VIRTCHNL2_ACTION_DROP: Drop the packet
+ * @VIRTCHNL2_ACTION_PASSTHRU: Forward the packet to the next classifier/stage
+ * @VIRTCHNL2_ACTION_QUEUE: Forward the packet to a receive queue
+ * @VIRTCHNL2_ACTION_Q_GROUP: Forward the packet to a receive queue group
+ * @VIRTCHNL2_ACTION_MARK: Mark the packet with specific marker value
+ * @VIRTCHNL2_ACTION_COUNT: Increment the corresponding counter
+ */
+enum virtchnl2_action_types {
+ VIRTCHNL2_ACTION_DROP = BIT(0),
+ VIRTCHNL2_ACTION_PASSTHRU = BIT(1),
+ VIRTCHNL2_ACTION_QUEUE = BIT(2),
+ VIRTCHNL2_ACTION_Q_GROUP = BIT(3),
+ VIRTCHNL2_ACTION_MARK = BIT(4),
+ VIRTCHNL2_ACTION_COUNT = BIT(5),
+};
+
/* underlying device type */
enum virtchl2_device_type {
VIRTCHNL2_MEV_DEVICE = 0,
@@ -448,7 +486,7 @@ VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_version_info);
* @seg_caps: See enum virtchnl2_cap_seg.
* @hsplit_caps: See enum virtchnl2_cap_rx_hsplit_at.
* @rsc_caps: See enum virtchnl2_cap_rsc.
- * @rss_caps: See enum virtchnl2_cap_rss.
+ * @rss_caps: See enum virtchnl2_flow_types.
* @other_caps: See enum virtchnl2_cap_other.
* @mailbox_dyn_ctl: DYN_CTL register offset and vector id for mailbox
* provided by CP.
@@ -473,6 +511,8 @@ VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_version_info);
* segment offload.
* @max_hdr_buf_per_lso: Max number of header buffers that can be used for
* an LSO.
+ * @num_rdma_allocated_vectors: Maximum number of allocated RDMA vectors for
+ * the device.
* @pad1: Padding for future extensions.
*
* Dataplane driver sends this message to CP to negotiate capabilities and
@@ -520,7 +560,8 @@ struct virtchnl2_get_capabilities {
__le32 device_type;
u8 min_sso_packet_len;
u8 max_hdr_buf_per_lso;
- u8 pad1[10];
+ __le16 num_rdma_allocated_vectors;
+ u8 pad1[8];
};
VIRTCHNL2_CHECK_STRUCT_LEN(80, virtchnl2_get_capabilities);
@@ -560,6 +601,23 @@ struct virtchnl2_queue_reg_chunks {
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_reg_chunks);
/**
+ * enum virtchnl2_vport_flags - Vport flags that indicate vport capabilities.
+ * @VIRTCHNL2_VPORT_UPLINK_PORT: Representatives of underlying physical ports
+ * @VIRTCHNL2_VPORT_INLINE_FLOW_STEER: Inline flow steering enabled
+ * @VIRTCHNL2_VPORT_INLINE_FLOW_STEER_RXQ: Inline flow steering enabled
+ * with explicit Rx queue action
+ * @VIRTCHNL2_VPORT_SIDEBAND_FLOW_STEER: Sideband flow steering enabled
+ * @VIRTCHNL2_VPORT_ENABLE_RDMA: RDMA is enabled for this vport
+ */
+enum virtchnl2_vport_flags {
+ VIRTCHNL2_VPORT_UPLINK_PORT = BIT(0),
+ VIRTCHNL2_VPORT_INLINE_FLOW_STEER = BIT(1),
+ VIRTCHNL2_VPORT_INLINE_FLOW_STEER_RXQ = BIT(2),
+ VIRTCHNL2_VPORT_SIDEBAND_FLOW_STEER = BIT(3),
+ VIRTCHNL2_VPORT_ENABLE_RDMA = BIT(4),
+};
+
+/**
* struct virtchnl2_create_vport - Create vport config info.
* @vport_type: See enum virtchnl2_vport_type.
* @txq_model: See virtchnl2_queue_model.
@@ -577,10 +635,18 @@ VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_reg_chunks);
* @max_mtu: Max MTU. CP populates this field on response.
* @vport_id: Vport id. CP populates this field on response.
* @default_mac_addr: Default MAC address.
- * @pad: Padding.
+ * @vport_flags: See enum virtchnl2_vport_flags.
* @rx_desc_ids: See VIRTCHNL2_RX_DESC_IDS definitions.
* @tx_desc_ids: See VIRTCHNL2_TX_DESC_IDS definitions.
* @pad1: Padding.
+ * @inline_flow_caps: Bit mask of supported inline-flow-steering
+ * flow types (See enum virtchnl2_flow_types)
+ * @sideband_flow_caps: Bit mask of supported sideband-flow-steering
+ * flow types (See enum virtchnl2_flow_types)
+ * @sideband_flow_actions: Bit mask of supported action types
+ * for sideband flow steering (See enum virtchnl2_action_types)
+ * @flow_steer_max_rules: Max rules allowed for inline and sideband
+ * flow steering combined
* @rss_algorithm: RSS algorithm.
* @rss_key_size: RSS key size.
* @rss_lut_size: RSS LUT size.
@@ -610,10 +676,14 @@ struct virtchnl2_create_vport {
__le16 max_mtu;
__le32 vport_id;
u8 default_mac_addr[ETH_ALEN];
- __le16 pad;
+ __le16 vport_flags;
__le64 rx_desc_ids;
__le64 tx_desc_ids;
- u8 pad1[72];
+ u8 pad1[48];
+ __le64 inline_flow_caps;
+ __le64 sideband_flow_caps;
+ __le32 sideband_flow_actions;
+ __le32 flow_steer_max_rules;
__le32 rss_algorithm;
__le16 rss_key_size;
__le16 rss_lut_size;
@@ -1270,4 +1340,474 @@ struct virtchnl2_promisc_info {
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_promisc_info);
+/**
+ * enum virtchnl2_ptp_caps - PTP capabilities
+ * @VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME: direct access to get the time of
+ * device clock
+ * @VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB: mailbox access to get the time of
+ * device clock
+ * @VIRTCHNL2_CAP_PTP_GET_CROSS_TIME: direct access to cross timestamp
+ * @VIRTCHNL2_CAP_PTP_GET_CROSS_TIME_MB: mailbox access to cross timestamp
+ * @VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME: direct access to set the time of
+ * device clock
+ * @VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB: mailbox access to set the time of
+ * device clock
+ * @VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK: direct access to adjust the time of device
+ * clock
+ * @VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB: mailbox access to adjust the time of
+ * device clock
+ * @VIRTCHNL2_CAP_PTP_TX_TSTAMPS: direct access to the Tx timestamping
+ * @VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB: mailbox access to the Tx timestamping
+ *
+ * PF/VF negotiates a set of supported PTP capabilities with the Control Plane.
+ * There are two access methods - mailbox (_MB) and direct.
+ * PTP capabilities enables Main Timer operations: get/set/adjust Main Timer,
+ * cross timestamping and the Tx timestamping.
+ */
+enum virtchnl2_ptp_caps {
+ VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME = BIT(0),
+ VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB = BIT(1),
+ VIRTCHNL2_CAP_PTP_GET_CROSS_TIME = BIT(2),
+ VIRTCHNL2_CAP_PTP_GET_CROSS_TIME_MB = BIT(3),
+ VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME = BIT(4),
+ VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB = BIT(5),
+ VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK = BIT(6),
+ VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB = BIT(7),
+ VIRTCHNL2_CAP_PTP_TX_TSTAMPS = BIT(8),
+ VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB = BIT(9),
+};
+
+/**
+ * struct virtchnl2_ptp_clk_reg_offsets - Offsets of device and PHY clocks
+ * registers.
+ * @dev_clk_ns_l: Device clock low register offset
+ * @dev_clk_ns_h: Device clock high register offset
+ * @phy_clk_ns_l: PHY clock low register offset
+ * @phy_clk_ns_h: PHY clock high register offset
+ * @cmd_sync_trigger: The command sync trigger register offset
+ * @pad: Padding for future extensions
+ */
+struct virtchnl2_ptp_clk_reg_offsets {
+ __le32 dev_clk_ns_l;
+ __le32 dev_clk_ns_h;
+ __le32 phy_clk_ns_l;
+ __le32 phy_clk_ns_h;
+ __le32 cmd_sync_trigger;
+ u8 pad[4];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_ptp_clk_reg_offsets);
+
+/**
+ * struct virtchnl2_ptp_cross_time_reg_offsets - Offsets of the device cross
+ * time registers.
+ * @sys_time_ns_l: System time low register offset
+ * @sys_time_ns_h: System time high register offset
+ * @cmd_sync_trigger: The command sync trigger register offset
+ * @pad: Padding for future extensions
+ */
+struct virtchnl2_ptp_cross_time_reg_offsets {
+ __le32 sys_time_ns_l;
+ __le32 sys_time_ns_h;
+ __le32 cmd_sync_trigger;
+ u8 pad[4];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_cross_time_reg_offsets);
+
+/**
+ * struct virtchnl2_ptp_clk_adj_reg_offsets - Offsets of device and PHY clocks
+ * adjustments registers.
+ * @dev_clk_cmd_type: Device clock command type register offset
+ * @dev_clk_incval_l: Device clock increment value low register offset
+ * @dev_clk_incval_h: Device clock increment value high registers offset
+ * @dev_clk_shadj_l: Device clock shadow adjust low register offset
+ * @dev_clk_shadj_h: Device clock shadow adjust high register offset
+ * @phy_clk_cmd_type: PHY timer command type register offset
+ * @phy_clk_incval_l: PHY timer increment value low register offset
+ * @phy_clk_incval_h: PHY timer increment value high register offset
+ * @phy_clk_shadj_l: PHY timer shadow adjust low register offset
+ * @phy_clk_shadj_h: PHY timer shadow adjust high register offset
+ */
+struct virtchnl2_ptp_clk_adj_reg_offsets {
+ __le32 dev_clk_cmd_type;
+ __le32 dev_clk_incval_l;
+ __le32 dev_clk_incval_h;
+ __le32 dev_clk_shadj_l;
+ __le32 dev_clk_shadj_h;
+ __le32 phy_clk_cmd_type;
+ __le32 phy_clk_incval_l;
+ __le32 phy_clk_incval_h;
+ __le32 phy_clk_shadj_l;
+ __le32 phy_clk_shadj_h;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(40, virtchnl2_ptp_clk_adj_reg_offsets);
+
+/**
+ * struct virtchnl2_ptp_tx_tstamp_latch_caps - PTP Tx timestamp latch
+ * capabilities.
+ * @tx_latch_reg_offset_l: Tx timestamp latch low register offset
+ * @tx_latch_reg_offset_h: Tx timestamp latch high register offset
+ * @index: Latch index provided to the Tx descriptor
+ * @pad: Padding for future extensions
+ */
+struct virtchnl2_ptp_tx_tstamp_latch_caps {
+ __le32 tx_latch_reg_offset_l;
+ __le32 tx_latch_reg_offset_h;
+ u8 index;
+ u8 pad[7];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_latch_caps);
+
+/**
+ * struct virtchnl2_ptp_get_vport_tx_tstamp_caps - Structure that defines Tx
+ * tstamp entries.
+ * @vport_id: Vport number
+ * @num_latches: Total number of latches
+ * @tstamp_ns_lo_bit: First bit for nanosecond part of the timestamp
+ * @tstamp_ns_hi_bit: Last bit for nanosecond part of the timestamp
+ * @pad: Padding for future tstamp granularity extensions
+ * @tstamp_latches: Capabilities of Tx timestamp entries
+ *
+ * PF/VF sends this message to negotiate the Tx timestamp latches for each
+ * Vport.
+ *
+ * Associated with VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS.
+ */
+struct virtchnl2_ptp_get_vport_tx_tstamp_caps {
+ __le32 vport_id;
+ __le16 num_latches;
+ u8 tstamp_ns_lo_bit;
+ u8 tstamp_ns_hi_bit;
+ u8 pad[8];
+
+ struct virtchnl2_ptp_tx_tstamp_latch_caps tstamp_latches[]
+ __counted_by_le(num_latches);
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_vport_tx_tstamp_caps);
+
+/**
+ * struct virtchnl2_ptp_get_caps - Get PTP capabilities
+ * @caps: PTP capability bitmap. See enum virtchnl2_ptp_caps
+ * @max_adj: The maximum possible frequency adjustment
+ * @base_incval: The default timer increment value
+ * @peer_mbx_q_id: ID of the PTP Device Control daemon queue
+ * @peer_id: Peer ID for PTP Device Control daemon
+ * @secondary_mbx: Indicates to the driver that it should create a secondary
+ * mailbox to interact with the control plane for PTP
+ * @pad: Padding for future extensions
+ * @clk_offsets: Main timer and PHY registers offsets
+ * @cross_time_offsets: Cross time registers offsets
+ * @clk_adj_offsets: Offsets needed to adjust the PHY and the main timer
+ *
+ * PF/VF sends this message to negotiate PTP capabilities. CP updates bitmap
+ * with supported features and fulfills appropriate structures.
+ * If HW uses primary MBX for PTP: secondary_mbx is set to false.
+ * If HW uses secondary MBX for PTP: secondary_mbx is set to true.
+ * The control plane has two mailboxes while the driver has one; the send to
+ * peer driver mechanism may be used to send a message using a valid
+ * peer_mbx_q_id and peer_id.
+ * If HW does not use send to peer driver: secondary_mbx is a don't care
+ * field and peer_mbx_q_id holds the invalid value (0xFFFF).
+ *
+ * Associated with VIRTCHNL2_OP_PTP_GET_CAPS.
+ */
+struct virtchnl2_ptp_get_caps {
+ __le32 caps;
+ __le32 max_adj;
+ __le64 base_incval;
+ __le16 peer_mbx_q_id;
+ u8 peer_id;
+ u8 secondary_mbx;
+ u8 pad[4];
+
+ struct virtchnl2_ptp_clk_reg_offsets clk_offsets;
+ struct virtchnl2_ptp_cross_time_reg_offsets cross_time_offsets;
+ struct virtchnl2_ptp_clk_adj_reg_offsets clk_adj_offsets;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(104, virtchnl2_ptp_get_caps);
+
+/**
+ * struct virtchnl2_ptp_tx_tstamp_latch - Structure that describes tx tstamp
+ * values, index and validity.
+ * @tstamp: Timestamp value
+ * @index: Timestamp index from which the value is read
+ * @valid: Timestamp validity
+ * @pad: Padding for future extensions
+ */
+struct virtchnl2_ptp_tx_tstamp_latch {
+ __le64 tstamp;
+ u8 index;
+ u8 valid;
+ u8 pad[6];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_latch);
+
+/**
+ * struct virtchnl2_ptp_get_vport_tx_tstamp_latches - Tx timestamp latches
+ * associated with the vport.
+ * @vport_id: Number of vport that requests the timestamp
+ * @num_latches: Number of latches
+ * @get_devtime_with_txtstmp: Flag to request device time along with Tx timestamp
+ * @pad: Padding for future extensions
+ * @device_time: device time if get_devtime_with_txtstmp was set in request
+ * @tstamp_latches: PTP TX timestamp latch
+ *
+ * PF/VF sends this message to receive a specified number of timestamps
+ * entries.
+ *
+ * Associated with VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP.
+ */
+struct virtchnl2_ptp_get_vport_tx_tstamp_latches {
+ __le32 vport_id;
+ __le16 num_latches;
+ u8 get_devtime_with_txtstmp;
+ u8 pad[1];
+ __le64 device_time;
+
+ struct virtchnl2_ptp_tx_tstamp_latch tstamp_latches[]
+ __counted_by_le(num_latches);
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_vport_tx_tstamp_latches);
+
+/**
+ * struct virtchnl2_ptp_get_dev_clk_time - Associated with message
+ * VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME.
+ * @dev_time_ns: Device clock time value in nanoseconds
+ *
+ * PF/VF sends this message to receive the time from the main timer.
+ */
+struct virtchnl2_ptp_get_dev_clk_time {
+ __le64 dev_time_ns;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_get_dev_clk_time);
+
+/**
+ * struct virtchnl2_ptp_get_cross_time: Associated with message
+ * VIRTCHNL2_OP_PTP_GET_CROSS_TIME.
+ * @sys_time_ns: System counter value expressed in nanoseconds, read
+ * synchronously with device time
+ * @dev_time_ns: Device clock time value expressed in nanoseconds
+ *
+ * PF/VF sends this message to receive the cross time.
+ */
+struct virtchnl2_ptp_get_cross_time {
+ __le64 sys_time_ns;
+ __le64 dev_time_ns;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_cross_time);
+
+/**
+ * struct virtchnl2_ptp_set_dev_clk_time: Associated with message
+ * VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME.
+ * @dev_time_ns: Device time value expressed in nanoseconds to set
+ *
+ * PF/VF sends this message to set the time of the main timer.
+ */
+struct virtchnl2_ptp_set_dev_clk_time {
+ __le64 dev_time_ns;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_set_dev_clk_time);
+
+/**
+ * struct virtchnl2_ptp_adj_dev_clk_fine: Associated with message
+ * VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE.
+ * @incval: Source timer increment value per clock cycle
+ *
+ * PF/VF sends this message to adjust the frequency of the main timer by the
+ * indicated increment value.
+ */
+struct virtchnl2_ptp_adj_dev_clk_fine {
+ __le64 incval;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_adj_dev_clk_fine);
+
+/**
+ * struct virtchnl2_ptp_adj_dev_clk_time: Associated with message
+ * VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME.
+ * @delta: Offset in nanoseconds to adjust the time by
+ *
+ * PF/VF sends this message to adjust the time of the main timer by the delta.
+ */
+struct virtchnl2_ptp_adj_dev_clk_time {
+ __le64 delta;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_adj_dev_clk_time);
+
+/**
+ * struct virtchnl2_mem_region - MMIO memory region
+ * @start_offset: starting offset of the MMIO memory region
+ * @size: size of the MMIO memory region
+ */
+struct virtchnl2_mem_region {
+ __le64 start_offset;
+ __le64 size;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_mem_region);
+
+/**
+ * struct virtchnl2_get_lan_memory_regions - List of LAN MMIO memory regions
+ * @num_memory_regions: number of memory regions
+ * @pad: Padding
+ * @mem_reg: List with memory region info
+ *
+ * PF/VF sends this message to learn what LAN MMIO memory regions it should map.
+ */
+struct virtchnl2_get_lan_memory_regions {
+ __le16 num_memory_regions;
+ u8 pad[6];
+ struct virtchnl2_mem_region mem_reg[];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_get_lan_memory_regions);
+
+#define VIRTCHNL2_MAX_NUM_PROTO_HDRS 4
+#define VIRTCHNL2_MAX_SIZE_RAW_PACKET 256
+#define VIRTCHNL2_MAX_NUM_ACTIONS 8
+
+/**
+ * struct virtchnl2_proto_hdr - represent one protocol header
+ * @hdr_type: See enum virtchnl2_proto_hdr_type
+ * @pad: padding
+ * @buffer_spec: binary buffer based on header type.
+ * @buffer_mask: mask applied on buffer_spec.
+ *
+ * Structure to hold protocol headers based on hdr_type
+ */
+struct virtchnl2_proto_hdr {
+ __le32 hdr_type;
+ u8 pad[4];
+ u8 buffer_spec[64];
+ u8 buffer_mask[64];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(136, virtchnl2_proto_hdr);
+
+/**
+ * struct virtchnl2_proto_hdrs - struct to represent match criteria
+ * @tunnel_level: Specifies where the protocol header(s) start from.
+ * Must be 0 when sending a raw packet request.
+ * 0 - from the outer layer
+ * 1 - from the first inner layer
+ * 2 - from the second inner layer
+ * @pad: Padding bytes
+ * @count: total number of protocol headers in proto_hdr. 0 for raw packet.
+ * @proto_hdr: Array of protocol headers
+ * @raw: struct holding raw packet buffer when count is 0
+ */
+struct virtchnl2_proto_hdrs {
+ u8 tunnel_level;
+ u8 pad[3];
+ __le32 count;
+ union {
+ struct virtchnl2_proto_hdr
+ proto_hdr[VIRTCHNL2_MAX_NUM_PROTO_HDRS];
+ struct {
+ __le16 pkt_len;
+ u8 spec[VIRTCHNL2_MAX_SIZE_RAW_PACKET];
+ u8 mask[VIRTCHNL2_MAX_SIZE_RAW_PACKET];
+ } raw;
+ };
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(552, virtchnl2_proto_hdrs);
+
+/**
+ * struct virtchnl2_rule_action - struct representing single action for a flow
+ * @action_type: see enum virtchnl2_action_types
+ * @act_conf: union representing action depending on action_type.
+ * @act_conf.q_id: queue id to redirect the packets to.
+ * @act_conf.q_grp_id: queue group id to redirect the packets to.
+ * @act_conf.ctr_id: Used for the count action. If the input value is
+ *                   0xFFFFFFFF, the control plane assigns a new counter and
+ *                   returns the counter ID to the driver. Otherwise it must
+ *                   be an existing counter ID given to the driver for an
+ *                   earlier flow, and this flow will share that counter.
+ * @act_conf.mark_id: Value used to mark the packets. Used for mark action.
+ * @act_conf.reserved: Reserved for future use.
+ */
+struct virtchnl2_rule_action {
+ __le32 action_type;
+ union {
+ __le32 q_id;
+ __le32 q_grp_id;
+ __le32 ctr_id;
+ __le32 mark_id;
+ u8 reserved[8];
+ } act_conf;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(12, virtchnl2_rule_action);
+
+/**
+ * struct virtchnl2_rule_action_set - struct representing multiple actions
+ * @count: number of valid actions in the action set of a rule
+ * @actions: array of struct virtchnl2_rule_action
+ */
+struct virtchnl2_rule_action_set {
+ /* action count must be less than VIRTCHNL2_MAX_NUM_ACTIONS */
+ __le32 count;
+ struct virtchnl2_rule_action actions[VIRTCHNL2_MAX_NUM_ACTIONS];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(100, virtchnl2_rule_action_set);
+
+/**
+ * struct virtchnl2_flow_rule - represent one flow steering rule
+ * @proto_hdrs: array of protocol header buffers representing match criteria
+ * @action_set: series of actions to be applied for given rule
+ * @priority: rule priority.
+ * @pad: padding for future extensions.
+ */
+struct virtchnl2_flow_rule {
+ struct virtchnl2_proto_hdrs proto_hdrs;
+ struct virtchnl2_rule_action_set action_set;
+ __le32 priority;
+ u8 pad[8];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(664, virtchnl2_flow_rule);
+
+enum virtchnl2_flow_rule_status {
+ VIRTCHNL2_FLOW_RULE_SUCCESS = 1,
+ VIRTCHNL2_FLOW_RULE_NORESOURCE = 2,
+ VIRTCHNL2_FLOW_RULE_EXIST = 3,
+ VIRTCHNL2_FLOW_RULE_TIMEOUT = 4,
+ VIRTCHNL2_FLOW_RULE_FLOW_TYPE_NOT_SUPPORTED = 5,
+ VIRTCHNL2_FLOW_RULE_MATCH_KEY_NOT_SUPPORTED = 6,
+ VIRTCHNL2_FLOW_RULE_ACTION_NOT_SUPPORTED = 7,
+ VIRTCHNL2_FLOW_RULE_ACTION_COMBINATION_INVALID = 8,
+ VIRTCHNL2_FLOW_RULE_ACTION_DATA_INVALID = 9,
+ VIRTCHNL2_FLOW_RULE_NOT_ADDED = 10,
+};
+
+/**
+ * struct virtchnl2_flow_rule_info - Represents a single flow rule
+ * @rule_id: Rule ID associated with the flow rule.
+ * @rule_cfg: Structure representing the rule.
+ * @status: Status of rule programming. See enum virtchnl2_flow_rule_status.
+ */
+struct virtchnl2_flow_rule_info {
+ __le32 rule_id;
+ struct virtchnl2_flow_rule rule_cfg;
+ __le32 status;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(672, virtchnl2_flow_rule_info);
+
+/**
+ * struct virtchnl2_flow_rule_add_del - add/delete a flow steering rule
+ * @vport_id: vport id for which the rule is to be added or deleted.
+ * @count: Number of rules to be added or deleted.
+ * @rule_info: Array of flow rules to be added or deleted.
+ *
+ * For VIRTCHNL2_OP_FLOW_RULE_ADD, rule_info contains the list of rules to be
+ * added. If rule_id is 0xFFFFFFFF, then the rule is programmed and not cached.
+ *
+ * For VIRTCHNL2_OP_FLOW_RULE_DEL, there are two possibilities: the structure
+ * can contain either an array of rule_ids or an array of match keys to be
+ * deleted. When match keys are used, the corresponding rule_ids must be
+ * 0xFFFFFFFF.
+ *
+ * The status member of each rule indicates the result. A maximum of 6 rules
+ * can be added or deleted using this method. The driver has to retry on any
+ * ADD or DEL opcode failure; the CP does not retry on failure.
+ */
+struct virtchnl2_flow_rule_add_del {
+ __le32 vport_id;
+ __le32 count;
+ struct virtchnl2_flow_rule_info rule_info[] __counted_by_le(count);
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_flow_rule_add_del);
+
#endif /* _VIRTCHNL_2_H_ */
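
As a quick sanity check on the 6-rule limit documented for VIRTCHNL2_OP_FLOW_RULE_ADD/DEL, the standalone sketch below only assumes the struct sizes asserted by VIRTCHNL2_CHECK_STRUCT_LEN() above and a hypothetical 4 KiB control queue buffer:

#include <stdio.h>
#include <stddef.h>

/* Sizes taken from the VIRTCHNL2_CHECK_STRUCT_LEN() assertions above. */
#define FLOW_RULE_ADD_DEL_HDR_LEN 8   /* struct virtchnl2_flow_rule_add_del */
#define FLOW_RULE_INFO_LEN        672 /* struct virtchnl2_flow_rule_info */
#define MAX_RULES_PER_MSG         6   /* documented per-message limit */
#define MAILBOX_BUF_LEN           4096 /* assumed control queue buffer size */

int main(void)
{
        size_t len = FLOW_RULE_ADD_DEL_HDR_LEN +
                     MAX_RULES_PER_MSG * FLOW_RULE_INFO_LEN;

        /* 8 + 6 * 672 = 4040 bytes, which still fits in a 4 KiB buffer. */
        printf("max payload: %zu bytes (fits 4 KiB buffer: %s)\n",
               len, len <= MAILBOX_BUF_LEN ? "yes" : "no");
        return 0;
}

With these sizes, six rules plus the 8-byte header come to 4040 bytes, which is presumably why the per-message limit is 6 for a 4 KiB buffer.
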
diff --git a/drivers/net/ethernet/intel/idpf/xdp.c b/drivers/net/ethernet/intel/idpf/xdp.c
new file mode 100644
index 000000000000..958d16f87424
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/xdp.c
@@ -0,0 +1,486 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2025 Intel Corporation */
+
+#include "idpf.h"
+#include "idpf_virtchnl.h"
+#include "xdp.h"
+#include "xsk.h"
+
+static int idpf_rxq_for_each(const struct idpf_vport *vport,
+ int (*fn)(struct idpf_rx_queue *rxq, void *arg),
+ void *arg)
+{
+ bool splitq = idpf_is_queue_model_split(vport->rxq_model);
+
+ if (!vport->rxq_grps)
+ return -ENETDOWN;
+
+ for (u32 i = 0; i < vport->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u32 num_rxq;
+
+ if (splitq)
+ num_rxq = rx_qgrp->splitq.num_rxq_sets;
+ else
+ num_rxq = rx_qgrp->singleq.num_rxq;
+
+ for (u32 j = 0; j < num_rxq; j++) {
+ struct idpf_rx_queue *q;
+ int err;
+
+ if (splitq)
+ q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
+ else
+ q = rx_qgrp->singleq.rxqs[j];
+
+ err = fn(q, arg);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
+{
+ const struct idpf_vport *vport = rxq->q_vector->vport;
+ bool split = idpf_is_queue_model_split(vport->rxq_model);
+ int err;
+
+ err = __xdp_rxq_info_reg(&rxq->xdp_rxq, vport->netdev, rxq->idx,
+ rxq->q_vector->napi.napi_id,
+ rxq->rx_buf_size);
+ if (err)
+ return err;
+
+ if (idpf_queue_has(XSK, rxq)) {
+ err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL,
+ rxq->pool);
+ if (err)
+ goto unreg;
+ } else {
+ const struct page_pool *pp;
+
+ pp = split ? rxq->bufq_sets[0].bufq.pp : rxq->pp;
+ xdp_rxq_info_attach_page_pool(&rxq->xdp_rxq, pp);
+ }
+
+ if (!split)
+ return 0;
+
+ rxq->xdpsqs = &vport->txqs[vport->xdp_txq_offset];
+ rxq->num_xdp_txq = vport->num_xdp_txq;
+
+ return 0;
+
+unreg:
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+
+ return err;
+}
+
+int idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq)
+{
+ return __idpf_xdp_rxq_info_init(rxq, NULL);
+}
+
+int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport)
+{
+ return idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_init, NULL);
+}
+
+static int __idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, void *arg)
+{
+ if (idpf_is_queue_model_split((size_t)arg)) {
+ rxq->xdpsqs = NULL;
+ rxq->num_xdp_txq = 0;
+ }
+
+ if (!idpf_queue_has(XSK, rxq))
+ xdp_rxq_info_detach_mem_model(&rxq->xdp_rxq);
+
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+
+ return 0;
+}
+
+void idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, u32 model)
+{
+ __idpf_xdp_rxq_info_deinit(rxq, (void *)(size_t)model);
+}
+
+void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport)
+{
+ idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_deinit,
+ (void *)(size_t)vport->rxq_model);
+}
+
+static int idpf_xdp_rxq_assign_prog(struct idpf_rx_queue *rxq, void *arg)
+{
+ struct bpf_prog *prog = arg;
+ struct bpf_prog *old;
+
+ if (prog)
+ bpf_prog_inc(prog);
+
+ old = rcu_replace_pointer(rxq->xdp_prog, prog, lockdep_rtnl_is_held());
+ if (old)
+ bpf_prog_put(old);
+
+ return 0;
+}
+
+void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport,
+ struct bpf_prog *xdp_prog)
+{
+ idpf_rxq_for_each(vport, idpf_xdp_rxq_assign_prog, xdp_prog);
+}
+
+static void idpf_xdp_tx_timer(struct work_struct *work);
+
+int idpf_xdpsqs_get(const struct idpf_vport *vport)
+{
+ struct libeth_xdpsq_timer **timers __free(kvfree) = NULL;
+ struct net_device *dev;
+ u32 sqs;
+
+ if (!idpf_xdp_enabled(vport))
+ return 0;
+
+ timers = kvcalloc(vport->num_xdp_txq, sizeof(*timers), GFP_KERNEL);
+ if (!timers)
+ return -ENOMEM;
+
+ for (u32 i = 0; i < vport->num_xdp_txq; i++) {
+ timers[i] = kzalloc_node(sizeof(*timers[i]), GFP_KERNEL,
+ cpu_to_mem(i));
+ if (!timers[i]) {
+ for (int j = i - 1; j >= 0; j--)
+ kfree(timers[j]);
+
+ return -ENOMEM;
+ }
+ }
+
+ dev = vport->netdev;
+ sqs = vport->xdp_txq_offset;
+
+ for (u32 i = sqs; i < vport->num_txq; i++) {
+ struct idpf_tx_queue *xdpsq = vport->txqs[i];
+
+ xdpsq->complq = xdpsq->txq_grp->complq;
+ kfree(xdpsq->refillq);
+ xdpsq->refillq = NULL;
+
+ idpf_queue_clear(FLOW_SCH_EN, xdpsq);
+ idpf_queue_clear(FLOW_SCH_EN, xdpsq->complq);
+ idpf_queue_set(NOIRQ, xdpsq);
+ idpf_queue_set(XDP, xdpsq);
+ idpf_queue_set(XDP, xdpsq->complq);
+
+ xdpsq->timer = timers[i - sqs];
+ libeth_xdpsq_get(&xdpsq->xdp_lock, dev, vport->xdpsq_share);
+ libeth_xdpsq_init_timer(xdpsq->timer, xdpsq, &xdpsq->xdp_lock,
+ idpf_xdp_tx_timer);
+
+ xdpsq->pending = 0;
+ xdpsq->xdp_tx = 0;
+ xdpsq->thresh = libeth_xdp_queue_threshold(xdpsq->desc_count);
+ }
+
+ return 0;
+}
+
+void idpf_xdpsqs_put(const struct idpf_vport *vport)
+{
+ struct net_device *dev;
+ u32 sqs;
+
+ if (!idpf_xdp_enabled(vport))
+ return;
+
+ dev = vport->netdev;
+ sqs = vport->xdp_txq_offset;
+
+ for (u32 i = sqs; i < vport->num_txq; i++) {
+ struct idpf_tx_queue *xdpsq = vport->txqs[i];
+
+ if (!idpf_queue_has_clear(XDP, xdpsq))
+ continue;
+
+ libeth_xdpsq_deinit_timer(xdpsq->timer);
+ libeth_xdpsq_put(&xdpsq->xdp_lock, dev);
+
+ kfree(xdpsq->timer);
+ xdpsq->refillq = NULL;
+ idpf_queue_clear(NOIRQ, xdpsq);
+ }
+}
+
+static int idpf_xdp_parse_cqe(const struct idpf_splitq_4b_tx_compl_desc *desc,
+ bool gen)
+{
+ u32 val;
+
+#ifdef __LIBETH_WORD_ACCESS
+ val = *(const u32 *)desc;
+#else
+ val = ((u32)le16_to_cpu(desc->q_head_compl_tag.q_head) << 16) |
+ le16_to_cpu(desc->qid_comptype_gen);
+#endif
+ if (!!(val & IDPF_TXD_COMPLQ_GEN_M) != gen)
+ return -ENODATA;
+
+ if (unlikely((val & GENMASK(IDPF_TXD_COMPLQ_GEN_S - 1, 0)) !=
+ FIELD_PREP(IDPF_TXD_COMPLQ_COMPL_TYPE_M,
+ IDPF_TXD_COMPLT_RS)))
+ return -EINVAL;
+
+ return upper_16_bits(val);
+}
+
+u32 idpf_xdpsq_poll(struct idpf_tx_queue *xdpsq, u32 budget)
+{
+ struct idpf_compl_queue *cq = xdpsq->complq;
+ u32 tx_ntc = xdpsq->next_to_clean;
+ u32 tx_cnt = xdpsq->desc_count;
+ u32 ntc = cq->next_to_clean;
+ u32 cnt = cq->desc_count;
+ u32 done_frames;
+ bool gen;
+
+ gen = idpf_queue_has(GEN_CHK, cq);
+
+ for (done_frames = 0; done_frames < budget; ) {
+ int ret;
+
+ ret = idpf_xdp_parse_cqe(&cq->comp_4b[ntc], gen);
+ if (ret >= 0) {
+ done_frames = ret > tx_ntc ? ret - tx_ntc :
+ ret + tx_cnt - tx_ntc;
+ goto next;
+ }
+
+ switch (ret) {
+ case -ENODATA:
+ goto out;
+ case -EINVAL:
+ break;
+ }
+
+next:
+ if (unlikely(++ntc == cnt)) {
+ ntc = 0;
+ gen = !gen;
+ idpf_queue_change(GEN_CHK, cq);
+ }
+ }
+
+out:
+ cq->next_to_clean = ntc;
+
+ return done_frames;
+}
+
+static u32 idpf_xdpsq_complete(void *_xdpsq, u32 budget)
+{
+ struct libeth_xdpsq_napi_stats ss = { };
+ struct idpf_tx_queue *xdpsq = _xdpsq;
+ u32 tx_ntc = xdpsq->next_to_clean;
+ u32 tx_cnt = xdpsq->desc_count;
+ struct xdp_frame_bulk bq;
+ struct libeth_cq_pp cp = {
+ .dev = xdpsq->dev,
+ .bq = &bq,
+ .xss = &ss,
+ .napi = true,
+ };
+ u32 done_frames;
+
+ done_frames = idpf_xdpsq_poll(xdpsq, budget);
+ if (unlikely(!done_frames))
+ return 0;
+
+ xdp_frame_bulk_init(&bq);
+
+ for (u32 i = 0; likely(i < done_frames); i++) {
+ libeth_xdp_complete_tx(&xdpsq->tx_buf[tx_ntc], &cp);
+
+ if (unlikely(++tx_ntc == tx_cnt))
+ tx_ntc = 0;
+ }
+
+ xdp_flush_frame_bulk(&bq);
+
+ xdpsq->next_to_clean = tx_ntc;
+ xdpsq->pending -= done_frames;
+ xdpsq->xdp_tx -= cp.xdp_tx;
+
+ return done_frames;
+}
+
+static u32 idpf_xdp_tx_prep(void *_xdpsq, struct libeth_xdpsq *sq)
+{
+ struct idpf_tx_queue *xdpsq = _xdpsq;
+ u32 free;
+
+ libeth_xdpsq_lock(&xdpsq->xdp_lock);
+
+ free = xdpsq->desc_count - xdpsq->pending;
+ if (free < xdpsq->thresh)
+ free += idpf_xdpsq_complete(xdpsq, xdpsq->thresh);
+
+ *sq = (struct libeth_xdpsq){
+ .sqes = xdpsq->tx_buf,
+ .descs = xdpsq->desc_ring,
+ .count = xdpsq->desc_count,
+ .lock = &xdpsq->xdp_lock,
+ .ntu = &xdpsq->next_to_use,
+ .pending = &xdpsq->pending,
+ .xdp_tx = &xdpsq->xdp_tx,
+ };
+
+ return free;
+}
+
+LIBETH_XDP_DEFINE_START();
+LIBETH_XDP_DEFINE_TIMER(static idpf_xdp_tx_timer, idpf_xdpsq_complete);
+LIBETH_XDP_DEFINE_FLUSH_TX(idpf_xdp_tx_flush_bulk, idpf_xdp_tx_prep,
+ idpf_xdp_tx_xmit);
+LIBETH_XDP_DEFINE_FLUSH_XMIT(static idpf_xdp_xmit_flush_bulk, idpf_xdp_tx_prep,
+ idpf_xdp_tx_xmit);
+LIBETH_XDP_DEFINE_END();
+
+int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
+{
+ const struct idpf_netdev_priv *np = netdev_priv(dev);
+ const struct idpf_vport *vport = np->vport;
+
+ if (unlikely(!netif_carrier_ok(dev) || !vport->link_up))
+ return -ENETDOWN;
+
+ return libeth_xdp_xmit_do_bulk(dev, n, frames, flags,
+ &vport->txqs[vport->xdp_txq_offset],
+ vport->num_xdp_txq,
+ idpf_xdp_xmit_flush_bulk,
+ idpf_xdp_tx_finalize);
+}
+
+static int idpf_xdpmo_rx_hash(const struct xdp_md *ctx, u32 *hash,
+ enum xdp_rss_hash_type *rss_type)
+{
+ const struct libeth_xdp_buff *xdp = (typeof(xdp))ctx;
+ struct idpf_xdp_rx_desc desc __uninitialized;
+ const struct idpf_rx_queue *rxq;
+ struct libeth_rx_pt pt;
+
+ rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);
+
+ idpf_xdp_get_qw0(&desc, xdp->desc);
+
+ pt = rxq->rx_ptype_lkup[idpf_xdp_rx_pt(&desc)];
+ if (!libeth_rx_pt_has_hash(rxq->xdp_rxq.dev, pt))
+ return -ENODATA;
+
+ idpf_xdp_get_qw2(&desc, xdp->desc);
+
+ return libeth_xdpmo_rx_hash(hash, rss_type, idpf_xdp_rx_hash(&desc),
+ pt);
+}
+
+static const struct xdp_metadata_ops idpf_xdpmo = {
+ .xmo_rx_hash = idpf_xdpmo_rx_hash,
+};
+
+void idpf_xdp_set_features(const struct idpf_vport *vport)
+{
+ if (!idpf_is_queue_model_split(vport->rxq_model))
+ return;
+
+ libeth_xdp_set_features_noredir(vport->netdev, &idpf_xdpmo,
+ idpf_get_max_tx_bufs(vport->adapter),
+ libeth_xsktmo);
+}
+
+static int idpf_xdp_setup_prog(struct idpf_vport *vport,
+ const struct netdev_bpf *xdp)
+{
+ const struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ struct bpf_prog *old, *prog = xdp->prog;
+ struct idpf_vport_config *cfg;
+ int ret;
+
+ cfg = vport->adapter->vport_config[vport->idx];
+
+ if (test_bit(IDPF_REMOVE_IN_PROG, vport->adapter->flags) ||
+ !test_bit(IDPF_VPORT_REG_NETDEV, cfg->flags) ||
+ !!vport->xdp_prog == !!prog) {
+ if (test_bit(IDPF_VPORT_UP, np->state))
+ idpf_xdp_copy_prog_to_rqs(vport, prog);
+
+ old = xchg(&vport->xdp_prog, prog);
+ if (old)
+ bpf_prog_put(old);
+
+ cfg->user_config.xdp_prog = prog;
+
+ return 0;
+ }
+
+ if (!vport->num_xdp_txq && vport->num_txq == cfg->max_q.max_txq) {
+ NL_SET_ERR_MSG_MOD(xdp->extack,
+ "No Tx queues available for XDP, please decrease the number of regular SQs");
+ return -ENOSPC;
+ }
+
+ old = cfg->user_config.xdp_prog;
+ cfg->user_config.xdp_prog = prog;
+
+ ret = idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE);
+ if (ret) {
+ NL_SET_ERR_MSG_MOD(xdp->extack,
+ "Could not reopen the vport after XDP setup");
+
+ cfg->user_config.xdp_prog = old;
+ old = prog;
+ }
+
+ if (old)
+ bpf_prog_put(old);
+
+ libeth_xdp_set_redirect(vport->netdev, vport->xdp_prog);
+
+ return ret;
+}
+
+int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct idpf_vport *vport;
+ int ret;
+
+ idpf_vport_ctrl_lock(dev);
+ vport = idpf_netdev_to_vport(dev);
+
+ if (!idpf_is_queue_model_split(vport->txq_model))
+ goto notsupp;
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ ret = idpf_xdp_setup_prog(vport, xdp);
+ break;
+ case XDP_SETUP_XSK_POOL:
+ ret = idpf_xsk_pool_setup(vport, xdp);
+ break;
+ default:
+notsupp:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ idpf_vport_ctrl_unlock(dev);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/intel/idpf/xdp.h b/drivers/net/ethernet/intel/idpf/xdp.h
new file mode 100644
index 000000000000..479f5ef3c604
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/xdp.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2025 Intel Corporation */
+
+#ifndef _IDPF_XDP_H_
+#define _IDPF_XDP_H_
+
+#include <net/libeth/xdp.h>
+
+#include "idpf_txrx.h"
+
+int idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq);
+int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport);
+void idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, u32 model);
+void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport);
+void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport,
+ struct bpf_prog *xdp_prog);
+
+int idpf_xdpsqs_get(const struct idpf_vport *vport);
+void idpf_xdpsqs_put(const struct idpf_vport *vport);
+
+u32 idpf_xdpsq_poll(struct idpf_tx_queue *xdpsq, u32 budget);
+bool idpf_xdp_tx_flush_bulk(struct libeth_xdp_tx_bulk *bq, u32 flags);
+
+/**
+ * idpf_xdp_tx_xmit - produce a single HW Tx descriptor out of XDP desc
+ * @desc: XDP descriptor to pull the DMA address and length from
+ * @i: descriptor index on the queue to fill
+ * @sq: XDP queue to produce the HW Tx descriptor on
+ * @priv: &xsk_tx_metadata_ops on XSk xmit or %NULL
+ */
+static inline void idpf_xdp_tx_xmit(struct libeth_xdp_tx_desc desc, u32 i,
+ const struct libeth_xdpsq *sq, u64 priv)
+{
+ struct idpf_flex_tx_desc *tx_desc = sq->descs;
+ u32 cmd;
+
+ cmd = FIELD_PREP(IDPF_FLEX_TXD_QW1_DTYPE_M,
+ IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2);
+ if (desc.flags & LIBETH_XDP_TX_LAST)
+ cmd |= FIELD_PREP(IDPF_FLEX_TXD_QW1_CMD_M,
+ IDPF_TX_DESC_CMD_EOP);
+ if (priv && (desc.flags & LIBETH_XDP_TX_CSUM))
+ cmd |= FIELD_PREP(IDPF_FLEX_TXD_QW1_CMD_M,
+ IDPF_TX_FLEX_DESC_CMD_CS_EN);
+
+ tx_desc = &tx_desc[i];
+ tx_desc->buf_addr = cpu_to_le64(desc.addr);
+#ifdef __LIBETH_WORD_ACCESS
+ *(u64 *)&tx_desc->qw1 = ((u64)desc.len << 48) | cmd;
+#else
+ tx_desc->qw1.buf_size = cpu_to_le16(desc.len);
+ tx_desc->qw1.cmd_dtype = cpu_to_le16(cmd);
+#endif
+}
+
+static inline void idpf_xdpsq_set_rs(const struct idpf_tx_queue *xdpsq)
+{
+ u32 ntu, cmd;
+
+ ntu = xdpsq->next_to_use;
+ if (unlikely(!ntu))
+ ntu = xdpsq->desc_count;
+
+ cmd = FIELD_PREP(IDPF_FLEX_TXD_QW1_CMD_M, IDPF_TX_DESC_CMD_RS);
+#ifdef __LIBETH_WORD_ACCESS
+ *(u64 *)&xdpsq->flex_tx[ntu - 1].q.qw1 |= cmd;
+#else
+ xdpsq->flex_tx[ntu - 1].q.qw1.cmd_dtype |= cpu_to_le16(cmd);
+#endif
+}
+
+static inline void idpf_xdpsq_update_tail(const struct idpf_tx_queue *xdpsq)
+{
+ dma_wmb();
+ writel_relaxed(xdpsq->next_to_use, xdpsq->tail);
+}
+
+/**
+ * idpf_xdp_tx_finalize - finalize sending over XDPSQ
+ * @_xdpsq: XDP Tx queue
+ * @sent: whether any frames were sent
+ * @flush: whether to update RS bit and the tail register
+ *
+ * Set the RS bit ("end of batch"), bump the tail, and queue the cleanup timer.
+ * To be called after a NAPI polling loop, at the end of .ndo_xdp_xmit() etc.
+ */
+static inline void idpf_xdp_tx_finalize(void *_xdpsq, bool sent, bool flush)
+{
+ struct idpf_tx_queue *xdpsq = _xdpsq;
+
+ if ((!flush || unlikely(!sent)) &&
+ likely(xdpsq->desc_count - 1 != xdpsq->pending))
+ return;
+
+ libeth_xdpsq_lock(&xdpsq->xdp_lock);
+
+ idpf_xdpsq_set_rs(xdpsq);
+ idpf_xdpsq_update_tail(xdpsq);
+
+ libeth_xdpsq_queue_timer(xdpsq->timer);
+
+ libeth_xdpsq_unlock(&xdpsq->xdp_lock);
+}
+
+struct idpf_xdp_rx_desc {
+ aligned_u64 qw0;
+#define IDPF_XDP_RX_BUFQ BIT_ULL(47)
+#define IDPF_XDP_RX_GEN BIT_ULL(46)
+#define IDPF_XDP_RX_LEN GENMASK_ULL(45, 32)
+#define IDPF_XDP_RX_PT GENMASK_ULL(25, 16)
+
+ aligned_u64 qw1;
+#define IDPF_XDP_RX_BUF GENMASK_ULL(47, 32)
+#define IDPF_XDP_RX_EOP BIT_ULL(1)
+
+ aligned_u64 qw2;
+#define IDPF_XDP_RX_HASH GENMASK_ULL(31, 0)
+
+ aligned_u64 qw3;
+} __aligned(4 * sizeof(u64));
+static_assert(sizeof(struct idpf_xdp_rx_desc) ==
+ sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3));
+
+#define idpf_xdp_rx_bufq(desc) !!((desc)->qw0 & IDPF_XDP_RX_BUFQ)
+#define idpf_xdp_rx_gen(desc) !!((desc)->qw0 & IDPF_XDP_RX_GEN)
+#define idpf_xdp_rx_len(desc) FIELD_GET(IDPF_XDP_RX_LEN, (desc)->qw0)
+#define idpf_xdp_rx_pt(desc) FIELD_GET(IDPF_XDP_RX_PT, (desc)->qw0)
+#define idpf_xdp_rx_buf(desc) FIELD_GET(IDPF_XDP_RX_BUF, (desc)->qw1)
+#define idpf_xdp_rx_eop(desc) !!((desc)->qw1 & IDPF_XDP_RX_EOP)
+#define idpf_xdp_rx_hash(desc) FIELD_GET(IDPF_XDP_RX_HASH, (desc)->qw2)
+
+static inline void
+idpf_xdp_get_qw0(struct idpf_xdp_rx_desc *desc,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
+{
+#ifdef __LIBETH_WORD_ACCESS
+ desc->qw0 = ((const typeof(desc))rxd)->qw0;
+#else
+ desc->qw0 = ((u64)le16_to_cpu(rxd->pktlen_gen_bufq_id) << 32) |
+ ((u64)le16_to_cpu(rxd->ptype_err_fflags0) << 16);
+#endif
+}
+
+static inline void
+idpf_xdp_get_qw1(struct idpf_xdp_rx_desc *desc,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
+{
+#ifdef __LIBETH_WORD_ACCESS
+ desc->qw1 = ((const typeof(desc))rxd)->qw1;
+#else
+ desc->qw1 = ((u64)le16_to_cpu(rxd->buf_id) << 32) |
+ rxd->status_err0_qw1;
+#endif
+}
+
+static inline void
+idpf_xdp_get_qw2(struct idpf_xdp_rx_desc *desc,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
+{
+#ifdef __LIBETH_WORD_ACCESS
+ desc->qw2 = ((const typeof(desc))rxd)->qw2;
+#else
+ desc->qw2 = ((u64)rxd->hash3 << 24) |
+ ((u64)rxd->ff2_mirrid_hash2.hash2 << 16) |
+ le16_to_cpu(rxd->hash1);
+#endif
+}
+
+void idpf_xdp_set_features(const struct idpf_vport *vport);
+
+int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp);
+int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags);
+
+#endif /* _IDPF_XDP_H_ */
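
To see how the IDPF_XDP_RX_* accessors above pull fields out of descriptor qword 0, here is a minimal userspace sketch; the mask layout mirrors struct idpf_xdp_rx_desc and the descriptor value is made up:

#include <stdint.h>
#include <stdio.h>

/* Same layout as IDPF_XDP_RX_GEN/LEN/PT in qw0 of struct idpf_xdp_rx_desc. */
#define RX_GEN_BIT   (1ULL << 46)
#define RX_LEN_SHIFT 32
#define RX_LEN_MASK  ((1ULL << 14) - 1) /* bits 45..32 */
#define RX_PT_SHIFT  16
#define RX_PT_MASK   ((1ULL << 10) - 1) /* bits 25..16 */

int main(void)
{
        /* Hypothetical completed descriptor: GEN set, len 1518, ptype 23. */
        uint64_t qw0 = RX_GEN_BIT | (1518ULL << RX_LEN_SHIFT) |
                       (23ULL << RX_PT_SHIFT);

        printf("gen=%u len=%u ptype=%u\n",
               (unsigned int)!!(qw0 & RX_GEN_BIT),
               (unsigned int)((qw0 >> RX_LEN_SHIFT) & RX_LEN_MASK),
               (unsigned int)((qw0 >> RX_PT_SHIFT) & RX_PT_MASK));
        return 0;
}

The driver itself uses FIELD_GET() on the same masks; the open-coded shifts here are only to keep the sketch self-contained.
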
diff --git a/drivers/net/ethernet/intel/idpf/xsk.c b/drivers/net/ethernet/intel/idpf/xsk.c
new file mode 100644
index 000000000000..fd2cc43ab43c
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/xsk.c
@@ -0,0 +1,633 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2025 Intel Corporation */
+
+#include <net/libeth/xsk.h>
+
+#include "idpf.h"
+#include "xdp.h"
+#include "xsk.h"
+
+static void idpf_xsk_tx_timer(struct work_struct *work);
+
+static void idpf_xsk_setup_rxq(const struct idpf_vport *vport,
+ struct idpf_rx_queue *rxq)
+{
+ struct xsk_buff_pool *pool;
+
+ pool = xsk_get_pool_from_qid(vport->netdev, rxq->idx);
+ if (!pool || !pool->dev || !xsk_buff_can_alloc(pool, 1))
+ return;
+
+ rxq->pool = pool;
+
+ idpf_queue_set(XSK, rxq);
+}
+
+static void idpf_xsk_setup_bufq(const struct idpf_vport *vport,
+ struct idpf_buf_queue *bufq)
+{
+ struct xsk_buff_pool *pool;
+ u32 qid = U32_MAX;
+
+ for (u32 i = 0; i < vport->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+
+ for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ if (&grp->splitq.bufq_sets[j].bufq == bufq) {
+ qid = grp->splitq.rxq_sets[0]->rxq.idx;
+ goto setup;
+ }
+ }
+ }
+
+setup:
+ pool = xsk_get_pool_from_qid(vport->netdev, qid);
+ if (!pool || !pool->dev || !xsk_buff_can_alloc(pool, 1))
+ return;
+
+ bufq->pool = pool;
+
+ idpf_queue_set(XSK, bufq);
+}
+
+static void idpf_xsk_setup_txq(const struct idpf_vport *vport,
+ struct idpf_tx_queue *txq)
+{
+ struct xsk_buff_pool *pool;
+ u32 qid;
+
+ idpf_queue_clear(XSK, txq);
+
+ if (!idpf_queue_has(XDP, txq))
+ return;
+
+ qid = txq->idx - vport->xdp_txq_offset;
+
+ pool = xsk_get_pool_from_qid(vport->netdev, qid);
+ if (!pool || !pool->dev)
+ return;
+
+ txq->pool = pool;
+ libeth_xdpsq_init_timer(txq->timer, txq, &txq->xdp_lock,
+ idpf_xsk_tx_timer);
+
+ idpf_queue_assign(NOIRQ, txq, xsk_uses_need_wakeup(pool));
+ idpf_queue_set(XSK, txq);
+}
+
+static void idpf_xsk_setup_complq(const struct idpf_vport *vport,
+ struct idpf_compl_queue *complq)
+{
+ const struct xsk_buff_pool *pool;
+ u32 qid;
+
+ idpf_queue_clear(XSK, complq);
+
+ if (!idpf_queue_has(XDP, complq))
+ return;
+
+ qid = complq->txq_grp->txqs[0]->idx - vport->xdp_txq_offset;
+
+ pool = xsk_get_pool_from_qid(vport->netdev, qid);
+ if (!pool || !pool->dev)
+ return;
+
+ idpf_queue_set(XSK, complq);
+}
+
+void idpf_xsk_setup_queue(const struct idpf_vport *vport, void *q,
+ enum virtchnl2_queue_type type)
+{
+ if (!idpf_xdp_enabled(vport))
+ return;
+
+ switch (type) {
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ idpf_xsk_setup_rxq(vport, q);
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ idpf_xsk_setup_bufq(vport, q);
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ idpf_xsk_setup_txq(vport, q);
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ idpf_xsk_setup_complq(vport, q);
+ break;
+ default:
+ break;
+ }
+}
+
+void idpf_xsk_clear_queue(void *q, enum virtchnl2_queue_type type)
+{
+ struct idpf_compl_queue *complq;
+ struct idpf_buf_queue *bufq;
+ struct idpf_rx_queue *rxq;
+ struct idpf_tx_queue *txq;
+
+ switch (type) {
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ rxq = q;
+ if (!idpf_queue_has_clear(XSK, rxq))
+ return;
+
+ rxq->pool = NULL;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ bufq = q;
+ if (!idpf_queue_has_clear(XSK, bufq))
+ return;
+
+ bufq->pool = NULL;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ txq = q;
+ if (!idpf_queue_has_clear(XSK, txq))
+ return;
+
+ idpf_queue_set(NOIRQ, txq);
+ txq->dev = txq->netdev->dev.parent;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ complq = q;
+ idpf_queue_clear(XSK, complq);
+ break;
+ default:
+ break;
+ }
+}
+
+void idpf_xsk_init_wakeup(struct idpf_q_vector *qv)
+{
+ libeth_xsk_init_wakeup(&qv->csd, &qv->napi);
+}
+
+void idpf_xsksq_clean(struct idpf_tx_queue *xdpsq)
+{
+ struct libeth_xdpsq_napi_stats ss = { };
+ u32 ntc = xdpsq->next_to_clean;
+ struct xdp_frame_bulk bq;
+ struct libeth_cq_pp cp = {
+ .dev = xdpsq->pool->dev,
+ .bq = &bq,
+ .xss = &ss,
+ };
+ u32 xsk_frames = 0;
+
+ xdp_frame_bulk_init(&bq);
+
+ while (ntc != xdpsq->next_to_use) {
+ struct libeth_sqe *sqe = &xdpsq->tx_buf[ntc];
+
+ if (sqe->type)
+ libeth_xdp_complete_tx(sqe, &cp);
+ else
+ xsk_frames++;
+
+ if (unlikely(++ntc == xdpsq->desc_count))
+ ntc = 0;
+ }
+
+ xdp_flush_frame_bulk(&bq);
+
+ if (xsk_frames)
+ xsk_tx_completed(xdpsq->pool, xsk_frames);
+}
+
+static noinline u32 idpf_xsksq_complete_slow(struct idpf_tx_queue *xdpsq,
+ u32 done)
+{
+ struct libeth_xdpsq_napi_stats ss = { };
+ u32 ntc = xdpsq->next_to_clean;
+ u32 cnt = xdpsq->desc_count;
+ struct xdp_frame_bulk bq;
+ struct libeth_cq_pp cp = {
+ .dev = xdpsq->pool->dev,
+ .bq = &bq,
+ .xss = &ss,
+ .napi = true,
+ };
+ u32 xsk_frames = 0;
+
+ xdp_frame_bulk_init(&bq);
+
+ for (u32 i = 0; likely(i < done); i++) {
+ struct libeth_sqe *sqe = &xdpsq->tx_buf[ntc];
+
+ if (sqe->type)
+ libeth_xdp_complete_tx(sqe, &cp);
+ else
+ xsk_frames++;
+
+ if (unlikely(++ntc == cnt))
+ ntc = 0;
+ }
+
+ xdp_flush_frame_bulk(&bq);
+
+ xdpsq->next_to_clean = ntc;
+ xdpsq->xdp_tx -= cp.xdp_tx;
+
+ return xsk_frames;
+}
+
+static __always_inline u32 idpf_xsksq_complete(void *_xdpsq, u32 budget)
+{
+ struct idpf_tx_queue *xdpsq = _xdpsq;
+ u32 tx_ntc = xdpsq->next_to_clean;
+ u32 tx_cnt = xdpsq->desc_count;
+ u32 done_frames;
+ u32 xsk_frames;
+
+ done_frames = idpf_xdpsq_poll(xdpsq, budget);
+ if (unlikely(!done_frames))
+ return 0;
+
+ if (likely(!xdpsq->xdp_tx)) {
+ tx_ntc += done_frames;
+ if (tx_ntc >= tx_cnt)
+ tx_ntc -= tx_cnt;
+
+ xdpsq->next_to_clean = tx_ntc;
+ xsk_frames = done_frames;
+
+ goto finalize;
+ }
+
+ xsk_frames = idpf_xsksq_complete_slow(xdpsq, done_frames);
+ if (xsk_frames)
+finalize:
+ xsk_tx_completed(xdpsq->pool, xsk_frames);
+
+ xdpsq->pending -= done_frames;
+
+ return done_frames;
+}
+
+static u32 idpf_xsk_tx_prep(void *_xdpsq, struct libeth_xdpsq *sq)
+{
+ struct idpf_tx_queue *xdpsq = _xdpsq;
+ u32 free;
+
+ libeth_xdpsq_lock(&xdpsq->xdp_lock);
+
+ free = xdpsq->desc_count - xdpsq->pending;
+ if (free < xdpsq->thresh)
+ free += idpf_xsksq_complete(xdpsq, xdpsq->thresh);
+
+ *sq = (struct libeth_xdpsq){
+ .pool = xdpsq->pool,
+ .sqes = xdpsq->tx_buf,
+ .descs = xdpsq->desc_ring,
+ .count = xdpsq->desc_count,
+ .lock = &xdpsq->xdp_lock,
+ .ntu = &xdpsq->next_to_use,
+ .pending = &xdpsq->pending,
+ .xdp_tx = &xdpsq->xdp_tx,
+ };
+
+ return free;
+}
+
+static u32 idpf_xsk_xmit_prep(void *_xdpsq, struct libeth_xdpsq *sq)
+{
+ struct idpf_tx_queue *xdpsq = _xdpsq;
+
+ *sq = (struct libeth_xdpsq){
+ .pool = xdpsq->pool,
+ .sqes = xdpsq->tx_buf,
+ .descs = xdpsq->desc_ring,
+ .count = xdpsq->desc_count,
+ .lock = &xdpsq->xdp_lock,
+ .ntu = &xdpsq->next_to_use,
+ .pending = &xdpsq->pending,
+ };
+
+ /*
+ * The queue is cleaned, the budget is already known, optimize out
+ * the second min() by passing the type limit.
+ */
+ return U32_MAX;
+}
+
+bool idpf_xsk_xmit(struct idpf_tx_queue *xsksq)
+{
+ u32 free;
+
+ libeth_xdpsq_lock(&xsksq->xdp_lock);
+
+ free = xsksq->desc_count - xsksq->pending;
+ if (free < xsksq->thresh)
+ free += idpf_xsksq_complete(xsksq, xsksq->thresh);
+
+ return libeth_xsk_xmit_do_bulk(xsksq->pool, xsksq,
+ min(free - 1, xsksq->thresh),
+ libeth_xsktmo, idpf_xsk_xmit_prep,
+ idpf_xdp_tx_xmit, idpf_xdp_tx_finalize);
+}
+
+LIBETH_XDP_DEFINE_START();
+LIBETH_XDP_DEFINE_TIMER(static idpf_xsk_tx_timer, idpf_xsksq_complete);
+LIBETH_XSK_DEFINE_FLUSH_TX(static idpf_xsk_tx_flush_bulk, idpf_xsk_tx_prep,
+ idpf_xdp_tx_xmit);
+LIBETH_XSK_DEFINE_RUN(static idpf_xsk_run_pass, idpf_xsk_run_prog,
+ idpf_xsk_tx_flush_bulk, idpf_rx_process_skb_fields);
+LIBETH_XSK_DEFINE_FINALIZE(static idpf_xsk_finalize_rx, idpf_xsk_tx_flush_bulk,
+ idpf_xdp_tx_finalize);
+LIBETH_XDP_DEFINE_END();
+
+static void idpf_xskfqe_init(const struct libeth_xskfq_fp *fq, u32 i)
+{
+ struct virtchnl2_splitq_rx_buf_desc *desc = fq->descs;
+
+ desc = &desc[i];
+#ifdef __LIBETH_WORD_ACCESS
+ *(u64 *)&desc->qword0 = i;
+#else
+ desc->qword0.buf_id = cpu_to_le16(i);
+#endif
+ desc->pkt_addr = cpu_to_le64(libeth_xsk_buff_xdp_get_dma(fq->fqes[i]));
+}
+
+static bool idpf_xskfq_refill_thresh(struct idpf_buf_queue *bufq, u32 count)
+{
+ struct libeth_xskfq_fp fq = {
+ .pool = bufq->pool,
+ .fqes = bufq->xsk_buf,
+ .descs = bufq->split_buf,
+ .ntu = bufq->next_to_use,
+ .count = bufq->desc_count,
+ };
+ u32 done;
+
+ done = libeth_xskfqe_alloc(&fq, count, idpf_xskfqe_init);
+ writel(fq.ntu, bufq->tail);
+
+ bufq->next_to_use = fq.ntu;
+ bufq->pending -= done;
+
+ return done == count;
+}
+
+static bool idpf_xskfq_refill(struct idpf_buf_queue *bufq)
+{
+ u32 count, rx_thresh = bufq->thresh;
+
+ count = ALIGN_DOWN(bufq->pending - 1, rx_thresh);
+
+ for (u32 i = 0; i < count; i += rx_thresh) {
+ if (unlikely(!idpf_xskfq_refill_thresh(bufq, rx_thresh)))
+ return false;
+ }
+
+ return true;
+}
+
+int idpf_xskfq_init(struct idpf_buf_queue *bufq)
+{
+ struct libeth_xskfq fq = {
+ .pool = bufq->pool,
+ .count = bufq->desc_count,
+ .nid = idpf_q_vector_to_mem(bufq->q_vector),
+ };
+ int ret;
+
+ ret = libeth_xskfq_create(&fq);
+ if (ret)
+ return ret;
+
+ bufq->xsk_buf = fq.fqes;
+ bufq->pending = fq.pending;
+ bufq->thresh = fq.thresh;
+ bufq->rx_buf_size = fq.buf_len;
+
+ if (!idpf_xskfq_refill(bufq))
+ netdev_err(bufq->pool->netdev,
+ "failed to allocate XSk buffers for qid %d\n",
+ bufq->pool->queue_id);
+
+ bufq->next_to_alloc = bufq->next_to_use;
+
+ idpf_queue_clear(HSPLIT_EN, bufq);
+ bufq->rx_hbuf_size = 0;
+
+ return 0;
+}
+
+void idpf_xskfq_rel(struct idpf_buf_queue *bufq)
+{
+ struct libeth_xskfq fq = {
+ .fqes = bufq->xsk_buf,
+ };
+
+ libeth_xskfq_destroy(&fq);
+
+ bufq->rx_buf_size = fq.buf_len;
+ bufq->thresh = fq.thresh;
+ bufq->pending = fq.pending;
+}
+
+struct idpf_xskfq_refill_set {
+ struct {
+ struct idpf_buf_queue *q;
+ u32 buf_id;
+ u32 pending;
+ } bufqs[IDPF_MAX_BUFQS_PER_RXQ_GRP];
+};
+
+static bool idpf_xskfq_refill_set(const struct idpf_xskfq_refill_set *set)
+{
+ bool ret = true;
+
+ for (u32 i = 0; i < ARRAY_SIZE(set->bufqs); i++) {
+ struct idpf_buf_queue *bufq = set->bufqs[i].q;
+ u32 ntc;
+
+ if (!bufq)
+ continue;
+
+ ntc = set->bufqs[i].buf_id;
+ if (unlikely(++ntc == bufq->desc_count))
+ ntc = 0;
+
+ bufq->next_to_clean = ntc;
+ bufq->pending += set->bufqs[i].pending;
+
+ if (bufq->pending > bufq->thresh)
+ ret &= idpf_xskfq_refill(bufq);
+ }
+
+ return ret;
+}
+
+int idpf_xskrq_poll(struct idpf_rx_queue *rxq, u32 budget)
+{
+ struct idpf_xskfq_refill_set set = { };
+ struct libeth_rq_napi_stats rs = { };
+ bool wake, gen, fail = false;
+ u32 ntc = rxq->next_to_clean;
+ struct libeth_xdp_buff *xdp;
+ LIBETH_XDP_ONSTACK_BULK(bq);
+ u32 cnt = rxq->desc_count;
+
+ wake = xsk_uses_need_wakeup(rxq->pool);
+ if (wake)
+ xsk_clear_rx_need_wakeup(rxq->pool);
+
+ gen = idpf_queue_has(GEN_CHK, rxq);
+
+ libeth_xsk_tx_init_bulk(&bq, rxq->xdp_prog, rxq->xdp_rxq.dev,
+ rxq->xdpsqs, rxq->num_xdp_txq);
+ xdp = rxq->xsk;
+
+ while (likely(rs.packets < budget)) {
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
+ struct idpf_xdp_rx_desc desc __uninitialized;
+ struct idpf_buf_queue *bufq;
+ u32 bufq_id, buf_id;
+
+ rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb;
+
+ idpf_xdp_get_qw0(&desc, rx_desc);
+ if (idpf_xdp_rx_gen(&desc) != gen)
+ break;
+
+ dma_rmb();
+
+ bufq_id = idpf_xdp_rx_bufq(&desc);
+ bufq = set.bufqs[bufq_id].q;
+ if (!bufq) {
+ bufq = &rxq->bufq_sets[bufq_id].bufq;
+ set.bufqs[bufq_id].q = bufq;
+ }
+
+ idpf_xdp_get_qw1(&desc, rx_desc);
+ buf_id = idpf_xdp_rx_buf(&desc);
+
+ set.bufqs[bufq_id].buf_id = buf_id;
+ set.bufqs[bufq_id].pending++;
+
+ xdp = libeth_xsk_process_buff(xdp, bufq->xsk_buf[buf_id],
+ idpf_xdp_rx_len(&desc));
+
+ if (unlikely(++ntc == cnt)) {
+ ntc = 0;
+ gen = !gen;
+ idpf_queue_change(GEN_CHK, rxq);
+ }
+
+ if (!idpf_xdp_rx_eop(&desc) || unlikely(!xdp))
+ continue;
+
+ fail = !idpf_xsk_run_pass(xdp, &bq, rxq->napi, &rs, rx_desc);
+ xdp = NULL;
+
+ if (fail)
+ break;
+ }
+
+ idpf_xsk_finalize_rx(&bq);
+
+ rxq->next_to_clean = ntc;
+ rxq->xsk = xdp;
+
+ fail |= !idpf_xskfq_refill_set(&set);
+
+ u64_stats_update_begin(&rxq->stats_sync);
+ u64_stats_add(&rxq->q_stats.packets, rs.packets);
+ u64_stats_add(&rxq->q_stats.bytes, rs.bytes);
+ u64_stats_update_end(&rxq->stats_sync);
+
+ if (!wake)
+ return unlikely(fail) ? budget : rs.packets;
+
+ if (unlikely(fail))
+ xsk_set_rx_need_wakeup(rxq->pool);
+
+ return rs.packets;
+}
+
+int idpf_xsk_pool_setup(struct idpf_vport *vport, struct netdev_bpf *bpf)
+{
+ struct xsk_buff_pool *pool = bpf->xsk.pool;
+ u32 qid = bpf->xsk.queue_id;
+ bool restart;
+ int ret;
+
+ if (pool && !IS_ALIGNED(xsk_pool_get_rx_frame_size(pool),
+ LIBETH_RX_BUF_STRIDE)) {
+ NL_SET_ERR_MSG_FMT_MOD(bpf->extack,
+ "%s: HW doesn't support frames sizes not aligned to %u (qid %u: %u)",
+ netdev_name(vport->netdev),
+ LIBETH_RX_BUF_STRIDE, qid,
+ xsk_pool_get_rx_frame_size(pool));
+ return -EINVAL;
+ }
+
+ restart = idpf_xdp_enabled(vport) && netif_running(vport->netdev);
+ if (!restart)
+ goto pool;
+
+ ret = idpf_qp_switch(vport, qid, false);
+ if (ret) {
+ NL_SET_ERR_MSG_FMT_MOD(bpf->extack,
+ "%s: failed to disable queue pair %u: %pe",
+ netdev_name(vport->netdev), qid,
+ ERR_PTR(ret));
+ return ret;
+ }
+
+pool:
+ ret = libeth_xsk_setup_pool(vport->netdev, qid, pool);
+ if (ret) {
+ NL_SET_ERR_MSG_FMT_MOD(bpf->extack,
+ "%s: failed to configure XSk pool for pair %u: %pe",
+ netdev_name(vport->netdev), qid,
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ if (!restart)
+ return 0;
+
+ ret = idpf_qp_switch(vport, qid, true);
+ if (ret) {
+ NL_SET_ERR_MSG_FMT_MOD(bpf->extack,
+ "%s: failed to enable queue pair %u: %pe",
+ netdev_name(vport->netdev), qid,
+ ERR_PTR(ret));
+ goto err_dis;
+ }
+
+ return 0;
+
+err_dis:
+ libeth_xsk_setup_pool(vport->netdev, qid, false);
+
+ return ret;
+}
+
+int idpf_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
+{
+ const struct idpf_netdev_priv *np = netdev_priv(dev);
+ const struct idpf_vport *vport = np->vport;
+ struct idpf_q_vector *q_vector;
+
+ if (unlikely(idpf_vport_ctrl_is_locked(dev)))
+ return -EBUSY;
+
+ if (unlikely(!vport->link_up))
+ return -ENETDOWN;
+
+ if (unlikely(!vport->num_xdp_txq))
+ return -ENXIO;
+
+ q_vector = idpf_find_rxq_vec(vport, qid);
+ if (unlikely(!q_vector->xsksq))
+ return -ENXIO;
+
+ libeth_xsk_wakeup(&q_vector->csd, qid);
+
+ return 0;
+}
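
One detail worth noting in idpf_xskfq_refill() above: buffers are handed back to the fill queue only in whole threshold-sized batches, with ALIGN_DOWN() trimming the pending count and the remainder waiting for the next pass. A minimal arithmetic sketch (illustrative numbers, power-of-two threshold assumed):

#include <stdint.h>
#include <stdio.h>

/* Power-of-two variant of the kernel's ALIGN_DOWN(). */
#define ALIGN_DOWN_POW2(x, a) ((x) & ~((uint32_t)(a) - 1))

int main(void)
{
        uint32_t pending = 300, thresh = 64; /* made-up queue state */
        uint32_t count = ALIGN_DOWN_POW2(pending - 1, thresh);

        printf("refill %u buffers in %u batches of %u, %u left pending\n",
               count, count / thresh, thresh, pending - count);
        return 0;
}

With these numbers the sketch refills 256 buffers in four batches of 64 and leaves 44 pending for the next pass.
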
diff --git a/drivers/net/ethernet/intel/idpf/xsk.h b/drivers/net/ethernet/intel/idpf/xsk.h
new file mode 100644
index 000000000000..b622d08c03e8
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/xsk.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2025 Intel Corporation */
+
+#ifndef _IDPF_XSK_H_
+#define _IDPF_XSK_H_
+
+#include <linux/types.h>
+
+enum virtchnl2_queue_type;
+struct idpf_buf_queue;
+struct idpf_q_vector;
+struct idpf_rx_queue;
+struct idpf_tx_queue;
+struct idpf_vport;
+struct net_device;
+struct netdev_bpf;
+
+void idpf_xsk_setup_queue(const struct idpf_vport *vport, void *q,
+ enum virtchnl2_queue_type type);
+void idpf_xsk_clear_queue(void *q, enum virtchnl2_queue_type type);
+void idpf_xsk_init_wakeup(struct idpf_q_vector *qv);
+
+int idpf_xskfq_init(struct idpf_buf_queue *bufq);
+void idpf_xskfq_rel(struct idpf_buf_queue *bufq);
+void idpf_xsksq_clean(struct idpf_tx_queue *xdpq);
+
+int idpf_xskrq_poll(struct idpf_rx_queue *rxq, u32 budget);
+bool idpf_xsk_xmit(struct idpf_tx_queue *xsksq);
+
+int idpf_xsk_pool_setup(struct idpf_vport *vport, struct netdev_bpf *xdp);
+int idpf_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
+
+#endif /* !_IDPF_XSK_H_ */
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index 463c0d26b9d4..6c1b702fd992 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -8,4 +8,4 @@ obj-$(CONFIG_IGB) += igb.o
igb-y := igb_main.o igb_ethtool.o e1000_82575.o \
e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
- e1000_i210.o igb_ptp.o igb_hwmon.o
+ e1000_i210.o igb_ptp.o igb_hwmon.o igb_xsk.o
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 64dfc362d1dc..44a85ad749a4 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -2372,7 +2372,7 @@ static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
checksum += nvm_data;
}
- if (checksum != (u16) NVM_SUM) {
+ if (checksum != NVM_SUM) {
hw_dbg("NVM Checksum Invalid\n");
ret_val = -E1000_ERR_NVM;
goto out;
@@ -2406,7 +2406,7 @@ static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
}
checksum += nvm_data;
}
- checksum = (u16) NVM_SUM - checksum;
+ checksum = NVM_SUM - checksum;
ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
&checksum);
if (ret_val)
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 503b239868e8..9db29b231d6a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -602,7 +602,7 @@ static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
}
checksum += nvm_data;
}
- checksum = (u16) NVM_SUM - checksum;
+ checksum = NVM_SUM - checksum;
ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
&checksum);
if (ret_val) {
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index 2dcd64d6dec3..c8638502c2be 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -636,7 +636,7 @@ s32 igb_validate_nvm_checksum(struct e1000_hw *hw)
checksum += nvm_data;
}
- if (checksum != (u16) NVM_SUM) {
+ if (checksum != NVM_SUM) {
hw_dbg("NVM Checksum Invalid\n");
ret_val = -E1000_ERR_NVM;
goto out;
@@ -668,7 +668,7 @@ s32 igb_update_nvm_checksum(struct e1000_hw *hw)
}
checksum += nvm_data;
}
- checksum = (u16) NVM_SUM - checksum;
+ checksum = NVM_SUM - checksum;
ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
if (ret_val)
hw_dbg("NVM Write Error while updating checksum.\n");
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 3c2dc7bdebb5..0fff1df81b7b 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -18,8 +18,10 @@
#include <linux/i2c-algo-bit.h>
#include <linux/pci.h>
#include <linux/mdio.h>
+#include <linux/lockdep.h>
#include <net/xdp.h>
+#include <net/xdp_sock_drv.h>
struct igb_adapter;
@@ -86,6 +88,7 @@ struct igb_adapter;
#define IGB_XDP_CONSUMED BIT(0)
#define IGB_XDP_TX BIT(1)
#define IGB_XDP_REDIR BIT(2)
+#define IGB_XDP_EXIT BIT(3)
struct vf_data_storage {
unsigned char vf_mac_addresses[ETH_ALEN];
@@ -214,7 +217,7 @@ static inline int igb_skb_pad(void)
#define IGB_MASTER_SLAVE e1000_ms_hw_default
#endif
-#define IGB_MNG_VLAN_NONE -1
+#define IGB_MNG_VLAN_NONE 0xFFFF
enum igb_tx_flags {
/* cmd_type flags */
@@ -255,6 +258,7 @@ enum igb_tx_flags {
enum igb_tx_buf_type {
IGB_TYPE_SKB = 0,
IGB_TYPE_XDP,
+ IGB_TYPE_XSK
};
/* wrapper around a pointer to a socket buffer,
@@ -320,6 +324,7 @@ struct igb_ring {
union { /* array of buffer info structs */
struct igb_tx_buffer *tx_buffer_info;
struct igb_rx_buffer *rx_buffer_info;
+ struct xdp_buff **rx_buffer_info_zc;
};
void *desc; /* descriptor ring memory */
unsigned long flags; /* ring specific flags */
@@ -357,6 +362,7 @@ struct igb_ring {
};
};
struct xdp_rxq_info xdp_rxq;
+ struct xsk_buff_pool *xsk_pool;
} ____cacheline_internodealigned_in_smp;
struct igb_q_vector {
@@ -384,7 +390,9 @@ enum e1000_ring_flags_t {
IGB_RING_FLAG_RX_SCTP_CSUM,
IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
IGB_RING_FLAG_TX_CTX_IDX,
- IGB_RING_FLAG_TX_DETECT_HANG
+ IGB_RING_FLAG_TX_DETECT_HANG,
+ IGB_RING_FLAG_TX_DISABLED,
+ IGB_RING_FLAG_RX_ALLOC_FAILED,
};
#define ring_uses_large_buffer(ring) \
@@ -618,7 +626,7 @@ struct igb_adapter {
struct delayed_work ptp_overflow_work;
struct work_struct ptp_tx_work;
struct sk_buff *ptp_tx_skb;
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
unsigned long ptp_tx_start;
unsigned long last_rx_ptp_check;
unsigned long last_rx_timestamp;
@@ -715,6 +723,8 @@ enum igb_boards {
extern char igb_driver_name[];
+void igb_set_queue_napi(struct igb_adapter *adapter, int q_idx,
+ struct napi_struct *napi);
int igb_xmit_xdp_ring(struct igb_adapter *adapter,
struct igb_ring *ring,
struct xdp_frame *xdpf);
@@ -731,12 +741,21 @@ int igb_setup_tx_resources(struct igb_ring *);
int igb_setup_rx_resources(struct igb_ring *);
void igb_free_tx_resources(struct igb_ring *);
void igb_free_rx_resources(struct igb_ring *);
+void igb_clean_tx_ring(struct igb_ring *tx_ring);
+void igb_clean_rx_ring(struct igb_ring *rx_ring);
void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
+void igb_finalize_xdp(struct igb_adapter *adapter, unsigned int status);
+void igb_update_rx_stats(struct igb_q_vector *q_vector, unsigned int packets,
+ unsigned int bytes);
void igb_setup_tctl(struct igb_adapter *);
void igb_setup_rctl(struct igb_adapter *);
void igb_setup_srrctl(struct igb_adapter *, struct igb_ring *);
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
+int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp);
+void igb_process_skb_fields(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb);
void igb_alloc_rx_buffers(struct igb_ring *, u16);
void igb_update_stats(struct igb_adapter *);
bool igb_has_link(struct igb_adapter *adapter);
@@ -752,8 +771,11 @@ void igb_ptp_tx_hang(struct igb_adapter *adapter);
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
ktime_t *timestamp);
-int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
-int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
+int igb_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int igb_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
unsigned int igb_get_max_rss_queues(struct igb_adapter *);
#ifdef CONFIG_IGB_HWMON
@@ -797,6 +819,33 @@ static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)
return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
}
+/* This function assumes __netif_tx_lock is held by the caller. */
+static inline void igb_xdp_ring_update_tail(struct igb_ring *ring)
+{
+ lockdep_assert_held(&txring_txq(ring)->_xmit_lock);
+
+ /* Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch.
+ */
+ wmb();
+ writel(ring->next_to_use, ring->tail);
+}
+
+static inline struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter)
+{
+ unsigned int r_idx = smp_processor_id();
+
+ if (r_idx >= adapter->num_tx_queues)
+ r_idx = r_idx % adapter->num_tx_queues;
+
+ return adapter->tx_ring[r_idx];
+}
+
+static inline bool igb_xdp_is_enabled(struct igb_adapter *adapter)
+{
+ return !!READ_ONCE(adapter->xdp_prog);
+}
+
int igb_add_filter(struct igb_adapter *adapter,
struct igb_nfc_filter *input);
int igb_erase_filter(struct igb_adapter *adapter,
@@ -807,4 +856,17 @@ int igb_add_mac_steering_filter(struct igb_adapter *adapter,
int igb_del_mac_steering_filter(struct igb_adapter *adapter,
const u8 *addr, u8 queue, u8 flags);
+struct xsk_buff_pool *igb_xsk_pool(struct igb_adapter *adapter,
+ struct igb_ring *ring);
+int igb_xsk_pool_setup(struct igb_adapter *adapter,
+ struct xsk_buff_pool *pool,
+ u16 qid);
+bool igb_alloc_rx_buffers_zc(struct igb_ring *rx_ring,
+ struct xsk_buff_pool *xsk_pool, u16 count);
+void igb_clean_rx_ring_zc(struct igb_ring *rx_ring);
+int igb_clean_rx_irq_zc(struct igb_q_vector *q_vector,
+ struct xsk_buff_pool *xsk_pool, const int budget);
+bool igb_xmit_zc(struct igb_ring *tx_ring, struct xsk_buff_pool *xsk_pool);
+int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
+
#endif /* _IGB_H_ */
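
For reference, igb_xdp_tx_queue_mapping() moved into this header simply folds the current CPU id onto the available Tx rings; a standalone sketch of that mapping with a made-up queue count:

#include <stdio.h>

int main(void)
{
        unsigned int num_tx_queues = 4; /* hypothetical adapter config */

        for (unsigned int cpu = 0; cpu < 8; cpu++) {
                unsigned int r_idx = cpu;

                /* Same logic as the inline helper above. */
                if (r_idx >= num_tx_queues)
                        r_idx = r_idx % num_tx_queues;

                printf("cpu %u -> tx ring %u\n", cpu, r_idx);
        }
        return 0;
}
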
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index ca6ccbc13954..b507576b28b2 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -920,11 +920,11 @@ static int igb_set_ringparam(struct net_device *netdev,
}
if (adapter->num_tx_queues > adapter->num_rx_queues)
- temp_ring = vmalloc(array_size(sizeof(struct igb_ring),
- adapter->num_tx_queues));
+ temp_ring = vmalloc_array(adapter->num_tx_queues,
+ sizeof(struct igb_ring));
else
- temp_ring = vmalloc(array_size(sizeof(struct igb_ring),
- adapter->num_rx_queues));
+ temp_ring = vmalloc_array(adapter->num_rx_queues,
+ sizeof(struct igb_ring));
if (!temp_ring) {
err = -ENOMEM;
@@ -2081,11 +2081,8 @@ static void igb_diag_test(struct net_device *netdev,
} else {
dev_info(&adapter->pdev->dev, "online testing starting\n");
- /* PHY is powered down when interface is down */
- if (if_running && igb_link_test(adapter, &data[TEST_LINK]))
+ if (igb_link_test(adapter, &data[TEST_LINK]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- else
- data[TEST_LINK] = 0;
/* Online tests aren't run; pass by default */
data[TEST_REG] = 0;
@@ -2284,7 +2281,7 @@ static int igb_get_sset_count(struct net_device *netdev, int sset)
case ETH_SS_PRIV_FLAGS:
return IGB_PRIV_FLAGS_STR_LEN;
default:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
}
@@ -2500,9 +2497,11 @@ static int igb_get_ethtool_nfc_all(struct igb_adapter *adapter,
return 0;
}
-static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+static int igb_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct igb_adapter *adapter = netdev_priv(dev);
+
cmd->data = 0;
/* Report default options for RSS on igb */
@@ -2542,6 +2541,13 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
return 0;
}
+static u32 igb_get_rx_ring_count(struct net_device *dev)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+
+ return adapter->num_rx_queues;
+}
+
static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -2549,10 +2555,6 @@ static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = adapter->num_rx_queues;
- ret = 0;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = adapter->nfc_filter_count;
ret = 0;
@@ -2563,9 +2565,6 @@ static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
case ETHTOOL_GRXCLSRLALL:
ret = igb_get_ethtool_nfc_all(adapter, cmd, rule_locs);
break;
- case ETHTOOL_GRXFH:
- ret = igb_get_rss_hash_opts(adapter, cmd);
- break;
default:
break;
}
@@ -2575,9 +2574,11 @@ static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
#define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \
IGB_FLAG_RSS_FIELD_IPV6_UDP)
-static int igb_set_rss_hash_opt(struct igb_adapter *adapter,
- struct ethtool_rxnfc *nfc)
+static int igb_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct igb_adapter *adapter = netdev_priv(dev);
u32 flags = adapter->flags;
/* RSS does not support anything other than hashing
@@ -3005,9 +3006,6 @@ static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = igb_set_rss_hash_opt(adapter, cmd);
- break;
case ETHTOOL_SRXCLSRLINS:
ret = igb_add_ethtool_nfc_entry(adapter, cmd);
break;
@@ -3478,6 +3476,7 @@ static const struct ethtool_ops igb_ethtool_ops = {
.get_ts_info = igb_get_ts_info,
.get_rxnfc = igb_get_rxnfc,
.set_rxnfc = igb_set_rxnfc,
+ .get_rx_ring_count = igb_get_rx_ring_count,
.get_eee = igb_get_eee,
.set_eee = igb_set_eee,
.get_module_info = igb_get_module_info,
@@ -3485,6 +3484,8 @@ static const struct ethtool_ops igb_ethtool_ops = {
.get_rxfh_indir_size = igb_get_rxfh_indir_size,
.get_rxfh = igb_get_rxfh,
.set_rxfh = igb_set_rxfh,
+ .get_rxfh_fields = igb_get_rxfh_fields,
+ .set_rxfh_fields = igb_set_rxfh_fields,
.get_channels = igb_get_channels,
.set_channels = igb_set_channels,
.get_priv_flags = igb_get_priv_flags,
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index b83df5f94b1f..dbea37269d2c 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -33,7 +33,6 @@
#include <linux/bpf_trace.h>
#include <linux/pm_runtime.h>
#include <linux/etherdevice.h>
-#include <linux/lockdep.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
@@ -116,8 +115,6 @@ static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
-static void igb_clean_tx_ring(struct igb_ring *);
-static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(struct timer_list *);
static void igb_watchdog(struct timer_list *);
@@ -475,12 +472,17 @@ rx_ring_summary:
for (i = 0; i < rx_ring->count; i++) {
const char *next_desc;
- struct igb_rx_buffer *buffer_info;
- buffer_info = &rx_ring->rx_buffer_info[i];
+ dma_addr_t dma = (dma_addr_t)0;
+ struct igb_rx_buffer *buffer_info = NULL;
rx_desc = IGB_RX_DESC(rx_ring, i);
u0 = (struct my_u0 *)rx_desc;
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ if (!rx_ring->xsk_pool) {
+ buffer_info = &rx_ring->rx_buffer_info[i];
+ dma = buffer_info->dma;
+ }
+
if (i == rx_ring->next_to_use)
next_desc = " NTU";
else if (i == rx_ring->next_to_clean)
@@ -500,11 +502,11 @@ rx_ring_summary:
"R ", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
- (u64)buffer_info->dma,
+ (u64)dma,
next_desc);
if (netif_msg_pktdata(adapter) &&
- buffer_info->dma && buffer_info->page) {
+ buffer_info && dma && buffer_info->page) {
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS,
16, 1,
@@ -637,6 +639,10 @@ static int __init igb_init_module(void)
dca_register_notify(&dca_notifier);
#endif
ret = pci_register_driver(&igb_driver);
+#ifdef CONFIG_IGB_DCA
+ if (ret)
+ dca_unregister_notify(&dca_notifier);
+#endif
return ret;
}
@@ -907,7 +913,7 @@ static int igb_request_msix(struct igb_adapter *adapter)
int i, err = 0, vector = 0, free_vector = 0;
err = request_irq(adapter->msix_entries[vector].vector,
- igb_msix_other, IRQF_NO_THREAD, netdev->name, adapter);
+ igb_msix_other, 0, netdev->name, adapter);
if (err)
goto err_out;
@@ -941,6 +947,9 @@ static int igb_request_msix(struct igb_adapter *adapter)
q_vector);
if (err)
goto err_free;
+
+ netif_napi_set_irq(&q_vector->napi,
+ adapter->msix_entries[vector].vector);
}
igb_configure_msix(adapter);
@@ -1188,7 +1197,8 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
return -ENOMEM;
/* initialize NAPI */
- netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll);
+ netif_napi_add_config(adapter->netdev, &q_vector->napi, igb_poll,
+ v_idx);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
@@ -1204,7 +1214,7 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
/* initialize pointer to rings */
ring = q_vector->ring;
- /* intialize ITR */
+ /* initialize ITR */
if (rxr_count) {
/* rx or rx/tx vector */
if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
@@ -1521,8 +1531,7 @@ static void igb_update_mng_vlan(struct igb_adapter *adapter)
adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
}
- if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
- (vid != old_vid) &&
+ if (old_vid != IGB_MNG_VLAN_NONE && vid != old_vid &&
!test_bit(old_vid, adapter->active_vlans)) {
/* remove VID from filter table */
igb_vfta_set(hw, vid, pf_id, false, true);
@@ -1986,7 +1995,11 @@ static void igb_configure(struct igb_adapter *adapter)
*/
for (i = 0; i < adapter->num_rx_queues; i++) {
struct igb_ring *ring = adapter->rx_ring[i];
- igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
+ if (ring->xsk_pool)
+ igb_alloc_rx_buffers_zc(ring, ring->xsk_pool,
+ igb_desc_unused(ring));
+ else
+ igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
}
}
@@ -2086,6 +2099,22 @@ static void igb_check_swap_media(struct igb_adapter *adapter)
wr32(E1000_CTRL_EXT, ctrl_ext);
}
+void igb_set_queue_napi(struct igb_adapter *adapter, int vector,
+ struct napi_struct *napi)
+{
+ struct igb_q_vector *q_vector = adapter->q_vector[vector];
+
+ if (q_vector->rx.ring)
+ netif_queue_set_napi(adapter->netdev,
+ q_vector->rx.ring->queue_index,
+ NETDEV_QUEUE_TYPE_RX, napi);
+
+ if (q_vector->tx.ring)
+ netif_queue_set_napi(adapter->netdev,
+ q_vector->tx.ring->queue_index,
+ NETDEV_QUEUE_TYPE_TX, napi);
+}
+
/**
* igb_up - Open the interface and prepare it to handle traffic
* @adapter: board private structure
@@ -2093,6 +2122,7 @@ static void igb_check_swap_media(struct igb_adapter *adapter)
int igb_up(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
+ struct napi_struct *napi;
int i;
/* hardware has been reset, we need to reload some things */
@@ -2100,8 +2130,11 @@ int igb_up(struct igb_adapter *adapter)
clear_bit(__IGB_DOWN, &adapter->state);
- for (i = 0; i < adapter->num_q_vectors; i++)
- napi_enable(&(adapter->q_vector[i]->napi));
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ napi = &adapter->q_vector[i]->napi;
+ napi_enable(napi);
+ igb_set_queue_napi(adapter, i, napi);
+ }
if (adapter->flags & IGB_FLAG_HAS_MSIX)
igb_configure_msix(adapter);
@@ -2171,12 +2204,13 @@ void igb_down(struct igb_adapter *adapter)
for (i = 0; i < adapter->num_q_vectors; i++) {
if (adapter->q_vector[i]) {
napi_synchronize(&adapter->q_vector[i]->napi);
+ igb_set_queue_napi(adapter, i, NULL);
napi_disable(&adapter->q_vector[i]->napi);
}
}
- del_timer_sync(&adapter->watchdog_timer);
- del_timer_sync(&adapter->phy_info_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->phy_info_timer);
/* record the stats before reset*/
spin_lock(&adapter->stats64_lock);
@@ -2486,7 +2520,7 @@ static int igb_set_features(struct net_device *netdev,
static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
- u16 flags,
+ u16 flags, bool *notified,
struct netlink_ext_ack *extack)
{
/* guarantee we can provide a unique filter for the unicast address */
@@ -2907,37 +2941,20 @@ static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf)
static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
+ struct igb_adapter *adapter = netdev_priv(dev);
+
switch (xdp->command) {
case XDP_SETUP_PROG:
return igb_xdp_setup(dev, xdp);
+ case XDP_SETUP_XSK_POOL:
+ return igb_xsk_pool_setup(adapter, xdp->xsk.pool,
+ xdp->xsk.queue_id);
default:
return -EINVAL;
}
}
-/* This function assumes __netif_tx_lock is held by the caller. */
-static void igb_xdp_ring_update_tail(struct igb_ring *ring)
-{
- lockdep_assert_held(&txring_txq(ring)->_xmit_lock);
-
- /* Force memory writes to complete before letting h/w know there
- * are new descriptors to fetch.
- */
- wmb();
- writel(ring->next_to_use, ring->tail);
-}
-
-static struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter)
-{
- unsigned int r_idx = smp_processor_id();
-
- if (r_idx >= adapter->num_tx_queues)
- r_idx = r_idx % adapter->num_tx_queues;
-
- return adapter->tx_ring[r_idx];
-}
-
-static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
+int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
{
struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
int cpu = smp_processor_id();
@@ -2951,7 +2968,8 @@ static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
/* During program transitions it's possible adapter->xdp_prog is assigned
* but ring has not been configured yet. In this case simply abort xmit.
*/
- tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
+ tx_ring = igb_xdp_is_enabled(adapter) ?
+ igb_xdp_tx_queue_mapping(adapter) : NULL;
if (unlikely(!tx_ring))
return IGB_XDP_CONSUMED;
@@ -2984,10 +3002,14 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
/* During program transitions it's possible adapter->xdp_prog is assigned
* but ring has not been configured yet. In this case simply abort xmit.
*/
- tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
+ tx_ring = igb_xdp_is_enabled(adapter) ?
+ igb_xdp_tx_queue_mapping(adapter) : NULL;
if (unlikely(!tx_ring))
return -ENXIO;
+ if (unlikely(test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags)))
+ return -ENXIO;
+
nq = txring_txq(tx_ring);
__netif_tx_lock(nq, cpu);
@@ -3038,6 +3060,9 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_setup_tc = igb_setup_tc,
.ndo_bpf = igb_xdp,
.ndo_xdp_xmit = igb_xdp_xmit,
+ .ndo_xsk_wakeup = igb_xsk_wakeup,
+ .ndo_hwtstamp_get = igb_ptp_hwtstamp_get,
+ .ndo_hwtstamp_set = igb_ptp_hwtstamp_set,
};
/**
@@ -3334,7 +3359,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->priv_flags |= IFF_SUPP_NOFCS;
netdev->priv_flags |= IFF_UNICAST_FLT;
- netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY;
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
@@ -3860,8 +3886,8 @@ static void igb_remove(struct pci_dev *pdev)
* disable watchdog from being rescheduled.
*/
set_bit(__IGB_DOWN, &adapter->state);
- del_timer_sync(&adapter->watchdog_timer);
- del_timer_sync(&adapter->phy_info_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->phy_info_timer);
cancel_work_sync(&adapter->reset_task);
cancel_work_sync(&adapter->watchdog_task);
@@ -3906,7 +3932,7 @@ static void igb_remove(struct pci_dev *pdev)
*
* This function initializes the vf specific data storage and then attempts to
* allocate the VFs. The reason for ordering it this way is because it is much
- * mor expensive time wise to disable SR-IOV than it is to allocate and free
+ * more expensive time wise to disable SR-IOV than it is to allocate and free
* the memory for the VFs.
**/
static void igb_probe_vfs(struct igb_adapter *adapter)
@@ -4113,8 +4139,9 @@ static int igb_sw_init(struct igb_adapter *adapter)
static int __igb_open(struct net_device *netdev, bool resuming)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct napi_struct *napi;
int err;
int i;
@@ -4166,8 +4193,11 @@ static int __igb_open(struct net_device *netdev, bool resuming)
/* From here on the code is the same as igb_up() */
clear_bit(__IGB_DOWN, &adapter->state);
- for (i = 0; i < adapter->num_q_vectors; i++)
- napi_enable(&(adapter->q_vector[i]->napi));
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ napi = &adapter->q_vector[i]->napi;
+ napi_enable(napi);
+ igb_set_queue_napi(adapter, i, napi);
+ }
/* Clear any pending interrupts. */
rd32(E1000_TSICR);
@@ -4360,6 +4390,8 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
u64 tdba = ring->dma;
int reg_idx = ring->reg_idx;
+ WRITE_ONCE(ring->xsk_pool, igb_xsk_pool(adapter, ring));
+
wr32(E1000_TDLEN(reg_idx),
ring->count * sizeof(union e1000_adv_tx_desc));
wr32(E1000_TDBAL(reg_idx),
@@ -4716,12 +4748,17 @@ void igb_setup_srrctl(struct igb_adapter *adapter, struct igb_ring *ring)
struct e1000_hw *hw = &adapter->hw;
int reg_idx = ring->reg_idx;
u32 srrctl = 0;
+ u32 buf_size;
- srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
- if (ring_uses_large_buffer(ring))
- srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ if (ring->xsk_pool)
+ buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool);
+ else if (ring_uses_large_buffer(ring))
+ buf_size = IGB_RXBUFFER_3072;
else
- srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ buf_size = IGB_RXBUFFER_2048;
+
+ srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ srrctl |= buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT;
srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
if (hw->mac.type >= e1000_82580)
srrctl |= E1000_SRRCTL_TIMESTAMP;
@@ -4753,8 +4790,17 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
u32 rxdctl = 0;
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
- WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
- MEM_TYPE_PAGE_SHARED, NULL));
+ WRITE_ONCE(ring->xsk_pool, igb_xsk_pool(adapter, ring));
+ if (ring->xsk_pool) {
+ WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL));
+ xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
+ } else {
+ WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL));
+ }
/* disable the queue */
wr32(E1000_RXDCTL(reg_idx), 0);
@@ -4781,9 +4827,12 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
rxdctl |= IGB_RX_HTHRESH << 8;
rxdctl |= IGB_RX_WTHRESH << 16;
- /* initialize rx_buffer_info */
- memset(ring->rx_buffer_info, 0,
- sizeof(struct igb_rx_buffer) * ring->count);
+ if (ring->xsk_pool)
+ memset(ring->rx_buffer_info_zc, 0,
+ sizeof(*ring->rx_buffer_info_zc) * ring->count);
+ else
+ memset(ring->rx_buffer_info, 0,
+ sizeof(*ring->rx_buffer_info) * ring->count);
/* initialize Rx descriptor 0 */
rx_desc = IGB_RX_DESC(ring, 0);
@@ -4884,19 +4933,24 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
* igb_clean_tx_ring - Free Tx Buffers
* @tx_ring: ring to be cleaned
**/
-static void igb_clean_tx_ring(struct igb_ring *tx_ring)
+void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
u16 i = tx_ring->next_to_clean;
struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
+ u32 xsk_frames = 0;
while (i != tx_ring->next_to_use) {
union e1000_adv_tx_desc *eop_desc, *tx_desc;
/* Free all the Tx ring sk_buffs or xdp frames */
- if (tx_buffer->type == IGB_TYPE_SKB)
+ if (tx_buffer->type == IGB_TYPE_SKB) {
dev_kfree_skb_any(tx_buffer->skb);
- else
+ } else if (tx_buffer->type == IGB_TYPE_XDP) {
xdp_return_frame(tx_buffer->xdpf);
+ } else if (tx_buffer->type == IGB_TYPE_XSK) {
+ xsk_frames++;
+ goto skip_for_xsk;
+ }
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -4927,6 +4981,7 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
DMA_TO_DEVICE);
}
+skip_for_xsk:
tx_buffer->next_to_watch = NULL;
/* move us one more past the eop_desc for start of next pkt */
@@ -4941,6 +4996,9 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
/* reset BQL for queue */
netdev_tx_reset_queue(txring_txq(tx_ring));
+ if (tx_ring->xsk_pool && xsk_frames)
+ xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
+
/* reset next_to_use and next_to_clean */
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
@@ -4971,8 +5029,13 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
rx_ring->xdp_prog = NULL;
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
- vfree(rx_ring->rx_buffer_info);
- rx_ring->rx_buffer_info = NULL;
+ if (rx_ring->xsk_pool) {
+ vfree(rx_ring->rx_buffer_info_zc);
+ rx_ring->rx_buffer_info_zc = NULL;
+ } else {
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ }
/* if not set, then don't free */
if (!rx_ring->desc)
@@ -5003,13 +5066,18 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
* igb_clean_rx_ring - Free Rx Buffers per Queue
* @rx_ring: ring to free buffers from
**/
-static void igb_clean_rx_ring(struct igb_ring *rx_ring)
+void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
u16 i = rx_ring->next_to_clean;
dev_kfree_skb(rx_ring->skb);
rx_ring->skb = NULL;
+ if (rx_ring->xsk_pool) {
+ igb_clean_rx_ring_zc(rx_ring);
+ goto skip_for_xsk;
+ }
+
/* Free all the Rx ring sk_buffs */
while (i != rx_ring->next_to_alloc) {
struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
@@ -5037,6 +5105,7 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
i = 0;
}
+skip_for_xsk:
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -5396,7 +5465,8 @@ static void igb_spoof_check(struct igb_adapter *adapter)
*/
static void igb_update_phy_info(struct timer_list *t)
{
- struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer);
+ struct igb_adapter *adapter = timer_container_of(adapter, t,
+ phy_info_timer);
igb_get_phy_info(&adapter->hw);
}
@@ -5486,7 +5556,8 @@ static void igb_check_lvmmc(struct igb_adapter *adapter)
**/
static void igb_watchdog(struct timer_list *t)
{
- struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+ struct igb_adapter *adapter = timer_container_of(adapter, t,
+ watchdog_timer);
/* Do the rest outside of interrupt context */
schedule_work(&adapter->watchdog_task);
}
@@ -5686,11 +5757,29 @@ no_wait:
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 eics = 0;
- for (i = 0; i < adapter->num_q_vectors; i++)
- eics |= adapter->q_vector[i]->eims_value;
- wr32(E1000_EICS, eics);
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ struct igb_q_vector *q_vector = adapter->q_vector[i];
+ struct igb_ring *rx_ring;
+
+ if (!q_vector->rx.ring)
+ continue;
+
+ rx_ring = adapter->rx_ring[q_vector->rx.ring->queue_index];
+
+ if (test_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) {
+ eics |= q_vector->eims_value;
+ clear_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
+ }
+ }
+ if (eics)
+ wr32(E1000_EICS, eics);
} else {
- wr32(E1000_ICS, E1000_ICS_RXDMT0);
+ struct igb_ring *rx_ring = adapter->rx_ring[0];
+
+ if (test_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) {
+ clear_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
+ wr32(E1000_ICS, E1000_ICS_RXDMT0);
+ }
}
igb_spoof_check(adapter);
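
The hunk above turns the watchdog's unconditional interrupt kick into a rearm-on-demand scheme: only queues whose RX ring recorded an allocation failure get their EICS bit set, and the sticky flag is cleared as it is consumed. A rough standalone sketch of that bookkeeping, with hypothetical ring and flag types rather than the driver's structures (the real code sets and clears the flag with atomic bitops on the hot path):

#include <stdint.h>
#include <stdio.h>

#define RING_FLAG_RX_ALLOC_FAILED	(1u << 0)	/* hypothetical flag bit */

struct ring {
	uint32_t flags;		/* sticky error flags, set when allocation fails */
	uint32_t eims_value;	/* interrupt bit owned by this queue */
};

/* Build the "kick these queues" mask and clear the sticky flags. */
static uint32_t collect_rearm_mask(struct ring *rings, int n)
{
	uint32_t eics = 0;

	for (int i = 0; i < n; i++) {
		if (rings[i].flags & RING_FLAG_RX_ALLOC_FAILED) {
			rings[i].flags &= ~RING_FLAG_RX_ALLOC_FAILED;
			eics |= rings[i].eims_value;
		}
	}
	return eics;	/* only written to the EICS register when non-zero */
}

int main(void)
{
	struct ring rings[3] = {
		{ RING_FLAG_RX_ALLOC_FAILED, 1u << 0 },
		{ 0,                         1u << 1 },
		{ RING_FLAG_RX_ALLOC_FAILED, 1u << 2 },
	};

	printf("rearm mask: 0x%x\n", collect_rearm_mask(rings, 3));	/* prints 0x5 */
	return 0;
}
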
@@ -6463,6 +6552,9 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
+ if (unlikely(test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags)))
+ return NETDEV_TX_BUSY;
+
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
first->type = IGB_TYPE_SKB;
@@ -6618,7 +6710,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
struct igb_adapter *adapter = netdev_priv(netdev);
int max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD;
- if (adapter->xdp_prog) {
+ if (igb_xdp_is_enabled(adapter)) {
int i;
for (i = 0; i < adapter->num_rx_queues; i++) {
@@ -8191,6 +8283,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
struct igb_q_vector *q_vector = container_of(napi,
struct igb_q_vector,
napi);
+ struct xsk_buff_pool *xsk_pool;
bool clean_complete = true;
int work_done = 0;
@@ -8202,7 +8295,12 @@ static int igb_poll(struct napi_struct *napi, int budget)
clean_complete = igb_clean_tx_irq(q_vector, budget);
if (q_vector->rx.ring) {
- int cleaned = igb_clean_rx_irq(q_vector, budget);
+ int cleaned;
+
+ xsk_pool = READ_ONCE(q_vector->rx.ring->xsk_pool);
+ cleaned = xsk_pool ?
+ igb_clean_rx_irq_zc(q_vector, xsk_pool, budget) :
+ igb_clean_rx_irq(q_vector, budget);
work_done += cleaned;
if (cleaned >= budget)
@@ -8231,13 +8329,18 @@ static int igb_poll(struct napi_struct *napi, int budget)
**/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
{
- struct igb_adapter *adapter = q_vector->adapter;
- struct igb_ring *tx_ring = q_vector->tx.ring;
- struct igb_tx_buffer *tx_buffer;
- union e1000_adv_tx_desc *tx_desc;
unsigned int total_bytes = 0, total_packets = 0;
+ struct igb_adapter *adapter = q_vector->adapter;
unsigned int budget = q_vector->tx.work_limit;
+ struct igb_ring *tx_ring = q_vector->tx.ring;
unsigned int i = tx_ring->next_to_clean;
+ union e1000_adv_tx_desc *tx_desc;
+ struct igb_tx_buffer *tx_buffer;
+ struct xsk_buff_pool *xsk_pool;
+ int cpu = smp_processor_id();
+ bool xsk_xmit_done = true;
+ struct netdev_queue *nq;
+ u32 xsk_frames = 0;
if (test_bit(__IGB_DOWN, &adapter->state))
return true;
@@ -8268,10 +8371,14 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
total_packets += tx_buffer->gso_segs;
/* free the skb */
- if (tx_buffer->type == IGB_TYPE_SKB)
+ if (tx_buffer->type == IGB_TYPE_SKB) {
napi_consume_skb(tx_buffer->skb, napi_budget);
- else
+ } else if (tx_buffer->type == IGB_TYPE_XDP) {
xdp_return_frame(tx_buffer->xdpf);
+ } else if (tx_buffer->type == IGB_TYPE_XSK) {
+ xsk_frames++;
+ goto skip_for_xsk;
+ }
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -8303,6 +8410,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
}
}
+skip_for_xsk:
/* move us one more past the eop_desc for start of next pkt */
tx_buffer++;
tx_desc++;
@@ -8331,6 +8439,21 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
q_vector->tx.total_bytes += total_bytes;
q_vector->tx.total_packets += total_packets;
+ xsk_pool = READ_ONCE(tx_ring->xsk_pool);
+ if (xsk_pool) {
+ if (xsk_frames)
+ xsk_tx_completed(xsk_pool, xsk_frames);
+ if (xsk_uses_need_wakeup(xsk_pool))
+ xsk_set_tx_need_wakeup(xsk_pool);
+
+ nq = txring_txq(tx_ring);
+ __netif_tx_lock(nq, cpu);
+ /* Avoid transmit queue timeout since we share it with the slow path */
+ txq_trans_cond_update(nq);
+ xsk_xmit_done = igb_xmit_zc(tx_ring, xsk_pool);
+ __netif_tx_unlock(nq);
+ }
+
if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
struct e1000_hw *hw = &adapter->hw;
@@ -8393,7 +8516,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
}
}
- return !!budget;
+ return !!budget && xsk_xmit_done;
}
/**
@@ -8584,9 +8707,8 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
return skb;
}
-static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
- struct igb_ring *rx_ring,
- struct xdp_buff *xdp)
+static int igb_run_xdp(struct igb_adapter *adapter, struct igb_ring *rx_ring,
+ struct xdp_buff *xdp)
{
int err, result = IGB_XDP_PASS;
struct bpf_prog *xdp_prog;
@@ -8626,7 +8748,7 @@ out_failure:
break;
}
xdp_out:
- return ERR_PTR(-result);
+ return result;
}
static unsigned int igb_rx_frame_truesize(struct igb_ring *rx_ring,
@@ -8752,10 +8874,6 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- /* XDP packets use error pointer so abort at this point */
- if (IS_ERR(skb))
- return true;
-
if (unlikely((igb_test_staterr(rx_desc,
E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
struct net_device *netdev = rx_ring->netdev;
@@ -8782,9 +8900,9 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring,
* order to populate the hash, checksum, VLAN, timestamp, protocol, and
* other fields within the skb.
**/
-static void igb_process_skb_fields(struct igb_ring *rx_ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+void igb_process_skb_fields(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
{
struct net_device *dev = rx_ring->netdev;
@@ -8866,6 +8984,38 @@ static void igb_put_rx_buffer(struct igb_ring *rx_ring,
rx_buffer->page = NULL;
}
+void igb_finalize_xdp(struct igb_adapter *adapter, unsigned int status)
+{
+ int cpu = smp_processor_id();
+ struct netdev_queue *nq;
+
+ if (status & IGB_XDP_REDIR)
+ xdp_do_flush();
+
+ if (status & IGB_XDP_TX) {
+ struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
+
+ nq = txring_txq(tx_ring);
+ __netif_tx_lock(nq, cpu);
+ igb_xdp_ring_update_tail(tx_ring);
+ __netif_tx_unlock(nq);
+ }
+}
+
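
igb_finalize_xdp() lets the RX loop accumulate XDP verdicts in a bitmask and pay for xdp_do_flush() and the TX tail write once per NAPI poll instead of once per frame. A minimal standalone sketch of that accumulate-then-flush pattern, using made-up verdict bits rather than the driver's IGB_XDP_* values:

#include <stdint.h>
#include <stdio.h>

#define XDP_VERDICT_TX		(1u << 0)	/* hypothetical verdict bits */
#define XDP_VERDICT_REDIR	(1u << 1)

static void flush_redirects(void) { puts("flush redirect maps once"); }
static void ring_tx_doorbell(void) { puts("write TX tail once"); }

static void finalize(uint32_t status)
{
	if (status & XDP_VERDICT_REDIR)
		flush_redirects();
	if (status & XDP_VERDICT_TX)
		ring_tx_doorbell();
}

int main(void)
{
	uint32_t per_packet[] = { XDP_VERDICT_TX, 0, XDP_VERDICT_REDIR, XDP_VERDICT_TX };
	uint32_t status = 0;

	/* RX loop: just OR the per-packet verdicts together. */
	for (unsigned int i = 0; i < sizeof(per_packet) / sizeof(per_packet[0]); i++)
		status |= per_packet[i];

	/* One flush and one doorbell for the whole budget. */
	if (status)
		finalize(status);
	return 0;
}
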
+void igb_update_rx_stats(struct igb_q_vector *q_vector, unsigned int packets,
+ unsigned int bytes)
+{
+ struct igb_ring *ring = q_vector->rx.ring;
+
+ u64_stats_update_begin(&ring->rx_syncp);
+ ring->rx_stats.packets += packets;
+ ring->rx_stats.bytes += bytes;
+ u64_stats_update_end(&ring->rx_syncp);
+
+ q_vector->rx.total_packets += packets;
+ q_vector->rx.total_bytes += bytes;
+}
+
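
igb_update_rx_stats() is the writer half of a sequence-counter protocol: readers of the 64-bit counters retry if the sequence changed underneath them. A rough single-threaded illustration of that retry protocol (not the kernel's u64_stats implementation; the comments name the helpers each step corresponds to):

#include <stdint.h>
#include <stdio.h>

struct stats {
	unsigned int seq;	/* odd while a writer is inside the section */
	uint64_t packets;
	uint64_t bytes;
};

static void writer_update(struct stats *s, uint64_t pkts, uint64_t bytes)
{
	s->seq++;		/* u64_stats_update_begin(): sequence becomes odd */
	s->packets += pkts;
	s->bytes += bytes;
	s->seq++;		/* u64_stats_update_end(): sequence is even again */
}

static void reader_snapshot(const struct stats *s, uint64_t *pkts, uint64_t *bytes)
{
	unsigned int start;

	do {
		start = s->seq;				/* u64_stats_fetch_begin() */
		*pkts = s->packets;
		*bytes = s->bytes;
	} while ((start & 1) || start != s->seq);	/* u64_stats_fetch_retry() */
}

int main(void)
{
	struct stats s = { 0 };
	uint64_t p, b;

	writer_update(&s, 64, 96000);
	reader_snapshot(&s, &p, &b);
	printf("packets=%llu bytes=%llu\n",
	       (unsigned long long)p, (unsigned long long)b);
	return 0;
}
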
static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
{
unsigned int total_bytes = 0, total_packets = 0;
@@ -8873,12 +9023,11 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
struct igb_ring *rx_ring = q_vector->rx.ring;
u16 cleaned_count = igb_desc_unused(rx_ring);
struct sk_buff *skb = rx_ring->skb;
- int cpu = smp_processor_id();
unsigned int xdp_xmit = 0;
- struct netdev_queue *nq;
struct xdp_buff xdp;
u32 frame_sz = 0;
int rx_buf_pgcnt;
+ int xdp_res = 0;
/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
@@ -8936,12 +9085,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size);
#endif
- skb = igb_run_xdp(adapter, rx_ring, &xdp);
+ xdp_res = igb_run_xdp(adapter, rx_ring, &xdp);
}
- if (IS_ERR(skb)) {
- unsigned int xdp_res = -PTR_ERR(skb);
-
+ if (xdp_res) {
if (xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR)) {
xdp_xmit |= xdp_res;
igb_rx_buffer_flip(rx_ring, rx_buffer, size);
@@ -8960,9 +9107,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
&xdp, timestamp);
/* exit if we failed to retrieve a buffer */
- if (!skb) {
+ if (!xdp_res && !skb) {
rx_ring->rx_stats.alloc_failed++;
rx_buffer->pagecnt_bias++;
+ set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
break;
}
@@ -8974,7 +9122,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
continue;
/* verify the packet layout is correct */
- if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
+ if (xdp_res || igb_cleanup_headers(rx_ring, rx_desc, skb)) {
skb = NULL;
continue;
}
@@ -8997,24 +9145,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
/* place incomplete frames back on ring for completion */
rx_ring->skb = skb;
- if (xdp_xmit & IGB_XDP_REDIR)
- xdp_do_flush();
-
- if (xdp_xmit & IGB_XDP_TX) {
- struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
-
- nq = txring_txq(tx_ring);
- __netif_tx_lock(nq, cpu);
- igb_xdp_ring_update_tail(tx_ring);
- __netif_tx_unlock(nq);
- }
+ if (xdp_xmit)
+ igb_finalize_xdp(adapter, xdp_xmit);
- u64_stats_update_begin(&rx_ring->rx_syncp);
- rx_ring->rx_stats.packets += total_packets;
- rx_ring->rx_stats.bytes += total_bytes;
- u64_stats_update_end(&rx_ring->rx_syncp);
- q_vector->rx.total_packets += total_packets;
- q_vector->rx.total_bytes += total_bytes;
+ igb_update_rx_stats(q_vector, total_packets, total_bytes);
if (cleaned_count)
igb_alloc_rx_buffers(rx_ring, cleaned_count);
@@ -9036,6 +9170,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_failed++;
+ set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
return false;
}
@@ -9052,6 +9187,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
__free_pages(page, igb_rx_pg_order(rx_ring));
rx_ring->rx_stats.alloc_failed++;
+ set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
return false;
}
@@ -9181,10 +9317,6 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
case SIOCGMIIREG:
case SIOCSMIIREG:
return igb_mii_ioctl(netdev, ifr, cmd);
- case SIOCGHWTSTAMP:
- return igb_ptp_get_ts_config(netdev, ifr);
- case SIOCSHWTSTAMP:
- return igb_ptp_set_ts_config(netdev, ifr);
default:
return -EOPNOTSUPP;
}
@@ -9467,7 +9599,6 @@ static int __igb_resume(struct device *dev, bool rpm)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- pci_save_state(pdev);
if (!pci_device_is_present(pdev))
return -ENODEV;
@@ -9590,8 +9721,11 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
+ rtnl_lock();
if (netif_running(netdev))
igb_down(adapter);
+ rtnl_unlock();
+
pci_disable_device(pdev);
/* Request a slot reset. */
@@ -9619,7 +9753,6 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
} else {
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
@@ -9650,16 +9783,21 @@ static void igb_io_resume(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
+ rtnl_lock();
if (netif_running(netdev)) {
if (!test_bit(__IGB_DOWN, &adapter->state)) {
dev_dbg(&pdev->dev, "Resuming from non-fatal error, do nothing.\n");
+ rtnl_unlock();
return;
}
+
if (igb_up(adapter)) {
dev_err(&pdev->dev, "igb_up failed after reset\n");
+ rtnl_unlock();
return;
}
}
+ rtnl_unlock();
netif_device_attach(netdev);
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index f94570556120..bd85d02ecadd 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -73,7 +73,7 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
static void igb_ptp_sdp_init(struct igb_adapter *adapter);
/* SYSTIM read access for the 82576 */
-static u64 igb_ptp_read_82576(const struct cyclecounter *cc)
+static u64 igb_ptp_read_82576(struct cyclecounter *cc)
{
struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
struct e1000_hw *hw = &igb->hw;
@@ -90,7 +90,7 @@ static u64 igb_ptp_read_82576(const struct cyclecounter *cc)
}
/* SYSTIM read access for the 82580 */
-static u64 igb_ptp_read_82580(const struct cyclecounter *cc)
+static u64 igb_ptp_read_82580(struct cyclecounter *cc)
{
struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
struct e1000_hw *hw = &igb->hw;
@@ -502,11 +502,10 @@ static int igb_ptp_feature_enable_82580(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
+ /* Both the rising and falling edges are timestamped */
+ if (rq->extts.flags & PTP_STRICT_FLAGS &&
+ (rq->extts.flags & PTP_ENABLE_FEATURE) &&
+ (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES)
return -EOPNOTSUPP;
if (on) {
@@ -652,13 +651,6 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* Reject requests failing to enable both edges. */
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
@@ -848,14 +840,11 @@ static void igb_ptp_overflow_check(struct work_struct *work)
struct igb_adapter *igb =
container_of(work, struct igb_adapter, ptp_overflow_work.work);
struct timespec64 ts;
- u64 ns;
/* Update the timecounter */
- ns = timecounter_read(&igb->tc);
+ ts = ns_to_timespec64(timecounter_read(&igb->tc));
- ts = ns_to_timespec64(ns);
- pr_debug("igb overflow check at %lld.%09lu\n",
- (long long) ts.tv_sec, ts.tv_nsec);
+ pr_debug("igb overflow check at %ptSp\n", &ts);
schedule_delayed_work(&igb->ptp_overflow_work,
IGB_SYSTIM_OVERFLOW_PERIOD);
@@ -1102,21 +1091,22 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
}
/**
- * igb_ptp_get_ts_config - get hardware time stamping config
+ * igb_ptp_hwtstamp_get - get hardware time stamping config
* @netdev: netdev struct
- * @ifr: interface struct
+ * @config: timestamping configuration structure
*
* Get the hwtstamp_config settings to return to the user. Rather than attempt
* to deconstruct the settings from the registers, just return a shadow copy
* of the last known settings.
**/
-int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
+int igb_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- struct hwtstamp_config *config = &adapter->tstamp_config;
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
- -EFAULT : 0;
+ *config = adapter->tstamp_config;
+
+ return 0;
}
/**
@@ -1137,7 +1127,7 @@ int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
* level 2 or 4".
*/
static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
struct e1000_hw *hw = &adapter->hw;
u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
@@ -1283,30 +1273,26 @@ static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
}
/**
- * igb_ptp_set_ts_config - set hardware time stamping config
+ * igb_ptp_hwtstamp_set - set hardware time stamping config
* @netdev: netdev struct
- * @ifr: interface struct
- *
+ * @config: timestamping configuration structure
+ * @extack: netlink extended ack structure for error reporting
**/
-int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
+int igb_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- struct hwtstamp_config config;
int err;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- err = igb_ptp_set_timestamp_mode(adapter, &config);
+ err = igb_ptp_set_timestamp_mode(adapter, config);
if (err)
return err;
/* save these settings for future reference */
- memcpy(&adapter->tstamp_config, &config,
- sizeof(adapter->tstamp_config));
+ adapter->tstamp_config = *config;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
/**
@@ -1350,6 +1336,9 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.n_per_out = IGB_N_PEROUT;
adapter->ptp_caps.n_pins = IGB_N_SDP;
adapter->ptp_caps.pps = 0;
+ adapter->ptp_caps.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
adapter->ptp_caps.pin_config = adapter->sdp_config;
adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
@@ -1372,6 +1361,9 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.n_ext_ts = IGB_N_EXTTS;
adapter->ptp_caps.n_per_out = IGB_N_PEROUT;
adapter->ptp_caps.n_pins = IGB_N_SDP;
+ adapter->ptp_caps.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
adapter->ptp_caps.pps = 1;
adapter->ptp_caps.pin_config = adapter->sdp_config;
adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580;
diff --git a/drivers/net/ethernet/intel/igb/igb_xsk.c b/drivers/net/ethernet/intel/igb/igb_xsk.c
new file mode 100644
index 000000000000..30ce5fbb5b77
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/igb_xsk.c
@@ -0,0 +1,562 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2018 Intel Corporation. */
+
+#include <linux/bpf_trace.h>
+#include <net/xdp_sock_drv.h>
+#include <net/xdp.h>
+
+#include "e1000_hw.h"
+#include "igb.h"
+
+static int igb_realloc_rx_buffer_info(struct igb_ring *ring, bool pool_present)
+{
+ int size = pool_present ?
+ sizeof(*ring->rx_buffer_info_zc) * ring->count :
+ sizeof(*ring->rx_buffer_info) * ring->count;
+ void *buff_info = vmalloc(size);
+
+ if (!buff_info)
+ return -ENOMEM;
+
+ if (pool_present) {
+ vfree(ring->rx_buffer_info);
+ ring->rx_buffer_info = NULL;
+ ring->rx_buffer_info_zc = buff_info;
+ } else {
+ vfree(ring->rx_buffer_info_zc);
+ ring->rx_buffer_info_zc = NULL;
+ ring->rx_buffer_info = buff_info;
+ }
+
+ return 0;
+}
+
+static void igb_txrx_ring_disable(struct igb_adapter *adapter, u16 qid)
+{
+ struct igb_ring *tx_ring = adapter->tx_ring[qid];
+ struct igb_ring *rx_ring = adapter->rx_ring[qid];
+ struct e1000_hw *hw = &adapter->hw;
+
+ set_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags);
+
+ wr32(E1000_TXDCTL(tx_ring->reg_idx), 0);
+ wr32(E1000_RXDCTL(rx_ring->reg_idx), 0);
+
+ synchronize_net();
+
+ /* Rx/Tx share the same napi context. */
+ napi_disable(&rx_ring->q_vector->napi);
+
+ igb_clean_tx_ring(tx_ring);
+ igb_clean_rx_ring(rx_ring);
+
+ memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats));
+ memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats));
+}
+
+static void igb_txrx_ring_enable(struct igb_adapter *adapter, u16 qid)
+{
+ struct igb_ring *tx_ring = adapter->tx_ring[qid];
+ struct igb_ring *rx_ring = adapter->rx_ring[qid];
+
+ igb_configure_tx_ring(adapter, tx_ring);
+ igb_configure_rx_ring(adapter, rx_ring);
+
+ synchronize_net();
+
+ clear_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags);
+
+ /* call igb_desc_unused which always leaves
+ * at least 1 descriptor unused to make sure
+ * next_to_use != next_to_clean
+ */
+ if (rx_ring->xsk_pool)
+ igb_alloc_rx_buffers_zc(rx_ring, rx_ring->xsk_pool,
+ igb_desc_unused(rx_ring));
+ else
+ igb_alloc_rx_buffers(rx_ring, igb_desc_unused(rx_ring));
+
+ /* Rx/Tx share the same napi context. */
+ napi_enable(&rx_ring->q_vector->napi);
+}
+
+struct xsk_buff_pool *igb_xsk_pool(struct igb_adapter *adapter,
+ struct igb_ring *ring)
+{
+ int qid = ring->queue_index;
+ struct xsk_buff_pool *pool;
+
+ pool = xsk_get_pool_from_qid(adapter->netdev, qid);
+
+ if (!igb_xdp_is_enabled(adapter))
+ return NULL;
+
+ return (pool && pool->dev) ? pool : NULL;
+}
+
+static int igb_xsk_pool_enable(struct igb_adapter *adapter,
+ struct xsk_buff_pool *pool,
+ u16 qid)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct igb_ring *rx_ring;
+ bool if_running;
+ int err;
+
+ if (qid >= adapter->num_rx_queues)
+ return -EINVAL;
+
+ if (qid >= netdev->real_num_rx_queues ||
+ qid >= netdev->real_num_tx_queues)
+ return -EINVAL;
+
+ err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IGB_RX_DMA_ATTR);
+ if (err)
+ return err;
+
+ rx_ring = adapter->rx_ring[qid];
+ if_running = netif_running(adapter->netdev) && igb_xdp_is_enabled(adapter);
+ if (if_running)
+ igb_txrx_ring_disable(adapter, qid);
+
+ if (if_running) {
+ err = igb_realloc_rx_buffer_info(rx_ring, true);
+ if (!err) {
+ igb_txrx_ring_enable(adapter, qid);
+ /* Kick start the NAPI context so that receiving will start */
+ err = igb_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
+ }
+
+ if (err) {
+ xsk_pool_dma_unmap(pool, IGB_RX_DMA_ATTR);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int igb_xsk_pool_disable(struct igb_adapter *adapter, u16 qid)
+{
+ struct xsk_buff_pool *pool;
+ struct igb_ring *rx_ring;
+ bool if_running;
+ int err;
+
+ pool = xsk_get_pool_from_qid(adapter->netdev, qid);
+ if (!pool)
+ return -EINVAL;
+
+ rx_ring = adapter->rx_ring[qid];
+ if_running = netif_running(adapter->netdev) && igb_xdp_is_enabled(adapter);
+ if (if_running)
+ igb_txrx_ring_disable(adapter, qid);
+
+ xsk_pool_dma_unmap(pool, IGB_RX_DMA_ATTR);
+
+ if (if_running) {
+ err = igb_realloc_rx_buffer_info(rx_ring, false);
+ if (err)
+ return err;
+
+ igb_txrx_ring_enable(adapter, qid);
+ }
+
+ return 0;
+}
+
+int igb_xsk_pool_setup(struct igb_adapter *adapter,
+ struct xsk_buff_pool *pool,
+ u16 qid)
+{
+ return pool ? igb_xsk_pool_enable(adapter, pool, qid) :
+ igb_xsk_pool_disable(adapter, qid);
+}
+
+static u16 igb_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
+ union e1000_adv_rx_desc *rx_desc, u16 count)
+{
+ dma_addr_t dma;
+ u16 buffs;
+ int i;
+
+ /* nothing to do */
+ if (!count)
+ return 0;
+
+ buffs = xsk_buff_alloc_batch(pool, xdp, count);
+ for (i = 0; i < buffs; i++) {
+ dma = xsk_buff_xdp_get_dma(*xdp);
+ rx_desc->read.pkt_addr = cpu_to_le64(dma);
+ rx_desc->wb.upper.length = 0;
+
+ rx_desc++;
+ xdp++;
+ }
+
+ return buffs;
+}
+
+bool igb_alloc_rx_buffers_zc(struct igb_ring *rx_ring,
+ struct xsk_buff_pool *xsk_pool, u16 count)
+{
+ u32 nb_buffs_extra = 0, nb_buffs = 0;
+ union e1000_adv_rx_desc *rx_desc;
+ u16 ntu = rx_ring->next_to_use;
+ u16 total_count = count;
+ struct xdp_buff **xdp;
+
+ rx_desc = IGB_RX_DESC(rx_ring, ntu);
+ xdp = &rx_ring->rx_buffer_info_zc[ntu];
+
+ if (ntu + count >= rx_ring->count) {
+ nb_buffs_extra = igb_fill_rx_descs(xsk_pool, xdp, rx_desc,
+ rx_ring->count - ntu);
+ if (nb_buffs_extra != rx_ring->count - ntu) {
+ ntu += nb_buffs_extra;
+ goto exit;
+ }
+ rx_desc = IGB_RX_DESC(rx_ring, 0);
+ xdp = rx_ring->rx_buffer_info_zc;
+ ntu = 0;
+ count -= nb_buffs_extra;
+ }
+
+ nb_buffs = igb_fill_rx_descs(xsk_pool, xdp, rx_desc, count);
+ ntu += nb_buffs;
+ if (ntu == rx_ring->count)
+ ntu = 0;
+
+ /* clear the length for the next_to_use descriptor */
+ rx_desc = IGB_RX_DESC(rx_ring, ntu);
+ rx_desc->wb.upper.length = 0;
+
+exit:
+ if (rx_ring->next_to_use != ntu) {
+ rx_ring->next_to_use = ntu;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(ntu, rx_ring->tail);
+ }
+
+ return total_count == (nb_buffs + nb_buffs_extra);
+}
+
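
igb_alloc_rx_buffers_zc() fills descriptors in at most two contiguous runs because both the descriptor ring and rx_buffer_info_zc wrap at ring->count. A standalone sketch of the same split, using a plain array in place of descriptors and an allocator that may return fewer entries than asked:

#include <stdio.h>

#define RING_SIZE 8

/* Pretend to "fill" a contiguous run; return how many entries we managed. */
static unsigned int fill_run(int *slots, unsigned int start, unsigned int count)
{
	for (unsigned int i = 0; i < count; i++)
		slots[start + i] = 1;
	return count;	/* a real allocator may return fewer */
}

static unsigned int fill_wrapping(int *slots, unsigned int ntu, unsigned int count)
{
	unsigned int filled;

	if (ntu + count >= RING_SIZE) {
		/* first run: from next_to_use up to the end of the ring */
		unsigned int tail = RING_SIZE - ntu;

		filled = fill_run(slots, ntu, tail);
		if (filled != tail)
			return ntu + filled;	/* short allocation: stop here */
		ntu = 0;
		count -= tail;
	}

	/* second run: from index 0 for whatever is left */
	filled = fill_run(slots, ntu, count);
	return (ntu + filled) % RING_SIZE;	/* new next_to_use */
}

int main(void)
{
	int slots[RING_SIZE] = { 0 };

	/* wraps: fills 6,7 and then 0..2, returning the new next_to_use (3) */
	printf("new ntu = %u\n", fill_wrapping(slots, 6, 5));
	return 0;
}
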
+void igb_clean_rx_ring_zc(struct igb_ring *rx_ring)
+{
+ u16 ntc = rx_ring->next_to_clean;
+ u16 ntu = rx_ring->next_to_use;
+
+ while (ntc != ntu) {
+ struct xdp_buff *xdp = rx_ring->rx_buffer_info_zc[ntc];
+
+ xsk_buff_free(xdp);
+ ntc++;
+ if (ntc >= rx_ring->count)
+ ntc = 0;
+ }
+}
+
+static struct sk_buff *igb_construct_skb_zc(struct igb_ring *rx_ring,
+ struct xdp_buff *xdp,
+ ktime_t timestamp)
+{
+ unsigned int totalsize = xdp->data_end - xdp->data_meta;
+ unsigned int metasize = xdp->data - xdp->data_meta;
+ struct sk_buff *skb;
+
+ net_prefetch(xdp->data_meta);
+
+ /* allocate a skb to store the frags */
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize);
+ if (unlikely(!skb))
+ return NULL;
+
+ if (timestamp)
+ skb_hwtstamps(skb)->hwtstamp = timestamp;
+
+ memcpy(__skb_put(skb, totalsize), xdp->data_meta,
+ ALIGN(totalsize, sizeof(long)));
+
+ if (metasize) {
+ skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
+
+ return skb;
+}
+
+static int igb_run_xdp_zc(struct igb_adapter *adapter, struct igb_ring *rx_ring,
+ struct xdp_buff *xdp, struct xsk_buff_pool *xsk_pool,
+ struct bpf_prog *xdp_prog)
+{
+ int err, result = IGB_XDP_PASS;
+ u32 act;
+
+ prefetchw(xdp->data_hard_start); /* xdp_frame write */
+
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+
+ if (likely(act == XDP_REDIRECT)) {
+ err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
+ if (!err)
+ return IGB_XDP_REDIR;
+
+ if (xsk_uses_need_wakeup(xsk_pool) &&
+ err == -ENOBUFS)
+ result = IGB_XDP_EXIT;
+ else
+ result = IGB_XDP_CONSUMED;
+ goto out_failure;
+ }
+
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ result = igb_xdp_xmit_back(adapter, xdp);
+ if (result == IGB_XDP_CONSUMED)
+ goto out_failure;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(adapter->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+out_failure:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_DROP:
+ result = IGB_XDP_CONSUMED;
+ break;
+ }
+
+ return result;
+}
+
+int igb_clean_rx_irq_zc(struct igb_q_vector *q_vector,
+ struct xsk_buff_pool *xsk_pool, const int budget)
+{
+ struct igb_adapter *adapter = q_vector->adapter;
+ unsigned int total_bytes = 0, total_packets = 0;
+ struct igb_ring *rx_ring = q_vector->rx.ring;
+ u32 ntc = rx_ring->next_to_clean;
+ struct bpf_prog *xdp_prog;
+ unsigned int xdp_xmit = 0;
+ bool failure = false;
+ u16 entries_to_alloc;
+ struct sk_buff *skb;
+
+ /* xdp_prog cannot be NULL in the ZC path */
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+
+ while (likely(total_packets < budget)) {
+ union e1000_adv_rx_desc *rx_desc;
+ ktime_t timestamp = 0;
+ struct xdp_buff *xdp;
+ unsigned int size;
+ int xdp_res = 0;
+
+ rx_desc = IGB_RX_DESC(rx_ring, ntc);
+ size = le16_to_cpu(rx_desc->wb.upper.length);
+ if (!size)
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * descriptor has been written back
+ */
+ dma_rmb();
+
+ xdp = rx_ring->rx_buffer_info_zc[ntc];
+ xsk_buff_set_size(xdp, size);
+ xsk_buff_dma_sync_for_cpu(xdp);
+
+ /* pull rx packet timestamp if available and valid */
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+ int ts_hdr_len;
+
+ ts_hdr_len = igb_ptp_rx_pktstamp(rx_ring->q_vector,
+ xdp->data,
+ &timestamp);
+
+ xdp->data += ts_hdr_len;
+ xdp->data_meta += ts_hdr_len;
+ size -= ts_hdr_len;
+ }
+
+ xdp_res = igb_run_xdp_zc(adapter, rx_ring, xdp, xsk_pool,
+ xdp_prog);
+
+ if (xdp_res) {
+ if (likely(xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR))) {
+ xdp_xmit |= xdp_res;
+ } else if (xdp_res == IGB_XDP_EXIT) {
+ failure = true;
+ break;
+ } else if (xdp_res == IGB_XDP_CONSUMED) {
+ xsk_buff_free(xdp);
+ }
+
+ total_packets++;
+ total_bytes += size;
+ ntc++;
+ if (ntc == rx_ring->count)
+ ntc = 0;
+ continue;
+ }
+
+ skb = igb_construct_skb_zc(rx_ring, xdp, timestamp);
+
+ /* exit if we failed to retrieve a buffer */
+ if (!skb) {
+ rx_ring->rx_stats.alloc_failed++;
+ set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
+ break;
+ }
+
+ xsk_buff_free(xdp);
+ ntc++;
+ if (ntc == rx_ring->count)
+ ntc = 0;
+
+ if (eth_skb_pad(skb))
+ continue;
+
+ /* probably a little skewed due to removing CRC */
+ total_bytes += skb->len;
+
+ /* populate checksum, timestamp, VLAN, and protocol */
+ igb_process_skb_fields(rx_ring, rx_desc, skb);
+
+ napi_gro_receive(&q_vector->napi, skb);
+
+ /* update budget accounting */
+ total_packets++;
+ }
+
+ rx_ring->next_to_clean = ntc;
+
+ if (xdp_xmit)
+ igb_finalize_xdp(adapter, xdp_xmit);
+
+ igb_update_rx_stats(q_vector, total_packets, total_bytes);
+
+ entries_to_alloc = igb_desc_unused(rx_ring);
+ if (entries_to_alloc >= IGB_RX_BUFFER_WRITE)
+ failure |= !igb_alloc_rx_buffers_zc(rx_ring, xsk_pool,
+ entries_to_alloc);
+
+ if (xsk_uses_need_wakeup(xsk_pool)) {
+ if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
+ xsk_set_rx_need_wakeup(xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(xsk_pool);
+
+ return (int)total_packets;
+ }
+ return failure ? budget : (int)total_packets;
+}
+
+bool igb_xmit_zc(struct igb_ring *tx_ring, struct xsk_buff_pool *xsk_pool)
+{
+ unsigned int budget = igb_desc_unused(tx_ring);
+ u32 cmd_type, olinfo_status, nb_pkts, i = 0;
+ struct xdp_desc *descs = xsk_pool->tx_descs;
+ union e1000_adv_tx_desc *tx_desc = NULL;
+ struct igb_tx_buffer *tx_buffer_info;
+ unsigned int total_bytes = 0;
+ dma_addr_t dma;
+
+ if (!netif_carrier_ok(tx_ring->netdev))
+ return true;
+
+ if (test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags))
+ return true;
+
+ nb_pkts = xsk_tx_peek_release_desc_batch(xsk_pool, budget);
+ if (!nb_pkts)
+ return true;
+
+ for (; i < nb_pkts; i++) {
+ dma = xsk_buff_raw_get_dma(xsk_pool, descs[i].addr);
+ xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, descs[i].len);
+
+ tx_buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ tx_buffer_info->bytecount = descs[i].len;
+ tx_buffer_info->type = IGB_TYPE_XSK;
+ tx_buffer_info->xdpf = NULL;
+ tx_buffer_info->gso_segs = 1;
+ tx_buffer_info->time_stamp = jiffies;
+
+ tx_desc = IGB_TX_DESC(tx_ring, tx_ring->next_to_use);
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+ /* put descriptor type bits */
+ cmd_type = E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_DEXT |
+ E1000_ADVTXD_DCMD_IFCS;
+ olinfo_status = descs[i].len << E1000_ADVTXD_PAYLEN_SHIFT;
+
+ /* FIXME: This sets the Report Status (RS) bit for every
+ * descriptor. One nice to have optimization would be to set it
+ * only for the last descriptor in the whole batch. See Intel
+ * ice driver for an example on how to do it.
+ */
+ cmd_type |= descs[i].len | IGB_TXD_DCMD;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+
+ total_bytes += descs[i].len;
+
+ tx_ring->next_to_use++;
+ tx_buffer_info->next_to_watch = tx_desc;
+ if (tx_ring->next_to_use == tx_ring->count)
+ tx_ring->next_to_use = 0;
+ }
+
+ netdev_tx_sent_queue(txring_txq(tx_ring), total_bytes);
+ igb_xdp_ring_update_tail(tx_ring);
+
+ return nb_pkts < budget;
+}
+
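
The FIXME above suggests requesting a hardware writeback (the RS bit) only on the final descriptor of each batch, so completion is reported once per burst rather than once per frame; the TX cleanup path then treats that descriptor as covering the whole burst. A hedged sketch of that shape with made-up command bits, not the driver's IGB_TXD_DCMD bundle:

#include <stdint.h>
#include <stdio.h>

#define CMD_DATA	(1u << 0)	/* hypothetical command bits */
#define CMD_EOP		(1u << 1)
#define CMD_RS		(1u << 2)	/* "report status": ask HW for a writeback */

struct desc { uint32_t cmd; };

static void fill_batch(struct desc *descs, unsigned int nb_pkts)
{
	for (unsigned int i = 0; i < nb_pkts; i++) {
		uint32_t cmd = CMD_DATA | CMD_EOP;

		/* Only the last descriptor of the batch asks for a writeback. */
		if (i == nb_pkts - 1)
			cmd |= CMD_RS;

		descs[i].cmd = cmd;
	}
}

int main(void)
{
	struct desc descs[4];

	fill_batch(descs, 4);
	for (unsigned int i = 0; i < 4; i++)
		printf("desc %u cmd=0x%x\n", i, descs[i].cmd);
	return 0;
}
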
+int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct igb_ring *ring;
+ u32 eics = 0;
+
+ if (test_bit(__IGB_DOWN, &adapter->state))
+ return -ENETDOWN;
+
+ if (!igb_xdp_is_enabled(adapter))
+ return -EINVAL;
+
+ if (qid >= adapter->num_tx_queues)
+ return -EINVAL;
+
+ ring = adapter->tx_ring[qid];
+
+ if (test_bit(IGB_RING_FLAG_TX_DISABLED, &ring->flags))
+ return -ENETDOWN;
+
+ if (!READ_ONCE(ring->xsk_pool))
+ return -EINVAL;
+
+ if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
+ /* Cause software interrupt */
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
+ eics |= ring->q_vector->eims_value;
+ wr32(E1000_EICS, eics);
+ } else {
+ wr32(E1000_ICS, E1000_ICS_RXDMT0);
+ }
+ }
+
+ return 0;
+}
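
igb_xsk_wakeup() is the kernel half of the need_wakeup protocol; user space is expected to issue the wakeup syscall only when the driver has set the flag on the ring. A rough user-space counterpart, assuming libxdp's xsk helpers and an already-bound AF_XDP socket (the variable names here are hypothetical):

/* Hedged sketch of the need_wakeup handshake from user space.
 * Assumes a socket already created/bound with libxdp (xsk_socket__create()),
 * its TX producer ring in "tx", and its file descriptor in "xsk_fd".
 */
#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>
#include <xdp/xsk.h>

static void kick_tx_if_needed(struct xsk_ring_prod *tx, int xsk_fd)
{
	/* Only pay for the syscall when the driver asked to be woken. */
	if (!xsk_ring_prod__needs_wakeup(tx))
		return;

	/* sendto() on an AF_XDP socket ends up in the driver's ndo_xsk_wakeup(). */
	if (sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0) < 0 &&
	    errno != EAGAIN && errno != EBUSY && errno != ENETDOWN)
		perror("xsk wakeup");
}
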
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 83b97989a6bd..9c08ebfad804 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -30,10 +30,12 @@ static const struct igbvf_stats igbvf_gstrings_stats[] = {
{ "rx_bytes", IGBVF_STAT(stats.gorc, stats.base_gorc) },
{ "tx_bytes", IGBVF_STAT(stats.gotc, stats.base_gotc) },
{ "multicast", IGBVF_STAT(stats.mprc, stats.base_mprc) },
- { "lbrx_bytes", IGBVF_STAT(stats.gorlbc, stats.base_gorlbc) },
{ "lbrx_packets", IGBVF_STAT(stats.gprlbc, stats.base_gprlbc) },
+ { "lbtx_packets", IGBVF_STAT(stats.gptlbc, stats.base_gptlbc) },
+ { "lbrx_bytes", IGBVF_STAT(stats.gorlbc, stats.base_gorlbc) },
+ { "lbtx_bytes", IGBVF_STAT(stats.gotlbc, stats.base_gotlbc) },
{ "tx_restart_queue", IGBVF_STAT(restart_queue, zero_base) },
- { "rx_long_byte_count", IGBVF_STAT(stats.gorc, stats.base_gorc) },
+ { "tx_timeout_count", IGBVF_STAT(tx_timeout_count, zero_base) },
{ "rx_csum_offload_good", IGBVF_STAT(hw_csum_good, zero_base) },
{ "rx_csum_offload_errors", IGBVF_STAT(hw_csum_err, zero_base) },
{ "rx_header_split", IGBVF_STAT(rx_hdr_split, zero_base) },
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index 6ad35a00a287..da8e1fd47301 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -154,7 +154,6 @@ struct igbvf_ring {
/* board specific private data structure */
struct igbvf_adapter {
struct timer_list watchdog_timer;
- struct timer_list blink_timer;
struct work_struct reset_task;
struct work_struct watchdog_task;
@@ -162,15 +161,10 @@ struct igbvf_adapter {
const struct igbvf_info *ei;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- u32 bd_number;
u32 rx_buffer_len;
- u32 polling_interval;
- u16 mng_vlan_id;
u16 link_speed;
u16 link_duplex;
- spinlock_t tx_queue_lock; /* prevent concurrent tail updates */
-
/* track device up/down/testing state */
unsigned long state;
@@ -185,9 +179,6 @@ struct igbvf_adapter {
unsigned int restart_queue;
u32 txd_cmd;
- u32 tx_int_delay;
- u32 tx_abs_int_delay;
-
unsigned int total_tx_bytes;
unsigned int total_tx_packets;
unsigned int total_rx_bytes;
@@ -195,23 +186,15 @@ struct igbvf_adapter {
/* Tx stats */
u32 tx_timeout_count;
- u32 tx_fifo_head;
- u32 tx_head_addr;
- u32 tx_fifo_size;
- u32 tx_dma_failed;
/* Rx */
struct igbvf_ring *rx_ring;
- u32 rx_int_delay;
- u32 rx_abs_int_delay;
-
/* Rx stats */
u64 hw_csum_err;
u64 hw_csum_good;
u64 rx_hdr_split;
u32 alloc_rx_buff_failed;
- u32 rx_dma_failed;
unsigned int rx_ps_hdr_size;
u32 max_frame_size;
@@ -220,7 +203,6 @@ struct igbvf_adapter {
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
- spinlock_t stats_lock; /* prevent concurrent stats updates */
/* structs defined in e1000_hw.h */
struct e1000_hw hw;
@@ -232,26 +214,14 @@ struct igbvf_adapter {
struct e1000_vf_stats stats;
u64 zero_base;
- struct igbvf_ring test_tx_ring;
- struct igbvf_ring test_rx_ring;
- u32 test_icr;
-
u32 msg_enable;
struct msix_entry *msix_entries;
- int int_mode;
u32 eims_enable_mask;
u32 eims_other;
- u32 int_counter0;
- u32 int_counter1;
- u32 eeprom_wol;
u32 wol;
u32 pba;
- bool fc_autoneg;
-
- unsigned long led_status;
-
unsigned int flags;
unsigned long last_reset;
};
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 925d7286a8ee..ac57212ab02b 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -855,8 +855,6 @@ static irqreturn_t igbvf_msix_other(int irq, void *data)
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- adapter->int_counter1++;
-
hw->mac.get_link_status = 1;
if (!test_bit(__IGBVF_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer, jiffies + 1);
@@ -899,8 +897,6 @@ static irqreturn_t igbvf_intr_msix_rx(int irq, void *data)
struct net_device *netdev = data;
struct igbvf_adapter *adapter = netdev_priv(netdev);
- adapter->int_counter0++;
-
/* Write the ITR value calculated at the end of the
* previous interrupt.
*/
@@ -1239,7 +1235,7 @@ static int igbvf_vlan_rx_add_vid(struct net_device *netdev,
spin_lock_bh(&hw->mbx_lock);
if (hw->mac.ops.set_vfta(hw, vid, true)) {
- dev_warn(&adapter->pdev->dev, "Vlan id %d\n is not added", vid);
+ dev_warn(&adapter->pdev->dev, "Vlan id %d is not added\n", vid);
spin_unlock_bh(&hw->mbx_lock);
return -EINVAL;
}
@@ -1592,7 +1588,7 @@ void igbvf_down(struct igbvf_adapter *adapter)
igbvf_irq_disable(adapter);
- del_timer_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
/* record the stats before reset*/
igbvf_update_stats(adapter);
@@ -1633,10 +1629,6 @@ static int igbvf_sw_init(struct igbvf_adapter *adapter)
adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
- adapter->tx_int_delay = 8;
- adapter->tx_abs_int_delay = 32;
- adapter->rx_int_delay = 0;
- adapter->rx_abs_int_delay = 8;
adapter->requested_itr = 3;
adapter->current_itr = IGBVF_START_ITR;
@@ -1656,12 +1648,9 @@ static int igbvf_sw_init(struct igbvf_adapter *adapter)
if (igbvf_alloc_queues(adapter))
return -ENOMEM;
- spin_lock_init(&adapter->tx_queue_lock);
-
/* Explicitly disable IRQ since the NIC can be in any state. */
igbvf_irq_disable(adapter);
- spin_lock_init(&adapter->stats_lock);
spin_lock_init(&adapter->hw.mbx_lock);
set_bit(__IGBVF_DOWN, &adapter->state);
@@ -1903,7 +1892,8 @@ static bool igbvf_has_link(struct igbvf_adapter *adapter)
**/
static void igbvf_watchdog(struct timer_list *t)
{
- struct igbvf_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+ struct igbvf_adapter *adapter = timer_container_of(adapter, t,
+ watchdog_timer);
/* Do the rest outside of interrupt context */
schedule_work(&adapter->watchdog_task);
@@ -2714,7 +2704,6 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct igbvf_adapter *adapter;
struct e1000_hw *hw;
const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];
- static int cards_found;
int err;
err = pci_enable_device_mem(pdev);
@@ -2786,8 +2775,6 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->watchdog_timeo = 5 * HZ;
strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
- adapter->bd_number = cards_found++;
-
netdev->hw_features = NETIF_F_SG |
NETIF_F_TSO |
NETIF_F_TSO6 |
@@ -2915,7 +2902,7 @@ static void igbvf_remove(struct pci_dev *pdev)
* disable it from being rescheduled.
*/
set_bit(__IGBVF_DOWN, &adapter->state);
- del_timer_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
cancel_work_sync(&adapter->reset_task);
cancel_work_sync(&adapter->watchdog_task);
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index eac0f966e0e4..a427f05814c1 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -40,6 +40,12 @@ void igc_ethtool_set_ops(struct net_device *);
#define IGC_MAX_TX_TSTAMP_REGS 4
+struct igc_fpe_t {
+ struct ethtool_mmsv mmsv;
+ u32 tx_min_frag_size;
+ bool tx_enabled;
+};
+
enum igc_mac_filter_type {
IGC_MAC_FILTER_TYPE_DST = 0,
IGC_MAC_FILTER_TYPE_SRC
@@ -158,6 +164,7 @@ struct igc_ring {
bool launchtime_enable; /* true if LaunchTime is enabled */
ktime_t last_tx_cycle; /* end of the cycle with a launchtime transmission */
ktime_t last_ff_cycle; /* Last cycle with an active first flag */
+ bool preemptible; /* True if preemptible queue, false if express queue */
u32 start_time;
u32 end_time;
@@ -308,7 +315,7 @@ struct igc_adapter {
*/
spinlock_t ptp_tx_lock;
struct igc_tx_timestamp_request tx_tstamp[IGC_MAX_TX_TSTAMP_REGS];
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
unsigned int ptp_flags;
/* System time value lock */
spinlock_t tmreg_lock;
@@ -319,6 +326,7 @@ struct igc_adapter {
struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */
ktime_t ptp_reset_start; /* Reset time in clock mono */
struct system_time_snapshot snapshot;
+ struct mutex ptm_lock; /* Only allow one PTM transaction at a time */
char fw_version[32];
@@ -332,9 +340,12 @@ struct igc_adapter {
struct timespec64 period;
} perout[IGC_N_PEROUT];
+ struct igc_fpe_t fpe;
+
/* LEDs */
struct mutex led_mutex;
struct igc_led_classdev *leds;
+ bool leds_available;
};
void igc_up(struct igc_adapter *adapter);
@@ -386,19 +397,16 @@ extern char igc_driver_name[];
#define IGC_FLAG_RX_LEGACY BIT(16)
#define IGC_FLAG_TSN_QBV_ENABLED BIT(17)
#define IGC_FLAG_TSN_QAV_ENABLED BIT(18)
-#define IGC_FLAG_TSN_LEGACY_ENABLED BIT(19)
+#define IGC_FLAG_TSN_PREEMPT_ENABLED BIT(19)
+#define IGC_FLAG_TSN_REVERSE_TXQ_PRIO BIT(20)
#define IGC_FLAG_TSN_ANY_ENABLED \
(IGC_FLAG_TSN_QBV_ENABLED | IGC_FLAG_TSN_QAV_ENABLED | \
- IGC_FLAG_TSN_LEGACY_ENABLED)
+ IGC_FLAG_TSN_PREEMPT_ENABLED)
#define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6)
#define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7)
-#define IGC_MRQC_ENABLE_RSS_MQ 0x00000002
-#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
-#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
-
/* RX-desc Write-Back format RSS Type's */
enum igc_rss_type_num {
IGC_RSS_TYPE_NO_HASH = 0,
@@ -477,12 +485,30 @@ static inline u32 igc_rss_type(const union igc_adv_rx_desc *rx_desc)
* descriptors until either it has this many to write back, or the
* ITR timer expires.
*/
-#define IGC_RX_PTHRESH 8
-#define IGC_RX_HTHRESH 8
-#define IGC_TX_PTHRESH 8
-#define IGC_TX_HTHRESH 1
-#define IGC_RX_WTHRESH 4
-#define IGC_TX_WTHRESH 16
+#define IGC_RXDCTL_PTHRESH 8
+#define IGC_RXDCTL_HTHRESH 8
+#define IGC_RXDCTL_WTHRESH 4
+/* Ena specific Rx Queue */
+#define IGC_RXDCTL_QUEUE_ENABLE 0x02000000
+/* Receive Software Flush */
+#define IGC_RXDCTL_SWFLUSH 0x04000000
+
+#define IGC_TXDCTL_PTHRESH_MASK GENMASK(4, 0)
+#define IGC_TXDCTL_HTHRESH_MASK GENMASK(12, 8)
+#define IGC_TXDCTL_WTHRESH_MASK GENMASK(20, 16)
+#define IGC_TXDCTL_QUEUE_ENABLE_MASK GENMASK(25, 25)
+#define IGC_TXDCTL_SWFLUSH_MASK GENMASK(26, 26)
+#define IGC_TXDCTL_PRIORITY_MASK GENMASK(27, 27)
+
+#define IGC_TXDCTL_PTHRESH(x) FIELD_PREP(IGC_TXDCTL_PTHRESH_MASK, (x))
+#define IGC_TXDCTL_HTHRESH(x) FIELD_PREP(IGC_TXDCTL_HTHRESH_MASK, (x))
+#define IGC_TXDCTL_WTHRESH(x) FIELD_PREP(IGC_TXDCTL_WTHRESH_MASK, (x))
+/* Ena specific Tx Queue */
+#define IGC_TXDCTL_QUEUE_ENABLE FIELD_PREP(IGC_TXDCTL_QUEUE_ENABLE_MASK, 1)
+/* Transmit Software Flush */
+#define IGC_TXDCTL_SWFLUSH FIELD_PREP(IGC_TXDCTL_SWFLUSH_MASK, 1)
+#define IGC_TXDCTL_PRIORITY(x) FIELD_PREP(IGC_TXDCTL_PRIORITY_MASK, (x))
+#define IGC_TXDCTL_PRIORITY_HIGH IGC_TXDCTL_PRIORITY(1)
#define IGC_RX_DMA_ATTR \
(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
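
The IGC_TXDCTL_*(x) helpers above are FIELD_PREP() wrappers, so a threshold register value is composed by shifting each field into its mask instead of OR-ing open-coded constants. A standalone sketch of the same composition with explicit shifts (the threshold values are examples only, not the driver's defaults):

#include <stdint.h>
#include <stdio.h>

/* field_prep(): place "val" into the contiguous bit range described by "mask" */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

#define TXDCTL_PTHRESH_MASK	0x0000001fu	/* bits 4:0  */
#define TXDCTL_HTHRESH_MASK	0x00001f00u	/* bits 12:8 */
#define TXDCTL_WTHRESH_MASK	0x001f0000u	/* bits 20:16 */
#define TXDCTL_QUEUE_ENABLE	0x02000000u	/* bit 25 */

int main(void)
{
	/* example thresholds only; use whatever the hardware spec requires */
	uint32_t txdctl = field_prep(TXDCTL_PTHRESH_MASK, 8) |
			  field_prep(TXDCTL_HTHRESH_MASK, 1) |
			  field_prep(TXDCTL_WTHRESH_MASK, 16) |
			  TXDCTL_QUEUE_ENABLE;

	printf("TXDCTL = 0x%08x\n", txdctl);
	return 0;
}
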
@@ -577,6 +603,7 @@ struct igc_metadata_request {
struct xsk_tx_metadata *meta;
struct igc_ring *tx_ring;
u32 cmd_type;
+ u16 used_desc;
};
struct igc_q_vector {
@@ -605,6 +632,7 @@ enum igc_filter_match_flags {
IGC_FILTER_FLAG_DST_MAC_ADDR = BIT(3),
IGC_FILTER_FLAG_USER_DATA = BIT(4),
IGC_FILTER_FLAG_VLAN_ETYPE = BIT(5),
+ IGC_FILTER_FLAG_DEFAULT_QUEUE = BIT(6),
};
struct igc_nfc_filter {
@@ -632,10 +660,14 @@ struct igc_nfc_rule {
bool flex;
};
-/* IGC supports a total of 32 NFC rules: 16 MAC address based, 8 VLAN priority
- * based, 8 ethertype based and 32 Flex filter based rules.
+/* IGC supports a total of 65 NFC rules, listed below in order of priority:
+ * - 16 MAC address based filtering rules (highest priority)
+ * - 8 ethertype based filtering rules
+ * - 32 Flex filter based filtering rules
+ * - 8 VLAN priority based filtering rules
+ * - 1 default queue rule (lowest priority)
*/
-#define IGC_MAX_RXNFC_RULES 64
+#define IGC_MAX_RXNFC_RULES 65
struct igc_flex_filter {
u8 index;
@@ -734,14 +766,20 @@ struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter,
u32 location);
int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule);
void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule);
-
+void igc_disable_empty_addr_recv(struct igc_adapter *adapter);
+int igc_enable_empty_addr_recv(struct igc_adapter *adapter);
+struct igc_ring *igc_get_tx_ring(struct igc_adapter *adapter, int cpu);
+void igc_flush_tx_descriptors(struct igc_ring *ring);
void igc_ptp_init(struct igc_adapter *adapter);
void igc_ptp_reset(struct igc_adapter *adapter);
void igc_ptp_suspend(struct igc_adapter *adapter);
void igc_ptp_stop(struct igc_adapter *adapter);
ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf);
-int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
-int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
+int igc_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int igc_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void igc_ptp_tx_hang(struct igc_adapter *adapter);
void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts);
void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index 9fae8bdec2a7..1613b562d17c 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -68,6 +68,10 @@ static s32 igc_init_nvm_params_base(struct igc_hw *hw)
u32 eecd = rd32(IGC_EECD);
u16 size;
+ /* failed to read reg and got all F's */
+ if (!(~eecd))
+ return -ENXIO;
+
size = FIELD_GET(IGC_EECD_SIZE_EX_MASK, eecd);
/* Added to a constant, "size" becomes the left-shift value
@@ -221,6 +225,8 @@ static s32 igc_get_invariants_base(struct igc_hw *hw)
/* NVM initialization */
ret_val = igc_init_nvm_params_base(hw);
+ if (ret_val)
+ goto out;
switch (hw->mac.type) {
case igc_i225:
ret_val = igc_init_nvm_params_i225(hw);
diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h
index bf8cdfbba9ff..eaf17cd031c3 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.h
+++ b/drivers/net/ethernet/intel/igc/igc_base.h
@@ -49,6 +49,7 @@ struct igc_adv_tx_context_desc {
#define IGC_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
#define IGC_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
#define IGC_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+#define IGC_ADVTXD_PAYLEN_MASK 0XFFFFC000 /* Adv desc PAYLEN mask */
#define IGC_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
#define IGC_RAR_ENTRIES 16
@@ -85,14 +86,6 @@ union igc_adv_rx_desc {
} wb; /* writeback */
};
-/* Additional Transmit Descriptor Control definitions */
-#define IGC_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */
-#define IGC_TXDCTL_SWFLUSH 0x04000000 /* Transmit Software Flush */
-
-/* Additional Receive Descriptor Control definitions */
-#define IGC_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */
-#define IGC_RXDCTL_SWFLUSH 0x04000000 /* Receive Software Flush */
-
/* SRRCTL bit definitions */
#define IGC_SRRCTL_BSIZEPKT_MASK GENMASK(6, 0)
#define IGC_SRRCTL_BSIZEPKT(x) FIELD_PREP(IGC_SRRCTL_BSIZEPKT_MASK, \
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 8e449904aa7d..498ba1522ca4 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -308,6 +308,8 @@
#define IGC_TXD_DTYP_C 0x00000000 /* Context Descriptor */
#define IGC_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
#define IGC_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define IGC_TXD_POPTS_SMD_MASK 0x3000 /* Indicates whether it's SMD-V or SMD-R */
+
#define IGC_TXD_CMD_EOP 0x01000000 /* End of Packet */
#define IGC_TXD_CMD_IC 0x04000000 /* Insert Checksum */
#define IGC_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */
@@ -363,6 +365,8 @@
#define IGC_SRRCTL_TIMER0SEL(timer) (((timer) & 0x3) << 17)
/* Receive Descriptor bit definitions */
+#define IGC_RXD_STAT_SMD_TYPE_V 0x01 /* SMD-V Packet */
+#define IGC_RXD_STAT_SMD_TYPE_R 0x02 /* SMD-R Packet */
#define IGC_RXD_STAT_EOP 0x02 /* End of Packet */
#define IGC_RXD_STAT_IXSM 0x04 /* Ignore checksum */
#define IGC_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
@@ -372,17 +376,22 @@
#define IGC_RXDEXT_STATERR_LB 0x00040000
/* Advanced Receive Descriptor bit definitions */
-#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
+#define IGC_RXDADV_STAT_SMD_TYPE_MASK 0x06000
+#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
#define IGC_RXDEXT_STATERR_L4E 0x20000000
#define IGC_RXDEXT_STATERR_IPE 0x40000000
#define IGC_RXDEXT_STATERR_RXE 0x80000000
+#define IGC_MRQC_ENABLE_RSS_MQ 0x00000002
#define IGC_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
#define IGC_MRQC_RSS_FIELD_IPV4 0x00020000
#define IGC_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
#define IGC_MRQC_RSS_FIELD_IPV6 0x00100000
#define IGC_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
+#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
+#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
+#define IGC_MRQC_DEFAULT_QUEUE_MASK GENMASK(5, 3)
/* Header split receive */
#define IGC_RFCTL_IPV6_EX_DIS 0x00010000
@@ -396,11 +405,47 @@
#define IGC_RCTL_PMCF 0x00800000 /* pass MAC control frames */
#define IGC_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
-#define I225_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
-#define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
-#define IGC_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */
-
-#define IGC_TXPBSIZE_TSN 0x04145145 /* 5k bytes buffer for each queue */
+/* Mask for RX packet buffer size */
+#define IGC_RXPBSIZE_EXP_MASK GENMASK(5, 0)
+#define IGC_BMC2OSPBSIZE_MASK GENMASK(11, 6)
+#define IGC_RXPBSIZE_BE_MASK GENMASK(17, 12)
+/* Mask for timestamp in RX buffer */
+#define IGC_RXPBS_CFG_TS_EN_MASK GENMASK(31, 31)
+/* High-priority RX packet buffer size (KB). Used for Express traffic when preemption is enabled */
+#define IGC_RXPBSIZE_EXP(x) FIELD_PREP(IGC_RXPBSIZE_EXP_MASK, (x))
+/* BMC to OS packet buffer size in KB */
+#define IGC_BMC2OSPBSIZE(x) FIELD_PREP(IGC_BMC2OSPBSIZE_MASK, (x))
+/* Low-priority RX packet buffer size (KB). Used for BE traffic when preemption is enabled */
+#define IGC_RXPBSIZE_BE(x) FIELD_PREP(IGC_RXPBSIZE_BE_MASK, (x))
+/* Enable RX packet buffer for timestamp descriptor, saving 16 bytes per packet if set */
+#define IGC_RXPBS_CFG_TS_EN FIELD_PREP(IGC_RXPBS_CFG_TS_EN_MASK, 1)
+/* Default value following I225/I226 SW User Manual Section 8.3.1 */
+#define IGC_RXPBSIZE_EXP_BMC_DEFAULT ( \
+ IGC_RXPBSIZE_EXP(34) | IGC_BMC2OSPBSIZE(2))
+#define IGC_RXPBSIZE_EXP_BMC_BE_TSN ( \
+ IGC_RXPBSIZE_EXP(15) | IGC_BMC2OSPBSIZE(2) | IGC_RXPBSIZE_BE(15))
+
+/* Mask for TX packet buffer size */
+#define IGC_TXPB0SIZE_MASK GENMASK(5, 0)
+#define IGC_TXPB1SIZE_MASK GENMASK(11, 6)
+#define IGC_TXPB2SIZE_MASK GENMASK(17, 12)
+#define IGC_TXPB3SIZE_MASK GENMASK(23, 18)
+/* Mask for OS to BMC packet buffer size */
+#define IGC_OS2BMCPBSIZE_MASK GENMASK(29, 24)
+/* TX Packet buffer size in KB */
+#define IGC_TXPB0SIZE(x) FIELD_PREP(IGC_TXPB0SIZE_MASK, (x))
+#define IGC_TXPB1SIZE(x) FIELD_PREP(IGC_TXPB1SIZE_MASK, (x))
+#define IGC_TXPB2SIZE(x) FIELD_PREP(IGC_TXPB2SIZE_MASK, (x))
+#define IGC_TXPB3SIZE(x) FIELD_PREP(IGC_TXPB3SIZE_MASK, (x))
+/* OS to BMC packet buffer size in KB */
+#define IGC_OS2BMCPBSIZE(x) FIELD_PREP(IGC_OS2BMCPBSIZE_MASK, (x))
+/* Default value following I225/I226 SW User Manual Section 8.3.2 */
+#define IGC_TXPBSIZE_DEFAULT ( \
+ IGC_TXPB0SIZE(20) | IGC_TXPB1SIZE(0) | IGC_TXPB2SIZE(0) | \
+ IGC_TXPB3SIZE(0) | IGC_OS2BMCPBSIZE(4))
+#define IGC_TXPBSIZE_TSN ( \
+ IGC_TXPB0SIZE(7) | IGC_TXPB1SIZE(7) | IGC_TXPB2SIZE(7) | \
+ IGC_TXPB3SIZE(7) | IGC_OS2BMCPBSIZE(4))
#define IGC_DTXMXPKTSZ_TSN 0x19 /* 1600 bytes of max TX DMA packet size */
#define IGC_DTXMXPKTSZ_DEFAULT 0x98 /* 9728-byte Jumbo frames */
@@ -539,12 +584,15 @@
/* Transmit Scheduling */
#define IGC_TQAVCTRL_TRANSMIT_MODE_TSN 0x00000001
+#define IGC_TQAVCTRL_PREEMPT_ENA 0x00000002
#define IGC_TQAVCTRL_ENHANCED_QAV 0x00000008
#define IGC_TQAVCTRL_FUTSCDDIS 0x00000080
+#define IGC_TQAVCTRL_MIN_FRAG_MASK 0x0000C000
#define IGC_TXQCTL_QUEUE_MODE_LAUNCHT 0x00000001
#define IGC_TXQCTL_STRICT_CYCLE 0x00000002
#define IGC_TXQCTL_STRICT_END 0x00000004
+#define IGC_TXQCTL_PREEMPTIBLE 0x00000008
#define IGC_TXQCTL_QAV_SEL_MASK 0x000000C0
#define IGC_TXQCTL_QAV_SEL_CBS0 0x00000080
#define IGC_TXQCTL_QAV_SEL_CBS1 0x000000C0
@@ -574,7 +622,10 @@
#define IGC_PTM_CTRL_SHRT_CYC(usec) (((usec) & 0x3f) << 2)
#define IGC_PTM_CTRL_PTM_TO(usec) (((usec) & 0xff) << 8)
-#define IGC_PTM_SHORT_CYC_DEFAULT 1 /* Default short cycle interval */
+/* A short cycle time of 1us theoretically should work, but appears to be too
+ * short in practice.
+ */
+#define IGC_PTM_SHORT_CYC_DEFAULT 4 /* Default short cycle interval */
#define IGC_PTM_CYC_TIME_DEFAULT 5 /* Default PTM cycle time */
#define IGC_PTM_TIMEOUT_DEFAULT 255 /* Default timeout for PTM errors */
@@ -593,6 +644,7 @@
#define IGC_PTM_STAT_T4M1_OVFL BIT(3) /* T4 minus T1 overflow */
#define IGC_PTM_STAT_ADJUST_1ST BIT(4) /* 1588 timer adjusted during 1st PTM cycle */
#define IGC_PTM_STAT_ADJUST_CYC BIT(5) /* 1588 timer adjusted during non-1st PTM cycle */
+#define IGC_PTM_STAT_ALL GENMASK(5, 0) /* Used to clear all status */
/* PCIe PTM Cycle Control */
#define IGC_PTM_CYCLE_CTRL_CYC_TIME(msec) ((msec) & 0x3ff) /* PTM Cycle Time (msec) */
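
The hunk above replaces the opaque RXPBS/TXPBS constants with FIELD_PREP() compositions (and, for the TSN case, grows the per-queue Tx buffer from 5 KB to 7 KB). As a consistency check, the per-field macros reproduce the old non-TSN magic numbers; the stand-alone sketch below (not part of the patch) restates the field offsets as plain shifts and verifies the two defaults:

	#include <assert.h>
	#include <stdint.h>

	/* FIELD_PREP(mask, x) in the kernel shifts x up to the mask's lowest
	 * set bit; the same layouts are restated here as plain shifts.
	 */
	#define RXPBSIZE_EXP(x)		((uint32_t)(x) << 0)	/* bits 5:0   */
	#define BMC2OSPBSIZE(x)		((uint32_t)(x) << 6)	/* bits 11:6  */
	#define TXPB0SIZE(x)		((uint32_t)(x) << 0)	/* bits 5:0   */
	#define OS2BMCPBSIZE(x)		((uint32_t)(x) << 24)	/* bits 29:24 */

	int main(void)
	{
		/* old I225_RXPBSIZE_DEFAULT 0x000000A2: 34 KB Express + 2 KB BMC->OS */
		assert((RXPBSIZE_EXP(34) | BMC2OSPBSIZE(2)) == 0x000000A2);
		/* old I225_TXPBSIZE_DEFAULT 0x04000014: 20 KB PB0 + 4 KB OS->BMC */
		assert((TXPB0SIZE(20) | OS2BMCPBSIZE(4)) == 0x04000014);
		return 0;
	}
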
diff --git a/drivers/net/ethernet/intel/igc/igc_diag.c b/drivers/net/ethernet/intel/igc/igc_diag.c
index cc621970c0cd..a43d7244ee70 100644
--- a/drivers/net/ethernet/intel/igc/igc_diag.c
+++ b/drivers/net/ethernet/intel/igc/igc_diag.c
@@ -173,8 +173,7 @@ bool igc_link_test(struct igc_adapter *adapter, u64 *data)
*data = 0;
/* add delay to give enough time for autonegotiation to finish */
- if (adapter->hw.mac.autoneg)
- ssleep(5);
+ ssleep(5);
link_up = igc_has_link(adapter);
if (!link_up) {
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 5b0c6f433767..e94c1922b97a 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -8,6 +8,7 @@
#include "igc.h"
#include "igc_diag.h"
+#include "igc_tsn.h"
/* forward declaration */
struct igc_stats {
@@ -121,9 +122,11 @@ static const char igc_gstrings_test[][ETH_GSTRING_LEN] = {
#define IGC_STATS_LEN \
(IGC_GLOBAL_STATS_LEN + IGC_NETDEV_STATS_LEN + IGC_QUEUE_STATS_LEN)
+#define IGC_PRIV_FLAGS_LEGACY_RX BIT(0)
+#define IGC_PRIV_FLAGS_REVERSE_TSN_TXQ_PRIO BIT(1)
static const char igc_priv_flags_strings[][ETH_GSTRING_LEN] = {
-#define IGC_PRIV_FLAGS_LEGACY_RX BIT(0)
"legacy-rx",
+ "reverse-tsn-txq-prio",
};
#define IGC_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igc_priv_flags_strings)
@@ -624,11 +627,11 @@ igc_ethtool_set_ringparam(struct net_device *netdev,
}
if (adapter->num_tx_queues > adapter->num_rx_queues)
- temp_ring = vmalloc(array_size(sizeof(struct igc_ring),
- adapter->num_tx_queues));
+ temp_ring = vmalloc_array(adapter->num_tx_queues,
+ sizeof(struct igc_ring));
else
- temp_ring = vmalloc(array_size(sizeof(struct igc_ring),
- adapter->num_rx_queues));
+ temp_ring = vmalloc_array(adapter->num_rx_queues,
+ sizeof(struct igc_ring));
if (!temp_ring) {
err = -ENOMEM;
@@ -807,7 +810,7 @@ static int igc_ethtool_get_sset_count(struct net_device *netdev, int sset)
case ETH_SS_PRIV_FLAGS:
return IGC_PRIV_FLAGS_STR_LEN;
default:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
}
@@ -1044,9 +1047,11 @@ static int igc_ethtool_get_nfc_rules(struct igc_adapter *adapter,
return 0;
}
-static int igc_ethtool_get_rss_hash_opts(struct igc_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+static int igc_ethtool_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct igc_adapter *adapter = netdev_priv(dev);
+
cmd->data = 0;
/* Report default options for RSS on igc */
@@ -1086,15 +1091,19 @@ static int igc_ethtool_get_rss_hash_opts(struct igc_adapter *adapter,
return 0;
}
+static u32 igc_ethtool_get_rx_ring_count(struct net_device *dev)
+{
+ struct igc_adapter *adapter = netdev_priv(dev);
+
+ return adapter->num_rx_queues;
+}
+
static int igc_ethtool_get_rxnfc(struct net_device *dev,
struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
struct igc_adapter *adapter = netdev_priv(dev);
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = adapter->num_rx_queues;
- return 0;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = adapter->nfc_rule_count;
return 0;
@@ -1102,8 +1111,6 @@ static int igc_ethtool_get_rxnfc(struct net_device *dev,
return igc_ethtool_get_nfc_rule(adapter, cmd);
case ETHTOOL_GRXCLSRLALL:
return igc_ethtool_get_nfc_rules(adapter, cmd, rule_locs);
- case ETHTOOL_GRXFH:
- return igc_ethtool_get_rss_hash_opts(adapter, cmd);
default:
return -EOPNOTSUPP;
}
@@ -1111,9 +1118,11 @@ static int igc_ethtool_get_rxnfc(struct net_device *dev,
#define UDP_RSS_FLAGS (IGC_FLAG_RSS_FIELD_IPV4_UDP | \
IGC_FLAG_RSS_FIELD_IPV6_UDP)
-static int igc_ethtool_set_rss_hash_opt(struct igc_adapter *adapter,
- struct ethtool_rxnfc *nfc)
+static int igc_ethtool_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct igc_adapter *adapter = netdev_priv(dev);
u32 flags = adapter->flags;
/* RSS does not support anything other than hashing
@@ -1278,6 +1287,24 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule,
rule->flex = true;
else
rule->flex = false;
+
+ /* The wildcard rule is only applied if:
+ * a) None of the other filtering rules match (match_flags is zero)
+ * b) The flow type is ETHER_FLOW only (no additional fields set)
+ * c) Mask for Source MAC address is not specified (all zeros)
+ * d) Mask for Destination MAC address is not specified (all zeros)
+ * e) Mask for L2 EtherType is not specified (zero)
+ *
+ * If all these conditions are met, the rule is treated as a wildcard
+ * rule. Default queue feature will be used, so that all packets that do
+ * not match any other rule will be routed to the default queue.
+ */
+ if (!rule->filter.match_flags &&
+ fsp->flow_type == ETHER_FLOW &&
+ is_zero_ether_addr(fsp->m_u.ether_spec.h_source) &&
+ is_zero_ether_addr(fsp->m_u.ether_spec.h_dest) &&
+ !fsp->m_u.ether_spec.h_proto)
+ rule->filter.match_flags = IGC_FILTER_FLAG_DEFAULT_QUEUE;
}
/**
@@ -1424,8 +1451,6 @@ static int igc_ethtool_set_rxnfc(struct net_device *dev,
struct igc_adapter *adapter = netdev_priv(dev);
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- return igc_ethtool_set_rss_hash_opt(adapter, cmd);
case ETHTOOL_SRXCLSRLINS:
return igc_ethtool_add_nfc_rule(adapter, cmd);
case ETHTOOL_SRXCLSRLDEL:
@@ -1599,6 +1624,9 @@ static u32 igc_ethtool_get_priv_flags(struct net_device *netdev)
if (adapter->flags & IGC_FLAG_RX_LEGACY)
priv_flags |= IGC_PRIV_FLAGS_LEGACY_RX;
+ if (adapter->flags & IGC_FLAG_TSN_REVERSE_TXQ_PRIO)
+ priv_flags |= IGC_PRIV_FLAGS_REVERSE_TSN_TXQ_PRIO;
+
return priv_flags;
}
@@ -1607,10 +1635,13 @@ static int igc_ethtool_set_priv_flags(struct net_device *netdev, u32 priv_flags)
struct igc_adapter *adapter = netdev_priv(netdev);
unsigned int flags = adapter->flags;
- flags &= ~IGC_FLAG_RX_LEGACY;
+ flags &= ~(IGC_FLAG_RX_LEGACY | IGC_FLAG_TSN_REVERSE_TXQ_PRIO);
if (priv_flags & IGC_PRIV_FLAGS_LEGACY_RX)
flags |= IGC_FLAG_RX_LEGACY;
+ if (priv_flags & IGC_PRIV_FLAGS_REVERSE_TSN_TXQ_PRIO)
+ flags |= IGC_FLAG_TSN_REVERSE_TXQ_PRIO;
+
if (flags != adapter->flags) {
adapter->flags = flags;
@@ -1781,6 +1812,83 @@ static int igc_ethtool_set_eee(struct net_device *netdev,
return 0;
}
+static int igc_ethtool_get_mm(struct net_device *netdev,
+ struct ethtool_mm_state *cmd)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_fpe_t *fpe = &adapter->fpe;
+
+ ethtool_mmsv_get_mm(&fpe->mmsv, cmd);
+ cmd->tx_min_frag_size = fpe->tx_min_frag_size;
+ cmd->rx_min_frag_size = IGC_RX_MIN_FRAG_SIZE;
+
+ return 0;
+}
+
+static int igc_ethtool_set_mm(struct net_device *netdev,
+ struct ethtool_mm_cfg *cmd,
+ struct netlink_ext_ack *extack)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_fpe_t *fpe = &adapter->fpe;
+
+ fpe->tx_min_frag_size = igc_fpe_get_supported_frag_size(cmd->tx_min_frag_size);
+ if (fpe->tx_min_frag_size != cmd->tx_min_frag_size)
+ NL_SET_ERR_MSG_MOD(extack,
+ "tx-min-frag-size value set is unsupported. Rounded up to supported value (64, 128, 192, 256)");
+
+ if (fpe->mmsv.pmac_enabled != cmd->pmac_enabled) {
+ if (cmd->pmac_enabled)
+ static_branch_inc(&igc_fpe_enabled);
+ else
+ static_branch_dec(&igc_fpe_enabled);
+ }
+
+ ethtool_mmsv_set_mm(&fpe->mmsv, cmd);
+
+ return igc_tsn_offload_apply(adapter);
+}
+
+/**
+ * igc_ethtool_get_frame_ass_error - Get the frame assembly error count.
+ * @reg_value: Register value for IGC_PRMEXCPRCNT
+ * Return: The count of frame assembly errors.
+ */
+static u64 igc_ethtool_get_frame_ass_error(u32 reg_value)
+{
+ /* Out of order statistics */
+ u32 ooo_frame_cnt, ooo_frag_cnt;
+ u32 miss_frame_frag_cnt;
+
+ ooo_frame_cnt = FIELD_GET(IGC_PRMEXCPRCNT_OOO_FRAME_CNT, reg_value);
+ ooo_frag_cnt = FIELD_GET(IGC_PRMEXCPRCNT_OOO_FRAG_CNT, reg_value);
+ miss_frame_frag_cnt = FIELD_GET(IGC_PRMEXCPRCNT_MISS_FRAME_FRAG_CNT,
+ reg_value);
+
+ return ooo_frame_cnt + ooo_frag_cnt + miss_frame_frag_cnt;
+}
+
+static u64 igc_ethtool_get_frame_smd_error(u32 reg_value)
+{
+ return FIELD_GET(IGC_PRMEXCPRCNT_OOO_SMDC, reg_value);
+}
+
+static void igc_ethtool_get_mm_stats(struct net_device *dev,
+ struct ethtool_mm_stats *stats)
+{
+ struct igc_adapter *adapter = netdev_priv(dev);
+ struct igc_hw *hw = &adapter->hw;
+ u32 reg_value;
+
+ reg_value = rd32(IGC_PRMEXCPRCNT);
+
+ stats->MACMergeFrameAssErrorCount = igc_ethtool_get_frame_ass_error(reg_value);
+ stats->MACMergeFrameSmdErrorCount = igc_ethtool_get_frame_smd_error(reg_value);
+ stats->MACMergeFrameAssOkCount = rd32(IGC_PRMPTDRCNT);
+ stats->MACMergeFragCountRx = rd32(IGC_PRMEVNTRCNT);
+ stats->MACMergeFragCountTx = rd32(IGC_PRMEVNTTCNT);
+}
+
static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
@@ -1821,11 +1929,8 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full);
/* set autoneg settings */
- if (hw->mac.autoneg == 1) {
- ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- Autoneg);
- }
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
/* Set pause flow control settings */
ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
@@ -1878,10 +1983,7 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
cmd->base.duplex = DUPLEX_UNKNOWN;
}
cmd->base.speed = speed;
- if (hw->mac.autoneg)
- cmd->base.autoneg = AUTONEG_ENABLE;
- else
- cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.autoneg = AUTONEG_ENABLE;
/* MDI-X => 2; MDI =>1; Invalid =>0 */
if (hw->phy.media_type == igc_media_type_copper)
@@ -1955,7 +2057,6 @@ igc_ethtool_set_link_ksettings(struct net_device *netdev,
advertised |= ADVERTISE_10_HALF;
if (cmd->base.autoneg == AUTONEG_ENABLE) {
- hw->mac.autoneg = 1;
hw->phy.autoneg_advertised = advertised;
if (adapter->fc_autoneg)
hw->fc.requested_mode = igc_fc_default;
@@ -1997,6 +2098,9 @@ static void igc_ethtool_diag_test(struct net_device *netdev,
netdev_info(adapter->netdev, "Offline testing starting");
set_bit(__IGC_TESTING, &adapter->state);
+ /* power up PHY for link test */
+ igc_power_up_phy_copper(&adapter->hw);
+
/* Link test performed before hardware reset so autoneg doesn't
* interfere with test result
*/
@@ -2070,9 +2174,12 @@ static const struct ethtool_ops igc_ethtool_ops = {
.set_coalesce = igc_ethtool_set_coalesce,
.get_rxnfc = igc_ethtool_get_rxnfc,
.set_rxnfc = igc_ethtool_set_rxnfc,
+ .get_rx_ring_count = igc_ethtool_get_rx_ring_count,
.get_rxfh_indir_size = igc_ethtool_get_rxfh_indir_size,
.get_rxfh = igc_ethtool_get_rxfh,
.set_rxfh = igc_ethtool_set_rxfh,
+ .get_rxfh_fields = igc_ethtool_get_rxfh_fields,
+ .set_rxfh_fields = igc_ethtool_set_rxfh_fields,
.get_ts_info = igc_ethtool_get_ts_info,
.get_channels = igc_ethtool_get_channels,
.set_channels = igc_ethtool_set_channels,
@@ -2083,6 +2190,9 @@ static const struct ethtool_ops igc_ethtool_ops = {
.get_link_ksettings = igc_ethtool_get_link_ksettings,
.set_link_ksettings = igc_ethtool_set_link_ksettings,
.self_test = igc_ethtool_diag_test,
+ .get_mm = igc_ethtool_get_mm,
+ .get_mm_stats = igc_ethtool_get_mm_stats,
+ .set_mm = igc_ethtool_set_mm,
};
void igc_ethtool_set_ops(struct net_device *netdev)
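
igc_ethtool_set_mm() above rounds the requested tx-min-frag-size up to one of the sizes the hardware supports (64, 128, 192 or 256 bytes, per the extack message). The igc_fpe_get_supported_frag_size() helper itself is not part of this hunk; a hypothetical helper with that behaviour might look like the sketch below (illustration only; the name frag_size_round_up and the clamping at 256 are assumptions, not the driver code):

	#include <assert.h>
	#include <stdint.h>

	/* Hypothetical rounding helper: round up to the next multiple of 64,
	 * clamped to the 64..256 byte range listed in the extack message.
	 */
	static uint32_t frag_size_round_up(uint32_t req)
	{
		if (req <= 64)
			return 64;
		if (req >= 256)
			return 256;
		return (req + 63) / 64 * 64;
	}

	int main(void)
	{
		assert(frag_size_round_up(0)   == 64);
		assert(frag_size_round_up(65)  == 128);
		assert(frag_size_round_up(192) == 192);
		assert(frag_size_round_up(300) == 256);
		return 0;
	}
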
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index e1c572e0d4ef..be8a49a86d09 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -92,7 +92,6 @@ struct igc_mac_info {
bool asf_firmware_present;
bool arc_subsystem_valid;
- bool autoneg;
bool autoneg_failed;
bool get_link_status;
};
@@ -280,9 +279,4 @@ struct net_device *igc_get_hw_dev(struct igc_hw *hw);
#define hw_dbg(format, arg...) \
netdev_dbg(igc_get_hw_dev(hw), format, ##arg)
-s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value);
-s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value);
-void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value);
-void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value);
-
#endif /* _IGC_HW_H_ */
diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
index 0dd61719f1ed..5226d10cc95b 100644
--- a/drivers/net/ethernet/intel/igc/igc_i225.c
+++ b/drivers/net/ethernet/intel/igc/igc_i225.c
@@ -435,7 +435,7 @@ static s32 igc_update_nvm_checksum_i225(struct igc_hw *hw)
}
checksum += nvm_data;
}
- checksum = (u16)NVM_SUM - checksum;
+ checksum = NVM_SUM - checksum;
ret_val = igc_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
&checksum);
if (ret_val) {
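
The (u16) cast dropped above appears redundant because checksum is already a u16 in igc_update_nvm_checksum_i225(), so the subtraction truncates identically either way. For context, Intel NVM images are laid out so that the 16-bit sum of words 0x00 through NVM_CHECKSUM_REG equals NVM_SUM (0xBABA); the stand-alone sketch below (not part of the patch) demonstrates that invariant:

	#include <assert.h>
	#include <stdint.h>

	#define NVM_SUM			0xBABAu
	#define NVM_CHECKSUM_REG	0x003F

	/* Compute the checksum word so that words 0x00..NVM_CHECKSUM_REG sum
	 * to NVM_SUM, mirroring the loop in igc_update_nvm_checksum_i225().
	 */
	static uint16_t nvm_checksum_word(const uint16_t *words)
	{
		uint16_t checksum = 0;
		int i;

		for (i = 0; i < NVM_CHECKSUM_REG; i++)
			checksum += words[i];

		return (uint16_t)(NVM_SUM - checksum);
	}

	int main(void)
	{
		uint16_t words[NVM_CHECKSUM_REG + 1] = { 0x1234, 0xABCD, 0x0042 };
		uint16_t sum = 0;
		int i;

		words[NVM_CHECKSUM_REG] = nvm_checksum_word(words);
		for (i = 0; i <= NVM_CHECKSUM_REG; i++)
			sum += words[i];
		assert(sum == (uint16_t)NVM_SUM);
		return 0;
	}
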
diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c
index a5c4b19d71a2..7ac6637f8db7 100644
--- a/drivers/net/ethernet/intel/igc/igc_mac.c
+++ b/drivers/net/ethernet/intel/igc/igc_mac.c
@@ -127,7 +127,7 @@ s32 igc_setup_link(struct igc_hw *hw)
goto out;
/* If requested flow control is set to default, set flow control
- * to the both 'rx' and 'tx' pause frames.
+ * to both 'rx' and 'tx' pause frames.
*/
if (hw->fc.requested_mode == igc_fc_default)
hw->fc.requested_mode = igc_fc_full;
@@ -386,14 +386,6 @@ s32 igc_check_for_copper_link(struct igc_hw *hw)
*/
igc_check_downshift(hw);
- /* If we are forcing speed/duplex, then we simply return since
- * we have already determined whether we have link or not.
- */
- if (!mac->autoneg) {
- ret_val = -IGC_ERR_CONFIG;
- goto out;
- }
-
/* Auto-Neg is enabled. Auto Speed Detection takes care
* of MAC speed/duplex configuration. So we only need to
* configure Collision Distance in the MAC.
@@ -468,173 +460,171 @@ s32 igc_config_fc_after_link_up(struct igc_hw *hw)
goto out;
}
- /* Check for the case where we have copper media and auto-neg is
- * enabled. In this case, we need to check and see if Auto-Neg
- * has completed, and if so, how the PHY and link partner has
- * flow control configured.
+ /* In auto-neg, we need to check and see if Auto-Neg has completed,
+ * and if so, how the PHY and link partner has flow control
+ * configured.
*/
- if (mac->autoneg) {
- /* Read the MII Status Register and check to see if AutoNeg
- * has completed. We read this twice because this reg has
- * some "sticky" (latched) bits.
- */
- ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
- &mii_status_reg);
- if (ret_val)
- goto out;
- ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
- &mii_status_reg);
- if (ret_val)
- goto out;
- if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
- hw_dbg("Copper PHY and Auto Neg has not completed.\n");
- goto out;
- }
+ /* Read the MII Status Register and check to see if AutoNeg
+ * has completed. We read this twice because this reg has
+ * some "sticky" (latched) bits.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
+ &mii_status_reg);
+ if (ret_val)
+ goto out;
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
+ &mii_status_reg);
+ if (ret_val)
+ goto out;
- /* The AutoNeg process has completed, so we now need to
- * read both the Auto Negotiation Advertisement
- * Register (Address 4) and the Auto_Negotiation Base
- * Page Ability Register (Address 5) to determine how
- * flow control was negotiated.
- */
- ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
- &mii_nway_adv_reg);
- if (ret_val)
- goto out;
- ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
- &mii_nway_lp_ability_reg);
- if (ret_val)
- goto out;
- /* Two bits in the Auto Negotiation Advertisement Register
- * (Address 4) and two bits in the Auto Negotiation Base
- * Page Ability Register (Address 5) determine flow control
- * for both the PHY and the link partner. The following
- * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
- * 1999, describes these PAUSE resolution bits and how flow
- * control is determined based upon these settings.
- * NOTE: DC = Don't Care
- *
- * LOCAL DEVICE | LINK PARTNER
- * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
- *-------|---------|-------|---------|--------------------
- * 0 | 0 | DC | DC | igc_fc_none
- * 0 | 1 | 0 | DC | igc_fc_none
- * 0 | 1 | 1 | 0 | igc_fc_none
- * 0 | 1 | 1 | 1 | igc_fc_tx_pause
- * 1 | 0 | 0 | DC | igc_fc_none
- * 1 | DC | 1 | DC | igc_fc_full
- * 1 | 1 | 0 | 0 | igc_fc_none
- * 1 | 1 | 0 | 1 | igc_fc_rx_pause
- *
- * Are both PAUSE bits set to 1? If so, this implies
- * Symmetric Flow Control is enabled at both ends. The
- * ASM_DIR bits are irrelevant per the spec.
- *
- * For Symmetric Flow Control:
- *
- * LOCAL DEVICE | LINK PARTNER
- * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
- * 1 | DC | 1 | DC | IGC_fc_full
- *
- */
- if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
- /* Now we need to check if the user selected RX ONLY
- * of pause frames. In this case, we had to advertise
- * FULL flow control because we could not advertise RX
- * ONLY. Hence, we must now check to see if we need to
- * turn OFF the TRANSMISSION of PAUSE frames.
- */
- if (hw->fc.requested_mode == igc_fc_full) {
- hw->fc.current_mode = igc_fc_full;
- hw_dbg("Flow Control = FULL.\n");
- } else {
- hw->fc.current_mode = igc_fc_rx_pause;
- hw_dbg("Flow Control = RX PAUSE frames only.\n");
- }
- }
+ if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
+ hw_dbg("Copper PHY and Auto Neg has not completed.\n");
+ goto out;
+ }
- /* For receiving PAUSE frames ONLY.
- *
- * LOCAL DEVICE | LINK PARTNER
- * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
- * 0 | 1 | 1 | 1 | igc_fc_tx_pause
- */
- else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
- (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
- hw->fc.current_mode = igc_fc_tx_pause;
- hw_dbg("Flow Control = TX PAUSE frames only.\n");
- }
- /* For transmitting PAUSE frames ONLY.
- *
- * LOCAL DEVICE | LINK PARTNER
- * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
- * 1 | 1 | 0 | 1 | igc_fc_rx_pause
- */
- else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
- (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
- !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
- hw->fc.current_mode = igc_fc_rx_pause;
- hw_dbg("Flow Control = RX PAUSE frames only.\n");
- }
- /* Per the IEEE spec, at this point flow control should be
- * disabled. However, we want to consider that we could
- * be connected to a legacy switch that doesn't advertise
- * desired flow control, but can be forced on the link
- * partner. So if we advertised no flow control, that is
- * what we will resolve to. If we advertised some kind of
- * receive capability (Rx Pause Only or Full Flow Control)
- * and the link partner advertised none, we will configure
- * ourselves to enable Rx Flow Control only. We can do
- * this safely for two reasons: If the link partner really
- * didn't want flow control enabled, and we enable Rx, no
- * harm done since we won't be receiving any PAUSE frames
- * anyway. If the intent on the link partner was to have
- * flow control enabled, then by us enabling RX only, we
- * can at least receive pause frames and process them.
- * This is a good idea because in most cases, since we are
- * predominantly a server NIC, more times than not we will
- * be asked to delay transmission of packets than asking
- * our link partner to pause transmission of frames.
+ /* The AutoNeg process has completed, so we now need to
+ * read both the Auto Negotiation Advertisement
+ * Register (Address 4) and the Auto_Negotiation Base
+ * Page Ability Register (Address 5) to determine how
+ * flow control was negotiated.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
+ &mii_nway_adv_reg);
+ if (ret_val)
+ goto out;
+ ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
+ &mii_nway_lp_ability_reg);
+ if (ret_val)
+ goto out;
+ /* Two bits in the Auto Negotiation Advertisement Register
+ * (Address 4) and two bits in the Auto Negotiation Base
+ * Page Ability Register (Address 5) determine flow control
+ * for both the PHY and the link partner. The following
+ * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+ * 1999, describes these PAUSE resolution bits and how flow
+ * control is determined based upon these settings.
+ * NOTE: DC = Don't Care
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+ *-------|---------|-------|---------|--------------------
+ * 0 | 0 | DC | DC | igc_fc_none
+ * 0 | 1 | 0 | DC | igc_fc_none
+ * 0 | 1 | 1 | 0 | igc_fc_none
+ * 0 | 1 | 1 | 1 | igc_fc_tx_pause
+ * 1 | 0 | 0 | DC | igc_fc_none
+ * 1 | DC | 1 | DC | igc_fc_full
+ * 1 | 1 | 0 | 0 | igc_fc_none
+ * 1 | 1 | 0 | 1 | igc_fc_rx_pause
+ *
+ * Are both PAUSE bits set to 1? If so, this implies
+ * Symmetric Flow Control is enabled at both ends. The
+ * ASM_DIR bits are irrelevant per the spec.
+ *
+ * For Symmetric Flow Control:
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | DC | 1 | DC | IGC_fc_full
+ *
+ */
+ if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+ /* Now we need to check if the user selected RX ONLY
+ * of pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise RX
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
*/
- else if ((hw->fc.requested_mode == igc_fc_none) ||
- (hw->fc.requested_mode == igc_fc_tx_pause) ||
- (hw->fc.strict_ieee)) {
- hw->fc.current_mode = igc_fc_none;
- hw_dbg("Flow Control = NONE.\n");
+ if (hw->fc.requested_mode == igc_fc_full) {
+ hw->fc.current_mode = igc_fc_full;
+ hw_dbg("Flow Control = FULL.\n");
} else {
hw->fc.current_mode = igc_fc_rx_pause;
hw_dbg("Flow Control = RX PAUSE frames only.\n");
}
+ }
- /* Now we need to do one last check... If we auto-
- * negotiated to HALF DUPLEX, flow control should not be
- * enabled per IEEE 802.3 spec.
- */
- ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
- if (ret_val) {
- hw_dbg("Error getting link speed and duplex\n");
- goto out;
- }
+ /* For receiving PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 0 | 1 | 1 | 1 | igc_fc_tx_pause
+ */
+ else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ hw->fc.current_mode = igc_fc_tx_pause;
+ hw_dbg("Flow Control = TX PAUSE frames only.\n");
+ }
+ /* For transmitting PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | 1 | 0 | 1 | igc_fc_rx_pause
+ */
+ else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ hw->fc.current_mode = igc_fc_rx_pause;
+ hw_dbg("Flow Control = RX PAUSE frames only.\n");
+ }
+ /* Per the IEEE spec, at this point flow control should be
+ * disabled. However, we want to consider that we could
+ * be connected to a legacy switch that doesn't advertise
+ * desired flow control, but can be forced on the link
+ * partner. So if we advertised no flow control, that is
+ * what we will resolve to. If we advertised some kind of
+ * receive capability (Rx Pause Only or Full Flow Control)
+ * and the link partner advertised none, we will configure
+ * ourselves to enable Rx Flow Control only. We can do
+ * this safely for two reasons: If the link partner really
+ * didn't want flow control enabled, and we enable Rx, no
+ * harm done since we won't be receiving any PAUSE frames
+ * anyway. If the intent on the link partner was to have
+ * flow control enabled, then by us enabling RX only, we
+ * can at least receive pause frames and process them.
+ * This is a good idea because in most cases, since we are
+ * predominantly a server NIC, more times than not we will
+ * be asked to delay transmission of packets than asking
+ * our link partner to pause transmission of frames.
+ */
+ else if ((hw->fc.requested_mode == igc_fc_none) ||
+ (hw->fc.requested_mode == igc_fc_tx_pause) ||
+ (hw->fc.strict_ieee)) {
+ hw->fc.current_mode = igc_fc_none;
+ hw_dbg("Flow Control = NONE.\n");
+ } else {
+ hw->fc.current_mode = igc_fc_rx_pause;
+ hw_dbg("Flow Control = RX PAUSE frames only.\n");
+ }
- if (duplex == HALF_DUPLEX)
- hw->fc.current_mode = igc_fc_none;
+ /* Now we need to do one last check... If we auto-
+ * negotiated to HALF DUPLEX, flow control should not be
+ * enabled per IEEE 802.3 spec.
+ */
+ ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
+ if (ret_val) {
+ hw_dbg("Error getting link speed and duplex\n");
+ goto out;
+ }
- /* Now we call a subroutine to actually force the MAC
- * controller to use the correct flow control settings.
- */
- ret_val = igc_force_mac_fc(hw);
- if (ret_val) {
- hw_dbg("Error forcing flow control settings\n");
- goto out;
- }
+ if (duplex == HALF_DUPLEX)
+ hw->fc.current_mode = igc_fc_none;
+
+ /* Now we call a subroutine to actually force the MAC
+ * controller to use the correct flow control settings.
+ */
+ ret_val = igc_force_mac_fc(hw);
+ if (ret_val) {
+ hw_dbg("Error forcing flow control settings\n");
+ goto out;
}
out:
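
The flow-control resolution above is only de-indented, not changed: the negotiated mode still follows the quoted IEEE 802.3 PAUSE/ASM_DIR table. The stand-alone sketch below (not the driver function; it implements only the table and omits the driver's requested-mode check and legacy-switch fallback) walks the four outcomes:

	#include <assert.h>
	#include <stdbool.h>

	enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

	/* Resolve flow control from local/link-partner PAUSE and ASM_DIR bits,
	 * following the IEEE 802.3 resolution table quoted in the comment.
	 */
	static enum fc_mode resolve_fc(bool loc_pause, bool loc_asm,
				       bool lp_pause, bool lp_asm)
	{
		if (loc_pause && lp_pause)
			return FC_FULL;
		if (!loc_pause && loc_asm && lp_pause && lp_asm)
			return FC_TX_PAUSE;
		if (loc_pause && loc_asm && !lp_pause && lp_asm)
			return FC_RX_PAUSE;
		return FC_NONE;
	}

	int main(void)
	{
		assert(resolve_fc(true, true, true, false) == FC_FULL);
		assert(resolve_fc(false, true, true, true) == FC_TX_PAUSE);
		assert(resolve_fc(true, true, false, true) == FC_RX_PAUSE);
		assert(resolve_fc(false, false, true, true) == FC_NONE);
		return 0;
	}
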
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 6e70bca15db1..7aafa60ba0c8 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -683,9 +683,9 @@ static void igc_configure_rx_ring(struct igc_adapter *adapter,
wr32(IGC_SRRCTL(reg_idx), srrctl);
- rxdctl |= IGC_RX_PTHRESH;
- rxdctl |= IGC_RX_HTHRESH << 8;
- rxdctl |= IGC_RX_WTHRESH << 16;
+ rxdctl |= IGC_RXDCTL_PTHRESH;
+ rxdctl |= IGC_RXDCTL_HTHRESH << 8;
+ rxdctl |= IGC_RXDCTL_WTHRESH << 16;
/* initialize rx_buffer_info */
memset(ring->rx_buffer_info, 0,
@@ -749,11 +749,9 @@ static void igc_configure_tx_ring(struct igc_adapter *adapter,
wr32(IGC_TDH(reg_idx), 0);
writel(0, ring->tail);
- txdctl |= IGC_TX_PTHRESH;
- txdctl |= IGC_TX_HTHRESH << 8;
- txdctl |= IGC_TX_WTHRESH << 16;
+ txdctl |= IGC_TXDCTL_PTHRESH(8) | IGC_TXDCTL_HTHRESH(1) |
+ IGC_TXDCTL_WTHRESH(16) | IGC_TXDCTL_QUEUE_ENABLE;
- txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
wr32(IGC_TXDCTL(reg_idx), txdctl);
}
@@ -1092,10 +1090,12 @@ static int igc_init_empty_frame(struct igc_ring *ring,
dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE);
if (dma_mapping_error(ring->dev, dma)) {
- netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
+ net_err_ratelimited("%s: DMA mapping error for empty frame\n",
+ netdev_name(ring->netdev));
return -ENOMEM;
}
+ buffer->type = IGC_TX_BUFFER_TYPE_SKB;
buffer->skb = skb;
buffer->protocol = 0;
buffer->bytecount = skb->len;
@@ -1107,20 +1107,12 @@ static int igc_init_empty_frame(struct igc_ring *ring,
return 0;
}
-static int igc_init_tx_empty_descriptor(struct igc_ring *ring,
- struct sk_buff *skb,
- struct igc_tx_buffer *first)
+static void igc_init_tx_empty_descriptor(struct igc_ring *ring,
+ struct sk_buff *skb,
+ struct igc_tx_buffer *first)
{
union igc_adv_tx_desc *desc;
u32 cmd_type, olinfo_status;
- int err;
-
- if (!igc_desc_unused(ring))
- return -EBUSY;
-
- err = igc_init_empty_frame(ring, first, skb);
- if (err)
- return err;
cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
@@ -1139,8 +1131,6 @@ static int igc_init_tx_empty_descriptor(struct igc_ring *ring,
ring->next_to_use++;
if (ring->next_to_use == ring->count)
ring->next_to_use = 0;
-
- return 0;
}
#define IGC_EMPTY_FRAME_SIZE 60
@@ -1566,6 +1556,40 @@ static bool igc_request_tx_tstamp(struct igc_adapter *adapter, struct sk_buff *s
return false;
}
+static int igc_insert_empty_frame(struct igc_ring *tx_ring)
+{
+ struct igc_tx_buffer *empty_info;
+ struct sk_buff *empty_skb;
+ void *data;
+ int ret;
+
+ empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ empty_skb = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC);
+ if (unlikely(!empty_skb)) {
+ net_err_ratelimited("%s: skb alloc error for empty frame\n",
+ netdev_name(tx_ring->netdev));
+ return -ENOMEM;
+ }
+
+ data = skb_put(empty_skb, IGC_EMPTY_FRAME_SIZE);
+ memset(data, 0, IGC_EMPTY_FRAME_SIZE);
+
+ /* Prepare DMA mapping and Tx buffer information */
+ ret = igc_init_empty_frame(tx_ring, empty_info, empty_skb);
+ if (unlikely(ret)) {
+ dev_kfree_skb_any(empty_skb);
+ return ret;
+ }
+
+ /* Prepare advanced context descriptor for empty packet */
+ igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0);
+
+ /* Prepare advanced data descriptor for empty packet */
+ igc_init_tx_empty_descriptor(tx_ring, empty_skb, empty_info);
+
+ return 0;
+}
+
static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
struct igc_ring *tx_ring)
{
@@ -1585,6 +1609,7 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
* + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
* + 2 desc gap to keep tail from touching head,
* + 1 desc for context descriptor,
+ * + 2 desc for inserting an empty packet for launch time,
* otherwise try next time
*/
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
@@ -1604,24 +1629,16 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty);
if (insert_empty) {
- struct igc_tx_buffer *empty_info;
- struct sk_buff *empty;
- void *data;
-
- empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
- empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC);
- if (!empty)
- goto done;
-
- data = skb_put(empty, IGC_EMPTY_FRAME_SIZE);
- memset(data, 0, IGC_EMPTY_FRAME_SIZE);
-
- igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0);
-
- if (igc_init_tx_empty_descriptor(tx_ring,
- empty,
- empty_info) < 0)
- dev_kfree_skb_any(empty);
+ /* Reset the launch time if the required empty frame fails to
+ * be inserted. However, this packet is not dropped, so it
+ * "dirties" the current Qbv cycle. This ensures that the
+ * upcoming packet, which is scheduled in the next Qbv cycle,
+ * does not require an empty frame. This way, the launch time
+ * continues to function correctly despite the current failure
+ * to insert the empty frame.
+ */
+ if (igc_insert_empty_frame(tx_ring))
+ launch_time = 0;
}
done:
@@ -1649,7 +1666,8 @@ done:
if (igc_request_tx_tstamp(adapter, skb, &tstamp_flags)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IGC_TX_FLAGS_TSTAMP | tstamp_flags;
- if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_USE_CYCLES)
+ if (skb->sk &&
+ READ_ONCE(skb->sk->sk_tsflags) & SOF_TIMESTAMPING_BIND_PHC)
tx_flags |= IGC_TX_FLAGS_TSTAMP_TIMER_1;
} else {
adapter->tx_hwtstamp_skipped++;
@@ -1667,6 +1685,15 @@ done:
first->tx_flags = tx_flags;
first->protocol = protocol;
+ /* For preemptible queue, manually pad the skb so that HW includes
+ * padding bytes in mCRC calculation
+ */
+ if (tx_ring->preemptible && skb->len < ETH_ZLEN) {
+ if (skb_padto(skb, ETH_ZLEN))
+ goto out_drop;
+ skb_put(skb, ETH_ZLEN - skb->len);
+ }
+
tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len);
if (tso < 0)
goto out_drop;
@@ -2123,10 +2150,6 @@ static bool igc_cleanup_headers(struct igc_ring *rx_ring,
union igc_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- /* XDP packets use error pointer so abort at this point */
- if (IS_ERR(skb))
- return true;
-
if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) {
struct net_device *netdev = rx_ring->netdev;
@@ -2448,8 +2471,7 @@ unmap:
return -ENOMEM;
}
-static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
- int cpu)
+struct igc_ring *igc_get_tx_ring(struct igc_adapter *adapter, int cpu)
{
int index = cpu;
@@ -2473,7 +2495,7 @@ static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp)
if (unlikely(!xdpf))
return -EFAULT;
- ring = igc_xdp_get_tx_ring(adapter, cpu);
+ ring = igc_get_tx_ring(adapter, cpu);
nq = txring_txq(ring);
__netif_tx_lock(nq, cpu);
@@ -2515,8 +2537,7 @@ out_failure:
}
}
-static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
- struct xdp_buff *xdp)
+static int igc_xdp_run_prog(struct igc_adapter *adapter, struct xdp_buff *xdp)
{
struct bpf_prog *prog;
int res;
@@ -2530,11 +2551,11 @@ static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
res = __igc_xdp_run_prog(adapter, prog, xdp);
out:
- return ERR_PTR(-res);
+ return res;
}
/* This function assumes __netif_tx_lock is held by the caller. */
-static void igc_flush_tx_descriptors(struct igc_ring *ring)
+void igc_flush_tx_descriptors(struct igc_ring *ring)
{
/* Once tail pointer is updated, hardware can fetch the descriptors
* any time so we issue a write membar here to ensure all memory
@@ -2551,7 +2572,7 @@ static void igc_finalize_xdp(struct igc_adapter *adapter, int status)
struct igc_ring *ring;
if (status & IGC_XDP_TX) {
- ring = igc_xdp_get_tx_ring(adapter, cpu);
+ ring = igc_get_tx_ring(adapter, cpu);
nq = txring_txq(ring);
__netif_tx_lock(nq, cpu);
@@ -2585,6 +2606,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
struct sk_buff *skb = rx_ring->skb;
u16 cleaned_count = igc_desc_unused(rx_ring);
int xdp_status = 0, rx_buffer_pgcnt;
+ int xdp_res = 0;
while (likely(total_packets < budget)) {
struct igc_xdp_buff ctx = { .rx_ts = NULL };
@@ -2622,6 +2644,14 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
size -= IGC_TS_HDR_LEN;
}
+ if (igc_fpe_is_pmac_enabled(adapter) &&
+ igc_fpe_handle_mpacket(adapter, rx_desc, size, pktbuf)) {
+ /* Advance the ring next-to-clean */
+ igc_is_non_eop(rx_ring, rx_desc);
+ cleaned_count++;
+ continue;
+ }
+
if (!skb) {
xdp_init_buff(&ctx.xdp, truesize, &rx_ring->xdp_rxq);
xdp_prepare_buff(&ctx.xdp, pktbuf - igc_rx_offset(rx_ring),
@@ -2630,12 +2660,10 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
xdp_buff_clear_frags_flag(&ctx.xdp);
ctx.rx_desc = rx_desc;
- skb = igc_xdp_run_prog(adapter, &ctx.xdp);
+ xdp_res = igc_xdp_run_prog(adapter, &ctx.xdp);
}
- if (IS_ERR(skb)) {
- unsigned int xdp_res = -PTR_ERR(skb);
-
+ if (xdp_res) {
switch (xdp_res) {
case IGC_XDP_CONSUMED:
rx_buffer->pagecnt_bias++;
@@ -2657,7 +2685,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
skb = igc_construct_skb(rx_ring, rx_buffer, &ctx);
/* exit if we failed to retrieve a buffer */
- if (!skb) {
+ if (!xdp_res && !skb) {
rx_ring->rx_stats.alloc_failed++;
rx_buffer->pagecnt_bias++;
set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
@@ -2672,7 +2700,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
continue;
/* verify the packet layout is correct */
- if (igc_cleanup_headers(rx_ring, rx_desc, skb)) {
+ if (xdp_res || igc_cleanup_headers(rx_ring, rx_desc, skb)) {
skb = NULL;
continue;
}
@@ -2707,8 +2735,9 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
}
static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
- struct xdp_buff *xdp)
+ struct igc_xdp_buff *ctx)
{
+ struct xdp_buff *xdp = &ctx->xdp;
unsigned int totalsize = xdp->data_end - xdp->data_meta;
unsigned int metasize = xdp->data - xdp->data_meta;
struct sk_buff *skb;
@@ -2727,27 +2756,28 @@ static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
__skb_pull(skb, metasize);
}
+ if (ctx->rx_ts) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV;
+ skb_hwtstamps(skb)->netdev_data = ctx->rx_ts;
+ }
+
return skb;
}
static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector,
union igc_adv_rx_desc *desc,
- struct xdp_buff *xdp,
- ktime_t timestamp)
+ struct igc_xdp_buff *ctx)
{
struct igc_ring *ring = q_vector->rx.ring;
struct sk_buff *skb;
- skb = igc_construct_skb_zc(ring, xdp);
+ skb = igc_construct_skb_zc(ring, ctx);
if (!skb) {
ring->rx_stats.alloc_failed++;
set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &ring->flags);
return;
}
- if (timestamp)
- skb_hwtstamps(skb)->hwtstamp = timestamp;
-
if (igc_cleanup_headers(ring, desc, skb))
return;
@@ -2783,7 +2813,6 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
union igc_adv_rx_desc *desc;
struct igc_rx_buffer *bi;
struct igc_xdp_buff *ctx;
- ktime_t timestamp = 0;
unsigned int size;
int res;
@@ -2813,6 +2842,8 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
*/
bi->xdp->data_meta += IGC_TS_HDR_LEN;
size -= IGC_TS_HDR_LEN;
+ } else {
+ ctx->rx_ts = NULL;
}
bi->xdp->data_end = bi->xdp->data + size;
@@ -2821,7 +2852,7 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
res = __igc_xdp_run_prog(adapter, prog, bi->xdp);
switch (res) {
case IGC_XDP_PASS:
- igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp);
+ igc_dispatch_skb_zc(q_vector, desc, ctx);
fallthrough;
case IGC_XDP_CONSUMED:
xsk_buff_free(bi->xdp);
@@ -2955,9 +2986,48 @@ static u64 igc_xsk_fill_timestamp(void *_priv)
return *(u64 *)_priv;
}
+static void igc_xsk_request_launch_time(u64 launch_time, void *_priv)
+{
+ struct igc_metadata_request *meta_req = _priv;
+ struct igc_ring *tx_ring = meta_req->tx_ring;
+ __le32 launch_time_offset;
+ bool insert_empty = false;
+ bool first_flag = false;
+ u16 used_desc = 0;
+
+ if (!tx_ring->launchtime_enable)
+ return;
+
+ launch_time_offset = igc_tx_launchtime(tx_ring,
+ ns_to_ktime(launch_time),
+ &first_flag, &insert_empty);
+ if (insert_empty) {
+ /* Disregard the launch time request if the required empty frame
+ * fails to be inserted.
+ */
+ if (igc_insert_empty_frame(tx_ring))
+ return;
+
+ meta_req->tx_buffer =
+ &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ /* Inserting an empty packet requires two descriptors:
+ * one data descriptor and one context descriptor.
+ */
+ used_desc += 2;
+ }
+
+ /* Use one context descriptor to specify launch time and first flag. */
+ igc_tx_ctxtdesc(tx_ring, launch_time_offset, first_flag, 0, 0, 0);
+ used_desc += 1;
+
+ /* Update the number of used descriptors in this request */
+ meta_req->used_desc += used_desc;
+}
+
const struct xsk_tx_metadata_ops igc_xsk_tx_metadata_ops = {
.tmo_request_timestamp = igc_xsk_request_timestamp,
.tmo_fill_timestamp = igc_xsk_fill_timestamp,
+ .tmo_request_launch_time = igc_xsk_request_launch_time,
};
static void igc_xdp_xmit_zc(struct igc_ring *ring)
@@ -2980,7 +3050,13 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring)
ntu = ring->next_to_use;
budget = igc_desc_unused(ring);
- while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) {
+ /* Packets with launch time require one data descriptor and one context
+ * descriptor. When the launch time falls into the next Qbv cycle, we
+ * may need to insert an empty packet, which requires two more
+ * descriptors. Therefore, to be safe, we always ensure we have at least
+ * 4 descriptors available.
+ */
+ while (budget >= 4 && xsk_tx_peek_desc(pool, &xdp_desc)) {
struct igc_metadata_request meta_req;
struct xsk_tx_metadata *meta = NULL;
struct igc_tx_buffer *bi;
@@ -3001,9 +3077,19 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring)
meta_req.tx_ring = ring;
meta_req.tx_buffer = bi;
meta_req.meta = meta;
+ meta_req.used_desc = 0;
xsk_tx_metadata_request(meta, &igc_xsk_tx_metadata_ops,
&meta_req);
+ /* xsk_tx_metadata_request() may have updated next_to_use */
+ ntu = ring->next_to_use;
+
+ /* xsk_tx_metadata_request() may have updated Tx buffer info */
+ bi = meta_req.tx_buffer;
+
+ /* xsk_tx_metadata_request() may use a few descriptors */
+ budget -= meta_req.used_desc;
+
tx_desc = IGC_TX_DESC(ring, ntu);
tx_desc->read.cmd_type_len = cpu_to_le32(meta_req.cmd_type);
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
@@ -3021,9 +3107,11 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring)
ntu++;
if (ntu == ring->count)
ntu = 0;
+
+ ring->next_to_use = ntu;
+ budget--;
}
- ring->next_to_use = ntu;
if (tx_desc) {
igc_flush_tx_descriptors(ring);
xsk_tx_release(pool);
@@ -3071,6 +3159,11 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD)))
break;
+ if (igc_fpe_is_pmac_enabled(adapter) &&
+ igc_fpe_transmitted_smd_v(tx_desc))
+ ethtool_mmsv_event_handle(&adapter->fpe.mmsv,
+ ETHTOOL_MMSV_LD_SENT_VERIFY_MPACKET);
+
/* Hold the completions while there's a pending tx hardware
* timestamp request from XDP Tx metadata.
*/
@@ -3781,6 +3874,22 @@ static void igc_del_flex_filter(struct igc_adapter *adapter,
wr32(IGC_WUFC, wufc);
}
+static void igc_set_default_queue_filter(struct igc_adapter *adapter, u32 queue)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 mrqc = rd32(IGC_MRQC);
+
+ mrqc &= ~IGC_MRQC_DEFAULT_QUEUE_MASK;
+ mrqc |= FIELD_PREP(IGC_MRQC_DEFAULT_QUEUE_MASK, queue);
+ wr32(IGC_MRQC, mrqc);
+}
+
+static void igc_reset_default_queue_filter(struct igc_adapter *adapter)
+{
+ /* Reset the default queue to its default value which is Queue 0 */
+ igc_set_default_queue_filter(adapter, 0);
+}
+
static int igc_enable_nfc_rule(struct igc_adapter *adapter,
struct igc_nfc_rule *rule)
{
@@ -3819,6 +3928,9 @@ static int igc_enable_nfc_rule(struct igc_adapter *adapter,
return err;
}
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_DEFAULT_QUEUE)
+ igc_set_default_queue_filter(adapter, rule->action);
+
return 0;
}
@@ -3846,6 +3958,9 @@ static void igc_disable_nfc_rule(struct igc_adapter *adapter,
if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
rule->filter.dst_addr);
+
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_DEFAULT_QUEUE)
+ igc_reset_default_queue_filter(adapter);
}
/**
@@ -3963,6 +4078,30 @@ static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
}
/**
+ * igc_enable_empty_addr_recv - Enable Rx of packets with all-zeroes MAC address
+ * @adapter: Pointer to the igc_adapter structure.
+ *
+ * Frame preemption verification requires that packets with the all-zeroes
+ * MAC address are allowed to be received by the driver. This function adds the
+ * all-zeroes destination address to the list of acceptable addresses.
+ *
+ * Return: 0 on success, negative value otherwise.
+ */
+int igc_enable_empty_addr_recv(struct igc_adapter *adapter)
+{
+ u8 empty[ETH_ALEN] = {};
+
+ return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, empty, -1);
+}
+
+void igc_disable_empty_addr_recv(struct igc_adapter *adapter)
+{
+ u8 empty[ETH_ALEN] = {};
+
+ igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, empty);
+}
+
+/**
* igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
* @netdev: network interface device structure
*
@@ -4948,6 +5087,22 @@ static int igc_sw_init(struct igc_adapter *adapter)
return 0;
}
+static void igc_set_queue_napi(struct igc_adapter *adapter, int vector,
+ struct napi_struct *napi)
+{
+ struct igc_q_vector *q_vector = adapter->q_vector[vector];
+
+ if (q_vector->rx.ring)
+ netif_queue_set_napi(adapter->netdev,
+ q_vector->rx.ring->queue_index,
+ NETDEV_QUEUE_TYPE_RX, napi);
+
+ if (q_vector->tx.ring)
+ netif_queue_set_napi(adapter->netdev,
+ q_vector->tx.ring->queue_index,
+ NETDEV_QUEUE_TYPE_TX, napi);
+}
+
/**
* igc_up - Open the interface and prepare it to handle traffic
* @adapter: board private structure
@@ -4955,6 +5110,7 @@ static int igc_sw_init(struct igc_adapter *adapter)
void igc_up(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
+ struct napi_struct *napi;
int i = 0;
/* hardware has been reset, we need to reload some things */
@@ -4962,8 +5118,11 @@ void igc_up(struct igc_adapter *adapter)
clear_bit(__IGC_DOWN, &adapter->state);
- for (i = 0; i < adapter->num_q_vectors; i++)
- napi_enable(&adapter->q_vector[i]->napi);
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ napi = &adapter->q_vector[i]->napi;
+ napi_enable(napi);
+ igc_set_queue_napi(adapter, i, napi);
+ }
if (adapter->msix_entries)
igc_configure_msix(adapter);
@@ -5192,12 +5351,13 @@ void igc_down(struct igc_adapter *adapter)
for (i = 0; i < adapter->num_q_vectors; i++) {
if (adapter->q_vector[i]) {
napi_synchronize(&adapter->q_vector[i]->napi);
+ igc_set_queue_napi(adapter, i, NULL);
napi_disable(&adapter->q_vector[i]->napi);
}
}
- del_timer_sync(&adapter->watchdog_timer);
- del_timer_sync(&adapter->phy_info_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->phy_info_timer);
/* record the stats before reset*/
spin_lock(&adapter->stats64_lock);
@@ -5216,6 +5376,9 @@ void igc_down(struct igc_adapter *adapter)
igc_disable_all_tx_rings_hw(adapter);
igc_clean_all_tx_rings(adapter);
igc_clean_all_rx_rings(adapter);
+
+ if (adapter->fpe.mmsv.pmac_enabled)
+ ethtool_mmsv_stop(&adapter->fpe.mmsv);
}
void igc_reinit_locked(struct igc_adapter *adapter)
@@ -5576,6 +5739,9 @@ static int igc_request_msix(struct igc_adapter *adapter)
q_vector);
if (err)
goto err_free;
+
+ netif_napi_set_irq(&q_vector->napi,
+ adapter->msix_entries[vector].vector);
}
igc_configure_msix(adapter);
@@ -5612,7 +5778,8 @@ static void igc_clear_interrupt_scheme(struct igc_adapter *adapter)
*/
static void igc_update_phy_info(struct timer_list *t)
{
- struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer);
+ struct igc_adapter *adapter = timer_container_of(adapter, t,
+ phy_info_timer);
igc_get_phy_info(&adapter->hw);
}
@@ -5654,7 +5821,8 @@ bool igc_has_link(struct igc_adapter *adapter)
*/
static void igc_watchdog(struct timer_list *t)
{
- struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+ struct igc_adapter *adapter = timer_container_of(adapter, t,
+ watchdog_timer);
/* Do the rest outside of interrupt context */
schedule_work(&adapter->watchdog_task);
}
@@ -5737,6 +5905,10 @@ static void igc_watchdog_task(struct work_struct *work)
*/
igc_tsn_adjust_txtime_offset(adapter);
+ if (adapter->fpe.mmsv.pmac_enabled)
+ ethtool_mmsv_link_state_handle(&adapter->fpe.mmsv,
+ true);
+
if (adapter->link_speed != SPEED_1000)
goto no_wait;
@@ -5772,6 +5944,10 @@ no_wait:
netdev_info(netdev, "NIC Link is Down\n");
netif_carrier_off(netdev);
+ if (adapter->fpe.mmsv.pmac_enabled)
+ ethtool_mmsv_link_state_handle(&adapter->fpe.mmsv,
+ false);
+
/* link state has changed, schedule phy info update */
if (!test_bit(__IGC_DOWN, &adapter->state))
mod_timer(&adapter->phy_info_timer,
@@ -6018,6 +6194,7 @@ static int __igc_open(struct net_device *netdev, bool resuming)
struct igc_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = adapter->pdev;
struct igc_hw *hw = &adapter->hw;
+ struct napi_struct *napi;
int err = 0;
int i = 0;
@@ -6053,8 +6230,11 @@ static int __igc_open(struct net_device *netdev, bool resuming)
clear_bit(__IGC_DOWN, &adapter->state);
- for (i = 0; i < adapter->num_q_vectors; i++)
- napi_enable(&adapter->q_vector[i]->napi);
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ napi = &adapter->q_vector[i]->napi;
+ napi_enable(napi);
+ igc_set_queue_napi(adapter, i, napi);
+ }
/* Clear any pending interrupts. */
rd32(IGC_ICR);
@@ -6145,24 +6325,6 @@ int igc_close(struct net_device *netdev)
return 0;
}
-/**
- * igc_ioctl - Access the hwtstamp interface
- * @netdev: network interface device structure
- * @ifr: interface request data
- * @cmd: ioctl command
- **/
-static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
- case SIOCGHWTSTAMP:
- return igc_ptp_get_ts_config(netdev, ifr);
- case SIOCSHWTSTAMP:
- return igc_ptp_set_ts_config(netdev, ifr);
- default:
- return -EOPNOTSUPP;
- }
-}
-
static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue,
bool enable)
{
@@ -6272,6 +6434,7 @@ static int igc_qbv_clear_schedule(struct igc_adapter *adapter)
ring->start_time = 0;
ring->end_time = NSEC_PER_SEC;
ring->max_sdu = 0;
+ ring->preemptible = false;
}
spin_lock_irqsave(&adapter->qbv_tx_lock, flags);
@@ -6337,6 +6500,13 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
if (!validate_schedule(adapter, qopt))
return -EINVAL;
+ if (qopt->mqprio.preemptible_tcs &&
+ !(adapter->flags & IGC_FLAG_TSN_REVERSE_TXQ_PRIO)) {
+ NL_SET_ERR_MSG_MOD(qopt->extack,
+ "reverse-tsn-txq-prio private flag must be enabled before setting preemptible tc");
+ return -ENODEV;
+ }
+
igc_ptp_read(adapter, &now);
if (igc_tsn_is_taprio_activated_by_user(adapter) &&
@@ -6428,6 +6598,8 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
ring->max_sdu = 0;
}
+ igc_fpe_save_preempt_queue(adapter, &qopt->mqprio);
+
return 0;
}
@@ -6547,7 +6719,8 @@ static int igc_tc_query_caps(struct igc_adapter *adapter,
case TC_SETUP_QDISC_TAPRIO: {
struct tc_taprio_caps *caps = base->caps;
- caps->broken_mqprio = true;
+ if (!(adapter->flags & IGC_FLAG_TSN_REVERSE_TXQ_PRIO))
+ caps->broken_mqprio = true;
if (hw->mac.type == igc_i225) {
caps->supports_queue_max_sdu = true;
@@ -6573,17 +6746,33 @@ static void igc_save_mqprio_params(struct igc_adapter *adapter, u8 num_tc,
adapter->queue_per_tc[i] = offset[i];
}
+static bool
+igc_tsn_is_tc_to_queue_priority_ordered(struct tc_mqprio_qopt_offload *mqprio)
+{
+ int num_tc = mqprio->qopt.num_tc;
+ int i;
+
+ for (i = 1; i < num_tc; i++) {
+ if (mqprio->qopt.offset[i - 1] > mqprio->qopt.offset[i])
+ return false;
+ }
+
+ return true;
+}
+
static int igc_tsn_enable_mqprio(struct igc_adapter *adapter,
struct tc_mqprio_qopt_offload *mqprio)
{
struct igc_hw *hw = &adapter->hw;
- int i;
+ int err, i;
if (hw->mac.type != igc_i225)
return -EOPNOTSUPP;
if (!mqprio->qopt.num_tc) {
adapter->strict_priority_enable = false;
+ igc_fpe_clear_preempt_queue(adapter);
+ netdev_reset_tc(adapter->netdev);
goto apply;
}
@@ -6604,17 +6793,32 @@ static int igc_tsn_enable_mqprio(struct igc_adapter *adapter,
}
}
- /* Preemption is not supported yet. */
- if (mqprio->preemptible_tcs) {
+ if (!igc_tsn_is_tc_to_queue_priority_ordered(mqprio)) {
NL_SET_ERR_MSG_MOD(mqprio->extack,
- "Preemption is not supported yet");
+ "tc to queue mapping must preserve increasing priority (higher tc -> higher queue)");
return -EOPNOTSUPP;
}
igc_save_mqprio_params(adapter, mqprio->qopt.num_tc,
mqprio->qopt.offset);
+ err = netdev_set_num_tc(adapter->netdev, adapter->num_tc);
+ if (err)
+ return err;
+
+ for (i = 0; i < adapter->num_tc; i++) {
+ err = netdev_set_tc_queue(adapter->netdev, i, 1,
+ adapter->queue_per_tc[i]);
+ if (err)
+ return err;
+ }
+
+ /* In case the card is configured with fewer than four queues. */
+ for (; i < IGC_MAX_TX_QUEUES; i++)
+ adapter->queue_per_tc[i] = i;
+
mqprio->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ igc_fpe_save_preempt_queue(adapter, mqprio);
apply:
return igc_tsn_offload_apply(adapter);
@@ -6677,7 +6881,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
- ring = igc_xdp_get_tx_ring(adapter, cpu);
+ ring = igc_get_tx_ring(adapter, cpu);
nq = txring_txq(ring);
__netif_tx_lock(nq, cpu);
@@ -6771,53 +6975,15 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_fix_features = igc_fix_features,
.ndo_set_features = igc_set_features,
.ndo_features_check = igc_features_check,
- .ndo_eth_ioctl = igc_ioctl,
.ndo_setup_tc = igc_setup_tc,
.ndo_bpf = igc_bpf,
.ndo_xdp_xmit = igc_xdp_xmit,
.ndo_xsk_wakeup = igc_xsk_wakeup,
.ndo_get_tstamp = igc_get_tstamp,
+ .ndo_hwtstamp_get = igc_ptp_hwtstamp_get,
+ .ndo_hwtstamp_set = igc_ptp_hwtstamp_set,
};
-/* PCIe configuration access */
-void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
-{
- struct igc_adapter *adapter = hw->back;
-
- pci_read_config_word(adapter->pdev, reg, value);
-}
-
-void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
-{
- struct igc_adapter *adapter = hw->back;
-
- pci_write_config_word(adapter->pdev, reg, *value);
-}
-
-s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
-{
- struct igc_adapter *adapter = hw->back;
-
- if (!pci_is_pcie(adapter->pdev))
- return -IGC_ERR_CONFIG;
-
- pcie_capability_read_word(adapter->pdev, reg, value);
-
- return IGC_SUCCESS;
-}
-
-s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
-{
- struct igc_adapter *adapter = hw->back;
-
- if (!pci_is_pcie(adapter->pdev))
- return -IGC_ERR_CONFIG;
-
- pcie_capability_write_word(adapter->pdev, reg, *value);
-
- return IGC_SUCCESS;
-}
-
u32 igc_rd32(struct igc_hw *hw, u32 reg)
{
struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw);
@@ -6983,6 +7149,17 @@ static int igc_probe(struct pci_dev *pdev,
adapter->port_num = hw->bus.func;
adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+ /* PCI config space info */
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ hw->revision_id = pdev->revision;
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+
+ /* Disable ASPM L1.2 on I226 devices to avoid packet loss */
+ if (igc_is_device_id_i226(hw))
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
+
err = pci_save_state(pdev);
if (err)
goto err_ioremap;
@@ -7005,13 +7182,6 @@ static int igc_probe(struct pci_dev *pdev,
netdev->mem_start = pci_resource_start(pdev, 0);
netdev->mem_end = pci_resource_end(pdev, 0);
- /* PCI config space info */
- hw->vendor_id = pdev->vendor;
- hw->device_id = pdev->device;
- hw->revision_id = pdev->revision;
- hw->subsystem_vendor_id = pdev->subsystem_vendor;
- hw->subsystem_device_id = pdev->subsystem_device;
-
/* Copy the default MAC and PHY function pointers */
memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
@@ -7062,6 +7232,9 @@ static int igc_probe(struct pci_dev *pdev,
netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_XSK_ZEROCOPY;
+ /* enable HW vlan tag insertion/stripping by default */
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
@@ -7094,8 +7267,8 @@ static int igc_probe(struct pci_dev *pdev,
}
/* configure RXPBSIZE and TXPBSIZE */
- wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT);
- wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
+ wr32(IGC_RXPBS, IGC_RXPBSIZE_EXP_BMC_DEFAULT);
+ wr32(IGC_TXPBS, IGC_TXPBSIZE_DEFAULT);
timer_setup(&adapter->watchdog_timer, igc_watchdog, 0);
timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0);
@@ -7103,12 +7276,11 @@ static int igc_probe(struct pci_dev *pdev,
INIT_WORK(&adapter->reset_task, igc_reset_task);
INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);
- hrtimer_init(&adapter->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- adapter->hrtimer.function = &igc_qbv_scheduling_timer;
+ hrtimer_setup(&adapter->hrtimer, &igc_qbv_scheduling_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
/* Initialize link properties that are user-changeable */
adapter->fc_autoneg = true;
- hw->mac.autoneg = true;
hw->phy.autoneg_advertised = 0xaf;
hw->fc.requested_mode = igc_fc_default;
@@ -7128,6 +7300,8 @@ static int igc_probe(struct pci_dev *pdev,
igc_tsn_clear_schedule(adapter);
+ igc_fpe_init(adapter);
+
/* reset the hardware with the new settings */
igc_reset(adapter);
@@ -7161,14 +7335,21 @@ static int igc_probe(struct pci_dev *pdev,
if (IS_ENABLED(CONFIG_IGC_LEDS)) {
err = igc_led_setup(adapter);
- if (err)
- goto err_register;
+ if (err) {
+ netdev_warn_once(netdev,
+ "LED init failed (%d); continuing without LED support\n",
+ err);
+ adapter->leds_available = false;
+ } else {
+ adapter->leds_available = true;
+ }
}
return 0;
err_register:
igc_release_hw_control(adapter);
+ igc_ptp_stop(adapter);
err_eeprom:
if (!igc_check_reset_block(hw))
igc_reset_phy(hw);
@@ -7210,14 +7391,14 @@ static void igc_remove(struct pci_dev *pdev)
set_bit(__IGC_DOWN, &adapter->state);
- del_timer_sync(&adapter->watchdog_timer);
- del_timer_sync(&adapter->phy_info_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->phy_info_timer);
cancel_work_sync(&adapter->reset_task);
cancel_work_sync(&adapter->watchdog_task);
hrtimer_cancel(&adapter->hrtimer);
- if (IS_ENABLED(CONFIG_IGC_LEDS))
+ if (IS_ENABLED(CONFIG_IGC_LEDS) && adapter->leds_available)
igc_led_free(adapter);
/* Release control of h/w to f/w. If f/w is AMT enabled, this
@@ -7339,7 +7520,7 @@ static void igc_deliver_wake_packet(struct net_device *netdev)
netif_rx(skb);
}
-static int igc_resume(struct device *dev)
+static int __igc_resume(struct device *dev, bool rpm)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -7349,7 +7530,6 @@ static int igc_resume(struct device *dev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- pci_save_state(pdev);
if (!pci_device_is_present(pdev))
return -ENODEV;
@@ -7363,6 +7543,9 @@ static int igc_resume(struct device *dev)
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
+ if (igc_is_device_id_i226(hw))
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
+
if (igc_init_interrupt_scheme(adapter, true)) {
netdev_err(netdev, "Unable to allocate memory for queues\n");
return -ENOMEM;
@@ -7382,7 +7565,11 @@ static int igc_resume(struct device *dev)
wr32(IGC_WUS, ~0);
if (netif_running(netdev)) {
+ if (!rpm)
+ rtnl_lock();
err = __igc_open(netdev, true);
+ if (!rpm)
+ rtnl_unlock();
if (!err)
netif_device_attach(netdev);
}
@@ -7390,9 +7577,14 @@ static int igc_resume(struct device *dev)
return err;
}
+static int igc_resume(struct device *dev)
+{
+ return __igc_resume(dev, false);
+}
+
static int igc_runtime_resume(struct device *dev)
{
- return igc_resume(dev);
+ return __igc_resume(dev, true);
}
static int igc_suspend(struct device *dev)
@@ -7437,14 +7629,18 @@ static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
struct net_device *netdev = pci_get_drvdata(pdev);
struct igc_adapter *adapter = netdev_priv(netdev);
+ rtnl_lock();
netif_device_detach(netdev);
- if (state == pci_channel_io_perm_failure)
+ if (state == pci_channel_io_perm_failure) {
+ rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
+ }
if (netif_running(netdev))
igc_down(adapter);
pci_disable_device(pdev);
+ rtnl_unlock();
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@@ -7455,7 +7651,7 @@ static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
* @pdev: Pointer to PCI device
*
* Restart the card from scratch, as if from a cold-boot. Implementation
- * resembles the first-half of the igc_resume routine.
+ * resembles the first-half of the __igc_resume routine.
**/
static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
{
@@ -7470,11 +7666,13 @@ static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
} else {
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
+ if (igc_is_device_id_i226(hw))
+ pci_disable_link_state_locked(pdev, PCIE_LINK_STATE_L1_2);
+
/* In case of PCI error, adapter loses its HW address
* so we should re-assign it here.
*/
@@ -7494,7 +7692,7 @@ static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
*
* This callback is called when the error recovery driver tells us that
* its OK to resume normal operation. Implementation resembles the
- * second-half of the igc_resume routine.
+ * second-half of the __igc_resume routine.
*/
static void igc_io_resume(struct pci_dev *pdev)
{
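For reference, the igc_tsn_is_tc_to_queue_priority_ordered() helper introduced above only accepts TC-to-queue layouts whose queue offsets do not decrease as the TC index grows. A stand-alone sketch of the same rule (user-space C; the arrays are illustrative and not part of the patch):

#include <stdbool.h>
#include <stdio.h>

/* Same rule as igc_tsn_is_tc_to_queue_priority_ordered(): every TC's
 * first queue offset must be >= the previous TC's offset.
 */
static bool offsets_ordered(const int *offset, int num_tc)
{
	for (int i = 1; i < num_tc; i++)
		if (offset[i - 1] > offset[i])
			return false;
	return true;
}

int main(void)
{
	int increasing[4] = { 0, 1, 2, 3 };	/* accepted */
	int reversed[4]   = { 3, 2, 1, 0 };	/* rejected with -EOPNOTSUPP */

	printf("increasing: %d, reversed: %d\n",
	       offsets_ordered(increasing, 4),
	       offsets_ordered(reversed, 4));
	return 0;
}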
diff --git a/drivers/net/ethernet/intel/igc/igc_nvm.c b/drivers/net/ethernet/intel/igc/igc_nvm.c
index 58f81aba0144..a47b8d39238c 100644
--- a/drivers/net/ethernet/intel/igc/igc_nvm.c
+++ b/drivers/net/ethernet/intel/igc/igc_nvm.c
@@ -36,56 +36,6 @@ static s32 igc_poll_eerd_eewr_done(struct igc_hw *hw, int ee_reg)
}
/**
- * igc_acquire_nvm - Generic request for access to EEPROM
- * @hw: pointer to the HW structure
- *
- * Set the EEPROM access request bit and wait for EEPROM access grant bit.
- * Return successful if access grant bit set, else clear the request for
- * EEPROM access and return -IGC_ERR_NVM (-1).
- */
-s32 igc_acquire_nvm(struct igc_hw *hw)
-{
- s32 timeout = IGC_NVM_GRANT_ATTEMPTS;
- u32 eecd = rd32(IGC_EECD);
- s32 ret_val = 0;
-
- wr32(IGC_EECD, eecd | IGC_EECD_REQ);
- eecd = rd32(IGC_EECD);
-
- while (timeout) {
- if (eecd & IGC_EECD_GNT)
- break;
- udelay(5);
- eecd = rd32(IGC_EECD);
- timeout--;
- }
-
- if (!timeout) {
- eecd &= ~IGC_EECD_REQ;
- wr32(IGC_EECD, eecd);
- hw_dbg("Could not acquire NVM grant\n");
- ret_val = -IGC_ERR_NVM;
- }
-
- return ret_val;
-}
-
-/**
- * igc_release_nvm - Release exclusive access to EEPROM
- * @hw: pointer to the HW structure
- *
- * Stop any current commands to the EEPROM and clear the EEPROM request bit.
- */
-void igc_release_nvm(struct igc_hw *hw)
-{
- u32 eecd;
-
- eecd = rd32(IGC_EECD);
- eecd &= ~IGC_EECD_REQ;
- wr32(IGC_EECD, eecd);
-}
-
-/**
* igc_read_nvm_eerd - Reads EEPROM using EERD register
* @hw: pointer to the HW structure
* @offset: offset of word in the EEPROM to read
@@ -173,7 +123,7 @@ s32 igc_validate_nvm_checksum(struct igc_hw *hw)
checksum += nvm_data;
}
- if (checksum != (u16)NVM_SUM) {
+ if (checksum != NVM_SUM) {
hw_dbg("NVM Checksum Invalid\n");
ret_val = -IGC_ERR_NVM;
goto out;
@@ -205,7 +155,7 @@ s32 igc_update_nvm_checksum(struct igc_hw *hw)
}
checksum += nvm_data;
}
- checksum = (u16)NVM_SUM - checksum;
+ checksum = NVM_SUM - checksum;
ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
if (ret_val)
hw_dbg("NVM Write Error while updating checksum.\n");
diff --git a/drivers/net/ethernet/intel/igc/igc_nvm.h b/drivers/net/ethernet/intel/igc/igc_nvm.h
index f9fc2e9cfb03..ab78d0c64547 100644
--- a/drivers/net/ethernet/intel/igc/igc_nvm.h
+++ b/drivers/net/ethernet/intel/igc/igc_nvm.h
@@ -4,8 +4,6 @@
#ifndef _IGC_NVM_H_
#define _IGC_NVM_H_
-s32 igc_acquire_nvm(struct igc_hw *hw);
-void igc_release_nvm(struct igc_hw *hw);
s32 igc_read_mac_addr(struct igc_hw *hw);
s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data);
s32 igc_validate_nvm_checksum(struct igc_hw *hw);
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
index 2801e5f24df9..6c4d204aecfa 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.c
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -494,24 +494,12 @@ s32 igc_setup_copper_link(struct igc_hw *hw)
s32 ret_val = 0;
bool link;
- if (hw->mac.autoneg) {
- /* Setup autoneg and flow control advertisement and perform
- * autonegotiation.
- */
- ret_val = igc_copper_link_autoneg(hw);
- if (ret_val)
- goto out;
- } else {
- /* PHY will be set to 10H, 10F, 100H or 100F
- * depending on user settings.
- */
- hw_dbg("Forcing Speed and Duplex\n");
- ret_val = hw->phy.ops.force_speed_duplex(hw);
- if (ret_val) {
- hw_dbg("Error Forcing Speed and Duplex\n");
- goto out;
- }
- }
+ /* Setup autoneg and flow control advertisement and perform
+ * autonegotiation.
+ */
+ ret_val = igc_copper_link_autoneg(hw);
+ if (ret_val)
+ goto out;
/* Check link status. Wait up to 100 microseconds for link to become
* valid.
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 946edbad4302..b7b46d863bee 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -257,13 +257,6 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* Reject requests failing to enable both edges. */
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
@@ -300,10 +293,6 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
return 0;
case PTP_CLK_REQ_PEROUT:
- /* Reject requests with unsupported flags */
- if (rq->perout.flags)
- return -EOPNOTSUPP;
-
if (on) {
pin = ptp_find_pin(igc->ptp_clock, PTP_PF_PEROUT,
rq->perout.index);
@@ -637,7 +626,7 @@ static void igc_ptp_enable_tx_timestamp(struct igc_adapter *adapter)
* Return: 0 in case of success, negative errno code otherwise.
*/
static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
@@ -864,48 +853,46 @@ void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter)
}
/**
- * igc_ptp_set_ts_config - set hardware time stamping config
+ * igc_ptp_hwtstamp_set - set hardware time stamping config
* @netdev: network interface device structure
- * @ifr: interface request data
+ * @config: timestamping configuration structure
+ * @extack: netlink extended ack structure for error reporting
*
**/
-int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
+int igc_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct igc_adapter *adapter = netdev_priv(netdev);
- struct hwtstamp_config config;
int err;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- err = igc_ptp_set_timestamp_mode(adapter, &config);
+ err = igc_ptp_set_timestamp_mode(adapter, config);
if (err)
return err;
/* save these settings for future reference */
- memcpy(&adapter->tstamp_config, &config,
- sizeof(adapter->tstamp_config));
+ adapter->tstamp_config = *config;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
/**
- * igc_ptp_get_ts_config - get hardware time stamping config
+ * igc_ptp_hwtstamp_get - get hardware time stamping config
* @netdev: network interface device structure
- * @ifr: interface request data
+ * @config: timestamping configuration structure
*
* Get the hwtstamp_config settings to return to the user. Rather than attempt
* to deconstruct the settings from the registers, just return a shadow copy
* of the last known settings.
**/
-int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
+int igc_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
struct igc_adapter *adapter = netdev_priv(netdev);
- struct hwtstamp_config *config = &adapter->tstamp_config;
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
- -EFAULT : 0;
+ *config = adapter->tstamp_config;
+
+ return 0;
}
/* The two conditions below must be met for cross timestamping via
@@ -974,45 +961,62 @@ static void igc_ptm_log_error(struct igc_adapter *adapter, u32 ptm_stat)
}
}
+/* The PTM lock: adapter->ptm_lock must be held when calling igc_ptm_trigger() */
+static void igc_ptm_trigger(struct igc_hw *hw)
+{
+ u32 ctrl;
+
+ /* To "manually" start the PTM cycle we need to set the
+ * trigger (TRIG) bit
+ */
+ ctrl = rd32(IGC_PTM_CTRL);
+ ctrl |= IGC_PTM_CTRL_TRIG;
+ wr32(IGC_PTM_CTRL, ctrl);
+ /* Perform flush after write to CTRL register otherwise
+ * transaction may not start
+ */
+ wrfl();
+}
+
+/* The PTM lock: adapter->ptm_lock must be held when calling igc_ptm_reset() */
+static void igc_ptm_reset(struct igc_hw *hw)
+{
+ u32 ctrl;
+
+ ctrl = rd32(IGC_PTM_CTRL);
+ ctrl &= ~IGC_PTM_CTRL_TRIG;
+ wr32(IGC_PTM_CTRL, ctrl);
+ /* Write to clear all status */
+ wr32(IGC_PTM_STAT, IGC_PTM_STAT_ALL);
+}
+
static int igc_phc_get_syncdevicetime(ktime_t *device,
struct system_counterval_t *system,
void *ctx)
{
- u32 stat, t2_curr_h, t2_curr_l, ctrl;
struct igc_adapter *adapter = ctx;
struct igc_hw *hw = &adapter->hw;
+ u32 stat, t2_curr_h, t2_curr_l;
int err, count = 100;
ktime_t t1, t2_curr;
- /* Get a snapshot of system clocks to use as historic value. */
- ktime_get_snapshot(&adapter->snapshot);
-
+ /* Doing this in a loop because in the event of a
+ * badly timed (ha!) system clock adjustment, we may
+ * get PTM errors from the PCI root, but these errors
+ * are transitory. Repeating the process returns valid
+ * data eventually.
+ */
do {
- /* Doing this in a loop because in the event of a
- * badly timed (ha!) system clock adjustment, we may
- * get PTM errors from the PCI root, but these errors
- * are transitory. Repeating the process returns valid
- * data eventually.
- */
-
- /* To "manually" start the PTM cycle we need to clear and
- * then set again the TRIG bit.
- */
- ctrl = rd32(IGC_PTM_CTRL);
- ctrl &= ~IGC_PTM_CTRL_TRIG;
- wr32(IGC_PTM_CTRL, ctrl);
- ctrl |= IGC_PTM_CTRL_TRIG;
- wr32(IGC_PTM_CTRL, ctrl);
+ /* Get a snapshot of system clocks to use as historic value. */
+ ktime_get_snapshot(&adapter->snapshot);
- /* The cycle only starts "for real" when software notifies
- * that it has read the registers, this is done by setting
- * VALID bit.
- */
- wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID);
+ igc_ptm_trigger(hw);
err = readx_poll_timeout(rd32, IGC_PTM_STAT, stat,
stat, IGC_PTM_STAT_SLEEP,
IGC_PTM_STAT_TIMEOUT);
+ igc_ptm_reset(hw);
+
if (err < 0) {
netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n");
return err;
@@ -1021,15 +1025,7 @@ static int igc_phc_get_syncdevicetime(ktime_t *device,
if ((stat & IGC_PTM_STAT_VALID) == IGC_PTM_STAT_VALID)
break;
- if (stat & ~IGC_PTM_STAT_VALID) {
- /* An error occurred, log it. */
- igc_ptm_log_error(adapter, stat);
- /* The STAT register is write-1-to-clear (W1C),
- * so write the previous error status to clear it.
- */
- wr32(IGC_PTM_STAT, stat);
- continue;
- }
+ igc_ptm_log_error(adapter, stat);
} while (--count);
if (!count) {
@@ -1061,9 +1057,16 @@ static int igc_ptp_getcrosststamp(struct ptp_clock_info *ptp,
{
struct igc_adapter *adapter = container_of(ptp, struct igc_adapter,
ptp_caps);
+ int ret;
- return get_device_system_crosststamp(igc_phc_get_syncdevicetime,
- adapter, &adapter->snapshot, cts);
+ /* This blocks until any in progress PTM transactions complete */
+ mutex_lock(&adapter->ptm_lock);
+
+ ret = get_device_system_crosststamp(igc_phc_get_syncdevicetime,
+ adapter, &adapter->snapshot, cts);
+ mutex_unlock(&adapter->ptm_lock);
+
+ return ret;
}
static int igc_ptp_getcyclesx64(struct ptp_clock_info *ptp,
@@ -1146,6 +1149,9 @@ void igc_ptp_init(struct igc_adapter *adapter)
adapter->ptp_caps.pin_config = adapter->sdp_config;
adapter->ptp_caps.n_ext_ts = IGC_N_EXTTS;
adapter->ptp_caps.n_per_out = IGC_N_PEROUT;
+ adapter->ptp_caps.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
adapter->ptp_caps.n_pins = IGC_N_SDP;
adapter->ptp_caps.verify = igc_ptp_verify_pin;
@@ -1162,6 +1168,7 @@ void igc_ptp_init(struct igc_adapter *adapter)
spin_lock_init(&adapter->ptp_tx_lock);
spin_lock_init(&adapter->free_timer_lock);
spin_lock_init(&adapter->tmreg_lock);
+ mutex_init(&adapter->ptm_lock);
adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
@@ -1174,6 +1181,7 @@ void igc_ptp_init(struct igc_adapter *adapter)
if (IS_ERR(adapter->ptp_clock)) {
adapter->ptp_clock = NULL;
netdev_err(netdev, "ptp_clock_register failed\n");
+ mutex_destroy(&adapter->ptm_lock);
} else if (adapter->ptp_clock) {
netdev_info(netdev, "PHC added\n");
adapter->ptp_flags |= IGC_PTP_ENABLED;
@@ -1203,10 +1211,12 @@ static void igc_ptm_stop(struct igc_adapter *adapter)
struct igc_hw *hw = &adapter->hw;
u32 ctrl;
+ mutex_lock(&adapter->ptm_lock);
ctrl = rd32(IGC_PTM_CTRL);
ctrl &= ~IGC_PTM_CTRL_EN;
wr32(IGC_PTM_CTRL, ctrl);
+ mutex_unlock(&adapter->ptm_lock);
}
/**
@@ -1237,13 +1247,18 @@ void igc_ptp_suspend(struct igc_adapter *adapter)
**/
void igc_ptp_stop(struct igc_adapter *adapter)
{
+ if (!(adapter->ptp_flags & IGC_PTP_ENABLED))
+ return;
+
igc_ptp_suspend(adapter);
+ adapter->ptp_flags &= ~IGC_PTP_ENABLED;
if (adapter->ptp_clock) {
ptp_clock_unregister(adapter->ptp_clock);
netdev_info(adapter->netdev, "PHC removed\n");
adapter->ptp_flags &= ~IGC_PTP_ENABLED;
}
+ mutex_destroy(&adapter->ptm_lock);
}
/**
@@ -1255,13 +1270,18 @@ void igc_ptp_stop(struct igc_adapter *adapter)
void igc_ptp_reset(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
- u32 cycle_ctrl, ctrl;
+ u32 cycle_ctrl, ctrl, stat;
unsigned long flags;
u32 timadj;
+ if (!(adapter->ptp_flags & IGC_PTP_ENABLED))
+ return;
+
/* reset the tstamp_config */
igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
+ mutex_lock(&adapter->ptm_lock);
+
spin_lock_irqsave(&adapter->tmreg_lock, flags);
switch (adapter->hw.mac.type) {
@@ -1290,14 +1310,19 @@ void igc_ptp_reset(struct igc_adapter *adapter)
ctrl = IGC_PTM_CTRL_EN |
IGC_PTM_CTRL_START_NOW |
IGC_PTM_CTRL_SHRT_CYC(IGC_PTM_SHORT_CYC_DEFAULT) |
- IGC_PTM_CTRL_PTM_TO(IGC_PTM_TIMEOUT_DEFAULT) |
- IGC_PTM_CTRL_TRIG;
+ IGC_PTM_CTRL_PTM_TO(IGC_PTM_TIMEOUT_DEFAULT);
wr32(IGC_PTM_CTRL, ctrl);
/* Force the first cycle to run. */
- wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID);
+ igc_ptm_trigger(hw);
+ if (readx_poll_timeout_atomic(rd32, IGC_PTM_STAT, stat,
+ stat, IGC_PTM_STAT_SLEEP,
+ IGC_PTM_STAT_TIMEOUT))
+ netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n");
+
+ igc_ptm_reset(hw);
break;
default:
/* No work to do. */
@@ -1314,5 +1339,7 @@ void igc_ptp_reset(struct igc_adapter *adapter)
out:
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+ mutex_unlock(&adapter->ptm_lock);
+
wrfl();
}
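The PTM hunks above split the old open-coded sequence into igc_ptm_trigger()/igc_ptm_reset() and serialize complete cycles with adapter->ptm_lock. Condensed into one place, the call pattern they converge on looks roughly like the sketch below; it assumes the helpers and lock introduced by this series and is not an additional function in the patch:

/* Sketch only: one serialized PTM cycle, composed from the helpers above. */
static int igc_ptm_run_one_cycle(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 stat;
	int err;

	mutex_lock(&adapter->ptm_lock);		/* no concurrent PTM cycles */

	igc_ptm_trigger(hw);			/* set TRIG, flush the write */
	err = readx_poll_timeout(rd32, IGC_PTM_STAT, stat,
				 stat, IGC_PTM_STAT_SLEEP,
				 IGC_PTM_STAT_TIMEOUT);
	igc_ptm_reset(hw);			/* clear TRIG, W1C the status */

	mutex_unlock(&adapter->ptm_lock);
	return err;
}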
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index 12ddc5793651..f343c6bfc6be 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -222,6 +222,22 @@
#define IGC_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */
+/* Time sync registers - preemption statistics */
+#define IGC_PRMPTDRCNT 0x04284 /* Good RX Preempted Packets */
+#define IGC_PRMEVNTTCNT 0x04298 /* TX Preemption event counter */
+#define IGC_PRMEVNTRCNT 0x0429C /* RX Preemption event counter */
+
+/* Preemption Exception Counter */
+#define IGC_PRMEXCPRCNT 0x42A0
+/* Received out of order packets with SMD-C */
+#define IGC_PRMEXCPRCNT_OOO_SMDC 0x000000FF
+/* Received out of order packets with SMD-C and wrong Frame CNT */
+#define IGC_PRMEXCPRCNT_OOO_FRAME_CNT 0x0000FF00
+/* Received out of order packets with SMD-C and wrong Frag CNT */
+#define IGC_PRMEXCPRCNT_OOO_FRAG_CNT 0x00FF0000
+/* Received packets with SMD-S and wrong Frag CNT and Frame CNT */
+#define IGC_PRMEXCPRCNT_MISS_FRAME_FRAG_CNT 0xFF000000
+
/* Transmit Scheduling Registers */
#define IGC_TQAVCTRL 0x3570
#define IGC_TXQCTL(_n) (0x3344 + 0x4 * (_n))
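The IGC_PRMEXCPRCNT masks above pack four 8-bit counters into one register. Where the statistics code consumes them (elsewhere in the series), the natural way to unpack the fields is FIELD_GET() from <linux/bitfield.h>; a sketch, assuming the usual igc rd32() helper is in scope:

/* Sketch only: unpacking the preemption exception counters defined above. */
static void igc_read_prmexcprcnt(struct igc_hw *hw)
{
	u32 val = rd32(IGC_PRMEXCPRCNT);

	pr_debug("ooo smd-c: %u, ooo frame cnt: %u, ooo frag cnt: %u, miss frame/frag: %u\n",
		 (u32)FIELD_GET(IGC_PRMEXCPRCNT_OOO_SMDC, val),
		 (u32)FIELD_GET(IGC_PRMEXCPRCNT_OOO_FRAME_CNT, val),
		 (u32)FIELD_GET(IGC_PRMEXCPRCNT_OOO_FRAG_CNT, val),
		 (u32)FIELD_GET(IGC_PRMEXCPRCNT_MISS_FRAME_FRAG_CNT, val));
}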
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
index 1e44374ca1ff..8a110145bfee 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
@@ -2,9 +2,206 @@
/* Copyright (c) 2019 Intel Corporation */
#include "igc.h"
+#include "igc_base.h"
#include "igc_hw.h"
#include "igc_tsn.h"
+#define MIN_MULTPLIER_TX_MIN_FRAG 0
+#define MAX_MULTPLIER_TX_MIN_FRAG 3
+/* Frag size is based on Section 8.12.2 of the SW User Manual */
+#define TX_MIN_FRAG_SIZE 64
+#define TX_MAX_FRAG_SIZE (TX_MIN_FRAG_SIZE * \
+ (MAX_MULTPLIER_TX_MIN_FRAG + 1))
+
+enum tx_queue {
+ TX_QUEUE_0 = 0,
+ TX_QUEUE_1,
+ TX_QUEUE_2,
+ TX_QUEUE_3,
+};
+
+DEFINE_STATIC_KEY_FALSE(igc_fpe_enabled);
+
+static int igc_fpe_init_smd_frame(struct igc_ring *ring,
+ struct igc_tx_buffer *buffer,
+ struct sk_buff *skb)
+{
+ dma_addr_t dma = dma_map_single(ring->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(ring->dev, dma)) {
+ netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
+ return -ENOMEM;
+ }
+
+ buffer->skb = skb;
+ buffer->protocol = 0;
+ buffer->bytecount = skb->len;
+ buffer->gso_segs = 1;
+ buffer->time_stamp = jiffies;
+ dma_unmap_len_set(buffer, len, skb->len);
+ dma_unmap_addr_set(buffer, dma, dma);
+
+ return 0;
+}
+
+static int igc_fpe_init_tx_descriptor(struct igc_ring *ring,
+ struct sk_buff *skb,
+ enum igc_txd_popts_type type)
+{
+ u32 cmd_type, olinfo_status = 0;
+ struct igc_tx_buffer *buffer;
+ union igc_adv_tx_desc *desc;
+ int err;
+
+ if (!igc_desc_unused(ring))
+ return -EBUSY;
+
+ buffer = &ring->tx_buffer_info[ring->next_to_use];
+ err = igc_fpe_init_smd_frame(ring, buffer, skb);
+ if (err)
+ return err;
+
+ cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
+ IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
+ buffer->bytecount;
+
+ olinfo_status |= FIELD_PREP(IGC_ADVTXD_PAYLEN_MASK, buffer->bytecount);
+
+ switch (type) {
+ case SMD_V:
+ case SMD_R:
+ olinfo_status |= FIELD_PREP(IGC_TXD_POPTS_SMD_MASK, type);
+ break;
+ }
+
+ desc = IGC_TX_DESC(ring, ring->next_to_use);
+ desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+ desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma));
+
+ netdev_tx_sent_queue(txring_txq(ring), skb->len);
+
+ buffer->next_to_watch = desc;
+ ring->next_to_use = (ring->next_to_use + 1) % ring->count;
+
+ return 0;
+}
+
+static int igc_fpe_xmit_smd_frame(struct igc_adapter *adapter,
+ enum igc_txd_popts_type type)
+{
+ int cpu = smp_processor_id();
+ struct netdev_queue *nq;
+ struct igc_ring *ring;
+ struct sk_buff *skb;
+ int err;
+
+ ring = igc_get_tx_ring(adapter, cpu);
+ nq = txring_txq(ring);
+
+ skb = alloc_skb(SMD_FRAME_SIZE, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put_zero(skb, SMD_FRAME_SIZE);
+
+ __netif_tx_lock(nq, cpu);
+
+ err = igc_fpe_init_tx_descriptor(ring, skb, type);
+ igc_flush_tx_descriptors(ring);
+
+ __netif_tx_unlock(nq);
+
+ return err;
+}
+
+static void igc_fpe_configure_tx(struct ethtool_mmsv *mmsv, bool tx_enable)
+{
+ struct igc_fpe_t *fpe = container_of(mmsv, struct igc_fpe_t, mmsv);
+ struct igc_adapter *adapter;
+
+ adapter = container_of(fpe, struct igc_adapter, fpe);
+ adapter->fpe.tx_enabled = tx_enable;
+
+ /* Update config since tx_enabled affects preemptible queue configuration */
+ igc_tsn_offload_apply(adapter);
+}
+
+static void igc_fpe_send_mpacket(struct ethtool_mmsv *mmsv,
+ enum ethtool_mpacket type)
+{
+ struct igc_fpe_t *fpe = container_of(mmsv, struct igc_fpe_t, mmsv);
+ struct igc_adapter *adapter;
+ int err;
+
+ adapter = container_of(fpe, struct igc_adapter, fpe);
+
+ if (type == ETHTOOL_MPACKET_VERIFY) {
+ err = igc_fpe_xmit_smd_frame(adapter, SMD_V);
+ if (err && net_ratelimit())
+ netdev_err(adapter->netdev, "Error sending SMD-V\n");
+ } else if (type == ETHTOOL_MPACKET_RESPONSE) {
+ err = igc_fpe_xmit_smd_frame(adapter, SMD_R);
+ if (err && net_ratelimit())
+ netdev_err(adapter->netdev, "Error sending SMD-R frame\n");
+ }
+}
+
+static const struct ethtool_mmsv_ops igc_mmsv_ops = {
+ .configure_tx = igc_fpe_configure_tx,
+ .send_mpacket = igc_fpe_send_mpacket,
+};
+
+void igc_fpe_init(struct igc_adapter *adapter)
+{
+ adapter->fpe.tx_min_frag_size = TX_MIN_FRAG_SIZE;
+ adapter->fpe.tx_enabled = false;
+ ethtool_mmsv_init(&adapter->fpe.mmsv, adapter->netdev, &igc_mmsv_ops);
+}
+
+void igc_fpe_clear_preempt_queue(struct igc_adapter *adapter)
+{
+ for (int i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *tx_ring = adapter->tx_ring[i];
+
+ tx_ring->preemptible = false;
+ }
+}
+
+static u32 igc_fpe_map_preempt_tc_to_queue(const struct igc_adapter *adapter,
+ unsigned long preemptible_tcs)
+{
+ struct net_device *dev = adapter->netdev;
+ u32 i, queue = 0;
+
+ for (i = 0; i < dev->num_tc; i++) {
+ u32 offset, count;
+
+ if (!(preemptible_tcs & BIT(i)))
+ continue;
+
+ offset = dev->tc_to_txq[i].offset;
+ count = dev->tc_to_txq[i].count;
+ queue |= GENMASK(offset + count - 1, offset);
+ }
+
+ return queue;
+}
+
+void igc_fpe_save_preempt_queue(struct igc_adapter *adapter,
+ const struct tc_mqprio_qopt_offload *mqprio)
+{
+ u32 preemptible_queue = igc_fpe_map_preempt_tc_to_queue(adapter,
+ mqprio->preemptible_tcs);
+
+ for (int i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *tx_ring = adapter->tx_ring[i];
+
+ tx_ring->preemptible = !!(preemptible_queue & BIT(i));
+ }
+}
+
static bool is_any_launchtime(struct igc_adapter *adapter)
{
int i;
@@ -37,17 +234,16 @@ static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter)
{
unsigned int new_flags = adapter->flags & ~IGC_FLAG_TSN_ANY_ENABLED;
- if (adapter->taprio_offload_enable)
- new_flags |= IGC_FLAG_TSN_QBV_ENABLED;
- if (is_any_launchtime(adapter))
+ if (adapter->taprio_offload_enable || is_any_launchtime(adapter) ||
+ adapter->strict_priority_enable)
new_flags |= IGC_FLAG_TSN_QBV_ENABLED;
if (is_cbs_enabled(adapter))
new_flags |= IGC_FLAG_TSN_QAV_ENABLED;
- if (adapter->strict_priority_enable)
- new_flags |= IGC_FLAG_TSN_LEGACY_ENABLED;
+ if (adapter->fpe.mmsv.pmac_enabled)
+ new_flags |= IGC_FLAG_TSN_PREEMPT_ENABLED;
return new_flags;
}
@@ -105,7 +301,7 @@ bool igc_tsn_is_taprio_activated_by_user(struct igc_adapter *adapter)
adapter->taprio_offload_enable;
}
-static void igc_tsn_tx_arb(struct igc_adapter *adapter, u16 *queue_per_tc)
+static void igc_tsn_tx_arb(struct igc_adapter *adapter, bool reverse_prio)
{
struct igc_hw *hw = &adapter->hw;
u32 txarb;
@@ -117,56 +313,91 @@ static void igc_tsn_tx_arb(struct igc_adapter *adapter, u16 *queue_per_tc)
IGC_TXARB_TXQ_PRIO_2_MASK |
IGC_TXARB_TXQ_PRIO_3_MASK);
- txarb |= IGC_TXARB_TXQ_PRIO_0(queue_per_tc[3]);
- txarb |= IGC_TXARB_TXQ_PRIO_1(queue_per_tc[2]);
- txarb |= IGC_TXARB_TXQ_PRIO_2(queue_per_tc[1]);
- txarb |= IGC_TXARB_TXQ_PRIO_3(queue_per_tc[0]);
+ if (reverse_prio) {
+ txarb |= IGC_TXARB_TXQ_PRIO_0(TX_QUEUE_3);
+ txarb |= IGC_TXARB_TXQ_PRIO_1(TX_QUEUE_2);
+ txarb |= IGC_TXARB_TXQ_PRIO_2(TX_QUEUE_1);
+ txarb |= IGC_TXARB_TXQ_PRIO_3(TX_QUEUE_0);
+ } else {
+ txarb |= IGC_TXARB_TXQ_PRIO_0(TX_QUEUE_0);
+ txarb |= IGC_TXARB_TXQ_PRIO_1(TX_QUEUE_1);
+ txarb |= IGC_TXARB_TXQ_PRIO_2(TX_QUEUE_2);
+ txarb |= IGC_TXARB_TXQ_PRIO_3(TX_QUEUE_3);
+ }
wr32(IGC_TXARB, txarb);
}
+/**
+ * igc_tsn_set_rxpbsize - Set the receive packet buffer size
+ * @adapter: Pointer to the igc_adapter structure
+ * @rxpbs_exp_bmc_be: Value to set the receive packet buffer size, including
+ * express buffer, BMC buffer, and Best Effort buffer
+ *
+ * The IGC_RXPBS register value may include allocations for the Express buffer,
+ * BMC buffer, Best Effort buffer, and the timestamp descriptor buffer
+ * (IGC_RXPBS_CFG_TS_EN).
+ */
+static void igc_tsn_set_rxpbsize(struct igc_adapter *adapter,
+ u32 rxpbs_exp_bmc_be)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 rxpbs = rd32(IGC_RXPBS);
+
+ rxpbs &= ~(IGC_RXPBSIZE_EXP_MASK | IGC_BMC2OSPBSIZE_MASK |
+ IGC_RXPBSIZE_BE_MASK);
+ rxpbs |= rxpbs_exp_bmc_be;
+
+ wr32(IGC_RXPBS, rxpbs);
+}
+
/* Returns the TSN specific registers to their default values after
* the adapter is reset.
*/
static int igc_tsn_disable_offload(struct igc_adapter *adapter)
{
- u16 queue_per_tc[4] = { 3, 2, 1, 0 };
struct igc_hw *hw = &adapter->hw;
u32 tqavctrl;
int i;
wr32(IGC_GTXOFFSET, 0);
- wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
+ wr32(IGC_TXPBS, IGC_TXPBSIZE_DEFAULT);
wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT);
+ igc_tsn_set_rxpbsize(adapter, IGC_RXPBSIZE_EXP_BMC_DEFAULT);
+
if (igc_is_device_id_i226(hw))
igc_tsn_restore_retx_default(adapter);
tqavctrl = rd32(IGC_TQAVCTRL);
tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN |
- IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS);
+ IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS |
+ IGC_TQAVCTRL_PREEMPT_ENA | IGC_TQAVCTRL_MIN_FRAG_MASK);
wr32(IGC_TQAVCTRL, tqavctrl);
for (i = 0; i < adapter->num_tx_queues; i++) {
+ int reg_idx = adapter->tx_ring[i]->reg_idx;
+ u32 txdctl;
+
wr32(IGC_TXQCTL(i), 0);
wr32(IGC_STQT(i), 0);
wr32(IGC_ENDQT(i), NSEC_PER_SEC);
+
+ txdctl = rd32(IGC_TXDCTL(reg_idx));
+ txdctl &= ~IGC_TXDCTL_PRIORITY_HIGH;
+ wr32(IGC_TXDCTL(reg_idx), txdctl);
}
wr32(IGC_QBVCYCLET_S, 0);
wr32(IGC_QBVCYCLET, NSEC_PER_SEC);
- /* Reset mqprio TC configuration. */
- netdev_reset_tc(adapter->netdev);
-
/* Restore the default Tx arbitration: Priority 0 has the highest
* priority and is assigned to queue 0 and so on and so forth.
*/
- igc_tsn_tx_arb(adapter, queue_per_tc);
+ igc_tsn_tx_arb(adapter, false);
adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED;
- adapter->flags &= ~IGC_FLAG_TSN_LEGACY_ENABLED;
return 0;
}
@@ -190,57 +421,53 @@ static void igc_tsn_set_retx_qbvfullthreshold(struct igc_adapter *adapter)
wr32(IGC_RETX_CTL, retxctl);
}
+static u8 igc_fpe_get_frag_size_mult(const struct igc_fpe_t *fpe)
+{
+ u8 mult = (fpe->tx_min_frag_size / TX_MIN_FRAG_SIZE) - 1;
+
+ return clamp_t(u8, mult, MIN_MULTPLIER_TX_MIN_FRAG,
+ MAX_MULTPLIER_TX_MIN_FRAG);
+}
+
+u32 igc_fpe_get_supported_frag_size(u32 frag_size)
+{
+ static const u32 supported_sizes[] = { 64, 128, 192, 256 };
+
+ /* Find the smallest supported size that is >= frag_size */
+ for (int i = 0; i < ARRAY_SIZE(supported_sizes); i++) {
+ if (frag_size <= supported_sizes[i])
+ return supported_sizes[i];
+ }
+
+ /* Should not happen */
+ return TX_MAX_FRAG_SIZE;
+}
+
static int igc_tsn_enable_offload(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
u32 tqavctrl, baset_l, baset_h;
u32 sec, nsec, cycle;
ktime_t base_time, systim;
+ u32 frag_size_mult;
int i;
wr32(IGC_TSAUXC, 0);
wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN);
wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN);
+ igc_tsn_set_rxpbsize(adapter, IGC_RXPBSIZE_EXP_BMC_BE_TSN);
+
if (igc_is_device_id_i226(hw))
igc_tsn_set_retx_qbvfullthreshold(adapter);
- if (adapter->strict_priority_enable) {
- int err;
-
- err = netdev_set_num_tc(adapter->netdev, adapter->num_tc);
- if (err)
- return err;
-
- for (i = 0; i < adapter->num_tc; i++) {
- err = netdev_set_tc_queue(adapter->netdev, i, 1,
- adapter->queue_per_tc[i]);
- if (err)
- return err;
- }
-
- /* In case the card is configured with less than four queues. */
- for (; i < IGC_MAX_TX_QUEUES; i++)
- adapter->queue_per_tc[i] = i;
-
- /* Configure queue priorities according to the user provided
- * mapping.
- */
- igc_tsn_tx_arb(adapter, adapter->queue_per_tc);
-
- /* Enable legacy TSN mode which will do strict priority without
- * any other TSN features.
- */
- tqavctrl = rd32(IGC_TQAVCTRL);
- tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN;
- tqavctrl &= ~IGC_TQAVCTRL_ENHANCED_QAV;
- wr32(IGC_TQAVCTRL, tqavctrl);
-
- return 0;
- }
+ if (adapter->strict_priority_enable ||
+ adapter->flags & IGC_FLAG_TSN_REVERSE_TXQ_PRIO)
+ igc_tsn_tx_arb(adapter, true);
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *ring = adapter->tx_ring[i];
+ u32 txdctl = rd32(IGC_TXDCTL(ring->reg_idx));
u32 txqctl = 0;
u16 cbs_value;
u32 tqavcc;
@@ -274,6 +501,22 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
if (ring->launchtime_enable)
txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT;
+ if (!adapter->fpe.tx_enabled) {
+ /* fpe inactive: clear both flags */
+ txqctl &= ~IGC_TXQCTL_PREEMPTIBLE;
+ txdctl &= ~IGC_TXDCTL_PRIORITY_HIGH;
+ } else if (ring->preemptible) {
+ /* fpe active + preemptible: enable preemptible queue + set low priority */
+ txqctl |= IGC_TXQCTL_PREEMPTIBLE;
+ txdctl &= ~IGC_TXDCTL_PRIORITY_HIGH;
+ } else {
+ /* fpe active + express: enable express queue + set high priority */
+ txqctl &= ~IGC_TXQCTL_PREEMPTIBLE;
+ txdctl |= IGC_TXDCTL_PRIORITY_HIGH;
+ }
+
+ wr32(IGC_TXDCTL(ring->reg_idx), txdctl);
+
/* Skip configuring CBS for Q2 and Q3 */
if (i > 1)
goto skip_cbs;
@@ -361,10 +604,16 @@ skip_cbs:
wr32(IGC_TXQCTL(i), txqctl);
}
- tqavctrl = rd32(IGC_TQAVCTRL) & ~IGC_TQAVCTRL_FUTSCDDIS;
-
+ tqavctrl = rd32(IGC_TQAVCTRL) & ~(IGC_TQAVCTRL_FUTSCDDIS |
+ IGC_TQAVCTRL_PREEMPT_ENA | IGC_TQAVCTRL_MIN_FRAG_MASK);
tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV;
+ if (adapter->fpe.mmsv.pmac_enabled)
+ tqavctrl |= IGC_TQAVCTRL_PREEMPT_ENA;
+
+ frag_size_mult = igc_fpe_get_frag_size_mult(&adapter->fpe);
+ tqavctrl |= FIELD_PREP(IGC_TQAVCTRL_MIN_FRAG_MASK, frag_size_mult);
+
adapter->qbv_count++;
cycle = adapter->cycle_time;
@@ -425,6 +674,14 @@ int igc_tsn_reset(struct igc_adapter *adapter)
unsigned int new_flags;
int err = 0;
+ if (adapter->fpe.mmsv.pmac_enabled) {
+ err = igc_enable_empty_addr_recv(adapter);
+ if (err && net_ratelimit())
+ netdev_err(adapter->netdev, "Error adding empty address to MAC filter\n");
+ } else {
+ igc_disable_empty_addr_recv(adapter);
+ }
+
new_flags = igc_tsn_new_flags(adapter);
if (!(new_flags & IGC_FLAG_TSN_ANY_ENABLED))
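For the fragment-size plumbing added above, the mapping is: a requested minimum fragment size is rounded up to the nearest supported size (64, 128, 192 or 256 bytes), and the multiplier written into IGC_TQAVCTRL is size/64 - 1, clamped to 0..3. A quick stand-alone check of that arithmetic (user-space C; illustrative only):

#include <stdio.h>

/* Mirrors igc_fpe_get_supported_frag_size() + igc_fpe_get_frag_size_mult(). */
static unsigned int supported_frag_size(unsigned int req)
{
	for (unsigned int s = 64; s <= 256; s += 64)
		if (req <= s)
			return s;
	return 256;	/* TX_MAX_FRAG_SIZE fallback, "should not happen" */
}

int main(void)
{
	const unsigned int req[] = { 60, 64, 100, 192, 300 };

	for (unsigned int i = 0; i < sizeof(req) / sizeof(req[0]); i++) {
		unsigned int size = supported_frag_size(req[i]);

		printf("req=%u -> size=%u mult=%u\n", req[i], size, size / 64 - 1);
	}
	return 0;
}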
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.h b/drivers/net/ethernet/intel/igc/igc_tsn.h
index 98ec845a86bf..a95b893459d7 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.h
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.h
@@ -4,9 +4,66 @@
#ifndef _IGC_TSN_H_
#define _IGC_TSN_H_
+#include <net/pkt_sched.h>
+
+#define IGC_RX_MIN_FRAG_SIZE 60
+#define SMD_FRAME_SIZE 60
+
+enum igc_txd_popts_type {
+ SMD_V = 0x01,
+ SMD_R = 0x02,
+};
+
+DECLARE_STATIC_KEY_FALSE(igc_fpe_enabled);
+
+void igc_fpe_init(struct igc_adapter *adapter);
+void igc_fpe_clear_preempt_queue(struct igc_adapter *adapter);
+void igc_fpe_save_preempt_queue(struct igc_adapter *adapter,
+ const struct tc_mqprio_qopt_offload *mqprio);
+u32 igc_fpe_get_supported_frag_size(u32 frag_size);
int igc_tsn_offload_apply(struct igc_adapter *adapter);
int igc_tsn_reset(struct igc_adapter *adapter);
void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter);
bool igc_tsn_is_taprio_activated_by_user(struct igc_adapter *adapter);
+static inline bool igc_fpe_is_pmac_enabled(struct igc_adapter *adapter)
+{
+ return static_branch_unlikely(&igc_fpe_enabled) &&
+ adapter->fpe.mmsv.pmac_enabled;
+}
+
+static inline bool igc_fpe_handle_mpacket(struct igc_adapter *adapter,
+ union igc_adv_rx_desc *rx_desc,
+ unsigned int size, void *pktbuf)
+{
+ u32 status_error = le32_to_cpu(rx_desc->wb.upper.status_error);
+ int smd;
+
+ smd = FIELD_GET(IGC_RXDADV_STAT_SMD_TYPE_MASK, status_error);
+ if (smd != IGC_RXD_STAT_SMD_TYPE_V && smd != IGC_RXD_STAT_SMD_TYPE_R)
+ return false;
+
+ if (size == SMD_FRAME_SIZE && mem_is_zero(pktbuf, SMD_FRAME_SIZE)) {
+ struct ethtool_mmsv *mmsv = &adapter->fpe.mmsv;
+ enum ethtool_mmsv_event event;
+
+ if (smd == IGC_RXD_STAT_SMD_TYPE_V)
+ event = ETHTOOL_MMSV_LP_SENT_VERIFY_MPACKET;
+ else
+ event = ETHTOOL_MMSV_LP_SENT_RESPONSE_MPACKET;
+
+ ethtool_mmsv_event_handle(mmsv, event);
+ }
+
+ return true;
+}
+
+static inline bool igc_fpe_transmitted_smd_v(union igc_adv_tx_desc *tx_desc)
+{
+ u32 olinfo_status = le32_to_cpu(tx_desc->read.olinfo_status);
+ u8 smd = FIELD_GET(IGC_TXD_POPTS_SMD_MASK, olinfo_status);
+
+ return smd == SMD_V;
+}
+
#endif /* _IGC_TSN_H_ */
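The inline helpers above are written to be cheap on the RX hot path: igc_fpe_is_pmac_enabled() hides everything behind a static key, and igc_fpe_handle_mpacket() only consumes zero-filled 60-byte SMD-V/SMD-R frames. Their intended call shape is roughly the sketch below; the real call site lives in the RX clean routine elsewhere in the series, so this is only an illustration:

/* Sketch only: how an RX completion handler would use the helpers above. */
static bool igc_rx_consume_smd_frame(struct igc_adapter *adapter,
				     union igc_adv_rx_desc *rx_desc,
				     unsigned int size, void *pktbuf)
{
	/* Zero cost unless frame preemption verification is enabled */
	if (!igc_fpe_is_pmac_enabled(adapter))
		return false;

	/* Feeds SMD-V/SMD-R events into the ethtool_mmsv state machine and
	 * tells the caller to drop the buffer instead of building an skb.
	 */
	return igc_fpe_handle_mpacket(adapter, rx_desc, size, pktbuf);
}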
diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.c b/drivers/net/ethernet/intel/igc/igc_xdp.c
index e27af72aada8..9eb47b4beb06 100644
--- a/drivers/net/ethernet/intel/igc/igc_xdp.c
+++ b/drivers/net/ethernet/intel/igc/igc_xdp.c
@@ -13,6 +13,8 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
struct net_device *dev = adapter->netdev;
bool if_running = netif_running(dev);
struct bpf_prog *old_prog;
+ bool need_update;
+ unsigned int i;
if (dev->mtu > ETH_DATA_LEN) {
/* For now, the driver doesn't support XDP functionality with
@@ -22,8 +24,14 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
return -EOPNOTSUPP;
}
- if (if_running)
- igc_close(dev);
+ need_update = !!adapter->xdp_prog != !!prog;
+ if (if_running && need_update) {
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ igc_disable_rx_ring(adapter->rx_ring[i]);
+ igc_disable_tx_ring(adapter->tx_ring[i]);
+ napi_disable(&adapter->rx_ring[i]->q_vector->napi);
+ }
+ }
old_prog = xchg(&adapter->xdp_prog, prog);
if (old_prog)
@@ -34,8 +42,13 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
else
xdp_features_clear_redirect_target(dev);
- if (if_running)
- igc_open(dev);
+ if (if_running && need_update) {
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ napi_enable(&adapter->rx_ring[i]->q_vector->napi);
+ igc_enable_tx_ring(adapter->tx_ring[i]);
+ igc_enable_rx_ring(adapter->rx_ring[i]);
+ }
+ }
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 965e5ce1b326..2e7738f41c58 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -1,15 +1,17 @@
# SPDX-License-Identifier: GPL-2.0
-# Copyright(c) 1999 - 2018 Intel Corporation.
+# Copyright(c) 1999 - 2024 Intel Corporation.
#
# Makefile for the Intel(R) 10GbE PCI Express ethernet driver
#
+subdir-ccflags-y += -I$(src)
obj-$(CONFIG_IXGBE) += ixgbe.o
ixgbe-y := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \
- ixgbe_xsk.o
+ ixgbe_xsk.o ixgbe_e610.o devlink/devlink.o ixgbe_fw_update.o \
+ devlink/region.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c b/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c
new file mode 100644
index 000000000000..d227f4d2a2d1
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c
@@ -0,0 +1,558 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025, Intel Corporation. */
+
+#include "ixgbe.h"
+#include "devlink.h"
+#include "ixgbe_fw_update.h"
+
+struct ixgbe_info_ctx {
+ char buf[128];
+ struct ixgbe_orom_info pending_orom;
+ struct ixgbe_nvm_info pending_nvm;
+ struct ixgbe_netlist_info pending_netlist;
+ struct ixgbe_hw_dev_caps dev_caps;
+};
+
+enum ixgbe_devlink_version_type {
+ IXGBE_DL_VERSION_RUNNING,
+ IXGBE_DL_VERSION_STORED
+};
+
+static void ixgbe_info_get_dsn(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx)
+{
+ u8 dsn[8];
+
+ /* Copy the DSN into an array in Big Endian format */
+ put_unaligned_be64(pci_get_dsn(adapter->pdev), dsn);
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "%8phD", dsn);
+}
+
+static void ixgbe_info_orom_ver(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_nvm_version nvm_ver;
+
+ ctx->buf[0] = '\0';
+
+ if (hw->mac.type == ixgbe_mac_e610) {
+ struct ixgbe_orom_info *orom = &adapter->hw.flash.orom;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_orom)
+ orom = &ctx->pending_orom;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
+ orom->major, orom->build, orom->patch);
+ return;
+ }
+
+ ixgbe_get_oem_prod_version(hw, &nvm_ver);
+ if (nvm_ver.oem_valid) {
+ snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x",
+ nvm_ver.oem_major, nvm_ver.oem_minor,
+ nvm_ver.oem_release);
+
+ return;
+ }
+
+ ixgbe_get_orom_version(hw, &nvm_ver);
+ if (nvm_ver.or_valid)
+ snprintf(ctx->buf, sizeof(ctx->buf), "%d.%d.%d",
+ nvm_ver.or_major, nvm_ver.or_build, nvm_ver.or_patch);
+}
+
+static void ixgbe_info_eetrack(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_nvm_version nvm_ver;
+
+ if (hw->mac.type == ixgbe_mac_e610) {
+ u32 eetrack = hw->flash.nvm.eetrack;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_nvm)
+ eetrack = ctx->pending_nvm.eetrack;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", eetrack);
+ return;
+ }
+
+ ixgbe_get_oem_prod_version(hw, &nvm_ver);
+
+ /* No ETRACK version for OEM */
+ if (nvm_ver.oem_valid) {
+ ctx->buf[0] = '\0';
+ return;
+ }
+
+ ixgbe_get_etk_id(hw, &nvm_ver);
+ snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm_ver.etk_id);
+}
+
+static void ixgbe_info_fw_api(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
+ hw->api_maj_ver, hw->api_min_ver, hw->api_patch);
+}
+
+static void ixgbe_info_fw_build(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", hw->fw_build);
+}
+
+static void ixgbe_info_fw_srev(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_nvm_info *nvm = &adapter->hw.flash.nvm;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_nvm)
+ nvm = &ctx->pending_nvm;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u", nvm->srev);
+}
+
+static void ixgbe_info_orom_srev(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_orom_info *orom = &adapter->hw.flash.orom;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_orom)
+ orom = &ctx->pending_orom;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u", orom->srev);
+}
+
+static void ixgbe_info_nvm_ver(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_nvm_info *nvm = &adapter->hw.flash.nvm;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_nvm)
+ nvm = &ctx->pending_nvm;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor);
+}
+
+static void ixgbe_info_netlist_ver(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_netlist_info *netlist = &adapter->hw.flash.netlist;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_netlist)
+ netlist = &ctx->pending_netlist;
+
+ /* The netlist version fields are BCD formatted */
+ snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x",
+ netlist->major, netlist->minor,
+ netlist->type >> 16, netlist->type & 0xFFFF,
+ netlist->rev, netlist->cust_ver);
+}
+
+static void ixgbe_info_netlist_build(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_netlist_info *netlist = &adapter->hw.flash.netlist;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_netlist)
+ netlist = &ctx->pending_netlist;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
+}
+
+static int ixgbe_set_ctx_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_info_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ bool *pending_orom, *pending_nvm, *pending_netlist;
+ int err;
+
+ err = ixgbe_discover_dev_caps(hw, &ctx->dev_caps);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unable to discover device capabilities");
+ return err;
+ }
+
+ pending_orom = &ctx->dev_caps.common_cap.nvm_update_pending_orom;
+ pending_nvm = &ctx->dev_caps.common_cap.nvm_update_pending_nvm;
+ pending_netlist = &ctx->dev_caps.common_cap.nvm_update_pending_netlist;
+
+ if (*pending_orom) {
+ err = ixgbe_get_inactive_orom_ver(hw, &ctx->pending_orom);
+ if (err)
+ *pending_orom = false;
+ }
+
+ if (*pending_nvm) {
+ err = ixgbe_get_inactive_nvm_ver(hw, &ctx->pending_nvm);
+ if (err)
+ *pending_nvm = false;
+ }
+
+ if (*pending_netlist) {
+ err = ixgbe_get_inactive_netlist_ver(hw, &ctx->pending_netlist);
+ if (err)
+ *pending_netlist = false;
+ }
+
+ return 0;
+}
+
+static int ixgbe_devlink_info_get_e610(struct ixgbe_adapter *adapter,
+ struct devlink_info_req *req,
+ struct ixgbe_info_ctx *ctx)
+{
+ int err;
+
+ ixgbe_info_fw_api(adapter, ctx);
+ err = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API,
+ ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_fw_build(adapter, ctx);
+ err = devlink_info_version_running_put(req, "fw.mgmt.build", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_fw_srev(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ err = devlink_info_version_running_put(req, "fw.mgmt.srev", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_orom_srev(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ err = devlink_info_version_running_put(req, "fw.undi.srev", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_nvm_ver(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ err = devlink_info_version_running_put(req, "fw.psid.api", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_netlist_ver(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ err = devlink_info_version_running_put(req, "fw.netlist", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_netlist_build(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ return devlink_info_version_running_put(req, "fw.netlist.build",
+ ctx->buf);
+}
+
+static int
+ixgbe_devlink_pending_info_get_e610(struct ixgbe_adapter *adapter,
+ struct devlink_info_req *req,
+ struct ixgbe_info_ctx *ctx)
+{
+ int err;
+
+ ixgbe_info_orom_ver(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ err = devlink_info_version_stored_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_UNDI,
+ ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_eetrack(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ err = devlink_info_version_stored_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID,
+ ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_fw_srev(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ err = devlink_info_version_stored_put(req, "fw.mgmt.srev", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_orom_srev(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ err = devlink_info_version_stored_put(req, "fw.undi.srev", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_nvm_ver(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ err = devlink_info_version_stored_put(req, "fw.psid.api", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_netlist_ver(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ err = devlink_info_version_stored_put(req, "fw.netlist", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_netlist_build(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ return devlink_info_version_stored_put(req, "fw.netlist.build",
+ ctx->buf);
+}
+
+static int ixgbe_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_info_ctx *ctx;
+ int err;
+
+ ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (hw->mac.type == ixgbe_mac_e610)
+ ixgbe_refresh_fw_version(adapter);
+
+ ixgbe_info_get_dsn(adapter, ctx);
+ err = devlink_info_serial_number_put(req, ctx->buf);
+ if (err)
+ goto free_ctx;
+
+ err = hw->eeprom.ops.read_pba_string(hw, ctx->buf, sizeof(ctx->buf));
+ if (err)
+ goto free_ctx;
+
+ err = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_ID,
+ ctx->buf);
+ if (err)
+ goto free_ctx;
+
+ ixgbe_info_orom_ver(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ err = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_UNDI,
+ ctx->buf);
+ if (err)
+ goto free_ctx;
+
+ ixgbe_info_eetrack(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ err = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID,
+ ctx->buf);
+ if (err || hw->mac.type != ixgbe_mac_e610)
+ goto free_ctx;
+
+ err = ixgbe_set_ctx_dev_caps(hw, ctx, extack);
+ if (err)
+ goto free_ctx;
+
+ err = ixgbe_devlink_info_get_e610(adapter, req, ctx);
+ if (err)
+ goto free_ctx;
+
+ err = ixgbe_devlink_pending_info_get_e610(adapter, req, ctx);
+free_ctx:
+ kfree(ctx);
+ return err;
+}
+
+/**
+ * ixgbe_devlink_reload_empr_start - Start EMP reset to activate new firmware
+ * @devlink: pointer to the devlink instance to reload
+ * @netns_change: if true, the network namespace is changing
+ * @action: the action to perform. Must be DEVLINK_RELOAD_ACTION_FW_ACTIVATE
+ * @limit: limits on what reload should do, such as not resetting
+ * @extack: netlink extended ACK structure
+ *
+ * Allow user to activate new Embedded Management Processor firmware by
+ * issuing device specific EMP reset. Called in response to
+ * a DEVLINK_CMD_RELOAD with the DEVLINK_RELOAD_ACTION_FW_ACTIVATE.
+ *
+ * Note that teardown and rebuild of the driver state happens automatically as
+ * part of an interrupt and watchdog task. This is because all physical
+ * functions on the device must be able to reset when an EMP reset occurs from
+ * any source.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_devlink_reload_empr_start(struct devlink *devlink,
+ bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 pending;
+ int err;
+
+ if (hw->mac.type != ixgbe_mac_e610)
+ return -EOPNOTSUPP;
+
+ err = ixgbe_get_pending_updates(adapter, &pending, extack);
+ if (err)
+ return err;
+
+ /* Pending is a bitmask of which flash banks have a pending update,
+ * including the main NVM bank, the Option ROM bank, and the netlist
+ * bank. If any of these bits are set, then there is a pending update
+ * waiting to be activated.
+ */
+ if (!pending) {
+ NL_SET_ERR_MSG_MOD(extack, "No pending firmware update");
+ return -ECANCELED;
+ }
+
+ if (adapter->fw_emp_reset_disabled) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "EMP reset is not available. To activate firmware, a reboot or power cycle is needed");
+ return -ECANCELED;
+ }
+
+ err = ixgbe_aci_nvm_update_empr(hw);
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to trigger EMP device reset to reload firmware");
+
+ return err;
+}
+
+/* Wait for 10 sec with a 0.5 sec tick; an EMPR takes no less than half a second. */
+#define IXGBE_DEVLINK_RELOAD_TIMEOUT_SEC 20
+
+/**
+ * ixgbe_devlink_reload_empr_finish - finishes EMP reset
+ * @devlink: pointer to the devlink instance
+ * @action: the action to perform.
+ * @limit: limits on what reload should do
+ * @actions_performed: actions performed
+ * @extack: netlink extended ACK structure
+ *
+ * Wait for new NVM to be loaded during EMP reset.
+ *
+ * Return: -ETIME when timer is exceeded, 0 on success.
+ */
+static int ixgbe_devlink_reload_empr_finish(struct devlink *devlink,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i = 0;
+ u32 fwsm;
+
+ do {
+ /* Right after triggering the EMP reset, the FWSM register may not
+ * be cleared yet, so begin the loop with a delay to avoid reading
+ * a stale value.
+ */
+ mdelay(500);
+
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
+
+ if (i++ >= IXGBE_DEVLINK_RELOAD_TIMEOUT_SEC)
+ return -ETIME;
+
+ } while (!(fwsm & IXGBE_FWSM_FW_VAL_BIT));
+
+ *actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
+
+ adapter->flags2 &= ~(IXGBE_FLAG2_API_MISMATCH |
+ IXGBE_FLAG2_FW_ROLLBACK);
+
+ return 0;
+}
+
+static const struct devlink_ops ixgbe_devlink_ops = {
+ .info_get = ixgbe_devlink_info_get,
+ .supported_flash_update_params =
+ DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
+ .flash_update = ixgbe_flash_pldm_image,
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
+ .reload_down = ixgbe_devlink_reload_empr_start,
+ .reload_up = ixgbe_devlink_reload_empr_finish,
+};
+
+/**
+ * ixgbe_allocate_devlink - Allocate devlink instance
+ * @dev: device to allocate devlink for
+ *
+ * Allocate a devlink instance for this physical function.
+ *
+ * Return: pointer to the device adapter structure on success,
+ * ERR_PTR(-ENOMEM) when allocation failed.
+ */
+struct ixgbe_adapter *ixgbe_allocate_devlink(struct device *dev)
+{
+ struct ixgbe_adapter *adapter;
+ struct devlink *devlink;
+
+ devlink = devlink_alloc(&ixgbe_devlink_ops, sizeof(*adapter), dev);
+ if (!devlink)
+ return ERR_PTR(-ENOMEM);
+
+ adapter = devlink_priv(devlink);
+ adapter->devlink = devlink;
+
+ return adapter;
+}
+
+/**
+ * ixgbe_devlink_set_switch_id - Set unique switch ID based on PCI DSN
+ * @adapter: pointer to the device adapter structure
+ * @ppid: struct with switch id information
+ */
+static void ixgbe_devlink_set_switch_id(struct ixgbe_adapter *adapter,
+ struct netdev_phys_item_id *ppid)
+{
+ u64 id = pci_get_dsn(adapter->pdev);
+
+ ppid->id_len = sizeof(id);
+ put_unaligned_be64(id, &ppid->id);
+}
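
As a side note, a self-contained C sketch of what the encoding above amounts to: put_unaligned_be64() stores the 64-bit PCI DSN most-significant byte first into the id buffer. The value below is an arbitrary example, not a real DSN.

#include <stdint.h>
#include <stdio.h>

/* Store a 64-bit value into a byte array, most-significant byte first. */
static void store_be64(uint64_t val, uint8_t *out)
{
	for (int i = 0; i < 8; i++)
		out[i] = (uint8_t)(val >> (56 - 8 * i));
}

int main(void)
{
	uint8_t id[8];

	store_be64(0x0123456789abcdefULL, id);	/* arbitrary example DSN */
	for (int i = 0; i < 8; i++)
		printf("%02x%c", id[i], i == 7 ? '\n' : ' ');
	return 0;
}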
+
+/**
+ * ixgbe_devlink_register_port - Register devlink port
+ * @adapter: pointer to the device adapter structure
+ *
+ * Create and register a devlink_port for this physical function.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+int ixgbe_devlink_register_port(struct ixgbe_adapter *adapter)
+{
+ struct devlink_port *devlink_port = &adapter->devlink_port;
+ struct devlink *devlink = adapter->devlink;
+ struct device *dev = &adapter->pdev->dev;
+ struct devlink_port_attrs attrs = {};
+ int err;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = adapter->hw.bus.func;
+ attrs.no_phys_port_name = 1;
+ ixgbe_devlink_set_switch_id(adapter, &attrs.switch_id);
+
+ devlink_port_attrs_set(devlink_port, &attrs);
+
+ err = devl_port_register(devlink, devlink_port, 0);
+ if (err) {
+ dev_err(dev,
+ "devlink port registration failed, err %d\n", err);
+ }
+
+ return err;
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/devlink/devlink.h b/drivers/net/ethernet/intel/ixgbe/devlink/devlink.h
new file mode 100644
index 000000000000..381558058048
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/devlink/devlink.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025, Intel Corporation. */
+
+#ifndef _IXGBE_DEVLINK_H_
+#define _IXGBE_DEVLINK_H_
+
+struct ixgbe_adapter *ixgbe_allocate_devlink(struct device *dev);
+int ixgbe_devlink_register_port(struct ixgbe_adapter *adapter);
+void ixgbe_devlink_init_regions(struct ixgbe_adapter *adapter);
+void ixgbe_devlink_destroy_regions(struct ixgbe_adapter *adapter);
+
+#endif /* _IXGBE_DEVLINK_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/devlink/region.c b/drivers/net/ethernet/intel/ixgbe/devlink/region.c
new file mode 100644
index 000000000000..478b4f435120
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/devlink/region.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025, Intel Corporation. */
+
+#include "ixgbe.h"
+#include "devlink.h"
+
+#define IXGBE_DEVLINK_READ_BLK_SIZE (1024 * 1024)
+
+static const struct devlink_region_ops ixgbe_nvm_region_ops;
+static const struct devlink_region_ops ixgbe_sram_region_ops;
+
+static int ixgbe_devlink_parse_region(struct ixgbe_hw *hw,
+ const struct devlink_region_ops *ops,
+ bool *read_shadow_ram, u32 *nvm_size)
+{
+ if (ops == &ixgbe_nvm_region_ops) {
+ *read_shadow_ram = false;
+ *nvm_size = hw->flash.flash_size;
+ } else if (ops == &ixgbe_sram_region_ops) {
+ *read_shadow_ram = true;
+ *nvm_size = hw->flash.sr_words * 2u;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_devlink_nvm_snapshot - Capture a snapshot of the NVM content
+ * @devlink: the devlink instance
+ * @ops: the devlink region being snapshotted
+ * @extack: extended ACK response structure
+ * @data: on exit points to snapshot data buffer
+ *
+ * This function is called in response to the DEVLINK_CMD_REGION_NEW cmd.
+ *
+ * Capture a snapshot of the whole requested NVM region.
+ *
+ * No need to worry about freeing @data, the devlink core takes care of it.
+ *
+ * Return: 0 on success, -EOPNOTSUPP for unsupported regions, -EBUSY when
+ * the NVM cannot be locked, -ENOMEM when memory allocation fails, and -EIO
+ * when an error occurs during reading.
+ */
+static int ixgbe_devlink_nvm_snapshot(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack, u8 **data)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool read_shadow_ram;
+ u8 *nvm_data, *buf;
+ u32 nvm_size, left;
+ u8 num_blks;
+ int err;
+
+ err = ixgbe_devlink_parse_region(hw, ops, &read_shadow_ram, &nvm_size);
+ if (err)
+ return err;
+
+ nvm_data = kvzalloc(nvm_size, GFP_KERNEL);
+ if (!nvm_data)
+ return -ENOMEM;
+
+ num_blks = DIV_ROUND_UP(nvm_size, IXGBE_DEVLINK_READ_BLK_SIZE);
+ buf = nvm_data;
+ left = nvm_size;
+
+ for (int i = 0; i < num_blks; i++) {
+ u32 read_sz = min_t(u32, IXGBE_DEVLINK_READ_BLK_SIZE, left);
+
+ /* Need to acquire the NVM lock on each loop iteration because reading
+ * the whole NVM takes longer than the maximum period the lock may be
+ * held, as defined by IXGBE_NVM_TIMEOUT.
+ */
+ err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_READ);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to acquire NVM semaphore");
+ kvfree(nvm_data);
+ return -EBUSY;
+ }
+
+ err = ixgbe_read_flat_nvm(hw, i * IXGBE_DEVLINK_READ_BLK_SIZE,
+ &read_sz, buf, read_shadow_ram);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to read RAM content");
+ ixgbe_release_nvm(hw);
+ kvfree(nvm_data);
+ return -EIO;
+ }
+
+ ixgbe_release_nvm(hw);
+
+ buf += read_sz;
+ left -= read_sz;
+ }
+
+ *data = nvm_data;
+ return 0;
+}
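
The snapshot loop above re-takes the NVM semaphore for every 1 MiB block so that no single hold outlives the lock timeout. A standalone C sketch of that chunking pattern, where acquire_lock(), release_lock() and read_block() are hypothetical stubs rather than driver functions:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define READ_BLK_SIZE (1024 * 1024)

/* Hypothetical stubs standing in for the semaphore and flash-read helpers. */
static int acquire_lock(void) { return 0; }
static void release_lock(void) { }
static int read_block(uint32_t offset, uint32_t len, uint8_t *dst)
{
	memset(dst, 0xff, len);		/* pretend flash reads back erased bytes */
	return 0;
}

static int snapshot(uint32_t total, uint8_t **out)
{
	uint32_t blocks = (total + READ_BLK_SIZE - 1) / READ_BLK_SIZE;
	uint8_t *buf = malloc(total);
	uint32_t left = total;
	uint8_t *pos = buf;

	if (!buf)
		return -1;

	for (uint32_t i = 0; i < blocks; i++) {
		uint32_t len = left < READ_BLK_SIZE ? left : READ_BLK_SIZE;

		if (acquire_lock()) {
			free(buf);
			return -1;
		}
		if (read_block(i * READ_BLK_SIZE, len, pos)) {
			release_lock();
			free(buf);
			return -1;
		}
		release_lock();		/* drop the lock between blocks */
		pos += len;
		left -= len;
	}

	*out = buf;
	return 0;
}

int main(void)
{
	uint8_t *data;

	if (snapshot(3 * READ_BLK_SIZE + 123, &data))
		return 1;
	printf("snapshot complete\n");
	free(data);
	return 0;
}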
+
+/**
+ * ixgbe_devlink_devcaps_snapshot - Capture a snapshot of device capabilities
+ * @devlink: the devlink instance
+ * @ops: the devlink region being snapshotted
+ * @extack: extended ACK response structure
+ * @data: on exit points to snapshot data buffer
+ *
+ * This function is called in response to the DEVLINK_CMD_REGION_NEW for
+ * the device-caps devlink region.
+ *
+ * Capture a snapshot of the device capabilities reported by firmware.
+ *
+ * No need to worry about freeing @data, the devlink core takes care of it.
+ *
+ * Return: 0 on success, -ENOMEM when memory allocation fails, or the return
+ * code of the reading operation.
+ */
+static int ixgbe_devlink_devcaps_snapshot(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct ixgbe_aci_cmd_list_caps_elem *caps;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ caps = kvzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
+ if (!caps)
+ return -ENOMEM;
+
+ err = ixgbe_aci_list_caps(hw, caps, IXGBE_ACI_MAX_BUFFER_SIZE, NULL,
+ ixgbe_aci_opc_list_dev_caps);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to read device capabilities");
+ kvfree(caps);
+ return err;
+ }
+
+ *data = (u8 *)caps;
+ return 0;
+}
+
+/**
+ * ixgbe_devlink_nvm_read - Read a portion of NVM flash content
+ * @devlink: the devlink instance
+ * @ops: the devlink region to snapshot
+ * @extack: extended ACK response structure
+ * @offset: the offset to start at
+ * @size: the amount to read
+ * @data: the data buffer to read into
+ *
+ * This function is called in response to DEVLINK_CMD_REGION_READ to directly
+ * read a section of the NVM contents.
+ *
+ * Read from either the nvm-flash region or the shadow-ram region.
+ *
+ * Return: 0 on success, -EOPNOTSUPP for unsupported regions, -EBUSY when
+ * the NVM cannot be locked, -ERANGE when the buffer limit is exceeded, and
+ * -EIO when an error occurs during reading.
+ */
+static int ixgbe_devlink_nvm_read(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u64 offset, u32 size, u8 *data)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool read_shadow_ram;
+ u32 nvm_size;
+ int err;
+
+ err = ixgbe_devlink_parse_region(hw, ops, &read_shadow_ram, &nvm_size);
+ if (err)
+ return err;
+
+ if (offset + size > nvm_size) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot read beyond the region size");
+ return -ERANGE;
+ }
+
+ err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_READ);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
+ return -EBUSY;
+ }
+
+ err = ixgbe_read_flat_nvm(hw, (u32)offset, &size, data, read_shadow_ram);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
+ ixgbe_release_nvm(hw);
+ return -EIO;
+ }
+
+ ixgbe_release_nvm(hw);
+ return 0;
+}
+
+static const struct devlink_region_ops ixgbe_nvm_region_ops = {
+ .name = "nvm-flash",
+ .destructor = kvfree,
+ .snapshot = ixgbe_devlink_nvm_snapshot,
+ .read = ixgbe_devlink_nvm_read,
+};
+
+static const struct devlink_region_ops ixgbe_sram_region_ops = {
+ .name = "shadow-ram",
+ .destructor = kvfree,
+ .snapshot = ixgbe_devlink_nvm_snapshot,
+ .read = ixgbe_devlink_nvm_read,
+};
+
+static const struct devlink_region_ops ixgbe_devcaps_region_ops = {
+ .name = "device-caps",
+ .destructor = kvfree,
+ .snapshot = ixgbe_devlink_devcaps_snapshot,
+};
+
+/**
+ * ixgbe_devlink_init_regions - Initialize devlink regions
+ * @adapter: adapter instance
+ *
+ * Create devlink regions used to enable access to dump the contents of the
+ * flash memory of the device.
+ */
+void ixgbe_devlink_init_regions(struct ixgbe_adapter *adapter)
+{
+ struct devlink *devlink = adapter->devlink;
+ struct device *dev = &adapter->pdev->dev;
+ u64 nvm_size, sram_size;
+
+ if (adapter->hw.mac.type != ixgbe_mac_e610)
+ return;
+
+ nvm_size = adapter->hw.flash.flash_size;
+ adapter->nvm_region = devl_region_create(devlink, &ixgbe_nvm_region_ops,
+ 1, nvm_size);
+ if (IS_ERR(adapter->nvm_region)) {
+ dev_err(dev,
+ "Failed to create NVM devlink region, err %ld\n",
+ PTR_ERR(adapter->nvm_region));
+ adapter->nvm_region = NULL;
+ }
+
+ sram_size = adapter->hw.flash.sr_words * 2u;
+ adapter->sram_region = devl_region_create(devlink, &ixgbe_sram_region_ops,
+ 1, sram_size);
+ if (IS_ERR(adapter->sram_region)) {
+ dev_err(dev,
+ "Failed to create shadow-ram devlink region, err %ld\n",
+ PTR_ERR(adapter->sram_region));
+ adapter->sram_region = NULL;
+ }
+
+ adapter->devcaps_region = devl_region_create(devlink,
+ &ixgbe_devcaps_region_ops,
+ 10, IXGBE_ACI_MAX_BUFFER_SIZE);
+ if (IS_ERR(adapter->devcaps_region)) {
+ dev_err(dev,
+ "Failed to create device-caps devlink region, err %ld\n",
+ PTR_ERR(adapter->devcaps_region));
+ adapter->devcaps_region = NULL;
+ }
+}
+
+/**
+ * ixgbe_devlink_destroy_regions - Destroy devlink regions
+ * @adapter: adapter instance
+ *
+ * Remove previously created regions for this adapter instance.
+ */
+void ixgbe_devlink_destroy_regions(struct ixgbe_adapter *adapter)
+{
+ if (adapter->hw.mac.type != ixgbe_mac_e610)
+ return;
+
+ if (adapter->nvm_region)
+ devl_region_destroy(adapter->nvm_region);
+
+ if (adapter->sram_region)
+ devl_region_destroy(adapter->sram_region);
+
+ if (adapter->devcaps_region)
+ devl_region_destroy(adapter->devcaps_region);
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 559b443c409f..dce4936708eb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef _IXGBE_H_
#define _IXGBE_H_
@@ -17,9 +17,12 @@
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
+#include <net/devlink.h>
+
#include "ixgbe_type.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb.h"
+#include "ixgbe_e610.h"
#if IS_ENABLED(CONFIG_FCOE)
#define IXGBE_FCOE
#include "ixgbe_fcoe.h"
@@ -173,6 +176,7 @@ enum ixgbe_tx_flags {
#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset)
#define IXGBE_82599_VF_DEVICE_ID 0x10ED
#define IXGBE_X540_VF_DEVICE_ID 0x1515
+#define IXGBE_E610_VF_DEVICE_ID 0x57AD
#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
{ \
@@ -425,6 +429,10 @@ enum ixgbe_ring_f_enum {
#define IXGBE_BAD_L2A_QUEUE 3
#define IXGBE_MAX_MACVLANS 63
+#define IXGBE_MAX_TX_QUEUES 128
+#define IXGBE_MAX_TX_DESCRIPTORS 40
+#define IXGBE_MAX_TX_VF_HANGS 4
+
DECLARE_STATIC_KEY_FALSE(ixgbe_xdp_locking_key);
struct ixgbe_ring_feature {
@@ -503,9 +511,10 @@ struct ixgbe_q_vector {
struct ixgbe_ring_container rx, tx;
struct napi_struct napi;
+ struct rcu_head rcu; /* to avoid race with update stats on free */
+
cpumask_t affinity_mask;
int numa_node;
- struct rcu_head rcu; /* to avoid race with update stats on free */
char name[IFNAMSIZ + 9];
/* for dynamic allocation of rings associated with this q_vector */
@@ -610,6 +619,11 @@ struct ixgbe_adapter {
struct bpf_prog *xdp_prog;
struct pci_dev *pdev;
struct mii_bus *mii_bus;
+ struct devlink *devlink;
+ struct devlink_port devlink_port;
+ struct devlink_region *nvm_region;
+ struct devlink_region *sram_region;
+ struct devlink_region *devcaps_region;
unsigned long state;
@@ -654,6 +668,7 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP BIT(9)
#define IXGBE_FLAG2_PTP_PPS_ENABLED BIT(10)
#define IXGBE_FLAG2_PHY_INTERRUPT BIT(11)
+#define IXGBE_FLAG2_FW_ASYNC_EVENT BIT(12)
#define IXGBE_FLAG2_VLAN_PROMISC BIT(13)
#define IXGBE_FLAG2_EEE_CAPABLE BIT(14)
#define IXGBE_FLAG2_EEE_ENABLED BIT(15)
@@ -661,6 +676,11 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_IPSEC_ENABLED BIT(17)
#define IXGBE_FLAG2_VF_IPSEC_ENABLED BIT(18)
#define IXGBE_FLAG2_AUTO_DISABLE_VF BIT(19)
+#define IXGBE_FLAG2_PHY_FW_LOAD_FAILED BIT(20)
+#define IXGBE_FLAG2_NO_MEDIA BIT(21)
+#define IXGBE_FLAG2_MOD_POWER_UNSUPPORTED BIT(22)
+#define IXGBE_FLAG2_API_MISMATCH BIT(23)
+#define IXGBE_FLAG2_FW_ROLLBACK BIT(24)
/* Tx fast path data */
int num_tx_queues;
@@ -737,6 +757,7 @@ struct ixgbe_adapter {
bool link_up;
unsigned long sfp_poll_time;
unsigned long link_check_timeout;
+ u32 link_down_events;
struct timer_list service_timer;
struct work_struct service_task;
@@ -749,6 +770,8 @@ struct ixgbe_adapter {
u32 atr_sample_rate;
spinlock_t fdir_perfect_lock;
+ bool fw_emp_reset_disabled;
+
#ifdef IXGBE_FCOE
struct ixgbe_fcoe fcoe;
#endif /* IXGBE_FCOE */
@@ -767,7 +790,7 @@ struct ixgbe_adapter {
struct ptp_clock_info ptp_caps;
struct work_struct ptp_tx_work;
struct sk_buff *ptp_tx_skb;
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
unsigned long ptp_tx_start;
unsigned long last_overflow_check;
unsigned long last_rx_ptp_check;
@@ -792,13 +815,13 @@ struct ixgbe_adapter {
u32 timer_event_accumulator;
u32 vferr_refcount;
struct ixgbe_mac_addr *mac_table;
+ u8 tx_hang_count[IXGBE_MAX_TX_QUEUES];
struct kobject *info_kobj;
+ u16 lse_mask;
#ifdef CONFIG_IXGBE_HWMON
struct hwmon_buff *ixgbe_hwmon_buff;
#endif /* CONFIG_IXGBE_HWMON */
-#ifdef CONFIG_DEBUG_FS
struct dentry *ixgbe_dbg_adapter;
-#endif /*CONFIG_DEBUG_FS*/
u8 default_up;
/* Bitmask indicating in use pools */
@@ -823,6 +846,17 @@ struct ixgbe_adapter {
spinlock_t vfs_lock;
};
+struct ixgbe_netdevice_priv {
+ struct ixgbe_adapter *adapter;
+};
+
+static inline struct ixgbe_adapter *ixgbe_from_netdev(struct net_device *netdev)
+{
+ struct ixgbe_netdevice_priv *priv = netdev_priv(netdev);
+
+ return priv->adapter;
+}
+
static inline int ixgbe_determine_xdp_q_idx(int cpu)
{
if (static_key_enabled(&ixgbe_xdp_locking_key))
@@ -849,6 +883,7 @@ static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
return IXGBE_MAX_RSS_INDICES_X550;
default:
return 0;
@@ -874,6 +909,7 @@ enum ixgbe_state_t {
__IXGBE_PTP_RUNNING,
__IXGBE_PTP_TX_IN_PROGRESS,
__IXGBE_RESET_REQUESTED,
+ __IXGBE_PHY_INIT_COMPLETE,
};
struct ixgbe_cb {
@@ -896,6 +932,7 @@ enum ixgbe_boards {
board_x550em_x_fw,
board_x550em_a,
board_x550em_a_fw,
+ board_e610,
};
extern const struct ixgbe_info ixgbe_82598_info;
@@ -906,6 +943,7 @@ extern const struct ixgbe_info ixgbe_X550EM_x_info;
extern const struct ixgbe_info ixgbe_x550em_x_fw_info;
extern const struct ixgbe_info ixgbe_x550em_a_info;
extern const struct ixgbe_info ixgbe_x550em_a_fw_info;
+extern const struct ixgbe_info ixgbe_e610_info;
#ifdef CONFIG_IXGBE_DCB
extern const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops;
#endif
@@ -934,6 +972,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter);
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
u16 subdevice_id);
+void ixgbe_set_fw_version_e610(struct ixgbe_adapter *adapter);
+void ixgbe_refresh_fw_version(struct ixgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
#endif
@@ -1044,8 +1084,11 @@ static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
rx_ring->last_rx_timestamp = jiffies;
}
-int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
-int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
+int ixgbe_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int ixgbe_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 283a23150a4d..406c15f58034 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -6,6 +6,7 @@
#include <linux/sched.h>
#include "ixgbe.h"
+#include "ixgbe_mbx.h"
#include "ixgbe_phy.h"
#define IXGBE_82598_MAX_TX_QUEUES 32
@@ -44,7 +45,7 @@ static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
goto out;
/*
- * if capababilities version is type 1 we can write the
+ * if capabilities version is type 1 we can write the
* timeout of 10ms to 250ms through the GCR register
*/
if (!(gcr & IXGBE_GCR_CAP_VER2)) {
@@ -750,7 +751,7 @@ mac_reset_top:
/*
* Store the original AUTOC value if it has not been
* stored off yet. Otherwise restore the stored original
- * AUTOC value since the reset operation sets back to deaults.
+ * AUTOC value since the reset operation sets back to defaults.
*/
autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
if (hw->mac.orig_link_settings_stored == false) {
@@ -1168,6 +1169,7 @@ static const struct ixgbe_eeprom_operations eeprom_ops_82598 = {
.calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
.update_checksum = &ixgbe_update_eeprom_checksum_generic,
+ .read_pba_string = &ixgbe_read_pba_string_generic,
};
static const struct ixgbe_phy_operations phy_ops_82598 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index cdaf087b4e85..3069b583fd81 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -198,7 +198,7 @@ static int prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
* @hw: pointer to hardware structure
* @autoc: value to write to AUTOC
* @locked: bool to indicate whether the SW/FW lock was already taken by
- * previous proc_autoc_read_82599.
+ * previous prot_autoc_read_82599.
*
* This part (82599) may need to hold a the SW/FW lock around all writes to
* AUTOC. Likewise after a write we need to do a pipeline reset.
@@ -1615,13 +1615,14 @@ int ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
break;
default:
break;
}
- /* store source and destination IP masks (big-enian) */
+ /* store source and destination IP masks (big-endian) */
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
~input_mask->formatted.src_ip[0]);
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
@@ -2229,6 +2230,7 @@ static const struct ixgbe_eeprom_operations eeprom_ops_82599 = {
.calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
.update_checksum = &ixgbe_update_eeprom_checksum_generic,
+ .read_pba_string = &ixgbe_read_pba_string_generic,
};
static const struct ixgbe_phy_operations phy_ops_82599 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 3be1bfb16498..3ea6765f9c5d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -58,6 +58,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_A_SFP:
case IXGBE_DEV_ID_X550EM_A_SFP_N:
+ case IXGBE_DEV_ID_E610_SFP:
supported = false;
break;
default:
@@ -88,6 +89,8 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_A_10G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ case IXGBE_DEV_ID_E610_10G_T:
+ case IXGBE_DEV_ID_E610_2_5G_T:
supported = true;
break;
default:
@@ -241,7 +244,7 @@ int ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
*/
if (hw->phy.media_type == ixgbe_media_type_backplane) {
/* Need the SW/FW semaphore around AUTOC writes if 82599 and
- * LESM is on, likewise reset_pipeline requries the lock as
+ * LESM is on, likewise reset_pipeline requires the lock as
* it also writes AUTOC.
*/
ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
@@ -298,7 +301,7 @@ int ixgbe_start_hw_generic(struct ixgbe_hw *hw)
return ret_val;
}
- /* Cashe bit indicating need for crosstalk fix */
+ /* Cache bit indicating need for crosstalk fix */
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X550EM_x:
@@ -329,6 +332,7 @@ int ixgbe_start_hw_generic(struct ixgbe_hw *hw)
* Devices in the second generation:
* 82599
* X540
+ * E610
**/
int ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
{
@@ -469,9 +473,14 @@ int ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
}
}
- if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
+ if (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X540 ||
+ hw->mac.type == ixgbe_mac_e610) {
if (hw->phy.id == 0)
hw->phy.ops.identify(hw);
+ }
+
+ if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i);
hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i);
hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i);
@@ -660,7 +669,11 @@ int ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
hw->bus.type = ixgbe_bus_type_pci_express;
/* Get the negotiated link width and speed from PCI config space */
- link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS);
+ if (hw->mac.type == ixgbe_mac_e610)
+ link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS_E610);
+ else
+ link_status = ixgbe_read_pci_cfg_word(hw,
+ IXGBE_PCI_LINK_STATUS);
hw->bus.width = ixgbe_convert_bus_width(link_status);
hw->bus.speed = ixgbe_convert_bus_speed(link_status);
@@ -1726,9 +1739,9 @@ int ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
}
}
- checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+ checksum = IXGBE_EEPROM_SUM - checksum;
- return (int)checksum;
+ return checksum;
}
/**
@@ -2918,6 +2931,10 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
break;
+ case ixgbe_mac_e610:
+ pcie_offset = IXGBE_PCIE_MSIX_E610_CAPS;
+ max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
+ break;
default:
return 1;
}
@@ -3366,7 +3383,8 @@ int ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
*speed = IXGBE_LINK_SPEED_1GB_FULL;
break;
case IXGBE_LINKS_SPEED_100_82599:
- if ((hw->mac.type >= ixgbe_mac_X550) &&
+ if ((hw->mac.type >= ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_e610) &&
(links_reg & IXGBE_LINKS_SPEED_NON_STD))
*speed = IXGBE_LINK_SPEED_5GB_FULL;
else
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 6493abf189de..6639069ad528 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -194,6 +194,8 @@ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg);
dev_err(&adapter->pdev->dev, format, ## arg)
#define e_dev_notice(format, arg...) \
dev_notice(&adapter->pdev->dev, format, ## arg)
+#define e_dbg(msglvl, format, arg...) \
+ netif_dbg(adapter, msglvl, adapter->netdev, format, ## arg)
#define e_info(msglvl, format, arg...) \
netif_info(adapter, msglvl, adapter->netdev, format, ## arg)
#define e_err(msglvl, format, arg...) \
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index f2709b10c2e5..3dd5a16a14df 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include "ixgbe.h"
#include <linux/dcbnl.h>
@@ -118,14 +118,14 @@ static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max)
static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED);
}
static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
/* Fail command if not in CEE mode */
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
@@ -142,7 +142,7 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
u8 *perm_addr)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int i, j;
memset(perm_addr, 0xff, MAX_ADDR_LEN);
@@ -154,6 +154,7 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
+ case ixgbe_mac_e610:
for (j = 0; j < netdev->addr_len; j++, i++)
perm_addr[i] = adapter->hw.mac.san_addr[j];
break;
@@ -166,7 +167,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
u8 prio, u8 bwg_id, u8 bw_pct,
u8 up_map)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (prio != DCB_ATTR_VALUE_UNDEFINED)
adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio;
@@ -183,7 +184,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
u8 bw_pct)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
}
@@ -192,7 +193,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
u8 prio, u8 bwg_id, u8 bw_pct,
u8 up_map)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (prio != DCB_ATTR_VALUE_UNDEFINED)
adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio;
@@ -209,7 +210,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
u8 bw_pct)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
}
@@ -218,7 +219,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
u8 *prio, u8 *bwg_id, u8 *bw_pct,
u8 *up_map)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
*prio = adapter->dcb_cfg.tc_config[tc].path[0].prio_type;
*bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id;
@@ -229,7 +230,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
u8 *bw_pct)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
*bw_pct = adapter->dcb_cfg.bw_percentage[0][bwg_id];
}
@@ -238,7 +239,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
u8 *prio, u8 *bwg_id, u8 *bw_pct,
u8 *up_map)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
*prio = adapter->dcb_cfg.tc_config[tc].path[1].prio_type;
*bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id;
@@ -249,7 +250,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
u8 *bw_pct)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
*bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id];
}
@@ -257,7 +258,7 @@ static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
u8 setting)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting;
if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc !=
@@ -268,14 +269,14 @@ static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
u8 *setting)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
*setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc;
}
static void ixgbe_dcbnl_devreset(struct net_device *dev)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
usleep_range(1000, 2000);
@@ -294,7 +295,7 @@ static void ixgbe_dcbnl_devreset(struct net_device *dev)
static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
struct ixgbe_hw *hw = &adapter->hw;
int ret = DCB_NO_HW_CHG;
@@ -382,7 +383,7 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
switch (capid) {
case DCB_CAP_ATTR_PG:
@@ -419,7 +420,7 @@ static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
switch (tcid) {
@@ -446,14 +447,14 @@ static int ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
static u8 ixgbe_dcbnl_getpfcstate(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
return adapter->dcb_cfg.pfc_mode_enable;
}
static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
adapter->temp_dcb_cfg.pfc_mode_enable = state;
}
@@ -470,7 +471,7 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
*/
static int ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct dcb_app app = {
.selector = idtype,
.protocol = id,
@@ -485,7 +486,7 @@ static int ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
static int ixgbe_dcbnl_ieee_getets(struct net_device *dev,
struct ieee_ets *ets)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets;
ets->ets_cap = adapter->dcb_cfg.num_tcs.pg_tcs;
@@ -505,7 +506,7 @@ static int ixgbe_dcbnl_ieee_getets(struct net_device *dev,
static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
struct ieee_ets *ets)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
int i, err;
__u8 max_tc = 0;
@@ -558,7 +559,7 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc;
int i;
@@ -583,7 +584,7 @@ static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
u8 *prio_tc;
int err;
@@ -615,7 +616,7 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
struct dcb_app *app)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
int err;
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
@@ -660,7 +661,7 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev,
struct dcb_app *app)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
int err;
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
@@ -704,13 +705,13 @@ static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev,
static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
return adapter->dcbx_cap;
}
static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ieee_ets ets = {0};
struct ieee_pfc pfc = {0};
int err = 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
new file mode 100644
index 000000000000..c2f8189a0738
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
@@ -0,0 +1,4043 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024 Intel Corporation. */
+
+#include "ixgbe_common.h"
+#include "ixgbe_e610.h"
+#include "ixgbe_x550.h"
+#include "ixgbe_type.h"
+#include "ixgbe_x540.h"
+#include "ixgbe_mbx.h"
+#include "ixgbe_phy.h"
+
+/**
+ * ixgbe_should_retry_aci_send_cmd_execute - decide if ACI command should
+ * be resent
+ * @opcode: ACI opcode
+ *
+ * Check if ACI command should be sent again depending on the provided opcode.
+ * It may happen when CSR is busy during link state changes.
+ *
+ * Return: true if the sending command routine should be repeated,
+ * otherwise false.
+ */
+static bool ixgbe_should_retry_aci_send_cmd_execute(u16 opcode)
+{
+ switch (opcode) {
+ case ixgbe_aci_opc_disable_rxen:
+ case ixgbe_aci_opc_get_phy_caps:
+ case ixgbe_aci_opc_get_link_status:
+ case ixgbe_aci_opc_get_link_topo:
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ixgbe_aci_send_cmd_execute - execute sending FW Admin Command to FW Admin
+ * Command Interface
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ *
+ * Admin Command is sent using CSR by setting descriptor and buffer in specific
+ * registers.
+ *
+ * Return: the exit code of the operation.
+ * * - 0 - success.
+ * * - -EIO - CSR mechanism is not enabled.
+ * * - -EBUSY - CSR mechanism is busy.
+ * * - -EINVAL - buf_size is too big or the
+ * buf/buf_size arguments are invalid.
+ * * - -ETIME - the Admin Command timed out.
+ * * - -EIO - invalid state of the HICR register, the Admin Command
+ * failed because a bad opcode was returned, or the Admin Command
+ * failed with an error.
+ */
+static int ixgbe_aci_send_cmd_execute(struct ixgbe_hw *hw,
+ struct libie_aq_desc *desc,
+ void *buf, u16 buf_size)
+{
+ u16 opcode, buf_tail_size = buf_size % 4;
+ u32 *raw_desc = (u32 *)desc;
+ u32 hicr, i, buf_tail = 0;
+ bool valid_buf = false;
+
+ hw->aci.last_status = LIBIE_AQ_RC_OK;
+
+ /* It's necessary to check if mechanism is enabled */
+ hicr = IXGBE_READ_REG(hw, IXGBE_PF_HICR);
+
+ if (!(hicr & IXGBE_PF_HICR_EN))
+ return -EIO;
+
+ if (hicr & IXGBE_PF_HICR_C) {
+ hw->aci.last_status = LIBIE_AQ_RC_EBUSY;
+ return -EBUSY;
+ }
+
+ opcode = le16_to_cpu(desc->opcode);
+
+ if (buf_size > IXGBE_ACI_MAX_BUFFER_SIZE)
+ return -EINVAL;
+
+ if (buf)
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_BUF);
+
+ if (desc->flags & cpu_to_le16(LIBIE_AQ_FLAG_BUF)) {
+ if ((buf && !buf_size) ||
+ (!buf && buf_size))
+ return -EINVAL;
+ if (buf && buf_size)
+ valid_buf = true;
+ }
+
+ if (valid_buf) {
+ if (buf_tail_size)
+ memcpy(&buf_tail, buf + buf_size - buf_tail_size,
+ buf_tail_size);
+
+ if (((buf_size + 3) & ~0x3) > LIBIE_AQ_LG_BUF)
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
+
+ desc->datalen = cpu_to_le16(buf_size);
+
+ if (desc->flags & cpu_to_le16(LIBIE_AQ_FLAG_RD)) {
+ for (i = 0; i < buf_size / 4; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_PF_HIBA(i), ((u32 *)buf)[i]);
+ if (buf_tail_size)
+ IXGBE_WRITE_REG(hw, IXGBE_PF_HIBA(i), buf_tail);
+ }
+ }
+
+ /* Descriptor is written to specific registers */
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_PF_HIDA(i), raw_desc[i]);
+
+ /* SW has to set PF_HICR.C bit and clear PF_HICR.SV and
+ * PF_HICR_EV
+ */
+ hicr = (IXGBE_READ_REG(hw, IXGBE_PF_HICR) | IXGBE_PF_HICR_C) &
+ ~(IXGBE_PF_HICR_SV | IXGBE_PF_HICR_EV);
+ IXGBE_WRITE_REG(hw, IXGBE_PF_HICR, hicr);
+
+#define MAX_SLEEP_RESP_US 1000
+#define MAX_TMOUT_RESP_SYNC_US 100000000
+
+ /* Wait for sync Admin Command response */
+ read_poll_timeout(IXGBE_READ_REG, hicr,
+ (hicr & IXGBE_PF_HICR_SV) ||
+ !(hicr & IXGBE_PF_HICR_C),
+ MAX_SLEEP_RESP_US, MAX_TMOUT_RESP_SYNC_US, true, hw,
+ IXGBE_PF_HICR);
+
+#define MAX_TMOUT_RESP_ASYNC_US 150000000
+
+ /* Wait for async Admin Command response */
+ read_poll_timeout(IXGBE_READ_REG, hicr,
+ (hicr & IXGBE_PF_HICR_EV) ||
+ !(hicr & IXGBE_PF_HICR_C),
+ MAX_SLEEP_RESP_US, MAX_TMOUT_RESP_ASYNC_US, true, hw,
+ IXGBE_PF_HICR);
+
+ /* Read sync Admin Command response */
+ if ((hicr & IXGBE_PF_HICR_SV)) {
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) {
+ raw_desc[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIDA(i));
+ raw_desc[i] = raw_desc[i];
+ }
+ }
+
+ /* Read async Admin Command response */
+ if ((hicr & IXGBE_PF_HICR_EV) && !(hicr & IXGBE_PF_HICR_C)) {
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) {
+ raw_desc[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIDA_2(i));
+ raw_desc[i] = raw_desc[i];
+ }
+ }
+
+ /* Handle timeout and invalid state of HICR register */
+ if (hicr & IXGBE_PF_HICR_C)
+ return -ETIME;
+
+ if (!(hicr & IXGBE_PF_HICR_SV) && !(hicr & IXGBE_PF_HICR_EV))
+ return -EIO;
+
+ /* For every command other than 0x0014 treat opcode mismatch
+ * as an error. Response to 0x0014 command read from HIDA_2
+ * is a descriptor of an event which is expected to contain
+ * different opcode than the command.
+ */
+ if (desc->opcode != cpu_to_le16(opcode) &&
+ opcode != ixgbe_aci_opc_get_fw_event)
+ return -EIO;
+
+ if (desc->retval) {
+ hw->aci.last_status = (enum libie_aq_err)
+ le16_to_cpu(desc->retval);
+ return -EIO;
+ }
+
+ /* Write the response values to the buffer */
+ if (valid_buf) {
+ for (i = 0; i < buf_size / 4; i++)
+ ((u32 *)buf)[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIBA(i));
+ if (buf_tail_size) {
+ buf_tail = IXGBE_READ_REG(hw, IXGBE_PF_HIBA(i));
+ memcpy(buf + buf_size - buf_tail_size, &buf_tail,
+ buf_tail_size);
+ }
+ }
+
+ return 0;
+}
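
The command buffer above is pushed to the 32-bit HIBA registers one dword at a time, with any partial trailing chunk (buf_size % 4 bytes) packed into a final zero-padded dword. A self-contained C sketch of that split, writing into an in-memory array instead of registers:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Copy an arbitrary-length byte buffer into 32-bit slots, padding the tail. */
static unsigned int write_as_dwords(const uint8_t *buf, unsigned int len,
				    uint32_t *regs)
{
	unsigned int full = len / 4, tail = len % 4, i;
	uint32_t last = 0;

	for (i = 0; i < full; i++)
		memcpy(&regs[i], buf + 4 * i, 4);
	if (tail) {
		memcpy(&last, buf + 4 * full, tail);
		regs[i++] = last;	/* remaining bytes, zero padded */
	}
	return i;			/* number of dwords written */
}

int main(void)
{
	uint8_t buf[7] = { 1, 2, 3, 4, 5, 6, 7 };
	uint32_t regs[2];

	printf("wrote %u dwords\n", write_as_dwords(buf, sizeof(buf), regs));
	return 0;
}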
+
+/**
+ * ixgbe_aci_send_cmd - send FW Admin Command to FW Admin Command Interface
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ *
+ * Helper function to send FW Admin Commands to the FW Admin Command Interface.
+ *
+ * Retry sending the FW Admin Command multiple times to the FW ACI
+ * if the EBUSY Admin Command error is returned.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct libie_aq_desc *desc,
+ void *buf, u16 buf_size)
+{
+ u16 opcode = le16_to_cpu(desc->opcode);
+ struct libie_aq_desc desc_cpy;
+ enum libie_aq_err last_status;
+ u8 idx = 0, *buf_cpy = NULL;
+ bool is_cmd_for_retry;
+ unsigned long timeout;
+ int err;
+
+ is_cmd_for_retry = ixgbe_should_retry_aci_send_cmd_execute(opcode);
+ if (is_cmd_for_retry) {
+ if (buf) {
+ buf_cpy = kmalloc(buf_size, GFP_KERNEL);
+ if (!buf_cpy)
+ return -ENOMEM;
+ *buf_cpy = *(u8 *)buf;
+ }
+ desc_cpy = *desc;
+ }
+
+ timeout = jiffies + msecs_to_jiffies(IXGBE_ACI_SEND_TIMEOUT_MS);
+ do {
+ mutex_lock(&hw->aci.lock);
+ err = ixgbe_aci_send_cmd_execute(hw, desc, buf, buf_size);
+ last_status = hw->aci.last_status;
+ mutex_unlock(&hw->aci.lock);
+
+ if (!is_cmd_for_retry || !err ||
+ last_status != LIBIE_AQ_RC_EBUSY)
+ break;
+
+ if (buf)
+ memcpy(buf, buf_cpy, buf_size);
+ *desc = desc_cpy;
+
+ msleep(IXGBE_ACI_SEND_DELAY_TIME_MS);
+ } while (++idx < IXGBE_ACI_SEND_MAX_EXECUTE &&
+ time_before(jiffies, timeout));
+
+ kfree(buf_cpy);
+
+ return err;
+}
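
ixgbe_aci_send_cmd() bounds its busy-retry loop two ways at once: by an attempt count and by a wall-clock deadline. A minimal userspace C sketch of that double bound, where do_send() is a hypothetical stand-in for one command attempt that reports whether the failure was a busy condition:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define MAX_ATTEMPTS	3
#define RETRY_DELAY_MS	10
#define TIMEOUT_MS	3000

/* Hypothetical stand-in: fail twice with "busy", then succeed. */
static int do_send(bool *busy)
{
	static int calls;

	*busy = ++calls < 3;
	return *busy ? -1 : 0;
}

static double elapsed_ms(struct timespec start)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - start.tv_sec) * 1000.0 +
	       (now.tv_nsec - start.tv_nsec) / 1e6;
}

int main(void)
{
	struct timespec start;
	int attempts = 0, err;
	bool busy;

	clock_gettime(CLOCK_MONOTONIC, &start);
	do {
		err = do_send(&busy);
		if (!err || !busy)
			break;
		usleep(RETRY_DELAY_MS * 1000);
	} while (++attempts < MAX_ATTEMPTS && elapsed_ms(start) < TIMEOUT_MS);

	printf("result %d after %d retries\n", err, attempts);
	return err ? 1 : 0;
}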
+
+/**
+ * ixgbe_aci_check_event_pending - check if there are any pending events
+ * @hw: pointer to the HW struct
+ *
+ * Determine if there are any pending events.
+ *
+ * Return: true if there are any currently pending events
+ * otherwise false.
+ */
+bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw)
+{
+ u32 ep_bit_mask = hw->bus.func ? GL_FWSTS_EP_PF1 : GL_FWSTS_EP_PF0;
+ u32 fwsts = IXGBE_READ_REG(hw, GL_FWSTS);
+
+ return (fwsts & ep_bit_mask) ? true : false;
+}
+
+/**
+ * ixgbe_aci_get_event - get an event from ACI
+ * @hw: pointer to the HW struct
+ * @e: event information structure
+ * @pending: optional flag signaling that there are more pending events
+ *
+ * Obtain an event from ACI and return its content
+ * through 'e' using ACI command (0x0014).
+ * Provide information if there are more events
+ * to retrieve through 'pending'.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
+ bool *pending)
+{
+ struct libie_aq_desc desc;
+ int err;
+
+ if (!e || (!e->msg_buf && e->buf_len))
+ return -EINVAL;
+
+ mutex_lock(&hw->aci.lock);
+
+ /* Check if there are any events pending */
+ if (!ixgbe_aci_check_event_pending(hw)) {
+ err = -ENOENT;
+ goto aci_get_event_exit;
+ }
+
+ /* Obtain pending event */
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_fw_event);
+ err = ixgbe_aci_send_cmd_execute(hw, &desc, e->msg_buf, e->buf_len);
+ if (err)
+ goto aci_get_event_exit;
+
+ /* Returned 0x0014 opcode indicates that no event was obtained */
+ if (desc.opcode == cpu_to_le16(ixgbe_aci_opc_get_fw_event)) {
+ err = -ENOENT;
+ goto aci_get_event_exit;
+ }
+
+ /* Determine size of event data */
+ e->msg_len = min_t(u16, le16_to_cpu(desc.datalen), e->buf_len);
+ /* Write event descriptor to event info structure */
+ memcpy(&e->desc, &desc, sizeof(e->desc));
+
+ /* Check if there are any further events pending */
+ if (pending)
+ *pending = ixgbe_aci_check_event_pending(hw);
+
+aci_get_event_exit:
+ mutex_unlock(&hw->aci.lock);
+
+ return err;
+}
+
+/**
+ * ixgbe_fill_dflt_direct_cmd_desc - fill ACI descriptor with default values.
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Helper function to fill the descriptor desc with default values
+ * and the provided opcode.
+ */
+void ixgbe_fill_dflt_direct_cmd_desc(struct libie_aq_desc *desc, u16 opcode)
+{
+ /* Zero out the desc. */
+ memset(desc, 0, sizeof(*desc));
+ desc->opcode = cpu_to_le16(opcode);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_SI);
+}
+
+/**
+ * ixgbe_aci_get_fw_ver - Get the firmware version
+ * @hw: pointer to the HW struct
+ *
+ * Get the firmware version using ACI command (0x0001).
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_aci_get_fw_ver(struct ixgbe_hw *hw)
+{
+ struct libie_aqc_get_ver *resp;
+ struct libie_aq_desc desc;
+ int err;
+
+ resp = &desc.params.get_ver;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_ver);
+
+ err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (!err) {
+ hw->fw_branch = resp->fw_branch;
+ hw->fw_maj_ver = resp->fw_major;
+ hw->fw_min_ver = resp->fw_minor;
+ hw->fw_patch = resp->fw_patch;
+ hw->fw_build = le32_to_cpu(resp->fw_build);
+ hw->api_branch = resp->api_branch;
+ hw->api_maj_ver = resp->api_major;
+ hw->api_min_ver = resp->api_minor;
+ hw->api_patch = resp->api_patch;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_aci_req_res - request a common resource
+ * @hw: pointer to the HW struct
+ * @res: resource ID
+ * @access: access type
+ * @sdp_number: resource number
+ * @timeout: the maximum time in ms that the driver may hold the resource
+ *
+ * Requests a common resource using the ACI command (0x0008).
+ * Specifies the maximum time the driver may hold the resource.
+ * If the requested resource is currently occupied by some other driver,
+ * the command completes with a busy status and the timeout field indicates
+ * the maximum time the current owner has to free it.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_aci_req_res(struct ixgbe_hw *hw, enum libie_aq_res_id res,
+ enum libie_aq_res_access_type access,
+ u8 sdp_number, u32 *timeout)
+{
+ struct libie_aqc_req_res *cmd_resp;
+ struct libie_aq_desc desc;
+ int err;
+
+ cmd_resp = &desc.params.res_owner;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_req_res);
+
+ cmd_resp->res_id = cpu_to_le16(res);
+ cmd_resp->access_type = cpu_to_le16(access);
+ cmd_resp->res_number = cpu_to_le32(sdp_number);
+ cmd_resp->timeout = cpu_to_le32(*timeout);
+ *timeout = 0;
+
+ err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ /* If the resource is held by some other driver, the command completes
+ * with a busy return value and the timeout field indicates the maximum
+ * time the current owner of the resource has to free it.
+ */
+ if (!err || hw->aci.last_status == LIBIE_AQ_RC_EBUSY)
+ *timeout = le32_to_cpu(cmd_resp->timeout);
+
+ return err;
+}
+
+/**
+ * ixgbe_aci_release_res - release a common resource using ACI
+ * @hw: pointer to the HW struct
+ * @res: resource ID
+ * @sdp_number: resource number
+ *
+ * Release a common resource using ACI command (0x0009).
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_aci_release_res(struct ixgbe_hw *hw, enum libie_aq_res_id res,
+ u8 sdp_number)
+{
+ struct libie_aqc_req_res *cmd;
+ struct libie_aq_desc desc;
+
+ cmd = &desc.params.res_owner;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_release_res);
+
+ cmd->res_id = cpu_to_le16(res);
+ cmd->res_number = cpu_to_le32(sdp_number);
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_acquire_res - acquire the ownership of a resource
+ * @hw: pointer to the HW structure
+ * @res: resource ID
+ * @access: access type (read or write)
+ * @timeout: timeout in milliseconds
+ *
+ * Make an attempt to acquire the ownership of a resource using
+ * ixgbe_aci_req_res over the ACI.
+ * If some other driver has previously acquired the resource and
+ * performed any necessary updates, -EALREADY is returned,
+ * and the caller does not obtain the resource and has no further work to do.
+ * If needed, the function will poll until the current lock owner times out.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_acquire_res(struct ixgbe_hw *hw, enum libie_aq_res_id res,
+ enum libie_aq_res_access_type access, u32 timeout)
+{
+#define IXGBE_RES_POLLING_DELAY_MS 10
+ u32 delay = IXGBE_RES_POLLING_DELAY_MS;
+ u32 res_timeout = timeout;
+ u32 retry_timeout;
+ int err;
+
+ err = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
+
+ /* A return code of -EALREADY means that another driver has
+ * previously acquired the resource and performed any necessary updates;
+ * in this case the caller does not obtain the resource and has no
+ * further work to do.
+ */
+ if (err == -EALREADY)
+ return err;
+
+ /* If necessary, poll until the current lock owner times out.
+ * Set retry_timeout to the timeout value reported by the FW in the
+ * response to the "Request Resource Ownership" (0x0008) Admin Command
+ * as it indicates the maximum time the current owner of the resource
+ * is allowed to hold it.
+ */
+ retry_timeout = res_timeout;
+ while (err && retry_timeout && res_timeout) {
+ msleep(delay);
+ retry_timeout = (retry_timeout > delay) ?
+ retry_timeout - delay : 0;
+ err = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
+
+ /* Success - lock acquired.
+ * -EALREADY - lock free, no work to do.
+ */
+ if (!err || err == -EALREADY)
+ break;
+ }
+
+ return err;
+}
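
The acquire loop above only keeps retrying while the owner-reported hold time has not been fully consumed, decrementing the remaining budget by the polling delay on every pass. A standalone C sketch of that countdown, where request_resource() is a hypothetical stub that reports how long the current owner may still hold the lock:

#include <stdio.h>
#include <unistd.h>

#define POLL_DELAY_MS 10

/* Hypothetical stub: busy for the first few calls, owner holds the lock ~50 ms. */
static int request_resource(unsigned int *owner_timeout_ms)
{
	static int calls;

	*owner_timeout_ms = 50;
	return ++calls < 4 ? -1 : 0;	/* -1: busy, 0: acquired */
}

int main(void)
{
	unsigned int owner_timeout, retry_budget;
	int err;

	err = request_resource(&owner_timeout);
	retry_budget = owner_timeout;

	while (err && retry_budget && owner_timeout) {
		usleep(POLL_DELAY_MS * 1000);
		retry_budget = retry_budget > POLL_DELAY_MS ?
			       retry_budget - POLL_DELAY_MS : 0;
		err = request_resource(&owner_timeout);
	}

	printf(err ? "gave up\n" : "resource acquired\n");
	return err ? 1 : 0;
}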
+
+/**
+ * ixgbe_release_res - release a common resource
+ * @hw: pointer to the HW structure
+ * @res: resource ID
+ *
+ * Release a common resource using ixgbe_aci_release_res.
+ */
+void ixgbe_release_res(struct ixgbe_hw *hw, enum libie_aq_res_id res)
+{
+ u32 total_delay = 0;
+ int err;
+
+ err = ixgbe_aci_release_res(hw, res, 0);
+
+ /* There are some rare cases when trying to release the resource
+ * results in an admin command timeout, so handle them correctly.
+ */
+ while (err == -ETIME &&
+ total_delay < IXGBE_ACI_RELEASE_RES_TIMEOUT) {
+ usleep_range(1000, 1500);
+ err = ixgbe_aci_release_res(hw, res, 0);
+ total_delay++;
+ }
+}
+
+/**
+ * ixgbe_parse_e610_caps - Parse common device/function capabilities
+ * @hw: pointer to the HW struct
+ * @caps: pointer to common capabilities structure
+ * @elem: the capability element to parse
+ * @prefix: message prefix for tracing capabilities
+ *
+ * Given a capability element, extract relevant details into the common
+ * capability structure.
+ *
+ * Return: true if the capability matches one of the common capability ids,
+ * false otherwise.
+ */
+static bool ixgbe_parse_e610_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_caps *caps,
+ struct libie_aqc_list_caps_elem *elem,
+ const char *prefix)
+{
+ u32 logical_id = le32_to_cpu(elem->logical_id);
+ u32 phys_id = le32_to_cpu(elem->phys_id);
+ u32 number = le32_to_cpu(elem->number);
+ u16 cap = le16_to_cpu(elem->cap);
+
+ switch (cap) {
+ case LIBIE_AQC_CAPS_VALID_FUNCTIONS:
+ caps->valid_functions = number;
+ break;
+ case LIBIE_AQC_CAPS_SRIOV:
+ caps->sr_iov_1_1 = (number == 1);
+ break;
+ case LIBIE_AQC_CAPS_VMDQ:
+ caps->vmdq = (number == 1);
+ break;
+ case LIBIE_AQC_CAPS_DCB:
+ caps->dcb = (number == 1);
+ caps->active_tc_bitmap = logical_id;
+ caps->maxtc = phys_id;
+ break;
+ case LIBIE_AQC_CAPS_RSS:
+ caps->rss_table_size = number;
+ caps->rss_table_entry_width = logical_id;
+ break;
+ case LIBIE_AQC_CAPS_RXQS:
+ caps->num_rxq = number;
+ caps->rxq_first_id = phys_id;
+ break;
+ case LIBIE_AQC_CAPS_TXQS:
+ caps->num_txq = number;
+ caps->txq_first_id = phys_id;
+ break;
+ case LIBIE_AQC_CAPS_MSIX:
+ caps->num_msix_vectors = number;
+ caps->msix_vector_first_id = phys_id;
+ break;
+ case LIBIE_AQC_CAPS_NVM_VER:
+ break;
+ case LIBIE_AQC_CAPS_PENDING_NVM_VER:
+ caps->nvm_update_pending_nvm = true;
+ break;
+ case LIBIE_AQC_CAPS_PENDING_OROM_VER:
+ caps->nvm_update_pending_orom = true;
+ break;
+ case LIBIE_AQC_CAPS_PENDING_NET_VER:
+ caps->nvm_update_pending_netlist = true;
+ break;
+ case LIBIE_AQC_CAPS_NVM_MGMT:
+ caps->nvm_unified_update =
+ (number & IXGBE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
+ true : false;
+ break;
+ case LIBIE_AQC_CAPS_MAX_MTU:
+ caps->max_mtu = number;
+ break;
+ case LIBIE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
+ caps->pcie_reset_avoidance = (number > 0);
+ break;
+ case LIBIE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
+ caps->reset_restrict_support = (number == 1);
+ break;
+ case LIBIE_AQC_CAPS_EXT_TOPO_DEV_IMG0:
+ case LIBIE_AQC_CAPS_EXT_TOPO_DEV_IMG1:
+ case LIBIE_AQC_CAPS_EXT_TOPO_DEV_IMG2:
+ case LIBIE_AQC_CAPS_EXT_TOPO_DEV_IMG3:
+ {
+ u8 index = cap - LIBIE_AQC_CAPS_EXT_TOPO_DEV_IMG0;
+
+ caps->ext_topo_dev_img_ver_high[index] = number;
+ caps->ext_topo_dev_img_ver_low[index] = logical_id;
+ caps->ext_topo_dev_img_part_num[index] =
+ FIELD_GET(IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M, phys_id);
+ caps->ext_topo_dev_img_load_en[index] =
+ (phys_id & IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
+ caps->ext_topo_dev_img_prog_en[index] =
+ (phys_id & IXGBE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
+ break;
+ }
+ default:
+ /* Not one of the recognized common capabilities */
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ixgbe_parse_valid_functions_cap - Parse LIBIE_AQC_CAPS_VALID_FUNCTIONS caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse LIBIE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
+ */
+static void
+ixgbe_parse_valid_functions_cap(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct libie_aqc_list_caps_elem *cap)
+{
+ dev_p->num_funcs = hweight32(le32_to_cpu(cap->number));
+}
+
+/**
+ * ixgbe_parse_vf_dev_caps - Parse LIBIE_AQC_CAPS_VF device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse LIBIE_AQC_CAPS_VF for device capabilities.
+ */
+static void ixgbe_parse_vf_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct libie_aqc_list_caps_elem *cap)
+{
+ dev_p->num_vfs_exposed = le32_to_cpu(cap->number);
+}
+
+/**
+ * ixgbe_parse_vsi_dev_caps - Parse LIBIE_AQC_CAPS_VSI device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse LIBIE_AQC_CAPS_VSI for device capabilities.
+ */
+static void ixgbe_parse_vsi_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct libie_aqc_list_caps_elem *cap)
+{
+ dev_p->num_vsi_allocd_to_host = le32_to_cpu(cap->number);
+}
+
+/**
+ * ixgbe_parse_fdir_dev_caps - Parse LIBIE_AQC_CAPS_FD device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse LIBIE_AQC_CAPS_FD for device capabilities.
+ */
+static void ixgbe_parse_fdir_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct libie_aqc_list_caps_elem *cap)
+{
+ dev_p->num_flow_director_fltr = le32_to_cpu(cap->number);
+}
+
+/**
+ * ixgbe_parse_dev_caps - Parse device capabilities
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @buf: buffer containing the device capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse the device (0x000B) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ixgbe_parse_e610_caps.
+ *
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the device capabilities structure.
+ */
+static void ixgbe_parse_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ void *buf, u32 cap_count)
+{
+ struct libie_aqc_list_caps_elem *cap_resp;
+ u32 i;
+
+ cap_resp = (struct libie_aqc_list_caps_elem *)buf;
+
+ memset(dev_p, 0, sizeof(*dev_p));
+
+ for (i = 0; i < cap_count; i++) {
+ u16 cap = le16_to_cpu(cap_resp[i].cap);
+
+ ixgbe_parse_e610_caps(hw, &dev_p->common_cap, &cap_resp[i],
+ "dev caps");
+
+ switch (cap) {
+ case LIBIE_AQC_CAPS_VALID_FUNCTIONS:
+ ixgbe_parse_valid_functions_cap(hw, dev_p,
+ &cap_resp[i]);
+ break;
+ case LIBIE_AQC_CAPS_VF:
+ ixgbe_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ case LIBIE_AQC_CAPS_VSI:
+ ixgbe_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ case LIBIE_AQC_CAPS_FD:
+ ixgbe_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ default:
+ /* Don't list common capabilities as unknown */
+ break;
+ }
+ }
+}
+
+/**
+ * ixgbe_parse_vf_func_caps - Parse LIBIE_AQC_CAPS_VF function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for LIBIE_AQC_CAPS_VF.
+ */
+static void ixgbe_parse_vf_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ struct libie_aqc_list_caps_elem *cap)
+{
+ func_p->num_allocd_vfs = le32_to_cpu(cap->number);
+ func_p->vf_base_id = le32_to_cpu(cap->logical_id);
+}
+
+/**
+ * ixgbe_get_num_per_func - determine number of resources per PF
+ * @hw: pointer to the HW structure
+ * @max: value to be evenly split between each PF
+ *
+ * Determine the number of valid functions by going through the bitmap returned
+ * from parsing capabilities and use this to calculate the number of resources
+ * per PF based on the max value passed in.
+ *
+ * Return: the number of resources per PF or 0, if no PFs are available.
+ */
+static u32 ixgbe_get_num_per_func(struct ixgbe_hw *hw, u32 max)
+{
+#define IXGBE_CAPS_VALID_FUNCS_M GENMASK(7, 0)
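+	/* Only the low 8 bits of the valid functions bitmap are used. */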
+ u8 funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
+ IXGBE_CAPS_VALID_FUNCS_M);
+
+ return funcs ? (max / funcs) : 0;
+}
+
+/**
+ * ixgbe_parse_vsi_func_caps - Parse LIBIE_AQC_CAPS_VSI function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for LIBIE_AQC_CAPS_VSI.
+ */
+static void ixgbe_parse_vsi_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ struct libie_aqc_list_caps_elem *cap)
+{
+ func_p->guar_num_vsi = ixgbe_get_num_per_func(hw, IXGBE_MAX_VSI);
+}
+
+/**
+ * ixgbe_parse_func_caps - Parse function capabilities
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @buf: buffer containing the function capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse function (0x000A) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ixgbe_parse_e610_caps.
+ *
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the function capabilities structure.
+ */
+static void ixgbe_parse_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ void *buf, u32 cap_count)
+{
+ struct libie_aqc_list_caps_elem *cap_resp;
+ u32 i;
+
+ cap_resp = (struct libie_aqc_list_caps_elem *)buf;
+
+ memset(func_p, 0, sizeof(*func_p));
+
+ for (i = 0; i < cap_count; i++) {
+ u16 cap = le16_to_cpu(cap_resp[i].cap);
+
+ ixgbe_parse_e610_caps(hw, &func_p->common_cap,
+ &cap_resp[i], "func caps");
+
+ switch (cap) {
+ case LIBIE_AQC_CAPS_VF:
+ ixgbe_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
+ break;
+ case LIBIE_AQC_CAPS_VSI:
+ ixgbe_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
+ break;
+ default:
+ /* Don't list common capabilities as unknown */
+ break;
+ }
+ }
+}
+
+/**
+ * ixgbe_aci_list_caps - query function/device capabilities
+ * @hw: pointer to the HW struct
+ * @buf: a buffer to hold the capabilities
+ * @buf_size: size of the buffer
+ * @cap_count: if not NULL, set to the number of capabilities reported
+ * @opc: capabilities type to discover, device or function
+ *
+ * Get the function (0x000A) or device (0x000B) capabilities description from
+ * firmware and store it in the buffer.
+ *
+ * If the cap_count pointer is not NULL, then it is set to the number of
+ * capabilities firmware will report. Note that if the buffer size is too
+ * small, it is possible the command will return -ENOMEM. The
+ * cap_count will still be updated in this case. It is recommended that the
+ * buffer size be set to IXGBE_ACI_MAX_BUFFER_SIZE (the largest possible
+ * buffer that firmware could return) to avoid this.
+ *
+ * Return: the exit code of the operation.
+ * Exit code of -ENOMEM means the buffer size is too small.
+ */
+int ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
+ u32 *cap_count, enum ixgbe_aci_opc opc)
+{
+ struct libie_aqc_list_caps *cmd;
+ struct libie_aq_desc desc;
+ int err;
+
+ cmd = &desc.params.get_cap;
+
+ if (opc != ixgbe_aci_opc_list_func_caps &&
+ opc != ixgbe_aci_opc_list_dev_caps)
+ return -EINVAL;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, opc);
+ err = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size);
+
+ if (cap_count)
+ *cap_count = le32_to_cpu(cmd->count);
+
+ return err;
+}
+
+/**
+ * ixgbe_discover_dev_caps - Read and extract device capabilities
+ * @hw: pointer to the hardware structure
+ * @dev_caps: pointer to device capabilities structure
+ *
+ * Read the device capabilities and extract them into the dev_caps structure
+ * for later use.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_caps)
+{
+ u32 cap_count;
+ u8 *cbuf;
+ int err;
+
+ cbuf = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
+ if (!cbuf)
+ return -ENOMEM;
+
+ /* Although the driver doesn't know the number of capabilities the
+ * device will return, we can simply send a 4KB buffer, the maximum
+ * possible size that firmware can return.
+ */
+ cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
+ sizeof(struct libie_aqc_list_caps_elem);
+
+ err = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
+ &cap_count,
+ ixgbe_aci_opc_list_dev_caps);
+ if (!err)
+ ixgbe_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
+
+ kfree(cbuf);
+
+	return err;
+}
+
+/**
+ * ixgbe_discover_func_caps - Read and extract function capabilities
+ * @hw: pointer to the hardware structure
+ * @func_caps: pointer to function capabilities structure
+ *
+ * Read the function capabilities and extract them into the func_caps structure
+ * for later use.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_discover_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_caps)
+{
+ u32 cap_count;
+ u8 *cbuf;
+ int err;
+
+ cbuf = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
+ if (!cbuf)
+ return -ENOMEM;
+
+ /* Although the driver doesn't know the number of capabilities the
+ * device will return, we can simply send a 4KB buffer, the maximum
+ * possible size that firmware can return.
+ */
+ cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
+ sizeof(struct libie_aqc_list_caps_elem);
+
+ err = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
+ &cap_count,
+ ixgbe_aci_opc_list_func_caps);
+ if (!err)
+ ixgbe_parse_func_caps(hw, func_caps, cbuf, cap_count);
+
+ kfree(cbuf);
+
+	return err;
+}
+
+/**
+ * ixgbe_get_caps - get info about the HW
+ * @hw: pointer to the hardware structure
+ *
+ * Retrieve both device and function capabilities.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_caps(struct ixgbe_hw *hw)
+{
+ int err;
+
+ err = ixgbe_discover_dev_caps(hw, &hw->dev_caps);
+ if (err)
+ return err;
+
+ return ixgbe_discover_func_caps(hw, &hw->func_caps);
+}
+
+/**
+ * ixgbe_aci_disable_rxen - disable RX
+ * @hw: pointer to the HW struct
+ *
+ * Request a safe disable of Receive Enable using ACI command (0x000C).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_disable_rxen(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_disable_rxen *cmd;
+ struct libie_aq_desc desc;
+
+ cmd = libie_aq_raw(&desc);
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_disable_rxen);
+
+ cmd->lport_num = hw->bus.func;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_get_phy_caps - returns PHY capabilities
+ * @hw: pointer to the HW struct
+ * @qual_mods: report qualified modules
+ * @report_mode: report mode capabilities
+ * @pcaps: structure for PHY capabilities to be filled
+ *
+ * Returns the various PHY capabilities supported on the Port
+ * using ACI command (0x0600).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode,
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps)
+{
+ struct ixgbe_aci_cmd_get_phy_caps *cmd;
+ u16 pcaps_size = sizeof(*pcaps);
+ struct libie_aq_desc desc;
+ int err;
+
+ cmd = libie_aq_raw(&desc);
+
+ if (!pcaps || (report_mode & ~IXGBE_ACI_REPORT_MODE_M))
+ return -EINVAL;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_phy_caps);
+
+ if (qual_mods)
+ cmd->param0 |= cpu_to_le16(IXGBE_ACI_GET_PHY_RQM);
+
+ cmd->param0 |= cpu_to_le16(report_mode);
+ err = ixgbe_aci_send_cmd(hw, &desc, pcaps, pcaps_size);
+ if (!err && report_mode == IXGBE_ACI_REPORT_TOPO_CAP_MEDIA) {
+ hw->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
+ hw->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
+ memcpy(hw->link.link_info.module_type, &pcaps->module_type,
+ sizeof(hw->link.link_info.module_type));
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
+ * @caps: PHY ability structure to copy data from
+ * @cfg: PHY configuration structure to copy data to
+ *
+ * Helper function to copy data from PHY capabilities data structure
+ * to PHY configuration data structure
+ */
+void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
+{
+ if (!caps || !cfg)
+ return;
+
+ memset(cfg, 0, sizeof(*cfg));
+ cfg->phy_type_low = caps->phy_type_low;
+ cfg->phy_type_high = caps->phy_type_high;
+ cfg->caps = caps->caps;
+ cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
+ cfg->eee_cap = caps->eee_cap;
+ cfg->eeer_value = caps->eeer_value;
+ cfg->link_fec_opt = caps->link_fec_options;
+ cfg->module_compliance_enforcement =
+ caps->module_compliance_enforcement;
+}
+
+/**
+ * ixgbe_aci_set_phy_cfg - set PHY configuration
+ * @hw: pointer to the HW struct
+ * @cfg: structure with PHY configuration data to be set
+ *
+ * Set the various PHY configuration parameters supported on the Port
+ * using ACI command (0x0601).
+ * One or more of the Set PHY config parameters may be ignored in an MFP
+ * mode as the PF may not have the privilege to set some of the PHY Config
+ * parameters.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
+{
+ struct ixgbe_aci_cmd_set_phy_cfg *cmd;
+ struct libie_aq_desc desc;
+ int err;
+
+ if (!cfg)
+ return -EINVAL;
+
+ cmd = libie_aq_raw(&desc);
+ /* Ensure that only valid bits of cfg->caps can be turned on. */
+ cfg->caps &= IXGBE_ACI_PHY_ENA_VALID_MASK;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_phy_cfg);
+ cmd->lport_num = hw->bus.func;
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
+
+ err = ixgbe_aci_send_cmd(hw, &desc, cfg, sizeof(*cfg));
+ if (!err)
+ hw->phy.curr_user_phy_cfg = *cfg;
+
+ return err;
+}
+
+/**
+ * ixgbe_aci_set_link_restart_an - set up link and restart AN
+ * @hw: pointer to the HW struct
+ * @ena_link: if true: enable link, if false: disable link
+ *
+ * Function sets up the link and restarts the Auto-Negotiation over the link.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link)
+{
+ struct ixgbe_aci_cmd_restart_an *cmd;
+ struct libie_aq_desc desc;
+
+ cmd = libie_aq_raw(&desc);
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_restart_an);
+
+ cmd->cmd_flags = IXGBE_ACI_RESTART_AN_LINK_RESTART;
+ cmd->lport_num = hw->bus.func;
+ if (ena_link)
+ cmd->cmd_flags |= IXGBE_ACI_RESTART_AN_LINK_ENABLE;
+ else
+ cmd->cmd_flags &= ~IXGBE_ACI_RESTART_AN_LINK_ENABLE;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_is_media_cage_present - check if media cage is present
+ * @hw: pointer to the HW struct
+ *
+ * Identify presence of media cage using the ACI command (0x06E0).
+ *
+ * Return: true if media cage is present, else false. If no cage, then
+ * media type is backplane or BASE-T.
+ */
+static bool ixgbe_is_media_cage_present(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_link_topo *cmd;
+ struct libie_aq_desc desc;
+
+ cmd = libie_aq_raw(&desc);
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
+
+ cmd->addr.topo_params.node_type_ctx =
+ FIELD_PREP(IXGBE_ACI_LINK_TOPO_NODE_CTX_M,
+ IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT);
+
+ /* Set node type. */
+ cmd->addr.topo_params.node_type_ctx |=
+ FIELD_PREP(IXGBE_ACI_LINK_TOPO_NODE_TYPE_M,
+ IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE);
+
+	/* Node type cage can be used to determine if a cage is present. If the
+	 * AQC returns an error (ENOENT), then no cage is present. If no cage is
+	 * present, the connection type is backplane or BASE-T.
+ */
+ return !ixgbe_aci_get_netlist_node(hw, cmd, NULL, NULL);
+}
+
+/**
+ * ixgbe_get_media_type_from_phy_type - Gets media type based on phy type
+ * @hw: pointer to the HW struct
+ *
+ * Try to identify the media type based on the PHY type.
+ * If more than one media type is set, ixgbe_media_type_unknown is returned.
+ * First phy_type_low is checked, then phy_type_high.
+ * If neither identifies the media, ixgbe_media_type_unknown is returned.
+ *
+ * Return: type of a media based on phy type in form of enum.
+ */
+static enum ixgbe_media_type
+ixgbe_get_media_type_from_phy_type(struct ixgbe_hw *hw)
+{
+ struct ixgbe_link_status *hw_link_info;
+
+ if (!hw)
+ return ixgbe_media_type_unknown;
+
+ hw_link_info = &hw->link.link_info;
+ if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
+ /* If more than one media type is selected, report unknown */
+ return ixgbe_media_type_unknown;
+
+ if (hw_link_info->phy_type_low) {
+ /* 1G SGMII is a special case where some DA cable PHYs
+ * may show this as an option when it really shouldn't
+ * be since SGMII is meant to be between a MAC and a PHY
+ * in a backplane. Try to detect this case and handle it
+ */
+ if (hw_link_info->phy_type_low == IXGBE_PHY_TYPE_LOW_1G_SGMII &&
+ (hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
+ IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
+ hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
+ IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
+ return ixgbe_media_type_da;
+
+ switch (hw_link_info->phy_type_low) {
+ case IXGBE_PHY_TYPE_LOW_1000BASE_SX:
+ case IXGBE_PHY_TYPE_LOW_1000BASE_LX:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_SR:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_LR:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_SR:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_LR:
+ return ixgbe_media_type_fiber;
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
+ case IXGBE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
+ return ixgbe_media_type_fiber;
+ case IXGBE_PHY_TYPE_LOW_100BASE_TX:
+ case IXGBE_PHY_TYPE_LOW_1000BASE_T:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_T:
+ case IXGBE_PHY_TYPE_LOW_5GBASE_T:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_T:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_T:
+ return ixgbe_media_type_copper;
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_DA:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_CR:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_CR_S:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_CR1:
+ return ixgbe_media_type_da;
+ case IXGBE_PHY_TYPE_LOW_25G_AUI_C2C:
+ if (ixgbe_is_media_cage_present(hw))
+ return ixgbe_media_type_aui;
+ fallthrough;
+ case IXGBE_PHY_TYPE_LOW_1000BASE_KX:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_KX:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_X:
+ case IXGBE_PHY_TYPE_LOW_5GBASE_KR:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1:
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_C2C:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_KR:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_KR1:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_KR_S:
+ return ixgbe_media_type_backplane;
+ }
+ } else {
+ switch (hw_link_info->phy_type_high) {
+ case IXGBE_PHY_TYPE_HIGH_10BASE_T:
+ return ixgbe_media_type_copper;
+ }
+ }
+ return ixgbe_media_type_unknown;
+}
+
+/**
+ * ixgbe_update_link_info - update status of the HW network link
+ * @hw: pointer to the HW struct
+ *
+ * Update the status of the HW network link.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_update_link_info(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps;
+ struct ixgbe_link_status *li;
+ int err;
+
+ if (!hw)
+ return -EINVAL;
+
+ li = &hw->link.link_info;
+
+ err = ixgbe_aci_get_link_info(hw, true, NULL);
+ if (err)
+ return err;
+
+ if (!(li->link_info & IXGBE_ACI_MEDIA_AVAILABLE))
+ return 0;
+
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
+ if (!pcaps)
+ return -ENOMEM;
+
+ err = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ pcaps);
+
+ if (!err)
+ memcpy(li->module_type, &pcaps->module_type,
+ sizeof(li->module_type));
+
+ kfree(pcaps);
+
+ return err;
+}
+
+/**
+ * ixgbe_get_link_status - get status of the HW network link
+ * @hw: pointer to the HW struct
+ * @link_up: pointer to bool (true/false = linkup/linkdown)
+ *
+ * The variable link_up is set to true if the link is up, false if it is
+ * down. It is not valid if the return code is non-zero. As a result of
+ * this call, link status reporting becomes enabled.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up)
+{
+ if (!hw || !link_up)
+ return -EINVAL;
+
+ if (hw->link.get_link_info) {
+ int err = ixgbe_update_link_info(hw);
+
+ if (err)
+ return err;
+ }
+
+ *link_up = hw->link.link_info.link_info & IXGBE_ACI_LINK_UP;
+
+ return 0;
+}
+
+/**
+ * ixgbe_aci_get_link_info - get the link status
+ * @hw: pointer to the HW struct
+ * @ena_lse: enable/disable LinkStatusEvent reporting
+ * @link: pointer to link status structure - optional
+ *
+ * Get the current Link Status using ACI command (0x0607).
+ * An optional link status structure can be provided to receive a copy
+ * of the updated status.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
+ struct ixgbe_link_status *link)
+{
+ struct ixgbe_aci_cmd_get_link_status_data link_data = {};
+ struct ixgbe_aci_cmd_get_link_status *resp;
+ struct ixgbe_link_status *li_old, *li;
+ struct ixgbe_fc_info *hw_fc_info;
+ struct libie_aq_desc desc;
+ bool tx_pause, rx_pause;
+ u8 cmd_flags;
+ int err;
+
+ if (!hw)
+ return -EINVAL;
+
+ li_old = &hw->link.link_info_old;
+ li = &hw->link.link_info;
+ hw_fc_info = &hw->fc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
+ cmd_flags = (ena_lse) ? IXGBE_ACI_LSE_ENA : IXGBE_ACI_LSE_DIS;
+ resp = libie_aq_raw(&desc);
+ resp->cmd_flags = cpu_to_le16(cmd_flags);
+ resp->lport_num = hw->bus.func;
+
+ err = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data));
+ if (err)
+ return err;
+
+ /* Save off old link status information. */
+ *li_old = *li;
+
+ /* Update current link status information. */
+ li->link_speed = le16_to_cpu(link_data.link_speed);
+ li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
+ li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
+ li->link_info = link_data.link_info;
+ li->link_cfg_err = link_data.link_cfg_err;
+ li->an_info = link_data.an_info;
+ li->ext_info = link_data.ext_info;
+ li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
+ li->fec_info = link_data.cfg & IXGBE_ACI_FEC_MASK;
+ li->topo_media_conflict = link_data.topo_media_conflict;
+ li->pacing = link_data.cfg & (IXGBE_ACI_CFG_PACING_M |
+ IXGBE_ACI_CFG_PACING_TYPE_M);
+
+ /* Update fc info. */
+ tx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_TX);
+ rx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_RX);
+ if (tx_pause && rx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_full;
+ else if (tx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_tx_pause;
+ else if (rx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_rx_pause;
+ else
+ hw_fc_info->current_mode = ixgbe_fc_none;
+
+ li->lse_ena = !!(le16_to_cpu(resp->cmd_flags) &
+ IXGBE_ACI_LSE_IS_ENABLED);
+
+ /* Save link status information. */
+ if (link)
+ *link = *li;
+
+ /* Flag cleared so calling functions don't call AQ again. */
+ hw->link.get_link_info = false;
+
+ return 0;
+}
+
+/**
+ * ixgbe_aci_set_event_mask - set event mask
+ * @hw: pointer to the HW struct
+ * @port_num: port number of the physical function
+ * @mask: event mask to be set
+ *
+ * Set the event mask using ACI command (0x0613).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask)
+{
+ struct ixgbe_aci_cmd_set_event_mask *cmd;
+ struct libie_aq_desc desc;
+
+ cmd = libie_aq_raw(&desc);
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_event_mask);
+
+ cmd->lport_num = port_num;
+
+ cmd->event_mask = cpu_to_le16(mask);
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_configure_lse - enable/disable link status events
+ * @hw: pointer to the HW struct
+ * @activate: true to enable link status events, false to disable them
+ * @mask: event mask to be set; a set bit means deactivation of the
+ * corresponding event
+ *
+ * Set the event mask and then enable or disable link status events
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask)
+{
+ int err;
+
+ err = ixgbe_aci_set_event_mask(hw, (u8)hw->bus.func, mask);
+ if (err)
+ return err;
+
+	/* Enable or disable link status event generation by FW. */
+ return ixgbe_aci_get_link_info(hw, activate, NULL);
+}
+
+/**
+ * ixgbe_start_hw_e610 - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Get firmware version and start the hardware using the generic
+ * start_hw() and ixgbe_start_hw_gen2() functions.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_start_hw_e610(struct ixgbe_hw *hw)
+{
+ int err;
+
+ err = ixgbe_aci_get_fw_ver(hw);
+ if (err)
+ return err;
+
+ err = ixgbe_start_hw_generic(hw);
+ if (err)
+ return err;
+
+ ixgbe_start_hw_gen2(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_aci_set_port_id_led - set LED value for the given port
+ * @hw: pointer to the HW struct
+ * @orig_mode: set LED original mode
+ *
+ * Set LED value for the given port (0x06E9)
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_set_port_id_led(struct ixgbe_hw *hw, bool orig_mode)
+{
+ struct ixgbe_aci_cmd_set_port_id_led *cmd;
+ struct libie_aq_desc desc;
+
+ cmd = libie_aq_raw(&desc);
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_port_id_led);
+
+ cmd->lport_num = (u8)hw->bus.func;
+ cmd->lport_num_valid = IXGBE_ACI_PORT_ID_PORT_NUM_VALID;
+
+ if (orig_mode)
+ cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_ORIG;
+ else
+ cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_BLINK;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_get_media_type_e610 - Gets media type
+ * @hw: pointer to the HW struct
+ *
+ * In order to get the media type, the function gets the PHY
+ * capabilities and later uses them to identify the media type by
+ * checking phy_type_high and phy_type_low.
+ *
+ * Return: the type of media in form of ixgbe_media_type enum
+ * or ixgbe_media_type_unknown in case of an error.
+ */
+enum ixgbe_media_type ixgbe_get_media_type_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ int rc;
+
+ rc = ixgbe_update_link_info(hw);
+ if (rc)
+ return ixgbe_media_type_unknown;
+
+	/* If there is no link but a PHY (dongle) is available, SW should use the
+	 * Get PHY Caps admin command instead of Get Link Status, find the most
+	 * significant bit set in the PHY types reported by the command and use
+	 * it to discover the media type.
+ */
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP) &&
+ (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE)) {
+ int highest_bit;
+
+ /* Get PHY Capabilities */
+ rc = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ &pcaps);
+ if (rc)
+ return ixgbe_media_type_unknown;
+
+ highest_bit = fls64(le64_to_cpu(pcaps.phy_type_high));
+ if (highest_bit) {
+ hw->link.link_info.phy_type_high =
+ BIT_ULL(highest_bit - 1);
+ hw->link.link_info.phy_type_low = 0;
+ } else {
+ highest_bit = fls64(le64_to_cpu(pcaps.phy_type_low));
+ if (highest_bit) {
+ hw->link.link_info.phy_type_low =
+ BIT_ULL(highest_bit - 1);
+ hw->link.link_info.phy_type_high = 0;
+ }
+ }
+ }
+
+ /* Based on link status or search above try to discover media type. */
+ hw->phy.media_type = ixgbe_get_media_type_from_phy_type(hw);
+
+ return hw->phy.media_type;
+}
+
+/**
+ * ixgbe_setup_link_e610 - Set up link
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait: true when waiting for completion is needed
+ *
+ * Set up the link with the specified speed.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_setup_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait)
+{
+ /* Simply request FW to perform proper PHY setup */
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
+}
+
+/**
+ * ixgbe_check_link_e610 - Determine link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true when link is up
+ * @link_up_wait_to_complete: true to wait for the link to come up, false otherwise
+ *
+ * Determine if the link is up and the current link speed
+ * using ACI command (0x0607).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_check_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete)
+{
+ int err;
+ u32 i;
+
+ if (!speed || !link_up)
+ return -EINVAL;
+
+ /* Set get_link_info flag to ensure that fresh
+ * link information will be obtained from FW
+ * by sending Get Link Status admin command.
+ */
+ hw->link.get_link_info = true;
+
+ /* Update link information in adapter context. */
+ err = ixgbe_get_link_status(hw, link_up);
+ if (err)
+ return err;
+
+ /* Wait for link up if it was requested. */
+ if (link_up_wait_to_complete && !(*link_up)) {
+ for (i = 0; i < hw->mac.max_link_up_time; i++) {
+ msleep(100);
+ hw->link.get_link_info = true;
+ err = ixgbe_get_link_status(hw, link_up);
+ if (err)
+ return err;
+ if (*link_up)
+ break;
+ }
+ }
+
+ /* Use link information in adapter context updated by the call
+ * to ixgbe_get_link_status() to determine current link speed.
+ * Link speed information is valid only when link up was
+ * reported by FW.
+ */
+ if (*link_up) {
+ switch (hw->link.link_info.link_speed) {
+ case IXGBE_ACI_LINK_SPEED_10MB:
+ *speed = IXGBE_LINK_SPEED_10_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_100MB:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_1000MB:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_2500MB:
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_5GB:
+ *speed = IXGBE_LINK_SPEED_5GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_10GB:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ default:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ break;
+ }
+ } else {
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_link_capabilities_e610 - Determine link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: true when autoneg or autotry is enabled
+ *
+ * Determine speed and AN parameters of a link.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_link_capabilities_e610(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ if (!speed || !autoneg)
+ return -EINVAL;
+
+ *autoneg = true;
+ *speed = hw->phy.speeds_supported;
+
+ return 0;
+}
+
+/**
+ * ixgbe_cfg_phy_fc - Configure PHY Flow Control (FC) data based on FC mode
+ * @hw: pointer to hardware structure
+ * @cfg: PHY configuration data to set FC mode
+ * @req_mode: FC mode to configure
+ *
+ * Configures PHY Flow Control according to the provided configuration.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_cfg_phy_fc(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg,
+ enum ixgbe_fc_mode req_mode)
+{
+ u8 pause_mask = 0x0;
+
+ if (!cfg)
+ return -EINVAL;
+
+ switch (req_mode) {
+ case ixgbe_fc_full:
+ pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
+ pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
+ break;
+ case ixgbe_fc_rx_pause:
+ pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
+ break;
+ case ixgbe_fc_tx_pause:
+ pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
+ break;
+ default:
+ break;
+ }
+
+ /* Clear the old pause settings. */
+ cfg->caps &= ~(IXGBE_ACI_PHY_EN_TX_LINK_PAUSE |
+ IXGBE_ACI_PHY_EN_RX_LINK_PAUSE);
+
+ /* Set the new capabilities. */
+ cfg->caps |= pause_mask;
+
+ return 0;
+}
+
+/**
+ * ixgbe_setup_fc_e610 - Set up flow control
+ * @hw: pointer to hardware structure
+ *
+ * Set up flow control. This has to be done during init time.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_setup_fc_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps = {};
+ struct ixgbe_aci_cmd_set_phy_cfg_data cfg = {};
+ int err;
+
+ /* Get the current PHY config */
+ err = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG, &pcaps);
+ if (err)
+ return err;
+
+ ixgbe_copy_phy_caps_to_cfg(&pcaps, &cfg);
+
+ /* Configure the set PHY data */
+ err = ixgbe_cfg_phy_fc(hw, &cfg, hw->fc.requested_mode);
+ if (err)
+ return err;
+
+ /* If the capabilities have changed, then set the new config */
+ if (cfg.caps != pcaps.caps) {
+ cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ err = ixgbe_aci_set_phy_cfg(hw, &cfg);
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_fc_autoneg_e610 - Configure flow control
+ * @hw: pointer to hardware structure
+ *
+ * Configure Flow Control.
+ */
+void ixgbe_fc_autoneg_e610(struct ixgbe_hw *hw)
+{
+ int err;
+
+	/* Get the current link status.
+	 * The current FC mode will be stored in the hw context.
+ */
+ err = ixgbe_aci_get_link_info(hw, false, NULL);
+ if (err)
+ goto no_autoneg;
+
+ /* Check if the link is up */
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP))
+ goto no_autoneg;
+
+ /* Check if auto-negotiation has completed */
+ if (!(hw->link.link_info.an_info & IXGBE_ACI_AN_COMPLETED))
+ goto no_autoneg;
+
+ hw->fc.fc_was_autonegged = true;
+ return;
+
+no_autoneg:
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+}
+
+/**
+ * ixgbe_disable_rx_e610 - Disable RX unit
+ * @hw: pointer to hardware structure
+ *
+ * Disable the RX DMA unit on E610 using ACI command (0x000C).
+ */
+void ixgbe_disable_rx_e610(struct ixgbe_hw *hw)
+{
+ u32 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ u32 pfdtxgswc;
+ int err;
+
+ if (!(rxctrl & IXGBE_RXCTRL_RXEN))
+ return;
+
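+	/* Remember whether VT loopback (LBEN) was set so it can be restored
+	 * when RX is enabled again.
+	 */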
+ pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+ if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
+ pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+ hw->mac.set_lben = true;
+ } else {
+ hw->mac.set_lben = false;
+ }
+
+ err = ixgbe_aci_disable_rxen(hw);
+
+ /* If we fail - disable RX using register write */
+ if (err) {
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ if (rxctrl & IXGBE_RXCTRL_RXEN) {
+ rxctrl &= ~IXGBE_RXCTRL_RXEN;
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
+ }
+ }
+}
+
+/**
+ * ixgbe_fw_recovery_mode_e610 - Check FW NVM recovery mode
+ * @hw: pointer to hardware structure
+ *
+ * Check FW NVM recovery mode by reading the value of
+ * the dedicated register.
+ *
+ * Return: true if FW is in recovery mode, otherwise false.
+ */
+static bool ixgbe_fw_recovery_mode_e610(struct ixgbe_hw *hw)
+{
+ u32 fwsm = IXGBE_READ_REG(hw, IXGBE_GL_MNG_FWSM);
+
+ return !!(fwsm & IXGBE_GL_MNG_FWSM_RECOVERY_M);
+}
+
+/**
+ * ixgbe_fw_rollback_mode_e610 - Check FW NVM rollback mode
+ * @hw: pointer to hardware structure
+ *
+ * Check FW NVM rollback mode by reading the value of
+ * the dedicated register.
+ *
+ * Return: true if FW is in rollback mode, otherwise false.
+ */
+static bool ixgbe_fw_rollback_mode_e610(struct ixgbe_hw *hw)
+{
+ u32 fwsm = IXGBE_READ_REG(hw, IXGBE_GL_MNG_FWSM);
+
+ return !!(fwsm & IXGBE_GL_MNG_FWSM_ROLLBACK_M);
+}
+
+/**
+ * ixgbe_init_phy_ops_e610 - PHY specific init
+ * @hw: pointer to hardware structure
+ *
+ * Initialize any function pointers that were not able to be
+ * set during init_shared_code because the PHY type was not known.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_init_phy_ops_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+
+ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
+ phy->ops.set_phy_power = ixgbe_set_phy_power_e610;
+ else
+ phy->ops.set_phy_power = NULL;
+
+ /* Identify the PHY */
+ return phy->ops.identify(hw);
+}
+
+/**
+ * ixgbe_identify_phy_e610 - Identify PHY
+ * @hw: pointer to hardware structure
+ *
+ * Determine PHY type, supported speeds and PHY ID.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_identify_phy_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ u64 phy_type_low, phy_type_high;
+ int err;
+
+ /* Set PHY type */
+ hw->phy.type = ixgbe_phy_fw;
+
+ err = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_MEDIA, &pcaps);
+ if (err)
+ return err;
+
+ if (!(pcaps.module_compliance_enforcement &
+ IXGBE_ACI_MOD_ENFORCE_STRICT_MODE)) {
+ /* Handle lenient mode */
+ err = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA,
+ &pcaps);
+ if (err)
+ return err;
+ }
+
+ /* Determine supported speeds */
+ hw->phy.speeds_supported = IXGBE_LINK_SPEED_UNKNOWN;
+ phy_type_high = le64_to_cpu(pcaps.phy_type_high);
+ phy_type_low = le64_to_cpu(pcaps.phy_type_low);
+
+ if (phy_type_high & IXGBE_PHY_TYPE_HIGH_10BASE_T ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_10M_SGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10_FULL;
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_100BASE_TX ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_100M_SGMII ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_100M_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_T ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_SX ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_LX ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_KX ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_1G_SGMII ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_1G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_T ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_X ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_KX ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_SGMII ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_T ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_KR ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_5G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_T ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_DA ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_SR ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_LR ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1 ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_C2C ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_10G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ /* Initialize autoneg speeds */
+ if (!hw->phy.autoneg_advertised)
+ hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+
+ /* Set PHY ID */
+ memcpy(&hw->phy.id, pcaps.phy_id_oui, sizeof(u32));
+
+ hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_10_FULL |
+ IXGBE_LINK_SPEED_100_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
+
+ return 0;
+}
+
+/**
+ * ixgbe_identify_module_e610 - Identify SFP module type
+ * @hw: pointer to hardware structure
+ *
+ * Identify the SFP module type.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_identify_module_e610(struct ixgbe_hw *hw)
+{
+ bool media_available;
+ u8 module_type;
+ int err;
+
+ err = ixgbe_update_link_info(hw);
+ if (err)
+ return err;
+
+ media_available =
+ (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE);
+
+ if (media_available) {
+ hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+
+ /* Get module type from hw context updated by
+ * ixgbe_update_link_info()
+ */
+ module_type = hw->link.link_info.module_type[IXGBE_ACI_MOD_TYPE_IDENT];
+
+ if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE) ||
+ (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE)) {
+ hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
+ } else if (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR) {
+ hw->phy.sfp_type = ixgbe_sfp_type_sr;
+ } else if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR) ||
+ (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM)) {
+ hw->phy.sfp_type = ixgbe_sfp_type_lr;
+ }
+ } else {
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_setup_phy_link_e610 - Sets up firmware-controlled PHYs
+ * @hw: pointer to hardware structure
+ *
+ * Set the parameters for the firmware-controlled PHYs.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_setup_phy_link_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ struct ixgbe_aci_cmd_set_phy_cfg_data pcfg;
+ u8 rmode = IXGBE_ACI_REPORT_TOPO_CAP_MEDIA;
+ u64 sup_phy_type_low, sup_phy_type_high;
+ u64 phy_type_low = 0, phy_type_high = 0;
+ int err;
+
+ err = ixgbe_aci_get_link_info(hw, false, NULL);
+ if (err)
+ return err;
+
+ /* If media is not available get default config. */
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE))
+ rmode = IXGBE_ACI_REPORT_DFLT_CFG;
+
+ err = ixgbe_aci_get_phy_caps(hw, false, rmode, &pcaps);
+ if (err)
+ return err;
+
+ sup_phy_type_low = le64_to_cpu(pcaps.phy_type_low);
+ sup_phy_type_high = le64_to_cpu(pcaps.phy_type_high);
+
+ /* Get Active configuration to avoid unintended changes. */
+ err = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_ACTIVE_CFG,
+ &pcaps);
+ if (err)
+ return err;
+
+ ixgbe_copy_phy_caps_to_cfg(&pcaps, &pcfg);
+
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) {
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_10BASE_T;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_10M_SGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) {
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_100BASE_TX;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_100M_SGMII;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_100M_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_T;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_SX;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_LX;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_KX;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_1G_SGMII;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_1G_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) {
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_T;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_X;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_KX;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_SGMII;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) {
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_T;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_KR;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_5G_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) {
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_T;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_DA;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_SR;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_LR;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_C2C;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_10G_USXGMII;
+ }
+
+ /* Mask the set values to avoid requesting unsupported link types. */
+ phy_type_low &= sup_phy_type_low;
+ pcfg.phy_type_low = cpu_to_le64(phy_type_low);
+ phy_type_high &= sup_phy_type_high;
+ pcfg.phy_type_high = cpu_to_le64(phy_type_high);
+
+ if (pcfg.phy_type_high != pcaps.phy_type_high ||
+ pcfg.phy_type_low != pcaps.phy_type_low ||
+ pcfg.caps != pcaps.caps) {
+ pcfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
+ pcfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ err = ixgbe_aci_set_phy_cfg(hw, &pcfg);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_set_phy_power_e610 - Control power for copper PHY
+ * @hw: pointer to hardware structure
+ * @on: true for on, false for off
+ *
+ * Set the power on/off of the PHY
+ * by getting its capabilities and setting the appropriate
+ * configuration parameters.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_set_phy_power_e610(struct ixgbe_hw *hw, bool on)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = {};
+ struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = {};
+ int err;
+
+ err = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG,
+ &phy_caps);
+ if (err)
+ return err;
+
+ ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
+
+ if (on)
+ phy_cfg.caps &= ~IXGBE_ACI_PHY_ENA_LOW_POWER;
+ else
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LOW_POWER;
+
+ /* PHY is already in requested power mode. */
+ if (phy_caps.caps == phy_cfg.caps)
+ return 0;
+
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ return ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
+}
+
+/**
+ * ixgbe_enter_lplu_e610 - Transition to low power states
+ * @hw: pointer to hardware structure
+ *
+ * Configures Low Power Link Up on transition to low power states
+ * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
+ * X557 PHY immediately prior to entering LPLU.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_enter_lplu_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = {};
+ struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = {};
+ int err;
+
+ err = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG,
+ &phy_caps);
+ if (err)
+ return err;
+
+ ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
+
+ phy_cfg.low_power_ctrl_an |= IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG;
+
+ return ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
+}
+
+/**
+ * ixgbe_init_eeprom_params_e610 - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the EEPROM parameters ixgbe_eeprom_info within the ixgbe_hw
+ * struct in order to set up EEPROM access.
+ *
+ * Return: the operation exit code.
+ */
+int ixgbe_init_eeprom_params_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ u32 gens_stat;
+ u8 sr_size;
+
+ if (eeprom->type != ixgbe_eeprom_uninitialized)
+ return 0;
+
+ eeprom->type = ixgbe_flash;
+
+ gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
+ sr_size = FIELD_GET(GLNVM_GENS_SR_SIZE_M, gens_stat);
+
+ /* Switching to words (sr_size contains power of 2). */
+ eeprom->word_size = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB;
+
+ hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", eeprom->type,
+ eeprom->word_size);
+
+ return 0;
+}
+
+/**
+ * ixgbe_aci_get_netlist_node - get a node handle
+ * @hw: pointer to the hw struct
+ * @cmd: get_link_topo AQ structure
+ * @node_part_number: output node part number if node found
+ * @node_handle: output node handle parameter if node found
+ *
+ * Get the netlist node and assign it to
+ * the provided handle using ACI command (0x06E0).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle)
+{
+ struct ixgbe_aci_cmd_get_link_topo *resp;
+ struct libie_aq_desc desc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
+ resp = libie_aq_raw(&desc);
+ *resp = *cmd;
+
+ if (ixgbe_aci_send_cmd(hw, &desc, NULL, 0))
+ return -EOPNOTSUPP;
+
+ if (node_handle)
+ *node_handle = le16_to_cpu(resp->addr.handle);
+ if (node_part_number)
+ *node_part_number = resp->node_part_num;
+
+ return 0;
+}
+
+/**
+ * ixgbe_acquire_nvm - Generic request for acquiring the NVM ownership
+ * @hw: pointer to the HW structure
+ * @access: NVM access type (read or write)
+ *
+ * Request NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_acquire_nvm(struct ixgbe_hw *hw, enum libie_aq_res_access_type access)
+{
+ u32 fla;
+
+ /* Skip if we are in blank NVM programming mode */
+ fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA);
+ if ((fla & IXGBE_GLNVM_FLA_LOCKED_M) == 0)
+ return 0;
+
+ return ixgbe_acquire_res(hw, LIBIE_AQC_RES_ID_NVM, access,
+ IXGBE_NVM_TIMEOUT);
+}
+
+/**
+ * ixgbe_release_nvm - Generic request for releasing the NVM ownership
+ * @hw: pointer to the HW structure
+ *
+ * Release NVM ownership.
+ */
+void ixgbe_release_nvm(struct ixgbe_hw *hw)
+{
+ u32 fla;
+
+ /* Skip if we are in blank NVM programming mode */
+ fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA);
+ if ((fla & IXGBE_GLNVM_FLA_LOCKED_M) == 0)
+ return;
+
+ ixgbe_release_res(hw, LIBIE_AQC_RES_ID_NVM);
+}
+
+/**
+ * ixgbe_aci_read_nvm - read NVM
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be read (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @read_shadow_ram: tell if this is a shadow RAM read
+ *
+ * Read the NVM using ACI command (0x0701).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command,
+ bool read_shadow_ram)
+{
+ struct ixgbe_aci_cmd_nvm *cmd;
+ struct libie_aq_desc desc;
+
+ if (offset > IXGBE_ACI_NVM_MAX_OFFSET)
+ return -EINVAL;
+
+ cmd = libie_aq_raw(&desc);
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_read);
+
+ if (!read_shadow_ram && module_typeid == IXGBE_ACI_NVM_START_POINT)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_FLASH_ONLY;
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
+ cmd->module_typeid = cpu_to_le16(module_typeid);
+ cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
+ cmd->offset_high = (offset >> 16) & 0xFF;
+ cmd->length = cpu_to_le16(length);
+
+ return ixgbe_aci_send_cmd(hw, &desc, data, length);
+}
+
+/**
+ * ixgbe_aci_erase_nvm - erase NVM sector
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ *
+ * Erase the NVM sector using the ACI command (0x0702).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_erase_nvm(struct ixgbe_hw *hw, u16 module_typeid)
+{
+ struct ixgbe_aci_cmd_nvm *cmd;
+ struct libie_aq_desc desc;
+ __le16 len;
+ int err;
+
+	/* Read the module length directly from the Shadow RAM, so the read uses
+	 * module_typeid 0, the offset of the module size word is converted from
+	 * words to bytes, and last_command and read_shadow_ram are set to true.
+ */
+ err = ixgbe_aci_read_nvm(hw, 0, 2 * module_typeid + 2, 2, &len, true,
+ true);
+ if (err)
+ return err;
+
+ cmd = libie_aq_raw(&desc);
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_erase);
+
+ cmd->module_typeid = cpu_to_le16(module_typeid);
+ cmd->length = len;
+ cmd->offset_low = 0;
+ cmd->offset_high = 0;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_update_nvm - update NVM
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be written (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @command_flags: command parameters
+ *
+ * Update the NVM using the ACI command (0x0703).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_update_nvm(struct ixgbe_hw *hw, u16 module_typeid,
+ u32 offset, u16 length, void *data,
+ bool last_command, u8 command_flags)
+{
+ struct ixgbe_aci_cmd_nvm *cmd;
+ struct libie_aq_desc desc;
+
+ cmd = libie_aq_raw(&desc);
+
+	/* The highest byte of the offset must be zero. */
+ if (offset & 0xFF000000)
+ return -EINVAL;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_write);
+
+ cmd->cmd_flags |= command_flags;
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
+ cmd->module_typeid = cpu_to_le16(module_typeid);
+ cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
+ cmd->offset_high = FIELD_GET(IXGBE_ACI_NVM_OFFSET_HI_U_MASK, offset);
+ cmd->length = cpu_to_le16(length);
+
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
+
+ return ixgbe_aci_send_cmd(hw, &desc, data, length);
+}
+
+/**
+ * ixgbe_nvm_write_activate - NVM activate write
+ * @hw: pointer to the HW struct
+ * @cmd_flags: flags for write activate command
+ * @response_flags: response indicators from firmware
+ *
+ * Update the control word with the required banks' validity bits
+ * and dump the Shadow RAM to flash using ACI command (0x0707).
+ *
+ * cmd_flags controls which banks to activate, the preservation level to use
+ * when activating the NVM bank, and whether an EMP reset is required for
+ * activation.
+ *
+ * Note that the 16bit cmd_flags value is split between two separate 1 byte
+ * flag values in the descriptor.
+ *
+ * On successful return of the firmware command, the response_flags variable
+ * is updated with the flags reported by firmware indicating certain status,
+ * such as whether EMP reset is enabled.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_nvm_write_activate(struct ixgbe_hw *hw, u16 cmd_flags,
+ u8 *response_flags)
+{
+ struct ixgbe_aci_cmd_nvm *cmd;
+ struct libie_aq_desc desc;
+ s32 err;
+
+ cmd = libie_aq_raw(&desc);
+ ixgbe_fill_dflt_direct_cmd_desc(&desc,
+ ixgbe_aci_opc_nvm_write_activate);
+
+ cmd->cmd_flags = (u8)(cmd_flags & 0xFF);
+ cmd->offset_high = (u8)FIELD_GET(IXGBE_ACI_NVM_OFFSET_HI_A_MASK,
+ cmd_flags);
+
+ err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (!err && response_flags)
+ *response_flags = cmd->cmd_flags;
+
+ return err;
+}
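+
+/* A minimal sketch of how the NVM helpers above could be combined for an
+ * update; illustrative only, and the write access type and zero flag values
+ * below are assumptions rather than part of this patch:
+ *
+ *	err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_WRITE);
+ *	if (!err) {
+ *		err = ixgbe_aci_erase_nvm(hw, module_typeid);
+ *		if (!err)
+ *			err = ixgbe_aci_update_nvm(hw, module_typeid, 0, len,
+ *						   data, true, 0);
+ *		if (!err)
+ *			err = ixgbe_nvm_write_activate(hw, 0, NULL);
+ *		ixgbe_release_nvm(hw);
+ *	}
+ */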
+
+/**
+ * ixgbe_nvm_validate_checksum - validate checksum
+ * @hw: pointer to the HW struct
+ *
+ * Verify NVM PFA checksum validity using ACI command (0x0706).
+ * If the checksum verification fails, -EIO is returned.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_nvm_checksum *cmd;
+ struct libie_aq_desc desc;
+ int err;
+
+ err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_READ);
+ if (err)
+ return err;
+
+ cmd = libie_aq_raw(&desc);
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
+ cmd->flags = IXGBE_ACI_NVM_CHECKSUM_VERIFY;
+
+ err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ ixgbe_release_nvm(hw);
+
+ if (!err && cmd->checksum !=
+ cpu_to_le16(IXGBE_ACI_NVM_CHECKSUM_CORRECT)) {
+ struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter,
+ hw);
+
+ err = -EIO;
+ netdev_err(adapter->netdev, "Invalid Shadow Ram checksum");
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_discover_flash_size - Discover the available flash size
+ * @hw: pointer to the HW struct
+ *
+ * The device flash could be up to 16MB in size. However, it is possible that
+ * the actual size is smaller. Use bisection to determine the accessible size
+ * of flash memory.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_discover_flash_size(struct ixgbe_hw *hw)
+{
+ u32 min_size = 0, max_size = IXGBE_ACI_NVM_MAX_OFFSET + 1;
+ int err;
+
+ err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_READ);
+ if (err)
+ return err;
+
+ while ((max_size - min_size) > 1) {
+ u32 offset = (max_size + min_size) / 2;
+ u32 len = 1;
+ u8 data;
+
+ err = ixgbe_read_flat_nvm(hw, offset, &len, &data, false);
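+		/* A firmware EINVAL means the probed offset is past the end of
+		 * the accessible flash: lower the upper bound. A successful
+		 * read raises the lower bound.
+		 */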
+ if (err == -EIO &&
+ hw->aci.last_status == LIBIE_AQ_RC_EINVAL) {
+ err = 0;
+ max_size = offset;
+ } else if (!err) {
+ min_size = offset;
+ } else {
+ /* an unexpected error occurred */
+ goto err_read_flat_nvm;
+ }
+ }
+
+ hw->flash.flash_size = max_size;
+
+err_read_flat_nvm:
+ ixgbe_release_nvm(hw);
+
+ return err;
+}
+
+/**
+ * ixgbe_read_sr_base_address - Read the value of a Shadow RAM pointer word
+ * @hw: pointer to the HW structure
+ * @offset: the word offset of the Shadow RAM word to read
+ * @pointer: pointer value read from Shadow RAM
+ *
+ * Read the given Shadow RAM word, and convert it to a pointer value specified
+ * in bytes. This function assumes the specified offset is a valid pointer
+ * word.
+ *
+ * Each pointer word specifies whether it is stored in word size or 4KB
+ * sector size by using the highest bit. The reported pointer value will be in
+ * bytes, intended for flat NVM reads.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_sr_base_address(struct ixgbe_hw *hw, u16 offset,
+ u32 *pointer)
+{
+ u16 value;
+ int err;
+
+ err = ixgbe_read_ee_aci_e610(hw, offset, &value);
+ if (err)
+ return err;
+
+ /* Determine if the pointer is in 4KB or word units */
+ if (value & IXGBE_SR_NVM_PTR_4KB_UNITS)
+ *pointer = (value & ~IXGBE_SR_NVM_PTR_4KB_UNITS) * SZ_4K;
+ else
+ *pointer = value * sizeof(u16);
+
+ return 0;
+}
+
+/**
+ * ixgbe_read_sr_area_size - Read an area size from a Shadow RAM word
+ * @hw: pointer to the HW structure
+ * @offset: the word offset of the Shadow RAM to read
+ * @size: size value read from the Shadow RAM
+ *
+ * Read the given Shadow RAM word, and convert it to an area size value
+ * specified in bytes. This function assumes the specified offset is a valid
+ * area size word.
+ *
+ * Each area size word is specified in 4KB sector units. This function reports
+ * the size in bytes, intended for flat NVM reads.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_sr_area_size(struct ixgbe_hw *hw, u16 offset, u32 *size)
+{
+ u16 value;
+ int err;
+
+ err = ixgbe_read_ee_aci_e610(hw, offset, &value);
+ if (err)
+ return err;
+
+ /* Area sizes are always specified in 4KB units */
+ *size = value * SZ_4K;
+
+ return 0;
+}
+
+/**
+ * ixgbe_determine_active_flash_banks - Discover active bank for each module
+ * @hw: pointer to the HW struct
+ *
+ * Read the Shadow RAM control word and determine which banks are active for
+ * the NVM, OROM, and Netlist modules. Also read and calculate the associated
+ * pointer and size. These values are then cached into the ixgbe_flash_info
+ * structure for later use in order to calculate the correct offset to read
+ * from the active module.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_determine_active_flash_banks(struct ixgbe_hw *hw)
+{
+ struct ixgbe_bank_info *banks = &hw->flash.banks;
+ u16 ctrl_word;
+ int err;
+
+ err = ixgbe_read_ee_aci_e610(hw, IXGBE_E610_SR_NVM_CTRL_WORD,
+ &ctrl_word);
+ if (err)
+ return err;
+
+ if (FIELD_GET(IXGBE_SR_CTRL_WORD_1_M, ctrl_word) !=
+ IXGBE_SR_CTRL_WORD_VALID)
+ return -ENODATA;
+
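+	/* A clear control word bit selects the first flash bank, a set bit
+	 * selects the second one.
+	 */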
+ if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NVM_BANK))
+ banks->nvm_bank = IXGBE_1ST_FLASH_BANK;
+ else
+ banks->nvm_bank = IXGBE_2ND_FLASH_BANK;
+
+ if (!(ctrl_word & IXGBE_SR_CTRL_WORD_OROM_BANK))
+ banks->orom_bank = IXGBE_1ST_FLASH_BANK;
+ else
+ banks->orom_bank = IXGBE_2ND_FLASH_BANK;
+
+ if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NETLIST_BANK))
+ banks->netlist_bank = IXGBE_1ST_FLASH_BANK;
+ else
+ banks->netlist_bank = IXGBE_2ND_FLASH_BANK;
+
+ err = ixgbe_read_sr_base_address(hw, IXGBE_E610_SR_1ST_NVM_BANK_PTR,
+ &banks->nvm_ptr);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_area_size(hw, IXGBE_E610_SR_NVM_BANK_SIZE,
+ &banks->nvm_size);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_base_address(hw, IXGBE_E610_SR_1ST_OROM_BANK_PTR,
+ &banks->orom_ptr);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_area_size(hw, IXGBE_E610_SR_OROM_BANK_SIZE,
+ &banks->orom_size);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_base_address(hw, IXGBE_E610_SR_NETLIST_BANK_PTR,
+ &banks->netlist_ptr);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_area_size(hw, IXGBE_E610_SR_NETLIST_BANK_SIZE,
+ &banks->netlist_size);
+
+ return err;
+}
+
+/**
+ * ixgbe_get_flash_bank_offset - Get offset into requested flash bank
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive flash bank
+ * @module: the module to read from
+ *
+ * Based on the module, lookup the module offset from the beginning of the
+ * flash.
+ *
+ * Return: the flash offset. Note that a value of zero is invalid and must be
+ * treated as an error.
+ */
+static int ixgbe_get_flash_bank_offset(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u16 module)
+{
+ struct ixgbe_bank_info *banks = &hw->flash.banks;
+ enum ixgbe_flash_bank active_bank;
+ bool second_bank_active;
+ u32 offset, size;
+
+ switch (module) {
+ case IXGBE_E610_SR_1ST_NVM_BANK_PTR:
+ offset = banks->nvm_ptr;
+ size = banks->nvm_size;
+ active_bank = banks->nvm_bank;
+ break;
+ case IXGBE_E610_SR_1ST_OROM_BANK_PTR:
+ offset = banks->orom_ptr;
+ size = banks->orom_size;
+ active_bank = banks->orom_bank;
+ break;
+ case IXGBE_E610_SR_NETLIST_BANK_PTR:
+ offset = banks->netlist_ptr;
+ size = banks->netlist_size;
+ active_bank = banks->netlist_bank;
+ break;
+ default:
+ return 0;
+ }
+
+ switch (active_bank) {
+ case IXGBE_1ST_FLASH_BANK:
+ second_bank_active = false;
+ break;
+ case IXGBE_2ND_FLASH_BANK:
+ second_bank_active = true;
+ break;
+ default:
+ return 0;
+ }
+
+ /* The second flash bank is stored immediately following the first
+ * bank. Based on whether the 1st or 2nd bank is active, and whether
+ * we want the active or inactive bank, calculate the desired offset.
+ */
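+	/* Illustrative example: with a module base of 0x10000 and a bank
+	 * size of 0x8000, an active 2nd bank places the active data at
+	 * 0x18000 and the inactive copy at 0x10000.
+	 */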
+ switch (bank) {
+ case IXGBE_ACTIVE_FLASH_BANK:
+ return offset + (second_bank_active ? size : 0);
+ case IXGBE_INACTIVE_FLASH_BANK:
+ return offset + (second_bank_active ? 0 : size);
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_read_flash_module - Read a word from one of the main NVM modules
+ * @hw: pointer to the HW structure
+ * @bank: which bank of the module to read
+ * @module: the module to read
+ * @offset: the offset into the module in bytes
+ * @data: storage for the data read from the flash
+ * @length: bytes of data to read
+ *
+ * Read data from the specified flash module. The bank parameter indicates
+ * whether to read from the active or the inactive bank of that module.
+ *
+ * The data is read using flat NVM access, and relies on the
+ * hw->flash.banks data being set up by ixgbe_determine_active_flash_banks()
+ * during initialization.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_flash_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u16 module, u32 offset, u8 *data, u32 length)
+{
+ u32 start;
+ int err;
+
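+	/* A zero offset means the bank lookup failed (unknown module or
+	 * bank), so treat it as an error.
+	 */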
+ start = ixgbe_get_flash_bank_offset(hw, bank, module);
+ if (!start)
+ return -EINVAL;
+
+ err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_READ);
+ if (err)
+ return err;
+
+ err = ixgbe_read_flat_nvm(hw, start + offset, &length, data, false);
+
+ ixgbe_release_nvm(hw);
+
+ return err;
+}
+
+/**
+ * ixgbe_read_nvm_module - Read from the active main NVM module
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from active or inactive NVM module
+ * @offset: offset into the NVM module to read, in words
+ * @data: storage for returned word value
+ *
+ * Read the specified word from the active NVM module. This includes the CSS
+ * header at the start of the NVM module.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_nvm_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ __le16 data_local;
+ int err;
+
+ err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_1ST_NVM_BANK_PTR,
+ offset * sizeof(data_local),
+ (u8 *)&data_local,
+ sizeof(data_local));
+ if (!err)
+ *data = le16_to_cpu(data_local);
+
+ return err;
+}
+
+/**
+ * ixgbe_read_netlist_module - Read data from the netlist module area
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive module
+ * @offset: offset into the netlist to read from
+ * @data: storage for returned word value
+ *
+ * Read a word from the specified netlist bank.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_netlist_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ __le16 data_local;
+ int err;
+
+ err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_NETLIST_BANK_PTR,
+ offset * sizeof(data_local),
+ (u8 *)&data_local, sizeof(data_local));
+ if (!err)
+ *data = le16_to_cpu(data_local);
+
+ return err;
+}
+
+/**
+ * ixgbe_read_orom_module - Read from the active Option ROM module
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from active or inactive OROM module
+ * @offset: offset into the OROM module to read, in words
+ * @data: storage for returned word value
+ *
+ * Read the specified word from the active Option ROM module of the flash.
+ * Note that unlike the NVM module, the CSS data is stored at the end of the
+ * module instead of at the beginning.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_orom_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ __le16 data_local;
+ int err;
+
+ err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_1ST_OROM_BANK_PTR,
+ offset * sizeof(data_local),
+ (u8 *)&data_local, sizeof(data_local));
+ if (!err)
+ *data = le16_to_cpu(data_local);
+
+ return err;
+}
+
+/**
+ * ixgbe_get_nvm_css_hdr_len - Read the CSS header length
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @hdr_len: storage for header length in words
+ *
+ * Read the CSS header length from the NVM CSS header, convert it to words,
+ * and add the authentication header size.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_nvm_css_hdr_len(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 *hdr_len)
+{
+ u16 hdr_len_l, hdr_len_h;
+ u32 hdr_len_dword;
+ int err;
+
+ err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_L,
+ &hdr_len_l);
+ if (err)
+ return err;
+
+ err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_H,
+ &hdr_len_h);
+ if (err)
+ return err;
+
+	/* CSS header length is in dwords, so convert to words and add the
+	 * authentication header size.
+ */
+ hdr_len_dword = (hdr_len_h << 16) | hdr_len_l;
+ *hdr_len = hdr_len_dword * 2 + IXGBE_NVM_AUTH_HEADER_LEN;
+
+ return 0;
+}
+
+/**
+ * ixgbe_read_nvm_sr_copy - Read a word from the Shadow RAM copy
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive NVM module
+ * @offset: offset into the Shadow RAM copy to read, in words
+ * @data: storage for returned word value
+ *
+ * Read the specified word from the copy of the Shadow RAM found in the
+ * specified NVM module.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_nvm_sr_copy(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ u32 hdr_len;
+ int err;
+
+ err = ixgbe_get_nvm_css_hdr_len(hw, bank, &hdr_len);
+ if (err)
+ return err;
+
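+	/* The Shadow RAM copy starts at the first IXGBE_HDR_LEN_ROUNDUP
+	 * word boundary after the CSS header.
+	 */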
+ hdr_len = round_up(hdr_len, IXGBE_HDR_LEN_ROUNDUP);
+
+ return ixgbe_read_nvm_module(hw, bank, hdr_len + offset, data);
+}
+
+/**
+ * ixgbe_get_nvm_srev - Read the security revision from the NVM CSS header
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @srev: storage for security revision
+ *
+ * Read the security revision out of the CSS header of the active NVM module
+ * bank.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_nvm_srev(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank, u32 *srev)
+{
+ u16 srev_l, srev_h;
+ int err;
+
+ err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_L, &srev_l);
+ if (err)
+ return err;
+
+ err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_H, &srev_h);
+ if (err)
+ return err;
+
+ *srev = (srev_h << 16) | srev_l;
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_orom_civd_data - Get the combo version information from Option ROM
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash module
+ * @civd: storage for the Option ROM CIVD data.
+ *
+ * Searches through the Option ROM flash contents to locate the CIVD data for
+ * the image.
+ *
+ * Return: -ENOMEM when cannot allocate memory, -EDOM for checksum violation,
+ * -ENODATA when cannot find proper data, -EIO for faulty read or
+ * 0 on success.
+ *
+ * On success @civd stores collected data.
+ */
+static int
+ixgbe_get_orom_civd_data(struct ixgbe_hw *hw, enum ixgbe_bank_select bank,
+ struct ixgbe_orom_civd_info *civd)
+{
+ u32 orom_size = hw->flash.banks.orom_size;
+ u8 *orom_data;
+ u32 offset;
+ int err;
+
+ orom_data = kzalloc(orom_size, GFP_KERNEL);
+ if (!orom_data)
+ return -ENOMEM;
+
+ err = ixgbe_read_flash_module(hw, bank,
+ IXGBE_E610_SR_1ST_OROM_BANK_PTR, 0,
+ orom_data, orom_size);
+ if (err) {
+ err = -EIO;
+ goto cleanup;
+ }
+
+ /* The CIVD section is located in the Option ROM aligned to 512 bytes.
+ * The first 4 bytes must contain the ASCII characters "$CIV".
+ * A simple modulo 256 sum of all of the bytes of the structure must
+ * equal 0.
+ */
+ for (offset = 0; offset + SZ_512 <= orom_size; offset += SZ_512) {
+ struct ixgbe_orom_civd_info *tmp;
+ u8 sum = 0;
+ u32 i;
+
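+		/* The CIVD structure must fit entirely within one 512-byte
+		 * aligned slot for this scan to be valid.
+		 */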
+ BUILD_BUG_ON(sizeof(*tmp) > SZ_512);
+
+ tmp = (struct ixgbe_orom_civd_info *)&orom_data[offset];
+
+ /* Skip forward until we find a matching signature */
+ if (memcmp(IXGBE_OROM_CIV_SIGNATURE, tmp->signature,
+ sizeof(tmp->signature)))
+ continue;
+
+ /* Verify that the simple checksum is zero */
+ for (i = 0; i < sizeof(*tmp); i++)
+ sum += ((u8 *)tmp)[i];
+
+ if (sum) {
+ err = -EDOM;
+ goto cleanup;
+ }
+
+ *civd = *tmp;
+ err = 0;
+
+ goto cleanup;
+ }
+
+ err = -ENODATA;
+cleanup:
+ kfree(orom_data);
+ return err;
+}
+
+/**
+ * ixgbe_get_orom_srev - Read the security revision from the OROM CSS header
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from active or inactive flash module
+ * @srev: storage for security revision
+ *
+ * Read the security revision out of the CSS header of the active OROM module
+ * bank.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_orom_srev(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 *srev)
+{
+ u32 orom_size_word = hw->flash.banks.orom_size / 2;
+ u32 css_start, hdr_len;
+ u16 srev_l, srev_h;
+ int err;
+
+ err = ixgbe_get_nvm_css_hdr_len(hw, bank, &hdr_len);
+ if (err)
+ return err;
+
+ if (orom_size_word < hdr_len)
+ return -EINVAL;
+
+ /* Calculate how far into the Option ROM the CSS header starts. Note
+ * that ixgbe_read_orom_module takes a word offset.
+ */
+ css_start = orom_size_word - hdr_len;
+ err = ixgbe_read_orom_module(hw, bank,
+ css_start + IXGBE_NVM_CSS_SREV_L,
+ &srev_l);
+ if (err)
+ return err;
+
+ err = ixgbe_read_orom_module(hw, bank,
+ css_start + IXGBE_NVM_CSS_SREV_H,
+ &srev_h);
+ if (err)
+ return err;
+
+ *srev = srev_h << 16 | srev_l;
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_orom_ver_info - Read Option ROM version information
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash module
+ * @orom: pointer to Option ROM info structure
+ *
+ * Read Option ROM version and security revision from the Option ROM flash
+ * section.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_orom_ver_info(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ struct ixgbe_orom_info *orom)
+{
+ struct ixgbe_orom_civd_info civd;
+ u32 combo_ver;
+ int err;
+
+ err = ixgbe_get_orom_civd_data(hw, bank, &civd);
+ if (err)
+ return err;
+
+ combo_ver = get_unaligned_le32(&civd.combo_ver);
+
+ orom->major = (u8)FIELD_GET(IXGBE_OROM_VER_MASK, combo_ver);
+ orom->patch = (u8)FIELD_GET(IXGBE_OROM_VER_PATCH_MASK, combo_ver);
+ orom->build = (u16)FIELD_GET(IXGBE_OROM_VER_BUILD_MASK, combo_ver);
+
+ return ixgbe_get_orom_srev(hw, bank, &orom->srev);
+}
+
+/**
+ * ixgbe_get_inactive_orom_ver - Read Option ROM version from the inactive bank
+ * @hw: pointer to the HW structure
+ * @orom: storage for Option ROM version information
+ *
+ * Read the Option ROM version and security revision data for the inactive
+ * section of flash. Used to access version data for a pending update that has
+ * not yet been activated.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_inactive_orom_ver(struct ixgbe_hw *hw,
+ struct ixgbe_orom_info *orom)
+{
+ return ixgbe_get_orom_ver_info(hw, IXGBE_INACTIVE_FLASH_BANK, orom);
+}
+
+/**
+ * ixgbe_get_nvm_ver_info - Read NVM version information
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @nvm: pointer to NVM info structure
+ *
+ * Read the NVM EETRACK ID and map version of the main NVM image bank, filling
+ * in the nvm info structure.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_nvm_ver_info(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ struct ixgbe_nvm_info *nvm)
+{
+ u16 eetrack_lo, eetrack_hi, ver;
+ int err;
+
+ err = ixgbe_read_nvm_sr_copy(hw, bank,
+ IXGBE_E610_SR_NVM_DEV_STARTER_VER, &ver);
+ if (err)
+ return err;
+
+ nvm->major = FIELD_GET(IXGBE_E610_NVM_VER_HI_MASK, ver);
+ nvm->minor = FIELD_GET(IXGBE_E610_NVM_VER_LO_MASK, ver);
+
+ err = ixgbe_read_nvm_sr_copy(hw, bank, IXGBE_E610_SR_NVM_EETRACK_LO,
+ &eetrack_lo);
+ if (err)
+ return err;
+
+ err = ixgbe_read_nvm_sr_copy(hw, bank, IXGBE_E610_SR_NVM_EETRACK_HI,
+ &eetrack_hi);
+ if (err)
+ return err;
+
+ nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
+
+ ixgbe_get_nvm_srev(hw, bank, &nvm->srev);
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_inactive_nvm_ver - Read NVM version from the inactive bank
+ * @hw: pointer to the HW structure
+ * @nvm: storage for NVM version information
+ *
+ * Read the NVM EETRACK ID, Map version, and security revision of the
+ * inactive NVM bank. Used to access version data for a pending update that
+ * has not yet been activated.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_inactive_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm)
+{
+ return ixgbe_get_nvm_ver_info(hw, IXGBE_INACTIVE_FLASH_BANK, nvm);
+}
+
+/**
+ * ixgbe_get_active_nvm_ver - Read NVM version from the active bank
+ * @hw: pointer to the HW structure
+ * @nvm: storage for NVM version information
+ *
+ * Reads the NVM EETRACK ID, Map version, and security revision of the
+ * active NVM bank.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_active_nvm_ver(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_info *nvm)
+{
+ return ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK, nvm);
+}
+
+/**
+ * ixgbe_get_netlist_info - Read the netlist version information
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @netlist: pointer to netlist version info structure
+ *
+ * Get the netlist version information from the requested bank. Reads the Link
+ * Topology section to find the Netlist ID block and extract the relevant
+ * information into the netlist version structure.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_netlist_info(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ struct ixgbe_netlist_info *netlist)
+{
+ u16 module_id, length, node_count, i;
+ u16 *id_blk;
+ int err;
+
+ err = ixgbe_read_netlist_module(hw, bank, IXGBE_NETLIST_TYPE_OFFSET,
+ &module_id);
+ if (err)
+ return err;
+
+ if (module_id != IXGBE_NETLIST_LINK_TOPO_MOD_ID)
+ return -EIO;
+
+ err = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_MODULE_LEN,
+ &length);
+ if (err)
+ return err;
+
+ /* Sanity check that we have at least enough words to store the
+ * netlist ID block.
+ */
+ if (length < IXGBE_NETLIST_ID_BLK_SIZE)
+ return -EIO;
+
+ err = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_NODE_COUNT,
+ &node_count);
+ if (err)
+ return err;
+
+ node_count &= IXGBE_LINK_TOPO_NODE_COUNT_M;
+
+ id_blk = kcalloc(IXGBE_NETLIST_ID_BLK_SIZE, sizeof(*id_blk), GFP_KERNEL);
+ if (!id_blk)
+ return -ENOMEM;
+
+ /* Read out the entire Netlist ID Block at once. */
+ err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_NETLIST_BANK_PTR,
+ IXGBE_NETLIST_ID_BLK_OFFSET(node_count) *
+ sizeof(*id_blk), (u8 *)id_blk,
+ IXGBE_NETLIST_ID_BLK_SIZE *
+ sizeof(*id_blk));
+ if (err)
+ goto free_id_blk;
+
+ for (i = 0; i < IXGBE_NETLIST_ID_BLK_SIZE; i++)
+ id_blk[i] = le16_to_cpu(((__le16 *)id_blk)[i]);
+
+ netlist->major = id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_LOW];
+ netlist->minor = id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_LOW];
+ netlist->type = id_blk[IXGBE_NETLIST_ID_BLK_TYPE_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_TYPE_LOW];
+ netlist->rev = id_blk[IXGBE_NETLIST_ID_BLK_REV_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_REV_LOW];
+ netlist->cust_ver = id_blk[IXGBE_NETLIST_ID_BLK_CUST_VER];
+ /* Read the left most 4 bytes of SHA */
+ netlist->hash = id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(15)] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(14)];
+
+free_id_blk:
+ kfree(id_blk);
+ return err;
+}
+
+/**
+ * ixgbe_get_inactive_netlist_ver - Read netlist version from the inactive bank
+ * @hw: pointer to the HW struct
+ * @netlist: pointer to netlist version info structure
+ *
+ * Read the netlist version data from the inactive netlist bank. Used to
+ * extract version data of a pending flash update in order to display the
+ * version data.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_inactive_netlist_ver(struct ixgbe_hw *hw,
+ struct ixgbe_netlist_info *netlist)
+{
+ return ixgbe_get_netlist_info(hw, IXGBE_INACTIVE_FLASH_BANK, netlist);
+}
+
+/**
+ * ixgbe_get_flash_data - get flash data
+ * @hw: pointer to the HW struct
+ *
+ * Read and populate flash data such as Shadow RAM size,
+ * max_timeout and blank_nvm_mode
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_flash_data(struct ixgbe_hw *hw)
+{
+ struct ixgbe_flash_info *flash = &hw->flash;
+ u32 fla, gens_stat;
+ u8 sr_size;
+ int err;
+
+ /* The SR size is stored regardless of the NVM programming mode
+ * as the blank mode may be used in the factory line.
+ */
+ gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
+ sr_size = FIELD_GET(GLNVM_GENS_SR_SIZE_M, gens_stat);
+
+ /* Switching to words (sr_size contains power of 2) */
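+	/* e.g. sr_size of 3 encodes an 8KB Shadow RAM, i.e. 4096 words */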
+ flash->sr_words = BIT(sr_size) * (SZ_1K / sizeof(u16));
+
+ /* Check if we are in the normal or blank NVM programming mode */
+ fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA);
+ if (fla & IXGBE_GLNVM_FLA_LOCKED_M) {
+ flash->blank_nvm_mode = false;
+ } else {
+ flash->blank_nvm_mode = true;
+ return -EIO;
+ }
+
+ err = ixgbe_discover_flash_size(hw);
+ if (err)
+ return err;
+
+ err = ixgbe_determine_active_flash_banks(hw);
+ if (err)
+ return err;
+
+ err = ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK,
+ &flash->nvm);
+ if (err)
+ return err;
+
+ err = ixgbe_get_orom_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK,
+ &flash->orom);
+ if (err)
+ return err;
+
+ err = ixgbe_get_netlist_info(hw, IXGBE_ACTIVE_FLASH_BANK,
+ &flash->netlist);
+ return err;
+}
+
+/**
+ * ixgbe_aci_nvm_update_empr - update NVM using EMPR
+ * @hw: pointer to the HW struct
+ *
+ * Force EMP reset using ACI command (0x0709). This command allows SW to
+ * request an EMPR to activate new FW.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_nvm_update_empr(struct ixgbe_hw *hw)
+{
+ struct libie_aq_desc desc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_update_empr);
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_nvm_set_pkg_data - NVM set package data
+ * @hw: pointer to the HW struct
+ * @del_pkg_data_flag: if set, the package data currently stored by the FW
+ *                     is deleted; in that case the buffer must have size 0
+ * @data: pointer to buffer
+ * @length: length of the buffer
+ *
+ * Set package data using ACI command (0x070A).
+ * This command is equivalent to the reception of
+ * a PLDM FW Update GetPackageData cmd. This command should be sent
+ * as part of the NVM update as the first cmd in the flow.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_nvm_set_pkg_data(struct ixgbe_hw *hw, bool del_pkg_data_flag,
+ u8 *data, u16 length)
+{
+ struct ixgbe_aci_cmd_nvm_pkg_data *cmd;
+ struct libie_aq_desc desc;
+
+ if (length != 0 && !data)
+ return -EINVAL;
+
+ cmd = libie_aq_raw(&desc);
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_pkg_data);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
+
+ if (del_pkg_data_flag)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_PKG_DELETE;
+
+ return ixgbe_aci_send_cmd(hw, &desc, data, length);
+}
+
+/**
+ * ixgbe_nvm_pass_component_tbl - NVM pass component table
+ * @hw: pointer to the HW struct
+ * @data: pointer to buffer
+ * @length: length of the buffer
+ * @transfer_flag: parameter for determining stage of the update
+ * @comp_response: a pointer to the response from the 0x070B ACI.
+ * @comp_response_code: a pointer to the response code from the 0x070B ACI.
+ *
+ * Pass component table using ACI command (0x070B). This command is equivalent
+ * to the reception of a PLDM FW Update PassComponentTable cmd.
+ * This command should be sent once per component. It can be only sent after
+ * Set Package Data cmd and before actual update. FW will assume these
+ * commands are going to be sent until the TransferFlag is set to End or
+ * StartAndEnd.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_nvm_pass_component_tbl(struct ixgbe_hw *hw, u8 *data, u16 length,
+ u8 transfer_flag, u8 *comp_response,
+ u8 *comp_response_code)
+{
+ struct ixgbe_aci_cmd_nvm_pass_comp_tbl *cmd;
+ struct libie_aq_desc desc;
+ int err;
+
+ if (!data || !comp_response || !comp_response_code)
+ return -EINVAL;
+
+ cmd = libie_aq_raw(&desc);
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc,
+ ixgbe_aci_opc_nvm_pass_component_tbl);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
+
+ cmd->transfer_flag = transfer_flag;
+ err = ixgbe_aci_send_cmd(hw, &desc, data, length);
+ if (!err) {
+ *comp_response = cmd->component_response;
+ *comp_response_code = cmd->component_response_code;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_read_sr_word_aci - Reads Shadow RAM via ACI
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using ixgbe_read_flat_nvm.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ u32 bytes = sizeof(u16);
+ u16 data_local;
+ int err;
+
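+	/* Shadow RAM offsets are in words; the flat NVM read takes a byte
+	 * offset.
+	 */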
+ err = ixgbe_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
+ (u8 *)&data_local, true);
+ if (err)
+ return err;
+
+ *data = data_local;
+ return 0;
+}
+
+/**
+ * ixgbe_read_flat_nvm - Read portion of NVM by flat offset
+ * @hw: pointer to the HW struct
+ * @offset: offset from beginning of NVM
+ * @length: (in) number of bytes to read; (out) number of bytes actually read
+ * @data: buffer to return data in (sized to fit the specified length)
+ * @read_shadow_ram: if true, read from shadow RAM instead of NVM
+ *
+ * Reads a portion of the NVM, as a flat memory space. This function correctly
+ * breaks read requests across Shadow RAM sectors, prevents Shadow RAM size
+ * from being exceeded in case of Shadow RAM read requests and ensures that no
+ * single read request exceeds the maximum 4KB read for a single admin command.
+ *
+ * Returns an error code on failure. Note that the data pointer may be
+ * partially updated if some reads succeed before a failure.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length,
+ u8 *data, bool read_shadow_ram)
+{
+ u32 inlen = *length;
+ u32 bytes_read = 0;
+ bool last_cmd;
+ int err;
+
+ /* Verify the length of the read if this is for the Shadow RAM */
+ if (read_shadow_ram && ((offset + inlen) >
+ (hw->eeprom.word_size * 2u)))
+ return -EINVAL;
+
+ do {
+ u32 read_size, sector_offset;
+
+ /* ixgbe_aci_read_nvm cannot read more than 4KB at a time.
+ * Additionally, a read from the Shadow RAM may not cross over
+ * a sector boundary. Conveniently, the sector size is also 4KB.
+ */
+ sector_offset = offset % IXGBE_ACI_MAX_BUFFER_SIZE;
+ read_size = min_t(u32,
+ IXGBE_ACI_MAX_BUFFER_SIZE - sector_offset,
+ inlen - bytes_read);
+
+ last_cmd = !(bytes_read + read_size < inlen);
+
+ /* ixgbe_aci_read_nvm takes the length as a u16. Our read_size
+ * is calculated using a u32, but the IXGBE_ACI_MAX_BUFFER_SIZE
+ * maximum size guarantees that it will fit within the 2 bytes.
+ */
+ err = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_START_POINT,
+ offset, (u16)read_size,
+ data + bytes_read, last_cmd,
+ read_shadow_ram);
+ if (err)
+ break;
+
+ bytes_read += read_size;
+ offset += read_size;
+ } while (!last_cmd);
+
+ *length = bytes_read;
+ return err;
+}
+
+/**
+ * ixgbe_read_sr_buf_aci - Read Shadow RAM buffer via ACI
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM words to read (0x000000 - 0x001FFF)
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Read 16 bit words (data buf) from the Shadow RAM. The caller is expected
+ * to hold the NVM ownership.
+ *
+ * Return: the operation exit code.
+ */
+int ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words,
+ u16 *data)
+{
+ u32 bytes = *words * 2;
+ int err;
+
+ err = ixgbe_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);
+ if (err)
+ return err;
+
+ *words = bytes / 2;
+
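+	/* Words are stored little-endian on flash; convert in place. */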
+ for (int i = 0; i < *words; i++)
+ data[i] = le16_to_cpu(((__le16 *)data)[i]);
+
+ return 0;
+}
+
+/**
+ * ixgbe_read_ee_aci_e610 - Read EEPROM word using the admin command.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the ACI.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding with the read.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_read_ee_aci_e610(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ int err;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ err = hw->eeprom.ops.init_params(hw);
+ if (err)
+ return err;
+ }
+
+ err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_READ);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_word_aci(hw, offset, data);
+ ixgbe_release_nvm(hw);
+
+ return err;
+}
+
+/**
+ * ixgbe_read_ee_aci_buffer_e610 - Read EEPROM words via ACI
+ * @hw: pointer to hardware structure
+ * @offset: offset of words in the EEPROM to read
+ * @words: number of words to read
+ * @data: words read from the EEPROM
+ *
+ * Read 16 bit words from the EEPROM via the ACI. Initialize the EEPROM params
+ * prior to the read. Acquire/release the NVM ownership.
+ *
+ * Return: the operation exit code.
+ */
+int ixgbe_read_ee_aci_buffer_e610(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ int err;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ err = hw->eeprom.ops.init_params(hw);
+ if (err)
+ return err;
+ }
+
+ err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_READ);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_buf_aci(hw, offset, &words, data);
+ ixgbe_release_nvm(hw);
+
+ return err;
+}
+
+/**
+ * ixgbe_validate_eeprom_checksum_e610 - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_validate_eeprom_checksum_e610(struct ixgbe_hw *hw, u16 *checksum_val)
+{
+ int err;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ err = hw->eeprom.ops.init_params(hw);
+ if (err)
+ return err;
+ }
+
+ err = ixgbe_nvm_validate_checksum(hw);
+ if (err)
+ return err;
+
+ if (checksum_val) {
+ u16 tmp_checksum;
+
+ err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_READ);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_word_aci(hw, IXGBE_E610_SR_SW_CHECKSUM_WORD,
+ &tmp_checksum);
+ ixgbe_release_nvm(hw);
+
+ if (!err)
+ *checksum_val = tmp_checksum;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_reset_hw_e610 - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, and performs a reset.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_reset_hw_e610(struct ixgbe_hw *hw)
+{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ u32 ctrl, i;
+ int err;
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ err = hw->mac.ops.stop_adapter(hw);
+ if (err)
+ goto reset_hw_out;
+
+ /* Flush pending Tx transactions. */
+ ixgbe_clear_tx_pending(hw);
+
+ hw->phy.ops.init(hw);
+mac_reset_top:
+ err = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (err)
+ return -EBUSY;
+ ctrl = IXGBE_CTRL_RST;
+ ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+ IXGBE_WRITE_FLUSH(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+ /* Poll for reset bit to self-clear indicating reset is complete */
+ for (i = 0; i < 10; i++) {
+ udelay(1);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ if (!(ctrl & IXGBE_CTRL_RST_MASK))
+ break;
+ }
+
+ if (ctrl & IXGBE_CTRL_RST_MASK) {
+ struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter,
+ hw);
+
+ err = -EIO;
+		netdev_err(adapter->netdev, "Reset polling failed to complete.\n");
+ }
+
+ /* Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to allow time
+ * for any pending HW events to complete.
+ */
+ msleep(100);
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ goto mac_reset_top;
+ }
+
+ /* Set the Rx packet buffer size. */
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), GENMASK(18, 17));
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /* Maximum number of Receive Address Registers. */
+#define IXGBE_MAX_NUM_RAR 128
+
+ /* Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table. Also reset num_rar_entries to the
+ * maximum number of Receive Address Registers, since we modify this
+ * value when programming the SAN MAC address.
+ */
+ hw->mac.num_rar_entries = IXGBE_MAX_NUM_RAR;
+ hw->mac.ops.init_rx_addrs(hw);
+
+ /* Initialize bus function number */
+ hw->mac.ops.set_lan_id(hw);
+
+reset_hw_out:
+ return err;
+}
+
+/**
+ * ixgbe_get_pfa_module_tlv - Read sub module TLV from NVM PFA
+ * @hw: pointer to hardware structure
+ * @module_tlv: pointer to module TLV to return
+ * @module_tlv_len: pointer to module TLV length to return
+ * @module_type: module type requested
+ *
+ * Find the requested sub module TLV type from the Preserved Field
+ * Area (PFA) and return the TLV pointer and length. The caller can
+ * use these to read the variable length TLV value.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_pfa_module_tlv(struct ixgbe_hw *hw, u16 *module_tlv,
+ u16 *module_tlv_len, u16 module_type)
+{
+ u16 pfa_len, pfa_ptr, pfa_end_ptr;
+ u16 next_tlv;
+ int err;
+
+ err = ixgbe_read_ee_aci_e610(hw, IXGBE_E610_SR_PFA_PTR, &pfa_ptr);
+ if (err)
+ return err;
+
+ err = ixgbe_read_ee_aci_e610(hw, pfa_ptr, &pfa_len);
+ if (err)
+ return err;
+
+ /* Starting with first TLV after PFA length, iterate through the list
+ * of TLVs to find the requested one.
+ */
+ next_tlv = pfa_ptr + 1;
+ pfa_end_ptr = pfa_ptr + pfa_len;
+ while (next_tlv < pfa_end_ptr) {
+ u16 tlv_sub_module_type, tlv_len;
+
+ /* Read TLV type */
+ err = ixgbe_read_ee_aci_e610(hw, next_tlv,
+ &tlv_sub_module_type);
+ if (err)
+ break;
+
+ /* Read TLV length */
+ err = ixgbe_read_ee_aci_e610(hw, next_tlv + 1, &tlv_len);
+ if (err)
+ break;
+
+ if (tlv_sub_module_type == module_type) {
+ if (tlv_len) {
+ *module_tlv = next_tlv;
+ *module_tlv_len = tlv_len;
+ return 0;
+ }
+ return -EIO;
+ }
+ /* Check next TLV, i.e. current TLV pointer + length + 2 words
+ * (for current TLV's type and length).
+ */
+ next_tlv = next_tlv + tlv_len + 2;
+ }
+ /* Module does not exist */
+ return -ENODATA;
+}
+
+/**
+ * ixgbe_read_pba_string_e610 - Read PBA string from NVM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the NVM
+ * @pba_num_size: part number string buffer length
+ *
+ * Read the part number string from the NVM.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_pba_string_e610(struct ixgbe_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
+{
+ u16 pba_tlv, pba_tlv_len;
+ u16 pba_word, pba_size;
+ int err;
+
+ *pba_num = '\0';
+
+ err = ixgbe_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
+ IXGBE_E610_SR_PBA_BLOCK_PTR);
+ if (err)
+ return err;
+
+ /* pba_size is the next word */
+ err = ixgbe_read_ee_aci_e610(hw, (pba_tlv + 2), &pba_size);
+ if (err)
+ return err;
+
+ if (pba_tlv_len < pba_size)
+ return -EINVAL;
+
+ /* Subtract one to get PBA word count (PBA Size word is included in
+ * total size).
+ */
+ pba_size--;
+
+ if (pba_num_size < (((u32)pba_size * 2) + 1))
+ return -EINVAL;
+
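+	/* Each PBA word packs two ASCII characters of the part number. */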
+ for (u16 i = 0; i < pba_size; i++) {
+ err = ixgbe_read_ee_aci_e610(hw, (pba_tlv + 2 + 1) + i,
+ &pba_word);
+ if (err)
+ return err;
+
+ pba_num[(i * 2)] = FIELD_GET(IXGBE_E610_SR_PBA_BLOCK_MASK,
+ pba_word);
+ pba_num[(i * 2) + 1] = pba_word & 0xFF;
+ }
+
+ pba_num[(pba_size * 2)] = '\0';
+
+ return err;
+}
+
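+/* Adapter callback that lets the shared libie fwlog core issue ACI commands
+ * through this device's HW instance.
+ */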
+static int __fwlog_send_cmd(void *priv, struct libie_aq_desc *desc, void *buf,
+ u16 size)
+{
+ struct ixgbe_hw *hw = priv;
+
+ return ixgbe_aci_send_cmd(hw, desc, buf, size);
+}
+
+int ixgbe_fwlog_init(struct ixgbe_hw *hw)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+ struct libie_fwlog_api api = {
+ .pdev = adapter->pdev,
+ .send_cmd = __fwlog_send_cmd,
+ .debugfs_root = adapter->ixgbe_dbg_adapter,
+ .priv = hw,
+ };
+
+ if (hw->mac.type != ixgbe_mac_e610)
+ return -EOPNOTSUPP;
+
+ return libie_fwlog_init(&hw->fwlog, &api);
+}
+
+void ixgbe_fwlog_deinit(struct ixgbe_hw *hw)
+{
+ if (hw->mac.type != ixgbe_mac_e610)
+ return;
+
+ libie_fwlog_deinit(&hw->fwlog);
+}
+
+static const struct ixgbe_mac_operations mac_ops_e610 = {
+ .init_hw = ixgbe_init_hw_generic,
+ .start_hw = ixgbe_start_hw_e610,
+ .clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic,
+ .enable_rx_dma = ixgbe_enable_rx_dma_generic,
+ .get_mac_addr = ixgbe_get_mac_addr_generic,
+ .get_device_caps = ixgbe_get_device_caps_generic,
+ .stop_adapter = ixgbe_stop_adapter_generic,
+ .set_lan_id = ixgbe_set_lan_id_multi_port_pcie,
+ .set_rxpba = ixgbe_set_rxpba_generic,
+ .check_link = ixgbe_check_link_e610,
+ .blink_led_start = ixgbe_blink_led_start_X540,
+ .blink_led_stop = ixgbe_blink_led_stop_X540,
+ .set_rar = ixgbe_set_rar_generic,
+ .clear_rar = ixgbe_clear_rar_generic,
+ .set_vmdq = ixgbe_set_vmdq_generic,
+ .set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic,
+ .clear_vmdq = ixgbe_clear_vmdq_generic,
+ .init_rx_addrs = ixgbe_init_rx_addrs_generic,
+ .update_mc_addr_list = ixgbe_update_mc_addr_list_generic,
+ .enable_mc = ixgbe_enable_mc_generic,
+ .disable_mc = ixgbe_disable_mc_generic,
+ .clear_vfta = ixgbe_clear_vfta_generic,
+ .set_vfta = ixgbe_set_vfta_generic,
+ .fc_enable = ixgbe_fc_enable_generic,
+ .set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550,
+ .init_uta_tables = ixgbe_init_uta_tables_generic,
+ .set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing,
+ .set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing,
+ .set_source_address_pruning =
+ ixgbe_set_source_address_pruning_x550,
+ .set_ethertype_anti_spoofing =
+ ixgbe_set_ethertype_anti_spoofing_x550,
+ .disable_rx_buff = ixgbe_disable_rx_buff_generic,
+ .enable_rx_buff = ixgbe_enable_rx_buff_generic,
+ .enable_rx = ixgbe_enable_rx_generic,
+ .disable_rx = ixgbe_disable_rx_e610,
+ .led_on = ixgbe_led_on_generic,
+ .led_off = ixgbe_led_off_generic,
+ .init_led_link_act = ixgbe_init_led_link_act_generic,
+ .reset_hw = ixgbe_reset_hw_e610,
+ .get_fw_ver = ixgbe_aci_get_fw_ver,
+ .get_media_type = ixgbe_get_media_type_e610,
+ .setup_link = ixgbe_setup_link_e610,
+ .fw_recovery_mode = ixgbe_fw_recovery_mode_e610,
+ .fw_rollback_mode = ixgbe_fw_rollback_mode_e610,
+ .get_nvm_ver = ixgbe_get_active_nvm_ver,
+ .get_link_capabilities = ixgbe_get_link_capabilities_e610,
+ .get_bus_info = ixgbe_get_bus_info_generic,
+ .acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540,
+ .release_swfw_sync = ixgbe_release_swfw_sync_X540,
+ .init_swfw_sync = ixgbe_init_swfw_sync_X540,
+ .prot_autoc_read = prot_autoc_read_generic,
+ .prot_autoc_write = prot_autoc_write_generic,
+ .setup_fc = ixgbe_setup_fc_e610,
+ .fc_autoneg = ixgbe_fc_autoneg_e610,
+ .enable_mdd = ixgbe_enable_mdd_x550,
+ .disable_mdd = ixgbe_disable_mdd_x550,
+ .restore_mdd_vf = ixgbe_restore_mdd_vf_x550,
+ .handle_mdd = ixgbe_handle_mdd_x550,
+};
+
+static const struct ixgbe_phy_operations phy_ops_e610 = {
+ .init = ixgbe_init_phy_ops_e610,
+ .identify = ixgbe_identify_phy_e610,
+ .identify_sfp = ixgbe_identify_module_e610,
+ .setup_link_speed = ixgbe_setup_phy_link_speed_generic,
+ .setup_link = ixgbe_setup_phy_link_e610,
+ .enter_lplu = ixgbe_enter_lplu_e610,
+};
+
+static const struct ixgbe_eeprom_operations eeprom_ops_e610 = {
+ .read = ixgbe_read_ee_aci_e610,
+ .read_buffer = ixgbe_read_ee_aci_buffer_e610,
+ .validate_checksum = ixgbe_validate_eeprom_checksum_e610,
+ .read_pba_string = ixgbe_read_pba_string_e610,
+ .init_params = ixgbe_init_eeprom_params_e610,
+};
+
+const struct ixgbe_info ixgbe_e610_info = {
+ .mac = ixgbe_mac_e610,
+ .get_invariants = ixgbe_get_invariants_X540,
+ .mac_ops = &mac_ops_e610,
+ .eeprom_ops = &eeprom_ops_e610,
+ .phy_ops = &phy_ops_e610,
+ .mbx_ops = &mbx_ops_generic,
+ .mvals = ixgbe_mvals_x550em_a,
+};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h
new file mode 100644
index 000000000000..11916b979d28
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Intel Corporation. */
+
+#ifndef _IXGBE_E610_H_
+#define _IXGBE_E610_H_
+
+#include "ixgbe_type.h"
+
+int ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct libie_aq_desc *desc,
+ void *buf, u16 buf_size);
+bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw);
+int ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
+ bool *pending);
+void ixgbe_fill_dflt_direct_cmd_desc(struct libie_aq_desc *desc, u16 opcode);
+int ixgbe_acquire_res(struct ixgbe_hw *hw, enum libie_aq_res_id res,
+ enum libie_aq_res_access_type access, u32 timeout);
+void ixgbe_release_res(struct ixgbe_hw *hw, enum libie_aq_res_id res);
+int ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
+ u32 *cap_count, enum ixgbe_aci_opc opc);
+int ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_caps);
+int ixgbe_discover_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_caps);
+int ixgbe_get_caps(struct ixgbe_hw *hw);
+int ixgbe_aci_disable_rxen(struct ixgbe_hw *hw);
+int ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode,
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps);
+void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg);
+int ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg);
+int ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link);
+int ixgbe_update_link_info(struct ixgbe_hw *hw);
+int ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up);
+int ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
+ struct ixgbe_link_status *link);
+int ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask);
+int ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask);
+int ixgbe_aci_set_port_id_led(struct ixgbe_hw *hw, bool orig_mode);
+enum ixgbe_media_type ixgbe_get_media_type_e610(struct ixgbe_hw *hw);
+int ixgbe_setup_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait);
+int ixgbe_check_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete);
+int ixgbe_get_link_capabilities_e610(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg);
+int ixgbe_cfg_phy_fc(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg,
+ enum ixgbe_fc_mode req_mode);
+int ixgbe_setup_fc_e610(struct ixgbe_hw *hw);
+void ixgbe_fc_autoneg_e610(struct ixgbe_hw *hw);
+void ixgbe_disable_rx_e610(struct ixgbe_hw *hw);
+int ixgbe_init_phy_ops_e610(struct ixgbe_hw *hw);
+int ixgbe_identify_phy_e610(struct ixgbe_hw *hw);
+int ixgbe_identify_module_e610(struct ixgbe_hw *hw);
+int ixgbe_setup_phy_link_e610(struct ixgbe_hw *hw);
+int ixgbe_set_phy_power_e610(struct ixgbe_hw *hw, bool on);
+int ixgbe_enter_lplu_e610(struct ixgbe_hw *hw);
+int ixgbe_init_eeprom_params_e610(struct ixgbe_hw *hw);
+int ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle);
+int ixgbe_acquire_nvm(struct ixgbe_hw *hw,
+ enum libie_aq_res_access_type access);
+void ixgbe_release_nvm(struct ixgbe_hw *hw);
+int ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command,
+ bool read_shadow_ram);
+int ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw);
+int ixgbe_get_inactive_orom_ver(struct ixgbe_hw *hw,
+ struct ixgbe_orom_info *orom);
+int ixgbe_get_inactive_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm);
+int ixgbe_get_inactive_netlist_ver(struct ixgbe_hw *hw,
+ struct ixgbe_netlist_info *netlist);
+int ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data);
+int ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length,
+ u8 *data, bool read_shadow_ram);
+int ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words,
+ u16 *data);
+int ixgbe_read_ee_aci_e610(struct ixgbe_hw *hw, u16 offset, u16 *data);
+int ixgbe_read_ee_aci_buffer_e610(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+int ixgbe_validate_eeprom_checksum_e610(struct ixgbe_hw *hw, u16 *checksum_val);
+int ixgbe_reset_hw_e610(struct ixgbe_hw *hw);
+int ixgbe_get_flash_data(struct ixgbe_hw *hw);
+int ixgbe_aci_nvm_update_empr(struct ixgbe_hw *hw);
+int ixgbe_nvm_set_pkg_data(struct ixgbe_hw *hw, bool del_pkg_data_flag,
+ u8 *data, u16 length);
+int ixgbe_nvm_pass_component_tbl(struct ixgbe_hw *hw, u8 *data, u16 length,
+ u8 transfer_flag, u8 *comp_response,
+ u8 *comp_response_code);
+int ixgbe_aci_erase_nvm(struct ixgbe_hw *hw, u16 module_typeid);
+int ixgbe_aci_update_nvm(struct ixgbe_hw *hw, u16 module_typeid,
+ u32 offset, u16 length, void *data,
+ bool last_command, u8 command_flags);
+int ixgbe_nvm_write_activate(struct ixgbe_hw *hw, u16 cmd_flags,
+ u8 *response_flags);
+int ixgbe_fwlog_init(struct ixgbe_hw *hw);
+void ixgbe_fwlog_deinit(struct ixgbe_hw *hw);
+
+#endif /* _IXGBE_E610_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 9482e0cca8b7..2ad81f687a84 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
/* ethtool support for ixgbe */
@@ -213,7 +213,7 @@ static void ixgbe_set_advertising_10gtypes(struct ixgbe_hw *hw,
static int ixgbe_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
ixgbe_link_speed supported_link;
bool autoneg = false;
@@ -458,7 +458,7 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev,
static int ixgbe_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 advertised, old;
int err = 0;
@@ -535,7 +535,7 @@ static int ixgbe_set_link_ksettings(struct net_device *netdev,
static void ixgbe_get_pause_stats(struct net_device *netdev,
struct ethtool_pause_stats *stats)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw_stats *hwstats = &adapter->stats;
stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc;
@@ -545,7 +545,7 @@ static void ixgbe_get_pause_stats(struct net_device *netdev,
static void ixgbe_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (ixgbe_device_supports_autoneg_fc(hw) &&
@@ -564,10 +564,26 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
}
}
+static void ixgbe_set_pauseparam_finalize(struct net_device *netdev,
+ struct ixgbe_fc_info *fc)
+{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+	/* If the settings changed, apply them and use the new autoneg. */
+ if (memcmp(fc, &hw->fc, sizeof(*fc))) {
+ hw->fc = *fc;
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
+ else
+ ixgbe_reset(adapter);
+ }
+}
+
static int ixgbe_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_fc_info fc = hw->fc;
@@ -592,27 +608,52 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
else
fc.requested_mode = ixgbe_fc_none;
- /* if the thing changed then we'll update and use new autoneg */
- if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
- hw->fc = fc;
- if (netif_running(netdev))
- ixgbe_reinit_locked(adapter);
- else
- ixgbe_reset(adapter);
+ ixgbe_set_pauseparam_finalize(netdev, &fc);
+
+ return 0;
+}
+
+static int ixgbe_set_pauseparam_e610(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_fc_info fc = hw->fc;
+
+ if (!ixgbe_device_supports_autoneg_fc(hw))
+ return -EOPNOTSUPP;
+
+ if (pause->autoneg == AUTONEG_DISABLE) {
+ netdev_info(netdev,
+ "Cannot disable autonegotiation on this device.\n");
+ return -EOPNOTSUPP;
}
+ fc.disable_fc_autoneg = false;
+
+ if (pause->rx_pause && pause->tx_pause)
+ fc.requested_mode = ixgbe_fc_full;
+ else if (pause->rx_pause)
+ fc.requested_mode = ixgbe_fc_rx_pause;
+ else if (pause->tx_pause)
+ fc.requested_mode = ixgbe_fc_tx_pause;
+ else
+ fc.requested_mode = ixgbe_fc_none;
+
+ ixgbe_set_pauseparam_finalize(netdev, &fc);
+
return 0;
}
static u32 ixgbe_get_msglevel(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
return adapter->msg_enable;
}
static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
adapter->msg_enable = data;
}
@@ -627,7 +668,7 @@ static int ixgbe_get_regs_len(struct net_device *netdev)
static void ixgbe_get_regs(struct net_device *netdev,
struct ethtool_regs *regs, void *p)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 *regs_buff = p;
u8 i;
@@ -690,6 +731,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
break;
@@ -991,16 +1033,24 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[1144] = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
}
+static void ixgbe_get_link_ext_stats(struct net_device *netdev,
+ struct ethtool_link_ext_stats *stats)
+{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+
+ stats->link_down_events = adapter->link_down_events;
+}
+
static int ixgbe_get_eeprom_len(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
return adapter->hw.eeprom.word_size * 2;
}
static int ixgbe_get_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, u8 *bytes)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u16 *eeprom_buff;
int first_word, last_word, eeprom_len;
@@ -1036,7 +1086,7 @@ static int ixgbe_get_eeprom(struct net_device *netdev,
static int ixgbe_set_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, u8 *bytes)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u16 *eeprom_buff;
void *ptr;
@@ -1103,10 +1153,22 @@ err:
return ret_val;
}
+void ixgbe_refresh_fw_version(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ ixgbe_get_flash_data(hw);
+ ixgbe_set_fw_version_e610(adapter);
+}
+
static void ixgbe_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+
+	/* Need to refresh info for e610 in case FW reloads at runtime. */
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ ixgbe_refresh_fw_version(adapter);
strscpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
@@ -1160,7 +1222,7 @@ static void ixgbe_get_ringparam(struct net_device *netdev,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
@@ -1175,7 +1237,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_ring *temp_ring;
int i, j, err = 0;
u32 new_rx_count, new_tx_count;
@@ -1216,7 +1278,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
/* allocate temporary buffer to store rings in */
i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
adapter->num_rx_queues);
- temp_ring = vmalloc(array_size(i, sizeof(struct ixgbe_ring)));
+ temp_ring = vmalloc_array(i, sizeof(struct ixgbe_ring));
if (!temp_ring) {
err = -ENOMEM;
@@ -1335,7 +1397,7 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
static void ixgbe_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct rtnl_link_stats64 temp;
const struct rtnl_link_stats64 *net_stats;
unsigned int start;
@@ -1613,6 +1675,7 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
toggle = 0x7FFFF30F;
test = reg_test_82599;
break;
@@ -1708,7 +1771,7 @@ static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
static irqreturn_t ixgbe_test_intr(int irq, void *data)
{
struct net_device *netdev = (struct net_device *) data;
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
@@ -1874,6 +1937,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
reg_data |= IXGBE_DMATXCTL_TE;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
@@ -1935,6 +1999,7 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
reg_data |= IXGBE_MACC_FLU;
IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
@@ -2179,7 +2244,7 @@ out:
static void ixgbe_diag_test(struct net_device *netdev,
struct ethtool_test *eth_test, u64 *data)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
bool if_running = netif_running(netdev);
if (ixgbe_removed(adapter->hw.hw_addr)) {
@@ -2302,7 +2367,7 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
static void ixgbe_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
wol->supported = WAKE_UCAST | WAKE_MCAST |
WAKE_BCAST | WAKE_MAGIC;
@@ -2324,7 +2389,7 @@ static void ixgbe_get_wol(struct net_device *netdev,
static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE |
WAKE_FILTER))
@@ -2349,9 +2414,53 @@ static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return 0;
}
+static int ixgbe_set_wol_acpi(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 grc;
+
+ if (ixgbe_wol_exclusion(adapter, wol))
+ return wol->wolopts ? -EOPNOTSUPP : 0;
+
+ /* disable APM wakeup */
+ grc = IXGBE_READ_REG(hw, IXGBE_GRC_X550EM_a);
+ grc &= ~IXGBE_GRC_APME;
+ IXGBE_WRITE_REG(hw, IXGBE_GRC_X550EM_a, grc);
+
+ /* erase existing filters */
+ IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
+ adapter->wol = 0;
+
+ if (wol->wolopts & WAKE_UCAST)
+ adapter->wol |= IXGBE_WUFC_EX;
+ if (wol->wolopts & WAKE_MCAST)
+ adapter->wol |= IXGBE_WUFC_MC;
+ if (wol->wolopts & WAKE_BCAST)
+ adapter->wol |= IXGBE_WUFC_BC;
+
+ IXGBE_WRITE_REG(hw, IXGBE_WUC, IXGBE_WUC_PME_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wol);
+
+ hw->wol_enabled = adapter->wol;
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ return 0;
+}
+
+static int ixgbe_set_wol_e610(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ if (wol->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST))
+ return ixgbe_set_wol_acpi(netdev, wol);
+ else
+ return ixgbe_set_wol(netdev, wol);
+}
+
static int ixgbe_nway_reset(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (netif_running(netdev))
ixgbe_reinit_locked(adapter);
@@ -2362,7 +2471,7 @@ static int ixgbe_nway_reset(struct net_device *netdev)
static int ixgbe_set_phys_id(struct net_device *netdev,
enum ethtool_phys_id_state state)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (!hw->mac.ops.led_on || !hw->mac.ops.led_off)
@@ -2390,12 +2499,32 @@ static int ixgbe_set_phys_id(struct net_device *netdev,
return 0;
}
+static int ixgbe_set_phys_id_e610(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+ bool led_active;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ led_active = true;
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ led_active = false;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
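+	/* The ACI takes an "original mode" flag, hence the inversion. */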
+ return ixgbe_aci_set_port_id_led(&adapter->hw, !led_active);
+}
+
static int ixgbe_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
/* only valid if in constant ITR mode */
if (adapter->rx_itr_setting <= 1)
@@ -2451,7 +2580,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_q_vector *q_vector;
int i;
u16 tx_itr_param, rx_itr_param, tx_itr_prev;
@@ -2624,9 +2753,11 @@ static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
return 0;
}
-static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+static int ixgbe_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
+
cmd->data = 0;
/* Report default options for RSS on ixgbe */
@@ -2674,18 +2805,21 @@ static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
return 64;
}
+static u32 ixgbe_get_rx_ring_count(struct net_device *dev)
+{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
+
+ return min_t(u32, adapter->num_rx_queues,
+ ixgbe_rss_indir_tbl_max(adapter));
+}
+
static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = min_t(int, adapter->num_rx_queues,
- ixgbe_rss_indir_tbl_max(adapter));
- ret = 0;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = adapter->fdir_filter_count;
ret = 0;
@@ -2696,9 +2830,6 @@ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
case ETHTOOL_GRXCLSRLALL:
ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
break;
- case ETHTOOL_GRXFH:
- ret = ixgbe_get_rss_hash_opts(adapter, cmd);
- break;
default:
break;
}
@@ -2950,9 +3081,11 @@ static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
-static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
- struct ethtool_rxnfc *nfc)
+static int ixgbe_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
u32 flags2 = adapter->flags2;
/*
@@ -3065,7 +3198,7 @@ static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
@@ -3075,9 +3208,6 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
case ETHTOOL_SRXCLSRLDEL:
ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
break;
- case ETHTOOL_SRXFH:
- ret = ixgbe_set_rss_hash_opt(adapter, cmd);
- break;
default:
break;
}
@@ -3092,7 +3222,7 @@ static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
static u32 ixgbe_rss_indir_size(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
return ixgbe_rss_indir_tbl_entries(adapter);
}
@@ -3112,7 +3242,7 @@ static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
static int ixgbe_get_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
rxfh->hfunc = ETH_RSS_HASH_TOP;
@@ -3130,7 +3260,7 @@ static int ixgbe_set_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int i;
u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
@@ -3172,7 +3302,7 @@ static int ixgbe_set_rxfh(struct net_device *netdev,
static int ixgbe_get_ts_info(struct net_device *dev,
struct kernel_ethtool_ts_info *info)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
/* we always support timestamping disabled */
info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
@@ -3181,6 +3311,7 @@ static int ixgbe_get_ts_info(struct net_device *dev,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
break;
case ixgbe_mac_X540:
@@ -3247,7 +3378,7 @@ static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
static void ixgbe_get_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
/* report maximum channels */
ch->max_combined = ixgbe_max_channels(adapter);
@@ -3284,7 +3415,7 @@ static void ixgbe_get_channels(struct net_device *dev,
static int ixgbe_set_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
unsigned int count = ch->combined_count;
u8 max_rss_indices = ixgbe_max_rss_indices(adapter);
@@ -3322,7 +3453,7 @@ static int ixgbe_set_channels(struct net_device *dev,
static int ixgbe_get_module_info(struct net_device *dev,
struct ethtool_modinfo *modinfo)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
u8 sff8472_rev, addr_mode;
bool page_swap = false;
@@ -3368,7 +3499,7 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
struct ethtool_eeprom *ee,
u8 *data)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
int status = -EFAULT;
u8 databyte = 0xFF;
@@ -3443,13 +3574,13 @@ ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_keee *edata)
for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed)
- linkmode_set_bit(ixgbe_lp_map[i].link_mode,
+ linkmode_set_bit(ixgbe_ls_map[i].link_mode,
edata->supported);
}
for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed)
- linkmode_set_bit(ixgbe_lp_map[i].link_mode,
+ linkmode_set_bit(ixgbe_ls_map[i].link_mode,
edata->advertised);
}
@@ -3464,7 +3595,7 @@ ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_keee *edata)
static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
@@ -3478,7 +3609,7 @@ static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
struct ethtool_keee eee_data;
int ret_val;
@@ -3533,7 +3664,7 @@ static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
static u32 ixgbe_get_priv_flags(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
u32 priv_flags = 0;
if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
@@ -3550,7 +3681,7 @@ static u32 ixgbe_get_priv_flags(struct net_device *netdev)
static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
unsigned int flags2 = adapter->flags2;
unsigned int i;
@@ -3597,6 +3728,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.set_wol = ixgbe_set_wol,
.nway_reset = ixgbe_nway_reset,
.get_link = ethtool_op_get_link,
+ .get_link_ext_stats = ixgbe_get_link_ext_stats,
.get_eeprom_len = ixgbe_get_eeprom_len,
.get_eeprom = ixgbe_get_eeprom,
.set_eeprom = ixgbe_set_eeprom,
@@ -3614,12 +3746,64 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_ethtool_stats = ixgbe_get_ethtool_stats,
.get_coalesce = ixgbe_get_coalesce,
.set_coalesce = ixgbe_set_coalesce,
+ .get_rx_ring_count = ixgbe_get_rx_ring_count,
+ .get_rxnfc = ixgbe_get_rxnfc,
+ .set_rxnfc = ixgbe_set_rxnfc,
+ .get_rxfh_indir_size = ixgbe_rss_indir_size,
+ .get_rxfh_key_size = ixgbe_get_rxfh_key_size,
+ .get_rxfh = ixgbe_get_rxfh,
+ .set_rxfh = ixgbe_set_rxfh,
+ .get_rxfh_fields = ixgbe_get_rxfh_fields,
+ .set_rxfh_fields = ixgbe_set_rxfh_fields,
+ .get_eee = ixgbe_get_eee,
+ .set_eee = ixgbe_set_eee,
+ .get_channels = ixgbe_get_channels,
+ .set_channels = ixgbe_set_channels,
+ .get_priv_flags = ixgbe_get_priv_flags,
+ .set_priv_flags = ixgbe_set_priv_flags,
+ .get_ts_info = ixgbe_get_ts_info,
+ .get_module_info = ixgbe_get_module_info,
+ .get_module_eeprom = ixgbe_get_module_eeprom,
+ .get_link_ksettings = ixgbe_get_link_ksettings,
+ .set_link_ksettings = ixgbe_set_link_ksettings,
+};
+
+static const struct ethtool_ops ixgbe_ethtool_ops_e610 = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
+ .get_drvinfo = ixgbe_get_drvinfo,
+ .get_regs_len = ixgbe_get_regs_len,
+ .get_regs = ixgbe_get_regs,
+ .get_wol = ixgbe_get_wol,
+ .set_wol = ixgbe_set_wol_e610,
+ .nway_reset = ixgbe_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_link_ext_stats = ixgbe_get_link_ext_stats,
+ .get_eeprom_len = ixgbe_get_eeprom_len,
+ .get_eeprom = ixgbe_get_eeprom,
+ .set_eeprom = ixgbe_set_eeprom,
+ .get_ringparam = ixgbe_get_ringparam,
+ .set_ringparam = ixgbe_set_ringparam,
+ .get_pause_stats = ixgbe_get_pause_stats,
+ .get_pauseparam = ixgbe_get_pauseparam,
+ .set_pauseparam = ixgbe_set_pauseparam_e610,
+ .get_msglevel = ixgbe_get_msglevel,
+ .set_msglevel = ixgbe_set_msglevel,
+ .self_test = ixgbe_diag_test,
+ .get_strings = ixgbe_get_strings,
+ .set_phys_id = ixgbe_set_phys_id_e610,
+ .get_sset_count = ixgbe_get_sset_count,
+ .get_ethtool_stats = ixgbe_get_ethtool_stats,
+ .get_coalesce = ixgbe_get_coalesce,
+ .set_coalesce = ixgbe_set_coalesce,
+ .get_rx_ring_count = ixgbe_get_rx_ring_count,
.get_rxnfc = ixgbe_get_rxnfc,
.set_rxnfc = ixgbe_set_rxnfc,
.get_rxfh_indir_size = ixgbe_rss_indir_size,
.get_rxfh_key_size = ixgbe_get_rxfh_key_size,
.get_rxfh = ixgbe_get_rxfh,
.set_rxfh = ixgbe_set_rxfh,
+ .get_rxfh_fields = ixgbe_get_rxfh_fields,
+ .set_rxfh_fields = ixgbe_set_rxfh_fields,
.get_eee = ixgbe_get_eee,
.set_eee = ixgbe_set_eee,
.get_channels = ixgbe_get_channels,
@@ -3635,5 +3819,10 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
- netdev->ethtool_ops = &ixgbe_ethtool_ops;
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ netdev->ethtool_ops = &ixgbe_ethtool_ops_e610;
+ else
+ netdev->ethtool_ops = &ixgbe_ethtool_ops;
}
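/* For reference (not part of this hunk): the diff above converts every
 * netdev_priv() lookup to ixgbe_from_netdev(). The helper itself is
 * introduced elsewhere in this series; a minimal sketch of what it
 * presumably looks like, assuming netdev_priv() now returns a small
 * wrapper struct that carries the adapter pointer (names hypothetical):
 */
static inline struct ixgbe_adapter *ixgbe_from_netdev(struct net_device *netdev)
{
	/* hypothetical wrapper priv struct pointing back at the adapter */
	struct ixgbe_netdevice_priv *priv = netdev_priv(netdev);

	return priv->adapter;
}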
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 955dced844a9..011fda9c6193 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -56,7 +56,7 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
if (xid >= netdev->fcoe_ddp_xid)
return 0;
- adapter = netdev_priv(netdev);
+ adapter = ixgbe_from_netdev(netdev);
fcoe = &adapter->fcoe;
ddp = &fcoe->ddp[xid];
if (!ddp->udl)
@@ -153,7 +153,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
if (!netdev || !sgl)
return 0;
- adapter = netdev_priv(netdev);
+ adapter = ixgbe_from_netdev(netdev);
if (xid >= netdev->fcoe_ddp_xid) {
e_warn(drv, "xid=0x%x out-of-range\n", xid);
return 0;
@@ -744,7 +744,7 @@ void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
* ixgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources
* @adapter: ixgbe adapter
*
- * Sets up ddp context resouces
+ * Sets up ddp context resources
*
* Returns : 0 indicates success or -EINVAL on failure
*/
@@ -834,7 +834,7 @@ static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter)
*/
int ixgbe_fcoe_enable(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
atomic_inc(&fcoe->refcnt);
@@ -881,7 +881,7 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
*/
int ixgbe_fcoe_disable(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (!atomic_dec_and_test(&adapter->fcoe.refcnt))
return -EINVAL;
@@ -927,7 +927,7 @@ int ixgbe_fcoe_disable(struct net_device *netdev)
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
{
u16 prefix = 0xffff;
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_mac_info *mac = &adapter->hw.mac;
switch (type) {
@@ -967,7 +967,7 @@ int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
struct netdev_fcoe_hbainfo *info)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u64 dsn;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c
new file mode 100644
index 000000000000..e5479fc07a07
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c
@@ -0,0 +1,707 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2025 Intel Corporation. */
+
+#include <linux/crc32.h>
+#include <linux/pldmfw.h>
+#include <linux/uuid.h>
+
+#include "ixgbe.h"
+#include "ixgbe_fw_update.h"
+
+struct ixgbe_fwu_priv {
+ struct pldmfw context;
+
+ struct ixgbe_adapter *adapter;
+ struct netlink_ext_ack *extack;
+
+ /* Track which NVM banks to activate at the end of the update */
+ u8 activate_flags;
+ bool emp_reset_available;
+};
+
+/**
+ * ixgbe_send_package_data - Send record package data to firmware
+ * @context: PLDM fw update structure
+ * @data: pointer to the package data
+ * @length: length of the package data
+ *
+ * Send a copy of the package data associated with the PLDM record matching
+ * this device to the firmware.
+ *
+ * Note that this function sends an AdminQ command that will fail unless the
+ * NVM resource has been acquired.
+ *
+ * Return: zero on success, or a negative error code on failure.
+ */
+static int ixgbe_send_package_data(struct pldmfw *context,
+ const u8 *data, u16 length)
+{
+ struct ixgbe_fwu_priv *priv = container_of(context,
+ struct ixgbe_fwu_priv,
+ context);
+ struct ixgbe_adapter *adapter = priv->adapter;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 *package_data;
+ int err;
+
+ package_data = kmemdup(data, length, GFP_KERNEL);
+ if (!package_data)
+ return -ENOMEM;
+
+ err = ixgbe_nvm_set_pkg_data(hw, false, package_data, length);
+
+ kfree(package_data);
+
+ return err;
+}
+
+/**
+ * ixgbe_check_component_response - Report firmware response to a component
+ * @adapter: device private data structure
+ * @response: indicates whether this component can be updated
+ * @code: code indicating reason for response
+ * @extack: netlink extended ACK structure
+ *
+ * Check whether firmware indicates if this component can be updated. Report
+ * a suitable error message over the netlink extended ACK if the component
+ * cannot be updated.
+ *
+ * Return: 0 if the component can be updated, or -ECANCELED if the
+ * firmware indicates the component cannot be updated.
+ */
+static int ixgbe_check_component_response(struct ixgbe_adapter *adapter,
+ u8 response, u8 code,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ switch (response) {
+ case IXGBE_ACI_NVM_PASS_COMP_CAN_BE_UPDATED:
+ /* Firmware indicated this update is good to proceed. */
+ return 0;
+ case IXGBE_ACI_NVM_PASS_COMP_CAN_MAY_BE_UPDATEABLE:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Firmware recommends not updating, as it may result in a downgrade. Continuing anyways");
+ return 0;
+ case IXGBE_ACI_NVM_PASS_COMP_CAN_NOT_BE_UPDATED:
+ NL_SET_ERR_MSG_MOD(extack, "Firmware has rejected updating.");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_PARTIAL_CHECK:
+ if (hw->mac.ops.fw_recovery_mode &&
+ hw->mac.ops.fw_recovery_mode(hw))
+ return 0;
+ break;
+ }
+
+ switch (code) {
+ case IXGBE_ACI_NVM_PASS_COMP_STAMP_IDENTICAL_CODE:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Component comparison stamp is identical to running image");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_STAMP_LOWER:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Component comparison stamp is lower than running image");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_INVALID_STAMP_CODE:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Component comparison stamp is invalid");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_CONFLICT_CODE:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Component table conflict occurred");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_PRE_REQ_NOT_MET_CODE:
+ NL_SET_ERR_MSG_MOD(extack, "Component pre-requisites not met");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_NOT_SUPPORTED_CODE:
+ NL_SET_ERR_MSG_MOD(extack, "Component not supported");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_CANNOT_DOWNGRADE_CODE:
+ NL_SET_ERR_MSG_MOD(extack, "Component cannot be downgraded");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_INCOMPLETE_IMAGE_CODE:
+ NL_SET_ERR_MSG_MOD(extack, "Incomplete component image");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_VER_STR_IDENTICAL_CODE:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Component version is identical to running image");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_VER_STR_LOWER_CODE:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Component version is lower than the running image");
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Received unexpected response code from firmware");
+ break;
+ }
+
+ return -ECANCELED;
+}
+
+/**
+ * ixgbe_send_component_table - Send PLDM component table to firmware
+ * @context: PLDM fw update structure
+ * @component: the component to process
+ * @transfer_flag: relative transfer order of this component
+ *
+ * Read relevant data from the component and forward it to the device
+ * firmware. Check the response to determine if the firmware indicates that
+ * the update can proceed.
+ *
+ * This function sends ACI commands related to the NVM, and assumes that
+ * the NVM resource has been acquired.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ixgbe_send_component_table(struct pldmfw *context,
+ struct pldmfw_component *component,
+ u8 transfer_flag)
+{
+ struct ixgbe_fwu_priv *priv = container_of(context,
+ struct ixgbe_fwu_priv,
+ context);
+ struct ixgbe_adapter *adapter = priv->adapter;
+ struct netlink_ext_ack *extack = priv->extack;
+ struct ixgbe_aci_cmd_nvm_comp_tbl *comp_tbl;
+ u8 comp_response, comp_response_code;
+ struct ixgbe_hw *hw = &adapter->hw;
+ size_t length;
+ int err;
+
+ switch (component->identifier) {
+ case NVM_COMP_ID_OROM:
+ case NVM_COMP_ID_NVM:
+ case NVM_COMP_ID_NETLIST:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unable to update due to unknown firmware component");
+ return -EOPNOTSUPP;
+ }
+
+ length = struct_size(comp_tbl, cvs, component->version_len);
+ comp_tbl = kzalloc(length, GFP_KERNEL);
+ if (!comp_tbl)
+ return -ENOMEM;
+
+ comp_tbl->comp_class = cpu_to_le16(component->classification);
+ comp_tbl->comp_id = cpu_to_le16(component->identifier);
+ comp_tbl->comp_class_idx = FWU_COMP_CLASS_IDX_NOT_USE;
+ comp_tbl->comp_cmp_stamp = cpu_to_le32(component->comparison_stamp);
+ comp_tbl->cvs_type = component->version_type;
+ comp_tbl->cvs_len = component->version_len;
+
+ memcpy(comp_tbl->cvs, component->version_string,
+ component->version_len);
+
+ err = ixgbe_nvm_pass_component_tbl(hw, (u8 *)comp_tbl, length,
+ transfer_flag, &comp_response,
+ &comp_response_code);
+
+ kfree(comp_tbl);
+
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to transfer component table to firmware");
+ return -EIO;
+ }
+
+ return ixgbe_check_component_response(adapter,
+ comp_response,
+ comp_response_code, extack);
+}
+
+/**
+ * ixgbe_write_one_nvm_block - Write an NVM block and await completion response
+ * @adapter: the PF data structure
+ * @module: the module to write to
+ * @offset: offset in bytes
+ * @block_size: size of the block to write, up to 4k
+ * @block: pointer to block of data to write
+ * @last_cmd: whether this is the last command
+ * @extack: netlink extended ACK structure
+ *
+ * Write a block of data to a flash module, and wait for the completion
+ * response message from firmware.
+ *
+ * Note this function assumes the caller has acquired the NVM resource.
+ *
+ * On successful return, reset level indicates the device reset required to
+ * complete the update.
+ *
+ * 0 - IXGBE_ACI_NVM_POR_FLAG - A full power on is required
+ * 1 - IXGBE_ACI_NVM_PERST_FLAG - A cold PCIe reset is required
+ * 2 - IXGBE_ACI_NVM_EMPR_FLAG - An EMP reset is required
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ixgbe_write_one_nvm_block(struct ixgbe_adapter *adapter,
+ u16 module, u32 offset,
+ u16 block_size, u8 *block, bool last_cmd,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ return ixgbe_aci_update_nvm(hw, module, offset, block_size, block,
+ last_cmd, 0);
+}
+
+/**
+ * ixgbe_write_nvm_module - Write data to an NVM module
+ * @adapter: the PF driver structure
+ * @module: the module id to program
+ * @component: the name of the component being updated
+ * @image: buffer of image data to write to the NVM
+ * @length: length of the buffer
+ * @extack: netlink extended ACK structure
+ *
+ * Loop over the data for a given NVM module and program it in 4 Kb
+ * blocks. Notify devlink core of progress after each block is programmed.
+ *
+ * Note this function assumes the caller has acquired the NVM resource.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ixgbe_write_nvm_module(struct ixgbe_adapter *adapter, u16 module,
+ const char *component, const u8 *image,
+ u32 length,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink *devlink = adapter->devlink;
+ u32 offset = 0;
+ bool last_cmd;
+ u8 *block;
+ int err;
+
+ devlink_flash_update_status_notify(devlink, "Flashing",
+ component, 0, length);
+
+ block = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
+ if (!block)
+ return -ENOMEM;
+
+ do {
+ u32 block_size;
+
+ block_size = min_t(u32, IXGBE_ACI_MAX_BUFFER_SIZE,
+ length - offset);
+ last_cmd = !(offset + block_size < length);
+
+ memcpy(block, image + offset, block_size);
+
+ err = ixgbe_write_one_nvm_block(adapter, module, offset,
+ block_size, block, last_cmd,
+ extack);
+ if (err)
+ break;
+
+ offset += block_size;
+
+ devlink_flash_update_status_notify(devlink, "Flashing",
+ component, offset, length);
+ } while (!last_cmd);
+
+ if (err)
+ devlink_flash_update_status_notify(devlink, "Flashing failed",
+ component, length, length);
+ else
+ devlink_flash_update_status_notify(devlink, "Flashing done",
+ component, length, length);
+
+ kfree(block);
+
+ return err;
+}
+
+/* Length in seconds to wait before timing out when erasing a flash module.
+ * Yes, erasing really can take minutes to complete.
+ */
+#define IXGBE_FW_ERASE_TIMEOUT 300
+
+/**
+ * ixgbe_erase_nvm_module - Erase an NVM module and await firmware completion
+ * @adapter: the PF data structure
+ * @module: the module to erase
+ * @component: name of the component being updated
+ * @extack: netlink extended ACK structure
+ *
+ * Erase the inactive NVM bank associated with this module, and wait for
+ * a completion response message from firmware.
+ *
+ * Note this function assumes the caller has acquired the NVM resource.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ixgbe_erase_nvm_module(struct ixgbe_adapter *adapter, u16 module,
+ const char *component,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink *devlink = adapter->devlink;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ devlink_flash_update_timeout_notify(devlink, "Erasing", component,
+ IXGBE_FW_ERASE_TIMEOUT);
+
+ err = ixgbe_aci_erase_nvm(hw, module);
+ if (err)
+ devlink_flash_update_status_notify(devlink, "Erasing failed",
+ component, 0, 0);
+ else
+ devlink_flash_update_status_notify(devlink, "Erasing done",
+ component, 0, 0);
+
+ return err;
+}
+
+/**
+ * ixgbe_switch_flash_banks - Tell firmware to switch NVM banks
+ * @adapter: Pointer to the PF data structure
+ * @activate_flags: flags used for the activation command
+ * @emp_reset_available: on return, indicates if EMP reset is available
+ * @extack: netlink extended ACK structure
+ *
+ * Notify firmware to activate the newly written flash banks, and wait for the
+ * firmware response.
+ *
+ * Return: 0 on success or an error code on failure.
+ */
+static int ixgbe_switch_flash_banks(struct ixgbe_adapter *adapter,
+ u8 activate_flags,
+ bool *emp_reset_available,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 response_flags;
+ int err;
+
+ err = ixgbe_nvm_write_activate(hw, activate_flags, &response_flags);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to switch active flash banks");
+ return err;
+ }
+
+ if (emp_reset_available) {
+ if (hw->dev_caps.common_cap.reset_restrict_support)
+ *emp_reset_available =
+ response_flags & IXGBE_ACI_NVM_EMPR_ENA;
+ else
+ *emp_reset_available = true;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_flash_component - Flash a component of the NVM
+ * @context: PLDM fw update structure
+ * @component: the component table to program
+ *
+ * Program the flash contents for a given component. First, determine the
+ * module id. Then, erase the secondary bank for this module. Finally, write
+ * the contents of the component to the NVM.
+ *
+ * Note this function assumes the caller has acquired the NVM resource.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ixgbe_flash_component(struct pldmfw *context,
+ struct pldmfw_component *component)
+{
+ struct ixgbe_fwu_priv *priv = container_of(context,
+ struct ixgbe_fwu_priv,
+ context);
+ struct netlink_ext_ack *extack = priv->extack;
+ struct ixgbe_adapter *adapter = priv->adapter;
+ const char *name;
+ u16 module;
+ int err;
+ u8 flag;
+
+ switch (component->identifier) {
+ case NVM_COMP_ID_OROM:
+ module = IXGBE_E610_SR_1ST_OROM_BANK_PTR;
+ flag = IXGBE_ACI_NVM_ACTIV_SEL_OROM;
+ name = "fw.undi";
+ break;
+ case NVM_COMP_ID_NVM:
+ module = IXGBE_E610_SR_1ST_NVM_BANK_PTR;
+ flag = IXGBE_ACI_NVM_ACTIV_SEL_NVM;
+ name = "fw.mgmt";
+ break;
+ case NVM_COMP_ID_NETLIST:
+ module = IXGBE_E610_SR_NETLIST_BANK_PTR;
+ flag = IXGBE_ACI_NVM_ACTIV_SEL_NETLIST;
+ name = "fw.netlist";
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* Mark this component for activating at the end. */
+ priv->activate_flags |= flag;
+
+ err = ixgbe_erase_nvm_module(adapter, module, name, extack);
+ if (err)
+ return err;
+
+ return ixgbe_write_nvm_module(adapter, module, name,
+ component->component_data,
+ component->component_size, extack);
+}
+
+/**
+ * ixgbe_finalize_update - Perform last steps to complete device update
+ * @context: PLDM fw update structure
+ *
+ * Called as the last step of the update process. Complete the update by
+ * telling the firmware to switch active banks, and perform a reset if
+ * configured.
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+static int ixgbe_finalize_update(struct pldmfw *context)
+{
+ struct ixgbe_fwu_priv *priv = container_of(context,
+ struct ixgbe_fwu_priv,
+ context);
+ struct ixgbe_adapter *adapter = priv->adapter;
+ struct netlink_ext_ack *extack = priv->extack;
+ struct devlink *devlink = adapter->devlink;
+ int err;
+
+ /* Finally, notify firmware to activate the written NVM banks */
+ err = ixgbe_switch_flash_banks(adapter, priv->activate_flags,
+ &priv->emp_reset_available, extack);
+ if (err)
+ return err;
+
+ adapter->fw_emp_reset_disabled = !priv->emp_reset_available;
+
+ if (!adapter->fw_emp_reset_disabled)
+ devlink_flash_update_status_notify(devlink,
+					    "It is suggested to activate new firmware via devlink reload; if that does not work then a power cycle is required",
+ NULL, 0, 0);
+
+ return 0;
+}
+
+static const struct pldmfw_ops ixgbe_fwu_ops_e610 = {
+ .match_record = &pldmfw_op_pci_match_record,
+ .send_package_data = &ixgbe_send_package_data,
+ .send_component_table = &ixgbe_send_component_table,
+ .flash_component = &ixgbe_flash_component,
+ .finalize_update = &ixgbe_finalize_update,
+};
+
+/**
+ * ixgbe_get_pending_updates - Check whether the device has pending flash updates
+ * @adapter: the PF driver structure
+ * @pending: on return, bitmap of updates pending
+ * @extack: Netlink extended ACK
+ *
+ * Check if the device has any pending updates on any flash components.
+ *
+ * Return: 0 on success, or a negative error code on failure. Update
+ * pending with the bitmap of pending updates.
+ */
+int ixgbe_get_pending_updates(struct ixgbe_adapter *adapter, u8 *pending,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_hw_dev_caps *dev_caps;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ dev_caps = kzalloc(sizeof(*dev_caps), GFP_KERNEL);
+ if (!dev_caps)
+ return -ENOMEM;
+
+ err = ixgbe_discover_dev_caps(hw, dev_caps);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unable to read device capabilities");
+ kfree(dev_caps);
+ return -EIO;
+ }
+
+ *pending = 0;
+
+ if (dev_caps->common_cap.nvm_update_pending_nvm)
+ *pending |= IXGBE_ACI_NVM_ACTIV_SEL_NVM;
+
+ if (dev_caps->common_cap.nvm_update_pending_orom)
+ *pending |= IXGBE_ACI_NVM_ACTIV_SEL_OROM;
+
+ if (dev_caps->common_cap.nvm_update_pending_netlist)
+ *pending |= IXGBE_ACI_NVM_ACTIV_SEL_NETLIST;
+
+ kfree(dev_caps);
+
+ return 0;
+}
+
+/**
+ * ixgbe_cancel_pending_update - Cancel any pending update for a component
+ * @adapter: the PF driver structure
+ * @component: if not NULL, the name of the component being updated
+ * @extack: Netlink extended ACK structure
+ *
+ * Cancel any pending update for the specified component. If component is
+ * NULL, all device updates will be canceled.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ixgbe_cancel_pending_update(struct ixgbe_adapter *adapter,
+ const char *component,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink *devlink = adapter->devlink;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 pending;
+ int err;
+
+ err = ixgbe_get_pending_updates(adapter, &pending, extack);
+ if (err)
+ return err;
+
+ /* If the flash_update request is for a specific component, ignore all
+ * of the other components.
+ */
+ if (component) {
+ if (strcmp(component, "fw.mgmt") == 0)
+ pending &= IXGBE_ACI_NVM_ACTIV_SEL_NVM;
+ else if (strcmp(component, "fw.undi") == 0)
+ pending &= IXGBE_ACI_NVM_ACTIV_SEL_OROM;
+ else if (strcmp(component, "fw.netlist") == 0)
+ pending &= IXGBE_ACI_NVM_ACTIV_SEL_NETLIST;
+ else
+ return -EINVAL;
+ }
+
+ /* There is no previous pending update, so this request may continue */
+ if (!pending)
+ return 0;
+
+ /* In order to allow overwriting a previous pending update, notify
+ * firmware to cancel that update by issuing the appropriate command.
+ */
+ devlink_flash_update_status_notify(devlink,
+ "Canceling previous pending update",
+ component, 0, 0);
+
+ err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_WRITE);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to acquire device flash lock");
+ return -EIO;
+ }
+
+ pending |= IXGBE_ACI_NVM_REVERT_LAST_ACTIV;
+ err = ixgbe_switch_flash_banks(adapter, pending, NULL, extack);
+
+ ixgbe_release_nvm(hw);
+
+ return err;
+}
+
+/**
+ * ixgbe_flash_pldm_image - Write a PLDM-formatted firmware image to the device
+ * @devlink: pointer to devlink associated with the device to update
+ * @params: devlink flash update parameters
+ * @extack: netlink extended ACK structure
+ *
+ * Parse the data for a given firmware file, verifying that it is a valid PLDM
+ * formatted image that matches this device.
+ *
+ * Extract the device record Package Data and Component Tables and send them
+ * to the firmware. Extract and write the flash data for each of the three
+ * main flash components, "fw.mgmt", "fw.undi", and "fw.netlist". Notify
+ * firmware once the data is written to the inactive banks.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int ixgbe_flash_pldm_image(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct device *dev = &adapter->pdev->dev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_fwu_priv priv;
+ u8 preservation;
+ int err;
+
+ if (hw->mac.type != ixgbe_mac_e610)
+ return -EOPNOTSUPP;
+
+ switch (params->overwrite_mask) {
+ case 0:
+ /* preserve all settings and identifiers */
+ preservation = IXGBE_ACI_NVM_PRESERVE_ALL;
+ break;
+ case DEVLINK_FLASH_OVERWRITE_SETTINGS:
+ /* Overwrite settings, but preserve vital information such as
+ * device identifiers.
+ */
+ preservation = IXGBE_ACI_NVM_PRESERVE_SELECTED;
+ break;
+ case (DEVLINK_FLASH_OVERWRITE_SETTINGS |
+ DEVLINK_FLASH_OVERWRITE_IDENTIFIERS):
+ /* overwrite both settings and identifiers, preserve nothing */
+ preservation = IXGBE_ACI_NVM_NO_PRESERVATION;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Requested overwrite mask is not supported");
+ return -EOPNOTSUPP;
+ }
+
+	/* Caps cannot be read in recovery mode, so a missing
+	 * nvm_unified_update bit must not be treated as an error.
+	 */
+ if (!hw->dev_caps.common_cap.nvm_unified_update &&
+ (hw->mac.ops.fw_recovery_mode &&
+ !hw->mac.ops.fw_recovery_mode(hw))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Current firmware does not support unified update");
+ return -EOPNOTSUPP;
+ }
+
+ memset(&priv, 0, sizeof(priv));
+
+ priv.context.ops = &ixgbe_fwu_ops_e610;
+ priv.context.dev = dev;
+ priv.extack = extack;
+ priv.adapter = adapter;
+ priv.activate_flags = preservation;
+
+ devlink_flash_update_status_notify(devlink,
+ "Preparing to flash", NULL, 0, 0);
+
+ err = ixgbe_cancel_pending_update(adapter, NULL, extack);
+ if (err)
+ return err;
+
+ err = ixgbe_acquire_nvm(hw, LIBIE_AQC_RES_ACCESS_WRITE);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to acquire device flash lock");
+ return -EIO;
+ }
+
+ err = pldmfw_flash_image(&priv.context, params->fw);
+ if (err == -ENOENT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Firmware image has no record matching this device");
+ } else if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to flash PLDM image");
+ }
+
+ ixgbe_release_nvm(hw);
+
+ return err;
+}
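/* For reference (not part of this hunk): ixgbe_flash_pldm_image() matches the
 * devlink .flash_update callback signature, so it is presumably registered in
 * the devlink ops table added elsewhere in this series, roughly like this
 * (sketch only; the real ops table lives in devlink/devlink.c):
 */
static const struct devlink_ops ixgbe_devlink_ops_sketch = {
	/* ... other callbacks ... */
	.flash_update = ixgbe_flash_pldm_image,
};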
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.h
new file mode 100644
index 000000000000..abdd708c93df
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2025 Intel Corporation. */
+
+#ifndef _IXGBE_FW_UPDATE_H_
+#define _IXGBE_FW_UPDATE_H_
+
+int ixgbe_flash_pldm_image(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack);
+int ixgbe_get_pending_updates(struct ixgbe_adapter *adapter, u8 *pending,
+ struct netlink_ext_ack *extack);
+#endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index 866024f2b9ee..d1f4073b36f9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -9,7 +9,7 @@
#define IXGBE_IPSEC_KEY_BITS 160
static const char aes_gcm_name[] = "rfc4106(gcm(aes))";
-static void ixgbe_ipsec_del_sa(struct xfrm_state *xs);
+static void ixgbe_ipsec_del_sa(struct net_device *dev, struct xfrm_state *xs);
/**
* ixgbe_ipsec_set_tx_sa - set the Tx SA registers
@@ -321,7 +321,7 @@ void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
if (r->used) {
if (r->mode & IXGBE_RXTXMOD_VF)
- ixgbe_ipsec_del_sa(r->xs);
+ ixgbe_ipsec_del_sa(adapter->netdev, r->xs);
else
ixgbe_ipsec_set_rx_sa(hw, i, r->xs->id.spi,
r->key, r->salt,
@@ -330,7 +330,7 @@ void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
if (t->used) {
if (t->mode & IXGBE_RXTXMOD_VF)
- ixgbe_ipsec_del_sa(t->xs);
+ ixgbe_ipsec_del_sa(adapter->netdev, t->xs);
else
ixgbe_ipsec_set_tx_sa(hw, i, t->key, t->salt);
}
@@ -417,6 +417,7 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
/**
* ixgbe_ipsec_parse_proto_keys - find the key and salt based on the protocol
+ * @dev: pointer to net device
* @xs: pointer to xfrm_state struct
* @mykey: pointer to key array to populate
* @mysalt: pointer to salt value to populate
@@ -424,10 +425,10 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
* This copies the protocol keys and salt to our own data tables. The
* 82599 family only supports the one algorithm.
**/
-static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
+static int ixgbe_ipsec_parse_proto_keys(struct net_device *dev,
+ struct xfrm_state *xs,
u32 *mykey, u32 *mysalt)
{
- struct net_device *dev = xs->xso.real_dev;
unsigned char *key_data;
char *alg_name = NULL;
int key_len;
@@ -473,12 +474,13 @@ static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
/**
* ixgbe_ipsec_check_mgmt_ip - make sure there is no clash with mgmt IP filters
+ * @dev: pointer to net device
* @xs: pointer to transformer state struct
**/
-static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
+static int ixgbe_ipsec_check_mgmt_ip(struct net_device *dev,
+ struct xfrm_state *xs)
{
- struct net_device *dev = xs->xso.real_dev;
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
u32 mfval, manc, reg;
int num_filters = 4;
@@ -556,14 +558,15 @@ static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
/**
* ixgbe_ipsec_add_sa - program device with a security association
+ * @dev: pointer to device to program
* @xs: pointer to transformer state struct
* @extack: extack point to fill failure reason
**/
-static int ixgbe_ipsec_add_sa(struct xfrm_state *xs,
+static int ixgbe_ipsec_add_sa(struct net_device *dev,
+ struct xfrm_state *xs,
struct netlink_ext_ack *extack)
{
- struct net_device *dev = xs->xso.real_dev;
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_ipsec *ipsec = adapter->ipsec;
struct ixgbe_hw *hw = &adapter->hw;
int checked, match, first;
@@ -581,7 +584,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs,
return -EINVAL;
}
- if (ixgbe_ipsec_check_mgmt_ip(xs)) {
+ if (ixgbe_ipsec_check_mgmt_ip(dev, xs)) {
NL_SET_ERR_MSG_MOD(extack, "IPsec IP addr clash with mgmt filters");
return -EINVAL;
}
@@ -615,7 +618,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs,
rsa.decrypt = xs->ealg || xs->aead;
/* get the key and salt */
- ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
+ ret = ixgbe_ipsec_parse_proto_keys(dev, xs, rsa.key, &rsa.salt);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Rx SA table");
return ret;
@@ -724,7 +727,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs,
if (xs->id.proto & IPPROTO_ESP)
tsa.encrypt = xs->ealg || xs->aead;
- ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
+ ret = ixgbe_ipsec_parse_proto_keys(dev, xs, tsa.key, &tsa.salt);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Tx SA table");
memset(&tsa, 0, sizeof(tsa));
@@ -752,12 +755,12 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs,
/**
* ixgbe_ipsec_del_sa - clear out this specific SA
+ * @dev: pointer to device to program
* @xs: pointer to transformer state struct
**/
-static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
+static void ixgbe_ipsec_del_sa(struct net_device *dev, struct xfrm_state *xs)
{
- struct net_device *dev = xs->xso.real_dev;
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_ipsec *ipsec = adapter->ipsec;
struct ixgbe_hw *hw = &adapter->hw;
u32 zerobuf[4] = {0, 0, 0, 0};
@@ -817,30 +820,9 @@ static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
}
}
-/**
- * ixgbe_ipsec_offload_ok - can this packet use the xfrm hw offload
- * @skb: current data packet
- * @xs: pointer to transformer state struct
- **/
-static bool ixgbe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
-{
- if (xs->props.family == AF_INET) {
- /* Offload with IPv4 options is not supported yet */
- if (ip_hdr(skb)->ihl != 5)
- return false;
- } else {
- /* Offload with IPv6 extension headers is not support yet */
- if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
- return false;
- }
-
- return true;
-}
-
static const struct xfrmdev_ops ixgbe_xfrmdev_ops = {
.xdo_dev_state_add = ixgbe_ipsec_add_sa,
.xdo_dev_state_delete = ixgbe_ipsec_del_sa,
- .xdo_dev_offload_ok = ixgbe_ipsec_offload_ok,
};
/**
@@ -862,7 +844,8 @@ void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf)
continue;
if (ipsec->rx_tbl[i].mode & IXGBE_RXTXMOD_VF &&
ipsec->rx_tbl[i].vf == vf)
- ixgbe_ipsec_del_sa(ipsec->rx_tbl[i].xs);
+ ixgbe_ipsec_del_sa(adapter->netdev,
+ ipsec->rx_tbl[i].xs);
}
/* search tx sa table */
@@ -871,7 +854,8 @@ void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf)
continue;
if (ipsec->tx_tbl[i].mode & IXGBE_RXTXMOD_VF &&
ipsec->tx_tbl[i].vf == vf)
- ixgbe_ipsec_del_sa(ipsec->tx_tbl[i].xs);
+ ixgbe_ipsec_del_sa(adapter->netdev,
+ ipsec->tx_tbl[i].xs);
}
}
@@ -951,7 +935,7 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
memcpy(xs->aead->alg_name, aes_gcm_name, sizeof(aes_gcm_name));
/* set up the HW offload */
- err = ixgbe_ipsec_add_sa(xs, NULL);
+ err = ixgbe_ipsec_add_sa(adapter->netdev, xs, NULL);
if (err)
goto err_aead;
@@ -1055,7 +1039,7 @@ int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
xs = ipsec->tx_tbl[sa_idx].xs;
}
- ixgbe_ipsec_del_sa(xs);
+ ixgbe_ipsec_del_sa(adapter->netdev, xs);
/* remove the xs that was made-up in the add request */
kfree_sensitive(xs);
@@ -1073,7 +1057,7 @@ int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first,
struct ixgbe_ipsec_tx_data *itd)
{
- struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(tx_ring->netdev);
struct ixgbe_ipsec *ipsec = adapter->ipsec;
struct xfrm_state *xs;
struct sec_path *sp;
@@ -1163,7 +1147,7 @@ void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- struct ixgbe_adapter *adapter = netdev_priv(rx_ring->netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(rx_ring->netdev);
__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
__le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
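/* For reference (not part of this patch): the signature changes above track an
 * xfrm offload API in which the core passes the offloading netdev explicitly
 * instead of the driver reading xs->xso.real_dev. The callback prototypes
 * implied by this conversion look roughly like this (sketch struct, not the
 * real core definition):
 */
struct xfrmdev_ops_sketch {
	int  (*xdo_dev_state_add)(struct net_device *dev, struct xfrm_state *xs,
				  struct netlink_ext_ack *extack);
	void (*xdo_dev_state_delete)(struct net_device *dev,
				     struct xfrm_state *xs);
};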
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 16fa621ce0ff..a1d04914fbbc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include "ixgbe.h"
#include "ixgbe_sriov.h"
@@ -107,6 +107,7 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
if (num_tcs > 4) {
/*
* TCs : TC0/1 TC2/3 TC4-7
@@ -317,7 +318,7 @@ static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter)
* ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
* @adapter: board private structure to initialize
*
- * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues
+ * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
* and VM pools where appropriate. Also assign queues based on DCB
* priorities and map accordingly..
*
@@ -491,7 +492,7 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
* ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
* @adapter: board private structure to initialize
*
- * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues
+ * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
* and VM pools where appropriate. If RSS is available, then also try and
* enable RSS and map accordingly.
*
@@ -890,7 +891,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
q_vector->rx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
IXGBE_ITR_ADAPTIVE_LATENCY;
- /* intialize ITR */
+ /* initialize ITR */
if (txr_count && !rxr_count) {
/* tx only vector */
if (adapter->tx_itr_setting == 1)
@@ -1292,7 +1293,8 @@ void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
/* set bits to identify this as an advanced context descriptor */
- type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
+ type_tucmd |= IXGBE_TXD_CMD_DEXT |
+ FIELD_PREP(IXGBE_ADVTXD_DTYP_MASK, IXGBE_ADVTXD_DTYP_CTXT);
context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
context_desc->fceof_saidx = cpu_to_le32(fceof_saidx);
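/* For reference: FIELD_PREP() from <linux/bitfield.h> shifts a field value
 * into the position described by its mask, and FIELD_GET() recovers it, as
 * the descriptor-type check added below in ixgbe_main.c does. A tiny
 * illustration with made-up masks/values (the real IXGBE_ADVTXD_* definitions
 * are in the hardware headers and are not shown in this hunk):
 */
#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_DTYP_MASK	GENMASK(23, 20)	/* hypothetical field position */
#define EXAMPLE_DTYP_CTXT	0x2		/* hypothetical field value */

static inline bool example_is_ctxt_desc(u32 type_tucmd)
{
	/* FIELD_PREP(EXAMPLE_DTYP_MASK, EXAMPLE_DTYP_CTXT) packs 0x2 into
	 * bits 23:20 (0x00200000); FIELD_GET() on the packed word returns
	 * EXAMPLE_DTYP_CTXT again.
	 */
	return FIELD_GET(EXAMPLE_DTYP_MASK, type_tucmd) == EXAMPLE_DTYP_CTXT;
}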
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 8b8404d8c946..034618e79169 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/types.h>
#include <linux/module.h>
@@ -9,6 +9,7 @@
#include <linux/string.h>
#include <linux/in.h>
#include <linux/interrupt.h>
+#include <linux/iopoll.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
@@ -42,11 +43,14 @@
#include "ixgbe.h"
#include "ixgbe_common.h"
+#include "ixgbe_e610.h"
#include "ixgbe_dcb_82599.h"
+#include "ixgbe_mbx.h"
#include "ixgbe_phy.h"
#include "ixgbe_sriov.h"
#include "ixgbe_model.h"
#include "ixgbe_txrx_common.h"
+#include "devlink/devlink.h"
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
@@ -72,6 +76,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_x550em_x_fw] = &ixgbe_x550em_x_fw_info,
[board_x550em_a] = &ixgbe_x550em_a_info,
[board_x550em_a_fw] = &ixgbe_x550em_a_fw_info,
+ [board_e610] = &ixgbe_e610_info,
};
/* ixgbe_pci_tbl - PCI Device ID Table
@@ -130,6 +135,11 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_BACKPLANE), board_e610},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_SFP), board_e610},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_10G_T), board_e610},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_2_5G_T), board_e610},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_SGMII), board_e610},
/* required last entry */
{0, }
};
@@ -162,6 +172,7 @@ static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+MODULE_IMPORT_NS("LIBIE_FWLOG");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL v2");
@@ -172,6 +183,8 @@ static struct workqueue_struct *ixgbe_wq;
static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);
+static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *);
+static void ixgbe_watchdog_update_link(struct ixgbe_adapter *);
static const struct net_device_ops ixgbe_netdev_ops;
@@ -235,8 +248,11 @@ static int ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
* bandwidth details should be gathered from the parent bus instead of from the
* device. Used to ensure that various locations all have the correct device ID
* checks.
+ *
+ * Return: true if information should be collected from the parent bus, false
+ * otherwise
*/
-static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
+static bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
{
switch (hw->device_id) {
case IXGBE_DEV_ID_82599_SFP_SF_QP:
@@ -875,6 +891,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
if (direction == -1) {
/* other causes */
msix_vector |= IXGBE_IVAR_ALLOC_VAL;
@@ -914,6 +931,7 @@ void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
mask = (qmask & 0xFFFFFFFF);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
mask = (qmask >> 32);
@@ -951,10 +969,6 @@ static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++)
clear_bit(__IXGBE_HANG_CHECK_ARMED,
&adapter->tx_ring[i]->state);
-
- for (i = 0; i < adapter->num_xdp_queues; i++)
- clear_bit(__IXGBE_HANG_CHECK_ARMED,
- &adapter->xdp_ring[i]->state);
}
static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
@@ -1024,7 +1038,49 @@ static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
return ((head <= tail) ? tail : tail + ring->count) - head;
}
-static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
+/**
+ * ixgbe_get_vf_idx - provide VF index number based on queue index
+ * @adapter: pointer to the adapter struct
+ * @queue: Tx queue identifier
+ * @vf: output VF index
+ *
+ * Provide the VF index number associated with the input queue.
+ *
+ * Returns: 0 if the VF index was found, or a negative error code.
+ */
+static int ixgbe_get_vf_idx(struct ixgbe_adapter *adapter, u16 queue, u16 *vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 queue_count;
+ u32 reg;
+
+ if (queue >= adapter->num_tx_queues)
+ return -EINVAL;
+
+ /* Determine number of queues by checking
+ * number of virtual functions
+ */
+ reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ switch (reg & IXGBE_GCR_EXT_VT_MODE_MASK) {
+ case IXGBE_GCR_EXT_VT_MODE_64:
+ queue_count = IXGBE_64VFS_QUEUES;
+ break;
+ case IXGBE_GCR_EXT_VT_MODE_32:
+ queue_count = IXGBE_32VFS_QUEUES;
+ break;
+ case IXGBE_GCR_EXT_VT_MODE_16:
+ queue_count = IXGBE_16VFS_QUEUES;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *vf = queue / queue_count;
+
+ return 0;
+}
+
+static bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
{
u32 tx_done = ixgbe_get_tx_completed(tx_ring);
u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
@@ -1080,7 +1136,7 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
static int ixgbe_tx_maxrate(struct net_device *netdev,
int queue_index, u32 maxrate)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 bcnrc_val = ixgbe_link_mbps(adapter);
@@ -1143,6 +1199,148 @@ void ixgbe_update_rx_ring_stats(struct ixgbe_ring *rx_ring,
}
/**
+ * ixgbe_pf_handle_tx_hang - handle Tx hang on PF
+ * @tx_ring: tx ring that hung
+ * @next: next_to_clean index for the ring
+ *
+ * Prints a message containing details about the tx hang.
+ */
+static void ixgbe_pf_handle_tx_hang(struct ixgbe_ring *tx_ring,
+ unsigned int next)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ e_err(drv, "Detected Tx Unit Hang\n"
+ " Tx Queue <%d>\n"
+ " TDH, TDT <%x>, <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "tx_buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " jiffies <%lx>\n",
+ tx_ring->queue_index,
+ IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
+ IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
+ tx_ring->next_to_use, next,
+ tx_ring->tx_buffer_info[next].time_stamp, jiffies);
+
+ netif_stop_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
+}
+
+/**
+ * ixgbe_vf_handle_tx_hang - handle Tx hang on VF
+ * @adapter: structure containing ring specific data
+ * @vf: VF index
+ *
+ * Print a message containing details about malicious driver detection.
+ * Set malicious VF link down if the detection happened several times.
+ */
+static void ixgbe_vf_handle_tx_hang(struct ixgbe_adapter *adapter, u16 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (adapter->hw.mac.type != ixgbe_mac_e610)
+ return;
+
+ e_warn(drv,
+ "Malicious Driver Detection tx hang detected on PF %d VF %d MAC: %pM",
+ hw->bus.func, vf, adapter->vfinfo[vf].vf_mac_addresses);
+
+ adapter->tx_hang_count[vf]++;
+ if (adapter->tx_hang_count[vf] == IXGBE_MAX_TX_VF_HANGS) {
+ ixgbe_set_vf_link_state(adapter, vf,
+ IFLA_VF_LINK_STATE_DISABLE);
+ adapter->tx_hang_count[vf] = 0;
+ }
+}
+
+static u32 ixgbe_poll_tx_icache(struct ixgbe_hw *hw, u16 queue, u16 idx)
+{
+ IXGBE_WRITE_REG(hw, IXGBE_TXDESCIC, queue * idx);
+ return IXGBE_READ_REG(hw, IXGBE_TXDESCIC);
+}
+
+/**
+ * ixgbe_check_illegal_queue - search for queue with illegal packet
+ * @adapter: structure containing ring specific data
+ * @queue: queue index
+ *
+ * Check whether any tx descriptor associated with the input queue
+ * contains an illegal packet.
+ *
+ * Returns: true if the queue contains an illegal packet.
+ */
+static bool ixgbe_check_illegal_queue(struct ixgbe_adapter *adapter,
+ u16 queue)
+{
+ u32 hdr_len_reg, mss_len_reg, type_reg;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 mss_len, header_len, reg;
+
+ for (u16 i = 0; i < IXGBE_MAX_TX_DESCRIPTORS; i++) {
+ /* HW will clear bit IXGBE_TXDESCIC_READY when address
+ * is written to address field. HW will set this bit
+ * when iCache read is done, and data is ready at TIC_DWx.
+ * Set descriptor address.
+ */
+ read_poll_timeout(ixgbe_poll_tx_icache, reg,
+ !(reg & IXGBE_TXDESCIC_READY), 0, 0, false,
+ hw, queue, i);
+
+ /* read tx descriptor access registers */
+ hdr_len_reg = IXGBE_READ_REG(hw, IXGBE_TIC_DW2(IXGBE_VLAN_MACIP_LENS_REG));
+ type_reg = IXGBE_READ_REG(hw, IXGBE_TIC_DW2(IXGBE_TYPE_TUCMD_MLHL));
+ mss_len_reg = IXGBE_READ_REG(hw, IXGBE_TIC_DW2(IXGBE_MSS_L4LEN_IDX));
+
+ /* check if Advanced Context Descriptor */
+ if (FIELD_GET(IXGBE_ADVTXD_DTYP_MASK, type_reg) !=
+ IXGBE_ADVTXD_DTYP_CTXT)
+ continue;
+
+ /* check for illegal MSS and Header length */
+ mss_len = FIELD_GET(IXGBE_ADVTXD_MSS_MASK, mss_len_reg);
+ header_len = FIELD_GET(IXGBE_ADVTXD_HEADER_LEN_MASK,
+ hdr_len_reg);
+ if ((mss_len + header_len) > SZ_16K) {
+ e_warn(probe, "mss len + header len too long\n");
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/**
+ * ixgbe_handle_mdd_event - handle mdd event
+ * @adapter: structure containing ring specific data
+ * @tx_ring: tx descriptor ring to handle
+ *
+ * Reset the VF driver if a malicious VF is detected or an illegal
+ * packet is detected in any queue.
+ */
+static void ixgbe_handle_mdd_event(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *tx_ring)
+{
+ u16 vf, q;
+
+ if (adapter->vfinfo && ixgbe_check_mdd_event(adapter)) {
+ /* vf mdd info and malicious vf detected */
+ if (!ixgbe_get_vf_idx(adapter, tx_ring->queue_index, &vf))
+ ixgbe_vf_handle_tx_hang(adapter, vf);
+ } else {
+ /* malicious vf not detected */
+ for (q = 0; q < IXGBE_MAX_TX_QUEUES; q++) {
+ if (ixgbe_check_illegal_queue(adapter, q) &&
+ !ixgbe_get_vf_idx(adapter, q, &vf))
+ /* illegal queue detected */
+ ixgbe_vf_handle_tx_hang(adapter, vf);
+ }
+ }
+}
+
+/**
* ixgbe_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: structure containing interrupt and ring information
* @tx_ring: tx ring to clean
@@ -1248,27 +1446,14 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
total_bytes);
adapter->tx_ipsec += total_ipsec;
+ if (ring_is_xdp(tx_ring))
+ return !!budget;
+
if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
- /* schedule immediate reset if we believe we hung */
- struct ixgbe_hw *hw = &adapter->hw;
- e_err(drv, "Detected Tx Unit Hang %s\n"
- " Tx Queue <%d>\n"
- " TDH, TDT <%x>, <%x>\n"
- " next_to_use <%x>\n"
- " next_to_clean <%x>\n"
- "tx_buffer_info[next_to_clean]\n"
- " time_stamp <%lx>\n"
- " jiffies <%lx>\n",
- ring_is_xdp(tx_ring) ? "(XDP)" : "",
- tx_ring->queue_index,
- IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
- IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
- tx_ring->next_to_use, i,
- tx_ring->tx_buffer_info[i].time_stamp, jiffies);
-
- if (!ring_is_xdp(tx_ring))
- netif_stop_subqueue(tx_ring->netdev,
- tx_ring->queue_index);
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ ixgbe_handle_mdd_event(adapter, tx_ring);
+
+ ixgbe_pf_handle_tx_hang(tx_ring, i);
e_info(probe,
"tx hang %d detected on queue %d, resetting adapter\n",
@@ -1281,9 +1466,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
return true;
}
- if (ring_is_xdp(tx_ring))
- return !!budget;
-
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
if (!__netif_txq_completed_wake(txq, total_packets, total_bytes,
@@ -1908,10 +2090,6 @@ bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
{
struct net_device *netdev = rx_ring->netdev;
- /* XDP packets use error pointer so abort at this point */
- if (IS_ERR(skb))
- return true;
-
/* Verify netdev is present, and that packet does not have any
* errors that would be unacceptable to the netdev.
*/
@@ -2094,7 +2272,7 @@ static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
/* hand second half of page back to the ring */
ixgbe_reuse_rx_page(rx_ring, rx_buffer);
} else {
- if (!IS_ERR(skb) && IXGBE_CB(skb)->dma == rx_buffer->dma) {
+ if (skb && IXGBE_CB(skb)->dma == rx_buffer->dma) {
/* the page has been released from the ring */
IXGBE_CB(skb)->page_released = true;
} else {
@@ -2188,7 +2366,7 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
struct sk_buff *skb;
/* Prefetch first cache line of first page. If xdp->data_meta
- * is unused, this points extactly as xdp->data, otherwise we
+ * is unused, this points exactly as xdp->data, otherwise we
* likely have a consumer accessing first few bytes of meta
* data, and then actual data.
*/
@@ -2219,9 +2397,9 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
return skb;
}
-static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring,
- struct xdp_buff *xdp)
+static int ixgbe_run_xdp(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *rx_ring,
+ struct xdp_buff *xdp)
{
int err, result = IXGBE_XDP_PASS;
struct bpf_prog *xdp_prog;
@@ -2271,7 +2449,7 @@ out_failure:
break;
}
xdp_out:
- return ERR_PTR(-result);
+ return result;
}
static unsigned int ixgbe_rx_frame_truesize(struct ixgbe_ring *rx_ring,
@@ -2311,7 +2489,7 @@ static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring,
* This function provides a "bounce buffer" approach to Rx interrupt
* processing. The advantage to this is that on systems that have
* expensive overhead for IOMMU access this provides a means of avoiding
- * it by maintaining the mapping of the page to the syste.
+ * it by maintaining the mapping of the page to the system.
*
* Returns amount of work completed
**/
@@ -2329,6 +2507,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
unsigned int offset = rx_ring->rx_offset;
unsigned int xdp_xmit = 0;
struct xdp_buff xdp;
+ int xdp_res = 0;
/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
@@ -2374,12 +2553,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size);
#endif
- skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
+ xdp_res = ixgbe_run_xdp(adapter, rx_ring, &xdp);
}
- if (IS_ERR(skb)) {
- unsigned int xdp_res = -PTR_ERR(skb);
-
+ if (xdp_res) {
if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
xdp_xmit |= xdp_res;
ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
@@ -2399,7 +2576,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
}
/* exit if we failed to retrieve a buffer */
- if (!skb) {
+ if (!xdp_res && !skb) {
rx_ring->rx_stats.alloc_rx_buff_failed++;
rx_buffer->pagecnt_bias++;
break;
@@ -2413,7 +2590,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
continue;
/* verify the packet layout is correct */
- if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
+ if (xdp_res || ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
continue;
/* probably a little skewed due to removing CRC */
@@ -2514,6 +2691,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
ixgbe_set_ivar(adapter, -1, 1, v_idx);
break;
default:
@@ -2527,6 +2705,9 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
IXGBE_EIMS_MAILBOX |
IXGBE_EIMS_LSC);
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ mask &= ~IXGBE_EIMS_FW_EVENT;
+
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}
@@ -2743,6 +2924,7 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
/*
* set the WDIS bit to not clear the timer bits and cause an
* immediate assertion of the interrupt
@@ -2965,6 +3147,226 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
}
}
+/**
+ * ixgbe_check_phy_fw_load - check if PHY FW load failed
+ * @adapter: pointer to adapter structure
+ * @link_cfg_err: bitmap from the link info structure
+ *
+ * Check if external PHY FW load failed and print an error message if it did.
+ */
+static void ixgbe_check_phy_fw_load(struct ixgbe_adapter *adapter,
+ u8 link_cfg_err)
+{
+ if (!(link_cfg_err & IXGBE_ACI_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
+ adapter->flags2 &= ~IXGBE_FLAG2_PHY_FW_LOAD_FAILED;
+ return;
+ }
+
+ if (adapter->flags2 & IXGBE_FLAG2_PHY_FW_LOAD_FAILED)
+ return;
+
+ if (link_cfg_err & IXGBE_ACI_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
+ netdev_err(adapter->netdev, "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
+ adapter->flags2 |= IXGBE_FLAG2_PHY_FW_LOAD_FAILED;
+ }
+}
+
+/**
+ * ixgbe_check_module_power - check module power level
+ * @adapter: pointer to adapter structure
+ * @link_cfg_err: bitmap from the link info structure
+ *
+ * Check module power level returned by a previous call to aci_get_link_info
+ * and print error messages if module power level is not supported.
+ */
+static void ixgbe_check_module_power(struct ixgbe_adapter *adapter,
+ u8 link_cfg_err)
+{
+ /* If module power level is supported, clear the flag. */
+ if (!(link_cfg_err & (IXGBE_ACI_LINK_INVAL_MAX_POWER_LIMIT |
+ IXGBE_ACI_LINK_MODULE_POWER_UNSUPPORTED))) {
+ adapter->flags2 &= ~IXGBE_FLAG2_MOD_POWER_UNSUPPORTED;
+ return;
+ }
+
+ /* If IXGBE_FLAG2_MOD_POWER_UNSUPPORTED was previously set and the
+ * above block didn't clear this bit, there's nothing to do.
+ */
+ if (adapter->flags2 & IXGBE_FLAG2_MOD_POWER_UNSUPPORTED)
+ return;
+
+ if (link_cfg_err & IXGBE_ACI_LINK_INVAL_MAX_POWER_LIMIT) {
+ netdev_err(adapter->netdev, "The installed module is incompatible with the device's NVM image. Cannot start link.\n");
+ adapter->flags2 |= IXGBE_FLAG2_MOD_POWER_UNSUPPORTED;
+ } else if (link_cfg_err & IXGBE_ACI_LINK_MODULE_POWER_UNSUPPORTED) {
+ netdev_err(adapter->netdev, "The module's power requirements exceed the device's power supply. Cannot start link.\n");
+ adapter->flags2 |= IXGBE_FLAG2_MOD_POWER_UNSUPPORTED;
+ }
+}
+
+/**
+ * ixgbe_check_link_cfg_err - check if link configuration failed
+ * @adapter: pointer to adapter structure
+ * @link_cfg_err: bitmap from the link info structure
+ *
+ * Print a message if the link_cfg_err value from the link info structure
+ * indicates a link configuration failure.
+ */
+static void ixgbe_check_link_cfg_err(struct ixgbe_adapter *adapter,
+ u8 link_cfg_err)
+{
+ ixgbe_check_module_power(adapter, link_cfg_err);
+ ixgbe_check_phy_fw_load(adapter, link_cfg_err);
+}
+
+/**
+ * ixgbe_process_link_status_event - process the link event
+ * @adapter: pointer to adapter structure
+ * @link_up: true if the physical link is up and false if it is down
+ * @link_speed: current link speed received from the link event
+ *
+ * Return: 0 on success or negative value on failure.
+ */
+static int
+ixgbe_process_link_status_event(struct ixgbe_adapter *adapter, bool link_up,
+ u16 link_speed)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int status;
+
+ /* Update the link info structures and re-enable link events,
+ * don't bail on failure because other bookkeeping is still needed.
+ */
+ status = ixgbe_update_link_info(hw);
+ if (status)
+ e_dev_err("Failed to update link status, err %d aq_err %d\n",
+ status, hw->aci.last_status);
+
+ ixgbe_check_link_cfg_err(adapter, hw->link.link_info.link_cfg_err);
+
+ /* Check if the link state is up after updating link info, and treat
+ * this event as an UP event since the link is actually UP now.
+ */
+ if (hw->link.link_info.link_info & IXGBE_ACI_LINK_UP)
+ link_up = true;
+
+ /* Turn off PHY if media was removed. */
+ if (!(adapter->flags2 & IXGBE_FLAG2_NO_MEDIA) &&
+ !(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE))
+ adapter->flags2 |= IXGBE_FLAG2_NO_MEDIA;
+
+ if (link_up == adapter->link_up &&
+ link_up == netif_carrier_ok(adapter->netdev) &&
+ link_speed == adapter->link_speed)
+ return 0;
+
+ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+ adapter->link_check_timeout = jiffies;
+ ixgbe_watchdog_update_link(adapter);
+
+ if (link_up)
+ ixgbe_watchdog_link_is_up(adapter);
+ else
+ ixgbe_watchdog_link_is_down(adapter);
+
+ return 0;
+}
+
+/**
+ * ixgbe_handle_link_status_event - handle link status event via ACI
+ * @adapter: pointer to adapter structure
+ * @e: event structure containing link status info
+ */
+static void
+ixgbe_handle_link_status_event(struct ixgbe_adapter *adapter,
+ struct ixgbe_aci_event *e)
+{
+ struct ixgbe_aci_cmd_get_link_status_data *link_data;
+ u16 link_speed;
+ bool link_up;
+
+ link_data = (struct ixgbe_aci_cmd_get_link_status_data *)e->msg_buf;
+
+ link_up = !!(link_data->link_info & IXGBE_ACI_LINK_UP);
+ link_speed = le16_to_cpu(link_data->link_speed);
+
+ if (ixgbe_process_link_status_event(adapter, link_up, link_speed))
+ e_dev_warn("Could not process link status event");
+}
+
+/**
+ * ixgbe_schedule_fw_event - schedule Firmware event
+ * @adapter: pointer to the adapter structure
+ *
+ * If the adapter is not in the down, removing, or resetting state,
+ * an event is scheduled.
+ */
+static void ixgbe_schedule_fw_event(struct ixgbe_adapter *adapter)
+{
+ if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
+ !test_bit(__IXGBE_REMOVING, &adapter->state) &&
+ !test_bit(__IXGBE_RESETTING, &adapter->state)) {
+ adapter->flags2 |= IXGBE_FLAG2_FW_ASYNC_EVENT;
+ ixgbe_service_event_schedule(adapter);
+ }
+}
+
+/**
+ * ixgbe_aci_event_cleanup - release msg_buf memory
+ * @event: pointer to the event holding msg_buf to be released
+ *
+ * Free the memory allocated for the event's msg_buf. Implements automatic,
+ * scope-based memory cleanup.
+ */
+static void ixgbe_aci_event_cleanup(struct ixgbe_aci_event *event)
+{
+ kfree(event->msg_buf);
+}
+
+/**
+ * ixgbe_handle_fw_event - handle Firmware event
+ * @adapter: pointer to the adapter structure
+ *
+ * Obtain an event from the ACI and then process it according to the
+ * type of the event and the opcode.
+ */
+static void ixgbe_handle_fw_event(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_aci_event event __cleanup(ixgbe_aci_event_cleanup);
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool pending = false;
+ int err;
+
+ if (adapter->flags2 & IXGBE_FLAG2_FW_ASYNC_EVENT)
+ adapter->flags2 &= ~IXGBE_FLAG2_FW_ASYNC_EVENT;
+ event.buf_len = IXGBE_ACI_MAX_BUFFER_SIZE;
+ event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
+ if (!event.msg_buf)
+ return;
+
+ do {
+ err = ixgbe_aci_get_event(hw, &event, &pending);
+ if (err)
+ break;
+
+ switch (le16_to_cpu(event.desc.opcode)) {
+ case ixgbe_aci_opc_get_link_status:
+ ixgbe_handle_link_status_event(adapter, &event);
+ break;
+ case ixgbe_aci_opc_temp_tca_event:
+ e_crit(drv, "%s\n", ixgbe_overheat_msg);
+ ixgbe_down(adapter);
+ break;
+ case libie_aqc_opc_fw_logs_event:
+ libie_get_fwlog_data(&hw->fwlog, event.msg_buf,
+ le16_to_cpu(event.desc.datalen));
+ break;
+ default:
+ e_warn(hw, "unknown FW async event captured\n");
+ break;
+ }
+ } while (pending);
+}
+
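ixgbe_handle_fw_event() above leans on the kernel's scope-based cleanup support: the __cleanup(fn) attribute (see <linux/compiler_attributes.h> and <linux/cleanup.h>) makes the compiler call fn(&var) on every path out of the enclosing scope, which is why neither the early return nor the break paths need an explicit kfree() of event.msg_buf. A self-contained, hedged illustration of the mechanism with made-up names:

#include <linux/slab.h>

struct scratch_buf {
        void *data;
};

/* Invoked automatically with a pointer to the variable when it goes out
 * of scope; kfree(NULL) is a no-op, so the allocation-failure path is
 * safe as well.
 */
static void scratch_buf_release(struct scratch_buf *b)
{
        kfree(b->data);
}

static int scratch_buf_example(size_t len)
{
        struct scratch_buf b __cleanup(scratch_buf_release) = {};

        b.data = kzalloc(len, GFP_KERNEL);
        if (!b.data)
                return -ENOMEM;         /* scratch_buf_release() runs here */

        /* ... use b.data ... */
        return 0;                       /* and here */
}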
static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
u64 qmask)
{
@@ -2981,6 +3383,7 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
mask = (qmask & 0xFFFFFFFF);
if (mask)
IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
@@ -3034,6 +3437,9 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_e610:
+ mask |= IXGBE_EIMS_FW_EVENT;
+ fallthrough;
case ixgbe_mac_x550em_a:
if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
@@ -3090,12 +3496,16 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
if (eicr & IXGBE_EICR_MAILBOX)
ixgbe_msg_task(adapter);
+ if (eicr & IXGBE_EICR_FW_EVENT)
+ ixgbe_schedule_fw_event(adapter);
+
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
(eicr & IXGBE_EICR_GPI_SDP0_X540)) {
adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
@@ -3333,6 +3743,9 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
if (eicr & IXGBE_EICR_LSC)
ixgbe_check_lsc(adapter);
+ if (eicr & IXGBE_EICR_FW_EVENT)
+ ixgbe_schedule_fw_event(adapter);
+
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
ixgbe_check_sfp_event(adapter, eicr);
@@ -3341,6 +3754,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
if (eicr & IXGBE_EICR_ECC) {
e_info(link, "Received ECC Err, initiating reset\n");
set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
@@ -3441,6 +3855,7 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
@@ -3688,8 +4103,12 @@ void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
#endif
{
- int i;
bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+
+ if (hw->mac.ops.disable_mdd)
+ hw->mac.ops.disable_mdd(hw);
if (adapter->ixgbe_ieee_pfc)
pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
@@ -3711,6 +4130,9 @@ static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++)
ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
}
+
+ if (hw->mac.ops.enable_mdd)
+ hw->mac.ops.enable_mdd(hw);
}
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
@@ -4358,6 +4780,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
if (adapter->num_vfs)
rdrxctl |= IXGBE_RDRXCTL_PSP;
fallthrough;
@@ -4433,7 +4856,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
/* add VID to filter table */
@@ -4492,7 +4915,7 @@ void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
/* remove VID from filter table */
@@ -4525,6 +4948,7 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
@@ -4563,6 +4987,7 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
@@ -4637,7 +5062,7 @@ static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
/* pull VLAN ID from VLVF */
vid = vlvf & VLAN_VID_MASK;
- /* only concern outselves with a certain range */
+ /* only concern ourselves with a certain range */
if (vid < vid_start || vid >= vid_end)
continue;
@@ -4715,7 +5140,7 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
**/
static int ixgbe_write_mc_addr_list(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (!netif_running(netdev))
@@ -4891,7 +5316,7 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int ret;
ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0));
@@ -4901,7 +5326,7 @@ static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0));
@@ -4919,7 +5344,7 @@ static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr)
**/
void ixgbe_set_rx_mode(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
netdev_features_t features = netdev->features;
@@ -5021,7 +5446,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
static int ixgbe_udp_tunnel_sync(struct net_device *dev, unsigned int table)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
struct udp_tunnel_info ti;
@@ -5147,6 +5572,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
dv_id = IXGBE_DV_X540(link, tc);
break;
default:
@@ -5207,6 +5633,7 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
dv_id = IXGBE_LOW_DV_X540(tc);
break;
default:
@@ -5509,6 +5936,48 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_enable_link_status_events - enable link status events
+ * @adapter: pointer to the adapter structure
+ * @mask: event mask to be set
+ *
+ * Enables link status events by invoking ixgbe_configure_lse()
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_enable_link_status_events(struct ixgbe_adapter *adapter,
+ u16 mask)
+{
+ int err;
+
+ err = ixgbe_configure_lse(&adapter->hw, true, mask);
+ if (err)
+ return err;
+
+ adapter->lse_mask = mask;
+ return 0;
+}
+
+/**
+ * ixgbe_disable_link_status_events - disable link status events
+ * @adapter: pointer to the adapter structure
+ *
+ * Disables link status events by invoking ixgbe_configure_lse()
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_disable_link_status_events(struct ixgbe_adapter *adapter)
+{
+ int err;
+
+ err = ixgbe_configure_lse(&adapter->hw, false, adapter->lse_mask);
+ if (err)
+ return err;
+
+ adapter->lse_mask = 0;
+ return 0;
+}
+
+/**
* ixgbe_sfp_link_config - set up SFP+ link
* @adapter: pointer to private adapter struct
**/
@@ -5531,13 +6000,21 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
* ixgbe_non_sfp_link_config - set up non-SFP+ link
* @hw: pointer to private hardware struct
*
- * Returns 0 on success, negative on failure
+ * Configure non-SFP link.
+ *
+ * Return: 0 on success, negative on failure
**/
static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
{
- u32 speed;
+ struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter,
+ hw);
+ u16 mask = ~((u16)(IXGBE_ACI_LINK_EVENT_UPDOWN |
+ IXGBE_ACI_LINK_EVENT_MEDIA_NA |
+ IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL |
+ IXGBE_ACI_LINK_EVENT_PHY_FW_LOAD_FAIL));
bool autoneg, link_up = false;
int ret = -EIO;
+ u32 speed;
if (hw->mac.ops.check_link)
ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
@@ -5560,13 +6037,53 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
if (ret)
return ret;
- if (hw->mac.ops.setup_link)
+ if (hw->mac.ops.setup_link) {
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ ret = ixgbe_enable_link_status_events(adapter, mask);
+ if (ret)
+ return ret;
+ }
ret = hw->mac.ops.setup_link(hw, speed, link_up);
+ }
return ret;
}
/**
+ * ixgbe_check_media_subtask - check for media
+ * @adapter: pointer to adapter structure
+ *
+ * If media is available then initialize PHY user configuration. Configure the
+ * PHY if the interface is up.
+ */
+static void ixgbe_check_media_subtask(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* No need to check for media if it's already present */
+ if (!(adapter->flags2 & IXGBE_FLAG2_NO_MEDIA))
+ return;
+
+ /* Refresh link info and check if media is present */
+ if (ixgbe_update_link_info(hw))
+ return;
+
+ ixgbe_check_link_cfg_err(adapter, hw->link.link_info.link_cfg_err);
+
+ if (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE) {
+ /* PHY settings are reset on media insertion, reconfigure
+ * PHY to preserve settings.
+ */
+ if (!(ixgbe_non_sfp_link_config(&adapter->hw)))
+ adapter->flags2 &= ~IXGBE_FLAG2_NO_MEDIA;
+
+ /* A Link Status Event will be generated; the event handler
+ * will complete bringing the interface up
+ */
+ }
+}
+
+/**
* ixgbe_clear_vf_stats_counters - Clear out VF stats after reset
* @adapter: board private structure
*
@@ -5629,6 +6146,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
default:
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
@@ -5979,6 +6497,7 @@ dma_engine_disable:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
~IXGBE_DMATXCTL_TE));
@@ -6197,7 +6716,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
- del_timer_sync(&adapter->service_timer);
+ timer_delete_sync(&adapter->service_timer);
if (adapter->num_vfs) {
/* Clear EITR Select mapping */
@@ -6223,6 +6742,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
ixgbe_clean_all_tx_rings(adapter);
ixgbe_clean_all_rx_rings(adapter);
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ ixgbe_disable_link_status_events(adapter);
}
/**
@@ -6257,7 +6778,7 @@ static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
**/
static void ixgbe_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
/* Do the reset outside of interrupt context */
ixgbe_tx_timeout_reset(adapter);
@@ -6278,6 +6799,7 @@ static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
break;
case ixgbe_mac_X540:
case ixgbe_mac_X550:
+ case ixgbe_mac_e610:
adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
break;
@@ -6341,6 +6863,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_device_id = pdev->subsystem_device;
+ hw->mac.max_link_up_time = IXGBE_LINK_UP_TIME;
+
/* get_invariants needs the device IDs */
ii->get_invariants(hw);
@@ -6454,6 +6978,13 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
break;
}
+ /* Make sure the SWFW semaphore is in a valid state */
+ if (hw->mac.ops.init_swfw_sync)
+ hw->mac.ops.init_swfw_sync(hw);
+
+ if (hw->mac.type == ixgbe_mac_e610)
+ mutex_init(&hw->aci.lock);
+
#ifdef IXGBE_FCOE
/* FCoE support exists, always init the FCoE lock */
spin_lock_init(&adapter->fcoe.lock);
@@ -6503,7 +7034,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
/* initialize eeprom parameters */
- if (ixgbe_init_eeprom_params_generic(hw)) {
+ if (hw->eeprom.ops.init_params(hw)) {
e_dev_err("EEPROM initialization failed\n");
return -EIO;
}
@@ -6819,7 +7350,7 @@ static int ixgbe_max_xdp_frame_size(struct ixgbe_adapter *adapter)
**/
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (ixgbe_enabled_xdp_adapter(adapter)) {
int new_frame_size = new_mtu + IXGBE_PKT_HDR_PAD;
@@ -6866,7 +7397,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
**/
int ixgbe_open(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
int err, queues;
@@ -6908,6 +7439,19 @@ int ixgbe_open(struct net_device *netdev)
ixgbe_up_complete(adapter);
udp_tunnel_nic_reset_ntf(netdev);
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ int err = ixgbe_update_link_info(&adapter->hw);
+
+ if (err)
+ e_dev_err("Failed to update link info, err %d.\n", err);
+
+ ixgbe_check_link_cfg_err(adapter,
+ adapter->hw.link.link_info.link_cfg_err);
+
+ err = ixgbe_non_sfp_link_config(&adapter->hw);
+ if (err)
+ e_dev_err("Link setup failed, err %d.\n", err);
+ }
return 0;
@@ -6957,7 +7501,7 @@ static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
**/
int ixgbe_close(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
ixgbe_ptp_stop(adapter);
@@ -7061,6 +7605,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
pci_wake_from_d3(pdev, !!wufc);
break;
default:
@@ -7208,6 +7753,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
hwstats->pxonrxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
break;
@@ -7220,11 +7766,12 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
for (i = 0; i < 16; i++) {
hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
- if ((hw->mac.type == ixgbe_mac_82599EB) ||
- (hw->mac.type == ixgbe_mac_X540) ||
- (hw->mac.type == ixgbe_mac_X550) ||
- (hw->mac.type == ixgbe_mac_X550EM_x) ||
- (hw->mac.type == ixgbe_mac_x550em_a)) {
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540 ||
+ hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x ||
+ hw->mac.type == ixgbe_mac_x550em_a ||
+ hw->mac.type == ixgbe_mac_e610) {
hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
@@ -7250,6 +7797,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
/* OS2BMC stats are X540 and later */
hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
@@ -7432,12 +7980,9 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
return;
/* Force detection of hung controller */
- if (netif_carrier_ok(adapter->netdev)) {
+ if (netif_carrier_ok(adapter->netdev))
for (i = 0; i < adapter->num_tx_queues; i++)
set_check_for_tx_hang(adapter->tx_ring[i]);
- for (i = 0; i < adapter->num_xdp_queues; i++)
- set_check_for_tx_hang(adapter->xdp_ring[i]);
- }
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
/*
@@ -7550,6 +8095,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
case ixgbe_mac_82599EB: {
u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
@@ -7599,6 +8145,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
netif_carrier_on(netdev);
ixgbe_check_vf_rate_limit(adapter);
+ if (adapter->num_vfs && hw->mac.ops.enable_mdd)
+ hw->mac.ops.enable_mdd(hw);
+
/* enable transmits */
netif_tx_wake_all_queues(adapter->netdev);
@@ -7626,6 +8175,8 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
if (!netif_carrier_ok(netdev))
return;
+ adapter->link_down_events++;
+
/* poll for SFP+ cable when link is down */
if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
@@ -7651,13 +8202,6 @@ static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter)
return true;
}
- for (i = 0; i < adapter->num_xdp_queues; i++) {
- struct ixgbe_ring *ring = adapter->xdp_ring[i];
-
- if (ring->next_to_use != ring->next_to_clean)
- return true;
- }
-
return false;
}
@@ -7945,7 +8489,8 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
**/
static void ixgbe_service_timer(struct timer_list *t)
{
- struct ixgbe_adapter *adapter = from_timer(adapter, t, service_timer);
+ struct ixgbe_adapter *adapter = timer_container_of(adapter, t,
+ service_timer);
unsigned long next_event_offset;
/* poll faster when waiting for link */
@@ -8000,6 +8545,34 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
rtnl_unlock();
}
+static int ixgbe_check_fw_api_mismatch(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (hw->mac.type != ixgbe_mac_e610)
+ return 0;
+
+ if (hw->mac.ops.get_fw_ver && hw->mac.ops.get_fw_ver(hw))
+ return 0;
+
+ if (hw->api_maj_ver > IXGBE_FW_API_VER_MAJOR) {
+ e_dev_err("The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
+
+ adapter->flags2 |= IXGBE_FLAG2_API_MISMATCH;
+ return -EOPNOTSUPP;
+ } else if (hw->api_maj_ver == IXGBE_FW_API_VER_MAJOR &&
+ hw->api_min_ver > IXGBE_FW_API_VER_MINOR + IXGBE_FW_API_VER_DIFF_ALLOWED) {
+ e_dev_info("The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
+ adapter->flags2 |= IXGBE_FLAG2_API_MISMATCH;
+ } else if (hw->api_maj_ver < IXGBE_FW_API_VER_MAJOR ||
+ hw->api_min_ver < IXGBE_FW_API_VER_MINOR - IXGBE_FW_API_VER_DIFF_ALLOWED) {
+ e_dev_info("The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+ adapter->flags2 |= IXGBE_FLAG2_API_MISMATCH;
+ }
+
+ return 0;
+}
+
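Read as a tolerance band with, for illustration only, an expected FW API of 1.7 and IXGBE_FW_API_VER_DIFF_ALLOWED of 2 (the real values live in the E610 headers, not in this patch): FW APIs 1.5 through 1.9 would be accepted silently, 1.10 and above or 1.4 and below would be accepted with an informational message, and any major version above 1 would stop the driver with -EOPNOTSUPP.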
/**
* ixgbe_check_fw_error - Check firmware for errors
* @adapter: the adapter private structure
@@ -8010,12 +8583,14 @@ static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 fwsm;
+ int err;
/* read fwsm.ext_err_ind register and log errors */
fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
+ /* Skip if the E610's FW is reloading; a warning in that case may be misleading. */
if (fwsm & IXGBE_FWSM_EXT_ERR_IND_MASK ||
- !(fwsm & IXGBE_FWSM_FW_VAL_BIT))
+ (!(fwsm & IXGBE_FWSM_FW_VAL_BIT) && !(hw->mac.type == ixgbe_mac_e610)))
e_dev_warn("Warning firmware error detected FWSM: 0x%08X\n",
fwsm);
@@ -8023,10 +8598,53 @@ static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter)
e_dev_err("Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
return true;
}
+ if (!(adapter->flags2 & IXGBE_FLAG2_API_MISMATCH)) {
+ err = ixgbe_check_fw_api_mismatch(adapter);
+ if (err)
+ return true;
+ }
+
+ /* return here if FW rollback mode has been already detected */
+ if (adapter->flags2 & IXGBE_FLAG2_FW_ROLLBACK)
+ return false;
+
+ if (hw->mac.ops.fw_rollback_mode && hw->mac.ops.fw_rollback_mode(hw)) {
+ struct ixgbe_nvm_info *nvm_info = &adapter->hw.flash.nvm;
+ char ver_buff[64] = "";
+
+ if (hw->mac.ops.get_fw_ver && hw->mac.ops.get_fw_ver(hw))
+ goto no_version;
+
+ if (hw->mac.ops.get_nvm_ver &&
+ hw->mac.ops.get_nvm_ver(hw, nvm_info))
+ goto no_version;
+
+ snprintf(ver_buff, sizeof(ver_buff),
+ "Current version is NVM:%x.%x.%x, FW:%d.%d. ",
+ nvm_info->major, nvm_info->minor, nvm_info->eetrack,
+ hw->fw_maj_ver, hw->fw_min_ver);
+no_version:
+ e_dev_warn("Firmware rollback mode detected. %sDevice may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode.",
+ ver_buff);
+
+ adapter->flags2 |= IXGBE_FLAG2_FW_ROLLBACK;
+ }
return false;
}
+static void ixgbe_recovery_service_task(struct work_struct *work)
+{
+ struct ixgbe_adapter *adapter = container_of(work,
+ struct ixgbe_adapter,
+ service_task);
+
+ ixgbe_handle_fw_event(adapter);
+ ixgbe_service_event_complete(adapter);
+
+ mod_timer(&adapter->service_timer, jiffies + msecs_to_jiffies(100));
+}
+
/**
* ixgbe_service_task - manages and runs subtasks
* @work: pointer to work_struct containing our data
@@ -8046,11 +8664,21 @@ static void ixgbe_service_task(struct work_struct *work)
return;
}
if (ixgbe_check_fw_error(adapter)) {
- if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+ if (adapter->mii_bus) {
+ mdiobus_unregister(adapter->mii_bus);
+ adapter->mii_bus = NULL;
+ }
unregister_netdev(adapter->netdev);
+ }
ixgbe_service_event_complete(adapter);
return;
}
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ if (adapter->flags2 & IXGBE_FLAG2_FW_ASYNC_EVENT)
+ ixgbe_handle_fw_event(adapter);
+ ixgbe_check_media_subtask(adapter);
+ }
ixgbe_reset_subtask(adapter);
ixgbe_phy_interrupt_subtask(adapter);
ixgbe_sfp_detection_subtask(adapter);
@@ -8632,7 +9260,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
switch (vlan_get_protocol(skb)) {
case htons(ETH_P_FCOE):
case htons(ETH_P_FIP):
- adapter = netdev_priv(dev);
+ adapter = ixgbe_from_netdev(dev);
if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
break;
@@ -8891,7 +9519,7 @@ static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
struct net_device *netdev,
struct ixgbe_ring *ring)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_ring *tx_ring;
/*
@@ -8923,7 +9551,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
**/
static int ixgbe_set_mac(struct net_device *netdev, void *p)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
struct sockaddr *addr = p;
@@ -8941,7 +9569,7 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
static int
ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u16 value;
int rc;
@@ -8967,7 +9595,7 @@ ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
u16 addr, u16 value)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (adapter->mii_bus) {
@@ -8987,13 +9615,9 @@ static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
switch (cmd) {
- case SIOCSHWTSTAMP:
- return ixgbe_ptp_set_ts_config(adapter, req);
- case SIOCGHWTSTAMP:
- return ixgbe_ptp_get_ts_config(adapter, req);
case SIOCGMIIPHY:
if (!adapter->hw.phy.ops.read_reg)
return -EOPNOTSUPP;
@@ -9013,7 +9637,7 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
static int ixgbe_add_sanmac_netdev(struct net_device *dev)
{
int err = 0;
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
if (is_valid_ether_addr(hw->mac.san_addr)) {
@@ -9037,7 +9661,7 @@ static int ixgbe_add_sanmac_netdev(struct net_device *dev)
static int ixgbe_del_sanmac_netdev(struct net_device *dev)
{
int err = 0;
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_mac_info *mac = &adapter->hw.mac;
if (is_valid_ether_addr(mac->san_addr)) {
@@ -9068,7 +9692,7 @@ static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,
static void ixgbe_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int i;
rcu_read_lock();
@@ -9111,7 +9735,7 @@ static void ixgbe_get_stats64(struct net_device *netdev,
static int ixgbe_ndo_get_vf_stats(struct net_device *netdev, int vf,
struct ifla_vf_stats *vf_stats)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (vf < 0 || vf >= adapter->num_vfs)
return -EINVAL;
@@ -9228,7 +9852,7 @@ static int ixgbe_reassign_macvlan_pool(struct net_device *vdev,
static void ixgbe_defrag_macvlan_pools(struct net_device *dev)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct netdev_nested_priv priv = {
.data = (void *)adapter,
};
@@ -9249,7 +9873,7 @@ static void ixgbe_defrag_macvlan_pools(struct net_device *dev)
*/
int ixgbe_setup_tc(struct net_device *dev, u8 tc)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
/* Hardware supports up to 8 traffic classes */
@@ -9807,7 +10431,7 @@ static LIST_HEAD(ixgbe_block_cb_list);
static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
switch (type) {
case TC_SETUP_BLOCK:
@@ -9835,7 +10459,7 @@ void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
#endif
void ixgbe_do_reset(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (netif_running(netdev))
ixgbe_reinit_locked(adapter);
@@ -9846,7 +10470,7 @@ void ixgbe_do_reset(struct net_device *netdev)
static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
netdev_features_t features)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
if (!(features & NETIF_F_RXCSUM))
@@ -9883,7 +10507,7 @@ static void ixgbe_reset_l2fw_offload(struct ixgbe_adapter *adapter)
static int ixgbe_set_features(struct net_device *netdev,
netdev_features_t features)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
netdev_features_t changed = netdev->features ^ features;
bool need_reset = false;
@@ -9954,12 +10578,12 @@ static int ixgbe_set_features(struct net_device *netdev,
static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
- u16 flags,
+ u16 flags, bool *notified,
struct netlink_ext_ack *extack)
{
/* guarantee we can provide a unique filter for the unicast address */
if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
u16 pool = VMDQ_P(0);
if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool))
@@ -10047,7 +10671,7 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
struct nlmsghdr *nlh, u16 flags,
struct netlink_ext_ack *extack)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct nlattr *attr, *br_spec;
int rem;
@@ -10075,7 +10699,7 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev,
u32 filter_mask, int nlflags)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
return 0;
@@ -10087,7 +10711,7 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(pdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(pdev);
struct ixgbe_fwd_adapter *accel;
int tcs = adapter->hw_tcs ? : 1;
int pool, err;
@@ -10184,7 +10808,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
{
struct ixgbe_fwd_adapter *accel = priv;
- struct ixgbe_adapter *adapter = netdev_priv(pdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(pdev);
unsigned int rxbase = accel->rx_base_queue;
unsigned int i;
@@ -10262,7 +10886,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
{
int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct bpf_prog *old_prog;
bool need_reset;
int num_queues;
@@ -10334,7 +10958,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
switch (xdp->command) {
case XDP_SETUP_PROG:
@@ -10369,7 +10993,7 @@ void ixgbe_xdp_ring_update_tail_locked(struct ixgbe_ring *ring)
static int ixgbe_xdp_xmit(struct net_device *dev, int n,
struct xdp_frame **frames, u32 flags)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_ring *ring;
int nxmit = 0;
int i;
@@ -10377,6 +11001,10 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
return -ENETDOWN;
+ if (!netif_carrier_ok(adapter->netdev) ||
+ !netif_running(adapter->netdev))
+ return -ENETDOWN;
+
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
@@ -10457,6 +11085,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_bpf = ixgbe_xdp,
.ndo_xdp_xmit = ixgbe_xdp_xmit,
.ndo_xsk_wakeup = ixgbe_xsk_wakeup,
+ .ndo_hwtstamp_get = ixgbe_ptp_hwtstamp_get,
+ .ndo_hwtstamp_set = ixgbe_ptp_hwtstamp_set,
};
static void ixgbe_disable_txr_hw(struct ixgbe_adapter *adapter,
@@ -10659,7 +11289,7 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
* ixgbe_enumerate_functions - Get the number of ports this device has
* @adapter: adapter structure
*
- * This function enumerates the phsyical functions co-located on a single slot,
+ * This function enumerates the physical functions co-located on a single slot,
* in order to determine how many ports a device has. This is most useful in
* determining the required GT/s of PCIe bandwidth necessary for optimal
* performance.
@@ -10770,6 +11400,24 @@ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
}
/**
+ * ixgbe_set_fw_version_e610 - Set FW version specifically on E610 adapters
+ * @adapter: the adapter private structure
+ *
+ * This function is used by probe and ethtool to determine the FW version
+ * to format and display. The FW version is taken from the EEPROM/NVM.
+ *
+ */
+void ixgbe_set_fw_version_e610(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_orom_info *orom = &adapter->hw.flash.orom;
+ struct ixgbe_nvm_info *nvm = &adapter->hw.flash.nvm;
+
+ snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ "%x.%02x 0x%x %d.%d.%d", nvm->major, nvm->minor,
+ nvm->eetrack, orom->major, orom->build, orom->patch);
+}
+
+/**
* ixgbe_set_fw_version - Set FW version
* @adapter: the adapter private structure
*
@@ -10781,6 +11429,11 @@ static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_nvm_version nvm_ver;
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ ixgbe_set_fw_version_e610(adapter);
+ return;
+ }
+
ixgbe_get_oem_prod_version(hw, &nvm_ver);
if (nvm_ver.oem_valid) {
snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
@@ -10805,6 +11458,66 @@ static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_recovery_probe - Handle FW recovery mode during probe
+ * @adapter: the adapter private structure
+ *
+ * Perform limited driver initialization when FW error is detected.
+ *
+ * Return: 0 on successful probe for E610, -EIO if recovery mode is detected
+ * for a non-E610 adapter, or an error status code in any other case.
+ */
+static int ixgbe_recovery_probe(struct ixgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool disable_dev;
+ int err = -EIO;
+
+ if (hw->mac.type != ixgbe_mac_e610)
+ goto clean_up_probe;
+
+ ixgbe_get_hw_control(adapter);
+ mutex_init(&hw->aci.lock);
+ err = ixgbe_get_flash_data(&adapter->hw);
+ if (err)
+ goto shutdown_aci;
+
+ timer_setup(&adapter->service_timer, ixgbe_service_timer, 0);
+ INIT_WORK(&adapter->service_task, ixgbe_recovery_service_task);
+ set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
+ clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
+
+ if (hw->mac.ops.get_bus_info)
+ hw->mac.ops.get_bus_info(hw);
+
+ pci_set_drvdata(pdev, adapter);
+ /* We are creating a devlink interface so the NIC can be managed,
+ * e.g. a new NVM image can be loaded.
+ */
+ devl_lock(adapter->devlink);
+ ixgbe_devlink_register_port(adapter);
+ SET_NETDEV_DEVLINK_PORT(adapter->netdev,
+ &adapter->devlink_port);
+ ixgbe_devlink_init_regions(adapter);
+ devl_register(adapter->devlink);
+ devl_unlock(adapter->devlink);
+
+ return 0;
+shutdown_aci:
+ mutex_destroy(&adapter->hw.aci.lock);
+ ixgbe_release_hw_control(adapter);
+clean_up_probe:
+ disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
+ free_netdev(netdev);
+ devlink_free(adapter->devlink);
+ pci_release_mem_regions(pdev);
+ if (disable_dev)
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
* ixgbe_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in ixgbe_pci_tbl
@@ -10818,6 +11531,7 @@ static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
+ struct ixgbe_netdevice_priv *netdev_priv_wrapper;
struct ixgbe_adapter *adapter = NULL;
struct ixgbe_hw *hw;
const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
@@ -10867,9 +11581,17 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#else
indices = IXGBE_MAX_RSS_INDICES;
#endif
+ } else if (ii->mac == ixgbe_mac_e610) {
+ indices = IXGBE_MAX_RSS_INDICES_X550;
+ }
+
+ adapter = ixgbe_allocate_devlink(&pdev->dev);
+ if (IS_ERR(adapter)) {
+ err = PTR_ERR(adapter);
+ goto err_devlink;
}
- netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
+ netdev = alloc_etherdev_mq(sizeof(*netdev_priv_wrapper), indices);
if (!netdev) {
err = -ENOMEM;
goto err_alloc_etherdev;
@@ -10877,7 +11599,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
SET_NETDEV_DEV(netdev, &pdev->dev);
- adapter = netdev_priv(netdev);
+ netdev_priv_wrapper = netdev_priv(netdev);
+ netdev_priv_wrapper->adapter = adapter;
adapter->netdev = netdev;
adapter->pdev = pdev;
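The many netdev_priv() to ixgbe_from_netdev() conversions throughout this patch follow from the change above: the adapter is now allocated together with the devlink instance, and the netdev private area only carries a pointer to it. A hedged sketch of what the wrapper and accessor presumably look like (the actual definitions live in ixgbe.h, which is not part of this hunk):

struct ixgbe_netdevice_priv {
        struct ixgbe_adapter *adapter;
};

static inline struct ixgbe_adapter *ixgbe_from_netdev(struct net_device *netdev)
{
        struct ixgbe_netdevice_priv *priv = netdev_priv(netdev);

        return priv->adapter;
}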
@@ -10893,11 +11616,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_ioremap;
}
- netdev->netdev_ops = &ixgbe_netdev_ops;
- ixgbe_set_ethtool_ops(netdev);
- netdev->watchdog_timeo = 5 * HZ;
- strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
-
/* Setup hw api */
hw->mac.ops = *ii->mac_ops;
hw->mac.type = ii->mac;
@@ -10927,17 +11645,36 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->phy.mdio.mdio_read = ixgbe_mdio_read;
hw->phy.mdio.mdio_write = ixgbe_mdio_write;
+ netdev->netdev_ops = &ixgbe_netdev_ops;
+ ixgbe_set_ethtool_ops(netdev);
+ netdev->watchdog_timeo = 5 * HZ;
+ strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
+
/* setup the private structure */
err = ixgbe_sw_init(adapter, ii);
if (err)
goto err_sw_init;
+ if (ixgbe_check_fw_error(adapter))
+ return ixgbe_recovery_probe(adapter);
+
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ err = ixgbe_get_caps(&adapter->hw);
+ if (err)
+ dev_err(&pdev->dev, "ixgbe_get_caps failed %d\n", err);
+
+ err = ixgbe_get_flash_data(&adapter->hw);
+ if (err)
+ goto err_sw_init;
+ }
+
if (adapter->hw.mac.type == ixgbe_mac_82599EB)
adapter->flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF;
switch (adapter->hw.mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_e610:
netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550;
break;
case ixgbe_mac_x550em_a:
@@ -10947,10 +11684,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
- /* Make sure the SWFW semaphore is in a valid state */
- if (hw->mac.ops.init_swfw_sync)
- hw->mac.ops.init_swfw_sync(hw);
-
/* Make it possible the adapter to be woken up via WOL */
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
@@ -10958,6 +11691,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
break;
default:
@@ -11102,11 +11836,6 @@ skip_sriov:
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
netdev->features |= NETIF_F_LRO;
- if (ixgbe_check_fw_error(adapter)) {
- err = -EIO;
- goto err_sw_init;
- }
-
/* make sure the EEPROM is good */
if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
e_dev_err("The EEPROM Checksum Is Not Valid\n");
@@ -11187,7 +11916,7 @@ skip_sriov:
if (expected_gts > 0)
ixgbe_check_minimum_link(adapter, expected_gts);
- err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
+ err = hw->eeprom.ops.read_pba_string(hw, part_str, sizeof(part_str));
if (err)
strscpy(part_str, "Unknown", sizeof(part_str));
if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
@@ -11213,6 +11942,11 @@ skip_sriov:
}
strcpy(netdev->name, "eth%d");
pci_set_drvdata(pdev, adapter);
+
+ devl_lock(adapter->devlink);
+ ixgbe_devlink_register_port(adapter);
+ SET_NETDEV_DEVLINK_PORT(adapter->netdev, &adapter->devlink_port);
+
err = register_netdev(netdev);
if (err)
goto err_register;
@@ -11267,14 +12001,25 @@ skip_sriov:
if (err)
goto err_netdev;
+ ixgbe_devlink_init_regions(adapter);
+ devl_register(adapter->devlink);
+ devl_unlock(adapter->devlink);
+
+ if (ixgbe_fwlog_init(hw))
+ e_dev_info("Firmware logging not supported\n");
+
return 0;
err_netdev:
unregister_netdev(netdev);
err_register:
+ devl_port_unregister(&adapter->devlink_port);
+ devl_unlock(adapter->devlink);
ixgbe_release_hw_control(adapter);
ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
+ if (hw->mac.type == ixgbe_mac_e610)
+ mutex_destroy(&adapter->hw.aci.lock);
ixgbe_disable_sriov(adapter);
adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
iounmap(adapter->io_addr);
@@ -11286,7 +12031,9 @@ err_ioremap:
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
err_alloc_etherdev:
+ devlink_free(adapter->devlink);
pci_release_mem_regions(pdev);
+err_devlink:
err_pci_reg:
err_dma:
if (!adapter || disable_dev)
@@ -11299,7 +12046,7 @@ err_dma:
* @pdev: PCI device information struct
*
* ixgbe_remove is called by the PCI subsystem to alert the driver
- * that it should release a PCI device. The could be caused by a
+ * that it should release a PCI device. This could be caused by a
* Hot-Plug event, or because the driver is going to be removed from
* memory.
**/
@@ -11315,11 +12062,18 @@ static void ixgbe_remove(struct pci_dev *pdev)
return;
netdev = adapter->netdev;
+ devl_lock(adapter->devlink);
+ devl_unregister(adapter->devlink);
+ ixgbe_devlink_destroy_regions(adapter);
+ ixgbe_fwlog_deinit(&adapter->hw);
ixgbe_dbg_adapter_exit(adapter);
set_bit(__IXGBE_REMOVING, &adapter->state);
cancel_work_sync(&adapter->service_task);
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ ixgbe_disable_link_status_events(adapter);
+
if (adapter->mii_bus)
mdiobus_unregister(adapter->mii_bus);
@@ -11345,6 +12099,9 @@ static void ixgbe_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
+ devl_port_unregister(&adapter->devlink_port);
+ devl_unlock(adapter->devlink);
+
ixgbe_stop_ipsec_offload(adapter);
ixgbe_clear_interrupt_scheme(adapter);
@@ -11374,8 +12131,13 @@ static void ixgbe_remove(struct pci_dev *pdev)
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ mutex_destroy(&adapter->hw.aci.lock);
+
if (disable_dev)
pci_disable_device(pdev);
+
+ devlink_free(adapter->devlink);
}
/**
@@ -11451,6 +12213,9 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
case ixgbe_mac_x550em_a:
device_id = IXGBE_DEV_ID_X550EM_A_VF;
break;
+ case ixgbe_mac_e610:
+ device_id = IXGBE_DEV_ID_E610_VF;
+ break;
default:
device_id = 0;
break;
@@ -11533,7 +12298,6 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
adapter->hw.hw_addr = adapter->io_addr;
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
pci_wake_from_d3(pdev, false);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index d67d77e5dacc..788b5af07c70 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -283,6 +283,7 @@ static int ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
break;
default:
@@ -407,6 +408,7 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
hw->mac.type != ixgbe_mac_X550 &&
hw->mac.type != ixgbe_mac_X550EM_x &&
hw->mac.type != ixgbe_mac_x550em_a &&
+ hw->mac.type != ixgbe_mac_e610 &&
hw->mac.type != ixgbe_mac_X540)
return;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index bd205306934b..0334ed4b8fa3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -4,7 +4,7 @@
#ifndef _IXGBE_MBX_H_
#define _IXGBE_MBX_H_
-#include "ixgbe_type.h"
+#include <linux/types.h>
#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
@@ -34,7 +34,7 @@
#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
clear to send requests */
#define IXGBE_VT_MSGINFO_SHIFT 16
-/* bits 23:16 are used for exra info for certain messages */
+/* bits 23:16 are used for extra info for certain messages */
#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
/* definitions to support mailbox API version negotiation */
@@ -50,6 +50,9 @@ enum ixgbe_pfvf_api_rev {
ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */
ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */
ixgbe_mbox_api_14, /* API version 1.4, linux/freebsd VF driver */
+ ixgbe_mbox_api_15, /* API version 1.5, linux/freebsd VF driver */
+ ixgbe_mbox_api_16, /* API version 1.6, linux/freebsd VF driver */
+ ixgbe_mbox_api_17, /* API version 1.7, linux/freebsd VF driver */
/* This value should always be last */
ixgbe_mbox_api_unknown, /* indicates that API version is not known */
};
@@ -86,6 +89,12 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */
+/* mailbox API, version 1.6 VF requests */
+#define IXGBE_VF_GET_PF_LINK_STATE 0x11 /* request PF to send link info */
+
+/* mailbox API, version 1.7 VF requests */
+#define IXGBE_VF_FEATURES_NEGOTIATE 0x12 /* get features supported by PF */
+
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
@@ -96,6 +105,14 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
+/* features negotiated between PF/VF */
+#define IXGBEVF_PF_SUP_IPSEC BIT(0)
+#define IXGBEVF_PF_SUP_ESX_MBX BIT(1)
+
+#define IXGBE_SUPPORTED_FEATURES IXGBEVF_PF_SUP_IPSEC
+
+struct ixgbe_hw;
+
int ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
int ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
int ixgbe_check_for_msg(struct ixgbe_hw *, u16);
@@ -105,6 +122,18 @@ int ixgbe_check_for_rst(struct ixgbe_hw *, u16);
void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
#endif /* CONFIG_PCI_IOV */
+struct ixgbe_mbx_operations {
+ int (*init_params)(struct ixgbe_hw *hw);
+ int (*read)(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 vf_number);
+ int (*write)(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 vf_number);
+ int (*read_posted)(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id);
+ int (*write_posted)(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id);
+ int (*check_for_msg)(struct ixgbe_hw *hw, u16 vf_number);
+ int (*check_for_ack)(struct ixgbe_hw *hw, u16 vf_number);
+ int (*check_for_rst)(struct ixgbe_hw *hw, u16 vf_number);
+};
+
extern const struct ixgbe_mbx_operations mbx_ops_generic;
#endif /* _IXGBE_MBX_H_ */
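The header now defines mailbox API versions 1.5 through 1.7 together with the new IXGBE_VF_GET_PF_LINK_STATE and IXGBE_VF_FEATURES_NEGOTIATE requests. A hedged sketch of how a VF driver is expected to consume these values, negotiating newest-first and falling back until the PF accepts one (the ixgbevf side is not part of this excerpt, so the array below is illustrative only):

	/* illustrative: candidate API versions in the order a VF would try them */
	static const int ixgbevf_api_prefs_sketch[] = {
		ixgbe_mbox_api_17,	/* adds IXGBE_VF_FEATURES_NEGOTIATE */
		ixgbe_mbox_api_16,	/* adds IXGBE_VF_GET_PF_LINK_STATE */
		ixgbe_mbox_api_14,
		ixgbe_mbox_api_13,
		ixgbe_mbox_api_12,
	};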
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 07eaa3c3f4d3..2449e4cf2679 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -167,7 +167,7 @@ int ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 val, bool lock)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
- int max_retry = 1;
+ int max_retry = 3;
int retry = 0;
u8 reg_high;
u8 csum;
@@ -1117,7 +1117,7 @@ int ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
MDIO_MMD_AN, &autoneg_reg);
- if (hw->mac.type == ixgbe_mac_X550) {
+ if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_e610) {
/* Set or unset auto-negotiation 5G advertisement */
autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE;
if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) &&
@@ -1233,6 +1233,7 @@ static int ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
switch (hw->mac.type) {
case ixgbe_mac_X550:
+ case ixgbe_mac_e610:
hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
break;
@@ -1322,7 +1323,7 @@ int ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
* @hw: pointer to hardware structure
*
* Restart autonegotiation and PHY and waits for completion.
- * This function always returns success, this is nessary since
+ * This function always returns success, this is necessary since
* it is called via a function pointer that could call other
* functions that could return an error.
**/
@@ -2284,7 +2285,7 @@ static int ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data, bool lock)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
- u32 max_retry = 1;
+ u32 max_retry = 3;
u32 retry = 0;
int status;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 14aa2ca51f70..81179c60af4e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -40,7 +40,7 @@
#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
#define IXGBE_SFF_1GBASET_CAPABLE 0x8
-#define IXGBE_SFF_BASEBX10_CAPABLE 0x64
+#define IXGBE_SFF_BASEBX10_CAPABLE 0x40
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 9339edbd9082..6885d2343c48 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -140,6 +140,7 @@
* proper mult and shift to convert the cycles into nanoseconds of time.
*/
#define IXGBE_X550_BASE_PERIOD 0xC80000000ULL
+#define IXGBE_E610_BASE_PERIOD 0x333333333ULL
#define INCVALUE_MASK 0x7FFFFFFF
#define ISGN 0x80000000
@@ -326,7 +327,7 @@ static void ixgbe_ptp_setup_sdp_X550(struct ixgbe_adapter *adapter)
* result of SYSTIME is 32bits of "billions of cycles" and 32 bits of
* "cycles", rather than seconds and nanoseconds.
*/
-static u64 ixgbe_ptp_read_X550(const struct cyclecounter *cc)
+static u64 ixgbe_ptp_read_X550(struct cyclecounter *cc)
{
struct ixgbe_adapter *adapter =
container_of(cc, struct ixgbe_adapter, hw_cc);
@@ -363,7 +364,7 @@ static u64 ixgbe_ptp_read_X550(const struct cyclecounter *cc)
* cyclecounter structure used to construct a ns counter from the
* arbitrary fixed point registers
*/
-static u64 ixgbe_ptp_read_82599(const struct cyclecounter *cc)
+static u64 ixgbe_ptp_read_82599(struct cyclecounter *cc)
{
struct ixgbe_adapter *adapter =
container_of(cc, struct ixgbe_adapter, hw_cc);
@@ -415,6 +416,7 @@ static void ixgbe_ptp_convert_to_hwtstamp(struct ixgbe_adapter *adapter,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
/* Upper 32 bits represent billions of cycles, lower 32 bits
* represent cycles. However, we use timespec64_to_ns for the
* correct math even though the units haven't been corrected
@@ -492,11 +494,13 @@ static int ixgbe_ptp_adjfine_X550(struct ptp_clock_info *ptp, long scaled_ppm)
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
struct ixgbe_hw *hw = &adapter->hw;
+ u64 rate, base;
bool neg_adj;
- u64 rate;
u32 inca;
- neg_adj = diff_by_scaled_ppm(IXGBE_X550_BASE_PERIOD, scaled_ppm, &rate);
+ base = hw->mac.type == ixgbe_mac_e610 ? IXGBE_E610_BASE_PERIOD :
+ IXGBE_X550_BASE_PERIOD;
+ neg_adj = diff_by_scaled_ppm(base, scaled_ppm, &rate);
/* warn if rate is too large */
if (rate >= INCVALUE_MASK)
@@ -559,6 +563,7 @@ static int ixgbe_ptp_gettimex(struct ptp_clock_info *ptp,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
/* Upper 32 bits represent billions of cycles, lower 32 bits
* represent cycles. However, we use timespec64_to_ns for the
* correct math even though the units haven't been corrected
@@ -636,7 +641,7 @@ static int ixgbe_ptp_feature_enable(struct ptp_clock_info *ptp,
* disabled
*/
if (rq->type != PTP_CLK_REQ_PPS || !adapter->ptp_setup_sdp)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (on)
adapter->flags2 |= IXGBE_FLAG2_PTP_PPS_ENABLED;
@@ -931,20 +936,22 @@ void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *q_vector,
}
/**
- * ixgbe_ptp_get_ts_config - get current hardware timestamping configuration
- * @adapter: pointer to adapter structure
- * @ifr: ioctl data
+ * ixgbe_ptp_hwtstamp_get - get current hardware timestamping configuration
+ * @netdev: pointer to net device structure
+ * @config: timestamping configuration structure
*
* This function returns the current timestamping settings. Rather than
* attempt to deconstruct registers to fill in the values, simply keep a copy
* of the old settings around, and return a copy when requested.
*/
-int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
+int ixgbe_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config *config = &adapter->tstamp_config;
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+
+ *config = adapter->tstamp_config;
- return copy_to_user(ifr->ifr_data, config,
- sizeof(*config)) ? -EFAULT : 0;
+ return 0;
}
/**
@@ -973,7 +980,7 @@ int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
* mode, if required to support the specifically requested mode.
*/
static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
@@ -1067,6 +1074,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
/* enable timestamping all packets only if at least some
* packets were requested. Otherwise, play nice and disable
* timestamping
@@ -1123,31 +1131,29 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
}
/**
- * ixgbe_ptp_set_ts_config - user entry point for timestamp mode
- * @adapter: pointer to adapter struct
- * @ifr: ioctl data
+ * ixgbe_ptp_hwtstamp_set - user entry point for timestamp mode
+ * @netdev: pointer to net device structure
+ * @config: timestamping configuration structure
+ * @extack: netlink extended ack structure for error reporting
*
* Set hardware to requested mode. If unsupported, return an error with no
* changes. Otherwise, store the mode for future reference.
*/
-int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
+int ixgbe_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int err;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- err = ixgbe_ptp_set_timestamp_mode(adapter, &config);
+ err = ixgbe_ptp_set_timestamp_mode(adapter, config);
if (err)
return err;
/* save these settings for future reference */
- memcpy(&adapter->tstamp_config, &config,
- sizeof(adapter->tstamp_config));
+ adapter->tstamp_config = *config;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
static void ixgbe_ptp_link_speed_adjust(struct ixgbe_adapter *adapter,
@@ -1233,6 +1239,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
fallthrough;
case ixgbe_mac_x550em_a:
case ixgbe_mac_X550:
+ case ixgbe_mac_e610:
cc.read = ixgbe_ptp_read_X550;
break;
case ixgbe_mac_X540:
@@ -1280,6 +1287,7 @@ static void ixgbe_ptp_init_systime(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
case ixgbe_mac_X550:
+ case ixgbe_mac_e610:
tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
/* Reset SYSTIME registers to 0 */
@@ -1407,6 +1415,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 30000000;
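The ixgbe_ptp.c changes above convert the old ioctl-backed ixgbe_ptp_get_ts_config()/ixgbe_ptp_set_ts_config() pair into ixgbe_ptp_hwtstamp_get()/ixgbe_ptp_hwtstamp_set(), which take a struct kernel_hwtstamp_config directly and therefore drop the copy_from_user()/copy_to_user() calls. A sketch of the expected wiring; the net_device_ops hunk lives elsewhere in the series, so this is an assumption rather than a quote from the patch:

	static const struct net_device_ops ixgbe_netdev_ops_sketch = {
		/* ... existing callbacks unchanged ... */
		.ndo_hwtstamp_get	= ixgbe_ptp_hwtstamp_get,
		.ndo_hwtstamp_set	= ixgbe_ptp_hwtstamp_set,
	};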
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index e71715f5da22..ee133d6749b3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -18,6 +18,7 @@
#include "ixgbe.h"
#include "ixgbe_type.h"
+#include "ixgbe_mbx.h"
#include "ixgbe_sriov.h"
#ifdef CONFIG_PCI_IOV
@@ -206,6 +207,7 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs)
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
unsigned int num_vfs = adapter->num_vfs, vf;
+ struct ixgbe_hw *hw = &adapter->hw;
unsigned long flags;
int rss;
@@ -236,6 +238,9 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
return 0;
+ if (hw->mac.ops.disable_mdd)
+ hw->mac.ops.disable_mdd(hw);
+
#ifdef CONFIG_PCI_IOV
/*
* If our VFs are assigned we cannot shut down SR-IOV
@@ -505,6 +510,8 @@ static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
/* Version 1.1 supports jumbo frames on VFs if PF has
* jumbo frames enabled which means legacy VFs are
* disabled
@@ -701,7 +708,7 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
u32 reg_val;
u32 queue;
- /* remove VLAN filters beloning to this VF */
+ /* remove VLAN filters belonging to this VF */
ixgbe_clear_vf_vlans(adapter, vf);
/* add back PF assigned VLAN or VLAN 0 */
@@ -1041,13 +1048,15 @@ static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
adapter->vfinfo[vf].vf_api = api;
return 0;
default:
break;
}
- e_info(drv, "VF %d requested invalid api version %u\n", vf, api);
+ e_dbg(drv, "VF %d requested unsupported api version %u\n", vf, api);
return -1;
}
@@ -1067,6 +1076,8 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
break;
default:
return -1;
@@ -1107,6 +1118,8 @@ static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
/* verify the PF is supporting the correct API */
switch (adapter->vfinfo[vf].vf_api) {
+ case ixgbe_mbox_api_17:
+ case ixgbe_mbox_api_16:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_12:
@@ -1140,6 +1153,8 @@ static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter,
/* verify the PF is supporting the correct API */
switch (adapter->vfinfo[vf].vf_api) {
+ case ixgbe_mbox_api_17:
+ case ixgbe_mbox_api_16:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_12:
@@ -1169,6 +1184,8 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
fallthrough;
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
break;
default:
return -EOPNOTSUPP;
@@ -1239,6 +1256,8 @@ static int ixgbe_get_vf_link_state(struct ixgbe_adapter *adapter,
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
break;
default:
return -EOPNOTSUPP;
@@ -1249,6 +1268,65 @@ static int ixgbe_get_vf_link_state(struct ixgbe_adapter *adapter,
return 0;
}
+/**
+ * ixgbe_send_vf_link_status - send link status data to VF
+ * @adapter: pointer to adapter struct
+ * @msgbuf: pointer to message buffers
+ * @vf: VF identifier
+ *
+ * Reply for IXGBE_VF_GET_PF_LINK_STATE mbox command sending link status data.
+ *
+ * Return: 0 on success or -EOPNOTSUPP when operation is not supported.
+ */
+static int ixgbe_send_vf_link_status(struct ixgbe_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ switch (adapter->vfinfo[vf].vf_api) {
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
+ if (hw->mac.type != ixgbe_mac_e610)
+ return -EOPNOTSUPP;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+	/* Simply provide the stored values; the watchdog and the link status
+	 * events keep them fresh.
+	 */
+ msgbuf[1] = adapter->link_speed;
+ msgbuf[2] = adapter->link_up;
+
+ return 0;
+}
+
+/**
+ * ixgbe_negotiate_vf_features - negotiate supported features with VF driver
+ * @adapter: pointer to adapter struct
+ * @msgbuf: pointer to message buffers
+ * @vf: VF identifier
+ *
+ * Return: 0 on success or -EOPNOTSUPP when operation is not supported.
+ */
+static int ixgbe_negotiate_vf_features(struct ixgbe_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ u32 features = msgbuf[1];
+
+ switch (adapter->vfinfo[vf].vf_api) {
+ case ixgbe_mbox_api_17:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ features &= IXGBE_SUPPORTED_FEATURES;
+ msgbuf[1] = features;
+
+ return 0;
+}
+
static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
{
u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
@@ -1323,6 +1401,12 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
case IXGBE_VF_IPSEC_DEL:
retval = ixgbe_ipsec_vf_del_sa(adapter, msgbuf, vf);
break;
+ case IXGBE_VF_GET_PF_LINK_STATE:
+ retval = ixgbe_send_vf_link_status(adapter, msgbuf, vf);
+ break;
+ case IXGBE_VF_FEATURES_NEGOTIATE:
+ retval = ixgbe_negotiate_vf_features(adapter, msgbuf, vf);
+ break;
default:
e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
retval = -EIO;
@@ -1352,12 +1436,59 @@ static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
ixgbe_write_mbx(hw, &msg, 1, vf);
}
+/**
+ * ixgbe_check_mdd_event - check for MDD event on all VFs
+ * @adapter: pointer to ixgbe adapter
+ *
+ * Return: true if there is a VF on which MDD event occurred, false otherwise.
+ */
+bool ixgbe_check_mdd_event(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ DECLARE_BITMAP(vf_bitmap, 64);
+ bool ret = false;
+ int i;
+
+ if (!hw->mac.ops.handle_mdd)
+ return false;
+
+ /* Did we have a malicious event */
+ bitmap_zero(vf_bitmap, 64);
+ hw->mac.ops.handle_mdd(hw, vf_bitmap);
+
+ /* Log any blocked queues and release lock */
+ for_each_set_bit(i, vf_bitmap, 64) {
+ dev_warn(&adapter->pdev->dev,
+ "Malicious event on VF %d tx:%x rx:%x\n", i,
+ IXGBE_READ_REG(hw, IXGBE_LVMMC_TX),
+ IXGBE_READ_REG(hw, IXGBE_LVMMC_RX));
+
+ if (hw->mac.ops.restore_mdd_vf) {
+ u32 ping;
+
+ hw->mac.ops.restore_mdd_vf(hw, i);
+
+ /* get the VF to rebuild its queues */
+ adapter->vfinfo[i].clear_to_send = 0;
+ ping = IXGBE_PF_CONTROL_MSG |
+ IXGBE_VT_MSGTYPE_CTS;
+ ixgbe_write_mbx(hw, &ping, 1, i);
+ }
+
+ ret = true;
+ }
+
+ return ret;
+}
+
void ixgbe_msg_task(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
unsigned long flags;
u32 vf;
+ ixgbe_check_mdd_event(adapter);
+
spin_lock_irqsave(&adapter->vfs_lock, flags);
for (vf = 0; vf < adapter->num_vfs; vf++) {
/* process any reset requests */
@@ -1417,7 +1548,7 @@ void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter)
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int retval;
if (vf >= adapter->num_vfs)
@@ -1525,7 +1656,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
u8 qos, __be16 vlan_proto)
{
int err = 0;
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
return -EINVAL;
@@ -1643,7 +1774,7 @@ void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
int max_tx_rate)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int link_speed;
/* verify VF is active */
@@ -1678,7 +1809,7 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (vf >= adapter->num_vfs)
@@ -1756,7 +1887,7 @@ void ixgbe_set_vf_link_state(struct ixgbe_adapter *adapter, int vf, int state)
**/
int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int ret = 0;
if (vf < 0 || vf >= adapter->num_vfs) {
@@ -1793,7 +1924,7 @@ int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state)
int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
bool setting)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
/* This operation is currently supported only for 82599 and x540
* devices.
@@ -1812,7 +1943,7 @@ int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (vf >= adapter->num_vfs)
return -EINVAL;
@@ -1835,7 +1966,7 @@ int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (vf >= adapter->num_vfs)
return -EINVAL;
ivi->vf = vf;
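ixgbe_negotiate_vf_features() above implements the PF half of the new IXGBE_VF_FEATURES_NEGOTIATE exchange: it masks the bits the VF asked for with IXGBE_SUPPORTED_FEATURES and echoes the result back in msgbuf[1]. A minimal sketch of the VF-side view (not part of these PF hunks; the helper name is illustrative):

	/* illustrative VF-side view of the feature handshake */
	static u32 ixgbevf_negotiated_features_sketch(u32 requested)
	{
		/* msgbuf[0] = IXGBE_VF_FEATURES_NEGOTIATE, msgbuf[1] = requested;
		 * the PF clears every bit it does not support, so the VF simply
		 * adopts whatever comes back in msgbuf[1].
		 */
		return requested & IXGBE_SUPPORTED_FEATURES;
	}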
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 0690ecb8dfa3..bc4cab976bf9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -15,6 +15,7 @@
#ifdef CONFIG_PCI_IOV
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
#endif
+bool ixgbe_check_mdd_event(struct ixgbe_adapter *adapter);
void ixgbe_msg_task(struct ixgbe_adapter *adapter);
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
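ixgbe_check_mdd_event() is exported through this header, while ixgbe_msg_task() above ignores its return value, so the boolean is presumably consumed by the service-task path in ixgbe_main.c (not shown in this excerpt). A sketch of such a caller, offered purely as an assumption:

	/* hypothetical service-task caller */
	if (ixgbe_check_mdd_event(adapter))
		e_warn(drv, "malicious driver detection event handled\n");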
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 346e3d9114a8..b1bfeb21537a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef _IXGBE_TYPE_H_
#define _IXGBE_TYPE_H_
@@ -7,6 +7,8 @@
#include <linux/types.h>
#include <linux/mdio.h>
#include <linux/netdevice.h>
+#include <linux/net/intel/libie/fwlog.h>
+#include "ixgbe_type_e610.h"
/* Device IDs */
#define IXGBE_DEV_ID_82598 0x10B6
@@ -71,12 +73,19 @@
#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15E4
#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15E5
+#define IXGBE_DEV_ID_E610_BACKPLANE 0x57AE
+#define IXGBE_DEV_ID_E610_SFP 0x57AF
+#define IXGBE_DEV_ID_E610_10G_T 0x57B0
+#define IXGBE_DEV_ID_E610_2_5G_T 0x57B1
+#define IXGBE_DEV_ID_E610_SGMII 0x57B2
+
/* VF Device IDs */
#define IXGBE_DEV_ID_82599_VF 0x10ED
#define IXGBE_DEV_ID_X540_VF 0x1515
#define IXGBE_DEV_ID_X550_VF 0x1565
#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5
+#define IXGBE_DEV_ID_E610_VF 0x57AD
#define IXGBE_CAT(r, m) IXGBE_##r##_##m
@@ -230,7 +239,7 @@ struct ixgbe_thermal_sensor_data {
#define NVM_VER_INVALID 0xFFFF
#define NVM_ETK_VALID 0x8000
#define NVM_INVALID_PTR 0xFFFF
-#define NVM_VER_SIZE 32 /* version sting size */
+#define NVM_VER_SIZE 32 /* version string size */
struct ixgbe_nvm_version {
u32 etk_id;
@@ -394,6 +403,8 @@ struct ixgbe_nvm_version {
#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4))
#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4))
#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4))
+#define IXGBE_LVMMC_RX 0x2FA8
+#define IXGBE_LVMMC_TX 0x8108
#define IXGBE_WQBR_RX(_i) (0x2FB0 + ((_i) * 4)) /* 4 total */
#define IXGBE_WQBR_TX(_i) (0x8130 + ((_i) * 4)) /* 4 total */
#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/
@@ -1034,6 +1045,7 @@ struct ixgbe_nvm_version {
#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001
#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002
#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
+#define IXGBE_GCR_EXT_VT_MODE_MASK 0x00000003
#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
IXGBE_GCR_EXT_VT_MODE_64)
@@ -1600,7 +1612,7 @@ enum {
#define IXGBE_EICR_PCI 0x00040000 /* PCI Exception */
#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */
-#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */
+#define IXGBE_EICR_FW_EVENT 0x00200000 /* Async FW event */
#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */
#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */
@@ -1636,6 +1648,7 @@ enum {
#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EICS_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
#define IXGBE_EICS_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw)
@@ -1654,6 +1667,7 @@ enum {
#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMS_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermel Sensor Event */
#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
@@ -1673,6 +1687,7 @@ enum {
#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMC_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
#define IXGBE_EIMC_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw)
@@ -2010,7 +2025,7 @@ enum {
/* EEPROM Addressing bits based on type (0-small, 1-large) */
#define IXGBE_EEC_ADDR_SIZE 0x00000400
#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */
-#define IXGBE_EERD_MAX_ADDR 0x00003FFF /* EERD alows 14 bits for addr. */
+#define IXGBE_EERD_MAX_ADDR 0x00003FFF /* EERD allows 14 bits for addr. */
#define IXGBE_EEC_SIZE_SHIFT 11
#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6
@@ -2068,6 +2083,7 @@ enum {
#define IXGBE_SAN_MAC_ADDR_PTR 0x28
#define IXGBE_DEVICE_CAPS 0x2C
#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11
+#define IXGBE_PCIE_MSIX_E610_CAPS 0xB2
#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
#define IXGBE_MAX_MSIX_VECTORS_82599 0x40
#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
@@ -2168,6 +2184,7 @@ enum {
#define IXGBE_PCI_DEVICE_STATUS 0xAA
#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020
#define IXGBE_PCI_LINK_STATUS 0xB2
+#define IXGBE_PCI_LINK_STATUS_E610 0x82
#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
#define IXGBE_PCI_LINK_WIDTH 0x3F0
#define IXGBE_PCI_LINK_WIDTH_1 0x10
@@ -2288,6 +2305,7 @@ enum {
#define IXGBE_RXMTRL_V2_MGMT_MSG 0x0D00
#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
+#define IXGBE_FCTRL_TPE 0x00000080 /* Tag Promiscuous Ena*/
#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/
#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */
#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */
@@ -2351,6 +2369,7 @@ enum {
/* Multiple Transmit Queue Command Register */
#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */
#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */
+#define IXGBE_MTQC_NUM_TC_OR_Q 0xC /* Number of TCs or TxQs per pool */
#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 pack buffer */
#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */
#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */
@@ -2731,6 +2750,28 @@ enum ixgbe_fdir_pballoc_type {
#define FW_PHY_INFO_ID_HI_MASK 0xFFFF0000u
#define FW_PHY_INFO_ID_LO_MASK 0x0000FFFFu
+/* There are only 3 options for VFs creation on this device:
+ * 16 VFs pool with 8 queues each
+ * 32 VFs pool with 4 queues each
+ * 64 VFs pool with 2 queues each
+ *
+ * That means reading the VF registers that map a VF to its queues depends
+ * on the chosen option. Define values that help deal with each scenario.
+ */
+/* Number of queues based on VFs pool */
+#define IXGBE_16VFS_QUEUES 8
+#define IXGBE_32VFS_QUEUES 4
+#define IXGBE_64VFS_QUEUES 2
+/* Mask for getting queues bits based on VFs pool */
+#define IXGBE_16VFS_BITMASK GENMASK(IXGBE_16VFS_QUEUES - 1, 0)
+#define IXGBE_32VFS_BITMASK GENMASK(IXGBE_32VFS_QUEUES - 1, 0)
+#define IXGBE_64VFS_BITMASK GENMASK(IXGBE_64VFS_QUEUES - 1, 0)
+/* Convert queue index to register number.
+ * We have 4 registers with 32 queues in each.
+ */
+#define IXGBE_QUEUES_PER_REG 32
+#define IXGBE_QUEUES_REG_AMOUNT 4
+
/* Host Interface Command Structures */
struct ixgbe_hic_hdr {
u8 cmd;
@@ -2896,6 +2937,13 @@ struct ixgbe_adv_tx_context_desc {
__le32 mss_l4len_idx;
};
+enum {
+ IXGBE_VLAN_MACIP_LENS_REG = 0,
+ IXGBE_FCEOF_SAIDX_REG = 1,
+ IXGBE_TYPE_TUCMD_MLHL = 2,
+ IXGBE_MSS_L4LEN_IDX = 3,
+};
+
/* Adv Transmit Descriptor Config Masks */
#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */
#define IXGBE_ADVTXD_MAC_LINKSEC 0x00040000 /* Insert LinkSec */
@@ -2903,7 +2951,7 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */
#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK 0x000001FF /* IPSec ESP length */
#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
-#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */
+#define IXGBE_ADVTXD_DTYP_CTXT 0x2 /* Advanced Context Desc */
#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */
#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */
@@ -2952,6 +3000,8 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ADVTXD_FCOEF_EOF_MASK (3u << 10) /* FC EOF index */
#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+#define IXGBE_ADVTXD_MSS_MASK GENMASK(31, IXGBE_ADVTXD_MSS_SHIFT)
+#define IXGBE_ADVTXD_HEADER_LEN_MASK GENMASK(8, 0)
/* Autonegotiation advertised speeds */
typedef u32 ixgbe_autoneg_advertised;
@@ -2970,6 +3020,29 @@ typedef u32 ixgbe_link_speed;
IXGBE_LINK_SPEED_1GB_FULL | \
IXGBE_LINK_SPEED_10GB_FULL)
+/* Physical layer type */
+typedef u64 ixgbe_physical_layer;
+#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0
+#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x00001
+#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x00002
+#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x00004
+#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x00008
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x00010
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x00020
+#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x00040
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x00080
+#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x00100
+#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x00200
+#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x00400
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x00800
+#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x01000
+#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x02000
+#define IXGBE_PHYSICAL_LAYER_1000BASE_SX 0x04000
+#define IXGBE_PHYSICAL_LAYER_10BASE_T 0x08000
+#define IXGBE_PHYSICAL_LAYER_2500BASE_KX 0x10000
+#define IXGBE_PHYSICAL_LAYER_2500BASE_T 0x20000
+#define IXGBE_PHYSICAL_LAYER_5000BASE_T 0x40000
+
/* Flow Control Data Sheet defined values
* Calculation and defines taken from 802.1bb Annex O
*/
@@ -3145,6 +3218,8 @@ enum ixgbe_mac_type {
ixgbe_mac_X550,
ixgbe_mac_X550EM_x,
ixgbe_mac_x550em_a,
+ ixgbe_mac_e610,
+ ixgbe_mac_e610_vf,
ixgbe_num_macs
};
@@ -3224,7 +3299,9 @@ enum ixgbe_media_type {
ixgbe_media_type_copper,
ixgbe_media_type_backplane,
ixgbe_media_type_cx4,
- ixgbe_media_type_virtual
+ ixgbe_media_type_virtual,
+ ixgbe_media_type_da,
+ ixgbe_media_type_aui,
};
/* Flow Control Settings */
@@ -3233,7 +3310,8 @@ enum ixgbe_fc_mode {
ixgbe_fc_rx_pause,
ixgbe_fc_tx_pause,
ixgbe_fc_full,
- ixgbe_fc_default
+ ixgbe_fc_default,
+ ixgbe_fc_pfc,
};
/* Smart Speed Settings */
@@ -3403,6 +3481,8 @@ struct ixgbe_eeprom_operations {
int (*validate_checksum)(struct ixgbe_hw *, u16 *);
int (*update_checksum)(struct ixgbe_hw *);
int (*calc_checksum)(struct ixgbe_hw *);
+ int (*read_pba_string)(struct ixgbe_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
};
struct ixgbe_mac_operations {
@@ -3411,6 +3491,7 @@ struct ixgbe_mac_operations {
int (*start_hw)(struct ixgbe_hw *);
int (*clear_hw_cntrs)(struct ixgbe_hw *);
enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
+ int (*get_fw_ver)(struct ixgbe_hw *hw);
int (*get_mac_addr)(struct ixgbe_hw *, u8 *);
int (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
int (*get_device_caps)(struct ixgbe_hw *, u16 *);
@@ -3479,6 +3560,8 @@ struct ixgbe_mac_operations {
int (*get_thermal_sensor_data)(struct ixgbe_hw *);
int (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
bool (*fw_recovery_mode)(struct ixgbe_hw *hw);
+ bool (*fw_rollback_mode)(struct ixgbe_hw *hw);
+ int (*get_nvm_ver)(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm);
void (*disable_rx)(struct ixgbe_hw *hw);
void (*enable_rx)(struct ixgbe_hw *hw);
void (*set_source_address_pruning)(struct ixgbe_hw *, bool,
@@ -3491,6 +3574,12 @@ struct ixgbe_mac_operations {
int (*dmac_config_tcs)(struct ixgbe_hw *hw);
int (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *);
int (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32);
+
+ /* MDD events */
+ void (*enable_mdd)(struct ixgbe_hw *hw);
+ void (*disable_mdd)(struct ixgbe_hw *hw);
+ void (*restore_mdd_vf)(struct ixgbe_hw *hw, u32 vf);
+ void (*handle_mdd)(struct ixgbe_hw *hw, unsigned long *vf_bitmap);
};
struct ixgbe_phy_operations {
@@ -3533,6 +3622,9 @@ struct ixgbe_link_operations {
struct ixgbe_link_info {
struct ixgbe_link_operations ops;
u8 addr;
+ struct ixgbe_link_status link_info;
+ struct ixgbe_link_status link_info_old;
+ u8 get_link_info;
};
struct ixgbe_eeprom_info {
@@ -3575,6 +3667,7 @@ struct ixgbe_mac_info {
u8 san_mac_rar_index;
struct ixgbe_thermal_sensor_data thermal_sensor_data;
bool set_lben;
+ u32 max_link_up_time;
u8 led_link_act;
};
@@ -3599,19 +3692,10 @@ struct ixgbe_phy_info {
bool reset_if_overtemp;
bool qsfp_shared_i2c_bus;
u32 nw_mng_if_sel;
-};
-
-#include "ixgbe_mbx.h"
-
-struct ixgbe_mbx_operations {
- int (*init_params)(struct ixgbe_hw *hw);
- int (*read)(struct ixgbe_hw *, u32 *, u16, u16);
- int (*write)(struct ixgbe_hw *, u32 *, u16, u16);
- int (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16);
- int (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
- int (*check_for_msg)(struct ixgbe_hw *, u16);
- int (*check_for_ack)(struct ixgbe_hw *, u16);
- int (*check_for_rst)(struct ixgbe_hw *, u16);
+ u64 phy_type_low;
+ u64 phy_type_high;
+ u16 curr_user_speed_req;
+ struct ixgbe_aci_cmd_set_phy_cfg_data curr_user_phy_cfg;
};
struct ixgbe_mbx_stats {
@@ -3623,6 +3707,8 @@ struct ixgbe_mbx_stats {
u32 rsts;
};
+struct ixgbe_mbx_operations;
+
struct ixgbe_mbx_info {
const struct ixgbe_mbx_operations *ops;
struct ixgbe_mbx_stats stats;
@@ -3654,6 +3740,20 @@ struct ixgbe_hw {
bool allow_unsupported_sfp;
bool wol_enabled;
bool need_crosstalk_fix;
+ u8 api_branch;
+ u8 api_maj_ver;
+ u8 api_min_ver;
+ u8 api_patch;
+ u8 fw_branch;
+ u8 fw_maj_ver;
+ u8 fw_min_ver;
+ u8 fw_patch;
+ u32 fw_build;
+ struct ixgbe_aci_info aci;
+ struct ixgbe_flash_info flash;
+ struct ixgbe_hw_dev_caps dev_caps;
+ struct ixgbe_hw_func_caps func_caps;
+ struct libie_fwlog fwlog;
};
struct ixgbe_info {
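The new IXGBE_*VFS_QUEUES, IXGBE_*VFS_BITMASK, IXGBE_QUEUES_PER_REG and IXGBE_QUEUES_REG_AMOUNT constants encode the three VF-pool layouts described in the comment above (16x8, 32x4 or 64x2 queues spread across four 32-queue registers). A small sketch of how they are meant to compose, assuming the 32-pool case; the helper name is illustrative, not from the patch:

	static void ixgbe_vf_queue_mask_sketch(u32 vf, u32 *reg_idx, u32 *mask)
	{
		u32 first_queue = vf * IXGBE_32VFS_QUEUES;	/* 4 queues per VF */

		*reg_idx = first_queue / IXGBE_QUEUES_PER_REG;	/* one of the 4 registers */
		*mask = IXGBE_32VFS_BITMASK << (first_queue % IXGBE_QUEUES_PER_REG);
	}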
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
new file mode 100644
index 000000000000..ff8d640a50b1
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
@@ -0,0 +1,1032 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Intel Corporation. */
+
+#ifndef _IXGBE_TYPE_E610_H_
+#define _IXGBE_TYPE_E610_H_
+
+#include <linux/net/intel/libie/adminq.h>
+
+#define BYTES_PER_DWORD 4
+
+/* General E610 defines */
+#define IXGBE_MAX_VSI 768
+
+/* Checksum and Shadow RAM pointers */
+#define IXGBE_E610_SR_NVM_CTRL_WORD 0x00
+#define IXGBE_E610_SR_PBA_BLOCK_PTR 0x16
+#define IXGBE_E610_SR_PBA_BLOCK_MASK GENMASK(15, 8)
+#define IXGBE_E610_SR_NVM_DEV_STARTER_VER 0x18
+#define IXGBE_E610_SR_NVM_EETRACK_LO 0x2D
+#define IXGBE_E610_SR_NVM_EETRACK_HI 0x2E
+#define IXGBE_E610_NVM_VER_LO_MASK GENMASK(7, 0)
+#define IXGBE_E610_NVM_VER_HI_MASK GENMASK(15, 12)
+#define IXGBE_E610_SR_SW_CHECKSUM_WORD 0x3F
+#define IXGBE_E610_SR_PFA_PTR 0x40
+#define IXGBE_E610_SR_1ST_NVM_BANK_PTR 0x42
+#define IXGBE_E610_SR_NVM_BANK_SIZE 0x43
+#define IXGBE_E610_SR_1ST_OROM_BANK_PTR 0x44
+#define IXGBE_E610_SR_OROM_BANK_SIZE 0x45
+#define IXGBE_E610_SR_NETLIST_BANK_PTR 0x46
+#define IXGBE_E610_SR_NETLIST_BANK_SIZE 0x47
+
+/* The OROM version topology */
+#define IXGBE_OROM_VER_PATCH_MASK GENMASK_ULL(7, 0)
+#define IXGBE_OROM_VER_BUILD_MASK GENMASK_ULL(23, 8)
+#define IXGBE_OROM_VER_MASK GENMASK_ULL(31, 24)
+
+/* CSS Header words */
+#define IXGBE_NVM_CSS_HDR_LEN_L 0x02
+#define IXGBE_NVM_CSS_HDR_LEN_H 0x03
+#define IXGBE_NVM_CSS_SREV_L 0x14
+#define IXGBE_NVM_CSS_SREV_H 0x15
+
+#define IXGBE_HDR_LEN_ROUNDUP 32
+
+/* Length of Authentication header section in words */
+#define IXGBE_NVM_AUTH_HEADER_LEN 0x08
+
+/* Shadow RAM related */
+#define IXGBE_SR_WORDS_IN_1KB 512
+
+/* The Netlist ID Block is located after all of the Link Topology nodes. */
+#define IXGBE_NETLIST_ID_BLK_SIZE 0x30
+#define IXGBE_NETLIST_ID_BLK_OFFSET(n) IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0004 + 2 * (n))
+
+/* netlist ID block field offsets (word offsets) */
+#define IXGBE_NETLIST_ID_BLK_MAJOR_VER_LOW 0x02
+#define IXGBE_NETLIST_ID_BLK_MAJOR_VER_HIGH 0x03
+#define IXGBE_NETLIST_ID_BLK_MINOR_VER_LOW 0x04
+#define IXGBE_NETLIST_ID_BLK_MINOR_VER_HIGH 0x05
+#define IXGBE_NETLIST_ID_BLK_TYPE_LOW 0x06
+#define IXGBE_NETLIST_ID_BLK_TYPE_HIGH 0x07
+#define IXGBE_NETLIST_ID_BLK_REV_LOW 0x08
+#define IXGBE_NETLIST_ID_BLK_REV_HIGH 0x09
+#define IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(n) (0x0A + (n))
+#define IXGBE_NETLIST_ID_BLK_CUST_VER 0x2F
+
+/* The Link Topology Netlist section is stored as a series of words. It is
+ * stored in the NVM as a TLV, with the first two words containing the type
+ * and length.
+ */
+#define IXGBE_NETLIST_LINK_TOPO_MOD_ID 0x011B
+#define IXGBE_NETLIST_TYPE_OFFSET 0x0000
+#define IXGBE_NETLIST_LEN_OFFSET 0x0001
+
+/* The Link Topology section follows the TLV header. When reading the netlist
+ * using ixgbe_read_netlist_module, we need to account for the 2-word TLV
+ * header.
+ */
+#define IXGBE_NETLIST_LINK_TOPO_OFFSET(n) ((n) + 2)
+#define IXGBE_LINK_TOPO_MODULE_LEN IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0000)
+#define IXGBE_LINK_TOPO_NODE_COUNT IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0001)
+#define IXGBE_LINK_TOPO_NODE_COUNT_M GENMASK_ULL(9, 0)
+
+/* Firmware Status Register (GL_FWSTS) */
+#define GL_FWSTS 0x00083048 /* Reset Source: POR */
+#define GL_FWSTS_EP_PF0 BIT(24)
+#define GL_FWSTS_EP_PF1 BIT(25)
+
+/* Global NVM General Status Register */
+#define GLNVM_GENS 0x000B6100 /* Reset Source: POR */
+#define GLNVM_GENS_SR_SIZE_M GENMASK(7, 5)
+
+#define IXGBE_GL_MNG_FWSM 0x000B6134 /* Reset Source: POR */
+#define IXGBE_GL_MNG_FWSM_RECOVERY_M BIT(1)
+#define IXGBE_GL_MNG_FWSM_ROLLBACK_M BIT(2)
+
+/* Flash Access Register */
+#define IXGBE_GLNVM_FLA 0x000B6108 /* Reset Source: POR */
+#define IXGBE_GLNVM_FLA_LOCKED_S 6
+#define IXGBE_GLNVM_FLA_LOCKED_M BIT(6)
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define IXGBE_SR_CTRL_WORD_1_M GENMASK(7, 6)
+#define IXGBE_SR_CTRL_WORD_VALID BIT(0)
+#define IXGBE_SR_CTRL_WORD_OROM_BANK BIT(3)
+#define IXGBE_SR_CTRL_WORD_NETLIST_BANK BIT(4)
+#define IXGBE_SR_CTRL_WORD_NVM_BANK BIT(5)
+#define IXGBE_SR_NVM_PTR_4KB_UNITS BIT(15)
+
+/* Admin Command Interface (ACI) registers */
+#define IXGBE_PF_HIDA(_i) (0x00085000 + ((_i) * 4))
+#define IXGBE_PF_HIDA_2(_i) (0x00085020 + ((_i) * 4))
+#define IXGBE_PF_HIBA(_i) (0x00084000 + ((_i) * 4))
+#define IXGBE_PF_HICR 0x00082048
+
+#define IXGBE_PF_HICR_EN BIT(0)
+#define IXGBE_PF_HICR_C BIT(1)
+#define IXGBE_PF_HICR_SV BIT(2)
+#define IXGBE_PF_HICR_EV BIT(3)
+
+#define IXGBE_FW_API_VER_MAJOR 0x01
+#define IXGBE_FW_API_VER_MINOR 0x07
+#define IXGBE_FW_API_VER_DIFF_ALLOWED 0x02
+
+#define IXGBE_ACI_DESC_SIZE 32
+#define IXGBE_ACI_DESC_SIZE_IN_DWORDS (IXGBE_ACI_DESC_SIZE / BYTES_PER_DWORD)
+
+#define IXGBE_ACI_MAX_BUFFER_SIZE 4096 /* Size in bytes */
+#define IXGBE_ACI_SEND_DELAY_TIME_MS 10
+#define IXGBE_ACI_SEND_MAX_EXECUTE 3
+#define IXGBE_ACI_SEND_TIMEOUT_MS \
+ (IXGBE_ACI_SEND_MAX_EXECUTE * IXGBE_ACI_SEND_DELAY_TIME_MS)
+/* [ms] timeout of waiting for sync response */
+#define IXGBE_ACI_SYNC_RESPONSE_TIMEOUT 100000
+/* [ms] timeout of waiting for async response */
+#define IXGBE_ACI_ASYNC_RESPONSE_TIMEOUT 150000
+/* [ms] timeout of waiting for resource release */
+#define IXGBE_ACI_RELEASE_RES_TIMEOUT 10000
+
+/* Admin Command Interface (ACI) opcodes */
+enum ixgbe_aci_opc {
+ ixgbe_aci_opc_get_ver = 0x0001,
+ ixgbe_aci_opc_driver_ver = 0x0002,
+ ixgbe_aci_opc_get_exp_err = 0x0005,
+
+ /* resource ownership */
+ ixgbe_aci_opc_req_res = 0x0008,
+ ixgbe_aci_opc_release_res = 0x0009,
+
+ /* device/function capabilities */
+ ixgbe_aci_opc_list_func_caps = 0x000A,
+ ixgbe_aci_opc_list_dev_caps = 0x000B,
+
+ /* safe disable of RXEN */
+ ixgbe_aci_opc_disable_rxen = 0x000C,
+
+ /* FW events */
+ ixgbe_aci_opc_get_fw_event = 0x0014,
+
+ /* PHY commands */
+ ixgbe_aci_opc_get_phy_caps = 0x0600,
+ ixgbe_aci_opc_set_phy_cfg = 0x0601,
+ ixgbe_aci_opc_restart_an = 0x0605,
+ ixgbe_aci_opc_get_link_status = 0x0607,
+ ixgbe_aci_opc_set_event_mask = 0x0613,
+ ixgbe_aci_opc_get_link_topo = 0x06E0,
+ ixgbe_aci_opc_get_link_topo_pin = 0x06E1,
+ ixgbe_aci_opc_read_i2c = 0x06E2,
+ ixgbe_aci_opc_write_i2c = 0x06E3,
+ ixgbe_aci_opc_read_mdio = 0x06E4,
+ ixgbe_aci_opc_write_mdio = 0x06E5,
+ ixgbe_aci_opc_set_gpio_by_func = 0x06E6,
+ ixgbe_aci_opc_get_gpio_by_func = 0x06E7,
+ ixgbe_aci_opc_set_port_id_led = 0x06E9,
+ ixgbe_aci_opc_set_gpio = 0x06EC,
+ ixgbe_aci_opc_get_gpio = 0x06ED,
+ ixgbe_aci_opc_sff_eeprom = 0x06EE,
+ ixgbe_aci_opc_prog_topo_dev_nvm = 0x06F2,
+ ixgbe_aci_opc_read_topo_dev_nvm = 0x06F3,
+
+ /* NVM commands */
+ ixgbe_aci_opc_nvm_read = 0x0701,
+ ixgbe_aci_opc_nvm_erase = 0x0702,
+ ixgbe_aci_opc_nvm_write = 0x0703,
+ ixgbe_aci_opc_nvm_cfg_read = 0x0704,
+ ixgbe_aci_opc_nvm_cfg_write = 0x0705,
+ ixgbe_aci_opc_nvm_checksum = 0x0706,
+ ixgbe_aci_opc_nvm_write_activate = 0x0707,
+ ixgbe_aci_opc_nvm_sr_dump = 0x0707,
+ ixgbe_aci_opc_nvm_save_factory_settings = 0x0708,
+ ixgbe_aci_opc_nvm_update_empr = 0x0709,
+ ixgbe_aci_opc_nvm_pkg_data = 0x070A,
+ ixgbe_aci_opc_nvm_pass_component_tbl = 0x070B,
+
+ /* Alternate Structure Commands */
+ ixgbe_aci_opc_write_alt_direct = 0x0900,
+ ixgbe_aci_opc_write_alt_indirect = 0x0901,
+ ixgbe_aci_opc_read_alt_direct = 0x0902,
+ ixgbe_aci_opc_read_alt_indirect = 0x0903,
+ ixgbe_aci_opc_done_alt_write = 0x0904,
+ ixgbe_aci_opc_clear_port_alt_write = 0x0906,
+
+ /* TCA Events */
+ ixgbe_aci_opc_temp_tca_event = 0x0C94,
+
+ /* debug commands */
+ ixgbe_aci_opc_debug_dump_internals = 0xFF08,
+
+ /* SystemDiagnostic commands */
+ ixgbe_aci_opc_set_health_status_config = 0xFF20,
+ ixgbe_aci_opc_get_supported_health_status_codes = 0xFF21,
+ ixgbe_aci_opc_get_health_status = 0xFF22,
+ ixgbe_aci_opc_clear_health_status = 0xFF23,
+};
+
+#define IXGBE_DRV_VER_STR_LEN_E610 32
+
+/* Get Expanded Error Code (0x0005, direct) */
+struct ixgbe_aci_cmd_get_exp_err {
+ __le32 reason;
+#define IXGBE_ACI_EXPANDED_ERROR_NOT_PROVIDED 0xFFFFFFFF
+ __le32 identifier;
+ u8 rsvd[8];
+};
+
+/* FW update timeout definitions are in milliseconds */
+#define IXGBE_NVM_TIMEOUT 180000
+
+/* Disable RXEN (direct 0x000C) */
+struct ixgbe_aci_cmd_disable_rxen {
+ u8 lport_num;
+ u8 reserved[15];
+};
+
+/* Get PHY capabilities (indirect 0x0600) */
+struct ixgbe_aci_cmd_get_phy_caps {
+ u8 lport_num;
+ u8 reserved;
+ __le16 param0;
+ /* 18.0 - Report qualified modules */
+#define IXGBE_ACI_GET_PHY_RQM BIT(0)
+ /* 18.1 - 18.3 : Report mode
+ * 000b - Report topology capabilities, without media
+ * 001b - Report topology capabilities, with media
+ * 010b - Report Active configuration
+ * 011b - Report PHY Type and FEC mode capabilities
+ * 100b - Report Default capabilities
+ */
+#define IXGBE_ACI_REPORT_MODE_M GENMASK(3, 1)
+#define IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA 0
+#define IXGBE_ACI_REPORT_TOPO_CAP_MEDIA BIT(1)
+#define IXGBE_ACI_REPORT_ACTIVE_CFG BIT(2)
+#define IXGBE_ACI_REPORT_DFLT_CFG BIT(3)
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* This is #define of PHY type (Extended):
+ * The first set of defines is for phy_type_low.
+ */
+#define IXGBE_PHY_TYPE_LOW_100BASE_TX BIT_ULL(0)
+#define IXGBE_PHY_TYPE_LOW_100M_SGMII BIT_ULL(1)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_T BIT_ULL(2)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_SX BIT_ULL(3)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_LX BIT_ULL(4)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_KX BIT_ULL(5)
+#define IXGBE_PHY_TYPE_LOW_1G_SGMII BIT_ULL(6)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_T BIT_ULL(7)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_X BIT_ULL(8)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_KX BIT_ULL(9)
+#define IXGBE_PHY_TYPE_LOW_5GBASE_T BIT_ULL(10)
+#define IXGBE_PHY_TYPE_LOW_5GBASE_KR BIT_ULL(11)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_T BIT_ULL(12)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_DA BIT_ULL(13)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_SR BIT_ULL(14)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_LR BIT_ULL(15)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1 BIT_ULL(16)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC BIT_ULL(17)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_C2C BIT_ULL(18)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_T BIT_ULL(19)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_CR BIT_ULL(20)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_CR_S BIT_ULL(21)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_CR1 BIT_ULL(22)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_SR BIT_ULL(23)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_LR BIT_ULL(24)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_KR BIT_ULL(25)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_KR_S BIT_ULL(26)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_KR1 BIT_ULL(27)
+#define IXGBE_PHY_TYPE_LOW_25G_AUI_AOC_ACC BIT_ULL(28)
+#define IXGBE_PHY_TYPE_LOW_25G_AUI_C2C BIT_ULL(29)
+#define IXGBE_PHY_TYPE_LOW_MAX_INDEX 29
+/* The second set of defines is for phy_type_high. */
+#define IXGBE_PHY_TYPE_HIGH_10BASE_T BIT_ULL(1)
+#define IXGBE_PHY_TYPE_HIGH_10M_SGMII BIT_ULL(2)
+#define IXGBE_PHY_TYPE_HIGH_2500M_SGMII BIT_ULL(56)
+#define IXGBE_PHY_TYPE_HIGH_100M_USXGMII BIT_ULL(57)
+#define IXGBE_PHY_TYPE_HIGH_1G_USXGMII BIT_ULL(58)
+#define IXGBE_PHY_TYPE_HIGH_2500M_USXGMII BIT_ULL(59)
+#define IXGBE_PHY_TYPE_HIGH_5G_USXGMII BIT_ULL(60)
+#define IXGBE_PHY_TYPE_HIGH_10G_USXGMII BIT_ULL(61)
+#define IXGBE_PHY_TYPE_HIGH_MAX_INDEX 61
+
+struct ixgbe_aci_cmd_get_phy_caps_data {
+ __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 caps;
+#define IXGBE_ACI_PHY_EN_TX_LINK_PAUSE BIT(0)
+#define IXGBE_ACI_PHY_EN_RX_LINK_PAUSE BIT(1)
+#define IXGBE_ACI_PHY_LOW_POWER_MODE BIT(2)
+#define IXGBE_ACI_PHY_EN_LINK BIT(3)
+#define IXGBE_ACI_PHY_AN_MODE BIT(4)
+#define IXGBE_ACI_PHY_EN_MOD_QUAL BIT(5)
+#define IXGBE_ACI_PHY_EN_LESM BIT(6)
+#define IXGBE_ACI_PHY_EN_AUTO_FEC BIT(7)
+#define IXGBE_ACI_PHY_CAPS_MASK GENMASK(7, 0)
+ u8 low_power_ctrl_an;
+#define IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG BIT(0)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE28 BIT(1)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE73 BIT(2)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE37 BIT(3)
+ __le16 eee_cap;
+#define IXGBE_ACI_PHY_EEE_EN_100BASE_TX BIT(0)
+#define IXGBE_ACI_PHY_EEE_EN_1000BASE_T BIT(1)
+#define IXGBE_ACI_PHY_EEE_EN_10GBASE_T BIT(2)
+#define IXGBE_ACI_PHY_EEE_EN_1000BASE_KX BIT(3)
+#define IXGBE_ACI_PHY_EEE_EN_10GBASE_KR BIT(4)
+#define IXGBE_ACI_PHY_EEE_EN_25GBASE_KR BIT(5)
+#define IXGBE_ACI_PHY_EEE_EN_10BASE_T BIT(11)
+ __le16 eeer_value;
+ u8 phy_id_oui[4]; /* PHY/Module ID connected on the port */
+ u8 phy_fw_ver[8];
+ u8 link_fec_options;
+#define IXGBE_ACI_PHY_FEC_10G_KR_40G_KR4_EN BIT(0)
+#define IXGBE_ACI_PHY_FEC_10G_KR_40G_KR4_REQ BIT(1)
+#define IXGBE_ACI_PHY_FEC_25G_RS_528_REQ BIT(2)
+#define IXGBE_ACI_PHY_FEC_25G_KR_REQ BIT(3)
+#define IXGBE_ACI_PHY_FEC_25G_RS_544_REQ BIT(4)
+#define IXGBE_ACI_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6)
+#define IXGBE_ACI_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7)
+#define IXGBE_ACI_PHY_FEC_MASK 0xdf
+ u8 module_compliance_enforcement;
+#define IXGBE_ACI_MOD_ENFORCE_STRICT_MODE BIT(0)
+ u8 extended_compliance_code;
+#define IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE 3
+ u8 module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE];
+#define IXGBE_ACI_MOD_TYPE_BYTE0_SFP_PLUS 0xA0
+#define IXGBE_ACI_MOD_TYPE_BYTE0_QSFP_PLUS 0x80
+#define IXGBE_ACI_MOD_TYPE_IDENT 1
+#define IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE BIT(0)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE BIT(1)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR BIT(4)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR BIT(5)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM BIT(6)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_ER BIT(7)
+#define IXGBE_ACI_MOD_TYPE_BYTE2_SFP_PLUS 0xA0
+#define IXGBE_ACI_MOD_TYPE_BYTE2_QSFP_PLUS 0x86
+ u8 qualified_module_count;
+ u8 rsvd2[7]; /* Bytes 47:41 reserved */
+#define IXGBE_ACI_QUAL_MOD_COUNT_MAX 16
+ struct {
+ u8 v_oui[3];
+ u8 rsvd3;
+ u8 v_part[16];
+ __le32 v_rev;
+ __le64 rsvd4;
+ } qual_modules[IXGBE_ACI_QUAL_MOD_COUNT_MAX];
+};
+
+/* Set PHY capabilities (direct 0x0601)
+ * NOTE: This command must be followed by setup link and restart auto-neg
+ */
+struct ixgbe_aci_cmd_set_phy_cfg {
+ u8 lport_num;
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Set PHY config command data structure */
+struct ixgbe_aci_cmd_set_phy_cfg_data {
+ __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 caps;
+#define IXGBE_ACI_PHY_ENA_VALID_MASK 0xef
+#define IXGBE_ACI_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
+#define IXGBE_ACI_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
+#define IXGBE_ACI_PHY_ENA_LOW_POWER BIT(2)
+#define IXGBE_ACI_PHY_ENA_LINK BIT(3)
+#define IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT BIT(5)
+#define IXGBE_ACI_PHY_ENA_LESM BIT(6)
+#define IXGBE_ACI_PHY_ENA_AUTO_FEC BIT(7)
+ u8 low_power_ctrl_an;
+ __le16 eee_cap; /* Value from ixgbe_aci_get_phy_caps */
+ __le16 eeer_value; /* Use defines from ixgbe_aci_get_phy_caps */
+ u8 link_fec_opt; /* Use defines from ixgbe_aci_get_phy_caps */
+ u8 module_compliance_enforcement;
+};
+
+/* Restart AN command data structure (direct 0x0605)
+ * Also used for response, with only the lport_num field present.
+ */
+struct ixgbe_aci_cmd_restart_an {
+ u8 lport_num;
+ u8 reserved;
+ u8 cmd_flags;
+#define IXGBE_ACI_RESTART_AN_LINK_RESTART BIT(1)
+#define IXGBE_ACI_RESTART_AN_LINK_ENABLE BIT(2)
+ u8 reserved2[13];
+};
+
+/* Get link status (indirect 0x0607), also used for Link Status Event */
+struct ixgbe_aci_cmd_get_link_status {
+ u8 lport_num;
+ u8 reserved;
+ __le16 cmd_flags;
+#define IXGBE_ACI_LSE_M GENMASK(1, 0)
+#define IXGBE_ACI_LSE_NOP 0x0
+#define IXGBE_ACI_LSE_DIS 0x2
+#define IXGBE_ACI_LSE_ENA 0x3
+ /* only response uses this flag */
+#define IXGBE_ACI_LSE_IS_ENABLED 0x1
+ __le32 reserved2;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Get link status response data structure, also used for Link Status Event */
+struct ixgbe_aci_cmd_get_link_status_data {
+ u8 topo_media_conflict;
+#define IXGBE_ACI_LINK_TOPO_CONFLICT BIT(0)
+#define IXGBE_ACI_LINK_MEDIA_CONFLICT BIT(1)
+#define IXGBE_ACI_LINK_TOPO_CORRUPT BIT(2)
+#define IXGBE_ACI_LINK_TOPO_UNREACH_PRT BIT(4)
+#define IXGBE_ACI_LINK_TOPO_UNDRUTIL_PRT BIT(5)
+#define IXGBE_ACI_LINK_TOPO_UNDRUTIL_MEDIA BIT(6)
+#define IXGBE_ACI_LINK_TOPO_UNSUPP_MEDIA BIT(7)
+ u8 link_cfg_err;
+#define IXGBE_ACI_LINK_CFG_ERR BIT(0)
+#define IXGBE_ACI_LINK_CFG_COMPLETED BIT(1)
+#define IXGBE_ACI_LINK_ACT_PORT_OPT_INVAL BIT(2)
+#define IXGBE_ACI_LINK_FEAT_ID_OR_CONFIG_ID_INVAL BIT(3)
+#define IXGBE_ACI_LINK_TOPO_CRITICAL_SDP_ERR BIT(4)
+#define IXGBE_ACI_LINK_MODULE_POWER_UNSUPPORTED BIT(5)
+#define IXGBE_ACI_LINK_EXTERNAL_PHY_LOAD_FAILURE BIT(6)
+#define IXGBE_ACI_LINK_INVAL_MAX_POWER_LIMIT BIT(7)
+ u8 link_info;
+#define IXGBE_ACI_LINK_UP BIT(0) /* Link Status */
+#define IXGBE_ACI_LINK_FAULT BIT(1)
+#define IXGBE_ACI_LINK_FAULT_TX BIT(2)
+#define IXGBE_ACI_LINK_FAULT_RX BIT(3)
+#define IXGBE_ACI_LINK_FAULT_REMOTE BIT(4)
+#define IXGBE_ACI_LINK_UP_PORT BIT(5) /* External Port Link Status */
+#define IXGBE_ACI_MEDIA_AVAILABLE BIT(6)
+#define IXGBE_ACI_SIGNAL_DETECT BIT(7)
+ u8 an_info;
+#define IXGBE_ACI_AN_COMPLETED BIT(0)
+#define IXGBE_ACI_LP_AN_ABILITY BIT(1)
+#define IXGBE_ACI_PD_FAULT BIT(2) /* Parallel Detection Fault */
+#define IXGBE_ACI_FEC_EN BIT(3)
+#define IXGBE_ACI_PHY_LOW_POWER BIT(4) /* Low Power State */
+#define IXGBE_ACI_LINK_PAUSE_TX BIT(5)
+#define IXGBE_ACI_LINK_PAUSE_RX BIT(6)
+#define IXGBE_ACI_QUALIFIED_MODULE BIT(7)
+ u8 ext_info;
+#define IXGBE_ACI_LINK_PHY_TEMP_ALARM BIT(0)
+#define IXGBE_ACI_LINK_EXCESSIVE_ERRORS BIT(1) /* Excessive Link Errors */
+ /* Port Tx Suspended */
+#define IXGBE_ACI_LINK_TX_ACTIVE 0
+#define IXGBE_ACI_LINK_TX_DRAINED 1
+#define IXGBE_ACI_LINK_TX_FLUSHED 3
+ u8 lb_status;
+#define IXGBE_ACI_LINK_LB_PHY_LCL BIT(0)
+#define IXGBE_ACI_LINK_LB_PHY_RMT BIT(1)
+#define IXGBE_ACI_LINK_LB_MAC_LCL BIT(2)
+ __le16 max_frame_size;
+ u8 cfg;
+#define IXGBE_ACI_LINK_25G_KR_FEC_EN BIT(0)
+#define IXGBE_ACI_LINK_25G_RS_528_FEC_EN BIT(1)
+#define IXGBE_ACI_LINK_25G_RS_544_FEC_EN BIT(2)
+#define IXGBE_ACI_FEC_MASK GENMASK(2, 0)
+ /* Pacing Config */
+#define IXGBE_ACI_CFG_PACING_M GENMASK(6, 3)
+#define IXGBE_ACI_CFG_PACING_TYPE_M BIT(7)
+#define IXGBE_ACI_CFG_PACING_TYPE_AVG 0
+#define IXGBE_ACI_CFG_PACING_TYPE_FIXED IXGBE_ACI_CFG_PACING_TYPE_M
+ /* External Device Power Ability */
+ u8 power_desc;
+#define IXGBE_ACI_PWR_CLASS_M GENMASK(5, 0)
+#define IXGBE_ACI_LINK_PWR_BASET_LOW_HIGH 0
+#define IXGBE_ACI_LINK_PWR_BASET_HIGH 1
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_1 0
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_2 1
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_3 2
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_4 3
+ __le16 link_speed;
+#define IXGBE_ACI_LINK_SPEED_M GENMASK(10, 0)
+#define IXGBE_ACI_LINK_SPEED_10MB BIT(0)
+#define IXGBE_ACI_LINK_SPEED_100MB BIT(1)
+#define IXGBE_ACI_LINK_SPEED_1000MB BIT(2)
+#define IXGBE_ACI_LINK_SPEED_2500MB BIT(3)
+#define IXGBE_ACI_LINK_SPEED_5GB BIT(4)
+#define IXGBE_ACI_LINK_SPEED_10GB BIT(5)
+#define IXGBE_ACI_LINK_SPEED_20GB BIT(6)
+#define IXGBE_ACI_LINK_SPEED_25GB BIT(7)
+#define IXGBE_ACI_LINK_SPEED_40GB BIT(8)
+#define IXGBE_ACI_LINK_SPEED_50GB BIT(9)
+#define IXGBE_ACI_LINK_SPEED_100GB BIT(10)
+#define IXGBE_ACI_LINK_SPEED_200GB BIT(11)
+#define IXGBE_ACI_LINK_SPEED_UNKNOWN BIT(15)
+ __le16 reserved3;
+ u8 ext_fec_status;
+#define IXGBE_ACI_LINK_RS_272_FEC_EN BIT(0) /* RS 272 FEC enabled */
+ u8 reserved4;
+ __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */
+ /* Get link status version 2 link partner data */
+ __le64 lp_phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
+ __le64 lp_phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */
+ u8 lp_fec_adv;
+#define IXGBE_ACI_LINK_LP_10G_KR_FEC_CAP BIT(0)
+#define IXGBE_ACI_LINK_LP_25G_KR_FEC_CAP BIT(1)
+#define IXGBE_ACI_LINK_LP_RS_528_FEC_CAP BIT(2)
+#define IXGBE_ACI_LINK_LP_50G_KR_272_FEC_CAP BIT(3)
+#define IXGBE_ACI_LINK_LP_100G_KR_272_FEC_CAP BIT(4)
+#define IXGBE_ACI_LINK_LP_200G_KR_272_FEC_CAP BIT(5)
+ u8 lp_fec_req;
+#define IXGBE_ACI_LINK_LP_10G_KR_FEC_REQ BIT(0)
+#define IXGBE_ACI_LINK_LP_25G_KR_FEC_REQ BIT(1)
+#define IXGBE_ACI_LINK_LP_RS_528_FEC_REQ BIT(2)
+#define IXGBE_ACI_LINK_LP_KR_272_FEC_REQ BIT(3)
+ u8 lp_flowcontrol;
+#define IXGBE_ACI_LINK_LP_PAUSE_ADV BIT(0)
+#define IXGBE_ACI_LINK_LP_ASM_DIR_ADV BIT(1)
+ u8 reserved5[5];
+} __packed;
+
+/* Set event mask command (direct 0x0613) */
+struct ixgbe_aci_cmd_set_event_mask {
+ u8 lport_num;
+ u8 reserved[7];
+ __le16 event_mask;
+#define IXGBE_ACI_LINK_EVENT_UPDOWN BIT(1)
+#define IXGBE_ACI_LINK_EVENT_MEDIA_NA BIT(2)
+#define IXGBE_ACI_LINK_EVENT_LINK_FAULT BIT(3)
+#define IXGBE_ACI_LINK_EVENT_PHY_TEMP_ALARM BIT(4)
+#define IXGBE_ACI_LINK_EVENT_EXCESSIVE_ERRORS BIT(5)
+#define IXGBE_ACI_LINK_EVENT_SIGNAL_DETECT BIT(6)
+#define IXGBE_ACI_LINK_EVENT_AN_COMPLETED BIT(7)
+#define IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL BIT(8)
+#define IXGBE_ACI_LINK_EVENT_PORT_TX_SUSPENDED BIT(9)
+#define IXGBE_ACI_LINK_EVENT_TOPO_CONFLICT BIT(10)
+#define IXGBE_ACI_LINK_EVENT_MEDIA_CONFLICT BIT(11)
+#define IXGBE_ACI_LINK_EVENT_PHY_FW_LOAD_FAIL BIT(12)
+ u8 reserved1[6];
+};
+
+struct ixgbe_aci_cmd_link_topo_params {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define IXGBE_ACI_LINK_TOPO_PORT_NUM_VALID BIT(0)
+ u8 node_type_ctx;
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_M GENMASK(3, 0)
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_PHY 0
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_GPIO_CTRL 1
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_MUX_CTRL 2
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_LED_CTRL 3
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_LED 4
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_THERMAL 5
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE 6
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_MEZZ 7
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_ID_EEPROM 8
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_CLK_CTRL 9
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_CLK_MUX 10
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_GPS 11
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_S 4
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_M GENMASK(7, 4)
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_GLOBAL 0
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_BOARD 1
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT 2
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE 3
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE_HANDLE 4
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_DIRECT_BUS_ACCESS 5
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE_HANDLE_BUS_ADDRESS 6
+ u8 index;
+};
+
+struct ixgbe_aci_cmd_link_topo_addr {
+ struct ixgbe_aci_cmd_link_topo_params topo_params;
+ __le16 handle;
+/* Used to decode the handle field */
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_M BIT(9)
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_LOM BIT(9)
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ 0
+};
+
+/* Get Link Topology Handle (direct, 0x06E0) */
+struct ixgbe_aci_cmd_get_link_topo {
+ struct ixgbe_aci_cmd_link_topo_addr addr;
+ u8 node_part_num;
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_PCA9575 0x21
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_ZL30632_80032 0x24
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_SI5384 0x25
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_C827 0x31
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX 0x47
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_GEN_GPS 0x48
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_E610_PTC 0x49
+ u8 rsvd[9];
+};
+
+/* Get Link Topology Pin (direct, 0x06E1) */
+struct ixgbe_aci_cmd_get_link_topo_pin {
+ struct ixgbe_aci_cmd_link_topo_addr addr;
+ u8 input_io_params;
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_GPIO 0
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RESET_N 1
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_INT_N 2
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_PRESENT_N 3
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_TX_DIS 4
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_MODSEL_N 5
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_LPMODE 6
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_TX_FAULT 7
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RX_LOSS 8
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RS0 9
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RS1 10
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_EEPROM_WP 11
+/* 12 repeats intentionally due to two different uses depending on context */
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_LED 12
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RED_LED 12
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_GREEN_LED 13
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_BLUE_LED 14
+#define IXGBE_ACI_LINK_TOPO_INPUT_IO_TYPE_GPIO 3
+/* Use IXGBE_ACI_LINK_TOPO_NODE_TYPE_* for the type values */
+ u8 output_io_params;
+/* Use IXGBE_ACI_LINK_TOPO_NODE_TYPE_* for the type values */
+ u8 output_io_flags;
+#define IXGBE_ACI_LINK_TOPO_OUTPUT_POLARITY BIT(5)
+#define IXGBE_ACI_LINK_TOPO_OUTPUT_VALUE BIT(6)
+#define IXGBE_ACI_LINK_TOPO_OUTPUT_DRIVEN BIT(7)
+ u8 rsvd[7];
+};
+
+/* Set Port Identification LED (direct, 0x06E9) */
+struct ixgbe_aci_cmd_set_port_id_led {
+ u8 lport_num;
+ u8 lport_num_valid;
+ u8 ident_mode;
+ u8 rsvd[13];
+};
+
+#define IXGBE_ACI_PORT_ID_PORT_NUM_VALID BIT(0)
+#define IXGBE_ACI_PORT_IDENT_LED_ORIG 0
+#define IXGBE_ACI_PORT_IDENT_LED_BLINK BIT(0)
+
+/* Read/Write SFF EEPROM command (indirect 0x06EE) */
+struct ixgbe_aci_cmd_sff_eeprom {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define IXGBE_ACI_SFF_PORT_NUM_VALID BIT(0)
+ __le16 i2c_bus_addr;
+#define IXGBE_ACI_SFF_I2CBUS_7BIT_M GENMASK(6, 0)
+#define IXGBE_ACI_SFF_I2CBUS_10BIT_M GENMASK(9, 0)
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_M BIT(10)
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_7BIT 0
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_10BIT IXGBE_ACI_SFF_I2CBUS_TYPE_M
+#define IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE 0
+#define IXGBE_ACI_SFF_UPDATE_PAGE 1
+#define IXGBE_ACI_SFF_UPDATE_BANK 2
+#define IXGBE_ACI_SFF_UPDATE_PAGE_BANK 3
+#define IXGBE_ACI_SFF_IS_WRITE BIT(15)
+ __le16 i2c_offset;
+ u8 module_bank;
+ u8 module_page;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Write commands (indirect 0x0703)
+ * NVM Write Activate commands (direct 0x0707)
+ * NVM Shadow RAM Dump commands (direct 0x0707)
+ */
+struct ixgbe_aci_cmd_nvm {
+#define IXGBE_ACI_NVM_MAX_OFFSET 0xFFFFFF
+ __le16 offset_low;
+ u8 offset_high; /* For Write Activate offset_high is used as flags2 */
+#define IXGBE_ACI_NVM_OFFSET_HI_A_MASK GENMASK(15, 8)
+#define IXGBE_ACI_NVM_OFFSET_HI_U_MASK GENMASK(23, 16)
+ u8 cmd_flags;
+#define IXGBE_ACI_NVM_LAST_CMD BIT(0)
+#define IXGBE_ACI_NVM_PCIR_REQ BIT(0) /* Used by NVM Write reply */
+#define IXGBE_ACI_NVM_PRESERVE_ALL BIT(1)
+#define IXGBE_ACI_NVM_ACTIV_SEL_NVM BIT(3) /* Write Activate/SR Dump only */
+#define IXGBE_ACI_NVM_ACTIV_SEL_OROM BIT(4)
+#define IXGBE_ACI_NVM_ACTIV_SEL_NETLIST BIT(5)
+#define IXGBE_ACI_NVM_SPECIAL_UPDATE BIT(6)
+#define IXGBE_ACI_NVM_REVERT_LAST_ACTIV BIT(6) /* Write Activate only */
+#define IXGBE_ACI_NVM_FLASH_ONLY BIT(7)
+#define IXGBE_ACI_NVM_RESET_LVL_M GENMASK(1, 0) /* Write reply only */
+#define IXGBE_ACI_NVM_POR_FLAG 0
+#define IXGBE_ACI_NVM_PERST_FLAG 1
+#define IXGBE_ACI_NVM_EMPR_FLAG 2
+#define IXGBE_ACI_NVM_EMPR_ENA BIT(0) /* Write Activate reply only */
+#define IXGBE_ACI_NVM_NO_PRESERVATION 0x0
+#define IXGBE_ACI_NVM_PRESERVE_SELECTED 0x6
+
+ /* For Write Activate, several flags are sent as part of a separate
+ * flags2 field using a separate byte. For simplicity of the software
+ * interface, we pass the flags as a 16 bit value so these flags are
+ * all offset by 8 bits
+ */
+#define IXGBE_ACI_NVM_ACTIV_REQ_EMPR BIT(8) /* NVM Write Activate only */
+ __le16 module_typeid;
+ __le16 length;
+#define IXGBE_ACI_NVM_ERASE_LEN 0xFFFF
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
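
Illustrative aside, not part of the patch: the comment above notes that Write Activate carries a second flags byte in offset_high, with the software interface passing a single 16-bit flags value. A caller would presumably split that value into the two descriptor bytes roughly as sketched below; the helper name is an assumption.

/* Sketch only: cmd_flags takes bits 0-7, offset_high carries the flags2
 * byte (bits 8-15), so e.g. IXGBE_ACI_NVM_ACTIV_REQ_EMPR lands in the
 * second byte.
 */
static void ixgbe_nvm_write_activate_flags(struct ixgbe_aci_cmd_nvm *cmd,
					   u16 flags)
{
	cmd->cmd_flags = (u8)(flags & 0xFF);
	cmd->offset_high = (u8)((flags >> 8) & 0xFF);
}
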
+/* NVM Module_Type ID, needed offset and read_len for
+ * struct ixgbe_aci_cmd_nvm.
+ */
+#define IXGBE_ACI_NVM_START_POINT 0
+
+/* NVM Checksum Command (direct, 0x0706) */
+struct ixgbe_aci_cmd_nvm_checksum {
+ u8 flags;
+#define IXGBE_ACI_NVM_CHECKSUM_VERIFY BIT(0)
+#define IXGBE_ACI_NVM_CHECKSUM_RECALC BIT(1)
+ u8 rsvd;
+ __le16 checksum; /* Used only by response */
+#define IXGBE_ACI_NVM_CHECKSUM_CORRECT 0xBABA
+ u8 rsvd2[12];
+};
+
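
Illustrative aside: the checksum command above implies a verify flow of sending the command with the VERIFY flag set and comparing the checksum echoed in the response against IXGBE_ACI_NVM_CHECKSUM_CORRECT. The sketch below shows only that comparison and assumes the descriptor send path exists elsewhere.

/* Sketch only: response check for an NVM checksum VERIFY request. */
static bool ixgbe_nvm_checksum_response_ok(const struct ixgbe_aci_cmd_nvm_checksum *resp)
{
	return le16_to_cpu(resp->checksum) == IXGBE_ACI_NVM_CHECKSUM_CORRECT;
}
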
+/* Used for NVM Set Package Data command - 0x070A */
+struct ixgbe_aci_cmd_nvm_pkg_data {
+ u8 reserved[3];
+ u8 cmd_flags;
+#define IXGBE_ACI_NVM_PKG_DELETE BIT(0) /* used for command call */
+
+ u32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Used for Pass Component Table command - 0x070B */
+struct ixgbe_aci_cmd_nvm_pass_comp_tbl {
+ u8 component_response; /* Response only */
+#define IXGBE_ACI_NVM_PASS_COMP_CAN_BE_UPDATED 0x0
+#define IXGBE_ACI_NVM_PASS_COMP_CAN_MAY_BE_UPDATEABLE 0x1
+#define IXGBE_ACI_NVM_PASS_COMP_CAN_NOT_BE_UPDATED 0x2
+#define IXGBE_ACI_NVM_PASS_COMP_PARTIAL_CHECK 0x3
+ u8 component_response_code; /* Response only */
+#define IXGBE_ACI_NVM_PASS_COMP_CAN_BE_UPDATED_CODE 0x0
+#define IXGBE_ACI_NVM_PASS_COMP_STAMP_IDENTICAL_CODE 0x1
+#define IXGBE_ACI_NVM_PASS_COMP_STAMP_LOWER 0x2
+#define IXGBE_ACI_NVM_PASS_COMP_INVALID_STAMP_CODE 0x3
+#define IXGBE_ACI_NVM_PASS_COMP_CONFLICT_CODE 0x4
+#define IXGBE_ACI_NVM_PASS_COMP_PRE_REQ_NOT_MET_CODE 0x5
+#define IXGBE_ACI_NVM_PASS_COMP_NOT_SUPPORTED_CODE 0x6
+#define IXGBE_ACI_NVM_PASS_COMP_CANNOT_DOWNGRADE_CODE 0x7
+#define IXGBE_ACI_NVM_PASS_COMP_INCOMPLETE_IMAGE_CODE 0x8
+#define IXGBE_ACI_NVM_PASS_COMP_VER_STR_IDENTICAL_CODE 0xA
+#define IXGBE_ACI_NVM_PASS_COMP_VER_STR_LOWER_CODE 0xB
+ u8 reserved;
+ u8 transfer_flag;
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ixgbe_aci_cmd_nvm_comp_tbl {
+ __le16 comp_class;
+#define NVM_COMP_CLASS_ALL_FW 0x000A
+
+ __le16 comp_id;
+#define NVM_COMP_ID_OROM 0x5
+#define NVM_COMP_ID_NVM 0x6
+#define NVM_COMP_ID_NETLIST 0x8
+
+ u8 comp_class_idx;
+#define FWU_COMP_CLASS_IDX_NOT_USE 0x0
+
+ __le32 comp_cmp_stamp;
+ u8 cvs_type;
+#define NVM_CVS_TYPE_ASCII 0x1
+
+ u8 cvs_len;
+ u8 cvs[]; /* Component Version String */
+} __packed;
+
+/* E610-specific adapter context structures */
+
+struct ixgbe_link_status {
+ /* Refer to ixgbe_aci_phy_type for bits definition */
+ u64 phy_type_low;
+ u64 phy_type_high;
+ u16 max_frame_size;
+ u16 link_speed;
+ u16 req_speeds;
+ u8 topo_media_conflict;
+ u8 link_cfg_err;
+ u8 lse_ena; /* Link Status Event notification */
+ u8 link_info;
+ u8 an_info;
+ u8 ext_info;
+ u8 fec_info;
+ u8 pacing;
+ /* Refer to #define from module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE]
+ * of ixgbe_aci_get_phy_caps structure
+ */
+ u8 module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE];
+};
+
+/* Common HW capabilities for SW use */
+struct ixgbe_hw_caps {
+ /* Write CSR protection */
+ u64 wr_csr_prot;
+ u32 switching_mode;
+ /* switching mode supported - EVB switching (including cloud) */
+#define IXGBE_NVM_IMAGE_TYPE_EVB 0x0
+
+ /* Manageability mode & supported protocols over MCTP */
+ u32 mgmt_mode;
+#define IXGBE_MGMT_MODE_PASS_THRU_MODE_M GENMASK(3, 0)
+#define IXGBE_MGMT_MODE_CTL_INTERFACE_M GENMASK(7, 4)
+#define IXGBE_MGMT_MODE_REDIR_SB_INTERFACE_M GENMASK(11, 8)
+
+ u32 mgmt_protocols_mctp;
+#define IXGBE_MGMT_MODE_PROTO_RSVD BIT(0)
+#define IXGBE_MGMT_MODE_PROTO_PLDM BIT(1)
+#define IXGBE_MGMT_MODE_PROTO_OEM BIT(2)
+#define IXGBE_MGMT_MODE_PROTO_NC_SI BIT(3)
+
+ u32 os2bmc;
+ u32 valid_functions;
+ /* DCB capabilities */
+ u32 active_tc_bitmap;
+ u32 maxtc;
+
+ /* RSS related capabilities */
+ u32 rss_table_size; /* 512 for PFs and 64 for VFs */
+ u32 rss_table_entry_width; /* RSS Entry width in bits */
+
+ /* Tx/Rx queues */
+ u32 num_rxq; /* Number/Total Rx queues */
+ u32 rxq_first_id; /* First queue ID for Rx queues */
+ u32 num_txq; /* Number/Total Tx queues */
+ u32 txq_first_id; /* First queue ID for Tx queues */
+
+ /* MSI-X vectors */
+ u32 num_msix_vectors;
+ u32 msix_vector_first_id;
+
+ /* Max MTU for function or device */
+ u32 max_mtu;
+
+ /* WOL related */
+ u32 num_wol_proxy_fltr;
+ u32 wol_proxy_vsi_seid;
+
+ /* LED/SDP pin count */
+ u32 led_pin_num;
+ u32 sdp_pin_num;
+
+ /* LED/SDP - Supports up to 12 LED pins and 8 SDP signals */
+#define IXGBE_MAX_SUPPORTED_GPIO_LED 12
+#define IXGBE_MAX_SUPPORTED_GPIO_SDP 8
+ u8 led[IXGBE_MAX_SUPPORTED_GPIO_LED];
+ u8 sdp[IXGBE_MAX_SUPPORTED_GPIO_SDP];
+ /* SR-IOV virtualization */
+ u8 sr_iov_1_1; /* SR-IOV enabled */
+ /* VMDQ */
+ u8 vmdq; /* VMDQ supported */
+
+ /* EVB capabilities */
+ u8 evb_802_1_qbg; /* Edge Virtual Bridging */
+ u8 evb_802_1_qbh; /* Bridge Port Extension */
+
+ u8 dcb;
+ u8 iscsi;
+ u8 ieee_1588;
+ u8 mgmt_cem;
+
+ /* WoL and APM support */
+#define IXGBE_WOL_SUPPORT_M BIT(0)
+#define IXGBE_ACPI_PROG_MTHD_M BIT(1)
+#define IXGBE_PROXY_SUPPORT_M BIT(2)
+ u8 apm_wol_support;
+ u8 acpi_prog_mthd;
+ u8 proxy_support;
+ bool nvm_update_pending_nvm;
+ bool nvm_update_pending_orom;
+ bool nvm_update_pending_netlist;
+#define IXGBE_NVM_PENDING_NVM_IMAGE BIT(0)
+#define IXGBE_NVM_PENDING_OROM BIT(1)
+#define IXGBE_NVM_PENDING_NETLIST BIT(2)
+ bool sec_rev_disabled;
+ bool update_disabled;
+ bool nvm_unified_update;
+ bool netlist_auth;
+#define IXGBE_NVM_MGMT_SEC_REV_DISABLED BIT(0)
+#define IXGBE_NVM_MGMT_UPDATE_DISABLED BIT(1)
+#define IXGBE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3)
+#define IXGBE_NVM_MGMT_NETLIST_AUTH_SUPPORT BIT(5)
+ bool no_drop_policy_support;
+ /* PCIe reset avoidance */
+ bool pcie_reset_avoidance; /* false: not supported, true: supported */
+ /* Post update reset restriction */
+ bool reset_restrict_support; /* false: not supported, true: supported */
+
+ /* External topology device images within the NVM */
+#define IXGBE_EXT_TOPO_DEV_IMG_COUNT 4
+ u32 ext_topo_dev_img_ver_high[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+ u32 ext_topo_dev_img_ver_low[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+ u8 ext_topo_dev_img_part_num[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_S 8
+#define IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M GENMASK(15, 8)
+ bool ext_topo_dev_img_load_en[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN BIT(0)
+ bool ext_topo_dev_img_prog_en[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_PROG_EN BIT(1)
+} __packed;
+
+#define IXGBE_OROM_CIV_SIGNATURE "$CIV"
+
+struct ixgbe_orom_civd_info {
+ u8 signature[4]; /* Must match ASCII '$CIV' characters */
+ u8 checksum; /* Simple modulo 256 sum of all structure bytes must equal 0 */
+ __le32 combo_ver; /* Combo Image Version number */
+ u8 combo_name_len; /* Length of the unicode combo image version string, max of 32 */
+ __le16 combo_name[32]; /* Unicode string representing the Combo Image version */
+} __packed;
+
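
Illustrative aside: the checksum field above is documented as a simple modulo-256 sum over the whole structure that must equal zero, which could be validated roughly as follows (helper name assumed).

/* Sketch only: sum every byte of the $CIV record, including the checksum
 * byte itself; the result must be 0 modulo 256.
 */
static bool ixgbe_orom_civd_checksum_ok(const struct ixgbe_orom_civd_info *civd)
{
	const u8 *p = (const u8 *)civd;
	u8 sum = 0;
	size_t i;

	for (i = 0; i < sizeof(*civd); i++)
		sum += p[i];

	return sum == 0;
}
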
+/* Function specific capabilities */
+struct ixgbe_hw_func_caps {
+ u32 num_allocd_vfs; /* Number of allocated VFs */
+ u32 vf_base_id; /* Logical ID of the first VF */
+ u32 guar_num_vsi;
+ struct ixgbe_hw_caps common_cap;
+ bool no_drop_policy_ena;
+};
+
+/* Device wide capabilities */
+struct ixgbe_hw_dev_caps {
+ struct ixgbe_hw_caps common_cap;
+ u32 num_vfs_exposed; /* Total number of VFs exposed */
+ u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */
+ u32 num_flow_director_fltr; /* Number of FD filters available */
+ u32 num_funcs;
+};
+
+/* ACI event information */
+struct ixgbe_aci_event {
+ struct libie_aq_desc desc;
+ u8 *msg_buf;
+ u16 msg_len;
+ u16 buf_len;
+};
+
+struct ixgbe_aci_info {
+ struct mutex lock; /* admin command interface lock */
+ enum libie_aq_err last_status; /* last status of sent admin command */
+};
+
+enum ixgbe_bank_select {
+ IXGBE_ACTIVE_FLASH_BANK,
+ IXGBE_INACTIVE_FLASH_BANK,
+};
+
+/* Option ROM version information */
+struct ixgbe_orom_info {
+ u8 major; /* Major version of OROM */
+ u8 patch; /* Patch version of OROM */
+ u16 build; /* Build version of OROM */
+ u32 srev; /* Security revision */
+};
+
+/* NVM version information */
+struct ixgbe_nvm_info {
+ u32 eetrack;
+ u32 srev;
+ u8 major;
+ u8 minor;
+} __packed;
+
+/* netlist version information */
+struct ixgbe_netlist_info {
+ u32 major; /* major high/low */
+ u32 minor; /* minor high/low */
+ u32 type; /* type high/low */
+ u32 rev; /* revision high/low */
+ u32 hash; /* SHA-1 hash word */
+ u16 cust_ver; /* customer version */
+} __packed;
+
+/* Enumeration of possible flash banks for the NVM, OROM, and Netlist modules
+ * of the flash image.
+ */
+enum ixgbe_flash_bank {
+ IXGBE_INVALID_FLASH_BANK,
+ IXGBE_1ST_FLASH_BANK,
+ IXGBE_2ND_FLASH_BANK,
+};
+
+/* information for accessing NVM, OROM, and Netlist flash banks */
+struct ixgbe_bank_info {
+ u32 nvm_ptr; /* Pointer to 1st NVM bank */
+ u32 nvm_size; /* Size of NVM bank */
+ u32 orom_ptr; /* Pointer to 1st OROM bank */
+ u32 orom_size; /* Size of OROM bank */
+ u32 netlist_ptr; /* Ptr to 1st Netlist bank */
+ u32 netlist_size; /* Size of Netlist bank */
+ enum ixgbe_flash_bank nvm_bank; /* Active NVM bank */
+ enum ixgbe_flash_bank orom_bank; /* Active OROM bank */
+ enum ixgbe_flash_bank netlist_bank; /* Active Netlist bank */
+};
+
+/* Flash Chip Information */
+struct ixgbe_flash_info {
+ struct ixgbe_orom_info orom; /* Option ROM version info */
+ u32 flash_size; /* Available flash size in bytes */
+ struct ixgbe_nvm_info nvm; /* NVM version information */
+ struct ixgbe_netlist_info netlist; /* Netlist version info */
+ struct ixgbe_bank_info banks; /* Flash Bank information */
+ u16 sr_words; /* Shadow RAM size in words */
+ u8 blank_nvm_mode; /* is NVM empty (no FW present) */
+};
+
+#endif /* _IXGBE_TYPE_E610_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index f1ffa398f6df..e67e2feb045b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include "ixgbe.h"
+#include "ixgbe_mbx.h"
#include "ixgbe_phy.h"
#include "ixgbe_x540.h"
@@ -46,7 +47,7 @@ int ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
}
/**
- * ixgbe_setup_mac_link_X540 - Set the auto advertised capabilitires
+ * ixgbe_setup_mac_link_X540 - Set the auto advertised capabilities
* @hw: pointer to hardware structure
* @speed: new link speed
* @autoneg_wait_to_complete: true when waiting for completion is needed
@@ -65,7 +66,9 @@ int ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
* Resets the hardware by resetting the transmit and receive units, masks
* and clears all interrupts, perform a PHY reset, and perform a link (MAC)
* reset.
- **/
+ *
+ * Return: 0 on success or negative value on failure
+ */
int ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
@@ -132,10 +135,14 @@ mac_reset_top:
hw->mac.num_rar_entries = IXGBE_X540_MAX_TX_QUEUES;
hw->mac.ops.init_rx_addrs(hw);
+ /* The following is not supported by E610. */
+ if (hw->mac.type == ixgbe_mac_e610)
+ return status;
+
/* Store the permanent SAN mac address */
hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
- /* Add the SAN MAC address to the RAR only if it's a valid address */
+ /* Add the SAN MAC address to RAR if it's a valid address */
if (is_valid_ether_addr(hw->mac.san_addr)) {
/* Save the SAN MAC RAR index */
hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
@@ -366,9 +373,9 @@ static int ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
}
}
- checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+ checksum = IXGBE_EEPROM_SUM - checksum;
- return (int)checksum;
+ return checksum;
}
/**
@@ -887,6 +894,7 @@ static const struct ixgbe_eeprom_operations eeprom_ops_X540 = {
.calc_checksum = &ixgbe_calc_eeprom_checksum_X540,
.validate_checksum = &ixgbe_validate_eeprom_checksum_X540,
.update_checksum = &ixgbe_update_eeprom_checksum_X540,
+ .read_pba_string = &ixgbe_read_pba_string_generic,
};
static const struct ixgbe_phy_operations phy_ops_X540 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
index b69a680d3ab5..6ed360c5b605 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
@@ -1,5 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
+
+#ifndef _IXGBE_X540_H_
+#define _IXGBE_X540_H_
#include "ixgbe_type.h"
@@ -17,3 +20,5 @@ int ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw);
int ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
+
+#endif /* _IXGBE_X540_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index a5f644934445..76d2fa3ef518 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1,9 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include "ixgbe_x540.h"
+#include "ixgbe_x550.h"
#include "ixgbe_type.h"
#include "ixgbe_common.h"
+#include "ixgbe_mbx.h"
#include "ixgbe_phy.h"
static int ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed);
@@ -18,7 +20,7 @@ static int ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
struct ixgbe_phy_info *phy = &hw->phy;
struct ixgbe_link_info *link = &hw->link;
- /* Start with X540 invariants, since so simular */
+ /* Start with X540 invariants, since so similar */
ixgbe_get_invariants_X540(hw);
if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
@@ -46,7 +48,7 @@ static int ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw)
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
- /* Start with X540 invariants, since so simular */
+ /* Start with X540 invariants, since so similar */
ixgbe_get_invariants_X540(hw);
if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
@@ -683,7 +685,7 @@ static int ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
return 0;
}
-/** ixgbe_read_iosf_sb_reg_x550 - Writes a value to specified register of the
+/** ixgbe_read_iosf_sb_reg_x550 - Reads a value from a specified register of the
* IOSF device
* @hw: pointer to hardware structure
* @reg_addr: 32 bit PHY register to write
@@ -845,7 +847,7 @@ static int ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
/** ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif
* @hw: pointer to hardware structure
- * @offset: offset of word in the EEPROM to read
+ * @offset: offset of word in the EEPROM to read
* @words: number of words
* @data: word(s) read from the EEPROM
*
@@ -1058,9 +1060,9 @@ static int ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
return status;
}
- checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+ checksum = IXGBE_EEPROM_SUM - checksum;
- return (int)checksum;
+ return checksum;
}
/** ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
@@ -1161,7 +1163,7 @@ static int ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
return status;
}
-/** ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
+/** ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif
* @hw: pointer to hardware structure
* @offset: offset of word in the EEPROM to write
* @data: word write to the EEPROM
@@ -1251,7 +1253,7 @@ static int ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
/**
* ixgbe_fw_recovery_mode_X550 - Check FW NVM recovery mode
- * @hw: pointer t hardware structure
+ * @hw: pointer to hardware structure
*
* Returns true if in FW NVM recovery mode.
*/
@@ -1265,7 +1267,7 @@ static bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw)
/** ixgbe_disable_rx_x550 - Disable RX unit
*
- * Enables the Rx DMA unit for x550
+ * Disables the Rx DMA unit for x550
**/
static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
{
@@ -1752,7 +1754,7 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
/* If no SFP module present, then return success. Return success since
- * SFP not present error is not excepted in the setup MAC link flow.
+ * SFP not present error is not accepted in the setup MAC link flow.
*/
if (ret_val == -ENOENT)
return 0;
@@ -1802,7 +1804,7 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
/* If no SFP module present, then return success. Return success since
- * SFP not present error is not excepted in the setup MAC link flow.
+ * SFP not present error is not accepted in the setup MAC link flow.
*/
if (ret_val == -ENOENT)
return 0;
@@ -2316,13 +2318,13 @@ static int ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
}
/**
- * ixgbe_get_lasi_ext_t_x550em - Determime external Base T PHY interrupt cause
+ * ixgbe_get_lasi_ext_t_x550em - Determine external Base T PHY interrupt cause
* @hw: pointer to hardware structure
* @lsc: pointer to boolean flag which indicates whether external Base T
* PHY interrupt is lsc
* @is_overtemp: indicate whether an overtemp event encountered
*
- * Determime if external Base T PHY interrupt cause is high temperature
+ * Determine if external Base T PHY interrupt cause is high temperature
* failure alarm or link status change.
**/
static int ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc,
@@ -2626,7 +2628,7 @@ static int ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
}
/** ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link
- * @hw: point to hardware structure
+ * @hw: pointer to hardware structure
*
* Configures the link between the integrated KR PHY and the external X557 PHY
* The driver will call this function when it gets a link status change
@@ -2667,7 +2669,7 @@ static int ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
if (status)
return status;
- /* If link is not still up, then no setup is necessary so return */
+ /* If the link is still not up, no setup is necessary */
status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
if (status)
return status;
@@ -2743,7 +2745,7 @@ static int ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
return -EINVAL;
- /* To turn on the LED, set mode to ON. */
+ /* To turn off the LED, set mode to OFF. */
hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
MDIO_MMD_VEND1, &phy_data);
phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK;
@@ -2766,12 +2768,12 @@ static int ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
* Sends driver version number to firmware through the manageability
* block. On success return 0
* else returns -EBUSY when encountering an error acquiring
- * semaphore, -EIO when command fails or -ENIVAL when incorrect
+ * semaphore, -EIO when command fails or -EINVAL when incorrect
* params passed.
**/
-static int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
- u8 build, u8 sub, u16 len,
- const char *driver_ver)
+int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 sub, u16 len,
+ const char *driver_ver)
{
struct ixgbe_hic_drv_info2 fw_cmd;
int ret_val;
@@ -2810,7 +2812,7 @@ static int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
return ret_val;
}
-/** ixgbe_get_lcd_x550em - Determine lowest common denominator
+/** ixgbe_get_lcd_t_x550em - Determine lowest common denominator
* @hw: pointer to hardware structure
* @lcd_speed: pointer to lowest common link speed
*
@@ -3173,7 +3175,7 @@ static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
/* If X552 (X550EM_a) and MDIO is connected to external PHY, then set
- * PHY address. This register field was has only been used for X552.
+ * PHY address. This register field has only been used for X552.
*/
if (hw->mac.type == ixgbe_mac_x550em_a &&
hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
@@ -3314,7 +3316,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
return media_type;
}
-/** ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
+/** ixgbe_init_ext_t_x550em - Start (uninstall) the external Base T PHY.
** @hw: pointer to hardware structure
**/
static int ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
@@ -3504,14 +3506,14 @@ mac_reset_top:
return status;
}
-/** ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype
+/** ixgbe_set_ethertype_anti_spoofing_x550 - Enable/Disable Ethertype
* anti-spoofing
* @hw: pointer to hardware structure
* @enable: enable or disable switch for Ethertype anti-spoofing
* @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
**/
-static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
- bool enable, int vf)
+void ixgbe_set_ethertype_anti_spoofing_x550(struct ixgbe_hw *hw,
+ bool enable, int vf)
{
int vf_target_reg = vf >> 3;
int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
@@ -3526,14 +3528,14 @@ static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}
-/** ixgbe_set_source_address_pruning_X550 - Enable/Disbale src address pruning
+/** ixgbe_set_source_address_pruning_x550 - Enable/Disable src address pruning
* @hw: pointer to hardware structure
* @enable: enable or disable source address pruning
* @pool: Rx pool to set source address pruning for
**/
-static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw,
- bool enable,
- unsigned int pool)
+void ixgbe_set_source_address_pruning_x550(struct ixgbe_hw *hw,
+ bool enable,
+ unsigned int pool)
{
u64 pfflp;
@@ -3733,7 +3735,7 @@ static int ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
* @hw: pointer to hardware structure
* @mask: Mask to specify which semaphore to release
*
- * Release the SWFW semaphore and puts the shared PHY token as needed
+ * Releases the SWFW semaphore and puts back the shared PHY token as needed
*/
static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
{
@@ -3754,7 +3756,7 @@ static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
* @phy_data: Pointer to read data from PHY register
*
* Reads a value from a specified PHY register using the SWFW lock and PHY
- * Token. The PHY Token is needed since the MDIO is shared between to MAC
+ * Token. The PHY Token is needed since the MDIO is shared between two MAC
* instances.
*/
static int ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
@@ -3798,6 +3800,122 @@ static int ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
return status;
}
+static void ixgbe_set_mdd_x550(struct ixgbe_hw *hw, bool ena)
+{
+ u32 reg_dma, reg_rdr;
+
+ reg_dma = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ reg_rdr = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+
+ if (ena) {
+ reg_dma |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
+ reg_rdr |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
+ } else {
+ reg_dma &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
+ reg_rdr &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_dma);
+ IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_rdr);
+}
+
+/**
+ * ixgbe_enable_mdd_x550 - enable malicious driver detection
+ * @hw: pointer to hardware structure
+ */
+void ixgbe_enable_mdd_x550(struct ixgbe_hw *hw)
+{
+ ixgbe_set_mdd_x550(hw, true);
+}
+
+/**
+ * ixgbe_disable_mdd_x550 - disable malicious driver detection
+ * @hw: pointer to hardware structure
+ */
+void ixgbe_disable_mdd_x550(struct ixgbe_hw *hw)
+{
+ ixgbe_set_mdd_x550(hw, false);
+}
+
+/**
+ * ixgbe_restore_mdd_vf_x550 - restore VF that was disabled during MDD event
+ * @hw: pointer to hardware structure
+ * @vf: vf index
+ */
+void ixgbe_restore_mdd_vf_x550(struct ixgbe_hw *hw, u32 vf)
+{
+ u32 idx, reg, val, num_qs, start_q, bitmask;
+
+ /* Map VF to queues */
+ reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ switch (reg & IXGBE_MRQC_MRQE_MASK) {
+ case IXGBE_MRQC_VMDQRT8TCEN:
+ num_qs = IXGBE_16VFS_QUEUES;
+ bitmask = IXGBE_16VFS_BITMASK;
+ break;
+ case IXGBE_MRQC_VMDQRSS32EN:
+ case IXGBE_MRQC_VMDQRT4TCEN:
+ num_qs = IXGBE_32VFS_QUEUES;
+ bitmask = IXGBE_32VFS_BITMASK;
+ break;
+ default:
+ num_qs = IXGBE_64VFS_QUEUES;
+ bitmask = IXGBE_64VFS_BITMASK;
+ break;
+ }
+ start_q = vf * num_qs;
+
+ /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */
+ idx = start_q / IXGBE_QUEUES_PER_REG;
+ val = bitmask << (start_q % IXGBE_QUEUES_PER_REG);
+ IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), val);
+ IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), val);
+}
+
+/**
+ * ixgbe_handle_mdd_x550 - handle malicious driver detection event
+ * @hw: pointer to hardware structure
+ * @vf_bitmap: output vf bitmap of malicious vfs
+ */
+void ixgbe_handle_mdd_x550(struct ixgbe_hw *hw, unsigned long *vf_bitmap)
+{
+ u32 i, j, reg, q, div, vf;
+ unsigned long wqbr;
+
+ /* figure out pool size for mapping to vf's */
+ reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ switch (reg & IXGBE_MRQC_MRQE_MASK) {
+ case IXGBE_MRQC_VMDQRT8TCEN:
+ div = IXGBE_16VFS_QUEUES;
+ break;
+ case IXGBE_MRQC_VMDQRSS32EN:
+ case IXGBE_MRQC_VMDQRT4TCEN:
+ div = IXGBE_32VFS_QUEUES;
+ break;
+ default:
+ div = IXGBE_64VFS_QUEUES;
+ break;
+ }
+
+ /* Read WQBR_TX and WQBR_RX and check for malicious queues */
+ for (i = 0; i < IXGBE_QUEUES_REG_AMOUNT; i++) {
+ wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i)) |
+ IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i));
+ if (!wqbr)
+ continue;
+
+ /* Get malicious queue */
+ for_each_set_bit(j, (unsigned long *)&wqbr,
+ IXGBE_QUEUES_PER_REG) {
+ /* Get queue from bitmask */
+ q = j + (i * IXGBE_QUEUES_PER_REG);
+ /* Map queue to vf */
+ vf = q / div;
+ set_bit(vf, vf_bitmap);
+ }
+ }
+}
+
#define X550_COMMON_MAC \
.init_hw = &ixgbe_init_hw_generic, \
.start_hw = &ixgbe_start_hw_X540, \
@@ -3830,9 +3948,9 @@ static int ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \
.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \
.set_source_address_pruning = \
- &ixgbe_set_source_address_pruning_X550, \
+ &ixgbe_set_source_address_pruning_x550, \
.set_ethertype_anti_spoofing = \
- &ixgbe_set_ethertype_anti_spoofing_X550, \
+ &ixgbe_set_ethertype_anti_spoofing_x550, \
.disable_rx_buff = &ixgbe_disable_rx_buff_generic, \
.enable_rx_buff = &ixgbe_enable_rx_buff_generic, \
.get_thermal_sensor_data = NULL, \
@@ -3861,6 +3979,10 @@ static const struct ixgbe_mac_operations mac_ops_X550 = {
.prot_autoc_write = prot_autoc_write_generic,
.setup_fc = ixgbe_setup_fc_generic,
.fc_autoneg = ixgbe_fc_autoneg,
+ .enable_mdd = ixgbe_enable_mdd_x550,
+ .disable_mdd = ixgbe_disable_mdd_x550,
+ .restore_mdd_vf = ixgbe_restore_mdd_vf_x550,
+ .handle_mdd = ixgbe_handle_mdd_x550,
};
static const struct ixgbe_mac_operations mac_ops_X550EM_x = {
@@ -3957,6 +4079,7 @@ static const struct ixgbe_mac_operations mac_ops_x550em_a_fw = {
.validate_checksum = &ixgbe_validate_eeprom_checksum_X550, \
.update_checksum = &ixgbe_update_eeprom_checksum_X550, \
.calc_checksum = &ixgbe_calc_eeprom_checksum_X550, \
+ .read_pba_string = &ixgbe_read_pba_string_generic, \
static const struct ixgbe_eeprom_operations eeprom_ops_X550 = {
X550_COMMON_EEP
@@ -4046,7 +4169,7 @@ static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = {
IXGBE_MVALS_INIT(X550EM_x)
};
-static const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT] = {
+const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT] = {
IXGBE_MVALS_INIT(X550EM_a)
};
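
Illustrative aside, not part of the patch: a rough sketch of how a PF-side caller might consume the MDD hooks wired into mac_ops above. The caller name, the bitmap size, and the policy of immediately restoring flagged VFs are assumptions; locking and event reporting are omitted.

/* Sketch only: poll for malicious-driver-detection events and re-enable the
 * queues of each flagged VF via the new mac_ops callbacks.
 */
static void ixgbe_example_poll_mdd(struct ixgbe_hw *hw, unsigned int num_vfs)
{
	DECLARE_BITMAP(vf_bitmap, 64);
	unsigned int vf;

	bitmap_zero(vf_bitmap, 64);
	hw->mac.ops.handle_mdd(hw, vf_bitmap);

	for_each_set_bit(vf, vf_bitmap, num_vfs)
		hw->mac.ops.restore_mdd_vf(hw, vf);
}
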
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h
new file mode 100644
index 000000000000..2a11147fb1bc
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Intel Corporation. */
+
+#ifndef _IXGBE_X550_H_
+#define _IXGBE_X550_H_
+
+#include "ixgbe_type.h"
+
+extern const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT];
+
+int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 sub, u16 len,
+ const char *driver_ver);
+void ixgbe_set_source_address_pruning_x550(struct ixgbe_hw *hw,
+ bool enable,
+ unsigned int pool);
+void ixgbe_set_ethertype_anti_spoofing_x550(struct ixgbe_hw *hw,
+ bool enable, int vf);
+
+void ixgbe_enable_mdd_x550(struct ixgbe_hw *hw);
+void ixgbe_disable_mdd_x550(struct ixgbe_hw *hw);
+void ixgbe_restore_mdd_vf_x550(struct ixgbe_hw *hw, u32 vf);
+void ixgbe_handle_mdd_x550(struct ixgbe_hw *hw, unsigned long *vf_bitmap);
+
+#endif /* _IXGBE_X550_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 3e3b471e53f0..7b941505a9d0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -398,7 +398,7 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
dma_addr_t dma;
u32 cmd_type;
- while (budget-- > 0) {
+ while (likely(budget)) {
if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
work_done = false;
break;
@@ -433,6 +433,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
xdp_ring->next_to_use++;
if (xdp_ring->next_to_use == xdp_ring->count)
xdp_ring->next_to_use = 0;
+
+ budget--;
}
if (tx_desc) {
@@ -508,7 +510,7 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_ring *ring;
if (test_bit(__IXGBE_DOWN, &adapter->state))
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 5f08779c0e4e..e177d1d58696 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef _IXGBEVF_DEFINES_H_
#define _IXGBEVF_DEFINES_H_
@@ -16,6 +16,9 @@
#define IXGBE_DEV_ID_X550_VF_HV 0x1564
#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
+#define IXGBE_DEV_ID_E610_VF 0x57AD
+#define IXGBE_SUBDEV_ID_E610_VF_HV 0x00FF
+
#define IXGBE_VF_IRQ_CLEAR_MASK 7
#define IXGBE_VF_MAX_TX_QUEUES 8
#define IXGBE_VF_MAX_RX_QUEUES 8
@@ -25,6 +28,7 @@
/* Link speed */
typedef u32 ixgbe_link_speed;
+#define IXGBE_LINK_SPEED_UNKNOWN 0
#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
#define IXGBE_LINK_SPEED_100_FULL 0x0008
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 7ac53171b041..537a60d5276f 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -276,9 +276,9 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
}
if (new_tx_count != adapter->tx_ring_count) {
- tx_ring = vmalloc(array_size(sizeof(*tx_ring),
- adapter->num_tx_queues +
- adapter->num_xdp_queues));
+ tx_ring = vmalloc_array(adapter->num_tx_queues +
+ adapter->num_xdp_queues,
+ sizeof(*tx_ring));
if (!tx_ring) {
err = -ENOMEM;
goto clear_reset;
@@ -867,19 +867,11 @@ static int ixgbevf_set_coalesce(struct net_device *netdev,
return 0;
}
-static int ixgbevf_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
- u32 *rules __always_unused)
+static u32 ixgbevf_get_rx_ring_count(struct net_device *dev)
{
struct ixgbevf_adapter *adapter = netdev_priv(dev);
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = adapter->num_rx_queues;
- return 0;
- default:
- hw_dbg(&adapter->hw, "Command parameters not supported\n");
- return -EOPNOTSUPP;
- }
+ return adapter->num_rx_queues;
}
static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev)
@@ -987,7 +979,7 @@ static const struct ethtool_ops ixgbevf_ethtool_ops = {
.get_ethtool_stats = ixgbevf_get_ethtool_stats,
.get_coalesce = ixgbevf_get_coalesce,
.set_coalesce = ixgbevf_set_coalesce,
- .get_rxnfc = ixgbevf_get_rxnfc,
+ .get_rx_ring_count = ixgbevf_get_rx_ring_count,
.get_rxfh_indir_size = ixgbevf_get_rxfh_indir_size,
.get_rxfh_key_size = ixgbevf_get_rxfh_key_size,
.get_rxfh = ixgbevf_get_rxfh,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
index 66cf17f19408..fce35924ff8b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
@@ -201,6 +201,7 @@ struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec,
/**
* ixgbevf_ipsec_parse_proto_keys - find the key and salt based on the protocol
+ * @dev: pointer to net device to program
* @xs: pointer to xfrm_state struct
* @mykey: pointer to key array to populate
* @mysalt: pointer to salt value to populate
@@ -208,10 +209,10 @@ struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec,
* This copies the protocol keys and salt to our own data tables. The
* 82599 family only supports the one algorithm.
**/
-static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
+static int ixgbevf_ipsec_parse_proto_keys(struct net_device *dev,
+ struct xfrm_state *xs,
u32 *mykey, u32 *mysalt)
{
- struct net_device *dev = xs->xso.real_dev;
unsigned char *key_data;
char *alg_name = NULL;
int key_len;
@@ -256,13 +257,14 @@ static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
/**
* ixgbevf_ipsec_add_sa - program device with a security association
+ * @dev: pointer to net device to program
* @xs: pointer to transformer state struct
* @extack: extack point to fill failure reason
**/
-static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs,
+static int ixgbevf_ipsec_add_sa(struct net_device *dev,
+ struct xfrm_state *xs,
struct netlink_ext_ack *extack)
{
- struct net_device *dev = xs->xso.real_dev;
struct ixgbevf_adapter *adapter;
struct ixgbevf_ipsec *ipsec;
u16 sa_idx;
@@ -271,6 +273,9 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs,
adapter = netdev_priv(dev);
ipsec = adapter->ipsec;
+ if (!(adapter->pf_features & IXGBEVF_PF_SUP_IPSEC))
+ return -EOPNOTSUPP;
+
if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol for IPsec offload");
return -EINVAL;
@@ -310,7 +315,8 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs,
rsa.decrypt = xs->ealg || xs->aead;
/* get the key and salt */
- ret = ixgbevf_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
+ ret = ixgbevf_ipsec_parse_proto_keys(dev, xs, rsa.key,
+ &rsa.salt);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Rx SA table");
return ret;
@@ -363,7 +369,8 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs,
if (xs->id.proto & IPPROTO_ESP)
tsa.encrypt = xs->ealg || xs->aead;
- ret = ixgbevf_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
+ ret = ixgbevf_ipsec_parse_proto_keys(dev, xs, tsa.key,
+ &tsa.salt);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Tx SA table");
memset(&tsa, 0, sizeof(tsa));
@@ -388,11 +395,12 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs,
/**
* ixgbevf_ipsec_del_sa - clear out this specific SA
+ * @dev: pointer to net device to program
* @xs: pointer to transformer state struct
**/
-static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
+static void ixgbevf_ipsec_del_sa(struct net_device *dev,
+ struct xfrm_state *xs)
{
- struct net_device *dev = xs->xso.real_dev;
struct ixgbevf_adapter *adapter;
struct ixgbevf_ipsec *ipsec;
u16 sa_idx;
@@ -400,6 +408,9 @@ static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
adapter = netdev_priv(dev);
ipsec = adapter->ipsec;
+ if (!(adapter->pf_features & IXGBEVF_PF_SUP_IPSEC))
+ return;
+
if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
@@ -428,30 +439,9 @@ static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
}
}
-/**
- * ixgbevf_ipsec_offload_ok - can this packet use the xfrm hw offload
- * @skb: current data packet
- * @xs: pointer to transformer state struct
- **/
-static bool ixgbevf_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
-{
- if (xs->props.family == AF_INET) {
- /* Offload with IPv4 options is not supported yet */
- if (ip_hdr(skb)->ihl != 5)
- return false;
- } else {
- /* Offload with IPv6 extension headers is not support yet */
- if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
- return false;
- }
-
- return true;
-}
-
static const struct xfrmdev_ops ixgbevf_xfrmdev_ops = {
.xdo_dev_state_add = ixgbevf_ipsec_add_sa,
.xdo_dev_state_delete = ixgbevf_ipsec_del_sa,
- .xdo_dev_offload_ok = ixgbevf_ipsec_offload_ok,
};
/**
@@ -628,8 +618,11 @@ void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter)
size_t size;
switch (adapter->hw.api_version) {
+ case ixgbe_mbox_api_17:
+ if (!(adapter->pf_features & IXGBEVF_PF_SUP_IPSEC))
+ return;
+ break;
case ixgbe_mbox_api_14:
- case ixgbe_mbox_api_15:
break;
default:
return;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 130cb868774c..516a6fdd23d0 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef _IXGBEVF_H_
#define _IXGBEVF_H_
@@ -241,23 +241,7 @@ struct ixgbevf_q_vector {
char name[IFNAMSIZ + 9];
/* for dynamic allocation of rings associated with this q_vector */
- struct ixgbevf_ring ring[0] ____cacheline_internodealigned_in_smp;
-#ifdef CONFIG_NET_RX_BUSY_POLL
- unsigned int state;
-#define IXGBEVF_QV_STATE_IDLE 0
-#define IXGBEVF_QV_STATE_NAPI 1 /* NAPI owns this QV */
-#define IXGBEVF_QV_STATE_POLL 2 /* poll owns this QV */
-#define IXGBEVF_QV_STATE_DISABLED 4 /* QV is disabled */
-#define IXGBEVF_QV_OWNED (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL)
-#define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED)
-#define IXGBEVF_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */
-#define IXGBEVF_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */
-#define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | \
- IXGBEVF_QV_STATE_POLL_YIELD)
-#define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | \
- IXGBEVF_QV_STATE_POLL_YIELD)
- spinlock_t lock;
-#endif /* CONFIG_NET_RX_BUSY_POLL */
+ struct ixgbevf_ring ring[] ____cacheline_internodealigned_in_smp;
};
/* microsecond values for various ITR rates shifted by 2 to fit itr register
@@ -346,7 +330,6 @@ struct ixgbevf_adapter {
int num_rx_queues;
struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */
u64 hw_csum_rx_error;
- u64 hw_rx_no_dma_resources;
int num_msix_vectors;
u64 alloc_rx_page_failed;
u64 alloc_rx_buff_failed;
@@ -363,8 +346,13 @@ struct ixgbevf_adapter {
/* structs defined in ixgbe_vf.h */
struct ixgbe_hw hw;
u16 msg_enable;
- /* Interrupt Throttle Rate */
- u32 eitr_param;
+
+ u32 pf_features;
+#define IXGBEVF_PF_SUP_IPSEC BIT(0)
+#define IXGBEVF_PF_SUP_ESX_MBX BIT(1)
+
+#define IXGBEVF_SUPPORTED_FEATURES (IXGBEVF_PF_SUP_IPSEC | \
+ IXGBEVF_PF_SUP_ESX_MBX)
struct ixgbevf_hw_stats stats;
@@ -418,6 +406,8 @@ enum ixgbevf_boards {
board_X550EM_x_vf,
board_X550EM_x_vf_hv,
board_x550em_a_vf,
+ board_e610_vf,
+ board_e610_vf_hv,
};
enum ixgbevf_xcast_modes {
@@ -434,12 +424,13 @@ extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy;
extern const struct ixgbevf_info ixgbevf_x550em_a_vf_info;
+extern const struct ixgbevf_info ixgbevf_e610_vf_info;
extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info;
extern const struct ixgbevf_info ixgbevf_X540_vf_hv_info;
extern const struct ixgbevf_info ixgbevf_X550_vf_hv_info;
extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info;
-extern const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops;
+extern const struct ixgbevf_info ixgbevf_e610_vf_hv_info;
/* needed by ethtool.c */
extern const char ixgbevf_driver_name[];
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 149911e3002a..d5ce20f47def 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
/******************************************************************************
Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
@@ -39,7 +39,7 @@ static const char ixgbevf_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
static char ixgbevf_copyright[] =
- "Copyright (c) 2009 - 2018 Intel Corporation.";
+ "Copyright (c) 2009 - 2024 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
[board_82599_vf] = &ixgbevf_82599_vf_info,
@@ -51,6 +51,8 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
[board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
[board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info,
[board_x550em_a_vf] = &ixgbevf_x550em_a_vf_info,
+ [board_e610_vf] = &ixgbevf_e610_vf_info,
+ [board_e610_vf_hv] = &ixgbevf_e610_vf_hv_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
@@ -71,6 +73,9 @@ static const struct pci_device_id ixgbevf_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf },
+ {PCI_VDEVICE_SUB(INTEL, IXGBE_DEV_ID_E610_VF, PCI_ANY_ID,
+ IXGBE_SUBDEV_ID_E610_VF_HV), board_e610_vf_hv},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_VF), board_e610_vf},
/* required last entry */
{0, }
};
@@ -732,10 +737,6 @@ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- /* XDP packets use error pointer so abort at this point */
- if (IS_ERR(skb))
- return true;
-
/* verify that the packet does not have any known errors */
if (unlikely(ixgbevf_test_staterr(rx_desc,
IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
@@ -1044,9 +1045,9 @@ static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
return IXGBEVF_XDP_TX;
}
-static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *rx_ring,
- struct xdp_buff *xdp)
+static int ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring,
+ struct xdp_buff *xdp)
{
int result = IXGBEVF_XDP_PASS;
struct ixgbevf_ring *xdp_ring;
@@ -1080,7 +1081,7 @@ out_failure:
break;
}
xdp_out:
- return ERR_PTR(-result);
+ return result;
}
static unsigned int ixgbevf_rx_frame_truesize(struct ixgbevf_ring *rx_ring,
@@ -1122,6 +1123,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
struct sk_buff *skb = rx_ring->skb;
bool xdp_xmit = false;
struct xdp_buff xdp;
+ int xdp_res = 0;
/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
@@ -1165,11 +1167,11 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, size);
#endif
- skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
+ xdp_res = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
}
- if (IS_ERR(skb)) {
- if (PTR_ERR(skb) == -IXGBEVF_XDP_TX) {
+ if (xdp_res) {
+ if (xdp_res == IXGBEVF_XDP_TX) {
xdp_xmit = true;
ixgbevf_rx_buffer_flip(rx_ring, rx_buffer,
size);
@@ -1189,7 +1191,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
}
/* exit if we failed to retrieve a buffer */
- if (!skb) {
+ if (!xdp_res && !skb) {
rx_ring->rx_stats.alloc_rx_buff_failed++;
rx_buffer->pagecnt_bias++;
break;
@@ -1203,7 +1205,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
continue;
/* verify the packet layout is correct */
- if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
+ if (xdp_res || ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
skb = NULL;
continue;
}
@@ -2269,10 +2271,36 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
+/**
+ * ixgbevf_set_features - Set features supported by PF
+ * @adapter: pointer to the adapter struct
+ *
+ * Negotiate supported features with the PF and then set pf_features accordingly.
+ */
+static void ixgbevf_set_features(struct ixgbevf_adapter *adapter)
+{
+ u32 *pf_features = &adapter->pf_features;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ err = hw->mac.ops.negotiate_features(hw, pf_features);
+ if (err && err != -EOPNOTSUPP)
+ netdev_dbg(adapter->netdev,
+ "PF feature negotiation failed.\n");
+
+ /* Also handle pre-API 1.7 cases */
+ if (hw->api_version == ixgbe_mbox_api_14)
+ *pf_features |= IXGBEVF_PF_SUP_IPSEC;
+ else if (hw->api_version == ixgbe_mbox_api_15)
+ *pf_features |= IXGBEVF_PF_SUP_ESX_MBX;
+}
+
static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
static const int api[] = {
+ ixgbe_mbox_api_17,
+ ixgbe_mbox_api_16,
ixgbe_mbox_api_15,
ixgbe_mbox_api_14,
ixgbe_mbox_api_13,
@@ -2292,7 +2320,9 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
idx++;
}
- if (hw->api_version >= ixgbe_mbox_api_15) {
+ ixgbevf_set_features(adapter);
+
+ if (adapter->pf_features & IXGBEVF_PF_SUP_ESX_MBX) {
hw->mbx.ops.init_params(hw);
memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
sizeof(struct ixgbe_mbx_operations));
@@ -2512,7 +2542,7 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
ixgbevf_napi_disable_all(adapter);
- del_timer_sync(&adapter->service_timer);
+ timer_delete_sync(&adapter->service_timer);
/* disable transmits in the hardware now that interrupts are off */
for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -2649,6 +2679,8 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_15:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
if (adapter->xdp_prog &&
hw->mac.max_tx_queues == rss)
rss = rss > 3 ? 2 : 1;
@@ -3174,8 +3206,8 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
**/
static void ixgbevf_service_timer(struct timer_list *t)
{
- struct ixgbevf_adapter *adapter = from_timer(adapter, t,
- service_timer);
+ struct ixgbevf_adapter *adapter = timer_container_of(adapter, t,
+ service_timer);
/* Reset the timer */
mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);
@@ -4321,7 +4353,7 @@ static int ixgbevf_resume(struct device *dev_d)
struct pci_dev *pdev = to_pci_dev(dev_d);
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- u32 err;
+ int err;
adapter->hw.hw_addr = adapter->io_addr;
smp_mb__before_atomic();
@@ -4643,6 +4675,8 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_15:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
(ETH_HLEN + ETH_FCS_LEN);
break;
@@ -4693,6 +4727,9 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case ixgbe_mac_X540_vf:
dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
break;
+ case ixgbe_mac_e610_vf:
+ dev_info(&pdev->dev, "Intel(R) E610 Virtual Function\n");
+ break;
case ixgbe_mac_82599_vf:
default:
dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c
index a55dd978f7ca..24d0237e7a99 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.c
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c
@@ -505,15 +505,3 @@ const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy = {
.check_for_ack = ixgbevf_check_for_ack_vf,
.check_for_rst = ixgbevf_check_for_rst_vf,
};
-
-/* Mailbox operations when running on Hyper-V.
- * On Hyper-V, PF/VF communication is not through the
- * hardware mailbox; this communication is through
- * a software mediated path.
- * Most mail box operations are noop while running on
- * Hyper-V.
- */
-const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops = {
- .init_params = ixgbevf_init_mbx_params_vf,
- .check_for_rst = ixgbevf_check_for_rst_vf,
-};
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index 835bbcc5cc8e..a8ed23ee66aa 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -66,6 +66,8 @@ enum ixgbe_pfvf_api_rev {
ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */
ixgbe_mbox_api_14, /* API version 1.4, linux/freebsd VF driver */
ixgbe_mbox_api_15, /* API version 1.5, linux/freebsd VF driver */
+ ixgbe_mbox_api_16, /* API version 1.6, linux/freebsd VF driver */
+ ixgbe_mbox_api_17, /* API version 1.7, linux/freebsd VF driver */
/* This value should always be last */
ixgbe_mbox_api_unknown, /* indicates that API version is not known */
};
@@ -102,6 +104,12 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */
+/* mailbox API, version 1.6 VF requests */
+#define IXGBE_VF_GET_PF_LINK_STATE 0x11 /* request PF to send link info */
+
+/* mailbox API, version 1.7 VF requests */
+#define IXGBE_VF_FEATURES_NEGOTIATE 0x12 /* get features supported by PF*/
+
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 1641d00d8ed3..74d320879513 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include "vf.h"
#include "ixgbevf.h"
@@ -255,7 +255,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
memset(msgbuf, 0, sizeof(msgbuf));
/* If index is one then this is the start of a new list and needs
- * indication to the PF so it can do it's own list management.
+ * indication to the PF so it can do its own list management.
* If it is zero then that tells the PF to just clear all of
* this VF's macvlans and there is no new list.
*/
@@ -313,6 +313,8 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
* is not supported for this device type.
*/
switch (hw->api_version) {
+ case ixgbe_mbox_api_17:
+ case ixgbe_mbox_api_16:
case ixgbe_mbox_api_15:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
@@ -382,6 +384,8 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
* or if the operation is not supported for this device type.
*/
switch (hw->api_version) {
+ case ixgbe_mbox_api_17:
+ case ixgbe_mbox_api_16:
case ixgbe_mbox_api_15:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
@@ -552,6 +556,8 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_15:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
break;
default:
return -EOPNOTSUPP;
@@ -625,6 +631,85 @@ static s32 ixgbevf_hv_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
}
/**
+ * ixgbevf_get_pf_link_state - Get PF's link status
+ * @hw: pointer to the HW structure
+ * @speed: link speed
+ * @link_up: indicate if link is up/down
+ *
+ * Ask PF to provide link_up state and speed of the link.
+ *
+ * Return: IXGBE_ERR_MBX in the case of a mailbox error,
+ * -EOPNOTSUPP if the op is not supported, or 0 on success.
+ */
+static int ixgbevf_get_pf_link_state(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up)
+{
+ u32 msgbuf[3] = {};
+ int err;
+
+ switch (hw->api_version) {
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ msgbuf[0] = IXGBE_VF_GET_PF_LINK_STATE;
+
+ err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
+ err = IXGBE_ERR_MBX;
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ /* No need to set @link_up to false as it will be done by
+ * ixgbevf_check_mac_link_vf().
+ */
+ } else {
+ *speed = msgbuf[1];
+ *link_up = msgbuf[2];
+ }
+
+ return err;
+}
+
+/**
+ * ixgbevf_negotiate_features_vf - negotiate supported features with PF driver
+ * @hw: pointer to the HW structure
+ * @pf_features: bitmask of features supported by PF
+ *
+ * Return: IXGBE_ERR_MBX in the case of a mailbox error,
+ * -EOPNOTSUPP if the op is not supported, or 0 on success.
+ */
+static int ixgbevf_negotiate_features_vf(struct ixgbe_hw *hw, u32 *pf_features)
+{
+ u32 msgbuf[2] = {};
+ int err;
+
+ switch (hw->api_version) {
+ case ixgbe_mbox_api_17:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ msgbuf[0] = IXGBE_VF_FEATURES_NEGOTIATE;
+ msgbuf[1] = IXGBEVF_SUPPORTED_FEATURES;
+
+ err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+
+ if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
+ err = IXGBE_ERR_MBX;
+ *pf_features = 0x0;
+ } else {
+ *pf_features = msgbuf[1];
+ }
+
+ return err;
+}
+
+/**
* ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
* @hw: pointer to the HW structure
* @vlan: 12 bit VLAN ID
@@ -659,6 +744,58 @@ mbx_err:
}
/**
+ * ixgbe_read_vflinks - Read VFLINKS register
+ * @hw: pointer to the HW structure
+ * @speed: link speed
+ * @link_up: indicate if link is up/down
+ *
+ * Get linkup status and link speed from the VFLINKS register.
+ */
+static void ixgbe_read_vflinks(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up)
+{
+ u32 vflinks = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ /* if link status is down no point in checking to see if PF is up */
+ if (!(vflinks & IXGBE_LINKS_UP)) {
+ *link_up = false;
+ return;
+ }
+
+ /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
+ * before the link status is correct
+ */
+ if (hw->mac.type == ixgbe_mac_82599_vf) {
+ for (int i = 0; i < 5; i++) {
+ udelay(100);
+ vflinks = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ if (!(vflinks & IXGBE_LINKS_UP)) {
+ *link_up = false;
+ return;
+ }
+ }
+ }
+
+ /* We reached this point so there's link */
+ *link_up = true;
+
+ switch (vflinks & IXGBE_LINKS_SPEED_82599) {
+ case IXGBE_LINKS_SPEED_10G_82599:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_1G_82599:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_100_82599:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ break;
+ default:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ }
+}
+
+/**
* ixgbevf_hv_set_vfta_vf - * Hyper-V variant - just a stub.
* @hw: unused
* @vlan: unused
@@ -702,10 +839,10 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
bool *link_up,
bool autoneg_wait_to_complete)
{
+ struct ixgbevf_adapter *adapter = hw->back;
struct ixgbe_mbx_info *mbx = &hw->mbx;
struct ixgbe_mac_info *mac = &hw->mac;
s32 ret_val = 0;
- u32 links_reg;
u32 in_msg = 0;
/* If we were hit with a reset drop the link */
@@ -715,43 +852,21 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
if (!mac->get_link_status)
goto out;
- /* if link status is down no point in checking to see if pf is up */
- links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
- if (!(links_reg & IXGBE_LINKS_UP))
- goto out;
-
- /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
- * before the link status is correct
- */
- if (mac->type == ixgbe_mac_82599_vf) {
- int i;
-
- for (i = 0; i < 5; i++) {
- udelay(100);
- links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
-
- if (!(links_reg & IXGBE_LINKS_UP))
- goto out;
- }
- }
-
- switch (links_reg & IXGBE_LINKS_SPEED_82599) {
- case IXGBE_LINKS_SPEED_10G_82599:
- *speed = IXGBE_LINK_SPEED_10GB_FULL;
- break;
- case IXGBE_LINKS_SPEED_1G_82599:
- *speed = IXGBE_LINK_SPEED_1GB_FULL;
- break;
- case IXGBE_LINKS_SPEED_100_82599:
- *speed = IXGBE_LINK_SPEED_100_FULL;
- break;
+ if (hw->mac.type == ixgbe_mac_e610_vf) {
+ ret_val = ixgbevf_get_pf_link_state(hw, speed, link_up);
+ if (ret_val)
+ goto out;
+ } else {
+ ixgbe_read_vflinks(hw, speed, link_up);
+ if (!*link_up)
+ goto out;
}
/* if the read failed it could just be a mailbox collision, best wait
* until we are called again and don't report an error
*/
if (mbx->ops.read(hw, &in_msg, 1)) {
- if (hw->api_version >= ixgbe_mbox_api_15)
+ if (adapter->pf_features & IXGBEVF_PF_SUP_ESX_MBX)
mac->get_link_status = false;
goto out;
}
@@ -951,6 +1066,8 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_15:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
break;
default:
return 0;
@@ -1005,6 +1122,7 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
.setup_link = ixgbevf_setup_mac_link_vf,
.check_link = ixgbevf_check_mac_link_vf,
.negotiate_api_version = ixgbevf_negotiate_api_version_vf,
+ .negotiate_features = ixgbevf_negotiate_features_vf,
.set_rar = ixgbevf_set_rar_vf,
.update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
.update_xcast_mode = ixgbevf_update_xcast_mode,
@@ -1076,3 +1194,13 @@ const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
.mac = ixgbe_mac_x550em_a_vf,
.mac_ops = &ixgbevf_mac_ops,
};
+
+const struct ixgbevf_info ixgbevf_e610_vf_info = {
+ .mac = ixgbe_mac_e610_vf,
+ .mac_ops = &ixgbevf_mac_ops,
+};
+
+const struct ixgbevf_info ixgbevf_e610_vf_hv_info = {
+ .mac = ixgbe_mac_e610_vf,
+ .mac_ops = &ixgbevf_hv_mac_ops,
+};
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index b4eef5b6c172..4f19b8900c29 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef __IXGBE_VF_H__
#define __IXGBE_VF_H__
@@ -26,6 +26,7 @@ struct ixgbe_mac_operations {
s32 (*stop_adapter)(struct ixgbe_hw *);
s32 (*get_bus_info)(struct ixgbe_hw *);
s32 (*negotiate_api_version)(struct ixgbe_hw *hw, int api);
+ int (*negotiate_features)(struct ixgbe_hw *hw, u32 *pf_features);
/* Link */
s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
@@ -54,6 +55,8 @@ enum ixgbe_mac_type {
ixgbe_mac_X550_vf,
ixgbe_mac_X550EM_x_vf,
ixgbe_mac_x550em_a_vf,
+ ixgbe_mac_e610,
+ ixgbe_mac_e610_vf,
ixgbe_num_macs
};
diff --git a/drivers/net/ethernet/intel/libeth/Kconfig b/drivers/net/ethernet/intel/libeth/Kconfig
index 480293b71dbc..2445b979c499 100644
--- a/drivers/net/ethernet/intel/libeth/Kconfig
+++ b/drivers/net/ethernet/intel/libeth/Kconfig
@@ -1,9 +1,15 @@
# SPDX-License-Identifier: GPL-2.0-only
-# Copyright (C) 2024 Intel Corporation
+# Copyright (C) 2024-2025 Intel Corporation
config LIBETH
- tristate
+ tristate "Common Ethernet library (libeth)" if COMPILE_TEST
select PAGE_POOL
help
libeth is a common library containing routines shared between several
drivers, but not yet promoted to the generic kernel API.
+
+config LIBETH_XDP
+ tristate "Common XDP library (libeth_xdp)" if COMPILE_TEST
+ select LIBETH
+ help
+ XDP and XSk helpers based on libeth hotpath management.
diff --git a/drivers/net/ethernet/intel/libeth/Makefile b/drivers/net/ethernet/intel/libeth/Makefile
index 52492b081132..350bc0b38bad 100644
--- a/drivers/net/ethernet/intel/libeth/Makefile
+++ b/drivers/net/ethernet/intel/libeth/Makefile
@@ -1,6 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
-# Copyright (C) 2024 Intel Corporation
+# Copyright (C) 2024-2025 Intel Corporation
obj-$(CONFIG_LIBETH) += libeth.o
libeth-y := rx.o
+libeth-y += tx.o
+
+obj-$(CONFIG_LIBETH_XDP) += libeth_xdp.o
+
+libeth_xdp-y += xdp.o
+libeth_xdp-y += xsk.o
diff --git a/drivers/net/ethernet/intel/libeth/priv.h b/drivers/net/ethernet/intel/libeth/priv.h
new file mode 100644
index 000000000000..9b811d31015c
--- /dev/null
+++ b/drivers/net/ethernet/intel/libeth/priv.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2025 Intel Corporation */
+
+#ifndef __LIBETH_PRIV_H
+#define __LIBETH_PRIV_H
+
+#include <linux/types.h>
+
+/* XDP */
+
+enum xdp_action;
+struct libeth_xdp_buff;
+struct libeth_xdp_tx_frame;
+struct skb_shared_info;
+struct xdp_frame_bulk;
+
+extern const struct xsk_tx_metadata_ops libeth_xsktmo_slow;
+
+void libeth_xsk_tx_return_bulk(const struct libeth_xdp_tx_frame *bq,
+ u32 count);
+u32 libeth_xsk_prog_exception(struct libeth_xdp_buff *xdp, enum xdp_action act,
+ int ret);
+
+struct libeth_xdp_ops {
+ void (*bulk)(const struct skb_shared_info *sinfo,
+ struct xdp_frame_bulk *bq, bool frags);
+ void (*xsk)(struct libeth_xdp_buff *xdp);
+};
+
+void libeth_attach_xdp(const struct libeth_xdp_ops *ops);
+
+static inline void libeth_detach_xdp(void)
+{
+ libeth_attach_xdp(NULL);
+}
+
+#endif /* __LIBETH_PRIV_H */
diff --git a/drivers/net/ethernet/intel/libeth/rx.c b/drivers/net/ethernet/intel/libeth/rx.c
index f20926669318..62521a1f4ec9 100644
--- a/drivers/net/ethernet/intel/libeth/rx.c
+++ b/drivers/net/ethernet/intel/libeth/rx.c
@@ -1,5 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (C) 2024 Intel Corporation */
+/* Copyright (C) 2024-2025 Intel Corporation */
+
+#define DEFAULT_SYMBOL_NAMESPACE "LIBETH"
+
+#include <linux/export.h>
#include <net/libeth/rx.h>
@@ -68,7 +72,7 @@ static u32 libeth_rx_hw_len_truesize(const struct page_pool_params *pp,
static bool libeth_rx_page_pool_params(struct libeth_fq *fq,
struct page_pool_params *pp)
{
- pp->offset = LIBETH_SKB_HEADROOM;
+ pp->offset = fq->xdp ? LIBETH_XDP_HEADROOM : LIBETH_SKB_HEADROOM;
/* HW-writeable / syncable length per one page */
pp->max_len = LIBETH_RX_PAGE_LEN(pp->offset);
@@ -155,11 +159,12 @@ int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi)
.dev = napi->dev->dev.parent,
.netdev = napi->dev,
.napi = napi,
- .dma_dir = DMA_FROM_DEVICE,
};
struct libeth_fqe *fqes;
struct page_pool *pool;
- bool ret;
+ int ret;
+
+ pp.dma_dir = fq->xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
if (!fq->hsplit)
ret = libeth_rx_page_pool_params(fq, &pp);
@@ -173,20 +178,28 @@ int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi)
return PTR_ERR(pool);
fqes = kvcalloc_node(fq->count, sizeof(*fqes), GFP_KERNEL, fq->nid);
- if (!fqes)
+ if (!fqes) {
+ ret = -ENOMEM;
goto err_buf;
+ }
+
+ ret = xdp_reg_page_pool(pool);
+ if (ret)
+ goto err_mem;
fq->fqes = fqes;
fq->pp = pool;
return 0;
+err_mem:
+ kvfree(fqes);
err_buf:
page_pool_destroy(pool);
- return -ENOMEM;
+ return ret;
}
-EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_create, LIBETH);
+EXPORT_SYMBOL_GPL(libeth_rx_fq_create);
/**
* libeth_rx_fq_destroy - destroy a &page_pool created by libeth
@@ -194,22 +207,23 @@ EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_create, LIBETH);
*/
void libeth_rx_fq_destroy(struct libeth_fq *fq)
{
+ xdp_unreg_page_pool(fq->pp);
kvfree(fq->fqes);
page_pool_destroy(fq->pp);
}
-EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_destroy, LIBETH);
+EXPORT_SYMBOL_GPL(libeth_rx_fq_destroy);
/**
- * libeth_rx_recycle_slow - recycle a libeth page from the NAPI context
- * @page: page to recycle
+ * libeth_rx_recycle_slow - recycle libeth netmem
+ * @netmem: network memory to recycle
*
* To be used on exceptions or rare cases not requiring fast inline recycling.
*/
-void libeth_rx_recycle_slow(struct page *page)
+void __cold libeth_rx_recycle_slow(netmem_ref netmem)
{
- page_pool_recycle_direct(page->pp, page);
+ page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, false);
}
-EXPORT_SYMBOL_NS_GPL(libeth_rx_recycle_slow, LIBETH);
+EXPORT_SYMBOL_GPL(libeth_rx_recycle_slow);
/* Converting abstract packet type numbers into a software structure with
* the packet parameters to do O(1) lookup on Rx.
@@ -251,7 +265,7 @@ void libeth_rx_pt_gen_hash_type(struct libeth_rx_pt *pt)
pt->hash_type |= libeth_rx_pt_xdp_iprot[pt->inner_prot];
pt->hash_type |= libeth_rx_pt_xdp_pl[pt->payload_layer];
}
-EXPORT_SYMBOL_NS_GPL(libeth_rx_pt_gen_hash_type, LIBETH);
+EXPORT_SYMBOL_GPL(libeth_rx_pt_gen_hash_type);
/* Module */
diff --git a/drivers/net/ethernet/intel/libeth/tx.c b/drivers/net/ethernet/intel/libeth/tx.c
new file mode 100644
index 000000000000..e0167f43d2a8
--- /dev/null
+++ b/drivers/net/ethernet/intel/libeth/tx.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2025 Intel Corporation */
+
+#define DEFAULT_SYMBOL_NAMESPACE "LIBETH"
+
+#include <net/libeth/xdp.h>
+
+#include "priv.h"
+
+/* Tx buffer completion */
+
+DEFINE_STATIC_CALL_NULL(bulk, libeth_xdp_return_buff_bulk);
+DEFINE_STATIC_CALL_NULL(xsk, libeth_xsk_buff_free_slow);
+
+/**
+ * libeth_tx_complete_any - perform Tx completion for one SQE of any type
+ * @sqe: Tx buffer to complete
+ * @cp: polling params
+ *
+ * Can be used to complete both regular and XDP SQEs, for example when
+ * destroying queues.
+ * When libeth_xdp is not loaded, XDPSQEs won't be handled.
+ */
+void libeth_tx_complete_any(struct libeth_sqe *sqe, struct libeth_cq_pp *cp)
+{
+ if (sqe->type >= __LIBETH_SQE_XDP_START)
+ __libeth_xdp_complete_tx(sqe, cp, static_call(bulk),
+ static_call(xsk));
+ else
+ libeth_tx_complete(sqe, cp);
+}
+EXPORT_SYMBOL_GPL(libeth_tx_complete_any);
+
+/* Module */
+
+void libeth_attach_xdp(const struct libeth_xdp_ops *ops)
+{
+ static_call_update(bulk, ops ? ops->bulk : NULL);
+ static_call_update(xsk, ops ? ops->xsk : NULL);
+}
+EXPORT_SYMBOL_GPL(libeth_attach_xdp);
diff --git a/drivers/net/ethernet/intel/libeth/xdp.c b/drivers/net/ethernet/intel/libeth/xdp.c
new file mode 100644
index 000000000000..d4ac027d9584
--- /dev/null
+++ b/drivers/net/ethernet/intel/libeth/xdp.c
@@ -0,0 +1,451 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2025 Intel Corporation */
+
+#define DEFAULT_SYMBOL_NAMESPACE "LIBETH_XDP"
+
+#include <linux/export.h>
+
+#include <net/libeth/xdp.h>
+
+#include "priv.h"
+
+/* XDPSQ sharing */
+
+DEFINE_STATIC_KEY_FALSE(libeth_xdpsq_share);
+EXPORT_SYMBOL_GPL(libeth_xdpsq_share);
+
+void __libeth_xdpsq_get(struct libeth_xdpsq_lock *lock,
+ const struct net_device *dev)
+{
+ bool warn;
+
+ spin_lock_init(&lock->lock);
+ lock->share = true;
+
+ warn = !static_key_enabled(&libeth_xdpsq_share);
+ static_branch_inc(&libeth_xdpsq_share);
+
+ if (warn && net_ratelimit())
+ netdev_warn(dev, "XDPSQ sharing enabled, possible XDP Tx slowdown\n");
+}
+EXPORT_SYMBOL_GPL(__libeth_xdpsq_get);
+
+void __libeth_xdpsq_put(struct libeth_xdpsq_lock *lock,
+ const struct net_device *dev)
+{
+ static_branch_dec(&libeth_xdpsq_share);
+
+ if (!static_key_enabled(&libeth_xdpsq_share) && net_ratelimit())
+ netdev_notice(dev, "XDPSQ sharing disabled\n");
+
+ lock->share = false;
+}
+EXPORT_SYMBOL_GPL(__libeth_xdpsq_put);
+
+void __acquires(&lock->lock)
+__libeth_xdpsq_lock(struct libeth_xdpsq_lock *lock)
+{
+ spin_lock(&lock->lock);
+}
+EXPORT_SYMBOL_GPL(__libeth_xdpsq_lock);
+
+void __releases(&lock->lock)
+__libeth_xdpsq_unlock(struct libeth_xdpsq_lock *lock)
+{
+ spin_unlock(&lock->lock);
+}
+EXPORT_SYMBOL_GPL(__libeth_xdpsq_unlock);
+
+/* XDPSQ clean-up timers */
+
+/**
+ * libeth_xdpsq_init_timer - initialize an XDPSQ clean-up timer
+ * @timer: timer to initialize
+ * @xdpsq: queue this timer belongs to
+ * @lock: corresponding XDPSQ lock
+ * @poll: queue polling/completion function
+ *
+ * XDPSQ clean-up timers must be set up at queue configuration time, before
+ * the queue is used. Set the required pointers and the cleaning callback.
+ */
+void libeth_xdpsq_init_timer(struct libeth_xdpsq_timer *timer, void *xdpsq,
+ struct libeth_xdpsq_lock *lock,
+ void (*poll)(struct work_struct *work))
+{
+ timer->xdpsq = xdpsq;
+ timer->lock = lock;
+
+ INIT_DELAYED_WORK(&timer->dwork, poll);
+}
+EXPORT_SYMBOL_GPL(libeth_xdpsq_init_timer);
+
+/* ``XDP_TX`` bulking */
+
+static void __cold
+libeth_xdp_tx_return_one(const struct libeth_xdp_tx_frame *frm)
+{
+ if (frm->len_fl & LIBETH_XDP_TX_MULTI)
+ libeth_xdp_return_frags(frm->data + frm->soff, true);
+
+ libeth_xdp_return_va(frm->data, true);
+}
+
+static void __cold
+libeth_xdp_tx_return_bulk(const struct libeth_xdp_tx_frame *bq, u32 count)
+{
+ for (u32 i = 0; i < count; i++) {
+ const struct libeth_xdp_tx_frame *frm = &bq[i];
+
+ if (!(frm->len_fl & LIBETH_XDP_TX_FIRST))
+ continue;
+
+ libeth_xdp_tx_return_one(frm);
+ }
+}
+
+static void __cold libeth_trace_xdp_exception(const struct net_device *dev,
+ const struct bpf_prog *prog,
+ u32 act)
+{
+ trace_xdp_exception(dev, prog, act);
+}
+
+/**
+ * libeth_xdp_tx_exception - handle Tx exceptions of XDP frames
+ * @bq: XDP Tx frame bulk
+ * @sent: number of frames sent successfully (from this bulk)
+ * @flags: internal libeth_xdp flags (XSk, .ndo_xdp_xmit etc.)
+ *
+ * Cold helper used by __libeth_xdp_tx_flush_bulk(), do not call directly.
+ * Reports XDP Tx exceptions, frees the frames that won't be sent, or adjusts
+ * the Tx bulk to try again later.
+ */
+void __cold libeth_xdp_tx_exception(struct libeth_xdp_tx_bulk *bq, u32 sent,
+ u32 flags)
+{
+ const struct libeth_xdp_tx_frame *pos = &bq->bulk[sent];
+ u32 left = bq->count - sent;
+
+ if (!(flags & LIBETH_XDP_TX_NDO))
+ libeth_trace_xdp_exception(bq->dev, bq->prog, XDP_TX);
+
+ if (!(flags & LIBETH_XDP_TX_DROP)) {
+ memmove(bq->bulk, pos, left * sizeof(*bq->bulk));
+ bq->count = left;
+
+ return;
+ }
+
+ if (flags & LIBETH_XDP_TX_XSK)
+ libeth_xsk_tx_return_bulk(pos, left);
+ else if (!(flags & LIBETH_XDP_TX_NDO))
+ libeth_xdp_tx_return_bulk(pos, left);
+ else
+ libeth_xdp_xmit_return_bulk(pos, left, bq->dev);
+
+ bq->count = 0;
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_tx_exception);
+
+/* .ndo_xdp_xmit() implementation */
+
+u32 __cold libeth_xdp_xmit_return_bulk(const struct libeth_xdp_tx_frame *bq,
+ u32 count, const struct net_device *dev)
+{
+ u32 n = 0;
+
+ for (u32 i = 0; i < count; i++) {
+ const struct libeth_xdp_tx_frame *frm = &bq[i];
+ dma_addr_t dma;
+
+ if (frm->flags & LIBETH_XDP_TX_FIRST)
+ dma = *libeth_xdp_xmit_frame_dma(frm->xdpf);
+ else
+ dma = dma_unmap_addr(frm, dma);
+
+ dma_unmap_page(dev->dev.parent, dma, dma_unmap_len(frm, len),
+ DMA_TO_DEVICE);
+
+ /* Actual xdp_frames are freed by the core */
+ n += !!(frm->flags & LIBETH_XDP_TX_FIRST);
+ }
+
+ return n;
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_xmit_return_bulk);
+
+/* Rx polling path */
+
+/**
+ * libeth_xdp_load_stash - recreate an &xdp_buff from libeth_xdp buffer stash
+ * @dst: target &libeth_xdp_buff to initialize
+ * @src: source stash
+ *
+ * External helper used by libeth_xdp_init_buff(), do not call directly.
+ * Recreate an onstack &libeth_xdp_buff using the stash saved earlier.
+ * The only field untouched (rxq) is initialized later in the
+ * abovementioned function.
+ */
+void libeth_xdp_load_stash(struct libeth_xdp_buff *dst,
+ const struct libeth_xdp_buff_stash *src)
+{
+ dst->data = src->data;
+ dst->base.data_end = src->data + src->len;
+ dst->base.data_meta = src->data;
+ dst->base.data_hard_start = src->data - src->headroom;
+
+ dst->base.frame_sz = src->frame_sz;
+ dst->base.flags = src->flags;
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_load_stash);
+
+/**
+ * libeth_xdp_save_stash - convert &xdp_buff to a libeth_xdp buffer stash
+ * @dst: target &libeth_xdp_buff_stash to initialize
+ * @src: source XDP buffer
+ *
+ * External helper used by libeth_xdp_save_buff(), do not call directly.
+ * Use the fields from the passed XDP buffer to initialize the stash on the
+ * queue, so that a partially received frame can be finished later during
+ * the next NAPI poll.
+ */
+void libeth_xdp_save_stash(struct libeth_xdp_buff_stash *dst,
+ const struct libeth_xdp_buff *src)
+{
+ dst->data = src->data;
+ dst->headroom = src->data - src->base.data_hard_start;
+ dst->len = src->base.data_end - src->data;
+
+ dst->frame_sz = src->base.frame_sz;
+ dst->flags = src->base.flags;
+
+ WARN_ON_ONCE(dst->flags != src->base.flags);
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_save_stash);
+
+void __libeth_xdp_return_stash(struct libeth_xdp_buff_stash *stash)
+{
+ LIBETH_XDP_ONSTACK_BUFF(xdp);
+
+ libeth_xdp_load_stash(xdp, stash);
+ libeth_xdp_return_buff_slow(xdp);
+
+ stash->data = NULL;
+}
+EXPORT_SYMBOL_GPL(__libeth_xdp_return_stash);
+
+/**
+ * libeth_xdp_return_buff_slow - free &libeth_xdp_buff
+ * @xdp: buffer to free/return
+ *
+ * Slowpath version of libeth_xdp_return_buff() to be called on exceptions,
+ * queue clean-ups etc., without unwanted inlining.
+ */
+void __cold libeth_xdp_return_buff_slow(struct libeth_xdp_buff *xdp)
+{
+ __libeth_xdp_return_buff(xdp, false);
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_return_buff_slow);
+
+/**
+ * libeth_xdp_buff_add_frag - add frag to XDP buffer
+ * @xdp: head XDP buffer
+ * @fqe: Rx buffer containing the frag
+ * @len: frag length reported by HW
+ *
+ * External helper used by libeth_xdp_process_buff(), do not call directly.
+ * Frees both head and frag buffers on error.
+ *
+ * Return: true on success, false on error (no space for a new frag).
+ */
+bool libeth_xdp_buff_add_frag(struct libeth_xdp_buff *xdp,
+ const struct libeth_fqe *fqe,
+ u32 len)
+{
+ netmem_ref netmem = fqe->netmem;
+
+ if (!xdp_buff_add_frag(&xdp->base, netmem,
+ fqe->offset + netmem_get_pp(netmem)->p.offset,
+ len, fqe->truesize))
+ goto recycle;
+
+ return true;
+
+recycle:
+ libeth_rx_recycle_slow(netmem);
+ libeth_xdp_return_buff_slow(xdp);
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_buff_add_frag);
+
+/**
+ * libeth_xdp_prog_exception - handle XDP prog exceptions
+ * @bq: XDP Tx bulk
+ * @xdp: buffer to process
+ * @act: original XDP prog verdict
+ * @ret: error code if redirect failed
+ *
+ * External helper used by __libeth_xdp_run_prog() and
+ * __libeth_xsk_run_prog_slow(), do not call directly.
+ * Reports invalid @act, XDP exception trace event and frees the buffer.
+ *
+ * Return: libeth_xdp XDP prog verdict.
+ */
+u32 __cold libeth_xdp_prog_exception(const struct libeth_xdp_tx_bulk *bq,
+ struct libeth_xdp_buff *xdp,
+ enum xdp_action act, int ret)
+{
+ if (act > XDP_REDIRECT)
+ bpf_warn_invalid_xdp_action(bq->dev, bq->prog, act);
+
+ libeth_trace_xdp_exception(bq->dev, bq->prog, act);
+
+ if (xdp->base.rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
+ return libeth_xsk_prog_exception(xdp, act, ret);
+
+ libeth_xdp_return_buff_slow(xdp);
+
+ return LIBETH_XDP_DROP;
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_prog_exception);
+
+/* Tx buffer completion */
+
+static void libeth_xdp_put_netmem_bulk(netmem_ref netmem,
+ struct xdp_frame_bulk *bq)
+{
+ if (unlikely(bq->count == XDP_BULK_QUEUE_SIZE))
+ xdp_flush_frame_bulk(bq);
+
+ bq->q[bq->count++] = netmem;
+}
+
+/**
+ * libeth_xdp_return_buff_bulk - free &xdp_buff as part of a bulk
+ * @sinfo: shared info corresponding to the buffer
+ * @bq: XDP frame bulk to store the buffer
+ * @frags: whether the buffer has frags
+ *
+ * Same as xdp_return_frame_bulk(), but for &libeth_xdp_buff, speeds up Tx
+ * completion of ``XDP_TX`` buffers and allows freeing them in the same bulks
+ * as &xdp_frame buffers.
+ */
+void libeth_xdp_return_buff_bulk(const struct skb_shared_info *sinfo,
+ struct xdp_frame_bulk *bq, bool frags)
+{
+ if (!frags)
+ goto head;
+
+ for (u32 i = 0; i < sinfo->nr_frags; i++)
+ libeth_xdp_put_netmem_bulk(skb_frag_netmem(&sinfo->frags[i]),
+ bq);
+
+head:
+ libeth_xdp_put_netmem_bulk(virt_to_netmem(sinfo), bq);
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_return_buff_bulk);
+
+/* Misc */
+
+/**
+ * libeth_xdp_queue_threshold - calculate XDP queue clean/refill threshold
+ * @count: number of descriptors in the queue
+ *
+ * The threshold is the limit at which RQs start to refill (when the number of
+ * empty buffers exceeds it) and SQs get cleaned up (when the number of free
+ * descriptors goes below it). To speed up hotpath processing, the threshold
+ * is always a power of two, closest to 1/4 of the queue length.
+ * Don't call this on the hotpath; calculate and cache the threshold during
+ * queue initialization.
+ *
+ * Return: the calculated threshold.
+ */
+u32 libeth_xdp_queue_threshold(u32 count)
+{
+ u32 quarter, low, high;
+
+ if (likely(is_power_of_2(count)))
+ return count >> 2;
+
+ quarter = DIV_ROUND_CLOSEST(count, 4);
+ low = rounddown_pow_of_two(quarter);
+ high = roundup_pow_of_two(quarter);
+
+ return high - quarter <= quarter - low ? high : low;
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_queue_threshold);
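To make the rounding rule above concrete, here is a small standalone sketch (plain userspace C, written for illustration only and not part of the patch) that mirrors the helper's math and prints the threshold for a few hypothetical queue sizes:

/* Illustration-only re-implementation of the threshold rule described above.
 * The kernel helper uses DIV_ROUND_CLOSEST()/round*_pow_of_two(); this sketch
 * open-codes them so it can be compiled and run standalone.
 */
#include <stdio.h>

static unsigned int sketch_threshold(unsigned int count)
{
	unsigned int quarter = (count + 2) / 4;	/* DIV_ROUND_CLOSEST(count, 4) */
	unsigned int low = 1, high;

	while (low * 2 <= quarter)		/* rounddown_pow_of_two() */
		low *= 2;
	high = (low == quarter) ? low : low * 2;	/* roundup_pow_of_two() */

	return high - quarter <= quarter - low ? high : low;
}

int main(void)
{
	/* 1024 -> 256 (exact quarter), 1000 -> 256, 600 -> 128 */
	printf("%u %u %u\n", sketch_threshold(1024), sketch_threshold(1000),
	       sketch_threshold(600));
	return 0;
}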
+
+/**
+ * __libeth_xdp_set_features - set XDP features for netdev
+ * @dev: &net_device to configure
+ * @xmo: XDP metadata ops (Rx hints)
+ * @zc_segs: maximum number of S/G frags the HW can transmit
+ * @tmo: XSk Tx metadata ops (Tx hints)
+ *
+ * Set all the features libeth_xdp supports. Only @dev is mandatory; when
+ * @zc_segs is zero, XSk support won't be advertised.
+ * Use the non-underscored versions in drivers instead.
+ */
+void __libeth_xdp_set_features(struct net_device *dev,
+ const struct xdp_metadata_ops *xmo,
+ u32 zc_segs,
+ const struct xsk_tx_metadata_ops *tmo)
+{
+ xdp_set_features_flag(dev,
+ NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT |
+ (zc_segs ? NETDEV_XDP_ACT_XSK_ZEROCOPY : 0) |
+ NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT_SG);
+ dev->xdp_metadata_ops = xmo;
+
+ tmo = tmo == libeth_xsktmo ? &libeth_xsktmo_slow : tmo;
+
+ dev->xdp_zc_max_segs = zc_segs ? : 1;
+ dev->xsk_tx_metadata_ops = zc_segs ? tmo : NULL;
+}
+EXPORT_SYMBOL_GPL(__libeth_xdp_set_features);
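As a hedged usage sketch (hypothetical probe-time call site; my_xdp_metadata_ops and MY_MAX_XDP_SG_SEGS are invented placeholders, and the non-underscored wrapper mentioned above is not shown in this hunk):

/* Hypothetical driver probe fragment: advertise XDP + XSk zero-copy caps.
 * A non-zero zc_segs also sets NETDEV_XDP_ACT_XSK_ZEROCOPY and installs the
 * XSk Tx metadata ops, per __libeth_xdp_set_features() above.
 */
static const struct xdp_metadata_ops my_xdp_metadata_ops = { /* Rx hints */ };

#define MY_MAX_XDP_SG_SEGS	8	/* made-up S/G limit */

static void example_set_xdp_caps(struct net_device *netdev)
{
	__libeth_xdp_set_features(netdev, &my_xdp_metadata_ops,
				  MY_MAX_XDP_SG_SEGS, libeth_xsktmo);
}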
+
+/**
+ * libeth_xdp_set_redirect - toggle the XDP redirect feature
+ * @dev: &net_device to configure
+ * @enable: whether XDP is enabled
+ *
+ * Use this when XDPSQs are not always available, to dynamically enable
+ * and disable the redirect feature.
+ */
+void libeth_xdp_set_redirect(struct net_device *dev, bool enable)
+{
+ if (enable)
+ xdp_features_set_redirect_target(dev, true);
+ else
+ xdp_features_clear_redirect_target(dev);
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_set_redirect);
+
+/* Module */
+
+static const struct libeth_xdp_ops xdp_ops __initconst = {
+ .bulk = libeth_xdp_return_buff_bulk,
+ .xsk = libeth_xsk_buff_free_slow,
+};
+
+static int __init libeth_xdp_module_init(void)
+{
+ libeth_attach_xdp(&xdp_ops);
+
+ return 0;
+}
+module_init(libeth_xdp_module_init);
+
+static void __exit libeth_xdp_module_exit(void)
+{
+ libeth_detach_xdp();
+}
+module_exit(libeth_xdp_module_exit);
+
+MODULE_DESCRIPTION("Common Ethernet library - XDP infra");
+MODULE_IMPORT_NS("LIBETH");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/intel/libeth/xsk.c b/drivers/net/ethernet/intel/libeth/xsk.c
new file mode 100644
index 000000000000..846e902e31b6
--- /dev/null
+++ b/drivers/net/ethernet/intel/libeth/xsk.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2025 Intel Corporation */
+
+#define DEFAULT_SYMBOL_NAMESPACE "LIBETH_XDP"
+
+#include <linux/export.h>
+
+#include <net/libeth/xsk.h>
+
+#include "priv.h"
+
+/* ``XDP_TX`` bulking */
+
+void __cold libeth_xsk_tx_return_bulk(const struct libeth_xdp_tx_frame *bq,
+ u32 count)
+{
+ for (u32 i = 0; i < count; i++)
+ libeth_xsk_buff_free_slow(bq[i].xsk);
+}
+
+/* XSk TMO */
+
+const struct xsk_tx_metadata_ops libeth_xsktmo_slow = {
+ .tmo_request_checksum = libeth_xsktmo_req_csum,
+};
+
+/* Rx polling path */
+
+/**
+ * libeth_xsk_buff_free_slow - free an XSk Rx buffer
+ * @xdp: buffer to free
+ *
+ * Slowpath version of xsk_buff_free() to be used on exceptions, cleanups etc.
+ * to avoid unwanted inlining.
+ */
+void libeth_xsk_buff_free_slow(struct libeth_xdp_buff *xdp)
+{
+ xsk_buff_free(&xdp->base);
+}
+EXPORT_SYMBOL_GPL(libeth_xsk_buff_free_slow);
+
+/**
+ * libeth_xsk_buff_add_frag - add frag to XSk Rx buffer
+ * @head: head buffer
+ * @xdp: frag buffer
+ *
+ * External helper used by libeth_xsk_process_buff(), do not call directly.
+ * Frees both main and frag buffers on error.
+ *
+ * Return: main buffer with attached frag on success, %NULL on error (no space
+ * for a new frag).
+ */
+struct libeth_xdp_buff *libeth_xsk_buff_add_frag(struct libeth_xdp_buff *head,
+ struct libeth_xdp_buff *xdp)
+{
+ if (!xsk_buff_add_frag(&head->base, &xdp->base))
+ goto free;
+
+ return head;
+
+free:
+ libeth_xsk_buff_free_slow(xdp);
+ libeth_xsk_buff_free_slow(head);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(libeth_xsk_buff_add_frag);
+
+/**
+ * libeth_xsk_buff_stats_frags - update onstack RQ stats with XSk frags info
+ * @rs: onstack stats to update
+ * @xdp: buffer to account
+ *
+ * External helper used by __libeth_xsk_run_pass(), do not call directly.
+ * Adds buffer's frags count and total len to the onstack stats.
+ */
+void libeth_xsk_buff_stats_frags(struct libeth_rq_napi_stats *rs,
+ const struct libeth_xdp_buff *xdp)
+{
+ libeth_xdp_buff_stats_frags(rs, xdp);
+}
+EXPORT_SYMBOL_GPL(libeth_xsk_buff_stats_frags);
+
+/**
+ * __libeth_xsk_run_prog_slow - process the non-``XDP_REDIRECT`` verdicts
+ * @xdp: buffer to process
+ * @bq: Tx bulk for queueing on ``XDP_TX``
+ * @act: verdict to process
+ * @ret: error code if ``XDP_REDIRECT`` failed
+ *
+ * External helper used by __libeth_xsk_run_prog(), do not call directly.
+ * ``XDP_REDIRECT`` is the most common and hottest verdict on XSk, thus
+ * it is processed inline. The rest goes here for out-of-line processing,
+ * together with redirect errors.
+ *
+ * Return: libeth_xdp XDP prog verdict.
+ */
+u32 __libeth_xsk_run_prog_slow(struct libeth_xdp_buff *xdp,
+ const struct libeth_xdp_tx_bulk *bq,
+ enum xdp_action act, int ret)
+{
+ switch (act) {
+ case XDP_DROP:
+ xsk_buff_free(&xdp->base);
+
+ return LIBETH_XDP_DROP;
+ case XDP_TX:
+ return LIBETH_XDP_TX;
+ case XDP_PASS:
+ return LIBETH_XDP_PASS;
+ default:
+ break;
+ }
+
+ return libeth_xdp_prog_exception(bq, xdp, act, ret);
+}
+EXPORT_SYMBOL_GPL(__libeth_xsk_run_prog_slow);
+
+/**
+ * libeth_xsk_prog_exception - handle XDP prog exceptions on XSk
+ * @xdp: buffer to process
+ * @act: verdict returned by the prog
+ * @ret: error code if ``XDP_REDIRECT`` failed
+ *
+ * Internal. Frees the buffer and, if the queue uses XSk wakeups, stops the
+ * current NAPI poll when there are no free buffers left.
+ *
+ * Return: libeth_xdp's XDP prog verdict.
+ */
+u32 __cold libeth_xsk_prog_exception(struct libeth_xdp_buff *xdp,
+ enum xdp_action act, int ret)
+{
+ const struct xdp_buff_xsk *xsk;
+ u32 __ret = LIBETH_XDP_DROP;
+
+ if (act != XDP_REDIRECT)
+ goto drop;
+
+ xsk = container_of(&xdp->base, typeof(*xsk), xdp);
+ if (xsk_uses_need_wakeup(xsk->pool) && ret == -ENOBUFS)
+ __ret = LIBETH_XDP_ABORTED;
+
+drop:
+ libeth_xsk_buff_free_slow(xdp);
+
+ return __ret;
+}
+
+/* Refill */
+
+/**
+ * libeth_xskfq_create - create an XSkFQ
+ * @fq: fill queue to initialize
+ *
+ * Allocates the FQEs and initializes the fields used by libeth_xdp: number
+ * of buffers to refill, refill threshold and buffer len.
+ *
+ * Return: %0 on success, -errno otherwise.
+ */
+int libeth_xskfq_create(struct libeth_xskfq *fq)
+{
+ fq->fqes = kvcalloc_node(fq->count, sizeof(*fq->fqes), GFP_KERNEL,
+ fq->nid);
+ if (!fq->fqes)
+ return -ENOMEM;
+
+ fq->pending = fq->count;
+ fq->thresh = libeth_xdp_queue_threshold(fq->count);
+ fq->buf_len = xsk_pool_get_rx_frame_size(fq->pool);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(libeth_xskfq_create);
+
+/**
+ * libeth_xskfq_destroy - destroy an XSkFQ
+ * @fq: fill queue to destroy
+ *
+ * Zeroes the used fields and frees the FQEs array.
+ */
+void libeth_xskfq_destroy(struct libeth_xskfq *fq)
+{
+ fq->buf_len = 0;
+ fq->thresh = 0;
+ fq->pending = 0;
+
+ kvfree(fq->fqes);
+}
+EXPORT_SYMBOL_GPL(libeth_xskfq_destroy);
+
+/* .ndo_xsk_wakeup */
+
+static void libeth_xsk_napi_sched(void *info)
+{
+ __napi_schedule_irqoff(info);
+}
+
+/**
+ * libeth_xsk_init_wakeup - initialize libeth XSk wakeup structure
+ * @csd: struct to initialize
+ * @napi: NAPI corresponding to this queue
+ *
+ * libeth_xdp uses inter-processor interrupts to perform XSk wakeups. In order
+ * to do that, the corresponding CSDs must be initialized when creating the
+ * queues.
+ */
+void libeth_xsk_init_wakeup(call_single_data_t *csd, struct napi_struct *napi)
+{
+ INIT_CSD(csd, libeth_xsk_napi_sched, napi);
+}
+EXPORT_SYMBOL_GPL(libeth_xsk_init_wakeup);
+
+/**
+ * libeth_xsk_wakeup - perform an XSk wakeup
+ * @csd: CSD corresponding to the queue
+ * @qid: the stack queue index
+ *
+ * Try to mark the NAPI as missed first, so that it gets rescheduled.
+ * If it is not already scheduled, schedule it on the corresponding CPU using
+ * an IPI (or directly if already running on that CPU).
+ */
+void libeth_xsk_wakeup(call_single_data_t *csd, u32 qid)
+{
+ struct napi_struct *napi = csd->info;
+
+ if (napi_if_scheduled_mark_missed(napi) ||
+ unlikely(!napi_schedule_prep(napi)))
+ return;
+
+ if (unlikely(qid >= nr_cpu_ids))
+ qid %= nr_cpu_ids;
+
+ if (qid != raw_smp_processor_id() && cpu_online(qid))
+ smp_call_function_single_async(qid, csd);
+ else
+ __napi_schedule(napi);
+}
+EXPORT_SYMBOL_GPL(libeth_xsk_wakeup);
+
+/* Pool setup */
+
+#define LIBETH_XSK_DMA_ATTR \
+ (DMA_ATTR_WEAK_ORDERING | DMA_ATTR_SKIP_CPU_SYNC)
+
+/**
+ * libeth_xsk_setup_pool - setup or destroy an XSk pool for a queue
+ * @dev: target &net_device
+ * @qid: stack queue index to configure
+ * @enable: whether to enable or disable the pool
+ *
+ * Check that @qid is valid and then map or unmap the pool.
+ *
+ * Return: %0 on success, -errno otherwise.
+ */
+int libeth_xsk_setup_pool(struct net_device *dev, u32 qid, bool enable)
+{
+ struct xsk_buff_pool *pool;
+
+ pool = xsk_get_pool_from_qid(dev, qid);
+ if (!pool)
+ return -EINVAL;
+
+ if (enable)
+ return xsk_pool_dma_map(pool, dev->dev.parent,
+ LIBETH_XSK_DMA_ATTR);
+ else
+ xsk_pool_dma_unmap(pool, LIBETH_XSK_DMA_ATTR);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(libeth_xsk_setup_pool);
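For illustration, a hedged sketch of how a driver's .ndo_bpf handler might route pool setup through the helper above (hypothetical fragment; real drivers also quiesce and restart the affected queue, which is omitted here):

/* Hypothetical .ndo_bpf fragment: enable the pool when one is supplied,
 * disable it when bpf->xsk.pool is NULL. Assumes <linux/netdevice.h> and
 * the libeth XSk header for the declarations used.
 */
static int example_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case XDP_SETUP_XSK_POOL:
		return libeth_xsk_setup_pool(dev, bpf->xsk.queue_id,
					     !!bpf->xsk.pool);
	default:
		return -EOPNOTSUPP;
	}
}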
diff --git a/drivers/net/ethernet/intel/libie/Kconfig b/drivers/net/ethernet/intel/libie/Kconfig
index 33aff6bc8f81..70831c7e336e 100644
--- a/drivers/net/ethernet/intel/libie/Kconfig
+++ b/drivers/net/ethernet/intel/libie/Kconfig
@@ -8,3 +8,18 @@ config LIBIE
libie (Intel Ethernet library) is a common library built on top of
libeth and containing vendor-specific routines shared between several
Intel Ethernet drivers.
+
+config LIBIE_ADMINQ
+ tristate
+ help
+	  Helper functions used by Intel Ethernet drivers for the administration
+	  queue command interface (aka adminq).
+
+config LIBIE_FWLOG
+ tristate
+ select LIBIE_ADMINQ
+ help
+	  Library to support firmware logging on devices that support it.
+	  Firmware logging uses the admin queue interface to communicate with
+	  the device. Debugfs is the user interface used to configure logging
+	  and dump all collected logs.
diff --git a/drivers/net/ethernet/intel/libie/Makefile b/drivers/net/ethernet/intel/libie/Makefile
index ffd27fab916a..db57fc6780ea 100644
--- a/drivers/net/ethernet/intel/libie/Makefile
+++ b/drivers/net/ethernet/intel/libie/Makefile
@@ -4,3 +4,11 @@
obj-$(CONFIG_LIBIE) += libie.o
libie-y := rx.o
+
+obj-$(CONFIG_LIBIE_ADMINQ) += libie_adminq.o
+
+libie_adminq-y := adminq.o
+
+obj-$(CONFIG_LIBIE_FWLOG) += libie_fwlog.o
+
+libie_fwlog-y := fwlog.o
diff --git a/drivers/net/ethernet/intel/libie/adminq.c b/drivers/net/ethernet/intel/libie/adminq.c
new file mode 100644
index 000000000000..7b4ff479e7e5
--- /dev/null
+++ b/drivers/net/ethernet/intel/libie/adminq.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2025 Intel Corporation */
+
+#include <linux/module.h>
+#include <linux/net/intel/libie/adminq.h>
+
+static const char * const libie_aq_str_arr[] = {
+#define LIBIE_AQ_STR(x) \
+ [LIBIE_AQ_RC_##x] = "LIBIE_AQ_RC_" #x
+ LIBIE_AQ_STR(OK),
+ LIBIE_AQ_STR(EPERM),
+ LIBIE_AQ_STR(ENOENT),
+ LIBIE_AQ_STR(ESRCH),
+ LIBIE_AQ_STR(EIO),
+ LIBIE_AQ_STR(EAGAIN),
+ LIBIE_AQ_STR(ENOMEM),
+ LIBIE_AQ_STR(EACCES),
+ LIBIE_AQ_STR(EBUSY),
+ LIBIE_AQ_STR(EEXIST),
+ LIBIE_AQ_STR(EINVAL),
+ LIBIE_AQ_STR(ENOSPC),
+ LIBIE_AQ_STR(ENOSYS),
+ LIBIE_AQ_STR(EMODE),
+ LIBIE_AQ_STR(ENOSEC),
+ LIBIE_AQ_STR(EBADSIG),
+ LIBIE_AQ_STR(ESVN),
+ LIBIE_AQ_STR(EBADMAN),
+ LIBIE_AQ_STR(EBADBUF),
+#undef LIBIE_AQ_STR
+ "LIBIE_AQ_RC_UNKNOWN",
+};
+
+#define __LIBIE_AQ_STR_NUM (ARRAY_SIZE(libie_aq_str_arr) - 1)
+
+/**
+ * libie_aq_str - get error string based on aq error
+ * @err: admin queue error type
+ *
+ * Return: error string for passed error code
+ */
+const char *libie_aq_str(enum libie_aq_err err)
+{
+ if (err >= ARRAY_SIZE(libie_aq_str_arr) ||
+ !libie_aq_str_arr[err])
+ err = __LIBIE_AQ_STR_NUM;
+
+ return libie_aq_str_arr[err];
+}
+EXPORT_SYMBOL_NS_GPL(libie_aq_str, "LIBIE_ADMINQ");
+
+MODULE_DESCRIPTION("Intel(R) Ethernet common library - adminq helpers");
+MODULE_LICENSE("GPL");
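A brief usage sketch of the helper above (hypothetical caller, not part of the patch): a driver holding a &libie_aq_err return code can log it by name.

/* Hypothetical call site; assumes <linux/device.h> and
 * <linux/net/intel/libie/adminq.h> for the declarations used.
 */
static void example_log_aq_error(struct device *dev, enum libie_aq_err err)
{
	if (err != LIBIE_AQ_RC_OK)
		dev_err(dev, "admin queue command failed: %s\n",
			libie_aq_str(err));
}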
diff --git a/drivers/net/ethernet/intel/libie/fwlog.c b/drivers/net/ethernet/intel/libie/fwlog.c
new file mode 100644
index 000000000000..f39cc11cb7c5
--- /dev/null
+++ b/drivers/net/ethernet/intel/libie/fwlog.c
@@ -0,0 +1,1115 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, Intel Corporation. */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/net/intel/libie/fwlog.h>
+#include <linux/pci.h>
+#include <linux/random.h>
+#include <linux/vmalloc.h>
+
+#define DEFAULT_SYMBOL_NAMESPACE "LIBIE_FWLOG"
+
+/* create a define that has an extra module that doesn't really exist. this
+ * is so we can add a module 'all' to easily enable/disable all the modules
+ */
+#define LIBIE_NR_FW_LOG_MODULES (LIBIE_AQC_FW_LOG_ID_MAX + 1)
+
+/* the ordering in this array is important. it matches the ordering of the
+ * values in the FW so the index is the same value as in
+ * libie_aqc_fw_logging_mod
+ */
+static const char * const libie_fwlog_module_string[] = {
+ "general",
+ "ctrl",
+ "link",
+ "link_topo",
+ "dnl",
+ "i2c",
+ "sdp",
+ "mdio",
+ "adminq",
+ "hdma",
+ "lldp",
+ "dcbx",
+ "dcb",
+ "xlr",
+ "nvm",
+ "auth",
+ "vpd",
+ "iosf",
+ "parser",
+ "sw",
+ "scheduler",
+ "txq",
+ "rsvd",
+ "post",
+ "watchdog",
+ "task_dispatch",
+ "mng",
+ "synce",
+ "health",
+ "tsdrv",
+ "pfreg",
+ "mdlver",
+ "all",
+};
+
+/* the ordering in this array is important. it matches the ordering of the
+ * values in the FW so the index is the same value as in libie_fwlog_level
+ */
+static const char * const libie_fwlog_level_string[] = {
+ "none",
+ "error",
+ "warning",
+ "normal",
+ "verbose",
+};
+
+static const char * const libie_fwlog_log_size[] = {
+ "128K",
+ "256K",
+ "512K",
+ "1M",
+ "2M",
+};
+
+static bool libie_fwlog_ring_empty(struct libie_fwlog_ring *rings)
+{
+ return rings->head == rings->tail;
+}
+
+static void libie_fwlog_ring_increment(u16 *item, u16 size)
+{
+ *item = (*item + 1) & (size - 1);
+}
+
+static int libie_fwlog_alloc_ring_buffs(struct libie_fwlog_ring *rings)
+{
+ int i, nr_bytes;
+ u8 *mem;
+
+ nr_bytes = rings->size * LIBIE_AQ_MAX_BUF_LEN;
+ mem = vzalloc(nr_bytes);
+ if (!mem)
+ return -ENOMEM;
+
+ for (i = 0; i < rings->size; i++) {
+ struct libie_fwlog_data *ring = &rings->rings[i];
+
+ ring->data_size = LIBIE_AQ_MAX_BUF_LEN;
+ ring->data = mem;
+ mem += LIBIE_AQ_MAX_BUF_LEN;
+ }
+
+ return 0;
+}
+
+static void libie_fwlog_free_ring_buffs(struct libie_fwlog_ring *rings)
+{
+ int i;
+
+ for (i = 0; i < rings->size; i++) {
+ struct libie_fwlog_data *ring = &rings->rings[i];
+
+ /* the first ring is the base memory for the whole range so
+ * free it
+ */
+ if (!i)
+ vfree(ring->data);
+
+ ring->data = NULL;
+ ring->data_size = 0;
+ }
+}
+
+#define LIBIE_FWLOG_INDEX_TO_BYTES(n) ((128 * 1024) << (n))
+/**
+ * libie_fwlog_realloc_rings - reallocate the FW log rings
+ * @fwlog: pointer to the fwlog structure
+ * @index: the new index to use to allocate memory for the log data
+ *
+ */
+static void libie_fwlog_realloc_rings(struct libie_fwlog *fwlog, int index)
+{
+ struct libie_fwlog_ring ring;
+ int status, ring_size;
+
+ /* convert the number of bytes into a number of 4K buffers. externally
+ * the driver presents the interface to the FW log data as a number of
+ * bytes because that's easy for users to understand. internally the
+ * driver uses a ring of buffers because the driver doesn't know where
+ * the beginning and end of any line of log data is so the driver has
+ * to overwrite data as complete blocks. when the data is returned to
+ * the user the driver knows that the data is correct and the FW log
+ * can be correctly parsed by the tools
+ */
+ ring_size = LIBIE_FWLOG_INDEX_TO_BYTES(index) / LIBIE_AQ_MAX_BUF_LEN;
+ if (ring_size == fwlog->ring.size)
+ return;
+
+ /* allocate space for the new rings and buffers then release the
+ * old rings and buffers. that way if we don't have enough
+ * memory then we at least have what we had before
+ */
+ ring.rings = kcalloc(ring_size, sizeof(*ring.rings), GFP_KERNEL);
+ if (!ring.rings)
+ return;
+
+ ring.size = ring_size;
+
+ status = libie_fwlog_alloc_ring_buffs(&ring);
+ if (status) {
+ dev_warn(&fwlog->pdev->dev, "Unable to allocate memory for FW log ring data buffers\n");
+ libie_fwlog_free_ring_buffs(&ring);
+ kfree(ring.rings);
+ return;
+ }
+
+ libie_fwlog_free_ring_buffs(&fwlog->ring);
+ kfree(fwlog->ring.rings);
+
+ fwlog->ring.rings = ring.rings;
+ fwlog->ring.size = ring.size;
+ fwlog->ring.index = index;
+ fwlog->ring.head = 0;
+ fwlog->ring.tail = 0;
+}
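To make the sizing explicit (a worked note, assuming LIBIE_AQ_MAX_BUF_LEN is the 4 KiB per-buffer size the comment in libie_fwlog_realloc_rings() refers to): log_size index 0..4 maps to 128K/256K/512K/1M/2M of log data, i.e. 32, 64, 128, 256 or 512 ring buffers respectively. The standalone sketch below just evaluates that mapping.

/* Illustration only: evaluate the index -> ring-size mapping described above,
 * assuming 4 KiB admin queue buffers as stated in the function's comment.
 */
#include <stdio.h>

int main(void)
{
	for (int index = 0; index <= 4; index++) {
		unsigned int bytes = (128u * 1024) << index;

		printf("index %d: %u bytes -> %u ring buffers\n",
		       index, bytes, bytes / 4096);
	}
	return 0;
}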
+
+/**
+ * libie_fwlog_supported - return the cached state of FW logging support
+ * @fwlog: pointer to the fwlog structure
+ *
+ * This will always return false if called before libie_init_hw(), so it must be
+ * called after libie_init_hw().
+ */
+static bool libie_fwlog_supported(struct libie_fwlog *fwlog)
+{
+ return fwlog->supported;
+}
+
+/**
+ * libie_aq_fwlog_set - Set FW logging configuration AQ command (0xFF30)
+ * @fwlog: pointer to the fwlog structure
+ * @entries: entries to configure
+ * @num_entries: number of @entries
+ * @options: options from libie_fwlog_cfg->options structure
+ * @log_resolution: logging resolution
+ */
+static int
+libie_aq_fwlog_set(struct libie_fwlog *fwlog,
+ struct libie_fwlog_module_entry *entries, u16 num_entries,
+ u16 options, u16 log_resolution)
+{
+ struct libie_aqc_fw_log_cfg_resp *fw_modules;
+ struct libie_aq_desc desc = {0};
+ struct libie_aqc_fw_log *cmd;
+ int status;
+ int i;
+
+ fw_modules = kcalloc(num_entries, sizeof(*fw_modules), GFP_KERNEL);
+ if (!fw_modules)
+ return -ENOMEM;
+
+ for (i = 0; i < num_entries; i++) {
+ fw_modules[i].module_identifier =
+ cpu_to_le16(entries[i].module_id);
+ fw_modules[i].log_level = entries[i].log_level;
+ }
+
+ desc.opcode = cpu_to_le16(libie_aqc_opc_fw_logs_config);
+ desc.flags = cpu_to_le16(LIBIE_AQ_FLAG_SI) |
+ cpu_to_le16(LIBIE_AQ_FLAG_RD);
+
+ cmd = libie_aq_raw(&desc);
+
+ cmd->cmd_flags = LIBIE_AQC_FW_LOG_CONF_SET_VALID;
+ cmd->ops.cfg.log_resolution = cpu_to_le16(log_resolution);
+ cmd->ops.cfg.mdl_cnt = cpu_to_le16(num_entries);
+
+ if (options & LIBIE_FWLOG_OPTION_ARQ_ENA)
+ cmd->cmd_flags |= LIBIE_AQC_FW_LOG_CONF_AQ_EN;
+ if (options & LIBIE_FWLOG_OPTION_UART_ENA)
+ cmd->cmd_flags |= LIBIE_AQC_FW_LOG_CONF_UART_EN;
+
+ status = fwlog->send_cmd(fwlog->priv, &desc, fw_modules,
+ sizeof(*fw_modules) * num_entries);
+
+ kfree(fw_modules);
+
+ return status;
+}
+
+/**
+ * libie_fwlog_set - Set the firmware logging settings
+ * @fwlog: pointer to the fwlog structure
+ * @cfg: config used to set firmware logging
+ *
+ * This function should be called whenever the driver needs to set the firmware
+ * logging configuration. It can be called on initialization, reset, or during
+ * runtime.
+ *
+ * If the PF wishes to receive FW logging then it must register via
+ * libie_fwlog_register. Note, that libie_fwlog_register does not need to be called
+ * for init.
+ */
+static int libie_fwlog_set(struct libie_fwlog *fwlog,
+ struct libie_fwlog_cfg *cfg)
+{
+ if (!libie_fwlog_supported(fwlog))
+ return -EOPNOTSUPP;
+
+ return libie_aq_fwlog_set(fwlog, cfg->module_entries,
+ LIBIE_AQC_FW_LOG_ID_MAX, cfg->options,
+ cfg->log_resolution);
+}
+
+/**
+ * libie_aq_fwlog_register - Register PF for firmware logging events (0xFF31)
+ * @fwlog: pointer to the fwlog structure
+ * @reg: true to register and false to unregister
+ */
+static int libie_aq_fwlog_register(struct libie_fwlog *fwlog, bool reg)
+{
+ struct libie_aq_desc desc = {0};
+ struct libie_aqc_fw_log *cmd;
+
+ desc.opcode = cpu_to_le16(libie_aqc_opc_fw_logs_register);
+ desc.flags = cpu_to_le16(LIBIE_AQ_FLAG_SI);
+ cmd = libie_aq_raw(&desc);
+
+ if (reg)
+ cmd->cmd_flags = LIBIE_AQC_FW_LOG_AQ_REGISTER;
+
+ return fwlog->send_cmd(fwlog->priv, &desc, NULL, 0);
+}
+
+/**
+ * libie_fwlog_register - Register the PF for firmware logging
+ * @fwlog: pointer to the fwlog structure
+ *
+ * After this call the PF will start to receive firmware logging based on the
+ * configuration set in libie_fwlog_set.
+ */
+static int libie_fwlog_register(struct libie_fwlog *fwlog)
+{
+ int status;
+
+ if (!libie_fwlog_supported(fwlog))
+ return -EOPNOTSUPP;
+
+ status = libie_aq_fwlog_register(fwlog, true);
+ if (status)
+ dev_dbg(&fwlog->pdev->dev, "Failed to register for firmware logging events over ARQ\n");
+ else
+ fwlog->cfg.options |= LIBIE_FWLOG_OPTION_IS_REGISTERED;
+
+ return status;
+}
+
+/**
+ * libie_fwlog_unregister - Unregister the PF from firmware logging
+ * @fwlog: pointer to the fwlog structure
+ */
+static int libie_fwlog_unregister(struct libie_fwlog *fwlog)
+{
+ int status;
+
+ if (!libie_fwlog_supported(fwlog))
+ return -EOPNOTSUPP;
+
+ status = libie_aq_fwlog_register(fwlog, false);
+ if (status)
+ dev_dbg(&fwlog->pdev->dev, "Failed to unregister from firmware logging events over ARQ\n");
+ else
+ fwlog->cfg.options &= ~LIBIE_FWLOG_OPTION_IS_REGISTERED;
+
+ return status;
+}
+
+/**
+ * libie_fwlog_print_module_cfg - print current FW logging module configuration
+ * @cfg: pointer to the fwlog cfg structure
+ * @module: module to print
+ * @s: the seq file to put data into
+ */
+static void
+libie_fwlog_print_module_cfg(struct libie_fwlog_cfg *cfg, int module,
+ struct seq_file *s)
+{
+ struct libie_fwlog_module_entry *entry;
+
+ if (module != LIBIE_AQC_FW_LOG_ID_MAX) {
+ entry = &cfg->module_entries[module];
+
+ seq_printf(s, "\tModule: %s, Log Level: %s\n",
+ libie_fwlog_module_string[entry->module_id],
+ libie_fwlog_level_string[entry->log_level]);
+ } else {
+ int i;
+
+ for (i = 0; i < LIBIE_AQC_FW_LOG_ID_MAX; i++) {
+ entry = &cfg->module_entries[i];
+
+ seq_printf(s, "\tModule: %s, Log Level: %s\n",
+ libie_fwlog_module_string[entry->module_id],
+ libie_fwlog_level_string[entry->log_level]);
+ }
+ }
+}
+
+static int libie_find_module_by_dentry(struct dentry **modules, struct dentry *d)
+{
+ int i, module;
+
+ module = -1;
+ /* find the module based on the dentry */
+ for (i = 0; i < LIBIE_NR_FW_LOG_MODULES; i++) {
+ if (d == modules[i]) {
+ module = i;
+ break;
+ }
+ }
+
+ return module;
+}
+
+/**
+ * libie_debugfs_module_show - read from 'module' file
+ * @s: the opened file
+ * @v: pointer to the offset
+ */
+static int libie_debugfs_module_show(struct seq_file *s, void *v)
+{
+ struct libie_fwlog *fwlog = s->private;
+ const struct file *filp = s->file;
+ struct dentry *dentry;
+ int module;
+
+ dentry = file_dentry(filp);
+
+ module = libie_find_module_by_dentry(fwlog->debugfs_modules, dentry);
+ if (module < 0) {
+ dev_info(&fwlog->pdev->dev, "unknown module\n");
+ return -EINVAL;
+ }
+
+ libie_fwlog_print_module_cfg(&fwlog->cfg, module, s);
+
+ return 0;
+}
+
+static int libie_debugfs_module_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, libie_debugfs_module_show, inode->i_private);
+}
+
+/**
+ * libie_debugfs_module_write - write into 'module' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+libie_debugfs_module_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = file_inode(filp)->i_private;
+ struct dentry *dentry = file_dentry(filp);
+ struct device *dev = &fwlog->pdev->dev;
+ char user_val[16], *cmd_buf;
+ int module, log_level, cnt;
+
+ /* don't allow partial writes or invalid input */
+ if (*ppos != 0 || count > 8)
+ return -EINVAL;
+
+ cmd_buf = memdup_user_nul(buf, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ module = libie_find_module_by_dentry(fwlog->debugfs_modules, dentry);
+ if (module < 0) {
+ dev_info(dev, "unknown module\n");
+ return -EINVAL;
+ }
+
+ cnt = sscanf(cmd_buf, "%s", user_val);
+ if (cnt != 1)
+ return -EINVAL;
+
+ log_level = sysfs_match_string(libie_fwlog_level_string, user_val);
+ if (log_level < 0) {
+ dev_info(dev, "unknown log level '%s'\n", user_val);
+ return -EINVAL;
+ }
+
+ if (module != LIBIE_AQC_FW_LOG_ID_MAX) {
+ fwlog->cfg.module_entries[module].log_level = log_level;
+ } else {
+ /* the module 'all' is a shortcut so that we can set
+ * all of the modules to the same level quickly
+ */
+ int i;
+
+ for (i = 0; i < LIBIE_AQC_FW_LOG_ID_MAX; i++)
+ fwlog->cfg.module_entries[i].log_level = log_level;
+ }
+
+ return count;
+}
+
+static const struct file_operations libie_debugfs_module_fops = {
+ .owner = THIS_MODULE,
+ .open = libie_debugfs_module_open,
+ .read = seq_read,
+ .release = single_release,
+ .write = libie_debugfs_module_write,
+};
+
+/**
+ * libie_debugfs_nr_messages_read - read from 'nr_messages' file
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ */
+static ssize_t libie_debugfs_nr_messages_read(struct file *filp,
+ char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = filp->private_data;
+ char buff[32] = {};
+
+ snprintf(buff, sizeof(buff), "%d\n",
+ fwlog->cfg.log_resolution);
+
+ return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
+}
+
+/**
+ * libie_debugfs_nr_messages_write - write into 'nr_messages' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+libie_debugfs_nr_messages_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = filp->private_data;
+ struct device *dev = &fwlog->pdev->dev;
+ char user_val[8], *cmd_buf;
+ s16 nr_messages;
+ ssize_t ret;
+
+ /* don't allow partial writes or invalid input */
+ if (*ppos != 0 || count > 4)
+ return -EINVAL;
+
+ cmd_buf = memdup_user_nul(buf, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ ret = sscanf(cmd_buf, "%s", user_val);
+ if (ret != 1)
+ return -EINVAL;
+
+ ret = kstrtos16(user_val, 0, &nr_messages);
+ if (ret)
+ return ret;
+
+ if (nr_messages < LIBIE_AQC_FW_LOG_MIN_RESOLUTION ||
+ nr_messages > LIBIE_AQC_FW_LOG_MAX_RESOLUTION) {
+ dev_err(dev, "Invalid FW log number of messages %d, value must be between %d - %d\n",
+ nr_messages, LIBIE_AQC_FW_LOG_MIN_RESOLUTION,
+ LIBIE_AQC_FW_LOG_MAX_RESOLUTION);
+ return -EINVAL;
+ }
+
+ fwlog->cfg.log_resolution = nr_messages;
+
+ return count;
+}
+
+static const struct file_operations libie_debugfs_nr_messages_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = libie_debugfs_nr_messages_read,
+ .write = libie_debugfs_nr_messages_write,
+};
+
+/**
+ * libie_debugfs_enable_read - read from 'enable' file
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ */
+static ssize_t libie_debugfs_enable_read(struct file *filp,
+ char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = filp->private_data;
+ char buff[32] = {};
+
+ snprintf(buff, sizeof(buff), "%u\n",
+ (u16)(fwlog->cfg.options &
+ LIBIE_FWLOG_OPTION_IS_REGISTERED) >> 3);
+
+ return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
+}
+
+/**
+ * libie_debugfs_enable_write - write into 'enable' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+libie_debugfs_enable_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = filp->private_data;
+ char user_val[8], *cmd_buf;
+ bool enable;
+ ssize_t ret;
+
+ /* don't allow partial writes or invalid input */
+ if (*ppos != 0 || count > 2)
+ return -EINVAL;
+
+ cmd_buf = memdup_user_nul(buf, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ ret = sscanf(cmd_buf, "%s", user_val);
+ if (ret != 1)
+ return -EINVAL;
+
+ ret = kstrtobool(user_val, &enable);
+ if (ret)
+ goto enable_write_error;
+
+ if (enable)
+ fwlog->cfg.options |= LIBIE_FWLOG_OPTION_ARQ_ENA;
+ else
+ fwlog->cfg.options &= ~LIBIE_FWLOG_OPTION_ARQ_ENA;
+
+ ret = libie_fwlog_set(fwlog, &fwlog->cfg);
+ if (ret)
+ goto enable_write_error;
+
+ if (enable)
+ ret = libie_fwlog_register(fwlog);
+ else
+ ret = libie_fwlog_unregister(fwlog);
+
+ if (ret)
+ goto enable_write_error;
+
+ /* if we get here, nothing went wrong; return count since we didn't
+ * really write anything
+ */
+ ret = (ssize_t)count;
+
+enable_write_error:
+ /* This function always consumes all of the written input, or produces
+ * an error. Check and enforce this. Otherwise, the write operation
+ * won't complete properly.
+ */
+ if (WARN_ON(ret != (ssize_t)count && ret >= 0))
+ ret = -EIO;
+
+ return ret;
+}
+
+static const struct file_operations libie_debugfs_enable_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = libie_debugfs_enable_read,
+ .write = libie_debugfs_enable_write,
+};
+
+/**
+ * libie_debugfs_log_size_read - read from 'log_size' file
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ */
+static ssize_t libie_debugfs_log_size_read(struct file *filp,
+ char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = filp->private_data;
+ char buff[32] = {};
+ int index;
+
+ index = fwlog->ring.index;
+ snprintf(buff, sizeof(buff), "%s\n", libie_fwlog_log_size[index]);
+
+ return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
+}
+
+/**
+ * libie_debugfs_log_size_write - write into 'log_size' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+libie_debugfs_log_size_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = filp->private_data;
+ struct device *dev = &fwlog->pdev->dev;
+ char user_val[8], *cmd_buf;
+ ssize_t ret;
+ int index;
+
+ /* don't allow partial writes or invalid input */
+ if (*ppos != 0 || count > 5)
+ return -EINVAL;
+
+ cmd_buf = memdup_user_nul(buf, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ ret = sscanf(cmd_buf, "%s", user_val);
+ if (ret != 1)
+ return -EINVAL;
+
+ index = sysfs_match_string(libie_fwlog_log_size, user_val);
+ if (index < 0) {
+ dev_info(dev, "Invalid log size '%s'. The value must be one of 128K, 256K, 512K, 1M, 2M\n",
+ user_val);
+ ret = -EINVAL;
+ goto log_size_write_error;
+ } else if (fwlog->cfg.options & LIBIE_FWLOG_OPTION_IS_REGISTERED) {
+ dev_info(dev, "FW logging is currently running. Please disable FW logging to change log_size\n");
+ ret = -EINVAL;
+ goto log_size_write_error;
+ }
+
+ /* free all the buffers and the tracking info and resize */
+ libie_fwlog_realloc_rings(fwlog, index);
+
+ /* if we get here, nothing went wrong; return count since we didn't
+ * really write anything
+ */
+ ret = (ssize_t)count;
+
+log_size_write_error:
+ /* This function always consumes all of the written input, or produces
+ * an error. Check and enforce this. Otherwise, the write operation
+ * won't complete properly.
+ */
+ if (WARN_ON(ret != (ssize_t)count && ret >= 0))
+ ret = -EIO;
+
+ return ret;
+}
+
+static const struct file_operations libie_debugfs_log_size_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = libie_debugfs_log_size_read,
+ .write = libie_debugfs_log_size_write,
+};
+
+/**
+ * libie_debugfs_data_read - read from 'data' file
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ */
+static ssize_t libie_debugfs_data_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = filp->private_data;
+ int data_copied = 0;
+ bool done = false;
+
+ if (libie_fwlog_ring_empty(&fwlog->ring))
+ return 0;
+
+ while (!libie_fwlog_ring_empty(&fwlog->ring) && !done) {
+ struct libie_fwlog_data *log;
+ u16 cur_buf_len;
+
+ log = &fwlog->ring.rings[fwlog->ring.head];
+ cur_buf_len = log->data_size;
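+ /* stop once the oldest queued entry no longer fits in the
+ * remaining space of the user's buffer
+ */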
+ if (cur_buf_len >= count) {
+ done = true;
+ continue;
+ }
+
+ if (copy_to_user(buffer, log->data, cur_buf_len)) {
+ /* if there is an error then bail and return whatever
+ * the driver has copied so far
+ */
+ done = true;
+ continue;
+ }
+
+ data_copied += cur_buf_len;
+ buffer += cur_buf_len;
+ count -= cur_buf_len;
+ *ppos += cur_buf_len;
+ libie_fwlog_ring_increment(&fwlog->ring.head, fwlog->ring.size);
+ }
+
+ return data_copied;
+}
+
+/**
+ * libie_debugfs_data_write - write into 'data' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+libie_debugfs_data_write(struct file *filp, const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = filp->private_data;
+ struct device *dev = &fwlog->pdev->dev;
+ ssize_t ret;
+
+ /* don't allow partial writes */
+ if (*ppos != 0)
+ return 0;
+
+ /* any value is allowed to clear the buffer so no need to even look at
+ * what the value is
+ */
+ if (!(fwlog->cfg.options & LIBIE_FWLOG_OPTION_IS_REGISTERED)) {
+ fwlog->ring.head = 0;
+ fwlog->ring.tail = 0;
+ } else {
+ dev_info(dev, "Can't clear FW log data while FW log running\n");
+ ret = -EINVAL;
+ goto data_write_error;
+ }
+
+ /* if we get here, nothing went wrong; return count since we didn't
+ * really write anything
+ */
+ ret = (ssize_t)count;
+
+data_write_error:
+ /* This function always consumes all of the written input, or produces
+ * an error. Check and enforce this. Otherwise, the write operation
+ * won't complete properly.
+ */
+ if (WARN_ON(ret != (ssize_t)count && ret >= 0))
+ ret = -EIO;
+
+ return ret;
+}
+
+static const struct file_operations libie_debugfs_data_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = libie_debugfs_data_read,
+ .write = libie_debugfs_data_write,
+};
+
+/**
+ * libie_debugfs_fwlog_init - setup the debugfs directory
+ * @fwlog: pointer to the fwlog structure
+ * @root: debugfs root entry on which the fwlog directory will be registered
+ */
+static void libie_debugfs_fwlog_init(struct libie_fwlog *fwlog,
+ struct dentry *root)
+{
+ struct dentry *fw_modules_dir;
+ struct dentry **fw_modules;
+ int i;
+
+ /* allocate space for this first because if it fails then we don't
+ * need to unwind
+ */
+ fw_modules = kcalloc(LIBIE_NR_FW_LOG_MODULES, sizeof(*fw_modules),
+ GFP_KERNEL);
+ if (!fw_modules)
+ return;
+
+ fwlog->debugfs = debugfs_create_dir("fwlog", root);
+ if (IS_ERR(fwlog->debugfs))
+ goto err_create_module_files;
+
+ fw_modules_dir = debugfs_create_dir("modules", fwlog->debugfs);
+ if (IS_ERR(fw_modules_dir))
+ goto err_create_module_files;
+
+ for (i = 0; i < LIBIE_NR_FW_LOG_MODULES; i++) {
+ fw_modules[i] = debugfs_create_file(libie_fwlog_module_string[i],
+ 0600, fw_modules_dir, fwlog,
+ &libie_debugfs_module_fops);
+ if (IS_ERR(fw_modules[i]))
+ goto err_create_module_files;
+ }
+
+ debugfs_create_file("nr_messages", 0600, fwlog->debugfs, fwlog,
+ &libie_debugfs_nr_messages_fops);
+
+ fwlog->debugfs_modules = fw_modules;
+
+ debugfs_create_file("enable", 0600, fwlog->debugfs, fwlog,
+ &libie_debugfs_enable_fops);
+
+ debugfs_create_file("log_size", 0600, fwlog->debugfs, fwlog,
+ &libie_debugfs_log_size_fops);
+
+ debugfs_create_file("data", 0600, fwlog->debugfs, fwlog,
+ &libie_debugfs_data_fops);
+
+ return;
+
+err_create_module_files:
+ debugfs_remove_recursive(fwlog->debugfs);
+ kfree(fw_modules);
+}
+
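+/* The ring is treated as full when the tail (producer) sits one slot behind
+ * the head (consumer) modulo the ring size, i.e. one slot is always kept
+ * free to distinguish a full ring from an empty one.
+ */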
+static bool libie_fwlog_ring_full(struct libie_fwlog_ring *rings)
+{
+ u16 head, tail;
+
+ head = rings->head;
+ tail = rings->tail;
+
+ if (head < tail && (tail - head == (rings->size - 1)))
+ return true;
+ else if (head > tail && (tail == (head - 1)))
+ return true;
+
+ return false;
+}
+
+/**
+ * libie_aq_fwlog_get - Get the current firmware logging configuration (0xFF32)
+ * @fwlog: pointer to the fwlog structure
+ * @cfg: firmware logging configuration to populate
+ */
+static int libie_aq_fwlog_get(struct libie_fwlog *fwlog,
+ struct libie_fwlog_cfg *cfg)
+{
+ struct libie_aqc_fw_log_cfg_resp *fw_modules;
+ struct libie_aq_desc desc = {0};
+ struct libie_aqc_fw_log *cmd;
+ u16 module_id_cnt;
+ int status;
+ void *buf;
+ int i;
+
+ memset(cfg, 0, sizeof(*cfg));
+
+ buf = kzalloc(LIBIE_AQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ desc.opcode = cpu_to_le16(libie_aqc_opc_fw_logs_query);
+ desc.flags = cpu_to_le16(LIBIE_AQ_FLAG_SI);
+ cmd = libie_aq_raw(&desc);
+
+ cmd->cmd_flags = LIBIE_AQC_FW_LOG_AQ_QUERY;
+
+ status = fwlog->send_cmd(fwlog->priv, &desc, buf, LIBIE_AQ_MAX_BUF_LEN);
+ if (status) {
+ dev_dbg(&fwlog->pdev->dev, "Failed to get FW log configuration\n");
+ goto status_out;
+ }
+
+ module_id_cnt = le16_to_cpu(cmd->ops.cfg.mdl_cnt);
+ if (module_id_cnt < LIBIE_AQC_FW_LOG_ID_MAX) {
+ dev_dbg(&fwlog->pdev->dev, "FW returned less than the expected number of FW log module IDs\n");
+ } else if (module_id_cnt > LIBIE_AQC_FW_LOG_ID_MAX) {
+ dev_dbg(&fwlog->pdev->dev, "FW returned more than expected number of FW log module IDs, setting module_id_cnt to software expected max %u\n",
+ LIBIE_AQC_FW_LOG_ID_MAX);
+ module_id_cnt = LIBIE_AQC_FW_LOG_ID_MAX;
+ }
+
+ cfg->log_resolution = le16_to_cpu(cmd->ops.cfg.log_resolution);
+ if (cmd->cmd_flags & LIBIE_AQC_FW_LOG_CONF_AQ_EN)
+ cfg->options |= LIBIE_FWLOG_OPTION_ARQ_ENA;
+ if (cmd->cmd_flags & LIBIE_AQC_FW_LOG_CONF_UART_EN)
+ cfg->options |= LIBIE_FWLOG_OPTION_UART_ENA;
+ if (cmd->cmd_flags & LIBIE_AQC_FW_LOG_QUERY_REGISTERED)
+ cfg->options |= LIBIE_FWLOG_OPTION_IS_REGISTERED;
+
+ fw_modules = (struct libie_aqc_fw_log_cfg_resp *)buf;
+
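+ /* the response buffer carries one entry per FW log module; copy each
+ * module id and log level into the caller's configuration
+ */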
+ for (i = 0; i < module_id_cnt; i++) {
+ struct libie_aqc_fw_log_cfg_resp *fw_module = &fw_modules[i];
+
+ cfg->module_entries[i].module_id =
+ le16_to_cpu(fw_module->module_identifier);
+ cfg->module_entries[i].log_level = fw_module->log_level;
+ }
+
+status_out:
+ kfree(buf);
+ return status;
+}
+
+/**
+ * libie_fwlog_set_supported - Set if FW logging is supported by FW
+ * @fwlog: pointer to the fwlog structure
+ *
+ * If FW returns success to the libie_aq_fwlog_get call then it supports FW
+ * logging, else it doesn't. Set the fwlog_supported flag accordingly.
+ *
+ * This function is only meant to be called during driver init to determine if
+ * the FW supports FW logging.
+ */
+static void libie_fwlog_set_supported(struct libie_fwlog *fwlog)
+{
+ struct libie_fwlog_cfg *cfg;
+ int status;
+
+ fwlog->supported = false;
+
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ return;
+
+ status = libie_aq_fwlog_get(fwlog, cfg);
+ if (status)
+ dev_dbg(&fwlog->pdev->dev, "libie_aq_fwlog_get failed, FW logging is not supported on this version of FW, status %d\n",
+ status);
+ else
+ fwlog->supported = true;
+
+ kfree(cfg);
+}
+
+/**
+ * libie_fwlog_init - Initialize FW logging configuration
+ * @fwlog: pointer to the fwlog structure
+ * @api: api structure to init fwlog
+ *
+ * This function should be called on driver initialization during
+ * libie_init_hw().
+ */
+int libie_fwlog_init(struct libie_fwlog *fwlog, struct libie_fwlog_api *api)
+{
+ fwlog->api = *api;
+ libie_fwlog_set_supported(fwlog);
+
+ if (libie_fwlog_supported(fwlog)) {
+ int status;
+
+ /* read the current config from the FW and store it */
+ status = libie_aq_fwlog_get(fwlog, &fwlog->cfg);
+ if (status)
+ return status;
+
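+ /* allocate the default number of ring entries; the per-entry data
+ * buffers are allocated separately by libie_fwlog_alloc_ring_buffs()
+ */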
+ fwlog->ring.rings = kcalloc(LIBIE_FWLOG_RING_SIZE_DFLT,
+ sizeof(*fwlog->ring.rings),
+ GFP_KERNEL);
+ if (!fwlog->ring.rings) {
+ dev_warn(&fwlog->pdev->dev, "Unable to allocate memory for FW log rings\n");
+ return -ENOMEM;
+ }
+
+ fwlog->ring.size = LIBIE_FWLOG_RING_SIZE_DFLT;
+ fwlog->ring.index = LIBIE_FWLOG_RING_SIZE_INDEX_DFLT;
+
+ status = libie_fwlog_alloc_ring_buffs(&fwlog->ring);
+ if (status) {
+ dev_warn(&fwlog->pdev->dev, "Unable to allocate memory for FW log ring data buffers\n");
+ libie_fwlog_free_ring_buffs(&fwlog->ring);
+ kfree(fwlog->ring.rings);
+ return status;
+ }
+
+ libie_debugfs_fwlog_init(fwlog, api->debugfs_root);
+ } else {
+ dev_warn(&fwlog->pdev->dev, "FW logging is not supported in this NVM image. Please update the NVM to get FW log support\n");
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(libie_fwlog_init);
+
+/**
+ * libie_fwlog_deinit - unroll FW logging configuration
+ * @fwlog: pointer to the fwlog structure
+ *
+ * This function should be called in libie_deinit_hw().
+ */
+void libie_fwlog_deinit(struct libie_fwlog *fwlog)
+{
+ int status;
+
+ /* make sure FW logging is disabled to not put the FW in a weird state
+ * for the next driver load
+ */
+ fwlog->cfg.options &= ~LIBIE_FWLOG_OPTION_ARQ_ENA;
+ status = libie_fwlog_set(fwlog, &fwlog->cfg);
+ if (status)
+ dev_warn(&fwlog->pdev->dev, "Unable to turn off FW logging, status: %d\n",
+ status);
+
+ kfree(fwlog->debugfs_modules);
+
+ fwlog->debugfs_modules = NULL;
+
+ status = libie_fwlog_unregister(fwlog);
+ if (status)
+ dev_warn(&fwlog->pdev->dev, "Unable to unregister FW logging, status: %d\n",
+ status);
+
+ if (fwlog->ring.rings) {
+ libie_fwlog_free_ring_buffs(&fwlog->ring);
+ kfree(fwlog->ring.rings);
+ }
+}
+EXPORT_SYMBOL_GPL(libie_fwlog_deinit);
+
+/**
+ * libie_get_fwlog_data - copy the FW log data from ARQ event
+ * @fwlog: fwlog that the FW log event is associated with
+ * @buf: event buffer pointer
+ * @len: length of the event data
+ */
+void libie_get_fwlog_data(struct libie_fwlog *fwlog, u8 *buf, u16 len)
+{
+ struct libie_fwlog_data *log;
+
+ log = &fwlog->ring.rings[fwlog->ring.tail];
+
+ memset(log->data, 0, PAGE_SIZE);
+ log->data_size = len;
+
+ memcpy(log->data, buf, log->data_size);
+ libie_fwlog_ring_increment(&fwlog->ring.tail, fwlog->ring.size);
+
+ if (libie_fwlog_ring_full(&fwlog->ring)) {
+ /* the rings are full so bump the head to create room */
+ libie_fwlog_ring_increment(&fwlog->ring.head, fwlog->ring.size);
+ }
+}
+EXPORT_SYMBOL_GPL(libie_get_fwlog_data);
+
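+/**
+ * libie_fwlog_reregister - re-register FW logging if it was active
+ * @fwlog: pointer to the fwlog structure
+ *
+ * If FW logging was registered before, try to register it again; on failure
+ * clear the registered flag so the cached configuration stays consistent.
+ */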
+void libie_fwlog_reregister(struct libie_fwlog *fwlog)
+{
+ if (!(fwlog->cfg.options & LIBIE_FWLOG_OPTION_IS_REGISTERED))
+ return;
+
+ if (libie_fwlog_register(fwlog))
+ fwlog->cfg.options &= ~LIBIE_FWLOG_OPTION_IS_REGISTERED;
+}
+EXPORT_SYMBOL_GPL(libie_fwlog_reregister);
+
+MODULE_DESCRIPTION("Intel(R) Ethernet common library");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/intel/libie/rx.c b/drivers/net/ethernet/intel/libie/rx.c
index aceb8d8813c4..6fda656afa9c 100644
--- a/drivers/net/ethernet/intel/libie/rx.c
+++ b/drivers/net/ethernet/intel/libie/rx.c
@@ -1,6 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (C) 2024 Intel Corporation */
+/* Copyright (C) 2024-2025 Intel Corporation */
+#define DEFAULT_SYMBOL_NAMESPACE "LIBIE"
+
+#include <linux/export.h>
#include <linux/net/intel/libie/rx.h>
/* O(1) converting i40e/ice/iavf's 8/10-bit hardware packet type to a parsed
@@ -116,8 +119,8 @@ const struct libeth_rx_pt libie_rx_pt_lut[LIBIE_RX_PT_NUM] = {
LIBIE_RX_PT_IP(4),
LIBIE_RX_PT_IP(6),
};
-EXPORT_SYMBOL_NS_GPL(libie_rx_pt_lut, LIBIE);
+EXPORT_SYMBOL_GPL(libie_rx_pt_lut);
MODULE_DESCRIPTION("Intel(R) Ethernet common library");
-MODULE_IMPORT_NS(LIBETH);
+MODULE_IMPORT_NS("LIBETH");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 87c7e6251a4f..891a94d89f4b 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -900,7 +900,8 @@ static void korina_check_media(struct net_device *dev, unsigned int init_media)
static void korina_poll_media(struct timer_list *t)
{
- struct korina_private *lp = from_timer(lp, t, media_check_timer);
+ struct korina_private *lp = timer_container_of(lp, t,
+ media_check_timer);
struct net_device *dev = lp->dev;
korina_check_media(dev, 0);
@@ -1239,7 +1240,7 @@ static int korina_close(struct net_device *dev)
struct korina_private *lp = netdev_priv(dev);
u32 tmp;
- del_timer(&lp->media_check_timer);
+ timer_delete(&lp->media_check_timer);
/* Disable interrupts */
disable_irq(lp->rx_irq);
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 660dff5426e7..83ce3bfefa5c 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -90,7 +90,6 @@ struct ltq_etop_priv {
struct net_device *netdev;
struct platform_device *pdev;
struct ltq_eth_data *pldata;
- struct resource *res;
struct mii_bus *mii_bus;
@@ -643,31 +642,14 @@ ltq_etop_probe(struct platform_device *pdev)
{
struct net_device *dev;
struct ltq_etop_priv *priv;
- struct resource *res;
int err;
int i;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "failed to get etop resource\n");
- err = -ENOENT;
- goto err_out;
- }
-
- res = devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), dev_name(&pdev->dev));
- if (!res) {
- dev_err(&pdev->dev, "failed to request etop resource\n");
- err = -EBUSY;
- goto err_out;
- }
-
- ltq_etop_membase = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!ltq_etop_membase) {
+ ltq_etop_membase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ltq_etop_membase)) {
dev_err(&pdev->dev, "failed to remap etop engine %d\n",
pdev->id);
- err = -ENOMEM;
+ err = PTR_ERR(ltq_etop_membase);
goto err_out;
}
@@ -679,7 +661,6 @@ ltq_etop_probe(struct platform_device *pdev)
dev->netdev_ops = &ltq_eth_netdev_ops;
dev->ethtool_ops = &ltq_etop_ethtool_ops;
priv = netdev_priv(dev);
- priv->res = res;
priv->pdev = pdev;
priv->pldata = dev_get_platdata(&pdev->dev);
priv->netdev = dev;
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 837295fecd17..50f7c59e8a04 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -34,7 +34,6 @@ config MV643XX_ETH
config MVMDIO
tristate "Marvell MDIO interface support"
depends on HAS_IOMEM
- select MDIO_DEVRES
select PHYLIB
help
This driver supports the MDIO interface found in the network
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index a06048719e84..0ab52c57c648 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1333,7 +1333,8 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
static void mib_counters_timer_wrapper(struct timer_list *t)
{
- struct mv643xx_eth_private *mp = from_timer(mp, t, mib_counters_timer);
+ struct mv643xx_eth_private *mp = timer_container_of(mp, t,
+ mib_counters_timer);
mib_counters_update(mp);
mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}
@@ -2247,7 +2248,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
if (unlikely(mp->oom)) {
mp->oom = 0;
- del_timer(&mp->rx_oom);
+ timer_delete(&mp->rx_oom);
}
work_done = 0;
@@ -2306,7 +2307,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
static inline void oom_timer_wrapper(struct timer_list *t)
{
- struct mv643xx_eth_private *mp = from_timer(mp, t, rx_oom);
+ struct mv643xx_eth_private *mp = timer_container_of(mp, t, rx_oom);
napi_schedule(&mp->napi);
}
@@ -2521,7 +2522,7 @@ static int mv643xx_eth_stop(struct net_device *dev)
napi_disable(&mp->napi);
- del_timer_sync(&mp->rx_oom);
+ timer_delete_sync(&mp->rx_oom);
netif_carrier_off(dev);
if (dev->phydev)
@@ -2531,7 +2532,7 @@ static int mv643xx_eth_stop(struct net_device *dev)
port_reset(mp);
mv643xx_eth_get_stats(dev);
mib_counters_update(mp);
- del_timer_sync(&mp->mib_counters_timer);
+ timer_delete_sync(&mp->mib_counters_timer);
for (i = 0; i < mp->rxq_count; i++)
rxq_deinit(mp->rxq + i);
@@ -2704,9 +2705,15 @@ static struct platform_device *port_platdev[3];
static void mv643xx_eth_shared_of_remove(void)
{
+ struct mv643xx_eth_platform_data *pd;
int n;
for (n = 0; n < 3; n++) {
+ if (!port_platdev[n])
+ continue;
+ pd = dev_get_platdata(&port_platdev[n]->dev);
+ if (pd)
+ of_node_put(pd->phy_node);
platform_device_del(port_platdev[n]);
port_platdev[n] = NULL;
}
@@ -2769,8 +2776,10 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
}
ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num);
- if (!ppdev)
- return -ENOMEM;
+ if (!ppdev) {
+ ret = -ENOMEM;
+ goto put_err;
+ }
ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
ppdev->dev.of_node = pnp;
@@ -2792,6 +2801,8 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
port_err:
platform_device_put(ppdev);
+put_err:
+ of_node_put(ppd.phy_node);
return ret;
}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 1fb285fa0bdb..7af44f858fa3 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -284,8 +284,12 @@
MVNETA_TXQ_BUCKET_REFILL_PERIOD))
#define MVNETA_LPI_CTRL_0 0x2cc0
+#define MVNETA_LPI_CTRL_0_TS (0xff << 8)
#define MVNETA_LPI_CTRL_1 0x2cc4
-#define MVNETA_LPI_REQUEST_ENABLE BIT(0)
+#define MVNETA_LPI_CTRL_1_REQUEST_ENABLE BIT(0)
+#define MVNETA_LPI_CTRL_1_REQUEST_FORCE BIT(1)
+#define MVNETA_LPI_CTRL_1_MANUAL_MODE BIT(2)
+#define MVNETA_LPI_CTRL_1_TW (0xfff << 4)
#define MVNETA_LPI_CTRL_2 0x2cc8
#define MVNETA_LPI_STATUS 0x2ccc
@@ -541,10 +545,6 @@ struct mvneta_port {
struct mvneta_bm_pool *pool_short;
int bm_win_id;
- bool eee_enabled;
- bool eee_active;
- bool tx_lpi_enabled;
-
u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
@@ -2342,7 +2342,7 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
prefetch(data);
xdp_buff_clear_frags_flag(xdp);
xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE,
- data_len, false);
+ data_len, true);
}
static void
@@ -2396,6 +2396,7 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
struct xdp_buff *xdp, u32 desc_status)
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ u32 metasize = xdp->data - xdp->data_meta;
struct sk_buff *skb;
u8 num_frags;
@@ -2410,13 +2411,14 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
skb_reserve(skb, xdp->data - xdp->data_hard_start);
skb_put(skb, xdp->data_end - xdp->data);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb->ip_summed = mvneta_rx_csum(pp, desc_status);
if (unlikely(xdp_buff_has_frags(xdp)))
- xdp_update_skb_shared_info(skb, num_frags,
- sinfo->xdp_frags_size,
- num_frags * xdp->frame_sz,
- xdp_buff_is_frag_pfmemalloc(xdp));
+ xdp_update_skb_frags_info(skb, num_frags, sinfo->xdp_frags_size,
+ num_frags * xdp->frame_sz,
+ xdp_buff_get_skb_flags(xdp));
return skb;
}
@@ -2982,6 +2984,13 @@ out:
if (txq->count >= txq->tx_stop_threshold)
netif_tx_stop_queue(nq);
+ /* This is not really the true transmit point, since we batch
+ * up several before hitting the hardware, but is the best we
+ * can do without more complexity to walk the packets in the
+ * pending section of the transmit queue.
+ */
+ skb_tx_timestamp(skb);
+
if (!netdev_xmit_more() || netif_xmit_stopped(nq) ||
txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
mvneta_txq_pend_desc_add(pp, txq, frags);
@@ -3960,23 +3969,30 @@ static struct mvneta_port *mvneta_pcs_to_port(struct phylink_pcs *pcs)
return container_of(pcs, struct mvneta_port, phylink_pcs);
}
-static int mvneta_pcs_validate(struct phylink_pcs *pcs,
- unsigned long *supported,
- const struct phylink_link_state *state)
+static unsigned int mvneta_pcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
{
- /* We only support QSGMII, SGMII, 802.3z and RGMII modes.
- * When in 802.3z mode, we must have AN enabled:
+ /* When operating in an 802.3z mode, we must have AN enabled:
* "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
* When <PortType> = 1 (1000BASE-X) this field must be set to 1."
+ * Therefore, inband is "required".
*/
- if (phy_interface_mode_is_8023z(state->interface) &&
- !phylink_test(state->advertising, Autoneg))
- return -EINVAL;
+ if (phy_interface_mode_is_8023z(interface))
+ return LINK_INBAND_ENABLE;
- return 0;
+ /* QSGMII, SGMII and RGMII can be configured to use inband
+ * signalling of the AN result. Indicate these as "possible".
+ */
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ interface == PHY_INTERFACE_MODE_QSGMII ||
+ phy_interface_mode_is_rgmii(interface))
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+
+ /* For any other modes, indicate that inband is not supported. */
+ return LINK_INBAND_DISABLE;
}
-static void mvneta_pcs_get_state(struct phylink_pcs *pcs,
+static void mvneta_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
@@ -4071,7 +4087,7 @@ static void mvneta_pcs_an_restart(struct phylink_pcs *pcs)
}
static const struct phylink_pcs_ops mvneta_phylink_pcs_ops = {
- .pcs_validate = mvneta_pcs_validate,
+ .pcs_inband_caps = mvneta_pcs_inband_caps,
.pcs_get_state = mvneta_pcs_get_state,
.pcs_config = mvneta_pcs_config,
.pcs_an_restart = mvneta_pcs_an_restart,
@@ -4206,18 +4222,6 @@ static int mvneta_mac_finish(struct phylink_config *config, unsigned int mode,
return 0;
}
-static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
-{
- u32 lpi_ctl1;
-
- lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
- if (enable)
- lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE;
- else
- lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE;
- mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
-}
-
static void mvneta_mac_link_down(struct phylink_config *config,
unsigned int mode, phy_interface_t interface)
{
@@ -4233,9 +4237,6 @@ static void mvneta_mac_link_down(struct phylink_config *config,
val |= MVNETA_GMAC_FORCE_LINK_DOWN;
mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
}
-
- pp->eee_active = false;
- mvneta_set_eee(pp, false);
}
static void mvneta_mac_link_up(struct phylink_config *config,
@@ -4284,11 +4285,56 @@ static void mvneta_mac_link_up(struct phylink_config *config,
}
mvneta_port_up(pp);
+}
+
+static void mvneta_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct mvneta_port *pp = netdev_priv(to_net_dev(config->dev));
+ u32 lpi1;
- if (phy && pp->eee_enabled) {
- pp->eee_active = phy_init_eee(phy, false) >= 0;
- mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
+ lpi1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
+ lpi1 &= ~(MVNETA_LPI_CTRL_1_REQUEST_ENABLE |
+ MVNETA_LPI_CTRL_1_REQUEST_FORCE |
+ MVNETA_LPI_CTRL_1_MANUAL_MODE);
+ mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi1);
+}
+
+static int mvneta_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop)
+{
+ struct mvneta_port *pp = netdev_priv(to_net_dev(config->dev));
+ u32 ts, tw, lpi0, lpi1, status;
+
+ status = mvreg_read(pp, MVNETA_GMAC_STATUS);
+ if (status & MVNETA_GMAC_SPEED_1000) {
+ /* At 1G speeds, the timer resolution is 1us, and
+ * 802.3 says tw is 16.5us. Round up to 17us.
+ */
+ tw = 17;
+ ts = timer;
+ } else {
+ /* At 100M speeds, the timer resolution is 10us, and
+ * 802.3 says tw is 30us.
+ */
+ tw = 3;
+ ts = DIV_ROUND_UP(timer, 10);
}
+
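+ /* the hardware TS field is eight bits wide, so clamp the idle timer
+ * to the largest representable value
+ */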
+ if (ts > 255)
+ ts = 255;
+
+ /* Configure ts */
+ lpi0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
+ lpi0 = u32_replace_bits(lpi0, ts, MVNETA_LPI_CTRL_0_TS);
+ mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi0);
+
+ /* Configure tw and enable LPI generation */
+ lpi1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
+ lpi1 = u32_replace_bits(lpi1, tw, MVNETA_LPI_CTRL_1_TW);
+ lpi1 |= MVNETA_LPI_CTRL_1_REQUEST_ENABLE;
+ mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi1);
+
+ return 0;
}
static const struct phylink_mac_ops mvneta_phylink_ops = {
@@ -4298,6 +4344,8 @@ static const struct phylink_mac_ops mvneta_phylink_ops = {
.mac_finish = mvneta_mac_finish,
.mac_link_down = mvneta_mac_link_down,
.mac_link_up = mvneta_mac_link_up,
+ .mac_disable_tx_lpi = mvneta_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = mvneta_mac_enable_tx_lpi,
};
static int mvneta_mdio_probe(struct mvneta_port *pp)
@@ -4385,6 +4433,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
if (pp->neta_armada3700)
return 0;
+ netdev_lock(port->napi.dev);
spin_lock(&pp->lock);
/*
* Configuring the driver for a new CPU while the driver is
@@ -4392,6 +4441,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
*/
if (pp->is_stopped) {
spin_unlock(&pp->lock);
+ netdev_unlock(port->napi.dev);
return 0;
}
netif_tx_stop_all_queues(pp->dev);
@@ -4411,7 +4461,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
/* Mask all ethernet port interrupts */
on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
- napi_enable(&port->napi);
+ napi_enable_locked(&port->napi);
/*
* Enable per-CPU interrupts on the CPU that is
@@ -4432,6 +4482,8 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
MVNETA_CAUSE_LINK_CHANGE);
netif_tx_start_all_queues(pp->dev);
spin_unlock(&pp->lock);
+ netdev_unlock(port->napi.dev);
+
return 0;
}
@@ -4564,7 +4616,7 @@ static int mvneta_stop(struct net_device *dev)
/* Inform that we are stopping so we don't want to setup the
* driver for new CPUs in the notifiers. The code of the
* notifier for CPU online is protected by the same spinlock,
- * so when we get the lock, the notifer work is done.
+ * so when we get the lock, the notifier work is done.
*/
spin_lock(&pp->lock);
pp->is_stopped = true;
@@ -4960,19 +5012,9 @@ static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
return MVNETA_RSS_LU_TABLE_SIZE;
}
-static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
- struct ethtool_rxnfc *info,
- u32 *rules __always_unused)
+static u32 mvneta_ethtool_get_rx_ring_count(struct net_device *dev)
{
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = rxq_number;
- return 0;
- case ETHTOOL_GRXFH:
- return -EOPNOTSUPP;
- default:
- return -EOPNOTSUPP;
- }
+ return rxq_number;
}
static int mvneta_config_rss(struct mvneta_port *pp)
@@ -5099,14 +5141,6 @@ static int mvneta_ethtool_get_eee(struct net_device *dev,
struct ethtool_keee *eee)
{
struct mvneta_port *pp = netdev_priv(dev);
- u32 lpi_ctl0;
-
- lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
-
- eee->eee_enabled = pp->eee_enabled;
- eee->eee_active = pp->eee_active;
- eee->tx_lpi_enabled = pp->tx_lpi_enabled;
- eee->tx_lpi_timer = (lpi_ctl0) >> 8; // * scale;
return phylink_ethtool_get_eee(pp->phylink, eee);
}
@@ -5115,7 +5149,6 @@ static int mvneta_ethtool_set_eee(struct net_device *dev,
struct ethtool_keee *eee)
{
struct mvneta_port *pp = netdev_priv(dev);
- u32 lpi_ctl0;
/* The Armada 37x documents do not give limits for this other than
* it being an 8-bit register.
@@ -5123,16 +5156,6 @@ static int mvneta_ethtool_set_eee(struct net_device *dev,
if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
return -EINVAL;
- lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
- lpi_ctl0 &= ~(0xff << 8);
- lpi_ctl0 |= eee->tx_lpi_timer << 8;
- mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);
-
- pp->eee_enabled = eee->eee_enabled;
- pp->tx_lpi_enabled = eee->tx_lpi_enabled;
-
- mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);
-
return phylink_ethtool_set_eee(pp->phylink, eee);
}
@@ -5325,13 +5348,14 @@ static const struct ethtool_ops mvneta_eth_tool_ops = {
.get_ethtool_stats = mvneta_ethtool_get_stats,
.get_sset_count = mvneta_ethtool_get_sset_count,
.get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
- .get_rxnfc = mvneta_ethtool_get_rxnfc,
+ .get_rx_ring_count = mvneta_ethtool_get_rx_ring_count,
.get_rxfh = mvneta_ethtool_get_rxfh,
.set_rxfh = mvneta_ethtool_set_rxfh,
.get_link_ksettings = mvneta_ethtool_get_link_ksettings,
.set_link_ksettings = mvneta_ethtool_set_link_ksettings,
.get_wol = mvneta_ethtool_get_wol,
.set_wol = mvneta_ethtool_set_wol,
+ .get_ts_info = ethtool_op_get_ts_info,
.get_eee = mvneta_ethtool_get_eee,
.set_eee = mvneta_ethtool_set_eee,
};
@@ -5446,6 +5470,9 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
!phy_interface_mode_is_rgmii(phy_mode))
return -EINVAL;
+ /* Ensure LPI is disabled */
+ mvneta_mac_disable_tx_lpi(&pp->phylink_config);
+
return 0;
}
@@ -5530,13 +5557,19 @@ static int mvneta_probe(struct platform_device *pdev)
clk_prepare_enable(pp->clk_bus);
pp->phylink_pcs.ops = &mvneta_phylink_pcs_ops;
- pp->phylink_pcs.neg_mode = true;
pp->phylink_config.dev = &dev->dev;
pp->phylink_config.type = PHYLINK_NETDEV;
pp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 |
MAC_100 | MAC_1000FD | MAC_2500FD;
+ /* Setup EEE. Choose 250us idle. Only supported in SGMII modes. */
+ __set_bit(PHY_INTERFACE_MODE_QSGMII, pp->phylink_config.lpi_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII, pp->phylink_config.lpi_interfaces);
+ pp->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD;
+ pp->phylink_config.lpi_timer_default = 250;
+ pp->phylink_config.eee_enabled_default = true;
+
phy_interface_set_rgmii(pp->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_QSGMII,
pp->phylink_config.supported_interfaces);
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.h b/drivers/net/ethernet/marvell/mvneta_bm.h
index e47783ce77e0..57ac039df6f7 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.h
+++ b/drivers/net/ethernet/marvell/mvneta_bm.h
@@ -115,7 +115,7 @@ struct mvneta_bm_pool {
/* Packet size */
int pkt_size;
- /* Size of the buffer acces through DMA*/
+ /* Size of the buffer access through DMA */
u32 buf_size;
/* BPPE virtual base address */
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index 9e02e4367bec..061fcd444d50 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -481,6 +481,11 @@
#define MVPP22_GMAC_INT_SUM_MASK 0xa4
#define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT BIT(1)
#define MVPP22_GMAC_INT_SUM_MASK_PTP BIT(2)
+#define MVPP2_GMAC_LPI_CTRL0 0xc0
+#define MVPP2_GMAC_LPI_CTRL0_TS_MASK GENMASK(15, 8)
+#define MVPP2_GMAC_LPI_CTRL1 0xc4
+#define MVPP2_GMAC_LPI_CTRL1_REQ_EN BIT(0)
+#define MVPP2_GMAC_LPI_CTRL1_TW_MASK GENMASK(15, 4)
/* Per-port XGMAC registers. PPv2.2 and PPv2.3, only for GOP port 0,
* relative to port->base.
@@ -1108,6 +1113,9 @@ struct mvpp2 {
/* Spinlocks for CM3 shared memory configuration */
spinlock_t mss_spinlock;
+
+ /* Spinlock for shared PRS parser memory and shadow table */
+ spinlock_t prs_spinlock;
};
struct mvpp2_pcpu_stats {
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index 1641791a2d5b..44b201817d94 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -324,7 +324,7 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
MVPP2_PRS_RI_VLAN_MASK),
/* Non IP flow, with vlan tag */
MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_TAG,
- MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_TAGGED,
0, 0),
};
@@ -1618,7 +1618,8 @@ int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 port_ctx,
return 0;
}
-int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
+int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port,
+ const struct ethtool_rxfh_fields *info)
{
u16 hash_opts = 0;
u32 flow_type;
@@ -1656,7 +1657,8 @@ int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
return mvpp2_port_rss_hash_opts_set(port, flow_type, hash_opts);
}
-int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info)
+int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port,
+ struct ethtool_rxfh_fields *info)
{
unsigned long hash_opts;
u32 flow_type;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
index 85c9c6e80678..caadf3aea95d 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
@@ -272,8 +272,10 @@ int mvpp22_port_rss_ctx_indir_set(struct mvpp2_port *port, u32 rss_ctx,
int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 rss_ctx,
u32 *indir);
-int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info);
-int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info);
+int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port,
+ struct ethtool_rxfh_fields *info);
+int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port,
+ const struct ethtool_rxfh_fields *info);
void mvpp2_cls_init(struct mvpp2 *priv);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 571631a30320..33426fded919 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -3915,13 +3915,13 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
while (rx_done < rx_todo) {
struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
+ u32 rx_status, timestamp, metasize = 0;
struct mvpp2_bm_pool *bm_pool;
struct page_pool *pp = NULL;
struct sk_buff *skb;
unsigned int frag_size;
dma_addr_t dma_addr;
phys_addr_t phys_addr;
- u32 rx_status, timestamp;
int pool, rx_bytes, err, ret;
struct page *page;
void *data;
@@ -3983,7 +3983,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
xdp_init_buff(&xdp, PAGE_SIZE, xdp_rxq);
xdp_prepare_buff(&xdp, data,
MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM,
- rx_bytes, false);
+ rx_bytes, true);
ret = mvpp2_run_xdp(port, xdp_prog, &xdp, pp, &ps);
@@ -3999,6 +3999,8 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
ps.rx_bytes += rx_bytes;
continue;
}
+
+ metasize = xdp.data - xdp.data_meta;
}
if (frag_size)
@@ -4038,6 +4040,8 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
skb_put(skb, rx_bytes);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb->ip_summed = mvpp2_rx_csum(port, rx_status);
skb->protocol = eth_type_trans(skb, dev);
@@ -4435,6 +4439,8 @@ out:
txq_pcpu->count += frags;
aggr_txq->count += frags;
+ skb_tx_timestamp(skb);
+
/* Enable transmit */
wmb();
mvpp2_aggr_txq_pend_desc_add(port, frags);
@@ -5169,38 +5175,40 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_dropped = dev->stats.tx_dropped;
}
-static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
+static int mvpp2_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
+ struct mvpp2_port *port = netdev_priv(dev);
void __iomem *ptp;
u32 gcr, int_mask;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ if (!port->hwtstamp)
+ return -EOPNOTSUPP;
- if (config.tx_type != HWTSTAMP_TX_OFF &&
- config.tx_type != HWTSTAMP_TX_ON)
+ if (config->tx_type != HWTSTAMP_TX_OFF &&
+ config->tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
int_mask = gcr = 0;
- if (config.tx_type != HWTSTAMP_TX_OFF) {
+ if (config->tx_type != HWTSTAMP_TX_OFF) {
gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_TX_RESET;
int_mask |= MVPP22_PTP_INT_MASK_QUEUE1 |
MVPP22_PTP_INT_MASK_QUEUE0;
}
/* It seems we must also release the TX reset when enabling the TSU */
- if (config.rx_filter != HWTSTAMP_FILTER_NONE)
+ if (config->rx_filter != HWTSTAMP_FILTER_NONE)
gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_RX_RESET |
MVPP22_PTP_GCR_TX_RESET;
if (gcr & MVPP22_PTP_GCR_TSU_ENABLE)
mvpp22_tai_start(port->priv->tai);
- if (config.rx_filter != HWTSTAMP_FILTER_NONE) {
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ if (config->rx_filter != HWTSTAMP_FILTER_NONE) {
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
mvpp2_modify(ptp + MVPP22_PTP_GCR,
MVPP22_PTP_GCR_RX_RESET |
MVPP22_PTP_GCR_TX_RESET |
@@ -5221,26 +5229,22 @@ static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
if (!(gcr & MVPP22_PTP_GCR_TSU_ENABLE))
mvpp22_tai_stop(port->priv->tai);
- port->tx_hwtstamp_type = config.tx_type;
-
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
+ port->tx_hwtstamp_type = config->tx_type;
return 0;
}
-static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
+static int mvpp2_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config config;
-
- memset(&config, 0, sizeof(config));
+ struct mvpp2_port *port = netdev_priv(dev);
- config.tx_type = port->tx_hwtstamp_type;
- config.rx_filter = port->rx_hwtstamp ?
- HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+ if (!port->hwtstamp)
+ return -EOPNOTSUPP;
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
+ config->tx_type = port->tx_hwtstamp_type;
+ config->rx_filter = port->rx_hwtstamp ? HWTSTAMP_FILTER_ALL :
+ HWTSTAMP_FILTER_NONE;
return 0;
}
@@ -5250,14 +5254,14 @@ static int mvpp2_ethtool_get_ts_info(struct net_device *dev,
{
struct mvpp2_port *port = netdev_priv(dev);
+ ethtool_op_get_ts_info(dev, info);
if (!port->hwtstamp)
- return -EOPNOTSUPP;
+ return 0;
info->phc_index = mvpp22_tai_ptp_clock_index(port->priv->tai);
- info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
info->tx_types = BIT(HWTSTAMP_TX_OFF) |
BIT(HWTSTAMP_TX_ON);
info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
@@ -5270,18 +5274,6 @@ static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mvpp2_port *port = netdev_priv(dev);
- switch (cmd) {
- case SIOCSHWTSTAMP:
- if (port->hwtstamp)
- return mvpp2_set_ts_config(port, ifr);
- break;
-
- case SIOCGHWTSTAMP:
- if (port->hwtstamp)
- return mvpp2_get_ts_config(port, ifr);
- break;
- }
-
if (!port->phylink)
return -ENOTSUPP;
@@ -5588,6 +5580,13 @@ static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev,
return phylink_ethtool_ksettings_set(port->phylink, cmd);
}
+static u32 mvpp2_ethtool_get_rx_ring_count(struct net_device *dev)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ return port->nrxqs;
+}
+
static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
struct ethtool_rxnfc *info, u32 *rules)
{
@@ -5598,12 +5597,6 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
return -EOPNOTSUPP;
switch (info->cmd) {
- case ETHTOOL_GRXFH:
- ret = mvpp2_ethtool_rxfh_get(port, info);
- break;
- case ETHTOOL_GRXRINGS:
- info->data = port->nrxqs;
- break;
case ETHTOOL_GRXCLSRLCNT:
info->rule_cnt = port->n_rfs_rules;
break;
@@ -5638,9 +5631,6 @@ static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
return -EOPNOTSUPP;
switch (info->cmd) {
- case ETHTOOL_SRXFH:
- ret = mvpp2_ethtool_rxfh_set(port, info);
- break;
case ETHTOOL_SRXCLSRLINS:
ret = mvpp2_ethtool_cls_rule_ins(port, info);
break;
@@ -5757,6 +5747,51 @@ static int mvpp2_ethtool_set_rxfh(struct net_device *dev,
return mvpp2_modify_rxfh_context(dev, NULL, rxfh, extack);
}
+static int mvpp2_ethtool_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!mvpp22_rss_is_supported(port))
+ return -EOPNOTSUPP;
+
+ return mvpp2_ethtool_rxfh_get(port, info);
+}
+
+static int mvpp2_ethtool_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *info,
+ struct netlink_ext_ack *extack)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!mvpp22_rss_is_supported(port))
+ return -EOPNOTSUPP;
+
+ return mvpp2_ethtool_rxfh_set(port, info);
+}
+
+static int mvpp2_ethtool_get_eee(struct net_device *dev,
+ struct ethtool_keee *eee)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!port->phylink)
+ return -EOPNOTSUPP;
+
+ return phylink_ethtool_get_eee(port->phylink, eee);
+}
+
+static int mvpp2_ethtool_set_eee(struct net_device *dev,
+ struct ethtool_keee *eee)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!port->phylink)
+ return -EOPNOTSUPP;
+
+ return phylink_ethtool_set_eee(port->phylink, eee);
+}
+
/* Device ops */
static const struct net_device_ops mvpp2_netdev_ops = {
@@ -5773,6 +5808,8 @@ static const struct net_device_ops mvpp2_netdev_ops = {
.ndo_set_features = mvpp2_set_features,
.ndo_bpf = mvpp2_xdp,
.ndo_xdp_xmit = mvpp2_xdp_xmit,
+ .ndo_hwtstamp_get = mvpp2_hwtstamp_get,
+ .ndo_hwtstamp_set = mvpp2_hwtstamp_set,
};
static const struct ethtool_ops mvpp2_eth_tool_ops = {
@@ -5794,14 +5831,19 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
.set_pauseparam = mvpp2_ethtool_set_pause_param,
.get_link_ksettings = mvpp2_ethtool_get_link_ksettings,
.set_link_ksettings = mvpp2_ethtool_set_link_ksettings,
+ .get_rx_ring_count = mvpp2_ethtool_get_rx_ring_count,
.get_rxnfc = mvpp2_ethtool_get_rxnfc,
.set_rxnfc = mvpp2_ethtool_set_rxnfc,
.get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
.get_rxfh = mvpp2_ethtool_get_rxfh,
.set_rxfh = mvpp2_ethtool_set_rxfh,
+ .get_rxfh_fields = mvpp2_ethtool_get_rxfh_fields,
+ .set_rxfh_fields = mvpp2_ethtool_set_rxfh_fields,
.create_rxfh_context = mvpp2_create_rxfh_context,
.modify_rxfh_context = mvpp2_modify_rxfh_context,
.remove_rxfh_context = mvpp2_remove_rxfh_context,
+ .get_eee = mvpp2_ethtool_get_eee,
+ .set_eee = mvpp2_ethtool_set_eee,
};
/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
@@ -6187,7 +6229,14 @@ static struct mvpp2_port *mvpp2_pcs_gmac_to_port(struct phylink_pcs *pcs)
return container_of(pcs, struct mvpp2_port, pcs_gmac);
}
+static unsigned int mvpp2_xjg_pcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ return LINK_INBAND_DISABLE;
+}
+
static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mvpp2_port *port = mvpp2_pcs_xlg_to_port(pcs);
@@ -6220,26 +6269,35 @@ static int mvpp2_xlg_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
}
static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = {
+ .pcs_inband_caps = mvpp2_xjg_pcs_inband_caps,
.pcs_get_state = mvpp2_xlg_pcs_get_state,
.pcs_config = mvpp2_xlg_pcs_config,
};
-static int mvpp2_gmac_pcs_validate(struct phylink_pcs *pcs,
- unsigned long *supported,
- const struct phylink_link_state *state)
+static unsigned int mvpp2_gmac_pcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
{
- /* When in 802.3z mode, we must have AN enabled:
+ /* When operating in an 802.3z mode, we must have AN enabled:
* Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
* When <PortType> = 1 (1000BASE-X) this field must be set to 1.
+ * Therefore, inband is "required".
*/
- if (phy_interface_mode_is_8023z(state->interface) &&
- !phylink_test(state->advertising, Autoneg))
- return -EINVAL;
+ if (phy_interface_mode_is_8023z(interface))
+ return LINK_INBAND_ENABLE;
- return 0;
+ /* SGMII and RGMII can be configured to use inband signalling of the
+ * AN result. Indicate these as "possible".
+ */
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_mode_is_rgmii(interface))
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+
+ /* For any other modes, indicate that inband is not supported. */
+ return LINK_INBAND_DISABLE;
}
static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
@@ -6343,7 +6401,7 @@ static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs)
}
static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = {
- .pcs_validate = mvpp2_gmac_pcs_validate,
+ .pcs_inband_caps = mvpp2_gmac_pcs_inband_caps,
.pcs_get_state = mvpp2_gmac_pcs_get_state,
.pcs_config = mvpp2_gmac_pcs_config,
.pcs_an_restart = mvpp2_gmac_pcs_an_restart,
@@ -6665,6 +6723,55 @@ static void mvpp2_mac_link_down(struct phylink_config *config,
mvpp2_port_disable(port);
}
+static void mvpp2_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+
+ mvpp2_modify(port->base + MVPP2_GMAC_LPI_CTRL1,
+ MVPP2_GMAC_LPI_CTRL1_REQ_EN, 0);
+}
+
+static int mvpp2_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop)
+{
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+ u32 ts, tw, lpi1, status;
+
+ status = readl(port->base + MVPP2_GMAC_STATUS0);
+ if (status & MVPP2_GMAC_STATUS0_GMII_SPEED) {
+ /* At 1G speeds, the timer resolution is 1us, and
+ * 802.3 says tw is 16.5us. Round up to 17us.
+ */
+ tw = 17;
+ ts = timer;
+ } else {
+ /* At 100M speeds, the timer resolution is 10us, and
+ * 802.3 says tw is 30us.
+ */
+ tw = 3;
+ ts = DIV_ROUND_UP(timer, 10);
+ }
+
+ if (ts > 255)
+ ts = 255;
+
+ /* Configure ts */
+ mvpp2_modify(port->base + MVPP2_GMAC_LPI_CTRL0,
+ MVPP2_GMAC_LPI_CTRL0_TS_MASK,
+ FIELD_PREP(MVPP2_GMAC_LPI_CTRL0_TS_MASK, ts));
+
+ lpi1 = readl(port->base + MVPP2_GMAC_LPI_CTRL1);
+
+ /* Configure tw */
+ lpi1 = u32_replace_bits(lpi1, tw, MVPP2_GMAC_LPI_CTRL1_TW_MASK);
+
+ /* Enable LPI generation */
+ writel(lpi1 | MVPP2_GMAC_LPI_CTRL1_REQ_EN,
+ port->base + MVPP2_GMAC_LPI_CTRL1);
+
+ return 0;
+}
+
static const struct phylink_mac_ops mvpp2_phylink_ops = {
.mac_select_pcs = mvpp2_select_pcs,
.mac_prepare = mvpp2_mac_prepare,
@@ -6672,6 +6779,8 @@ static const struct phylink_mac_ops mvpp2_phylink_ops = {
.mac_finish = mvpp2_mac_finish,
.mac_link_up = mvpp2_mac_link_up,
.mac_link_down = mvpp2_mac_link_down,
+ .mac_enable_tx_lpi = mvpp2_mac_enable_tx_lpi,
+ .mac_disable_tx_lpi = mvpp2_mac_disable_tx_lpi,
};
/* Work-around for ACPI */
@@ -6901,9 +7010,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
for (thread = 0; thread < priv->nthreads; thread++) {
port_pcpu = per_cpu_ptr(port->pcpu, thread);
- hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED_SOFT);
- port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
+ hrtimer_setup(&port_pcpu->tx_done_timer, mvpp2_hr_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED_SOFT);
port_pcpu->timer_scheduled = false;
port_pcpu->dev = dev;
}
@@ -6940,9 +7048,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->dev_port = port->id;
port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
- port->pcs_gmac.neg_mode = true;
port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops;
- port->pcs_xlg.neg_mode = true;
if (!mvpp2_use_acpi_compat_mode(port_fwnode)) {
port->phylink_config.dev = &dev->dev;
@@ -6950,6 +7056,15 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->phylink_config.mac_capabilities =
MAC_2500FD | MAC_1000FD | MAC_100 | MAC_10;
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ port->phylink_config.lpi_interfaces);
+
+ port->phylink_config.lpi_capabilities = MAC_1000FD | MAC_100FD;
+
+ /* Setup EEE. Choose 250us idle. */
+ port->phylink_config.lpi_timer_default = 250;
+ port->phylink_config.eee_enabled_default = true;
+
if (port->priv->global_tx_fc)
port->phylink_config.mac_capabilities |=
MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
@@ -7024,6 +7139,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
goto err_free_port_pcpu;
}
port->phylink = phylink;
+
+ mvpp2_mac_disable_tx_lpi(&port->phylink_config);
} else {
dev_warn(&pdev->dev, "Use link irqs for port#%d. FW update required\n", port->id);
port->phylink = NULL;
@@ -7627,8 +7744,9 @@ static int mvpp2_probe(struct platform_device *pdev)
if (mvpp2_read(priv, MVPP2_VER_ID_REG) == MVPP2_VER_PP23)
priv->hw_version = MVPP23;
- /* Init mss lock */
+ /* Init locks for shared packet processor resources */
spin_lock_init(&priv->mss_spinlock);
+ spin_lock_init(&priv->prs_spinlock);
/* Initialize network controller */
err = mvpp2_init(pdev, priv);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
index 9af22f497a40..93e978bdf303 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -23,6 +23,8 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
int i;
+ lockdep_assert_held(&priv->prs_spinlock);
+
if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
return -EINVAL;
@@ -43,11 +45,13 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
}
/* Initialize tcam entry from hw */
-int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
- int tid)
+static int __mvpp2_prs_init_from_hw(struct mvpp2 *priv,
+ struct mvpp2_prs_entry *pe, int tid)
{
int i;
+ lockdep_assert_held(&priv->prs_spinlock);
+
if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
return -EINVAL;
@@ -73,6 +77,18 @@ int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
return 0;
}
+int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
+ int tid)
+{
+ int err;
+
+ spin_lock_bh(&priv->prs_spinlock);
+ err = __mvpp2_prs_init_from_hw(priv, pe, tid);
+ spin_unlock_bh(&priv->prs_spinlock);
+
+ return err;
+}
+
/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
@@ -374,7 +390,7 @@ static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
bits = mvpp2_prs_sram_ai_get(&pe);
/* Sram store classification lookup ID in AI bits [5:0] */
@@ -441,7 +457,7 @@ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
/* Entry exist - update port only */
- mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
+ __mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
} else {
/* Entry doesn't exist - create new */
memset(&pe, 0, sizeof(pe));
@@ -469,14 +485,17 @@ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
}
/* Set port to unicast or multicast promiscuous mode */
-void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
- enum mvpp2_prs_l2_cast l2_cast, bool add)
+static void __mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
+ enum mvpp2_prs_l2_cast l2_cast,
+ bool add)
{
struct mvpp2_prs_entry pe;
unsigned char cast_match;
unsigned int ri;
int tid;
+ lockdep_assert_held(&priv->prs_spinlock);
+
if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
cast_match = MVPP2_PRS_UCAST_VAL;
tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
@@ -489,7 +508,7 @@ void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
/* promiscuous mode - Accept unknown unicast or multicast packets */
if (priv->prs_shadow[tid].valid) {
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
} else {
memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
@@ -522,6 +541,14 @@ void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
mvpp2_prs_hw_write(priv, &pe);
}
+void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
+ enum mvpp2_prs_l2_cast l2_cast, bool add)
+{
+ spin_lock_bh(&priv->prs_spinlock);
+ __mvpp2_prs_mac_promisc_set(priv, port, l2_cast, add);
+ spin_unlock_bh(&priv->prs_spinlock);
+}
+
/* Set entry for dsa packets */
static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
bool tagged, bool extend)
@@ -539,7 +566,7 @@ static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
if (priv->prs_shadow[tid].valid) {
/* Entry exist - update port only */
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
} else {
/* Entry doesn't exist - create new */
memset(&pe, 0, sizeof(pe));
@@ -610,7 +637,7 @@ static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
if (priv->prs_shadow[tid].valid) {
/* Entry exist - update port only */
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
} else {
/* Entry doesn't exist - create new */
memset(&pe, 0, sizeof(pe));
@@ -673,7 +700,7 @@ static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai)
priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid);
if (!match)
continue;
@@ -726,7 +753,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
ri_bits = mvpp2_prs_sram_ri_get(&pe);
if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
MVPP2_PRS_RI_VLAN_DOUBLE)
@@ -760,7 +787,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
} else {
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
}
/* Update ports' mask */
mvpp2_prs_tcam_port_map_set(&pe, port_map);
@@ -800,7 +827,7 @@ static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1,
priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) &&
mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2);
@@ -849,7 +876,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
ri_bits = mvpp2_prs_sram_ri_get(&pe);
ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
@@ -880,7 +907,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
} else {
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
}
/* Update ports' mask */
@@ -1213,8 +1240,8 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv)
/* Create dummy entries for drop all and promiscuous modes */
mvpp2_prs_drop_fc(priv);
mvpp2_prs_mac_drop_all_set(priv, 0, false);
- mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
- mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
+ __mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
+ __mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
}
/* Set default entries for various types of dsa packets */
@@ -1533,12 +1560,6 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
struct mvpp2_prs_entry pe;
int err;
- priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
- MVPP2_PRS_DBL_VLANS_MAX,
- GFP_KERNEL);
- if (!priv->prs_double_vlans)
- return -ENOMEM;
-
/* Double VLAN: 0x88A8, 0x8100 */
err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021AD, ETH_P_8021Q,
MVPP2_PRS_PORT_MASK);
@@ -1941,7 +1962,7 @@ static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask)
port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
continue;
- mvpp2_prs_init_from_hw(port->priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(port->priv, &pe, tid);
mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
@@ -1970,6 +1991,8 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
memset(&pe, 0, sizeof(pe));
+ spin_lock_bh(&priv->prs_spinlock);
+
/* Scan TCAM and see if entry with this <vid,port> already exist */
tid = mvpp2_prs_vid_range_find(port, vid, mask);
@@ -1988,8 +2011,10 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
MVPP2_PRS_VLAN_FILT_MAX_ENTRY);
/* There isn't room for a new VID filter */
- if (tid < 0)
+ if (tid < 0) {
+ spin_unlock_bh(&priv->prs_spinlock);
return tid;
+ }
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
pe.index = tid;
@@ -1997,7 +2022,7 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
/* Mask all ports */
mvpp2_prs_tcam_port_map_set(&pe, 0);
} else {
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
}
/* Enable the current port */
@@ -2019,6 +2044,7 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
mvpp2_prs_hw_write(priv, &pe);
+ spin_unlock_bh(&priv->prs_spinlock);
return 0;
}
@@ -2028,15 +2054,16 @@ void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
struct mvpp2 *priv = port->priv;
int tid;
- /* Scan TCAM and see if entry with this <vid,port> already exist */
- tid = mvpp2_prs_vid_range_find(port, vid, 0xfff);
+ spin_lock_bh(&priv->prs_spinlock);
- /* No such entry */
- if (tid < 0)
- return;
+ /* Invalidate TCAM entry with this <vid,port>, if it exists */
+ tid = mvpp2_prs_vid_range_find(port, vid, 0xfff);
+ if (tid >= 0) {
+ mvpp2_prs_hw_inv(priv, tid);
+ priv->prs_shadow[tid].valid = false;
+ }
- mvpp2_prs_hw_inv(priv, tid);
- priv->prs_shadow[tid].valid = false;
+ spin_unlock_bh(&priv->prs_spinlock);
}
/* Remove all existing VID filters on this port */
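Note on the hunk above: mvpp2_prs_vid_entry_remove() folds the old "no such entry" early return into an `if (tid >= 0)` block so the newly added spinlock has exactly one unlock site and no path can exit with the lock held. A minimal stand-alone sketch of that shape; the vid_table structure and helper names are hypothetical stand-ins for the TCAM accessors:

	#include <linux/spinlock.h>
	#include <linux/types.h>

	#define VID_SLOTS 64

	struct vid_table {
		spinlock_t lock;
		bool valid[VID_SLOTS];
	};

	/* Hypothetical stand-ins for the TCAM lookup/invalidate helpers. */
	static int vid_find(struct vid_table *t, u16 vid)
	{
		return (vid < VID_SLOTS && t->valid[vid]) ? vid : -1;
	}

	static void vid_hw_inv(struct vid_table *t, int tid) { }

	static void vid_remove(struct vid_table *t, u16 vid)
	{
		int tid;

		spin_lock_bh(&t->lock);

		/* Folding the early return into the condition keeps one unlock site. */
		tid = vid_find(t, vid);
		if (tid >= 0) {
			vid_hw_inv(t, tid);
			t->valid[tid] = false;
		}

		spin_unlock_bh(&t->lock);
	}
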
@@ -2045,6 +2072,8 @@ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
struct mvpp2 *priv = port->priv;
int tid;
+ spin_lock_bh(&priv->prs_spinlock);
+
for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
if (priv->prs_shadow[tid].valid) {
@@ -2052,6 +2081,8 @@ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
priv->prs_shadow[tid].valid = false;
}
}
+
+ spin_unlock_bh(&priv->prs_spinlock);
}
/* Remove VID filering entry for this port */
@@ -2060,10 +2091,14 @@ void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port)
unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
struct mvpp2 *priv = port->priv;
+ spin_lock_bh(&priv->prs_spinlock);
+
/* Invalidate the guard entry */
mvpp2_prs_hw_inv(priv, tid);
priv->prs_shadow[tid].valid = false;
+
+ spin_unlock_bh(&priv->prs_spinlock);
}
/* Add guard entry that drops packets when no VID is matched on this port */
@@ -2079,6 +2114,8 @@ void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
memset(&pe, 0, sizeof(pe));
+ spin_lock_bh(&priv->prs_spinlock);
+
pe.index = tid;
reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
@@ -2111,6 +2148,8 @@ void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
/* Update shadow table */
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
mvpp2_prs_hw_write(priv, &pe);
+
+ spin_unlock_bh(&priv->prs_spinlock);
}
/* Parser default initialization */
@@ -2118,6 +2157,20 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv)
{
int err, index, i;
+ priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
+ sizeof(*priv->prs_shadow),
+ GFP_KERNEL);
+ if (!priv->prs_shadow)
+ return -ENOMEM;
+
+ priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
+ MVPP2_PRS_DBL_VLANS_MAX,
+ GFP_KERNEL);
+ if (!priv->prs_double_vlans)
+ return -ENOMEM;
+
+ spin_lock_bh(&priv->prs_spinlock);
+
/* Enable tcam table */
mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
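Note on the hunk above: both devm_kcalloc() calls are hoisted above the new spin_lock_bh(), presumably because GFP_KERNEL allocations may sleep and sleeping is not allowed while holding a spinlock (here with bottom halves disabled). A condensed sketch of the allocate-then-lock ordering, with hypothetical structure and size names:

	#include <linux/device.h>
	#include <linux/errno.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	#define EX_TABLE_SIZE 256

	struct ex_priv {
		spinlock_t lock;	/* assumed initialised at probe time */
		u8 *shadow;
	};

	static int ex_default_init(struct device *dev, struct ex_priv *ex)
	{
		/* May sleep: must happen before the lock is taken. */
		ex->shadow = devm_kcalloc(dev, EX_TABLE_SIZE, sizeof(*ex->shadow),
					  GFP_KERNEL);
		if (!ex->shadow)
			return -ENOMEM;

		spin_lock_bh(&ex->lock);
		/* Only non-sleeping register programming goes under the lock. */
		spin_unlock_bh(&ex->lock);

		return 0;
	}
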
@@ -2136,12 +2189,6 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv)
for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
mvpp2_prs_hw_inv(priv, index);
- priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
- sizeof(*priv->prs_shadow),
- GFP_KERNEL);
- if (!priv->prs_shadow)
- return -ENOMEM;
-
/* Always start from lookup = 0 */
for (index = 0; index < MVPP2_MAX_PORTS; index++)
mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
@@ -2158,26 +2205,13 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv)
mvpp2_prs_vid_init(priv);
err = mvpp2_prs_etype_init(priv);
- if (err)
- return err;
-
- err = mvpp2_prs_vlan_init(pdev, priv);
- if (err)
- return err;
-
- err = mvpp2_prs_pppoe_init(priv);
- if (err)
- return err;
-
- err = mvpp2_prs_ip6_init(priv);
- if (err)
- return err;
-
- err = mvpp2_prs_ip4_init(priv);
- if (err)
- return err;
+ err = err ? : mvpp2_prs_vlan_init(pdev, priv);
+ err = err ? : mvpp2_prs_pppoe_init(priv);
+ err = err ? : mvpp2_prs_ip6_init(priv);
+ err = err ? : mvpp2_prs_ip4_init(priv);
- return 0;
+ spin_unlock_bh(&priv->prs_spinlock);
+ return err;
}
/* Compare MAC DA with tcam entry data */
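Note on the hunk above: the tail of mvpp2_prs_default_init() replaces the repeated `if (err) return err;` ladder with GNU C's conditional with an omitted middle operand (`err = err ? : fn()`), so once err goes non-zero the remaining init calls are skipped and the first error code survives to the single unlock/return point. A small sketch of the idiom with hypothetical step functions:

	#include <linux/errno.h>

	static int step_a(void) { return 0; }
	static int step_b(void) { return -EINVAL; }
	static int step_c(void) { return 0; }

	static int init_chain(void)
	{
		int err;

		err = step_a();
		err = err ? : step_b();	/* runs only while err is still 0 */
		err = err ? : step_c();	/* skipped here: step_b() already failed */

		return err;		/* -EINVAL, the first failure */
	}
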
@@ -2217,7 +2251,7 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
(priv->prs_shadow[tid].udf != udf_type))
continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);
if (mvpp2_prs_mac_range_equals(&pe, da, mask) &&
@@ -2229,7 +2263,8 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
}
/* Update parser's mac da entry */
-int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
+static int __mvpp2_prs_mac_da_accept(struct mvpp2_port *port,
+ const u8 *da, bool add)
{
unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
struct mvpp2 *priv = port->priv;
@@ -2261,7 +2296,7 @@ int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
/* Mask all ports */
mvpp2_prs_tcam_port_map_set(&pe, 0);
} else {
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
}
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
@@ -2317,6 +2352,17 @@ int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
return 0;
}
+int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
+{
+ int err;
+
+ spin_lock_bh(&port->priv->prs_spinlock);
+ err = __mvpp2_prs_mac_da_accept(port, da, add);
+ spin_unlock_bh(&port->priv->prs_spinlock);
+
+ return err;
+}
+
int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
{
struct mvpp2_port *port = netdev_priv(dev);
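Note on the hunk above: this is the pattern the whole mvpp2 parser change follows. The existing function body becomes a double-underscore helper that expects the caller to already hold prs_spinlock, and the exported name becomes a thin wrapper that takes the lock. Internal paths that already hold it (for example mvpp2_prs_mac_del_all() below) keep calling the __ variant directly, so there is no recursive locking. A generic sketch of the convention with hypothetical names; the lockdep_assert_held() annotation is an extra documentation aid here, not something the patch adds:

	#include <linux/lockdep.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(ex_lock);

	static int __ex_update(int arg)
	{
		lockdep_assert_held(&ex_lock);	/* caller must hold the lock */
		/* ... modify shared parser state here ... */
		return 0;
	}

	int ex_update(int arg)
	{
		int err;

		spin_lock_bh(&ex_lock);
		err = __ex_update(arg);
		spin_unlock_bh(&ex_lock);

		return err;
	}
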
@@ -2345,6 +2391,8 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
unsigned long pmap;
int index, tid;
+ spin_lock_bh(&priv->prs_spinlock);
+
for (tid = MVPP2_PE_MAC_RANGE_START;
tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
@@ -2354,7 +2402,7 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
(priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(priv, &pe, tid);
pmap = mvpp2_prs_tcam_port_map_get(&pe);
@@ -2375,14 +2423,17 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
continue;
/* Remove entry from TCAM */
- mvpp2_prs_mac_da_accept(port, da, false);
+ __mvpp2_prs_mac_da_accept(port, da, false);
}
+
+ spin_unlock_bh(&priv->prs_spinlock);
}
int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
{
switch (type) {
case MVPP2_TAG_TYPE_EDSA:
+ spin_lock_bh(&priv->prs_spinlock);
/* Add port to EDSA entries */
mvpp2_prs_dsa_tag_set(priv, port, true,
MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
@@ -2393,9 +2444,11 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
mvpp2_prs_dsa_tag_set(priv, port, false,
MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+ spin_unlock_bh(&priv->prs_spinlock);
break;
case MVPP2_TAG_TYPE_DSA:
+ spin_lock_bh(&priv->prs_spinlock);
/* Add port to DSA entries */
mvpp2_prs_dsa_tag_set(priv, port, true,
MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
@@ -2406,10 +2459,12 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
mvpp2_prs_dsa_tag_set(priv, port, false,
MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+ spin_unlock_bh(&priv->prs_spinlock);
break;
case MVPP2_TAG_TYPE_MH:
case MVPP2_TAG_TYPE_NONE:
+ spin_lock_bh(&priv->prs_spinlock);
/* Remove port form EDSA and DSA entries */
mvpp2_prs_dsa_tag_set(priv, port, false,
MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
@@ -2419,6 +2474,7 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
mvpp2_prs_dsa_tag_set(priv, port, false,
MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+ spin_unlock_bh(&priv->prs_spinlock);
break;
default:
@@ -2437,11 +2493,15 @@ int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask)
memset(&pe, 0, sizeof(pe));
+ spin_lock_bh(&priv->prs_spinlock);
+
tid = mvpp2_prs_tcam_first_free(priv,
MVPP2_PE_LAST_FREE_TID,
MVPP2_PE_FIRST_FREE_TID);
- if (tid < 0)
+ if (tid < 0) {
+ spin_unlock_bh(&priv->prs_spinlock);
return tid;
+ }
pe.index = tid;
@@ -2461,6 +2521,7 @@ int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask)
mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
mvpp2_prs_hw_write(priv, &pe);
+ spin_unlock_bh(&priv->prs_spinlock);
return 0;
}
@@ -2472,6 +2533,8 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port)
memset(&pe, 0, sizeof(pe));
+ spin_lock_bh(&port->priv->prs_spinlock);
+
tid = mvpp2_prs_flow_find(port->priv, port->id);
/* Such entry not exist */
@@ -2480,8 +2543,10 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port)
tid = mvpp2_prs_tcam_first_free(port->priv,
MVPP2_PE_LAST_FREE_TID,
MVPP2_PE_FIRST_FREE_TID);
- if (tid < 0)
+ if (tid < 0) {
+ spin_unlock_bh(&port->priv->prs_spinlock);
return tid;
+ }
pe.index = tid;
@@ -2492,13 +2557,14 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port)
/* Update shadow table */
mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS);
} else {
- mvpp2_prs_init_from_hw(port->priv, &pe, tid);
+ __mvpp2_prs_init_from_hw(port->priv, &pe, tid);
}
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id));
mvpp2_prs_hw_write(port->priv, &pe);
+ spin_unlock_bh(&port->priv->prs_spinlock);
return 0;
}
@@ -2509,11 +2575,14 @@ int mvpp2_prs_hits(struct mvpp2 *priv, int index)
if (index > MVPP2_PRS_TCAM_SRAM_SIZE)
return -EINVAL;
+ spin_lock_bh(&priv->prs_spinlock);
+
mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index);
val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG);
val &= MVPP2_PRS_TCAM_HIT_CNT_MASK;
+ spin_unlock_bh(&priv->prs_spinlock);
return val;
}
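Note on the last mvpp2 hunk: the hit-counter read is an index-register write followed by a count-register read (MVPP2_PRS_TCAM_HIT_IDX_REG then MVPP2_PRS_TCAM_HIT_CNT_REG), and the lock keeps that pair atomic so another context cannot re-target the index register between the two accesses. A stand-alone sketch of the idea; the structure, register names, and plain-array "MMIO" accessors below are stand-ins only:

	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct prs_dev {
		spinlock_t lock;
		u32 regs[2];
	};

	#define PRS_HIT_IDX_REG	0
	#define PRS_HIT_CNT_REG	1

	/* Stand-ins for the MMIO accessors used by the real driver. */
	static void prs_write(struct prs_dev *p, int reg, u32 val) { p->regs[reg] = val; }
	static u32  prs_read(struct prs_dev *p, int reg)           { return p->regs[reg]; }

	static u32 prs_read_hit_counter(struct prs_dev *p, int index)
	{
		u32 val;

		spin_lock_bh(&p->lock);
		prs_write(p, PRS_HIT_IDX_REG, index);	/* select the counter */
		val = prs_read(p, PRS_HIT_CNT_REG);	/* read before anyone re-aims it */
		spin_unlock_bh(&p->lock);

		return val;
	}
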
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
index 4f4d58189118..01c6c0a2f283 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
@@ -150,17 +150,14 @@ octep_get_ethtool_stats(struct net_device *netdev,
iface_rx_stats,
iface_tx_stats);
- for (q = 0; q < oct->num_oqs; q++) {
- struct octep_iq *iq = oct->iq[q];
- struct octep_oq *oq = oct->oq[q];
-
- tx_packets += iq->stats.instr_completed;
- tx_bytes += iq->stats.bytes_sent;
- tx_busy_errors += iq->stats.tx_busy;
-
- rx_packets += oq->stats.packets;
- rx_bytes += oq->stats.bytes;
- rx_alloc_errors += oq->stats.alloc_failures;
+ for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
+ tx_packets += oct->stats_iq[q].instr_completed;
+ tx_bytes += oct->stats_iq[q].bytes_sent;
+ tx_busy_errors += oct->stats_iq[q].tx_busy;
+
+ rx_packets += oct->stats_oq[q].packets;
+ rx_bytes += oct->stats_oq[q].bytes;
+ rx_alloc_errors += oct->stats_oq[q].alloc_failures;
}
i = 0;
data[i++] = rx_packets;
@@ -198,22 +195,18 @@ octep_get_ethtool_stats(struct net_device *netdev,
data[i++] = iface_rx_stats->err_pkts;
/* Per Tx Queue stats */
- for (q = 0; q < oct->num_iqs; q++) {
- struct octep_iq *iq = oct->iq[q];
-
- data[i++] = iq->stats.instr_posted;
- data[i++] = iq->stats.instr_completed;
- data[i++] = iq->stats.bytes_sent;
- data[i++] = iq->stats.tx_busy;
+ for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
+ data[i++] = oct->stats_iq[q].instr_posted;
+ data[i++] = oct->stats_iq[q].instr_completed;
+ data[i++] = oct->stats_iq[q].bytes_sent;
+ data[i++] = oct->stats_iq[q].tx_busy;
}
/* Per Rx Queue stats */
- for (q = 0; q < oct->num_oqs; q++) {
- struct octep_oq *oq = oct->oq[q];
-
- data[i++] = oq->stats.packets;
- data[i++] = oq->stats.bytes;
- data[i++] = oq->stats.alloc_failures;
+ for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
+ data[i++] = oct->stats_oq[q].packets;
+ data[i++] = oct->stats_oq[q].bytes;
+ data[i++] = oct->stats_oq[q].alloc_failures;
}
}
@@ -444,6 +437,15 @@ static int octep_set_link_ksettings(struct net_device *netdev,
return 0;
}
+static void octep_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct octep_device *oct = netdev_priv(dev);
+
+ channel->max_combined = CFG_GET_PORTS_MAX_IO_RINGS(oct->conf);
+ channel->combined_count = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+}
+
static const struct ethtool_ops octep_ethtool_ops = {
.get_drvinfo = octep_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -452,6 +454,7 @@ static const struct ethtool_ops octep_ethtool_ops = {
.get_ethtool_stats = octep_get_ethtool_stats,
.get_link_ksettings = octep_get_link_ksettings,
.set_link_ksettings = octep_set_link_ksettings,
+ .get_channels = octep_get_channels,
};
void octep_set_ethtool_ops(struct net_device *netdev)
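Note on the octeon_ep ethtool rework above: counters are now read from stats_iq[]/stats_oq[] arrays embedded in the device structure instead of dereferencing per-queue objects, and the loops cover the full OCTEP_MAX_QUEUES range. Because the arrays belong to the device rather than to the queue objects, the totals need not reset when queues are torn down and re-created. A compact sketch of the arrangement, with hypothetical structure and field names:

	#include <linux/types.h>

	#define MAX_QUEUES 64

	struct txq_stats {
		u64 pkts;
		u64 bytes;
	};

	struct txq {
		struct txq_stats *stats;	/* points into dev->stats_txq[q_no] */
		int q_no;
	};

	struct dev_priv {
		struct txq *txq[MAX_QUEUES];
		struct txq_stats stats_txq[MAX_QUEUES];	/* persists across resize */
	};

	static void dev_setup_txq(struct dev_priv *p, struct txq *q, int q_no)
	{
		q->q_no = q_no;
		q->stats = &p->stats_txq[q_no];
	}

	static u64 dev_total_tx_packets(const struct dev_priv *p)
	{
		u64 total = 0;
		int q;

		for (q = 0; q < MAX_QUEUES; q++)
			total += p->stats_txq[q].pkts;

		return total;
	}
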
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index 549436efc204..bcea3fc26a8c 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -822,7 +822,7 @@ static inline int octep_iq_full_check(struct octep_iq *iq)
if (unlikely(IQ_INSTR_SPACE(iq) >
OCTEP_WAKE_QUEUE_THRESHOLD)) {
netif_start_subqueue(iq->netdev, iq->q_no);
- iq->stats.restart_cnt++;
+ iq->stats->restart_cnt++;
return 0;
}
@@ -960,7 +960,7 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
wmb();
/* Ring Doorbell to notify the NIC of new packets */
writel(iq->fill_cnt, iq->doorbell_reg);
- iq->stats.instr_posted += iq->fill_cnt;
+ iq->stats->instr_posted += iq->fill_cnt;
iq->fill_cnt = 0;
return NETDEV_TX_OK;
@@ -991,37 +991,24 @@ dma_map_err:
static void octep_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
- u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
struct octep_device *oct = netdev_priv(netdev);
+ u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
int q;
- if (netif_running(netdev))
- octep_ctrl_net_get_if_stats(oct,
- OCTEP_CTRL_NET_INVALID_VFID,
- &oct->iface_rx_stats,
- &oct->iface_tx_stats);
-
tx_packets = 0;
tx_bytes = 0;
rx_packets = 0;
rx_bytes = 0;
- for (q = 0; q < oct->num_oqs; q++) {
- struct octep_iq *iq = oct->iq[q];
- struct octep_oq *oq = oct->oq[q];
-
- tx_packets += iq->stats.instr_completed;
- tx_bytes += iq->stats.bytes_sent;
- rx_packets += oq->stats.packets;
- rx_bytes += oq->stats.bytes;
+ for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
+ tx_packets += oct->stats_iq[q].instr_completed;
+ tx_bytes += oct->stats_iq[q].bytes_sent;
+ rx_packets += oct->stats_oq[q].packets;
+ rx_bytes += oct->stats_oq[q].bytes;
}
stats->tx_packets = tx_packets;
stats->tx_bytes = tx_bytes;
stats->rx_packets = rx_packets;
stats->rx_bytes = rx_bytes;
- stats->multicast = oct->iface_rx_stats.mcast_pkts;
- stats->rx_errors = oct->iface_rx_stats.err_pkts;
- stats->collisions = oct->iface_tx_stats.xscol;
- stats->tx_fifo_errors = oct->iface_tx_stats.undflw;
}
/**
@@ -1137,6 +1124,59 @@ static int octep_set_features(struct net_device *dev, netdev_features_t features
return err;
}
+static bool octep_is_vf_valid(struct octep_device *oct, int vf)
+{
+ if (vf >= CFG_GET_ACTIVE_VFS(oct->conf)) {
+ netdev_err(oct->netdev, "Invalid VF ID %d\n", vf);
+ return false;
+ }
+
+ return true;
+}
+
+static int octep_get_vf_config(struct net_device *dev, int vf,
+ struct ifla_vf_info *ivi)
+{
+ struct octep_device *oct = netdev_priv(dev);
+
+ if (!octep_is_vf_valid(oct, vf))
+ return -EINVAL;
+
+ ivi->vf = vf;
+ ether_addr_copy(ivi->mac, oct->vf_info[vf].mac_addr);
+ ivi->spoofchk = true;
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ ivi->trusted = false;
+
+ return 0;
+}
+
+static int octep_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+{
+ struct octep_device *oct = netdev_priv(dev);
+ int err;
+
+ if (!octep_is_vf_valid(oct, vf))
+ return -EINVAL;
+
+ if (!is_valid_ether_addr(mac)) {
+ dev_err(&oct->pdev->dev, "Invalid MAC Address %pM\n", mac);
+ return -EADDRNOTAVAIL;
+ }
+
+ dev_dbg(&oct->pdev->dev, "set vf-%d mac to %pM\n", vf, mac);
+ ether_addr_copy(oct->vf_info[vf].mac_addr, mac);
+ oct->vf_info[vf].flags |= OCTEON_PFVF_FLAG_MAC_SET_BY_PF;
+
+ err = octep_ctrl_net_set_mac_addr(oct, vf, mac, true);
+ if (err)
+ dev_err(&oct->pdev->dev,
+ "Set VF%d MAC address failed via host control Mbox\n",
+ vf);
+
+ return err;
+}
+
static const struct net_device_ops octep_netdev_ops = {
.ndo_open = octep_open,
.ndo_stop = octep_stop,
@@ -1146,6 +1186,8 @@ static const struct net_device_ops octep_netdev_ops = {
.ndo_set_mac_address = octep_set_mac,
.ndo_change_mtu = octep_change_mtu,
.ndo_set_features = octep_set_features,
+ .ndo_get_vf_config = octep_get_vf_config,
+ .ndo_set_vf_mac = octep_set_vf_mac
};
/**
@@ -1197,7 +1239,7 @@ static void octep_hb_timeout_task(struct work_struct *work)
miss_cnt);
rtnl_lock();
if (netif_running(oct->netdev))
- octep_stop(oct->netdev);
+ dev_close(oct->netdev);
rtnl_unlock();
}
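Note on the new octep_set_vf_mac() above: it follows the usual .ndo_set_vf_mac shape of validating the VF index, rejecting non-unicast addresses with -EADDRNOTAVAIL, recording that the PF set the address, and only then pushing it to firmware. A reduced sketch with a hypothetical device structure and firmware call; is_valid_ether_addr() and ether_addr_copy() are the standard <linux/etherdevice.h> helpers:

	#include <linux/bits.h>
	#include <linux/errno.h>
	#include <linux/etherdevice.h>

	#define EX_MAC_SET_BY_PF	BIT(0)

	struct ex_vf {
		u8 mac[ETH_ALEN];
		u32 flags;
	};

	struct ex_dev {
		struct ex_vf vf[8];
		int num_vfs;
	};

	/* Hypothetical stand-in for the mailbox/firmware call. */
	static int ex_fw_set_mac(struct ex_dev *ed, int vf, const u8 *mac) { return 0; }

	static int ex_set_vf_mac(struct ex_dev *ed, int vf, const u8 *mac)
	{
		if (vf >= ed->num_vfs)
			return -EINVAL;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		ether_addr_copy(ed->vf[vf].mac, mac);
		ed->vf[vf].flags |= EX_MAC_SET_BY_PF;	/* VF may no longer override it */

		return ex_fw_set_mac(ed, vf, mac);
	}
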
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
index fee59e0e0138..81ac4267811c 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
@@ -220,6 +220,7 @@ struct octep_iface_link_info {
/* The Octeon VF device specific info data structure.*/
struct octep_pfvf_info {
u8 mac_addr[ETH_ALEN];
+ u32 flags;
u32 mbox_version;
};
@@ -257,11 +258,17 @@ struct octep_device {
/* Pointers to Octeon Tx queues */
struct octep_iq *iq[OCTEP_MAX_IQ];
+ /* Per iq stats */
+ struct octep_iq_stats stats_iq[OCTEP_MAX_IQ];
+
/* Rx queues (OQ: Output Queue) */
u16 num_oqs;
/* Pointers to Octeon Rx queues */
struct octep_oq *oq[OCTEP_MAX_OQ];
+ /* Per oq stats */
+ struct octep_oq_stats stats_oq[OCTEP_MAX_OQ];
+
/* Hardware port number of the PCIe interface */
u16 pcie_port;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
index e6eb98d70f3c..0867fab61b19 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
@@ -156,12 +156,23 @@ static void octep_pfvf_set_mac_addr(struct octep_device *oct, u32 vf_id,
{
int err;
+ if (oct->vf_info[vf_id].flags & OCTEON_PFVF_FLAG_MAC_SET_BY_PF) {
+ dev_err(&oct->pdev->dev,
+ "VF%d attempted to override administrative set MAC address\n",
+ vf_id);
+ rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ return;
+ }
+
err = octep_ctrl_net_set_mac_addr(oct, vf_id, cmd.s_set_mac.mac_addr, true);
if (err) {
rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
- dev_err(&oct->pdev->dev, "Set VF MAC address failed via host control Mbox\n");
+ dev_err(&oct->pdev->dev, "Set VF%d MAC address failed via host control Mbox\n",
+ vf_id);
return;
}
+
+ ether_addr_copy(oct->vf_info[vf_id].mac_addr, cmd.s_set_mac.mac_addr);
rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
}
@@ -171,12 +182,21 @@ static void octep_pfvf_get_mac_addr(struct octep_device *oct, u32 vf_id,
{
int err;
+ if (oct->vf_info[vf_id].flags & OCTEON_PFVF_FLAG_MAC_SET_BY_PF) {
+ dev_dbg(&oct->pdev->dev, "VF%d MAC address set by PF\n", vf_id);
+ ether_addr_copy(rsp->s_set_mac.mac_addr,
+ oct->vf_info[vf_id].mac_addr);
+ rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+ return;
+ }
err = octep_ctrl_net_get_mac_addr(oct, vf_id, rsp->s_set_mac.mac_addr);
if (err) {
rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
- dev_err(&oct->pdev->dev, "Get VF MAC address failed via host control Mbox\n");
+ dev_err(&oct->pdev->dev, "Get VF%d MAC address failed via host control Mbox\n",
+ vf_id);
return;
}
+ ether_addr_copy(oct->vf_info[vf_id].mac_addr, rsp->s_set_mac.mac_addr);
rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
}
@@ -186,6 +206,8 @@ static void octep_pfvf_dev_remove(struct octep_device *oct, u32 vf_id,
{
int err;
+ /* Reset VF-specific information maintained by the PF */
+ memset(&oct->vf_info[vf_id], 0, sizeof(struct octep_pfvf_info));
err = octep_ctrl_net_dev_remove(oct, vf_id);
if (err) {
rsp->s.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h
index 0dc6eead292a..386a095a99bc 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h
@@ -8,8 +8,6 @@
#ifndef _OCTEP_PFVF_MBOX_H_
#define _OCTEP_PFVF_MBOX_H_
-/* VF flags */
-#define OCTEON_PFVF_FLAG_MAC_SET_BY_PF BIT_ULL(0) /* PF has set VF MAC address */
#define OCTEON_SDP_16K_HW_FRS 16380UL
#define OCTEON_SDP_64K_HW_FRS 65531UL
@@ -23,6 +21,10 @@ enum octep_pfvf_mbox_version {
#define OCTEP_PFVF_MBOX_VERSION_CURRENT OCTEP_PFVF_MBOX_VERSION_V2
+/* VF flags */
+/* PF has set VF MAC address */
+#define OCTEON_PFVF_FLAG_MAC_SET_BY_PF BIT(0)
+
enum octep_pfvf_mbox_opcode {
OCTEP_PFVF_MBOX_CMD_VERSION,
OCTEP_PFVF_MBOX_CMD_SET_MTU,
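Note on the header change above: the VF flag define moves below the mbox version enum and switches from BIT_ULL(0) to BIT(0), presumably to match the u32 flags field added to struct octep_pfvf_info. For reference, BIT(n) expands to (1UL << n) while BIT_ULL(n) expands to (1ULL << n), so the _ULL form is conventionally reserved for genuinely 64-bit masks. A tiny illustration; the names and the 64-bit example value are made up:

	#include <linux/bits.h>
	#include <linux/types.h>

	#define EX_FLAG_MAC_SET_BY_PF	BIT(0)		/* fits a u32 flags word */
	#define EX_HW_WIDE_MASK		BIT_ULL(35)	/* needs the 64-bit form */

	struct ex_vf_state {
		u32 flags;
	};

	static bool ex_vf_mac_locked(const struct ex_vf_state *vf)
	{
		return vf->flags & EX_FLAG_MAC_SET_BY_PF;
	}
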
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
index 8af75cb37c3e..82b6b19e76b4 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
@@ -87,7 +87,7 @@ static int octep_oq_refill(struct octep_device *oct, struct octep_oq *oq)
page = dev_alloc_page();
if (unlikely(!page)) {
dev_err(oq->dev, "refill: rx buffer alloc failed\n");
- oq->stats.alloc_failures++;
+ oq->stats->alloc_failures++;
break;
}
@@ -98,7 +98,7 @@ static int octep_oq_refill(struct octep_device *oct, struct octep_oq *oq)
"OQ-%d buffer refill: DMA mapping error!\n",
oq->q_no);
put_page(page);
- oq->stats.alloc_failures++;
+ oq->stats->alloc_failures++;
break;
}
oq->buff_info[refill_idx].page = page;
@@ -134,6 +134,7 @@ static int octep_setup_oq(struct octep_device *oct, int q_no)
oq->netdev = oct->netdev;
oq->dev = &oct->pdev->dev;
oq->q_no = q_no;
+ oq->stats = &oct->stats_oq[q_no];
oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf);
oq->ring_size_mask = oq->max_count - 1;
oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf);
@@ -443,7 +444,7 @@ static int __octep_oq_process_rx(struct octep_device *oct,
if (!skb) {
octep_oq_drop_rx(oq, buff_info,
&read_idx, &desc_used);
- oq->stats.alloc_failures++;
+ oq->stats->alloc_failures++;
continue;
}
skb_reserve(skb, data_offset);
@@ -494,8 +495,8 @@ static int __octep_oq_process_rx(struct octep_device *oct,
oq->host_read_idx = read_idx;
oq->refill_count += desc_used;
- oq->stats.packets += pkt;
- oq->stats.bytes += rx_bytes;
+ oq->stats->packets += pkt;
+ oq->stats->bytes += rx_bytes;
return pkt;
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
index 3b08e2d560dc..b4696c93d0e6 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
@@ -186,8 +186,8 @@ struct octep_oq {
*/
u8 __iomem *pkts_sent_reg;
- /* Statistics for this OQ. */
- struct octep_oq_stats stats;
+ /* Pointer to statistics for this OQ. */
+ struct octep_oq_stats *stats;
/* Packets pending to be processed */
u32 pkts_pending;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
index 06851b78aa28..08ee90013fef 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
@@ -81,9 +81,9 @@ int octep_iq_process_completions(struct octep_iq *iq, u16 budget)
}
iq->pkts_processed += compl_pkts;
- iq->stats.instr_completed += compl_pkts;
- iq->stats.bytes_sent += compl_bytes;
- iq->stats.sgentry_sent += compl_sg;
+ iq->stats->instr_completed += compl_pkts;
+ iq->stats->bytes_sent += compl_bytes;
+ iq->stats->sgentry_sent += compl_sg;
iq->flush_index = fi;
netdev_tx_completed_queue(iq->netdev_q, compl_pkts, compl_bytes);
@@ -187,6 +187,7 @@ static int octep_setup_iq(struct octep_device *oct, int q_no)
iq->netdev = oct->netdev;
iq->dev = &oct->pdev->dev;
iq->q_no = q_no;
+ iq->stats = &oct->stats_iq[q_no];
iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf);
iq->ring_size_mask = iq->max_count - 1;
iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf);
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
index 875a2c34091f..58fb39dda977 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
@@ -170,8 +170,8 @@ struct octep_iq {
*/
u16 flush_index;
- /* Statistics for this input queue. */
- struct octep_iq_stats stats;
+ /* Pointer to statistics for this input queue. */
+ struct octep_iq_stats *stats;
/* Pointer to the Virtual Base addr of the input ring. */
struct octep_tx_desc_hw *desc_ring;
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
index 7b21439a315f..241a7e7c7ad2 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
@@ -114,12 +114,9 @@ static void octep_vf_get_ethtool_stats(struct net_device *netdev,
iface_tx_stats = &oct->iface_tx_stats;
iface_rx_stats = &oct->iface_rx_stats;
- for (q = 0; q < oct->num_oqs; q++) {
- struct octep_vf_iq *iq = oct->iq[q];
- struct octep_vf_oq *oq = oct->oq[q];
-
- tx_busy_errors += iq->stats.tx_busy;
- rx_alloc_errors += oq->stats.alloc_failures;
+ for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) {
+ tx_busy_errors += oct->stats_iq[q].tx_busy;
+ rx_alloc_errors += oct->stats_oq[q].alloc_failures;
}
i = 0;
data[i++] = rx_alloc_errors;
@@ -134,22 +131,18 @@ static void octep_vf_get_ethtool_stats(struct net_device *netdev,
data[i++] = iface_rx_stats->dropped_octets_fifo_full;
/* Per Tx Queue stats */
- for (q = 0; q < oct->num_iqs; q++) {
- struct octep_vf_iq *iq = oct->iq[q];
-
- data[i++] = iq->stats.instr_posted;
- data[i++] = iq->stats.instr_completed;
- data[i++] = iq->stats.bytes_sent;
- data[i++] = iq->stats.tx_busy;
+ for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) {
+ data[i++] = oct->stats_iq[q].instr_posted;
+ data[i++] = oct->stats_iq[q].instr_completed;
+ data[i++] = oct->stats_iq[q].bytes_sent;
+ data[i++] = oct->stats_iq[q].tx_busy;
}
/* Per Rx Queue stats */
for (q = 0; q < oct->num_oqs; q++) {
- struct octep_vf_oq *oq = oct->oq[q];
-
- data[i++] = oq->stats.packets;
- data[i++] = oq->stats.bytes;
- data[i++] = oq->stats.alloc_failures;
+ data[i++] = oct->stats_oq[q].packets;
+ data[i++] = oct->stats_oq[q].bytes;
+ data[i++] = oct->stats_oq[q].alloc_failures;
}
}
@@ -251,6 +244,15 @@ static int octep_vf_get_link_ksettings(struct net_device *netdev,
return 0;
}
+static void octep_vf_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct octep_vf_device *oct = netdev_priv(dev);
+
+ channel->max_combined = CFG_GET_PORTS_MAX_IO_RINGS(oct->conf);
+ channel->combined_count = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+}
+
static const struct ethtool_ops octep_vf_ethtool_ops = {
.get_drvinfo = octep_vf_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -258,6 +260,7 @@ static const struct ethtool_ops octep_vf_ethtool_ops = {
.get_sset_count = octep_vf_get_sset_count,
.get_ethtool_stats = octep_vf_get_ethtool_stats,
.get_link_ksettings = octep_vf_get_link_ksettings,
+ .get_channels = octep_vf_get_channels,
};
void octep_vf_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
index 7e6771c9cdbb..420c3f4cf741 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
@@ -18,8 +18,6 @@
#include "octep_vf_config.h"
#include "octep_vf_main.h"
-struct workqueue_struct *octep_vf_wq;
-
/* Supported Devices */
static const struct pci_device_id octep_vf_pci_id_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_VF)},
@@ -574,7 +572,7 @@ static int octep_vf_iq_full_check(struct octep_vf_iq *iq)
* caused queues to get re-enabled after
* being stopped
*/
- iq->stats.restart_cnt++;
+ iq->stats->restart_cnt++;
fallthrough;
case 1: /* Queue left enabled, since IQ is not yet full*/
return 0;
@@ -731,7 +729,7 @@ ring_dbell:
/* Flush the hw descriptors before writing to doorbell */
smp_wmb();
writel(iq->fill_cnt, iq->doorbell_reg);
- iq->stats.instr_posted += iq->fill_cnt;
+ iq->stats->instr_posted += iq->fill_cnt;
iq->fill_cnt = 0;
return NETDEV_TX_OK;
}
@@ -786,27 +784,16 @@ static void octep_vf_get_stats64(struct net_device *netdev,
tx_bytes = 0;
rx_packets = 0;
rx_bytes = 0;
- for (q = 0; q < oct->num_oqs; q++) {
- struct octep_vf_iq *iq = oct->iq[q];
- struct octep_vf_oq *oq = oct->oq[q];
-
- tx_packets += iq->stats.instr_completed;
- tx_bytes += iq->stats.bytes_sent;
- rx_packets += oq->stats.packets;
- rx_bytes += oq->stats.bytes;
+ for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) {
+ tx_packets += oct->stats_iq[q].instr_completed;
+ tx_bytes += oct->stats_iq[q].bytes_sent;
+ rx_packets += oct->stats_oq[q].packets;
+ rx_bytes += oct->stats_oq[q].bytes;
}
stats->tx_packets = tx_packets;
stats->tx_bytes = tx_bytes;
stats->rx_packets = rx_packets;
stats->rx_bytes = rx_bytes;
- if (!octep_vf_get_if_stats(oct)) {
- stats->multicast = oct->iface_rx_stats.mcast_pkts;
- stats->rx_errors = oct->iface_rx_stats.err_pkts;
- stats->rx_dropped = oct->iface_rx_stats.dropped_pkts_fifo_full +
- oct->iface_rx_stats.err_pkts;
- stats->rx_missed_errors = oct->iface_rx_stats.dropped_pkts_fifo_full;
- stats->tx_dropped = oct->iface_tx_stats.dropped;
- }
}
/**
@@ -846,7 +833,9 @@ static void octep_vf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
struct octep_vf_device *oct = netdev_priv(netdev);
netdev_hold(netdev, NULL, GFP_ATOMIC);
- schedule_work(&oct->tx_timeout_task);
+ if (!schedule_work(&oct->tx_timeout_task))
+ netdev_put(netdev, NULL);
+
}
static int octep_vf_set_mac(struct net_device *netdev, void *p)
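Note on the octep_vf_tx_timeout() fix above: it balances the reference count around the deferred work. schedule_work() returns false when the work item was already pending, in which case the worker will run (and drop its reference) only once, so the hold taken for this call must be released immediately. A minimal sketch of the pairing:

	#include <linux/netdevice.h>
	#include <linux/workqueue.h>

	struct ex_priv {
		struct work_struct tx_timeout_task;
	};

	static void ex_tx_timeout(struct net_device *netdev, unsigned int txqueue)
	{
		struct ex_priv *priv = netdev_priv(netdev);

		netdev_hold(netdev, NULL, GFP_ATOMIC);		/* worker puts it back */
		if (!schedule_work(&priv->tx_timeout_task))
			netdev_put(netdev, NULL);		/* already queued: undo hold */
	}
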
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
index 5769f62545cd..b9f13506f462 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
@@ -246,11 +246,17 @@ struct octep_vf_device {
/* Pointers to Octeon Tx queues */
struct octep_vf_iq *iq[OCTEP_VF_MAX_IQ];
+ /* Per iq stats */
+ struct octep_vf_iq_stats stats_iq[OCTEP_VF_MAX_IQ];
+
/* Rx queues (OQ: Output Queue) */
u16 num_oqs;
/* Pointers to Octeon Rx queues */
struct octep_vf_oq *oq[OCTEP_VF_MAX_OQ];
+ /* Per oq stats */
+ struct octep_vf_oq_stats stats_oq[OCTEP_VF_MAX_OQ];
+
/* Hardware port number of the PCIe interface */
u16 pcie_port;
@@ -314,8 +320,6 @@ static inline u16 OCTEP_VF_MINOR_REV(struct octep_vf_device *oct)
#define octep_vf_read_csr64(octep_vf_dev, reg_off) \
readq((octep_vf_dev)->mmio.hw_addr + (reg_off))
-extern struct workqueue_struct *octep_vf_wq;
-
int octep_vf_device_setup(struct octep_vf_device *oct);
int octep_vf_setup_iqs(struct octep_vf_device *oct);
void octep_vf_free_iqs(struct octep_vf_device *oct);
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
index 82821bc28634..d70c8be3cfc4 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
@@ -87,7 +87,7 @@ static int octep_vf_oq_refill(struct octep_vf_device *oct, struct octep_vf_oq *o
page = dev_alloc_page();
if (unlikely(!page)) {
dev_err(oq->dev, "refill: rx buffer alloc failed\n");
- oq->stats.alloc_failures++;
+ oq->stats->alloc_failures++;
break;
}
@@ -98,7 +98,7 @@ static int octep_vf_oq_refill(struct octep_vf_device *oct, struct octep_vf_oq *o
"OQ-%d buffer refill: DMA mapping error!\n",
oq->q_no);
put_page(page);
- oq->stats.alloc_failures++;
+ oq->stats->alloc_failures++;
break;
}
oq->buff_info[refill_idx].page = page;
@@ -134,6 +134,7 @@ static int octep_vf_setup_oq(struct octep_vf_device *oct, int q_no)
oq->netdev = oct->netdev;
oq->dev = &oct->pdev->dev;
oq->q_no = q_no;
+ oq->stats = &oct->stats_oq[q_no];
oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf);
oq->ring_size_mask = oq->max_count - 1;
oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf);
@@ -458,8 +459,8 @@ static int __octep_vf_oq_process_rx(struct octep_vf_device *oct,
oq->host_read_idx = read_idx;
oq->refill_count += desc_used;
- oq->stats.packets += pkt;
- oq->stats.bytes += rx_bytes;
+ oq->stats->packets += pkt;
+ oq->stats->bytes += rx_bytes;
return pkt;
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h
index fe46838b5200..9e296b7d7e34 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h
@@ -187,7 +187,7 @@ struct octep_vf_oq {
u8 __iomem *pkts_sent_reg;
/* Statistics for this OQ. */
- struct octep_vf_oq_stats stats;
+ struct octep_vf_oq_stats *stats;
/* Packets pending to be processed */
u32 pkts_pending;
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c
index 47a5c054fdb6..8180e5ce3d7e 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c
@@ -82,9 +82,9 @@ int octep_vf_iq_process_completions(struct octep_vf_iq *iq, u16 budget)
}
iq->pkts_processed += compl_pkts;
- iq->stats.instr_completed += compl_pkts;
- iq->stats.bytes_sent += compl_bytes;
- iq->stats.sgentry_sent += compl_sg;
+ iq->stats->instr_completed += compl_pkts;
+ iq->stats->bytes_sent += compl_bytes;
+ iq->stats->sgentry_sent += compl_sg;
iq->flush_index = fi;
netif_subqueue_completed_wake(iq->netdev, iq->q_no, compl_pkts,
@@ -186,6 +186,7 @@ static int octep_vf_setup_iq(struct octep_vf_device *oct, int q_no)
iq->netdev = oct->netdev;
iq->dev = &oct->pdev->dev;
iq->q_no = q_no;
+ iq->stats = &oct->stats_iq[q_no];
iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf);
iq->ring_size_mask = iq->max_count - 1;
iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf);
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h
index f338b975103c..1cede90e3a5f 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h
@@ -129,7 +129,7 @@ struct octep_vf_iq {
u16 flush_index;
/* Statistics for this input queue. */
- struct octep_vf_iq_stats stats;
+ struct octep_vf_iq_stats *stats;
/* Pointer to the Virtual Base addr of the input ring. */
struct octep_vf_tx_desc_hw *desc_ring;
diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
index a32d85d6f599..35c4f5f64f58 100644
--- a/drivers/net/ethernet/marvell/octeontx2/Kconfig
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -46,3 +46,11 @@ config OCTEONTX2_VF
depends on OCTEONTX2_PF
help
This driver supports Marvell's OcteonTX2 NIC virtual function.
+
+config RVU_ESWITCH
+ tristate "Marvell RVU E-Switch support"
+ depends on OCTEONTX2_PF
+ default m
+ help
+ This driver supports Marvell's RVU E-Switch that
+ provides internal SRIOV packet steering and switching.
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index 3cf4c8285c90..244de500963e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -11,4 +11,6 @@ rvu_mbox-y := mbox.o rvu_trace.o
rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \
rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \
- rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o
+ rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o \
+ rvu_rep.o cn20k/mbox_init.o cn20k/nix.o cn20k/debugfs.o \
+ cn20k/npa.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 27935c54b91b..42044cd810b1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -21,8 +21,7 @@
#include "rvu.h"
#include "lmac_common.h"
-#define DRV_NAME "Marvell-CGX/RPM"
-#define DRV_STRING "Marvell CGX/RPM Driver"
+#define DRV_NAME "Marvell-CGX-RPM"
#define CGX_RX_STAT_GLOBAL_INDEX 9
@@ -66,8 +65,18 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
- { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) },
- { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CN10K_A) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF10K_A) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF10K_B) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CN10K_B) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CN20KA) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF20KA) },
{ 0, } /* end of table */
};
@@ -112,6 +121,11 @@ struct mac_ops *get_mac_ops(void *cgxd)
return ((struct cgx *)cgxd)->mac_ops;
}
+u32 cgx_get_fifo_len(void *cgxd)
+{
+ return ((struct cgx *)cgxd)->fifo_len;
+}
+
void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
{
writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
@@ -209,6 +223,24 @@ u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id)
return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT;
}
+static u8 cgx_get_nix_resetbit(struct cgx *cgx)
+{
+ int first_lmac;
+ u8 p2x;
+
+	/* non-98XX silicon supports only the NIX0 block */
+ if (cgx->pdev->subsystem_device != PCI_SUBSYS_DEVID_98XX)
+ return CGX_NIX0_RESET;
+
+ first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
+ p2x = cgx_lmac_get_p2x(cgx->cgx_id, first_lmac);
+
+ if (p2x == CMR_P2X_SEL_NIX1)
+ return CGX_NIX1_RESET;
+ else
+ return CGX_NIX0_RESET;
+}
+
/* Ensure the required lock for event queue(where asynchronous events are
* posted) is acquired before calling this API. Else an asynchronous event(with
* latest link status) can reach the destination before this function returns
@@ -501,7 +533,7 @@ static u32 cgx_get_lmac_fifo_len(void *cgxd, int lmac_id)
u8 num_lmacs;
u32 fifo_len;
- fifo_len = cgx->mac_ops->fifo_len;
+ fifo_len = cgx->fifo_len;
num_lmacs = cgx->mac_ops->get_nr_lmacs(cgx);
switch (num_lmacs) {
@@ -684,6 +716,11 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
+
+ /* pass lmac as 0 for CGX_CMR_RX_STAT9-12 */
+ if (idx >= CGX_RX_STAT_GLOBAL_INDEX)
+ lmac_id = 0;
+
*rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
return 0;
}
@@ -1144,17 +1181,25 @@ static int cgx_link_usertable_index_map(int speed)
static void set_mod_args(struct cgx_set_link_mode_args *args,
u32 speed, u8 duplex, u8 autoneg, u64 mode)
{
- /* Fill default values incase of user did not pass
- * valid parameters
+ int mode_baseidx;
+ u8 cgx_mode;
+
+ if (args->multimode) {
+ args->mode |= mode;
+ return;
+ }
+
+ /* Derive mode_base_idx and mode fields based
+ * on cgx_mode value
*/
- if (args->duplex == DUPLEX_UNKNOWN)
- args->duplex = duplex;
- if (args->speed == SPEED_UNKNOWN)
- args->speed = speed;
- if (args->an == AUTONEG_UNKNOWN)
- args->an = autoneg;
+ cgx_mode = find_first_bit((unsigned long *)&mode,
+ CGX_MODE_MAX);
args->mode = mode;
- args->ports = 0;
+ mode_baseidx = cgx_mode - 41;
+ if (mode_baseidx > 0) {
+ args->mode_baseidx = 1;
+ args->mode = BIT_ULL(mode_baseidx);
+ }
}
static void otx2_map_ethtool_link_modes(u64 bitmask,
@@ -1162,16 +1207,16 @@ static void otx2_map_ethtool_link_modes(u64 bitmask,
{
switch (bitmask) {
case ETHTOOL_LINK_MODE_10baseT_Half_BIT:
- set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII));
+ set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII_10M_BIT));
break;
case ETHTOOL_LINK_MODE_10baseT_Full_BIT:
- set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII));
+ set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII_10M_BIT));
break;
case ETHTOOL_LINK_MODE_100baseT_Half_BIT:
- set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII));
+ set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII_100M_BIT));
break;
case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
- set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII));
+ set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII_100M_BIT));
break;
case ETHTOOL_LINK_MODE_1000baseT_Half_BIT:
set_mod_args(args, 1000, 1, 1, BIT_ULL(CGX_MODE_SGMII));
@@ -1443,25 +1488,36 @@ int cgx_get_fwdata_base(u64 *base)
}
int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
+ struct cgx_lmac_fwdata_s *linkmodes,
int cgx_id, int lmac_id)
{
struct cgx *cgx = cgxd;
u64 req = 0, resp;
+ u8 bit;
if (!cgx)
return -ENODEV;
- if (args.mode)
- otx2_map_ethtool_link_modes(args.mode, &args);
- if (!args.speed && args.duplex && !args.an)
- return -EINVAL;
+ for_each_set_bit(bit, args.advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS)
+ otx2_map_ethtool_link_modes(bit, &args);
+
+ if (args.multimode) {
+ if (linkmodes->advertised_link_modes_own != CGX_CMD_OWN_NS)
+ return -EBUSY;
+
+ linkmodes->advertised_link_modes = args.mode;
+ /* Update ownership */
+ linkmodes->advertised_link_modes_own = CGX_CMD_OWN_FIRMWARE;
+ args.mode = GENMASK_ULL(41, 0);
+ }
req = FIELD_SET(CMDREG_ID, CGX_CMD_MODE_CHANGE, req);
req = FIELD_SET(CMDMODECHANGE_SPEED,
cgx_link_usertable_index_map(args.speed), req);
req = FIELD_SET(CMDMODECHANGE_DUPLEX, args.duplex, req);
req = FIELD_SET(CMDMODECHANGE_AN, args.an, req);
- req = FIELD_SET(CMDMODECHANGE_PORT, args.ports, req);
+ req = FIELD_SET(CMDMODECHANGE_MODE_BASEIDX, args.mode_baseidx, req);
req = FIELD_SET(CMDMODECHANGE_FLAGS, args.mode, req);
return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
@@ -1647,9 +1703,11 @@ unsigned long cgx_get_lmac_bmap(void *cgxd)
static int cgx_lmac_init(struct cgx *cgx)
{
+ u8 max_dmac_filters;
struct lmac *lmac;
+ int err, filter;
+ unsigned int i;
u64 lmac_list;
- int i, err;
/* lmac_list specifies which lmacs are enabled
* when bit n is set to 1, LMAC[n] is enabled
@@ -1675,7 +1733,7 @@ static int cgx_lmac_init(struct cgx *cgx)
err = -ENOMEM;
goto err_lmac_free;
}
- sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
+ sprintf(lmac->name, "cgx_fwi_%u_%u", cgx->cgx_id, i);
if (cgx->mac_ops->non_contiguous_serdes_lane) {
lmac->lmac_id = __ffs64(lmac_list);
lmac_list &= ~BIT_ULL(lmac->lmac_id);
@@ -1688,6 +1746,8 @@ static int cgx_lmac_init(struct cgx *cgx)
cgx->mac_ops->dmac_filter_count /
cgx->lmac_count;
+ max_dmac_filters = lmac->mac_to_index_bmap.max;
+
err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap);
if (err)
goto err_name_free;
@@ -1717,8 +1777,19 @@ static int cgx_lmac_init(struct cgx *cgx)
set_bit(lmac->lmac_id, &cgx->lmac_bmap);
cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
lmac->lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac->lmac_id);
+
+ /* Disable stale DMAC filters for sane state */
+ for (filter = 0; filter < max_dmac_filters; filter++)
+ cgx_lmac_addr_del(cgx->cgx_id, lmac->lmac_id, filter);
+
+		/* cgx_lmac_addr_del() does not clear the entry at index 0,
+		 * so it needs to be done explicitly
+ */
+ cgx_lmac_addr_reset(cgx->cgx_id, lmac->lmac_id);
}
+ /* Start X2P reset on given MAC block */
+ cgx->mac_ops->mac_x2p_reset(cgx, true);
return cgx_lmac_verify_fwi_version(cgx);
err_bitmap_free:
@@ -1764,7 +1835,7 @@ static void cgx_populate_features(struct cgx *cgx)
u64 cfg;
cfg = cgx_read(cgx, 0, CGX_CONST);
- cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
+ cgx->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
cgx->max_lmac_per_mac = FIELD_GET(CGX_CONST_MAX_LMACS, cfg);
if (is_dev_rpm(cgx))
@@ -1784,6 +1855,45 @@ static u8 cgx_get_rxid_mapoffset(struct cgx *cgx)
return 0x60;
}
+static void cgx_x2p_reset(void *cgxd, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ int lmac_id;
+ u64 cfg;
+
+ if (enable) {
+ for_each_set_bit(lmac_id, &cgx->lmac_bmap, cgx->max_lmac_per_mac)
+ cgx->mac_ops->mac_enadis_rx(cgx, lmac_id, false);
+
+ usleep_range(1000, 2000);
+
+ cfg = cgx_read(cgx, 0, CGXX_CMR_GLOBAL_CONFIG);
+ cfg |= cgx_get_nix_resetbit(cgx) | CGX_NSCI_DROP;
+ cgx_write(cgx, 0, CGXX_CMR_GLOBAL_CONFIG, cfg);
+ } else {
+ cfg = cgx_read(cgx, 0, CGXX_CMR_GLOBAL_CONFIG);
+ cfg &= ~(cgx_get_nix_resetbit(cgx) | CGX_NSCI_DROP);
+ cgx_write(cgx, 0, CGXX_CMR_GLOBAL_CONFIG, cfg);
+ }
+}
+
+static int cgx_enadis_rx(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
+ if (enable)
+ cfg |= DATA_PKT_RX_EN;
+ else
+ cfg &= ~DATA_PKT_RX_EN;
+ cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
+ return 0;
+}
+
static struct mac_ops cgx_mac_ops = {
.name = "cgx",
.csr_offset = 0,
@@ -1815,6 +1925,8 @@ static struct mac_ops cgx_mac_ops = {
.mac_get_pfc_frm_cfg = cgx_lmac_get_pfc_frm_cfg,
.mac_reset = cgx_lmac_reset,
.mac_stats_reset = cgx_stats_reset,
+ .mac_x2p_reset = cgx_x2p_reset,
+ .mac_enadis_rx = cgx_enadis_rx,
};
static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -1851,6 +1963,12 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_disable_device;
}
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "DMA mask config failed, abort\n");
+ goto err_release_regions;
+ }
+
/* MAP configuration registers */
cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
if (!cgx->reg_base) {
@@ -1859,6 +1977,14 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_release_regions;
}
+ if (!is_cn20k(pdev) &&
+ !is_cgx_mapped_to_nix(pdev->subsystem_device, cgx->cgx_id)) {
+ dev_notice(dev, "CGX %d not mapped to NIX, skipping probe\n",
+ cgx->cgx_id);
+ err = -ENODEV;
+ goto err_release_regions;
+ }
+
cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
if (!cgx->lmac_count) {
dev_notice(dev, "CGX %d LMAC count is zero, skipping probe\n", cgx->cgx_id);
@@ -1868,7 +1994,7 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nvec = pci_msix_vec_count(cgx->pdev);
err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
- if (err < 0 || err != nvec) {
+ if (err < 0) {
dev_err(dev, "Request for %d msix vectors failed, err %d\n",
nvec, err);
goto err_release_regions;
@@ -1879,7 +2005,7 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* init wq for processing linkup requests */
INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
- cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
+ cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", WQ_PERCPU, 0);
if (!cgx->cgx_cmd_workq) {
dev_err(dev, "alloc workqueue failed for cgx cmd");
err = -ENOMEM;
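Note on the cgx_probe() changes above: the DMA mask is now configured before anything is mapped. dma_set_mask_and_coherent() sets both the streaming and coherent masks in a single call, and a failure aborts the probe. A reduced sketch of the probe-time call:

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	static int ex_probe_dma(struct pci_dev *pdev)
	{
		int err;

		/* Streaming and coherent masks together, before any dma_map_*() use. */
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
		if (err)
			dev_err(&pdev->dev, "DMA mask config failed, abort\n");

		return err;
	}
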
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index dc9ace30554a..92ccf343dfe0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -32,6 +32,10 @@
#define CGX_LMAC_TYPE_MASK 0xF
#define CGXX_CMRX_INT 0x040
#define FW_CGX_INT BIT_ULL(1)
+#define CGXX_CMR_GLOBAL_CONFIG 0x08
+#define CGX_NIX0_RESET BIT_ULL(2)
+#define CGX_NIX1_RESET BIT_ULL(3)
+#define CGX_NSCI_DROP BIT_ULL(9)
#define CGXX_CMRX_INT_ENA_W1S 0x058
#define CGXX_CMRX_RX_ID_MAP 0x060
#define CGXX_CMRX_RX_STAT0 0x070
@@ -157,16 +161,13 @@ int cgx_get_link_info(void *cgxd, int lmac_id,
struct cgx_link_user_info *linfo);
int cgx_lmac_linkup_start(void *cgxd);
int cgx_get_fwdata_base(u64 *base);
-int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
- u8 *tx_pause, u8 *rx_pause);
-int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
- u8 tx_pause, u8 rx_pause);
void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable);
u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id);
int cgx_set_fec(u64 fec, int cgx_id, int lmac_id);
int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp);
int cgx_get_phy_fec_stats(void *cgxd, int lmac_id);
int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
+ struct cgx_lmac_fwdata_s *linkmodes,
int cgx_id, int lmac_id);
u64 cgx_features_get(void *cgxd);
struct mac_ops *get_mac_ops(void *cgxd);
@@ -185,4 +186,5 @@ int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause,
int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
int pfvf_idx);
int cgx_lmac_reset(void *cgxd, int lmac_id, u8 pf_req_flr);
+u32 cgx_get_fifo_len(void *cgxd);
#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
index d4a27c882a5b..39352d451cc3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
@@ -95,7 +95,31 @@ enum CGX_MODE_ {
CGX_MODE_100G_C2M,
CGX_MODE_100G_CR4,
CGX_MODE_100G_KR4,
- CGX_MODE_MAX /* = 29 */
+ CGX_MODE_LAUI_2_C2C_BIT,
+ CGX_MODE_LAUI_2_C2M_BIT,
+ CGX_MODE_50GBASE_CR2_C_BIT,
+ CGX_MODE_50GBASE_KR2_C_BIT, /* = 30 */
+ CGX_MODE_100GAUI_2_C2C_BIT,
+ CGX_MODE_100GAUI_2_C2M_BIT,
+ CGX_MODE_100GBASE_CR2_BIT,
+ CGX_MODE_100GBASE_KR2_BIT,
+ CGX_MODE_SFI_1G_BIT,
+ CGX_MODE_25GBASE_CR_C_BIT,
+ CGX_MODE_25GBASE_KR_C_BIT,
+ CGX_MODE_SGMII_10M_BIT,
+ CGX_MODE_SGMII_100M_BIT, /* = 39 */
+ CGX_MODE_2500_BASEX_BIT = 42, /* Mode group 1 */
+ CGX_MODE_5000_BASEX_BIT,
+ CGX_MODE_O_USGMII_BIT,
+ CGX_MODE_Q_USGMII_BIT,
+ CGX_MODE_2_5G_USXGMII_BIT,
+ CGX_MODE_5G_USXGMII_BIT,
+ CGX_MODE_10G_SXGMII_BIT,
+ CGX_MODE_10G_DXGMII_BIT,
+ CGX_MODE_10G_QXGMII_BIT,
+ CGX_MODE_TP_BIT,
+ CGX_MODE_FIBER_BIT,
+ CGX_MODE_MAX /* = 53 */
};
/* REQUEST ID types. Input to firmware */
enum cgx_cmd_id {
@@ -258,7 +282,12 @@ struct cgx_lnk_sts {
#define CMDMODECHANGE_SPEED GENMASK_ULL(11, 8)
#define CMDMODECHANGE_DUPLEX GENMASK_ULL(12, 12)
#define CMDMODECHANGE_AN GENMASK_ULL(13, 13)
-#define CMDMODECHANGE_PORT GENMASK_ULL(21, 14)
+/* This field categorizes the mode ID (FLAGS) range to accommodate
+ * more modes.
+ * To specify mode ID range of 0 - 41, this field will be 0.
+ * To specify mode ID range of 42 - 83, this field will be 1.
+ */
+#define CMDMODECHANGE_MODE_BASEIDX GENMASK_ULL(21, 20)
#define CMDMODECHANGE_FLAGS GENMASK_ULL(63, 22)
/* LINK_BRING_UP command timeout */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/api.h b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/api.h
new file mode 100644
index 000000000000..4285b5d6a6a2
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/api.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#ifndef CN20K_API_H
+#define CN20K_API_H
+
+#include "../rvu.h"
+
+struct ng_rvu {
+ struct mbox_ops *rvu_mbox_ops;
+ struct qmem *pf_mbox_addr;
+ struct qmem *vf_mbox_addr;
+};
+
+/* Mbox related APIs */
+int cn20k_rvu_mbox_init(struct rvu *rvu, int type, int num);
+int cn20k_rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
+ int num, int type, unsigned long *pf_bmap);
+void cn20k_free_mbox_memory(struct rvu *rvu);
+int cn20k_register_afpf_mbox_intr(struct rvu *rvu);
+int cn20k_register_afvf_mbox_intr(struct rvu *rvu, int pf_vec_start);
+void cn20k_rvu_enable_mbox_intr(struct rvu *rvu);
+void cn20k_rvu_unregister_interrupts(struct rvu *rvu);
+int cn20k_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ void *reg_base, int direction, int ndevs);
+void cn20k_rvu_enable_afvf_intr(struct rvu *rvu, int vfs);
+void cn20k_rvu_disable_afvf_intr(struct rvu *rvu, int vfs);
+#endif /* CN20K_API_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.c
new file mode 100644
index 000000000000..498968bf4cf5
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "struct.h"
+#include "debugfs.h"
+
+void print_nix_cn20k_sq_ctx(struct seq_file *m,
+ struct nix_cn20k_sq_ctx_s *sq_ctx)
+{
+ seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
+ sq_ctx->ena, sq_ctx->qint_idx);
+ seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
+ sq_ctx->substream, sq_ctx->sdp_mcast);
+ seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
+ sq_ctx->cq, sq_ctx->sqe_way_mask);
+
+ seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
+ sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
+ seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
+ sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
+ seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
+ sq_ctx->default_chan, sq_ctx->sqb_count);
+
+ seq_printf(m, "W1: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
+ seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
+ seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
+ sq_ctx->sqb_aura, sq_ctx->sq_int);
+ seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
+ sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
+
+ seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
+ sq_ctx->max_sqe_size, sq_ctx->cq_limit);
+ seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
+ sq_ctx->lmt_dis, sq_ctx->mnq_dis);
+ seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
+ sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
+ seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
+ sq_ctx->tail_offset, sq_ctx->smenq_offset);
+ seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
+ sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
+
+ seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
+ sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
+ seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
+ seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
+ seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
+ seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
+ sq_ctx->smenq_next_sqb);
+
+ seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
+
+ seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
+ seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
+ sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
+ seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
+ sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
+ seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
+ sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
+
+ seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
+ (u64)sq_ctx->scm_lso_rem);
+ seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
+ seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
+ seq_printf(m, "W13: aged_drop_octs \t\t\t%llu\n\n",
+ (u64)sq_ctx->aged_drop_octs);
+ seq_printf(m, "W13: aged_drop_pkts \t\t\t%llu\n\n",
+ (u64)sq_ctx->aged_drop_pkts);
+ seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
+ (u64)sq_ctx->dropped_octs);
+ seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
+ (u64)sq_ctx->dropped_pkts);
+}
+
+void print_nix_cn20k_cq_ctx(struct seq_file *m,
+ struct nix_cn20k_aq_enq_rsp *rsp)
+{
+ struct nix_cn20k_cq_ctx_s *cq_ctx = &rsp->cq;
+
+ seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);
+
+ seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
+ seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
+ cq_ctx->avg_con, cq_ctx->cint_idx);
+ seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
+ cq_ctx->cq_err, cq_ctx->qint_idx);
+ seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
+ cq_ctx->bpid, cq_ctx->bp_ena);
+
+ seq_printf(m, "W1: lbpid_high \t\t\t0x%03x\n", cq_ctx->lbpid_high);
+ seq_printf(m, "W1: lbpid_med \t\t\t0x%03x\n", cq_ctx->lbpid_med);
+ seq_printf(m, "W1: lbpid_low \t\t\t0x%03x\n", cq_ctx->lbpid_low);
+ seq_printf(m, "(W1: lbpid) \t\t\t0x%03x\n",
+ cq_ctx->lbpid_high << 6 | cq_ctx->lbpid_med << 3 |
+ cq_ctx->lbpid_low);
+ seq_printf(m, "W1: lbp_ena \t\t\t\t%d\n\n", cq_ctx->lbp_ena);
+
+	seq_printf(m, "W2: update_time \t\t%d\nW2: avg_level \t\t\t%d\n",
+ cq_ctx->update_time, cq_ctx->avg_level);
+	seq_printf(m, "W2: head \t\t\t%d\nW2: tail \t\t\t%d\n\n",
+ cq_ctx->head, cq_ctx->tail);
+
+	seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3: cq_err_int \t\t\t%d\n",
+ cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
+	seq_printf(m, "W3: qsize \t\t\t%d\nW3: stashing \t\t\t%d\n",
+ cq_ctx->qsize, cq_ctx->stashing);
+
+ seq_printf(m, "W3: caching \t\t\t%d\n", cq_ctx->caching);
+ seq_printf(m, "W3: lbp_frac \t\t\t%d\n", cq_ctx->lbp_frac);
+ seq_printf(m, "W3: stash_thresh \t\t\t%d\n",
+ cq_ctx->stash_thresh);
+
+	seq_printf(m, "W3: msh_valid \t\t\t%d\nW3: msh_dst \t\t\t%d\n",
+ cq_ctx->msh_valid, cq_ctx->msh_dst);
+
+ seq_printf(m, "W3: cpt_drop_err_en \t\t\t%d\n",
+ cq_ctx->cpt_drop_err_en);
+ seq_printf(m, "W3: ena \t\t\t%d\n",
+ cq_ctx->ena);
+ seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
+ cq_ctx->drop_ena, cq_ctx->drop);
+ seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
+
+ seq_printf(m, "W4: lbpid_ext \t\t\t\t%d\n\n", cq_ctx->lbpid_ext);
+ seq_printf(m, "W4: bpid_ext \t\t\t\t%d\n\n", cq_ctx->bpid_ext);
+}
+
+void print_npa_cn20k_aura_ctx(struct seq_file *m,
+ struct npa_cn20k_aq_enq_rsp *rsp)
+{
+ struct npa_cn20k_aura_s *aura = &rsp->aura;
+
+ seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);
+
+ seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
+ aura->ena, aura->pool_caching);
+ seq_printf(m, "W1: avg con\t\t%d\n", aura->avg_con);
+ seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
+ aura->pool_drop_ena, aura->aura_drop_ena);
+ seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
+ aura->bp_ena, aura->aura_drop);
+ seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
+ aura->shift, aura->avg_level);
+
+ seq_printf(m, "W2: count\t\t%llu\nW2: nix_bpid\t\t%d\n",
+ (u64)aura->count, aura->bpid);
+
+ seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
+ (u64)aura->limit, aura->bp, aura->fc_ena);
+
+ seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
+ aura->fc_up_crossing, aura->fc_stype);
+ seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);
+
+ seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);
+
+ seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
+ aura->pool_drop, aura->update_time);
+ seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
+ aura->err_int, aura->err_int_ena);
+ seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
+ aura->thresh_int, aura->thresh_int_ena);
+ seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
+ aura->thresh_up, aura->thresh_qint_idx);
+ seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);
+
+ seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
+ seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
+}
+
+void print_npa_cn20k_pool_ctx(struct seq_file *m,
+ struct npa_cn20k_aq_enq_rsp *rsp)
+{
+ struct npa_cn20k_pool_s *pool = &rsp->pool;
+
+ seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);
+
+ seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
+ pool->ena, pool->nat_align);
+ seq_printf(m, "W1: stack_caching\t%d\n",
+ pool->stack_caching);
+ seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
+ pool->buf_offset, pool->buf_size);
+
+ seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
+ pool->stack_max_pages, pool->stack_pages);
+
+ seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
+ pool->stack_offset, pool->shift, pool->avg_level);
+ seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
+ pool->avg_con, pool->fc_ena, pool->fc_stype);
+ seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
+ pool->fc_hyst_bits, pool->fc_up_crossing);
+ seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);
+
+ seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);
+
+ seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);
+
+ seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);
+
+ seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
+ pool->err_int, pool->err_int_ena);
+ seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
+ seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
+ pool->thresh_int_ena, pool->thresh_up);
+ seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
+ pool->thresh_qint_idx, pool->err_qint_idx);
+ seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.h b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.h
new file mode 100644
index 000000000000..a2e3a2cd6edb
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#ifndef DEBUGFS_H
+#define DEBUGFS_H
+
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "struct.h"
+#include "../mbox.h"
+
+void print_nix_cn20k_sq_ctx(struct seq_file *m,
+ struct nix_cn20k_sq_ctx_s *sq_ctx);
+void print_nix_cn20k_cq_ctx(struct seq_file *m,
+ struct nix_cn20k_aq_enq_rsp *rsp);
+void print_npa_cn20k_aura_ctx(struct seq_file *m,
+ struct npa_cn20k_aq_enq_rsp *rsp);
+void print_npa_cn20k_pool_ctx(struct seq_file *m,
+ struct npa_cn20k_aq_enq_rsp *rsp);
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/mbox_init.c b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/mbox_init.c
new file mode 100644
index 000000000000..bd3aab7770dd
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/mbox_init.c
@@ -0,0 +1,424 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include "rvu_trace.h"
+#include "mbox.h"
+#include "reg.h"
+#include "api.h"
+
+static irqreturn_t cn20k_afvf_mbox_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_irq_data *rvu_irq_data = rvu_irq;
+ struct rvu *rvu = rvu_irq_data->rvu;
+ u64 intr;
+
+ /* Sync with mbox memory region */
+ rmb();
+
+ /* Clear interrupts */
+ intr = rvupf_read64(rvu, rvu_irq_data->intr_status);
+ rvupf_write64(rvu, rvu_irq_data->intr_status, intr);
+
+ if (intr)
+ trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr);
+
+ rvu_irq_data->afvf_queue_work_hdlr(&rvu->afvf_wq_info, rvu_irq_data->start,
+ rvu_irq_data->mdevs, intr);
+
+ return IRQ_HANDLED;
+}
+
+int cn20k_register_afvf_mbox_intr(struct rvu *rvu, int pf_vec_start)
+{
+ struct rvu_irq_data *irq_data;
+ int intr_vec, offset, vec = 0;
+ int err;
+
+ /* irq data for 4 VFPF intr vectors */
+ irq_data = devm_kcalloc(rvu->dev, 4,
+ sizeof(struct rvu_irq_data), GFP_KERNEL);
+ if (!irq_data)
+ return -ENOMEM;
+
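+	/* Four vectors in total: one per 64-VF wide interrupt status
+	 * register (VFs 0-63 and VFs 64-127) for each of the two VF
+	 * mailbox register sets (VFPF and VFPF1).
+	 */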
+ for (intr_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX0; intr_vec <=
+ RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1;
+ intr_vec++, vec++) {
+ switch (intr_vec) {
+ case RVU_MBOX_PF_INT_VEC_VFPF_MBOX0:
+ irq_data[vec].intr_status =
+ RVU_MBOX_PF_VFPF_INTX(0);
+ irq_data[vec].start = 0;
+ irq_data[vec].mdevs = 64;
+ break;
+ case RVU_MBOX_PF_INT_VEC_VFPF_MBOX1:
+ irq_data[vec].intr_status =
+ RVU_MBOX_PF_VFPF_INTX(1);
+ irq_data[vec].start = 64;
+ irq_data[vec].mdevs = 64;
+ break;
+ case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX0:
+ irq_data[vec].intr_status =
+ RVU_MBOX_PF_VFPF1_INTX(0);
+ irq_data[vec].start = 0;
+ irq_data[vec].mdevs = 64;
+ break;
+ case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1:
+ irq_data[vec].intr_status = RVU_MBOX_PF_VFPF1_INTX(1);
+ irq_data[vec].start = 64;
+ irq_data[vec].mdevs = 64;
+ break;
+ }
+ irq_data[vec].afvf_queue_work_hdlr =
+ rvu_queue_work;
+ offset = pf_vec_start + intr_vec;
+ irq_data[vec].vec_num = offset;
+ irq_data[vec].rvu = rvu;
+
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAF VFAF%d Mbox%d",
+ vec / 2, vec % 2);
+ err = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu->ng_rvu->rvu_mbox_ops->afvf_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE],
+ &irq_data[vec]);
+ if (err) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for AFVF mbox irq\n");
+ return err;
+ }
+ rvu->irq_allocated[offset] = true;
+ }
+
+ return 0;
+}
+
+/* CN20K mbox PFx => AF irq handler */
+static irqreturn_t cn20k_mbox_pf_common_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_irq_data *rvu_irq_data = rvu_irq;
+ struct rvu *rvu = rvu_irq_data->rvu;
+ u64 intr;
+
+ /* Clear interrupts */
+ intr = rvu_read64(rvu, BLKADDR_RVUM, rvu_irq_data->intr_status);
+ rvu_write64(rvu, BLKADDR_RVUM, rvu_irq_data->intr_status, intr);
+
+ if (intr)
+ trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr);
+
+ /* Sync with mbox memory region */
+ rmb();
+
+ rvu_irq_data->rvu_queue_work_hdlr(&rvu->afpf_wq_info,
+ rvu_irq_data->start,
+ rvu_irq_data->mdevs, intr);
+
+ return IRQ_HANDLED;
+}
+
+void cn20k_rvu_enable_mbox_intr(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
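+	/* Interrupt registers with index 0 cover PFs 0-63; index 1 covers
+	 * any PFs beyond 64.
+	 */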
+ /* Clear spurious irqs, if any */
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_MBOX_AF_PFAF_INT(0), INTR_MASK(hw->total_pfs));
+
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_MBOX_AF_PFAF_INT(1), INTR_MASK(hw->total_pfs - 64));
+
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_MBOX_AF_PFAF1_INT(0), INTR_MASK(hw->total_pfs));
+
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_MBOX_AF_PFAF1_INT(1), INTR_MASK(hw->total_pfs - 64));
+
+	/* Enable mailbox interrupt for all PFs except PF0, i.e. the AF itself */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF_INT_ENA_W1S(0),
+ INTR_MASK(hw->total_pfs) & ~1ULL);
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF_INT_ENA_W1S(1),
+ INTR_MASK(hw->total_pfs - 64));
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF1_INT_ENA_W1S(0),
+ INTR_MASK(hw->total_pfs) & ~1ULL);
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF1_INT_ENA_W1S(1),
+ INTR_MASK(hw->total_pfs - 64));
+}
+
+void cn20k_rvu_unregister_interrupts(struct rvu *rvu)
+{
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF_INT_ENA_W1C(0),
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF_INT_ENA_W1C(1),
+ INTR_MASK(rvu->hw->total_pfs - 64));
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF1_INT_ENA_W1C(0),
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF1_INT_ENA_W1C(1),
+ INTR_MASK(rvu->hw->total_pfs - 64));
+}
+
+int cn20k_register_afpf_mbox_intr(struct rvu *rvu)
+{
+ struct rvu_irq_data *irq_data;
+ int intr_vec, ret, vec = 0;
+
+ /* irq data for 4 PF intr vectors */
+ irq_data = devm_kcalloc(rvu->dev, 4,
+ sizeof(struct rvu_irq_data), GFP_KERNEL);
+ if (!irq_data)
+ return -ENOMEM;
+
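+	/* MBOX0 vectors service PFs 0-63 and MBOX1 vectors service the
+	 * remaining PFs, for both the PFAF and PFAF1 interrupt register sets.
+	 */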
+ for (intr_vec = RVU_AF_CN20K_INT_VEC_PFAF_MBOX0; intr_vec <=
+ RVU_AF_CN20K_INT_VEC_PFAF1_MBOX1; intr_vec++,
+ vec++) {
+ switch (intr_vec) {
+ case RVU_AF_CN20K_INT_VEC_PFAF_MBOX0:
+ irq_data[vec].intr_status =
+ RVU_MBOX_AF_PFAF_INT(0);
+ irq_data[vec].start = 0;
+ irq_data[vec].mdevs = 64;
+ break;
+ case RVU_AF_CN20K_INT_VEC_PFAF_MBOX1:
+ irq_data[vec].intr_status =
+ RVU_MBOX_AF_PFAF_INT(1);
+ irq_data[vec].start = 64;
+ irq_data[vec].mdevs = 96;
+ break;
+ case RVU_AF_CN20K_INT_VEC_PFAF1_MBOX0:
+ irq_data[vec].intr_status =
+ RVU_MBOX_AF_PFAF1_INT(0);
+ irq_data[vec].start = 0;
+ irq_data[vec].mdevs = 64;
+ break;
+ case RVU_AF_CN20K_INT_VEC_PFAF1_MBOX1:
+ irq_data[vec].intr_status =
+ RVU_MBOX_AF_PFAF1_INT(1);
+ irq_data[vec].start = 64;
+ irq_data[vec].mdevs = 96;
+ break;
+ }
+ irq_data[vec].rvu_queue_work_hdlr = rvu_queue_work;
+ irq_data[vec].vec_num = intr_vec;
+ irq_data[vec].rvu = rvu;
+
+ /* Register mailbox interrupt handler */
+ sprintf(&rvu->irq_name[intr_vec * NAME_SIZE],
+ "RVUAF PFAF%d Mbox%d",
+ vec / 2, vec % 2);
+ ret = request_irq(pci_irq_vector(rvu->pdev, intr_vec),
+ rvu->ng_rvu->rvu_mbox_ops->pf_intr_handler, 0,
+ &rvu->irq_name[intr_vec * NAME_SIZE],
+ &irq_data[vec]);
+ if (ret)
+ return ret;
+
+ rvu->irq_allocated[intr_vec] = true;
+ }
+
+ return 0;
+}
+
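+/* Return per-device mailbox addresses: the contiguous mailbox memory set up
+ * in rvu_alloc_mbox_memory() is mapped to a kernel virtual address and handed
+ * out in MBOX_SIZE chunks, one per device enabled in pf_bmap (PFs for
+ * TYPE_AFPF, VFs for TYPE_AFVF).
+ */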
+int cn20k_rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
+ int num, int type, unsigned long *pf_bmap)
+{
+ int region;
+ u64 bar;
+
+ if (type == TYPE_AFVF) {
+ for (region = 0; region < num; region++) {
+ if (!test_bit(region, pf_bmap))
+ continue;
+
+ bar = (u64)phys_to_virt((u64)rvu->ng_rvu->vf_mbox_addr->base);
+ bar += region * MBOX_SIZE;
+ mbox_addr[region] = (void *)bar;
+
+ if (!mbox_addr[region])
+ return -ENOMEM;
+ }
+ return 0;
+ }
+
+ for (region = 0; region < num; region++) {
+ if (!test_bit(region, pf_bmap))
+ continue;
+
+ bar = (u64)phys_to_virt((u64)rvu->ng_rvu->pf_mbox_addr->base);
+ bar += region * MBOX_SIZE;
+
+ mbox_addr[region] = (void *)bar;
+
+ if (!mbox_addr[region])
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int rvu_alloc_mbox_memory(struct rvu *rvu, int type,
+ int ndevs, int mbox_size)
+{
+ struct qmem *mbox_addr;
+ dma_addr_t iova;
+ int pf, err;
+
+ /* Allocate contiguous memory for mailbox communication.
+	 * e.g. AF <=> PFx mbox memory
+ * This allocated memory is split into chunks of MBOX_SIZE
+ * and setup into each of the RVU PFs. In HW this memory will
+ * get aliased to an offset within BAR2 of those PFs.
+ *
+ * AF will access mbox memory using direct physical addresses
+ * and PFs will access the same shared memory from BAR2.
+ *
+ * PF <=> VF mbox memory also works in the same fashion.
+	 * The AFPF and PFVF mailbox base registers are programmed with the IOVA of this memory.
+ */
+
+ err = qmem_alloc(rvu->dev, &mbox_addr, ndevs, mbox_size);
+ if (err)
+ return -ENOMEM;
+
+ switch (type) {
+ case TYPE_AFPF:
+ rvu->ng_rvu->pf_mbox_addr = mbox_addr;
+ iova = (u64)mbox_addr->iova;
+ for (pf = 0; pf < ndevs; pf++) {
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFX_ADDR(pf),
+ (u64)iova);
+ iova += mbox_size;
+ }
+ break;
+ case TYPE_AFVF:
+ rvu->ng_rvu->vf_mbox_addr = mbox_addr;
+ rvupf_write64(rvu, RVU_PF_VF_MBOX_ADDR, (u64)mbox_addr->iova);
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static struct mbox_ops cn20k_mbox_ops = {
+ .pf_intr_handler = cn20k_mbox_pf_common_intr_handler,
+ .afvf_intr_handler = cn20k_afvf_mbox_intr_handler,
+};
+
+int cn20k_rvu_mbox_init(struct rvu *rvu, int type, int ndevs)
+{
+ int dev;
+
+ if (!is_cn20k(rvu->pdev))
+ return 0;
+
+ rvu->ng_rvu->rvu_mbox_ops = &cn20k_mbox_ops;
+
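+	/* Advertise the mailbox region size (log2 of MBOX_SIZE) via the
+	 * PF-VF or per-PF AF-PF mailbox config registers before allocating
+	 * the backing memory.
+	 */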
+ if (type == TYPE_AFVF) {
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_PF_VF_CFG, ilog2(MBOX_SIZE));
+ } else {
+ for (dev = 0; dev < ndevs; dev++)
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_MBOX_AF_PFX_CFG(dev), ilog2(MBOX_SIZE));
+ }
+
+ return rvu_alloc_mbox_memory(rvu, type, ndevs, MBOX_SIZE);
+}
+
+void cn20k_free_mbox_memory(struct rvu *rvu)
+{
+ if (!is_cn20k(rvu->pdev))
+ return;
+
+ qmem_free(rvu->dev, rvu->ng_rvu->pf_mbox_addr);
+ qmem_free(rvu->dev, rvu->ng_rvu->vf_mbox_addr);
+}
+
+void cn20k_rvu_disable_afvf_intr(struct rvu *rvu, int vfs)
+{
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
+
+ if (vfs <= 64)
+ return;
+
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+}
+
+void cn20k_rvu_enable_afvf_intr(struct rvu *rvu, int vfs)
+{
+ /* Clear any pending interrupts and enable AF VF interrupts for
+ * the first 64 VFs.
+ */
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INTX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INTX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(0), INTR_MASK(vfs));
+
+ /* FLR */
+ rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
+
+ /* Same for remaining VFs, if any. */
+ if (vfs <= 64)
+ return;
+
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INTX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INTX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+
+ rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+}
+
+int rvu_alloc_cint_qint_mem(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ int blkaddr, int nixlf)
+{
+ int qints, hwctx_size, err;
+ u64 cfg, ctx_cfg;
+
+ if (is_rvu_otx2(rvu) || is_cn20k(rvu->pdev))
+ return 0;
+
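+	/* CQINT/QINT counts come from NIX_AF_CONST2 and the per-context
+	 * sizes (log2) from NIX_AF_CONST3.
+	 */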
+ ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
+ /* Alloc memory for CQINT's HW contexts */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+ qints = (cfg >> 24) & 0xFFF;
+ hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
+ if (err)
+ return -ENOMEM;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
+ (u64)pfvf->cq_ints_ctx->iova);
+
+ /* Alloc memory for QINT's HW contexts */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+ qints = (cfg >> 12) & 0xFFF;
+ hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
+ if (err)
+ return -ENOMEM;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
+ (u64)pfvf->nix_qints_ctx->iova);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/nix.c b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/nix.c
new file mode 100644
index 000000000000..aa2016fd1bba
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/nix.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "struct.h"
+#include "../rvu.h"
+
+int rvu_mbox_handler_nix_cn20k_aq_enq(struct rvu *rvu,
+ struct nix_cn20k_aq_enq_req *req,
+ struct nix_cn20k_aq_enq_rsp *rsp)
+{
+ return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
+ (struct nix_aq_enq_rsp *)rsp);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npa.c b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npa.c
new file mode 100644
index 000000000000..fe8f926c8b75
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npa.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "struct.h"
+#include "../rvu.h"
+
+int rvu_mbox_handler_npa_cn20k_aq_enq(struct rvu *rvu,
+ struct npa_cn20k_aq_enq_req *req,
+ struct npa_cn20k_aq_enq_rsp *rsp)
+{
+ return rvu_npa_aq_enq_inst(rvu, (struct npa_aq_enq_req *)req,
+ (struct npa_aq_enq_rsp *)rsp);
+}
+EXPORT_SYMBOL(rvu_mbox_handler_npa_cn20k_aq_enq);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/reg.h b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/reg.h
new file mode 100644
index 000000000000..affb39803120
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/reg.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#ifndef RVU_MBOX_REG_H
+#define RVU_MBOX_REG_H
+#include "../rvu.h"
+#include "../rvu_reg.h"
+
+/* RVUM block registers */
+#define RVU_PF_DISC (0x0)
+#define RVU_PRIV_PFX_DISC(a) (0x8000208 | (a) << 16)
+#define RVU_PRIV_HWVFX_DISC(a) (0xD000000 | (a) << 12)
+
+/* Mbox Registers */
+/* RVU AF BAR0 Mbox registers for AF => PFx */
+#define RVU_MBOX_AF_PFX_ADDR(a) (0x5000 | (a) << 4)
+#define RVU_MBOX_AF_PFX_CFG(a) (0x6000 | (a) << 4)
+#define RVU_MBOX_AF_AFPFX_TRIGX(a) (0x9000 | (a) << 3)
+#define RVU_MBOX_AF_PFAF_INT(a) (0x2980 | (a) << 6)
+#define RVU_MBOX_AF_PFAF_INT_W1S(a) (0x2988 | (a) << 6)
+#define RVU_MBOX_AF_PFAF_INT_ENA_W1S(a) (0x2990 | (a) << 6)
+#define RVU_MBOX_AF_PFAF_INT_ENA_W1C(a) (0x2998 | (a) << 6)
+#define RVU_MBOX_AF_PFAF1_INT(a) (0x29A0 | (a) << 6)
+#define RVU_MBOX_AF_PFAF1_INT_W1S(a) (0x29A8 | (a) << 6)
+#define RVU_MBOX_AF_PFAF1_INT_ENA_W1S(a) (0x29B0 | (a) << 6)
+#define RVU_MBOX_AF_PFAF1_INT_ENA_W1C(a) (0x29B8 | (a) << 6)
+
+/* RVU PF => AF mbox registers */
+#define RVU_MBOX_PF_PFAF_TRIGX(a) (0xC00 | (a) << 3)
+#define RVU_MBOX_PF_INT (0xC20)
+#define RVU_MBOX_PF_INT_W1S (0xC28)
+#define RVU_MBOX_PF_INT_ENA_W1S (0xC30)
+#define RVU_MBOX_PF_INT_ENA_W1C (0xC38)
+
+#define RVU_AF_BAR2_SEL (0x9000000)
+#define RVU_AF_BAR2_PFID (0x16400)
+#define NIX_CINTX_INT_W1S(a) (0xd30 | (a) << 12)
+#define NIX_QINTX_CNT(a) (0xc00 | (a) << 12)
+
+#define RVU_MBOX_AF_VFAF_INT(a) (0x3000 | (a) << 6)
+#define RVU_MBOX_AF_VFAF_INT_W1S(a) (0x3008 | (a) << 6)
+#define RVU_MBOX_AF_VFAF_INT_ENA_W1S(a) (0x3010 | (a) << 6)
+#define RVU_MBOX_AF_VFAF_INT_ENA_W1C(a) (0x3018 | (a) << 6)
+#define RVU_MBOX_AF_VFAF1_INT(a) (0x3020 | (a) << 6)
+#define RVU_MBOX_AF_VFAF1_INT_W1S(a) (0x3028 | (a) << 6)
+#define RVU_MBOX_AF_VFAF1_INT_ENA_W1S(a)	(0x3030 | (a) << 6)
+#define RVU_MBOX_AF_VFAF1_INT_ENA_W1C(a)	(0x3038 | (a) << 6)
+
+#define RVU_MBOX_AF_AFVFX_TRIG(a, b) (0x10000 | (a) << 4 | (b) << 3)
+#define RVU_MBOX_AF_VFX_ADDR(a) (0x20000 | (a) << 4)
+#define RVU_MBOX_AF_VFX_CFG(a) (0x28000 | (a) << 4)
+
+#define RVU_MBOX_PF_VFX_PFVF_TRIGX(a) (0x2000 | (a) << 3)
+
+#define RVU_MBOX_PF_VFPF_INTX(a) (0x1000 | (a) << 3)
+#define RVU_MBOX_PF_VFPF_INT_W1SX(a) (0x1020 | (a) << 3)
+#define RVU_MBOX_PF_VFPF_INT_ENA_W1SX(a) (0x1040 | (a) << 3)
+#define RVU_MBOX_PF_VFPF_INT_ENA_W1CX(a) (0x1060 | (a) << 3)
+
+#define RVU_MBOX_PF_VFPF1_INTX(a) (0x1080 | (a) << 3)
+#define RVU_MBOX_PF_VFPF1_INT_W1SX(a) (0x10a0 | (a) << 3)
+#define RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(a) (0x10c0 | (a) << 3)
+#define RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(a) (0x10e0 | (a) << 3)
+
+#define RVU_MBOX_PF_VF_ADDR (0xC40)
+#define RVU_MBOX_PF_LMTLINE_ADDR (0xC48)
+#define RVU_MBOX_PF_VF_CFG (0xC60)
+
+#define RVU_MBOX_VF_VFPF_TRIGX(a) (0x3000 | (a) << 3)
+#define RVU_MBOX_VF_INT (0x20)
+#define RVU_MBOX_VF_INT_W1S (0x28)
+#define RVU_MBOX_VF_INT_ENA_W1S (0x30)
+#define RVU_MBOX_VF_INT_ENA_W1C (0x38)
+
+#define RVU_MBOX_VF_VFAF_TRIGX(a) (0x2000 | (a) << 3)
+#endif /* RVU_MBOX_REG_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h
new file mode 100644
index 000000000000..763f6cabd7c2
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h
@@ -0,0 +1,380 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#ifndef STRUCT_H
+#define STRUCT_H
+
+#define NIX_MAX_CTX_SIZE 128
+
+/*
+ * CN20k RVU PF MBOX Interrupt Vector Enumeration
+ *
+ * Vectors 0 - 3 are compatible with pre-CN20K silicon, hence the
+ * existing macros are reused.
+ */
+enum rvu_mbox_pf_int_vec_e {
+ RVU_MBOX_PF_INT_VEC_VFPF_MBOX0 = 0x4,
+ RVU_MBOX_PF_INT_VEC_VFPF_MBOX1 = 0x5,
+ RVU_MBOX_PF_INT_VEC_VFPF1_MBOX0 = 0x6,
+ RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1 = 0x7,
+ RVU_MBOX_PF_INT_VEC_AFPF_MBOX = 0x8,
+ RVU_MBOX_PF_INT_VEC_CNT = 0x9,
+};
+
+/* RVU Admin function Interrupt Vector Enumeration */
+enum rvu_af_cn20k_int_vec_e {
+ RVU_AF_CN20K_INT_VEC_POISON = 0x0,
+ RVU_AF_CN20K_INT_VEC_PFFLR0 = 0x1,
+ RVU_AF_CN20K_INT_VEC_PFFLR1 = 0x2,
+ RVU_AF_CN20K_INT_VEC_PFME0 = 0x3,
+ RVU_AF_CN20K_INT_VEC_PFME1 = 0x4,
+ RVU_AF_CN20K_INT_VEC_GEN = 0x5,
+ RVU_AF_CN20K_INT_VEC_PFAF_MBOX0 = 0x6,
+ RVU_AF_CN20K_INT_VEC_PFAF_MBOX1 = 0x7,
+ RVU_AF_CN20K_INT_VEC_PFAF1_MBOX0 = 0x8,
+ RVU_AF_CN20K_INT_VEC_PFAF1_MBOX1 = 0x9,
+ RVU_AF_CN20K_INT_VEC_CNT = 0xa,
+};
+
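+/* Hardware context layouts below. Each structure mirrors a 128-byte
+ * (NIX_MAX_CTX_SIZE) context and is size-checked with a static_assert.
+ */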
+struct nix_cn20k_sq_ctx_s {
+ u64 ena : 1; /* W0 */
+ u64 qint_idx : 6;
+ u64 substream : 20;
+ u64 sdp_mcast : 1;
+ u64 cq : 20;
+ u64 sqe_way_mask : 16;
+ u64 smq : 11; /* W1 */
+ u64 cq_ena : 1;
+ u64 xoff : 1;
+ u64 sso_ena : 1;
+ u64 smq_rr_weight : 14;
+ u64 default_chan : 12;
+ u64 sqb_count : 16;
+ u64 reserved_120_120 : 1;
+ u64 smq_rr_count_lb : 7;
+ u64 smq_rr_count_ub : 25; /* W2 */
+ u64 sqb_aura : 20;
+ u64 sq_int : 8;
+ u64 sq_int_ena : 8;
+ u64 sqe_stype : 2;
+ u64 reserved_191_191 : 1;
+ u64 max_sqe_size : 2; /* W3 */
+ u64 cq_limit : 8;
+ u64 lmt_dis : 1;
+ u64 mnq_dis : 1;
+ u64 smq_next_sq : 20;
+ u64 smq_lso_segnum : 8;
+ u64 tail_offset : 6;
+ u64 smenq_offset : 6;
+ u64 head_offset : 6;
+ u64 smenq_next_sqb_vld : 1;
+ u64 smq_pend : 1;
+ u64 smq_next_sq_vld : 1;
+ u64 reserved_253_255 : 3;
+ u64 next_sqb : 64; /* W4 */
+ u64 tail_sqb : 64; /* W5 */
+ u64 smenq_sqb : 64; /* W6 */
+ u64 smenq_next_sqb : 64; /* W7 */
+ u64 head_sqb : 64; /* W8 */
+ u64 reserved_576_583 : 8; /* W9 */
+ u64 vfi_lso_total : 18;
+ u64 vfi_lso_sizem1 : 3;
+ u64 vfi_lso_sb : 8;
+ u64 vfi_lso_mps : 14;
+ u64 vfi_lso_vlan0_ins_ena : 1;
+ u64 vfi_lso_vlan1_ins_ena : 1;
+ u64 vfi_lso_vld : 1;
+ u64 reserved_630_639 : 10;
+ u64 scm_lso_rem : 18; /* W10 */
+ u64 reserved_658_703 : 46;
+ u64 octs : 48; /* W11 */
+ u64 reserved_752_767 : 16;
+ u64 pkts : 48; /* W12 */
+ u64 reserved_816_831 : 16;
+ u64 aged_drop_octs : 32; /* W13 */
+ u64 aged_drop_pkts : 32;
+ u64 dropped_octs : 48; /* W14 */
+ u64 reserved_944_959 : 16;
+ u64 dropped_pkts : 48; /* W15 */
+ u64 reserved_1008_1023 : 16;
+};
+
+static_assert(sizeof(struct nix_cn20k_sq_ctx_s) == NIX_MAX_CTX_SIZE);
+
+struct nix_cn20k_cq_ctx_s {
+ u64 base : 64; /* W0 */
+ u64 lbp_ena : 1; /* W1 */
+ u64 lbpid_low : 3;
+ u64 bp_ena : 1;
+ u64 lbpid_med : 3;
+ u64 bpid : 9;
+ u64 lbpid_high : 3;
+ u64 qint_idx : 7;
+ u64 cq_err : 1;
+ u64 cint_idx : 7;
+ u64 avg_con : 9;
+ u64 wrptr : 20;
+ u64 tail : 20; /* W2 */
+ u64 head : 20;
+ u64 avg_level : 8;
+ u64 update_time : 16;
+ u64 bp : 8; /* W3 */
+ u64 drop : 8;
+ u64 drop_ena : 1;
+ u64 ena : 1;
+ u64 cpt_drop_err_en : 1;
+ u64 reserved_211_211 : 1;
+ u64 msh_dst : 11;
+ u64 msh_valid : 1;
+ u64 stash_thresh : 4;
+ u64 lbp_frac : 4;
+ u64 caching : 1;
+ u64 stashing : 1;
+ u64 reserved_234_235 : 2;
+ u64 qsize : 4;
+ u64 cq_err_int : 8;
+ u64 cq_err_int_ena : 8;
+ u64 bpid_ext : 2; /* W4 */
+ u64 reserved_258_259 : 2;
+ u64 lbpid_ext : 2;
+ u64 reserved_262_319 : 58;
+ u64 reserved_320_383 : 64; /* W5 */
+ u64 reserved_384_447 : 64; /* W6 */
+ u64 reserved_448_511 : 64; /* W7 */
+ u64 padding[8];
+};
+
+static_assert(sizeof(struct nix_cn20k_cq_ctx_s) == NIX_MAX_CTX_SIZE);
+
+struct nix_cn20k_rq_ctx_s {
+ u64 ena : 1;
+ u64 sso_ena : 1;
+ u64 ipsech_ena : 1;
+ u64 ena_wqwd : 1;
+ u64 cq : 20;
+ u64 reserved_24_34 : 11;
+ u64 port_il4_dis : 1;
+ u64 port_ol4_dis : 1;
+ u64 lenerr_dis : 1;
+ u64 csum_il4_dis : 1;
+ u64 csum_ol4_dis : 1;
+ u64 len_il4_dis : 1;
+ u64 len_il3_dis : 1;
+ u64 len_ol4_dis : 1;
+ u64 len_ol3_dis : 1;
+ u64 wqe_aura : 20;
+ u64 spb_aura : 20;
+ u64 lpb_aura : 20;
+ u64 sso_grp : 10;
+ u64 sso_tt : 2;
+ u64 pb_caching : 2;
+ u64 wqe_caching : 1;
+ u64 xqe_drop_ena : 1;
+ u64 spb_drop_ena : 1;
+ u64 lpb_drop_ena : 1;
+ u64 pb_stashing : 1;
+ u64 ipsecd_drop_en : 1;
+ u64 chi_ena : 1;
+ u64 reserved_125_127 : 3;
+ u64 band_prof_id_l : 10;
+ u64 sso_fc_ena : 1;
+ u64 policer_ena : 1;
+ u64 spb_sizem1 : 6;
+ u64 wqe_skip : 2;
+ u64 spb_high_sizem1 : 3;
+ u64 spb_ena : 1;
+ u64 lpb_sizem1 : 12;
+ u64 first_skip : 7;
+ u64 reserved_171_171 : 1;
+ u64 later_skip : 6;
+ u64 xqe_imm_size : 6;
+ u64 band_prof_id_h : 4;
+ u64 reserved_188_189 : 2;
+ u64 xqe_imm_copy : 1;
+ u64 xqe_hdr_split : 1;
+ u64 xqe_drop : 8;
+ u64 xqe_pass : 8;
+ u64 wqe_pool_drop : 8;
+ u64 wqe_pool_pass : 8;
+ u64 spb_aura_drop : 8;
+ u64 spb_aura_pass : 8;
+ u64 spb_pool_drop : 8;
+ u64 spb_pool_pass : 8;
+ u64 lpb_aura_drop : 8;
+ u64 lpb_aura_pass : 8;
+ u64 lpb_pool_drop : 8;
+ u64 lpb_pool_pass : 8;
+ u64 reserved_288_291 : 4;
+ u64 rq_int : 8;
+ u64 rq_int_ena : 8;
+ u64 qint_idx : 7;
+ u64 reserved_315_319 : 5;
+ u64 ltag : 24;
+ u64 good_utag : 8;
+ u64 bad_utag : 8;
+ u64 flow_tagw : 6;
+ u64 ipsec_vwqe : 1;
+ u64 vwqe_ena : 1;
+ u64 vtime_wait : 8;
+ u64 max_vsize_exp : 4;
+ u64 vwqe_skip : 2;
+ u64 reserved_382_383 : 2;
+ u64 octs : 48;
+ u64 reserved_432_447 : 16;
+ u64 pkts : 48;
+ u64 reserved_496_511 : 16;
+ u64 drop_octs : 48;
+ u64 reserved_560_575 : 16;
+ u64 drop_pkts : 48;
+ u64 reserved_624_639 : 16;
+ u64 re_pkts : 48;
+ u64 reserved_688_703 : 16;
+ u64 reserved_704_767 : 64;
+ u64 reserved_768_831 : 64;
+ u64 reserved_832_895 : 64;
+ u64 reserved_896_959 : 64;
+ u64 reserved_960_1023 : 64;
+};
+
+static_assert(sizeof(struct nix_cn20k_rq_ctx_s) == NIX_MAX_CTX_SIZE);
+
+struct npa_cn20k_aura_s {
+ u64 pool_addr; /* W0 */
+ u64 ena : 1; /* W1 */
+ u64 reserved_65 : 2;
+ u64 pool_caching : 1;
+ u64 reserved_68 : 16;
+ u64 avg_con : 9;
+ u64 reserved_93 : 1;
+ u64 pool_drop_ena : 1;
+ u64 aura_drop_ena : 1;
+ u64 bp_ena : 1;
+ u64 reserved_97_103 : 7;
+ u64 aura_drop : 8;
+ u64 shift : 6;
+ u64 reserved_118_119 : 2;
+ u64 avg_level : 8;
+ u64 count : 36; /* W2 */
+ u64 reserved_164_167 : 4;
+ u64 bpid : 12;
+ u64 reserved_180_191 : 12;
+ u64 limit : 36; /* W3 */
+ u64 reserved_228_231 : 4;
+ u64 bp : 7;
+ u64 reserved_239_243 : 5;
+ u64 fc_ena : 1;
+ u64 fc_up_crossing : 1;
+ u64 fc_stype : 2;
+ u64 fc_hyst_bits : 4;
+ u64 reserved_252_255 : 4;
+ u64 fc_addr; /* W4 */
+ u64 pool_drop : 8; /* W5 */
+ u64 update_time : 16;
+ u64 err_int : 8;
+ u64 err_int_ena : 8;
+ u64 thresh_int : 1;
+ u64 thresh_int_ena : 1;
+ u64 thresh_up : 1;
+ u64 reserved_363 : 1;
+ u64 thresh_qint_idx : 7;
+ u64 reserved_371 : 1;
+ u64 err_qint_idx : 7;
+ u64 reserved_379_383 : 5;
+ u64 thresh : 36; /* W6*/
+ u64 rsvd_423_420 : 4;
+ u64 fc_msh_dst : 11;
+ u64 reserved_435_438 : 4;
+ u64 op_dpc_ena : 1;
+ u64 op_dpc_set : 5;
+ u64 reserved_445_445 : 1;
+ u64 stream_ctx : 1;
+ u64 unified_ctx : 1;
+ u64 reserved_448_511; /* W7 */
+ u64 padding[8];
+};
+
+static_assert(sizeof(struct npa_cn20k_aura_s) == NIX_MAX_CTX_SIZE);
+
+struct npa_cn20k_pool_s {
+ u64 stack_base; /* W0 */
+ u64 ena : 1;
+ u64 nat_align : 1;
+ u64 reserved_66_67 : 2;
+ u64 stack_caching : 1;
+ u64 reserved_69_87 : 19;
+ u64 buf_offset : 12;
+ u64 reserved_100_103 : 4;
+ u64 buf_size : 12;
+ u64 reserved_116_119 : 4;
+ u64 ref_cnt_prof : 3;
+ u64 reserved_123_127 : 5;
+ u64 stack_max_pages : 32;
+ u64 stack_pages : 32;
+	u64 bp_0                  : 7;
+	u64 bp_1                  : 7;
+	u64 bp_2                  : 7;
+	u64 bp_3                  : 7;
+	u64 bp_4                  : 7;
+	u64 bp_5                  : 7;
+	u64 bp_6                  : 7;
+	u64 bp_7                  : 7;
+	u64 bp_ena_0              : 1;
+	u64 bp_ena_1              : 1;
+	u64 bp_ena_2              : 1;
+	u64 bp_ena_3              : 1;
+	u64 bp_ena_4              : 1;
+	u64 bp_ena_5              : 1;
+	u64 bp_ena_6              : 1;
+	u64 bp_ena_7              : 1;
+ u64 stack_offset : 4;
+ u64 reserved_260_263 : 4;
+ u64 shift : 6;
+ u64 reserved_270_271 : 2;
+ u64 avg_level : 8;
+ u64 avg_con : 9;
+ u64 fc_ena : 1;
+ u64 fc_stype : 2;
+ u64 fc_hyst_bits : 4;
+ u64 fc_up_crossing : 1;
+ u64 reserved_297_299 : 3;
+ u64 update_time : 16;
+ u64 reserved_316_319 : 4;
+ u64 fc_addr; /* W5 */
+ u64 ptr_start; /* W6 */
+ u64 ptr_end; /* W7 */
+ u64 bpid_0 : 12;
+ u64 reserved_524_535 : 12;
+ u64 err_int : 8;
+ u64 err_int_ena : 8;
+ u64 thresh_int : 1;
+ u64 thresh_int_ena : 1;
+ u64 thresh_up : 1;
+ u64 reserved_555 : 1;
+ u64 thresh_qint_idx : 7;
+ u64 reserved_563 : 1;
+ u64 err_qint_idx : 7;
+ u64 reserved_571_575 : 5;
+ u64 thresh : 36;
+ u64 rsvd_612_615 : 4;
+ u64 fc_msh_dst : 11;
+ u64 reserved_627_630 : 4;
+ u64 op_dpc_ena : 1;
+ u64 op_dpc_set : 5;
+ u64 reserved_637_637 : 1;
+ u64 stream_ctx : 1;
+ u64 reserved_639 : 1;
+ u64 reserved_640_703; /* W10 */
+ u64 reserved_704_767; /* W11 */
+ u64 reserved_768_831; /* W12 */
+ u64 reserved_832_895; /* W13 */
+ u64 reserved_896_959; /* W14 */
+ u64 reserved_960_1023; /* W15 */
+};
+
+static_assert(sizeof(struct npa_cn20k_pool_s) == NIX_MAX_CTX_SIZE);
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index 2436c1ff9ba4..8a08bebf08c2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -39,7 +39,7 @@ struct qmem {
void *base;
dma_addr_t iova;
int alloc_sz;
- u16 entry_sz;
+ u32 entry_sz;
u8 align;
u32 qsize;
};
@@ -156,8 +156,10 @@ enum nix_scheduler {
#define NIC_HW_MIN_FRS 40
#define NIC_HW_MAX_FRS 9212
#define SDP_HW_MAX_FRS 65535
+#define SDP_HW_MIN_FRS 16
#define CN10K_LMAC_LINK_MAX_FRS 16380 /* 16k - FCS */
#define CN10K_LBK_LINK_MAX_FRS 65535 /* 64k */
+#define SDP_LINK_CREDIT 0x320202
/* NIX RX action operation*/
#define NIX_RX_ACTIONOP_DROP (0x0ull)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
index 9ffc6790c513..6180e68e1765 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -72,7 +72,6 @@ struct mac_ops {
u8 irq_offset;
u8 int_ena_bit;
u8 lmac_fwi;
- u32 fifo_len;
bool non_contiguous_serdes_lane;
/* RPM & CGX differs in number of Receive/transmit stats */
u8 rx_stats_cnt;
@@ -133,6 +132,8 @@ struct mac_ops {
int (*get_fec_stats)(void *cgxd, int lmac_id,
struct cgx_fec_stats_rsp *rsp);
int (*mac_stats_reset)(void *cgxd, int lmac_id);
+ void (*mac_x2p_reset)(void *cgxd, bool enable);
+ int (*mac_enadis_rx)(void *cgxd, int lmac_id, bool enable);
};
struct cgx {
@@ -142,6 +143,10 @@ struct cgx {
u8 lmac_count;
/* number of LMACs per MAC could be 4 or 8 */
u8 max_lmac_per_mac;
+	/* Length of the FIFO varies depending on the number
+	 * of LMACs
+ */
+ u32 fifo_len;
#define MAX_LMAC_COUNT 8
struct lmac *lmac_idmap[MAX_LMAC_COUNT];
struct work_struct cgx_cmd_work;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index 1e5aa5397504..75872d257eca 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -10,8 +10,11 @@
#include <linux/pci.h>
#include "rvu_reg.h"
+#include "cn20k/reg.h"
+#include "cn20k/api.h"
#include "mbox.h"
#include "rvu_trace.h"
+#include "rvu.h"
static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
@@ -28,8 +31,10 @@ void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
mdev->rsp_size = 0;
tx_hdr->num_msgs = 0;
tx_hdr->msg_size = 0;
+ tx_hdr->sig = 0;
rx_hdr->num_msgs = 0;
rx_hdr->msg_size = 0;
+ rx_hdr->sig = 0;
}
EXPORT_SYMBOL(__otx2_mbox_reset);
@@ -53,9 +58,98 @@ void otx2_mbox_destroy(struct otx2_mbox *mbox)
}
EXPORT_SYMBOL(otx2_mbox_destroy);
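+/* CN20K variant of otx2_mbox_setup(): the TX/RX region layout is chosen the
+ * same way, but the trigger registers follow the CN20K mailbox register map.
+ */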
+int cn20k_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ void *reg_base, int direction, int ndevs)
+{
+ switch (direction) {
+ case MBOX_DIR_AFPF:
+ case MBOX_DIR_PFVF:
+ mbox->tx_start = MBOX_DOWN_TX_START;
+ mbox->rx_start = MBOX_DOWN_RX_START;
+ mbox->tx_size = MBOX_DOWN_TX_SIZE;
+ mbox->rx_size = MBOX_DOWN_RX_SIZE;
+ break;
+ case MBOX_DIR_PFAF:
+ case MBOX_DIR_VFPF:
+ mbox->tx_start = MBOX_DOWN_RX_START;
+ mbox->rx_start = MBOX_DOWN_TX_START;
+ mbox->tx_size = MBOX_DOWN_RX_SIZE;
+ mbox->rx_size = MBOX_DOWN_TX_SIZE;
+ break;
+ case MBOX_DIR_AFPF_UP:
+ case MBOX_DIR_PFVF_UP:
+ mbox->tx_start = MBOX_UP_TX_START;
+ mbox->rx_start = MBOX_UP_RX_START;
+ mbox->tx_size = MBOX_UP_TX_SIZE;
+ mbox->rx_size = MBOX_UP_RX_SIZE;
+ break;
+ case MBOX_DIR_PFAF_UP:
+ case MBOX_DIR_VFPF_UP:
+ mbox->tx_start = MBOX_UP_RX_START;
+ mbox->rx_start = MBOX_UP_TX_START;
+ mbox->tx_size = MBOX_UP_RX_SIZE;
+ mbox->rx_size = MBOX_UP_TX_SIZE;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ switch (direction) {
+ case MBOX_DIR_AFPF:
+ mbox->trigger = RVU_MBOX_AF_AFPFX_TRIGX(1);
+ mbox->tr_shift = 4;
+ break;
+ case MBOX_DIR_AFPF_UP:
+ mbox->trigger = RVU_MBOX_AF_AFPFX_TRIGX(0);
+ mbox->tr_shift = 4;
+ break;
+ case MBOX_DIR_PFAF:
+ mbox->trigger = RVU_MBOX_PF_PFAF_TRIGX(0);
+ mbox->tr_shift = 0;
+ break;
+ case MBOX_DIR_PFAF_UP:
+ mbox->trigger = RVU_MBOX_PF_PFAF_TRIGX(1);
+ mbox->tr_shift = 0;
+ break;
+ case MBOX_DIR_PFVF:
+ mbox->trigger = RVU_MBOX_PF_VFX_PFVF_TRIGX(1);
+ mbox->tr_shift = 4;
+ break;
+ case MBOX_DIR_PFVF_UP:
+ mbox->trigger = RVU_MBOX_PF_VFX_PFVF_TRIGX(0);
+ mbox->tr_shift = 4;
+ break;
+ case MBOX_DIR_VFPF:
+ mbox->trigger = RVU_MBOX_VF_VFPF_TRIGX(0);
+ mbox->tr_shift = 0;
+ break;
+ case MBOX_DIR_VFPF_UP:
+ mbox->trigger = RVU_MBOX_VF_VFPF_TRIGX(1);
+ mbox->tr_shift = 0;
+ break;
+ default:
+ return -ENODEV;
+ }
+ mbox->reg_base = reg_base;
+ mbox->pdev = pdev;
+
+ mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL);
+ if (!mbox->dev) {
+ otx2_mbox_destroy(mbox);
+ return -ENOMEM;
+ }
+ mbox->ndevs = ndevs;
+
+ return 0;
+}
+
static int otx2_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev,
void *reg_base, int direction, int ndevs)
{
+ if (is_cn20k(pdev))
+ return cn20k_mbox_setup(mbox, pdev, reg_base,
+ direction, ndevs);
+
switch (direction) {
case MBOX_DIR_AFPF:
case MBOX_DIR_PFVF:
@@ -188,14 +282,13 @@ int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
{
unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT);
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
- struct device *sender = &mbox->pdev->dev;
while (!time_after(jiffies, timeout)) {
if (mdev->num_msgs == mdev->msgs_acked)
return 0;
usleep_range(800, 1000);
}
- dev_dbg(sender, "timed out while waiting for rsp\n");
+ trace_otx2_msg_wait_rsp(mbox->pdev);
return -EIO;
}
EXPORT_SYMBOL(otx2_mbox_wait_for_rsp);
@@ -219,6 +312,7 @@ static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data)
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_hdr *tx_hdr, *rx_hdr;
void *hw_mbase = mdev->hwbase;
+ struct mbox_msghdr *msg;
u64 intr_val;
tx_hdr = hw_mbase + mbox->tx_start;
@@ -234,7 +328,10 @@ static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data)
spin_lock(&mdev->mbox_lock);
- tx_hdr->msg_size = mdev->msg_size;
+ if (!tx_hdr->sig) {
+ tx_hdr->msg_size = mdev->msg_size;
+ tx_hdr->num_msgs = mdev->num_msgs;
+ }
/* Reset header for next messages */
mdev->msg_size = 0;
@@ -248,10 +345,12 @@ static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data)
* messages. So this should be written after writing all the messages
* to the shared memory.
*/
- tx_hdr->num_msgs = mdev->num_msgs;
rx_hdr->num_msgs = 0;
- trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size);
+ msg = (struct mbox_msghdr *)(hw_mbase + mbox->tx_start + msgs_offset);
+
+ trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size,
+ msg->id, msg->pcifunc);
spin_unlock(&mdev->mbox_lock);
@@ -306,6 +405,7 @@ struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
{
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_msghdr *msghdr = NULL;
+ struct mbox_hdr *mboxhdr = NULL;
spin_lock(&mdev->mbox_lock);
size = ALIGN(size, MBOX_MSG_ALIGN);
@@ -329,6 +429,11 @@ struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
mdev->msg_size += size;
mdev->rsp_size += size_rsp;
msghdr->next_msgoff = mdev->msg_size + msgs_offset;
+
+ mboxhdr = mdev->mbase + mbox->tx_start;
+ /* Clear the msg header region */
+ memset(mboxhdr, 0, msgs_offset);
+
exit:
spin_unlock(&mdev->mbox_lock);
@@ -445,6 +550,14 @@ const char *otx2_mbox_id2name(u16 id)
#define M(_name, _id, _1, _2, _3) case _id: return # _name;
MBOX_MESSAGES
#undef M
+
+#define M(_name, _id, _1, _2, _3) case _id: return # _name;
+ MBOX_UP_CGX_MESSAGES
+#undef M
+
+#define M(_name, _id, _1, _2, _3) case _id: return # _name;
+ MBOX_UP_CPT_MESSAGES
+#undef M
default:
return "INVALID ID";
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 6ea2f3071fe8..a3e273126e4e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -10,9 +10,11 @@
#include <linux/etherdevice.h>
#include <linux/sizes.h>
+#include <linux/ethtool.h>
#include "rvu_struct.h"
#include "common.h"
+#include "cn20k/struct.h"
#define MBOX_SIZE SZ_64K
@@ -50,6 +52,11 @@
#define MBOX_DIR_PFVF_UP 6 /* PF sends messages to VF */
#define MBOX_DIR_VFPF_UP 7 /* VF replies to PF */
+enum {
+ TYPE_AFVF,
+ TYPE_AFPF,
+};
+
struct otx2_mbox_dev {
void *mbase; /* This dev's mbox region */
void *hwbase;
@@ -78,6 +85,8 @@ struct otx2_mbox {
struct mbox_hdr {
u64 msg_size; /* Total msgs size embedded */
u16 num_msgs; /* No of msgs embedded */
+ u16 opt_msg;
+ u8 sig;
};
/* Header which precedes every msg and is also part of it */
@@ -144,6 +153,9 @@ M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req, \
msg_rsp) \
M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \
M(PTP_GET_CAP, 0x00c, ptp_get_cap, msg_req, ptp_get_cap_rsp) \
+M(GET_REP_CNT, 0x00d, get_rep_cnt, msg_req, get_rep_cnt_rsp) \
+M(ESW_CFG, 0x00e, esw_cfg, esw_cfg_req, msg_rsp) \
+M(REP_EVENT_NOTIFY, 0x00f, rep_event_notify, rep_event, msg_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \
@@ -191,6 +203,8 @@ M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
M(NPA_LF_FREE, 0x401, npa_lf_free, msg_req, msg_rsp) \
M(NPA_AQ_ENQ, 0x402, npa_aq_enq, npa_aq_enq_req, npa_aq_enq_rsp) \
M(NPA_HWCTX_DISABLE, 0x403, npa_hwctx_disable, hwctx_disable_req, msg_rsp)\
+M(NPA_CN20K_AQ_ENQ, 0x404, npa_cn20k_aq_enq, npa_cn20k_aq_enq_req, \
+ npa_cn20k_aq_enq_rsp) \
/* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */ \
/* TIM mbox IDs (range 0x800 - 0x9FF) */ \
/* CPT mbox IDs (range 0xA00 - 0xBFF) */ \
@@ -310,6 +324,10 @@ M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \
msg_rsp) \
M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \
nix_bandprof_get_hwinfo_rsp) \
+M(NIX_CPT_BP_ENABLE, 0x8020, nix_cpt_bp_enable, nix_bp_cfg_req, \
+ nix_bp_cfg_rsp) \
+M(NIX_CPT_BP_DISABLE, 0x8021, nix_cpt_bp_disable, nix_bp_cfg_req, \
+ msg_rsp) \
M(NIX_READ_INLINE_IPSEC_CFG, 0x8023, nix_read_inline_ipsec_cfg, \
msg_req, nix_inline_ipsec_cfg) \
M(NIX_MCAST_GRP_CREATE, 0x802b, nix_mcast_grp_create, nix_mcast_grp_create_req, \
@@ -319,6 +337,9 @@ M(NIX_MCAST_GRP_DESTROY, 0x802c, nix_mcast_grp_destroy, nix_mcast_grp_destroy_re
M(NIX_MCAST_GRP_UPDATE, 0x802d, nix_mcast_grp_update, \
nix_mcast_grp_update_req, \
nix_mcast_grp_update_rsp) \
+M(NIX_LF_STATS, 0x802e, nix_lf_stats, nix_stats_req, nix_stats_rsp) \
+M(NIX_CN20K_AQ_ENQ, 0x802f, nix_cn20k_aq_enq, nix_cn20k_aq_enq_req, \
+ nix_cn20k_aq_enq_rsp) \
/* MCS mbox IDs (range 0xA000 - 0xBFFF) */ \
M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \
mcs_alloc_rsrc_rsp) \
@@ -380,12 +401,16 @@ M(CPT_INST_LMTST, 0xD00, cpt_inst_lmtst, cpt_inst_lmtst_req, msg_rsp)
#define MBOX_UP_MCS_MESSAGES \
M(MCS_INTR_NOTIFY, 0xE00, mcs_intr_notify, mcs_intr_info, msg_rsp)
+#define MBOX_UP_REP_MESSAGES \
+M(REP_EVENT_UP_NOTIFY, 0xEF0, rep_event_up_notify, rep_event, msg_rsp) \
+
enum {
#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id,
MBOX_MESSAGES
MBOX_UP_CGX_MESSAGES
MBOX_UP_CPT_MESSAGES
MBOX_UP_MCS_MESSAGES
+MBOX_UP_REP_MESSAGES
#undef M
};
@@ -512,6 +537,8 @@ struct get_hw_cap_rsp {
u8 nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */
u8 nix_shaping; /* Is shaping and coloring supported */
u8 npc_hash_extract; /* Is hash extract supported */
+#define HW_CAP_MACSEC BIT_ULL(1)
+ u64 hw_caps;
};
/* CGX mbox message formats */
@@ -636,11 +663,17 @@ struct cgx_lmac_fwdata_s {
u64 supported_link_modes;
/* only applicable if AN is supported */
u64 advertised_fec;
- u64 advertised_link_modes;
+ u64 advertised_link_modes_own:1; /* CGX_CMD_OWN */
+ u64 advertised_link_modes:63;
/* Only applicable if SFP/QSFP slot is present */
struct sfp_eeprom_s sfp_eeprom;
struct phy_s phy;
-#define LMAC_FWDATA_RESERVED_MEM 1021
+ u32 lmac_type;
+ u32 portm_idx;
+ u64 mgmt_port:1;
+ u64 advertised_an:1;
+ u64 port;
+#define LMAC_FWDATA_RESERVED_MEM 1018
u64 reserved[LMAC_FWDATA_RESERVED_MEM];
};
@@ -653,12 +686,13 @@ struct cgx_set_link_mode_args {
u32 speed;
u8 duplex;
u8 an;
- u8 ports;
+ u8 mode_baseidx;
+ u8 multimode;
u64 mode;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
};
struct cgx_set_link_mode_req {
-#define AUTONEG_UNKNOWN 0xff
struct mbox_msghdr hdr;
struct cgx_set_link_mode_args args;
};
@@ -802,6 +836,39 @@ struct npa_aq_enq_rsp {
};
};
+struct npa_cn20k_aq_enq_req {
+ struct mbox_msghdr hdr;
+ u32 aura_id;
+ u8 ctype;
+ u8 op;
+ union {
+ /* Valid when op == WRITE/INIT and ctype == AURA.
+ * LF fills the pool_id in aura.pool_addr. AF will translate
+ * the pool_id to pool context pointer.
+ */
+ struct npa_cn20k_aura_s aura;
+ /* Valid when op == WRITE/INIT and ctype == POOL */
+ struct npa_cn20k_pool_s pool;
+ };
+ /* Mask data when op == WRITE (1=write, 0=don't write) */
+ union {
+ /* Valid when op == WRITE and ctype == AURA */
+ struct npa_cn20k_aura_s aura_mask;
+ /* Valid when op == WRITE and ctype == POOL */
+ struct npa_cn20k_pool_s pool_mask;
+ };
+};
+
+struct npa_cn20k_aq_enq_rsp {
+ struct mbox_msghdr hdr;
+ union {
+ /* Valid when op == READ and ctype == AURA */
+ struct npa_cn20k_aura_s aura;
+ /* Valid when op == READ and ctype == POOL */
+ struct npa_cn20k_pool_s pool;
+ };
+};
+
/* Disable all contexts of type 'ctype' */
struct hwctx_disable_req {
struct mbox_msghdr hdr;
@@ -910,6 +977,42 @@ struct nix_lf_free_req {
u64 flags;
};
+/* CN20K NIX AQ enqueue msg */
+struct nix_cn20k_aq_enq_req {
+ struct mbox_msghdr hdr;
+ u32 qidx;
+ u8 ctype;
+ u8 op;
+ union {
+ struct nix_cn20k_rq_ctx_s rq;
+ struct nix_cn20k_sq_ctx_s sq;
+ struct nix_cn20k_cq_ctx_s cq;
+ struct nix_rsse_s rss;
+ struct nix_rx_mce_s mce;
+ struct nix_bandprof_s prof;
+ };
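+	/* Mask data when op == WRITE (1=write, 0=don't write) */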
+ union {
+ struct nix_cn20k_rq_ctx_s rq_mask;
+ struct nix_cn20k_sq_ctx_s sq_mask;
+ struct nix_cn20k_cq_ctx_s cq_mask;
+ struct nix_rsse_s rss_mask;
+ struct nix_rx_mce_s mce_mask;
+ struct nix_bandprof_s prof_mask;
+ };
+};
+
+struct nix_cn20k_aq_enq_rsp {
+ struct mbox_msghdr hdr;
+ union {
+ struct nix_cn20k_rq_ctx_s rq;
+ struct nix_cn20k_sq_ctx_s sq;
+ struct nix_cn20k_cq_ctx_s cq;
+ struct nix_rsse_s rss;
+ struct nix_rx_mce_s mce;
+ struct nix_bandprof_s prof;
+ };
+};
+
/* CN10K NIX AQ enqueue msg */
struct nix_cn10k_aq_enq_req {
struct mbox_msghdr hdr;
@@ -1364,6 +1467,37 @@ struct nix_bandprof_get_hwinfo_rsp {
u32 policer_timeunit;
};
+struct nix_stats_req {
+ struct mbox_msghdr hdr;
+ u8 reset;
+ u16 pcifunc;
+ u64 rsvd;
+};
+
+struct nix_stats_rsp {
+ struct mbox_msghdr hdr;
+ u16 pcifunc;
+ struct {
+ u64 octs;
+ u64 ucast;
+ u64 bcast;
+ u64 mcast;
+ u64 drop;
+ u64 drop_octs;
+ u64 drop_mcast;
+ u64 drop_bcast;
+ u64 err;
+ u64 rsvd[5];
+ } rx;
+ struct {
+ u64 ucast;
+ u64 bcast;
+ u64 mcast;
+ u64 drop;
+ u64 octs;
+ } tx;
+};
+
/* NPC mbox message structs */
#define NPC_MCAM_ENTRY_INVALID 0xFFFF
@@ -1525,6 +1659,41 @@ struct ptp_get_cap_rsp {
u64 cap;
};
+struct get_rep_cnt_rsp {
+ struct mbox_msghdr hdr;
+ u16 rep_cnt;
+ u16 rep_pf_map[64];
+ u64 rsvd;
+};
+
+struct esw_cfg_req {
+ struct mbox_msghdr hdr;
+ u8 ena;
+ u64 rsvd;
+};
+
+struct rep_evt_data {
+ u8 port_state;
+ u8 vf_state;
+ u16 rx_mode;
+ u16 rx_flags;
+ u16 mtu;
+ u8 mac[ETH_ALEN];
+ u64 rsvd[5];
+};
+
+struct rep_event {
+ struct mbox_msghdr hdr;
+ u16 pcifunc;
+#define RVU_EVENT_PORT_STATE BIT_ULL(0)
+#define RVU_EVENT_PFVF_STATE BIT_ULL(1)
+#define RVU_EVENT_MTU_CHANGE BIT_ULL(2)
+#define RVU_EVENT_RX_MODE_CHANGE BIT_ULL(3)
+#define RVU_EVENT_MAC_ADDR_CHANGE BIT_ULL(4)
+ u16 event;
+ struct rep_evt_data evt_data;
+};
+
struct flow_msg {
unsigned char dmac[6];
unsigned char smac[6];
@@ -1563,6 +1732,7 @@ struct flow_msg {
u8 icmp_type;
u8 icmp_code;
__be16 tcp_flags;
+ u16 sq_id;
};
struct npc_install_flow_req {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
index d39d86e694cc..a80c8e7c94f2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
@@ -97,7 +97,7 @@ int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event)
if (pcifunc & RVU_PFVF_FUNC_MASK)
pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];
else
- pfvf = &mcs->pf[rvu_get_pf(pcifunc)];
+ pfvf = &mcs->pf[rvu_get_pf(rvu->pdev, pcifunc)];
event->intr_mask &= pfvf->intr_mask;
@@ -123,7 +123,7 @@ static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
struct mcs_intr_info *req;
int pf;
- pf = rvu_get_pf(event->pcifunc);
+ pf = rvu_get_pf(rvu->pdev, event->pcifunc);
mutex_lock(&rvu->mbox_lock);
@@ -143,6 +143,8 @@ static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);
+ otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
+
mutex_unlock(&rvu->mbox_lock);
return 0;
@@ -191,7 +193,7 @@ int rvu_mbox_handler_mcs_intr_cfg(struct rvu *rvu,
if (pcifunc & RVU_PFVF_FUNC_MASK)
pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];
else
- pfvf = &mcs->pf[rvu_get_pf(pcifunc)];
+ pfvf = &mcs->pf[rvu_get_pf(rvu->pdev, pcifunc)];
mcs->pf_map[0] = pcifunc;
pfvf->intr_mask = req->intr_mask;
@@ -911,7 +913,7 @@ int rvu_mcs_init(struct rvu *rvu)
/* Initialize the wq for handling mcs interrupts */
INIT_LIST_HEAD(&rvu->mcs_intrq_head);
INIT_WORK(&rvu->mcs_intr_work, mcs_intr_handler_task);
- rvu->mcs_intr_wq = alloc_workqueue("mcs_intr_wq", 0, 0);
+ rvu->mcs_intr_wq = alloc_workqueue("mcs_intr_wq", WQ_PERCPU, 0);
if (!rvu->mcs_intr_wq) {
dev_err(rvu->dev, "mcs alloc workqueue failed\n");
return -ENOMEM;
@@ -925,7 +927,6 @@ void rvu_mcs_exit(struct rvu *rvu)
if (!rvu->mcs_intr_wq)
return;
- flush_workqueue(rvu->mcs_intr_wq);
destroy_workqueue(rvu->mcs_intr_wq);
rvu->mcs_intr_wq = NULL;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
index bcc96eed2481..66749b3649c1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -545,8 +545,7 @@ static int ptp_probe(struct pci_dev *pdev,
spin_lock_init(&ptp->ptp_lock);
if (cn10k_ptp_errata(ptp)) {
ptp->read_ptp_tstmp = &read_ptp_tstmp_sec_nsec;
- hrtimer_init(&ptp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ptp->hrtimer.function = ptp_reset_thresh;
+ hrtimer_setup(&ptp->hrtimer, ptp_reset_thresh, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
} else {
ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index 1b34cf9c9703..2e9945446199 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -39,6 +39,8 @@ static struct mac_ops rpm_mac_ops = {
.mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg,
.mac_reset = rpm_lmac_reset,
.mac_stats_reset = rpm_stats_reset,
+ .mac_x2p_reset = rpm_x2p_reset,
+ .mac_enadis_rx = rpm_enadis_rx,
};
static struct mac_ops rpm2_mac_ops = {
@@ -72,6 +74,8 @@ static struct mac_ops rpm2_mac_ops = {
.mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg,
.mac_reset = rpm_lmac_reset,
.mac_stats_reset = rpm_stats_reset,
+ .mac_x2p_reset = rpm_x2p_reset,
+ .mac_enadis_rx = rpm_enadis_rx,
};
bool is_dev_rpm2(void *rpmd)
@@ -467,7 +471,7 @@ u8 rpm_get_lmac_type(void *rpmd, int lmac_id)
int err;
req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_LINK_STS, req);
- err = cgx_fwi_cmd_generic(req, &resp, rpm, 0);
+ err = cgx_fwi_cmd_generic(req, &resp, rpm, lmac_id);
if (!err)
return FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, resp);
return err;
@@ -480,7 +484,7 @@ u32 rpm_get_lmac_fifo_len(void *rpmd, int lmac_id)
u8 num_lmacs;
u32 fifo_len;
- fifo_len = rpm->mac_ops->fifo_len;
+ fifo_len = rpm->fifo_len;
num_lmacs = rpm->mac_ops->get_nr_lmacs(rpm);
switch (num_lmacs) {
@@ -533,9 +537,9 @@ u32 rpm2_get_lmac_fifo_len(void *rpmd, int lmac_id)
*/
max_lmac = (rpm_read(rpm, 0, CGX_CONST) >> 24) & 0xFF;
if (max_lmac > 4)
- fifo_len = rpm->mac_ops->fifo_len / 2;
+ fifo_len = rpm->fifo_len / 2;
else
- fifo_len = rpm->mac_ops->fifo_len;
+ fifo_len = rpm->fifo_len;
if (lmac_id < 4) {
num_lmacs = hweight8(lmac_info & 0xF);
@@ -699,46 +703,51 @@ int rpm_get_fec_stats(void *rpmd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
if (rpm->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE)
return 0;
+ /* latched registers FCFECX_CW_HI/RSFEC_STAT_FAST_DATA_HI_CDC are common
+ * for all counters. Acquire lock to ensure serialized reads
+ */
+ mutex_lock(&rpm->lock);
if (rpm->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
- val_lo = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_VL0_CCW_LO);
- val_hi = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_CW_HI);
+ val_lo = rpm_read(rpm, 0, RPMX_MTI_FCFECX_VL0_CCW_LO(lmac_id));
+ val_hi = rpm_read(rpm, 0, RPMX_MTI_FCFECX_CW_HI(lmac_id));
rsp->fec_corr_blks = (val_hi << 16 | val_lo);
- val_lo = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_VL0_NCCW_LO);
- val_hi = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_CW_HI);
+ val_lo = rpm_read(rpm, 0, RPMX_MTI_FCFECX_VL0_NCCW_LO(lmac_id));
+ val_hi = rpm_read(rpm, 0, RPMX_MTI_FCFECX_CW_HI(lmac_id));
rsp->fec_uncorr_blks = (val_hi << 16 | val_lo);
/* 50G uses 2 Physical serdes lines */
if (rpm->lmac_idmap[lmac_id]->link_info.lmac_type_id ==
LMAC_MODE_50G_R) {
- val_lo = rpm_read(rpm, lmac_id,
- RPMX_MTI_FCFECX_VL1_CCW_LO);
- val_hi = rpm_read(rpm, lmac_id,
- RPMX_MTI_FCFECX_CW_HI);
+ val_lo = rpm_read(rpm, 0,
+ RPMX_MTI_FCFECX_VL1_CCW_LO(lmac_id));
+ val_hi = rpm_read(rpm, 0,
+ RPMX_MTI_FCFECX_CW_HI(lmac_id));
rsp->fec_corr_blks += (val_hi << 16 | val_lo);
- val_lo = rpm_read(rpm, lmac_id,
- RPMX_MTI_FCFECX_VL1_NCCW_LO);
- val_hi = rpm_read(rpm, lmac_id,
- RPMX_MTI_FCFECX_CW_HI);
+ val_lo = rpm_read(rpm, 0,
+ RPMX_MTI_FCFECX_VL1_NCCW_LO(lmac_id));
+ val_hi = rpm_read(rpm, 0,
+ RPMX_MTI_FCFECX_CW_HI(lmac_id));
rsp->fec_uncorr_blks += (val_hi << 16 | val_lo);
}
} else {
/* enable RS-FEC capture */
- cfg = rpm_read(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL);
+ cfg = rpm_read(rpm, 0, RPMX_MTI_RSFEC_STAT_STATN_CONTROL);
cfg |= RPMX_RSFEC_RX_CAPTURE | BIT(lmac_id);
- rpm_write(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL, cfg);
+ rpm_write(rpm, 0, RPMX_MTI_RSFEC_STAT_STATN_CONTROL, cfg);
val_lo = rpm_read(rpm, 0,
RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_2);
- val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);
+ val_hi = rpm_read(rpm, 0, RPMX_MTI_RSFEC_STAT_FAST_DATA_HI_CDC);
rsp->fec_corr_blks = (val_hi << 32 | val_lo);
val_lo = rpm_read(rpm, 0,
RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_3);
- val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);
+ val_hi = rpm_read(rpm, 0, RPMX_MTI_RSFEC_STAT_FAST_DATA_HI_CDC);
rsp->fec_uncorr_blks = (val_hi << 32 | val_lo);
}
+ mutex_unlock(&rpm->lock);
return 0;
}
@@ -763,3 +772,41 @@ int rpm_lmac_reset(void *rpmd, int lmac_id, u8 pf_req_flr)
return 0;
}
+
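+/* Assert or de-assert the RPM to NIX (X2P) reset. When asserting, RX is first
+ * disabled on all LMACs and a short delay allows in-flight traffic to drain
+ * before RPM_NIX0_RESET is set.
+ */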
+void rpm_x2p_reset(void *rpmd, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ int lmac_id;
+ u64 cfg;
+
+ if (enable) {
+ for_each_set_bit(lmac_id, &rpm->lmac_bmap, rpm->max_lmac_per_mac)
+ rpm->mac_ops->mac_enadis_rx(rpm, lmac_id, false);
+
+ usleep_range(1000, 2000);
+
+ cfg = rpm_read(rpm, 0, RPMX_CMR_GLOBAL_CFG);
+ rpm_write(rpm, 0, RPMX_CMR_GLOBAL_CFG, cfg | RPM_NIX0_RESET);
+ } else {
+ cfg = rpm_read(rpm, 0, RPMX_CMR_GLOBAL_CFG);
+ cfg &= ~RPM_NIX0_RESET;
+ rpm_write(rpm, 0, RPMX_CMR_GLOBAL_CFG, cfg);
+ }
+}
+
+int rpm_enadis_rx(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ if (enable)
+ cfg |= RPM_RX_EN;
+ else
+ cfg &= ~RPM_RX_EN;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
index 34b11deb0f3c..b8d3972e096a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
@@ -17,6 +17,8 @@
/* Registers */
#define RPMX_CMRX_CFG 0x00
+#define RPMX_CMR_GLOBAL_CFG 0x08
+#define RPM_NIX0_RESET BIT_ULL(3)
#define RPMX_RX_TS_PREPEND BIT_ULL(22)
#define RPMX_TX_PTP_1S_SUPPORT BIT_ULL(17)
#define RPMX_CMRX_RX_ID_MAP 0x80
@@ -84,16 +86,18 @@
/* FEC stats */
#define RPMX_MTI_STAT_STATN_CONTROL 0x10018
#define RPMX_MTI_STAT_DATA_HI_CDC 0x10038
-#define RPMX_RSFEC_RX_CAPTURE BIT_ULL(27)
+#define RPMX_RSFEC_RX_CAPTURE BIT_ULL(28)
#define RPMX_CMD_CLEAR_RX BIT_ULL(30)
#define RPMX_CMD_CLEAR_TX BIT_ULL(31)
+#define RPMX_MTI_RSFEC_STAT_STATN_CONTROL 0x40018
+#define RPMX_MTI_RSFEC_STAT_FAST_DATA_HI_CDC 0x40000
#define RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_2 0x40050
#define RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_3 0x40058
-#define RPMX_MTI_FCFECX_VL0_CCW_LO 0x38618
-#define RPMX_MTI_FCFECX_VL0_NCCW_LO 0x38620
-#define RPMX_MTI_FCFECX_VL1_CCW_LO 0x38628
-#define RPMX_MTI_FCFECX_VL1_NCCW_LO 0x38630
-#define RPMX_MTI_FCFECX_CW_HI 0x38638
+#define RPMX_MTI_FCFECX_VL0_CCW_LO(a) (0x38618 + ((a) * 0x40))
+#define RPMX_MTI_FCFECX_VL0_NCCW_LO(a) (0x38620 + ((a) * 0x40))
+#define RPMX_MTI_FCFECX_VL1_CCW_LO(a) (0x38628 + ((a) * 0x40))
+#define RPMX_MTI_FCFECX_VL1_NCCW_LO(a) (0x38630 + ((a) * 0x40))
+#define RPMX_MTI_FCFECX_CW_HI(a) (0x38638 + ((a) * 0x40))
/* CN10KB CSR Declaration */
#define RPM2_CMRX_SW_INT 0x1b0
@@ -137,4 +141,6 @@ bool is_dev_rpm2(void *rpmd);
int rpm_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp);
int rpm_lmac_reset(void *rpmd, int lmac_id, u8 pf_req_flr);
int rpm_stats_reset(void *rpmd, int lmac_id);
+void rpm_x2p_reset(void *rpmd, bool enable);
+int rpm_enadis_rx(void *rpmd, int lmac_id, bool enable);
#endif /* RPM_H */
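
A minimal userspace sketch of the per-LMAC FC-FEC offset arithmetic and the 16+16 bit counter composition used in the rpm.c hunk above. The constants mirror the macros just added to rpm.h; the lmac_id and register values are invented for illustration only.

#include <stdio.h>
#include <stdint.h>

/* Same stride as RPMX_MTI_FCFECX_*(a) above: one 0x40-byte bank per LMAC */
#define FCFECX_VL0_CCW_LO(a)	(0x38618ull + ((a) * 0x40ull))
#define FCFECX_CW_HI(a)		(0x38638ull + ((a) * 0x40ull))

int main(void)
{
	int lmac_id = 2;		/* example LMAC */
	uint64_t val_lo = 0x1234;	/* pretend CCW_LO read */
	uint64_t val_hi = 0x0002;	/* pretend CW_HI read */

	printf("VL0_CCW_LO offset: 0x%llx\n",
	       (unsigned long long)FCFECX_VL0_CCW_LO(lmac_id));
	printf("CW_HI offset:      0x%llx\n",
	       (unsigned long long)FCFECX_CW_HI(lmac_id));

	/* Corrected-block count: upper 16 bits from CW_HI, lower 16 from CCW_LO */
	printf("fec_corr_blks:     0x%llx\n",
	       (unsigned long long)(val_hi << 16 | val_lo));
	return 0;
}
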
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 1a97fb9032fa..2d78e08f985f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -20,6 +20,8 @@
#include "rvu_trace.h"
#include "rvu_npc_hash.h"
+#include "cn20k/reg.h"
+#include "cn20k/api.h"
#define DRV_NAME "rvu_af"
#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
@@ -34,10 +36,8 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
int type, int num,
void (mbox_handler)(struct work_struct *),
void (mbox_up_handler)(struct work_struct *));
-enum {
- TYPE_AFVF,
- TYPE_AFPF,
-};
+static irqreturn_t rvu_mbox_pf_intr_handler(int irq, void *rvu_irq);
+static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq);
/* Supported devices */
static const struct pci_device_id rvu_id_table[] = {
@@ -294,7 +294,7 @@ int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
devnum = rvu_get_hwvf(rvu, pcifunc);
} else {
is_pf = true;
- devnum = rvu_get_pf(pcifunc);
+ devnum = rvu_get_pf(rvu->pdev, pcifunc);
}
/* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or
@@ -359,7 +359,7 @@ static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
devnum = rvu_get_hwvf(rvu, pcifunc);
} else {
is_pf = true;
- devnum = rvu_get_pf(pcifunc);
+ devnum = rvu_get_pf(rvu->pdev, pcifunc);
}
block->fn_map[lf] = attach ? pcifunc : 0;
@@ -400,11 +400,6 @@ static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
}
-inline int rvu_get_pf(u16 pcifunc)
-{
- return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
-}
-
void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
{
u64 cfg;
@@ -422,7 +417,7 @@ int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
int pf, func;
u64 cfg;
- pf = rvu_get_pf(pcifunc);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
func = pcifunc & RVU_PFVF_FUNC_MASK;
/* Get first HWVF attached to this PF */
@@ -437,7 +432,7 @@ struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
if (pcifunc & RVU_PFVF_FUNC_MASK)
return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
else
- return &rvu->pf[rvu_get_pf(pcifunc)];
+ return &rvu->pf[rvu_get_pf(rvu->pdev, pcifunc)];
}
static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
@@ -445,7 +440,7 @@ static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
int pf, vf, nvfs;
u64 cfg;
- pf = rvu_get_pf(pcifunc);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
if (pf >= rvu->hw->total_pfs)
return false;
@@ -760,6 +755,11 @@ static void rvu_free_hw_resources(struct rvu *rvu)
rvu_reset_msix(rvu);
mutex_destroy(&rvu->rsrc_lock);
+
+ /* Free the QINT/CINT memory */
+ pfvf = &rvu->pf[RVU_AFPF];
+ qmem_free(rvu->dev, pfvf->nix_qints_ctx);
+ qmem_free(rvu->dev, pfvf->cq_ints_ctx);
}
static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
@@ -1162,6 +1162,10 @@ cpt:
}
rvu_program_channels(rvu);
+ cgx_start_linkup(rvu);
+
+ rvu_block_bcast_xon(rvu, BLKADDR_NIX0);
+ rvu_block_bcast_xon(rvu, BLKADDR_NIX1);
err = rvu_mcs_init(rvu);
if (err) {
@@ -1392,8 +1396,6 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
if (blkaddr < 0)
return;
- if (blktype == BLKTYPE_NIX)
- rvu_nix_reset_mac(pfvf, pcifunc);
block = &hw->block[blkaddr];
@@ -1406,6 +1408,10 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
if (lf < 0) /* This should never happen */
continue;
+ if (blktype == BLKTYPE_NIX) {
+ rvu_nix_reset_mac(pfvf, pcifunc);
+ rvu_npc_clear_ucast_entry(rvu, pcifunc, lf);
+ }
/* Disable the LF */
rvu_write64(rvu, blkaddr, block->lfcfg_reg |
(lf << block->lfshift), 0x00ULL);
@@ -1484,7 +1490,7 @@ int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
/* All CGX mapped PFs are set with assigned NIX block during init */
- if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
+ if (is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) {
blkaddr = pf->nix_blkaddr;
} else if (is_lbk_vf(rvu, pcifunc)) {
vf = pcifunc - 1;
@@ -1498,7 +1504,7 @@ int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
}
/* if SDP1 then the blkaddr is NIX1 */
- if (is_sdp_pfvf(pcifunc) && pf->sdp_info->node_id == 1)
+ if (is_sdp_pfvf(rvu, pcifunc) && pf->sdp_info->node_id == 1)
blkaddr = BLKADDR_NIX1;
switch (blkaddr) {
@@ -2003,7 +2009,7 @@ int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
vf = pcifunc & RVU_PFVF_FUNC_MASK;
cfg = rvu_read64(rvu, BLKADDR_RVUM,
- RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc)));
+ RVU_PRIV_PFX_CFG(rvu_get_pf(rvu->pdev, pcifunc)));
numvfs = (cfg >> 12) & 0xFF;
if (vf && vf <= numvfs)
@@ -2030,6 +2036,9 @@ int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
rsp->nix_shaping = hw->cap.nix_shaping;
rsp->npc_hash_extract = hw->cap.npc_hash_extract;
+ if (rvu->mcs_blk_cnt)
+ rsp->hw_caps = HW_CAP_MACSEC;
+
return 0;
}
@@ -2172,7 +2181,7 @@ static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
if (rsp && err) \
rsp->hdr.rc = err; \
\
- trace_otx2_msg_process(mbox->pdev, _id, err); \
+ trace_otx2_msg_process(mbox->pdev, _id, err, req->pcifunc); \
return rsp ? err : -ENOMEM; \
}
MBOX_MESSAGES
@@ -2217,15 +2226,30 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type, bool poll)
offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+ if (req_hdr->sig && !(is_rvu_otx2(rvu) || is_cn20k(rvu->pdev))) {
+ req_hdr->opt_msg = mw->mbox_wrk[devid].num_msgs;
+ rvu_write64(rvu, BLKADDR_NIX0, RVU_AF_BAR2_SEL,
+ RVU_AF_BAR2_PFID);
+ if (type == TYPE_AFPF)
+ rvu_write64(rvu, BLKADDR_NIX0,
+ AF_BAR2_ALIASX(0, NIX_CINTX_INT_W1S(devid)),
+ 0x1);
+ else
+ rvu_write64(rvu, BLKADDR_NIX0,
+ AF_BAR2_ALIASX(0, NIX_QINTX_CNT(devid)),
+ 0x1);
+ usleep_range(5000, 6000);
+ goto done;
+ }
+
for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) {
msg = mdev->mbase + offset;
/* Set which PF/VF sent this message based on mbox IRQ */
switch (type) {
case TYPE_AFPF:
- msg->pcifunc &=
- ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
- msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT);
+ msg->pcifunc &= rvu_pcifunc_pf_mask(rvu->pdev);
+ msg->pcifunc |= rvu_make_pcifunc(rvu->pdev, devid, 0);
break;
case TYPE_AFVF:
msg->pcifunc &=
@@ -2243,16 +2267,17 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type, bool poll)
if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
err, otx2_mbox_id2name(msg->id),
- msg->id, rvu_get_pf(msg->pcifunc),
+ msg->id, rvu_get_pf(rvu->pdev, msg->pcifunc),
(msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
else
dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
err, otx2_mbox_id2name(msg->id),
msg->id, devid);
}
+done:
mw->mbox_wrk[devid].num_msgs = 0;
- if (poll)
+ if (!is_cn20k(mbox->pdev) && poll)
otx2_mbox_wait_for_zero(mbox, devid);
/* Send mbox responses to VF/PF */
@@ -2358,13 +2383,21 @@ static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
__rvu_mbox_up_handler(mwork, TYPE_AFVF);
}
-static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
+static int rvu_get_mbox_regions(struct rvu *rvu, void __iomem **mbox_addr,
int num, int type, unsigned long *pf_bmap)
{
struct rvu_hwinfo *hw = rvu->hw;
int region;
u64 bar4;
+ /* For cn20k platform AF mailbox region is allocated by software
+ * and the corresponding IOVA is programmed in hardware unlike earlier
+ * silicons where software uses the hardware region after ioremap.
+ */
+ if (is_cn20k(rvu->pdev))
+ return cn20k_rvu_get_mbox_regions(rvu, (void *)mbox_addr,
+ num, type, pf_bmap);
+
/* For cn10k platform VF mailbox regions of a PF follows after the
* PF <-> AF mailbox region. Whereas for Octeontx2 it is read from
* RVU_PF_VF_BAR4_ADDR register.
@@ -2383,7 +2416,7 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
bar4 += region * MBOX_SIZE;
}
- mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
+ mbox_addr[region] = ioremap_wc(bar4, MBOX_SIZE);
if (!mbox_addr[region])
goto error;
}
@@ -2406,7 +2439,7 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
RVU_AF_PF_BAR4_ADDR);
bar4 += region * MBOX_SIZE;
}
- mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
+ mbox_addr[region] = ioremap_wc(bar4, MBOX_SIZE);
if (!mbox_addr[region])
goto error;
}
@@ -2414,20 +2447,26 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
error:
while (region--)
- iounmap((void __iomem *)mbox_addr[region]);
+ iounmap(mbox_addr[region]);
return -ENOMEM;
}
+static struct mbox_ops rvu_mbox_ops = {
+ .pf_intr_handler = rvu_mbox_pf_intr_handler,
+ .afvf_intr_handler = rvu_mbox_intr_handler,
+};
+
static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
int type, int num,
void (mbox_handler)(struct work_struct *),
void (mbox_up_handler)(struct work_struct *))
{
- int err = -EINVAL, i, dir, dir_up;
+ void __iomem **mbox_regions;
+ struct ng_rvu *ng_rvu_mbox;
+ int err, i, dir, dir_up;
void __iomem *reg_base;
struct rvu_work *mwork;
unsigned long *pf_bmap;
- void **mbox_regions;
const char *name;
u64 cfg;
@@ -2435,6 +2474,12 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
if (!pf_bmap)
return -ENOMEM;
+ ng_rvu_mbox = kzalloc(sizeof(*ng_rvu_mbox), GFP_KERNEL);
+ if (!ng_rvu_mbox) {
+ err = -ENOMEM;
+ goto free_bitmap;
+ }
+
/* RVU VFs */
if (type == TYPE_AFVF)
bitmap_set(pf_bmap, 0, num);
@@ -2448,12 +2493,20 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
}
}
+ rvu->ng_rvu = ng_rvu_mbox;
+
+ rvu->ng_rvu->rvu_mbox_ops = &rvu_mbox_ops;
+
+ err = cn20k_rvu_mbox_init(rvu, type, num);
+ if (err)
+ goto free_mem;
+
mutex_init(&rvu->mbox_lock);
- mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
+ mbox_regions = kcalloc(num, sizeof(void __iomem *), GFP_KERNEL);
if (!mbox_regions) {
err = -ENOMEM;
- goto free_bitmap;
+ goto free_qmem;
}
switch (type) {
@@ -2476,11 +2529,12 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
goto free_regions;
break;
default:
+ err = -EINVAL;
goto free_regions;
}
mw->mbox_wq = alloc_workqueue("%s",
- WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
+ WQ_HIGHPRI | WQ_MEM_RECLAIM,
num, name);
if (!mw->mbox_wq) {
err = -ENOMEM;
@@ -2523,7 +2577,11 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
mwork->rvu = rvu;
INIT_WORK(&mwork->work, mbox_up_handler);
}
- goto free_regions;
+
+ kfree(mbox_regions);
+ bitmap_free(pf_bmap);
+
+ return 0;
exit:
destroy_workqueue(mw->mbox_wq);
@@ -2532,6 +2590,10 @@ unmap_regions:
iounmap((void __iomem *)mbox_regions[num]);
free_regions:
kfree(mbox_regions);
+free_qmem:
+ cn20k_free_mbox_memory(rvu);
+free_mem:
+ kfree(rvu->ng_rvu);
free_bitmap:
bitmap_free(pf_bmap);
return err;
@@ -2558,8 +2620,8 @@ static void rvu_mbox_destroy(struct mbox_wq_info *mw)
otx2_mbox_destroy(&mw->mbox_up);
}
-static void rvu_queue_work(struct mbox_wq_info *mw, int first,
- int mdevs, u64 intr)
+void rvu_queue_work(struct mbox_wq_info *mw, int first,
+ int mdevs, u64 intr)
{
struct otx2_mbox_dev *mdev;
struct otx2_mbox *mbox;
@@ -2633,7 +2695,7 @@ static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);
rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
- vfs -= 64;
+ vfs = 64;
}
intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
@@ -2650,6 +2712,11 @@ static void rvu_enable_mbox_intr(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
+ if (is_cn20k(rvu->pdev)) {
+ cn20k_rvu_enable_mbox_intr(rvu);
+ return;
+ }
+
/* Clear spurious irqs, if any */
rvu_write64(rvu, BLKADDR_RVUM,
RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));
@@ -2767,7 +2834,7 @@ static void rvu_flr_handler(struct work_struct *work)
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
numvfs = (cfg >> 12) & 0xFF;
- pcifunc = pf << RVU_PFVF_PF_SHIFT;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
for (vf = 0; vf < numvfs; vf++)
__rvu_flr_handler(rvu, (pcifunc | (vf + 1)));
@@ -2903,9 +2970,12 @@ static void rvu_unregister_interrupts(struct rvu *rvu)
rvu_cpt_unregister_interrupts(rvu);
- /* Disable the Mbox interrupt */
- rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
- INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+ if (!is_cn20k(rvu->pdev))
+ /* Disable the Mbox interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+ else
+ cn20k_rvu_unregister_interrupts(rvu);
/* Disable the PF FLR interrupt */
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
@@ -2938,6 +3008,10 @@ static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
* VF interrupts can be handled. Offset equal to zero means
* that PF vectors are not configured and overlapping AF vectors.
*/
+ if (is_cn20k(rvu->pdev))
+ return (pfvf->msix.max >= RVU_AF_CN20K_INT_VEC_CNT +
+ RVU_MBOX_PF_INT_VEC_CNT) && offset;
+
return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
offset;
}
@@ -2968,18 +3042,30 @@ static int rvu_register_interrupts(struct rvu *rvu)
return ret;
}
- /* Register mailbox interrupt handler */
- sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
- ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
- rvu_mbox_pf_intr_handler, 0,
- &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
- if (ret) {
- dev_err(rvu->dev,
- "RVUAF: IRQ registration failed for mbox irq\n");
- goto fail;
- }
+ if (!is_cn20k(rvu->pdev)) {
+ /* Register mailbox interrupt handler */
+ sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE],
+ "RVUAF Mbox");
+ ret = request_irq(pci_irq_vector
+ (rvu->pdev, RVU_AF_INT_VEC_MBOX),
+ rvu->ng_rvu->rvu_mbox_ops->pf_intr_handler, 0,
+ &rvu->irq_name[RVU_AF_INT_VEC_MBOX *
+ NAME_SIZE], rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for mbox\n");
+ goto fail;
+ }
- rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;
+ rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;
+ } else {
+ ret = cn20k_register_afpf_mbox_intr(rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for mbox\n");
+ goto fail;
+ }
+ }
/* Enable mailbox interrupts from all PFs */
rvu_enable_mbox_intr(rvu);
@@ -3034,34 +3120,40 @@ static int rvu_register_interrupts(struct rvu *rvu)
/* Get PF MSIX vectors offset. */
pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
+ if (!is_cn20k(rvu->pdev)) {
+ /* Register MBOX0 interrupt. */
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu->ng_rvu->rvu_mbox_ops->afvf_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE],
+ rvu);
+ if (ret)
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for Mbox0\n");
- /* Register MBOX0 interrupt. */
- offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
- sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
- ret = request_irq(pci_irq_vector(rvu->pdev, offset),
- rvu_mbox_intr_handler, 0,
- &rvu->irq_name[offset * NAME_SIZE],
- rvu);
- if (ret)
- dev_err(rvu->dev,
- "RVUAF: IRQ registration failed for Mbox0\n");
-
- rvu->irq_allocated[offset] = true;
+ rvu->irq_allocated[offset] = true;
- /* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so
- * simply increment current offset by 1.
- */
- offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
- sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
- ret = request_irq(pci_irq_vector(rvu->pdev, offset),
- rvu_mbox_intr_handler, 0,
- &rvu->irq_name[offset * NAME_SIZE],
- rvu);
- if (ret)
- dev_err(rvu->dev,
- "RVUAF: IRQ registration failed for Mbox1\n");
+ /* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so
+ * simply increment current offset by 1.
+ */
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu->ng_rvu->rvu_mbox_ops->afvf_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE],
+ rvu);
+ if (ret)
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for Mbox1\n");
- rvu->irq_allocated[offset] = true;
+ rvu->irq_allocated[offset] = true;
+ } else {
+ ret = cn20k_register_afvf_mbox_intr(rvu, pf_vec_start);
+ if (ret)
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for Mbox\n");
+ }
/* Register FLR interrupt handler for AF's VFs */
offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
@@ -3172,6 +3264,9 @@ static void rvu_disable_afvf_intr(struct rvu *rvu)
{
int vfs = rvu->vfs;
+ if (is_cn20k(rvu->pdev))
+ return cn20k_rvu_disable_afvf_intr(rvu, vfs);
+
rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
@@ -3188,6 +3283,9 @@ static void rvu_enable_afvf_intr(struct rvu *rvu)
{
int vfs = rvu->vfs;
+ if (is_cn20k(rvu->pdev))
+ return cn20k_rvu_enable_afvf_intr(rvu, vfs);
+
/* Clear any pending interrupts and enable AF VF interrupts for
* the first 64 VFs.
*/
@@ -3432,6 +3530,9 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ptp_start(rvu, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate,
rvu->fwdata->ptp_ext_tstamp);
+ /* Alloc CINT and QINT memory */
+ rvu_alloc_cint_qint_mem(rvu, &rvu->pf[RVU_AFPF], BLKADDR_NIX0,
+ (rvu->hw->block[BLKADDR_NIX0].lf.max));
return 0;
err_dl:
rvu_unregister_dl(rvu);
@@ -3483,6 +3584,9 @@ static void rvu_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
devm_kfree(&pdev->dev, rvu->hw);
+ if (is_cn20k(rvu->pdev))
+ cn20k_free_mbox_memory(rvu);
+ kfree(rvu->ng_rvu);
devm_kfree(&pdev->dev, rvu);
}
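
A small plain-C model of the mbox_ops indirection introduced above: the request_irq call sites go through rvu->ng_rvu->rvu_mbox_ops, so the cn20k init path can install different interrupt handlers without touching those call sites. Types are simplified and the handler names are invented for illustration.

#include <stdio.h>

struct mbox_ops {
	int (*pf_intr_handler)(int irq, void *data);
	int (*afvf_intr_handler)(int irq, void *data);
};

static int legacy_pf_handler(int irq, void *data)   { puts("legacy PF mbox irq");   return 0; }
static int cn20k_pf_handler(int irq, void *data)    { puts("cn20k PF mbox irq");    return 0; }
static int legacy_afvf_handler(int irq, void *data) { puts("legacy AFVF mbox irq"); return 0; }

static const struct mbox_ops legacy_ops = {
	.pf_intr_handler   = legacy_pf_handler,
	.afvf_intr_handler = legacy_afvf_handler,
};

static const struct mbox_ops cn20k_ops = {
	.pf_intr_handler   = cn20k_pf_handler,
	.afvf_intr_handler = legacy_afvf_handler,	/* reuse where nothing changed */
};

int main(void)
{
	int is_cn20k = 1;
	const struct mbox_ops *ops = is_cn20k ? &cn20k_ops : &legacy_ops;

	/* The registration site is identical for both silicon generations */
	ops->pf_intr_handler(0, NULL);
	return 0;
}
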
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 5016ba82e142..e85dac2c806d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -10,6 +10,7 @@
#include <linux/pci.h>
#include <net/devlink.h>
+#include <linux/soc/marvell/silicons.h>
#include "rvu_struct.h"
#include "rvu_devlink.h"
@@ -30,6 +31,8 @@
#define PCI_SUBSYS_DEVID_CNF10K_A 0xBA00
#define PCI_SUBSYS_DEVID_CNF10K_B 0xBC00
#define PCI_SUBSYS_DEVID_CN10K_B 0xBD00
+#define PCI_SUBSYS_DEVID_CN20KA 0xC220
+#define PCI_SUBSYS_DEVID_CNF20KA 0xC320
/* PCI BAR nos */
#define PCI_AF_REG_BAR_NUM 0
@@ -41,12 +44,39 @@
#define MAX_CPT_BLKS 2
/* PF_FUNC */
-#define RVU_PFVF_PF_SHIFT 10
-#define RVU_PFVF_PF_MASK 0x3F
-#define RVU_PFVF_FUNC_SHIFT 0
-#define RVU_PFVF_FUNC_MASK 0x3FF
+#define RVU_OTX2_PFVF_PF_SHIFT 10
+#define RVU_OTX2_PFVF_PF_MASK 0x3F
+#define RVU_PFVF_FUNC_SHIFT 0
+#define RVU_PFVF_FUNC_MASK 0x3FF
+#define RVU_CN20K_PFVF_PF_SHIFT 9
+#define RVU_CN20K_PFVF_PF_MASK 0x7F
+
+static inline u16 rvu_make_pcifunc(struct pci_dev *pdev, int pf, int func)
+{
+ if (is_cn20k(pdev))
+ return ((pf & RVU_CN20K_PFVF_PF_MASK) <<
+ RVU_CN20K_PFVF_PF_SHIFT) |
+ ((func & RVU_PFVF_FUNC_MASK) <<
+ RVU_PFVF_FUNC_SHIFT);
+ else
+ return ((pf & RVU_OTX2_PFVF_PF_MASK) <<
+ RVU_OTX2_PFVF_PF_SHIFT) |
+ ((func & RVU_PFVF_FUNC_MASK) <<
+ RVU_PFVF_FUNC_SHIFT);
+}
+
+static inline int rvu_pcifunc_pf_mask(struct pci_dev *pdev)
+{
+ if (is_cn20k(pdev))
+ return ~(RVU_CN20K_PFVF_PF_MASK << RVU_CN20K_PFVF_PF_SHIFT);
+ else
+ return ~(RVU_OTX2_PFVF_PF_MASK << RVU_OTX2_PFVF_PF_SHIFT);
+}
+
+#define RVU_AFPF 25
#ifdef CONFIG_DEBUG_FS
+
struct dump_ctx {
int lf;
int id;
@@ -444,6 +474,23 @@ struct mbox_wq_info {
struct workqueue_struct *mbox_wq;
};
+struct rvu_irq_data {
+ u64 intr_status;
+ void (*rvu_queue_work_hdlr)(struct mbox_wq_info *mw, int first,
+ int mdevs, u64 intr);
+ void (*afvf_queue_work_hdlr)(struct mbox_wq_info *mw, int first,
+ int mdevs, u64 intr);
+ struct rvu *rvu;
+ int vec_num;
+ int start;
+ int mdevs;
+};
+
+struct mbox_ops {
+ irqreturn_t (*pf_intr_handler)(int irq, void *rvu_irq);
+ irqreturn_t (*afvf_intr_handler)(int irq, void *rvu_irq);
+};
+
struct channel_fwdata {
struct sdp_node_info info;
u8 valid;
@@ -451,6 +498,14 @@ struct channel_fwdata {
u8 reserved[RVU_CHANL_INFO_RESERVED];
};
+struct altaf_intr_notify {
+ unsigned long flr_pf_bmap[2];
+ unsigned long flr_vf_bmap[2];
+ unsigned long gint_paddr;
+ unsigned long gint_iova_addr;
+ unsigned long reserved[6];
+};
+
struct rvu_fwdata {
#define RVU_FWDATA_HEADER_MAGIC 0xCFDA /* Custom Firmware Data*/
#define RVU_FWDATA_VERSION 0x0001
@@ -470,7 +525,8 @@ struct rvu_fwdata {
u32 ptp_ext_clk_rate;
u32 ptp_ext_tstamp;
struct channel_fwdata channel_data;
-#define FWDATA_RESERVED_MEM 958
+ struct altaf_intr_notify altaf_intr_info;
+#define FWDATA_RESERVED_MEM 946
u64 reserved[FWDATA_RESERVED_MEM];
#define CGX_MAX 9
#define CGX_LMACS_MAX 4
@@ -513,6 +569,11 @@ struct rvu_switch {
u16 start_entry;
};
+struct rep_evtq_ent {
+ struct list_head node;
+ struct rep_event event;
+};
+
struct rvu {
void __iomem *afreg_base;
void __iomem *pfreg_base;
@@ -525,6 +586,7 @@ struct rvu {
struct mutex alias_lock; /* Serialize bar2 alias access */
int vfs; /* Number of VFs attached to RVU */
u16 vf_devid; /* VF devices id */
+ bool def_rule_cntr_en;
int nix_blkaddr[MAX_NIX_BLKS];
/* Mbox */
@@ -594,6 +656,18 @@ struct rvu {
spinlock_t cpt_intr_lock;
struct mutex mbox_lock; /* Serialize mbox up and down msgs */
+ u16 rep_pcifunc;
+ bool altaf_ready;
+ int rep_cnt;
+ u16 *rep2pfvf_map;
+ u8 rep_mode;
+ struct work_struct rep_evt_work;
+ struct workqueue_struct *rep_evt_wq;
+ struct list_head rep_evtq_head;
+ /* Representor event lock */
+ spinlock_t rep_evtq_lock;
+
+ struct ng_rvu *ng_rvu;
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@@ -719,6 +793,20 @@ static inline bool is_cn10kb(struct rvu *rvu)
return false;
}
+static inline bool is_cgx_mapped_to_nix(unsigned short id, u8 cgx_id)
+{
+ /* On CNF10KA and CNF10KB silicons only two CGX blocks are connected
+ * to NIX.
+ */
+ if (id == PCI_SUBSYS_DEVID_CNF10K_A || id == PCI_SUBSYS_DEVID_CNF10K_B)
+ return cgx_id <= 1;
+
+ return !(cgx_id && !(id == PCI_SUBSYS_DEVID_96XX ||
+ id == PCI_SUBSYS_DEVID_98XX ||
+ id == PCI_SUBSYS_DEVID_CN10K_A ||
+ id == PCI_SUBSYS_DEVID_CN10K_B));
+}
+
static inline bool is_rvu_npc_hash_extract_en(struct rvu *rvu)
{
u64 npc_const3;
@@ -819,7 +907,6 @@ int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc);
void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start);
bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc);
u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr);
-int rvu_get_pf(u16 pcifunc);
struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc);
void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf);
bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr);
@@ -848,15 +935,33 @@ void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq);
/* SDP APIs */
int rvu_sdp_init(struct rvu *rvu);
-bool is_sdp_pfvf(u16 pcifunc);
-bool is_sdp_pf(u16 pcifunc);
+bool is_sdp_pfvf(struct rvu *rvu, u16 pcifunc);
+bool is_sdp_pf(struct rvu *rvu, u16 pcifunc);
bool is_sdp_vf(struct rvu *rvu, u16 pcifunc);
+static inline bool is_rep_dev(struct rvu *rvu, u16 pcifunc)
+{
+ if (rvu->rep_pcifunc && rvu->rep_pcifunc == pcifunc)
+ return true;
+
+ return false;
+}
+
+static inline int rvu_get_pf(struct pci_dev *pdev, u16 pcifunc)
+{
+ if (is_cn20k(pdev))
+ return (pcifunc >> RVU_CN20K_PFVF_PF_SHIFT) &
+ RVU_CN20K_PFVF_PF_MASK;
+ else
+ return (pcifunc >> RVU_OTX2_PFVF_PF_SHIFT) &
+ RVU_OTX2_PFVF_PF_MASK;
+}
+
/* CGX APIs */
static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf)
{
return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs) &&
- !is_sdp_pf(pf << RVU_PFVF_PF_SHIFT);
+ !is_sdp_pf(rvu, rvu_make_pcifunc(rvu->pdev, pf, 0));
}
static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
@@ -868,7 +973,7 @@ static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
static inline bool is_cgx_vf(struct rvu *rvu, u16 pcifunc)
{
return ((pcifunc & RVU_PFVF_FUNC_MASK) &&
- is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)));
+ is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc)));
}
#define M(_name, _id, fn_name, req, rsp) \
@@ -876,6 +981,10 @@ int rvu_mbox_handler_ ## fn_name(struct rvu *, struct req *, struct rsp *);
MBOX_MESSAGES
#undef M
+/* Mbox APIs */
+void rvu_queue_work(struct mbox_wq_info *mw, int first,
+ int mdevs, u64 intr);
+
int rvu_cgx_init(struct rvu *rvu);
int rvu_cgx_exit(struct rvu *rvu);
void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu);
@@ -930,6 +1039,11 @@ int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc,
int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
u32 mcast_grp_idx, u16 mcam_index);
void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc);
+int rvu_alloc_cint_qint_mem(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ int blkaddr, int nixlf);
+void rvu_block_bcast_xon(struct rvu *rvu, int blkaddr);
+int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
+ struct nix_aq_enq_rsp *rsp);
/* NPC APIs */
void rvu_npc_freemem(struct rvu *rvu);
@@ -944,8 +1058,6 @@ void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
bool enable);
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan);
-void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
- bool enable);
void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
u64 chan);
void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
@@ -960,13 +1072,19 @@ void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
int group, int alg_idx, int mcam_index);
-
+void __rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc,
+ struct rvu_npc_mcam_rule *rule);
+void __rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
+ struct rvu_npc_mcam_rule *rule,
+ struct npc_install_flow_rsp *rsp);
void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
int blkaddr, int *alloc_cnt,
int *enable_cnt);
void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
int blkaddr, int *alloc_cnt,
int *enable_cnt);
+void rvu_npc_clear_ucast_entry(struct rvu *rvu, int pcifunc, int nixlf);
+
bool is_npc_intf_tx(u8 intf);
bool is_npc_intf_rx(u8 intf);
bool is_npc_interface_valid(struct rvu *rvu, u8 intf);
@@ -985,6 +1103,7 @@ void npc_set_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 src, struct mcam_entry *entry,
u8 *intf, u8 *ena);
+int npc_config_cntr_default_entries(struct rvu *rvu, bool enable);
bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc);
bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
u32 rvu_cgx_get_fifolen(struct rvu *rvu);
@@ -997,6 +1116,7 @@ int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_
int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause);
void rvu_mac_reset(struct rvu *rvu, u16 pcifunc);
u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac);
+void cgx_start_linkup(struct rvu *rvu);
int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
int type);
bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr,
@@ -1045,7 +1165,8 @@ int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr);
/* RVU Switch */
void rvu_switch_enable(struct rvu *rvu);
void rvu_switch_disable(struct rvu *rvu);
-void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc);
+void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc, bool ena);
+void rvu_switch_enable_lbk_link(struct rvu *rvu, u16 pcifunc, bool ena);
int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
u64 pkind, u8 var_len_off, u8 var_len_off_mask,
@@ -1058,4 +1179,9 @@ int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc);
void rvu_mcs_ptp_cfg(struct rvu *rvu, u8 rpm_id, u8 lmac_id, bool ena);
void rvu_mcs_exit(struct rvu *rvu);
+/* Representor APIs */
+int rvu_rep_pf_init(struct rvu *rvu);
+int rvu_rep_install_mcam_rules(struct rvu *rvu);
+void rvu_rep_update_rules(struct rvu *rvu, u16 pcifunc, bool ena);
+int rvu_rep_notify_pfvf_state(struct rvu *rvu, u16 pcifunc, bool enable);
#endif /* RVU_H */
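
A worked example of the two PF_FUNC layouts defined above (shifts and masks copied from rvu.h; standalone userspace C, illustrative only). The FUNC field occupies the low bits in both layouts; only the PF field width and position change on cn20k.

#include <stdio.h>
#include <stdint.h>

#define OTX2_PF_SHIFT	10
#define OTX2_PF_MASK	0x3F
#define CN20K_PF_SHIFT	9
#define CN20K_PF_MASK	0x7F
#define FUNC_MASK	0x3FF

static uint16_t make_pcifunc(int pf, int func, int cn20k)
{
	if (cn20k)
		return ((pf & CN20K_PF_MASK) << CN20K_PF_SHIFT) |
		       (func & FUNC_MASK);
	return ((pf & OTX2_PF_MASK) << OTX2_PF_SHIFT) |
	       (func & FUNC_MASK);
}

int main(void)
{
	/* PF2, VF1 (func = vf + 1 = 2) */
	printf("otx2  pcifunc: 0x%x\n", make_pcifunc(2, 2, 0));	/* 0x802 */
	printf("cn20k pcifunc: 0x%x\n", make_pcifunc(2, 2, 1));	/* 0x402 */
	return 0;
}
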
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 266ecbc1b97a..3abd750a4bd7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -34,7 +34,7 @@ static struct _req_type __maybe_unused \
return NULL; \
req->hdr.sig = OTX2_MBOX_REQ_SIG; \
req->hdr.id = _id; \
- trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req)); \
+ trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req), 0); \
return req; \
}
@@ -272,6 +272,8 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pfid);
+ otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
+
mutex_unlock(&rvu->mbox_lock);
} while (pfmap);
}
@@ -313,7 +315,7 @@ static int cgx_lmac_event_handler_init(struct rvu *rvu)
spin_lock_init(&rvu->cgx_evq_lock);
INIT_LIST_HEAD(&rvu->cgx_evq_head);
INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
- rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
+ rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", WQ_PERCPU, 0);
if (!rvu->cgx_evh_wq) {
dev_err(rvu->dev, "alloc workqueue failed");
return -ENOMEM;
@@ -349,6 +351,7 @@ static void rvu_cgx_wq_destroy(struct rvu *rvu)
int rvu_cgx_init(struct rvu *rvu)
{
+ struct mac_ops *mac_ops;
int cgx, err;
void *cgxd;
@@ -375,6 +378,15 @@ int rvu_cgx_init(struct rvu *rvu)
if (err)
return err;
+ /* Clear X2P reset on all MAC blocks */
+ for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) {
+ cgxd = rvu_cgx_pdata(cgx, rvu);
+ if (!cgxd)
+ continue;
+ mac_ops = get_mac_ops(cgxd);
+ mac_ops->mac_x2p_reset(cgxd, false);
+ }
+
/* Register for CGX events */
err = cgx_lmac_event_handler_init(rvu);
if (err)
@@ -382,10 +394,26 @@ int rvu_cgx_init(struct rvu *rvu)
mutex_init(&rvu->cgx_cfg_lock);
- /* Ensure event handler registration is completed, before
- * we turn on the links
- */
- mb();
+ return 0;
+}
+
+void cgx_start_linkup(struct rvu *rvu)
+{
+ unsigned long lmac_bmap;
+ struct mac_ops *mac_ops;
+ int cgx, lmac, err;
+ void *cgxd;
+
+ /* Enable receive on all LMACS */
+ for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
+ cgxd = rvu_cgx_pdata(cgx, rvu);
+ if (!cgxd)
+ continue;
+ mac_ops = get_mac_ops(cgxd);
+ lmac_bmap = cgx_get_lmac_bmap(cgxd);
+ for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx)
+ mac_ops->mac_enadis_rx(cgxd, lmac, true);
+ }
/* Do link up for all CGX ports */
for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
@@ -398,8 +426,6 @@ int rvu_cgx_init(struct rvu *rvu)
"Link up process failed to start on cgx %d\n",
cgx);
}
-
- return 0;
}
int rvu_cgx_exit(struct rvu *rvu)
@@ -431,7 +457,7 @@ int rvu_cgx_exit(struct rvu *rvu)
inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
{
if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
- !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
+ !is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc)))
return false;
return true;
}
@@ -458,7 +484,7 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
void *cgxd;
@@ -475,7 +501,7 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
void *cgxd;
@@ -500,7 +526,7 @@ int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
int i = 0, lmac_count = 0;
struct mac_ops *mac_ops;
u8 max_dmac_filters;
@@ -551,7 +577,7 @@ int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req,
void *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
struct mac_ops *mac_ops;
int stat = 0, err = 0;
u64 tx_stat, rx_stat;
@@ -607,7 +633,7 @@ int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req,
int rvu_mbox_handler_cgx_stats_rst(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
struct rvu_pfvf *parent_pf;
struct mac_ops *mac_ops;
u8 cgx_idx, lmac;
@@ -637,7 +663,7 @@ int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
struct msg_req *req,
struct cgx_fec_stats_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
struct mac_ops *mac_ops;
u8 cgx_idx, lmac;
void *cgxd;
@@ -655,17 +681,20 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
+ struct rvu_pfvf *pfvf;
u8 cgx_id, lmac_id;
- if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
- return -EPERM;
+ if (!is_pf_cgxmapped(rvu, pf))
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
if (rvu_npc_exact_has_match_table(rvu))
return rvu_npc_exact_mac_addr_set(rvu, req, rsp);
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ pfvf = &rvu->pf[pf];
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
return 0;
@@ -675,7 +704,7 @@ int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
struct cgx_mac_addr_add_req *req,
struct cgx_mac_addr_add_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
int rc = 0;
@@ -699,7 +728,7 @@ int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
struct cgx_mac_addr_del_req *req,
struct msg_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
@@ -717,7 +746,7 @@ int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
struct cgx_max_dmac_entries_get_rsp
*rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
/* If msg is received from PFs(which are not mapped to CGX LMACs)
@@ -743,20 +772,12 @@ int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
- u8 cgx_id, lmac_id;
- int rc = 0;
- u64 cfg;
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
- if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
- return -EPERM;
-
- rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, req->hdr.pcifunc)))
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
- rsp->hdr.rc = rc;
- cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
- /* copy 48 bit mac address to req->mac_addr */
- u64_to_ether_addr(cfg, rsp->mac_addr);
+ ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
return 0;
}
@@ -764,7 +785,7 @@ int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
@@ -783,7 +804,7 @@ int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
@@ -802,7 +823,7 @@ int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
void *cgxd;
@@ -838,7 +859,7 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
- if (!is_pf_cgxmapped(rvu, rvu_get_pf(req->hdr.pcifunc)))
+ if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, req->hdr.pcifunc)))
return -EPERM;
return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
@@ -852,7 +873,7 @@ int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req,
static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, pcifunc))
@@ -891,7 +912,7 @@ int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
u8 cgx_id, lmac_id;
int pf, err;
- pf = rvu_get_pf(req->hdr.pcifunc);
+ pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
if (!is_pf_cgxmapped(rvu, pf))
return -ENODEV;
@@ -907,7 +928,7 @@ int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
struct msg_req *req,
struct cgx_features_info_msg *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_idx, lmac;
void *cgxd;
@@ -923,13 +944,12 @@ int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
u32 rvu_cgx_get_fifolen(struct rvu *rvu)
{
- struct mac_ops *mac_ops;
- u32 fifo_len;
+ void *cgxd = rvu_first_cgx_pdata(rvu);
- mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
- fifo_len = mac_ops ? mac_ops->fifo_len : 0;
+ if (!cgxd)
+ return 0;
- return fifo_len;
+ return cgx_get_fifo_len(cgxd);
}
u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac)
@@ -950,7 +970,7 @@ u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac)
static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
@@ -980,7 +1000,7 @@ int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u8 rx_pfc = 0, tx_pfc = 0;
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
@@ -1021,7 +1041,7 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
struct cgx_pause_frm_cfg *req,
struct cgx_pause_frm_cfg *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
int err = 0;
@@ -1048,7 +1068,7 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
if (!is_pf_cgxmapped(rvu, pf))
@@ -1081,7 +1101,7 @@ int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
/* Assumes LF of a PF and all of its VF belongs to the same
* NIX block
*/
- pcifunc = pf << RVU_PFVF_PF_SHIFT;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
return 0;
@@ -1108,10 +1128,10 @@ int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
struct rvu_pfvf *parent_pf, *pfvf;
int cgx_users, err = 0;
- if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
+ if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc)))
return 0;
- parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
+ parent_pf = &rvu->pf[rvu_get_pf(rvu->pdev, pcifunc)];
pfvf = rvu_get_pfvf(rvu, pcifunc);
mutex_lock(&rvu->cgx_cfg_lock);
@@ -1154,7 +1174,7 @@ int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
struct fec_mode *req,
struct fec_mode *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
if (!is_pf_cgxmapped(rvu, pf))
@@ -1170,7 +1190,7 @@ int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
struct cgx_fw_data *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
if (!rvu->fwdata)
@@ -1197,7 +1217,8 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
struct cgx_set_link_mode_req *req,
struct cgx_set_link_mode_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
+ struct cgx_lmac_fwdata_s *linkmodes;
u8 cgx_idx, lmac;
void *cgxd;
@@ -1206,14 +1227,20 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
cgxd = rvu_cgx_pdata(cgx_idx, rvu);
- rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
+ if (rvu->hw->lmac_per_cgx == CGX_LMACS_USX)
+ linkmodes = &rvu->fwdata->cgx_fw_data_usx[cgx_idx][lmac];
+ else
+ linkmodes = &rvu->fwdata->cgx_fw_data[cgx_idx][lmac];
+
+ rsp->status = cgx_set_link_mode(cgxd, req->args, linkmodes,
+ cgx_idx, lmac);
return 0;
}
int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
struct msg_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
@@ -1231,7 +1258,7 @@ int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
struct cgx_mac_addr_update_req *req,
struct cgx_mac_addr_update_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
@@ -1247,7 +1274,7 @@ int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause,
u8 rx_pause, u16 pfc_en)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u8 rx_8023 = 0, tx_8023 = 0;
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
@@ -1285,7 +1312,7 @@ int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu,
struct cgx_pfc_cfg *req,
struct cgx_pfc_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
void *cgxd;
@@ -1310,7 +1337,7 @@ int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu,
void rvu_mac_reset(struct rvu *rvu, u16 pcifunc)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
struct mac_ops *mac_ops;
struct cgx *cgxd;
u8 cgx, lmac;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
index 7fa98aeb3663..d2163da28d18 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -13,19 +13,26 @@
/* RVU LMTST */
#define LMT_TBL_OP_READ 0
#define LMT_TBL_OP_WRITE 1
-#define LMT_MAP_TABLE_SIZE (128 * 1024)
#define LMT_MAPTBL_ENTRY_SIZE 16
+#define LMT_MAX_VFS 256
+
+#define LMT_MAP_ENTRY_ENA BIT_ULL(20)
+#define LMT_MAP_ENTRY_LINES GENMASK_ULL(18, 16)
/* Function to perform operations (read/write) on lmtst map table */
static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
int lmt_tbl_op)
{
void __iomem *lmt_map_base;
- u64 tbl_base;
+ u64 tbl_base, cfg;
+ int pfs, vfs;
tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
+ cfg = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG);
+ vfs = 1 << (cfg & 0xF);
+ pfs = 1 << ((cfg >> 4) & 0x7);
- lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE);
+ lmt_map_base = ioremap_wc(tbl_base, pfs * vfs * LMT_MAPTBL_ENTRY_SIZE);
if (!lmt_map_base) {
dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
return -ENOMEM;
@@ -35,6 +42,13 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
*val = readq(lmt_map_base + index);
} else {
writeq((*val), (lmt_map_base + index));
+
+ cfg = FIELD_PREP(LMT_MAP_ENTRY_ENA, 0x1);
+ /* 2048 LMTLINES */
+ cfg |= FIELD_PREP(LMT_MAP_ENTRY_LINES, 0x6);
+
+ writeq(cfg, (lmt_map_base + (index + 8)));
+
/* Flushing the AP interceptor cache to make APR_LMT_MAP_ENTRY_S
* changes effective. Write 1 for flush and read is being used as a
* barrier and sets up a data dependency. Write to 0 after a write
@@ -52,7 +66,7 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
#define LMT_MAP_TBL_W1_OFF 8
static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)
{
- return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) +
+ return ((rvu_get_pf(rvu->pdev, pcifunc) * LMT_MAX_VFS) +
(pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE;
}
@@ -69,7 +83,7 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
mutex_lock(&rvu->rsrc_lock);
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
- pf = rvu_get_pf(pcifunc) & 0x1F;
+ pf = rvu_get_pf(rvu->pdev, pcifunc) & RVU_OTX2_PFVF_PF_MASK;
val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF);
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val);
@@ -141,7 +155,7 @@ int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu,
int err = 0;
u64 val;
- /* Check if PF_FUNC wants to use it's own local memory as LMTLINE
+ /* Check if PF_FUNC wants to use its own local memory as LMTLINE
* region, if so, convert that IOVA to physical address and
* populate LMT table with that address
*/
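
A short sketch of the LMT map table index arithmetic used above: each PF/VF slot is LMT_MAPTBL_ENTRY_SIZE (16) bytes and every PF owns a block of LMT_MAX_VFS (256) slots, so the byte offset is (pf * 256 + func) * 16. Userspace C with illustrative values.

#include <stdio.h>
#include <stdint.h>

#define LMT_MAX_VFS		256
#define LMT_MAPTBL_ENTRY_SIZE	16

static uint32_t lmt_tbl_index(int pf, int func)
{
	return (pf * LMT_MAX_VFS + func) * LMT_MAPTBL_ENTRY_SIZE;
}

int main(void)
{
	/* PF3 itself (func 0) and its VF0 (func = vf + 1 = 1) */
	printf("PF3     -> byte offset %u\n", lmt_tbl_index(3, 0));	/* 12288 */
	printf("PF3/VF0 -> byte offset %u\n", lmt_tbl_index(3, 1));	/* 12304 */
	return 0;
}
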
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index 3c5bbaf12e59..f404117bf6c8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -410,7 +410,7 @@ static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc)
{
int cpt_pf_num = rvu->cpt_pf_num;
- if (rvu_get_pf(pcifunc) != cpt_pf_num)
+ if (rvu_get_pf(rvu->pdev, pcifunc) != cpt_pf_num)
return false;
if (pcifunc & RVU_PFVF_FUNC_MASK)
return false;
@@ -422,7 +422,7 @@ static bool is_cpt_vf(struct rvu *rvu, u16 pcifunc)
{
int cpt_pf_num = rvu->cpt_pf_num;
- if (rvu_get_pf(pcifunc) != cpt_pf_num)
+ if (rvu_get_pf(rvu->pdev, pcifunc) != cpt_pf_num)
return false;
if (!(pcifunc & RVU_PFVF_FUNC_MASK))
return false;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 8c700ee4a82b..15d3cb0b9da6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -21,6 +21,8 @@
#include "rvu_npc_hash.h"
#include "mcs.h"
+#include "cn20k/debugfs.h"
+
#define DEBUGFS_DIR_NAME "octeontx2"
enum {
@@ -45,33 +47,6 @@ enum {
CGX_STAT18,
};
-/* NIX TX stats */
-enum nix_stat_lf_tx {
- TX_UCAST = 0x0,
- TX_BCAST = 0x1,
- TX_MCAST = 0x2,
- TX_DROP = 0x3,
- TX_OCTS = 0x4,
- TX_STATS_ENUM_LAST,
-};
-
-/* NIX RX stats */
-enum nix_stat_lf_rx {
- RX_OCTS = 0x0,
- RX_UCAST = 0x1,
- RX_BCAST = 0x2,
- RX_MCAST = 0x3,
- RX_DROP = 0x4,
- RX_DROP_OCTS = 0x5,
- RX_FCS = 0x6,
- RX_ERR = 0x7,
- RX_DRP_BCAST = 0x8,
- RX_DRP_MCAST = 0x9,
- RX_DRP_L3BCAST = 0xa,
- RX_DRP_L3MCAST = 0xb,
- RX_STATS_ENUM_LAST,
-};
-
static char *cgx_rx_stats_fields[] = {
[CGX_STAT0] = "Received packets",
[CGX_STAT1] = "Octets of received packets",
@@ -580,6 +555,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
u64 lmt_addr, val, tbl_base;
int pf, vf, num_vfs, hw_vfs;
void __iomem *lmt_map_base;
+ int apr_pfs, apr_vfs;
int buf_size = 10240;
size_t off = 0;
int index = 0;
@@ -595,8 +571,12 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
return -ENOMEM;
tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
+ val = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG);
+ apr_vfs = 1 << (val & 0xF);
+ apr_pfs = 1 << ((val >> 4) & 0x7);
- lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
+ lmt_map_base = ioremap_wc(tbl_base, apr_pfs * apr_vfs *
+ LMT_MAPTBL_ENTRY_SIZE);
if (!lmt_map_base) {
dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
kfree(buf);
@@ -618,7 +598,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t",
pf);
- index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
+ index = pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE;
off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
(tbl_base + index));
lmt_addr = readq(lmt_map_base + index);
@@ -631,7 +611,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
/* Reading num of VFs per PF */
rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
for (vf = 0; vf < num_vfs; vf++) {
- index = (pf * rvu->hw->total_vfs * 16) +
+ index = (pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE) +
((vf + 1) * LMT_MAPTBL_ENTRY_SIZE);
off += scnprintf(&buf[off], buf_size - 1 - off,
"PF%d:VF%d \t\t", pf, vf);
@@ -710,7 +690,7 @@ static int get_max_column_width(struct rvu *rvu)
for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
- pcifunc = pf << 10 | vf;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, vf);
if (!pcifunc)
continue;
@@ -781,7 +761,7 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
off = 0;
flag = 0;
- pcifunc = pf << 10 | vf;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, vf);
if (!pcifunc)
continue;
@@ -864,7 +844,7 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
cgx[0] = 0;
lmac[0] = 0;
- pcifunc = pf << 10;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
pfvf = rvu_get_pfvf(rvu, pcifunc);
if (pfvf->nix_blkaddr == BLKADDR_NIX0)
@@ -889,6 +869,71 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
+static int rvu_dbg_rvu_fwdata_display(struct seq_file *s, void *unused)
+{
+ struct rvu *rvu = s->private;
+ struct rvu_fwdata *fwdata;
+ u8 mac[ETH_ALEN];
+ int count = 0, i;
+
+ if (!rvu->fwdata)
+ return -EAGAIN;
+
+ fwdata = rvu->fwdata;
+ seq_puts(s, "\nRVU Firmware Data:\n");
+ seq_puts(s, "\n\t\tPTP INFORMATION\n");
+ seq_puts(s, "\t\t===============\n");
+ seq_printf(s, "\t\texternal clockrate \t :%x\n",
+ fwdata->ptp_ext_clk_rate);
+ seq_printf(s, "\t\texternal timestamp \t :%x\n",
+ fwdata->ptp_ext_tstamp);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\n\t\tSDP CHANNEL INFORMATION\n");
+ seq_puts(s, "\t\t=======================\n");
+ seq_printf(s, "\t\tValid \t\t\t :%x\n", fwdata->channel_data.valid);
+ seq_printf(s, "\t\tNode ID \t\t :%x\n",
+ fwdata->channel_data.info.node_id);
+ seq_printf(s, "\t\tNumber of VFs \t\t :%x\n",
+ fwdata->channel_data.info.max_vfs);
+ seq_printf(s, "\t\tNumber of PF-Rings \t :%x\n",
+ fwdata->channel_data.info.num_pf_rings);
+ seq_printf(s, "\t\tPF SRN \t\t\t :%x\n",
+ fwdata->channel_data.info.pf_srn);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\n\t\tPF-INDEX MACADDRESS\n");
+ seq_puts(s, "\t\t====================\n");
+ for (i = 0; i < PF_MACNUM_MAX; i++) {
+ u64_to_ether_addr(fwdata->pf_macs[i], mac);
+ if (!is_zero_ether_addr(mac)) {
+ seq_printf(s, "\t\t %d %pM\n", i, mac);
+ count++;
+ }
+ }
+
+ if (!count)
+ seq_puts(s, "\t\tNo valid address found\n");
+
+ seq_puts(s, "\n\t\tVF-INDEX MACADDRESS\n");
+ seq_puts(s, "\t\t====================\n");
+ count = 0;
+ for (i = 0; i < VF_MACNUM_MAX; i++) {
+ u64_to_ether_addr(fwdata->vf_macs[i], mac);
+ if (!is_zero_ether_addr(mac)) {
+ seq_printf(s, "\t\t %d %pM\n", i, mac);
+ count++;
+ }
+ }
+
+ if (!count)
+ seq_puts(s, "\t\tNo valid address found\n");
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(rvu_fwdata, rvu_fwdata_display, NULL);
+
static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
u16 *pcifunc)
{
@@ -944,19 +989,18 @@ static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
/* The 'qsize' entry dumps current Aura/Pool context Qsize
* and each context's current enable/disable status in a bitmap.
*/
-static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
+static int rvu_dbg_qsize_display(struct seq_file *s, void *unsused,
int blktype)
{
- void (*print_qsize)(struct seq_file *filp,
+ void (*print_qsize)(struct seq_file *s,
struct rvu_pfvf *pfvf) = NULL;
- struct dentry *current_dir;
struct rvu_pfvf *pfvf;
struct rvu *rvu;
int qsize_id;
u16 pcifunc;
int blkaddr;
- rvu = filp->private;
+ rvu = s->private;
switch (blktype) {
case BLKTYPE_NPA:
qsize_id = rvu->rvu_dbg.npa_qsize_id;
@@ -972,32 +1016,28 @@ static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
return -EINVAL;
}
- if (blktype == BLKTYPE_NPA) {
+ if (blktype == BLKTYPE_NPA)
blkaddr = BLKADDR_NPA;
- } else {
- current_dir = filp->file->f_path.dentry->d_parent;
- blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
- BLKADDR_NIX1 : BLKADDR_NIX0);
- }
+ else
+ blkaddr = debugfs_get_aux_num(s->file);
if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
- print_qsize(filp, pfvf);
+ print_qsize(s, pfvf);
return 0;
}
-static ssize_t rvu_dbg_qsize_write(struct file *filp,
+static ssize_t rvu_dbg_qsize_write(struct file *file,
const char __user *buffer, size_t count,
loff_t *ppos, int blktype)
{
char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
- struct seq_file *seqfile = filp->private_data;
+ struct seq_file *seqfile = file->private_data;
char *cmd_buf, *cmd_buf_tmp, *subtoken;
struct rvu *rvu = seqfile->private;
- struct dentry *current_dir;
int blkaddr;
u16 pcifunc;
int ret, lf;
@@ -1023,13 +1063,10 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
goto qsize_write_done;
}
- if (blktype == BLKTYPE_NPA) {
+ if (blktype == BLKTYPE_NPA)
blkaddr = BLKADDR_NPA;
- } else {
- current_dir = filp->f_path.dentry->d_parent;
- blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
- BLKADDR_NIX1 : BLKADDR_NIX0);
- }
+ else
+ blkaddr = debugfs_get_aux_num(file);
if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
ret = -EINVAL;
@@ -1066,6 +1103,11 @@ static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
struct npa_aura_s *aura = &rsp->aura;
struct rvu *rvu = m->private;
+ if (is_cn20k(rvu->pdev)) {
+ print_npa_cn20k_aura_ctx(m, (struct npa_cn20k_aq_enq_rsp *)rsp);
+ return;
+ }
+
seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);
seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
@@ -1114,6 +1156,11 @@ static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
struct npa_pool_s *pool = &rsp->pool;
struct rvu *rvu = m->private;
+ if (is_cn20k(rvu->pdev)) {
+ print_npa_cn20k_pool_ctx(m, (struct npa_cn20k_aq_enq_rsp *)rsp);
+ return;
+ }
+
seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);
seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
@@ -1616,6 +1663,9 @@ static void print_tm_tree(struct seq_file *m,
int blkaddr;
u64 cfg;
+ if (!sq_ctx->ena)
+ return;
+
blkaddr = nix_hw->blkaddr;
schq = sq_ctx->smq;
@@ -1974,10 +2024,16 @@ static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
struct nix_hw *nix_hw = m->private;
struct rvu *rvu = nix_hw->rvu;
+ if (is_cn20k(rvu->pdev)) {
+ print_nix_cn20k_sq_ctx(m, (struct nix_cn20k_sq_ctx_s *)sq_ctx);
+ return;
+ }
+
if (!is_rvu_otx2(rvu)) {
print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
return;
}
+
seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
sq_ctx->sqe_way_mask, sq_ctx->cq);
seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
@@ -2068,7 +2124,9 @@ static void print_nix_cn10k_rq_ctx(struct seq_file *m,
seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
- seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
+ seq_printf(m, "W2: band_prof_id \t\t%d\n",
+ (u16)rq_ctx->band_prof_id_h << 10 | rq_ctx->band_prof_id);
+
seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n",
@@ -2190,6 +2248,11 @@ static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
struct nix_hw *nix_hw = m->private;
struct rvu *rvu = nix_hw->rvu;
+ if (is_cn20k(rvu->pdev)) {
+ print_nix_cn20k_cq_ctx(m, (struct nix_cn20k_aq_enq_rsp *)rsp);
+ return;
+ }
+
seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);
seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
@@ -2219,6 +2282,7 @@ static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
cq_ctx->qsize, cq_ctx->caching);
+
seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
cq_ctx->substream, cq_ctx->ena);
if (!is_rvu_otx2(rvu)) {
@@ -2580,7 +2644,10 @@ static void print_band_prof_ctx(struct seq_file *m,
(prof->rc_action == 1) ? "DROP" : "RED";
seq_printf(m, "W1: rc_action\t\t%s\n", str);
seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
- seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
+
+ seq_printf(m, "W1: band_prof_id\t%d\n",
+ (u16)prof->band_prof_id_h << 7 | prof->band_prof_id);
+
seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
@@ -2653,10 +2720,10 @@ static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
pcifunc = ipolicer->pfvf_map[idx];
if (!(pcifunc & RVU_PFVF_FUNC_MASK))
seq_printf(m, "Allocated to :: PF %d\n",
- rvu_get_pf(pcifunc));
+ rvu_get_pf(rvu->pdev, pcifunc));
else
seq_printf(m, "Allocated to :: PF %d VF %d\n",
- rvu_get_pf(pcifunc),
+ rvu_get_pf(rvu->pdev, pcifunc),
(pcifunc & RVU_PFVF_FUNC_MASK) - 1);
print_band_prof_ctx(m, &aq_rsp.prof);
}
@@ -2731,8 +2798,8 @@ static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
&rvu_dbg_nix_ndc_tx_hits_miss_fops);
debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
&rvu_dbg_nix_ndc_rx_hits_miss_fops);
- debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
- &rvu_dbg_nix_qsize_fops);
+ debugfs_create_file_aux_num("qsize", 0600, rvu->rvu_dbg.nix, rvu,
+ blkaddr, &rvu_dbg_nix_qsize_fops);
debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
&rvu_dbg_nix_band_prof_ctx_fops);
debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
@@ -2749,6 +2816,9 @@ static void rvu_dbg_npa_init(struct rvu *rvu)
&rvu_dbg_npa_aura_ctx_fops);
debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
&rvu_dbg_npa_pool_ctx_fops);
+
+	if (is_cn20k(rvu->pdev)) /* NDC not applicable for cn20k */
+ return;
debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
&rvu_dbg_npa_ndc_cache_fops);
debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
@@ -2881,28 +2951,14 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
return err;
}
-static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
+static int rvu_dbg_derive_lmacid(struct seq_file *s)
{
- struct dentry *current_dir;
- char *buf;
-
- current_dir = filp->file->f_path.dentry->d_parent;
- buf = strrchr(current_dir->d_name.name, 'c');
- if (!buf)
- return -EINVAL;
-
- return kstrtoint(buf + 1, 10, lmac_id);
+ return debugfs_get_aux_num(s->file);
}
-static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
+static int rvu_dbg_cgx_stat_display(struct seq_file *s, void *unused)
{
- int lmac_id, err;
-
- err = rvu_dbg_derive_lmacid(filp, &lmac_id);
- if (!err)
- return cgx_print_stats(filp, lmac_id);
-
- return err;
+ return cgx_print_stats(s, rvu_dbg_derive_lmacid(s));
}
RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
@@ -2960,18 +3016,103 @@ static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
return 0;
}
-static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
+static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *s, void *unused)
+{
+ return cgx_print_dmac_flt(s, rvu_dbg_derive_lmacid(s));
+}
+
+RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
+
+static int cgx_print_fwdata(struct seq_file *s, int lmac_id)
{
- int err, lmac_id;
+ struct cgx_lmac_fwdata_s *fwdata;
+ void *cgxd = s->private;
+ struct phy_s *phy;
+ struct rvu *rvu;
+ int cgx_id, i;
+
+ rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
+ if (!rvu)
+ return -ENODEV;
- err = rvu_dbg_derive_lmacid(filp, &lmac_id);
- if (!err)
- return cgx_print_dmac_flt(filp, lmac_id);
+ if (!rvu->fwdata)
+ return -EAGAIN;
- return err;
+ cgx_id = cgx_get_cgxid(cgxd);
+
+ if (rvu->hw->lmac_per_cgx == CGX_LMACS_USX)
+ fwdata = &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id];
+ else
+ fwdata = &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id];
+
+ seq_puts(s, "\nFIRMWARE SHARED:\n");
+ seq_puts(s, "\t\tSUPPORTED LINK INFORMATION\t\t\n");
+ seq_puts(s, "\t\t==========================\n");
+ seq_printf(s, "\t\t Link modes \t\t :%llx\n",
+ fwdata->supported_link_modes);
+ seq_printf(s, "\t\t Autoneg \t\t :%llx\n", fwdata->supported_an);
+ seq_printf(s, "\t\t FEC \t\t\t :%llx\n", fwdata->supported_fec);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\t\tADVERTISED LINK INFORMATION\t\t\n");
+ seq_puts(s, "\t\t==========================\n");
+ seq_printf(s, "\t\t Link modes \t\t :%llx\n",
+ (u64)fwdata->advertised_link_modes);
+ seq_printf(s, "\t\t Autoneg \t\t :%x\n", fwdata->advertised_an);
+ seq_printf(s, "\t\t FEC \t\t\t :%llx\n", fwdata->advertised_fec);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\t\tLMAC CONFIG\t\t\n");
+ seq_puts(s, "\t\t============\n");
+ seq_printf(s, "\t\t rw_valid \t\t :%x\n", fwdata->rw_valid);
+ seq_printf(s, "\t\t lmac_type \t\t :%x\n", fwdata->lmac_type);
+ seq_printf(s, "\t\t portm_idx \t\t :%x\n", fwdata->portm_idx);
+ seq_printf(s, "\t\t mgmt_port \t\t :%x\n", fwdata->mgmt_port);
+ seq_printf(s, "\t\t Link modes own \t :%llx\n",
+ (u64)fwdata->advertised_link_modes_own);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\n\t\tEEPROM DATA\n");
+ seq_puts(s, "\t\t===========\n");
+ seq_printf(s, "\t\t sff_id \t\t :%x\n", fwdata->sfp_eeprom.sff_id);
+ seq_puts(s, "\t\t data \t\t\t :\n");
+ seq_puts(s, "\t\t");
+ for (i = 0; i < SFP_EEPROM_SIZE; i++) {
+ seq_printf(s, "%x", fwdata->sfp_eeprom.buf[i]);
+ if ((i + 1) % 16 == 0) {
+ seq_puts(s, "\n");
+ seq_puts(s, "\t\t");
+ }
+ }
+ seq_puts(s, "\n");
+
+ phy = &fwdata->phy;
+ seq_puts(s, "\n\t\tPHY INFORMATION\n");
+ seq_puts(s, "\t\t===============\n");
+ seq_printf(s, "\t\t Mod type configurable \t\t :%x\n",
+ phy->misc.can_change_mod_type);
+ seq_printf(s, "\t\t Mod type \t\t\t :%x\n", phy->misc.mod_type);
+ seq_printf(s, "\t\t Support FEC \t\t\t :%x\n", phy->misc.has_fec_stats);
+ seq_printf(s, "\t\t RSFEC corrected words \t\t :%x\n",
+ phy->fec_stats.rsfec_corr_cws);
+ seq_printf(s, "\t\t RSFEC uncorrected words \t :%x\n",
+ phy->fec_stats.rsfec_uncorr_cws);
+ seq_printf(s, "\t\t BRFEC corrected words \t\t :%x\n",
+ phy->fec_stats.brfec_corr_blks);
+ seq_printf(s, "\t\t BRFEC uncorrected words \t :%x\n",
+ phy->fec_stats.brfec_uncorr_blks);
+ seq_puts(s, "\n");
+
+ return 0;
}
-RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
+static int rvu_dbg_cgx_fwdata_display(struct seq_file *s, void *unused)
+{
+ return cgx_print_fwdata(s, rvu_dbg_derive_lmacid(s));
+}
+
+RVU_DEBUG_SEQ_FOPS(cgx_fwdata, cgx_fwdata_display, NULL);
static void rvu_dbg_cgx_init(struct rvu *rvu)
{
@@ -3007,11 +3148,14 @@ static void rvu_dbg_cgx_init(struct rvu *rvu)
rvu->rvu_dbg.lmac =
debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
- debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
- cgx, &rvu_dbg_cgx_stat_fops);
- debugfs_create_file("mac_filter", 0600,
- rvu->rvu_dbg.lmac, cgx,
+ debugfs_create_file_aux_num("stats", 0600, rvu->rvu_dbg.lmac,
+ cgx, lmac_id, &rvu_dbg_cgx_stat_fops);
+ debugfs_create_file_aux_num("mac_filter", 0600,
+ rvu->rvu_dbg.lmac, cgx, lmac_id,
&rvu_dbg_cgx_dmac_flt_fops);
+ debugfs_create_file("fwdata", 0600,
+ rvu->rvu_dbg.lmac, cgx,
+ &rvu_dbg_cgx_fwdata_fops);
}
}
}
@@ -3033,10 +3177,10 @@ static void rvu_print_npc_mcam_info(struct seq_file *s,
if (!(pcifunc & RVU_PFVF_FUNC_MASK))
seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
- rvu_get_pf(pcifunc));
+ rvu_get_pf(rvu->pdev, pcifunc));
else
seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
- rvu_get_pf(pcifunc),
+ rvu_get_pf(rvu->pdev, pcifunc),
(pcifunc & RVU_PFVF_FUNC_MASK) - 1);
if (entry_acnt) {
@@ -3099,13 +3243,13 @@ static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
seq_puts(filp, "\n\t\t Current allocation\n");
seq_puts(filp, "\t\t====================\n");
for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
- pcifunc = (pf << RVU_PFVF_PF_SHIFT);
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
numvfs = (cfg >> 12) & 0xFF;
for (vf = 0; vf < numvfs; vf++) {
- pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, (vf + 1));
rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
}
}
@@ -3376,7 +3520,7 @@ static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
mutex_lock(&mcam->lock);
list_for_each_entry(iter, &mcam->mcam_rules, list) {
- pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+ pf = rvu_get_pf(rvu->pdev, iter->owner);
seq_printf(s, "\n\tInstalled by: PF%d ", pf);
if (iter->owner & RVU_PFVF_FUNC_MASK) {
@@ -3394,7 +3538,7 @@ static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
rvu_dbg_npc_mcam_show_flows(s, iter);
if (is_npc_intf_rx(iter->intf)) {
target = iter->rx_action.pf_func;
- pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+ pf = rvu_get_pf(rvu->pdev, target);
seq_printf(s, "\tForward to: PF%d ", pf);
if (target & RVU_PFVF_FUNC_MASK) {
@@ -3841,6 +3985,9 @@ static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
{
+ if (is_cn20k(rvu->pdev))
+ return "cn20k";
+
if (!is_rvu_otx2(rvu))
return "cn10k";
else
@@ -3858,6 +4005,9 @@ void rvu_dbg_init(struct rvu *rvu)
debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
rvu, &rvu_dbg_lmtst_map_table_fops);
+ debugfs_create_file("rvu_fwdata", 0444, rvu->rvu_dbg.root, rvu,
+ &rvu_dbg_rvu_fwdata_fops);
+
if (!cgx_get_cgxcnt_max())
goto create;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 7498ab429963..0f9953eaf1b0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -207,7 +207,7 @@ static void rvu_nix_unregister_interrupts(struct rvu *rvu)
rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false;
}
- for (i = NIX_AF_INT_VEC_AF_ERR; i < NIX_AF_INT_VEC_CNT; i++)
+ for (i = NIX_AF_INT_VEC_GEN; i < NIX_AF_INT_VEC_CNT; i++)
if (rvu->irq_allocated[offs + i]) {
free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
rvu->irq_allocated[offs + i] = false;
@@ -505,7 +505,9 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
rvu_reporters->nix_event_ctx = nix_event_context;
rvu_reporters->rvu_hw_nix_intr_reporter =
- devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_intr_reporter_ops, 0, rvu);
+ devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_hw_nix_intr_reporter_ops,
+ rvu);
if (IS_ERR(rvu_reporters->rvu_hw_nix_intr_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_nix_intr reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter));
@@ -513,7 +515,9 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
}
rvu_reporters->rvu_hw_nix_gen_reporter =
- devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_gen_reporter_ops, 0, rvu);
+ devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_hw_nix_gen_reporter_ops,
+ rvu);
if (IS_ERR(rvu_reporters->rvu_hw_nix_gen_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_nix_gen reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter));
@@ -521,7 +525,9 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
}
rvu_reporters->rvu_hw_nix_err_reporter =
- devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_err_reporter_ops, 0, rvu);
+ devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_hw_nix_err_reporter_ops,
+ rvu);
if (IS_ERR(rvu_reporters->rvu_hw_nix_err_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_nix_err reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter));
@@ -529,7 +535,9 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
}
rvu_reporters->rvu_hw_nix_ras_reporter =
- devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_ras_reporter_ops, 0, rvu);
+ devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_hw_nix_ras_reporter_ops,
+ rvu);
if (IS_ERR(rvu_reporters->rvu_hw_nix_ras_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_nix_ras reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter));
@@ -1051,7 +1059,9 @@ static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
rvu_reporters->npa_event_ctx = npa_event_context;
rvu_reporters->rvu_hw_npa_intr_reporter =
- devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_intr_reporter_ops, 0, rvu);
+ devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_hw_npa_intr_reporter_ops,
+ rvu);
if (IS_ERR(rvu_reporters->rvu_hw_npa_intr_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_npa_intr reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter));
@@ -1059,7 +1069,9 @@ static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
}
rvu_reporters->rvu_hw_npa_gen_reporter =
- devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_gen_reporter_ops, 0, rvu);
+ devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_hw_npa_gen_reporter_ops,
+ rvu);
if (IS_ERR(rvu_reporters->rvu_hw_npa_gen_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_npa_gen reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter));
@@ -1067,7 +1079,9 @@ static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
}
rvu_reporters->rvu_hw_npa_err_reporter =
- devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_err_reporter_ops, 0, rvu);
+ devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_hw_npa_err_reporter_ops,
+ rvu);
if (IS_ERR(rvu_reporters->rvu_hw_npa_err_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_npa_err reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter));
@@ -1075,7 +1089,9 @@ static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
}
rvu_reporters->rvu_hw_npa_ras_reporter =
- devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_ras_reporter_ops, 0, rvu);
+ devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_hw_npa_ras_reporter_ops,
+ rvu);
if (IS_ERR(rvu_reporters->rvu_hw_npa_ras_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_npa_ras reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter));
@@ -1217,7 +1233,8 @@ static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id,
}
static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
@@ -1238,11 +1255,13 @@ enum rvu_af_dl_param_id {
RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
+ RVU_AF_DEVLINK_PARAM_ID_NPC_DEF_RULE_CNTR_ENABLE,
RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF,
};
static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
@@ -1297,7 +1316,8 @@ static int rvu_af_npc_exact_feature_validate(struct devlink *devlink, u32 id,
}
static int rvu_af_dl_npc_mcam_high_zone_percent_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
@@ -1358,8 +1378,36 @@ static int rvu_af_dl_npc_mcam_high_zone_percent_validate(struct devlink *devlink
return 0;
}
+static int rvu_af_dl_npc_def_rule_cntr_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+
+ ctx->val.vbool = rvu->def_rule_cntr_en;
+
+ return 0;
+}
+
+static int rvu_af_dl_npc_def_rule_cntr_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ int err;
+
+ err = npc_config_cntr_default_entries(rvu, ctx->val.vbool);
+ if (!err)
+ rvu->def_rule_cntr_en = ctx->val.vbool;
+
+ return err;
+}
+
static int rvu_af_dl_nix_maxlf_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
@@ -1444,6 +1492,11 @@ static const struct devlink_param rvu_af_dl_params[] = {
rvu_af_dl_npc_mcam_high_zone_percent_get,
rvu_af_dl_npc_mcam_high_zone_percent_set,
rvu_af_dl_npc_mcam_high_zone_percent_validate),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_DEF_RULE_CNTR_ENABLE,
+ "npc_def_rule_cntr", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_npc_def_rule_cntr_get,
+ rvu_af_dl_npc_def_rule_cntr_set, NULL),
DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF,
"nix_maxlf", DEVLINK_PARAM_TYPE_U16,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
@@ -1468,6 +1521,9 @@ static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
struct rvu *rvu = rvu_dl->rvu;
struct rvu_switch *rswitch;
+ if (rvu->rep_mode)
+ return -EOPNOTSUPP;
+
rswitch = &rvu->rswitch;
*mode = rswitch->mode;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index da69350c6f76..2f485a930edd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -31,6 +31,7 @@ static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
u32 leaf_prof);
static const char *nix_get_ctx_name(int ctype);
+static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc);
enum mc_tbl_sz {
MC_TBL_SZ_256,
@@ -312,7 +313,10 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
	/* TLs aggregating traffic are shared across PF and VFs */
if (lvl >= hw->cap.nix_tx_aggr_lvl) {
- if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
+ if ((nix_get_tx_link(rvu, map_func) !=
+ nix_get_tx_link(rvu, pcifunc)) &&
+ (rvu_get_pf(rvu->pdev, map_func) !=
+ rvu_get_pf(rvu->pdev, pcifunc)))
return false;
else
return true;
@@ -336,7 +340,7 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
bool from_vf;
int err;
- pf = rvu_get_pf(pcifunc);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
type != NIX_INTF_TYPE_SDP)
return 0;
@@ -360,7 +364,6 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
rvu_npc_set_pkind(rvu, pkind, pfvf);
-
break;
case NIX_INTF_TYPE_LBK:
vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
@@ -414,7 +417,7 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
break;
case NIX_INTF_TYPE_SDP:
from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
- parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
+ parent_pf = &rvu->pf[rvu_get_pf(rvu->pdev, pcifunc)];
sdp_info = parent_pf->sdp_info;
if (!sdp_info) {
dev_err(rvu->dev, "Invalid sdp_info pointer\n");
@@ -567,9 +570,17 @@ void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc)
mutex_unlock(&rvu->rsrc_lock);
}
-int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
- struct nix_bp_cfg_req *req,
- struct msg_rsp *rsp)
+static u16 nix_get_channel(u16 chan, bool cpt_link)
+{
+	/* The CPT channel for a given link channel is always
+	 * assumed to be the link channel with BIT(11) set.
+ */
+ return cpt_link ? chan | BIT(11) : chan;
+}
+
+static int nix_bp_disable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct msg_rsp *rsp, bool cpt_link)
{
u16 pcifunc = req->hdr.pcifunc;
int blkaddr, pf, type, err;
@@ -577,13 +588,20 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
struct rvu_pfvf *pfvf;
struct nix_hw *nix_hw;
struct nix_bp *bp;
+ u16 chan_v;
u64 cfg;
- pf = rvu_get_pf(pcifunc);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
return 0;
+ if (is_sdp_pfvf(rvu, pcifunc))
+ type = NIX_INTF_TYPE_SDP;
+
+ if (cpt_link && !rvu->hw->cpt_links)
+ return 0;
+
pfvf = rvu_get_pfvf(rvu, pcifunc);
err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
if (err)
@@ -592,8 +610,9 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
bp = &nix_hw->bp;
chan_base = pfvf->rx_chan_base + req->chan_base;
for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
- rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
+ chan_v = nix_get_channel(chan, cpt_link);
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v));
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v),
cfg & ~BIT_ULL(16));
if (type == NIX_INTF_TYPE_LBK) {
@@ -612,6 +631,20 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
return 0;
}
+int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ return nix_bp_disable(rvu, req, rsp, false);
+}
+
+int rvu_mbox_handler_nix_cpt_bp_disable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ return nix_bp_disable(rvu, req, rsp, true);
+}
+
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
int type, int chan_id)
{
@@ -691,20 +724,22 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
return bpid;
}
-int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
- struct nix_bp_cfg_req *req,
- struct nix_bp_cfg_rsp *rsp)
+static int nix_bp_enable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct nix_bp_cfg_rsp *rsp,
+ bool cpt_link)
{
int blkaddr, pf, type, chan_id = 0;
u16 pcifunc = req->hdr.pcifunc;
struct rvu_pfvf *pfvf;
u16 chan_base, chan;
s16 bpid, bpid_base;
+ u16 chan_v;
u64 cfg;
- pf = rvu_get_pf(pcifunc);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
- if (is_sdp_pfvf(pcifunc))
+ if (is_sdp_pfvf(rvu, pcifunc))
type = NIX_INTF_TYPE_SDP;
/* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */
@@ -712,6 +747,9 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
type != NIX_INTF_TYPE_SDP)
return 0;
+ if (cpt_link && !rvu->hw->cpt_links)
+ return 0;
+
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
@@ -725,9 +763,11 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
return -EINVAL;
}
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
+ chan_v = nix_get_channel(chan, cpt_link);
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v));
cfg &= ~GENMASK_ULL(8, 0);
- rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v),
cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
chan_id++;
bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
@@ -745,6 +785,20 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
return 0;
}
+int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct nix_bp_cfg_rsp *rsp)
+{
+ return nix_bp_enable(rvu, req, rsp, false);
+}
+
+int rvu_mbox_handler_nix_cpt_bp_enable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct nix_bp_cfg_rsp *rsp)
+{
+ return nix_bp_enable(rvu, req, rsp, true);
+}
+
static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
u64 format, bool v4, u64 *fidx)
{
@@ -965,6 +1019,12 @@ static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req,
{
struct nix_cn10k_aq_enq_req *aq_req;
+ if (is_cn20k(rvu->pdev)) {
+ *smq = ((struct nix_cn20k_aq_enq_req *)req)->sq.smq;
+ *smq_mask = ((struct nix_cn20k_aq_enq_req *)req)->sq_mask.smq;
+ return;
+ }
+
if (!is_rvu_otx2(rvu)) {
aq_req = (struct nix_cn10k_aq_enq_req *)req;
*smq = aq_req->sq.smq;
@@ -1095,36 +1155,36 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
case NIX_AQ_INSTOP_WRITE:
if (req->ctype == NIX_AQ_CTYPE_RQ)
memcpy(mask, &req->rq_mask,
- sizeof(struct nix_rq_ctx_s));
+ NIX_MAX_CTX_SIZE);
else if (req->ctype == NIX_AQ_CTYPE_SQ)
memcpy(mask, &req->sq_mask,
- sizeof(struct nix_sq_ctx_s));
+ NIX_MAX_CTX_SIZE);
else if (req->ctype == NIX_AQ_CTYPE_CQ)
memcpy(mask, &req->cq_mask,
- sizeof(struct nix_cq_ctx_s));
+ NIX_MAX_CTX_SIZE);
else if (req->ctype == NIX_AQ_CTYPE_RSS)
memcpy(mask, &req->rss_mask,
- sizeof(struct nix_rsse_s));
+ NIX_MAX_CTX_SIZE);
else if (req->ctype == NIX_AQ_CTYPE_MCE)
memcpy(mask, &req->mce_mask,
- sizeof(struct nix_rx_mce_s));
+ NIX_MAX_CTX_SIZE);
else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
memcpy(mask, &req->prof_mask,
- sizeof(struct nix_bandprof_s));
+ NIX_MAX_CTX_SIZE);
fallthrough;
case NIX_AQ_INSTOP_INIT:
if (req->ctype == NIX_AQ_CTYPE_RQ)
- memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
+ memcpy(ctx, &req->rq, NIX_MAX_CTX_SIZE);
else if (req->ctype == NIX_AQ_CTYPE_SQ)
- memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
+ memcpy(ctx, &req->sq, NIX_MAX_CTX_SIZE);
else if (req->ctype == NIX_AQ_CTYPE_CQ)
- memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
+ memcpy(ctx, &req->cq, NIX_MAX_CTX_SIZE);
else if (req->ctype == NIX_AQ_CTYPE_RSS)
- memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
+ memcpy(ctx, &req->rss, NIX_MAX_CTX_SIZE);
else if (req->ctype == NIX_AQ_CTYPE_MCE)
- memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
+ memcpy(ctx, &req->mce, NIX_MAX_CTX_SIZE);
else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
- memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
+ memcpy(ctx, &req->prof, NIX_MAX_CTX_SIZE);
break;
case NIX_AQ_INSTOP_NOP:
case NIX_AQ_INSTOP_READ:
@@ -1189,22 +1249,22 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
if (req->op == NIX_AQ_INSTOP_READ) {
if (req->ctype == NIX_AQ_CTYPE_RQ)
memcpy(&rsp->rq, ctx,
- sizeof(struct nix_rq_ctx_s));
+ NIX_MAX_CTX_SIZE);
else if (req->ctype == NIX_AQ_CTYPE_SQ)
memcpy(&rsp->sq, ctx,
- sizeof(struct nix_sq_ctx_s));
+ NIX_MAX_CTX_SIZE);
else if (req->ctype == NIX_AQ_CTYPE_CQ)
memcpy(&rsp->cq, ctx,
- sizeof(struct nix_cq_ctx_s));
+ NIX_MAX_CTX_SIZE);
else if (req->ctype == NIX_AQ_CTYPE_RSS)
memcpy(&rsp->rss, ctx,
- sizeof(struct nix_rsse_s));
+ NIX_MAX_CTX_SIZE);
else if (req->ctype == NIX_AQ_CTYPE_MCE)
memcpy(&rsp->mce, ctx,
- sizeof(struct nix_rx_mce_s));
+ NIX_MAX_CTX_SIZE);
else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
memcpy(&rsp->prof, ctx,
- sizeof(struct nix_bandprof_s));
+ NIX_MAX_CTX_SIZE);
}
}
@@ -1235,8 +1295,8 @@ static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
/* Make copy of original context & mask which are required
* for resubmission
*/
- memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s));
- memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s));
+ memcpy(&aq_req.cq_mask, &req->cq_mask, NIX_MAX_CTX_SIZE);
+ memcpy(&aq_req.cq, &req->cq, NIX_MAX_CTX_SIZE);
/* exclude fields which HW can update */
aq_req.cq_mask.cq_err = 0;
@@ -1255,7 +1315,7 @@ static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
* updated fields are masked out for request and response
* comparison
*/
- for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64);
+ for (word = 0; word < NIX_MAX_CTX_SIZE / sizeof(u64);
word++) {
*(u64 *)((u8 *)&aq_rsp.cq + word * 8) &=
(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
@@ -1263,14 +1323,14 @@ static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
}
- if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s)))
+ if (memcmp(&aq_req.cq, &aq_rsp.cq, NIX_MAX_CTX_SIZE))
return NIX_AF_ERR_AQ_CTX_RETRY_WRITE;
return 0;
}
-static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
- struct nix_aq_enq_rsp *rsp)
+int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
+ struct nix_aq_enq_rsp *rsp)
{
struct nix_hw *nix_hw;
int err, retries = 5;
@@ -1614,8 +1674,14 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
cfg = NPC_TX_DEF_PKIND;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
+ if (is_rep_dev(rvu, pcifunc)) {
+ pfvf->tx_chan_base = RVU_SWITCH_LBK_CHAN;
+ pfvf->tx_chan_cnt = 1;
+ goto exit;
+ }
+
intf = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
- if (is_sdp_pfvf(pcifunc))
+ if (is_sdp_pfvf(rvu, pcifunc))
intf = NIX_INTF_TYPE_SDP;
err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp,
@@ -1684,6 +1750,9 @@ int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
if (nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
+ if (is_rep_dev(rvu, pcifunc))
+ goto free_lf;
+
if (req->flags & NIX_LF_DISABLE_FLOWS)
rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
else
@@ -1695,6 +1764,7 @@ int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
nix_interface_deinit(rvu, pcifunc, nixlf);
+free_lf:
/* Reset this NIX LF */
err = rvu_lf_reset(rvu, block, nixlf);
if (err) {
@@ -1735,7 +1805,8 @@ int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
if (rc < 0) {
dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
- rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
+ rvu_get_pf(rvu->pdev, pcifunc),
+ pcifunc & RVU_PFVF_FUNC_MASK);
return NIX_AF_ERR_MARK_CFG_FAIL;
}
@@ -1987,7 +2058,7 @@ static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr,
static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
{
struct rvu_hwinfo *hw = rvu->hw;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u8 cgx_id = 0, lmac_id = 0;
if (is_lbk_vf(rvu, pcifunc)) {/* LBK links */
@@ -2005,9 +2076,10 @@ static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
int link, int *start, int *end)
{
struct rvu_hwinfo *hw = rvu->hw;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
- if (is_lbk_vf(rvu, pcifunc)) { /* LBK links */
+ /* LBK links */
+ if (is_lbk_vf(rvu, pcifunc) || is_rep_dev(rvu, pcifunc)) {
*start = hw->cap.nix_txsch_per_cgx_lmac * link;
*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
@@ -2362,7 +2434,7 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
{
struct nix_smq_flush_ctx *smq_flush_ctx;
int err, restore_tx_en = 0, i;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u8 cgx_id = 0, lmac_id = 0;
u16 tl2_tl3_link_schq;
u8 link, link_level;
@@ -2756,11 +2828,11 @@ void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc,
{
struct rvu_hwinfo *hw = rvu->hw;
int lbk_link_start, lbk_links;
- u8 pf = rvu_get_pf(pcifunc);
+ u8 pf = rvu_get_pf(rvu->pdev, pcifunc);
int schq;
u64 cfg;
- if (!is_pf_cgxmapped(rvu, pf))
+ if (!is_pf_cgxmapped(rvu, pf) && !is_rep_dev(rvu, pcifunc))
return;
cfg = enable ? (BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0;
@@ -3126,7 +3198,8 @@ static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
if (err) {
dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
- rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
+ rvu_get_pf(rvu->pdev, pcifunc),
+ pcifunc & RVU_PFVF_FUNC_MASK);
return err;
}
return 0;
@@ -3394,7 +3467,7 @@ int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
dev_err(rvu->dev,
"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
__func__, idx, mce_list->max,
- pcifunc >> RVU_PFVF_PF_SHIFT);
+ rvu_get_pf(rvu->pdev, pcifunc));
return -EINVAL;
}
@@ -3446,7 +3519,8 @@ void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
struct rvu_pfvf *pfvf;
if (!hw->cap.nix_rx_multicast ||
- !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
+ !is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev,
+ pcifunc & ~RVU_PFVF_FUNC_MASK))) {
*mce_list = NULL;
*mce_idx = 0;
return;
@@ -3480,13 +3554,13 @@ static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
int pf;
/* skip multicast pkt replication for AF's VFs & SDP links */
- if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(rvu, pcifunc))
return 0;
if (!hw->cap.nix_rx_multicast)
return 0;
- pf = rvu_get_pf(pcifunc);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
if (!is_pf_cgxmapped(rvu, pf))
return 0;
@@ -3555,7 +3629,7 @@ static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
for (idx = 0; idx < (numvfs + 1); idx++) {
/* idx-0 is for PF, followed by VFs */
- pcifunc = (pf << RVU_PFVF_PF_SHIFT);
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
pcifunc |= idx;
/* Add dummy entries now, so that we don't have to check
* for whether AQ_OP should be INIT/WRITE later on.
@@ -4393,8 +4467,6 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
ether_addr_copy(pfvf->default_mac, req->mac_addr);
- rvu_switch_update_rules(rvu, pcifunc);
-
return 0;
}
@@ -4492,7 +4564,7 @@ int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
static void nix_find_link_frs(struct rvu *rvu,
struct nix_frs_cfg *req, u16 pcifunc)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
struct rvu_pfvf *pfvf;
int maxlen, minlen;
int numvfs, hwvf;
@@ -4539,7 +4611,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
int blkaddr, link = -1;
struct nix_hw *nix_hw;
struct rvu_pfvf *pfvf;
@@ -4555,7 +4627,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
if (!nix_hw)
return NIX_AF_ERR_INVALID_NIXBLK;
- if (is_lbk_vf(rvu, pcifunc))
+ if (is_lbk_vf(rvu, pcifunc) || is_rep_dev(rvu, pcifunc))
rvu_get_lbk_link_max_frs(rvu, &max_mtu);
else
rvu_get_lmac_link_max_frs(rvu, &max_mtu);
@@ -4583,6 +4655,8 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
/* For VFs of PF0 ingress is LBK port, so config LBK link */
pfvf = rvu_get_pfvf(rvu, pcifunc);
link = hw->cgx_links + pfvf->lbkid;
+ } else if (is_rep_dev(rvu, pcifunc)) {
+ link = hw->cgx_links + 0;
}
if (link < 0)
@@ -4656,6 +4730,9 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
+ /* Set SDP link credit */
+ rvu_write64(rvu, blkaddr, NIX_AF_SDP_LINK_CREDIT, SDP_LINK_CREDIT);
+
/* Set default min/max packet lengths allowed on NIX Rx links.
*
* With HW reset minlen value of 60byte, HW will treat ARP pkts
@@ -4674,7 +4751,7 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
if (hw->sdp_links) {
link = hw->cgx_links + hw->lbk_links;
rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
- SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
+ SDP_HW_MAX_FRS << 16 | SDP_HW_MIN_FRS);
}
/* Get MCS external bypass status for CN10K-B */
@@ -4979,7 +5056,7 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
(ltdefs->rx_apad1.ltype_match << 4) |
ltdefs->rx_apad1.ltype_mask);
- /* Receive ethertype defination register defines layer
+ /* Receive ethertype definition register defines layer
* information in NPC_RESULT_S to identify the Ethertype
* location in L2 header. Used for Ethertype overwriting
* in inline IPsec flow.
@@ -5166,7 +5243,7 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
{
u16 pcifunc = req->hdr.pcifunc;
struct rvu_pfvf *pfvf;
- int nixlf, err;
+ int nixlf, err, pf;
err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
if (err)
@@ -5182,7 +5259,11 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
pfvf = rvu_get_pfvf(rvu, pcifunc);
set_bit(NIXLF_INITIALIZED, &pfvf->flags);
- rvu_switch_update_rules(rvu, pcifunc);
+ rvu_switch_update_rules(rvu, pcifunc, true);
+
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
+ if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode)
+ rvu_rep_notify_pfvf_state(rvu, pcifunc, true);
return rvu_cgx_start_stop_io(rvu, pcifunc, true);
}
@@ -5192,7 +5273,7 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
{
u16 pcifunc = req->hdr.pcifunc;
struct rvu_pfvf *pfvf;
- int nixlf, err;
+ int nixlf, err, pf;
err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
if (err)
@@ -5210,8 +5291,12 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
if (err)
return err;
+ rvu_switch_update_rules(rvu, pcifunc, false);
rvu_cgx_tx_enable(rvu, pcifunc, true);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
+ if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode)
+ rvu_rep_notify_pfvf_state(rvu, pcifunc, false);
return 0;
}
@@ -5221,7 +5306,7 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct hwctx_disable_req ctx_req;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
u64 sa_base;
@@ -5239,6 +5324,9 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
+ if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode)
+ rvu_rep_notify_pfvf_state(rvu, pcifunc, false);
+
rvu_cgx_start_stop_io(rvu, pcifunc, false);
if (pfvf->sq_ctx) {
@@ -5307,7 +5395,7 @@ static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
int nixlf;
u64 cfg;
- pf = rvu_get_pf(pcifunc);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
return 0;
@@ -5730,6 +5818,8 @@ static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw)
}
}
+#define NIX_BW_PROF_HI_MASK GENMASK(10, 7)
+
static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
struct nix_hw *nix_hw, u16 pcifunc)
{
@@ -5768,7 +5858,8 @@ static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
return -EINVAL;
ipolicer = &nix_hw->ipolicer[hi_layer];
- prof_idx = req->prof.band_prof_id;
+ prof_idx = FIELD_PREP(NIX_BW_PROF_HI_MASK, req->prof.band_prof_id_h);
+ prof_idx |= req->prof.band_prof_id;
if (prof_idx >= ipolicer->band_prof.max ||
ipolicer->pfvf_map[prof_idx] != pcifunc)
return -EINVAL;
@@ -5933,8 +6024,10 @@ static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
aq_req->op = NIX_AQ_INSTOP_WRITE;
aq_req->qidx = leaf_prof;
- aq_req->prof.band_prof_id = mid_prof;
+ aq_req->prof.band_prof_id = mid_prof & 0x7F;
aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
+ aq_req->prof.band_prof_id_h = FIELD_GET(NIX_BW_PROF_HI_MASK, mid_prof);
+ aq_req->prof_mask.band_prof_id_h = GENMASK(3, 0);
aq_req->prof.hl_en = 1;
aq_req->prof_mask.hl_en = 1;
@@ -5943,6 +6036,8 @@ static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
(struct nix_aq_enq_rsp *)aq_rsp);
}
+#define NIX_RQ_PROF_HI_MASK GENMASK(13, 10)
+
int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
u16 rq_idx, u16 match_id)
{
@@ -5974,7 +6069,8 @@ int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
return 0;
/* Get the bandwidth profile ID mapped to this RQ */
- leaf_prof = aq_rsp.rq.band_prof_id;
+ leaf_prof = FIELD_PREP(NIX_RQ_PROF_HI_MASK, aq_rsp.rq.band_prof_id_h);
+ leaf_prof |= aq_rsp.rq.band_prof_id;
ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
ipolicer->match_id[leaf_prof] = match_id;
@@ -6012,7 +6108,10 @@ int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
	 * to different RQs and marked with the same match_id
	 * are rate limited in an aggregate fashion
*/
- mid_prof = aq_rsp.prof.band_prof_id;
+ mid_prof = FIELD_PREP(NIX_BW_PROF_HI_MASK,
+ aq_rsp.prof.band_prof_id_h);
+ mid_prof |= aq_rsp.prof.band_prof_id;
+
rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
&aq_req, &aq_rsp,
leaf_prof, mid_prof);
@@ -6134,7 +6233,8 @@ static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
if (!aq_rsp.prof.hl_en)
return;
- mid_prof = aq_rsp.prof.band_prof_id;
+ mid_prof = FIELD_PREP(NIX_BW_PROF_HI_MASK, aq_rsp.prof.band_prof_id_h);
+ mid_prof |= aq_rsp.prof.band_prof_id;
ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
ipolicer->ref_count[mid_prof]--;
/* If ref_count is zero, free mid layer profile */
@@ -6534,3 +6634,19 @@ unlock_grp:
return ret;
}
+
+/* On CN10k and older series of silicons, hardware may incorrectly
+ * assert XOFF on certain channels. Issue a write on NIX_AF_RX_CHANX_CFG
+ * to broadcacst XON on the same.
+ */
+void rvu_block_bcast_xon(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_block *block = &rvu->hw->block[blkaddr];
+ u64 cfg;
+
+ if (!block->implemented || is_cn20k(rvu->pdev))
+ return;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(0));
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(0), cfg);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
index 4f5ca5ab13a4..e2a33e46b48a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -464,6 +464,23 @@ int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
return 0;
}
+static void npa_aq_ndc_config(struct rvu *rvu, struct rvu_block *block)
+{
+ u64 cfg;
+
+ if (is_cn20k(rvu->pdev)) /* NDC not applicable to cn20k */
+ return;
+
+ /* Do not bypass NDC cache */
+ cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
+ cfg &= ~0x03DULL;
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+ /* Disable caching of stack pages */
+ cfg |= 0x10ULL;
+#endif
+ rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);
+}
+
static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
{
u64 cfg;
@@ -479,14 +496,7 @@ static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
#endif
- /* Do not bypass NDC cache */
- cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
- cfg &= ~0x03DULL;
-#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
- /* Disable caching of stack pages */
- cfg |= 0x10ULL;
-#endif
- rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);
+ npa_aq_ndc_config(rvu, block);
/* For CN10K NPA BATCH DMA set 35 cache lines */
if (!is_rvu_otx2(rvu)) {
@@ -567,6 +577,9 @@ int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr)
int bank, max_bank, line, max_line, err;
u64 reg, ndc_af_const;
+ if (is_cn20k(rvu->pdev)) /* NDC not applicable to cn20k */
+ return 0;
+
/* Set the ENABLE bit(63) to '0' */
reg = rvu_read64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL);
rvu_write64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, reg & GENMASK_ULL(62, 0));
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 97722ce8c4cb..c7c70429eb6c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -147,7 +147,9 @@ static int npc_get_ucast_mcam_index(struct npc_mcam *mcam, u16 pcifunc,
int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
u16 pcifunc, int nixlf, int type)
{
- int pf = rvu_get_pf(pcifunc);
+ struct rvu_hwinfo *hw = container_of(mcam, struct rvu_hwinfo, mcam);
+ struct rvu *rvu = hw->rvu;
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
int index;
/* Check if this is for a PF */
@@ -698,7 +700,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
/* RX_ACTION set to MCAST for CGX PF's */
if (hw->cap.nix_rx_multicast && pfvf->use_mce_list &&
- is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
+ is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) {
*(u64 *)&action = 0;
action.op = NIX_RX_ACTIONOP_MCAST;
pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
@@ -820,24 +822,6 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
-void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
- bool enable)
-{
- struct npc_mcam *mcam = &rvu->hw->mcam;
- int blkaddr, index;
-
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
- if (blkaddr < 0)
- return;
-
- /* Get 'pcifunc' of PF device */
- pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
-
- index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
- NIXLF_BCAST_ENTRY);
- npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
-}
-
void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
u64 chan)
{
@@ -1125,6 +1109,7 @@ void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
int nixlf, bool enable)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct npc_mcam *mcam = &rvu->hw->mcam;
int index, blkaddr;
@@ -1133,9 +1118,12 @@ static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
return;
/* Ucast MCAM match entry of this PF/VF */
- index = npc_get_nixlf_mcam_index(mcam, pcifunc,
- nixlf, NIXLF_UCAST_ENTRY);
- npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+ if (npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC),
+ pfvf->nix_rx_intf)) {
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+ }
/* Nothing to do for VFs, on platforms where pkt replication
* is not supported
@@ -2691,6 +2679,49 @@ void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx)
npc_mcam_set_bit(mcam, entry_idx);
}
+int npc_config_cntr_default_entries(struct rvu *rvu, bool enable)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct npc_install_flow_rsp rsp;
+ struct rvu_npc_mcam_rule *rule;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return -EINVAL;
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (!is_mcam_entry_enabled(rvu, mcam, blkaddr, rule->entry))
+ continue;
+ if (!rule->default_rule)
+ continue;
+ if (enable && !rule->has_cntr) { /* Alloc and map new counter */
+ __rvu_mcam_add_counter_to_rule(rvu, rule->owner,
+ rule, &rsp);
+ if (rsp.counter < 0) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate cntr for default rule (err=%d)\n",
+ __func__, rsp.counter);
+ break;
+ }
+ npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ rule->entry, rsp.counter);
+ /* Reset counter before use */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MATCH_STATX(rule->cntr), 0x0);
+ }
+
+ /* Free and unmap counter */
+ if (!enable && rule->has_cntr)
+ __rvu_mcam_remove_counter_from_rule(rvu, rule->owner,
+ rule);
+ }
+ mutex_unlock(&mcam->lock);
+
+ return 0;
+}
+
int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
struct npc_mcam_alloc_entry_req *req,
struct npc_mcam_alloc_entry_rsp *rsp)
@@ -2975,9 +3006,9 @@ int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu,
return rc;
}
-int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
- struct npc_mcam_alloc_counter_req *req,
- struct npc_mcam_alloc_counter_rsp *rsp)
+static int __npc_mcam_alloc_counter(struct rvu *rvu,
+ struct npc_mcam_alloc_counter_req *req,
+ struct npc_mcam_alloc_counter_rsp *rsp)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
u16 pcifunc = req->hdr.pcifunc;
@@ -2998,11 +3029,9 @@ int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
if (!req->contig && req->count > NPC_MAX_NONCONTIG_COUNTERS)
return NPC_MCAM_INVALID_REQ;
- mutex_lock(&mcam->lock);
/* Check if unused counters are available or not */
if (!rvu_rsrc_free_count(&mcam->counters)) {
- mutex_unlock(&mcam->lock);
return NPC_MCAM_ALLOC_FAILED;
}
@@ -3035,12 +3064,27 @@ int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
}
}
- mutex_unlock(&mcam->lock);
return 0;
}
-int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
- struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp)
+int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
+ struct npc_mcam_alloc_counter_req *req,
+ struct npc_mcam_alloc_counter_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int err;
+
+ mutex_lock(&mcam->lock);
+
+ err = __npc_mcam_alloc_counter(rvu, req, rsp);
+
+ mutex_unlock(&mcam->lock);
+ return err;
+}
+
+static int __npc_mcam_free_counter(struct rvu *rvu,
+ struct npc_mcam_oper_counter_req *req,
+ struct msg_rsp *rsp)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
u16 index, entry = 0;
@@ -3050,10 +3094,8 @@ int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
if (blkaddr < 0)
return NPC_MCAM_INVALID_REQ;
- mutex_lock(&mcam->lock);
err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
if (err) {
- mutex_unlock(&mcam->lock);
return err;
}
@@ -3077,10 +3119,66 @@ int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
index, req->cntr);
}
- mutex_unlock(&mcam->lock);
return 0;
}
+int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
+ struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int err;
+
+ mutex_lock(&mcam->lock);
+
+ err = __npc_mcam_free_counter(rvu, req, rsp);
+
+ mutex_unlock(&mcam->lock);
+
+ return err;
+}
+
+void __rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc,
+ struct rvu_npc_mcam_rule *rule)
+{
+ struct npc_mcam_oper_counter_req free_req = { 0 };
+ struct msg_rsp free_rsp;
+
+ if (!rule->has_cntr)
+ return;
+
+ free_req.hdr.pcifunc = pcifunc;
+ free_req.cntr = rule->cntr;
+
+ __npc_mcam_free_counter(rvu, &free_req, &free_rsp);
+ rule->has_cntr = false;
+}
+
+void __rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
+ struct rvu_npc_mcam_rule *rule,
+ struct npc_install_flow_rsp *rsp)
+{
+ struct npc_mcam_alloc_counter_req cntr_req = { 0 };
+ struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 };
+ int err;
+
+ cntr_req.hdr.pcifunc = pcifunc;
+ cntr_req.contig = true;
+ cntr_req.count = 1;
+
+	/* We try to allocate a counter to track the stats of this
+	 * rule. If a counter could not be allocated then proceed
+	 * without one because counters are more limited than entries.
+ */
+ err = __npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp);
+ if (!err && cntr_rsp.count) {
+ rule->cntr = cntr_rsp.cntr;
+ rule->has_cntr = true;
+ rsp->counter = rule->cntr;
+ } else {
+ rsp->counter = err;
+ }
+}
+
int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu,
struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp)
{
@@ -3338,7 +3436,7 @@ int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
int blkaddr, nixlf, rc, intf_mode;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u64 rxpkind, txpkind;
u8 cgx_id, lmac_id;
@@ -3478,3 +3576,33 @@ int rvu_mbox_handler_npc_mcam_entry_stats(struct rvu *rvu,
return 0;
}
+
+void rvu_npc_clear_ucast_entry(struct rvu *rvu, int pcifunc, int nixlf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule;
+ int ucast_idx, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, ucast_idx, false);
+
+ npc_set_mcam_action(rvu, mcam, blkaddr, ucast_idx, 0);
+
+ npc_clear_mcam_entry(rvu, mcam, blkaddr, ucast_idx);
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (rule->entry == ucast_idx) {
+ list_del(&rule->list);
+ kfree(rule);
+ break;
+ }
+ }
+ mutex_unlock(&mcam->lock);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 150635de2bd5..b56395ac5a74 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -606,8 +606,8 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
if (!npc_check_field(rvu, blkaddr, NPC_LB, intf))
*features &= ~BIT_ULL(NPC_OUTER_VID);
- /* Set SPI flag only if AH/ESP and IPSEC_SPI are in the key */
- if (npc_check_field(rvu, blkaddr, NPC_IPSEC_SPI, intf) &&
+	/* Allow extracting the SPI field from AH and ESP headers at the same offset */
+ if (npc_is_field_present(rvu, NPC_IPSEC_SPI, intf) &&
(*features & (BIT_ULL(NPC_IPPROTO_ESP) | BIT_ULL(NPC_IPPROTO_AH))))
*features |= BIT_ULL(NPC_IPSEC_SPI);
@@ -1081,44 +1081,26 @@ static void rvu_mcam_add_rule(struct npc_mcam *mcam,
static void rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc,
struct rvu_npc_mcam_rule *rule)
{
- struct npc_mcam_oper_counter_req free_req = { 0 };
- struct msg_rsp free_rsp;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
- if (!rule->has_cntr)
- return;
+ mutex_lock(&mcam->lock);
- free_req.hdr.pcifunc = pcifunc;
- free_req.cntr = rule->cntr;
+ __rvu_mcam_remove_counter_from_rule(rvu, pcifunc, rule);
- rvu_mbox_handler_npc_mcam_free_counter(rvu, &free_req, &free_rsp);
- rule->has_cntr = false;
+ mutex_unlock(&mcam->lock);
}
static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
struct rvu_npc_mcam_rule *rule,
struct npc_install_flow_rsp *rsp)
{
- struct npc_mcam_alloc_counter_req cntr_req = { 0 };
- struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 };
- int err;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
- cntr_req.hdr.pcifunc = pcifunc;
- cntr_req.contig = true;
- cntr_req.count = 1;
+ mutex_lock(&mcam->lock);
- /* we try to allocate a counter to track the stats of this
- * rule. If counter could not be allocated then proceed
- * without counter because counters are limited than entries.
- */
- err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req,
- &cntr_rsp);
- if (!err && cntr_rsp.count) {
- rule->cntr = cntr_rsp.cntr;
- rule->has_cntr = true;
- rsp->counter = rule->cntr;
- } else {
- rsp->counter = err;
- }
+ __rvu_mcam_add_counter_to_rule(rvu, pcifunc, rule, rsp);
+
+ mutex_unlock(&mcam->lock);
}
static int npc_mcast_update_action_index(struct rvu *rvu, struct npc_install_flow_req *req,
@@ -1416,6 +1398,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
struct npc_install_flow_rsp *rsp)
{
bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK);
+ bool from_rep_dev = !!is_rep_dev(rvu, req->hdr.pcifunc);
struct rvu_switch *rswitch = &rvu->rswitch;
int blkaddr, nixlf, err;
struct rvu_pfvf *pfvf;
@@ -1469,18 +1452,21 @@ process_flow:
* hence modify pcifunc accordingly.
*/
- /* AF installing for a PF/VF */
- if (!req->hdr.pcifunc)
+ if (!req->hdr.pcifunc) {
+ /* AF installing for a PF/VF */
target = req->vf;
- /* PF installing for its VF */
- else if (!from_vf && req->vf) {
+ } else if (!from_vf && req->vf && !from_rep_dev) {
+ /* PF installing for its VF */
target = (req->hdr.pcifunc & ~RVU_PFVF_FUNC_MASK) | req->vf;
pf_set_vfs_mac = req->default_rule &&
(req->features & BIT_ULL(NPC_DMAC));
- }
- /* msg received from PF/VF */
- else
+ } else if (from_rep_dev && req->vf) {
+ /* Representor device installing for a representee */
+ target = req->vf;
+ } else {
+ /* msg received from PF/VF */
target = req->hdr.pcifunc;
+ }
/* ignore chan_mask in case pf func is not AF, revisit later */
if (!is_pffunc_af(req->hdr.pcifunc))
@@ -1492,8 +1478,10 @@ process_flow:
pfvf = rvu_get_pfvf(rvu, target);
+ if (from_rep_dev)
+ req->channel = pfvf->rx_chan_base;
/* PF installing for its VF */
- if (req->hdr.pcifunc && !from_vf && req->vf)
+ if (req->hdr.pcifunc && !from_vf && req->vf && !from_rep_dev)
set_bit(PF_SET_VF_CFG, &pfvf->flags);
/* update req destination mac addr */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
index d2661e7fabdb..999f6d93c7fe 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
@@ -1465,7 +1465,7 @@ static int rvu_npc_exact_update_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_
int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
{
struct npc_exact_table *table;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u8 cgx_id, lmac_id;
u32 drop_mcam_idx;
bool *promisc;
@@ -1512,7 +1512,7 @@ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
{
struct npc_exact_table *table;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u8 cgx_id, lmac_id;
u32 drop_mcam_idx;
bool *promisc;
@@ -1560,7 +1560,7 @@ int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
struct msg_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u32 seq_id = req->index;
struct rvu_pfvf *pfvf;
u8 cgx_id, lmac_id;
@@ -1593,7 +1593,7 @@ int rvu_npc_exact_mac_addr_update(struct rvu *rvu,
struct cgx_mac_addr_update_req *req,
struct cgx_mac_addr_update_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
struct npc_exact_table_entry *entry;
struct npc_exact_table *table;
struct rvu_pfvf *pfvf;
@@ -1675,7 +1675,7 @@ int rvu_npc_exact_mac_addr_add(struct rvu *rvu,
struct cgx_mac_addr_add_req *req,
struct cgx_mac_addr_add_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
struct rvu_pfvf *pfvf;
u8 cgx_id, lmac_id;
int rc = 0;
@@ -1711,7 +1711,7 @@ int rvu_npc_exact_mac_addr_del(struct rvu *rvu,
struct cgx_mac_addr_del_req *req,
struct msg_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
int rc;
rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
@@ -1736,7 +1736,7 @@ int rvu_npc_exact_mac_addr_del(struct rvu *rvu,
int rvu_npc_exact_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u32 seq_id = req->index;
struct rvu_pfvf *pfvf;
u8 cgx_id, lmac_id;
@@ -2001,7 +2001,7 @@ int rvu_npc_exact_init(struct rvu *rvu)
}
/* Filter rules are only for PF */
- pcifunc = RVU_PFFUNC(i, 0);
+ pcifunc = RVU_PFFUNC(rvu->pdev, i, 0);
dev_dbg(rvu->dev,
"%s:Drop rule cgx=%d lmac=%d chan(val=0x%llx, mask=0x%llx\n",
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
index 57a09328d46b..cb25cf478f1f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
@@ -139,9 +139,7 @@ static struct npc_mcam_kex_hash npc_mkex_hash_default __maybe_unused = {
#define NPC_MCAM_DROP_RULE_MAX 30
#define NPC_MCAM_SDP_DROP_RULE_IDX 0
-#define RVU_PFFUNC(pf, func) \
- ((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \
- (((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT))
+#define RVU_PFFUNC(pdev, pf, func) rvu_make_pcifunc(pdev, pf, func)
enum npc_exact_opc_type {
NPC_EXACT_OPC_MEM,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 2b299fa85159..62cdc714ba57 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -445,6 +445,7 @@
#define NIX_CONST_MAX_BPIDS GENMASK_ULL(23, 12)
#define NIX_CONST_SDP_CHANS GENMASK_ULL(11, 0)
+#define NIX_VLAN_ETYPE_MASK GENMASK_ULL(63, 48)
#define NIX_AF_MDQ_PARENT_MASK GENMASK_ULL(24, 16)
#define NIX_AF_TL4_PARENT_MASK GENMASK_ULL(23, 16)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
new file mode 100644
index 000000000000..4415d0ce9aef
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
@@ -0,0 +1,469 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu.h"
+#include "rvu_reg.h"
+
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+static struct _req_type __maybe_unused \
+*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid) \
+{ \
+ struct _req_type *req; \
+ \
+ req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
+ &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
+ sizeof(struct _rsp_type)); \
+ if (!req) \
+ return NULL; \
+ req->hdr.sig = OTX2_MBOX_REQ_SIG; \
+ req->hdr.id = _id; \
+ return req; \
+}
+
+MBOX_UP_REP_MESSAGES
+#undef M
+
+static int rvu_rep_up_notify(struct rvu *rvu, struct rep_event *event)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, event->pcifunc);
+ struct rep_event *msg;
+ int pf;
+
+ pf = rvu_get_pf(rvu->pdev, event->pcifunc);
+
+ if (event->event & RVU_EVENT_MAC_ADDR_CHANGE)
+ ether_addr_copy(pfvf->mac_addr, event->evt_data.mac);
+
+ mutex_lock(&rvu->mbox_lock);
+ msg = otx2_mbox_alloc_msg_rep_event_up_notify(rvu, pf);
+ if (!msg) {
+ mutex_unlock(&rvu->mbox_lock);
+ return -ENOMEM;
+ }
+
+ msg->hdr.pcifunc = event->pcifunc;
+ msg->event = event->event;
+
+ memcpy(&msg->evt_data, &event->evt_data, sizeof(struct rep_evt_data));
+
+ otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf);
+
+ otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);
+
+ otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
+
+ mutex_unlock(&rvu->mbox_lock);
+ return 0;
+}
+
+static void rvu_rep_wq_handler(struct work_struct *work)
+{
+ struct rvu *rvu = container_of(work, struct rvu, rep_evt_work);
+ struct rep_evtq_ent *qentry;
+ struct rep_event *event;
+ unsigned long flags;
+
+ do {
+ spin_lock_irqsave(&rvu->rep_evtq_lock, flags);
+ qentry = list_first_entry_or_null(&rvu->rep_evtq_head,
+ struct rep_evtq_ent,
+ node);
+ if (qentry)
+ list_del(&qentry->node);
+
+ spin_unlock_irqrestore(&rvu->rep_evtq_lock, flags);
+ if (!qentry)
+ break; /* nothing more to process */
+
+ event = &qentry->event;
+
+ rvu_rep_up_notify(rvu, event);
+ kfree(qentry);
+ } while (1);
+}
+
+int rvu_mbox_handler_rep_event_notify(struct rvu *rvu, struct rep_event *req,
+ struct msg_rsp *rsp)
+{
+ struct rep_evtq_ent *qentry;
+
+ qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
+ if (!qentry)
+ return -ENOMEM;
+
+ qentry->event = *req;
+ spin_lock(&rvu->rep_evtq_lock);
+ list_add_tail(&qentry->node, &rvu->rep_evtq_head);
+ spin_unlock(&rvu->rep_evtq_lock);
+ queue_work(rvu->rep_evt_wq, &rvu->rep_evt_work);
+ return 0;
+}
+
+int rvu_rep_notify_pfvf_state(struct rvu *rvu, u16 pcifunc, bool enable)
+{
+ struct rep_event *req;
+ int pf;
+
+ if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc)))
+ return 0;
+
+ pf = rvu_get_pf(rvu->pdev, rvu->rep_pcifunc);
+
+ mutex_lock(&rvu->mbox_lock);
+ req = otx2_mbox_alloc_msg_rep_event_up_notify(rvu, pf);
+ if (!req) {
+ mutex_unlock(&rvu->mbox_lock);
+ return -ENOMEM;
+ }
+
+ req->hdr.pcifunc = rvu->rep_pcifunc;
+ req->event |= RVU_EVENT_PFVF_STATE;
+ req->pcifunc = pcifunc;
+ req->evt_data.vf_state = enable;
+
+ otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf);
+ otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);
+
+ mutex_unlock(&rvu->mbox_lock);
+ return 0;
+}
+
+#define RVU_LF_RX_STATS(reg) \
+ rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, reg))
+
+#define RVU_LF_TX_STATS(reg) \
+ rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, reg))
+
+int rvu_mbox_handler_nix_lf_stats(struct rvu *rvu,
+ struct nix_stats_req *req,
+ struct nix_stats_rsp *rsp)
+{
+ u16 pcifunc = req->pcifunc;
+ int nixlf, blkaddr, err;
+ struct msg_req rst_req;
+ struct msg_rsp rst_rsp;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return 0;
+
+ if (req->reset) {
+ rst_req.hdr.pcifunc = pcifunc;
+ return rvu_mbox_handler_nix_stats_rst(rvu, &rst_req, &rst_rsp);
+ }
+ rsp->rx.octs = RVU_LF_RX_STATS(RX_OCTS);
+ rsp->rx.ucast = RVU_LF_RX_STATS(RX_UCAST);
+ rsp->rx.bcast = RVU_LF_RX_STATS(RX_BCAST);
+ rsp->rx.mcast = RVU_LF_RX_STATS(RX_MCAST);
+ rsp->rx.drop = RVU_LF_RX_STATS(RX_DROP);
+ rsp->rx.err = RVU_LF_RX_STATS(RX_ERR);
+ rsp->rx.drop_octs = RVU_LF_RX_STATS(RX_DROP_OCTS);
+ rsp->rx.drop_mcast = RVU_LF_RX_STATS(RX_DRP_MCAST);
+ rsp->rx.drop_bcast = RVU_LF_RX_STATS(RX_DRP_BCAST);
+
+ rsp->tx.octs = RVU_LF_TX_STATS(TX_OCTS);
+ rsp->tx.ucast = RVU_LF_TX_STATS(TX_UCAST);
+ rsp->tx.bcast = RVU_LF_TX_STATS(TX_BCAST);
+ rsp->tx.mcast = RVU_LF_TX_STATS(TX_MCAST);
+ rsp->tx.drop = RVU_LF_TX_STATS(TX_DROP);
+
+ rsp->pcifunc = req->pcifunc;
+ return 0;
+}
+
+static u16 rvu_rep_get_vlan_id(struct rvu *rvu, u16 pcifunc)
+{
+ int id;
+
+ for (id = 0; id < rvu->rep_cnt; id++)
+ if (rvu->rep2pfvf_map[id] == pcifunc)
+ return id;
+ return 0;
+}
+
+static int rvu_rep_tx_vlan_cfg(struct rvu *rvu, u16 pcifunc,
+ u16 vlan_tci, int *vidx)
+{
+ struct nix_vtag_config_rsp rsp = {};
+ struct nix_vtag_config req = {};
+ u64 etype = ETH_P_8021Q;
+ int err;
+
+ /* Insert vlan tag */
+ req.hdr.pcifunc = pcifunc;
+ req.vtag_size = VTAGSIZE_T4;
+ req.cfg_type = 0; /* tx vlan cfg */
+ req.tx.cfg_vtag0 = true;
+ req.tx.vtag0 = FIELD_PREP(NIX_VLAN_ETYPE_MASK, etype) | vlan_tci;
+
+ err = rvu_mbox_handler_nix_vtag_cfg(rvu, &req, &rsp);
+ if (err) {
+ dev_err(rvu->dev, "Tx vlan config failed\n");
+ return err;
+ }
+ *vidx = rsp.vtag0_idx;
+ return 0;
+}
+
+static int rvu_rep_rx_vlan_cfg(struct rvu *rvu, u16 pcifunc)
+{
+ struct nix_vtag_config req = {};
+ struct nix_vtag_config_rsp rsp;
+
+ /* config strip, capture and size */
+ req.hdr.pcifunc = pcifunc;
+ req.vtag_size = VTAGSIZE_T4;
+ req.cfg_type = 1; /* rx vlan cfg */
+ req.rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
+ req.rx.strip_vtag = true;
+ req.rx.capture_vtag = false;
+
+ return rvu_mbox_handler_nix_vtag_cfg(rvu, &req, &rsp);
+}
+
+static int rvu_rep_install_rx_rule(struct rvu *rvu, u16 pcifunc,
+ u16 entry, bool rte)
+{
+ struct npc_install_flow_req req = {};
+ struct npc_install_flow_rsp rsp = {};
+ struct rvu_pfvf *pfvf;
+ u16 vlan_tci, rep_id;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ /* To steer the traffic from Representee to Representor */
+ rep_id = rvu_rep_get_vlan_id(rvu, pcifunc);
+ if (rte) {
+ vlan_tci = rep_id | BIT_ULL(8);
+ req.vf = rvu->rep_pcifunc;
+ req.op = NIX_RX_ACTIONOP_UCAST;
+ req.index = rep_id;
+ } else {
+ vlan_tci = rep_id;
+ req.vf = pcifunc;
+ req.op = NIX_RX_ACTION_DEFAULT;
+ }
+
+ rvu_rep_rx_vlan_cfg(rvu, req.vf);
+ req.entry = entry;
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_VLAN_ETYPE_CTAG);
+ req.vtag0_valid = true;
+ req.vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;
+ req.packet.vlan_etype = cpu_to_be16(ETH_P_8021Q);
+ req.mask.vlan_etype = cpu_to_be16(ETH_P_8021Q);
+ req.packet.vlan_tci = cpu_to_be16(vlan_tci);
+ req.mask.vlan_tci = cpu_to_be16(0xffff);
+
+ req.channel = RVU_SWITCH_LBK_CHAN;
+ req.chan_mask = 0xffff;
+ req.intf = pfvf->nix_rx_intf;
+
+ return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+static int rvu_rep_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry,
+ bool rte)
+{
+ struct npc_install_flow_req req = {};
+ struct npc_install_flow_rsp rsp = {};
+ struct rvu_pfvf *pfvf;
+ int vidx, err;
+ u16 vlan_tci;
+ u8 lbkid;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ vlan_tci = rvu_rep_get_vlan_id(rvu, pcifunc);
+ if (rte)
+ vlan_tci |= BIT_ULL(8);
+
+ err = rvu_rep_tx_vlan_cfg(rvu, pcifunc, vlan_tci, &vidx);
+ if (err)
+ return err;
+
+ lbkid = pfvf->nix_blkaddr == BLKADDR_NIX0 ? 0 : 1;
+ req.hdr.pcifunc = 0; /* AF is requester */
+ if (rte) {
+ req.vf = pcifunc;
+ } else {
+ req.vf = rvu->rep_pcifunc;
+ req.packet.sq_id = vlan_tci;
+ req.mask.sq_id = 0xffff;
+ }
+
+ req.entry = entry;
+ req.intf = pfvf->nix_tx_intf;
+ req.op = NIX_TX_ACTIONOP_UCAST_CHAN;
+ req.index = (lbkid << 8) | RVU_SWITCH_LBK_CHAN;
+ req.set_cntr = 1;
+ req.vtag0_def = vidx;
+ req.vtag0_op = 1;
+ return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+int rvu_rep_install_mcam_rules(struct rvu *rvu)
+{
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ u16 start = rswitch->start_entry;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc, entry = 0;
+ int pf, vf, numvfs;
+ int err, nixlf, i;
+ u8 rep;
+
+ for (pf = 1; pf < hw->total_pfs; pf++) {
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
+ rvu_get_nix_blkaddr(rvu, pcifunc);
+ rep = true;
+ for (i = 0; i < 2; i++) {
+ err = rvu_rep_install_rx_rule(rvu, pcifunc,
+ start + entry, rep);
+ if (err)
+ return err;
+ rswitch->entry2pcifunc[entry++] = pcifunc;
+
+ err = rvu_rep_install_tx_rule(rvu, pcifunc,
+ start + entry, rep);
+ if (err)
+ return err;
+ rswitch->entry2pcifunc[entry++] = pcifunc;
+ rep = false;
+ }
+
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
+ for (vf = 0; vf < numvfs; vf++) {
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, vf + 1);
+ rvu_get_nix_blkaddr(rvu, pcifunc);
+
+ /* Skip installing rules if nixlf is not attached */
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
+ if (err)
+ continue;
+ rep = true;
+ for (i = 0; i < 2; i++) {
+ err = rvu_rep_install_rx_rule(rvu, pcifunc,
+ start + entry,
+ rep);
+ if (err)
+ return err;
+ rswitch->entry2pcifunc[entry++] = pcifunc;
+
+ err = rvu_rep_install_tx_rule(rvu, pcifunc,
+ start + entry,
+ rep);
+ if (err)
+ return err;
+ rswitch->entry2pcifunc[entry++] = pcifunc;
+ rep = false;
+ }
+ }
+ }
+
+ /* Initialize the wq for handling REP events */
+ spin_lock_init(&rvu->rep_evtq_lock);
+ INIT_LIST_HEAD(&rvu->rep_evtq_head);
+ INIT_WORK(&rvu->rep_evt_work, rvu_rep_wq_handler);
+ rvu->rep_evt_wq = alloc_workqueue("rep_evt_wq", WQ_PERCPU, 0);
+ if (!rvu->rep_evt_wq) {
+ dev_err(rvu->dev, "REP workqueue allocation failed\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void rvu_rep_update_rules(struct rvu *rvu, u16 pcifunc, bool ena)
+{
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u32 max = rswitch->used_entries;
+ int blkaddr;
+ u16 entry;
+
+ if (!rswitch->used_entries)
+ return;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+
+ if (blkaddr < 0)
+ return;
+
+ rvu_switch_enable_lbk_link(rvu, pcifunc, ena);
+ mutex_lock(&mcam->lock);
+ for (entry = 0; entry < max; entry++) {
+ if (rswitch->entry2pcifunc[entry] == pcifunc)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, entry, ena);
+ }
+ mutex_unlock(&mcam->lock);
+}
+
+int rvu_rep_pf_init(struct rvu *rvu)
+{
+ u16 pcifunc = rvu->rep_pcifunc;
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ set_bit(NIXLF_INITIALIZED, &pfvf->flags);
+ rvu_switch_enable_lbk_link(rvu, pcifunc, true);
+ rvu_rep_rx_vlan_cfg(rvu, pcifunc);
+ return 0;
+}
+
+int rvu_mbox_handler_esw_cfg(struct rvu *rvu, struct esw_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ if (req->hdr.pcifunc != rvu->rep_pcifunc)
+ return 0;
+
+ rvu->rep_mode = req->ena;
+
+ if (!rvu->rep_mode)
+ rvu_npc_free_mcam_entries(rvu, req->hdr.pcifunc, -1);
+
+ return 0;
+}
+
+int rvu_mbox_handler_get_rep_cnt(struct rvu *rvu, struct msg_req *req,
+ struct get_rep_cnt_rsp *rsp)
+{
+ int pf, vf, numvfs, hwvf, rep = 0;
+ u16 pcifunc;
+
+ rvu->rep_pcifunc = req->hdr.pcifunc;
+ rsp->rep_cnt = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs;
+ rvu->rep_cnt = rsp->rep_cnt;
+
+ rvu->rep2pfvf_map = devm_kzalloc(rvu->dev, rvu->rep_cnt *
+ sizeof(u16), GFP_KERNEL);
+ if (!rvu->rep2pfvf_map)
+ return -ENOMEM;
+
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
+ rvu->rep2pfvf_map[rep] = pcifunc;
+ rsp->rep_pf_map[rep] = pcifunc;
+ rep++;
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
+ for (vf = 0; vf < numvfs; vf++) {
+ rvu->rep2pfvf_map[rep] = pcifunc |
+ ((vf + 1) & RVU_PFVF_FUNC_MASK);
+ rsp->rep_pf_map[rep] = rvu->rep2pfvf_map[rep];
+ rep++;
+ }
+ }
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
index 38cfe148f4b7..e4a5f9fa6fd4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
@@ -17,9 +17,9 @@
/* SDP PF number */
static int sdp_pf_num[MAX_SDP] = {-1, -1};
-bool is_sdp_pfvf(u16 pcifunc)
+bool is_sdp_pfvf(struct rvu *rvu, u16 pcifunc)
{
- u16 pf = rvu_get_pf(pcifunc);
+ u16 pf = rvu_get_pf(rvu->pdev, pcifunc);
u32 found = 0, i = 0;
while (i < MAX_SDP) {
@@ -34,9 +34,9 @@ bool is_sdp_pfvf(u16 pcifunc)
return true;
}
-bool is_sdp_pf(u16 pcifunc)
+bool is_sdp_pf(struct rvu *rvu, u16 pcifunc)
{
- return (is_sdp_pfvf(pcifunc) &&
+ return (is_sdp_pfvf(rvu, pcifunc) &&
!(pcifunc & RVU_PFVF_FUNC_MASK));
}
@@ -46,7 +46,7 @@ bool is_sdp_vf(struct rvu *rvu, u16 pcifunc)
if (!(pcifunc & ~RVU_PFVF_FUNC_MASK))
return (rvu->vf_devid == RVU_SDP_VF_DEVID);
- return (is_sdp_pfvf(pcifunc) &&
+ return (is_sdp_pfvf(rvu, pcifunc) &&
!!(pcifunc & RVU_PFVF_FUNC_MASK));
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index fc8da2090657..8e868f815de1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -13,6 +13,8 @@
#define RVU_MULTI_BLK_VER 0x7ULL
+#define NIX_MAX_CTX_SIZE 128
+
/* RVU Block Address Enumeration */
enum rvu_block_addr_e {
BLKADDR_RVUM = 0x0ULL,
@@ -33,7 +35,8 @@ enum rvu_block_addr_e {
BLKADDR_NDC_NIX1_RX = 0x10ULL,
BLKADDR_NDC_NIX1_TX = 0x11ULL,
BLKADDR_APR = 0x16ULL,
- BLK_COUNT = 0x17ULL,
+ BLKADDR_MBOX = 0x1bULL,
+ BLK_COUNT = 0x1cULL,
};
/* RVU Block Type Enumeration */
@@ -49,7 +52,8 @@ enum rvu_block_type_e {
BLKTYPE_TIM = 0x8,
BLKTYPE_CPT = 0x9,
BLKTYPE_NDC = 0xa,
- BLKTYPE_MAX = 0xa,
+ BLKTYPE_MBOX = 0x13,
+ BLKTYPE_MAX = 0x13,
};
/* RVU Admin function Interrupt Vector Enumeration */
@@ -368,8 +372,12 @@ struct nix_cq_ctx_s {
u64 qsize : 4;
u64 cq_err_int : 8;
u64 cq_err_int_ena : 8;
+ /* Ensure all context sizes are 128 bytes */
+ u64 padding[12];
};
+static_assert(sizeof(struct nix_cq_ctx_s) == NIX_MAX_CTX_SIZE);
+
/* CN10K NIX Receive queue context structure */
struct nix_cn10k_rq_ctx_s {
u64 ena : 1;
@@ -411,7 +419,8 @@ struct nix_cn10k_rq_ctx_s {
u64 rsvd_171 : 1;
u64 later_skip : 6;
u64 xqe_imm_size : 6;
- u64 rsvd_189_184 : 6;
+ u64 band_prof_id_h : 4;
+ u64 rsvd_189_188 : 2;
u64 xqe_imm_copy : 1;
u64 xqe_hdr_split : 1;
u64 xqe_drop : 8; /* W3 */
@@ -458,6 +467,8 @@ struct nix_cn10k_rq_ctx_s {
u64 rsvd_1023_960; /* W15 */
};
+static_assert(sizeof(struct nix_cn10k_rq_ctx_s) == NIX_MAX_CTX_SIZE);
+
/* CN10K NIX Send queue context structure */
struct nix_cn10k_sq_ctx_s {
u64 ena : 1;
@@ -521,6 +532,8 @@ struct nix_cn10k_sq_ctx_s {
u64 rsvd_1023_1008 : 16;
};
+static_assert(sizeof(struct nix_cn10k_sq_ctx_s) == NIX_MAX_CTX_SIZE);
+
/* NIX Receive queue context structure */
struct nix_rq_ctx_s {
u64 ena : 1;
@@ -592,6 +605,8 @@ struct nix_rq_ctx_s {
u64 rsvd_1023_960; /* W15 */
};
+static_assert(sizeof(struct nix_rq_ctx_s) == NIX_MAX_CTX_SIZE);
+
/* NIX sqe sizes */
enum nix_maxsqesz {
NIX_MAXSQESZ_W16 = 0x0,
@@ -666,13 +681,18 @@ struct nix_sq_ctx_s {
u64 rsvd_1023_1008 : 16;
};
+static_assert(sizeof(struct nix_sq_ctx_s) == NIX_MAX_CTX_SIZE);
+
/* NIX Receive side scaling entry structure*/
struct nix_rsse_s {
uint32_t rq : 20;
uint32_t reserved_20_31 : 12;
-
+ /* Ensure all context sizes are at least 128 bytes */
+ u64 padding[15];
};
+static_assert(sizeof(struct nix_rsse_s) == NIX_MAX_CTX_SIZE);
+
/* NIX receive multicast/mirror entry structure */
struct nix_rx_mce_s {
uint64_t op : 2;
@@ -682,8 +702,12 @@ struct nix_rx_mce_s {
uint64_t rsvd_31_24 : 8;
uint64_t pf_func : 16;
uint64_t next : 16;
+ /* Ensure all context sizes are at least 128 bytes */
+ u64 padding[15];
};
+static_assert(sizeof(struct nix_rx_mce_s) == NIX_MAX_CTX_SIZE);
+
enum nix_band_prof_layers {
BAND_PROF_LEAF_LAYER = 0,
BAND_PROF_INVAL_LAYER = 1,
@@ -734,7 +758,8 @@ struct nix_bandprof_s {
uint64_t rc_action : 2;
uint64_t meter_algo : 2;
uint64_t band_prof_id : 7;
- uint64_t reserved_111_118 : 8;
+ uint64_t band_prof_id_h : 4;
+ uint64_t reserved_115_118 : 4;
uint64_t hl_en : 1;
uint64_t reserved_120_127 : 8;
uint64_t ts : 48; /* W2 */
@@ -767,6 +792,8 @@ struct nix_bandprof_s {
uint64_t reserved_1008_1023 : 16;
};
+static_assert(sizeof(struct nix_bandprof_s) == NIX_MAX_CTX_SIZE);
+
enum nix_lsoalg {
NIX_LSOALG_NOP,
NIX_LSOALG_ADD_SEGNUM,
@@ -823,4 +850,30 @@ enum nix_tx_vtag_op {
#define VTAG_STRIP BIT_ULL(4)
#define VTAG_CAPTURE BIT_ULL(5)
+/* NIX TX stats */
+enum nix_stat_lf_tx {
+ TX_UCAST = 0x0,
+ TX_BCAST = 0x1,
+ TX_MCAST = 0x2,
+ TX_DROP = 0x3,
+ TX_OCTS = 0x4,
+ TX_STATS_ENUM_LAST,
+};
+
+/* NIX RX stats */
+enum nix_stat_lf_rx {
+ RX_OCTS = 0x0,
+ RX_UCAST = 0x1,
+ RX_BCAST = 0x2,
+ RX_MCAST = 0x3,
+ RX_DROP = 0x4,
+ RX_DROP_OCTS = 0x5,
+ RX_FCS = 0x6,
+ RX_ERR = 0x7,
+ RX_DRP_BCAST = 0x8,
+ RX_DRP_MCAST = 0x9,
+ RX_DRP_L3BCAST = 0xa,
+ RX_DRP_L3MCAST = 0xb,
+ RX_STATS_ENUM_LAST,
+};
#endif /* RVU_STRUCT_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
index 854045ed3b06..49ce38685a7e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
@@ -8,7 +8,7 @@
#include <linux/bitfield.h>
#include "rvu.h"
-static void rvu_switch_enable_lbk_link(struct rvu *rvu, u16 pcifunc, bool enable)
+void rvu_switch_enable_lbk_link(struct rvu *rvu, u16 pcifunc, bool enable)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct nix_hw *nix_hw;
@@ -93,7 +93,7 @@ static int rvu_switch_install_rules(struct rvu *rvu)
if (!is_pf_cgxmapped(rvu, pf))
continue;
- pcifunc = pf << 10;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
/* rvu_get_nix_blkaddr sets up the corresponding NIX block
* address and NIX RX and TX interfaces for a pcifunc.
* Generally it is called during attach call of a pcifunc but it
@@ -126,7 +126,7 @@ static int rvu_switch_install_rules(struct rvu *rvu)
rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
for (vf = 0; vf < numvfs; vf++) {
- pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, (vf + 1));
rvu_get_nix_blkaddr(rvu, pcifunc);
err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
@@ -166,6 +166,8 @@ void rvu_switch_enable(struct rvu *rvu)
alloc_req.contig = true;
alloc_req.count = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs;
+ if (rvu->rep_mode)
+ alloc_req.count = alloc_req.count * 4;
ret = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
&alloc_rsp);
if (ret) {
@@ -189,7 +191,12 @@ void rvu_switch_enable(struct rvu *rvu)
rswitch->used_entries = alloc_rsp.count;
rswitch->start_entry = alloc_rsp.entry;
- ret = rvu_switch_install_rules(rvu);
+ if (rvu->rep_mode) {
+ rvu_rep_pf_init(rvu);
+ ret = rvu_rep_install_mcam_rules(rvu);
+ } else {
+ ret = rvu_switch_install_rules(rvu);
+ }
if (ret)
goto uninstall_rules;
@@ -222,11 +229,14 @@ void rvu_switch_disable(struct rvu *rvu)
if (!rswitch->used_entries)
return;
+ if (rvu->rep_mode)
+ goto free_ents;
+
for (pf = 1; pf < hw->total_pfs; pf++) {
if (!is_pf_cgxmapped(rvu, pf))
continue;
- pcifunc = pf << 10;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
if (err)
dev_err(rvu->dev,
@@ -238,7 +248,7 @@ void rvu_switch_disable(struct rvu *rvu)
rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
for (vf = 0; vf < numvfs; vf++) {
- pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, (vf + 1));
err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
if (err)
dev_err(rvu->dev,
@@ -249,6 +259,7 @@ void rvu_switch_disable(struct rvu *rvu)
}
}
+free_ents:
uninstall_req.start = rswitch->start_entry;
uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
free_req.all = 1;
@@ -258,12 +269,15 @@ void rvu_switch_disable(struct rvu *rvu)
kfree(rswitch->entry2pcifunc);
}
-void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc)
+void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc, bool ena)
{
struct rvu_switch *rswitch = &rvu->rswitch;
u32 max = rswitch->used_entries;
u16 entry;
+ if (rvu->rep_mode)
+ return rvu_rep_update_rules(rvu, pcifunc, ena);
+
if (!rswitch->used_entries)
return;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
index 775fd4c35794..19e0d16b12f6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
@@ -11,3 +11,5 @@
EXPORT_TRACEPOINT_SYMBOL(otx2_msg_alloc);
EXPORT_TRACEPOINT_SYMBOL(otx2_msg_interrupt);
EXPORT_TRACEPOINT_SYMBOL(otx2_msg_process);
+EXPORT_TRACEPOINT_SYMBOL(otx2_msg_status);
+EXPORT_TRACEPOINT_SYMBOL(otx2_parse_dump);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
index 5704520f9b02..4cd0fc4b0d20 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
@@ -18,33 +18,42 @@
#include "mbox.h"
TRACE_EVENT(otx2_msg_alloc,
- TP_PROTO(const struct pci_dev *pdev, u16 id, u64 size),
- TP_ARGS(pdev, id, size),
+ TP_PROTO(const struct pci_dev *pdev, u16 id, u64 size, u16 pcifunc),
+ TP_ARGS(pdev, id, size, pcifunc),
TP_STRUCT__entry(__string(dev, pci_name(pdev))
__field(u16, id)
__field(u64, size)
+ __field(u16, pcifunc)
),
TP_fast_assign(__assign_str(dev);
__entry->id = id;
__entry->size = size;
+ __entry->pcifunc = pcifunc;
),
- TP_printk("[%s] msg:(%s) size:%lld\n", __get_str(dev),
- otx2_mbox_id2name(__entry->id), __entry->size)
+ TP_printk("[%s] msg:(%s) size:%lld pcifunc:0x%x\n", __get_str(dev),
+ otx2_mbox_id2name(__entry->id), __entry->size,
+ __entry->pcifunc)
);
TRACE_EVENT(otx2_msg_send,
- TP_PROTO(const struct pci_dev *pdev, u16 num_msgs, u64 msg_size),
- TP_ARGS(pdev, num_msgs, msg_size),
+ TP_PROTO(const struct pci_dev *pdev, u16 num_msgs, u64 msg_size,
+ u16 id, u16 pcifunc),
+ TP_ARGS(pdev, num_msgs, msg_size, id, pcifunc),
TP_STRUCT__entry(__string(dev, pci_name(pdev))
__field(u16, num_msgs)
__field(u64, msg_size)
+ __field(u16, id)
+ __field(u16, pcifunc)
),
TP_fast_assign(__assign_str(dev);
__entry->num_msgs = num_msgs;
__entry->msg_size = msg_size;
+ __entry->id = id;
+ __entry->pcifunc = pcifunc;
),
- TP_printk("[%s] sent %d msg(s) of size:%lld\n", __get_str(dev),
- __entry->num_msgs, __entry->msg_size)
+ TP_printk("[%s] sent %d msg(s) of size:%lld msg:(%s) pcifunc:0x%x\n",
+ __get_str(dev), __entry->num_msgs, __entry->msg_size,
+ otx2_mbox_id2name(__entry->id), __entry->pcifunc)
);
TRACE_EVENT(otx2_msg_check,
@@ -81,18 +90,73 @@ TRACE_EVENT(otx2_msg_interrupt,
);
TRACE_EVENT(otx2_msg_process,
- TP_PROTO(const struct pci_dev *pdev, u16 id, int err),
- TP_ARGS(pdev, id, err),
+ TP_PROTO(const struct pci_dev *pdev, u16 id, int err, u16 pcifunc),
+ TP_ARGS(pdev, id, err, pcifunc),
TP_STRUCT__entry(__string(dev, pci_name(pdev))
__field(u16, id)
__field(int, err)
+ __field(u16, pcifunc)
),
TP_fast_assign(__assign_str(dev);
__entry->id = id;
__entry->err = err;
+ __entry->pcifunc = pcifunc;
+ ),
+ TP_printk("[%s] msg:(%s) error:%d pcifunc:0x%x\n", __get_str(dev),
+ otx2_mbox_id2name(__entry->id),
+ __entry->err, __entry->pcifunc)
+);
+
+TRACE_EVENT(otx2_msg_wait_rsp,
+ TP_PROTO(const struct pci_dev *pdev),
+ TP_ARGS(pdev),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ ),
+ TP_fast_assign(__assign_str(dev)
+ ),
+ TP_printk("[%s] timed out while waiting for response\n",
+ __get_str(dev))
+);
+
+TRACE_EVENT(otx2_msg_status,
+ TP_PROTO(const struct pci_dev *pdev, const char *msg, u16 num_msgs),
+ TP_ARGS(pdev, msg, num_msgs),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __string(str, msg)
+ __field(u16, num_msgs)
+ ),
+ TP_fast_assign(__assign_str(dev);
+ __assign_str(str);
+ __entry->num_msgs = num_msgs;
+ ),
+ TP_printk("[%s] %s num_msgs:%d\n", __get_str(dev),
+ __get_str(str), __entry->num_msgs)
+);
+
+TRACE_EVENT(otx2_parse_dump,
+ TP_PROTO(const struct pci_dev *pdev, char *msg, u64 *word),
+ TP_ARGS(pdev, msg, word),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __string(str, msg)
+ __field(u64, w0)
+ __field(u64, w1)
+ __field(u64, w2)
+ __field(u64, w3)
+ __field(u64, w4)
+ __field(u64, w5)
+ ),
+ TP_fast_assign(__assign_str(dev);
+ __assign_str(str);
+ __entry->w0 = *(word + 0);
+ __entry->w1 = *(word + 1);
+ __entry->w2 = *(word + 2);
+ __entry->w3 = *(word + 3);
+ __entry->w4 = *(word + 4);
+ __entry->w5 = *(word + 5);
),
- TP_printk("[%s] msg:(%s) error:%d\n", __get_str(dev),
- otx2_mbox_id2name(__entry->id), __entry->err)
+ TP_printk("[%s] nix parse %s W0:%#llx W1:%#llx W2:%#llx W3:%#llx W4:%#llx W5:%#llx\n",
+ __get_str(dev), __get_str(str), __entry->w0, __entry->w1, __entry->w2,
+ __entry->w3, __entry->w4, __entry->w5)
);
#endif /* __RVU_TRACE_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 64a97a0a10ed..883e9f4d601c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -5,13 +5,16 @@
obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o otx2_ptp.o
obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o
+obj-$(CONFIG_RVU_ESWITCH) += rvu_rep.o
rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
- otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
- otx2_devlink.o qos_sq.o qos.o
+ otx2_flows.o otx2_tc.o cn10k.o cn20k.o otx2_dmac_flt.o \
+ otx2_devlink.o qos_sq.o qos.o otx2_xsk.o
rvu_nicvf-y := otx2_vf.o
+rvu_rep-y := rep.o
rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o
+rvu_nicpf-$(CONFIG_XFRM_OFFLOAD) += cn10k_ipsec.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index 7417087b6db5..3e1bf22cba69 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -14,6 +14,9 @@ static struct dev_hw_ops otx2_hw_ops = {
.sqe_flush = otx2_sqe_flush,
.aura_freeptr = otx2_aura_freeptr,
.refill_pool_ptrs = otx2_refill_pool_ptrs,
+ .pfaf_mbox_intr_handler = otx2_pfaf_mbox_intr_handler,
+ .aura_aq_init = otx2_aura_aq_init,
+ .pool_aq_init = otx2_pool_aq_init,
};
static struct dev_hw_ops cn10k_hw_ops = {
@@ -21,8 +24,22 @@ static struct dev_hw_ops cn10k_hw_ops = {
.sqe_flush = cn10k_sqe_flush,
.aura_freeptr = cn10k_aura_freeptr,
.refill_pool_ptrs = cn10k_refill_pool_ptrs,
+ .pfaf_mbox_intr_handler = otx2_pfaf_mbox_intr_handler,
+ .aura_aq_init = otx2_aura_aq_init,
+ .pool_aq_init = otx2_pool_aq_init,
};
+void otx2_init_hw_ops(struct otx2_nic *pfvf)
+{
+ if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
+ pfvf->hw_ops = &otx2_hw_ops;
+ return;
+ }
+
+ pfvf->hw_ops = &cn10k_hw_ops;
+}
+EXPORT_SYMBOL(otx2_init_hw_ops);
+
int cn10k_lmtst_init(struct otx2_nic *pfvf)
{
@@ -30,12 +47,9 @@ int cn10k_lmtst_init(struct otx2_nic *pfvf)
struct otx2_lmt_info *lmt_info;
int err, cpu;
- if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
- pfvf->hw_ops = &otx2_hw_ops;
+ if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag))
return 0;
- }
- pfvf->hw_ops = &cn10k_hw_ops;
/* Total LMTLINES = num_online_cpus() * 32 (For Burst flush).*/
pfvf->tot_lmt_lines = (num_online_cpus() * LMT_BURST_SIZE);
pfvf->hw.lmt_info = alloc_percpu(struct otx2_lmt_info);
@@ -72,7 +86,7 @@ int cn10k_lmtst_init(struct otx2_nic *pfvf)
}
EXPORT_SYMBOL(cn10k_lmtst_init);
-int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
+int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura)
{
struct nix_cn10k_aq_enq_req *aq;
struct otx2_nic *pfvf = dev;
@@ -88,7 +102,7 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.ena = 1;
aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
- aq->sq.default_chan = pfvf->hw.tx_chan_base;
+ aq->sq.default_chan = pfvf->hw.tx_chan_base + chan_offset;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
aq->sq.sqb_aura = sqb_aura;
aq->sq.sq_int_ena = NIX_SQINT_BITS;
@@ -112,9 +126,12 @@ int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
struct otx2_nic *pfvf = dev;
int cnt = cq->pool_ptrs;
u64 ptrs[NPA_MAX_BURST];
+ struct otx2_pool *pool;
dma_addr_t bufptr;
int num_ptrs = 1;
+ pool = &pfvf->qset.pool[cq->cq_idx];
+
/* Refill pool with new buffers */
while (cq->pool_ptrs) {
if (otx2_alloc_buffer(pfvf, cq, &bufptr)) {
@@ -124,7 +141,9 @@ int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
break;
}
cq->pool_ptrs--;
- ptrs[num_ptrs] = (u64)bufptr + OTX2_HEAD_ROOM;
+ ptrs[num_ptrs] = pool->xsk_pool ?
+ (u64)bufptr : (u64)bufptr + OTX2_HEAD_ROOM;
+
num_ptrs++;
if (num_ptrs == NPA_MAX_BURST || cq->pool_ptrs == 0) {
__cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,
@@ -322,6 +341,12 @@ int cn10k_map_unmap_rq_policer(struct otx2_nic *pfvf, int rq_idx,
aq->rq.band_prof_id = policer;
aq->rq_mask.band_prof_id = GENMASK(9, 0);
+ /* If policer id is greater than 1023 then it implies hardware supports
+ * more leaf profiles. In that case use band_prof_id_h for 4 MSBs.
+ */
+ aq->rq.band_prof_id_h = policer >> 10;
+ aq->rq_mask.band_prof_id_h = GENMASK(3, 0);
+
/* Fill AQ info */
aq->qidx = rq_idx;
aq->ctype = NIX_AQ_CTYPE_RQ;
@@ -352,9 +377,12 @@ int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf)
mutex_lock(&pfvf->mbox.lock);
/* Remove RQ's policer mapping */
- for (qidx = 0; qidx < hw->rx_queues; qidx++)
- cn10k_map_unmap_rq_policer(pfvf, qidx,
- hw->matchall_ipolicer, false);
+ for (qidx = 0; qidx < hw->rx_queues; qidx++) {
+ rc = cn10k_map_unmap_rq_policer(pfvf, qidx, hw->matchall_ipolicer, false);
+ if (rc)
+ dev_warn(pfvf->dev, "Failed to unmap RQ %d's policer (error %d).",
+ qidx, rc);
+ }
rc = cn10k_free_leaf_profile(pfvf, hw->matchall_ipolicer);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
index c1861f7de254..945ab10bd4ed 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
@@ -26,7 +26,7 @@ static inline int mtu_to_dwrr_weight(struct otx2_nic *pfvf, int mtu)
int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
-int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
+int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
int cn10k_lmtst_init(struct otx2_nic *pfvf);
int cn10k_free_all_ipolicers(struct otx2_nic *pfvf);
int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf);
@@ -39,4 +39,5 @@ int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf);
int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
u32 burst, u64 rate, bool pps);
int cn10k_free_leaf_profile(struct otx2_nic *pfvf, u16 leaf);
+void otx2_init_hw_ops(struct otx2_nic *pfvf);
#endif /* CN10K_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
new file mode 100644
index 000000000000..77543d472345
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
@@ -0,0 +1,1042 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell IPSEC offload driver
+ *
+ * Copyright (C) 2024 Marvell.
+ */
+
+#include <net/xfrm.h>
+#include <linux/netdevice.h>
+#include <linux/bitfield.h>
+#include <crypto/aead.h>
+#include <crypto/gcm.h>
+
+#include "otx2_common.h"
+#include "otx2_struct.h"
+#include "cn10k_ipsec.h"
+
+static bool is_dev_support_ipsec_offload(struct pci_dev *pdev)
+{
+ return is_dev_cn10ka_b0(pdev) || is_dev_cn10kb(pdev);
+}
+
+static bool cn10k_cpt_device_set_inuse(struct otx2_nic *pf)
+{
+ enum cn10k_cpt_hw_state_e state;
+
+ while (true) {
+ state = atomic_cmpxchg(&pf->ipsec.cpt_state,
+ CN10K_CPT_HW_AVAILABLE,
+ CN10K_CPT_HW_IN_USE);
+ if (state == CN10K_CPT_HW_AVAILABLE)
+ return true;
+ if (state == CN10K_CPT_HW_UNAVAILABLE)
+ return false;
+
+ mdelay(1);
+ }
+}
+
+static void cn10k_cpt_device_set_available(struct otx2_nic *pf)
+{
+ atomic_set(&pf->ipsec.cpt_state, CN10K_CPT_HW_AVAILABLE);
+}
+
+static void cn10k_cpt_device_set_unavailable(struct otx2_nic *pf)
+{
+ atomic_set(&pf->ipsec.cpt_state, CN10K_CPT_HW_UNAVAILABLE);
+}
+
+static int cn10k_outb_cptlf_attach(struct otx2_nic *pf)
+{
+ struct rsrc_attach *attach;
+ int ret = -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ /* Get memory to put this msg */
+ attach = otx2_mbox_alloc_msg_attach_resources(&pf->mbox);
+ if (!attach)
+ goto unlock;
+
+ attach->cptlfs = true;
+ attach->modify = true;
+
+ /* Send attach request to AF */
+ ret = otx2_sync_mbox_msg(&pf->mbox);
+
+unlock:
+ mutex_unlock(&pf->mbox.lock);
+ return ret;
+}
+
+static int cn10k_outb_cptlf_detach(struct otx2_nic *pf)
+{
+ struct rsrc_detach *detach;
+ int ret = -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ detach = otx2_mbox_alloc_msg_detach_resources(&pf->mbox);
+ if (!detach)
+ goto unlock;
+
+ detach->partial = true;
+ detach->cptlfs = true;
+
+ /* Send detach request to AF */
+ ret = otx2_sync_mbox_msg(&pf->mbox);
+
+unlock:
+ mutex_unlock(&pf->mbox.lock);
+ return ret;
+}
+
+static int cn10k_outb_cptlf_alloc(struct otx2_nic *pf)
+{
+ struct cpt_lf_alloc_req_msg *req;
+ int ret = -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cpt_lf_alloc(&pf->mbox);
+ if (!req)
+ goto unlock;
+
+ /* PF function */
+ req->nix_pf_func = pf->pcifunc;
+ /* Enable SE-IE Engine Group */
+ req->eng_grpmsk = 1 << CN10K_DEF_CPT_IPSEC_EGRP;
+
+ ret = otx2_sync_mbox_msg(&pf->mbox);
+
+unlock:
+ mutex_unlock(&pf->mbox.lock);
+ return ret;
+}
+
+static void cn10k_outb_cptlf_free(struct otx2_nic *pf)
+{
+ mutex_lock(&pf->mbox.lock);
+ otx2_mbox_alloc_msg_cpt_lf_free(&pf->mbox);
+ otx2_sync_mbox_msg(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
+}
+
+static int cn10k_outb_cptlf_config(struct otx2_nic *pf)
+{
+ struct cpt_inline_ipsec_cfg_msg *req;
+ int ret = -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cpt_inline_ipsec_cfg(&pf->mbox);
+ if (!req)
+ goto unlock;
+
+ req->dir = CPT_INLINE_OUTBOUND;
+ req->enable = 1;
+ req->nix_pf_func = pf->pcifunc;
+ ret = otx2_sync_mbox_msg(&pf->mbox);
+unlock:
+ mutex_unlock(&pf->mbox.lock);
+ return ret;
+}
+
+static void cn10k_outb_cptlf_iq_enable(struct otx2_nic *pf)
+{
+ u64 reg_val;
+
+ /* Set Execution Enable of instruction queue */
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
+ reg_val |= BIT_ULL(16);
+ otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val);
+
+ /* Enable instruction enqueuing into the queue */
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_CTL);
+ reg_val |= BIT_ULL(0);
+ otx2_write64(pf, CN10K_CPT_LF_CTL, reg_val);
+}
+
+static void cn10k_outb_cptlf_iq_disable(struct otx2_nic *pf)
+{
+ u32 inflight, grb_cnt, gwb_cnt;
+ u32 nq_ptr, dq_ptr;
+ int timeout = 20;
+ u64 reg_val;
+ int cnt;
+
+ /* Disable instructions enqueuing */
+ otx2_write64(pf, CN10K_CPT_LF_CTL, 0ull);
+
+ /* Wait for instruction queue to become empty.
+ * CPT_LF_INPROG.INFLIGHT count is zero
+ */
+ do {
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
+ inflight = FIELD_GET(CPT_LF_INPROG_INFLIGHT, reg_val);
+ if (!inflight)
+ break;
+
+ usleep_range(10000, 20000);
+ if (timeout-- < 0) {
+ netdev_err(pf->netdev, "Timeout to cleanup CPT IQ\n");
+ break;
+ }
+ } while (1);
+
+ /* Disable executions in the LF's queue,
+ * the queue should be empty at this point
+ */
+ reg_val &= ~BIT_ULL(16);
+ otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val);
+
+ /* Wait for instruction queue to become empty */
+ cnt = 0;
+ do {
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
+ if (reg_val & BIT_ULL(31))
+ cnt = 0;
+ else
+ cnt++;
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_Q_GRP_PTR);
+ nq_ptr = FIELD_GET(CPT_LF_Q_GRP_PTR_DQ_PTR, reg_val);
+ dq_ptr = FIELD_GET(CPT_LF_Q_GRP_PTR_DQ_PTR, reg_val);
+ } while ((cnt < 10) && (nq_ptr != dq_ptr));
+
+ cnt = 0;
+ do {
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
+ inflight = FIELD_GET(CPT_LF_INPROG_INFLIGHT, reg_val);
+ grb_cnt = FIELD_GET(CPT_LF_INPROG_GRB_CNT, reg_val);
+ gwb_cnt = FIELD_GET(CPT_LF_INPROG_GWB_CNT, reg_val);
+ if (inflight == 0 && gwb_cnt < 40 &&
+ (grb_cnt == 0 || grb_cnt == 40))
+ cnt++;
+ else
+ cnt = 0;
+ } while (cnt < 10);
+}
+
+/* Allocate memory for CPT outbound Instruction queue.
+ * Instruction queue memory format is:
+ * -----------------------------
+ * | Instruction Group memory |
+ * | (CPT_LF_Q_SIZE[SIZE_DIV40] |
+ * | x 16 Bytes) |
+ * | |
+ * ----------------------------- <-- CPT_LF_Q_BASE[ADDR]
+ * | Flow Control (128 Bytes) |
+ * | |
+ * -----------------------------
+ * | Instruction Memory |
+ * | (CPT_LF_Q_SIZE[SIZE_DIV40] |
+ * | x 40 x 64 Bytes) |
+ * | |
+ * -----------------------------
+ */
+static int cn10k_outb_cptlf_iq_alloc(struct otx2_nic *pf)
+{
+ struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq;
+
+ iq->size = CN10K_CPT_INST_QLEN_BYTES + CN10K_CPT_Q_FC_LEN +
+ CN10K_CPT_INST_GRP_QLEN_BYTES + OTX2_ALIGN;
+
+ iq->real_vaddr = dma_alloc_coherent(pf->dev, iq->size,
+ &iq->real_dma_addr, GFP_KERNEL);
+ if (!iq->real_vaddr)
+ return -ENOMEM;
+
+ /* iq->vaddr/dma_addr points to Flow Control location */
+ iq->vaddr = iq->real_vaddr + CN10K_CPT_INST_GRP_QLEN_BYTES;
+ iq->dma_addr = iq->real_dma_addr + CN10K_CPT_INST_GRP_QLEN_BYTES;
+
+ /* Align pointers */
+ iq->vaddr = PTR_ALIGN(iq->vaddr, OTX2_ALIGN);
+ iq->dma_addr = PTR_ALIGN(iq->dma_addr, OTX2_ALIGN);
+ return 0;
+}
+
+static void cn10k_outb_cptlf_iq_free(struct otx2_nic *pf)
+{
+ struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq;
+
+ if (iq->real_vaddr)
+ dma_free_coherent(pf->dev, iq->size, iq->real_vaddr,
+ iq->real_dma_addr);
+
+ iq->real_vaddr = NULL;
+ iq->vaddr = NULL;
+}
+
+static int cn10k_outb_cptlf_iq_init(struct otx2_nic *pf)
+{
+ u64 reg_val;
+ int ret;
+
+ /* Allocate Memory for CPT IQ */
+ ret = cn10k_outb_cptlf_iq_alloc(pf);
+ if (ret)
+ return ret;
+
+ /* Disable IQ */
+ cn10k_outb_cptlf_iq_disable(pf);
+
+ /* Set IQ base address */
+ otx2_write64(pf, CN10K_CPT_LF_Q_BASE, pf->ipsec.iq.dma_addr);
+
+ /* Set IQ size */
+ reg_val = FIELD_PREP(CPT_LF_Q_SIZE_DIV40, CN10K_CPT_SIZE_DIV40 +
+ CN10K_CPT_EXTRA_SIZE_DIV40);
+ otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, reg_val);
+
+ return 0;
+}
+
+static int cn10k_outb_cptlf_init(struct otx2_nic *pf)
+{
+ int ret;
+
+ /* Initialize CPTLF Instruction Queue (IQ) */
+ ret = cn10k_outb_cptlf_iq_init(pf);
+ if (ret)
+ return ret;
+
+ /* Configure CPTLF for outbound ipsec offload */
+ ret = cn10k_outb_cptlf_config(pf);
+ if (ret)
+ goto iq_clean;
+
+ /* Enable CPTLF IQ */
+ cn10k_outb_cptlf_iq_enable(pf);
+ return 0;
+iq_clean:
+ cn10k_outb_cptlf_iq_free(pf);
+ return ret;
+}
+
+static int cn10k_outb_cpt_init(struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ int ret;
+
+ /* Attach a CPT LF for outbound ipsec offload */
+ ret = cn10k_outb_cptlf_attach(pf);
+ if (ret)
+ return ret;
+
+ /* Allocate a CPT LF for outbound ipsec offload */
+ ret = cn10k_outb_cptlf_alloc(pf);
+ if (ret)
+ goto detach;
+
+ /* Initialize the CPTLF for outbound ipsec offload */
+ ret = cn10k_outb_cptlf_init(pf);
+ if (ret)
+ goto lf_free;
+
+ pf->ipsec.io_addr = (__force u64)otx2_get_regaddr(pf,
+ CN10K_CPT_LF_NQX(0));
+
+ /* Set ipsec offload enabled for this device */
+ pf->flags |= OTX2_FLAG_IPSEC_OFFLOAD_ENABLED;
+
+ cn10k_cpt_device_set_available(pf);
+ return 0;
+
+lf_free:
+ cn10k_outb_cptlf_free(pf);
+detach:
+ cn10k_outb_cptlf_detach(pf);
+ return ret;
+}
+
+static int cn10k_outb_cpt_clean(struct otx2_nic *pf)
+{
+ int ret;
+
+ if (!cn10k_cpt_device_set_inuse(pf)) {
+ netdev_err(pf->netdev, "CPT LF device unavailable\n");
+ return -ENODEV;
+ }
+
+ /* Set ipsec offload disabled for this device */
+ pf->flags &= ~OTX2_FLAG_IPSEC_OFFLOAD_ENABLED;
+
+ /* Disable CPTLF Instruction Queue (IQ) */
+ cn10k_outb_cptlf_iq_disable(pf);
+
+ /* Set IQ base address and size to 0 */
+ otx2_write64(pf, CN10K_CPT_LF_Q_BASE, 0);
+ otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, 0);
+
+ /* Free CPTLF IQ */
+ cn10k_outb_cptlf_iq_free(pf);
+
+ /* Free and detach CPT LF */
+ cn10k_outb_cptlf_free(pf);
+ ret = cn10k_outb_cptlf_detach(pf);
+ if (ret)
+ netdev_err(pf->netdev, "Failed to detach CPT LF\n");
+
+ cn10k_cpt_device_set_unavailable(pf);
+ return ret;
+}
+
+static void cn10k_cpt_inst_flush(struct otx2_nic *pf, struct cpt_inst_s *inst,
+ u64 size)
+{
+ struct otx2_lmt_info *lmt_info;
+ u64 val = 0, tar_addr = 0;
+
+ lmt_info = per_cpu_ptr(pf->hw.lmt_info, smp_processor_id());
+ /* FIXME: val[0:10] LMT_ID.
+ * [12:15] no of LMTST - 1 in the burst.
+ * [19:63] data size of each LMTST in the burst except first.
+ */
+ val = (lmt_info->lmt_id & 0x7FF);
+ /* Target address for LMTST flush tells HW how many 128bit
+ * words are present.
+ * tar_addr[6:4] size of first LMTST - 1 in units of 128b.
+ */
+ tar_addr |= pf->ipsec.io_addr | (((size / 16) - 1) & 0x7) << 4;
+ dma_wmb();
+ memcpy((u64 *)lmt_info->lmt_addr, inst, size);
+ cn10k_lmt_flush(val, tar_addr);
+}
+
+static int cn10k_wait_for_cpt_respose(struct otx2_nic *pf,
+ struct cpt_res_s *res)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(100);
+ u64 *completion_ptr = (u64 *)res;
+
+ do {
+ if (time_after(jiffies, timeout)) {
+ netdev_err(pf->netdev, "CPT response timeout\n");
+ return -EBUSY;
+ }
+ } while ((READ_ONCE(*completion_ptr) & CN10K_CPT_COMP_E_MASK) ==
+ CN10K_CPT_COMP_E_NOTDONE);
+
+ if (!(res->compcode == CN10K_CPT_COMP_E_GOOD ||
+ res->compcode == CN10K_CPT_COMP_E_WARN) || res->uc_compcode) {
+ netdev_err(pf->netdev, "compcode=%x doneint=%x\n",
+ res->compcode, res->doneint);
+ netdev_err(pf->netdev, "uc_compcode=%x uc_info=%llx esn=%llx\n",
+ res->uc_compcode, (u64)res->uc_info, res->esn);
+ }
+ return 0;
+}
+
+static int cn10k_outb_write_sa(struct otx2_nic *pf, struct qmem *sa_info)
+{
+ dma_addr_t res_iova, dptr_iova, sa_iova;
+ struct cn10k_tx_sa_s *sa_dptr;
+ struct cpt_inst_s inst = {};
+ struct cpt_res_s *res;
+ u32 sa_size, off;
+ u64 *sptr, *dptr;
+ u64 reg_val;
+ int ret;
+
+ sa_iova = sa_info->iova;
+ if (!sa_iova)
+ return -EINVAL;
+
+ res = dma_alloc_coherent(pf->dev, sizeof(struct cpt_res_s),
+ &res_iova, GFP_ATOMIC);
+ if (!res)
+ return -ENOMEM;
+
+ sa_size = sizeof(struct cn10k_tx_sa_s);
+ sa_dptr = dma_alloc_coherent(pf->dev, sa_size, &dptr_iova, GFP_ATOMIC);
+ if (!sa_dptr) {
+ dma_free_coherent(pf->dev, sizeof(struct cpt_res_s), res,
+ res_iova);
+ return -ENOMEM;
+ }
+
+ sptr = (__force u64 *)sa_info->base;
+ dptr = (__force u64 *)sa_dptr;
+ for (off = 0; off < (sa_size / 8); off++)
+ *(dptr + off) = (__force u64)cpu_to_be64(*(sptr + off));
+
+ res->compcode = CN10K_CPT_COMP_E_NOTDONE;
+ inst.res_addr = res_iova;
+ inst.dptr = (u64)dptr_iova;
+ inst.param2 = sa_size >> 3;
+ inst.dlen = sa_size;
+ inst.opcode_major = CN10K_IPSEC_MAJOR_OP_WRITE_SA;
+ inst.opcode_minor = CN10K_IPSEC_MINOR_OP_WRITE_SA;
+ inst.cptr = sa_iova;
+ inst.ctx_val = 1;
+ inst.egrp = CN10K_DEF_CPT_IPSEC_EGRP;
+
+ /* Check if CPT-LF available */
+ if (!cn10k_cpt_device_set_inuse(pf)) {
+ ret = -ENODEV;
+ goto free_mem;
+ }
+
+ cn10k_cpt_inst_flush(pf, &inst, sizeof(struct cpt_inst_s));
+ dma_wmb();
+ ret = cn10k_wait_for_cpt_respose(pf, res);
+ if (ret)
+ goto set_available;
+
+ /* Trigger CTX flush to write dirty data back to DRAM */
+ reg_val = FIELD_PREP(CPT_LF_CTX_FLUSH_CPTR, sa_iova >> 7);
+ otx2_write64(pf, CN10K_CPT_LF_CTX_FLUSH, reg_val);
+
+set_available:
+ cn10k_cpt_device_set_available(pf);
+free_mem:
+ dma_free_coherent(pf->dev, sa_size, sa_dptr, dptr_iova);
+ dma_free_coherent(pf->dev, sizeof(struct cpt_res_s), res, res_iova);
+ return ret;
+}
+
+static int cn10k_ipsec_get_hw_ctx_offset(void)
+{
+ /* Offset of the hardware context within the SA, in 8-byte words */
+ return (offsetof(struct cn10k_tx_sa_s, hw_ctx) / sizeof(u64)) & 0x7F;
+}
+
+static int cn10k_ipsec_get_ctx_push_size(void)
+{
+ /* Context push size is rounded up to a multiple of 8 bytes */
+ return (roundup(offsetof(struct cn10k_tx_sa_s, hw_ctx), 8) / 8) & 0x7F;
+}
+
+static int cn10k_ipsec_get_aes_key_len(int key_len)
+{
+ /* key_len is aes key length in bytes */
+ switch (key_len) {
+ case 16:
+ return CN10K_IPSEC_SA_AES_KEY_LEN_128;
+ case 24:
+ return CN10K_IPSEC_SA_AES_KEY_LEN_192;
+ default:
+ return CN10K_IPSEC_SA_AES_KEY_LEN_256;
+ }
+}
+
+static void cn10k_outb_prepare_sa(struct xfrm_state *x,
+ struct cn10k_tx_sa_s *sa_entry)
+{
+ int key_len = (x->aead->alg_key_len + 7) / 8;
+ struct net_device *netdev = x->xso.dev;
+ u8 *key = x->aead->alg_key;
+ struct otx2_nic *pf;
+ u32 *tmp_salt;
+ u64 *tmp_key;
+ int idx;
+
+ memset(sa_entry, 0, sizeof(struct cn10k_tx_sa_s));
+
+ /* Context size in 128-byte units (sa_size is already 128-byte aligned) */
+ pf = netdev_priv(netdev);
+ sa_entry->ctx_size = (pf->ipsec.sa_size / OTX2_ALIGN) & 0xF;
+ sa_entry->hw_ctx_off = cn10k_ipsec_get_hw_ctx_offset();
+ sa_entry->ctx_push_size = cn10k_ipsec_get_ctx_push_size();
+
+ /* Ucode to skip two words of CPT_CTX_HW_S */
+ sa_entry->ctx_hdr_size = 1;
+
+ /* Allow Atomic operation (AOP) */
+ sa_entry->aop_valid = 1;
+
+ /* Outbound, ESP TRANSPORT/TUNNEL Mode, AES-GCM with */
+ sa_entry->sa_dir = CN10K_IPSEC_SA_DIR_OUTB;
+ sa_entry->ipsec_protocol = CN10K_IPSEC_SA_IPSEC_PROTO_ESP;
+ sa_entry->enc_type = CN10K_IPSEC_SA_ENCAP_TYPE_AES_GCM;
+ sa_entry->iv_src = CN10K_IPSEC_SA_IV_SRC_PACKET;
+ if (x->props.mode == XFRM_MODE_TUNNEL)
+ sa_entry->ipsec_mode = CN10K_IPSEC_SA_IPSEC_MODE_TUNNEL;
+ else
+ sa_entry->ipsec_mode = CN10K_IPSEC_SA_IPSEC_MODE_TRANSPORT;
+
+ /* Last 4 bytes are salt */
+ key_len -= 4;
+ sa_entry->aes_key_len = cn10k_ipsec_get_aes_key_len(key_len);
+ memcpy(sa_entry->cipher_key, key, key_len);
+ tmp_key = (u64 *)sa_entry->cipher_key;
+
+ for (idx = 0; idx < key_len / 8; idx++)
+ tmp_key[idx] = (__force u64)cpu_to_be64(tmp_key[idx]);
+
+ memcpy(&sa_entry->iv_gcm_salt, key + key_len, 4);
+ tmp_salt = (u32 *)&sa_entry->iv_gcm_salt;
+ *tmp_salt = (__force u32)cpu_to_be32(*tmp_salt);
+
+ /* Write SA context data to memory before enabling */
+ wmb();
+
+ /* Enable SA */
+ sa_entry->sa_valid = 1;
+}
+
+static int cn10k_ipsec_validate_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
+{
+ if (x->props.aalgo != SADB_AALG_NONE) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload authenticated xfrm states");
+ return -EINVAL;
+ }
+ if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only AES-GCM-ICV16 xfrm state may be offloaded");
+ return -EINVAL;
+ }
+ if (x->props.calgo != SADB_X_CALG_NONE) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload compressed xfrm states");
+ return -EINVAL;
+ }
+ if (x->props.flags & XFRM_STATE_ESN) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload ESN xfrm states");
+ return -EINVAL;
+ }
+ if (x->props.family != AF_INET && x->props.family != AF_INET6) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only IPv4/v6 xfrm states may be offloaded");
+ return -EINVAL;
+ }
+ if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload other than crypto-mode");
+ return -EINVAL;
+ }
+ if (x->props.mode != XFRM_MODE_TRANSPORT &&
+ x->props.mode != XFRM_MODE_TUNNEL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only tunnel/transport xfrm states may be offloaded");
+ return -EINVAL;
+ }
+ if (x->id.proto != IPPROTO_ESP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only ESP xfrm state may be offloaded");
+ return -EINVAL;
+ }
+ if (x->encap) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Encapsulated xfrm state may not be offloaded");
+ return -EINVAL;
+ }
+ if (!x->aead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states without aead");
+ return -EINVAL;
+ }
+
+ if (x->aead->alg_icv_len != 128) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states with AEAD ICV length other than 128bit");
+ return -EINVAL;
+ }
+ if (x->aead->alg_key_len != 128 + 32 &&
+ x->aead->alg_key_len != 192 + 32 &&
+ x->aead->alg_key_len != 256 + 32) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states with AEAD key length other than 128/192/256bit");
+ return -EINVAL;
+ }
+ if (x->tfcpad) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states with tfc padding");
+ return -EINVAL;
+ }
+ if (!x->geniv) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states without geniv");
+ return -EINVAL;
+ }
+ if (strcmp(x->geniv, "seqiv")) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states with geniv other than seqiv");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int cn10k_ipsec_inb_add_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG_MOD(extack, "xfrm inbound offload not supported");
+ return -EOPNOTSUPP;
+}
+
+static int cn10k_ipsec_outb_add_state(struct net_device *dev,
+ struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
+{
+ struct cn10k_tx_sa_s *sa_entry;
+ struct qmem *sa_info;
+ struct otx2_nic *pf;
+ int err;
+
+ err = cn10k_ipsec_validate_state(x, extack);
+ if (err)
+ return err;
+
+ pf = netdev_priv(dev);
+
+ err = qmem_alloc(pf->dev, &sa_info, pf->ipsec.sa_size, OTX2_ALIGN);
+ if (err)
+ return err;
+
+ sa_entry = (struct cn10k_tx_sa_s *)sa_info->base;
+ cn10k_outb_prepare_sa(x, sa_entry);
+
+ err = cn10k_outb_write_sa(pf, sa_info);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error writing outbound SA");
+ qmem_free(pf->dev, sa_info);
+ return err;
+ }
+
+ x->xso.offload_handle = (unsigned long)sa_info;
+ /* Enable static branch when first SA setup */
+ if (!pf->ipsec.outb_sa_count)
+ static_branch_enable(&cn10k_ipsec_sa_enabled);
+ pf->ipsec.outb_sa_count++;
+ return 0;
+}
+
+static int cn10k_ipsec_add_state(struct net_device *dev,
+ struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
+{
+ if (x->xso.dir == XFRM_DEV_OFFLOAD_IN)
+ return cn10k_ipsec_inb_add_state(x, extack);
+ else
+ return cn10k_ipsec_outb_add_state(dev, x, extack);
+}
+
+static void cn10k_ipsec_del_state(struct net_device *dev, struct xfrm_state *x)
+{
+ struct cn10k_tx_sa_s *sa_entry;
+ struct qmem *sa_info;
+ struct otx2_nic *pf;
+ int err;
+
+ if (x->xso.dir == XFRM_DEV_OFFLOAD_IN)
+ return;
+
+ pf = netdev_priv(dev);
+
+ sa_info = (struct qmem *)x->xso.offload_handle;
+ sa_entry = (struct cn10k_tx_sa_s *)sa_info->base;
+ memset(sa_entry, 0, sizeof(struct cn10k_tx_sa_s));
+ /* Disable SA in CPT h/w */
+ sa_entry->ctx_push_size = cn10k_ipsec_get_ctx_push_size();
+ sa_entry->ctx_size = (pf->ipsec.sa_size / OTX2_ALIGN) & 0xF;
+ sa_entry->aop_valid = 1;
+
+ err = cn10k_outb_write_sa(pf, sa_info);
+ if (err)
+ netdev_err(dev, "Error (%d) deleting SA\n", err);
+
+ x->xso.offload_handle = 0;
+ qmem_free(pf->dev, sa_info);
+
+ /* If no more SA's then update netdev feature for potential change
+ * in NETIF_F_HW_ESP.
+ */
+ if (!--pf->ipsec.outb_sa_count)
+ queue_work(pf->ipsec.sa_workq, &pf->ipsec.sa_work);
+}
+
+static const struct xfrmdev_ops cn10k_ipsec_xfrmdev_ops = {
+ .xdo_dev_state_add = cn10k_ipsec_add_state,
+ .xdo_dev_state_delete = cn10k_ipsec_del_state,
+};
+
+static void cn10k_ipsec_sa_wq_handler(struct work_struct *work)
+{
+ struct cn10k_ipsec *ipsec = container_of(work, struct cn10k_ipsec,
+ sa_work);
+ struct otx2_nic *pf = container_of(ipsec, struct otx2_nic, ipsec);
+
+ /* Disable static branch when no more SA enabled */
+ static_branch_disable(&cn10k_ipsec_sa_enabled);
+ rtnl_lock();
+ netdev_update_features(pf->netdev);
+ rtnl_unlock();
+}
+
+int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+
+ /* IPsec offload supported on cn10k */
+ if (!is_dev_support_ipsec_offload(pf->pdev))
+ return -EOPNOTSUPP;
+
+ /* Initialize CPT for outbound ipsec offload */
+ if (enable)
+ return cn10k_outb_cpt_init(netdev);
+
+ /* Don't do CPT cleanup if SA installed */
+ if (pf->ipsec.outb_sa_count) {
+ netdev_err(pf->netdev, "SA installed on this device\n");
+ return -EBUSY;
+ }
+
+ return cn10k_outb_cpt_clean(pf);
+}
+
+int cn10k_ipsec_init(struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ u32 sa_size;
+
+ if (!is_dev_support_ipsec_offload(pf->pdev))
+ return 0;
+
+ /* Each SA entry size is rounded up to a multiple of 128 bytes */
+ sa_size = sizeof(struct cn10k_tx_sa_s) % OTX2_ALIGN ?
+ (sizeof(struct cn10k_tx_sa_s) / OTX2_ALIGN + 1) *
+ OTX2_ALIGN : sizeof(struct cn10k_tx_sa_s);
+ pf->ipsec.sa_size = sa_size;
+
+ INIT_WORK(&pf->ipsec.sa_work, cn10k_ipsec_sa_wq_handler);
+ pf->ipsec.sa_workq = alloc_workqueue("cn10k_ipsec_sa_workq",
+ WQ_PERCPU, 0);
+ if (!pf->ipsec.sa_workq) {
+ netdev_err(pf->netdev, "SA alloc workqueue failed\n");
+ return -ENOMEM;
+ }
+
+ /* Set xfrm device ops */
+ netdev->xfrmdev_ops = &cn10k_ipsec_xfrmdev_ops;
+ netdev->hw_features |= NETIF_F_HW_ESP;
+ netdev->hw_enc_features |= NETIF_F_HW_ESP;
+
+ cn10k_cpt_device_set_unavailable(pf);
+ return 0;
+}
+EXPORT_SYMBOL(cn10k_ipsec_init);
+
+void cn10k_ipsec_clean(struct otx2_nic *pf)
+{
+ if (!is_dev_support_ipsec_offload(pf->pdev))
+ return;
+
+ if (!(pf->flags & OTX2_FLAG_IPSEC_OFFLOAD_ENABLED))
+ return;
+
+ if (pf->ipsec.sa_workq) {
+ destroy_workqueue(pf->ipsec.sa_workq);
+ pf->ipsec.sa_workq = NULL;
+ }
+
+ cn10k_outb_cpt_clean(pf);
+}
+EXPORT_SYMBOL(cn10k_ipsec_clean);
+
+static u16 cn10k_ipsec_get_ip_data_len(struct xfrm_state *x,
+ struct sk_buff *skb)
+{
+ struct ipv6hdr *ipv6h;
+ struct iphdr *iph;
+ u8 *src;
+
+ src = (u8 *)skb->data + ETH_HLEN;
+
+ if (x->props.family == AF_INET) {
+ iph = (struct iphdr *)src;
+ return ntohs(iph->tot_len);
+ }
+
+ ipv6h = (struct ipv6hdr *)src;
+ return ntohs(ipv6h->payload_len) + sizeof(struct ipv6hdr);
+}
+
+/* Prepare the CPT and NIX SQE scatter/gather subdescriptor structures.
+ * The NIX and CPT SG entries are the same size.
+ * Layout of a NIX SQE and CPT SG entry:
+ * -----------------------------
+ * | CPT Scatter Gather |
+ * | (SQE SIZE) |
+ * | |
+ * -----------------------------
+ * | NIX SQE |
+ * | (SQE SIZE) |
+ * | |
+ * -----------------------------
+ */
+bool otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int num_segs, int *offset)
+{
+ struct cpt_sg_s *cpt_sg = NULL;
+ struct nix_sqe_sg_s *sg = NULL;
+ u64 dma_addr, *iova = NULL;
+ u64 *cpt_iova = NULL;
+ u16 *sg_lens = NULL;
+ int seg, len;
+
+ sq->sg[sq->head].num_segs = 0;
+ cpt_sg = (struct cpt_sg_s *)(sq->sqe_base - sq->sqe_size);
+
+ for (seg = 0; seg < num_segs; seg++) {
+ if ((seg % MAX_SEGS_PER_SG) == 0) {
+ sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
+ sg->ld_type = NIX_SEND_LDTYPE_LDD;
+ sg->subdc = NIX_SUBDC_SG;
+ sg->segs = 0;
+ sg_lens = (void *)sg;
+ iova = (void *)sg + sizeof(*sg);
+			/* The next subdc always starts at a 16-byte boundary,
+			 * so whether sg->segs is 2 or 3, offset += 16 bytes.
+			 */
+ if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
+ *offset += sizeof(*sg) + (3 * sizeof(u64));
+ else
+ *offset += sizeof(*sg) + sizeof(u64);
+
+ cpt_sg += (seg / MAX_SEGS_PER_SG) * 4;
+ cpt_iova = (void *)cpt_sg + sizeof(*cpt_sg);
+ }
+ dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
+ if (dma_mapping_error(pfvf->dev, dma_addr))
+ return false;
+
+ sg_lens[seg % MAX_SEGS_PER_SG] = len;
+ sg->segs++;
+ *iova++ = dma_addr;
+ *cpt_iova++ = dma_addr;
+
+ /* Save DMA mapping info for later unmapping */
+ sq->sg[sq->head].dma_addr[seg] = dma_addr;
+ sq->sg[sq->head].size[seg] = len;
+ sq->sg[sq->head].num_segs++;
+
+ *cpt_sg = *(struct cpt_sg_s *)sg;
+ cpt_sg->rsvd_63_50 = 0;
+ }
+
+ sq->sg[sq->head].skb = (u64)skb;
+ return true;
+}
+
+static u16 cn10k_ipsec_get_param1(u8 iv_offset)
+{
+ u16 param1_val;
+
+ /* Set Crypto mode, disable L3/L4 checksum */
+ param1_val = CN10K_IPSEC_INST_PARAM1_DIS_L4_CSUM |
+ CN10K_IPSEC_INST_PARAM1_DIS_L3_CSUM;
+ param1_val |= (u16)iv_offset << CN10K_IPSEC_INST_PARAM1_IV_OFFSET_SHIFT;
+ return param1_val;
+}
+
+bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq,
+ struct otx2_snd_queue *sq, struct sk_buff *skb,
+ int num_segs, int size)
+{
+ struct cpt_inst_s inst;
+ struct cpt_res_s *res;
+ struct xfrm_state *x;
+ struct qmem *sa_info;
+ dma_addr_t dptr_iova;
+ struct sec_path *sp;
+ u8 encap_offset;
+ u8 auth_offset;
+ u8 gthr_size;
+ u8 iv_offset;
+ u16 dlen;
+
+	/* Check that IPsec offload is enabled */
+ if (!(pf->flags & OTX2_FLAG_IPSEC_OFFLOAD_ENABLED))
+ goto drop;
+
+ sp = skb_sec_path(skb);
+ if (unlikely(!sp->len))
+ goto drop;
+
+ x = xfrm_input_state(skb);
+ if (unlikely(!x))
+ goto drop;
+
+ if (x->props.mode != XFRM_MODE_TRANSPORT &&
+ x->props.mode != XFRM_MODE_TUNNEL)
+ goto drop;
+
+ dlen = cn10k_ipsec_get_ip_data_len(x, skb);
+ if (dlen == 0 && netif_msg_tx_err(pf)) {
+ netdev_err(pf->netdev, "Invalid IP header, ip-length zero\n");
+ goto drop;
+ }
+
+ /* Check for valid SA context */
+ sa_info = (struct qmem *)x->xso.offload_handle;
+ if (!sa_info)
+ goto drop;
+
+ memset(&inst, 0, sizeof(struct cpt_inst_s));
+
+ /* Get authentication offset */
+ if (x->props.family == AF_INET)
+ auth_offset = sizeof(struct iphdr);
+ else
+ auth_offset = sizeof(struct ipv6hdr);
+
+ /* IV offset is after ESP header */
+ iv_offset = auth_offset + sizeof(struct ip_esp_hdr);
+ /* Encap will start after IV */
+ encap_offset = iv_offset + GCM_RFC4106_IV_SIZE;
+
+ /* CPT Instruction word-1 */
+ res = (struct cpt_res_s *)(sq->cpt_resp->base + (64 * sq->head));
+ res->compcode = 0;
+ inst.res_addr = sq->cpt_resp->iova + (64 * sq->head);
+
+ /* CPT Instruction word-2 */
+ inst.rvu_pf_func = pf->pcifunc;
+
+ /* CPT Instruction word-3:
+ * Set QORD to force CPT_RES_S write completion
+ */
+ inst.qord = 1;
+
+ /* CPT Instruction word-4 */
+ /* inst.dlen should not include ICV length */
+ inst.dlen = dlen + ETH_HLEN - (x->aead->alg_icv_len / 8);
+ inst.opcode_major = CN10K_IPSEC_MAJOR_OP_OUTB_IPSEC;
+ inst.param1 = cn10k_ipsec_get_param1(iv_offset);
+
+ inst.param2 = encap_offset <<
+ CN10K_IPSEC_INST_PARAM2_ENC_DATA_OFFSET_SHIFT;
+ inst.param2 |= (u16)auth_offset <<
+ CN10K_IPSEC_INST_PARAM2_AUTH_DATA_OFFSET_SHIFT;
+
+ /* CPT Instruction word-5 */
+ gthr_size = num_segs / MAX_SEGS_PER_SG;
+ gthr_size = (num_segs % MAX_SEGS_PER_SG) ? gthr_size + 1 : gthr_size;
+
+ gthr_size &= 0xF;
+ dptr_iova = (sq->sqe_ring->iova + (sq->head * (sq->sqe_size * 2)));
+ inst.dptr = dptr_iova | ((u64)gthr_size << 60);
+
+ /* CPT Instruction word-6 */
+ inst.rptr = inst.dptr;
+
+ /* CPT Instruction word-7 */
+ inst.cptr = sa_info->iova;
+ inst.ctx_val = 1;
+ inst.egrp = CN10K_DEF_CPT_IPSEC_EGRP;
+
+ /* CPT Instruction word-0 */
+ inst.nixtxl = (size / 16) - 1;
+ inst.dat_offset = ETH_HLEN;
+ inst.nixtx_offset = sq->sqe_size;
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ /* Finally Flush the CPT instruction */
+ sq->head++;
+ sq->head &= (sq->sqe_cnt - 1);
+ cn10k_cpt_inst_flush(pf, &inst, sizeof(struct cpt_inst_s));
+ return true;
+drop:
+ dev_kfree_skb_any(skb);
+ return false;
+}
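
The DPTR encoding in cn10k_ipsec_transmit() above packs the number of scatter/gather blocks into the top nibble of the gather pointer. A rough stand-alone sketch of that arithmetic follows (illustrative user-space code, not part of the patch; the IOVA and segment count are made-up example values):

#include <stdint.h>
#include <stdio.h>

#define MAX_SEGS_PER_SG 3 /* segments per SG block, as in the driver */

int main(void)
{
	int num_segs = 5;                /* example: linear data + 4 frags */
	uint64_t dptr_iova = 0x1234000;  /* illustrative IOVA */
	uint8_t gthr_size;
	uint64_t dptr;

	/* One SG block describes up to MAX_SEGS_PER_SG segments */
	gthr_size = (num_segs + MAX_SEGS_PER_SG - 1) / MAX_SEGS_PER_SG;
	gthr_size &= 0xF;                /* the field is only 4 bits wide */

	/* The gather size rides in bits 63:60 of the DPTR word */
	dptr = dptr_iova | ((uint64_t)gthr_size << 60);

	printf("gthr_size=%u dptr=0x%llx\n", (unsigned)gthr_size,
	       (unsigned long long)dptr);
	return 0;
}

With MAX_SEGS_PER_SG = 3, five segments need two SG blocks, so bits 63:60 of DPTR carry the value 2.
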
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
new file mode 100644
index 000000000000..43fbce0d6039
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
@@ -0,0 +1,265 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell IPSEC offload driver
+ *
+ * Copyright (C) 2024 Marvell.
+ */
+
+#ifndef CN10K_IPSEC_H
+#define CN10K_IPSEC_H
+
+#include <linux/types.h>
+
+DECLARE_STATIC_KEY_FALSE(cn10k_ipsec_sa_enabled);
+
+/* CPT instruction size in bytes */
+#define CN10K_CPT_INST_SIZE 64
+
+/* CPT instruction (CPT_INST_S) queue length */
+#define CN10K_CPT_INST_QLEN 8200
+
+/* CPT instruction queue size passed to HW is in units of
+ * 40*CPT_INST_S messages.
+ */
+#define CN10K_CPT_SIZE_DIV40 (CN10K_CPT_INST_QLEN / 40)
+
+/* CPT needs 320 free entries */
+#define CN10K_CPT_INST_QLEN_EXTRA_BYTES (320 * CN10K_CPT_INST_SIZE)
+#define CN10K_CPT_EXTRA_SIZE_DIV40 (320 / 40)
+
+/* CPT instruction queue length in bytes */
+#define CN10K_CPT_INST_QLEN_BYTES \
+ ((CN10K_CPT_SIZE_DIV40 * 40 * CN10K_CPT_INST_SIZE) + \
+ CN10K_CPT_INST_QLEN_EXTRA_BYTES)
+
+/* CPT instruction group queue length in bytes */
+#define CN10K_CPT_INST_GRP_QLEN_BYTES \
+ ((CN10K_CPT_SIZE_DIV40 + CN10K_CPT_EXTRA_SIZE_DIV40) * 16)
+
+/* CPT FC length in bytes */
+#define CN10K_CPT_Q_FC_LEN 128
+
+/* Default CPT engine group for ipsec offload */
+#define CN10K_DEF_CPT_IPSEC_EGRP 1
+
+/* CN10K CPT LF registers */
+#define CPT_LFBASE (BLKTYPE_CPT << RVU_FUNC_BLKADDR_SHIFT)
+#define CN10K_CPT_LF_CTL (CPT_LFBASE | 0x10)
+#define CN10K_CPT_LF_INPROG (CPT_LFBASE | 0x40)
+#define CN10K_CPT_LF_Q_BASE (CPT_LFBASE | 0xf0)
+#define CN10K_CPT_LF_Q_SIZE (CPT_LFBASE | 0x100)
+#define CN10K_CPT_LF_Q_INST_PTR (CPT_LFBASE | 0x110)
+#define CN10K_CPT_LF_Q_GRP_PTR (CPT_LFBASE | 0x120)
+#define CN10K_CPT_LF_NQX(a) (CPT_LFBASE | 0x400 | (a) << 3)
+#define CN10K_CPT_LF_CTX_FLUSH (CPT_LFBASE | 0x510)
+
+/* IPSEC Instruction opcodes */
+#define CN10K_IPSEC_MAJOR_OP_WRITE_SA 0x01UL
+#define CN10K_IPSEC_MINOR_OP_WRITE_SA 0x09UL
+#define CN10K_IPSEC_MAJOR_OP_OUTB_IPSEC 0x2AUL
+
+enum cn10k_cpt_comp_e {
+ CN10K_CPT_COMP_E_NOTDONE = 0x00,
+ CN10K_CPT_COMP_E_GOOD = 0x01,
+ CN10K_CPT_COMP_E_FAULT = 0x02,
+ CN10K_CPT_COMP_E_HWERR = 0x04,
+ CN10K_CPT_COMP_E_INSTERR = 0x05,
+ CN10K_CPT_COMP_E_WARN = 0x06,
+ CN10K_CPT_COMP_E_MASK = 0x3F
+};
+
+struct cn10k_cpt_inst_queue {
+ u8 *vaddr;
+ u8 *real_vaddr;
+ dma_addr_t dma_addr;
+ dma_addr_t real_dma_addr;
+ u32 size;
+};
+
+enum cn10k_cpt_hw_state_e {
+ CN10K_CPT_HW_UNAVAILABLE,
+ CN10K_CPT_HW_AVAILABLE,
+ CN10K_CPT_HW_IN_USE
+};
+
+struct cn10k_ipsec {
+ /* Outbound CPT */
+ u64 io_addr;
+ atomic_t cpt_state;
+ struct cn10k_cpt_inst_queue iq;
+
+ /* SA info */
+ u32 sa_size;
+ u32 outb_sa_count;
+ struct work_struct sa_work;
+ struct workqueue_struct *sa_workq;
+};
+
+/* CN10K IPSEC Security Association (SA) */
+/* SA direction */
+#define CN10K_IPSEC_SA_DIR_INB 0
+#define CN10K_IPSEC_SA_DIR_OUTB 1
+/* SA protocol */
+#define CN10K_IPSEC_SA_IPSEC_PROTO_AH 0
+#define CN10K_IPSEC_SA_IPSEC_PROTO_ESP 1
+/* SA Encryption Type */
+#define CN10K_IPSEC_SA_ENCAP_TYPE_AES_GCM 5
+/* SA IPSEC mode Transport/Tunnel */
+#define CN10K_IPSEC_SA_IPSEC_MODE_TRANSPORT 0
+#define CN10K_IPSEC_SA_IPSEC_MODE_TUNNEL 1
+/* SA AES Key Length */
+#define CN10K_IPSEC_SA_AES_KEY_LEN_128 1
+#define CN10K_IPSEC_SA_AES_KEY_LEN_192 2
+#define CN10K_IPSEC_SA_AES_KEY_LEN_256 3
+/* IV Source */
+#define CN10K_IPSEC_SA_IV_SRC_COUNTER 0
+#define CN10K_IPSEC_SA_IV_SRC_PACKET 3
+
+struct cn10k_tx_sa_s {
+ u64 esn_en : 1; /* W0 */
+ u64 rsvd_w0_1_8 : 8;
+ u64 hw_ctx_off : 7;
+ u64 ctx_id : 16;
+ u64 rsvd_w0_32_47 : 16;
+ u64 ctx_push_size : 7;
+ u64 rsvd_w0_55 : 1;
+ u64 ctx_hdr_size : 2;
+ u64 aop_valid : 1;
+ u64 rsvd_w0_59 : 1;
+ u64 ctx_size : 4;
+ u64 w1; /* W1 */
+ u64 sa_valid : 1; /* W2 */
+ u64 sa_dir : 1;
+ u64 rsvd_w2_2_3 : 2;
+ u64 ipsec_mode : 1;
+ u64 ipsec_protocol : 1;
+ u64 aes_key_len : 2;
+ u64 enc_type : 3;
+ u64 rsvd_w2_11_19 : 9;
+ u64 iv_src : 2;
+ u64 rsvd_w2_22_31 : 10;
+ u64 rsvd_w2_32_63 : 32;
+ u64 w3; /* W3 */
+ u8 cipher_key[32]; /* W4 - W7 */
+ u32 rsvd_w8_0_31; /* W8 : IV */
+ u32 iv_gcm_salt;
+ u64 rsvd_w9_w30[22]; /* W9 - W30 */
+ u64 hw_ctx[6]; /* W31 - W36 */
+};
+
+/* CPT instruction parameter-1 */
+#define CN10K_IPSEC_INST_PARAM1_DIS_L4_CSUM 0x1
+#define CN10K_IPSEC_INST_PARAM1_DIS_L3_CSUM 0x2
+#define CN10K_IPSEC_INST_PARAM1_CRYPTO_MODE 0x20
+#define CN10K_IPSEC_INST_PARAM1_IV_OFFSET_SHIFT 8
+
+/* CPT instruction parameter-2 */
+#define CN10K_IPSEC_INST_PARAM2_ENC_DATA_OFFSET_SHIFT 0
+#define CN10K_IPSEC_INST_PARAM2_AUTH_DATA_OFFSET_SHIFT 8
+
+/* CPT Instruction Structure */
+struct cpt_inst_s {
+ u64 nixtxl : 3; /* W0 */
+ u64 doneint : 1;
+ u64 rsvd_w0_4_15 : 12;
+ u64 dat_offset : 8;
+ u64 ext_param1 : 8;
+ u64 nixtx_offset : 20;
+ u64 rsvd_w0_52_63 : 12;
+ u64 res_addr; /* W1 */
+ u64 tag : 32; /* W2 */
+ u64 tt : 2;
+ u64 grp : 10;
+ u64 rsvd_w2_44_47 : 4;
+ u64 rvu_pf_func : 16;
+ u64 qord : 1; /* W3 */
+ u64 rsvd_w3_1_2 : 2;
+ u64 wqe_ptr : 61;
+ u64 dlen : 16; /* W4 */
+ u64 param2 : 16;
+ u64 param1 : 16;
+ u64 opcode_major : 8;
+ u64 opcode_minor : 8;
+ u64 dptr; /* W5 */
+ u64 rptr; /* W6 */
+ u64 cptr : 60; /* W7 */
+ u64 ctx_val : 1;
+ u64 egrp : 3;
+};
+
+/* CPT Instruction Result Structure */
+struct cpt_res_s {
+ u64 compcode : 7; /* W0 */
+ u64 doneint : 1;
+ u64 uc_compcode : 8;
+ u64 uc_info : 48;
+ u64 esn; /* W1 */
+};
+
+/* CPT SG structure */
+struct cpt_sg_s {
+ u64 seg1_size : 16;
+ u64 seg2_size : 16;
+ u64 seg3_size : 16;
+ u64 segs : 2;
+ u64 rsvd_63_50 : 14;
+};
+
+/* CPT LF_INPROG Register */
+#define CPT_LF_INPROG_INFLIGHT GENMASK_ULL(8, 0)
+#define CPT_LF_INPROG_GRB_CNT GENMASK_ULL(39, 32)
+#define CPT_LF_INPROG_GWB_CNT GENMASK_ULL(47, 40)
+
+/* CPT LF_Q_GRP_PTR Register */
+#define CPT_LF_Q_GRP_PTR_DQ_PTR GENMASK_ULL(14, 0)
+#define CPT_LF_Q_GRP_PTR_NQ_PTR GENMASK_ULL(46, 32)
+
+/* CPT LF_Q_SIZE Register */
+#define CPT_LF_Q_BASE_ADDR GENMASK_ULL(52, 7)
+
+/* CPT LF_Q_SIZE Register */
+#define CPT_LF_Q_SIZE_DIV40 GENMASK_ULL(14, 0)
+
+/* CPT LF CTX Flush Register */
+#define CPT_LF_CTX_FLUSH_CPTR GENMASK_ULL(45, 0)
+
+#ifdef CONFIG_XFRM_OFFLOAD
+int cn10k_ipsec_init(struct net_device *netdev);
+void cn10k_ipsec_clean(struct otx2_nic *pf);
+int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable);
+bool otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int num_segs, int *offset);
+bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq,
+ struct otx2_snd_queue *sq, struct sk_buff *skb,
+ int num_segs, int size);
+#else
+static inline __maybe_unused int cn10k_ipsec_init(struct net_device *netdev)
+{
+ return 0;
+}
+
+static inline __maybe_unused void cn10k_ipsec_clean(struct otx2_nic *pf)
+{
+}
+
+static inline __maybe_unused
+int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable)
+{
+ return 0;
+}
+
+static inline bool __maybe_unused
+otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int num_segs, int *offset)
+{
+ return true;
+}
+
+static inline bool __maybe_unused
+cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq,
+ struct otx2_snd_queue *sq, struct sk_buff *skb,
+ int num_segs, int size)
+{
+ return true;
+}
+#endif
+#endif // CN10K_IPSEC_H
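
The sa_size programmed by cn10k_ipsec_init() is sizeof(struct cn10k_tx_sa_s) rounded up to the next OTX2_ALIGN boundary. A small stand-alone sketch of that round-up, assuming OTX2_ALIGN is 128 as in the driver (the 296-byte input is only an example):

#include <stdint.h>
#include <stdio.h>

#define OTX2_ALIGN 128 /* assumed to match the driver's alignment constant */

/* Round a context size up to the next OTX2_ALIGN boundary, mirroring the
 * sa_size computation in cn10k_ipsec_init().
 */
static uint32_t sa_size_roundup(uint32_t sz)
{
	return (sz % OTX2_ALIGN) ?
	       (sz / OTX2_ALIGN + 1) * OTX2_ALIGN : sz;
}

int main(void)
{
	/* e.g. a 296-byte SA context (37 64-bit words) rounds up to 384 */
	printf("%u -> %u\n", 296u, sa_size_roundup(296));
	return 0;
}
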
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
index 6cc7a78968fc..4c7e0f345cb5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
@@ -133,9 +133,7 @@ static const char *rsrc_name(enum mcs_rsrc_type rsrc_type)
return "SA";
default:
return "Unknown";
- };
-
- return "Unknown";
+ }
}
static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
@@ -533,7 +531,8 @@ static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf,
if (sw_tx_sc->encrypt)
sectag_tci |= (MCS_TCI_E | MCS_TCI_C);
- policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu);
+ policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU,
+ pfvf->netdev->mtu + OTX2_ETH_HLEN);
/* Write SecTag excluding AN bits(1..0) */
policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2);
policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c
new file mode 100644
index 000000000000..a60f8cf53feb
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#include "otx2_common.h"
+#include "otx2_reg.h"
+#include "otx2_struct.h"
+#include "cn10k.h"
+
+/* CN20K mbox AF => PFx irq handler */
+irqreturn_t cn20k_pfaf_mbox_intr_handler(int irq, void *pf_irq)
+{
+ struct otx2_nic *pf = pf_irq;
+ struct mbox *mw = &pf->mbox;
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
+ u64 pf_trig_val;
+
+ pf_trig_val = otx2_read64(pf, RVU_PF_INT) & 0x3ULL;
+
+ /* Clear the IRQ */
+ otx2_write64(pf, RVU_PF_INT, pf_trig_val);
+
+ if (pf_trig_val & BIT_ULL(0)) {
+ mbox = &mw->mbox_up;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(pf->mbox_wq, &mw->mbox_up_wrk);
+
+ trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF",
+ BIT_ULL(0));
+ }
+
+ if (pf_trig_val & BIT_ULL(1)) {
+ mbox = &mw->mbox;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(pf->mbox_wq, &mw->mbox_wrk);
+ trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF",
+ BIT_ULL(1));
+ }
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t cn20k_vfaf_mbox_intr_handler(int irq, void *vf_irq)
+{
+ struct otx2_nic *vf = vf_irq;
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
+ u64 vf_trig_val;
+
+ vf_trig_val = otx2_read64(vf, RVU_VF_INT) & 0x3ULL;
+ /* Clear the IRQ */
+ otx2_write64(vf, RVU_VF_INT, vf_trig_val);
+
+ /* Read latest mbox data */
+ smp_rmb();
+
+ if (vf_trig_val & BIT_ULL(1)) {
+ /* Check for PF => VF response messages */
+ mbox = &vf->mbox.mbox;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
+
+ trace_otx2_msg_interrupt(mbox->pdev, "DOWN reply from PF0 to VF",
+ BIT_ULL(1));
+ }
+
+ if (vf_trig_val & BIT_ULL(0)) {
+ /* Check for PF => VF notification messages */
+ mbox = &vf->mbox.mbox_up;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
+
+ trace_otx2_msg_interrupt(mbox->pdev, "UP message from PF0 to VF",
+ BIT_ULL(0));
+ }
+
+ return IRQ_HANDLED;
+}
+
+void cn20k_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
+{
+ /* Clear PF <=> VF mailbox IRQ */
+ otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(0), ~0ull);
+ otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(1), ~0ull);
+ otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(0), ~0ull);
+ otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(1), ~0ull);
+
+ /* Enable PF <=> VF mailbox IRQ */
+ otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(0), INTR_MASK(numvfs));
+ otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(0), INTR_MASK(numvfs));
+ if (numvfs > 64) {
+ numvfs -= 64;
+ otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(1),
+ INTR_MASK(numvfs));
+ otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(1),
+ INTR_MASK(numvfs));
+ }
+}
+
+void cn20k_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
+{
+ int vector, intr_vec, vec = 0;
+
+ /* Disable PF <=> VF mailbox IRQ */
+ otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(0), ~0ull);
+ otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(1), ~0ull);
+ otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(0), ~0ull);
+ otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(1), ~0ull);
+
+ otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(0), ~0ull);
+ otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(0), ~0ull);
+
+ if (numvfs > 64) {
+ otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(1), ~0ull);
+ otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(1), ~0ull);
+ }
+
+ for (intr_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX0; intr_vec <=
+ RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1; intr_vec++, vec++) {
+ vector = pci_irq_vector(pf->pdev, intr_vec);
+ free_irq(vector, pf->hw.pfvf_irq_devid[vec]);
+ }
+}
+
+irqreturn_t cn20k_pfvf_mbox_intr_handler(int irq, void *pf_irq)
+{
+ struct pf_irq_data *irq_data = pf_irq;
+ struct otx2_nic *pf = irq_data->pf;
+ struct mbox *mbox;
+ u64 intr;
+
+ /* Sync with mbox memory region */
+ rmb();
+
+ /* Clear interrupts */
+ intr = otx2_read64(pf, irq_data->intr_status);
+ otx2_write64(pf, irq_data->intr_status, intr);
+ mbox = pf->mbox_pfvf;
+
+ if (intr)
+ trace_otx2_msg_interrupt(pf->pdev, "VF(s) to PF", intr);
+
+ irq_data->pf_queue_work_hdlr(mbox, pf->mbox_pfvf_wq, irq_data->start,
+ irq_data->mdevs, intr);
+
+ return IRQ_HANDLED;
+}
+
+int cn20k_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
+{
+ struct otx2_hw *hw = &pf->hw;
+ struct pf_irq_data *irq_data;
+ int intr_vec, ret, vec = 0;
+ char *irq_name;
+
+ /* irq data for 4 PF intr vectors */
+ irq_data = devm_kcalloc(pf->dev, 4,
+ sizeof(struct pf_irq_data), GFP_KERNEL);
+ if (!irq_data)
+ return -ENOMEM;
+
+ for (intr_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX0; intr_vec <=
+ RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1; intr_vec++, vec++) {
+ switch (intr_vec) {
+ case RVU_MBOX_PF_INT_VEC_VFPF_MBOX0:
+ irq_data[vec].intr_status =
+ RVU_MBOX_PF_VFPF_INTX(0);
+ irq_data[vec].start = 0;
+ irq_data[vec].mdevs = 64;
+ break;
+ case RVU_MBOX_PF_INT_VEC_VFPF_MBOX1:
+ irq_data[vec].intr_status =
+ RVU_MBOX_PF_VFPF_INTX(1);
+ irq_data[vec].start = 64;
+ irq_data[vec].mdevs = 96;
+ break;
+ case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX0:
+ irq_data[vec].intr_status =
+ RVU_MBOX_PF_VFPF1_INTX(0);
+ irq_data[vec].start = 0;
+ irq_data[vec].mdevs = 64;
+ break;
+ case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1:
+ irq_data[vec].intr_status =
+ RVU_MBOX_PF_VFPF1_INTX(1);
+ irq_data[vec].start = 64;
+ irq_data[vec].mdevs = 96;
+ break;
+ }
+ irq_data[vec].pf_queue_work_hdlr = otx2_queue_vf_work;
+ irq_data[vec].vec_num = intr_vec;
+ irq_data[vec].pf = pf;
+
+ /* Register mailbox interrupt handler */
+ irq_name = &hw->irq_name[intr_vec * NAME_SIZE];
+ if (pf->pcifunc)
+ snprintf(irq_name, NAME_SIZE,
+ "RVUPF%d_VF%d Mbox%d", rvu_get_pf(pf->pdev,
+ pf->pcifunc), vec / 2, vec % 2);
+ else
+ snprintf(irq_name, NAME_SIZE, "RVUPF_VF%d Mbox%d",
+ vec / 2, vec % 2);
+
+ hw->pfvf_irq_devid[vec] = &irq_data[vec];
+ ret = request_irq(pci_irq_vector(pf->pdev, intr_vec),
+ pf->hw_ops->pfvf_mbox_intr_handler, 0,
+ irq_name,
+ &irq_data[vec]);
+ if (ret) {
+ dev_err(pf->dev,
+ "RVUPF: IRQ registration failed for PFVF mbox0 irq\n");
+ return ret;
+ }
+ }
+
+ cn20k_enable_pfvf_mbox_intr(pf, numvfs);
+
+ return 0;
+}
+
+#define RQ_BP_LVL_AURA (255 - ((85 * 256) / 100)) /* BP when 85% is full */
+
+static u8 cn20k_aura_bpid_idx(struct otx2_nic *pfvf, int aura_id)
+{
+#ifdef CONFIG_DCB
+ return pfvf->queue_to_pfc_map[aura_id];
+#else
+ return 0;
+#endif
+}
+
+static int cn20k_aura_aq_init(struct otx2_nic *pfvf, int aura_id,
+ int pool_id, int numptrs)
+{
+ struct npa_cn20k_aq_enq_req *aq;
+ struct otx2_pool *pool;
+ u8 bpid_idx;
+ int err;
+
+ pool = &pfvf->qset.pool[pool_id];
+
+	/* Allocate memory for HW to update the aura count.
+	 * Allocate one cache line so that it fits all FC_STYPE modes.
+	 */
+ if (!pool->fc_addr) {
+ err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN);
+ if (err)
+ return err;
+ }
+
+ /* Initialize this aura's context via AF */
+ aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox);
+ if (!aq) {
+ /* Shared mbox memory buffer is full, flush it and retry */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ return err;
+ aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+ }
+
+ aq->aura_id = aura_id;
+
+ /* Will be filled by AF with correct pool context address */
+ aq->aura.pool_addr = pool_id;
+ aq->aura.pool_caching = 1;
+ aq->aura.shift = ilog2(numptrs) - 8;
+ aq->aura.count = numptrs;
+ aq->aura.limit = numptrs;
+ aq->aura.avg_level = 255;
+ aq->aura.ena = 1;
+ aq->aura.fc_ena = 1;
+ aq->aura.fc_addr = pool->fc_addr->iova;
+ aq->aura.fc_hyst_bits = 0; /* Store count on all updates */
+
+ /* Enable backpressure for RQ aura */
+ if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) {
+ aq->aura.bp_ena = 0;
+ /* If NIX1 LF is attached then specify NIX1_RX.
+ *
+ * Below NPA_AURA_S[BP_ENA] is set according to the
+ * NPA_BPINTF_E enumeration given as:
+ * 0x0 + a*0x1 where 'a' is 0 for NIX0_RX and 1 for NIX1_RX so
+ * NIX0_RX is 0x0 + 0*0x1 = 0
+ * NIX1_RX is 0x0 + 1*0x1 = 1
+ * But in HRM it is given that
+ * "NPA_AURA_S[BP_ENA](w1[33:32]) - Enable aura backpressure to
+ * NIX-RX based on [BP] level. One bit per NIX-RX; index
+ * enumerated by NPA_BPINTF_E."
+ */
+ if (pfvf->nix_blkaddr == BLKADDR_NIX1)
+ aq->aura.bp_ena = 1;
+
+ bpid_idx = cn20k_aura_bpid_idx(pfvf, aura_id);
+ aq->aura.bpid = pfvf->bpid[bpid_idx];
+
+ /* Set backpressure level for RQ's Aura */
+ aq->aura.bp = RQ_BP_LVL_AURA;
+ }
+
+ /* Fill AQ info */
+ aq->ctype = NPA_AQ_CTYPE_AURA;
+ aq->op = NPA_AQ_INSTOP_INIT;
+
+ return 0;
+}
+
+static int cn20k_pool_aq_init(struct otx2_nic *pfvf, u16 pool_id,
+ int stack_pages, int numptrs, int buf_size,
+ int type)
+{
+ struct page_pool_params pp_params = { 0 };
+ struct npa_cn20k_aq_enq_req *aq;
+ struct otx2_pool *pool;
+ int err, sz;
+
+ pool = &pfvf->qset.pool[pool_id];
+ /* Alloc memory for stack which is used to store buffer pointers */
+ err = qmem_alloc(pfvf->dev, &pool->stack,
+ stack_pages, pfvf->hw.stack_pg_bytes);
+ if (err)
+ return err;
+
+ pool->rbsize = buf_size;
+
+ /* Initialize this pool's context via AF */
+ aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox);
+ if (!aq) {
+ /* Shared mbox memory buffer is full, flush it and retry */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ qmem_free(pfvf->dev, pool->stack);
+ return err;
+ }
+ aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox);
+ if (!aq) {
+ qmem_free(pfvf->dev, pool->stack);
+ return -ENOMEM;
+ }
+ }
+
+ aq->aura_id = pool_id;
+ aq->pool.stack_base = pool->stack->iova;
+ aq->pool.stack_caching = 1;
+ aq->pool.ena = 1;
+ aq->pool.buf_size = buf_size / 128;
+ aq->pool.stack_max_pages = stack_pages;
+ aq->pool.shift = ilog2(numptrs) - 8;
+ aq->pool.ptr_start = 0;
+ aq->pool.ptr_end = ~0ULL;
+
+ /* Fill AQ info */
+ aq->ctype = NPA_AQ_CTYPE_POOL;
+ aq->op = NPA_AQ_INSTOP_INIT;
+
+ if (type != AURA_NIX_RQ) {
+ pool->page_pool = NULL;
+ return 0;
+ }
+
+ sz = ALIGN(ALIGN(SKB_DATA_ALIGN(buf_size), OTX2_ALIGN), PAGE_SIZE);
+ pp_params.order = get_order(sz);
+ pp_params.flags = PP_FLAG_DMA_MAP;
+ pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
+ pp_params.nid = NUMA_NO_NODE;
+ pp_params.dev = pfvf->dev;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+ pool->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool->page_pool)) {
+ netdev_err(pfvf->netdev, "Creation of page pool failed\n");
+ return PTR_ERR(pool->page_pool);
+ }
+
+ return 0;
+}
+
+static int cn20k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura)
+{
+ struct nix_cn20k_aq_enq_req *aq;
+ struct otx2_nic *pfvf = dev;
+
+ /* Get memory to put this msg */
+ aq = otx2_mbox_alloc_msg_nix_cn20k_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ aq->sq.cq = pfvf->hw.rx_queues + qidx;
+ aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
+ aq->sq.cq_ena = 1;
+ aq->sq.ena = 1;
+ aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
+ aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
+ aq->sq.default_chan = pfvf->hw.tx_chan_base + chan_offset;
+ aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
+ aq->sq.sqb_aura = sqb_aura;
+ aq->sq.sq_int_ena = NIX_SQINT_BITS;
+ aq->sq.qint_idx = 0;
+	/* Due to pipelining, a minimum of 2000 unused SQ CQEs must be
+	 * maintained to avoid CQ overflow.
+	 */
+ aq->sq.cq_limit = (SEND_CQ_SKID * 256) / (pfvf->qset.sqe_cnt);
+
+ /* Fill AQ info */
+ aq->qidx = qidx;
+ aq->ctype = NIX_AQ_CTYPE_SQ;
+ aq->op = NIX_AQ_INSTOP_INIT;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+static struct dev_hw_ops cn20k_hw_ops = {
+ .pfaf_mbox_intr_handler = cn20k_pfaf_mbox_intr_handler,
+ .vfaf_mbox_intr_handler = cn20k_vfaf_mbox_intr_handler,
+ .pfvf_mbox_intr_handler = cn20k_pfvf_mbox_intr_handler,
+ .sq_aq_init = cn20k_sq_aq_init,
+ .sqe_flush = cn10k_sqe_flush,
+ .aura_freeptr = cn10k_aura_freeptr,
+ .refill_pool_ptrs = cn10k_refill_pool_ptrs,
+ .aura_aq_init = cn20k_aura_aq_init,
+ .pool_aq_init = cn20k_pool_aq_init,
+};
+
+void cn20k_init(struct otx2_nic *pfvf)
+{
+ pfvf->hw_ops = &cn20k_hw_ops;
+}
+EXPORT_SYMBOL(cn20k_init);
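
RQ_BP_LVL_AURA above (and the RQ_PASS_LVL_AURA/RQ_DROP_LVL_AURA macros in otx2_common.c) convert a percentage-full threshold into the 0-255 level scale used for the aura backpressure/pass/drop thresholds. A brief stand-alone sketch of that conversion (illustrative values, not part of the patch):

#include <stdio.h>

/* Aura occupancy levels are expressed on a 0..255 scale, so a
 * "percent full" threshold maps to 255 - (pct * 256) / 100.
 */
static int aura_level_for_pct(int pct)
{
	return 255 - ((pct * 256) / 100);
}

int main(void)
{
	printf("85%% full -> level %d\n", aura_level_for_pct(85)); /* 38 */
	printf("95%% full -> level %d\n", aura_level_for_pct(95)); /* 12 */
	printf("99%% full -> level %d\n", aura_level_for_pct(99)); /* 2 */
	return 0;
}
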
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h
new file mode 100644
index 000000000000..832adaf8c57f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#ifndef CN20K_H
+#define CN20K_H
+
+#include "otx2_common.h"
+
+void cn20k_init(struct otx2_nic *pfvf);
+int cn20k_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs);
+void cn20k_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs);
+void cn20k_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs);
+#endif /* CN20K_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 6e0183f0d5a1..75ebb17419c4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -10,22 +10,30 @@
#include <net/page_pool/helpers.h>
#include <net/tso.h>
#include <linux/bitfield.h>
+#include <linux/dcbnl.h>
+#include <net/xfrm.h>
#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_struct.h"
#include "cn10k.h"
+#include "otx2_xsk.h"
+
+static bool otx2_is_pfc_enabled(struct otx2_nic *pfvf)
+{
+ return IS_ENABLED(CONFIG_DCB) && !!pfvf->pfc_en;
+}
static void otx2_nix_rq_op_stats(struct queue_stats *stats,
struct otx2_nic *pfvf, int qidx)
{
u64 incr = (u64)qidx << 32;
- u64 *ptr;
+ void __iomem *ptr;
- ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS);
+ ptr = otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS);
stats->bytes = otx2_atomic64_add(incr, ptr);
- ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS);
+ ptr = otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS);
stats->pkts = otx2_atomic64_add(incr, ptr);
}
@@ -33,12 +41,12 @@ static void otx2_nix_sq_op_stats(struct queue_stats *stats,
struct otx2_nic *pfvf, int qidx)
{
u64 incr = (u64)qidx << 32;
- u64 *ptr;
+ void __iomem *ptr;
- ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS);
+ ptr = otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS);
stats->bytes = otx2_atomic64_add(incr, ptr);
- ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS);
+ ptr = otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS);
stats->pkts = otx2_atomic64_add(incr, ptr);
}
@@ -83,6 +91,7 @@ int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx)
otx2_nix_rq_op_stats(&rq->stats, pfvf, qidx);
return 1;
}
+EXPORT_SYMBOL(otx2_update_rq_stats);
int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx)
{
@@ -99,6 +108,7 @@ int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx)
otx2_nix_sq_op_stats(&sq->stats, pfvf, qidx);
return 1;
}
+EXPORT_SYMBOL(otx2_update_sq_stats);
void otx2_get_dev_stats(struct otx2_nic *pfvf)
{
@@ -114,7 +124,9 @@ void otx2_get_dev_stats(struct otx2_nic *pfvf)
dev_stats->rx_ucast_frames;
dev_stats->tx_bytes = OTX2_GET_TX_STATS(TX_OCTS);
- dev_stats->tx_drops = OTX2_GET_TX_STATS(TX_DROP);
+ dev_stats->tx_drops = OTX2_GET_TX_STATS(TX_DROP) +
+ (unsigned long)atomic_long_read(&dev_stats->tx_discards);
+
dev_stats->tx_bcast_frames = OTX2_GET_TX_STATS(TX_BCAST);
dev_stats->tx_mcast_frames = OTX2_GET_TX_STATS(TX_MCAST);
dev_stats->tx_ucast_frames = OTX2_GET_TX_STATS(TX_UCAST);
@@ -246,13 +258,14 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
mutex_unlock(&pfvf->mbox.lock);
return err;
}
+EXPORT_SYMBOL(otx2_hw_set_mtu);
int otx2_config_pause_frm(struct otx2_nic *pfvf)
{
struct cgx_pause_frm_cfg *req;
int err;
- if (is_otx2_lbkvf(pfvf->pdev))
+ if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev))
return 0;
mutex_lock(&pfvf->mbox.lock);
@@ -307,19 +320,22 @@ fail:
return err;
}
-int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id)
+int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id, const u32 *ind_tbl)
{
struct otx2_rss_info *rss = &pfvf->hw.rss_info;
const int index = rss->rss_size * ctx_id;
struct mbox *mbox = &pfvf->mbox;
- struct otx2_rss_ctx *rss_ctx;
struct nix_aq_enq_req *aq;
int idx, err;
mutex_lock(&mbox->lock);
- rss_ctx = rss->rss_ctx[ctx_id];
+ ind_tbl = ind_tbl ?: rss->ind_tbl;
/* Get memory to put this msg */
for (idx = 0; idx < rss->rss_size; idx++) {
+ /* Ignore the queue if AF_XDP zero copy is enabled */
+ if (test_bit(ind_tbl[idx], pfvf->af_xdp_zc_qidx))
+ continue;
+
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
if (!aq) {
/* The shared memory buffer can be full.
@@ -337,7 +353,7 @@ int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id)
}
}
- aq->rss.rq = rss_ctx->ind_tbl[idx];
+ aq->rss.rq = ind_tbl[idx];
/* Fill AQ info */
aq->qidx = index + idx;
@@ -375,30 +391,22 @@ void otx2_set_rss_key(struct otx2_nic *pfvf)
int otx2_rss_init(struct otx2_nic *pfvf)
{
struct otx2_rss_info *rss = &pfvf->hw.rss_info;
- struct otx2_rss_ctx *rss_ctx;
int idx, ret = 0;
- rss->rss_size = sizeof(*rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]);
+ rss->rss_size = sizeof(*rss->ind_tbl);
/* Init RSS key if it is not setup already */
if (!rss->enable)
netdev_rss_key_fill(rss->key, sizeof(rss->key));
otx2_set_rss_key(pfvf);
- if (!netif_is_rxfh_configured(pfvf->netdev)) {
- /* Set RSS group 0 as default indirection table */
- rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP] = kzalloc(rss->rss_size,
- GFP_KERNEL);
- if (!rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP])
- return -ENOMEM;
-
- rss_ctx = rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP];
+ if (!netif_is_rxfh_configured(pfvf->netdev))
for (idx = 0; idx < rss->rss_size; idx++)
- rss_ctx->ind_tbl[idx] =
+ rss->ind_tbl[idx] =
ethtool_rxfh_indir_default(idx,
pfvf->hw.rx_queues);
- }
- ret = otx2_set_rss_table(pfvf, DEFAULT_RSS_CONTEXT_GROUP);
+
+ ret = otx2_set_rss_table(pfvf, DEFAULT_RSS_CONTEXT_GROUP, NULL);
if (ret)
return ret;
@@ -539,10 +547,13 @@ static int otx2_alloc_pool_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,
}
static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
- dma_addr_t *dma)
+ dma_addr_t *dma, int qidx, int idx)
{
u8 *buf;
+ if (pool->xsk_pool)
+ return otx2_xsk_pool_alloc_buf(pfvf, pool, dma, idx);
+
if (pool->page_pool)
return otx2_alloc_pool_buf(pfvf, pool, dma);
@@ -561,12 +572,12 @@ static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
}
int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
- dma_addr_t *dma)
+ dma_addr_t *dma, int qidx, int idx)
{
int ret;
local_bh_disable();
- ret = __otx2_alloc_rbuf(pfvf, pool, dma);
+ ret = __otx2_alloc_rbuf(pfvf, pool, dma, qidx, idx);
local_bh_enable();
return ret;
}
@@ -574,7 +585,8 @@ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
dma_addr_t *dma)
{
- if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma)))
+ if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma,
+ cq->cq_idx, cq->pool_ptrs - 1)))
return -ENOMEM;
return 0;
}
@@ -646,12 +658,22 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq);
req->regval[2] = dwrr_val;
} else if (lvl == NIX_TXSCH_LVL_TL4) {
+ int sdp_chan = hw->tx_chan_base + prio;
+
+ if (is_otx2_sdp_rep(pfvf->pdev))
+ prio = 0;
parent = schq_list[NIX_TXSCH_LVL_TL3][prio];
req->reg[0] = NIX_AF_TL4X_PARENT(schq);
req->regval[0] = (u64)parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
req->regval[1] = dwrr_val;
+ if (is_otx2_sdp_rep(pfvf->pdev)) {
+ req->num_regs++;
+ req->reg[2] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
+ req->regval[2] = BIT_ULL(12) | BIT_ULL(13) |
+ (sdp_chan & 0xff);
+ }
} else if (lvl == NIX_TXSCH_LVL_TL3) {
parent = schq_list[NIX_TXSCH_LVL_TL2][prio];
req->reg[0] = NIX_AF_TL3X_PARENT(schq);
@@ -659,7 +681,8 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
req->num_regs++;
req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
req->regval[1] = dwrr_val;
- if (lvl == hw->txschq_link_cfg_lvl) {
+ if (lvl == hw->txschq_link_cfg_lvl &&
+ !is_otx2_sdp_rep(pfvf->pdev)) {
req->num_regs++;
req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
/* Enable this queue and backpressure
@@ -676,7 +699,8 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
req->regval[1] = (u64)hw->txschq_aggr_lvl_rr_prio << 24 | dwrr_val;
- if (lvl == hw->txschq_link_cfg_lvl) {
+ if (lvl == hw->txschq_link_cfg_lvl &&
+ !is_otx2_sdp_rep(pfvf->pdev)) {
req->num_regs++;
req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
/* Enable this queue and backpressure
@@ -735,6 +759,7 @@ EXPORT_SYMBOL(otx2_smq_flush);
int otx2_txsch_alloc(struct otx2_nic *pfvf)
{
+ int chan_cnt = pfvf->hw.tx_chan_cnt;
struct nix_txsch_alloc_req *req;
struct nix_txsch_alloc_rsp *rsp;
int lvl, schq, rc;
@@ -747,6 +772,12 @@ int otx2_txsch_alloc(struct otx2_nic *pfvf)
/* Request one schq per level */
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
req->schq[lvl] = 1;
+
+ if (is_otx2_sdp_rep(pfvf->pdev) && chan_cnt > 1) {
+ req->schq[NIX_TXSCH_LVL_SMQ] = chan_cnt;
+ req->schq[NIX_TXSCH_LVL_TL4] = chan_cnt;
+ }
+
rc = otx2_sync_mbox_msg(&pfvf->mbox);
if (rc)
return rc;
@@ -757,10 +788,12 @@ int otx2_txsch_alloc(struct otx2_nic *pfvf)
return PTR_ERR(rsp);
/* Setup transmit scheduler list */
- for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ pfvf->hw.txschq_cnt[lvl] = rsp->schq[lvl];
for (schq = 0; schq < rsp->schq[lvl]; schq++)
pfvf->hw.txschq_list[lvl][schq] =
rsp->schq_list[lvl][schq];
+ }
pfvf->hw.txschq_link_cfg_lvl = rsp->link_cfg_lvl;
pfvf->hw.txschq_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio;
@@ -798,12 +831,15 @@ EXPORT_SYMBOL(otx2_txschq_free_one);
void otx2_txschq_stop(struct otx2_nic *pfvf)
{
- int lvl, schq;
+ int lvl, schq, idx;
/* free non QOS TLx nodes */
- for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
- otx2_txschq_free_one(pfvf, lvl,
- pfvf->hw.txschq_list[lvl][0]);
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ for (idx = 0; idx < pfvf->hw.txschq_cnt[lvl]; idx++) {
+ otx2_txschq_free_one(pfvf, lvl,
+ pfvf->hw.txschq_list[lvl][idx]);
+ }
+ }
/* Clear the txschq list */
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
@@ -817,9 +853,10 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
{
int qidx, sqe_tail, sqe_head;
struct otx2_snd_queue *sq;
- u64 incr, *ptr, val;
+ void __iomem *ptr;
+ u64 incr, val;
- ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
+ ptr = otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {
sq = &pfvf->qset.sq[qidx];
if (!sq->sqb_ptrs)
@@ -850,7 +887,7 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */
-static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
+int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
{
struct otx2_qset *qset = &pfvf->qset;
struct nix_aq_enq_req *aq;
@@ -883,7 +920,7 @@ static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
return otx2_sync_mbox_msg(&pfvf->mbox);
}
-int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
+int otx2_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura)
{
struct otx2_nic *pfvf = dev;
struct otx2_snd_queue *sq;
@@ -902,7 +939,7 @@ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.ena = 1;
aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
- aq->sq.default_chan = pfvf->hw.tx_chan_base;
+ aq->sq.default_chan = pfvf->hw.tx_chan_base + chan_offset;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
aq->sq.sqb_aura = sqb_aura;
aq->sq.sq_int_ena = NIX_SQINT_BITS;
@@ -925,6 +962,7 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
struct otx2_qset *qset = &pfvf->qset;
struct otx2_snd_queue *sq;
struct otx2_pool *pool;
+ u8 chan_offset;
int err;
pool = &pfvf->qset.pool[sqb_aura];
@@ -936,6 +974,29 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
if (err)
return err;
+	/* Allocate memory for the NIX SQE (which includes the NIX SG) and CPT
+	 * SG. NIX and CPT SG entries are the same size; allocate the CPT SG
+	 * with the same size as the NIX SQE to keep base addresses aligned.
+ * Layout of a NIX SQE and CPT SG entry:
+ * -----------------------------
+ * | CPT Scatter Gather |
+ * | (SQE SIZE) |
+ * | |
+ * -----------------------------
+ * | NIX SQE |
+ * | (SQE SIZE) |
+ * | |
+ * -----------------------------
+ */
+ err = qmem_alloc(pfvf->dev, &sq->sqe_ring, qset->sqe_cnt,
+ sq->sqe_size * 2);
+ if (err)
+ return err;
+
+ err = qmem_alloc(pfvf->dev, &sq->cpt_resp, qset->sqe_cnt, 64);
+ if (err)
+ return err;
+
if (qidx < pfvf->hw.tx_queues) {
err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
TSO_HEADER_SIZE);
@@ -970,8 +1031,13 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
sq->stats.bytes = 0;
sq->stats.pkts = 0;
+ /* Attach XSK_BUFF_POOL to XDP queue */
+ if (qidx > pfvf->hw.xdp_queues)
+ otx2_attach_xsk_buff(pfvf, sq, (qidx - pfvf->hw.xdp_queues));
- err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura);
+
+ chan_offset = qidx % pfvf->hw.tx_chan_cnt;
+ err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, chan_offset, sqb_aura);
if (err) {
kfree(sq->sg);
sq->sg = NULL;
@@ -982,12 +1048,13 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
}
-static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
+int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
{
struct otx2_qset *qset = &pfvf->qset;
int err, pool_id, non_xdp_queues;
struct nix_aq_enq_req *aq;
struct otx2_cq_queue *cq;
+ struct otx2_pool *pool;
cq = &qset->cq[qidx];
cq->cq_idx = qidx;
@@ -996,8 +1063,20 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
cq->cq_type = CQ_RX;
cq->cint_idx = qidx;
cq->cqe_cnt = qset->rqe_cnt;
- if (pfvf->xdp_prog)
+ if (pfvf->xdp_prog) {
xdp_rxq_info_reg(&cq->xdp_rxq, pfvf->netdev, qidx, 0);
+ pool = &qset->pool[qidx];
+ if (pool->xsk_pool) {
+ xdp_rxq_info_reg_mem_model(&cq->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL);
+ xsk_pool_set_rxq_info(pool->xsk_pool, &cq->xdp_rxq);
+ } else if (pool->page_pool) {
+ xdp_rxq_info_reg_mem_model(&cq->xdp_rxq,
+ MEM_TYPE_PAGE_POOL,
+ pool->page_pool);
+ }
+ }
} else if (qidx < non_xdp_queues) {
cq->cq_type = CQ_TX;
cq->cint_idx = qidx - pfvf->hw.rx_queues;
@@ -1216,9 +1295,10 @@ void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
page = virt_to_head_page(phys_to_virt(pa));
-
if (pool->page_pool) {
page_pool_put_full_page(pool->page_pool, page, true);
+ } else if (pool->xsk_pool) {
+ /* Note: No way of identifying xdp_buff */
} else {
dma_unmap_page_attrs(pfvf->dev, iova, size,
DMA_FROM_DEVICE,
@@ -1233,6 +1313,7 @@ void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type)
int pool_id, pool_start = 0, pool_end = 0, size = 0;
struct otx2_pool *pool;
u64 iova;
+ int idx;
if (type == AURA_NIX_SQ) {
pool_start = otx2_get_pool_idx(pfvf, type, 0);
@@ -1247,16 +1328,21 @@ void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type)
/* Free SQB and RQB pointers from the aura pool */
for (pool_id = pool_start; pool_id < pool_end; pool_id++) {
- iova = otx2_aura_allocptr(pfvf, pool_id);
pool = &pfvf->qset.pool[pool_id];
+ iova = otx2_aura_allocptr(pfvf, pool_id);
while (iova) {
if (type == AURA_NIX_RQ)
iova -= OTX2_HEAD_ROOM;
-
otx2_free_bufs(pfvf, pool, iova, size);
-
iova = otx2_aura_allocptr(pfvf, pool_id);
}
+
+ for (idx = 0 ; idx < pool->xdp_cnt; idx++) {
+ if (!pool->xdp[idx])
+ continue;
+
+ xsk_buff_free(pool->xdp[idx]);
+ }
}
}
@@ -1273,7 +1359,8 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf)
qmem_free(pfvf->dev, pool->stack);
qmem_free(pfvf->dev, pool->fc_addr);
page_pool_destroy(pool->page_pool);
- pool->page_pool = NULL;
+ devm_kfree(pfvf->dev, pool->xdp);
+ pool->xsk_pool = NULL;
}
devm_kfree(pfvf->dev, pfvf->qset.pool);
pfvf->qset.pool = NULL;
@@ -1282,6 +1369,13 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf)
int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
int pool_id, int numptrs)
{
+ return pfvf->hw_ops->aura_aq_init(pfvf, aura_id, pool_id,
+ numptrs);
+}
+
+int otx2_aura_aq_init(struct otx2_nic *pfvf, int aura_id,
+ int pool_id, int numptrs)
+{
struct npa_aq_enq_req *aq;
struct otx2_pool *pool;
int err;
@@ -1359,7 +1453,15 @@ int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
int stack_pages, int numptrs, int buf_size, int type)
{
+ return pfvf->hw_ops->pool_aq_init(pfvf, pool_id, stack_pages, numptrs,
+ buf_size, type);
+}
+
+int otx2_pool_aq_init(struct otx2_nic *pfvf, u16 pool_id,
+ int stack_pages, int numptrs, int buf_size, int type)
+{
struct page_pool_params pp_params = { 0 };
+ struct xsk_buff_pool *xsk_pool;
struct npa_aq_enq_req *aq;
struct otx2_pool *pool;
int err;
@@ -1403,21 +1505,33 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
aq->ctype = NPA_AQ_CTYPE_POOL;
aq->op = NPA_AQ_INSTOP_INIT;
- if (type != AURA_NIX_RQ) {
- pool->page_pool = NULL;
+ if (type != AURA_NIX_RQ)
+ return 0;
+
+ if (!test_bit(pool_id, pfvf->af_xdp_zc_qidx)) {
+ pp_params.order = get_order(buf_size);
+ pp_params.flags = PP_FLAG_DMA_MAP;
+ pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
+ pp_params.nid = NUMA_NO_NODE;
+ pp_params.dev = pfvf->dev;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+ pool->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool->page_pool)) {
+ netdev_err(pfvf->netdev, "Creation of page pool failed\n");
+ return PTR_ERR(pool->page_pool);
+ }
return 0;
}
- pp_params.order = get_order(buf_size);
- pp_params.flags = PP_FLAG_DMA_MAP;
- pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
- pp_params.nid = NUMA_NO_NODE;
- pp_params.dev = pfvf->dev;
- pp_params.dma_dir = DMA_FROM_DEVICE;
- pool->page_pool = page_pool_create(&pp_params);
- if (IS_ERR(pool->page_pool)) {
- netdev_err(pfvf->netdev, "Creation of page pool failed\n");
- return PTR_ERR(pool->page_pool);
+ /* Set XSK pool to support AF_XDP zero-copy */
+ xsk_pool = xsk_get_pool_from_qid(pfvf->netdev, pool_id);
+ if (xsk_pool) {
+ pool->xsk_pool = xsk_pool;
+ pool->xdp_cnt = numptrs;
+ pool->xdp = devm_kcalloc(pfvf->dev,
+ numptrs, sizeof(struct xdp_buff *), GFP_KERNEL);
+ if (!pool->xdp)
+ return -ENOMEM;
}
return 0;
@@ -1478,9 +1592,18 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
}
for (ptr = 0; ptr < num_sqbs; ptr++) {
- err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
- if (err)
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr);
+ if (err) {
+ if (pool->xsk_pool) {
+ ptr--;
+ while (ptr >= 0) {
+ xsk_buff_free(pool->xdp[ptr]);
+ ptr--;
+ }
+ }
goto err_mem;
+ }
+
pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
}
@@ -1530,11 +1653,19 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
/* Allocate pointers and free them to aura/pool */
for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
pool = &pfvf->qset.pool[pool_id];
+
for (ptr = 0; ptr < num_ptrs; ptr++) {
- err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
- if (err)
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr);
+ if (err) {
+ if (pool->xsk_pool) {
+ while (ptr)
+ xsk_buff_free(pool->xdp[--ptr]);
+ }
return -ENOMEM;
+ }
+
pfvf->hw_ops->aura_freeptr(pfvf, pool_id,
+ pool->xsk_pool ? bufptr :
bufptr + OTX2_HEAD_ROOM);
}
}
@@ -1693,18 +1824,43 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
return -ENOMEM;
req->chan_base = 0;
-#ifdef CONFIG_DCB
- req->chan_cnt = pfvf->pfc_en ? IEEE_8021QAZ_MAX_TCS : 1;
- req->bpid_per_chan = pfvf->pfc_en ? 1 : 0;
-#else
- req->chan_cnt = 1;
- req->bpid_per_chan = 0;
-#endif
+ if (otx2_is_pfc_enabled(pfvf)) {
+ req->chan_cnt = IEEE_8021QAZ_MAX_TCS;
+ req->bpid_per_chan = 1;
+ } else {
+ req->chan_cnt = pfvf->hw.rx_chan_cnt;
+ req->bpid_per_chan = 0;
+ }
return otx2_sync_mbox_msg(&pfvf->mbox);
}
EXPORT_SYMBOL(otx2_nix_config_bp);
+int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable)
+{
+ struct nix_bp_cfg_req *req;
+
+ if (enable)
+ req = otx2_mbox_alloc_msg_nix_cpt_bp_enable(&pfvf->mbox);
+ else
+ req = otx2_mbox_alloc_msg_nix_cpt_bp_disable(&pfvf->mbox);
+
+ if (!req)
+ return -ENOMEM;
+
+ req->chan_base = 0;
+ if (otx2_is_pfc_enabled(pfvf)) {
+ req->chan_cnt = IEEE_8021QAZ_MAX_TCS;
+ req->bpid_per_chan = 1;
+ } else {
+ req->chan_cnt = pfvf->hw.rx_chan_cnt;
+ req->bpid_per_chan = 0;
+ }
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+EXPORT_SYMBOL(otx2_nix_cpt_config_bp);
+
/* Mbox message handlers */
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
struct cgx_stats_rsp *rsp)
@@ -1738,6 +1894,8 @@ void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
pfvf->hw.sqb_size = rsp->sqb_size;
pfvf->hw.rx_chan_base = rsp->rx_chan_base;
pfvf->hw.tx_chan_base = rsp->tx_chan_base;
+ pfvf->hw.rx_chan_cnt = rsp->rx_chan_cnt;
+ pfvf->hw.tx_chan_cnt = rsp->tx_chan_cnt;
pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx;
pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx;
pfvf->hw.cgx_links = rsp->cgx_links;
@@ -1782,6 +1940,7 @@ void otx2_free_cints(struct otx2_nic *pfvf, int n)
free_irq(vector, &qset->napi[qidx]);
}
}
+EXPORT_SYMBOL(otx2_free_cints);
void otx2_set_cints_affinity(struct otx2_nic *pfvf)
{
@@ -1902,6 +2061,43 @@ int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t
}
EXPORT_SYMBOL(otx2_handle_ntuple_tc_features);
+int otx2_set_hw_capabilities(struct otx2_nic *pfvf)
+{
+ struct mbox *mbox = &pfvf->mbox;
+ struct otx2_hw *hw = &pfvf->hw;
+ struct get_hw_cap_rsp *rsp;
+ struct msg_req *req;
+ int ret = -ENOMEM;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_get_hw_cap(mbox);
+ if (!req)
+ goto fail;
+
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct get_hw_cap_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (rsp->hw_caps & HW_CAP_MACSEC)
+ __set_bit(CN10K_HW_MACSEC, &hw->cap_flag);
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ dev_err(pfvf->dev, "Cannot get MACSEC capability from AF\n");
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
int __weak \
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
@@ -1915,3 +2111,48 @@ EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name);
MBOX_UP_CGX_MESSAGES
MBOX_UP_MCS_MESSAGES
#undef M
+
+dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
+ struct sk_buff *skb, int seg, int *len)
+{
+ enum dma_data_direction dir = DMA_TO_DEVICE;
+ const skb_frag_t *frag;
+ struct page *page;
+ int offset;
+
+	/* Crypto hardware needs write permission for IPsec crypto offload */
+ if (unlikely(xfrm_offload(skb))) {
+ dir = DMA_BIDIRECTIONAL;
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ }
+
+ /* First segment is always skb->data */
+ if (!seg) {
+ page = virt_to_page(skb->data);
+ offset = offset_in_page(skb->data);
+ *len = skb_headlen(skb);
+ } else {
+ frag = &skb_shinfo(skb)->frags[seg - 1];
+ page = skb_frag_page(frag);
+ offset = skb_frag_off(frag);
+ *len = skb_frag_size(frag);
+ }
+ return otx2_dma_map_page(pfvf, page, offset, *len, dir);
+}
+
+void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
+{
+ enum dma_data_direction dir = DMA_TO_DEVICE;
+ struct sk_buff *skb = NULL;
+ int seg;
+
+ skb = (struct sk_buff *)sg->skb;
+ if (unlikely(xfrm_offload(skb)))
+ dir = DMA_BIDIRECTIONAL;
+
+ for (seg = 0; seg < sg->num_segs; seg++) {
+ otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
+ sg->size[seg], dir);
+ }
+ sg->num_segs = 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 327254e578d5..e616a727a3a9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -14,6 +14,7 @@
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
+#include <linux/soc/marvell/silicons.h>
#include <linux/soc/marvell/octeontx2/asm.h>
#include <net/macsec.h>
#include <net/pkt_cls.h>
@@ -21,14 +22,19 @@
#include <linux/time64.h>
#include <linux/dim.h>
#include <uapi/linux/if_macsec.h>
+#include <net/page_pool/helpers.h>
#include <mbox.h>
#include <npc.h>
#include "otx2_reg.h"
#include "otx2_txrx.h"
#include "otx2_devlink.h"
+#include <rvu.h>
#include <rvu_trace.h>
#include "qos.h"
+#include "rep.h"
+#include "cn10k_ipsec.h"
+#include "cn20k.h"
/* IPv4 flag more fragment bit */
#define IPV4_FLAG_MORE 0x20
@@ -39,8 +45,11 @@
#define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8
#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
+#define PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF 0xB900
#define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF 0xBD00
+#define PCI_DEVID_OCTEONTX2_SDP_REP 0xA0F7
+
/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM 2
#define PCI_MBOX_BAR_NUM 4
@@ -52,6 +61,15 @@
#define NIX_PF_PFC_PRIO_MAX 8
#endif
+/* Number of segments per SG structure */
+#define MAX_SEGS_PER_SG 3
+
+irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq);
+irqreturn_t cn20k_pfaf_mbox_intr_handler(int irq, void *pf_irq);
+irqreturn_t cn20k_vfaf_mbox_intr_handler(int irq, void *vf_irq);
+irqreturn_t cn20k_pfvf_mbox_intr_handler(int irq, void *pf_irq);
+irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq);
+
enum arua_mapped_qtypes {
AURA_NIX_RQ,
AURA_NIX_SQ,
@@ -76,10 +94,6 @@ struct otx2_lmt_info {
u64 lmt_addr;
u16 lmt_id;
};
-/* RSS configuration */
-struct otx2_rss_ctx {
- u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
-};
struct otx2_rss_info {
u8 enable;
@@ -87,7 +101,7 @@ struct otx2_rss_info {
u16 rss_size;
#define RSS_HASH_KEY_SIZE 44 /* 352 bit key */
u8 key[RSS_HASH_KEY_SIZE];
- struct otx2_rss_ctx *rss_ctx[MAX_RSS_GROUPS];
+ u32 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
};
/* NIX (or NPC) RX errors */
@@ -120,31 +134,10 @@ enum otx2_errcodes_re {
ERRCODE_IL4_CSUM = 0x22,
};
-/* NIX TX stats */
-enum nix_stat_lf_tx {
- TX_UCAST = 0x0,
- TX_BCAST = 0x1,
- TX_MCAST = 0x2,
- TX_DROP = 0x3,
- TX_OCTS = 0x4,
- TX_STATS_ENUM_LAST,
-};
-
-/* NIX RX stats */
-enum nix_stat_lf_rx {
- RX_OCTS = 0x0,
- RX_UCAST = 0x1,
- RX_BCAST = 0x2,
- RX_MCAST = 0x3,
- RX_DROP = 0x4,
- RX_DROP_OCTS = 0x5,
- RX_FCS = 0x6,
- RX_ERR = 0x7,
- RX_DRP_BCAST = 0x8,
- RX_DRP_MCAST = 0x9,
- RX_DRP_L3BCAST = 0xa,
- RX_DRP_L3MCAST = 0xb,
- RX_STATS_ENUM_LAST,
+enum otx2_xdp_action {
+ OTX2_XDP_TX = BIT(0),
+ OTX2_XDP_REDIRECT = BIT(1),
+ OTX2_AF_XDP_FRAME = BIT(2),
};
struct otx2_dev_stats {
@@ -161,6 +154,7 @@ struct otx2_dev_stats {
u64 tx_bcast_frames;
u64 tx_mcast_frames;
u64 tx_drops;
+ atomic_long_t tx_discards;
};
/* Driver counted stats */
@@ -224,6 +218,7 @@ struct otx2_hw {
/* NIX */
u8 txschq_link_cfg_lvl;
+ u8 txschq_cnt[NIX_TXSCH_LVL_CNT];
u8 txschq_aggr_lvl_rr_prio;
u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
u16 matchall_ipolicer;
@@ -234,6 +229,8 @@ struct otx2_hw {
/* HW settings, coalescing etc */
u16 rx_chan_base;
u16 tx_chan_base;
+ u8 rx_chan_cnt;
+ u8 tx_chan_cnt;
u16 cq_qcount_wait;
u16 cq_ecount_wait;
u16 rq_skid;
@@ -254,6 +251,7 @@ struct otx2_hw {
u16 nix_msixoff; /* Offset of NIX vectors */
char *irq_name;
cpumask_var_t *affinity_mask;
+ struct pf_irq_data *pfvf_irq_devid[4];
/* Stats */
struct otx2_dev_stats dev_stats;
@@ -365,14 +363,24 @@ struct otx2_flow_config {
struct list_head flow_list_tc;
u8 ucast_flt_cnt;
bool ntuple;
+ u16 ntuple_cnt;
};
struct dev_hw_ops {
- int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura);
+ int (*sq_aq_init)(void *dev, u16 qidx, u8 chan_offset,
+ u16 sqb_aura);
void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
int size, int qidx);
int (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
void (*aura_freeptr)(void *dev, int aura, u64 buf);
+ irqreturn_t (*pfaf_mbox_intr_handler)(int irq, void *pf_irq);
+ irqreturn_t (*vfaf_mbox_intr_handler)(int irq, void *pf_irq);
+ irqreturn_t (*pfvf_mbox_intr_handler)(int irq, void *pf_irq);
+ int (*aura_aq_init)(struct otx2_nic *pfvf, int aura_id,
+ int pool_id, int numptrs);
+ int (*pool_aq_init)(struct otx2_nic *pfvf, u16 pool_id,
+ int stack_pages, int numptrs, int buf_size,
+ int type);
};
#define CN10K_MCS_SA_PER_SC 4
@@ -440,6 +448,16 @@ struct cn10k_mcs_cfg {
struct list_head rxsc_list;
};
+struct pf_irq_data {
+ u64 intr_status;
+ void (*pf_queue_work_hdlr)(struct mbox *mb, struct workqueue_struct *mw,
+ int first, int mdevs, u64 intr);
+ struct otx2_nic *pf;
+ int vec_num;
+ int start;
+ int mdevs;
+};
+
struct otx2_nic {
void __iomem *reg_base;
struct net_device *netdev;
@@ -466,6 +484,9 @@ struct otx2_nic {
#define OTX2_FLAG_PTP_ONESTEP_SYNC BIT_ULL(15)
#define OTX2_FLAG_ADPTV_INT_COAL_ENABLED BIT_ULL(16)
#define OTX2_FLAG_TC_MARK_ENABLED BIT_ULL(17)
+#define OTX2_FLAG_REP_MODE_ENABLED BIT_ULL(18)
+#define OTX2_FLAG_PORT_UP BIT_ULL(19)
+#define OTX2_FLAG_IPSEC_OFFLOAD_ENABLED BIT_ULL(20)
u64 flags;
u64 *cq_op_addr;
@@ -480,6 +501,7 @@ struct otx2_nic {
struct mbox *mbox_pfvf;
struct workqueue_struct *mbox_wq;
struct workqueue_struct *mbox_pfvf_wq;
+ struct qmem *pfvf_mbox_addr;
u8 total_vfs;
u16 pcifunc; /* RVU PF_FUNC */
@@ -511,15 +533,15 @@ struct otx2_nic {
u32 nix_lmt_size;
struct otx2_ptp *ptp;
- struct hwtstamp_config tstamp;
+ struct kernel_hwtstamp_config tstamp;
unsigned long rq_bmap;
/* Devlink */
struct otx2_devlink *dl;
-#ifdef CONFIG_DCB
/* PFC */
u8 pfc_en;
+#ifdef CONFIG_DCB
u8 *queue_to_pfc_map;
u16 pfc_schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
bool pfc_alloc_status[NIX_PF_PFC_PRIO_MAX];
@@ -533,11 +555,24 @@ struct otx2_nic {
#if IS_ENABLED(CONFIG_MACSEC)
struct cn10k_mcs_cfg *macsec_cfg;
#endif
+
+#if IS_ENABLED(CONFIG_RVU_ESWITCH)
+ struct rep_dev **reps;
+ int rep_cnt;
+ u16 rep_pf_map[RVU_MAX_REP];
+ u16 esw_mode;
+#endif
+
+ /* Inline ipsec */
+ struct cn10k_ipsec ipsec;
+ /* af_xdp zero-copy */
+ unsigned long *af_xdp_zc_qidx;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
{
- return pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF;
+ return (pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF) ||
+ (pdev->device == PCI_DEVID_RVU_REP);
}
static inline bool is_96xx_A0(struct pci_dev *pdev)
@@ -552,6 +587,11 @@ static inline bool is_96xx_B0(struct pci_dev *pdev)
(pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}
+static inline bool is_otx2_sdp_rep(struct pci_dev *pdev)
+{
+ return pdev->device == PCI_DEVID_OCTEONTX2_SDP_REP;
+}
+
/* REVID for PCIe devices.
* Bits 0..1: minor pass, bit 3..2: major pass
* bits 7..4: midr id
@@ -577,6 +617,15 @@ static inline bool is_dev_cn10kb(struct pci_dev *pdev)
return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF;
}
+static inline bool is_dev_cn10ka_b0(struct pci_dev *pdev)
+{
+ if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF &&
+ (pdev->revision & 0xFF) == 0x54)
+ return true;
+
+ return false;
+}
+
static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
struct otx2_hw *hw = &pfvf->hw;
@@ -609,9 +658,6 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
__set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag);
__set_bit(QOS_CIR_PIR_SUPPORT, &hw->cap_flag);
}
-
- if (is_dev_cn10kb(pfvf->pdev))
- __set_bit(CN10K_HW_MACSEC, &hw->cap_flag);
}
/* Register read/write APIs */
@@ -626,6 +672,9 @@ static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
case BLKTYPE_NPA:
blkaddr = BLKADDR_NPA;
break;
+ case BLKTYPE_CPT:
+ blkaddr = BLKADDR_CPT0;
+ break;
default:
blkaddr = BLKADDR_RVUM;
break;
@@ -707,8 +756,9 @@ static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr)
::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr));
}
-static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
+static inline u64 otx2_atomic64_add(u64 incr, void __iomem *addr)
{
+ u64 __iomem *ptr = addr;
u64 result;
__asm__ volatile(".cpu generic+lse\n"
@@ -721,7 +771,11 @@ static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
#else
#define otx2_write128(lo, hi, addr) writeq((hi) | (lo), addr)
-#define otx2_atomic64_add(incr, ptr) ({ *ptr += incr; })
+
+static inline u64 otx2_atomic64_add(u64 incr, void __iomem *addr)
+{
+ return 0;
+}
#endif
static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
@@ -771,7 +825,7 @@ static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
/* Alloc pointer from pool/aura */
static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
{
- u64 *ptr = (__force u64 *)otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_ALLOCX(0));
+ void __iomem *ptr = otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_ALLOCX(0));
u64 incr = (u64)aura | BIT_ULL(63);
return otx2_atomic64_add(incr, ptr);
@@ -846,6 +900,7 @@ static struct _req_type __maybe_unused \
*otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox) \
{ \
struct _req_type *req; \
+ u16 pcifunc = mbox->pfvf->pcifunc; \
\
req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
&mbox->mbox, 0, sizeof(struct _req_type), \
@@ -854,7 +909,8 @@ static struct _req_type __maybe_unused \
return NULL; \
req->hdr.sig = OTX2_MBOX_REQ_SIG; \
req->hdr.id = _id; \
- trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req)); \
+ req->hdr.pcifunc = pcifunc; \
+ trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req), pcifunc); \
return req; \
}
@@ -874,21 +930,11 @@ MBOX_UP_MCS_MESSAGES
/* Time to wait before watchdog kicks off */
#define OTX2_TX_TIMEOUT (100 * HZ)
-#define RVU_PFVF_PF_SHIFT 10
-#define RVU_PFVF_PF_MASK 0x3F
-#define RVU_PFVF_FUNC_SHIFT 0
-#define RVU_PFVF_FUNC_MASK 0x3FF
-
static inline bool is_otx2_vf(u16 pcifunc)
{
return !!(pcifunc & RVU_PFVF_FUNC_MASK);
}
-static inline int rvu_get_pf(u16 pcifunc)
-{
- return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
-}
-
static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
struct page *page,
size_t offset, size_t size,
@@ -914,15 +960,19 @@ static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx)
{
u16 smq;
+ int idx;
+
#ifdef CONFIG_DCB
if (qidx < NIX_PF_PFC_PRIO_MAX && pfvf->pfc_alloc_status[qidx])
return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx];
#endif
/* check if qidx falls under QOS queues */
- if (qidx >= pfvf->hw.non_qos_queues)
+ if (qidx >= pfvf->hw.non_qos_queues) {
smq = pfvf->qos.qid_to_sqmap[qidx - pfvf->hw.non_qos_queues];
- else
- smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+ } else {
+ idx = qidx % pfvf->hw.txschq_cnt[NIX_TXSCH_LVL_SMQ];
+ smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][idx];
+ }
return smq;
}
@@ -982,15 +1032,16 @@ void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq);
void otx2_free_pending_sqe(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
- dma_addr_t *dma);
+ dma_addr_t *dma, int qidx, int idx);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
+int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura);
-int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
-int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
+int otx2_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
+int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
dma_addr_t *dma);
int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
@@ -1011,12 +1062,19 @@ void otx2_pfaf_mbox_destroy(struct otx2_nic *pf);
void otx2_disable_mbox_intr(struct otx2_nic *pf);
void otx2_disable_napi(struct otx2_nic *pf);
irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq);
+int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura);
+int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx);
+int otx2_set_hw_capabilities(struct otx2_nic *pfvf);
+int otx2_aura_aq_init(struct otx2_nic *pfvf, int aura_id,
+ int pool_id, int numptrs);
+int otx2_pool_aq_init(struct otx2_nic *pfvf, u16 pool_id,
+ int stack_pages, int numptrs, int buf_size, int type);
/* RSS configuration APIs*/
int otx2_rss_init(struct otx2_nic *pfvf);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf);
void otx2_set_rss_key(struct otx2_nic *pfvf);
-int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id);
+int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id, const u32 *ind_tbl);
/* Mbox handlers */
void mbox_handler_msix_offset(struct otx2_nic *pfvf,
@@ -1050,8 +1108,11 @@ int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
int otx2_set_real_num_queues(struct net_device *netdev,
int tx_queues, int rx_queues);
-int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd);
-int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr);
+int otx2_config_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int otx2_config_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
/* MCAM filter related APIs */
int otx2_mcam_flow_init(struct otx2_nic *pf);
@@ -1073,7 +1134,10 @@ int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
-bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, struct xdp_frame *xdpf,
+ u64 iova, int len, u16 qidx, u16 flags);
+void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, struct xdp_frame *xdpf,
+ u64 dma_addr, int len, int *offset, u16 flags);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
int otx2_handle_ntuple_tc_features(struct net_device *netdev,
netdev_features_t features);
@@ -1142,4 +1206,19 @@ u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
int otx2_get_txq_by_classid(struct otx2_nic *pfvf, u16 classid);
void otx2_qos_config_txschq(struct otx2_nic *pfvf);
void otx2_clean_qos_queues(struct otx2_nic *pfvf);
+int rvu_event_up_notify(struct otx2_nic *pf, struct rep_event *info);
+int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
+ struct flow_cls_offload *cls_flower);
+
+static inline int mcam_entry_cmp(const void *a, const void *b)
+{
+ return *(u16 *)a - *(u16 *)b;
+}
+
+dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
+ struct sk_buff *skb, int seg, int *len);
+void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg);
+int otx2_read_free_sqe(struct otx2_nic *pfvf, u16 qidx);
+void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
+ int first, int mdevs, u64 intr);
#endif /* OTX2_COMMON_H */
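
The otx2_get_smq_idx() hunk above stops pinning every send queue to SMQ entry 0 and instead spreads queues across all allocated SMQs by reducing the queue index modulo the per-level scheduler count. A minimal user-space sketch of that arithmetic follows; the array sizes and values are illustrative, not driver state:

#include <stdio.h>

#define MAX_TXSCHQ_PER_FUNC 128

/* Illustrative only: mimics the round-robin SMQ pick in otx2_get_smq_idx(). */
static unsigned int pick_smq(const unsigned short *smq_list,
			     unsigned int smq_cnt, unsigned int qidx)
{
	return smq_list[qidx % smq_cnt];	/* spread txqs across all SMQs */
}

int main(void)
{
	unsigned short smq_list[MAX_TXSCHQ_PER_FUNC] = { 10, 11, 12 }; /* 3 SMQs */
	unsigned int qidx;

	for (qidx = 0; qidx < 6; qidx++)
		printf("txq %u -> SMQ %u\n", qidx, pick_smq(smq_list, 3, qidx));
	return 0;
}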
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
index 294fba58b670..f110dfa42360 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
@@ -435,6 +435,9 @@ process_pfc:
return err;
}
+ /* Disable backpressure on NIX-CPT by default */
+ otx2_nix_cpt_config_bp(pfvf, false);
+
/* Request Per channel Bpids */
if (pfc->pfc_en)
otx2_nix_config_bp(pfvf, true);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
index 53f14aa944bd..a72694219df4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
@@ -41,13 +41,15 @@ static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id,
if (!pfvf->flow_cfg)
return 0;
+ pfvf->flow_cfg->ntuple_cnt = ctx->val.vu16;
otx2_alloc_mcam_entries(pfvf, ctx->val.vu16);
return 0;
}
static int otx2_dl_mcam_count_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct otx2_devlink *otx2_dl = devlink_priv(devlink);
struct otx2_nic *pfvf = otx2_dl->pfvf;
@@ -83,7 +85,8 @@ static int otx2_dl_ucast_flt_cnt_set(struct devlink *devlink, u32 id,
}
static int otx2_dl_ucast_flt_cnt_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct otx2_devlink *otx2_dl = devlink_priv(devlink);
struct otx2_nic *pfvf = otx2_dl->pfvf;
@@ -141,7 +144,56 @@ static const struct devlink_param otx2_dl_params[] = {
otx2_dl_ucast_flt_cnt_validate),
};
+#ifdef CONFIG_RVU_ESWITCH
+static int otx2_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+
+ if (!otx2_rep_dev(pfvf->pdev))
+ return -EOPNOTSUPP;
+
+ *mode = pfvf->esw_mode;
+
+ return 0;
+}
+
+static int otx2_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+ int ret = 0;
+
+ if (!otx2_rep_dev(pfvf->pdev))
+ return -EOPNOTSUPP;
+
+ if (pfvf->esw_mode == mode)
+ return 0;
+
+ switch (mode) {
+ case DEVLINK_ESWITCH_MODE_LEGACY:
+ rvu_rep_destroy(pfvf);
+ break;
+ case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+ ret = rvu_rep_create(pfvf, extack);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!ret)
+ pfvf->esw_mode = mode;
+
+ return ret;
+}
+#endif
+
static const struct devlink_ops otx2_devlink_ops = {
+#ifdef CONFIG_RVU_ESWITCH
+ .eswitch_mode_get = otx2_devlink_eswitch_mode_get,
+ .eswitch_mode_set = otx2_devlink_eswitch_mode_set,
+#endif
};
int otx2_register_dl(struct otx2_nic *pfvf)
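
The new devlink eswitch callbacks only commit pfvf->esw_mode once the transition has succeeded: a request for the current mode is a no-op, legacy tears the representors down, switchdev creates them, and anything else is rejected. A small user-space sketch of that guard pattern, with stand-in helpers rather than the driver's rvu_rep_create()/rvu_rep_destroy():

#include <errno.h>
#include <stdio.h>

enum { MODE_LEGACY, MODE_SWITCHDEV };

static int destroy_reps(void) { return 0; }	/* stand-in, illustrative */
static int create_reps(void)  { return 0; }	/* stand-in, illustrative */

/* Commit the new mode only when the transition succeeded. */
static int eswitch_mode_set(int *cur_mode, int new_mode)
{
	int ret = 0;

	if (*cur_mode == new_mode)
		return 0;

	switch (new_mode) {
	case MODE_LEGACY:
		ret = destroy_reps();
		break;
	case MODE_SWITCHDEV:
		ret = create_reps();
		break;
	default:
		return -EINVAL;
	}

	if (!ret)
		*cur_mode = new_mode;
	return ret;
}

int main(void)
{
	int mode = MODE_LEGACY;

	printf("set switchdev: %d, mode now %d\n",
	       eswitch_mode_set(&mode, MODE_SWITCHDEV), mode);
	return 0;
}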
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 2d53dc77ef1e..b90e23dc49de 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -15,6 +15,7 @@
#include "otx2_common.h"
#include "otx2_ptp.h"
+#include <cgx_fw_if.h>
#define DRV_NAME "rvu-nicpf"
#define DRV_VF_NAME "rvu-nicvf"
@@ -315,7 +316,7 @@ static void otx2_get_pauseparam(struct net_device *netdev,
struct otx2_nic *pfvf = netdev_priv(netdev);
struct cgx_pause_frm_cfg *req, *rsp;
- if (is_otx2_lbkvf(pfvf->pdev))
+ if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev))
return;
mutex_lock(&pfvf->mbox.lock);
@@ -347,7 +348,7 @@ static int otx2_set_pauseparam(struct net_device *netdev,
if (pause->autoneg)
return -EOPNOTSUPP;
- if (is_otx2_lbkvf(pfvf->pdev))
+ if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev))
return -EOPNOTSUPP;
if (pause->rx_pause)
@@ -559,10 +560,13 @@ static int otx2_set_coalesce(struct net_device *netdev,
return 0;
}
-static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
- struct ethtool_rxnfc *nfc)
+static int otx2_get_rss_hash_opts(struct net_device *dev,
+ struct ethtool_rxfh_fields *nfc)
{
- struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ struct otx2_rss_info *rss;
+
+ rss = &pfvf->hw.rss_info;
if (!(rss->flowkey_cfg &
(NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6)))
@@ -609,12 +613,17 @@ static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
return 0;
}
-static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf,
- struct ethtool_rxnfc *nfc)
+static int otx2_set_rss_hash_opts(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
- struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ struct otx2_nic *pfvf = netdev_priv(dev);
u32 rxh_l4 = RXH_L4_B_0_1 | RXH_L4_B_2_3;
- u32 rss_cfg = rss->flowkey_cfg;
+ struct otx2_rss_info *rss;
+ u32 rss_cfg;
+
+ rss = &pfvf->hw.rss_info;
+ rss_cfg = rss->flowkey_cfg;
if (!rss->enable) {
netdev_err(pfvf->netdev,
@@ -743,8 +752,6 @@ static int otx2_get_rxnfc(struct net_device *dev,
if (netif_running(dev) && ntuple)
ret = otx2_get_all_flows(pfvf, nfc, rules);
break;
- case ETHTOOL_GRXFH:
- return otx2_get_rss_hash_opts(pfvf, nfc);
default:
break;
}
@@ -759,9 +766,6 @@ static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
pfvf->flow_cfg->ntuple = ntuple;
switch (nfc->cmd) {
- case ETHTOOL_SRXFH:
- ret = otx2_set_rss_hash_opts(pfvf, nfc);
- break;
case ETHTOOL_SRXCLSRLINS:
if (netif_running(dev) && ntuple)
ret = otx2_add_flow(pfvf, nfc);
@@ -792,60 +796,91 @@ static u32 otx2_get_rxfh_indir_size(struct net_device *dev)
return MAX_RSS_INDIR_TBL_SIZE;
}
-static int otx2_rss_ctx_delete(struct otx2_nic *pfvf, int ctx_id)
+static int otx2_create_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
- struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ struct otx2_rss_info *rss;
+ unsigned int queues;
+ u32 *ind_tbl;
+ int idx;
+
+ rss = &pfvf->hw.rss_info;
+ queues = pfvf->hw.rx_queues;
- otx2_rss_ctx_flow_del(pfvf, ctx_id);
- kfree(rss->rss_ctx[ctx_id]);
- rss->rss_ctx[ctx_id] = NULL;
+ if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+ ctx->hfunc = ETH_RSS_HASH_TOP;
+ if (!rss->enable) {
+ netdev_err(dev, "RSS is disabled, cannot change settings\n");
+ return -EIO;
+ }
+
+ ind_tbl = rxfh->indir;
+ if (!ind_tbl) {
+ ind_tbl = ethtool_rxfh_context_indir(ctx);
+ for (idx = 0; idx < rss->rss_size; idx++)
+ ind_tbl[idx] = ethtool_rxfh_indir_default(idx, queues);
+ }
+
+ otx2_set_rss_table(pfvf, rxfh->rss_context, ind_tbl);
return 0;
}
-static int otx2_rss_ctx_create(struct otx2_nic *pfvf,
- u32 *rss_context)
+static int otx2_modify_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
- struct otx2_rss_info *rss = &pfvf->hw.rss_info;
- u8 ctx;
+ struct otx2_nic *pfvf = netdev_priv(dev);
- for (ctx = 0; ctx < MAX_RSS_GROUPS; ctx++) {
- if (!rss->rss_ctx[ctx])
- break;
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (!pfvf->hw.rss_info.enable) {
+ netdev_err(dev, "RSS is disabled, cannot change settings\n");
+ return -EIO;
}
- if (ctx == MAX_RSS_GROUPS)
- return -EINVAL;
- rss->rss_ctx[ctx] = kzalloc(sizeof(*rss->rss_ctx[ctx]), GFP_KERNEL);
- if (!rss->rss_ctx[ctx])
- return -ENOMEM;
- *rss_context = ctx;
+ if (rxfh->indir)
+ otx2_set_rss_table(pfvf, rxfh->rss_context, rxfh->indir);
return 0;
}
+static int otx2_remove_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ u32 rss_context,
+ struct netlink_ext_ack *extack)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+
+ if (!pfvf->hw.rss_info.enable) {
+ netdev_err(dev, "RSS is disabled, cannot change settings\n");
+ return -EIO;
+ }
+
+ otx2_rss_ctx_flow_del(pfvf, rss_context);
+ return 0;
+}
+
/* Configure RSS table and hash key */
static int otx2_set_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
- u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP;
struct otx2_nic *pfvf = netdev_priv(dev);
- struct otx2_rss_ctx *rss_ctx;
struct otx2_rss_info *rss;
- int ret, idx;
+ int idx;
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (rxfh->rss_context)
- rss_context = rxfh->rss_context;
-
- if (rss_context != ETH_RXFH_CONTEXT_ALLOC &&
- rss_context >= MAX_RSS_GROUPS)
- return -EINVAL;
-
rss = &pfvf->hw.rss_info;
if (!rss->enable) {
@@ -857,21 +892,12 @@ static int otx2_set_rxfh(struct net_device *dev,
memcpy(rss->key, rxfh->key, sizeof(rss->key));
otx2_set_rss_key(pfvf);
}
- if (rxfh->rss_delete)
- return otx2_rss_ctx_delete(pfvf, rss_context);
-
- if (rss_context == ETH_RXFH_CONTEXT_ALLOC) {
- ret = otx2_rss_ctx_create(pfvf, &rss_context);
- rxfh->rss_context = rss_context;
- if (ret)
- return ret;
- }
+
if (rxfh->indir) {
- rss_ctx = rss->rss_ctx[rss_context];
for (idx = 0; idx < rss->rss_size; idx++)
- rss_ctx->ind_tbl[idx] = rxfh->indir[idx];
+ rss->ind_tbl[idx] = rxfh->indir[idx];
}
- otx2_set_rss_table(pfvf, rss_context);
+ otx2_set_rss_table(pfvf, DEFAULT_RSS_CONTEXT_GROUP, NULL);
return 0;
}
@@ -880,9 +906,7 @@ static int otx2_set_rxfh(struct net_device *dev,
static int otx2_get_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh)
{
- u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP;
struct otx2_nic *pfvf = netdev_priv(dev);
- struct otx2_rss_ctx *rss_ctx;
struct otx2_rss_info *rss;
u32 *indir = rxfh->indir;
int idx, rx_queues;
@@ -890,28 +914,21 @@ static int otx2_get_rxfh(struct net_device *dev,
rss = &pfvf->hw.rss_info;
rxfh->hfunc = ETH_RSS_HASH_TOP;
- if (rxfh->rss_context)
- rss_context = rxfh->rss_context;
-
if (!indir)
return 0;
- if (!rss->enable && rss_context == DEFAULT_RSS_CONTEXT_GROUP) {
+ if (!rss->enable) {
rx_queues = pfvf->hw.rx_queues;
for (idx = 0; idx < MAX_RSS_INDIR_TBL_SIZE; idx++)
indir[idx] = ethtool_rxfh_indir_default(idx, rx_queues);
return 0;
}
- if (rss_context >= MAX_RSS_GROUPS)
- return -ENOENT;
- rss_ctx = rss->rss_ctx[rss_context];
- if (!rss_ctx)
- return -ENOENT;
-
- if (indir) {
- for (idx = 0; idx < rss->rss_size; idx++)
- indir[idx] = rss_ctx->ind_tbl[idx];
+ for (idx = 0; idx < rss->rss_size; idx++) {
+ /* Ignore if the rx queue is AF_XDP zero copy enabled */
+ if (test_bit(rss->ind_tbl[idx], pfvf->af_xdp_zc_qidx))
+ continue;
+ indir[idx] = rss->ind_tbl[idx];
}
if (rxfh->key)
memcpy(rxfh->key, rss->key, sizeof(rss->key));
@@ -937,8 +954,8 @@ static u32 otx2_get_link(struct net_device *netdev)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
- /* LBK link is internal and always UP */
- if (is_otx2_lbkvf(pfvf->pdev))
+ /* LBK and SDP links are internal and always UP */
+ if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev))
return 1;
return pfvf->linfo.link_up;
}
@@ -1119,17 +1136,9 @@ static void otx2_get_link_mode_info(u64 link_mode_bmap,
*link_ksettings)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_link_modes) = { 0, };
- const int otx2_sgmii_features[6] = {
- ETHTOOL_LINK_MODE_10baseT_Half_BIT,
- ETHTOOL_LINK_MODE_10baseT_Full_BIT,
- ETHTOOL_LINK_MODE_100baseT_Half_BIT,
- ETHTOOL_LINK_MODE_100baseT_Full_BIT,
- ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
- ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
- };
/* CGX link modes to Ethtool link mode mapping */
- const int cgx_link_mode[27] = {
- 0, /* SGMII Mode */
+ const int cgx_link_mode[CGX_MODE_MAX] = {
+ 0, /* SGMII 1000baseT */
ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
@@ -1159,14 +1168,19 @@ static void otx2_get_link_mode_info(u64 link_mode_bmap,
};
u8 bit;
- for_each_set_bit(bit, (unsigned long *)&link_mode_bmap, 27) {
- /* SGMII mode is set */
- if (bit == 0)
- linkmode_set_bit_array(otx2_sgmii_features,
- ARRAY_SIZE(otx2_sgmii_features),
- otx2_link_modes);
- else
+ for_each_set_bit(bit, (unsigned long *)&link_mode_bmap, ARRAY_SIZE(cgx_link_mode)) {
+ if (bit == CGX_MODE_SGMII_10M_BIT) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, otx2_link_modes);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, otx2_link_modes);
+ } else if (bit == CGX_MODE_SGMII_100M_BIT) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, otx2_link_modes);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, otx2_link_modes);
+ } else if (bit == CGX_MODE_SGMII) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, otx2_link_modes);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, otx2_link_modes);
+ } else {
linkmode_set_bit(cgx_link_mode[bit], otx2_link_modes);
+ }
}
if (req_mode == OTX2_MODE_ADVERTISED)
@@ -1207,23 +1221,10 @@ static int otx2_get_link_ksettings(struct net_device *netdev,
return 0;
}
-static void otx2_get_advertised_mode(const struct ethtool_link_ksettings *cmd,
- u64 *mode)
-{
- u32 bit_pos;
-
- /* Firmware does not support requesting multiple advertised modes
- * return first set bit
- */
- bit_pos = find_first_bit(cmd->link_modes.advertising,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- if (bit_pos != __ETHTOOL_LINK_MODE_MASK_NBITS)
- *mode = bit_pos;
-}
-
static int otx2_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct otx2_nic *pf = netdev_priv(netdev);
struct ethtool_link_ksettings cur_ks;
struct cgx_set_link_mode_req *req;
@@ -1260,7 +1261,20 @@ static int otx2_set_link_ksettings(struct net_device *netdev,
*/
req->args.duplex = cmd->base.duplex ^ 0x1;
req->args.an = cmd->base.autoneg;
- otx2_get_advertised_mode(cmd, &req->args.mode);
+ /* Mask unsupported modes and send message to AF */
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mask);
+
+ linkmode_copy(req->args.advertising,
+ cmd->link_modes.advertising);
+ linkmode_andnot(req->args.advertising,
+ req->args.advertising, mask);
+
+ /* inform AF that it needs to parse this differently */
+ if (bitmap_weight(req->args.advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS) >= 2)
+ req->args.multimode = true;
err = otx2_sync_mbox_msg(&pf->mbox);
end:
@@ -1269,7 +1283,8 @@ end:
}
static void otx2_get_fec_stats(struct net_device *netdev,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
struct cgx_fw_data *rsp;
@@ -1302,12 +1317,12 @@ static void otx2_get_fec_stats(struct net_device *netdev,
}
static const struct ethtool_ops otx2_ethtool_ops = {
- .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE,
.supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN |
ETHTOOL_RING_USE_CQE_SIZE,
+ .rxfh_max_num_contexts = MAX_RSS_GROUPS,
.get_link = otx2_get_link,
.get_drvinfo = otx2_get_drvinfo,
.get_strings = otx2_get_strings,
@@ -1325,6 +1340,11 @@ static const struct ethtool_ops otx2_ethtool_ops = {
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
.set_rxfh = otx2_set_rxfh,
+ .get_rxfh_fields = otx2_get_rss_hash_opts,
+ .set_rxfh_fields = otx2_set_rss_hash_opts,
+ .create_rxfh_context = otx2_create_rxfh,
+ .modify_rxfh_context = otx2_modify_rxfh,
+ .remove_rxfh_context = otx2_remove_rxfh,
.get_msglevel = otx2_get_msglevel,
.set_msglevel = otx2_set_msglevel,
.get_pauseparam = otx2_get_pauseparam,
@@ -1409,7 +1429,7 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev,
{
struct otx2_nic *pfvf = netdev_priv(netdev);
- if (is_otx2_lbkvf(pfvf->pdev)) {
+ if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev)) {
cmd->base.duplex = DUPLEX_FULL;
cmd->base.speed = SPEED_100000;
} else {
@@ -1419,12 +1439,12 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev,
}
static const struct ethtool_ops otx2vf_ethtool_ops = {
- .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE,
.supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN |
ETHTOOL_RING_USE_CQE_SIZE,
+ .rxfh_max_num_contexts = MAX_RSS_GROUPS,
.get_link = otx2_get_link,
.get_drvinfo = otx2vf_get_drvinfo,
.get_strings = otx2vf_get_strings,
@@ -1438,6 +1458,11 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
.set_rxfh = otx2_set_rxfh,
+ .get_rxfh_fields = otx2_get_rss_hash_opts,
+ .set_rxfh_fields = otx2_set_rss_hash_opts,
+ .create_rxfh_context = otx2_create_rxfh,
+ .modify_rxfh_context = otx2_modify_rxfh,
+ .remove_rxfh_context = otx2_remove_rxfh,
.get_ringparam = otx2_get_ringparam,
.set_ringparam = otx2_set_ringparam,
.get_coalesce = otx2_get_coalesce,
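
When otx2_create_rxfh() above is given no indirection table, it falls back to ethtool_rxfh_indir_default(), which amounts to distributing table slots across the RX queues round-robin (index modulo queue count). A runnable sketch of that default fill, with an illustrative queue count:

#include <stdio.h>

#define MAX_RSS_INDIR_TBL_SIZE 256

/* Same arithmetic as ethtool_rxfh_indir_default(): idx % nr_rx_queues */
static unsigned int rxfh_indir_default(unsigned int index, unsigned int n_rx_rings)
{
	return index % n_rx_rings;
}

int main(void)
{
	unsigned int ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
	unsigned int idx, rx_queues = 4;	/* illustrative queue count */

	for (idx = 0; idx < MAX_RSS_INDIR_TBL_SIZE; idx++)
		ind_tbl[idx] = rxfh_indir_default(idx, rx_queues);

	printf("slot 0 -> rxq %u, slot 5 -> rxq %u\n", ind_tbl[0], ind_tbl[5]);
	return 0;
}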
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 58720a161ee2..64c6d9162ef6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -64,11 +64,6 @@ static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
return 0;
}
-static int mcam_entry_cmp(const void *a, const void *b)
-{
- return *(u16 *)a - *(u16 *)b;
-}
-
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
@@ -252,7 +247,7 @@ int otx2_mcam_entry_init(struct otx2_nic *pfvf)
mutex_unlock(&pfvf->mbox.lock);
/* Allocate entries for Ntuple filters */
- count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
+ count = otx2_alloc_mcam_entries(pfvf, flow_cfg->ntuple_cnt);
if (count <= 0) {
otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
return 0;
@@ -312,6 +307,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc);
pf->flow_cfg->ucast_flt_cnt = OTX2_DEFAULT_UNICAST_FLOWS;
+ pf->flow_cfg->ntuple_cnt = OTX2_DEFAULT_FLOWCOUNT;
/* Allocate bare minimum number of MCAM entries needed for
* unicast and ntuple filters.
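
mcam_entry_cmp() moves out of otx2_flows.c into the shared header above so other callers can sort allocated MCAM entry indices; it is a plain ascending comparator over u16 values. A standalone sketch of how such a comparator behaves with qsort(), using made-up entry numbers:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Ascending compare of two u16 MCAM entry indices, as in mcam_entry_cmp(). */
static int mcam_entry_cmp(const void *a, const void *b)
{
	return *(const uint16_t *)a - *(const uint16_t *)b;
}

int main(void)
{
	uint16_t entries[] = { 212, 7, 96, 33 };	/* illustrative indices */
	size_t i, n = sizeof(entries) / sizeof(entries[0]);

	qsort(entries, n, sizeof(entries[0]), mcam_entry_cmp);
	for (i = 0; i < n; i++)
		printf("%d ", entries[i]);
	printf("\n");
	return 0;
}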
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index e6b03bad2dba..a7feb4c392b3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -26,6 +26,8 @@
#include "cn10k.h"
#include "qos.h"
#include <rvu_trace.h>
+#include "cn10k_ipsec.h"
+#include "otx2_xsk.h"
#define DRV_NAME "rvu_nicpf"
#define DRV_STRING "Marvell RVU NIC Physical Function Driver"
@@ -204,7 +206,8 @@ static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
/* Register ME interrupt handler*/
irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE];
- snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", rvu_get_pf(pf->pcifunc));
+ snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0",
+ rvu_get_pf(pf->pdev, pf->pcifunc));
ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0),
otx2_pf_me_intr_handler, 0, irq_name, pf);
if (ret) {
@@ -214,7 +217,8 @@ static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
/* Register FLR interrupt handler */
irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE];
- snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", rvu_get_pf(pf->pcifunc));
+ snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0",
+ rvu_get_pf(pf->pdev, pf->pcifunc));
ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0),
otx2_pf_flr_intr_handler, 0, irq_name, pf);
if (ret) {
@@ -226,7 +230,7 @@ static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
if (numvfs > 64) {
irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE];
snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME1",
- rvu_get_pf(pf->pcifunc));
+ rvu_get_pf(pf->pdev, pf->pcifunc));
ret = request_irq(pci_irq_vector
(pf->pdev, RVU_PF_INT_VEC_VFME1),
otx2_pf_me_intr_handler, 0, irq_name, pf);
@@ -236,7 +240,7 @@ static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
}
irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE];
snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR1",
- rvu_get_pf(pf->pcifunc));
+ rvu_get_pf(pf->pdev, pf->pcifunc));
ret = request_irq(pci_irq_vector
(pf->pdev, RVU_PF_INT_VEC_VFFLR1),
otx2_pf_flr_intr_handler, 0, irq_name, pf);
@@ -292,8 +296,8 @@ static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
return 0;
}
-static void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
- int first, int mdevs, u64 intr)
+void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
+ int first, int mdevs, u64 intr)
{
struct otx2_mbox_dev *mdev;
struct otx2_mbox *mbox;
@@ -463,6 +467,9 @@ static void otx2_pfvf_mbox_handler(struct work_struct *work)
offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ trace_otx2_msg_status(pf->pdev, "PF-VF down queue handler(forwarding)",
+ vf_mbox->num_msgs);
+
for (id = 0; id < vf_mbox->num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
offset);
@@ -471,7 +478,7 @@ static void otx2_pfvf_mbox_handler(struct work_struct *work)
goto inval_msg;
/* Set VF's number in each of the msg */
- msg->pcifunc &= RVU_PFVF_FUNC_MASK;
+ msg->pcifunc &= ~RVU_PFVF_FUNC_MASK;
msg->pcifunc |= (vf_idx + 1) & RVU_PFVF_FUNC_MASK;
offset = msg->next_msgoff;
}
@@ -501,6 +508,9 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
offset = mbox->rx_start + ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ trace_otx2_msg_status(pf->pdev, "PF-VF up queue handler(response)",
+ vf_mbox->up_num_msgs);
+
for (id = 0; id < vf_mbox->up_num_msgs; id++) {
msg = mdev->mbase + offset;
@@ -519,6 +529,7 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
switch (msg->id) {
case MBOX_MSG_CGX_LINK_EVENT:
+ case MBOX_MSG_REP_EVENT_UP_NOTIFY:
break;
default:
if (msg->rc)
@@ -536,7 +547,7 @@ end:
}
}
-static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
+irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
{
struct otx2_nic *pf = (struct otx2_nic *)(pf_irq);
int vfs = pf->total_vfs;
@@ -565,6 +576,23 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
return IRQ_HANDLED;
}
+static void *cn20k_pfvf_mbox_alloc(struct otx2_nic *pf, int numvfs)
+{
+ struct qmem *mbox_addr;
+ int err;
+
+ err = qmem_alloc(&pf->pdev->dev, &mbox_addr, numvfs, MBOX_SIZE);
+ if (err) {
+ dev_err(pf->dev, "qmem alloc fail\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ otx2_write64(pf, RVU_PF_VF_MBOX_ADDR, (u64)mbox_addr->iova);
+ pf->pfvf_mbox_addr = mbox_addr;
+
+ return mbox_addr->base;
+}
+
static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
{
void __iomem *hwbase;
@@ -586,20 +614,27 @@ static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
if (!pf->mbox_pfvf_wq)
return -ENOMEM;
- /* On CN10K platform, PF <-> VF mailbox region follows after
- * PF <-> AF mailbox region.
+ /* For CN20K, PF allocates mbox memory in DRAM and writes PF/VF
+ * regions/offsets in RVU_PF_VF_MBOX_ADDR; the RVU_PFX_FUNC_PFAF_MBOX
+ * gives the aliased address to access PF/VF mailbox regions.
*/
- if (test_bit(CN10K_MBOX, &pf->hw.cap_flag))
- base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
- MBOX_SIZE;
- else
- base = readq((void __iomem *)((u64)pf->reg_base +
- RVU_PF_VF_BAR4_ADDR));
+ if (is_cn20k(pf->pdev)) {
+ hwbase = (void __iomem *)cn20k_pfvf_mbox_alloc(pf, numvfs);
+ } else {
+ /* On CN10K platform, PF <-> VF mailbox region follows after
+ * PF <-> AF mailbox region.
+ */
+ if (test_bit(CN10K_MBOX, &pf->hw.cap_flag))
+ base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
+ MBOX_SIZE;
+ else
+ base = readq(pf->reg_base + RVU_PF_VF_BAR4_ADDR);
- hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
- if (!hwbase) {
- err = -ENOMEM;
- goto free_wq;
+ hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
+ if (!hwbase) {
+ err = -ENOMEM;
+ goto free_wq;
+ }
}
mbox = &pf->mbox_pfvf[0];
@@ -623,7 +658,7 @@ static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
return 0;
free_iomem:
- if (hwbase)
+ if (hwbase && !(is_cn20k(pf->pdev)))
iounmap(hwbase);
free_wq:
destroy_workqueue(pf->mbox_pfvf_wq);
@@ -642,8 +677,10 @@ static void otx2_pfvf_mbox_destroy(struct otx2_nic *pf)
pf->mbox_pfvf_wq = NULL;
}
- if (mbox->mbox.hwbase)
+ if (mbox->mbox.hwbase && !is_cn20k(pf->pdev))
iounmap(mbox->mbox.hwbase);
+ else
+ qmem_free(&pf->pdev->dev, pf->pfvf_mbox_addr);
otx2_mbox_destroy(&mbox->mbox);
}
@@ -667,6 +704,9 @@ static void otx2_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
{
int vector;
+ if (is_cn20k(pf->pdev))
+ return cn20k_disable_pfvf_mbox_intr(pf, numvfs);
+
/* Disable PF <=> VF mailbox IRQ */
otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ull);
otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ull);
@@ -688,11 +728,14 @@ static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
char *irq_name;
int err;
+ if (is_cn20k(pf->pdev))
+ return cn20k_register_pfvf_mbox_intr(pf, numvfs);
+
/* Register MBOX0 interrupt handler */
irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX0 * NAME_SIZE];
if (pf->pcifunc)
snprintf(irq_name, NAME_SIZE,
- "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pcifunc));
+ "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pdev, pf->pcifunc));
else
snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox0");
err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0),
@@ -708,7 +751,8 @@ static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX1 * NAME_SIZE];
if (pf->pcifunc)
snprintf(irq_name, NAME_SIZE,
- "RVUPF%d_VF Mbox1", rvu_get_pf(pf->pcifunc));
+ "RVUPF%d_VF Mbox1",
+ rvu_get_pf(pf->pdev, pf->pcifunc));
else
snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox1");
err = request_irq(pci_irq_vector(pf->pdev,
@@ -816,6 +860,9 @@ static void otx2_pfaf_mbox_handler(struct work_struct *work)
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
pf = af_mbox->pfvf;
+ trace_otx2_msg_status(pf->pdev, "PF-AF down queue handler(response)",
+ num_msgs);
+
for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
otx2_process_pfaf_mbox_msg(pf, msg);
@@ -832,6 +879,9 @@ static void otx2_handle_link_event(struct otx2_nic *pf)
struct cgx_link_user_info *linfo = &pf->linfo;
struct net_device *netdev = pf->netdev;
+ if (pf->flags & OTX2_FLAG_PORT_UP)
+ return;
+
pr_info("%s NIC Link is %s %d Mbps %s duplex\n", netdev->name,
linfo->link_up ? "UP" : "DOWN", linfo->speed,
linfo->full_duplex ? "Full" : "Half");
@@ -844,6 +894,35 @@ static void otx2_handle_link_event(struct otx2_nic *pf)
}
}
+static int otx2_mbox_up_handler_rep_event_up_notify(struct otx2_nic *pf,
+ struct rep_event *info,
+ struct msg_rsp *rsp)
+{
+ struct net_device *netdev = pf->netdev;
+
+ if (info->event == RVU_EVENT_MTU_CHANGE) {
+ netdev->mtu = info->evt_data.mtu;
+ return 0;
+ }
+
+ if (info->event == RVU_EVENT_PORT_STATE) {
+ if (info->evt_data.port_state) {
+ pf->flags |= OTX2_FLAG_PORT_UP;
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+ } else {
+ pf->flags &= ~OTX2_FLAG_PORT_UP;
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
+ }
+ return 0;
+ }
+#ifdef CONFIG_RVU_ESWITCH
+ rvu_event_up_notify(pf, info);
+#endif
+ return 0;
+}
+
int otx2_mbox_up_handler_mcs_intr_notify(struct otx2_nic *pf,
struct mcs_intr_info *event,
struct msg_rsp *rsp)
@@ -913,6 +992,7 @@ static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
}
MBOX_UP_CGX_MESSAGES
MBOX_UP_MCS_MESSAGES
+MBOX_UP_REP_MESSAGES
#undef M
break;
default:
@@ -938,6 +1018,9 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+ trace_otx2_msg_status(pf->pdev, "PF-AF up queue handler(notification)",
+ num_msgs);
+
for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
@@ -958,7 +1041,7 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
otx2_mbox_msg_send(mbox, 0);
}
-static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
+irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
{
struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
struct mbox *mw = &pf->mbox;
@@ -987,6 +1070,9 @@ static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF",
BIT_ULL(0));
+
+ trace_otx2_msg_status(pf->pdev, "PF-AF up work queued(interrupt)",
+ hdr->num_msgs);
}
if (mbox_data & MBOX_DOWN_MSG) {
@@ -1003,6 +1089,9 @@ static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF",
BIT_ULL(0));
+
+ trace_otx2_msg_status(pf->pdev, "PF-AF down work queued(interrupt)",
+ hdr->num_msgs);
}
return IRQ_HANDLED;
@@ -1010,12 +1099,21 @@ static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
void otx2_disable_mbox_intr(struct otx2_nic *pf)
{
- int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX);
+ int vector;
/* Disable AF => PF mailbox IRQ */
- otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0));
+ if (!is_cn20k(pf->pdev)) {
+ vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX);
+ otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0));
+ } else {
+ vector = pci_irq_vector(pf->pdev,
+ RVU_MBOX_PF_INT_VEC_AFPF_MBOX);
+ otx2_write64(pf, RVU_PF_INT_ENA_W1C,
+ BIT_ULL(0) | BIT_ULL(1));
+ }
free_irq(vector, pf);
}
+EXPORT_SYMBOL(otx2_disable_mbox_intr);
int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
{
@@ -1025,10 +1123,24 @@ int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
int err;
/* Register mailbox interrupt handler */
- irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE];
- snprintf(irq_name, NAME_SIZE, "RVUPFAF Mbox");
- err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX),
- otx2_pfaf_mbox_intr_handler, 0, irq_name, pf);
+ if (!is_cn20k(pf->pdev)) {
+ irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "RVUPF%d AFPF Mbox",
+ rvu_get_pf(pf->pdev, pf->pcifunc));
+ err = request_irq(pci_irq_vector
+ (pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX),
+ pf->hw_ops->pfaf_mbox_intr_handler,
+ 0, irq_name, pf);
+ } else {
+ irq_name = &hw->irq_name[RVU_MBOX_PF_INT_VEC_AFPF_MBOX *
+ NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "RVUPF%d AFPF Mbox",
+ rvu_get_pf(pf->pdev, pf->pcifunc));
+ err = request_irq(pci_irq_vector
+ (pf->pdev, RVU_MBOX_PF_INT_VEC_AFPF_MBOX),
+ pf->hw_ops->pfaf_mbox_intr_handler,
+ 0, irq_name, pf);
+ }
if (err) {
dev_err(pf->dev,
"RVUPF: IRQ registration failed for PFAF mbox irq\n");
@@ -1038,8 +1150,14 @@ int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
/* Enable mailbox interrupt for msgs coming from AF.
* First clear to avoid spurious interrupts, if any.
*/
- otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
- otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0));
+ if (!is_cn20k(pf->pdev)) {
+ otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
+ otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0));
+ } else {
+ otx2_write64(pf, RVU_PF_INT, BIT_ULL(0) | BIT_ULL(1));
+ otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0) |
+ BIT_ULL(1));
+ }
if (!probe_af)
return 0;
@@ -1070,12 +1188,13 @@ void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
pf->mbox_wq = NULL;
}
- if (mbox->mbox.hwbase)
+ if (mbox->mbox.hwbase && !is_cn20k(pf->pdev))
iounmap((void __iomem *)mbox->mbox.hwbase);
otx2_mbox_destroy(&mbox->mbox);
otx2_mbox_destroy(&mbox->mbox_up);
}
+EXPORT_SYMBOL(otx2_pfaf_mbox_destroy);
int otx2_pfaf_mbox_init(struct otx2_nic *pf)
{
@@ -1089,12 +1208,20 @@ int otx2_pfaf_mbox_init(struct otx2_nic *pf)
if (!pf->mbox_wq)
return -ENOMEM;
- /* Mailbox is a reserved memory (in RAM) region shared between
- * admin function (i.e AF) and this PF, shouldn't be mapped as
- * device memory to allow unaligned accesses.
+ /* For CN20K, AF allocates mbox memory in DRAM and writes PF
+ * regions/offsets in RVU_MBOX_AF_PFX_ADDR; the RVU_PFX_FUNC_PFAF_MBOX
+ * gives the aliased address to access AF/PF mailbox regions.
*/
- hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM),
- MBOX_SIZE);
+ if (is_cn20k(pf->pdev))
+ hwbase = pf->reg_base + RVU_PFX_FUNC_PFAF_MBOX +
+ ((u64)BLKADDR_MBOX << RVU_FUNC_BLKADDR_SHIFT);
+ else
+ /* Mailbox is a reserved memory (in RAM) region shared between
+ * admin function (i.e AF) and this PF, shouldn't be mapped as
+ * device memory to allow unaligned accesses.
+ */
+ hwbase = ioremap_wc(pci_resource_start
+ (pf->pdev, PCI_MBOX_BAR_NUM), MBOX_SIZE);
if (!hwbase) {
dev_err(pf->dev, "Unable to map PFAF mailbox region\n");
err = -ENOMEM;
@@ -1267,8 +1394,8 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
{
struct otx2_nic *pf = data;
struct otx2_snd_queue *sq;
- u64 val, *ptr;
- u64 qidx = 0;
+ void __iomem *ptr;
+ u64 val, qidx = 0;
/* CQ */
for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) {
@@ -1398,6 +1525,7 @@ irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq)
return IRQ_HANDLED;
}
+EXPORT_SYMBOL(otx2_cq_intr_handler);
void otx2_disable_napi(struct otx2_nic *pf)
{
@@ -1415,6 +1543,7 @@ void otx2_disable_napi(struct otx2_nic *pf)
netif_napi_del(&cq_poll->napi);
}
}
+EXPORT_SYMBOL(otx2_disable_napi);
static void otx2_free_cq_res(struct otx2_nic *pf)
{
@@ -1446,6 +1575,8 @@ static void otx2_free_sq_res(struct otx2_nic *pf)
if (!sq->sqe)
continue;
qmem_free(pf->dev, sq->sqe);
+ qmem_free(pf->dev, sq->sqe_ring);
+ qmem_free(pf->dev, sq->cpt_resp);
qmem_free(pf->dev, sq->tso_hdrs);
kfree(sq->sg);
kfree(sq->sqb_ptrs);
@@ -1496,10 +1627,11 @@ int otx2_init_hw_resources(struct otx2_nic *pf)
hw->sqpool_cnt = otx2_get_total_tx_queues(pf);
hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
- /* Maximum hardware supported transmit length */
- pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN;
-
- pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu);
+ if (!otx2_rep_dev(pf->pdev)) {
+ /* Maximum hardware supported transmit length */
+ pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN;
+ pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu);
+ }
mutex_lock(&mbox->lock);
/* NPA init */
@@ -1512,6 +1644,9 @@ int otx2_init_hw_resources(struct otx2_nic *pf)
if (err)
goto err_free_npa_lf;
+ /* Disable backpressure on NIX-CPT by default */
+ otx2_nix_cpt_config_bp(pf, false);
+
/* Enable backpressure for CGX mapped PF/VFs */
if (!is_otx2_lbkvf(pf->pdev))
otx2_nix_config_bp(pf, true);
@@ -1552,10 +1687,15 @@ int otx2_init_hw_resources(struct otx2_nic *pf)
}
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
- err = otx2_txschq_config(pf, lvl, 0, false);
- if (err) {
- mutex_unlock(&mbox->lock);
- goto err_free_nix_queues;
+ int idx;
+
+ for (idx = 0; idx < pf->hw.txschq_cnt[lvl]; idx++) {
+ err = otx2_txschq_config(pf, lvl, idx, false);
+ if (err) {
+ dev_err(pf->dev, "Failed to config TXSCH\n");
+ mutex_unlock(&mbox->lock);
+ goto err_free_nix_queues;
+ }
}
}
@@ -1604,6 +1744,7 @@ exit:
mutex_unlock(&mbox->lock);
return err;
}
+EXPORT_SYMBOL(otx2_init_hw_resources);
void otx2_free_hw_resources(struct otx2_nic *pf)
{
@@ -1611,9 +1752,7 @@ void otx2_free_hw_resources(struct otx2_nic *pf)
struct nix_lf_free_req *free_req;
struct mbox *mbox = &pf->mbox;
struct otx2_cq_queue *cq;
- struct otx2_pool *pool;
struct msg_req *req;
- int pool_id;
int qidx;
/* Ensure all SQE are processed */
@@ -1627,11 +1766,12 @@ void otx2_free_hw_resources(struct otx2_nic *pf)
otx2_pfc_txschq_stop(pf);
#endif
- otx2_clean_qos_queues(pf);
+ if (!otx2_rep_dev(pf->pdev))
+ otx2_clean_qos_queues(pf);
mutex_lock(&mbox->lock);
/* Disable backpressure */
- if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK))
+ if (!is_otx2_lbkvf(pf->pdev))
otx2_nix_config_bp(pf, false);
mutex_unlock(&mbox->lock);
@@ -1653,17 +1793,11 @@ void otx2_free_hw_resources(struct otx2_nic *pf)
/* Free RQ buffer pointers*/
otx2_free_aura_ptr(pf, AURA_NIX_RQ);
- for (qidx = 0; qidx < pf->hw.rx_queues; qidx++) {
- pool_id = otx2_get_pool_idx(pf, AURA_NIX_RQ, qidx);
- pool = &pf->qset.pool[pool_id];
- page_pool_destroy(pool->page_pool);
- pool->page_pool = NULL;
- }
-
otx2_free_cq_res(pf);
/* Free all ingress bandwidth profiles allocated */
- cn10k_free_all_ipolicers(pf);
+ if (!otx2_rep_dev(pf->pdev))
+ cn10k_free_all_ipolicers(pf);
mutex_lock(&mbox->lock);
/* Reset NIX LF */
@@ -1691,6 +1825,7 @@ void otx2_free_hw_resources(struct otx2_nic *pf)
}
mutex_unlock(&mbox->lock);
}
+EXPORT_SYMBOL(otx2_free_hw_resources);
static bool otx2_promisc_use_mce_list(struct otx2_nic *pfvf)
{
@@ -1784,6 +1919,7 @@ void otx2_free_queue_mem(struct otx2_qset *qset)
kfree(qset->napi);
qset->napi = NULL;
}
+EXPORT_SYMBOL(otx2_free_queue_mem);
int otx2_alloc_queue_mem(struct otx2_nic *pf)
{
@@ -1830,6 +1966,7 @@ err_free_mem:
otx2_free_queue_mem(qset);
return -ENOMEM;
}
+EXPORT_SYMBOL(otx2_alloc_queue_mem);
int otx2_open(struct net_device *netdev)
{
@@ -1906,7 +2043,7 @@ int otx2_open(struct net_device *netdev)
if (err) {
dev_err(pf->dev,
"RVUPF%d: IRQ registration failed for QERR\n",
- rvu_get_pf(pf->pcifunc));
+ rvu_get_pf(pf->pdev, pf->pcifunc));
goto err_disable_napi;
}
@@ -1924,7 +2061,7 @@ int otx2_open(struct net_device *netdev)
if (name_len >= NAME_SIZE) {
dev_err(pf->dev,
"RVUPF%d: IRQ registration failed for CQ%d, irq name is too long\n",
- rvu_get_pf(pf->pcifunc), qidx);
+ rvu_get_pf(pf->pdev, pf->pcifunc), qidx);
err = -EINVAL;
goto err_free_cints;
}
@@ -1935,7 +2072,7 @@ int otx2_open(struct net_device *netdev)
if (err) {
dev_err(pf->dev,
"RVUPF%d: IRQ registration failed for CQ%d\n",
- rvu_get_pf(pf->pcifunc), qidx);
+ rvu_get_pf(pf->pdev, pf->pcifunc), qidx);
goto err_free_cints;
}
vec++;
@@ -1963,6 +2100,7 @@ int otx2_open(struct net_device *netdev)
}
pf->flags &= ~OTX2_FLAG_INTF_DOWN;
+ pf->flags &= ~OTX2_FLAG_PORT_UP;
/* 'intf_down' may be checked on any cpu */
smp_wmb();
@@ -2020,7 +2158,6 @@ int otx2_stop(struct net_device *netdev)
struct otx2_nic *pf = netdev_priv(netdev);
struct otx2_cq_poll *cq_poll = NULL;
struct otx2_qset *qset = &pf->qset;
- struct otx2_rss_info *rss;
int qidx, vec, wrk;
/* If the DOWN flag is set resources are already freed */
@@ -2038,10 +2175,7 @@ int otx2_stop(struct net_device *netdev)
otx2_rxtx_enable(pf, false);
/* Clear RSS enable flag */
- rss = &pf->hw.rss_info;
- rss->enable = false;
- if (!netif_is_rxfh_configured(netdev))
- kfree(rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]);
+ pf->hw.rss_info.enable = false;
/* Cleanup Queue IRQ */
vec = pci_irq_vector(pf->pdev,
@@ -2086,6 +2220,7 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct otx2_nic *pf = netdev_priv(netdev);
int qidx = skb_get_queue_mapping(skb);
+ struct otx2_dev_stats *dev_stats;
struct otx2_snd_queue *sq;
struct netdev_queue *txq;
int sq_idx;
@@ -2098,6 +2233,8 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Check for minimum and maximum packet length */
if (skb->len <= ETH_HLEN ||
(!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) {
+ dev_stats = &pf->hw.dev_stats;
+ atomic_long_inc(&dev_stats->tx_discards);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -2105,7 +2242,7 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
sq = &pf->qset.sq[sq_idx];
txq = netdev_get_tx_queue(netdev, qidx);
- if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
+ if (!otx2_sq_append_skb(pf, txq, sq, skb, qidx)) {
netif_tx_stop_queue(txq);
/* Check again, in case SQBs got freed up */
@@ -2222,6 +2359,10 @@ static int otx2_set_features(struct net_device *netdev,
return otx2_enable_rxvlan(pf,
features & NETIF_F_HW_VLAN_CTAG_RX);
+ if (changed & NETIF_F_HW_ESP)
+ return cn10k_ipsec_ethtool_init(netdev,
+ features & NETIF_F_HW_ESP);
+
return otx2_handle_ntuple_tc_features(netdev, features);
}
@@ -2304,18 +2445,26 @@ static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable)
return 0;
}
-int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
+int otx2_config_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ *config = pfvf->tstamp;
+ return 0;
+}
+EXPORT_SYMBOL(otx2_config_hwtstamp_get);
+
+int otx2_config_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
- struct hwtstamp_config config;
if (!pfvf->ptp)
return -ENODEV;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
if (pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC)
pfvf->flags &= ~OTX2_FLAG_PTP_ONESTEP_SYNC;
@@ -2324,8 +2473,11 @@ int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
otx2_config_hw_tx_tstamp(pfvf, false);
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
- if (!test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag))
+ if (!test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "One-step time stamping is not supported");
return -ERANGE;
+ }
pfvf->flags |= OTX2_FLAG_PTP_ONESTEP_SYNC;
schedule_delayed_work(&pfvf->ptp->synctstamp_work,
msecs_to_jiffies(500));
@@ -2337,7 +2489,7 @@ int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
otx2_config_hw_rx_tstamp(pfvf, false);
break;
@@ -2356,35 +2508,17 @@ int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
otx2_config_hw_rx_tstamp(pfvf, true);
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
}
- memcpy(&pfvf->tstamp, &config, sizeof(config));
-
- return copy_to_user(ifr->ifr_data, &config,
- sizeof(config)) ? -EFAULT : 0;
-}
-EXPORT_SYMBOL(otx2_config_hwtstamp);
+ pfvf->tstamp = *config;
-int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
-{
- struct otx2_nic *pfvf = netdev_priv(netdev);
- struct hwtstamp_config *cfg = &pfvf->tstamp;
-
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return otx2_config_hwtstamp(netdev, req);
- case SIOCGHWTSTAMP:
- return copy_to_user(req->ifr_data, cfg,
- sizeof(*cfg)) ? -EFAULT : 0;
- default:
- return -EOPNOTSUPP;
- }
+ return 0;
}
-EXPORT_SYMBOL(otx2_ioctl);
+EXPORT_SYMBOL(otx2_config_hwtstamp_set);
static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac)
{
@@ -2630,7 +2764,6 @@ static int otx2_get_vf_config(struct net_device *netdev, int vf,
static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
int qidx)
{
- struct page *page;
u64 dma_addr;
int err = 0;
@@ -2640,11 +2773,11 @@ static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
if (dma_mapping_error(pf->dev, dma_addr))
return -ENOMEM;
- err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx);
+ err = otx2_xdp_sq_append_pkt(pf, xdpf, dma_addr, xdpf->len,
+ qidx, OTX2_XDP_REDIRECT);
if (!err) {
otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE);
- page = virt_to_page(xdpf->data);
- put_page(page);
+ xdp_return_frame(xdpf);
return -ENOMEM;
}
return 0;
@@ -2728,6 +2861,8 @@ static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
return otx2_xdp_setup(pf, xdp->prog);
+ case XDP_SETUP_XSK_POOL:
+ return otx2_xsk_pool_setup(pf, xdp->xsk.pool, xdp->xsk.queue_id);
default:
return -EINVAL;
}
@@ -2800,14 +2935,16 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_set_features = otx2_set_features,
.ndo_tx_timeout = otx2_tx_timeout,
.ndo_get_stats64 = otx2_get_stats64,
- .ndo_eth_ioctl = otx2_ioctl,
.ndo_set_vf_mac = otx2_set_vf_mac,
.ndo_set_vf_vlan = otx2_set_vf_vlan,
.ndo_get_vf_config = otx2_get_vf_config,
.ndo_bpf = otx2_xdp,
+ .ndo_xsk_wakeup = otx2_xsk_wakeup,
.ndo_xdp_xmit = otx2_xdp_xmit,
.ndo_setup_tc = otx2_setup_tc,
.ndo_set_vf_trust = otx2_ndo_set_vf_trust,
+ .ndo_hwtstamp_get = otx2_config_hwtstamp_get,
+ .ndo_hwtstamp_set = otx2_config_hwtstamp_set,
};
int otx2_wq_init(struct otx2_nic *pf)
@@ -2861,6 +2998,7 @@ int otx2_realloc_msix_vectors(struct otx2_nic *pf)
return otx2_register_mbox_intr(pf, false);
}
+EXPORT_SYMBOL(otx2_realloc_msix_vectors);
static int otx2_sriov_vfcfg_init(struct otx2_nic *pf)
{
@@ -2924,8 +3062,13 @@ int otx2_init_rsrc(struct pci_dev *pdev, struct otx2_nic *pf)
if (err)
return err;
- err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT,
- RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
+ if (!is_cn20k(pf->pdev))
+ err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT,
+ RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
+ else
+ err = pci_alloc_irq_vectors(hw->pdev, RVU_MBOX_PF_INT_VEC_CNT,
+ RVU_MBOX_PF_INT_VEC_CNT,
+ PCI_IRQ_MSIX);
if (err < 0) {
dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
__func__, num_vec);
@@ -2934,6 +3077,11 @@ int otx2_init_rsrc(struct pci_dev *pdev, struct otx2_nic *pf)
otx2_setup_dev_hw_settings(pf);
+ if (is_cn20k(pf->pdev))
+ cn20k_init(pf);
+ else
+ otx2_init_hw_ops(pf);
+
/* Init PF <=> AF mailbox stuff */
err = otx2_pfaf_mbox_init(pf);
if (err)
@@ -2976,6 +3124,7 @@ err_free_irq_vectors:
return err;
}
+EXPORT_SYMBOL(otx2_init_rsrc);
static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
@@ -2991,7 +3140,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- err = pci_request_regions(pdev, DRV_NAME);
+ err = pcim_request_all_regions(pdev, DRV_NAME);
if (err) {
dev_err(dev, "PCI request regions failed 0x%x\n", err);
return err;
@@ -3000,7 +3149,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (err) {
dev_err(dev, "DMA mask config failed, abort\n");
- goto err_release_regions;
+ return err;
}
pci_set_master(pdev);
@@ -3010,10 +3159,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES);
netdev = alloc_etherdev_mqs(sizeof(*pf), qcount + qos_txqs, qcount);
- if (!netdev) {
- err = -ENOMEM;
- goto err_release_regions;
- }
+ if (!netdev)
+ return -ENOMEM;
pci_set_drvdata(pdev, netdev);
SET_NETDEV_DEV(netdev, &pdev->dev);
@@ -3071,6 +3218,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_ptp_destroy;
+ otx2_set_hw_capabilities(pf);
+
err = cn10k_mcs_init(pf);
if (err)
goto err_del_mcam_entries;
@@ -3109,10 +3258,14 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* reset CGX/RPM MAC stats */
otx2_reset_mac_stats(pf);
+ err = cn10k_ipsec_init(netdev);
+ if (err)
+ goto err_mcs_free;
+
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_mcs_free;
+ goto err_ipsec_clean;
}
err = otx2_wq_init(pf);
@@ -3137,22 +3290,36 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Enable link notifications */
otx2_cgx_config_linkevents(pf, true);
+ pf->af_xdp_zc_qidx = bitmap_zalloc(qcount, GFP_KERNEL);
+ if (!pf->af_xdp_zc_qidx) {
+ err = -ENOMEM;
+ goto err_sriov_cleannup;
+ }
+
#ifdef CONFIG_DCB
err = otx2_dcbnl_set_ops(netdev);
if (err)
- goto err_pf_sriov_init;
+ goto err_free_zc_bmap;
#endif
otx2_qos_init(pf, qos_txqs);
return 0;
+#ifdef CONFIG_DCB
+err_free_zc_bmap:
+ bitmap_free(pf->af_xdp_zc_qidx);
+#endif
+err_sriov_cleannup:
+ otx2_sriov_vfcfg_cleanup(pf);
err_pf_sriov_init:
otx2_shutdown_tc(pf);
err_mcam_flow_del:
otx2_mcam_flow_del(pf);
err_unreg_netdev:
unregister_netdev(netdev);
+err_ipsec_clean:
+ cn10k_ipsec_clean(pf);
err_mcs_free:
cn10k_mcs_free(pf);
err_del_mcam_entries:
@@ -3171,8 +3338,6 @@ err_detach_rsrc:
err_free_netdev:
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
-err_release_regions:
- pci_release_regions(pdev);
return err;
}
@@ -3214,6 +3379,7 @@ static void otx2_vf_link_event_task(struct work_struct *work)
req = (struct cgx_link_info_msg *)msghdr;
req->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.pcifunc = pf->pcifunc;
memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info));
otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx);
@@ -3350,6 +3516,7 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_unregister_dl(pf);
unregister_netdev(netdev);
+ cn10k_ipsec_clean(pf);
cn10k_mcs_free(pf);
otx2_sriov_disable(pf->pdev);
otx2_sriov_vfcfg_cleanup(pf);
@@ -3369,10 +3536,9 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_disable_mbox_intr(pf);
otx2_pfaf_mbox_destroy(pf);
pci_free_irq_vectors(pf->pdev);
+ bitmap_free(pf->af_xdp_zc_qidx);
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
-
- pci_release_regions(pdev);
}
static struct pci_driver otx2_pf_driver = {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index 63130ba37e9d..dedd586ed310 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -193,7 +193,7 @@ static int ptp_pps_on(struct otx2_ptp *ptp, int on, u64 period)
return otx2_sync_mbox_msg(&ptp->nic->mbox);
}
-static u64 ptp_cc_read(const struct cyclecounter *cc)
+static u64 ptp_cc_read(struct cyclecounter *cc)
{
struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter);
@@ -491,7 +491,7 @@ void otx2_ptp_destroy(struct otx2_nic *pfvf)
if (!ptp)
return;
- cancel_delayed_work(&pfvf->ptp->synctstamp_work);
+ cancel_delayed_work_sync(&pfvf->ptp->synctstamp_work);
ptp_clock_unregister(ptp->ptp_clock);
kfree(ptp);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
index e3aee6e36215..1cd576fd09c5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
@@ -44,6 +44,17 @@
#define RVU_PF_VF_MBOX_ADDR (0xC40)
#define RVU_PF_LMTLINE_ADDR (0xC48)
+#define RVU_MBOX_PF_VFX_PFVF_TRIGX(a) (0x2000 | (a) << 3)
+#define RVU_MBOX_PF_VFPF_INTX(a) (0x1000 | (a) << 3)
+#define RVU_MBOX_PF_VFPF_INT_W1SX(a) (0x1020 | (a) << 3)
+#define RVU_MBOX_PF_VFPF_INT_ENA_W1SX(a) (0x1040 | (a) << 3)
+#define RVU_MBOX_PF_VFPF_INT_ENA_W1CX(a) (0x1060 | (a) << 3)
+
+#define RVU_MBOX_PF_VFPF1_INTX(a) (0x1080 | (a) << 3)
+#define RVU_MBOX_PF_VFPF1_INT_W1SX(a) (0x10a0 | (a) << 3)
+#define RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(a) (0x10c0 | (a) << 3)
+#define RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(a) (0x10e0 | (a) << 3)
+
/* RVU VF registers */
#define RVU_VF_VFPF_MBOX0 (0x00000)
#define RVU_VF_VFPF_MBOX1 (0x00008)
@@ -58,6 +69,11 @@
#define RVU_VF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
#define RVU_VF_MBOX_REGION (0xC0000)
+/* CN20K RVU_MBOX_E: RVU PF/VF MBOX Address Range Enumeration */
+#define RVU_MBOX_AF_PFX_ADDR(a) (0x5000 | (a) << 4)
+#define RVU_PFX_FUNC_PFAF_MBOX (0x80000)
+#define RVU_PFX_FUNCX_VFAF_MBOX (0x40000)
+
#define RVU_FUNC_BLKADDR_SHIFT 20
#define RVU_FUNC_BLKADDR_MASK 0x1FULL
@@ -138,39 +154,12 @@
#define NIX_LF_CINTX_ENA_W1S(a) (NIX_LFBASE | 0xD40 | (a) << 12)
#define NIX_LF_CINTX_ENA_W1C(a) (NIX_LFBASE | 0xD50 | (a) << 12)
-/* NIX AF transmit scheduler registers */
-#define NIX_AF_SMQX_CFG(a) (0x700 | (u64)(a) << 16)
-#define NIX_AF_TL4X_SDP_LINK_CFG(a) (0xB10 | (u64)(a) << 16)
-#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (u64)(a) << 16)
-#define NIX_AF_TL1X_CIR(a) (0xC20 | (u64)(a) << 16)
-#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (u64)(a) << 16)
-#define NIX_AF_TL2X_PARENT(a) (0xE88 | (u64)(a) << 16)
-#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (u64)(a) << 16)
-#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (u64)(a) << 16)
-#define NIX_AF_TL2X_CIR(a) (0xE20 | (u64)(a) << 16)
-#define NIX_AF_TL2X_PIR(a) (0xE30 | (u64)(a) << 16)
-#define NIX_AF_TL3X_PARENT(a) (0x1088 | (u64)(a) << 16)
-#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (u64)(a) << 16)
-#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (u64)(a) << 16)
-#define NIX_AF_TL3X_CIR(a) (0x1020 | (u64)(a) << 16)
-#define NIX_AF_TL3X_PIR(a) (0x1030 | (u64)(a) << 16)
-#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (u64)(a) << 16)
-#define NIX_AF_TL4X_PARENT(a) (0x1288 | (u64)(a) << 16)
-#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (u64)(a) << 16)
-#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (u64)(a) << 16)
-#define NIX_AF_TL4X_CIR(a) (0x1220 | (u64)(a) << 16)
-#define NIX_AF_TL4X_PIR(a) (0x1230 | (u64)(a) << 16)
-#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (u64)(a) << 16)
-#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (u64)(a) << 16)
-#define NIX_AF_MDQX_SHAPE(a) (0x1410 | (u64)(a) << 16)
-#define NIX_AF_MDQX_CIR(a) (0x1420 | (u64)(a) << 16)
-#define NIX_AF_MDQX_PIR(a) (0x1430 | (u64)(a) << 16)
-#define NIX_AF_MDQX_PARENT(a) (0x1480 | (u64)(a) << 16)
-#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (u64)(a) << 16 | (b) << 3)
-
/* LMT LF registers */
#define LMT_LFBASE BIT_ULL(RVU_FUNC_BLKADDR_SHIFT)
#define LMT_LF_LMTLINEX(a) (LMT_LFBASE | 0x000 | (a) << 12)
#define LMT_LF_LMTCANCEL (LMT_LFBASE | 0x400)
+/* CN20K registers */
+#define RVU_PF_DISC (0x0)
+
#endif /* OTX2_REG_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index e63cc1eb6d89..26a08d2cfbb1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -443,6 +443,7 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
struct flow_action_entry *act;
struct net_device *target;
struct otx2_nic *priv;
+ struct rep_dev *rdev;
u32 burst, mark = 0;
u8 nr_police = 0;
u8 num_intf = 1;
@@ -464,14 +465,19 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
return 0;
case FLOW_ACTION_REDIRECT_INGRESS:
target = act->dev;
- priv = netdev_priv(target);
- /* npc_install_flow_req doesn't support passing a target pcifunc */
- if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
- NL_SET_ERR_MSG_MOD(extack,
- "can't redirect to other pf/vf");
- return -EOPNOTSUPP;
+ if (target->dev.parent) {
+ priv = netdev_priv(target);
+ if (rvu_get_pf(nic->pdev, nic->pcifunc) !=
+ rvu_get_pf(nic->pdev, priv->pcifunc)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't redirect to other pf/vf");
+ return -EOPNOTSUPP;
+ }
+ req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;
+ } else {
+ rdev = netdev_priv(target);
+ req->vf = rdev->pcifunc & RVU_PFVF_FUNC_MASK;
}
- req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;
/* if op is already set; avoid overwriting the same */
if (!req->op)
@@ -1300,6 +1306,7 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
req->channel = nic->hw.rx_chan_base;
req->entry = flow_cfg->flow_ent[mcam_idx];
req->intf = NIX_INTF_RX;
+ req->vf = nic->pcifunc;
req->set_cntr = 1;
new_node->entry = req->entry;
@@ -1319,7 +1326,6 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
free_leaf:
otx2_tc_del_from_flow_list(flow_cfg, new_node);
- kfree_rcu(new_node, rcu);
if (new_node->is_act_police) {
mutex_lock(&nic->mbox.lock);
@@ -1339,6 +1345,7 @@ free_leaf:
mutex_unlock(&nic->mbox.lock);
}
+ kfree_rcu(new_node, rcu);
return rc;
}
@@ -1400,8 +1407,8 @@ static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
return 0;
}
-static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
- struct flow_cls_offload *cls_flower)
+int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
+ struct flow_cls_offload *cls_flower)
{
switch (cls_flower->command) {
case FLOW_CLS_REPLACE:
@@ -1414,6 +1421,7 @@ static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
return -EOPNOTSUPP;
}
}
+EXPORT_SYMBOL(otx2_setup_tc_cls_flower);
static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic,
struct tc_cls_matchall_offload *cls)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 7aaf32e9aa95..625bb5a05344 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -11,6 +11,8 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/ip6_checksum.h>
+#include <net/xfrm.h>
+#include <net/xdp.h>
#include "otx2_reg.h"
#include "otx2_common.h"
@@ -18,6 +20,7 @@
#include "otx2_txrx.h"
#include "otx2_ptp.h"
#include "cn10k.h"
+#include "otx2_xsk.h"
#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
#define PTP_PORT 0x13F
@@ -26,11 +29,30 @@
*/
#define PTP_SYNC_SEC_OFFSET 34
+DEFINE_STATIC_KEY_FALSE(cn10k_ipsec_sa_enabled);
+
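+/* Number of free SQE slots between the consumer and producer heads of the
+ * circular SQE ring (sqe_cnt is a power of two, so the mask wraps the index).
+ */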
+static int otx2_get_free_sqe(struct otx2_snd_queue *sq)
+{
+ return (sq->cons_head - sq->head - 1 + sq->sqe_cnt)
+ & (sq->sqe_cnt - 1);
+}
+
static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct bpf_prog *prog,
struct nix_cqe_rx_s *cqe,
struct otx2_cq_queue *cq,
- bool *need_xdp_flush);
+ u32 *metasize, bool *need_xdp_flush);
+
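+/* Select where the next SQE is built: IPsec-offloaded skbs use the dedicated
+ * sqe_ring (each entry spans two SQE sizes, with the SQE built in the second
+ * half), all other skbs use the regular per-queue SQE buffer.
+ */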
+static void otx2_sq_set_sqe_base(struct otx2_snd_queue *sq,
+ struct sk_buff *skb)
+{
+ if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) &&
+ (xfrm_offload(skb)))
+ sq->sqe_base = sq->sqe_ring->base + sq->sqe_size +
+ (sq->head * (sq->sqe_size * 2));
+ else
+ sq->sqe_base = sq->sqe->base;
+}
static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq)
@@ -80,54 +102,24 @@ static unsigned int frag_num(unsigned int i)
#endif
}
-static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
- struct sk_buff *skb, int seg, int *len)
-{
- const skb_frag_t *frag;
- struct page *page;
- int offset;
-
- /* First segment is always skb->data */
- if (!seg) {
- page = virt_to_page(skb->data);
- offset = offset_in_page(skb->data);
- *len = skb_headlen(skb);
- } else {
- frag = &skb_shinfo(skb)->frags[seg - 1];
- page = skb_frag_page(frag);
- offset = skb_frag_off(frag);
- *len = skb_frag_size(frag);
- }
- return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE);
-}
-
-static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
-{
- int seg;
-
- for (seg = 0; seg < sg->num_segs; seg++) {
- otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
- sg->size[seg], DMA_TO_DEVICE);
- }
- sg->num_segs = 0;
-}
-
static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
struct otx2_snd_queue *sq,
- struct nix_cqe_tx_s *cqe)
+ struct nix_cqe_tx_s *cqe,
+ int *xsk_frames)
{
struct nix_send_comp_s *snd_comp = &cqe->comp;
struct sg_list *sg;
- struct page *page;
- u64 pa;
sg = &sq->sg[snd_comp->sqe_id];
+ if (sg->flags & OTX2_AF_XDP_FRAME) {
+ (*xsk_frames)++;
+ return;
+ }
- pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]);
- otx2_dma_unmap_page(pfvf, sg->dma_addr[0],
- sg->size[0], DMA_TO_DEVICE);
- page = virt_to_page(phys_to_virt(pa));
- put_page(page);
+ if (sg->flags & OTX2_XDP_REDIRECT)
+ otx2_dma_unmap_page(pfvf, sg->dma_addr[0], sg->size[0], DMA_TO_DEVICE);
+ xdp_return_frame((struct xdp_frame *)sg->skb);
+ sg->skb = (u64)NULL;
}
static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
@@ -343,18 +335,24 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
struct nix_rx_parse_s *parse = &cqe->parse;
struct nix_rx_sg_s *sg = &cqe->sg;
struct sk_buff *skb = NULL;
+ u64 *word = (u64 *)parse;
void *end, *start;
+ u32 metasize = 0;
u64 *seg_addr;
u16 *seg_size;
int seg;
if (unlikely(parse->errlev || parse->errcode)) {
- if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
+ if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx)) {
+ trace_otx2_parse_dump(pfvf->pdev, "Err:", word);
return;
+ }
}
+ trace_otx2_parse_dump(pfvf->pdev, "", word);
if (pfvf->xdp_prog)
- if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq, need_xdp_flush))
+ if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq,
+ &metasize, need_xdp_flush))
return;
skb = napi_get_frags(napi);
@@ -376,14 +374,18 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
}
otx2_set_rxhash(pfvf, cqe, skb);
- skb_record_rx_queue(skb, cq->cq_idx);
- if (pfvf->netdev->features & NETIF_F_RXCSUM)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (!(pfvf->flags & OTX2_FLAG_REP_MODE_ENABLED)) {
+ skb_record_rx_queue(skb, cq->cq_idx);
+ if (pfvf->netdev->features & NETIF_F_RXCSUM)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
if (pfvf->flags & OTX2_FLAG_TC_MARK_ENABLED)
skb->mark = parse->match_id;
skb_mark_for_recycle(skb);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
napi_gro_frags(napi);
}
@@ -447,23 +449,42 @@ int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
return cnt - cq->pool_ptrs;
}
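+/* Credit completed AF_XDP frames back to the XSK pool, set the TX need_wakeup
+ * flag if the pool uses it, and transmit any pending zero-copy descriptors.
+ */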
+static void otx2_zc_submit_pkts(struct otx2_nic *pfvf, struct xsk_buff_pool *xsk_pool,
+ int *xsk_frames, int qidx, int budget)
+{
+ if (*xsk_frames)
+ xsk_tx_completed(xsk_pool, *xsk_frames);
+
+ if (xsk_uses_need_wakeup(xsk_pool))
+ xsk_set_tx_need_wakeup(xsk_pool);
+
+ otx2_zc_napi_handler(pfvf, xsk_pool, qidx, budget);
+}
+
static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq, int budget)
{
int tx_pkts = 0, tx_bytes = 0, qidx;
struct otx2_snd_queue *sq;
struct nix_cqe_tx_s *cqe;
+ struct net_device *ndev;
int processed_cqe = 0;
+ int xsk_frames = 0;
+
+ qidx = cq->cq_idx - pfvf->hw.rx_queues;
+ sq = &pfvf->qset.sq[qidx];
if (cq->pend_cqe >= budget)
goto process_cqe;
- if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) {
+ if (sq->xsk_pool)
+ otx2_zc_submit_pkts(pfvf, sq->xsk_pool, &xsk_frames,
+ qidx, budget);
return 0;
+ }
process_cqe:
- qidx = cq->cq_idx - pfvf->hw.rx_queues;
- sq = &pfvf->qset.sq[qidx];
while (likely(processed_cqe < budget) && cq->pend_cqe) {
cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
@@ -473,10 +494,8 @@ process_cqe:
break;
}
- qidx = cq->cq_idx - pfvf->hw.rx_queues;
-
if (cq->cq_type == CQ_XDP)
- otx2_xdp_snd_pkt_handler(pfvf, sq, cqe);
+ otx2_xdp_snd_pkt_handler(pfvf, sq, cqe, &xsk_frames);
else
otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[qidx],
cqe, budget, &tx_pkts, &tx_bytes);
@@ -493,6 +512,13 @@ process_cqe:
otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
((u64)cq->cq_idx << 32) | processed_cqe);
+#if IS_ENABLED(CONFIG_RVU_ESWITCH)
+ if (pfvf->flags & OTX2_FLAG_REP_MODE_ENABLED)
+ ndev = pfvf->reps[qidx]->netdev;
+ else
+#endif
+ ndev = pfvf->netdev;
+
if (likely(tx_pkts)) {
struct netdev_queue *txq;
@@ -500,14 +526,20 @@ process_cqe:
if (qidx >= pfvf->hw.tx_queues)
qidx -= pfvf->hw.xdp_queues;
- txq = netdev_get_tx_queue(pfvf->netdev, qidx);
+ if (pfvf->flags & OTX2_FLAG_REP_MODE_ENABLED)
+ qidx = 0;
+ txq = netdev_get_tx_queue(ndev, qidx);
netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
/* Check if queue was stopped earlier due to ring full */
smp_mb();
if (netif_tx_queue_stopped(txq) &&
- netif_carrier_ok(pfvf->netdev))
+ netif_carrier_ok(ndev))
netif_tx_wake_queue(txq);
}
+
+ if (sq->xsk_pool)
+ otx2_zc_submit_pkts(pfvf, sq->xsk_pool, &xsk_frames, qidx, budget);
+
return 0;
}
@@ -533,9 +565,10 @@ static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_p
int otx2_napi_handler(struct napi_struct *napi, int budget)
{
struct otx2_cq_queue *rx_cq = NULL;
+ struct otx2_cq_queue *cq = NULL;
+ struct otx2_pool *pool = NULL;
struct otx2_cq_poll *cq_poll;
int workdone = 0, cq_idx, i;
- struct otx2_cq_queue *cq;
struct otx2_qset *qset;
struct otx2_nic *pfvf;
int filled_cnt = -1;
@@ -560,6 +593,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
if (rx_cq && rx_cq->pool_ptrs)
filled_cnt = pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
+
/* Clear the IRQ */
otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));
@@ -572,20 +606,31 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
if (pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED)
otx2_adjust_adaptive_coalese(pfvf, cq_poll);
+ if (likely(cq))
+ pool = &pfvf->qset.pool[cq->cq_idx];
+
if (unlikely(!filled_cnt)) {
struct refill_work *work;
struct delayed_work *dwork;
- work = &pfvf->refill_wrk[cq->cq_idx];
- dwork = &work->pool_refill_work;
- /* Schedule a task if no other task is running */
- if (!cq->refill_task_sched) {
- work->napi = napi;
- cq->refill_task_sched = true;
- schedule_delayed_work(dwork,
- msecs_to_jiffies(100));
+ if (likely(cq)) {
+ work = &pfvf->refill_wrk[cq->cq_idx];
+ dwork = &work->pool_refill_work;
+ /* Schedule a task if no other task is running */
+ if (!cq->refill_task_sched) {
+ work->napi = napi;
+ cq->refill_task_sched = true;
+ schedule_delayed_work(dwork,
+ msecs_to_jiffies(100));
+ }
+ /* Set need-wakeup as the pool buffers could not be refilled */
+ if (pool->xsk_pool)
+ xsk_set_rx_need_wakeup(pool->xsk_pool);
}
} else {
+ /* Clear wake-up, since buffers are filled successfully */
+ if (pool && pool->xsk_pool)
+ xsk_clear_rx_need_wakeup(pool->xsk_pool);
/* Re-enable interrupts */
otx2_write64(pfvf,
NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
@@ -594,6 +639,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
}
return workdone;
}
+EXPORT_SYMBOL(otx2_napi_handler);
void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
int size, int qidx)
@@ -612,7 +658,6 @@ void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
sq->head &= (sq->sqe_cnt - 1);
}
-#define MAX_SEGS_PER_SG 3
/* Add SQE scatter/gather subdescriptor structure */
static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
struct sk_buff *skb, int num_segs, int *offset)
@@ -1141,18 +1186,19 @@ static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
}
}
-bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
+bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
+ struct otx2_snd_queue *sq,
struct sk_buff *skb, u16 qidx)
{
- struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx);
- struct otx2_nic *pfvf = netdev_priv(netdev);
int offset, num_segs, free_desc;
struct nix_sqe_hdr_s *sqe_hdr;
+ struct otx2_nic *pfvf = dev;
+ bool ret;
/* Check if there is enough room between producer
* and consumer index.
*/
- free_desc = (sq->cons_head - sq->head - 1 + sq->sqe_cnt) & (sq->sqe_cnt - 1);
+ free_desc = otx2_get_free_sqe(sq);
if (free_desc < sq->sqe_thresh)
return false;
@@ -1164,6 +1210,7 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
/* If SKB doesn't fit in a single SQE, linearize it.
* TODO: Consider adding JUMP descriptor instead.
*/
+
if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) {
if (__skb_linearize(skb)) {
dev_kfree_skb_any(skb);
@@ -1183,6 +1230,9 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
return true;
}
+ /* Set sqe base address */
+ otx2_sq_set_sqe_base(sq, skb);
+
/* Set SQE's SEND_HDR.
* Do not clear the first 64bit as it contains constant info.
*/
@@ -1195,7 +1245,13 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
otx2_sqe_add_ext(pfvf, sq, skb, &offset);
/* Add SG subdesc with data frags */
- if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) {
+ if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) &&
+ (xfrm_offload(skb)))
+ ret = otx2_sqe_add_sg_ipsec(pfvf, sq, skb, num_segs, &offset);
+ else
+ ret = otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset);
+
+ if (!ret) {
otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]);
return false;
}
@@ -1204,11 +1260,15 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
sqe_hdr->sizem1 = (offset / 16) - 1;
+ if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) &&
+ (xfrm_offload(skb)))
+ return cn10k_ipsec_transmit(pfvf, txq, sq, skb, num_segs,
+ offset);
+
netdev_tx_sent_queue(txq, skb->len);
/* Flush SQE to HW */
pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
-
return true;
}
EXPORT_SYMBOL(otx2_sq_append_skb);
@@ -1221,15 +1281,19 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int q
u16 pool_id;
u64 iova;
- if (pfvf->xdp_prog)
+ pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);
+ pool = &pfvf->qset.pool[pool_id];
+
+ if (pfvf->xdp_prog) {
+ if (pool->page_pool)
+ xdp_rxq_info_unreg_mem_model(&cq->xdp_rxq);
+
xdp_rxq_info_unreg(&cq->xdp_rxq);
+ }
if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
return;
- pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);
- pool = &pfvf->qset.pool[pool_id];
-
while (cq->pend_cqe) {
cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq);
processed_cqe++;
@@ -1350,8 +1414,8 @@ void otx2_free_pending_sqe(struct otx2_nic *pfvf)
}
}
-static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
- int len, int *offset)
+void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, struct xdp_frame *xdpf,
+ u64 dma_addr, int len, int *offset, u16 flags)
{
struct nix_sqe_sg_s *sg = NULL;
u64 *iova = NULL;
@@ -1368,16 +1432,34 @@ static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
sq->sg[sq->head].dma_addr[0] = dma_addr;
sq->sg[sq->head].size[0] = len;
sq->sg[sq->head].num_segs = 1;
+ sq->sg[sq->head].flags = flags;
+ sq->sg[sq->head].skb = (u64)xdpf;
}
-bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
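+/* Free SQE slots available to the XDP/AF_XDP transmit paths, excluding the
+ * reserve kept below sqe_thresh; returns 0 (with a warning) when fewer than
+ * sqe_thresh slots remain.
+ */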
+int otx2_read_free_sqe(struct otx2_nic *pfvf, u16 qidx)
+{
+ struct otx2_snd_queue *sq;
+ int free_sqe;
+
+ sq = &pfvf->qset.sq[qidx];
+ free_sqe = otx2_get_free_sqe(sq);
+ if (free_sqe < sq->sqe_thresh) {
+ netdev_warn(pfvf->netdev, "No free SQE for send queue %d\n", qidx);
+ return 0;
+ }
+
+ return free_sqe - sq->sqe_thresh;
+}
+
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, struct xdp_frame *xdpf,
+ u64 iova, int len, u16 qidx, u16 flags)
{
struct nix_sqe_hdr_s *sqe_hdr;
struct otx2_snd_queue *sq;
int offset, free_sqe;
sq = &pfvf->qset.sq[qidx];
- free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;
+ free_sqe = otx2_get_free_sqe(sq);
if (free_sqe < sq->sqe_thresh)
return false;
@@ -1396,7 +1478,7 @@ bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
offset = sizeof(*sqe_hdr);
- otx2_xdp_sqe_add_sg(sq, iova, len, &offset);
+ otx2_xdp_sqe_add_sg(sq, xdpf, iova, len, &offset, flags);
sqe_hdr->sizem1 = (offset / 16) - 1;
pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
@@ -1407,16 +1489,30 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct bpf_prog *prog,
struct nix_cqe_rx_s *cqe,
struct otx2_cq_queue *cq,
- bool *need_xdp_flush)
+ u32 *metasize, bool *need_xdp_flush)
{
+ struct xdp_buff xdp, *xsk_buff = NULL;
unsigned char *hard_start;
+ struct otx2_pool *pool;
+ struct xdp_frame *xdpf;
int qidx = cq->cq_idx;
- struct xdp_buff xdp;
struct page *page;
u64 iova, pa;
u32 act;
int err;
+ pool = &pfvf->qset.pool[qidx];
+
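+ /* Zero-copy queue: the receive buffer is an XSK buffer stashed at
+ * refill time, so run the XDP program on it directly.
+ */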
+ if (pool->xsk_pool) {
+ xsk_buff = pool->xdp[--cq->rbpool->xdp_top];
+ if (!xsk_buff)
+ return false;
+
+ xsk_buff->data_end = xsk_buff->data + cqe->sg.seg_size;
+ act = bpf_prog_run_xdp(prog, xsk_buff);
+ goto handle_xdp_verdict;
+ }
+
iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
page = virt_to_page(phys_to_virt(pa));
@@ -1425,41 +1521,63 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
hard_start = (unsigned char *)phys_to_virt(pa);
xdp_prepare_buff(&xdp, hard_start, OTX2_HEAD_ROOM,
- cqe->sg.seg_size, false);
+ cqe->sg.seg_size, true);
act = bpf_prog_run_xdp(prog, &xdp);
+handle_xdp_verdict:
switch (act) {
case XDP_PASS:
+ *metasize = xdp.data - xdp.data_meta;
break;
case XDP_TX:
qidx += pfvf->hw.tx_queues;
cq->pool_ptrs++;
- return otx2_xdp_sq_append_pkt(pfvf, iova,
- cqe->sg.seg_size, qidx);
+ xdpf = xdp_convert_buff_to_frame(&xdp);
+ return otx2_xdp_sq_append_pkt(pfvf, xdpf,
+ cqe->sg.seg_addr,
+ cqe->sg.seg_size,
+ qidx, OTX2_XDP_TX);
case XDP_REDIRECT:
cq->pool_ptrs++;
- err = xdp_do_redirect(pfvf->netdev, &xdp, prog);
+ if (xsk_buff) {
+ err = xdp_do_redirect(pfvf->netdev, xsk_buff, prog);
+ if (!err) {
+ *need_xdp_flush = true;
+ return true;
+ }
+ return false;
+ }
- otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
- DMA_FROM_DEVICE);
+ err = xdp_do_redirect(pfvf->netdev, &xdp, prog);
if (!err) {
*need_xdp_flush = true;
return true;
}
- put_page(page);
+
+ otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
+ DMA_FROM_DEVICE);
+ xdpf = xdp_convert_buff_to_frame(&xdp);
+ xdp_return_frame(xdpf);
break;
default:
bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act);
- break;
+ fallthrough;
case XDP_ABORTED:
- trace_xdp_exception(pfvf->netdev, prog, act);
- break;
+ if (act == XDP_ABORTED)
+ trace_xdp_exception(pfvf->netdev, prog, act);
+ fallthrough;
case XDP_DROP:
- otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
- DMA_FROM_DEVICE);
- put_page(page);
cq->pool_ptrs++;
+ if (xsk_buff) {
+ xsk_buff_free(xsk_buff);
+ } else if (pp_page_to_nmdesc(page)->pp) {
+ page_pool_recycle_direct(pool->page_pool, page);
+ } else {
+ otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ }
return true;
}
return false;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index 3f1d2655ff77..acf259d72008 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -12,6 +12,7 @@
#include <linux/iommu.h>
#include <linux/if_vlan.h>
#include <net/xdp.h>
+#include <net/xdp_sock_drv.h>
#define LBK_CHAN_BASE 0x000
#define SDP_CHAN_BASE 0x700
@@ -76,6 +77,7 @@ struct otx2_rcv_queue {
struct sg_list {
u16 num_segs;
+ u16 flags;
u64 skb;
u64 size[OTX2_MAX_FRAGS_IN_SQE];
u64 dma_addr[OTX2_MAX_FRAGS_IN_SQE];
@@ -101,6 +103,11 @@ struct otx2_snd_queue {
struct queue_stats stats;
u16 sqb_count;
u64 *sqb_ptrs;
+ /* SQE ring and CPT response queue for Inline IPSEC */
+ struct qmem *sqe_ring;
+ struct qmem *cpt_resp;
+ /* Buffer pool for af_xdp zero-copy */
+ struct xsk_buff_pool *xsk_pool;
} ____cacheline_aligned_in_smp;
enum cq_type {
@@ -124,7 +131,11 @@ struct otx2_pool {
struct qmem *stack;
struct qmem *fc_addr;
struct page_pool *page_pool;
+ struct xsk_buff_pool *xsk_pool;
+ struct xdp_buff **xdp;
+ u16 xdp_cnt;
u16 rbsize;
+ u16 xdp_top;
};
struct otx2_cq_queue {
@@ -141,6 +152,7 @@ struct otx2_cq_queue {
void *cqe_base;
struct qmem *cqe;
struct otx2_pool *rbpool;
+ bool xsk_zc_en;
struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned_in_smp;
@@ -167,7 +179,8 @@ static inline u64 otx2_iova_to_phys(void *iommu_domain, dma_addr_t dma_addr)
}
int otx2_napi_handler(struct napi_struct *napi, int budget);
-bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
+bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
+ struct otx2_snd_queue *sq,
struct sk_buff *skb, u16 qidx);
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq,
int size, int qidx);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index c4e6c78a8deb..f4fdbfba8667 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -14,6 +14,7 @@
#include "otx2_reg.h"
#include "otx2_ptp.h"
#include "cn10k.h"
+#include "cn10k_ipsec.h"
#define DRV_NAME "rvu_nicvf"
#define DRV_STRING "Marvell RVU NIC Virtual Function Driver"
@@ -21,6 +22,7 @@
static const struct pci_device_id otx2_vf_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AFVF) },
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_SDP_REP) },
{ }
};
@@ -134,7 +136,7 @@ static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf,
rsp->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
- rsp->hdr.pcifunc = 0;
+ rsp->hdr.pcifunc = req->pcifunc;
rsp->hdr.rc = 0;
err = otx2_mbox_up_handler_cgx_link_event(
vf, (struct cgx_link_info_msg *)req, rsp);
@@ -238,6 +240,10 @@ static void otx2vf_disable_mbox_intr(struct otx2_nic *vf)
/* Disable VF => PF mailbox IRQ */
otx2_write64(vf, RVU_VF_INT_ENA_W1C, BIT_ULL(0));
+
+ if (is_cn20k(vf->pdev))
+ otx2_write64(vf, RVU_VF_INT_ENA_W1C, BIT_ULL(0) | BIT_ULL(1));
+
free_irq(vector, vf);
}
@@ -250,9 +256,18 @@ static int otx2vf_register_mbox_intr(struct otx2_nic *vf, bool probe_pf)
/* Register mailbox interrupt handler */
irq_name = &hw->irq_name[RVU_VF_INT_VEC_MBOX * NAME_SIZE];
- snprintf(irq_name, NAME_SIZE, "RVUVFAF Mbox");
- err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX),
- otx2vf_vfaf_mbox_intr_handler, 0, irq_name, vf);
+ snprintf(irq_name, NAME_SIZE, "RVUVF%d AFVF Mbox", ((vf->pcifunc &
+ RVU_PFVF_FUNC_MASK) - 1));
+
+ if (!is_cn20k(vf->pdev)) {
+ err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX),
+ otx2vf_vfaf_mbox_intr_handler, 0, irq_name, vf);
+ } else {
+ err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX),
+ vf->hw_ops->vfaf_mbox_intr_handler, 0, irq_name,
+ vf);
+ }
+
if (err) {
dev_err(vf->dev,
"RVUPF: IRQ registration failed for VFAF mbox irq\n");
@@ -262,8 +277,15 @@ static int otx2vf_register_mbox_intr(struct otx2_nic *vf, bool probe_pf)
/* Enable mailbox interrupt for msgs coming from PF.
* First clear to avoid spurious interrupts, if any.
*/
- otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
- otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0));
+ if (!is_cn20k(vf->pdev)) {
+ otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
+ otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0));
+ } else {
+ otx2_write64(vf, RVU_VF_INT, BIT_ULL(0) | BIT_ULL(1) |
+ BIT_ULL(2) | BIT_ULL(3));
+ otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0) |
+ BIT_ULL(1) | BIT_ULL(2) | BIT_ULL(3));
+ }
if (!probe_pf)
return 0;
@@ -313,7 +335,13 @@ static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf)
if (!vf->mbox_wq)
return -ENOMEM;
- if (test_bit(CN10K_MBOX, &vf->hw.cap_flag)) {
+ /* On the cn20k platform the VF mailbox region is in DRAM, aliased from
+ * the AF VF MBOX address; MBOX is a separate RVU block.
+ */
+ if (is_cn20k(vf->pdev)) {
+ hwbase = vf->reg_base + RVU_VF_MBOX_REGION + ((u64)BLKADDR_MBOX <<
+ RVU_FUNC_BLKADDR_SHIFT);
+ } else if (test_bit(CN10K_MBOX, &vf->hw.cap_flag)) {
/* For cn10k platform, VF mailbox region is in its BAR2
* register space
*/
@@ -371,7 +399,7 @@ static int otx2vf_open(struct net_device *netdev)
/* LBKs do not receive link events so tell everyone we are up here */
vf = netdev_priv(netdev);
- if (is_otx2_lbkvf(vf->pdev)) {
+ if (is_otx2_lbkvf(vf->pdev) || is_otx2_sdp_rep(vf->pdev)) {
pr_info("%s NIC Link is UP\n", netdev->name);
netif_carrier_on(netdev);
netif_tx_start_all_queues(netdev);
@@ -389,13 +417,23 @@ static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct otx2_nic *vf = netdev_priv(netdev);
int qidx = skb_get_queue_mapping(skb);
+ struct otx2_dev_stats *dev_stats;
struct otx2_snd_queue *sq;
struct netdev_queue *txq;
+ /* Check for minimum and maximum packet length */
+ if (skb->len <= ETH_HLEN ||
+ (!skb_shinfo(skb)->gso_size && skb->len > vf->tx_max_pktlen)) {
+ dev_stats = &vf->hw.dev_stats;
+ atomic_long_inc(&dev_stats->tx_discards);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
sq = &vf->qset.sq[qidx];
txq = netdev_get_tx_queue(netdev, qidx);
- if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
+ if (!otx2_sq_append_skb(vf, txq, sq, skb, qidx)) {
netif_tx_stop_queue(txq);
/* Check again, incase SQBs got freed up */
@@ -496,8 +534,9 @@ static const struct net_device_ops otx2vf_netdev_ops = {
.ndo_set_features = otx2vf_set_features,
.ndo_get_stats64 = otx2_get_stats64,
.ndo_tx_timeout = otx2_tx_timeout,
- .ndo_eth_ioctl = otx2_ioctl,
.ndo_setup_tc = otx2_setup_tc,
+ .ndo_hwtstamp_get = otx2_config_hwtstamp_get,
+ .ndo_hwtstamp_set = otx2_config_hwtstamp_set,
};
static int otx2_vf_wq_init(struct otx2_nic *vf)
@@ -546,7 +585,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- err = pci_request_regions(pdev, DRV_NAME);
+ err = pcim_request_all_regions(pdev, DRV_NAME);
if (err) {
dev_err(dev, "PCI request regions failed 0x%x\n", err);
return err;
@@ -555,7 +594,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (err) {
dev_err(dev, "DMA mask config failed, abort\n");
- goto err_release_regions;
+ return err;
}
pci_set_master(pdev);
@@ -563,10 +602,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
qcount = num_online_cpus();
qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES);
netdev = alloc_etherdev_mqs(sizeof(*vf), qcount + qos_txqs, qcount);
- if (!netdev) {
- err = -ENOMEM;
- goto err_release_regions;
- }
+ if (!netdev)
+ return -ENOMEM;
pci_set_drvdata(pdev, netdev);
SET_NETDEV_DEV(netdev, &pdev->dev);
@@ -616,6 +653,12 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
otx2_setup_dev_hw_settings(vf);
+
+ if (is_cn20k(vf->pdev))
+ cn20k_init(vf);
+ else
+ otx2_init_hw_ops(vf);
+
/* Init VF <=> PF mailbox stuff */
err = otx2vf_vfaf_mbox_init(vf);
if (err)
@@ -683,10 +726,23 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
snprintf(netdev->name, sizeof(netdev->name), "lbk%d", n);
}
+ if (is_otx2_sdp_rep(vf->pdev)) {
+ int n;
+
+ n = vf->pcifunc & RVU_PFVF_FUNC_MASK;
+ n -= 1;
+ snprintf(netdev->name, sizeof(netdev->name), "sdp%d-%d",
+ pdev->bus->number, n);
+ }
+
+ err = cn10k_ipsec_init(netdev);
+ if (err)
+ goto err_ptp_destroy;
+
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_ptp_destroy;
+ goto err_ipsec_clean;
}
err = otx2_vf_wq_init(vf);
@@ -707,19 +763,36 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_shutdown_tc;
+ vf->af_xdp_zc_qidx = bitmap_zalloc(qcount, GFP_KERNEL);
+ if (!vf->af_xdp_zc_qidx) {
+ err = -ENOMEM;
+ goto err_unreg_devlink;
+ }
+
#ifdef CONFIG_DCB
- err = otx2_dcbnl_set_ops(netdev);
- if (err)
- goto err_shutdown_tc;
+ /* Priority flow control is not supported for LBK and SDP vf(s) */
+ if (!(is_otx2_lbkvf(vf->pdev) || is_otx2_sdp_rep(vf->pdev))) {
+ err = otx2_dcbnl_set_ops(netdev);
+ if (err)
+ goto err_free_zc_bmap;
+ }
#endif
otx2_qos_init(vf, qos_txqs);
return 0;
+#ifdef CONFIG_DCB
+err_free_zc_bmap:
+ bitmap_free(vf->af_xdp_zc_qidx);
+#endif
+err_unreg_devlink:
+ otx2_unregister_dl(vf);
err_shutdown_tc:
otx2_shutdown_tc(vf);
err_unreg_netdev:
unregister_netdev(netdev);
+err_ipsec_clean:
+ cn10k_ipsec_clean(vf);
err_ptp_destroy:
otx2_ptp_destroy(vf);
err_detach_rsrc:
@@ -736,8 +809,6 @@ err_free_irq_vectors:
err_free_netdev:
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
-err_release_regions:
- pci_release_regions(pdev);
return err;
}
@@ -772,6 +843,7 @@ static void otx2vf_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
if (vf->otx2_wq)
destroy_workqueue(vf->otx2_wq);
+ cn10k_ipsec_clean(vf);
otx2_ptp_destroy(vf);
otx2_mcam_flow_del(vf);
otx2_shutdown_tc(vf);
@@ -783,10 +855,9 @@ static void otx2vf_remove(struct pci_dev *pdev)
qmem_free(vf->dev, vf->dync_lmt);
otx2vf_vfaf_mbox_destroy(vf);
pci_free_irq_vectors(vf->pdev);
+ bitmap_free(vf->af_xdp_zc_qidx);
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
-
- pci_release_regions(pdev);
}
static struct pci_driver otx2vf_driver = {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c
new file mode 100644
index 000000000000..7d67b4cbaf71
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#include <linux/bpf_trace.h>
+#include <linux/stringify.h>
+#include <net/xdp_sock_drv.h>
+#include <net/xdp.h>
+
+#include "otx2_common.h"
+#include "otx2_struct.h"
+#include "otx2_xsk.h"
+
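+/* Allocate one receive buffer from the XSK pool: stash the xdp_buff so the
+ * RX path can find it later and return its (aligned) DMA address for refill.
+ */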
+int otx2_xsk_pool_alloc_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma, int idx)
+{
+ struct xdp_buff *xdp;
+ int delta;
+
+ xdp = xsk_buff_alloc(pool->xsk_pool);
+ if (!xdp)
+ return -ENOMEM;
+
+ pool->xdp[pool->xdp_top++] = xdp;
+ *dma = OTX2_DATA_ALIGN(xsk_buff_xdp_get_dma(xdp));
+ /* Adjust xdp->data for unaligned addresses */
+ delta = *dma - xsk_buff_xdp_get_dma(xdp);
+ xdp->data += delta;
+
+ return 0;
+}
+
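+/* Disable the RQ, aura and pool contexts of this queue via the AF mailbox so
+ * the receive queue can be detached from or re-attached to an XSK pool.
+ */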
+static int otx2_xsk_ctx_disable(struct otx2_nic *pfvf, u16 qidx, int aura_id)
+{
+ struct nix_cn10k_aq_enq_req *cn10k_rq_aq;
+ struct npa_aq_enq_req *aura_aq;
+ struct npa_aq_enq_req *pool_aq;
+ struct nix_aq_enq_req *rq_aq;
+
+ if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
+ cn10k_rq_aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!cn10k_rq_aq)
+ return -ENOMEM;
+ cn10k_rq_aq->qidx = qidx;
+ cn10k_rq_aq->rq.ena = 0;
+ cn10k_rq_aq->rq_mask.ena = 1;
+ cn10k_rq_aq->ctype = NIX_AQ_CTYPE_RQ;
+ cn10k_rq_aq->op = NIX_AQ_INSTOP_WRITE;
+ } else {
+ rq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!rq_aq)
+ return -ENOMEM;
+ rq_aq->qidx = qidx;
+ rq_aq->sq.ena = 0;
+ rq_aq->sq_mask.ena = 1;
+ rq_aq->ctype = NIX_AQ_CTYPE_RQ;
+ rq_aq->op = NIX_AQ_INSTOP_WRITE;
+ }
+
+ aura_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!aura_aq)
+ goto fail;
+
+ aura_aq->aura_id = aura_id;
+ aura_aq->aura.ena = 0;
+ aura_aq->aura_mask.ena = 1;
+ aura_aq->ctype = NPA_AQ_CTYPE_AURA;
+ aura_aq->op = NPA_AQ_INSTOP_WRITE;
+
+ pool_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!pool_aq)
+ goto fail;
+
+ pool_aq->aura_id = aura_id;
+ pool_aq->pool.ena = 0;
+ pool_aq->pool_mask.ena = 1;
+
+ pool_aq->ctype = NPA_AQ_CTYPE_POOL;
+ pool_aq->op = NPA_AQ_INSTOP_WRITE;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+
+fail:
+ otx2_mbox_reset(&pfvf->mbox.mbox, 0);
+ return -ENOMEM;
+}
+
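+/* Flush pending CQEs, free every buffer still held by the queue's aura and
+ * disable its RQ/aura/pool contexts before the buffer pool is swapped.
+ */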
+static void otx2_clean_up_rq(struct otx2_nic *pfvf, int qidx)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ struct otx2_cq_queue *cq;
+ struct otx2_pool *pool;
+ u64 iova;
+
+ /* If the DOWN flag is set, the queues are already freed */
+ if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
+ return;
+
+ cq = &qset->cq[qidx];
+ if (cq)
+ otx2_cleanup_rx_cqes(pfvf, cq, qidx);
+
+ pool = &pfvf->qset.pool[qidx];
+ iova = otx2_aura_allocptr(pfvf, qidx);
+ while (iova) {
+ iova -= OTX2_HEAD_ROOM;
+ otx2_free_bufs(pfvf, pool, iova, pfvf->rbsize);
+ iova = otx2_aura_allocptr(pfvf, qidx);
+ }
+
+ mutex_lock(&pfvf->mbox.lock);
+ otx2_xsk_ctx_disable(pfvf, qidx, qidx);
+ mutex_unlock(&pfvf->mbox.lock);
+}
+
+int otx2_xsk_pool_enable(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qidx)
+{
+ u16 rx_queues = pf->hw.rx_queues;
+ u16 tx_queues = pf->hw.tx_queues;
+ int err;
+
+ if (qidx >= rx_queues || qidx >= tx_queues)
+ return -EINVAL;
+
+ err = xsk_pool_dma_map(pool, pf->dev, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+ if (err)
+ return err;
+
+ set_bit(qidx, pf->af_xdp_zc_qidx);
+ otx2_clean_up_rq(pf, qidx);
+ /* Reconfigure RSS table as 'qidx' cannot be part of RSS now */
+ otx2_set_rss_table(pf, DEFAULT_RSS_CONTEXT_GROUP, NULL);
+ /* Kick start the NAPI context so that receiving will start */
+ return otx2_xsk_wakeup(pf->netdev, qidx, XDP_WAKEUP_RX);
+}
+
+int otx2_xsk_pool_disable(struct otx2_nic *pf, u16 qidx)
+{
+ struct net_device *netdev = pf->netdev;
+ struct xsk_buff_pool *pool;
+ struct otx2_snd_queue *sq;
+
+ pool = xsk_get_pool_from_qid(netdev, qidx);
+ if (!pool)
+ return -EINVAL;
+
+ sq = &pf->qset.sq[qidx + pf->hw.tx_queues];
+ sq->xsk_pool = NULL;
+ otx2_clean_up_rq(pf, qidx);
+ clear_bit(qidx, pf->af_xdp_zc_qidx);
+ xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+ /* Reconfigure RSS table as 'qidx' needs to be part of RSS again */
+ otx2_set_rss_table(pf, DEFAULT_RSS_CONTEXT_GROUP, NULL);
+
+ return 0;
+}
+
+int otx2_xsk_pool_setup(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qidx)
+{
+ if (pool)
+ return otx2_xsk_pool_enable(pf, pool, qidx);
+
+ return otx2_xsk_pool_disable(pf, qidx);
+}
+
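+/* ndo_xsk_wakeup handler: if the queue's NAPI is not already scheduled, raise
+ * the CQ interrupt so the poll loop runs and services the XSK rings;
+ * otherwise mark the NAPI as missed.
+ */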
+int otx2_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
+{
+ struct otx2_nic *pf = netdev_priv(dev);
+ struct otx2_cq_poll *cq_poll = NULL;
+ struct otx2_qset *qset = &pf->qset;
+
+ if (pf->flags & OTX2_FLAG_INTF_DOWN)
+ return -ENETDOWN;
+
+ if (queue_id >= pf->hw.rx_queues || queue_id >= pf->hw.tx_queues)
+ return -EINVAL;
+
+ cq_poll = &qset->napi[queue_id];
+ if (!cq_poll)
+ return -EINVAL;
+
+ /* Trigger interrupt */
+ if (!napi_if_scheduled_mark_missed(&cq_poll->napi)) {
+ otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx), BIT_ULL(0));
+ otx2_write64(pf, NIX_LF_CINTX_INT_W1S(cq_poll->cint_idx), BIT_ULL(0));
+ }
+
+ return 0;
+}
+
+void otx2_attach_xsk_buff(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, int qidx)
+{
+ if (test_bit(qidx, pfvf->af_xdp_zc_qidx))
+ sq->xsk_pool = xsk_get_pool_from_qid(pfvf->netdev, qidx);
+}
+
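+/* Build a minimal SQE (header plus one SG sub-descriptor) for an AF_XDP
+ * zero-copy frame and flush it to hardware; the OTX2_AF_XDP_FRAME flag makes
+ * the completion path credit it back to the XSK pool instead of freeing it.
+ */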
+static void otx2_xsk_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len,
+ u16 qidx)
+{
+ struct nix_sqe_hdr_s *sqe_hdr;
+ struct otx2_snd_queue *sq;
+ int offset;
+
+ sq = &pfvf->qset.sq[qidx];
+ memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
+
+ sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
+
+ if (!sqe_hdr->total) {
+ sqe_hdr->aura = sq->aura_id;
+ sqe_hdr->df = 1;
+ sqe_hdr->sq = qidx;
+ sqe_hdr->pnc = 1;
+ }
+ sqe_hdr->total = len;
+ sqe_hdr->sqe_id = sq->head;
+
+ offset = sizeof(*sqe_hdr);
+
+ otx2_xdp_sqe_add_sg(sq, NULL, iova, len, &offset, OTX2_AF_XDP_FRAME);
+ sqe_hdr->sizem1 = (offset / 16) - 1;
+ pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
+}
+
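+/* Zero-copy TX: peek up to 'budget' descriptors from the XSK pool, capped by
+ * the number of free SQEs, and queue each one as a zero-copy packet.
+ */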
+void otx2_zc_napi_handler(struct otx2_nic *pfvf, struct xsk_buff_pool *pool,
+ int queue, int budget)
+{
+ struct xdp_desc *xdp_desc = pool->tx_descs;
+ int i, batch;
+
+ budget = min(budget, otx2_read_free_sqe(pfvf, queue));
+ batch = xsk_tx_peek_release_desc_batch(pool, budget);
+ if (!batch)
+ return;
+
+ for (i = 0; i < batch; i++) {
+ dma_addr_t dma_addr;
+
+ dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc[i].addr);
+ otx2_xsk_sq_append_pkt(pfvf, dma_addr, xdp_desc[i].len, queue);
+ }
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h
new file mode 100644
index 000000000000..8047fafee8fe
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#ifndef OTX2_XSK_H
+#define OTX2_XSK_H
+
+struct otx2_nic;
+struct xsk_buff_pool;
+
+int otx2_xsk_pool_setup(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qid);
+int otx2_xsk_pool_enable(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qid);
+int otx2_xsk_pool_disable(struct otx2_nic *pf, u16 qid);
+int otx2_xsk_pool_alloc_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma, int idx);
+int otx2_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
+void otx2_zc_napi_handler(struct otx2_nic *pfvf, struct xsk_buff_pool *pool,
+ int queue, int budget);
+void otx2_attach_xsk_buff(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, int qidx);
+
+#endif /* OTX2_XSK_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
index 0f844c14485a..5765bac119f0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
@@ -165,6 +165,11 @@ static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf,
otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
} else if (level == NIX_TXSCH_LVL_TL2) {
+ /* configure parent txschq */
+ cfg->reg[num_regs] = NIX_AF_TL2X_PARENT(node->schq);
+ cfg->regval[num_regs] = (u64)hw->tx_link << 16;
+ num_regs++;
+
/* configure link cfg */
if (level == pfvf->qos.link_cfg_lvl) {
cfg->reg[num_regs] = NIX_AF_TL3_TL2X_LINKX_CFG(node->schq, hw->tx_link);
@@ -1633,6 +1638,7 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force
if (!node->is_static)
dwrr_del_node = true;
+ WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
/* destroy the leaf node */
otx2_qos_disable_sq(pfvf, qid);
otx2_qos_destroy_node(pfvf, node);
@@ -1677,9 +1683,6 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force
}
kfree(new_cfg);
- /* update tx_real_queues */
- otx2_qos_update_tx_netdev_queues(pfvf);
-
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
index 9d887bfc3108..2872adabc830 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
@@ -82,7 +82,7 @@ static int otx2_qos_sq_aura_pool_init(struct otx2_nic *pfvf, int qidx)
}
for (ptr = 0; ptr < num_sqbs; ptr++) {
- err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr);
if (err)
goto sqb_free;
pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
@@ -151,9 +151,10 @@ static void otx2_qos_sq_free_sqbs(struct otx2_nic *pfvf, int qidx)
static void otx2_qos_sqb_flush(struct otx2_nic *pfvf, int qidx)
{
int sqe_tail, sqe_head;
- u64 incr, *ptr, val;
+ void __iomem *ptr;
+ u64 incr, val;
- ptr = (__force u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
+ ptr = otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
incr = (u64)qidx << 32;
val = otx2_atomic64_add(incr, ptr);
sqe_head = (val >> 20) & 0x3F;
@@ -256,6 +257,26 @@ out:
return err;
}
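+/* Request the AF to sync the NDC (context cache) entries for the NIX LF TX
+ * and NPA LF so no stale cached state remains before the SQ is torn down.
+ */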
+static int otx2_qos_nix_npa_ndc_sync(struct otx2_nic *pfvf)
+{
+ struct ndc_sync_op *req;
+ int rc;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_ndc_sync_op(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->nix_lf_tx_sync = true;
+ req->npa_lf_sync = true;
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+
void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx)
{
struct otx2_qset *qset = &pfvf->qset;
@@ -285,6 +306,8 @@ void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx)
otx2_qos_sqb_flush(pfvf, sq_idx);
otx2_smq_flush(pfvf, otx2_get_smq_idx(pfvf, sq_idx));
+ /* NIX/NPA NDC sync */
+ otx2_qos_nix_npa_ndc_sync(pfvf);
otx2_cleanup_tx_cqes(pfvf, cq);
mutex_lock(&pfvf->mbox.lock);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
new file mode 100644
index 000000000000..b476733a0234
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
@@ -0,0 +1,879 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU representor driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/net_tstamp.h>
+#include <linux/sort.h>
+
+#include "otx2_common.h"
+#include "cn10k.h"
+#include "otx2_reg.h"
+#include "rep.h"
+
+#define DRV_NAME "rvu_rep"
+#define DRV_STRING "Marvell RVU Representor Driver"
+
+static const struct pci_device_id rvu_rep_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_RVU_REP) },
+ { }
+};
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION(DRV_STRING);
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, rvu_rep_id_table);
+
+static int rvu_rep_notify_pfvf(struct otx2_nic *priv, u16 event,
+ struct rep_event *data);
+
+static int rvu_rep_mcam_flow_init(struct rep_dev *rep)
+{
+ struct npc_mcam_alloc_entry_req *req;
+ struct npc_mcam_alloc_entry_rsp *rsp;
+ struct otx2_nic *priv = rep->mdev;
+ int ent, allocated = 0;
+ int count;
+
+ rep->flow_cfg = kcalloc(1, sizeof(struct otx2_flow_config), GFP_KERNEL);
+
+ if (!rep->flow_cfg)
+ return -ENOMEM;
+
+ count = OTX2_DEFAULT_FLOWCOUNT;
+
+ rep->flow_cfg->flow_ent = kcalloc(count, sizeof(u16), GFP_KERNEL);
+ if (!rep->flow_cfg->flow_ent)
+ return -ENOMEM;
+
+ mutex_lock(&priv->mbox.lock);
+
+ while (allocated < count) {
+ req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&priv->mbox);
+ if (!req)
+ goto exit;
+
+ req->hdr.pcifunc = rep->pcifunc;
+ req->contig = false;
+ req->ref_entry = 0;
+ req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
+ NPC_MAX_NONCONTIG_ENTRIES : count - allocated;
+
+ if (otx2_sync_mbox_msg(&priv->mbox))
+ goto exit;
+
+ rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
+ (&priv->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp))
+ goto exit;
+
+ for (ent = 0; ent < rsp->count; ent++)
+ rep->flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];
+
+ allocated += rsp->count;
+
+ if (rsp->count != req->count)
+ break;
+ }
+exit:
+ /* Multiple MCAM entry alloc requests could result in non-sequential
+ * MCAM entries in the flow_ent[] array. Sort them in an ascending
+ * order, otherwise user installed ntuple filter index and MCAM entry
+ * index will not be in sync.
+ */
+ if (allocated)
+ sort(&rep->flow_cfg->flow_ent[0], allocated,
+ sizeof(rep->flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL);
+
+ mutex_unlock(&priv->mbox.lock);
+
+ rep->flow_cfg->max_flows = allocated;
+
+ if (allocated) {
+ rep->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
+ rep->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
+ rep->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
+ }
+
+ INIT_LIST_HEAD(&rep->flow_cfg->flow_list);
+ INIT_LIST_HEAD(&rep->flow_cfg->flow_list_tc);
+ return 0;
+}
+
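+/* tc offload callback for a representor: MCAM entries are allocated lazily on
+ * the first flower request, then the shared otx2_nic context is pointed at
+ * this representor's netdev/pcifunc/flow_cfg before dispatching the command.
+ */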
+static int rvu_rep_setup_tc_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct rep_dev *rep = cb_priv;
+ struct otx2_nic *priv = rep->mdev;
+
+ if (!(rep->flags & RVU_REP_VF_INITIALIZED))
+ return -EINVAL;
+
+ if (!(rep->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
+ rvu_rep_mcam_flow_init(rep);
+
+ priv->netdev = rep->netdev;
+ priv->flags = rep->flags;
+ priv->pcifunc = rep->pcifunc;
+ priv->flow_cfg = rep->flow_cfg;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return otx2_setup_tc_cls_flower(priv, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static LIST_HEAD(rvu_rep_block_cb_list);
+static int rvu_rep_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct rvu_rep *rep = netdev_priv(netdev);
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return flow_block_cb_setup_simple(type_data,
+ &rvu_rep_block_cb_list,
+ rvu_rep_setup_tc_cb,
+ rep, rep, true);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+rvu_rep_sp_stats64(const struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct rep_dev *rep = netdev_priv(dev);
+ struct otx2_nic *priv = rep->mdev;
+ struct otx2_rcv_queue *rq;
+ struct otx2_snd_queue *sq;
+ u16 qidx = rep->rep_id;
+
+ otx2_update_rq_stats(priv, qidx);
+ rq = &priv->qset.rq[qidx];
+
+ otx2_update_sq_stats(priv, qidx);
+ sq = &priv->qset.sq[qidx];
+
+ stats->tx_bytes = sq->stats.bytes;
+ stats->tx_packets = sq->stats.pkts;
+ stats->rx_bytes = rq->stats.bytes;
+ stats->rx_packets = rq->stats.pkts;
+ return 0;
+}
+
+static bool
+rvu_rep_has_offload_stats(const struct net_device *dev, int attr_id)
+{
+ return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
+}
+
+static int
+rvu_rep_get_offload_stats(int attr_id, const struct net_device *dev,
+ void *sp)
+{
+ if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT)
+ return rvu_rep_sp_stats64(dev, (struct rtnl_link_stats64 *)sp);
+
+ return -EINVAL;
+}
+
+static int rvu_rep_dl_port_fn_hw_addr_get(struct devlink_port *port,
+ u8 *hw_addr, int *hw_addr_len,
+ struct netlink_ext_ack *extack)
+{
+ struct rep_dev *rep = container_of(port, struct rep_dev, dl_port);
+
+ ether_addr_copy(hw_addr, rep->mac);
+ *hw_addr_len = ETH_ALEN;
+ return 0;
+}
+
+static int rvu_rep_dl_port_fn_hw_addr_set(struct devlink_port *port,
+ const u8 *hw_addr, int hw_addr_len,
+ struct netlink_ext_ack *extack)
+{
+ struct rep_dev *rep = container_of(port, struct rep_dev, dl_port);
+ struct otx2_nic *priv = rep->mdev;
+ struct rep_event evt = {0};
+
+ eth_hw_addr_set(rep->netdev, hw_addr);
+ ether_addr_copy(rep->mac, hw_addr);
+
+ ether_addr_copy(evt.evt_data.mac, hw_addr);
+ evt.pcifunc = rep->pcifunc;
+ rvu_rep_notify_pfvf(priv, RVU_EVENT_MAC_ADDR_CHANGE, &evt);
+ return 0;
+}
+
+static const struct devlink_port_ops rvu_rep_dl_port_ops = {
+ .port_fn_hw_addr_get = rvu_rep_dl_port_fn_hw_addr_get,
+ .port_fn_hw_addr_set = rvu_rep_dl_port_fn_hw_addr_set,
+};
+
+static void
+rvu_rep_devlink_set_switch_id(struct otx2_nic *priv,
+ struct netdev_phys_item_id *ppid)
+{
+ struct pci_dev *pdev = priv->pdev;
+ u64 id;
+
+ id = pci_get_dsn(pdev);
+
+ ppid->id_len = sizeof(id);
+ put_unaligned_be64(id, &ppid->id);
+}
+
+static void rvu_rep_devlink_port_unregister(struct rep_dev *rep)
+{
+ devlink_port_unregister(&rep->dl_port);
+}
+
+static int rvu_rep_devlink_port_register(struct rep_dev *rep)
+{
+ struct devlink_port_attrs attrs = {};
+ struct otx2_nic *priv = rep->mdev;
+ struct devlink *dl = priv->dl->dl;
+ int err;
+
+ if (!(rep->pcifunc & RVU_PFVF_FUNC_MASK)) {
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = rvu_get_pf(priv->pdev, rep->pcifunc);
+ } else {
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
+ attrs.pci_vf.pf = rvu_get_pf(priv->pdev, rep->pcifunc);
+ attrs.pci_vf.vf = rep->pcifunc & RVU_PFVF_FUNC_MASK;
+ }
+
+ rvu_rep_devlink_set_switch_id(priv, &attrs.switch_id);
+ devlink_port_attrs_set(&rep->dl_port, &attrs);
+
+ err = devl_port_register_with_ops(dl, &rep->dl_port, rep->rep_id,
+ &rvu_rep_dl_port_ops);
+ if (err) {
+ dev_err(rep->mdev->dev, "devlink_port_register failed: %d\n",
+ err);
+ return err;
+ }
+ return 0;
+}
+
+static int rvu_rep_get_repid(struct otx2_nic *priv, u16 pcifunc)
+{
+ int rep_id;
+
+ for (rep_id = 0; rep_id < priv->rep_cnt; rep_id++)
+ if (priv->rep_pf_map[rep_id] == pcifunc)
+ return rep_id;
+ return -EINVAL;
+}
+
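+/* Relay a representor event (MAC address, MTU or port state change) to the
+ * AF over the mailbox so it can be forwarded to the represented PF/VF.
+ */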
+static int rvu_rep_notify_pfvf(struct otx2_nic *priv, u16 event,
+ struct rep_event *data)
+{
+ struct rep_event *req;
+
+ mutex_lock(&priv->mbox.lock);
+ req = otx2_mbox_alloc_msg_rep_event_notify(&priv->mbox);
+ if (!req) {
+ mutex_unlock(&priv->mbox.lock);
+ return -ENOMEM;
+ }
+ req->event = event;
+ req->pcifunc = data->pcifunc;
+
+ memcpy(&req->evt_data, &data->evt_data, sizeof(struct rep_evt_data));
+ otx2_sync_mbox_msg(&priv->mbox);
+ mutex_unlock(&priv->mbox.lock);
+ return 0;
+}
+
+static void rvu_rep_state_evt_handler(struct otx2_nic *priv,
+ struct rep_event *info)
+{
+ struct rep_dev *rep;
+ int rep_id;
+
+ rep_id = rvu_rep_get_repid(priv, info->pcifunc);
+ rep = priv->reps[rep_id];
+ if (info->evt_data.vf_state)
+ rep->flags |= RVU_REP_VF_INITIALIZED;
+ else
+ rep->flags &= ~RVU_REP_VF_INITIALIZED;
+}
+
+int rvu_event_up_notify(struct otx2_nic *pf, struct rep_event *info)
+{
+ if (info->event & RVU_EVENT_PFVF_STATE)
+ rvu_rep_state_evt_handler(pf, info);
+ return 0;
+}
+
+static int rvu_rep_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct rep_dev *rep = netdev_priv(dev);
+ struct otx2_nic *priv = rep->mdev;
+ struct rep_event evt = {0};
+
+ netdev_info(dev, "Changing MTU from %d to %d\n",
+ dev->mtu, new_mtu);
+ dev->mtu = new_mtu;
+
+ evt.evt_data.mtu = new_mtu;
+ evt.pcifunc = rep->pcifunc;
+ rvu_rep_notify_pfvf(priv, RVU_EVENT_MTU_CHANGE, &evt);
+ return 0;
+}
+
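+/* Deferred stats refresh: fetch the represented function's NIX LF RX/TX
+ * counters from the AF via mailbox and cache them in rep->stats.
+ */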
+static void rvu_rep_get_stats(struct work_struct *work)
+{
+ struct delayed_work *del_work = to_delayed_work(work);
+ struct nix_stats_req *req;
+ struct nix_stats_rsp *rsp;
+ struct rep_stats *stats;
+ struct otx2_nic *priv;
+ struct rep_dev *rep;
+ int err;
+
+ rep = container_of(del_work, struct rep_dev, stats_wrk);
+ priv = rep->mdev;
+
+ mutex_lock(&priv->mbox.lock);
+ req = otx2_mbox_alloc_msg_nix_lf_stats(&priv->mbox);
+ if (!req) {
+ mutex_unlock(&priv->mbox.lock);
+ return;
+ }
+ req->pcifunc = rep->pcifunc;
+ err = otx2_sync_mbox_msg_busy_poll(&priv->mbox);
+ if (err)
+ goto exit;
+
+ rsp = (struct nix_stats_rsp *)
+ otx2_mbox_get_rsp(&priv->mbox.mbox, 0, &req->hdr);
+
+ if (IS_ERR(rsp)) {
+ err = PTR_ERR(rsp);
+ goto exit;
+ }
+
+ stats = &rep->stats;
+ stats->rx_bytes = rsp->rx.octs;
+ stats->rx_frames = rsp->rx.ucast + rsp->rx.bcast +
+ rsp->rx.mcast;
+ stats->rx_drops = rsp->rx.drop;
+ stats->rx_mcast_frames = rsp->rx.mcast;
+ stats->tx_bytes = rsp->tx.octs;
+ stats->tx_frames = rsp->tx.ucast + rsp->tx.bcast + rsp->tx.mcast;
+ stats->tx_drops = rsp->tx.drop +
+ (unsigned long)atomic_long_read(&stats->tx_discards);
+exit:
+ mutex_unlock(&priv->mbox.lock);
+}
+
+static void rvu_rep_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct rep_dev *rep = netdev_priv(dev);
+
+ if (!(rep->flags & RVU_REP_VF_INITIALIZED))
+ return;
+
+ stats->rx_packets = rep->stats.rx_frames;
+ stats->rx_bytes = rep->stats.rx_bytes;
+ stats->rx_dropped = rep->stats.rx_drops;
+ stats->multicast = rep->stats.rx_mcast_frames;
+
+ stats->tx_packets = rep->stats.tx_frames;
+ stats->tx_bytes = rep->stats.tx_bytes;
+ stats->tx_dropped = rep->stats.tx_drops;
+
+ schedule_delayed_work(&rep->stats_wrk, msecs_to_jiffies(100));
+}
+
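+/* Ask the AF to enable or disable eswitch (representor) mode for this PF */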
+static int rvu_eswitch_config(struct otx2_nic *priv, u8 ena)
+{
+ struct esw_cfg_req *req;
+
+ mutex_lock(&priv->mbox.lock);
+ req = otx2_mbox_alloc_msg_esw_cfg(&priv->mbox);
+ if (!req) {
+ mutex_unlock(&priv->mbox.lock);
+ return -ENOMEM;
+ }
+ req->ena = ena;
+ otx2_sync_mbox_msg(&priv->mbox);
+ mutex_unlock(&priv->mbox.lock);
+ return 0;
+}
+
+static netdev_tx_t rvu_rep_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct rep_dev *rep = netdev_priv(dev);
+ struct otx2_nic *pf = rep->mdev;
+ struct otx2_snd_queue *sq;
+ struct netdev_queue *txq;
+ struct rep_stats *stats;
+
+ /* Check for minimum and maximum packet length */
+ if (skb->len <= ETH_HLEN ||
+ (!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) {
+ stats = &rep->stats;
+ atomic_long_inc(&stats->tx_discards);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ sq = &pf->qset.sq[rep->rep_id];
+ txq = netdev_get_tx_queue(dev, 0);
+
+ if (!otx2_sq_append_skb(pf, txq, sq, skb, rep->rep_id)) {
+ netif_tx_stop_queue(txq);
+
+ /* Check again, in case SQBs got freed up */
+ smp_mb();
+ if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
+ > sq->sqe_thresh)
+ netif_tx_wake_queue(txq);
+
+ return NETDEV_TX_BUSY;
+ }
+ return NETDEV_TX_OK;
+}
+
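+/* Bring the port up only once the backing VF has finished initializing,
+ * then notify it of the new administrative state.
+ */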
+static int rvu_rep_open(struct net_device *dev)
+{
+ struct rep_dev *rep = netdev_priv(dev);
+ struct otx2_nic *priv = rep->mdev;
+ struct rep_event evt = {0};
+
+ if (!(rep->flags & RVU_REP_VF_INITIALIZED))
+ return 0;
+
+ netif_carrier_on(dev);
+ netif_tx_start_all_queues(dev);
+
+ evt.event = RVU_EVENT_PORT_STATE;
+ evt.evt_data.port_state = 1;
+ evt.pcifunc = rep->pcifunc;
+ rvu_rep_notify_pfvf(priv, RVU_EVENT_PORT_STATE, &evt);
+ return 0;
+}
+
+static int rvu_rep_stop(struct net_device *dev)
+{
+ struct rep_dev *rep = netdev_priv(dev);
+ struct otx2_nic *priv = rep->mdev;
+ struct rep_event evt = {0};
+
+ if (!(rep->flags & RVU_REP_VF_INITIALIZED))
+ return 0;
+
+ netif_carrier_off(dev);
+ netif_tx_disable(dev);
+
+ evt.event = RVU_EVENT_PORT_STATE;
+ evt.pcifunc = rep->pcifunc;
+ rvu_rep_notify_pfvf(priv, RVU_EVENT_PORT_STATE, &evt);
+ return 0;
+}
+
+static const struct net_device_ops rvu_rep_netdev_ops = {
+ .ndo_open = rvu_rep_open,
+ .ndo_stop = rvu_rep_stop,
+ .ndo_start_xmit = rvu_rep_xmit,
+ .ndo_get_stats64 = rvu_rep_get_stats64,
+ .ndo_change_mtu = rvu_rep_change_mtu,
+ .ndo_has_offload_stats = rvu_rep_has_offload_stats,
+ .ndo_get_offload_stats = rvu_rep_get_offload_stats,
+ .ndo_setup_tc = rvu_rep_setup_tc,
+};
+
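+/* Register a NAPI context and CQ IRQ for every completion interrupt and
+ * clear OTX2_FLAG_INTF_DOWN once they are all armed.
+ */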
+static int rvu_rep_napi_init(struct otx2_nic *priv,
+ struct netlink_ext_ack *extack)
+{
+ struct otx2_qset *qset = &priv->qset;
+ struct otx2_cq_poll *cq_poll = NULL;
+ struct otx2_hw *hw = &priv->hw;
+ int err = 0, qidx, vec;
+ char *irq_name;
+
+ qset->napi = kcalloc(hw->cint_cnt, sizeof(*cq_poll), GFP_KERNEL);
+ if (!qset->napi)
+ return -ENOMEM;
+
+ /* Register NAPI handler */
+ for (qidx = 0; qidx < hw->cint_cnt; qidx++) {
+ cq_poll = &qset->napi[qidx];
+ cq_poll->cint_idx = qidx;
+ cq_poll->cq_ids[CQ_RX] =
+ (qidx < hw->rx_queues) ? qidx : CINT_INVALID_CQ;
+ cq_poll->cq_ids[CQ_TX] = (qidx < hw->tx_queues) ?
+ qidx + hw->rx_queues :
+ CINT_INVALID_CQ;
+ cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ;
+ cq_poll->cq_ids[CQ_QOS] = CINT_INVALID_CQ;
+
+ cq_poll->dev = (void *)priv;
+ netif_napi_add(priv->reps[qidx]->netdev, &cq_poll->napi,
+ otx2_napi_handler);
+ napi_enable(&cq_poll->napi);
+ }
+ /* Register CQ IRQ handlers */
+ vec = hw->nix_msixoff + NIX_LF_CINT_VEC_START;
+ for (qidx = 0; qidx < hw->cint_cnt; qidx++) {
+ irq_name = &hw->irq_name[vec * NAME_SIZE];
+
+ snprintf(irq_name, NAME_SIZE, "rep%d-rxtx-%d", qidx, qidx);
+
+ err = request_irq(pci_irq_vector(priv->pdev, vec),
+ otx2_cq_intr_handler, 0, irq_name,
+ &qset->napi[qidx]);
+ if (err) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "RVU REP IRQ registration failed for CQ%d",
+ qidx);
+ goto err_free_cints;
+ }
+ vec++;
+
+ /* Enable CQ IRQ */
+ otx2_write64(priv, NIX_LF_CINTX_INT(qidx), BIT_ULL(0));
+ otx2_write64(priv, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0));
+ }
+ priv->flags &= ~OTX2_FLAG_INTF_DOWN;
+ return 0;
+
+err_free_cints:
+ otx2_free_cints(priv, qidx);
+ otx2_disable_napi(priv);
+ return err;
+}
+
+static void rvu_rep_free_cq_rsrc(struct otx2_nic *priv)
+{
+ struct otx2_qset *qset = &priv->qset;
+ struct otx2_cq_poll *cq_poll = NULL;
+ int qidx, vec;
+
+ /* Cleanup CQ NAPI and IRQ */
+ vec = priv->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
+ for (qidx = 0; qidx < priv->hw.cint_cnt; qidx++) {
+ /* Disable interrupt */
+ otx2_write64(priv, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
+
+ synchronize_irq(pci_irq_vector(priv->pdev, vec));
+
+ cq_poll = &qset->napi[qidx];
+ napi_synchronize(&cq_poll->napi);
+ vec++;
+ }
+ otx2_free_cints(priv, priv->hw.cint_cnt);
+ otx2_disable_napi(priv);
+}
+
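+/* Cancel any pending pool-refill work and release queue and HW resources. */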
+static void rvu_rep_rsrc_free(struct otx2_nic *priv)
+{
+ struct otx2_qset *qset = &priv->qset;
+ struct delayed_work *work;
+ int wrk;
+
+ for (wrk = 0; wrk < priv->qset.cq_cnt; wrk++) {
+ work = &priv->refill_wrk[wrk].pool_refill_work;
+ cancel_delayed_work_sync(work);
+ }
+ devm_kfree(priv->dev, priv->refill_wrk);
+
+ otx2_free_hw_resources(priv);
+ otx2_free_queue_mem(qset);
+}
+
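+/* Allocate queue memory, bring up the HW resources shared by all
+ * representors and program the maximum HW frame size.
+ */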
+static int rvu_rep_rsrc_init(struct otx2_nic *priv)
+{
+ struct otx2_qset *qset = &priv->qset;
+ int err;
+
+ err = otx2_alloc_queue_mem(priv);
+ if (err)
+ return err;
+
+ priv->hw.max_mtu = otx2_get_max_mtu(priv);
+ priv->tx_max_pktlen = priv->hw.max_mtu + OTX2_ETH_HLEN;
+ priv->rbsize = ALIGN(priv->hw.rbuf_len, OTX2_ALIGN) + OTX2_HEAD_ROOM;
+
+ err = otx2_init_hw_resources(priv);
+ if (err)
+ goto err_free_rsrc;
+
+ /* Set maximum frame size allowed in HW */
+ err = otx2_hw_set_mtu(priv, priv->hw.max_mtu);
+ if (err) {
+ dev_err(priv->dev, "Failed to set HW MTU\n");
+ goto err_free_rsrc;
+ }
+ return 0;
+
+err_free_rsrc:
+ otx2_free_hw_resources(priv);
+ otx2_free_queue_mem(qset);
+ return err;
+}
+
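+/* Tear down in reverse order of rvu_rep_create(): disable eswitch mode,
+ * release CQ/NAPI/IRQ resources, then unregister and free every
+ * representor netdev.
+ */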
+void rvu_rep_destroy(struct otx2_nic *priv)
+{
+ struct rep_dev *rep;
+ int rep_id;
+
+ rvu_eswitch_config(priv, false);
+ priv->flags |= OTX2_FLAG_INTF_DOWN;
+ rvu_rep_free_cq_rsrc(priv);
+ for (rep_id = 0; rep_id < priv->rep_cnt; rep_id++) {
+ rep = priv->reps[rep_id];
+ unregister_netdev(rep->netdev);
+ rvu_rep_devlink_port_unregister(rep);
+ free_netdev(rep->netdev);
+ kfree(rep->flow_cfg);
+ }
+ kfree(priv->reps);
+ rvu_rep_rsrc_free(priv);
+}
+
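+/* Create one netdev and devlink port per representor, then enable NAPI
+ * and switch the AF into eswitch mode.
+ */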
+int rvu_rep_create(struct otx2_nic *priv, struct netlink_ext_ack *extack)
+{
+ int rep_cnt = priv->rep_cnt;
+ struct net_device *ndev;
+ struct rep_dev *rep;
+ int rep_id, err;
+ u16 pcifunc;
+
+ err = rvu_rep_rsrc_init(priv);
+ if (err)
+ return err;
+
+ priv->reps = kcalloc(rep_cnt, sizeof(struct rep_dev *), GFP_KERNEL);
+ if (!priv->reps)
+ return -ENOMEM;
+
+ for (rep_id = 0; rep_id < rep_cnt; rep_id++) {
+ ndev = alloc_etherdev(sizeof(*rep));
+ if (!ndev) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "PFVF representor:%d creation failed",
+ rep_id);
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ rep = netdev_priv(ndev);
+ priv->reps[rep_id] = rep;
+ rep->mdev = priv;
+ rep->netdev = ndev;
+ rep->rep_id = rep_id;
+
+ ndev->min_mtu = OTX2_MIN_MTU;
+ ndev->max_mtu = priv->hw.max_mtu;
+ ndev->netdev_ops = &rvu_rep_netdev_ops;
+ pcifunc = priv->rep_pf_map[rep_id];
+ rep->pcifunc = pcifunc;
+
+ snprintf(ndev->name, sizeof(ndev->name), "Rpf%dvf%d",
+ rvu_get_pf(priv->pdev, pcifunc),
+ (pcifunc & RVU_PFVF_FUNC_MASK));
+
+ ndev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
+ NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6);
+
+ ndev->hw_features |= NETIF_F_HW_TC;
+ ndev->features |= ndev->hw_features;
+ eth_hw_addr_random(ndev);
+ err = rvu_rep_devlink_port_register(rep);
+ if (err) {
+ free_netdev(ndev);
+ goto exit;
+ }
+
+ SET_NETDEV_DEVLINK_PORT(ndev, &rep->dl_port);
+ err = register_netdev(ndev);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "PFVF representor registration failed");
+ rvu_rep_devlink_port_unregister(rep);
+ free_netdev(ndev);
+ goto exit;
+ }
+
+ INIT_DELAYED_WORK(&rep->stats_wrk, rvu_rep_get_stats);
+ }
+ err = rvu_rep_napi_init(priv, extack);
+ if (err)
+ goto exit;
+
+ rvu_eswitch_config(priv, true);
+ return 0;
+exit:
+ while (--rep_id >= 0) {
+ rep = priv->reps[rep_id];
+ unregister_netdev(rep->netdev);
+ rvu_rep_devlink_port_unregister(rep);
+ free_netdev(rep->netdev);
+ }
+ kfree(priv->reps);
+ rvu_rep_rsrc_free(priv);
+ return err;
+}
+
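+/* Query the AF for the number of representors and the pcifunc each one
+ * maps to.
+ */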
+static int rvu_get_rep_cnt(struct otx2_nic *priv)
+{
+ struct get_rep_cnt_rsp *rsp;
+ struct mbox_msghdr *msghdr;
+ struct msg_req *req;
+ int err, rep;
+
+ mutex_lock(&priv->mbox.lock);
+ req = otx2_mbox_alloc_msg_get_rep_cnt(&priv->mbox);
+ if (!req) {
+ mutex_unlock(&priv->mbox.lock);
+ return -ENOMEM;
+ }
+ err = otx2_sync_mbox_msg(&priv->mbox);
+ if (err)
+ goto exit;
+
+ msghdr = otx2_mbox_get_rsp(&priv->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(msghdr)) {
+ err = PTR_ERR(msghdr);
+ goto exit;
+ }
+
+ rsp = (struct get_rep_cnt_rsp *)msghdr;
+ priv->hw.tx_queues = rsp->rep_cnt;
+ priv->hw.rx_queues = rsp->rep_cnt;
+ priv->rep_cnt = rsp->rep_cnt;
+ for (rep = 0; rep < priv->rep_cnt; rep++)
+ priv->rep_pf_map[rep] = rsp->rep_pf_map[rep];
+
+exit:
+ mutex_unlock(&priv->mbox.lock);
+ return err;
+}
+
+static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct otx2_nic *priv;
+ struct otx2_hw *hw;
+ int err;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ return err;
+ }
+
+ err = pcim_request_all_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ return err;
+ }
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "DMA mask config failed, abort\n");
+ goto err_set_drv_data;
+ }
+
+ pci_set_master(pdev);
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ err = -ENOMEM;
+ goto err_set_drv_data;
+ }
+
+ pci_set_drvdata(pdev, priv);
+ priv->pdev = pdev;
+ priv->dev = dev;
+ priv->flags |= OTX2_FLAG_INTF_DOWN;
+ priv->flags |= OTX2_FLAG_REP_MODE_ENABLED;
+
+ hw = &priv->hw;
+ hw->pdev = pdev;
+ hw->max_queues = OTX2_MAX_CQ_CNT;
+ hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
+ hw->xqe_size = 128;
+
+ err = otx2_init_rsrc(pdev, priv);
+ if (err)
+ goto err_set_drv_data;
+
+ priv->iommu_domain = iommu_get_domain_for_dev(dev);
+
+ err = rvu_get_rep_cnt(priv);
+ if (err)
+ goto err_detach_rsrc;
+
+ err = otx2_register_dl(priv);
+ if (err)
+ goto err_detach_rsrc;
+
+ return 0;
+
+err_detach_rsrc:
+ if (priv->hw.lmt_info)
+ free_percpu(priv->hw.lmt_info);
+ if (test_bit(CN10K_LMTST, &priv->hw.cap_flag))
+ qmem_free(priv->dev, priv->dync_lmt);
+ otx2_detach_resources(&priv->mbox);
+ otx2_disable_mbox_intr(priv);
+ otx2_pfaf_mbox_destroy(priv);
+ pci_free_irq_vectors(pdev);
+err_set_drv_data:
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void rvu_rep_remove(struct pci_dev *pdev)
+{
+ struct otx2_nic *priv = pci_get_drvdata(pdev);
+
+ otx2_unregister_dl(priv);
+ if (!(priv->flags & OTX2_FLAG_INTF_DOWN))
+ rvu_rep_destroy(priv);
+ otx2_detach_resources(&priv->mbox);
+ if (priv->hw.lmt_info)
+ free_percpu(priv->hw.lmt_info);
+ if (test_bit(CN10K_LMTST, &priv->hw.cap_flag))
+ qmem_free(priv->dev, priv->dync_lmt);
+ otx2_disable_mbox_intr(priv);
+ otx2_pfaf_mbox_destroy(priv);
+ pci_free_irq_vectors(priv->pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver rvu_rep_driver = {
+ .name = DRV_NAME,
+ .id_table = rvu_rep_id_table,
+ .probe = rvu_rep_probe,
+ .remove = rvu_rep_remove,
+ .shutdown = rvu_rep_remove,
+};
+
+static int __init rvu_rep_init_module(void)
+{
+ return pci_register_driver(&rvu_rep_driver);
+}
+
+static void __exit rvu_rep_cleanup_module(void)
+{
+ pci_unregister_driver(&rvu_rep_driver);
+}
+
+module_init(rvu_rep_init_module);
+module_exit(rvu_rep_cleanup_module);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.h b/drivers/net/ethernet/marvell/octeontx2/nic/rep.h
new file mode 100644
index 000000000000..5bc9e2c7d800
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU REPRESENTOR driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#ifndef REP_H
+#define REP_H
+
+#include <linux/pci.h>
+
+#include "otx2_reg.h"
+#include "otx2_txrx.h"
+#include "otx2_common.h"
+
+#define PCI_DEVID_RVU_REP 0xA0E0
+
+#define RVU_MAX_REP OTX2_MAX_CQ_CNT
+
+struct rep_stats {
+ u64 rx_bytes;
+ u64 rx_frames;
+ u64 rx_drops;
+ u64 rx_mcast_frames;
+
+ u64 tx_bytes;
+ u64 tx_frames;
+ u64 tx_drops;
+ atomic_long_t tx_discards;
+};
+
+struct rep_dev {
+ struct otx2_nic *mdev;
+ struct net_device *netdev;
+ struct rep_stats stats;
+ struct delayed_work stats_wrk;
+ struct devlink_port dl_port;
+ struct otx2_flow_config *flow_cfg;
+#define RVU_REP_VF_INITIALIZED BIT_ULL(0)
+ u64 flags;
+ u16 rep_id;
+ u16 pcifunc;
+ u8 mac[ETH_ALEN];
+};
+
+static inline bool otx2_rep_dev(struct pci_dev *pdev)
+{
+ return pdev->device == PCI_DEVID_RVU_REP;
+}
+
+int rvu_rep_create(struct otx2_nic *priv, struct netlink_ext_ack *extack);
+void rvu_rep_destroy(struct otx2_nic *priv);
+int rvu_event_up_notify(struct otx2_nic *pf, struct rep_event *info);
+#endif /* REP_H */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_counter.c b/drivers/net/ethernet/marvell/prestera/prestera_counter.c
index 4cd53a2dae46..634f4543c1d7 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_counter.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_counter.c
@@ -336,8 +336,7 @@ prestera_counter_block_get_by_idx(struct prestera_counter *counter, u32 idx)
static void prestera_counter_stats_work(struct work_struct *work)
{
- struct delayed_work *dl_work =
- container_of(work, struct delayed_work, work);
+ struct delayed_work *dl_work = to_delayed_work(work);
struct prestera_counter *counter =
container_of(dl_work, struct prestera_counter, stats_dw);
struct prestera_counter_block *block;
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 22ca6ee9665e..65e7ef033bde 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -280,6 +280,7 @@ prestera_mac_select_pcs(struct phylink_config *config,
}
static void prestera_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct prestera_port *port = container_of(pcs, struct prestera_port,
@@ -395,7 +396,6 @@ static int prestera_port_sfp_bind(struct prestera_port *port)
continue;
port->phylink_pcs.ops = &prestera_pcs_ops;
- port->phylink_pcs.neg_mode = true;
port->phy_config.dev = &port->dev->dev;
port->phy_config.type = PHYLINK_NETDEV;
@@ -634,7 +634,7 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
goto err_dl_port_register;
dev->features |= NETIF_F_HW_TC;
- dev->netns_local = true;
+ dev->netns_immutable = true;
dev->netdev_ops = &prestera_netdev_ops;
dev->ethtool_ops = &prestera_ethtool_ops;
SET_NETDEV_DEV(dev, sw->dev->dev);
@@ -1500,7 +1500,7 @@ EXPORT_SYMBOL(prestera_device_unregister);
static int __init prestera_module_init(void)
{
- prestera_wq = alloc_workqueue("prestera", 0, 0);
+ prestera_wq = alloc_workqueue("prestera", WQ_PERCPU, 0);
if (!prestera_wq)
return -ENOMEM;
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
index 35857dc19542..3e13322470da 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
@@ -845,9 +845,9 @@ static int prestera_pci_probe(struct pci_dev *pdev,
goto err_pci_enable_device;
}
- err = pci_request_regions(pdev, driver_name);
+ err = pcim_request_all_regions(pdev, driver_name);
if (err) {
- dev_err(&pdev->dev, "pci_request_regions failed\n");
+ dev_err(&pdev->dev, "pcim_request_all_regions failed\n");
goto err_pci_request_regions;
}
@@ -898,7 +898,7 @@ static int prestera_pci_probe(struct pci_dev *pdev,
dev_info(fw->dev.dev, "Prestera FW is ready\n");
- fw->wq = alloc_workqueue("prestera_fw_wq", WQ_HIGHPRI, 1);
+ fw->wq = alloc_workqueue("prestera_fw_wq", WQ_HIGHPRI | WQ_PERCPU, 1);
if (!fw->wq) {
err = -ENOMEM;
goto err_wq_alloc;
@@ -938,7 +938,6 @@ err_pci_dev_alloc:
err_pp_ioremap:
err_mem_ioremap:
err_dma_mask:
- pci_release_regions(pdev);
err_pci_request_regions:
err_pci_enable_device:
return err;
@@ -953,7 +952,6 @@ static void prestera_pci_remove(struct pci_dev *pdev)
pci_free_irq_vectors(pdev);
destroy_workqueue(fw->wq);
prestera_fw_uninit(fw);
- pci_release_regions(pdev);
}
static const struct pci_device_id prestera_pci_devices[] = {
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index fe38426ec42d..68f8a1e36aa6 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -353,7 +353,7 @@ static void rxq_refill(struct net_device *dev)
static inline void rxq_refill_timer_wrapper(struct timer_list *t)
{
- struct pxa168_eth_private *pep = from_timer(pep, t, timeout);
+ struct pxa168_eth_private *pep = timer_container_of(pep, t, timeout);
napi_schedule(&pep->napi);
}
@@ -1175,7 +1175,7 @@ static int pxa168_eth_stop(struct net_device *dev)
/* Write to ICR to clear interrupts. */
wrl(pep, INT_W_CLEAR, 0);
napi_disable(&pep->napi);
- del_timer_sync(&pep->timeout);
+ timer_delete_sync(&pep->timeout);
netif_carrier_off(dev);
free_irq(dev->irq, dev);
rxq_deinit(dev);
@@ -1229,9 +1229,9 @@ static int pxa168_rx_poll(struct napi_struct *napi, int budget)
int work_done = 0;
/*
- * We call txq_reclaim every time since in NAPI interupts are disabled
- * and due to this we miss the TX_DONE interrupt,which is not updated in
- * interrupt status register.
+ * We call txq_reclaim every time since in NAPI interrupts are disabled
+ * and due to this we miss the TX_DONE interrupt, which is not updated
+ * in interrupt status register.
*/
txq_reclaim(dev, 0);
if (netif_queue_stopped(dev)
@@ -1394,18 +1394,15 @@ static int pxa168_eth_probe(struct platform_device *pdev)
printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n");
- clk = devm_clk_get(&pdev->dev, NULL);
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "Fast Ethernet failed to get clock\n");
+ dev_err(&pdev->dev, "Fast Ethernet failed to get and enable clock\n");
return -ENODEV;
}
- clk_prepare_enable(clk);
dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
- if (!dev) {
- err = -ENOMEM;
- goto err_clk;
- }
+ if (!dev)
+ return -ENOMEM;
platform_set_drvdata(pdev, dev);
pep = netdev_priv(dev);
@@ -1523,8 +1520,6 @@ err_free_mdio:
mdiobus_free(pep->smi_bus);
err_netdev:
free_netdev(dev);
-err_clk:
- clk_disable_unprepare(clk);
return err;
}
@@ -1542,7 +1537,6 @@ static void pxa168_eth_remove(struct platform_device *pdev)
if (dev->phydev)
phy_disconnect(dev->phydev);
- clk_disable_unprepare(pep->clk);
mdiobus_unregister(pep->smi_bus);
mdiobus_free(pep->smi_bus);
unregister_netdev(dev);
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 25bf6ec44289..05349a0b2db1 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -1494,7 +1494,7 @@ static int xm_check_link(struct net_device *dev)
*/
static void xm_link_timer(struct timer_list *t)
{
- struct skge_port *skge = from_timer(skge, t, link_timer);
+ struct skge_port *skge = timer_container_of(skge, t, link_timer);
struct net_device *dev = skge->netdev;
struct skge_hw *hw = skge->hw;
int port = skge->port;
@@ -2662,7 +2662,7 @@ static int skge_down(struct net_device *dev)
netif_tx_disable(dev);
if (is_genesis(hw) && hw->phy_type == SK_PHY_XMAC)
- del_timer_sync(&skge->link_timer);
+ timer_delete_sync(&skge->link_timer);
napi_disable(&skge->napi);
netif_carrier_off(dev);
@@ -3742,10 +3742,7 @@ static int skge_device_event(struct notifier_block *unused,
skge = netdev_priv(dev);
switch (event) {
case NETDEV_CHANGENAME:
- if (skge->debugfs)
- skge->debugfs = debugfs_rename(skge_debug,
- skge->debugfs,
- skge_debug, dev->name);
+ debugfs_change_name(skge->debugfs, "%s", dev->name);
break;
case NETDEV_GOING_DOWN:
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 3914cd9210d4..3831f533b9db 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -130,6 +130,7 @@ static const struct pci_device_id sky2_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436C) }, /* 88E8072 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4373) }, /* 88E8075 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4382) }, /* 88E8079 */
@@ -2960,7 +2961,7 @@ static int sky2_rx_hung(struct net_device *dev)
static void sky2_watchdog(struct timer_list *t)
{
- struct sky2_hw *hw = from_timer(hw, t, watchdog_timer);
+ struct sky2_hw *hw = timer_container_of(hw, t, watchdog_timer);
/* Check for lost IRQ once a second */
if (sky2_read32(hw, B0_ISRC)) {
@@ -4493,10 +4494,7 @@ static int sky2_device_event(struct notifier_block *unused,
switch (event) {
case NETDEV_CHANGENAME:
- if (sky2->debugfs) {
- sky2->debugfs = debugfs_rename(sky2_debug, sky2->debugfs,
- sky2_debug, dev->name);
- }
+ debugfs_change_name(sky2->debugfs, "%s", dev->name);
break;
case NETDEV_GOING_DOWN:
@@ -5054,7 +5052,7 @@ static int sky2_suspend(struct device *dev)
if (!hw)
return 0;
- del_timer_sync(&hw->watchdog_timer);
+ timer_delete_sync(&hw->watchdog_timer);
cancel_work_sync(&hw->restart_work);
rtnl_lock();
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index 95c4405b7d7b..2ba361f8ce7d 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -7,14 +7,6 @@ config NET_VENDOR_MEDIATEK
if NET_VENDOR_MEDIATEK
-config NET_AIROHA
- tristate "Airoha SoC Gigabit Ethernet support"
- depends on NET_DSA || !NET_DSA
- select PAGE_POOL
- help
- This driver supports the gigabit ethernet MACs in the
- Airoha SoC family.
-
config NET_MEDIATEK_SOC_WED
depends on ARCH_MEDIATEK || COMPILE_TEST
def_bool NET_MEDIATEK_SOC != n
@@ -25,6 +17,7 @@ config NET_MEDIATEK_SOC
select PINCTRL
select PHYLINK
select DIMLIB
+ select GENERIC_ALLOCATOR
select PAGE_POOL
select PAGE_POOL_STATS
select PCS_MTK_LYNXI
diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
index ddbb7f4a516c..03e008fbc859 100644
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -11,4 +11,3 @@ mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
endif
obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
-obj-$(CONFIG_NET_AIROHA) += airoha_eth.o
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_path.c b/drivers/net/ethernet/mediatek/mtk_eth_path.c
index 7c27a19c4d8f..b4c01e2878f6 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_path.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_path.c
@@ -14,7 +14,7 @@
struct mtk_eth_muxc {
const char *name;
- int cap_bit;
+ u64 cap_bit;
int (*set_path)(struct mtk_eth *eth, u64 path);
};
@@ -31,6 +31,8 @@ static const char *mtk_eth_path_name(u64 path)
return "gmac2_rgmii";
case MTK_ETH_PATH_GMAC2_SGMII:
return "gmac2_sgmii";
+ case MTK_ETH_PATH_GMAC2_2P5GPHY:
+ return "gmac2_2p5gphy";
case MTK_ETH_PATH_GMAC2_GEPHY:
return "gmac2_gephy";
case MTK_ETH_PATH_GDM1_ESW:
@@ -127,6 +129,29 @@ static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, u64 path)
return 0;
}
+static int set_mux_gmac2_to_2p5gphy(struct mtk_eth *eth, u64 path)
+{
+ int ret;
+
+ if (path == MTK_ETH_PATH_GMAC2_2P5GPHY) {
+ ret = regmap_clear_bits(eth->ethsys, ETHSYS_SYSCFG0,
+ SYSCFG0_SGMII_GMAC2_V2);
+ if (ret)
+ return ret;
+
+ /* Setup mux to 2p5g PHY */
+ ret = regmap_clear_bits(eth->infra, TOP_MISC_NETSYS_PCS_MUX,
+ MUX_G2_USXGMII_SEL);
+ if (ret)
+ return ret;
+
+ dev_dbg(eth->dev, "path %s in %s updated\n",
+ mtk_eth_path_name(path), __func__);
+ }
+
+ return 0;
+}
+
static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, u64 path)
{
unsigned int val = 0;
@@ -210,6 +235,10 @@ static const struct mtk_eth_muxc mtk_eth_muxc[] = {
.cap_bit = MTK_ETH_MUX_U3_GMAC2_TO_QPHY,
.set_path = set_mux_u3_gmac2_to_qphy,
}, {
+ .name = "mux_gmac2_to_2p5gphy",
+ .cap_bit = MTK_ETH_MUX_GMAC2_TO_2P5GPHY,
+ .set_path = set_mux_gmac2_to_2p5gphy,
+ }, {
.name = "mux_gmac1_gmac2_to_sgmii_rgmii",
.cap_bit = MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII,
.set_path = set_mux_gmac1_gmac2_to_sgmii_rgmii,
@@ -260,6 +289,20 @@ int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id)
return mtk_eth_mux_setup(eth, path);
}
+int mtk_gmac_2p5gphy_path_setup(struct mtk_eth *eth, int mac_id)
+{
+ u64 path = 0;
+
+ if (mac_id == MTK_GMAC2_ID)
+ path = MTK_ETH_PATH_GMAC2_2P5GPHY;
+
+ if (!path)
+ return -EINVAL;
+
+ /* Setup proper MUXes along the path */
+ return mtk_eth_mux_setup(eth, path);
+}
+
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id)
{
u64 path = 0;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 53485142938c..e68997a29191 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -27,6 +27,7 @@
#include <net/dsa.h>
#include <net/dst_metadata.h>
#include <net/page_pool/helpers.h>
+#include <linux/genalloc.h>
#include "mtk_eth_soc.h"
#include "mtk_wed.h"
@@ -269,12 +270,8 @@ static const char * const mtk_clks_source_name[] = {
"ethwarp_wocpu2",
"ethwarp_wocpu1",
"ethwarp_wocpu0",
- "top_usxgmii0_sel",
- "top_usxgmii1_sel",
"top_sgm0_sel",
"top_sgm1_sel",
- "top_xfi_phy0_xtal_sel",
- "top_xfi_phy1_xtal_sel",
"top_eth_gmii_sel",
"top_eth_refck_50m_sel",
"top_eth_sys_200m_sel",
@@ -507,7 +504,7 @@ static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
static void mtk_setup_bridge_switch(struct mtk_eth *eth)
{
/* Force Port1 XGMAC Link Up */
- mtk_m32(eth, 0, MTK_XGMAC_FORCE_LINK(MTK_GMAC1_ID),
+ mtk_m32(eth, 0, MTK_XGMAC_FORCE_MODE(MTK_GMAC1_ID),
MTK_XGMAC_STS(MTK_GMAC1_ID));
/* Adjust GSW bridge IPG to 11 */
@@ -536,6 +533,26 @@ static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
return NULL;
}
+static int mtk_mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t iface)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ struct mtk_eth *eth = mac->hw;
+
+ if (mtk_interface_mode_is_xgmii(eth, iface) &&
+ mac->id != MTK_GMAC1_ID) {
+ mtk_m32(mac->hw, XMAC_MCR_TRX_DISABLE,
+ XMAC_MCR_TRX_DISABLE, MTK_XMAC_MCR(mac->id));
+
+ mtk_m32(mac->hw, MTK_XGMAC_FORCE_MODE(mac->id) |
+ MTK_XGMAC_FORCE_LINK(mac->id),
+ MTK_XGMAC_FORCE_MODE(mac->id), MTK_XGMAC_STS(mac->id));
+ }
+
+ return 0;
+}
+
static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
@@ -577,6 +594,12 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
}
break;
case PHY_INTERFACE_MODE_INTERNAL:
+ if (mac->id == MTK_GMAC2_ID &&
+ MTK_HAS_CAPS(eth->soc->caps, MTK_2P5GPHY)) {
+ err = mtk_gmac_2p5gphy_path_setup(eth, mac->id);
+ if (err)
+ goto init_err;
+ }
break;
default:
goto err_phy;
@@ -648,12 +671,12 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
}
/* Setup gmac */
- if (mtk_is_netsys_v3_or_greater(eth) &&
- mac->interface == PHY_INTERFACE_MODE_INTERNAL) {
+ if (mtk_interface_mode_is_xgmii(eth, state->interface)) {
mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id));
mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id));
- mtk_setup_bridge_switch(eth);
+ if (mac->id == MTK_GMAC1_ID)
+ mtk_setup_bridge_switch(eth);
}
return;
@@ -700,10 +723,19 @@ static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
{
struct mtk_mac *mac = container_of(config, struct mtk_mac,
phylink_config);
- u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
- mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK);
- mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
+ if (!mtk_interface_mode_is_xgmii(mac->hw, interface)) {
+ /* GMAC modes */
+ mtk_m32(mac->hw,
+ MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK, 0,
+ MTK_MAC_MCR(mac->id));
+ } else if (mac->id != MTK_GMAC1_ID) {
+ /* XGMAC except for built-in switch */
+ mtk_m32(mac->hw, XMAC_MCR_TRX_DISABLE, XMAC_MCR_TRX_DISABLE,
+ MTK_XMAC_MCR(mac->id));
+ mtk_m32(mac->hw, MTK_XGMAC_FORCE_LINK(mac->id), 0,
+ MTK_XGMAC_STS(mac->id));
+ }
}
static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
@@ -734,7 +766,7 @@ static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
case SPEED_100:
val |= MTK_QTX_SCH_MAX_RATE_EN |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
- FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3);
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
break;
case SPEED_1000:
@@ -757,13 +789,13 @@ static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
case SPEED_100:
val |= MTK_QTX_SCH_MAX_RATE_EN |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
- FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5);
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
break;
case SPEED_1000:
val |= MTK_QTX_SCH_MAX_RATE_EN |
- FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 10) |
- FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 6) |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
break;
default:
@@ -775,13 +807,12 @@ static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
}
-static void mtk_mac_link_up(struct phylink_config *config,
- struct phy_device *phy,
- unsigned int mode, phy_interface_t interface,
- int speed, int duplex, bool tx_pause, bool rx_pause)
+static void mtk_gdm_mac_link_up(struct mtk_mac *mac,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause,
+ bool rx_pause)
{
- struct mtk_mac *mac = container_of(config, struct mtk_mac,
- phylink_config);
u32 mcr;
mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
@@ -815,32 +846,145 @@ static void mtk_mac_link_up(struct phylink_config *config,
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
+static void mtk_xgdm_mac_link_up(struct mtk_mac *mac,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause,
+ bool rx_pause)
+{
+ u32 mcr;
+
+ if (mac->id == MTK_GMAC1_ID)
+ return;
+
+ /* Eliminate the interference (before link-up) caused by PHY noise */
+ mtk_m32(mac->hw, XMAC_LOGIC_RST, 0, MTK_XMAC_LOGIC_RST(mac->id));
+ mdelay(20);
+ mtk_m32(mac->hw, XMAC_GLB_CNTCLR, XMAC_GLB_CNTCLR,
+ MTK_XMAC_CNT_CTRL(mac->id));
+
+ mtk_m32(mac->hw, MTK_XGMAC_FORCE_LINK(mac->id),
+ MTK_XGMAC_FORCE_LINK(mac->id), MTK_XGMAC_STS(mac->id));
+
+ mcr = mtk_r32(mac->hw, MTK_XMAC_MCR(mac->id));
+ mcr &= ~(XMAC_MCR_FORCE_TX_FC | XMAC_MCR_FORCE_RX_FC |
+ XMAC_MCR_TRX_DISABLE);
+ /* Configure pause modes -
+ * phylink will avoid these for half duplex
+ */
+ if (tx_pause)
+ mcr |= XMAC_MCR_FORCE_TX_FC;
+ if (rx_pause)
+ mcr |= XMAC_MCR_FORCE_RX_FC;
+
+ mtk_w32(mac->hw, mcr, MTK_XMAC_MCR(mac->id));
+}
+
+static void mtk_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+
+ if (mtk_interface_mode_is_xgmii(mac->hw, interface))
+ mtk_xgdm_mac_link_up(mac, phy, mode, interface, speed, duplex,
+ tx_pause, rx_pause);
+ else
+ mtk_gdm_mac_link_up(mac, phy, mode, interface, speed, duplex,
+ tx_pause, rx_pause);
+}
+
+static void mtk_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ struct mtk_eth *eth = mac->hw;
+
+ mtk_m32(eth, MAC_MCR_EEE100M | MAC_MCR_EEE1G, 0, MTK_MAC_MCR(mac->id));
+}
+
+static int mtk_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ struct mtk_eth *eth = mac->hw;
+ u32 val;
+
+ if (mtk_interface_mode_is_xgmii(eth, mac->interface))
+ return -EOPNOTSUPP;
+
+ /* Tx idle timer in ms */
+ timer = DIV_ROUND_UP(timer, 1000);
+
+ /* If the timer is zero, then set LPI_MODE, which allows the
+ * system to enter LPI mode immediately rather than waiting for
+ * the LPI threshold.
+ */
+ if (!timer)
+ val = MAC_EEE_LPI_MODE;
+ else if (FIELD_FIT(MAC_EEE_LPI_TXIDLE_THD, timer))
+ val = FIELD_PREP(MAC_EEE_LPI_TXIDLE_THD, timer);
+ else
+ val = MAC_EEE_LPI_TXIDLE_THD;
+
+ if (tx_clk_stop)
+ val |= MAC_EEE_CKG_TXIDLE;
+
+ /* PHY wake-up time: this field does not have a reset value, so use the
+ * reset values from MT7531 (36us for 100M and 17us for 1000M).
+ */
+ val |= FIELD_PREP(MAC_EEE_WAKEUP_TIME_1000, 17) |
+ FIELD_PREP(MAC_EEE_WAKEUP_TIME_100, 36);
+
+ mtk_w32(eth, val, MTK_MAC_EEECR(mac->id));
+ mtk_m32(eth, 0, MAC_MCR_EEE100M | MAC_MCR_EEE1G, MTK_MAC_MCR(mac->id));
+
+ return 0;
+}
+
static const struct phylink_mac_ops mtk_phylink_ops = {
+ .mac_prepare = mtk_mac_prepare,
.mac_select_pcs = mtk_mac_select_pcs,
.mac_config = mtk_mac_config,
.mac_finish = mtk_mac_finish,
.mac_link_down = mtk_mac_link_down,
.mac_link_up = mtk_mac_link_up,
+ .mac_disable_tx_lpi = mtk_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = mtk_mac_enable_tx_lpi,
};
+static void mtk_mdio_config(struct mtk_eth *eth)
+{
+ u32 val;
+
+ /* Configure MDC Divider */
+ val = FIELD_PREP(PPSC_MDC_CFG, eth->mdc_divider);
+
+ /* Configure MDC Turbo Mode */
+ if (mtk_is_netsys_v3_or_greater(eth))
+ mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3);
+ else
+ val |= PPSC_MDC_TURBO;
+
+ mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC);
+}
+
static int mtk_mdio_init(struct mtk_eth *eth)
{
- unsigned int max_clk = 2500000, divider;
+ unsigned int max_clk = 2500000;
struct device_node *mii_np;
int ret;
u32 val;
- mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
+ mii_np = of_get_available_child_by_name(eth->dev->of_node, "mdio-bus");
if (!mii_np) {
dev_err(eth->dev, "no %s child node found", "mdio-bus");
return -ENODEV;
}
- if (!of_device_is_available(mii_np)) {
- ret = -ENODEV;
- goto err_put_node;
- }
-
eth->mii_bus = devm_mdiobus_alloc(eth->dev);
if (!eth->mii_bus) {
ret = -ENOMEM;
@@ -865,20 +1009,9 @@ static int mtk_mdio_init(struct mtk_eth *eth)
}
max_clk = val;
}
- divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
-
- /* Configure MDC Turbo Mode */
- if (mtk_is_netsys_v3_or_greater(eth))
- mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3);
-
- /* Configure MDC Divider */
- val = FIELD_PREP(PPSC_MDC_CFG, divider);
- if (!mtk_is_netsys_v3_or_greater(eth))
- val |= PPSC_MDC_TURBO;
- mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC);
-
- dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / divider);
-
+ eth->mdc_divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
+ mtk_mdio_config(eth);
+ dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / eth->mdc_divider);
ret = of_mdiobus_register(eth->mii_bus, mii_np);
err_put_node:
@@ -1135,6 +1268,34 @@ static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
return (void *)data;
}
+static void *mtk_dma_ring_alloc(struct mtk_eth *eth, size_t size,
+ dma_addr_t *dma_handle, bool use_sram)
+{
+ void *dma_ring;
+
+ if (use_sram && eth->sram_pool) {
+ dma_ring = (void *)gen_pool_alloc(eth->sram_pool, size);
+ if (!dma_ring)
+ return dma_ring;
+ *dma_handle = gen_pool_virt_to_phys(eth->sram_pool,
+ (unsigned long)dma_ring);
+ } else {
+ dma_ring = dma_alloc_coherent(eth->dma_dev, size, dma_handle,
+ GFP_KERNEL);
+ }
+
+ return dma_ring;
+}
+
+static void mtk_dma_ring_free(struct mtk_eth *eth, size_t size, void *dma_ring,
+ dma_addr_t dma_handle, bool in_sram)
+{
+ if (in_sram && eth->sram_pool)
+ gen_pool_free(eth->sram_pool, (unsigned long)dma_ring, size);
+ else
+ dma_free_coherent(eth->dma_dev, size, dma_ring, dma_handle);
+}
+
/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
@@ -1144,13 +1305,8 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
dma_addr_t dma_addr;
int i, j, len;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM))
- eth->scratch_ring = eth->sram_base;
- else
- eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
- cnt * soc->tx.desc_size,
- &eth->phy_scratch_ring,
- GFP_KERNEL);
+ eth->scratch_ring = mtk_dma_ring_alloc(eth, cnt * soc->tx.desc_size,
+ &eth->phy_scratch_ring, true);
if (unlikely(!eth->scratch_ring))
return -ENOMEM;
@@ -1605,6 +1761,13 @@ static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
bool gso = false;
int tx_num;
+ if (skb_vlan_tag_present(skb) &&
+ !eth_proto_is_802_3(eth_hdr(skb)->h_proto)) {
+ skb = __vlan_hwaccel_push_inside(skb);
+ if (!skb)
+ goto dropped;
+ }
+
/* normally we can rely on the stack not calling this more than once,
* however we have 2 queues running on the same ring so we need to lock
* the ring access
@@ -1650,8 +1813,9 @@ static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
drop:
spin_unlock(&eth->page_lock);
- stats->tx_dropped++;
dev_kfree_skb_any(skb);
+dropped:
+ stats->tx_dropped++;
return NETDEV_TX_OK;
}
@@ -2079,7 +2243,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
if (ring->page_pool) {
struct page *page = virt_to_head_page(data);
struct xdp_buff xdp;
- u32 ret;
+ u32 ret, metasize;
new_data = mtk_page_pool_get_buff(ring->page_pool,
&dma_addr,
@@ -2095,7 +2259,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
- false);
+ true);
xdp_buff_clear_frags_flag(&xdp);
ret = mtk_xdp_run(eth, ring, &xdp, netdev);
@@ -2115,6 +2279,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb_reserve(skb, xdp.data - xdp.data_hard_start);
skb_put(skb, xdp.data_end - xdp.data);
+ metasize = xdp.data - xdp.data_meta;
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb_mark_for_recycle(skb);
} else {
if (ring->frag_size <= PAGE_SIZE)
@@ -2201,14 +2368,18 @@ skip_rx:
ring->data[idx] = new_data;
rxd->rxd1 = (unsigned int)dma_addr;
release_desc:
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
+ if (unlikely(dma_addr == DMA_MAPPING_ERROR))
+ addr64 = FIELD_GET(RX_DMA_ADDR64_MASK,
+ rxd->rxd2);
+ else
+ addr64 = RX_DMA_PREP_ADDR64(dma_addr);
+ }
+
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
rxd->rxd2 = RX_DMA_LSO;
else
- rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
-
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) &&
- likely(dma_addr != DMA_MAPPING_ERROR))
- rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
+ rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size) | addr64;
ring->calc_idx = idx;
done++;
@@ -2481,14 +2652,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
if (!ring->buf)
goto no_tx_mem;
- if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) {
- ring->dma = eth->sram_base + soc->tx.fq_dma_size * sz;
- ring->phys = eth->phy_scratch_ring + soc->tx.fq_dma_size * (dma_addr_t)sz;
- } else {
- ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
- &ring->phys, GFP_KERNEL);
- }
-
+ ring->dma = mtk_dma_ring_alloc(eth, ring_size * sz, &ring->phys, true);
if (!ring->dma)
goto no_tx_mem;
@@ -2587,10 +2751,10 @@ static void mtk_tx_clean(struct mtk_eth *eth)
kfree(ring->buf);
ring->buf = NULL;
}
- if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
- dma_free_coherent(eth->dma_dev,
- ring->dma_size * soc->tx.desc_size,
- ring->dma, ring->phys);
+
+ if (ring->dma) {
+ mtk_dma_ring_free(eth, ring->dma_size * soc->tx.desc_size,
+ ring->dma, ring->phys, true);
ring->dma = NULL;
}
@@ -2607,14 +2771,9 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
const struct mtk_soc_data *soc = eth->soc;
struct mtk_rx_ring *ring;
- int rx_data_len, rx_dma_size, tx_ring_size;
+ int rx_data_len, rx_dma_size;
int i;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
- tx_ring_size = MTK_QDMA_RING_SIZE;
- else
- tx_ring_size = soc->tx.dma_size;
-
if (rx_flag == MTK_RX_FLAGS_QDMA) {
if (ring_no)
return -EINVAL;
@@ -2649,20 +2808,10 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
ring->page_pool = pp;
}
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
- rx_flag != MTK_RX_FLAGS_NORMAL) {
- ring->dma = dma_alloc_coherent(eth->dma_dev,
- rx_dma_size * eth->soc->rx.desc_size,
- &ring->phys, GFP_KERNEL);
- } else {
- struct mtk_tx_ring *tx_ring = &eth->tx_ring;
-
- ring->dma = tx_ring->dma + tx_ring_size *
- eth->soc->tx.desc_size * (ring_no + 1);
- ring->phys = tx_ring->phys + tx_ring_size *
- eth->soc->tx.desc_size * (ring_no + 1);
- }
-
+ ring->dma = mtk_dma_ring_alloc(eth,
+ rx_dma_size * eth->soc->rx.desc_size,
+ &ring->phys,
+ rx_flag == MTK_RX_FLAGS_NORMAL);
if (!ring->dma)
return -ENOMEM;
@@ -2777,10 +2926,9 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_
ring->data = NULL;
}
- if (!in_sram && ring->dma) {
- dma_free_coherent(eth->dma_dev,
- ring->dma_size * eth->soc->rx.desc_size,
- ring->dma, ring->phys);
+ if (ring->dma) {
+ mtk_dma_ring_free(eth, ring->dma_size * eth->soc->rx.desc_size,
+ ring->dma, ring->phys, in_sram);
ring->dma = NULL;
}
@@ -3135,20 +3283,29 @@ static int mtk_dma_init(struct mtk_eth *eth)
static void mtk_dma_free(struct mtk_eth *eth)
{
const struct mtk_soc_data *soc = eth->soc;
- int i;
+ int i, j, txqs = 1;
- for (i = 0; i < MTK_MAX_DEVS; i++)
- if (eth->netdev[i])
- netdev_reset_queue(eth->netdev[i]);
- if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
- dma_free_coherent(eth->dma_dev,
- MTK_QDMA_RING_SIZE * soc->tx.desc_size,
- eth->scratch_ring, eth->phy_scratch_ring);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ txqs = MTK_QDMA_NUM_QUEUES;
+
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ if (!eth->netdev[i])
+ continue;
+
+ for (j = 0; j < txqs; j++)
+ netdev_tx_reset_subqueue(eth->netdev[i], j);
+ }
+
+ if (eth->scratch_ring) {
+ mtk_dma_ring_free(eth, soc->tx.fq_dma_size * soc->tx.desc_size,
+ eth->scratch_ring, eth->phy_scratch_ring,
+ true);
eth->scratch_ring = NULL;
eth->phy_scratch_ring = 0;
}
+
mtk_tx_clean(eth);
- mtk_rx_clean(eth, &eth->rx_ring[0], MTK_HAS_CAPS(soc->caps, MTK_SRAM));
+ mtk_rx_clean(eth, &eth->rx_ring[0], true);
mtk_rx_clean(eth, &eth->rx_ring_qdma, false);
if (eth->hwlro) {
@@ -3189,6 +3346,53 @@ static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
schedule_work(&eth->pending_work);
}
+static int mtk_get_irqs(struct platform_device *pdev, struct mtk_eth *eth)
+{
+ int i;
+
+ /* future SoCs beginning with MT7988 should use named IRQs in dts */
+ eth->irq[MTK_FE_IRQ_TX] = platform_get_irq_byname_optional(pdev, "fe1");
+ eth->irq[MTK_FE_IRQ_RX] = platform_get_irq_byname_optional(pdev, "fe2");
+ if (eth->irq[MTK_FE_IRQ_TX] >= 0 && eth->irq[MTK_FE_IRQ_RX] >= 0)
+ return 0;
+
+ /* only use legacy mode if platform_get_irq_byname_optional returned -ENXIO */
+ if (eth->irq[MTK_FE_IRQ_TX] != -ENXIO)
+ return dev_err_probe(&pdev->dev, eth->irq[MTK_FE_IRQ_TX],
+ "Error requesting FE TX IRQ\n");
+
+ if (eth->irq[MTK_FE_IRQ_RX] != -ENXIO)
+ return dev_err_probe(&pdev->dev, eth->irq[MTK_FE_IRQ_RX],
+ "Error requesting FE RX IRQ\n");
+
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT))
+ dev_warn(&pdev->dev, "legacy DT: missing interrupt-names.");
+
+ /* legacy way:
+ * On MTK_SHARED_INT SoCs (MT7621 + MT7628) the first IRQ is taken
+ * from devicetree and used for both RX and TX - it is shared.
+ * On SoCs with non-shared IRQs the first entry is not used,
+ * the second is for TX, and the third is for RX.
+ */
+ for (i = 0; i < MTK_FE_IRQ_NUM; i++) {
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
+ if (i == MTK_FE_IRQ_SHARED)
+ eth->irq[MTK_FE_IRQ_SHARED] = platform_get_irq(pdev, i);
+ else
+ eth->irq[i] = eth->irq[MTK_FE_IRQ_SHARED];
+ } else {
+ eth->irq[i] = platform_get_irq(pdev, i + 1);
+ }
+
+ if (eth->irq[i] < 0) {
+ dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
+ return -ENXIO;
+ }
+ }
+
+ return 0;
+}
+
static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
{
struct mtk_eth *eth = _eth;
@@ -3242,7 +3446,7 @@ static void mtk_poll_controller(struct net_device *dev)
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
- mtk_handle_irq_rx(eth->irq[2], dev);
+ mtk_handle_irq_rx(eth->irq[MTK_FE_IRQ_RX], dev);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
}
@@ -3269,7 +3473,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
if (mtk_is_netsys_v2_or_greater(eth))
val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
- MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN;
+ MTK_CHK_DDONE_EN;
else
val |= MTK_RX_BT_32DWORDS;
mtk_w32(eth, val, reg_map->qdma.glo_cfg);
@@ -3414,9 +3618,6 @@ static int mtk_open(struct net_device *dev)
}
mtk_gdm_config(eth, target_mac->id, gdm_config);
}
- /* Reset and enable PSE */
- mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
- mtk_w32(eth, 0, MTK_RST_GL);
napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
@@ -3928,6 +4129,10 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
else
mtk_hw_reset(eth);
+ /* No MT7628/88 support yet */
+ if (reset && !MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ mtk_mdio_config(eth);
+
if (mtk_is_netsys_v3_or_greater(eth)) {
/* Set FE to PDMAv2 if necessary */
val = mtk_r32(eth, MTK_FE_GLO_MISC);
@@ -3988,11 +4193,27 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
if (mtk_is_netsys_v3_or_greater(eth)) {
- /* PSE should not drop port1, port8 and port9 packets */
- mtk_w32(eth, 0x00000302, PSE_DROP_CFG);
+ /* PSE dummy page mechanism */
+ mtk_w32(eth, PSE_DUMMY_WORK_GDM(1) | PSE_DUMMY_WORK_GDM(2) |
+ PSE_DUMMY_WORK_GDM(3) | DUMMY_PAGE_THR, PSE_DUMY_REQ);
+
+ /* PSE free buffer drop threshold */
+ mtk_w32(eth, 0x00600009, PSE_IQ_REV(8));
+
+ /* PSE should not drop port8, port9 and port13 packets from
+ * WDMA Tx
+ */
+ mtk_w32(eth, 0x00002300, PSE_DROP_CFG);
+
+ /* PSE should drop packets to port8, port9 and port13 on WDMA Rx
+ * ring full
+ */
+ mtk_w32(eth, 0x00002300, PSE_PPE_DROP(0));
+ mtk_w32(eth, 0x00002300, PSE_PPE_DROP(1));
+ mtk_w32(eth, 0x00002300, PSE_PPE_DROP(2));
/* GDM and CDM Threshold */
- mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES);
+ mtk_w32(eth, 0x08000707, MTK_CDMW0_THRES);
mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
/* Disable GDM1 RX CRC stripping */
@@ -4009,7 +4230,7 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
/* PSE should drop packets to port 8/9 on WDMA Rx ring full */
- mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
+ mtk_w32(eth, 0x00000300, PSE_PPE_DROP(0));
/* PSE Free Queue Flow Control */
mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
@@ -4474,6 +4695,20 @@ static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
return phylink_ethtool_set_pauseparam(mac->phylink, pause);
}
+static int mtk_get_eee(struct net_device *dev, struct ethtool_keee *eee)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ return phylink_ethtool_get_eee(mac->phylink, eee);
+}
+
+static int mtk_set_eee(struct net_device *dev, struct ethtool_keee *eee)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ return phylink_ethtool_set_eee(mac->phylink, eee);
+}
+
static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
@@ -4506,6 +4741,8 @@ static const struct ethtool_ops mtk_ethtool_ops = {
.set_pauseparam = mtk_set_pauseparam,
.get_rxnfc = mtk_get_rxnfc,
.set_rxnfc = mtk_set_rxnfc,
+ .get_eee = mtk_get_eee,
+ .set_eee = mtk_set_eee,
};
static const struct net_device_ops mtk_netdev_ops = {
@@ -4615,6 +4852,9 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->phylink_config.type = PHYLINK_NETDEV;
mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
+ mac->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD |
+ MAC_2500FD;
+ mac->phylink_config.lpi_timer_default = 1000;
/* MT7623 gmac0 is now missing its speed-specific PLL configuration
* in its .mac_config method (since state->speed is not valid there.
@@ -4653,7 +4893,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
}
if (mtk_is_netsys_v3_or_greater(mac->hw) &&
- MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW_BIT) &&
+ MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW) &&
id == MTK_GMAC1_ID) {
mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
MAC_SYM_PAUSE |
@@ -4673,6 +4913,11 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->phylink = phylink;
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_2P5GPHY) &&
+ id == MTK_GMAC2_ID)
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ mac->phylink_config.supported_interfaces);
+
SET_NETDEV_DEV(eth->netdev[id], eth->dev);
eth->netdev[id]->watchdog_timeo = 5 * HZ;
eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
@@ -4687,7 +4932,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
eth->netdev[id]->features |= eth->soc->hw_features;
eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
- eth->netdev[id]->irq = eth->irq[0];
+ eth->netdev[id]->irq = eth->irq[MTK_FE_IRQ_SHARED];
eth->netdev[id]->dev.of_node = np;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
@@ -4730,7 +4975,7 @@ void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
list_add_tail(&dev->close_list, &dev_list);
}
- dev_close_many(&dev_list, false);
+ netif_close_many(&dev_list, false);
eth->dma_dev = dma_dev;
@@ -4772,9 +5017,30 @@ static int mtk_sgmii_init(struct mtk_eth *eth)
return 0;
}
+static int mtk_setup_legacy_sram(struct mtk_eth *eth, struct resource *res)
+{
+ dev_warn(eth->dev, "legacy DT: using hard-coded SRAM offset.\n");
+
+ if (res->start + MTK_ETH_SRAM_OFFSET + MTK_ETH_NETSYS_V2_SRAM_SIZE - 1 >
+ res->end)
+ return -EINVAL;
+
+ eth->sram_pool = devm_gen_pool_create(eth->dev,
+ const_ilog2(MTK_ETH_SRAM_GRANULARITY),
+ NUMA_NO_NODE, dev_name(eth->dev));
+
+ if (IS_ERR(eth->sram_pool))
+ return PTR_ERR(eth->sram_pool);
+
+ return gen_pool_add_virt(eth->sram_pool,
+ (unsigned long)eth->base + MTK_ETH_SRAM_OFFSET,
+ res->start + MTK_ETH_SRAM_OFFSET,
+ MTK_ETH_NETSYS_V2_SRAM_SIZE, NUMA_NO_NODE);
+}
+
static int mtk_probe(struct platform_device *pdev)
{
- struct resource *res = NULL, *res_sram;
+ struct resource *res = NULL;
struct device_node *mac_np;
struct mtk_eth *eth;
int err, i;
@@ -4794,20 +5060,6 @@ static int mtk_probe(struct platform_device *pdev)
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
eth->ip_align = NET_IP_ALIGN;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
- /* SRAM is actual memory and supports transparent access just like DRAM.
- * Hence we don't require __iomem being set and don't need to use accessor
- * functions to read from or write to SRAM.
- */
- if (mtk_is_netsys_v3_or_greater(eth)) {
- eth->sram_base = (void __force *)devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(eth->sram_base))
- return PTR_ERR(eth->sram_base);
- } else {
- eth->sram_base = (void __force *)eth->base + MTK_ETH_SRAM_OFFSET;
- }
- }
-
if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
if (!err)
@@ -4882,16 +5134,21 @@ static int mtk_probe(struct platform_device *pdev)
err = -EINVAL;
goto err_destroy_sgmii;
}
+
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
- if (mtk_is_netsys_v3_or_greater(eth)) {
- res_sram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res_sram) {
+ eth->sram_pool = of_gen_pool_get(pdev->dev.of_node,
+ "sram", 0);
+ if (!eth->sram_pool) {
+ if (!mtk_is_netsys_v3_or_greater(eth)) {
+ err = mtk_setup_legacy_sram(eth, res);
+ if (err)
+ goto err_destroy_sgmii;
+ } else {
+ dev_err(&pdev->dev,
+ "Could not get SRAM pool\n");
err = -EINVAL;
goto err_destroy_sgmii;
}
- eth->phy_scratch_ring = res_sram->start;
- } else {
- eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
}
}
}
@@ -4917,17 +5174,10 @@ static int mtk_probe(struct platform_device *pdev)
}
}
- for (i = 0; i < 3; i++) {
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
- eth->irq[i] = eth->irq[0];
- else
- eth->irq[i] = platform_get_irq(pdev, i);
- if (eth->irq[i] < 0) {
- dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
- err = -ENXIO;
- goto err_wed_exit;
- }
- }
+ err = mtk_get_irqs(pdev, eth);
+ if (err)
+ goto err_wed_exit;
+
for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
eth->clks[i] = devm_clk_get(eth->dev,
mtk_clks_source_name[i]);
@@ -4971,17 +5221,17 @@ static int mtk_probe(struct platform_device *pdev)
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
- err = devm_request_irq(eth->dev, eth->irq[0],
+ err = devm_request_irq(eth->dev, eth->irq[MTK_FE_IRQ_SHARED],
mtk_handle_irq, 0,
dev_name(eth->dev), eth);
} else {
- err = devm_request_irq(eth->dev, eth->irq[1],
+ err = devm_request_irq(eth->dev, eth->irq[MTK_FE_IRQ_TX],
mtk_handle_irq_tx, 0,
dev_name(eth->dev), eth);
if (err)
goto err_free_dev;
- err = devm_request_irq(eth->dev, eth->irq[2],
+ err = devm_request_irq(eth->dev, eth->irq[MTK_FE_IRQ_RX],
mtk_handle_irq_rx, 0,
dev_name(eth->dev), eth);
}
@@ -5027,7 +5277,7 @@ static int mtk_probe(struct platform_device *pdev)
} else
netif_info(eth, probe, eth->netdev[i],
"mediatek frame engine at 0x%08lx, irq %d\n",
- eth->netdev[i]->base_addr, eth->irq[0]);
+ eth->netdev[i]->base_addr, eth->irq[MTK_FE_IRQ_SHARED]);
}
/* we run 2 devices on the same DMA ring so we need a dummy device
@@ -5368,3 +5618,4 @@ module_platform_driver(mtk_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
+MODULE_IMPORT_NS("NETDEV_INTERNAL");
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 0d5225f1d3ee..0168e2fbc619 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -141,8 +141,10 @@
#define MTK_GDMA_MAC_ADRH(x) ({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ? \
0x54C : 0x50C + (_x * 0x1000); })
-/* Internal SRAM offset */
-#define MTK_ETH_SRAM_OFFSET 0x40000
+/* legacy DT support for internal SRAM */
+#define MTK_ETH_SRAM_OFFSET 0x40000
+#define MTK_ETH_SRAM_GRANULARITY 32
+#define MTK_ETH_NETSYS_V2_SRAM_SIZE 0x40000
/* FE global misc reg*/
#define MTK_FE_GLO_MISC 0x124
@@ -151,7 +153,15 @@
#define PSE_FQFC_CFG1 0x100
#define PSE_FQFC_CFG2 0x104
#define PSE_DROP_CFG 0x108
-#define PSE_PPE0_DROP 0x110
+#define PSE_PPE_DROP(x) (0x110 + ((x) * 0x4))
+
+/* PSE Last FreeQ Page Request Control */
+#define PSE_DUMY_REQ 0x10C
+/* PSE_DUMY_REQ is not a typo but actually called like that also in
+ * MediaTek's datasheet
+ */
+#define PSE_DUMMY_WORK_GDM(x) BIT(16 + (x))
+#define DUMMY_PAGE_THR 0x1
/* PSE Input Queue Reservation Register*/
#define PSE_IQ_REV(x) (0x140 + (((x) - 1) << 2))
@@ -423,7 +433,8 @@
/* XMAC status registers */
#define MTK_XGMAC_STS(x) (((x) == MTK_GMAC3_ID) ? 0x1001C : 0x1000C)
-#define MTK_XGMAC_FORCE_LINK(x) (((x) == MTK_GMAC2_ID) ? BIT(31) : BIT(15))
+#define MTK_XGMAC_FORCE_MODE(x) (((x) == MTK_GMAC2_ID) ? BIT(31) : BIT(15))
+#define MTK_XGMAC_FORCE_LINK(x) (((x) == MTK_GMAC2_ID) ? BIT(27) : BIT(11))
#define MTK_USXGMII_PCS_LINK BIT(8)
#define MTK_XGMAC_RX_FC BIT(5)
#define MTK_XGMAC_TX_FC BIT(4)
@@ -453,6 +464,8 @@
#define MAC_MCR_RX_FIFO_CLR_DIS BIT(12)
#define MAC_MCR_BACKOFF_EN BIT(9)
#define MAC_MCR_BACKPR_EN BIT(8)
+#define MAC_MCR_EEE1G BIT(7)
+#define MAC_MCR_EEE100M BIT(6)
#define MAC_MCR_FORCE_RX_FC BIT(5)
#define MAC_MCR_FORCE_TX_FC BIT(4)
#define MAC_MCR_SPEED_1000 BIT(3)
@@ -461,6 +474,15 @@
#define MAC_MCR_FORCE_LINK BIT(0)
#define MAC_MCR_FORCE_LINK_DOWN (MAC_MCR_FORCE_MODE)
+/* Mac EEE control registers */
+#define MTK_MAC_EEECR(x) (0x10104 + (x * 0x100))
+#define MAC_EEE_WAKEUP_TIME_1000 GENMASK(31, 24)
+#define MAC_EEE_WAKEUP_TIME_100 GENMASK(23, 16)
+#define MAC_EEE_LPI_TXIDLE_THD GENMASK(15, 8)
+#define MAC_EEE_CKG_TXIDLE BIT(3)
+#define MAC_EEE_CKG_RXLPI BIT(2)
+#define MAC_EEE_LPI_MODE BIT(0)
+
/* Mac status registers */
#define MTK_MAC_MSR(x) (0x10108 + (x * 0x100))
#define MAC_MSR_EEE1G BIT(7)
@@ -505,6 +527,21 @@
#define INTF_MODE_RGMII_1000 (TRGMII_MODE | TRGMII_CENTRAL_ALIGNED)
#define INTF_MODE_RGMII_10_100 0
+/* XFI Mac control registers */
+#define MTK_XMAC_BASE(x) (0x12000 + (((x) - 1) * 0x1000))
+#define MTK_XMAC_MCR(x) (MTK_XMAC_BASE(x))
+#define XMAC_MCR_TRX_DISABLE 0xf
+#define XMAC_MCR_FORCE_TX_FC BIT(5)
+#define XMAC_MCR_FORCE_RX_FC BIT(4)
+
+/* XFI Mac logic reset registers */
+#define MTK_XMAC_LOGIC_RST(x) (MTK_XMAC_BASE(x) + 0x10)
+#define XMAC_LOGIC_RST BIT(0)
+
+/* XFI Mac count global control */
+#define MTK_XMAC_CNT_CTRL(x) (MTK_XMAC_BASE(x) + 0x100)
+#define XMAC_GLB_CNTCLR BIT(0)
+
/* GPIO port control registers for GMAC 2*/
#define GPIO_OD33_CTRL8 0x4c0
#define GPIO_BIAS_CTRL 0xed0
@@ -568,6 +605,10 @@
#define GEPHY_MAC_SEL BIT(1)
/* Top misc registers */
+#define TOP_MISC_NETSYS_PCS_MUX 0x0
+#define NETSYS_PCS_MUX_MASK GENMASK(1, 0)
+#define MUX_G2_USXGMII_SEL BIT(1)
+
#define USB_PHY_SWITCH_REG 0x218
#define QPHY_SEL_MASK GENMASK(1, 0)
#define SGMII_QPHY_SEL 0x2
@@ -603,6 +644,11 @@
#define MTK_MAC_FSM(x) (0x1010C + ((x) * 0x100))
+#define MTK_FE_IRQ_SHARED 0
+#define MTK_FE_IRQ_TX 0
+#define MTK_FE_IRQ_RX 1
+#define MTK_FE_IRQ_NUM (MTK_FE_IRQ_RX + 1)
+
struct mtk_rx_dma {
unsigned int rxd1;
unsigned int rxd2;
@@ -932,6 +978,7 @@ enum mkt_eth_capabilities {
MTK_RGMII_BIT = 0,
MTK_TRGMII_BIT,
MTK_SGMII_BIT,
+ MTK_2P5GPHY_BIT,
MTK_ESW_BIT,
MTK_GEPHY_BIT,
MTK_MUX_BIT,
@@ -952,6 +999,7 @@ enum mkt_eth_capabilities {
MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT,
MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT,
+ MTK_ETH_MUX_GMAC2_TO_2P5GPHY_BIT,
MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT,
MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT,
@@ -961,6 +1009,7 @@ enum mkt_eth_capabilities {
MTK_ETH_PATH_GMAC1_SGMII_BIT,
MTK_ETH_PATH_GMAC2_RGMII_BIT,
MTK_ETH_PATH_GMAC2_SGMII_BIT,
+ MTK_ETH_PATH_GMAC2_2P5GPHY_BIT,
MTK_ETH_PATH_GMAC2_GEPHY_BIT,
MTK_ETH_PATH_GDM1_ESW_BIT,
};
@@ -969,6 +1018,7 @@ enum mkt_eth_capabilities {
#define MTK_RGMII BIT_ULL(MTK_RGMII_BIT)
#define MTK_TRGMII BIT_ULL(MTK_TRGMII_BIT)
#define MTK_SGMII BIT_ULL(MTK_SGMII_BIT)
+#define MTK_2P5GPHY BIT_ULL(MTK_2P5GPHY_BIT)
#define MTK_ESW BIT_ULL(MTK_ESW_BIT)
#define MTK_GEPHY BIT_ULL(MTK_GEPHY_BIT)
#define MTK_MUX BIT_ULL(MTK_MUX_BIT)
@@ -991,6 +1041,8 @@ enum mkt_eth_capabilities {
BIT_ULL(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT)
#define MTK_ETH_MUX_U3_GMAC2_TO_QPHY \
BIT_ULL(MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT)
+#define MTK_ETH_MUX_GMAC2_TO_2P5GPHY \
+ BIT_ULL(MTK_ETH_MUX_GMAC2_TO_2P5GPHY_BIT)
#define MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII \
BIT_ULL(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT)
#define MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII \
@@ -1002,6 +1054,7 @@ enum mkt_eth_capabilities {
#define MTK_ETH_PATH_GMAC1_SGMII BIT_ULL(MTK_ETH_PATH_GMAC1_SGMII_BIT)
#define MTK_ETH_PATH_GMAC2_RGMII BIT_ULL(MTK_ETH_PATH_GMAC2_RGMII_BIT)
#define MTK_ETH_PATH_GMAC2_SGMII BIT_ULL(MTK_ETH_PATH_GMAC2_SGMII_BIT)
+#define MTK_ETH_PATH_GMAC2_2P5GPHY BIT_ULL(MTK_ETH_PATH_GMAC2_2P5GPHY_BIT)
#define MTK_ETH_PATH_GMAC2_GEPHY BIT_ULL(MTK_ETH_PATH_GMAC2_GEPHY_BIT)
#define MTK_ETH_PATH_GDM1_ESW BIT_ULL(MTK_ETH_PATH_GDM1_ESW_BIT)
@@ -1011,6 +1064,7 @@ enum mkt_eth_capabilities {
#define MTK_GMAC2_RGMII (MTK_ETH_PATH_GMAC2_RGMII | MTK_RGMII)
#define MTK_GMAC2_SGMII (MTK_ETH_PATH_GMAC2_SGMII | MTK_SGMII)
#define MTK_GMAC2_GEPHY (MTK_ETH_PATH_GMAC2_GEPHY | MTK_GEPHY)
+#define MTK_GMAC2_2P5GPHY (MTK_ETH_PATH_GMAC2_2P5GPHY | MTK_2P5GPHY)
#define MTK_GDM1_ESW (MTK_ETH_PATH_GDM1_ESW | MTK_ESW)
/* MUXes present on SoCs */
@@ -1030,6 +1084,10 @@ enum mkt_eth_capabilities {
(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_MUX | \
MTK_SHARED_SGMII)
+/* 2: GMAC2 -> 2P5GPHY */
+#define MTK_MUX_GMAC2_TO_2P5GPHY \
+ (MTK_ETH_MUX_GMAC2_TO_2P5GPHY | MTK_MUX | MTK_INFRA)
+
/* 0: GMACx -> GEPHY, 1: GMACx -> SGMII where x is 1 or 2 */
#define MTK_MUX_GMAC12_TO_GEPHY_SGMII \
(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII | MTK_MUX)
@@ -1065,8 +1123,9 @@ enum mkt_eth_capabilities {
MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
MTK_RSTCTRL_PPE1 | MTK_SRAM)
-#define MT7988_CAPS (MTK_36BIT_DMA | MTK_GDM1_ESW | MTK_QDMA | \
- MTK_RSTCTRL_PPE1 | MTK_RSTCTRL_PPE2 | MTK_SRAM)
+#define MT7988_CAPS (MTK_36BIT_DMA | MTK_GDM1_ESW | MTK_GMAC2_2P5GPHY | \
+ MTK_MUX_GMAC2_TO_2P5GPHY | MTK_QDMA | MTK_RSTCTRL_PPE1 | \
+ MTK_RSTCTRL_PPE2 | MTK_SRAM)
struct mtk_tx_dma_desc_info {
dma_addr_t addr;
@@ -1126,7 +1185,7 @@ struct mtk_reg_map {
};
/* struct mtk_eth_data - This is the structure holding all differences
- * among various plaforms
+ * among various platforms
* @reg_map: SoC register map.
* @ana_rgc3: The offset for register ANA_RGC3 related to
* sgmiisys syscon
@@ -1186,8 +1245,9 @@ struct mtk_soc_data {
/* struct mtk_eth - This is the main data structure for holding the state
* of the driver
* @dev: The device pointer
- * @dev: The device pointer used for dma mapping/alloc
+ * @dma_dev: The device pointer used for dma mapping/alloc
* @base: The mapped register i/o base
+ * @sram_pool: Pointer to SRAM pool used for DMA descriptor rings
* @page_lock: Make sure that register operations are atomic
* @tx_irq_lock: Make sure that IRQ register operations are atomic
* @rx_irq_lock: Make sure that IRQ register operations are atomic
@@ -1226,21 +1286,21 @@ struct mtk_soc_data {
* @mii_bus: If there is a bus we need to create an instance for it
* @pending_work: The workqueue used to reset the dma ring
* @state: Initialization and runtime state of the device
- * @soc: Holding specific data among vaious SoCs
+ * @soc: Holding specific data among various SoCs
*/
struct mtk_eth {
struct device *dev;
struct device *dma_dev;
void __iomem *base;
- void *sram_base;
+ struct gen_pool *sram_pool;
spinlock_t page_lock;
spinlock_t tx_irq_lock;
spinlock_t rx_irq_lock;
struct net_device *dummy_dev;
struct net_device *netdev[MTK_MAX_DEVS];
struct mtk_mac *mac[MTK_MAX_DEVS];
- int irq[3];
+ int irq[MTK_FE_IRQ_NUM];
u32 msg_enable;
unsigned long sysclk;
struct regmap *ethsys;
@@ -1260,6 +1320,7 @@ struct mtk_eth {
struct clk *clks[MTK_CLK_MAX];
struct mii_bus *mii_bus;
+ unsigned int mdc_divider;
struct work_struct pending_work;
unsigned long state;
@@ -1417,6 +1478,23 @@ static inline u32 mtk_get_ib2_multicast_mask(struct mtk_eth *eth)
return MTK_FOE_IB2_MULTICAST;
}
+static inline bool mtk_interface_mode_is_xgmii(struct mtk_eth *eth,
+ phy_interface_t interface)
+{
+ if (!mtk_is_netsys_v3_or_greater(eth))
+ return false;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_INTERNAL:
+ case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_5GBASER:
+ return true;
+ default:
+ return false;
+ }
+}
+
/* read the hardware status register */
void mtk_stats_update_mac(struct mtk_mac *mac);
@@ -1425,6 +1503,7 @@ u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg);
int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id);
+int mtk_gmac_2p5gphy_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index f20bb390df3a..e9bd32741983 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -34,8 +34,10 @@ struct mtk_flow_data {
u16 vlan_in;
struct {
- u16 id;
- __be16 proto;
+ struct {
+ u16 id;
+ __be16 proto;
+ } vlans[2];
u8 num;
} vlan;
struct {
@@ -99,7 +101,9 @@ mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_i
if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
return -1;
+ rcu_read_lock();
err = dev_fill_forward_path(dev, addr, &stack);
+ rcu_read_unlock();
if (err)
return err;
@@ -349,18 +353,19 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
case FLOW_ACTION_CSUM:
break;
case FLOW_ACTION_VLAN_PUSH:
- if (data.vlan.num == 1 ||
+ if (data.vlan.num + data.pppoe.num == 2 ||
act->vlan.proto != htons(ETH_P_8021Q))
return -EOPNOTSUPP;
- data.vlan.id = act->vlan.vid;
- data.vlan.proto = act->vlan.proto;
+ data.vlan.vlans[data.vlan.num].id = act->vlan.vid;
+ data.vlan.vlans[data.vlan.num].proto = act->vlan.proto;
data.vlan.num++;
break;
case FLOW_ACTION_VLAN_POP:
break;
case FLOW_ACTION_PPPOE_PUSH:
- if (data.pppoe.num == 1)
+ if (data.pppoe.num == 1 ||
+ data.vlan.num == 2)
return -EOPNOTSUPP;
data.pppoe.sid = act->pppoe.sid;
@@ -450,12 +455,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
foe.bridge.vlan = data.vlan_in;
- if (data.vlan.num == 1) {
- if (data.vlan.proto != htons(ETH_P_8021Q))
- return -EOPNOTSUPP;
+ for (i = 0; i < data.vlan.num; i++)
+ mtk_foe_entry_set_vlan(eth, &foe, data.vlan.vlans[i].id);
- mtk_foe_entry_set_vlan(eth, &foe, data.vlan.id);
- }
if (data.pppoe.num == 1)
mtk_foe_entry_set_pppoe(eth, &foe, data.pppoe.sid);
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 25989c79c92e..b83886a41121 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -1163,6 +1163,7 @@ static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
struct net_device *ndev = priv->ndev;
unsigned int head = ring->head;
unsigned int entry = ring->tail;
+ unsigned long flags;
while (entry != head && count < (MTK_STAR_RING_NUM_DESCS - 1)) {
ret = mtk_star_tx_complete_one(priv);
@@ -1182,9 +1183,9 @@ static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
netif_wake_queue(ndev);
if (napi_complete(napi)) {
- spin_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, flags);
mtk_star_enable_dma_irq(priv, false, true);
- spin_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
}
return 0;
@@ -1341,16 +1342,16 @@ push_new_skb:
static int mtk_star_rx_poll(struct napi_struct *napi, int budget)
{
struct mtk_star_priv *priv;
+ unsigned long flags;
int work_done = 0;
priv = container_of(napi, struct mtk_star_priv, rx_napi);
work_done = mtk_star_rx(priv, budget);
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
- spin_lock(&priv->lock);
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ spin_lock_irqsave(&priv->lock, flags);
mtk_star_enable_dma_irq(priv, true, false);
- spin_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
}
return work_done;
@@ -1427,15 +1428,10 @@ static int mtk_star_mdio_init(struct net_device *ndev)
of_node = dev->of_node;
- mdio_node = of_get_child_by_name(of_node, "mdio");
+ mdio_node = of_get_available_child_by_name(of_node, "mdio");
if (!mdio_node)
return -ENODEV;
- if (!of_device_is_available(mdio_node)) {
- ret = -ENODEV;
- goto out_put_node;
- }
-
priv->mii = devm_mdiobus_alloc(dev);
if (!priv->mii) {
ret = -ENOMEM;
@@ -1467,6 +1463,8 @@ static __maybe_unused int mtk_star_suspend(struct device *dev)
if (netif_running(ndev))
mtk_star_disable(ndev);
+ netif_device_detach(ndev);
+
clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
return 0;
@@ -1491,6 +1489,8 @@ static __maybe_unused int mtk_star_resume(struct device *dev)
clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
}
+ netif_device_attach(ndev);
+
return ret;
}
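
The mtk_star_emac.c hunks above make two related fixes to the NAPI poll paths: the IRQ mask is only re-armed when napi_complete_done() reports that polling really finished, and the plain spin_lock()/spin_unlock() pair becomes spin_lock_irqsave()/spin_unlock_irqrestore() because the same lock can also be taken from hard-IRQ context. A minimal, driver-agnostic sketch of that pattern follows; struct my_priv, my_process_rx() and my_enable_rx_irq() are placeholders, not mtk_star symbols.

#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct my_priv {
	struct napi_struct rx_napi;
	spinlock_t lock;		/* protects the IRQ mask registers */
};

/* Stubs standing in for the real RX processing and IRQ unmasking. */
static int my_process_rx(struct my_priv *priv, int budget)
{
	return 0;	/* a real driver would drain the RX ring here */
}

static void my_enable_rx_irq(struct my_priv *priv)
{
	/* a real driver would unmask its RX interrupt here */
}

static int my_rx_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, rx_napi);
	unsigned long flags;
	int work_done;

	work_done = my_process_rx(priv, budget);

	/* Re-arm the IRQ only if NAPI really completed; napi_complete_done()
	 * may return false when polling must continue.
	 */
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		/* _irqsave variant: the ISR can take the same lock */
		spin_lock_irqsave(&priv->lock, flags);
		my_enable_rx_irq(priv);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

	return work_done;
}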
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index e212a4ba9275..1ed1f88dd7f8 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -59,7 +59,9 @@ struct mtk_wed_flow_block_priv {
static const struct mtk_wed_soc_data mt7622_data = {
.regmap = {
.tx_bm_tkid = 0x088,
- .wpdma_rx_ring0 = 0x770,
+ .wpdma_rx_ring = {
+ 0x770,
+ },
.reset_idx_tx_mask = GENMASK(3, 0),
.reset_idx_rx_mask = GENMASK(17, 16),
},
@@ -70,7 +72,9 @@ static const struct mtk_wed_soc_data mt7622_data = {
static const struct mtk_wed_soc_data mt7986_data = {
.regmap = {
.tx_bm_tkid = 0x0c8,
- .wpdma_rx_ring0 = 0x770,
+ .wpdma_rx_ring = {
+ 0x770,
+ },
.reset_idx_tx_mask = GENMASK(1, 0),
.reset_idx_rx_mask = GENMASK(7, 6),
},
@@ -81,7 +85,10 @@ static const struct mtk_wed_soc_data mt7986_data = {
static const struct mtk_wed_soc_data mt7988_data = {
.regmap = {
.tx_bm_tkid = 0x0c8,
- .wpdma_rx_ring0 = 0x7d0,
+ .wpdma_rx_ring = {
+ 0x7d0,
+ 0x7d8,
+ },
.reset_idx_tx_mask = GENMASK(1, 0),
.reset_idx_rx_mask = GENMASK(7, 6),
},
@@ -621,8 +628,8 @@ mtk_wed_amsdu_init(struct mtk_wed_device *dev)
return ret;
}
- /* eagle E1 PCIE1 tx ring 22 flow control issue */
- if (dev->wlan.id == 0x7991)
+ /* Kite and Eagle E1 PCIE1 tx ring 22 flow control issue */
+ if (dev->wlan.id == 0x7991 || dev->wlan.id == 0x7992)
wed_clr(dev, MTK_WED_AMSDU_FIFO, MTK_WED_AMSDU_IS_PRIOR0_RING);
wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN);
@@ -670,7 +677,7 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
void *buf;
int s;
- page = __dev_alloc_page(GFP_KERNEL);
+ page = __dev_alloc_page(GFP_KERNEL | GFP_DMA32);
if (!page)
return -ENOMEM;
@@ -793,7 +800,7 @@ mtk_wed_hwrro_buffer_alloc(struct mtk_wed_device *dev)
struct page *page;
int s;
- page = __dev_alloc_page(GFP_KERNEL);
+ page = __dev_alloc_page(GFP_KERNEL | GFP_DMA32);
if (!page)
return -ENOMEM;
@@ -1239,7 +1246,11 @@ mtk_wed_set_wpdma(struct mtk_wed_device *dev)
return;
wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
- wed_w32(dev, dev->hw->soc->regmap.wpdma_rx_ring0, dev->wlan.wpdma_rx);
+ wed_w32(dev, dev->hw->soc->regmap.wpdma_rx_ring[0],
+ dev->wlan.wpdma_rx[0]);
+ if (mtk_wed_is_v3_or_greater(dev->hw))
+ wed_w32(dev, dev->hw->soc->regmap.wpdma_rx_ring[1],
+ dev->wlan.wpdma_rx[1]);
if (!dev->wlan.hw_rro)
return;
@@ -1318,26 +1329,14 @@ mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
static int
mtk_wed_rro_alloc(struct mtk_wed_device *dev)
{
- struct reserved_mem *rmem;
- struct device_node *np;
- int index;
-
- index = of_property_match_string(dev->hw->node, "memory-region-names",
- "wo-dlm");
- if (index < 0)
- return index;
+ struct resource res;
+ int ret;
- np = of_parse_phandle(dev->hw->node, "memory-region", index);
- if (!np)
- return -ENODEV;
-
- rmem = of_reserved_mem_lookup(np);
- of_node_put(np);
-
- if (!rmem)
- return -ENODEV;
+ ret = of_reserved_mem_region_to_resource_byname(dev->hw->node, "wo-dlm", &res);
+ if (ret)
+ return ret;
- dev->rro.miod_phys = rmem->base;
+ dev->rro.miod_phys = res.start;
dev->rro.fdbk_phys = MTK_WED_MIOD_COUNT + dev->rro.miod_phys;
return mtk_wed_rro_ring_alloc(dev, &dev->rro.ring,
@@ -2000,7 +1999,7 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
if (mtk_wed_is_v3_or_greater(dev->hw))
wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_TKID_ALI_EN);
- /* initail tx interrupt trigger */
+ /* initial tx interrupt trigger */
wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR |
@@ -2011,7 +2010,7 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG,
dev->wlan.tx_tbit[1]));
- /* initail txfree interrupt trigger */
+ /* initial txfree interrupt trigger */
wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE,
MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN |
MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR |
@@ -2335,6 +2334,16 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
if (!dev->rx_wdma[i].desc)
mtk_wed_wdma_rx_ring_setup(dev, i, 16, false);
+ if (dev->wlan.hw_rro) {
+ for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) {
+ u32 addr = MTK_WED_RRO_MSDU_PG_CTRL0(i) +
+ MTK_WED_RING_OFS_COUNT;
+
+ if (!wed_r32(dev, addr))
+ wed_w32(dev, addr, 1);
+ }
+ }
+
mtk_wed_hw_init(dev);
mtk_wed_configure_irq(dev, irq_mask);
@@ -2417,6 +2426,10 @@ mtk_wed_attach(struct mtk_wed_device *dev)
dev->version = hw->version;
dev->hw->pcie_base = mtk_wed_get_pcie_base(dev);
+ ret = dma_set_mask_and_coherent(hw->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto out;
+
if (hw->eth->dma_dev == hw->eth->dev &&
of_dma_is_coherent(hw->eth->dev->of_node))
mtk_eth_set_dma_device(hw->eth, hw->dev);
@@ -2794,7 +2807,6 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
if (!pdev)
goto err_of_node_put;
- get_device(&pdev->dev);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
goto err_put_device;
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
index c1f0479d7a71..b49aee9a8b65 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
@@ -17,7 +17,7 @@ struct mtk_wed_wo;
struct mtk_wed_soc_data {
struct {
u32 tx_bm_tkid;
- u32 wpdma_rx_ring0;
+ u32 wpdma_rx_ring[MTK_WED_RX_QUEUES];
u32 reset_idx_tx_mask;
u32 reset_idx_rx_mask;
} regmap;
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
index c06e5ad18b01..fa6b21603416 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
@@ -234,27 +234,23 @@ int mtk_wed_mcu_msg_update(struct mtk_wed_device *dev, int id, void *data,
}
static int
-mtk_wed_get_memory_region(struct mtk_wed_hw *hw, int index,
+mtk_wed_get_memory_region(struct mtk_wed_hw *hw, const char *name,
struct mtk_wed_wo_memory_region *region)
{
- struct reserved_mem *rmem;
- struct device_node *np;
-
- np = of_parse_phandle(hw->node, "memory-region", index);
- if (!np)
- return -ENODEV;
-
- rmem = of_reserved_mem_lookup(np);
- of_node_put(np);
+ struct resource res;
+ int ret;
- if (!rmem)
- return -ENODEV;
+ ret = of_reserved_mem_region_to_resource_byname(hw->node, name, &res);
+ if (ret)
+ return 0;
- region->phy_addr = rmem->base;
- region->size = rmem->size;
- region->addr = devm_ioremap(hw->dev, region->phy_addr, region->size);
+ region->phy_addr = res.start;
+ region->size = resource_size(&res);
+ region->addr = devm_ioremap_resource(hw->dev, &res);
+ if (IS_ERR(region->addr))
+ return PTR_ERR(region->addr);
- return !region->addr ? -EINVAL : 0;
+ return 0;
}
static int
@@ -319,13 +315,7 @@ mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo)
/* load firmware region metadata */
for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
- int index = of_property_match_string(wo->hw->node,
- "memory-region-names",
- mem_region[i].name);
- if (index < 0)
- continue;
-
- ret = mtk_wed_get_memory_region(wo->hw, index, &mem_region[i]);
+ ret = mtk_wed_get_memory_region(wo->hw, mem_region[i].name, &mem_region[i]);
if (ret)
return ret;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index 825e05fb8607..0b1cb340206f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -7,6 +7,7 @@ config MLX4_EN
tristate "Mellanox Technologies 1/10/40Gbit Ethernet support"
depends on PCI && NETDEVICES && ETHERNET && INET
depends on PTP_1588_CLOCK_OPTIONAL
+ select PAGE_POOL
select MLX4_CORE
help
This driver supports Mellanox Technologies ConnectX Ethernet
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index b330020dc0d6..07b061a97a6e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -526,28 +526,6 @@ out:
return res;
}
-u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones, u32 uid, u32 obj, u32 count)
-{
- struct mlx4_zone_entry *zone;
- int res = 0;
-
- spin_lock(&zones->lock);
-
- zone = __mlx4_find_zone_by_uid(zones, uid);
-
- if (NULL == zone) {
- res = -1;
- goto out;
- }
-
- __mlx4_free_from_zone(zone, obj, count);
-
-out:
- spin_unlock(&zones->lock);
-
- return res;
-}
-
u32 mlx4_zone_free_entries_unique(struct mlx4_zone_allocator *zones, u32 obj, u32 count)
{
struct mlx4_zone_entry *zone;
@@ -682,9 +660,9 @@ static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
}
static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
- struct mlx4_db *db, int order)
+ struct mlx4_db *db, unsigned int order)
{
- int o;
+ unsigned int o;
int i;
for (o = order; o <= 1; ++o) {
@@ -712,7 +690,7 @@ found:
return 0;
}
-int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, unsigned int order)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_db_pgdir *pgdir;
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 0d8a362c2673..edcc6f662618 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -236,7 +236,7 @@ static void dump_err_buf(struct mlx4_dev *dev)
static void poll_catas(struct timer_list *t)
{
- struct mlx4_priv *priv = from_timer(priv, t, catas_err.timer);
+ struct mlx4_priv *priv = timer_container_of(priv, t, catas_err.timer);
struct mlx4_dev *dev = &priv->dev;
u32 slave_read;
@@ -305,7 +305,7 @@ void mlx4_stop_catas_poll(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- del_timer_sync(&priv->catas_err.timer);
+ timer_delete_sync(&priv->catas_err.timer);
if (priv->catas_err.map) {
iounmap(priv->catas_err.map);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index cd754cd76bde..2aeaafcfb993 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -38,7 +38,7 @@
/* mlx4_en_read_clock - read raw cycle counter (to be used by time counter)
*/
-static u64 mlx4_en_read_clock(const struct cyclecounter *tc)
+static u64 mlx4_en_read_clock(struct cyclecounter *tc)
{
struct mlx4_en_dev *mdev =
container_of(tc, struct mlx4_en_dev, cycles);
@@ -249,7 +249,7 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
static u32 freq_to_shift(u16 freq)
{
u32 freq_khz = freq * 1000;
- u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
+ u64 max_val_cycles = freq_khz * 1000ULL * MLX4_EN_WRAP_AROUND_SEC;
u64 max_val_cycles_rounded = 1ULL << fls64(max_val_cycles - 1);
/* calculate max possible multiplier in order to fit in 64bit */
u64 max_mul = div64_u64(ULLONG_MAX, max_val_cycles_rounded);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index 752a72499b4f..be80da03a594 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -290,9 +290,6 @@ static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev,
struct mlx4_en_priv *priv = netdev_priv(dev);
struct ieee_ets *my_ets = &priv->ets;
- if (!my_ets)
- return -EINVAL;
-
ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
ets->cbs = my_ets->cbs;
memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index cd17a3f4faf8..ad6298456639 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1727,6 +1727,13 @@ static int mlx4_en_get_num_flows(struct mlx4_en_priv *priv)
}
+static u32 mlx4_en_get_rx_ring_count(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ return priv->rx_ring_num;
+}
+
static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -1743,9 +1750,6 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return -EINVAL;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = priv->rx_ring_num;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = mlx4_en_get_num_flows(priv);
break;
@@ -1897,6 +1901,7 @@ static int mlx4_en_get_ts_info(struct net_device *dev,
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
info->so_timestamping |=
SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -2153,6 +2158,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.set_ringparam = mlx4_en_set_ringparam,
.get_rxnfc = mlx4_en_get_rxnfc,
.set_rxnfc = mlx4_en_set_rxnfc,
+ .get_rx_ring_count = mlx4_en_get_rx_ring_count,
.get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
.get_rxfh_key_size = mlx4_en_get_rxfh_key_size,
.get_rxfh = mlx4_en_get_rxfh,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 281b34af0bb4..81bf8908b897 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1180,9 +1180,9 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
mlx4_unregister_mac(mdev->dev, priv->port, mac);
hlist_del_rcu(&entry->hlist);
- kfree_rcu(entry, rcu);
en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
entry->mac, priv->port);
+ kfree_rcu(entry, rcu);
++removed;
}
}
@@ -2420,21 +2420,22 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+static int mlx4_en_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
- struct hwtstamp_config config;
-
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
/* device doesn't support time stamping */
- if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
+ if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "device doesn't support time stamping");
return -EINVAL;
+ }
/* TX HW timestamp */
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
break;
@@ -2443,7 +2444,7 @@ static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
}
/* RX HW timestamp */
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
break;
case HWTSTAMP_FILTER_ALL:
@@ -2461,39 +2462,27 @@ static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
}
if (mlx4_en_reset_config(dev, config, dev->features)) {
- config.tx_type = HWTSTAMP_TX_OFF;
- config.rx_filter = HWTSTAMP_FILTER_NONE;
+ config->tx_type = HWTSTAMP_TX_OFF;
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
}
- return copy_to_user(ifr->ifr_data, &config,
- sizeof(config)) ? -EFAULT : 0;
+ return 0;
}
-static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+static int mlx4_en_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config,
- sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
-}
-
-static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return mlx4_en_hwtstamp_set(dev, ifr);
- case SIOCGHWTSTAMP:
- return mlx4_en_hwtstamp_get(dev, ifr);
- default:
- return -EOPNOTSUPP;
- }
+ *config = priv->hwtstamp_config;
+ return 0;
}
static netdev_features_t mlx4_en_fix_features(struct net_device *netdev,
@@ -2560,7 +2549,7 @@ static int mlx4_en_set_features(struct net_device *netdev,
}
if (reset) {
- ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config,
+ ret = mlx4_en_reset_config(netdev, &priv->hwtstamp_config,
features);
if (ret)
return ret;
@@ -2670,8 +2659,7 @@ static int mlx4_udp_tunnel_sync(struct net_device *dev, unsigned int table)
static const struct udp_tunnel_nic_info mlx4_udp_tunnels = {
.sync_table = mlx4_udp_tunnel_sync,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
+ .flags = UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
},
@@ -2845,7 +2833,6 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_set_mac_address = mlx4_en_set_mac,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = mlx4_en_change_mtu,
- .ndo_eth_ioctl = mlx4_en_ioctl,
.ndo_tx_timeout = mlx4_en_tx_timeout,
.ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
@@ -2859,6 +2846,8 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_features_check = mlx4_en_features_check,
.ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
.ndo_bpf = mlx4_xdp,
+ .ndo_hwtstamp_get = mlx4_en_hwtstamp_get,
+ .ndo_hwtstamp_set = mlx4_en_hwtstamp_set,
};
static const struct net_device_ops mlx4_netdev_ops_master = {
@@ -3513,7 +3502,7 @@ out:
}
int mlx4_en_reset_config(struct net_device *dev,
- struct hwtstamp_config ts_config,
+ struct kernel_hwtstamp_config *ts_config,
netdev_features_t features)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -3523,8 +3512,8 @@ int mlx4_en_reset_config(struct net_device *dev,
int port_up = 0;
int err = 0;
- if (priv->hwtstamp_config.tx_type == ts_config.tx_type &&
- priv->hwtstamp_config.rx_filter == ts_config.rx_filter &&
+ if (priv->hwtstamp_config.tx_type == ts_config->tx_type &&
+ priv->hwtstamp_config.rx_filter == ts_config->rx_filter &&
!DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
!DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS))
return 0; /* Nothing to change */
@@ -3543,7 +3532,7 @@ int mlx4_en_reset_config(struct net_device *dev,
mutex_lock(&mdev->state_lock);
memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
- memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));
+ memcpy(&new_prof.hwtstamp_config, ts_config, sizeof(*ts_config));
err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
if (err)
@@ -3561,7 +3550,7 @@ int mlx4_en_reset_config(struct net_device *dev,
dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
else
dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
- } else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) {
+ } else if (ts_config->rx_filter == HWTSTAMP_FILTER_NONE) {
/* RX time-stamping is OFF, update the RX vlan offload
* to the latest wanted state
*/
@@ -3582,7 +3571,7 @@ int mlx4_en_reset_config(struct net_device *dev,
* Regardless of the caller's choice,
* Turn Off RX vlan offload in case of time-stamping is ON
*/
- if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) {
+ if (ts_config->rx_filter != HWTSTAMP_FILTER_NONE) {
if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n");
dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
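
The en_netdev.c hunks above drop the SIOCSHWTSTAMP/SIOCGHWTSTAMP ioctl plumbing and wire the driver into the dedicated .ndo_hwtstamp_get/.ndo_hwtstamp_set callbacks, which operate on an in-kernel struct kernel_hwtstamp_config so the copies to and from user space are handled by the core. A hedged sketch of that callback shape follows; struct my_priv and my_netdev_ops are placeholders, and a real driver would also reprogram its timestamping hardware in the set path.

#include <linux/net_tstamp.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>

struct my_priv {
	struct kernel_hwtstamp_config hwtstamp_config;
};

static int my_hwtstamp_get(struct net_device *dev,
			   struct kernel_hwtstamp_config *config)
{
	struct my_priv *priv = netdev_priv(dev);

	*config = priv->hwtstamp_config;	/* no copy_to_user() needed */
	return 0;
}

static int my_hwtstamp_set(struct net_device *dev,
			   struct kernel_hwtstamp_config *config,
			   struct netlink_ext_ack *extack)
{
	struct my_priv *priv = netdev_priv(dev);

	if (config->tx_type != HWTSTAMP_TX_OFF &&
	    config->tx_type != HWTSTAMP_TX_ON) {
		NL_SET_ERR_MSG(extack, "unsupported tx_type");
		return -ERANGE;
	}

	priv->hwtstamp_config = *config;	/* core copies the result back */
	return 0;
}

static const struct net_device_ops my_netdev_ops = {
	.ndo_hwtstamp_get	= my_hwtstamp_get,
	.ndo_hwtstamp_set	= my_hwtstamp_set,
};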
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 15c57e9517e9..13666d50b90f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -48,60 +48,43 @@
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_checksum.h>
#endif
+#include <net/page_pool/helpers.h>
#include "mlx4_en.h"
-static int mlx4_alloc_page(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_alloc *frag,
- gfp_t gfp)
-{
- struct page *page;
- dma_addr_t dma;
-
- page = alloc_page(gfp);
- if (unlikely(!page))
- return -ENOMEM;
- dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE, priv->dma_dir);
- if (unlikely(dma_mapping_error(priv->ddev, dma))) {
- __free_page(page);
- return -ENOMEM;
- }
- frag->page = page;
- frag->dma = dma;
- frag->page_offset = priv->rx_headroom;
- return 0;
-}
-
static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring,
struct mlx4_en_rx_desc *rx_desc,
struct mlx4_en_rx_alloc *frags,
gfp_t gfp)
{
+ dma_addr_t dma;
int i;
for (i = 0; i < priv->num_frags; i++, frags++) {
if (!frags->page) {
- if (mlx4_alloc_page(priv, frags, gfp)) {
+ frags->page = page_pool_alloc_pages(ring->pp, gfp);
+ if (!frags->page) {
ring->alloc_fail++;
return -ENOMEM;
}
+ page_pool_fragment_page(frags->page, 1);
+ frags->page_offset = priv->rx_headroom;
+
ring->rx_alloc_pages++;
}
- rx_desc->data[i].addr = cpu_to_be64(frags->dma +
- frags->page_offset);
+ dma = page_pool_get_dma_addr(frags->page);
+ rx_desc->data[i].addr = cpu_to_be64(dma + frags->page_offset);
}
return 0;
}
static void mlx4_en_free_frag(const struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring,
struct mlx4_en_rx_alloc *frag)
{
- if (frag->page) {
- dma_unmap_page(priv->ddev, frag->dma,
- PAGE_SIZE, priv->dma_dir);
- __free_page(frag->page);
- }
+ if (frag->page)
+ page_pool_put_full_page(ring->pp, frag->page, false);
/* We need to clear all fields, otherwise a change of priv->log_rx_info
* could lead to see garbage later in frag->page.
*/
@@ -141,18 +124,6 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
(index << ring->log_stride);
struct mlx4_en_rx_alloc *frags = ring->rx_info +
(index << priv->log_rx_info);
- if (likely(ring->page_cache.index > 0)) {
- /* XDP uses a single page per frame */
- if (!frags->page) {
- ring->page_cache.index--;
- frags->page = ring->page_cache.buf[ring->page_cache.index].page;
- frags->dma = ring->page_cache.buf[ring->page_cache.index].dma;
- }
- frags->page_offset = XDP_PACKET_HEADROOM;
- rx_desc->data[0].addr = cpu_to_be64(frags->dma +
- XDP_PACKET_HEADROOM);
- return 0;
- }
return mlx4_en_alloc_frags(priv, ring, rx_desc, frags, gfp);
}
@@ -178,7 +149,7 @@ static void mlx4_en_free_rx_desc(const struct mlx4_en_priv *priv,
frags = ring->rx_info + (index << priv->log_rx_info);
for (nr = 0; nr < priv->num_frags; nr++) {
en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
- mlx4_en_free_frag(priv, frags + nr);
+ mlx4_en_free_frag(priv, ring, frags + nr);
}
}
@@ -268,6 +239,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
u32 size, u16 stride, int node, int queue_index)
{
struct mlx4_en_dev *mdev = priv->mdev;
+ struct page_pool_params pp = {};
struct mlx4_en_rx_ring *ring;
int err = -ENOMEM;
int tmp;
@@ -286,8 +258,27 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
ring->log_stride = ffs(ring->stride) - 1;
ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
- if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index, 0) < 0)
+ pp.flags = PP_FLAG_DMA_MAP;
+ pp.pool_size = size * DIV_ROUND_UP(priv->rx_skb_size, PAGE_SIZE);
+ pp.nid = node;
+ pp.napi = &priv->rx_cq[queue_index]->napi;
+ pp.netdev = priv->dev;
+ pp.dev = &mdev->dev->persist->pdev->dev;
+ pp.dma_dir = priv->dma_dir;
+
+ ring->pp = page_pool_create(&pp);
+ if (IS_ERR(ring->pp)) {
+ err = PTR_ERR(ring->pp);
goto err_ring;
+ }
+
+ if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index, 0) < 0)
+ goto err_pp;
+
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_POOL,
+ ring->pp);
+ if (err)
+ goto err_xdp_info;
tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
sizeof(struct mlx4_en_rx_alloc));
@@ -319,6 +310,8 @@ err_info:
ring->rx_info = NULL;
err_xdp_info:
xdp_rxq_info_unreg(&ring->xdp_rxq);
+err_pp:
+ page_pool_destroy(ring->pp);
err_ring:
kfree(ring);
*pring = NULL;
@@ -409,26 +402,6 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
}
}
-/* When the rx ring is running in page-per-packet mode, a released frame can go
- * directly into a small cache, to avoid unmapping or touching the page
- * allocator. In bpf prog performance scenarios, buffers are either forwarded
- * or dropped, never converted to skbs, so every page can come directly from
- * this cache when it is sized to be a multiple of the napi budget.
- */
-bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
- struct mlx4_en_rx_alloc *frame)
-{
- struct mlx4_en_page_cache *cache = &ring->page_cache;
-
- if (cache->index >= MLX4_EN_CACHE_SIZE)
- return false;
-
- cache->buf[cache->index].page = frame->page;
- cache->buf[cache->index].dma = frame->dma;
- cache->index++;
- return true;
-}
-
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
u32 size, u16 stride)
@@ -445,6 +418,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
xdp_rxq_info_unreg(&ring->xdp_rxq);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
kvfree(ring->rx_info);
+ page_pool_destroy(ring->pp);
ring->rx_info = NULL;
kfree(ring);
*pring = NULL;
@@ -453,14 +427,6 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring)
{
- int i;
-
- for (i = 0; i < ring->page_cache.index; i++) {
- dma_unmap_page(priv->ddev, ring->page_cache.buf[i].dma,
- PAGE_SIZE, priv->dma_dir);
- put_page(ring->page_cache.buf[i].page);
- }
- ring->page_cache.index = 0;
mlx4_en_free_rx_buf(priv, ring);
if (ring->stride <= TXBB_SIZE)
ring->buf -= TXBB_SIZE;
@@ -487,7 +453,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
if (unlikely(!page))
goto fail;
- dma = frags->dma;
+ dma = page_pool_get_dma_addr(page);
dma_sync_single_range_for_cpu(priv->ddev, dma, frags->page_offset,
frag_size, priv->dma_dir);
@@ -496,8 +462,11 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
truesize += frag_info->frag_stride;
if (frag_info->frag_stride == PAGE_SIZE / 2) {
+ struct netmem_desc *desc = pp_page_to_nmdesc(page);
+
frags->page_offset ^= PAGE_SIZE / 2;
release = page_count(page) != 1 ||
+ atomic_long_read(&desc->pp_ref_count) != 1 ||
page_is_pfmemalloc(page) ||
page_to_nid(page) != numa_mem_id();
} else if (!priv->rx_headroom) {
@@ -511,10 +480,9 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
release = frags->page_offset + frag_info->frag_size > PAGE_SIZE;
}
if (release) {
- dma_unmap_page(priv->ddev, dma, PAGE_SIZE, priv->dma_dir);
frags->page = NULL;
} else {
- page_ref_inc(page);
+ page_pool_ref_page(page);
}
nr++;
@@ -784,7 +752,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Get pointer to first fragment since we haven't
* skb yet and cast it to ethhdr struct
*/
- dma = frags[0].dma + frags[0].page_offset;
+ dma = page_pool_get_dma_addr(frags[0].page);
+ dma += frags[0].page_offset;
dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
DMA_FROM_DEVICE);
@@ -823,7 +792,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
void *orig_data;
u32 act;
- dma = frags[0].dma + frags[0].page_offset;
+ dma = page_pool_get_dma_addr(frags[0].page);
+ dma += frags[0].page_offset;
dma_sync_single_for_cpu(priv->ddev, dma,
priv->frag_info[0].frag_size,
DMA_FROM_DEVICE);
@@ -886,6 +856,7 @@ xdp_drop_no_cnt:
skb = napi_get_frags(&cq->napi);
if (unlikely(!skb))
goto next;
+ skb_mark_for_recycle(skb);
if (unlikely(ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL)) {
u64 timestamp = mlx4_en_get_cqe_ts(cqe);
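
The en_rx.c conversion above retires the driver's private page allocator and XDP page cache in favour of the core page_pool API: pages are obtained with page_pool_alloc_pages(), their DMA address comes from page_pool_get_dma_addr() because the pool performs the mapping (PP_FLAG_DMA_MAP), and pages are returned with page_pool_put_full_page(). The following self-contained sketch shows that life cycle under stated assumptions; the pool sizing and the my_pp_demo() wrapper are illustrative, not values taken from the mlx4 patch.

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <net/page_pool/helpers.h>
#include <net/page_pool/types.h>

/* Sketch: create a DMA-mapping pool, run one page through it, tear down. */
static int my_pp_demo(struct device *dev, int nid)
{
	struct page_pool_params pp = {
		.flags		= PP_FLAG_DMA_MAP,	/* pool maps the pages */
		.pool_size	= 256,			/* illustrative size */
		.nid		= nid,
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
	};
	struct page_pool *pool;
	struct page *page;
	dma_addr_t dma;

	pool = page_pool_create(&pp);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	page = page_pool_alloc_pages(pool, GFP_KERNEL);
	if (page) {
		dma = page_pool_get_dma_addr(page);	/* already mapped */
		(void)dma;	/* would be written into an RX descriptor */
		page_pool_put_full_page(pool, page, false);
	}

	page_pool_destroy(pool);
	return 0;
}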
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 1ddb11cb25f9..87f35bcbeff8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -44,6 +44,7 @@
#include <linux/ipv6.h>
#include <linux/indirect_call_wrapper.h>
#include <net/ipv6.h>
+#include <net/page_pool/helpers.h>
#include "mlx4_en.h"
@@ -350,16 +351,10 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
int napi_mode)
{
struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
- struct mlx4_en_rx_alloc frame = {
- .page = tx_info->page,
- .dma = tx_info->map0_dma,
- };
-
- if (!napi_mode || !mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
- dma_unmap_page(priv->ddev, tx_info->map0_dma,
- PAGE_SIZE, priv->dma_dir);
- put_page(tx_info->page);
- }
+ struct page_pool *pool = ring->recycle_ring->pp;
+
+ /* Note that napi_mode = 0 means ndo_close() path, not budget = 0 */
+ page_pool_put_full_page(pool, tx_info->page, !!napi_mode);
return tx_info->nr_txbb;
}
@@ -450,6 +445,8 @@ int mlx4_en_process_tx_cq(struct net_device *dev,
if (unlikely(!priv->port_up))
return 0;
+ if (unlikely(!napi_budget) && cq->type == TX_XDP)
+ return 0;
netdev_txq_bql_complete_prefetchw(ring->tx_queue);
@@ -1194,7 +1191,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
data = &tx_desc->data;
- dma = frame->dma;
+ dma = page_pool_get_dma_addr(frame->page);
tx_info->page = frame->page;
frame->page = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index febeadfdd5a5..4293f8e33f44 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -49,6 +49,8 @@
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
+#include <rdma/ib_verbs.h>
+
#include "mlx4.h"
#include "fw.h"
#include "icm.h"
@@ -172,7 +174,8 @@ MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is defaul
static atomic_t pf_loading = ATOMIC_INIT(0);
static int mlx4_devlink_ierr_reset_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
ctx->val.vbool = !!mlx4_internal_err_reset;
return 0;
@@ -187,7 +190,8 @@ static int mlx4_devlink_ierr_reset_set(struct devlink *devlink, u32 id,
}
static int mlx4_devlink_crdump_snapshot_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx4_priv *priv = devlink_priv(devlink);
struct mlx4_dev *dev = &priv->dev;
@@ -1246,14 +1250,6 @@ err_out:
return err ? err : count;
}
-enum ibta_mtu {
- IB_MTU_256 = 1,
- IB_MTU_512 = 2,
- IB_MTU_1024 = 3,
- IB_MTU_2048 = 4,
- IB_MTU_4096 = 5
-};
-
static inline int int_to_ibta_mtu(int mtu)
{
switch (mtu) {
@@ -1266,7 +1262,7 @@ static inline int int_to_ibta_mtu(int mtu)
}
}
-static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
+static inline int ibta_mtu_to_int(enum ib_mtu mtu)
{
switch (mtu) {
case IB_MTU_256: return 256;
@@ -4372,7 +4368,6 @@ static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
return PCI_ERS_RESULT_RECOVERED;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index d7d856d1758a..b213094ea30f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1478,12 +1478,6 @@ void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc);
u32 mlx4_zone_alloc_entries(struct mlx4_zone_allocator *zones, u32 uid, int count,
int align, u32 skip_mask, u32 *puid);
-/* Free <count> objects, start from <obj> of the uid <uid> from zone_allocator
- * <zones>.
- */
-u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones,
- u32 uid, u32 obj, u32 count);
-
/* If <zones> was allocated with MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP, instead of
* specifying the uid when freeing an object, zone allocator could figure it by
* itself. Other parameters are similar to mlx4_zone_free.
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 28b70dcc652e..aab97694f86b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -247,20 +247,11 @@ struct mlx4_en_tx_desc {
struct mlx4_en_rx_alloc {
struct page *page;
- dma_addr_t dma;
u32 page_offset;
};
#define MLX4_EN_CACHE_SIZE (2 * NAPI_POLL_WEIGHT)
-struct mlx4_en_page_cache {
- u32 index;
- struct {
- struct page *page;
- dma_addr_t dma;
- } buf[MLX4_EN_CACHE_SIZE];
-};
-
enum {
MLX4_EN_TX_RING_STATE_RECOVERING,
};
@@ -335,14 +326,14 @@ struct mlx4_en_rx_ring {
u16 stride;
u16 log_stride;
u16 cqn; /* index of port CQ associated with this ring */
+ u8 fcs_del;
u32 prod;
u32 cons;
u32 buf_size;
- u8 fcs_del;
+ struct page_pool *pp;
void *buf;
void *rx_info;
struct bpf_prog __rcu *xdp_prog;
- struct mlx4_en_page_cache page_cache;
unsigned long bytes;
unsigned long packets;
unsigned long csum_ok;
@@ -397,7 +388,7 @@ struct mlx4_en_port_profile {
u8 num_up;
int rss_rings;
int inline_thold;
- struct hwtstamp_config hwtstamp_config;
+ struct kernel_hwtstamp_config hwtstamp_config;
};
struct mlx4_en_profile {
@@ -621,7 +612,7 @@ struct mlx4_en_priv {
bool wol;
struct device *ddev;
struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];
- struct hwtstamp_config hwtstamp_config;
+ struct kernel_hwtstamp_config hwtstamp_config;
u32 counter_index;
#ifdef CONFIG_MLX4_EN_DCB
@@ -707,8 +698,6 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
struct mlx4_en_priv *priv, unsigned int length,
int tx_ind, bool *doorbell_pending);
void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring);
-bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
- struct mlx4_en_rx_alloc *frame);
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring,
@@ -791,7 +780,7 @@ void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev);
int mlx4_en_moderation_update(struct mlx4_en_priv *priv);
int mlx4_en_reset_config(struct net_device *dev,
- struct hwtstamp_config ts_config,
+ struct kernel_hwtstamp_config *ts_config,
netdev_features_t new_features);
void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
struct mlx4_en_stats_bitmap *stats_bitmap,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index d7444782bfdd..698a5d1f0d7e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -106,7 +106,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
buddy->max_order = max_order;
spin_lock_init(&buddy->lock);
- buddy->bits = kcalloc(buddy->max_order + 1, sizeof(long *),
+ buddy->bits = kcalloc(buddy->max_order + 1, sizeof(*buddy->bits),
GFP_KERNEL);
buddy->num_free = kcalloc(buddy->max_order + 1, sizeof(*buddy->num_free),
GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 4e43f4a7d246..e3d0b13c1610 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -147,26 +147,6 @@ static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
return err;
}
-int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx)
-{
- struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
- struct mlx4_mac_table *table = &info->mac_table;
- int i;
-
- for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
- if (!table->refs[i])
- continue;
-
- if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
- *idx = i;
- return 0;
- }
- }
-
- return -ENOENT;
-}
-EXPORT_SYMBOL_GPL(mlx4_find_cached_mac);
-
static bool mlx4_need_mf_bond(struct mlx4_dev *dev)
{
int i, num_eth_ports = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index ea6070180c96..3c3e84100d5a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -8,7 +8,6 @@ config MLX5_CORE
depends on PCI
select AUXILIARY_BUS
select NET_DEVLINK
- depends on VXLAN || !VXLAN
depends on MLXFW || !MLXFW
depends on PTP_1588_CLOCK_OPTIONAL
depends on PCI_HYPERV_INTERFACE || !PCI_HYPERV_INTERFACE
@@ -31,6 +30,7 @@ config MLX5_CORE_EN
bool "Mellanox 5th generation network adapters (ConnectX series) Ethernet support"
depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE
select PAGE_POOL
+ select PAGE_POOL_STATS
select DIMLIB
help
Ethernet support in Mellanox Technologies ConnectX-4 NIC.
@@ -80,8 +80,8 @@ config MLX5_BRIDGE
default y
help
mlx5 ConnectX offloads support for Ethernet Bridging (BRIDGE).
- Enable adding representors of mlx5 uplink and VF ports to Bridge and
- offloading rules for traffic between such ports. Supports VLANs (trunk and
+ Enable offloading FDB rules from a bridge device containing
+ representors of mlx5 uplink and VF ports. Supports VLANs (trunk and
access modes).
config MLX5_CLS_ACT
@@ -207,3 +207,14 @@ config MLX5_DPLL
help
DPLL support in Mellanox Technologies ConnectX NICs.
+config MLX5_EN_PSP
+ bool "Mellanox Technologies support for PSP cryptography-offload acceleration"
+ depends on INET_PSP
+ depends on MLX5_CORE_EN
+ default y
+ help
+ mlx5 device support for Google PSP Security Protocol offload.
+ Adds support for PSP encryption offload and for SPI and key generation
+ interfaces to the PSP stack which supports PSP crypto offload.
+
+ If unsure, say Y.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 5912f7e614f9..8ffa286a18f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -17,7 +17,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
fs_counters.o fs_ft_pool.o rl.o lag/debugfs.o lag/lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \
diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o diag/reporter_vnic.o \
- fw_reset.o qos.o lib/tout.o lib/aso.o wc.o
+ fw_reset.o qos.o lib/tout.o lib/aso.o wc.o fs_pool.o lib/nv_param.o
#
# Netdev basic
@@ -29,7 +29,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en/rqt.o en/tir.o en/rss.o en/rx_res.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \
en/qos.o en/htb.o en/trap.o en/fs_tt_redirect.o en/selq.o \
- lib/crypto.o lib/sd.o
+ lib/crypto.o lib/sd.o en/pcie_cong_event.o
#
# Netdev extra
@@ -60,6 +60,7 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/a
ifneq ($(CONFIG_MLX5_TC_CT),)
mlx5_core-y += en/tc_ct.o en/tc/ct_fs_dmfs.o
mlx5_core-$(CONFIG_MLX5_SW_STEERING) += en/tc/ct_fs_smfs.o
+ mlx5_core-$(CONFIG_MLX5_HW_STEERING) += en/tc/ct_fs_hmfs.o
endif
mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o
@@ -68,7 +69,7 @@ mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o
# Core extra
#
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
- ecpf.o rdma.o esw/legacy.o \
+ ecpf.o rdma.o esw/legacy.o esw/adj_vport.o \
esw/devlink_port.o esw/vporttbl.o esw/qos.o esw/ipsec.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
@@ -84,7 +85,9 @@ mlx5_core-$(CONFIG_MLX5_BRIDGE) += esw/bridge.o esw/bridge_mcast.o esw/bridge
mlx5_core-$(CONFIG_HWMON) += hwmon.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
-mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
+ifneq ($(CONFIG_VXLAN),)
+ mlx5_core-y += lib/vxlan.o
+endif
mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += lib/hv.o lib/hv_vhca.o
@@ -109,36 +112,54 @@ mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/ktls_stats.o \
en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \
en_accel/ktls_tx.o en_accel/ktls_rx.o
-mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o \
- steering/dr_matcher.o steering/dr_rule.o \
- steering/dr_icm_pool.o steering/dr_buddy.o \
- steering/dr_ste.o steering/dr_send.o \
- steering/dr_ste_v0.o steering/dr_ste_v1.o \
- steering/dr_ste_v2.o \
- steering/dr_cmd.o steering/dr_fw.o \
- steering/dr_action.o steering/fs_dr.o \
- steering/dr_definer.o steering/dr_ptrn.o \
- steering/dr_arg.o steering/dr_dbg.o lib/smfs.o
+mlx5_core-$(CONFIG_MLX5_EN_PSP) += en_accel/psp.o en_accel/psp_rxtx.o
+
+#
+# SW Steering
+#
+mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/sws/dr_domain.o \
+ steering/sws/dr_table.o \
+ steering/sws/dr_matcher.o \
+ steering/sws/dr_rule.o \
+ steering/sws/dr_icm_pool.o \
+ steering/sws/dr_buddy.o \
+ steering/sws/dr_ste.o \
+ steering/sws/dr_send.o \
+ steering/sws/dr_ste_v0.o \
+ steering/sws/dr_ste_v1.o \
+ steering/sws/dr_ste_v2.o \
+ steering/sws/dr_ste_v3.o \
+ steering/sws/dr_cmd.o \
+ steering/sws/dr_fw.o \
+ steering/sws/dr_action.o \
+ steering/sws/dr_definer.o \
+ steering/sws/dr_ptrn.o \
+ steering/sws/dr_arg.o \
+ steering/sws/dr_dbg.o \
+ steering/sws/fs_dr.o \
+ lib/smfs.o
#
# HW Steering
#
-mlx5_core-$(CONFIG_MLX5_HW_STEERING) += steering/hws/mlx5hws_cmd.o \
- steering/hws/mlx5hws_context.o \
- steering/hws/mlx5hws_pat_arg.o \
- steering/hws/mlx5hws_buddy.o \
- steering/hws/mlx5hws_pool.o \
- steering/hws/mlx5hws_table.o \
- steering/hws/mlx5hws_action.o \
- steering/hws/mlx5hws_rule.o \
- steering/hws/mlx5hws_matcher.o \
- steering/hws/mlx5hws_send.o \
- steering/hws/mlx5hws_definer.o \
- steering/hws/mlx5hws_bwc.o \
- steering/hws/mlx5hws_debug.o \
- steering/hws/mlx5hws_vport.o \
- steering/hws/mlx5hws_bwc_complex.o
-
+mlx5_core-$(CONFIG_MLX5_HW_STEERING) += steering/hws/cmd.o \
+ steering/hws/context.o \
+ steering/hws/pat_arg.o \
+ steering/hws/buddy.o \
+ steering/hws/pool.o \
+ steering/hws/table.o \
+ steering/hws/action.o \
+ steering/hws/rule.o \
+ steering/hws/matcher.o \
+ steering/hws/send.o \
+ steering/hws/definer.o \
+ steering/hws/bwc.o \
+ steering/hws/debug.o \
+ steering/hws/vport.o \
+ steering/hws/bwc_complex.o \
+ steering/hws/fs_hws_pools.o \
+ steering/hws/fs_hws.o \
+ steering/hws/action_ste_pool.o
#
# SF device
@@ -150,5 +171,10 @@ mlx5_core-$(CONFIG_MLX5_SF) += sf/vhca_event.o sf/dev/dev.o sf/dev/driver.o irq_
#
mlx5_core-$(CONFIG_MLX5_SF_MANAGER) += sf/cmd.o sf/hw_table.o sf/devlink.o
+#
+# TPH support
+#
+mlx5_core-$(CONFIG_PCIE_TPH) += lib/st.o
+
obj-$(CONFIG_MLX5_DPLL) += mlx5_dpll.o
mlx5_dpll-y := dpll.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 6bd8a18e3af3..5b08e5ffe0e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -94,6 +94,11 @@ static u16 in_to_opcode(void *in)
return MLX5_GET(mbox_in, in, opcode);
}
+static u16 in_to_uid(void *in)
+{
+ return MLX5_GET(mbox_in, in, uid);
+}
+
/* Returns true for opcodes that might be triggered very frequently and throttle
* the command interface. Limit their command slots usage.
*/
@@ -176,6 +181,7 @@ static int cmd_alloc_index(struct mlx5_cmd *cmd, struct mlx5_cmd_work_ent *ent)
static void cmd_free_index(struct mlx5_cmd *cmd, int idx)
{
lockdep_assert_held(&cmd->alloc_lock);
+ cmd->ent_arr[idx] = NULL;
set_bit(idx, &cmd->vars.bitmask);
}
@@ -289,6 +295,10 @@ static void poll_timeout(struct mlx5_cmd_work_ent *ent)
return;
}
cond_resched();
+ if (mlx5_cmd_is_down(dev)) {
+ ent->ret = -ENXIO;
+ return;
+ }
} while (time_before(jiffies, poll_end));
ent->ret = -ETIMEDOUT;
@@ -823,7 +833,7 @@ static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
opcode = in_to_opcode(in);
op_mod = MLX5_GET(mbox_in, in, op_mod);
- uid = MLX5_GET(mbox_in, in, uid);
+ uid = in_to_uid(in);
status = MLX5_GET(mbox_out, out, status);
if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY &&
@@ -922,8 +932,7 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
static void cb_timeout_handler(struct work_struct *work)
{
- struct delayed_work *dwork = container_of(work, struct delayed_work,
- work);
+ struct delayed_work *dwork = to_delayed_work(work);
struct mlx5_cmd_work_ent *ent = container_of(dwork,
struct mlx5_cmd_work_ent,
cb_timeout_work);
@@ -1013,6 +1022,7 @@ static void cmd_work_handler(struct work_struct *work)
complete(&ent->done);
}
up(&cmd->vars.sem);
+ complete(&ent->slotted);
return;
}
} else {
@@ -1065,7 +1075,7 @@ static void cmd_work_handler(struct work_struct *work)
poll_timeout(ent);
/* make sure we read the descriptor after ownership is SW */
rmb();
- mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, (ent->ret == -ETIMEDOUT));
+ mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, !!ent->ret);
}
}
@@ -1191,6 +1201,44 @@ out_err:
return err;
}
+/* Check if all command slots are stalled (timed out and not recovered).
+ * returns true if all slots timed out on a recent command and have not been
+ * completed by FW yet. (stalled state)
+ * false otherwise (at least one slot is not stalled).
+ *
+ * In such an odd "all_stalled" situation, this serves as a protection mechanism
+ * to avoid blocking the kernel for long periods of time in case FW is not
+ * responding to commands.
+ */
+static bool mlx5_cmd_all_stalled(struct mlx5_core_dev *dev)
+{
+ struct mlx5_cmd *cmd = &dev->cmd;
+ bool all_stalled = true;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&cmd->alloc_lock, flags);
+
+ /* at least one command slot is free */
+ if (bitmap_weight(&cmd->vars.bitmask, cmd->vars.max_reg_cmds) > 0) {
+ all_stalled = false;
+ goto out;
+ }
+
+ for_each_clear_bit(i, &cmd->vars.bitmask, cmd->vars.max_reg_cmds) {
+ struct mlx5_cmd_work_ent *ent = dev->cmd.ent_arr[i];
+
+ if (!test_bit(MLX5_CMD_ENT_STATE_TIMEDOUT, &ent->state)) {
+ all_stalled = false;
+ break;
+ }
+ }
+out:
+ spin_unlock_irqrestore(&cmd->alloc_lock, flags);
+
+ return all_stalled;
+}
+
/* Notes:
* 1. Callback functions may not sleep
* 2. page queue commands do not support asynchronous completion
@@ -1221,6 +1269,15 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
if (callback && page_queue)
return -EINVAL;
+ if (!page_queue && mlx5_cmd_all_stalled(dev)) {
+ mlx5_core_err_rl(dev,
+ "All CMD slots are stalled, aborting command\n");
+ /* there's no reason to wait and block the whole kernel if FW
+ * isn't currently responding to all slots, fail immediately
+ */
+ return -EAGAIN;
+ }
+
ent = cmd_alloc_ent(cmd, in, out, uout, uout_size,
callback, context, page_queue);
if (IS_ERR(ent))
@@ -1691,6 +1748,13 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
if (test_bit(i, &vector)) {
ent = cmd->ent_arr[i];
+ if (forced && ent->ret == -ETIMEDOUT)
+ set_bit(MLX5_CMD_ENT_STATE_TIMEDOUT,
+ &ent->state);
+ else if (!forced) /* real FW completion */
+ clear_bit(MLX5_CMD_ENT_STATE_TIMEDOUT,
+ &ent->state);
+
/* if we already completed the command, ignore it */
if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP,
&ent->state)) {
@@ -1870,6 +1934,17 @@ static int is_manage_pages(void *in)
return in_to_opcode(in) == MLX5_CMD_OP_MANAGE_PAGES;
}
+static bool mlx5_has_privileged_uid(struct mlx5_core_dev *dev)
+{
+ return !xa_empty(&dev->cmd.vars.privileged_uids);
+}
+
+static bool mlx5_cmd_is_privileged_uid(struct mlx5_core_dev *dev,
+ u16 uid)
+{
+ return !!xa_load(&dev->cmd.vars.privileged_uids, uid);
+}
+
/* Notes:
* 1. Callback functions may not sleep
* 2. Page queue commands do not support asynchronous completion
@@ -1880,7 +1955,9 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
{
struct mlx5_cmd_msg *inb, *outb;
u16 opcode = in_to_opcode(in);
- bool throttle_op;
+ bool throttle_locked = false;
+ bool unpriv_locked = false;
+ u16 uid = in_to_uid(in);
int pages_queue;
gfp_t gfp;
u8 token;
@@ -1889,12 +1966,17 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode))
return -ENXIO;
- throttle_op = mlx5_cmd_is_throttle_opcode(opcode);
- if (throttle_op) {
- if (callback) {
- if (down_trylock(&dev->cmd.vars.throttle_sem))
- return -EBUSY;
- } else {
+ if (!callback) {
+ /* The semaphore is already held for callback commands. It was
+ * acquired in mlx5_cmd_exec_cb()
+ */
+ if (uid && mlx5_has_privileged_uid(dev)) {
+ if (!mlx5_cmd_is_privileged_uid(dev, uid)) {
+ unpriv_locked = true;
+ down(&dev->cmd.vars.unprivileged_sem);
+ }
+ } else if (mlx5_cmd_is_throttle_opcode(opcode)) {
+ throttle_locked = true;
down(&dev->cmd.vars.throttle_sem);
}
}
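The blocking submit path above now picks at most one gate: commands issued with a non-privileged UID share the unprivileged semaphore, privileged UIDs bypass both gates, and kernel commands keep the existing throttle-opcode check. A rough sketch of that selection, assuming a simplified model where the privileged-UID lookup is a plain boolean (hypothetical names, not the driver code):

#include <stdbool.h>
#include <stdio.h>

enum cmd_gate { GATE_NONE, GATE_UNPRIVILEGED, GATE_THROTTLE };

/* Decide which (if any) semaphore a synchronous command must take. */
static enum cmd_gate pick_gate(unsigned int uid, bool uid_is_privileged,
			       bool have_privileged_uids, bool throttle_opcode)
{
	if (uid && have_privileged_uids)
		return uid_is_privileged ? GATE_NONE : GATE_UNPRIVILEGED;
	if (throttle_opcode)
		return GATE_THROTTLE;
	return GATE_NONE;
}

int main(void)
{
	/* A user command (uid != 0) from a UID not in the privileged set. */
	printf("%d\n", pick_gate(17, false, true, false));	/* GATE_UNPRIVILEGED */
	/* A kernel command (uid == 0) with a heavy opcode. */
	printf("%d\n", pick_gate(0, false, true, true));	/* GATE_THROTTLE */
	return 0;
}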
@@ -1924,8 +2006,8 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
pages_queue, token, force_polling);
- if (callback)
- return err;
+ if (callback && !err)
+ return 0;
if (err > 0) /* Failed in FW, command didn't execute */
err = deliv_status_to_err(err);
@@ -1940,8 +2022,11 @@ out_out:
out_in:
free_msg(dev, inb);
out_up:
- if (throttle_op)
+ if (throttle_locked)
up(&dev->cmd.vars.throttle_sem);
+ if (unpriv_locked)
+ up(&dev->cmd.vars.unprivileged_sem);
+
return err;
}
@@ -2103,18 +2188,22 @@ static void mlx5_cmd_exec_cb_handler(int status, void *_work)
struct mlx5_async_work *work = _work;
struct mlx5_async_ctx *ctx;
struct mlx5_core_dev *dev;
- u16 opcode;
+ bool throttle_locked;
+ bool unpriv_locked;
ctx = work->ctx;
dev = ctx->dev;
- opcode = work->opcode;
+ throttle_locked = work->throttle_locked;
+ unpriv_locked = work->unpriv_locked;
status = cmd_status_err(dev, status, work->opcode, work->op_mod, work->out);
work->user_callback(status, work);
/* Can't access "work" from this point on. It could have been freed in
* the callback.
*/
- if (mlx5_cmd_is_throttle_opcode(opcode))
+ if (throttle_locked)
up(&dev->cmd.vars.throttle_sem);
+ if (unpriv_locked)
+ up(&dev->cmd.vars.unprivileged_sem);
if (atomic_dec_and_test(&ctx->num_inflight))
complete(&ctx->inflight_done);
}
@@ -2123,6 +2212,8 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
void *out, int out_size, mlx5_async_cbk_t callback,
struct mlx5_async_work *work)
{
+ struct mlx5_core_dev *dev = ctx->dev;
+ u16 uid;
int ret;
work->ctx = ctx;
@@ -2130,11 +2221,43 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
work->opcode = in_to_opcode(in);
work->op_mod = MLX5_GET(mbox_in, in, op_mod);
work->out = out;
+ work->throttle_locked = false;
+ work->unpriv_locked = false;
+ uid = in_to_uid(in);
+
if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
return -EIO;
- ret = cmd_exec(ctx->dev, in, in_size, out, out_size,
+
+ if (uid && mlx5_has_privileged_uid(dev)) {
+ if (!mlx5_cmd_is_privileged_uid(dev, uid)) {
+ if (down_trylock(&dev->cmd.vars.unprivileged_sem)) {
+ ret = -EBUSY;
+ goto dec_num_inflight;
+ }
+ work->unpriv_locked = true;
+ }
+ } else if (mlx5_cmd_is_throttle_opcode(in_to_opcode(in))) {
+ if (down_trylock(&dev->cmd.vars.throttle_sem)) {
+ ret = -EBUSY;
+ goto dec_num_inflight;
+ }
+ work->throttle_locked = true;
+ }
+
+ ret = cmd_exec(dev, in, in_size, out, out_size,
mlx5_cmd_exec_cb_handler, work, false);
- if (ret && atomic_dec_and_test(&ctx->num_inflight))
+ if (ret)
+ goto sem_up;
+
+ return 0;
+
+sem_up:
+ if (work->throttle_locked)
+ up(&dev->cmd.vars.throttle_sem);
+ if (work->unpriv_locked)
+ up(&dev->cmd.vars.unprivileged_sem);
+dec_num_inflight:
+ if (atomic_dec_and_test(&ctx->num_inflight))
complete(&ctx->inflight_done);
return ret;
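The asynchronous path must not sleep, so it uses down_trylock() and unwinds whatever it took if the submit fails later. A sketch of that acquire-or-busy pattern, with POSIX semaphores standing in for the kernel semaphores (hypothetical names, not the driver code):

#include <errno.h>
#include <semaphore.h>
#include <stdbool.h>

struct gate_state {
	bool unpriv_locked;
	bool throttle_locked;
};

/* Try to take the relevant gate without blocking, reporting -EBUSY instead
 * of sleeping, and remember what was taken for a later unwind. */
static int try_take_gate(sem_t *unpriv_sem, sem_t *throttle_sem,
			 bool need_unpriv, bool need_throttle,
			 struct gate_state *st)
{
	st->unpriv_locked = false;
	st->throttle_locked = false;

	if (need_unpriv) {
		if (sem_trywait(unpriv_sem))
			return -EBUSY;
		st->unpriv_locked = true;
	} else if (need_throttle) {
		if (sem_trywait(throttle_sem))
			return -EBUSY;
		st->throttle_locked = true;
	}
	return 0;
}

/* Release only what try_take_gate() actually acquired. */
static void release_gate(sem_t *unpriv_sem, sem_t *throttle_sem,
			 const struct gate_state *st)
{
	if (st->throttle_locked)
		sem_post(throttle_sem);
	if (st->unpriv_locked)
		sem_post(unpriv_sem);
}

int main(void)
{
	sem_t unpriv, throttle;
	struct gate_state st;

	sem_init(&unpriv, 0, 1);
	sem_init(&throttle, 0, 1);

	if (!try_take_gate(&unpriv, &throttle, true, false, &st)) {
		/* ... submit would happen here; on error, undo the gate ... */
		release_gate(&unpriv, &throttle, &st);
	}
	return 0;
}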
@@ -2370,10 +2493,16 @@ int mlx5_cmd_enable(struct mlx5_core_dev *dev)
sema_init(&cmd->vars.sem, cmd->vars.max_reg_cmds);
sema_init(&cmd->vars.pages_sem, 1);
sema_init(&cmd->vars.throttle_sem, DIV_ROUND_UP(cmd->vars.max_reg_cmds, 2));
+ sema_init(&cmd->vars.unprivileged_sem,
+ DIV_ROUND_UP(cmd->vars.max_reg_cmds, 2));
+
+ xa_init(&cmd->vars.privileged_uids);
cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
- if (!cmd->pool)
- return -ENOMEM;
+ if (!cmd->pool) {
+ err = -ENOMEM;
+ goto err_destroy_xa;
+ }
err = alloc_cmd_page(dev, cmd);
if (err)
@@ -2407,6 +2536,8 @@ err_cmd_page:
free_cmd_page(dev, cmd);
err_free_pool:
dma_pool_destroy(cmd->pool);
+err_destroy_xa:
+ xa_destroy(&dev->cmd.vars.privileged_uids);
return err;
}
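Both new semaphores start at half the command slots, rounded up. As a worked example (the slot count is hypothetical), max_reg_cmds = 31 gives 16 concurrent holders for both throttle_sem and unprivileged_sem:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int max_reg_cmds = 31;	/* hypothetical slot count */

	/* Both the throttle and the unprivileged semaphores start at half
	 * the command slots, rounded up: 31 slots -> 16 concurrent users. */
	printf("sem count = %u\n", DIV_ROUND_UP(max_reg_cmds, 2));
	return 0;
}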
@@ -2419,6 +2550,7 @@ void mlx5_cmd_disable(struct mlx5_core_dev *dev)
destroy_msg_cache(dev);
free_cmd_page(dev, cmd);
dma_pool_destroy(cmd->pool);
+ xa_destroy(&dev->cmd.vars.privileged_uids);
}
void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
@@ -2426,3 +2558,18 @@ void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
{
dev->cmd.state = cmdif_state;
}
+
+int mlx5_cmd_add_privileged_uid(struct mlx5_core_dev *dev, u16 uid)
+{
+ return xa_insert(&dev->cmd.vars.privileged_uids, uid,
+ xa_mk_value(uid), GFP_KERNEL);
+}
+EXPORT_SYMBOL(mlx5_cmd_add_privileged_uid);
+
+void mlx5_cmd_remove_privileged_uid(struct mlx5_core_dev *dev, u16 uid)
+{
+ void *data = xa_erase(&dev->cmd.vars.privileged_uids, uid);
+
+ WARN(!data, "Privileged UID %u does not exist\n", uid);
+}
+EXPORT_SYMBOL(mlx5_cmd_remove_privileged_uid);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 4caa1b6f40ba..60f7ab1d72e7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -66,11 +66,12 @@ void mlx5_cq_tasklet_cb(struct tasklet_struct *t)
tasklet_schedule(&ctx->task);
}
-static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq,
- struct mlx5_eqe *eqe)
+void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq,
+ struct mlx5_eqe *eqe)
{
unsigned long flags;
struct mlx5_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;
+ bool schedule_tasklet = false;
spin_lock_irqsave(&tasklet_ctx->lock, flags);
/* When migrating CQs between EQs will be implemented, please note
@@ -80,11 +81,29 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq,
*/
if (list_empty_careful(&cq->tasklet_ctx.list)) {
mlx5_cq_hold(cq);
+ /* If the tasklet CQ work list isn't empty, mlx5_cq_tasklet_cb()
+ * is scheduled/running and hasn't processed the list yet, so it
+ * will see this added CQ when it runs. If the list is empty,
+ * the tasklet needs to be scheduled to pick up the CQ. The
+ * spinlock avoids any race with the tasklet accessing the list.
+ */
+ schedule_tasklet = list_empty(&tasklet_ctx->list);
list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
}
spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
+
+ if (schedule_tasklet)
+ tasklet_schedule(&tasklet_ctx->task);
+}
+EXPORT_SYMBOL(mlx5_add_cq_to_tasklet);
+
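With mlx5_add_cq_to_tasklet() exported, the scheduling moves into the helper itself: the tasklet is kicked only on the empty-to-non-empty transition of the work list, and that transition is detected under the same spinlock the tasklet takes before draining. A small pthread-based sketch of the pattern (hypothetical names, not the driver code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct work_list {
	pthread_mutex_t lock;
	int nitems;
	int kicks;		/* stands in for tasklet_schedule() calls */
};

/* Enqueue one item and kick the consumer only on the empty->non-empty
 * transition; the check and the insertion share one lock, so at most one
 * kick is issued per batch no matter how many producers run. */
static void enqueue(struct work_list *wl)
{
	bool need_kick;

	pthread_mutex_lock(&wl->lock);
	need_kick = (wl->nitems == 0);
	wl->nitems++;
	pthread_mutex_unlock(&wl->lock);

	if (need_kick)
		wl->kicks++;	/* tasklet_schedule() in the driver */
}

int main(void)
{
	struct work_list wl = { .lock = PTHREAD_MUTEX_INITIALIZER };

	enqueue(&wl);	/* first item -> one kick */
	enqueue(&wl);	/* list already non-empty -> no extra kick */
	printf("items=%d kicks=%d\n", wl.nitems, wl.kicks);	/* 2, 1 */
	return 0;
}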
+static void mlx5_core_cq_dummy_cb(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)
+{
+ mlx5_core_err(cq->eq->core.dev,
+ "CQ default completion callback, CQ #%u\n", cq->cqn);
}
+#define MLX5_CQ_INIT_CMD_SN cpu_to_be32(2 << 28)
/* Callers must verify outbox status in case of err */
int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 *in, int inlen, u32 *out, int outlen)
@@ -110,10 +129,19 @@ int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
cq->arm_sn = 0;
cq->eq = eq;
cq->uid = MLX5_GET(create_cq_in, in, uid);
+
+ /* Kernel CQs must set the arm_db address prior to calling
+ * this function so that the proper initial value can be
+ * written here. User CQs are responsible for their own
+ * initialization since they do not use the arm_db field.
+ */
+ if (cq->arm_db)
+ *cq->arm_db = MLX5_CQ_INIT_CMD_SN;
+
refcount_set(&cq->refcount, 1);
init_completion(&cq->free);
if (!cq->comp)
- cq->comp = mlx5_add_cq_to_tasklet;
+ cq->comp = mlx5_core_cq_dummy_cb;
/* assuming CQ will be deleted before the EQ */
cq->tasklet_ctx.priv = &eq->tasklet_ctx;
INIT_LIST_HEAD(&cq->tasklet_ctx.list);
@@ -134,7 +162,6 @@ int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
mlx5_core_dbg(dev, "failed adding CP 0x%x to debug file system\n",
cq->cqn);
- cq->uar = dev->priv.uar;
cq->irqn = eq->core.irqn;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 9a79674d27f1..64c04f52990f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -228,8 +228,15 @@ enum {
MLX5_INTERFACE_PROTOCOL_VNET,
MLX5_INTERFACE_PROTOCOL_DPLL,
+ MLX5_INTERFACE_PROTOCOL_FWCTL,
};
+static bool is_fwctl_supported(struct mlx5_core_dev *dev)
+{
+ /* fwctl is most useful on PFs; prevent fwctl on SFs for now */
+ return MLX5_CAP_GEN(dev, uctx_cap) && !mlx5_core_is_sf(dev);
+}
+
static const struct mlx5_adev_device {
const char *suffix;
bool (*is_supported)(struct mlx5_core_dev *dev);
@@ -252,6 +259,8 @@ static const struct mlx5_adev_device {
.is_supported = &is_mp_supported },
[MLX5_INTERFACE_PROTOCOL_DPLL] = { .suffix = "dpll",
.is_supported = &is_dpll_supported },
+ [MLX5_INTERFACE_PROTOCOL_FWCTL] = { .suffix = "fwctl",
+ .is_supported = &is_fwctl_supported },
};
int mlx5_adev_idx_alloc(void)
@@ -555,10 +564,14 @@ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev)
{
- u64 fsystem_guid, psystem_guid;
+ u8 fsystem_guid[MLX5_SW_IMAGE_GUID_MAX_BYTES];
+ u8 psystem_guid[MLX5_SW_IMAGE_GUID_MAX_BYTES];
+ u8 flen;
+ u8 plen;
- fsystem_guid = mlx5_query_nic_system_image_guid(dev);
- psystem_guid = mlx5_query_nic_system_image_guid(peer_dev);
+ mlx5_query_nic_sw_system_image_guid(dev, fsystem_guid, &flen);
+ mlx5_query_nic_sw_system_image_guid(peer_dev, psystem_guid, &plen);
- return (fsystem_guid && psystem_guid && fsystem_guid == psystem_guid);
+ return plen && flen && flen == plen &&
+ !memcmp(fsystem_guid, psystem_guid, flen);
}
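mlx5_same_hw_devs() now compares variable-length software image GUIDs instead of a single u64: both lengths must be non-zero and equal before the bytes are compared. A compact sketch of the same predicate (the buffers below are made up):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Two devices share hardware only if both report a GUID, the reported
 * lengths match, and the bytes themselves match. */
static bool same_hw(const unsigned char *a, unsigned int alen,
		    const unsigned char *b, unsigned int blen)
{
	return alen && blen && alen == blen && !memcmp(a, b, alen);
}

int main(void)
{
	unsigned char g1[] = { 0xde, 0xad, 0xbe, 0xef };
	unsigned char g2[] = { 0xde, 0xad, 0xbe, 0xef };

	printf("%d\n", same_hw(g1, sizeof(g1), g2, sizeof(g2)));	/* 1 */
	printf("%d\n", same_hw(g1, sizeof(g1), g2, 0));			/* 0 */
	return 0;
}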
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 98d4306929f3..887adf4807d1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -10,6 +10,7 @@
#include "esw/qos.h"
#include "sf/dev/dev.h"
#include "sf/sf.h"
+#include "lib/nv_param.h"
static int mlx5_devlink_flash_update(struct devlink *devlink,
struct devlink_flash_update_params *params,
@@ -35,6 +36,55 @@ static u16 mlx5_fw_ver_subminor(u32 version)
return version & 0xffff;
}
+static int mlx5_devlink_serial_numbers_put(struct mlx5_core_dev *dev,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct pci_dev *pdev = dev->pdev;
+ unsigned int vpd_size, kw_len;
+ char *str, *end;
+ u8 *vpd_data;
+ int err = 0;
+ int start;
+
+ vpd_data = pci_vpd_alloc(pdev, &vpd_size);
+ if (IS_ERR(vpd_data))
+ return 0;
+
+ start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len);
+ if (start >= 0) {
+ str = kstrndup(vpd_data + start, kw_len, GFP_KERNEL);
+ if (!str) {
+ err = -ENOMEM;
+ goto end;
+ }
+ end = strchrnul(str, ' ');
+ *end = '\0';
+ err = devlink_info_board_serial_number_put(req, str);
+ kfree(str);
+ if (err)
+ goto end;
+ }
+
+ start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, "V3", &kw_len);
+ if (start >= 0) {
+ str = kstrndup(vpd_data + start, kw_len, GFP_KERNEL);
+ if (!str) {
+ err = -ENOMEM;
+ goto end;
+ }
+ err = devlink_info_serial_number_put(req, str);
+ kfree(str);
+ if (err)
+ goto end;
+ }
+
+end:
+ kfree(vpd_data);
+ return err;
+}
+
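The devlink info callback above pulls the board serial number out of the PCI VPD read-only section and truncates it at the first space before reporting it. A userspace sketch of that copy-and-truncate step, using strndup()/strchr() in place of kstrndup()/strchrnul() (the serial string below is made up):

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Copy a VPD keyword value of length kw_len and cut it at the first
 * space, mirroring the kstrndup() + strchrnul() step in the driver. */
static char *copy_serial(const char *vpd_value, size_t kw_len)
{
	char *str = strndup(vpd_value, kw_len);
	char *end;

	if (!str)
		return NULL;
	end = strchr(str, ' ');
	if (end)
		*end = '\0';
	return str;
}

int main(void)
{
	char *sn = copy_serial("MT2314XZ0042   padding", 22);

	if (sn) {
		printf("board serial: %s\n", sn);	/* "MT2314XZ0042" */
		free(sn);
	}
	return 0;
}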
#define DEVLINK_FW_STRING_LEN 32
static int
@@ -46,6 +96,13 @@ mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
u32 running_fw, stored_fw;
int err;
+ if (!mlx5_core_is_pf(dev))
+ return 0;
+
+ err = mlx5_devlink_serial_numbers_put(dev, req, extack);
+ if (err)
+ return err;
+
err = devlink_info_version_fixed_put(req, "fw.psid", dev->board_id);
if (err)
return err;
@@ -104,7 +161,7 @@ static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netli
if (err)
return err;
- mlx5_unload_one_devl_locked(dev, true);
+ mlx5_sync_reset_unload_flow(dev, true);
err = mlx5_health_wait_pci_up(dev);
if (err)
NL_SET_ERR_MSG_MOD(extack, "FW activate aborted, PCI reads fail after reset");
@@ -147,11 +204,6 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
return 0;
}
- if (mlx5_lag_is_active(dev)) {
- NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode");
- return -EOPNOTSUPP;
- }
-
if (mlx5_core_is_mp_slave(dev)) {
NL_SET_ERR_MSG_MOD(extack, "reload is unsupported for multi port slave");
return -EOPNOTSUPP;
@@ -320,11 +372,14 @@ static const struct devlink_ops mlx5_devlink_ops = {
.eswitch_encap_mode_get = mlx5_devlink_eswitch_encap_mode_get,
.rate_leaf_tx_share_set = mlx5_esw_devlink_rate_leaf_tx_share_set,
.rate_leaf_tx_max_set = mlx5_esw_devlink_rate_leaf_tx_max_set,
+ .rate_leaf_tc_bw_set = mlx5_esw_devlink_rate_leaf_tc_bw_set,
+ .rate_node_tc_bw_set = mlx5_esw_devlink_rate_node_tc_bw_set,
.rate_node_tx_share_set = mlx5_esw_devlink_rate_node_tx_share_set,
.rate_node_tx_max_set = mlx5_esw_devlink_rate_node_tx_max_set,
.rate_node_new = mlx5_esw_devlink_rate_node_new,
.rate_node_del = mlx5_esw_devlink_rate_node_del,
- .rate_leaf_parent_set = mlx5_esw_devlink_rate_parent_set,
+ .rate_leaf_parent_set = mlx5_esw_devlink_rate_leaf_parent_set,
+ .rate_node_parent_set = mlx5_esw_devlink_rate_node_parent_set,
#endif
#ifdef CONFIG_MLX5_SF_MANAGER
.port_new = mlx5_devlink_sf_port_new,
@@ -475,6 +530,25 @@ mlx5_devlink_hairpin_queue_size_validate(struct devlink *devlink, u32 id,
return 0;
}
+static int mlx5_devlink_num_doorbells_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *mdev = devlink_priv(devlink);
+ u32 val32 = val.vu32;
+ u32 max_num_channels;
+
+ max_num_channels = mlx5e_get_max_num_channels(mdev);
+ if (val32 > max_num_channels) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Requested num_doorbells (%u) exceeds max number of channels (%u)",
+ val32, max_num_channels);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static void mlx5_devlink_hairpin_params_init_values(struct devlink *devlink)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
@@ -554,6 +628,9 @@ static const struct devlink_param mlx5_devlink_eth_params[] = {
"hairpin_queue_size", DEVLINK_PARAM_TYPE_U32,
BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
mlx5_devlink_hairpin_queue_size_validate),
+ DEVLINK_PARAM_GENERIC(NUM_DOORBELLS,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
+ mlx5_devlink_num_doorbells_validate),
};
static int mlx5_devlink_eth_params_register(struct devlink *devlink)
@@ -577,6 +654,10 @@ static int mlx5_devlink_eth_params_register(struct devlink *devlink)
mlx5_devlink_hairpin_params_init_values(devlink);
+ value.vu32 = MLX5_DEFAULT_NUM_DOORBELLS;
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_NUM_DOORBELLS,
+ value);
return 0;
}
@@ -591,6 +672,105 @@ static void mlx5_devlink_eth_params_unregister(struct devlink *devlink)
ARRAY_SIZE(mlx5_devlink_eth_params));
}
+#define MLX5_PCIE_CONG_THRESH_MAX 10000
+#define MLX5_PCIE_CONG_THRESH_DEF_LOW 7500
+#define MLX5_PCIE_CONG_THRESH_DEF_HIGH 9000
+
+static int
+mlx5_devlink_pcie_cong_thresh_validate(struct devlink *devl, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ if (val.vu16 > MLX5_PCIE_CONG_THRESH_MAX) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Value %u > max supported (%u)",
+ val.vu16, MLX5_PCIE_CONG_THRESH_MAX);
+
+ return -EINVAL;
+ }
+
+ switch (id) {
+ case MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_LOW:
+ case MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_HIGH:
+ case MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_LOW:
+ case MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_HIGH:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void mlx5_devlink_pcie_cong_init_values(struct devlink *devlink)
+{
+ union devlink_param_value value;
+ u32 id;
+
+ value.vu16 = MLX5_PCIE_CONG_THRESH_DEF_LOW;
+ id = MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_LOW;
+ devl_param_driverinit_value_set(devlink, id, value);
+
+ value.vu16 = MLX5_PCIE_CONG_THRESH_DEF_HIGH;
+ id = MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_HIGH;
+ devl_param_driverinit_value_set(devlink, id, value);
+
+ value.vu16 = MLX5_PCIE_CONG_THRESH_DEF_LOW;
+ id = MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_LOW;
+ devl_param_driverinit_value_set(devlink, id, value);
+
+ value.vu16 = MLX5_PCIE_CONG_THRESH_DEF_HIGH;
+ id = MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_HIGH;
+ devl_param_driverinit_value_set(devlink, id, value);
+}
+
+static const struct devlink_param mlx5_devlink_pcie_cong_params[] = {
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_LOW,
+ "pcie_cong_inbound_low", DEVLINK_PARAM_TYPE_U16,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
+ mlx5_devlink_pcie_cong_thresh_validate),
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_HIGH,
+ "pcie_cong_inbound_high", DEVLINK_PARAM_TYPE_U16,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
+ mlx5_devlink_pcie_cong_thresh_validate),
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_LOW,
+ "pcie_cong_outbound_low", DEVLINK_PARAM_TYPE_U16,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
+ mlx5_devlink_pcie_cong_thresh_validate),
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_HIGH,
+ "pcie_cong_outbound_high", DEVLINK_PARAM_TYPE_U16,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
+ mlx5_devlink_pcie_cong_thresh_validate),
+};
+
+static int mlx5_devlink_pcie_cong_params_register(struct devlink *devlink)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ int err;
+
+ if (!mlx5_pcie_cong_event_supported(dev))
+ return 0;
+
+ err = devl_params_register(devlink, mlx5_devlink_pcie_cong_params,
+ ARRAY_SIZE(mlx5_devlink_pcie_cong_params));
+ if (err)
+ return err;
+
+ mlx5_devlink_pcie_cong_init_values(devlink);
+
+ return 0;
+}
+
+static void mlx5_devlink_pcie_cong_params_unregister(struct devlink *devlink)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ if (!mlx5_pcie_cong_event_supported(dev))
+ return;
+
+ devl_params_unregister(devlink, mlx5_devlink_pcie_cong_params,
+ ARRAY_SIZE(mlx5_devlink_pcie_cong_params));
+}
+
static int mlx5_devlink_enable_rdma_validate(struct devlink *devlink, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack)
@@ -836,8 +1016,20 @@ int mlx5_devlink_params_register(struct devlink *devlink)
if (err)
goto max_uc_list_err;
+ err = mlx5_devlink_pcie_cong_params_register(devlink);
+ if (err)
+ goto pcie_cong_err;
+
+ err = mlx5_nv_param_register_dl_params(devlink);
+ if (err)
+ goto nv_param_err;
+
return 0;
+nv_param_err:
+ mlx5_devlink_pcie_cong_params_unregister(devlink);
+pcie_cong_err:
+ mlx5_devlink_max_uc_list_params_unregister(devlink);
max_uc_list_err:
mlx5_devlink_auxdev_params_unregister(devlink);
auxdev_reg_err:
@@ -848,6 +1040,8 @@ auxdev_reg_err:
void mlx5_devlink_params_unregister(struct devlink *devlink)
{
+ mlx5_nv_param_unregister_dl_params(devlink);
+ mlx5_devlink_pcie_cong_params_unregister(devlink);
mlx5_devlink_max_uc_list_params_unregister(devlink);
mlx5_devlink_auxdev_params_unregister(devlink);
devl_params_unregister(devlink, mlx5_devlink_params,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
index 961f75da6227..43b9bf8829cf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
@@ -22,6 +22,12 @@ enum mlx5_devlink_param_id {
MLX5_DEVLINK_PARAM_ID_ESW_MULTIPORT,
MLX5_DEVLINK_PARAM_ID_HAIRPIN_NUM_QUEUES,
MLX5_DEVLINK_PARAM_ID_HAIRPIN_QUEUE_SIZE,
+ MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_LOW,
+ MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_HIGH,
+ MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_LOW,
+ MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_HIGH,
+ MLX5_DEVLINK_PARAM_ID_CQE_COMPRESSION_TYPE,
+ MLX5_DEVLINK_PARAM_ID_SWP_L4_CSUM_MODE,
};
struct mlx5_trap_ctx {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
index 9aed29fa4900..d6e736c1fb24 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
@@ -292,7 +292,7 @@ TRACE_EVENT(mlx5_fs_add_rule,
if (rule->dest_attr.type &
MLX5_FLOW_DESTINATION_TYPE_COUNTER)
__entry->counter_id =
- rule->dest_attr.counter_id;
+ mlx5_fc_id(rule->dest_attr.counter);
),
TP_printk("rule=%p fte=%p index=%u sw_action=<%s> [dst] %s\n",
__entry->rule, __entry->fte, __entry->index,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 080e7eab52c7..7bcf822a89f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -54,7 +54,7 @@ static int mlx5_query_mtrc_caps(struct mlx5_fw_tracer *tracer)
if (!MLX5_GET(mtrc_cap, out, trace_to_memory)) {
mlx5_core_dbg(dev, "FWTracer: Device does not support logging traces to memory\n");
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
tracer->trc_ver = MLX5_GET(mtrc_cap, out, trc_ver);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
index c7216e84ef8c..7cae0c6e5e8a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. */
+#include <linux/mlx5/vport.h>
+
#include "reporter_vnic.h"
#include "en_stats.h"
#include "devlink.h"
@@ -13,6 +15,50 @@ struct mlx5_vnic_diag_stats {
__be64 query_vnic_env_out[MLX5_ST_SZ_QW(query_vnic_env_out)];
};
+static void mlx5_reporter_vnic_diagnose_counter_icm(struct mlx5_core_dev *dev,
+ struct devlink_fmsg *fmsg,
+ u16 vport_num, bool other_vport)
+{
+ u32 out_icm_reg[MLX5_ST_SZ_DW(vhca_icm_ctrl_reg)] = {};
+ u32 in_icm_reg[MLX5_ST_SZ_DW(vhca_icm_ctrl_reg)] = {};
+ u32 out_reg[MLX5_ST_SZ_DW(nic_cap_reg)] = {};
+ u32 in_reg[MLX5_ST_SZ_DW(nic_cap_reg)] = {};
+ u32 cur_alloc_icm;
+ int vhca_icm_ctrl;
+ u16 vhca_id;
+ int err;
+
+ err = mlx5_core_access_reg(dev, in_reg, sizeof(in_reg), out_reg,
+ sizeof(out_reg), MLX5_REG_NIC_CAP, 0, 0);
+ if (err) {
+ mlx5_core_warn(dev, "Reading nic_cap_reg failed. err = %d\n", err);
+ return;
+ }
+ vhca_icm_ctrl = MLX5_GET(nic_cap_reg, out_reg, vhca_icm_ctrl);
+ if (!vhca_icm_ctrl)
+ return;
+
+ MLX5_SET(vhca_icm_ctrl_reg, in_icm_reg, vhca_id_valid, other_vport);
+ if (other_vport) {
+ err = mlx5_vport_get_vhca_id(dev, vport_num, &vhca_id);
+ if (err) {
+ mlx5_core_warn(dev, "vport to vhca_id failed. vport_num = %d, err = %d\n",
+ vport_num, err);
+ return;
+ }
+ MLX5_SET(vhca_icm_ctrl_reg, in_icm_reg, vhca_id, vhca_id);
+ }
+ err = mlx5_core_access_reg(dev, in_icm_reg, sizeof(in_icm_reg),
+ out_icm_reg, sizeof(out_icm_reg),
+ MLX5_REG_VHCA_ICM_CTRL, 0, 0);
+ if (err) {
+ mlx5_core_warn(dev, "Reading vhca_icm_ctrl failed. err = %d\n", err);
+ return;
+ }
+ cur_alloc_icm = MLX5_GET(vhca_icm_ctrl_reg, out_icm_reg, cur_alloc_icm);
+ devlink_fmsg_u32_pair_put(fmsg, "icm_consumption", cur_alloc_icm);
+}
+
void mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev,
struct devlink_fmsg *fmsg,
u16 vport_num, bool other_vport)
@@ -59,6 +105,17 @@ void mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev,
devlink_fmsg_u64_pair_put(fmsg, "handled_pkt_steering_fail",
VNIC_ENV_GET64(&vnic, handled_pkt_steering_fail));
}
+ if (MLX5_CAP_GEN(dev, nic_cap_reg))
+ mlx5_reporter_vnic_diagnose_counter_icm(dev, fmsg, vport_num, other_vport);
+ if (MLX5_CAP_GEN(dev, vnic_env_cnt_bar_uar_access))
+ devlink_fmsg_u32_pair_put(fmsg, "bar_uar_access",
+ VNIC_ENV_GET(&vnic, bar_uar_access));
+ if (MLX5_CAP_GEN(dev, vnic_env_cnt_odp_page_fault)) {
+ devlink_fmsg_u32_pair_put(fmsg, "odp_local_triggered_page_fault",
+ VNIC_ENV_GET(&vnic, odp_local_triggered_page_fault));
+ devlink_fmsg_u32_pair_put(fmsg, "odp_remote_triggered_page_fault",
+ VNIC_ENV_GET(&vnic, odp_remote_triggered_page_fault));
+ }
devlink_fmsg_obj_nest_end(fmsg);
devlink_fmsg_pair_nest_end(fmsg);
@@ -87,11 +144,11 @@ void mlx5_reporter_vnic_create(struct mlx5_core_dev *dev)
health->vnic_reporter =
devlink_health_reporter_create(devlink,
&mlx5_reporter_vnic_ops,
- 0, dev);
+ dev);
if (IS_ERR(health->vnic_reporter))
mlx5_core_warn(dev,
- "Failed to create vnic reporter, err = %ld\n",
- PTR_ERR(health->vnic_reporter));
+ "Failed to create vnic reporter, err = %pe\n",
+ health->vnic_reporter);
}
void mlx5_reporter_vnic_destroy(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
index 31142f6cc372..1e5522a19483 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
@@ -242,7 +242,7 @@ static int mlx5_dpll_clock_quality_level_get(const struct dpll_device *dpll,
return 0;
}
errout:
- NL_SET_ERR_MSG_MOD(extack, "Invalid clock quality level obtained from firmware\n");
+ NL_SET_ERR_MSG_MOD(extack, "Invalid clock quality level obtained from firmware");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 57b7298a0e79..811178d8976c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -47,6 +47,7 @@
#include <linux/rhashtable.h>
#include <net/udp_tunnel.h>
#include <net/switchdev.h>
+#include <net/psp/types.h>
#include <net/xdp.h>
#include <linux/dim.h>
#include <linux/bits.h>
@@ -68,7 +69,7 @@ struct page_pool;
#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
#define MLX5E_METADATA_ETHER_LEN 8
-#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
+#define MLX5E_ETH_HARD_MTU (ETH_HLEN + PSP_ENCAP_HLEN + PSP_TRL_SIZE + VLAN_HLEN + ETH_FCS_LEN)
#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))
@@ -83,9 +84,11 @@ struct page_pool;
#define MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE (8)
#define MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE (9)
#define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE (PAGE_SIZE >> MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE)
-#define MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE (64)
-#define MLX5E_SHAMPO_WQ_RESRV_SIZE (64 * 1024)
-#define MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE (4096)
+#define MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE (PAGE_SHIFT - MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE)
+#define MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE_SHIFT (6)
+#define MLX5E_SHAMPO_WQ_RESRV_SIZE_BASE_SHIFT (12)
+#define MLX5E_SHAMPO_WQ_LOG_RESRV_SIZE (16)
+#define MLX5E_SHAMPO_WQ_RESRV_SIZE BIT(MLX5E_SHAMPO_WQ_LOG_RESRV_SIZE)
#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
(6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
@@ -94,8 +97,6 @@ struct page_pool;
#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \
MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))
-#define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18
-
/* Keep in sync with mlx5e_mpwrq_log_wqe_sz.
* These are theoretical maximums, which can be further restricted by
* capabilities. These values are used for static resource allocations and
@@ -231,16 +232,22 @@ struct mlx5e_rx_wqe_cyc {
DECLARE_FLEX_ARRAY(struct mlx5_wqe_data_seg, data);
};
-struct mlx5e_umr_wqe {
+struct mlx5e_umr_wqe_hdr {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_umr_ctrl_seg uctrl;
struct mlx5_mkey_seg mkc;
+};
+
+struct mlx5e_umr_wqe {
+ struct mlx5e_umr_wqe_hdr hdr;
union {
DECLARE_FLEX_ARRAY(struct mlx5_mtt, inline_mtts);
DECLARE_FLEX_ARRAY(struct mlx5_klm, inline_klms);
DECLARE_FLEX_ARRAY(struct mlx5_ksm, inline_ksms);
};
};
+static_assert(offsetof(struct mlx5e_umr_wqe, inline_mtts) == sizeof(struct mlx5e_umr_wqe_hdr),
+ "struct members should be included in struct mlx5e_umr_wqe_hdr, not in struct mlx5e_umr_wqe");
enum mlx5e_priv_flag {
MLX5E_PFLAG_RX_CQE_BASED_MODER,
@@ -273,10 +280,6 @@ enum packet_merge {
struct mlx5e_packet_merge_param {
enum packet_merge type;
u32 timeout;
- struct {
- u8 match_criteria_type;
- u8 alignment_granularity;
- } shampo;
};
struct mlx5e_params {
@@ -342,6 +345,7 @@ struct mlx5e_cq {
/* data path - accessed per napi poll */
u16 event_ctr;
struct napi_struct *napi;
+ struct mlx5_uars_page *uar;
struct mlx5_core_cq mcq;
struct mlx5e_ch_stats *ch_stats;
@@ -373,7 +377,7 @@ struct mlx5e_sq_dma {
enum mlx5e_dma_map_type type;
};
-/* Keep this enum consistent with with the corresponding strings array
+/* Keep this enum consistent with the corresponding strings array
* declared in en/reporter_tx.c
*/
enum {
@@ -382,10 +386,8 @@ enum {
MLX5E_SQ_STATE_RECOVERING,
MLX5E_SQ_STATE_IPSEC,
MLX5E_SQ_STATE_DIM,
- MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
MLX5E_SQ_STATE_PENDING_XSK_TX,
MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC,
- MLX5E_SQ_STATE_XDP_MULTIBUF,
MLX5E_NUM_SQ_STATES, /* Must be kept last */
};
@@ -394,6 +396,7 @@ struct mlx5e_tx_mpwqe {
struct mlx5e_tx_wqe *wqe;
u32 bytes_count;
u8 ds_count;
+ u8 ds_count_max;
u8 pkt_count;
u8 inline_on;
};
@@ -515,6 +518,12 @@ struct mlx5e_xdpsq {
struct mlx5e_channel *channel;
} ____cacheline_aligned_in_smp;
+struct mlx5e_xdp_buff {
+ struct xdp_buff xdp;
+ struct mlx5_cqe64 *cqe;
+ struct mlx5e_rq *rq;
+};
+
struct mlx5e_ktls_resync_resp;
struct mlx5e_icosq {
@@ -546,7 +555,7 @@ struct mlx5e_icosq {
} ____cacheline_aligned_in_smp;
struct mlx5e_frag_page {
- struct page *page;
+ netmem_ref netmem;
u16 frags;
};
@@ -623,17 +632,16 @@ struct mlx5e_dma_info {
};
struct mlx5e_shampo_hd {
- u32 mkey;
- struct mlx5e_dma_info *info;
struct mlx5e_frag_page *pages;
- u16 curr_page_index;
u32 hd_per_wq;
+ u32 hd_per_page;
u16 hd_per_wqe;
+ u8 log_hd_per_page;
+ u8 log_hd_entry_size;
unsigned long *bitmap;
u16 pi;
u16 ci;
- __be32 key;
- u64 last_addr;
+ __be32 mkey_be;
};
struct mlx5e_hw_gro_data {
@@ -661,7 +669,7 @@ struct mlx5e_rq {
} wqe;
struct {
struct mlx5_wq_ll wq;
- struct mlx5e_umr_wqe umr_wqe;
+ struct mlx5e_umr_wqe_hdr umr_wqe;
struct mlx5e_mpw_info *info;
mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
__be32 umr_mkey_be;
@@ -691,7 +699,7 @@ struct mlx5e_rq {
struct mlx5e_rq_stats *stats;
struct mlx5e_cq cq;
struct mlx5e_cq_decomp cqd;
- struct hwtstamp_config *tstamp;
+ struct kernel_hwtstamp_config *hwtstamp_config;
struct mlx5_clock *clock;
struct mlx5e_icosq *icosq;
struct mlx5e_priv *priv;
@@ -712,12 +720,18 @@ struct mlx5e_rq {
struct bpf_prog __rcu *xdp_prog;
struct mlx5e_xdpsq *xdpsq;
DECLARE_BITMAP(flags, 8);
+
+ /* page pools */
struct page_pool *page_pool;
+ struct page_pool *hd_page_pool;
+
+ struct mlx5e_xdp_buff mxbuf;
/* AF_XDP zero-copy */
struct xsk_buff_pool *xsk_pool;
struct work_struct recover_work;
+ struct work_struct rx_timeout_work;
/* control */
struct mlx5_wq_ctrl wq_ctrl;
@@ -755,7 +769,7 @@ struct mlx5e_channel {
u8 lag_port;
/* XDP_REDIRECT */
- struct mlx5e_xdpsq xdpsq;
+ struct mlx5e_xdpsq *xdpsq;
/* AF_XDP zero-copy */
struct mlx5e_rq xskrq;
@@ -773,12 +787,12 @@ struct mlx5e_channel {
/* control */
struct mlx5e_priv *priv;
struct mlx5_core_dev *mdev;
- struct hwtstamp_config *tstamp;
DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
int ix;
int vec_ix;
int sd_ix;
int cpu;
+ struct mlx5_sq_bfreg *bfreg;
/* Sync between icosq recovery and XSK enable/disable. */
struct mutex icosq_recovery_lock;
@@ -906,12 +920,14 @@ struct mlx5e_priv {
u8 max_opened_tc;
bool tx_ptp_opened;
bool rx_ptp_opened;
- struct hwtstamp_config tstamp;
+ struct kernel_hwtstamp_config hwtstamp_config;
u16 q_counter[MLX5_SD_MAX_GROUP_SZ];
u16 drop_rq_q_counter;
struct notifier_block events_nb;
struct notifier_block blocking_events_nb;
+ struct mlx5e_pcie_cong_event *cong_event;
+
struct udp_tunnel_nic_info nic_info;
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_dcbx dcbx;
@@ -925,6 +941,9 @@ struct mlx5e_priv {
#ifdef CONFIG_MLX5_EN_IPSEC
struct mlx5e_ipsec *ipsec;
#endif
+#ifdef CONFIG_MLX5_EN_PSP
+ struct mlx5e_psp *psp;
+#endif
#ifdef CONFIG_MLX5_EN_TLS
struct mlx5e_tls *tls;
#endif
@@ -939,6 +958,7 @@ struct mlx5e_priv {
struct mlx5e_mqprio_rl *mqprio_rl;
struct dentry *dfs_root;
struct mlx5_devcom_comp_dev *devcom;
+ struct ethtool_fec_hist_range *fec_ranges;
};
struct mlx5e_dev {
@@ -1009,8 +1029,11 @@ void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
u64 *buf);
void mlx5e_set_rx_mode_work(struct work_struct *work);
-int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
-int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
+int mlx5e_hwtstamp_set(struct mlx5e_priv *priv,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+int mlx5e_hwtstamp_get(struct mlx5e_priv *priv,
+ struct kernel_hwtstamp_config *config);
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val, bool rx_filter);
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
@@ -1049,6 +1072,7 @@ struct mlx5e_create_cq_param {
struct mlx5e_ch_stats *ch_stats;
int node;
int ix;
+ struct mlx5_uars_page *uar;
};
struct mlx5e_cq_param;
@@ -1135,7 +1159,9 @@ extern const struct ethtool_ops mlx5e_ethtool_ops;
int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
-int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
+int mlx5e_modify_tirs_lb(struct mlx5_core_dev *mdev, bool enable_uc_lb,
+ bool enable_mc_lb);
+int mlx5e_refresh_tirs(struct mlx5_core_dev *mdev, bool enable_uc_lb,
bool enable_mc_lb);
void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc);
@@ -1223,7 +1249,7 @@ void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv);
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu);
-void mlx5e_set_xdp_feature(struct net_device *netdev);
+void mlx5e_set_xdp_feature(struct mlx5e_priv *priv);
netdev_features_t mlx5e_features_check(struct sk_buff *skb,
struct net_device *netdev,
netdev_features_t features);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
index b59aee75de94..2c98a5299df3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
@@ -26,7 +26,6 @@ struct mlx5e_dcbx {
u8 cap;
/* Buffer configuration */
- bool manual_buffer;
u32 cable_len;
u32 xoff;
u16 port_buff_cell_sz;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
index 0b1ac6e5c890..8818f65d1fbc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
@@ -40,11 +40,8 @@ void mlx5e_destroy_devlink(struct mlx5e_dev *mlx5e_dev)
static void
mlx5e_devlink_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_id *ppid)
{
- u64 parent_id;
-
- parent_id = mlx5_query_nic_system_image_guid(dev);
- ppid->id_len = sizeof(parent_id);
- memcpy(ppid->id, &parent_id, sizeof(parent_id));
+ BUILD_BUG_ON(MLX5_SW_IMAGE_GUID_MAX_BYTES > MAX_PHYS_ITEM_ID_LEN);
+ mlx5_query_nic_sw_system_image_guid(dev, ppid->id, &ppid->id_len);
}
int mlx5e_devlink_port_register(struct mlx5e_dev *mlx5e_dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 1e8b7d330701..c3408b3f7010 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -18,7 +18,8 @@ enum {
enum {
MLX5E_TC_PRIO = 0,
- MLX5E_NIC_PRIO
+ MLX5E_PROMISC_PRIO,
+ MLX5E_NIC_PRIO,
};
struct mlx5e_flow_table {
@@ -56,7 +57,7 @@ struct mlx5e_l2_table {
bool promisc_enabled;
};
-#define MLX5E_NUM_INDIR_TIRS (MLX5_NUM_TT - 1)
+#define MLX5E_NUM_INDIR_TIRS (MLX5_NUM_INDIR_TIRS)
#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
MLX5_HASH_FIELD_SEL_DST_IP)
@@ -68,9 +69,13 @@ struct mlx5e_l2_table {
MLX5_HASH_FIELD_SEL_DST_IP |\
MLX5_HASH_FIELD_SEL_IPSEC_SPI)
-/* NIC prio FTS */
+/* NIC promisc FT level */
enum {
MLX5E_PROMISC_FT_LEVEL,
+};
+
+/* NIC prio FTS */
+enum {
MLX5E_VLAN_FT_LEVEL,
MLX5E_L2_FT_LEVEL,
MLX5E_TTC_FT_LEVEL,
@@ -83,10 +88,11 @@ enum {
#ifdef CONFIG_MLX5_EN_ARFS
MLX5E_ARFS_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
#endif
-#ifdef CONFIG_MLX5_EN_IPSEC
- MLX5E_ACCEL_FS_POL_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
- MLX5E_ACCEL_FS_ESP_FT_LEVEL,
+#if defined(CONFIG_MLX5_EN_IPSEC) || defined(CONFIG_MLX5_EN_PSP)
+ MLX5E_ACCEL_FS_ESP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
+ MLX5E_ACCEL_FS_POL_FT_LEVEL,
+ MLX5E_ACCEL_FS_POL_MISS_FT_LEVEL,
MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
#endif
};
@@ -126,7 +132,8 @@ struct mlx5e_ptp_fs;
void mlx5e_set_ttc_params(struct mlx5e_flow_steering *fs,
struct mlx5e_rx_res *rx_res,
- struct ttc_params *ttc_params, bool tunnel);
+ struct ttc_params *ttc_params, bool tunnel,
+ bool ipsec_rss);
void mlx5e_destroy_ttc_table(struct mlx5e_flow_steering *fs);
int mlx5e_create_ttc_table(struct mlx5e_flow_steering *fs,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h
index 9e276fd3c0cf..c21fe36527a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h
@@ -11,6 +11,11 @@ int mlx5e_ethtool_alloc(struct mlx5e_ethtool_steering **ethtool);
void mlx5e_ethtool_free(struct mlx5e_ethtool_steering *ethtool);
void mlx5e_ethtool_init_steering(struct mlx5e_flow_steering *fs);
void mlx5e_ethtool_cleanup_steering(struct mlx5e_flow_steering *fs);
+int mlx5e_ethtool_set_rxfh_fields(struct mlx5e_priv *priv,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack);
+int mlx5e_ethtool_get_rxfh_fields(struct mlx5e_priv *priv,
+ struct ethtool_rxfh_fields *nfc);
int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd);
int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, u32 *rule_locs);
@@ -20,6 +25,15 @@ static inline int mlx5e_ethtool_alloc(struct mlx5e_ethtool_steering **ethtool)
static inline void mlx5e_ethtool_free(struct mlx5e_ethtool_steering *ethtool) { }
static inline void mlx5e_ethtool_init_steering(struct mlx5e_flow_steering *fs) { }
static inline void mlx5e_ethtool_cleanup_steering(struct mlx5e_flow_steering *fs) { }
+static inline int
+mlx5e_ethtool_set_rxfh_fields(struct mlx5e_priv *priv,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
+{ return -EOPNOTSUPP; }
+static inline int
+mlx5e_ethtool_get_rxfh_fields(struct mlx5e_priv *priv,
+ struct ethtool_rxfh_fields *nfc)
+{ return -EOPNOTSUPP; }
static inline int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
{ return -EOPNOTSUPP; }
static inline int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
index 81523825faa2..cb972b2d46e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
@@ -114,6 +114,7 @@ int mlx5e_health_recover_channels(struct mlx5e_priv *priv)
int err = 0;
rtnl_lock();
+ netdev_lock(priv->netdev);
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
@@ -123,6 +124,7 @@ int mlx5e_health_recover_channels(struct mlx5e_priv *priv)
out:
mutex_unlock(&priv->state_lock);
+ netdev_unlock(priv->netdev);
rtnl_unlock();
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
index b4f3bd7d346e..195863b2c013 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
@@ -138,8 +138,8 @@ void mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv)
if (IS_ERR_OR_NULL(agent)) {
if (IS_ERR(agent))
netdev_warn(priv->netdev,
- "Failed to create hv vhca stats agent, err = %ld\n",
- PTR_ERR(agent));
+ "Failed to create hv vhca stats agent, err = %pe\n",
+ agent);
kvfree(priv->stats_agent.buf);
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c
index 4e72ca8070e2..1de18c7e96ec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c
@@ -6,6 +6,7 @@
#include <linux/xarray.h>
#include <linux/hashtable.h>
#include <linux/refcount.h>
+#include <linux/mlx5/driver.h>
#include "mapping.h"
@@ -24,7 +25,8 @@ struct mapping_ctx {
struct delayed_work dwork;
struct list_head pending_list;
spinlock_t pending_list_lock; /* Guards pending list */
- u64 id;
+ u8 id[MLX5_SW_IMAGE_GUID_MAX_BYTES];
+ u8 id_len;
u8 type;
struct list_head list;
refcount_t refcount;
@@ -220,13 +222,15 @@ mapping_create(size_t data_size, u32 max_id, bool delayed_removal)
}
struct mapping_ctx *
-mapping_create_for_id(u64 id, u8 type, size_t data_size, u32 max_id, bool delayed_removal)
+mapping_create_for_id(u8 *id, u8 id_len, u8 type, size_t data_size, u32 max_id,
+ bool delayed_removal)
{
struct mapping_ctx *ctx;
mutex_lock(&shared_ctx_lock);
list_for_each_entry(ctx, &shared_ctx_list, list) {
- if (ctx->id == id && ctx->type == type) {
+ if (ctx->type == type && ctx->id_len == id_len &&
+ !memcmp(id, ctx->id, id_len)) {
if (refcount_inc_not_zero(&ctx->refcount))
goto unlock;
break;
@@ -237,7 +241,8 @@ mapping_create_for_id(u64 id, u8 type, size_t data_size, u32 max_id, bool delaye
if (IS_ERR(ctx))
goto unlock;
- ctx->id = id;
+ memcpy(ctx->id, id, id_len);
+ ctx->id_len = id_len;
ctx->type = type;
list_add(&ctx->list, &shared_ctx_list);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h
index 4e2119f0f4c1..e86a103d58b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h
@@ -27,6 +27,7 @@ void mapping_destroy(struct mapping_ctx *ctx);
/* adds a mapping with an id or gets an existing mapping with the same id
*/
struct mapping_ctx *
-mapping_create_for_id(u64 id, u8 type, size_t data_size, u32 max_id, bool delayed_removal);
+mapping_create_for_id(u8 *id, u8 id_len, u8 type, size_t data_size, u32 max_id,
+ bool delayed_removal);
#endif /* __MLX5_MAPPING_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 64b62ed17b07..c9bdee9a8b30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -6,10 +6,14 @@
#include "en/port.h"
#include "en_accel/en_accel.h"
#include "en_accel/ipsec.h"
+#include "en_accel/psp.h"
#include <linux/dim.h>
#include <net/page_pool/types.h>
#include <net/xdp_sock_drv.h>
+#define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18
+#define MLX5_REP_MPWRQ_MAX_LOG_WQE_SZ 17
+
static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev)
{
u8 min_page_shift = MLX5_CAP_GEN_2(mdev, log_min_mkey_entity_size);
@@ -96,25 +100,29 @@ u8 mlx5e_mpwrq_umr_entry_size(enum mlx5e_mpwrq_umr_mode mode)
return sizeof(struct mlx5_ksm) * 4;
}
WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", mode);
- return 0;
+ return 1;
}
u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
enum mlx5e_mpwrq_umr_mode umr_mode)
{
u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
- u8 max_pages_per_wqe, max_log_mpwqe_size;
+ u8 max_pages_per_wqe, max_log_wqe_size_calc;
+ u8 max_log_wqe_size_cap;
u16 max_wqe_size;
/* Keep in sync with MLX5_MPWRQ_MAX_PAGES_PER_WQE. */
max_wqe_size = mlx5e_get_max_sq_aligned_wqebbs(mdev) * MLX5_SEND_WQE_BB;
max_pages_per_wqe = ALIGN_DOWN(max_wqe_size - sizeof(struct mlx5e_umr_wqe),
MLX5_UMR_FLEX_ALIGNMENT) / umr_entry_size;
- max_log_mpwqe_size = ilog2(max_pages_per_wqe) + page_shift;
+ max_log_wqe_size_calc = ilog2(max_pages_per_wqe) + page_shift;
- WARN_ON_ONCE(max_log_mpwqe_size < MLX5E_ORDER2_MAX_PACKET_MTU);
+ WARN_ON_ONCE(max_log_wqe_size_calc < MLX5E_ORDER2_MAX_PACKET_MTU);
- return min_t(u8, max_log_mpwqe_size, MLX5_MPWRQ_MAX_LOG_WQE_SZ);
+ max_log_wqe_size_cap = mlx5_core_is_ecpf(mdev) ?
+ MLX5_REP_MPWRQ_MAX_LOG_WQE_SZ : MLX5_MPWRQ_MAX_LOG_WQE_SZ;
+
+ return min_t(u8, max_log_wqe_size_calc, max_log_wqe_size_cap);
}
u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
@@ -407,25 +415,10 @@ u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev,
return params->log_rq_mtu_frames - log_pkts_per_wqe;
}
-u8 mlx5e_shampo_get_log_hd_entry_size(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
+static u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5e_params *params)
{
- return order_base_2(DIV_ROUND_UP(MLX5E_RX_MAX_HEAD, MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE));
-}
-
-u8 mlx5e_shampo_get_log_rsrv_size(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
-{
- return order_base_2(MLX5E_SHAMPO_WQ_RESRV_SIZE / MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE);
-}
-
-u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
-{
- u32 resrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
- PAGE_SIZE;
-
- return order_base_2(DIV_ROUND_UP(resrv_size, params->sw_mtu));
+ return order_base_2(DIV_ROUND_UP(MLX5E_SHAMPO_WQ_RESRV_SIZE,
+ params->sw_mtu));
}
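With the reservation size now fixed at 64 KiB, the packets-per-reservation exponent follows directly from the MTU. A quick worked example with a hypothetical 1500-byte sw_mtu: ceil(65536 / 1500) = 44, rounded up to the next power of two gives 64 packets, i.e. a log value of 6:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define SHAMPO_RESRV_SIZE	(1u << 16)	/* 64 KiB reservation */

/* order_base_2(): smallest exponent e with 2^e >= n (for n >= 1). */
static unsigned int order_base_2(unsigned int n)
{
	unsigned int e = 0;

	while ((1u << e) < n)
		e++;
	return e;
}

int main(void)
{
	unsigned int sw_mtu = 1500;	/* hypothetical MTU */
	unsigned int log_pkts = order_base_2(DIV_ROUND_UP(SHAMPO_RESRV_SIZE, sw_mtu));

	/* 65536 / 1500 -> 44 packets, rounded up to the next power of two. */
	printf("log packets per reservation = %u (%u packets)\n",
	       log_pkts, 1u << log_pkts);	/* 6 -> 64 */
	return 0;
}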
u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
@@ -619,6 +612,7 @@ void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e
.ch_stats = c->stats,
.node = cpu_to_node(c->cpu),
.ix = c->vec_ix,
+ .uar = c->bfreg->up,
};
}
@@ -818,7 +812,7 @@ static void mlx5e_build_common_cq_param(struct mlx5_core_dev *mdev,
{
void *cqc = param->cqc;
- MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.bfreg.up->index);
if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
}
@@ -827,12 +821,12 @@ static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
{
- int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE;
u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
- int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+ int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(params));
int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
int wqe_size = BIT(log_stride_sz) * num_strides;
+ int rsrv_size = MLX5E_SHAMPO_WQ_RESRV_SIZE;
/* +1 is for the case that the pkt_per_rsrv don't consume the reservation
* so we get a filler cqe for the rest of the reservation.
@@ -893,6 +887,7 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
{
void *rqc = param->rqc;
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ u32 lro_timeout;
int ndsegs = 1;
int err;
@@ -918,22 +913,27 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
MLX5_SET(wq, wq, log_wqe_stride_size,
log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
- if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
- MLX5_SET(wq, wq, shampo_enable, true);
- MLX5_SET(wq, wq, log_reservation_size,
- mlx5e_shampo_get_log_rsrv_size(mdev, params));
- MLX5_SET(wq, wq,
- log_max_num_of_packets_per_reservation,
- mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
- MLX5_SET(wq, wq, log_headers_entry_size,
- mlx5e_shampo_get_log_hd_entry_size(mdev, params));
- MLX5_SET(rqc, rqc, reservation_timeout,
- mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_SHAMPO_TIMEOUT));
- MLX5_SET(rqc, rqc, shampo_match_criteria_type,
- params->packet_merge.shampo.match_criteria_type);
- MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity,
- params->packet_merge.shampo.alignment_granularity);
- }
+ if (params->packet_merge.type != MLX5E_PACKET_MERGE_SHAMPO)
+ break;
+
+ MLX5_SET(wq, wq, shampo_enable, true);
+ MLX5_SET(wq, wq, log_reservation_size,
+ MLX5E_SHAMPO_WQ_LOG_RESRV_SIZE -
+ MLX5E_SHAMPO_WQ_RESRV_SIZE_BASE_SHIFT);
+ MLX5_SET(wq, wq,
+ log_max_num_of_packets_per_reservation,
+ mlx5e_shampo_get_log_pkt_per_rsrv(params));
+ MLX5_SET(wq, wq, log_headers_entry_size,
+ MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE -
+ MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE_SHIFT);
+ lro_timeout =
+ mlx5e_choose_lro_timeout(mdev,
+ MLX5E_DEFAULT_SHAMPO_TIMEOUT);
+ MLX5_SET(rqc, rqc, reservation_timeout, lro_timeout);
+ MLX5_SET(rqc, rqc, shampo_match_criteria_type,
+ MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED);
+ MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity,
+ MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE);
break;
}
default: /* MLX5_WQ_TYPE_CYCLIC */
@@ -1005,7 +1005,8 @@ void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
bool allow_swp;
allow_swp = mlx5_geneve_tx_allowed(mdev) ||
- (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO);
+ (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO) ||
+ mlx5_is_psp_device(mdev);
mlx5e_build_sq_param_common(mdev, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
MLX5_SET(sqc, sqc, allow_swp, allow_swp);
@@ -1036,17 +1037,17 @@ u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rq_param)
{
- int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE;
u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, NULL));
- int pkt_per_resv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL);
+ int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(params));
int wqe_size = BIT(log_stride_sz) * num_strides;
+ int rsrv_size = MLX5E_SHAMPO_WQ_RESRV_SIZE;
u32 hd_per_wqe;
/* Assumption: hd_per_wqe % 8 == 0. */
- hd_per_wqe = (wqe_size / resv_size) * pkt_per_resv;
- mlx5_core_dbg(mdev, "%s hd_per_wqe = %d rsrv_size = %d wqe_size = %d pkt_per_resv = %d\n",
- __func__, hd_per_wqe, resv_size, wqe_size, pkt_per_resv);
+ hd_per_wqe = (wqe_size / rsrv_size) * pkt_per_rsrv;
+ mlx5_core_dbg(mdev, "%s hd_per_wqe = %d rsrv_size = %d wqe_size = %d pkt_per_rsrv = %d\n",
+ __func__, hd_per_wqe, rsrv_size, wqe_size, pkt_per_rsrv);
return hd_per_wqe;
}
@@ -1231,7 +1232,6 @@ static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
struct mlx5e_sq_param *param)
{
void *sqc = param->sqc;
@@ -1240,7 +1240,6 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
mlx5e_build_sq_param_common(mdev, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
- param->is_xdp_mb = !mlx5e_rx_is_linear_skb(mdev, params, xsk);
mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}
@@ -1259,7 +1258,7 @@ int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev);
mlx5e_build_sq_param(mdev, params, &cparam->txq_sq);
- mlx5e_build_xdpsq_param(mdev, params, NULL, &cparam->xdp_sq);
+ mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
mlx5e_build_icosq_param(mdev, icosq_log_wq_sz, &cparam->icosq);
mlx5e_build_async_icosq_param(mdev, async_icosq_log_wq_sz, &cparam->async_icosq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 3f8986f9d862..00617c65fe3c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -33,7 +33,6 @@ struct mlx5e_sq_param {
struct mlx5_wq_param wq;
bool is_mpw;
bool is_tls;
- bool is_xdp_mb;
u16 stop_room;
};
@@ -52,6 +51,7 @@ struct mlx5e_create_sq_param {
u32 tisn;
u8 tis_lst_sz;
u8 min_inline_mode;
+ u32 uar_page;
};
/* Striding RQ dynamic parameters */
@@ -96,12 +96,6 @@ bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
-u8 mlx5e_shampo_get_log_hd_entry_size(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params);
-u8 mlx5e_shampo_get_log_rsrv_size(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params);
-u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params);
u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rq_param);
@@ -139,7 +133,6 @@ void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
struct mlx5e_cq_param *param);
void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
struct mlx5e_sq_param *param);
int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c b/drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c
new file mode 100644
index 000000000000..2eb666a46f39
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES.
+
+#include "../devlink.h"
+#include "en.h"
+#include "pcie_cong_event.h"
+
+#define MLX5E_CONG_HIGH_STATE 0x7
+
+enum {
+ MLX5E_INBOUND_CONG = BIT(0),
+ MLX5E_OUTBOUND_CONG = BIT(1),
+};
+
+struct mlx5e_pcie_cong_thresh {
+ u16 inbound_high;
+ u16 inbound_low;
+ u16 outbound_high;
+ u16 outbound_low;
+};
+
+struct mlx5e_pcie_cong_stats {
+ u32 pci_bw_inbound_high;
+ u32 pci_bw_inbound_low;
+ u32 pci_bw_outbound_high;
+ u32 pci_bw_outbound_low;
+ u32 pci_bw_stale_event;
+};
+
+struct mlx5e_pcie_cong_event {
+ u64 obj_id;
+
+ struct mlx5e_priv *priv;
+
+ /* For event notifier and workqueue. */
+ struct work_struct work;
+ struct mlx5_nb nb;
+
+ /* Stores last read state. */
+ u8 state;
+
+ /* For ethtool stats group. */
+ struct mlx5e_pcie_cong_stats stats;
+};
+
+static const struct counter_desc mlx5e_pcie_cong_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_pcie_cong_stats,
+ pci_bw_inbound_high) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_pcie_cong_stats,
+ pci_bw_inbound_low) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_pcie_cong_stats,
+ pci_bw_outbound_high) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_pcie_cong_stats,
+ pci_bw_outbound_low) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_pcie_cong_stats,
+ pci_bw_stale_event) },
+};
+
+#define NUM_PCIE_CONG_COUNTERS ARRAY_SIZE(mlx5e_pcie_cong_stats_desc)
+
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie_cong)
+{
+ return priv->cong_event ? NUM_PCIE_CONG_COUNTERS : 0;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie_cong) {}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie_cong)
+{
+ if (!priv->cong_event)
+ return;
+
+ for (int i = 0; i < NUM_PCIE_CONG_COUNTERS; i++)
+ ethtool_puts(data, mlx5e_pcie_cong_stats_desc[i].format);
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie_cong)
+{
+ if (!priv->cong_event)
+ return;
+
+ for (int i = 0; i < NUM_PCIE_CONG_COUNTERS; i++) {
+ u32 ctr = MLX5E_READ_CTR32_CPU(&priv->cong_event->stats,
+ mlx5e_pcie_cong_stats_desc,
+ i);
+
+ mlx5e_ethtool_put_stat(data, ctr);
+ }
+}
+
+MLX5E_DEFINE_STATS_GRP(pcie_cong, 0);
+
+static int
+mlx5_cmd_pcie_cong_event_set(struct mlx5_core_dev *dev,
+ const struct mlx5e_pcie_cong_thresh *config,
+ u64 *obj_id)
+{
+ u32 in[MLX5_ST_SZ_DW(pcie_cong_event_cmd_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ void *cong_obj;
+ void *hdr;
+ int err;
+
+ hdr = MLX5_ADDR_OF(pcie_cong_event_cmd_in, in, hdr);
+ cong_obj = MLX5_ADDR_OF(pcie_cong_event_cmd_in, in, cong_obj);
+
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT);
+
+ MLX5_SET(pcie_cong_event_obj, cong_obj, inbound_event_en, 1);
+ MLX5_SET(pcie_cong_event_obj, cong_obj, outbound_event_en, 1);
+
+ MLX5_SET(pcie_cong_event_obj, cong_obj,
+ inbound_cong_high_threshold, config->inbound_high);
+ MLX5_SET(pcie_cong_event_obj, cong_obj,
+ inbound_cong_low_threshold, config->inbound_low);
+
+ MLX5_SET(pcie_cong_event_obj, cong_obj,
+ outbound_cong_high_threshold, config->outbound_high);
+ MLX5_SET(pcie_cong_event_obj, cong_obj,
+ outbound_cong_low_threshold, config->outbound_low);
+
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+
+ mlx5_core_dbg(dev, "PCIe congestion event (obj_id=%llu) created. Config: in: [%u, %u], out: [%u, %u]\n",
+ *obj_id,
+ config->inbound_high, config->inbound_low,
+ config->outbound_high, config->outbound_low);
+
+ return 0;
+}
+
+static int mlx5_cmd_pcie_cong_event_destroy(struct mlx5_core_dev *dev,
+ u64 obj_id)
+{
+ u32 in[MLX5_ST_SZ_DW(pcie_cong_event_cmd_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ void *hdr;
+
+ hdr = MLX5_ADDR_OF(pcie_cong_event_cmd_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
+ MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, obj_id);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_cmd_pcie_cong_event_query(struct mlx5_core_dev *dev,
+ u64 obj_id,
+ u32 *state)
+{
+ u32 in[MLX5_ST_SZ_DW(pcie_cong_event_cmd_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(pcie_cong_event_cmd_out)];
+ void *obj;
+ void *hdr;
+ u8 cong;
+ int err;
+
+ hdr = MLX5_ADDR_OF(pcie_cong_event_cmd_in, in, hdr);
+
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
+ MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, obj_id);
+
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ obj = MLX5_ADDR_OF(pcie_cong_event_cmd_out, out, cong_obj);
+
+ if (state) {
+ cong = MLX5_GET(pcie_cong_event_obj, obj, inbound_cong_state);
+ if (cong == MLX5E_CONG_HIGH_STATE)
+ *state |= MLX5E_INBOUND_CONG;
+
+ cong = MLX5_GET(pcie_cong_event_obj, obj, outbound_cong_state);
+ if (cong == MLX5E_CONG_HIGH_STATE)
+ *state |= MLX5E_OUTBOUND_CONG;
+ }
+
+ return 0;
+}
+
+static void mlx5e_pcie_cong_event_work(struct work_struct *work)
+{
+ struct mlx5e_pcie_cong_event *cong_event;
+ struct mlx5_core_dev *dev;
+ struct mlx5e_priv *priv;
+ u32 new_cong_state = 0;
+ u32 changes;
+ int err;
+
+ cong_event = container_of(work, struct mlx5e_pcie_cong_event, work);
+ priv = cong_event->priv;
+ dev = priv->mdev;
+
+ err = mlx5_cmd_pcie_cong_event_query(dev, cong_event->obj_id,
+ &new_cong_state);
+ if (err) {
+ mlx5_core_warn(dev, "Error %d when querying PCIe cong event object (obj_id=%llu).\n",
+ err, cong_event->obj_id);
+ return;
+ }
+
+ changes = cong_event->state ^ new_cong_state;
+ if (!changes) {
+ cong_event->stats.pci_bw_stale_event++;
+ return;
+ }
+
+ cong_event->state = new_cong_state;
+
+ if (changes & MLX5E_INBOUND_CONG) {
+ if (new_cong_state & MLX5E_INBOUND_CONG)
+ cong_event->stats.pci_bw_inbound_high++;
+ else
+ cong_event->stats.pci_bw_inbound_low++;
+ }
+
+ if (changes & MLX5E_OUTBOUND_CONG) {
+ if (new_cong_state & MLX5E_OUTBOUND_CONG)
+ cong_event->stats.pci_bw_outbound_high++;
+ else
+ cong_event->stats.pci_bw_outbound_low++;
+ }
+}
+
+static int mlx5e_pcie_cong_event_handler(struct notifier_block *nb,
+ unsigned long event, void *eqe)
+{
+ struct mlx5e_pcie_cong_event *cong_event;
+
+ cong_event = mlx5_nb_cof(nb, struct mlx5e_pcie_cong_event, nb);
+ queue_work(cong_event->priv->wq, &cong_event->work);
+
+ return NOTIFY_OK;
+}
+
+static int
+mlx5e_pcie_cong_get_thresh_config(struct mlx5_core_dev *dev,
+ struct mlx5e_pcie_cong_thresh *config)
+{
+ u32 ids[4] = {
+ MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_LOW,
+ MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_HIGH,
+ MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_LOW,
+ MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_HIGH,
+ };
+ struct devlink *devlink = priv_to_devlink(dev);
+ union devlink_param_value val[4];
+
+ for (int i = 0; i < 4; i++) {
+ u32 id = ids[i];
+ int err;
+
+ err = devl_param_driverinit_value_get(devlink, id, &val[i]);
+ if (err)
+ return err;
+ }
+
+ config->inbound_low = val[0].vu16;
+ config->inbound_high = val[1].vu16;
+ config->outbound_low = val[2].vu16;
+ config->outbound_high = val[3].vu16;
+
+ return 0;
+}
+
+static int
+mlx5e_thresh_config_validate(struct mlx5_core_dev *mdev,
+ const struct mlx5e_pcie_cong_thresh *config)
+{
+ int err = 0;
+
+ if (config->inbound_low >= config->inbound_high) {
+ err = -EINVAL;
+ mlx5_core_err(mdev, "PCIe inbound congestion threshold configuration invalid: low (%u) >= high (%u).\n",
+ config->inbound_low, config->inbound_high);
+ }
+
+ if (config->outbound_low >= config->outbound_high) {
+ err = -EINVAL;
+ mlx5_core_err(mdev, "PCIe outbound congestion threshold configuration invalid: low (%u) >= high (%u).\n",
+ config->outbound_low, config->outbound_high);
+ }
+
+ return err;
+}
+
+int mlx5e_pcie_cong_event_init(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pcie_cong_thresh thresh_config = {};
+ struct mlx5e_pcie_cong_event *cong_event;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int err;
+
+ if (!mlx5_pcie_cong_event_supported(mdev))
+ return 0;
+
+ err = mlx5e_pcie_cong_get_thresh_config(mdev, &thresh_config);
+ if (WARN_ON(err))
+ return err;
+
+ err = mlx5e_thresh_config_validate(mdev, &thresh_config);
+ if (err) {
+ mlx5_core_err(mdev, "PCIe congestion event feature disabled\n");
+ return err;
+ }
+
+ cong_event = kvzalloc_node(sizeof(*cong_event), GFP_KERNEL,
+ mdev->priv.numa_node);
+ if (!cong_event)
+ return -ENOMEM;
+
+ INIT_WORK(&cong_event->work, mlx5e_pcie_cong_event_work);
+ MLX5_NB_INIT(&cong_event->nb, mlx5e_pcie_cong_event_handler,
+ OBJECT_CHANGE);
+
+ cong_event->priv = priv;
+
+ err = mlx5_cmd_pcie_cong_event_set(mdev, &thresh_config,
+ &cong_event->obj_id);
+ if (err) {
+ mlx5_core_warn(mdev, "Error creating a PCIe congestion event object\n");
+ goto err_free;
+ }
+
+ err = mlx5_eq_notifier_register(mdev, &cong_event->nb);
+ if (err) {
+ mlx5_core_warn(mdev, "Error registering notifier for the PCIe congestion event\n");
+ goto err_obj_destroy;
+ }
+
+ priv->cong_event = cong_event;
+
+ return 0;
+
+err_obj_destroy:
+ mlx5_cmd_pcie_cong_event_destroy(mdev, cong_event->obj_id);
+err_free:
+ kvfree(cong_event);
+
+ return err;
+}
+
+void mlx5e_pcie_cong_event_cleanup(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pcie_cong_event *cong_event = priv->cong_event;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ if (!cong_event)
+ return;
+
+ priv->cong_event = NULL;
+
+ mlx5_eq_notifier_unregister(mdev, &cong_event->nb);
+ cancel_work_sync(&cong_event->work);
+
+ if (mlx5_cmd_pcie_cong_event_destroy(mdev, cong_event->obj_id))
+ mlx5_core_warn(mdev, "Error destroying PCIe congestion event (obj_id=%llu)\n",
+ cong_event->obj_id);
+
+ kvfree(cong_event);
+}
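
A minimal, self-contained sketch of the accounting done in mlx5e_pcie_cong_event_work() above: the previously cached state and the freshly queried state are XORed to find which directions actually changed, each transition bumps the matching "high" or "low" counter, and a notification that changes nothing is counted as stale. The names below are illustrative only, not part of the driver.

#include <stdio.h>

#define INBOUND_CONG  (1u << 0)
#define OUTBOUND_CONG (1u << 1)

struct cong_stats {
        unsigned int inbound_high, inbound_low;
        unsigned int outbound_high, outbound_low;
        unsigned int stale_event;
};

/* Apply one queried state snapshot to the running counters. */
static void account_cong_event(unsigned int *state, unsigned int new_state,
                               struct cong_stats *s)
{
        unsigned int changes = *state ^ new_state;

        if (!changes) {         /* notification arrived, but nothing moved */
                s->stale_event++;
                return;
        }
        *state = new_state;

        if (changes & INBOUND_CONG) {
                if (new_state & INBOUND_CONG)
                        s->inbound_high++;
                else
                        s->inbound_low++;
        }
        if (changes & OUTBOUND_CONG) {
                if (new_state & OUTBOUND_CONG)
                        s->outbound_high++;
                else
                        s->outbound_low++;
        }
}

int main(void)
{
        unsigned int samples[] = { INBOUND_CONG, INBOUND_CONG, 0,
                                   INBOUND_CONG | OUTBOUND_CONG, 0 };
        struct cong_stats s = { 0 };
        unsigned int state = 0;

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                account_cong_event(&state, samples[i], &s);

        printf("in_high=%u in_low=%u out_high=%u out_low=%u stale=%u\n",
               s.inbound_high, s.inbound_low,
               s.outbound_high, s.outbound_low, s.stale_event);
        return 0;
}

With the sample sequence above the counters end up as in_high=2, in_low=2, out_high=1, out_low=1 plus one stale event; in the driver these are exposed through the ethtool stats group as pci_bw_inbound_high, pci_bw_inbound_low, pci_bw_outbound_high, pci_bw_outbound_low and pci_bw_stale_event.
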
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.h b/drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.h
new file mode 100644
index 000000000000..b1ea46bf648a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef __MLX5_PCIE_CONG_EVENT_H__
+#define __MLX5_PCIE_CONG_EVENT_H__
+
+int mlx5e_pcie_cong_event_init(struct mlx5e_priv *priv);
+void mlx5e_pcie_cong_event_cleanup(struct mlx5e_priv *priv);
+
+#endif /* __MLX5_PCIE_CONG_EVENT_H__ */
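
The low/high threshold pairs programmed into the congestion object above are most naturally read as a hysteresis window, which would explain why mlx5e_thresh_config_validate() insists on low < high for each direction. The sketch below illustrates that reading only; it is an assumption here, since the real units, sampling and exact trigger semantics are defined by the device firmware rather than by this patch.

#include <stdbool.h>
#include <stdio.h>

struct hysteresis {
        int low, high;          /* must satisfy low < high */
        bool congested;
};

/* Returns true when the congested/cleared state flips. */
static bool hysteresis_update(struct hysteresis *h, int level)
{
        if (!h->congested && level >= h->high) {
                h->congested = true;
                return true;
        }
        if (h->congested && level <= h->low) {
                h->congested = false;
                return true;
        }
        return false;           /* inside the window: no event */
}

int main(void)
{
        struct hysteresis h = { .low = 300, .high = 700 };
        int levels[] = { 200, 500, 750, 650, 400, 250 };

        for (unsigned int i = 0; i < sizeof(levels) / sizeof(levels[0]); i++)
                if (hysteresis_update(&h, levels[i]))
                        printf("level %d -> %s\n", levels[i],
                               h.congested ? "congested" : "cleared");
        return 0;
}

If low were allowed to reach or exceed high, the window would collapse and every sample near the threshold could raise an event, which is the configuration the validation rejects.
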
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index 5f6a0605e4ae..6049ccf475bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -80,6 +80,7 @@ int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable,
int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
{
struct mlx5_port_eth_proto eproto;
+ const struct mlx5_link_info *info;
bool force_legacy = false;
bool ext;
int err;
@@ -94,9 +95,13 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
if (err)
goto out;
}
- *speed = mlx5_port_ptys2speed(mdev, eproto.oper, force_legacy);
- if (!(*speed))
+ info = mlx5_port_ptys2info(mdev, eproto.oper, force_legacy);
+ if (!info) {
+ *speed = SPEED_UNKNOWN;
err = -EINVAL;
+ goto out;
+ }
+ *speed = info->speed;
out:
return err;
@@ -296,11 +301,16 @@ enum mlx5e_fec_supported_link_mode {
MLX5E_FEC_SUPPORTED_LINK_MODE_200G_2X,
MLX5E_FEC_SUPPORTED_LINK_MODE_400G_4X,
MLX5E_FEC_SUPPORTED_LINK_MODE_800G_8X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_200G_1X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_400G_2X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_800G_4X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_1600G_8X,
MLX5E_MAX_FEC_SUPPORTED_LINK_MODE,
};
#define MLX5E_FEC_FIRST_50G_PER_LANE_MODE MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X
#define MLX5E_FEC_FIRST_100G_PER_LANE_MODE MLX5E_FEC_SUPPORTED_LINK_MODE_100G_1X
+#define MLX5E_FEC_FIRST_200G_PER_LANE_MODE MLX5E_FEC_SUPPORTED_LINK_MODE_200G_1X
#define MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, policy, write, link) \
do { \
@@ -320,8 +330,10 @@ static bool mlx5e_is_fec_supported_link_mode(struct mlx5_core_dev *dev,
return link_mode < MLX5E_FEC_FIRST_50G_PER_LANE_MODE ||
(link_mode < MLX5E_FEC_FIRST_100G_PER_LANE_MODE &&
MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm)) ||
- (link_mode >= MLX5E_FEC_FIRST_100G_PER_LANE_MODE &&
- MLX5_CAP_PCAM_FEATURE(dev, fec_100G_per_lane_in_pplm));
+ (link_mode < MLX5E_FEC_FIRST_200G_PER_LANE_MODE &&
+ MLX5_CAP_PCAM_FEATURE(dev, fec_100G_per_lane_in_pplm)) ||
+ (link_mode >= MLX5E_FEC_FIRST_200G_PER_LANE_MODE &&
+ MLX5_CAP_PCAM_FEATURE(dev, fec_200G_per_lane_in_pplm));
}
/* get/set FEC admin field for a given speed */
@@ -368,6 +380,18 @@ static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write,
case MLX5E_FEC_SUPPORTED_LINK_MODE_800G_8X:
MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 800g_8x);
break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_1X:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 200g_1x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_2X:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 400g_2x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_800G_4X:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 800g_4x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_1600G_8X:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 1600g_8x);
+ break;
default:
return -EINVAL;
}
@@ -421,6 +445,18 @@ static int mlx5e_get_fec_cap_field(u32 *pplm, u16 *fec_cap,
case MLX5E_FEC_SUPPORTED_LINK_MODE_800G_8X:
*fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 800g_8x);
break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_1X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 200g_1x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_2X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 400g_2x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_800G_4X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 800g_4x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_1600G_8X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 1600g_8x);
+ break;
default:
return -EINVAL;
}
@@ -494,6 +530,26 @@ out:
return 0;
}
+static u16 mlx5e_remap_fec_conf_mode(enum mlx5e_fec_supported_link_mode link_mode,
+ u16 conf_fec)
+{
+ /* RS fec in ethtool is originally mapped to MLX5E_FEC_RS_528_514.
+ * For link modes up to 25G per lane, the value is kept.
+ * For 50G or 100G per lane, it's remapped to MLX5E_FEC_RS_544_514.
+ * For 200G per lane, remapped to MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD.
+ */
+ if (conf_fec != BIT(MLX5E_FEC_RS_528_514))
+ return conf_fec;
+
+ if (link_mode >= MLX5E_FEC_FIRST_200G_PER_LANE_MODE)
+ return BIT(MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD);
+
+ if (link_mode >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE)
+ return BIT(MLX5E_FEC_RS_544_514);
+
+ return conf_fec;
+}
+
int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u16 fec_policy)
{
bool fec_50g_per_lane = MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm);
@@ -530,14 +586,7 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u16 fec_policy)
if (!mlx5e_is_fec_supported_link_mode(dev, i))
break;
- /* RS fec in ethtool is mapped to MLX5E_FEC_RS_528_514
- * to link modes up to 25G per lane and to
- * MLX5E_FEC_RS_544_514 in the new link modes based on
- * 50G or 100G per lane
- */
- if (conf_fec == (1 << MLX5E_FEC_RS_528_514) &&
- i >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE)
- conf_fec = (1 << MLX5E_FEC_RS_544_514);
+ conf_fec = mlx5e_remap_fec_conf_mode(i, conf_fec);
mlx5e_get_fec_cap_field(out, &fec_caps, i);
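
The remapping factored out into mlx5e_remap_fec_conf_mode() above only touches an RS(528,514) request from ethtool. A standalone sketch of the three-way decision follows, using the bit positions from the en/port.h hunk below (RS_528_514 = 2, RS_544_514_INTERLEAVED_QUAD = 4, RS_544_514 = 7); the lane-class enum is illustrative shorthand for the per-lane link-mode groups in this patch, not a driver type.

#include <stdio.h>

enum {
        FEC_RS_528_514                 = 2,
        FEC_RS_544_514_INTERLEAVED_QUAD = 4,
        FEC_RS_544_514                 = 7,
};

enum lane_class {
        UP_TO_25G_PER_LANE,
        FROM_50G_PER_LANE,
        FROM_100G_PER_LANE,
        FROM_200G_PER_LANE,
};

static unsigned int remap_rs_fec(enum lane_class c, unsigned int conf_fec)
{
        if (conf_fec != (1u << FEC_RS_528_514))
                return conf_fec;        /* only RS(528,514) requests are remapped */
        if (c >= FROM_200G_PER_LANE)
                return 1u << FEC_RS_544_514_INTERLEAVED_QUAD;   /* 200G per lane */
        if (c >= FROM_50G_PER_LANE)
                return 1u << FEC_RS_544_514;    /* 50G or 100G per lane */
        return conf_fec;                        /* up to 25G per lane: keep RS(528,514) */
}

int main(void)
{
        for (int c = UP_TO_25G_PER_LANE; c <= FROM_200G_PER_LANE; c++)
                printf("lane class %d -> fec policy 0x%x\n",
                       c, remap_rs_fec((enum lane_class)c, 1u << FEC_RS_528_514));
        return 0;
}
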
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
index d1da225f35da..fa2283dd383b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
@@ -61,6 +61,7 @@ enum {
MLX5E_FEC_NOFEC,
MLX5E_FEC_FIRECODE,
MLX5E_FEC_RS_528_514,
+ MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD = 4,
MLX5E_FEC_RS_544_514 = 7,
MLX5E_FEC_LLRS_272_257_1 = 9,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index 8e25f4ef5ccc..4720523813b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -272,8 +272,8 @@ static int port_update_shared_buffer(struct mlx5_core_dev *mdev,
/* Total shared buffer size is split in a ratio of 3:1 between
* lossy and lossless pools respectively.
*/
- lossy_epool_size = (shared_buffer_size / 4) * 3;
lossless_ipool_size = shared_buffer_size / 4;
+ lossy_epool_size = shared_buffer_size - lossless_ipool_size;
mlx5e_port_set_sbpr(mdev, 0, MLX5_EGRESS_DIR, MLX5_LOSSY_POOL, 0,
lossy_epool_size);
@@ -288,14 +288,12 @@ static int port_set_buffer(struct mlx5e_priv *priv,
u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
struct mlx5_core_dev *mdev = priv->mdev;
int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
- u32 new_headroom_size = 0;
- u32 current_headroom_size;
+ u32 current_headroom_cells = 0;
+ u32 new_headroom_cells = 0;
void *in;
int err;
int i;
- current_headroom_size = port_buffer->headroom_size;
-
in = kzalloc(sz, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -306,12 +304,14 @@ static int port_set_buffer(struct mlx5e_priv *priv,
for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]);
+ current_headroom_cells += MLX5_GET(bufferx_reg, buffer, size);
+
u64 size = port_buffer->buffer[i].size;
u64 xoff = port_buffer->buffer[i].xoff;
u64 xon = port_buffer->buffer[i].xon;
- new_headroom_size += size;
do_div(size, port_buff_cell_sz);
+ new_headroom_cells += size;
do_div(xoff, port_buff_cell_sz);
do_div(xon, port_buff_cell_sz);
MLX5_SET(bufferx_reg, buffer, size, size);
@@ -320,10 +320,8 @@ static int port_set_buffer(struct mlx5e_priv *priv,
MLX5_SET(bufferx_reg, buffer, xon_threshold, xon);
}
- new_headroom_size /= port_buff_cell_sz;
- current_headroom_size /= port_buff_cell_sz;
- err = port_update_shared_buffer(priv->mdev, current_headroom_size,
- new_headroom_size);
+ err = port_update_shared_buffer(priv->mdev, current_headroom_cells,
+ new_headroom_cells);
if (err)
goto out;
@@ -331,6 +329,9 @@ static int port_set_buffer(struct mlx5e_priv *priv,
if (err)
goto out;
+ /* RO bits should be set to 0 on write */
+ MLX5_SET(pbmc_reg, in, port_buffer_size, 0);
+
err = mlx5e_port_set_pbmc(mdev, in);
out:
kfree(in);
@@ -574,7 +575,6 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (err)
return err;
}
- priv->dcbx.xoff = xoff;
/* Apply the settings */
if (update_buffer) {
@@ -583,6 +583,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
return err;
}
+ priv->dcbx.xoff = xoff;
+
if (update_prio2buffer)
err = mlx5e_port_set_priority2buffer(priv->mdev, prio2buffer);
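
One practical effect of reordering the shared-buffer split above: with integer arithmetic, computing both pools as independent divisions can leave a remainder of cells assigned to neither pool, while deriving the lossy pool as "total minus lossless" keeps every cell accounted for at the same 3:1 ratio. The numbers in this sketch are made up purely for illustration.

#include <stdio.h>

int main(void)
{
        unsigned int shared = 1003;     /* shared buffer size in cells (illustrative) */

        /* Old form: both pools from independent divisions. */
        unsigned int lossy_old    = (shared / 4) * 3;           /* 750 */
        unsigned int lossless_old = shared / 4;                 /* 250 -> 3 cells unassigned */

        /* New form: lossless is 1/4, lossy takes the remainder. */
        unsigned int lossless_new = shared / 4;                 /* 250 */
        unsigned int lossy_new    = shared - lossless_new;      /* 753 */

        printf("old: %u + %u = %u of %u\n",
               lossy_old, lossless_old, lossy_old + lossless_old, shared);
        printf("new: %u + %u = %u of %u\n",
               lossy_new, lossless_new, lossy_new + lossless_new, shared);
        return 0;
}

The same hunk also switches the headroom bookkeeping to cell units on both the current and the new side of the comparison, so the two values handed to port_update_shared_buffer() are in the same unit.
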
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index afd654583b6b..424f8a2728a3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -8,6 +8,7 @@
#include "en/fs_tt_redirect.h"
#include <linux/list.h>
#include <linux/spinlock.h>
+#include <net/netdev_lock.h>
struct mlx5e_ptp_fs {
struct mlx5_flow_handle *l2_rule;
@@ -81,7 +82,7 @@ static struct mlx5e_skb_cb_hwtstamp *mlx5e_skb_cb_get_hwts(struct sk_buff *skb)
}
static void mlx5e_skb_cb_hwtstamp_tx(struct sk_buff *skb,
- struct mlx5e_ptp_cq_stats *cq_stats)
+ struct mlx5e_ptpsq *ptpsq)
{
struct skb_shared_hwtstamps hwts = {};
ktime_t diff;
@@ -91,8 +92,17 @@ static void mlx5e_skb_cb_hwtstamp_tx(struct sk_buff *skb,
/* Maximal allowed diff is 1 / 128 second */
if (diff > (NSEC_PER_SEC >> 7)) {
- cq_stats->abort++;
- cq_stats->abort_abs_diff_ns += diff;
+ struct mlx5e_txqsq *sq = &ptpsq->txqsq;
+
+ ptpsq->cq_stats->abort++;
+ ptpsq->cq_stats->abort_abs_diff_ns += diff;
+ if (diff > (NSEC_PER_SEC >> 1) &&
+ !test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
+ netdev_warn(sq->channel->netdev,
+ "PTP TX timestamp difference between CQE and port exceeds threshold: %lld ns, recovering SQ %u\n",
+ (s64)diff, sq->sqn);
+ queue_work(sq->priv->wq, &ptpsq->report_unhealthy_work);
+ }
return;
}
@@ -102,7 +112,7 @@ static void mlx5e_skb_cb_hwtstamp_tx(struct sk_buff *skb,
void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
ktime_t hwtstamp,
- struct mlx5e_ptp_cq_stats *cq_stats)
+ struct mlx5e_ptpsq *ptpsq)
{
switch (hwtstamp_type) {
case (MLX5E_SKB_CB_CQE_HWTSTAMP):
@@ -120,7 +130,7 @@ void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
!mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp)
return;
- mlx5e_skb_cb_hwtstamp_tx(skb, cq_stats);
+ mlx5e_skb_cb_hwtstamp_tx(skb, ptpsq);
memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
}
@@ -208,7 +218,7 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
- hwtstamp, ptpsq->cq_stats);
+ hwtstamp, ptpsq);
ptpsq->cq_stats->cqe++;
mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp);
@@ -326,21 +336,19 @@ static int mlx5e_ptp_alloc_txqsq(struct mlx5e_ptp *c, int txq_ix,
int node;
sq->pdev = c->pdev;
- sq->clock = &mdev->clock;
+ sq->clock = mdev->clock;
sq->mkey_be = c->mkey_be;
sq->netdev = c->netdev;
sq->priv = c->priv;
sq->mdev = mdev;
sq->ch_ix = MLX5E_PTP_CHANNEL_IX;
sq->txq_ix = txq_ix;
- sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+ sq->uar_map = c->bfreg->map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
sq->stats = &c->priv->ptp_stats.sq[tc];
sq->ptpsq = ptpsq;
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
- if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
- set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
sq->stop_room = param->stop_room;
sq->ptp_cyc2time = mlx5_sq_ts_translator(mdev);
@@ -449,8 +457,22 @@ static void mlx5e_ptpsq_unhealthy_work(struct work_struct *work)
{
struct mlx5e_ptpsq *ptpsq =
container_of(work, struct mlx5e_ptpsq, report_unhealthy_work);
+ struct mlx5e_txqsq *sq = &ptpsq->txqsq;
+
+ /* Recovering the PTP SQ means re-enabling NAPI, which requires the
+ * netdev instance lock. However, SQ closing has to wait for this work
+ * task to finish while also holding the same lock. So either get the
+ * lock or find that the SQ is no longer enabled and thus this work is
+ * not relevant anymore.
+ */
+ while (!netdev_trylock(sq->netdev)) {
+ if (!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))
+ return;
+ msleep(20);
+ }
mlx5e_reporter_tx_ptpsq_unhealthy(ptpsq);
+ netdev_unlock(sq->netdev);
}
static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
@@ -473,6 +495,7 @@ static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
csp.wq_ctrl = &txqsq->wq_ctrl;
csp.min_inline_mode = txqsq->min_inline_mode;
csp.ts_cqe_to_dest_cqn = ptpsq->ts_cq.mcq.cqn;
+ csp.uar_page = c->bfreg->index;
err = mlx5e_create_sq_rdy(c->mdev, sqp, &csp, 0, &txqsq->sqn);
if (err)
@@ -564,6 +587,7 @@ static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
ccp.ch_stats = c->stats;
ccp.napi = &c->napi;
ccp.ix = MLX5E_PTP_CHANNEL_IX;
+ ccp.uar = c->bfreg->up;
cq_param = &cparams->txq_sq_param.cqp;
@@ -613,6 +637,7 @@ static int mlx5e_ptp_open_rx_cq(struct mlx5e_ptp *c,
ccp.ch_stats = c->stats;
ccp.napi = &c->napi;
ccp.ix = MLX5E_PTP_CHANNEL_IX;
+ ccp.uar = c->bfreg->up;
cq_param = &cparams->rq_param.cqp;
@@ -696,8 +721,8 @@ static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
rq->pdev = c->pdev;
rq->netdev = priv->netdev;
rq->priv = priv;
- rq->clock = &mdev->clock;
- rq->tstamp = &priv->tstamp;
+ rq->clock = mdev->clock;
+ rq->hwtstamp_config = &priv->hwtstamp_config;
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->stats = &c->priv->ptp_stats.rq;
@@ -880,19 +905,19 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
c->priv = priv;
c->mdev = priv->mdev;
- c->tstamp = &priv->tstamp;
c->pdev = mlx5_core_dma_dev(priv->mdev);
c->netdev = priv->netdev;
c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
c->num_tc = mlx5e_get_dcb_num_tc(params);
c->stats = &priv->ptp_stats.ch;
c->lag_port = lag_port;
+ c->bfreg = &mdev->priv.bfreg;
err = mlx5e_ptp_set_state(c, params);
if (err)
goto err_free;
- netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll);
+ netif_napi_add_locked(netdev, &c->napi, mlx5e_ptp_napi_poll);
mlx5e_ptp_build_params(c, cparams, params);
@@ -910,7 +935,7 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
return 0;
err_napi_del:
- netif_napi_del(&c->napi);
+ netif_napi_del_locked(&c->napi);
err_free:
kvfree(cparams);
kvfree(c);
@@ -920,7 +945,7 @@ err_free:
void mlx5e_ptp_close(struct mlx5e_ptp *c)
{
mlx5e_ptp_close_queues(c);
- netif_napi_del(&c->napi);
+ netif_napi_del_locked(&c->napi);
kvfree(c);
}
@@ -929,7 +954,7 @@ void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c)
{
int tc;
- napi_enable(&c->napi);
+ napi_enable_locked(&c->napi);
if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
for (tc = 0; tc < c->num_tc; tc++)
@@ -957,7 +982,7 @@ void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c)
mlx5e_deactivate_txqsq(&c->ptpsq[tc].txqsq);
}
- napi_disable(&c->napi);
+ napi_disable_locked(&c->napi);
}
int mlx5e_ptp_get_rqn(struct mlx5e_ptp *c, u32 *rqn)
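
The two thresholds used in mlx5e_skb_cb_hwtstamp_tx() above are plain shifts of one second: a CQE-to-port timestamp difference above 1/128 s is counted as an abort, and a difference above 1/2 s additionally schedules the PTP SQ recovery work. A small sketch of that classification follows; the constants are just the shifts from the code, and the enum is illustrative, not a driver type.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

enum ts_action { TS_OK, TS_ABORT, TS_ABORT_AND_RECOVER };

static enum ts_action classify_ts_diff(int64_t diff_ns)
{
        if (diff_ns > (NSEC_PER_SEC >> 1))      /* > 500 ms: abort and recover the SQ */
                return TS_ABORT_AND_RECOVER;
        if (diff_ns > (NSEC_PER_SEC >> 7))      /* > ~7.8 ms: count an abort only */
                return TS_ABORT;
        return TS_OK;                           /* deliver the port timestamp */
}

int main(void)
{
        int64_t samples[] = { 1000, 8000000, 600000000 };

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("%lld ns -> %d\n",
                       (long long)samples[i], classify_ts_diff(samples[i]));
        return 0;
}

The recovery path then runs in mlx5e_ptpsq_unhealthy_work(), which takes the netdev instance lock with a trylock loop so it cannot deadlock against SQ teardown waiting for the same work item.
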
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
index 883c044852f1..2a457a2ed707 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
@@ -64,8 +64,8 @@ struct mlx5e_ptp {
/* control */
struct mlx5e_priv *priv;
struct mlx5_core_dev *mdev;
- struct hwtstamp_config *tstamp;
DECLARE_BITMAP(state, MLX5E_PTP_STATE_NUM_STATES);
+ struct mlx5_sq_bfreg *bfreg;
};
static inline bool mlx5e_use_ptpsq(struct sk_buff *skb)
@@ -147,7 +147,7 @@ enum {
void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
ktime_t hwtstamp,
- struct mlx5e_ptp_cq_stats *cq_stats);
+ struct mlx5e_ptpsq *ptpsq);
void mlx5e_skb_cb_hwtstamp_init(struct sk_buff *skb);
#endif /* __MLX5_EN_PTP_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index f0744a45db92..4e461cb03b83 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -374,7 +374,7 @@ void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_que
void mlx5e_reset_qdisc(struct net_device *dev, u16 qid)
{
struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, qid);
- struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+ struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
if (!qdisc)
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index 5d128c5b4529..87a2ad69526d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -30,15 +30,11 @@ static bool mlx5_esw_bridge_dev_same_hw(struct net_device *dev, struct mlx5_eswi
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev, *esw_mdev;
- u64 system_guid, esw_system_guid;
mdev = priv->mdev;
esw_mdev = esw->dev;
- system_guid = mlx5_query_nic_system_image_guid(mdev);
- esw_system_guid = mlx5_query_nic_system_image_guid(esw_mdev);
-
- return system_guid == esw_system_guid;
+ return mlx5_same_hw_devs(mdev, esw_mdev);
}
static struct net_device *
@@ -48,15 +44,10 @@ mlx5_esw_bridge_lag_rep_get(struct net_device *dev, struct mlx5_eswitch *esw)
struct list_head *iter;
netdev_for_each_lower_dev(dev, lower, iter) {
- struct mlx5_core_dev *mdev;
- struct mlx5e_priv *priv;
-
if (!mlx5e_eswitch_rep(lower))
continue;
- priv = netdev_priv(lower);
- mdev = priv->mdev;
- if (mlx5_lag_is_shared_fdb(mdev) && mlx5_esw_bridge_dev_same_esw(lower, esw))
+ if (mlx5_esw_bridge_dev_same_esw(lower, esw))
return lower;
}
@@ -125,7 +116,7 @@ static bool mlx5_esw_bridge_is_local(struct net_device *dev, struct net_device *
priv = netdev_priv(rep);
mdev = priv->mdev;
if (netif_is_lag_master(dev))
- return mlx5_lag_is_shared_fdb(mdev) && mlx5_lag_is_master(mdev);
+ return mlx5_lag_is_master(mdev);
return true;
}
@@ -455,6 +446,9 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
if (!rep)
return NOTIFY_DONE;
+ if (netif_is_lag_master(dev) && !mlx5_lag_is_shared_fdb(esw->dev))
+ return NOTIFY_DONE;
+
switch (event) {
case SWITCHDEV_FDB_ADD_TO_BRIDGE:
fdb_info = container_of(info,
@@ -490,8 +484,8 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
fdb_info,
br_offloads);
if (IS_ERR(work)) {
- WARN_ONCE(1, "Failed to init switchdev work, err=%ld",
- PTR_ERR(work));
+ WARN_ONCE(1, "Failed to init switchdev work, err=%pe",
+ work);
return notifier_from_errno(PTR_ERR(work));
}
@@ -529,7 +523,8 @@ void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
br_offloads = mlx5_esw_bridge_init(esw);
rtnl_unlock();
if (IS_ERR(br_offloads)) {
- esw_warn(mdev, "Failed to init esw bridge (err=%ld)\n", PTR_ERR(br_offloads));
+ esw_warn(mdev, "Failed to init esw bridge (err=%pe)\n",
+ br_offloads);
return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index 25d751eba99b..0686fbdd5a05 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -170,16 +170,23 @@ static int mlx5e_rx_reporter_err_rq_cqe_recover(void *ctx)
static int mlx5e_rx_reporter_timeout_recover(void *ctx)
{
struct mlx5_eq_comp *eq;
+ struct mlx5e_priv *priv;
struct mlx5e_rq *rq;
int err;
rq = ctx;
+ priv = rq->priv;
+
+ mutex_lock(&priv->state_lock);
+
eq = rq->cq.mcq.eq;
err = mlx5e_health_channel_eq_recover(rq->netdev, eq, rq->cq.ch_stats);
if (err && rq->icosq)
clear_bit(MLX5E_SQ_STATE_ENABLED, &rq->icosq->state);
+ mutex_unlock(&priv->state_lock);
+
return err;
}
@@ -311,16 +318,15 @@ mlx5e_rx_reporter_diagnose_common_ptp_config(struct mlx5e_priv *priv, struct mlx
struct devlink_fmsg *fmsg)
{
mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP");
- devlink_fmsg_u32_pair_put(fmsg, "filter_type", priv->tstamp.rx_filter);
+ devlink_fmsg_u32_pair_put(fmsg, "filter_type",
+ priv->hwtstamp_config.rx_filter);
mlx5e_rx_reporter_diagnose_generic_rq(&ptp_ch->rq, fmsg);
mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
static void
-mlx5e_rx_reporter_diagnose_common_config(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg)
+mlx5e_rx_reporter_diagnose_common_config(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg)
{
- struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_rq *generic_rq = &priv->channels.c[0]->rq;
struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
@@ -340,20 +346,100 @@ static void mlx5e_rx_reporter_build_diagnose_output_ptp_rq(struct mlx5e_rq *rq,
devlink_fmsg_obj_nest_end(fmsg);
}
-static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg,
- struct netlink_ext_ack *extack)
+static void mlx5e_rx_reporter_diagnose_rx_res_dir_tirns(struct mlx5e_rx_res *rx_res,
+ struct devlink_fmsg *fmsg)
{
- struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
- struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
+ unsigned int max_nch = mlx5e_rx_res_get_max_nch(rx_res);
int i;
- mutex_lock(&priv->state_lock);
+ devlink_fmsg_arr_pair_nest_start(fmsg, "Direct TIRs");
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- goto unlock;
+ for (i = 0; i < max_nch; i++) {
+ devlink_fmsg_obj_nest_start(fmsg);
+
+ devlink_fmsg_u32_pair_put(fmsg, "ix", i);
+ devlink_fmsg_u32_pair_put(fmsg, "tirn", mlx5e_rx_res_get_tirn_direct(rx_res, i));
+ devlink_fmsg_u32_pair_put(fmsg, "rqtn", mlx5e_rx_res_get_rqtn_direct(rx_res, i));
+
+ devlink_fmsg_obj_nest_end(fmsg);
+ }
+
+ devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static void mlx5e_rx_reporter_diagnose_rx_res_rss_tirn(struct mlx5e_rss *rss, bool inner,
+ struct devlink_fmsg *fmsg)
+{
+ bool found_valid_tir = false;
+ int tt;
+
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ if (!mlx5e_rss_valid_tir(rss, tt, inner))
+ continue;
+
+ if (!found_valid_tir) {
+ char *tir_msg = inner ? "Inner TIRs Numbers" : "TIRs Numbers";
+
+ found_valid_tir = true;
+ devlink_fmsg_arr_pair_nest_start(fmsg, tir_msg);
+ }
+
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_string_pair_put(fmsg, "tt", mlx5_ttc_get_name(tt));
+ devlink_fmsg_u32_pair_put(fmsg, "tirn", mlx5e_rss_get_tirn(rss, tt, inner));
+ devlink_fmsg_obj_nest_end(fmsg);
+ }
+
+ if (found_valid_tir)
+ devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static void mlx5e_rx_reporter_diagnose_rx_res_rss_ix(struct mlx5e_rx_res *rx_res, u32 rss_idx,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_rss *rss = mlx5e_rx_res_rss_get(rx_res, rss_idx);
+
+ if (!rss)
+ return;
+
+ devlink_fmsg_obj_nest_start(fmsg);
+
+ devlink_fmsg_u32_pair_put(fmsg, "Index", rss_idx);
+ devlink_fmsg_u32_pair_put(fmsg, "rqtn", mlx5e_rss_get_rqtn(rss));
+ mlx5e_rx_reporter_diagnose_rx_res_rss_tirn(rss, false, fmsg);
+ if (mlx5e_rss_get_inner_ft_support(rss))
+ mlx5e_rx_reporter_diagnose_rx_res_rss_tirn(rss, true, fmsg);
+
+ devlink_fmsg_obj_nest_end(fmsg);
+}
+
+static void mlx5e_rx_reporter_diagnose_rx_res_rss(struct mlx5e_rx_res *rx_res,
+ struct devlink_fmsg *fmsg)
+{
+ int rss_ix;
+
+ devlink_fmsg_arr_pair_nest_start(fmsg, "RSS");
+ for (rss_ix = 0; rss_ix < MLX5E_MAX_NUM_RSS; rss_ix++)
+ mlx5e_rx_reporter_diagnose_rx_res_rss_ix(rx_res, rss_ix, fmsg);
+ devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static void mlx5e_rx_reporter_diagnose_rx_res(struct mlx5e_priv *priv,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_rx_res *rx_res = priv->rx_res;
+
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX resources");
+ mlx5e_rx_reporter_diagnose_rx_res_dir_tirns(rx_res, fmsg);
+ mlx5e_rx_reporter_diagnose_rx_res_rss(rx_res, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+}
+
+static void mlx5e_rx_reporter_diagnose_rqs(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
+ int i;
- mlx5e_rx_reporter_diagnose_common_config(reporter, fmsg);
devlink_fmsg_arr_pair_nest_start(fmsg, "RQs");
for (i = 0; i < priv->channels.num; i++) {
@@ -367,7 +453,24 @@ static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
}
if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state))
mlx5e_rx_reporter_build_diagnose_output_ptp_rq(&ptp_ch->rq, fmsg);
+
devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
+
+ mutex_lock(&priv->state_lock);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto unlock;
+
+ mlx5e_rx_reporter_diagnose_common_config(priv, fmsg);
+ mlx5e_rx_reporter_diagnose_rqs(priv, fmsg);
+ mlx5e_rx_reporter_diagnose_rx_res(priv, fmsg);
unlock:
mutex_unlock(&priv->state_lock);
return 0;
@@ -549,25 +652,29 @@ void mlx5e_reporter_icosq_resume_recovery(struct mlx5e_channel *c)
mutex_unlock(&c->icosq_recovery_lock);
}
+#define MLX5E_REPORTER_RX_GRACEFUL_PERIOD 500
+#define MLX5E_REPORTER_RX_BURST_PERIOD 500
+
static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = {
.name = "rx",
.recover = mlx5e_rx_reporter_recover,
.diagnose = mlx5e_rx_reporter_diagnose,
.dump = mlx5e_rx_reporter_dump,
+ .default_graceful_period = MLX5E_REPORTER_RX_GRACEFUL_PERIOD,
+ .default_burst_period = MLX5E_REPORTER_RX_BURST_PERIOD,
};
-#define MLX5E_REPORTER_RX_GRACEFUL_PERIOD 500
-
void mlx5e_reporter_rx_create(struct mlx5e_priv *priv)
{
+ struct devlink_port *port = priv->netdev->devlink_port;
struct devlink_health_reporter *reporter;
- reporter = devlink_port_health_reporter_create(priv->netdev->devlink_port,
+ reporter = devlink_port_health_reporter_create(port,
&mlx5_rx_reporter_ops,
- MLX5E_REPORTER_RX_GRACEFUL_PERIOD, priv);
+ priv);
if (IS_ERR(reporter)) {
- netdev_warn(priv->netdev, "Failed to create rx reporter, err = %ld\n",
- PTR_ERR(reporter));
+ netdev_warn(priv->netdev, "Failed to create rx reporter, err = %pe\n",
+ reporter);
return;
}
priv->rx_reporter = reporter;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 09433b91be17..9e2cf191ed30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -13,10 +13,8 @@ static const char * const sq_sw_state_type_name[] = {
[MLX5E_SQ_STATE_RECOVERING] = "recovering",
[MLX5E_SQ_STATE_IPSEC] = "ipsec",
[MLX5E_SQ_STATE_DIM] = "dim",
- [MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE] = "vlan_need_l2_inline",
[MLX5E_SQ_STATE_PENDING_XSK_TX] = "pending_xsk_tx",
[MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC] = "pending_tls_rx_resync",
- [MLX5E_SQ_STATE_XDP_MULTIBUF] = "xdp_multibuf",
};
static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
@@ -108,9 +106,7 @@ static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
mlx5e_reset_txqsq_cc_pc(sq);
sq->stats->recover++;
clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
- rtnl_lock();
mlx5e_activate_txqsq(sq);
- rtnl_unlock();
if (sq->channel)
mlx5e_trigger_napi_icosq(sq->channel);
@@ -184,16 +180,12 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
carrier_ok = netif_carrier_ok(netdev);
netif_carrier_off(netdev);
- rtnl_lock();
mlx5e_deactivate_priv_channels(priv);
- rtnl_unlock();
mlx5e_ptp_close(chs->ptp);
err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp);
- rtnl_lock();
mlx5e_activate_priv_channels(priv);
- rtnl_unlock();
/* return carrier back if needed */
if (carrier_ok)
@@ -319,6 +311,30 @@ out:
mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
+static void
+mlx5e_tx_reporter_diagnose_tis_config(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
+ u8 num_tc = mlx5e_get_dcb_num_tc(&priv->channels.params);
+ u32 tc, i, tisn;
+
+ devlink_fmsg_arr_pair_nest_start(fmsg, "TIS Config");
+ for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) {
+ for (tc = 0; tc < num_tc; tc++) {
+ tisn = mlx5e_profile_get_tisn(priv->mdev, priv,
+ priv->profile, i, tc);
+
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_u32_pair_put(fmsg, "lag port", i);
+ devlink_fmsg_u32_pair_put(fmsg, "tc", tc);
+ devlink_fmsg_u32_pair_put(fmsg, "tisn", tisn);
+ devlink_fmsg_obj_nest_end(fmsg);
+ }
+ }
+ devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg,
struct netlink_ext_ack *extack)
@@ -334,6 +350,7 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
goto unlock;
mlx5e_tx_reporter_diagnose_common_config(reporter, fmsg);
+ mlx5e_tx_reporter_diagnose_tis_config(reporter, fmsg);
devlink_fmsg_arr_pair_nest_start(fmsg, "SQs");
for (i = 0; i < priv->channels.num; i++) {
@@ -522,26 +539,30 @@ void mlx5e_reporter_tx_ptpsq_unhealthy(struct mlx5e_ptpsq *ptpsq)
mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
}
+#define MLX5E_REPORTER_TX_GRACEFUL_PERIOD 500
+#define MLX5E_REPORTER_TX_BURST_PERIOD 500
+
static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = {
.name = "tx",
.recover = mlx5e_tx_reporter_recover,
.diagnose = mlx5e_tx_reporter_diagnose,
.dump = mlx5e_tx_reporter_dump,
+ .default_graceful_period = MLX5E_REPORTER_TX_GRACEFUL_PERIOD,
+ .default_burst_period = MLX5E_REPORTER_TX_BURST_PERIOD,
};
-#define MLX5_REPORTER_TX_GRACEFUL_PERIOD 500
-
void mlx5e_reporter_tx_create(struct mlx5e_priv *priv)
{
+ struct devlink_port *port = priv->netdev->devlink_port;
struct devlink_health_reporter *reporter;
- reporter = devlink_port_health_reporter_create(priv->netdev->devlink_port,
+ reporter = devlink_port_health_reporter_create(port,
&mlx5_tx_reporter_ops,
- MLX5_REPORTER_TX_GRACEFUL_PERIOD, priv);
+ priv);
if (IS_ERR(reporter)) {
netdev_warn(priv->netdev,
- "Failed to create tx reporter, err = %ld\n",
- PTR_ERR(reporter));
+ "Failed to create tx reporter, err = %pe\n",
+ reporter);
return;
}
priv->tx_reporter = reporter;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
index 5f742f896600..88b0e1050d1a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
@@ -75,18 +75,22 @@ struct mlx5e_rss {
struct mlx5e_tir *inner_tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_rqt rqt;
struct mlx5_core_dev *mdev; /* primary */
- u32 drop_rqn;
- bool inner_ft_support;
+ struct mlx5e_rss_params params;
bool enabled;
refcount_t refcnt;
};
+bool mlx5e_rss_get_inner_ft_support(struct mlx5e_rss *rss)
+{
+ return rss->params.inner_ft_support;
+}
+
void mlx5e_rss_params_indir_modify_actual_size(struct mlx5e_rss *rss, u32 num_channels)
{
rss->indir.actual_table_size = mlx5e_rqt_size(rss->mdev, num_channels);
}
-int mlx5e_rss_params_indir_init(struct mlx5e_rss_params_indir *indir, struct mlx5_core_dev *mdev,
+int mlx5e_rss_params_indir_init(struct mlx5e_rss_params_indir *indir,
u32 actual_table_size, u32 max_table_size)
{
indir->table = kvmalloc_array(max_table_size, sizeof(*indir->table), GFP_KERNEL);
@@ -134,7 +138,8 @@ static struct mlx5e_rss *mlx5e_rss_init_copy(const struct mlx5e_rss *from)
if (!rss)
return ERR_PTR(-ENOMEM);
- err = mlx5e_rss_params_indir_init(&rss->indir, from->mdev, from->indir.actual_table_size,
+ err = mlx5e_rss_params_indir_init(&rss->indir,
+ from->indir.actual_table_size,
from->indir.max_table_size);
if (err)
goto err_free_rss;
@@ -156,6 +161,7 @@ static void mlx5e_rss_params_init(struct mlx5e_rss *rss)
{
enum mlx5_traffic_types tt;
+ rss->hash.symmetric = true;
rss->hash.hfunc = ETH_RSS_HASH_TOP;
netdev_rss_key_fill(rss->hash.toeplitz_hash_key,
sizeof(rss->hash.toeplitz_hash_key));
@@ -186,11 +192,12 @@ mlx5e_rss_get_tt_config(struct mlx5e_rss *rss, enum mlx5_traffic_types tt)
return rss_tt;
}
-static int mlx5e_rss_create_tir(struct mlx5e_rss *rss,
- enum mlx5_traffic_types tt,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
- bool inner)
+static int
+mlx5e_rss_create_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
+ const struct mlx5e_packet_merge_param *pkt_merge_param,
+ bool inner)
{
+ bool rss_inner = rss->params.inner_ft_support;
struct mlx5e_rss_params_traffic_type rss_tt;
struct mlx5e_tir_builder *builder;
struct mlx5e_tir **tir_p;
@@ -198,7 +205,7 @@ static int mlx5e_rss_create_tir(struct mlx5e_rss *rss,
u32 rqtn;
int err;
- if (inner && !rss->inner_ft_support) {
+ if (inner && !rss_inner) {
mlx5e_rss_warn(rss->mdev,
"Cannot create inner indirect TIR[%d], RSS inner FT is not supported.\n",
tt);
@@ -221,9 +228,11 @@ static int mlx5e_rss_create_tir(struct mlx5e_rss *rss,
rqtn = mlx5e_rqt_get_rqtn(&rss->rqt);
mlx5e_tir_builder_build_rqt(builder, rss->mdev->mlx5e_res.hw_objs.td.tdn,
- rqtn, rss->inner_ft_support);
- mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param);
+ rqtn, rss_inner);
+ mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param);
rss_tt = mlx5e_rss_get_tt_config(rss, tt);
+ mlx5e_tir_builder_build_self_lb_block(builder, rss->params.self_lb_blk,
+ rss->params.self_lb_blk);
mlx5e_tir_builder_build_rss(builder, &rss->hash, &rss_tt, inner);
err = mlx5e_tir_init(tir, builder, rss->mdev, true);
@@ -258,15 +267,16 @@ static void mlx5e_rss_destroy_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types
*tir_p = NULL;
}
-static int mlx5e_rss_create_tirs(struct mlx5e_rss *rss,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
- bool inner)
+static int
+mlx5e_rss_create_tirs(struct mlx5e_rss *rss,
+ const struct mlx5e_packet_merge_param *pkt_merge_param,
+ bool inner)
{
enum mlx5_traffic_types tt, max_tt;
int err;
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
- err = mlx5e_rss_create_tir(rss, tt, init_pkt_merge_param, inner);
+ err = mlx5e_rss_create_tir(rss, tt, pkt_merge_param, inner);
if (err)
goto err_destroy_tirs;
}
@@ -329,7 +339,7 @@ static int mlx5e_rss_update_tirs(struct mlx5e_rss *rss)
tt, err);
}
- if (!rss->inner_ft_support)
+ if (!rss->params.inner_ft_support)
continue;
err = mlx5e_rss_update_tir(rss, tt, true);
@@ -349,14 +359,16 @@ static int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss)
refcount_set(&rss->refcnt, 1);
return mlx5e_rqt_init_direct(&rss->rqt, rss->mdev, true,
- rss->drop_rqn, rss->indir.max_table_size);
+ rss->params.drop_rqn,
+ rss->indir.max_table_size);
}
-struct mlx5e_rss *mlx5e_rss_init(struct mlx5_core_dev *mdev, bool inner_ft_support, u32 drop_rqn,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
- enum mlx5e_rss_init_type type, unsigned int nch,
- unsigned int max_nch)
+struct mlx5e_rss *
+mlx5e_rss_init(struct mlx5_core_dev *mdev,
+ const struct mlx5e_rss_params *params,
+ const struct mlx5e_rss_init_params *init_params)
{
+ u32 rqt_max_size, rqt_size;
struct mlx5e_rss *rss;
int err;
@@ -364,29 +376,31 @@ struct mlx5e_rss *mlx5e_rss_init(struct mlx5_core_dev *mdev, bool inner_ft_suppo
if (!rss)
return ERR_PTR(-ENOMEM);
- err = mlx5e_rss_params_indir_init(&rss->indir, mdev,
- mlx5e_rqt_size(mdev, nch),
- mlx5e_rqt_size(mdev, max_nch));
+ rqt_size = mlx5e_rqt_size(mdev, init_params->nch);
+ rqt_max_size = mlx5e_rqt_size(mdev, init_params->max_nch);
+ err = mlx5e_rss_params_indir_init(&rss->indir, rqt_size, rqt_max_size);
if (err)
goto err_free_rss;
rss->mdev = mdev;
- rss->inner_ft_support = inner_ft_support;
- rss->drop_rqn = drop_rqn;
+ rss->params = *params;
err = mlx5e_rss_init_no_tirs(rss);
if (err)
goto err_free_indir;
- if (type == MLX5E_RSS_INIT_NO_TIRS)
+ if (init_params->type == MLX5E_RSS_INIT_NO_TIRS)
goto out;
- err = mlx5e_rss_create_tirs(rss, init_pkt_merge_param, false);
+ err = mlx5e_rss_create_tirs(rss, init_params->pkt_merge_param,
+ false);
if (err)
goto err_destroy_rqt;
- if (inner_ft_support) {
- err = mlx5e_rss_create_tirs(rss, init_pkt_merge_param, true);
+ if (params->inner_ft_support) {
+ err = mlx5e_rss_create_tirs(rss,
+ init_params->pkt_merge_param,
+ true);
if (err)
goto err_destroy_tirs;
}
@@ -412,7 +426,7 @@ int mlx5e_rss_cleanup(struct mlx5e_rss *rss)
mlx5e_rss_destroy_tirs(rss, false);
- if (rss->inner_ft_support)
+ if (rss->params.inner_ft_support)
mlx5e_rss_destroy_tirs(rss, true);
mlx5e_rqt_destroy(&rss->rqt);
@@ -442,20 +456,30 @@ u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
{
struct mlx5e_tir *tir;
- WARN_ON(inner && !rss->inner_ft_support);
+ WARN_ON(inner && !rss->params.inner_ft_support);
tir = rss_get_tir(rss, tt, inner);
WARN_ON(!tir);
return mlx5e_tir_get_tirn(tir);
}
+u32 mlx5e_rss_get_rqtn(struct mlx5e_rss *rss)
+{
+ return mlx5e_rqt_get_rqtn(&rss->rqt);
+}
+
+bool mlx5e_rss_valid_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, bool inner)
+{
+ return !!rss_get_tir(rss, tt, inner);
+}
+
/* Fill the "tirn" output parameter.
* Create the requested TIR if it's its first usage.
*/
-int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
- enum mlx5_traffic_types tt,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
- bool inner, u32 *tirn)
+int
+mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
+ const struct mlx5e_packet_merge_param *pkt_merge_param,
+ bool inner, u32 *tirn)
{
struct mlx5e_tir *tir;
@@ -463,7 +487,7 @@ int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
if (!tir) { /* TIR doesn't exist, create one */
int err;
- err = mlx5e_rss_create_tir(rss, tt, init_pkt_merge_param, inner);
+ err = mlx5e_rss_create_tir(rss, tt, pkt_merge_param, inner);
if (err)
return err;
tir = rss_get_tir(rss, tt, inner);
@@ -496,10 +520,11 @@ void mlx5e_rss_disable(struct mlx5e_rss *rss)
int err;
rss->enabled = false;
- err = mlx5e_rqt_redirect_direct(&rss->rqt, rss->drop_rqn, NULL);
+ err = mlx5e_rqt_redirect_direct(&rss->rqt, rss->params.drop_rqn, NULL);
if (err)
mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to drop RQ %#x: err = %d\n",
- mlx5e_rqt_get_rqtn(&rss->rqt), rss->drop_rqn, err);
+ mlx5e_rqt_get_rqtn(&rss->rqt),
+ rss->params.drop_rqn, err);
}
int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
@@ -532,7 +557,7 @@ int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
}
inner_tir:
- if (!rss->inner_ft_support)
+ if (!rss->params.inner_ft_support)
continue;
tir = rss_get_tir(rss, tt, true);
@@ -551,7 +576,8 @@ inner_tir:
return final_err;
}
-int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc)
+void mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc,
+ bool *symmetric)
{
if (indir)
memcpy(indir, rss->indir.table,
@@ -564,11 +590,12 @@ int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc)
if (hfunc)
*hfunc = rss->hash.hfunc;
- return 0;
+ if (symmetric)
+ *symmetric = rss->hash.symmetric;
}
int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
- const u8 *key, const u8 *hfunc,
+ const u8 *key, const u8 *hfunc, const bool *symmetric,
u32 *rqns, u32 *vhca_ids, unsigned int num_rqns)
{
bool changed_indir = false;
@@ -608,6 +635,11 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
rss->indir.actual_table_size * sizeof(*rss->indir.table));
}
+ if (symmetric) {
+ rss->hash.symmetric = *symmetric;
+ changed_hash = true;
+ }
+
if (changed_indir && rss->enabled) {
err = mlx5e_rss_apply(rss, rqns, vhca_ids, num_rqns);
if (err) {
@@ -658,7 +690,7 @@ int mlx5e_rss_set_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
return err;
}
- if (!(rss->inner_ft_support))
+ if (!(rss->params.inner_ft_support))
return 0;
err = mlx5e_rss_update_tir(rss, tt, true);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
index d0df98963c8d..17664757a561 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
@@ -13,40 +13,57 @@ enum mlx5e_rss_init_type {
MLX5E_RSS_INIT_TIRS
};
+struct mlx5e_rss_init_params {
+ enum mlx5e_rss_init_type type;
+ const struct mlx5e_packet_merge_param *pkt_merge_param;
+ unsigned int nch;
+ unsigned int max_nch;
+};
+
+struct mlx5e_rss_params {
+ bool inner_ft_support;
+ u32 drop_rqn;
+ bool self_lb_blk;
+};
+
struct mlx5e_rss_params_traffic_type
mlx5e_rss_get_default_tt_config(enum mlx5_traffic_types tt);
struct mlx5e_rss;
-int mlx5e_rss_params_indir_init(struct mlx5e_rss_params_indir *indir, struct mlx5_core_dev *mdev,
+int mlx5e_rss_params_indir_init(struct mlx5e_rss_params_indir *indir,
u32 actual_table_size, u32 max_table_size);
void mlx5e_rss_params_indir_cleanup(struct mlx5e_rss_params_indir *indir);
void mlx5e_rss_params_indir_modify_actual_size(struct mlx5e_rss *rss, u32 num_channels);
-struct mlx5e_rss *mlx5e_rss_init(struct mlx5_core_dev *mdev, bool inner_ft_support, u32 drop_rqn,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
- enum mlx5e_rss_init_type type, unsigned int nch,
- unsigned int max_nch);
+struct mlx5e_rss *
+mlx5e_rss_init(struct mlx5_core_dev *mdev,
+ const struct mlx5e_rss_params *params,
+ const struct mlx5e_rss_init_params *init_params);
int mlx5e_rss_cleanup(struct mlx5e_rss *rss);
void mlx5e_rss_refcnt_inc(struct mlx5e_rss *rss);
void mlx5e_rss_refcnt_dec(struct mlx5e_rss *rss);
unsigned int mlx5e_rss_refcnt_read(struct mlx5e_rss *rss);
+bool mlx5e_rss_get_inner_ft_support(struct mlx5e_rss *rss);
u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
bool inner);
-int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
- enum mlx5_traffic_types tt,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
- bool inner, u32 *tirn);
+bool mlx5e_rss_valid_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, bool inner);
+u32 mlx5e_rss_get_rqtn(struct mlx5e_rss *rss);
+int
+mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
+ const struct mlx5e_packet_merge_param *pkt_merge_param,
+ bool inner, u32 *tirn);
void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, u32 *vhca_ids, unsigned int num_rqns);
void mlx5e_rss_disable(struct mlx5e_rss *rss);
int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
struct mlx5e_packet_merge_param *pkt_merge_param);
-int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc);
+void mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc,
+ bool *symmetric);
int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
- const u8 *key, const u8 *hfunc,
+ const u8 *key, const u8 *hfunc, const bool *symmetric,
u32 *rqns, u32 *vhca_ids, unsigned int num_rqns);
struct mlx5e_rss_params_hash mlx5e_rss_get_hash(struct mlx5e_rss *rss);
u8 mlx5e_rss_get_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
index a86eade9a9e0..55c117b7d8c4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -5,8 +5,6 @@
#include "channels.h"
#include "params.h"
-#define MLX5E_MAX_NUM_RSS 16
-
struct mlx5e_rx_res {
struct mlx5_core_dev *mdev; /* primary */
enum mlx5e_rx_res_features features;
@@ -56,51 +54,74 @@ static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
unsigned int init_nch)
{
bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
+ struct mlx5e_rss_init_params init_params;
+ struct mlx5e_rss_params rss_params;
struct mlx5e_rss *rss;
if (WARN_ON(res->rss[0]))
return -EINVAL;
- rss = mlx5e_rss_init(res->mdev, inner_ft_support, res->drop_rqn,
- &res->pkt_merge_param, MLX5E_RSS_INIT_TIRS, init_nch, res->max_nch);
+ init_params = (struct mlx5e_rss_init_params) {
+ .type = MLX5E_RSS_INIT_TIRS,
+ .pkt_merge_param = &res->pkt_merge_param,
+ .nch = init_nch,
+ .max_nch = res->max_nch,
+ };
+
+ rss_params = (struct mlx5e_rss_params) {
+ .inner_ft_support = inner_ft_support,
+ .drop_rqn = res->drop_rqn,
+ .self_lb_blk =
+ res->features & MLX5E_RX_RES_FEATURE_SELF_LB_BLOCK,
+ };
+
+ rss = mlx5e_rss_init(res->mdev, &rss_params, &init_params);
if (IS_ERR(rss))
return PTR_ERR(rss);
- mlx5e_rss_set_indir_uniform(rss, init_nch);
+ mlx5e_rss_set_indir_uniform(rss, init_params.nch);
res->rss[0] = rss;
return 0;
}
-int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int init_nch)
+int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 rss_idx, unsigned int init_nch)
{
bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
+ struct mlx5e_rss_init_params init_params;
+ struct mlx5e_rss_params rss_params;
struct mlx5e_rss *rss;
- int i;
- for (i = 1; i < MLX5E_MAX_NUM_RSS; i++)
- if (!res->rss[i])
- break;
-
- if (i == MLX5E_MAX_NUM_RSS)
+ if (WARN_ON_ONCE(res->rss[rss_idx]))
return -ENOSPC;
- rss = mlx5e_rss_init(res->mdev, inner_ft_support, res->drop_rqn,
- &res->pkt_merge_param, MLX5E_RSS_INIT_NO_TIRS, init_nch,
- res->max_nch);
+ init_params = (struct mlx5e_rss_init_params) {
+ .type = MLX5E_RSS_INIT_NO_TIRS,
+ .pkt_merge_param = &res->pkt_merge_param,
+ .nch = init_nch,
+ .max_nch = res->max_nch,
+ };
+
+ rss_params = (struct mlx5e_rss_params) {
+ .inner_ft_support = inner_ft_support,
+ .drop_rqn = res->drop_rqn,
+ .self_lb_blk =
+ res->features & MLX5E_RX_RES_FEATURE_SELF_LB_BLOCK,
+ };
+
+ rss = mlx5e_rss_init(res->mdev, &rss_params, &init_params);
if (IS_ERR(rss))
return PTR_ERR(rss);
- mlx5e_rss_set_indir_uniform(rss, init_nch);
+ mlx5e_rss_set_indir_uniform(rss, init_params.nch);
if (res->rss_active) {
u32 *vhca_ids = get_vhca_ids(res, 0);
mlx5e_rss_enable(rss, res->rss_rqns, vhca_ids, res->rss_nch);
}
- res->rss[i] = rss;
- *rss_idx = i;
+ res->rss[rss_idx] = rss;
return 0;
}
@@ -195,23 +216,22 @@ void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int n
mlx5e_rss_set_indir_uniform(res->rss[0], nch);
}
-int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
- u32 *indir, u8 *key, u8 *hfunc)
+void mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
+ u32 *indir, u8 *key, u8 *hfunc, bool *symmetric)
{
- struct mlx5e_rss *rss;
+ struct mlx5e_rss *rss = NULL;
- if (rss_idx >= MLX5E_MAX_NUM_RSS)
- return -EINVAL;
+ if (rss_idx < MLX5E_MAX_NUM_RSS)
+ rss = res->rss[rss_idx];
+ if (WARN_ON_ONCE(!rss))
+ return;
- rss = res->rss[rss_idx];
- if (!rss)
- return -ENOENT;
-
- return mlx5e_rss_get_rxfh(rss, indir, key, hfunc);
+ mlx5e_rss_get_rxfh(rss, indir, key, hfunc, symmetric);
}
int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
- const u32 *indir, const u8 *key, const u8 *hfunc)
+ const u32 *indir, const u8 *key, const u8 *hfunc,
+ const bool *symmetric)
{
u32 *vhca_ids = get_vhca_ids(res, 0);
struct mlx5e_rss *rss;
@@ -223,8 +243,8 @@ int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
if (!rss)
return -ENOENT;
- return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, res->rss_rqns, vhca_ids,
- res->rss_nch);
+ return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, symmetric,
+ res->rss_rqns, vhca_ids, res->rss_nch);
}
int mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx,
@@ -330,6 +350,7 @@ static struct mlx5e_rx_res *mlx5e_rx_res_alloc(struct mlx5_core_dev *mdev, unsig
static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res)
{
bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
+ bool self_lb_blk = res->features & MLX5E_RX_RES_FEATURE_SELF_LB_BLOCK;
struct mlx5e_tir_builder *builder;
int err = 0;
int ix;
@@ -360,6 +381,8 @@ static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res)
mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
inner_ft_support);
mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
+ mlx5e_tir_builder_build_self_lb_block(builder, self_lb_blk,
+ self_lb_blk);
mlx5e_tir_builder_build_direct(builder);
err = mlx5e_tir_init(&res->channels[ix].direct_tir, builder, res->mdev, true);
@@ -447,7 +470,7 @@ static void mlx5e_rx_res_ptp_destroy(struct mlx5e_rx_res *res)
struct mlx5e_rx_res *
mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features features,
unsigned int max_nch, u32 drop_rqn,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
+ const struct mlx5e_packet_merge_param *pkt_merge_param,
unsigned int init_nch)
{
bool multi_vhca = features & MLX5E_RX_RES_FEATURE_MULTI_VHCA;
@@ -463,7 +486,7 @@ mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features featu
res->max_nch = max_nch;
res->drop_rqn = drop_rqn;
- res->pkt_merge_param = *init_pkt_merge_param;
+ res->pkt_merge_param = *pkt_merge_param;
init_rwsem(&res->pkt_merge_param_sem);
err = mlx5e_rx_res_rss_init_def(res, init_nch);
@@ -497,6 +520,11 @@ void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res)
mlx5e_rx_res_free(res);
}
+unsigned int mlx5e_rx_res_get_max_nch(struct mlx5e_rx_res *res)
+{
+ return res->max_nch;
+}
+
u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix)
{
return mlx5e_tir_get_tirn(&res->channels[ix].direct_tir);
@@ -522,7 +550,7 @@ u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res)
return mlx5e_tir_get_tirn(&res->ptp.tir);
}
-static u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
+u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
{
return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt);
}
@@ -575,8 +603,6 @@ void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_chann
for (ix = 0; ix < nch; ix++)
mlx5e_rx_res_channel_activate_direct(res, chs, ix);
- for (ix = nch; ix < res->max_nch; ix++)
- mlx5e_rx_res_channel_deactivate_direct(res, ix);
if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
u32 rqn;
@@ -599,7 +625,7 @@ void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res)
mlx5e_rx_res_rss_disable(res);
- for (ix = 0; ix < res->max_nch; ix++)
+ for (ix = 0; ix < res->rss_nch; ix++)
mlx5e_rx_res_channel_deactivate_direct(res, ix);
if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
index 7b1a9f0f1874..675780120a20 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
@@ -10,6 +10,8 @@
#include "fs.h"
#include "rss.h"
+#define MLX5E_MAX_NUM_RSS 16
+
struct mlx5e_rx_res;
struct mlx5e_channels;
@@ -19,13 +21,14 @@ enum mlx5e_rx_res_features {
MLX5E_RX_RES_FEATURE_INNER_FT = BIT(0),
MLX5E_RX_RES_FEATURE_PTP = BIT(1),
MLX5E_RX_RES_FEATURE_MULTI_VHCA = BIT(2),
+ MLX5E_RX_RES_FEATURE_SELF_LB_BLOCK = BIT(3),
};
/* Setup */
struct mlx5e_rx_res *
mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features features,
unsigned int max_nch, u32 drop_rqn,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
+ const struct mlx5e_packet_merge_param *pkt_merge_param,
unsigned int init_nch);
void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res);
@@ -34,6 +37,9 @@ u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix);
u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res);
+u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix);
+unsigned int mlx5e_rx_res_get_max_nch(struct mlx5e_rx_res *res);
+bool mlx5_rx_res_rss_inner_ft_support(struct mlx5e_rx_res *res);
/* Activate/deactivate API */
void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs);
@@ -43,10 +49,12 @@ void mlx5e_rx_res_xsk_update(struct mlx5e_rx_res *res, struct mlx5e_channels *ch
/* Configuration API */
void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int nch);
-int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
- u32 *indir, u8 *key, u8 *hfunc);
+void mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
+ u32 *indir, u8 *key, u8 *hfunc,
+ bool *symmetric);
int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
- const u32 *indir, const u8 *key, const u8 *hfunc);
+ const u32 *indir, const u8 *key, const u8 *hfunc,
+ const bool *symmetric);
int mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx,
enum mlx5_traffic_types tt);
@@ -55,7 +63,7 @@ int mlx5e_rx_res_rss_set_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx,
int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
struct mlx5e_packet_merge_param *pkt_merge_param);
-int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int init_nch);
+int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 rss_idx, unsigned int init_nch);
int mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx);
int mlx5e_rx_res_rss_cnt(struct mlx5e_rx_res *res);
int mlx5e_rx_res_rss_index(struct mlx5e_rx_res *res, struct mlx5e_rss *rss);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
index d6c12d0ea55b..2e528b2c34d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
@@ -73,11 +73,6 @@ struct mlx5e_tc_act {
bool is_terminating_action;
};
-struct mlx5e_tc_flow_action {
- unsigned int num_entries;
- struct flow_action_entry **entries;
-};
-
extern struct mlx5e_tc_act mlx5e_tc_act_drop;
extern struct mlx5e_tc_act mlx5e_tc_act_trap;
extern struct mlx5e_tc_act mlx5e_tc_act_accept;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
index feeb41693c17..b6cabe829f19 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
@@ -5,6 +5,16 @@
#include "en/tc_priv.h"
#include "en/tc_ct.h"
+static bool
+tc_act_can_offload_ct(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index,
+ struct mlx5_flow_attr *attr)
+{
+ return !((act->ct.action & TCA_CT_ACT_COMMIT) &&
+ flow_action_is_last_entry(parse_state->flow_action, act));
+}
+
static int
tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
@@ -56,6 +66,7 @@ tc_act_is_missable_ct(const struct flow_action_entry *act)
}
struct mlx5e_tc_act mlx5e_tc_act_ct = {
+ .can_offload = tc_act_can_offload_ct,
.parse_action = tc_act_parse_ct,
.post_parse = tc_act_post_parse_ct,
.is_multi_table_act = tc_act_is_multi_table_act_ct,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
index a13c5e707b83..9bdb5820c553 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
@@ -94,29 +94,30 @@ mlx5e_tc_act_vlan_add_push_action(struct mlx5e_priv *priv,
struct net_device **out_dev,
struct netlink_ext_ack *extack)
{
- struct net_device *vlan_dev = *out_dev;
- struct flow_action_entry vlan_act = {
- .id = FLOW_ACTION_VLAN_PUSH,
- .vlan.vid = vlan_dev_vlan_id(vlan_dev),
- .vlan.proto = vlan_dev_vlan_proto(vlan_dev),
- .vlan.prio = 0,
- };
- int err;
-
- err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action, extack, NULL);
- if (err)
- return err;
-
- rcu_read_lock();
- *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev), dev_get_iflink(vlan_dev));
- rcu_read_unlock();
- if (!*out_dev)
- return -ENODEV;
+ do {
+ struct net_device *vlan_dev = *out_dev;
+ struct flow_action_entry vlan_act = {
+ .id = FLOW_ACTION_VLAN_PUSH,
+ .vlan.vid = vlan_dev_vlan_id(vlan_dev),
+ .vlan.proto = vlan_dev_vlan_proto(vlan_dev),
+ .vlan.prio = 0,
+ };
+ int err;
+
+ err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr,
+ &attr->action, extack, NULL);
+ if (err)
+ return err;
- if (is_vlan_dev(*out_dev))
- err = mlx5e_tc_act_vlan_add_push_action(priv, attr, out_dev, extack);
+ rcu_read_lock();
+ *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev),
+ dev_get_iflink(vlan_dev));
+ rcu_read_unlock();
+ if (!*out_dev)
+ return -ENODEV;
+ } while (is_vlan_dev(*out_dev));
- return err;
+ return 0;
}
int
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
index 62b3f7ff5562..e5b30801314b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
@@ -48,4 +48,14 @@ mlx5_ct_fs_smfs_ops_get(void)
}
#endif /* IS_ENABLED(CONFIG_MLX5_SW_STEERING) */
+#if IS_ENABLED(CONFIG_MLX5_HW_STEERING)
+struct mlx5_ct_fs_ops *mlx5_ct_fs_hmfs_ops_get(void);
+#else
+static inline struct mlx5_ct_fs_ops *
+mlx5_ct_fs_hmfs_ops_get(void)
+{
+ return NULL;
+}
+#endif /* IS_ENABLED(CONFIG_MLX5_HW_STEERING) */
+
#endif /* __MLX5_EN_TC_CT_FS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c
new file mode 100644
index 000000000000..d3db6146fcad
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. */
+
+#include "en_tc.h"
+#include "en/tc_ct.h"
+#include "en/tc_priv.h"
+#include "en/tc/ct_fs.h"
+#include "fs_core.h"
+#include "steering/hws/fs_hws_pools.h"
+#include "steering/hws/mlx5hws.h"
+#include "steering/hws/table.h"
+
+struct mlx5_ct_fs_hmfs_matcher {
+ struct mlx5hws_bwc_matcher *hws_bwc_matcher;
+ refcount_t ref;
+};
+
+/* We need {ipv4, ipv6} x {tcp, udp, gre} matchers. */
+#define NUM_MATCHERS (2 * 3)
+
+struct mlx5_ct_fs_hmfs {
+ struct mlx5hws_table *ct_tbl;
+ struct mlx5hws_table *ct_nat_tbl;
+ struct mlx5_flow_table *ct_nat;
+ struct mlx5hws_action *fwd_action;
+ struct mlx5hws_action *last_action;
+ struct mlx5hws_context *ctx;
+ struct mutex lock; /* Guards matchers */
+ struct mlx5_ct_fs_hmfs_matcher matchers[NUM_MATCHERS];
+ struct mlx5_ct_fs_hmfs_matcher matchers_nat[NUM_MATCHERS];
+};
+
+struct mlx5_ct_fs_hmfs_rule {
+ struct mlx5_ct_fs_rule fs_rule;
+ struct mlx5hws_bwc_rule *hws_bwc_rule;
+ struct mlx5_ct_fs_hmfs_matcher *hmfs_matcher;
+ struct mlx5_fc *counter;
+};
+
+static u32 get_matcher_idx(bool ipv4, bool tcp, bool gre)
+{
+ return ipv4 * 3 + tcp * 2 + gre;
+}
+
+static int mlx5_ct_fs_hmfs_init(struct mlx5_ct_fs *fs, struct mlx5_flow_table *ct,
+ struct mlx5_flow_table *ct_nat, struct mlx5_flow_table *post_ct)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5hws_table *ct_tbl, *ct_nat_tbl, *post_ct_tbl;
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+
+ ct_tbl = ct->fs_hws_table.hws_table;
+ ct_nat_tbl = ct_nat->fs_hws_table.hws_table;
+ post_ct_tbl = post_ct->fs_hws_table.hws_table;
+ fs_hmfs->ct_nat = ct_nat;
+
+ if (!ct_tbl || !ct_nat_tbl || !post_ct_tbl) {
+ netdev_warn(fs->netdev, "ct_fs_hmfs: failed to init, missing backing hws tables");
+ return -EOPNOTSUPP;
+ }
+
+ netdev_dbg(fs->netdev, "using hmfs steering");
+
+ fs_hmfs->ct_tbl = ct_tbl;
+ fs_hmfs->ct_nat_tbl = ct_nat_tbl;
+ fs_hmfs->ctx = ct_tbl->ctx;
+ mutex_init(&fs_hmfs->lock);
+
+ fs_hmfs->fwd_action = mlx5hws_action_create_dest_table(ct_tbl->ctx, post_ct_tbl, flags);
+ if (!fs_hmfs->fwd_action) {
+ netdev_warn(fs->netdev, "ct_fs_hmfs: failed to create fwd action\n");
+ return -EINVAL;
+ }
+ fs_hmfs->last_action = mlx5hws_action_create_last(ct_tbl->ctx, flags);
+ if (!fs_hmfs->last_action) {
+ netdev_warn(fs->netdev, "ct_fs_hmfs: failed to create last action\n");
+ mlx5hws_action_destroy(fs_hmfs->fwd_action);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void mlx5_ct_fs_hmfs_destroy(struct mlx5_ct_fs *fs)
+{
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+
+ mlx5hws_action_destroy(fs_hmfs->last_action);
+ mlx5hws_action_destroy(fs_hmfs->fwd_action);
+}
+
+static struct mlx5hws_bwc_matcher *
+mlx5_ct_fs_hmfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5hws_table *tbl,
+ struct mlx5_flow_spec *spec, bool ipv4, bool tcp, bool gre)
+{
+ u8 match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2 | MLX5_MATCH_OUTER_HEADERS;
+ struct mlx5hws_match_parameters mask = {
+ .match_buf = spec->match_criteria,
+ .match_sz = sizeof(spec->match_criteria),
+ };
+ u32 priority = get_matcher_idx(ipv4, tcp, gre); /* Static priority based on params. */
+ struct mlx5hws_bwc_matcher *hws_bwc_matcher;
+
+ hws_bwc_matcher = mlx5hws_bwc_matcher_create(tbl, priority, match_criteria_enable, &mask);
+ if (!hws_bwc_matcher)
+ return ERR_PTR(-EINVAL);
+
+ return hws_bwc_matcher;
+}
+
+static struct mlx5_ct_fs_hmfs_matcher *
+mlx5_ct_fs_hmfs_matcher_get(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
+ bool nat, bool ipv4, bool tcp, bool gre)
+{
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+ u32 matcher_idx = get_matcher_idx(ipv4, tcp, gre);
+ struct mlx5_ct_fs_hmfs_matcher *hmfs_matcher;
+ struct mlx5hws_bwc_matcher *hws_bwc_matcher;
+ struct mlx5hws_table *tbl;
+
+ hmfs_matcher = nat ?
+ (fs_hmfs->matchers_nat + matcher_idx) :
+ (fs_hmfs->matchers + matcher_idx);
+
+ if (refcount_inc_not_zero(&hmfs_matcher->ref))
+ return hmfs_matcher;
+
+ mutex_lock(&fs_hmfs->lock);
+
+ /* Retry with lock, as the matcher might be already created by another cpu. */
+ if (refcount_inc_not_zero(&hmfs_matcher->ref))
+ goto out_unlock;
+
+ tbl = nat ? fs_hmfs->ct_nat_tbl : fs_hmfs->ct_tbl;
+
+ hws_bwc_matcher = mlx5_ct_fs_hmfs_matcher_create(fs, tbl, spec, ipv4, tcp, gre);
+ if (IS_ERR(hws_bwc_matcher)) {
+ netdev_warn(fs->netdev,
+ "ct_fs_hmfs: failed to create bwc matcher (nat %d, ipv4 %d, tcp %d, gre %d), err: %pe\n",
+ nat, ipv4, tcp, gre, hws_bwc_matcher);
+
+ hmfs_matcher = ERR_CAST(hws_bwc_matcher);
+ goto out_unlock;
+ }
+
+ hmfs_matcher->hws_bwc_matcher = hws_bwc_matcher;
+ refcount_set(&hmfs_matcher->ref, 1);
+
+out_unlock:
+ mutex_unlock(&fs_hmfs->lock);
+ return hmfs_matcher;
+}
+
+static void
+mlx5_ct_fs_hmfs_matcher_put(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_hmfs_matcher *hmfs_matcher)
+{
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+
+ if (!refcount_dec_and_mutex_lock(&hmfs_matcher->ref, &fs_hmfs->lock))
+ return;
+
+ mlx5hws_bwc_matcher_destroy(hmfs_matcher->hws_bwc_matcher);
+ mutex_unlock(&fs_hmfs->lock);
+}
+
+#define NUM_CT_HMFS_RULES 4
+
+static void mlx5_ct_fs_hmfs_fill_rule_actions(struct mlx5_ct_fs_hmfs *fs_hmfs,
+ struct mlx5_flow_attr *attr,
+ struct mlx5hws_rule_action *rule_actions)
+{
+ struct mlx5_fs_hws_action *mh_action = &attr->modify_hdr->fs_hws_action;
+
+ memset(rule_actions, 0, NUM_CT_HMFS_RULES * sizeof(*rule_actions));
+ rule_actions[0].action = mlx5_fc_get_hws_action(fs_hmfs->ctx, attr->counter);
+ rule_actions[0].counter.offset =
+ attr->counter->id - attr->counter->bulk->base_id;
+ /* Modify header is special, it may require extra arguments outside the action itself. */
+ if (mh_action->mh_data) {
+ rule_actions[1].modify_header.offset = mh_action->mh_data->offset;
+ rule_actions[1].modify_header.data = mh_action->mh_data->data;
+ }
+ rule_actions[1].action = mh_action->hws_action;
+ rule_actions[2].action = fs_hmfs->fwd_action;
+ rule_actions[3].action = fs_hmfs->last_action;
+}
+
+static struct mlx5_ct_fs_rule *
+mlx5_ct_fs_hmfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr, struct flow_rule *flow_rule)
+{
+ struct mlx5hws_rule_action rule_actions[NUM_CT_HMFS_RULES];
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+ struct mlx5hws_match_parameters match_params = {
+ .match_buf = spec->match_value,
+ .match_sz = ARRAY_SIZE(spec->match_value),
+ };
+ struct mlx5_ct_fs_hmfs_matcher *hmfs_matcher;
+ struct mlx5_ct_fs_hmfs_rule *hmfs_rule;
+ bool nat, tcp, ipv4, gre;
+ int err;
+
+ if (!mlx5e_tc_ct_is_valid_flow_rule(fs->netdev, flow_rule))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ hmfs_rule = kzalloc(sizeof(*hmfs_rule), GFP_KERNEL);
+ if (!hmfs_rule)
+ return ERR_PTR(-ENOMEM);
+
+ nat = (attr->ft == fs_hmfs->ct_nat);
+ ipv4 = mlx5e_tc_get_ip_version(spec, true) == 4;
+ tcp = MLX5_GET(fte_match_param, spec->match_value,
+ outer_headers.ip_protocol) == IPPROTO_TCP;
+ gre = MLX5_GET(fte_match_param, spec->match_value,
+ outer_headers.ip_protocol) == IPPROTO_GRE;
+
+ hmfs_matcher = mlx5_ct_fs_hmfs_matcher_get(fs, spec, nat, ipv4, tcp, gre);
+ if (IS_ERR(hmfs_matcher)) {
+ err = PTR_ERR(hmfs_matcher);
+ goto err_free_rule;
+ }
+ hmfs_rule->hmfs_matcher = hmfs_matcher;
+
+ mlx5_ct_fs_hmfs_fill_rule_actions(fs_hmfs, attr, rule_actions);
+ hmfs_rule->counter = attr->counter;
+
+ hmfs_rule->hws_bwc_rule =
+ mlx5hws_bwc_rule_create(hmfs_matcher->hws_bwc_matcher, &match_params,
+ spec->flow_context.flow_source, rule_actions);
+ if (!hmfs_rule->hws_bwc_rule) {
+ err = -EINVAL;
+ goto err_put_matcher;
+ }
+
+ return &hmfs_rule->fs_rule;
+
+err_put_matcher:
+ mlx5_fc_put_hws_action(hmfs_rule->counter);
+ mlx5_ct_fs_hmfs_matcher_put(fs, hmfs_matcher);
+err_free_rule:
+ kfree(hmfs_rule);
+ return ERR_PTR(err);
+}
+
+static void mlx5_ct_fs_hmfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule)
+{
+ struct mlx5_ct_fs_hmfs_rule *hmfs_rule = container_of(fs_rule,
+ struct mlx5_ct_fs_hmfs_rule,
+ fs_rule);
+ mlx5hws_bwc_rule_destroy(hmfs_rule->hws_bwc_rule);
+ mlx5_fc_put_hws_action(hmfs_rule->counter);
+ mlx5_ct_fs_hmfs_matcher_put(fs, hmfs_rule->hmfs_matcher);
+ kfree(hmfs_rule);
+}
+
+static int mlx5_ct_fs_hmfs_ct_rule_update(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule,
+ struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr)
+{
+ struct mlx5_ct_fs_hmfs_rule *hmfs_rule = container_of(fs_rule,
+ struct mlx5_ct_fs_hmfs_rule,
+ fs_rule);
+ struct mlx5hws_rule_action rule_actions[NUM_CT_HMFS_RULES];
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+ int err;
+
+ mlx5_ct_fs_hmfs_fill_rule_actions(fs_hmfs, attr, rule_actions);
+
+ err = mlx5hws_bwc_rule_action_update(hmfs_rule->hws_bwc_rule, rule_actions);
+ if (err) {
+ mlx5_fc_put_hws_action(attr->counter);
+ return err;
+ }
+
+ mlx5_fc_put_hws_action(hmfs_rule->counter);
+ hmfs_rule->counter = attr->counter;
+
+ return 0;
+}
+
+static struct mlx5_ct_fs_ops hmfs_ops = {
+ .ct_rule_add = mlx5_ct_fs_hmfs_ct_rule_add,
+ .ct_rule_del = mlx5_ct_fs_hmfs_ct_rule_del,
+ .ct_rule_update = mlx5_ct_fs_hmfs_ct_rule_update,
+
+ .init = mlx5_ct_fs_hmfs_init,
+ .destroy = mlx5_ct_fs_hmfs_destroy,
+
+ .priv_size = sizeof(struct mlx5_ct_fs_hmfs),
+};
+
+struct mlx5_ct_fs_ops *mlx5_ct_fs_hmfs_ops_get(void)
+{
+ return &hmfs_ops;
+}
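
Note on the matcher cache in mlx5_ct_fs_hmfs_matcher_get()/_put() above: a reference is taken on a lock-free fast path, with a mutex-protected slow path that re-checks the refcount before creating the matcher, so two CPUs racing on the same tuple type create it only once. A minimal standalone sketch of that get/put pattern, not part of the patch; obj_create()/obj_destroy() are hypothetical stand-ins for the bwc matcher create/destroy calls:

struct cached_obj {
	struct obj *o;
	refcount_t ref;
};

static struct obj *cached_obj_get(struct cached_obj *c, struct mutex *lock)
{
	/* Fast path: take a reference without the lock if the object exists. */
	if (refcount_inc_not_zero(&c->ref))
		return c->o;

	mutex_lock(lock);
	/* Re-check under the lock: another CPU may have created it meanwhile. */
	if (refcount_inc_not_zero(&c->ref))
		goto out_unlock;

	c->o = obj_create();	/* hypothetical constructor, returns ERR_PTR on failure */
	if (!IS_ERR(c->o))
		refcount_set(&c->ref, 1);
out_unlock:
	mutex_unlock(lock);
	return c->o;
}

static void cached_obj_put(struct cached_obj *c, struct mutex *lock)
{
	/* Destroy only when the last reference drops, under the same lock. */
	if (!refcount_dec_and_mutex_lock(&c->ref, lock))
		return;
	obj_destroy(c->o);	/* hypothetical destructor */
	mutex_unlock(lock);
}
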
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
index 45737d039252..4d6924b644c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
@@ -13,7 +13,6 @@
#define INIT_ERR_PREFIX "ct_fs_smfs init failed"
#define ct_dbg(fmt, args...)\
netdev_dbg(fs->netdev, "ct_fs_smfs debug: " fmt "\n", ##args)
-#define MLX5_CT_TCP_FLAGS_MASK cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16)
struct mlx5_ct_fs_smfs_matcher {
struct mlx5dr_matcher *dr_matcher;
@@ -149,8 +148,8 @@ mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp
dr_matcher = mlx5_ct_fs_smfs_matcher_create(fs, tbl, ipv4, tcp, gre, prio);
if (IS_ERR(dr_matcher)) {
netdev_warn(fs->netdev,
- "ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d, gre %d), err: %ld\n",
- nat, ipv4, tcp, gre, PTR_ERR(dr_matcher));
+ "ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d, gre %d), err: %pe\n",
+ nat, ipv4, tcp, gre, dr_matcher);
smfs_matcher = ERR_CAST(dr_matcher);
goto out_unlock;
@@ -220,78 +219,6 @@ mlx5_ct_fs_smfs_destroy(struct mlx5_ct_fs *fs)
mlx5_smfs_action_destroy(fs_smfs->fwd_action);
}
-static inline bool
-mlx5_tc_ct_valid_used_dissector_keys(const u64 used_keys)
-{
-#define DISS_BIT(name) BIT_ULL(FLOW_DISSECTOR_KEY_ ## name)
- const u64 basic_keys = DISS_BIT(BASIC) | DISS_BIT(CONTROL) |
- DISS_BIT(META);
- const u64 ipv4_tcp = basic_keys | DISS_BIT(IPV4_ADDRS) |
- DISS_BIT(PORTS) | DISS_BIT(TCP);
- const u64 ipv6_tcp = basic_keys | DISS_BIT(IPV6_ADDRS) |
- DISS_BIT(PORTS) | DISS_BIT(TCP);
- const u64 ipv4_udp = basic_keys | DISS_BIT(IPV4_ADDRS) |
- DISS_BIT(PORTS);
- const u64 ipv6_udp = basic_keys | DISS_BIT(IPV6_ADDRS) |
- DISS_BIT(PORTS);
- const u64 ipv4_gre = basic_keys | DISS_BIT(IPV4_ADDRS);
- const u64 ipv6_gre = basic_keys | DISS_BIT(IPV6_ADDRS);
-
- return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp ||
- used_keys == ipv6_udp || used_keys == ipv4_gre || used_keys == ipv6_gre);
-}
-
-static bool
-mlx5_ct_fs_smfs_ct_validate_flow_rule(struct mlx5_ct_fs *fs, struct flow_rule *flow_rule)
-{
- struct flow_match_ipv4_addrs ipv4_addrs;
- struct flow_match_ipv6_addrs ipv6_addrs;
- struct flow_match_control control;
- struct flow_match_basic basic;
- struct flow_match_ports ports;
- struct flow_match_tcp tcp;
-
- if (!mlx5_tc_ct_valid_used_dissector_keys(flow_rule->match.dissector->used_keys)) {
- ct_dbg("rule uses unexpected dissectors (0x%016llx)",
- flow_rule->match.dissector->used_keys);
- return false;
- }
-
- flow_rule_match_basic(flow_rule, &basic);
- flow_rule_match_control(flow_rule, &control);
- flow_rule_match_ipv4_addrs(flow_rule, &ipv4_addrs);
- flow_rule_match_ipv6_addrs(flow_rule, &ipv6_addrs);
- if (basic.key->ip_proto != IPPROTO_GRE)
- flow_rule_match_ports(flow_rule, &ports);
- if (basic.key->ip_proto == IPPROTO_TCP)
- flow_rule_match_tcp(flow_rule, &tcp);
-
- if (basic.mask->n_proto != htons(0xFFFF) ||
- (basic.key->n_proto != htons(ETH_P_IP) && basic.key->n_proto != htons(ETH_P_IPV6)) ||
- basic.mask->ip_proto != 0xFF ||
- (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP &&
- basic.key->ip_proto != IPPROTO_GRE)) {
- ct_dbg("rule uses unexpected basic match (n_proto 0x%04x/0x%04x, ip_proto 0x%02x/0x%02x)",
- ntohs(basic.key->n_proto), ntohs(basic.mask->n_proto),
- basic.key->ip_proto, basic.mask->ip_proto);
- return false;
- }
-
- if (basic.key->ip_proto != IPPROTO_GRE &&
- (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF))) {
- ct_dbg("rule uses ports match (src 0x%04x, dst 0x%04x)",
- ports.mask->src, ports.mask->dst);
- return false;
- }
-
- if (basic.key->ip_proto == IPPROTO_TCP && tcp.mask->flags != MLX5_CT_TCP_FLAGS_MASK) {
- ct_dbg("rule uses unexpected tcp match (flags 0x%02x)", tcp.mask->flags);
- return false;
- }
-
- return true;
-}
-
static struct mlx5_ct_fs_rule *
mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr, struct flow_rule *flow_rule)
@@ -304,7 +231,7 @@ mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
int num_actions = 0, err;
bool nat, tcp, ipv4, gre;
- if (!mlx5_ct_fs_smfs_ct_validate_flow_rule(fs, flow_rule))
+ if (!mlx5e_tc_ct_is_valid_flow_rule(fs->netdev, flow_rule))
return ERR_PTR(-EOPNOTSUPP);
smfs_rule = kzalloc(sizeof(*smfs_rule), GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c
index 8afcec0c5d3c..991f47050643 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c
@@ -93,8 +93,8 @@ mlx5e_int_port_create_rx_rule(struct mlx5_eswitch *esw,
flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
&flow_act, dest, 1);
if (IS_ERR(flow_rule))
- mlx5_core_warn(esw->dev, "ft offloads: Failed to add internal vport rx rule err %ld\n",
- PTR_ERR(flow_rule));
+ mlx5_core_warn(esw->dev, "ft offloads: Failed to add internal vport rx rule err %pe\n",
+ flow_rule);
kvfree(spec);
@@ -307,7 +307,8 @@ mlx5e_tc_int_port_init(struct mlx5e_priv *priv)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_tc_int_port_priv *int_port_priv;
- u64 mapping_id;
+ u8 mapping_id[MLX5_SW_IMAGE_GUID_MAX_BYTES];
+ u8 id_len;
if (!mlx5e_tc_int_port_supported(esw))
return NULL;
@@ -316,14 +317,15 @@ mlx5e_tc_int_port_init(struct mlx5e_priv *priv)
if (!int_port_priv)
return NULL;
- mapping_id = mlx5_query_nic_system_image_guid(priv->mdev);
+ mlx5_query_nic_sw_system_image_guid(priv->mdev, mapping_id, &id_len);
- int_port_priv->metadata_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_INT_PORT,
+ int_port_priv->metadata_mapping = mapping_create_for_id(mapping_id, id_len,
+ MAPPING_TYPE_INT_PORT,
sizeof(u32) * 2,
(1 << ESW_VPORT_BITS) - 1, true);
if (IS_ERR(int_port_priv->metadata_mapping)) {
- mlx5_core_warn(priv->mdev, "Can't allocate metadata mapping of int port offload, err=%ld\n",
- PTR_ERR(int_port_priv->metadata_mapping));
+ mlx5_core_warn(priv->mdev, "Can't allocate metadata mapping of int port offload, err=%pe\n",
+ int_port_priv->metadata_mapping);
goto err_mapping;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
index 8218c892b161..7819fb297280 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
@@ -593,3 +593,8 @@ mlx5e_tc_meter_get_stats(struct mlx5e_flow_meter_handle *meter,
*drops = packets2;
*lastuse = max_t(u64, lastuse1, lastuse2);
}
+
+int mlx5e_flow_meter_get_base_id(struct mlx5e_flow_meter_handle *meter)
+{
+ return meter->meters_obj->base_id;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
index 9b795cd106bb..d6afb6556875 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
@@ -72,4 +72,17 @@ void
mlx5e_tc_meter_get_stats(struct mlx5e_flow_meter_handle *meter,
u64 *bytes, u64 *packets, u64 *drops, u64 *lastuse);
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+
+int mlx5e_flow_meter_get_base_id(struct mlx5e_flow_meter_handle *meter);
+
+#else /* CONFIG_MLX5_CLS_ACT */
+
+static inline int
+mlx5e_flow_meter_get_base_id(struct mlx5e_flow_meter_handle *meter)
+{
+ return 0;
+}
+#endif /* CONFIG_MLX5_CLS_ACT */
+
#endif /* __MLX5_EN_FLOW_METER_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 4877a9d86807..fc0e57403d25 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -866,7 +866,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
return 0;
err_rule:
- mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, zone_rule->mh);
+ mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, attr, zone_rule->mh);
mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
err_mod_hdr:
kfree(attr);
@@ -1195,6 +1195,7 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
struct flow_action_entry *meta_action;
unsigned long cookie = flow->cookie;
struct mlx5_ct_entry *entry;
+ bool has_nat;
int err;
meta_action = mlx5_tc_ct_get_ct_metadata_action(flow_rule);
@@ -1236,6 +1237,8 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
err = mlx5_tc_ct_rule_to_tuple_nat(&entry->tuple_nat, flow_rule);
if (err)
goto err_set;
+ has_nat = memcmp(&entry->tuple, &entry->tuple_nat,
+ sizeof(entry->tuple));
spin_lock_bh(&ct_priv->ht_lock);
@@ -1244,7 +1247,7 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
if (err)
goto err_entries;
- if (memcmp(&entry->tuple, &entry->tuple_nat, sizeof(entry->tuple))) {
+ if (has_nat) {
err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_nat_ht,
&entry->tuple_nat_node,
tuples_nat_ht_params);
@@ -1349,6 +1352,32 @@ mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
return 0;
}
+static bool
+mlx5_tc_ct_filter_legacy_non_nic_flows(struct mlx5_ct_ft *ft,
+ struct flow_cls_offload *flow)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+ struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
+ struct flow_match_meta match;
+ struct net_device *netdev;
+ bool same_dev = false;
+
+ if (!is_mdev_legacy_mode(ct_priv->dev) ||
+ !flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
+ return true;
+
+ flow_rule_match_meta(rule, &match);
+
+ if (!(match.key->ingress_ifindex & match.mask->ingress_ifindex))
+ return true;
+
+ netdev = dev_get_by_index(&init_net, match.key->ingress_ifindex);
+ same_dev = ct_priv->netdev == netdev;
+ dev_put(netdev);
+
+ return same_dev;
+}
+
static int
mlx5_tc_ct_block_flow_offload(enum tc_setup_type type, void *type_data,
void *cb_priv)
@@ -1361,6 +1390,9 @@ mlx5_tc_ct_block_flow_offload(enum tc_setup_type type, void *type_data,
switch (f->command) {
case FLOW_CLS_REPLACE:
+ if (!mlx5_tc_ct_filter_legacy_non_nic_flows(ft, f))
+ return -EOPNOTSUPP;
+
return mlx5_tc_ct_block_flow_offload_add(ft, f);
case FLOW_CLS_DESTROY:
return mlx5_tc_ct_block_flow_offload_del(ft, f);
@@ -2065,10 +2097,19 @@ mlx5_tc_ct_fs_init(struct mlx5_tc_ct_priv *ct_priv)
struct mlx5_ct_fs_ops *fs_ops = mlx5_ct_fs_dmfs_ops_get();
int err;
- if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB &&
- ct_priv->dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS) {
- ct_dbg("Using SMFS ct flow steering provider");
- fs_ops = mlx5_ct_fs_smfs_ops_get();
+ if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB) {
+ if (ct_priv->dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_HMFS) {
+ ct_dbg("Using HMFS ct flow steering provider");
+ fs_ops = mlx5_ct_fs_hmfs_ops_get();
+ } else if (ct_priv->dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS) {
+ ct_dbg("Using SMFS ct flow steering provider");
+ fs_ops = mlx5_ct_fs_smfs_ops_get();
+ }
+
+ if (!fs_ops) {
+ ct_dbg("Requested flow steering mode is not enabled.");
+ return -EOPNOTSUPP;
+ }
}
ct_priv->fs = kzalloc(sizeof(*ct_priv->fs) + fs_ops->priv_size, GFP_KERNEL);
@@ -2246,9 +2287,10 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
enum mlx5_flow_namespace_type ns_type,
struct mlx5e_post_act *post_act)
{
+ u8 mapping_id[MLX5_SW_IMAGE_GUID_MAX_BYTES];
struct mlx5_tc_ct_priv *ct_priv;
struct mlx5_core_dev *dev;
- u64 mapping_id;
+ u8 id_len;
int err;
dev = priv->mdev;
@@ -2260,16 +2302,18 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
if (!ct_priv)
goto err_alloc;
- mapping_id = mlx5_query_nic_system_image_guid(dev);
+ mlx5_query_nic_sw_system_image_guid(dev, mapping_id, &id_len);
- ct_priv->zone_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_ZONE,
+ ct_priv->zone_mapping = mapping_create_for_id(mapping_id, id_len,
+ MAPPING_TYPE_ZONE,
sizeof(u16), 0, true);
if (IS_ERR(ct_priv->zone_mapping)) {
err = PTR_ERR(ct_priv->zone_mapping);
goto err_mapping_zone;
}
- ct_priv->labels_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_LABELS,
+ ct_priv->labels_mapping = mapping_create_for_id(mapping_id, id_len,
+ MAPPING_TYPE_LABELS,
sizeof(u32) * 4, 0, true);
if (IS_ERR(ct_priv->labels_mapping)) {
err = PTR_ERR(ct_priv->labels_mapping);
@@ -2421,3 +2465,74 @@ out_inc_drop:
atomic_inc(&ct_priv->debugfs.stats.rx_dropped);
return false;
}
+
+static bool mlx5e_tc_ct_valid_used_dissector_keys(const u64 used_keys)
+{
+#define DISS_BIT(name) BIT_ULL(FLOW_DISSECTOR_KEY_ ## name)
+ const u64 basic_keys = DISS_BIT(BASIC) | DISS_BIT(CONTROL) |
+ DISS_BIT(META);
+ const u64 ipv4_tcp = basic_keys | DISS_BIT(IPV4_ADDRS) |
+ DISS_BIT(PORTS) | DISS_BIT(TCP);
+ const u64 ipv6_tcp = basic_keys | DISS_BIT(IPV6_ADDRS) |
+ DISS_BIT(PORTS) | DISS_BIT(TCP);
+ const u64 ipv4_udp = basic_keys | DISS_BIT(IPV4_ADDRS) |
+ DISS_BIT(PORTS);
+ const u64 ipv6_udp = basic_keys | DISS_BIT(IPV6_ADDRS) |
+ DISS_BIT(PORTS);
+ const u64 ipv4_gre = basic_keys | DISS_BIT(IPV4_ADDRS);
+ const u64 ipv6_gre = basic_keys | DISS_BIT(IPV6_ADDRS);
+
+ return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp ||
+ used_keys == ipv6_udp || used_keys == ipv4_gre || used_keys == ipv6_gre);
+}
+
+bool mlx5e_tc_ct_is_valid_flow_rule(const struct net_device *dev, struct flow_rule *flow_rule)
+{
+ struct flow_match_ipv4_addrs ipv4_addrs;
+ struct flow_match_ipv6_addrs ipv6_addrs;
+ struct flow_match_control control;
+ struct flow_match_basic basic;
+ struct flow_match_ports ports;
+ struct flow_match_tcp tcp;
+
+ if (!mlx5e_tc_ct_valid_used_dissector_keys(flow_rule->match.dissector->used_keys)) {
+ netdev_dbg(dev, "ct_debug: rule uses unexpected dissectors (0x%016llx)",
+ flow_rule->match.dissector->used_keys);
+ return false;
+ }
+
+ flow_rule_match_basic(flow_rule, &basic);
+ flow_rule_match_control(flow_rule, &control);
+ flow_rule_match_ipv4_addrs(flow_rule, &ipv4_addrs);
+ flow_rule_match_ipv6_addrs(flow_rule, &ipv6_addrs);
+ if (basic.key->ip_proto != IPPROTO_GRE)
+ flow_rule_match_ports(flow_rule, &ports);
+ if (basic.key->ip_proto == IPPROTO_TCP)
+ flow_rule_match_tcp(flow_rule, &tcp);
+
+ if (basic.mask->n_proto != htons(0xFFFF) ||
+ (basic.key->n_proto != htons(ETH_P_IP) && basic.key->n_proto != htons(ETH_P_IPV6)) ||
+ basic.mask->ip_proto != 0xFF ||
+ (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP &&
+ basic.key->ip_proto != IPPROTO_GRE)) {
+ netdev_dbg(dev, "ct_debug: rule uses unexpected basic match (n_proto 0x%04x/0x%04x, ip_proto 0x%02x/0x%02x)",
+ ntohs(basic.key->n_proto), ntohs(basic.mask->n_proto),
+ basic.key->ip_proto, basic.mask->ip_proto);
+ return false;
+ }
+
+ if (basic.key->ip_proto != IPPROTO_GRE &&
+ (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF))) {
+ netdev_dbg(dev, "ct_debug: rule uses ports match (src 0x%04x, dst 0x%04x)",
+ ports.mask->src, ports.mask->dst);
+ return false;
+ }
+
+ if (basic.key->ip_proto == IPPROTO_TCP && tcp.mask->flags != MLX5_CT_TCP_FLAGS_MASK) {
+ netdev_dbg(dev, "ct_debug: rule uses unexpected tcp match (flags 0x%02x)",
+ tcp.mask->flags);
+ return false;
+ }
+
+ return true;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
index b66c5f98067f..5e9dbdd4a5e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
@@ -128,6 +128,9 @@ bool
mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
struct sk_buff *skb, u8 zone_restore_id);
+#define MLX5_CT_TCP_FLAGS_MASK cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16)
+bool mlx5e_tc_ct_is_valid_flow_rule(const struct net_device *dev, struct flow_rule *flow_rule);
+
#else /* CONFIG_MLX5_TC_CT */
static inline struct mlx5_tc_ct_priv *
@@ -202,5 +205,12 @@ mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
return false;
}
+static inline bool
+mlx5e_tc_ct_is_valid_flow_rule(const struct net_device *dev,
+ struct flow_rule *flow_rule)
+{
+ return false;
+}
+
#endif /* !IS_ENABLED(CONFIG_MLX5_TC_CT) */
#endif /* __MLX5_EN_TC_CT_H__ */
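
A short worked note (not part of the patch) on the MLX5_CT_TCP_FLAGS_MASK definition moved into this header, since the conversion chain is easy to misread:

/* TCP_FLAG_RST = cpu_to_be32(0x00040000), TCP_FLAG_FIN = cpu_to_be32(0x00010000):
 * the flag bits live in bits 16..23 of the 32-bit TCP header word that also
 * carries the data offset. So:
 *   be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) = 0x00050000
 *   0x00050000 >> 16                         = 0x0005
 *   cpu_to_be16(0x0005)                      = a __be16 doff/flags field with
 *                                              only RST and FIN set, i.e. the
 *                                              layout of flow_match_tcp flags,
 *                                              which is what the validation in
 *                                              mlx5e_tc_ct_is_valid_flow_rule()
 *                                              compares against.
 */
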
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 721f35e59757..a14f216048cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2018 Mellanox Technologies. */
-#include <net/inet_ecn.h>
+#include <net/flow.h>
+#include <net/inet_dscp.h>
#include <net/vxlan.h>
#include <net/gre.h>
#include <net/geneve.h>
@@ -31,8 +32,7 @@ static void mlx5e_tc_tun_route_attr_cleanup(struct mlx5e_tc_tun_route_attr *attr
{
if (attr->n)
neigh_release(attr->n);
- if (attr->route_dev)
- dev_put(attr->route_dev);
+ dev_put(attr->route_dev);
}
struct mlx5e_tc_tunnel *mlx5e_get_tc_tun(struct net_device *tunnel_dev)
@@ -68,16 +68,14 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
* while holding rcu read lock. Take the net_device for correctness
* sake.
*/
- if (uplink_upper)
- dev_hold(uplink_upper);
+ dev_hold(uplink_upper);
rcu_read_unlock();
dst_is_lag_dev = (uplink_upper &&
netif_is_lag_master(uplink_upper) &&
real_dev == uplink_upper &&
mlx5_lag_is_sriov(priv->mdev));
- if (uplink_upper)
- dev_put(uplink_upper);
+ dev_put(uplink_upper);
/* if the egress device isn't on the same HW e-switch or
* it's a LAG device, use the uplink
@@ -236,7 +234,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
int err;
/* add the IP fields */
- attr.fl.fl4.flowi4_tos = tun_key->tos & ~INET_ECN_MASK;
+ attr.fl.fl4.flowi4_dscp = inet_dsfield_to_dscp(tun_key->tos);
attr.fl.fl4.daddr = tun_key->u.ipv4.dst;
attr.fl.fl4.saddr = tun_key->u.ipv4.src;
attr.ttl = tun_key->ttl;
@@ -352,7 +350,7 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
int err;
/* add the IP fields */
- attr.fl.fl4.flowi4_tos = tun_key->tos & ~INET_ECN_MASK;
+ attr.fl.fl4.flowi4_dscp = inet_dsfield_to_dscp(tun_key->tos);
attr.fl.fl4.daddr = tun_key->u.ipv4.dst;
attr.fl.fl4.saddr = tun_key->u.ipv4.src;
attr.ttl = tun_key->ttl;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 878cbdbf5ec8..0735d10f2bac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -5,6 +5,7 @@
#include <net/nexthop.h>
#include <net/ip_tunnels.h>
#include "tc_tun_encap.h"
+#include "fs_core.h"
#include "en_tc.h"
#include "tc_tun.h"
#include "rep/tc.h"
@@ -24,17 +25,24 @@ static int mlx5e_set_int_port_tunnel(struct mlx5e_priv *priv,
route_dev = dev_get_by_index(dev_net(e->out_dev), e->route_dev_ifindex);
- if (!route_dev || !netif_is_ovs_master(route_dev) ||
- attr->parse_attr->filter_dev == e->out_dev)
+ if (!route_dev || !netif_is_ovs_master(route_dev))
goto out;
+ if (priv->mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS &&
+ mlx5e_eswitch_uplink_rep(attr->parse_attr->filter_dev) &&
+ (attr->esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)) {
+ mlx5_core_warn(priv->mdev,
+ "Matching on external port with encap + fwd to table actions is not allowed for firmware steering\n");
+ err = -EINVAL;
+ goto out;
+ }
+
err = mlx5e_set_fwd_to_int_port_actions(priv, attr, e->route_dev_ifindex,
MLX5E_TC_INT_PORT_EGRESS,
&attr->action, out_index);
out:
- if (route_dev)
- dev_put(route_dev);
+ dev_put(route_dev);
return err;
}
@@ -164,8 +172,8 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
&reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {
- mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %lu\n",
- PTR_ERR(e->pkt_reformat));
+ mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %pe\n",
+ e->pkt_reformat);
return;
}
e->flags |= MLX5_ENCAP_ENTRY_VALID;
@@ -744,8 +752,7 @@ static int mlx5e_set_vf_tunnel(struct mlx5_eswitch *esw,
}
out:
- if (route_dev)
- dev_put(route_dev);
+ dev_put(route_dev);
return err;
}
@@ -779,8 +786,7 @@ static int mlx5e_update_vf_tunnel(struct mlx5_eswitch *esw,
mlx5e_tc_match_to_reg_mod_hdr_change(esw->dev, mod_hdr_acts, VPORT_TO_REG, act_id, data);
out:
- if (route_dev)
- dev_put(route_dev);
+ dev_put(route_dev);
return err;
}
@@ -1839,8 +1845,8 @@ static int mlx5e_tc_tun_fib_event(struct notifier_block *nb, unsigned long event
queue_work(priv->wq, &fib_work->work);
} else if (IS_ERR(fib_work)) {
NL_SET_ERR_MSG_MOD(info->extack, "Failed to init fib work");
- mlx5_core_warn(priv->mdev, "Failed to init fib work, %ld\n",
- PTR_ERR(fib_work));
+ mlx5_core_warn(priv->mdev, "Failed to init fib work, %pe\n",
+ fib_work);
}
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
index e4e487c8431b..7a18a469961d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
@@ -140,7 +140,7 @@ static int mlx5e_tc_tun_parse_vxlan_gbp_option(struct mlx5e_priv *priv,
gbp_mask = (u32 *)&enc_opts.mask->data[0];
if (*gbp_mask & ~VXLAN_GBP_MASK) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "Wrong VxLAN GBP mask(0x%08X)\n", *gbp_mask);
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Wrong VxLAN GBP mask(0x%08X)", *gbp_mask);
return -EINVAL;
}
@@ -165,9 +165,6 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
struct flow_match_enc_keyid enc_keyid;
void *misc_c, *misc_v;
- misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
- misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
-
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
return 0;
@@ -182,6 +179,30 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
err = mlx5e_tc_tun_parse_vxlan_gbp_option(priv, spec, f);
if (err)
return err;
+
+ /* We can't mix custom tunnel headers with symbolic ones and we
+ * don't have a symbolic field name for GBP, so we use custom
+ * tunnel headers in this case. We need hardware support to
+ * match on custom tunnel headers, but we already know it's
+ * supported because the previous call successfully checked for
+ * that.
+ */
+ misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters_5);
+ misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters_5);
+
+ /* Shift by 8 to account for the reserved bits in the vxlan
+ * header after the VNI.
+ */
+ MLX5_SET(fte_match_set_misc5, misc_c, tunnel_header_1,
+ be32_to_cpu(enc_keyid.mask->keyid) << 8);
+ MLX5_SET(fte_match_set_misc5, misc_v, tunnel_header_1,
+ be32_to_cpu(enc_keyid.key->keyid) << 8);
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5;
+
+ return 0;
}
/* match on VNI is required */
@@ -195,6 +216,11 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
+ misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters);
+ misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters);
+
MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
be32_to_cpu(enc_keyid.mask->keyid));
MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
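
As a side note on the shift-by-8 comment in the GBP path above: tunnel_header_1 covers the second 32-bit word of the VXLAN header, where the 24-bit VNI sits above 8 reserved bits. A tiny illustrative helper, hypothetical name, not part of the patch:

/* VXLAN header layout:
 *   word 0: flags(8) | reserved(24)
 *   word 1: VNI(24)  | reserved(8)
 * so the host-order VNI must be shifted left by 8 to line up with the
 * misc_parameters_5 tunnel_header_1 match field.
 */
static u32 vxlan_vni_to_tunnel_header_1(__be32 keyid)
{
	return be32_to_cpu(keyid) << 8;
}
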
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
index 11f724ad90db..0b55e77f19c8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
@@ -124,7 +124,7 @@ void mlx5e_tir_builder_build_rss(struct mlx5e_tir_builder *builder,
const size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key);
void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
- MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+ MLX5_SET(tirc, tirc, rx_hash_symmetric, rss_hash->symmetric);
memcpy(rss_key, rss_hash->toeplitz_hash_key, len);
}
@@ -146,6 +146,31 @@ void mlx5e_tir_builder_build_direct(struct mlx5e_tir_builder *builder)
MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
}
+static void mlx5e_tir_context_self_lb_block(void *tirc, bool enable_uc_lb,
+ bool enable_mc_lb)
+{
+ u8 lb_flags = 0;
+
+ if (enable_uc_lb)
+ lb_flags = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+ if (enable_mc_lb)
+ lb_flags |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
+
+ MLX5_SET(tirc, tirc, self_lb_block, lb_flags);
+}
+
+void mlx5e_tir_builder_build_self_lb_block(struct mlx5e_tir_builder *builder,
+ bool enable_uc_lb,
+ bool enable_mc_lb)
+{
+ void *tirc = mlx5e_tir_builder_get_tirc(builder);
+
+ if (builder->modify)
+ MLX5_SET(modify_tir_in, builder->in, bitmask.self_lb_en, 1);
+
+ mlx5e_tir_context_self_lb_block(tirc, enable_uc_lb, enable_mc_lb);
+}
+
void mlx5e_tir_builder_build_tls(struct mlx5e_tir_builder *builder)
{
void *tirc = mlx5e_tir_builder_get_tirc(builder);
@@ -153,9 +178,7 @@ void mlx5e_tir_builder_build_tls(struct mlx5e_tir_builder *builder)
WARN_ON(builder->modify);
MLX5_SET(tirc, tirc, tls_en, 1);
- MLX5_SET(tirc, tirc, self_lb_block,
- MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST |
- MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST);
+ mlx5e_tir_context_self_lb_block(tirc, true, true);
}
int mlx5e_tir_init(struct mlx5e_tir *tir, struct mlx5e_tir_builder *builder,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h
index 857a84bcd53a..958eeb959a19 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h
@@ -9,6 +9,7 @@
struct mlx5e_rss_params_hash {
u8 hfunc;
u8 toeplitz_hash_key[40];
+ bool symmetric;
};
struct mlx5e_rss_params_traffic_type {
@@ -34,6 +35,9 @@ void mlx5e_tir_builder_build_rss(struct mlx5e_tir_builder *builder,
const struct mlx5e_rss_params_traffic_type *rss_tt,
bool inner);
void mlx5e_tir_builder_build_direct(struct mlx5e_tir_builder *builder);
+void mlx5e_tir_builder_build_self_lb_block(struct mlx5e_tir_builder *builder,
+ bool enable_uc_lb,
+ bool enable_mc_lb);
void mlx5e_tir_builder_build_tls(struct mlx5e_tir_builder *builder);
struct mlx5_core_dev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index 53ca16cb9c41..da8c44f46edb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -46,8 +46,8 @@ static void mlx5e_init_trap_rq(struct mlx5e_trap *t, struct mlx5e_params *params
rq->pdev = t->pdev;
rq->netdev = priv->netdev;
rq->priv = priv;
- rq->clock = &mdev->clock;
- rq->tstamp = &priv->tstamp;
+ rq->clock = mdev->clock;
+ rq->hwtstamp_config = &priv->hwtstamp_config;
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->stats = &priv->trap_stats.rq;
@@ -76,6 +76,7 @@ static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct mlx5e_trap *t)
ccp.ch_stats = t->stats;
ccp.napi = &t->napi;
ccp.ix = 0;
+ ccp.uar = mdev->priv.bfreg.up;
err = mlx5e_open_cq(priv->mdev, trap_moder, &rq_param->cqp, &ccp, &rq->cq);
if (err)
return err;
@@ -143,13 +144,12 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
t->priv = priv;
t->mdev = priv->mdev;
- t->tstamp = &priv->tstamp;
t->pdev = mlx5_core_dma_dev(priv->mdev);
t->netdev = priv->netdev;
t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
t->stats = &priv->trap_stats.ch;
- netif_napi_add(netdev, &t->napi, mlx5e_trap_napi_poll);
+ netif_napi_add_locked(netdev, &t->napi, mlx5e_trap_napi_poll);
err = mlx5e_open_trap_rq(priv, t);
if (unlikely(err))
@@ -164,7 +164,7 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
err_close_trap_rq:
mlx5e_close_trap_rq(&t->rq);
err_napi_del:
- netif_napi_del(&t->napi);
+ netif_napi_del_locked(&t->napi);
kvfree(t);
return ERR_PTR(err);
}
@@ -173,13 +173,13 @@ void mlx5e_close_trap(struct mlx5e_trap *trap)
{
mlx5e_tir_destroy(&trap->tir);
mlx5e_close_trap_rq(&trap->rq);
- netif_napi_del(&trap->napi);
+ netif_napi_del_locked(&trap->napi);
kvfree(trap);
}
static void mlx5e_activate_trap(struct mlx5e_trap *trap)
{
- napi_enable(&trap->napi);
+ napi_enable_locked(&trap->napi);
mlx5e_activate_rq(&trap->rq);
mlx5e_trigger_napi_sched(&trap->napi);
}
@@ -189,7 +189,7 @@ void mlx5e_deactivate_trap(struct mlx5e_priv *priv)
struct mlx5e_trap *trap = priv->en_trap;
mlx5e_deactivate_rq(&trap->rq);
- napi_disable(&trap->napi);
+ napi_disable_locked(&trap->napi);
}
static struct mlx5e_trap *mlx5e_add_trap_queue(struct mlx5e_priv *priv)
@@ -285,6 +285,7 @@ int mlx5e_handle_trap_event(struct mlx5e_priv *priv, struct mlx5_trap_ctx *trap_
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return 0;
+ netdev_lock(priv->netdev);
switch (trap_ctx->action) {
case DEVLINK_TRAP_ACTION_TRAP:
err = mlx5e_handle_action_trap(priv, trap_ctx->id);
@@ -297,6 +298,7 @@ int mlx5e_handle_trap_event(struct mlx5e_priv *priv, struct mlx5_trap_ctx *trap_
trap_ctx->action);
err = -EINVAL;
}
+ netdev_unlock(priv->netdev);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h
index aa3f17658c6d..394e917ea2b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h
@@ -22,7 +22,6 @@ struct mlx5e_trap {
/* control */
struct mlx5e_priv *priv;
struct mlx5_core_dev *mdev;
- struct hwtstamp_config *tstamp;
DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
struct mlx5e_params params;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 5ec468268d1a..7e191e1569e8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -92,7 +92,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
void mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq);
-static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
+static inline bool mlx5e_rx_hw_stamp(struct kernel_hwtstamp_config *config)
{
return config->rx_filter == HWTSTAMP_FILTER_ALL;
}
@@ -214,6 +214,19 @@ static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
return pi;
}
+static inline u16 mlx5e_txqsq_get_next_pi_anysize(struct mlx5e_txqsq *sq,
+ u16 *size)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi, contig_wqebbs;
+
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+ *size = min_t(u16, contig_wqebbs, sq->max_sq_mpw_wqebbs);
+
+ return pi;
+}
+
void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq);
static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
@@ -296,10 +309,7 @@ mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
- struct mlx5_core_cq *mcq;
-
- mcq = &cq->mcq;
- mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
+ mlx5_cq_arm(&cq->mcq, MLX5_CQ_DB_REQ_NOT, cq->uar->map, cq->wq.cc);
}
static inline struct mlx5e_sq_dma *
@@ -309,14 +319,24 @@ mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
}
static inline void
-mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size,
- enum mlx5e_dma_map_type map_type)
+mlx5e_dma_push_single(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size)
{
struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);
dma->addr = addr;
dma->size = size;
- dma->type = map_type;
+ dma->type = MLX5E_DMA_MAP_SINGLE;
+}
+
+static inline void
+mlx5e_dma_push_netmem(struct mlx5e_txqsq *sq, netmem_ref netmem,
+ dma_addr_t addr, u32 size)
+{
+ struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);
+
+ netmem_dma_unmap_addr_set(netmem, dma, addr, addr);
+ dma->size = size;
+ dma->type = MLX5E_DMA_MAP_PAGE;
}
static inline
@@ -349,7 +369,8 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
break;
case MLX5E_DMA_MAP_PAGE:
- dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
+ netmem_dma_unmap_page_attrs(pdev, dma->addr, dma->size,
+ DMA_TO_DEVICE, 0);
break;
default:
WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
@@ -358,9 +379,9 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq);
-static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
+static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session)
{
- return session->ds_count == max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS;
+ return session->ds_count == session->ds_count_max;
}
static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 4610621a340e..80f9fc10877a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -179,7 +179,7 @@ static int mlx5e_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
{
const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
- if (unlikely(!mlx5e_rx_hw_stamp(_ctx->rq->tstamp)))
+ if (unlikely(!mlx5e_rx_hw_stamp(_ctx->rq->hwtstamp_config)))
return -ENODATA;
*timestamp = mlx5e_cqe_ts_to_ns(_ctx->rq->ptp_cyc2time,
@@ -289,9 +289,9 @@ static u64 mlx5e_xsk_fill_timestamp(void *_priv)
ts = get_cqe_ts(priv->cqe);
if (mlx5_is_real_time_rq(priv->cq->mdev) || mlx5_is_real_time_sq(priv->cq->mdev))
- return mlx5_real_time_cyc2time(&priv->cq->mdev->clock, ts);
+ return mlx5_real_time_cyc2time(priv->cq->mdev->clock, ts);
- return mlx5_timecounter_cyc2time(&priv->cq->mdev->clock, ts);
+ return mlx5_timecounter_cyc2time(priv->cq->mdev->clock, ts);
}
static void mlx5e_xsk_request_checksum(u16 csum_start, u16 csum_offset, void *priv)
@@ -390,6 +390,7 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
.wqe = wqe,
.bytes_count = 0,
.ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
+ .ds_count_max = sq->max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS,
.pkt_count = 0,
.inline_on = mlx5e_xdp_get_inline_state(sq, session->inline_on),
};
@@ -501,7 +502,7 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptx
mlx5e_xdp_mpwqe_add_dseg(sq, p, stats);
- if (unlikely(mlx5e_xdp_mpwqe_is_full(session, sq->max_sq_mpw_wqebbs)))
+ if (unlikely(mlx5e_xdp_mpwqe_is_full(session)))
mlx5e_xdp_mpwqe_complete(sq);
stats->xmit++;
@@ -546,6 +547,7 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
bool inline_ok;
bool linear;
u16 pi;
+ int i;
struct mlx5e_xdpsq_stats *stats = sq->stats;
@@ -612,41 +614,33 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
- if (test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
- int i;
-
- memset(&cseg->trailer, 0, sizeof(cseg->trailer));
- memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));
-
- eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
+ memset(&cseg->trailer, 0, sizeof(cseg->trailer));
+ memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));
- for (i = 0; i < num_frags; i++) {
- skb_frag_t *frag = &xdptxdf->sinfo->frags[i];
- dma_addr_t addr;
+ eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
- addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] :
- page_pool_get_dma_addr(skb_frag_page(frag)) +
- skb_frag_off(frag);
+ for (i = 0; i < num_frags; i++) {
+ skb_frag_t *frag = &xdptxdf->sinfo->frags[i];
+ dma_addr_t addr;
- dseg->addr = cpu_to_be64(addr);
- dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
- dseg->lkey = sq->mkey_be;
- dseg++;
- }
+ addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] :
+ page_pool_get_dma_addr(skb_frag_page(frag)) +
+ skb_frag_off(frag);
- cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+ dseg->addr = cpu_to_be64(addr);
+ dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
+ dseg->lkey = sq->mkey_be;
+ dseg++;
+ }
- sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
- .num_wqebbs = num_wqebbs,
- .num_pkts = 1,
- };
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- sq->pc += num_wqebbs;
- } else {
- cseg->fm_ce_se = 0;
+ sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
+ .num_wqebbs = num_wqebbs,
+ .num_pkts = 1,
+ };
- sq->pc++;
- }
+ sq->pc += num_wqebbs;
xsk_tx_metadata_request(meta, &mlx5e_xsk_tx_metadata_ops, eseg);
@@ -713,10 +707,11 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
page = xdpi.page.page;
- /* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE)
- * as we know this is a page_pool page.
+ /* No need to check page_pool_page_is_pp() as we
+ * know this is a page_pool page.
*/
- page_pool_recycle_direct(page->pp, page);
+ page_pool_recycle_direct(pp_page_to_nmdesc(page)->pp,
+ page);
} while (++n < num);
break;
@@ -865,7 +860,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
if (unlikely(sq_num >= priv->channels.num))
return -ENXIO;
- sq = &priv->channels.c[sq_num]->xdpsq;
+ sq = priv->channels.c[sq_num]->xdpsq;
for (i = 0; i < n; i++) {
struct mlx5e_xmit_data_frags xdptxdf = {};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index e054db1e10f8..46ab0a9e8cdd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -45,12 +45,6 @@
(MLX5E_XDP_INLINE_WQE_MAX_DS_CNT * MLX5_SEND_WQE_DS - \
sizeof(struct mlx5_wqe_inline_seg))
-struct mlx5e_xdp_buff {
- struct xdp_buff xdp;
- struct mlx5_cqe64 *cqe;
- struct mlx5e_rq *rq;
-};
-
/* XDP packets can be transmitted in different ways. On completion, we need to
* distinguish between them to clean up things in a proper way.
*/
@@ -182,13 +176,13 @@ static inline bool mlx5e_xdp_get_inline_state(struct mlx5e_xdpsq *sq, bool cur)
return cur;
}
-static inline bool mlx5e_xdp_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
+static inline bool mlx5e_xdp_mpwqe_is_full(struct mlx5e_tx_mpwqe *session)
{
if (session->inline_on)
return session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT >
- max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS;
+ session->ds_count_max;
- return mlx5e_tx_mpwqe_is_full(session, max_sq_mpw_wqebbs);
+ return mlx5e_tx_mpwqe_is_full(session);
}
struct mlx5e_xdp_wqe_info {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index 1b7132fa70de..2b05536d564a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -123,7 +123,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
bitmap_zero(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
wi->consumed_strides = 0;
- umr_wqe->ctrl.opmod_idx_opcode =
+ umr_wqe->hdr.ctrl.opmod_idx_opcode =
cpu_to_be32((icosq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_UMR);
/* Optimized for speed: keep in sync with mlx5e_mpwrq_umr_entry_size. */
@@ -134,7 +134,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
offset = offset * sizeof(struct mlx5_klm) * 2 / MLX5_OCTWORD;
else if (unlikely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE))
offset = offset * sizeof(struct mlx5_ksm) * 4 / MLX5_OCTWORD;
- umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
+ umr_wqe->hdr.uctrl.xlt_offset = cpu_to_be16(offset);
icosq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
.wqe_type = MLX5E_ICOSQ_WQE_UMR_RX,
@@ -144,7 +144,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
icosq->pc += rq->mpwqe.umr_wqebbs;
- icosq->doorbell_cseg = &umr_wqe->ctrl;
+ icosq->doorbell_cseg = &umr_wqe->hdr.ctrl;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 9240cfe25d10..5981c71cae2d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -54,7 +54,7 @@ static void mlx5e_build_xsk_cparam(struct mlx5_core_dev *mdev,
struct mlx5e_channel_param *cparam)
{
mlx5e_build_rq_param(mdev, params, xsk, &cparam->rq);
- mlx5e_build_xdpsq_param(mdev, params, xsk, &cparam->xdp_sq);
+ mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
}
static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
@@ -71,8 +71,8 @@ static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
rq->pdev = c->pdev;
rq->netdev = c->netdev;
rq->priv = c->priv;
- rq->tstamp = c->tstamp;
- rq->clock = &mdev->clock;
+ rq->hwtstamp_config = &c->priv->hwtstamp_config;
+ rq->clock = mdev->clock;
rq->icosq = &c->icosq;
rq->ix = c->ix;
rq->channel = c;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 33e32584b07f..8bef99e8367e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -42,6 +42,8 @@
#include <en_accel/macsec.h>
#include "en.h"
#include "en/txrx.h"
+#include "en_accel/psp.h"
+#include "en_accel/psp_rxtx.h"
#if IS_ENABLED(CONFIG_GENEVE)
#include <net/geneve.h>
@@ -119,6 +121,9 @@ struct mlx5e_accel_tx_state {
#ifdef CONFIG_MLX5_EN_IPSEC
struct mlx5e_accel_tx_ipsec_state ipsec;
#endif
+#ifdef CONFIG_MLX5_EN_PSP
+ struct mlx5e_accel_tx_psp_state psp_st;
+#endif
};
static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
@@ -137,6 +142,13 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
return false;
#endif
+#ifdef CONFIG_MLX5_EN_PSP
+ if (mlx5e_psp_is_offload(skb, dev)) {
+ if (unlikely(!mlx5e_psp_handle_tx_skb(dev, skb, &state->psp_st)))
+ return false;
+ }
+#endif
+
#ifdef CONFIG_MLX5_EN_IPSEC
if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) && xfrm_offload(skb)) {
if (unlikely(!mlx5e_ipsec_handle_tx_skb(dev, skb, &state->ipsec)))
@@ -157,8 +169,14 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
}
static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
struct mlx5e_accel_tx_state *state)
{
+#ifdef CONFIG_MLX5_EN_PSP
+ if (mlx5e_psp_is_offload_state(&state->psp_st))
+ return mlx5e_psp_tx_ids_len(&state->psp_st);
+#endif
+
#ifdef CONFIG_MLX5_EN_IPSEC
if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state))
return mlx5e_ipsec_tx_ids_len(&state->ipsec);
@@ -172,8 +190,14 @@ static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
static inline void mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
struct sk_buff *skb,
+ struct mlx5e_accel_tx_state *accel,
struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
+#ifdef CONFIG_MLX5_EN_PSP
+ if (mlx5e_psp_is_offload_state(&accel->psp_st))
+ mlx5e_psp_tx_build_eseg(priv, skb, &accel->psp_st, eseg);
+#endif
+
#ifdef CONFIG_MLX5_EN_IPSEC
if (xfrm_offload(skb))
mlx5e_ipsec_tx_build_eseg(priv, skb, eseg);
@@ -199,6 +223,11 @@ static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
mlx5e_ktls_handle_tx_wqe(&wqe->ctrl, &state->tls);
#endif
+#ifdef CONFIG_MLX5_EN_PSP
+ if (mlx5e_psp_is_offload_state(&state->psp_st))
+ mlx5e_psp_handle_tx_wqe(wqe, &state->psp_st, inlseg);
+#endif
+
#ifdef CONFIG_MLX5_EN_IPSEC
if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) &&
state->ipsec.xo && state->ipsec.tailen)
@@ -208,21 +237,40 @@ static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
static inline int mlx5e_accel_init_rx(struct mlx5e_priv *priv)
{
- return mlx5e_ktls_init_rx(priv);
+ int err;
+
+ err = mlx5_accel_psp_fs_init_rx_tables(priv);
+ if (err)
+ goto out;
+
+ err = mlx5e_ktls_init_rx(priv);
+ if (err)
+ mlx5_accel_psp_fs_cleanup_rx_tables(priv);
+
+out:
+ return err;
}
static inline void mlx5e_accel_cleanup_rx(struct mlx5e_priv *priv)
{
mlx5e_ktls_cleanup_rx(priv);
+ mlx5_accel_psp_fs_cleanup_rx_tables(priv);
}
static inline int mlx5e_accel_init_tx(struct mlx5e_priv *priv)
{
+ int err;
+
+ err = mlx5_accel_psp_fs_init_tx_tables(priv);
+ if (err)
+ return err;
+
return mlx5e_ktls_init_tx(priv);
}
static inline void mlx5e_accel_cleanup_tx(struct mlx5e_priv *priv)
{
mlx5e_ktls_cleanup_tx(priv);
+ mlx5_accel_psp_fs_cleanup_tx_tables(priv);
}
#endif /* __MLX5E_EN_ACCEL_H__ */
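The RX path above pairs the new PSP table setup with the existing kTLS init using the usual unwind ordering: initialize in order, undo the earlier step if the later one fails, and clean up in reverse. A minimal sketch with hypothetical placeholders (init_a/init_b/cleanup_a are not driver functions):

static int init_a(void);
static int init_b(void);
static void cleanup_a(void);

static int accel_init_rx_sketch(void)
{
	int err;

	err = init_a();          /* e.g. PSP RX tables */
	if (err)
		return err;

	err = init_b();          /* e.g. kTLS RX */
	if (err)
		cleanup_a();     /* undo the earlier step on failure */

	return err;
}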
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
index 4f83e3172767..1febdc5b81f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
@@ -138,7 +138,7 @@ struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
flow = mlx5_add_flow_rules(ft->t, spec, &flow_act, &dest, 1);
if (IS_ERR(flow))
- fs_err(fs, "mlx5_add_flow_rules() failed, flow is %ld\n", PTR_ERR(flow));
+ fs_err(fs, "mlx5_add_flow_rules() failed, flow is %pe\n", flow);
out:
kvfree(spec);
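On the format-string change above: %pe is the printk extension for error pointers, printing the symbolic error (e.g. "-ENOMEM", with CONFIG_SYMBOLIC_ERRNAME) directly from the ERR_PTR value, so the PTR_ERR()/%ld pair can be dropped. A minimal self-contained illustration, not taken from this driver:

#include <linux/err.h>
#include <linux/printk.h>

static void report_sketch(void *obj)
{
	if (IS_ERR(obj))
		pr_err("lookup failed: %pe\n", obj);  /* e.g. "lookup failed: -ENOMEM" */
}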
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index ca92e518be76..35d9530037a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -36,6 +36,7 @@
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <net/netevent.h>
+#include <net/ipv6_stubs.h>
#include "en.h"
#include "eswitch.h"
@@ -94,25 +95,14 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
u32 esn, esn_msb;
u8 overlap;
- switch (x->xso.type) {
- case XFRM_DEV_OFFLOAD_PACKET:
- switch (x->xso.dir) {
- case XFRM_DEV_OFFLOAD_IN:
- esn = x->replay_esn->seq;
- esn_msb = x->replay_esn->seq_hi;
- break;
- case XFRM_DEV_OFFLOAD_OUT:
- esn = x->replay_esn->oseq;
- esn_msb = x->replay_esn->oseq_hi;
- break;
- default:
- WARN_ON(true);
- return false;
- }
- break;
- case XFRM_DEV_OFFLOAD_CRYPTO:
- /* Already parsed by XFRM core */
+ switch (x->xso.dir) {
+ case XFRM_DEV_OFFLOAD_IN:
esn = x->replay_esn->seq;
+ esn_msb = x->replay_esn->seq_hi;
+ break;
+ case XFRM_DEV_OFFLOAD_OUT:
+ esn = x->replay_esn->oseq;
+ esn_msb = x->replay_esn->oseq_hi;
break;
default:
WARN_ON(true);
@@ -121,11 +111,15 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
overlap = sa_entry->esn_state.overlap;
- if (esn >= x->replay_esn->replay_window)
- seq_bottom = esn - x->replay_esn->replay_window + 1;
+ if (!x->replay_esn->replay_window) {
+ seq_bottom = esn;
+ } else {
+ if (esn >= x->replay_esn->replay_window)
+ seq_bottom = esn - x->replay_esn->replay_window + 1;
- if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO)
- esn_msb = xfrm_replay_seqhi(x, htonl(seq_bottom));
+ if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO)
+ esn_msb = xfrm_replay_seqhi(x, htonl(seq_bottom));
+ }
if (sa_entry->esn_state.esn_msb)
sa_entry->esn_state.esn = esn;
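The reworked branch above boils down to simple window arithmetic; a standalone sketch (plain C, not the driver's types), assuming a zero replay_window means no window is configured:

static unsigned int esn_window_bottom(unsigned int esn,
				      unsigned int replay_window)
{
	if (!replay_window)
		return esn;                      /* no window: bottom is the ESN itself */

	if (esn >= replay_window)
		return esn - replay_window + 1;  /* window fully advanced */

	return 0;                                /* window still anchored at zero */
}

For example, esn = 1000 with a 128-entry window gives a bottom of 873.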
@@ -266,10 +260,15 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+ struct mlx5e_ipsec_addr *addrs = &attrs->addrs;
+ struct net_device *netdev = sa_entry->dev;
struct xfrm_state *x = sa_entry->x;
- struct net_device *netdev;
+ struct dst_entry *rt_dst_entry;
+ struct flowi4 fl4 = {};
+ struct flowi6 fl6 = {};
struct neighbour *n;
u8 addr[ETH_ALEN];
+ struct rtable *rt;
const void *pkey;
u8 *dst, *src;
@@ -277,25 +276,94 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
attrs->type != XFRM_DEV_OFFLOAD_PACKET)
return;
- netdev = x->xso.real_dev;
-
mlx5_query_mac_address(mdev, addr);
switch (attrs->dir) {
case XFRM_DEV_OFFLOAD_IN:
src = attrs->dmac;
dst = attrs->smac;
- pkey = &attrs->saddr.a4;
+
+ switch (addrs->family) {
+ case AF_INET:
+ fl4.flowi4_proto = x->sel.proto;
+ fl4.daddr = addrs->saddr.a4;
+ fl4.saddr = addrs->daddr.a4;
+ pkey = &addrs->saddr.a4;
+ break;
+ case AF_INET6:
+ fl6.flowi6_proto = x->sel.proto;
+ memcpy(fl6.daddr.s6_addr32, addrs->saddr.a6, 16);
+ memcpy(fl6.saddr.s6_addr32, addrs->daddr.a6, 16);
+ pkey = &addrs->saddr.a6;
+ break;
+ default:
+ return;
+ }
break;
case XFRM_DEV_OFFLOAD_OUT:
src = attrs->smac;
dst = attrs->dmac;
- pkey = &attrs->daddr.a4;
+ switch (addrs->family) {
+ case AF_INET:
+ fl4.flowi4_proto = x->sel.proto;
+ fl4.daddr = addrs->daddr.a4;
+ fl4.saddr = addrs->saddr.a4;
+ pkey = &addrs->daddr.a4;
+ break;
+ case AF_INET6:
+ fl6.flowi6_proto = x->sel.proto;
+ memcpy(fl6.daddr.s6_addr32, addrs->daddr.a6, 16);
+ memcpy(fl6.saddr.s6_addr32, addrs->saddr.a6, 16);
+ pkey = &addrs->daddr.a6;
+ break;
+ default:
+ return;
+ }
break;
default:
return;
}
ether_addr_copy(src, addr);
+
+ /* The destination may be on a routed network, so perform a FIB lookup
+ * to resolve the nexthop and get its MAC address. Neighbour resolution
+ * is used as a fallback.
+ */
+ switch (addrs->family) {
+ case AF_INET:
+ rt = ip_route_output_key(dev_net(netdev), &fl4);
+ if (IS_ERR(rt))
+ goto neigh;
+
+ if (rt->rt_type != RTN_UNICAST) {
+ ip_rt_put(rt);
+ goto neigh;
+ }
+ rt_dst_entry = &rt->dst;
+ break;
+ case AF_INET6:
+ rt_dst_entry = ipv6_stub->ipv6_dst_lookup_flow(
+ dev_net(netdev), NULL, &fl6, NULL);
+ if (IS_ERR(rt_dst_entry))
+ goto neigh;
+ break;
+ default:
+ return;
+ }
+
+ n = dst_neigh_lookup(rt_dst_entry, pkey);
+ if (!n) {
+ dst_release(rt_dst_entry);
+ goto neigh;
+ }
+
+ neigh_ha_snapshot(addr, n, netdev);
+ ether_addr_copy(dst, addr);
+ dst_release(rt_dst_entry);
+ neigh_release(n);
+ return;
+
+neigh:
n = neigh_lookup(&arp_tbl, pkey, netdev);
if (!n) {
n = neigh_create(&arp_tbl, pkey, netdev);
@@ -310,6 +378,16 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
neigh_release(n);
}
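A condensed IPv4-only sketch of the lookup order used above (illustrative helper name; assumes <net/route.h> and <net/neighbour.h>; error handling trimmed and the neighbour-table fallback omitted):

static int resolve_nexthop_mac_sketch(struct net_device *netdev,
				      __be32 daddr, u8 *mac)
{
	struct flowi4 fl4 = { .daddr = daddr };
	struct neighbour *n;
	struct rtable *rt;

	rt = ip_route_output_key(dev_net(netdev), &fl4);  /* FIB lookup */
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	n = dst_neigh_lookup(&rt->dst, &daddr);           /* nexthop neighbour */
	if (!n) {
		ip_rt_put(rt);
		return -EHOSTUNREACH;
	}

	neigh_ha_snapshot(mac, n, netdev);                /* copy the resolved MAC */
	neigh_release(n);
	ip_rt_put(rt);
	return 0;
}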
+static void mlx5e_ipsec_state_mask(struct mlx5e_ipsec_addr *addrs)
+{
+ /*
+ * State doesn't have subnet prefixes in outer headers.
+ * The match is performed for exact source/destination addresses.
+ */
+ memset(addrs->smask.m6, 0xFF, sizeof(__be32) * 4);
+ memset(addrs->dmask.m6, 0xFF, sizeof(__be32) * 4);
+}
+
void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_accel_esp_xfrm_attrs *attrs)
{
@@ -381,9 +459,11 @@ skip_replay_window:
attrs->spi = be32_to_cpu(x->id.spi);
/* source , destination ips */
- memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr));
- memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr));
- attrs->family = x->props.family;
+ memcpy(&attrs->addrs.saddr, x->props.saddr.a6,
+ sizeof(attrs->addrs.saddr));
+ memcpy(&attrs->addrs.daddr, x->id.daddr.a6, sizeof(attrs->addrs.daddr));
+ attrs->addrs.family = x->props.family;
+ mlx5e_ipsec_state_mask(&attrs->addrs);
attrs->type = x->xso.type;
attrs->reqid = x->props.reqid;
attrs->upspec.dport = ntohs(x->sel.dport);
@@ -435,7 +515,8 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
}
if (x->encap) {
if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESPINUDP)) {
- NL_SET_ERR_MSG_MOD(extack, "Encapsulation is not supported");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Encapsulation is not supported");
return -EINVAL;
}
@@ -686,17 +767,18 @@ static int mlx5e_ipsec_create_dwork(struct mlx5e_ipsec_sa_entry *sa_entry)
return 0;
}
-static int mlx5e_xfrm_add_state(struct xfrm_state *x,
+static int mlx5e_xfrm_add_state(struct net_device *dev,
+ struct xfrm_state *x,
struct netlink_ext_ack *extack)
{
struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
- struct net_device *netdev = x->xso.real_dev;
+ bool allow_tunnel_mode = false;
struct mlx5e_ipsec *ipsec;
struct mlx5e_priv *priv;
gfp_t gfp;
int err;
- priv = netdev_priv(netdev);
+ priv = netdev_priv(dev);
if (!priv->ipsec)
return -EOPNOTSUPP;
@@ -707,6 +789,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
return -ENOMEM;
sa_entry->x = x;
+ sa_entry->dev = dev;
sa_entry->ipsec = ipsec;
/* Check if this SA is originated from acquire flow temporary SA */
if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
@@ -721,15 +804,36 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
goto err_xfrm;
}
+ err = mlx5_eswitch_block_mode(priv->mdev);
+ if (err)
+ goto unblock_ipsec;
+
+ if (x->props.mode == XFRM_MODE_TUNNEL &&
+ x->xso.type == XFRM_DEV_OFFLOAD_PACKET) {
+ allow_tunnel_mode = mlx5e_ipsec_fs_tunnel_allowed(sa_entry);
+ if (!allow_tunnel_mode) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Packet offload tunnel mode is disabled due to encap settings");
+ err = -EINVAL;
+ goto unblock_mode;
+ }
+ }
+
/* check esn */
if (x->props.flags & XFRM_STATE_ESN)
mlx5e_ipsec_update_esn_state(sa_entry);
+ else
+ /* According to RFC4303, section "3.3.3. Sequence Number Generation",
+ * the first packet sent using a given SA will contain a sequence
+ * number of 1.
+ */
+ sa_entry->esn_state.esn = 1;
mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry->attrs);
err = mlx5_ipsec_create_work(sa_entry);
if (err)
- goto unblock_ipsec;
+ goto unblock_encap;
err = mlx5e_ipsec_create_dwork(sa_entry);
if (err)
@@ -744,14 +848,6 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
if (err)
goto err_hw_ctx;
- if (x->props.mode == XFRM_MODE_TUNNEL &&
- x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
- !mlx5e_ipsec_fs_tunnel_enabled(sa_entry)) {
- NL_SET_ERR_MSG_MOD(extack, "Packet offload tunnel mode is disabled due to encap settings");
- err = -EINVAL;
- goto err_add_rule;
- }
-
/* We use *_bh() variant because xfrm_timer_handler(), which runs
* in softirq context, can reach our state delete logic and we need
* xa_erase_bh() there.
@@ -767,13 +863,20 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
queue_delayed_work(ipsec->wq, &sa_entry->dwork->dwork,
MLX5_IPSEC_RESCHED);
- if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
- x->props.mode == XFRM_MODE_TUNNEL)
- xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id,
- MLX5E_IPSEC_TUNNEL_SA);
+ if (allow_tunnel_mode) {
+ xa_lock_bh(&ipsec->sadb);
+ __xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id,
+ MLX5E_IPSEC_TUNNEL_SA);
+ xa_unlock_bh(&ipsec->sadb);
+ }
out:
x->xso.offload_handle = (unsigned long)sa_entry;
+ if (allow_tunnel_mode)
+ mlx5_eswitch_unblock_encap(priv->mdev);
+
+ mlx5_eswitch_unblock_mode(priv->mdev);
+
return 0;
err_add_rule:
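On the locking change above: a plausible reading is that plain xa_set_mark() takes the xarray lock in its non-BH form, while this array is otherwise driven with the _bh variants (xa_erase_bh()), so the mark is now set under an explicit xa_lock_bh(). A self-contained sketch (XA_MARK_0 stands in for the driver's MLX5E_IPSEC_TUNNEL_SA mark):

#include <linux/xarray.h>

static void mark_tunnel_sa_sketch(struct xarray *sadb, unsigned long id)
{
	xa_lock_bh(sadb);
	__xa_set_mark(sadb, id, XA_MARK_0);  /* caller must hold the xa lock */
	xa_unlock_bh(sadb);
}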
@@ -786,6 +889,11 @@ release_work:
if (sa_entry->work)
kfree(sa_entry->work->data);
kfree(sa_entry->work);
+unblock_encap:
+ if (allow_tunnel_mode)
+ mlx5_eswitch_unblock_encap(priv->mdev);
+unblock_mode:
+ mlx5_eswitch_unblock_mode(priv->mdev);
unblock_ipsec:
mlx5_eswitch_unblock_ipsec(priv->mdev);
err_xfrm:
@@ -794,10 +902,9 @@ err_xfrm:
return err;
}
-static void mlx5e_xfrm_del_state(struct xfrm_state *x)
+static void mlx5e_xfrm_del_state(struct net_device *dev, struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
- struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
struct mlx5e_ipsec_sa_entry *old;
@@ -806,15 +913,9 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x)
old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id);
WARN_ON(old != sa_entry);
-
- if (attrs->mode == XFRM_MODE_TUNNEL &&
- attrs->type == XFRM_DEV_OFFLOAD_PACKET)
- /* Make sure that no ARP requests are running in parallel */
- flush_workqueue(ipsec->wq);
-
}
-static void mlx5e_xfrm_free_state(struct xfrm_state *x)
+static void mlx5e_xfrm_free_state(struct net_device *dev, struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
@@ -847,8 +948,6 @@ static int mlx5e_ipsec_netevent_event(struct notifier_block *nb,
struct mlx5e_ipsec_sa_entry *sa_entry;
struct mlx5e_ipsec *ipsec;
struct neighbour *n = ptr;
- struct net_device *netdev;
- struct xfrm_state *x;
unsigned long idx;
if (event != NETEVENT_NEIGH_UPDATE || !(n->nud_state & NUD_VALID))
@@ -858,21 +957,19 @@ static int mlx5e_ipsec_netevent_event(struct notifier_block *nb,
xa_for_each_marked(&ipsec->sadb, idx, sa_entry, MLX5E_IPSEC_TUNNEL_SA) {
attrs = &sa_entry->attrs;
- if (attrs->family == AF_INET) {
- if (!neigh_key_eq32(n, &attrs->saddr.a4) &&
- !neigh_key_eq32(n, &attrs->daddr.a4))
+ if (attrs->addrs.family == AF_INET) {
+ if (!neigh_key_eq32(n, &attrs->addrs.saddr.a4) &&
+ !neigh_key_eq32(n, &attrs->addrs.daddr.a4))
continue;
} else {
- if (!neigh_key_eq128(n, &attrs->saddr.a4) &&
- !neigh_key_eq128(n, &attrs->daddr.a4))
+ if (!neigh_key_eq128(n, &attrs->addrs.saddr.a4) &&
+ !neigh_key_eq128(n, &attrs->addrs.daddr.a4))
continue;
}
- x = sa_entry->x;
- netdev = x->xso.real_dev;
data = sa_entry->work->data;
- neigh_ha_snapshot(data->addr, n, netdev);
+ neigh_ha_snapshot(data->addr, n, sa_entry->dev);
queue_work(ipsec->wq, &sa_entry->work->work);
}
@@ -958,21 +1055,6 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
priv->ipsec = NULL;
}
-static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
-{
- if (x->props.family == AF_INET) {
- /* Offload with IPv4 options is not supported yet */
- if (ip_hdr(skb)->ihl > 5)
- return false;
- } else {
- /* Offload with IPv6 extension headers is not support yet */
- if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
- return false;
- }
-
- return true;
-}
-
static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
@@ -1003,8 +1085,8 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
size_t headers;
lockdep_assert(lockdep_is_held(&x->lock) ||
- lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex) ||
- lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_state_lock));
+ lockdep_is_held(&net->xfrm.xfrm_cfg_mutex) ||
+ lockdep_is_held(&net->xfrm.xfrm_state_lock));
if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
return;
@@ -1040,7 +1122,7 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
* by removing always available headers.
*/
headers = sizeof(struct ethhdr);
- if (sa_entry->attrs.family == AF_INET)
+ if (sa_entry->attrs.addrs.family == AF_INET)
headers += sizeof(struct iphdr);
else
headers += sizeof(struct ipv6hdr);
@@ -1049,6 +1131,43 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
x->curlft.bytes += success_bytes - headers * success_packets;
}
+static __be32 word_to_mask(int prefix)
+{
+ if (prefix < 0)
+ return 0;
+
+ if (!prefix || prefix > 31)
+ return cpu_to_be32(0xFFFFFFFF);
+
+ return cpu_to_be32(((1U << prefix) - 1) << (32 - prefix));
+}
+
+static void mlx5e_ipsec_policy_mask(struct mlx5e_ipsec_addr *addrs,
+ struct xfrm_selector *sel)
+{
+ int i;
+
+ if (addrs->family == AF_INET) {
+ addrs->smask.m4 = word_to_mask(sel->prefixlen_s);
+ addrs->saddr.a4 &= addrs->smask.m4;
+ addrs->dmask.m4 = word_to_mask(sel->prefixlen_d);
+ addrs->daddr.a4 &= addrs->dmask.m4;
+ return;
+ }
+
+ for (i = 0; i < 4; i++) {
+ if (sel->prefixlen_s != 32 * i)
+ addrs->smask.m6[i] =
+ word_to_mask(sel->prefixlen_s - 32 * i);
+ addrs->saddr.a6[i] &= addrs->smask.m6[i];
+
+ if (sel->prefixlen_d != 32 * i)
+ addrs->dmask.m6[i] =
+ word_to_mask(sel->prefixlen_d - 32 * i);
+ addrs->daddr.a6[i] &= addrs->dmask.m6[i];
+ }
+}
+
static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
struct xfrm_policy *x,
struct netlink_ext_ack *extack)
@@ -1121,9 +1240,10 @@ mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
sel = &x->selector;
memset(attrs, 0, sizeof(*attrs));
- memcpy(&attrs->saddr, sel->saddr.a6, sizeof(attrs->saddr));
- memcpy(&attrs->daddr, sel->daddr.a6, sizeof(attrs->daddr));
- attrs->family = sel->family;
+ memcpy(&attrs->addrs.saddr, sel->saddr.a6, sizeof(attrs->addrs.saddr));
+ memcpy(&attrs->addrs.daddr, sel->daddr.a6, sizeof(attrs->addrs.daddr));
+ attrs->addrs.family = sel->family;
+ mlx5e_ipsec_policy_mask(&attrs->addrs, sel);
attrs->dir = x->xdo.dir;
attrs->action = x->action;
attrs->type = XFRM_DEV_OFFLOAD_PACKET;
@@ -1139,7 +1259,7 @@ mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
static int mlx5e_xfrm_add_policy(struct xfrm_policy *x,
struct netlink_ext_ack *extack)
{
- struct net_device *netdev = x->xdo.real_dev;
+ struct net_device *netdev = x->xdo.dev;
struct mlx5e_ipsec_pol_entry *pol_entry;
struct mlx5e_priv *priv;
int err;
@@ -1201,7 +1321,6 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
.xdo_dev_state_add = mlx5e_xfrm_add_state,
.xdo_dev_state_delete = mlx5e_xfrm_del_state,
.xdo_dev_state_free = mlx5e_xfrm_free_state,
- .xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
.xdo_dev_state_update_stats = mlx5e_xfrm_update_stats,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 7d943e93cf6d..f8eaaf37963b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -76,27 +76,36 @@ struct mlx5_replay_esn {
u8 trigger : 1;
};
-struct mlx5_accel_esp_xfrm_attrs {
- u32 spi;
- u32 mode;
- struct aes_gcm_keymat aes_gcm;
-
+struct mlx5e_ipsec_addr {
union {
__be32 a4;
__be32 a6[4];
} saddr;
-
+ union {
+ __be32 m4;
+ __be32 m6[4];
+ } smask;
union {
__be32 a4;
__be32 a6[4];
} daddr;
+ union {
+ __be32 m4;
+ __be32 m6[4];
+ } dmask;
+ u8 family;
+};
+struct mlx5_accel_esp_xfrm_attrs {
+ u32 spi;
+ u32 mode;
+ struct aes_gcm_keymat aes_gcm;
+ struct mlx5e_ipsec_addr addrs;
struct upspec upspec;
u8 dir : 2;
u8 type : 2;
u8 drop : 1;
u8 encap : 1;
- u8 family;
struct mlx5_replay_esn replay_esn;
u32 authsize;
u32 reqid;
@@ -128,6 +137,7 @@ struct mlx5e_ipsec_hw_stats {
u64 ipsec_rx_bytes;
u64 ipsec_rx_drop_pkts;
u64 ipsec_rx_drop_bytes;
+ u64 ipsec_rx_drop_mismatch_sa_sel;
u64 ipsec_tx_pkts;
u64 ipsec_tx_bytes;
u64 ipsec_tx_drop_pkts;
@@ -175,6 +185,7 @@ struct mlx5e_ipsec_rx_create_attr {
u32 family;
int prio;
int pol_level;
+ int pol_miss_level;
int sa_level;
int status_level;
enum mlx5_flow_namespace_type chains_ns;
@@ -184,6 +195,7 @@ struct mlx5e_ipsec_ft {
struct mutex mutex; /* Protect changes to this struct */
struct mlx5_flow_table *pol;
struct mlx5_flow_table *sa;
+ struct mlx5_flow_table *sa_sel;
struct mlx5_flow_table *status;
u32 refcnt;
};
@@ -195,6 +207,8 @@ struct mlx5e_ipsec_drop {
struct mlx5e_ipsec_rule {
struct mlx5_flow_handle *rule;
+ struct mlx5_flow_handle *status_pass;
+ struct mlx5_flow_handle *sa_sel;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_pkt_reformat *pkt_reformat;
struct mlx5_fc *fc;
@@ -206,6 +220,7 @@ struct mlx5e_ipsec_rule {
struct mlx5e_ipsec_miss {
struct mlx5_flow_group *group;
struct mlx5_flow_handle *rule;
+ struct mlx5_fc *fc;
};
struct mlx5e_ipsec_tx_create_attr {
@@ -260,6 +275,7 @@ struct mlx5e_ipsec_limits {
struct mlx5e_ipsec_sa_entry {
struct mlx5e_ipsec_esn_state esn_state;
struct xfrm_state *x;
+ struct net_device *dev;
struct mlx5e_ipsec *ipsec;
struct mlx5_accel_esp_xfrm_attrs attrs;
void (*set_iv_op)(struct sk_buff *skb, struct xfrm_state *x,
@@ -274,18 +290,8 @@ struct mlx5e_ipsec_sa_entry {
};
struct mlx5_accel_pol_xfrm_attrs {
- union {
- __be32 a4;
- __be32 a6[4];
- } saddr;
-
- union {
- __be32 a4;
- __be32 a6[4];
- } daddr;
-
+ struct mlx5e_ipsec_addr addrs;
struct upspec upspec;
- u8 family;
u8 action;
u8 type : 2;
u8 dir : 2;
@@ -313,7 +319,7 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry);
-bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry);
+bool mlx5e_ipsec_fs_tunnel_allowed(struct mlx5e_ipsec_sa_entry *sa_entry);
int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
@@ -336,6 +342,7 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
struct mlx5e_priv *master_priv);
void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event);
+void mlx5e_ipsec_disable_events(struct mlx5e_priv *priv);
static inline struct mlx5_core_dev *
mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry)
@@ -381,6 +388,10 @@ static inline void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *sl
static inline void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event)
{
}
+
+static inline void mlx5e_ipsec_disable_events(struct mlx5e_priv *priv)
+{
+}
#endif
#endif /* __MLX5E_IPSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index e51b03d4c717..feef86fff4bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -16,6 +16,16 @@
#define MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE 16
#define IPSEC_TUNNEL_DEFAULT_TTL 0x40
+#define MLX5_IPSEC_FS_SA_SELECTOR_MAX_NUM_GROUPS 16
+
+enum {
+ MLX5_IPSEC_ASO_OK,
+ MLX5_IPSEC_ASO_BAD_REPLY,
+
+ /* For crypto offload, set by driver */
+ MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD = 0xAA,
+};
+
struct mlx5e_ipsec_fc {
struct mlx5_fc *cnt;
struct mlx5_fc *drop;
@@ -33,6 +43,9 @@ struct mlx5e_ipsec_tx {
};
struct mlx5e_ipsec_status_checks {
+ struct mlx5_flow_group *pass_group;
+ struct mlx5_flow_handle *packet_offload_pass_rule;
+ struct mlx5_flow_handle *crypto_offload_pass_rule;
struct mlx5_flow_group *drop_all_group;
struct mlx5e_ipsec_drop all;
};
@@ -41,11 +54,14 @@ struct mlx5e_ipsec_rx {
struct mlx5e_ipsec_ft ft;
struct mlx5e_ipsec_miss pol;
struct mlx5e_ipsec_miss sa;
- struct mlx5e_ipsec_rule status;
- struct mlx5e_ipsec_status_checks status_drops;
+ struct mlx5e_ipsec_miss sa_sel;
+ struct mlx5e_ipsec_status_checks status_checks;
struct mlx5e_ipsec_fc *fc;
struct mlx5_fs_chains *chains;
+ struct mlx5_flow_table *pol_miss_ft;
+ struct mlx5_flow_handle *pol_miss_rule;
u8 allow_tunnel_mode : 1;
+ u8 ttc_rules_added : 1;
};
/* IPsec RX flow steering */
@@ -130,11 +146,12 @@ static void ipsec_chains_put_table(struct mlx5_fs_chains *chains, u32 prio)
static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
int level, int prio,
+ int num_reserved_entries,
int max_num_groups, u32 flags)
{
struct mlx5_flow_table_attr ft_attr = {};
- ft_attr.autogroup.num_reserved_entries = 1;
+ ft_attr.autogroup.num_reserved_entries = num_reserved_entries;
ft_attr.autogroup.max_num_groups = max_num_groups;
ft_attr.max_fte = NUM_IPSEC_FTE;
ft_attr.level = level;
@@ -147,22 +164,35 @@ static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
static void ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
- mlx5_del_flow_rules(rx->status_drops.all.rule);
- mlx5_fc_destroy(ipsec->mdev, rx->status_drops.all.fc);
- mlx5_destroy_flow_group(rx->status_drops.drop_all_group);
+ mlx5_del_flow_rules(rx->status_checks.all.rule);
+ mlx5_fc_destroy(ipsec->mdev, rx->status_checks.all.fc);
+ mlx5_destroy_flow_group(rx->status_checks.drop_all_group);
}
static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
- mlx5_del_flow_rules(rx->status.rule);
+ mlx5_del_flow_rules(rx->status_checks.packet_offload_pass_rule);
+ mlx5_del_flow_rules(rx->status_checks.crypto_offload_pass_rule);
+}
- if (rx != ipsec->rx_esw)
- return;
+static void ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_spec *spec)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
-#ifdef CONFIG_MLX5_ESWITCH
- mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
-#endif
+ if (rx == ipsec->rx_esw) {
+ mlx5_esw_ipsec_rx_rule_add_match_obj(sa_entry, spec);
+ } else {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_2);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_c_2,
+ sa_entry->ipsec_obj_id | BIT(31));
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+ }
}
static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
@@ -194,17 +224,14 @@ static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
flow_act.flags = FLOW_ACT_NO_APPEND;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(flow_counter);
+ dest.counter = flow_counter;
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.ipsec_syndrome);
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 1);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
- MLX5_SET(fte_match_param, spec->match_value,
- misc_parameters_2.metadata_reg_c_2,
- sa_entry->ipsec_obj_id | BIT(31));
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ ipsec_rx_rule_add_match_obj(sa_entry, rx, spec);
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -223,7 +250,7 @@ static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
}
sa_entry->ipsec_rule.trailer.fc = flow_counter;
- dest.counter_id = mlx5_fc_id(flow_counter);
+ dest.counter = flow_counter;
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 2);
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
@@ -275,16 +302,14 @@ static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
flow_act.flags = FLOW_ACT_NO_APPEND;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(flow_counter);
+ dest.counter = flow_counter;
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 1);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_2,
- sa_entry->ipsec_obj_id | BIT(31));
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ ipsec_rx_rule_add_match_obj(sa_entry, rx, spec);
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -348,7 +373,7 @@ static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec,
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(flow_counter);
+ dest.counter = flow_counter;
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
@@ -359,9 +384,9 @@ static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec,
goto err_rule;
}
- rx->status_drops.drop_all_group = g;
- rx->status_drops.all.rule = rule;
- rx->status_drops.all.fc = flow_counter;
+ rx->status_checks.drop_all_group = g;
+ rx->status_checks.all.rule = rule;
+ rx->status_checks.all.fc = flow_counter;
kvfree(flow_group_in);
kvfree(spec);
@@ -377,9 +402,52 @@ err_out:
return err;
}
-static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx,
- struct mlx5_flow_destination *dest)
+static int ipsec_rx_status_pass_group_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table *ft = rx->ft.status;
+ struct mlx5_flow_group *fg;
+ void *match_criteria;
+ u32 *flow_group_in;
+ int err = 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS_2);
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
+ match_criteria);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters_2.ipsec_syndrome);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters_2.metadata_reg_c_4);
+
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ start_flow_index, ft->max_fte - 3);
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ end_flow_index, ft->max_fte - 2);
+
+ fg = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(fg)) {
+ err = PTR_ERR(fg);
+ mlx5_core_warn(ipsec->mdev,
+ "Failed to create rx status pass flow group, err=%d\n",
+ err);
+ }
+ rx->status_checks.pass_group = fg;
+
+ kvfree(flow_group_in);
+ return err;
+}
+
+static struct mlx5_flow_handle *
+ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest,
+ u8 aso_ok)
{
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
@@ -388,7 +456,7 @@ static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
misc_parameters_2.ipsec_syndrome);
@@ -397,11 +465,11 @@ static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
MLX5_SET(fte_match_param, spec->match_value,
misc_parameters_2.ipsec_syndrome, 0);
MLX5_SET(fte_match_param, spec->match_value,
- misc_parameters_2.metadata_reg_c_4, 0);
+ misc_parameters_2.metadata_reg_c_4, aso_ok);
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
- flow_act.flags = FLOW_ACT_NO_APPEND;
+ flow_act.flags = FLOW_ACT_NO_APPEND | FLOW_ACT_IGNORE_FLOW_LEVEL;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
@@ -412,19 +480,19 @@ static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
goto err_rule;
}
- rx->status.rule = rule;
kvfree(spec);
- return 0;
+ return rule;
err_rule:
kvfree(spec);
- return err;
+ return ERR_PTR(err);
}
static void mlx5_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
ipsec_rx_status_pass_destroy(ipsec, rx);
+ mlx5_destroy_flow_group(rx->status_checks.pass_group);
ipsec_rx_status_drop_destroy(ipsec, rx);
}
@@ -432,19 +500,44 @@ static int mlx5_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx,
struct mlx5_flow_destination *dest)
{
+ struct mlx5_flow_destination pol_dest[2];
+ struct mlx5_flow_handle *rule;
int err;
err = ipsec_rx_status_drop_all_create(ipsec, rx);
if (err)
return err;
- err = ipsec_rx_status_pass_create(ipsec, rx, dest);
+ err = ipsec_rx_status_pass_group_create(ipsec, rx);
if (err)
- goto err_pass_create;
+ goto err_pass_group_create;
+
+ rule = ipsec_rx_status_pass_create(ipsec, rx, dest,
+ MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto err_crypto_offload_pass_create;
+ }
+ rx->status_checks.crypto_offload_pass_rule = rule;
+
+ pol_dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ pol_dest[0].ft = rx->ft.pol;
+ pol_dest[1] = dest[1];
+ rule = ipsec_rx_status_pass_create(ipsec, rx, pol_dest,
+ MLX5_IPSEC_ASO_OK);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto err_packet_offload_pass_create;
+ }
+ rx->status_checks.packet_offload_pass_rule = rule;
return 0;
-err_pass_create:
+err_packet_offload_pass_create:
+ mlx5_del_flow_rules(rx->status_checks.crypto_offload_pass_rule);
+err_crypto_offload_pass_create:
+ mlx5_destroy_flow_group(rx->status_checks.pass_group);
+err_pass_group_create:
ipsec_rx_status_drop_destroy(ipsec, rx);
return err;
}
@@ -493,35 +586,56 @@ out:
return err;
}
+static struct mlx5_flow_destination
+ipsec_rx_decrypted_pkt_def_dest(struct mlx5_ttc_table *ttc, u32 family)
+{
+ struct mlx5_flow_destination dest;
+
+ if (!mlx5_ttc_has_esp_flow_group(ttc))
+ return mlx5_ttc_get_default_dest(ttc, family2tt(family));
+
+ dest.ft = mlx5_get_ttc_flow_table(ttc);
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+
+ return dest;
+}
+
+static void ipsec_rx_update_default_dest(struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *old_dest,
+ struct mlx5_flow_destination *new_dest)
+{
+ mlx5_modify_rule_destination(rx->pol_miss_rule, new_dest, old_dest);
+ mlx5_modify_rule_destination(rx->status_checks.crypto_offload_pass_rule,
+ new_dest, old_dest);
+}
+
static void handle_ipsec_rx_bringup(struct mlx5e_ipsec *ipsec, u32 family)
{
struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET);
struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(ipsec->fs, false);
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
struct mlx5_flow_destination old_dest, new_dest;
- old_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false),
- family2tt(family));
+ old_dest = ipsec_rx_decrypted_pkt_def_dest(ttc, family);
mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, ns, &old_dest, family,
MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL, MLX5E_NIC_PRIO);
new_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- mlx5_modify_rule_destination(rx->status.rule, &new_dest, &old_dest);
- mlx5_modify_rule_destination(rx->sa.rule, &new_dest, &old_dest);
+ ipsec_rx_update_default_dest(rx, &old_dest, &new_dest);
}
static void handle_ipsec_rx_cleanup(struct mlx5e_ipsec *ipsec, u32 family)
{
struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET);
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
struct mlx5_flow_destination old_dest, new_dest;
old_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
old_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- new_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false),
- family2tt(family));
- mlx5_modify_rule_destination(rx->sa.rule, &new_dest, &old_dest);
- mlx5_modify_rule_destination(rx->status.rule, &new_dest, &old_dest);
+ new_dest = ipsec_rx_decrypted_pkt_def_dest(ttc, family);
+ ipsec_rx_update_default_dest(rx, &old_dest, &new_dest);
mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, ipsec->mdev);
}
@@ -570,20 +684,18 @@ static void ipsec_mpv_work_handler(struct work_struct *_work)
complete(&work->master_priv->ipsec->comp);
}
-static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec *ipsec, u32 family)
+static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx, u32 family)
{
struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
+ if (rx->ttc_rules_added)
+ mlx5_ttc_destroy_ipsec_rules(ttc);
mlx5_ttc_fwd_default_dest(ttc, family2tt(family));
}
-static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx, u32 family)
+static void ipsec_rx_policy_destroy(struct mlx5e_ipsec_rx *rx)
{
- /* disconnect */
- if (rx != ipsec->rx_esw)
- ipsec_rx_ft_disconnect(ipsec, family);
-
if (rx->chains) {
ipsec_chains_destroy(rx->chains);
} else {
@@ -592,6 +704,29 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
mlx5_destroy_flow_table(rx->ft.pol);
}
+ if (rx->pol_miss_rule) {
+ mlx5_del_flow_rules(rx->pol_miss_rule);
+ mlx5_destroy_flow_table(rx->pol_miss_ft);
+ }
+}
+
+static void ipsec_rx_sa_selector_destroy(struct mlx5_core_dev *mdev,
+ struct mlx5e_ipsec_rx *rx)
+{
+ mlx5_del_flow_rules(rx->sa_sel.rule);
+ mlx5_fc_destroy(mdev, rx->sa_sel.fc);
+ rx->sa_sel.fc = NULL;
+ mlx5_destroy_flow_group(rx->sa_sel.group);
+ mlx5_destroy_flow_table(rx->ft.sa_sel);
+}
+
+static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx, u32 family)
+{
+ /* disconnect */
+ if (rx != ipsec->rx_esw)
+ ipsec_rx_ft_disconnect(ipsec, rx, family);
+
mlx5_del_flow_rules(rx->sa.rule);
mlx5_destroy_flow_group(rx->sa.group);
mlx5_destroy_flow_table(rx->ft.sa);
@@ -600,7 +735,17 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
mlx5_ipsec_rx_status_destroy(ipsec, rx);
mlx5_destroy_flow_table(rx->ft.status);
+ ipsec_rx_sa_selector_destroy(mdev, rx);
+
+ ipsec_rx_policy_destroy(rx);
+
mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
+
+#ifdef CONFIG_MLX5_ESWITCH
+ if (rx == ipsec->rx_esw)
+ mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch),
+ 0, 1, 0);
+#endif
}
static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
@@ -620,6 +765,7 @@ static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
attr->family = family;
attr->prio = MLX5E_NIC_PRIO;
attr->pol_level = MLX5E_ACCEL_FS_POL_FT_LEVEL;
+ attr->pol_miss_level = MLX5E_ACCEL_FS_POL_MISS_FT_LEVEL;
attr->sa_level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
attr->status_level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
attr->chains_ns = MLX5_FLOW_NAMESPACE_KERNEL;
@@ -636,7 +782,7 @@ static int ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
if (rx == ipsec->rx_esw)
return mlx5_esw_ipsec_rx_status_pass_dest_get(ipsec, dest);
- *dest = mlx5_ttc_get_default_dest(attr->ttc, family2tt(attr->family));
+ *dest = ipsec_rx_decrypted_pkt_def_dest(attr->ttc, attr->family);
err = mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, attr->ns, dest,
attr->family, MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
attr->prio);
@@ -652,22 +798,254 @@ static int ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
return 0;
}
+static void ipsec_rx_sa_miss_dest_get(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5e_ipsec_rx_create_attr *attr,
+ struct mlx5_flow_destination *dest,
+ struct mlx5_flow_destination *miss_dest)
+{
+ if (rx == ipsec->rx_esw)
+ *miss_dest = *dest;
+ else
+ *miss_dest =
+ mlx5_ttc_get_default_dest(attr->ttc,
+ family2tt(attr->family));
+}
+
+static void ipsec_rx_default_dest_get(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest)
+{
+ dest->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest->ft = rx->pol_miss_ft;
+}
+
static void ipsec_rx_ft_connect(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx,
struct mlx5e_ipsec_rx_create_attr *attr)
{
struct mlx5_flow_destination dest = {};
+ struct mlx5_ttc_table *ttc, *inner_ttc;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = rx->ft.pol;
- mlx5_ttc_fwd_dest(attr->ttc, family2tt(attr->family), &dest);
+ dest.ft = rx->ft.sa;
+ if (mlx5_ttc_fwd_dest(attr->ttc, family2tt(attr->family), &dest))
+ return;
+
+ ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
+ inner_ttc = mlx5e_fs_get_ttc(ipsec->fs, true);
+ rx->ttc_rules_added = !mlx5_ttc_create_ipsec_rules(ttc, inner_ttc);
+}
+
+static int ipsec_rx_chains_create_miss(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5e_ipsec_rx_create_attr *attr,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_table *ft;
+ int err;
+
+ if (rx == ipsec->rx_esw) {
+ /* No need to create miss table for switchdev mode,
+ * just set it to the root chain table.
+ */
+ rx->pol_miss_ft = dest->ft;
+ return 0;
+ }
+
+ ft_attr.max_fte = 1;
+ ft_attr.autogroup.max_num_groups = 1;
+ ft_attr.level = attr->pol_miss_level;
+ ft_attr.prio = attr->prio;
+
+ ft = mlx5_create_auto_grouped_flow_table(attr->ns, &ft_attr);
+ if (IS_ERR(ft))
+ return PTR_ERR(ft);
+
+ rule = mlx5_add_flow_rules(ft, NULL, &flow_act, dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto err_rule;
+ }
+
+ rx->pol_miss_ft = ft;
+ rx->pol_miss_rule = rule;
+
+ return 0;
+
+err_rule:
+ mlx5_destroy_flow_table(ft);
+ return err;
+}
+
+static int ipsec_rx_policy_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5e_ipsec_rx_create_attr *attr,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_flow_destination default_dest;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_table *ft;
+ int err;
+
+ err = ipsec_rx_chains_create_miss(ipsec, rx, attr, dest);
+ if (err)
+ return err;
+
+ ipsec_rx_default_dest_get(ipsec, rx, &default_dest);
+
+ if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
+ rx->chains = ipsec_chains_create(mdev,
+ default_dest.ft,
+ attr->chains_ns,
+ attr->prio,
+ attr->sa_level,
+ &rx->ft.pol);
+ if (IS_ERR(rx->chains))
+ err = PTR_ERR(rx->chains);
+ } else {
+ ft = ipsec_ft_create(attr->ns, attr->pol_level,
+ attr->prio, 1, 2, 0);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto err_out;
+ }
+ rx->ft.pol = ft;
+
+ err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol,
+ &default_dest);
+ if (err)
+ mlx5_destroy_flow_table(rx->ft.pol);
+ }
+
+ if (!err)
+ return 0;
+
+err_out:
+ if (rx->pol_miss_rule) {
+ mlx5_del_flow_rules(rx->pol_miss_rule);
+ mlx5_destroy_flow_table(rx->pol_miss_ft);
+ }
+ return err;
+}
+
+static int ipsec_rx_sa_selector_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5e_ipsec_rx_create_attr *attr)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_destination dest;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ u32 *flow_group_in;
+ struct mlx5_fc *fc;
+ int err;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ ft = ipsec_ft_create(attr->ns, attr->status_level, attr->prio, 1,
+ MLX5_IPSEC_FS_SA_SELECTOR_MAX_NUM_GROUPS, 0);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "Failed to create RX SA selector flow table, err=%d\n",
+ err);
+ goto err_ft;
+ }
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
+ ft->max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ ft->max_fte - 1);
+ fg = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(fg)) {
+ err = PTR_ERR(fg);
+ mlx5_core_err(mdev, "Failed to create RX SA selector miss group, err=%d\n",
+ err);
+ goto err_fg;
+ }
+
+ fc = mlx5_fc_create(mdev, false);
+ if (IS_ERR(fc)) {
+ err = PTR_ERR(fc);
+ mlx5_core_err(mdev,
+ "Failed to create ipsec RX SA selector miss rule counter, err=%d\n",
+ err);
+ goto err_cnt;
+ }
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter = fc;
+ flow_act.action =
+ MLX5_FLOW_CONTEXT_ACTION_COUNT | MLX5_FLOW_CONTEXT_ACTION_DROP;
+
+ rule = mlx5_add_flow_rules(ft, NULL, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Failed to create RX SA selector miss drop rule, err=%d\n",
+ err);
+ goto err_rule;
+ }
+
+ rx->ft.sa_sel = ft;
+ rx->sa_sel.group = fg;
+ rx->sa_sel.fc = fc;
+ rx->sa_sel.rule = rule;
+
+ kvfree(flow_group_in);
+
+ return 0;
+
+err_rule:
+ mlx5_fc_destroy(mdev, fc);
+err_cnt:
+ mlx5_destroy_flow_group(fg);
+err_fg:
+ mlx5_destroy_flow_table(ft);
+err_ft:
+ kvfree(flow_group_in);
+ return err;
}
+/* The decryption processing is as follows:
+ *
+ * +----------+ +-------------+
+ * | | | |
+ * | Kernel <--------------+----------+ policy miss <------------+
+ * | | ^ | | ^
+ * +----^-----+ | +-------------+ |
+ * | crypto |
+ * miss offload ok allow/default
+ * ^ ^ ^
+ * | | packet |
+ * +----+---------+ +----+-------------+ offload ok +------+---+
+ * | | | | (no UPSPEC) | |
+ * | SA (decrypt) +-----> status +--->------->----+ policy |
+ * | | | | | |
+ * +--------------+ ++---------+-------+ +-^----+---+
+ * | | | |
+ * v packet +-->->---+ v
+ * | offload ok match |
+ * fails (with UPSPEC) | block
+ * | | +-------------+-+ |
+ * v v | | miss v
+ * drop +---> SA sel +--------->drop
+ * | |
+ * +---------------+
+ */
+
static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx, u32 family)
{
+ struct mlx5_flow_destination dest[2], miss_dest;
struct mlx5e_ipsec_rx_create_attr attr;
- struct mlx5_flow_destination dest[2];
struct mlx5_flow_table *ft;
u32 flags = 0;
int err;
@@ -678,80 +1056,63 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
if (err)
return err;
- ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 3, 0);
+ ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 3, 4, 0);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft_status;
}
rx->ft.status = ft;
- dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
- err = mlx5_ipsec_rx_status_create(ipsec, rx, dest);
+ err = ipsec_rx_sa_selector_create(ipsec, rx, &attr);
if (err)
- goto err_add;
+ goto err_fs_ft_sa_sel;
/* Create FT */
if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
- rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
+ rx->allow_tunnel_mode =
+ mlx5_eswitch_block_encap(mdev, rx == ipsec->rx_esw);
+
if (rx->allow_tunnel_mode)
flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
- ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 2, flags);
+ ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 1, 2, flags);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft;
}
rx->ft.sa = ft;
- err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, dest);
+ ipsec_rx_sa_miss_dest_get(ipsec, rx, &attr, &dest[0], &miss_dest);
+ err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, &miss_dest);
if (err)
goto err_fs;
- if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
- rx->chains = ipsec_chains_create(mdev, rx->ft.sa,
- attr.chains_ns,
- attr.prio,
- attr.pol_level,
- &rx->ft.pol);
- if (IS_ERR(rx->chains)) {
- err = PTR_ERR(rx->chains);
- goto err_pol_ft;
- }
-
- goto connect;
- }
+ err = ipsec_rx_policy_create(ipsec, rx, &attr, &dest[0]);
+ if (err)
+ goto err_policy;
- ft = ipsec_ft_create(attr.ns, attr.pol_level, attr.prio, 2, 0);
- if (IS_ERR(ft)) {
- err = PTR_ERR(ft);
- goto err_pol_ft;
- }
- rx->ft.pol = ft;
- memset(dest, 0x00, 2 * sizeof(*dest));
- dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[0].ft = rx->ft.sa;
- err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol, dest);
+ dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[1].counter = rx->fc->cnt;
+ err = mlx5_ipsec_rx_status_create(ipsec, rx, dest);
if (err)
- goto err_pol_miss;
+ goto err_add;
-connect:
/* connect */
if (rx != ipsec->rx_esw)
ipsec_rx_ft_connect(ipsec, rx, &attr);
return 0;
-err_pol_miss:
- mlx5_destroy_flow_table(rx->ft.pol);
-err_pol_ft:
+err_add:
+ ipsec_rx_policy_destroy(rx);
+err_policy:
mlx5_del_flow_rules(rx->sa.rule);
mlx5_destroy_flow_group(rx->sa.group);
err_fs:
mlx5_destroy_flow_table(rx->ft.sa);
-err_fs_ft:
if (rx->allow_tunnel_mode)
mlx5_eswitch_unblock_encap(mdev);
- mlx5_ipsec_rx_status_destroy(ipsec, rx);
-err_add:
+err_fs_ft:
+ ipsec_rx_sa_selector_destroy(mdev, rx);
+err_fs_ft_sa_sel:
mlx5_destroy_flow_table(rx->ft.status);
err_fs_ft_status:
mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
@@ -873,7 +1234,7 @@ static int ipsec_counter_rule_tx(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(tx->fc->cnt);
+ dest.counter = tx->fc->cnt;
fte = mlx5_add_flow_rules(tx->ft.status, spec, &flow_act, &dest, 1);
if (IS_ERR(fte)) {
err = PTR_ERR(fte);
@@ -941,7 +1302,7 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
int err;
ipsec_tx_create_attr_set(ipsec, tx, &attr);
- ft = ipsec_ft_create(tx->ns, attr.cnt_level, attr.prio, 1, 0);
+ ft = ipsec_ft_create(tx->ns, attr.cnt_level, attr.prio, 1, 1, 0);
if (IS_ERR(ft))
return PTR_ERR(ft);
tx->ft.status = ft;
@@ -951,10 +1312,12 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
goto err_status_rule;
if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
- tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
+ tx->allow_tunnel_mode =
+ mlx5_eswitch_block_encap(mdev, tx == ipsec->tx_esw);
+
if (tx->allow_tunnel_mode)
flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
- ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 4, flags);
+ ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 1, 4, flags);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_sa_ft;
@@ -982,7 +1345,7 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
goto connect_roce;
}
- ft = ipsec_ft_create(tx->ns, attr.pol_level, attr.prio, 2, 0);
+ ft = ipsec_ft_create(tx->ns, attr.pol_level, attr.prio, 1, 2, 0);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_pol_ft;
@@ -1150,9 +1513,14 @@ static void tx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 prio, int type)
mutex_unlock(&tx->ft.mutex);
}
-static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr,
- __be32 *daddr)
+static void setup_fte_addr4(struct mlx5_flow_spec *spec,
+ struct mlx5e_ipsec_addr *addrs)
{
+ __be32 *saddr = &addrs->saddr.a4;
+ __be32 *smask = &addrs->smask.m4;
+ __be32 *daddr = &addrs->daddr.a4;
+ __be32 *dmask = &addrs->dmask.m4;
+
if (!*saddr && !*daddr)
return;
@@ -1164,21 +1532,26 @@ static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr,
if (*saddr) {
memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), saddr, 4);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
- outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), smask, 4);
}
if (*daddr) {
memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), daddr, 4);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
- outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), dmask, 4);
}
}
-static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr,
- __be32 *daddr)
+static void setup_fte_addr6(struct mlx5_flow_spec *spec,
+ struct mlx5e_ipsec_addr *addrs)
{
+ __be32 *saddr = addrs->saddr.a6;
+ __be32 *smask = addrs->smask.m6;
+ __be32 *daddr = addrs->daddr.a6;
+ __be32 *dmask = addrs->dmask.m6;
+
if (addr6_all_zero(saddr) && addr6_all_zero(daddr))
return;
@@ -1190,15 +1563,15 @@ static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr,
if (!addr6_all_zero(saddr)) {
memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), saddr, 16);
- memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 0xff, 16);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), smask, 16);
}
if (!addr6_all_zero(daddr)) {
memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), daddr, 16);
- memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 0xff, 16);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), dmask, 16);
}
}
@@ -1340,7 +1713,8 @@ static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8
MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, action[2], field,
MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
- MLX5_SET(set_action_in, action[2], data, 0);
+ MLX5_SET(set_action_in, action[2], data,
+ MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD);
MLX5_SET(set_action_in, action[2], offset, 0);
MLX5_SET(set_action_in, action[2], length, 32);
}
@@ -1359,8 +1733,8 @@ static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8
modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, num_of_actions, action);
if (IS_ERR(modify_hdr)) {
- mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n",
- PTR_ERR(modify_hdr));
+ mlx5_core_err(mdev, "Failed to allocate modify_header %pe\n",
+ modify_hdr);
return PTR_ERR(modify_hdr);
}
@@ -1387,7 +1761,7 @@ setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) {
bfflen += sizeof(*esp_hdr) + 8;
- switch (attrs->family) {
+ switch (attrs->addrs.family) {
case AF_INET:
bfflen += sizeof(*iphdr);
break;
@@ -1404,7 +1778,7 @@ setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
return -ENOMEM;
eth_hdr = (struct ethhdr *)reformatbf;
- switch (attrs->family) {
+ switch (attrs->addrs.family) {
case AF_INET:
eth_hdr->h_proto = htons(ETH_P_IP);
break;
@@ -1427,11 +1801,11 @@ setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
reformat_params->param_0 = attrs->authsize;
hdr = reformatbf + sizeof(*eth_hdr);
- switch (attrs->family) {
+ switch (attrs->addrs.family) {
case AF_INET:
iphdr = (struct iphdr *)hdr;
- memcpy(&iphdr->saddr, &attrs->saddr.a4, 4);
- memcpy(&iphdr->daddr, &attrs->daddr.a4, 4);
+ memcpy(&iphdr->saddr, &attrs->addrs.saddr.a4, 4);
+ memcpy(&iphdr->daddr, &attrs->addrs.daddr.a4, 4);
iphdr->version = 4;
iphdr->ihl = 5;
iphdr->ttl = IPSEC_TUNNEL_DEFAULT_TTL;
@@ -1440,8 +1814,8 @@ setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
break;
case AF_INET6:
ipv6hdr = (struct ipv6hdr *)hdr;
- memcpy(&ipv6hdr->saddr, &attrs->saddr.a6, 16);
- memcpy(&ipv6hdr->daddr, &attrs->daddr.a6, 16);
+ memcpy(&ipv6hdr->saddr, &attrs->addrs.saddr.a6, 16);
+ memcpy(&ipv6hdr->daddr, &attrs->addrs.daddr.a6, 16);
ipv6hdr->nexthdr = IPPROTO_ESP;
ipv6hdr->version = 6;
ipv6hdr->hop_limit = IPSEC_TUNNEL_DEFAULT_TTL;
@@ -1475,7 +1849,7 @@ static int get_reformat_type(struct mlx5_accel_esp_xfrm_attrs *attrs)
return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP;
return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
case XFRM_DEV_OFFLOAD_OUT:
- if (attrs->family == AF_INET) {
+ if (attrs->addrs.family == AF_INET) {
if (attrs->encap)
return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4;
return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
@@ -1576,6 +1950,85 @@ static int setup_pkt_reformat(struct mlx5e_ipsec *ipsec,
return 0;
}
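+/* For packet-offload SAs that restrict the upper-layer protocol, add two
+ * rules: a status-table rule that forwards this SA's successfully decrypted
+ * packets (syndrome 0) to a per-SA selector table, and a selector-table rule
+ * that lets only packets matching the SA's upspec continue to the policy
+ * table.
+ */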
+static int rx_add_rule_sa_selector(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5e_ipsec_rx *rx,
+ struct upspec *upspec)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_destination dest[2];
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.ipsec_syndrome);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_4);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.ipsec_syndrome, 0);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_c_4, 0);
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+
+ ipsec_rx_rule_add_match_obj(sa_entry, rx, spec);
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[0].ft = rx->ft.sa_sel;
+ dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[1].counter = rx->fc->cnt;
+
+ rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx pass rule, err=%d\n",
+ err);
+ goto err_add_status_pass_rule;
+ }
+
+ sa_entry->ipsec_rule.status_pass = rule;
+
+ MLX5_SET(fte_match_param, spec->match_criteria,
+ misc_parameters_2.ipsec_syndrome, 0);
+ MLX5_SET(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_4, 0);
+
+ setup_fte_upper_proto_match(spec, upspec);
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[0].ft = rx->ft.pol;
+
+ rule = mlx5_add_flow_rules(rx->ft.sa_sel, spec, &flow_act, &dest[0], 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx sa selector rule, err=%d\n",
+ err);
+ goto err_add_sa_sel_rule;
+ }
+
+ sa_entry->ipsec_rule.sa_sel = rule;
+
+ kvfree(spec);
+ return 0;
+
+err_add_sa_sel_rule:
+ mlx5_del_flow_rules(sa_entry->ipsec_rule.status_pass);
+err_add_status_pass_rule:
+ kvfree(spec);
+ return err;
+}
+
static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
@@ -1589,7 +2042,7 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
struct mlx5_fc *counter;
int err = 0;
- rx = rx_ft_get(mdev, ipsec, attrs->family, attrs->type);
+ rx = rx_ft_get(mdev, ipsec, attrs->addrs.family, attrs->type);
if (IS_ERR(rx))
return PTR_ERR(rx);
@@ -1599,16 +2052,15 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
goto err_alloc;
}
- if (attrs->family == AF_INET)
- setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ if (attrs->addrs.family == AF_INET)
+ setup_fte_addr4(spec, &attrs->addrs);
else
- setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+ setup_fte_addr6(spec, &attrs->addrs);
setup_fte_spi(spec, attrs->spi, attrs->encap);
if (!attrs->encap)
setup_fte_esp(spec);
setup_fte_no_frags(spec);
- setup_fte_upper_proto_match(spec, &attrs->upspec);
if (!attrs->drop) {
if (rx != ipsec->rx_esw)
@@ -1649,13 +2101,20 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[0].ft = rx->ft.status;
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[1].counter_id = mlx5_fc_id(counter);
+ dest[1].counter = counter;
rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, dest, 2);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
goto err_add_flow;
}
+
+ if (attrs->upspec.proto && attrs->type == XFRM_DEV_OFFLOAD_PACKET) {
+ err = rx_add_rule_sa_selector(sa_entry, rx, &attrs->upspec);
+ if (err)
+ goto err_add_sa_sel;
+ }
+
if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
err = rx_add_rule_drop_replay(sa_entry, rx);
if (err)
@@ -1679,6 +2138,11 @@ err_drop_reason:
mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.replay.fc);
}
err_add_replay:
+ if (sa_entry->ipsec_rule.sa_sel) {
+ mlx5_del_flow_rules(sa_entry->ipsec_rule.sa_sel);
+ mlx5_del_flow_rules(sa_entry->ipsec_rule.status_pass);
+ }
+err_add_sa_sel:
mlx5_del_flow_rules(rule);
err_add_flow:
mlx5_fc_destroy(mdev, counter);
@@ -1691,7 +2155,7 @@ err_pkt_reformat:
err_mod_header:
kvfree(spec);
err_alloc:
- rx_ft_put(ipsec, attrs->family, attrs->type);
+ rx_ft_put(ipsec, attrs->addrs.family, attrs->type);
return err;
}
@@ -1718,23 +2182,21 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
goto err_alloc;
}
- if (attrs->family == AF_INET)
- setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
- else
- setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
-
setup_fte_no_frags(spec);
setup_fte_upper_proto_match(spec, &attrs->upspec);
switch (attrs->type) {
case XFRM_DEV_OFFLOAD_CRYPTO:
+ if (attrs->addrs.family == AF_INET)
+ setup_fte_addr4(spec, &attrs->addrs);
+ else
+ setup_fte_addr6(spec, &attrs->addrs);
setup_fte_spi(spec, attrs->spi, false);
setup_fte_esp(spec);
setup_fte_reg_a(spec);
break;
case XFRM_DEV_OFFLOAD_PACKET:
- if (attrs->reqid)
- setup_fte_reg_c4(spec, attrs->reqid);
+ setup_fte_reg_c4(spec, attrs->reqid);
err = setup_pkt_reformat(ipsec, attrs, &flow_act);
if (err)
goto err_pkt_reformat;
@@ -1762,7 +2224,7 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
dest[0].ft = tx->ft.status;
dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[1].counter_id = mlx5_fc_id(counter);
+ dest[1].counter = counter;
rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, dest, 2);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -1812,10 +2274,10 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
}
tx = ipsec_tx(ipsec, attrs->type);
- if (attrs->family == AF_INET)
- setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ if (attrs->addrs.family == AF_INET)
+ setup_fte_addr4(spec, &attrs->addrs);
else
- setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+ setup_fte_addr6(spec, &attrs->addrs);
setup_fte_no_frags(spec);
setup_fte_upper_proto_match(spec, &attrs->upspec);
@@ -1835,7 +2297,7 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[dstn].counter_id = mlx5_fc_id(tx->fc->drop);
+ dest[dstn].counter = tx->fc->drop;
dstn++;
break;
default:
@@ -1885,12 +2347,12 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
struct mlx5e_ipsec_rx *rx;
int err, dstn = 0;
- ft = rx_ft_get_policy(mdev, pol_entry->ipsec, attrs->family, attrs->prio,
- attrs->type);
+ ft = rx_ft_get_policy(mdev, pol_entry->ipsec, attrs->addrs.family,
+ attrs->prio, attrs->type);
if (IS_ERR(ft))
return PTR_ERR(ft);
- rx = ipsec_rx(pol_entry->ipsec, attrs->family, attrs->type);
+ rx = ipsec_rx(pol_entry->ipsec, attrs->addrs.family, attrs->type);
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
@@ -1898,10 +2360,10 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
goto err_alloc;
}
- if (attrs->family == AF_INET)
- setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ if (attrs->addrs.family == AF_INET)
+ setup_fte_addr4(spec, &attrs->addrs);
else
- setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+ setup_fte_addr6(spec, &attrs->addrs);
setup_fte_no_frags(spec);
setup_fte_upper_proto_match(spec, &attrs->upspec);
@@ -1913,7 +2375,7 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
case XFRM_POLICY_BLOCK:
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[dstn].counter_id = mlx5_fc_id(rx->fc->drop);
+ dest[dstn].counter = rx->fc->drop;
dstn++;
break;
default:
@@ -1925,8 +2387,7 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
flow_act.flags |= FLOW_ACT_NO_APPEND;
if (rx == ipsec->rx_esw && rx->chains)
flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
- dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[dstn].ft = rx->ft.sa;
+ ipsec_rx_default_dest_get(ipsec, rx, &dest[dstn]);
dstn++;
rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
if (IS_ERR(rule)) {
@@ -1942,7 +2403,8 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
err_action:
kvfree(spec);
err_alloc:
- rx_ft_put_policy(pol_entry->ipsec, attrs->family, attrs->prio, attrs->type);
+ rx_ft_put_policy(pol_entry->ipsec, attrs->addrs.family, attrs->prio,
+ attrs->type);
return err;
}
@@ -2063,6 +2525,7 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
stats->ipsec_rx_bytes = 0;
stats->ipsec_rx_drop_pkts = 0;
stats->ipsec_rx_drop_bytes = 0;
+ stats->ipsec_rx_drop_mismatch_sa_sel = 0;
stats->ipsec_tx_pkts = 0;
stats->ipsec_tx_bytes = 0;
stats->ipsec_tx_drop_pkts = 0;
@@ -2072,6 +2535,9 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_rx_pkts, &stats->ipsec_rx_bytes);
mlx5_fc_query(mdev, fc->drop, &stats->ipsec_rx_drop_pkts,
&stats->ipsec_rx_drop_bytes);
+ if (ipsec->rx_ipv4->sa_sel.fc)
+ mlx5_fc_query(mdev, ipsec->rx_ipv4->sa_sel.fc,
+ &stats->ipsec_rx_drop_mismatch_sa_sel, &bytes);
fc = ipsec->tx->fc;
mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_tx_pkts, &stats->ipsec_tx_bytes);
@@ -2100,6 +2566,11 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
stats->ipsec_tx_drop_pkts += packets;
stats->ipsec_tx_drop_bytes += bytes;
}
+
+ if (ipsec->rx_esw->sa_sel.fc &&
+ !mlx5_fc_query(mdev, ipsec->rx_esw->sa_sel.fc,
+ &packets, &bytes))
+ stats->ipsec_rx_drop_mismatch_sa_sel += packets;
}
}
@@ -2197,12 +2668,18 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
mlx5_del_flow_rules(ipsec_rule->auth.rule);
mlx5_fc_destroy(mdev, ipsec_rule->auth.fc);
+ if (ipsec_rule->sa_sel) {
+ mlx5_del_flow_rules(ipsec_rule->sa_sel);
+ mlx5_del_flow_rules(ipsec_rule->status_pass);
+ }
+
if (ipsec_rule->replay.rule) {
mlx5_del_flow_rules(ipsec_rule->replay.rule);
mlx5_fc_destroy(mdev, ipsec_rule->replay.fc);
}
mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry);
- rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family, sa_entry->attrs.type);
+ rx_ft_put(sa_entry->ipsec, sa_entry->attrs.addrs.family,
+ sa_entry->attrs.type);
}
int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
@@ -2238,7 +2715,8 @@ void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
mlx5e_ipsec_unblock_tc_offload(pol_entry->ipsec->mdev);
if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
- rx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.family,
+ rx_ft_put_policy(pol_entry->ipsec,
+ pol_entry->attrs.addrs.family,
pol_entry->attrs.prio, pol_entry->attrs.type);
return;
}
@@ -2372,18 +2850,24 @@ void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry)
memcpy(sa_entry, &sa_entry_shadow, sizeof(*sa_entry));
}
-bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry)
+bool mlx5e_ipsec_fs_tunnel_allowed(struct mlx5e_ipsec_sa_entry *sa_entry)
{
- struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
- struct mlx5e_ipsec_rx *rx;
- struct mlx5e_ipsec_tx *tx;
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct xfrm_state *x = sa_entry->x;
+ bool from_fdb;
- rx = ipsec_rx(sa_entry->ipsec, attrs->family, attrs->type);
- tx = ipsec_tx(sa_entry->ipsec, attrs->type);
- if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
- return tx->allow_tunnel_mode;
+ if (x->xso.dir == XFRM_DEV_OFFLOAD_OUT) {
+ struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, x->xso.type);
+
+ from_fdb = (tx == ipsec->tx_esw);
+ } else {
+ struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, x->props.family,
+ x->xso.type);
+
+ from_fdb = (rx == ipsec->rx_esw);
+ }
- return rx->allow_tunnel_mode;
+ return mlx5_eswitch_block_encap(ipsec->mdev, from_fdb);
}
void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
@@ -2409,9 +2893,30 @@ void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event)
{
- if (!priv->ipsec)
- return; /* IPsec not supported */
+ if (!priv->ipsec || mlx5_devcom_comp_get_size(priv->devcom) < 2)
+ return; /* IPsec not supported or no peers */
mlx5_devcom_send_event(priv->devcom, event, event, priv);
wait_for_completion(&priv->ipsec->comp);
}
+
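+/* Wake any peer still waiting for an IPsec MPV event completion, then
+ * unregister this device's devcom component so no further events reach it.
+ */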
+void mlx5e_ipsec_disable_events(struct mlx5e_priv *priv)
+{
+ struct mlx5_devcom_comp_dev *tmp = NULL;
+ struct mlx5e_priv *peer_priv;
+
+ if (!priv->devcom)
+ return;
+
+ if (!mlx5_devcom_for_each_peer_begin(priv->devcom))
+ goto out;
+
+ peer_priv = mlx5_devcom_get_next_peer_data(priv->devcom, &tmp);
+ if (peer_priv)
+ complete_all(&peer_priv->ipsec->comp);
+
+ mlx5_devcom_for_each_peer_end(priv->devcom);
+out:
+ mlx5_devcom_unregister_component(priv->devcom);
+ priv->devcom = NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index 53cfa39188cb..ef7322d381af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -42,8 +42,7 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload) &&
(mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS ||
- (mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS &&
- is_mdev_legacy_mode(mdev)))) {
+ is_mdev_legacy_mode(mdev))) {
if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
reformat_add_esp_trasport) &&
MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
@@ -91,8 +90,9 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);
static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
- struct mlx5_accel_esp_xfrm_attrs *attrs)
+ struct mlx5e_ipsec_sa_entry *sa_entry)
{
+ struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
void *aso_ctx;
aso_ctx = MLX5_ADDR_OF(ipsec_obj, obj, ipsec_aso);
@@ -120,8 +120,12 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
* active.
*/
MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5);
- if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
+ if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) {
MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);
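+		/* Without replay/ESN offload, seed the ASO sequence number
+		 * from the software-tracked state.
+		 */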
+ if (!attrs->replay_esn.trigger)
+ MLX5_SET(ipsec_aso, aso_ctx, mode_parameter,
+ sa_entry->esn_state.esn);
+ }
if (attrs->lft.hard_packet_limit != XFRM_INF) {
MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
@@ -175,7 +179,7 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
res = &mdev->mlx5e_res.hw_objs;
if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
- mlx5e_ipsec_packet_setup(obj, res->pdn, attrs);
+ mlx5e_ipsec_packet_setup(obj, res->pdn, sa_entry);
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (!err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 727fa7c18523..6056106edcc6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -327,6 +327,10 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
if (unlikely(!sa_entry)) {
rcu_read_unlock();
atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
+ /* Clear secpath to prevent invalid dereference
+ * in downstream XFRM policy checks.
+ */
+ secpath_reset(skb);
return;
}
xfrm_state_hold(sa_entry->x);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 3cc640669247..45b0d19e735c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -40,7 +40,7 @@
#include "en/txrx.h"
/* Bit31: IPsec marker, Bit30: reserved, Bit29-24: IPsec syndrome, Bit23-0: IPsec obj id */
-#define MLX5_IPSEC_METADATA_MARKER(metadata) (((metadata) >> 31) & 0x1)
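+/* Only bit 31 set with bit 30 clear marks IPsec; PSP-marked packets set both
+ * bits 31 and 30 (see MLX5E_PSP_MARKER_BIT) and must not be treated as IPsec.
+ */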
+#define MLX5_IPSEC_METADATA_MARKER(metadata) ((((metadata) >> 30) & 0x3) == 0x2)
#define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(5, 0))
#define MLX5_IPSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(23, 0))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
index 92bf3fa44a3b..93be388068f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
@@ -42,6 +42,7 @@ static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_pkts) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_mismatch_sa_sel) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_pkts) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_drop_pkts) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 65ccb33edafb..da2d1eb52c13 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -320,7 +320,6 @@ err_dma_unmap:
err_free:
kfree(buf);
err_out:
- priv_rx->rq_stats->tls_resync_req_skip++;
return err;
}
@@ -339,14 +338,19 @@ static void resync_handle_work(struct work_struct *work)
if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
mlx5e_ktls_priv_rx_put(priv_rx);
+ priv_rx->rq_stats->tls_resync_req_skip++;
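+		/* Let the TLS core know this async resync request will not complete */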
+ tls_offload_rx_resync_async_request_cancel(&resync->core);
return;
}
c = resync->priv->channels.c[priv_rx->rxq];
sq = &c->async_icosq;
- if (resync_post_get_progress_params(sq, priv_rx))
+ if (resync_post_get_progress_params(sq, priv_rx)) {
+ priv_rx->rq_stats->tls_resync_req_skip++;
+ tls_offload_rx_resync_async_request_cancel(&resync->core);
mlx5e_ktls_priv_rx_put(priv_rx);
+ }
}
static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync,
@@ -425,14 +429,21 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
{
struct mlx5e_ktls_rx_resync_buf *buf = wi->tls_get_params.buf;
struct mlx5e_ktls_offload_context_rx *priv_rx;
+ struct tls_offload_resync_async *async_resync;
+ struct tls_offload_context_rx *rx_ctx;
u8 tracker_state, auth_state, *ctx;
struct device *dev;
u32 hw_seq;
priv_rx = buf->priv_rx;
dev = mlx5_core_dma_dev(sq->channel->mdev);
- if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
+ rx_ctx = tls_offload_ctx_rx(tls_get_ctx(priv_rx->sk));
+ async_resync = rx_ctx->resync_async;
+ if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
+ priv_rx->rq_stats->tls_resync_req_skip++;
+ tls_offload_rx_resync_async_request_cancel(async_resync);
goto out;
+ }
dma_sync_single_for_cpu(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE,
DMA_FROM_DEVICE);
@@ -443,11 +454,13 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING ||
auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) {
priv_rx->rq_stats->tls_resync_req_skip++;
+ tls_offload_rx_resync_async_request_cancel(async_resync);
goto out;
}
hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn);
- tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq));
+ tls_offload_rx_resync_async_request_end(async_resync,
+ cpu_to_be32(hw_seq));
priv_rx->rq_stats->tls_resync_req_end++;
out:
mlx5e_ktls_priv_rx_put(priv_rx);
@@ -472,8 +485,10 @@ static bool resync_queue_get_psv(struct sock *sk)
resync = &priv_rx->resync;
mlx5e_ktls_priv_rx_get(priv_rx);
- if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work)))
+ if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work))) {
mlx5e_ktls_priv_rx_put(priv_rx);
+ return false;
+ }
return true;
}
@@ -482,6 +497,7 @@ static bool resync_queue_get_psv(struct sock *sk)
static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
{
struct ethhdr *eth = (struct ethhdr *)(skb->data);
+ struct tls_offload_resync_async *resync_async;
struct net_device *netdev = rq->netdev;
struct net *net = dev_net(netdev);
struct sock *sk = NULL;
@@ -498,9 +514,9 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
depth += sizeof(struct iphdr);
th = (void *)iph + sizeof(struct iphdr);
- sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
- iph->saddr, th->source, iph->daddr,
- th->dest, netdev->ifindex);
+ sk = inet_lookup_established(net, iph->saddr, th->source,
+ iph->daddr, th->dest,
+ netdev->ifindex);
#if IS_ENABLED(CONFIG_IPV6)
} else {
struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph;
@@ -508,8 +524,7 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
depth += sizeof(struct ipv6hdr);
th = (void *)ipv6h + sizeof(struct ipv6hdr);
- sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
- &ipv6h->saddr, th->source,
+ sk = __inet6_lookup_established(net, &ipv6h->saddr, th->source,
&ipv6h->daddr, ntohs(th->dest),
netdev->ifindex, 0);
#endif
@@ -528,7 +543,8 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
seq = th->seq;
datalen = skb->len - depth;
- tls_offload_rx_resync_async_request_start(sk, seq, datalen);
+ resync_async = tls_offload_ctx_rx(tls_get_ctx(sk))->resync_async;
+ tls_offload_rx_resync_async_request_start(resync_async, seq, datalen);
rq->stats->tls_resync_req_start++;
unref:
@@ -557,6 +573,18 @@ void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk,
resync_handle_seq_match(priv_rx, c);
}
+void
+mlx5e_ktls_rx_resync_async_request_cancel(struct mlx5e_icosq_wqe_info *wi)
+{
+ struct mlx5e_ktls_offload_context_rx *priv_rx;
+ struct mlx5e_ktls_rx_resync_buf *buf;
+
+ buf = wi->tls_get_params.buf;
+ priv_rx = buf->priv_rx;
+ priv_rx->rq_stats->tls_resync_req_skip++;
+ tls_offload_rx_resync_async_request_cancel(&priv_rx->resync.core);
+}
+
/* End of resync section */
void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index d61be26a4df1..08f06984407b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -660,7 +660,7 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
while (remaining > 0) {
skb_frag_t *frag = &record->frags[i];
- get_page(skb_frag_page(frag));
+ page_ref_inc(skb_frag_page(frag));
remaining -= skb_frag_size(frag);
info->frags[i++] = *frag;
}
@@ -744,7 +744,7 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn)
dseg->addr = cpu_to_be64(dma_addr);
dseg->lkey = sq->mkey_be;
dseg->byte_count = cpu_to_be32(fsz);
- mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
+ mlx5e_dma_push_netmem(sq, skb_frag_netmem(frag), dma_addr, fsz);
tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, fsz, skb_frag_page(frag));
sq->pc += MLX5E_KTLS_DUMP_WQEBBS;
@@ -763,7 +763,7 @@ void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
stats = sq->stats;
mlx5e_tx_dma_unmap(sq->pdev, dma);
- put_page(wi->resync_dump_frag_page);
+ page_ref_dec(wi->resync_dump_frag_page);
stats->tls_dump_packets++;
stats->tls_dump_bytes += wi->num_bytes;
}
@@ -816,12 +816,12 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
err_out:
for (; i < info.nr_frags; i++)
- /* The put_page() here undoes the page ref obtained in tx_sync_info_get().
+ /* The page_ref_dec() here undoes the page ref obtained in tx_sync_info_get().
* Page refs obtained for the DUMP WQEs above (by page_ref_add) will be
* released only upon their completions (or in mlx5e_free_txqsq_descs,
* if channel closes).
*/
- put_page(skb_frag_page(&info.frags[i]));
+ page_ref_dec(skb_frag_page(&info.frags[i]));
return MLX5E_KTLS_SYNC_FAIL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
index f87b65c560ea..cb08799769ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
@@ -29,6 +29,10 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe_info *wi,
u32 *dma_fifo_cc);
+
+void
+mlx5e_ktls_rx_resync_async_request_cancel(struct mlx5e_icosq_wqe_info *wi);
+
static inline bool
mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe_info *wi,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index cc9bcc420032..528b04d4de41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -339,9 +339,13 @@ static int mlx5e_macsec_init_sa_fs(struct macsec_context *ctx,
{
struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
struct mlx5_macsec_fs *macsec_fs = priv->mdev->macsec_fs;
+ const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
struct mlx5_macsec_rule_attrs rule_attrs;
union mlx5_macsec_rule *macsec_rule;
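+	/* Only the SA currently selected for encoding needs a TX steering rule */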
+ if (is_tx && tx_sc->encoding_sa != sa->assoc_num)
+ return 0;
+
rule_attrs.macsec_obj_id = sa->macsec_obj_id;
rule_attrs.sci = sa->sci;
rule_attrs.assoc_num = sa->assoc_num;
@@ -1672,7 +1676,7 @@ void mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec *macsec,
if (!fs_id)
return;
- eseg->flow_table_metadata = cpu_to_be32(MLX5_ETH_WQE_FT_META_MACSEC | fs_id << 2);
+ eseg->flow_table_metadata = cpu_to_be32(MLX5_MACSEC_TX_METADATA(fs_id));
}
void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c
new file mode 100644
index 000000000000..38e7c77cc851
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c
@@ -0,0 +1,1155 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+#include <linux/mlx5/device.h>
+#include <net/psp.h>
+#include <linux/psp.h>
+#include "mlx5_core.h"
+#include "psp.h"
+#include "lib/crypto.h"
+#include "en_accel/psp.h"
+#include "fs_core.h"
+
+enum accel_fs_psp_type {
+ ACCEL_FS_PSP4,
+ ACCEL_FS_PSP6,
+ ACCEL_FS_PSP_NUM_TYPES,
+};
+
+enum accel_psp_syndrome {
+ PSP_OK = 0,
+ PSP_ICV_FAIL,
+ PSP_BAD_TRAILER,
+};
+
+struct mlx5e_psp_tx {
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ struct mlx5_flow_handle *rule;
+ struct mutex mutex; /* Protect PSP TX steering */
+ u32 refcnt;
+ struct mlx5_fc *tx_counter;
+};
+
+struct mlx5e_psp_rx_err {
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_handle *auth_fail_rule;
+ struct mlx5_flow_handle *err_rule;
+ struct mlx5_flow_handle *bad_rule;
+ struct mlx5_modify_hdr *copy_modify_hdr;
+};
+
+struct mlx5e_accel_fs_psp_prot {
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *miss_group;
+ struct mlx5_flow_handle *miss_rule;
+ struct mlx5_flow_destination default_dest;
+ struct mlx5e_psp_rx_err rx_err;
+ u32 refcnt;
+ struct mutex prot_mutex; /* protect ESP4/ESP6 protocol */
+ struct mlx5_flow_handle *def_rule;
+};
+
+struct mlx5e_accel_fs_psp {
+ struct mlx5e_accel_fs_psp_prot fs_prot[ACCEL_FS_PSP_NUM_TYPES];
+ struct mlx5_fc *rx_counter;
+ struct mlx5_fc *rx_auth_fail_counter;
+ struct mlx5_fc *rx_err_counter;
+ struct mlx5_fc *rx_bad_counter;
+};
+
+struct mlx5e_psp_fs {
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_psp_tx *tx_fs;
+ /* Rx manage */
+ struct mlx5e_flow_steering *fs;
+ struct mlx5e_accel_fs_psp *rx_fs;
+};
+
+/* PSP RX flow steering */
+static enum mlx5_traffic_types fs_psp2tt(enum accel_fs_psp_type i)
+{
+ if (i == ACCEL_FS_PSP4)
+ return MLX5_TT_IPV4_UDP;
+
+ return MLX5_TT_IPV6_UDP;
+}
+
+static void accel_psp_fs_rx_err_del_rules(struct mlx5e_psp_fs *fs,
+ struct mlx5e_psp_rx_err *rx_err)
+{
+ if (rx_err->bad_rule) {
+ mlx5_del_flow_rules(rx_err->bad_rule);
+ rx_err->bad_rule = NULL;
+ }
+
+ if (rx_err->err_rule) {
+ mlx5_del_flow_rules(rx_err->err_rule);
+ rx_err->err_rule = NULL;
+ }
+
+ if (rx_err->auth_fail_rule) {
+ mlx5_del_flow_rules(rx_err->auth_fail_rule);
+ rx_err->auth_fail_rule = NULL;
+ }
+
+ if (rx_err->rule) {
+ mlx5_del_flow_rules(rx_err->rule);
+ rx_err->rule = NULL;
+ }
+
+ if (rx_err->copy_modify_hdr) {
+ mlx5_modify_header_dealloc(fs->mdev, rx_err->copy_modify_hdr);
+ rx_err->copy_modify_hdr = NULL;
+ }
+}
+
+static void accel_psp_fs_rx_err_destroy_ft(struct mlx5e_psp_fs *fs,
+ struct mlx5e_psp_rx_err *rx_err)
+{
+ accel_psp_fs_rx_err_del_rules(fs, rx_err);
+
+ if (rx_err->ft) {
+ mlx5_destroy_flow_table(rx_err->ft);
+ rx_err->ft = NULL;
+ }
+}
+
+static void accel_psp_setup_syndrome_match(struct mlx5_flow_spec *spec,
+ enum accel_psp_syndrome syndrome)
+{
+ void *misc_params_2;
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+ misc_params_2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
+ MLX5_SET_TO_ONES(fte_match_set_misc2, misc_params_2, psp_syndrome);
+ misc_params_2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
+ MLX5_SET(fte_match_set_misc2, misc_params_2, psp_syndrome, syndrome);
+}
+
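+/* Populate the PSP RX error table: PSP_OK packets get the syndrome copied into
+ * regB[23:29] and are forwarded to the default destination; ICV failures, bad
+ * trailers and any other syndrome are counted and dropped.
+ */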
+static int accel_psp_fs_rx_err_add_rule(struct mlx5e_psp_fs *fs,
+ struct mlx5e_accel_fs_psp_prot *fs_prot,
+ struct mlx5e_psp_rx_err *rx_err)
+{
+ u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ struct mlx5_core_dev *mdev = fs->mdev;
+ struct mlx5_flow_destination dest[2];
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_modify_hdr *modify_hdr;
+ struct mlx5_flow_handle *fte;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ /* Action to copy 7 bit psp_syndrome to regB[23:29] */
+ MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
+ MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_PSP_SYNDROME);
+ MLX5_SET(copy_action_in, action, src_offset, 0);
+ MLX5_SET(copy_action_in, action, length, 7);
+ MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+ MLX5_SET(copy_action_in, action, dst_offset, 23);
+
+ modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
+ 1, action);
+ if (IS_ERR(modify_hdr)) {
+ err = PTR_ERR(modify_hdr);
+ mlx5_core_err(mdev,
+ "fail to alloc psp copy modify_header_id err=%d\n", err);
+ goto out_spec;
+ }
+
+ accel_psp_setup_syndrome_match(spec, PSP_OK);
+ /* create fte */
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ flow_act.modify_hdr = modify_hdr;
+ dest[0].type = fs_prot->default_dest.type;
+ dest[0].ft = fs_prot->default_dest.ft;
+ dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[1].counter = fs->rx_fs->rx_counter;
+ fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act, dest, 2);
+ if (IS_ERR(fte)) {
+ err = PTR_ERR(fte);
+ mlx5_core_err(mdev, "fail to add psp rx err copy rule err=%d\n", err);
+ goto out;
+ }
+ rx_err->rule = fte;
+
+ /* add auth fail drop rule */
+ memset(spec, 0, sizeof(*spec));
+ memset(&flow_act, 0, sizeof(flow_act));
+ accel_psp_setup_syndrome_match(spec, PSP_ICV_FAIL);
+ /* create fte */
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[0].counter = fs->rx_fs->rx_auth_fail_counter;
+ fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act, dest, 1);
+ if (IS_ERR(fte)) {
+ err = PTR_ERR(fte);
+ mlx5_core_err(mdev, "fail to add psp rx auth fail drop rule err=%d\n",
+ err);
+ goto out_drop_rule;
+ }
+ rx_err->auth_fail_rule = fte;
+
+ /* add framing drop rule */
+ memset(spec, 0, sizeof(*spec));
+ memset(&flow_act, 0, sizeof(flow_act));
+ accel_psp_setup_syndrome_match(spec, PSP_BAD_TRAILER);
+ /* create fte */
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[0].counter = fs->rx_fs->rx_err_counter;
+ fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act, dest, 1);
+ if (IS_ERR(fte)) {
+ err = PTR_ERR(fte);
+ mlx5_core_err(mdev, "fail to add psp rx framing err drop rule err=%d\n",
+ err);
+ goto out_drop_auth_fail_rule;
+ }
+ rx_err->err_rule = fte;
+
+ /* add misc. errors drop rule */
+ memset(spec, 0, sizeof(*spec));
+ memset(&flow_act, 0, sizeof(flow_act));
+ /* create fte */
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[0].counter = fs->rx_fs->rx_bad_counter;
+ fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act, dest, 1);
+ if (IS_ERR(fte)) {
+ err = PTR_ERR(fte);
+ mlx5_core_err(mdev, "fail to add psp rx misc. err drop rule err=%d\n",
+ err);
+ goto out_drop_error_rule;
+ }
+ rx_err->bad_rule = fte;
+
+ rx_err->copy_modify_hdr = modify_hdr;
+
+ goto out_spec;
+
+out_drop_error_rule:
+ mlx5_del_flow_rules(rx_err->err_rule);
+ rx_err->err_rule = NULL;
+out_drop_auth_fail_rule:
+ mlx5_del_flow_rules(rx_err->auth_fail_rule);
+ rx_err->auth_fail_rule = NULL;
+out_drop_rule:
+ mlx5_del_flow_rules(rx_err->rule);
+ rx_err->rule = NULL;
+out:
+ mlx5_modify_header_dealloc(mdev, modify_hdr);
+out_spec:
+ kfree(spec);
+ return err;
+}
+
+static int accel_psp_fs_rx_err_create_ft(struct mlx5e_psp_fs *fs,
+ struct mlx5e_accel_fs_psp_prot *fs_prot,
+ struct mlx5e_psp_rx_err *rx_err)
+{
+ struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs->fs, false);
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_table *ft;
+ int err;
+
+ ft_attr.max_fte = 2;
+ ft_attr.autogroup.max_num_groups = 2;
+ ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL; // MLX5E_ACCEL_FS_TCP_FT_LEVEL
+ ft_attr.prio = MLX5E_NIC_PRIO;
+ ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(fs->mdev, "fail to create psp rx inline ft err=%d\n", err);
+ return err;
+ }
+
+ rx_err->ft = ft;
+ err = accel_psp_fs_rx_err_add_rule(fs, fs_prot, rx_err);
+ if (err)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ mlx5_destroy_flow_table(ft);
+ rx_err->ft = NULL;
+ return err;
+}
+
+static void accel_psp_fs_rx_fs_destroy(struct mlx5e_accel_fs_psp_prot *fs_prot)
+{
+ if (fs_prot->def_rule) {
+ mlx5_del_flow_rules(fs_prot->def_rule);
+ fs_prot->def_rule = NULL;
+ }
+
+ if (fs_prot->miss_rule) {
+ mlx5_del_flow_rules(fs_prot->miss_rule);
+ fs_prot->miss_rule = NULL;
+ }
+
+ if (fs_prot->miss_group) {
+ mlx5_destroy_flow_group(fs_prot->miss_group);
+ fs_prot->miss_group = NULL;
+ }
+
+ if (fs_prot->ft) {
+ mlx5_destroy_flow_table(fs_prot->ft);
+ fs_prot->ft = NULL;
+ }
+}
+
+static void setup_fte_udp_psp(struct mlx5_flow_spec *spec, u16 udp_port)
+{
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria, udp_dport, 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, udp_dport, udp_port);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, spec->match_criteria, ip_protocol);
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, ip_protocol, IPPROTO_UDP);
+}
+
+static int accel_psp_fs_rx_create_ft(struct mlx5e_psp_fs *fs,
+ struct mlx5e_accel_fs_psp_prot *fs_prot)
+{
+ struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs->fs, false);
+ u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_modify_hdr *modify_hdr = NULL;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_core_dev *mdev = fs->mdev;
+ struct mlx5_flow_group *miss_group;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ struct mlx5_flow_table *ft;
+ u32 *flow_group_in;
+ int err = 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!flow_group_in || !spec) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* Create FT */
+ ft_attr.max_fte = 2;
+ ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
+ ft_attr.prio = MLX5E_NIC_PRIO;
+ ft_attr.autogroup.num_reserved_entries = 1;
+ ft_attr.autogroup.max_num_groups = 1;
+ ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "fail to create psp rx ft err=%d\n", err);
+ goto out_err;
+ }
+ fs_prot->ft = ft;
+
+ /* Create miss_group */
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
+ miss_group = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(miss_group)) {
+ err = PTR_ERR(miss_group);
+ mlx5_core_err(mdev, "fail to create psp rx miss_group err=%d\n", err);
+ goto out_err;
+ }
+ fs_prot->miss_group = miss_group;
+
+ /* Create miss rule */
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &fs_prot->default_dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "fail to create psp rx miss_rule err=%d\n", err);
+ goto out_err;
+ }
+ fs_prot->miss_rule = rule;
+
+ /* Add default Rx psp rule */
+ setup_fte_udp_psp(spec, PSP_DEFAULT_UDP_PORT);
+ flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_PSP;
+	/* Set bits [31:30] of regB as the PSP marker */
+	/* Bits [29:23] hold the psp_syndrome, copied in the error FT */
+#define MLX5E_PSP_MARKER_BIT (BIT(30) | BIT(31))
+ MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+ MLX5_SET(set_action_in, action, data, MLX5E_PSP_MARKER_BIT);
+ MLX5_SET(set_action_in, action, offset, 0);
+ MLX5_SET(set_action_in, action, length, 32);
+
+ modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL, 1, action);
+ if (IS_ERR(modify_hdr)) {
+ err = PTR_ERR(modify_hdr);
+ mlx5_core_err(mdev, "fail to alloc psp set modify_header_id err=%d\n", err);
+ modify_hdr = NULL;
+ goto out_err;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ flow_act.modify_hdr = modify_hdr;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = fs_prot->rx_err.ft;
+ rule = mlx5_add_flow_rules(fs_prot->ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "fail to add psp rule Rx decryption, err=%d, flow_act.action = %#04X\n",
+ err, flow_act.action);
+ goto out_err;
+ }
+
+ fs_prot->def_rule = rule;
+ goto out;
+
+out_err:
+ accel_psp_fs_rx_fs_destroy(fs_prot);
+out:
+ kvfree(flow_group_in);
+ kvfree(spec);
+ return err;
+}
+
+static int accel_psp_fs_rx_destroy(struct mlx5e_psp_fs *fs, enum accel_fs_psp_type type)
+{
+ struct mlx5e_accel_fs_psp_prot *fs_prot;
+ struct mlx5e_accel_fs_psp *accel_psp;
+
+ accel_psp = fs->rx_fs;
+
+	/* The netdev unreg already happened, so all offloaded rules are already removed */
+ fs_prot = &accel_psp->fs_prot[type];
+
+ accel_psp_fs_rx_fs_destroy(fs_prot);
+
+ accel_psp_fs_rx_err_destroy_ft(fs, &fs_prot->rx_err);
+
+ return 0;
+}
+
+static int accel_psp_fs_rx_create(struct mlx5e_psp_fs *fs, enum accel_fs_psp_type type)
+{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs->fs, false);
+ struct mlx5e_accel_fs_psp_prot *fs_prot;
+ struct mlx5e_accel_fs_psp *accel_psp;
+ int err;
+
+ accel_psp = fs->rx_fs;
+ fs_prot = &accel_psp->fs_prot[type];
+
+ fs_prot->default_dest = mlx5_ttc_get_default_dest(ttc, fs_psp2tt(type));
+
+ err = accel_psp_fs_rx_err_create_ft(fs, fs_prot, &fs_prot->rx_err);
+ if (err)
+ return err;
+
+ err = accel_psp_fs_rx_create_ft(fs, fs_prot);
+ if (err)
+ accel_psp_fs_rx_err_destroy_ft(fs, &fs_prot->rx_err);
+
+ return err;
+}
+
+static int accel_psp_fs_rx_ft_get(struct mlx5e_psp_fs *fs, enum accel_fs_psp_type type)
+{
+ struct mlx5e_accel_fs_psp_prot *fs_prot;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5e_accel_fs_psp *accel_psp;
+ struct mlx5_ttc_table *ttc;
+ int err = 0;
+
+ if (!fs || !fs->rx_fs)
+ return -EINVAL;
+
+ ttc = mlx5e_fs_get_ttc(fs->fs, false);
+ accel_psp = fs->rx_fs;
+ fs_prot = &accel_psp->fs_prot[type];
+ mutex_lock(&fs_prot->prot_mutex);
+ if (fs_prot->refcnt++)
+ goto out;
+
+ /* create FT */
+ err = accel_psp_fs_rx_create(fs, type);
+ if (err) {
+ fs_prot->refcnt--;
+ goto out;
+ }
+
+ /* connect */
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = fs_prot->ft;
+ mlx5_ttc_fwd_dest(ttc, fs_psp2tt(type), &dest);
+
+out:
+ mutex_unlock(&fs_prot->prot_mutex);
+ return err;
+}
+
+static void accel_psp_fs_rx_ft_put(struct mlx5e_psp_fs *fs, enum accel_fs_psp_type type)
+{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs->fs, false);
+ struct mlx5e_accel_fs_psp_prot *fs_prot;
+ struct mlx5e_accel_fs_psp *accel_psp;
+
+ accel_psp = fs->rx_fs;
+ fs_prot = &accel_psp->fs_prot[type];
+ mutex_lock(&fs_prot->prot_mutex);
+ if (--fs_prot->refcnt)
+ goto out;
+
+ /* disconnect */
+ mlx5_ttc_fwd_default_dest(ttc, fs_psp2tt(type));
+
+ /* remove FT */
+ accel_psp_fs_rx_destroy(fs, type);
+
+out:
+ mutex_unlock(&fs_prot->prot_mutex);
+}
+
+static void accel_psp_fs_cleanup_rx(struct mlx5e_psp_fs *fs)
+{
+ struct mlx5e_accel_fs_psp_prot *fs_prot;
+ struct mlx5e_accel_fs_psp *accel_psp;
+ enum accel_fs_psp_type i;
+
+ if (!fs->rx_fs)
+ return;
+
+ accel_psp = fs->rx_fs;
+ mlx5_fc_destroy(fs->mdev, accel_psp->rx_bad_counter);
+ mlx5_fc_destroy(fs->mdev, accel_psp->rx_err_counter);
+ mlx5_fc_destroy(fs->mdev, accel_psp->rx_auth_fail_counter);
+ mlx5_fc_destroy(fs->mdev, accel_psp->rx_counter);
+ for (i = 0; i < ACCEL_FS_PSP_NUM_TYPES; i++) {
+ fs_prot = &accel_psp->fs_prot[i];
+ mutex_destroy(&fs_prot->prot_mutex);
+ WARN_ON(fs_prot->refcnt);
+ }
+ kfree(fs->rx_fs);
+ fs->rx_fs = NULL;
+}
+
+static int accel_psp_fs_init_rx(struct mlx5e_psp_fs *fs)
+{
+ struct mlx5e_accel_fs_psp_prot *fs_prot;
+ struct mlx5e_accel_fs_psp *accel_psp;
+ struct mlx5_core_dev *mdev = fs->mdev;
+ struct mlx5_fc *flow_counter;
+ enum accel_fs_psp_type i;
+ int err;
+
+ accel_psp = kzalloc(sizeof(*accel_psp), GFP_KERNEL);
+ if (!accel_psp)
+ return -ENOMEM;
+
+ for (i = 0; i < ACCEL_FS_PSP_NUM_TYPES; i++) {
+ fs_prot = &accel_psp->fs_prot[i];
+ mutex_init(&fs_prot->prot_mutex);
+ }
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ mlx5_core_warn(mdev,
+ "fail to create psp rx flow counter err=%pe\n",
+ flow_counter);
+ err = PTR_ERR(flow_counter);
+ goto out_err;
+ }
+ accel_psp->rx_counter = flow_counter;
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ mlx5_core_warn(mdev,
+ "fail to create psp rx auth fail flow counter err=%pe\n",
+ flow_counter);
+ err = PTR_ERR(flow_counter);
+ goto out_counter_err;
+ }
+ accel_psp->rx_auth_fail_counter = flow_counter;
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ mlx5_core_warn(mdev,
+ "fail to create psp rx error flow counter err=%pe\n",
+ flow_counter);
+ err = PTR_ERR(flow_counter);
+ goto out_auth_fail_counter_err;
+ }
+ accel_psp->rx_err_counter = flow_counter;
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ mlx5_core_warn(mdev,
+ "fail to create psp rx bad flow counter err=%pe\n",
+ flow_counter);
+ err = PTR_ERR(flow_counter);
+ goto out_err_counter_err;
+ }
+ accel_psp->rx_bad_counter = flow_counter;
+
+ fs->rx_fs = accel_psp;
+
+ return 0;
+
+out_err_counter_err:
+ mlx5_fc_destroy(mdev, accel_psp->rx_err_counter);
+ accel_psp->rx_err_counter = NULL;
+out_auth_fail_counter_err:
+ mlx5_fc_destroy(mdev, accel_psp->rx_auth_fail_counter);
+ accel_psp->rx_auth_fail_counter = NULL;
+out_counter_err:
+ mlx5_fc_destroy(mdev, accel_psp->rx_counter);
+ accel_psp->rx_counter = NULL;
+out_err:
+ for (i = 0; i < ACCEL_FS_PSP_NUM_TYPES; i++) {
+ fs_prot = &accel_psp->fs_prot[i];
+ mutex_destroy(&fs_prot->prot_mutex);
+ }
+ kfree(accel_psp);
+ fs->rx_fs = NULL;
+
+ return err;
+}
+
+void mlx5_accel_psp_fs_cleanup_rx_tables(struct mlx5e_priv *priv)
+{
+ int i;
+
+ if (!priv->psp)
+ return;
+
+ for (i = 0; i < ACCEL_FS_PSP_NUM_TYPES; i++)
+ accel_psp_fs_rx_ft_put(priv->psp->fs, i);
+}
+
+int mlx5_accel_psp_fs_init_rx_tables(struct mlx5e_priv *priv)
+{
+ struct mlx5e_psp_fs *fs;
+ int err, i;
+
+ if (!priv->psp)
+ return 0;
+
+ fs = priv->psp->fs;
+ for (i = 0; i < ACCEL_FS_PSP_NUM_TYPES; i++) {
+ err = accel_psp_fs_rx_ft_get(fs, i);
+ if (err)
+ goto out_err;
+ }
+
+ return 0;
+
+out_err:
+ i--;
+ while (i >= 0) {
+ accel_psp_fs_rx_ft_put(fs, i);
+ --i;
+ }
+
+ return err;
+}
+
+static int accel_psp_fs_tx_create_ft_table(struct mlx5e_psp_fs *fs)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_core_dev *mdev = fs->mdev;
+ struct mlx5_flow_act flow_act = {};
+ u32 *in, *mc, *outer_headers_c;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ struct mlx5e_psp_tx *tx_fs;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!spec || !in) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ ft_attr.max_fte = 1;
+#define MLX5E_PSP_PRIO 0
+ ft_attr.prio = MLX5E_PSP_PRIO;
+#define MLX5E_PSP_LEVEL 0
+ ft_attr.level = MLX5E_PSP_LEVEL;
+ ft_attr.autogroup.max_num_groups = 1;
+
+ tx_fs = fs->tx_fs;
+ ft = mlx5_create_flow_table(tx_fs->ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "PSP: fail to add psp tx flow table, err = %d\n", err);
+ goto out;
+ }
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ fg = mlx5_create_flow_group(ft, in);
+ if (IS_ERR(fg)) {
+ err = PTR_ERR(fg);
+ mlx5_core_err(mdev, "PSP: fail to add psp tx flow group, err = %d\n", err);
+ goto err_create_fg;
+ }
+
+ setup_fte_udp_psp(spec, PSP_DEFAULT_UDP_PORT);
+ flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_PSP;
+ flow_act.flags |= FLOW_ACT_NO_APPEND;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter = tx_fs->tx_counter;
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "PSP: fail to add psp tx flow rule, err = %d\n", err);
+ goto err_add_flow_rule;
+ }
+
+ tx_fs->ft = ft;
+ tx_fs->fg = fg;
+ tx_fs->rule = rule;
+ goto out;
+
+err_add_flow_rule:
+ mlx5_destroy_flow_group(fg);
+err_create_fg:
+ mlx5_destroy_flow_table(ft);
+out:
+ kvfree(in);
+ kvfree(spec);
+ return err;
+}
+
+static void accel_psp_fs_tx_destroy(struct mlx5e_psp_tx *tx_fs)
+{
+ if (!tx_fs->ft)
+ return;
+
+ mlx5_del_flow_rules(tx_fs->rule);
+ mlx5_destroy_flow_group(tx_fs->fg);
+ mlx5_destroy_flow_table(tx_fs->ft);
+}
+
+static int accel_psp_fs_tx_ft_get(struct mlx5e_psp_fs *fs)
+{
+ struct mlx5e_psp_tx *tx_fs = fs->tx_fs;
+ int err = 0;
+
+ mutex_lock(&tx_fs->mutex);
+ if (tx_fs->refcnt++)
+ goto out;
+
+ err = accel_psp_fs_tx_create_ft_table(fs);
+ if (err)
+ tx_fs->refcnt--;
+out:
+ mutex_unlock(&tx_fs->mutex);
+ return err;
+}
+
+static void accel_psp_fs_tx_ft_put(struct mlx5e_psp_fs *fs)
+{
+ struct mlx5e_psp_tx *tx_fs = fs->tx_fs;
+
+ mutex_lock(&tx_fs->mutex);
+ if (--tx_fs->refcnt)
+ goto out;
+
+ accel_psp_fs_tx_destroy(tx_fs);
+out:
+ mutex_unlock(&tx_fs->mutex);
+}
+
+static void accel_psp_fs_cleanup_tx(struct mlx5e_psp_fs *fs)
+{
+ struct mlx5e_psp_tx *tx_fs = fs->tx_fs;
+
+ if (!tx_fs)
+ return;
+
+ mlx5_fc_destroy(fs->mdev, tx_fs->tx_counter);
+ mutex_destroy(&tx_fs->mutex);
+ WARN_ON(tx_fs->refcnt);
+ kfree(tx_fs);
+ fs->tx_fs = NULL;
+}
+
+static int accel_psp_fs_init_tx(struct mlx5e_psp_fs *fs)
+{
+ struct mlx5_core_dev *mdev = fs->mdev;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_fc *flow_counter;
+ struct mlx5e_psp_tx *tx_fs;
+
+ ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
+ if (!ns)
+ return -EOPNOTSUPP;
+
+ tx_fs = kzalloc(sizeof(*tx_fs), GFP_KERNEL);
+ if (!tx_fs)
+ return -ENOMEM;
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ mlx5_core_warn(mdev,
+ "fail to create psp tx flow counter err=%pe\n",
+ flow_counter);
+ kfree(tx_fs);
+ return PTR_ERR(flow_counter);
+ }
+ tx_fs->tx_counter = flow_counter;
+ mutex_init(&tx_fs->mutex);
+ tx_fs->ns = ns;
+ fs->tx_fs = tx_fs;
+ return 0;
+}
+
+static void
+mlx5e_accel_psp_fs_get_stats_fill(struct mlx5e_priv *priv,
+ struct mlx5e_psp_stats *stats)
+{
+ struct mlx5e_psp_tx *tx_fs = priv->psp->fs->tx_fs;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_accel_fs_psp *accel_psp;
+
+ accel_psp = (struct mlx5e_accel_fs_psp *)priv->psp->fs->rx_fs;
+
+ if (tx_fs->tx_counter)
+ mlx5_fc_query(mdev, tx_fs->tx_counter, &stats->psp_tx_pkts,
+ &stats->psp_tx_bytes);
+
+ if (accel_psp->rx_counter)
+ mlx5_fc_query(mdev, accel_psp->rx_counter, &stats->psp_rx_pkts,
+ &stats->psp_rx_bytes);
+
+ if (accel_psp->rx_auth_fail_counter)
+ mlx5_fc_query(mdev, accel_psp->rx_auth_fail_counter,
+ &stats->psp_rx_pkts_auth_fail,
+ &stats->psp_rx_bytes_auth_fail);
+
+ if (accel_psp->rx_err_counter)
+ mlx5_fc_query(mdev, accel_psp->rx_err_counter,
+ &stats->psp_rx_pkts_frame_err,
+ &stats->psp_rx_bytes_frame_err);
+
+ if (accel_psp->rx_bad_counter)
+ mlx5_fc_query(mdev, accel_psp->rx_bad_counter,
+ &stats->psp_rx_pkts_drop,
+ &stats->psp_rx_bytes_drop);
+}
+
+void mlx5_accel_psp_fs_cleanup_tx_tables(struct mlx5e_priv *priv)
+{
+ if (!priv->psp)
+ return;
+
+ accel_psp_fs_tx_ft_put(priv->psp->fs);
+}
+
+int mlx5_accel_psp_fs_init_tx_tables(struct mlx5e_priv *priv)
+{
+ if (!priv->psp)
+ return 0;
+
+ return accel_psp_fs_tx_ft_get(priv->psp->fs);
+}
+
+static void mlx5e_accel_psp_fs_cleanup(struct mlx5e_psp_fs *fs)
+{
+ accel_psp_fs_cleanup_rx(fs);
+ accel_psp_fs_cleanup_tx(fs);
+ kfree(fs);
+}
+
+static struct mlx5e_psp_fs *mlx5e_accel_psp_fs_init(struct mlx5e_priv *priv)
+{
+ struct mlx5e_psp_fs *fs;
+ int err = 0;
+
+ fs = kzalloc(sizeof(*fs), GFP_KERNEL);
+ if (!fs)
+ return ERR_PTR(-ENOMEM);
+
+ fs->mdev = priv->mdev;
+ err = accel_psp_fs_init_tx(fs);
+ if (err)
+ goto err_tx;
+
+ fs->fs = priv->fs;
+ err = accel_psp_fs_init_rx(fs);
+ if (err)
+ goto err_rx;
+
+ return fs;
+
+err_rx:
+ accel_psp_fs_cleanup_tx(fs);
+err_tx:
+ kfree(fs);
+ return ERR_PTR(err);
+}
+
+static int
+mlx5e_psp_set_config(struct psp_dev *psd, struct psp_dev_config *conf,
+ struct netlink_ext_ack *extack)
+{
+ return 0; /* TODO: this should actually do things to the device */
+}
+
+static int
+mlx5e_psp_generate_key_spi(struct mlx5_core_dev *mdev,
+ enum mlx5_psp_gen_spi_in_key_size keysz,
+ unsigned int keysz_bytes,
+ struct psp_key_parsed *key)
+{
+ u32 out[MLX5_ST_SZ_DW(psp_gen_spi_out) + MLX5_ST_SZ_DW(key_spi)] = {};
+ u32 in[MLX5_ST_SZ_DW(psp_gen_spi_in)] = {};
+ void *outkey;
+ int err;
+
+ WARN_ON_ONCE(keysz_bytes > PSP_MAX_KEY);
+
+ MLX5_SET(psp_gen_spi_in, in, opcode, MLX5_CMD_OP_PSP_GEN_SPI);
+ MLX5_SET(psp_gen_spi_in, in, key_size, keysz);
+ MLX5_SET(psp_gen_spi_in, in, num_of_spi, 1);
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ outkey = MLX5_ADDR_OF(psp_gen_spi_out, out, key_spi);
+ key->spi = cpu_to_be32(MLX5_GET(key_spi, outkey, spi));
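+	/* The key field in the response is 32 bytes; copy its trailing keysz_bytes as the key */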
+ memcpy(key->key, MLX5_ADDR_OF(key_spi, outkey, key) + 32 - keysz_bytes,
+ keysz_bytes);
+
+ return 0;
+}
+
+static int
+mlx5e_psp_rx_spi_alloc(struct psp_dev *psd, u32 version,
+ struct psp_key_parsed *assoc,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = netdev_priv(psd->main_netdev);
+ enum mlx5_psp_gen_spi_in_key_size keysz;
+ u8 keysz_bytes;
+
+ switch (version) {
+ case PSP_VERSION_HDR0_AES_GCM_128:
+ keysz = MLX5_PSP_GEN_SPI_IN_KEY_SIZE_128;
+ keysz_bytes = 16;
+ break;
+ case PSP_VERSION_HDR0_AES_GCM_256:
+ keysz = MLX5_PSP_GEN_SPI_IN_KEY_SIZE_256;
+ keysz_bytes = 32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return mlx5e_psp_generate_key_spi(priv->mdev, keysz, keysz_bytes, assoc);
+}
+
+struct psp_key {
+ u32 id;
+};
+
+static int mlx5e_psp_assoc_add(struct psp_dev *psd, struct psp_assoc *pas,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = netdev_priv(psd->main_netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct psp_key_parsed *tx = &pas->tx;
+ struct mlx5e_psp *psp = priv->psp;
+ struct psp_key *nkey;
+ int err;
+
+ mdev = priv->mdev;
+ nkey = (struct psp_key *)pas->drv_data;
+
+ err = mlx5_create_encryption_key(mdev, tx->key,
+ psp_key_size(pas->version),
+ MLX5_ACCEL_OBJ_PSP_KEY,
+ &nkey->id);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to create encryption key (err = %d)\n", err);
+ return err;
+ }
+
+ atomic_inc(&psp->tx_key_cnt);
+ return 0;
+}
+
+static void mlx5e_psp_assoc_del(struct psp_dev *psd, struct psp_assoc *pas)
+{
+ struct mlx5e_priv *priv = netdev_priv(psd->main_netdev);
+ struct mlx5e_psp *psp = priv->psp;
+ struct psp_key *nkey;
+
+ nkey = (struct psp_key *)pas->drv_data;
+ mlx5_destroy_encryption_key(priv->mdev, nkey->id);
+ atomic_dec(&psp->tx_key_cnt);
+}
+
+static int mlx5e_psp_rotate_key(struct mlx5_core_dev *mdev)
+{
+ u32 in[MLX5_ST_SZ_DW(psp_rotate_key_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(psp_rotate_key_out)];
+
+ MLX5_SET(psp_rotate_key_in, in, opcode,
+ MLX5_CMD_OP_PSP_ROTATE_KEY);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static int
+mlx5e_psp_key_rotate(struct psp_dev *psd, struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = netdev_priv(psd->main_netdev);
+
+ /* no support for protecting against external rotations */
+ psd->generation = 0;
+
+ return mlx5e_psp_rotate_key(priv->mdev);
+}
+
+static void
+mlx5e_psp_get_stats(struct psp_dev *psd, struct psp_dev_stats *stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(psd->main_netdev);
+ struct mlx5e_psp_stats nstats;
+
+ mlx5e_accel_psp_fs_get_stats_fill(priv, &nstats);
+ stats->rx_packets = nstats.psp_rx_pkts;
+ stats->rx_bytes = nstats.psp_rx_bytes;
+ stats->rx_auth_fail = nstats.psp_rx_pkts_auth_fail;
+ stats->rx_error = nstats.psp_rx_pkts_frame_err;
+ stats->rx_bad = nstats.psp_rx_pkts_drop;
+ stats->tx_packets = nstats.psp_tx_pkts;
+ stats->tx_bytes = nstats.psp_tx_bytes;
+ stats->tx_error = atomic_read(&priv->psp->tx_drop);
+}
+
+static struct psp_dev_ops mlx5_psp_ops = {
+ .set_config = mlx5e_psp_set_config,
+ .rx_spi_alloc = mlx5e_psp_rx_spi_alloc,
+ .tx_key_add = mlx5e_psp_assoc_add,
+ .tx_key_del = mlx5e_psp_assoc_del,
+ .key_rotate = mlx5e_psp_key_rotate,
+ .get_stats = mlx5e_psp_get_stats,
+};
+
+void mlx5e_psp_unregister(struct mlx5e_priv *priv)
+{
+ if (!priv->psp || !priv->psp->psp)
+ return;
+
+ psp_dev_unregister(priv->psp->psp);
+}
+
+void mlx5e_psp_register(struct mlx5e_priv *priv)
+{
+ /* FW Caps missing */
+ if (!priv->psp)
+ return;
+
+ priv->psp->caps.assoc_drv_spc = sizeof(u32);
+ priv->psp->caps.versions = 1 << PSP_VERSION_HDR0_AES_GCM_128;
+ if (MLX5_CAP_PSP(priv->mdev, psp_crypto_esp_aes_gcm_256_encrypt) &&
+ MLX5_CAP_PSP(priv->mdev, psp_crypto_esp_aes_gcm_256_decrypt))
+ priv->psp->caps.versions |= 1 << PSP_VERSION_HDR0_AES_GCM_256;
+
+ priv->psp->psp = psp_dev_create(priv->netdev, &mlx5_psp_ops,
+ &priv->psp->caps, NULL);
+ if (IS_ERR(priv->psp->psp))
+ mlx5_core_err(priv->mdev, "PSP failed to register due to %pe\n",
+ priv->psp->psp);
+}
+
+int mlx5e_psp_init(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_psp_fs *fs;
+ struct mlx5e_psp *psp;
+ int err;
+
+ if (!mlx5_is_psp_device(mdev)) {
+ mlx5_core_dbg(mdev, "PSP offload not supported\n");
+ return 0;
+ }
+
+ if (!MLX5_CAP_ETH(mdev, swp)) {
+ mlx5_core_dbg(mdev, "SWP not supported\n");
+ return 0;
+ }
+
+ if (!MLX5_CAP_ETH(mdev, swp_csum)) {
+ mlx5_core_dbg(mdev, "SWP checksum not supported\n");
+ return 0;
+ }
+
+ if (!MLX5_CAP_ETH(mdev, swp_csum_l4_partial)) {
+ mlx5_core_dbg(mdev, "SWP L4 partial checksum not supported\n");
+ return 0;
+ }
+
+ if (!MLX5_CAP_ETH(mdev, swp_lso)) {
+ mlx5_core_dbg(mdev, "PSP LSO not supported\n");
+ return 0;
+ }
+
+ psp = kzalloc(sizeof(*psp), GFP_KERNEL);
+ if (!psp)
+ return -ENOMEM;
+
+ priv->psp = psp;
+ fs = mlx5e_accel_psp_fs_init(priv);
+ if (IS_ERR(fs)) {
+ err = PTR_ERR(fs);
+ goto out_err;
+ }
+
+ psp->fs = fs;
+
+ mlx5_core_dbg(priv->mdev, "PSP attached to netdevice\n");
+ return 0;
+
+out_err:
+ priv->psp = NULL;
+ kfree(psp);
+ return err;
+}
+
+void mlx5e_psp_cleanup(struct mlx5e_priv *priv)
+{
+ struct mlx5e_psp *psp = priv->psp;
+
+ if (!psp)
+ return;
+
+ WARN_ON(atomic_read(&psp->tx_key_cnt));
+ mlx5e_accel_psp_fs_cleanup(psp->fs);
+ priv->psp = NULL;
+ kfree(psp);
+}
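
The per-association scratch area advertised above in mlx5e_psp_register() (caps.assoc_drv_spc = sizeof(u32)) carries exactly one DEK object id: mlx5e_psp_assoc_add() writes it through struct psp_key, and the TX path later reads it back as a raw u32. The following is a minimal user-space sketch of that round trip, illustrative only, assuming nothing beyond the sizes visible in this patch:

/* Illustrative sketch only, not driver code: a u32 DEK object id is the
 * entire contents of the drv_data area sized by caps.assoc_drv_spc.
 */
#include <stdint.h>
#include <stdio.h>

struct psp_key { uint32_t id; };

int main(void)
{
	struct psp_key key_storage = { 0 };   /* stands in for the drv_data area */
	void *drv_data = &key_storage;
	struct psp_key *nkey = drv_data;
	uint32_t keyid;

	nkey->id = 0xabcdef;                  /* stored on tx_key_add (DEK object id) */
	keyid = *(uint32_t *)drv_data;        /* read back when building the TX descriptor */
	printf("keyid = 0x%x\n", keyid);
	return 0;
}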
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.h
new file mode 100644
index 000000000000..6b62fef0d9a7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5E_ACCEL_PSP_H__
+#define __MLX5E_ACCEL_PSP_H__
+#if IS_ENABLED(CONFIG_MLX5_EN_PSP)
+#include <net/psp/types.h>
+#include "en.h"
+
+struct mlx5e_psp_stats {
+ u64 psp_rx_pkts;
+ u64 psp_rx_bytes;
+ u64 psp_rx_pkts_auth_fail;
+ u64 psp_rx_bytes_auth_fail;
+ u64 psp_rx_pkts_frame_err;
+ u64 psp_rx_bytes_frame_err;
+ u64 psp_rx_pkts_drop;
+ u64 psp_rx_bytes_drop;
+ u64 psp_tx_pkts;
+ u64 psp_tx_bytes;
+ u64 psp_tx_pkts_drop;
+ u64 psp_tx_bytes_drop;
+};
+
+struct mlx5e_psp {
+ struct psp_dev *psp;
+ struct psp_dev_caps caps;
+ struct mlx5e_psp_fs *fs;
+ atomic_t tx_key_cnt;
+ atomic_t tx_drop;
+};
+
+static inline bool mlx5_is_psp_device(struct mlx5_core_dev *mdev)
+{
+ if (!MLX5_CAP_GEN(mdev, psp))
+ return false;
+
+ if (!MLX5_CAP_PSP(mdev, psp_crypto_offload) ||
+ !MLX5_CAP_PSP(mdev, psp_crypto_esp_aes_gcm_128_encrypt) ||
+ !MLX5_CAP_PSP(mdev, psp_crypto_esp_aes_gcm_128_decrypt))
+ return false;
+
+ return true;
+}
+
+int mlx5_accel_psp_fs_init_rx_tables(struct mlx5e_priv *priv);
+void mlx5_accel_psp_fs_cleanup_rx_tables(struct mlx5e_priv *priv);
+int mlx5_accel_psp_fs_init_tx_tables(struct mlx5e_priv *priv);
+void mlx5_accel_psp_fs_cleanup_tx_tables(struct mlx5e_priv *priv);
+void mlx5e_psp_register(struct mlx5e_priv *priv);
+void mlx5e_psp_unregister(struct mlx5e_priv *priv);
+int mlx5e_psp_init(struct mlx5e_priv *priv);
+void mlx5e_psp_cleanup(struct mlx5e_priv *priv);
+#else
+static inline int mlx5_accel_psp_fs_init_rx_tables(struct mlx5e_priv *priv)
+{
+ return 0;
+}
+
+static inline void mlx5_accel_psp_fs_cleanup_rx_tables(struct mlx5e_priv *priv) { }
+static inline int mlx5_accel_psp_fs_init_tx_tables(struct mlx5e_priv *priv)
+{
+ return 0;
+}
+
+static inline void mlx5_accel_psp_fs_cleanup_tx_tables(struct mlx5e_priv *priv) { }
+static inline bool mlx5_is_psp_device(struct mlx5_core_dev *mdev)
+{
+ return false;
+}
+
+static inline void mlx5e_psp_register(struct mlx5e_priv *priv) { }
+static inline void mlx5e_psp_unregister(struct mlx5e_priv *priv) { }
+static inline int mlx5e_psp_init(struct mlx5e_priv *priv) { return 0; }
+static inline void mlx5e_psp_cleanup(struct mlx5e_priv *priv) { }
+#endif /* CONFIG_MLX5_EN_PSP */
+#endif /* __MLX5E_ACCEL_PSP_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c
new file mode 100644
index 000000000000..c17ea0fcd8ef
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <net/protocol.h>
+#include <net/udp.h>
+#include <net/ip6_checksum.h>
+#include <net/psp/types.h>
+
+#include "en.h"
+#include "psp.h"
+#include "en_accel/psp_rxtx.h"
+#include "en_accel/psp.h"
+
+enum {
+ MLX5E_PSP_OFFLOAD_RX_SYNDROME_DECRYPTED,
+ MLX5E_PSP_OFFLOAD_RX_SYNDROME_AUTH_FAILED,
+ MLX5E_PSP_OFFLOAD_RX_SYNDROME_BAD_TRAILER,
+};
+
+static void mlx5e_psp_set_swp(struct sk_buff *skb,
+ struct mlx5e_accel_tx_psp_state *psp_st,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ /* Tunnel Mode:
+ * SWP: OutL3 InL3 InL4
+ * Pkt: MAC IP UDP PSP IP L4
+ *
+ * Transport Mode:
+ * SWP: OutL3 OutL4
+ * Pkt: MAC IP UDP PSP L4
+ *
+ * Tunnel (VXLAN TCP/UDP) over transport-mode PSP:
+ * SWP: OutL3 InL3 InL4
+ * Pkt: MAC IP UDP PSP UDP VXLAN IP L4
+ */
+ u8 inner_ipproto = 0;
+ struct ethhdr *eth;
+
+ /* Shared settings */
+ eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
+ if (skb->protocol == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
+
+ if (skb->inner_protocol_type == ENCAP_TYPE_IPPROTO) {
+ inner_ipproto = skb->inner_ipproto;
+ /* Set SWP additional flags for packet of type IP|UDP|PSP|[ TCP | UDP ] */
+ switch (inner_ipproto) {
+ case IPPROTO_UDP:
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+ fallthrough;
+ case IPPROTO_TCP:
+ eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
+ break;
+ default:
+ break;
+ }
+ } else {
+ /* Ethernet-encapsulated tunnel, e.g. VXLAN */
+ if (skb->inner_protocol_type != ENCAP_TYPE_ETHER)
+ return;
+
+ eth = (struct ethhdr *)skb_inner_mac_header(skb);
+ switch (ntohs(eth->h_proto)) {
+ case ETH_P_IP:
+ inner_ipproto = ((struct iphdr *)((char *)skb->data +
+ skb_inner_network_offset(skb)))->protocol;
+ break;
+ case ETH_P_IPV6:
+ inner_ipproto = ((struct ipv6hdr *)((char *)skb->data +
+ skb_inner_network_offset(skb)))->nexthdr;
+ break;
+ default:
+ break;
+ }
+
+ /* Tunnel (VXLAN TCP/UDP) over transport-mode PSP, i.e. the PSP payload is a VXLAN tunnel */
+ switch (inner_ipproto) {
+ case IPPROTO_UDP:
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+ fallthrough;
+ case IPPROTO_TCP:
+ eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+ eseg->swp_inner_l4_offset =
+ (skb->csum_start + skb->head - skb->data) / 2;
+ if (skb->protocol == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+ break;
+ default:
+ break;
+ }
+
+ psp_st->inner_ipproto = inner_ipproto;
+ }
+}
+
+static bool mlx5e_psp_set_state(struct mlx5e_priv *priv,
+ struct sk_buff *skb,
+ struct mlx5e_accel_tx_psp_state *psp_st)
+{
+ struct psp_assoc *pas;
+ bool ret = false;
+
+ rcu_read_lock();
+ pas = psp_skb_get_assoc_rcu(skb);
+ if (!pas)
+ goto out;
+
+ ret = true;
+ psp_st->tailen = PSP_TRL_SIZE;
+ psp_st->spi = pas->tx.spi;
+ psp_st->ver = pas->version;
+ psp_st->keyid = *(u32 *)pas->drv_data;
+
+out:
+ rcu_read_unlock();
+ return ret;
+}
+
+bool mlx5e_psp_offload_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe)
+{
+ u32 psp_meta_data = be32_to_cpu(cqe->ft_metadata);
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ u16 dev_id = priv->psp->psp->id;
+ bool strip_icv = true;
+ u8 generation = 0;
+
+ /* TBD: report errors as SW counters to ethtool; any further handling? */
+ if (MLX5_PSP_METADATA_SYNDROME(psp_meta_data) != MLX5E_PSP_OFFLOAD_RX_SYNDROME_DECRYPTED)
+ goto drop;
+
+ if (psp_dev_rcv(skb, dev_id, generation, strip_icv))
+ goto drop;
+
+ skb->decrypted = 1;
+ return false;
+
+drop:
+ kfree_skb(skb);
+ return true;
+}
+
+void mlx5e_psp_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5e_accel_tx_psp_state *psp_st,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ if (!mlx5_is_psp_device(priv->mdev))
+ return;
+
+ if (unlikely(skb->protocol != htons(ETH_P_IP) &&
+ skb->protocol != htons(ETH_P_IPV6)))
+ return;
+
+ mlx5e_psp_set_swp(skb, psp_st, eseg);
+ /* ConnectX-7 workaround for PSP LSO: clear the SWP L3 offsets */
+ eseg->swp_outer_l3_offset = 0;
+ eseg->swp_inner_l3_offset = 0;
+
+ eseg->flow_table_metadata |= cpu_to_be32(psp_st->keyid);
+ eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER) |
+ cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC);
+}
+
+void mlx5e_psp_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
+ struct mlx5e_accel_tx_psp_state *psp_st,
+ struct mlx5_wqe_inline_seg *inlseg)
+{
+ inlseg->byte_count = cpu_to_be32(psp_st->tailen | MLX5_INLINE_SEG);
+}
+
+bool mlx5e_psp_handle_tx_skb(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct mlx5e_accel_tx_psp_state *psp_st)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct net *net = sock_net(skb->sk);
+ const struct ipv6hdr *ip6;
+ struct tcphdr *th;
+
+ if (!mlx5e_psp_set_state(priv, skb, psp_st))
+ return true;
+
+ /* PSP-encapsulate the packet */
+ if (!psp_dev_encapsulate(net, skb, psp_st->spi, psp_st->ver, 0)) {
+ kfree_skb_reason(skb, SKB_DROP_REASON_PSP_OUTPUT);
+ atomic_inc(&priv->psp->tx_drop);
+ return false;
+ }
+ if (skb_is_gso(skb)) {
+ ip6 = ipv6_hdr(skb);
+ th = inner_tcp_hdr(skb);
+
+ th->check = ~tcp_v6_check(skb_shinfo(skb)->gso_size + inner_tcp_hdrlen(skb), &ip6->saddr,
+ &ip6->daddr, 0);
+ }
+
+ return true;
+}
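
One detail in mlx5e_psp_set_swp() above is that the SWP offset fields in the Ethernet segment are programmed in 2-byte units, which is why every skb offset is divided by two before being written. A stand-alone sketch of that arithmetic follows; the header lengths are assumptions for the example, not real skb state:

/* Illustrative sketch only: SWP offsets are expressed in 2-byte units,
 * hence the "/ 2" on the skb offsets in mlx5e_psp_set_swp().
 * The header lengths below are assumed values for the example.
 */
#include <stdio.h>

int main(void)
{
	unsigned int eth_hlen = 14;                   /* Ethernet header */
	unsigned int ip_hlen = 20;                    /* outer IPv4 header, no options */
	unsigned int network_off = eth_hlen;          /* what skb_network_offset() would return */
	unsigned int transport_off = eth_hlen + ip_hlen;

	printf("swp_outer_l3_offset = %u\n", network_off / 2);   /* 7  */
	printf("swp_outer_l4_offset = %u\n", transport_off / 2); /* 17 */
	return 0;
}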
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.h
new file mode 100644
index 000000000000..70289c921bd6
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5E_PSP_RXTX_H__
+#define __MLX5E_PSP_RXTX_H__
+
+#include <linux/skbuff.h>
+#include <net/xfrm.h>
+#include <net/psp.h>
+#include "en.h"
+#include "en/txrx.h"
+
+/* Bit30: PSP marker, Bit29-23: PSP syndrome, Bit22-0: PSP obj id */
+#define MLX5_PSP_METADATA_MARKER(metadata) ((((metadata) >> 30) & 0x3) == 0x3)
+#define MLX5_PSP_METADATA_SYNDROME(metadata) (((metadata) >> 23) & GENMASK(6, 0))
+#define MLX5_PSP_METADATA_HANDLE(metadata) ((metadata) & GENMASK(22, 0))
+
+struct mlx5e_accel_tx_psp_state {
+ u32 tailen;
+ u32 keyid;
+ __be32 spi;
+ u8 inner_ipproto;
+ u8 ver;
+};
+
+#ifdef CONFIG_MLX5_EN_PSP
+static inline bool mlx5e_psp_is_offload_state(struct mlx5e_accel_tx_psp_state *psp_state)
+{
+ return (psp_state->tailen != 0);
+}
+
+static inline bool mlx5e_psp_is_offload(struct sk_buff *skb, struct net_device *netdev)
+{
+ bool ret;
+
+ rcu_read_lock();
+ ret = !!psp_skb_get_assoc_rcu(skb);
+ rcu_read_unlock();
+ return ret;
+}
+
+bool mlx5e_psp_handle_tx_skb(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct mlx5e_accel_tx_psp_state *psp_st);
+
+void mlx5e_psp_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5e_accel_tx_psp_state *psp_st,
+ struct mlx5_wqe_eth_seg *eseg);
+
+void mlx5e_psp_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
+ struct mlx5e_accel_tx_psp_state *psp_st,
+ struct mlx5_wqe_inline_seg *inlseg);
+
+static inline bool mlx5e_psp_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_accel_tx_psp_state *psp_st,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ u8 inner_ipproto;
+
+ if (!mlx5e_psp_is_offload_state(psp_st))
+ return false;
+
+ inner_ipproto = psp_st->inner_ipproto;
+ eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
+ if (inner_ipproto) {
+ eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
+ if (inner_ipproto == IPPROTO_TCP || inner_ipproto == IPPROTO_UDP)
+ eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
+ sq->stats->csum_partial_inner++;
+ } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
+ sq->stats->csum_partial_inner++;
+ }
+
+ return true;
+}
+
+static inline unsigned int mlx5e_psp_tx_ids_len(struct mlx5e_accel_tx_psp_state *psp_st)
+{
+ return psp_st->tailen;
+}
+
+static inline bool mlx5e_psp_is_rx_flow(struct mlx5_cqe64 *cqe)
+{
+ return MLX5_PSP_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));
+}
+
+bool mlx5e_psp_offload_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe);
+#else
+static inline bool mlx5e_psp_is_offload_state(struct mlx5e_accel_tx_psp_state *psp_state)
+{
+ return false;
+}
+
+static inline bool mlx5e_psp_is_offload(struct sk_buff *skb, struct net_device *netdev)
+{
+ return false;
+}
+
+static inline bool mlx5e_psp_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_accel_tx_psp_state *psp_st,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ return false;
+}
+
+static inline bool mlx5e_psp_is_rx_flow(struct mlx5_cqe64 *cqe)
+{
+ return false;
+}
+
+static inline bool mlx5e_psp_offload_handle_rx_skb(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe)
+{
+ return false;
+}
+#endif /* CONFIG_MLX5_EN_PSP */
+#endif /* __MLX5E_PSP_RXTX_H__ */
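
The MLX5_PSP_METADATA_* macros above can be exercised on their own. The sketch below is illustrative only: it re-declares the masks for user space and decodes a made-up metadata word (marker bits set, syndrome 0 for decrypted, an arbitrary handle), mirroring what mlx5e_psp_is_rx_flow() and mlx5e_psp_offload_handle_rx_skb() do with be32_to_cpu(cqe->ft_metadata):

/* Illustrative sketch only: decoding a PSP CQE metadata word with the
 * bit layout defined above. GENMASK is re-declared for user space.
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l) (((~0u) >> (31 - (h))) & ((~0u) << (l)))
#define MLX5_PSP_METADATA_MARKER(m)   ((((m) >> 30) & 0x3) == 0x3)
#define MLX5_PSP_METADATA_SYNDROME(m) (((m) >> 23) & GENMASK(6, 0))
#define MLX5_PSP_METADATA_HANDLE(m)   ((m) & GENMASK(22, 0))

int main(void)
{
	/* marker bits set, syndrome 0 (decrypted), handle 0x1234 -- made-up value */
	uint32_t meta = 0xc0000000u | (0u << 23) | 0x1234u;

	printf("marker=%d syndrome=%u handle=0x%x\n",
	       MLX5_PSP_METADATA_MARKER(meta),
	       MLX5_PSP_METADATA_SYNDROME(meta),
	       MLX5_PSP_METADATA_HANDLE(meta));
	return 0;
}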
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 6ed3a32b7e22..5a2ac7b6f260 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -30,6 +30,7 @@
* SOFTWARE.
*/
+#include "devlink.h"
#include "en.h"
#include "lib/crypto.h"
@@ -140,9 +141,22 @@ err_close_tises:
return err;
}
+static unsigned int
+mlx5e_get_devlink_param_num_doorbells(struct mlx5_core_dev *dev)
+{
+ const u32 param_id = DEVLINK_PARAM_GENERIC_ID_NUM_DOORBELLS;
+ struct devlink *devlink = priv_to_devlink(dev);
+ union devlink_param_value val;
+ int err;
+
+ err = devl_param_driverinit_value_get(devlink, param_id, &val);
+ return err ? MLX5_DEFAULT_NUM_DOORBELLS : val.vu32;
+}
+
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises)
{
struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
+ unsigned int num_doorbells, i;
int err;
err = mlx5_core_alloc_pd(mdev, &res->pdn);
@@ -163,17 +177,30 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises)
goto err_dealloc_transport_domain;
}
- err = mlx5_alloc_bfreg(mdev, &res->bfreg, false, false);
- if (err) {
- mlx5_core_err(mdev, "alloc bfreg failed, %d\n", err);
+ num_doorbells = min(mlx5e_get_devlink_param_num_doorbells(mdev),
+ mlx5e_get_max_num_channels(mdev));
+ res->bfregs = kcalloc(num_doorbells, sizeof(*res->bfregs), GFP_KERNEL);
+ if (!res->bfregs) {
+ err = -ENOMEM;
goto err_destroy_mkey;
}
+ for (i = 0; i < num_doorbells; i++) {
+ err = mlx5_alloc_bfreg(mdev, res->bfregs + i, false, false);
+ if (err) {
+ mlx5_core_warn(mdev,
+ "could only allocate %d/%d doorbells, err %d.\n",
+ i, num_doorbells, err);
+ break;
+ }
+ }
+ res->num_bfregs = i;
+
if (create_tises) {
err = mlx5e_create_tises(mdev, res->tisn);
if (err) {
mlx5_core_err(mdev, "alloc tises failed, %d\n", err);
- goto err_destroy_bfreg;
+ goto err_destroy_bfregs;
}
res->tisn_valid = true;
}
@@ -183,15 +210,17 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises)
mdev->mlx5e_res.dek_priv = mlx5_crypto_dek_init(mdev);
if (IS_ERR(mdev->mlx5e_res.dek_priv)) {
- mlx5_core_err(mdev, "crypto dek init failed, %ld\n",
- PTR_ERR(mdev->mlx5e_res.dek_priv));
+ mlx5_core_err(mdev, "crypto dek init failed, %pe\n",
+ mdev->mlx5e_res.dek_priv);
mdev->mlx5e_res.dek_priv = NULL;
}
return 0;
-err_destroy_bfreg:
- mlx5_free_bfreg(mdev, &res->bfreg);
+err_destroy_bfregs:
+ for (i = 0; i < res->num_bfregs; i++)
+ mlx5_free_bfreg(mdev, res->bfregs + i);
+ kfree(res->bfregs);
err_destroy_mkey:
mlx5_core_destroy_mkey(mdev, res->mkey);
err_dealloc_transport_domain:
@@ -209,52 +238,52 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
mdev->mlx5e_res.dek_priv = NULL;
if (res->tisn_valid)
mlx5e_destroy_tises(mdev, res->tisn);
- mlx5_free_bfreg(mdev, &res->bfreg);
+ for (unsigned int i = 0; i < res->num_bfregs; i++)
+ mlx5_free_bfreg(mdev, res->bfregs + i);
+ kfree(res->bfregs);
mlx5_core_destroy_mkey(mdev, res->mkey);
mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
mlx5_core_dealloc_pd(mdev, res->pdn);
memset(res, 0, sizeof(*res));
}
-int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
- bool enable_mc_lb)
+int mlx5e_modify_tirs_lb(struct mlx5_core_dev *mdev, bool enable_uc_lb,
+ bool enable_mc_lb)
{
- struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_tir_builder *builder;
struct mlx5e_tir *tir;
- u8 lb_flags = 0;
- int err = 0;
- u32 tirn = 0;
- int inlen;
- void *in;
+ int err = 0;
- inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
+ builder = mlx5e_tir_builder_alloc(true);
+ if (!builder)
return -ENOMEM;
- if (enable_uc_lb)
- lb_flags = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
-
- if (enable_mc_lb)
- lb_flags |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
-
- if (lb_flags)
- MLX5_SET(modify_tir_in, in, ctx.self_lb_block, lb_flags);
-
- MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
+ mlx5e_tir_builder_build_self_lb_block(builder, enable_uc_lb,
+ enable_mc_lb);
mutex_lock(&mdev->mlx5e_res.hw_objs.td.list_lock);
list_for_each_entry(tir, &mdev->mlx5e_res.hw_objs.td.tirs_list, list) {
- tirn = tir->tirn;
- err = mlx5_core_modify_tir(mdev, tirn, in);
- if (err)
+ err = mlx5e_tir_modify(tir, builder);
+ if (err) {
+ mlx5_core_err(mdev,
+ "modify tir(0x%x) enable_lb uc(%d) mc(%d) failed, %d\n",
+ mlx5e_tir_get_tirn(tir),
+ enable_uc_lb, enable_mc_lb, err);
break;
+ }
}
mutex_unlock(&mdev->mlx5e_res.hw_objs.td.list_lock);
- kvfree(in);
- if (err)
- netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
+ mlx5e_tir_builder_free(builder);
return err;
}
+
+int mlx5e_refresh_tirs(struct mlx5_core_dev *mdev, bool enable_uc_lb,
+ bool enable_mc_lb)
+{
+ if (MLX5_CAP_GEN(mdev, tis_tir_td_order))
+ return 0; /* refresh not needed */
+
+ return mlx5e_modify_tirs_lb(mdev, enable_uc_lb, enable_mc_lb);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 8705cffc747f..fddf7c207f8e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -31,14 +31,15 @@
*/
#include <linux/device.h>
#include <linux/netdevice.h>
+#include <linux/units.h>
#include "en.h"
#include "en/port.h"
#include "en/port_buffer.h"
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
-#define MLX5E_100MB (100000)
-#define MLX5E_1GB (1000000)
+#define MLX5E_100MB_TO_KB (100 * MEGA / KILO)
+#define MLX5E_1GB_TO_KB (GIGA / KILO)
#define MLX5E_CEE_STATE_UP 1
#define MLX5E_CEE_STATE_DOWN 0
@@ -362,6 +363,7 @@ static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
+ u8 buffer_ownership = MLX5_BUF_OWNERSHIP_UNKNOWN;
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
u32 old_cable_len = priv->dcbx.cable_len;
@@ -389,7 +391,14 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
if (MLX5_BUFFER_SUPPORTED(mdev)) {
pfc_new.pfc_en = (changed & MLX5E_PORT_BUFFER_PFC) ? pfc->pfc_en : curr_pfc_en;
- if (priv->dcbx.manual_buffer)
+ ret = mlx5_query_port_buffer_ownership(mdev,
+ &buffer_ownership);
+ if (ret)
+ netdev_err(dev,
+ "%s, Failed to get buffer ownership: %d\n",
+ __func__, ret);
+
+ if (buffer_ownership == MLX5_BUF_OWNERSHIP_SW_OWNED)
ret = mlx5e_port_manual_buffer_config(priv, changed,
dev->mtu, &pfc_new,
NULL, NULL);
@@ -564,10 +573,10 @@ static int mlx5e_dcbnl_ieee_getmaxrate(struct net_device *netdev,
for (i = 0; i <= mlx5_max_tc(mdev); i++) {
switch (max_bw_unit[i]) {
case MLX5_100_MBPS_UNIT:
- maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_100MB;
+ maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_100MB_TO_KB;
break;
case MLX5_GBPS_UNIT:
- maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_1GB;
+ maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_1GB_TO_KB;
break;
case MLX5_BW_NO_LIMIT:
break;
@@ -587,32 +596,55 @@ static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
struct mlx5_core_dev *mdev = priv->mdev;
u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
- __u64 upper_limit_mbps = roundup(255 * MLX5E_100MB, MLX5E_1GB);
+ u64 upper_limit_100mbps;
+ u64 upper_limit_gbps;
int i;
+ struct {
+ int scale;
+ const char *units_str;
+ } units[] = {
+ [MLX5_100_MBPS_UNIT] = {
+ .scale = 100,
+ .units_str = "Mbps",
+ },
+ [MLX5_GBPS_UNIT] = {
+ .scale = 1,
+ .units_str = "Gbps",
+ },
+ };
memset(max_bw_value, 0, sizeof(max_bw_value));
memset(max_bw_unit, 0, sizeof(max_bw_unit));
+ upper_limit_100mbps = U8_MAX * MLX5E_100MB_TO_KB;
+ upper_limit_gbps = U8_MAX * MLX5E_1GB_TO_KB;
for (i = 0; i <= mlx5_max_tc(mdev); i++) {
if (!maxrate->tc_maxrate[i]) {
max_bw_unit[i] = MLX5_BW_NO_LIMIT;
continue;
}
- if (maxrate->tc_maxrate[i] < upper_limit_mbps) {
+ if (maxrate->tc_maxrate[i] <= upper_limit_100mbps) {
max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
- MLX5E_100MB);
+ MLX5E_100MB_TO_KB);
max_bw_value[i] = max_bw_value[i] ? max_bw_value[i] : 1;
max_bw_unit[i] = MLX5_100_MBPS_UNIT;
- } else {
+ } else if (maxrate->tc_maxrate[i] <= upper_limit_gbps) {
max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
- MLX5E_1GB);
+ MLX5E_1GB_TO_KB);
max_bw_unit[i] = MLX5_GBPS_UNIT;
+ } else {
+ netdev_err(netdev,
+ "tc_%d maxrate %llu Kbps exceeds limit %llu\n",
+ i, maxrate->tc_maxrate[i],
+ upper_limit_gbps);
+ return -EINVAL;
}
}
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- netdev_dbg(netdev, "%s: tc_%d <=> max_bw %d Gbps\n",
- __func__, i, max_bw_value[i]);
+ netdev_dbg(netdev, "%s: tc_%d <=> max_bw %u %s\n", __func__, i,
+ max_bw_value[i] * units[max_bw_unit[i]].scale,
+ units[max_bw_unit[i]].units_str);
}
return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
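
For reference, ieee_maxrate values arrive in Kbps and are mapped by the code above to either 100 Mbps or 1 Gbps hardware units, with anything beyond 255 Gbps now rejected. The stand-alone sketch below reproduces that conversion with the same constants and a few made-up rates:

/* Illustrative sketch only: the Kbps -> HW-unit mapping performed in
 * mlx5e_dcbnl_ieee_setmaxrate(), reproduced as plain arithmetic.
 */
#include <stdint.h>
#include <stdio.h>

#define MLX5E_100MB_TO_KB 100000ull   /* 100 * MEGA / KILO */
#define MLX5E_1GB_TO_KB   1000000ull  /* GIGA / KILO */

int main(void)
{
	/* made-up example rates: 2.5 Gbps, 40 Gbps, 300 Gbps */
	uint64_t rates_kbps[] = { 2500000ull, 40000000ull, 300000000ull };

	for (int i = 0; i < 3; i++) {
		uint64_t r = rates_kbps[i];

		if (r <= 255 * MLX5E_100MB_TO_KB)
			printf("%llu Kbps -> %llu x 100 Mbps units\n",
			       (unsigned long long)r,
			       (unsigned long long)(r / MLX5E_100MB_TO_KB));
		else if (r <= 255 * MLX5E_1GB_TO_KB)
			printf("%llu Kbps -> %llu x 1 Gbps units\n",
			       (unsigned long long)r,
			       (unsigned long long)(r / MLX5E_1GB_TO_KB));
		else
			printf("%llu Kbps -> rejected (over 255 Gbps)\n",
			       (unsigned long long)r);
	}
	return 0;
}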
@@ -982,7 +1014,6 @@ static int mlx5e_dcbnl_setbuffer(struct net_device *dev,
if (!changed)
return 0;
- priv->dcbx.manual_buffer = true;
err = mlx5e_port_manual_buffer_config(priv, changed, dev->mtu, NULL,
buffer_size, prio2buffer);
return err;
@@ -1147,6 +1178,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
bool reset = true;
int err;
+ netdev_lock(priv->netdev);
mutex_lock(&priv->state_lock);
new_params = priv->channels.params;
@@ -1162,6 +1194,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
&trust_state, reset);
mutex_unlock(&priv->state_lock);
+ netdev_unlock(priv->netdev);
return err;
}
@@ -1250,7 +1283,6 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
priv->dcbx.port_buff_cell_sz = mlx5e_query_port_buffers_cell_size(priv);
- priv->dcbx.manual_buffer = false;
priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN;
mlx5e_ets_init(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
index 298bb74ec5e9..d1d629697e28 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
@@ -113,7 +113,7 @@ int mlx5e_dim_rx_change(struct mlx5e_rq *rq, bool enable)
__set_bit(MLX5E_RQ_STATE_DIM, &rq->state);
} else {
__clear_bit(MLX5E_RQ_STATE_DIM, &rq->state);
-
+ synchronize_net();
mlx5e_dim_disable(rq->dim);
rq->dim = NULL;
}
@@ -140,7 +140,7 @@ int mlx5e_dim_tx_change(struct mlx5e_txqsq *sq, bool enable)
__set_bit(MLX5E_SQ_STATE_DIM, &sq->state);
} else {
__clear_bit(MLX5E_SQ_STATE_DIM, &sq->state);
-
+ synchronize_net();
mlx5e_dim_disable(sq->dim);
sq->dim = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index cae39198b4db..d3fef1e7e2f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -32,6 +32,7 @@
#include <linux/dim.h>
#include <linux/ethtool_netlink.h>
+#include <net/netdev_queues.h>
#include "en.h"
#include "en/channels.h"
@@ -42,6 +43,8 @@
#include "lib/clock.h"
#include "en/fs_ethtool.h"
+#define LANES_UNKNOWN 0
+
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo)
{
@@ -237,14 +240,38 @@ void mlx5e_build_ptys2ethtool_map(void)
ETHTOOL_LINK_MODE_800000baseDR8_2_Full_BIT,
ETHTOOL_LINK_MODE_800000baseSR8_Full_BIT,
ETHTOOL_LINK_MODE_800000baseVR8_Full_BIT);
-}
-
-static void mlx5e_ethtool_get_speed_arr(struct mlx5_core_dev *mdev,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_200GAUI_1_200GBASE_CR1_KR1, ext,
+ ETHTOOL_LINK_MODE_200000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseDR_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseDR_2_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseVR_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_400GAUI_2_400GBASE_CR2_KR2, ext,
+ ETHTOOL_LINK_MODE_400000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseDR2_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseDR2_2_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseSR2_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseVR2_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_800GAUI_4_800GBASE_CR4_KR4, ext,
+ ETHTOOL_LINK_MODE_800000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseDR4_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseDR4_2_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseVR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1600TAUI_8_1600TBASE_CR8_KR8, ext,
+ ETHTOOL_LINK_MODE_1600000baseCR8_Full_BIT,
+ ETHTOOL_LINK_MODE_1600000baseKR8_Full_BIT,
+ ETHTOOL_LINK_MODE_1600000baseDR8_Full_BIT,
+ ETHTOOL_LINK_MODE_1600000baseDR8_2_Full_BIT);
+}
+
+static void mlx5e_ethtool_get_speed_arr(bool ext,
struct ptys2ethtool_config **arr,
u32 *size)
{
- bool ext = mlx5_ptys_ext_supported(mdev);
-
*arr = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
*size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
ARRAY_SIZE(ptys2legacy_ethtool_table);
@@ -344,11 +371,6 @@ void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
param->rx_pending = 1 << priv->channels.params.log_rq_mtu_frames;
param->tx_pending = 1 << priv->channels.params.log_sq_size;
-
- kernel_param->tcp_data_split =
- (priv->channels.params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) ?
- ETHTOOL_TCP_DATA_SPLIT_ENABLED :
- ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}
static void mlx5e_get_ringparam(struct net_device *dev,
@@ -361,6 +383,27 @@ static void mlx5e_get_ringparam(struct net_device *dev,
mlx5e_ethtool_get_ringparam(priv, param, kernel_param);
}
+static bool mlx5e_ethtool_set_tcp_data_split(struct mlx5e_priv *priv,
+ u8 tcp_data_split,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *dev = priv->netdev;
+
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
+ !(dev->features & NETIF_F_GRO_HW)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "TCP-data-split is not supported when GRO HW is disabled");
+ return false;
+ }
+
+ /* Might need to disable HW-GRO if it was kept on due to hds. */
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_DISABLED &&
+ dev->cfg->hds_config == ETHTOOL_TCP_DATA_SPLIT_ENABLED)
+ netdev_update_features(priv->netdev);
+
+ return true;
+}
+
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
struct ethtool_ringparam *param,
struct netlink_ext_ack *extack)
@@ -419,6 +462,11 @@ static int mlx5e_set_ringparam(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ if (!mlx5e_ethtool_set_tcp_data_split(priv,
+ kernel_param->tcp_data_split,
+ extack))
+ return -EINVAL;
+
return mlx5e_ethtool_set_ringparam(priv, param, extack);
}
@@ -891,37 +939,19 @@ int mlx5e_set_per_queue_coalesce(struct net_device *dev, u32 queue,
return mlx5e_ethtool_set_per_queue_coalesce(priv, queue, coal);
}
-static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev,
- unsigned long *supported_modes,
- u32 eth_proto_cap)
-{
- unsigned long proto_cap = eth_proto_cap;
- struct ptys2ethtool_config *table;
- u32 max_size;
- int proto;
-
- mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size);
- for_each_set_bit(proto, &proto_cap, max_size)
- bitmap_or(supported_modes, supported_modes,
- table[proto].supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
-}
-
-static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
- u32 eth_proto_cap, bool ext)
+static void ptys2ethtool_process_link(u32 eth_eproto, bool ext, bool advertised,
+ unsigned long *modes)
{
- unsigned long proto_cap = eth_proto_cap;
+ unsigned long eproto = eth_eproto;
struct ptys2ethtool_config *table;
u32 max_size;
int proto;
- table = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
- max_size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
- ARRAY_SIZE(ptys2legacy_ethtool_table);
-
- for_each_set_bit(proto, &proto_cap, max_size)
- bitmap_or(advertising_modes, advertising_modes,
- table[proto].advertised,
+ mlx5e_ethtool_get_speed_arr(ext, &table, &max_size);
+ for_each_set_bit(proto, &eproto, max_size)
+ bitmap_or(modes, modes,
+ advertised ?
+ table[proto].advertised : table[proto].supported,
__ETHTOOL_LINK_MODE_MASK_NBITS);
}
@@ -931,6 +961,7 @@ static const u32 pplm_fec_2_ethtool[] = {
[MLX5E_FEC_RS_528_514] = ETHTOOL_FEC_RS,
[MLX5E_FEC_RS_544_514] = ETHTOOL_FEC_RS,
[MLX5E_FEC_LLRS_272_257_1] = ETHTOOL_FEC_LLRS,
+ [MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD] = ETHTOOL_FEC_RS,
};
static u32 pplm2ethtool_fec(u_long fec_mode, unsigned long size)
@@ -1074,50 +1105,51 @@ static void ptys2ethtool_supported_advertised_port(struct mlx5_core_dev *mdev,
}
}
-static void get_speed_duplex(struct net_device *netdev,
- u32 eth_proto_oper, bool force_legacy,
- u16 data_rate_oper,
- struct ethtool_link_ksettings *link_ksettings)
+static void get_link_properties(struct net_device *netdev,
+ u32 eth_proto_oper, bool force_legacy,
+ u16 data_rate_oper,
+ struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- u32 speed = SPEED_UNKNOWN;
+ const struct mlx5_link_info *info;
u8 duplex = DUPLEX_UNKNOWN;
+ u32 speed = SPEED_UNKNOWN;
+ u32 lanes = LANES_UNKNOWN;
if (!netif_carrier_ok(netdev))
goto out;
- speed = mlx5_port_ptys2speed(priv->mdev, eth_proto_oper, force_legacy);
- if (!speed) {
- if (data_rate_oper)
- speed = 100 * data_rate_oper;
- else
- speed = SPEED_UNKNOWN;
- goto out;
- }
-
- duplex = DUPLEX_FULL;
+ info = mlx5_port_ptys2info(priv->mdev, eth_proto_oper, force_legacy);
+ if (info) {
+ speed = info->speed;
+ lanes = info->lanes;
+ duplex = DUPLEX_FULL;
+ } else if (data_rate_oper)
+ speed = 100 * data_rate_oper;
out:
- link_ksettings->base.speed = speed;
link_ksettings->base.duplex = duplex;
+ link_ksettings->base.speed = speed;
+ link_ksettings->lanes = lanes;
}
static void get_supported(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
struct ethtool_link_ksettings *link_ksettings)
{
unsigned long *supported = link_ksettings->link_modes.supported;
- ptys2ethtool_supported_link(mdev, supported, eth_proto_cap);
+ bool ext = mlx5_ptys_ext_supported(mdev);
+
+ ptys2ethtool_process_link(eth_proto_cap, ext, false, supported);
ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
}
-static void get_advertising(u32 eth_proto_cap, u8 tx_pause, u8 rx_pause,
+static void get_advertising(u32 eth_proto_admin, u8 tx_pause, u8 rx_pause,
struct ethtool_link_ksettings *link_ksettings,
bool ext)
{
unsigned long *advertising = link_ksettings->link_modes.advertising;
- ptys2ethtool_adver_link(advertising, eth_proto_cap, ext);
-
+ ptys2ethtool_process_link(eth_proto_admin, ext, true, advertising);
if (rx_pause)
ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
if (tx_pause ^ rx_pause)
@@ -1173,7 +1205,7 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp,
unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
bool ext = mlx5_ptys_ext_supported(mdev);
- ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext);
+ ptys2ethtool_process_link(eth_proto_lp, ext, true, lp_advertising);
}
static int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
@@ -1235,8 +1267,8 @@ static int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
get_supported(mdev, eth_proto_cap, link_ksettings);
get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings,
admin_ext);
- get_speed_duplex(priv->netdev, eth_proto_oper, !admin_ext,
- data_rate_oper, link_ksettings);
+ get_link_properties(priv->netdev, eth_proto_oper, !admin_ext,
+ data_rate_oper, link_ksettings);
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
connector_type = connector_type < MLX5E_CONNECTOR_TYPE_NUMBER ?
@@ -1341,28 +1373,22 @@ static bool ext_link_mode_requested(const unsigned long *adver)
return bitmap_intersects(modes, adver, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static bool ext_requested(u8 autoneg, const unsigned long *adver, bool ext_supported)
-{
- bool ext_link_mode = ext_link_mode_requested(adver);
-
- return autoneg == AUTONEG_ENABLE ? ext_link_mode : ext_supported;
-}
-
static int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
const struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_port_eth_proto eproto;
+ struct mlx5_link_info info = {};
const unsigned long *adver;
bool an_changes = false;
u8 an_disable_admin;
bool ext_supported;
+ bool ext_requested;
u8 an_disable_cap;
bool an_disable;
u32 link_modes;
u8 an_status;
u8 autoneg;
- u32 speed;
bool ext;
int err;
@@ -1370,13 +1396,15 @@ static int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
adver = link_ksettings->link_modes.advertising;
autoneg = link_ksettings->base.autoneg;
- speed = link_ksettings->base.speed;
+ info.speed = link_ksettings->base.speed;
+ info.lanes = link_ksettings->lanes;
ext_supported = mlx5_ptys_ext_supported(mdev);
- ext = ext_requested(autoneg, adver, ext_supported);
- if (!ext_supported && ext)
+ ext_requested = ext_link_mode_requested(adver);
+ if (!ext_supported && ext_requested)
return -EOPNOTSUPP;
+ ext = autoneg == AUTONEG_ENABLE ? ext_requested : ext_supported;
ethtool2ptys_adver_func = ext ? mlx5e_ethtool2ptys_ext_adver_link :
mlx5e_ethtool2ptys_adver_link;
err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
@@ -1386,7 +1414,7 @@ static int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
goto out;
}
link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
- mlx5_port_speed2linkmodes(mdev, speed, !ext);
+ mlx5_port_info2linkmodes(mdev, &info, !ext);
err = mlx5e_speed_validate(priv->netdev, ext, link_modes, autoneg);
if (err)
@@ -1457,60 +1485,147 @@ static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
static int mlx5e_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- u32 rss_context = rxfh->rss_context;
- int err;
+ bool symmetric;
mutex_lock(&priv->state_lock);
- err = mlx5e_rx_res_rss_get_rxfh(priv->rx_res, rss_context,
- rxfh->indir, rxfh->key, &rxfh->hfunc);
+ mlx5e_rx_res_rss_get_rxfh(priv->rx_res, 0, rxfh->indir, rxfh->key,
+ &rxfh->hfunc, &symmetric);
mutex_unlock(&priv->state_lock);
- return err;
+
+ if (symmetric)
+ rxfh->input_xfrm = RXH_XFRM_SYM_OR_XOR;
+
+ return 0;
}
-static int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
- struct netlink_ext_ack *extack)
+static int mlx5e_rxfh_hfunc_check(struct mlx5e_priv *priv,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
- u32 *rss_context = &rxfh->rss_context;
- u8 hfunc = rxfh->hfunc;
unsigned int count;
- int err;
-
- mutex_lock(&priv->state_lock);
count = priv->channels.params.num_channels;
- if (hfunc == ETH_RSS_HASH_XOR) {
+ if (rxfh->hfunc == ETH_RSS_HASH_XOR) {
unsigned int xor8_max_channels = mlx5e_rqt_max_num_channels_allowed_for_xor8();
if (count > xor8_max_channels) {
- err = -EINVAL;
- netdev_err(priv->netdev, "%s: Cannot set RSS hash function to XOR, current number of channels (%d) exceeds the maximum allowed for XOR8 RSS hfunc (%d)\n",
- __func__, count, xor8_max_channels);
- goto unlock;
+ NL_SET_ERR_MSG_FMT_MOD(
+ extack,
+ "Number of channels (%u) exceeds the max for XOR8 RSS (%u)",
+ count, xor8_max_channels);
+ return -EINVAL;
}
}
- if (*rss_context && rxfh->rss_delete) {
- err = mlx5e_rx_res_rss_destroy(priv->rx_res, *rss_context);
+ return 0;
+}
+
+static int mlx5e_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ bool symmetric = rxfh->input_xfrm == RXH_XFRM_SYM_OR_XOR;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ u8 hfunc = rxfh->hfunc;
+ int err;
+
+ mutex_lock(&priv->state_lock);
+
+ err = mlx5e_rxfh_hfunc_check(priv, rxfh, extack);
+ if (err)
goto unlock;
- }
- if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
- err = mlx5e_rx_res_rss_init(priv->rx_res, rss_context, count);
- if (err)
- goto unlock;
- }
+ err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, rxfh->rss_context,
+ rxfh->indir, rxfh->key,
+ hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc,
+ rxfh->input_xfrm == RXH_XFRM_NO_CHANGE ? NULL : &symmetric);
+
+unlock:
+ mutex_unlock(&priv->state_lock);
+ return err;
+}
+
+static int mlx5e_create_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ bool symmetric = rxfh->input_xfrm == RXH_XFRM_SYM_OR_XOR;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ u8 hfunc = rxfh->hfunc;
+ int err;
+
+ mutex_lock(&priv->state_lock);
+
+ err = mlx5e_rxfh_hfunc_check(priv, rxfh, extack);
+ if (err)
+ goto unlock;
+
+ err = mlx5e_rx_res_rss_init(priv->rx_res, rxfh->rss_context,
+ priv->channels.params.num_channels);
+ if (err)
+ goto unlock;
+
+ err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, rxfh->rss_context,
+ rxfh->indir, rxfh->key,
+ hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc,
+ rxfh->input_xfrm == RXH_XFRM_NO_CHANGE ? NULL : &symmetric);
+ if (err)
+ goto unlock;
- err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, *rss_context,
+ mlx5e_rx_res_rss_get_rxfh(priv->rx_res, rxfh->rss_context,
+ ethtool_rxfh_context_indir(ctx),
+ ethtool_rxfh_context_key(ctx),
+ &ctx->hfunc, &symmetric);
+ if (symmetric)
+ ctx->input_xfrm = RXH_XFRM_SYM_OR_XOR;
+
+unlock:
+ mutex_unlock(&priv->state_lock);
+ return err;
+}
+
+static int mlx5e_modify_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ bool symmetric = rxfh->input_xfrm == RXH_XFRM_SYM_OR_XOR;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ u8 hfunc = rxfh->hfunc;
+ int err;
+
+ mutex_lock(&priv->state_lock);
+
+ err = mlx5e_rxfh_hfunc_check(priv, rxfh, extack);
+ if (err)
+ goto unlock;
+
+ err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, rxfh->rss_context,
rxfh->indir, rxfh->key,
- hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc);
+ hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc,
+ rxfh->input_xfrm == RXH_XFRM_NO_CHANGE ? NULL : &symmetric);
unlock:
mutex_unlock(&priv->state_lock);
return err;
}
+static int mlx5e_remove_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ u32 rss_context,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int err;
+
+ mutex_lock(&priv->state_lock);
+ err = mlx5e_rx_res_rss_destroy(priv->rx_res, rss_context);
+ mutex_unlock(&priv->state_lock);
+ return err;
+}
+
#define MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC 100
#define MLX5E_PFC_PREVEN_TOUT_MAX_MSEC 8000
#define MLX5E_PFC_PREVEN_MINOR_PRECENT 85
@@ -1675,6 +1790,7 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
return 0;
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -1819,11 +1935,12 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
}
static void mlx5e_get_fec_stats(struct net_device *netdev,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- mlx5e_stats_fec_get(priv, fec_stats);
+ mlx5e_stats_fec_get(priv, fec_stats, hist);
}
static int mlx5e_get_fecparam(struct net_device *netdev,
@@ -1915,7 +2032,7 @@ static int mlx5e_get_module_info(struct net_device *netdev,
int size_read = 0;
u8 data[4] = {0};
- size_read = mlx5_query_module_eeprom(dev, 0, 2, data);
+ size_read = mlx5_query_module_eeprom(dev, 0, 2, data, NULL);
if (size_read < 2)
return -EIO;
@@ -1957,6 +2074,7 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
struct mlx5_core_dev *mdev = priv->mdev;
int offset = ee->offset;
int size_read;
+ u8 status = 0;
int i = 0;
if (!ee->len)
@@ -1966,15 +2084,15 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
while (i < ee->len) {
size_read = mlx5_query_module_eeprom(mdev, offset, ee->len - i,
- data + i);
-
+ data + i, &status);
if (!size_read)
/* Done reading */
return 0;
if (size_read < 0) {
- netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n",
- __func__, size_read);
+ netdev_err(netdev,
+ "%s: mlx5_query_eeprom failed:0x%x, status %u\n",
+ __func__, size_read, status);
return size_read;
}
@@ -1994,6 +2112,7 @@ static int mlx5e_get_module_eeprom_by_page(struct net_device *netdev,
struct mlx5_core_dev *mdev = priv->mdev;
u8 *data = page_data->data;
int size_read;
+ u8 status = 0;
int i = 0;
if (!page_data->length)
@@ -2007,20 +2126,19 @@ static int mlx5e_get_module_eeprom_by_page(struct net_device *netdev,
query.page = page_data->page;
while (i < page_data->length) {
query.size = page_data->length - i;
- size_read = mlx5_query_module_eeprom_by_page(mdev, &query, data + i);
+ size_read = mlx5_query_module_eeprom_by_page(mdev, &query,
+ data + i, &status);
/* Done reading, return how many bytes was read */
if (!size_read)
return i;
- if (size_read == -EINVAL)
- return -EINVAL;
if (size_read < 0) {
NL_SET_ERR_MSG_FMT_MOD(
extack,
- "Query module eeprom by page failed, read %u bytes, err %d\n",
- i, size_read);
- return i;
+ "Query module eeprom by page failed, read %u bytes, err %d, status %u",
+ i, size_read, status);
+ return size_read;
}
i += size_read;
@@ -2045,14 +2163,9 @@ int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
if (err)
return err;
- dev_hold(dev);
- rtnl_unlock();
-
err = mlx5_firmware_flash(mdev, fw, NULL);
release_firmware(fw);
- rtnl_lock();
- dev_put(dev);
return err;
}
@@ -2166,7 +2279,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
if (!MLX5_CAP_GEN(mdev, cqe_compression))
return -EOPNOTSUPP;
- rx_filter = priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE;
+ rx_filter = priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE;
err = mlx5e_modify_rx_cqe_compression_locked(priv, enable, rx_filter);
if (err)
return err;
@@ -2181,7 +2294,6 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_params new_params;
- int err;
if (enable) {
/* Checking the regular RQ here; mlx5e_validate_xsk_param called
@@ -2202,14 +2314,7 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_STRIDING_RQ, enable);
mlx5e_set_rq_type(mdev, &new_params);
- err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
- if (err)
- return err;
-
- /* update XDP supported features */
- mlx5e_set_xdp_feature(netdev);
-
- return 0;
+ return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
}
static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
@@ -2370,21 +2475,35 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev)
return priv->channels.params.pflags;
}
+static int mlx5e_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return mlx5e_ethtool_get_rxfh_fields(priv, info);
+}
+
+static int mlx5e_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return mlx5e_ethtool_set_rxfh_fields(priv, cmd, extack);
+}
+
+static u32 mlx5e_get_rx_ring_count(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return priv->channels.params.num_channels;
+}
+
static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rule_locs)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- /* ETHTOOL_GRXRINGS is needed by ethtool -x which is not part
- * of rxnfc. We keep this logic out of mlx5e_ethtool_get_rxnfc,
- * to avoid breaking "ethtool -x" when mlx5e_ethtool_get_rxnfc
- * is compiled out via CONFIG_MLX5_EN_RXNFC=n.
- */
- if (info->cmd == ETHTOOL_GRXRINGS) {
- info->data = priv->channels.params.num_channels;
- return 0;
- }
-
return mlx5e_ethtool_get_rxnfc(priv, info, rule_locs);
}
@@ -2607,12 +2726,16 @@ static void mlx5e_get_ts_stats(struct net_device *netdev,
}
const struct ethtool_ops mlx5e_ethtool_ops = {
- .cap_rss_ctx_supported = true,
+ .cap_link_lanes_supported = true,
+ .rxfh_per_ctx_fields = true,
.rxfh_per_ctx_key = true,
+ .rxfh_max_num_contexts = MLX5E_MAX_NUM_RSS,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE |
ETHTOOL_COALESCE_USE_CQE,
+ .supported_input_xfrm = RXH_XFRM_SYM_OR_XOR,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
.get_drvinfo = mlx5e_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ext_state = mlx5e_get_link_ext_state,
@@ -2633,8 +2756,14 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
.get_rxfh = mlx5e_get_rxfh,
.set_rxfh = mlx5e_set_rxfh,
+ .get_rxfh_fields = mlx5e_get_rxfh_fields,
+ .set_rxfh_fields = mlx5e_set_rxfh_fields,
+ .create_rxfh_context = mlx5e_create_rxfh_context,
+ .modify_rxfh_context = mlx5e_modify_rxfh_context,
+ .remove_rxfh_context = mlx5e_remove_rxfh_context,
.get_rxnfc = mlx5e_get_rxnfc,
.set_rxnfc = mlx5e_set_rxnfc,
+ .get_rx_ring_count = mlx5e_get_rx_ring_count,
.get_tunable = mlx5e_get_tunable,
.set_tunable = mlx5e_set_tunable,
.get_pause_stats = mlx5e_get_pause_stats,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 05058710d2c7..8928d2dcd43f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -484,7 +484,9 @@ static int mlx5e_vlan_rx_add_svid(struct mlx5e_flow_steering *fs,
}
/* Need to fix some features.. */
+ netdev_lock(netdev);
netdev_update_features(netdev);
+ netdev_unlock(netdev);
return err;
}
@@ -521,7 +523,9 @@ int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs,
} else if (be16_to_cpu(proto) == ETH_P_8021AD) {
clear_bit(vid, fs->vlan->active_svlans);
mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
+ netdev_lock(netdev);
netdev_update_features(netdev);
+ netdev_unlock(netdev);
}
return 0;
@@ -776,7 +780,7 @@ static int mlx5e_create_promisc_table(struct mlx5e_flow_steering *fs)
ft_attr.max_fte = MLX5E_PROMISC_TABLE_SIZE;
ft_attr.autogroup.max_num_groups = 1;
ft_attr.level = MLX5E_PROMISC_FT_LEVEL;
- ft_attr.prio = MLX5E_NIC_PRIO;
+ ft_attr.prio = MLX5E_PROMISC_PRIO;
ft->t = mlx5_create_auto_grouped_flow_table(fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
@@ -901,6 +905,9 @@ static void mlx5e_set_inner_ttc_params(struct mlx5e_flow_steering *fs,
ft_attr->prio = MLX5E_NIC_PRIO;
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
+ if (mlx5_ttc_is_decrypted_esp_tt(tt))
+ continue;
+
ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
ttc_params->dests[tt].tir_num =
tt == MLX5_TT_ANY ?
@@ -910,9 +917,17 @@ static void mlx5e_set_inner_ttc_params(struct mlx5e_flow_steering *fs,
}
}
+static bool mlx5e_ipsec_rss_supported(struct mlx5_core_dev *mdev)
+{
+ return MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(mdev, ipsec_next_header) &&
+ MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(mdev, outer_l4_type_ext) &&
+ MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(mdev, inner_l4_type_ext);
+}
+
void mlx5e_set_ttc_params(struct mlx5e_flow_steering *fs,
struct mlx5e_rx_res *rx_res,
- struct ttc_params *ttc_params, bool tunnel)
+ struct ttc_params *ttc_params, bool tunnel,
+ bool ipsec_rss)
{
struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
@@ -923,7 +938,13 @@ void mlx5e_set_ttc_params(struct mlx5e_flow_steering *fs,
ft_attr->level = MLX5E_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_NIC_PRIO;
+ ttc_params->ipsec_rss = ipsec_rss &&
+ mlx5e_ipsec_rss_supported(fs->mdev);
+
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
+ if (mlx5_ttc_is_decrypted_esp_tt(tt))
+ continue;
+
ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
ttc_params->dests[tt].tir_num =
tt == MLX5_TT_ANY ?
@@ -1289,7 +1310,7 @@ int mlx5e_create_ttc_table(struct mlx5e_flow_steering *fs,
{
struct ttc_params ttc_params = {};
- mlx5e_set_ttc_params(fs, rx_res, &ttc_params, true);
+ mlx5e_set_ttc_params(fs, rx_res, &ttc_params, true, true);
fs->ttc = mlx5_create_ttc_table(fs->mdev, &ttc_params);
return PTR_ERR_OR_ZERO(fs->ttc);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 773624bb2c5d..63bdef5b4ba5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -704,7 +704,7 @@ static int validate_flow(struct mlx5e_priv *priv,
num_tuples += ret;
break;
default:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
if ((fs->flow_type & FLOW_EXT)) {
ret = validate_vlan(fs);
@@ -884,25 +884,27 @@ static int flow_type_to_traffic_type(u32 flow_type)
case ESP_V6_FLOW:
return MLX5_TT_IPV6_IPSEC_ESP;
case IPV4_FLOW:
+ case IP_USER_FLOW:
return MLX5_TT_IPV4;
case IPV6_FLOW:
+ case IPV6_USER_FLOW:
return MLX5_TT_IPV6;
default:
return -EINVAL;
}
}
-static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
- struct ethtool_rxnfc *nfc)
+int mlx5e_ethtool_set_rxfh_fields(struct mlx5e_priv *priv,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
u8 rx_hash_field = 0;
u32 flow_type = 0;
- u32 rss_idx = 0;
+ u32 rss_idx;
int err;
int tt;
- if (nfc->flow_type & FLOW_RSS)
- rss_idx = nfc->rss_context;
+ rss_idx = nfc->rss_context;
flow_type = flow_type_mask(nfc->flow_type);
tt = flow_type_to_traffic_type(flow_type);
@@ -939,16 +941,15 @@ static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
return err;
}
-static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv,
- struct ethtool_rxnfc *nfc)
+int mlx5e_ethtool_get_rxfh_fields(struct mlx5e_priv *priv,
+ struct ethtool_rxfh_fields *nfc)
{
int hash_field = 0;
u32 flow_type = 0;
- u32 rss_idx = 0;
+ u32 rss_idx;
int tt;
- if (nfc->flow_type & FLOW_RSS)
- rss_idx = nfc->rss_context;
+ rss_idx = nfc->rss_context;
flow_type = flow_type_mask(nfc->flow_type);
tt = flow_type_to_traffic_type(flow_type);
@@ -984,9 +985,6 @@ int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
case ETHTOOL_SRXCLSRLDEL:
err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
break;
- case ETHTOOL_SRXFH:
- err = mlx5e_set_rss_hash_opt(priv, cmd);
- break;
default:
err = -EOPNOTSUPP;
break;
@@ -1011,9 +1009,6 @@ int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
case ETHTOOL_GRXCLSRLALL:
err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
break;
- case ETHTOOL_GRXFH:
- err = mlx5e_get_rss_hash_opt(priv, info);
- break;
default:
err = -EOPNOTSUPP;
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index f37afa52e2b8..6168f0814414 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -39,7 +39,9 @@
#include <linux/debugfs.h>
#include <linux/if_bridge.h>
#include <linux/filter.h>
+#include <net/netdev_lock.h>
#include <net/netdev_queues.h>
+#include <net/netdev_rx_queue.h>
#include <net/page_pool/types.h>
#include <net/pkt_sched.h>
#include <net/xdp_sock_drv.h>
@@ -50,6 +52,7 @@
#include "en_tc.h"
#include "en_rep.h"
#include "en_accel/ipsec.h"
+#include "en_accel/psp.h"
#include "en_accel/macsec.h"
#include "en_accel/en_accel.h"
#include "en_accel/ktls.h"
@@ -74,10 +77,12 @@
#include "en/trap.h"
#include "lib/devcom.h"
#include "lib/sd.h"
+#include "en/pcie_cong_event.h"
static bool mlx5e_hw_gro_supported(struct mlx5_core_dev *mdev)
{
- if (!MLX5_CAP_GEN(mdev, shampo))
+ if (!MLX5_CAP_GEN(mdev, shampo) ||
+ !MLX5_CAP_SHAMPO(mdev, shampo_header_split_data_merge))
return false;
/* Our HW-GRO implementation relies on "KSM Mkey" for
@@ -228,13 +233,17 @@ static int mlx5e_devcom_event_mpv(int event, void *my_data, void *event_data)
static int mlx5e_devcom_init_mpv(struct mlx5e_priv *priv, u64 *data)
{
+ struct mlx5_devcom_match_attr attr = {
+ .key.val = *data,
+ };
+
priv->devcom = mlx5_devcom_register_component(priv->mdev->priv.devc,
MLX5_DEVCOM_MPV,
- *data,
+ &attr,
mlx5e_devcom_event_mpv,
priv);
- if (IS_ERR(priv->devcom))
- return PTR_ERR(priv->devcom);
+ if (!priv->devcom)
+ return -EINVAL;
if (mlx5_core_is_mp_master(priv->mdev)) {
mlx5_devcom_send_event(priv->devcom, MPV_DEVCOM_MASTER_UP,
@@ -247,7 +256,7 @@ static int mlx5e_devcom_init_mpv(struct mlx5e_priv *priv, u64 *data)
static void mlx5e_devcom_cleanup_mpv(struct mlx5e_priv *priv)
{
- if (IS_ERR_OR_NULL(priv->devcom))
+ if (!priv->devcom)
return;
if (mlx5_core_is_mp_master(priv->mdev)) {
@@ -257,6 +266,7 @@ static void mlx5e_devcom_cleanup_mpv(struct mlx5e_priv *priv)
}
mlx5_devcom_unregister_component(priv->devcom);
+ priv->devcom = NULL;
}
static int blocking_event(struct notifier_block *nb, unsigned long event, void *data)
@@ -311,8 +321,8 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
struct mlx5e_icosq *sq,
struct mlx5e_umr_wqe *wqe)
{
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->hdr.ctrl;
+ struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->hdr.uctrl;
u16 octowords;
u8 ds_cnt;
@@ -330,52 +340,6 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
-static int mlx5e_rq_shampo_hd_alloc(struct mlx5e_rq *rq, int node)
-{
- rq->mpwqe.shampo = kvzalloc_node(sizeof(*rq->mpwqe.shampo),
- GFP_KERNEL, node);
- if (!rq->mpwqe.shampo)
- return -ENOMEM;
- return 0;
-}
-
-static void mlx5e_rq_shampo_hd_free(struct mlx5e_rq *rq)
-{
- kvfree(rq->mpwqe.shampo);
-}
-
-static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node)
-{
- struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
-
- shampo->bitmap = bitmap_zalloc_node(shampo->hd_per_wq, GFP_KERNEL,
- node);
- shampo->info = kvzalloc_node(array_size(shampo->hd_per_wq,
- sizeof(*shampo->info)),
- GFP_KERNEL, node);
- shampo->pages = kvzalloc_node(array_size(shampo->hd_per_wq,
- sizeof(*shampo->pages)),
- GFP_KERNEL, node);
- if (!shampo->bitmap || !shampo->info || !shampo->pages)
- goto err_nomem;
-
- return 0;
-
-err_nomem:
- kvfree(shampo->info);
- kvfree(shampo->bitmap);
- kvfree(shampo->pages);
-
- return -ENOMEM;
-}
-
-static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
-{
- kvfree(rq->mpwqe.shampo->bitmap);
- kvfree(rq->mpwqe.shampo->info);
- kvfree(rq->mpwqe.shampo->pages);
-}
-
static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
{
int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
@@ -398,7 +362,9 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
}
- mlx5e_build_umr_wqe(rq, rq->icosq, &rq->mpwqe.umr_wqe);
+ mlx5e_build_umr_wqe(rq, rq->icosq,
+ container_of(&rq->mpwqe.umr_wqe,
+ struct mlx5e_umr_wqe, hdr));
return 0;
}
@@ -586,19 +552,26 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq
}
static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev,
- struct mlx5e_rq *rq)
+ u16 hd_per_wq, __be32 *umr_mkey)
{
u32 max_ksm_size = BIT(MLX5_CAP_GEN(mdev, log_max_klm_list_size));
+ u32 mkey;
+ int err;
- if (max_ksm_size < rq->mpwqe.shampo->hd_per_wq) {
+ if (max_ksm_size < hd_per_wq) {
mlx5_core_err(mdev, "max ksm list size 0x%x is smaller than shampo header buffer list size 0x%x\n",
- max_ksm_size, rq->mpwqe.shampo->hd_per_wq);
+ max_ksm_size, hd_per_wq);
return -EINVAL;
}
- return mlx5e_create_umr_ksm_mkey(mdev, rq->mpwqe.shampo->hd_per_wq,
- MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE,
- &rq->mpwqe.shampo->mkey);
+ err = mlx5e_create_umr_ksm_mkey(mdev, hd_per_wq,
+ MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE,
+ &mkey);
+ if (err)
+ return err;
+
+ *umr_mkey = cpu_to_be32(mkey);
+ return 0;
}
static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
@@ -709,6 +682,27 @@ static void mlx5e_rq_err_cqe_work(struct work_struct *recover_work)
mlx5e_reporter_rq_cqe_err(rq);
}
+static void mlx5e_rq_timeout_work(struct work_struct *timeout_work)
+{
+ struct mlx5e_rq *rq = container_of(timeout_work,
+ struct mlx5e_rq,
+ rx_timeout_work);
+
+ /* Acquire netdev instance lock to synchronize with channel close and
+ * reopen flows. Either successfully obtain the lock, or detect that
+ * channels are closing for another reason, making this work no longer
+ * necessary.
+ */
+ while (!netdev_trylock(rq->netdev)) {
+ if (!test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &rq->priv->state))
+ return;
+ msleep(20);
+ }
+
+ mlx5e_reporter_rx_timeout(rq);
+ netdev_unlock(rq->netdev);
+}
+
static int mlx5e_alloc_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
{
rq->wqe_overflow.page = alloc_page(GFP_KERNEL);
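The trylock-or-bail loop in mlx5e_rq_timeout_work() above is the same pattern the TX error and TX timeout works adopt later in this patch: never block unconditionally on the netdev instance lock, because the channel-close path holds that lock while flushing these very work items. A minimal sketch of the pattern; the private struct, state bit and recovery helper are illustrative names, not driver code:

	static void example_recovery_work(struct work_struct *work)
	{
		struct example_priv *priv = container_of(work, struct example_priv,
							 recovery_work);

		/* The closer holds the netdev instance lock while waiting for this
		 * work to finish, so polling with a bail-out avoids a deadlock.
		 */
		while (!netdev_trylock(priv->netdev)) {
			if (!test_bit(EXAMPLE_STATE_ACTIVE, &priv->state))
				return;		/* teardown in progress, nothing left to do */
			msleep(20);
		}

		example_do_recovery(priv);	/* assumed recovery helper */
		netdev_unlock(priv->netdev);
	}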
@@ -741,8 +735,8 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
rq->pdev = c->pdev;
rq->netdev = c->netdev;
rq->priv = c->priv;
- rq->tstamp = c->tstamp;
- rq->clock = &mdev->clock;
+ rq->hwtstamp_config = &c->priv->hwtstamp_config;
+ rq->clock = mdev->clock;
rq->icosq = &c->icosq;
rq->ix = c->ix;
rq->channel = c;
@@ -760,6 +754,35 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
xdp_frag_size);
}
+static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, u16 hd_per_wq,
+ int node)
+{
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+
+ shampo->hd_per_wq = hd_per_wq;
+
+ shampo->bitmap = bitmap_zalloc_node(hd_per_wq, GFP_KERNEL, node);
+ shampo->pages = kvzalloc_node(array_size(hd_per_wq,
+ sizeof(*shampo->pages)),
+ GFP_KERNEL, node);
+ if (!shampo->bitmap || !shampo->pages)
+ goto err_nomem;
+
+ return 0;
+
+err_nomem:
+ kvfree(shampo->pages);
+ bitmap_free(shampo->bitmap);
+
+ return -ENOMEM;
+}
+
+static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
+{
+ kvfree(rq->mpwqe.shampo->pages);
+ bitmap_free(rq->mpwqe.shampo->bitmap);
+}
+
static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rqp,
@@ -768,43 +791,93 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
int node)
{
void *wqc = MLX5_ADDR_OF(rqc, rqp->rqc, wq);
+ u8 log_hd_per_page, log_hd_entry_size;
+ u16 hd_per_wq, hd_per_wqe;
+ u32 hd_pool_size;
int wq_size;
int err;
if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
return 0;
- err = mlx5e_rq_shampo_hd_alloc(rq, node);
- if (err)
- goto out;
- rq->mpwqe.shampo->hd_per_wq =
- mlx5e_shampo_hd_per_wq(mdev, params, rqp);
- err = mlx5e_create_rq_hd_umr_mkey(mdev, rq);
+
+ rq->mpwqe.shampo = kvzalloc_node(sizeof(*rq->mpwqe.shampo),
+ GFP_KERNEL, node);
+ if (!rq->mpwqe.shampo)
+ return -ENOMEM;
+
+ /* split headers data structures */
+ hd_per_wq = mlx5e_shampo_hd_per_wq(mdev, params, rqp);
+ err = mlx5e_rq_shampo_hd_info_alloc(rq, hd_per_wq, node);
if (err)
- goto err_shampo_hd;
- err = mlx5e_rq_shampo_hd_info_alloc(rq, node);
+ goto err_shampo_hd_info_alloc;
+
+ err = mlx5e_create_rq_hd_umr_mkey(mdev, hd_per_wq,
+ &rq->mpwqe.shampo->mkey_be);
if (err)
- goto err_shampo_info;
+ goto err_umr_mkey;
+
+ hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rqp);
+ wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
+
+ BUILD_BUG_ON(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE > PAGE_SHIFT);
+ if (hd_per_wqe >= MLX5E_SHAMPO_WQ_HEADER_PER_PAGE) {
+ log_hd_per_page = MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE;
+ log_hd_entry_size = MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
+ } else {
+ log_hd_per_page = order_base_2(hd_per_wqe);
+ log_hd_entry_size = order_base_2(PAGE_SIZE / hd_per_wqe);
+ }
+
+ rq->mpwqe.shampo->hd_per_wqe = hd_per_wqe;
+ rq->mpwqe.shampo->hd_per_page = BIT(log_hd_per_page);
+ rq->mpwqe.shampo->log_hd_per_page = log_hd_per_page;
+ rq->mpwqe.shampo->log_hd_entry_size = log_hd_entry_size;
+
+ hd_pool_size = (hd_per_wqe * wq_size) >> log_hd_per_page;
+
+ if (netif_rxq_has_unreadable_mp(rq->netdev, rq->ix)) {
+ /* Separate page pool for shampo headers */
+ struct page_pool_params pp_params = { };
+
+ pp_params.order = 0;
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.pool_size = hd_pool_size;
+ pp_params.nid = node;
+ pp_params.dev = rq->pdev;
+ pp_params.napi = rq->cq.napi;
+ pp_params.netdev = rq->netdev;
+ pp_params.dma_dir = rq->buff.map_dir;
+ pp_params.max_len = PAGE_SIZE;
+
+ rq->hd_page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rq->hd_page_pool)) {
+ err = PTR_ERR(rq->hd_page_pool);
+ rq->hd_page_pool = NULL;
+ goto err_hds_page_pool;
+ }
+ } else {
+ /* Common page pool, reserve space for headers. */
+ *pool_size += hd_pool_size;
+ rq->hd_page_pool = NULL;
+ }
+
+ /* gro only data structures */
rq->hw_gro_data = kvzalloc_node(sizeof(*rq->hw_gro_data), GFP_KERNEL, node);
if (!rq->hw_gro_data) {
err = -ENOMEM;
goto err_hw_gro_data;
}
- rq->mpwqe.shampo->key =
- cpu_to_be32(rq->mpwqe.shampo->mkey);
- rq->mpwqe.shampo->hd_per_wqe =
- mlx5e_shampo_hd_per_wqe(mdev, params, rqp);
- wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
- *pool_size += (rq->mpwqe.shampo->hd_per_wqe * wq_size) /
- MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
+
return 0;
err_hw_gro_data:
+ page_pool_destroy(rq->hd_page_pool);
+err_hds_page_pool:
+ mlx5_core_destroy_mkey(mdev, be32_to_cpu(rq->mpwqe.shampo->mkey_be));
+err_umr_mkey:
mlx5e_rq_shampo_hd_info_free(rq);
-err_shampo_info:
- mlx5_core_destroy_mkey(mdev, rq->mpwqe.shampo->mkey);
-err_shampo_hd:
- mlx5e_rq_shampo_hd_free(rq);
-out:
+err_shampo_hd_info_alloc:
+ kvfree(rq->mpwqe.shampo);
return err;
}
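Two details in the hunk above are easy to miss: a dedicated header page pool is created only when the queue has an unreadable memory provider bound (netif_rxq_has_unreadable_mp()), otherwise the headers simply reserve extra space in the common data pool; and the header pool size is derived from how many header entries fit in a page. A small sketch of that sizing arithmetic, with the per-page header count assumed to be 32 (128-byte entries on a 4 KiB page) purely for illustration:

	#include <linux/log2.h>

	#define EX_HDR_PER_PAGE	32	/* assumed: PAGE_SIZE / max header entry size */

	static unsigned int ex_hd_pool_pages(unsigned int hd_per_wqe,
					     unsigned int wq_size)
	{
		unsigned int log_hd_per_page;

		if (hd_per_wqe >= EX_HDR_PER_PAGE)
			log_hd_per_page = ilog2(EX_HDR_PER_PAGE);	/* 5 */
		else
			log_hd_per_page = order_base_2(hd_per_wqe);

		/* e.g. hd_per_wqe = 64, wq_size = 16 -> (64 * 16) >> 5 = 32 pages */
		return (hd_per_wqe * wq_size) >> log_hd_per_page;
	}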
@@ -814,9 +887,12 @@ static void mlx5e_rq_free_shampo(struct mlx5e_rq *rq)
return;
kvfree(rq->hw_gro_data);
+ if (rq->hd_page_pool != rq->page_pool)
+ page_pool_destroy(rq->hd_page_pool);
mlx5e_rq_shampo_hd_info_free(rq);
- mlx5_core_destroy_mkey(rq->mdev, rq->mpwqe.shampo->mkey);
- mlx5e_rq_shampo_hd_free(rq);
+ mlx5_core_destroy_mkey(rq->mdev,
+ be32_to_cpu(rq->mpwqe.shampo->mkey_be));
+ kvfree(rq->mpwqe.shampo);
}
static int mlx5e_alloc_rq(struct mlx5e_params *params,
@@ -834,6 +910,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
rqp->wq.db_numa_node = node;
INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work);
+ INIT_WORK(&rq->rx_timeout_work, mlx5e_rq_timeout_work);
if (params->xdp_prog)
bpf_prog_inc(params->xdp_prog);
@@ -919,6 +996,8 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
if (xsk) {
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL, NULL);
+ if (err)
+ goto err_free_by_rq_type;
xsk_pool_set_rxq_info(rq->xsk_pool, &rq->xdp_rxq);
} else {
/* Create a page_pool and register it with rxq */
@@ -933,6 +1012,11 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
pp_params.netdev = rq->netdev;
pp_params.dma_dir = rq->buff.map_dir;
pp_params.max_len = PAGE_SIZE;
+ pp_params.queue_idx = rq->ix;
+
+ /* Shampo header-data split allows for unreadable netmem */
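Wait, this anchor is wrong; skip.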
+ if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
+ pp_params.flags |= PP_FLAG_ALLOW_UNREADABLE_NETMEM;
/* page_pool can be used even when there is no rq->xdp_prog,
* given page_pool does not handle DMA mapping there is no
@@ -945,12 +1029,15 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
rq->page_pool = NULL;
goto err_free_by_rq_type;
}
- if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
+ if (!rq->hd_page_pool)
+ rq->hd_page_pool = rq->page_pool;
+ if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) {
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
MEM_TYPE_PAGE_POOL, rq->page_pool);
+ if (err)
+ goto err_destroy_page_pool;
+ }
}
- if (err)
- goto err_destroy_page_pool;
for (i = 0; i < wq_sz; i++) {
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
@@ -1078,7 +1165,8 @@ int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_cou
if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
MLX5_SET(wq, wq, log_headers_buffer_entry_num,
order_base_2(rq->mpwqe.shampo->hd_per_wq));
- MLX5_SET(wq, wq, headers_mkey, rq->mpwqe.shampo->mkey);
+ MLX5_SET(wq, wq, headers_mkey,
+ be32_to_cpu(rq->mpwqe.shampo->mkey_be));
}
mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
@@ -1208,7 +1296,8 @@ int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
netdev_warn(rq->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
rq->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);
- mlx5e_reporter_rx_timeout(rq);
+ queue_work(rq->priv->wq, &rq->rx_timeout_work);
+
return -ETIMEDOUT;
}
@@ -1379,6 +1468,7 @@ void mlx5e_close_rq(struct mlx5e_rq *rq)
if (rq->dim)
cancel_work_sync(&rq->dim->work);
cancel_work_sync(&rq->recover_work);
+ cancel_work_sync(&rq->rx_timeout_work);
mlx5e_destroy_rq(rq);
mlx5e_free_rx_descs(rq);
mlx5e_free_rq(rq);
@@ -1459,7 +1549,7 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
sq->pdev = c->pdev;
sq->mkey_be = c->mkey_be;
sq->channel = c;
- sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+ sq->uar_map = c->bfreg->map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu) - ETH_FCS_LEN;
sq->xsk_pool = xsk_pool;
@@ -1544,7 +1634,7 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
int err;
sq->channel = c;
- sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+ sq->uar_map = c->bfreg->map;
sq->reserved_room = param->stop_room;
param->wq.db_numa_node = cpu_to_node(c->cpu);
@@ -1621,7 +1711,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
int err;
sq->pdev = c->pdev;
- sq->clock = &mdev->clock;
+ sq->clock = mdev->clock;
sq->mkey_be = c->mkey_be;
sq->netdev = c->netdev;
sq->mdev = c->mdev;
@@ -1629,13 +1719,11 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->priv = c->priv;
sq->ch_ix = c->ix;
sq->txq_ix = txq_ix;
- sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+ sq->uar_map = c->bfreg->map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
sq->max_sq_mpw_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
- if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
- set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
if (mlx5_ipsec_device_caps(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
if (param->is_mpw)
@@ -1707,7 +1795,7 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
MLX5_SET(sqc, sqc, flush_in_error_en, 1);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
- MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
+ MLX5_SET(wq, wq, uar_page, csp->uar_page);
MLX5_SET(wq, wq, log_wq_pg_sz, csp->wq_ctrl->buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, csp->wq_ctrl->db.dma);
@@ -1811,6 +1899,7 @@ int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
+ csp.uar_page = c->bfreg->index;
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, qos_queue_group_id, &sq->sqn);
if (err)
goto err_free_txqsq;
@@ -1908,7 +1997,20 @@ void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq,
recover_work);
+ /* Recovering queues means re-enabling NAPI, which requires the netdev
+ * instance lock. However, SQ closing flows have to wait for work tasks
+ * to finish while also holding the netdev instance lock. So either get
+ * the lock or find that the SQ is no longer enabled and thus this work
+ * is not relevant anymore.
+ */
+ while (!netdev_trylock(sq->netdev)) {
+ if (!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))
+ return;
+ msleep(20);
+ }
+
mlx5e_reporter_tx_err_cqe(sq);
+ netdev_unlock(sq->netdev);
}
static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
@@ -1968,6 +2070,7 @@ static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = params->tx_min_inline_mode;
+ csp.uar_page = c->bfreg->index;
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
if (err)
goto err_free_icosq;
@@ -2028,43 +2131,15 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
+ csp.uar_page = c->bfreg->index;
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- if (param->is_xdp_mb)
- set_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state);
-
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
if (err)
goto err_free_xdpsq;
mlx5e_set_xmit_fp(sq, param->is_mpw);
- if (!param->is_mpw && !test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
- unsigned int ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1;
- unsigned int inline_hdr_sz = 0;
- int i;
-
- if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
- inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
- ds_cnt++;
- }
-
- /* Pre initialize fixed WQE fields */
- for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
- struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
-
- sq->db.wqe_info[i] = (struct mlx5e_xdp_wqe_info) {
- .num_wqebbs = 1,
- .num_pkts = 1,
- };
-
- cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
- }
- }
-
return 0;
err_free_xdpsq:
@@ -2086,9 +2161,48 @@ void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
mlx5e_free_xdpsq(sq);
}
+static struct mlx5e_xdpsq *mlx5e_open_xdpredirect_sq(struct mlx5e_channel *c,
+ struct mlx5e_params *params,
+ struct mlx5e_channel_param *cparam,
+ struct mlx5e_create_cq_param *ccp)
+{
+ struct mlx5e_xdpsq *xdpsq;
+ int err;
+
+ xdpsq = kvzalloc_node(sizeof(*xdpsq), GFP_KERNEL, cpu_to_node(c->cpu));
+ if (!xdpsq)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation,
+ &cparam->xdp_sq.cqp, ccp, &xdpsq->cq);
+ if (err)
+ goto err_free_xdpsq;
+
+ err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, xdpsq, true);
+ if (err)
+ goto err_close_xdpsq_cq;
+
+ return xdpsq;
+
+err_close_xdpsq_cq:
+ mlx5e_close_cq(&xdpsq->cq);
+err_free_xdpsq:
+ kvfree(xdpsq);
+
+ return ERR_PTR(err);
+}
+
+static void mlx5e_close_xdpredirect_sq(struct mlx5e_xdpsq *xdpsq)
+{
+ mlx5e_close_xdpsq(xdpsq);
+ mlx5e_close_cq(&xdpsq->cq);
+ kvfree(xdpsq);
+}
+
static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
struct net_device *netdev,
struct workqueue_struct *workqueue,
+ struct mlx5_uars_page *uar,
struct mlx5e_cq_param *param,
struct mlx5e_cq *cq)
{
@@ -2105,7 +2219,6 @@ static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
mcq->set_ci_db = cq->wq_ctrl.db.db;
mcq->arm_db = cq->wq_ctrl.db.db + 1;
*mcq->set_ci_db = 0;
- *mcq->arm_db = 0;
mcq->vector = param->eq_ix;
mcq->comp = mlx5e_completion_event;
mcq->event = mlx5e_cq_error_event;
@@ -2120,6 +2233,7 @@ static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
cq->mdev = mdev;
cq->netdev = netdev;
cq->workqueue = workqueue;
+ cq->uar = uar;
return 0;
}
@@ -2135,7 +2249,8 @@ static int mlx5e_alloc_cq(struct mlx5_core_dev *mdev,
param->wq.db_numa_node = ccp->node;
param->eq_ix = ccp->ix;
- err = mlx5e_alloc_cq_common(mdev, ccp->netdev, ccp->wq, param, cq);
+ err = mlx5e_alloc_cq_common(mdev, ccp->netdev, ccp->wq,
+ ccp->uar, param, cq);
cq->napi = ccp->napi;
cq->ch_stats = ccp->ch_stats;
@@ -2180,7 +2295,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
MLX5_SET(cqc, cqc, cq_period_mode, mlx5e_cq_period_mode(param->cq_period_mode));
MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
- MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, uar_page, cq->uar->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
@@ -2476,6 +2591,7 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam)
{
+ const struct net_device_ops *netdev_ops = c->netdev->netdev_ops;
struct dim_cq_moder icocq_moder = {0, 0};
struct mlx5e_create_cq_param ccp;
int err;
@@ -2496,15 +2612,18 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
if (err)
goto err_close_icosq_cq;
- err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
- &c->xdpsq.cq);
- if (err)
- goto err_close_tx_cqs;
+ if (netdev_ops->ndo_xdp_xmit && c->xdp) {
+ c->xdpsq = mlx5e_open_xdpredirect_sq(c, params, cparam, &ccp);
+ if (IS_ERR(c->xdpsq)) {
+ err = PTR_ERR(c->xdpsq);
+ goto err_close_tx_cqs;
+ }
+ }
err = mlx5e_open_cq(c->mdev, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
&c->rq.cq);
if (err)
- goto err_close_xdp_tx_cqs;
+ goto err_close_xdpredirect_sq;
err = c->xdp ? mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->xdp_sq.cqp,
&ccp, &c->rq_xdpsq.cq) : 0;
@@ -2516,7 +2635,7 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq,
mlx5e_async_icosq_err_cqe_work);
if (err)
- goto err_close_xdpsq_cq;
+ goto err_close_rq_xdpsq_cq;
mutex_init(&c->icosq_recovery_lock);
@@ -2540,16 +2659,8 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
goto err_close_rq;
}
- err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, &c->xdpsq, true);
- if (err)
- goto err_close_xdp_sq;
-
return 0;
-err_close_xdp_sq:
- if (c->xdp)
- mlx5e_close_xdpsq(&c->rq_xdpsq);
-
err_close_rq:
mlx5e_close_rq(&c->rq);
@@ -2562,15 +2673,16 @@ err_close_icosq:
err_close_async_icosq:
mlx5e_close_icosq(&c->async_icosq);
-err_close_xdpsq_cq:
+err_close_rq_xdpsq_cq:
if (c->xdp)
mlx5e_close_cq(&c->rq_xdpsq.cq);
err_close_rx_cq:
mlx5e_close_cq(&c->rq.cq);
-err_close_xdp_tx_cqs:
- mlx5e_close_cq(&c->xdpsq.cq);
+err_close_xdpredirect_sq:
+ if (c->xdpsq)
+ mlx5e_close_xdpredirect_sq(c->xdpsq);
err_close_tx_cqs:
mlx5e_close_tx_cqs(c);
@@ -2586,7 +2698,6 @@ err_close_async_icosq_cq:
static void mlx5e_close_queues(struct mlx5e_channel *c)
{
- mlx5e_close_xdpsq(&c->xdpsq);
if (c->xdp)
mlx5e_close_xdpsq(&c->rq_xdpsq);
/* The same ICOSQ is used for UMRs for both RQ and XSKRQ. */
@@ -2599,7 +2710,8 @@ static void mlx5e_close_queues(struct mlx5e_channel *c)
if (c->xdp)
mlx5e_close_cq(&c->rq_xdpsq.cq);
mlx5e_close_cq(&c->rq.cq);
- mlx5e_close_cq(&c->xdpsq.cq);
+ if (c->xdpsq)
+ mlx5e_close_xdpredirect_sq(c->xdpsq);
mlx5e_close_tx_cqs(c);
mlx5e_close_cq(&c->icosq.cq);
mlx5e_close_cq(&c->async_icosq.cq);
@@ -2650,13 +2762,27 @@ void mlx5e_trigger_napi_sched(struct napi_struct *napi)
local_bh_enable();
}
+static void mlx5e_channel_pick_doorbell(struct mlx5e_channel *c)
+{
+ struct mlx5e_hw_objs *hw_objs = &c->mdev->mlx5e_res.hw_objs;
+
+ /* No dedicated Ethernet doorbells, use the global one. */
+ if (hw_objs->num_bfregs == 0) {
+ c->bfreg = &c->mdev->priv.bfreg;
+ return;
+ }
+
+ /* Round-robin between doorbells. */
+ c->bfreg = hw_objs->bfregs + c->vec_ix % hw_objs->num_bfregs;
+}
+
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_params *params,
- struct mlx5e_channel_param *cparam,
struct xsk_buff_pool *xsk_pool,
struct mlx5e_channel **cp)
{
struct net_device *netdev = priv->netdev;
+ struct mlx5e_channel_param *cparam;
struct mlx5_core_dev *mdev;
struct mlx5e_xsk_param xsk;
struct mlx5e_channel *c;
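The per-channel doorbell selection added above is a plain round-robin over the dedicated blue-flame registers, with a fallback to the global one when none were allocated. A quick illustration of the mapping (the register count is an assumption for the example):

	/* num_bfregs = 4: vec_ix 0,1,2,...,7 -> bfreg index 0,1,2,3,0,1,2,3 */
	static unsigned int ex_pick_bfreg(unsigned int vec_ix, unsigned int num_bfregs)
	{
		return num_bfregs ? vec_ix % num_bfregs : 0;	/* 0 meaning "use the global bfreg" */
	}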
@@ -2678,12 +2804,18 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
return err;
c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
- if (!c)
- return -ENOMEM;
+ cparam = kvzalloc(sizeof(*cparam), GFP_KERNEL);
+ if (!c || !cparam) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ err = mlx5e_build_channel_param(mdev, params, cparam);
+ if (err)
+ goto err_free;
c->priv = priv;
c->mdev = mdev;
- c->tstamp = &priv->tstamp;
c->ix = ix;
c->vec_ix = vec_ix;
c->sd_ix = mlx5_sd_ch_ix_get_dev_ix(mdev, ix);
@@ -2697,8 +2829,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->aff_mask = irq_get_effective_affinity_mask(irq);
c->lag_port = mlx5e_enumerate_lag_port(mdev, ix);
- netif_napi_add_config(netdev, &c->napi, mlx5e_napi_poll, ix);
- netif_napi_set_irq(&c->napi, irq);
+ mlx5e_channel_pick_doorbell(c);
+
+ netif_napi_add_config_locked(netdev, &c->napi, mlx5e_napi_poll, ix);
+ netif_napi_set_irq_locked(&c->napi, irq);
err = mlx5e_open_queues(c, params, cparam);
if (unlikely(err))
@@ -2713,14 +2847,17 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
*cp = c;
+ kvfree(cparam);
return 0;
err_close_queues:
mlx5e_close_queues(c);
err_napi_del:
- netif_napi_del(&c->napi);
+ netif_napi_del_locked(&c->napi);
+err_free:
+ kvfree(cparam);
kvfree(c);
return err;
@@ -2730,7 +2867,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
{
int tc;
- napi_enable(&c->napi);
+ napi_enable_locked(&c->napi);
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_activate_txqsq(&c->sq[tc]);
@@ -2762,7 +2899,7 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
mlx5e_deactivate_txqsq(&c->sq[tc]);
mlx5e_qos_deactivate_queues(c);
- napi_disable(&c->napi);
+ napi_disable_locked(&c->napi);
}
static void mlx5e_close_channel(struct mlx5e_channel *c)
@@ -2771,7 +2908,7 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
mlx5e_close_xsk(c);
mlx5e_close_queues(c);
mlx5e_qos_close_queues(c);
- netif_napi_del(&c->napi);
+ netif_napi_del_locked(&c->napi);
kvfree(c);
}
@@ -2779,20 +2916,14 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
int mlx5e_open_channels(struct mlx5e_priv *priv,
struct mlx5e_channels *chs)
{
- struct mlx5e_channel_param *cparam;
int err = -ENOMEM;
int i;
chs->num = chs->params.num_channels;
chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
- cparam = kvzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
- if (!chs->c || !cparam)
- goto err_free;
-
- err = mlx5e_build_channel_param(priv->mdev, &chs->params, cparam);
- if (err)
- goto err_free;
+ if (!chs->c)
+ goto err_out;
for (i = 0; i < chs->num; i++) {
struct xsk_buff_pool *xsk_pool = NULL;
@@ -2800,7 +2931,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
if (chs->params.xdp_prog)
xsk_pool = mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, i);
- err = mlx5e_open_channel(priv, i, &chs->params, cparam, xsk_pool, &chs->c[i]);
+ err = mlx5e_open_channel(priv, i, &chs->params, xsk_pool, &chs->c[i]);
if (err)
goto err_close_channels;
}
@@ -2818,7 +2949,6 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
}
mlx5e_health_channels_update(priv);
- kvfree(cparam);
return 0;
err_close_ptp:
@@ -2829,9 +2959,8 @@ err_close_channels:
for (i--; i >= 0; i--)
mlx5e_close_channel(chs->c[i]);
-err_free:
kfree(chs->c);
- kvfree(cparam);
+err_out:
chs->num = 0;
return err;
}
@@ -3236,16 +3365,17 @@ static int mlx5e_switch_priv_params(struct mlx5e_priv *priv,
}
}
+ mlx5e_set_xdp_feature(priv);
return 0;
}
static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
+ struct mlx5e_channels *old_chs,
struct mlx5e_channels *new_chs,
mlx5e_fp_preactivate preactivate,
void *context)
{
struct net_device *netdev = priv->netdev;
- struct mlx5e_channels old_chs;
int carrier_ok;
int err = 0;
@@ -3254,7 +3384,6 @@ static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
mlx5e_deactivate_priv_channels(priv);
- old_chs = priv->channels;
priv->channels = *new_chs;
/* New channels are ready to roll, call the preactivate hook if needed
@@ -3263,12 +3392,14 @@ static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
if (preactivate) {
err = preactivate(priv, context);
if (err) {
- priv->channels = old_chs;
+ priv->channels = *old_chs;
goto out;
}
}
- mlx5e_close_channels(&old_chs);
+ mlx5e_set_xdp_feature(priv);
+ if (!MLX5_CAP_GEN(priv->mdev, tis_tir_td_order))
+ mlx5e_close_channels(old_chs);
priv->profile->update_rx(priv);
mlx5e_selq_apply(&priv->selq);
@@ -3287,16 +3418,20 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
mlx5e_fp_preactivate preactivate,
void *context, bool reset)
{
- struct mlx5e_channels *new_chs;
+ struct mlx5e_channels *old_chs, *new_chs;
int err;
reset &= test_bit(MLX5E_STATE_OPENED, &priv->state);
if (!reset)
return mlx5e_switch_priv_params(priv, params, preactivate, context);
+ old_chs = kzalloc(sizeof(*old_chs), GFP_KERNEL);
new_chs = kzalloc(sizeof(*new_chs), GFP_KERNEL);
- if (!new_chs)
- return -ENOMEM;
+ if (!old_chs || !new_chs) {
+ err = -ENOMEM;
+ goto err_free_chs;
+ }
+
new_chs->params = *params;
mlx5e_selq_prepare_params(&priv->selq, &new_chs->params);
@@ -3305,11 +3440,18 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
if (err)
goto err_cancel_selq;
- err = mlx5e_switch_priv_channels(priv, new_chs, preactivate, context);
+ *old_chs = priv->channels;
+
+ err = mlx5e_switch_priv_channels(priv, old_chs, new_chs,
+ preactivate, context);
if (err)
goto err_close;
+ if (MLX5_CAP_GEN(priv->mdev, tis_tir_td_order))
+ mlx5e_close_channels(old_chs);
+
kfree(new_chs);
+ kfree(old_chs);
return 0;
err_close:
@@ -3317,7 +3459,9 @@ err_close:
err_cancel_selq:
mlx5e_selq_cancel(&priv->selq);
+err_free_chs:
kfree(new_chs);
+ kfree(old_chs);
return err;
}
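The reworked switch above keeps a heap copy of the old channels so a failing preactivate callback can restore them untouched, and it keys the teardown point off the tis_tir_td_order capability. A compressed view of the two orderings, as read from these hunks:

	/* !tis_tir_td_order: old channels are closed inside
	 *                    mlx5e_switch_priv_channels(), before update_rx().
	 *  tis_tir_td_order: old channels stay alive until the switch has fully
	 *                    completed and are closed by mlx5e_safe_switch_params().
	 * Either way *old_chs is freed by the caller, together with new_chs.
	 */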
@@ -3328,8 +3472,8 @@ int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv)
void mlx5e_timestamp_init(struct mlx5e_priv *priv)
{
- priv->tstamp.tx_type = HWTSTAMP_TX_OFF;
- priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
+ priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
+ priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
}
static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
@@ -3473,7 +3617,8 @@ static int mlx5e_alloc_drop_cq(struct mlx5e_priv *priv,
param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
param->wq.db_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
- return mlx5e_alloc_cq_common(priv->mdev, priv->netdev, priv->wq, param, cq);
+ return mlx5e_alloc_cq_common(priv->mdev, priv->netdev, priv->wq,
+ mdev->priv.bfreg.up, param, cq);
}
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
@@ -3786,8 +3931,11 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
/* MQPRIO is another toplevel qdisc that can't be attached
* simultaneously with the offloaded HTB.
*/
- if (WARN_ON(mlx5e_selq_is_htb_enabled(&priv->selq)))
- return -EINVAL;
+ if (mlx5e_selq_is_htb_enabled(&priv->selq)) {
+ NL_SET_ERR_MSG_MOD(mqprio->extack,
+ "MQPRIO cannot be configured when HTB offload is enabled.");
+ return -EOPNOTSUPP;
+ }
switch (mqprio->mode) {
case TC_MQPRIO_MODE_DCB:
@@ -3878,6 +4026,11 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
s->rx_bytes += rq_stats->bytes;
s->multicast += rq_stats->mcast_packets;
}
+
+#ifdef CONFIG_MLX5_EN_PSP
+ if (priv->psp)
+ s->tx_dropped += atomic_read(&priv->psp->tx_drop);
+#endif
}
void
@@ -3916,6 +4069,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
}
stats->rx_missed_errors = priv->stats.qcnt.rx_out_of_buffer;
+ stats->rx_dropped = PPORT_2863_GET(pstats, if_in_discards);
stats->rx_length_errors =
PPORT_802_3_GET(pstats, a_in_range_length_errors) +
@@ -4022,10 +4176,6 @@ static int set_feature_hw_gro(struct net_device *netdev, bool enable)
if (enable) {
new_params.packet_merge.type = MLX5E_PACKET_MERGE_SHAMPO;
- new_params.packet_merge.shampo.match_criteria_type =
- MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED;
- new_params.packet_merge.shampo.alignment_granularity =
- MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE;
} else if (new_params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
new_params.packet_merge.type = MLX5E_PACKET_MERGE_NONE;
} else {
@@ -4261,23 +4411,23 @@ static int mlx5e_handle_feature(struct net_device *netdev,
return 0;
}
-void mlx5e_set_xdp_feature(struct net_device *netdev)
+void mlx5e_set_xdp_feature(struct mlx5e_priv *priv)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_params *params = &priv->channels.params;
- xdp_features_t val;
+ struct net_device *netdev = priv->netdev;
+ xdp_features_t val = 0;
- if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
- xdp_clear_features_flag(netdev);
- return;
- }
+ if (netdev->netdev_ops->ndo_bpf &&
+ params->packet_merge.type == MLX5E_PACKET_MERGE_NONE)
+ val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY |
+ NETDEV_XDP_ACT_RX_SG;
- val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
- NETDEV_XDP_ACT_XSK_ZEROCOPY |
- NETDEV_XDP_ACT_RX_SG |
- NETDEV_XDP_ACT_NDO_XMIT |
- NETDEV_XDP_ACT_NDO_XMIT_SG;
- xdp_set_features_flag(netdev, val);
+ if (netdev->netdev_ops->ndo_xdp_xmit && params->xdp_prog)
+ val |= NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
+
+ xdp_set_features_flag_locked(netdev, val);
}
int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
@@ -4312,9 +4462,6 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
return -EINVAL;
}
- /* update XDP supported features */
- mlx5e_set_xdp_feature(netdev);
-
return 0;
}
@@ -4341,12 +4488,17 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev
if (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
netdev_warn(netdev, "Disabling HW_VLAN CTAG FILTERING, not supported in switchdev mode\n");
+ features &= ~NETIF_F_HW_MACSEC;
+ if (netdev->features & NETIF_F_HW_MACSEC)
+ netdev_warn(netdev, "Disabling HW MACsec offload, not supported in switchdev mode\n");
+
return features;
}
static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
netdev_features_t features)
{
+ struct netdev_config *cfg = netdev->cfg_pending;
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_vlan_table *vlan;
struct mlx5e_params *params;
@@ -4413,11 +4565,18 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
}
}
+ /* The header-data split ring param requires HW GRO to stay enabled. */
+ if (cfg && cfg->hds_config == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
+ !(features & NETIF_F_GRO_HW)) {
+ netdev_warn(netdev, "Keeping HW-GRO enabled, TCP header-data split depends on it\n");
+ features |= NETIF_F_GRO_HW;
+ }
+
if (mlx5e_is_uplink_rep(priv)) {
features = mlx5e_fix_uplink_rep_features(netdev, features);
- netdev->netns_local = true;
+ netdev->netns_immutable = true;
} else {
- netdev->netns_local = false;
+ netdev->netns_immutable = false;
}
mutex_unlock(&priv->state_lock);
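The HW-GRO/header-data-split coupling above follows the usual .ndo_fix_features approach: adjust the requested mask (with a warning) so dependent features stay consistent instead of failing the request. A generic sketch of the pattern for a hypothetical driver; the particular feature pair below is only an example:

	static netdev_features_t ex_fix_features(struct net_device *dev,
						 netdev_features_t features)
	{
		/* HW GRO produces aggregated frames that need RX checksum offload. */
		if ((features & NETIF_F_GRO_HW) && !(features & NETIF_F_RXCSUM)) {
			netdev_warn(dev, "Dropping HW GRO, RX checksum offload is off\n");
			features &= ~NETIF_F_GRO_HW;
		}
		return features;
	}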
@@ -4610,22 +4769,23 @@ static int mlx5e_hwstamp_config_ptp_rx(struct mlx5e_priv *priv, bool ptp_rx)
&new_params.ptp_rx, true);
}
-int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
+int mlx5e_hwtstamp_set(struct mlx5e_priv *priv,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
bool rx_cqe_compress_def;
bool ptp_rx;
int err;
if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
- (mlx5_clock_get_ptp_index(priv->mdev) == -1))
+ (mlx5_clock_get_ptp_index(priv->mdev) == -1)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Timestamps are not supported on this device");
return -EOPNOTSUPP;
-
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ }
/* TX HW timestamp */
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
break;
@@ -4637,7 +4797,7 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
rx_cqe_compress_def = priv->channels.params.rx_cqe_compress_def;
/* RX HW timestamp */
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
ptp_rx = false;
break;
@@ -4656,7 +4816,7 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
/* ptp_rx is set if both HW TS is set and CQE
* compression is set
*/
@@ -4669,47 +4829,50 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
err = mlx5e_hwstamp_config_no_ptp_rx(priv,
- config.rx_filter != HWTSTAMP_FILTER_NONE);
+ config->rx_filter != HWTSTAMP_FILTER_NONE);
else
err = mlx5e_hwstamp_config_ptp_rx(priv, ptp_rx);
if (err)
goto err_unlock;
- memcpy(&priv->tstamp, &config, sizeof(config));
+ priv->hwtstamp_config = *config;
mutex_unlock(&priv->state_lock);
/* might need to fix some features */
netdev_update_features(priv->netdev);
- return copy_to_user(ifr->ifr_data, &config,
- sizeof(config)) ? -EFAULT : 0;
+ return 0;
err_unlock:
mutex_unlock(&priv->state_lock);
return err;
}
-int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
+static int mlx5e_hwtstamp_set_ndo(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config *cfg = &priv->tstamp;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ return mlx5e_hwtstamp_set(priv, config, extack);
+}
+
+int mlx5e_hwtstamp_get(struct mlx5e_priv *priv,
+ struct kernel_hwtstamp_config *config)
+{
if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
return -EOPNOTSUPP;
- return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
+ *config = priv->hwtstamp_config;
+
+ return 0;
}
-static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int mlx5e_hwtstamp_get_ndo(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return mlx5e_hwstamp_set(priv, ifr);
- case SIOCGHWTSTAMP:
- return mlx5e_hwstamp_get(priv, ifr);
- default:
- return -EOPNOTSUPP;
- }
+ return mlx5e_hwtstamp_get(priv, config);
}
#ifdef CONFIG_MLX5_ESWITCH
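With the conversion above, the hwtstamp ioctls disappear from the driver: the core handles SIOC{G,S}HWTSTAMP and the user copies, and the driver only sees struct kernel_hwtstamp_config through the dedicated NDOs. A bare-bones sketch of such a pair for a hypothetical driver, with most validation and the hardware programming elided:

	static int ex_hwtstamp_get(struct net_device *dev,
				   struct kernel_hwtstamp_config *config)
	{
		struct ex_priv *priv = netdev_priv(dev);

		*config = priv->hwtstamp_config;	/* report the cached state */
		return 0;
	}

	static int ex_hwtstamp_set(struct net_device *dev,
				   struct kernel_hwtstamp_config *config,
				   struct netlink_ext_ack *extack)
	{
		struct ex_priv *priv = netdev_priv(dev);

		if (config->tx_type != HWTSTAMP_TX_OFF &&
		    config->tx_type != HWTSTAMP_TX_ON) {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported TX timestamp mode");
			return -ERANGE;
		}

		/* program the hardware here, then cache the accepted config */
		priv->hwtstamp_config = *config;
		return 0;
	}

	/* .ndo_hwtstamp_get = ex_hwtstamp_get, .ndo_hwtstamp_set = ex_hwtstamp_set */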
@@ -4956,21 +5119,19 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
struct net_device *netdev = priv->netdev;
int i;
- /* Take rtnl_lock to ensure no change in netdev->real_num_tx_queues
- * through this flow. However, channel closing flows have to wait for
- * this work to finish while holding rtnl lock too. So either get the
- * lock or find that channels are being closed for other reason and
- * this work is not relevant anymore.
+ /* Recovering the TX queues implies re-enabling NAPI, which requires
+ * the netdev instance lock.
+ * However, channel closing flows have to wait for this work to finish
+ * while holding the same lock. So either get the lock or find that
+ * channels are being closed for other reason and this work is not
+ * relevant anymore.
*/
- while (!rtnl_trylock()) {
+ while (!netdev_trylock(netdev)) {
if (!test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state))
return;
msleep(20);
}
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- goto unlock;
-
for (i = 0; i < netdev->real_num_tx_queues; i++) {
struct netdev_queue *dev_queue =
netdev_get_tx_queue(netdev, i);
@@ -4984,8 +5145,7 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
break;
}
-unlock:
- rtnl_unlock();
+ netdev_unlock(netdev);
}
static void mlx5e_tx_timeout(struct net_device *dev, unsigned int txqueue)
@@ -5100,11 +5260,9 @@ static int mlx5e_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
u8 mode, setting;
- int err;
- err = mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting);
- if (err)
- return err;
+ if (mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting))
+ return -EOPNOTSUPP;
mode = setting ? BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB;
return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
mode,
@@ -5155,13 +5313,14 @@ const struct net_device_ops mlx5e_netdev_ops = {
.ndo_set_features = mlx5e_set_features,
.ndo_fix_features = mlx5e_fix_features,
.ndo_change_mtu = mlx5e_change_nic_mtu,
- .ndo_eth_ioctl = mlx5e_ioctl,
.ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
.ndo_features_check = mlx5e_features_check,
.ndo_tx_timeout = mlx5e_tx_timeout,
.ndo_bpf = mlx5e_xdp,
.ndo_xdp_xmit = mlx5e_xdp_xmit,
.ndo_xsk_wakeup = mlx5e_xsk_wakeup,
+ .ndo_hwtstamp_get = mlx5e_hwtstamp_get_ndo,
+ .ndo_hwtstamp_set = mlx5e_hwtstamp_set_ndo,
#ifdef CONFIG_MLX5_EN_ARFS
.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
@@ -5282,8 +5441,7 @@ void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv)
priv->nic_info.set_port = mlx5e_vxlan_set_port;
priv->nic_info.unset_port = mlx5e_vxlan_unset_port;
- priv->nic_info.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN;
+ priv->nic_info.flags = UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN;
priv->nic_info.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN;
/* Don't count the space hard-coded to the IANA port */
priv->nic_info.tables[0].n_entries =
@@ -5311,7 +5469,6 @@ static void mlx5e_get_queue_stats_rx(struct net_device *dev, int i,
struct mlx5e_rq_stats *xskrq_stats;
struct mlx5e_rq_stats *rq_stats;
- ASSERT_RTNL();
if (mlx5e_is_uplink_rep(priv) || !priv->stats_nch)
return;
@@ -5331,7 +5488,6 @@ static void mlx5e_get_queue_stats_tx(struct net_device *dev, int i,
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_sq_stats *sq_stats;
- ASSERT_RTNL();
if (!priv->stats_nch)
return;
@@ -5352,7 +5508,6 @@ static void mlx5e_get_base_stats(struct net_device *dev,
struct mlx5e_ptp *ptp_channel;
int i, tc;
- ASSERT_RTNL();
if (!mlx5e_is_uplink_rep(priv)) {
rx->packets = 0;
rx->bytes = 0;
@@ -5436,6 +5591,127 @@ static const struct netdev_stat_ops mlx5e_stat_ops = {
.get_base_stats = mlx5e_get_base_stats,
};
+struct mlx5_qmgmt_data {
+ struct mlx5e_channel *c;
+ struct mlx5e_channel_param cparam;
+};
+
+static int mlx5e_queue_mem_alloc(struct net_device *dev, void *newq,
+ int queue_index)
+{
+ struct mlx5_qmgmt_data *new = (struct mlx5_qmgmt_data *)newq;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_channels *chs = &priv->channels;
+ struct mlx5e_params params = chs->params;
+ struct mlx5_core_dev *mdev;
+ int err;
+
+ mutex_lock(&priv->state_lock);
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = -ENODEV;
+ goto unlock;
+ }
+
+ if (queue_index >= chs->num) {
+ err = -ERANGE;
+ goto unlock;
+ }
+
+ if (MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS) ||
+ chs->params.ptp_rx ||
+ chs->params.xdp_prog ||
+ priv->htb) {
+ netdev_err(priv->netdev,
+ "Cloning channels with Port/rx PTP, XDP or HTB is not supported\n");
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ mdev = mlx5_sd_ch_ix_get_dev(priv->mdev, queue_index);
+ err = mlx5e_build_channel_param(mdev, &params, &new->cparam);
+ if (err)
+ goto unlock;
+
+ err = mlx5e_open_channel(priv, queue_index, &params, NULL, &new->c);
+unlock:
+ mutex_unlock(&priv->state_lock);
+ return err;
+}
+
+static void mlx5e_queue_mem_free(struct net_device *dev, void *mem)
+{
+ struct mlx5_qmgmt_data *data = (struct mlx5_qmgmt_data *)mem;
+
+ /* Not supposed to happen, since mlx5e_queue_start() never fails;
+ * free the channel anyway in case the core ever takes this path.
+ */
+ if (data->c)
+ mlx5e_close_channel(data->c);
+}
+
+static int mlx5e_queue_stop(struct net_device *dev, void *oldq, int queue_index)
+{
+ /* In mlx5 a txq cannot be simply stopped in isolation, only restarted.
+ * mlx5e_queue_start() never fails; the old queue is stopped there instead.
+ * TODO: Improve this.
+ */
+ return 0;
+}
+
+static int mlx5e_queue_start(struct net_device *dev, void *newq,
+ int queue_index)
+{
+ struct mlx5_qmgmt_data *new = (struct mlx5_qmgmt_data *)newq;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_channel *old;
+
+ mutex_lock(&priv->state_lock);
+
+ /* stop and close the old */
+ old = priv->channels.c[queue_index];
+ mlx5e_deactivate_priv_channels(priv);
+ /* close old before activating new, to avoid napi conflict */
+ mlx5e_close_channel(old);
+
+ /* start the new */
+ priv->channels.c[queue_index] = new->c;
+ mlx5e_activate_priv_channels(priv);
+ mutex_unlock(&priv->state_lock);
+ return 0;
+}
+
+static struct device *mlx5e_queue_get_dma_dev(struct net_device *dev,
+ int queue_index)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_channels *channels;
+ struct device *pdev = NULL;
+ struct mlx5e_channel *ch;
+
+ channels = &priv->channels;
+
+ mutex_lock(&priv->state_lock);
+
+ if (queue_index >= channels->num)
+ goto out;
+
+ ch = channels->c[queue_index];
+ pdev = ch->pdev;
+out:
+ mutex_unlock(&priv->state_lock);
+
+ return pdev;
+}
+
+static const struct netdev_queue_mgmt_ops mlx5e_queue_mgmt_ops = {
+ .ndo_queue_mem_size = sizeof(struct mlx5_qmgmt_data),
+ .ndo_queue_mem_alloc = mlx5e_queue_mem_alloc,
+ .ndo_queue_mem_free = mlx5e_queue_mem_free,
+ .ndo_queue_start = mlx5e_queue_start,
+ .ndo_queue_stop = mlx5e_queue_stop,
+ .ndo_queue_get_dma_dev = mlx5e_queue_get_dma_dev,
+};
+
static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
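For context on how the queue management ops above get exercised: the core's per-RX-queue restart path (used, for example, when binding a memory provider) roughly allocates the new queue memory first and only touches the live queue once that can no longer fail. The sequence below is a hedged paraphrase of that core flow, not mlx5 code, and error handling is trimmed:

	/* new_mem = kzalloc(ops->ndo_queue_mem_size, ...);
	 * ops->ndo_queue_mem_alloc(dev, new_mem, idx);   // build the new channel
	 *   -> on failure the old queue is left untouched
	 * ops->ndo_queue_stop(dev, old_mem, idx);        // no-op for mlx5, see above
	 * ops->ndo_queue_start(dev, new_mem, idx);       // swap in; mlx5 closes the old channel here
	 * ops->ndo_queue_mem_free(dev, old_mem);         // release the old queue memory
	 */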
@@ -5446,8 +5722,11 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
SET_NETDEV_DEV(netdev, mdev->device);
netdev->netdev_ops = &mlx5e_netdev_ops;
+ netdev->queue_mgmt_ops = &mlx5e_queue_mgmt_ops;
netdev->xdp_metadata_ops = &mlx5e_xdp_metadata_ops;
netdev->xsk_tx_metadata_ops = &mlx5e_xsk_tx_metadata_ops;
+ netdev->request_ops_lock = true;
+ netdev_lockdep_set_classes(netdev);
mlx5e_dcbnl_build_netdev(netdev);
@@ -5486,17 +5765,17 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
MLX5E_MPWRQ_UMR_MODE_ALIGNED))
netdev->vlan_features |= NETIF_F_LRO;
+ if (mlx5e_hw_gro_supported(mdev) &&
+ mlx5e_check_fragmented_striding_rq_cap(mdev, PAGE_SHIFT,
+ MLX5E_MPWRQ_UMR_MODE_ALIGNED))
+ netdev->vlan_features |= NETIF_F_GRO_HW;
+
netdev->hw_features = netdev->vlan_features;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
- if (mlx5e_hw_gro_supported(mdev) &&
- mlx5e_check_fragmented_striding_rq_cap(mdev, PAGE_SHIFT,
- MLX5E_MPWRQ_UMR_MODE_ALIGNED))
- netdev->hw_features |= NETIF_F_GRO_HW;
-
if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
netdev->hw_enc_features |= NETIF_F_HW_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO;
@@ -5575,8 +5854,10 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->netmem_tx = true;
+
netif_set_tso_max_size(netdev, GSO_MAX_SIZE);
- mlx5e_set_xdp_feature(netdev);
+ mlx5e_set_xdp_feature(priv);
mlx5e_set_netdev_dev_addr(netdev);
mlx5e_macsec_build_netdev(priv);
mlx5e_ipsec_build_netdev(priv);
@@ -5656,6 +5937,10 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
}
priv->fs = fs;
+ err = mlx5e_psp_init(priv);
+ if (err)
+ mlx5_core_err(mdev, "PSP initialization failed, %d\n", err);
+
err = mlx5e_ktls_init(priv);
if (err)
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
@@ -5668,8 +5953,9 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
if (take_rtnl)
rtnl_lock();
+ mlx5e_psp_register(priv);
/* update XDP supported features */
- mlx5e_set_xdp_feature(netdev);
+ mlx5e_set_xdp_feature(priv);
if (take_rtnl)
rtnl_unlock();
@@ -5680,7 +5966,9 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
mlx5e_health_destroy_reporters(priv);
+ mlx5e_psp_unregister(priv);
mlx5e_ktls_cleanup(priv);
+ mlx5e_psp_cleanup(priv);
mlx5e_fs_cleanup(priv->fs);
debugfs_remove_recursive(priv->dfs_root);
priv->fs = NULL;
@@ -5821,6 +6109,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
if (mlx5e_monitor_counter_supported(priv))
mlx5e_monitor_counter_init(priv);
+ mlx5e_pcie_cong_event_init(priv);
mlx5e_hv_vhca_stats_create(priv);
if (netdev->reg_state != NETREG_REGISTERED)
return;
@@ -5829,9 +6118,11 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
mlx5e_nic_set_rx_mode(priv);
rtnl_lock();
+ netdev_lock(netdev);
if (netif_running(netdev))
mlx5e_open(netdev);
udp_tunnel_nic_reset_ntf(priv->netdev);
+ netdev_unlock(netdev);
netif_device_attach(netdev);
rtnl_unlock();
}
@@ -5844,23 +6135,27 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
mlx5e_dcbnl_delete_app(priv);
rtnl_lock();
+ netdev_lock(priv->netdev);
if (netif_running(priv->netdev))
mlx5e_close(priv->netdev);
netif_device_detach(priv->netdev);
+ if (priv->en_trap) {
+ mlx5e_deactivate_trap(priv);
+ mlx5e_close_trap(priv->en_trap);
+ priv->en_trap = NULL;
+ }
+ netdev_unlock(priv->netdev);
rtnl_unlock();
mlx5e_nic_set_rx_mode(priv);
+ mlx5e_pcie_cong_event_cleanup(priv);
mlx5e_hv_vhca_stats_destroy(priv);
if (mlx5e_monitor_counter_supported(priv))
mlx5e_monitor_counter_cleanup(priv);
+ mlx5e_ipsec_disable_events(priv);
mlx5e_disable_blocking_events(priv);
- if (priv->en_trap) {
- mlx5e_deactivate_trap(priv);
- mlx5e_close_trap(priv->en_trap);
- priv->en_trap = NULL;
- }
mlx5e_disable_async_events(priv);
mlx5_lag_remove_netdev(mdev, priv->netdev);
mlx5_vxlan_reset_to_default(mdev->vxlan);
@@ -5870,7 +6165,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
static int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
{
- return mlx5e_refresh_tirs(priv, false, false);
+ return mlx5e_refresh_tirs(priv->mdev, false, false);
}
static const struct mlx5e_profile mlx5e_nic_profile = {
@@ -6004,8 +6299,15 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
if (!priv->channel_stats)
goto err_free_tx_rates;
+ priv->fec_ranges = kcalloc(ETHTOOL_FEC_HIST_MAX,
+ sizeof(*priv->fec_ranges), GFP_KERNEL);
+ if (!priv->fec_ranges)
+ goto err_free_channel_stats;
+
return 0;
+err_free_channel_stats:
+ kfree(priv->channel_stats);
err_free_tx_rates:
kfree(priv->tx_rates);
err_free_txq2sq_stats:
@@ -6029,6 +6331,7 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
if (!priv->mdev)
return;
+ kfree(priv->fec_ranges);
for (i = 0; i < priv->stats_nch; i++)
kvfree(priv->channel_stats[i]);
kfree(priv->channel_stats);
@@ -6115,7 +6418,9 @@ static void mlx5e_update_features(struct net_device *netdev)
return; /* features will be updated on netdev registration */
rtnl_lock();
+ netdev_lock(netdev);
netdev_update_features(netdev);
+ netdev_unlock(netdev);
rtnl_unlock();
}
@@ -6126,7 +6431,7 @@ static void mlx5e_reset_channels(struct net_device *netdev)
int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
- const bool take_rtnl = priv->netdev->reg_state == NETREG_REGISTERED;
+ const bool need_lock = priv->netdev->reg_state == NETREG_REGISTERED;
const struct mlx5e_profile *profile = priv->profile;
int max_nch;
int err;
@@ -6168,15 +6473,19 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
* 2. Set our default XPS cpumask.
* 3. Build the RQT.
*
- * rtnl_lock is required by netif_set_real_num_*_queues in case the
+ * Locking is required by netif_set_real_num_*_queues in case the
* netdev has been registered by this point (if this function was called
* in the reload or resume flow).
*/
- if (take_rtnl)
+ if (need_lock) {
rtnl_lock();
+ netdev_lock(priv->netdev);
+ }
err = mlx5e_num_channels_changed(priv);
- if (take_rtnl)
+ if (need_lock) {
+ netdev_unlock(priv->netdev);
rtnl_unlock();
+ }
if (err)
goto out;
@@ -6511,8 +6820,24 @@ static void _mlx5e_remove(struct auxiliary_device *adev)
mlx5_core_uplink_netdev_set(mdev, NULL);
mlx5e_dcbnl_delete_app(priv);
- unregister_netdev(priv->netdev);
- _mlx5e_suspend(adev, false);
+ /* When unloading the driver, the netdev is still registered if coming
+ * from legacy mode. If coming from switchdev mode, it has already been
+ * unregistered before the change to the NIC profile.
+ */
+ if (priv->netdev->reg_state == NETREG_REGISTERED) {
+ mlx5e_psp_unregister(priv);
+ unregister_netdev(priv->netdev);
+ _mlx5e_suspend(adev, false);
+ } else {
+ struct mlx5_core_dev *pos;
+ int i;
+
+ if (test_bit(MLX5E_STATE_DESTROYING, &priv->state))
+ mlx5_sd_for_each_dev(i, mdev, pos)
+ mlx5e_destroy_mdev_resources(pos);
+ else
+ _mlx5e_suspend(adev, true);
+ }
/* Avoid cleanup if profile rollback failed. */
if (priv->profile)
priv->profile->cleanup(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 92094bf60d59..ee9595109649 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -33,6 +33,7 @@
#include <linux/dim.h>
#include <linux/debugfs.h>
#include <linux/mlx5/fs.h>
+#include <net/netdev_lock.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
@@ -65,6 +66,7 @@
#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1
+#define MLX5E_REP_PARAMS_DEF_LOG_RQ_SIZE 0x8
static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
@@ -600,7 +602,8 @@ mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
if (c->xdp)
sqs[num_sqs++] = c->rq_xdpsq.sqn;
- sqs[num_sqs++] = c->xdpsq.sqn;
+ if (c->xdpsq)
+ sqs[num_sqs++] = c->xdpsq->sqn;
}
}
if (ptp_sq) {
@@ -801,6 +804,7 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
.ndo_stop = mlx5e_rep_close,
.ndo_start_xmit = mlx5e_xmit,
.ndo_setup_tc = mlx5e_rep_setup_tc,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_get_stats64 = mlx5e_rep_get_stats,
.ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
.ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
@@ -854,6 +858,8 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
/* RQ */
mlx5e_build_rq_params(mdev, params);
+ if (!mlx5e_is_uplink_rep(priv) && mlx5_core_is_ecpf(mdev))
+ params->log_rq_mtu_frames = MLX5E_REP_PARAMS_DEF_LOG_RQ_SIZE;
/* If netdev is already registered (e.g. move from nic profile to uplink,
* RTNL lock must be held before triggering netdev notifiers.
@@ -861,7 +867,7 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
if (take_rtnl)
rtnl_lock();
/* update XDP supported features */
- mlx5e_set_xdp_feature(netdev);
+ mlx5e_set_xdp_feature(priv);
if (take_rtnl)
rtnl_unlock();
@@ -881,10 +887,14 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev,
{
SET_NETDEV_DEV(netdev, mdev->device);
netdev->netdev_ops = &mlx5e_netdev_ops_rep;
+ netdev->request_ops_lock = true;
+ netdev_lockdep_set_classes(netdev);
eth_hw_addr_random(netdev);
netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
netdev->watchdog_timeo = 15 * HZ;
+ if (mlx5_core_is_ecpf(mdev))
+ netdev->tx_queue_len = 1 << MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
netdev->hw_features |= NETIF_F_HW_TC;
@@ -899,7 +909,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev,
netdev->features |= netdev->hw_features;
- netdev->netns_local = true;
+ netdev->netns_immutable = true;
}
static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
@@ -964,7 +974,7 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
MLX5_FLOW_NAMESPACE_KERNEL), false);
/* The inner_ttc in the ttc params is intentionally not set */
- mlx5e_set_ttc_params(priv->fs, priv->rx_res, &ttc_params, false);
+ mlx5e_set_ttc_params(priv->fs, priv->rx_res, &ttc_params, false, false);
if (rep->vport != MLX5_VPORT_UPLINK)
/* To give uplink rep TTC a lower level for chaining from root ft */
@@ -1338,9 +1348,11 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
netdev->wanted_features |= NETIF_F_HW_TC;
rtnl_lock();
+ netdev_lock(netdev);
if (netif_running(netdev))
mlx5e_open(netdev);
udp_tunnel_nic_reset_ntf(priv->netdev);
+ netdev_unlock(netdev);
netif_device_attach(netdev);
rtnl_unlock();
}
@@ -1350,9 +1362,11 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
rtnl_lock();
+ netdev_lock(priv->netdev);
if (netif_running(priv->netdev))
mlx5e_close(priv->netdev);
netif_device_detach(priv->netdev);
+ netdev_unlock(priv->netdev);
rtnl_unlock();
mlx5e_rep_bridge_cleanup(priv);
@@ -1433,11 +1447,11 @@ static void mlx5e_rep_vnic_reporter_create(struct mlx5e_priv *priv,
reporter = devl_port_health_reporter_create(dl_port,
&mlx5_rep_vnic_reporter_ops,
- 0, rpriv);
+ rpriv);
if (IS_ERR(reporter)) {
mlx5_core_err(priv->mdev,
- "Failed to create representor vnic reporter, err = %ld\n",
- PTR_ERR(reporter));
+ "Failed to create representor vnic reporter, err = %pe\n",
+ reporter);
return;
}
@@ -1492,12 +1506,21 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
static int
mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
- struct mlx5e_priv *priv = netdev_priv(mlx5_uplink_netdev_get(dev));
struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+ struct net_device *netdev;
+ struct mlx5e_priv *priv;
+ int err;
+
+ netdev = mlx5_uplink_netdev_get(dev);
+ if (!netdev)
+ return 0;
+ priv = netdev_priv(netdev);
rpriv->netdev = priv->netdev;
- return mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
- rpriv);
+ err = mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
+ rpriv);
+ mlx5_uplink_netdev_put(dev, netdev);
+ return err;
}
static void
@@ -1508,6 +1531,21 @@ mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv *rpriv)
priv = netdev_priv(netdev);
+ /* This bit is set when devlink is used to change the eswitch mode from
+ * switchdev to legacy. Since the uplink netdev ifindex must be kept, only
+ * the uplink representor profile is detached and the NIC profile attached;
+ * the netdev is unregistered later, when the NIC auxiliary driver is
+ * unloaded for this case.
+ * Devlink eswitch mode changes are explicitly blocked while IPsec rules
+ * are offloaded, but other cases, such as driver unload and devlink
+ * reload, cannot be blocked. For those cases the netdev must be
+ * unregistered before the profile change; otherwise the offloaded rules
+ * never get a chance to be unoffloaded before the cleanup triggered by
+ * detaching the uplink representor profile, and resources would leak.
+ */
+ if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_SWITCH_LEGACY))
+ unregister_netdev(netdev);
+
mlx5e_netdev_attach_nic_profile(priv);
}
@@ -1609,8 +1647,16 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
struct net_device *netdev = rpriv->netdev;
- struct mlx5e_priv *priv = netdev_priv(netdev);
- void *ppriv = priv->ppriv;
+ struct mlx5e_priv *priv;
+ void *ppriv;
+
+ if (!netdev) {
+ ppriv = rpriv;
+ goto free_ppriv;
+ }
+
+ priv = netdev_priv(netdev);
+ ppriv = priv->ppriv;
if (rep->vport == MLX5_VPORT_UPLINK) {
mlx5e_vport_uplink_rep_unload(rpriv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index d81083f4f316..1f6930c77437 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -51,6 +51,7 @@
#include "ipoib/ipoib.h"
#include "en_accel/ipsec.h"
#include "en_accel/macsec.h"
+#include "en_accel/psp_rxtx.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ktls_txrx.h"
#include "en/xdp.h"
@@ -273,33 +274,32 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
#define MLX5E_PAGECNT_BIAS_MAX (PAGE_SIZE / 64)
-static int mlx5e_page_alloc_fragmented(struct mlx5e_rq *rq,
+static int mlx5e_page_alloc_fragmented(struct page_pool *pp,
struct mlx5e_frag_page *frag_page)
{
- struct page *page;
+ netmem_ref netmem = page_pool_dev_alloc_netmems(pp);
- page = page_pool_dev_alloc_pages(rq->page_pool);
- if (unlikely(!page))
+ if (unlikely(!netmem))
return -ENOMEM;
- page_pool_fragment_page(page, MLX5E_PAGECNT_BIAS_MAX);
+ page_pool_fragment_netmem(netmem, MLX5E_PAGECNT_BIAS_MAX);
*frag_page = (struct mlx5e_frag_page) {
- .page = page,
+ .netmem = netmem,
.frags = 0,
};
return 0;
}
-static void mlx5e_page_release_fragmented(struct mlx5e_rq *rq,
+static void mlx5e_page_release_fragmented(struct page_pool *pp,
struct mlx5e_frag_page *frag_page)
{
u16 drain_count = MLX5E_PAGECNT_BIAS_MAX - frag_page->frags;
- struct page *page = frag_page->page;
+ netmem_ref netmem = frag_page->netmem;
- if (page_pool_unref_page(page, drain_count) == 0)
- page_pool_put_unrefed_page(rq->page_pool, page, -1, true);
+ if (page_pool_unref_netmem(netmem, drain_count) == 0)
+ page_pool_put_unrefed_netmem(pp, netmem, -1, true);
}
static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
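The netmem conversion above keeps the existing page-count bias trick: each page (now a netmem handle) is over-referenced up front so the hot path only counts the fragments it actually hands out, and the release path drains the unused bias in one call. With MLX5E_PAGECNT_BIAS_MAX being PAGE_SIZE / 64, i.e. 64 on 4 KiB pages, a worked example:

	/* alloc:   page_pool_fragment_netmem(netmem, 64)   -> pool ref count = 64
	 * RX path: 3 fragments attached to skbs            -> frag_page->frags = 3
	 * release: drain_count = 64 - 3 = 61
	 *          page_pool_unref_netmem(netmem, 61) returns 3 (non-zero), so the
	 *          netmem stays alive until the last in-flight skb fragment is
	 *          dropped and the page_pool/skb recycling machinery returns it.
	 */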
@@ -313,7 +313,8 @@ static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
* offset) should just use the new one without replenishing again
* by themselves.
*/
- err = mlx5e_page_alloc_fragmented(rq, frag->frag_page);
+ err = mlx5e_page_alloc_fragmented(rq->page_pool,
+ frag->frag_page);
return err;
}
@@ -332,7 +333,7 @@ static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *frag)
{
if (mlx5e_frag_can_release(frag))
- mlx5e_page_release_fragmented(rq, frag->frag_page);
+ mlx5e_page_release_fragmented(rq->page_pool, frag->frag_page);
}
static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
@@ -358,7 +359,7 @@ static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
frag->flags &= ~BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
headroom = i == 0 ? rq->buff.headroom : 0;
- addr = page_pool_get_dma_addr(frag->frag_page->page);
+ addr = page_pool_get_dma_addr_netmem(frag->frag_page->netmem);
wqe->data[i].addr = cpu_to_be64(addr + frag->offset + headroom);
}
@@ -499,9 +500,10 @@ mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinf
struct xdp_buff *xdp, struct mlx5e_frag_page *frag_page,
u32 frag_offset, u32 len)
{
+ netmem_ref netmem = frag_page->netmem;
skb_frag_t *frag;
- dma_addr_t addr = page_pool_get_dma_addr(frag_page->page);
+ dma_addr_t addr = page_pool_get_dma_addr_netmem(netmem);
dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, rq->buff.map_dir);
if (!xdp_buff_has_frags(xdp)) {
@@ -514,9 +516,9 @@ mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinf
}
frag = &sinfo->frags[sinfo->nr_frags++];
- skb_frag_fill_page_desc(frag, frag_page->page, frag_offset, len);
+ skb_frag_fill_netmem_desc(frag, netmem, frag_offset, len);
- if (page_is_pfmemalloc(frag_page->page))
+ if (netmem_is_pfmemalloc(netmem))
xdp_buff_set_frag_pfmemalloc(xdp);
sinfo->xdp_frags_size += len;
}
@@ -527,27 +529,29 @@ mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
u32 frag_offset, u32 len,
unsigned int truesize)
{
- dma_addr_t addr = page_pool_get_dma_addr(frag_page->page);
+ dma_addr_t addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
u8 next_frag = skb_shinfo(skb)->nr_frags;
+ netmem_ref netmem = frag_page->netmem;
dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len,
rq->buff.map_dir);
- if (skb_can_coalesce(skb, next_frag, frag_page->page, frag_offset)) {
+ if (skb_can_coalesce_netmem(skb, next_frag, netmem, frag_offset)) {
skb_coalesce_rx_frag(skb, next_frag - 1, len, truesize);
- } else {
- frag_page->frags++;
- skb_add_rx_frag(skb, next_frag, frag_page->page,
- frag_offset, len, truesize);
+ return;
}
+
+ frag_page->frags++;
+ skb_add_rx_frag_netmem(skb, next_frag, netmem,
+ frag_offset, len, truesize);
}
static inline void
mlx5e_copy_skb_header(struct mlx5e_rq *rq, struct sk_buff *skb,
- struct page *page, dma_addr_t addr,
+ netmem_ref netmem, dma_addr_t addr,
int offset_from, int dma_offset, u32 headlen)
{
- const void *from = page_address(page) + offset_from;
+ const void *from = netmem_address(netmem) + offset_from;
/* Aligning len to sizeof(long) optimizes memcpy performance */
unsigned int len = ALIGN(headlen, sizeof(long));
@@ -584,7 +588,8 @@ mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
struct mlx5e_frag_page *frag_page;
frag_page = &wi->alloc_units.frag_pages[i];
- mlx5e_page_release_fragmented(rq, frag_page);
+ mlx5e_page_release_fragmented(rq->page_pool,
+ frag_page);
}
}
}
@@ -631,95 +636,96 @@ static void build_ksm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe,
__be32 key, u16 offset, u16 ksm_len)
{
memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_ksms));
- umr_wqe->ctrl.opmod_idx_opcode =
+ umr_wqe->hdr.ctrl.opmod_idx_opcode =
cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
MLX5_OPCODE_UMR);
- umr_wqe->ctrl.umr_mkey = key;
- umr_wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT)
+ umr_wqe->hdr.ctrl.umr_mkey = key;
+ umr_wqe->hdr.ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT)
| MLX5E_KSM_UMR_DS_CNT(ksm_len));
- umr_wqe->uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
- umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
- umr_wqe->uctrl.xlt_octowords = cpu_to_be16(ksm_len);
- umr_wqe->uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
+ umr_wqe->hdr.uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
+ umr_wqe->hdr.uctrl.xlt_offset = cpu_to_be16(offset);
+ umr_wqe->hdr.uctrl.xlt_octowords = cpu_to_be16(ksm_len);
+ umr_wqe->hdr.uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
+}
+
+static struct mlx5e_frag_page *mlx5e_shampo_hd_to_frag_page(struct mlx5e_rq *rq,
+ int header_index)
+{
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+
+ return &shampo->pages[header_index >> shampo->log_hd_per_page];
}
+static u64 mlx5e_shampo_hd_offset(struct mlx5e_rq *rq, int header_index)
+{
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+ u32 hd_per_page = shampo->hd_per_page;
+
+ return (header_index & (hd_per_page - 1)) << shampo->log_hd_entry_size;
+}
+
+static void mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index);
+
static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
struct mlx5e_icosq *sq,
u16 ksm_entries, u16 index)
{
struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- u16 entries, pi, header_offset, err, wqe_bbs, new_entries;
+ u16 pi, header_offset, err, wqe_bbs;
u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey;
- u16 page_index = shampo->curr_page_index;
- struct mlx5e_frag_page *frag_page;
- u64 addr = shampo->last_addr;
- struct mlx5e_dma_info *dma_info;
struct mlx5e_umr_wqe *umr_wqe;
int headroom, i;
headroom = rq->buff.headroom;
- new_entries = ksm_entries - (shampo->pi & (MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT - 1));
- entries = ALIGN(ksm_entries, MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT);
- wqe_bbs = MLX5E_KSM_UMR_WQEBBS(entries);
+ wqe_bbs = MLX5E_KSM_UMR_WQEBBS(ksm_entries);
pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs);
umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
- build_ksm_umr(sq, umr_wqe, shampo->key, index, entries);
-
- frag_page = &shampo->pages[page_index];
-
- for (i = 0; i < entries; i++, index++) {
- dma_info = &shampo->info[index];
- if (i >= ksm_entries || (index < shampo->pi && shampo->pi - index <
- MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT))
- goto update_ksm;
- header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
- MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
- if (!(header_offset & (PAGE_SIZE - 1))) {
- page_index = (page_index + 1) & (shampo->hd_per_wq - 1);
- frag_page = &shampo->pages[page_index];
-
- err = mlx5e_page_alloc_fragmented(rq, frag_page);
- if (unlikely(err))
+ build_ksm_umr(sq, umr_wqe, shampo->mkey_be, index, ksm_entries);
+
+ for (i = 0; i < ksm_entries; i++, index++) {
+ struct mlx5e_frag_page *frag_page;
+ u64 addr;
+
+ frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
+ header_offset = mlx5e_shampo_hd_offset(rq, index);
+ if (!header_offset) {
+ err = mlx5e_page_alloc_fragmented(rq->hd_page_pool,
+ frag_page);
+ if (err)
goto err_unmap;
-
- addr = page_pool_get_dma_addr(frag_page->page);
-
- dma_info->addr = addr;
- dma_info->frag_page = frag_page;
- } else {
- dma_info->addr = addr + header_offset;
- dma_info->frag_page = frag_page;
}
-update_ksm:
+ addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
umr_wqe->inline_ksms[i] = (struct mlx5_ksm) {
.key = cpu_to_be32(lkey),
- .va = cpu_to_be64(dma_info->addr + headroom),
+ .va = cpu_to_be64(addr + header_offset + headroom),
};
}
sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
.wqe_type = MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR,
.num_wqebbs = wqe_bbs,
- .shampo.len = new_entries,
+ .shampo.len = ksm_entries,
};
- shampo->pi = (shampo->pi + new_entries) & (shampo->hd_per_wq - 1);
- shampo->curr_page_index = page_index;
- shampo->last_addr = addr;
+ shampo->pi = (shampo->pi + ksm_entries) & (shampo->hd_per_wq - 1);
sq->pc += wqe_bbs;
- sq->doorbell_cseg = &umr_wqe->ctrl;
+ sq->doorbell_cseg = &umr_wqe->hdr.ctrl;
return 0;
err_unmap:
while (--i >= 0) {
- dma_info = &shampo->info[--index];
- if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) {
- dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE);
- mlx5e_page_release_fragmented(rq, dma_info->frag_page);
+ --index;
+ header_offset = mlx5e_shampo_hd_offset(rq, index);
+ if (!header_offset) {
+ struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
+
+ mlx5e_page_release_fragmented(rq->hd_page_pool,
+ frag_page);
}
}
+
rq->stats->buff_alloc_err++;
return err;
}
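
The two small helpers introduced above, mlx5e_shampo_hd_to_frag_page() and
mlx5e_shampo_hd_offset(), replace the old running page_index/last_addr bookkeeping with pure
index arithmetic: the header index selects the page via a shift and the in-page offset via a
mask-and-shift. A standalone model of that arithmetic; the slot size and per-page split below
are assumptions chosen for the example, not values read from hardware:

	#include <stdio.h>

	/* Model of the SHAMPO header-index math above: headers are packed
	 * hd_per_page = 2^log_hd_per_page per page, each slot 2^log_hd_entry_size
	 * bytes wide. The concrete sizes here are illustrative only.
	 */
	int main(void)
	{
		unsigned int log_hd_entry_size = 7;                     /* 128-byte header slots */
		unsigned int log_hd_per_page = 12 - log_hd_entry_size;  /* 4 KiB page -> 32 slots */
		unsigned int hd_per_page = 1U << log_hd_per_page;

		for (unsigned int index = 30; index <= 33; index++) {
			unsigned int page = index >> log_hd_per_page;
			unsigned int offset = (index & (hd_per_page - 1)) << log_hd_entry_size;

			printf("header %2u -> page %u, offset %4u\n", index, page, offset);
		}
		return 0;
	}
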
@@ -735,12 +741,12 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
ksm_entries = bitmap_find_window(shampo->bitmap,
shampo->hd_per_wqe,
shampo->hd_per_wq, shampo->pi);
- ksm_entries = ALIGN_DOWN(ksm_entries, MLX5E_SHAMPO_WQ_HEADER_PER_PAGE);
+ ksm_entries = ALIGN_DOWN(ksm_entries, shampo->hd_per_page);
if (!ksm_entries)
return 0;
- ksm_entries += (shampo->pi & (MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT - 1));
- index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT);
+ /* pi is aligned to MLX5E_SHAMPO_WQ_HEADER_PER_PAGE */
+ index = shampo->pi;
entries_before = shampo->hd_per_wq - index;
if (unlikely(entries_before < ksm_entries))
@@ -791,10 +797,11 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
for (i = 0; i < rq->mpwqe.pages_per_wqe; i++, frag_page++) {
dma_addr_t addr;
- err = mlx5e_page_alloc_fragmented(rq, frag_page);
+ err = mlx5e_page_alloc_fragmented(rq->page_pool, frag_page);
if (unlikely(err))
goto err_unmap;
- addr = page_pool_get_dma_addr(frag_page->page);
+
+ addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
umr_wqe->inline_mtts[i] = (struct mlx5_mtt) {
.ptag = cpu_to_be64(addr | MLX5_EN_WR),
};
@@ -814,12 +821,12 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
bitmap_zero(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
wi->consumed_strides = 0;
- umr_wqe->ctrl.opmod_idx_opcode =
+ umr_wqe->hdr.ctrl.opmod_idx_opcode =
cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
MLX5_OPCODE_UMR);
offset = (ix * rq->mpwqe.mtts_per_wqe) * sizeof(struct mlx5_mtt) / MLX5_OCTWORD;
- umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
+ umr_wqe->hdr.uctrl.xlt_offset = cpu_to_be16(offset);
sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
.wqe_type = MLX5E_ICOSQ_WQE_UMR_RX,
@@ -829,14 +836,14 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
sq->pc += rq->mpwqe.umr_wqebbs;
- sq->doorbell_cseg = &umr_wqe->ctrl;
+ sq->doorbell_cseg = &umr_wqe->hdr.ctrl;
return 0;
err_unmap:
while (--i >= 0) {
frag_page--;
- mlx5e_page_release_fragmented(rq, frag_page);
+ mlx5e_page_release_fragmented(rq->page_pool, frag_page);
}
bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
@@ -851,13 +858,11 @@ static void
mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
{
struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- u64 addr = shampo->info[header_index].addr;
- if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
- struct mlx5e_dma_info *dma_info = &shampo->info[header_index];
+ if (((header_index + 1) & (shampo->hd_per_page - 1)) == 0) {
+ struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
- dma_info->addr = ALIGN_DOWN(addr, PAGE_SIZE);
- mlx5e_page_release_fragmented(rq, dma_info->frag_page);
+ mlx5e_page_release_fragmented(rq->hd_page_pool, frag_page);
}
clear_bit(header_index, shampo->bitmap);
}
@@ -1032,6 +1037,10 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
netdev_WARN_ONCE(cq->netdev,
"Bad OP in ICOSQ CQE: 0x%x\n",
get_cqe_opcode(cqe));
+#ifdef CONFIG_MLX5_EN_TLS
+ if (wi->wqe_type == MLX5E_ICOSQ_WQE_GET_PSV_TLS)
+ mlx5e_ktls_rx_resync_async_request_cancel(wi);
+#endif
mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
(struct mlx5_err_cqe *)cqe);
mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
@@ -1102,6 +1111,8 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
if (rq->page_pool)
page_pool_nid_changed(rq->page_pool, numa_mem_id());
+ if (rq->hd_page_pool)
+ page_pool_nid_changed(rq->hd_page_pool, numa_mem_id());
head = rq->mpwqe.actual_wq_head;
i = missing;
@@ -1156,8 +1167,9 @@ static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
}
}
-static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
- u32 cqe_bcnt)
+static unsigned int mlx5e_lro_update_hdr(struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe,
+ u32 cqe_bcnt)
{
struct ethhdr *eth = (struct ethhdr *)(skb->data);
struct tcphdr *tcp;
@@ -1207,14 +1219,17 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
tcp->check = tcp_v6_check(payload_len, &ipv6->saddr,
&ipv6->daddr, check);
}
+
+ return (unsigned int)((unsigned char *)tcp + tcp->doff * 4 - skb->data);
}
static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index)
{
- struct mlx5e_dma_info *last_head = &rq->mpwqe.shampo->info[header_index];
- u16 head_offset = (last_head->addr & (PAGE_SIZE - 1)) + rq->buff.headroom;
+ struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
+ u16 head_offset = mlx5e_shampo_hd_offset(rq, header_index);
+ void *addr = netmem_address(frag_page->netmem);
- return page_address(last_head->frag_page->page) + head_offset;
+ return addr + head_offset + rq->buff.headroom;
}
static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4)
@@ -1281,8 +1296,12 @@ static void mlx5e_shampo_update_ipv4_tcp_hdr(struct mlx5e_rq *rq, struct iphdr *
tcp->check = ~tcp_v4_check(skb->len - tcp_off, ipv4->saddr,
ipv4->daddr, 0);
skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
- if (ntohs(ipv4->id) == rq->hw_gro_data->second_ip_id)
- skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
+ if (ntohs(ipv4->id) == rq->hw_gro_data->second_ip_id) {
+ bool encap = rq->hw_gro_data->fk.control.flags & FLOW_DIS_ENCAPSULATION;
+
+ skb_shinfo(skb)->gso_type |= encap ? SKB_GSO_TCP_FIXEDID_INNER :
+ SKB_GSO_TCP_FIXEDID;
+ }
skb->csum_start = (unsigned char *)tcp - skb->head;
skb->csum_offset = offsetof(struct tcphdr, check);
@@ -1513,6 +1532,11 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+ if (unlikely(mlx5e_psp_is_rx_flow(cqe))) {
+		/* TBD: PSP csum complete corrections; for now take the csum_unnecessary path */
+ goto csum_unnecessary;
+ }
+
if (test_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state))
return; /* CQE csum covers all received bytes */
@@ -1541,7 +1565,7 @@ csum_none:
#define MLX5E_CE_BIT_MASK 0x80
-static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
+static inline bool mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
u32 cqe_bcnt,
struct mlx5e_rq *rq,
struct sk_buff *skb)
@@ -1555,6 +1579,11 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
if (unlikely(get_cqe_tls_offload(cqe)))
mlx5e_ktls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
+ if (unlikely(mlx5e_psp_is_rx_flow(cqe))) {
+ if (mlx5e_psp_offload_handle_rx_skb(netdev, skb, cqe))
+ return true;
+ }
+
if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
mlx5e_ipsec_offload_handle_rx_skb(netdev, skb,
be32_to_cpu(cqe->ft_metadata));
@@ -1563,8 +1592,10 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
mlx5e_macsec_offload_handle_rx_skb(netdev, skb, cqe);
if (lro_num_seg > 1) {
- mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
- skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
+ unsigned int hdrlen = mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
+
+ skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt - hdrlen, lro_num_seg);
+ skb_shinfo(skb)->gso_segs = lro_num_seg;
/* Subtract one since we already counted this as one
* "regular" packet in mlx5e_complete_rx_cqe()
*/
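
The return-value change to mlx5e_lro_update_hdr() above feeds directly into gso_size: the
aggregated byte count now has the Ethernet/IP/TCP header length subtracted before being
divided by the number of coalesced segments, so gso_size reflects payload per segment rather
than payload plus a share of the header. A small worked example (all numbers invented):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	/* Worked example of the gso_size computation above; every value is made up. */
	int main(void)
	{
		unsigned int cqe_bcnt = 45066;  /* bytes covered by the aggregated CQE */
		unsigned int hdrlen = 66;       /* ETH + IPv4 + TCP, as returned by the helper */
		unsigned int lro_num_seg = 31;  /* coalesced segments reported in the CQE */

		printf("old gso_size: %u\n", DIV_ROUND_UP(cqe_bcnt, lro_num_seg));
		printf("new gso_size: %u\n", DIV_ROUND_UP(cqe_bcnt - hdrlen, lro_num_seg));
		return 0;
	}
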
@@ -1573,7 +1604,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
stats->lro_bytes += cqe_bcnt;
}
- if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
+ if (unlikely(mlx5e_rx_hw_stamp(rq->hwtstamp_config)))
skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
rq->clock, get_cqe_ts(cqe));
skb_record_rx_queue(skb, rq->ix);
@@ -1598,9 +1629,11 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
if (unlikely(mlx5e_skb_is_multicast(skb)))
stats->mcast_packets++;
+
+ return false;
}
-static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
+static bool mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe,
u32 cqe_bcnt,
struct sk_buff *skb)
@@ -1610,16 +1643,20 @@ static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
stats->packets++;
stats->bytes += cqe_bcnt;
if (NAPI_GRO_CB(skb)->count != 1)
- return;
- mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
+ return false;
+
+ if (mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb))
+ return true;
+
skb_reset_network_header(skb);
if (!skb_flow_dissect_flow_keys(skb, &rq->hw_gro_data->fk, 0)) {
napi_gro_receive(rq->cq.napi, skb);
rq->hw_gro_data->skb = NULL;
}
+ return false;
}
-static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
+static inline bool mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe,
u32 cqe_bcnt,
struct sk_buff *skb)
@@ -1628,7 +1665,7 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
stats->packets++;
stats->bytes += cqe_bcnt;
- mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
+ return mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
}
static inline
@@ -1675,28 +1712,28 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
dma_addr_t addr;
u32 frag_size;
- va = page_address(frag_page->page) + wi->offset;
+ va = netmem_address(frag_page->netmem) + wi->offset;
data = va + rx_headroom;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
- addr = page_pool_get_dma_addr(frag_page->page);
+ addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
frag_size, rq->buff.map_dir);
net_prefetch(data);
prog = rcu_dereference(rq->xdp_prog);
if (prog) {
- struct mlx5e_xdp_buff mxbuf;
+ struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
net_prefetchw(va); /* xdp_frame data area */
mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
- cqe_bcnt, &mxbuf);
- if (mlx5e_xdp_handle(rq, prog, &mxbuf))
+ cqe_bcnt, mxbuf);
+ if (mlx5e_xdp_handle(rq, prog, mxbuf))
return NULL; /* page/packet was consumed by XDP */
- rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
- metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
- cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
+ rx_headroom = mxbuf->xdp.data - mxbuf->xdp.data_hard_start;
+ metasize = mxbuf->xdp.data - mxbuf->xdp.data_meta;
+ cqe_bcnt = mxbuf->xdp.data_end - mxbuf->xdp.data;
}
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
@@ -1715,11 +1752,11 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
{
struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
+ struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
struct mlx5e_wqe_frag_info *head_wi = wi;
u16 rx_headroom = rq->buff.headroom;
struct mlx5e_frag_page *frag_page;
struct skb_shared_info *sinfo;
- struct mlx5e_xdp_buff mxbuf;
u32 frag_consumed_bytes;
struct bpf_prog *prog;
struct sk_buff *skb;
@@ -1729,18 +1766,18 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
frag_page = wi->frag_page;
- va = page_address(frag_page->page) + wi->offset;
+ va = netmem_address(frag_page->netmem) + wi->offset;
frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
- addr = page_pool_get_dma_addr(frag_page->page);
+ addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
rq->buff.frame0_sz, rq->buff.map_dir);
net_prefetchw(va); /* xdp_frame data area */
net_prefetch(va + rx_headroom);
mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
- frag_consumed_bytes, &mxbuf);
- sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
+ frag_consumed_bytes, mxbuf);
+ sinfo = xdp_get_shared_info_from_buff(&mxbuf->xdp);
truesize = 0;
cqe_bcnt -= frag_consumed_bytes;
@@ -1752,8 +1789,9 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
- mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page,
- wi->offset, frag_consumed_bytes);
+ mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf->xdp,
+ frag_page, wi->offset,
+ frag_consumed_bytes);
truesize += frag_info->frag_stride;
cqe_bcnt -= frag_consumed_bytes;
@@ -1762,31 +1800,45 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
}
prog = rcu_dereference(rq->xdp_prog);
- if (prog && mlx5e_xdp_handle(rq, prog, &mxbuf)) {
- if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
- struct mlx5e_wqe_frag_info *pwi;
+ if (prog) {
+ u8 nr_frags_free, old_nr_frags = sinfo->nr_frags;
+
+ if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
+ if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT,
+ rq->flags)) {
+ struct mlx5e_wqe_frag_info *pwi;
+
+ wi -= old_nr_frags - sinfo->nr_frags;
- for (pwi = head_wi; pwi < wi; pwi++)
- pwi->frag_page->frags++;
+ for (pwi = head_wi; pwi < wi; pwi++)
+ pwi->frag_page->frags++;
+ }
+ return NULL; /* page/packet was consumed by XDP */
+ }
+
+ nr_frags_free = old_nr_frags - sinfo->nr_frags;
+ if (unlikely(nr_frags_free)) {
+ wi -= nr_frags_free;
+ truesize -= nr_frags_free * frag_info->frag_stride;
}
- return NULL; /* page/packet was consumed by XDP */
}
- skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start, rq->buff.frame0_sz,
- mxbuf.xdp.data - mxbuf.xdp.data_hard_start,
- mxbuf.xdp.data_end - mxbuf.xdp.data,
- mxbuf.xdp.data - mxbuf.xdp.data_meta);
+ skb = mlx5e_build_linear_skb(
+ rq, mxbuf->xdp.data_hard_start, rq->buff.frame0_sz,
+ mxbuf->xdp.data - mxbuf->xdp.data_hard_start,
+ mxbuf->xdp.data_end - mxbuf->xdp.data,
+ mxbuf->xdp.data - mxbuf->xdp.data_meta);
if (unlikely(!skb))
return NULL;
skb_mark_for_recycle(skb);
head_wi->frag_page->frags++;
- if (xdp_buff_has_frags(&mxbuf.xdp)) {
+ if (xdp_buff_has_frags(&mxbuf->xdp)) {
/* sinfo->nr_frags is reset by build_skb, calculate again. */
- xdp_update_skb_shared_info(skb, wi - head_wi - 1,
- sinfo->xdp_frags_size, truesize,
- xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
+ xdp_update_skb_frags_info(skb, wi - head_wi - 1,
+ sinfo->xdp_frags_size, truesize,
+ xdp_buff_get_skb_flags(&mxbuf->xdp));
for (struct mlx5e_wqe_frag_info *pwi = head_wi + 1; pwi < wi; pwi++)
pwi->frag_page->frags++;
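
The nr_frags_free handling added above covers the case where the XDP program shrank the
fragment list (for example via bpf_xdp_adjust_tail()): the WQE-fragment cursor is walked back
by the number of released fragments and truesize stops accounting for their strides. A tiny
standalone model of that adjustment (stride and counts are invented):

	#include <stdio.h>

	/* Model of the post-XDP adjustment above: frags released by the program no
	 * longer contribute to truesize and are skipped when re-referencing pages.
	 */
	int main(void)
	{
		unsigned int frag_stride = 4096; /* frag_info->frag_stride, assumed */
		unsigned int old_nr_frags = 3;   /* sinfo->nr_frags before the program ran */
		unsigned int new_nr_frags = 1;   /* sinfo->nr_frags after a shrinking tail adjust */
		unsigned int truesize = old_nr_frags * frag_stride;
		unsigned int nr_frags_free = old_nr_frags - new_nr_frags;

		truesize -= nr_frags_free * frag_stride;
		printf("%u fragments released, truesize now %u\n", nr_frags_free, truesize);
		return 0;
	}
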
@@ -1842,7 +1894,8 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
goto wq_cyc_pop;
}
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
+ goto wq_cyc_pop;
if (mlx5e_cqe_regb_chain(cqe))
if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
@@ -1889,7 +1942,8 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
goto wq_cyc_pop;
}
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
+ goto wq_cyc_pop;
if (rep->vlan && skb_vlan_tag_present(skb))
skb_vlan_pop(skb);
@@ -1938,7 +1992,8 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
if (!skb)
goto mpwrq_cqe_out;
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
+ goto mpwrq_cqe_out;
mlx5e_rep_tc_receive(cqe, rq, skb);
@@ -1986,11 +2041,12 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx];
u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
struct mlx5e_frag_page *head_page = frag_page;
+ struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
u32 frag_offset = head_offset;
u32 byte_cnt = cqe_bcnt;
struct skb_shared_info *sinfo;
- struct mlx5e_xdp_buff mxbuf;
unsigned int truesize = 0;
+ u32 pg_consumed_bytes;
struct bpf_prog *prog;
struct sk_buff *skb;
u32 linear_frame_sz;
@@ -2002,12 +2058,14 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
if (prog) {
/* area for bpf_xdp_[store|load]_bytes */
- net_prefetchw(page_address(frag_page->page) + frag_offset);
- if (unlikely(mlx5e_page_alloc_fragmented(rq, &wi->linear_page))) {
+ net_prefetchw(netmem_address(frag_page->netmem) + frag_offset);
+ if (unlikely(mlx5e_page_alloc_fragmented(rq->page_pool,
+ &wi->linear_page))) {
rq->stats->buff_alloc_err++;
return NULL;
}
- va = page_address(wi->linear_page.page);
+
+ va = netmem_address(wi->linear_page.netmem);
net_prefetchw(va); /* xdp_frame data area */
linear_hr = XDP_PACKET_HEADROOM;
linear_data_len = 0;
@@ -2035,20 +2093,23 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
}
}
- mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz, linear_data_len, &mxbuf);
+ mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz,
+ linear_data_len, mxbuf);
- sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
+ sinfo = xdp_get_shared_info_from_buff(&mxbuf->xdp);
while (byte_cnt) {
/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
- u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
+ pg_consumed_bytes =
+ min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
truesize += pg_consumed_bytes;
else
truesize += ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
- mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page, frag_offset,
+ mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf->xdp,
+ frag_page, frag_offset,
pg_consumed_bytes);
byte_cnt -= pg_consumed_bytes;
frag_offset = 0;
@@ -2056,55 +2117,77 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
}
if (prog) {
- if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
+ u8 nr_frags_free, old_nr_frags = sinfo->nr_frags;
+ u32 len;
+
+ if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
struct mlx5e_frag_page *pfp;
+ frag_page -= old_nr_frags - sinfo->nr_frags;
+
for (pfp = head_page; pfp < frag_page; pfp++)
pfp->frags++;
wi->linear_page.frags++;
}
- mlx5e_page_release_fragmented(rq, &wi->linear_page);
+ mlx5e_page_release_fragmented(rq->page_pool,
+ &wi->linear_page);
return NULL; /* page/packet was consumed by XDP */
}
- skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start,
- linear_frame_sz,
- mxbuf.xdp.data - mxbuf.xdp.data_hard_start, 0,
- mxbuf.xdp.data - mxbuf.xdp.data_meta);
+ nr_frags_free = old_nr_frags - sinfo->nr_frags;
+ if (unlikely(nr_frags_free)) {
+ frag_page -= nr_frags_free;
+ truesize -= (nr_frags_free - 1) * PAGE_SIZE +
+ ALIGN(pg_consumed_bytes,
+ BIT(rq->mpwqe.log_stride_sz));
+ }
+
+ len = mxbuf->xdp.data_end - mxbuf->xdp.data;
+
+ skb = mlx5e_build_linear_skb(
+ rq, mxbuf->xdp.data_hard_start, linear_frame_sz,
+ mxbuf->xdp.data - mxbuf->xdp.data_hard_start, len,
+ mxbuf->xdp.data - mxbuf->xdp.data_meta);
if (unlikely(!skb)) {
- mlx5e_page_release_fragmented(rq, &wi->linear_page);
+ mlx5e_page_release_fragmented(rq->page_pool,
+ &wi->linear_page);
return NULL;
}
skb_mark_for_recycle(skb);
wi->linear_page.frags++;
- mlx5e_page_release_fragmented(rq, &wi->linear_page);
+ mlx5e_page_release_fragmented(rq->page_pool, &wi->linear_page);
- if (xdp_buff_has_frags(&mxbuf.xdp)) {
+ if (xdp_buff_has_frags(&mxbuf->xdp)) {
struct mlx5e_frag_page *pagep;
/* sinfo->nr_frags is reset by build_skb, calculate again. */
- xdp_update_skb_shared_info(skb, frag_page - head_page,
- sinfo->xdp_frags_size, truesize,
- xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
+ xdp_update_skb_frags_info(skb, frag_page - head_page,
+ sinfo->xdp_frags_size,
+ truesize,
+ xdp_buff_get_skb_flags(&mxbuf->xdp));
pagep = head_page;
do
pagep->frags++;
while (++pagep < frag_page);
+
+ headlen = min_t(u16, MLX5E_RX_MAX_HEAD - len,
+ skb->data_len);
+ __pskb_pull_tail(skb, headlen);
}
- __pskb_pull_tail(skb, headlen);
} else {
dma_addr_t addr;
- if (xdp_buff_has_frags(&mxbuf.xdp)) {
+ if (xdp_buff_has_frags(&mxbuf->xdp)) {
struct mlx5e_frag_page *pagep;
- xdp_update_skb_shared_info(skb, sinfo->nr_frags,
- sinfo->xdp_frags_size, truesize,
- xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
+ xdp_update_skb_frags_info(skb, sinfo->nr_frags,
+ sinfo->xdp_frags_size,
+ truesize,
+ xdp_buff_get_skb_flags(&mxbuf->xdp));
pagep = frag_page - sinfo->nr_frags;
do
@@ -2112,8 +2195,8 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
while (++pagep < frag_page);
}
/* copy header */
- addr = page_pool_get_dma_addr(head_page->page);
- mlx5e_copy_skb_header(rq, skb, head_page->page, addr,
+ addr = page_pool_get_dma_addr_netmem(head_page->netmem);
+ mlx5e_copy_skb_header(rq, skb, head_page->netmem, addr,
head_offset, head_offset, headlen);
/* skb linear part was allocated with headlen and aligned to long */
skb->tail += headlen;
@@ -2143,31 +2226,31 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
return NULL;
}
- va = page_address(frag_page->page) + head_offset;
+ va = netmem_address(frag_page->netmem) + head_offset;
data = va + rx_headroom;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
- addr = page_pool_get_dma_addr(frag_page->page);
+ addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
dma_sync_single_range_for_cpu(rq->pdev, addr, head_offset,
frag_size, rq->buff.map_dir);
net_prefetch(data);
prog = rcu_dereference(rq->xdp_prog);
if (prog) {
- struct mlx5e_xdp_buff mxbuf;
+ struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
net_prefetchw(va); /* xdp_frame data area */
mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
- cqe_bcnt, &mxbuf);
- if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
+ cqe_bcnt, mxbuf);
+ if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
frag_page->frags++;
return NULL; /* page/packet was consumed by XDP */
}
- rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
- metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
- cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
+ rx_headroom = mxbuf->xdp.data - mxbuf->xdp.data_hard_start;
+ metasize = mxbuf->xdp.data - mxbuf->xdp.data_meta;
+ cqe_bcnt = mxbuf->xdp.data_end - mxbuf->xdp.data;
}
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
@@ -2185,29 +2268,34 @@ static struct sk_buff *
mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
struct mlx5_cqe64 *cqe, u16 header_index)
{
- struct mlx5e_dma_info *head = &rq->mpwqe.shampo->info[header_index];
- u16 head_offset = head->addr & (PAGE_SIZE - 1);
+ struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
+ u16 head_offset = mlx5e_shampo_hd_offset(rq, header_index);
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
u16 head_size = cqe->shampo.header_size;
u16 rx_headroom = rq->buff.headroom;
struct sk_buff *skb = NULL;
+ dma_addr_t page_dma_addr;
+ dma_addr_t dma_addr;
void *hdr, *data;
u32 frag_size;
- hdr = page_address(head->frag_page->page) + head_offset;
+ page_dma_addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
+ dma_addr = page_dma_addr + head_offset;
+
+ hdr = netmem_address(frag_page->netmem) + head_offset;
data = hdr + rx_headroom;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + head_size);
- if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) {
+ if (likely(frag_size <= BIT(shampo->log_hd_entry_size))) {
/* build SKB around header */
- dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, rq->buff.map_dir);
+ dma_sync_single_range_for_cpu(rq->pdev, dma_addr, 0, frag_size, rq->buff.map_dir);
net_prefetchw(hdr);
net_prefetch(data);
skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0);
-
if (unlikely(!skb))
return NULL;
- head->frag_page->frags++;
+ frag_page->frags++;
} else {
/* allocate SKB and copy header for large header */
rq->stats->gro_large_hds++;
@@ -2219,7 +2307,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
}
net_prefetchw(skb->data);
- mlx5e_copy_skb_header(rq, skb, head->frag_page->page, head->addr,
+ mlx5e_copy_skb_header(rq, skb, frag_page->netmem, dma_addr,
head_offset + rx_headroom,
rx_headroom, head_size);
/* skb linear part was allocated with headlen and aligned to long */
@@ -2271,7 +2359,10 @@ mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt)
{
int nr_frags = skb_shinfo(skb)->nr_frags;
- return PAGE_SIZE * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE;
+ if (PAGE_SIZE >= GRO_LEGACY_MAX_SIZE)
+ return skb->len + data_bcnt <= GRO_LEGACY_MAX_SIZE;
+ else
+ return PAGE_SIZE * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE;
}
static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
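
The reworked space check above exists because the old bound, PAGE_SIZE * nr_frags, can exceed
GRO_LEGACY_MAX_SIZE even for a single fragment when PAGE_SIZE is 64 KiB, which would block
HW-GRO appends on such systems; in that case the real skb length is the meaningful bound. A
self-contained restatement of the check (the constant and sample values are illustrative):

	#include <stdbool.h>
	#include <stdio.h>

	#define GRO_LEGACY_MAX_SIZE 65536U /* illustrative; the kernel defines the real value */

	/* Restatement of mlx5e_hw_gro_skb_has_enough_space() from the hunk above. */
	static bool has_enough_space(unsigned long page_size, unsigned int skb_len,
				     unsigned int nr_frags, unsigned int data_bcnt)
	{
		if (page_size >= GRO_LEGACY_MAX_SIZE)
			return skb_len + data_bcnt <= GRO_LEGACY_MAX_SIZE;
		return page_size * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE;
	}

	int main(void)
	{
		printf("4 KiB pages, 17 frags: %d\n", has_enough_space(4096, 20000, 17, 1400));
		printf("64 KiB pages, 1 frag:  %d\n", has_enough_space(65536, 20000, 1, 1400));
		return 0;
	}
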
@@ -2313,11 +2404,23 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
}
if (!*skb) {
- if (likely(head_size))
+ if (likely(head_size)) {
*skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
- else
- *skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe, cqe_bcnt,
- data_offset, page_idx);
+ } else {
+ struct mlx5e_frag_page *frag_page;
+
+ frag_page = &wi->alloc_units.frag_pages[page_idx];
+ /* Drop packets with header in unreadable data area to
+ * prevent the kernel from touching it.
+ */
+ if (unlikely(netmem_is_net_iov(frag_page->netmem)))
+ goto free_hd_entry;
+ *skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe,
+ cqe_bcnt,
+ data_offset,
+ page_idx);
+ }
+
if (unlikely(!*skb))
goto free_hd_entry;
@@ -2351,7 +2454,10 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
stats->hds_nosplit_bytes += data_bcnt;
}
- mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
+ if (mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb)) {
+ *skb = NULL;
+ goto free_hd_entry;
+ }
if (flush && rq->hw_gro_data->skb)
mlx5e_shampo_flush_skb(rq, cqe, match);
free_hd_entry:
@@ -2409,7 +2515,8 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
if (!skb)
goto mpwrq_cqe_out;
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
+ goto mpwrq_cqe_out;
if (mlx5e_cqe_regb_chain(cqe))
if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
@@ -2549,7 +2656,6 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
u32 cqe_bcnt,
struct sk_buff *skb)
{
- struct hwtstamp_config *tstamp;
struct mlx5e_rq_stats *stats;
struct net_device *netdev;
struct mlx5e_priv *priv;
@@ -2573,7 +2679,6 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
}
priv = mlx5i_epriv(netdev);
- tstamp = &priv->tstamp;
stats = &priv->channel_stats[rq->ix]->rq;
flags_rqpn = be32_to_cpu(cqe->flags_rqpn);
@@ -2609,7 +2714,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
stats->csum_none++;
}
- if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
+ if (unlikely(mlx5e_rx_hw_stamp(&priv->hwtstamp_config)))
skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
rq->clock, get_cqe_ts(cqe));
skb_record_rx_queue(skb, rq->ix);
@@ -2742,7 +2847,8 @@ static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe
if (!skb)
goto wq_cyc_pop;
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
+ goto wq_cyc_pop;
skb_push(skb, ETH_HLEN);
mlx5_devlink_trap_report(rq->mdev, trap_id, skb,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 5bf8318cc48b..fcad464bc4d5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -36,6 +36,7 @@
#include "en.h"
#include "en/port.h"
#include "eswitch.h"
+#include "lib/mlx5.h"
static int mlx5e_test_health_info(struct mlx5e_priv *priv)
{
@@ -165,6 +166,9 @@ mlx5e_test_loopback_validate(struct sk_buff *skb,
struct udphdr *udph;
struct iphdr *iph;
+ if (skb_linearize(skb))
+ goto out;
+
/* We are only going to peek, no need to clone the SKB */
if (MLX5E_TEST_PKT_SIZE - ETH_HLEN > skb_headlen(skb))
goto out;
@@ -210,7 +214,7 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
return err;
}
- err = mlx5e_refresh_tirs(priv, true, false);
+ err = mlx5e_modify_tirs_lb(priv->mdev, true, false);
if (err)
goto out;
@@ -239,7 +243,7 @@ static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv,
mlx5_nic_vport_update_local_lb(priv->mdev, false);
dev_remove_pack(&lbtp->pt);
- mlx5e_refresh_tirs(priv, false, false);
+ mlx5e_modify_tirs_lb(priv->mdev, false, false);
}
static int mlx5e_cond_loopback(struct mlx5e_priv *priv)
@@ -247,6 +251,9 @@ static int mlx5e_cond_loopback(struct mlx5e_priv *priv)
if (is_mdev_switchdev_mode(priv->mdev))
return -EOPNOTSUPP;
+ if (mlx5_get_sd(priv->mdev))
+ return -EOPNOTSUPP;
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 611ec4b6f370..a2802cfc9b98 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -37,9 +37,7 @@
#include "en/ptp.h"
#include "en/port.h"
-#ifdef CONFIG_PAGE_POOL_STATS
#include <net/page_pool/helpers.h>
-#endif
void mlx5e_ethtool_put_stat(u64 **data, u64 val)
{
@@ -196,7 +194,6 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
#endif
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
-#ifdef CONFIG_PAGE_POOL_STATS
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_fast) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow_high_order) },
@@ -208,7 +205,6 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring_full) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_released_ref) },
-#endif
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
@@ -377,7 +373,6 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_arfs_err += rq_stats->arfs_err;
#endif
s->rx_recover += rq_stats->recover;
-#ifdef CONFIG_PAGE_POOL_STATS
s->rx_pp_alloc_fast += rq_stats->pp_alloc_fast;
s->rx_pp_alloc_slow += rq_stats->pp_alloc_slow;
s->rx_pp_alloc_empty += rq_stats->pp_alloc_empty;
@@ -389,7 +384,6 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_pp_recycle_ring += rq_stats->pp_recycle_ring;
s->rx_pp_recycle_ring_full += rq_stats->pp_recycle_ring_full;
s->rx_pp_recycle_released_ref += rq_stats->pp_recycle_released_ref;
-#endif
#ifdef CONFIG_MLX5_EN_TLS
s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes;
@@ -496,7 +490,6 @@ static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
}
}
-#ifdef CONFIG_PAGE_POOL_STATS
static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
{
struct mlx5e_rq_stats *rq_stats = c->rq.stats;
@@ -519,11 +512,6 @@ static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
rq_stats->pp_recycle_ring_full = stats.recycle_stats.ring_full;
rq_stats->pp_recycle_released_ref = stats.recycle_stats.released_refcnt;
}
-#else
-static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
-{
-}
-#endif
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
@@ -1227,6 +1215,13 @@ out:
mutex_unlock(&priv->state_lock);
}
+#define PPORT_PHY_LAYER_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.phys_layer_cntrs.c)
+static const struct counter_desc pport_phy_layer_cntrs_stats_desc[] = {
+ { "link_down_events_phy", PPORT_PHY_LAYER_OFF(link_down_events) }
+};
+
#define PPORT_PHY_STATISTICAL_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.phys_layer_statistical_cntrs.c##_high)
@@ -1243,25 +1238,45 @@ pport_phy_statistical_err_lanes_stats_desc[] = {
{ "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
};
+#define PPORT_PHY_RECOVERY_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, counter_set.phys_layer_recovery_cntrs.c)
+static const struct counter_desc
+pport_phy_recovery_cntrs_stats_desc[] = {
+ { "total_success_recovery_phy",
+ PPORT_PHY_RECOVERY_OFF(total_successful_recovery_events) }
+};
+
+#define NUM_PPORT_PHY_LAYER_COUNTERS \
+ ARRAY_SIZE(pport_phy_layer_cntrs_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
ARRAY_SIZE(pport_phy_statistical_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)
+#define NUM_PPORT_PHY_RECOVERY_COUNTERS \
+ ARRAY_SIZE(pport_phy_recovery_cntrs_stats_desc)
+
+#define NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(dev) \
+ (MLX5_CAP_PCAM_FEATURE(dev, ppcnt_statistical_group) ? \
+ NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0)
+#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(dev) \
+ (MLX5_CAP_PCAM_FEATURE(dev, per_lane_error_counters) ? \
+ NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0)
+#define NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(dev) \
+ (MLX5_CAP_PCAM_FEATURE(dev, ppcnt_recovery_counters) ? \
+ NUM_PPORT_PHY_RECOVERY_COUNTERS : 0)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
{
struct mlx5_core_dev *mdev = priv->mdev;
int num_stats;
- /* "1" for link_down_events special counter */
- num_stats = 1;
+ num_stats = NUM_PPORT_PHY_LAYER_COUNTERS;
- num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
- NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;
+ num_stats += NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(mdev);
- num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
- NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;
+ num_stats += NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(mdev);
+ num_stats += NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(mdev);
return num_stats;
}
@@ -1270,18 +1285,22 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
struct mlx5_core_dev *mdev = priv->mdev;
int i;
- ethtool_puts(data, "link_down_events_phy");
-
- if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
- return;
+ for (i = 0; i < NUM_PPORT_PHY_LAYER_COUNTERS; i++)
+ ethtool_puts(data, pport_phy_layer_cntrs_stats_desc[i].format);
- for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
+ for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(mdev); i++)
ethtool_puts(data, pport_phy_statistical_stats_desc[i].format);
- if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
- for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
- ethtool_puts(data,
- pport_phy_statistical_err_lanes_stats_desc[i].format);
+ for (i = 0;
+ i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(mdev);
+ i++)
+ ethtool_puts(data,
+ pport_phy_statistical_err_lanes_stats_desc[i]
+ .format);
+
+ for (i = 0; i < NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(mdev); i++)
+ ethtool_puts(data,
+ pport_phy_recovery_cntrs_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
@@ -1289,30 +1308,35 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
struct mlx5_core_dev *mdev = priv->mdev;
int i;
- /* link_down_events_phy has special handling since it is not stored in __be64 format */
- mlx5e_ethtool_put_stat(
- data, MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
- counter_set.phys_layer_cntrs.link_down_events));
-
- if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
- return;
+ for (i = 0; i < NUM_PPORT_PHY_LAYER_COUNTERS; i++)
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR32_BE(&priv->stats.pport
+ .phy_counters,
+ pport_phy_layer_cntrs_stats_desc, i));
- for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
+ for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(mdev); i++)
mlx5e_ethtool_put_stat(
data,
MLX5E_READ_CTR64_BE(
&priv->stats.pport.phy_statistical_counters,
pport_phy_statistical_stats_desc, i));
- if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
- for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
- mlx5e_ethtool_put_stat(
- data,
- MLX5E_READ_CTR64_BE(
- &priv->stats.pport
- .phy_statistical_counters,
- pport_phy_statistical_err_lanes_stats_desc,
- i));
+ for (i = 0;
+ i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(mdev);
+ i++)
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(
+ &priv->stats.pport.phy_statistical_counters,
+ pport_phy_statistical_err_lanes_stats_desc, i));
+
+ for (i = 0; i < NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(mdev); i++)
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR32_BE(
+ &priv->stats.pport.phy_recovery_counters,
+ pport_phy_recovery_cntrs_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
@@ -1328,12 +1352,21 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
- if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
- return;
+ if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
+ out = pstats->phy_statistical_counters;
+ MLX5_SET(ppcnt_reg, in, grp,
+ MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0,
+ 0);
+ }
- out = pstats->phy_statistical_counters;
- MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+ if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_recovery_counters)) {
+ out = pstats->phy_recovery_counters;
+ MLX5_SET(ppcnt_reg, in, grp,
+ MLX5_PHYSICAL_LAYER_RECOVERY_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0,
+ 0);
+ }
}
void mlx5e_get_link_ext_stats(struct net_device *dev,
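
The refactor above moves link_down_events into a regular counter_desc table and adds an
optional recovery-counter group, so the ethtool string/value loops become uniform and each
optional group contributes either its full ARRAY_SIZE or zero depending on a PCAM capability
bit. A userspace model of that counting pattern (the capability value and offsets are
invented):

	#include <stdio.h>
	#include <stddef.h>

	/* Model of the capability-gated counter groups above: each group is a static
	 * {name, offset} table and its contribution to the ethtool count is either
	 * ARRAY_SIZE(table) or 0. The capability flag below is a stand-in.
	 */
	struct counter_desc { const char *format; size_t offset; };

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	static const struct counter_desc phy_layer_cntrs[] = {
		{ "link_down_events_phy", 0 },
	};
	static const struct counter_desc phy_recovery_cntrs[] = {
		{ "total_success_recovery_phy", 0 },
	};

	int main(void)
	{
		int cap_recovery_counters = 0; /* pretend PCAM bit, disabled on this device */
		size_t num = ARRAY_SIZE(phy_layer_cntrs);

		num += cap_recovery_counters ? ARRAY_SIZE(phy_recovery_cntrs) : 0;
		printf("phy group exposes %zu counters\n", num);
		return 0;
	}
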
@@ -1413,16 +1446,13 @@ static void fec_set_rs_stats(struct ethtool_fec_stats *fec_stats, u32 *ppcnt)
}
static void fec_set_block_stats(struct mlx5e_priv *priv,
+ int mode,
struct ethtool_fec_stats *fec_stats)
{
struct mlx5_core_dev *mdev = priv->mdev;
u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
- int mode = fec_active_mode(mdev);
-
- if (mode == MLX5E_FEC_NOFEC)
- return;
MLX5_SET(ppcnt_reg, in, local_port, 1);
MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
@@ -1433,6 +1463,7 @@ static void fec_set_block_stats(struct mlx5e_priv *priv,
case MLX5E_FEC_RS_528_514:
case MLX5E_FEC_RS_544_514:
case MLX5E_FEC_LLRS_272_257_1:
+ case MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD:
fec_set_rs_stats(fec_stats, out);
return;
case MLX5E_FEC_FIRECODE:
@@ -1460,14 +1491,132 @@ static void fec_set_corrected_bits_total(struct mlx5e_priv *priv,
phy_corrected_bits);
}
+#define MLX5_RS_HISTOGRAM_ENTRIES \
+ (MLX5_FLD_SZ_BYTES(rs_histogram_cntrs, hist) / \
+ MLX5_FLD_SZ_BYTES(rs_histogram_cntrs, hist[0]))
+
+enum {
+ MLX5E_HISTOGRAM_FEC_RS_544_514 = 1,
+ MLX5E_HISTOGRAM_FEC_LLRS = 2,
+ MLX5E_HISTOGRAM_FEC_RS_528_514 = 3,
+};
+
+static bool fec_rs_validate_hist_type(int mode, int hist_type)
+{
+ switch (mode) {
+ case MLX5E_FEC_RS_528_514:
+ return hist_type == MLX5E_HISTOGRAM_FEC_RS_528_514;
+ case MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD:
+ case MLX5E_FEC_RS_544_514:
+ return hist_type == MLX5E_HISTOGRAM_FEC_RS_544_514;
+ case MLX5E_FEC_LLRS_272_257_1:
+ return hist_type == MLX5E_HISTOGRAM_FEC_LLRS;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static u8
+fec_rs_histogram_fill_ranges(struct mlx5e_priv *priv, int mode,
+ const struct ethtool_fec_hist_range **ranges)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 out[MLX5_ST_SZ_DW(pphcr_reg)] = {0};
+ u32 in[MLX5_ST_SZ_DW(pphcr_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(pphcr_reg);
+ u8 hist_type, num_of_bins;
+
+ memset(priv->fec_ranges, 0,
+ ETHTOOL_FEC_HIST_MAX * sizeof(*priv->fec_ranges));
+ MLX5_SET(pphcr_reg, in, local_port, 1);
+ if (mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPHCR, 0, 0))
+ return 0;
+
+ hist_type = MLX5_GET(pphcr_reg, out, active_hist_type);
+ if (!fec_rs_validate_hist_type(mode, hist_type))
+ return 0;
+
+ num_of_bins = MLX5_GET(pphcr_reg, out, num_of_bins);
+ if (WARN_ON_ONCE(num_of_bins > MLX5_RS_HISTOGRAM_ENTRIES))
+ return 0;
+
+ for (int i = 0; i < num_of_bins; i++) {
+ void *bin_range = MLX5_ADDR_OF(pphcr_reg, out, bin_range[i]);
+
+ priv->fec_ranges[i].high = MLX5_GET(bin_range_layout, bin_range,
+ high_val);
+ priv->fec_ranges[i].low = MLX5_GET(bin_range_layout, bin_range,
+ low_val);
+ }
+ *ranges = priv->fec_ranges;
+
+ return num_of_bins;
+}
+
+static void fec_rs_histogram_fill_stats(struct mlx5e_priv *priv,
+ u8 num_of_bins,
+ struct ethtool_fec_hist *hist)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ void *rs_histogram_cntrs;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_RS_FEC_HISTOGRAM_GROUP);
+ if (mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0))
+ return;
+
+ rs_histogram_cntrs = MLX5_ADDR_OF(ppcnt_reg, out,
+ counter_set.rs_histogram_cntrs);
+	/* fec_rs_histogram_fill_ranges() guarantees that num_of_bins is
+	 * less than MLX5E_FEC_RS_HIST_MAX.
+ */
+ for (int i = 0; i < num_of_bins; i++)
+ hist->values[i].sum = MLX5_GET64(rs_histogram_cntrs,
+ rs_histogram_cntrs,
+ hist[i]);
+}
+
+static void fec_set_histograms_stats(struct mlx5e_priv *priv, int mode,
+ struct ethtool_fec_hist *hist)
+{
+ u8 num_of_bins;
+
+ switch (mode) {
+ case MLX5E_FEC_RS_528_514:
+ case MLX5E_FEC_RS_544_514:
+ case MLX5E_FEC_LLRS_272_257_1:
+ case MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD:
+ num_of_bins =
+ fec_rs_histogram_fill_ranges(priv, mode, &hist->ranges);
+ if (num_of_bins)
+ return fec_rs_histogram_fill_stats(priv, num_of_bins,
+ hist);
+ break;
+ default:
+ return;
+ }
+}
+
void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
- if (!MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group))
+ int mode = fec_active_mode(priv->mdev);
+
+ if (mode == MLX5E_FEC_NOFEC ||
+ !MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group))
return;
fec_set_corrected_bits_total(priv, fec_stats);
- fec_set_block_stats(priv, fec_stats);
+ fec_set_block_stats(priv, mode, fec_stats);
+
+ if (MLX5_CAP_PCAM_REG(priv->mdev, pphcr))
+ fec_set_histograms_stats(priv, mode, hist);
}
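
The FEC histogram flow added above is two register reads stitched together: PPHCR supplies
the per-bin [low, high] corrected-symbol ranges (and the active histogram type, which must
match the active FEC mode), while the PPCNT RS-FEC histogram group supplies the per-bin event
sums that land in the ethtool structure. A userspace model of how ranges and sums pair up
(all values invented):

	#include <stdio.h>

	/* Model of the range/sum pairing above: ranges come from one register read,
	 * sums from another, and entry i of each describes the same histogram bin.
	 */
	struct bin_range { unsigned int low, high; };

	int main(void)
	{
		struct bin_range ranges[] = { {0, 0}, {1, 1}, {2, 3}, {4, 7} }; /* as if from PPHCR */
		unsigned long long sums[] = { 901234, 5120, 87, 3 };            /* as if from PPCNT */
		unsigned int num_of_bins = sizeof(ranges) / sizeof(ranges[0]);

		for (unsigned int i = 0; i < num_of_bins; i++)
			printf("codewords with %u..%u corrected symbols: %llu\n",
			       ranges[i].low, ranges[i].high, sums[i]);
		return 0;
	}
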
#define PPORT_ETH_EXT_OFF(c) \
@@ -2086,7 +2235,6 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
#endif
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
-#ifdef CONFIG_PAGE_POOL_STATS
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_fast) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow_high_order) },
@@ -2098,7 +2246,6 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring_full) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_released_ref) },
-#endif
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
@@ -2393,8 +2540,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
}
if (priv->rx_ptp_opened) {
for (i = 0; i < NUM_PTP_RQ_STATS; i++)
- ethtool_sprintf(data, ptp_rq_stats_desc[i].format,
- MLX5E_PTP_CHANNEL_IX);
+ ethtool_puts(data, ptp_rq_stats_desc[i].format);
}
}
@@ -2582,6 +2728,7 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
#ifdef CONFIG_MLX5_MACSEC
&MLX5E_STATS_GRP(macsec_hw),
#endif
+ &MLX5E_STATS_GRP(pcie_cong),
};
unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 5961c569cfe0..09f155acb461 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -54,7 +54,7 @@
#define MLX5E_DECLARE_PTP_TX_STAT(type, fld) "ptp_tx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_PTP_CH_STAT(type, fld) "ptp_ch_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_PTP_CQ_STAT(type, fld) "ptp_cq%d_"#fld, offsetof(type, fld)
-#define MLX5E_DECLARE_PTP_RQ_STAT(type, fld) "ptp_rq%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_PTP_RQ_STAT(type, fld) "ptp_rq0_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_QOS_TX_STAT(type, fld) "qos_tx%d_"#fld, offsetof(type, fld)
@@ -117,7 +117,8 @@ void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv);
void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
struct ethtool_pause_stats *pause_stats);
void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
- struct ethtool_fec_stats *fec_stats);
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist);
void mlx5e_stats_eth_phy_get(struct mlx5e_priv *priv,
struct ethtool_eth_phy_stats *phy_stats);
@@ -215,7 +216,6 @@ struct mlx5e_sw_stats {
u64 ch_aff_change;
u64 ch_force_irq;
u64 ch_eq_rearm;
-#ifdef CONFIG_PAGE_POOL_STATS
u64 rx_pp_alloc_fast;
u64 rx_pp_alloc_slow;
u64 rx_pp_alloc_slow_high_order;
@@ -227,7 +227,6 @@ struct mlx5e_sw_stats {
u64 rx_pp_recycle_ring;
u64 rx_pp_recycle_ring_full;
u64 rx_pp_recycle_released_ref;
-#endif
#ifdef CONFIG_MLX5_EN_TLS
u64 tx_tls_encrypted_packets;
u64 tx_tls_encrypted_bytes;
@@ -309,6 +308,9 @@ struct mlx5e_vport_stats {
#define PPORT_PHY_STATISTICAL_GET(pstats, c) \
MLX5_GET64(ppcnt_reg, (pstats)->phy_statistical_counters, \
counter_set.phys_layer_statistical_cntrs.c##_high)
+#define PPORT_PHY_RECOVERY_GET(pstats, c) \
+ MLX5_GET64(ppcnt_reg, (pstats)->phy_recovery_counters, \
+ counter_set.phys_layer_recovery_cntrs.c)
#define PPORT_PER_PRIO_GET(pstats, prio, c) \
MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \
counter_set.eth_per_prio_grp_data_layout.c##_high)
@@ -324,6 +326,7 @@ struct mlx5e_pport_stats {
__be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 phy_statistical_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+ __be64 phy_recovery_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 per_tc_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 per_tc_congest_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
@@ -381,7 +384,6 @@ struct mlx5e_rq_stats {
u64 arfs_err;
#endif
u64 recover;
-#ifdef CONFIG_PAGE_POOL_STATS
u64 pp_alloc_fast;
u64 pp_alloc_slow;
u64 pp_alloc_slow_high_order;
@@ -393,7 +395,6 @@ struct mlx5e_rq_stats {
u64 pp_recycle_ring;
u64 pp_recycle_ring_full;
u64 pp_recycle_released_ref;
-#endif
#ifdef CONFIG_MLX5_EN_TLS
u64 tls_decrypted_packets;
u64 tls_decrypted_bytes;
@@ -535,5 +536,6 @@ extern MLX5E_DECLARE_STATS_GRP(ipsec_hw);
extern MLX5E_DECLARE_STATS_GRP(ipsec_sw);
extern MLX5E_DECLARE_STATS_GRP(ptp);
extern MLX5E_DECLARE_STATS_GRP(macsec_hw);
+extern MLX5E_DECLARE_STATS_GRP(pcie_cong);
#endif /* __MLX5_EN_STATS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 6b3b1afe8312..a8773b2342c2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -66,6 +66,7 @@
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
+#include "lib/mlx5.h"
#include "diag/en_tc_tracepoint.h"
#include <asm/div64.h>
#include "lag/lag.h"
@@ -757,11 +758,11 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
struct mlx5e_priv *priv = hp->func_priv;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_rss_params_indir indir;
+ u32 rqt_size;
int err;
- err = mlx5e_rss_params_indir_init(&indir, mdev,
- mlx5e_rqt_size(mdev, hp->num_channels),
- mlx5e_rqt_size(mdev, hp->num_channels));
+ rqt_size = mlx5e_rqt_size(mdev, hp->num_channels);
+ err = mlx5e_rss_params_indir_init(&indir, rqt_size, rqt_size);
if (err)
return err;
@@ -837,6 +838,9 @@ static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
ttc_params->ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
+ if (mlx5_ttc_is_decrypted_esp_tt(tt))
+ continue;
+
ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
ttc_params->dests[tt].tir_num =
tt == MLX5_TT_ANY ?
@@ -1282,7 +1286,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[dest_ix].counter_id = mlx5_fc_id(attr->counter);
+ dest[dest_ix].counter = attr->counter;
dest_ix++;
}
@@ -1750,9 +1754,6 @@ extra_split_attr_dests_needed(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr
!list_is_first(&attr->list, &flow->attrs))
return 0;
- if (flow_flag_test(flow, SLOW))
- return 0;
-
esw_attr = attr->esw_attr;
if (!esw_attr->split_count ||
esw_attr->split_count == esw_attr->out_count - 1)
@@ -1766,7 +1767,7 @@ extra_split_attr_dests_needed(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr
for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
/* external dest with encap is considered as internal by firmware */
if (esw_attr->dests[i].vport == MLX5_VPORT_UPLINK &&
- !(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID))
+ !(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
ext_dest = true;
else
int_dest = true;
@@ -2031,9 +2032,8 @@ err_out:
return err;
}
-static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
+static bool mlx5_flow_has_geneve_opt(struct mlx5_flow_spec *spec)
{
- struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec;
void *headers_v = MLX5_ADDR_OF(fte_match_param,
spec->match_value,
misc_parameters_3);
@@ -2072,7 +2072,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
}
complete_all(&flow->del_hw_done);
- if (mlx5_flow_has_geneve_opt(flow))
+ if (mlx5_flow_has_geneve_opt(&attr->parse_attr->spec))
mlx5_geneve_tlv_option_del(priv->mdev->geneve);
if (flow->decap_route)
@@ -2577,12 +2577,13 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
err = mlx5e_tc_tun_parse(filter_dev, priv, tmp_spec, f, match_level);
if (err) {
- kvfree(tmp_spec);
NL_SET_ERR_MSG_MOD(extack, "Failed to parse tunnel attributes");
netdev_warn(priv->netdev, "Failed to parse tunnel attributes");
- return err;
+ } else {
+ err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
}
- err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
+ if (mlx5_flow_has_geneve_opt(tmp_spec))
+ mlx5_geneve_tlv_option_del(priv->mdev->geneve);
kvfree(tmp_spec);
if (err)
return err;
@@ -3613,15 +3614,11 @@ static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv
bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
{
struct mlx5_core_dev *fmdev, *pmdev;
- u64 fsystem_guid, psystem_guid;
fmdev = priv->mdev;
pmdev = peer_priv->mdev;
- fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
- psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
-
- return (fsystem_guid == psystem_guid);
+ return mlx5_same_hw_devs(fmdev, pmdev);
}
static int
@@ -5236,10 +5233,11 @@ static void mlx5e_tc_nic_destroy_miss_table(struct mlx5e_priv *priv)
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
+ u8 mapping_id[MLX5_SW_IMAGE_GUID_MAX_BYTES];
struct mlx5_core_dev *dev = priv->mdev;
struct mapping_ctx *chains_mapping;
struct mlx5_chains_attr attr = {};
- u64 mapping_id;
+ u8 id_len;
int err;
mlx5e_mod_hdr_tbl_init(&tc->mod_hdr);
@@ -5255,11 +5253,13 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key);
lockdep_init_map(&tc->ht.run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0);
- mapping_id = mlx5_query_nic_system_image_guid(dev);
+ mlx5_query_nic_sw_system_image_guid(dev, mapping_id, &id_len);
- chains_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN,
+ chains_mapping = mapping_create_for_id(mapping_id, id_len,
+ MAPPING_TYPE_CHAIN,
sizeof(struct mlx5_mapped_obj),
- MLX5E_TC_TABLE_CHAIN_TAG_MASK, true);
+ MLX5E_TC_TABLE_CHAIN_TAG_MASK,
+ true);
if (IS_ERR(chains_mapping)) {
err = PTR_ERR(chains_mapping);
@@ -5390,13 +5390,15 @@ void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht)
int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
{
const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
+ u8 mapping_id[MLX5_SW_IMAGE_GUID_MAX_BYTES];
+ struct mlx5_devcom_match_attr attr = {};
struct netdev_phys_item_id ppid;
struct mlx5e_rep_priv *rpriv;
struct mapping_ctx *mapping;
struct mlx5_eswitch *esw;
struct mlx5e_priv *priv;
- u64 mapping_id, key;
int err = 0;
+ u8 id_len;
rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
priv = netdev_priv(rpriv->netdev);
@@ -5414,9 +5416,9 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
uplink_priv->tc_psample = mlx5e_tc_sample_init(esw, uplink_priv->post_act);
- mapping_id = mlx5_query_nic_system_image_guid(esw->dev);
+ mlx5_query_nic_sw_system_image_guid(esw->dev, mapping_id, &id_len);
- mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL,
+ mapping = mapping_create_for_id(mapping_id, id_len, MAPPING_TYPE_TUNNEL,
sizeof(struct tunnel_match_key),
TUNNEL_INFO_BITS_MASK, true);
@@ -5429,8 +5431,10 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
/* Two last values are reserved for stack devices slow path table mark
* and bridge ingress push mark.
*/
- mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL_ENC_OPTS,
- sz_enc_opts, ENC_OPTS_BITS_MASK - 2, true);
+ mapping = mapping_create_for_id(mapping_id, id_len,
+ MAPPING_TYPE_TUNNEL_ENC_OPTS,
+ sz_enc_opts, ENC_OPTS_BITS_MASK - 2,
+ true);
if (IS_ERR(mapping)) {
err = PTR_ERR(mapping);
goto err_enc_opts_mapping;
@@ -5449,10 +5453,12 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
goto err_action_counter;
}
- err = dev_get_port_parent_id(priv->netdev, &ppid, false);
+ err = netif_get_port_parent_id(priv->netdev, &ppid, false);
if (!err) {
- memcpy(&key, &ppid.id, sizeof(key));
- mlx5_esw_offloads_devcom_init(esw, key);
+ memcpy(&attr.key.buf, &ppid.id, ppid.id_len);
+ attr.flags = MLX5_DEVCOM_MATCH_FLAGS_NS;
+ attr.net = mlx5_core_net(esw->dev);
+ mlx5_esw_offloads_devcom_init(esw, &attr);
}
return 0;
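
Editorial aside on the devcom hunk above: the peer-matching key moves from a fixed u64 GUID to the variable-length port parent ID, copying only ppid.id_len bytes. A minimal sketch of that shape, using the helper named in the hunk and a made-up local key buffer:

	struct netdev_phys_item_id ppid = {};
	u8 key[MAX_PHYS_ITEM_ID_LEN] = {};	/* MAX_PHYS_ITEM_ID_LEN is 32 */

	/* Returns 0 on success; only the first ppid.id_len bytes of ppid.id
	 * are valid, so the copy is bounded by id_len, not sizeof(u64).
	 */
	if (!netif_get_port_parent_id(netdev, &ppid, false))
		memcpy(key, ppid.id, ppid.id_len);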
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index f8c7912abe0e..14884b9ea7f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -39,6 +39,7 @@
#include "ipoib/ipoib.h"
#include "en_accel/en_accel.h"
#include "en_accel/ipsec_rxtx.h"
+#include "en_accel/psp_rxtx.h"
#include "en_accel/macsec.h"
#include "en/ptp.h"
#include <net/ipv6.h>
@@ -120,6 +121,11 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_accel_tx_state *accel,
struct mlx5_wqe_eth_seg *eseg)
{
+#ifdef CONFIG_MLX5_EN_PSP
+ if (unlikely(mlx5e_psp_txwqe_build_eseg_csum(sq, skb, &accel->psp_st, eseg)))
+ return;
+#endif
+
if (unlikely(mlx5e_ipsec_txwqe_build_eseg_csum(sq, skb, eseg)))
return;
@@ -196,7 +202,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
dseg->lkey = sq->mkey_be;
dseg->byte_count = cpu_to_be32(headlen);
- mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
+ mlx5e_dma_push_single(sq, dma_addr, headlen);
num_dma++;
dseg++;
}
@@ -214,7 +220,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
dseg->lkey = sq->mkey_be;
dseg->byte_count = cpu_to_be32(fsz);
- mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
+ mlx5e_dma_push_netmem(sq, skb_frag_netmem(frag), dma_addr, fsz);
num_dma++;
dseg++;
}
@@ -250,14 +256,13 @@ mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct sk_buff *skb,
u8 mode;
#ifdef CONFIG_MLX5_EN_TLS
- if (accel && accel->tls.tls_tisn)
+ if (accel->tls.tls_tisn)
return MLX5_INLINE_MODE_TCP_UDP;
#endif
mode = sq->min_inline_mode;
- if (skb_vlan_tag_present(skb) &&
- test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state))
+ if (skb_vlan_tag_present(skb))
mode = max_t(u8, MLX5_INLINE_MODE_L2, mode);
return mode;
@@ -298,7 +303,7 @@ static void mlx5e_sq_xmit_prepare(struct mlx5e_txqsq *sq, struct sk_buff *skb,
stats->packets++;
}
- attr->insz = mlx5e_accel_tx_ids_len(sq, accel);
+ attr->insz = mlx5e_accel_tx_ids_len(sq, skb, accel);
stats->bytes += attr->num_bytes;
}
@@ -337,10 +342,11 @@ static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_at
};
}
-static void mlx5e_tx_skb_update_hwts_flags(struct sk_buff *skb)
+static void mlx5e_tx_skb_update_ts_flags(struct sk_buff *skb)
{
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ skb_tx_timestamp(skb);
}
static void mlx5e_tx_check_stop(struct mlx5e_txqsq *sq)
@@ -392,7 +398,7 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | attr->opcode);
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | wqe_attr->ds_cnt);
- mlx5e_tx_skb_update_hwts_flags(skb);
+ mlx5e_tx_skb_update_ts_flags(skb);
sq->pc += wi->num_wqebbs;
@@ -482,12 +488,6 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
eseg->inline_hdr.sz |= cpu_to_be16(ihs);
dseg += wqe_attr->ds_cnt_inl;
- } else if (skb_vlan_tag_present(skb)) {
- eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
- if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
- eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN);
- eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
- stats->added_vlan_packets++;
}
dseg += wqe_attr->ds_cnt_ids;
@@ -525,9 +525,9 @@ static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
{
struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
struct mlx5e_tx_wqe *wqe;
- u16 pi;
+ u16 pi, num_wqebbs;
- pi = mlx5e_txqsq_get_next_pi(sq, sq->max_sq_mpw_wqebbs);
+ pi = mlx5e_txqsq_get_next_pi_anysize(sq, &num_wqebbs);
wqe = MLX5E_TX_FETCH_WQE(sq, pi);
net_prefetchw(wqe->data);
@@ -535,6 +535,7 @@ static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
.wqe = wqe,
.bytes_count = 0,
.ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
+ .ds_count_max = num_wqebbs * MLX5_SEND_WQEBB_NUM_DS,
.pkt_count = 0,
.inline_on = 0,
};
@@ -621,12 +622,12 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
sq->stats->xmit_more += xmit_more;
- mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE);
+ mlx5e_dma_push_single(sq, txd.dma_addr, txd.len);
mlx5e_skb_fifo_push(&sq->db.skb_fifo, skb);
mlx5e_tx_mpwqe_add_dseg(sq, &txd);
- mlx5e_tx_skb_update_hwts_flags(skb);
+ mlx5e_tx_skb_update_ts_flags(skb);
- if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe, sq->max_sq_mpw_wqebbs))) {
+ if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe))) {
/* Might stop the queue and affect the retval of __netdev_tx_sent_queue. */
cseg = mlx5e_tx_mpwqe_session_complete(sq);
@@ -658,7 +659,7 @@ static void mlx5e_cqe_ts_id_eseg(struct mlx5e_ptpsq *ptpsq, struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg)
{
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
- eseg->flow_table_metadata =
+ eseg->flow_table_metadata |=
cpu_to_be32(mlx5e_ptp_metadata_fifo_peek(&ptpsq->metadata_freelist));
}
@@ -666,7 +667,7 @@ static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *
struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
- mlx5e_accel_tx_eseg(priv, skb, eseg, ihs);
+ mlx5e_accel_tx_eseg(priv, skb, accel, eseg, ihs);
mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
if (unlikely(sq->ptpsq))
mlx5e_cqe_ts_id_eseg(sq->ptpsq, skb, eseg);
@@ -754,7 +755,7 @@ static void mlx5e_consume_skb(struct mlx5e_txqsq *sq, struct sk_buff *skb,
hwts.hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, ts);
if (sq->ptpsq) {
mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_CQE_HWTSTAMP,
- hwts.hwtstamp, sq->ptpsq->cq_stats);
+ hwts.hwtstamp, sq->ptpsq);
} else {
skb_tstamp_tx(skb, &hwts);
sq->stats->timestamps++;
@@ -981,6 +982,7 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_attr attr;
struct mlx5i_tx_wqe *wqe;
+ struct mlx5e_accel_tx_state accel = {};
struct mlx5_wqe_datagram_seg *datagram;
struct mlx5_wqe_ctrl_seg *cseg;
struct mlx5_wqe_eth_seg *eseg;
@@ -991,7 +993,7 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
int num_dma;
u16 pi;
- mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
+ mlx5e_sq_xmit_prepare(sq, skb, &accel, &attr);
mlx5i_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
@@ -1008,7 +1010,7 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);
- mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, eseg);
+ mlx5e_txwqe_build_eseg_csum(sq, skb, &accel, eseg);
eseg->mss = attr.mss;
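
Note on the rename to mlx5e_tx_skb_update_ts_flags() above: besides flagging an in-flight hardware timestamp, the helper now also takes the software timestamp. The generic driver-side pattern, sketched here outside any mlx5 context:

	/* Hardware timestamp requested: tell the stack one is in flight. */
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	/* Software TX timestamp (if enabled on the socket) is taken just
	 * before the descriptor is handed to hardware.
	 */
	skb_tx_timestamp(skb);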
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 417098f0b2bb..76108299ea57 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -165,7 +165,8 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
if (unlikely(!budget))
goto out;
- busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq);
+ if (c->xdpsq)
+ busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq->cq);
if (c->xdp)
busy |= mlx5e_poll_xdpsq_cq(&c->rq_xdpsq.cq);
@@ -236,7 +237,8 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
mlx5e_cq_arm(&rq->cq);
mlx5e_cq_arm(&c->icosq.cq);
mlx5e_cq_arm(&c->async_icosq.cq);
- mlx5e_cq_arm(&c->xdpsq.cq);
+ if (c->xdpsq)
+ mlx5e_cq_arm(&c->xdpsq->cq);
if (xsk_open) {
mlx5e_handle_rx_dim(xskrq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 859dcf09b770..25499da177bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -32,9 +32,7 @@ enum {
MLX5_EQ_STATE_ALWAYS_ARMED = 0xb,
};
-enum {
- MLX5_EQ_DOORBEL_OFFSET = 0x40,
-};
+#define MLX5_EQ_DOORBELL_OFFSET 0x40
/* budget must be smaller than MLX5_NUM_SPARE_EQE to guarantee that we update
* the ci before we polled all the entries in the EQ. MLX5_NUM_SPARE_EQE is
@@ -114,10 +112,10 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
struct mlx5_eq *eq = &eq_comp->core;
struct mlx5_eqe *eqe;
int num_eqes = 0;
- u32 cqn = -1;
while ((eqe = next_eqe_sw(eq))) {
struct mlx5_core_cq *cq;
+ u32 cqn;
/* Make sure we read EQ entry contents after we've
* checked the ownership bit.
@@ -144,9 +142,6 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
eq_update_ci(eq, 1);
- if (cqn != -1)
- tasklet_schedule(&eq_comp->tasklet_ctx.task);
-
return 0;
}
@@ -312,7 +307,7 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
MLX5_SET(eqc, eqc, log_eq_size, eq->fbc.log_sz);
- MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
+ MLX5_SET(eqc, eqc, uar_page, priv->bfreg.up->index);
MLX5_SET(eqc, eqc, intr, vecidx);
MLX5_SET(eqc, eqc, log_page_size,
eq->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
@@ -325,7 +320,7 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
eq->irqn = pci_irq_vector(dev->pdev, vecidx);
eq->dev = dev;
- eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
+ eq->doorbell = priv->bfreg.up->map + MLX5_EQ_DOORBELL_OFFSET;
err = mlx5_debug_eq_add(dev, eq);
if (err)
@@ -588,6 +583,9 @@ static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
async_event_mask |=
(1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);
+ if (mlx5_pcie_cong_event_supported(dev))
+ async_event_mask |= (1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);
+
mask[0] = async_event_mask;
if (MLX5_CAP_GEN(dev, event_cap))
@@ -804,15 +802,8 @@ EXPORT_SYMBOL(mlx5_eq_get_eqe);
void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
{
- __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
- u32 val;
-
eq->cons_index += cc;
- val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
-
- __raw_writel((__force u32)cpu_to_be32(val), addr);
- /* We still want ordering, just not swabbing, so add a barrier */
- wmb();
+ eq_update_ci(eq, arm);
}
EXPORT_SYMBOL(mlx5_eq_update_ci);
@@ -881,21 +872,27 @@ static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx)
static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
{
+ struct mlx5_irq_pool *pool = mlx5_irq_table_get_comp_irq_pool(dev);
struct mlx5_eq_table *table = dev->priv.eq_table;
- struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
- struct irq_affinity_desc af_desc = {};
+ struct irq_affinity_desc *af_desc;
struct mlx5_irq *irq;
- /* In case SF irq pool does not exist, fallback to the PF irqs*/
+ /* In case SF irq pool does not exist, fallback to the PF irqs */
if (!mlx5_irq_pool_is_sf_pool(pool))
return comp_irq_request_pci(dev, vecidx);
- af_desc.is_managed = false;
- cpumask_copy(&af_desc.mask, cpu_online_mask);
- cpumask_andnot(&af_desc.mask, &af_desc.mask, &table->used_cpus);
- irq = mlx5_irq_affinity_request(dev, pool, &af_desc);
- if (IS_ERR(irq))
+ af_desc = kvzalloc(sizeof(*af_desc), GFP_KERNEL);
+ if (!af_desc)
+ return -ENOMEM;
+
+ af_desc->is_managed = false;
+ cpumask_copy(&af_desc->mask, cpu_online_mask);
+ cpumask_andnot(&af_desc->mask, &af_desc->mask, &table->used_cpus);
+ irq = mlx5_irq_affinity_request(dev, pool, af_desc);
+ if (IS_ERR(irq)) {
+ kvfree(af_desc);
return PTR_ERR(irq);
+ }
cpumask_or(&table->used_cpus, &table->used_cpus, mlx5_irq_get_affinity_mask(irq));
mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
@@ -903,6 +900,8 @@ static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
+ kvfree(af_desc);
+
return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
}
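
On the comp_irq_request_sf() hunk above: struct irq_affinity_desc embeds a full cpumask, which can be large on high-NR_CPUS builds, so it moves from the stack to kvzalloc(). A stripped-down sketch of the pattern, with my_request_irq() standing in for the real request call:

	struct irq_affinity_desc *af_desc;
	int err;

	af_desc = kvzalloc(sizeof(*af_desc), GFP_KERNEL);
	if (!af_desc)
		return -ENOMEM;

	af_desc->is_managed = false;
	cpumask_copy(&af_desc->mask, cpu_online_mask);

	err = my_request_irq(af_desc);	/* hypothetical consumer */

	kvfree(af_desc);		/* freed on success and failure alike */
	return err;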
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
index 6b4c9ffad95b..c9a1654d83a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
@@ -87,8 +87,8 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
drop_counter = mlx5_fc_create(esw->dev, false);
if (IS_ERR(drop_counter)) {
esw_warn(esw->dev,
- "vport[%d] configure egress drop rule counter err(%ld)\n",
- vport->vport, PTR_ERR(drop_counter));
+ "vport[%d] configure egress drop rule counter err(%pe)\n",
+ vport->vport, drop_counter);
drop_counter = NULL;
}
vport->egress.legacy.drop_counter = drop_counter;
@@ -135,7 +135,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
if (drop_counter) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- drop_ctr_dst.counter_id = mlx5_fc_id(drop_counter);
+ drop_ctr_dst.counter = drop_counter;
dst = &drop_ctr_dst;
dest_num++;
}
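
The err=%ld/PTR_ERR() to err=%pe conversions in this file (and in bridge.c below) rely on the %pe printk extension, which prints an ERR_PTR()-encoded pointer as a symbolic errno name. A minimal illustration with a made-up allocation helper:

	void *obj = my_create_thing();		/* hypothetical helper */

	if (IS_ERR(obj))
		pr_warn("create failed (err=%pe)\n", obj);
		/* previously: pr_warn("create failed (err=%ld)\n", PTR_ERR(obj)); */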
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
index d599e50af346..3ce455c2535c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
@@ -27,7 +27,7 @@ esw_acl_table_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport, int ns,
esw_debug(dev, "Create vport[%d] %s ACL table\n", vport_num,
ns == MLX5_FLOW_NAMESPACE_ESW_INGRESS ? "ingress" : "egress");
- root_ns = mlx5_get_flow_vport_acl_namespace(dev, ns, vport->index);
+ root_ns = mlx5_get_flow_vport_namespace(dev, ns, vport->index);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch root namespace for vport (%d)\n",
vport_num);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
index 093ed86a0acd..1c37098e09ea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
@@ -260,7 +260,7 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
if (counter) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- drop_ctr_dst.counter_id = mlx5_fc_id(counter);
+ drop_ctr_dst.counter = counter;
dst = &drop_ctr_dst;
dest_num++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/adj_vport.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/adj_vport.c
new file mode 100644
index 000000000000..250af09b5af2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/adj_vport.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "fs_core.h"
+#include "eswitch.h"
+
+int mlx5_esw_adj_vport_modify(struct mlx5_core_dev *dev, u16 vport,
+ bool connect)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {};
+
+ MLX5_SET(modify_vport_state_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_VPORT_STATE);
+ MLX5_SET(modify_vport_state_in, in, op_mod,
+ MLX5_VPORT_STATE_OP_MOD_ESW_VPORT);
+ MLX5_SET(modify_vport_state_in, in, other_vport, 1);
+ MLX5_SET(modify_vport_state_in, in, vport_number, vport);
+ MLX5_SET(modify_vport_state_in, in, ingress_connect_valid, 1);
+ MLX5_SET(modify_vport_state_in, in, egress_connect_valid, 1);
+ MLX5_SET(modify_vport_state_in, in, ingress_connect, connect);
+ MLX5_SET(modify_vport_state_in, in, egress_connect, connect);
+ MLX5_SET(modify_vport_state_in, in, admin_state, connect);
+ return mlx5_cmd_exec_in(dev, modify_vport_state, in);
+}
+
+static void mlx5_esw_destroy_esw_vport(struct mlx5_core_dev *dev, u16 vport)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_esw_vport_in)] = {};
+
+ MLX5_SET(destroy_esw_vport_in, in, opcode,
+ MLX5_CMD_OPCODE_DESTROY_ESW_VPORT);
+ MLX5_SET(destroy_esw_vport_in, in, vport_num, vport);
+
+ mlx5_cmd_exec_in(dev, destroy_esw_vport, in);
+}
+
+static int mlx5_esw_create_esw_vport(struct mlx5_core_dev *dev, u16 vhca_id,
+ u16 *vport_num)
+{
+ u32 out[MLX5_ST_SZ_DW(create_esw_vport_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_esw_vport_in)] = {};
+ int err;
+
+ MLX5_SET(create_esw_vport_in, in, opcode,
+ MLX5_CMD_OPCODE_CREATE_ESW_VPORT);
+ MLX5_SET(create_esw_vport_in, in, managed_vhca_id, vhca_id);
+
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *vport_num = MLX5_GET(create_esw_vport_out, out, vport_num);
+
+ return err;
+}
+
+static int mlx5_esw_adj_vport_create(struct mlx5_eswitch *esw, u16 vhca_id,
+ const void *rid_info_reg)
+{
+ struct mlx5_vport *vport;
+ u16 vport_num;
+ int err;
+
+ err = mlx5_esw_create_esw_vport(esw->dev, vhca_id, &vport_num);
+ if (err) {
+ esw_warn(esw->dev,
+ "Failed to create adjacent vport for vhca_id %d, err %d\n",
+ vhca_id, err);
+ return err;
+ }
+
+ esw_debug(esw->dev, "Created adjacent vport[%d] %d for vhca_id 0x%x\n",
+ esw->last_vport_idx, vport_num, vhca_id);
+
+ err = mlx5_esw_vport_alloc(esw, esw->last_vport_idx++, vport_num);
+ if (err)
+ goto destroy_esw_vport;
+
+ xa_set_mark(&esw->vports, vport_num, MLX5_ESW_VPT_VF);
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ vport->adjacent = true;
+ vport->vhca_id = vhca_id;
+
+ vport->adj_info.parent_pci_devfn =
+ MLX5_GET(function_vhca_rid_info_reg, rid_info_reg,
+ parent_pci_device_function);
+ vport->adj_info.function_id =
+ MLX5_GET(function_vhca_rid_info_reg, rid_info_reg, function_id);
+
+ mlx5_fs_vport_egress_acl_ns_add(esw->dev->priv.steering, vport->index);
+ mlx5_fs_vport_ingress_acl_ns_add(esw->dev->priv.steering, vport->index);
+ err = mlx5_esw_offloads_rep_add(esw, vport);
+ if (err)
+ goto acl_ns_remove;
+
+ return 0;
+
+acl_ns_remove:
+ mlx5_fs_vport_ingress_acl_ns_remove(esw->dev->priv.steering,
+ vport->index);
+ mlx5_fs_vport_egress_acl_ns_remove(esw->dev->priv.steering,
+ vport->index);
+ mlx5_esw_vport_free(esw, vport);
+destroy_esw_vport:
+ mlx5_esw_destroy_esw_vport(esw->dev, vport_num);
+ return err;
+}
+
+static void mlx5_esw_adj_vport_destroy(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ u16 vport_num = vport->vport;
+
+ esw_debug(esw->dev, "Destroying adjacent vport %d for vhca_id 0x%x\n",
+ vport_num, vport->vhca_id);
+
+ mlx5_esw_offloads_rep_remove(esw, vport);
+ mlx5_fs_vport_egress_acl_ns_remove(esw->dev->priv.steering,
+ vport->index);
+ mlx5_fs_vport_ingress_acl_ns_remove(esw->dev->priv.steering,
+ vport->index);
+ mlx5_esw_vport_free(esw, vport);
+ /* Reset the vport index back so new adj vports can use this index.
+ * When vport count can incrementally change, this needs to be modified.
+ */
+ esw->last_vport_idx--;
+ mlx5_esw_destroy_esw_vport(esw->dev, vport_num);
+}
+
+void mlx5_esw_adjacent_vhcas_cleanup(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ unsigned long i;
+
+ if (!MLX5_CAP_GEN_2(esw->dev, delegated_vhca_max))
+ return;
+
+ mlx5_esw_for_each_vf_vport(esw, i, vport, U16_MAX) {
+ if (!vport->adjacent)
+ continue;
+ mlx5_esw_adj_vport_destroy(esw, vport);
+ }
+}
+
+void mlx5_esw_adjacent_vhcas_setup(struct mlx5_eswitch *esw)
+{
+ u32 delegated_vhca_max = MLX5_CAP_GEN_2(esw->dev, delegated_vhca_max);
+ u32 in[MLX5_ST_SZ_DW(query_delegated_vhca_in)] = {};
+ int outlen, err, i = 0;
+ u8 *out;
+ u32 count;
+
+ if (!delegated_vhca_max)
+ return;
+
+ outlen = MLX5_ST_SZ_BYTES(query_delegated_vhca_out) +
+ delegated_vhca_max *
+ MLX5_ST_SZ_BYTES(delegated_function_vhca_rid_info);
+
+ esw_debug(esw->dev, "delegated_vhca_max=%d\n", delegated_vhca_max);
+
+ out = kvzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return;
+
+ MLX5_SET(query_delegated_vhca_in, in, opcode,
+ MLX5_CMD_OPCODE_QUERY_DELEGATED_VHCA);
+
+ err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen);
+ if (err) {
+ kvfree(out);
+ esw_warn(esw->dev, "Failed to query delegated vhca, err %d\n",
+ err);
+ return;
+ }
+
+ count = MLX5_GET(query_delegated_vhca_out, out, functions_count);
+ esw_debug(esw->dev, "Delegated vhca functions count %d\n", count);
+
+ for (i = 0; i < count; i++) {
+ const void *rid_info, *rid_info_reg;
+ u16 vhca_id;
+
+ rid_info = MLX5_ADDR_OF(query_delegated_vhca_out, out,
+ delegated_function_vhca_rid_info[i]);
+
+ rid_info_reg = MLX5_ADDR_OF(delegated_function_vhca_rid_info,
+ rid_info, function_vhca_rid_info);
+
+ vhca_id = MLX5_GET(function_vhca_rid_info_reg, rid_info_reg,
+ vhca_id);
+ esw_debug(esw->dev, "Delegating vhca_id 0x%x\n", vhca_id);
+
+ err = mlx5_esw_adj_vport_create(esw, vhca_id, rid_info_reg);
+ if (err) {
+ esw_warn(esw->dev,
+ "Failed to init adjacent vhca 0x%x, err %d\n",
+ vhca_id, err);
+ break;
+ }
+ }
+
+ kvfree(out);
+}
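
The new mlx5_esw_adj_vport_create() above follows the usual kernel unwind idiom: each failure jumps to a label that tears down only what was already set up, in reverse order. Reduced to its skeleton (step names are placeholders):

	err = setup_a();
	if (err)
		return err;		/* nothing to undo yet */

	err = setup_b();
	if (err)
		goto undo_a;

	err = setup_c();
	if (err)
		goto undo_b;

	return 0;

undo_b:
	teardown_b();
undo_a:
	teardown_a();
	return err;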
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index c5ea1d1d2b03..60e10047770f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -81,7 +81,8 @@ mlx5_esw_bridge_table_create(int max_fte, u32 level, struct mlx5_eswitch *esw)
ft_attr.prio = FDB_BR_OFFLOAD;
fdb = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(fdb))
- esw_warn(dev, "Failed to create bridge FDB Table (err=%ld)\n", PTR_ERR(fdb));
+ esw_warn(dev, "Failed to create bridge FDB Table (err=%pe)\n",
+ fdb);
return fdb;
}
@@ -121,8 +122,8 @@ mlx5_esw_bridge_ingress_vlan_proto_fg_create(unsigned int from, unsigned int to,
kvfree(in);
if (IS_ERR(fg))
esw_warn(esw->dev,
- "Failed to create VLAN(proto=%x) flow group for bridge ingress table (err=%ld)\n",
- vlan_proto, PTR_ERR(fg));
+ "Failed to create VLAN(proto=%x) flow group for bridge ingress table (err=%pe)\n",
+ vlan_proto, fg);
return fg;
}
@@ -180,8 +181,8 @@ mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(unsigned int from, unsigned
fg = mlx5_create_flow_group(ingress_ft, in);
if (IS_ERR(fg))
esw_warn(esw->dev,
- "Failed to create bridge ingress table VLAN filter flow group (err=%ld)\n",
- PTR_ERR(fg));
+ "Failed to create bridge ingress table VLAN filter flow group (err=%pe)\n",
+ fg);
kvfree(in);
return fg;
}
@@ -237,8 +238,8 @@ mlx5_esw_bridge_ingress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow
fg = mlx5_create_flow_group(ingress_ft, in);
if (IS_ERR(fg))
esw_warn(esw->dev,
- "Failed to create MAC flow group for bridge ingress table (err=%ld)\n",
- PTR_ERR(fg));
+ "Failed to create MAC flow group for bridge ingress table (err=%pe)\n",
+ fg);
kvfree(in);
return fg;
@@ -274,8 +275,8 @@ mlx5_esw_bridge_egress_vlan_proto_fg_create(unsigned int from, unsigned int to,
fg = mlx5_create_flow_group(egress_ft, in);
if (IS_ERR(fg))
esw_warn(esw->dev,
- "Failed to create VLAN flow group for bridge egress table (err=%ld)\n",
- PTR_ERR(fg));
+ "Failed to create VLAN flow group for bridge egress table (err=%pe)\n",
+ fg);
kvfree(in);
return fg;
}
@@ -324,8 +325,8 @@ mlx5_esw_bridge_egress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_
fg = mlx5_create_flow_group(egress_ft, in);
if (IS_ERR(fg))
esw_warn(esw->dev,
- "Failed to create bridge egress table MAC flow group (err=%ld)\n",
- PTR_ERR(fg));
+ "Failed to create bridge egress table MAC flow group (err=%pe)\n",
+ fg);
kvfree(in);
return fg;
}
@@ -354,8 +355,8 @@ mlx5_esw_bridge_egress_miss_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow
fg = mlx5_create_flow_group(egress_ft, in);
if (IS_ERR(fg))
esw_warn(esw->dev,
- "Failed to create bridge egress table miss flow group (err=%ld)\n",
- PTR_ERR(fg));
+ "Failed to create bridge egress table miss flow group (err=%pe)\n",
+ fg);
kvfree(in);
return fg;
}
@@ -501,8 +502,8 @@ mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
if (mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(esw)) {
miss_fg = mlx5_esw_bridge_egress_miss_fg_create(esw, egress_ft);
if (IS_ERR(miss_fg)) {
- esw_warn(esw->dev, "Failed to create miss flow group (err=%ld)\n",
- PTR_ERR(miss_fg));
+ esw_warn(esw->dev, "Failed to create miss flow group (err=%pe)\n",
+ miss_fg);
miss_fg = NULL;
goto skip_miss_flow;
}
@@ -510,8 +511,8 @@ mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
miss_pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw);
if (IS_ERR(miss_pkt_reformat)) {
esw_warn(esw->dev,
- "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
- PTR_ERR(miss_pkt_reformat));
+ "Failed to alloc packet reformat REMOVE_HEADER (err=%pe)\n",
+ miss_pkt_reformat);
miss_pkt_reformat = NULL;
mlx5_destroy_flow_group(miss_fg);
miss_fg = NULL;
@@ -522,8 +523,8 @@ mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
br_offloads->skip_ft,
miss_pkt_reformat);
if (IS_ERR(miss_handle)) {
- esw_warn(esw->dev, "Failed to create miss flow (err=%ld)\n",
- PTR_ERR(miss_handle));
+ esw_warn(esw->dev, "Failed to create miss flow (err=%pe)\n",
+ miss_handle);
miss_handle = NULL;
mlx5_packet_reformat_dealloc(esw->dev, miss_pkt_reformat);
miss_pkt_reformat = NULL;
@@ -570,7 +571,8 @@ mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge)
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char *addr,
- struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
+ struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_fc *counter,
struct mlx5_esw_bridge *bridge,
struct mlx5_eswitch *esw)
{
@@ -628,7 +630,7 @@ mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char
dests[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dests[0].ft = bridge->egress_ft;
dests[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dests[1].counter_id = counter_id;
+ dests[1].counter = counter;
handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, dests,
ARRAY_SIZE(dests));
@@ -639,17 +641,19 @@ mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr,
- struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
+ struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_fc *counter,
struct mlx5_esw_bridge *bridge)
{
- return mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
+ return mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter,
bridge, bridge->br_offloads->esw);
}
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id,
const unsigned char *addr,
- struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
+ struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_fc *counter,
struct mlx5_esw_bridge *bridge)
{
struct mlx5_devcom_comp_dev *devcom = bridge->br_offloads->esw->devcom, *pos;
@@ -671,7 +675,7 @@ mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id,
goto out;
}
- handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
+ handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter,
bridge, peer_esw);
out:
@@ -1045,8 +1049,8 @@ mlx5_esw_bridge_vlan_push_create(u16 vlan_proto, struct mlx5_esw_bridge_vlan *vl
&reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(pkt_reformat)) {
- esw_warn(esw->dev, "Failed to alloc packet reformat INSERT_HEADER (err=%ld)\n",
- PTR_ERR(pkt_reformat));
+ esw_warn(esw->dev, "Failed to alloc packet reformat INSERT_HEADER (err=%pe)\n",
+ pkt_reformat);
return PTR_ERR(pkt_reformat);
}
@@ -1073,8 +1077,8 @@ mlx5_esw_bridge_vlan_pop_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_e
pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw);
if (IS_ERR(pkt_reformat)) {
- esw_warn(esw->dev, "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
- PTR_ERR(pkt_reformat));
+ esw_warn(esw->dev, "Failed to alloc packet reformat REMOVE_HEADER (err=%pe)\n",
+ pkt_reformat);
return PTR_ERR(pkt_reformat);
}
@@ -1385,10 +1389,9 @@ mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_ow
handle = peer ?
mlx5_esw_bridge_ingress_flow_peer_create(vport_num, esw_owner_vhca_id,
- addr, vlan, mlx5_fc_id(counter),
- bridge) :
+ addr, vlan, counter, bridge) :
mlx5_esw_bridge_ingress_flow_create(vport_num, addr, vlan,
- mlx5_fc_id(counter), bridge);
+ counter, bridge);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d,peer=%d)\n",
@@ -1861,7 +1864,7 @@ int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_
"Failed to lookup bridge port to add MDB (MAC=%pM,vport=%u)\n",
addr, vport_num);
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Failed to lookup bridge port to add MDB (MAC=%pM,vport=%u)\n",
+ "Failed to lookup bridge port to add MDB (MAC=%pM,vport=%u)",
addr, vport_num);
return -EINVAL;
}
@@ -1874,7 +1877,7 @@ int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_
"Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)\n",
addr, vid, vport_num);
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Failed to lookup vlan metadata for MDB (MAC=%pM,vid=%u,vport=%u)\n",
+ "Failed to lookup vlan metadata for MDB (MAC=%pM,vid=%u,vport=%u)",
addr, vid, vport_num);
return -EINVAL;
}
@@ -1882,7 +1885,7 @@ int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_
err = mlx5_esw_bridge_port_mdb_attach(dev, port, addr, vid);
if (err) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to add MDB (MAC=%pM,vid=%u,vport=%u)\n",
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to add MDB (MAC=%pM,vid=%u,vport=%u)",
addr, vid, vport_num);
return err;
}
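
The NL_SET_ERR_MSG_FMT_MOD() changes above drop the trailing "\n": extack strings are handed back to userspace as-is rather than printed to the kernel log, so they conventionally carry no newline. For example:

	NL_SET_ERR_MSG_FMT_MOD(extack,
			       "Failed to add MDB (MAC=%pM,vid=%u,vport=%u)",
			       addr, vid, vport_num);	/* no trailing \n */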
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index d0f38818363f..89a58dee50b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -7,11 +7,7 @@
static void
mlx5_esw_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_id *ppid)
{
- u64 parent_id;
-
- parent_id = mlx5_query_nic_system_image_guid(dev);
- ppid->id_len = sizeof(parent_id);
- memcpy(ppid->id, &parent_id, sizeof(parent_id));
+ mlx5_query_nic_sw_system_image_guid(dev, ppid->id, &ppid->id_len);
}
static bool mlx5_esw_devlink_port_supported(struct mlx5_eswitch *esw, u16 vport_num)
@@ -27,12 +23,13 @@ static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch *
{
struct mlx5_core_dev *dev = esw->dev;
struct netdev_phys_item_id ppid = {};
+ struct mlx5_vport *vport;
u32 controller_num = 0;
bool external;
u16 pfnum;
mlx5_esw_get_port_parent_id(dev, &ppid);
- pfnum = mlx5_get_dev_index(dev);
+ pfnum = PCI_FUNC(dev->pdev->devfn);
external = mlx5_core_is_ecpf_esw_manager(dev);
if (external)
controller_num = dev->priv.eswitch->offloads.host_number + 1;
@@ -42,15 +39,25 @@ static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch *
dl_port->attrs.switch_id.id_len = ppid.id_len;
devlink_port_attrs_pci_pf_set(dl_port, controller_num, pfnum, external);
} else if (mlx5_eswitch_is_vf_vport(esw, vport_num)) {
+ u16 func_id = vport_num - 1;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
dl_port->attrs.switch_id.id_len = ppid.id_len;
+ if (vport->adjacent) {
+ func_id = vport->adj_info.function_id;
+ pfnum = vport->adj_info.parent_pci_devfn;
+ }
+
devlink_port_attrs_pci_vf_set(dl_port, controller_num, pfnum,
- vport_num - 1, external);
+ func_id, external);
} else if (mlx5_core_is_ec_vf_vport(esw->dev, vport_num)) {
+ u16 base_vport = mlx5_core_ec_vf_vport_base(dev);
+
memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
dl_port->attrs.switch_id.id_len = ppid.id_len;
devlink_port_attrs_pci_vf_set(dl_port, 0, pfnum,
- vport_num - 1, false);
+ vport_num - base_vport, false);
}
}
@@ -110,7 +117,7 @@ static void mlx5_esw_offloads_sf_devlink_port_attrs_set(struct mlx5_eswitch *esw
struct netdev_phys_item_id ppid = {};
u16 pfnum;
- pfnum = mlx5_get_dev_index(dev);
+ pfnum = PCI_FUNC(dev->pdev->devfn);
mlx5_esw_get_port_parent_id(dev, &ppid);
memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len);
dl_port->attrs.switch_id.id_len = ppid.id_len;
@@ -195,7 +202,7 @@ void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_vport *vport)
return;
dl_port = vport->dl_port;
- mlx5_esw_qos_vport_update_node(vport, NULL, NULL);
+ mlx5_esw_qos_vport_update_parent(vport, NULL, NULL);
devl_rate_leaf_destroy(&dl_port->dl_port);
devl_port_unregister(&dl_port->dl_port);
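
On the pfnum change above: PCI_FUNC() extracts the PCI function number from the encoded devfn (devfn packs slot and function as slot << 3 | func), whereas mlx5_get_dev_index() is a driver-internal index. A small illustration:

	/* For a device at 0000:03:00.2: */
	u8 func = PCI_FUNC(pdev->devfn);	/* 2 */
	u8 slot = PCI_SLOT(pdev->devfn);	/* 0 */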
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
index 5a0047bdcb51..3cfe743610d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
@@ -10,9 +10,9 @@
#endif
enum {
- MLX5_ESW_IPSEC_RX_POL_FT_LEVEL,
MLX5_ESW_IPSEC_RX_ESP_FT_LEVEL,
MLX5_ESW_IPSEC_RX_ESP_FT_CHK_LEVEL,
+ MLX5_ESW_IPSEC_RX_POL_FT_LEVEL,
};
enum {
@@ -85,6 +85,19 @@ err_header_alloc:
return err;
}
+void mlx5_esw_ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_flow_spec *spec)
+{
+ MLX5_SET(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_1,
+ ESW_IPSEC_RX_MAPPED_ID_MATCH_MASK);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_c_1,
+ sa_entry->rx_mapped_id << ESW_ZONE_ID_BITS);
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+}
+
void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
@@ -150,11 +163,11 @@ void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev)
unsigned long i;
int err;
- xa_for_each(&esw->offloads.vport_reps, i, rep) {
- rpriv = rep->rep_data[REP_ETH].priv;
- if (!rpriv || !rpriv->netdev)
+ mlx5_esw_for_each_rep(esw, i, rep) {
+ if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
continue;
+ rpriv = rep->rep_data[REP_ETH].priv;
rhashtable_walk_enter(&rpriv->tc_ht, &iter);
rhashtable_walk_start(&iter);
while ((flow = rhashtable_walk_next(&iter)) != NULL) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
index ac9c65b89166..514c15258b1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
@@ -20,6 +20,8 @@ int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_tx_create_attr *attr);
void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev);
+void mlx5_esw_ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_flow_spec *spec);
#else
static inline void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx_create_attr *attr) {}
@@ -48,5 +50,8 @@ static inline void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_tx_create_attr *attr) {}
static inline void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev) {}
+static inline void
+mlx5_esw_ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_flow_spec *spec) {}
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESW_IPSEC_FS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
index 45183de424f3..929adeb50a98 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
@@ -66,7 +66,6 @@ static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
esw->fdb_table.legacy.addr_grp = NULL;
esw->fdb_table.legacy.allmulti_grp = NULL;
esw->fdb_table.legacy.promisc_grp = NULL;
- atomic64_set(&esw->user_count, 0);
}
static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
@@ -96,7 +95,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
if (!flow_group_in)
return -ENOMEM;
- ft_attr.max_fte = POOL_NEXT_SIZE;
+ ft_attr.max_fte = MLX5_FS_MAX_POOL_SIZE;
ft_attr.prio = LEGACY_FDB_PRIO;
fdb = mlx5_create_flow_table(root_ns, &ft_attr);
if (IS_ERR(fdb)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 7e7f99b38a37..4278bcb04c72 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -64,11 +64,19 @@ static void esw_qos_domain_release(struct mlx5_eswitch *esw)
enum sched_node_type {
SCHED_NODE_TYPE_VPORTS_TSAR,
SCHED_NODE_TYPE_VPORT,
+ SCHED_NODE_TYPE_TC_ARBITER_TSAR,
+ SCHED_NODE_TYPE_RATE_LIMITER,
+ SCHED_NODE_TYPE_VPORT_TC,
+ SCHED_NODE_TYPE_VPORTS_TC_TSAR,
};
static const char * const sched_node_type_str[] = {
[SCHED_NODE_TYPE_VPORTS_TSAR] = "vports TSAR",
[SCHED_NODE_TYPE_VPORT] = "vport",
+ [SCHED_NODE_TYPE_TC_ARBITER_TSAR] = "TC Arbiter TSAR",
+ [SCHED_NODE_TYPE_RATE_LIMITER] = "Rate Limiter",
+ [SCHED_NODE_TYPE_VPORT_TC] = "vport TC",
+ [SCHED_NODE_TYPE_VPORTS_TC_TSAR] = "vports TC TSAR",
};
struct mlx5_esw_sched_node {
@@ -90,15 +98,77 @@ struct mlx5_esw_sched_node {
struct list_head children;
/* Valid only if this node is associated with a vport. */
struct mlx5_vport *vport;
+ /* Level in the hierarchy. The root node level is 1. */
+ u8 level;
+ /* Valid only when this node represents a traffic class. */
+ u8 tc;
+ /* Valid only for a TC arbiter node or vport TC arbiter. */
+ u32 tc_bw[DEVLINK_RATE_TCS_MAX];
};
+static void esw_qos_node_attach_to_parent(struct mlx5_esw_sched_node *node)
+{
+ if (!node->parent) {
+ /* Root children are assigned a depth level of 2. */
+ node->level = 2;
+ list_add_tail(&node->entry, &node->esw->qos.domain->nodes);
+ } else {
+ node->level = node->parent->level + 1;
+ list_add_tail(&node->entry, &node->parent->children);
+ }
+}
+
+static int esw_qos_num_tcs(struct mlx5_core_dev *dev)
+{
+ int num_tcs = mlx5_max_tc(dev) + 1;
+
+ return num_tcs < DEVLINK_RATE_TCS_MAX ? num_tcs : DEVLINK_RATE_TCS_MAX;
+}
+
static void
esw_qos_node_set_parent(struct mlx5_esw_sched_node *node, struct mlx5_esw_sched_node *parent)
{
list_del_init(&node->entry);
node->parent = parent;
- list_add_tail(&node->entry, &parent->children);
- node->esw = parent->esw;
+ if (parent)
+ node->esw = parent->esw;
+ esw_qos_node_attach_to_parent(node);
+}
+
+static void esw_qos_nodes_set_parent(struct list_head *nodes,
+ struct mlx5_esw_sched_node *parent)
+{
+ struct mlx5_esw_sched_node *node, *tmp;
+
+ list_for_each_entry_safe(node, tmp, nodes, entry) {
+ esw_qos_node_set_parent(node, parent);
+ if (!list_empty(&node->children) &&
+ parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) {
+ struct mlx5_esw_sched_node *child;
+
+ list_for_each_entry(child, &node->children, entry) {
+ struct mlx5_vport *vport = child->vport;
+
+ if (vport)
+ vport->qos.sched_node->parent = parent;
+ }
+ }
+ }
+}
+
+void mlx5_esw_qos_vport_qos_free(struct mlx5_vport *vport)
+{
+ if (vport->qos.sched_nodes) {
+ int num_tcs = esw_qos_num_tcs(vport->qos.sched_node->esw->dev);
+ int i;
+
+ for (i = 0; i < num_tcs; i++)
+ kfree(vport->qos.sched_nodes[i]);
+ kfree(vport->qos.sched_nodes);
+ }
+
+ kfree(vport->qos.sched_node);
+ memset(&vport->qos, 0, sizeof(vport->qos));
}
u32 mlx5_esw_qos_vport_get_sched_elem_ix(const struct mlx5_vport *vport)
@@ -118,18 +188,70 @@ mlx5_esw_qos_vport_get_parent(const struct mlx5_vport *vport)
return vport->qos.sched_node->parent;
}
-static void esw_qos_sched_elem_config_warn(struct mlx5_esw_sched_node *node, int err)
+static void esw_qos_sched_elem_warn(struct mlx5_esw_sched_node *node, int err, const char *op)
{
- if (node->vport) {
+ switch (node->type) {
+ case SCHED_NODE_TYPE_VPORTS_TC_TSAR:
esw_warn(node->esw->dev,
- "E-Switch modify %s scheduling element failed (vport=%d,err=%d)\n",
- sched_node_type_str[node->type], node->vport->vport, err);
- return;
+ "E-Switch %s %s scheduling element failed (tc=%d,err=%d)\n",
+ op, sched_node_type_str[node->type], node->tc, err);
+ break;
+ case SCHED_NODE_TYPE_VPORT_TC:
+ esw_warn(node->esw->dev,
+ "E-Switch %s %s scheduling element failed (vport=%d,tc=%d,err=%d)\n",
+ op,
+ sched_node_type_str[node->type],
+ node->vport->vport, node->tc, err);
+ break;
+ case SCHED_NODE_TYPE_VPORT:
+ esw_warn(node->esw->dev,
+ "E-Switch %s %s scheduling element failed (vport=%d,err=%d)\n",
+ op, sched_node_type_str[node->type], node->vport->vport, err);
+ break;
+ case SCHED_NODE_TYPE_RATE_LIMITER:
+ case SCHED_NODE_TYPE_TC_ARBITER_TSAR:
+ case SCHED_NODE_TYPE_VPORTS_TSAR:
+ esw_warn(node->esw->dev,
+ "E-Switch %s %s scheduling element failed (err=%d)\n",
+ op, sched_node_type_str[node->type], err);
+ break;
+ default:
+ esw_warn(node->esw->dev,
+ "E-Switch %s scheduling element failed (err=%d)\n",
+ op, err);
+ break;
+ }
+}
+
+static int esw_qos_node_create_sched_element(struct mlx5_esw_sched_node *node, void *ctx,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = mlx5_create_scheduling_element_cmd(node->esw->dev, SCHEDULING_HIERARCHY_E_SWITCH, ctx,
+ &node->ix);
+ if (err) {
+ esw_qos_sched_elem_warn(node, err, "create");
+ NL_SET_ERR_MSG_MOD(extack, "E-Switch create scheduling element failed");
+ }
+
+ return err;
+}
+
+static int esw_qos_node_destroy_sched_element(struct mlx5_esw_sched_node *node,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = mlx5_destroy_scheduling_element_cmd(node->esw->dev,
+ SCHEDULING_HIERARCHY_E_SWITCH,
+ node->ix);
+ if (err) {
+ esw_qos_sched_elem_warn(node, err, "destroy");
+ NL_SET_ERR_MSG_MOD(extack, "E-Switch destroying scheduling element failed.");
}
- esw_warn(node->esw->dev,
- "E-Switch modify %s scheduling element failed (err=%d)\n",
- sched_node_type_str[node->type], err);
+ return err;
}
static int esw_qos_sched_elem_config(struct mlx5_esw_sched_node *node, u32 max_rate, u32 bw_share,
@@ -143,10 +265,21 @@ static int esw_qos_sched_elem_config(struct mlx5_esw_sched_node *node, u32 max_r
if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
return -EOPNOTSUPP;
- MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate);
- MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
- bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
- bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;
+ if (bw_share && (!MLX5_CAP_QOS(dev, esw_bw_share) ||
+ MLX5_CAP_QOS(dev, max_tsar_bw_share) < MLX5_MIN_BW_SHARE))
+ return -EOPNOTSUPP;
+
+ if (node->max_rate == max_rate && node->bw_share == bw_share)
+ return 0;
+
+ if (node->max_rate != max_rate) {
+ MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate);
+ bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
+ }
+ if (node->bw_share != bw_share) {
+ MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
+ bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;
+ }
err = mlx5_modify_scheduling_element_cmd(dev,
SCHEDULING_HIERARCHY_E_SWITCH,
@@ -154,12 +287,14 @@ static int esw_qos_sched_elem_config(struct mlx5_esw_sched_node *node, u32 max_r
node->ix,
bitmask);
if (err) {
- esw_qos_sched_elem_config_warn(node, err);
+ esw_qos_sched_elem_warn(node, err, "modify");
NL_SET_ERR_MSG_MOD(extack, "E-Switch modify scheduling element failed");
return err;
}
+ node->max_rate = max_rate;
+ node->bw_share = bw_share;
if (node->type == SCHED_NODE_TYPE_VPORTS_TSAR)
trace_mlx5_esw_node_qos_config(dev, node, node->ix, bw_share, max_rate);
else if (node->type == SCHED_NODE_TYPE_VPORT)
@@ -168,6 +303,24 @@ static int esw_qos_sched_elem_config(struct mlx5_esw_sched_node *node, u32 max_r
return 0;
}
+static int esw_qos_create_rate_limit_element(struct mlx5_esw_sched_node *node,
+ struct netlink_ext_ack *extack)
+{
+ u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
+
+ if (!mlx5_qos_element_type_supported(
+ node->esw->dev,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_RATE_LIMIT,
+ SCHEDULING_HIERARCHY_E_SWITCH))
+ return -EOPNOTSUPP;
+
+ MLX5_SET(scheduling_context, sched_ctx, max_average_bw, node->max_rate);
+ MLX5_SET(scheduling_context, sched_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_RATE_LIMIT);
+
+ return esw_qos_node_create_sched_element(node, sched_ctx, extack);
+}
+
static u32 esw_qos_calculate_min_rate_divider(struct mlx5_eswitch *esw,
struct mlx5_esw_sched_node *parent)
{
@@ -188,190 +341,91 @@ static u32 esw_qos_calculate_min_rate_divider(struct mlx5_eswitch *esw,
if (max_guarantee)
return max_t(u32, max_guarantee / fw_max_bw_share, 1);
- /* If nodes max min_rate divider is 0 but their parent has bw_share
- * configured, then set bw_share for nodes to minimal value.
- */
-
- if (parent && parent->bw_share)
- return 1;
-
/* If the node nodes has min_rate configured, a divider of 0 sets all
* nodes' bw_share to 0, effectively disabling min guarantees.
*/
return 0;
}
-static u32 esw_qos_calc_bw_share(u32 min_rate, u32 divider, u32 fw_max)
+static u32 esw_qos_calc_bw_share(u32 value, u32 divider, u32 fw_max)
{
if (!divider)
return 0;
- return min_t(u32, max_t(u32, DIV_ROUND_UP(min_rate, divider), MLX5_MIN_BW_SHARE), fw_max);
+ return min_t(u32, fw_max,
+ max_t(u32,
+ DIV_ROUND_UP(value, divider), MLX5_MIN_BW_SHARE));
}
-static int esw_qos_update_sched_node_bw_share(struct mlx5_esw_sched_node *node,
- u32 divider,
- struct netlink_ext_ack *extack)
+static void esw_qos_update_sched_node_bw_share(struct mlx5_esw_sched_node *node,
+ u32 divider,
+ struct netlink_ext_ack *extack)
{
u32 fw_max_bw_share = MLX5_CAP_QOS(node->esw->dev, max_tsar_bw_share);
u32 bw_share;
- int err;
bw_share = esw_qos_calc_bw_share(node->min_rate, divider, fw_max_bw_share);
- if (bw_share == node->bw_share)
- return 0;
-
- err = esw_qos_sched_elem_config(node, node->max_rate, bw_share, extack);
- if (err)
- return err;
-
- node->bw_share = bw_share;
-
- return err;
+ esw_qos_sched_elem_config(node, node->max_rate, bw_share, extack);
}
-static int esw_qos_normalize_min_rate(struct mlx5_eswitch *esw,
- struct mlx5_esw_sched_node *parent,
- struct netlink_ext_ack *extack)
+static void esw_qos_normalize_min_rate(struct mlx5_eswitch *esw,
+ struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
{
struct list_head *nodes = parent ? &parent->children : &esw->qos.domain->nodes;
u32 divider = esw_qos_calculate_min_rate_divider(esw, parent);
struct mlx5_esw_sched_node *node;
list_for_each_entry(node, nodes, entry) {
- int err;
-
if (node->esw != esw || node->ix == esw->qos.root_tsar_ix)
continue;
- err = esw_qos_update_sched_node_bw_share(node, divider, extack);
- if (err)
- return err;
+ /* Vports TC TSARs don't have a minimum rate configured,
+ * so there's no need to update the bw_share on them.
+ */
+ if (node->type != SCHED_NODE_TYPE_VPORTS_TC_TSAR) {
+ esw_qos_update_sched_node_bw_share(node, divider,
+ extack);
+ }
if (list_empty(&node->children))
continue;
- err = esw_qos_normalize_min_rate(node->esw, node, extack);
- if (err)
- return err;
+ esw_qos_normalize_min_rate(node->esw, node, extack);
}
-
- return 0;
-}
-
-static int esw_qos_set_vport_min_rate(struct mlx5_vport *vport,
- u32 min_rate, struct netlink_ext_ack *extack)
-{
- struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
- u32 fw_max_bw_share, previous_min_rate;
- bool min_rate_supported;
- int err;
-
- esw_assert_qos_lock_held(vport_node->esw);
- fw_max_bw_share = MLX5_CAP_QOS(vport->dev, max_tsar_bw_share);
- min_rate_supported = MLX5_CAP_QOS(vport->dev, esw_bw_share) &&
- fw_max_bw_share >= MLX5_MIN_BW_SHARE;
- if (min_rate && !min_rate_supported)
- return -EOPNOTSUPP;
- if (min_rate == vport_node->min_rate)
- return 0;
-
- previous_min_rate = vport_node->min_rate;
- vport_node->min_rate = min_rate;
- err = esw_qos_normalize_min_rate(vport_node->parent->esw, vport_node->parent, extack);
- if (err)
- vport_node->min_rate = previous_min_rate;
-
- return err;
}
-static int esw_qos_set_vport_max_rate(struct mlx5_vport *vport,
- u32 max_rate, struct netlink_ext_ack *extack)
+static u32 esw_qos_calculate_tc_bw_divider(u32 *tc_bw)
{
- struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
- u32 act_max_rate = max_rate;
- bool max_rate_supported;
- int err;
+ u32 total = 0;
+ int i;
- esw_assert_qos_lock_held(vport_node->esw);
- max_rate_supported = MLX5_CAP_QOS(vport->dev, esw_rate_limit);
+ for (i = 0; i < DEVLINK_RATE_TCS_MAX; i++)
+ total += tc_bw[i];
- if (max_rate && !max_rate_supported)
- return -EOPNOTSUPP;
- if (max_rate == vport_node->max_rate)
- return 0;
-
- /* Use parent node limit if new max rate is 0. */
- if (!max_rate)
- act_max_rate = vport_node->parent->max_rate;
-
- err = esw_qos_sched_elem_config(vport_node, act_max_rate, vport_node->bw_share, extack);
- if (!err)
- vport_node->max_rate = max_rate;
-
- return err;
+ /* If total is zero, tc-bw config is disabled and we shouldn't reach
+ * here.
+ */
+ return WARN_ON(!total) ? 1 : total;
}
static int esw_qos_set_node_min_rate(struct mlx5_esw_sched_node *node,
u32 min_rate, struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = node->esw;
- u32 previous_min_rate;
- int err;
-
- if (!MLX5_CAP_QOS(esw->dev, esw_bw_share) ||
- MLX5_CAP_QOS(esw->dev, max_tsar_bw_share) < MLX5_MIN_BW_SHARE)
- return -EOPNOTSUPP;
if (min_rate == node->min_rate)
return 0;
- previous_min_rate = node->min_rate;
node->min_rate = min_rate;
- err = esw_qos_normalize_min_rate(esw, NULL, extack);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "E-Switch node min rate setting failed");
-
- /* Attempt restoring previous configuration */
- node->min_rate = previous_min_rate;
- if (esw_qos_normalize_min_rate(esw, NULL, extack))
- NL_SET_ERR_MSG_MOD(extack, "E-Switch BW share restore failed");
- }
-
- return err;
-}
+ esw_qos_normalize_min_rate(esw, node->parent, extack);
-static int esw_qos_set_node_max_rate(struct mlx5_esw_sched_node *node,
- u32 max_rate, struct netlink_ext_ack *extack)
-{
- struct mlx5_esw_sched_node *vport_node;
- int err;
-
- if (node->max_rate == max_rate)
- return 0;
-
- err = esw_qos_sched_elem_config(node, max_rate, node->bw_share, extack);
- if (err)
- return err;
-
- node->max_rate = max_rate;
-
- /* Any unlimited vports in the node should be set with the value of the node. */
- list_for_each_entry(vport_node, &node->children, entry) {
- if (vport_node->max_rate)
- continue;
-
- err = esw_qos_sched_elem_config(vport_node, max_rate, vport_node->bw_share, extack);
- if (err)
- NL_SET_ERR_MSG_MOD(extack,
- "E-Switch vport implicit rate limit setting failed");
- }
-
- return err;
+ return 0;
}
-static int esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_element_id,
- u32 *tsar_ix)
+static int
+esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_element_id,
+ u32 max_rate, u32 bw_share, u32 *tsar_ix)
{
u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
void *attr;
@@ -388,6 +442,8 @@ static int esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_
SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
MLX5_SET(scheduling_context, tsar_ctx, parent_element_id,
parent_element_id);
+ MLX5_SET(scheduling_context, tsar_ctx, max_average_bw, max_rate);
+ MLX5_SET(scheduling_context, tsar_ctx, bw_share, bw_share);
attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
MLX5_SET(tsar_element, attr, tsar_type, TSAR_ELEMENT_TSAR_TYPE_DWRR);
@@ -398,116 +454,68 @@ static int esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_
}
static int
-esw_qos_vport_create_sched_element(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent,
- u32 max_rate, u32 bw_share, u32 *sched_elem_ix)
+esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_node,
+ struct netlink_ext_ack *extack)
{
+ struct mlx5_esw_sched_node *parent = vport_node->parent;
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
- struct mlx5_core_dev *dev = parent->esw->dev;
+ struct mlx5_core_dev *dev = vport_node->esw->dev;
void *attr;
- int err;
- if (!mlx5_qos_element_type_supported(dev,
- SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT,
- SCHEDULING_HIERARCHY_E_SWITCH))
+ if (!mlx5_qos_element_type_supported(
+ dev,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT,
+ SCHEDULING_HIERARCHY_E_SWITCH))
return -EOPNOTSUPP;
MLX5_SET(scheduling_context, sched_ctx, element_type,
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
attr = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes);
- MLX5_SET(vport_element, attr, vport_number, vport->vport);
- MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent->ix);
- MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate);
- MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
+ MLX5_SET(vport_element, attr, vport_number, vport_node->vport->vport);
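+	/* Without a parent node, attach the vport directly under the root TSAR. */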
+ MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
+ parent ? parent->ix : vport_node->esw->qos.root_tsar_ix);
+ MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
+ vport_node->max_rate);
- err = mlx5_create_scheduling_element_cmd(dev,
- SCHEDULING_HIERARCHY_E_SWITCH,
- sched_ctx,
- sched_elem_ix);
- if (err) {
- esw_warn(dev,
- "E-Switch create vport scheduling element failed (vport=%d,err=%d)\n",
- vport->vport, err);
- return err;
- }
-
- return 0;
-}
-
-static int esw_qos_update_node_scheduling_element(struct mlx5_vport *vport,
- struct mlx5_esw_sched_node *curr_node,
- struct mlx5_esw_sched_node *new_node,
- struct netlink_ext_ack *extack)
-{
- struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
- u32 max_rate;
- int err;
-
- err = mlx5_destroy_scheduling_element_cmd(curr_node->esw->dev,
- SCHEDULING_HIERARCHY_E_SWITCH,
- vport_node->ix);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy vport scheduling element failed");
- return err;
- }
-
- /* Use new node max rate if vport max rate is unlimited. */
- max_rate = vport_node->max_rate ? vport_node->max_rate : new_node->max_rate;
- err = esw_qos_vport_create_sched_element(vport, new_node, max_rate,
- vport_node->bw_share,
- &vport_node->ix);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "E-Switch vport node set failed.");
- goto err_sched;
- }
-
- esw_qos_node_set_parent(vport->qos.sched_node, new_node);
-
- return 0;
-
-err_sched:
- max_rate = vport_node->max_rate ? vport_node->max_rate : curr_node->max_rate;
- if (esw_qos_vport_create_sched_element(vport, curr_node, max_rate,
- vport_node->bw_share,
- &vport_node->ix))
- esw_warn(curr_node->esw->dev, "E-Switch vport node restore failed (vport=%d)\n",
- vport->vport);
-
- return err;
+ return esw_qos_node_create_sched_element(vport_node, sched_ctx, extack);
}
-static int esw_qos_vport_update_node(struct mlx5_vport *vport,
- struct mlx5_esw_sched_node *node,
- struct netlink_ext_ack *extack)
+static int
+esw_qos_vport_tc_create_sched_element(struct mlx5_esw_sched_node *vport_tc_node,
+ u32 rate_limit_elem_ix,
+ struct netlink_ext_ack *extack)
{
- struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
- struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
- struct mlx5_esw_sched_node *new_node, *curr_node;
- int err;
-
- esw_assert_qos_lock_held(esw);
- curr_node = vport_node->parent;
- new_node = node ?: esw->qos.node0;
- if (curr_node == new_node)
- return 0;
-
- err = esw_qos_update_node_scheduling_element(vport, curr_node, new_node, extack);
- if (err)
- return err;
+ u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
+ struct mlx5_core_dev *dev = vport_tc_node->esw->dev;
+ void *attr;
- /* Recalculate bw share weights of old and new nodes */
- if (vport_node->bw_share || new_node->bw_share) {
- esw_qos_normalize_min_rate(curr_node->esw, curr_node, extack);
- esw_qos_normalize_min_rate(new_node->esw, new_node, extack);
- }
+ if (!mlx5_qos_element_type_supported(
+ dev,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC,
+ SCHEDULING_HIERARCHY_E_SWITCH))
+ return -EOPNOTSUPP;
- return 0;
+ MLX5_SET(scheduling_context, sched_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC);
+ attr = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes);
+ MLX5_SET(vport_tc_element, attr, vport_number,
+ vport_tc_node->vport->vport);
+ MLX5_SET(vport_tc_element, attr, traffic_class, vport_tc_node->tc);
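+	/* max_bw_obj_id ties the TC element to the vport's rate limiter, if any. */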
+ MLX5_SET(scheduling_context, sched_ctx, max_bw_obj_id,
+ rate_limit_elem_ix);
+ MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
+ vport_tc_node->parent->ix);
+ MLX5_SET(scheduling_context, sched_ctx, bw_share,
+ vport_tc_node->bw_share);
+
+ return esw_qos_node_create_sched_element(vport_tc_node, sched_ctx,
+ extack);
}
static struct mlx5_esw_sched_node *
__esw_qos_alloc_node(struct mlx5_eswitch *esw, u32 tsar_ix, enum sched_node_type type,
struct mlx5_esw_sched_node *parent)
{
- struct list_head *parent_children;
struct mlx5_esw_sched_node *node;
node = kzalloc(sizeof(*node), GFP_KERNEL);
@@ -519,8 +527,15 @@ __esw_qos_alloc_node(struct mlx5_eswitch *esw, u32 tsar_ix, enum sched_node_type
node->type = type;
node->parent = parent;
INIT_LIST_HEAD(&node->children);
- parent_children = parent ? &parent->children : &esw->qos.domain->nodes;
- list_add_tail(&node->entry, parent_children);
+ esw_qos_node_attach_to_parent(node);
+ if (!parent) {
+		/* The caller is responsible for inserting the node into the
+		 * parent list if necessary. A NULL parent does not
+		 * necessarily mean the node is attached to the root
+		 * scheduling element.
+ */
+ list_del_init(&node->entry);
+ }
return node;
}
@@ -531,6 +546,153 @@ static void __esw_qos_free_node(struct mlx5_esw_sched_node *node)
kfree(node);
}
+static void esw_qos_destroy_node(struct mlx5_esw_sched_node *node, struct netlink_ext_ack *extack)
+{
+ esw_qos_node_destroy_sched_element(node, extack);
+ __esw_qos_free_node(node);
+}
+
+static int esw_qos_create_vports_tc_node(struct mlx5_esw_sched_node *parent,
+ u8 tc, struct netlink_ext_ack *extack)
+{
+ u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
+ struct mlx5_core_dev *dev = parent->esw->dev;
+ struct mlx5_esw_sched_node *vports_tc_node;
+ void *attr;
+ int err;
+
+ if (!mlx5_qos_element_type_supported(
+ dev,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR,
+ SCHEDULING_HIERARCHY_E_SWITCH) ||
+ !mlx5_qos_tsar_type_supported(dev,
+ TSAR_ELEMENT_TSAR_TYPE_DWRR,
+ SCHEDULING_HIERARCHY_E_SWITCH))
+ return -EOPNOTSUPP;
+
+ vports_tc_node = __esw_qos_alloc_node(parent->esw, 0,
+ SCHED_NODE_TYPE_VPORTS_TC_TSAR,
+ parent);
+ if (!vports_tc_node) {
+ NL_SET_ERR_MSG_MOD(extack, "E-Switch alloc node failed");
+ esw_warn(dev, "Failed to alloc vports TC node (tc=%d)\n", tc);
+ return -ENOMEM;
+ }
+
+ attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
+ MLX5_SET(tsar_element, attr, tsar_type, TSAR_ELEMENT_TSAR_TYPE_DWRR);
+ MLX5_SET(tsar_element, attr, traffic_class, tc);
+ MLX5_SET(scheduling_context, tsar_ctx, parent_element_id, parent->ix);
+ MLX5_SET(scheduling_context, tsar_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
+
+ err = esw_qos_node_create_sched_element(vports_tc_node, tsar_ctx,
+ extack);
+ if (err)
+ goto err_create_sched_element;
+
+ vports_tc_node->tc = tc;
+
+ return 0;
+
+err_create_sched_element:
+ __esw_qos_free_node(vports_tc_node);
+ return err;
+}
+
+static void
+esw_qos_tc_arbiter_get_bw_shares(struct mlx5_esw_sched_node *tc_arbiter_node,
+ u32 *tc_bw)
+{
+ memcpy(tc_bw, tc_arbiter_node->tc_bw, sizeof(tc_arbiter_node->tc_bw));
+}
+
+static void
+esw_qos_set_tc_arbiter_bw_shares(struct mlx5_esw_sched_node *tc_arbiter_node,
+ u32 *tc_bw, struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw = tc_arbiter_node->esw;
+ struct mlx5_esw_sched_node *vports_tc_node;
+ u32 divider, fw_max_bw_share;
+
+ fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
+ divider = esw_qos_calculate_tc_bw_divider(tc_bw);
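+	/* Convert each TC's requested share into a firmware bw_share weight. */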
+ list_for_each_entry(vports_tc_node, &tc_arbiter_node->children, entry) {
+ u8 tc = vports_tc_node->tc;
+ u32 bw_share;
+
+ tc_arbiter_node->tc_bw[tc] = tc_bw[tc];
+ bw_share = tc_bw[tc] * fw_max_bw_share;
+ bw_share = esw_qos_calc_bw_share(bw_share, divider,
+ fw_max_bw_share);
+ esw_qos_sched_elem_config(vports_tc_node, 0, bw_share, extack);
+ }
+}
+
+static void
+esw_qos_destroy_vports_tc_nodes(struct mlx5_esw_sched_node *tc_arbiter_node,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *vports_tc_node, *tmp;
+
+ list_for_each_entry_safe(vports_tc_node, tmp,
+ &tc_arbiter_node->children, entry)
+ esw_qos_destroy_node(vports_tc_node, extack);
+}
+
+static int
+esw_qos_create_vports_tc_nodes(struct mlx5_esw_sched_node *tc_arbiter_node,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw = tc_arbiter_node->esw;
+ int err, i, num_tcs = esw_qos_num_tcs(esw->dev);
+
+ for (i = 0; i < num_tcs; i++) {
+ err = esw_qos_create_vports_tc_node(tc_arbiter_node, i, extack);
+ if (err)
+ goto err_tc_node_create;
+ }
+
+ return 0;
+
+err_tc_node_create:
+ esw_qos_destroy_vports_tc_nodes(tc_arbiter_node, NULL);
+ return err;
+}
+
+static int esw_qos_create_tc_arbiter_sched_elem(
+ struct mlx5_esw_sched_node *tc_arbiter_node,
+ struct netlink_ext_ack *extack)
+{
+ u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
+ u32 tsar_parent_ix;
+ void *attr;
+
+ if (!mlx5_qos_tsar_type_supported(tc_arbiter_node->esw->dev,
+ TSAR_ELEMENT_TSAR_TYPE_TC_ARB,
+ SCHEDULING_HIERARCHY_E_SWITCH)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "E-Switch TC Arbiter scheduling element is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
+ MLX5_SET(tsar_element, attr, tsar_type, TSAR_ELEMENT_TSAR_TYPE_TC_ARB);
+ tsar_parent_ix = tc_arbiter_node->parent ? tc_arbiter_node->parent->ix :
+ tc_arbiter_node->esw->qos.root_tsar_ix;
+ MLX5_SET(scheduling_context, tsar_ctx, parent_element_id,
+ tsar_parent_ix);
+ MLX5_SET(scheduling_context, tsar_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
+ MLX5_SET(scheduling_context, tsar_ctx, max_average_bw,
+ tc_arbiter_node->max_rate);
+ MLX5_SET(scheduling_context, tsar_ctx, bw_share,
+ tc_arbiter_node->bw_share);
+
+ return esw_qos_node_create_sched_element(tc_arbiter_node, tsar_ctx,
+ extack);
+}
+
static struct mlx5_esw_sched_node *
__esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct mlx5_esw_sched_node *parent,
struct netlink_ext_ack *extack)
@@ -539,7 +701,8 @@ __esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct mlx5_esw_sch
u32 tsar_ix;
int err;
- err = esw_qos_create_node_sched_elem(esw->dev, esw->qos.root_tsar_ix, &tsar_ix);
+ err = esw_qos_create_node_sched_elem(esw->dev, esw->qos.root_tsar_ix, 0,
+ 0, &tsar_ix);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "E-Switch create TSAR for node failed");
return ERR_PTR(err);
@@ -552,17 +715,12 @@ __esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct mlx5_esw_sch
goto err_alloc_node;
}
- err = esw_qos_normalize_min_rate(esw, NULL, extack);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "E-Switch nodes normalization failed");
- goto err_min_rate;
- }
+ list_add_tail(&node->entry, &esw->qos.domain->nodes);
+ esw_qos_normalize_min_rate(esw, NULL, extack);
trace_mlx5_esw_node_qos_create(esw->dev, node, node->ix);
return node;
-err_min_rate:
- __esw_qos_free_node(node);
err_alloc_node:
if (mlx5_destroy_scheduling_element_cmd(esw->dev,
SCHEDULING_HIERARCHY_E_SWITCH,
@@ -595,26 +753,16 @@ esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct netlink_ext_ac
return node;
}
-static int __esw_qos_destroy_node(struct mlx5_esw_sched_node *node, struct netlink_ext_ack *extack)
+static void __esw_qos_destroy_node(struct mlx5_esw_sched_node *node, struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = node->esw;
- int err;
-
- trace_mlx5_esw_node_qos_destroy(esw->dev, node, node->ix);
-
- err = mlx5_destroy_scheduling_element_cmd(esw->dev,
- SCHEDULING_HIERARCHY_E_SWITCH,
- node->ix);
- if (err)
- NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR_ID failed");
- __esw_qos_free_node(node);
-
- err = esw_qos_normalize_min_rate(esw, NULL, extack);
- if (err)
- NL_SET_ERR_MSG_MOD(extack, "E-Switch nodes normalization failed");
+ if (node->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
+ esw_qos_destroy_vports_tc_nodes(node, extack);
- return err;
+ trace_mlx5_esw_node_qos_destroy(esw->dev, node, node->ix);
+ esw_qos_destroy_node(node, extack);
+ esw_qos_normalize_min_rate(esw, NULL, extack);
}
static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
@@ -625,51 +773,22 @@ static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
return -EOPNOTSUPP;
- err = esw_qos_create_node_sched_elem(esw->dev, 0, &esw->qos.root_tsar_ix);
+ err = esw_qos_create_node_sched_elem(esw->dev, 0, 0, 0,
+ &esw->qos.root_tsar_ix);
if (err) {
esw_warn(dev, "E-Switch create root TSAR failed (%d)\n", err);
return err;
}
- if (MLX5_CAP_QOS(dev, log_esw_max_sched_depth)) {
- esw->qos.node0 = __esw_qos_create_vports_sched_node(esw, NULL, extack);
- } else {
- /* The eswitch doesn't support scheduling nodes.
- * Create a software-only node0 using the root TSAR to attach vport QoS to.
- */
- if (!__esw_qos_alloc_node(esw,
- esw->qos.root_tsar_ix,
- SCHED_NODE_TYPE_VPORTS_TSAR,
- NULL))
- esw->qos.node0 = ERR_PTR(-ENOMEM);
- }
- if (IS_ERR(esw->qos.node0)) {
- err = PTR_ERR(esw->qos.node0);
- esw_warn(dev, "E-Switch create rate node 0 failed (%d)\n", err);
- goto err_node0;
- }
refcount_set(&esw->qos.refcnt, 1);
return 0;
-
-err_node0:
- if (mlx5_destroy_scheduling_element_cmd(esw->dev, SCHEDULING_HIERARCHY_E_SWITCH,
- esw->qos.root_tsar_ix))
- esw_warn(esw->dev, "E-Switch destroy root TSAR failed.\n");
-
- return err;
}
static void esw_qos_destroy(struct mlx5_eswitch *esw)
{
int err;
- if (esw->qos.node0->ix != esw->qos.root_tsar_ix)
- __esw_qos_destroy_node(esw->qos.node0, NULL);
- else
- __esw_qos_free_node(esw->qos.node0);
- esw->qos.node0 = NULL;
-
err = mlx5_destroy_scheduling_element_cmd(esw->dev,
SCHEDULING_HIERARCHY_E_SWITCH,
esw->qos.root_tsar_ix);
@@ -699,97 +818,383 @@ static void esw_qos_put(struct mlx5_eswitch *esw)
esw_qos_destroy(esw);
}
-static int esw_qos_vport_enable(struct mlx5_vport *vport,
- u32 max_rate, u32 bw_share, struct netlink_ext_ack *extack)
+static void
+esw_qos_tc_arbiter_scheduling_teardown(struct mlx5_esw_sched_node *node,
+ struct netlink_ext_ack *extack)
{
- struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
- u32 sched_elem_ix;
- int err;
+	/* Clean up all vports TC nodes within the TC arbiter node. */
+ esw_qos_destroy_vports_tc_nodes(node, extack);
+ /* Destroy the scheduling element for the TC arbiter node itself. */
+ esw_qos_node_destroy_sched_element(node, extack);
+}
- esw_assert_qos_lock_held(esw);
- if (vport->qos.sched_node)
- return 0;
+static int esw_qos_tc_arbiter_scheduling_setup(struct mlx5_esw_sched_node *node,
+ struct netlink_ext_ack *extack)
+{
+ u32 curr_ix = node->ix;
+ int err;
- err = esw_qos_get(esw, extack);
+ err = esw_qos_create_tc_arbiter_sched_elem(node, extack);
if (err)
return err;
+ /* Initialize the vports TC nodes within created TC arbiter TSAR. */
+ err = esw_qos_create_vports_tc_nodes(node, extack);
+ if (err)
+ goto err_vports_tc_nodes;
- err = esw_qos_vport_create_sched_element(vport, esw->qos.node0, max_rate, bw_share,
- &sched_elem_ix);
+ node->type = SCHED_NODE_TYPE_TC_ARBITER_TSAR;
+
+ return 0;
+
+err_vports_tc_nodes:
+ /* If initialization fails, clean up the scheduling element
+ * for the TC arbiter node.
+ */
+ esw_qos_node_destroy_sched_element(node, NULL);
+ node->ix = curr_ix;
+ return err;
+}
+
+static int
+esw_qos_create_vport_tc_sched_node(struct mlx5_vport *vport,
+ u32 rate_limit_elem_ix,
+ struct mlx5_esw_sched_node *vports_tc_node,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+ struct mlx5_esw_sched_node *vport_tc_node;
+ u8 tc = vports_tc_node->tc;
+ int err;
+
+ vport_tc_node = __esw_qos_alloc_node(vport_node->esw, 0,
+ SCHED_NODE_TYPE_VPORT_TC,
+ vports_tc_node);
+ if (!vport_tc_node)
+ return -ENOMEM;
+
+ vport_tc_node->min_rate = vport_node->min_rate;
+ vport_tc_node->tc = tc;
+ vport_tc_node->vport = vport;
+ err = esw_qos_vport_tc_create_sched_element(vport_tc_node,
+ rate_limit_elem_ix,
+ extack);
if (err)
goto err_out;
- vport->qos.sched_node = __esw_qos_alloc_node(esw, sched_elem_ix, SCHED_NODE_TYPE_VPORT,
- esw->qos.node0);
- if (!vport->qos.sched_node) {
- err = -ENOMEM;
- goto err_alloc;
+ vport->qos.sched_nodes[tc] = vport_tc_node;
+
+ return 0;
+err_out:
+ __esw_qos_free_node(vport_tc_node);
+ return err;
+}
+
+static void
+esw_qos_destroy_vport_tc_sched_elements(struct mlx5_vport *vport,
+ struct netlink_ext_ack *extack)
+{
+ int i, num_tcs = esw_qos_num_tcs(vport->qos.sched_node->esw->dev);
+
+ for (i = 0; i < num_tcs; i++) {
+ if (vport->qos.sched_nodes[i]) {
+ __esw_qos_destroy_node(vport->qos.sched_nodes[i],
+ extack);
+ }
}
- vport->qos.sched_node->vport = vport;
+ kfree(vport->qos.sched_nodes);
+ vport->qos.sched_nodes = NULL;
+}
+
+static int
+esw_qos_create_vport_tc_sched_elements(struct mlx5_vport *vport,
+ enum sched_node_type type,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+ struct mlx5_esw_sched_node *tc_arbiter_node, *vports_tc_node;
+ int err, num_tcs = esw_qos_num_tcs(vport_node->esw->dev);
+ u32 rate_limit_elem_ix;
+
+ vport->qos.sched_nodes = kcalloc(num_tcs,
+ sizeof(struct mlx5_esw_sched_node *),
+ GFP_KERNEL);
+ if (!vport->qos.sched_nodes) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Allocating the vport TC scheduling elements failed.");
+ return -ENOMEM;
+ }
- trace_mlx5_esw_vport_qos_create(vport->dev, vport, bw_share, max_rate);
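+	/* With a rate limiter, the per-TC elements are created under the
+	 * parent TC arbiter and all point at the vport's rate limiter;
+	 * otherwise they are created under the vport's own TC arbiter.
+	 */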
+ rate_limit_elem_ix = type == SCHED_NODE_TYPE_RATE_LIMITER ?
+ vport_node->ix : 0;
+ tc_arbiter_node = type == SCHED_NODE_TYPE_RATE_LIMITER ?
+ vport_node->parent : vport_node;
+ list_for_each_entry(vports_tc_node, &tc_arbiter_node->children, entry) {
+ err = esw_qos_create_vport_tc_sched_node(vport,
+ rate_limit_elem_ix,
+ vports_tc_node,
+ extack);
+ if (err)
+ goto err_create_vport_tc;
+ }
return 0;
-err_alloc:
- if (mlx5_destroy_scheduling_element_cmd(esw->dev,
- SCHEDULING_HIERARCHY_E_SWITCH, sched_elem_ix))
- esw_warn(esw->dev, "E-Switch destroy vport scheduling element failed.\n");
+err_create_vport_tc:
+ esw_qos_destroy_vport_tc_sched_elements(vport, NULL);
+
+ return err;
+}
+
+static int
+esw_qos_vport_tc_enable(struct mlx5_vport *vport, enum sched_node_type type,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+ struct mlx5_esw_sched_node *parent = vport_node->parent;
+ int err;
+
+ if (type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) {
+ int new_level, max_level;
+
+ /* Increase the parent's level by 2 to account for both the
+ * TC arbiter and the vports TC scheduling element.
+ */
+ new_level = (parent ? parent->level : 2) + 2;
+ max_level = 1 << MLX5_CAP_QOS(vport_node->esw->dev,
+ log_esw_max_sched_depth);
+ if (new_level > max_level) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "TC arbitration on leafs is not supported beyond max depth %d",
+ max_level);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ esw_assert_qos_lock_held(vport->dev->priv.eswitch);
+
+ if (type == SCHED_NODE_TYPE_RATE_LIMITER)
+ err = esw_qos_create_rate_limit_element(vport_node, extack);
+ else
+ err = esw_qos_tc_arbiter_scheduling_setup(vport_node, extack);
+ if (err)
+ return err;
+
+	/* A rate limiter affects nodes that are not directly connected to it
+	 * and is not a direct member of the QoS hierarchy.
+	 * Unlink the vport node from its parent to reflect that.
+	 */
+ if (type == SCHED_NODE_TYPE_RATE_LIMITER) {
+ list_del_init(&vport_node->entry);
+ vport_node->level = 0;
+ }
+
+ err = esw_qos_create_vport_tc_sched_elements(vport, type, extack);
+ if (err)
+ goto err_sched_nodes;
+
+ return 0;
+
+err_sched_nodes:
+ if (type == SCHED_NODE_TYPE_RATE_LIMITER) {
+ esw_qos_node_destroy_sched_element(vport_node, NULL);
+ esw_qos_node_attach_to_parent(vport_node);
+ } else {
+ esw_qos_tc_arbiter_scheduling_teardown(vport_node, NULL);
+ }
+ return err;
+}
+
+static void esw_qos_vport_tc_disable(struct mlx5_vport *vport,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+ enum sched_node_type curr_type = vport_node->type;
+
+ esw_qos_destroy_vport_tc_sched_elements(vport, extack);
+
+ if (curr_type == SCHED_NODE_TYPE_RATE_LIMITER)
+ esw_qos_node_destroy_sched_element(vport_node, extack);
+ else
+ esw_qos_tc_arbiter_scheduling_teardown(vport_node, extack);
+}
+
+static int esw_qos_set_vport_tcs_min_rate(struct mlx5_vport *vport,
+ u32 min_rate,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+ int err, i, num_tcs = esw_qos_num_tcs(vport_node->esw->dev);
+
+ for (i = 0; i < num_tcs; i++) {
+ err = esw_qos_set_node_min_rate(vport->qos.sched_nodes[i],
+ min_rate, extack);
+ if (err)
+ goto err_out;
+ }
+ vport_node->min_rate = min_rate;
+
+ return 0;
err_out:
- esw_qos_put(esw);
+ for (--i; i >= 0; i--) {
+ esw_qos_set_node_min_rate(vport->qos.sched_nodes[i],
+ vport_node->min_rate, extack);
+ }
+ return err;
+}
+
+static void esw_qos_vport_disable(struct mlx5_vport *vport, struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+ enum sched_node_type curr_type = vport_node->type;
+
+ if (curr_type == SCHED_NODE_TYPE_VPORT)
+ esw_qos_node_destroy_sched_element(vport_node, extack);
+ else
+ esw_qos_vport_tc_disable(vport, extack);
+
+ vport_node->bw_share = 0;
+ memset(vport_node->tc_bw, 0, sizeof(vport_node->tc_bw));
+ list_del_init(&vport_node->entry);
+ esw_qos_normalize_min_rate(vport_node->esw, vport_node->parent, extack);
+
+ trace_mlx5_esw_vport_qos_destroy(vport_node->esw->dev, vport);
+}
+
+static int esw_qos_vport_enable(struct mlx5_vport *vport,
+ enum sched_node_type type,
+ struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+ int err;
+
+ esw_assert_qos_lock_held(vport->dev->priv.eswitch);
+
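+	/* Attach to the requested parent before creating the FW scheduling elements. */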
+ esw_qos_node_set_parent(vport_node, parent);
+ if (type == SCHED_NODE_TYPE_VPORT)
+ err = esw_qos_vport_create_sched_element(vport_node, extack);
+ else
+ err = esw_qos_vport_tc_enable(vport, type, extack);
+ if (err)
+ return err;
+
+ vport_node->type = type;
+ esw_qos_normalize_min_rate(vport_node->esw, parent, extack);
+ trace_mlx5_esw_vport_qos_create(vport->dev, vport, vport_node->max_rate,
+ vport_node->bw_share);
+
+ return 0;
+}
+
+static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_type type,
+ struct mlx5_esw_sched_node *parent, u32 max_rate,
+ u32 min_rate, struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
+ struct mlx5_esw_sched_node *sched_node;
+ struct mlx5_eswitch *parent_esw;
+ int err;
+
+ esw_assert_qos_lock_held(esw);
+ err = esw_qos_get(esw, extack);
+ if (err)
+ return err;
+
+ parent_esw = parent ? parent->esw : esw;
+ sched_node = __esw_qos_alloc_node(parent_esw, 0, type, parent);
+ if (!sched_node) {
+ esw_qos_put(esw);
+ return -ENOMEM;
+ }
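+	/* Parentless vport nodes are tracked directly on the QoS domain list. */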
+ if (!parent)
+ list_add_tail(&sched_node->entry, &esw->qos.domain->nodes);
+
+ sched_node->max_rate = max_rate;
+ sched_node->min_rate = min_rate;
+ sched_node->vport = vport;
+ vport->qos.sched_node = sched_node;
+ err = esw_qos_vport_enable(vport, type, parent, extack);
+ if (err) {
+ __esw_qos_free_node(sched_node);
+ esw_qos_put(esw);
+ vport->qos.sched_node = NULL;
+ }
return err;
}
+static void mlx5_esw_qos_vport_disable_locked(struct mlx5_vport *vport)
+{
+ struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
+
+ esw_assert_qos_lock_held(esw);
+ if (!vport->qos.sched_node)
+ return;
+
+ esw_qos_vport_disable(vport, NULL);
+ mlx5_esw_qos_vport_qos_free(vport);
+ esw_qos_put(esw);
+}
+
void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport)
{
struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
- struct mlx5_esw_sched_node *vport_node;
- struct mlx5_core_dev *dev;
- int err;
+ struct mlx5_esw_sched_node *parent;
lockdep_assert_held(&esw->state_lock);
esw_qos_lock(esw);
- vport_node = vport->qos.sched_node;
- if (!vport_node)
+ if (!vport->qos.sched_node)
goto unlock;
- WARN(vport_node->parent != esw->qos.node0,
- "Disabling QoS on port before detaching it from node");
- dev = vport_node->esw->dev;
- trace_mlx5_esw_vport_qos_destroy(dev, vport);
+ parent = vport->qos.sched_node->parent;
+ WARN(parent, "Disabling QoS on port before detaching it from node");
- err = mlx5_destroy_scheduling_element_cmd(dev,
- SCHEDULING_HIERARCHY_E_SWITCH,
- vport_node->ix);
- if (err)
- esw_warn(dev,
- "E-Switch destroy vport scheduling element failed (vport=%d,err=%d)\n",
- vport->vport, err);
-
- __esw_qos_free_node(vport_node);
- memset(&vport->qos, 0, sizeof(vport->qos));
-
- esw_qos_put(esw);
+ mlx5_esw_qos_vport_disable_locked(vport);
unlock:
esw_qos_unlock(esw);
}
+static int mlx5_esw_qos_set_vport_max_rate(struct mlx5_vport *vport, u32 max_rate,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+
+ esw_assert_qos_lock_held(vport->dev->priv.eswitch);
+
+ if (!vport_node)
+ return mlx5_esw_qos_vport_enable(vport, SCHED_NODE_TYPE_VPORT, NULL, max_rate, 0,
+ extack);
+ else
+ return esw_qos_sched_elem_config(vport_node, max_rate, vport_node->bw_share,
+ extack);
+}
+
+static int mlx5_esw_qos_set_vport_min_rate(struct mlx5_vport *vport, u32 min_rate,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+
+ esw_assert_qos_lock_held(vport->dev->priv.eswitch);
+
+ if (!vport_node)
+ return mlx5_esw_qos_vport_enable(vport, SCHED_NODE_TYPE_VPORT, NULL, 0, min_rate,
+ extack);
+ else if (vport_node->type == SCHED_NODE_TYPE_RATE_LIMITER)
+ return esw_qos_set_vport_tcs_min_rate(vport, min_rate, extack);
+ else
+ return esw_qos_set_node_min_rate(vport_node, min_rate, extack);
+}
+
int mlx5_esw_qos_set_vport_rate(struct mlx5_vport *vport, u32 max_rate, u32 min_rate)
{
struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
int err;
esw_qos_lock(esw);
- err = esw_qos_vport_enable(vport, 0, 0, NULL);
- if (err)
- goto unlock;
-
- err = esw_qos_set_vport_min_rate(vport, min_rate, NULL);
+ err = mlx5_esw_qos_set_vport_min_rate(vport, min_rate, NULL);
if (!err)
- err = esw_qos_set_vport_max_rate(vport, max_rate, NULL);
-unlock:
+ err = mlx5_esw_qos_set_vport_max_rate(vport, max_rate, NULL);
esw_qos_unlock(esw);
return err;
}
@@ -809,6 +1214,282 @@ bool mlx5_esw_qos_get_vport_rate(struct mlx5_vport *vport, u32 *max_rate, u32 *m
return enabled;
}
+static int esw_qos_vport_tc_check_type(enum sched_node_type curr_type,
+ enum sched_node_type new_type,
+ struct netlink_ext_ack *extack)
+{
+ if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR &&
+ new_type == SCHED_NODE_TYPE_RATE_LIMITER) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot switch from vport-level TC arbitration to node-level TC arbitration");
+ return -EOPNOTSUPP;
+ }
+
+ if (curr_type == SCHED_NODE_TYPE_RATE_LIMITER &&
+ new_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot switch from node-level TC arbitration to vport-level TC arbitration");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int esw_qos_vport_update(struct mlx5_vport *vport,
+ enum sched_node_type type,
+ struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+ struct mlx5_esw_sched_node *curr_parent = vport_node->parent;
+ enum sched_node_type curr_type = vport_node->type;
+ u32 curr_tc_bw[DEVLINK_RATE_TCS_MAX] = {0};
+ int err;
+
+ esw_assert_qos_lock_held(vport->dev->priv.eswitch);
+ if (curr_type == type && curr_parent == parent)
+ return 0;
+
+ err = esw_qos_vport_tc_check_type(curr_type, type, extack);
+ if (err)
+ return err;
+
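+	/* Preserve the TC bandwidth split across the disable/re-enable cycle. */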
+ if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type)
+ esw_qos_tc_arbiter_get_bw_shares(vport_node, curr_tc_bw);
+
+ esw_qos_vport_disable(vport, extack);
+
+ err = esw_qos_vport_enable(vport, type, parent, extack);
+ if (err) {
+ esw_qos_vport_enable(vport, curr_type, curr_parent, NULL);
+ extack = NULL;
+ }
+
+ if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type) {
+ esw_qos_set_tc_arbiter_bw_shares(vport_node, curr_tc_bw,
+ extack);
+ }
+
+ return err;
+}
+
+static int esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
+ struct mlx5_esw_sched_node *curr_parent;
+ enum sched_node_type type;
+
+ esw_assert_qos_lock_held(esw);
+ curr_parent = vport->qos.sched_node->parent;
+ if (curr_parent == parent)
+ return 0;
+
+ /* Set vport QoS type based on parent node type if different from
+ * default QoS; otherwise, use the vport's current QoS type.
+ */
+ if (parent && parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
+ type = SCHED_NODE_TYPE_RATE_LIMITER;
+ else if (curr_parent &&
+ curr_parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
+ type = SCHED_NODE_TYPE_VPORT;
+ else
+ type = vport->qos.sched_node->type;
+
+ return esw_qos_vport_update(vport, type, parent, extack);
+}
+
+static void
+esw_qos_switch_vport_tcs_to_vport(struct mlx5_esw_sched_node *tc_arbiter_node,
+ struct mlx5_esw_sched_node *node,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *vports_tc_node, *vport_tc_node, *tmp;
+
+ vports_tc_node = list_first_entry(&tc_arbiter_node->children,
+ struct mlx5_esw_sched_node,
+ entry);
+
+ list_for_each_entry_safe(vport_tc_node, tmp, &vports_tc_node->children,
+ entry)
+ esw_qos_vport_update_parent(vport_tc_node->vport, node, extack);
+}
+
+static int esw_qos_switch_tc_arbiter_node_to_vports(
+ struct mlx5_esw_sched_node *tc_arbiter_node,
+ struct mlx5_esw_sched_node *node,
+ struct netlink_ext_ack *extack)
+{
+ u32 parent_tsar_ix = node->parent ?
+ node->parent->ix : node->esw->qos.root_tsar_ix;
+ int err;
+
+ err = esw_qos_create_node_sched_elem(node->esw->dev, parent_tsar_ix,
+ node->max_rate, node->bw_share,
+ &node->ix);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to create scheduling element for vports node when disabling vports TC QoS");
+ return err;
+ }
+
+ node->type = SCHED_NODE_TYPE_VPORTS_TSAR;
+
+ /* Disable TC QoS for vports in the arbiter node. */
+ esw_qos_switch_vport_tcs_to_vport(tc_arbiter_node, node, extack);
+
+ return 0;
+}
+
+static int esw_qos_switch_vports_node_to_tc_arbiter(
+ struct mlx5_esw_sched_node *node,
+ struct mlx5_esw_sched_node *tc_arbiter_node,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *vport_node, *tmp;
+ struct mlx5_vport *vport;
+ int err;
+
+ /* Enable TC QoS for each vport in the node. */
+ list_for_each_entry_safe(vport_node, tmp, &node->children, entry) {
+ vport = vport_node->vport;
+ err = esw_qos_vport_update_parent(vport, tc_arbiter_node,
+ extack);
+ if (err)
+ goto err_out;
+ }
+
+ /* Destroy the current vports node TSAR. */
+ err = mlx5_destroy_scheduling_element_cmd(node->esw->dev,
+ SCHEDULING_HIERARCHY_E_SWITCH,
+ node->ix);
+ if (err)
+ goto err_out;
+
+ return 0;
+err_out:
+ /* Restore vports back into the node if an error occurs. */
+ esw_qos_switch_vport_tcs_to_vport(tc_arbiter_node, node, NULL);
+
+ return err;
+}
+
+static struct mlx5_esw_sched_node *
+esw_qos_move_node(struct mlx5_esw_sched_node *curr_node)
+{
+ struct mlx5_esw_sched_node *new_node;
+
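+	/* Clone the node's identity and adopt its children so it can be restored later. */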
+ new_node = __esw_qos_alloc_node(curr_node->esw, curr_node->ix,
+ curr_node->type, NULL);
+ if (!new_node)
+ return ERR_PTR(-ENOMEM);
+
+ esw_qos_nodes_set_parent(&curr_node->children, new_node);
+ return new_node;
+}
+
+static int esw_qos_node_disable_tc_arbitration(struct mlx5_esw_sched_node *node,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *curr_node;
+ int err;
+
+ if (node->type != SCHED_NODE_TYPE_TC_ARBITER_TSAR)
+ return 0;
+
+ /* Allocate a new rate node to hold the current state, which will allow
+ * for restoring the vports back to this node after disabling TC
+ * arbitration.
+ */
+ curr_node = esw_qos_move_node(node);
+ if (IS_ERR(curr_node)) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed setting up vports node");
+ return PTR_ERR(curr_node);
+ }
+
+ /* Disable TC QoS for all vports, and assign them back to the node. */
+ err = esw_qos_switch_tc_arbiter_node_to_vports(curr_node, node, extack);
+ if (err)
+ goto err_out;
+
+ /* Clean up the TC arbiter node after disabling TC QoS for vports. */
+ esw_qos_tc_arbiter_scheduling_teardown(curr_node, extack);
+ goto out;
+err_out:
+ esw_qos_nodes_set_parent(&curr_node->children, node);
+out:
+ __esw_qos_free_node(curr_node);
+ return err;
+}
+
+static int esw_qos_node_enable_tc_arbitration(struct mlx5_esw_sched_node *node,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *curr_node, *child;
+ int err, new_level, max_level;
+
+ if (node->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
+ return 0;
+
+ /* Increase the hierarchy level by one to account for the additional
+ * vports TC scheduling node, and verify that the new level does not
+ * exceed the maximum allowed depth.
+ */
+ new_level = node->level + 1;
+ max_level = 1 << MLX5_CAP_QOS(node->esw->dev, log_esw_max_sched_depth);
+ if (new_level > max_level) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "TC arbitration on nodes is not supported beyond max depth %d",
+ max_level);
+ return -EOPNOTSUPP;
+ }
+
+ /* Ensure the node does not contain non-leaf children before assigning
+ * TC bandwidth.
+ */
+ if (!list_empty(&node->children)) {
+ list_for_each_entry(child, &node->children, entry) {
+ if (!child->vport) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot configure TC bandwidth on a node with non-leaf children");
+ return -EOPNOTSUPP;
+ }
+ }
+ }
+
+ /* Allocate a new node that will store the information of the current
+ * node. This will be used later to restore the node if necessary.
+ */
+ curr_node = esw_qos_move_node(node);
+ if (IS_ERR(curr_node)) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed setting up node TC QoS");
+ return PTR_ERR(curr_node);
+ }
+
+ /* Initialize the TC arbiter node for QoS management.
+ * This step prepares the node for handling Traffic Class arbitration.
+ */
+ err = esw_qos_tc_arbiter_scheduling_setup(node, extack);
+ if (err)
+ goto err_setup;
+
+ /* Enable TC QoS for each vport within the current node. */
+ err = esw_qos_switch_vports_node_to_tc_arbiter(curr_node, node, extack);
+ if (err)
+ goto err_switch_vports;
+ goto out;
+
+err_switch_vports:
+ esw_qos_tc_arbiter_scheduling_teardown(node, NULL);
+ node->ix = curr_node->ix;
+ node->type = curr_node->type;
+err_setup:
+ esw_qos_nodes_set_parent(&curr_node->children, node);
+out:
+ __esw_qos_free_node(curr_node);
+ return err;
+}
+
static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev)
{
struct ethtool_link_ksettings lksettings;
@@ -829,6 +1510,7 @@ static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev)
speed = lksettings.base.speed;
out:
+ mlx5_uplink_netdev_put(mdev, slave);
return speed;
}
@@ -875,10 +1557,8 @@ static int mlx5_esw_qos_link_speed_verify(struct mlx5_core_dev *mdev,
int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps)
{
- u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
struct mlx5_vport *vport;
u32 link_speed_max;
- u32 bitmask;
int err;
vport = mlx5_eswitch_get_vport(esw, vport_num);
@@ -897,20 +1577,7 @@ int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32
}
esw_qos_lock(esw);
- if (!vport->qos.sched_node) {
- /* Eswitch QoS wasn't enabled yet. Enable it and vport QoS. */
- err = esw_qos_vport_enable(vport, rate_mbps, 0, NULL);
- } else {
- struct mlx5_core_dev *dev = vport->qos.sched_node->parent->esw->dev;
-
- MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps);
- bitmask = MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
- err = mlx5_modify_scheduling_element_cmd(dev,
- SCHEDULING_HIERARCHY_E_SWITCH,
- ctx,
- vport->qos.sched_node->ix,
- bitmask);
- }
+ err = mlx5_esw_qos_set_vport_max_rate(vport, rate_mbps, NULL);
esw_qos_unlock(esw);
return err;
@@ -949,8 +1616,62 @@ static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *
return 0;
}
+static bool esw_qos_validate_unsupported_tc_bw(struct mlx5_eswitch *esw,
+ u32 *tc_bw)
+{
+ int i, num_tcs = esw_qos_num_tcs(esw->dev);
+
+ for (i = num_tcs; i < DEVLINK_RATE_TCS_MAX; i++) {
+ if (tc_bw[i])
+ return false;
+ }
+
+ return true;
+}
+
+static bool esw_qos_vport_validate_unsupported_tc_bw(struct mlx5_vport *vport,
+ u32 *tc_bw)
+{
+ struct mlx5_esw_sched_node *node = vport->qos.sched_node;
+ struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
+
+ esw = (node && node->parent) ? node->parent->esw : esw;
+
+ return esw_qos_validate_unsupported_tc_bw(esw, tc_bw);
+}
+
+static bool esw_qos_tc_bw_disabled(u32 *tc_bw)
+{
+ int i;
+
+ for (i = 0; i < DEVLINK_RATE_TCS_MAX; i++) {
+ if (tc_bw[i])
+ return false;
+ }
+
+ return true;
+}
+
+static void esw_vport_qos_prune_empty(struct mlx5_vport *vport)
+{
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+
+ esw_assert_qos_lock_held(vport->dev->priv.eswitch);
+ if (!vport_node)
+ return;
+
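+	/* Tear down vport QoS once no parent, rate limits or TC shares remain. */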
+ if (vport_node->parent || vport_node->max_rate ||
+ vport_node->min_rate || !esw_qos_tc_bw_disabled(vport_node->tc_bw))
+ return;
+
+ mlx5_esw_qos_vport_disable_locked(vport);
+}
+
int mlx5_esw_qos_init(struct mlx5_eswitch *esw)
{
+ if (esw->qos.domain)
+ return 0; /* Nothing to change. */
+
return esw_qos_domain_init(esw);
}
@@ -978,12 +1699,11 @@ int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void
return err;
esw_qos_lock(esw);
- err = esw_qos_vport_enable(vport, 0, 0, extack);
+ err = mlx5_esw_qos_set_vport_min_rate(vport, tx_share, extack);
if (err)
- goto unlock;
-
- err = esw_qos_set_vport_min_rate(vport, tx_share, extack);
-unlock:
+ goto out;
+ esw_vport_qos_prune_empty(vport);
+out:
esw_qos_unlock(esw);
return err;
}
@@ -1004,11 +1724,95 @@ int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *
return err;
esw_qos_lock(esw);
- err = esw_qos_vport_enable(vport, 0, 0, extack);
+ err = mlx5_esw_qos_set_vport_max_rate(vport, tx_max, extack);
if (err)
+ goto out;
+ esw_vport_qos_prune_empty(vport);
+out:
+ esw_qos_unlock(esw);
+ return err;
+}
+
+int mlx5_esw_devlink_rate_leaf_tc_bw_set(struct devlink_rate *rate_leaf,
+ void *priv,
+ u32 *tc_bw,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *vport_node;
+ struct mlx5_vport *vport = priv;
+ struct mlx5_eswitch *esw;
+ bool disable;
+ int err = 0;
+
+ esw = vport->dev->priv.eswitch;
+ if (!mlx5_esw_allowed(esw))
+ return -EPERM;
+
+ disable = esw_qos_tc_bw_disabled(tc_bw);
+ esw_qos_lock(esw);
+
+ if (!esw_qos_vport_validate_unsupported_tc_bw(vport, tc_bw)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "E-Switch traffic classes number is not supported");
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ vport_node = vport->qos.sched_node;
+ if (disable && !vport_node)
+ goto unlock;
+
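+	/* Clearing all TC shares reverts the vport to plain vport scheduling. */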
+ if (disable) {
+ if (vport_node->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
+ err = esw_qos_vport_update(vport, SCHED_NODE_TYPE_VPORT,
+ vport_node->parent, extack);
+ esw_vport_qos_prune_empty(vport);
+ goto unlock;
+ }
+
+ if (!vport_node) {
+ err = mlx5_esw_qos_vport_enable(vport,
+ SCHED_NODE_TYPE_TC_ARBITER_TSAR,
+ NULL, 0, 0, extack);
+ vport_node = vport->qos.sched_node;
+ } else {
+ err = esw_qos_vport_update(vport,
+ SCHED_NODE_TYPE_TC_ARBITER_TSAR,
+ vport_node->parent, extack);
+ }
+ if (!err)
+ esw_qos_set_tc_arbiter_bw_shares(vport_node, tc_bw, extack);
+unlock:
+ esw_qos_unlock(esw);
+ return err;
+}
+
+int mlx5_esw_devlink_rate_node_tc_bw_set(struct devlink_rate *rate_node,
+ void *priv,
+ u32 *tc_bw,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *node = priv;
+ struct mlx5_eswitch *esw = node->esw;
+ bool disable;
+ int err;
+
+ if (!esw_qos_validate_unsupported_tc_bw(esw, tc_bw)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "E-Switch traffic classes number is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ disable = esw_qos_tc_bw_disabled(tc_bw);
+ esw_qos_lock(esw);
+ if (disable) {
+ err = esw_qos_node_disable_tc_arbitration(node, extack);
goto unlock;
+ }
- err = esw_qos_set_vport_max_rate(vport, tx_max, extack);
+ err = esw_qos_node_enable_tc_arbitration(node, extack);
+ if (!err)
+ esw_qos_set_tc_arbiter_bw_shares(node, tc_bw, extack);
unlock:
esw_qos_unlock(esw);
return err;
@@ -1043,7 +1847,7 @@ int mlx5_esw_devlink_rate_node_tx_max_set(struct devlink_rate *rate_node, void *
return err;
esw_qos_lock(esw);
- err = esw_qos_set_node_max_rate(node, tx_max, extack);
+ err = esw_qos_sched_elem_config(node, tx_max, node->bw_share, extack);
esw_qos_unlock(esw);
return err;
}
@@ -1084,50 +1888,220 @@ int mlx5_esw_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv,
{
struct mlx5_esw_sched_node *node = priv;
struct mlx5_eswitch *esw = node->esw;
- int err;
esw_qos_lock(esw);
- err = __esw_qos_destroy_node(node, extack);
+ __esw_qos_destroy_node(node, extack);
esw_qos_put(esw);
esw_qos_unlock(esw);
- return err;
+ return 0;
}
-int mlx5_esw_qos_vport_update_node(struct mlx5_vport *vport,
- struct mlx5_esw_sched_node *node,
- struct netlink_ext_ack *extack)
+int mlx5_esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
int err = 0;
- if (node && node->esw != esw) {
+ if (parent && parent->esw != esw) {
NL_SET_ERR_MSG_MOD(extack, "Cross E-Switch scheduling is not supported");
return -EOPNOTSUPP;
}
esw_qos_lock(esw);
- if (!vport->qos.sched_node && !node)
- goto unlock;
-
- err = esw_qos_vport_enable(vport, 0, 0, extack);
- if (!err)
- err = esw_qos_vport_update_node(vport, node, extack);
-unlock:
+ if (!vport->qos.sched_node && parent) {
+ enum sched_node_type type;
+
+ type = parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR ?
+ SCHED_NODE_TYPE_RATE_LIMITER : SCHED_NODE_TYPE_VPORT;
+ err = mlx5_esw_qos_vport_enable(vport, type, parent, 0, 0,
+ extack);
+ } else if (vport->qos.sched_node) {
+ err = esw_qos_vport_update_parent(vport, parent, extack);
+ }
esw_qos_unlock(esw);
return err;
}
-int mlx5_esw_devlink_rate_parent_set(struct devlink_rate *devlink_rate,
- struct devlink_rate *parent,
- void *priv, void *parent_priv,
- struct netlink_ext_ack *extack)
+int mlx5_esw_devlink_rate_leaf_parent_set(struct devlink_rate *devlink_rate,
+ struct devlink_rate *parent,
+ void *priv, void *parent_priv,
+ struct netlink_ext_ack *extack)
{
- struct mlx5_esw_sched_node *node;
+ struct mlx5_esw_sched_node *node = parent ? parent_priv : NULL;
struct mlx5_vport *vport = priv;
+ int err;
+
+ err = mlx5_esw_qos_vport_update_parent(vport, node, extack);
+ if (!err) {
+ struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
+
+ esw_qos_lock(esw);
+ esw_vport_qos_prune_empty(vport);
+ esw_qos_unlock(esw);
+ }
+
+ return err;
+}
+
+static bool esw_qos_is_node_empty(struct mlx5_esw_sched_node *node)
+{
+ if (list_empty(&node->children))
+ return true;
+
+ if (node->type != SCHED_NODE_TYPE_TC_ARBITER_TSAR)
+ return false;
+
+ node = list_first_entry(&node->children, struct mlx5_esw_sched_node,
+ entry);
+
+ return esw_qos_is_node_empty(node);
+}
+
+static int
+mlx5_esw_qos_node_validate_set_parent(struct mlx5_esw_sched_node *node,
+ struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
+{
+ u8 new_level, max_level;
+
+ if (parent && parent->esw != node->esw) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot assign node to another E-Switch");
+ return -EOPNOTSUPP;
+ }
+
+ if (!esw_qos_is_node_empty(node)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot reassign a node that contains rate objects");
+ return -EOPNOTSUPP;
+ }
+
+ if (parent && parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot attach a node to a parent with TC bandwidth configured");
+ return -EOPNOTSUPP;
+ }
+
+ new_level = parent ? parent->level + 1 : 2;
+ if (node->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) {
+ /* Increase by one to account for the vports TC scheduling
+ * element.
+ */
+ new_level += 1;
+ }
+
+ max_level = 1 << MLX5_CAP_QOS(node->esw->dev, log_esw_max_sched_depth);
+ if (new_level > max_level) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Node hierarchy depth %d exceeds the maximum supported level %d",
+ new_level, max_level);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+esw_qos_tc_arbiter_node_update_parent(struct mlx5_esw_sched_node *node,
+ struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *curr_parent = node->parent;
+ u32 curr_tc_bw[DEVLINK_RATE_TCS_MAX] = {0};
+ struct mlx5_eswitch *esw = node->esw;
+ int err;
+
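+	/* Rebuild the TC arbiter under the new parent and restore its TC shares. */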
+ esw_qos_tc_arbiter_get_bw_shares(node, curr_tc_bw);
+ esw_qos_tc_arbiter_scheduling_teardown(node, extack);
+ esw_qos_node_set_parent(node, parent);
+ err = esw_qos_tc_arbiter_scheduling_setup(node, extack);
+ if (err) {
+ esw_qos_node_set_parent(node, curr_parent);
+ if (esw_qos_tc_arbiter_scheduling_setup(node, extack)) {
+ esw_warn(esw->dev, "Node restore QoS failed\n");
+ return err;
+ }
+ }
+ esw_qos_set_tc_arbiter_bw_shares(node, curr_tc_bw, extack);
+
+ return err;
+}
+
+static int esw_qos_vports_node_update_parent(struct mlx5_esw_sched_node *node,
+ struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *curr_parent = node->parent;
+ struct mlx5_eswitch *esw = node->esw;
+ u32 parent_ix;
+ int err;
+
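+	/* Moving a vports TSAR requires destroying and re-creating it under the new parent. */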
+ parent_ix = parent ? parent->ix : node->esw->qos.root_tsar_ix;
+ mlx5_destroy_scheduling_element_cmd(esw->dev,
+ SCHEDULING_HIERARCHY_E_SWITCH,
+ node->ix);
+ err = esw_qos_create_node_sched_elem(esw->dev, parent_ix,
+ node->max_rate, 0, &node->ix);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to create a node under the new hierarchy.");
+ if (esw_qos_create_node_sched_elem(esw->dev, curr_parent->ix,
+ node->max_rate,
+ node->bw_share,
+ &node->ix))
+ esw_warn(esw->dev, "Node restore QoS failed\n");
+
+ return err;
+ }
+ esw_qos_node_set_parent(node, parent);
+ node->bw_share = 0;
+
+ return 0;
+}
+
+static int mlx5_esw_qos_node_update_parent(struct mlx5_esw_sched_node *node,
+ struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *curr_parent;
+ struct mlx5_eswitch *esw = node->esw;
+ int err;
+
+ err = mlx5_esw_qos_node_validate_set_parent(node, parent, extack);
+ if (err)
+ return err;
+
+ esw_qos_lock(esw);
+ curr_parent = node->parent;
+ if (node->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) {
+ err = esw_qos_tc_arbiter_node_update_parent(node, parent,
+ extack);
+ } else {
+ err = esw_qos_vports_node_update_parent(node, parent, extack);
+ }
+
+ if (err)
+ goto out;
+
+ esw_qos_normalize_min_rate(esw, curr_parent, extack);
+ esw_qos_normalize_min_rate(esw, parent, extack);
+
+out:
+ esw_qos_unlock(esw);
+
+ return err;
+}
+
+int mlx5_esw_devlink_rate_node_parent_set(struct devlink_rate *devlink_rate,
+ struct devlink_rate *parent,
+ void *priv, void *parent_priv,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *node = priv, *parent_node;
if (!parent)
- return mlx5_esw_qos_vport_update_node(vport, NULL, extack);
+ return mlx5_esw_qos_node_update_parent(node, NULL, extack);
- node = parent_priv;
- return mlx5_esw_qos_vport_update_node(vport, node, extack);
+ parent_node = parent_priv;
+ return mlx5_esw_qos_node_update_parent(node, parent_node, extack);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
index 61a6fdd5c267..0a50982b0e27 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
@@ -13,6 +13,7 @@ int mlx5_esw_qos_set_vport_rate(struct mlx5_vport *evport, u32 max_rate, u32 min
bool mlx5_esw_qos_get_vport_rate(struct mlx5_vport *vport, u32 *max_rate, u32 *min_rate);
void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport);
+void mlx5_esw_qos_vport_qos_free(struct mlx5_vport *vport);
u32 mlx5_esw_qos_vport_get_sched_elem_ix(const struct mlx5_vport *vport);
struct mlx5_esw_sched_node *mlx5_esw_qos_vport_get_parent(const struct mlx5_vport *vport);
@@ -20,6 +21,14 @@ int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void
u64 tx_share, struct netlink_ext_ack *extack);
int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *priv,
u64 tx_max, struct netlink_ext_ack *extack);
+int mlx5_esw_devlink_rate_leaf_tc_bw_set(struct devlink_rate *rate_node,
+ void *priv,
+ u32 *tc_bw,
+ struct netlink_ext_ack *extack);
+int mlx5_esw_devlink_rate_node_tc_bw_set(struct devlink_rate *rate_node,
+ void *priv,
+ u32 *tc_bw,
+ struct netlink_ext_ack *extack);
int mlx5_esw_devlink_rate_node_tx_share_set(struct devlink_rate *rate_node, void *priv,
u64 tx_share, struct netlink_ext_ack *extack);
int mlx5_esw_devlink_rate_node_tx_max_set(struct devlink_rate *rate_node, void *priv,
@@ -28,10 +37,14 @@ int mlx5_esw_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv,
struct netlink_ext_ack *extack);
int mlx5_esw_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv,
struct netlink_ext_ack *extack);
-int mlx5_esw_devlink_rate_parent_set(struct devlink_rate *devlink_rate,
- struct devlink_rate *parent,
- void *priv, void *parent_priv,
- struct netlink_ext_ack *extack);
+int mlx5_esw_devlink_rate_leaf_parent_set(struct devlink_rate *devlink_rate,
+ struct devlink_rate *parent,
+ void *priv, void *parent_priv,
+ struct netlink_ext_ack *extack);
+int mlx5_esw_devlink_rate_node_parent_set(struct devlink_rate *devlink_rate,
+ struct devlink_rate *parent,
+ void *priv, void *parent_priv,
+ struct netlink_ext_ack *extack);
#endif
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c
index 749c3957a128..407062096a82 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c
@@ -45,8 +45,8 @@ esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns,
ft_attr.flags = vport_ns->flags;
fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(fdb)) {
- esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n",
- PTR_ERR(fdb));
+ esw_warn(esw->dev, "Failed to create per vport FDB Table err %pe\n",
+ fdb);
}
return fdb;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 09719e9b8611..4b7a1ce7f406 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -257,8 +257,8 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule,
&flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
esw_warn(esw->dev,
- "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
- dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
+ "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%pe)\n",
+ dmac_v, dmac_c, vport, flow_rule);
flow_rule = NULL;
}
@@ -820,6 +820,7 @@ static int mlx5_esw_vport_caps_get(struct mlx5_eswitch *esw, struct mlx5_vport *
hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
vport->info.roce_enabled = MLX5_GET(cmd_hca_cap, hca_caps, roce);
+ vport->vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);
if (!MLX5_CAP_GEN_MAX(esw->dev, hca_cap_2))
goto out_free;
@@ -839,6 +840,18 @@ out_free:
return err;
}
+bool mlx5_esw_vport_vhca_id(struct mlx5_eswitch *esw, u16 vportn, u16 *vhca_id)
+{
+ struct mlx5_vport *vport;
+
+ vport = mlx5_eswitch_get_vport(esw, vportn);
+ if (IS_ERR(vport) || MLX5_VPORT_INVAL_VHCA_ID(vport))
+ return false;
+
+ *vhca_id = vport->vhca_id;
+ return true;
+}
+
static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
bool vst_mode_steering = esw_vst_mode_is_steering(esw);
@@ -862,13 +875,10 @@ static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
vport_num, 1,
vport->info.link_state);
- /* Host PF has its own mac/guid. */
- if (vport_num) {
- mlx5_modify_nic_vport_mac_address(esw->dev, vport_num,
- vport->info.mac);
- mlx5_modify_nic_vport_node_guid(esw->dev, vport_num,
- vport->info.node_guid);
- }
+ mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true,
+ vport->info.mac);
+ mlx5_query_nic_vport_node_guid(esw->dev, vport_num, true,
+ &vport->info.node_guid);
flags = (vport->info.vlan || vport->info.qos) ?
SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
@@ -929,17 +939,11 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
if (!mlx5_esw_is_manager_vport(esw, vport_num) &&
MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
- ret = mlx5_esw_vport_vhca_id_set(esw, vport_num);
+ ret = mlx5_esw_vport_vhca_id_map(esw, vport);
if (ret)
goto err_vhca_mapping;
}
- /* External controller host PF has factory programmed MAC.
- * Read it from the device.
- */
- if (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF)
- mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true, vport->info.mac);
-
esw_vport_change_handle_locked(vport);
esw->enabled_vports++;
@@ -973,7 +977,7 @@ void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
if (!mlx5_esw_is_manager_vport(esw, vport_num) &&
MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
- mlx5_esw_vport_vhca_id_clear(esw, vport_num);
+ mlx5_esw_vport_vhca_id_unmap(esw, vport);
if (vport->vport != MLX5_VPORT_PF &&
(vport->info.ipsec_crypto_enabled || vport->info.ipsec_packet_enabled))
@@ -1038,6 +1042,25 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
return ERR_PTR(err);
}
+static int mlx5_esw_host_functions_enabled_query(struct mlx5_eswitch *esw)
+{
+ const u32 *query_host_out;
+
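+	/* Host functions can only be reported absent by an ECPF eswitch manager. */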
+ if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
+ return 0;
+
+ query_host_out = mlx5_esw_query_functions(esw->dev);
+ if (IS_ERR(query_host_out))
+ return PTR_ERR(query_host_out);
+
+ esw->esw_funcs.host_funcs_disabled =
+ MLX5_GET(query_esw_functions_out, query_host_out,
+ host_params_context.host_pf_not_exist);
+
+ kvfree(query_host_out);
+ return 0;
+}
+
static void mlx5_eswitch_event_handler_register(struct mlx5_eswitch *esw)
{
if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) {
@@ -1061,8 +1084,7 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
unsigned long i;
mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
- kfree(vport->qos.sched_node);
- memset(&vport->qos, 0, sizeof(vport->qos));
+ mlx5_esw_qos_vport_qos_free(vport);
memset(&vport->info, 0, sizeof(vport->info));
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
}
@@ -1074,8 +1096,7 @@ static void mlx5_eswitch_clear_ec_vf_vports_info(struct mlx5_eswitch *esw)
unsigned long i;
mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) {
- kfree(vport->qos.sched_node);
- memset(&vport->qos, 0, sizeof(vport->qos));
+ mlx5_esw_qos_vport_qos_free(vport);
memset(&vport->info, 0, sizeof(vport->info));
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
}
@@ -1187,7 +1208,8 @@ void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
unsigned long i;
mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
- if (!vport->enabled)
+ /* Adjacent VFs are unloaded separately */
+ if (!vport->enabled || vport->adjacent)
continue;
mlx5_eswitch_unload_pf_vf_vport(esw, vport->vport);
}
@@ -1206,6 +1228,42 @@ static void mlx5_eswitch_unload_ec_vf_vports(struct mlx5_eswitch *esw,
}
}
+static void mlx5_eswitch_unload_adj_vf_vports(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ unsigned long i;
+
+ mlx5_esw_for_each_vf_vport(esw, i, vport, U16_MAX) {
+ if (!vport->enabled || !vport->adjacent)
+ continue;
+ mlx5_eswitch_unload_pf_vf_vport(esw, vport->vport);
+ }
+}
+
+static int
+mlx5_eswitch_load_adj_vf_vports(struct mlx5_eswitch *esw,
+ enum mlx5_eswitch_vport_event enabled_events)
+{
+ struct mlx5_vport *vport;
+ unsigned long i;
+ int err;
+
+ mlx5_esw_for_each_vf_vport(esw, i, vport, U16_MAX) {
+ if (!vport->adjacent)
+ continue;
+ err = mlx5_eswitch_load_pf_vf_vport(esw, vport->vport,
+ enabled_events);
+ if (err)
+ goto unload_adj_vf_vport;
+ }
+
+ return 0;
+
+unload_adj_vf_vport:
+ mlx5_eswitch_unload_adj_vf_vports(esw);
+ return err;
+}
+
int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
enum mlx5_eswitch_vport_event enabled_events)
{
@@ -1280,29 +1338,34 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
esw->mode == MLX5_ESWITCH_LEGACY;
/* Enable PF vport */
- if (pf_needed) {
+ if (pf_needed && mlx5_esw_host_functions_enabled(esw->dev)) {
ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_PF,
enabled_events);
if (ret)
return ret;
}
- /* Enable external host PF HCA */
- ret = host_pf_enable_hca(esw->dev);
- if (ret)
- goto pf_hca_err;
+ if (mlx5_esw_host_functions_enabled(esw->dev)) {
+ /* Enable external host PF HCA */
+ ret = host_pf_enable_hca(esw->dev);
+ if (ret)
+ goto pf_hca_err;
+ }
/* Enable ECPF vport */
if (mlx5_ecpf_vport_exists(esw->dev)) {
ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_ECPF, enabled_events);
if (ret)
goto ecpf_err;
- if (mlx5_core_ec_sriov_enabled(esw->dev)) {
- ret = mlx5_eswitch_load_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs,
- enabled_events);
- if (ret)
- goto ec_vf_err;
- }
+ }
+
+ /* Enable ECVF vports */
+ if (mlx5_core_ec_sriov_enabled(esw->dev)) {
+ ret = mlx5_eswitch_load_ec_vf_vports(esw,
+ esw->esw_funcs.num_ec_vfs,
+ enabled_events);
+ if (ret)
+ goto ec_vf_err;
}
/* Enable VF vports */
@@ -1310,8 +1373,16 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
enabled_events);
if (ret)
goto vf_err;
+
+ /* Enable adjacent VF vports */
+ ret = mlx5_eswitch_load_adj_vf_vports(esw, enabled_events);
+ if (ret)
+ goto unload_vf_vports;
+
return 0;
+unload_vf_vports:
+ mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
vf_err:
if (mlx5_core_ec_sriov_enabled(esw->dev))
mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs);
@@ -1319,9 +1390,10 @@ ec_vf_err:
if (mlx5_ecpf_vport_exists(esw->dev))
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
ecpf_err:
- host_pf_disable_hca(esw->dev);
+ if (mlx5_esw_host_functions_enabled(esw->dev))
+ host_pf_disable_hca(esw->dev);
pf_hca_err:
- if (pf_needed)
+ if (pf_needed && mlx5_esw_host_functions_enabled(esw->dev))
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
return ret;
}
@@ -1331,18 +1403,24 @@ pf_hca_err:
*/
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
{
+ mlx5_eswitch_unload_adj_vf_vports(esw);
+
mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
+ if (mlx5_core_ec_sriov_enabled(esw->dev))
+ mlx5_eswitch_unload_ec_vf_vports(esw,
+ esw->esw_funcs.num_ec_vfs);
+
if (mlx5_ecpf_vport_exists(esw->dev)) {
- if (mlx5_core_ec_sriov_enabled(esw->dev))
- mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_vfs);
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
}
- host_pf_disable_hca(esw->dev);
+ if (mlx5_esw_host_functions_enabled(esw->dev))
+ host_pf_disable_hca(esw->dev);
- if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
- esw->mode == MLX5_ESWITCH_LEGACY)
+ if ((mlx5_core_is_ecpf_esw_manager(esw->dev) ||
+ esw->mode == MLX5_ESWITCH_LEGACY) &&
+ mlx5_esw_host_functions_enabled(esw->dev))
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
}
@@ -1396,22 +1474,79 @@ static void mlx5_esw_mode_change_notify(struct mlx5_eswitch *esw, u16 mode)
info.new_mode = mode;
- blocking_notifier_call_chain(&esw->n_head, 0, &info);
+ blocking_notifier_call_chain(&esw->dev->priv.esw_n_head, 0, &info);
+}
+
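+/* Add a per-vport egress ACL flow namespace for every e-switch vport,
+ * removing the namespaces already added if any addition fails.
+ */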
+static int mlx5_esw_egress_acls_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int total_vports = mlx5_eswitch_get_total_vports(dev);
+ int err;
+ int i;
+
+ for (i = 0; i < total_vports; i++) {
+ err = mlx5_fs_vport_egress_acl_ns_add(steering, i);
+ if (err)
+ goto acl_ns_remove;
+ }
+ return 0;
+
+acl_ns_remove:
+ while (i--)
+ mlx5_fs_vport_egress_acl_ns_remove(steering, i);
+ return err;
+}
+
+static void mlx5_esw_egress_acls_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int total_vports = mlx5_eswitch_get_total_vports(dev);
+ int i;
+
+ for (i = total_vports - 1; i >= 0; i--)
+ mlx5_fs_vport_egress_acl_ns_remove(steering, i);
+}
+
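+/* Same as the egress ACL namespace init above, but for the per-vport ingress
+ * ACL namespaces.
+ */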
+static int mlx5_esw_ingress_acls_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int total_vports = mlx5_eswitch_get_total_vports(dev);
+ int err;
+ int i;
+
+ for (i = 0; i < total_vports; i++) {
+ err = mlx5_fs_vport_ingress_acl_ns_add(steering, i);
+ if (err)
+ goto acl_ns_remove;
+ }
+ return 0;
+
+acl_ns_remove:
+ while (i--)
+ mlx5_fs_vport_ingress_acl_ns_remove(steering, i);
+ return err;
+}
+
+static void mlx5_esw_ingress_acls_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int total_vports = mlx5_eswitch_get_total_vports(dev);
+ int i;
+
+ for (i = total_vports - 1; i >= 0; i--)
+ mlx5_fs_vport_ingress_acl_ns_remove(steering, i);
}
static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
{
struct mlx5_core_dev *dev = esw->dev;
- int total_vports;
int err;
if (esw->flags & MLX5_ESWITCH_VPORT_ACL_NS_CREATED)
return 0;
- total_vports = mlx5_eswitch_get_total_vports(dev);
-
if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
- err = mlx5_fs_egress_acls_init(dev, total_vports);
+ err = mlx5_esw_egress_acls_init(dev);
if (err)
return err;
} else {
@@ -1419,7 +1554,7 @@ static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
}
if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
- err = mlx5_fs_ingress_acls_init(dev, total_vports);
+ err = mlx5_esw_ingress_acls_init(dev);
if (err)
goto err;
} else {
@@ -1430,7 +1565,7 @@ static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
err:
if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
- mlx5_fs_egress_acls_cleanup(dev);
+ mlx5_esw_egress_acls_cleanup(dev);
return err;
}
@@ -1440,9 +1575,9 @@ static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw)
esw->flags &= ~MLX5_ESWITCH_VPORT_ACL_NS_CREATED;
if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
- mlx5_fs_ingress_acls_cleanup(dev);
+ mlx5_esw_ingress_acls_cleanup(dev);
if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
- mlx5_fs_egress_acls_cleanup(dev);
+ mlx5_esw_egress_acls_cleanup(dev);
}
/**
@@ -1485,17 +1620,16 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
err = mlx5_esw_qos_init(esw);
if (err)
- goto err_qos_init;
+ goto err_esw_init;
if (esw->mode == MLX5_ESWITCH_LEGACY) {
err = esw_legacy_enable(esw);
} else {
- mlx5_rescan_drivers(esw->dev);
err = esw_offloads_enable(esw);
}
if (err)
- goto err_esw_enable;
+ goto err_esw_init;
esw->fdb_table.flags |= MLX5_ESW_FDB_CREATED;
@@ -1509,9 +1643,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
return 0;
-err_esw_enable:
- mlx5_esw_qos_cleanup(esw);
-err_qos_init:
+err_esw_init:
mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
mlx5_esw_acls_ns_cleanup(esw);
return err;
@@ -1640,7 +1772,6 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw)
if (esw->mode == MLX5_ESWITCH_OFFLOADS)
devl_rate_nodes_destroy(devlink);
- mlx5_esw_qos_cleanup(esw);
}
void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
@@ -1675,7 +1806,8 @@ int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *
void *hca_caps;
int err;
- if (!mlx5_core_is_ecpf(dev)) {
+ if (!mlx5_core_is_ecpf(dev) ||
+ !mlx5_esw_host_functions_enabled(dev)) {
*max_sfs = 0;
return 0;
}
@@ -1697,8 +1829,7 @@ out_free:
return err;
}
-static int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw,
- int index, u16 vport_num)
+int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw, int index, u16 vport_num)
{
struct mlx5_vport *vport;
int err;
@@ -1711,6 +1842,7 @@ static int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw,
vport->vport = vport_num;
vport->index = index;
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
+ vport->vhca_id = MLX5_VHCA_ID_INVALID;
INIT_WORK(&vport->vport_change_handler, esw_vport_change_handler);
err = xa_insert(&esw->vports, vport_num, vport, GFP_KERNEL);
if (err)
@@ -1724,8 +1856,9 @@ insert_err:
return err;
}
-static void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
+ esw->total_vports--;
xa_erase(&esw->vports, vport->vport);
kfree(vport);
}
@@ -1751,21 +1884,23 @@ static int mlx5_esw_vports_init(struct mlx5_eswitch *esw)
xa_init(&esw->vports);
- err = mlx5_esw_vport_alloc(esw, idx, MLX5_VPORT_PF);
- if (err)
- goto err;
- if (esw->first_host_vport == MLX5_VPORT_PF)
- xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
- idx++;
-
- for (i = 0; i < mlx5_core_max_vfs(dev); i++) {
- err = mlx5_esw_vport_alloc(esw, idx, idx);
+ if (mlx5_esw_host_functions_enabled(dev)) {
+ err = mlx5_esw_vport_alloc(esw, idx, MLX5_VPORT_PF);
if (err)
goto err;
- xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_VF);
- xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
+ if (esw->first_host_vport == MLX5_VPORT_PF)
+ xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
idx++;
+ for (i = 0; i < mlx5_core_max_vfs(dev); i++) {
+ err = mlx5_esw_vport_alloc(esw, idx, idx);
+ if (err)
+ goto err;
+ xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_VF);
+ xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
+ idx++;
+ }
}
+
base_sf_num = mlx5_sf_start_function_id(dev);
for (i = 0; i < mlx5_sf_max_functions(dev); i++) {
err = mlx5_esw_vport_alloc(esw, idx, base_sf_num + i);
@@ -1807,6 +1942,9 @@ static int mlx5_esw_vports_init(struct mlx5_eswitch *esw)
err = mlx5_esw_vport_alloc(esw, idx, MLX5_VPORT_UPLINK);
if (err)
goto err;
+
+	/* Adjacent vports or other dynamically created vports will use this */
+ esw->last_vport_idx = ++idx;
return 0;
err:
@@ -1831,7 +1969,8 @@ static int mlx5_devlink_esw_multiport_set(struct devlink *devlink, u32 id,
}
static int mlx5_devlink_esw_multiport_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
@@ -1865,6 +2004,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
goto free_esw;
esw->dev = dev;
+ dev->priv.eswitch = esw;
esw->manager_vport = mlx5_eswitch_manager_vport(dev);
esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev);
@@ -1875,15 +2015,23 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
goto abort;
}
+ err = mlx5_esw_host_functions_enabled_query(esw);
+ if (err)
+ goto abort;
+
err = mlx5_esw_vports_init(esw);
if (err)
goto abort;
- dev->priv.eswitch = esw;
err = esw_offloads_init(esw);
if (err)
goto reps_err;
+ esw->mode = MLX5_ESWITCH_LEGACY;
+ err = mlx5_esw_qos_init(esw);
+ if (err)
+ goto reps_err;
+
mutex_init(&esw->offloads.encap_tbl_lock);
hash_init(esw->offloads.encap_tbl);
mutex_init(&esw->offloads.decap_tbl_lock);
@@ -1897,14 +2045,12 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
refcount_set(&esw->qos.refcnt, 0);
esw->enabled_vports = 0;
- esw->mode = MLX5_ESWITCH_LEGACY;
esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
else
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
- BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
esw_info(dev,
"Total vports %d, per vport: max uc(%d) max mc(%d)\n",
@@ -1934,6 +2080,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw_info(esw->dev, "cleanup\n");
+ mlx5_esw_qos_cleanup(esw);
destroy_workqueue(esw->work_queue);
WARN_ON(refcount_read(&esw->qos.refcnt));
mutex_destroy(&esw->state_lock);
@@ -2079,6 +2226,9 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
ivi->vf = vport - 1;
mutex_lock(&esw->state_lock);
+
+ mlx5_query_nic_vport_mac_address(esw->dev, vport, true,
+ evport->info.mac);
ether_addr_copy(ivi->mac, evport->info.mac);
ivi->linkstate = evport->info.link_state;
ivi->vlan = evport->info.vlan;
@@ -2229,14 +2379,16 @@ bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS);
}
-int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *nb)
+int mlx5_esw_event_notifier_register(struct mlx5_core_dev *dev,
+ struct notifier_block *nb)
{
- return blocking_notifier_chain_register(&esw->n_head, nb);
+ return blocking_notifier_chain_register(&dev->priv.esw_n_head, nb);
}
-void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *nb)
+void mlx5_esw_event_notifier_unregister(struct mlx5_core_dev *dev,
+ struct notifier_block *nb)
{
- blocking_notifier_chain_unregister(&esw->n_head, nb);
+ blocking_notifier_chain_unregister(&dev->priv.esw_n_head, nb);
}
/**
@@ -2406,3 +2558,11 @@ void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev)
dev->num_ipsec_offloads--;
mutex_unlock(&esw->state_lock);
}
+
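+/* Host PF/VF functions are considered enabled until the e-switch query
+ * reports host_pf_not_exist in host_params_context.
+ */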
+bool mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev)
+{
+ if (!dev->priv.eswitch)
+ return true;
+
+ return !dev->priv.eswitch->esw_funcs.host_funcs_disabled;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 14dd42d44e6f..ad1073f7b79f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -197,6 +197,11 @@ static inline struct mlx5_vport *mlx5_devlink_port_vport_get(struct devlink_port
return mlx5_devlink_port_get(dl_port)->vport;
}
+#define MLX5_VHCA_ID_INVALID (-1)
+
+#define MLX5_VPORT_INVAL_VHCA_ID(vport) \
+ ((vport)->vhca_id == MLX5_VHCA_ID_INVALID)
+
struct mlx5_vport {
struct mlx5_core_dev *dev;
struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
@@ -209,13 +214,30 @@ struct mlx5_vport {
struct vport_egress egress;
u32 default_metadata;
u32 metadata;
+ int vhca_id;
+
+ bool adjacent; /* delegated vhca from adjacent function */
+ struct {
+ u16 parent_pci_devfn; /* Adjacent parent PCI device function */
+ u16 function_id; /* Function ID of the delegated VPort */
+ } adj_info;
struct mlx5_vport_info info;
- /* Protected with the E-Switch qos domain lock. */
+	/* Protected by the E-Switch qos domain lock. The vport QoS can
+	 * either be disabled (sched_node is NULL) or in one of three states:
+	 * 1. Regular QoS (sched_node is a vport node).
+	 * 2. TC QoS enabled on the vport (sched_node is a TC arbiter).
+	 * 3. TC QoS enabled on the vport's parent node
+	 *    (sched_node is a rate limit node).
+	 * When TC is enabled in either of these modes, the vport owns its
+	 * vport TC scheduling nodes.
+	 */
struct {
- /* Vport scheduling element node. */
+ /* Vport scheduling node. */
struct mlx5_esw_sched_node *sched_node;
+ /* Array of vport traffic class scheduling nodes. */
+ struct mlx5_esw_sched_node **sched_nodes;
} qos;
u16 vport;
@@ -242,6 +264,9 @@ struct mlx5_eswitch_fdb {
struct offloads_fdb {
struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *drop_root;
+ struct mlx5_flow_handle *drop_root_rule;
+ struct mlx5_fc *drop_root_fc;
struct mlx5_flow_table *tc_miss_table;
struct mlx5_flow_table *slow_fdb;
struct mlx5_flow_group *send_to_vport_grp;
@@ -313,6 +338,7 @@ struct mlx5_host_work {
struct mlx5_esw_functions {
struct mlx5_nb nb;
+ bool host_funcs_disabled;
u16 num_vfs;
u16 num_ec_vfs;
};
@@ -363,16 +389,13 @@ struct mlx5_eswitch {
refcount_t refcnt;
u32 root_tsar_ix;
struct mlx5_qos_domain *domain;
- /* Contains all vports with QoS enabled but no explicit node.
- * Cannot be NULL if QoS is enabled, but may be a fake node
- * referencing the root TSAR if the esw doesn't support nodes.
- */
- struct mlx5_esw_sched_node *node0;
} qos;
struct mlx5_esw_bridge_offloads *br_offloads;
struct mlx5_esw_offload offloads;
+ u32 last_vport_idx;
int mode;
+ bool offloads_inactive;
u16 manager_vport;
u16 first_host_vport;
u8 num_peers;
@@ -380,7 +403,6 @@ struct mlx5_eswitch {
struct {
u32 large_group_num;
} params;
- struct blocking_notifier_head n_head;
struct xarray paired;
struct mlx5_devcom_comp_dev *devcom;
u16 enabled_ipsec_vf_count;
@@ -405,6 +427,8 @@ int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
+int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw, int index, u16 vport_num);
+void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs);
@@ -412,7 +436,8 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf);
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
-void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key);
+void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw,
+ const struct mlx5_devcom_match_attr *attr);
void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw);
bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
@@ -427,9 +452,8 @@ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
u16 vport_num, bool setting);
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
u32 max_rate, u32 min_rate);
-int mlx5_esw_qos_vport_update_node(struct mlx5_vport *vport,
- struct mlx5_esw_sched_node *node,
- struct netlink_ext_ack *extack);
+int mlx5_esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_sched_node *node,
+ struct netlink_ext_ack *extack);
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting);
int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
@@ -611,6 +635,11 @@ bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);
+void mlx5_esw_adjacent_vhcas_setup(struct mlx5_eswitch *esw);
+void mlx5_esw_adjacent_vhcas_cleanup(struct mlx5_eswitch *esw);
+int mlx5_esw_adj_vport_modify(struct mlx5_core_dev *dev, u16 vport,
+ bool connect);
+
#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
#define esw_info(__dev, format, ...) \
@@ -715,6 +744,9 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base) +\
(last) - 1)
+#define mlx5_esw_for_each_rep(esw, i, rep) \
+ xa_for_each(&((esw)->offloads.vport_reps), i, rep)
+
struct mlx5_eswitch *__must_check
mlx5_devlink_eswitch_get(struct devlink *devlink);
@@ -810,12 +842,20 @@ struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u1
int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id);
-int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num);
-void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num);
+int mlx5_esw_vport_vhca_id_map(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
+void mlx5_esw_vport_vhca_id_unmap(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num);
+bool mlx5_esw_vport_vhca_id(struct mlx5_eswitch *esw, u16 vportn, u16 *vhca_id);
+
+void mlx5_esw_offloads_rep_remove(struct mlx5_eswitch *esw,
+ const struct mlx5_vport *vport);
+int mlx5_esw_offloads_rep_add(struct mlx5_eswitch *esw,
+ const struct mlx5_vport *vport);
/**
- * mlx5_esw_event_info - Indicates eswitch mode changed/changing.
+ * struct mlx5_esw_event_info - Indicates eswitch mode changed/changing.
*
* @new_mode: New mode of eswitch.
*/
@@ -823,8 +863,10 @@ struct mlx5_esw_event_info {
u16 new_mode;
};
-int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *n);
-void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *n);
+int mlx5_esw_event_notifier_register(struct mlx5_core_dev *dev,
+ struct notifier_block *n);
+void mlx5_esw_event_notifier_unregister(struct mlx5_core_dev *dev,
+ struct notifier_block *n);
bool mlx5_esw_hold(struct mlx5_core_dev *dev);
void mlx5_esw_release(struct mlx5_core_dev *dev);
@@ -844,7 +886,7 @@ void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
struct mlx5_eswitch *slave_esw);
int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw);
-bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev);
+bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev, bool from_fdb);
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev);
int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev);
@@ -886,6 +928,7 @@ int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_v
bool enable);
int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
u16 vport_num);
+bool mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev);
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
@@ -893,7 +936,9 @@ static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
-static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key) {}
+static inline void
+mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw,
+ const struct mlx5_devcom_match_attr *attr) {}
static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {}
static inline bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw) { return false; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
@@ -936,7 +981,8 @@ mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw)
return 0;
}
-static inline bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
+static inline bool
+mlx5_eswitch_block_encap(struct mlx5_core_dev *dev, bool from_fdb)
{
return true;
}
@@ -953,6 +999,19 @@ static inline bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev)
}
static inline void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev) {}
+
+static inline bool
+mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev)
+{
+ return true;
+}
+
+static inline bool
+mlx5_esw_vport_vhca_id(struct mlx5_eswitch *esw, u16 vportn, u16 *vhca_id)
+{
+	return false;
+}
+
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESWITCH_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index fd34f43d18d5..8de6c7f6c294 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -53,9 +53,6 @@
#include "lag/lag.h"
#include "en/tc/post_meter.h"
-#define mlx5_esw_for_each_rep(esw, i, rep) \
- xa_for_each(&((esw)->offloads.vport_reps), i, rep)
-
/* There are two match-all miss flows, one for unicast dst mac and
* one for multicast.
*/
@@ -651,6 +648,7 @@ esw_setup_meter(struct mlx5_flow_attr *attr, struct mlx5_flow_act *flow_act)
meter = attr->meter_attr.meter;
flow_act->exe_aso.type = attr->exe_aso_type;
flow_act->exe_aso.object_id = meter->obj_id;
+ flow_act->exe_aso.base_id = mlx5e_flow_meter_get_base_id(meter);
flow_act->exe_aso.flow_meter.meter_idx = meter->idx;
flow_act->exe_aso.flow_meter.init_color = MLX5_FLOW_METER_COLOR_GREEN;
/* use metadata reg 5 for packet color */
@@ -724,7 +722,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[i].counter_id = mlx5_fc_id(attr->counter);
+ dest[i].counter = attr->counter;
i++;
}
@@ -1018,8 +1016,8 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(on_esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule))
- esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %ld\n",
- PTR_ERR(flow_rule));
+ esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %pe\n",
+ flow_rule);
out:
kvfree(spec);
return flow_rule;
@@ -1067,8 +1065,8 @@ mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num
flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule))
- esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule vport %d, err %ld\n",
- vport_num, PTR_ERR(flow_rule));
+ esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule vport %d, err %pe\n",
+ vport_num, flow_rule);
kvfree(spec);
return flow_rule;
@@ -1184,19 +1182,19 @@ static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
struct mlx5_core_dev *peer_dev)
{
+ struct mlx5_eswitch *peer_esw = peer_dev->priv.eswitch;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_handle **flows;
- /* total vports is the same for both e-switches */
- int nvports = esw->total_vports;
struct mlx5_flow_handle *flow;
+ struct mlx5_vport *peer_vport;
struct mlx5_flow_spec *spec;
- struct mlx5_vport *vport;
int err, pfindex;
unsigned long i;
void *misc;
- if (!MLX5_VPORT_MANAGER(esw->dev) && !mlx5_core_is_ecpf_esw_manager(esw->dev))
+ if (!MLX5_VPORT_MANAGER(peer_dev) &&
+ !mlx5_core_is_ecpf_esw_manager(peer_dev))
return 0;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
@@ -1205,7 +1203,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
peer_miss_rules_setup(esw, peer_dev, spec, &dest);
- flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL);
+ flows = kvcalloc(peer_esw->total_vports, sizeof(*flows), GFP_KERNEL);
if (!flows) {
err = -ENOMEM;
goto alloc_flows_err;
@@ -1215,10 +1213,11 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
- if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
- esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
- spec, MLX5_VPORT_PF);
+ if (mlx5_core_is_ecpf_esw_manager(peer_dev) &&
+ mlx5_esw_host_functions_enabled(peer_dev)) {
+ peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
+ esw_set_peer_miss_rule_source_port(esw, peer_esw, spec,
+ MLX5_VPORT_PF);
flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
@@ -1226,11 +1225,11 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
err = PTR_ERR(flow);
goto add_pf_flow_err;
}
- flows[vport->index] = flow;
+ flows[peer_vport->index] = flow;
}
- if (mlx5_ecpf_vport_exists(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
+ if (mlx5_ecpf_vport_exists(peer_dev)) {
+ peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF);
MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
@@ -1238,36 +1237,39 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
err = PTR_ERR(flow);
goto add_ecpf_flow_err;
}
- flows[vport->index] = flow;
+ flows[peer_vport->index] = flow;
}
- mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
- esw_set_peer_miss_rule_source_port(esw,
- peer_dev->priv.eswitch,
- spec, vport->vport);
+ if (mlx5_esw_host_functions_enabled(esw->dev)) {
+ mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
+ mlx5_core_max_vfs(peer_dev)) {
+ esw_set_peer_miss_rule_source_port(esw, peer_esw,
+ spec,
+ peer_vport->vport);
- flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
- spec, &flow_act, &dest, 1);
- if (IS_ERR(flow)) {
- err = PTR_ERR(flow);
- goto add_vf_flow_err;
+ flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
+ spec, &flow_act, &dest, 1);
+ if (IS_ERR(flow)) {
+ err = PTR_ERR(flow);
+ goto add_vf_flow_err;
+ }
+ flows[peer_vport->index] = flow;
}
- flows[vport->index] = flow;
}
- if (mlx5_core_ec_sriov_enabled(esw->dev)) {
- mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
- if (i >= mlx5_core_max_ec_vfs(peer_dev))
- break;
- esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
- spec, vport->vport);
+ if (mlx5_core_ec_sriov_enabled(peer_dev)) {
+ mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport,
+ mlx5_core_max_ec_vfs(peer_dev)) {
+ esw_set_peer_miss_rule_source_port(esw, peer_esw,
+ spec,
+ peer_vport->vport);
flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
spec, &flow_act, &dest, 1);
if (IS_ERR(flow)) {
err = PTR_ERR(flow);
goto add_ec_vf_flow_err;
}
- flows[vport->index] = flow;
+ flows[peer_vport->index] = flow;
}
}
@@ -1284,25 +1286,29 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
return 0;
add_ec_vf_flow_err:
- mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
- if (!flows[vport->index])
+ mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport,
+ mlx5_core_max_ec_vfs(peer_dev)) {
+ if (!flows[peer_vport->index])
continue;
- mlx5_del_flow_rules(flows[vport->index]);
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
add_vf_flow_err:
- mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
- if (!flows[vport->index])
+ mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
+ mlx5_core_max_vfs(peer_dev)) {
+ if (!flows[peer_vport->index])
continue;
- mlx5_del_flow_rules(flows[vport->index]);
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
- if (mlx5_ecpf_vport_exists(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
- mlx5_del_flow_rules(flows[vport->index]);
+ if (mlx5_ecpf_vport_exists(peer_dev)) {
+ peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF);
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
add_ecpf_flow_err:
- if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
- mlx5_del_flow_rules(flows[vport->index]);
+
+ if (mlx5_core_is_ecpf_esw_manager(peer_dev) &&
+ mlx5_esw_host_functions_enabled(peer_dev)) {
+ peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
add_pf_flow_err:
esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
@@ -1315,37 +1321,34 @@ alloc_flows_err:
static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
struct mlx5_core_dev *peer_dev)
{
+ struct mlx5_eswitch *peer_esw = peer_dev->priv.eswitch;
u16 peer_index = mlx5_get_dev_index(peer_dev);
struct mlx5_flow_handle **flows;
- struct mlx5_vport *vport;
+ struct mlx5_vport *peer_vport;
unsigned long i;
flows = esw->fdb_table.offloads.peer_miss_rules[peer_index];
if (!flows)
return;
- if (mlx5_core_ec_sriov_enabled(esw->dev)) {
- mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
- /* The flow for a particular vport could be NULL if the other ECPF
- * has fewer or no VFs enabled
- */
- if (!flows[vport->index])
- continue;
- mlx5_del_flow_rules(flows[vport->index]);
- }
+ if (mlx5_core_ec_sriov_enabled(peer_dev)) {
+ mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport,
+ mlx5_core_max_ec_vfs(peer_dev))
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
- mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev))
- mlx5_del_flow_rules(flows[vport->index]);
+ mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
+ mlx5_core_max_vfs(peer_dev))
+ mlx5_del_flow_rules(flows[peer_vport->index]);
- if (mlx5_ecpf_vport_exists(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
- mlx5_del_flow_rules(flows[vport->index]);
+ if (mlx5_ecpf_vport_exists(peer_dev)) {
+ peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF);
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
- if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
- mlx5_del_flow_rules(flows[vport->index]);
+ if (mlx5_core_is_ecpf_esw_manager(peer_dev)) {
+ peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
kvfree(flows);
@@ -1574,6 +1577,7 @@ esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
attr.max_grp_num = esw->params.large_group_num;
attr.default_ft = miss_fdb;
attr.mapping = esw->offloads.reg_c0_obj_pool;
+ attr.fs_base_prio = FDB_BYPASS_PATH;
chains = mlx5_chains_create(dev, &attr);
if (IS_ERR(chains)) {
@@ -1975,7 +1979,6 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
/* Holds true only as long as DMFS is the default */
mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
MLX5_FLOW_STEERING_MODE_DMFS);
- atomic64_set(&esw->user_count, 0);
}
static int esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch *esw)
@@ -2156,7 +2159,9 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
&flow_act, dest, 1);
if (IS_ERR(flow_rule)) {
- esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
+ esw_warn(esw->dev,
+ "fs offloads: Failed to add vport rx rule err %pe\n",
+ flow_rule);
goto out;
}
@@ -2175,8 +2180,8 @@ static int esw_create_vport_rx_drop_rule(struct mlx5_eswitch *esw)
&flow_act, NULL, 0);
if (IS_ERR(flow_rule)) {
esw_warn(esw->dev,
- "fs offloads: Failed to add vport rx drop rule err %ld\n",
- PTR_ERR(flow_rule));
+ "fs offloads: Failed to add vport rx drop rule err %pe\n",
+ flow_rule);
return PTR_ERR(flow_rule);
}
@@ -2332,18 +2337,161 @@ out_free:
return err;
}
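+/* Change the e-switch mode while IB auxiliary devices are temporarily removed
+ * (via MLX5_PRIV_FLAGS_DISABLE_IB_ADEV), so the driver rescan re-creates them
+ * against the new mode.
+ */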
+static void esw_mode_change(struct mlx5_eswitch *esw, u16 mode)
+{
+ mlx5_devcom_comp_lock(esw->dev->priv.hca_devcom_comp);
+ if (esw->dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV ||
+ mlx5_core_mp_enabled(esw->dev)) {
+ esw->mode = mode;
+ mlx5_rescan_drivers_locked(esw->dev);
+ mlx5_devcom_comp_unlock(esw->dev->priv.hca_devcom_comp);
+ return;
+ }
+
+ esw->dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
+ mlx5_rescan_drivers_locked(esw->dev);
+ esw->mode = mode;
+ esw->dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
+ mlx5_rescan_drivers_locked(esw->dev);
+ mlx5_devcom_comp_unlock(esw->dev->priv.hca_devcom_comp);
+}
+
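+/* Remove the catch-all drop table installed while the FDB was inactive.
+ * The drop flow counter is kept around for reuse on a later deactivation.
+ */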
+static void mlx5_esw_fdb_drop_destroy(struct mlx5_eswitch *esw)
+{
+ if (!esw->fdb_table.offloads.drop_root)
+ return;
+
+ esw_debug(esw->dev, "Destroying FDB drop root table %#x fc %#x\n",
+ esw->fdb_table.offloads.drop_root->id,
+ esw->fdb_table.offloads.drop_root_fc->id);
+ mlx5_del_flow_rules(esw->fdb_table.offloads.drop_root_rule);
+	/* Don't free the flow counter here; it can be reused on a later activation */
+ mlx5_destroy_flow_table(esw->fdb_table.offloads.drop_root);
+ esw->fdb_table.offloads.drop_root_rule = NULL;
+ esw->fdb_table.offloads.drop_root = NULL;
+}
+
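+/* Install a single-FTE drop table at FDB_DROP_ROOT priority so FDB traffic is
+ * dropped (and counted, when a flow counter is available) while the offloads
+ * FDB is inactive.
+ */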
+static int mlx5_esw_fdb_drop_create(struct mlx5_eswitch *esw)
+{
+ struct mlx5_flow_destination drop_fc_dst = {};
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_destination *dst = NULL;
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *root_ns;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *flow_rule;
+ struct mlx5_flow_table *table;
+ int err = 0, dst_num = 0;
+
+ if (esw->fdb_table.offloads.drop_root)
+ return 0;
+
+ root_ns = esw->fdb_table.offloads.ns;
+
+ ft_attr.prio = FDB_DROP_ROOT;
+ ft_attr.max_fte = 1;
+ ft_attr.autogroup.max_num_groups = 1;
+ table = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
+ if (IS_ERR(table)) {
+ esw_warn(dev, "Failed to create fdb drop root table, err %pe\n",
+ table);
+ return PTR_ERR(table);
+ }
+
+	/* The drop FC is reusable; create it once, on the first FDB deactivation */
+ if (!esw->fdb_table.offloads.drop_root_fc) {
+ struct mlx5_fc *counter = mlx5_fc_create(dev, 0);
+
+ err = PTR_ERR_OR_ZERO(counter);
+ if (err)
+ esw_warn(esw->dev, "create fdb drop fc err %d\n", err);
+ else
+ esw->fdb_table.offloads.drop_root_fc = counter;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+
+ if (esw->fdb_table.offloads.drop_root_fc) {
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ drop_fc_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ drop_fc_dst.counter = esw->fdb_table.offloads.drop_root_fc;
+ dst = &drop_fc_dst;
+ dst_num++;
+ }
+
+ flow_rule = mlx5_add_flow_rules(table, NULL, &flow_act, dst, dst_num);
+ err = PTR_ERR_OR_ZERO(flow_rule);
+ if (err) {
+ esw_warn(esw->dev,
+			 "FDB: Failed to add fdb drop rule err %d\n",
+ err);
+ goto err_flow_rule;
+ }
+
+ esw->fdb_table.offloads.drop_root = table;
+ esw->fdb_table.offloads.drop_root_rule = flow_rule;
+ esw_debug(esw->dev, "Created FDB drop root table %#x fc %#x\n",
+ table->id, dst ? dst->counter->id : 0);
+ return 0;
+
+err_flow_rule:
+	/* No need to free the drop fc; esw_offloads_steering_cleanup() will do it */
+ mlx5_destroy_flow_table(table);
+ return err;
+}
+
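+/* Activate the offloads FDB: remove the drop table, re-enable MPFS and
+ * reconnect all adjacent vports to the e-switch.
+ */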
+static void mlx5_esw_fdb_active(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ unsigned long i;
+
+ mlx5_esw_fdb_drop_destroy(esw);
+ mlx5_mpfs_enable(esw->dev);
+
+ mlx5_esw_for_each_vf_vport(esw, i, vport, U16_MAX) {
+ if (!vport->adjacent)
+ continue;
+		esw_debug(esw->dev, "Connecting vport %u to eswitch\n",
+ vport->vport);
+ mlx5_esw_adj_vport_modify(esw->dev, vport->vport, true);
+ }
+
+ esw->offloads_inactive = false;
+ esw_warn(esw->dev, "MPFS/FDB active\n");
+}
+
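+/* Deactivate the offloads FDB: disable MPFS, install the drop table and
+ * disconnect all adjacent vports from the e-switch.
+ */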
+static void mlx5_esw_fdb_inactive(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ unsigned long i;
+
+ mlx5_mpfs_disable(esw->dev);
+ mlx5_esw_fdb_drop_create(esw);
+
+ mlx5_esw_for_each_vf_vport(esw, i, vport, U16_MAX) {
+ if (!vport->adjacent)
+ continue;
+ esw_debug(esw->dev, "Disconnecting vport %u from eswitch\n",
+ vport->vport);
+
+ mlx5_esw_adj_vport_modify(esw->dev, vport->vport, false);
+ }
+
+ esw->offloads_inactive = true;
+ esw_warn(esw->dev, "MPFS/FDB inactive\n");
+}
+
static int esw_offloads_start(struct mlx5_eswitch *esw,
struct netlink_ext_ack *extack)
{
int err;
- esw->mode = MLX5_ESWITCH_OFFLOADS;
+ esw_mode_change(esw, MLX5_ESWITCH_OFFLOADS);
err = mlx5_eswitch_enable_locked(esw, esw->dev->priv.sriov.num_vfs);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch to offloads");
- esw->mode = MLX5_ESWITCH_LEGACY;
- mlx5_rescan_drivers(esw->dev);
+ esw_mode_change(esw, MLX5_ESWITCH_LEGACY);
return err;
}
if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
@@ -2357,7 +2505,20 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
return 0;
}
-static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx5_vport *vport)
+void mlx5_esw_offloads_rep_remove(struct mlx5_eswitch *esw,
+ const struct mlx5_vport *vport)
+{
+ struct mlx5_eswitch_rep *rep = xa_load(&esw->offloads.vport_reps,
+ vport->vport);
+
+ if (!rep)
+ return;
+ xa_erase(&esw->offloads.vport_reps, vport->vport);
+ kfree(rep);
+}
+
+int mlx5_esw_offloads_rep_add(struct mlx5_eswitch *esw,
+ const struct mlx5_vport *vport)
{
struct mlx5_eswitch_rep *rep;
int rep_type;
@@ -2369,9 +2530,19 @@ static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx
rep->vport = vport->vport;
rep->vport_index = vport->index;
- for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
- atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
-
+ for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
+ if (!esw->offloads.rep_ops[rep_type]) {
+ atomic_set(&rep->rep_data[rep_type].state,
+ REP_UNREGISTERED);
+ continue;
+ }
+		/* Dynamic/delegated vports add their representors after
+		 * mlx5_eswitch_register_vport_reps(), so mark them as
+		 * registered so that they are loaded later with the others.
+		 */
+ rep->esw = esw;
+ atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
+ }
err = xa_insert(&esw->offloads.vport_reps, rep->vport, rep, GFP_KERNEL);
if (err)
goto insert_err;
@@ -2409,7 +2580,7 @@ static int esw_offloads_init_reps(struct mlx5_eswitch *esw)
xa_init(&esw->offloads.vport_reps);
mlx5_esw_for_each_vport(esw, i, vport) {
- err = mlx5_esw_offloads_rep_init(esw, vport);
+ err = mlx5_esw_offloads_rep_add(esw, vport);
if (err)
goto err;
}
@@ -2447,7 +2618,8 @@ done:
}
static int esw_port_metadata_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
@@ -2527,8 +2699,11 @@ static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep, u8 rep_type)
{
if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
- REP_LOADED, REP_REGISTERED) == REP_LOADED)
+ REP_LOADED, REP_REGISTERED) == REP_LOADED) {
+ if (rep_type == REP_ETH)
+ __esw_offloads_unload_rep(esw, rep, REP_IB);
esw->offloads.rep_ops[rep_type]->unload(rep);
+ }
}
static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
@@ -2810,9 +2985,9 @@ static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
if (IS_ERR(vport))
return PTR_ERR(vport);
- egress_ns = mlx5_get_flow_vport_acl_namespace(master,
- MLX5_FLOW_NAMESPACE_ESW_EGRESS,
- vport->index);
+ egress_ns = mlx5_get_flow_vport_namespace(master,
+ MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+ vport->index);
if (!egress_ns)
return -EINVAL;
@@ -3057,7 +3232,8 @@ err_out:
return err;
}
-void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key)
+void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw,
+ const struct mlx5_devcom_match_attr *attr)
{
int i;
@@ -3076,10 +3252,10 @@ void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key)
esw->num_peers = 0;
esw->devcom = mlx5_devcom_register_component(esw->dev->priv.devc,
MLX5_DEVCOM_ESW_OFFLOADS,
- key,
+ attr,
mlx5_esw_offloads_devcom_event,
esw);
- if (IS_ERR(esw->devcom))
+ if (!esw->devcom)
return;
mlx5_devcom_send_event(esw->devcom,
@@ -3090,7 +3266,7 @@ void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key)
void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
- if (IS_ERR_OR_NULL(esw->devcom))
+ if (!esw->devcom)
return;
mlx5_devcom_send_event(esw->devcom,
@@ -3389,6 +3565,10 @@ create_indir_err:
static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
+ mlx5_esw_fdb_drop_destroy(esw);
+ if (esw->fdb_table.offloads.drop_root_fc)
+ mlx5_fc_destroy(esw->dev, esw->fdb_table.offloads.drop_root_fc);
+ esw->fdb_table.offloads.drop_root_fc = NULL;
esw_destroy_vport_rx_drop_rule(esw);
esw_destroy_vport_rx_drop_group(esw);
esw_destroy_vport_rx_group(esw);
@@ -3507,14 +3687,19 @@ bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 cont
int esw_offloads_enable(struct mlx5_eswitch *esw)
{
+ u8 mapping_id[MLX5_SW_IMAGE_GUID_MAX_BYTES];
struct mapping_ctx *reg_c0_obj_pool;
struct mlx5_vport *vport;
unsigned long i;
- u64 mapping_id;
+ u8 id_len;
int err;
mutex_init(&esw->offloads.termtbl_mutex);
- mlx5_rdma_enable_roce(esw->dev);
+ mlx5_esw_adjacent_vhcas_setup(esw);
+
+ err = mlx5_rdma_enable_roce(esw->dev);
+ if (err)
+ goto err_roce;
err = mlx5_esw_host_number_init(esw);
if (err)
@@ -3528,9 +3713,10 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
if (err)
goto err_vport_metadata;
- mapping_id = mlx5_query_nic_system_image_guid(esw->dev);
+ mlx5_query_nic_sw_system_image_guid(esw->dev, mapping_id, &id_len);
- reg_c0_obj_pool = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN,
+ reg_c0_obj_pool = mapping_create_for_id(mapping_id, id_len,
+ MAPPING_TYPE_CHAIN,
sizeof(struct mlx5_mapped_obj),
ESW_REG_C0_USER_DATA_METADATA_MASK,
true);
@@ -3545,6 +3731,11 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
if (err)
goto err_steering_init;
+ if (esw->offloads_inactive)
+ mlx5_esw_fdb_inactive(esw);
+ else
+ mlx5_esw_fdb_active(esw);
+
/* Representor will control the vport link state */
mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
@@ -3575,6 +3766,8 @@ err_vport_metadata:
esw_offloads_metadata_uninit(esw);
err_metadata:
mlx5_rdma_disable_roce(esw->dev);
+err_roce:
+ mlx5_esw_adjacent_vhcas_cleanup(esw);
mutex_destroy(&esw->offloads.termtbl_mutex);
return err;
}
@@ -3584,7 +3777,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
{
int err;
- esw->mode = MLX5_ESWITCH_LEGACY;
+ esw_mode_change(esw, MLX5_ESWITCH_LEGACY);
/* If changing from switchdev to legacy mode without sriov enabled,
* no need to create legacy fdb.
@@ -3608,6 +3801,10 @@ void esw_offloads_disable(struct mlx5_eswitch *esw)
mapping_destroy(esw->offloads.reg_c0_obj_pool);
esw_offloads_metadata_uninit(esw);
mlx5_rdma_disable_roce(esw->dev);
+ mlx5_esw_adjacent_vhcas_cleanup(esw);
+	/* Must be done after the vhcas cleanup to avoid adjacent vports reconnecting */
+ if (esw->offloads_inactive)
+ mlx5_esw_fdb_active(esw); /* legacy mode always active */
mutex_destroy(&esw->offloads.termtbl_mutex);
}
@@ -3618,6 +3815,7 @@ static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
*mlx5_mode = MLX5_ESWITCH_LEGACY;
break;
case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+ case DEVLINK_ESWITCH_MODE_SWITCHDEV_INACTIVE:
*mlx5_mode = MLX5_ESWITCH_OFFLOADS;
break;
default:
@@ -3627,14 +3825,17 @@ static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
return 0;
}
-static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
+static int esw_mode_to_devlink(struct mlx5_eswitch *esw, u16 *mode)
{
- switch (mlx5_mode) {
+ switch (esw->mode) {
case MLX5_ESWITCH_LEGACY:
*mode = DEVLINK_ESWITCH_MODE_LEGACY;
break;
case MLX5_ESWITCH_OFFLOADS:
- *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
+ if (esw->offloads_inactive)
+ *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV_INACTIVE;
+ else
+ *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
break;
default:
return -EINVAL;
@@ -3717,6 +3918,68 @@ void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev)
up_write(&esw->mode_lock);
}
+/* Returns false only when an uplink netdev exists and its netns differs from
+ * the devlink's netns. Returns true otherwise, so entering switchdev mode is
+ * allowed.
+ */
+static bool mlx5_devlink_netdev_netns_immutable_set(struct devlink *devlink,
+ bool immutable)
+{
+ struct mlx5_core_dev *mdev = devlink_priv(devlink);
+ struct net_device *netdev;
+ bool ret;
+
+ netdev = mlx5_uplink_netdev_get(mdev);
+ if (!netdev)
+ return true;
+
+ rtnl_lock();
+ netdev->netns_immutable = immutable;
+ ret = net_eq(dev_net(netdev), devlink_net(devlink));
+ rtnl_unlock();
+
+ mlx5_uplink_netdev_put(mdev, netdev);
+ return ret;
+}
+
+/* Returns true when the change is only between active and inactive switchdev modes */
+static bool mlx5_devlink_switchdev_active_mode_change(struct mlx5_eswitch *esw,
+ u16 devlink_mode)
+{
+ /* current mode is not switchdev */
+ if (esw->mode != MLX5_ESWITCH_OFFLOADS)
+ return false;
+
+ /* new mode is not switchdev */
+ if (devlink_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV &&
+ devlink_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV_INACTIVE)
+ return false;
+
+ /* already inactive: no change in current state */
+ if (devlink_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV_INACTIVE &&
+ esw->offloads_inactive)
+ return false;
+
+ /* already active: no change in current state */
+ if (devlink_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV &&
+ !esw->offloads_inactive)
+ return false;
+
+ down_write(&esw->mode_lock);
+ esw->offloads_inactive = !esw->offloads_inactive;
+ esw->eswitch_operation_in_progress = true;
+ up_write(&esw->mode_lock);
+
+ if (esw->offloads_inactive)
+ mlx5_esw_fdb_inactive(esw);
+ else
+ mlx5_esw_fdb_active(esw);
+
+ down_write(&esw->mode_lock);
+ esw->eswitch_operation_in_progress = false;
+ up_write(&esw->mode_lock);
+ return true;
+}
+
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
@@ -3731,12 +3994,16 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (esw_mode_from_devlink(mode, &mlx5_mode))
return -EINVAL;
- if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && mlx5_get_sd(esw->dev)) {
+ if (mlx5_mode == MLX5_ESWITCH_OFFLOADS && mlx5_get_sd(esw->dev)) {
NL_SET_ERR_MSG_MOD(extack,
"Can't change E-Switch mode to switchdev when multi-PF netdev (Socket Direct) is configured.");
return -EPERM;
}
+ /* Avoid try_lock, active/inactive mode change is not restricted */
+ if (mlx5_devlink_switchdev_active_mode_change(esw, mode))
+ return 0;
+
mlx5_lag_disable_change(esw->dev);
err = mlx5_esw_try_lock(esw);
if (err < 0) {
@@ -3759,23 +4026,36 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
esw->eswitch_operation_in_progress = true;
up_write(&esw->mode_lock);
+ if (mlx5_mode == MLX5_ESWITCH_OFFLOADS &&
+ !mlx5_devlink_netdev_netns_immutable_set(devlink, true)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's.");
+ err = -EINVAL;
+ goto skip;
+ }
+
+ if (mlx5_mode == MLX5_ESWITCH_LEGACY)
+ esw->dev->priv.flags |= MLX5_PRIV_FLAGS_SWITCH_LEGACY;
mlx5_eswitch_disable_locked(esw);
- if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
+ if (mlx5_mode == MLX5_ESWITCH_OFFLOADS) {
if (mlx5_devlink_trap_get_num_active(esw->dev)) {
NL_SET_ERR_MSG_MOD(extack,
"Can't change mode while devlink traps are active");
err = -EOPNOTSUPP;
goto skip;
}
+ esw->offloads_inactive =
+ (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV_INACTIVE);
err = esw_offloads_start(esw, extack);
- } else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
+ } else if (mlx5_mode == MLX5_ESWITCH_LEGACY) {
err = esw_offloads_stop(esw, extack);
- mlx5_rescan_drivers(esw->dev);
} else {
err = -EINVAL;
}
skip:
+ if (mlx5_mode == MLX5_ESWITCH_OFFLOADS && err)
+ mlx5_devlink_netdev_netns_immutable_set(devlink, false);
down_write(&esw->mode_lock);
esw->eswitch_operation_in_progress = false;
unlock:
@@ -3793,7 +4073,7 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
if (IS_ERR(esw))
return PTR_ERR(esw);
- return esw_mode_to_devlink(esw->mode, mode);
+ return esw_mode_to_devlink(esw, mode);
}
static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode,
@@ -3915,23 +4195,25 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}
-bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
+bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev, bool from_fdb)
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
+ enum devlink_eswitch_encap_mode encap;
+ bool allow_tunnel = false;
if (!mlx5_esw_allowed(esw))
return true;
down_write(&esw->mode_lock);
- if (esw->mode != MLX5_ESWITCH_LEGACY &&
- esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
- up_write(&esw->mode_lock);
- return false;
+ encap = esw->offloads.encap;
+ if (esw->mode == MLX5_ESWITCH_LEGACY ||
+ (encap == DEVLINK_ESWITCH_ENCAP_MODE_NONE && !from_fdb)) {
+ allow_tunnel = true;
+ esw->offloads.num_block_encap++;
}
-
- esw->offloads.num_block_encap++;
up_write(&esw->mode_lock);
- return true;
+
+ return allow_tunnel;
}
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
@@ -4036,7 +4318,8 @@ mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
{
/* Currently, only ECPF based device has representor for host PF. */
if (vport_num == MLX5_VPORT_PF &&
- !mlx5_core_is_ecpf_esw_manager(esw->dev))
+ (!mlx5_core_is_ecpf_esw_manager(esw->dev) ||
+ !mlx5_esw_host_functions_enabled(esw->dev)))
return false;
if (vport_num == MLX5_VPORT_ECPF &&
@@ -4138,48 +4421,28 @@ u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
-static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id)
-{
- int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- void *query_ctx;
- void *hca_caps;
- int err;
-
- *vhca_id = 0;
-
- query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
- if (!query_ctx)
- return -ENOMEM;
-
- err = mlx5_vport_get_other_func_general_cap(esw->dev, vport_num, query_ctx);
- if (err)
- goto out_free;
-
- hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
- *vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);
-
-out_free:
- kfree(query_ctx);
- return err;
-}
-
-int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
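+/* Record the vport's vhca_id in the eswitch vhca_id -> vport_num map. The
+ * vhca_id is expected to already be cached on the vport; querying it here is
+ * only a fallback.
+ */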
+int mlx5_esw_vport_vhca_id_map(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
u16 *old_entry, *vhca_map_entry, vhca_id;
- int err;
- err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
- if (err) {
- esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
- vport_num, err);
- return err;
+ if (WARN_ONCE(MLX5_VPORT_INVAL_VHCA_ID(vport),
+ "vport %d vhca_id is not set", vport->vport)) {
+ int err;
+
+ err = mlx5_vport_get_vhca_id(vport->dev, vport->vport,
+ &vhca_id);
+ if (err)
+ return err;
+ vport->vhca_id = vhca_id;
}
+ vhca_id = vport->vhca_id;
vhca_map_entry = kmalloc(sizeof(*vhca_map_entry), GFP_KERNEL);
if (!vhca_map_entry)
return -ENOMEM;
- *vhca_map_entry = vport_num;
+ *vhca_map_entry = vport->vport;
old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL);
if (xa_is_err(old_entry)) {
kfree(vhca_map_entry);
@@ -4189,17 +4452,12 @@ int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
return 0;
}
-void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num)
+void mlx5_esw_vport_vhca_id_unmap(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- u16 *vhca_map_entry, vhca_id;
- int err;
+ u16 *vhca_map_entry;
- err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
- if (err)
- esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%hu,err=%d)\n",
- vport_num, err);
-
- vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vhca_id);
+ vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vport->vhca_id);
kfree(vhca_map_entry);
}
@@ -4234,6 +4492,9 @@ int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port,
struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
mutex_lock(&esw->state_lock);
+
+ mlx5_query_nic_vport_mac_address(esw->dev, vport->vport, true,
+ vport->info.mac);
ether_addr_copy(hw_addr, vport->info.mac);
*hw_addr_len = ETH_ALEN;
mutex_unlock(&esw->state_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
index d91ea53eb394..01c5f5990f9a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -6,6 +6,7 @@
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/events.h"
+#include "hwmon.h"
struct mlx5_event_nb {
struct mlx5_nb nb;
@@ -153,21 +154,50 @@ static int any_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
+#if IS_ENABLED(CONFIG_HWMON)
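+/* Log the hwmon sensor name for every bit set in the given sensor-warning
+ * bit set; bit_set_offset shifts the reported sensor index.
+ */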
+static void print_sensor_names_in_bit_set(struct mlx5_core_dev *dev, struct mlx5_hwmon *hwmon,
+ u64 bit_set, int bit_set_offset)
+{
+ unsigned long *bit_set_ptr = (unsigned long *)&bit_set;
+ int num_bits = sizeof(bit_set) * BITS_PER_BYTE;
+ int i;
+
+ for_each_set_bit(i, bit_set_ptr, num_bits) {
+ const char *sensor_name = hwmon_get_sensor_name(hwmon, i + bit_set_offset);
+
+ mlx5_core_warn(dev, "Sensor name[%d]: %s\n", i + bit_set_offset, sensor_name);
+ }
+}
+#endif /* CONFIG_HWMON */
+
/* type == MLX5_EVENT_TYPE_TEMP_WARN_EVENT */
static int temp_warn(struct notifier_block *nb, unsigned long type, void *data)
{
struct mlx5_event_nb *event_nb = mlx5_nb_cof(nb, struct mlx5_event_nb, nb);
struct mlx5_events *events = event_nb->ctx;
+ struct mlx5_core_dev *dev = events->dev;
struct mlx5_eqe *eqe = data;
u64 value_lsb;
u64 value_msb;
value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb);
+	/* bits 1-63 are not supported for NICs,
+	 * hence read only bit 0 (ASIC) from the lsb.
+	 */
+ value_lsb &= 0x1;
value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb);
- mlx5_core_warn(events->dev,
- "High temperature on sensors with bit set %llx %llx",
- value_msb, value_lsb);
+ if (net_ratelimit()) {
+ mlx5_core_warn(dev, "High temperature on sensors with bit set %#llx %#llx.\n",
+ value_msb, value_lsb);
+#if IS_ENABLED(CONFIG_HWMON)
+ if (dev->hwmon) {
+ print_sensor_names_in_bit_set(dev, dev->hwmon, value_lsb, 0);
+ print_sensor_names_in_bit_set(dev, dev->hwmon, value_msb,
+ sizeof(value_lsb) * BITS_PER_BYTE);
+ }
+#endif
+ }
return NOTIFY_OK;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index c4de6bf8d1b6..ccef64fb40b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -421,6 +421,13 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
__be64 *pas;
u32 i;
+ conn->cq.mcq.cqe_sz = 64;
+ conn->cq.mcq.set_ci_db = conn->cq.wq_ctrl.db.db;
+ conn->cq.mcq.arm_db = conn->cq.wq_ctrl.db.db + 1;
+ *conn->cq.mcq.set_ci_db = 0;
+ conn->cq.mcq.vector = 0;
+ conn->cq.mcq.comp = mlx5_fpga_conn_cq_complete;
+
cq_size = roundup_pow_of_two(cq_size);
MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(cq_size));
@@ -468,16 +475,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
if (err)
goto err_cqwq;
- conn->cq.mcq.cqe_sz = 64;
- conn->cq.mcq.set_ci_db = conn->cq.wq_ctrl.db.db;
- conn->cq.mcq.arm_db = conn->cq.wq_ctrl.db.db + 1;
- *conn->cq.mcq.set_ci_db = 0;
- *conn->cq.mcq.arm_db = 0;
- conn->cq.mcq.vector = 0;
- conn->cq.mcq.comp = mlx5_fpga_conn_cq_complete;
- conn->cq.mcq.uar = fdev->conn_res.uar;
tasklet_setup(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet);
-
mlx5_fpga_dbg(fdev, "Created CQ #0x%x\n", conn->cq.mcq.cqn);
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
index e5c1012921d2..1ec61164e6b5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
@@ -211,7 +211,7 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
max_num_qps = MLX5_CAP_FPGA(mdev, shell_caps.max_num_qps);
if (!max_num_qps) {
mlx5_fpga_err(fdev, "FPGA reports 0 QPs in SHELL_CAPS\n");
- err = -ENOTSUPP;
+ err = -EOPNOTSUPP;
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 676005854dad..ced747bef641 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -217,7 +217,8 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
int err;
if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
- underlay_qpn == 0)
+ underlay_qpn == 0 &&
+ (ft->type != FS_FT_RDMA_RX && ft->type != FS_FT_RDMA_TX))
return 0;
if (ft->type == FS_FT_FDB &&
@@ -238,6 +239,10 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
MLX5_SET(set_flow_table_root_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(set_flow_table_root_in, in, eswitch_owner_vhca_id,
+ ft->esw_owner_vhca_id);
+ MLX5_SET(set_flow_table_root_in, in, other_eswitch,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
err = mlx5_cmd_exec_in(dev, set_flow_table_root, in);
if (!err &&
@@ -301,6 +306,10 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
MLX5_SET(create_flow_table_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(create_flow_table_in, in, eswitch_owner_vhca_id,
+ ft->esw_owner_vhca_id);
+ MLX5_SET(create_flow_table_in, in, other_eswitch,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
en_decap);
@@ -359,6 +368,10 @@ static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
MLX5_SET(destroy_flow_table_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(destroy_flow_table_in, in, eswitch_owner_vhca_id,
+ ft->esw_owner_vhca_id);
+ MLX5_SET(destroy_flow_table_in, in, other_eswitch,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
err = mlx5_cmd_exec_in(dev, destroy_flow_table, in);
if (!err)
@@ -393,6 +406,10 @@ static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport);
MLX5_SET(modify_flow_table_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(modify_flow_table_in, in, eswitch_owner_vhca_id,
+ ft->esw_owner_vhca_id);
+ MLX5_SET(modify_flow_table_in, in, other_eswitch,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
MLX5_SET(modify_flow_table_in, in, modify_field_select,
MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
if (next_ft) {
@@ -428,6 +445,10 @@ static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
MLX5_SET(create_flow_group_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(create_flow_group_in, in, eswitch_owner_vhca_id,
+ ft->esw_owner_vhca_id);
+ MLX5_SET(create_flow_group_in, in, other_eswitch,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
err = mlx5_cmd_exec_inout(dev, create_flow_group, in, out);
if (!err)
fg->id = MLX5_GET(create_flow_group_out, out,
@@ -450,6 +471,10 @@ static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
MLX5_SET(destroy_flow_group_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(destroy_flow_group_in, in, eswitch_owner_vhca_id,
+ ft->esw_owner_vhca_id);
+ MLX5_SET(destroy_flow_group_in, in, other_eswitch,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
return mlx5_cmd_exec_in(dev, destroy_flow_group, in);
}
@@ -526,7 +551,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
struct mlx5_flow_rule *dst;
void *in_flow_context, *vlan;
void *in_match_value;
- int reformat_id = 0;
+ u32 reformat_id = 0;
unsigned int inlen;
int dst_cnt_size;
u32 *in, action;
@@ -558,6 +583,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(set_fte_in, in, vport_number, ft->vport);
MLX5_SET(set_fte_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(set_fte_in, in, eswitch_owner_vhca_id, ft->esw_owner_vhca_id);
+ MLX5_SET(set_fte_in, in, other_eswitch,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
@@ -579,23 +607,21 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(flow_context, in_flow_context, action, action);
if (!extended_dest && fte->act_dests.action.pkt_reformat) {
- struct mlx5_pkt_reformat *pkt_reformat = fte->act_dests.action.pkt_reformat;
-
- if (pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
- reformat_id = mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat);
- if (reformat_id < 0) {
- mlx5_core_err(dev,
- "Unsupported SW-owned pkt_reformat type (%d) in FW-owned table\n",
- pkt_reformat->reformat_type);
- err = reformat_id;
- goto err_out;
- }
- } else {
- reformat_id = fte->act_dests.action.pkt_reformat->id;
+ struct mlx5_pkt_reformat *pkt_reformat =
+ fte->act_dests.action.pkt_reformat;
+
+ err = mlx5_fs_get_packet_reformat_id(pkt_reformat,
+ &reformat_id);
+ if (err) {
+ mlx5_core_err(dev,
+ "Unsupported pkt_reformat type (%d)\n",
+ pkt_reformat->reformat_type);
+ goto err_out;
}
}
- MLX5_SET(flow_context, in_flow_context, packet_reformat_id, (u32)reformat_id);
+ MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
+ reformat_id);
if (fte->act_dests.action.modify_hdr) {
if (fte->act_dests.action.modify_hdr->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
@@ -718,7 +744,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
continue;
MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
- dst->dest_attr.counter_id);
+ mlx5_fc_id(dst->dest_attr.counter));
in_dests += dst_cnt_size;
list_size++;
}
@@ -789,6 +815,10 @@ static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
MLX5_SET(delete_fte_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(delete_fte_in, in, eswitch_owner_vhca_id,
+ ft->esw_owner_vhca_id);
+ MLX5_SET(delete_fte_in, in, other_eswitch,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
return mlx5_cmd_exec_in(dev, delete_fte, in);
}
@@ -1141,6 +1171,8 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type typ
case FS_FT_RDMA_RX:
case FS_FT_RDMA_TX:
case FS_FT_PORT_SEL:
+ case FS_FT_RDMA_TRANSPORT_RX:
+ case FS_FT_RDMA_TRANSPORT_TX:
return mlx5_fs_cmd_get_fw_cmds();
default:
return mlx5_fs_cmd_get_stub_cmds();
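
All of the flow-table, flow-group and FTE commands above gain the same two fields. A hypothetical helper macro (not part of the patch, shown only to make the repeated pattern explicit; it assumes the MLX5_SET() usage already seen throughout fs_cmd.c):

/* Illustrative only: stamp the eswitch-owner fields on any *_in mailbox. */
#define MLX5_SET_ESW_OWNER(typ, in, ft)						\
	do {									\
		MLX5_SET(typ, in, eswitch_owner_vhca_id,			\
			 (ft)->esw_owner_vhca_id);				\
		MLX5_SET(typ, in, other_eswitch,				\
			 !!((ft)->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));	\
	} while (0)

/* e.g.: MLX5_SET_ESW_OWNER(set_flow_table_root_in, in, ft); */
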
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index c2db0a1c132b..0a6031a64c6f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -113,13 +113,16 @@
#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
-/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy,
- * {IPsec RoCE MPV,Alias table},IPsec RoCE policy
+/* Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy,
+ * IPsec policy miss, {IPsec RoCE MPV,Alias table},IPsec RoCE policy
*/
#define KERNEL_NIC_PRIO_NUM_LEVELS 11
#define KERNEL_NIC_NUM_PRIOS 1
-/* One more level for tc */
-#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
+/* One more level for tc, and one more for promisc */
+#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 2)
+
+#define KERNEL_NIC_PROMISC_NUM_PRIOS 1
+#define KERNEL_NIC_PROMISC_NUM_LEVELS 1
#define KERNEL_NIC_TC_NUM_PRIOS 1
#define KERNEL_NIC_TC_NUM_LEVELS 3
@@ -187,6 +190,8 @@ static struct init_tree_node {
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS,
KERNEL_NIC_TC_NUM_LEVELS),
+ ADD_MULTIPLE_PRIO(KERNEL_NIC_PROMISC_NUM_PRIOS,
+ KERNEL_NIC_PROMISC_NUM_LEVELS),
ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
KERNEL_NIC_PRIO_NUM_LEVELS))),
ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
@@ -658,6 +663,7 @@ static void del_sw_hw_rule(struct fs_node *node)
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ mlx5_fc_local_put(rule->dest_attr.counter);
goto out;
}
@@ -820,11 +826,17 @@ static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
return index;
fte->index = index + fg->start_index;
+retry_insert:
ret = rhashtable_insert_fast(&fg->ftes_hash,
&fte->hash,
rhash_fte);
- if (ret)
+ if (ret) {
+ if (ret == -EBUSY) {
+ cond_resched();
+ goto retry_insert;
+ }
goto err_ida_remove;
+ }
tree_add_node(&fte->node, &fg->node);
list_add_tail(&fte->node.list, &fg->node.children);
@@ -927,10 +939,10 @@ static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *f
return fg;
}
-static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport,
- enum fs_flow_table_type table_type,
- enum fs_flow_table_op_mod op_mod,
- u32 flags)
+static struct mlx5_flow_table *
+alloc_flow_table(struct mlx5_flow_table_attr *ft_attr, u16 vport,
+ enum fs_flow_table_type table_type,
+ enum fs_flow_table_op_mod op_mod)
{
struct mlx5_flow_table *ft;
int ret;
@@ -945,12 +957,13 @@ static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport,
return ERR_PTR(ret);
}
- ft->level = level;
+ ft->level = ft_attr->level;
ft->node.type = FS_TYPE_FLOW_TABLE;
ft->op_mod = op_mod;
ft->type = table_type;
ft->vport = vport;
- ft->flags = flags;
+ ft->esw_owner_vhca_id = ft_attr->esw_owner_vhca_id;
+ ft->flags = ft_attr->flags;
INIT_LIST_HEAD(&ft->fwd_rules);
mutex_init(&ft->lock);
@@ -1358,10 +1371,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
/* The level is related to the
* priority level range.
*/
- ft = alloc_flow_table(ft_attr->level,
- vport,
- root->table_type,
- op_mod, ft_attr->flags);
+ ft = alloc_flow_table(ft_attr, vport, root->table_type, op_mod);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto unlock_root;
@@ -1449,7 +1459,7 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
struct mlx5_flow_table *ft;
int autogroups_max_fte;
- ft = mlx5_create_flow_table(ns, ft_attr);
+ ft = mlx5_create_vport_flow_table(ns, ft_attr, ft_attr->vport);
if (IS_ERR(ft))
return ft;
@@ -1823,14 +1833,35 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft,
return err;
}
+int mlx5_fs_get_packet_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *id)
+{
+ switch (pkt_reformat->owner) {
+ case MLX5_FLOW_RESOURCE_OWNER_FW:
+ *id = pkt_reformat->id;
+ return 0;
+ case MLX5_FLOW_RESOURCE_OWNER_SW:
+ return mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat, id);
+ case MLX5_FLOW_RESOURCE_OWNER_HWS:
+ return mlx5_fs_hws_action_get_pkt_reformat_id(pkt_reformat, id);
+ default:
+ return -EINVAL;
+ }
+}
+
static bool mlx5_pkt_reformat_cmp(struct mlx5_pkt_reformat *p1,
struct mlx5_pkt_reformat *p2)
{
- return p1->owner == p2->owner &&
- (p1->owner == MLX5_FLOW_RESOURCE_OWNER_FW ?
- p1->id == p2->id :
- mlx5_fs_dr_action_get_pkt_reformat_id(p1) ==
- mlx5_fs_dr_action_get_pkt_reformat_id(p2));
+ int err1, err2;
+ u32 id1, id2;
+
+ if (p1->owner != p2->owner)
+ return false;
+
+ err1 = mlx5_fs_get_packet_reformat_id(p1, &id1);
+ err2 = mlx5_fs_get_packet_reformat_id(p2, &id2);
+
+ return !err1 && !err2 && id1 == id2;
}
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
@@ -2105,13 +2136,22 @@ lookup_fte_locked(struct mlx5_flow_group *g,
fte_tmp = NULL;
goto out;
}
+
+ nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
+
if (!fte_tmp->node.active) {
+ up_write_ref_node(&fte_tmp->node, false);
+
+ if (take_write)
+ up_write_ref_node(&g->node, false);
+ else
+ up_read_ref_node(&g->node);
+
tree_put_node(&fte_tmp->node, false);
- fte_tmp = NULL;
- goto out;
+
+ return NULL;
}
- nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
out:
if (take_write)
up_write_ref_node(&g->node, false);
@@ -2191,6 +2231,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
struct mlx5_flow_handle *rule;
struct match_list *iter;
bool take_write = false;
+ bool try_again = false;
struct fs_fte *fte;
u64 version = 0;
int err;
@@ -2255,6 +2296,7 @@ skip_search:
nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
if (!g->node.active) {
+ try_again = true;
up_write_ref_node(&g->node, false);
continue;
}
@@ -2276,7 +2318,8 @@ skip_search:
tree_put_node(&fte->node, false);
return rule;
}
- rule = ERR_PTR(-ENOENT);
+ err = try_again ? -EAGAIN : -ENOENT;
+ rule = ERR_PTR(err);
out:
kmem_cache_free(steering->ftes_cache, fte);
return rule;
@@ -2700,6 +2743,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
break;
case MLX5_FLOW_NAMESPACE_RDMA_TX:
root_ns = steering->rdma_tx_root_ns;
+ prio = RDMA_TX_BYPASS_PRIO;
break;
case MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS:
root_ns = steering->rdma_rx_root_ns;
@@ -2747,36 +2791,56 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL(mlx5_get_flow_namespace);
-struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
- enum mlx5_flow_namespace_type type,
- int vport)
+struct mlx5_vport_acl_root_ns {
+ u16 vport_idx;
+ struct mlx5_flow_root_namespace *root_ns;
+};
+
+struct mlx5_flow_namespace *
+mlx5_get_flow_vport_namespace(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type type, int vport_idx)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
+ struct mlx5_vport_acl_root_ns *vport_ns;
if (!steering)
return NULL;
switch (type) {
case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
- if (vport >= steering->esw_egress_acl_vports)
- return NULL;
- if (steering->esw_egress_root_ns &&
- steering->esw_egress_root_ns[vport])
- return &steering->esw_egress_root_ns[vport]->ns;
+ vport_ns = xa_load(&steering->esw_egress_root_ns, vport_idx);
+ if (vport_ns)
+ return &vport_ns->root_ns->ns;
else
return NULL;
case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
- if (vport >= steering->esw_ingress_acl_vports)
+ vport_ns = xa_load(&steering->esw_ingress_root_ns, vport_idx);
+ if (vport_ns)
+ return &vport_ns->root_ns->ns;
+ else
+ return NULL;
+ case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX:
+ if (vport_idx >= steering->rdma_transport_rx_vports)
return NULL;
- if (steering->esw_ingress_root_ns &&
- steering->esw_ingress_root_ns[vport])
- return &steering->esw_ingress_root_ns[vport]->ns;
+ if (steering->rdma_transport_rx_root_ns &&
+ steering->rdma_transport_rx_root_ns[vport_idx])
+ return &steering->rdma_transport_rx_root_ns[vport_idx]->ns;
+ else
+ return NULL;
+ case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX:
+ if (vport_idx >= steering->rdma_transport_tx_vports)
+ return NULL;
+
+ if (steering->rdma_transport_tx_root_ns &&
+ steering->rdma_transport_tx_root_ns[vport_idx])
+ return &steering->rdma_transport_tx_root_ns[vport_idx]->ns;
else
return NULL;
default:
return NULL;
}
}
+EXPORT_SYMBOL(mlx5_get_flow_vport_namespace);
static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
unsigned int prio,
@@ -3182,6 +3246,211 @@ out_err:
return err;
}
+static int
+init_rdma_transport_rx_root_ns_one(struct mlx5_flow_steering *steering,
+ int vport_idx)
+{
+ struct mlx5_flow_root_namespace *root_ns;
+ struct fs_prio *prio;
+ int ret;
+ int i;
+
+ steering->rdma_transport_rx_root_ns[vport_idx] =
+ create_root_ns(steering, FS_FT_RDMA_TRANSPORT_RX);
+ if (!steering->rdma_transport_rx_root_ns[vport_idx])
+ return -ENOMEM;
+
+ root_ns = steering->rdma_transport_rx_root_ns[vport_idx];
+
+ for (i = 0; i < MLX5_RDMA_TRANSPORT_BYPASS_PRIO; i++) {
+ prio = fs_create_prio(&root_ns->ns, i, 1);
+ if (IS_ERR(prio)) {
+ ret = PTR_ERR(prio);
+ goto err;
+ }
+ }
+ set_prio_attrs(root_ns);
+ return 0;
+
+err:
+ cleanup_root_ns(root_ns);
+ return ret;
+}
+
+static int
+init_rdma_transport_tx_root_ns_one(struct mlx5_flow_steering *steering,
+ int vport_idx)
+{
+ struct mlx5_flow_root_namespace *root_ns;
+ struct fs_prio *prio;
+ int ret;
+ int i;
+
+ steering->rdma_transport_tx_root_ns[vport_idx] =
+ create_root_ns(steering, FS_FT_RDMA_TRANSPORT_TX);
+ if (!steering->rdma_transport_tx_root_ns[vport_idx])
+ return -ENOMEM;
+
+ root_ns = steering->rdma_transport_tx_root_ns[vport_idx];
+
+ for (i = 0; i < MLX5_RDMA_TRANSPORT_BYPASS_PRIO; i++) {
+ prio = fs_create_prio(&root_ns->ns, i, 1);
+ if (IS_ERR(prio)) {
+ ret = PTR_ERR(prio);
+ goto err;
+ }
+ }
+ set_prio_attrs(root_ns);
+ return 0;
+
+err:
+ cleanup_root_ns(root_ns);
+ return ret;
+}
+
+static bool mlx5_fs_ns_is_empty(struct mlx5_flow_namespace *ns)
+{
+ struct fs_prio *iter_prio;
+
+ fs_for_each_prio(iter_prio, ns) {
+ if (iter_prio->num_ft)
+ return false;
+ }
+
+ return true;
+}
+
+int mlx5_fs_set_root_dev(struct mlx5_core_dev *dev,
+ struct mlx5_core_dev *new_dev,
+ enum fs_flow_table_type table_type)
+{
+ struct mlx5_flow_root_namespace **root;
+ int total_vports;
+ int i;
+
+ switch (table_type) {
+ case FS_FT_RDMA_TRANSPORT_TX:
+ root = dev->priv.steering->rdma_transport_tx_root_ns;
+ total_vports = dev->priv.steering->rdma_transport_tx_vports;
+ break;
+ case FS_FT_RDMA_TRANSPORT_RX:
+ root = dev->priv.steering->rdma_transport_rx_root_ns;
+ total_vports = dev->priv.steering->rdma_transport_rx_vports;
+ break;
+ default:
+ WARN_ON_ONCE(true);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < total_vports; i++) {
+ mutex_lock(&root[i]->chain_lock);
+ if (!mlx5_fs_ns_is_empty(&root[i]->ns)) {
+ mutex_unlock(&root[i]->chain_lock);
+ goto err;
+ }
+ root[i]->dev = new_dev;
+ mutex_unlock(&root[i]->chain_lock);
+ }
+ return 0;
+err:
+ while (i--) {
+ mutex_lock(&root[i]->chain_lock);
+ root[i]->dev = dev;
+ mutex_unlock(&root[i]->chain_lock);
+ }
+ /* If this error is hit, destroy all flow tables on these namespaces and retry */
+ mlx5_core_err(dev, "Failed to set root device for RDMA TRANSPORT\n");
+ return -EINVAL;
+}
+EXPORT_SYMBOL(mlx5_fs_set_root_dev);
+
+static int init_rdma_transport_rx_root_ns(struct mlx5_flow_steering *steering)
+{
+ struct mlx5_core_dev *dev = steering->dev;
+ int total_vports;
+ int err;
+ int i;
+
+ /* If the eswitch is not supported or is in legacy mode, fall back to a single vport */
+ total_vports = mlx5_eswitch_get_total_vports(dev) ?: 1;
+
+ steering->rdma_transport_rx_root_ns =
+ kcalloc(total_vports,
+ sizeof(*steering->rdma_transport_rx_root_ns),
+ GFP_KERNEL);
+ if (!steering->rdma_transport_rx_root_ns)
+ return -ENOMEM;
+
+ for (i = 0; i < total_vports; i++) {
+ err = init_rdma_transport_rx_root_ns_one(steering, i);
+ if (err)
+ goto cleanup_root_ns;
+ }
+ steering->rdma_transport_rx_vports = total_vports;
+ return 0;
+
+cleanup_root_ns:
+ while (i--)
+ cleanup_root_ns(steering->rdma_transport_rx_root_ns[i]);
+ kfree(steering->rdma_transport_rx_root_ns);
+ steering->rdma_transport_rx_root_ns = NULL;
+ return err;
+}
+
+static int init_rdma_transport_tx_root_ns(struct mlx5_flow_steering *steering)
+{
+ struct mlx5_core_dev *dev = steering->dev;
+ int total_vports;
+ int err;
+ int i;
+
+ /* If the eswitch is not supported or is in legacy mode, fall back to a single vport */
+ total_vports = mlx5_eswitch_get_total_vports(dev) ?: 1;
+
+ steering->rdma_transport_tx_root_ns =
+ kcalloc(total_vports,
+ sizeof(*steering->rdma_transport_tx_root_ns),
+ GFP_KERNEL);
+ if (!steering->rdma_transport_tx_root_ns)
+ return -ENOMEM;
+
+ for (i = 0; i < total_vports; i++) {
+ err = init_rdma_transport_tx_root_ns_one(steering, i);
+ if (err)
+ goto cleanup_root_ns;
+ }
+ steering->rdma_transport_tx_vports = total_vports;
+ return 0;
+
+cleanup_root_ns:
+ while (i--)
+ cleanup_root_ns(steering->rdma_transport_tx_root_ns[i]);
+ kfree(steering->rdma_transport_tx_root_ns);
+ steering->rdma_transport_tx_root_ns = NULL;
+ return err;
+}
+
+static void cleanup_rdma_transport_roots_ns(struct mlx5_flow_steering *steering)
+{
+ int i;
+
+ if (steering->rdma_transport_rx_root_ns) {
+ for (i = 0; i < steering->rdma_transport_rx_vports; i++)
+ cleanup_root_ns(steering->rdma_transport_rx_root_ns[i]);
+
+ kfree(steering->rdma_transport_rx_root_ns);
+ steering->rdma_transport_rx_root_ns = NULL;
+ }
+
+ if (steering->rdma_transport_tx_root_ns) {
+ for (i = 0; i < steering->rdma_transport_tx_vports; i++)
+ cleanup_root_ns(steering->rdma_transport_tx_root_ns[i]);
+
+ kfree(steering->rdma_transport_tx_root_ns);
+ steering->rdma_transport_tx_root_ns = NULL;
+ }
+}
+
/* FT and tc chains are stored in the same array so we can re-use the
* mlx5_get_fdb_sub_ns() and tc api for FT chains.
* When creating a new ns for each chain store it in the first available slot.
@@ -3305,6 +3574,11 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
if (!steering->fdb_root_ns)
return -ENOMEM;
+ maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_DROP_ROOT, 1);
+ err = PTR_ERR_OR_ZERO(maj_prio);
+ if (err)
+ goto out_err;
+
err = create_fdb_bypass(steering);
if (err)
goto out_err;
@@ -3362,118 +3636,102 @@ out_err:
return err;
}
-static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
+static void
+mlx5_fs_remove_vport_acl_root_ns(struct xarray *esw_acl_root_ns, u16 vport_idx)
{
- struct fs_prio *prio;
+ struct mlx5_vport_acl_root_ns *vport_ns;
- steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
- if (!steering->esw_egress_root_ns[vport])
- return -ENOMEM;
-
- /* create 1 prio*/
- prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
- return PTR_ERR_OR_ZERO(prio);
+ vport_ns = xa_erase(esw_acl_root_ns, vport_idx);
+ if (vport_ns) {
+ cleanup_root_ns(vport_ns->root_ns);
+ kfree(vport_ns);
+ }
}
-static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
+static int
+mlx5_fs_add_vport_acl_root_ns(struct mlx5_flow_steering *steering,
+ struct xarray *esw_acl_root_ns,
+ enum fs_flow_table_type table_type,
+ u16 vport_idx)
{
+ struct mlx5_vport_acl_root_ns *vport_ns;
struct fs_prio *prio;
+ int err;
- steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
- if (!steering->esw_ingress_root_ns[vport])
- return -ENOMEM;
+ /* Sanity check: only the intended xarrays may be passed in */
+ if (WARN_ON(esw_acl_root_ns != &steering->esw_egress_root_ns &&
+ esw_acl_root_ns != &steering->esw_ingress_root_ns))
+ return -EINVAL;
- /* create 1 prio*/
- prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
- return PTR_ERR_OR_ZERO(prio);
-}
+ if (table_type != FS_FT_ESW_EGRESS_ACL &&
+ table_type != FS_FT_ESW_INGRESS_ACL) {
+ mlx5_core_err(steering->dev,
+ "Invalid table type %d for egress/ingress ACLs\n",
+ table_type);
+ return -EINVAL;
+ }
-int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports)
-{
- struct mlx5_flow_steering *steering = dev->priv.steering;
- int err;
- int i;
+ if (xa_load(esw_acl_root_ns, vport_idx))
+ return -EEXIST;
- steering->esw_egress_root_ns =
- kcalloc(total_vports,
- sizeof(*steering->esw_egress_root_ns),
- GFP_KERNEL);
- if (!steering->esw_egress_root_ns)
+ vport_ns = kzalloc(sizeof(*vport_ns), GFP_KERNEL);
+ if (!vport_ns)
return -ENOMEM;
- for (i = 0; i < total_vports; i++) {
- err = init_egress_acl_root_ns(steering, i);
- if (err)
- goto cleanup_root_ns;
+ vport_ns->root_ns = create_root_ns(steering, table_type);
+ if (!vport_ns->root_ns) {
+ err = -ENOMEM;
+ goto kfree_vport_ns;
+ }
+
+ /* create 1 prio */
+ prio = fs_create_prio(&vport_ns->root_ns->ns, 0, 1);
+ if (IS_ERR(prio)) {
+ err = PTR_ERR(prio);
+ goto cleanup_root_ns;
}
- steering->esw_egress_acl_vports = total_vports;
+
+ vport_ns->vport_idx = vport_idx;
+ err = xa_insert(esw_acl_root_ns, vport_idx, vport_ns, GFP_KERNEL);
+ if (err)
+ goto cleanup_root_ns;
return 0;
cleanup_root_ns:
- for (i--; i >= 0; i--)
- cleanup_root_ns(steering->esw_egress_root_ns[i]);
- kfree(steering->esw_egress_root_ns);
- steering->esw_egress_root_ns = NULL;
+ cleanup_root_ns(vport_ns->root_ns);
+kfree_vport_ns:
+ kfree(vport_ns);
return err;
}
-void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev)
+int mlx5_fs_vport_egress_acl_ns_add(struct mlx5_flow_steering *steering,
+ u16 vport_idx)
{
- struct mlx5_flow_steering *steering = dev->priv.steering;
- int i;
-
- if (!steering->esw_egress_root_ns)
- return;
-
- for (i = 0; i < steering->esw_egress_acl_vports; i++)
- cleanup_root_ns(steering->esw_egress_root_ns[i]);
-
- kfree(steering->esw_egress_root_ns);
- steering->esw_egress_root_ns = NULL;
+ return mlx5_fs_add_vport_acl_root_ns(steering,
+ &steering->esw_egress_root_ns,
+ FS_FT_ESW_EGRESS_ACL, vport_idx);
}
-int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports)
+int mlx5_fs_vport_ingress_acl_ns_add(struct mlx5_flow_steering *steering,
+ u16 vport_idx)
{
- struct mlx5_flow_steering *steering = dev->priv.steering;
- int err;
- int i;
-
- steering->esw_ingress_root_ns =
- kcalloc(total_vports,
- sizeof(*steering->esw_ingress_root_ns),
- GFP_KERNEL);
- if (!steering->esw_ingress_root_ns)
- return -ENOMEM;
-
- for (i = 0; i < total_vports; i++) {
- err = init_ingress_acl_root_ns(steering, i);
- if (err)
- goto cleanup_root_ns;
- }
- steering->esw_ingress_acl_vports = total_vports;
- return 0;
-
-cleanup_root_ns:
- for (i--; i >= 0; i--)
- cleanup_root_ns(steering->esw_ingress_root_ns[i]);
- kfree(steering->esw_ingress_root_ns);
- steering->esw_ingress_root_ns = NULL;
- return err;
+ return mlx5_fs_add_vport_acl_root_ns(steering,
+ &steering->esw_ingress_root_ns,
+ FS_FT_ESW_INGRESS_ACL, vport_idx);
}
-void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev)
+void mlx5_fs_vport_egress_acl_ns_remove(struct mlx5_flow_steering *steering,
+ int vport_idx)
{
- struct mlx5_flow_steering *steering = dev->priv.steering;
- int i;
-
- if (!steering->esw_ingress_root_ns)
- return;
-
- for (i = 0; i < steering->esw_ingress_acl_vports; i++)
- cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+ mlx5_fs_remove_vport_acl_root_ns(&steering->esw_egress_root_ns,
+ vport_idx);
+}
- kfree(steering->esw_ingress_root_ns);
- steering->esw_ingress_root_ns = NULL;
+void mlx5_fs_vport_ingress_acl_ns_remove(struct mlx5_flow_steering *steering,
+ int vport_idx)
+{
+ mlx5_fs_remove_vport_acl_root_ns(&steering->esw_ingress_root_ns,
+ vport_idx);
}
u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type)
@@ -3519,35 +3777,41 @@ static int mlx5_fs_mode_validate(struct devlink *devlink, u32 id,
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
char *value = val.vstr;
- int err = 0;
+ u8 eswitch_mode;
- if (!strcmp(value, "dmfs")) {
+ eswitch_mode = mlx5_eswitch_mode(dev);
+ if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Changing fs mode is not supported when eswitch offloads enabled.");
+ return -EOPNOTSUPP;
+ }
+
+ if (!strcmp(value, "dmfs"))
return 0;
- } else if (!strcmp(value, "smfs")) {
- u8 eswitch_mode;
- bool smfs_cap;
- eswitch_mode = mlx5_eswitch_mode(dev);
- smfs_cap = mlx5_fs_dr_is_supported(dev);
+ if (!strcmp(value, "smfs")) {
+ bool smfs_cap = mlx5_fs_dr_is_supported(dev);
if (!smfs_cap) {
- err = -EOPNOTSUPP;
NL_SET_ERR_MSG_MOD(extack,
"Software managed steering is not supported by current device");
+ return -EOPNOTSUPP;
}
+ } else if (!strcmp(value, "hmfs")) {
+ bool hmfs_cap = mlx5_fs_hws_is_supported(dev);
- else if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) {
+ if (!hmfs_cap) {
NL_SET_ERR_MSG_MOD(extack,
- "Software managed steering is not supported when eswitch offloads enabled.");
- err = -EOPNOTSUPP;
+ "Hardware steering is not supported by current device");
+ return -EOPNOTSUPP;
}
} else {
NL_SET_ERR_MSG_MOD(extack,
- "Bad parameter: supported values are [\"dmfs\", \"smfs\"]");
- err = -EINVAL;
+ "Bad parameter: supported values are [\"dmfs\", \"smfs\", \"hmfs\"]");
+ return -EINVAL;
}
- return err;
+ return 0;
}
static int mlx5_fs_mode_set(struct devlink *devlink, u32 id,
@@ -3559,6 +3823,8 @@ static int mlx5_fs_mode_set(struct devlink *devlink, u32 id,
if (!strcmp(ctx->val.vstr, "smfs"))
mode = MLX5_FLOW_STEERING_MODE_SMFS;
+ else if (!strcmp(ctx->val.vstr, "hmfs"))
+ mode = MLX5_FLOW_STEERING_MODE_HMFS;
else
mode = MLX5_FLOW_STEERING_MODE_DMFS;
dev->priv.steering->mode = mode;
@@ -3567,14 +3833,22 @@ static int mlx5_fs_mode_set(struct devlink *devlink, u32 id,
}
static int mlx5_fs_mode_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
- if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS)
+ switch (dev->priv.steering->mode) {
+ case MLX5_FLOW_STEERING_MODE_SMFS:
strscpy(ctx->val.vstr, "smfs", sizeof(ctx->val.vstr));
- else
+ break;
+ case MLX5_FLOW_STEERING_MODE_HMFS:
+ strscpy(ctx->val.vstr, "hmfs", sizeof(ctx->val.vstr));
+ break;
+ default:
strscpy(ctx->val.vstr, "dmfs", sizeof(ctx->val.vstr));
+ }
+
return 0;
}
@@ -3590,6 +3864,11 @@ void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
+ WARN_ON(!xa_empty(&steering->esw_egress_root_ns));
+ WARN_ON(!xa_empty(&steering->esw_ingress_root_ns));
+ xa_destroy(&steering->esw_egress_root_ns);
+ xa_destroy(&steering->esw_ingress_root_ns);
+
cleanup_root_ns(steering->root_ns);
cleanup_fdb_root_ns(steering);
cleanup_root_ns(steering->port_sel_root_ns);
@@ -3598,6 +3877,7 @@ void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
cleanup_root_ns(steering->rdma_rx_root_ns);
cleanup_root_ns(steering->rdma_tx_root_ns);
cleanup_root_ns(steering->egress_root_ns);
+ cleanup_rdma_transport_roots_ns(steering);
devl_params_unregister(priv_to_devlink(dev), mlx5_fs_params,
ARRAY_SIZE(mlx5_fs_params));
@@ -3649,8 +3929,7 @@ int mlx5_fs_core_init(struct mlx5_core_dev *dev)
goto err;
}
- if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
- MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
+ if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support)) {
err = init_rdma_rx_root_ns(steering);
if (err)
goto err;
@@ -3668,6 +3947,20 @@ int mlx5_fs_core_init(struct mlx5_core_dev *dev)
goto err;
}
+ if (MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_RX(dev, ft_support)) {
+ err = init_rdma_transport_rx_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
+ if (MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_TX(dev, ft_support)) {
+ err = init_rdma_transport_tx_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
+ xa_init(&steering->esw_egress_root_ns);
+ xa_init(&steering->esw_ingress_root_ns);
return 0;
err:
@@ -3711,6 +4004,8 @@ int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
if (mlx5_fs_dr_is_supported(dev))
steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
+ else if (mlx5_fs_hws_is_supported(dev))
+ steering->mode = MLX5_FLOW_STEERING_MODE_HMFS;
else
steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
@@ -3818,8 +4113,10 @@ mlx5_get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type
struct mlx5_flow_namespace *ns;
if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS ||
- ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS)
- ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0);
+ ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS ||
+ ns_type == MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX ||
+ ns_type == MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX)
+ ns = mlx5_get_flow_vport_namespace(dev, ns_type, 0);
else
ns = mlx5_get_flow_namespace(dev, ns_type);
if (!ns)
@@ -3994,6 +4291,8 @@ int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
if (mode == MLX5_FLOW_STEERING_MODE_SMFS)
cmds = mlx5_fs_cmd_get_dr_cmds();
+ else if (mode == MLX5_FLOW_STEERING_MODE_HMFS)
+ cmds = mlx5_fs_cmd_get_hws_cmds();
else
cmds = mlx5_fs_cmd_get_fw_cmds();
if (!cmds)
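
A minimal usage sketch for the newly exported mlx5_get_flow_vport_namespace() (assumptions: a probed mlx5_core_dev, vport index 0 and RDMA transport flow tables supported; the table sizes are illustrative). It creates one auto-grouped table in the new RDMA_TRANSPORT_RX namespace using only APIs visible in this patch or the existing flow-steering API.

/* Hedged sketch, not part of the patch. */
static struct mlx5_flow_table *
rdma_transport_rx_table_create(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *ns;

	ns = mlx5_get_flow_vport_namespace(dev,
					   MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX,
					   0 /* vport index */);
	if (!ns)
		return ERR_PTR(-EOPNOTSUPP);

	ft_attr.max_fte = 2;
	ft_attr.autogroup.max_num_groups = 1;
	return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
}
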
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index b30976627c6b..1c6591425260 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -37,7 +37,8 @@
#include <linux/mlx5/fs.h>
#include <linux/rhashtable.h>
#include <linux/llist.h>
-#include <steering/fs_dr.h>
+#include <steering/sws/fs_dr.h>
+#include <steering/hws/fs_hws.h>
#define FDB_TC_MAX_CHAIN 3
#define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1)
@@ -57,6 +58,7 @@ struct mlx5_flow_definer {
enum mlx5_flow_resource_owner {
MLX5_FLOW_RESOURCE_OWNER_FW,
MLX5_FLOW_RESOURCE_OWNER_SW,
+ MLX5_FLOW_RESOURCE_OWNER_HWS,
};
struct mlx5_modify_hdr {
@@ -64,6 +66,7 @@ struct mlx5_modify_hdr {
enum mlx5_flow_resource_owner owner;
union {
struct mlx5_fs_dr_action fs_dr_action;
+ struct mlx5_fs_hws_action fs_hws_action;
u32 id;
};
};
@@ -74,6 +77,7 @@ struct mlx5_pkt_reformat {
enum mlx5_flow_resource_owner owner;
union {
struct mlx5_fs_dr_action fs_dr_action;
+ struct mlx5_fs_hws_action fs_hws_action;
u32 id;
};
};
@@ -99,22 +103,6 @@ enum fs_node_type {
FS_TYPE_FLOW_DEST
};
-enum fs_flow_table_type {
- FS_FT_NIC_RX = 0x0,
- FS_FT_NIC_TX = 0x1,
- FS_FT_ESW_EGRESS_ACL = 0x2,
- FS_FT_ESW_INGRESS_ACL = 0x3,
- FS_FT_FDB = 0X4,
- FS_FT_SNIFFER_RX = 0X5,
- FS_FT_SNIFFER_TX = 0X6,
- FS_FT_RDMA_RX = 0X7,
- FS_FT_RDMA_TX = 0X8,
- FS_FT_PORT_SEL = 0X9,
- FS_FT_FDB_RX = 0xa,
- FS_FT_FDB_TX = 0xb,
- FS_FT_MAX_TYPE = FS_FT_FDB_TX,
-};
-
enum fs_flow_table_op_mod {
FS_FT_OP_MOD_NORMAL,
FS_FT_OP_MOD_LAG_DEMUX,
@@ -126,7 +114,8 @@ enum fs_fte_status {
enum mlx5_flow_steering_mode {
MLX5_FLOW_STEERING_MODE_DMFS,
- MLX5_FLOW_STEERING_MODE_SMFS
+ MLX5_FLOW_STEERING_MODE_SMFS,
+ MLX5_FLOW_STEERING_MODE_HMFS,
};
enum mlx5_flow_steering_capabilty {
@@ -144,16 +133,18 @@ struct mlx5_flow_steering {
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_root_namespace *fdb_root_ns;
struct mlx5_flow_namespace **fdb_sub_ns;
- struct mlx5_flow_root_namespace **esw_egress_root_ns;
- struct mlx5_flow_root_namespace **esw_ingress_root_ns;
+ struct xarray esw_egress_root_ns;
+ struct xarray esw_ingress_root_ns;
struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
struct mlx5_flow_root_namespace *sniffer_rx_root_ns;
struct mlx5_flow_root_namespace *rdma_rx_root_ns;
struct mlx5_flow_root_namespace *rdma_tx_root_ns;
struct mlx5_flow_root_namespace *egress_root_ns;
struct mlx5_flow_root_namespace *port_sel_root_ns;
- int esw_egress_acl_vports;
- int esw_ingress_acl_vports;
+ struct mlx5_flow_root_namespace **rdma_transport_rx_root_ns;
+ struct mlx5_flow_root_namespace **rdma_transport_tx_root_ns;
+ int rdma_transport_rx_vports;
+ int rdma_transport_tx_vports;
};
struct fs_node {
@@ -190,9 +181,13 @@ struct mlx5_flow_handle {
/* Type of children is mlx5_flow_group */
struct mlx5_flow_table {
struct fs_node node;
- struct mlx5_fs_dr_table fs_dr_table;
+ union {
+ struct mlx5_fs_dr_table fs_dr_table;
+ struct mlx5_fs_hws_table fs_hws_table;
+ };
u32 id;
u16 vport;
+ u16 esw_owner_vhca_id;
unsigned int max_fte;
unsigned int level;
enum fs_flow_table_type type;
@@ -247,7 +242,10 @@ struct fs_fte_dup {
/* Type of children is mlx5_flow_rule */
struct fs_fte {
struct fs_node node;
- struct mlx5_fs_dr_rule fs_dr_rule;
+ union {
+ struct mlx5_fs_dr_rule fs_dr_rule;
+ struct mlx5_fs_hws_rule fs_hws_rule;
+ };
u32 val[MLX5_ST_SZ_DW_MATCH_PARAM];
struct fs_fte_action act_dests;
struct fs_fte_dup *dup;
@@ -280,7 +278,10 @@ struct mlx5_flow_group_mask {
/* Type of children is fs_fte */
struct mlx5_flow_group {
struct fs_node node;
- struct mlx5_fs_dr_matcher fs_dr_matcher;
+ union {
+ struct mlx5_fs_dr_matcher fs_dr_matcher;
+ struct mlx5_fs_hws_matcher fs_hws_matcher;
+ };
struct mlx5_flow_group_mask mask;
u32 start_index;
u32 max_ftes;
@@ -293,7 +294,10 @@ struct mlx5_flow_group {
struct mlx5_flow_root_namespace {
struct mlx5_flow_namespace ns;
enum mlx5_flow_steering_mode mode;
- struct mlx5_fs_dr_domain fs_dr_domain;
+ union {
+ struct mlx5_fs_dr_domain fs_dr_domain;
+ struct mlx5_fs_hws_context fs_hws_context;
+ };
enum fs_flow_table_type table_type;
struct mlx5_core_dev *dev;
struct mlx5_flow_table *root_ft;
@@ -303,6 +307,37 @@ struct mlx5_flow_root_namespace {
const struct mlx5_flow_cmds *cmds;
};
+enum mlx5_fc_type {
+ MLX5_FC_TYPE_ACQUIRED = 0,
+ MLX5_FC_TYPE_LOCAL,
+};
+
+struct mlx5_fc_cache {
+ u64 packets;
+ u64 bytes;
+ u64 lastuse;
+};
+
+struct mlx5_fc {
+ u32 id;
+ bool aging;
+ enum mlx5_fc_type type;
+ struct mlx5_fc_bulk *bulk;
+ struct mlx5_fc_cache cache;
+ refcount_t fc_local_refcount;
+ /* last{packets,bytes} are used for calculating deltas since last reading. */
+ u64 lastpackets;
+ u64 lastbytes;
+};
+
+struct mlx5_fc_bulk {
+ struct mlx5_fs_bulk fs_bulk;
+ u32 base_id;
+ struct mlx5_fs_hws_data hws_data;
+ struct mlx5_fc fcs[];
+};
+
+u32 mlx5_fc_get_base_id(struct mlx5_fc *counter);
int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev);
void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
@@ -325,15 +360,22 @@ void mlx5_fs_core_free(struct mlx5_core_dev *dev);
int mlx5_fs_core_init(struct mlx5_core_dev *dev);
void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev);
-int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports);
-void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev);
-int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports);
-void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev);
+int mlx5_fs_vport_egress_acl_ns_add(struct mlx5_flow_steering *steering,
+ u16 vport_idx);
+int mlx5_fs_vport_ingress_acl_ns_add(struct mlx5_flow_steering *steering,
+ u16 vport_idx);
+void mlx5_fs_vport_egress_acl_ns_remove(struct mlx5_flow_steering *steering,
+ int vport_idx);
+void mlx5_fs_vport_ingress_acl_ns_remove(struct mlx5_flow_steering *steering,
+ int vport_idx);
u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type);
struct mlx5_flow_root_namespace *find_root(struct fs_node *node);
+int mlx5_fs_get_packet_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *id);
+
#define fs_get_obj(v, _node) {v = container_of((_node), typeof(*v), node); }
#define fs_list_for_each_entry(pos, root) \
@@ -382,7 +424,9 @@ struct mlx5_flow_root_namespace *find_root(struct fs_node *node);
(type == FS_FT_PORT_SEL) ? MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) : \
(type == FS_FT_FDB_RX) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
(type == FS_FT_FDB_TX) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
- (BUILD_BUG_ON_ZERO(FS_FT_FDB_TX != FS_FT_MAX_TYPE))\
+ (type == FS_FT_RDMA_TRANSPORT_RX) ? MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_RX(mdev, cap) : \
+ (type == FS_FT_RDMA_TRANSPORT_TX) ? MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_TX(mdev, cap) : \
+ (BUILD_BUG_ON_ZERO(FS_FT_RDMA_TRANSPORT_TX != FS_FT_MAX_TYPE))\
)
#endif
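
Flow tables, groups, FTEs and root namespaces now carry a union of SW-steering and HW-steering members, discriminated by the root namespace mode. A hedged sketch of how a backend would select the member (names taken from this header; not a function that exists in the patch):

/* Illustrative only: pick the per-table backend context by steering mode. */
static void *table_backend_ctx(struct mlx5_flow_root_namespace *root,
			       struct mlx5_flow_table *ft)
{
	switch (root->mode) {
	case MLX5_FLOW_STEERING_MODE_SMFS:
		return &ft->fs_dr_table;
	case MLX5_FLOW_STEERING_MODE_HMFS:
		return &ft->fs_hws_table;
	default:
		/* DMFS: the table is firmware-owned, only ft->id is used */
		return NULL;
	}
}
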
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 62d0c689796b..83001eda3884 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -34,6 +34,7 @@
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "fs_core.h"
+#include "fs_pool.h"
#include "fs_cmd.h"
#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
@@ -43,33 +44,6 @@
#define MLX5_FC_POOL_MAX_THRESHOLD BIT(18)
#define MLX5_FC_POOL_USED_BUFF_RATIO 10
-struct mlx5_fc_cache {
- u64 packets;
- u64 bytes;
- u64 lastuse;
-};
-
-struct mlx5_fc {
- u32 id;
- bool aging;
- struct mlx5_fc_bulk *bulk;
- struct mlx5_fc_cache cache;
- /* last{packets,bytes} are used for calculating deltas since last reading. */
- u64 lastpackets;
- u64 lastbytes;
-};
-
-struct mlx5_fc_pool {
- struct mlx5_core_dev *dev;
- struct mutex pool_lock; /* protects pool lists */
- struct list_head fully_used;
- struct list_head partially_used;
- struct list_head unused;
- int available_fcs;
- int used_fcs;
- int threshold;
-};
-
struct mlx5_fc_stats {
struct xarray counters;
@@ -80,13 +54,13 @@ struct mlx5_fc_stats {
int bulk_query_len;
bool bulk_query_alloc_failed;
unsigned long next_bulk_query_alloc;
- struct mlx5_fc_pool fc_pool;
+ struct mlx5_fs_pool fc_pool;
};
-static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev);
-static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool);
-static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool);
-static void mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc);
+static void mlx5_fc_pool_init(struct mlx5_fs_pool *fc_pool, struct mlx5_core_dev *dev);
+static void mlx5_fc_pool_cleanup(struct mlx5_fs_pool *fc_pool);
+static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fs_pool *fc_pool);
+static void mlx5_fc_pool_release_counter(struct mlx5_fs_pool *fc_pool, struct mlx5_fc *fc);
static int get_init_bulk_query_len(struct mlx5_core_dev *dev)
{
@@ -186,6 +160,9 @@ static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
+ if (WARN_ON(counter->type == MLX5_FC_TYPE_LOCAL))
+ return;
+
if (counter->bulk)
mlx5_fc_pool_release_counter(&fc_stats->fc_pool, counter);
else
@@ -435,15 +412,7 @@ void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
fc_stats->sampling_interval);
}
-/* Flow counter bluks */
-
-struct mlx5_fc_bulk {
- struct list_head pool_list;
- u32 base_id;
- int bulk_len;
- unsigned long *bitmask;
- struct mlx5_fc fcs[] __counted_by(bulk_len);
-};
+/* Flow counter bulks */
static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
u32 id)
@@ -452,16 +421,16 @@ static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
counter->id = id;
}
-static int mlx5_fc_bulk_get_free_fcs_amount(struct mlx5_fc_bulk *bulk)
+u32 mlx5_fc_get_base_id(struct mlx5_fc *counter)
{
- return bitmap_weight(bulk->bitmask, bulk->bulk_len);
+ return counter->bulk->base_id;
}
-static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
+static struct mlx5_fs_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev,
+ void *pool_ctx)
{
enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask;
- struct mlx5_fc_bulk *bulk;
- int err = -ENOMEM;
+ struct mlx5_fc_bulk *fc_bulk;
int bulk_len;
u32 base_id;
int i;
@@ -469,207 +438,160 @@ static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc);
bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1;
- bulk = kvzalloc(struct_size(bulk, fcs, bulk_len), GFP_KERNEL);
- if (!bulk)
- goto err_alloc_bulk;
-
- bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
- GFP_KERNEL);
- if (!bulk->bitmask)
- goto err_alloc_bitmask;
+ fc_bulk = kvzalloc(struct_size(fc_bulk, fcs, bulk_len), GFP_KERNEL);
+ if (!fc_bulk)
+ return NULL;
- err = mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id);
- if (err)
- goto err_mlx5_cmd_bulk_alloc;
+ if (mlx5_fs_bulk_init(dev, &fc_bulk->fs_bulk, bulk_len))
+ goto fc_bulk_free;
- bulk->base_id = base_id;
- bulk->bulk_len = bulk_len;
- for (i = 0; i < bulk_len; i++) {
- mlx5_fc_init(&bulk->fcs[i], bulk, base_id + i);
- set_bit(i, bulk->bitmask);
- }
+ if (mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id))
+ goto fs_bulk_cleanup;
+ fc_bulk->base_id = base_id;
+ for (i = 0; i < bulk_len; i++)
+ mlx5_fc_init(&fc_bulk->fcs[i], fc_bulk, base_id + i);
- return bulk;
+ refcount_set(&fc_bulk->hws_data.hws_action_refcount, 0);
+ mutex_init(&fc_bulk->hws_data.lock);
+ return &fc_bulk->fs_bulk;
-err_mlx5_cmd_bulk_alloc:
- kvfree(bulk->bitmask);
-err_alloc_bitmask:
- kvfree(bulk);
-err_alloc_bulk:
- return ERR_PTR(err);
+fs_bulk_cleanup:
+ mlx5_fs_bulk_cleanup(&fc_bulk->fs_bulk);
+fc_bulk_free:
+ kvfree(fc_bulk);
+ return NULL;
}
static int
-mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fc_bulk *bulk)
+mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk)
{
- if (mlx5_fc_bulk_get_free_fcs_amount(bulk) < bulk->bulk_len) {
+ struct mlx5_fc_bulk *fc_bulk = container_of(fs_bulk,
+ struct mlx5_fc_bulk,
+ fs_bulk);
+
+ if (mlx5_fs_bulk_get_free_amount(fs_bulk) < fs_bulk->bulk_len) {
mlx5_core_err(dev, "Freeing bulk before all counters were released\n");
return -EBUSY;
}
- mlx5_cmd_fc_free(dev, bulk->base_id);
- kvfree(bulk->bitmask);
- kvfree(bulk);
+ mlx5_cmd_fc_free(dev, fc_bulk->base_id);
+ mlx5_fs_bulk_cleanup(fs_bulk);
+ kvfree(fc_bulk);
return 0;
}
-static struct mlx5_fc *mlx5_fc_bulk_acquire_fc(struct mlx5_fc_bulk *bulk)
+static void mlx5_fc_pool_update_threshold(struct mlx5_fs_pool *fc_pool)
{
- int free_fc_index = find_first_bit(bulk->bitmask, bulk->bulk_len);
-
- if (free_fc_index >= bulk->bulk_len)
- return ERR_PTR(-ENOSPC);
-
- clear_bit(free_fc_index, bulk->bitmask);
- return &bulk->fcs[free_fc_index];
-}
-
-static int mlx5_fc_bulk_release_fc(struct mlx5_fc_bulk *bulk, struct mlx5_fc *fc)
-{
- int fc_index = fc->id - bulk->base_id;
-
- if (test_bit(fc_index, bulk->bitmask))
- return -EINVAL;
-
- set_bit(fc_index, bulk->bitmask);
- return 0;
+ fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD,
+ fc_pool->used_units / MLX5_FC_POOL_USED_BUFF_RATIO);
}
/* Flow counters pool API */
-static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev)
-{
- fc_pool->dev = dev;
- mutex_init(&fc_pool->pool_lock);
- INIT_LIST_HEAD(&fc_pool->fully_used);
- INIT_LIST_HEAD(&fc_pool->partially_used);
- INIT_LIST_HEAD(&fc_pool->unused);
- fc_pool->available_fcs = 0;
- fc_pool->used_fcs = 0;
- fc_pool->threshold = 0;
-}
+static const struct mlx5_fs_pool_ops mlx5_fc_pool_ops = {
+ .bulk_destroy = mlx5_fc_bulk_destroy,
+ .bulk_create = mlx5_fc_bulk_create,
+ .update_threshold = mlx5_fc_pool_update_threshold,
+};
-static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool)
+static void
+mlx5_fc_pool_init(struct mlx5_fs_pool *fc_pool, struct mlx5_core_dev *dev)
{
- struct mlx5_core_dev *dev = fc_pool->dev;
- struct mlx5_fc_bulk *bulk;
- struct mlx5_fc_bulk *tmp;
-
- list_for_each_entry_safe(bulk, tmp, &fc_pool->fully_used, pool_list)
- mlx5_fc_bulk_destroy(dev, bulk);
- list_for_each_entry_safe(bulk, tmp, &fc_pool->partially_used, pool_list)
- mlx5_fc_bulk_destroy(dev, bulk);
- list_for_each_entry_safe(bulk, tmp, &fc_pool->unused, pool_list)
- mlx5_fc_bulk_destroy(dev, bulk);
+ mlx5_fs_pool_init(fc_pool, dev, &mlx5_fc_pool_ops, NULL);
}
-static void mlx5_fc_pool_update_threshold(struct mlx5_fc_pool *fc_pool)
+static void mlx5_fc_pool_cleanup(struct mlx5_fs_pool *fc_pool)
{
- fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD,
- fc_pool->used_fcs / MLX5_FC_POOL_USED_BUFF_RATIO);
+ mlx5_fs_pool_cleanup(fc_pool);
}
-static struct mlx5_fc_bulk *
-mlx5_fc_pool_alloc_new_bulk(struct mlx5_fc_pool *fc_pool)
+static struct mlx5_fc *
+mlx5_fc_pool_acquire_counter(struct mlx5_fs_pool *fc_pool)
{
- struct mlx5_core_dev *dev = fc_pool->dev;
- struct mlx5_fc_bulk *new_bulk;
+ struct mlx5_fs_pool_index pool_index = {};
+ struct mlx5_fc_bulk *fc_bulk;
+ int err;
- new_bulk = mlx5_fc_bulk_create(dev);
- if (!IS_ERR(new_bulk))
- fc_pool->available_fcs += new_bulk->bulk_len;
- mlx5_fc_pool_update_threshold(fc_pool);
- return new_bulk;
+ err = mlx5_fs_pool_acquire_index(fc_pool, &pool_index);
+ if (err)
+ return ERR_PTR(err);
+ fc_bulk = container_of(pool_index.fs_bulk, struct mlx5_fc_bulk, fs_bulk);
+ return &fc_bulk->fcs[pool_index.index];
}
static void
-mlx5_fc_pool_free_bulk(struct mlx5_fc_pool *fc_pool, struct mlx5_fc_bulk *bulk)
+mlx5_fc_pool_release_counter(struct mlx5_fs_pool *fc_pool, struct mlx5_fc *fc)
{
+ struct mlx5_fs_bulk *fs_bulk = &fc->bulk->fs_bulk;
+ struct mlx5_fs_pool_index pool_index = {};
struct mlx5_core_dev *dev = fc_pool->dev;
- fc_pool->available_fcs -= bulk->bulk_len;
- mlx5_fc_bulk_destroy(dev, bulk);
- mlx5_fc_pool_update_threshold(fc_pool);
+ pool_index.fs_bulk = fs_bulk;
+ pool_index.index = fc->id - fc->bulk->base_id;
+ if (mlx5_fs_pool_release_index(fc_pool, &pool_index))
+ mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n");
}
-static struct mlx5_fc *
-mlx5_fc_pool_acquire_from_list(struct list_head *src_list,
- struct list_head *next_list,
- bool move_non_full_bulk)
+/**
+ * mlx5_fc_local_create - Allocate mlx5_fc struct for a counter which
+ * was already acquired using its counter id and bulk data.
+ *
+ * @counter_id: id of the already-acquired counter
+ * @offset: counter offset from the bulk base id
+ * @bulk_size: size of the bulk the counter was allocated from
+ *
+ * Return: Pointer to mlx5_fc on success, ERR_PTR otherwise.
+ */
+struct mlx5_fc *
+mlx5_fc_local_create(u32 counter_id, u32 offset, u32 bulk_size)
{
- struct mlx5_fc_bulk *bulk;
- struct mlx5_fc *fc;
+ struct mlx5_fc_bulk *fc_bulk;
+ struct mlx5_fc *counter;
- if (list_empty(src_list))
- return ERR_PTR(-ENODATA);
+ counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+ if (!counter)
+ return ERR_PTR(-ENOMEM);
+ fc_bulk = kzalloc(sizeof(*fc_bulk), GFP_KERNEL);
+ if (!fc_bulk) {
+ kfree(counter);
+ return ERR_PTR(-ENOMEM);
+ }
- bulk = list_first_entry(src_list, struct mlx5_fc_bulk, pool_list);
- fc = mlx5_fc_bulk_acquire_fc(bulk);
- if (move_non_full_bulk || mlx5_fc_bulk_get_free_fcs_amount(bulk) == 0)
- list_move(&bulk->pool_list, next_list);
- return fc;
+ counter->type = MLX5_FC_TYPE_LOCAL;
+ counter->id = counter_id;
+ fc_bulk->base_id = counter_id - offset;
+ fc_bulk->fs_bulk.bulk_len = bulk_size;
+ refcount_set(&fc_bulk->hws_data.hws_action_refcount, 0);
+ mutex_init(&fc_bulk->hws_data.lock);
+ counter->bulk = fc_bulk;
+ refcount_set(&counter->fc_local_refcount, 1);
+ return counter;
}
+EXPORT_SYMBOL(mlx5_fc_local_create);
-static struct mlx5_fc *
-mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool)
-{
- struct mlx5_fc_bulk *new_bulk;
- struct mlx5_fc *fc;
-
- mutex_lock(&fc_pool->pool_lock);
-
- fc = mlx5_fc_pool_acquire_from_list(&fc_pool->partially_used,
- &fc_pool->fully_used, false);
- if (IS_ERR(fc))
- fc = mlx5_fc_pool_acquire_from_list(&fc_pool->unused,
- &fc_pool->partially_used,
- true);
- if (IS_ERR(fc)) {
- new_bulk = mlx5_fc_pool_alloc_new_bulk(fc_pool);
- if (IS_ERR(new_bulk)) {
- fc = ERR_CAST(new_bulk);
- goto out;
- }
- fc = mlx5_fc_bulk_acquire_fc(new_bulk);
- list_add(&new_bulk->pool_list, &fc_pool->partially_used);
- }
- fc_pool->available_fcs--;
- fc_pool->used_fcs++;
-
-out:
- mutex_unlock(&fc_pool->pool_lock);
- return fc;
+void mlx5_fc_local_destroy(struct mlx5_fc *counter)
+{
+ kfree(counter->bulk);
+ kfree(counter);
}
+EXPORT_SYMBOL(mlx5_fc_local_destroy);
-static void
-mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc)
+void mlx5_fc_local_get(struct mlx5_fc *counter)
{
- struct mlx5_core_dev *dev = fc_pool->dev;
- struct mlx5_fc_bulk *bulk = fc->bulk;
- int bulk_free_fcs_amount;
+ if (!counter || counter->type != MLX5_FC_TYPE_LOCAL)
+ return;
- mutex_lock(&fc_pool->pool_lock);
+ refcount_inc(&counter->fc_local_refcount);
+}
- if (mlx5_fc_bulk_release_fc(bulk, fc)) {
- mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n");
- goto unlock;
- }
+void mlx5_fc_local_put(struct mlx5_fc *counter)
+{
+ if (!counter || counter->type != MLX5_FC_TYPE_LOCAL)
+ return;
- fc_pool->available_fcs++;
- fc_pool->used_fcs--;
-
- bulk_free_fcs_amount = mlx5_fc_bulk_get_free_fcs_amount(bulk);
- if (bulk_free_fcs_amount == 1)
- list_move_tail(&bulk->pool_list, &fc_pool->partially_used);
- if (bulk_free_fcs_amount == bulk->bulk_len) {
- list_del(&bulk->pool_list);
- if (fc_pool->available_fcs > fc_pool->threshold)
- mlx5_fc_pool_free_bulk(fc_pool, bulk);
- else
- list_add(&bulk->pool_list, &fc_pool->unused);
- }
+ if (!refcount_dec_and_test(&counter->fc_local_refcount))
+ return;
-unlock:
- mutex_unlock(&fc_pool->pool_lock);
+ mlx5_fc_local_destroy(counter);
}
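
A usage sketch of the new local-counter API (hedged: the counter id, offset and bulk size below are illustrative values, not taken from the patch). A HW-steering caller wraps an already-allocated counter range in an mlx5_fc and manages its lifetime with the get/put pair above.

/* Hedged sketch, not part of the patch. */
static int local_counter_example(void)
{
	struct mlx5_fc *fc;

	fc = mlx5_fc_local_create(0x1000 /* counter id */,
				  4      /* offset from bulk base */,
				  128    /* bulk size */);
	if (IS_ERR(fc))
		return PTR_ERR(fc);

	mlx5_fc_local_get(fc);	/* e.g. a rule takes a reference */
	mlx5_fc_local_put(fc);	/* the rule drops it again */
	mlx5_fc_local_put(fc);	/* creator's reference; frees the counter */

	return 0;
}
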
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c
index c14590acc772..f6abfd00d7e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c
@@ -50,10 +50,12 @@ mlx5_ft_pool_get_avail_sz(struct mlx5_core_dev *dev, enum fs_flow_table_type tab
int i, found_i = -1;
for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
- if (dev->priv.ft_pool->ft_left[i] && FT_POOLS[i] >= desired_size &&
+ if (dev->priv.ft_pool->ft_left[i] &&
+ (FT_POOLS[i] >= desired_size ||
+ desired_size == MLX5_FS_MAX_POOL_SIZE) &&
FT_POOLS[i] <= max_ft_size) {
found_i = i;
- if (desired_size != POOL_NEXT_SIZE)
+ if (desired_size != MLX5_FS_MAX_POOL_SIZE)
break;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h
index 25f4274b372b..173e312db720 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h
@@ -7,8 +7,6 @@
#include <linux/mlx5/driver.h>
#include "fs_core.h"
-#define POOL_NEXT_SIZE 0
-
int mlx5_ft_pool_init(struct mlx5_core_dev *dev);
void mlx5_ft_pool_destroy(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c
new file mode 100644
index 000000000000..f6c226664602
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include <mlx5_core.h>
+#include "fs_pool.h"
+
+int mlx5_fs_bulk_init(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk,
+ int bulk_len)
+{
+ int i;
+
+ fs_bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!fs_bulk->bitmask)
+ return -ENOMEM;
+
+ fs_bulk->bulk_len = bulk_len;
+ for (i = 0; i < bulk_len; i++)
+ set_bit(i, fs_bulk->bitmask);
+
+ return 0;
+}
+
+void mlx5_fs_bulk_cleanup(struct mlx5_fs_bulk *fs_bulk)
+{
+ kvfree(fs_bulk->bitmask);
+}
+
+int mlx5_fs_bulk_get_free_amount(struct mlx5_fs_bulk *bulk)
+{
+ return bitmap_weight(bulk->bitmask, bulk->bulk_len);
+}
+
+static int mlx5_fs_bulk_acquire_index(struct mlx5_fs_bulk *fs_bulk,
+ struct mlx5_fs_pool_index *pool_index)
+{
+ int free_index = find_first_bit(fs_bulk->bitmask, fs_bulk->bulk_len);
+
+ WARN_ON_ONCE(!pool_index || !fs_bulk);
+ if (free_index >= fs_bulk->bulk_len)
+ return -ENOSPC;
+
+ clear_bit(free_index, fs_bulk->bitmask);
+ pool_index->fs_bulk = fs_bulk;
+ pool_index->index = free_index;
+ return 0;
+}
+
+static int mlx5_fs_bulk_release_index(struct mlx5_fs_bulk *fs_bulk, int index)
+{
+ if (test_bit(index, fs_bulk->bitmask))
+ return -EINVAL;
+
+ set_bit(index, fs_bulk->bitmask);
+ return 0;
+}
+
+void mlx5_fs_pool_init(struct mlx5_fs_pool *pool, struct mlx5_core_dev *dev,
+ const struct mlx5_fs_pool_ops *ops, void *pool_ctx)
+{
+ WARN_ON_ONCE(!ops || !ops->bulk_destroy || !ops->bulk_create ||
+ !ops->update_threshold);
+ pool->dev = dev;
+ pool->pool_ctx = pool_ctx;
+ mutex_init(&pool->pool_lock);
+ INIT_LIST_HEAD(&pool->fully_used);
+ INIT_LIST_HEAD(&pool->partially_used);
+ INIT_LIST_HEAD(&pool->unused);
+ pool->available_units = 0;
+ pool->used_units = 0;
+ pool->threshold = 0;
+ pool->ops = ops;
+}
+
+void mlx5_fs_pool_cleanup(struct mlx5_fs_pool *pool)
+{
+ struct mlx5_core_dev *dev = pool->dev;
+ struct mlx5_fs_bulk *bulk;
+ struct mlx5_fs_bulk *tmp;
+
+ list_for_each_entry_safe(bulk, tmp, &pool->fully_used, pool_list)
+ pool->ops->bulk_destroy(dev, bulk);
+ list_for_each_entry_safe(bulk, tmp, &pool->partially_used, pool_list)
+ pool->ops->bulk_destroy(dev, bulk);
+ list_for_each_entry_safe(bulk, tmp, &pool->unused, pool_list)
+ pool->ops->bulk_destroy(dev, bulk);
+}
+
+static struct mlx5_fs_bulk *
+mlx5_fs_pool_alloc_new_bulk(struct mlx5_fs_pool *fs_pool)
+{
+ struct mlx5_core_dev *dev = fs_pool->dev;
+ struct mlx5_fs_bulk *new_bulk;
+
+ new_bulk = fs_pool->ops->bulk_create(dev, fs_pool->pool_ctx);
+ if (new_bulk)
+ fs_pool->available_units += new_bulk->bulk_len;
+ fs_pool->ops->update_threshold(fs_pool);
+ return new_bulk;
+}
+
+static void
+mlx5_fs_pool_free_bulk(struct mlx5_fs_pool *fs_pool, struct mlx5_fs_bulk *bulk)
+{
+ struct mlx5_core_dev *dev = fs_pool->dev;
+
+ fs_pool->available_units -= bulk->bulk_len;
+ fs_pool->ops->bulk_destroy(dev, bulk);
+ fs_pool->ops->update_threshold(fs_pool);
+}
+
+static int
+mlx5_fs_pool_acquire_from_list(struct list_head *src_list,
+ struct list_head *next_list,
+ bool move_non_full_bulk,
+ struct mlx5_fs_pool_index *pool_index)
+{
+ struct mlx5_fs_bulk *fs_bulk;
+ int err;
+
+ if (list_empty(src_list))
+ return -ENODATA;
+
+ fs_bulk = list_first_entry(src_list, struct mlx5_fs_bulk, pool_list);
+ err = mlx5_fs_bulk_acquire_index(fs_bulk, pool_index);
+ if (move_non_full_bulk || mlx5_fs_bulk_get_free_amount(fs_bulk) == 0)
+ list_move(&fs_bulk->pool_list, next_list);
+ return err;
+}
+
+int mlx5_fs_pool_acquire_index(struct mlx5_fs_pool *fs_pool,
+ struct mlx5_fs_pool_index *pool_index)
+{
+ struct mlx5_fs_bulk *new_bulk;
+ int err;
+
+ mutex_lock(&fs_pool->pool_lock);
+
+ err = mlx5_fs_pool_acquire_from_list(&fs_pool->partially_used,
+ &fs_pool->fully_used, false,
+ pool_index);
+ if (err)
+ err = mlx5_fs_pool_acquire_from_list(&fs_pool->unused,
+ &fs_pool->partially_used,
+ true, pool_index);
+ if (err) {
+ new_bulk = mlx5_fs_pool_alloc_new_bulk(fs_pool);
+ if (!new_bulk) {
+ err = -ENOENT;
+ goto out;
+ }
+ err = mlx5_fs_bulk_acquire_index(new_bulk, pool_index);
+ WARN_ON_ONCE(err);
+ list_add(&new_bulk->pool_list, &fs_pool->partially_used);
+ }
+ fs_pool->available_units--;
+ fs_pool->used_units++;
+
+out:
+ mutex_unlock(&fs_pool->pool_lock);
+ return err;
+}
+
+int mlx5_fs_pool_release_index(struct mlx5_fs_pool *fs_pool,
+ struct mlx5_fs_pool_index *pool_index)
+{
+ struct mlx5_fs_bulk *bulk = pool_index->fs_bulk;
+ int bulk_free_amount;
+ int err;
+
+ mutex_lock(&fs_pool->pool_lock);
+
+ /* TBD: would prefer to return void; kept as int only because the original code WARNed here when releasing an already-free index. */
+ err = mlx5_fs_bulk_release_index(bulk, pool_index->index);
+ if (err)
+ goto unlock;
+
+ fs_pool->available_units++;
+ fs_pool->used_units--;
+
+ bulk_free_amount = mlx5_fs_bulk_get_free_amount(bulk);
+ if (bulk_free_amount == 1)
+ list_move_tail(&bulk->pool_list, &fs_pool->partially_used);
+ if (bulk_free_amount == bulk->bulk_len) {
+ list_del(&bulk->pool_list);
+ if (fs_pool->available_units > fs_pool->threshold)
+ mlx5_fs_pool_free_bulk(fs_pool, bulk);
+ else
+ list_add(&bulk->pool_list, &fs_pool->unused);
+ }
+
+unlock:
+ mutex_unlock(&fs_pool->pool_lock);
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h
new file mode 100644
index 000000000000..f04ec3107498
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef __MLX5_FS_POOL_H__
+#define __MLX5_FS_POOL_H__
+
+#include <linux/mlx5/driver.h>
+
+struct mlx5_fs_bulk {
+ struct list_head pool_list;
+ int bulk_len;
+ unsigned long *bitmask;
+};
+
+struct mlx5_fs_pool_index {
+ struct mlx5_fs_bulk *fs_bulk;
+ int index;
+};
+
+struct mlx5_fs_pool;
+
+struct mlx5_fs_pool_ops {
+ int (*bulk_destroy)(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *bulk);
+ struct mlx5_fs_bulk * (*bulk_create)(struct mlx5_core_dev *dev,
+ void *pool_ctx);
+ void (*update_threshold)(struct mlx5_fs_pool *pool);
+};
+
+struct mlx5_fs_pool {
+ struct mlx5_core_dev *dev;
+ void *pool_ctx;
+ const struct mlx5_fs_pool_ops *ops;
+ struct mutex pool_lock; /* protects pool lists */
+ struct list_head fully_used;
+ struct list_head partially_used;
+ struct list_head unused;
+ int available_units;
+ int used_units;
+ int threshold;
+};
+
+int mlx5_fs_bulk_init(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk,
+ int bulk_len);
+void mlx5_fs_bulk_cleanup(struct mlx5_fs_bulk *fs_bulk);
+int mlx5_fs_bulk_get_free_amount(struct mlx5_fs_bulk *bulk);
+
+void mlx5_fs_pool_init(struct mlx5_fs_pool *pool, struct mlx5_core_dev *dev,
+ const struct mlx5_fs_pool_ops *ops, void *pool_ctx);
+void mlx5_fs_pool_cleanup(struct mlx5_fs_pool *pool);
+int mlx5_fs_pool_acquire_index(struct mlx5_fs_pool *fs_pool,
+ struct mlx5_fs_pool_index *pool_index);
+int mlx5_fs_pool_release_index(struct mlx5_fs_pool *fs_pool,
+ struct mlx5_fs_pool_index *pool_index);
+
+#endif /* __MLX5_FS_POOL_H__ */
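For orientation, below is a minimal sketch of how a consumer might wire up the fs_pool API added above. All example_* names, the bulk size of 64 and the threshold policy are illustrative assumptions for this sketch only; the real in-driver users are defined elsewhere in the series. The pool moves bulks between the unused, partially_used and fully_used lists as indices are acquired and released, and frees a fully free bulk only once available_units exceeds the threshold.

/* Illustrative sketch only - not part of the patch. A hypothetical consumer
 * embeds struct mlx5_fs_bulk in its own bulk type and provides the three ops.
 */
#include <linux/mlx5/driver.h>
#include "fs_pool.h"

struct example_bulk {
	struct mlx5_fs_bulk fs_bulk;	/* generic bitmap/bookkeeping part */
	/* handles for fs_bulk.bulk_len hardware objects would live here */
};

static struct mlx5_fs_bulk *example_bulk_create(struct mlx5_core_dev *dev,
						void *pool_ctx)
{
	struct example_bulk *bulk;

	bulk = kvzalloc(sizeof(*bulk), GFP_KERNEL);
	if (!bulk)
		return NULL;
	/* 64 units per bulk is an arbitrary example value */
	if (mlx5_fs_bulk_init(dev, &bulk->fs_bulk, 64)) {
		kvfree(bulk);
		return NULL;
	}
	/* ... create the backing hardware objects here ... */
	return &bulk->fs_bulk;
}

static int example_bulk_destroy(struct mlx5_core_dev *dev,
				struct mlx5_fs_bulk *fs_bulk)
{
	/* ... destroy the backing hardware objects here ... */
	mlx5_fs_bulk_cleanup(fs_bulk);
	kvfree(container_of(fs_bulk, struct example_bulk, fs_bulk));
	return 0;
}

static void example_update_threshold(struct mlx5_fs_pool *pool)
{
	/* keep roughly half of the in-use amount as spare capacity */
	pool->threshold = max(64, pool->used_units / 2);
}

static const struct mlx5_fs_pool_ops example_pool_ops = {
	.bulk_create = example_bulk_create,
	.bulk_destroy = example_bulk_destroy,
	.update_threshold = example_update_threshold,
};

static int example_usage(struct mlx5_core_dev *dev)
{
	struct mlx5_fs_pool_index idx;
	struct mlx5_fs_pool pool;
	int err;

	mlx5_fs_pool_init(&pool, dev, &example_pool_ops, NULL);
	err = mlx5_fs_pool_acquire_index(&pool, &idx);
	if (!err) {
		/* idx.fs_bulk + idx.index identify one unit inside one bulk */
		mlx5_fs_pool_release_index(&pool, &idx);
	}
	mlx5_fs_pool_cleanup(&pool);
	return err;
}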
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 76ad46bf477d..eeb4437975f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -281,6 +281,25 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
+ if (MLX5_CAP_GEN(dev, shampo)) {
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_SHAMPO, HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN(dev, adv_rdma)) {
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_ADV_RDMA,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN(dev, psp)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_PSP);
+ if (err)
+ return err;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 566710d34a7b..2bceb42c98cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -6,13 +6,15 @@
#include "fw_reset.h"
#include "diag/fw_tracer.h"
#include "lib/tout.h"
+#include "sf/sf.h"
enum {
MLX5_FW_RESET_FLAGS_RESET_REQUESTED,
MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST,
MLX5_FW_RESET_FLAGS_PENDING_COMP,
MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS,
- MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED
+ MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED,
+ MLX5_FW_RESET_FLAGS_UNLOAD_EVENT,
};
struct mlx5_fw_reset {
@@ -25,6 +27,7 @@ struct mlx5_fw_reset {
struct work_struct reset_reload_work;
struct work_struct reset_now_work;
struct work_struct reset_abort_work;
+ struct delayed_work reset_timeout_work;
unsigned long reset_flags;
u8 reset_method;
struct timer_list timer;
@@ -70,7 +73,8 @@ static int mlx5_fw_reset_enable_remote_dev_reset_set(struct devlink *devlink, u3
}
static int mlx5_fw_reset_enable_remote_dev_reset_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_fw_reset *fw_reset;
@@ -219,7 +223,7 @@ int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev)
return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL0, 0, 0, false);
}
-static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unloaded)
+static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
struct devlink *devlink = priv_to_devlink(dev);
@@ -228,8 +232,7 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unload
if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
complete(&fw_reset->done);
} else {
- if (!unloaded)
- mlx5_unload_one(dev, false);
+ mlx5_sync_reset_unload_flow(dev, false);
if (mlx5_health_wait_pci_up(dev))
mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
else
@@ -246,7 +249,7 @@ static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
- del_timer_sync(&fw_reset->timer);
+ timer_delete_sync(&fw_reset->timer);
}
static int mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health)
@@ -258,6 +261,8 @@ static int mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool
return -EALREADY;
}
+ if (current_work() != &fw_reset->reset_timeout_work.work)
+ cancel_delayed_work(&fw_reset->reset_timeout_work);
mlx5_stop_sync_reset_poll(dev);
if (poll_health)
mlx5_start_health_poll(dev);
@@ -272,13 +277,14 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work)
mlx5_sync_reset_clear_reset_requested(dev, false);
mlx5_enter_error_state(dev, true);
- mlx5_fw_reset_complete_reload(dev, false);
+ mlx5_fw_reset_complete_reload(dev);
}
#define MLX5_RESET_POLL_INTERVAL (HZ / 10)
static void poll_sync_reset(struct timer_list *t)
{
- struct mlx5_fw_reset *fw_reset = from_timer(fw_reset, t, timer);
+ struct mlx5_fw_reset *fw_reset = timer_container_of(fw_reset, t,
+ timer);
struct mlx5_core_dev *dev = fw_reset->dev;
u32 fatal_error;
@@ -328,6 +334,11 @@ static int mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev)
}
mlx5_stop_health_poll(dev, true);
mlx5_start_sync_reset_poll(dev);
+
+ if (!test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS,
+ &fw_reset->reset_flags))
+ schedule_delayed_work(&fw_reset->reset_timeout_work,
+ msecs_to_jiffies(mlx5_tout_ms(dev, PCI_SYNC_UPDATE)));
return 0;
}
@@ -345,15 +356,12 @@ static void mlx5_fw_live_patch_event(struct work_struct *work)
}
#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
-static int mlx5_check_hotplug_interrupt(struct mlx5_core_dev *dev)
+static int mlx5_check_hotplug_interrupt(struct mlx5_core_dev *dev,
+ struct pci_dev *bridge)
{
- struct pci_dev *bridge = dev->pdev->bus->self;
u16 reg16;
int err;
- if (!bridge)
- return -EOPNOTSUPP;
-
err = pcie_capability_read_word(bridge, PCI_EXP_SLTCTL, &reg16);
if (err)
return err;
@@ -416,17 +424,28 @@ static int mlx5_check_dev_ids(struct mlx5_core_dev *dev, u16 dev_id)
static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev,
u8 reset_method)
{
+ struct pci_dev *bridge = dev->pdev->bus->self;
u16 dev_id;
int err;
+ if (!bridge) {
+ mlx5_core_warn(dev, "PCI bus bridge is not accessible\n");
+ return false;
+ }
+
if (!MLX5_CAP_GEN(dev, fast_teardown)) {
mlx5_core_warn(dev, "fast teardown is not supported by firmware\n");
return false;
}
+ if (!mlx5_core_is_ecpf(dev) && !mlx5_sf_table_empty(dev)) {
+ mlx5_core_warn(dev, "SFs should be removed before reset\n");
+ return false;
+ }
+
#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
if (reset_method != MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET) {
- err = mlx5_check_hotplug_interrupt(dev);
+ err = mlx5_check_hotplug_interrupt(dev, bridge);
if (err)
return false;
}
@@ -582,6 +601,65 @@ static int mlx5_sync_pci_reset(struct mlx5_core_dev *dev, u8 reset_method)
return err;
}
+void mlx5_sync_reset_unload_flow(struct mlx5_core_dev *dev, bool locked)
+{
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ unsigned long timeout;
+ int poll_freq = 20;
+ bool reset_action;
+ u8 rst_state;
+ int err;
+
+ if (locked)
+ mlx5_unload_one_devl_locked(dev, false);
+ else
+ mlx5_unload_one(dev, false);
+
+ if (!test_bit(MLX5_FW_RESET_FLAGS_UNLOAD_EVENT, &fw_reset->reset_flags))
+ return;
+
+ mlx5_set_fw_rst_ack(dev);
+ mlx5_core_warn(dev, "Sync Reset Unload done, device reset expected\n");
+
+ reset_action = false;
+ timeout = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, RESET_UNLOAD));
+ do {
+ rst_state = mlx5_get_fw_rst_state(dev);
+ if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ ||
+ rst_state == MLX5_FW_RST_STATE_IDLE) {
+ reset_action = true;
+ break;
+ }
+ if (rst_state == MLX5_FW_RST_STATE_DROP_MODE) {
+ mlx5_core_info(dev, "Sync Reset Drop mode ack\n");
+ mlx5_set_fw_rst_ack(dev);
+ poll_freq = 1000;
+ }
+ msleep(poll_freq);
+ } while (!time_after(jiffies, timeout));
+
+ if (!reset_action) {
+ mlx5_core_err(dev, "Got timeout waiting for sync reset action, state = %u\n",
+ rst_state);
+ fw_reset->ret = -ETIMEDOUT;
+ goto done;
+ }
+
+ mlx5_core_warn(dev, "Sync Reset, got reset action. rst_state = %u\n",
+ rst_state);
+ if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ) {
+ err = mlx5_sync_pci_reset(dev, fw_reset->reset_method);
+ if (err) {
+ mlx5_core_warn(dev, "mlx5_sync_pci_reset failed, err %d\n",
+ err);
+ fw_reset->ret = err;
+ }
+ }
+
+done:
+ clear_bit(MLX5_FW_RESET_FLAGS_UNLOAD_EVENT, &fw_reset->reset_flags);
+}
+
static void mlx5_sync_reset_now_event(struct work_struct *work)
{
struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
@@ -609,17 +687,13 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
mlx5_enter_error_state(dev, true);
done:
fw_reset->ret = err;
- mlx5_fw_reset_complete_reload(dev, false);
+ mlx5_fw_reset_complete_reload(dev);
}
static void mlx5_sync_reset_unload_event(struct work_struct *work)
{
struct mlx5_fw_reset *fw_reset;
struct mlx5_core_dev *dev;
- unsigned long timeout;
- int poll_freq = 20;
- bool reset_action;
- u8 rst_state;
int err;
fw_reset = container_of(work, struct mlx5_fw_reset, reset_unload_work);
@@ -628,6 +702,7 @@ static void mlx5_sync_reset_unload_event(struct work_struct *work)
if (mlx5_sync_reset_clear_reset_requested(dev, false))
return;
+ set_bit(MLX5_FW_RESET_FLAGS_UNLOAD_EVENT, &fw_reset->reset_flags);
mlx5_core_warn(dev, "Sync Reset Unload. Function is forced down.\n");
err = mlx5_cmd_fast_teardown_hca(dev);
@@ -636,49 +711,7 @@ static void mlx5_sync_reset_unload_event(struct work_struct *work)
else
mlx5_enter_error_state(dev, true);
- if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags))
- mlx5_unload_one_devl_locked(dev, false);
- else
- mlx5_unload_one(dev, false);
-
- mlx5_set_fw_rst_ack(dev);
- mlx5_core_warn(dev, "Sync Reset Unload done, device reset expected\n");
-
- reset_action = false;
- timeout = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, RESET_UNLOAD));
- do {
- rst_state = mlx5_get_fw_rst_state(dev);
- if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ ||
- rst_state == MLX5_FW_RST_STATE_IDLE) {
- reset_action = true;
- break;
- }
- if (rst_state == MLX5_FW_RST_STATE_DROP_MODE) {
- mlx5_core_info(dev, "Sync Reset Drop mode ack\n");
- mlx5_set_fw_rst_ack(dev);
- poll_freq = 1000;
- }
- msleep(poll_freq);
- } while (!time_after(jiffies, timeout));
-
- if (!reset_action) {
- mlx5_core_err(dev, "Got timeout waiting for sync reset action, state = %u\n",
- rst_state);
- fw_reset->ret = -ETIMEDOUT;
- goto done;
- }
-
- mlx5_core_warn(dev, "Sync Reset, got reset action. rst_state = %u\n", rst_state);
- if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ) {
- err = mlx5_sync_pci_reset(dev, fw_reset->reset_method);
- if (err) {
- mlx5_core_warn(dev, "mlx5_sync_pci_reset failed, err %d\n", err);
- fw_reset->ret = err;
- }
- }
-
-done:
- mlx5_fw_reset_complete_reload(dev, true);
+ mlx5_fw_reset_complete_reload(dev);
}
static void mlx5_sync_reset_abort_event(struct work_struct *work)
@@ -715,6 +748,19 @@ static void mlx5_sync_reset_events_handle(struct mlx5_fw_reset *fw_reset, struct
}
}
+static void mlx5_sync_reset_timeout_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = container_of(work, struct delayed_work,
+ work);
+ struct mlx5_fw_reset *fw_reset =
+ container_of(dwork, struct mlx5_fw_reset, reset_timeout_work);
+ struct mlx5_core_dev *dev = fw_reset->dev;
+
+ if (mlx5_sync_reset_clear_reset_requested(dev, true))
+ return;
+ mlx5_core_warn(dev, "PCI Sync FW Update Reset Timeout.\n");
+}
+
static int fw_reset_event_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
struct mlx5_fw_reset *fw_reset = mlx5_nb_cof(nb, struct mlx5_fw_reset, nb);
@@ -798,6 +844,7 @@ void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
cancel_work_sync(&fw_reset->reset_reload_work);
cancel_work_sync(&fw_reset->reset_now_work);
cancel_work_sync(&fw_reset->reset_abort_work);
+ cancel_delayed_work(&fw_reset->reset_timeout_work);
}
static const struct devlink_param mlx5_fw_reset_devlink_params[] = {
@@ -841,6 +888,8 @@ int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
INIT_WORK(&fw_reset->reset_reload_work, mlx5_sync_reset_reload_work);
INIT_WORK(&fw_reset->reset_now_work, mlx5_sync_reset_now_event);
INIT_WORK(&fw_reset->reset_abort_work, mlx5_sync_reset_abort_event);
+ INIT_DELAYED_WORK(&fw_reset->reset_timeout_work,
+ mlx5_sync_reset_timeout_work);
init_completion(&fw_reset->done);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
index ea527d06a85f..d5b28525c960 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
@@ -12,6 +12,7 @@ int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel,
int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev);
int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev);
+void mlx5_sync_reset_unload_flow(struct mlx5_core_dev *dev, bool locked);
int mlx5_fw_reset_verify_fw_complete(struct mlx5_core_dev *dev,
struct netlink_ext_ack *extack);
void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index a6329ca2d9bf..aeeb136f5ebc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -96,6 +96,11 @@ static int mlx5_health_get_rfr(u8 rfr_severity)
return rfr_severity >> MLX5_RFR_BIT_OFFSET;
}
+static int mlx5_health_get_crr(u8 rfr_severity)
+{
+ return (rfr_severity >> MLX5_CRR_BIT_OFFSET) & 0x01;
+}
+
static bool sensor_fw_synd_rfr(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
@@ -375,6 +380,8 @@ static const char *hsynd_str(u8 synd)
return "High temperature";
case MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PCI_POISONED_ERR:
return "ICM fetch PCI data poisoned error";
+ case MLX5_INITIAL_SEG_HEALTH_SYNDROME_TRUST_LOCKDOWN_ERR:
+ return "Trust lockdown error";
default:
return "unrecognized error";
}
@@ -442,12 +449,15 @@ static void print_health_info(struct mlx5_core_dev *dev)
mlx5_log(dev, severity, "time %u\n", ioread32be(&h->time));
mlx5_log(dev, severity, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
mlx5_log(dev, severity, "rfr %d\n", mlx5_health_get_rfr(rfr_severity));
+ mlx5_log(dev, severity, "crr %d\n", mlx5_health_get_crr(rfr_severity));
mlx5_log(dev, severity, "severity %d (%s)\n", severity, mlx5_loglevel_str(severity));
mlx5_log(dev, severity, "irisc_index %d\n", ioread8(&h->irisc_index));
mlx5_log(dev, severity, "synd 0x%x: %s\n", ioread8(&h->synd),
hsynd_str(ioread8(&h->synd)));
mlx5_log(dev, severity, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
mlx5_log(dev, severity, "raw fw_ver 0x%08x\n", ioread32be(&h->fw_ver));
+ if (mlx5_health_get_crr(rfr_severity))
+ mlx5_core_warn(dev, "Cold reset is required\n");
}
static int
@@ -659,57 +669,64 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
}
}
+#define MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD 180000
+#define MLX5_FW_REPORTER_PF_GRACEFUL_PERIOD 60000
+#define MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD 30000
+#define MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD \
+ MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD
+
+static
+const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ecpf_ops = {
+ .name = "fw_fatal",
+ .recover = mlx5_fw_fatal_reporter_recover,
+ .dump = mlx5_fw_fatal_reporter_dump,
+ .default_graceful_period =
+ MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD,
+};
+
static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_pf_ops = {
.name = "fw_fatal",
.recover = mlx5_fw_fatal_reporter_recover,
.dump = mlx5_fw_fatal_reporter_dump,
+ .default_graceful_period = MLX5_FW_REPORTER_PF_GRACEFUL_PERIOD,
};
static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
.name = "fw_fatal",
.recover = mlx5_fw_fatal_reporter_recover,
+ .default_graceful_period =
+ MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD,
};
-#define MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD 180000
-#define MLX5_FW_REPORTER_PF_GRACEFUL_PERIOD 60000
-#define MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD 30000
-#define MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD
-
void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
{
const struct devlink_health_reporter_ops *fw_fatal_ops;
struct mlx5_core_health *health = &dev->priv.health;
const struct devlink_health_reporter_ops *fw_ops;
struct devlink *devlink = priv_to_devlink(dev);
- u64 grace_period;
- fw_fatal_ops = &mlx5_fw_fatal_reporter_pf_ops;
fw_ops = &mlx5_fw_reporter_pf_ops;
if (mlx5_core_is_ecpf(dev)) {
- grace_period = MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD;
+ fw_fatal_ops = &mlx5_fw_fatal_reporter_ecpf_ops;
} else if (mlx5_core_is_pf(dev)) {
- grace_period = MLX5_FW_REPORTER_PF_GRACEFUL_PERIOD;
+ fw_fatal_ops = &mlx5_fw_fatal_reporter_pf_ops;
} else {
/* VF or SF */
- grace_period = MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD;
fw_fatal_ops = &mlx5_fw_fatal_reporter_ops;
fw_ops = &mlx5_fw_reporter_ops;
}
- health->fw_reporter =
- devl_health_reporter_create(devlink, fw_ops, 0, dev);
+ health->fw_reporter = devl_health_reporter_create(devlink, fw_ops, dev);
if (IS_ERR(health->fw_reporter))
- mlx5_core_warn(dev, "Failed to create fw reporter, err = %ld\n",
- PTR_ERR(health->fw_reporter));
-
- health->fw_fatal_reporter =
- devl_health_reporter_create(devlink,
- fw_fatal_ops,
- grace_period,
- dev);
+ mlx5_core_warn(dev, "Failed to create fw reporter, err = %pe\n",
+ health->fw_reporter);
+
+ health->fw_fatal_reporter = devl_health_reporter_create(devlink,
+ fw_fatal_ops,
+ dev);
if (IS_ERR(health->fw_fatal_reporter))
- mlx5_core_warn(dev, "Failed to create fw fatal reporter, err = %ld\n",
- PTR_ERR(health->fw_fatal_reporter));
+ mlx5_core_warn(dev, "Failed to create fw fatal reporter, err = %pe\n",
+ health->fw_fatal_reporter);
}
static void mlx5_fw_reporters_destroy(struct mlx5_core_dev *dev)
@@ -769,7 +786,8 @@ static void mlx5_health_log_ts_update(struct work_struct *work)
static void poll_health(struct timer_list *t)
{
- struct mlx5_core_dev *dev = from_timer(dev, t, priv.health.timer);
+ struct mlx5_core_dev *dev = timer_container_of(dev, t,
+ priv.health.timer);
struct mlx5_core_health *health = &dev->priv.health;
struct health_buffer __iomem *h = health->health;
u32 fatal_error;
@@ -799,14 +817,17 @@ static void poll_health(struct timer_list *t)
health->prev = count;
if (health->miss_counter == MAX_MISSES) {
mlx5_core_err(dev, "device's health compromised - reached miss count\n");
+ health->synd = ioread8(&h->synd);
print_health_info(dev);
queue_work(health->wq, &health->report_work);
}
prev_synd = health->synd;
health->synd = ioread8(&h->synd);
- if (health->synd && health->synd != prev_synd)
+ if (health->synd && health->synd != prev_synd) {
+ print_health_info(dev);
queue_work(health->wq, &health->report_work);
+ }
out:
mod_timer(&health->timer, get_next_poll_jiffies(dev));
@@ -834,7 +855,7 @@ void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
if (disable_health)
set_bit(MLX5_DROP_HEALTH_WORK, &health->flags);
- del_timer_sync(&health->timer);
+ timer_delete_sync(&health->timer);
}
void mlx5_start_health_fw_log_up(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c
index 353f81dccd1c..4ba2636d7fb6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c
@@ -416,3 +416,8 @@ void mlx5_hwmon_dev_unregister(struct mlx5_core_dev *mdev)
mlx5_hwmon_free(hwmon);
mdev->hwmon = NULL;
}
+
+const char *hwmon_get_sensor_name(struct mlx5_hwmon *hwmon, int channel)
+{
+ return hwmon->temp_channel_desc[channel].sensor_name;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h
index 999654a9b9da..f38271c22c10 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h
@@ -10,6 +10,7 @@
int mlx5_hwmon_dev_register(struct mlx5_core_dev *mdev);
void mlx5_hwmon_dev_unregister(struct mlx5_core_dev *mdev);
+const char *hwmon_get_sensor_name(struct mlx5_hwmon *hwmon, int channel);
#else
static inline int mlx5_hwmon_dev_register(struct mlx5_core_dev *mdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 9772327d5124..3b2f54ca30a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -238,6 +238,23 @@ static u32 mlx5i_flow_type_mask(u32 flow_type)
return flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
}
+static int mlx5i_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ return mlx5e_ethtool_set_rxfh_fields(priv, cmd, extack);
+}
+
+static int mlx5i_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ return mlx5e_ethtool_get_rxfh_fields(priv, info);
+}
+
static int mlx5i_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
@@ -249,21 +266,18 @@ static int mlx5i_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return mlx5e_ethtool_set_rxnfc(priv, cmd);
}
+static u32 mlx5i_get_rx_ring_count(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ return priv->channels.params.num_channels;
+}
+
static int mlx5i_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rule_locs)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
- /* ETHTOOL_GRXRINGS is needed by ethtool -x which is not part
- * of rxnfc. We keep this logic out of mlx5e_ethtool_get_rxnfc,
- * to avoid breaking "ethtool -x" when mlx5e_ethtool_get_rxnfc
- * is compiled out via CONFIG_MLX5_EN_RXNFC=n.
- */
- if (info->cmd == ETHTOOL_GRXRINGS) {
- info->data = priv->channels.params.num_channels;
- return 0;
- }
-
return mlx5e_ethtool_get_rxnfc(priv, info, rule_locs);
}
@@ -283,8 +297,11 @@ const struct ethtool_ops mlx5i_ethtool_ops = {
.get_coalesce = mlx5i_get_coalesce,
.set_coalesce = mlx5i_set_coalesce,
.get_ts_info = mlx5i_get_ts_info,
+ .get_rxfh_fields = mlx5i_get_rxfh_fields,
+ .set_rxfh_fields = mlx5i_set_rxfh_fields,
.get_rxnfc = mlx5i_get_rxnfc,
.set_rxnfc = mlx5i_set_rxnfc,
+ .get_rx_ring_count = mlx5i_get_rx_ring_count,
.get_link_ksettings = mlx5i_get_link_ksettings,
.get_link = ethtool_op_get_link,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 0979d672d47f..0a6003fe60e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -32,6 +32,7 @@
#include <rdma/ib_verbs.h>
#include <linux/mlx5/fs.h>
+#include <net/netdev_lock.h>
#include "en.h"
#include "en/params.h"
#include "ipoib.h"
@@ -44,6 +45,23 @@ static int mlx5i_open(struct net_device *netdev);
static int mlx5i_close(struct net_device *netdev);
static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu);
+int mlx5i_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *epriv = mlx5i_epriv(dev);
+
+ return mlx5e_hwtstamp_set(epriv, config, extack);
+}
+
+int mlx5i_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct mlx5e_priv *epriv = mlx5i_epriv(dev);
+
+ return mlx5e_hwtstamp_get(epriv, config);
+}
+
static const struct net_device_ops mlx5i_netdev_ops = {
.ndo_open = mlx5i_open,
.ndo_stop = mlx5i_close,
@@ -51,7 +69,8 @@ static const struct net_device_ops mlx5i_netdev_ops = {
.ndo_init = mlx5i_dev_init,
.ndo_uninit = mlx5i_dev_cleanup,
.ndo_change_mtu = mlx5i_change_mtu,
- .ndo_eth_ioctl = mlx5i_ioctl,
+ .ndo_hwtstamp_get = mlx5i_hwtstamp_get,
+ .ndo_hwtstamp_set = mlx5i_hwtstamp_set,
};
/* IPoIB mlx5 netdev profile */
@@ -102,6 +121,8 @@ int mlx5i_init(struct mlx5_core_dev *mdev, struct net_device *netdev)
netdev->netdev_ops = &mlx5i_netdev_ops;
netdev->ethtool_ops = &mlx5i_ethtool_ops;
+ netdev->request_ops_lock = true;
+ netdev_lockdep_set_classes(netdev);
return 0;
}
@@ -313,7 +334,7 @@ void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, u32 qpn)
int mlx5i_update_nic_rx(struct mlx5e_priv *priv)
{
- return mlx5e_refresh_tirs(priv, true, true);
+ return mlx5e_refresh_tirs(priv->mdev, true, true);
}
int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn)
@@ -406,6 +427,7 @@ static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
static int mlx5i_init_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
+ enum mlx5e_rx_res_features features;
int err;
priv->fs = mlx5e_fs_init(priv->profile, mdev,
@@ -424,7 +446,9 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
goto err_destroy_q_counters;
}
- priv->rx_res = mlx5e_rx_res_create(priv->mdev, 0, priv->max_nch, priv->drop_rq.rqn,
+ features = MLX5E_RX_RES_FEATURE_SELF_LB_BLOCK;
+ priv->rx_res = mlx5e_rx_res_create(priv->mdev, features, priv->max_nch,
+ priv->drop_rq.rqn,
&priv->channels.params.packet_merge,
priv->channels.params.num_channels);
if (IS_ERR(priv->rx_res)) {
@@ -554,20 +578,6 @@ int mlx5i_dev_init(struct net_device *dev)
return 0;
}
-int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct mlx5e_priv *priv = mlx5i_epriv(dev);
-
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return mlx5e_hwstamp_set(priv, ifr);
- case SIOCGHWTSTAMP:
- return mlx5e_hwstamp_get(priv, ifr);
- default:
- return -EOPNOTSUPP;
- }
-}
-
void mlx5i_dev_cleanup(struct net_device *dev)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index 2ab6437a1c49..d67d5a72bb41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -88,7 +88,11 @@ struct net_device *mlx5i_pkey_get_netdev(struct net_device *netdev, u32 qpn);
/* Shared ndo functions */
int mlx5i_dev_init(struct net_device *dev);
void mlx5i_dev_cleanup(struct net_device *dev);
-int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+int mlx5i_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+int mlx5i_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config);
/* Parent profile functions */
int mlx5i_init(struct mlx5_core_dev *mdev, struct net_device *netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 028a76944d82..04444dad3a0d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -140,7 +140,6 @@ static int mlx5i_pkey_close(struct net_device *netdev);
static int mlx5i_pkey_dev_init(struct net_device *dev);
static void mlx5i_pkey_dev_cleanup(struct net_device *netdev);
static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu);
-static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static const struct net_device_ops mlx5i_pkey_netdev_ops = {
.ndo_open = mlx5i_pkey_open,
@@ -149,7 +148,8 @@ static const struct net_device_ops mlx5i_pkey_netdev_ops = {
.ndo_get_stats64 = mlx5i_get_stats,
.ndo_uninit = mlx5i_pkey_dev_cleanup,
.ndo_change_mtu = mlx5i_pkey_change_mtu,
- .ndo_eth_ioctl = mlx5i_pkey_ioctl,
+ .ndo_hwtstamp_get = mlx5i_hwtstamp_get,
+ .ndo_hwtstamp_set = mlx5i_hwtstamp_set,
};
/* Child NDOs */
@@ -184,11 +184,6 @@ static int mlx5i_pkey_dev_init(struct net_device *dev)
return mlx5i_dev_init(dev);
}
-static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- return mlx5i_ioctl(dev, ifr, cmd);
-}
-
static void mlx5i_pkey_dev_cleanup(struct net_device *netdev)
{
mlx5i_parent_put(netdev);
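For reference, a small userspace sketch (not part of the patch; the interface name "ib0" and the filter values are placeholders) illustrating that the SIOCSHWTSTAMP uAPI is unchanged by the ndo_eth_ioctl to ndo_hwtstamp_get/set conversion above: the networking core now translates this ioctl into a kernel_hwtstamp_config and dispatches it to the new callbacks.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr = { 0 };
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}
	strncpy(ifr.ifr_name, "ib0", IFNAMSIZ - 1);	/* placeholder ifname */
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)	/* routed to the hwtstamp NDOs */
		perror("SIOCSHWTSTAMP");

	close(fd);
	return 0;
}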
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
index 1477db7f5307..14d339eceb92 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
@@ -47,29 +47,40 @@ static int cpu_get_least_loaded(struct mlx5_irq_pool *pool,
static struct mlx5_irq *
irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
{
- struct irq_affinity_desc auto_desc = {};
+ struct irq_affinity_desc *auto_desc;
struct mlx5_irq *irq;
u32 irq_index;
int err;
+ auto_desc = kvzalloc(sizeof(*auto_desc), GFP_KERNEL);
+ if (!auto_desc)
+ return ERR_PTR(-ENOMEM);
+
err = xa_alloc(&pool->irqs, &irq_index, NULL, pool->xa_num_irqs, GFP_KERNEL);
- if (err)
+ if (err) {
+ kvfree(auto_desc);
return ERR_PTR(err);
+ }
+
if (pool->irqs_per_cpu) {
if (cpumask_weight(&af_desc->mask) > 1)
/* if req_mask contains more than one CPU, set the least loaded CPU
* of req_mask
*/
cpumask_set_cpu(cpu_get_least_loaded(pool, &af_desc->mask),
- &auto_desc.mask);
+ &auto_desc->mask);
else
cpu_get(pool, cpumask_first(&af_desc->mask));
}
+
irq = mlx5_irq_alloc(pool, irq_index,
- cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc,
+ cpumask_empty(&auto_desc->mask) ? af_desc : auto_desc,
NULL);
if (IS_ERR(irq))
xa_erase(&pool->irqs, irq_index);
+
+ kvfree(auto_desc);
+
return irq;
}
@@ -139,8 +150,8 @@ mlx5_irq_affinity_request(struct mlx5_core_dev *dev, struct mlx5_irq_pool *pool,
if (IS_ERR(new_irq)) {
if (!least_loaded_irq) {
/* We failed to create an IRQ and we didn't find an IRQ */
- mlx5_core_err(pool->dev, "Didn't find a matching IRQ. err = %ld\n",
- PTR_ERR(new_irq));
+ mlx5_core_err(pool->dev, "Didn't find a matching IRQ. err = %pe\n",
+ new_irq);
mutex_unlock(&pool->lock);
return new_irq;
}
@@ -175,7 +186,7 @@ unlock:
void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq)
{
- struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
+ struct mlx5_irq_pool *pool = mlx5_irq_get_pool(irq);
int cpu;
cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
index f4b777d4e108..62b6faa4276a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
@@ -105,20 +105,20 @@ static int mapping_show(struct seq_file *file, void *priv)
struct mlx5_lag *ldev;
bool hash = false;
bool lag_active;
+ int i, idx = 0;
int num_ports;
- int i;
ldev = mlx5_lag_dev(dev);
mutex_lock(&ldev->lock);
lag_active = __mlx5_lag_is_active(ldev);
if (lag_active) {
if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags)) {
- mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, ports,
+ mlx5_infer_tx_enabled(&ldev->tracker, ldev, ports,
&num_ports);
hash = true;
} else {
- for (i = 0; i < ldev->ports; i++)
- ports[i] = ldev->v2p_map[i];
+ mlx5_ldev_for_each(i, 0, ldev)
+ ports[idx++] = ldev->v2p_map[i];
num_ports = ldev->ports;
}
}
@@ -144,11 +144,8 @@ static int members_show(struct seq_file *file, void *priv)
ldev = mlx5_lag_dev(dev);
mutex_lock(&ldev->lock);
- for (i = 0; i < ldev->ports; i++) {
- if (!ldev->pf[i].dev)
- continue;
+ mlx5_ldev_for_each(i, 0, ldev)
seq_printf(file, "%s\n", dev_name(ldev->pf[i].dev->device));
- }
mutex_unlock(&ldev->lock);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 8577db3308cc..1ac933cd8f02 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -35,6 +35,7 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
+#include "lib/mlx5.h"
#include "lib/devcom.h"
#include "mlx5_core.h"
#include "eswitch.h"
@@ -43,10 +44,6 @@
#include "mp.h"
#include "mpesw.h"
-enum {
- MLX5_LAG_EGRESS_PORT_1 = 1,
- MLX5_LAG_EGRESS_PORT_2,
-};
/* General purpose, use for short periods of time.
* Beware of lock dependencies (preferably, no locks should be acquired
@@ -72,7 +69,7 @@ static u8 lag_active_port_bits(struct mlx5_lag *ldev)
int num_enabled;
int idx;
- mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, enabled_ports,
+ mlx5_infer_tx_enabled(&ldev->tracker, ldev, enabled_ports,
&num_enabled);
for (idx = 0; idx < num_enabled; idx++)
active_port |= BIT_MASK(enabled_ports[idx]);
@@ -80,23 +77,30 @@ static u8 lag_active_port_bits(struct mlx5_lag *ldev)
return active_port;
}
-static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode,
- unsigned long flags)
+static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, struct mlx5_lag *ldev,
+ int mode, unsigned long flags)
{
bool fdb_sel_mode = test_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE,
&flags);
int port_sel_mode = get_port_sel_mode(mode, flags);
u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};
+ u8 *ports = ldev->v2p_map;
+ int idx0, idx1;
void *lag_ctx;
lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
MLX5_SET(lagc, lag_ctx, fdb_selection_mode, fdb_sel_mode);
+ idx0 = mlx5_lag_get_dev_index_by_seq(ldev, 0);
+ idx1 = mlx5_lag_get_dev_index_by_seq(ldev, 1);
+
+ if (idx0 < 0 || idx1 < 0)
+ return -EINVAL;
switch (port_sel_mode) {
case MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY:
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]);
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[idx0]);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[idx1]);
break;
case MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT:
if (!MLX5_CAP_PORT_SELECTION(dev, port_select_flow_table_bypass))
@@ -113,17 +117,23 @@ static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode,
return mlx5_cmd_exec_in(dev, create_lag, in);
}
-static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 num_ports,
+static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, struct mlx5_lag *ldev,
u8 *ports)
{
u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {};
void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);
+ int idx0, idx1;
+
+ idx0 = mlx5_lag_get_dev_index_by_seq(ldev, 0);
+ idx1 = mlx5_lag_get_dev_index_by_seq(ldev, 1);
+ if (idx0 < 0 || idx1 < 0)
+ return -EINVAL;
MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
MLX5_SET(modify_lag_in, in, field_select, 0x1);
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]);
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[idx0]);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[idx1]);
return mlx5_cmd_exec_in(dev, modify_lag, in);
}
@@ -148,33 +158,31 @@ int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
}
EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);
-static void mlx5_infer_tx_disabled(struct lag_tracker *tracker, u8 num_ports,
+static void mlx5_infer_tx_disabled(struct lag_tracker *tracker, struct mlx5_lag *ldev,
u8 *ports, int *num_disabled)
{
int i;
*num_disabled = 0;
- for (i = 0; i < num_ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev)
if (!tracker->netdev_state[i].tx_enabled ||
!tracker->netdev_state[i].link_up)
ports[(*num_disabled)++] = i;
- }
}
-void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
+void mlx5_infer_tx_enabled(struct lag_tracker *tracker, struct mlx5_lag *ldev,
u8 *ports, int *num_enabled)
{
int i;
*num_enabled = 0;
- for (i = 0; i < num_ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev)
if (tracker->netdev_state[i].tx_enabled &&
tracker->netdev_state[i].link_up)
ports[(*num_enabled)++] = i;
- }
if (*num_enabled == 0)
- mlx5_infer_tx_disabled(tracker, num_ports, ports, num_enabled);
+ mlx5_infer_tx_disabled(tracker, ldev, ports, num_enabled);
}
static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev,
@@ -192,7 +200,7 @@ static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev,
int j;
if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
- mlx5_infer_tx_enabled(tracker, ldev->ports, enabled_ports,
+ mlx5_infer_tx_enabled(tracker, ldev, enabled_ports,
&num_enabled);
for (i = 0; i < num_enabled; i++) {
err = scnprintf(buf + written, 4, "%d, ", enabled_ports[i] + 1);
@@ -203,7 +211,7 @@ static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev,
buf[written - 2] = 0;
mlx5_core_info(dev, "lag map active ports: %s\n", buf);
} else {
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < ldev->buckets; j++) {
idx = i * ldev->buckets + j;
err = scnprintf(buf + written, 10,
@@ -224,9 +232,13 @@ static void mlx5_do_bond_work(struct work_struct *work);
static void mlx5_ldev_free(struct kref *ref)
{
struct mlx5_lag *ldev = container_of(ref, struct mlx5_lag, ref);
+ struct net *net;
+
+ if (ldev->nb.notifier_call) {
+ net = read_pnet(&ldev->net);
+ unregister_netdevice_notifier_net(net, &ldev->nb);
+ }
- if (ldev->nb.notifier_call)
- unregister_netdevice_notifier_net(&init_net, &ldev->nb);
mlx5_lag_mp_cleanup(ldev);
cancel_delayed_work_sync(&ldev->bond_work);
destroy_workqueue(ldev->wq);
@@ -264,7 +276,8 @@ static struct mlx5_lag *mlx5_lag_dev_alloc(struct mlx5_core_dev *dev)
INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);
ldev->nb.notifier_call = mlx5_lag_netdev_event;
- if (register_netdevice_notifier_net(&init_net, &ldev->nb)) {
+ write_pnet(&ldev->net, mlx5_core_net(dev));
+ if (register_netdevice_notifier_net(read_pnet(&ldev->net), &ldev->nb)) {
ldev->nb.notifier_call = NULL;
mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
}
@@ -286,13 +299,55 @@ int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
{
int i;
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (ldev->pf[i].netdev == ndev)
return i;
return -ENOENT;
}
+int mlx5_lag_get_dev_index_by_seq(struct mlx5_lag *ldev, int seq)
+{
+ int i, num = 0;
+
+ if (!ldev)
+ return -ENOENT;
+
+ mlx5_ldev_for_each(i, 0, ldev) {
+ if (num == seq)
+ return i;
+ num++;
+ }
+ return -ENOENT;
+}
+
+int mlx5_lag_num_devs(struct mlx5_lag *ldev)
+{
+ int i, num = 0;
+
+ if (!ldev)
+ return 0;
+
+ mlx5_ldev_for_each(i, 0, ldev) {
+ (void)i;
+ num++;
+ }
+ return num;
+}
+
+int mlx5_lag_num_netdevs(struct mlx5_lag *ldev)
+{
+ int i, num = 0;
+
+ if (!ldev)
+ return 0;
+
+ mlx5_ldev_for_each(i, 0, ldev)
+ if (ldev->pf[i].netdev)
+ num++;
+ return num;
+}
+
static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev)
{
return ldev->mode == MLX5_LAG_MODE_ROCE;
@@ -310,7 +365,7 @@ static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev)
* with mapping that points to active ports.
*/
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
- u8 num_ports,
+ struct mlx5_lag *ldev,
u8 buckets,
u8 *ports)
{
@@ -323,7 +378,7 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
int i;
int j;
- for (i = 0; i < num_ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
if (tracker->netdev_state[i].tx_enabled &&
tracker->netdev_state[i].link_up)
enabled[enabled_ports_num++] = i;
@@ -334,15 +389,16 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
/* Use native mapping by default where each port's buckets
* point to the native port: 1 1 1 .. 1 2 2 2 ... 2 3 3 3 ... 3 etc
*/
- for (i = 0; i < num_ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < buckets; j++) {
idx = i * buckets + j;
- ports[idx] = MLX5_LAG_EGRESS_PORT_1 + i;
+ ports[idx] = i + 1;
}
+ }
/* If all ports are disabled/enabled keep native mapping */
- if (enabled_ports_num == num_ports ||
- disabled_ports_num == num_ports)
+ if (enabled_ports_num == ldev->ports ||
+ disabled_ports_num == ldev->ports)
return;
/* Go over the disabled ports and for each assign a random active port */
@@ -358,7 +414,7 @@ static bool mlx5_lag_has_drop_rule(struct mlx5_lag *ldev)
{
int i;
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (ldev->pf[i].has_drop)
return true;
return false;
@@ -368,7 +424,7 @@ static void mlx5_lag_drop_rule_cleanup(struct mlx5_lag *ldev)
{
int i;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
if (!ldev->pf[i].has_drop)
continue;
@@ -396,7 +452,7 @@ static void mlx5_lag_drop_rule_setup(struct mlx5_lag *ldev,
if (!ldev->tracker.has_inactive)
return;
- mlx5_infer_tx_disabled(tracker, ldev->ports, disabled_ports, &num_disabled);
+ mlx5_infer_tx_disabled(tracker, ldev, disabled_ports, &num_disabled);
for (i = 0; i < num_disabled; i++) {
disabled_index = disabled_ports[i];
@@ -428,10 +484,15 @@ static int mlx5_cmd_modify_active_port(struct mlx5_core_dev *dev, u8 ports)
static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev0;
u8 active_ports;
int ret;
+ if (idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[idx].dev;
if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags)) {
ret = mlx5_lag_port_sel_modify(ldev, ports);
if (ret ||
@@ -442,7 +503,7 @@ static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports)
return mlx5_cmd_modify_active_port(dev0, active_ports);
}
- return mlx5_cmd_modify_lag(dev0, ldev->ports, ports);
+ return mlx5_cmd_modify_lag(dev0, ldev, ports);
}
static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev *dev)
@@ -450,7 +511,7 @@ static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev
struct net_device *ndev = NULL;
struct mlx5_lag *ldev;
unsigned long flags;
- int i;
+ int i, last_idx;
spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
@@ -458,14 +519,17 @@ static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev
if (!ldev)
goto unlock;
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (ldev->tracker.netdev_state[i].tx_enabled)
ndev = ldev->pf[i].netdev;
- if (!ndev)
- ndev = ldev->pf[ldev->ports - 1].netdev;
+ if (!ndev) {
+ last_idx = mlx5_lag_get_dev_index_by_seq(ldev, ldev->ports - 1);
+ if (last_idx < 0)
+ goto unlock;
+ ndev = ldev->pf[last_idx].netdev;
+ }
- if (ndev)
- dev_hold(ndev);
+ dev_hold(ndev);
unlock:
spin_unlock_irqrestore(&lag_lock, flags);
@@ -476,16 +540,21 @@ unlock:
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker)
{
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
u8 ports[MLX5_MAX_PORTS * MLX5_LAG_MAX_HASH_BUCKETS] = {};
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_core_dev *dev0;
int idx;
int err;
int i;
int j;
- mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ports);
+ if (first_idx < 0)
+ return;
+
+ dev0 = ldev->pf[first_idx].dev;
+ mlx5_infer_tx_affinity_mapping(tracker, ldev, ldev->buckets, ports);
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < ldev->buckets; j++) {
idx = i * ldev->buckets + j;
if (ports[idx] == ldev->v2p_map[idx])
@@ -516,13 +585,25 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
blocking_notifier_call_chain(&dev0->priv.lag_nh,
MLX5_DRIVER_EVENT_ACTIVE_BACKUP_LAG_CHANGE_LOWERSTATE,
ndev);
+ dev_put(ndev);
}
}
-static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
- unsigned long *flags)
+static int mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev,
+ enum mlx5_lag_mode mode,
+ unsigned long *flags)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev0;
+
+ if (first_idx < 0)
+ return -EINVAL;
+
+ if (mode == MLX5_LAG_MODE_MPESW ||
+ mode == MLX5_LAG_MODE_MULTIPATH)
+ return 0;
+
+ dev0 = ldev->pf[first_idx].dev;
if (!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table)) {
if (ldev->ports > 2)
@@ -538,30 +619,10 @@ static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
return 0;
}
-static void mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev,
- struct lag_tracker *tracker,
- enum mlx5_lag_mode mode,
- unsigned long *flags)
-{
- struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1];
-
- if (mode == MLX5_LAG_MODE_MPESW)
- return;
-
- if (MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) &&
- tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH) {
- if (ldev->ports > 2)
- ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS;
- set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags);
- }
-}
-
static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
struct lag_tracker *tracker, bool shared_fdb,
unsigned long *flags)
{
- bool roce_lag = mode == MLX5_LAG_MODE_ROCE;
-
*flags = 0;
if (shared_fdb) {
set_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, flags);
@@ -571,11 +632,7 @@ static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
if (mode == MLX5_LAG_MODE_MPESW)
set_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, flags);
- if (roce_lag)
- return mlx5_lag_set_port_sel_mode_roce(ldev, flags);
-
- mlx5_lag_set_port_sel_mode_offloads(ldev, tracker, mode, flags);
- return 0;
+ return mlx5_lag_set_port_sel_mode(ldev, mode, flags);
}
char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
@@ -592,12 +649,18 @@ char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
static int mlx5_lag_create_single_fdb(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
- struct mlx5_eswitch *master_esw = dev0->priv.eswitch;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_eswitch *master_esw;
+ struct mlx5_core_dev *dev0;
+ int i, j;
int err;
- int i;
- for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) {
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[first_idx].dev;
+ master_esw = dev0->priv.eswitch;
+ mlx5_ldev_for_each(i, first_idx + 1, ldev) {
struct mlx5_eswitch *slave_esw = ldev->pf[i].dev->priv.eswitch;
err = mlx5_eswitch_offloads_single_fdb_add_one(master_esw,
@@ -607,9 +670,9 @@ static int mlx5_lag_create_single_fdb(struct mlx5_lag *ldev)
}
return 0;
err:
- for (; i > MLX5_LAG_P1; i--)
+ mlx5_ldev_for_each_reverse(j, i, first_idx + 1, ldev)
mlx5_eswitch_offloads_single_fdb_del_one(master_esw,
- ldev->pf[i].dev->priv.eswitch);
+ ldev->pf[j].dev->priv.eswitch);
return err;
}
@@ -619,16 +682,21 @@ static int mlx5_create_lag(struct mlx5_lag *ldev,
unsigned long flags)
{
bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
+ struct mlx5_core_dev *dev0;
int err;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[first_idx].dev;
if (tracker)
mlx5_lag_print_mapping(dev0, ldev, tracker, flags);
mlx5_core_info(dev0, "shared_fdb:%d mode:%s\n",
shared_fdb, mlx5_get_str_port_sel_mode(mode, flags));
- err = mlx5_cmd_create_lag(dev0, ldev->v2p_map, mode, flags);
+ err = mlx5_cmd_create_lag(dev0, ldev, mode, flags);
if (err) {
mlx5_core_err(dev0,
"Failed to create LAG (%d)\n",
@@ -660,17 +728,22 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
enum mlx5_lag_mode mode,
bool shared_fdb)
{
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
bool roce_lag = mode == MLX5_LAG_MODE_ROCE;
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_core_dev *dev0;
unsigned long flags = 0;
int err;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[first_idx].dev;
err = mlx5_lag_set_flags(ldev, mode, tracker, shared_fdb, &flags);
if (err)
return err;
if (mode != MLX5_LAG_MODE_MPESW) {
- mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ldev->v2p_map);
+ mlx5_infer_tx_affinity_mapping(tracker, ldev, ldev->buckets, ldev->v2p_map);
if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
err = mlx5_lag_port_sel_create(ldev, tracker->hash_type,
ldev->v2p_map);
@@ -708,20 +781,26 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
- struct mlx5_eswitch *master_esw = dev0->priv.eswitch;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
bool roce_lag = __mlx5_lag_is_roce(ldev);
unsigned long flags = ldev->mode_flags;
+ struct mlx5_eswitch *master_esw;
+ struct mlx5_core_dev *dev0;
int err;
int i;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[first_idx].dev;
+ master_esw = dev0->priv.eswitch;
ldev->mode = MLX5_LAG_MODE_NONE;
ldev->mode_flags = 0;
mlx5_lag_mp_reset(ldev);
if (test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags)) {
- for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, first_idx + 1, ldev)
mlx5_eswitch_offloads_single_fdb_del_one(master_esw,
ldev->pf[i].dev->priv.eswitch);
clear_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
@@ -753,6 +832,7 @@ int mlx5_deactivate_lag(struct mlx5_lag *ldev)
bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
#ifdef CONFIG_MLX5_ESWITCH
struct mlx5_core_dev *dev;
u8 mode;
@@ -760,30 +840,29 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
bool roce_support;
int i;
- for (i = 0; i < ldev->ports; i++)
- if (!ldev->pf[i].dev)
- return false;
+ if (first_idx < 0 || mlx5_lag_num_devs(ldev) != ldev->ports)
+ return false;
#ifdef CONFIG_MLX5_ESWITCH
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
dev = ldev->pf[i].dev;
if (mlx5_eswitch_num_vfs(dev->priv.eswitch) && !is_mdev_switchdev_mode(dev))
return false;
}
- dev = ldev->pf[MLX5_LAG_P1].dev;
+ dev = ldev->pf[first_idx].dev;
mode = mlx5_eswitch_mode(dev);
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode)
return false;
#else
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (mlx5_sriov_is_enabled(ldev->pf[i].dev))
return false;
#endif
- roce_support = mlx5_get_roce_state(ldev->pf[MLX5_LAG_P1].dev);
- for (i = 1; i < ldev->ports; i++)
+ roce_support = mlx5_get_roce_state(ldev->pf[first_idx].dev);
+ mlx5_ldev_for_each(i, first_idx + 1, ldev)
if (mlx5_get_roce_state(ldev->pf[i].dev) != roce_support)
return false;
@@ -794,10 +873,7 @@ void mlx5_lag_add_devices(struct mlx5_lag *ldev)
{
int i;
- for (i = 0; i < ldev->ports; i++) {
- if (!ldev->pf[i].dev)
- continue;
-
+ mlx5_ldev_for_each(i, 0, ldev) {
if (ldev->pf[i].dev->priv.flags &
MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
continue;
@@ -811,10 +887,7 @@ void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
{
int i;
- for (i = 0; i < ldev->ports; i++) {
- if (!ldev->pf[i].dev)
- continue;
-
+ mlx5_ldev_for_each(i, 0, ldev) {
if (ldev->pf[i].dev->priv.flags &
MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
continue;
@@ -827,11 +900,16 @@ void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
void mlx5_disable_lag(struct mlx5_lag *ldev)
{
bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev0;
bool roce_lag;
int err;
int i;
+ if (idx < 0)
+ return;
+
+ dev0 = ldev->pf[idx].dev;
roce_lag = __mlx5_lag_is_roce(ldev);
if (shared_fdb) {
@@ -841,7 +919,7 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
}
- for (i = 1; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, idx + 1, ldev)
mlx5_nic_vport_disable_roce(ldev->pf[i].dev);
}
@@ -853,17 +931,21 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
mlx5_lag_add_devices(ldev);
if (shared_fdb)
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (!(ldev->pf[i].dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
}
-static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
+bool mlx5_lag_shared_fdb_supported(struct mlx5_lag *ldev)
{
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_core_dev *dev;
int i;
- for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) {
+ if (idx < 0)
+ return false;
+
+ mlx5_ldev_for_each(i, idx + 1, ldev) {
dev = ldev->pf[i].dev;
if (is_mdev_switchdev_mode(dev) &&
mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
@@ -875,7 +957,7 @@ static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
return false;
}
- dev = ldev->pf[MLX5_LAG_P1].dev;
+ dev = ldev->pf[idx].dev;
if (is_mdev_switchdev_mode(dev) &&
mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
mlx5_esw_offloads_devcom_is_ready(dev->priv.eswitch) &&
@@ -891,11 +973,11 @@ static bool mlx5_lag_is_roce_lag(struct mlx5_lag *ldev)
bool roce_lag = true;
int i;
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
roce_lag = roce_lag && !mlx5_sriov_is_enabled(ldev->pf[i].dev);
#ifdef CONFIG_MLX5_ESWITCH
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
roce_lag = roce_lag && is_mdev_legacy_mode(ldev->pf[i].dev);
#endif
@@ -916,12 +998,18 @@ static bool mlx5_lag_should_disable_lag(struct mlx5_lag *ldev, bool do_bond)
static void mlx5_do_bond(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct lag_tracker tracker = { };
+ struct mlx5_core_dev *dev0;
+ struct net_device *ndev;
bool do_bond, roce_lag;
int err;
int i;
+ if (idx < 0)
+ return;
+
+ dev0 = ldev->pf[idx].dev;
if (!mlx5_lag_is_ready(ldev)) {
do_bond = false;
} else {
@@ -935,7 +1023,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
}
if (do_bond && !__mlx5_lag_is_active(ldev)) {
- bool shared_fdb = mlx5_shared_fdb_supported(ldev);
+ bool shared_fdb = mlx5_lag_shared_fdb_supported(ldev);
roce_lag = mlx5_lag_is_roce_lag(ldev);
@@ -949,12 +1037,16 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
if (err) {
if (shared_fdb || roce_lag)
mlx5_lag_add_devices(ldev);
+ if (shared_fdb) {
+ mlx5_ldev_for_each(i, 0, ldev)
+ mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
+ }
return;
} else if (roce_lag) {
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- for (i = 1; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, idx + 1, ldev) {
if (mlx5_get_roce_state(ldev->pf[i].dev))
mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
}
@@ -964,7 +1056,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
if (err)
break;
@@ -975,12 +1067,22 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
mlx5_rescan_drivers_locked(dev0);
mlx5_deactivate_lag(ldev);
mlx5_lag_add_devices(ldev);
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
mlx5_core_err(dev0, "Failed to enable lag\n");
return;
}
}
+ if (tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
+ ndev = mlx5_lag_active_backup_get_netdev(dev0);
+		/* Only sriov and roce lag should have tracker->tx_type
+		 * set, so there is no need to check the mode.
+		 */
+ blocking_notifier_call_chain(&dev0->priv.lag_nh,
+ MLX5_DRIVER_EVENT_ACTIVE_BACKUP_LAG_CHANGE_LOWERSTATE,
+ ndev);
+ dev_put(ndev);
+ }
} else if (mlx5_lag_should_modify_lag(ldev, do_bond)) {
mlx5_modify_lag(ldev, &tracker);
} else if (mlx5_lag_should_disable_lag(ldev, do_bond)) {
@@ -998,12 +1100,9 @@ struct mlx5_devcom_comp_dev *mlx5_lag_get_devcom_comp(struct mlx5_lag *ldev)
int i;
mutex_lock(&ldev->lock);
- for (i = 0; i < ldev->ports; i++) {
- if (ldev->pf[i].dev) {
- devcom = ldev->pf[i].dev->priv.hca_devcom_comp;
- break;
- }
- }
+ i = mlx5_get_next_ldev_func(ldev, 0);
+ if (i < MLX5_MAX_PORTS)
+ devcom = ldev->pf[i].dev->priv.hca_devcom_comp;
mutex_unlock(&ldev->lock);
return devcom;
}
@@ -1056,7 +1155,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
u8 bond_status = 0;
int num_slaves = 0;
int changed = 0;
- int idx;
+ int i, idx = -1;
if (!netif_is_lag_master(upper))
return 0;
@@ -1071,8 +1170,13 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
*/
rcu_read_lock();
for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
- idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
- if (idx >= 0) {
+ mlx5_ldev_for_each(i, 0, ldev) {
+ if (ldev->pf[i].netdev == ndev_tmp) {
+ idx++;
+ break;
+ }
+ }
+ if (i < MLX5_MAX_PORTS) {
slave = bond_slave_get_rcu(ndev_tmp);
if (slave)
has_inactive |= bond_is_slave_inactive(slave);
@@ -1222,15 +1326,12 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
}
static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
- struct mlx5_core_dev *dev,
- struct net_device *netdev)
+ struct mlx5_core_dev *dev,
+ struct net_device *netdev)
{
unsigned int fn = mlx5_get_dev_index(dev);
unsigned long flags;
- if (fn >= ldev->ports)
- return;
-
spin_lock_irqsave(&lag_lock, flags);
ldev->pf[fn].netdev = netdev;
ldev->tracker.netdev_state[fn].link_up = 0;
@@ -1245,7 +1346,7 @@ static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
int i;
spin_lock_irqsave(&lag_lock, flags);
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
if (ldev->pf[i].netdev == netdev) {
ldev->pf[i].netdev = NULL;
break;
@@ -1255,13 +1356,10 @@ static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
}
static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
- struct mlx5_core_dev *dev)
+ struct mlx5_core_dev *dev)
{
unsigned int fn = mlx5_get_dev_index(dev);
- if (fn >= ldev->ports)
- return;
-
ldev->pf[fn].dev = dev;
dev->priv.lag = ldev;
}
@@ -1269,16 +1367,13 @@ static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev,
struct mlx5_core_dev *dev)
{
- int i;
-
- for (i = 0; i < ldev->ports; i++)
- if (ldev->pf[i].dev == dev)
- break;
+ int fn;
- if (i == ldev->ports)
+ fn = mlx5_get_dev_index(dev);
+ if (ldev->pf[fn].dev != dev)
return;
- ldev->pf[i].dev = NULL;
+ ldev->pf[fn].dev = NULL;
dev->priv.lag = NULL;
}
@@ -1315,6 +1410,37 @@ static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
return 0;
}
+static void mlx5_lag_unregister_hca_devcom_comp(struct mlx5_core_dev *dev)
+{
+ mlx5_devcom_unregister_component(dev->priv.hca_devcom_comp);
+}
+
+static int mlx5_lag_register_hca_devcom_comp(struct mlx5_core_dev *dev)
+{
+ struct mlx5_devcom_match_attr attr = {
+ .flags = MLX5_DEVCOM_MATCH_FLAGS_NS,
+ .net = mlx5_core_net(dev),
+ };
+ u8 len __always_unused;
+
+ mlx5_query_nic_sw_system_image_guid(dev, attr.key.buf, &len);
+
+	/* This component is used to sync adding core_dev to lag_dev and to sync
+ * changes of mlx5_adev_devices between LAG layer and other layers.
+ */
+ dev->priv.hca_devcom_comp =
+ mlx5_devcom_register_component(dev->priv.devc,
+ MLX5_DEVCOM_HCA_PORTS,
+ &attr, NULL, dev);
+ if (!dev->priv.hca_devcom_comp) {
+ mlx5_core_err(dev,
+ "Failed to register devcom HCA component.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
@@ -1336,6 +1462,7 @@ recheck:
}
mlx5_ldev_remove_mdev(ldev, dev);
mutex_unlock(&ldev->lock);
+ mlx5_lag_unregister_hca_devcom_comp(dev);
mlx5_ldev_put(ldev);
}
@@ -1346,7 +1473,7 @@ void mlx5_lag_add_mdev(struct mlx5_core_dev *dev)
if (!mlx5_lag_is_supported(dev))
return;
- if (IS_ERR_OR_NULL(dev->priv.hca_devcom_comp))
+ if (mlx5_lag_register_hca_devcom_comp(dev))
return;
recheck:
@@ -1386,7 +1513,7 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
struct net_device *netdev)
{
struct mlx5_lag *ldev;
- int i;
+ int num = 0;
ldev = mlx5_lag_dev(dev);
if (!ldev)
@@ -1394,17 +1521,33 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
mutex_lock(&ldev->lock);
mlx5_ldev_add_netdev(ldev, dev, netdev);
-
- for (i = 0; i < ldev->ports; i++)
- if (!ldev->pf[i].netdev)
- break;
-
- if (i >= ldev->ports)
+ num = mlx5_lag_num_netdevs(ldev);
+ if (num >= ldev->ports)
set_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);
mutex_unlock(&ldev->lock);
mlx5_queue_bond_work(ldev, 0);
}
+int mlx5_get_pre_ldev_func(struct mlx5_lag *ldev, int start_idx, int end_idx)
+{
+ int i;
+
+ for (i = start_idx; i >= end_idx; i--)
+ if (ldev->pf[i].dev)
+ return i;
+ return -1;
+}
+
+int mlx5_get_next_ldev_func(struct mlx5_lag *ldev, int start_idx)
+{
+ int i;
+
+ for (i = start_idx; i < MLX5_MAX_PORTS; i++)
+ if (ldev->pf[i].dev)
+ return i;
+ return MLX5_MAX_PORTS;
+}
+
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
@@ -1455,12 +1598,13 @@ bool mlx5_lag_is_master(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
unsigned long flags;
- bool res;
+ bool res = false;
+ int idx;
spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
- res = ldev && __mlx5_lag_is_active(ldev) &&
- dev == ldev->pf[MLX5_LAG_P1].dev;
+ idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ res = ldev && __mlx5_lag_is_active(ldev) && idx >= 0 && dev == ldev->pf[idx].dev;
spin_unlock_irqrestore(&lag_lock, flags);
return res;
@@ -1543,7 +1687,7 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
if (!(ldev && __mlx5_lag_is_roce(ldev)))
goto unlock;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
if (ldev->pf[i].netdev == slave) {
port = i;
break;
@@ -1582,13 +1726,13 @@ struct mlx5_core_dev *mlx5_lag_get_next_peer_mdev(struct mlx5_core_dev *dev, int
if (!ldev)
goto unlock;
- if (*i == ldev->ports)
+ if (*i == MLX5_MAX_PORTS)
goto unlock;
- for (idx = *i; idx < ldev->ports; idx++)
+ mlx5_ldev_for_each(idx, *i, ldev)
if (ldev->pf[idx].dev != dev)
break;
- if (idx == ldev->ports) {
+ if (idx == MLX5_MAX_PORTS) {
*i = idx;
goto unlock;
}
@@ -1609,10 +1753,10 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
{
int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
struct mlx5_core_dev **mdev;
+ int ret = 0, i, j, idx = 0;
struct mlx5_lag *ldev;
unsigned long flags;
int num_ports;
- int ret, i, j;
void *out;
out = kvzalloc(outlen, GFP_KERNEL);
@@ -1631,8 +1775,8 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
ldev = mlx5_lag_dev(dev);
if (ldev && __mlx5_lag_is_active(ldev)) {
num_ports = ldev->ports;
- for (i = 0; i < ldev->ports; i++)
- mdev[i] = ldev->pf[i].dev;
+ mlx5_ldev_for_each(i, 0, ldev)
+ mdev[idx++] = ldev->pf[i].dev;
} else {
num_ports = 1;
mdev[MLX5_LAG_P1] = dev;
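
Across lag.c, mp.c, mpesw.c and port_sel.c the first PF is no longer assumed to sit at pf[MLX5_LAG_P1]; it is resolved through mlx5_lag_get_dev_index_by_seq() and callers bail out when no slot is populated. The recurring pattern, shown here only as an illustrative sketch rather than an additional hunk, is:

	int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
	struct mlx5_core_dev *dev0;

	if (idx < 0)
		return;				/* no PF bound to the LAG yet */
	dev0 = ldev->pf[idx].dev;		/* first populated slot acts as dev0 */
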
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index 50fcb1eee574..4918eee2b3da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -67,6 +67,7 @@ struct mlx5_lag {
struct workqueue_struct *wq;
struct delayed_work bond_work;
struct notifier_block nb;
+ possible_net_t net;
struct lag_mp lag_mp;
struct mlx5_lag_port_sel port_sel;
/* Protect lag fields/state changes */
@@ -92,6 +93,7 @@ mlx5_lag_is_ready(struct mlx5_lag *ldev)
return test_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);
}
+bool mlx5_lag_shared_fdb_supported(struct mlx5_lag *ldev);
bool mlx5_lag_check_prereq(struct mlx5_lag *ldev);
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker);
@@ -103,7 +105,7 @@ int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
struct net_device *ndev);
char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags);
-void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
+void mlx5_infer_tx_enabled(struct lag_tracker *tracker, struct mlx5_lag *ldev,
u8 *ports, int *num_enabled);
void mlx5_ldev_add_debugfs(struct mlx5_core_dev *dev);
@@ -119,9 +121,24 @@ static inline bool mlx5_lag_is_supported(struct mlx5_core_dev *dev)
if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
!MLX5_CAP_GEN(dev, lag_master) ||
MLX5_CAP_GEN(dev, num_lag_ports) < 2 ||
+ mlx5_get_dev_index(dev) >= MLX5_MAX_PORTS ||
MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS)
return false;
return true;
}
+#define mlx5_ldev_for_each(i, start_index, ldev) \
+ for (int tmp = start_index; tmp = mlx5_get_next_ldev_func(ldev, tmp), \
+ i = tmp, tmp < MLX5_MAX_PORTS; tmp++)
+
+#define mlx5_ldev_for_each_reverse(i, start_index, end_index, ldev) \
+ for (int tmp = start_index, tmp1 = end_index; \
+ tmp = mlx5_get_pre_ldev_func(ldev, tmp, tmp1), \
+ i = tmp, tmp >= tmp1; tmp--)
+
+int mlx5_get_pre_ldev_func(struct mlx5_lag *ldev, int start_idx, int end_idx);
+int mlx5_get_next_ldev_func(struct mlx5_lag *ldev, int start_idx);
+int mlx5_lag_get_dev_index_by_seq(struct mlx5_lag *ldev, int seq);
+int mlx5_lag_num_devs(struct mlx5_lag *ldev);
+int mlx5_lag_num_netdevs(struct mlx5_lag *ldev);
#endif /* __MLX5_LAG_H__ */
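
The mlx5_ldev_for_each() and mlx5_ldev_for_each_reverse() helpers declared above visit only pf[] slots that actually hold a device, so gaps left by PFs that were never added (or were already removed) are skipped. A minimal usage sketch with a hypothetical caller — the function name and message are illustrative, while the fields and helpers come from the hunks above:

	static void example_walk_lag_devs(struct mlx5_lag *ldev)
	{
		int i;

		/* i is set to each populated slot index in turn */
		mlx5_ldev_for_each(i, 0, ldev)
			mlx5_core_info(ldev->pf[i].dev,
				       "LAG member found in slot %d\n", i);
	}
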
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
index b1aa494c76ba..aee17fcf3b36 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
@@ -17,7 +17,10 @@ static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
#define MLX5_LAG_MULTIPATH_OFFLOADS_SUPPORTED_PORTS 2
static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
{
- if (!mlx5_lag_is_ready(ldev))
+ int idx0 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ int idx1 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P2);
+
+ if (idx0 < 0 || idx1 < 0 || !mlx5_lag_is_ready(ldev))
return false;
if (__mlx5_lag_is_active(ldev) && !__mlx5_lag_is_multipath(ldev))
@@ -26,8 +29,8 @@ static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
if (ldev->ports > MLX5_LAG_MULTIPATH_OFFLOADS_SUPPORTED_PORTS)
return false;
- return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev,
- ldev->pf[MLX5_LAG_P2].dev);
+ return mlx5_esw_multipath_prereq(ldev->pf[idx0].dev,
+ ldev->pf[idx1].dev);
}
bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
@@ -50,43 +53,45 @@ bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev,
enum mlx5_lag_port_affinity port)
{
+ int idx0 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ int idx1 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P2);
struct lag_tracker tracker = {};
- if (!__mlx5_lag_is_multipath(ldev))
+ if (idx0 < 0 || idx1 < 0 || !__mlx5_lag_is_multipath(ldev))
return;
switch (port) {
case MLX5_LAG_NORMAL_AFFINITY:
- tracker.netdev_state[MLX5_LAG_P1].tx_enabled = true;
- tracker.netdev_state[MLX5_LAG_P2].tx_enabled = true;
- tracker.netdev_state[MLX5_LAG_P1].link_up = true;
- tracker.netdev_state[MLX5_LAG_P2].link_up = true;
+ tracker.netdev_state[idx0].tx_enabled = true;
+ tracker.netdev_state[idx1].tx_enabled = true;
+ tracker.netdev_state[idx0].link_up = true;
+ tracker.netdev_state[idx1].link_up = true;
break;
case MLX5_LAG_P1_AFFINITY:
- tracker.netdev_state[MLX5_LAG_P1].tx_enabled = true;
- tracker.netdev_state[MLX5_LAG_P1].link_up = true;
- tracker.netdev_state[MLX5_LAG_P2].tx_enabled = false;
- tracker.netdev_state[MLX5_LAG_P2].link_up = false;
+ tracker.netdev_state[idx0].tx_enabled = true;
+ tracker.netdev_state[idx0].link_up = true;
+ tracker.netdev_state[idx1].tx_enabled = false;
+ tracker.netdev_state[idx1].link_up = false;
break;
case MLX5_LAG_P2_AFFINITY:
- tracker.netdev_state[MLX5_LAG_P1].tx_enabled = false;
- tracker.netdev_state[MLX5_LAG_P1].link_up = false;
- tracker.netdev_state[MLX5_LAG_P2].tx_enabled = true;
- tracker.netdev_state[MLX5_LAG_P2].link_up = true;
+ tracker.netdev_state[idx0].tx_enabled = false;
+ tracker.netdev_state[idx0].link_up = false;
+ tracker.netdev_state[idx1].tx_enabled = true;
+ tracker.netdev_state[idx1].link_up = true;
break;
default:
- mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev,
+ mlx5_core_warn(ldev->pf[idx0].dev,
"Invalid affinity port %d", port);
return;
}
- if (tracker.netdev_state[MLX5_LAG_P1].tx_enabled)
- mlx5_notifier_call_chain(ldev->pf[MLX5_LAG_P1].dev->priv.events,
+ if (tracker.netdev_state[idx0].tx_enabled)
+ mlx5_notifier_call_chain(ldev->pf[idx0].dev->priv.events,
MLX5_DEV_EVENT_PORT_AFFINITY,
(void *)0);
- if (tracker.netdev_state[MLX5_LAG_P2].tx_enabled)
- mlx5_notifier_call_chain(ldev->pf[MLX5_LAG_P2].dev->priv.events,
+ if (tracker.netdev_state[idx1].tx_enabled)
+ mlx5_notifier_call_chain(ldev->pf[idx1].dev->priv.events,
MLX5_DEV_EVENT_PORT_AFFINITY,
(void *)0);
@@ -150,9 +155,14 @@ mlx5_lag_get_next_fib_dev(struct mlx5_lag *ldev,
static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event,
struct fib_entry_notifier_info *fen_info)
{
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct net_device *nh_dev0, *nh_dev1;
struct fib_info *fi = fen_info->fi;
struct lag_mp *mp = &ldev->lag_mp;
+ int i, dev_idx = 0;
+
+ if (idx < 0)
+ return;
/* Handle delete event */
if (event == FIB_EVENT_ENTRY_DEL) {
@@ -179,17 +189,19 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event,
}
if (nh_dev0 == nh_dev1) {
- mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev,
+ mlx5_core_warn(ldev->pf[idx].dev,
"Multipath offload doesn't support routes with multiple nexthops of the same device");
return;
}
if (!nh_dev1) {
if (__mlx5_lag_is_active(ldev)) {
- int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev0);
-
- i++;
- mlx5_lag_set_port_affinity(ldev, i);
+ mlx5_ldev_for_each(i, 0, ldev) {
+ dev_idx++;
+ if (ldev->pf[i].netdev == nh_dev0)
+ break;
+ }
+ mlx5_lag_set_port_affinity(ldev, dev_idx);
mlx5_lag_fib_set(mp, fi, fen_info->dst, fen_info->dst_len);
}
@@ -214,6 +226,7 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
struct fib_info *fi)
{
struct lag_mp *mp = &ldev->lag_mp;
+ int i, dev_idx = 0;
/* Check the nh event is related to the route */
if (!mp->fib.mfi || mp->fib.mfi != fi)
@@ -221,11 +234,15 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
/* nh added/removed */
if (event == FIB_EVENT_NH_DEL) {
- int i = mlx5_lag_dev_get_netdev_idx(ldev, fib_nh->fib_nh_dev);
+ mlx5_ldev_for_each(i, 0, ldev) {
+ if (ldev->pf[i].netdev == fib_nh->fib_nh_dev)
+ break;
+ dev_idx++;
+ }
- if (i >= 0) {
- i = (i + 1) % 2 + 1; /* peer port */
- mlx5_lag_set_port_affinity(ldev, i);
+ if (dev_idx >= 0) {
+ dev_idx = (dev_idx + 1) % 2 + 1; /* peer port */
+ mlx5_lag_set_port_affinity(ldev, dev_idx);
}
} else if (event == FIB_EVENT_NH_ADD &&
fib_info_num_path(fi) == 2) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
index 571ea26edd0c..aad52d3a90e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -15,7 +15,7 @@ static void mlx5_mpesw_metadata_cleanup(struct mlx5_lag *ldev)
u32 pf_metadata;
int i;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
dev = ldev->pf[i].dev;
esw = dev->priv.eswitch;
pf_metadata = ldev->lag_mpesw.pf_metadata[i];
@@ -36,7 +36,7 @@ static int mlx5_mpesw_metadata_set(struct mlx5_lag *ldev)
u32 pf_metadata;
int i, err;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
dev = ldev->pf[i].dev;
esw = dev->priv.eswitch;
pf_metadata = mlx5_esw_match_metadata_alloc(esw);
@@ -52,7 +52,7 @@ static int mlx5_mpesw_metadata_set(struct mlx5_lag *ldev)
goto err_metadata;
}
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
dev = ldev->pf[i].dev;
mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_MULTIPORT_ESW,
(void *)0);
@@ -65,23 +65,22 @@ err_metadata:
return err;
}
-#define MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS 4
static int enable_mpesw(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev0;
int err;
int i;
- if (ldev->mode != MLX5_LAG_MODE_NONE)
+ if (idx < 0 || ldev->mode != MLX5_LAG_MODE_NONE)
return -EINVAL;
- if (ldev->ports > MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS)
- return -EOPNOTSUPP;
-
+ dev0 = ldev->pf[idx].dev;
if (mlx5_eswitch_mode(dev0) != MLX5_ESWITCH_OFFLOADS ||
!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table) ||
!MLX5_CAP_GEN(dev0, create_lag_when_not_master_up) ||
- !mlx5_lag_check_prereq(ldev))
+ !mlx5_lag_check_prereq(ldev) ||
+ !mlx5_lag_shared_fdb_supported(ldev))
return -EOPNOTSUPP;
err = mlx5_mpesw_metadata_set(ldev);
@@ -98,7 +97,7 @@ static int enable_mpesw(struct mlx5_lag *ldev)
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
if (err)
goto err_rescan_drivers;
@@ -112,7 +111,7 @@ err_rescan_drivers:
mlx5_deactivate_lag(ldev);
err_add_devices:
mlx5_lag_add_devices(ldev);
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
mlx5_mpesw_metadata_cleanup(ldev);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
index ab2717012b79..d832a12ffec0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
@@ -39,15 +39,18 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
struct mlx5_lag_definer *lag_definer,
u8 *ports)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_namespace *ns;
- int err, i;
- int idx;
- int j;
+ struct mlx5_core_dev *dev;
+ int err, i, j, k, idx;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev = ldev->pf[first_idx].dev;
ft_attr.max_fte = ldev->ports * ldev->buckets;
ft_attr.level = MLX5_LAG_FT_LEVEL_DEFINER;
@@ -74,7 +77,7 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
flow_act.flags |= FLOW_ACT_NO_APPEND;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < ldev->buckets; j++) {
u8 affinity;
@@ -88,13 +91,13 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
&dest, 1);
if (IS_ERR(lag_definer->rules[idx])) {
err = PTR_ERR(lag_definer->rules[idx]);
- do {
+ mlx5_ldev_for_each_reverse(k, i, 0, ldev) {
while (j--) {
- idx = i * ldev->buckets + j;
+ idx = k * ldev->buckets + j;
mlx5_del_flow_rules(lag_definer->rules[idx]);
}
j = ldev->buckets;
- } while (i--);
+ }
goto destroy_fg;
}
}
@@ -295,11 +298,16 @@ static struct mlx5_lag_definer *
mlx5_lag_create_definer(struct mlx5_lag *ldev, enum netdev_lag_hash hash,
enum mlx5_traffic_types tt, bool tunnel, u8 *ports)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_lag_definer *lag_definer;
+ struct mlx5_core_dev *dev;
u32 *match_definer_mask;
int format_id, err;
+ if (first_idx < 0)
+ return ERR_PTR(-EINVAL);
+
+ dev = ldev->pf[first_idx].dev;
lag_definer = kzalloc(sizeof(*lag_definer), GFP_KERNEL);
if (!lag_definer)
return ERR_PTR(-ENOMEM);
@@ -341,12 +349,15 @@ free_lag_definer:
static void mlx5_lag_destroy_definer(struct mlx5_lag *ldev,
struct mlx5_lag_definer *lag_definer)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
- int idx;
- int i;
- int j;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev;
+ int idx, i, j;
- for (i = 0; i < ldev->ports; i++) {
+ if (first_idx < 0)
+ return;
+
+ dev = ldev->pf[first_idx].dev;
+ mlx5_ldev_for_each(i, first_idx, ldev) {
for (j = 0; j < ldev->buckets; j++) {
idx = i * ldev->buckets + j;
mlx5_del_flow_rules(lag_definer->rules[idx]);
@@ -501,10 +512,15 @@ static void mlx5_lag_set_outer_ttc_params(struct mlx5_lag *ldev,
static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct ttc_params ttc_params = {};
+ struct mlx5_core_dev *dev;
+
+ if (first_idx < 0)
+ return -EINVAL;
+ dev = ldev->pf[first_idx].dev;
mlx5_lag_set_outer_ttc_params(ldev, &ttc_params);
port_sel->outer.ttc = mlx5_create_ttc_table(dev, &ttc_params);
return PTR_ERR_OR_ZERO(port_sel->outer.ttc);
@@ -512,10 +528,15 @@ static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev)
static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct ttc_params ttc_params = {};
+ struct mlx5_core_dev *dev;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev = ldev->pf[first_idx].dev;
mlx5_lag_set_inner_ttc_params(ldev, &ttc_params);
port_sel->inner.ttc = mlx5_create_inner_ttc_table(dev, &ttc_params);
return PTR_ERR_OR_ZERO(port_sel->inner.ttc);
@@ -530,7 +551,7 @@ int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
set_tt_map(port_sel, hash_type);
err = mlx5_lag_create_definers(ldev, hash_type, ports);
if (err)
- return err;
+ goto clear_port_sel;
if (port_sel->tunnel) {
err = mlx5_lag_create_inner_ttc_table(ldev);
@@ -549,6 +570,8 @@ destroy_inner:
mlx5_destroy_ttc_table(port_sel->inner.ttc);
destroy_definers:
mlx5_lag_destroy_definers(ldev);
+clear_port_sel:
+ memset(port_sel, 0, sizeof(*port_sel));
return err;
}
@@ -565,7 +588,7 @@ static int __mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev,
dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < ldev->buckets; j++) {
idx = i * ldev->buckets + j;
if (ldev->v2p_map[idx] == ports[idx])
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
index 58bd749b5e4d..129725159a93 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
@@ -100,7 +100,7 @@ static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
- MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.bfreg.up->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
@@ -129,7 +129,7 @@ static int mlx5_aso_create_cq(struct mlx5_core_dev *mdev, int numa_node,
return -ENOMEM;
MLX5_SET(cqc, cqc_data, log_cq_size, 1);
- MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.bfreg.up->index);
if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
MLX5_SET(cqc, cqc_data, cqe_sz, CQE_STRIDE_128_PAD);
@@ -163,7 +163,7 @@ static int mlx5_aso_alloc_sq(struct mlx5_core_dev *mdev, int numa_node,
struct mlx5_wq_param param;
int err;
- sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+ sq->uar_map = mdev->priv.bfreg.map;
param.db_numa_node = numa_node;
param.buf_numa_node = numa_node;
@@ -203,7 +203,7 @@ static int create_aso_sq(struct mlx5_core_dev *mdev, int pdn,
MLX5_SET(sqc, sqc, ts_format, ts_format);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
- MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
+ MLX5_SET(wq, wq, uar_page, mdev->priv.bfreg.index);
MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index b306ae79bf97..0ba0ef8bae42 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -43,6 +43,8 @@
#include <linux/cpufeature.h>
#endif /* CONFIG_X86 */
+#define MLX5_RT_CLOCK_IDENTITY_SIZE MLX5_FLD_SZ_BYTES(mrtcq_reg, rt_clock_identity)
+
enum {
MLX5_PIN_MODE_IN = 0x0,
MLX5_PIN_MODE_OUT = 0x1,
@@ -77,6 +79,56 @@ enum {
MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX = 200000,
};
+struct mlx5_clock_dev_state {
+ struct mlx5_core_dev *mdev;
+ struct mlx5_devcom_comp_dev *compdev;
+ struct mlx5_nb pps_nb;
+ struct work_struct out_work;
+};
+
+struct mlx5_clock_priv {
+ struct mlx5_clock clock;
+ struct mlx5_core_dev *mdev;
+ struct mutex lock; /* protect mdev and used in PTP callbacks */
+ struct mlx5_core_dev *event_mdev;
+};
+
+static struct mlx5_clock_priv *clock_priv(struct mlx5_clock *clock)
+{
+ return container_of(clock, struct mlx5_clock_priv, clock);
+}
+
+static void mlx5_clock_lockdep_assert(struct mlx5_clock *clock)
+{
+ if (!clock->shared)
+ return;
+
+ lockdep_assert(lockdep_is_held(&clock_priv(clock)->lock));
+}
+
+static struct mlx5_core_dev *mlx5_clock_mdev_get(struct mlx5_clock *clock)
+{
+ mlx5_clock_lockdep_assert(clock);
+
+ return clock_priv(clock)->mdev;
+}
+
+static void mlx5_clock_lock(struct mlx5_clock *clock)
+{
+ if (!clock->shared)
+ return;
+
+ mutex_lock(&clock_priv(clock)->lock);
+}
+
+static void mlx5_clock_unlock(struct mlx5_clock *clock)
+{
+ if (!clock->shared)
+ return;
+
+ mutex_unlock(&clock_priv(clock)->lock);
+}
+
static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
{
return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev));
@@ -94,6 +146,22 @@ static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
}
+static int mlx5_clock_identity_get(struct mlx5_core_dev *mdev,
+ u8 identify[MLX5_RT_CLOCK_IDENTITY_SIZE])
+{
+ u32 out[MLX5_ST_SZ_DW(mrtcq_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(mrtcq_reg)] = {};
+ int err;
+
+ err = mlx5_core_access_reg(mdev, in, sizeof(in),
+ out, sizeof(out), MLX5_REG_MRTCQ, 0, 0);
+ if (!err)
+ memcpy(identify, MLX5_ADDR_OF(mrtcq_reg, out, rt_clock_identity),
+ MLX5_RT_CLOCK_IDENTITY_SIZE);
+
+ return err;
+}
+
static u32 mlx5_ptp_shift_constant(u32 dev_freq_khz)
{
/* Optimal shift constant leads to corrections above just 1 scaled ppm.
@@ -119,21 +187,30 @@ static u32 mlx5_ptp_shift_constant(u32 dev_freq_khz)
ilog2((U32_MAX / NSEC_PER_MSEC) * dev_freq_khz));
}
+static s32 mlx5_clock_getmaxphase(struct mlx5_core_dev *mdev)
+{
+ return MLX5_CAP_MCAM_FEATURE(mdev, mtutc_time_adjustment_extended_range) ?
+ MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX :
+ MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX;
+}
+
static s32 mlx5_ptp_getmaxphase(struct ptp_clock_info *ptp)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_core_dev *mdev;
+ s32 ret;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+ ret = mlx5_clock_getmaxphase(mdev);
+ mlx5_clock_unlock(clock);
- return MLX5_CAP_MCAM_FEATURE(mdev, mtutc_time_adjustment_extended_range) ?
- MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX :
- MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX;
+ return ret;
}
static bool mlx5_is_mtutc_time_adj_cap(struct mlx5_core_dev *mdev, s64 delta)
{
- s64 max = mlx5_ptp_getmaxphase(&mdev->clock.ptp_info);
+ s64 max = mlx5_clock_getmaxphase(mdev);
if (delta < -max || delta > max)
return false;
@@ -170,27 +247,24 @@ static bool mlx5_is_ptm_source_time_available(struct mlx5_core_dev *dev)
return !!MLX5_GET(mtptm_reg, out, psta);
}
-static int mlx5_mtctr_syncdevicetime(ktime_t *device_time,
- struct system_counterval_t *sys_counterval,
- void *ctx)
+static int mlx5_mtctr_read(struct mlx5_core_dev *mdev,
+ bool real_time_mode,
+ struct system_counterval_t *sys_counterval,
+ u64 *device)
{
u32 out[MLX5_ST_SZ_DW(mtctr_reg)] = {0};
u32 in[MLX5_ST_SZ_DW(mtctr_reg)] = {0};
- struct mlx5_core_dev *mdev = ctx;
- bool real_time_mode;
- u64 host, device;
+ u64 host;
int err;
- real_time_mode = mlx5_real_time_mode(mdev);
-
MLX5_SET(mtctr_reg, in, first_clock_timestamp_request,
MLX5_MTCTR_REQUEST_PTM_ROOT_CLOCK);
MLX5_SET(mtctr_reg, in, second_clock_timestamp_request,
real_time_mode ? MLX5_MTCTR_REQUEST_REAL_TIME_CLOCK :
- MLX5_MTCTR_REQUEST_FREE_RUNNING_COUNTER);
+ MLX5_MTCTR_REQUEST_FREE_RUNNING_COUNTER);
- err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), MLX5_REG_MTCTR,
- 0, 0);
+ err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MTCTR, 0, 0);
if (err)
return err;
@@ -204,12 +278,47 @@ static int mlx5_mtctr_syncdevicetime(ktime_t *device_time,
.cs_id = CSID_X86_ART,
.use_nsecs = true,
};
+ *device = MLX5_GET64(mtctr_reg, out, second_clock_timestamp);
+
+ return 0;
+}
+
+static int mlx5_mtctr_syncdevicetime(ktime_t *device_time,
+ struct system_counterval_t *sys_counterval,
+ void *ctx)
+{
+ struct mlx5_core_dev *mdev = ctx;
+ bool real_time_mode;
+ u64 device;
+ int err;
+
+ real_time_mode = mlx5_real_time_mode(mdev);
+
+ err = mlx5_mtctr_read(mdev, real_time_mode, sys_counterval, &device);
+ if (err)
+ return err;
- device = MLX5_GET64(mtctr_reg, out, second_clock_timestamp);
if (real_time_mode)
*device_time = ns_to_ktime(REAL_TIME_TO_NS(device >> 32, device & U32_MAX));
else
- *device_time = mlx5_timecounter_cyc2time(&mdev->clock, device);
+ *device_time = mlx5_timecounter_cyc2time(mdev->clock, device);
+
+ return 0;
+}
+
+static int
+mlx5_mtctr_syncdevicecyclestime(ktime_t *device_time,
+ struct system_counterval_t *sys_counterval,
+ void *ctx)
+{
+ struct mlx5_core_dev *mdev = ctx;
+ u64 device;
+ int err;
+
+ err = mlx5_mtctr_read(mdev, false, sys_counterval, &device);
+ if (err)
+ return err;
+ *device_time = ns_to_ktime(device);
return 0;
}
@@ -220,16 +329,49 @@ static int mlx5_ptp_getcrosststamp(struct ptp_clock_info *ptp,
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
struct system_time_snapshot history_begin = {0};
struct mlx5_core_dev *mdev;
+ int err;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
- if (!mlx5_is_ptm_source_time_available(mdev))
- return -EBUSY;
+ if (!mlx5_is_ptm_source_time_available(mdev)) {
+ err = -EBUSY;
+ goto unlock;
+ }
ktime_get_snapshot(&history_begin);
- return get_device_system_crosststamp(mlx5_mtctr_syncdevicetime, mdev,
- &history_begin, cts);
+ err = get_device_system_crosststamp(mlx5_mtctr_syncdevicetime, mdev,
+ &history_begin, cts);
+unlock:
+ mlx5_clock_unlock(clock);
+ return err;
+}
+
+static int mlx5_ptp_getcrosscycles(struct ptp_clock_info *ptp,
+ struct system_device_crosststamp *cts)
+{
+ struct mlx5_clock *clock =
+ container_of(ptp, struct mlx5_clock, ptp_info);
+ struct system_time_snapshot history_begin = {0};
+ struct mlx5_core_dev *mdev;
+ int err;
+
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+
+ if (!mlx5_is_ptm_source_time_available(mdev)) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ ktime_get_snapshot(&history_begin);
+
+ err = get_device_system_crosststamp(mlx5_mtctr_syncdevicecyclestime,
+ mdev, &history_begin, cts);
+unlock:
+ mlx5_clock_unlock(clock);
+ return err;
}
#endif /* CONFIG_X86 */
@@ -259,12 +401,11 @@ static u64 mlx5_read_time(struct mlx5_core_dev *dev,
(u64)timer_l | (u64)timer_h1 << 32;
}
-static u64 read_internal_timer(const struct cyclecounter *cc)
+static u64 read_internal_timer(struct cyclecounter *cc)
{
struct mlx5_timer *timer = container_of(cc, struct mlx5_timer, cycles);
struct mlx5_clock *clock = container_of(timer, struct mlx5_clock, timer);
- struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
- clock);
+ struct mlx5_core_dev *mdev = mlx5_clock_mdev_get(clock);
return mlx5_read_time(mdev, NULL, false) & cc->mask;
}
@@ -272,7 +413,7 @@ static u64 read_internal_timer(const struct cyclecounter *cc)
static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
{
struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
struct mlx5_timer *timer;
u32 sign;
@@ -295,12 +436,10 @@ static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
static void mlx5_pps_out(struct work_struct *work)
{
- struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
- out_work);
- struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
- pps_info);
- struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
- clock);
+ struct mlx5_clock_dev_state *clock_state = container_of(work, struct mlx5_clock_dev_state,
+ out_work);
+ struct mlx5_core_dev *mdev = clock_state->mdev;
+ struct mlx5_clock *clock = mdev->clock;
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
unsigned long flags;
int i;
@@ -322,17 +461,17 @@ static void mlx5_pps_out(struct work_struct *work)
}
}
-static void mlx5_timestamp_overflow(struct work_struct *work)
+static long mlx5_timestamp_overflow(struct ptp_clock_info *ptp_info)
{
- struct delayed_work *dwork = to_delayed_work(work);
struct mlx5_core_dev *mdev;
struct mlx5_timer *timer;
struct mlx5_clock *clock;
unsigned long flags;
- timer = container_of(dwork, struct mlx5_timer, overflow_work);
- clock = container_of(timer, struct mlx5_clock, timer);
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ clock = container_of(ptp_info, struct mlx5_clock, ptp_info);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+ timer = &clock->timer;
if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
goto out;
@@ -343,7 +482,8 @@ static void mlx5_timestamp_overflow(struct work_struct *work)
write_sequnlock_irqrestore(&clock->lock, flags);
out:
- schedule_delayed_work(&timer->overflow_work, timer->overflow_period);
+ mlx5_clock_unlock(clock);
+ return timer->overflow_period;
}
static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
@@ -362,15 +502,12 @@ static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
return mlx5_set_mtutc(mdev, in, sizeof(in));
}
-static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
+static int mlx5_clock_settime(struct mlx5_core_dev *mdev, struct mlx5_clock *clock,
+ const struct timespec64 *ts)
{
- struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_timer *timer = &clock->timer;
- struct mlx5_core_dev *mdev;
unsigned long flags;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
-
if (mlx5_modify_mtutc_allowed(mdev)) {
int err = mlx5_ptp_settime_real_time(mdev, ts);
@@ -386,6 +523,20 @@ static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64
return 0;
}
+static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
+{
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_core_dev *mdev;
+ int err;
+
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+ err = mlx5_clock_settime(mdev, clock, ts);
+ mlx5_clock_unlock(clock);
+
+ return err;
+}
+
static
struct timespec64 mlx5_ptp_gettimex_real_time(struct mlx5_core_dev *mdev,
struct ptp_system_timestamp *sts)
@@ -402,23 +553,39 @@ static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
struct ptp_system_timestamp *sts)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
- struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
- unsigned long flags;
u64 cycles, ns;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
if (mlx5_real_time_mode(mdev)) {
*ts = mlx5_ptp_gettimex_real_time(mdev, sts);
goto out;
}
- write_seqlock_irqsave(&clock->lock, flags);
cycles = mlx5_read_time(mdev, sts, false);
- ns = timecounter_cyc2time(&timer->tc, cycles);
- write_sequnlock_irqrestore(&clock->lock, flags);
+ ns = mlx5_timecounter_cyc2time(clock, cycles);
*ts = ns_to_timespec64(ns);
out:
+ mlx5_clock_unlock(clock);
+ return 0;
+}
+
+static int mlx5_ptp_getcyclesx(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
+ ptp_info);
+ struct mlx5_core_dev *mdev;
+ u64 cycles;
+
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+
+ cycles = mlx5_read_time(mdev, sts, false);
+ *ts = ns_to_timespec64(cycles);
+ mlx5_clock_unlock(clock);
return 0;
}
@@ -449,14 +616,16 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
+ int err = 0;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
if (mlx5_modify_mtutc_allowed(mdev)) {
- int err = mlx5_ptp_adjtime_real_time(mdev, delta);
+ err = mlx5_ptp_adjtime_real_time(mdev, delta);
if (err)
- return err;
+ goto unlock;
}
write_seqlock_irqsave(&clock->lock, flags);
@@ -464,17 +633,23 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
- return 0;
+unlock:
+ mlx5_clock_unlock(clock);
+ return err;
}
static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_core_dev *mdev;
+ int err;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+ err = mlx5_ptp_adjtime_real_time(mdev, delta);
+ mlx5_clock_unlock(clock);
- return mlx5_ptp_adjtime_real_time(mdev, delta);
+ return err;
}
static int mlx5_ptp_freq_adj_real_time(struct mlx5_core_dev *mdev, long scaled_ppm)
@@ -503,15 +678,17 @@ static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
+ int err = 0;
u32 mult;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
if (mlx5_modify_mtutc_allowed(mdev)) {
- int err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm);
+ err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm);
if (err)
- return err;
+ goto unlock;
}
mult = (u32)adjust_by_scaled_ppm(timer->nominal_c_mult, scaled_ppm);
@@ -521,8 +698,11 @@ static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
timer->cycles.mult = mult;
mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
+ ptp_schedule_worker(clock->ptp, timer->overflow_period);
- return 0;
+unlock:
+ mlx5_clock_unlock(clock);
+ return err;
}
static int mlx5_extts_configure(struct ptp_clock_info *ptp,
@@ -531,25 +711,14 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
{
struct mlx5_clock *clock =
container_of(ptp, struct mlx5_clock, ptp_info);
- struct mlx5_core_dev *mdev =
- container_of(clock, struct mlx5_core_dev, clock);
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+ struct mlx5_core_dev *mdev;
u32 field_select = 0;
u8 pin_mode = 0;
u8 pattern = 0;
int pin = -1;
int err = 0;
- if (!MLX5_PPS_CAP(mdev))
- return -EOPNOTSUPP;
-
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* Reject requests to enable time stamping on both edges. */
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
@@ -573,6 +742,14 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
field_select = MLX5_MTPPS_FS_ENABLE;
}
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+
+ if (!MLX5_PPS_CAP(mdev)) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
MLX5_SET(mtpps_reg, in, pin, pin);
MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
MLX5_SET(mtpps_reg, in, pattern, pattern);
@@ -581,15 +758,23 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
err = mlx5_set_mtpps(mdev, in, sizeof(in));
if (err)
- return err;
+ goto unlock;
- return mlx5_set_mtppse(mdev, pin, 0,
- MLX5_EVENT_MODE_REPETETIVE & on);
+ err = mlx5_set_mtppse(mdev, pin, 0, MLX5_EVENT_MODE_REPETETIVE & on);
+ if (err)
+ goto unlock;
+
+ clock->pps_info.pin_armed[pin] = on;
+ clock_priv(clock)->event_mdev = mdev;
+
+unlock:
+ mlx5_clock_unlock(clock);
+ return err;
}
static u64 find_target_cycles(struct mlx5_core_dev *mdev, s64 target_ns)
{
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
u64 cycles_now, cycles_delta;
u64 nsec_now, nsec_delta;
struct mlx5_timer *timer;
@@ -648,7 +833,7 @@ static int mlx5_perout_conf_out_pulse_duration(struct mlx5_core_dev *mdev,
struct ptp_clock_request *rq,
u32 *out_pulse_duration_ns)
{
- struct mlx5_pps *pps_info = &mdev->clock.pps_info;
+ struct mlx5_pps *pps_info = &mdev->clock->pps_info;
u32 out_pulse_duration;
struct timespec64 ts;
@@ -681,7 +866,7 @@ static int perout_conf_npps_real_time(struct mlx5_core_dev *mdev, struct ptp_clo
u32 *field_select, u32 *out_pulse_duration_ns,
u64 *period, u64 *time_stamp)
{
- struct mlx5_pps *pps_info = &mdev->clock.pps_info;
+ struct mlx5_pps *pps_info = &mdev->clock->pps_info;
struct ptp_clock_time *time = &rq->perout.start;
struct timespec64 ts;
@@ -704,38 +889,24 @@ static int perout_conf_npps_real_time(struct mlx5_core_dev *mdev, struct ptp_clo
return 0;
}
-static bool mlx5_perout_verify_flags(struct mlx5_core_dev *mdev, unsigned int flags)
-{
- return ((!mlx5_npps_real_time_supported(mdev) && flags) ||
- (mlx5_npps_real_time_supported(mdev) && flags & ~PTP_PEROUT_DUTY_CYCLE));
-}
-
static int mlx5_perout_configure(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq,
int on)
{
struct mlx5_clock *clock =
container_of(ptp, struct mlx5_clock, ptp_info);
- struct mlx5_core_dev *mdev =
- container_of(clock, struct mlx5_core_dev, clock);
- bool rt_mode = mlx5_real_time_mode(mdev);
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
u32 out_pulse_duration_ns = 0;
+ struct mlx5_core_dev *mdev;
u32 field_select = 0;
u64 npps_period = 0;
u64 time_stamp = 0;
u8 pin_mode = 0;
u8 pattern = 0;
+ bool rt_mode;
int pin = -1;
int err = 0;
- if (!MLX5_PPS_CAP(mdev))
- return -EOPNOTSUPP;
-
- /* Reject requests with unsupported flags */
- if (mlx5_perout_verify_flags(mdev, rq->perout.flags))
- return -EOPNOTSUPP;
-
if (rq->perout.index >= clock->ptp_info.n_pins)
return -EINVAL;
@@ -744,14 +915,23 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
if (pin < 0)
return -EBUSY;
- if (on) {
- bool rt_mode = mlx5_real_time_mode(mdev);
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+ rt_mode = mlx5_real_time_mode(mdev);
+ if (!MLX5_PPS_CAP(mdev)) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ if (on) {
pin_mode = MLX5_PIN_MODE_OUT;
pattern = MLX5_OUT_PATTERN_PERIODIC;
- if (rt_mode && rq->perout.start.sec > U32_MAX)
- return -EINVAL;
+ if (rt_mode && rq->perout.start.sec > U32_MAX) {
+ err = -EINVAL;
+ goto unlock;
+ }
field_select |= MLX5_MTPPS_FS_PIN_MODE |
MLX5_MTPPS_FS_PATTERN |
@@ -764,7 +944,7 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
else
err = perout_conf_1pps(mdev, rq, &time_stamp, rt_mode);
if (err)
- return err;
+ goto unlock;
}
MLX5_SET(mtpps_reg, in, pin, pin);
@@ -777,13 +957,16 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
MLX5_SET(mtpps_reg, in, out_pulse_duration_ns, out_pulse_duration_ns);
err = mlx5_set_mtpps(mdev, in, sizeof(in));
if (err)
- return err;
+ goto unlock;
if (rt_mode)
- return 0;
+ goto unlock;
+
+ err = mlx5_set_mtppse(mdev, pin, 0, MLX5_EVENT_MODE_REPETETIVE & on);
- return mlx5_set_mtppse(mdev, pin, 0,
- MLX5_EVENT_MODE_REPETETIVE & on);
+unlock:
+ mlx5_clock_unlock(clock);
+ return err;
}
static int mlx5_pps_configure(struct ptp_clock_info *ptp,
@@ -856,6 +1039,7 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = {
.settime64 = mlx5_ptp_settime,
.enable = NULL,
.verify = NULL,
+ .do_aux_work = mlx5_timestamp_overflow,
};
static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
@@ -869,10 +1053,8 @@ static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
mtpps_size, MLX5_REG_MTPPS, 0, 0);
}
-static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
+static int mlx5_get_pps_pin_mode(struct mlx5_core_dev *mdev, u8 pin)
{
- struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
-
u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
u8 mode;
int err;
@@ -891,8 +1073,9 @@ static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
return PTP_PF_NONE;
}
-static void mlx5_init_pin_config(struct mlx5_clock *clock)
+static void mlx5_init_pin_config(struct mlx5_core_dev *mdev)
{
+ struct mlx5_clock *clock = mdev->clock;
int i;
if (!clock->ptp_info.n_pins)
@@ -908,20 +1091,27 @@ static void mlx5_init_pin_config(struct mlx5_clock *clock)
clock->ptp_info.verify = mlx5_ptp_verify;
clock->ptp_info.pps = 1;
+ clock->ptp_info.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
+
+ if (mlx5_npps_real_time_supported(mdev))
+ clock->ptp_info.supported_perout_flags = PTP_PEROUT_DUTY_CYCLE;
+
for (i = 0; i < clock->ptp_info.n_pins; i++) {
snprintf(clock->ptp_info.pin_config[i].name,
sizeof(clock->ptp_info.pin_config[i].name),
"mlx5_pps%d", i);
clock->ptp_info.pin_config[i].index = i;
- clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
+ clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(mdev, i);
clock->ptp_info.pin_config[i].chan = 0;
}
}
static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+ struct mlx5_clock *clock = mdev->clock;
mlx5_query_mtpps(mdev, out, sizeof(out));
@@ -971,16 +1161,16 @@ static u64 perout_conf_next_event_timer(struct mlx5_core_dev *mdev,
static int mlx5_pps_event(struct notifier_block *nb,
unsigned long type, void *data)
{
- struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
+ struct mlx5_clock_dev_state *clock_state = mlx5_nb_cof(nb, struct mlx5_clock_dev_state,
+ pps_nb);
+ struct mlx5_core_dev *mdev = clock_state->mdev;
+ struct mlx5_clock *clock = mdev->clock;
struct ptp_clock_event ptp_event;
struct mlx5_eqe *eqe = data;
int pin = eqe->data.pps.pin;
- struct mlx5_core_dev *mdev;
unsigned long flags;
u64 ns;
- mdev = container_of(clock, struct mlx5_core_dev, clock);
-
switch (clock->ptp_info.pin_config[pin].func) {
case PTP_PF_EXTTS:
ptp_event.index = pin;
@@ -1000,11 +1190,15 @@ static int mlx5_pps_event(struct notifier_block *nb,
ptp_clock_event(clock->ptp, &ptp_event);
break;
case PTP_PF_PEROUT:
+ if (clock->shared) {
+ mlx5_core_warn(mdev, " Received unexpected PPS out event\n");
+ break;
+ }
ns = perout_conf_next_event_timer(mdev, clock);
write_seqlock_irqsave(&clock->lock, flags);
clock->pps_info.start[pin] = ns;
write_sequnlock_irqrestore(&clock->lock, flags);
- schedule_work(&clock->pps_info.out_work);
+ schedule_work(&clock_state->out_work);
break;
default:
mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n",
@@ -1016,7 +1210,7 @@ static int mlx5_pps_event(struct notifier_block *nb,
static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
struct mlx5_timer *timer = &clock->timer;
u32 dev_freq;
@@ -1032,10 +1226,10 @@ static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
ktime_to_ns(ktime_get_real()));
}
-static void mlx5_init_overflow_period(struct mlx5_clock *clock)
+static void mlx5_init_overflow_period(struct mlx5_core_dev *mdev)
{
- struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
+ struct mlx5_clock *clock = mdev->clock;
struct mlx5_timer *timer = &clock->timer;
u64 overflow_cycles;
u64 frac = 0;
@@ -1056,12 +1250,11 @@ static void mlx5_init_overflow_period(struct mlx5_clock *clock)
do_div(ns, NSEC_PER_SEC / HZ);
timer->overflow_period = ns;
- INIT_DELAYED_WORK(&timer->overflow_work, mlx5_timestamp_overflow);
- if (timer->overflow_period)
- schedule_delayed_work(&timer->overflow_work, 0);
- else
+ if (!timer->overflow_period) {
+ timer->overflow_period = HZ;
mlx5_core_warn(mdev,
- "invalid overflow period, overflow_work is not scheduled\n");
+ "invalid overflow period, overflow_work is scheduled once per second\n");
+ }
if (clock_info)
clock_info->overflow_period = timer->overflow_period;
@@ -1069,7 +1262,7 @@ static void mlx5_init_overflow_period(struct mlx5_clock *clock)
static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
struct mlx5_ib_clock_info *info;
struct mlx5_timer *timer;
@@ -1092,7 +1285,7 @@ static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
static void mlx5_init_timer_max_freq_adjustment(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};
u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
u8 log_max_freq_adjustment = 0;
@@ -1111,7 +1304,8 @@ static void mlx5_init_timer_max_freq_adjustment(struct mlx5_core_dev *mdev)
static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
+ bool expose_cycles;
/* Configure the PHC */
clock->ptp_info = mlx5_ptp_clock_info;
@@ -1119,46 +1313,48 @@ static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
if (MLX5_CAP_MCAM_REG(mdev, mtutc))
mlx5_init_timer_max_freq_adjustment(mdev);
+ expose_cycles = !MLX5_CAP_GEN(mdev, disciplined_fr_counter) ||
+ !mlx5_real_time_mode(mdev);
+
#ifdef CONFIG_X86
if (MLX5_CAP_MCAM_REG3(mdev, mtptm) &&
- MLX5_CAP_MCAM_REG3(mdev, mtctr) && boot_cpu_has(X86_FEATURE_ART))
+ MLX5_CAP_MCAM_REG3(mdev, mtctr) && boot_cpu_has(X86_FEATURE_ART)) {
clock->ptp_info.getcrosststamp = mlx5_ptp_getcrosststamp;
+ if (expose_cycles)
+ clock->ptp_info.getcrosscycles =
+ mlx5_ptp_getcrosscycles;
+ }
#endif /* CONFIG_X86 */
+ if (expose_cycles)
+ clock->ptp_info.getcyclesx64 = mlx5_ptp_getcyclesx;
+
mlx5_timecounter_init(mdev);
mlx5_init_clock_info(mdev);
- mlx5_init_overflow_period(clock);
+ mlx5_init_overflow_period(mdev);
if (mlx5_real_time_mode(mdev)) {
struct timespec64 ts;
ktime_get_real_ts64(&ts);
- mlx5_ptp_settime(&clock->ptp_info, &ts);
+ mlx5_clock_settime(mdev, clock, &ts);
}
}
static void mlx5_init_pps(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
-
if (!MLX5_PPS_CAP(mdev))
return;
mlx5_get_pps_caps(mdev);
- mlx5_init_pin_config(clock);
+ mlx5_init_pin_config(mdev);
}
-void mlx5_init_clock(struct mlx5_core_dev *mdev)
+static void mlx5_init_clock_dev(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
-
- if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) {
- mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
- return;
- }
+ struct mlx5_clock *clock = mdev->clock;
seqlock_init(&clock->lock);
- INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
/* Initialize the device clock */
mlx5_init_timer_clock(mdev);
@@ -1167,33 +1363,27 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
mlx5_init_pps(mdev);
clock->ptp = ptp_clock_register(&clock->ptp_info,
- &mdev->pdev->dev);
+ clock->shared ? NULL : &mdev->pdev->dev);
if (IS_ERR(clock->ptp)) {
- mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
- PTR_ERR(clock->ptp));
+ mlx5_core_warn(mdev, "%sptp_clock_register failed %pe\n",
+ clock->shared ? "shared clock " : "",
+ clock->ptp);
clock->ptp = NULL;
}
- MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
- mlx5_eq_notifier_register(mdev, &clock->pps_nb);
+ if (clock->ptp)
+ ptp_schedule_worker(clock->ptp, 0);
}
-void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
+static void mlx5_destroy_clock_dev(struct mlx5_core_dev *mdev)
{
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mdev->clock;
- if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
- return;
-
- mlx5_eq_notifier_unregister(mdev, &clock->pps_nb);
if (clock->ptp) {
ptp_clock_unregister(clock->ptp);
clock->ptp = NULL;
}
- cancel_work_sync(&clock->pps_info.out_work);
- cancel_delayed_work_sync(&clock->timer.overflow_work);
-
if (mdev->clock_info) {
free_page((unsigned long)mdev->clock_info);
mdev->clock_info = NULL;
@@ -1201,3 +1391,253 @@ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
kfree(clock->ptp_info.pin_config);
}
+
+static void mlx5_clock_free(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_clock_priv *cpriv = clock_priv(mdev->clock);
+
+ mlx5_destroy_clock_dev(mdev);
+ mutex_destroy(&cpriv->lock);
+ kfree(cpriv);
+ mdev->clock = NULL;
+}
+
+static int mlx5_clock_alloc(struct mlx5_core_dev *mdev, bool shared)
+{
+ struct mlx5_clock_priv *cpriv;
+ struct mlx5_clock *clock;
+
+ cpriv = kzalloc(sizeof(*cpriv), GFP_KERNEL);
+ if (!cpriv)
+ return -ENOMEM;
+
+ mutex_init(&cpriv->lock);
+ cpriv->mdev = mdev;
+ clock = &cpriv->clock;
+ clock->shared = shared;
+ mdev->clock = clock;
+ mlx5_clock_lock(clock);
+ mlx5_init_clock_dev(mdev);
+ mlx5_clock_unlock(clock);
+
+ if (!clock->shared)
+ return 0;
+
+ if (!clock->ptp) {
+ mlx5_core_warn(mdev, "failed to create ptp dev shared by multiple functions");
+ mlx5_clock_free(mdev);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void mlx5_shared_clock_register(struct mlx5_core_dev *mdev,
+ u8 identity[MLX5_RT_CLOCK_IDENTITY_SIZE])
+{
+ struct mlx5_core_dev *peer_dev, *next = NULL;
+ struct mlx5_devcom_match_attr attr = {};
+ struct mlx5_devcom_comp_dev *compd;
+ struct mlx5_devcom_comp_dev *pos;
+
+ BUILD_BUG_ON(MLX5_RT_CLOCK_IDENTITY_SIZE > MLX5_DEVCOM_MATCH_KEY_MAX);
+ memcpy(attr.key.buf, identity, MLX5_RT_CLOCK_IDENTITY_SIZE);
+
+ compd = mlx5_devcom_register_component(mdev->priv.devc,
+ MLX5_DEVCOM_SHARED_CLOCK,
+ &attr, NULL, mdev);
+ if (!compd)
+ return;
+
+ mdev->clock_state->compdev = compd;
+
+ mlx5_devcom_comp_lock(mdev->clock_state->compdev);
+ mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) {
+ if (peer_dev->clock) {
+ next = peer_dev;
+ break;
+ }
+ }
+
+ if (next) {
+ mdev->clock = next->clock;
+ /* clock info is shared among all the functions using the same clock */
+ mdev->clock_info = next->clock_info;
+ } else {
+ mlx5_clock_alloc(mdev, true);
+ }
+ mlx5_devcom_comp_unlock(mdev->clock_state->compdev);
+
+ if (!mdev->clock) {
+ mlx5_devcom_unregister_component(mdev->clock_state->compdev);
+ mdev->clock_state->compdev = NULL;
+ }
+}
+
+static void mlx5_shared_clock_unregister(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_core_dev *peer_dev, *next = NULL;
+ struct mlx5_clock *clock = mdev->clock;
+ struct mlx5_devcom_comp_dev *pos;
+
+ mlx5_devcom_comp_lock(mdev->clock_state->compdev);
+ mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) {
+ if (peer_dev->clock && peer_dev != mdev) {
+ next = peer_dev;
+ break;
+ }
+ }
+
+ if (next) {
+ struct mlx5_clock_priv *cpriv = clock_priv(clock);
+
+ mlx5_clock_lock(clock);
+ if (mdev == cpriv->mdev)
+ cpriv->mdev = next;
+ mlx5_clock_unlock(clock);
+ } else {
+ mlx5_clock_free(mdev);
+ }
+
+ mdev->clock = NULL;
+ mdev->clock_info = NULL;
+ mlx5_devcom_comp_unlock(mdev->clock_state->compdev);
+
+ mlx5_devcom_unregister_component(mdev->clock_state->compdev);
+}
+
+static void mlx5_clock_arm_pps_in_event(struct mlx5_clock *clock,
+ struct mlx5_core_dev *new_mdev,
+ struct mlx5_core_dev *old_mdev)
+{
+ struct ptp_clock_info *ptp_info = &clock->ptp_info;
+ struct mlx5_clock_priv *cpriv = clock_priv(clock);
+ int i;
+
+ for (i = 0; i < ptp_info->n_pins; i++) {
+ if (ptp_info->pin_config[i].func != PTP_PF_EXTTS ||
+ !clock->pps_info.pin_armed[i])
+ continue;
+
+ if (new_mdev) {
+ mlx5_set_mtppse(new_mdev, i, 0, MLX5_EVENT_MODE_REPETETIVE);
+ cpriv->event_mdev = new_mdev;
+ } else {
+ cpriv->event_mdev = NULL;
+ }
+
+ if (old_mdev)
+ mlx5_set_mtppse(old_mdev, i, 0, MLX5_EVENT_MODE_DISABLE);
+ }
+}
+
+void mlx5_clock_load(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_clock *clock = mdev->clock;
+ struct mlx5_clock_priv *cpriv;
+
+ if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
+ return;
+
+ INIT_WORK(&mdev->clock_state->out_work, mlx5_pps_out);
+ MLX5_NB_INIT(&mdev->clock_state->pps_nb, mlx5_pps_event, PPS_EVENT);
+ mlx5_eq_notifier_register(mdev, &mdev->clock_state->pps_nb);
+
+ if (!clock->shared) {
+ mlx5_clock_arm_pps_in_event(clock, mdev, NULL);
+ return;
+ }
+
+ cpriv = clock_priv(clock);
+ mlx5_devcom_comp_lock(mdev->clock_state->compdev);
+ mlx5_clock_lock(clock);
+ if (mdev == cpriv->mdev && mdev != cpriv->event_mdev)
+ mlx5_clock_arm_pps_in_event(clock, mdev, cpriv->event_mdev);
+ mlx5_clock_unlock(clock);
+ mlx5_devcom_comp_unlock(mdev->clock_state->compdev);
+}
+
+void mlx5_clock_unload(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_core_dev *peer_dev, *next = NULL;
+ struct mlx5_clock *clock = mdev->clock;
+ struct mlx5_devcom_comp_dev *pos;
+
+ if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
+ return;
+
+ if (!clock->shared) {
+ mlx5_clock_arm_pps_in_event(clock, NULL, mdev);
+ goto out;
+ }
+
+ mlx5_devcom_comp_lock(mdev->clock_state->compdev);
+ mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) {
+ if (peer_dev->clock && peer_dev != mdev) {
+ next = peer_dev;
+ break;
+ }
+ }
+
+ mlx5_clock_lock(clock);
+ if (mdev == clock_priv(clock)->event_mdev)
+ mlx5_clock_arm_pps_in_event(clock, next, mdev);
+ mlx5_clock_unlock(clock);
+ mlx5_devcom_comp_unlock(mdev->clock_state->compdev);
+
+out:
+ mlx5_eq_notifier_unregister(mdev, &mdev->clock_state->pps_nb);
+ cancel_work_sync(&mdev->clock_state->out_work);
+}
+
+static struct mlx5_clock null_clock;
+
+int mlx5_init_clock(struct mlx5_core_dev *mdev)
+{
+ u8 identity[MLX5_RT_CLOCK_IDENTITY_SIZE];
+ struct mlx5_clock_dev_state *clock_state;
+ int err;
+
+ if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) {
+ mdev->clock = &null_clock;
+ mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
+ return 0;
+ }
+
+ clock_state = kzalloc(sizeof(*clock_state), GFP_KERNEL);
+ if (!clock_state)
+ return -ENOMEM;
+ clock_state->mdev = mdev;
+ mdev->clock_state = clock_state;
+
+ if (MLX5_CAP_MCAM_REG3(mdev, mrtcq) && mlx5_real_time_mode(mdev)) {
+ if (mlx5_clock_identity_get(mdev, identity))
+ mlx5_core_warn(mdev, "failed to get rt clock identity, create ptp dev per function\n");
+ else
+ mlx5_shared_clock_register(mdev, identity);
+ }
+
+ if (!mdev->clock) {
+ err = mlx5_clock_alloc(mdev, false);
+ if (err) {
+ kfree(clock_state);
+ mdev->clock_state = NULL;
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
+{
+ if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
+ return;
+
+ if (mdev->clock->shared)
+ mlx5_shared_clock_unregister(mdev);
+ else
+ mlx5_clock_free(mdev);
+ kfree(mdev->clock_state);
+ mdev->clock_state = NULL;
+}
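
The shared-clock path above lets several functions that report the same real-time clock identity reuse one mlx5_clock: the first function to register through devcom allocates it, peers that find an existing clock reuse it, and the last one to unregister frees it. A minimal userspace sketch of that join/reuse/leave pattern (illustrative names only, not the mlx5 API):

/* Toy model: first user allocates the shared object, later users reuse it,
 * the last user frees it. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct shared_clock {
	char identity[16];
	int users;
	struct shared_clock *next;
};

static struct shared_clock *clock_list;

static struct shared_clock *clock_join(const char *identity)
{
	struct shared_clock *c;

	for (c = clock_list; c; c = c->next) {
		if (!strcmp(c->identity, identity)) {
			c->users++;	/* reuse the peer's clock */
			return c;
		}
	}
	c = calloc(1, sizeof(*c));
	if (!c)
		return NULL;
	snprintf(c->identity, sizeof(c->identity), "%s", identity);
	c->users = 1;
	c->next = clock_list;
	clock_list = c;
	return c;
}

static void clock_leave(struct shared_clock *c)
{
	struct shared_clock **p = &clock_list;

	if (--c->users)
		return;		/* peers still use it */
	while (*p && *p != c)	/* unlink and free once the last user is gone */
		p = &(*p)->next;
	if (*p)
		*p = c->next;
	free(c);
}

int main(void)
{
	struct shared_clock *a = clock_join("cafe-0001");
	struct shared_clock *b = clock_join("cafe-0001");

	printf("same object: %s, users=%d\n", a == b ? "yes" : "no", a->users);
	clock_leave(a);
	clock_leave(b);
	return 0;
}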
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
index bd95b9f8d143..aff3aed62c74 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
@@ -33,6 +33,34 @@
#ifndef __LIB_CLOCK_H__
#define __LIB_CLOCK_H__
+#include <linux/ptp_clock_kernel.h>
+
+#define MAX_PIN_NUM 8
+struct mlx5_pps {
+ u8 pin_caps[MAX_PIN_NUM];
+ u64 start[MAX_PIN_NUM];
+ u8 enabled;
+ u64 min_npps_period;
+ u64 min_out_pulse_duration_ns;
+ bool pin_armed[MAX_PIN_NUM];
+};
+
+struct mlx5_timer {
+ struct cyclecounter cycles;
+ struct timecounter tc;
+ u32 nominal_c_mult;
+ unsigned long overflow_period;
+};
+
+struct mlx5_clock {
+ seqlock_t lock;
+ struct ptp_clock *ptp;
+ struct ptp_clock_info ptp_info;
+ struct mlx5_pps pps_info;
+ struct mlx5_timer timer;
+ bool shared;
+};
+
static inline bool mlx5_is_real_time_rq(struct mlx5_core_dev *mdev)
{
u8 rq_ts_format_cap = MLX5_CAP_GEN(mdev, rq_ts_format);
@@ -54,12 +82,14 @@ static inline bool mlx5_is_real_time_sq(struct mlx5_core_dev *mdev)
typedef ktime_t (*cqe_ts_to_ns)(struct mlx5_clock *, u64);
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
-void mlx5_init_clock(struct mlx5_core_dev *mdev);
+int mlx5_init_clock(struct mlx5_core_dev *mdev);
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev);
+void mlx5_clock_load(struct mlx5_core_dev *mdev);
+void mlx5_clock_unload(struct mlx5_core_dev *mdev);
static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
{
- return mdev->clock.ptp ? ptp_clock_index(mdev->clock.ptp) : -1;
+ return mdev->clock->ptp ? ptp_clock_index(mdev->clock->ptp) : -1;
}
static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
@@ -87,8 +117,10 @@ static inline ktime_t mlx5_real_time_cyc2time(struct mlx5_clock *clock,
return ns_to_ktime(time);
}
#else
-static inline void mlx5_init_clock(struct mlx5_core_dev *mdev) {}
+static inline int mlx5_init_clock(struct mlx5_core_dev *mdev) { return 0; }
static inline void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) {}
+static inline void mlx5_clock_load(struct mlx5_core_dev *mdev) {}
+static inline void mlx5_clock_unload(struct mlx5_core_dev *mdev) {}
static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
{
return -1;
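
The mlx5_timer moved into this header pairs a cyclecounter with a timecounter so free-running hardware cycles can be converted to nanoseconds; the conversion boils down to ns = nsec_base + (((cyc - cycle_last) & mask) * mult >> shift). A standalone model of that arithmetic (toy field names, not the kernel structs):

/* Toy timecounter: converts a raw counter read into nanoseconds using a
 * fixed-point mult/shift pair, as the mlx5 timer does conceptually. */
#include <stdint.h>
#include <stdio.h>

struct toy_timecounter {
	uint64_t cycle_last;	/* counter value at the last sync point */
	uint64_t nsec_base;	/* accumulated nanoseconds at cycle_last */
	uint32_t mult;		/* fixed-point multiplier */
	uint32_t shift;		/* fixed-point shift */
	uint64_t mask;		/* counter width mask */
};

static uint64_t toy_cyc2time(const struct toy_timecounter *tc, uint64_t cyc)
{
	uint64_t delta = (cyc - tc->cycle_last) & tc->mask;

	return tc->nsec_base + ((delta * tc->mult) >> tc->shift);
}

int main(void)
{
	/* mult/shift chosen so one cycle ~= 16 ns (62.5 MHz), purely as an example */
	struct toy_timecounter tc = {
		.cycle_last = 1000, .nsec_base = 0,
		.mult = 16 << 10, .shift = 10, .mask = ~0ULL,
	};

	printf("%llu ns\n", (unsigned long long)toy_cyc2time(&tc, 1625));
	return 0;
}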
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h
index c819c047bb9c..4821163a547f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h
@@ -8,6 +8,7 @@ enum {
MLX5_ACCEL_OBJ_TLS_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_TLS,
MLX5_ACCEL_OBJ_IPSEC_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_IPSEC,
MLX5_ACCEL_OBJ_MACSEC_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_MACSEC,
+ MLX5_ACCEL_OBJ_PSP_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_PSP,
MLX5_ACCEL_OBJ_TYPE_KEY_NUM,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
index 7b0766c89f4c..e749618229bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
@@ -4,6 +4,7 @@
#include <linux/mlx5/vport.h>
#include <linux/list.h>
#include "lib/devcom.h"
+#include "lib/mlx5.h"
#include "mlx5_core.h"
static LIST_HEAD(devcom_dev_list);
@@ -22,11 +23,17 @@ struct mlx5_devcom_dev {
struct kref ref;
};
+struct mlx5_devcom_key {
+ u32 flags;
+ union mlx5_devcom_match_key key;
+ possible_net_t net;
+};
+
struct mlx5_devcom_comp {
struct list_head comp_list;
enum mlx5_devcom_component id;
- u64 key;
struct list_head comp_dev_list_head;
+ struct mlx5_devcom_key key;
mlx5_devcom_event_handler_t handler;
struct kref ref;
bool ready;
@@ -69,20 +76,18 @@ mlx5_devcom_dev_alloc(struct mlx5_core_dev *dev)
struct mlx5_devcom_dev *
mlx5_devcom_register_device(struct mlx5_core_dev *dev)
{
- struct mlx5_devcom_dev *devc;
+ struct mlx5_devcom_dev *devc = NULL;
mutex_lock(&dev_list_lock);
if (devcom_dev_exists(dev)) {
- devc = ERR_PTR(-EEXIST);
+ mlx5_core_err(dev, "devcom device already exists");
goto out;
}
devc = mlx5_devcom_dev_alloc(dev);
- if (!devc) {
- devc = ERR_PTR(-ENOMEM);
+ if (!devc)
goto out;
- }
list_add_tail(&devc->list, &devcom_dev_list);
out:
@@ -103,21 +108,27 @@ mlx5_devcom_dev_release(struct kref *ref)
void mlx5_devcom_unregister_device(struct mlx5_devcom_dev *devc)
{
- if (!IS_ERR_OR_NULL(devc))
- kref_put(&devc->ref, mlx5_devcom_dev_release);
+ if (!devc)
+ return;
+
+ kref_put(&devc->ref, mlx5_devcom_dev_release);
}
static struct mlx5_devcom_comp *
-mlx5_devcom_comp_alloc(u64 id, u64 key, mlx5_devcom_event_handler_t handler)
+mlx5_devcom_comp_alloc(u64 id, const struct mlx5_devcom_match_attr *attr,
+ mlx5_devcom_event_handler_t handler)
{
struct mlx5_devcom_comp *comp;
comp = kzalloc(sizeof(*comp), GFP_KERNEL);
if (!comp)
- return ERR_PTR(-ENOMEM);
+ return NULL;
comp->id = id;
- comp->key = key;
+ comp->key.key = attr->key;
+ comp->key.flags = attr->flags;
+ if (attr->flags & MLX5_DEVCOM_MATCH_FLAGS_NS)
+ write_pnet(&comp->key.net, attr->net);
comp->handler = handler;
init_rwsem(&comp->sem);
lockdep_register_key(&comp->lock_key);
@@ -149,7 +160,7 @@ devcom_alloc_comp_dev(struct mlx5_devcom_dev *devc,
devcom = kzalloc(sizeof(*devcom), GFP_KERNEL);
if (!devcom)
- return ERR_PTR(-ENOMEM);
+ return NULL;
kref_get(&devc->ref);
devcom->devc = devc;
@@ -180,21 +191,34 @@ devcom_free_comp_dev(struct mlx5_devcom_comp_dev *devcom)
static bool
devcom_component_equal(struct mlx5_devcom_comp *devcom,
enum mlx5_devcom_component id,
- u64 key)
+ const struct mlx5_devcom_match_attr *attr)
{
- return devcom->id == id && devcom->key == key;
+ if (devcom->id != id)
+ return false;
+
+ if (devcom->key.flags != attr->flags)
+ return false;
+
+ if (memcmp(&devcom->key.key, &attr->key, sizeof(devcom->key.key)))
+ return false;
+
+ if (devcom->key.flags & MLX5_DEVCOM_MATCH_FLAGS_NS &&
+ !net_eq(read_pnet(&devcom->key.net), attr->net))
+ return false;
+
+ return true;
}
static struct mlx5_devcom_comp *
devcom_component_get(struct mlx5_devcom_dev *devc,
enum mlx5_devcom_component id,
- u64 key,
+ const struct mlx5_devcom_match_attr *attr,
mlx5_devcom_event_handler_t handler)
{
struct mlx5_devcom_comp *comp;
devcom_for_each_component(comp) {
- if (devcom_component_equal(comp, id, key)) {
+ if (devcom_component_equal(comp, id, attr)) {
if (handler == comp->handler) {
kref_get(&comp->ref);
return comp;
@@ -212,35 +236,32 @@ devcom_component_get(struct mlx5_devcom_dev *devc,
struct mlx5_devcom_comp_dev *
mlx5_devcom_register_component(struct mlx5_devcom_dev *devc,
enum mlx5_devcom_component id,
- u64 key,
+ const struct mlx5_devcom_match_attr *attr,
mlx5_devcom_event_handler_t handler,
void *data)
{
- struct mlx5_devcom_comp_dev *devcom;
+ struct mlx5_devcom_comp_dev *devcom = NULL;
struct mlx5_devcom_comp *comp;
- if (IS_ERR_OR_NULL(devc))
- return ERR_PTR(-EINVAL);
+ if (!devc)
+ return NULL;
mutex_lock(&comp_list_lock);
- comp = devcom_component_get(devc, id, key, handler);
- if (IS_ERR(comp)) {
- devcom = ERR_PTR(-EINVAL);
+ comp = devcom_component_get(devc, id, attr, handler);
+ if (IS_ERR(comp))
goto out_unlock;
- }
if (!comp) {
- comp = mlx5_devcom_comp_alloc(id, key, handler);
- if (IS_ERR(comp)) {
- devcom = ERR_CAST(comp);
+ comp = mlx5_devcom_comp_alloc(id, attr, handler);
+ if (!comp)
goto out_unlock;
- }
+
list_add_tail(&comp->comp_list, &devcom_comp_list);
}
mutex_unlock(&comp_list_lock);
devcom = devcom_alloc_comp_dev(devc, comp, data);
- if (IS_ERR(devcom))
+ if (!devcom)
kref_put(&comp->ref, mlx5_devcom_comp_release);
return devcom;
@@ -252,8 +273,10 @@ out_unlock:
void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom)
{
- if (!IS_ERR_OR_NULL(devcom))
- devcom_free_comp_dev(devcom);
+ if (!devcom)
+ return;
+
+ devcom_free_comp_dev(devcom);
}
int mlx5_devcom_comp_get_size(struct mlx5_devcom_comp_dev *devcom)
@@ -272,7 +295,7 @@ int mlx5_devcom_send_event(struct mlx5_devcom_comp_dev *devcom,
int err = 0;
void *data;
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return -ENODEV;
comp = devcom->comp;
@@ -314,7 +337,7 @@ void mlx5_devcom_comp_set_ready(struct mlx5_devcom_comp_dev *devcom, bool ready)
bool mlx5_devcom_comp_is_ready(struct mlx5_devcom_comp_dev *devcom)
{
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return false;
return READ_ONCE(devcom->comp->ready);
@@ -324,7 +347,7 @@ bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom_comp_dev *devcom)
{
struct mlx5_devcom_comp *comp;
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return false;
comp = devcom->comp;
@@ -397,21 +420,21 @@ void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom_comp_dev *devcom,
void mlx5_devcom_comp_lock(struct mlx5_devcom_comp_dev *devcom)
{
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return;
down_write(&devcom->comp->sem);
}
void mlx5_devcom_comp_unlock(struct mlx5_devcom_comp_dev *devcom)
{
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return;
up_write(&devcom->comp->sem);
}
int mlx5_devcom_comp_trylock(struct mlx5_devcom_comp_dev *devcom)
{
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return 0;
return down_write_trylock(&devcom->comp->sem);
}
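
With the new match attribute, two devices pair up on a component only when the component id, the match flags, the full key buffer, and (when MLX5_DEVCOM_MATCH_FLAGS_NS is set) the network namespace all agree. A userspace sketch of that comparison, with the namespace check omitted:

/* Toy devcom matching: id + flags + whole fixed-size key buffer must match. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define KEY_MAX 32

struct match_attr {
	uint32_t flags;
	uint8_t key[KEY_MAX];
};

struct component {
	int id;
	struct match_attr attr;
};

static bool component_equal(const struct component *c, int id,
			    const struct match_attr *attr)
{
	if (c->id != id)
		return false;
	if (c->attr.flags != attr->flags)
		return false;
	/* the whole fixed-size buffer is compared, so callers must zero the
	 * unused tail before copying a short identity into it */
	return !memcmp(c->attr.key, attr->key, KEY_MAX);
}

int main(void)
{
	struct component c = { .id = 1 };
	struct match_attr a = { 0 };

	memcpy(c.attr.key, "clock-id", 8);
	memcpy(a.key, "clock-id", 8);
	printf("match: %d\n", component_equal(&c, 1, &a));
	return 0;
}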
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
index d58032dd0df7..91e5ae529d5c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
@@ -6,11 +6,28 @@
#include <linux/mlx5/driver.h>
+enum mlx5_devcom_match_flags {
+enum mlx5_devcom_match_flags {
+ MLX5_DEVCOM_MATCH_FLAGS_NS = BIT(0),
+};
+
+#define MLX5_DEVCOM_MATCH_KEY_MAX 32
+union mlx5_devcom_match_key {
+ u64 val;
+ u8 buf[MLX5_DEVCOM_MATCH_KEY_MAX];
+};
+
+struct mlx5_devcom_match_attr {
+ u32 flags;
+ union mlx5_devcom_match_key key;
+ struct net *net;
+};
+
enum mlx5_devcom_component {
MLX5_DEVCOM_ESW_OFFLOADS,
MLX5_DEVCOM_MPV,
MLX5_DEVCOM_HCA_PORTS,
MLX5_DEVCOM_SD_GROUP,
+ MLX5_DEVCOM_SHARED_CLOCK,
MLX5_DEVCOM_NUM_COMPONENTS,
};
@@ -24,7 +41,7 @@ void mlx5_devcom_unregister_device(struct mlx5_devcom_dev *devc);
struct mlx5_devcom_comp_dev *
mlx5_devcom_register_component(struct mlx5_devcom_dev *devc,
enum mlx5_devcom_component id,
- u64 key,
+ const struct mlx5_devcom_match_attr *attr,
mlx5_devcom_event_handler_t handler,
void *data);
void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
index 7c5516b0a844..8115071c34a4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
@@ -30,7 +30,7 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
dm = kzalloc(sizeof(*dm), GFP_KERNEL);
if (!dm)
- return ERR_PTR(-ENOMEM);
+ return NULL;
spin_lock_init(&dm->lock);
@@ -96,7 +96,7 @@ err_modify_hdr:
err_steering:
kfree(dm);
- return ERR_PTR(-ENOMEM);
+ return NULL;
}
void mlx5_dm_cleanup(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
index 4b7f7131c560..b1edc71ffc6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
@@ -72,7 +72,7 @@ static inline void eq_update_ci(struct mlx5_eq *eq, int arm)
__raw_writel((__force u32)cpu_to_be32(val), addr);
/* We still want ordering, just not swabbing, so add a barrier */
- mb();
+ wmb();
}
int mlx5_eq_table_init(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index a80ecb672f33..0a3c260af377 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -161,7 +161,8 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains,
ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
- sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ? FT_TBL_SZ : POOL_NEXT_SIZE;
+ sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
+ FT_TBL_SZ : MLX5_FS_MAX_POOL_SIZE;
ft_attr.max_fte = sz;
/* We use chains_default_ft(chains) as the table's next_ft till
@@ -196,6 +197,11 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains,
ns = mlx5_get_flow_namespace(chains->dev, chains->ns);
}
+ if (!ns) {
+ mlx5_core_warn(chains->dev, "Failed to get flow namespace\n");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
ft_attr.autogroup.num_reserved_entries = 2;
ft_attr.autogroup.max_num_groups = chains->group_num;
ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
@@ -699,7 +705,7 @@ mlx5_chains_create_global_table(struct mlx5_fs_chains *chains)
goto err_ignore;
}
- chain = mlx5_chains_get_chain_range(chains),
+ chain = mlx5_chains_get_chain_range(chains);
prio = mlx5_chains_get_prio_range(chains);
level = mlx5_chains_get_level_range(chains);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
index 9f13cea16446..7adad784ad46 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
@@ -9,7 +9,7 @@
#include "mlx5_core.h"
#include "lib/fs_ttc.h"
-#define MLX5_TTC_MAX_NUM_GROUPS 4
+#define MLX5_TTC_MAX_NUM_GROUPS 7
#define MLX5_TTC_GROUP_TCPUDP_SIZE (MLX5_TT_IPV6_UDP + 1)
struct mlx5_fs_ttc_groups {
@@ -31,10 +31,14 @@ static int mlx5_fs_ttc_table_size(const struct mlx5_fs_ttc_groups *groups)
/* L3/L4 traffic type classifier */
struct mlx5_ttc_table {
int num_groups;
+ const struct mlx5_fs_ttc_groups *groups;
+ struct mlx5_core_dev *mdev;
struct mlx5_flow_table *t;
struct mlx5_flow_group **g;
struct mlx5_ttc_rule rules[MLX5_NUM_TT];
struct mlx5_flow_handle *tunnel_rules[MLX5_NUM_TUNNEL_TT];
+ u32 refcnt;
+ struct mutex mutex; /* Protect adding rules for ipsec crypto offload */
};
struct mlx5_flow_table *mlx5_get_ttc_flow_table(struct mlx5_ttc_table *ttc)
@@ -61,6 +65,25 @@ static void mlx5_cleanup_ttc_rules(struct mlx5_ttc_table *ttc)
}
}
+static const char *mlx5_traffic_types_names[MLX5_NUM_TT] = {
+ [MLX5_TT_IPV4_TCP] = "TT_IPV4_TCP",
+ [MLX5_TT_IPV6_TCP] = "TT_IPV6_TCP",
+ [MLX5_TT_IPV4_UDP] = "TT_IPV4_UDP",
+ [MLX5_TT_IPV6_UDP] = "TT_IPV6_UDP",
+ [MLX5_TT_IPV4_IPSEC_AH] = "TT_IPV4_IPSEC_AH",
+ [MLX5_TT_IPV6_IPSEC_AH] = "TT_IPV6_IPSEC_AH",
+ [MLX5_TT_IPV4_IPSEC_ESP] = "TT_IPV4_IPSEC_ESP",
+ [MLX5_TT_IPV6_IPSEC_ESP] = "TT_IPV6_IPSEC_ESP",
+ [MLX5_TT_IPV4] = "TT_IPV4",
+ [MLX5_TT_IPV6] = "TT_IPV6",
+ [MLX5_TT_ANY] = "TT_ANY"
+};
+
+const char *mlx5_ttc_get_name(enum mlx5_traffic_types tt)
+{
+ return mlx5_traffic_types_names[tt];
+}
+
struct mlx5_etype_proto {
u16 etype;
u8 proto;
@@ -144,6 +167,8 @@ static struct mlx5_etype_proto ttc_tunnel_rules[] = {
enum TTC_GROUP_TYPE {
TTC_GROUPS_DEFAULT = 0,
TTC_GROUPS_USE_L4_TYPE = 1,
+ TTC_GROUPS_DEFAULT_ESP = 2,
+ TTC_GROUPS_USE_L4_TYPE_ESP = 3,
};
static const struct mlx5_fs_ttc_groups ttc_groups[] = {
@@ -165,6 +190,31 @@ static const struct mlx5_fs_ttc_groups ttc_groups[] = {
BIT(0),
},
},
+ [TTC_GROUPS_DEFAULT_ESP] = {
+ .num_groups = 6,
+ .group_size = {
+ MLX5_TTC_GROUP_TCPUDP_SIZE + BIT(1) +
+ MLX5_NUM_TUNNEL_TT,
+ BIT(2), /* decrypted outer L4 */
+ BIT(2), /* decrypted inner L4 */
+ BIT(1), /* ESP */
+ BIT(1),
+ BIT(0),
+ },
+ },
+ [TTC_GROUPS_USE_L4_TYPE_ESP] = {
+ .use_l4_type = true,
+ .num_groups = 7,
+ .group_size = {
+ MLX5_TTC_GROUP_TCPUDP_SIZE,
+ BIT(1) + MLX5_NUM_TUNNEL_TT,
+ BIT(2), /* decrypted outer L4 */
+ BIT(2), /* decrypted inner L4 */
+ BIT(1), /* ESP */
+ BIT(1),
+ BIT(0),
+ },
+ },
};
static const struct mlx5_fs_ttc_groups inner_ttc_groups[] = {
@@ -188,6 +238,23 @@ static const struct mlx5_fs_ttc_groups inner_ttc_groups[] = {
},
};
+static const struct mlx5_fs_ttc_groups *
+mlx5_ttc_get_fs_groups(bool use_l4_type, bool ipsec_rss)
+{
+ if (!ipsec_rss)
+ return use_l4_type ? &ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
+ &ttc_groups[TTC_GROUPS_DEFAULT];
+
+ return use_l4_type ? &ttc_groups[TTC_GROUPS_USE_L4_TYPE_ESP] :
+ &ttc_groups[TTC_GROUPS_DEFAULT_ESP];
+}
+
+bool mlx5_ttc_has_esp_flow_group(struct mlx5_ttc_table *ttc)
+{
+ return ttc->groups == &ttc_groups[TTC_GROUPS_DEFAULT_ESP] ||
+ ttc->groups == &ttc_groups[TTC_GROUPS_USE_L4_TYPE_ESP];
+}
+
u8 mlx5_get_proto_by_tunnel_type(enum mlx5_tunnel_types tt)
{
return ttc_tunnel_rules[tt].proto;
@@ -238,6 +305,31 @@ static u8 mlx5_etype_to_ipv(u16 ethertype)
return 0;
}
+static void mlx5_fs_ttc_set_match_ipv_outer(struct mlx5_core_dev *mdev,
+ struct mlx5_flow_spec *spec,
+ u16 etype)
+{
+ int match_ipv_outer =
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.outer_ip_version);
+ u8 ipv;
+
+ ipv = mlx5_etype_to_ipv(etype);
+ if (match_ipv_outer && ipv) {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.ip_version);
+ MLX5_SET(fte_match_param, spec->match_value,
+ outer_headers.ip_version, ipv);
+ } else {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.ethertype);
+ MLX5_SET(fte_match_param, spec->match_value,
+ outer_headers.ethertype, etype);
+ }
+
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+}
+
static void mlx5_fs_ttc_set_match_proto(void *headers_c, void *headers_v,
u8 proto, bool use_l4_type)
{
@@ -260,16 +352,12 @@ static void mlx5_fs_ttc_set_match_proto(void *headers_c, void *headers_v,
static struct mlx5_flow_handle *
mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
struct mlx5_flow_destination *dest, u16 etype, u8 proto,
- bool use_l4_type)
+ bool use_l4_type, bool ipsec_rss)
{
- int match_ipv_outer =
- MLX5_CAP_FLOWTABLE_NIC_RX(dev,
- ft_field_support.outer_ip_version);
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
int err = 0;
- u8 ipv;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
@@ -286,15 +374,15 @@ mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
proto, use_l4_type);
}
- ipv = mlx5_etype_to_ipv(etype);
- if (match_ipv_outer && ipv) {
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ipv);
- } else if (etype) {
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
+ if (etype)
+ mlx5_fs_ttc_set_match_ipv_outer(dev, spec, etype);
+
+ if (ipsec_rss && proto == IPPROTO_ESP) {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.ipsec_next_header);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.ipsec_next_header, 0);
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}
rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
@@ -323,12 +411,16 @@ static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
struct mlx5_ttc_rule *rule = &rules[tt];
+ if (mlx5_ttc_is_decrypted_esp_tt(tt))
+ continue;
+
if (test_bit(tt, params->ignore_dests))
continue;
rule->rule = mlx5_generate_ttc_rule(dev, ft, &params->dests[tt],
ttc_rules[tt].etype,
ttc_rules[tt].proto,
- use_l4_type);
+ use_l4_type,
+ params->ipsec_rss);
if (IS_ERR(rule->rule)) {
err = PTR_ERR(rule->rule);
rule->rule = NULL;
@@ -351,7 +443,7 @@ static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
&params->tunnel_dests[tt],
ttc_tunnel_rules[tt].etype,
ttc_tunnel_rules[tt].proto,
- use_l4_type);
+ use_l4_type, false);
if (IS_ERR(trules[tt])) {
err = PTR_ERR(trules[tt]);
trules[tt] = NULL;
@@ -366,10 +458,78 @@ del_rules:
return err;
}
+static int mlx5_create_ttc_table_ipsec_groups(struct mlx5_ttc_table *ttc,
+ bool use_ipv,
+ u32 *in, int *next_ix)
+{
+ u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ const struct mlx5_fs_ttc_groups *groups = ttc->groups;
+ int ix = *next_ix;
+
+ MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
+
+ /* decrypted ESP outer group */
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.l4_type_ext);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += groups->group_size[ttc->num_groups];
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
+ if (IS_ERR(ttc->g[ttc->num_groups]))
+ goto err;
+ ttc->num_groups++;
+
+ MLX5_SET(fte_match_param, mc, outer_headers.l4_type_ext, 0);
+
+ /* decrypted ESP inner group */
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
+ if (use_ipv)
+ MLX5_SET(fte_match_param, mc, outer_headers.ip_version, 0);
+ else
+ MLX5_SET(fte_match_param, mc, outer_headers.ethertype, 0);
+ MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version);
+ MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.l4_type_ext);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += groups->group_size[ttc->num_groups];
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
+ if (IS_ERR(ttc->g[ttc->num_groups]))
+ goto err;
+ ttc->num_groups++;
+
+ MLX5_SET(fte_match_param, mc, inner_headers.ip_version, 0);
+ MLX5_SET(fte_match_param, mc, inner_headers.l4_type_ext, 0);
+
+ /* undecrypted ESP group */
+ MLX5_SET_CFG(in, match_criteria_enable,
+ MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
+ if (use_ipv)
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
+ else
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
+ MLX5_SET_TO_ONES(fte_match_param, mc,
+ misc_parameters_2.ipsec_next_header);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += groups->group_size[ttc->num_groups];
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
+ if (IS_ERR(ttc->g[ttc->num_groups]))
+ goto err;
+ ttc->num_groups++;
+
+ *next_ix = ix;
+
+ return 0;
+
+err:
+ return PTR_ERR(ttc->g[ttc->num_groups]);
+}
+
static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc,
- bool use_ipv,
- const struct mlx5_fs_ttc_groups *groups)
+ bool use_ipv)
{
+ const struct mlx5_fs_ttc_groups *groups = ttc->groups;
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
int ix = 0;
u32 *in;
@@ -417,8 +577,18 @@ static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc,
goto err;
ttc->num_groups++;
+ if (mlx5_ttc_has_esp_flow_group(ttc)) {
+ err = mlx5_create_ttc_table_ipsec_groups(ttc, use_ipv, in, &ix);
+ if (err)
+ goto err;
+
+ MLX5_SET(fte_match_param, mc,
+ misc_parameters_2.ipsec_next_header, 0);
+ }
+
/* L3 Group */
MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += groups->group_size[ttc->num_groups];
MLX5_SET_CFG(in, end_flow_index, ix - 1);
@@ -508,6 +678,9 @@ static int mlx5_generate_inner_ttc_table_rules(struct mlx5_core_dev *dev,
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
struct mlx5_ttc_rule *rule = &rules[tt];
+ if (mlx5_ttc_is_decrypted_esp_tt(tt))
+ continue;
+
if (test_bit(tt, params->ignore_dests))
continue;
rule->rule = mlx5_generate_inner_ttc_rule(dev, ft,
@@ -618,10 +791,6 @@ struct mlx5_ttc_table *mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev,
bool use_l4_type;
int err;
- ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
- if (!ttc)
- return ERR_PTR(-ENOMEM);
-
switch (params->ns_type) {
case MLX5_FLOW_NAMESPACE_PORT_SEL:
use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
@@ -635,7 +804,16 @@ struct mlx5_ttc_table *mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev,
return ERR_PTR(-EINVAL);
}
+ ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
+ if (!ttc)
+ return ERR_PTR(-ENOMEM);
+
ns = mlx5_get_flow_namespace(dev, params->ns_type);
+ if (!ns) {
+ kvfree(ttc);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
groups = use_l4_type ? &inner_ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
&inner_ttc_groups[TTC_GROUPS_DEFAULT];
@@ -676,6 +854,7 @@ void mlx5_destroy_ttc_table(struct mlx5_ttc_table *ttc)
kfree(ttc->g);
mlx5_destroy_flow_table(ttc->t);
+ mutex_destroy(&ttc->mutex);
kvfree(ttc);
}
@@ -685,16 +864,11 @@ struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
bool match_ipv_outer =
MLX5_CAP_FLOWTABLE_NIC_RX(dev,
ft_field_support.outer_ip_version);
- const struct mlx5_fs_ttc_groups *groups;
struct mlx5_flow_namespace *ns;
struct mlx5_ttc_table *ttc;
bool use_l4_type;
int err;
- ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
- if (!ttc)
- return ERR_PTR(-ENOMEM);
-
switch (params->ns_type) {
case MLX5_FLOW_NAMESPACE_PORT_SEL:
use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
@@ -708,12 +882,20 @@ struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
return ERR_PTR(-EINVAL);
}
+ ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
+ if (!ttc)
+ return ERR_PTR(-ENOMEM);
+
ns = mlx5_get_flow_namespace(dev, params->ns_type);
- groups = use_l4_type ? &ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
- &ttc_groups[TTC_GROUPS_DEFAULT];
+ if (!ns) {
+ kvfree(ttc);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ ttc->groups = mlx5_ttc_get_fs_groups(use_l4_type, params->ipsec_rss);
WARN_ON_ONCE(params->ft_attr.max_fte);
- params->ft_attr.max_fte = mlx5_fs_ttc_table_size(groups);
+ params->ft_attr.max_fte = mlx5_fs_ttc_table_size(ttc->groups);
ttc->t = mlx5_create_flow_table(ns, &params->ft_attr);
if (IS_ERR(ttc->t)) {
err = PTR_ERR(ttc->t);
@@ -721,7 +903,7 @@ struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
return ERR_PTR(err);
}
- err = mlx5_create_ttc_table_groups(ttc, match_ipv_outer, groups);
+ err = mlx5_create_ttc_table_groups(ttc, match_ipv_outer);
if (err)
goto destroy_ft;
@@ -729,6 +911,9 @@ struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
if (err)
goto destroy_ft;
+ ttc->mdev = dev;
+ mutex_init(&ttc->mutex);
+
return ttc;
destroy_ft:
@@ -762,3 +947,194 @@ int mlx5_ttc_fwd_default_dest(struct mlx5_ttc_table *ttc,
return mlx5_ttc_fwd_dest(ttc, type, &dest);
}
+
+static void _mlx5_ttc_destroy_ipsec_rules(struct mlx5_ttc_table *ttc)
+{
+ enum mlx5_traffic_types i;
+
+ for (i = MLX5_TT_DECRYPTED_ESP_OUTER_IPV4_TCP;
+ i <= MLX5_TT_DECRYPTED_ESP_INNER_IPV6_UDP; i++) {
+ if (!ttc->rules[i].rule)
+ continue;
+
+ mlx5_del_flow_rules(ttc->rules[i].rule);
+ ttc->rules[i].rule = NULL;
+ }
+}
+
+void mlx5_ttc_destroy_ipsec_rules(struct mlx5_ttc_table *ttc)
+{
+ if (!mlx5_ttc_has_esp_flow_group(ttc))
+ return;
+
+ mutex_lock(&ttc->mutex);
+ if (--ttc->refcnt)
+ goto unlock;
+
+ _mlx5_ttc_destroy_ipsec_rules(ttc);
+unlock:
+ mutex_unlock(&ttc->mutex);
+}
+
+static int mlx5_ttc_get_tt_attrs(enum mlx5_traffic_types type,
+ u16 *etype, int *l4_type_ext,
+ enum mlx5_traffic_types *tir_tt)
+{
+ switch (type) {
+ case MLX5_TT_DECRYPTED_ESP_OUTER_IPV4_TCP:
+ case MLX5_TT_DECRYPTED_ESP_INNER_IPV4_TCP:
+ *etype = ETH_P_IP;
+ *l4_type_ext = MLX5_PACKET_L4_TYPE_EXT_TCP;
+ *tir_tt = MLX5_TT_IPV4_TCP;
+ break;
+ case MLX5_TT_DECRYPTED_ESP_OUTER_IPV6_TCP:
+ case MLX5_TT_DECRYPTED_ESP_INNER_IPV6_TCP:
+ *etype = ETH_P_IPV6;
+ *l4_type_ext = MLX5_PACKET_L4_TYPE_EXT_TCP;
+ *tir_tt = MLX5_TT_IPV6_TCP;
+ break;
+ case MLX5_TT_DECRYPTED_ESP_OUTER_IPV4_UDP:
+ case MLX5_TT_DECRYPTED_ESP_INNER_IPV4_UDP:
+ *etype = ETH_P_IP;
+ *l4_type_ext = MLX5_PACKET_L4_TYPE_EXT_UDP;
+ *tir_tt = MLX5_TT_IPV4_UDP;
+ break;
+ case MLX5_TT_DECRYPTED_ESP_OUTER_IPV6_UDP:
+ case MLX5_TT_DECRYPTED_ESP_INNER_IPV6_UDP:
+ *etype = ETH_P_IPV6;
+ *l4_type_ext = MLX5_PACKET_L4_TYPE_EXT_UDP;
+ *tir_tt = MLX5_TT_IPV6_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct mlx5_flow_handle *
+mlx5_ttc_create_ipsec_outer_rule(struct mlx5_ttc_table *ttc,
+ enum mlx5_traffic_types type)
+{
+ struct mlx5_flow_destination dest;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ enum mlx5_traffic_types tir_tt;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int l4_type_ext;
+ u16 etype;
+ int err;
+
+ err = mlx5_ttc_get_tt_attrs(type, &etype, &l4_type_ext, &tir_tt);
+ if (err)
+ return ERR_PTR(err);
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return ERR_PTR(-ENOMEM);
+
+ mlx5_fs_ttc_set_match_ipv_outer(ttc->mdev, spec, etype);
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.l4_type_ext);
+ MLX5_SET(fte_match_param, spec->match_value,
+ outer_headers.l4_type_ext, l4_type_ext);
+
+ dest = mlx5_ttc_get_default_dest(ttc, tir_tt);
+
+ rule = mlx5_add_flow_rules(ttc->t, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(ttc->mdev, "%s: add rule failed\n", __func__);
+ }
+
+ kvfree(spec);
+ return err ? ERR_PTR(err) : rule;
+}
+
+static struct mlx5_flow_handle *
+mlx5_ttc_create_ipsec_inner_rule(struct mlx5_ttc_table *ttc,
+ struct mlx5_ttc_table *inner_ttc,
+ enum mlx5_traffic_types type)
+{
+ struct mlx5_flow_destination dest;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ enum mlx5_traffic_types tir_tt;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int l4_type_ext;
+ u16 etype;
+ int err;
+
+ err = mlx5_ttc_get_tt_attrs(type, &etype, &l4_type_ext, &tir_tt);
+ if (err)
+ return ERR_PTR(err);
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return ERR_PTR(-ENOMEM);
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ inner_headers.ip_version);
+ MLX5_SET(fte_match_param, spec->match_value,
+ inner_headers.ip_version, mlx5_etype_to_ipv(etype));
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ inner_headers.l4_type_ext);
+ MLX5_SET(fte_match_param, spec->match_value,
+ inner_headers.l4_type_ext, l4_type_ext);
+
+ dest = mlx5_ttc_get_default_dest(inner_ttc, tir_tt);
+
+ spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
+
+ rule = mlx5_add_flow_rules(ttc->t, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(ttc->mdev, "%s: add rule failed\n", __func__);
+ }
+
+ kvfree(spec);
+ return err ? ERR_PTR(err) : rule;
+}
+
+int mlx5_ttc_create_ipsec_rules(struct mlx5_ttc_table *ttc,
+ struct mlx5_ttc_table *inner_ttc)
+{
+ struct mlx5_flow_handle *rule;
+ enum mlx5_traffic_types i;
+
+ if (!mlx5_ttc_has_esp_flow_group(ttc))
+ return 0;
+
+ mutex_lock(&ttc->mutex);
+ if (ttc->refcnt)
+ goto skip;
+
+ for (i = MLX5_TT_DECRYPTED_ESP_OUTER_IPV4_TCP;
+ i <= MLX5_TT_DECRYPTED_ESP_OUTER_IPV6_UDP; i++) {
+ rule = mlx5_ttc_create_ipsec_outer_rule(ttc, i);
+ if (IS_ERR(rule))
+ goto err_out;
+
+ ttc->rules[i].rule = rule;
+ }
+
+ for (i = MLX5_TT_DECRYPTED_ESP_INNER_IPV4_TCP;
+ i <= MLX5_TT_DECRYPTED_ESP_INNER_IPV6_UDP; i++) {
+ rule = mlx5_ttc_create_ipsec_inner_rule(ttc, inner_ttc, i);
+ if (IS_ERR(rule))
+ goto err_out;
+
+ ttc->rules[i].rule = rule;
+ }
+
+skip:
+ ttc->refcnt++;
+ mutex_unlock(&ttc->mutex);
+ return 0;
+
+err_out:
+ _mlx5_ttc_destroy_ipsec_rules(ttc);
+ mutex_unlock(&ttc->mutex);
+ return PTR_ERR(rule);
+}
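
mlx5_ttc_create_ipsec_rules() installs the decrypted-ESP steering rules only for the first caller and afterwards just bumps ttc->refcnt; mlx5_ttc_destroy_ipsec_rules() tears them down when the count drops back to zero. A minimal model of that first-user-creates / last-user-destroys pattern, with the rules reduced to a flag:

/* Toy refcounted setup/teardown under a mutex, mirroring the ttc->refcnt use. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct ttc_model {
	pthread_mutex_t lock;
	unsigned int refcnt;
	bool rules_installed;
};

static void ttc_rules_get(struct ttc_model *t)
{
	pthread_mutex_lock(&t->lock);
	if (!t->refcnt)
		t->rules_installed = true;	/* first user installs */
	t->refcnt++;
	pthread_mutex_unlock(&t->lock);
}

static void ttc_rules_put(struct ttc_model *t)
{
	pthread_mutex_lock(&t->lock);
	if (!--t->refcnt)
		t->rules_installed = false;	/* last user removes */
	pthread_mutex_unlock(&t->lock);
}

int main(void)
{
	struct ttc_model t = { .lock = PTHREAD_MUTEX_INITIALIZER };

	ttc_rules_get(&t);
	ttc_rules_get(&t);
	ttc_rules_put(&t);
	printf("installed=%d refcnt=%u\n", t.rules_installed, t.refcnt);
	ttc_rules_put(&t);
	printf("installed=%d refcnt=%u\n", t.rules_installed, t.refcnt);
	return 0;
}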
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
index 92eea6bea310..95f6e56724a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
@@ -18,6 +18,14 @@ enum mlx5_traffic_types {
MLX5_TT_IPV4,
MLX5_TT_IPV6,
MLX5_TT_ANY,
+ MLX5_TT_DECRYPTED_ESP_OUTER_IPV4_TCP,
+ MLX5_TT_DECRYPTED_ESP_OUTER_IPV6_TCP,
+ MLX5_TT_DECRYPTED_ESP_OUTER_IPV4_UDP,
+ MLX5_TT_DECRYPTED_ESP_OUTER_IPV6_UDP,
+ MLX5_TT_DECRYPTED_ESP_INNER_IPV4_TCP,
+ MLX5_TT_DECRYPTED_ESP_INNER_IPV6_TCP,
+ MLX5_TT_DECRYPTED_ESP_INNER_IPV4_UDP,
+ MLX5_TT_DECRYPTED_ESP_INNER_IPV6_UDP,
MLX5_NUM_TT,
MLX5_NUM_INDIR_TIRS = MLX5_TT_ANY,
};
@@ -47,8 +55,10 @@ struct ttc_params {
bool inner_ttc;
DECLARE_BITMAP(ignore_tunnel_dests, MLX5_NUM_TUNNEL_TT);
struct mlx5_flow_destination tunnel_dests[MLX5_NUM_TUNNEL_TT];
+ bool ipsec_rss;
};
+const char *mlx5_ttc_get_name(enum mlx5_traffic_types tt);
struct mlx5_flow_table *mlx5_get_ttc_flow_table(struct mlx5_ttc_table *ttc);
struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
@@ -69,4 +79,14 @@ int mlx5_ttc_fwd_default_dest(struct mlx5_ttc_table *ttc,
bool mlx5_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev);
u8 mlx5_get_proto_by_tunnel_type(enum mlx5_tunnel_types tt);
+bool mlx5_ttc_has_esp_flow_group(struct mlx5_ttc_table *ttc);
+int mlx5_ttc_create_ipsec_rules(struct mlx5_ttc_table *ttc,
+ struct mlx5_ttc_table *inner_ttc);
+void mlx5_ttc_destroy_ipsec_rules(struct mlx5_ttc_table *ttc);
+static inline bool mlx5_ttc_is_decrypted_esp_tt(enum mlx5_traffic_types tt)
+{
+ return tt >= MLX5_TT_DECRYPTED_ESP_OUTER_IPV4_TCP &&
+ tt <= MLX5_TT_DECRYPTED_ESP_INNER_IPV6_UDP;
+}
+
#endif /* __MLX5_FS_TTC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c
index b7d4b1a2baf2..d524f0220513 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c
@@ -164,6 +164,8 @@ ipsec_fs_roce_rx_rule_setup(struct mlx5_core_dev *mdev,
roce->rule = rule;
memset(spec, 0, sizeof(*spec));
+ if (default_dst->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
+ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, default_dst, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -178,6 +180,8 @@ ipsec_fs_roce_rx_rule_setup(struct mlx5_core_dev *mdev,
goto out;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ if (default_dst->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
+ flow_act.flags &= ~FLOW_ACT_IGNORE_FLOW_LEVEL;
dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
dst.ft = roce->ft_rdma;
rule = mlx5_add_flow_rules(roce->nic_master_ft, NULL, &flow_act, &dst,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
index 4a078113e292..e6be2f01daf4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
@@ -45,11 +45,7 @@
#define MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI 0x8
#define MLX5_SECTAG_HEADER_SIZE_WITH_SCI (MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI + MACSEC_SCI_LEN)
-/* MACsec RX flow steering */
-#define MLX5_ETH_WQE_FT_META_MACSEC_MASK 0x3E
-
/* MACsec fs_id handling for steering */
-#define macsec_fs_set_tx_fs_id(fs_id) (MLX5_ETH_WQE_FT_META_MACSEC | (fs_id) << 2)
#define macsec_fs_set_rx_fs_id(fs_id) ((fs_id) | BIT(30))
struct mlx5_sectag_header {
@@ -497,7 +493,7 @@ static int macsec_fs_tx_create(struct mlx5_macsec_fs *macsec_fs)
memset(&dest, 0, sizeof(struct mlx5_flow_destination));
memset(&flow_act, 0, sizeof(flow_act));
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(tx_tables->check_miss_rule_counter);
+ dest.counter = tx_tables->check_miss_rule_counter;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
rule = mlx5_add_flow_rules(tx_tables->ft_check, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
@@ -519,7 +515,7 @@ static int macsec_fs_tx_create(struct mlx5_macsec_fs *macsec_fs)
flow_act.flags = FLOW_ACT_NO_APPEND;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW | MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(tx_tables->check_rule_counter);
+ dest.counter = tx_tables->check_rule_counter;
rule = mlx5_add_flow_rules(tx_tables->ft_check, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -597,7 +593,7 @@ static int macsec_fs_tx_setup_fte(struct mlx5_macsec_fs *macsec_fs,
MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a,
MLX5_ETH_WQE_FT_META_MACSEC_MASK);
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a,
- macsec_fs_set_tx_fs_id(id));
+ MLX5_MACSEC_TX_METADATA(id));
*fs_id = id;
flow_act->crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC;
@@ -1200,7 +1196,7 @@ static int macsec_fs_rx_create_check_decap_rule(struct mlx5_macsec_fs *macsec_fs
flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
roce_dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- roce_dest[dstn].counter_id = mlx5_fc_id(rx_tables->check_rule_counter);
+ roce_dest[dstn].counter = rx_tables->check_rule_counter;
rule = mlx5_add_flow_rules(rx_tables->ft_check, spec, flow_act, roce_dest, dstn + 1);
if (IS_ERR(rule)) {
@@ -1592,7 +1588,7 @@ static int macsec_fs_rx_create(struct mlx5_macsec_fs *macsec_fs)
memset(&flow_act, 0, sizeof(flow_act));
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(rx_tables->check_miss_rule_counter);
+ dest.counter = rx_tables->check_miss_rule_counter;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
rule = mlx5_add_flow_rules(rx_tables->ft_check, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
@@ -2219,9 +2215,11 @@ static int mlx5_macsec_fs_add_roce_rule_tx(struct mlx5_macsec_fs *macsec_fs, u32
MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_A);
- MLX5_SET(set_action_in, action, data, macsec_fs_set_tx_fs_id(fs_id));
- MLX5_SET(set_action_in, action, offset, 0);
- MLX5_SET(set_action_in, action, length, 32);
+ MLX5_SET(set_action_in, action, data,
+ mlx5_macsec_fs_set_tx_fs_id(fs_id));
+ MLX5_SET(set_action_in, action, offset,
+ MLX5_ETH_WQE_FT_META_MACSEC_SHIFT);
+ MLX5_SET(set_action_in, action, length, 8);
modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC,
1, action);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h
index 34b80c3ef6a5..15acaff43641 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h
@@ -12,6 +12,21 @@
#define MLX5_MACSEC_METADATA_MARKER(metadata) ((((metadata) >> 30) & 0x3) == 0x1)
#define MLX5_MACSEC_RX_METADAT_HANDLE(metadata) ((metadata) & MLX5_MACSEC_RX_FS_ID_MASK)
+/* MACsec TX flow steering */
+#define MLX5_ETH_WQE_FT_META_MACSEC_MASK \
+ (MLX5_ETH_WQE_FT_META_MACSEC | MLX5_ETH_WQE_FT_META_MACSEC_FS_ID_MASK)
+#define MLX5_ETH_WQE_FT_META_MACSEC_SHIFT MLX5_ETH_WQE_FT_META_SHIFT
+
+/* MACsec fs_id handling for steering */
+#define mlx5_macsec_fs_set_tx_fs_id(fs_id) \
+ (((MLX5_ETH_WQE_FT_META_MACSEC) >> MLX5_ETH_WQE_FT_META_MACSEC_SHIFT) \
+ | ((fs_id) << 2))
+
+#define MLX5_MACSEC_TX_METADATA(fs_id) \
+ (mlx5_macsec_fs_set_tx_fs_id(fs_id) << \
+ MLX5_ETH_WQE_FT_META_MACSEC_SHIFT)
+
+/* MACsec fs_id uses 4 bits, supports up to 16 interfaces */
#define MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES 16
struct mlx5_macsec_fs;
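
The reworked TX metadata packs a MACsec marker plus a 4-bit fs_id and places the result in metadata_reg_a at MLX5_ETH_WQE_FT_META_MACSEC_SHIFT, writing only 8 bits through the modify-header action. The packing arithmetic can be exercised in isolation; the MARKER/SHIFT constants below are placeholders, not the real register layout:

/* Toy demonstration of MLX5_MACSEC_TX_METADATA-style packing/unpacking. */
#include <stdint.h>
#include <stdio.h>

#define TOY_MACSEC_MARKER	0x1u	/* "this packet is MACsec" bit */
#define TOY_META_SHIFT		24	/* illustrative offset into metadata_reg_a */
#define TOY_FS_ID_MASK		0xfu	/* fs_id uses 4 bits -> 16 interfaces */

static uint32_t toy_tx_metadata(uint32_t fs_id)
{
	/* marker in the low bits, fs_id packed above it, then shifted in place */
	return (TOY_MACSEC_MARKER | (fs_id << 2)) << TOY_META_SHIFT;
}

static uint32_t toy_tx_fs_id(uint32_t metadata)
{
	return ((metadata >> TOY_META_SHIFT) >> 2) & TOY_FS_ID_MASK;
}

int main(void)
{
	uint32_t md = toy_tx_metadata(5);

	printf("metadata=0x%08x fs_id=%u\n", md, toy_tx_fs_id(md));
	return 0;
}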
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
index 37d5f445598c..74ea5da58b7e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -45,14 +45,22 @@ int mlx5_crdump_enable(struct mlx5_core_dev *dev);
void mlx5_crdump_disable(struct mlx5_core_dev *dev);
int mlx5_crdump_collect(struct mlx5_core_dev *dev, u32 *cr_data);
-static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
+static inline struct net_device *mlx5_uplink_netdev_get(struct mlx5_core_dev *mdev)
{
- return devlink_net(priv_to_devlink(dev));
+ struct mlx5e_resources *mlx5e_res = &mdev->mlx5e_res;
+ struct net_device *netdev;
+
+ mutex_lock(&mlx5e_res->uplink_netdev_lock);
+ netdev = mlx5e_res->uplink_netdev;
+ netdev_hold(netdev, &mlx5e_res->tracker, GFP_KERNEL);
+ mutex_unlock(&mlx5e_res->uplink_netdev_lock);
+ return netdev;
}
-static inline struct net_device *mlx5_uplink_netdev_get(struct mlx5_core_dev *mdev)
+static inline void mlx5_uplink_netdev_put(struct mlx5_core_dev *mdev,
+ struct net_device *netdev)
{
- return mdev->mlx5e_res.uplink_netdev;
+ netdev_put(netdev, &mdev->mlx5e_res.tracker);
}
struct mlx5_sd;
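
mlx5_uplink_netdev_get() now takes a tracked reference under uplink_netdev_lock, and callers are expected to balance it with mlx5_uplink_netdev_put(). A generic userspace sketch of that locked get/put pairing (plain counters, not the netdev tracker API):

/* Toy get/put: the getter pins the resource under a lock, the caller unpins. */
#include <pthread.h>
#include <stdio.h>

struct resource {
	int refs;
};

struct holder {
	pthread_mutex_t lock;
	struct resource *res;
};

static struct resource *res_get(struct holder *h)
{
	struct resource *r;

	pthread_mutex_lock(&h->lock);
	r = h->res;
	if (r)
		r->refs++;	/* pin it while the caller uses it */
	pthread_mutex_unlock(&h->lock);
	return r;
}

static void res_put(struct resource *r)
{
	if (r)
		r->refs--;
}

int main(void)
{
	struct resource res = { .refs = 1 };
	struct holder h = { .lock = PTHREAD_MUTEX_INITIALIZER, .res = &res };
	struct resource *r = res_get(&h);

	printf("refs while held: %d\n", r->refs);
	res_put(r);
	printf("refs after put:  %d\n", res.refs);
	return 0;
}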
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
index 4450091e181a..4a88a42ae4f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
@@ -65,13 +65,14 @@ static int del_l2table_entry_cmd(struct mlx5_core_dev *dev, u32 index)
/* UC L2 table hash node */
struct l2table_node {
struct l2addr_node node;
- u32 index; /* index in HW l2 table */
+ int index; /* index in HW l2 table */
int ref_count;
};
struct mlx5_mpfs {
struct hlist_head hash[MLX5_L2_ADDR_HASH_SIZE];
struct mutex lock; /* Synchronize l2 table access */
+ bool enabled;
u32 size;
unsigned long *bitmap;
};
@@ -114,6 +115,8 @@ int mlx5_mpfs_init(struct mlx5_core_dev *dev)
return -ENOMEM;
}
+ mpfs->enabled = true;
+
dev->priv.mpfs = mpfs;
return 0;
}
@@ -135,7 +138,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac)
struct mlx5_mpfs *mpfs = dev->priv.mpfs;
struct l2table_node *l2addr;
int err = 0;
- u32 index;
+ int index;
if (!mpfs)
return 0;
@@ -148,30 +151,34 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac)
goto out;
}
- err = alloc_l2table_index(mpfs, &index);
- if (err)
- goto out;
-
l2addr = l2addr_hash_add(mpfs->hash, mac, struct l2table_node, GFP_KERNEL);
if (!l2addr) {
err = -ENOMEM;
- goto hash_add_err;
+ goto out;
}
- err = set_l2table_entry_cmd(dev, index, mac);
- if (err)
- goto set_table_entry_err;
+ index = -1;
+
+ if (mpfs->enabled) {
+ err = alloc_l2table_index(mpfs, &index);
+ if (err)
+ goto hash_del;
+ err = set_l2table_entry_cmd(dev, index, mac);
+ if (err)
+ goto free_l2table_index;
+ mlx5_core_dbg(dev, "MPFS entry %pM, set @index (%d)\n",
+ l2addr->node.addr, index);
+ }
l2addr->index = index;
l2addr->ref_count = 1;
mlx5_core_dbg(dev, "MPFS mac added %pM, index (%d)\n", mac, index);
goto out;
-
-set_table_entry_err:
- l2addr_hash_del(l2addr);
-hash_add_err:
+free_l2table_index:
free_l2table_index(mpfs, index);
+hash_del:
+ l2addr_hash_del(l2addr);
out:
mutex_unlock(&mpfs->lock);
return err;
@@ -183,7 +190,7 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)
struct mlx5_mpfs *mpfs = dev->priv.mpfs;
struct l2table_node *l2addr;
int err = 0;
- u32 index;
+ int index;
if (!mpfs)
return 0;
@@ -200,12 +207,87 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)
goto unlock;
index = l2addr->index;
- del_l2table_entry_cmd(dev, index);
+ if (index >= 0) {
+ del_l2table_entry_cmd(dev, index);
+ free_l2table_index(mpfs, index);
+ mlx5_core_dbg(dev, "MPFS entry %pM, deleted @index (%d)\n",
+ mac, index);
+ }
l2addr_hash_del(l2addr);
- free_l2table_index(mpfs, index);
mlx5_core_dbg(dev, "MPFS mac deleted %pM, index (%d)\n", mac, index);
unlock:
mutex_unlock(&mpfs->lock);
return err;
}
EXPORT_SYMBOL(mlx5_mpfs_del_mac);
+
+int mlx5_mpfs_enable(struct mlx5_core_dev *dev)
+{
+ struct mlx5_mpfs *mpfs = dev->priv.mpfs;
+ struct l2table_node *l2addr;
+ struct hlist_node *n;
+ int err = 0, i;
+
+ if (!mpfs)
+ return -ENODEV;
+
+ mutex_lock(&mpfs->lock);
+ if (mpfs->enabled)
+ goto out;
+ mpfs->enabled = true;
+ mlx5_core_dbg(dev, "MPFS enabling mpfs\n");
+
+ mlx5_mpfs_foreach(l2addr, n, mpfs, i) {
+ u32 index;
+
+ err = alloc_l2table_index(mpfs, &index);
+ if (err) {
+			mlx5_core_err(dev, "Failed to allocate MPFS index for %pM, err(%d)\n",
+ l2addr->node.addr, err);
+ goto out;
+ }
+
+ err = set_l2table_entry_cmd(dev, index, l2addr->node.addr);
+ if (err) {
+ mlx5_core_err(dev, "Failed to set MPFS l2table entry for %pM index=%d, err(%d)\n",
+ l2addr->node.addr, index, err);
+ free_l2table_index(mpfs, index);
+ goto out;
+ }
+
+ l2addr->index = index;
+ mlx5_core_dbg(dev, "MPFS entry %pM, set @index (%d)\n",
+ l2addr->node.addr, l2addr->index);
+ }
+out:
+ mutex_unlock(&mpfs->lock);
+ return err;
+}
+
+void mlx5_mpfs_disable(struct mlx5_core_dev *dev)
+{
+ struct mlx5_mpfs *mpfs = dev->priv.mpfs;
+ struct l2table_node *l2addr;
+ struct hlist_node *n;
+ int i;
+
+ if (!mpfs)
+ return;
+
+ mutex_lock(&mpfs->lock);
+ if (!mpfs->enabled)
+ goto unlock;
+ mlx5_mpfs_foreach(l2addr, n, mpfs, i) {
+ if (l2addr->index < 0)
+ continue;
+ del_l2table_entry_cmd(dev, l2addr->index);
+ free_l2table_index(mpfs, l2addr->index);
+ mlx5_core_dbg(dev, "MPFS entry %pM, deleted @index (%d)\n",
+ l2addr->node.addr, l2addr->index);
+ l2addr->index = -1;
+ }
+ mpfs->enabled = false;
+ mlx5_core_dbg(dev, "MPFS disabled\n");
+unlock:
+ mutex_unlock(&mpfs->lock);
+}
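
mlx5_mpfs_disable() releases the hardware L2 table entries but keeps the MACs in the software hash with index set to -1, and mlx5_mpfs_enable() replays them back into hardware. A small standalone model of that keep-in-software / replay-to-hardware idea, with an array standing in for the L2 table:

/* Toy MPFS: disable drops HW slots but remembers MACs; enable re-programs them. */
#include <stdio.h>
#include <string.h>

#define HW_SLOTS 4

struct entry {
	char mac[18];
	int index;		/* -1 means: known in SW, not programmed in HW */
};

static struct entry table[8];
static int nentries;
static char hw[HW_SLOTS][18];

static int hw_alloc(const char *mac)
{
	for (int i = 0; i < HW_SLOTS; i++) {
		if (!hw[i][0]) {
			snprintf(hw[i], sizeof(hw[i]), "%s", mac);
			return i;
		}
	}
	return -1;
}

static void mpfs_disable(void)
{
	for (int i = 0; i < nentries; i++) {
		if (table[i].index < 0)
			continue;
		hw[table[i].index][0] = '\0';	/* drop the HW entry */
		table[i].index = -1;		/* but remember the MAC */
	}
}

static void mpfs_enable(void)
{
	for (int i = 0; i < nentries; i++)
		if (table[i].index < 0)
			table[i].index = hw_alloc(table[i].mac);
}

int main(void)
{
	snprintf(table[0].mac, sizeof(table[0].mac), "aa:bb:cc:dd:ee:01");
	table[0].index = hw_alloc(table[0].mac);
	nentries = 1;

	mpfs_disable();
	printf("after disable: index=%d\n", table[0].index);
	mpfs_enable();
	printf("after enable:  index=%d\n", table[0].index);
	return 0;
}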
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
index 4a293542a7aa..9c63838ce1f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
@@ -45,6 +45,10 @@ struct l2addr_node {
u8 addr[ETH_ALEN];
};
+#define mlx5_mpfs_foreach(hs, tmp, mpfs, i) \
+ for (i = 0; i < MLX5_L2_ADDR_HASH_SIZE; i++) \
+ hlist_for_each_entry_safe(hs, tmp, &(mpfs)->hash[i], node.hlist)
+
#define for_each_l2hash_node(hn, tmp, hash, i) \
for (i = 0; i < MLX5_L2_ADDR_HASH_SIZE; i++) \
hlist_for_each_entry_safe(hn, tmp, &(hash)[i], hlist)
@@ -82,11 +86,16 @@ struct l2addr_node {
})
#ifdef CONFIG_MLX5_MPFS
+struct mlx5_core_dev;
int mlx5_mpfs_init(struct mlx5_core_dev *dev);
void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev);
+int mlx5_mpfs_enable(struct mlx5_core_dev *dev);
+void mlx5_mpfs_disable(struct mlx5_core_dev *dev);
#else /* #ifndef CONFIG_MLX5_MPFS */
static inline int mlx5_mpfs_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev) {}
+static inline int mlx5_mpfs_enable(struct mlx5_core_dev *dev) { return 0; }
+static inline void mlx5_mpfs_disable(struct mlx5_core_dev *dev) {}
#endif
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c
new file mode 100644
index 000000000000..19bb620b7436
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c
@@ -0,0 +1,799 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include "nv_param.h"
+#include "mlx5_core.h"
+
+enum {
+ MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CONF = 0x80,
+ MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CAP = 0x81,
+ MLX5_CLASS_0_CTRL_ID_NV_SW_OFFLOAD_CONFIG = 0x10a,
+ MLX5_CLASS_0_CTRL_ID_NV_SW_OFFLOAD_CAP = 0x10b,
+ MLX5_CLASS_0_CTRL_ID_NV_SW_ACCELERATE_CONF = 0x11d,
+
+ MLX5_CLASS_3_CTRL_ID_NV_PF_PCI_CONF = 0x80,
+};
+
+struct mlx5_ifc_configuration_item_type_class_global_bits {
+ u8 type_class[0x8];
+ u8 parameter_index[0x18];
+};
+
+struct mlx5_ifc_configuration_item_type_class_per_host_pf_bits {
+ u8 type_class[0x8];
+ u8 pf_index[0x6];
+ u8 pci_bus_index[0x8];
+ u8 parameter_index[0xa];
+};
+
+union mlx5_ifc_config_item_type_auto_bits {
+ struct mlx5_ifc_configuration_item_type_class_global_bits
+ configuration_item_type_class_global;
+ struct mlx5_ifc_configuration_item_type_class_per_host_pf_bits
+ configuration_item_type_class_per_host_pf;
+ u8 reserved_at_0[0x20];
+};
+
+enum {
+ MLX5_ACCESS_MODE_NEXT = 0,
+ MLX5_ACCESS_MODE_CURRENT,
+ MLX5_ACCESS_MODE_DEFAULT,
+};
+
+struct mlx5_ifc_config_item_bits {
+ u8 valid[0x2];
+ u8 priority[0x2];
+ u8 header_type[0x2];
+ u8 ovr_en[0x1];
+ u8 rd_en[0x1];
+ u8 access_mode[0x2];
+ u8 reserved_at_a[0x1];
+ u8 writer_id[0x5];
+ u8 version[0x4];
+ u8 reserved_at_14[0x2];
+ u8 host_id_valid[0x1];
+ u8 length[0x9];
+
+ union mlx5_ifc_config_item_type_auto_bits type;
+
+ u8 reserved_at_40[0x10];
+ u8 crc16[0x10];
+};
+
+struct mlx5_ifc_mnvda_reg_bits {
+ struct mlx5_ifc_config_item_bits configuration_item_header;
+
+ u8 configuration_item_data[64][0x20];
+};
+
+struct mlx5_ifc_nv_global_pci_conf_bits {
+ u8 sriov_valid[0x1];
+ u8 reserved_at_1[0x10];
+ u8 per_pf_total_vf[0x1];
+ u8 reserved_at_12[0xe];
+
+ u8 sriov_en[0x1];
+ u8 reserved_at_21[0xf];
+ u8 total_vfs[0x10];
+
+ u8 reserved_at_40[0x20];
+};
+
+struct mlx5_ifc_nv_global_pci_cap_bits {
+ u8 max_vfs_per_pf_valid[0x1];
+ u8 reserved_at_1[0x13];
+ u8 per_pf_total_vf_supported[0x1];
+ u8 reserved_at_15[0xb];
+
+ u8 sriov_support[0x1];
+ u8 reserved_at_21[0xf];
+ u8 max_vfs_per_pf[0x10];
+
+ u8 reserved_at_40[0x60];
+};
+
+struct mlx5_ifc_nv_pf_pci_conf_bits {
+ u8 reserved_at_0[0x9];
+ u8 pf_total_vf_en[0x1];
+ u8 reserved_at_a[0x16];
+
+ u8 reserved_at_20[0x20];
+
+ u8 reserved_at_40[0x10];
+ u8 total_vf[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_nv_sw_offload_conf_bits {
+ u8 ip_over_vxlan_port[0x10];
+ u8 tunnel_ecn_copy_offload_disable[0x1];
+ u8 pci_atomic_mode[0x3];
+ u8 sr_enable[0x1];
+ u8 ptp_cyc2realtime[0x1];
+ u8 vector_calc_disable[0x1];
+ u8 uctx_en[0x1];
+ u8 prio_tag_required_en[0x1];
+ u8 esw_fdb_ipv4_ttl_modify_enable[0x1];
+ u8 mkey_by_name[0x1];
+ u8 ip_over_vxlan_en[0x1];
+ u8 one_qp_per_recovery[0x1];
+ u8 cqe_compression[0x3];
+ u8 tunnel_udp_entropy_proto_disable[0x1];
+ u8 reserved_at_21[0x1];
+ u8 ar_enable[0x1];
+ u8 log_max_outstanding_wqe[0x5];
+ u8 vf_migration[0x2];
+ u8 log_tx_psn_win[0x6];
+ u8 lro_log_timeout3[0x4];
+ u8 lro_log_timeout2[0x4];
+ u8 lro_log_timeout1[0x4];
+ u8 lro_log_timeout0[0x4];
+};
+
+struct mlx5_ifc_nv_sw_offload_cap_bits {
+ u8 reserved_at_0[0x19];
+ u8 swp_l4_csum_mode_l4_only[0x1];
+ u8 reserved_at_1a[0x6];
+};
+
+struct mlx5_ifc_nv_sw_accelerate_conf_bits {
+ u8 swp_l4_csum_mode[0x2];
+ u8 reserved_at_2[0x3e];
+};
+
+#define MNVDA_HDR_SZ \
+ (MLX5_ST_SZ_BYTES(mnvda_reg) - \
+ MLX5_BYTE_OFF(mnvda_reg, configuration_item_data))
+
+#define MLX5_SET_CFG_ITEM_TYPE(_cls_name, _mnvda_ptr, _field, _val) \
+ MLX5_SET(mnvda_reg, _mnvda_ptr, \
+ configuration_item_header.type.configuration_item_type_class_##_cls_name._field, \
+ _val)
+
+#define MLX5_SET_CFG_HDR_LEN(_mnvda_ptr, _cls_name) \
+ MLX5_SET(mnvda_reg, _mnvda_ptr, configuration_item_header.length, \
+ MLX5_ST_SZ_BYTES(_cls_name))
+
+#define MLX5_GET_CFG_HDR_LEN(_mnvda_ptr) \
+ MLX5_GET(mnvda_reg, _mnvda_ptr, configuration_item_header.length)
+
+static int mlx5_nv_param_read(struct mlx5_core_dev *dev, void *mnvda,
+ size_t len)
+{
+ u32 param_idx, type_class;
+ u32 header_len;
+ void *cls_ptr;
+ int err;
+
+ if (WARN_ON(len > MLX5_ST_SZ_BYTES(mnvda_reg)) || len < MNVDA_HDR_SZ)
+ return -EINVAL; /* A caller bug */
+
+ err = mlx5_core_access_reg(dev, mnvda, len, mnvda, len, MLX5_REG_MNVDA,
+ 0, 0);
+ if (!err)
+ return 0;
+
+ cls_ptr = MLX5_ADDR_OF(mnvda_reg, mnvda,
+ configuration_item_header.type.configuration_item_type_class_global);
+
+ type_class = MLX5_GET(configuration_item_type_class_global, cls_ptr,
+ type_class);
+ param_idx = MLX5_GET(configuration_item_type_class_global, cls_ptr,
+ parameter_index);
+ header_len = MLX5_GET_CFG_HDR_LEN(mnvda);
+
+ mlx5_core_warn(dev, "Failed to read mnvda reg: type_class 0x%x, param_idx 0x%x, header_len %u, err %d\n",
+ type_class, param_idx, header_len, err);
+
+ return -EOPNOTSUPP;
+}
+
+static int mlx5_nv_param_write(struct mlx5_core_dev *dev, void *mnvda,
+ size_t len)
+{
+ if (WARN_ON(len > MLX5_ST_SZ_BYTES(mnvda_reg)) || len < MNVDA_HDR_SZ)
+ return -EINVAL;
+
+ if (WARN_ON(MLX5_GET_CFG_HDR_LEN(mnvda) == 0))
+ return -EINVAL;
+
+ return mlx5_core_access_reg(dev, mnvda, len, mnvda, len, MLX5_REG_MNVDA,
+ 0, 1);
+}
+
+static int
+mlx5_nv_param_read_sw_offload_conf(struct mlx5_core_dev *dev, void *mnvda,
+ size_t len)
+{
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, type_class, 0);
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, parameter_index,
+ MLX5_CLASS_0_CTRL_ID_NV_SW_OFFLOAD_CONFIG);
+ MLX5_SET_CFG_HDR_LEN(mnvda, nv_sw_offload_conf);
+
+ return mlx5_nv_param_read(dev, mnvda, len);
+}
+
+static int
+mlx5_nv_param_read_sw_offload_cap(struct mlx5_core_dev *dev, void *mnvda,
+ size_t len)
+{
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, type_class, 0);
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, parameter_index,
+ MLX5_CLASS_0_CTRL_ID_NV_SW_OFFLOAD_CAP);
+ MLX5_SET_CFG_HDR_LEN(mnvda, nv_sw_offload_cap);
+
+ return mlx5_nv_param_read(dev, mnvda, len);
+}
+
+static int
+mlx5_nv_param_read_sw_accelerate_conf(struct mlx5_core_dev *dev, void *mnvda,
+ size_t len, int access_mode)
+{
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, type_class, 0);
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, parameter_index,
+ MLX5_CLASS_0_CTRL_ID_NV_SW_ACCELERATE_CONF);
+ MLX5_SET_CFG_HDR_LEN(mnvda, nv_sw_accelerate_conf);
+ MLX5_SET(mnvda_reg, mnvda, configuration_item_header.access_mode,
+ access_mode);
+
+ return mlx5_nv_param_read(dev, mnvda, len);
+}
+
+static const char *const
+ cqe_compress_str[] = { "balanced", "aggressive" };
+
+static int
+mlx5_nv_param_devlink_cqe_compress_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ u8 value = U8_MAX;
+ void *data;
+ int err;
+
+ err = mlx5_nv_param_read_sw_offload_conf(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ value = MLX5_GET(nv_sw_offload_conf, data, cqe_compression);
+
+ if (value >= ARRAY_SIZE(cqe_compress_str))
+ return -EOPNOTSUPP;
+
+ strscpy(ctx->val.vstr, cqe_compress_str[value], sizeof(ctx->val.vstr));
+ return 0;
+}
+
+static int
+mlx5_nv_param_devlink_cqe_compress_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cqe_compress_str); i++) {
+ if (!strcmp(val.vstr, cqe_compress_str[i]))
+ return 0;
+ }
+
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid value, supported values are balanced/aggressive");
+ return -EOPNOTSUPP;
+}
+
+static int
+mlx5_nv_param_devlink_cqe_compress_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ int err = 0;
+ void *data;
+ u8 value;
+
+ if (!strcmp(ctx->val.vstr, "aggressive"))
+ value = 1;
+ else /* balanced: the only other valid value, already checked by the validate callback */
+ value = 0;
+
+ err = mlx5_nv_param_read_sw_offload_conf(dev, mnvda, sizeof(mnvda));
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to read sw_offload_conf mnvda reg");
+ return err;
+ }
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ MLX5_SET(nv_sw_offload_conf, data, cqe_compression, value);
+
+ return mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
+}
+
+enum swp_l4_csum_mode {
+ SWP_L4_CSUM_MODE_DEFAULT = 0,
+ SWP_L4_CSUM_MODE_FULL_CSUM = 1,
+ SWP_L4_CSUM_MODE_L4_ONLY = 2,
+};
+
+static const char *const
+ swp_l4_csum_mode_str[] = { "default", "full_csum", "l4_only" };
+
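+/* Read swp_l4_csum_mode from NV_SW_ACCELERATE_CONF using the requested
+ * access mode and sanity-check it against the known mode names.
+ */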
+static int
+mlx5_swp_l4_csum_mode_get(struct devlink *devlink, u32 id,
+ int access_mode, u8 *value,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ void *data;
+ int err;
+
+ err = mlx5_nv_param_read_sw_accelerate_conf(dev, mnvda, sizeof(mnvda),
+ access_mode);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to read sw_accelerate_conf mnvda reg");
+ return err;
+ }
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ *value = MLX5_GET(nv_sw_accelerate_conf, data, swp_l4_csum_mode);
+
+ if (*value >= ARRAY_SIZE(swp_l4_csum_mode_str)) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Invalid swp_l4_csum_mode value %u read from device",
+ *value);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+mlx5_devlink_swp_l4_csum_mode_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ u8 value;
+ int err;
+
+ err = mlx5_swp_l4_csum_mode_get(devlink, id, MLX5_ACCESS_MODE_NEXT,
+ &value, extack);
+ if (err)
+ return err;
+
+ strscpy(ctx->val.vstr, swp_l4_csum_mode_str[value],
+ sizeof(ctx->val.vstr));
+ return 0;
+}
+
+static int
+mlx5_devlink_swp_l4_csum_mode_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 cap[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ void *data;
+ int err, i;
+
+ for (i = 0; i < ARRAY_SIZE(swp_l4_csum_mode_str); i++) {
+ if (!strcmp(val.vstr, swp_l4_csum_mode_str[i]))
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(swp_l4_csum_mode_str) ||
+ i == SWP_L4_CSUM_MODE_DEFAULT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid value, supported values are full_csum/l4_only");
+ return -EINVAL;
+ }
+
+ if (i == SWP_L4_CSUM_MODE_L4_ONLY) {
+ err = mlx5_nv_param_read_sw_offload_cap(dev, cap, sizeof(cap));
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to read sw_offload_cap");
+ return err;
+ }
+
+ data = MLX5_ADDR_OF(mnvda_reg, cap, configuration_item_data);
+ if (!MLX5_GET(nv_sw_offload_cap, data, swp_l4_csum_mode_l4_only)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "l4_only mode is not supported on this device");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int
+mlx5_swp_l4_csum_mode_set(struct devlink *devlink, u32 id, u8 value,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ void *data;
+ int err;
+
+ err = mlx5_nv_param_read_sw_accelerate_conf(dev, mnvda, sizeof(mnvda),
+ MLX5_ACCESS_MODE_NEXT);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to read sw_accelerate_conf mnvda reg");
+ return err;
+ }
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ MLX5_SET(nv_sw_accelerate_conf, data, swp_l4_csum_mode, value);
+
+ err = mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to write sw_accelerate_conf mnvda reg");
+
+ return err;
+}
+
+static int
+mlx5_devlink_swp_l4_csum_mode_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ u8 value;
+
+ if (!strcmp(ctx->val.vstr, "full_csum"))
+ value = SWP_L4_CSUM_MODE_FULL_CSUM;
+ else
+ value = SWP_L4_CSUM_MODE_L4_ONLY;
+
+ return mlx5_swp_l4_csum_mode_set(devlink, id, value, extack);
+}
+
+static int
+mlx5_devlink_swp_l4_csum_mode_get_default(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ u8 value;
+ int err;
+
+ err = mlx5_swp_l4_csum_mode_get(devlink, id, MLX5_ACCESS_MODE_DEFAULT,
+ &value, extack);
+ if (err)
+ return err;
+
+ strscpy(ctx->val.vstr, swp_l4_csum_mode_str[value],
+ sizeof(ctx->val.vstr));
+ return 0;
+}
+
+static int
+mlx5_devlink_swp_l4_csum_mode_set_default(struct devlink *devlink, u32 id,
+ enum devlink_param_cmode cmode,
+ struct netlink_ext_ack *extack)
+{
+ u8 value;
+ int err;
+
+ err = mlx5_swp_l4_csum_mode_get(devlink, id, MLX5_ACCESS_MODE_DEFAULT,
+ &value, extack);
+ if (err)
+ return err;
+
+ return mlx5_swp_l4_csum_mode_set(devlink, id, value, extack);
+}
+
+static int mlx5_nv_param_read_global_pci_conf(struct mlx5_core_dev *dev,
+ void *mnvda, size_t len)
+{
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, type_class, 0);
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, parameter_index,
+ MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CONF);
+ MLX5_SET_CFG_HDR_LEN(mnvda, nv_global_pci_conf);
+
+ return mlx5_nv_param_read(dev, mnvda, len);
+}
+
+static int mlx5_nv_param_read_global_pci_cap(struct mlx5_core_dev *dev,
+ void *mnvda, size_t len)
+{
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, type_class, 0);
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, parameter_index,
+ MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CAP);
+ MLX5_SET_CFG_HDR_LEN(mnvda, nv_global_pci_cap);
+
+ return mlx5_nv_param_read(dev, mnvda, len);
+}
+
+static int mlx5_nv_param_read_per_host_pf_conf(struct mlx5_core_dev *dev,
+ void *mnvda, size_t len)
+{
+ MLX5_SET_CFG_ITEM_TYPE(per_host_pf, mnvda, type_class, 3);
+ MLX5_SET_CFG_ITEM_TYPE(per_host_pf, mnvda, parameter_index,
+ MLX5_CLASS_3_CTRL_ID_NV_PF_PCI_CONF);
+ MLX5_SET_CFG_HDR_LEN(mnvda, nv_pf_pci_conf);
+
+ return mlx5_nv_param_read(dev, mnvda, len);
+}
+
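+/* Resolve the effective SRIOV enable state: no capability means disabled;
+ * otherwise use the global configuration, and when total VF is managed
+ * per PF, also require the per-host PF enable bit.
+ */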
+static int mlx5_devlink_enable_sriov_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ bool sriov_en = false;
+ void *data;
+ int err;
+
+ err = mlx5_nv_param_read_global_pci_cap(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ if (!MLX5_GET(nv_global_pci_cap, data, sriov_support)) {
+ ctx->val.vbool = false;
+ return 0;
+ }
+
+ memset(mnvda, 0, sizeof(mnvda));
+ err = mlx5_nv_param_read_global_pci_conf(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ sriov_en = MLX5_GET(nv_global_pci_conf, data, sriov_en);
+ if (!MLX5_GET(nv_global_pci_conf, data, per_pf_total_vf)) {
+ ctx->val.vbool = sriov_en;
+ return 0;
+ }
+
+ /* SRIOV is per PF */
+ memset(mnvda, 0, sizeof(mnvda));
+ err = mlx5_nv_param_read_per_host_pf_conf(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ ctx->val.vbool = sriov_en &&
+ MLX5_GET(nv_pf_pci_conf, data, pf_total_vf_en);
+ return 0;
+}
+
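+/* Devlink enable_sriov set: only supported when the device can manage
+ * total VFs per PF. Switch the global configuration to per-PF mode and
+ * then update this PF's enable bit.
+ */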
+static int mlx5_devlink_enable_sriov_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ bool per_pf_support;
+ void *cap, *data;
+ int err;
+
+ err = mlx5_nv_param_read_global_pci_cap(dev, mnvda, sizeof(mnvda));
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to read global PCI capability");
+ return err;
+ }
+
+ cap = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ per_pf_support = MLX5_GET(nv_global_pci_cap, cap,
+ per_pf_total_vf_supported);
+
+ if (!MLX5_GET(nv_global_pci_cap, cap, sriov_support)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "SRIOV is not supported on this device");
+ return -EOPNOTSUPP;
+ }
+
+ if (!per_pf_support) {
+ /* We don't allow global SRIOV setting on per PF devlink */
+ NL_SET_ERR_MSG_MOD(extack,
+ "SRIOV is not per PF on this device");
+ return -EOPNOTSUPP;
+ }
+
+ memset(mnvda, 0, sizeof(mnvda));
+ err = mlx5_nv_param_read_global_pci_conf(dev, mnvda, sizeof(mnvda));
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unable to read global PCI configuration");
+ return err;
+ }
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+
+ /* Set up per-PF SRIOV mode */
+ MLX5_SET(nv_global_pci_conf, data, sriov_valid, 1);
+ MLX5_SET(nv_global_pci_conf, data, sriov_en, 1);
+ MLX5_SET(nv_global_pci_conf, data, per_pf_total_vf, 1);
+
+ err = mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unable to write global PCI configuration");
+ return err;
+ }
+
+ /* enable/disable sriov on this PF */
+ memset(mnvda, 0, sizeof(mnvda));
+ err = mlx5_nv_param_read_per_host_pf_conf(dev, mnvda, sizeof(mnvda));
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unable to read per host PF configuration");
+ return err;
+ }
+ MLX5_SET(nv_pf_pci_conf, data, pf_total_vf_en, ctx->val.vbool);
+ return mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
+}
+
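+/* Report the configured number of VFs, either from the global PCI
+ * configuration or, in per-PF mode, from the per-host PF configuration.
+ */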
+static int mlx5_devlink_total_vfs_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ void *data;
+ int err;
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+
+ err = mlx5_nv_param_read_global_pci_cap(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ if (!MLX5_GET(nv_global_pci_cap, data, sriov_support)) {
+ ctx->val.vu32 = 0;
+ return 0;
+ }
+
+ memset(mnvda, 0, sizeof(mnvda));
+ err = mlx5_nv_param_read_global_pci_conf(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ if (!MLX5_GET(nv_global_pci_conf, data, per_pf_total_vf)) {
+ ctx->val.vu32 = MLX5_GET(nv_global_pci_conf, data, total_vfs);
+ return 0;
+ }
+
+ /* SRIOV is per PF */
+ memset(mnvda, 0, sizeof(mnvda));
+ err = mlx5_nv_param_read_per_host_pf_conf(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ ctx->val.vu32 = MLX5_GET(nv_pf_pci_conf, data, total_vf);
+
+ return 0;
+}
+
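+/* Devlink total_vfs set: requires per-PF total VF support; switch the
+ * global configuration to per-PF mode and store the VF count in the
+ * per-host PF configuration.
+ */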
+static int mlx5_devlink_total_vfs_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ void *data;
+ int err;
+
+ err = mlx5_nv_param_read_global_pci_cap(dev, mnvda, sizeof(mnvda));
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to read global pci cap");
+ return err;
+ }
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ if (!MLX5_GET(nv_global_pci_cap, data, sriov_support)) {
+ NL_SET_ERR_MSG_MOD(extack, "Not configurable on this device");
+ return -EOPNOTSUPP;
+ }
+
+ if (!MLX5_GET(nv_global_pci_cap, data, per_pf_total_vf_supported)) {
+ /* We don't allow global SRIOV setting on per PF devlink */
+ NL_SET_ERR_MSG_MOD(extack,
+ "SRIOV is not per PF on this device");
+ return -EOPNOTSUPP;
+ }
+
+ memset(mnvda, 0, sizeof(mnvda));
+ err = mlx5_nv_param_read_global_pci_conf(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ MLX5_SET(nv_global_pci_conf, data, sriov_valid, 1);
+ MLX5_SET(nv_global_pci_conf, data, per_pf_total_vf, 1);
+
+ err = mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ memset(mnvda, 0, sizeof(mnvda));
+ err = mlx5_nv_param_read_per_host_pf_conf(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ MLX5_SET(nv_pf_pci_conf, data, total_vf, ctx->val.vu32);
+ return mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
+}
+
+static int mlx5_devlink_total_vfs_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 cap[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ void *data;
+ u16 max;
+ int err;
+
+ data = MLX5_ADDR_OF(mnvda_reg, cap, configuration_item_data);
+
+ err = mlx5_nv_param_read_global_pci_cap(dev, cap, sizeof(cap));
+ if (err)
+ return err;
+
+ if (!MLX5_GET(nv_global_pci_cap, data, max_vfs_per_pf_valid))
+ return 0; /* optimistic, but set might fail later */
+
+ max = MLX5_GET(nv_global_pci_cap, data, max_vfs_per_pf);
+ if (val.vu16 > max) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Max allowed by device is %u", max);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param mlx5_nv_param_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(ENABLE_SRIOV, BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ mlx5_devlink_enable_sriov_get,
+ mlx5_devlink_enable_sriov_set, NULL),
+ DEVLINK_PARAM_GENERIC(TOTAL_VFS, BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ mlx5_devlink_total_vfs_get,
+ mlx5_devlink_total_vfs_set,
+ mlx5_devlink_total_vfs_validate),
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_CQE_COMPRESSION_TYPE,
+ "cqe_compress_type", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ mlx5_nv_param_devlink_cqe_compress_get,
+ mlx5_nv_param_devlink_cqe_compress_set,
+ mlx5_nv_param_devlink_cqe_compress_validate),
+ DEVLINK_PARAM_DRIVER_WITH_DEFAULTS(MLX5_DEVLINK_PARAM_ID_SWP_L4_CSUM_MODE,
+ "swp_l4_csum_mode", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ mlx5_devlink_swp_l4_csum_mode_get,
+ mlx5_devlink_swp_l4_csum_mode_set,
+ mlx5_devlink_swp_l4_csum_mode_validate,
+ mlx5_devlink_swp_l4_csum_mode_get_default,
+ mlx5_devlink_swp_l4_csum_mode_set_default),
+};
+
+int mlx5_nv_param_register_dl_params(struct devlink *devlink)
+{
+ if (!mlx5_core_is_pf(devlink_priv(devlink)))
+ return 0;
+
+ return devl_params_register(devlink, mlx5_nv_param_devlink_params,
+ ARRAY_SIZE(mlx5_nv_param_devlink_params));
+}
+
+void mlx5_nv_param_unregister_dl_params(struct devlink *devlink)
+{
+ if (!mlx5_core_is_pf(devlink_priv(devlink)))
+ return;
+
+ devl_params_unregister(devlink, mlx5_nv_param_devlink_params,
+ ARRAY_SIZE(mlx5_nv_param_devlink_params));
+}
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.h
new file mode 100644
index 000000000000..9f4922ff7745
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_NV_PARAM_H
+#define __MLX5_NV_PARAM_H
+
+#include <linux/mlx5/driver.h>
+#include "devlink.h"
+
+int mlx5_nv_param_register_dl_params(struct devlink *devlink);
+void mlx5_nv_param_unregister_dl_params(struct devlink *devlink);
+
+#endif
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
index eeb0b7ea05f1..8e17daae48af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
@@ -210,15 +210,19 @@ static void sd_cleanup(struct mlx5_core_dev *dev)
static int sd_register(struct mlx5_core_dev *dev)
{
struct mlx5_devcom_comp_dev *devcom, *pos;
+ struct mlx5_devcom_match_attr attr = {};
struct mlx5_core_dev *peer, *primary;
struct mlx5_sd *sd, *primary_sd;
int err, i;
sd = mlx5_get_sd(dev);
+ attr.key.val = sd->group_id;
+ attr.flags = MLX5_DEVCOM_MATCH_FLAGS_NS;
+ attr.net = mlx5_core_net(dev);
devcom = mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_SD_GROUP,
- sd->group_id, NULL, dev);
- if (IS_ERR(devcom))
- return PTR_ERR(devcom);
+ &attr, NULL, dev);
+ if (!devcom)
+ return -EINVAL;
sd->devcom = devcom;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h
index 452d0df339ac..404f3d4b6380 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h
@@ -4,8 +4,8 @@
#ifndef __MLX5_LIB_SMFS_H__
#define __MLX5_LIB_SMFS_H__
-#include "steering/mlx5dr.h"
-#include "steering/dr_types.h"
+#include "steering/sws/mlx5dr.h"
+#include "steering/sws/dr_types.h"
struct mlx5dr_matcher *
mlx5_smfs_matcher_create(struct mlx5dr_table *table, u32 priority, struct mlx5_flow_spec *spec);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/st.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/st.c
new file mode 100644
index 000000000000..ef06fe6cbb51
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/st.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/device.h>
+
+#include "mlx5_core.h"
+#include "lib/mlx5.h"
+
+struct mlx5_st_idx_data {
+ refcount_t usecount;
+ u16 tag;
+};
+
+struct mlx5_st {
+ /* serialize access upon alloc/free flows */
+ struct mutex lock;
+ struct xa_limit index_limit;
+ struct xarray idx_xa; /* key == index, value == struct mlx5_st_idx_data */
+ u8 direct_mode : 1;
+};
+
+struct mlx5_st *mlx5_st_create(struct mlx5_core_dev *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+ struct mlx5_st *st;
+ u8 direct_mode = 0;
+ u16 num_entries;
+ u32 tbl_loc;
+ int ret;
+
+ if (!MLX5_CAP_GEN(dev, mkey_pcie_tph))
+ return NULL;
+
+#ifdef CONFIG_MLX5_SF
+ if (mlx5_core_is_sf(dev))
+ return dev->priv.parent_mdev->st;
+#endif
+
+ /* Check whether the device exposes the PCIe TPH capability */
+ if (!pdev->tph_cap)
+ return NULL;
+
+ tbl_loc = pcie_tph_get_st_table_loc(pdev);
+ if (tbl_loc == PCI_TPH_LOC_NONE)
+ direct_mode = 1;
+
+ if (!direct_mode) {
+ num_entries = pcie_tph_get_st_table_size(pdev);
+ /* We need a reserved entry for non-TPH cases */
+ if (num_entries < 2)
+ return NULL;
+ }
+
+ /* If enabling TPH fails, the OS doesn't support steering tags (ST) */
+ ret = pcie_enable_tph(pdev, PCI_TPH_ST_DS_MODE);
+ if (ret)
+ return NULL;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto end;
+
+ mutex_init(&st->lock);
+ xa_init_flags(&st->idx_xa, XA_FLAGS_ALLOC);
+ st->direct_mode = direct_mode;
+ if (st->direct_mode)
+ return st;
+
+ /* entry 0 is reserved for non-TPH cases */
+ st->index_limit.min = MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX + 1;
+ st->index_limit.max = num_entries - 1;
+
+ return st;
+
+end:
+ pcie_disable_tph(dev->pdev);
+ return NULL;
+}
+
+void mlx5_st_destroy(struct mlx5_core_dev *dev)
+{
+ struct mlx5_st *st = dev->st;
+
+ if (mlx5_core_is_sf(dev) || !st)
+ return;
+
+ pcie_disable_tph(dev->pdev);
+ WARN_ON_ONCE(!xa_empty(&st->idx_xa));
+ kfree(st);
+}
+
+int mlx5_st_alloc_index(struct mlx5_core_dev *dev, enum tph_mem_type mem_type,
+ unsigned int cpu_uid, u16 *st_index)
+{
+ struct mlx5_st_idx_data *idx_data;
+ struct mlx5_st *st = dev->st;
+ unsigned long index;
+ u32 xa_id;
+ u16 tag;
+ int ret;
+
+ if (!st)
+ return -EOPNOTSUPP;
+
+ ret = pcie_tph_get_cpu_st(dev->pdev, mem_type, cpu_uid, &tag);
+ if (ret)
+ return ret;
+
+ if (st->direct_mode) {
+ *st_index = tag;
+ return 0;
+ }
+
+ mutex_lock(&st->lock);
+
+ xa_for_each(&st->idx_xa, index, idx_data) {
+ if (tag == idx_data->tag) {
+ refcount_inc(&idx_data->usecount);
+ *st_index = index;
+ goto end;
+ }
+ }
+
+ idx_data = kzalloc(sizeof(*idx_data), GFP_KERNEL);
+ if (!idx_data) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ refcount_set(&idx_data->usecount, 1);
+ idx_data->tag = tag;
+
+ ret = xa_alloc(&st->idx_xa, &xa_id, idx_data, st->index_limit, GFP_KERNEL);
+ if (ret)
+ goto clean_idx_data;
+
+ ret = pcie_tph_set_st_entry(dev->pdev, xa_id, tag);
+ if (ret)
+ goto clean_idx_xa;
+
+ *st_index = xa_id;
+ goto end;
+
+clean_idx_xa:
+ xa_erase(&st->idx_xa, xa_id);
+clean_idx_data:
+ kfree(idx_data);
+end:
+ mutex_unlock(&st->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mlx5_st_alloc_index);
+
+int mlx5_st_dealloc_index(struct mlx5_core_dev *dev, u16 st_index)
+{
+ struct mlx5_st_idx_data *idx_data;
+ struct mlx5_st *st = dev->st;
+ int ret = 0;
+
+ if (!st)
+ return -EOPNOTSUPP;
+
+ if (st->direct_mode)
+ return 0;
+
+ mutex_lock(&st->lock);
+ idx_data = xa_load(&st->idx_xa, st_index);
+ if (WARN_ON_ONCE(!idx_data)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ if (refcount_dec_and_test(&idx_data->usecount)) {
+ xa_erase(&st->idx_xa, st_index);
+ /* Leave the PCI config space entry as it was; no mkey will refer to it */
+ }
+
+end:
+ mutex_unlock(&st->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mlx5_st_dealloc_index);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
index d55e15c1f380..304912637c35 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
@@ -149,7 +149,7 @@ struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev)
struct mlx5_vxlan *vxlan;
if (!MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) || !mlx5_core_is_pf(mdev))
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
if (!vxlan)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 220a9ac75c8b..1ab569ce3fcf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -368,6 +368,10 @@ int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_ty
u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
int err;
+ if (WARN_ON(!dev->caps.hca[cap_type]))
+ /* this cap_type must be added to mlx5_hca_caps_alloc() */
+ return -EINVAL;
+
memset(in, 0, sizeof(in));
out = kzalloc(out_sz, GFP_KERNEL);
if (!out)
@@ -549,6 +553,7 @@ EXPORT_SYMBOL(mlx5_is_roce_on);
static int handle_hca_cap_2(struct mlx5_core_dev *dev, void *set_ctx)
{
+ bool do_set = false;
void *set_hca_cap;
int err;
@@ -559,17 +564,27 @@ static int handle_hca_cap_2(struct mlx5_core_dev *dev, void *set_ctx)
if (err)
return err;
- if (!MLX5_CAP_GEN_2_MAX(dev, sw_vhca_id_valid) ||
- !(dev->priv.sw_vhca_id > 0))
- return 0;
-
set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
capability);
memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_GENERAL_2]->cur,
MLX5_ST_SZ_BYTES(cmd_hca_cap_2));
- MLX5_SET(cmd_hca_cap_2, set_hca_cap, sw_vhca_id_valid, 1);
- return set_caps(dev, set_ctx, MLX5_CAP_GENERAL_2);
+ if (MLX5_CAP_GEN_2_MAX(dev, sw_vhca_id_valid) &&
+ dev->priv.sw_vhca_id > 0) {
+ MLX5_SET(cmd_hca_cap_2, set_hca_cap, sw_vhca_id_valid, 1);
+ do_set = true;
+ }
+
+ if (MLX5_CAP_GEN_2_MAX(dev, lag_per_mp_group)) {
+ MLX5_SET(cmd_hca_cap_2, set_hca_cap, lag_per_mp_group, 1);
+ do_set = true;
+ }
+
+ /* Some FW versions support querying MLX5_CAP_GENERAL_2 capabilities
+ * but don't support setting them.
+ * Skip the unnecessary update to hca_cap_2 when no changes were introduced.
+ */
+ return do_set ? set_caps(dev, set_ctx, MLX5_CAP_GENERAL_2) : 0;
}
static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
@@ -664,6 +679,10 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_current_uc_list,
ilog2(max_uc_list));
+ /* enable absolute native port num */
+ if (MLX5_CAP_GEN_MAX(dev, abs_native_port_num))
+ MLX5_SET(cmd_hca_cap, set_hca_cap, abs_native_port_num, 1);
+
return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
}
@@ -941,9 +960,7 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
mlx5_pci_vsc_init(dev);
- err = pci_enable_ptm(pdev, NULL);
- if (err)
- mlx5_core_info(dev, "PTM is not supported by PCIe\n");
+ pci_enable_ptm(pdev, NULL);
return 0;
@@ -967,36 +984,13 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev)
mlx5_pci_disable_device(dev);
}
-static void mlx5_register_hca_devcom_comp(struct mlx5_core_dev *dev)
-{
- /* This component is use to sync adding core_dev to lag_dev and to sync
- * changes of mlx5_adev_devices between LAG layer and other layers.
- */
- if (!mlx5_lag_is_supported(dev))
- return;
-
- dev->priv.hca_devcom_comp =
- mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_HCA_PORTS,
- mlx5_query_nic_system_image_guid(dev),
- NULL, dev);
- if (IS_ERR(dev->priv.hca_devcom_comp))
- mlx5_core_err(dev, "Failed to register devcom HCA component\n");
-}
-
-static void mlx5_unregister_hca_devcom_comp(struct mlx5_core_dev *dev)
-{
- mlx5_devcom_unregister_component(dev->priv.hca_devcom_comp);
-}
-
static int mlx5_init_once(struct mlx5_core_dev *dev)
{
int err;
dev->priv.devc = mlx5_devcom_register_device(dev);
- if (IS_ERR(dev->priv.devc))
- mlx5_core_warn(dev, "failed to register devcom device %ld\n",
- PTR_ERR(dev->priv.devc));
- mlx5_register_hca_devcom_comp(dev);
+ if (!dev->priv.devc)
+ mlx5_core_warn(dev, "failed to register devcom device\n");
err = mlx5_query_board_id(dev);
if (err) {
@@ -1016,23 +1010,21 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
goto err_irq_cleanup;
}
- err = mlx5_events_init(dev);
- if (err) {
- mlx5_core_err(dev, "failed to initialize events\n");
- goto err_eq_cleanup;
- }
-
err = mlx5_fw_reset_init(dev);
if (err) {
mlx5_core_err(dev, "failed to initialize fw reset events\n");
- goto err_events_cleanup;
+ goto err_eq_cleanup;
}
mlx5_cq_debugfs_init(dev);
mlx5_init_reserved_gids(dev);
- mlx5_init_clock(dev);
+ err = mlx5_init_clock(dev);
+ if (err) {
+ mlx5_core_err(dev, "failed to initialize hardware clock\n");
+ goto err_tables_cleanup;
+ }
dev->vxlan = mlx5_vxlan_create(dev);
dev->geneve = mlx5_geneve_create(dev);
@@ -1040,7 +1032,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
err = mlx5_init_rl_table(dev);
if (err) {
mlx5_core_err(dev, "Failed to init rate limiting\n");
- goto err_tables_cleanup;
+ goto err_clock_cleanup;
}
err = mlx5_mpfs_init(dev);
@@ -1092,9 +1084,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
}
dev->dm = mlx5_dm_create(dev);
- if (IS_ERR(dev->dm))
- mlx5_core_warn(dev, "Failed to init device memory %ld\n", PTR_ERR(dev->dm));
-
+ dev->st = mlx5_st_create(dev);
dev->tracer = mlx5_fw_tracer_create(dev);
dev->hv_vhca = mlx5_hv_vhca_create(dev);
dev->rsc_dump = mlx5_rsc_dump_create(dev);
@@ -1117,21 +1107,19 @@ err_mpfs_cleanup:
mlx5_mpfs_cleanup(dev);
err_rl_cleanup:
mlx5_cleanup_rl_table(dev);
-err_tables_cleanup:
+err_clock_cleanup:
mlx5_geneve_destroy(dev->geneve);
mlx5_vxlan_destroy(dev->vxlan);
mlx5_cleanup_clock(dev);
+err_tables_cleanup:
mlx5_cleanup_reserved_gids(dev);
mlx5_cq_debugfs_cleanup(dev);
mlx5_fw_reset_cleanup(dev);
-err_events_cleanup:
- mlx5_events_cleanup(dev);
err_eq_cleanup:
mlx5_eq_table_cleanup(dev);
err_irq_cleanup:
mlx5_irq_table_cleanup(dev);
err_devcom:
- mlx5_unregister_hca_devcom_comp(dev);
mlx5_devcom_unregister_device(dev->priv.devc);
return err;
@@ -1142,6 +1130,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_rsc_dump_destroy(dev);
mlx5_hv_vhca_destroy(dev->hv_vhca);
mlx5_fw_tracer_destroy(dev->tracer);
+ mlx5_st_destroy(dev);
mlx5_dm_cleanup(dev);
mlx5_fs_core_free(dev);
mlx5_sf_table_cleanup(dev);
@@ -1158,10 +1147,8 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_cleanup_reserved_gids(dev);
mlx5_cq_debugfs_cleanup(dev);
mlx5_fw_reset_cleanup(dev);
- mlx5_events_cleanup(dev);
mlx5_eq_table_cleanup(dev);
mlx5_irq_table_cleanup(dev);
- mlx5_unregister_hca_devcom_comp(dev);
mlx5_devcom_unregister_device(dev->priv.devc);
}
@@ -1199,24 +1186,24 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou
dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);
- mlx5_start_health_poll(dev);
-
err = mlx5_core_enable_hca(dev, 0);
if (err) {
mlx5_core_err(dev, "enable hca failed\n");
- goto stop_health_poll;
+ goto err_cmd_cleanup;
}
+ mlx5_start_health_poll(dev);
+
err = mlx5_core_set_issi(dev);
if (err) {
mlx5_core_err(dev, "failed to set issi\n");
- goto err_disable_hca;
+ goto stop_health_poll;
}
err = mlx5_satisfy_startup_pages(dev, 1);
if (err) {
mlx5_core_err(dev, "failed to allocate boot pages\n");
- goto err_disable_hca;
+ goto stop_health_poll;
}
err = mlx5_tout_query_dtor(dev);
@@ -1229,10 +1216,9 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou
reclaim_boot_pages:
mlx5_reclaim_startup_pages(dev);
-err_disable_hca:
- mlx5_core_disable_hca(dev, 0);
stop_health_poll:
mlx5_stop_health_poll(dev, boot);
+ mlx5_core_disable_hca(dev, 0);
err_cmd_cleanup:
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_disable(dev);
@@ -1243,8 +1229,8 @@ err_cmd_cleanup:
static void mlx5_function_disable(struct mlx5_core_dev *dev, bool boot)
{
mlx5_reclaim_startup_pages(dev);
- mlx5_core_disable_hca(dev, 0);
mlx5_stop_health_poll(dev, boot);
+ mlx5_core_disable_hca(dev, 0);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_disable(dev);
}
@@ -1331,10 +1317,9 @@ static int mlx5_load(struct mlx5_core_dev *dev)
{
int err;
- dev->priv.uar = mlx5_get_uars_page(dev);
- if (IS_ERR(dev->priv.uar)) {
- mlx5_core_err(dev, "Failed allocating uar, aborting\n");
- err = PTR_ERR(dev->priv.uar);
+ err = mlx5_alloc_bfreg(dev, &dev->priv.bfreg, false, false);
+ if (err) {
+ mlx5_core_err(dev, "Failed allocating bfreg, %d\n", err);
return err;
}
@@ -1353,6 +1338,8 @@ static int mlx5_load(struct mlx5_core_dev *dev)
goto err_eq_table;
}
+ mlx5_clock_load(dev);
+
err = mlx5_fw_tracer_init(dev->tracer);
if (err) {
mlx5_core_err(dev, "Failed to init FW tracer %d\n", err);
@@ -1390,12 +1377,6 @@ static int mlx5_load(struct mlx5_core_dev *dev)
mlx5_vhca_event_start(dev);
- err = mlx5_sf_hw_table_create(dev);
- if (err) {
- mlx5_core_err(dev, "sf table create failed %d\n", err);
- goto err_vhca;
- }
-
err = mlx5_ec_init(dev);
if (err) {
mlx5_core_err(dev, "Failed to init embedded CPU\n");
@@ -1424,8 +1405,6 @@ err_sriov:
mlx5_lag_remove_mdev(dev);
mlx5_ec_cleanup(dev);
err_ec:
- mlx5_sf_hw_table_destroy(dev);
-err_vhca:
mlx5_vhca_event_stop(dev);
err_set_hca:
mlx5_fs_core_cleanup(dev);
@@ -1436,13 +1415,14 @@ err_fpga_start:
mlx5_hv_vhca_cleanup(dev->hv_vhca);
mlx5_fw_reset_events_stop(dev);
mlx5_fw_tracer_cleanup(dev->tracer);
+ mlx5_clock_unload(dev);
mlx5_eq_table_destroy(dev);
err_eq_table:
mlx5_irq_table_destroy(dev);
err_irq_table:
mlx5_pagealloc_stop(dev);
mlx5_events_stop(dev);
- mlx5_put_uars_page(dev, dev->priv.uar);
+ mlx5_free_bfreg(dev, &dev->priv.bfreg);
return err;
}
@@ -1450,23 +1430,24 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
{
mlx5_eswitch_disable(dev->priv.eswitch);
mlx5_devlink_traps_unregister(priv_to_devlink(dev));
+ mlx5_vhca_event_stop(dev);
mlx5_sf_dev_table_destroy(dev);
mlx5_sriov_detach(dev);
mlx5_lag_remove_mdev(dev);
mlx5_ec_cleanup(dev);
mlx5_sf_hw_table_destroy(dev);
- mlx5_vhca_event_stop(dev);
mlx5_fs_core_cleanup(dev);
mlx5_fpga_device_stop(dev);
mlx5_rsc_dump_cleanup(dev);
mlx5_hv_vhca_cleanup(dev->hv_vhca);
mlx5_fw_reset_events_stop(dev);
mlx5_fw_tracer_cleanup(dev->tracer);
+ mlx5_clock_unload(dev);
mlx5_eq_table_destroy(dev);
mlx5_irq_table_destroy(dev);
mlx5_pagealloc_stop(dev);
mlx5_events_stop(dev);
- mlx5_put_uars_page(dev, dev->priv.uar);
+ mlx5_free_bfreg(dev, &dev->priv.bfreg);
}
int mlx5_init_one_devl_locked(struct mlx5_core_dev *dev)
@@ -1785,9 +1766,12 @@ static const int types[] = {
MLX5_CAP_VDPA_EMULATION,
MLX5_CAP_IPSEC,
MLX5_CAP_PORT_SELECTION,
+ MLX5_CAP_PSP,
MLX5_CAP_MACSEC,
MLX5_CAP_ADV_VIRTUALIZATION,
MLX5_CAP_CRYPTO,
+ MLX5_CAP_SHAMPO,
+ MLX5_CAP_ADV_RDMA,
};
static void mlx5_hca_caps_free(struct mlx5_core_dev *dev)
@@ -1832,6 +1816,50 @@ static int vhca_id_show(struct seq_file *file, void *priv)
DEFINE_SHOW_ATTRIBUTE(vhca_id);
+static int mlx5_notifiers_init(struct mlx5_core_dev *dev)
+{
+ int err;
+
+ err = mlx5_events_init(dev);
+ if (err) {
+ mlx5_core_err(dev, "failed to initialize events\n");
+ return err;
+ }
+
+ BLOCKING_INIT_NOTIFIER_HEAD(&dev->priv.esw_n_head);
+ mlx5_vhca_state_notifier_init(dev);
+
+ err = mlx5_sf_hw_notifier_init(dev);
+ if (err)
+ goto err_sf_hw_notifier;
+
+ err = mlx5_sf_notifiers_init(dev);
+ if (err)
+ goto err_sf_notifiers;
+
+ err = mlx5_sf_dev_notifier_init(dev);
+ if (err)
+ goto err_sf_dev_notifier;
+
+ return 0;
+
+err_sf_dev_notifier:
+ mlx5_sf_notifiers_cleanup(dev);
+err_sf_notifiers:
+ mlx5_sf_hw_notifier_cleanup(dev);
+err_sf_hw_notifier:
+ mlx5_events_cleanup(dev);
+ return err;
+}
+
+static void mlx5_notifiers_cleanup(struct mlx5_core_dev *dev)
+{
+ mlx5_sf_dev_notifier_cleanup(dev);
+ mlx5_sf_notifiers_cleanup(dev);
+ mlx5_sf_hw_notifier_cleanup(dev);
+ mlx5_events_cleanup(dev);
+}
+
int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
{
struct mlx5_priv *priv = &dev->priv;
@@ -1887,6 +1915,10 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
if (err)
goto err_hca_caps;
+ err = mlx5_notifiers_init(dev);
+ if (err)
+ goto err_hca_caps;
+
/* The conjunction of sw_vhca_id with sw_owner_id will be a global
* unique id per function which uses mlx5_core.
* Those values are supplied to FW as part of the init HCA command to
@@ -1929,6 +1961,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
if (priv->sw_vhca_id > 0)
ida_free(&sw_vhca_ida, dev->priv.sw_vhca_id);
+ mlx5_notifiers_cleanup(dev);
mlx5_hca_caps_free(dev);
mlx5_adev_cleanup(dev);
mlx5_pagealloc_cleanup(dev);
@@ -2104,7 +2137,6 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
err = wait_vital(pdev);
if (err) {
@@ -2241,6 +2273,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */
{ PCI_VDEVICE(MELLANOX, 0x1023) }, /* ConnectX-8 */
{ PCI_VDEVICE(MELLANOX, 0x1025) }, /* ConnectX-9 */
+ { PCI_VDEVICE(MELLANOX, 0x1027) }, /* ConnectX-10 */
{ PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
{ PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 99de67c3aa74..cfebc110c02f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -114,6 +114,26 @@ struct mlx5_cmd_alias_obj_create_attr {
u8 access_key[ACCESS_KEY_LEN];
};
+struct mlx5_port_eth_proto {
+ u32 cap;
+ u32 admin;
+ u32 oper;
+};
+
+struct mlx5_module_eeprom_query_params {
+ u16 size;
+ u16 offset;
+ u16 i2c_address;
+ u32 page;
+ u32 bank;
+ u32 module_number;
+};
+
+struct mlx5_link_info {
+ u32 speed;
+ u32 lanes;
+};
+
static inline void mlx5_printk(struct mlx5_core_dev *dev, int level, const char *format, ...)
{
struct device *device = dev->device;
@@ -280,6 +300,89 @@ int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev);
void mlx5_dm_cleanup(struct mlx5_core_dev *dev);
+#ifdef CONFIG_PCIE_TPH
+struct mlx5_st *mlx5_st_create(struct mlx5_core_dev *dev);
+void mlx5_st_destroy(struct mlx5_core_dev *dev);
+#else
+static inline struct mlx5_st *
+mlx5_st_create(struct mlx5_core_dev *dev) { return NULL; }
+static inline void mlx5_st_destroy(struct mlx5_core_dev *dev) { return; }
+#endif
+
+void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
+int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status status);
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status *status);
+int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration);
+
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
+int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
+int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+ u32 *rx_pause, u32 *tx_pause);
+
+int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx);
+int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx,
+ u8 *pfc_en_rx);
+
+int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev,
+ u16 stall_critical_watermark,
+ u16 stall_minor_watermark);
+int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev,
+ u16 *stall_critical_watermark,
+ u16 *stall_minor_watermark);
+
+int mlx5_max_tc(struct mlx5_core_dev *mdev);
+int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
+int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
+ u8 prio, u8 *tc);
+int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
+int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
+ u8 tc, u8 *tc_group);
+int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
+int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
+ u8 tc, u8 *bw_pct);
+int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+ u8 *max_bw_value,
+ u8 *max_bw_unit);
+int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+ u8 *max_bw_value,
+ u8 *max_bw_unit);
+int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
+int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
+
+int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen);
+int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen);
+int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable);
+void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
+ bool *enabled);
+int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
+ u16 offset, u16 size, u8 *data, u8 *status);
+int
+mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
+ struct mlx5_module_eeprom_query_params *params,
+ u8 *data, u8 *status);
+
+int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
+int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
+int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state);
+int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state);
+int mlx5_query_port_buffer_ownership(struct mlx5_core_dev *mdev,
+ u8 *buffer_ownership);
+int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio);
+int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio);
+
+int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
+ struct mlx5_port_eth_proto *eproto);
+bool mlx5_ptys_ext_supported(struct mlx5_core_dev *mdev);
+const struct mlx5_link_info *mlx5_port_ptys2info(struct mlx5_core_dev *mdev,
+ u32 eth_proto_oper,
+ bool force_legacy);
+u32 mlx5_port_info2linkmodes(struct mlx5_core_dev *mdev,
+ struct mlx5_link_info *info,
+ bool force_legacy);
+int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
+
#define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) && \
MLX5_CAP_GEN((mdev), pps_modify) && \
MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) && \
@@ -341,6 +444,8 @@ int mlx5_init_one_light(struct mlx5_core_dev *dev);
void mlx5_uninit_one_light(struct mlx5_core_dev *dev);
void mlx5_unload_one_light(struct mlx5_core_dev *dev);
+void mlx5_query_nic_sw_system_image_guid(struct mlx5_core_dev *mdev, u8 *buf,
+ u8 *len);
int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap, u16 vport,
u16 opmod);
#define mlx5_vport_get_other_func_general_cap(dev, vport, out) \
@@ -401,4 +506,17 @@ static inline int mlx5_max_eq_cap_get(const struct mlx5_core_dev *dev)
return 1 << MLX5_CAP_GEN(dev, log_max_eq);
}
+
+static inline bool mlx5_pcie_cong_event_supported(struct mlx5_core_dev *dev)
+{
+ u64 features = MLX5_CAP_GEN_2_64(dev, general_obj_types_127_64);
+
+ if (!(features & MLX5_HCA_CAP_2_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT))
+ return false;
+
+ if (dev->sd)
+ return false;
+
+ return true;
+}
#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
index 0881e961d8b1..586688da9940 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
@@ -10,12 +10,15 @@
struct mlx5_irq;
struct cpu_rmap;
+struct mlx5_irq_pool;
int mlx5_irq_table_init(struct mlx5_core_dev *dev);
void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
int mlx5_irq_table_create(struct mlx5_core_dev *dev);
void mlx5_irq_table_destroy(struct mlx5_core_dev *dev);
void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev);
+struct mlx5_irq_pool *
+mlx5_irq_table_get_comp_irq_pool(struct mlx5_core_dev *dev);
int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table);
int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table);
struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev);
@@ -38,7 +41,6 @@ struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq);
int mlx5_irq_get_index(struct mlx5_irq *irq);
int mlx5_irq_get_irq(const struct mlx5_irq *irq);
-struct mlx5_irq_pool;
#ifdef CONFIG_MLX5_SF
struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
struct cpumask *used_cpus, u16 vecidx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 972e8e9df585..cd68c4b2c0bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -291,7 +291,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
{
struct device *device = mlx5_core_dma_dev(dev);
- int nid = dev_to_node(device);
+ int nid = dev->priv.numa_node;
struct page *page;
u64 zero_addr = 1;
u64 addr;
@@ -489,9 +489,12 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
u32 func_id;
u32 npages;
u32 i = 0;
+ int err;
- if (!mlx5_cmd_is_down(dev))
- return mlx5_cmd_do(dev, in, in_size, out, out_size);
+ err = mlx5_cmd_do(dev, in, in_size, out, out_size);
+ /* If FW is gone (-ENXIO), proceed to forceful reclaim */
+ if (err != -ENXIO)
+ return err;
/* No hard feelings, we want our pages back! */
npages = MLX5_GET(manage_pages_in, in, input_num_entries);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 81a9232a03e1..aa3b5878e3da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -54,7 +54,7 @@ static int mlx5_core_func_to_vport(const struct mlx5_core_dev *dev,
/**
* mlx5_get_default_msix_vec_count - Get the default number of MSI-X vectors
- * to be ssigned to each VF.
+ * to be assigned to each VF.
* @dev: PF to work on
* @num_vfs: Number of enabled VFs
*/
@@ -148,7 +148,7 @@ out:
* Free the IRQ and other resources such as rmap from the system.
* BUT doesn't free or remove reference from mlx5.
* This function is very important for the shutdown flow, where we need to
- * cleanup system resoruces but keep mlx5 objects alive,
+ * cleanup system resources but keep mlx5 objects alive,
* see mlx5_irq_table_free_irqs().
*/
static void mlx5_system_free_irq(struct mlx5_irq *irq)
@@ -324,10 +324,8 @@ err_xa:
free_irq(irq->map.virq, &irq->nh);
err_req_irq:
#ifdef CONFIG_RFS_ACCEL
- if (i && rmap && *rmap) {
- free_irq_cpu_rmap(*rmap);
- *rmap = NULL;
- }
+ if (i && rmap && *rmap)
+ irq_cpu_rmap_remove(*rmap, irq->map.virq);
err_irq_rmap:
#endif
if (i && pci_msix_can_alloc_dyn(dev->pdev))
@@ -378,6 +376,11 @@ int mlx5_irq_get_index(struct mlx5_irq *irq)
return irq->map.index;
}
+struct mlx5_irq_pool *mlx5_irq_get_pool(struct mlx5_irq *irq)
+{
+ return irq->pool;
+}
+
/* irq_pool API */
/* requesting an irq from a given pool according to given index */
@@ -405,18 +408,20 @@ static struct mlx5_irq_pool *sf_ctrl_irq_pool_get(struct mlx5_irq_table *irq_tab
return irq_table->sf_ctrl_pool;
}
-static struct mlx5_irq_pool *sf_irq_pool_get(struct mlx5_irq_table *irq_table)
+static struct mlx5_irq_pool *
+sf_comp_irq_pool_get(struct mlx5_irq_table *irq_table)
{
return irq_table->sf_comp_pool;
}
-struct mlx5_irq_pool *mlx5_irq_pool_get(struct mlx5_core_dev *dev)
+struct mlx5_irq_pool *
+mlx5_irq_table_get_comp_irq_pool(struct mlx5_core_dev *dev)
{
struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
struct mlx5_irq_pool *pool = NULL;
if (mlx5_core_is_sf(dev))
- pool = sf_irq_pool_get(irq_table);
+ pool = sf_comp_irq_pool_get(irq_table);
/* In some configs, there won't be a pool of SFs IRQs. Hence, returning
* the PF IRQs pool in case the SF pool doesn't exist.
@@ -463,26 +468,32 @@ void mlx5_ctrl_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *ctrl_irq)
struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev)
{
struct mlx5_irq_pool *pool = ctrl_irq_pool_get(dev);
- struct irq_affinity_desc af_desc;
+ struct irq_affinity_desc *af_desc;
struct mlx5_irq *irq;
- cpumask_copy(&af_desc.mask, cpu_online_mask);
- af_desc.is_managed = false;
+ af_desc = kvzalloc(sizeof(*af_desc), GFP_KERNEL);
+ if (!af_desc)
+ return ERR_PTR(-ENOMEM);
+
+ cpumask_copy(&af_desc->mask, cpu_online_mask);
+ af_desc->is_managed = false;
if (!mlx5_irq_pool_is_sf_pool(pool)) {
/* In case we are allocating a control IRQ from a pci device's pool.
* This can happen also for a SF if the SFs pool is empty.
*/
if (!pool->xa_num_irqs.max) {
- cpumask_clear(&af_desc.mask);
+ cpumask_clear(&af_desc->mask);
/* In case we only have a single IRQ for PF/VF */
- cpumask_set_cpu(cpumask_first(cpu_online_mask), &af_desc.mask);
+ cpumask_set_cpu(cpumask_first(cpu_online_mask), &af_desc->mask);
}
/* Allocate the IRQ in index 0. The vector was already allocated */
- irq = irq_pool_request_vector(pool, 0, &af_desc, NULL);
+ irq = irq_pool_request_vector(pool, 0, af_desc, NULL);
} else {
- irq = mlx5_irq_affinity_request(dev, pool, &af_desc);
+ irq = mlx5_irq_affinity_request(dev, pool, af_desc);
}
+ kvfree(af_desc);
+
return irq;
}
@@ -541,16 +552,26 @@ struct mlx5_irq *mlx5_irq_request_vector(struct mlx5_core_dev *dev, u16 cpu,
{
struct mlx5_irq_table *table = mlx5_irq_table_get(dev);
struct mlx5_irq_pool *pool = table->pcif_pool;
- struct irq_affinity_desc af_desc;
int offset = MLX5_IRQ_VEC_COMP_BASE;
+ struct irq_affinity_desc *af_desc;
+ struct mlx5_irq *irq;
+
+ af_desc = kvzalloc(sizeof(*af_desc), GFP_KERNEL);
+ if (!af_desc)
+ return ERR_PTR(-ENOMEM);
if (!pool->xa_num_irqs.max)
offset = 0;
- af_desc.is_managed = false;
- cpumask_clear(&af_desc.mask);
- cpumask_set_cpu(cpu, &af_desc.mask);
- return mlx5_irq_request(dev, vecidx + offset, &af_desc, rmap);
+ af_desc->is_managed = false;
+ cpumask_clear(&af_desc->mask);
+ cpumask_set_cpu(cpu, &af_desc->mask);
+
+ irq = mlx5_irq_request(dev, vecidx + offset, af_desc, rmap);
+
+ kvfree(af_desc);
+
+ return irq;
}
static struct mlx5_irq_pool *
@@ -572,7 +593,7 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ;
pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ;
mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d",
- name, size, start);
+ name ? name : "mlx5_pcif_pool", size, start);
return pool;
}
@@ -581,7 +602,7 @@ static void irq_pool_free(struct mlx5_irq_pool *pool)
struct mlx5_irq *irq;
unsigned long index;
- /* There are cases in which we are destrying the irq_table before
+ /* There are cases in which we are destroying the irq_table before
* freeing all the IRQs, fast teardown for example. Hence, free the irqs
* which might not have been freed.
*/
@@ -593,9 +614,11 @@ static void irq_pool_free(struct mlx5_irq_pool *pool)
kvfree(pool);
}
-static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec)
+static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec,
+ bool dynamic_vec)
{
struct mlx5_irq_table *table = dev->priv.irq_table;
+ int sf_vec_available = sf_vec;
int num_sf_ctrl;
int err;
@@ -608,7 +631,7 @@ static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec)
if (!mlx5_sf_max_functions(dev))
return 0;
if (sf_vec < MLX5_IRQ_VEC_COMP_BASE_SF) {
- mlx5_core_dbg(dev, "Not enught IRQs for SFs. SF may run at lower performance\n");
+ mlx5_core_dbg(dev, "Not enough IRQs for SFs. SF may run at lower performance\n");
return 0;
}
@@ -616,6 +639,13 @@ static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec)
num_sf_ctrl = DIV_ROUND_UP(mlx5_sf_max_functions(dev),
MLX5_SFS_PER_CTRL_IRQ);
num_sf_ctrl = min_t(int, MLX5_IRQ_CTRL_SF_MAX, num_sf_ctrl);
+ if (!dynamic_vec && (num_sf_ctrl + 1) > sf_vec_available) {
+ mlx5_core_dbg(dev,
+ "Not enough IRQs for SFs control and completion pool, required=%d avail=%d\n",
+ num_sf_ctrl + 1, sf_vec_available);
+ return 0;
+ }
+
table->sf_ctrl_pool = irq_pool_alloc(dev, pcif_vec, num_sf_ctrl,
"mlx5_sf_ctrl",
MLX5_EQ_SHARE_IRQ_MIN_CTRL,
@@ -624,9 +654,11 @@ static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec)
err = PTR_ERR(table->sf_ctrl_pool);
goto err_pf;
}
- /* init sf_comp_pool */
+ sf_vec_available -= num_sf_ctrl;
+
+ /* init sf_comp_pool, remaining vectors are for the SF completions */
table->sf_comp_pool = irq_pool_alloc(dev, pcif_vec + num_sf_ctrl,
- sf_vec - num_sf_ctrl, "mlx5_sf_comp",
+ sf_vec_available, "mlx5_sf_comp",
MLX5_EQ_SHARE_IRQ_MIN_COMP,
MLX5_EQ_SHARE_IRQ_MAX_COMP);
if (IS_ERR(table->sf_comp_pool)) {
@@ -715,6 +747,7 @@ int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table)
int mlx5_irq_table_create(struct mlx5_core_dev *dev)
{
int num_eqs = mlx5_max_eq_cap_get(dev);
+ bool dynamic_vec;
int total_vec;
int pcif_vec;
int req_vec;
@@ -724,21 +757,31 @@ int mlx5_irq_table_create(struct mlx5_core_dev *dev)
if (mlx5_core_is_sf(dev))
return 0;
+ /* PCI PF vector usage is limited by the number of online CPUs, device
+ * EQs and the PCI MSI-X capability.
+ */
pcif_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + 1;
pcif_vec = min_t(int, pcif_vec, num_eqs);
+ pcif_vec = min_t(int, pcif_vec, pci_msix_vec_count(dev->pdev));
total_vec = pcif_vec;
if (mlx5_sf_max_functions(dev))
total_vec += MLX5_MAX_MSIX_PER_SF * mlx5_sf_max_functions(dev);
total_vec = min_t(int, total_vec, pci_msix_vec_count(dev->pdev));
- pcif_vec = min_t(int, pcif_vec, pci_msix_vec_count(dev->pdev));
req_vec = pci_msix_can_alloc_dyn(dev->pdev) ? 1 : total_vec;
n = pci_alloc_irq_vectors(dev->pdev, 1, req_vec, PCI_IRQ_MSIX);
if (n < 0)
return n;
- err = irq_pools_init(dev, total_vec - pcif_vec, pcif_vec);
+ /* Further limit the pools' vectors based on the platform for the non-dynamic case */
+ dynamic_vec = pci_msix_can_alloc_dyn(dev->pdev);
+ if (!dynamic_vec) {
+ pcif_vec = min_t(int, n, pcif_vec);
+ total_vec = min_t(int, n, total_vec);
+ }
+
+ err = irq_pools_init(dev, total_vec - pcif_vec, pcif_vec, dynamic_vec);
if (err)
pci_free_irq_vectors(dev->pdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
index c4d377f8df30..cc064425fe16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
@@ -28,7 +28,6 @@ struct mlx5_irq_pool {
struct mlx5_core_dev *dev;
};
-struct mlx5_irq_pool *mlx5_irq_pool_get(struct mlx5_core_dev *dev);
static inline bool mlx5_irq_pool_is_sf_pool(struct mlx5_irq_pool *pool)
{
return !strncmp("mlx5_sf", pool->name, strlen("mlx5_sf"));
@@ -40,5 +39,6 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
int mlx5_irq_get_locked(struct mlx5_irq *irq);
int mlx5_irq_read_locked(struct mlx5_irq *irq);
int mlx5_irq_put(struct mlx5_irq *irq);
+struct mlx5_irq_pool *mlx5_irq_get_pool(struct mlx5_irq *irq);
#endif /* __PCI_IRQ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 50931584132b..85a9e534f442 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -196,7 +196,6 @@ void mlx5_toggle_port_link(struct mlx5_core_dev *dev)
if (ps == MLX5_PORT_UP)
mlx5_set_port_admin_status(dev, MLX5_PORT_UP);
}
-EXPORT_SYMBOL_GPL(mlx5_toggle_port_link);
int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status status)
@@ -210,7 +209,6 @@ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PAOS, 0, 1);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_admin_status);
int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status *status)
@@ -227,7 +225,6 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
*status = MLX5_GET(paos_reg, out, admin_status);
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
u16 *max_mtu, u16 *oper_mtu, u8 port)
@@ -257,7 +254,6 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PMTU, 0, 1);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu,
u8 port)
@@ -293,11 +289,11 @@ int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
}
static int mlx5_query_module_id(struct mlx5_core_dev *dev, int module_num,
- u8 *module_id)
+ u8 *module_id, u8 *status)
{
u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {};
u32 out[MLX5_ST_SZ_DW(mcia_reg)];
- int err, status;
+ int err;
u8 *ptr;
MLX5_SET(mcia_reg, in, i2c_device_address, MLX5_I2C_ADDR_LOW);
@@ -312,12 +308,12 @@ static int mlx5_query_module_id(struct mlx5_core_dev *dev, int module_num,
if (err)
return err;
- status = MLX5_GET(mcia_reg, out, status);
- if (status) {
- mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n",
- status);
+ if (MLX5_GET(mcia_reg, out, status)) {
+ if (status)
+ *status = MLX5_GET(mcia_reg, out, status);
return -EIO;
}
+
ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
*module_id = ptr[0];
@@ -374,13 +370,14 @@ static int mlx5_mcia_max_bytes(struct mlx5_core_dev *dev)
}
static int mlx5_query_mcia(struct mlx5_core_dev *dev,
- struct mlx5_module_eeprom_query_params *params, u8 *data)
+ struct mlx5_module_eeprom_query_params *params,
+ u8 *data, u8 *status)
{
u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {};
u32 out[MLX5_ST_SZ_DW(mcia_reg)];
- int status, err;
void *ptr;
u16 size;
+ int err;
size = min_t(int, params->size, mlx5_mcia_max_bytes(dev));
@@ -396,12 +393,9 @@ static int mlx5_query_mcia(struct mlx5_core_dev *dev,
if (err)
return err;
- status = MLX5_GET(mcia_reg, out, status);
- if (status) {
- mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n",
- status);
+ *status = MLX5_GET(mcia_reg, out, status);
+ if (*status)
return -EIO;
- }
ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
memcpy(data, ptr, size);
@@ -410,7 +404,7 @@ static int mlx5_query_mcia(struct mlx5_core_dev *dev,
}
int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
- u16 offset, u16 size, u8 *data)
+ u16 offset, u16 size, u8 *data, u8 *status)
{
struct mlx5_module_eeprom_query_params query = {0};
u8 module_id;
@@ -420,7 +414,8 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
if (err)
return err;
- err = mlx5_query_module_id(dev, query.module_number, &module_id);
+ err = mlx5_query_module_id(dev, query.module_number, &module_id,
+ status);
if (err)
return err;
@@ -445,13 +440,12 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
query.size = size;
query.offset = offset;
- return mlx5_query_mcia(dev, &query, data);
+ return mlx5_query_mcia(dev, &query, data, status);
}
-EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom);
int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
struct mlx5_module_eeprom_query_params *params,
- u8 *data)
+ u8 *data, u8 *status)
{
int err;
@@ -465,9 +459,8 @@ int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
return -EINVAL;
}
- return mlx5_query_mcia(dev, params, data);
+ return mlx5_query_mcia(dev, params, data, status);
}
-EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom_by_page);
static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
int pvlc_size, u8 local_port)
@@ -518,7 +511,6 @@ int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause)
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PFCC, 0, 1);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_pause);
int mlx5_query_port_pause(struct mlx5_core_dev *dev,
u32 *rx_pause, u32 *tx_pause)
@@ -538,7 +530,6 @@ int mlx5_query_port_pause(struct mlx5_core_dev *dev,
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_pause);
int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev,
u16 stall_critical_watermark,
@@ -597,7 +588,6 @@ int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx)
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PFCC, 0, 1);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_pfc);
int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
{
@@ -616,7 +606,6 @@ int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_pfc);
int mlx5_max_tc(struct mlx5_core_dev *mdev)
{
@@ -667,7 +656,6 @@ int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc)
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_prio_tc);
int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
u8 prio, u8 *tc)
@@ -689,7 +677,6 @@ int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc);
static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
int inlen)
@@ -728,7 +715,6 @@ int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group)
return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group);
int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
u8 tc, u8 *tc_group)
@@ -749,7 +735,6 @@ int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_tc_group);
int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
{
@@ -763,7 +748,6 @@ int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_tc_bw_alloc);
int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
u8 tc, u8 *bw_pct)
@@ -784,7 +768,6 @@ int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_tc_bw_alloc);
int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
u8 *max_bw_value,
@@ -808,7 +791,6 @@ int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
}
-EXPORT_SYMBOL_GPL(mlx5_modify_port_ets_rate_limit);
int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
u8 *max_bw_value,
@@ -834,7 +816,6 @@ int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_ets_rate_limit);
int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode)
{
@@ -845,7 +826,6 @@ int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode)
MLX5_SET(set_wol_rol_in, in, wol_mode, wol_mode);
return mlx5_cmd_exec_in(mdev, set_wol_rol, in);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_wol);
int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
{
@@ -860,7 +840,6 @@ int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_wol);
int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen)
{
@@ -988,6 +967,26 @@ int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state)
return err;
}
+int mlx5_query_port_buffer_ownership(struct mlx5_core_dev *mdev,
+ u8 *buffer_ownership)
+{
+ u32 out[MLX5_ST_SZ_DW(pfcc_reg)] = {};
+ int err;
+
+ if (!MLX5_CAP_PCAM_FEATURE(mdev, buffer_ownership)) {
+ *buffer_ownership = MLX5_BUF_OWNERSHIP_UNKNOWN;
+ return 0;
+ }
+
+ err = mlx5_query_pfcc_reg(mdev, out, sizeof(out));
+ if (err)
+ return err;
+
+ *buffer_ownership = MLX5_GET(pfcc_reg, out, buf_ownership);
+
+ return 0;
+}
+
int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio)
{
int sz = MLX5_ST_SZ_BYTES(qpdpm_reg);
@@ -1058,53 +1057,58 @@ out:
}
/* speed in units of 1Mb */
-static const u32 mlx5e_link_speed[MLX5E_LINK_MODES_NUMBER] = {
- [MLX5E_1000BASE_CX_SGMII] = 1000,
- [MLX5E_1000BASE_KX] = 1000,
- [MLX5E_10GBASE_CX4] = 10000,
- [MLX5E_10GBASE_KX4] = 10000,
- [MLX5E_10GBASE_KR] = 10000,
- [MLX5E_20GBASE_KR2] = 20000,
- [MLX5E_40GBASE_CR4] = 40000,
- [MLX5E_40GBASE_KR4] = 40000,
- [MLX5E_56GBASE_R4] = 56000,
- [MLX5E_10GBASE_CR] = 10000,
- [MLX5E_10GBASE_SR] = 10000,
- [MLX5E_10GBASE_ER] = 10000,
- [MLX5E_40GBASE_SR4] = 40000,
- [MLX5E_40GBASE_LR4] = 40000,
- [MLX5E_50GBASE_SR2] = 50000,
- [MLX5E_100GBASE_CR4] = 100000,
- [MLX5E_100GBASE_SR4] = 100000,
- [MLX5E_100GBASE_KR4] = 100000,
- [MLX5E_100GBASE_LR4] = 100000,
- [MLX5E_100BASE_TX] = 100,
- [MLX5E_1000BASE_T] = 1000,
- [MLX5E_10GBASE_T] = 10000,
- [MLX5E_25GBASE_CR] = 25000,
- [MLX5E_25GBASE_KR] = 25000,
- [MLX5E_25GBASE_SR] = 25000,
- [MLX5E_50GBASE_CR2] = 50000,
- [MLX5E_50GBASE_KR2] = 50000,
+static const struct mlx5_link_info mlx5e_link_info[MLX5E_LINK_MODES_NUMBER] = {
+ [MLX5E_1000BASE_CX_SGMII] = {.speed = 1000, .lanes = 1},
+ [MLX5E_1000BASE_KX] = {.speed = 1000, .lanes = 1},
+ [MLX5E_10GBASE_CX4] = {.speed = 10000, .lanes = 4},
+ [MLX5E_10GBASE_KX4] = {.speed = 10000, .lanes = 4},
+ [MLX5E_10GBASE_KR] = {.speed = 10000, .lanes = 1},
+ [MLX5E_20GBASE_KR2] = {.speed = 20000, .lanes = 2},
+ [MLX5E_40GBASE_CR4] = {.speed = 40000, .lanes = 4},
+ [MLX5E_40GBASE_KR4] = {.speed = 40000, .lanes = 4},
+ [MLX5E_56GBASE_R4] = {.speed = 56000, .lanes = 4},
+ [MLX5E_10GBASE_CR] = {.speed = 10000, .lanes = 1},
+ [MLX5E_10GBASE_SR] = {.speed = 10000, .lanes = 1},
+ [MLX5E_10GBASE_ER] = {.speed = 10000, .lanes = 1},
+ [MLX5E_40GBASE_SR4] = {.speed = 40000, .lanes = 4},
+ [MLX5E_40GBASE_LR4] = {.speed = 40000, .lanes = 4},
+ [MLX5E_50GBASE_SR2] = {.speed = 50000, .lanes = 2},
+ [MLX5E_100GBASE_CR4] = {.speed = 100000, .lanes = 4},
+ [MLX5E_100GBASE_SR4] = {.speed = 100000, .lanes = 4},
+ [MLX5E_100GBASE_KR4] = {.speed = 100000, .lanes = 4},
+ [MLX5E_100GBASE_LR4] = {.speed = 100000, .lanes = 4},
+ [MLX5E_100BASE_TX] = {.speed = 100, .lanes = 1},
+ [MLX5E_1000BASE_T] = {.speed = 1000, .lanes = 1},
+ [MLX5E_10GBASE_T] = {.speed = 10000, .lanes = 1},
+ [MLX5E_25GBASE_CR] = {.speed = 25000, .lanes = 1},
+ [MLX5E_25GBASE_KR] = {.speed = 25000, .lanes = 1},
+ [MLX5E_25GBASE_SR] = {.speed = 25000, .lanes = 1},
+ [MLX5E_50GBASE_CR2] = {.speed = 50000, .lanes = 2},
+ [MLX5E_50GBASE_KR2] = {.speed = 50000, .lanes = 2},
};
-static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = {
- [MLX5E_SGMII_100M] = 100,
- [MLX5E_1000BASE_X_SGMII] = 1000,
- [MLX5E_5GBASE_R] = 5000,
- [MLX5E_10GBASE_XFI_XAUI_1] = 10000,
- [MLX5E_40GBASE_XLAUI_4_XLPPI_4] = 40000,
- [MLX5E_25GAUI_1_25GBASE_CR_KR] = 25000,
- [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2] = 50000,
- [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR] = 50000,
- [MLX5E_CAUI_4_100GBASE_CR4_KR4] = 100000,
- [MLX5E_100GAUI_2_100GBASE_CR2_KR2] = 100000,
- [MLX5E_200GAUI_4_200GBASE_CR4_KR4] = 200000,
- [MLX5E_400GAUI_8_400GBASE_CR8] = 400000,
- [MLX5E_100GAUI_1_100GBASE_CR_KR] = 100000,
- [MLX5E_200GAUI_2_200GBASE_CR2_KR2] = 200000,
- [MLX5E_400GAUI_4_400GBASE_CR4_KR4] = 400000,
- [MLX5E_800GAUI_8_800GBASE_CR8_KR8] = 800000,
+static const struct mlx5_link_info
+mlx5e_ext_link_info[MLX5E_EXT_LINK_MODES_NUMBER] = {
+ [MLX5E_SGMII_100M] = {.speed = 100, .lanes = 1},
+ [MLX5E_1000BASE_X_SGMII] = {.speed = 1000, .lanes = 1},
+ [MLX5E_5GBASE_R] = {.speed = 5000, .lanes = 1},
+ [MLX5E_10GBASE_XFI_XAUI_1] = {.speed = 10000, .lanes = 1},
+ [MLX5E_40GBASE_XLAUI_4_XLPPI_4] = {.speed = 40000, .lanes = 4},
+ [MLX5E_25GAUI_1_25GBASE_CR_KR] = {.speed = 25000, .lanes = 1},
+ [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2] = {.speed = 50000, .lanes = 2},
+ [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR] = {.speed = 50000, .lanes = 1},
+ [MLX5E_CAUI_4_100GBASE_CR4_KR4] = {.speed = 100000, .lanes = 4},
+ [MLX5E_100GAUI_2_100GBASE_CR2_KR2] = {.speed = 100000, .lanes = 2},
+ [MLX5E_200GAUI_4_200GBASE_CR4_KR4] = {.speed = 200000, .lanes = 4},
+ [MLX5E_400GAUI_8_400GBASE_CR8] = {.speed = 400000, .lanes = 8},
+ [MLX5E_100GAUI_1_100GBASE_CR_KR] = {.speed = 100000, .lanes = 1},
+ [MLX5E_200GAUI_2_200GBASE_CR2_KR2] = {.speed = 200000, .lanes = 2},
+ [MLX5E_400GAUI_4_400GBASE_CR4_KR4] = {.speed = 400000, .lanes = 4},
+ [MLX5E_800GAUI_8_800GBASE_CR8_KR8] = {.speed = 800000, .lanes = 8},
+ [MLX5E_200GAUI_1_200GBASE_CR1_KR1] = {.speed = 200000, .lanes = 1},
+ [MLX5E_400GAUI_2_400GBASE_CR2_KR2] = {.speed = 400000, .lanes = 2},
+ [MLX5E_800GAUI_4_800GBASE_CR4_KR4] = {.speed = 800000, .lanes = 4},
+ [MLX5E_1600TAUI_8_1600TBASE_CR8_KR8] = {.speed = 1600000, .lanes = 8},
};
int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
@@ -1142,54 +1146,65 @@ bool mlx5_ptys_ext_supported(struct mlx5_core_dev *mdev)
return !!eproto.cap;
}
-static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev,
- const u32 **arr, u32 *size,
- bool force_legacy)
+static void mlx5e_port_get_link_mode_info_arr(struct mlx5_core_dev *mdev,
+ const struct mlx5_link_info **arr,
+ u32 *size,
+ bool force_legacy)
{
bool ext = force_legacy ? false : mlx5_ptys_ext_supported(mdev);
- *size = ext ? ARRAY_SIZE(mlx5e_ext_link_speed) :
- ARRAY_SIZE(mlx5e_link_speed);
- *arr = ext ? mlx5e_ext_link_speed : mlx5e_link_speed;
+ *size = ext ? ARRAY_SIZE(mlx5e_ext_link_info) :
+ ARRAY_SIZE(mlx5e_link_info);
+ *arr = ext ? mlx5e_ext_link_info : mlx5e_link_info;
}
-u32 mlx5_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper,
- bool force_legacy)
+const struct mlx5_link_info *mlx5_port_ptys2info(struct mlx5_core_dev *mdev,
+ u32 eth_proto_oper,
+ bool force_legacy)
{
unsigned long temp = eth_proto_oper;
- const u32 *table;
- u32 speed = 0;
+ const struct mlx5_link_info *table;
u32 max_size;
int i;
- mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy);
+ mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size,
+ force_legacy);
i = find_first_bit(&temp, max_size);
- if (i < max_size)
- speed = table[i];
- return speed;
+
+ /* mlx5e_link_info has holes; an entry whose speed
+ * is zero indicates one.
+ */
+ if (i < max_size && table[i].speed)
+ return &table[i];
+
+ return NULL;
}
-u32 mlx5_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
- bool force_legacy)
+u32 mlx5_port_info2linkmodes(struct mlx5_core_dev *mdev,
+ struct mlx5_link_info *info,
+ bool force_legacy)
{
+ const struct mlx5_link_info *table;
u32 link_modes = 0;
- const u32 *table;
u32 max_size;
int i;
- mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy);
+ mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size,
+ force_legacy);
for (i = 0; i < max_size; ++i) {
- if (table[i] == speed)
- link_modes |= MLX5E_PROT_MASK(i);
+ if (table[i].speed == info->speed) {
+ if (!info->lanes || table[i].lanes == info->lanes)
+ link_modes |= MLX5E_PROT_MASK(i);
+ }
}
return link_modes;
}
int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
{
+ const struct mlx5_link_info *table;
struct mlx5_port_eth_proto eproto;
u32 max_speed = 0;
- const u32 *table;
u32 max_size;
bool ext;
int err;
@@ -1200,10 +1215,10 @@ int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
if (err)
return err;
- mlx5e_port_get_speed_arr(mdev, &table, &max_size, false);
+ mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size, false);
for (i = 0; i < max_size; ++i)
if (eproto.cap & MLX5E_PROT_MASK(i))
- max_speed = max(max_speed, table[i]);
+ max_speed = max(max_speed, table[i].speed);
*speed = max_speed;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
index a42f6cd99b74..5c552b71e371 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
@@ -118,8 +118,8 @@ static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *
static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev)
{
+ u8 mac[ETH_ALEN] = {};
union ib_gid gid;
- u8 mac[ETH_ALEN];
mlx5_rdma_make_default_gid(dev, &gid);
return mlx5_core_roce_gid_set(dev, 0,
@@ -140,17 +140,17 @@ void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev)
mlx5_nic_vport_disable_roce(dev);
}
-void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
+int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
{
int err;
if (!MLX5_CAP_GEN(dev, roce))
- return;
+ return 0;
err = mlx5_nic_vport_enable_roce(dev);
if (err) {
mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
- return;
+ return err;
}
err = mlx5_rdma_add_roce_addr(dev);
@@ -165,10 +165,11 @@ void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
goto del_roce_addr;
}
- return;
+ return err;
del_roce_addr:
mlx5_rdma_del_roce_addr(dev);
disable_roce:
mlx5_nic_vport_disable_roce(dev);
+ return err;
}
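
Sketch of the expected call-site change (assumed; the actual caller lives outside this hunk). With mlx5_rdma_enable_roce() now returning an error code, the caller is expected to fail its own setup path instead of continuing with RoCE half-enabled:

	err = mlx5_rdma_enable_roce(dev);
	if (err) {
		/* abort the caller's init path; RoCE was already rolled back above */
		return err;
	}
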
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.h b/drivers/net/ethernet/mellanox/mlx5/core/rdma.h
index 750cff2a71a4..3d9e76c3d42f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.h
@@ -8,12 +8,12 @@
#ifdef CONFIG_MLX5_ESWITCH
-void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev);
+int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev);
void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev);
#else /* CONFIG_MLX5_ESWITCH */
-static inline void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) {}
+static inline int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev) {}
#endif /* CONFIG_MLX5_ESWITCH */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
index e393391966e0..39a209b9b684 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -56,6 +56,8 @@ bool mlx5_qos_tsar_type_supported(struct mlx5_core_dev *dev, int type, u8 hierar
return cap & TSAR_TYPE_CAP_MASK_ROUND_ROBIN;
case TSAR_ELEMENT_TSAR_TYPE_ETS:
return cap & TSAR_TYPE_CAP_MASK_ETS;
+ case TSAR_ELEMENT_TSAR_TYPE_TC_ARB:
+ return cap & TSAR_TYPE_CAP_MASK_TC_ARB;
}
return false;
@@ -87,6 +89,8 @@ bool mlx5_qos_element_type_supported(struct mlx5_core_dev *dev, int type, u8 hie
return cap & ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
case SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP:
return cap & ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_RATE_LIMIT:
+ return cap & ELEMENT_TYPE_CAP_MASK_RATE_LIMIT;
}
return false;
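
Illustrative guard for the new scheduling capability bits (assumed caller; SCHEDULING_HIERARCHY_E_SWITCH is the hierarchy value already used elsewhere in the driver). A consumer should check support before creating a TC-arbiter TSAR or a rate-limit scheduling element:

	/* refuse early if firmware does not advertise the new element types */
	if (!mlx5_qos_tsar_type_supported(dev, TSAR_ELEMENT_TSAR_TYPE_TC_ARB,
					  SCHEDULING_HIERARCHY_E_SWITCH))
		return -EOPNOTSUPP;

	if (!mlx5_qos_element_type_supported(dev,
					     SCHEDULING_CONTEXT_ELEMENT_TYPE_RATE_LIMIT,
					     SCHEDULING_HIERARCHY_E_SWITCH))
		return -EOPNOTSUPP;
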
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
index 99219ea52c4b..f310bde3d11f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
@@ -16,7 +16,6 @@ struct mlx5_sf_dev_table {
struct xarray devices;
phys_addr_t base_address;
u64 sf_bar_length;
- struct notifier_block nb;
struct workqueue_struct *active_wq;
struct work_struct work;
u8 stop_active_wq:1;
@@ -156,18 +155,23 @@ static void mlx5_sf_dev_del(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_de
static int
mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_code, void *data)
{
- struct mlx5_sf_dev_table *table = container_of(nb, struct mlx5_sf_dev_table, nb);
+ struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev,
+ priv.sf_dev_nb);
+ struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
const struct mlx5_vhca_state_event *event = data;
struct mlx5_sf_dev *sf_dev;
u16 max_functions;
u16 sf_index;
u16 base_id;
- max_functions = mlx5_sf_max_functions(table->dev);
+ if (!table)
+ return 0;
+
+ max_functions = mlx5_sf_max_functions(dev);
if (!max_functions)
return 0;
- base_id = mlx5_sf_start_function_id(table->dev);
+ base_id = mlx5_sf_start_function_id(dev);
if (event->function_id < base_id || event->function_id >= (base_id + max_functions))
return 0;
@@ -177,19 +181,19 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
case MLX5_VHCA_STATE_INVALID:
case MLX5_VHCA_STATE_ALLOCATED:
if (sf_dev)
- mlx5_sf_dev_del(table->dev, sf_dev, sf_index);
+ mlx5_sf_dev_del(dev, sf_dev, sf_index);
break;
case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
if (sf_dev)
- mlx5_sf_dev_del(table->dev, sf_dev, sf_index);
+ mlx5_sf_dev_del(dev, sf_dev, sf_index);
else
- mlx5_core_err(table->dev,
+ mlx5_core_err(dev,
"SF DEV: teardown state for invalid dev index=%d sfnum=0x%x\n",
sf_index, event->sw_function_id);
break;
case MLX5_VHCA_STATE_ACTIVE:
if (!sf_dev)
- mlx5_sf_dev_add(table->dev, sf_index, event->function_id,
+ mlx5_sf_dev_add(dev, sf_index, event->function_id,
event->sw_function_id);
break;
default:
@@ -315,6 +319,15 @@ static void mlx5_sf_dev_destroy_active_works(struct mlx5_sf_dev_table *table)
}
}
+int mlx5_sf_dev_notifier_init(struct mlx5_core_dev *dev)
+{
+ if (mlx5_core_is_sf(dev))
+ return 0;
+
+ dev->priv.sf_dev_nb.notifier_call = mlx5_sf_dev_state_change_handler;
+ return mlx5_vhca_event_notifier_register(dev, &dev->priv.sf_dev_nb);
+}
+
void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
{
struct mlx5_sf_dev_table *table;
@@ -329,17 +342,12 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
goto table_err;
}
- table->nb.notifier_call = mlx5_sf_dev_state_change_handler;
table->dev = dev;
table->sf_bar_length = 1 << (MLX5_CAP_GEN(dev, log_min_sf_size) + 12);
table->base_address = pci_resource_start(dev->pdev, 2);
xa_init(&table->devices);
dev->priv.sf_dev_table = table;
- err = mlx5_vhca_event_notifier_register(dev, &table->nb);
- if (err)
- goto vhca_err;
-
err = mlx5_sf_dev_create_active_works(table);
if (err)
goto add_active_err;
@@ -351,10 +359,8 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
arm_err:
mlx5_sf_dev_destroy_active_works(table);
-add_active_err:
- mlx5_vhca_event_notifier_unregister(dev, &table->nb);
mlx5_vhca_event_work_queues_flush(dev);
-vhca_err:
+add_active_err:
kfree(table);
dev->priv.sf_dev_table = NULL;
table_err:
@@ -372,6 +378,14 @@ static void mlx5_sf_dev_destroy_all(struct mlx5_sf_dev_table *table)
}
}
+void mlx5_sf_dev_notifier_cleanup(struct mlx5_core_dev *dev)
+{
+ if (mlx5_core_is_sf(dev))
+ return;
+
+ mlx5_vhca_event_notifier_unregister(dev, &dev->priv.sf_dev_nb);
+}
+
void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev)
{
struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
@@ -380,8 +394,6 @@ void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev)
return;
mlx5_sf_dev_destroy_active_works(table);
- mlx5_vhca_event_notifier_unregister(dev, &table->nb);
- mlx5_vhca_event_work_queues_flush(dev);
/* Now that event handler is not running, it is safe to destroy
* the sf device without race.
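
Assumed probe-time ordering implied by the split above (the real call sites are in the core init code, not shown in this diff). The vhca notifier is now registered once per device, before the SF device table exists, and the handler simply returns while dev->priv.sf_dev_table is still NULL:

	/* register early; the SF device table may not exist yet */
	err = mlx5_sf_dev_notifier_init(dev);
	if (err)
		return err;

	/* later, when SF auxiliary devices are wanted */
	mlx5_sf_dev_table_create(dev);

	/* teardown mirrors the setup order */
	mlx5_sf_dev_table_destroy(dev);
	mlx5_sf_dev_notifier_cleanup(dev);
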
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
index b99131e95e37..3ab0449c770c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
@@ -25,7 +25,9 @@ struct mlx5_sf_peer_devlink_event_ctx {
int err;
};
+int mlx5_sf_dev_notifier_init(struct mlx5_core_dev *dev);
void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev);
+void mlx5_sf_dev_notifier_cleanup(struct mlx5_core_dev *dev);
void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev);
int mlx5_sf_driver_register(void);
@@ -35,10 +37,19 @@ bool mlx5_sf_dev_allocated(const struct mlx5_core_dev *dev);
#else
+static inline int mlx5_sf_dev_notifier_init(struct mlx5_core_dev *dev)
+{
+ return 0;
+}
+
static inline void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
{
}
+static inline void mlx5_sf_dev_notifier_cleanup(struct mlx5_core_dev *dev)
+{
+}
+
static inline void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev)
{
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h
index 0537de86f981..9b0f44253f33 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h
@@ -28,7 +28,7 @@ DECLARE_EVENT_CLASS(mlx5_sf_dev_template,
__entry->hw_fn_id = sfdev->fn_id;
__entry->sfnum = sfdev->sfnum;
),
- TP_printk("(%s) sfdev=%pK aux_id=%d hw_id=0x%x sfnum=%u\n",
+ TP_printk("(%s) sfdev=%p aux_id=%d hw_id=0x%x sfnum=%u\n",
__get_str(devname), __entry->sfdev,
__entry->aux_id, __entry->hw_fn_id,
__entry->sfnum)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index a96be98be032..b82323b8449e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -31,9 +31,6 @@ struct mlx5_sf_table {
struct mlx5_core_dev *dev; /* To refer from notifier context. */
struct xarray function_ids; /* function id based lookup. */
struct mutex sf_state_lock; /* Serializes sf state among user cmds & vhca event handler. */
- struct notifier_block esw_nb;
- struct notifier_block vhca_nb;
- struct notifier_block mdev_nb;
};
static struct mlx5_sf *
@@ -257,6 +254,7 @@ static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
return 0;
esw_err:
+ mlx5_sf_function_id_erase(table, sf);
mlx5_sf_free(table, sf);
return err;
}
@@ -284,7 +282,7 @@ mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_
NL_SET_ERR_MSG_MOD(extack, "External controller is unsupported");
return -EOPNOTSUPP;
}
- if (new_attr->pfnum != mlx5_get_dev_index(dev)) {
+ if (new_attr->pfnum != PCI_FUNC(dev->pdev->devfn)) {
NL_SET_ERR_MSG_MOD(extack, "Invalid pfnum supplied");
return -EOPNOTSUPP;
}
@@ -390,11 +388,16 @@ static bool mlx5_sf_state_update_check(const struct mlx5_sf *sf, u8 new_state)
static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data)
{
- struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, vhca_nb);
+ struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev,
+ priv.sf_table_vhca_nb);
+ struct mlx5_sf_table *table = dev->priv.sf_table;
const struct mlx5_vhca_state_event *event = data;
bool update = false;
struct mlx5_sf *sf;
+ if (!table)
+ return 0;
+
mutex_lock(&table->sf_state_lock);
sf = mlx5_sf_lookup_by_function_id(table, event->function_id);
if (!sf)
@@ -406,7 +409,7 @@ static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, v
update = mlx5_sf_state_update_check(sf, event->new_vhca_state);
if (update)
sf->hw_state = event->new_vhca_state;
- trace_mlx5_sf_update_state(table->dev, sf->port_index, sf->controller,
+ trace_mlx5_sf_update_state(dev, sf->port_index, sf->controller,
sf->hw_fn_id, sf->hw_state);
unlock:
mutex_unlock(&table->sf_state_lock);
@@ -424,12 +427,16 @@ static void mlx5_sf_del_all(struct mlx5_sf_table *table)
static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, void *data)
{
- struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, esw_nb);
+ struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev,
+ priv.sf_table_esw_nb);
const struct mlx5_esw_event_info *mode = data;
+ if (!dev->priv.sf_table)
+ return 0;
+
switch (mode->new_mode) {
case MLX5_ESWITCH_LEGACY:
- mlx5_sf_del_all(table);
+ mlx5_sf_del_all(dev->priv.sf_table);
break;
default:
break;
@@ -440,15 +447,16 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, voi
static int mlx5_sf_mdev_event(struct notifier_block *nb, unsigned long event, void *data)
{
- struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, mdev_nb);
+ struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev,
+ priv.sf_table_mdev_nb);
struct mlx5_sf_peer_devlink_event_ctx *event_ctx = data;
+ struct mlx5_sf_table *table = dev->priv.sf_table;
int ret = NOTIFY_DONE;
struct mlx5_sf *sf;
- if (event != MLX5_DRIVER_EVENT_SF_PEER_DEVLINK)
+ if (!table || event != MLX5_DRIVER_EVENT_SF_PEER_DEVLINK)
return NOTIFY_DONE;
-
mutex_lock(&table->sf_state_lock);
sf = mlx5_sf_lookup_by_function_id(table, event_ctx->fn_id);
if (!sf)
@@ -463,10 +471,40 @@ out:
return ret;
}
+int mlx5_sf_notifiers_init(struct mlx5_core_dev *dev)
+{
+ int err;
+
+ if (mlx5_core_is_sf(dev))
+ return 0;
+
+ dev->priv.sf_table_esw_nb.notifier_call = mlx5_sf_esw_event;
+ err = mlx5_esw_event_notifier_register(dev, &dev->priv.sf_table_esw_nb);
+ if (err)
+ return err;
+
+ dev->priv.sf_table_vhca_nb.notifier_call = mlx5_sf_vhca_event;
+ err = mlx5_vhca_event_notifier_register(dev,
+ &dev->priv.sf_table_vhca_nb);
+ if (err)
+ goto vhca_err;
+
+ dev->priv.sf_table_mdev_nb.notifier_call = mlx5_sf_mdev_event;
+ err = mlx5_blocking_notifier_register(dev, &dev->priv.sf_table_mdev_nb);
+ if (err)
+ goto mdev_err;
+
+ return 0;
+mdev_err:
+ mlx5_vhca_event_notifier_unregister(dev, &dev->priv.sf_table_vhca_nb);
+vhca_err:
+ mlx5_esw_event_notifier_unregister(dev, &dev->priv.sf_table_esw_nb);
+ return err;
+}
+
int mlx5_sf_table_init(struct mlx5_core_dev *dev)
{
struct mlx5_sf_table *table;
- int err;
if (!mlx5_sf_table_supported(dev) || !mlx5_vhca_event_supported(dev))
return 0;
@@ -479,28 +517,18 @@ int mlx5_sf_table_init(struct mlx5_core_dev *dev)
table->dev = dev;
xa_init(&table->function_ids);
dev->priv.sf_table = table;
- table->esw_nb.notifier_call = mlx5_sf_esw_event;
- err = mlx5_esw_event_notifier_register(dev->priv.eswitch, &table->esw_nb);
- if (err)
- goto reg_err;
-
- table->vhca_nb.notifier_call = mlx5_sf_vhca_event;
- err = mlx5_vhca_event_notifier_register(table->dev, &table->vhca_nb);
- if (err)
- goto vhca_err;
-
- table->mdev_nb.notifier_call = mlx5_sf_mdev_event;
- mlx5_blocking_notifier_register(dev, &table->mdev_nb);
return 0;
+}
-vhca_err:
- mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
-reg_err:
- mutex_destroy(&table->sf_state_lock);
- kfree(table);
- dev->priv.sf_table = NULL;
- return err;
+void mlx5_sf_notifiers_cleanup(struct mlx5_core_dev *dev)
+{
+ if (mlx5_core_is_sf(dev))
+ return;
+
+ mlx5_blocking_notifier_unregister(dev, &dev->priv.sf_table_mdev_nb);
+ mlx5_vhca_event_notifier_unregister(dev, &dev->priv.sf_table_vhca_nb);
+ mlx5_esw_event_notifier_unregister(dev, &dev->priv.sf_table_esw_nb);
}
void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
@@ -510,10 +538,17 @@ void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
if (!table)
return;
- mlx5_blocking_notifier_unregister(dev, &table->mdev_nb);
- mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb);
- mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
mutex_destroy(&table->sf_state_lock);
WARN_ON(!xa_empty(&table->function_ids));
kfree(table);
}
+
+bool mlx5_sf_table_empty(const struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_table *table = dev->priv.sf_table;
+
+ if (!table)
+ return true;
+
+ return xa_empty(&table->function_ids);
+}
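
Hypothetical use of the new mlx5_sf_table_empty() helper (the real check is expected in the eswitch mode-change path, outside this diff). It lets a caller refuse an operation while SF ports still exist without reaching into the table internals:

	if (!mlx5_sf_table_empty(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "SF ports are present, operation not allowed");
		return -EOPNOTSUPP;
	}
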
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
index 1f613320fe07..bd968f3b3855 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
@@ -30,9 +30,7 @@ enum mlx5_sf_hwc_index {
};
struct mlx5_sf_hw_table {
- struct mlx5_core_dev *dev;
struct mutex table_lock; /* Serializes sf deletion and vhca state change handler. */
- struct notifier_block vhca_nb;
struct mlx5_sf_hwc_table hwc[MLX5_SF_HWC_MAX];
};
@@ -71,14 +69,16 @@ mlx5_sf_table_fn_to_hwc(struct mlx5_sf_hw_table *table, u16 fn_id)
return NULL;
}
-static int mlx5_sf_hw_table_id_alloc(struct mlx5_sf_hw_table *table, u32 controller,
+static int mlx5_sf_hw_table_id_alloc(struct mlx5_core_dev *dev,
+ struct mlx5_sf_hw_table *table,
+ u32 controller,
u32 usr_sfnum)
{
struct mlx5_sf_hwc_table *hwc;
int free_idx = -1;
int i;
- hwc = mlx5_sf_controller_to_hwc(table->dev, controller);
+ hwc = mlx5_sf_controller_to_hwc(dev, controller);
if (!hwc->sfs)
return -ENOSPC;
@@ -100,11 +100,13 @@ static int mlx5_sf_hw_table_id_alloc(struct mlx5_sf_hw_table *table, u32 control
return free_idx;
}
-static void mlx5_sf_hw_table_id_free(struct mlx5_sf_hw_table *table, u32 controller, int id)
+static void mlx5_sf_hw_table_id_free(struct mlx5_core_dev *dev,
+ struct mlx5_sf_hw_table *table,
+ u32 controller, int id)
{
struct mlx5_sf_hwc_table *hwc;
- hwc = mlx5_sf_controller_to_hwc(table->dev, controller);
+ hwc = mlx5_sf_controller_to_hwc(dev, controller);
hwc->sfs[id].allocated = false;
hwc->sfs[id].pending_delete = false;
}
@@ -120,7 +122,7 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 controller, u32 usr
return -EOPNOTSUPP;
mutex_lock(&table->table_lock);
- sw_id = mlx5_sf_hw_table_id_alloc(table, controller, usr_sfnum);
+ sw_id = mlx5_sf_hw_table_id_alloc(dev, table, controller, usr_sfnum);
if (sw_id < 0) {
err = sw_id;
goto exist_err;
@@ -151,7 +153,7 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 controller, u32 usr
vhca_err:
mlx5_cmd_dealloc_sf(dev, hw_fn_id);
err:
- mlx5_sf_hw_table_id_free(table, controller, sw_id);
+ mlx5_sf_hw_table_id_free(dev, table, controller, sw_id);
exist_err:
mutex_unlock(&table->table_lock);
return err;
@@ -165,7 +167,7 @@ void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u32 controller, u16 id)
mutex_lock(&table->table_lock);
hw_fn_id = mlx5_sf_sw_to_hw_id(dev, controller, id);
mlx5_cmd_dealloc_sf(dev, hw_fn_id);
- mlx5_sf_hw_table_id_free(table, controller, id);
+ mlx5_sf_hw_table_id_free(dev, table, controller, id);
mutex_unlock(&table->table_lock);
}
@@ -216,10 +218,12 @@ static void mlx5_sf_hw_table_hwc_dealloc_all(struct mlx5_core_dev *dev,
}
}
-static void mlx5_sf_hw_table_dealloc_all(struct mlx5_sf_hw_table *table)
+static void mlx5_sf_hw_table_dealloc_all(struct mlx5_core_dev *dev,
+ struct mlx5_sf_hw_table *table)
{
- mlx5_sf_hw_table_hwc_dealloc_all(table->dev, &table->hwc[MLX5_SF_HWC_EXTERNAL]);
- mlx5_sf_hw_table_hwc_dealloc_all(table->dev, &table->hwc[MLX5_SF_HWC_LOCAL]);
+ mlx5_sf_hw_table_hwc_dealloc_all(dev,
+ &table->hwc[MLX5_SF_HWC_EXTERNAL]);
+ mlx5_sf_hw_table_hwc_dealloc_all(dev, &table->hwc[MLX5_SF_HWC_LOCAL]);
}
static int mlx5_sf_hw_table_hwc_init(struct mlx5_sf_hwc_table *hwc, u16 max_fn, u16 base_id)
@@ -301,7 +305,6 @@ int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev)
}
mutex_init(&table->table_lock);
- table->dev = dev;
dev->priv.sf_hw_table = table;
base_id = mlx5_sf_start_function_id(dev);
@@ -338,19 +341,22 @@ void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev)
mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_LOCAL]);
mutex_destroy(&table->table_lock);
kfree(table);
+ dev->priv.sf_hw_table = NULL;
res_unregister:
mlx5_sf_hw_table_res_unregister(dev);
}
static int mlx5_sf_hw_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data)
{
- struct mlx5_sf_hw_table *table = container_of(nb, struct mlx5_sf_hw_table, vhca_nb);
+ struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev,
+ priv.sf_hw_table_vhca_nb);
+ struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
const struct mlx5_vhca_state_event *event = data;
struct mlx5_sf_hwc_table *hwc;
struct mlx5_sf_hw *sf_hw;
u16 sw_id;
- if (event->new_vhca_state != MLX5_VHCA_STATE_ALLOCATED)
+ if (!table || event->new_vhca_state != MLX5_VHCA_STATE_ALLOCATED)
return 0;
hwc = mlx5_sf_table_fn_to_hwc(table, event->function_id);
@@ -365,20 +371,28 @@ static int mlx5_sf_hw_vhca_event(struct notifier_block *nb, unsigned long opcode
* Hence recycle the sf hardware id for reuse.
*/
if (sf_hw->allocated && sf_hw->pending_delete)
- mlx5_sf_hw_table_hwc_sf_free(table->dev, hwc, sw_id);
+ mlx5_sf_hw_table_hwc_sf_free(dev, hwc, sw_id);
mutex_unlock(&table->table_lock);
return 0;
}
-int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev)
+int mlx5_sf_hw_notifier_init(struct mlx5_core_dev *dev)
{
- struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
-
- if (!table)
+ if (mlx5_core_is_sf(dev))
return 0;
- table->vhca_nb.notifier_call = mlx5_sf_hw_vhca_event;
- return mlx5_vhca_event_notifier_register(dev, &table->vhca_nb);
+ dev->priv.sf_hw_table_vhca_nb.notifier_call = mlx5_sf_hw_vhca_event;
+ return mlx5_vhca_event_notifier_register(dev,
+ &dev->priv.sf_hw_table_vhca_nb);
+}
+
+void mlx5_sf_hw_notifier_cleanup(struct mlx5_core_dev *dev)
+{
+ if (mlx5_core_is_sf(dev))
+ return;
+
+ mlx5_vhca_event_notifier_unregister(dev,
+ &dev->priv.sf_hw_table_vhca_nb);
}
void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
@@ -388,9 +402,8 @@ void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
if (!table)
return;
- mlx5_vhca_event_notifier_unregister(dev, &table->vhca_nb);
/* Dealloc SFs whose firmware event has been missed. */
- mlx5_sf_hw_table_dealloc_all(table);
+ mlx5_sf_hw_table_dealloc_all(dev, table);
}
bool mlx5_sf_hw_table_supported(const struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
index 860f9ddb7107..d8a934a0e968 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
@@ -12,11 +12,15 @@
int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev);
void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev);
-int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev);
+int mlx5_sf_hw_notifier_init(struct mlx5_core_dev *dev);
+void mlx5_sf_hw_notifier_cleanup(struct mlx5_core_dev *dev);
void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev);
+int mlx5_sf_notifiers_init(struct mlx5_core_dev *dev);
int mlx5_sf_table_init(struct mlx5_core_dev *dev);
+void mlx5_sf_notifiers_cleanup(struct mlx5_core_dev *dev);
void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev);
+bool mlx5_sf_table_empty(const struct mlx5_core_dev *dev);
int mlx5_devlink_sf_port_new(struct devlink *devlink,
const struct devlink_port_new_attrs *add_attr,
@@ -43,24 +47,42 @@ static inline void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev)
{
}
-static inline int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev)
+static inline int mlx5_sf_hw_notifier_init(struct mlx5_core_dev *dev)
{
return 0;
}
+static inline void mlx5_sf_hw_notifier_cleanup(struct mlx5_core_dev *dev)
+{
+}
+
static inline void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
{
}
+static inline int mlx5_sf_notifiers_init(struct mlx5_core_dev *dev)
+{
+ return 0;
+}
+
static inline int mlx5_sf_table_init(struct mlx5_core_dev *dev)
{
return 0;
}
+static inline void mlx5_sf_notifiers_cleanup(struct mlx5_core_dev *dev)
+{
+}
+
static inline void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
{
}
+static inline bool mlx5_sf_table_empty(const struct mlx5_core_dev *dev)
+{
+ return true;
+}
+
#endif
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
index cda01ba441ae..b04cf6cf8956 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
@@ -9,15 +9,9 @@
#define CREATE_TRACE_POINTS
#include "diag/vhca_tracepoint.h"
-struct mlx5_vhca_state_notifier {
- struct mlx5_core_dev *dev;
- struct mlx5_nb nb;
- struct blocking_notifier_head n_head;
-};
-
struct mlx5_vhca_event_work {
struct work_struct work;
- struct mlx5_vhca_state_notifier *notifier;
+ struct mlx5_core_dev *dev;
struct mlx5_vhca_state_event event;
};
@@ -95,16 +89,14 @@ mlx5_vhca_event_notify(struct mlx5_core_dev *dev, struct mlx5_vhca_state_event *
mlx5_vhca_event_arm(dev, event->function_id);
trace_mlx5_sf_vhca_event(dev, event);
- blocking_notifier_call_chain(&dev->priv.vhca_state_notifier->n_head, 0, event);
+ blocking_notifier_call_chain(&dev->priv.vhca_state_n_head, 0, event);
}
static void mlx5_vhca_state_work_handler(struct work_struct *_work)
{
struct mlx5_vhca_event_work *work = container_of(_work, struct mlx5_vhca_event_work, work);
- struct mlx5_vhca_state_notifier *notifier = work->notifier;
- struct mlx5_core_dev *dev = notifier->dev;
- mlx5_vhca_event_notify(dev, &work->event);
+ mlx5_vhca_event_notify(work->dev, &work->event);
kfree(work);
}
@@ -116,8 +108,8 @@ void mlx5_vhca_events_work_enqueue(struct mlx5_core_dev *dev, int idx, struct wo
static int
mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, void *data)
{
- struct mlx5_vhca_state_notifier *notifier =
- mlx5_nb_cof(nb, struct mlx5_vhca_state_notifier, nb);
+ struct mlx5_core_dev *dev = mlx5_nb_cof(nb, struct mlx5_core_dev,
+ priv.vhca_state_nb);
struct mlx5_vhca_event_work *work;
struct mlx5_eqe *eqe = data;
int wq_idx;
@@ -126,10 +118,10 @@ mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, v
if (!work)
return NOTIFY_DONE;
INIT_WORK(&work->work, &mlx5_vhca_state_work_handler);
- work->notifier = notifier;
+ work->dev = dev;
work->event.function_id = be16_to_cpu(eqe->data.vhca_state.function_id);
wq_idx = work->event.function_id % MLX5_DEV_MAX_WQS;
- mlx5_vhca_events_work_enqueue(notifier->dev, wq_idx, &work->work);
+ mlx5_vhca_events_work_enqueue(dev, wq_idx, &work->work);
return NOTIFY_OK;
}
@@ -145,9 +137,15 @@ void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap)
MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_teardown_request, 1);
}
+void mlx5_vhca_state_notifier_init(struct mlx5_core_dev *dev)
+{
+ BLOCKING_INIT_NOTIFIER_HEAD(&dev->priv.vhca_state_n_head);
+ MLX5_NB_INIT(&dev->priv.vhca_state_nb, mlx5_vhca_state_change_notifier,
+ VHCA_STATE_CHANGE);
+}
+
int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
{
- struct mlx5_vhca_state_notifier *notifier;
char wq_name[MLX5_CMD_WQ_MAX_NAME];
struct mlx5_vhca_events *events;
int err, i;
@@ -160,7 +158,6 @@ int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
return -ENOMEM;
events->dev = dev;
- dev->priv.vhca_events = events;
for (i = 0; i < MLX5_DEV_MAX_WQS; i++) {
snprintf(wq_name, MLX5_CMD_WQ_MAX_NAME, "mlx5_vhca_event%d", i);
events->handler[i].wq = create_singlethread_workqueue(wq_name);
@@ -169,20 +166,10 @@ int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
goto err_create_wq;
}
}
+ dev->priv.vhca_events = events;
- notifier = kzalloc(sizeof(*notifier), GFP_KERNEL);
- if (!notifier) {
- err = -ENOMEM;
- goto err_notifier;
- }
-
- dev->priv.vhca_state_notifier = notifier;
- notifier->dev = dev;
- BLOCKING_INIT_NOTIFIER_HEAD(&notifier->n_head);
- MLX5_NB_INIT(&notifier->nb, mlx5_vhca_state_change_notifier, VHCA_STATE_CHANGE);
return 0;
-err_notifier:
err_create_wq:
for (--i; i >= 0; i--)
destroy_workqueue(events->handler[i].wq);
@@ -211,8 +198,6 @@ void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev)
if (!mlx5_vhca_event_supported(dev))
return;
- kfree(dev->priv.vhca_state_notifier);
- dev->priv.vhca_state_notifier = NULL;
vhca_events = dev->priv.vhca_events;
for (i = 0; i < MLX5_DEV_MAX_WQS; i++)
destroy_workqueue(vhca_events->handler[i].wq);
@@ -221,34 +206,30 @@ void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev)
void mlx5_vhca_event_start(struct mlx5_core_dev *dev)
{
- struct mlx5_vhca_state_notifier *notifier;
-
- if (!dev->priv.vhca_state_notifier)
+ if (!mlx5_vhca_event_supported(dev))
return;
- notifier = dev->priv.vhca_state_notifier;
- mlx5_eq_notifier_register(dev, &notifier->nb);
+ mlx5_eq_notifier_register(dev, &dev->priv.vhca_state_nb);
}
void mlx5_vhca_event_stop(struct mlx5_core_dev *dev)
{
- struct mlx5_vhca_state_notifier *notifier;
-
- if (!dev->priv.vhca_state_notifier)
+ if (!mlx5_vhca_event_supported(dev))
return;
- notifier = dev->priv.vhca_state_notifier;
- mlx5_eq_notifier_unregister(dev, &notifier->nb);
+ mlx5_eq_notifier_unregister(dev, &dev->priv.vhca_state_nb);
+
+ /* Flush workqueues of all pending events. */
+ mlx5_vhca_event_work_queues_flush(dev);
}
int mlx5_vhca_event_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb)
{
- if (!dev->priv.vhca_state_notifier)
- return -EOPNOTSUPP;
- return blocking_notifier_chain_register(&dev->priv.vhca_state_notifier->n_head, nb);
+ return blocking_notifier_chain_register(&dev->priv.vhca_state_n_head,
+ nb);
}
void mlx5_vhca_event_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb)
{
- blocking_notifier_chain_unregister(&dev->priv.vhca_state_notifier->n_head, nb);
+ blocking_notifier_chain_unregister(&dev->priv.vhca_state_n_head, nb);
}
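
Minimal listener sketch under the new scheme (illustrative names, not part of the patch). Since the notifier head now lives in dev->priv.vhca_state_n_head, registration can no longer fail with -EOPNOTSUPP; a listener that cares whether events will ever be delivered should check mlx5_vhca_event_supported() itself:

static int example_vhca_listener(struct notifier_block *nb,
				 unsigned long opcode, void *data)
{
	const struct mlx5_vhca_state_event *event = data;

	pr_debug("vhca function %u changed state to %d\n",
		 event->function_id, event->new_vhca_state);
	return NOTIFY_OK;
}

	/* somewhere in the listener's setup code */
	nb->notifier_call = example_vhca_listener;
	mlx5_vhca_event_notifier_register(dev, nb);
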
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h
index 1725ba64f8af..52790423874c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h
@@ -18,6 +18,7 @@ static inline bool mlx5_vhca_event_supported(const struct mlx5_core_dev *dev)
}
void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap);
+void mlx5_vhca_state_notifier_init(struct mlx5_core_dev *dev);
int mlx5_vhca_event_init(struct mlx5_core_dev *dev);
void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev);
void mlx5_vhca_event_start(struct mlx5_core_dev *dev);
@@ -37,6 +38,10 @@ static inline void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *s
{
}
+static inline void mlx5_vhca_state_notifier_init(struct mlx5_core_dev *dev)
+{
+}
+
static inline int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
{
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h
deleted file mode 100644
index e2fc69867088..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
-
-#ifndef _DR_STE_V1_
-#define _DR_STE_V1_
-
-#include "dr_types.h"
-#include "dr_ste.h"
-
-bool dr_ste_v1_is_miss_addr_set(u8 *hw_ste_p);
-void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr);
-u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p);
-void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask);
-u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p);
-void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type);
-u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p);
-void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size);
-void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi);
-void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size);
-void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, u8 *action_type_set,
- u32 actions_caps, u8 *last_ste,
- struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
-void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, u8 *action_type_set,
- u32 actions_caps, u8 *last_ste,
- struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
-void dr_ste_v1_set_action_set(u8 *d_action, u8 hw_field, u8 shifter,
- u8 length, u32 data);
-void dr_ste_v1_set_action_add(u8 *d_action, u8 hw_field, u8 shifter,
- u8 length, u32 data);
-void dr_ste_v1_set_action_copy(u8 *d_action, u8 dst_hw_field, u8 dst_shifter,
- u8 dst_len, u8 src_hw_field, u8 src_shifter);
-int dr_ste_v1_set_action_decap_l3_list(void *data, u32 data_sz, u8 *hw_action,
- u32 hw_action_sz, u16 *used_hw_action_num);
-int dr_ste_v1_alloc_modify_hdr_ptrn_arg(struct mlx5dr_action *action);
-void dr_ste_v1_free_modify_hdr_ptrn_arg(struct mlx5dr_action *action);
-void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask);
-
-#endif /* _DR_STE_V1_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
index b27bb4106532..fe56b59e24c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#include "mlx5hws_internal.h"
+#include "internal.h"
#define MLX5HWS_ACTION_METER_INIT_COLOR_OFFSET 1
@@ -11,31 +11,29 @@
/* This is the longest supported action sequence for FDB table:
* DECAP, POP_VLAN, MODIFY, CTR, ASO, PUSH_VLAN, MODIFY, ENCAP, Term.
*/
-static const u32 action_order_arr[MLX5HWS_TABLE_TYPE_MAX][MLX5HWS_ACTION_TYP_MAX] = {
- [MLX5HWS_TABLE_TYPE_FDB] = {
- BIT(MLX5HWS_ACTION_TYP_REMOVE_HEADER) |
- BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2) |
- BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2),
- BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
- BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
- BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
- BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
- BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
- BIT(MLX5HWS_ACTION_TYP_INSERT_HEADER) |
- BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) |
- BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3),
- BIT(MLX5HWS_ACTION_TYP_CTR),
- BIT(MLX5HWS_ACTION_TYP_TAG),
- BIT(MLX5HWS_ACTION_TYP_ASO_METER),
- BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
- BIT(MLX5HWS_ACTION_TYP_TBL) |
- BIT(MLX5HWS_ACTION_TYP_VPORT) |
- BIT(MLX5HWS_ACTION_TYP_DROP) |
- BIT(MLX5HWS_ACTION_TYP_SAMPLER) |
- BIT(MLX5HWS_ACTION_TYP_RANGE) |
- BIT(MLX5HWS_ACTION_TYP_DEST_ARRAY),
- BIT(MLX5HWS_ACTION_TYP_LAST),
- },
+static const u32 action_order_arr[MLX5HWS_ACTION_TYP_MAX] = {
+ BIT(MLX5HWS_ACTION_TYP_REMOVE_HEADER) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2),
+ BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
+ BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_INSERT_HEADER) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3),
+ BIT(MLX5HWS_ACTION_TYP_CTR),
+ BIT(MLX5HWS_ACTION_TYP_TAG),
+ BIT(MLX5HWS_ACTION_TYP_ASO_METER),
+ BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
+ BIT(MLX5HWS_ACTION_TYP_TBL) |
+ BIT(MLX5HWS_ACTION_TYP_VPORT) |
+ BIT(MLX5HWS_ACTION_TYP_DROP) |
+ BIT(MLX5HWS_ACTION_TYP_SAMPLER) |
+ BIT(MLX5HWS_ACTION_TYP_RANGE) |
+ BIT(MLX5HWS_ACTION_TYP_DEST_ARRAY),
+ BIT(MLX5HWS_ACTION_TYP_LAST),
};
static const char * const mlx5hws_action_type_str[] = {
@@ -74,6 +72,11 @@ enum mlx5hws_action_type mlx5hws_action_get_type(struct mlx5hws_action *action)
return action->type;
}
+struct mlx5_core_dev *mlx5hws_action_get_dev(struct mlx5hws_action *action)
+{
+ return action->ctx->mdev;
+}
+
static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx,
enum mlx5hws_context_shared_stc_type stc_type,
u8 tbl_type)
@@ -83,8 +86,8 @@ static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx,
int ret;
mutex_lock(&ctx->ctrl_lock);
- if (ctx->common_res[tbl_type].shared_stc[stc_type]) {
- ctx->common_res[tbl_type].shared_stc[stc_type]->refcount++;
+ if (ctx->common_res.shared_stc[stc_type]) {
+ ctx->common_res.shared_stc[stc_type]->refcount++;
mutex_unlock(&ctx->ctrl_lock);
return 0;
}
@@ -114,7 +117,7 @@ static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx,
mlx5hws_err(ctx, "No such stc_type: %d\n", stc_type);
pr_warn("HWS: Invalid stc_type: %d\n", stc_type);
ret = -EINVAL;
- goto unlock_and_out;
+ goto free_shared_stc;
}
ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
@@ -124,8 +127,8 @@ static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx,
goto free_shared_stc;
}
- ctx->common_res[tbl_type].shared_stc[stc_type] = shared_stc;
- ctx->common_res[tbl_type].shared_stc[stc_type]->refcount = 1;
+ ctx->common_res.shared_stc[stc_type] = shared_stc;
+ ctx->common_res.shared_stc[stc_type]->refcount = 1;
mutex_unlock(&ctx->ctrl_lock);
@@ -178,16 +181,16 @@ static void hws_action_put_shared_stc(struct mlx5hws_action *action,
}
mutex_lock(&ctx->ctrl_lock);
- if (--ctx->common_res[tbl_type].shared_stc[stc_type]->refcount) {
+ if (--ctx->common_res.shared_stc[stc_type]->refcount) {
mutex_unlock(&ctx->ctrl_lock);
return;
}
- shared_stc = ctx->common_res[tbl_type].shared_stc[stc_type];
+ shared_stc = ctx->common_res.shared_stc[stc_type];
mlx5hws_action_free_single_stc(ctx, tbl_type, &shared_stc->stc_chunk);
kfree(shared_stc);
- ctx->common_res[tbl_type].shared_stc[stc_type] = NULL;
+ ctx->common_res.shared_stc[stc_type] = NULL;
mutex_unlock(&ctx->ctrl_lock);
}
@@ -206,10 +209,10 @@ bool mlx5hws_action_check_combo(struct mlx5hws_context *ctx,
enum mlx5hws_action_type *user_actions,
enum mlx5hws_table_type table_type)
{
- const u32 *order_arr = action_order_arr[table_type];
+ const u32 *order_arr = action_order_arr;
+ bool valid_combo;
u8 order_idx = 0;
u8 user_idx = 0;
- bool valid_combo;
if (table_type >= MLX5HWS_TABLE_TYPE_MAX) {
mlx5hws_err(ctx, "Invalid table_type %d", table_type);
@@ -240,6 +243,7 @@ hws_action_fixup_stc_attr(struct mlx5hws_context *ctx,
enum mlx5hws_table_type table_type,
bool is_mirror)
{
+ struct mlx5hws_pool *pool;
bool use_fixup = false;
u32 fw_tbl_type;
u32 base_id;
@@ -255,13 +259,11 @@ hws_action_fixup_stc_attr(struct mlx5hws_context *ctx,
use_fixup = true;
break;
}
+ pool = stc_attr->ste_table.ste_pool;
if (!is_mirror)
- base_id = mlx5hws_pool_chunk_get_base_id(stc_attr->ste_table.ste_pool,
- &stc_attr->ste_table.ste);
+ base_id = mlx5hws_pool_get_base_id(pool);
else
- base_id =
- mlx5hws_pool_chunk_get_base_mirror_id(stc_attr->ste_table.ste_pool,
- &stc_attr->ste_table.ste);
+ base_id = mlx5hws_pool_get_base_mirror_id(pool);
*fixup_stc_attr = *stc_attr;
fixup_stc_attr->ste_table.ste_obj_id = base_id;
@@ -321,8 +323,8 @@ int mlx5hws_action_alloc_single_stc(struct mlx5hws_context *ctx,
__must_hold(&ctx->ctrl_lock)
{
struct mlx5hws_cmd_stc_modify_attr cleanup_stc_attr = {0};
- struct mlx5hws_pool *stc_pool = ctx->stc_pool[table_type];
struct mlx5hws_cmd_stc_modify_attr fixup_stc_attr = {0};
+ struct mlx5hws_pool *stc_pool = ctx->stc_pool;
bool use_fixup;
u32 obj_0_id;
int ret;
@@ -339,7 +341,7 @@ __must_hold(&ctx->ctrl_lock)
if (!mlx5hws_context_cap_dynamic_reparse(ctx))
stc_attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
- obj_0_id = mlx5hws_pool_chunk_get_base_id(stc_pool, stc);
+ obj_0_id = mlx5hws_pool_get_base_id(stc_pool);
/* According to table/action limitation change the stc_attr */
use_fixup = hws_action_fixup_stc_attr(ctx, stc_attr, &fixup_stc_attr, table_type, false);
@@ -355,7 +357,7 @@ __must_hold(&ctx->ctrl_lock)
if (table_type == MLX5HWS_TABLE_TYPE_FDB) {
u32 obj_1_id;
- obj_1_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, stc);
+ obj_1_id = mlx5hws_pool_get_base_mirror_id(stc_pool);
use_fixup = hws_action_fixup_stc_attr(ctx, stc_attr,
&fixup_stc_attr,
@@ -387,19 +389,19 @@ void mlx5hws_action_free_single_stc(struct mlx5hws_context *ctx,
struct mlx5hws_pool_chunk *stc)
__must_hold(&ctx->ctrl_lock)
{
- struct mlx5hws_pool *stc_pool = ctx->stc_pool[table_type];
struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ struct mlx5hws_pool *stc_pool = ctx->stc_pool;
u32 obj_id;
/* Modify the STC not to point to an object */
stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
stc_attr.stc_offset = stc->offset;
- obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, stc);
+ obj_id = mlx5hws_pool_get_base_id(stc_pool);
mlx5hws_cmd_stc_modify(ctx->mdev, obj_id, &stc_attr);
if (table_type == MLX5HWS_TABLE_TYPE_FDB) {
- obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, stc);
+ obj_id = mlx5hws_pool_get_base_mirror_id(stc_pool);
mlx5hws_cmd_stc_modify(ctx->mdev, obj_id, &stc_attr);
}
@@ -473,6 +475,7 @@ static void hws_action_fill_stc_attr(struct mlx5hws_action *action,
break;
case MLX5HWS_ACTION_TYP_TBL:
case MLX5HWS_ACTION_TYP_DEST_ARRAY:
+ case MLX5HWS_ACTION_TYP_SAMPLER:
attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT;
attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
attr->dest_table_id = obj_id;
@@ -561,7 +564,7 @@ hws_action_create_stcs(struct mlx5hws_action *action, u32 obj_id)
if (action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB) {
ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr,
MLX5HWS_TABLE_TYPE_FDB,
- &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
+ &action->stc);
if (ret)
goto out_err;
}
@@ -585,7 +588,7 @@ hws_action_destroy_stcs(struct mlx5hws_action *action)
if (action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB)
mlx5hws_action_free_single_stc(ctx, MLX5HWS_TABLE_TYPE_FDB,
- &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
+ &action->stc);
mutex_unlock(&ctx->ctrl_lock);
}
@@ -1187,14 +1190,15 @@ hws_action_create_modify_header_hws(struct mlx5hws_action *action,
struct mlx5hws_action_mh_pattern *pattern,
u32 log_bulk_size)
{
+ u16 num_actions, max_mh_actions = 0, hw_max_actions;
struct mlx5hws_context *ctx = action->ctx;
- u16 num_actions, max_mh_actions = 0;
int i, ret, size_in_bytes;
u32 pat_id, arg_id = 0;
__be64 *new_pattern;
size_t pat_max_sz;
pat_max_sz = MLX5HWS_ARG_CHUNK_SIZE_MAX * MLX5HWS_ARG_DATA_SIZE;
+ hw_max_actions = pat_max_sz / MLX5HWS_MODIFY_ACTION_SIZE;
size_in_bytes = pat_max_sz * sizeof(__be64);
new_pattern = kcalloc(num_of_patterns, size_in_bytes, GFP_KERNEL);
if (!new_pattern)
@@ -1204,16 +1208,20 @@ hws_action_create_modify_header_hws(struct mlx5hws_action *action,
for (i = 0; i < num_of_patterns; i++) {
size_t new_num_actions;
size_t cur_num_actions;
- u32 nope_location;
+ u32 nop_locations;
cur_num_actions = pattern[i].sz / MLX5HWS_MODIFY_ACTION_SIZE;
- mlx5hws_pat_calc_nope(pattern[i].data, cur_num_actions,
- pat_max_sz / MLX5HWS_MODIFY_ACTION_SIZE,
- &new_num_actions, &nope_location,
- &new_pattern[i * pat_max_sz]);
+ ret = mlx5hws_pat_calc_nop(pattern[i].data, cur_num_actions,
+ hw_max_actions, &new_num_actions,
+ &nop_locations,
+ &new_pattern[i * pat_max_sz]);
+ if (ret) {
+ mlx5hws_err(ctx, "Too many actions after nop insertion\n");
+ goto free_new_pat;
+ }
- action[i].modify_header.nope_locations = nope_location;
+ action[i].modify_header.nop_locations = nop_locations;
action[i].modify_header.num_of_actions = new_num_actions;
max_mh_actions = max(max_mh_actions, new_num_actions);
@@ -1260,7 +1268,7 @@ hws_action_create_modify_header_hws(struct mlx5hws_action *action,
MLX5_GET(set_action_in, pattern[i].data, action_type);
} else {
/* Multiple modify actions require a pattern */
- if (unlikely(action[i].modify_header.nope_locations)) {
+ if (unlikely(action[i].modify_header.nop_locations)) {
size_t pattern_sz;
pattern_sz = action[i].modify_header.num_of_actions *
@@ -1350,11 +1358,8 @@ free_action:
}
struct mlx5hws_action *
-mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx,
- size_t num_dest,
+mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx, size_t num_dest,
struct mlx5hws_action_dest_attr *dests,
- bool ignore_flow_level,
- u32 flow_source,
u32 flags)
{
struct mlx5hws_cmd_set_fte_dest *dest_list = NULL;
@@ -1362,8 +1367,8 @@ mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx,
struct mlx5hws_cmd_set_fte_attr fte_attr = {0};
struct mlx5hws_cmd_forward_tbl *fw_island;
struct mlx5hws_action *action;
- u32 i /*, packet_reformat_id*/;
- int ret;
+ int ret, last_dest_idx = -1;
+ u32 i;
if (num_dest <= 1) {
mlx5hws_err(ctx, "Action must have multiple dests\n");
@@ -1392,12 +1397,9 @@ mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx,
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest_list[i].destination_id = dests[i].dest->dest_obj.obj_id;
fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- fte_attr.ignore_flow_level = ignore_flow_level;
- /* ToDo: In SW steering we have a handling of 'go to WIRE'
- * destination here by upper layer setting 'is_wire_ft' flag
- * if the destination is wire.
- * This is because uplink should be last dest in the list.
- */
+ fte_attr.ignore_flow_level = 1;
+ if (dests[i].is_wire_ft)
+ last_dest_idx = i;
break;
case MLX5HWS_ACTION_TYP_VPORT:
dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
@@ -1421,6 +1423,9 @@ mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx,
}
}
+ if (last_dest_idx != -1)
+ swap(dest_list[last_dest_idx], dest_list[num_dest - 1]);
+
fte_attr.dests_num = num_dest;
fte_attr.dests = dest_list;
@@ -1576,17 +1581,15 @@ hws_action_create_dest_match_range_definer(struct mlx5hws_context *ctx)
return definer;
}
-static struct mlx5hws_matcher_action_ste *
+static struct mlx5hws_range_action_table *
hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx,
struct mlx5hws_definer *definer,
u32 miss_ft_id)
{
struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0};
- struct mlx5hws_action_default_stc *default_stc;
- struct mlx5hws_matcher_action_ste *table_ste;
+ struct mlx5hws_range_action_table *table_ste;
struct mlx5hws_pool_attr pool_attr = {0};
struct mlx5hws_pool *ste_pool, *stc_pool;
- struct mlx5hws_pool_chunk *ste;
u32 *rtc_0_id, *rtc_1_id;
u32 obj_id;
int ret;
@@ -1605,7 +1608,6 @@ hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx,
pool_attr.table_type = MLX5HWS_TABLE_TYPE_FDB;
pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
- pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL;
pool_attr.alloc_log_sz = 1;
table_ste->pool = mlx5hws_pool_create(ctx, &pool_attr);
if (!table_ste->pool) {
@@ -1617,8 +1619,6 @@ hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx,
rtc_0_id = &table_ste->rtc_0_id;
rtc_1_id = &table_ste->rtc_1_id;
ste_pool = table_ste->pool;
- ste = &table_ste->ste;
- ste->order = 1;
rtc_attr.log_size = 0;
rtc_attr.log_depth = 0;
@@ -1630,18 +1630,16 @@ hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx,
rtc_attr.fw_gen_wqe = true;
rtc_attr.is_scnd_range = true;
- obj_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
+ obj_id = mlx5hws_pool_get_base_id(ste_pool);
rtc_attr.pd = ctx->pd_num;
rtc_attr.ste_base = obj_id;
- rtc_attr.ste_offset = ste->offset;
rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx);
rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(MLX5HWS_TABLE_TYPE_FDB, false);
/* STC is a single resource (obj_id), use any STC for the ID */
- stc_pool = ctx->stc_pool[MLX5HWS_TABLE_TYPE_FDB];
- default_stc = ctx->common_res[MLX5HWS_TABLE_TYPE_FDB].default_stc;
- obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit);
+ stc_pool = ctx->stc_pool;
+ obj_id = mlx5hws_pool_get_base_id(stc_pool);
rtc_attr.stc_base = obj_id;
ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_0_id);
@@ -1651,11 +1649,11 @@ hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx,
}
/* Create mirror RTC */
- obj_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
+ obj_id = mlx5hws_pool_get_base_mirror_id(ste_pool);
rtc_attr.ste_base = obj_id;
rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(MLX5HWS_TABLE_TYPE_FDB, true);
- obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, &default_stc->default_hit);
+ obj_id = mlx5hws_pool_get_base_mirror_id(stc_pool);
rtc_attr.stc_base = obj_id;
ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_1_id);
@@ -1678,9 +1676,9 @@ free_ste:
return NULL;
}
-static void
-hws_action_destroy_dest_match_range_table(struct mlx5hws_context *ctx,
- struct mlx5hws_matcher_action_ste *table_ste)
+static void hws_action_destroy_dest_match_range_table(
+ struct mlx5hws_context *ctx,
+ struct mlx5hws_range_action_table *table_ste)
{
mutex_lock(&ctx->ctrl_lock);
@@ -1692,12 +1690,11 @@ hws_action_destroy_dest_match_range_table(struct mlx5hws_context *ctx,
mutex_unlock(&ctx->ctrl_lock);
}
-static int
-hws_action_create_dest_match_range_fill_table(struct mlx5hws_context *ctx,
- struct mlx5hws_matcher_action_ste *table_ste,
- struct mlx5hws_action *hit_ft_action,
- struct mlx5hws_definer *range_definer,
- u32 min, u32 max)
+static int hws_action_create_dest_match_range_fill_table(
+ struct mlx5hws_context *ctx,
+ struct mlx5hws_range_action_table *table_ste,
+ struct mlx5hws_action *hit_ft_action,
+ struct mlx5hws_definer *range_definer, u32 min, u32 max)
{
struct mlx5hws_wqe_gta_data_seg_ste match_wqe_data = {0};
struct mlx5hws_wqe_gta_data_seg_ste range_wqe_data = {0};
@@ -1731,7 +1728,7 @@ hws_action_create_dest_match_range_fill_table(struct mlx5hws_context *ctx,
ste_attr.used_id_rtc_0 = &used_rtc_0_id;
ste_attr.used_id_rtc_1 = &used_rtc_1_id;
- common_res = &ctx->common_res[MLX5HWS_TABLE_TYPE_FDB];
+ common_res = &ctx->common_res;
/* init an empty match STE which will always hit */
ste_attr.wqe_ctrl = &wqe_ctrl;
@@ -1750,7 +1747,7 @@ hws_action_create_dest_match_range_fill_table(struct mlx5hws_context *ctx,
wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] |=
htonl(MLX5HWS_ACTION_STC_IDX_LAST_COMBO2 << 29);
wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] =
- htonl(hit_ft_action->stc[MLX5HWS_TABLE_TYPE_FDB].offset);
+ htonl(hit_ft_action->stc.offset);
wqe_data_arr = (__force __be32 *)&range_wqe_data;
@@ -1793,7 +1790,7 @@ mlx5hws_action_create_dest_match_range(struct mlx5hws_context *ctx,
u32 min, u32 max, u32 flags)
{
struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
- struct mlx5hws_matcher_action_ste *table_ste;
+ struct mlx5hws_range_action_table *table_ste;
struct mlx5hws_action *hit_ft_action;
struct mlx5hws_definer *definer;
struct mlx5hws_action *action;
@@ -1838,12 +1835,11 @@ mlx5hws_action_create_dest_match_range(struct mlx5hws_context *ctx,
stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE;
stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
- stc_attr.ste_table.ste = table_ste->ste;
stc_attr.ste_table.ste_pool = table_ste->pool;
stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer;
ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, MLX5HWS_TABLE_TYPE_FDB,
- &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
+ &action->stc);
if (ret)
goto error_unlock;
@@ -1875,7 +1871,50 @@ struct mlx5hws_action *
mlx5hws_action_create_flow_sampler(struct mlx5hws_context *ctx,
u32 sampler_id, u32 flags)
{
- mlx5hws_err(ctx, "Flow sampler action - unsupported\n");
+ struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
+ struct mlx5hws_cmd_set_fte_attr fte_attr = {0};
+ struct mlx5hws_cmd_forward_tbl *fw_island;
+ struct mlx5hws_cmd_set_fte_dest dest;
+ struct mlx5hws_action *action;
+ int ret;
+
+ if (flags != (MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED)) {
+ mlx5hws_err(ctx, "Unsupported flags for flow sampler\n");
+ return NULL;
+ }
+
+ ft_attr.type = FS_FT_FDB;
+ ft_attr.level = ctx->caps->fdb_ft.max_level - 1;
+
+ dest.destination_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
+ dest.destination_id = sampler_id;
+
+ fte_attr.dests_num = 1;
+ fte_attr.dests = &dest;
+ fte_attr.action_flags = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ fte_attr.ignore_flow_level = 1;
+
+ fw_island = mlx5hws_cmd_forward_tbl_create(ctx->mdev, &ft_attr, &fte_attr);
+ if (!fw_island)
+ return NULL;
+
+ action = hws_action_create_generic(ctx, flags,
+ MLX5HWS_ACTION_TYP_SAMPLER);
+ if (!action)
+ goto destroy_fw_island;
+
+ ret = hws_action_create_stcs(action, fw_island->ft_id);
+ if (ret)
+ goto free_action;
+
+ action->flow_sampler.fw_island = fw_island;
+
+ return action;
+
+free_action:
+ kfree(action);
+destroy_fw_island:
+ mlx5hws_cmd_forward_tbl_destroy(ctx->mdev, fw_island);
return NULL;
}
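The flow sampler action added above is implemented as a jump: a hidden FDB forward table (fw_island) carries a single FTE whose destination is the sampler object, and the action's STCs then point at that table. A hedged usage sketch follows, based only on the signature and flag check visible in this hunk; the surrounding kernel context, sampler_obj_id and the mlx5hws_action_destroy() teardown call are assumptions here, and error handling is minimal:

	struct mlx5hws_action *sampler_act;

	/* Sampler actions are only supported as shared FDB actions. */
	sampler_act = mlx5hws_action_create_flow_sampler(ctx, sampler_obj_id,
							 MLX5HWS_ACTION_FLAG_HWS_FDB |
							 MLX5HWS_ACTION_FLAG_SHARED);
	if (!sampler_act)
		return -EINVAL;

	/* ... use it as the hit action of one or more rules ... */

	mlx5hws_action_destroy(sampler_act);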
@@ -1914,6 +1953,11 @@ static void hws_action_destroy_hws(struct mlx5hws_action *action)
}
kfree(action->dest_array.dest_list);
break;
+ case MLX5HWS_ACTION_TYP_SAMPLER:
+ hws_action_destroy_stcs(action);
+ mlx5hws_cmd_forward_tbl_destroy(action->ctx->mdev,
+ action->flow_sampler.fw_island);
+ break;
case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
case MLX5HWS_ACTION_TYP_MODIFY_HDR:
shared_arg = false;
@@ -1970,8 +2014,8 @@ __must_hold(&ctx->ctrl_lock)
struct mlx5hws_action_default_stc *default_stc;
int ret;
- if (ctx->common_res[tbl_type].default_stc) {
- ctx->common_res[tbl_type].default_stc->refcount++;
+ if (ctx->common_res.default_stc) {
+ ctx->common_res.default_stc->refcount++;
return 0;
}
@@ -2023,8 +2067,8 @@ __must_hold(&ctx->ctrl_lock)
goto free_nop_dw7;
}
- ctx->common_res[tbl_type].default_stc = default_stc;
- ctx->common_res[tbl_type].default_stc->refcount++;
+ ctx->common_res.default_stc = default_stc;
+ ctx->common_res.default_stc->refcount++;
return 0;
@@ -2046,9 +2090,7 @@ __must_hold(&ctx->ctrl_lock)
{
struct mlx5hws_action_default_stc *default_stc;
- default_stc = ctx->common_res[tbl_type].default_stc;
-
- default_stc = ctx->common_res[tbl_type].default_stc;
+ default_stc = ctx->common_res.default_stc;
if (--default_stc->refcount)
return;
@@ -2058,28 +2100,30 @@ __must_hold(&ctx->ctrl_lock)
mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw5);
mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_ctr);
kfree(default_stc);
- ctx->common_res[tbl_type].default_stc = NULL;
+ ctx->common_res.default_stc = NULL;
}
static void hws_action_modify_write(struct mlx5hws_send_engine *queue,
u32 arg_idx,
u8 *arg_data,
u16 num_of_actions,
- u32 nope_locations)
+ u32 nop_locations)
{
u8 *new_arg_data = NULL;
int i, j;
- if (unlikely(nope_locations)) {
+ if (unlikely(nop_locations)) {
new_arg_data = kcalloc(num_of_actions,
MLX5HWS_MODIFY_ACTION_SIZE, GFP_KERNEL);
if (unlikely(!new_arg_data))
return;
- for (i = 0, j = 0; i < num_of_actions; i++, j++) {
- memcpy(&new_arg_data[j], arg_data, MLX5HWS_MODIFY_ACTION_SIZE);
- if (BIT(i) & nope_locations)
+ for (i = 0, j = 0; j < num_of_actions; i++, j++) {
+ if (BIT(i) & nop_locations)
j++;
+ memcpy(&new_arg_data[j * MLX5HWS_MODIFY_ACTION_SIZE],
+ &arg_data[i * MLX5HWS_MODIFY_ACTION_SIZE],
+ MLX5HWS_MODIFY_ACTION_SIZE);
}
}
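The rewritten loop above walks the packed source actions with index i and the expanded destination buffer with index j; whenever bit i of nop_locations is set, j is bumped first so the destination slot reserved for a NOP is skipped and the real action data lands in the next slot. A minimal standalone sketch of the same bit-mask skip is below; the 8-byte action size, buffer sizes and names are illustrative, not the driver's:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define ACTION_SIZE sizeof(uint64_t)

	/* Expand src into dst, leaving zeroed slots where the NOP mask
	 * marks the corresponding source index (same loop shape as above).
	 */
	static void expand_with_nops(const uint64_t *src, uint64_t *dst,
				     unsigned int dst_actions, uint32_t nop_mask)
	{
		unsigned int i, j;

		memset(dst, 0, dst_actions * ACTION_SIZE);
		for (i = 0, j = 0; j < dst_actions; i++, j++) {
			if ((1u << i) & nop_mask)
				j++;	/* leave slot j zeroed as a NOP */
			memcpy(&dst[j], &src[i], ACTION_SIZE);
		}
	}

	int main(void)
	{
		const uint64_t src[] = { 0x11, 0x22, 0x33 };
		uint64_t dst[4];
		unsigned int k;

		/* Bit 0 set: one destination slot is skipped while handling
		 * source action 0, so three actions expand into four slots.
		 */
		expand_with_nops(src, dst, 4, 0x1);
		for (k = 0; k < 4; k++)
			printf("slot %u: 0x%llx\n", k, (unsigned long long)dst[k]);
		return 0;
	}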
@@ -2150,8 +2194,7 @@ hws_action_apply_stc(struct mlx5hws_actions_apply_data *apply,
{
struct mlx5hws_action *action = apply->rule_action[action_idx].action;
- apply->wqe_ctrl->stc_ix[stc_idx] =
- htonl(action->stc[apply->tbl_type].offset);
+ apply->wqe_ctrl->stc_ix[stc_idx] = htonl(action->stc.offset);
}
static void
@@ -2176,12 +2219,13 @@ hws_action_setter_modify_header(struct mlx5hws_actions_apply_data *apply,
struct mlx5hws_action *action;
u32 arg_sz, arg_idx;
u8 *single_action;
+ u8 max_actions;
__be32 stc_idx;
rule_action = &apply->rule_action[setter->idx_double];
action = rule_action->action;
- stc_idx = htonl(action->stc[apply->tbl_type].offset);
+ stc_idx = htonl(action->stc.offset);
apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
@@ -2203,21 +2247,23 @@ hws_action_setter_modify_header(struct mlx5hws_actions_apply_data *apply,
apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] =
*(__be32 *)MLX5_ADDR_OF(set_action_in, single_action, data);
- } else {
- /* Argument offset multiple with number of args per these actions */
- arg_sz = mlx5hws_arg_get_arg_size(action->modify_header.max_num_of_actions);
- arg_idx = rule_action->modify_header.offset * arg_sz;
-
- apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
-
- if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
- apply->require_dep = 1;
- hws_action_modify_write(apply->queue,
- action->modify_header.arg_id + arg_idx,
- rule_action->modify_header.data,
- action->modify_header.num_of_actions,
- action->modify_header.nope_locations);
- }
+ return;
+ }
+
+	/* Argument offset is the rule offset multiplied by the number of args used by these actions */
+ max_actions = action->modify_header.max_num_of_actions;
+ arg_sz = mlx5hws_arg_get_arg_size(max_actions);
+ arg_idx = rule_action->modify_header.offset * arg_sz;
+
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
+
+ if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
+ apply->require_dep = 1;
+ hws_action_modify_write(apply->queue,
+ action->modify_header.arg_id + arg_idx,
+ rule_action->modify_header.data,
+ action->modify_header.num_of_actions,
+ action->modify_header.nop_locations);
}
}
@@ -2240,7 +2286,7 @@ hws_action_setter_insert_ptr(struct mlx5hws_actions_apply_data *apply,
apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
- stc_idx = htonl(action->stc[apply->tbl_type].offset);
+ stc_idx = htonl(action->stc.offset);
apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
@@ -2272,7 +2318,7 @@ hws_action_setter_tnl_l3_to_l2(struct mlx5hws_actions_apply_data *apply,
apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
- stc_idx = htonl(action->stc[apply->tbl_type].offset);
+ stc_idx = htonl(action->stc.offset);
apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
@@ -2434,6 +2480,7 @@ int mlx5hws_action_template_process(struct mlx5hws_action_template *at)
case MLX5HWS_ACTION_TYP_DROP:
case MLX5HWS_ACTION_TYP_TBL:
case MLX5HWS_ACTION_TYP_DEST_ARRAY:
+ case MLX5HWS_ACTION_TYP_SAMPLER:
case MLX5HWS_ACTION_TYP_VPORT:
case MLX5HWS_ACTION_TYP_MISS:
/* Hit action */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h
index bf5c1b241006..55a079fdd08f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#ifndef MLX5HWS_ACTION_H_
-#define MLX5HWS_ACTION_H_
+#ifndef HWS_ACTION_H_
+#define HWS_ACTION_H_
/* Max number of STEs needed for a rule (including match) */
#define MLX5HWS_ACTION_MAX_STE 20
@@ -70,12 +70,12 @@ struct mlx5hws_action_default_stc {
struct mlx5hws_pool_chunk nop_dw6;
struct mlx5hws_pool_chunk nop_dw7;
struct mlx5hws_pool_chunk default_hit;
- u32 refcount;
+ u32 refcount; /* protected by context ctrl lock */
};
struct mlx5hws_action_shared_stc {
struct mlx5hws_pool_chunk stc_chunk;
- u32 refcount;
+ u32 refcount; /* protected by context ctrl lock */
};
struct mlx5hws_actions_apply_data {
@@ -118,19 +118,25 @@ struct mlx5hws_action_template {
u8 only_term;
};
+struct mlx5hws_range_action_table {
+ struct mlx5hws_pool *pool;
+ u32 rtc_0_id;
+ u32 rtc_1_id;
+};
+
struct mlx5hws_action {
u8 type;
u8 flags;
struct mlx5hws_context *ctx;
union {
struct {
- struct mlx5hws_pool_chunk stc[MLX5HWS_TABLE_TYPE_MAX];
+ struct mlx5hws_pool_chunk stc;
union {
struct {
u32 pat_id;
u32 arg_id;
__be64 single_action;
- u32 nope_locations;
+ u32 nop_locations;
u8 num_of_patterns;
u8 single_action_type;
u8 num_of_actions;
@@ -166,6 +172,9 @@ struct mlx5hws_action {
struct mlx5hws_cmd_set_fte_dest *dest_list;
} dest_array;
struct {
+ struct mlx5hws_cmd_forward_tbl *fw_island;
+ } flow_sampler;
+ struct {
u8 type;
u8 start_anchor;
u8 end_anchor;
@@ -183,7 +192,7 @@ struct mlx5hws_action {
size_t size;
} remove_header;
struct {
- struct mlx5hws_matcher_action_ste *table_ste;
+ struct mlx5hws_range_action_table *table_ste;
struct mlx5hws_action *hit_ft_action;
struct mlx5hws_definer *definer;
} range;
@@ -304,4 +313,4 @@ mlx5hws_action_apply_setter(struct mlx5hws_actions_apply_data *apply,
htonl(num_of_actions << 29);
}
-#endif /* MLX5HWS_ACTION_H_ */
+#endif /* HWS_ACTION_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.c
new file mode 100644
index 000000000000..5766a9c82f96
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.c
@@ -0,0 +1,467 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+
+static const char *
+hws_pool_opt_to_str(enum mlx5hws_pool_optimize opt)
+{
+ switch (opt) {
+ case MLX5HWS_POOL_OPTIMIZE_NONE:
+ return "rx-and-tx";
+ case MLX5HWS_POOL_OPTIMIZE_ORIG:
+ return "rx-only";
+ case MLX5HWS_POOL_OPTIMIZE_MIRROR:
+ return "tx-only";
+ default:
+ return "unknown";
+ }
+}
+
+static int
+hws_action_ste_table_create_pool(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_ste_table *action_tbl,
+ enum mlx5hws_pool_optimize opt, size_t log_sz)
+{
+ struct mlx5hws_pool_attr pool_attr = { 0 };
+
+ pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
+ pool_attr.table_type = MLX5HWS_TABLE_TYPE_FDB;
+ pool_attr.flags = MLX5HWS_POOL_FLAG_BUDDY;
+ pool_attr.opt_type = opt;
+ pool_attr.alloc_log_sz = log_sz;
+
+ action_tbl->pool = mlx5hws_pool_create(ctx, &pool_attr);
+ if (!action_tbl->pool) {
+ mlx5hws_err(ctx, "Failed to allocate STE pool\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hws_action_ste_table_create_single_rtc(
+ struct mlx5hws_context *ctx,
+ struct mlx5hws_action_ste_table *action_tbl,
+ enum mlx5hws_pool_optimize opt, size_t log_sz, bool tx)
+{
+ struct mlx5hws_cmd_rtc_create_attr rtc_attr = { 0 };
+ u32 *rtc_id;
+
+ rtc_attr.log_depth = 0;
+ rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
+ /* Action STEs use the default always hit definer. */
+ rtc_attr.match_definer_0 = ctx->caps->trivial_match_definer;
+ rtc_attr.is_frst_jumbo = false;
+ rtc_attr.miss_ft_id = 0;
+ rtc_attr.pd = ctx->pd_num;
+ rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx);
+
+ if (tx) {
+ rtc_attr.table_type = FS_FT_FDB_TX;
+ rtc_attr.ste_base =
+ mlx5hws_pool_get_base_mirror_id(action_tbl->pool);
+ rtc_attr.stc_base =
+ mlx5hws_pool_get_base_mirror_id(ctx->stc_pool);
+ rtc_attr.log_size =
+ opt == MLX5HWS_POOL_OPTIMIZE_ORIG ? 0 : log_sz;
+ rtc_id = &action_tbl->rtc_1_id;
+ } else {
+ rtc_attr.table_type = FS_FT_FDB_RX;
+ rtc_attr.ste_base = mlx5hws_pool_get_base_id(action_tbl->pool);
+ rtc_attr.stc_base = mlx5hws_pool_get_base_id(ctx->stc_pool);
+ rtc_attr.log_size =
+ opt == MLX5HWS_POOL_OPTIMIZE_MIRROR ? 0 : log_sz;
+ rtc_id = &action_tbl->rtc_0_id;
+ }
+
+ return mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_id);
+}
+
+static int
+hws_action_ste_table_create_rtcs(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_ste_table *action_tbl,
+ enum mlx5hws_pool_optimize opt, size_t log_sz)
+{
+ int err;
+
+ err = hws_action_ste_table_create_single_rtc(ctx, action_tbl, opt,
+ log_sz, false);
+ if (err)
+ return err;
+
+ err = hws_action_ste_table_create_single_rtc(ctx, action_tbl, opt,
+ log_sz, true);
+ if (err) {
+ mlx5hws_cmd_rtc_destroy(ctx->mdev, action_tbl->rtc_0_id);
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+hws_action_ste_table_destroy_rtcs(struct mlx5hws_action_ste_table *action_tbl)
+{
+ mlx5hws_cmd_rtc_destroy(action_tbl->pool->ctx->mdev,
+ action_tbl->rtc_1_id);
+ mlx5hws_cmd_rtc_destroy(action_tbl->pool->ctx->mdev,
+ action_tbl->rtc_0_id);
+}
+
+static int
+hws_action_ste_table_create_stc(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_ste_table *action_tbl)
+{
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = { 0 };
+
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE;
+ stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+ stc_attr.ste_table.ste_pool = action_tbl->pool;
+ stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer;
+
+ return mlx5hws_action_alloc_single_stc(ctx, &stc_attr,
+ MLX5HWS_TABLE_TYPE_FDB,
+ &action_tbl->stc);
+}
+
+static struct mlx5hws_action_ste_table *
+hws_action_ste_table_alloc(struct mlx5hws_action_ste_pool_element *parent_elem)
+{
+ enum mlx5hws_pool_optimize opt = parent_elem->opt;
+ struct mlx5hws_context *ctx = parent_elem->ctx;
+ struct mlx5hws_action_ste_table *action_tbl;
+ size_t log_sz;
+ int err;
+
+ log_sz = min(parent_elem->log_sz ?
+ parent_elem->log_sz +
+ MLX5HWS_ACTION_STE_TABLE_STEP_LOG_SZ :
+ MLX5HWS_ACTION_STE_TABLE_INIT_LOG_SZ,
+ MLX5HWS_ACTION_STE_TABLE_MAX_LOG_SZ);
+
+ action_tbl = kzalloc(sizeof(*action_tbl), GFP_KERNEL);
+ if (!action_tbl)
+ return ERR_PTR(-ENOMEM);
+
+ err = hws_action_ste_table_create_pool(ctx, action_tbl, opt, log_sz);
+ if (err)
+ goto free_tbl;
+
+ err = hws_action_ste_table_create_rtcs(ctx, action_tbl, opt, log_sz);
+ if (err)
+ goto destroy_pool;
+
+ err = hws_action_ste_table_create_stc(ctx, action_tbl);
+ if (err)
+ goto destroy_rtcs;
+
+ action_tbl->parent_elem = parent_elem;
+ INIT_LIST_HEAD(&action_tbl->list_node);
+ action_tbl->last_used = jiffies;
+ list_add(&action_tbl->list_node, &parent_elem->available);
+ parent_elem->log_sz = log_sz;
+
+ mlx5hws_dbg(ctx,
+ "Allocated %s action STE table log_sz %zu; STEs (%d, %d); RTCs (%d, %d); STC %d\n",
+ hws_pool_opt_to_str(opt), log_sz,
+ mlx5hws_pool_get_base_id(action_tbl->pool),
+ mlx5hws_pool_get_base_mirror_id(action_tbl->pool),
+ action_tbl->rtc_0_id, action_tbl->rtc_1_id,
+ action_tbl->stc.offset);
+
+ return action_tbl;
+
+destroy_rtcs:
+ hws_action_ste_table_destroy_rtcs(action_tbl);
+destroy_pool:
+ mlx5hws_pool_destroy(action_tbl->pool);
+free_tbl:
+ kfree(action_tbl);
+
+ return ERR_PTR(err);
+}
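Each new table for a pool element is one step larger than the largest one allocated so far, starting from the initial size and capped at the maximum (the three MLX5HWS_ACTION_STE_TABLE_*_LOG_SZ constants declared in action_ste_pool.h further below). A small standalone sketch of that growth policy, with the constant values copied from the header:

	#include <stdio.h>

	#define INIT_LOG_SZ	10	/* MLX5HWS_ACTION_STE_TABLE_INIT_LOG_SZ */
	#define STEP_LOG_SZ	1	/* MLX5HWS_ACTION_STE_TABLE_STEP_LOG_SZ */
	#define MAX_LOG_SZ	20	/* MLX5HWS_ACTION_STE_TABLE_MAX_LOG_SZ */

	/* Mirror of: log_sz = min(last ? last + STEP : INIT, MAX) */
	static unsigned int next_table_log_sz(unsigned int largest_so_far)
	{
		unsigned int next = largest_so_far ?
				    largest_so_far + STEP_LOG_SZ : INIT_LOG_SZ;

		return next < MAX_LOG_SZ ? next : MAX_LOG_SZ;
	}

	int main(void)
	{
		unsigned int largest = 0, i;

		/* Tables grow 2^10, 2^11, ... and saturate at 2^20 STEs. */
		for (i = 0; i < 12; i++) {
			largest = next_table_log_sz(largest);
			printf("table %u: 2^%u STEs\n", i, largest);
		}
		return 0;
	}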
+
+static void
+hws_action_ste_table_destroy(struct mlx5hws_action_ste_table *action_tbl)
+{
+ struct mlx5hws_context *ctx = action_tbl->parent_elem->ctx;
+
+ mlx5hws_dbg(ctx,
+ "Destroying %s action STE table: STEs (%d, %d); RTCs (%d, %d); STC %d\n",
+ hws_pool_opt_to_str(action_tbl->parent_elem->opt),
+ mlx5hws_pool_get_base_id(action_tbl->pool),
+ mlx5hws_pool_get_base_mirror_id(action_tbl->pool),
+ action_tbl->rtc_0_id, action_tbl->rtc_1_id,
+ action_tbl->stc.offset);
+
+ mlx5hws_action_free_single_stc(ctx, MLX5HWS_TABLE_TYPE_FDB,
+ &action_tbl->stc);
+ hws_action_ste_table_destroy_rtcs(action_tbl);
+ mlx5hws_pool_destroy(action_tbl->pool);
+
+ list_del(&action_tbl->list_node);
+ kfree(action_tbl);
+}
+
+static int
+hws_action_ste_pool_element_init(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_ste_pool_element *elem,
+ enum mlx5hws_pool_optimize opt)
+{
+ elem->ctx = ctx;
+ elem->opt = opt;
+ INIT_LIST_HEAD(&elem->available);
+ INIT_LIST_HEAD(&elem->full);
+
+ return 0;
+}
+
+static void hws_action_ste_pool_element_destroy(
+ struct mlx5hws_action_ste_pool_element *elem)
+{
+ struct mlx5hws_action_ste_table *action_tbl, *p;
+
+ /* This should be empty, but attempt to free its elements anyway. */
+ list_for_each_entry_safe(action_tbl, p, &elem->full, list_node)
+ hws_action_ste_table_destroy(action_tbl);
+
+ list_for_each_entry_safe(action_tbl, p, &elem->available, list_node)
+ hws_action_ste_table_destroy(action_tbl);
+}
+
+static int hws_action_ste_pool_init(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_ste_pool *pool)
+{
+ enum mlx5hws_pool_optimize opt;
+ int err;
+
+ mutex_init(&pool->lock);
+
+ /* Rules which are added for both RX and TX must use the same action STE
+ * indices for both. If we were to use a single table, then RX-only and
+ * TX-only rules would waste the unused entries. Thus, we use separate
+ * table sets for the three cases.
+ */
+ for (opt = MLX5HWS_POOL_OPTIMIZE_NONE; opt < MLX5HWS_POOL_OPTIMIZE_MAX;
+ opt++) {
+ err = hws_action_ste_pool_element_init(ctx, &pool->elems[opt],
+ opt);
+ if (err)
+ goto destroy_elems;
+ pool->elems[opt].parent_pool = pool;
+ }
+
+ return 0;
+
+destroy_elems:
+ while (opt-- > MLX5HWS_POOL_OPTIMIZE_NONE)
+ hws_action_ste_pool_element_destroy(&pool->elems[opt]);
+
+ return err;
+}
+
+static void hws_action_ste_pool_destroy(struct mlx5hws_action_ste_pool *pool)
+{
+ int opt;
+
+ for (opt = MLX5HWS_POOL_OPTIMIZE_MAX - 1;
+ opt >= MLX5HWS_POOL_OPTIMIZE_NONE; opt--)
+ hws_action_ste_pool_element_destroy(&pool->elems[opt]);
+}
+
+static void hws_action_ste_pool_element_collect_stale(
+ struct mlx5hws_action_ste_pool_element *elem, struct list_head *cleanup)
+{
+ struct mlx5hws_action_ste_table *action_tbl, *p;
+ unsigned long expire_time, now;
+
+ expire_time = secs_to_jiffies(MLX5HWS_ACTION_STE_POOL_EXPIRE_SECONDS);
+ now = jiffies;
+
+ list_for_each_entry_safe(action_tbl, p, &elem->available, list_node) {
+ if (mlx5hws_pool_full(action_tbl->pool) &&
+ time_before(action_tbl->last_used + expire_time, now))
+ list_move(&action_tbl->list_node, cleanup);
+ }
+}
+
+static void hws_action_ste_table_cleanup_list(struct list_head *cleanup)
+{
+ struct mlx5hws_action_ste_table *action_tbl, *p;
+
+ list_for_each_entry_safe(action_tbl, p, cleanup, list_node)
+ hws_action_ste_table_destroy(action_tbl);
+}
+
+static void hws_action_ste_pool_cleanup(struct work_struct *work)
+{
+ enum mlx5hws_pool_optimize opt;
+ struct mlx5hws_context *ctx;
+ LIST_HEAD(cleanup);
+ int i;
+
+ ctx = container_of(work, struct mlx5hws_context,
+ action_ste_cleanup.work);
+
+ for (i = 0; i < ctx->queues; i++) {
+ struct mlx5hws_action_ste_pool *p = &ctx->action_ste_pool[i];
+
+ mutex_lock(&p->lock);
+ for (opt = MLX5HWS_POOL_OPTIMIZE_NONE;
+ opt < MLX5HWS_POOL_OPTIMIZE_MAX; opt++)
+ hws_action_ste_pool_element_collect_stale(
+ &p->elems[opt], &cleanup);
+ mutex_unlock(&p->lock);
+ }
+
+ hws_action_ste_table_cleanup_list(&cleanup);
+
+ schedule_delayed_work(&ctx->action_ste_cleanup,
+ secs_to_jiffies(
+ MLX5HWS_ACTION_STE_POOL_CLEANUP_SECONDS));
+}
+
+int mlx5hws_action_ste_pool_init(struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_action_ste_pool *pool;
+ size_t queues = ctx->queues;
+ int i, err;
+
+ pool = kcalloc(queues, sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return -ENOMEM;
+
+ for (i = 0; i < queues; i++) {
+ err = hws_action_ste_pool_init(ctx, &pool[i]);
+ if (err)
+ goto free_pool;
+ }
+
+ ctx->action_ste_pool = pool;
+
+ INIT_DELAYED_WORK(&ctx->action_ste_cleanup,
+ hws_action_ste_pool_cleanup);
+ schedule_delayed_work(
+ &ctx->action_ste_cleanup,
+ secs_to_jiffies(MLX5HWS_ACTION_STE_POOL_CLEANUP_SECONDS));
+
+ return 0;
+
+free_pool:
+ while (i--)
+ hws_action_ste_pool_destroy(&pool[i]);
+ kfree(pool);
+
+ return err;
+}
+
+void mlx5hws_action_ste_pool_uninit(struct mlx5hws_context *ctx)
+{
+ size_t queues = ctx->queues;
+ int i;
+
+ cancel_delayed_work_sync(&ctx->action_ste_cleanup);
+
+ for (i = 0; i < queues; i++)
+ hws_action_ste_pool_destroy(&ctx->action_ste_pool[i]);
+
+ kfree(ctx->action_ste_pool);
+}
+
+static struct mlx5hws_action_ste_pool_element *
+hws_action_ste_choose_elem(struct mlx5hws_action_ste_pool *pool,
+ bool skip_rx, bool skip_tx)
+{
+ if (skip_rx)
+ return &pool->elems[MLX5HWS_POOL_OPTIMIZE_MIRROR];
+
+ if (skip_tx)
+ return &pool->elems[MLX5HWS_POOL_OPTIMIZE_ORIG];
+
+ return &pool->elems[MLX5HWS_POOL_OPTIMIZE_NONE];
+}
+
+static int
+hws_action_ste_table_chunk_alloc(struct mlx5hws_action_ste_table *action_tbl,
+ struct mlx5hws_action_ste_chunk *chunk)
+{
+ int err;
+
+ err = mlx5hws_pool_chunk_alloc(action_tbl->pool, &chunk->ste);
+ if (err)
+ return err;
+
+ chunk->action_tbl = action_tbl;
+ action_tbl->last_used = jiffies;
+
+ return 0;
+}
+
+int mlx5hws_action_ste_chunk_alloc(struct mlx5hws_action_ste_pool *pool,
+ bool skip_rx, bool skip_tx,
+ struct mlx5hws_action_ste_chunk *chunk)
+{
+ struct mlx5hws_action_ste_pool_element *elem;
+ struct mlx5hws_action_ste_table *action_tbl;
+ bool found;
+ int err;
+
+ if (skip_rx && skip_tx)
+ return -EINVAL;
+
+ mutex_lock(&pool->lock);
+
+ elem = hws_action_ste_choose_elem(pool, skip_rx, skip_tx);
+
+ mlx5hws_dbg(elem->ctx,
+ "Allocating action STEs skip_rx %d skip_tx %d order %d\n",
+ skip_rx, skip_tx, chunk->ste.order);
+
+ found = false;
+ list_for_each_entry(action_tbl, &elem->available, list_node) {
+ if (!hws_action_ste_table_chunk_alloc(action_tbl, chunk)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ action_tbl = hws_action_ste_table_alloc(elem);
+ if (IS_ERR(action_tbl)) {
+ err = PTR_ERR(action_tbl);
+ goto out;
+ }
+
+ err = hws_action_ste_table_chunk_alloc(action_tbl, chunk);
+ if (err)
+ goto out;
+ }
+
+ if (mlx5hws_pool_empty(action_tbl->pool))
+ list_move(&action_tbl->list_node, &elem->full);
+
+ err = 0;
+
+out:
+ mutex_unlock(&pool->lock);
+
+ return err;
+}
+
+void mlx5hws_action_ste_chunk_free(struct mlx5hws_action_ste_chunk *chunk)
+{
+ struct mutex *lock = &chunk->action_tbl->parent_elem->parent_pool->lock;
+
+ mlx5hws_dbg(chunk->action_tbl->pool->ctx,
+ "Freeing action STEs offset %d order %d\n",
+ chunk->ste.offset, chunk->ste.order);
+
+ mutex_lock(lock);
+ mlx5hws_pool_chunk_free(chunk->action_tbl->pool, &chunk->ste);
+ chunk->action_tbl->last_used = jiffies;
+ list_move(&chunk->action_tbl->list_node,
+ &chunk->action_tbl->parent_elem->available);
+ mutex_unlock(lock);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.h
new file mode 100644
index 000000000000..a8ba97359e31
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#ifndef ACTION_STE_POOL_H_
+#define ACTION_STE_POOL_H_
+
+#define MLX5HWS_ACTION_STE_TABLE_INIT_LOG_SZ 10
+#define MLX5HWS_ACTION_STE_TABLE_STEP_LOG_SZ 1
+#define MLX5HWS_ACTION_STE_TABLE_MAX_LOG_SZ 20
+
+#define MLX5HWS_ACTION_STE_POOL_CLEANUP_SECONDS 300
+#define MLX5HWS_ACTION_STE_POOL_EXPIRE_SECONDS 300
+
+struct mlx5hws_action_ste_pool_element;
+
+struct mlx5hws_action_ste_table {
+ struct mlx5hws_action_ste_pool_element *parent_elem;
+	/* Wraps the RTC and STE range for this action STE table. */
+ struct mlx5hws_pool *pool;
+ /* Match STEs use this STC to jump to this pool's RTC. */
+ struct mlx5hws_pool_chunk stc;
+ u32 rtc_0_id;
+ u32 rtc_1_id;
+ struct list_head list_node;
+ unsigned long last_used;
+};
+
+struct mlx5hws_action_ste_pool_element {
+ struct mlx5hws_context *ctx;
+ struct mlx5hws_action_ste_pool *parent_pool;
+ size_t log_sz; /* Size of the largest table so far. */
+ enum mlx5hws_pool_optimize opt;
+ struct list_head available;
+ struct list_head full;
+};
+
+/* Central repository of action STEs. The context contains one of these pools
+ * per queue.
+ */
+struct mlx5hws_action_ste_pool {
+ /* Protects the entire pool. We have one pool per queue and only one
+ * operation can be active per rule at a given time. Thus this lock
+ * protects solely against concurrent garbage collection and we expect
+ * very little contention.
+ */
+ struct mutex lock;
+ struct mlx5hws_action_ste_pool_element elems[MLX5HWS_POOL_OPTIMIZE_MAX];
+};
+
+/* A chunk of STEs and the table it was allocated from. Used by rules. */
+struct mlx5hws_action_ste_chunk {
+ struct mlx5hws_action_ste_table *action_tbl;
+ struct mlx5hws_pool_chunk ste;
+};
+
+int mlx5hws_action_ste_pool_init(struct mlx5hws_context *ctx);
+
+void mlx5hws_action_ste_pool_uninit(struct mlx5hws_context *ctx);
+
+/* Callers are expected to fill chunk->ste.order. On success, this function
+ * populates chunk->action_tbl and chunk->ste.offset.
+ */
+int mlx5hws_action_ste_chunk_alloc(struct mlx5hws_action_ste_pool *pool,
+ bool skip_rx, bool skip_tx,
+ struct mlx5hws_action_ste_chunk *chunk);
+
+void mlx5hws_action_ste_chunk_free(struct mlx5hws_action_ste_chunk *chunk);
+
+#endif /* ACTION_STE_POOL_H_ */
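A hedged sketch of the caller contract documented above: the caller fills in the requested chunk order, allocates from its queue's pool, and frees the chunk when the rule no longer needs action STEs. The enclosing function, queue_id and the chosen order are placeholders, not taken from the driver:

	struct mlx5hws_action_ste_chunk chunk = {};
	int err;

	chunk.ste.order = 2;	/* e.g. four action STEs for this rule */
	err = mlx5hws_action_ste_chunk_alloc(&ctx->action_ste_pool[queue_id],
					     false /* skip_rx */,
					     false /* skip_tx */,
					     &chunk);
	if (err)
		return err;

	/* chunk.action_tbl and chunk.ste.offset are now populated and can
	 * be used to write the rule's action STEs.
	 */

	mlx5hws_action_ste_chunk_free(&chunk);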
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.c
index e6ed66202a40..b9aef80ba094 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#include "mlx5hws_internal.h"
-#include "mlx5hws_buddy.h"
+#include "internal.h"
+#include "buddy.h"
static int hws_buddy_init(struct mlx5hws_buddy_mem *buddy, u32 max_order)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.h
index 338c44bbedaf..ef6b223677aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#ifndef MLX5HWS_BUDDY_H_
-#define MLX5HWS_BUDDY_H_
+#ifndef HWS_BUDDY_H_
+#define HWS_BUDDY_H_
struct mlx5hws_buddy_mem {
unsigned long **bitmap;
@@ -18,4 +18,4 @@ int mlx5hws_buddy_alloc_mem(struct mlx5hws_buddy_mem *buddy, u32 order);
void mlx5hws_buddy_free_mem(struct mlx5hws_buddy_mem *buddy, u32 seg, u32 order);
-#endif /* MLX5HWS_BUDDY_H_ */
+#endif /* HWS_BUDDY_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
index 8f3a6f9d703d..6ef0c4be27e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#include "mlx5hws_internal.h"
+#include "internal.h"
static u16 hws_bwc_gen_queue_idx(struct mlx5hws_context *ctx)
{
@@ -46,9 +46,10 @@ static void hws_bwc_unlock_all_queues(struct mlx5hws_context *ctx)
}
}
-static void hws_bwc_matcher_init_attr(struct mlx5hws_matcher_attr *attr,
+static void hws_bwc_matcher_init_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
u32 priority,
- u8 size_log)
+ u8 size_log_rx, u8 size_log_tx,
+ struct mlx5hws_matcher_attr *attr)
{
memset(attr, 0, sizeof(*attr));
@@ -58,11 +59,170 @@ static void hws_bwc_matcher_init_attr(struct mlx5hws_matcher_attr *attr,
attr->optimize_flow_src = MLX5HWS_MATCHER_FLOW_SRC_ANY;
attr->insert_mode = MLX5HWS_MATCHER_INSERT_BY_HASH;
attr->distribute_mode = MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH;
- attr->rule.num_log = size_log;
+ attr->size[MLX5HWS_MATCHER_SIZE_TYPE_RX].rule.num_log = size_log_rx;
+ attr->size[MLX5HWS_MATCHER_SIZE_TYPE_TX].rule.num_log = size_log_tx;
attr->resizable = true;
attr->max_num_of_at_attach = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM;
}
+static int
+hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_matcher *matcher = bwc_matcher->matcher;
+ int drain_error = 0, move_error = 0, poll_error = 0;
+ u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+ struct mlx5hws_rule_attr rule_attr;
+ struct mlx5hws_bwc_rule *bwc_rule;
+ struct mlx5hws_send_engine *queue;
+ struct list_head *rules_list;
+ u32 pending_rules;
+ int i, ret = 0;
+ bool drain;
+
+ mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr);
+
+ for (i = 0; i < bwc_queues; i++) {
+ if (list_empty(&bwc_matcher->rules[i]))
+ continue;
+
+ pending_rules = 0;
+ rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
+ rules_list = &bwc_matcher->rules[i];
+
+ list_for_each_entry(bwc_rule, rules_list, list_node) {
+ ret = mlx5hws_matcher_resize_rule_move(matcher,
+ bwc_rule->rule,
+ &rule_attr);
+ if (unlikely(ret)) {
+ if (!move_error) {
+ mlx5hws_err(ctx,
+ "Moving BWC rule: move failed (%d), attempting to move rest of the rules\n",
+ ret);
+ move_error = ret;
+ }
+ /* Rule wasn't queued, no need to poll */
+ continue;
+ }
+
+ pending_rules++;
+ drain = pending_rules >=
+ hws_bwc_get_burst_th(ctx, rule_attr.queue_id);
+ ret = mlx5hws_bwc_queue_poll(ctx,
+ rule_attr.queue_id,
+ &pending_rules,
+ drain);
+ if (unlikely(ret)) {
+ if (ret == -ETIMEDOUT) {
+ mlx5hws_err(ctx,
+ "Moving BWC rule: timeout polling for completions (%d), aborting rehash\n",
+ ret);
+ return ret;
+ }
+ if (!poll_error) {
+ mlx5hws_err(ctx,
+ "Moving BWC rule: polling for completions failed (%d), attempting to move rest of the rules\n",
+ ret);
+ poll_error = ret;
+ }
+ }
+ }
+
+ if (pending_rules) {
+ queue = &ctx->send_queue[rule_attr.queue_id];
+ mlx5hws_send_engine_flush_queue(queue);
+ ret = mlx5hws_bwc_queue_poll(ctx,
+ rule_attr.queue_id,
+ &pending_rules,
+ true);
+ if (unlikely(ret)) {
+ if (ret == -ETIMEDOUT) {
+ mlx5hws_err(ctx,
+ "Moving bwc rule: timeout draining completions (%d), aborting rehash\n",
+ ret);
+ return ret;
+ }
+ if (!drain_error) {
+ mlx5hws_err(ctx,
+ "Moving bwc rule: drain failed (%d), attempting to move rest of the rules\n",
+ ret);
+ drain_error = ret;
+ }
+ }
+ }
+ }
+
+ /* Return the first error that happened */
+ if (unlikely(move_error))
+ return move_error;
+ if (unlikely(poll_error))
+ return poll_error;
+ if (unlikely(drain_error))
+ return drain_error;
+
+ return ret;
+}
+
+static int hws_bwc_matcher_move_all(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ switch (bwc_matcher->matcher_type) {
+ case MLX5HWS_BWC_MATCHER_SIMPLE:
+ return hws_bwc_matcher_move_all_simple(bwc_matcher);
+ case MLX5HWS_BWC_MATCHER_COMPLEX_FIRST:
+ return mlx5hws_bwc_matcher_complex_move_first(bwc_matcher);
+ case MLX5HWS_BWC_MATCHER_COMPLEX_SUBMATCHER:
+ return mlx5hws_bwc_matcher_complex_move(bwc_matcher);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_matcher_attr matcher_attr = {0};
+ struct mlx5hws_matcher *old_matcher;
+ struct mlx5hws_matcher *new_matcher;
+ int ret;
+
+ hws_bwc_matcher_init_attr(bwc_matcher,
+ bwc_matcher->priority,
+ bwc_matcher->rx_size.size_log,
+ bwc_matcher->tx_size.size_log,
+ &matcher_attr);
+
+ old_matcher = bwc_matcher->matcher;
+ new_matcher = mlx5hws_matcher_create(old_matcher->tbl,
+ &bwc_matcher->mt, 1,
+ bwc_matcher->at,
+ bwc_matcher->num_of_at,
+ &matcher_attr);
+ if (!new_matcher) {
+ mlx5hws_err(ctx, "Rehash error: matcher creation failed\n");
+ return -ENOMEM;
+ }
+
+ ret = mlx5hws_matcher_resize_set_target(old_matcher, new_matcher);
+ if (ret) {
+ mlx5hws_err(ctx, "Rehash error: failed setting resize target\n");
+ return ret;
+ }
+
+ ret = hws_bwc_matcher_move_all(bwc_matcher);
+ if (ret)
+ mlx5hws_err(ctx, "Rehash error: moving rules failed, attempting to remove the old matcher\n");
+
+ /* Error during rehash can't be rolled back.
+ * The best option here is to allow the rehash to complete and remove
+ * the old matcher - can't leave the matcher in the 'in_resize' state.
+ */
+
+ bwc_matcher->matcher = new_matcher;
+ mlx5hws_matcher_destroy(old_matcher);
+
+ return ret;
+}
+
int mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
struct mlx5hws_table *table,
u32 priority,
@@ -83,12 +243,20 @@ int mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
for (i = 0; i < bwc_queues; i++)
INIT_LIST_HEAD(&bwc_matcher->rules[i]);
- hws_bwc_matcher_init_attr(&attr,
+ hws_bwc_matcher_init_attr(bwc_matcher,
priority,
- MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG);
+ bwc_matcher->rx_size.size_log,
+ bwc_matcher->tx_size.size_log,
+ &attr);
+ bwc_matcher->matcher_type = MLX5HWS_BWC_MATCHER_SIMPLE;
bwc_matcher->priority = priority;
- bwc_matcher->size_log = MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG;
+
+ bwc_matcher->size_of_at_array = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM;
+ bwc_matcher->at = kcalloc(bwc_matcher->size_of_at_array,
+ sizeof(*bwc_matcher->at), GFP_KERNEL);
+ if (!bwc_matcher->at)
+ goto free_bwc_matcher_rules;
/* create dummy action template */
bwc_matcher->at[0] =
@@ -96,7 +264,7 @@ int mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
action_types : init_action_types);
if (!bwc_matcher->at[0]) {
mlx5hws_err(table->ctx, "BWC matcher: failed creating action template\n");
- goto free_bwc_matcher_rules;
+ goto free_bwc_matcher_at_array;
}
bwc_matcher->num_of_at = 1;
@@ -126,12 +294,28 @@ free_mt:
mlx5hws_match_template_destroy(bwc_matcher->mt);
free_at:
mlx5hws_action_template_destroy(bwc_matcher->at[0]);
+free_bwc_matcher_at_array:
+ kfree(bwc_matcher->at);
free_bwc_matcher_rules:
kfree(bwc_matcher->rules);
err:
return -EINVAL;
}
+static void
+hws_bwc_matcher_init_size_rxtx(struct mlx5hws_bwc_matcher_size *size)
+{
+ size->size_log = MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG;
+ atomic_set(&size->num_of_rules, 0);
+ atomic_set(&size->rehash_required, false);
+}
+
+static void hws_bwc_matcher_init_size(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ hws_bwc_matcher_init_size_rxtx(&bwc_matcher->rx_size);
+ hws_bwc_matcher_init_size_rxtx(&bwc_matcher->tx_size);
+}
+
struct mlx5hws_bwc_matcher *
mlx5hws_bwc_matcher_create(struct mlx5hws_table *table,
u32 priority,
@@ -152,6 +336,8 @@ mlx5hws_bwc_matcher_create(struct mlx5hws_table *table,
if (!bwc_matcher)
return NULL;
+ hws_bwc_matcher_init_size(bwc_matcher);
+
/* Check if the required match params can be all matched
* in single STE, otherwise complex matcher is needed.
*/
@@ -190,6 +376,7 @@ int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
for (i = 0; i < bwc_matcher->num_of_at; i++)
mlx5hws_action_template_destroy(bwc_matcher->at[i]);
+ kfree(bwc_matcher->at);
mlx5hws_match_template_destroy(bwc_matcher->mt);
kfree(bwc_matcher->rules);
@@ -199,22 +386,30 @@ int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
{
- if (bwc_matcher->num_of_rules)
+ u32 rx_rules = atomic_read(&bwc_matcher->rx_size.num_of_rules);
+ u32 tx_rules = atomic_read(&bwc_matcher->tx_size.num_of_rules);
+
+ if (rx_rules || tx_rules)
mlx5hws_err(bwc_matcher->matcher->tbl->ctx,
- "BWC matcher destroy: matcher still has %d rules\n",
- bwc_matcher->num_of_rules);
+ "BWC matcher destroy: matcher still has %u RX and %u TX rules\n",
+ rx_rules, tx_rules);
- mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
+ if (bwc_matcher->matcher_type == MLX5HWS_BWC_MATCHER_COMPLEX_FIRST)
+ mlx5hws_bwc_matcher_destroy_complex(bwc_matcher);
+ else
+ mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
kfree(bwc_matcher);
return 0;
}
-static int hws_bwc_queue_poll(struct mlx5hws_context *ctx,
- u16 queue_id,
- u32 *pending_rules,
- bool drain)
+int mlx5hws_bwc_queue_poll(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ u32 *pending_rules,
+ bool drain)
{
+ unsigned long timeout = jiffies +
+ secs_to_jiffies(MLX5HWS_BWC_POLLING_TIMEOUT);
struct mlx5hws_flow_op_result comp[MLX5HWS_BWC_MATCHER_REHASH_BURST_TH];
u16 burst_th = hws_bwc_get_burst_th(ctx, queue_id);
bool got_comp = *pending_rules >= burst_th;
@@ -250,6 +445,11 @@ static int hws_bwc_queue_poll(struct mlx5hws_context *ctx,
}
got_comp = !!ret;
+
+ if (unlikely(!got_comp && time_after(jiffies, timeout))) {
+ mlx5hws_err(ctx, "BWC poll error: polling queue %d - TIMEOUT\n", queue_id);
+ return -ETIMEDOUT;
+ }
}
return err;
@@ -309,16 +509,12 @@ static void hws_bwc_rule_list_add(struct mlx5hws_bwc_rule *bwc_rule, u16 idx)
{
struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
- bwc_matcher->num_of_rules++;
bwc_rule->bwc_queue_idx = idx;
list_add(&bwc_rule->list_node, &bwc_matcher->rules[idx]);
}
static void hws_bwc_rule_list_remove(struct mlx5hws_bwc_rule *bwc_rule)
{
- struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
-
- bwc_matcher->num_of_rules--;
list_del_init(&bwc_rule->list_node);
}
@@ -334,28 +530,102 @@ hws_bwc_rule_destroy_hws_sync(struct mlx5hws_bwc_rule *bwc_rule,
struct mlx5hws_rule_attr *rule_attr)
{
struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
- struct mlx5hws_flow_op_result completion;
+ u32 expected_completions = 1;
int ret;
ret = hws_bwc_rule_destroy_hws_async(bwc_rule, rule_attr);
if (unlikely(ret))
return ret;
- do {
- ret = mlx5hws_send_queue_poll(ctx, rule_attr->queue_id, &completion, 1);
- } while (ret != 1);
+ ret = mlx5hws_bwc_queue_poll(ctx, rule_attr->queue_id,
+ &expected_completions, true);
+ if (unlikely(ret))
+ return ret;
- if (unlikely(completion.status != MLX5HWS_FLOW_OP_SUCCESS ||
- (bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETED &&
- bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETING))) {
- mlx5hws_err(ctx, "Failed destroying BWC rule: completion %d, rule status %d\n",
- completion.status, bwc_rule->rule->status);
+ if (unlikely(bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETED &&
+ bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETING)) {
+ mlx5hws_err(ctx, "Failed destroying BWC rule: rule status %d\n",
+ bwc_rule->rule->status);
return -EINVAL;
}
return 0;
}
+static void hws_bwc_rule_cnt_dec(struct mlx5hws_bwc_rule *bwc_rule)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+
+ if (!bwc_rule->skip_rx)
+ atomic_dec(&bwc_matcher->rx_size.num_of_rules);
+ if (!bwc_rule->skip_tx)
+ atomic_dec(&bwc_matcher->tx_size.num_of_rules);
+}
+
+static int
+hws_bwc_matcher_rehash_shrink(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_bwc_matcher_size *rx_size = &bwc_matcher->rx_size;
+ struct mlx5hws_bwc_matcher_size *tx_size = &bwc_matcher->tx_size;
+
+ /* It is possible that another thread has added a rule.
+ * Need to check again if we really need rehash/shrink.
+ */
+ if (atomic_read(&rx_size->num_of_rules) ||
+ atomic_read(&tx_size->num_of_rules))
+ return 0;
+
+	/* If the current matcher RX/TX size is already at its initial size, there is nothing to shrink. */
+ if (rx_size->size_log == MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG &&
+ tx_size->size_log == MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG)
+ return 0;
+
+ /* Now we've done all the checking - do the shrinking:
+ * - reset match RTC size to the initial size
+ * - create new matcher
+ * - move the rules, which will not do anything as the matcher is empty
+ * - destroy the old matcher
+ */
+
+ rx_size->size_log = MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG;
+ tx_size->size_log = MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG;
+
+ return hws_bwc_matcher_move(bwc_matcher);
+}
+
+static int hws_bwc_rule_cnt_dec_with_shrink(struct mlx5hws_bwc_rule *bwc_rule,
+ u16 bwc_queue_idx)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mutex *queue_lock; /* Protect the queue */
+ int ret;
+
+ hws_bwc_rule_cnt_dec(bwc_rule);
+
+ if (atomic_read(&bwc_matcher->rx_size.num_of_rules) ||
+ atomic_read(&bwc_matcher->tx_size.num_of_rules))
+ return 0;
+
+ /* Matcher has no more rules - shrink it to save ICM. */
+
+ queue_lock = hws_bwc_get_queue_lock(ctx, bwc_queue_idx);
+ mutex_unlock(queue_lock);
+
+ hws_bwc_lock_all_queues(ctx);
+ ret = hws_bwc_matcher_rehash_shrink(bwc_matcher);
+ hws_bwc_unlock_all_queues(ctx);
+
+ mutex_lock(queue_lock);
+
+ if (unlikely(ret))
+ mlx5hws_err(ctx,
+ "BWC rule deletion: shrinking empty matcher failed (%d)\n",
+ ret);
+
+ return ret;
+}
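The shrink path above uses a lock hand-off: the caller drops its own per-queue lock, takes all queue locks for the global shrink, and then reacquires its own lock before returning to per-queue work. A standalone pthreads sketch of that pattern (queue count and names are illustrative only, not the driver's):

	#include <pthread.h>
	#include <stdio.h>

	#define NUM_QUEUES 4

	static pthread_mutex_t queue_lock[NUM_QUEUES] = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	};

	/* Global phase that must exclude all per-queue work. */
	static void global_shrink(void)
	{
		printf("shrinking with all queues quiesced\n");
	}

	static void per_queue_op_with_global_step(int my_queue)
	{
		int i;

		pthread_mutex_lock(&queue_lock[my_queue]);
		/* ... per-queue work under this queue's lock ... */

		/* Hand-off: drop our lock so all queue locks can be taken
		 * in a fixed order without deadlocking.
		 */
		pthread_mutex_unlock(&queue_lock[my_queue]);

		for (i = 0; i < NUM_QUEUES; i++)
			pthread_mutex_lock(&queue_lock[i]);
		global_shrink();
		for (i = NUM_QUEUES - 1; i >= 0; i--)
			pthread_mutex_unlock(&queue_lock[i]);

		/* Resume per-queue work under our own lock again. */
		pthread_mutex_lock(&queue_lock[my_queue]);
		/* ... */
		pthread_mutex_unlock(&queue_lock[my_queue]);
	}

	int main(void)
	{
		per_queue_op_with_global_step(1);
		return 0;
	}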
+
int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule)
{
struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
@@ -373,6 +643,7 @@ int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule)
ret = hws_bwc_rule_destroy_hws_sync(bwc_rule, &attr);
hws_bwc_rule_list_remove(bwc_rule);
+ hws_bwc_rule_cnt_dec_with_shrink(bwc_rule, idx);
mutex_unlock(queue_lock);
@@ -381,9 +652,14 @@ int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule)
int mlx5hws_bwc_rule_destroy(struct mlx5hws_bwc_rule *bwc_rule)
{
- int ret;
+ bool is_complex = bwc_rule->bwc_matcher->matcher_type ==
+ MLX5HWS_BWC_MATCHER_COMPLEX_FIRST;
+ int ret = 0;
- ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule);
+ if (is_complex)
+ ret = mlx5hws_bwc_rule_destroy_complex(bwc_rule);
+ else
+ ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule);
mlx5hws_bwc_rule_free(bwc_rule);
return ret;
@@ -423,9 +699,8 @@ hws_bwc_rule_create_sync(struct mlx5hws_bwc_rule *bwc_rule,
if (unlikely(ret))
return ret;
- ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true);
-
- return ret;
+ return mlx5hws_bwc_queue_poll(ctx, rule_attr->queue_id,
+ &expected_completions, true);
}
static int
@@ -446,7 +721,8 @@ hws_bwc_rule_update_sync(struct mlx5hws_bwc_rule *bwc_rule,
if (unlikely(ret))
return ret;
- ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true);
+ ret = mlx5hws_bwc_queue_poll(ctx, rule_attr->queue_id,
+ &expected_completions, true);
if (unlikely(ret))
mlx5hws_err(ctx, "Failed updating BWC rule (%d)\n", ret);
@@ -454,23 +730,27 @@ hws_bwc_rule_update_sync(struct mlx5hws_bwc_rule *bwc_rule,
}
static bool
-hws_bwc_matcher_size_maxed_out(struct mlx5hws_bwc_matcher *bwc_matcher)
+hws_bwc_matcher_size_maxed_out(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_bwc_matcher_size *size)
{
struct mlx5hws_cmd_query_caps *caps = bwc_matcher->matcher->tbl->ctx->caps;
- return bwc_matcher->size_log + MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH >=
- caps->ste_alloc_log_max - 1;
+ /* check the match RTC size */
+ return (size->size_log + MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH +
+ MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP) >
+ (caps->ste_alloc_log_max - 1);
}
static bool
hws_bwc_matcher_rehash_size_needed(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_bwc_matcher_size *size,
u32 num_of_rules)
{
- if (unlikely(hws_bwc_matcher_size_maxed_out(bwc_matcher)))
+ if (unlikely(hws_bwc_matcher_size_maxed_out(bwc_matcher, size)))
return false;
if (unlikely((num_of_rules * 100 / MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH) >=
- (1UL << bwc_matcher->size_log)))
+ (1UL << size->size_log)))
return true;
return false;
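Rearranged, the per-direction check above requests a rehash once num_of_rules * 100 / MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH reaches the current table size, i.e. once the matcher is roughly that many percent full. A standalone sketch of the check follows; the 50% threshold is a placeholder, not the driver's value:

	#include <stdbool.h>
	#include <stdio.h>

	#define REHASH_PERCENT_TH 50	/* placeholder value */

	static bool rehash_size_needed(unsigned long num_of_rules,
				       unsigned int size_log)
	{
		return num_of_rules * 100 / REHASH_PERCENT_TH >=
		       (1UL << size_log);
	}

	int main(void)
	{
		/* With size_log = 12 (4096 entries) and a 50% threshold,
		 * the rehash flag is raised once 2048 rules are present.
		 */
		printf("2047 rules: %d\n", rehash_size_needed(2047, 12));
		printf("2048 rules: %d\n", rehash_size_needed(2048, 12));
		return 0;
	}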
@@ -496,6 +776,23 @@ hws_bwc_matcher_extend_at(struct mlx5hws_bwc_matcher *bwc_matcher,
struct mlx5hws_rule_action rule_actions[])
{
enum mlx5hws_action_type action_types[MLX5HWS_BWC_MAX_ACTS];
+ void *p;
+
+ if (unlikely(bwc_matcher->num_of_at >= bwc_matcher->size_of_at_array)) {
+ if (bwc_matcher->size_of_at_array >= MLX5HWS_MATCHER_MAX_AT)
+ return -ENOMEM;
+ bwc_matcher->size_of_at_array *= 2;
+ p = krealloc(bwc_matcher->at,
+ bwc_matcher->size_of_at_array *
+ sizeof(*bwc_matcher->at),
+ __GFP_ZERO | GFP_KERNEL);
+ if (!p) {
+ bwc_matcher->size_of_at_array /= 2;
+ return -ENOMEM;
+ }
+
+ bwc_matcher->at = p;
+ }
hws_bwc_rule_actions_to_action_types(rule_actions, action_types);
@@ -510,20 +807,21 @@ hws_bwc_matcher_extend_at(struct mlx5hws_bwc_matcher *bwc_matcher,
}
static int
-hws_bwc_matcher_extend_size(struct mlx5hws_bwc_matcher *bwc_matcher)
+hws_bwc_matcher_extend_size(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_bwc_matcher_size *size)
{
struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
struct mlx5hws_cmd_query_caps *caps = ctx->caps;
- if (unlikely(hws_bwc_matcher_size_maxed_out(bwc_matcher))) {
+ if (unlikely(hws_bwc_matcher_size_maxed_out(bwc_matcher, size))) {
mlx5hws_err(ctx, "Can't resize matcher: depth exceeds limit %d\n",
caps->rtc_log_depth_max);
return -ENOMEM;
}
- bwc_matcher->size_log =
- min(bwc_matcher->size_log + MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP,
- caps->ste_alloc_log_max - MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH);
+ size->size_log = min(size->size_log + MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP,
+ caps->ste_alloc_log_max -
+ MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH);
return 0;
}
@@ -556,182 +854,171 @@ hws_bwc_matcher_find_at(struct mlx5hws_bwc_matcher *bwc_matcher,
return -1;
}
-static int hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
+static int
+hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
{
- struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
- u16 bwc_queues = mlx5hws_bwc_queues(ctx);
- struct mlx5hws_bwc_rule **bwc_rules;
- struct mlx5hws_rule_attr rule_attr;
- u32 *pending_rules;
- int i, j, ret = 0;
- bool all_done;
- u16 burst_th;
+ bool need_rx_rehash, need_tx_rehash;
+ int ret;
- mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr);
+ need_rx_rehash = atomic_read(&bwc_matcher->rx_size.rehash_required);
+ need_tx_rehash = atomic_read(&bwc_matcher->tx_size.rehash_required);
- pending_rules = kcalloc(bwc_queues, sizeof(*pending_rules), GFP_KERNEL);
- if (!pending_rules)
- return -ENOMEM;
+ /* It is possible that another rule has already performed rehash.
+ * Need to check again if we really need rehash.
+ */
+ if (!need_rx_rehash && !need_tx_rehash)
+ return 0;
- bwc_rules = kcalloc(bwc_queues, sizeof(*bwc_rules), GFP_KERNEL);
- if (!bwc_rules) {
- ret = -ENOMEM;
- goto free_pending_rules;
+ /* If the current matcher RX/TX size is already at its max size,
+ * it can't be rehashed.
+ */
+ if (need_rx_rehash &&
+ hws_bwc_matcher_size_maxed_out(bwc_matcher,
+ &bwc_matcher->rx_size)) {
+ atomic_set(&bwc_matcher->rx_size.rehash_required, false);
+ need_rx_rehash = false;
+ }
+ if (need_tx_rehash &&
+ hws_bwc_matcher_size_maxed_out(bwc_matcher,
+ &bwc_matcher->tx_size)) {
+ atomic_set(&bwc_matcher->tx_size.rehash_required, false);
+ need_tx_rehash = false;
}
- for (i = 0; i < bwc_queues; i++) {
- if (list_empty(&bwc_matcher->rules[i]))
- bwc_rules[i] = NULL;
- else
- bwc_rules[i] = list_first_entry(&bwc_matcher->rules[i],
- struct mlx5hws_bwc_rule,
- list_node);
- }
-
- do {
- all_done = true;
-
- for (i = 0; i < bwc_queues; i++) {
- rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
- burst_th = hws_bwc_get_burst_th(ctx, rule_attr.queue_id);
-
- for (j = 0; j < burst_th && bwc_rules[i]; j++) {
- rule_attr.burst = !!((j + 1) % burst_th);
- ret = mlx5hws_matcher_resize_rule_move(bwc_matcher->matcher,
- bwc_rules[i]->rule,
- &rule_attr);
- if (unlikely(ret)) {
- mlx5hws_err(ctx,
- "Moving BWC rule failed during rehash (%d)\n",
- ret);
- goto free_bwc_rules;
- }
-
- all_done = false;
- pending_rules[i]++;
- bwc_rules[i] = list_is_last(&bwc_rules[i]->list_node,
- &bwc_matcher->rules[i]) ?
- NULL : list_next_entry(bwc_rules[i], list_node);
+ /* If both RX and TX rehash flags are now off, it means that whatever
+ * we wanted to rehash is now at its max size - no rehash can be done.
+ * Return and try adding the rule again - perhaps there was some change.
+ */
+ if (!need_rx_rehash && !need_tx_rehash)
+ return 0;
- ret = hws_bwc_queue_poll(ctx, rule_attr.queue_id,
- &pending_rules[i], false);
- if (unlikely(ret))
- goto free_bwc_rules;
- }
- }
- } while (!all_done);
+	/* Now that we're done with all the checking - do the rehash:
+ * - extend match RTC size
+ * - create new matcher
+ * - move all the rules to the new matcher
+ * - destroy the old matcher
+ */
+ atomic_set(&bwc_matcher->rx_size.rehash_required, false);
+ atomic_set(&bwc_matcher->tx_size.rehash_required, false);
- /* drain all the bwc queues */
- for (i = 0; i < bwc_queues; i++) {
- if (pending_rules[i]) {
- u16 queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
-
- mlx5hws_send_engine_flush_queue(&ctx->send_queue[queue_id]);
- ret = hws_bwc_queue_poll(ctx, queue_id,
- &pending_rules[i], true);
- if (unlikely(ret))
- goto free_bwc_rules;
- }
+ if (need_rx_rehash) {
+ ret = hws_bwc_matcher_extend_size(bwc_matcher,
+ &bwc_matcher->rx_size);
+ if (ret)
+ return ret;
}
-free_bwc_rules:
- kfree(bwc_rules);
-free_pending_rules:
- kfree(pending_rules);
-
- return ret;
-}
+ if (need_tx_rehash) {
+ ret = hws_bwc_matcher_extend_size(bwc_matcher,
+ &bwc_matcher->tx_size);
+ if (ret)
+ return ret;
+ }
-static int hws_bwc_matcher_move_all(struct mlx5hws_bwc_matcher *bwc_matcher)
-{
- return hws_bwc_matcher_move_all_simple(bwc_matcher);
+ return hws_bwc_matcher_move(bwc_matcher);
}
-static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher)
+static int hws_bwc_rule_get_at_idx(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_rule_action rule_actions[],
+ u16 bwc_queue_idx)
{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
- struct mlx5hws_matcher_attr matcher_attr = {0};
- struct mlx5hws_matcher *old_matcher;
- struct mlx5hws_matcher *new_matcher;
- int ret;
+ struct mutex *queue_lock; /* Protect the queue */
+ int at_idx, ret;
- hws_bwc_matcher_init_attr(&matcher_attr,
- bwc_matcher->priority,
- bwc_matcher->size_log);
+ /* check if rehash needed due to missing action template */
+ at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+ if (likely(at_idx >= 0))
+ return at_idx;
- old_matcher = bwc_matcher->matcher;
- new_matcher = mlx5hws_matcher_create(old_matcher->tbl,
- &bwc_matcher->mt, 1,
- bwc_matcher->at,
- bwc_matcher->num_of_at,
- &matcher_attr);
- if (!new_matcher) {
- mlx5hws_err(ctx, "Rehash error: matcher creation failed\n");
- return -ENOMEM;
- }
+ /* we need to extend BWC matcher action templates array */
+ queue_lock = hws_bwc_get_queue_lock(ctx, bwc_queue_idx);
+ mutex_unlock(queue_lock);
+ hws_bwc_lock_all_queues(ctx);
- ret = mlx5hws_matcher_resize_set_target(old_matcher, new_matcher);
- if (ret) {
- mlx5hws_err(ctx, "Rehash error: failed setting resize target\n");
- return ret;
+ /* check again - perhaps another thread already did extend_at */
+ at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+ if (at_idx >= 0)
+ goto out;
+
+ ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
+ if (unlikely(ret)) {
+ mlx5hws_err(ctx, "BWC rule: failed extending AT (%d)", ret);
+ at_idx = -EINVAL;
+ goto out;
}
- ret = hws_bwc_matcher_move_all(bwc_matcher);
- if (ret) {
- mlx5hws_err(ctx, "Rehash error: moving rules failed\n");
- return -ENOMEM;
+ /* action templates array was extended, we need the last idx */
+ at_idx = bwc_matcher->num_of_at - 1;
+ ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher,
+ bwc_matcher->at[at_idx]);
+ if (unlikely(ret)) {
+ mlx5hws_err(ctx, "BWC rule: failed attaching new AT (%d)", ret);
+ at_idx = -EINVAL;
+ goto out;
}
- bwc_matcher->matcher = new_matcher;
- mlx5hws_matcher_destroy(old_matcher);
+out:
+ hws_bwc_unlock_all_queues(ctx);
+ mutex_lock(queue_lock);
+ return at_idx;
+}
- return 0;
+static void hws_bwc_rule_cnt_inc_rxtx(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_bwc_matcher_size *size)
+{
+ u32 num_of_rules = atomic_inc_return(&size->num_of_rules);
+
+ if (unlikely(hws_bwc_matcher_rehash_size_needed(bwc_rule->bwc_matcher,
+ size, num_of_rules)))
+ atomic_set(&size->rehash_required, true);
}
-static int
-hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
+static void hws_bwc_rule_cnt_inc(struct mlx5hws_bwc_rule *bwc_rule)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+
+ if (!bwc_rule->skip_rx)
+ hws_bwc_rule_cnt_inc_rxtx(bwc_rule, &bwc_matcher->rx_size);
+ if (!bwc_rule->skip_tx)
+ hws_bwc_rule_cnt_inc_rxtx(bwc_rule, &bwc_matcher->tx_size);
+}
+
+static int hws_bwc_rule_cnt_inc_with_rehash(struct mlx5hws_bwc_rule *bwc_rule,
+ u16 bwc_queue_idx)
{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mutex *queue_lock; /* Protect the queue */
int ret;
- /* If the current matcher size is already at its max size, we can't
- * do the rehash. Skip it and try adding the rule again - perhaps
- * there was some change.
- */
- if (hws_bwc_matcher_size_maxed_out(bwc_matcher))
- return 0;
+ hws_bwc_rule_cnt_inc(bwc_rule);
- /* It is possible that other rule has already performed rehash.
- * Need to check again if we really need rehash.
- * If the reason for rehash was size, but not any more - skip rehash.
- */
- if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher, bwc_matcher->num_of_rules))
+ if (!atomic_read(&bwc_matcher->rx_size.rehash_required) &&
+ !atomic_read(&bwc_matcher->tx_size.rehash_required))
return 0;
- /* Now we're done all the checking - do the rehash:
- * - extend match RTC size
- * - create new matcher
- * - move all the rules to the new matcher
- * - destroy the old matcher
- */
+ queue_lock = hws_bwc_get_queue_lock(ctx, bwc_queue_idx);
+ mutex_unlock(queue_lock);
- ret = hws_bwc_matcher_extend_size(bwc_matcher);
- if (ret)
- return ret;
+ hws_bwc_lock_all_queues(ctx);
+ ret = hws_bwc_matcher_rehash_size(bwc_matcher);
+ hws_bwc_unlock_all_queues(ctx);
- return hws_bwc_matcher_move(bwc_matcher);
-}
+ mutex_lock(queue_lock);
-static int
-hws_bwc_matcher_rehash_at(struct mlx5hws_bwc_matcher *bwc_matcher)
-{
- /* Rehash by action template doesn't require any additional checking.
- * The bwc_matcher already contains the new action template.
- * Just do the usual rehash:
- * - create new matcher
- * - move all the rules to the new matcher
- * - destroy the old matcher
- */
- return hws_bwc_matcher_move(bwc_matcher);
+ if (likely(!ret))
+ return 0;
+
+ /* Failed to rehash. Print a diagnostic and roll back the counters. */
+ mlx5hws_err(ctx,
+ "BWC rule insertion: rehash to sizes [%d, %d] failed (%d)\n",
+ bwc_matcher->rx_size.size_log,
+ bwc_matcher->tx_size.size_log, ret);
+ hws_bwc_rule_cnt_dec(bwc_rule);
+
+ return ret;
}
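
The per-direction accounting above follows a small lock-free pattern: the insertion fast path atomically bumps the RX or TX rule counter, compares it against the utilization threshold implied by the current size_log, and latches a rehash_required flag; the slow path later consumes that flag while all BWC queues are locked. A minimal sketch of the pattern (struct and helper names here are illustrative, not the driver's API):

#include <linux/atomic.h>
#include <linux/types.h>

/* Illustrative sketch of the per-direction rehash trigger. */
struct size_state {
	u8 size_log;              /* log2 of the matcher size */
	atomic_t num_of_rules;    /* rules currently inserted */
	atomic_t rehash_required; /* latched once utilization crosses the threshold */
};

static void size_state_rule_inserted(struct size_state *s)
{
	u32 n = atomic_inc_return(&s->num_of_rules);
	u32 capacity = 1U << s->size_log;
	/* e.g. a 70% threshold, like MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH */
	u32 threshold = capacity / 10 * 7;

	if (n >= threshold)
		atomic_set(&s->rehash_required, true);
}

Clearing the flags and growing size_log happen only in hws_bwc_matcher_rehash_size() under the all-queues lock, so a racing inserter can at worst trigger one extra pass through the slow path.
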
int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
@@ -744,7 +1031,6 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
struct mlx5hws_rule_attr rule_attr;
struct mutex *queue_lock; /* Protect the queue */
- u32 num_of_rules;
int ret = 0;
int at_idx;
@@ -754,67 +1040,18 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
mutex_lock(queue_lock);
- /* check if rehash needed due to missing action template */
- at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+ at_idx = hws_bwc_rule_get_at_idx(bwc_rule, rule_actions, bwc_queue_idx);
if (unlikely(at_idx < 0)) {
- /* we need to extend BWC matcher action templates array */
mutex_unlock(queue_lock);
- hws_bwc_lock_all_queues(ctx);
-
- ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
- if (unlikely(ret)) {
- hws_bwc_unlock_all_queues(ctx);
- return ret;
- }
-
- /* action templates array was extended, we need the last idx */
- at_idx = bwc_matcher->num_of_at - 1;
-
- ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher,
- bwc_matcher->at[at_idx]);
- if (unlikely(ret)) {
- /* Action template attach failed, possibly due to
- * requiring more action STEs.
- * Need to attempt creating new matcher with all
- * the action templates, including the new one.
- */
- ret = hws_bwc_matcher_rehash_at(bwc_matcher);
- if (unlikely(ret)) {
- mlx5hws_action_template_destroy(bwc_matcher->at[at_idx]);
- bwc_matcher->at[at_idx] = NULL;
- bwc_matcher->num_of_at--;
-
- hws_bwc_unlock_all_queues(ctx);
-
- mlx5hws_err(ctx,
- "BWC rule insertion: rehash AT failed (%d)\n", ret);
- return ret;
- }
- }
-
- hws_bwc_unlock_all_queues(ctx);
- mutex_lock(queue_lock);
+ mlx5hws_err(ctx, "BWC rule create: failed getting AT (%d)",
+ ret);
+ return -EINVAL;
}
- /* check if number of rules require rehash */
- num_of_rules = bwc_matcher->num_of_rules;
-
- if (unlikely(hws_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))) {
+ ret = hws_bwc_rule_cnt_inc_with_rehash(bwc_rule, bwc_queue_idx);
+ if (unlikely(ret)) {
mutex_unlock(queue_lock);
-
- hws_bwc_lock_all_queues(ctx);
- ret = hws_bwc_matcher_rehash_size(bwc_matcher);
- hws_bwc_unlock_all_queues(ctx);
-
- if (ret) {
- mlx5hws_err(ctx, "BWC rule insertion: rehash size [%d -> %d] failed (%d)\n",
- bwc_matcher->size_log - MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP,
- bwc_matcher->size_log,
- ret);
- return ret;
- }
-
- mutex_lock(queue_lock);
+ return ret;
}
ret = hws_bwc_rule_create_sync(bwc_rule,
@@ -828,12 +1065,29 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
return 0; /* rule inserted successfully */
}
+ /* Rule insertion could fail because the queue is full, the operation
+ * timed out, or the matcher is mid-resize. In such cases there is no
+ * point in trying to rehash.
+ */
+ if (ret == -EBUSY || ret == -ETIMEDOUT || ret == -EAGAIN) {
+ mutex_unlock(queue_lock);
+ mlx5hws_err(ctx,
+ "BWC rule insertion failed - %s (%d)\n",
+ ret == -EBUSY ? "queue is full" :
+ ret == -ETIMEDOUT ? "timeout" :
+ ret == -EAGAIN ? "matcher in resize" : "N/A",
+ ret);
+ hws_bwc_rule_cnt_dec(bwc_rule);
+ return ret;
+ }
+
/* At this point the rule wasn't added.
* It could be because there was collision, or some other problem.
- * If we don't dive deeper than API, the only thing we know is that
- * the status of completion is RTE_FLOW_OP_ERROR.
* Try rehash by size and insert rule again - last chance.
*/
+ if (!bwc_rule->skip_rx)
+ atomic_set(&bwc_matcher->rx_size.rehash_required, true);
+ if (!bwc_rule->skip_tx)
+ atomic_set(&bwc_matcher->tx_size.rehash_required, true);
mutex_unlock(queue_lock);
@@ -843,6 +1097,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
if (ret) {
mlx5hws_err(ctx, "BWC rule insertion: rehash failed (%d)\n", ret);
+ hws_bwc_rule_cnt_dec(bwc_rule);
return ret;
}
@@ -858,6 +1113,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
if (unlikely(ret)) {
mutex_unlock(queue_lock);
mlx5hws_err(ctx, "BWC rule insertion failed (%d)\n", ret);
+ hws_bwc_rule_cnt_dec(bwc_rule);
return ret;
}
@@ -887,13 +1143,24 @@ mlx5hws_bwc_rule_create(struct mlx5hws_bwc_matcher *bwc_matcher,
if (unlikely(!bwc_rule))
return NULL;
+ bwc_rule->flow_source = flow_source;
+ mlx5hws_rule_skip(bwc_matcher->matcher, flow_source,
+ &bwc_rule->skip_rx, &bwc_rule->skip_tx);
+
bwc_queue_idx = hws_bwc_gen_queue_idx(ctx);
- ret = mlx5hws_bwc_rule_create_simple(bwc_rule,
- params->match_buf,
- rule_actions,
- flow_source,
- bwc_queue_idx);
+ if (bwc_matcher->matcher_type == MLX5HWS_BWC_MATCHER_COMPLEX_FIRST)
+ ret = mlx5hws_bwc_rule_create_complex(bwc_rule,
+ params,
+ flow_source,
+ rule_actions,
+ bwc_queue_idx);
+ else
+ ret = mlx5hws_bwc_rule_create_simple(bwc_rule,
+ params->match_buf,
+ rule_actions,
+ flow_source,
+ bwc_queue_idx);
if (unlikely(ret)) {
mlx5hws_bwc_rule_free(bwc_rule);
return NULL;
@@ -915,57 +1182,17 @@ hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
idx = bwc_rule->bwc_queue_idx;
- mlx5hws_bwc_rule_fill_attr(bwc_matcher, idx, 0, &rule_attr);
+ mlx5hws_bwc_rule_fill_attr(bwc_matcher, idx, bwc_rule->flow_source,
+ &rule_attr);
queue_lock = hws_bwc_get_queue_lock(ctx, idx);
mutex_lock(queue_lock);
- /* check if rehash needed due to missing action template */
- at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+ at_idx = hws_bwc_rule_get_at_idx(bwc_rule, rule_actions, idx);
if (unlikely(at_idx < 0)) {
- /* we need to extend BWC matcher action templates array */
mutex_unlock(queue_lock);
- hws_bwc_lock_all_queues(ctx);
-
- /* check again - perhaps other thread already did extend_at */
- at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
- if (likely(at_idx < 0)) {
- ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
- if (unlikely(ret)) {
- hws_bwc_unlock_all_queues(ctx);
- mlx5hws_err(ctx, "BWC rule update: failed extending AT (%d)", ret);
- return -EINVAL;
- }
-
- /* action templates array was extended, we need the last idx */
- at_idx = bwc_matcher->num_of_at - 1;
-
- ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher,
- bwc_matcher->at[at_idx]);
- if (unlikely(ret)) {
- /* Action template attach failed, possibly due to
- * requiring more action STEs.
- * Need to attempt creating new matcher with all
- * the action templates, including the new one.
- */
- ret = hws_bwc_matcher_rehash_at(bwc_matcher);
- if (unlikely(ret)) {
- mlx5hws_action_template_destroy(bwc_matcher->at[at_idx]);
- bwc_matcher->at[at_idx] = NULL;
- bwc_matcher->num_of_at--;
-
- hws_bwc_unlock_all_queues(ctx);
-
- mlx5hws_err(ctx,
- "BWC rule update: rehash AT failed (%d)\n",
- ret);
- return ret;
- }
- }
- }
-
- hws_bwc_unlock_all_queues(ctx);
- mutex_lock(queue_lock);
+ mlx5hws_err(ctx, "BWC rule update: failed getting AT\n");
+ return -EINVAL;
}
ret = hws_bwc_rule_update_sync(bwc_rule,
@@ -991,5 +1218,9 @@ int mlx5hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
return -EINVAL;
}
+ /* For complex rules, the update should happen on the last subrule. */
+ while (bwc_rule->next_subrule)
+ bwc_rule = bwc_rule->next_subrule;
+
return hws_bwc_rule_action_update(bwc_rule, rule_actions);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
index 4fe8c32d8fbe..b905511f5c53 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
@@ -1,32 +1,69 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#ifndef MLX5HWS_BWC_H_
-#define MLX5HWS_BWC_H_
+#ifndef HWS_BWC_H_
+#define HWS_BWC_H_
#define MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG 1
#define MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP 1
#define MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH 70
#define MLX5HWS_BWC_MATCHER_REHASH_BURST_TH 32
-#define MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM 255
+
+/* Max number of AT attach operations for the same matcher.
+ * When the limit is reached, a larger buffer is allocated for the ATs.
+ */
+#define MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM 8
#define MLX5HWS_BWC_MAX_ACTS 16
+#define MLX5HWS_BWC_POLLING_TIMEOUT 60
+
+enum mlx5hws_bwc_matcher_type {
+ /* Standalone bwc matcher. */
+ MLX5HWS_BWC_MATCHER_SIMPLE,
+ /* The first matcher of a complex matcher. When rules are inserted into
+ * a matcher of this type, they are split into subrules and inserted
+ * into their corresponding submatchers.
+ */
+ MLX5HWS_BWC_MATCHER_COMPLEX_FIRST,
+ /* A submatcher that is part of a complex matcher. For most purposes
+ * these are treated as simple matchers, except when it comes to moving
+ * rules during resize.
+ */
+ MLX5HWS_BWC_MATCHER_COMPLEX_SUBMATCHER,
+};
+
+struct mlx5hws_bwc_matcher_complex_data;
+
+struct mlx5hws_bwc_matcher_size {
+ u8 size_log;
+ atomic_t num_of_rules;
+ atomic_t rehash_required;
+};
+
struct mlx5hws_bwc_matcher {
struct mlx5hws_matcher *matcher;
struct mlx5hws_match_template *mt;
- struct mlx5hws_action_template *at[MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM];
+ struct mlx5hws_action_template **at;
+ struct mlx5hws_bwc_matcher_complex_data *complex;
u8 num_of_at;
- u16 priority;
- u8 size_log;
- u32 num_of_rules; /* atomically accessed */
+ u8 size_of_at_array;
+ enum mlx5hws_bwc_matcher_type matcher_type;
+ u32 priority;
+ struct mlx5hws_bwc_matcher_size rx_size;
+ struct mlx5hws_bwc_matcher_size tx_size;
struct list_head *rules;
};
struct mlx5hws_bwc_rule {
struct mlx5hws_bwc_matcher *bwc_matcher;
struct mlx5hws_rule *rule;
+ struct mlx5hws_bwc_rule *next_subrule;
+ struct mlx5hws_bwc_complex_subrule_data *subrule_data;
+ u32 flow_source;
u16 bwc_queue_idx;
+ bool skip_rx;
+ bool skip_tx;
struct list_head list_node;
};
@@ -57,12 +94,19 @@ void mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
u32 flow_source,
struct mlx5hws_rule_attr *rule_attr);
+int mlx5hws_bwc_queue_poll(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ u32 *pending_rules,
+ bool drain);
+
static inline u16 mlx5hws_bwc_queues(struct mlx5hws_context *ctx)
{
/* Besides the control queue, half of the queues are
- * reguler HWS queues, and the other half are BWC queues.
+ * regular HWS queues, and the other half are BWC queues.
*/
- return (ctx->queues - 1) / 2;
+ if (mlx5hws_context_bwc_supported(ctx))
+ return (ctx->queues - 1) / 2;
+ return 0;
}
static inline u16 mlx5hws_bwc_get_queue_id(struct mlx5hws_context *ctx, u16 idx)
@@ -70,4 +114,4 @@ static inline u16 mlx5hws_bwc_get_queue_id(struct mlx5hws_context *ctx, u16 idx)
return idx + mlx5hws_bwc_queues(ctx);
}
-#endif /* MLX5HWS_BWC_H_ */
+#endif /* HWS_BWC_H_ */
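
The queue helpers at the end of this header decide how BWC work is spread across send queues: when BWC is supported, the upper half of the context's queues (one control queue excluded) is reserved for BWC, and a BWC queue index is translated to a send queue id by offsetting it past the regular HWS queues. A standalone sketch of that arithmetic, with an illustrative queue count:

/* Mirrors the mlx5hws_bwc_queues() / mlx5hws_bwc_get_queue_id() math.
 * Illustrative only - plain integers stand in for the context fields.
 */
static inline unsigned int example_bwc_queue_id(unsigned int total_queues,
						unsigned int bwc_idx)
{
	unsigned int bwc_queues = (total_queues - 1) / 2;

	return bwc_idx + bwc_queues;
}

/* e.g. with 9 send queues there are 4 BWC queues; BWC idx 0..3 map to ids 4..7 */
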
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
new file mode 100644
index 000000000000..660630f18ce9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
@@ -0,0 +1,1101 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+
+/* We chain submatchers by applying three actions to a subrule: modify header
+ * (to set register C6), jump to table (to the next submatcher) and the
+ * mandatory last action.
+ */
+#define HWS_NUM_CHAIN_ACTIONS 3
+
+static const struct rhashtable_params hws_rules_hash_params = {
+ .key_len = sizeof_field(struct mlx5hws_bwc_complex_subrule_data,
+ match_tag),
+ .key_offset =
+ offsetof(struct mlx5hws_bwc_complex_subrule_data, match_tag),
+ .head_offset =
+ offsetof(struct mlx5hws_bwc_complex_subrule_data, hash_node),
+ .automatic_shrinking = true,
+ .min_size = 1,
+};
+
+static bool
+hws_match_params_exceeds_definer(struct mlx5hws_context *ctx,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask,
+ bool allow_jumbo)
+{
+ struct mlx5hws_definer match_layout = {0};
+ struct mlx5hws_match_template *mt;
+ bool is_complex = false;
+ int ret;
+
+ if (!match_criteria_enable)
+ return false; /* empty matcher */
+
+ mt = mlx5hws_match_template_create(ctx,
+ mask->match_buf,
+ mask->match_sz,
+ match_criteria_enable);
+ if (!mt) {
+ mlx5hws_err(ctx, "Complex matcher: failed creating match template\n");
+ return false;
+ }
+
+ ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout, allow_jumbo);
+ if (ret) {
+ /* The only case that we're interested in is E2BIG,
+ * which means that the match parameters need to be
+ * split into a complex matcher.
+ * For all other cases (good or bad) - just return false
+ * and let the usual match creation path handle it,
+ * both for good and bad flows.
+ */
+ if (ret == -E2BIG) {
+ is_complex = true;
+ mlx5hws_dbg(ctx, "Matcher definer layout: need complex matcher\n");
+ } else {
+ mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n");
+ }
+ } else {
+ kfree(mt->fc);
+ }
+
+ mlx5hws_match_template_destroy(mt);
+
+ return is_complex;
+}
+
+bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask)
+{
+ return hws_match_params_exceeds_definer(ctx, match_criteria_enable,
+ mask, true);
+}
+
+static int
+hws_get_last_set_dword_idx(const struct mlx5hws_match_parameters *mask)
+{
+ int i;
+
+ for (i = mask->match_sz / 4 - 1; i >= 0; i--)
+ if (mask->match_buf[i])
+ return i;
+
+ return -1;
+}
+
+static bool hws_match_mask_is_empty(const struct mlx5hws_match_parameters *mask)
+{
+ return hws_get_last_set_dword_idx(mask) == -1;
+}
+
+static bool hws_dword_is_inner_ipaddr_off(int dword_off)
+{
+ /* IPv4 and IPv6 addresses share the same entry via a union, and the
+ * source and dest addresses are contiguous in the fte_match_param. So
+ * we need to check 8 dwords.
+ */
+ static const int inner_ip_dword_off =
+ __mlx5_dw_off(fte_match_param, inner_headers.src_ipv4_src_ipv6);
+
+ return dword_off >= inner_ip_dword_off &&
+ dword_off < inner_ip_dword_off + 8;
+}
+
+static bool hws_dword_is_outer_ipaddr_off(int dword_off)
+{
+ static const int outer_ip_dword_off =
+ __mlx5_dw_off(fte_match_param, outer_headers.src_ipv4_src_ipv6);
+
+ return dword_off >= outer_ip_dword_off &&
+ dword_off < outer_ip_dword_off + 8;
+}
+
+static void hws_add_dword_to_mask(struct mlx5hws_match_parameters *mask,
+ const struct mlx5hws_match_parameters *orig,
+ int dword_idx, bool *added_inner_ipv,
+ bool *added_outer_ipv)
+{
+ mask->match_buf[dword_idx] |= orig->match_buf[dword_idx];
+
+ *added_inner_ipv = false;
+ *added_outer_ipv = false;
+
+ /* Any IP address fragment must be accompanied by a match on IP version.
+ * Use the `added_ipv` variables to keep track if we added IP versions
+ * specifically for this dword, so that we can roll them back if the
+ * match params become too large to fit into a definer.
+ */
+ if (hws_dword_is_inner_ipaddr_off(dword_idx) &&
+ !MLX5_GET(fte_match_param, mask->match_buf,
+ inner_headers.ip_version)) {
+ MLX5_SET(fte_match_param, mask->match_buf,
+ inner_headers.ip_version, 0xf);
+ *added_inner_ipv = true;
+ }
+ if (hws_dword_is_outer_ipaddr_off(dword_idx) &&
+ !MLX5_GET(fte_match_param, mask->match_buf,
+ outer_headers.ip_version)) {
+ MLX5_SET(fte_match_param, mask->match_buf,
+ outer_headers.ip_version, 0xf);
+ *added_outer_ipv = true;
+ }
+}
+
+static void hws_remove_dword_from_mask(struct mlx5hws_match_parameters *mask,
+ int dword_idx, bool added_inner_ipv,
+ bool added_outer_ipv)
+{
+ mask->match_buf[dword_idx] = 0;
+ if (added_inner_ipv)
+ MLX5_SET(fte_match_param, mask->match_buf,
+ inner_headers.ip_version, 0);
+ if (added_outer_ipv)
+ MLX5_SET(fte_match_param, mask->match_buf,
+ outer_headers.ip_version, 0);
+}
+
+/* Avoid leaving a single lower dword in `mask` if there are others present in
+ * `orig`. Splitting IPv6 addresses like this causes them to be interpreted as
+ * IPv4.
+ */
+static void hws_avoid_ipv6_split_of(struct mlx5hws_match_parameters *orig,
+ struct mlx5hws_match_parameters *mask,
+ int off)
+{
+ /* Masks are allocated to a full fte_match_param, but it can't hurt to
+ * double check.
+ */
+ if (orig->match_sz <= off + 3 || mask->match_sz <= off + 3)
+ return;
+
+ /* Lower dword is not set, nothing to do. */
+ if (!mask->match_buf[off + 3])
+ return;
+
+ /* Higher dwords also present in `mask`, no ambiguity. */
+ if (mask->match_buf[off] || mask->match_buf[off + 1] ||
+ mask->match_buf[off + 2])
+ return;
+
+ /* There are no higher dwords in `orig`, i.e. we match on IPv4. */
+ if (!orig->match_buf[off] && !orig->match_buf[off + 1] &&
+ !orig->match_buf[off + 2])
+ return;
+
+ /* Put the lower dword back in `orig`. It is always safe to do this; the
+ * dword will just be picked up in the next submask.
+ */
+ orig->match_buf[off + 3] = mask->match_buf[off + 3];
+ mask->match_buf[off + 3] = 0;
+}
+
+static void hws_avoid_ipv6_split(struct mlx5hws_match_parameters *orig,
+ struct mlx5hws_match_parameters *mask)
+{
+ hws_avoid_ipv6_split_of(orig, mask,
+ __mlx5_dw_off(fte_match_param,
+ outer_headers.src_ipv4_src_ipv6));
+ hws_avoid_ipv6_split_of(orig, mask,
+ __mlx5_dw_off(fte_match_param,
+ outer_headers.dst_ipv4_dst_ipv6));
+ hws_avoid_ipv6_split_of(orig, mask,
+ __mlx5_dw_off(fte_match_param,
+ inner_headers.src_ipv4_src_ipv6));
+ hws_avoid_ipv6_split_of(orig, mask,
+ __mlx5_dw_off(fte_match_param,
+ inner_headers.dst_ipv4_dst_ipv6));
+}
+
+/* Build a subset of the `orig` match parameters into `mask`. This subset is
+ * guaranteed to fit in a single definer and as such is a candidate for being a
+ * part of a complex matcher. Upon successful execution, the match params that
+ * go into `mask` are cleared from `orig`.
+ */
+static int hws_get_simple_params(struct mlx5hws_context *ctx, u8 match_criteria,
+ struct mlx5hws_match_parameters *orig,
+ struct mlx5hws_match_parameters *mask)
+{
+ bool added_inner_ipv, added_outer_ipv;
+ int dword_idx;
+ u32 *backup;
+ int ret;
+
+ dword_idx = hws_get_last_set_dword_idx(orig);
+ /* Nothing to do, we consumed all of the match params before. */
+ if (dword_idx == -1)
+ return 0;
+
+ backup = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ if (!backup)
+ return -ENOMEM;
+
+ while (1) {
+ dword_idx = hws_get_last_set_dword_idx(orig);
+ /* Nothing to do, we consumed all of the original match params
+ * into this subset, which still fits into a single matcher.
+ */
+ if (dword_idx == -1) {
+ ret = 0;
+ goto free_backup;
+ }
+
+ memcpy(backup, mask->match_buf, mask->match_sz);
+
+ /* Try to add this dword to the current subset. */
+ hws_add_dword_to_mask(mask, orig, dword_idx, &added_inner_ipv,
+ &added_outer_ipv);
+
+ if (hws_match_params_exceeds_definer(ctx, match_criteria, mask,
+ false)) {
+ /* We just added a match param that makes the definer
+ * too large. Revert and return what we had before.
+ * Note that we can't just zero out the affected fields,
+ * because it's possible that the dword we're looking at
+ * wasn't zero before (e.g. it included auto-added
+ * matches in IP version. This is why we employ the
+ * rather cumbersome memcpy for backing up.
+ */
+ memcpy(mask->match_buf, backup, mask->match_sz);
+ /* Possible future improvement: We can't add any more
+ * dwords, but it may be possible to squeeze in
+ * individual bytes, as definers have special slots for
+ * those.
+ *
+ * For now, keep the code simple. This results in an
+ * extra submatcher in some cases, but it's good enough.
+ */
+ ret = 0;
+ break;
+ }
+
+ /* The current subset of match params still fits in a single
+ * definer. Remove the dword from the original mask.
+ *
+ * Also remove any explicit match on IP version if we just
+ * included one here. We will still automatically add it to
+ * accompany any IP address fragment, but do not need to
+ * consider it by itself.
+ */
+ hws_remove_dword_from_mask(orig, dword_idx, added_inner_ipv,
+ added_outer_ipv);
+ }
+
+ /* Make sure we have not picked up a single lower dword of an IPv6
+ * address, as the firmware will erroneously treat it as an IPv4
+ * address.
+ */
+ hws_avoid_ipv6_split(orig, mask);
+
+free_backup:
+ kfree(backup);
+
+ return ret;
+}
+
+static int
+hws_bwc_matcher_split_mask(struct mlx5hws_context *ctx, u8 match_criteria,
+ const struct mlx5hws_match_parameters *mask,
+ struct mlx5hws_match_parameters *submasks,
+ int *num_submasks)
+{
+ struct mlx5hws_match_parameters mask_copy;
+ int ret, i = 0;
+
+ mask_copy.match_sz = MLX5_ST_SZ_BYTES(fte_match_param);
+ mask_copy.match_buf = kzalloc(mask_copy.match_sz, GFP_KERNEL);
+ if (!mask_copy.match_buf)
+ return -ENOMEM;
+
+ memcpy(mask_copy.match_buf, mask->match_buf, mask->match_sz);
+
+ while (!hws_match_mask_is_empty(&mask_copy)) {
+ if (i >= MLX5HWS_BWC_COMPLEX_MAX_SUBMATCHERS) {
+ mlx5hws_err(ctx,
+ "Complex matcher: mask too large for %d matchers\n",
+ MLX5HWS_BWC_COMPLEX_MAX_SUBMATCHERS);
+ ret = -E2BIG;
+ goto free_copy;
+ }
+ /* All but the first matcher need to match on register C6 to
+ * connect pieces of the complex rule together.
+ */
+ if (i > 0) {
+ MLX5_SET(fte_match_param, submasks[i].match_buf,
+ misc_parameters_2.metadata_reg_c_6, -1);
+ match_criteria |= MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2;
+ }
+ ret = hws_get_simple_params(ctx, match_criteria, &mask_copy,
+ &submasks[i]);
+ if (ret < 0)
+ goto free_copy;
+ i++;
+ }
+
+ *num_submasks = i;
+ ret = 0;
+
+free_copy:
+ kfree(mask_copy.match_buf);
+
+ return ret;
+}
+
+static struct mlx5hws_table *
+hws_isolated_table_create(const struct mlx5hws_bwc_matcher *cmatcher)
+{
+ struct mlx5hws_bwc_complex_submatcher *first_subm;
+ struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
+ struct mlx5hws_table_attr tbl_attr = {0};
+ struct mlx5hws_table *orig_tbl;
+ struct mlx5hws_context *ctx;
+ struct mlx5hws_table *tbl;
+ int ret;
+
+ first_subm = &cmatcher->complex->submatchers[0];
+ orig_tbl = first_subm->tbl;
+ ctx = orig_tbl->ctx;
+
+ tbl_attr.type = orig_tbl->type;
+ tbl_attr.level = orig_tbl->level;
+ tbl = mlx5hws_table_create(ctx, &tbl_attr);
+ if (!tbl)
+ return ERR_PTR(-EINVAL);
+
+ /* Set the default miss of the isolated table to point
+ * to the end anchor of the original matcher.
+ */
+ mlx5hws_cmd_set_attr_connect_miss_tbl(ctx, tbl->fw_ft_type,
+ tbl->type, &ft_attr);
+ ft_attr.table_miss_id = first_subm->bwc_matcher->matcher->end_ft_id;
+
+ ret = mlx5hws_cmd_flow_table_modify(ctx->mdev, &ft_attr, tbl->ft_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Complex matcher: failed to set isolated tbl default miss\n");
+ goto destroy_tbl;
+ }
+
+ return tbl;
+
+destroy_tbl:
+ mlx5hws_table_destroy(tbl);
+
+ return ERR_PTR(ret);
+}
+
+static int hws_submatcher_init_first(struct mlx5hws_bwc_matcher *cmatcher,
+ struct mlx5hws_table *table, u32 priority,
+ u8 match_criteria,
+ struct mlx5hws_match_parameters *mask)
+{
+ enum mlx5hws_action_type action_types[HWS_NUM_CHAIN_ACTIONS];
+ struct mlx5hws_bwc_complex_submatcher *subm;
+ int ret;
+
+ subm = &cmatcher->complex->submatchers[0];
+
+ /* The first submatcher lives in the original table and does not have an
+ * associated jump to table action. It also points to the outer complex
+ * matcher.
+ */
+ subm->tbl = table;
+ subm->action_tbl = NULL;
+ subm->bwc_matcher = cmatcher;
+
+ action_types[0] = MLX5HWS_ACTION_TYP_MODIFY_HDR;
+ action_types[1] = MLX5HWS_ACTION_TYP_TBL;
+ action_types[2] = MLX5HWS_ACTION_TYP_LAST;
+
+ ret = mlx5hws_bwc_matcher_create_simple(subm->bwc_matcher, subm->tbl,
+ priority, match_criteria, mask,
+ action_types);
+ if (ret)
+ return ret;
+
+ subm->bwc_matcher->matcher_type = MLX5HWS_BWC_MATCHER_COMPLEX_FIRST;
+
+ ret = rhashtable_init(&subm->rules_hash, &hws_rules_hash_params);
+ if (ret)
+ goto destroy_matcher;
+ mutex_init(&subm->hash_lock);
+ ida_init(&subm->chain_ida);
+
+ return 0;
+
+destroy_matcher:
+ mlx5hws_bwc_matcher_destroy_simple(subm->bwc_matcher);
+
+ return ret;
+}
+
+static int hws_submatcher_init(struct mlx5hws_bwc_matcher *cmatcher, int idx,
+ struct mlx5hws_table *table, u32 priority,
+ u8 match_criteria,
+ struct mlx5hws_match_parameters *mask)
+{
+ enum mlx5hws_action_type action_types[HWS_NUM_CHAIN_ACTIONS];
+ struct mlx5hws_bwc_complex_submatcher *subm;
+ bool is_last;
+ int ret;
+
+ if (!idx)
+ return hws_submatcher_init_first(cmatcher, table, priority,
+ match_criteria, mask);
+
+ subm = &cmatcher->complex->submatchers[idx];
+ is_last = idx == cmatcher->complex->num_submatchers - 1;
+
+ subm->tbl = hws_isolated_table_create(cmatcher);
+ if (IS_ERR(subm->tbl))
+ return PTR_ERR(subm->tbl);
+
+ subm->action_tbl =
+ mlx5hws_action_create_dest_table(subm->tbl->ctx, subm->tbl,
+ MLX5HWS_ACTION_FLAG_HWS_FDB);
+ if (!subm->action_tbl) {
+ ret = -EINVAL;
+ goto destroy_tbl;
+ }
+
+ subm->bwc_matcher = kzalloc(sizeof(*subm->bwc_matcher), GFP_KERNEL);
+ if (!subm->bwc_matcher) {
+ ret = -ENOMEM;
+ goto destroy_action;
+ }
+
+ /* Every matcher other than the first also matches on register C6 to
+ * bind subrules together in the complex rule using the chain ids.
+ */
+ match_criteria |= MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2;
+
+ action_types[0] = MLX5HWS_ACTION_TYP_MODIFY_HDR;
+ action_types[1] = MLX5HWS_ACTION_TYP_TBL;
+ action_types[2] = MLX5HWS_ACTION_TYP_LAST;
+
+ /* Every matcher other than the last sets register C6 and jumps to the
+ * next submatcher's table. The final submatcher will use the
+ * user-supplied actions and will attach an action template at rule
+ * insertion time.
+ */
+ ret = mlx5hws_bwc_matcher_create_simple(subm->bwc_matcher, subm->tbl,
+ priority, match_criteria, mask,
+ is_last ? NULL : action_types);
+ if (ret)
+ goto free_matcher;
+
+ subm->bwc_matcher->matcher_type =
+ MLX5HWS_BWC_MATCHER_COMPLEX_SUBMATCHER;
+
+ ret = rhashtable_init(&subm->rules_hash, &hws_rules_hash_params);
+ if (ret)
+ goto destroy_matcher;
+ mutex_init(&subm->hash_lock);
+ ida_init(&subm->chain_ida);
+
+ return 0;
+
+destroy_matcher:
+ mlx5hws_bwc_matcher_destroy_simple(subm->bwc_matcher);
+free_matcher:
+ kfree(subm->bwc_matcher);
+destroy_action:
+ mlx5hws_action_destroy(subm->action_tbl);
+destroy_tbl:
+ mlx5hws_table_destroy(subm->tbl);
+
+ return ret;
+}
+
+static void hws_submatcher_destroy(struct mlx5hws_bwc_matcher *cmatcher,
+ int idx)
+{
+ struct mlx5hws_bwc_complex_submatcher *subm;
+
+ subm = &cmatcher->complex->submatchers[idx];
+
+ ida_destroy(&subm->chain_ida);
+ mutex_destroy(&subm->hash_lock);
+ rhashtable_destroy(&subm->rules_hash);
+
+ if (subm->bwc_matcher) {
+ mlx5hws_bwc_matcher_destroy_simple(subm->bwc_matcher);
+ if (idx)
+ kfree(subm->bwc_matcher);
+ }
+
+ /* We own all of the isolated tables, but not the original one. */
+ if (idx) {
+ mlx5hws_action_destroy(subm->action_tbl);
+ mlx5hws_table_destroy(subm->tbl);
+ }
+}
+
+static int
+hws_complex_data_actions_init(struct mlx5hws_bwc_matcher_complex_data *cdata)
+{
+ struct mlx5hws_context *ctx = cdata->submatchers[0].tbl->ctx;
+ u8 modify_hdr_action[MLX5_ST_SZ_BYTES(set_action_in)] = {0};
+ struct mlx5hws_action_mh_pattern ptrn;
+ int ret = 0;
+
+ /* Create modify header action to set REG_C_6 */
+ MLX5_SET(set_action_in, modify_hdr_action,
+ action_type, MLX5_MODIFICATION_TYPE_SET);
+ MLX5_SET(set_action_in, modify_hdr_action,
+ field, MLX5_MODI_META_REG_C_6);
+ MLX5_SET(set_action_in, modify_hdr_action,
+ length, 0); /* zero means length of 32 */
+ MLX5_SET(set_action_in, modify_hdr_action, offset, 0);
+ MLX5_SET(set_action_in, modify_hdr_action, data, 0);
+
+ ptrn.data = (void *)modify_hdr_action;
+ ptrn.sz = MLX5HWS_ACTION_DOUBLE_SIZE;
+
+ cdata->action_metadata =
+ mlx5hws_action_create_modify_header(ctx, 1, &ptrn, 0,
+ MLX5HWS_ACTION_FLAG_HWS_FDB);
+ if (!cdata->action_metadata) {
+ mlx5hws_err(ctx, "Complex matcher: failed to create set reg C6 action\n");
+ return -EINVAL;
+ }
+
+ /* Create last action */
+ cdata->action_last =
+ mlx5hws_action_create_last(ctx, MLX5HWS_ACTION_FLAG_HWS_FDB);
+ if (!cdata->action_last) {
+ mlx5hws_err(ctx, "Complex matcher: failed to create last action\n");
+ ret = -EINVAL;
+ goto destroy_action_metadata;
+ }
+
+ return 0;
+
+destroy_action_metadata:
+ mlx5hws_action_destroy(cdata->action_metadata);
+
+ return ret;
+}
+
+static void
+hws_complex_data_actions_destroy(struct mlx5hws_bwc_matcher_complex_data *cdata)
+{
+ mlx5hws_action_destroy(cdata->action_last);
+ mlx5hws_action_destroy(cdata->action_metadata);
+}
+
+int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_table *table,
+ u32 priority, u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask)
+{
+ struct mlx5hws_match_parameters
+ submasks[MLX5HWS_BWC_COMPLEX_MAX_SUBMATCHERS] = {0};
+ struct mlx5hws_bwc_matcher_complex_data *cdata;
+ struct mlx5hws_context *ctx = table->ctx;
+ int num_submatchers;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(submasks); i++) {
+ submasks[i].match_sz = MLX5_ST_SZ_BYTES(fte_match_param);
+ submasks[i].match_buf = kzalloc(submasks[i].match_sz,
+ GFP_KERNEL);
+ if (!submasks[i].match_buf) {
+ ret = -ENOMEM;
+ goto free_submasks;
+ }
+ }
+
+ ret = hws_bwc_matcher_split_mask(ctx, match_criteria_enable, mask,
+ submasks, &num_submatchers);
+ if (ret)
+ goto free_submasks;
+
+ cdata = kzalloc(sizeof(*cdata), GFP_KERNEL);
+ if (!cdata) {
+ ret = -ENOMEM;
+ goto free_submasks;
+ }
+
+ bwc_matcher->complex = cdata;
+ cdata->num_submatchers = num_submatchers;
+
+ for (i = 0; i < num_submatchers; i++) {
+ ret = hws_submatcher_init(bwc_matcher, i, table, priority,
+ match_criteria_enable, &submasks[i]);
+ if (ret)
+ goto destroy_submatchers;
+ }
+
+ ret = hws_complex_data_actions_init(cdata);
+ if (ret)
+ goto destroy_submatchers;
+
+ ret = 0;
+ goto free_submasks;
+
+destroy_submatchers:
+ while (i--)
+ hws_submatcher_destroy(bwc_matcher, i);
+ kfree(cdata);
+ bwc_matcher->complex = NULL;
+
+free_submasks:
+ for (i = 0; i < ARRAY_SIZE(submasks); i++)
+ kfree(submasks[i].match_buf);
+
+ return ret;
+}
+
+void
+mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ int i;
+
+ hws_complex_data_actions_destroy(bwc_matcher->complex);
+ for (i = 0; i < bwc_matcher->complex->num_submatchers; i++)
+ hws_submatcher_destroy(bwc_matcher, i);
+ kfree(bwc_matcher->complex);
+ bwc_matcher->complex = NULL;
+}
+
+static int
+hws_complex_get_subrule_data(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_bwc_complex_submatcher *subm,
+ u32 *match_params)
+__must_hold(&subm->hash_lock)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = subm->bwc_matcher;
+ struct mlx5hws_bwc_complex_subrule_data *sr_data, *old_data;
+ struct mlx5hws_match_template *mt;
+ int ret;
+
+ sr_data = kzalloc(sizeof(*sr_data), GFP_KERNEL);
+ if (!sr_data)
+ return -ENOMEM;
+
+ ret = ida_alloc(&subm->chain_ida, GFP_KERNEL);
+ if (ret < 0)
+ goto free_sr_data;
+ sr_data->chain_id = ret;
+
+ refcount_set(&sr_data->refcount, 1);
+
+ mt = bwc_matcher->matcher->mt;
+ mlx5hws_definer_create_tag(match_params, mt->fc, mt->fc_sz,
+ (u8 *)&sr_data->match_tag);
+
+ old_data = rhashtable_lookup_get_insert_fast(&subm->rules_hash,
+ &sr_data->hash_node,
+ hws_rules_hash_params);
+ if (IS_ERR(old_data)) {
+ ret = PTR_ERR(old_data);
+ goto free_ida;
+ }
+
+ if (old_data) {
+ /* Rule with the same tag already exists - update refcount */
+ refcount_inc(&old_data->refcount);
+ /* Let the new rule use the same tag as the existing rule.
+ * Note that we don't have any indication for the rule creation
+ * process that a rule with similar matching params already
+ * exists - no harm done when this rule is overwritten by
+ * the same STE.
+ * There's some performance advantage in skipping such cases,
+ * so this is left for future optimizations.
+ */
+ bwc_rule->subrule_data = old_data;
+ ret = 0;
+ goto free_ida;
+ }
+
+ bwc_rule->subrule_data = sr_data;
+ return 0;
+
+free_ida:
+ ida_free(&subm->chain_ida, sr_data->chain_id);
+free_sr_data:
+ kfree(sr_data);
+
+ return ret;
+}
+
+static void
+hws_complex_put_subrule_data(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_bwc_complex_submatcher *subm,
+ bool *is_last_rule)
+__must_hold(&subm->hash_lock)
+{
+ struct mlx5hws_bwc_complex_subrule_data *sr_data;
+
+ if (is_last_rule)
+ *is_last_rule = false;
+
+ sr_data = bwc_rule->subrule_data;
+ if (refcount_dec_and_test(&sr_data->refcount)) {
+ rhashtable_remove_fast(&subm->rules_hash,
+ &sr_data->hash_node,
+ hws_rules_hash_params);
+ ida_free(&subm->chain_ida, sr_data->chain_id);
+ kfree(sr_data);
+ if (is_last_rule)
+ *is_last_rule = true;
+ }
+
+ bwc_rule->subrule_data = NULL;
+}
+
+static int hws_complex_subrule_create(struct mlx5hws_bwc_matcher *cmatcher,
+ struct mlx5hws_bwc_rule *subrule,
+ u32 *match_params, u32 flow_source,
+ int bwc_queue_idx, int subm_idx,
+ struct mlx5hws_rule_action *actions,
+ u32 *chain_id)
+{
+ struct mlx5hws_rule_action chain_actions[HWS_NUM_CHAIN_ACTIONS] = {0};
+ u8 modify_hdr_action[MLX5_ST_SZ_BYTES(set_action_in)] = {0};
+ struct mlx5hws_bwc_matcher_complex_data *cdata;
+ struct mlx5hws_bwc_complex_submatcher *subm;
+ int ret;
+
+ cdata = cmatcher->complex;
+ subm = &cdata->submatchers[subm_idx];
+
+ mutex_lock(&subm->hash_lock);
+
+ ret = hws_complex_get_subrule_data(subrule, subm, match_params);
+ if (ret)
+ goto unlock;
+
+ *chain_id = subrule->subrule_data->chain_id;
+
+ if (!actions) {
+ MLX5_SET(set_action_in, modify_hdr_action, data, *chain_id);
+ chain_actions[0].action = cdata->action_metadata;
+ chain_actions[0].modify_header.data = modify_hdr_action;
+ chain_actions[1].action =
+ cdata->submatchers[subm_idx + 1].action_tbl;
+ chain_actions[2].action = cdata->action_last;
+ actions = chain_actions;
+ }
+
+ ret = mlx5hws_bwc_rule_create_simple(subrule, match_params, actions,
+ flow_source, bwc_queue_idx);
+ if (ret)
+ goto put_subrule_data;
+
+ ret = 0;
+ goto unlock;
+
+put_subrule_data:
+ hws_complex_put_subrule_data(subrule, subm, NULL);
+unlock:
+ mutex_unlock(&subm->hash_lock);
+
+ return ret;
+}
+
+static int hws_complex_subrule_destroy(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_bwc_matcher *cmatcher,
+ int subm_idx)
+{
+ struct mlx5hws_bwc_matcher_complex_data *cdata;
+ struct mlx5hws_bwc_complex_submatcher *subm;
+ struct mlx5hws_context *ctx;
+ bool is_last_rule;
+ int ret = 0;
+
+ cdata = cmatcher->complex;
+ subm = &cdata->submatchers[subm_idx];
+ ctx = subm->tbl->ctx;
+
+ mutex_lock(&subm->hash_lock);
+
+ hws_complex_put_subrule_data(bwc_rule, subm, &is_last_rule);
+ bwc_rule->rule->skip_delete = !is_last_rule;
+ ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule);
+ if (unlikely(ret))
+ mlx5hws_err(ctx,
+ "Complex rule: failed to delete subrule %d (%d)\n",
+ subm_idx, ret);
+
+ if (subm_idx)
+ mlx5hws_bwc_rule_free(bwc_rule);
+
+ mutex_unlock(&subm->hash_lock);
+
+ return ret;
+}
+
+int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_match_parameters *params,
+ u32 flow_source,
+ struct mlx5hws_rule_action rule_actions[],
+ u16 bwc_queue_idx)
+{
+ struct mlx5hws_bwc_rule
+ *subrules[MLX5HWS_BWC_COMPLEX_MAX_SUBMATCHERS] = {0};
+ struct mlx5hws_bwc_matcher *cmatcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_bwc_matcher_complex_data *cdata;
+ struct mlx5hws_rule_action *subrule_actions;
+ struct mlx5hws_bwc_complex_submatcher *subm;
+ struct mlx5hws_bwc_rule *subrule;
+ u32 *match_params;
+ u32 chain_id;
+ int i, ret;
+
+ cdata = cmatcher->complex;
+ if (!cdata)
+ return -EINVAL;
+
+ /* Duplicate user data because we will modify it to set register C6
+ * values. For the same reason, make sure that we allocate a full
+ * match_param even if the user gave us fewer bytes. We need to ensure
+ * there is space for the match on C6.
+ */
+ match_params = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ if (!match_params)
+ return -ENOMEM;
+
+ memcpy(match_params, params->match_buf, params->match_sz);
+
+ ret = hws_complex_subrule_create(cmatcher, bwc_rule, match_params,
+ flow_source, bwc_queue_idx, 0,
+ NULL, &chain_id);
+ if (ret)
+ goto free_match_params;
+ subrules[0] = bwc_rule;
+
+ for (i = 1; i < cdata->num_submatchers; i++) {
+ subm = &cdata->submatchers[i];
+ subrule = mlx5hws_bwc_rule_alloc(subm->bwc_matcher);
+ if (!subrule) {
+ ret = -ENOMEM;
+ goto destroy_subrules;
+ }
+
+ /* Match on the previous subrule's chain_id. This is how
+ * subrules are connected in steering.
+ */
+ MLX5_SET(fte_match_param, match_params,
+ misc_parameters_2.metadata_reg_c_6, chain_id);
+
+ /* The last subrule uses the complex rule's user-specified
+ * actions. Everything else uses the chaining rules based on the
+ * next table and chain_id.
+ */
+ subrule_actions =
+ i == cdata->num_submatchers - 1 ? rule_actions : NULL;
+
+ ret = hws_complex_subrule_create(cmatcher, subrule,
+ match_params, flow_source,
+ bwc_queue_idx, i,
+ subrule_actions, &chain_id);
+ if (ret) {
+ mlx5hws_bwc_rule_free(subrule);
+ goto destroy_subrules;
+ }
+
+ subrules[i] = subrule;
+ }
+
+ for (i = 0; i < cdata->num_submatchers - 1; i++)
+ subrules[i]->next_subrule = subrules[i + 1];
+
+ kfree(match_params);
+
+ return 0;
+
+destroy_subrules:
+ while (i--)
+ hws_complex_subrule_destroy(subrules[i], cmatcher, i);
+free_match_params:
+ kfree(match_params);
+
+ return ret;
+}
+
+int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_bwc_rule
+ *subrules[MLX5HWS_BWC_COMPLEX_MAX_SUBMATCHERS] = {0};
+ struct mlx5hws_bwc_matcher_complex_data *cdata;
+ int i, err, ret_val;
+
+ cdata = bwc_matcher->complex;
+
+ /* Construct a list of all the subrules we need to destroy. */
+ subrules[0] = bwc_rule;
+ for (i = 1; i < cdata->num_submatchers; i++)
+ subrules[i] = subrules[i - 1]->next_subrule;
+
+ ret_val = 0;
+ for (i = 0; i < cdata->num_submatchers; i++) {
+ err = hws_complex_subrule_destroy(subrules[i], bwc_matcher, i);
+ /* If something goes wrong, plow along to destroy all of the
+ * subrules but return an error upstack.
+ */
+ if (unlikely(err))
+ ret_val = err;
+ }
+
+ return ret_val;
+}
+
+static void
+hws_bwc_matcher_init_move(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+ struct mlx5hws_bwc_rule *bwc_rule;
+ struct list_head *rules_list;
+ int i;
+
+ for (i = 0; i < bwc_queues; i++) {
+ rules_list = &bwc_matcher->rules[i];
+ if (list_empty(rules_list))
+ continue;
+
+ list_for_each_entry(bwc_rule, rules_list, list_node) {
+ if (!bwc_rule->subrule_data)
+ continue;
+ bwc_rule->subrule_data->was_moved = false;
+ }
+ }
+}
+
+int mlx5hws_bwc_matcher_complex_move(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_matcher *matcher = bwc_matcher->matcher;
+ u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+ struct mlx5hws_bwc_rule *tmp_bwc_rule;
+ struct mlx5hws_rule_attr rule_attr;
+ int move_error = 0, poll_error = 0;
+ struct mlx5hws_rule *tmp_rule;
+ struct list_head *rules_list;
+ u32 expected_completions = 1;
+ int i, ret = 0;
+
+ hws_bwc_matcher_init_move(bwc_matcher);
+
+ mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr);
+
+ for (i = 0; i < bwc_queues; i++) {
+ rules_list = &bwc_matcher->rules[i];
+ if (list_empty(rules_list))
+ continue;
+
+ rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
+
+ list_for_each_entry(tmp_bwc_rule, rules_list, list_node) {
+ /* Check if a rule with similar tag has already
+ * been moved.
+ */
+ if (tmp_bwc_rule->subrule_data->was_moved) {
+ /* This rule is a duplicate of a rule with an
+ * identical tag that has already been moved
+ * earlier. Just update this rule's RTCs.
+ */
+ tmp_bwc_rule->rule->rtc_0 =
+ tmp_bwc_rule->subrule_data->rtc_0;
+ tmp_bwc_rule->rule->rtc_1 =
+ tmp_bwc_rule->subrule_data->rtc_1;
+ tmp_bwc_rule->rule->matcher =
+ tmp_bwc_rule->rule->matcher->resize_dst;
+ continue;
+ }
+
+ /* First time we're moving rule with this tag.
+ * Move it for real.
+ */
+ tmp_rule = tmp_bwc_rule->rule;
+ tmp_rule->skip_delete = false;
+ ret = mlx5hws_matcher_resize_rule_move(matcher,
+ tmp_rule,
+ &rule_attr);
+ if (unlikely(ret)) {
+ if (!move_error) {
+ mlx5hws_err(ctx,
+ "Moving complex BWC rule: move failed (%d), attempting to move rest of the rules\n",
+ ret);
+ move_error = ret;
+ }
+ /* Rule wasn't queued, no need to poll */
+ continue;
+ }
+
+ expected_completions = 1;
+ ret = mlx5hws_bwc_queue_poll(ctx,
+ rule_attr.queue_id,
+ &expected_completions,
+ true);
+ if (unlikely(ret)) {
+ if (ret == -ETIMEDOUT) {
+ mlx5hws_err(ctx,
+ "Moving complex BWC rule: timeout polling for completions (%d), aborting rehash\n",
+ ret);
+ return ret;
+ }
+ if (!poll_error) {
+ mlx5hws_err(ctx,
+ "Moving complex BWC rule: polling for completions failed (%d), attempting to move rest of the rules\n",
+ ret);
+ poll_error = ret;
+ }
+ }
+
+ /* Done moving the rule to the new matcher,
+ * now update RTCs for all the duplicated rules.
+ */
+ tmp_bwc_rule->subrule_data->rtc_0 =
+ tmp_bwc_rule->rule->rtc_0;
+ tmp_bwc_rule->subrule_data->rtc_1 =
+ tmp_bwc_rule->rule->rtc_1;
+
+ tmp_bwc_rule->subrule_data->was_moved = true;
+ }
+ }
+
+ /* Return the first error that happened */
+ if (unlikely(move_error))
+ return move_error;
+ if (unlikely(poll_error))
+ return poll_error;
+
+ return ret;
+}
+
+int
+mlx5hws_bwc_matcher_complex_move_first(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_bwc_matcher_complex_data *cdata;
+ struct mlx5hws_table *isolated_tbl;
+ u32 end_ft_id;
+ int i, ret;
+
+ cdata = bwc_matcher->complex;
+
+ /* We are rehashing the first submatcher. We need to update the
+ * subsequent submatchers to point to the end_ft of this new matcher.
+ * This needs to be done before moving any rules to prevent possible
+ * steering loops.
+ */
+ end_ft_id = bwc_matcher->matcher->resize_dst->end_ft_id;
+ for (i = 1; i < cdata->num_submatchers; i++) {
+ isolated_tbl = cdata->submatchers[i].tbl;
+ ret = mlx5hws_matcher_update_end_ft_isolated(isolated_tbl,
+ end_ft_id);
+ if (ret) {
+ mlx5hws_err(ctx,
+ "Complex matcher: failed updating end_ft of isolated matcher (%d)\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return mlx5hws_bwc_matcher_complex_move(bwc_matcher);
+}
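
As an illustration (not part of the patch), here is how two submatchers cooperate on a single complex rule in the two-slice case, following the create path above:

/*
 * subrule[0], in submatchers[0] (lives in the original table):
 *   match:   the first definer-sized slice of the user's mask
 *   actions: set REG_C_6 = chain_id       (cdata->action_metadata)
 *            jump to submatchers[1].tbl   (submatchers[1].action_tbl)
 *            last                         (cdata->action_last)
 *
 * subrule[1], in submatchers[1] (lives in an isolated table):
 *   match:   the remaining slice of the mask + REG_C_6 == chain_id
 *   actions: the caller's rule_actions[]
 *
 * chain_id here comes from submatchers[0]'s chain_ida and is shared, through
 * the refcounted subrule_data, by any subrules whose match tags collide.
 */
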
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h
new file mode 100644
index 000000000000..d07de631ce9f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef HWS_BWC_COMPLEX_H_
+#define HWS_BWC_COMPLEX_H_
+
+#define MLX5HWS_BWC_COMPLEX_MAX_SUBMATCHERS 4
+
+/* A matcher can't contain two rules with the same match tag, but it is possible
+ * that two different complex rules' subrules have the same match tag. In that
+ * case, those subrules correspond to a single rule, and we need to refcount.
+ */
+struct mlx5hws_bwc_complex_subrule_data {
+ struct mlx5hws_rule_match_tag match_tag;
+ refcount_t refcount;
+ /* The chain_id is what glues individual subrules into larger complex
+ * rules. It is the value that this subrule writes to register C6, and
+ * that the next subrule matches against.
+ */
+ u32 chain_id;
+ u32 rtc_0;
+ u32 rtc_1;
+ /* During rehash we iterate through all the subrules to move them. But
+ * two or more subrules can share the same physical rule in the
+ * submatcher, so we use `was_moved` to keep track if a given rule was
+ * already moved.
+ */
+ bool was_moved;
+ struct rhash_head hash_node;
+};
+
+struct mlx5hws_bwc_complex_submatcher {
+ /* Isolated table that the matcher lives in. Not set for the first
+ * matcher, which lives in the original table.
+ */
+ struct mlx5hws_table *tbl;
+ /* Jump-to-table action targeting `tbl`, used by the previous submatcher's
+ * rules to chain into this one. This is set in all submatchers but the first.
+ */
+ struct mlx5hws_action *action_tbl;
+ /* This submatcher's simple matcher. The first submatcher points to the
+ * outer (complex) matcher.
+ */
+ struct mlx5hws_bwc_matcher *bwc_matcher;
+ struct rhashtable rules_hash;
+ struct ida chain_ida;
+ struct mutex hash_lock; /* Protect the hash and ida. */
+};
+
+struct mlx5hws_bwc_matcher_complex_data {
+ struct mlx5hws_bwc_complex_submatcher
+ submatchers[MLX5HWS_BWC_COMPLEX_MAX_SUBMATCHERS];
+ int num_submatchers;
+ /* Actions used by all but the last submatcher to point to the next
+ * submatcher in the chain. The last submatcher uses the action template
+ * from the complex matcher, to perform the actions that the user
+ * originally requested.
+ */
+ struct mlx5hws_action *action_metadata;
+ struct mlx5hws_action *action_last;
+};
+
+bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask);
+
+int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_table *table,
+ u32 priority,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask);
+
+void mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+int mlx5hws_bwc_matcher_complex_move(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+int
+mlx5hws_bwc_matcher_complex_move_first(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_match_parameters *params,
+ u32 flow_source,
+ struct mlx5hws_rule_action rule_actions[],
+ u16 bwc_queue_idx);
+
+int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule);
+
+#endif /* HWS_BWC_COMPLEX_H_ */
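
MLX5HWS_BWC_COMPLEX_MAX_SUBMATCHERS bounds how many definer-sized slices a mask may be cut into. As a rough illustration of the split that hws_bwc_matcher_split_mask() produces (the field groupings below are hypothetical; the real cut points depend on the definer layout):

/*
 * Hypothetical split of a mask too wide for one definer:
 *
 *   submasks[0]: outer headers (5-tuple, VLANs, ...)
 *   submasks[1]: REG_C_6 + inner headers
 *   submasks[2]: REG_C_6 + remaining misc parameters
 *
 * Every submask after the first gets a full match on REG_C_6 added, which is
 * how the split marks the chaining dependency between consecutive submatchers.
 */
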
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
index 2c7b14172049..f22eaf506d28 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#include "mlx5hws_internal.h"
+#include "internal.h"
static enum mlx5_ifc_flow_destination_type
hws_cmd_dest_type_to_ifc_dest_type(enum mlx5_flow_destination_type type)
@@ -55,6 +55,7 @@ int mlx5hws_cmd_flow_table_create(struct mlx5_core_dev *mdev,
MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
MLX5_SET(create_flow_table_in, in, table_type, ft_attr->type);
+ MLX5_SET(create_flow_table_in, in, uid, ft_attr->uid);
ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level);
@@ -130,12 +131,6 @@ int mlx5hws_cmd_flow_table_destroy(struct mlx5_core_dev *mdev,
return mlx5_cmd_exec_in(mdev, destroy_flow_table, in);
}
-void mlx5hws_cmd_alias_flow_table_destroy(struct mlx5_core_dev *mdev,
- u32 table_id)
-{
- hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_FT_ALIAS, table_id);
-}
-
static int hws_cmd_flow_group_create(struct mlx5_core_dev *mdev,
struct mlx5hws_cmd_fg_attr *fg_attr,
u32 *group_id)
@@ -257,6 +252,12 @@ int mlx5hws_cmd_set_fte(struct mlx5_core_dev *mdev,
dest->ext_reformat_id);
}
break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
+ MLX5_SET(dest_format, in_dests,
+ destination_type, ifc_dest_type);
+ MLX5_SET(dest_format, in_dests, destination_id,
+ dest->destination_id);
+ break;
default:
ret = -EOPNOTSUPP;
goto out;
@@ -359,7 +360,7 @@ void mlx5hws_cmd_set_attr_connect_miss_tbl(struct mlx5hws_context *ctx,
ft_attr->type = fw_ft_type;
ft_attr->table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL;
- default_miss_tbl = ctx->common_res[type].default_miss->ft_id;
+ default_miss_tbl = ctx->common_res.default_miss->ft_id;
if (!default_miss_tbl) {
pr_warn("HWS: no flow table ID for default miss\n");
return;
@@ -406,7 +407,6 @@ int mlx5hws_cmd_rtc_create(struct mlx5_core_dev *mdev,
MLX5_SET(rtc, attr, match_definer_1, rtc_attr->match_definer_1);
MLX5_SET(rtc, attr, stc_id, rtc_attr->stc_base);
MLX5_SET(rtc, attr, ste_table_base_id, rtc_attr->ste_base);
- MLX5_SET(rtc, attr, ste_table_offset, rtc_attr->ste_offset);
MLX5_SET(rtc, attr, miss_flow_table_id, rtc_attr->miss_ft_id);
MLX5_SET(rtc, attr, reparse_mode, rtc_attr->reparse_mode);
@@ -622,12 +622,12 @@ int mlx5hws_cmd_arg_create(struct mlx5_core_dev *mdev,
u32 pd,
u32 *arg_id)
{
+ u32 in[MLX5_ST_SZ_DW(create_modify_header_arg_in)] = {0};
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
- u32 in[MLX5_ST_SZ_DW(create_arg_in)] = {0};
void *attr;
int ret;
- attr = MLX5_ADDR_OF(create_arg_in, in, hdr);
+ attr = MLX5_ADDR_OF(create_modify_header_arg_in, in, hdr);
MLX5_SET(general_obj_in_cmd_hdr,
attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr,
@@ -635,8 +635,8 @@ int mlx5hws_cmd_arg_create(struct mlx5_core_dev *mdev,
MLX5_SET(general_obj_in_cmd_hdr,
attr, op_param.create.log_obj_range, log_obj_range);
- attr = MLX5_ADDR_OF(create_arg_in, in, arg);
- MLX5_SET(arg, attr, access_pd, pd);
+ attr = MLX5_ADDR_OF(create_modify_header_arg_in, in, arg);
+ MLX5_SET(modify_header_arg, attr, access_pd, pd);
ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (ret) {
@@ -812,7 +812,7 @@ int mlx5hws_cmd_packet_reformat_create(struct mlx5_core_dev *mdev,
struct mlx5hws_cmd_packet_reformat_create_attr *attr,
u32 *reformat_id)
{
- u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {0};
size_t insz, cmd_data_sz, cmd_total_sz;
void *prctx;
void *pdata;
@@ -845,7 +845,7 @@ int mlx5hws_cmd_packet_reformat_create(struct mlx5_core_dev *mdev,
goto out;
}
- *reformat_id = MLX5_GET(alloc_packet_reformat_out, out, packet_reformat_id);
+ *reformat_id = MLX5_GET(alloc_packet_reformat_context_out, out, packet_reformat_id);
out:
kfree(in);
return ret;
@@ -854,13 +854,13 @@ out:
int mlx5hws_cmd_packet_reformat_destroy(struct mlx5_core_dev *mdev,
u32 reformat_id)
{
- u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {0};
int ret;
- MLX5_SET(dealloc_packet_reformat_in, in, opcode,
+ MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
- MLX5_SET(dealloc_packet_reformat_in, in,
+ MLX5_SET(dealloc_packet_reformat_context_in, in,
packet_reformat_id, reformat_id);
ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
@@ -889,73 +889,6 @@ int mlx5hws_cmd_sq_modify_rdy(struct mlx5_core_dev *mdev, u32 sqn)
return ret;
}
-int mlx5hws_cmd_allow_other_vhca_access(struct mlx5_core_dev *mdev,
- struct mlx5hws_cmd_allow_other_vhca_access_attr *attr)
-{
- u32 out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {0};
- void *key;
- int ret;
-
- MLX5_SET(allow_other_vhca_access_in,
- in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS);
- MLX5_SET(allow_other_vhca_access_in,
- in, object_type_to_be_accessed, attr->obj_type);
- MLX5_SET(allow_other_vhca_access_in,
- in, object_id_to_be_accessed, attr->obj_id);
-
- key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key);
- memcpy(key, attr->access_key, sizeof(attr->access_key));
-
- ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
- if (ret)
- mlx5_core_err(mdev, "Failed to execute ALLOW_OTHER_VHCA_ACCESS command\n");
-
- return ret;
-}
-
-int mlx5hws_cmd_alias_obj_create(struct mlx5_core_dev *mdev,
- struct mlx5hws_cmd_alias_obj_create_attr *alias_attr,
- u32 *obj_id)
-{
- u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
- u32 in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {0};
- void *attr;
- void *key;
- int ret;
-
- attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr);
- MLX5_SET(general_obj_in_cmd_hdr,
- attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
- MLX5_SET(general_obj_in_cmd_hdr,
- attr, obj_type, alias_attr->obj_type);
- MLX5_SET(general_obj_in_cmd_hdr, attr, op_param.create.alias_object, 1);
-
- attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx);
- MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id);
- MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id);
-
- key = MLX5_ADDR_OF(alias_context, attr, access_key);
- memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key));
-
- ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
- if (ret) {
- mlx5_core_err(mdev, "Failed to create ALIAS OBJ\n");
- goto out;
- }
-
- *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
-out:
- return ret;
-}
-
-int mlx5hws_cmd_alias_obj_destroy(struct mlx5_core_dev *mdev,
- u16 obj_type,
- u32 obj_id)
-{
- return hws_cmd_general_obj_destroy(mdev, obj_type, obj_id);
-}
-
int mlx5hws_cmd_generate_wqe(struct mlx5_core_dev *mdev,
struct mlx5hws_cmd_generate_wqe_attr *attr,
struct mlx5_cqe64 *ret_cqe)
@@ -1267,34 +1200,20 @@ out:
int mlx5hws_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_function,
u16 vport_number, u16 *gvmi)
{
- bool ec_vf_func = other_function ? mlx5_core_is_ec_vf_vport(mdev, vport_number) : false;
- u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
- int out_size;
- void *out;
int err;
- out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- out = kzalloc(out_size, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
-
- MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
- MLX5_SET(query_hca_cap_in, in, other_function, other_function);
- MLX5_SET(query_hca_cap_in, in, function_id,
- mlx5_vport_to_func_id(mdev, vport_number, ec_vf_func));
- MLX5_SET(query_hca_cap_in, in, ec_vf_function, ec_vf_func);
- MLX5_SET(query_hca_cap_in, in, op_mod,
- MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 | HCA_CAP_OPMOD_GET_CUR);
+ if (!other_function) {
+ /* self vhca_id */
+ *gvmi = MLX5_CAP_GEN(mdev, vhca_id);
+ return 0;
+ }
- err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
+ err = mlx5_vport_get_vhca_id(mdev, vport_number, gvmi);
if (err) {
- kfree(out);
+ mlx5_core_err(mdev, "Failed to get vport vhca id for vport %d\n",
+ vport_number);
return err;
}
- *gvmi = MLX5_GET(query_hca_cap_out, out, capability.cmd_hca_cap.vhca_id);
-
- kfree(out);
-
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
index 2fbcf4ff571a..122ccc671628 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#ifndef MLX5HWS_CMD_H_
-#define MLX5HWS_CMD_H_
+#ifndef HWS_CMD_H_
+#define HWS_CMD_H_
#define WIRE_PORT 0xFFFF
@@ -36,6 +36,7 @@ struct mlx5hws_cmd_set_fte_attr {
struct mlx5hws_cmd_ft_create_attr {
u8 type;
u8 level;
+ u16 uid;
bool rtc_valid;
bool decap_en;
bool reformat_en;
@@ -63,14 +64,13 @@ struct mlx5hws_cmd_forward_tbl {
u8 type;
u32 ft_id;
u32 fg_id;
- u32 refcount;
+ u32 refcount; /* protected by context ctrl lock */
};
struct mlx5hws_cmd_rtc_create_attr {
u32 pd;
u32 stc_base;
u32 ste_base;
- u32 ste_offset;
u32 miss_ft_id;
bool fw_gen_wqe;
u8 update_index_mode;
@@ -258,9 +258,6 @@ int mlx5hws_cmd_flow_table_query(struct mlx5_core_dev *mdev,
int mlx5hws_cmd_flow_table_destroy(struct mlx5_core_dev *mdev,
u8 fw_ft_type, u32 table_id);
-void mlx5hws_cmd_alias_flow_table_destroy(struct mlx5_core_dev *mdev,
- u32 table_id);
-
int mlx5hws_cmd_rtc_create(struct mlx5_core_dev *mdev,
struct mlx5hws_cmd_rtc_create_attr *rtc_attr,
u32 *rtc_id);
@@ -334,14 +331,6 @@ mlx5hws_cmd_forward_tbl_create(struct mlx5_core_dev *mdev,
void mlx5hws_cmd_forward_tbl_destroy(struct mlx5_core_dev *mdev,
struct mlx5hws_cmd_forward_tbl *tbl);
-int mlx5hws_cmd_alias_obj_create(struct mlx5_core_dev *mdev,
- struct mlx5hws_cmd_alias_obj_create_attr *alias_attr,
- u32 *obj_id);
-
-int mlx5hws_cmd_alias_obj_destroy(struct mlx5_core_dev *mdev,
- u16 obj_type,
- u32 obj_id);
-
int mlx5hws_cmd_sq_modify_rdy(struct mlx5_core_dev *mdev, u32 sqn);
int mlx5hws_cmd_query_caps(struct mlx5_core_dev *mdev,
@@ -352,10 +341,7 @@ void mlx5hws_cmd_set_attr_connect_miss_tbl(struct mlx5hws_context *ctx,
enum mlx5hws_table_type type,
struct mlx5hws_cmd_ft_modify_attr *ft_attr);
-int mlx5hws_cmd_allow_other_vhca_access(struct mlx5_core_dev *mdev,
- struct mlx5hws_cmd_allow_other_vhca_access_attr *attr);
-
int mlx5hws_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_function,
u16 vport_number, u16 *gvmi);
-#endif /* MLX5HWS_CMD_H_ */
+#endif /* HWS_CMD_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c
index 00e4fdf4a558..428dae869706 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA CORPORATION. All rights reserved. */
-#include "mlx5hws_internal.h"
+#include "internal.h"
bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx)
{
@@ -23,7 +23,6 @@ static int hws_context_pools_init(struct mlx5hws_context *ctx)
struct mlx5hws_pool_attr pool_attr = {0};
u8 max_log_sz;
int ret;
- int i;
ret = mlx5hws_pat_init_pattern_cache(&ctx->pattern_cache);
if (ret)
@@ -35,27 +34,20 @@ static int hws_context_pools_init(struct mlx5hws_context *ctx)
/* Create an STC pool per FT type */
pool_attr.pool_type = MLX5HWS_POOL_TYPE_STC;
- pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STC_POOL;
max_log_sz = min(MLX5HWS_POOL_STC_LOG_SZ, ctx->caps->stc_alloc_log_max);
pool_attr.alloc_log_sz = max(max_log_sz, ctx->caps->stc_alloc_log_gran);
- for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
- pool_attr.table_type = i;
- ctx->stc_pool[i] = mlx5hws_pool_create(ctx, &pool_attr);
- if (!ctx->stc_pool[i]) {
- mlx5hws_err(ctx, "Failed to allocate STC pool [%d]", i);
- ret = -ENOMEM;
- goto free_stc_pools;
- }
+ pool_attr.table_type = MLX5HWS_TABLE_TYPE_FDB;
+ ctx->stc_pool = mlx5hws_pool_create(ctx, &pool_attr);
+ if (!ctx->stc_pool) {
+ mlx5hws_err(ctx, "Failed to allocate STC pool\n");
+ ret = -ENOMEM;
+ goto uninit_cache;
}
return 0;
-free_stc_pools:
- for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++)
- if (ctx->stc_pool[i])
- mlx5hws_pool_destroy(ctx->stc_pool[i]);
-
+uninit_cache:
mlx5hws_definer_uninit_cache(ctx->definer_cache);
uninit_pat_cache:
mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache);
@@ -64,12 +56,8 @@ uninit_pat_cache:
static void hws_context_pools_uninit(struct mlx5hws_context *ctx)
{
- int i;
-
- for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
- if (ctx->stc_pool[i])
- mlx5hws_pool_destroy(ctx->stc_pool[i]);
- }
+ if (ctx->stc_pool)
+ mlx5hws_pool_destroy(ctx->stc_pool);
mlx5hws_definer_uninit_cache(ctx->definer_cache);
mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache);
@@ -161,17 +149,25 @@ static int hws_context_init_hws(struct mlx5hws_context *ctx,
if (ret)
goto uninit_pd;
- if (attr->bwc)
- ctx->flags |= MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
+ /* Context has support for backward compatible API,
+ * and does not have support for native HWS API.
+ */
+ ctx->flags |= MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
ret = mlx5hws_send_queues_open(ctx, attr->queues, attr->queue_size);
if (ret)
goto pools_uninit;
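+ /* One action STE pool is allocated per send queue (see struct mlx5hws_context). */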
+ ret = mlx5hws_action_ste_pool_init(ctx);
+ if (ret)
+ goto close_queues;
+
INIT_LIST_HEAD(&ctx->tbl_list);
return 0;
+close_queues:
+ mlx5hws_send_queues_close(ctx);
pools_uninit:
hws_context_pools_uninit(ctx);
uninit_pd:
@@ -184,6 +180,7 @@ static void hws_context_uninit_hws(struct mlx5hws_context *ctx)
if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT))
return;
+ mlx5hws_action_ste_pool_uninit(ctx);
mlx5hws_send_queues_close(ctx);
hws_context_pools_uninit(ctx);
hws_context_uninit_pd(ctx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h
index 8ab548aa402b..3f8938c73dc0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h
@@ -1,13 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#ifndef MLX5HWS_CONTEXT_H_
-#define MLX5HWS_CONTEXT_H_
+#ifndef HWS_CONTEXT_H_
+#define HWS_CONTEXT_H_
enum mlx5hws_context_flags {
MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT = 1 << 0,
MLX5HWS_CONTEXT_FLAG_PRIVATE_PD = 1 << 1,
MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT = 1 << 2,
+ MLX5HWS_CONTEXT_FLAG_NATIVE_SUPPORT = 1 << 3,
};
enum mlx5hws_context_shared_stc_type {
@@ -37,8 +38,10 @@ struct mlx5hws_context {
struct mlx5_core_dev *mdev;
struct mlx5hws_cmd_query_caps *caps;
u32 pd_num;
- struct mlx5hws_pool *stc_pool[MLX5HWS_TABLE_TYPE_MAX];
- struct mlx5hws_context_common_res common_res[MLX5HWS_TABLE_TYPE_MAX];
+ struct mlx5hws_pool *stc_pool;
+ struct mlx5hws_action_ste_pool *action_ste_pool; /* One per queue */
+ struct delayed_work action_ste_cleanup;
+ struct mlx5hws_context_common_res common_res;
struct mlx5hws_pattern_cache *pattern_cache;
struct mlx5hws_definer_cache *definer_cache;
struct mutex ctrl_lock; /* control lock to protect the whole context */
@@ -58,8 +61,13 @@ static inline bool mlx5hws_context_bwc_supported(struct mlx5hws_context *ctx)
return ctx->flags & MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
}
+static inline bool mlx5hws_context_native_supported(struct mlx5hws_context *ctx)
+{
+ return ctx->flags & MLX5HWS_CONTEXT_FLAG_NATIVE_SUPPORT;
+}
+
bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx);
u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx);
-#endif /* MLX5HWS_CONTEXT_H_ */
+#endif /* HWS_CONTEXT_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c
index 2b8c5a4e1c4c..2ec8cb10139a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c
@@ -5,7 +5,7 @@
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/version.h>
-#include "mlx5hws_internal.h"
+#include "internal.h"
static int
hws_debug_dump_matcher_template_definer(struct seq_file *f,
@@ -99,17 +99,19 @@ hws_debug_dump_matcher_attr(struct seq_file *f, struct mlx5hws_matcher *matcher)
{
struct mlx5hws_matcher_attr *attr = &matcher->attr;
- seq_printf(f, "%d,0x%llx,%d,%d,%d,%d,%d,%d,%d,%d\n",
+ seq_printf(f, "%d,0x%llx,%d,%d,%d,%d,%d,%d,%d,%d,-1,-1,%d,%d\n",
MLX5HWS_DEBUG_RES_TYPE_MATCHER_ATTR,
HWS_PTR_TO_ID(matcher),
attr->priority,
attr->mode,
- attr->table.sz_row_log,
- attr->table.sz_col_log,
+ attr->size[MLX5HWS_MATCHER_SIZE_TYPE_RX].table.sz_row_log,
+ attr->size[MLX5HWS_MATCHER_SIZE_TYPE_RX].table.sz_col_log,
attr->optimize_using_rule_idx,
attr->optimize_flow_src,
attr->insert_mode,
- attr->distribute_mode);
+ attr->distribute_mode,
+ attr->size[MLX5HWS_MATCHER_SIZE_TYPE_TX].table.sz_row_log,
+ attr->size[MLX5HWS_MATCHER_SIZE_TYPE_TX].table.sz_col_log);
return 0;
}
@@ -118,8 +120,6 @@ static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *ma
{
enum mlx5hws_table_type tbl_type = matcher->tbl->type;
struct mlx5hws_cmd_ft_query_attr ft_attr = {0};
- struct mlx5hws_pool_chunk *ste;
- struct mlx5hws_pool *ste_pool;
u64 icm_addr_0 = 0;
u64 icm_addr_1 = 0;
u32 ste_0_id = -1;
@@ -134,13 +134,9 @@ static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *ma
matcher->end_ft_id,
matcher->col_matcher ? HWS_PTR_TO_ID(matcher->col_matcher) : 0);
- ste = &matcher->match_ste.ste;
- ste_pool = matcher->match_ste.pool;
- if (ste_pool) {
- ste_0_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
- if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
- ste_1_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
- }
+ ste_0_id = matcher->match_ste.ste_0_base;
+ if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
+ ste_1_id = matcher->match_ste.ste_1_base;
seq_printf(f, ",%d,%d,%d,%d",
matcher->match_ste.rtc_0_id,
@@ -148,19 +144,6 @@ static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *ma
matcher->match_ste.rtc_1_id,
(int)ste_1_id);
- ste = &matcher->action_ste[0].ste;
- ste_pool = matcher->action_ste[0].pool;
- if (ste_pool) {
- ste_0_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
- if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
- ste_1_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
- else
- ste_1_id = -1;
- } else {
- ste_0_id = -1;
- ste_1_id = -1;
- }
-
ft_attr.type = matcher->tbl->fw_ft_type;
ret = mlx5hws_cmd_flow_table_query(matcher->tbl->ctx->mdev,
matcher->end_ft_id,
@@ -170,12 +153,7 @@ static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *ma
if (ret)
return ret;
- seq_printf(f, ",%d,%d,%d,%d,%d,0x%llx,0x%llx\n",
- matcher->action_ste[0].rtc_0_id,
- (int)ste_0_id,
- matcher->action_ste[0].rtc_1_id,
- (int)ste_1_id,
- 0,
+ seq_printf(f, ",-1,-1,-1,-1,0,0x%llx,0x%llx\n",
mlx5hws_debug_icm_to_idx(icm_addr_0),
mlx5hws_debug_icm_to_idx(icm_addr_1));
@@ -368,9 +346,10 @@ static int hws_debug_dump_context_info(struct seq_file *f, struct mlx5hws_contex
static int hws_debug_dump_context_stc_resource(struct seq_file *f,
struct mlx5hws_context *ctx,
- u32 tbl_type,
struct mlx5hws_pool_resource *resource)
{
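+ /* Only the FDB STC pool remains, so the dumped table type is constant. */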
+ u32 tbl_type = MLX5HWS_TABLE_TYPE_BASE + MLX5HWS_TABLE_TYPE_FDB;
+
seq_printf(f, "%d,0x%llx,%u,%u\n",
MLX5HWS_DEBUG_RES_TYPE_CONTEXT_STC,
HWS_PTR_TO_ID(ctx),
@@ -382,40 +361,65 @@ static int hws_debug_dump_context_stc_resource(struct seq_file *f,
static int hws_debug_dump_context_stc(struct seq_file *f, struct mlx5hws_context *ctx)
{
- struct mlx5hws_pool *stc_pool;
- u32 table_type;
+ struct mlx5hws_pool *stc_pool = ctx->stc_pool;
int ret;
- int i;
- for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
- stc_pool = ctx->stc_pool[i];
- table_type = MLX5HWS_TABLE_TYPE_BASE + i;
+ if (!stc_pool)
+ return 0;
- if (!stc_pool)
- continue;
+ if (stc_pool->resource) {
+ ret = hws_debug_dump_context_stc_resource(f, ctx,
+ stc_pool->resource);
+ if (ret)
+ return ret;
+ }
- if (stc_pool->resource[0]) {
- ret = hws_debug_dump_context_stc_resource(f, ctx, table_type,
- stc_pool->resource[0]);
- if (ret)
- return ret;
- }
+ if (stc_pool->mirror_resource) {
+ struct mlx5hws_pool_resource *res = stc_pool->mirror_resource;
- if (i == MLX5HWS_TABLE_TYPE_FDB && stc_pool->mirror_resource[0]) {
- ret = hws_debug_dump_context_stc_resource(f, ctx, table_type,
- stc_pool->mirror_resource[0]);
- if (ret)
- return ret;
- }
+ ret = hws_debug_dump_context_stc_resource(f, ctx, res);
+ if (ret)
+ return ret;
}
return 0;
}
+static void
+hws_debug_dump_action_ste_table(struct seq_file *f,
+ struct mlx5hws_action_ste_table *action_tbl)
+{
+ int ste_0_id = mlx5hws_pool_get_base_id(action_tbl->pool);
+ int ste_1_id = mlx5hws_pool_get_base_mirror_id(action_tbl->pool);
+
+ seq_printf(f, "%d,0x%llx,%d,%d,%d,%d\n",
+ MLX5HWS_DEBUG_RES_TYPE_ACTION_STE_TABLE,
+ HWS_PTR_TO_ID(action_tbl),
+ action_tbl->rtc_0_id, ste_0_id,
+ action_tbl->rtc_1_id, ste_1_id);
+}
+
+static void hws_debug_dump_action_ste_pool(struct seq_file *f,
+ struct mlx5hws_action_ste_pool *pool)
+{
+ struct mlx5hws_action_ste_table *action_tbl;
+ enum mlx5hws_pool_optimize opt;
+
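+ /* Walk each optimization type's list of action STE tables under the pool lock. */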
+ mutex_lock(&pool->lock);
+ for (opt = MLX5HWS_POOL_OPTIMIZE_NONE; opt < MLX5HWS_POOL_OPTIMIZE_MAX;
+ opt++) {
+ list_for_each_entry(action_tbl, &pool->elems[opt].available,
+ list_node) {
+ hws_debug_dump_action_ste_table(f, action_tbl);
+ }
+ }
+ mutex_unlock(&pool->lock);
+}
+
static int hws_debug_dump_context(struct seq_file *f, struct mlx5hws_context *ctx)
{
struct mlx5hws_table *tbl;
- int ret;
+ int ret, i;
ret = hws_debug_dump_context_info(f, ctx);
if (ret)
@@ -435,6 +439,9 @@ static int hws_debug_dump_context(struct seq_file *f, struct mlx5hws_context *ct
return ret;
}
+ for (i = 0; i < ctx->queues; i++)
+ hws_debug_dump_action_ste_pool(f, &ctx->action_ste_pool[i]);
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h
index b93a536035d9..89c396f9f266 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#ifndef MLX5HWS_DEBUG_H_
-#define MLX5HWS_DEBUG_H_
+#ifndef HWS_DEBUG_H_
+#define HWS_DEBUG_H_
#define HWS_DEBUG_FORMAT_VERSION "1.0"
@@ -26,6 +26,8 @@ enum mlx5hws_debug_res_type {
MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_HASH_DEFINER = 4205,
MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_RANGE_DEFINER = 4206,
MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_COMPARE_MATCH_DEFINER = 4207,
+
+ MLX5HWS_DEBUG_RES_TYPE_ACTION_STE_TABLE = 4300,
};
static inline u64
@@ -37,4 +39,4 @@ mlx5hws_debug_icm_to_idx(u64 icm_addr)
void mlx5hws_debug_init_dump(struct mlx5hws_context *ctx);
void mlx5hws_debug_uninit_dump(struct mlx5hws_context *ctx);
-#endif /* MLX5HWS_DEBUG_H_ */
+#endif /* HWS_DEBUG_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
index 3f4c58bada37..82fd122d4284 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#include "mlx5hws_internal.h"
+#include "internal.h"
/* Pattern tunnel Layer bits. */
#define MLX5_FLOW_LAYER_VXLAN BIT(12)
@@ -70,7 +70,7 @@
u32 second_dw_mask = (mask) & ((1 << _bit_off) - 1); \
_HWS_SET32(p, (v) >> _bit_off, byte_off, 0, (mask) >> _bit_off); \
_HWS_SET32(p, (v) & second_dw_mask, (byte_off) + DW_SIZE, \
- (bit_off) % BITS_IN_DW, second_dw_mask); \
+ (bit_off + BITS_IN_DW) % BITS_IN_DW, second_dw_mask); \
} else { \
_HWS_SET32(p, v, byte_off, (bit_off), (mask)); \
} \
@@ -158,6 +158,218 @@ struct mlx5hws_definer_conv_data {
u32 match_flags;
};
+#define HWS_DEFINER_ENTRY(name)[MLX5HWS_DEFINER_FNAME_##name] = #name
+
+static const char * const hws_definer_fname_to_str[] = {
+ HWS_DEFINER_ENTRY(ETH_SMAC_47_16_O),
+ HWS_DEFINER_ENTRY(ETH_SMAC_47_16_I),
+ HWS_DEFINER_ENTRY(ETH_SMAC_15_0_O),
+ HWS_DEFINER_ENTRY(ETH_SMAC_15_0_I),
+ HWS_DEFINER_ENTRY(ETH_DMAC_47_16_O),
+ HWS_DEFINER_ENTRY(ETH_DMAC_47_16_I),
+ HWS_DEFINER_ENTRY(ETH_DMAC_15_0_O),
+ HWS_DEFINER_ENTRY(ETH_DMAC_15_0_I),
+ HWS_DEFINER_ENTRY(ETH_TYPE_O),
+ HWS_DEFINER_ENTRY(ETH_TYPE_I),
+ HWS_DEFINER_ENTRY(ETH_L3_TYPE_O),
+ HWS_DEFINER_ENTRY(ETH_L3_TYPE_I),
+ HWS_DEFINER_ENTRY(VLAN_TYPE_O),
+ HWS_DEFINER_ENTRY(VLAN_TYPE_I),
+ HWS_DEFINER_ENTRY(VLAN_FIRST_PRIO_O),
+ HWS_DEFINER_ENTRY(VLAN_FIRST_PRIO_I),
+ HWS_DEFINER_ENTRY(VLAN_CFI_O),
+ HWS_DEFINER_ENTRY(VLAN_CFI_I),
+ HWS_DEFINER_ENTRY(VLAN_ID_O),
+ HWS_DEFINER_ENTRY(VLAN_ID_I),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_TYPE_O),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_TYPE_I),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_PRIO_O),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_PRIO_I),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_CFI_O),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_CFI_I),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_ID_O),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_ID_I),
+ HWS_DEFINER_ENTRY(IPV4_IHL_O),
+ HWS_DEFINER_ENTRY(IPV4_IHL_I),
+ HWS_DEFINER_ENTRY(IP_DSCP_O),
+ HWS_DEFINER_ENTRY(IP_DSCP_I),
+ HWS_DEFINER_ENTRY(IP_ECN_O),
+ HWS_DEFINER_ENTRY(IP_ECN_I),
+ HWS_DEFINER_ENTRY(IP_TTL_O),
+ HWS_DEFINER_ENTRY(IP_TTL_I),
+ HWS_DEFINER_ENTRY(IPV4_DST_O),
+ HWS_DEFINER_ENTRY(IPV4_DST_I),
+ HWS_DEFINER_ENTRY(IPV4_SRC_O),
+ HWS_DEFINER_ENTRY(IPV4_SRC_I),
+ HWS_DEFINER_ENTRY(IP_VERSION_O),
+ HWS_DEFINER_ENTRY(IP_VERSION_I),
+ HWS_DEFINER_ENTRY(IP_FRAG_O),
+ HWS_DEFINER_ENTRY(IP_FRAG_I),
+ HWS_DEFINER_ENTRY(IP_LEN_O),
+ HWS_DEFINER_ENTRY(IP_LEN_I),
+ HWS_DEFINER_ENTRY(IP_TOS_O),
+ HWS_DEFINER_ENTRY(IP_TOS_I),
+ HWS_DEFINER_ENTRY(IPV6_FLOW_LABEL_O),
+ HWS_DEFINER_ENTRY(IPV6_FLOW_LABEL_I),
+ HWS_DEFINER_ENTRY(IPV6_DST_127_96_O),
+ HWS_DEFINER_ENTRY(IPV6_DST_95_64_O),
+ HWS_DEFINER_ENTRY(IPV6_DST_63_32_O),
+ HWS_DEFINER_ENTRY(IPV6_DST_31_0_O),
+ HWS_DEFINER_ENTRY(IPV6_DST_127_96_I),
+ HWS_DEFINER_ENTRY(IPV6_DST_95_64_I),
+ HWS_DEFINER_ENTRY(IPV6_DST_63_32_I),
+ HWS_DEFINER_ENTRY(IPV6_DST_31_0_I),
+ HWS_DEFINER_ENTRY(IPV6_SRC_127_96_O),
+ HWS_DEFINER_ENTRY(IPV6_SRC_95_64_O),
+ HWS_DEFINER_ENTRY(IPV6_SRC_63_32_O),
+ HWS_DEFINER_ENTRY(IPV6_SRC_31_0_O),
+ HWS_DEFINER_ENTRY(IPV6_SRC_127_96_I),
+ HWS_DEFINER_ENTRY(IPV6_SRC_95_64_I),
+ HWS_DEFINER_ENTRY(IPV6_SRC_63_32_I),
+ HWS_DEFINER_ENTRY(IPV6_SRC_31_0_I),
+ HWS_DEFINER_ENTRY(IP_PROTOCOL_O),
+ HWS_DEFINER_ENTRY(IP_PROTOCOL_I),
+ HWS_DEFINER_ENTRY(L4_SPORT_O),
+ HWS_DEFINER_ENTRY(L4_SPORT_I),
+ HWS_DEFINER_ENTRY(L4_DPORT_O),
+ HWS_DEFINER_ENTRY(L4_DPORT_I),
+ HWS_DEFINER_ENTRY(TCP_FLAGS_I),
+ HWS_DEFINER_ENTRY(TCP_FLAGS_O),
+ HWS_DEFINER_ENTRY(TCP_SEQ_NUM),
+ HWS_DEFINER_ENTRY(TCP_ACK_NUM),
+ HWS_DEFINER_ENTRY(GTP_TEID),
+ HWS_DEFINER_ENTRY(GTP_MSG_TYPE),
+ HWS_DEFINER_ENTRY(GTP_EXT_FLAG),
+ HWS_DEFINER_ENTRY(GTP_NEXT_EXT_HDR),
+ HWS_DEFINER_ENTRY(GTP_EXT_HDR_PDU),
+ HWS_DEFINER_ENTRY(GTP_EXT_HDR_QFI),
+ HWS_DEFINER_ENTRY(GTPU_DW0),
+ HWS_DEFINER_ENTRY(GTPU_FIRST_EXT_DW0),
+ HWS_DEFINER_ENTRY(GTPU_DW2),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_0),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_1),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_2),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_3),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_4),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_5),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_6),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_7),
+ HWS_DEFINER_ENTRY(VPORT_REG_C_0),
+ HWS_DEFINER_ENTRY(VXLAN_FLAGS),
+ HWS_DEFINER_ENTRY(VXLAN_VNI),
+ HWS_DEFINER_ENTRY(VXLAN_GPE_FLAGS),
+ HWS_DEFINER_ENTRY(VXLAN_GPE_RSVD0),
+ HWS_DEFINER_ENTRY(VXLAN_GPE_PROTO),
+ HWS_DEFINER_ENTRY(VXLAN_GPE_VNI),
+ HWS_DEFINER_ENTRY(VXLAN_GPE_RSVD1),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_LEN),
+ HWS_DEFINER_ENTRY(GENEVE_OAM),
+ HWS_DEFINER_ENTRY(GENEVE_PROTO),
+ HWS_DEFINER_ENTRY(GENEVE_VNI),
+ HWS_DEFINER_ENTRY(SOURCE_QP),
+ HWS_DEFINER_ENTRY(SOURCE_GVMI),
+ HWS_DEFINER_ENTRY(REG_0),
+ HWS_DEFINER_ENTRY(REG_1),
+ HWS_DEFINER_ENTRY(REG_2),
+ HWS_DEFINER_ENTRY(REG_3),
+ HWS_DEFINER_ENTRY(REG_4),
+ HWS_DEFINER_ENTRY(REG_5),
+ HWS_DEFINER_ENTRY(REG_6),
+ HWS_DEFINER_ENTRY(REG_7),
+ HWS_DEFINER_ENTRY(REG_8),
+ HWS_DEFINER_ENTRY(REG_9),
+ HWS_DEFINER_ENTRY(REG_10),
+ HWS_DEFINER_ENTRY(REG_11),
+ HWS_DEFINER_ENTRY(REG_A),
+ HWS_DEFINER_ENTRY(REG_B),
+ HWS_DEFINER_ENTRY(GRE_KEY_PRESENT),
+ HWS_DEFINER_ENTRY(GRE_C),
+ HWS_DEFINER_ENTRY(GRE_K),
+ HWS_DEFINER_ENTRY(GRE_S),
+ HWS_DEFINER_ENTRY(GRE_PROTOCOL),
+ HWS_DEFINER_ENTRY(GRE_OPT_KEY),
+ HWS_DEFINER_ENTRY(GRE_OPT_SEQ),
+ HWS_DEFINER_ENTRY(GRE_OPT_CHECKSUM),
+ HWS_DEFINER_ENTRY(INTEGRITY_O),
+ HWS_DEFINER_ENTRY(INTEGRITY_I),
+ HWS_DEFINER_ENTRY(ICMP_DW1),
+ HWS_DEFINER_ENTRY(ICMP_DW2),
+ HWS_DEFINER_ENTRY(ICMP_DW3),
+ HWS_DEFINER_ENTRY(IPSEC_SPI),
+ HWS_DEFINER_ENTRY(IPSEC_SEQUENCE_NUMBER),
+ HWS_DEFINER_ENTRY(IPSEC_SYNDROME),
+ HWS_DEFINER_ENTRY(MPLS0_O),
+ HWS_DEFINER_ENTRY(MPLS1_O),
+ HWS_DEFINER_ENTRY(MPLS2_O),
+ HWS_DEFINER_ENTRY(MPLS3_O),
+ HWS_DEFINER_ENTRY(MPLS4_O),
+ HWS_DEFINER_ENTRY(MPLS0_I),
+ HWS_DEFINER_ENTRY(MPLS1_I),
+ HWS_DEFINER_ENTRY(MPLS2_I),
+ HWS_DEFINER_ENTRY(MPLS3_I),
+ HWS_DEFINER_ENTRY(MPLS4_I),
+ HWS_DEFINER_ENTRY(FLEX_PARSER0_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER1_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER2_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER3_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER4_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER5_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER6_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER7_OK),
+ HWS_DEFINER_ENTRY(OKS2_MPLS0_O),
+ HWS_DEFINER_ENTRY(OKS2_MPLS1_O),
+ HWS_DEFINER_ENTRY(OKS2_MPLS2_O),
+ HWS_DEFINER_ENTRY(OKS2_MPLS3_O),
+ HWS_DEFINER_ENTRY(OKS2_MPLS4_O),
+ HWS_DEFINER_ENTRY(OKS2_MPLS0_I),
+ HWS_DEFINER_ENTRY(OKS2_MPLS1_I),
+ HWS_DEFINER_ENTRY(OKS2_MPLS2_I),
+ HWS_DEFINER_ENTRY(OKS2_MPLS3_I),
+ HWS_DEFINER_ENTRY(OKS2_MPLS4_I),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_0),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_1),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_2),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_3),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_4),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_5),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_6),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_7),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_0),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_1),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_2),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_3),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_4),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_5),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_6),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_7),
+ HWS_DEFINER_ENTRY(IB_L4_OPCODE),
+ HWS_DEFINER_ENTRY(IB_L4_QPN),
+ HWS_DEFINER_ENTRY(IB_L4_A),
+ HWS_DEFINER_ENTRY(RANDOM_NUM),
+ HWS_DEFINER_ENTRY(PTYPE_L2_O),
+ HWS_DEFINER_ENTRY(PTYPE_L2_I),
+ HWS_DEFINER_ENTRY(PTYPE_L3_O),
+ HWS_DEFINER_ENTRY(PTYPE_L3_I),
+ HWS_DEFINER_ENTRY(PTYPE_L4_O),
+ HWS_DEFINER_ENTRY(PTYPE_L4_I),
+ HWS_DEFINER_ENTRY(PTYPE_L4_EXT_O),
+ HWS_DEFINER_ENTRY(PTYPE_L4_EXT_I),
+ HWS_DEFINER_ENTRY(PTYPE_FRAG_O),
+ HWS_DEFINER_ENTRY(PTYPE_FRAG_I),
+ HWS_DEFINER_ENTRY(TNL_HDR_0),
+ HWS_DEFINER_ENTRY(TNL_HDR_1),
+ HWS_DEFINER_ENTRY(TNL_HDR_2),
+ HWS_DEFINER_ENTRY(TNL_HDR_3),
+ [MLX5HWS_DEFINER_FNAME_MAX] = "DEFINER_FNAME_UNKNOWN",
+};
+
+const char *mlx5hws_definer_fname_to_str(enum mlx5hws_definer_fname fname)
+{
+ if (fname > MLX5HWS_DEFINER_FNAME_MAX)
+ fname = MLX5HWS_DEFINER_FNAME_MAX;
+ return hws_definer_fname_to_str[fname];
+}
+
static void
hws_definer_ones_set(struct mlx5hws_definer_fc *fc,
void *match_param,
@@ -500,7 +712,8 @@ hws_definer_check_match_flags(struct mlx5hws_definer_conv_data *cd)
return 0;
err_conflict:
- mlx5hws_err(cd->ctx, "Invalid definer fields combination\n");
+ mlx5hws_err(cd->ctx, "Invalid definer fields combination: match_flags = 0x%08x\n",
+ cd->match_flags);
return -EINVAL;
}
@@ -508,18 +721,33 @@ static int
hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
u32 *match_param)
{
- bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set;
+ bool is_ipv6, smac_set, dmac_set, ip_addr_set, ip_ver_set;
struct mlx5hws_definer_fc *fc = cd->fc;
struct mlx5hws_definer_fc *curr_fc;
u32 *s_ipv6, *d_ipv6;
if (HWS_IS_FLD_SET_SZ(match_param, outer_headers.l4_type, 0x2) ||
- HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c2, 0xe) ||
- HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c4, 0x4)) {
+ HWS_IS_FLD_SET_SZ(match_param, outer_headers.l4_type_ext, 0x4) ||
+ HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c6, 0xa) ||
+ HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_d4, 0x4)) {
mlx5hws_err(cd->ctx, "Unsupported outer parameters set\n");
return -EINVAL;
}
+ ip_addr_set = HWS_IS_FLD_SET_SZ(match_param,
+ outer_headers.src_ipv4_src_ipv6,
+ 0x80) ||
+ HWS_IS_FLD_SET_SZ(match_param,
+ outer_headers.dst_ipv4_dst_ipv6, 0x80);
+ ip_ver_set = HWS_IS_FLD_SET(match_param, outer_headers.ip_version) ||
+ HWS_IS_FLD_SET(match_param, outer_headers.ethertype);
+
+ if (ip_addr_set && !ip_ver_set) {
+ mlx5hws_err(cd->ctx,
+ "Unsupported match on IP address without version or ethertype\n");
+ return -EINVAL;
+ }
+
/* L2 Check ethertype */
HWS_SET_HDR(fc, match_param, ETH_TYPE_O,
outer_headers.ethertype,
@@ -558,6 +786,9 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
HWS_SET_HDR(fc, match_param, IP_PROTOCOL_O,
outer_headers.ip_protocol,
eth_l3_outer.protocol_next_header);
+ HWS_SET_HDR(fc, match_param, IP_VERSION_O,
+ outer_headers.ip_version,
+ eth_l3_outer.ip_version);
HWS_SET_HDR(fc, match_param, IP_TTL_O,
outer_headers.ttl_hoplimit,
eth_l3_outer.time_to_live_hop_limit);
@@ -569,10 +800,16 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
outer_headers.dst_ipv4_dst_ipv6.ipv6_layout);
/* Assume IPv6 is used if ipv6 bits are set */
- is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2];
- is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
+ is_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2] ||
+ d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
+
+ /* IHL is an IPv4-specific field. */
+ if (is_ipv6 && HWS_IS_FLD_SET(match_param, outer_headers.ipv4_ihl)) {
+ mlx5hws_err(cd->ctx, "Unsupported match on IPv6 address and IPv4 IHL\n");
+ return -EINVAL;
+ }
- if (is_s_ipv6) {
+ if (is_ipv6) {
/* Handle IPv6 source address */
HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_O,
outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96,
@@ -586,13 +823,6 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_O,
outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
ipv6_src_outer.ipv6_address_31_0);
- } else {
- /* Handle IPv4 source address */
- HWS_SET_HDR(fc, match_param, IPV4_SRC_O,
- outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
- ipv4_src_dest_outer.source_address);
- }
- if (is_d_ipv6) {
/* Handle IPv6 destination address */
HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_O,
outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96,
@@ -607,6 +837,10 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
ipv6_dst_outer.ipv6_address_31_0);
} else {
+ /* Handle IPv4 source address */
+ HWS_SET_HDR(fc, match_param, IPV4_SRC_O,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv4_src_dest_outer.source_address);
/* Handle IPv4 destination address */
HWS_SET_HDR(fc, match_param, IPV4_DST_O,
outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
@@ -664,18 +898,33 @@ static int
hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
u32 *match_param)
{
- bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set;
+ bool is_ipv6, smac_set, dmac_set, ip_addr_set, ip_ver_set;
struct mlx5hws_definer_fc *fc = cd->fc;
struct mlx5hws_definer_fc *curr_fc;
u32 *s_ipv6, *d_ipv6;
if (HWS_IS_FLD_SET_SZ(match_param, inner_headers.l4_type, 0x2) ||
- HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c2, 0xe) ||
- HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c4, 0x4)) {
+ HWS_IS_FLD_SET_SZ(match_param, inner_headers.l4_type_ext, 0x4) ||
+ HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c6, 0xa) ||
+ HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_d4, 0x4)) {
mlx5hws_err(cd->ctx, "Unsupported inner parameters set\n");
return -EINVAL;
}
+ ip_addr_set = HWS_IS_FLD_SET_SZ(match_param,
+ inner_headers.src_ipv4_src_ipv6,
+ 0x80) ||
+ HWS_IS_FLD_SET_SZ(match_param,
+ inner_headers.dst_ipv4_dst_ipv6, 0x80);
+ ip_ver_set = HWS_IS_FLD_SET(match_param, inner_headers.ip_version) ||
+ HWS_IS_FLD_SET(match_param, inner_headers.ethertype);
+
+ if (ip_addr_set && !ip_ver_set) {
+ mlx5hws_err(cd->ctx,
+ "Unsupported match on IP address without version or ethertype\n");
+ return -EINVAL;
+ }
+
/* L2 Check ethertype */
HWS_SET_HDR(fc, match_param, ETH_TYPE_I,
inner_headers.ethertype,
@@ -727,10 +976,16 @@ hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
inner_headers.dst_ipv4_dst_ipv6.ipv6_layout);
/* Assume IPv6 is used if ipv6 bits are set */
- is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2];
- is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
+ is_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2] ||
+ d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
+
+ /* IHL is an IPv4-specific field. */
+ if (is_ipv6 && HWS_IS_FLD_SET(match_param, inner_headers.ipv4_ihl)) {
+ mlx5hws_err(cd->ctx, "Unsupported match on IPv6 address and IPv4 IHL\n");
+ return -EINVAL;
+ }
- if (is_s_ipv6) {
+ if (is_ipv6) {
/* Handle IPv6 source address */
HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_I,
inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96,
@@ -744,13 +999,6 @@ hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_I,
inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
ipv6_src_inner.ipv6_address_31_0);
- } else {
- /* Handle IPv4 source address */
- HWS_SET_HDR(fc, match_param, IPV4_SRC_I,
- inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
- ipv4_src_dest_inner.source_address);
- }
- if (is_d_ipv6) {
/* Handle IPv6 destination address */
HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_I,
inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96,
@@ -765,6 +1013,10 @@ hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
ipv6_dst_inner.ipv6_address_31_0);
} else {
+ /* Handle IPv4 source address */
+ HWS_SET_HDR(fc, match_param, IPV4_SRC_I,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv4_src_dest_inner.source_address);
/* Handle IPv4 destination address */
HWS_SET_HDR(fc, match_param, IPV4_DST_I,
inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
@@ -1028,8 +1280,9 @@ hws_definer_conv_misc2(struct mlx5hws_definer_conv_data *cd,
struct mlx5hws_definer_fc *fc = cd->fc;
struct mlx5hws_definer_fc *curr_fc;
- if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1a0, 0x8) ||
- HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1b8, 0x8) ||
+ if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.psp_syndrome, 0x8) ||
+ HWS_IS_FLD_SET_SZ(match_param,
+ misc_parameters_2.ipsec_next_header, 0x8) ||
HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1c0, 0x40) ||
HWS_IS_FLD_SET(match_param, misc_parameters_2.macsec_syndrome) ||
HWS_IS_FLD_SET(match_param, misc_parameters_2.ipsec_syndrome)) {
@@ -1578,80 +1831,6 @@ err_free_fc:
return ret;
}
-struct mlx5hws_definer_fc *
-mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx,
- u8 match_criteria_enable,
- u32 *match_param,
- int *fc_sz)
-{
- struct mlx5hws_definer_fc *compressed_fc = NULL;
- struct mlx5hws_definer_conv_data cd = {0};
- struct mlx5hws_definer_fc *fc;
- int ret;
-
- fc = hws_definer_alloc_fc(ctx, MLX5HWS_DEFINER_FNAME_MAX);
- if (!fc)
- return NULL;
-
- cd.fc = fc;
- cd.ctx = ctx;
-
- if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER) {
- ret = hws_definer_conv_outer(&cd, match_param);
- if (ret)
- goto err_free_fc;
- }
-
- if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_INNER) {
- ret = hws_definer_conv_inner(&cd, match_param);
- if (ret)
- goto err_free_fc;
- }
-
- if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC) {
- ret = hws_definer_conv_misc(&cd, match_param);
- if (ret)
- goto err_free_fc;
- }
-
- if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2) {
- ret = hws_definer_conv_misc2(&cd, match_param);
- if (ret)
- goto err_free_fc;
- }
-
- if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3) {
- ret = hws_definer_conv_misc3(&cd, match_param);
- if (ret)
- goto err_free_fc;
- }
-
- if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4) {
- ret = hws_definer_conv_misc4(&cd, match_param);
- if (ret)
- goto err_free_fc;
- }
-
- if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5) {
- ret = hws_definer_conv_misc5(&cd, match_param);
- if (ret)
- goto err_free_fc;
- }
-
- /* Allocate fc array on mt */
- compressed_fc = hws_definer_alloc_compressed_fc(fc);
- if (!compressed_fc) {
- mlx5hws_err(ctx,
- "Convert to compressed fc: failed to set field copy to match template\n");
- goto err_free_fc;
- }
- *fc_sz = hws_definer_get_fc_size(fc);
-
-err_free_fc:
- kfree(fc);
- return compressed_fc;
-}
-
static int
hws_definer_find_byte_in_tag(struct mlx5hws_definer *definer,
u32 hl_byte_off,
@@ -1814,7 +1993,7 @@ hws_definer_copy_sel_ctrl(struct mlx5hws_definer_sel_ctrl *ctrl,
static int
hws_definer_find_best_match_fit(struct mlx5hws_context *ctx,
struct mlx5hws_definer *definer,
- u8 *hl)
+ u8 *hl, bool allow_jumbo)
{
struct mlx5hws_definer_sel_ctrl ctrl = {0};
bool found;
@@ -1831,6 +2010,9 @@ hws_definer_find_best_match_fit(struct mlx5hws_context *ctx,
return 0;
}
+ if (!allow_jumbo)
+ return -E2BIG;
+
/* Try to create a full/limited jumbo definer */
ctrl.allowed_full_dw = ctx->caps->full_dw_jumbo_support ? DW_SELECTORS :
DW_SELECTORS_MATCH;
@@ -1907,7 +2089,8 @@ int mlx5hws_definer_compare(struct mlx5hws_definer *definer_a,
int
mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
struct mlx5hws_match_template *mt,
- struct mlx5hws_definer *match_definer)
+ struct mlx5hws_definer *match_definer,
+ bool allow_jumbo)
{
u8 *match_hl;
int ret;
@@ -1929,7 +2112,8 @@ mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
}
/* Find the match definer layout for header layout match union */
- ret = hws_definer_find_best_match_fit(ctx, match_definer, match_hl);
+ ret = hws_definer_find_best_match_fit(ctx, match_definer, match_hl,
+ allow_jumbo);
if (ret) {
if (ret == -E2BIG)
mlx5hws_dbg(ctx,
@@ -1985,8 +2169,7 @@ int mlx5hws_definer_get_obj(struct mlx5hws_context *ctx,
continue;
/* Reuse definer and set LRU (move to be first in the list) */
- list_del_init(&cached_definer->list_node);
- list_add(&cached_definer->list_node, &cache->list_head);
+ list_move(&cached_definer->list_node, &cache->list_head);
cached_definer->refcount++;
return cached_definer->definer.obj_id;
}
@@ -2118,7 +2301,7 @@ int mlx5hws_definer_mt_init(struct mlx5hws_context *ctx,
struct mlx5hws_definer match_layout = {0};
int ret;
- ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout);
+ ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout, true);
if (ret) {
mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n");
return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
index 2f6a7df4021c..141f3eb2e307 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#ifndef MLX5HWS_DEFINER_H_
-#define MLX5HWS_DEFINER_H_
+#ifndef HWS_DEFINER_H_
+#define HWS_DEFINER_H_
/* Max available selectors */
#define DW_SELECTORS 9
@@ -785,7 +785,7 @@ struct mlx5hws_definer_cache {
struct mlx5hws_definer_cache_item {
struct mlx5hws_definer definer;
- u32 refcount;
+ u32 refcount; /* protected by context ctrl lock */
struct list_head list_node;
};
@@ -823,12 +823,9 @@ void mlx5hws_definer_free(struct mlx5hws_context *ctx,
int mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
struct mlx5hws_match_template *mt,
- struct mlx5hws_definer *match_definer);
+ struct mlx5hws_definer *match_definer,
+ bool allow_jumbo);
-struct mlx5hws_definer_fc *
-mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx,
- u8 match_criteria_enable,
- u32 *match_param,
- int *fc_sz);
+const char *mlx5hws_definer_fname_to_str(enum mlx5hws_definer_fname fname);
-#endif /* MLX5HWS_DEFINER_H_ */
+#endif /* HWS_DEFINER_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
new file mode 100644
index 000000000000..6a4c4cccd643
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
@@ -0,0 +1,1641 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#include <linux/mlx5/vport.h>
+#include <mlx5_core.h>
+#include <fs_core.h>
+#include <fs_cmd.h>
+#include "fs_hws_pools.h"
+#include "mlx5hws.h"
+
+#define MLX5HWS_CTX_MAX_NUM_OF_QUEUES 16
+#define MLX5HWS_CTX_QUEUE_SIZE 256
+
+static struct mlx5hws_action *
+mlx5_fs_create_action_remove_header_vlan(struct mlx5hws_context *ctx);
+static void
+mlx5_fs_destroy_pr_pool(struct mlx5_fs_pool *pool, struct xarray *pr_pools,
+ unsigned long index);
+static void
+mlx5_fs_destroy_mh_pool(struct mlx5_fs_pool *pool, struct xarray *mh_pools,
+ unsigned long index);
+
+static int mlx5_fs_init_hws_actions_pool(struct mlx5_core_dev *dev,
+ struct mlx5_fs_hws_context *fs_ctx)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5_fs_hws_actions_pool *hws_pool = &fs_ctx->hws_pool;
+ struct mlx5hws_action_reformat_header reformat_hdr = {};
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ enum mlx5hws_action_type action_type;
+ int err = -ENOSPC;
+
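+ /* Create the shared actions and pools that rules in this namespace reuse. */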
+ hws_pool->tag_action = mlx5hws_action_create_tag(ctx, flags);
+ if (!hws_pool->tag_action)
+ return err;
+ hws_pool->pop_vlan_action = mlx5hws_action_create_pop_vlan(ctx, flags);
+ if (!hws_pool->pop_vlan_action)
+ goto destroy_tag;
+ hws_pool->push_vlan_action = mlx5hws_action_create_push_vlan(ctx, flags);
+ if (!hws_pool->push_vlan_action)
+ goto destroy_pop_vlan;
+ hws_pool->drop_action = mlx5hws_action_create_dest_drop(ctx, flags);
+ if (!hws_pool->drop_action)
+ goto destroy_push_vlan;
+ action_type = MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
+ hws_pool->decapl2_action =
+ mlx5hws_action_create_reformat(ctx, action_type, 1,
+ &reformat_hdr, 0, flags);
+ if (!hws_pool->decapl2_action)
+ goto destroy_drop;
+ hws_pool->remove_hdr_vlan_action =
+ mlx5_fs_create_action_remove_header_vlan(ctx);
+ if (!hws_pool->remove_hdr_vlan_action)
+ goto destroy_decapl2;
+ err = mlx5_fs_hws_pr_pool_init(&hws_pool->insert_hdr_pool, dev, 0,
+ MLX5HWS_ACTION_TYP_INSERT_HEADER);
+ if (err)
+ goto destroy_remove_hdr;
+ err = mlx5_fs_hws_pr_pool_init(&hws_pool->dl3tnltol2_pool, dev, 0,
+ MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2);
+ if (err)
+ goto cleanup_insert_hdr;
+ xa_init(&hws_pool->el2tol3tnl_pools);
+ xa_init(&hws_pool->el2tol2tnl_pools);
+ xa_init(&hws_pool->mh_pools);
+ xa_init(&hws_pool->table_dests);
+ xa_init(&hws_pool->vport_dests);
+ xa_init(&hws_pool->vport_vhca_dests);
+ xa_init(&hws_pool->aso_meters);
+ xa_init(&hws_pool->sample_dests);
+ return 0;
+
+cleanup_insert_hdr:
+ mlx5_fs_hws_pr_pool_cleanup(&hws_pool->insert_hdr_pool);
+destroy_remove_hdr:
+ mlx5hws_action_destroy(hws_pool->remove_hdr_vlan_action);
+destroy_decapl2:
+ mlx5hws_action_destroy(hws_pool->decapl2_action);
+destroy_drop:
+ mlx5hws_action_destroy(hws_pool->drop_action);
+destroy_push_vlan:
+ mlx5hws_action_destroy(hws_pool->push_vlan_action);
+destroy_pop_vlan:
+ mlx5hws_action_destroy(hws_pool->pop_vlan_action);
+destroy_tag:
+ mlx5hws_action_destroy(hws_pool->tag_action);
+ return err;
+}
+
+static void mlx5_fs_cleanup_hws_actions_pool(struct mlx5_fs_hws_context *fs_ctx)
+{
+ struct mlx5_fs_hws_actions_pool *hws_pool = &fs_ctx->hws_pool;
+ struct mlx5_fs_hws_data *fs_hws_data;
+ struct mlx5hws_action *action;
+ struct mlx5_fs_pool *pool;
+ unsigned long i;
+
+ xa_for_each(&hws_pool->sample_dests, i, fs_hws_data)
+ kfree(fs_hws_data);
+ xa_destroy(&hws_pool->sample_dests);
+ xa_for_each(&hws_pool->aso_meters, i, fs_hws_data)
+ kfree(fs_hws_data);
+ xa_destroy(&hws_pool->aso_meters);
+ xa_for_each(&hws_pool->vport_vhca_dests, i, action)
+ mlx5hws_action_destroy(action);
+ xa_destroy(&hws_pool->vport_vhca_dests);
+ xa_for_each(&hws_pool->vport_dests, i, action)
+ mlx5hws_action_destroy(action);
+ xa_destroy(&hws_pool->vport_dests);
+ xa_destroy(&hws_pool->table_dests);
+ xa_for_each(&hws_pool->mh_pools, i, pool)
+ mlx5_fs_destroy_mh_pool(pool, &hws_pool->mh_pools, i);
+ xa_destroy(&hws_pool->mh_pools);
+ xa_for_each(&hws_pool->el2tol2tnl_pools, i, pool)
+ mlx5_fs_destroy_pr_pool(pool, &hws_pool->el2tol2tnl_pools, i);
+ xa_destroy(&hws_pool->el2tol2tnl_pools);
+ xa_for_each(&hws_pool->el2tol3tnl_pools, i, pool)
+ mlx5_fs_destroy_pr_pool(pool, &hws_pool->el2tol3tnl_pools, i);
+ xa_destroy(&hws_pool->el2tol3tnl_pools);
+ mlx5_fs_hws_pr_pool_cleanup(&hws_pool->dl3tnltol2_pool);
+ mlx5_fs_hws_pr_pool_cleanup(&hws_pool->insert_hdr_pool);
+ mlx5hws_action_destroy(hws_pool->remove_hdr_vlan_action);
+ mlx5hws_action_destroy(hws_pool->decapl2_action);
+ mlx5hws_action_destroy(hws_pool->drop_action);
+ mlx5hws_action_destroy(hws_pool->push_vlan_action);
+ mlx5hws_action_destroy(hws_pool->pop_vlan_action);
+ mlx5hws_action_destroy(hws_pool->tag_action);
+}
+
+static int mlx5_cmd_hws_create_ns(struct mlx5_flow_root_namespace *ns)
+{
+ struct mlx5hws_context_attr hws_ctx_attr = {};
+ int err;
+
+ hws_ctx_attr.queues = min_t(int, num_online_cpus(),
+ MLX5HWS_CTX_MAX_NUM_OF_QUEUES);
+ hws_ctx_attr.queue_size = MLX5HWS_CTX_QUEUE_SIZE;
+
+ ns->fs_hws_context.hws_ctx =
+ mlx5hws_context_open(ns->dev, &hws_ctx_attr);
+ if (!ns->fs_hws_context.hws_ctx) {
+ mlx5_core_err(ns->dev, "Failed to create hws flow namespace\n");
+ return -EINVAL;
+ }
+ err = mlx5_fs_init_hws_actions_pool(ns->dev, &ns->fs_hws_context);
+ if (err) {
+ mlx5_core_err(ns->dev, "Failed to init hws actions pool\n");
+ mlx5hws_context_close(ns->fs_hws_context.hws_ctx);
+ return err;
+ }
+ return 0;
+}
+
+static int mlx5_cmd_hws_destroy_ns(struct mlx5_flow_root_namespace *ns)
+{
+ mlx5_fs_cleanup_hws_actions_pool(&ns->fs_hws_context);
+ return mlx5hws_context_close(ns->fs_hws_context.hws_ctx);
+}
+
+static int mlx5_cmd_hws_set_peer(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_root_namespace *peer_ns,
+ u16 peer_vhca_id)
+{
+ struct mlx5hws_context *peer_ctx = NULL;
+
+ if (peer_ns)
+ peer_ctx = peer_ns->fs_hws_context.hws_ctx;
+ mlx5hws_context_set_peer(ns->fs_hws_context.hws_ctx, peer_ctx,
+ peer_vhca_id);
+ return 0;
+}
+
+static int mlx5_fs_set_ft_default_miss(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table *next_ft)
+{
+ struct mlx5hws_table *next_tbl;
+ int err;
+
+ if (!ns->fs_hws_context.hws_ctx)
+ return -EINVAL;
+
+ /* if no change required, return */
+ if (!next_ft && !ft->fs_hws_table.miss_ft_set)
+ return 0;
+
+ next_tbl = next_ft ? next_ft->fs_hws_table.hws_table : NULL;
+ err = mlx5hws_table_set_default_miss(ft->fs_hws_table.hws_table, next_tbl);
+ if (err) {
+ mlx5_core_err(ns->dev, "Failed setting FT default miss (%d)\n", err);
+ return err;
+ }
+ ft->fs_hws_table.miss_ft_set = !!next_tbl;
+ return 0;
+}
+
+static int mlx5_fs_add_flow_table_dest_action(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
+ struct mlx5hws_action *dest_ft_action;
+ struct xarray *dests_xa;
+ int err;
+
+ dest_ft_action = mlx5hws_action_create_dest_table_num(fs_ctx->hws_ctx,
+ ft->id, flags);
+ if (!dest_ft_action) {
+ mlx5_core_err(ns->dev, "Failed creating dest table action\n");
+ return -ENOMEM;
+ }
+
+ dests_xa = &fs_ctx->hws_pool.table_dests;
+ err = xa_insert(dests_xa, ft->id, dest_ft_action, GFP_KERNEL);
+ if (err)
+ mlx5hws_action_destroy(dest_ft_action);
+ return err;
+}
+
+static int mlx5_fs_del_flow_table_dest_action(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft)
+{
+ struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
+ struct mlx5hws_action *dest_ft_action;
+ struct xarray *dests_xa;
+ int err;
+
+ dests_xa = &fs_ctx->hws_pool.table_dests;
+ dest_ft_action = xa_erase(dests_xa, ft->id);
+ if (!dest_ft_action) {
+ mlx5_core_err(ns->dev, "Failed to erase dest ft action\n");
+ return -ENOENT;
+ }
+
+ err = mlx5hws_action_destroy(dest_ft_action);
+ if (err)
+ mlx5_core_err(ns->dev, "Failed to destroy dest ft action\n");
+ return err;
+}
+
+static int mlx5_cmd_hws_create_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table_attr *ft_attr,
+ struct mlx5_flow_table *next_ft)
+{
+ struct mlx5hws_context *ctx = ns->fs_hws_context.hws_ctx;
+ struct mlx5hws_table_attr tbl_attr = {};
+ struct mlx5hws_table *tbl;
+ int err;
+
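+ /* FW termination tables are still created through the FW command interface. */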
+ if (mlx5_fs_cmd_is_fw_term_table(ft)) {
+ err = mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft, ft_attr,
+ next_ft);
+ if (err)
+ return err;
+ err = mlx5_fs_add_flow_table_dest_action(ns, ft);
+ if (err)
+ mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
+ return err;
+ }
+
+ if (ns->table_type != FS_FT_FDB) {
+ mlx5_core_err(ns->dev, "Table type %d not supported for HWS\n",
+ ns->table_type);
+ return -EOPNOTSUPP;
+ }
+
+ tbl_attr.type = MLX5HWS_TABLE_TYPE_FDB;
+ tbl_attr.level = ft_attr->level;
+ tbl_attr.uid = ft_attr->uid;
+ tbl = mlx5hws_table_create(ctx, &tbl_attr);
+ if (!tbl) {
+ mlx5_core_err(ns->dev, "Failed creating hws flow_table\n");
+ return -EINVAL;
+ }
+
+ ft->fs_hws_table.hws_table = tbl;
+ ft->id = mlx5hws_table_get_id(tbl);
+
+ if (next_ft) {
+ err = mlx5_fs_set_ft_default_miss(ns, ft, next_ft);
+ if (err)
+ goto destroy_table;
+ }
+
+ ft->max_fte = INT_MAX;
+
+ err = mlx5_fs_add_flow_table_dest_action(ns, ft);
+ if (err)
+ goto clear_ft_miss;
+ return 0;
+
+clear_ft_miss:
+ mlx5_fs_set_ft_default_miss(ns, ft, NULL);
+destroy_table:
+ mlx5hws_table_destroy(tbl);
+ ft->fs_hws_table.hws_table = NULL;
+ return err;
+}
+
+static int mlx5_cmd_hws_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft)
+{
+ int err;
+
+ err = mlx5_fs_del_flow_table_dest_action(ns, ft);
+ if (err)
+ mlx5_core_err(ns->dev, "Failed to remove dest action (%d)\n", err);
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
+
+ err = mlx5_fs_set_ft_default_miss(ns, ft, NULL);
+ if (err)
+ mlx5_core_err(ns->dev, "Failed to disconnect next table (%d)\n", err);
+
+ err = mlx5hws_table_destroy(ft->fs_hws_table.hws_table);
+ if (err)
+ mlx5_core_err(ns->dev, "Failed to destroy flow_table (%d)\n", err);
+
+ return err;
+}
+
+static int mlx5_cmd_hws_modify_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table *next_ft)
+{
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft);
+
+ return mlx5_fs_set_ft_default_miss(ns, ft, next_ft);
+}
+
+static int mlx5_cmd_hws_update_root_ft(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ u32 underlay_qpn,
+ bool disconnect)
+{
+ return mlx5_fs_cmd_get_fw_cmds()->update_root_ft(ns, ft, underlay_qpn,
+ disconnect);
+}
+
+static int mlx5_cmd_hws_create_flow_group(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft, u32 *in,
+ struct mlx5_flow_group *fg)
+{
+ struct mlx5hws_match_parameters mask;
+ struct mlx5hws_bwc_matcher *matcher;
+ u8 match_criteria_enable;
+ u32 priority;
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in, fg);
+
+ mask.match_buf = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ mask.match_sz = sizeof(fg->mask.match_criteria);
+
+ match_criteria_enable = MLX5_GET(create_flow_group_in, in,
+ match_criteria_enable);
+ priority = MLX5_GET(create_flow_group_in, in, start_flow_index);
+ matcher = mlx5hws_bwc_matcher_create(ft->fs_hws_table.hws_table,
+ priority, match_criteria_enable,
+ &mask);
+ if (!matcher) {
+ mlx5_core_err(ns->dev, "Failed creating matcher\n");
+ return -EINVAL;
+ }
+
+ fg->fs_hws_matcher.matcher = matcher;
+ return 0;
+}
+
+static int mlx5_cmd_hws_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *fg)
+{
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg);
+
+ return mlx5hws_bwc_matcher_destroy(fg->fs_hws_matcher.matcher);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_dest_action_ft(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_flow_rule *dst)
+{
+ return xa_load(&fs_ctx->hws_pool.table_dests, dst->dest_attr.ft->id);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_dest_action_table_num(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_flow_rule *dst)
+{
+ u32 table_num = dst->dest_attr.ft_num;
+
+ return xa_load(&fs_ctx->hws_pool.table_dests, table_num);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_create_dest_action_table_num(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_flow_rule *dst)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ u32 table_num = dst->dest_attr.ft_num;
+
+ return mlx5hws_action_create_dest_table_num(ctx, table_num, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_dest_action_vport(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_flow_rule *dst,
+ bool is_dest_type_uplink)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ struct mlx5hws_action *dest;
+ struct xarray *dests_xa;
+ bool vhca_id_valid;
+ unsigned long idx;
+ u16 vport_num;
+ int err;
+
+ vhca_id_valid = is_dest_type_uplink ||
+ (dest_attr->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID);
+ vport_num = is_dest_type_uplink ? MLX5_VPORT_UPLINK : dest_attr->vport.num;
+ if (vhca_id_valid) {
+ dests_xa = &fs_ctx->hws_pool.vport_vhca_dests;
+ idx = (unsigned long)dest_attr->vport.vhca_id << 16 | vport_num;
+ } else {
+ dests_xa = &fs_ctx->hws_pool.vport_dests;
+ idx = vport_num;
+ }
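+ /* Reuse a cached destination action if one exists for this index; otherwise create and cache it, retrying the lookup if another thread raced us. */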
+dest_load:
+ dest = xa_load(dests_xa, idx);
+ if (dest)
+ return dest;
+
+ dest = mlx5hws_action_create_dest_vport(ctx, vport_num, vhca_id_valid,
+ dest_attr->vport.vhca_id, flags);
+
+ err = xa_insert(dests_xa, idx, dest, GFP_KERNEL);
+ if (err) {
+ mlx5hws_action_destroy(dest);
+ dest = NULL;
+
+ if (err == -EBUSY)
+ /* xarray entry was already stored by another thread */
+ goto dest_load;
+ }
+
+ return dest;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_create_dest_action_range(struct mlx5hws_context *ctx,
+ struct mlx5_flow_rule *dst)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
+
+ return mlx5hws_action_create_dest_match_range(ctx,
+ dest_attr->range.field,
+ dest_attr->range.hit_ft,
+ dest_attr->range.miss_ft,
+ dest_attr->range.min,
+ dest_attr->range.max,
+ flags);
+}
+
+static struct mlx5_fs_hws_data *
+mlx5_fs_get_cached_hws_data(struct xarray *cache_xa, unsigned long index)
+{
+ struct mlx5_fs_hws_data *fs_hws_data;
+ int err;
+
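+ /* Lazily allocate the cache entry; GFP_ATOMIC because the xarray spinlock is held. */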
+ xa_lock(cache_xa);
+ fs_hws_data = xa_load(cache_xa, index);
+ if (!fs_hws_data) {
+ fs_hws_data = kzalloc(sizeof(*fs_hws_data), GFP_ATOMIC);
+ if (!fs_hws_data) {
+ xa_unlock(cache_xa);
+ return NULL;
+ }
+ refcount_set(&fs_hws_data->hws_action_refcount, 0);
+ mutex_init(&fs_hws_data->lock);
+ err = __xa_insert(cache_xa, index, fs_hws_data, GFP_ATOMIC);
+ if (err) {
+ kfree(fs_hws_data);
+ xa_unlock(cache_xa);
+ return NULL;
+ }
+ }
+ xa_unlock(cache_xa);
+
+ return fs_hws_data;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_aso_meter(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_exe_aso *exe_aso)
+{
+ struct mlx5_fs_hws_create_action_ctx create_ctx;
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ struct mlx5_fs_hws_data *meter_hws_data;
+ u32 id = exe_aso->base_id;
+ struct xarray *meters_xa;
+
+ meters_xa = &fs_ctx->hws_pool.aso_meters;
+ meter_hws_data = mlx5_fs_get_cached_hws_data(meters_xa, id);
+ if (!meter_hws_data)
+ return NULL;
+
+ create_ctx.hws_ctx = ctx;
+ create_ctx.actions_type = MLX5HWS_ACTION_TYP_ASO_METER;
+ create_ctx.id = id;
+ create_ctx.return_reg_id = exe_aso->return_reg_id;
+
+ return mlx5_fs_get_hws_action(meter_hws_data, &create_ctx);
+}
+
+static void mlx5_fs_put_action_aso_meter(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_exe_aso *exe_aso)
+{
+ struct mlx5_fs_hws_data *meter_hws_data;
+ struct xarray *meters_xa;
+
+ meters_xa = &fs_ctx->hws_pool.aso_meters;
+ meter_hws_data = xa_load(meters_xa, exe_aso->base_id);
+ if (!meter_hws_data)
+ return;
+ return mlx5_fs_put_hws_action(meter_hws_data);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_dest_action_sampler(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_flow_rule *dst)
+{
+ struct mlx5_fs_hws_create_action_ctx create_ctx;
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ struct mlx5_fs_hws_data *sampler_hws_data;
+ u32 id = dst->dest_attr.sampler_id;
+ struct xarray *sampler_xa;
+
+ sampler_xa = &fs_ctx->hws_pool.sample_dests;
+ sampler_hws_data = mlx5_fs_get_cached_hws_data(sampler_xa, id);
+ if (!sampler_hws_data)
+ return NULL;
+
+ create_ctx.hws_ctx = ctx;
+ create_ctx.actions_type = MLX5HWS_ACTION_TYP_SAMPLER;
+ create_ctx.id = id;
+
+ return mlx5_fs_get_hws_action(sampler_hws_data, &create_ctx);
+}
+
+static void mlx5_fs_put_dest_action_sampler(struct mlx5_fs_hws_context *fs_ctx,
+ u32 sampler_id)
+{
+ struct mlx5_fs_hws_data *sampler_hws_data;
+ struct xarray *sampler_xa;
+
+ sampler_xa = &fs_ctx->hws_pool.sample_dests;
+ sampler_hws_data = xa_load(sampler_xa, sampler_id);
+ if (!sampler_hws_data)
+ return;
+
+ mlx5_fs_put_hws_action(sampler_hws_data);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_create_action_dest_array(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_dest_attr *dests,
+ u32 num_of_dests)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+
+ return mlx5hws_action_create_dest_array(ctx, num_of_dests, dests,
+ flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_push_vlan(struct mlx5_fs_hws_context *fs_ctx)
+{
+ return fs_ctx->hws_pool.push_vlan_action;
+}
+
+static u32 mlx5_fs_calc_vlan_hdr(struct mlx5_fs_vlan *vlan)
+{
+ u16 n_ethtype = vlan->ethtype;
+ u8 prio = vlan->prio;
+ u16 vid = vlan->vid;
+
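+ /* Pack ethertype into bits 31:16, priority into bits 14:12 and VLAN id into bits 11:0. */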
+ return (u32)n_ethtype << 16 | (u32)(prio) << 12 | (u32)vid;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_pop_vlan(struct mlx5_fs_hws_context *fs_ctx)
+{
+ return fs_ctx->hws_pool.pop_vlan_action;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_decap_tnl_l2_to_l2(struct mlx5_fs_hws_context *fs_ctx)
+{
+ return fs_ctx->hws_pool.decapl2_action;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_dest_action_drop(struct mlx5_fs_hws_context *fs_ctx)
+{
+ return fs_ctx->hws_pool.drop_action;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_tag(struct mlx5_fs_hws_context *fs_ctx)
+{
+ return fs_ctx->hws_pool.tag_action;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_create_action_last(struct mlx5hws_context *ctx)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+
+ return mlx5hws_action_create_last(ctx, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_create_hws_action(struct mlx5_fs_hws_create_action_ctx *create_ctx)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+
+ switch (create_ctx->actions_type) {
+ case MLX5HWS_ACTION_TYP_CTR:
+ return mlx5hws_action_create_counter(create_ctx->hws_ctx,
+ create_ctx->id, flags);
+ case MLX5HWS_ACTION_TYP_ASO_METER:
+ return mlx5hws_action_create_aso_meter(create_ctx->hws_ctx,
+ create_ctx->id,
+ create_ctx->return_reg_id,
+ flags);
+ case MLX5HWS_ACTION_TYP_SAMPLER:
+ return mlx5hws_action_create_flow_sampler(create_ctx->hws_ctx,
+ create_ctx->id, flags);
+ default:
+ return NULL;
+ }
+}
+
+struct mlx5hws_action *
+mlx5_fs_get_hws_action(struct mlx5_fs_hws_data *fs_hws_data,
+ struct mlx5_fs_hws_create_action_ctx *create_ctx)
+{
+	/* Fast path: take a reference without locking if the action exists. */
+ if (refcount_inc_not_zero(&fs_hws_data->hws_action_refcount))
+ return fs_hws_data->hws_action;
+
+ mutex_lock(&fs_hws_data->lock);
+ if (refcount_inc_not_zero(&fs_hws_data->hws_action_refcount)) {
+ mutex_unlock(&fs_hws_data->lock);
+ return fs_hws_data->hws_action;
+ }
+ fs_hws_data->hws_action = mlx5_fs_create_hws_action(create_ctx);
+ if (!fs_hws_data->hws_action) {
+ mutex_unlock(&fs_hws_data->lock);
+ return NULL;
+ }
+ refcount_set(&fs_hws_data->hws_action_refcount, 1);
+ mutex_unlock(&fs_hws_data->lock);
+
+ return fs_hws_data->hws_action;
+}
+
+void mlx5_fs_put_hws_action(struct mlx5_fs_hws_data *fs_hws_data)
+{
+ if (!fs_hws_data)
+ return;
+
+	/* Fast path: drop a reference without locking unless it is the last. */
+ if (refcount_dec_not_one(&fs_hws_data->hws_action_refcount))
+ return;
+
+ mutex_lock(&fs_hws_data->lock);
+ if (!refcount_dec_and_test(&fs_hws_data->hws_action_refcount)) {
+ mutex_unlock(&fs_hws_data->lock);
+ return;
+ }
+ mlx5hws_action_destroy(fs_hws_data->hws_action);
+ fs_hws_data->hws_action = NULL;
+ mutex_unlock(&fs_hws_data->lock);
+}
+
+static void mlx5_fs_destroy_fs_action(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_fs_hws_rule_action *fs_action)
+{
+ struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
+
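+	/* Cached actions are released via their put helpers, others destroyed. */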
+ switch (mlx5hws_action_get_type(fs_action->action)) {
+ case MLX5HWS_ACTION_TYP_CTR:
+ mlx5_fc_put_hws_action(fs_action->counter);
+ break;
+ case MLX5HWS_ACTION_TYP_ASO_METER:
+ mlx5_fs_put_action_aso_meter(fs_ctx, fs_action->exe_aso);
+ break;
+ case MLX5HWS_ACTION_TYP_SAMPLER:
+ mlx5_fs_put_dest_action_sampler(fs_ctx, fs_action->sampler_id);
+ break;
+ default:
+ mlx5hws_action_destroy(fs_action->action);
+ }
+}
+
+static void
+mlx5_fs_destroy_fs_actions(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_fs_hws_rule_action **fs_actions,
+ int *num_fs_actions)
+{
+ int i;
+
+ /* Free in reverse order to handle action dependencies */
+ for (i = *num_fs_actions - 1; i >= 0; i--)
+ mlx5_fs_destroy_fs_action(ns, *fs_actions + i);
+ *num_fs_actions = 0;
+ kfree(*fs_actions);
+ *fs_actions = NULL;
+}
+
+/* Splits FTE's actions into cached, rule and destination actions.
+ * The cached and destination actions are saved on the fte hws rule.
+ * The rule actions are returned as a parameter, together with their count.
+ * We want to support a rule with 32 destinations, which means we need to
+ * account for 32 destinations plus usually a counter plus one more action
+ * for a multi-destination flow table.
+ * The limit of 32 is a SW array-size limitation and must be kept;
+ * the HWS limitation is 16M STEs per matcher.
+ */
+#define MLX5_FLOW_CONTEXT_ACTION_MAX 34
+static int mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *group,
+ struct fs_fte *fte,
+ struct mlx5hws_rule_action **ractions)
+{
+ struct mlx5_flow_act *fte_action = &fte->act_dests.action;
+ struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
+ struct mlx5hws_action_dest_attr *dest_actions;
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ struct mlx5_fs_hws_rule_action *fs_actions;
+ struct mlx5_core_dev *dev = ns->dev;
+ struct mlx5hws_action *dest_action;
+ struct mlx5hws_action *tmp_action;
+ struct mlx5_fs_hws_pr *pr_data;
+ struct mlx5_fs_hws_mh *mh_data;
+ bool delay_encap_set = false;
+ struct mlx5_flow_rule *dst;
+ int num_dest_actions = 0;
+ int num_fs_actions = 0;
+ int num_actions = 0;
+ int err;
+
+ *ractions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(**ractions),
+ GFP_KERNEL);
+ if (!*ractions) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ fs_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
+ sizeof(*fs_actions), GFP_KERNEL);
+ if (!fs_actions) {
+ err = -ENOMEM;
+ goto free_actions_alloc;
+ }
+
+ dest_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
+ sizeof(*dest_actions), GFP_KERNEL);
+ if (!dest_actions) {
+ err = -ENOMEM;
+ goto free_fs_actions_alloc;
+ }
+
+	/* The order of the actions must be kept; only the following
+ * order is supported by HW steering:
+ * HWS: decap -> remove_hdr -> pop_vlan -> modify header -> push_vlan
+ * -> reformat (insert_hdr/encap) -> ctr -> tag -> aso
+ * -> drop -> FWD:tbl/vport/sampler/tbl_num/range -> dest_array -> last
+ */
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
+ tmp_action = mlx5_fs_get_action_decap_tnl_l2_to_l2(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_dest_actions_alloc;
+ }
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
+ int reformat_type = fte_action->pkt_reformat->reformat_type;
+
+ if (fte_action->pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) {
+ mlx5_core_err(dev, "FW-owned reformat can't be used in HWS rule\n");
+ err = -EINVAL;
+ goto free_actions;
+ }
+
+ if (reformat_type == MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2) {
+ pr_data = fte_action->pkt_reformat->fs_hws_action.pr_data;
+ (*ractions)[num_actions].reformat.offset = pr_data->offset;
+ (*ractions)[num_actions].reformat.hdr_idx = pr_data->hdr_idx;
+ (*ractions)[num_actions].reformat.data = pr_data->data;
+ (*ractions)[num_actions++].action =
+ fte_action->pkt_reformat->fs_hws_action.hws_action;
+ } else if (reformat_type == MLX5_REFORMAT_TYPE_REMOVE_HDR) {
+ (*ractions)[num_actions++].action =
+ fte_action->pkt_reformat->fs_hws_action.hws_action;
+ } else {
+ delay_encap_set = true;
+ }
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
+ tmp_action = mlx5_fs_get_action_pop_vlan(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) {
+ tmp_action = mlx5_fs_get_action_pop_vlan(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ mh_data = fte_action->modify_hdr->fs_hws_action.mh_data;
+ (*ractions)[num_actions].modify_header.offset = mh_data->offset;
+ (*ractions)[num_actions].modify_header.data = mh_data->data;
+ (*ractions)[num_actions++].action =
+ fte_action->modify_hdr->fs_hws_action.hws_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
+ tmp_action = mlx5_fs_get_action_push_vlan(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions].push_vlan.vlan_hdr =
+ htonl(mlx5_fs_calc_vlan_hdr(&fte_action->vlan[0]));
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
+ tmp_action = mlx5_fs_get_action_push_vlan(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions].push_vlan.vlan_hdr =
+ htonl(mlx5_fs_calc_vlan_hdr(&fte_action->vlan[1]));
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (delay_encap_set) {
+ pr_data = fte_action->pkt_reformat->fs_hws_action.pr_data;
+ (*ractions)[num_actions].reformat.offset = pr_data->offset;
+ (*ractions)[num_actions].reformat.data = pr_data->data;
+ (*ractions)[num_actions++].action =
+ fte_action->pkt_reformat->fs_hws_action.hws_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ list_for_each_entry(dst, &fte->node.children, node.list) {
+ struct mlx5_fc *counter;
+
+ if (dst->dest_attr.type !=
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+
+ counter = dst->dest_attr.counter;
+ tmp_action = mlx5_fc_get_hws_action(ctx, counter);
+ if (!tmp_action) {
+ err = -EINVAL;
+ goto free_actions;
+ }
+
+ (*ractions)[num_actions].counter.offset =
+ mlx5_fc_id(counter) - mlx5_fc_get_base_id(counter);
+ (*ractions)[num_actions++].action = tmp_action;
+ fs_actions[num_fs_actions].action = tmp_action;
+ fs_actions[num_fs_actions++].counter = counter;
+ }
+ }
+
+ if (fte->act_dests.flow_context.flow_tag) {
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ tmp_action = mlx5_fs_get_action_tag(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions].tag.value = fte->act_dests.flow_context.flow_tag;
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
+ if (fte_action->exe_aso.type != MLX5_EXE_ASO_FLOW_METER ||
+ num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+
+ tmp_action = mlx5_fs_get_action_aso_meter(fs_ctx,
+ &fte_action->exe_aso);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions].aso_meter.offset =
+ fte_action->exe_aso.flow_meter.meter_idx;
+ (*ractions)[num_actions].aso_meter.init_color =
+ fte_action->exe_aso.flow_meter.init_color;
+ (*ractions)[num_actions++].action = tmp_action;
+ fs_actions[num_fs_actions].action = tmp_action;
+ fs_actions[num_fs_actions++].exe_aso = &fte_action->exe_aso;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
+ dest_action = mlx5_fs_get_dest_action_drop(fs_ctx);
+ if (!dest_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ dest_actions[num_dest_actions++].dest = dest_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ list_for_each_entry(dst, &fte->node.children, node.list) {
+ struct mlx5_flow_destination *attr = &dst->dest_attr;
+ bool type_uplink =
+ attr->type == MLX5_FLOW_DESTINATION_TYPE_UPLINK;
+
+ if (num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ num_dest_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ if (attr->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+
+ switch (attr->type) {
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+ dest_action = mlx5_fs_get_dest_action_ft(fs_ctx, dst);
+ if (dst->dest_attr.ft->flags &
+ MLX5_FLOW_TABLE_UPLINK_VPORT)
+ dest_actions[num_dest_actions].is_wire_ft = true;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
+ dest_action = mlx5_fs_get_dest_action_table_num(fs_ctx,
+ dst);
+ if (dest_action)
+ break;
+ dest_action = mlx5_fs_create_dest_action_table_num(fs_ctx,
+ dst);
+ fs_actions[num_fs_actions++].action = dest_action;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_RANGE:
+ dest_action = mlx5_fs_create_dest_action_range(ctx, dst);
+ fs_actions[num_fs_actions++].action = dest_action;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
+ case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+ dest_action = mlx5_fs_get_dest_action_vport(fs_ctx, dst,
+ type_uplink);
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
+ dest_action =
+ mlx5_fs_get_dest_action_sampler(fs_ctx,
+ dst);
+ fs_actions[num_fs_actions].action = dest_action;
+ fs_actions[num_fs_actions++].sampler_id =
+ dst->dest_attr.sampler_id;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ if (!dest_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ dest_actions[num_dest_actions++].dest = dest_action;
+ }
+ }
+
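+	/* One dest is used directly; several are wrapped in a dest_array action. */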
+ if (num_dest_actions == 1) {
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ (*ractions)[num_actions++].action = dest_actions->dest;
+ } else if (num_dest_actions > 1) {
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ tmp_action =
+ mlx5_fs_create_action_dest_array(ctx, dest_actions,
+ num_dest_actions);
+ if (!tmp_action) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ fs_actions[num_fs_actions++].action = tmp_action;
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+
+ tmp_action = mlx5_fs_create_action_last(ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_actions[num_fs_actions++].action = tmp_action;
+ (*ractions)[num_actions++].action = tmp_action;
+
+ kfree(dest_actions);
+
+ /* Actions created specifically for this rule will be destroyed
+ * once rule is deleted.
+ */
+ fte->fs_hws_rule.num_fs_actions = num_fs_actions;
+ fte->fs_hws_rule.hws_fs_actions = fs_actions;
+
+ return 0;
+
+free_actions:
+ mlx5_fs_destroy_fs_actions(ns, &fs_actions, &num_fs_actions);
+free_dest_actions_alloc:
+ kfree(dest_actions);
+free_fs_actions_alloc:
+ kfree(fs_actions);
+free_actions_alloc:
+ kfree(*ractions);
+ *ractions = NULL;
+out_err:
+ return err;
+}
+
+static int mlx5_cmd_hws_create_fte(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *group,
+ struct fs_fte *fte)
+{
+ struct mlx5hws_match_parameters params;
+ struct mlx5hws_rule_action *ractions;
+ struct mlx5hws_bwc_rule *rule;
+ int err = 0;
+
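+	/* FW-owned termination tables keep using the FW command interface. */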
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);
+
+ err = mlx5_fs_fte_get_hws_actions(ns, ft, group, fte, &ractions);
+ if (err)
+ goto out_err;
+
+ params.match_sz = sizeof(fte->val);
+ params.match_buf = fte->val;
+
+ rule = mlx5hws_bwc_rule_create(group->fs_hws_matcher.matcher, &params,
+ fte->act_dests.flow_context.flow_source,
+ ractions);
+ kfree(ractions);
+ if (!rule) {
+ err = -EINVAL;
+ goto free_actions;
+ }
+
+ fte->fs_hws_rule.bwc_rule = rule;
+ return 0;
+
+free_actions:
+ mlx5_fs_destroy_fs_actions(ns, &fte->fs_hws_rule.hws_fs_actions,
+ &fte->fs_hws_rule.num_fs_actions);
+out_err:
+ mlx5_core_err(ns->dev, "Failed to create hws rule err(%d)\n", err);
+ return err;
+}
+
+static int mlx5_cmd_hws_delete_fte(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct fs_fte *fte)
+{
+ struct mlx5_fs_hws_rule *rule = &fte->fs_hws_rule;
+ int err;
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte);
+
+ err = mlx5hws_bwc_rule_destroy(rule->bwc_rule);
+ rule->bwc_rule = NULL;
+
+ mlx5_fs_destroy_fs_actions(ns, &rule->hws_fs_actions,
+ &rule->num_fs_actions);
+
+ return err;
+}
+
+static int mlx5_cmd_hws_update_fte(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *group,
+ int modify_mask,
+ struct fs_fte *fte)
+{
+ int allowed_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST) |
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
+ struct mlx5_fs_hws_rule_action *saved_hws_fs_actions;
+ struct mlx5hws_rule_action *ractions;
+ int saved_num_fs_actions;
+ int ret;
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group,
+ modify_mask, fte);
+
+ if ((modify_mask & ~allowed_mask) != 0)
+ return -EINVAL;
+
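+	/* Save the current HWS actions so they can be restored on failure. */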
+ saved_hws_fs_actions = fte->fs_hws_rule.hws_fs_actions;
+ saved_num_fs_actions = fte->fs_hws_rule.num_fs_actions;
+
+ ret = mlx5_fs_fte_get_hws_actions(ns, ft, group, fte, &ractions);
+ if (ret)
+ return ret;
+
+ ret = mlx5hws_bwc_rule_action_update(fte->fs_hws_rule.bwc_rule, ractions);
+ kfree(ractions);
+ if (ret)
+ goto restore_actions;
+
+ mlx5_fs_destroy_fs_actions(ns, &saved_hws_fs_actions,
+ &saved_num_fs_actions);
+ return ret;
+
+restore_actions:
+ mlx5_fs_destroy_fs_actions(ns, &fte->fs_hws_rule.hws_fs_actions,
+ &fte->fs_hws_rule.num_fs_actions);
+ fte->fs_hws_rule.hws_fs_actions = saved_hws_fs_actions;
+ fte->fs_hws_rule.num_fs_actions = saved_num_fs_actions;
+ return ret;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_create_action_remove_header_vlan(struct mlx5hws_context *ctx)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5hws_action_remove_header_attr remove_hdr_vlan = {};
+
+ /* MAC anchor not supported in HWS reformat, use VLAN anchor */
+ remove_hdr_vlan.anchor = MLX5_REFORMAT_CONTEXT_ANCHOR_VLAN_START;
+ remove_hdr_vlan.offset = 0;
+ remove_hdr_vlan.size = sizeof(struct vlan_hdr);
+ return mlx5hws_action_create_remove_header(ctx, &remove_hdr_vlan, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_remove_header_vlan(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_pkt_reformat_params *params)
+{
+ if (!params ||
+ params->param_0 != MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START ||
+ params->param_1 != offsetof(struct vlan_ethhdr, h_vlan_proto) ||
+ params->size != sizeof(struct vlan_hdr))
+ return NULL;
+
+ return fs_ctx->hws_pool.remove_hdr_vlan_action;
+}
+
+static int
+mlx5_fs_verify_insert_header_params(struct mlx5_core_dev *mdev,
+ struct mlx5_pkt_reformat_params *params)
+{
+ if ((!params->data && params->size) || (params->data && !params->size) ||
+ MLX5_CAP_GEN_2(mdev, max_reformat_insert_size) < params->size ||
+ MLX5_CAP_GEN_2(mdev, max_reformat_insert_offset) < params->param_1) {
+ mlx5_core_err(mdev, "Invalid reformat params for INSERT_HDR\n");
+ return -EINVAL;
+ }
+ if (params->param_0 != MLX5_FS_INSERT_HDR_VLAN_ANCHOR ||
+ params->param_1 != MLX5_FS_INSERT_HDR_VLAN_OFFSET ||
+ params->size != MLX5_FS_INSERT_HDR_VLAN_SIZE) {
+ mlx5_core_err(mdev, "Only vlan insert header supported\n");
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int
+mlx5_fs_verify_encap_decap_params(struct mlx5_core_dev *dev,
+ struct mlx5_pkt_reformat_params *params)
+{
+ if (params->param_0 || params->param_1) {
+ mlx5_core_err(dev, "Invalid reformat params\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static struct mlx5_fs_pool *
+mlx5_fs_get_pr_encap_pool(struct mlx5_core_dev *dev, struct xarray *pr_pools,
+ enum mlx5hws_action_type reformat_type, size_t size)
+{
+ struct mlx5_fs_pool *pr_pool;
+ unsigned long index = size;
+ int err;
+
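+	/* Packet-reformat pools are cached in the xarray, keyed by encap size. */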
+ pr_pool = xa_load(pr_pools, index);
+ if (pr_pool)
+ return pr_pool;
+
+ pr_pool = kzalloc(sizeof(*pr_pool), GFP_KERNEL);
+ if (!pr_pool)
+ return ERR_PTR(-ENOMEM);
+ err = mlx5_fs_hws_pr_pool_init(pr_pool, dev, size, reformat_type);
+ if (err)
+ goto free_pr_pool;
+ err = xa_insert(pr_pools, index, pr_pool, GFP_KERNEL);
+ if (err)
+ goto cleanup_pr_pool;
+ return pr_pool;
+
+cleanup_pr_pool:
+ mlx5_fs_hws_pr_pool_cleanup(pr_pool);
+free_pr_pool:
+ kfree(pr_pool);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_fs_destroy_pr_pool(struct mlx5_fs_pool *pool, struct xarray *pr_pools,
+ unsigned long index)
+{
+ xa_erase(pr_pools, index);
+ mlx5_fs_hws_pr_pool_cleanup(pool);
+ kfree(pool);
+}
+
+static int
+mlx5_cmd_hws_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_pkt_reformat_params *params,
+ enum mlx5_flow_namespace_type namespace,
+ struct mlx5_pkt_reformat *pkt_reformat)
+{
+ struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
+ struct mlx5_fs_hws_actions_pool *hws_pool;
+ struct mlx5hws_action *hws_action = NULL;
+ struct mlx5_fs_hws_pr *pr_data = NULL;
+ struct mlx5_fs_pool *pr_pool = NULL;
+ struct mlx5_core_dev *dev = ns->dev;
+ u8 hdr_idx = 0;
+ int err;
+
+ if (!params)
+ return -EINVAL;
+
+ hws_pool = &fs_ctx->hws_pool;
+
+ switch (params->type) {
+ case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
+ case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
+ case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
+ if (mlx5_fs_verify_encap_decap_params(dev, params))
+ return -EINVAL;
+ pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol2tnl_pools,
+ MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
+ params->size);
+ if (IS_ERR(pr_pool))
+ return PTR_ERR(pr_pool);
+ break;
+ case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
+ if (mlx5_fs_verify_encap_decap_params(dev, params))
+ return -EINVAL;
+ pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol3tnl_pools,
+ MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3,
+ params->size);
+ if (IS_ERR(pr_pool))
+ return PTR_ERR(pr_pool);
+ break;
+ case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
+ if (mlx5_fs_verify_encap_decap_params(dev, params))
+ return -EINVAL;
+ pr_pool = &hws_pool->dl3tnltol2_pool;
+ hdr_idx = params->size == ETH_HLEN ?
+ MLX5_FS_DL3TNLTOL2_MAC_HDR_IDX :
+ MLX5_FS_DL3TNLTOL2_MAC_VLAN_HDR_IDX;
+ break;
+ case MLX5_REFORMAT_TYPE_INSERT_HDR:
+ err = mlx5_fs_verify_insert_header_params(dev, params);
+ if (err)
+ return err;
+ pr_pool = &hws_pool->insert_hdr_pool;
+ break;
+ case MLX5_REFORMAT_TYPE_REMOVE_HDR:
+ hws_action = mlx5_fs_get_action_remove_header_vlan(fs_ctx, params);
+ if (!hws_action)
+ mlx5_core_err(dev, "Only vlan remove header supported\n");
+ break;
+ default:
+ mlx5_core_err(ns->dev, "Packet-reformat not supported(%d)\n",
+ params->type);
+ return -EOPNOTSUPP;
+ }
+
+ if (pr_pool) {
+ pr_data = mlx5_fs_hws_pr_pool_acquire_pr(pr_pool);
+ if (IS_ERR_OR_NULL(pr_data))
+ return !pr_data ? -EINVAL : PTR_ERR(pr_data);
+ hws_action = pr_data->bulk->hws_action;
+ if (!hws_action) {
+ mlx5_core_err(dev,
+ "Failed allocating packet-reformat action\n");
+ err = -EINVAL;
+ goto release_pr;
+ }
+ pr_data->data = kmemdup(params->data, params->size, GFP_KERNEL);
+ if (!pr_data->data) {
+ err = -ENOMEM;
+ goto release_pr;
+ }
+ pr_data->hdr_idx = hdr_idx;
+ pr_data->data_size = params->size;
+ pkt_reformat->fs_hws_action.pr_data = pr_data;
+ }
+
+ mutex_init(&pkt_reformat->fs_hws_action.lock);
+ pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_HWS;
+ pkt_reformat->fs_hws_action.hws_action = hws_action;
+ return 0;
+
+release_pr:
+ if (pr_pool && pr_data)
+ mlx5_fs_hws_pr_pool_release_pr(pr_pool, pr_data);
+ return err;
+}
+
+static void mlx5_cmd_hws_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_pkt_reformat *pkt_reformat)
+{
+ struct mlx5_fs_hws_actions_pool *hws_pool = &ns->fs_hws_context.hws_pool;
+ struct mlx5_core_dev *dev = ns->dev;
+ struct mlx5_fs_hws_pr *pr_data;
+ struct mlx5_fs_pool *pr_pool;
+
+ if (pkt_reformat->fs_hws_action.fw_reformat_id != 0) {
+ struct mlx5_pkt_reformat fw_pkt_reformat = { 0 };
+
+ fw_pkt_reformat.id = pkt_reformat->fs_hws_action.fw_reformat_id;
+ mlx5_fs_cmd_get_fw_cmds()->
+ packet_reformat_dealloc(ns, &fw_pkt_reformat);
+ pkt_reformat->fs_hws_action.fw_reformat_id = 0;
+ }
+
+ if (pkt_reformat->reformat_type == MLX5_REFORMAT_TYPE_REMOVE_HDR)
+ return;
+
+ if (!pkt_reformat->fs_hws_action.pr_data) {
+ mlx5_core_err(ns->dev, "Failed release packet-reformat\n");
+ return;
+ }
+ pr_data = pkt_reformat->fs_hws_action.pr_data;
+
+ switch (pkt_reformat->reformat_type) {
+ case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
+ case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
+ case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
+ pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol2tnl_pools,
+ MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
+ pr_data->data_size);
+ break;
+ case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
+		pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol3tnl_pools,
+						    MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3,
+						    pr_data->data_size);
+ break;
+ case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
+ pr_pool = &hws_pool->dl3tnltol2_pool;
+ break;
+ case MLX5_REFORMAT_TYPE_INSERT_HDR:
+ pr_pool = &hws_pool->insert_hdr_pool;
+ break;
+ default:
+ mlx5_core_err(ns->dev, "Unknown packet-reformat type\n");
+ return;
+ }
+ if (!pkt_reformat->fs_hws_action.pr_data || IS_ERR(pr_pool)) {
+ mlx5_core_err(ns->dev, "Failed release packet-reformat\n");
+ return;
+ }
+ kfree(pr_data->data);
+ mlx5_fs_hws_pr_pool_release_pr(pr_pool, pr_data);
+ pkt_reformat->fs_hws_action.pr_data = NULL;
+}
+
+static struct mlx5_fs_pool *
+mlx5_fs_create_mh_pool(struct mlx5_core_dev *dev,
+ struct mlx5hws_action_mh_pattern *pattern,
+ struct xarray *mh_pools, unsigned long index)
+{
+ struct mlx5_fs_pool *pool;
+ int err;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+ err = mlx5_fs_hws_mh_pool_init(pool, dev, pattern);
+ if (err)
+ goto free_pool;
+ err = xa_insert(mh_pools, index, pool, GFP_KERNEL);
+ if (err)
+ goto cleanup_pool;
+ return pool;
+
+cleanup_pool:
+ mlx5_fs_hws_mh_pool_cleanup(pool);
+free_pool:
+ kfree(pool);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_fs_destroy_mh_pool(struct mlx5_fs_pool *pool, struct xarray *mh_pools,
+ unsigned long index)
+{
+ xa_erase(mh_pools, index);
+ mlx5_fs_hws_mh_pool_cleanup(pool);
+ kfree(pool);
+}
+
+static int mlx5_cmd_hws_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
+ u8 namespace, u8 num_actions,
+ void *modify_actions,
+ struct mlx5_modify_hdr *modify_hdr)
+{
+ struct mlx5_fs_hws_actions_pool *hws_pool = &ns->fs_hws_context.hws_pool;
+ struct mlx5hws_action_mh_pattern pattern = {};
+ struct mlx5_fs_hws_mh *mh_data = NULL;
+ struct mlx5hws_action *hws_action;
+ struct mlx5_fs_pool *pool;
+ unsigned long i, cnt = 0;
+ bool known_pattern;
+ int err;
+
+ pattern.sz = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions;
+ pattern.data = modify_actions;
+
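+	/* Reuse a pool whose modify-header pattern matches, else create one. */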
+ known_pattern = false;
+ xa_for_each(&hws_pool->mh_pools, i, pool) {
+ if (mlx5_fs_hws_mh_pool_match(pool, &pattern)) {
+ known_pattern = true;
+ break;
+ }
+ cnt++;
+ }
+
+ if (!known_pattern) {
+ pool = mlx5_fs_create_mh_pool(ns->dev, &pattern,
+ &hws_pool->mh_pools, cnt);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+ }
+ mh_data = mlx5_fs_hws_mh_pool_acquire_mh(pool);
+ if (IS_ERR(mh_data)) {
+ err = PTR_ERR(mh_data);
+ goto destroy_pool;
+ }
+ hws_action = mh_data->bulk->hws_action;
+ mh_data->data = kmemdup(pattern.data, pattern.sz, GFP_KERNEL);
+ if (!mh_data->data) {
+ err = -ENOMEM;
+ goto release_mh;
+ }
+ modify_hdr->fs_hws_action.mh_data = mh_data;
+ modify_hdr->fs_hws_action.fs_pool = pool;
+ modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
+ modify_hdr->fs_hws_action.hws_action = hws_action;
+
+ return 0;
+
+release_mh:
+ mlx5_fs_hws_mh_pool_release_mh(pool, mh_data);
+destroy_pool:
+ if (!known_pattern)
+ mlx5_fs_destroy_mh_pool(pool, &hws_pool->mh_pools, cnt);
+ return err;
+}
+
+static void mlx5_cmd_hws_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_modify_hdr *modify_hdr)
+{
+ struct mlx5_fs_hws_mh *mh_data;
+ struct mlx5_fs_pool *pool;
+
+ if (!modify_hdr->fs_hws_action.fs_pool || !modify_hdr->fs_hws_action.mh_data) {
+ mlx5_core_err(ns->dev, "Failed release modify-header\n");
+ return;
+ }
+
+ mh_data = modify_hdr->fs_hws_action.mh_data;
+ kfree(mh_data->data);
+ pool = modify_hdr->fs_hws_action.fs_pool;
+ mlx5_fs_hws_mh_pool_release_mh(pool, mh_data);
+ modify_hdr->fs_hws_action.mh_data = NULL;
+}
+
+int
+mlx5_fs_hws_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *reformat_id)
+{
+ enum mlx5_flow_namespace_type ns_type = pkt_reformat->ns_type;
+ struct mutex *lock = &pkt_reformat->fs_hws_action.lock;
+ u32 *id = &pkt_reformat->fs_hws_action.fw_reformat_id;
+ struct mlx5_pkt_reformat fw_pkt_reformat = { 0 };
+ struct mlx5_pkt_reformat_params params = { 0 };
+ struct mlx5_flow_root_namespace *ns;
+ struct mlx5_core_dev *dev;
+ int ret;
+
+ mutex_lock(lock);
+
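+	/* The FW reformat object is allocated lazily on first use and cached. */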
+ if (*id != 0) {
+ *reformat_id = *id;
+ ret = 0;
+ goto unlock;
+ }
+
+ dev = mlx5hws_action_get_dev(pkt_reformat->fs_hws_action.hws_action);
+ if (!dev) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ns = mlx5_get_root_namespace(dev, ns_type);
+ if (!ns) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ params.type = pkt_reformat->reformat_type;
+ params.size = pkt_reformat->fs_hws_action.pr_data->data_size;
+ params.data = pkt_reformat->fs_hws_action.pr_data->data;
+
+ ret = mlx5_fs_cmd_get_fw_cmds()->
+ packet_reformat_alloc(ns, &params, ns_type, &fw_pkt_reformat);
+ if (ret)
+ goto unlock;
+
+ *id = fw_pkt_reformat.id;
+ *reformat_id = *id;
+ ret = 0;
+
+unlock:
+ mutex_unlock(lock);
+
+ return ret;
+}
+
+static int mlx5_cmd_hws_create_match_definer(struct mlx5_flow_root_namespace *ns,
+ u16 format_id, u32 *match_mask)
+{
+ return -EOPNOTSUPP;
+}
+
+static int mlx5_cmd_hws_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
+ int definer_id)
+{
+ return -EOPNOTSUPP;
+}
+
+static u32 mlx5_cmd_hws_get_capabilities(struct mlx5_flow_root_namespace *ns,
+ enum fs_flow_table_type ft_type)
+{
+ if (ft_type != FS_FT_FDB)
+ return 0;
+
+ return MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX |
+ MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX |
+ MLX5_FLOW_STEERING_CAP_MATCH_RANGES;
+}
+
+bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev)
+{
+ return mlx5hws_is_supported(dev);
+}
+
+static const struct mlx5_flow_cmds mlx5_flow_cmds_hws = {
+ .create_flow_table = mlx5_cmd_hws_create_flow_table,
+ .destroy_flow_table = mlx5_cmd_hws_destroy_flow_table,
+ .modify_flow_table = mlx5_cmd_hws_modify_flow_table,
+ .update_root_ft = mlx5_cmd_hws_update_root_ft,
+ .create_flow_group = mlx5_cmd_hws_create_flow_group,
+ .destroy_flow_group = mlx5_cmd_hws_destroy_flow_group,
+ .create_fte = mlx5_cmd_hws_create_fte,
+ .delete_fte = mlx5_cmd_hws_delete_fte,
+ .update_fte = mlx5_cmd_hws_update_fte,
+ .packet_reformat_alloc = mlx5_cmd_hws_packet_reformat_alloc,
+ .packet_reformat_dealloc = mlx5_cmd_hws_packet_reformat_dealloc,
+ .modify_header_alloc = mlx5_cmd_hws_modify_header_alloc,
+ .modify_header_dealloc = mlx5_cmd_hws_modify_header_dealloc,
+ .create_match_definer = mlx5_cmd_hws_create_match_definer,
+ .destroy_match_definer = mlx5_cmd_hws_destroy_match_definer,
+ .create_ns = mlx5_cmd_hws_create_ns,
+ .destroy_ns = mlx5_cmd_hws_destroy_ns,
+ .set_peer = mlx5_cmd_hws_set_peer,
+ .get_capabilities = mlx5_cmd_hws_get_capabilities,
+};
+
+const struct mlx5_flow_cmds *mlx5_fs_cmd_get_hws_cmds(void)
+{
+ return &mlx5_flow_cmds_hws;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
new file mode 100644
index 000000000000..b92d55b2d147
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#ifndef _MLX5_FS_HWS_
+#define _MLX5_FS_HWS_
+
+#include "mlx5hws.h"
+#include "fs_hws_pools.h"
+
+struct mlx5_fs_hws_actions_pool {
+ struct mlx5hws_action *tag_action;
+ struct mlx5hws_action *pop_vlan_action;
+ struct mlx5hws_action *push_vlan_action;
+ struct mlx5hws_action *drop_action;
+ struct mlx5hws_action *decapl2_action;
+ struct mlx5hws_action *remove_hdr_vlan_action;
+ struct mlx5_fs_pool insert_hdr_pool;
+ struct mlx5_fs_pool dl3tnltol2_pool;
+ struct xarray el2tol3tnl_pools;
+ struct xarray el2tol2tnl_pools;
+ struct xarray mh_pools;
+ struct xarray table_dests;
+ struct xarray vport_vhca_dests;
+ struct xarray vport_dests;
+ struct xarray aso_meters;
+ struct xarray sample_dests;
+};
+
+struct mlx5_fs_hws_context {
+ struct mlx5hws_context *hws_ctx;
+ struct mlx5_fs_hws_actions_pool hws_pool;
+};
+
+struct mlx5_fs_hws_table {
+ struct mlx5hws_table *hws_table;
+ bool miss_ft_set;
+};
+
+struct mlx5_fs_hws_action {
+ struct mlx5hws_action *hws_action;
+ struct mlx5_fs_pool *fs_pool;
+ struct mlx5_fs_hws_pr *pr_data;
+ struct mlx5_fs_hws_mh *mh_data;
+ u32 fw_reformat_id;
+ /* Protect `fw_reformat_id` against being initialized from multiple
+ * threads.
+ */
+ struct mutex lock;
+};
+
+struct mlx5_fs_hws_matcher {
+ struct mlx5hws_bwc_matcher *matcher;
+};
+
+struct mlx5_fs_hws_rule_action {
+ struct mlx5hws_action *action;
+ union {
+ struct mlx5_fc *counter;
+ struct mlx5_exe_aso *exe_aso;
+ u32 sampler_id;
+ };
+};
+
+struct mlx5_fs_hws_rule {
+ struct mlx5hws_bwc_rule *bwc_rule;
+ struct mlx5_fs_hws_rule_action *hws_fs_actions;
+ int num_fs_actions;
+};
+
+struct mlx5_fs_hws_data {
+ struct mlx5hws_action *hws_action;
+ struct mutex lock; /* protects hws_action */
+ refcount_t hws_action_refcount;
+};
+
+struct mlx5_fs_hws_create_action_ctx {
+ enum mlx5hws_action_type actions_type;
+ struct mlx5hws_context *hws_ctx;
+ u32 id;
+ union {
+ u8 return_reg_id;
+ };
+};
+
+struct mlx5hws_action *
+mlx5_fs_get_hws_action(struct mlx5_fs_hws_data *fs_hws_data,
+ struct mlx5_fs_hws_create_action_ctx *create_ctx);
+void mlx5_fs_put_hws_action(struct mlx5_fs_hws_data *fs_hws_data);
+
+#ifdef CONFIG_MLX5_HW_STEERING
+
+int
+mlx5_fs_hws_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *reformat_id);
+
+bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev);
+
+const struct mlx5_flow_cmds *mlx5_fs_cmd_get_hws_cmds(void);
+
+#else
+
+static inline int
+mlx5_fs_hws_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *reformat_id)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev)
+{
+ return false;
+}
+
+static inline const struct mlx5_flow_cmds *mlx5_fs_cmd_get_hws_cmds(void)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_MLX5_HW_STEERING */
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
new file mode 100644
index 000000000000..839d71bd4216
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#include <mlx5_core.h>
+#include "fs_hws_pools.h"
+
+#define MLX5_FS_HWS_DEFAULT_BULK_LEN 65536
+#define MLX5_FS_HWS_POOL_MAX_THRESHOLD BIT(18)
+#define MLX5_FS_HWS_POOL_USED_BUFF_RATIO 10
+
+static struct mlx5hws_action *
+mlx5_fs_dl3tnltol2_bulk_action_create(struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_action_reformat_header reformat_hdr[2] = {};
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
+ enum mlx5hws_action_type reformat_type;
+ u32 log_bulk_size;
+
+ reformat_type = MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2;
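+	/* Two header layouts: plain Ethernet and Ethernet + VLAN. */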
+ reformat_hdr[MLX5_FS_DL3TNLTOL2_MAC_HDR_IDX].sz = ETH_HLEN;
+ reformat_hdr[MLX5_FS_DL3TNLTOL2_MAC_VLAN_HDR_IDX].sz = ETH_HLEN + VLAN_HLEN;
+
+ log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
+ return mlx5hws_action_create_reformat(ctx, reformat_type, 2,
+ reformat_hdr, log_bulk_size, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_el2tol3tnl_bulk_action_create(struct mlx5hws_context *ctx, size_t data_size)
+{
+ struct mlx5hws_action_reformat_header reformat_hdr = {};
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
+ enum mlx5hws_action_type reformat_type;
+ u32 log_bulk_size;
+
+ reformat_type = MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
+ reformat_hdr.sz = data_size;
+
+ log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
+ return mlx5hws_action_create_reformat(ctx, reformat_type, 1,
+ &reformat_hdr, log_bulk_size, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_el2tol2tnl_bulk_action_create(struct mlx5hws_context *ctx, size_t data_size)
+{
+ struct mlx5hws_action_reformat_header reformat_hdr = {};
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
+ enum mlx5hws_action_type reformat_type;
+ u32 log_bulk_size;
+
+ reformat_type = MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
+ reformat_hdr.sz = data_size;
+
+ log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
+ return mlx5hws_action_create_reformat(ctx, reformat_type, 1,
+ &reformat_hdr, log_bulk_size, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_insert_hdr_bulk_action_create(struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_action_insert_header insert_hdr = {};
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
+ u32 log_bulk_size;
+
+ log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
+ insert_hdr.hdr.sz = MLX5_FS_INSERT_HDR_VLAN_SIZE;
+ insert_hdr.anchor = MLX5_FS_INSERT_HDR_VLAN_ANCHOR;
+ insert_hdr.offset = MLX5_FS_INSERT_HDR_VLAN_OFFSET;
+
+ return mlx5hws_action_create_insert_header(ctx, 1, &insert_hdr,
+ log_bulk_size, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_pr_bulk_action_create(struct mlx5_core_dev *dev,
+ struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx)
+{
+ struct mlx5_flow_root_namespace *root_ns;
+ struct mlx5hws_context *ctx;
+ size_t encap_data_size;
+
+ root_ns = mlx5_get_root_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!root_ns || root_ns->mode != MLX5_FLOW_STEERING_MODE_HMFS)
+ return NULL;
+
+ ctx = root_ns->fs_hws_context.hws_ctx;
+ if (!ctx)
+ return NULL;
+
+ encap_data_size = pr_pool_ctx->encap_data_size;
+ switch (pr_pool_ctx->reformat_type) {
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
+ return mlx5_fs_dl3tnltol2_bulk_action_create(ctx);
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ return mlx5_fs_el2tol3tnl_bulk_action_create(ctx, encap_data_size);
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ return mlx5_fs_el2tol2tnl_bulk_action_create(ctx, encap_data_size);
+ case MLX5HWS_ACTION_TYP_INSERT_HEADER:
+ return mlx5_fs_insert_hdr_bulk_action_create(ctx);
+ default:
+ return NULL;
+ }
+ return NULL;
+}
+
+static struct mlx5_fs_bulk *
+mlx5_fs_hws_pr_bulk_create(struct mlx5_core_dev *dev, void *pool_ctx)
+{
+ struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx;
+ struct mlx5_fs_hws_pr_bulk *pr_bulk;
+ int bulk_len;
+ int i;
+
+ if (!pool_ctx)
+ return NULL;
+ pr_pool_ctx = pool_ctx;
+ bulk_len = MLX5_FS_HWS_DEFAULT_BULK_LEN;
+ pr_bulk = kvzalloc(struct_size(pr_bulk, prs_data, bulk_len), GFP_KERNEL);
+ if (!pr_bulk)
+ return NULL;
+
+ if (mlx5_fs_bulk_init(dev, &pr_bulk->fs_bulk, bulk_len))
+ goto free_pr_bulk;
+
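+	/* Link each reformat entry back to its bulk and record its offset. */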
+ for (i = 0; i < bulk_len; i++) {
+ pr_bulk->prs_data[i].bulk = pr_bulk;
+ pr_bulk->prs_data[i].offset = i;
+ }
+
+ pr_bulk->hws_action = mlx5_fs_pr_bulk_action_create(dev, pr_pool_ctx);
+ if (!pr_bulk->hws_action)
+ goto cleanup_fs_bulk;
+
+ return &pr_bulk->fs_bulk;
+
+cleanup_fs_bulk:
+ mlx5_fs_bulk_cleanup(&pr_bulk->fs_bulk);
+free_pr_bulk:
+ kvfree(pr_bulk);
+ return NULL;
+}
+
+static int
+mlx5_fs_hws_pr_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk)
+{
+ struct mlx5_fs_hws_pr_bulk *pr_bulk;
+
+ pr_bulk = container_of(fs_bulk, struct mlx5_fs_hws_pr_bulk, fs_bulk);
+ if (mlx5_fs_bulk_get_free_amount(fs_bulk) < fs_bulk->bulk_len) {
+ mlx5_core_err(dev, "Freeing bulk before all reformats were released\n");
+ return -EBUSY;
+ }
+
+ mlx5hws_action_destroy(pr_bulk->hws_action);
+ mlx5_fs_bulk_cleanup(fs_bulk);
+ kvfree(pr_bulk);
+
+ return 0;
+}
+
+static void mlx5_hws_pool_update_threshold(struct mlx5_fs_pool *hws_pool)
+{
+ hws_pool->threshold = min_t(int, MLX5_FS_HWS_POOL_MAX_THRESHOLD,
+ hws_pool->used_units / MLX5_FS_HWS_POOL_USED_BUFF_RATIO);
+}
+
+static const struct mlx5_fs_pool_ops mlx5_fs_hws_pr_pool_ops = {
+ .bulk_create = mlx5_fs_hws_pr_bulk_create,
+ .bulk_destroy = mlx5_fs_hws_pr_bulk_destroy,
+ .update_threshold = mlx5_hws_pool_update_threshold,
+};
+
+int mlx5_fs_hws_pr_pool_init(struct mlx5_fs_pool *pr_pool,
+ struct mlx5_core_dev *dev, size_t encap_data_size,
+ enum mlx5hws_action_type reformat_type)
+{
+ struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx;
+
+ if (reformat_type != MLX5HWS_ACTION_TYP_INSERT_HEADER &&
+ reformat_type != MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2 &&
+ reformat_type != MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3 &&
+ reformat_type != MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2)
+ return -EOPNOTSUPP;
+
+ pr_pool_ctx = kzalloc(sizeof(*pr_pool_ctx), GFP_KERNEL);
+ if (!pr_pool_ctx)
+ return -ENOMEM;
+ pr_pool_ctx->reformat_type = reformat_type;
+ pr_pool_ctx->encap_data_size = encap_data_size;
+ mlx5_fs_pool_init(pr_pool, dev, &mlx5_fs_hws_pr_pool_ops, pr_pool_ctx);
+ return 0;
+}
+
+void mlx5_fs_hws_pr_pool_cleanup(struct mlx5_fs_pool *pr_pool)
+{
+ struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx;
+
+ mlx5_fs_pool_cleanup(pr_pool);
+ pr_pool_ctx = pr_pool->pool_ctx;
+ if (!pr_pool_ctx)
+ return;
+ kfree(pr_pool_ctx);
+}
+
+struct mlx5_fs_hws_pr *
+mlx5_fs_hws_pr_pool_acquire_pr(struct mlx5_fs_pool *pr_pool)
+{
+ struct mlx5_fs_pool_index pool_index = {};
+ struct mlx5_fs_hws_pr_bulk *pr_bulk;
+ int err;
+
+ err = mlx5_fs_pool_acquire_index(pr_pool, &pool_index);
+ if (err)
+ return ERR_PTR(err);
+ pr_bulk = container_of(pool_index.fs_bulk, struct mlx5_fs_hws_pr_bulk,
+ fs_bulk);
+ return &pr_bulk->prs_data[pool_index.index];
+}
+
+void mlx5_fs_hws_pr_pool_release_pr(struct mlx5_fs_pool *pr_pool,
+ struct mlx5_fs_hws_pr *pr_data)
+{
+ struct mlx5_fs_bulk *fs_bulk = &pr_data->bulk->fs_bulk;
+ struct mlx5_fs_pool_index pool_index = {};
+ struct mlx5_core_dev *dev = pr_pool->dev;
+
+ pool_index.fs_bulk = fs_bulk;
+ pool_index.index = pr_data->offset;
+ if (mlx5_fs_pool_release_index(pr_pool, &pool_index))
+ mlx5_core_warn(dev, "Attempted to release packet reformat which is not acquired\n");
+}
+
+struct mlx5hws_action *mlx5_fs_hws_pr_get_action(struct mlx5_fs_hws_pr *pr_data)
+{
+ return pr_data->bulk->hws_action;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_mh_bulk_action_create(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_mh_pattern *pattern)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
+ u32 log_bulk_size;
+
+ log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
+ return mlx5hws_action_create_modify_header(ctx, 1, pattern,
+ log_bulk_size, flags);
+}
+
+static struct mlx5_fs_bulk *
+mlx5_fs_hws_mh_bulk_create(struct mlx5_core_dev *dev, void *pool_ctx)
+{
+ struct mlx5hws_action_mh_pattern *pattern;
+ struct mlx5_flow_root_namespace *root_ns;
+ struct mlx5_fs_hws_mh_bulk *mh_bulk;
+ struct mlx5hws_context *ctx;
+ int bulk_len;
+
+ if (!pool_ctx)
+ return NULL;
+
+ root_ns = mlx5_get_root_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!root_ns || root_ns->mode != MLX5_FLOW_STEERING_MODE_HMFS)
+ return NULL;
+
+ ctx = root_ns->fs_hws_context.hws_ctx;
+ if (!ctx)
+ return NULL;
+
+ pattern = pool_ctx;
+ bulk_len = MLX5_FS_HWS_DEFAULT_BULK_LEN;
+ mh_bulk = kvzalloc(struct_size(mh_bulk, mhs_data, bulk_len), GFP_KERNEL);
+ if (!mh_bulk)
+ return NULL;
+
+ if (mlx5_fs_bulk_init(dev, &mh_bulk->fs_bulk, bulk_len))
+ goto free_mh_bulk;
+
+ for (int i = 0; i < bulk_len; i++) {
+ mh_bulk->mhs_data[i].bulk = mh_bulk;
+ mh_bulk->mhs_data[i].offset = i;
+ }
+
+ mh_bulk->hws_action = mlx5_fs_mh_bulk_action_create(ctx, pattern);
+ if (!mh_bulk->hws_action)
+ goto cleanup_fs_bulk;
+
+ return &mh_bulk->fs_bulk;
+
+cleanup_fs_bulk:
+ mlx5_fs_bulk_cleanup(&mh_bulk->fs_bulk);
+free_mh_bulk:
+ kvfree(mh_bulk);
+ return NULL;
+}
+
+static int
+mlx5_fs_hws_mh_bulk_destroy(struct mlx5_core_dev *dev,
+ struct mlx5_fs_bulk *fs_bulk)
+{
+ struct mlx5_fs_hws_mh_bulk *mh_bulk;
+
+ mh_bulk = container_of(fs_bulk, struct mlx5_fs_hws_mh_bulk, fs_bulk);
+ if (mlx5_fs_bulk_get_free_amount(fs_bulk) < fs_bulk->bulk_len) {
+		mlx5_core_err(dev, "Freeing bulk before all modify headers were released\n");
+ return -EBUSY;
+ }
+
+ mlx5hws_action_destroy(mh_bulk->hws_action);
+ mlx5_fs_bulk_cleanup(fs_bulk);
+ kvfree(mh_bulk);
+
+ return 0;
+}
+
+static const struct mlx5_fs_pool_ops mlx5_fs_hws_mh_pool_ops = {
+ .bulk_create = mlx5_fs_hws_mh_bulk_create,
+ .bulk_destroy = mlx5_fs_hws_mh_bulk_destroy,
+ .update_threshold = mlx5_hws_pool_update_threshold,
+};
+
+int mlx5_fs_hws_mh_pool_init(struct mlx5_fs_pool *fs_hws_mh_pool,
+ struct mlx5_core_dev *dev,
+ struct mlx5hws_action_mh_pattern *pattern)
+{
+ struct mlx5hws_action_mh_pattern *pool_pattern;
+
+ pool_pattern = kzalloc(sizeof(*pool_pattern), GFP_KERNEL);
+ if (!pool_pattern)
+ return -ENOMEM;
+ pool_pattern->data = kmemdup(pattern->data, pattern->sz, GFP_KERNEL);
+ if (!pool_pattern->data) {
+ kfree(pool_pattern);
+ return -ENOMEM;
+ }
+ pool_pattern->sz = pattern->sz;
+ mlx5_fs_pool_init(fs_hws_mh_pool, dev, &mlx5_fs_hws_mh_pool_ops,
+ pool_pattern);
+ return 0;
+}
+
+void mlx5_fs_hws_mh_pool_cleanup(struct mlx5_fs_pool *fs_hws_mh_pool)
+{
+ struct mlx5hws_action_mh_pattern *pool_pattern;
+
+ mlx5_fs_pool_cleanup(fs_hws_mh_pool);
+ pool_pattern = fs_hws_mh_pool->pool_ctx;
+ if (!pool_pattern)
+ return;
+ kfree(pool_pattern->data);
+ kfree(pool_pattern);
+}
+
+struct mlx5_fs_hws_mh *
+mlx5_fs_hws_mh_pool_acquire_mh(struct mlx5_fs_pool *mh_pool)
+{
+ struct mlx5_fs_pool_index pool_index = {};
+ struct mlx5_fs_hws_mh_bulk *mh_bulk;
+ int err;
+
+ err = mlx5_fs_pool_acquire_index(mh_pool, &pool_index);
+ if (err)
+ return ERR_PTR(err);
+ mh_bulk = container_of(pool_index.fs_bulk, struct mlx5_fs_hws_mh_bulk,
+ fs_bulk);
+ return &mh_bulk->mhs_data[pool_index.index];
+}
+
+void mlx5_fs_hws_mh_pool_release_mh(struct mlx5_fs_pool *mh_pool,
+ struct mlx5_fs_hws_mh *mh_data)
+{
+ struct mlx5_fs_bulk *fs_bulk = &mh_data->bulk->fs_bulk;
+ struct mlx5_fs_pool_index pool_index = {};
+ struct mlx5_core_dev *dev = mh_pool->dev;
+
+ pool_index.fs_bulk = fs_bulk;
+ pool_index.index = mh_data->offset;
+ if (mlx5_fs_pool_release_index(mh_pool, &pool_index))
+ mlx5_core_warn(dev, "Attempted to release modify header which is not acquired\n");
+}
+
+bool mlx5_fs_hws_mh_pool_match(struct mlx5_fs_pool *mh_pool,
+ struct mlx5hws_action_mh_pattern *pattern)
+{
+ struct mlx5hws_action_mh_pattern *pool_pattern;
+ int num_actions, i;
+
+ pool_pattern = mh_pool->pool_ctx;
+ if (WARN_ON_ONCE(!pool_pattern))
+ return false;
+
+ if (pattern->sz != pool_pattern->sz)
+ return false;
+ num_actions = pattern->sz / MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
+ for (i = 0; i < num_actions; i++) {
+ if ((__force __be32)pattern->data[i] !=
+ (__force __be32)pool_pattern->data[i])
+ return false;
+ }
+ return true;
+}
+
+struct mlx5hws_action *mlx5_fc_get_hws_action(struct mlx5hws_context *ctx,
+ struct mlx5_fc *counter)
+{
+ struct mlx5_fs_hws_create_action_ctx create_ctx;
+ struct mlx5_fc_bulk *fc_bulk = counter->bulk;
+ struct mlx5hws_action *hws_action;
+
+ create_ctx.hws_ctx = ctx;
+ create_ctx.id = fc_bulk->base_id;
+ create_ctx.actions_type = MLX5HWS_ACTION_TYP_CTR;
+
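+	/* Hold a counter reference; dropped below if no HWS action is obtained. */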
+ mlx5_fc_local_get(counter);
+ hws_action = mlx5_fs_get_hws_action(&fc_bulk->hws_data, &create_ctx);
+ if (!hws_action)
+ mlx5_fc_local_put(counter);
+ return hws_action;
+}
+
+void mlx5_fc_put_hws_action(struct mlx5_fc *counter)
+{
+ mlx5_fs_put_hws_action(&counter->bulk->hws_data);
+ mlx5_fc_local_put(counter);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h
new file mode 100644
index 000000000000..34072551dd21
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#ifndef __MLX5_FS_HWS_POOLS_H__
+#define __MLX5_FS_HWS_POOLS_H__
+
+#include <linux/if_vlan.h>
+#include "fs_pool.h"
+#include "fs_core.h"
+
+#define MLX5_FS_INSERT_HDR_VLAN_ANCHOR MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START
+#define MLX5_FS_INSERT_HDR_VLAN_OFFSET offsetof(struct vlan_ethhdr, h_vlan_proto)
+#define MLX5_FS_INSERT_HDR_VLAN_SIZE sizeof(struct vlan_hdr)
+
+enum {
+ MLX5_FS_DL3TNLTOL2_MAC_HDR_IDX = 0,
+ MLX5_FS_DL3TNLTOL2_MAC_VLAN_HDR_IDX,
+};
+
+struct mlx5_fs_hws_pr {
+ struct mlx5_fs_hws_pr_bulk *bulk;
+ u32 offset;
+ u8 hdr_idx;
+ u8 *data;
+ size_t data_size;
+};
+
+struct mlx5_fs_hws_pr_bulk {
+ struct mlx5_fs_bulk fs_bulk;
+ struct mlx5hws_action *hws_action;
+ struct mlx5_fs_hws_pr prs_data[];
+};
+
+struct mlx5_fs_hws_pr_pool_ctx {
+ enum mlx5hws_action_type reformat_type;
+ size_t encap_data_size;
+};
+
+struct mlx5_fs_hws_mh {
+ struct mlx5_fs_hws_mh_bulk *bulk;
+ u32 offset;
+ u8 *data;
+};
+
+struct mlx5_fs_hws_mh_bulk {
+ struct mlx5_fs_bulk fs_bulk;
+ struct mlx5_fs_pool *mh_pool;
+ struct mlx5hws_action *hws_action;
+ struct mlx5_fs_hws_mh mhs_data[];
+};
+
+int mlx5_fs_hws_pr_pool_init(struct mlx5_fs_pool *pr_pool,
+ struct mlx5_core_dev *dev, size_t encap_data_size,
+ enum mlx5hws_action_type reformat_type);
+void mlx5_fs_hws_pr_pool_cleanup(struct mlx5_fs_pool *pr_pool);
+
+struct mlx5_fs_hws_pr *mlx5_fs_hws_pr_pool_acquire_pr(struct mlx5_fs_pool *pr_pool);
+void mlx5_fs_hws_pr_pool_release_pr(struct mlx5_fs_pool *pr_pool,
+ struct mlx5_fs_hws_pr *pr_data);
+struct mlx5hws_action *mlx5_fs_hws_pr_get_action(struct mlx5_fs_hws_pr *pr_data);
+int mlx5_fs_hws_mh_pool_init(struct mlx5_fs_pool *fs_hws_mh_pool,
+ struct mlx5_core_dev *dev,
+ struct mlx5hws_action_mh_pattern *pattern);
+void mlx5_fs_hws_mh_pool_cleanup(struct mlx5_fs_pool *fs_hws_mh_pool);
+struct mlx5_fs_hws_mh *mlx5_fs_hws_mh_pool_acquire_mh(struct mlx5_fs_pool *mh_pool);
+void mlx5_fs_hws_mh_pool_release_mh(struct mlx5_fs_pool *mh_pool,
+ struct mlx5_fs_hws_mh *mh_data);
+bool mlx5_fs_hws_mh_pool_match(struct mlx5_fs_pool *mh_pool,
+ struct mlx5hws_action_mh_pattern *pattern);
+struct mlx5hws_action *mlx5_fc_get_hws_action(struct mlx5hws_context *ctx,
+ struct mlx5_fc *counter);
+void mlx5_fc_put_hws_action(struct mlx5_fc *counter);
+#endif /* __MLX5_FS_HWS_POOLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_internal.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h
index 5643be1cd5bf..21279d503117 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_internal.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#ifndef MLX5HWS_INTERNAL_H_
-#define MLX5HWS_INTERNAL_H_
+#ifndef HWS_INTERNAL_H_
+#define HWS_INTERNAL_H_
#include <linux/mlx5/transobj.h>
#include <linux/mlx5/vport.h>
@@ -10,22 +10,23 @@
#include "wq.h"
#include "lib/mlx5.h"
-#include "mlx5hws_prm.h"
+#include "prm.h"
#include "mlx5hws.h"
-#include "mlx5hws_pool.h"
-#include "mlx5hws_vport.h"
-#include "mlx5hws_context.h"
-#include "mlx5hws_table.h"
-#include "mlx5hws_send.h"
-#include "mlx5hws_rule.h"
-#include "mlx5hws_cmd.h"
-#include "mlx5hws_action.h"
-#include "mlx5hws_definer.h"
-#include "mlx5hws_matcher.h"
-#include "mlx5hws_debug.h"
-#include "mlx5hws_pat_arg.h"
-#include "mlx5hws_bwc.h"
-#include "mlx5hws_bwc_complex.h"
+#include "pool.h"
+#include "vport.h"
+#include "context.h"
+#include "table.h"
+#include "send.h"
+#include "action_ste_pool.h"
+#include "rule.h"
+#include "cmd.h"
+#include "action.h"
+#include "definer.h"
+#include "matcher.h"
+#include "debug.h"
+#include "pat_arg.h"
+#include "bwc.h"
+#include "bwc_complex.h"
#define W_SIZE 2
#define DW_SIZE 4
@@ -39,7 +40,6 @@
#define mlx5hws_dbg(ctx, arg...) mlx5_core_dbg((ctx)->mdev, ##arg)
#define MLX5HWS_TABLE_TYPE_BASE 2
-#define MLX5HWS_ACTION_STE_IDX_ANY 0
static inline bool is_mem_zero(const u8 *mem, size_t size)
{
@@ -56,4 +56,4 @@ static inline unsigned long align(unsigned long val, unsigned long align)
return (val + align - 1) & ~(align - 1);
}
-#endif /* MLX5HWS_INTERNAL_H_ */
+#endif /* HWS_INTERNAL_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
index 61a1155d4b4f..32f87fdf3213 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
@@ -1,26 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#include "mlx5hws_internal.h"
-
-enum mlx5hws_matcher_rtc_type {
- HWS_MATCHER_RTC_TYPE_MATCH,
- HWS_MATCHER_RTC_TYPE_STE_ARRAY,
- HWS_MATCHER_RTC_TYPE_MAX,
-};
-
-static const char * const mlx5hws_matcher_rtc_type_str[] = {
- [HWS_MATCHER_RTC_TYPE_MATCH] = "MATCH",
- [HWS_MATCHER_RTC_TYPE_STE_ARRAY] = "STE_ARRAY",
- [HWS_MATCHER_RTC_TYPE_MAX] = "UNKNOWN",
-};
-
-static const char *hws_matcher_rtc_type_to_str(enum mlx5hws_matcher_rtc_type rtc_type)
-{
- if (rtc_type > HWS_MATCHER_RTC_TYPE_MAX)
- rtc_type = HWS_MATCHER_RTC_TYPE_MAX;
- return mlx5hws_matcher_rtc_type_str[rtc_type];
-}
+#include "internal.h"
static bool hws_matcher_requires_col_tbl(u8 log_num_of_rules)
{
@@ -42,19 +23,202 @@ static void hws_matcher_destroy_end_ft(struct mlx5hws_matcher *matcher)
mlx5hws_table_destroy_default_ft(matcher->tbl, matcher->end_ft_id);
}
+int mlx5hws_matcher_update_end_ft_isolated(struct mlx5hws_table *tbl,
+ u32 miss_ft_id)
+{
+ struct mlx5hws_matcher *tmp_matcher;
+
+ if (list_empty(&tbl->matchers_list))
+ return -EINVAL;
+
+ /* Update isolated_matcher_end_ft_id attribute for all
+	 * the matchers in the isolated table.
+ */
+ list_for_each_entry(tmp_matcher, &tbl->matchers_list, list_node)
+ tmp_matcher->attr.isolated_matcher_end_ft_id = miss_ft_id;
+
+ tmp_matcher = list_last_entry(&tbl->matchers_list,
+ struct mlx5hws_matcher,
+ list_node);
+
+ return mlx5hws_table_ft_set_next_ft(tbl->ctx,
+ tmp_matcher->end_ft_id,
+ tbl->fw_ft_type,
+ miss_ft_id);
+}
+
+static int hws_matcher_connect_end_ft_isolated(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_table *tbl = matcher->tbl;
+ u32 end_ft_id;
+ int ret;
+
+ /* Reset end_ft next RTCs */
+ ret = mlx5hws_table_ft_set_next_rtc(tbl->ctx,
+ matcher->end_ft_id,
+ matcher->tbl->fw_ft_type,
+ 0, 0);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Isolated matcher: failed to reset FT's next RTCs\n");
+ return ret;
+ }
+
+ /* Connect isolated matcher's end_ft to the complex matcher's end FT */
+ end_ft_id = matcher->attr.isolated_matcher_end_ft_id;
+ ret = mlx5hws_table_ft_set_next_ft(tbl->ctx,
+ matcher->end_ft_id,
+ matcher->tbl->fw_ft_type,
+ end_ft_id);
+
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Isolated matcher: failed to set FT's miss_ft_id\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hws_matcher_create_end_ft_isolated(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_table *tbl = matcher->tbl;
+ int ret;
+
+ ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev,
+ tbl,
+ 0,
+ &matcher->end_ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Isolated matcher: failed to create end flow table\n");
+ return ret;
+ }
+
+ ret = hws_matcher_connect_end_ft_isolated(matcher);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Isolated matcher: failed to connect end FT\n");
+ goto destroy_default_ft;
+ }
+
+ return 0;
+
+destroy_default_ft:
+ mlx5hws_table_destroy_default_ft(tbl, matcher->end_ft_id);
+ return ret;
+}
+
static int hws_matcher_create_end_ft(struct mlx5hws_matcher *matcher)
{
struct mlx5hws_table *tbl = matcher->tbl;
int ret;
- ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, &matcher->end_ft_id);
+ if (mlx5hws_matcher_is_isolated(matcher))
+ ret = hws_matcher_create_end_ft_isolated(matcher);
+ else
+ ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev,
+ tbl,
+ 0,
+ &matcher->end_ft_id);
+
if (ret) {
mlx5hws_err(tbl->ctx, "Failed to create matcher end flow table\n");
return ret;
}
+
+ return 0;
+}
+
+static int hws_matcher_connect_isolated_first(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_context *ctx = tbl->ctx;
+ int ret;
+
+ /* Isolated matcher's end_ft is already pointing to the end_ft
+ * of the complex matcher - it was set at creation of end_ft,
+ * so no need to connect it.
+ * We still need to connect the isolated table's start FT to
+ * this matcher's RTC.
+ */
+ ret = mlx5hws_table_ft_set_next_rtc(ctx,
+ tbl->ft_id,
+ tbl->fw_ft_type,
+ matcher->match_ste.rtc_0_id,
+ matcher->match_ste.rtc_1_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Isolated matcher: failed to connect start FT to match RTC\n");
+ return ret;
+ }
+
+ /* Reset table's FT default miss (drop refcount) */
+ ret = mlx5hws_table_ft_set_default_next_ft(tbl, tbl->ft_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Isolated matcher: failed to reset table ft default miss\n");
+ return ret;
+ }
+
+ list_add(&matcher->list_node, &tbl->matchers_list);
+
+ return ret;
+}
+
+static int hws_matcher_connect_isolated_last(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_context *ctx = tbl->ctx;
+ struct mlx5hws_matcher *last;
+ int ret;
+
+ last = list_last_entry(&tbl->matchers_list,
+ struct mlx5hws_matcher,
+ list_node);
+
+ /* New matcher's end_ft is already pointing to the end_ft of
+ * the complex matcher.
+ * Connect previous matcher's end_ft to this new matcher RTC.
+ */
+ ret = mlx5hws_table_ft_set_next_rtc(ctx,
+ last->end_ft_id,
+ tbl->fw_ft_type,
+ matcher->match_ste.rtc_0_id,
+ matcher->match_ste.rtc_1_id);
+ if (ret) {
+ mlx5hws_err(ctx,
+ "Isolated matcher: failed to connect matcher end_ft to new match RTC\n");
+ return ret;
+ }
+
+ /* Reset prev matcher FT default miss (drop refcount) */
+ ret = mlx5hws_table_ft_set_default_next_ft(tbl, last->end_ft_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Isolated matcher: failed to reset matcher ft default miss\n");
+ return ret;
+ }
+
+ /* Insert after the last matcher */
+ list_add(&matcher->list_node, &last->list_node);
+
return 0;
}
+static int hws_matcher_connect_isolated(struct mlx5hws_matcher *matcher)
+{
+ /* Isolated matcher is expected to be the only one in its table.
+	 * However, it can have a collision matcher, and it can go through
+	 * the rehash process, in which case we will temporarily have both
+	 * the old and the new matcher in the isolated table.
+ * Check if this is the first matcher in the isolated table.
+ */
+ if (list_empty(&matcher->tbl->matchers_list))
+ return hws_matcher_connect_isolated_first(matcher);
+
+ /* If this wasn't the first matcher, then we have 3 possible cases:
+ * - this is a collision matcher for the first matcher
+ * - this is a new rehash dest matcher
+ * - this is a collision matcher for the new rehash dest matcher
+	 * The logic to add a new matcher is the same for all these cases.
+ */
+ return hws_matcher_connect_isolated_last(matcher);
+}
+
static int hws_matcher_connect(struct mlx5hws_matcher *matcher)
{
struct mlx5hws_table *tbl = matcher->tbl;
@@ -64,6 +228,9 @@ static int hws_matcher_connect(struct mlx5hws_matcher *matcher)
struct mlx5hws_matcher *tmp_matcher;
int ret;
+ if (mlx5hws_matcher_is_isolated(matcher))
+ return hws_matcher_connect_isolated(matcher);
+
/* Find location in matcher list */
if (list_empty(&tbl->matchers_list)) {
list_add(&matcher->list_node, &tbl->matchers_list);
@@ -140,6 +307,92 @@ remove_from_list:
return ret;
}
+static int hws_matcher_disconnect_isolated(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher *first, *last, *prev, *next;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_context *ctx = tbl->ctx;
+ u32 end_ft_id;
+ int ret;
+
+ first = list_first_entry(&tbl->matchers_list,
+ struct mlx5hws_matcher,
+ list_node);
+ last = list_last_entry(&tbl->matchers_list,
+ struct mlx5hws_matcher,
+ list_node);
+ prev = list_prev_entry(matcher, list_node);
+ next = list_next_entry(matcher, list_node);
+
+ list_del_init(&matcher->list_node);
+
+ if (first == last) {
+ /* This was the only matcher in the list.
+ * Reset isolated table FT next RTCs and connect it
+ * to the whole complex matcher end FT instead.
+ */
+ ret = mlx5hws_table_ft_set_next_rtc(ctx,
+ tbl->ft_id,
+ tbl->fw_ft_type,
+ 0, 0);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Isolated matcher: failed to reset FT's next RTCs\n");
+ return ret;
+ }
+
+ end_ft_id = matcher->attr.isolated_matcher_end_ft_id;
+ ret = mlx5hws_table_ft_set_next_ft(tbl->ctx,
+ tbl->ft_id,
+ tbl->fw_ft_type,
+ end_ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Isolated matcher: failed to set FT's miss_ft_id\n");
+ return ret;
+ }
+
+ return 0;
+ }
+
+ /* At this point we know that there are more matchers in the list */
+
+ if (matcher == first) {
+ /* We've disconnected the first matcher.
+ * Now update isolated table default FT.
+ */
+ if (!next)
+ return -EINVAL;
+ return mlx5hws_table_ft_set_next_rtc(ctx,
+ tbl->ft_id,
+ tbl->fw_ft_type,
+ next->match_ste.rtc_0_id,
+ next->match_ste.rtc_1_id);
+ }
+
+ if (matcher == last) {
+ /* If we've disconnected the last matcher - update prev
+ * matcher's end_ft to point to the complex matcher end_ft.
+ */
+ if (!prev)
+ return -EINVAL;
+ return hws_matcher_connect_end_ft_isolated(prev);
+ }
+
+ /* This wasn't the first or the last matcher, which means that it has
+ * both prev and next matchers. Note that this only happens if we're
+	 * disconnecting the collision matcher of the old matcher during rehash.
+ */
+ if (!prev || !next ||
+ !(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION))
+ return -EINVAL;
+
+ /* Update prev end FT to point to next match RTC */
+ return mlx5hws_table_ft_set_next_rtc(ctx,
+ prev->end_ft_id,
+ tbl->fw_ft_type,
+ next->match_ste.rtc_0_id,
+ next->match_ste.rtc_1_id);
+}
+
static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
{
struct mlx5hws_matcher *next = NULL, *prev = NULL;
@@ -147,6 +400,9 @@ static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
u32 prev_ft_id = tbl->ft_id;
int ret;
+ if (mlx5hws_matcher_is_isolated(matcher))
+ return hws_matcher_disconnect_isolated(matcher);
+
if (!list_is_first(&matcher->list_node, &tbl->matchers_list)) {
prev = list_prev_entry(matcher, list_node);
prev_ft_id = prev->end_ft_id;
@@ -165,14 +421,14 @@ static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
next->match_ste.rtc_0_id,
next->match_ste.rtc_1_id);
if (ret) {
- mlx5hws_err(tbl->ctx, "Failed to disconnect matcher\n");
- goto matcher_reconnect;
+ mlx5hws_err(tbl->ctx, "Fatal error, failed to disconnect matcher\n");
+ return ret;
}
} else {
ret = mlx5hws_table_connect_to_miss_table(tbl, tbl->default_miss.miss_tbl);
if (ret) {
- mlx5hws_err(tbl->ctx, "Failed to disconnect last matcher\n");
- goto matcher_reconnect;
+ mlx5hws_err(tbl->ctx, "Fatal error, failed to disconnect last matcher\n");
+ return ret;
}
}
@@ -180,169 +436,115 @@ static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
if (prev_ft_id == tbl->ft_id) {
ret = mlx5hws_table_update_connected_miss_tables(tbl);
if (ret) {
- mlx5hws_err(tbl->ctx, "Fatal error, failed to update connected miss table\n");
- goto matcher_reconnect;
+ mlx5hws_err(tbl->ctx,
+ "Fatal error, failed to update connected miss table\n");
+ return ret;
}
}
ret = mlx5hws_table_ft_set_default_next_ft(tbl, prev_ft_id);
if (ret) {
mlx5hws_err(tbl->ctx, "Fatal error, failed to restore matcher ft default miss\n");
- goto matcher_reconnect;
+ return ret;
}
return 0;
-
-matcher_reconnect:
- if (list_empty(&tbl->matchers_list) || !prev)
- list_add(&matcher->list_node, &tbl->matchers_list);
- else
- /* insert after prev matcher */
- list_add(&matcher->list_node, &prev->list_node);
-
- return ret;
}
static void hws_matcher_set_rtc_attr_sz(struct mlx5hws_matcher *matcher,
struct mlx5hws_cmd_rtc_create_attr *rtc_attr,
- enum mlx5hws_matcher_rtc_type rtc_type,
bool is_mirror)
{
- struct mlx5hws_pool_chunk *ste = &matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].ste;
enum mlx5hws_matcher_flow_src flow_src = matcher->attr.optimize_flow_src;
- bool is_match_rtc = rtc_type == HWS_MATCHER_RTC_TYPE_MATCH;
if ((flow_src == MLX5HWS_MATCHER_FLOW_SRC_VPORT && !is_mirror) ||
(flow_src == MLX5HWS_MATCHER_FLOW_SRC_WIRE && is_mirror)) {
/* Optimize FDB RTC */
rtc_attr->log_size = 0;
rtc_attr->log_depth = 0;
- } else {
- /* Keep original values */
- rtc_attr->log_size = is_match_rtc ? matcher->attr.table.sz_row_log : ste->order;
- rtc_attr->log_depth = is_match_rtc ? matcher->attr.table.sz_col_log : 0;
}
}
-static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher,
- enum mlx5hws_matcher_rtc_type rtc_type,
- u8 action_ste_selector)
+static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher)
{
struct mlx5hws_matcher_attr *attr = &matcher->attr;
struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0};
struct mlx5hws_match_template *mt = matcher->mt;
struct mlx5hws_context *ctx = matcher->tbl->ctx;
- struct mlx5hws_action_default_stc *default_stc;
- struct mlx5hws_matcher_action_ste *action_ste;
+ union mlx5hws_matcher_size *size_rx, *size_tx;
struct mlx5hws_table *tbl = matcher->tbl;
- struct mlx5hws_pool *ste_pool, *stc_pool;
- struct mlx5hws_pool_chunk *ste;
- u32 *rtc_0_id, *rtc_1_id;
u32 obj_id;
int ret;
- switch (rtc_type) {
- case HWS_MATCHER_RTC_TYPE_MATCH:
- rtc_0_id = &matcher->match_ste.rtc_0_id;
- rtc_1_id = &matcher->match_ste.rtc_1_id;
- ste_pool = matcher->match_ste.pool;
- ste = &matcher->match_ste.ste;
- ste->order = attr->table.sz_col_log + attr->table.sz_row_log;
-
- rtc_attr.log_size = attr->table.sz_row_log;
- rtc_attr.log_depth = attr->table.sz_col_log;
- rtc_attr.is_frst_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
- rtc_attr.is_scnd_range = 0;
- rtc_attr.miss_ft_id = matcher->end_ft_id;
-
- if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_HASH) {
- /* The usual Hash Table */
- rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH;
-
- /* The first mt is used since all share the same definer */
- rtc_attr.match_definer_0 = mlx5hws_definer_get_id(mt->definer);
- } else if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX) {
- rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
- rtc_attr.num_hash_definer = 1;
-
- if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) {
- /* Hash Split Table */
- rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH;
- rtc_attr.match_definer_0 = mlx5hws_definer_get_id(mt->definer);
- } else if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR) {
- /* Linear Lookup Table */
- rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR;
- rtc_attr.match_definer_0 = ctx->caps->linear_match_definer;
- }
- }
-
- /* Match pool requires implicit allocation */
- ret = mlx5hws_pool_chunk_alloc(ste_pool, ste);
- if (ret) {
- mlx5hws_err(ctx, "Failed to allocate STE for %s RTC",
- hws_matcher_rtc_type_to_str(rtc_type));
- return ret;
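+	/* Match RTCs are sized per direction: RX is set up here, TX below
+	 * in the FDB mirror path.
+	 */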
+ size_rx = &attr->size[MLX5HWS_MATCHER_SIZE_TYPE_RX];
+ size_tx = &attr->size[MLX5HWS_MATCHER_SIZE_TYPE_TX];
+
+ rtc_attr.log_size = size_rx->table.sz_row_log;
+ rtc_attr.log_depth = size_rx->table.sz_col_log;
+ rtc_attr.is_frst_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
+ rtc_attr.is_scnd_range = 0;
+ rtc_attr.miss_ft_id = matcher->end_ft_id;
+
+ if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_HASH) {
+ /* The usual Hash Table */
+ rtc_attr.update_index_mode =
+ MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH;
+
+ /* The first mt is used since all share the same definer */
+ rtc_attr.match_definer_0 = mlx5hws_definer_get_id(mt->definer);
+ } else if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX) {
+ rtc_attr.update_index_mode =
+ MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
+ rtc_attr.num_hash_definer = 1;
+
+ if (attr->distribute_mode ==
+ MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) {
+ /* Hash Split Table */
+ rtc_attr.access_index_mode =
+ MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH;
+ rtc_attr.match_definer_0 =
+ mlx5hws_definer_get_id(mt->definer);
+ } else if (attr->distribute_mode ==
+ MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR) {
+ /* Linear Lookup Table */
+ rtc_attr.access_index_mode =
+ MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR;
+ rtc_attr.match_definer_0 =
+ ctx->caps->linear_match_definer;
}
- break;
-
- case HWS_MATCHER_RTC_TYPE_STE_ARRAY:
- action_ste = &matcher->action_ste[action_ste_selector];
-
- rtc_0_id = &action_ste->rtc_0_id;
- rtc_1_id = &action_ste->rtc_1_id;
- ste_pool = action_ste->pool;
- ste = &action_ste->ste;
- ste->order = ilog2(roundup_pow_of_two(action_ste->max_stes)) +
- attr->table.sz_row_log;
- rtc_attr.log_size = ste->order;
- rtc_attr.log_depth = 0;
- rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
- /* The action STEs use the default always hit definer */
- rtc_attr.match_definer_0 = ctx->caps->trivial_match_definer;
- rtc_attr.is_frst_jumbo = false;
- rtc_attr.miss_ft_id = 0;
- break;
-
- default:
- mlx5hws_err(ctx, "HWS Invalid RTC type\n");
- return -EINVAL;
}
- obj_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
-
rtc_attr.pd = ctx->pd_num;
- rtc_attr.ste_base = obj_id;
- rtc_attr.ste_offset = ste->offset;
+ rtc_attr.ste_base = matcher->match_ste.ste_0_base;
rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx);
rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(tbl->type, false);
- hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, rtc_type, false);
+ hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, false);
/* STC is a single resource (obj_id), use any STC for the ID */
- stc_pool = ctx->stc_pool[tbl->type];
- default_stc = ctx->common_res[tbl->type].default_stc;
- obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit);
+ obj_id = mlx5hws_pool_get_base_id(ctx->stc_pool);
rtc_attr.stc_base = obj_id;
- ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_0_id);
+ ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr,
+ &matcher->match_ste.rtc_0_id);
if (ret) {
- mlx5hws_err(ctx, "Failed to create matcher RTC of type %s",
- hws_matcher_rtc_type_to_str(rtc_type));
- goto free_ste;
+ mlx5hws_err(ctx, "Failed to create matcher RTC\n");
+ return ret;
}
if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
- obj_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
- rtc_attr.ste_base = obj_id;
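+		/* The mirror (TX) RTC uses the TX sizing and the TX STE base. */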
+ rtc_attr.log_size = size_tx->table.sz_row_log;
+ rtc_attr.log_depth = size_tx->table.sz_col_log;
+ rtc_attr.ste_base = matcher->match_ste.ste_1_base;
rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(tbl->type, true);
- obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, &default_stc->default_hit);
+ obj_id = mlx5hws_pool_get_base_mirror_id(ctx->stc_pool);
rtc_attr.stc_base = obj_id;
- hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, rtc_type, true);
+ hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, true);
- ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_1_id);
+ ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr,
+ &matcher->match_ste.rtc_1_id);
if (ret) {
- mlx5hws_err(ctx, "Failed to create peer matcher RTC of type %s",
- hws_matcher_rtc_type_to_str(rtc_type));
+ mlx5hws_err(ctx, "Failed to create mirror matcher RTC\n");
goto destroy_rtc_0;
}
}
@@ -350,47 +552,18 @@ static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher,
return 0;
destroy_rtc_0:
- mlx5hws_cmd_rtc_destroy(ctx->mdev, *rtc_0_id);
-free_ste:
- if (rtc_type == HWS_MATCHER_RTC_TYPE_MATCH)
- mlx5hws_pool_chunk_free(ste_pool, ste);
+ mlx5hws_cmd_rtc_destroy(ctx->mdev, matcher->match_ste.rtc_0_id);
return ret;
}
-static void hws_matcher_destroy_rtc(struct mlx5hws_matcher *matcher,
- enum mlx5hws_matcher_rtc_type rtc_type,
- u8 action_ste_selector)
+static void hws_matcher_destroy_rtc(struct mlx5hws_matcher *matcher)
{
- struct mlx5hws_matcher_action_ste *action_ste;
- struct mlx5hws_table *tbl = matcher->tbl;
- struct mlx5hws_pool_chunk *ste;
- struct mlx5hws_pool *ste_pool;
- u32 rtc_0_id, rtc_1_id;
-
- switch (rtc_type) {
- case HWS_MATCHER_RTC_TYPE_MATCH:
- rtc_0_id = matcher->match_ste.rtc_0_id;
- rtc_1_id = matcher->match_ste.rtc_1_id;
- ste_pool = matcher->match_ste.pool;
- ste = &matcher->match_ste.ste;
- break;
- case HWS_MATCHER_RTC_TYPE_STE_ARRAY:
- action_ste = &matcher->action_ste[action_ste_selector];
- rtc_0_id = action_ste->rtc_0_id;
- rtc_1_id = action_ste->rtc_1_id;
- ste_pool = action_ste->pool;
- ste = &action_ste->ste;
- break;
- default:
- return;
- }
+ struct mlx5_core_dev *mdev = matcher->tbl->ctx->mdev;
- if (tbl->type == MLX5HWS_TABLE_TYPE_FDB)
- mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, rtc_1_id);
+ if (matcher->tbl->type == MLX5HWS_TABLE_TYPE_FDB)
+ mlx5hws_cmd_rtc_destroy(mdev, matcher->match_ste.rtc_1_id);
- mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, rtc_0_id);
- if (rtc_type == HWS_MATCHER_RTC_TYPE_MATCH)
- mlx5hws_pool_chunk_free(ste_pool, ste);
+ mlx5hws_cmd_rtc_destroy(mdev, matcher->match_ste.rtc_0_id);
}
static int
@@ -398,43 +571,38 @@ hws_matcher_check_attr_sz(struct mlx5hws_cmd_query_caps *caps,
struct mlx5hws_matcher *matcher)
{
struct mlx5hws_matcher_attr *attr = &matcher->attr;
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ union mlx5hws_matcher_size *size;
+ int i;
- if (attr->table.sz_col_log > caps->rtc_log_depth_max) {
- mlx5hws_err(matcher->tbl->ctx, "Matcher depth exceeds limit %d\n",
- caps->rtc_log_depth_max);
- return -EOPNOTSUPP;
- }
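+	/* Validate both the RX and TX sizes against device capabilities. */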
+ for (i = 0; i < 2; i++) {
+ size = &attr->size[i];
- if (attr->table.sz_col_log + attr->table.sz_row_log > caps->ste_alloc_log_max) {
- mlx5hws_err(matcher->tbl->ctx, "Total matcher size exceeds limit %d\n",
- caps->ste_alloc_log_max);
- return -EOPNOTSUPP;
- }
+ if (size->table.sz_col_log > caps->rtc_log_depth_max) {
+ mlx5hws_err(ctx, "Matcher depth exceeds limit %d\n",
+ caps->rtc_log_depth_max);
+ return -EOPNOTSUPP;
+ }
- if (attr->table.sz_col_log + attr->table.sz_row_log < caps->ste_alloc_log_gran) {
- mlx5hws_err(matcher->tbl->ctx, "Total matcher size below limit %d\n",
- caps->ste_alloc_log_gran);
- return -EOPNOTSUPP;
+ if (size->table.sz_col_log + size->table.sz_row_log >
+ caps->ste_alloc_log_max) {
+ mlx5hws_err(ctx,
+ "Total matcher size exceeds limit %d\n",
+ caps->ste_alloc_log_max);
+ return -EOPNOTSUPP;
+ }
+
+ if (size->table.sz_col_log + size->table.sz_row_log <
+ caps->ste_alloc_log_gran) {
+ mlx5hws_err(ctx, "Total matcher size below limit %d\n",
+ caps->ste_alloc_log_gran);
+ return -EOPNOTSUPP;
+ }
}
return 0;
}
-static void hws_matcher_set_pool_attr(struct mlx5hws_pool_attr *attr,
- struct mlx5hws_matcher *matcher)
-{
- switch (matcher->attr.optimize_flow_src) {
- case MLX5HWS_MATCHER_FLOW_SRC_VPORT:
- attr->opt_type = MLX5HWS_POOL_OPTIMIZE_ORIG;
- break;
- case MLX5HWS_MATCHER_FLOW_SRC_WIRE:
- attr->opt_type = MLX5HWS_POOL_OPTIMIZE_MIRROR;
- break;
- default:
- break;
- }
-}
-
static int hws_matcher_check_and_process_at(struct mlx5hws_matcher *matcher,
struct mlx5hws_action_template *at)
{
@@ -458,170 +626,17 @@ static int hws_matcher_check_and_process_at(struct mlx5hws_matcher *matcher,
return 0;
}
-static int hws_matcher_resize_init(struct mlx5hws_matcher *src_matcher)
-{
- struct mlx5hws_matcher_resize_data *resize_data;
-
- resize_data = kzalloc(sizeof(*resize_data), GFP_KERNEL);
- if (!resize_data)
- return -ENOMEM;
-
- resize_data->max_stes = src_matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes;
-
- resize_data->action_ste[0].stc = src_matcher->action_ste[0].stc;
- resize_data->action_ste[0].rtc_0_id = src_matcher->action_ste[0].rtc_0_id;
- resize_data->action_ste[0].rtc_1_id = src_matcher->action_ste[0].rtc_1_id;
- resize_data->action_ste[0].pool = src_matcher->action_ste[0].max_stes ?
- src_matcher->action_ste[0].pool :
- NULL;
- resize_data->action_ste[1].stc = src_matcher->action_ste[1].stc;
- resize_data->action_ste[1].rtc_0_id = src_matcher->action_ste[1].rtc_0_id;
- resize_data->action_ste[1].rtc_1_id = src_matcher->action_ste[1].rtc_1_id;
- resize_data->action_ste[1].pool = src_matcher->action_ste[1].max_stes ?
- src_matcher->action_ste[1].pool :
- NULL;
-
- /* Place the new resized matcher on the dst matcher's list */
- list_add(&resize_data->list_node, &src_matcher->resize_dst->resize_data);
-
- /* Move all the previous resized matchers to the dst matcher's list */
- while (!list_empty(&src_matcher->resize_data)) {
- resize_data = list_first_entry(&src_matcher->resize_data,
- struct mlx5hws_matcher_resize_data,
- list_node);
- list_del_init(&resize_data->list_node);
- list_add(&resize_data->list_node, &src_matcher->resize_dst->resize_data);
- }
-
- return 0;
-}
-
-static void hws_matcher_resize_uninit(struct mlx5hws_matcher *matcher)
-{
- struct mlx5hws_matcher_resize_data *resize_data;
-
- if (!mlx5hws_matcher_is_resizable(matcher))
- return;
-
- while (!list_empty(&matcher->resize_data)) {
- resize_data = list_first_entry(&matcher->resize_data,
- struct mlx5hws_matcher_resize_data,
- list_node);
- list_del_init(&resize_data->list_node);
-
- if (resize_data->max_stes) {
- mlx5hws_action_free_single_stc(matcher->tbl->ctx,
- matcher->tbl->type,
- &resize_data->action_ste[1].stc);
- mlx5hws_action_free_single_stc(matcher->tbl->ctx,
- matcher->tbl->type,
- &resize_data->action_ste[0].stc);
-
- if (matcher->tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
- mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
- resize_data->action_ste[1].rtc_1_id);
- mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
- resize_data->action_ste[0].rtc_1_id);
- }
- mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
- resize_data->action_ste[1].rtc_0_id);
- mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
- resize_data->action_ste[0].rtc_0_id);
- if (resize_data->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].pool) {
- mlx5hws_pool_destroy(resize_data->action_ste[1].pool);
- mlx5hws_pool_destroy(resize_data->action_ste[0].pool);
- }
- }
-
- kfree(resize_data);
- }
-}
-
-static int
-hws_matcher_bind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector)
-{
- struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
- struct mlx5hws_matcher_action_ste *action_ste;
- struct mlx5hws_table *tbl = matcher->tbl;
- struct mlx5hws_pool_attr pool_attr = {0};
- struct mlx5hws_context *ctx = tbl->ctx;
- int ret;
-
- action_ste = &matcher->action_ste[action_ste_selector];
-
- /* Allocate action STE mempool */
- pool_attr.table_type = tbl->type;
- pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
- pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL;
- pool_attr.alloc_log_sz = ilog2(roundup_pow_of_two(action_ste->max_stes)) +
- matcher->attr.table.sz_row_log;
- hws_matcher_set_pool_attr(&pool_attr, matcher);
- action_ste->pool = mlx5hws_pool_create(ctx, &pool_attr);
- if (!action_ste->pool) {
- mlx5hws_err(ctx, "Failed to create action ste pool\n");
- return -EINVAL;
- }
-
- /* Allocate action RTC */
- ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
- if (ret) {
- mlx5hws_err(ctx, "Failed to create action RTC\n");
- goto free_ste_pool;
- }
-
- /* Allocate STC for jumps to STE */
- stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
- stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE;
- stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
- stc_attr.ste_table.ste = action_ste->ste;
- stc_attr.ste_table.ste_pool = action_ste->pool;
- stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer;
-
- ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl->type,
- &action_ste->stc);
- if (ret) {
- mlx5hws_err(ctx, "Failed to create action jump to table STC\n");
- goto free_rtc;
- }
-
- return 0;
-
-free_rtc:
- hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
-free_ste_pool:
- mlx5hws_pool_destroy(action_ste->pool);
- return ret;
-}
-
-static void hws_matcher_unbind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector)
-{
- struct mlx5hws_matcher_action_ste *action_ste;
- struct mlx5hws_table *tbl = matcher->tbl;
-
- action_ste = &matcher->action_ste[action_ste_selector];
-
- if (!action_ste->max_stes ||
- matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION ||
- mlx5hws_matcher_is_in_resize(matcher))
- return;
-
- mlx5hws_action_free_single_stc(tbl->ctx, tbl->type, &action_ste->stc);
- hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
- mlx5hws_pool_destroy(action_ste->pool);
-}
-
static int hws_matcher_bind_at(struct mlx5hws_matcher *matcher)
{
bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt);
- struct mlx5hws_table *tbl = matcher->tbl;
- struct mlx5hws_context *ctx = tbl->ctx;
- u32 required_stes;
- u8 max_stes = 0;
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ u8 required_stes, max_stes;
int i, ret;
if (matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION)
return 0;
+ max_stes = 0;
for (i = 0; i < matcher->num_of_at; i++) {
struct mlx5hws_action_template *at = &matcher->at[i];
@@ -637,38 +652,40 @@ static int hws_matcher_bind_at(struct mlx5hws_matcher *matcher)
/* Future: Optimize reparse */
}
- /* There are no additional STEs required for matcher */
- if (!max_stes)
- return 0;
-
- matcher->action_ste[0].max_stes = max_stes;
- matcher->action_ste[1].max_stes = max_stes;
-
- ret = hws_matcher_bind_at_idx(matcher, 0);
- if (ret)
- return ret;
-
- ret = hws_matcher_bind_at_idx(matcher, 1);
- if (ret)
- goto free_at_0;
+ matcher->num_of_action_stes = max_stes;
return 0;
-
-free_at_0:
- hws_matcher_unbind_at_idx(matcher, 0);
- return ret;
}
-static void hws_matcher_unbind_at(struct mlx5hws_matcher *matcher)
+static void hws_matcher_set_ip_version_match(struct mlx5hws_matcher *matcher)
{
- hws_matcher_unbind_at_idx(matcher, 1);
- hws_matcher_unbind_at_idx(matcher, 0);
+ int i;
+
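+	/* Record which outer/inner ethertype and IP version fields the
+	 * match templates match on.
+	 */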
+ for (i = 0; i < matcher->mt->fc_sz; i++) {
+ switch (matcher->mt->fc[i].fname) {
+ case MLX5HWS_DEFINER_FNAME_ETH_TYPE_O:
+ matcher->matches_outer_ethertype = 1;
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O:
+ matcher->matches_outer_ip_version = 1;
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_TYPE_I:
+ matcher->matches_inner_ethertype = 1;
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I:
+ matcher->matches_inner_ip_version = 1;
+ break;
+ default:
+ break;
+ }
+ }
}
static int hws_matcher_bind_mt(struct mlx5hws_matcher *matcher)
{
+ struct mlx5hws_cmd_ste_create_attr ste_attr = {};
struct mlx5hws_context *ctx = matcher->tbl->ctx;
- struct mlx5hws_pool_attr pool_attr = {0};
+ union mlx5hws_matcher_size *size;
int ret;
/* Calculate match, range and hash definers */
@@ -681,23 +698,41 @@ static int hws_matcher_bind_mt(struct mlx5hws_matcher *matcher)
}
}
- /* Create an STE pool per matcher*/
- pool_attr.table_type = matcher->tbl->type;
- pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
- pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_MATCHER_STE_POOL;
- pool_attr.alloc_log_sz = matcher->attr.table.sz_col_log +
- matcher->attr.table.sz_row_log;
- hws_matcher_set_pool_attr(&pool_attr, matcher);
-
- matcher->match_ste.pool = mlx5hws_pool_create(ctx, &pool_attr);
- if (!matcher->match_ste.pool) {
- mlx5hws_err(ctx, "Failed to allocate matcher STE pool\n");
- ret = -EOPNOTSUPP;
+ hws_matcher_set_ip_version_match(matcher);
+
+ /* Create an STE range each for RX and TX. */
+ ste_attr.table_type = FS_FT_FDB_RX;
+ size = &matcher->attr.size[MLX5HWS_MATCHER_SIZE_TYPE_RX];
+ ste_attr.log_obj_range =
+ matcher->attr.optimize_flow_src ==
+ MLX5HWS_MATCHER_FLOW_SRC_VPORT ?
+ 0 : size->table.sz_col_log + size->table.sz_row_log;
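+	/* A log range of 0 allocates a minimal STE range when this
+	 * direction is optimized out by optimize_flow_src.
+	 */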
+
+ ret = mlx5hws_cmd_ste_create(ctx->mdev, &ste_attr,
+ &matcher->match_ste.ste_0_base);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate RX STE range (%d)\n", ret);
goto uninit_match_definer;
}
+ ste_attr.table_type = FS_FT_FDB_TX;
+ size = &matcher->attr.size[MLX5HWS_MATCHER_SIZE_TYPE_TX];
+ ste_attr.log_obj_range =
+ matcher->attr.optimize_flow_src ==
+ MLX5HWS_MATCHER_FLOW_SRC_WIRE ?
+ 0 : size->table.sz_col_log + size->table.sz_row_log;
+
+ ret = mlx5hws_cmd_ste_create(ctx->mdev, &ste_attr,
+ &matcher->match_ste.ste_1_base);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate TX STE range (%d)\n", ret);
+ goto destroy_rx_ste_range;
+ }
+
return 0;
+destroy_rx_ste_range:
+ mlx5hws_cmd_ste_destroy(ctx->mdev, matcher->match_ste.ste_0_base);
uninit_match_definer:
if (!(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION))
mlx5hws_definer_mt_uninit(ctx, matcher->mt);
@@ -706,9 +741,12 @@ uninit_match_definer:
static void hws_matcher_unbind_mt(struct mlx5hws_matcher *matcher)
{
- mlx5hws_pool_destroy(matcher->match_ste.pool);
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+
+ mlx5hws_cmd_ste_destroy(ctx->mdev, matcher->match_ste.ste_1_base);
+ mlx5hws_cmd_ste_destroy(ctx->mdev, matcher->match_ste.ste_0_base);
if (!(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION))
- mlx5hws_definer_mt_uninit(matcher->tbl->ctx, matcher->mt);
+ mlx5hws_definer_mt_uninit(ctx, matcher->mt);
}
static int
@@ -717,6 +755,10 @@ hws_matcher_validate_insert_mode(struct mlx5hws_cmd_query_caps *caps,
{
struct mlx5hws_matcher_attr *attr = &matcher->attr;
struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ union mlx5hws_matcher_size *size_rx, *size_tx;
+
+ size_rx = &matcher->attr.size[MLX5HWS_MATCHER_SIZE_TYPE_RX];
+ size_tx = &matcher->attr.size[MLX5HWS_MATCHER_SIZE_TYPE_TX];
switch (attr->insert_mode) {
case MLX5HWS_MATCHER_INSERT_BY_HASH:
@@ -727,7 +769,7 @@ hws_matcher_validate_insert_mode(struct mlx5hws_cmd_query_caps *caps,
break;
case MLX5HWS_MATCHER_INSERT_BY_INDEX:
- if (attr->table.sz_col_log) {
+ if (size_rx->table.sz_col_log || size_tx->table.sz_col_log) {
mlx5hws_err(ctx, "Matcher with INSERT_BY_INDEX supports only Nx1 table size\n");
return -EOPNOTSUPP;
}
@@ -747,7 +789,10 @@ hws_matcher_validate_insert_mode(struct mlx5hws_cmd_query_caps *caps,
return -EOPNOTSUPP;
}
- if (attr->table.sz_row_log > MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX) {
+ if (size_rx->table.sz_row_log >
+ MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX ||
+ size_tx->table.sz_row_log >
+ MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX) {
mlx5hws_err(ctx, "Matcher with linear distribute: rows exceed limit %d",
MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX);
return -EOPNOTSUPP;
@@ -771,6 +816,10 @@ hws_matcher_process_attr(struct mlx5hws_cmd_query_caps *caps,
struct mlx5hws_matcher *matcher)
{
struct mlx5hws_matcher_attr *attr = &matcher->attr;
+ union mlx5hws_matcher_size *size_rx, *size_tx;
+
+ size_rx = &attr->size[MLX5HWS_MATCHER_SIZE_TYPE_RX];
+ size_tx = &attr->size[MLX5HWS_MATCHER_SIZE_TYPE_TX];
if (hws_matcher_validate_insert_mode(caps, matcher))
return -EOPNOTSUPP;
@@ -782,10 +831,16 @@ hws_matcher_process_attr(struct mlx5hws_cmd_query_caps *caps,
/* Convert number of rules to the required depth */
if (attr->mode == MLX5HWS_MATCHER_RESOURCE_MODE_RULE &&
- attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_HASH)
- attr->table.sz_col_log = hws_matcher_rules_to_tbl_depth(attr->rule.num_log);
+ attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_HASH) {
+ size_rx->table.sz_col_log =
+ hws_matcher_rules_to_tbl_depth(size_rx->rule.num_log);
+ size_tx->table.sz_col_log =
+ hws_matcher_rules_to_tbl_depth(size_tx->rule.num_log);
+ }
matcher->flags |= attr->resizable ? MLX5HWS_MATCHER_FLAGS_RESIZABLE : 0;
+ matcher->flags |= attr->isolated_matcher_end_ft_id ?
+ MLX5HWS_MATCHER_FLAGS_ISOLATED : 0;
return hws_matcher_check_attr_sz(caps, matcher);
}
@@ -807,10 +862,10 @@ static int hws_matcher_create_and_connect(struct mlx5hws_matcher *matcher)
/* Create matcher end flow table anchor */
ret = hws_matcher_create_end_ft(matcher);
if (ret)
- goto unbind_at;
+ goto unbind_mt;
/* Allocate the RTC for the new matcher */
- ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
+ ret = hws_matcher_create_rtc(matcher);
if (ret)
goto destroy_end_ft;
@@ -822,11 +877,9 @@ static int hws_matcher_create_and_connect(struct mlx5hws_matcher *matcher)
return 0;
destroy_rtc:
- hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
+ hws_matcher_destroy_rtc(matcher);
destroy_end_ft:
hws_matcher_destroy_end_ft(matcher);
-unbind_at:
- hws_matcher_unbind_at(matcher);
unbind_mt:
hws_matcher_unbind_mt(matcher);
return ret;
@@ -834,11 +887,9 @@ unbind_mt:
static void hws_matcher_destroy_and_disconnect(struct mlx5hws_matcher *matcher)
{
- hws_matcher_resize_uninit(matcher);
hws_matcher_disconnect(matcher);
- hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
+ hws_matcher_destroy_rtc(matcher);
hws_matcher_destroy_end_ft(matcher);
- hws_matcher_unbind_at(matcher);
hws_matcher_unbind_mt(matcher);
}
@@ -846,22 +897,25 @@ static int
hws_matcher_create_col_matcher(struct mlx5hws_matcher *matcher)
{
struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ union mlx5hws_matcher_size *size_rx, *size_tx;
struct mlx5hws_matcher *col_matcher;
- int ret;
+ int i, ret;
+
+ size_rx = &matcher->attr.size[MLX5HWS_MATCHER_SIZE_TYPE_RX];
+ size_tx = &matcher->attr.size[MLX5HWS_MATCHER_SIZE_TYPE_TX];
if (matcher->attr.mode != MLX5HWS_MATCHER_RESOURCE_MODE_RULE ||
matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX)
return 0;
- if (!hws_matcher_requires_col_tbl(matcher->attr.rule.num_log))
+ if (!hws_matcher_requires_col_tbl(size_rx->rule.num_log) &&
+ !hws_matcher_requires_col_tbl(size_tx->rule.num_log))
return 0;
col_matcher = kzalloc(sizeof(*matcher), GFP_KERNEL);
if (!col_matcher)
return -ENOMEM;
- INIT_LIST_HEAD(&col_matcher->resize_data);
-
col_matcher->tbl = matcher->tbl;
col_matcher->mt = matcher->mt;
col_matcher->at = matcher->at;
@@ -872,12 +926,20 @@ hws_matcher_create_col_matcher(struct mlx5hws_matcher *matcher)
col_matcher->flags |= MLX5HWS_MATCHER_FLAGS_COLLISION;
col_matcher->attr.mode = MLX5HWS_MATCHER_RESOURCE_MODE_HTABLE;
col_matcher->attr.optimize_flow_src = matcher->attr.optimize_flow_src;
- col_matcher->attr.table.sz_row_log = matcher->attr.rule.num_log;
- col_matcher->attr.table.sz_col_log = MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH;
- if (col_matcher->attr.table.sz_row_log > MLX5HWS_MATCHER_ASSURED_ROW_RATIO)
- col_matcher->attr.table.sz_row_log -= MLX5HWS_MATCHER_ASSURED_ROW_RATIO;
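+	/* Derive the per-direction collision table size from the main
+	 * matcher's rule count.
+	 */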
+ for (i = 0; i < 2; i++) {
+ union mlx5hws_matcher_size *dst = &col_matcher->attr.size[i];
+ union mlx5hws_matcher_size *src = &matcher->attr.size[i];
+
+ dst->table.sz_row_log = src->rule.num_log;
+ dst->table.sz_col_log = MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH;
+ if (dst->table.sz_row_log > MLX5HWS_MATCHER_ASSURED_ROW_RATIO)
+ dst->table.sz_row_log -=
+ MLX5HWS_MATCHER_ASSURED_ROW_RATIO;
+ }
col_matcher->attr.max_num_of_at_attach = matcher->attr.max_num_of_at_attach;
+ col_matcher->attr.isolated_matcher_end_ft_id =
+ matcher->attr.isolated_matcher_end_ft_id;
ret = hws_matcher_process_attr(ctx->caps, col_matcher);
if (ret)
@@ -915,8 +977,6 @@ static int hws_matcher_init(struct mlx5hws_matcher *matcher)
struct mlx5hws_context *ctx = matcher->tbl->ctx;
int ret;
- INIT_LIST_HEAD(&matcher->resize_data);
-
mutex_lock(&ctx->ctrl_lock);
/* Allocate matcher resource and connect to the packet pipe */
@@ -951,18 +1011,44 @@ static int hws_matcher_uninit(struct mlx5hws_matcher *matcher)
return 0;
}
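+/* Double the action template array, bounded by MLX5HWS_MATCHER_MAX_AT. */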
+static int hws_matcher_grow_at_array(struct mlx5hws_matcher *matcher)
+{
+ void *p;
+
+ if (matcher->size_of_at_array >= MLX5HWS_MATCHER_MAX_AT)
+ return -ENOMEM;
+
+ matcher->size_of_at_array *= 2;
+ p = krealloc(matcher->at,
+ matcher->size_of_at_array * sizeof(*matcher->at),
+ __GFP_ZERO | GFP_KERNEL);
+ if (!p) {
+ matcher->size_of_at_array /= 2;
+ return -ENOMEM;
+ }
+
+ matcher->at = p;
+
+ return 0;
+}
+
int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher,
struct mlx5hws_action_template *at)
{
bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt);
- struct mlx5hws_context *ctx = matcher->tbl->ctx;
u32 required_stes;
int ret;
- if (!matcher->attr.max_num_of_at_attach) {
- mlx5hws_dbg(ctx, "Num of current at (%d) exceed allowed value\n",
- matcher->num_of_at);
- return -EOPNOTSUPP;
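+	/* Grow the action template array on demand; keep the collision
+	 * matcher's array in sync.
+	 */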
+ if (unlikely(matcher->num_of_at >= matcher->size_of_at_array)) {
+ ret = hws_matcher_grow_at_array(matcher);
+ if (ret)
+ return ret;
+
+ if (matcher->col_matcher) {
+ ret = hws_matcher_grow_at_array(matcher->col_matcher);
+ if (ret)
+ return ret;
+ }
}
ret = hws_matcher_check_and_process_at(matcher, at);
@@ -970,16 +1056,11 @@ int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher,
return ret;
required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term);
- if (matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes < required_stes) {
- mlx5hws_dbg(ctx, "Required STEs [%d] exceeds initial action template STE [%d]\n",
- required_stes,
- matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes);
- return -ENOMEM;
- }
+ if (matcher->num_of_action_stes < required_stes)
+ matcher->num_of_action_stes = required_stes;
matcher->at[matcher->num_of_at] = *at;
matcher->num_of_at += 1;
- matcher->attr.max_num_of_at_attach -= 1;
if (matcher->col_matcher)
matcher->col_matcher->num_of_at = matcher->num_of_at;
@@ -1007,9 +1088,10 @@ hws_matcher_set_templates(struct mlx5hws_matcher *matcher,
if (!matcher->mt)
return -ENOMEM;
- matcher->at = kcalloc(num_of_at + matcher->attr.max_num_of_at_attach,
- sizeof(*matcher->at),
- GFP_KERNEL);
+ matcher->size_of_at_array =
+ num_of_at + matcher->attr.max_num_of_at_attach;
+ matcher->at = kvcalloc(matcher->size_of_at_array, sizeof(*matcher->at),
+ GFP_KERNEL);
if (!matcher->at) {
mlx5hws_err(ctx, "Failed to allocate action template array\n");
ret = -ENOMEM;
@@ -1035,7 +1117,7 @@ free_mt:
static void
hws_matcher_unset_templates(struct mlx5hws_matcher *matcher)
{
- kfree(matcher->at);
+ kvfree(matcher->at);
kfree(matcher->mt);
}
@@ -1157,8 +1239,7 @@ static int hws_matcher_resize_precheck(struct mlx5hws_matcher *src_matcher,
return -EINVAL;
}
- if (src_matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes >
- dst_matcher->action_ste[0].max_stes) {
+ if (src_matcher->num_of_action_stes > dst_matcher->num_of_action_stes) {
mlx5hws_err(ctx, "Src/dst matcher max STEs mismatch\n");
return -EINVAL;
}
@@ -1187,10 +1268,6 @@ int mlx5hws_matcher_resize_set_target(struct mlx5hws_matcher *src_matcher,
src_matcher->resize_dst = dst_matcher;
- ret = hws_matcher_resize_init(src_matcher);
- if (ret)
- src_matcher->resize_dst = NULL;
-
out:
mutex_unlock(&src_matcher->tbl->ctx->ctrl_lock);
return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h
index 125391d1a114..ae20bcebfdde 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#ifndef MLX5HWS_MATCHER_H_
-#define MLX5HWS_MATCHER_H_
+#ifndef HWS_MATCHER_H_
+#define HWS_MATCHER_H_
/* We calculated that concatenating a collision table to the main table with
* 3% of the main table rows will be enough resources for high insertion
@@ -18,6 +18,14 @@
/* Required depth of the main large table */
#define MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH 2
+/* Action RTC size multiplier that is required in order
+ * to support rule update for rules with action STEs.
+ */
+#define MLX5HWS_MATCHER_ACTION_RTC_UPDATE_MULT 1
+
+/* Maximum number of action templates that can be attached to a matcher. */
+#define MLX5HWS_MATCHER_MAX_AT 128
+
enum mlx5hws_matcher_offset {
MLX5HWS_MATCHER_OFFSET_TAG_DW1 = 12,
MLX5HWS_MATCHER_OFFSET_TAG_DW0 = 13,
@@ -26,6 +34,7 @@ enum mlx5hws_matcher_offset {
enum mlx5hws_matcher_flags {
MLX5HWS_MATCHER_FLAGS_COLLISION = 1 << 2,
MLX5HWS_MATCHER_FLAGS_RESIZABLE = 1 << 3,
+ MLX5HWS_MATCHER_FLAGS_ISOLATED = 1 << 4,
};
struct mlx5hws_match_template {
@@ -37,32 +46,16 @@ struct mlx5hws_match_template {
};
struct mlx5hws_matcher_match_ste {
- struct mlx5hws_pool_chunk ste;
- u32 rtc_0_id;
- u32 rtc_1_id;
- struct mlx5hws_pool *pool;
-};
-
-struct mlx5hws_matcher_action_ste {
- struct mlx5hws_pool_chunk ste;
- struct mlx5hws_pool_chunk stc;
- u32 rtc_0_id;
- u32 rtc_1_id;
- struct mlx5hws_pool *pool;
- u8 max_stes;
-};
-
-struct mlx5hws_matcher_resize_data_node {
- struct mlx5hws_pool_chunk stc;
u32 rtc_0_id;
u32 rtc_1_id;
- struct mlx5hws_pool *pool;
+ u32 ste_0_base;
+ u32 ste_1_base;
};
-struct mlx5hws_matcher_resize_data {
- struct mlx5hws_matcher_resize_data_node action_ste[2];
- u8 max_stes;
- struct list_head list_node;
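+/* IP version tracked for the outer/inner headers of rules in this matcher. */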
+enum {
+ MLX5HWS_MATCHER_IPV_UNSET = 0,
+ MLX5HWS_MATCHER_IPV_4 = 1,
+ MLX5HWS_MATCHER_IPV_6 = 2,
};
struct mlx5hws_matcher {
@@ -71,16 +64,22 @@ struct mlx5hws_matcher {
struct mlx5hws_match_template *mt;
struct mlx5hws_action_template *at;
u8 num_of_at;
+ u8 size_of_at_array;
u8 num_of_mt;
+ u8 num_of_action_stes;
/* enum mlx5hws_matcher_flags */
u8 flags;
+ u8 matches_outer_ethertype:1;
+ u8 matches_outer_ip_version:1;
+ u8 matches_inner_ethertype:1;
+ u8 matches_inner_ip_version:1;
+ u8 outer_ip_version:2;
+ u8 inner_ip_version:2;
u32 end_ft_id;
struct mlx5hws_matcher *col_matcher;
struct mlx5hws_matcher *resize_dst;
struct mlx5hws_matcher_match_ste match_ste;
- struct mlx5hws_matcher_action_ste action_ste[2];
struct list_head list_node;
- struct list_head resize_data;
};
static inline bool
@@ -99,9 +98,17 @@ static inline bool mlx5hws_matcher_is_in_resize(struct mlx5hws_matcher *matcher)
return !!matcher->resize_dst;
}
+static inline bool mlx5hws_matcher_is_isolated(struct mlx5hws_matcher *matcher)
+{
+ return !!(matcher->flags & MLX5HWS_MATCHER_FLAGS_ISOLATED);
+}
+
static inline bool mlx5hws_matcher_is_insert_by_idx(struct mlx5hws_matcher *matcher)
{
return matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX;
}
-#endif /* MLX5HWS_MATCHER_H_ */
+int mlx5hws_matcher_update_end_ft_isolated(struct mlx5hws_table *tbl,
+ u32 miss_ft_id);
+
+#endif /* HWS_MATCHER_H_ */
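For context, a minimal sketch of how a caller might fill the reworked per-direction matcher size attribute (hypothetical usage, not part of this patch; the size union and array are introduced in the mlx5hws.h hunk below, and the illustrative values are assumptions):

	struct mlx5hws_matcher_attr attr = {0};

	/* Size both directions for up to 2^16 rules (illustrative values). */
	attr.mode = MLX5HWS_MATCHER_RESOURCE_MODE_RULE;
	attr.insert_mode = MLX5HWS_MATCHER_INSERT_BY_HASH;
	attr.size[MLX5HWS_MATCHER_SIZE_TYPE_RX].rule.num_log = 16;
	attr.size[MLX5HWS_MATCHER_SIZE_TYPE_TX].rule.num_log = 16;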
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
index f39d636ff39a..1ad7a50d938b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
@@ -70,12 +70,12 @@ enum mlx5hws_send_queue_actions {
struct mlx5hws_context_attr {
u16 queues;
u16 queue_size;
- bool bwc; /* add support for backward compatible API*/
};
struct mlx5hws_table_attr {
enum mlx5hws_table_type type;
u32 level;
+ u16 uid;
};
enum mlx5hws_matcher_flow_src {
@@ -94,6 +94,23 @@ enum mlx5hws_matcher_distribute_mode {
MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR = 0x1,
};
+enum mlx5hws_matcher_size_type {
+ MLX5HWS_MATCHER_SIZE_TYPE_RX,
+ MLX5HWS_MATCHER_SIZE_TYPE_TX,
+ MLX5HWS_MATCHER_SIZE_TYPE_MAX,
+};
+
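+/* Per-direction (RX/TX) matcher sizing: either explicit table dimensions
+ * or a log number of rules.
+ */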
+union mlx5hws_matcher_size {
+ struct {
+ u8 sz_row_log;
+ u8 sz_col_log;
+ } table;
+
+ struct {
+ u8 num_log;
+ } rule;
+};
+
struct mlx5hws_matcher_attr {
/* Processing priority inside table */
u32 priority;
@@ -108,18 +125,11 @@ struct mlx5hws_matcher_attr {
enum mlx5hws_matcher_distribute_mode distribute_mode;
/* Define whether the created matcher supports resizing into a bigger matcher */
bool resizable;
- union {
- struct {
- u8 sz_row_log;
- u8 sz_col_log;
- } table;
-
- struct {
- u8 num_log;
- } rule;
- };
+ union mlx5hws_matcher_size size[MLX5HWS_MATCHER_SIZE_TYPE_MAX];
/* Optional AT attach configuration - Max number of additional AT */
u8 max_num_of_at_attach;
+ /* Optional end FT (miss FT ID) for match RTC (for isolated matcher) */
+ u32 isolated_matcher_end_ft_id;
};
struct mlx5hws_rule_attr {
@@ -212,6 +222,7 @@ struct mlx5hws_action_dest_attr {
struct mlx5hws_action *dest;
/* Optional reformat action */
struct mlx5hws_action *reformat;
+ bool is_wire_ft;
};
/**
@@ -503,6 +514,15 @@ enum mlx5hws_action_type
mlx5hws_action_get_type(struct mlx5hws_action *action);
/**
+ * mlx5hws_action_get_dev - Get mlx5 core device.
+ *
+ * @action: The action to get the device from.
+ *
+ * Return: mlx5 core device.
+ */
+struct mlx5_core_dev *mlx5hws_action_get_dev(struct mlx5hws_action *action);
+
+/**
* mlx5hws_action_create_dest_drop - Create a direct rule drop action.
*
* @ctx: The context in which the new action will be created.
@@ -715,18 +735,13 @@ mlx5hws_action_create_push_vlan(struct mlx5hws_context *ctx, u32 flags);
* @num_dest: The number of dests attributes.
* @dests: The destination array. Each contains a destination action and can
* have additional actions.
- * @ignore_flow_level: Whether to turn on 'ignore_flow_level' for this dest.
- * @flow_source: Source port of the traffic for this actions.
* @flags: Action creation flags (enum mlx5hws_action_flags).
*
* Return: pointer to mlx5hws_action on success NULL otherwise.
*/
struct mlx5hws_action *
-mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx,
- size_t num_dest,
+mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx, size_t num_dest,
struct mlx5hws_action_dest_attr *dests,
- bool ignore_flow_level,
- u32 flow_source,
u32 flags);
/**
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c
deleted file mode 100644
index 601fad5fc54a..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c
+++ /dev/null
@@ -1,86 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#include "mlx5hws_internal.h"
-
-bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
- u8 match_criteria_enable,
- struct mlx5hws_match_parameters *mask)
-{
- struct mlx5hws_definer match_layout = {0};
- struct mlx5hws_match_template *mt;
- bool is_complex = false;
- int ret;
-
- if (!match_criteria_enable)
- return false; /* empty matcher */
-
- mt = mlx5hws_match_template_create(ctx,
- mask->match_buf,
- mask->match_sz,
- match_criteria_enable);
- if (!mt) {
- mlx5hws_err(ctx, "BWC: failed creating match template\n");
- return false;
- }
-
- ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout);
- if (ret) {
- /* The only case that we're interested in is E2BIG,
- * which means that the match parameters need to be
- * split into complex martcher.
- * For all other cases (good or bad) - just return true
- * and let the usual match creation path handle it,
- * both for good and bad flows.
- */
- if (ret == -E2BIG) {
- is_complex = true;
- mlx5hws_dbg(ctx, "Matcher definer layout: need complex matcher\n");
- } else {
- mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n");
- }
- }
-
- mlx5hws_match_template_destroy(mt);
-
- return is_complex;
-}
-
-int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher,
- struct mlx5hws_table *table,
- u32 priority,
- u8 match_criteria_enable,
- struct mlx5hws_match_parameters *mask)
-{
- mlx5hws_err(table->ctx, "Complex matcher is not supported yet\n");
- return -EOPNOTSUPP;
-}
-
-void
-mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
-{
- /* nothing to do here */
-}
-
-int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
- struct mlx5hws_match_parameters *params,
- u32 flow_source,
- struct mlx5hws_rule_action rule_actions[],
- u16 bwc_queue_idx)
-{
- mlx5hws_err(bwc_rule->bwc_matcher->matcher->tbl->ctx,
- "Complex rule is not supported yet\n");
- return -EOPNOTSUPP;
-}
-
-int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule)
-{
- return 0;
-}
-
-int mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
-{
- mlx5hws_err(bwc_matcher->matcher->tbl->ctx,
- "Moving complex rule is not supported yet\n");
- return -EOPNOTSUPP;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h
deleted file mode 100644
index 068ee8118609..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#ifndef MLX5HWS_BWC_COMPLEX_H_
-#define MLX5HWS_BWC_COMPLEX_H_
-
-bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
- u8 match_criteria_enable,
- struct mlx5hws_match_parameters *mask);
-
-int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher,
- struct mlx5hws_table *table,
- u32 priority,
- u8 match_criteria_enable,
- struct mlx5hws_match_parameters *mask);
-
-void mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher);
-
-int mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher);
-
-int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
- struct mlx5hws_match_parameters *params,
- u32 flow_source,
- struct mlx5hws_rule_action rule_actions[],
- u16 bwc_queue_idx);
-
-int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule);
-
-#endif /* MLX5HWS_BWC_COMPLEX_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c
deleted file mode 100644
index a8a63e3278be..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c
+++ /dev/null
@@ -1,640 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#include "mlx5hws_internal.h"
-#include "mlx5hws_buddy.h"
-
-static void hws_pool_free_one_resource(struct mlx5hws_pool_resource *resource)
-{
- switch (resource->pool->type) {
- case MLX5HWS_POOL_TYPE_STE:
- mlx5hws_cmd_ste_destroy(resource->pool->ctx->mdev, resource->base_id);
- break;
- case MLX5HWS_POOL_TYPE_STC:
- mlx5hws_cmd_stc_destroy(resource->pool->ctx->mdev, resource->base_id);
- break;
- default:
- break;
- }
-
- kfree(resource);
-}
-
-static void hws_pool_resource_free(struct mlx5hws_pool *pool,
- int resource_idx)
-{
- hws_pool_free_one_resource(pool->resource[resource_idx]);
- pool->resource[resource_idx] = NULL;
-
- if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) {
- hws_pool_free_one_resource(pool->mirror_resource[resource_idx]);
- pool->mirror_resource[resource_idx] = NULL;
- }
-}
-
-static struct mlx5hws_pool_resource *
-hws_pool_create_one_resource(struct mlx5hws_pool *pool, u32 log_range,
- u32 fw_ft_type)
-{
- struct mlx5hws_cmd_ste_create_attr ste_attr;
- struct mlx5hws_cmd_stc_create_attr stc_attr;
- struct mlx5hws_pool_resource *resource;
- u32 obj_id = 0;
- int ret;
-
- resource = kzalloc(sizeof(*resource), GFP_KERNEL);
- if (!resource)
- return NULL;
-
- switch (pool->type) {
- case MLX5HWS_POOL_TYPE_STE:
- ste_attr.log_obj_range = log_range;
- ste_attr.table_type = fw_ft_type;
- ret = mlx5hws_cmd_ste_create(pool->ctx->mdev, &ste_attr, &obj_id);
- break;
- case MLX5HWS_POOL_TYPE_STC:
- stc_attr.log_obj_range = log_range;
- stc_attr.table_type = fw_ft_type;
- ret = mlx5hws_cmd_stc_create(pool->ctx->mdev, &stc_attr, &obj_id);
- break;
- default:
- ret = -EINVAL;
- }
-
- if (ret) {
- mlx5hws_err(pool->ctx, "Failed to allocate resource objects\n");
- goto free_resource;
- }
-
- resource->pool = pool;
- resource->range = 1 << log_range;
- resource->base_id = obj_id;
-
- return resource;
-
-free_resource:
- kfree(resource);
- return NULL;
-}
-
-static int
-hws_pool_resource_alloc(struct mlx5hws_pool *pool, u32 log_range, int idx)
-{
- struct mlx5hws_pool_resource *resource;
- u32 fw_ft_type, opt_log_range;
-
- fw_ft_type = mlx5hws_table_get_res_fw_ft_type(pool->tbl_type, false);
- opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_ORIG ? 0 : log_range;
- resource = hws_pool_create_one_resource(pool, opt_log_range, fw_ft_type);
- if (!resource) {
- mlx5hws_err(pool->ctx, "Failed allocating resource\n");
- return -EINVAL;
- }
-
- pool->resource[idx] = resource;
-
- if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) {
- struct mlx5hws_pool_resource *mirror_resource;
-
- fw_ft_type = mlx5hws_table_get_res_fw_ft_type(pool->tbl_type, true);
- opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_MIRROR ? 0 : log_range;
- mirror_resource = hws_pool_create_one_resource(pool, opt_log_range, fw_ft_type);
- if (!mirror_resource) {
- mlx5hws_err(pool->ctx, "Failed allocating mirrored resource\n");
- hws_pool_free_one_resource(resource);
- pool->resource[idx] = NULL;
- return -EINVAL;
- }
- pool->mirror_resource[idx] = mirror_resource;
- }
-
- return 0;
-}
-
-static unsigned long *hws_pool_create_and_init_bitmap(u32 log_range)
-{
- unsigned long *cur_bmp;
-
- cur_bmp = bitmap_zalloc(1 << log_range, GFP_KERNEL);
- if (!cur_bmp)
- return NULL;
-
- bitmap_fill(cur_bmp, 1 << log_range);
-
- return cur_bmp;
-}
-
-static void hws_pool_buddy_db_put_chunk(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
-{
- struct mlx5hws_buddy_mem *buddy;
-
- buddy = pool->db.buddy_manager->buddies[chunk->resource_idx];
- if (!buddy) {
- mlx5hws_err(pool->ctx, "No such buddy (%d)\n", chunk->resource_idx);
- return;
- }
-
- mlx5hws_buddy_free_mem(buddy, chunk->offset, chunk->order);
-}
-
-static struct mlx5hws_buddy_mem *
-hws_pool_buddy_get_next_buddy(struct mlx5hws_pool *pool, int idx,
- u32 order, bool *is_new_buddy)
-{
- static struct mlx5hws_buddy_mem *buddy;
- u32 new_buddy_size;
-
- buddy = pool->db.buddy_manager->buddies[idx];
- if (buddy)
- return buddy;
-
- new_buddy_size = max(pool->alloc_log_sz, order);
- *is_new_buddy = true;
- buddy = mlx5hws_buddy_create(new_buddy_size);
- if (!buddy) {
- mlx5hws_err(pool->ctx, "Failed to create buddy order: %d index: %d\n",
- new_buddy_size, idx);
- return NULL;
- }
-
- if (hws_pool_resource_alloc(pool, new_buddy_size, idx) != 0) {
- mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
- pool->type, new_buddy_size, idx);
- mlx5hws_buddy_cleanup(buddy);
- return NULL;
- }
-
- pool->db.buddy_manager->buddies[idx] = buddy;
-
- return buddy;
-}
-
-static int hws_pool_buddy_get_mem_chunk(struct mlx5hws_pool *pool,
- int order,
- u32 *buddy_idx,
- int *seg)
-{
- struct mlx5hws_buddy_mem *buddy;
- bool new_mem = false;
- int ret = 0;
- int i;
-
- *seg = -1;
-
- /* Find the next free place from the buddy array */
- while (*seg == -1) {
- for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
- buddy = hws_pool_buddy_get_next_buddy(pool, i,
- order,
- &new_mem);
- if (!buddy) {
- ret = -ENOMEM;
- goto out;
- }
-
- *seg = mlx5hws_buddy_alloc_mem(buddy, order);
- if (*seg != -1)
- goto found;
-
- if (pool->flags & MLX5HWS_POOL_FLAGS_ONE_RESOURCE) {
- mlx5hws_err(pool->ctx,
- "Fail to allocate seg for one resource pool\n");
- ret = -ENOMEM;
- goto out;
- }
-
- if (new_mem) {
- /* We have new memory pool, should be place for us */
- mlx5hws_err(pool->ctx,
- "No memory for order: %d with buddy no: %d\n",
- order, i);
- ret = -ENOMEM;
- goto out;
- }
- }
- }
-
-found:
- *buddy_idx = i;
-out:
- return ret;
-}
-
-static int hws_pool_buddy_db_get_chunk(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
-{
- int ret = 0;
-
- /* Go over the buddies and find next free slot */
- ret = hws_pool_buddy_get_mem_chunk(pool, chunk->order,
- &chunk->resource_idx,
- &chunk->offset);
- if (ret)
- mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
- chunk->order);
-
- return ret;
-}
-
-static void hws_pool_buddy_db_uninit(struct mlx5hws_pool *pool)
-{
- struct mlx5hws_buddy_mem *buddy;
- int i;
-
- for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
- buddy = pool->db.buddy_manager->buddies[i];
- if (buddy) {
- mlx5hws_buddy_cleanup(buddy);
- kfree(buddy);
- pool->db.buddy_manager->buddies[i] = NULL;
- }
- }
-
- kfree(pool->db.buddy_manager);
-}
-
-static int hws_pool_buddy_db_init(struct mlx5hws_pool *pool, u32 log_range)
-{
- pool->db.buddy_manager = kzalloc(sizeof(*pool->db.buddy_manager), GFP_KERNEL);
- if (!pool->db.buddy_manager)
- return -ENOMEM;
-
- if (pool->flags & MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE) {
- bool new_buddy;
-
- if (!hws_pool_buddy_get_next_buddy(pool, 0, log_range, &new_buddy)) {
- mlx5hws_err(pool->ctx,
- "Failed allocating memory on create log_sz: %d\n", log_range);
- kfree(pool->db.buddy_manager);
- return -ENOMEM;
- }
- }
-
- pool->p_db_uninit = &hws_pool_buddy_db_uninit;
- pool->p_get_chunk = &hws_pool_buddy_db_get_chunk;
- pool->p_put_chunk = &hws_pool_buddy_db_put_chunk;
-
- return 0;
-}
-
-static int hws_pool_create_resource_on_index(struct mlx5hws_pool *pool,
- u32 alloc_size, int idx)
-{
- int ret = hws_pool_resource_alloc(pool, alloc_size, idx);
-
- if (ret) {
- mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
- pool->type, alloc_size, idx);
- return ret;
- }
-
- return 0;
-}
-
-static struct mlx5hws_pool_elements *
-hws_pool_element_create_new_elem(struct mlx5hws_pool *pool, u32 order, int idx)
-{
- struct mlx5hws_pool_elements *elem;
- u32 alloc_size;
-
- alloc_size = pool->alloc_log_sz;
-
- elem = kzalloc(sizeof(*elem), GFP_KERNEL);
- if (!elem)
- return NULL;
-
- /* Sharing the same resource, also means that all the elements are with size 1 */
- if ((pool->flags & MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS) &&
- !(pool->flags & MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK)) {
- /* Currently all chunks in size 1 */
- elem->bitmap = hws_pool_create_and_init_bitmap(alloc_size - order);
- if (!elem->bitmap) {
- mlx5hws_err(pool->ctx,
- "Failed to create bitmap type: %d: size %d index: %d\n",
- pool->type, alloc_size, idx);
- goto free_elem;
- }
-
- elem->log_size = alloc_size - order;
- }
-
- if (hws_pool_create_resource_on_index(pool, alloc_size, idx)) {
- mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
- pool->type, alloc_size, idx);
- goto free_db;
- }
-
- pool->db.element_manager->elements[idx] = elem;
-
- return elem;
-
-free_db:
- bitmap_free(elem->bitmap);
-free_elem:
- kfree(elem);
- return NULL;
-}
-
-static int hws_pool_element_find_seg(struct mlx5hws_pool_elements *elem, int *seg)
-{
- unsigned int segment, size;
-
- size = 1 << elem->log_size;
-
- segment = find_first_bit(elem->bitmap, size);
- if (segment >= size) {
- elem->is_full = true;
- return -ENOMEM;
- }
-
- bitmap_clear(elem->bitmap, segment, 1);
- *seg = segment;
- return 0;
-}
-
-static int
-hws_pool_onesize_element_get_mem_chunk(struct mlx5hws_pool *pool, u32 order,
- u32 *idx, int *seg)
-{
- struct mlx5hws_pool_elements *elem;
-
- elem = pool->db.element_manager->elements[0];
- if (!elem)
- elem = hws_pool_element_create_new_elem(pool, order, 0);
- if (!elem)
- goto err_no_elem;
-
- if (hws_pool_element_find_seg(elem, seg) != 0) {
- mlx5hws_err(pool->ctx, "No more resources (last request order: %d)\n", order);
- return -ENOMEM;
- }
-
- *idx = 0;
- elem->num_of_elements++;
- return 0;
-
-err_no_elem:
- mlx5hws_err(pool->ctx, "Failed to allocate element for order: %d\n", order);
- return -ENOMEM;
-}
-
-static int
-hws_pool_general_element_get_mem_chunk(struct mlx5hws_pool *pool, u32 order,
- u32 *idx, int *seg)
-{
- int ret, i;
-
- for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
- if (!pool->resource[i]) {
- ret = hws_pool_create_resource_on_index(pool, order, i);
- if (ret)
- goto err_no_res;
- *idx = i;
- *seg = 0; /* One memory slot in that element */
- return 0;
- }
- }
-
- mlx5hws_err(pool->ctx, "No more resources (last request order: %d)\n", order);
- return -ENOMEM;
-
-err_no_res:
- mlx5hws_err(pool->ctx, "Failed to allocate element for order: %d\n", order);
- return -ENOMEM;
-}
-
-static int hws_pool_general_element_db_get_chunk(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
-{
- int ret;
-
- /* Go over all memory elements and find/allocate free slot */
- ret = hws_pool_general_element_get_mem_chunk(pool, chunk->order,
- &chunk->resource_idx,
- &chunk->offset);
- if (ret)
- mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
- chunk->order);
-
- return ret;
-}
-
-static void hws_pool_general_element_db_put_chunk(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
-{
- if (unlikely(!pool->resource[chunk->resource_idx]))
- pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
-
- if (pool->flags & MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE)
- hws_pool_resource_free(pool, chunk->resource_idx);
-}
-
-static void hws_pool_general_element_db_uninit(struct mlx5hws_pool *pool)
-{
- (void)pool;
-}
-
-/* This memory management works as follows:
- * - At start, no memory is allocated at all.
- * - When a new request for a chunk arrives:
- *   allocate a resource and hand it out.
- * - When that chunk is freed:
- *   the resource is freed as well.
- */
-static int hws_pool_general_element_db_init(struct mlx5hws_pool *pool)
-{
- pool->p_db_uninit = &hws_pool_general_element_db_uninit;
- pool->p_get_chunk = &hws_pool_general_element_db_get_chunk;
- pool->p_put_chunk = &hws_pool_general_element_db_put_chunk;
-
- return 0;
-}
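The scheme above maps every chunk to its own resource slot: nothing is preallocated, a free slot is backed on demand, and freeing the chunk releases its resource straight away. A minimal userspace sketch of the same idea, assuming a plain malloc-backed slot array (SLOTS stands in for MLX5HWS_POOL_RESOURCE_ARR_SZ; none of this is the driver's real data layout):

#include <stdio.h>
#include <stdlib.h>

#define SLOTS 8			/* stands in for MLX5HWS_POOL_RESOURCE_ARR_SZ */

static void *slots[SLOTS];	/* one "resource" per chunk, NULL = unused */

/* Allocate: find the first free slot and back it with memory on demand. */
static int chunk_get(size_t size, int *idx)
{
	for (int i = 0; i < SLOTS; i++) {
		if (!slots[i]) {
			slots[i] = malloc(size);
			if (!slots[i])
				return -1;
			*idx = i;
			return 0;
		}
	}
	return -1;		/* no more resources */
}

/* Free: releasing the chunk immediately releases its backing resource. */
static void chunk_put(int idx)
{
	free(slots[idx]);
	slots[idx] = NULL;
}

int main(void)
{
	int a, b;

	if (chunk_get(64, &a) || chunk_get(64, &b))
		return 1;
	printf("chunks at slots %d and %d\n", a, b);
	chunk_put(a);
	chunk_put(b);
	return 0;
}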
-
-static void hws_onesize_element_db_destroy_element(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_elements *elem,
- struct mlx5hws_pool_chunk *chunk)
-{
- if (unlikely(!pool->resource[chunk->resource_idx]))
- pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
-
- hws_pool_resource_free(pool, chunk->resource_idx);
- kfree(elem);
- pool->db.element_manager->elements[chunk->resource_idx] = NULL;
-}
-
-static void hws_onesize_element_db_put_chunk(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
-{
- struct mlx5hws_pool_elements *elem;
-
- if (unlikely(chunk->resource_idx))
- pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
-
- elem = pool->db.element_manager->elements[chunk->resource_idx];
- if (!elem) {
- mlx5hws_err(pool->ctx, "No such element (%d)\n", chunk->resource_idx);
- return;
- }
-
- bitmap_set(elem->bitmap, chunk->offset, 1);
- elem->is_full = false;
- elem->num_of_elements--;
-
- if (pool->flags & MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE &&
- !elem->num_of_elements)
- hws_onesize_element_db_destroy_element(pool, elem, chunk);
-}
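hws_onesize_element_db_put_chunk() above combines a per-element use counter with an optional release-on-empty policy: the backing resource is only torn down when the last chunk comes back and MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE is set. A small userspace sketch of that counting pattern, with a hypothetical struct element in place of mlx5hws_pool_elements:

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct element {
	unsigned int num_of_elements;	/* chunks currently handed out */
	bool release_when_empty;	/* models RELEASE_FREE_RESOURCE */
	void *resource;			/* backing allocation */
};

static void element_get(struct element *e)
{
	e->num_of_elements++;
}

static void element_put(struct element *e)
{
	e->num_of_elements--;
	/* Last user gone: drop the backing resource, the way
	 * hws_onesize_element_db_destroy_element() does above.
	 */
	if (e->release_when_empty && !e->num_of_elements) {
		free(e->resource);
		e->resource = NULL;
		printf("resource released\n");
	}
}

int main(void)
{
	struct element e = {
		.release_when_empty = true,
		.resource = malloc(64),
	};

	element_get(&e);
	element_get(&e);
	element_put(&e);	/* still one user, resource kept */
	element_put(&e);	/* last put frees the resource */
	return 0;
}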
-
-static int hws_onesize_element_db_get_chunk(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
-{
- int ret = 0;
-
- /* Go over all memory elements and find/allocate free slot */
- ret = hws_pool_onesize_element_get_mem_chunk(pool, chunk->order,
- &chunk->resource_idx,
- &chunk->offset);
- if (ret)
- mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
- chunk->order);
-
- return ret;
-}
-
-static void hws_onesize_element_db_uninit(struct mlx5hws_pool *pool)
-{
- struct mlx5hws_pool_elements *elem;
- int i;
-
- for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
- elem = pool->db.element_manager->elements[i];
- if (elem) {
- bitmap_free(elem->bitmap);
- kfree(elem);
- pool->db.element_manager->elements[i] = NULL;
- }
- }
- kfree(pool->db.element_manager);
-}
-
-/* This memory management works as follows:
- * - At start, no memory is allocated at all.
- * - When a new request for a chunk arrives:
- *   allocate the first and only slot of memory/resource;
- *   once it is exhausted, return an error.
- */
-static int hws_pool_onesize_element_db_init(struct mlx5hws_pool *pool)
-{
- pool->db.element_manager = kzalloc(sizeof(*pool->db.element_manager), GFP_KERNEL);
- if (!pool->db.element_manager)
- return -ENOMEM;
-
- pool->p_db_uninit = &hws_onesize_element_db_uninit;
- pool->p_get_chunk = &hws_onesize_element_db_get_chunk;
- pool->p_put_chunk = &hws_onesize_element_db_put_chunk;
-
- return 0;
-}
-
-static int hws_pool_db_init(struct mlx5hws_pool *pool,
- enum mlx5hws_db_type db_type)
-{
- int ret;
-
- if (db_type == MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE)
- ret = hws_pool_general_element_db_init(pool);
- else if (db_type == MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE)
- ret = hws_pool_onesize_element_db_init(pool);
- else
- ret = hws_pool_buddy_db_init(pool, pool->alloc_log_sz);
-
- if (ret) {
- mlx5hws_err(pool->ctx, "Failed to init general db : %d (ret: %d)\n", db_type, ret);
- return ret;
- }
-
- return 0;
-}
-
-static void hws_pool_db_unint(struct mlx5hws_pool *pool)
-{
- pool->p_db_uninit(pool);
-}
-
-int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
-{
- int ret;
-
- mutex_lock(&pool->lock);
- ret = pool->p_get_chunk(pool, chunk);
- mutex_unlock(&pool->lock);
-
- return ret;
-}
-
-void mlx5hws_pool_chunk_free(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
-{
- mutex_lock(&pool->lock);
- pool->p_put_chunk(pool, chunk);
- mutex_unlock(&pool->lock);
-}
-
-struct mlx5hws_pool *
-mlx5hws_pool_create(struct mlx5hws_context *ctx, struct mlx5hws_pool_attr *pool_attr)
-{
- enum mlx5hws_db_type res_db_type;
- struct mlx5hws_pool *pool;
-
- pool = kzalloc(sizeof(*pool), GFP_KERNEL);
- if (!pool)
- return NULL;
-
- pool->ctx = ctx;
- pool->type = pool_attr->pool_type;
- pool->alloc_log_sz = pool_attr->alloc_log_sz;
- pool->flags = pool_attr->flags;
- pool->tbl_type = pool_attr->table_type;
- pool->opt_type = pool_attr->opt_type;
-
- /* Support general db */
- if (pool->flags == (MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE |
- MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK))
- res_db_type = MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE;
- else if (pool->flags == (MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
- MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS))
- res_db_type = MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE;
- else
- res_db_type = MLX5HWS_POOL_DB_TYPE_BUDDY;
-
- pool->alloc_log_sz = pool_attr->alloc_log_sz;
-
- if (hws_pool_db_init(pool, res_db_type))
- goto free_pool;
-
- mutex_init(&pool->lock);
-
- return pool;
-
-free_pool:
- kfree(pool);
- return NULL;
-}
-
-int mlx5hws_pool_destroy(struct mlx5hws_pool *pool)
-{
- int i;
-
- mutex_destroy(&pool->lock);
-
- for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++)
- if (pool->resource[i])
- hws_pool_resource_free(pool, i);
-
- hws_pool_db_unint(pool);
-
- kfree(pool);
- return 0;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h
deleted file mode 100644
index 621298b352b2..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h
+++ /dev/null
@@ -1,151 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-
-#ifndef MLX5HWS_POOL_H_
-#define MLX5HWS_POOL_H_
-
-#define MLX5HWS_POOL_STC_LOG_SZ 15
-
-#define MLX5HWS_POOL_RESOURCE_ARR_SZ 100
-
-enum mlx5hws_pool_type {
- MLX5HWS_POOL_TYPE_STE,
- MLX5HWS_POOL_TYPE_STC,
-};
-
-struct mlx5hws_pool_chunk {
- u32 resource_idx;
- /* Internal offset, relative to base index */
- int offset;
- int order;
-};
-
-struct mlx5hws_pool_resource {
- struct mlx5hws_pool *pool;
- u32 base_id;
- u32 range;
-};
-
-enum mlx5hws_pool_flags {
-	/* Only one resource in this pool */
- MLX5HWS_POOL_FLAGS_ONE_RESOURCE = 1 << 0,
- MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE = 1 << 1,
-	/* Resources are not shared between chunks */
- MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK = 1 << 2,
-	/* All objects are of the same size */
- MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS = 1 << 3,
- /* Managed by buddy allocator */
- MLX5HWS_POOL_FLAGS_BUDDY_MANAGED = 1 << 4,
- /* Allocate pool_type memory on pool creation */
- MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE = 1 << 5,
-
- /* These values should be used by the caller */
- MLX5HWS_POOL_FLAGS_FOR_STC_POOL =
- MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
- MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS,
- MLX5HWS_POOL_FLAGS_FOR_MATCHER_STE_POOL =
- MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE |
- MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK,
- MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL =
- MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
- MLX5HWS_POOL_FLAGS_BUDDY_MANAGED |
- MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE,
-};
-
-enum mlx5hws_pool_optimize {
- MLX5HWS_POOL_OPTIMIZE_NONE = 0x0,
- MLX5HWS_POOL_OPTIMIZE_ORIG = 0x1,
- MLX5HWS_POOL_OPTIMIZE_MIRROR = 0x2,
-};
-
-struct mlx5hws_pool_attr {
- enum mlx5hws_pool_type pool_type;
- enum mlx5hws_table_type table_type;
- enum mlx5hws_pool_flags flags;
- enum mlx5hws_pool_optimize opt_type;
- /* Allocation size once memory is depleted */
- size_t alloc_log_sz;
-};
-
-enum mlx5hws_db_type {
-	/* Used for allocating chunks of a large memory range; each element has its own resource in the FW */
- MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE,
-	/* One resource only; all the elements have the same size */
- MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE,
-	/* Many resources; the memory is allocated with a buddy mechanism */
- MLX5HWS_POOL_DB_TYPE_BUDDY,
-};
-
-struct mlx5hws_buddy_manager {
- struct mlx5hws_buddy_mem *buddies[MLX5HWS_POOL_RESOURCE_ARR_SZ];
-};
-
-struct mlx5hws_pool_elements {
- u32 num_of_elements;
- unsigned long *bitmap;
- u32 log_size;
- bool is_full;
-};
-
-struct mlx5hws_element_manager {
- struct mlx5hws_pool_elements *elements[MLX5HWS_POOL_RESOURCE_ARR_SZ];
-};
-
-struct mlx5hws_pool_db {
- enum mlx5hws_db_type type;
- union {
- struct mlx5hws_element_manager *element_manager;
- struct mlx5hws_buddy_manager *buddy_manager;
- };
-};
-
-typedef int (*mlx5hws_pool_db_get_chunk)(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk);
-typedef void (*mlx5hws_pool_db_put_chunk)(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk);
-typedef void (*mlx5hws_pool_unint_db)(struct mlx5hws_pool *pool);
-
-struct mlx5hws_pool {
- struct mlx5hws_context *ctx;
- enum mlx5hws_pool_type type;
- enum mlx5hws_pool_flags flags;
- struct mutex lock; /* protect the pool */
- size_t alloc_log_sz;
- enum mlx5hws_table_type tbl_type;
- enum mlx5hws_pool_optimize opt_type;
- struct mlx5hws_pool_resource *resource[MLX5HWS_POOL_RESOURCE_ARR_SZ];
- struct mlx5hws_pool_resource *mirror_resource[MLX5HWS_POOL_RESOURCE_ARR_SZ];
- /* DB */
- struct mlx5hws_pool_db db;
- /* Functions */
- mlx5hws_pool_unint_db p_db_uninit;
- mlx5hws_pool_db_get_chunk p_get_chunk;
- mlx5hws_pool_db_put_chunk p_put_chunk;
-};
-
-struct mlx5hws_pool *
-mlx5hws_pool_create(struct mlx5hws_context *ctx,
- struct mlx5hws_pool_attr *pool_attr);
-
-int mlx5hws_pool_destroy(struct mlx5hws_pool *pool);
-
-int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk);
-
-void mlx5hws_pool_chunk_free(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk);
-
-static inline u32
-mlx5hws_pool_chunk_get_base_id(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
-{
- return pool->resource[chunk->resource_idx]->base_id;
-}
-
-static inline u32
-mlx5hws_pool_chunk_get_base_mirror_id(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
-{
- return pool->mirror_resource[chunk->resource_idx]->base_id;
-}
-#endif /* MLX5HWS_POOL_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
index e084a5cbf81f..d56271a9e4f0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#include "mlx5hws_internal.h"
+#include "internal.h"
enum mlx5hws_arg_chunk_size
mlx5hws_arg_data_size_to_arg_log_size(u16 data_size)
@@ -153,8 +153,7 @@ mlx5hws_pat_get_existing_cached_pattern(struct mlx5hws_pattern_cache *cache,
cached_pattern = mlx5hws_pat_find_cached_pattern(cache, num_of_actions, actions);
if (cached_pattern) {
/* LRU: move it to be first in the list */
- list_del_init(&cached_pattern->ptrn_list_node);
- list_add(&cached_pattern->ptrn_list_node, &cache->ptrn_list);
+ list_move(&cached_pattern->ptrn_list_node, &cache->ptrn_list);
cached_pattern->refcount++;
}
@@ -280,7 +279,7 @@ int mlx5hws_pat_get_pattern(struct mlx5hws_context *ctx,
return ret;
clean_pattern:
- mlx5hws_cmd_header_modify_pattern_destroy(ctx->mdev, *pattern_id);
+ mlx5hws_cmd_header_modify_pattern_destroy(ctx->mdev, ptrn_id);
out_unlock:
mutex_unlock(&ctx->pattern_cache->lock);
return ret;
@@ -344,7 +343,7 @@ void mlx5hws_arg_write(struct mlx5hws_send_engine *queue,
mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
memset(wqe_ctrl, 0, wqe_len);
mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
- memcpy(wqe_arg, arg_data, wqe_len);
+ memcpy(wqe_arg, arg_data, MLX5HWS_ARG_DATA_SIZE);
send_attr.id = arg_idx++;
mlx5hws_send_engine_post_end(&ctrl, &send_attr);
@@ -491,8 +490,8 @@ hws_action_modify_get_target_fields(u8 action_type, __be64 *pattern,
switch (action_type) {
case MLX5_ACTION_TYPE_SET:
case MLX5_ACTION_TYPE_ADD:
- *src_field = MLX5_GET(set_action_in, pattern, field);
- *dst_field = INVALID_FIELD;
+ *src_field = INVALID_FIELD;
+ *dst_field = MLX5_GET(set_action_in, pattern, field);
break;
case MLX5_ACTION_TYPE_COPY:
*src_field = MLX5_GET(copy_action_in, pattern, src_field);
@@ -523,57 +522,61 @@ bool mlx5hws_pat_verify_actions(struct mlx5hws_context *ctx, __be64 pattern[], s
return true;
}
-void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions,
- size_t max_actions, size_t *new_size,
- u32 *nope_location, __be64 *new_pat)
+int mlx5hws_pat_calc_nop(__be64 *pattern, size_t num_actions,
+ size_t max_actions, size_t *new_size,
+ u32 *nop_locations, __be64 *new_pat)
{
- u16 prev_src_field = 0, prev_dst_field = 0;
- u16 src_field, dst_field;
+ u16 prev_src_field = INVALID_FIELD, prev_dst_field = INVALID_FIELD;
u8 action_type;
+ bool dependent;
size_t i, j;
*new_size = num_actions;
- *nope_location = 0;
+ *nop_locations = 0;
if (num_actions == 1)
- return;
+ return 0;
for (i = 0, j = 0; i < num_actions; i++, j++) {
- action_type = MLX5_GET(set_action_in, &pattern[i], action_type);
+ u16 src_field = INVALID_FIELD;
+ u16 dst_field = INVALID_FIELD;
+
+ if (j >= max_actions)
+ return -EINVAL;
+ action_type = MLX5_GET(set_action_in, &pattern[i], action_type);
hws_action_modify_get_target_fields(action_type, &pattern[i],
&src_field, &dst_field);
- if (i % 2) {
- if (action_type == MLX5_ACTION_TYPE_COPY &&
- (prev_src_field == src_field ||
- prev_dst_field == dst_field)) {
- /* need Nope */
- *new_size += 1;
- *nope_location |= BIT(i);
- memset(&new_pat[j], 0, MLX5HWS_MODIFY_ACTION_SIZE);
- MLX5_SET(set_action_in, &new_pat[j],
- action_type,
- MLX5_MODIFICATION_TYPE_NOP);
- j++;
- } else if (prev_src_field == src_field) {
- /* need Nope*/
- *new_size += 1;
- *nope_location |= BIT(i);
- MLX5_SET(set_action_in, &new_pat[j],
- action_type,
- MLX5_MODIFICATION_TYPE_NOP);
- j++;
- }
- }
- memcpy(&new_pat[j], &pattern[i], MLX5HWS_MODIFY_ACTION_SIZE);
- /* check if no more space */
- if (j > max_actions) {
- *new_size = num_actions;
- *nope_location = 0;
- return;
+
+ /* For every action, look at it and the previous one. The two
+ * actions are dependent if:
+ */
+ dependent =
+ (i > 0) &&
+ /* At least one of the actions is a write and */
+ (dst_field != INVALID_FIELD ||
+ prev_dst_field != INVALID_FIELD) &&
+ /* One reads from the other's source */
+ (dst_field == prev_src_field ||
+ src_field == prev_dst_field ||
+ /* Or both write to the same destination */
+ dst_field == prev_dst_field);
+
+ if (dependent) {
+ *new_size += 1;
+ *nop_locations |= BIT(i);
+ memset(&new_pat[j], 0, MLX5HWS_MODIFY_ACTION_SIZE);
+ MLX5_SET(set_action_in, &new_pat[j], action_type,
+ MLX5_MODIFICATION_TYPE_NOP);
+ j++;
+ if (j >= max_actions)
+ return -EINVAL;
}
+ memcpy(&new_pat[j], &pattern[i], MLX5HWS_MODIFY_ACTION_SIZE);
prev_src_field = src_field;
prev_dst_field = dst_field;
}
+
+ return 0;
}
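In isolation the new dependency rule is simple: two adjacent modify-header actions conflict when at least one of them writes a field and either one reads what the other writes or both write the same field; a NOP is then inserted between them. A standalone sketch of that test over a toy action list, using plain integers for fields and a made-up INVALID marker rather than the real MLX5_SET pattern encoding:

#include <stdio.h>
#include <stdbool.h>

#define INVALID 0xffff

struct act { unsigned int src, dst; };	/* dst != INVALID means the action writes */

/* Mirrors the test in mlx5hws_pat_calc_nop(): dependent if one action writes
 * and the other touches the written field, or both write the same field.
 */
static bool dependent(const struct act *prev, const struct act *cur)
{
	if (cur->dst == INVALID && prev->dst == INVALID)
		return false;
	return cur->dst == prev->src ||
	       cur->src == prev->dst ||
	       cur->dst == prev->dst;
}

int main(void)
{
	struct act acts[] = {
		{ INVALID, 1 },		/* SET field 1 */
		{ 1, 2 },		/* COPY field 1 to field 2: depends on the SET */
		{ INVALID, 3 },		/* SET field 3: independent */
	};
	unsigned int nop_locations = 0;

	for (int i = 1; i < 3; i++)
		if (dependent(&acts[i - 1], &acts[i]))
			nop_locations |= 1u << i;

	printf("nop before actions: 0x%x\n", nop_locations);	/* prints 0x2 */
	return 0;
}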
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h
index 27ca93385b08..7fbd8dc7aa18 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h
@@ -31,7 +31,7 @@ struct mlx5hws_pattern_cache_item {
u8 *data;
u16 num_of_actions;
} mh_data;
- u32 refcount;
+ u32 refcount; /* protected by pattern_cache lock */
struct list_head ptrn_list_node;
};
@@ -96,6 +96,7 @@ int mlx5hws_arg_write_inline_arg_data(struct mlx5hws_context *ctx,
u8 *arg_data,
size_t data_size);
-void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions, size_t max_actions,
- size_t *new_size, u32 *nope_location, __be64 *new_pat);
+int mlx5hws_pat_calc_nop(__be64 *pattern, size_t num_actions,
+ size_t max_actions, size_t *new_size,
+ u32 *nop_locations, __be64 *new_pat);
#endif /* MLX5HWS_PAT_ARG_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
new file mode 100644
index 000000000000..7b5071c3df36
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
@@ -0,0 +1,394 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+#include "buddy.h"
+
+static void hws_pool_free_one_resource(struct mlx5hws_pool_resource *resource)
+{
+ switch (resource->pool->type) {
+ case MLX5HWS_POOL_TYPE_STE:
+ mlx5hws_cmd_ste_destroy(resource->pool->ctx->mdev, resource->base_id);
+ break;
+ case MLX5HWS_POOL_TYPE_STC:
+ mlx5hws_cmd_stc_destroy(resource->pool->ctx->mdev, resource->base_id);
+ break;
+ default:
+ break;
+ }
+
+ kfree(resource);
+}
+
+static void hws_pool_resource_free(struct mlx5hws_pool *pool)
+{
+ hws_pool_free_one_resource(pool->resource);
+ pool->resource = NULL;
+
+ if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) {
+ hws_pool_free_one_resource(pool->mirror_resource);
+ pool->mirror_resource = NULL;
+ }
+}
+
+static struct mlx5hws_pool_resource *
+hws_pool_create_one_resource(struct mlx5hws_pool *pool, u32 log_range,
+ u32 fw_ft_type)
+{
+ struct mlx5hws_cmd_ste_create_attr ste_attr;
+ struct mlx5hws_cmd_stc_create_attr stc_attr;
+ struct mlx5hws_pool_resource *resource;
+ u32 obj_id = 0;
+ int ret;
+
+ resource = kzalloc(sizeof(*resource), GFP_KERNEL);
+ if (!resource)
+ return NULL;
+
+ switch (pool->type) {
+ case MLX5HWS_POOL_TYPE_STE:
+ ste_attr.log_obj_range = log_range;
+ ste_attr.table_type = fw_ft_type;
+ ret = mlx5hws_cmd_ste_create(pool->ctx->mdev, &ste_attr, &obj_id);
+ break;
+ case MLX5HWS_POOL_TYPE_STC:
+ stc_attr.log_obj_range = log_range;
+ stc_attr.table_type = fw_ft_type;
+ ret = mlx5hws_cmd_stc_create(pool->ctx->mdev, &stc_attr, &obj_id);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ goto free_resource;
+
+ resource->pool = pool;
+ resource->range = 1 << log_range;
+ resource->base_id = obj_id;
+
+ return resource;
+
+free_resource:
+ kfree(resource);
+ return NULL;
+}
+
+static int hws_pool_resource_alloc(struct mlx5hws_pool *pool)
+{
+ struct mlx5hws_pool_resource *resource;
+ u32 fw_ft_type, opt_log_range;
+
+ fw_ft_type = mlx5hws_table_get_res_fw_ft_type(pool->tbl_type, false);
+ opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_MIRROR ?
+ 0 : pool->alloc_log_sz;
+ resource = hws_pool_create_one_resource(pool, opt_log_range, fw_ft_type);
+ if (!resource) {
+ mlx5hws_err(pool->ctx, "Failed to allocate resource\n");
+ return -EINVAL;
+ }
+
+ pool->resource = resource;
+
+ if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) {
+ struct mlx5hws_pool_resource *mirror_resource;
+
+ fw_ft_type = mlx5hws_table_get_res_fw_ft_type(pool->tbl_type, true);
+ opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_ORIG ?
+ 0 : pool->alloc_log_sz;
+ mirror_resource = hws_pool_create_one_resource(pool, opt_log_range, fw_ft_type);
+ if (!mirror_resource) {
+ mlx5hws_err(pool->ctx, "Failed to allocate mirrored resource\n");
+ hws_pool_free_one_resource(resource);
+ pool->resource = NULL;
+ return -EINVAL;
+ }
+ pool->mirror_resource = mirror_resource;
+ }
+
+ return 0;
+}
+
+static int hws_pool_buddy_init(struct mlx5hws_pool *pool)
+{
+ struct mlx5hws_buddy_mem *buddy;
+
+ buddy = mlx5hws_buddy_create(pool->alloc_log_sz);
+ if (!buddy) {
+ mlx5hws_err(pool->ctx, "Failed to create buddy order: %zu\n",
+ pool->alloc_log_sz);
+ return -ENOMEM;
+ }
+
+ if (hws_pool_resource_alloc(pool) != 0) {
+ mlx5hws_err(pool->ctx, "Failed to create resource type: %d size %zu\n",
+ pool->type, pool->alloc_log_sz);
+ mlx5hws_buddy_cleanup(buddy);
+ kfree(buddy);
+ return -ENOMEM;
+ }
+
+ pool->db.buddy = buddy;
+
+ return 0;
+}
+
+static int hws_pool_buddy_db_get_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ struct mlx5hws_buddy_mem *buddy = pool->db.buddy;
+
+ if (!buddy) {
+ mlx5hws_err(pool->ctx, "Bad buddy state\n");
+ return -EINVAL;
+ }
+
+ chunk->offset = mlx5hws_buddy_alloc_mem(buddy, chunk->order);
+ if (chunk->offset >= 0)
+ return 0;
+
+ return -ENOMEM;
+}
+
+static void hws_pool_buddy_db_put_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ struct mlx5hws_buddy_mem *buddy;
+
+ buddy = pool->db.buddy;
+ if (!buddy) {
+ mlx5hws_err(pool->ctx, "Bad buddy state\n");
+ return;
+ }
+
+ mlx5hws_buddy_free_mem(buddy, chunk->offset, chunk->order);
+}
+
+static void hws_pool_buddy_db_uninit(struct mlx5hws_pool *pool)
+{
+ struct mlx5hws_buddy_mem *buddy;
+
+ buddy = pool->db.buddy;
+ if (buddy) {
+ mlx5hws_buddy_cleanup(buddy);
+ kfree(buddy);
+ pool->db.buddy = NULL;
+ }
+}
+
+static int hws_pool_buddy_db_init(struct mlx5hws_pool *pool)
+{
+ int ret;
+
+ ret = hws_pool_buddy_init(pool);
+ if (ret)
+ return ret;
+
+ pool->p_db_uninit = &hws_pool_buddy_db_uninit;
+ pool->p_get_chunk = &hws_pool_buddy_db_get_chunk;
+ pool->p_put_chunk = &hws_pool_buddy_db_put_chunk;
+
+ return 0;
+}
+
+static unsigned long *hws_pool_create_and_init_bitmap(u32 log_range)
+{
+ unsigned long *bitmap;
+
+ bitmap = bitmap_zalloc(1 << log_range, GFP_KERNEL);
+ if (!bitmap)
+ return NULL;
+
+ bitmap_fill(bitmap, 1 << log_range);
+
+ return bitmap;
+}
+
+static int hws_pool_bitmap_init(struct mlx5hws_pool *pool)
+{
+ unsigned long *bitmap;
+
+ bitmap = hws_pool_create_and_init_bitmap(pool->alloc_log_sz);
+ if (!bitmap) {
+ mlx5hws_err(pool->ctx, "Failed to create bitmap order: %zu\n",
+ pool->alloc_log_sz);
+ return -ENOMEM;
+ }
+
+ if (hws_pool_resource_alloc(pool) != 0) {
+ mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %zu\n",
+ pool->type, pool->alloc_log_sz);
+ bitmap_free(bitmap);
+ return -ENOMEM;
+ }
+
+ pool->db.bitmap = bitmap;
+
+ return 0;
+}
+
+static int hws_pool_bitmap_db_get_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ unsigned long *bitmap, size;
+
+ if (chunk->order != 0) {
+ mlx5hws_err(pool->ctx, "Pool only supports order 0 allocs\n");
+ return -EINVAL;
+ }
+
+ bitmap = pool->db.bitmap;
+ if (!bitmap) {
+ mlx5hws_err(pool->ctx, "Bad bitmap state\n");
+ return -EINVAL;
+ }
+
+ size = 1 << pool->alloc_log_sz;
+
+ chunk->offset = find_first_bit(bitmap, size);
+ if (chunk->offset >= size)
+ return -ENOMEM;
+
+ bitmap_clear(bitmap, chunk->offset, 1);
+
+ return 0;
+}
+
+static void hws_pool_bitmap_db_put_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ unsigned long *bitmap;
+
+ bitmap = pool->db.bitmap;
+ if (!bitmap) {
+ mlx5hws_err(pool->ctx, "Bad bitmap state\n");
+ return;
+ }
+
+ bitmap_set(bitmap, chunk->offset, 1);
+}
+
+static void hws_pool_bitmap_db_uninit(struct mlx5hws_pool *pool)
+{
+ unsigned long *bitmap;
+
+ bitmap = pool->db.bitmap;
+ if (bitmap) {
+ bitmap_free(bitmap);
+ pool->db.bitmap = NULL;
+ }
+}
+
+static int hws_pool_bitmap_db_init(struct mlx5hws_pool *pool)
+{
+ int ret;
+
+ ret = hws_pool_bitmap_init(pool);
+ if (ret)
+ return ret;
+
+ pool->p_db_uninit = &hws_pool_bitmap_db_uninit;
+ pool->p_get_chunk = &hws_pool_bitmap_db_get_chunk;
+ pool->p_put_chunk = &hws_pool_bitmap_db_put_chunk;
+
+ return 0;
+}
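The bitmap database above uses the convention "set bit = free entry": bitmap_fill() marks everything free at init, find_first_bit() picks a free offset on alloc, bitmap_clear() marks it used and bitmap_set() returns it. A userspace sketch of the same order-0 alloc/free cycle, with a single word and hand-rolled helpers standing in for the kernel bitmap API:

#include <stdio.h>

#define POOL_SIZE 16				/* 1 << alloc_log_sz */

static unsigned int freemap = (1u << POOL_SIZE) - 1;	/* set bit = free */

static int chunk_alloc(void)			/* order-0 only, like the bitmap db */
{
	for (int off = 0; off < POOL_SIZE; off++) {
		if (freemap & (1u << off)) {
			freemap &= ~(1u << off);	/* models bitmap_clear() */
			return off;
		}
	}
	return -1;				/* pool exhausted */
}

static void chunk_free(int off)
{
	freemap |= 1u << off;			/* models bitmap_set() */
}

int main(void)
{
	int a = chunk_alloc();
	int b = chunk_alloc();

	printf("offsets %d %d\n", a, b);		/* 0 1 */
	chunk_free(a);
	printf("next alloc reuses %d\n", chunk_alloc());	/* 0 again */
	return 0;
}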
+
+static int hws_pool_db_init(struct mlx5hws_pool *pool,
+ enum mlx5hws_db_type db_type)
+{
+ int ret;
+
+ if (db_type == MLX5HWS_POOL_DB_TYPE_BITMAP)
+ ret = hws_pool_bitmap_db_init(pool);
+ else
+ ret = hws_pool_buddy_db_init(pool);
+
+ if (ret) {
+ mlx5hws_err(pool->ctx, "Failed to init pool type: %d (ret: %d)\n",
+ db_type, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hws_pool_db_unint(struct mlx5hws_pool *pool)
+{
+ pool->p_db_uninit(pool);
+}
+
+int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ int ret;
+
+ mutex_lock(&pool->lock);
+ ret = pool->p_get_chunk(pool, chunk);
+ if (ret == 0)
+ pool->available_elems -= 1 << chunk->order;
+ mutex_unlock(&pool->lock);
+
+ return ret;
+}
+
+void mlx5hws_pool_chunk_free(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ mutex_lock(&pool->lock);
+ pool->p_put_chunk(pool, chunk);
+ pool->available_elems += 1 << chunk->order;
+ mutex_unlock(&pool->lock);
+}
+
+struct mlx5hws_pool *
+mlx5hws_pool_create(struct mlx5hws_context *ctx, struct mlx5hws_pool_attr *pool_attr)
+{
+ enum mlx5hws_db_type res_db_type;
+ struct mlx5hws_pool *pool;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ pool->ctx = ctx;
+ pool->type = pool_attr->pool_type;
+ pool->alloc_log_sz = pool_attr->alloc_log_sz;
+ pool->flags = pool_attr->flags;
+ pool->tbl_type = pool_attr->table_type;
+ pool->opt_type = pool_attr->opt_type;
+
+ if (pool->flags & MLX5HWS_POOL_FLAG_BUDDY)
+ res_db_type = MLX5HWS_POOL_DB_TYPE_BUDDY;
+ else
+ res_db_type = MLX5HWS_POOL_DB_TYPE_BITMAP;
+
+ pool->alloc_log_sz = pool_attr->alloc_log_sz;
+ pool->available_elems = 1 << pool_attr->alloc_log_sz;
+
+ if (hws_pool_db_init(pool, res_db_type))
+ goto free_pool;
+
+ mutex_init(&pool->lock);
+
+ return pool;
+
+free_pool:
+ kfree(pool);
+ return NULL;
+}
+
+void mlx5hws_pool_destroy(struct mlx5hws_pool *pool)
+{
+ mutex_destroy(&pool->lock);
+
+ if (pool->available_elems != 1 << pool->alloc_log_sz)
+ mlx5hws_err(pool->ctx, "Attempting to destroy non-empty pool\n");
+
+ if (pool->resource)
+ hws_pool_resource_free(pool);
+
+ hws_pool_db_unint(pool);
+
+ kfree(pool);
+}
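Taken together, a caller of the reworked pool creates it (passing MLX5HWS_POOL_FLAG_BUDDY when it needs chunks larger than one entry), allocates chunks by order, and derives the absolute FW object index from the pool's base id plus the chunk offset. A hedged sketch of that flow against the API in this patch; the function name, context origin and sizes below are placeholders, and error handling is trimmed:

/* Hypothetical caller, e.g. somewhere in matcher setup code. */
static int example_alloc_8_stes(struct mlx5hws_context *ctx)
{
	struct mlx5hws_pool_attr attr = {
		.pool_type = MLX5HWS_POOL_TYPE_STE,
		.table_type = MLX5HWS_TABLE_TYPE_FDB,
		.flags = MLX5HWS_POOL_FLAG_BUDDY,	/* allow order > 0 */
		.alloc_log_sz = 10,			/* 1024 STEs in the pool */
	};
	struct mlx5hws_pool_chunk chunk = { .order = 3 };	/* 8 contiguous STEs */
	struct mlx5hws_pool *pool;
	u32 ste_index;
	int ret;

	pool = mlx5hws_pool_create(ctx, &attr);
	if (!pool)
		return -ENOMEM;

	ret = mlx5hws_pool_chunk_alloc(pool, &chunk);
	if (ret)
		goto destroy;

	/* Absolute FW object index = resource base id + chunk offset. */
	ste_index = mlx5hws_pool_get_base_id(pool) + chunk.offset;
	(void)ste_index;

	/* available_elems dropped by 1 << chunk.order; freeing restores it. */
	mlx5hws_pool_chunk_free(pool, &chunk);

destroy:
	mlx5hws_pool_destroy(pool);
	return ret;
}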
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h
new file mode 100644
index 000000000000..33e33d5f1fb3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_POOL_H_
+#define MLX5HWS_POOL_H_
+
+#define MLX5HWS_POOL_STC_LOG_SZ 15
+
+enum mlx5hws_pool_type {
+ MLX5HWS_POOL_TYPE_STE,
+ MLX5HWS_POOL_TYPE_STC,
+};
+
+struct mlx5hws_pool_chunk {
+ int offset;
+ int order;
+};
+
+struct mlx5hws_pool_resource {
+ struct mlx5hws_pool *pool;
+ u32 base_id;
+ u32 range;
+};
+
+enum mlx5hws_pool_flags {
+ /* Managed by a buddy allocator. If this is not set only allocations of
+ * order 0 are supported.
+ */
+ MLX5HWS_POOL_FLAG_BUDDY = BIT(0),
+};
+
+enum mlx5hws_pool_optimize {
+ MLX5HWS_POOL_OPTIMIZE_NONE = 0x0,
+ MLX5HWS_POOL_OPTIMIZE_ORIG = 0x1,
+ MLX5HWS_POOL_OPTIMIZE_MIRROR = 0x2,
+ MLX5HWS_POOL_OPTIMIZE_MAX = 0x3,
+};
+
+struct mlx5hws_pool_attr {
+ enum mlx5hws_pool_type pool_type;
+ enum mlx5hws_table_type table_type;
+ enum mlx5hws_pool_flags flags;
+ enum mlx5hws_pool_optimize opt_type;
+ /* Allocation size once memory is depleted */
+ size_t alloc_log_sz;
+};
+
+enum mlx5hws_db_type {
+ /* Uses a bitmap, supports only allocations of order 0. */
+ MLX5HWS_POOL_DB_TYPE_BITMAP,
+ /* Entries are managed using a buddy mechanism. */
+ MLX5HWS_POOL_DB_TYPE_BUDDY,
+};
+
+struct mlx5hws_pool_db {
+ enum mlx5hws_db_type type;
+ union {
+ unsigned long *bitmap;
+ struct mlx5hws_buddy_mem *buddy;
+ };
+};
+
+typedef int (*mlx5hws_pool_db_get_chunk)(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk);
+typedef void (*mlx5hws_pool_db_put_chunk)(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk);
+typedef void (*mlx5hws_pool_unint_db)(struct mlx5hws_pool *pool);
+
+struct mlx5hws_pool {
+ struct mlx5hws_context *ctx;
+ enum mlx5hws_pool_type type;
+ enum mlx5hws_pool_flags flags;
+ struct mutex lock; /* protect the pool */
+ size_t alloc_log_sz;
+ size_t available_elems;
+ enum mlx5hws_table_type tbl_type;
+ enum mlx5hws_pool_optimize opt_type;
+ struct mlx5hws_pool_resource *resource;
+ struct mlx5hws_pool_resource *mirror_resource;
+ struct mlx5hws_pool_db db;
+ /* Functions */
+ mlx5hws_pool_unint_db p_db_uninit;
+ mlx5hws_pool_db_get_chunk p_get_chunk;
+ mlx5hws_pool_db_put_chunk p_put_chunk;
+};
+
+struct mlx5hws_pool *
+mlx5hws_pool_create(struct mlx5hws_context *ctx,
+ struct mlx5hws_pool_attr *pool_attr);
+
+void mlx5hws_pool_destroy(struct mlx5hws_pool *pool);
+
+int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk);
+
+void mlx5hws_pool_chunk_free(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk);
+
+static inline u32 mlx5hws_pool_get_base_id(struct mlx5hws_pool *pool)
+{
+ return pool->resource->base_id;
+}
+
+static inline u32 mlx5hws_pool_get_base_mirror_id(struct mlx5hws_pool *pool)
+{
+ return pool->mirror_resource->base_id;
+}
+
+static inline bool
+mlx5hws_pool_empty(struct mlx5hws_pool *pool)
+{
+ bool ret;
+
+ mutex_lock(&pool->lock);
+ ret = pool->available_elems == 0;
+ mutex_unlock(&pool->lock);
+
+ return ret;
+}
+
+static inline bool
+mlx5hws_pool_full(struct mlx5hws_pool *pool)
+{
+ bool ret;
+
+ mutex_lock(&pool->lock);
+ ret = pool->available_elems == (1 << pool->alloc_log_sz);
+ mutex_unlock(&pool->lock);
+
+ return ret;
+}
+#endif /* MLX5HWS_POOL_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_prm.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h
index de92cecbeb92..271490a51b96 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_prm.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h
@@ -390,11 +390,6 @@ struct mlx5_ifc_definer_bits {
u8 match_mask[0x160];
};
-struct mlx5_ifc_arg_bits {
- u8 rsvd0[0x88];
- u8 access_pd[0x18];
-};
-
struct mlx5_ifc_header_modify_pattern_in_bits {
u8 modify_field_select[0x40];
@@ -428,11 +423,6 @@ struct mlx5_ifc_create_definer_in_bits {
struct mlx5_ifc_definer_bits definer;
};
-struct mlx5_ifc_create_arg_in_bits {
- struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
- struct mlx5_ifc_arg_bits arg;
-};
-
struct mlx5_ifc_create_header_modify_pattern_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
struct mlx5_ifc_header_modify_pattern_in_bits pattern;
@@ -479,36 +469,4 @@ enum {
MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL = 1,
};
-struct mlx5_ifc_alloc_packet_reformat_out_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
-
- u8 syndrome[0x20];
-
- u8 packet_reformat_id[0x20];
-
- u8 reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_dealloc_packet_reformat_in_bits {
- u8 opcode[0x10];
- u8 reserved_at_10[0x10];
-
- u8 reserved_at_20[0x10];
- u8 op_mod[0x10];
-
- u8 packet_reformat_id[0x20];
-
- u8 reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_dealloc_packet_reformat_out_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
-
- u8 syndrome[0x20];
-
- u8 reserved_at_40[0x40];
-};
-
#endif /* MLX5_PRM_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c
index 8a011b958b43..a94f094e72ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c
@@ -1,12 +1,10 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#include "mlx5hws_internal.h"
+#include "internal.h"
-static void hws_rule_skip(struct mlx5hws_matcher *matcher,
- struct mlx5hws_match_template *mt,
- u32 flow_source,
- bool *skip_rx, bool *skip_tx)
+void mlx5hws_rule_skip(struct mlx5hws_matcher *matcher, u32 flow_source,
+ bool *skip_rx, bool *skip_tx)
{
/* By default FDB rules are added to both RX and TX */
*skip_rx = false;
@@ -14,20 +12,21 @@ static void hws_rule_skip(struct mlx5hws_matcher *matcher,
if (flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT) {
*skip_rx = true;
- } else if (flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK) {
+ return;
+ }
+
+ if (flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK) {
*skip_tx = true;
- } else {
- /* If no flow source was set for current rule,
- * check for flow source in matcher attributes.
- */
- if (matcher->attr.optimize_flow_src) {
- *skip_tx =
- matcher->attr.optimize_flow_src == MLX5HWS_MATCHER_FLOW_SRC_WIRE;
- *skip_rx =
- matcher->attr.optimize_flow_src == MLX5HWS_MATCHER_FLOW_SRC_VPORT;
- return;
- }
+ return;
}
+
+ /* If no flow source was set for current rule,
+ * check for flow source in matcher attributes.
+ */
+ *skip_tx = matcher->attr.optimize_flow_src ==
+ MLX5HWS_MATCHER_FLOW_SRC_WIRE;
+ *skip_rx = matcher->attr.optimize_flow_src ==
+ MLX5HWS_MATCHER_FLOW_SRC_VPORT;
}
static void
@@ -66,7 +65,8 @@ static void hws_rule_init_dep_wqe(struct mlx5hws_send_ring_dep_wqe *dep_wqe,
attr->rule_idx : 0;
if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
- hws_rule_skip(matcher, mt, attr->flow_source, &skip_rx, &skip_tx);
+ mlx5hws_rule_skip(matcher, attr->flow_source,
+ &skip_rx, &skip_tx);
if (!skip_rx) {
dep_wqe->rtc_0 = matcher->match_ste.rtc_0_id;
@@ -129,27 +129,18 @@ static void hws_rule_gen_comp(struct mlx5hws_send_engine *queue,
static void
hws_rule_save_resize_info(struct mlx5hws_rule *rule,
- struct mlx5hws_send_ste_attr *ste_attr,
- bool is_update)
+ struct mlx5hws_send_ste_attr *ste_attr)
{
if (!mlx5hws_matcher_is_resizable(rule->matcher))
return;
- if (likely(!is_update)) {
+ /* resize_info might already exist (if we're in update flow) */
+ if (likely(!rule->resize_info)) {
rule->resize_info = kzalloc(sizeof(*rule->resize_info), GFP_KERNEL);
if (unlikely(!rule->resize_info)) {
pr_warn("HWS: resize info isn't allocated for rule\n");
return;
}
-
- rule->resize_info->max_stes =
- rule->matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes;
- rule->resize_info->action_ste_pool[0] = rule->matcher->action_ste[0].max_stes ?
- rule->matcher->action_ste[0].pool :
- NULL;
- rule->resize_info->action_ste_pool[1] = rule->matcher->action_ste[1].max_stes ?
- rule->matcher->action_ste[1].pool :
- NULL;
}
memcpy(rule->resize_info->ctrl_seg, ste_attr->wqe_ctrl,
@@ -204,84 +195,30 @@ hws_rule_load_delete_info(struct mlx5hws_rule *rule,
}
}
-static int hws_rule_alloc_action_ste_idx(struct mlx5hws_rule *rule,
- u8 action_ste_selector)
+static int mlx5hws_rule_alloc_action_ste(struct mlx5hws_rule *rule,
+ u16 queue_id, bool skip_rx,
+ bool skip_tx)
{
struct mlx5hws_matcher *matcher = rule->matcher;
- struct mlx5hws_matcher_action_ste *action_ste;
- struct mlx5hws_pool_chunk ste = {0};
- int ret;
-
- action_ste = &matcher->action_ste[action_ste_selector];
- ste.order = ilog2(roundup_pow_of_two(action_ste->max_stes));
- ret = mlx5hws_pool_chunk_alloc(action_ste->pool, &ste);
- if (unlikely(ret)) {
- mlx5hws_err(matcher->tbl->ctx,
- "Failed to allocate STE for rule actions");
- return ret;
- }
- rule->action_ste_idx = ste.offset;
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
- return 0;
+ rule->action_ste.ste.order =
+ ilog2(roundup_pow_of_two(matcher->num_of_action_stes));
+ return mlx5hws_action_ste_chunk_alloc(&ctx->action_ste_pool[queue_id],
+ skip_rx, skip_tx,
+ &rule->action_ste);
}
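The chunk order above is the base-2 log of the rule's action-STE count rounded up to a power of two, so for example a rule that needs 5 action STEs allocates an order-3 (8-entry) chunk. A tiny userspace check of that arithmetic, with plain C stand-ins for roundup_pow_of_two() and ilog2():

#include <stdio.h>

static unsigned int roundup_pow2(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static unsigned int log2u(unsigned int n)
{
	unsigned int l = 0;

	while (n > 1) {
		n >>= 1;
		l++;
	}
	return l;
}

int main(void)
{
	for (unsigned int stes = 1; stes <= 8; stes++)
		printf("%u action STEs -> order %u (%u entries)\n",
		       stes, log2u(roundup_pow2(stes)), roundup_pow2(stes));
	return 0;
}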
-static void hws_rule_free_action_ste_idx(struct mlx5hws_rule *rule,
- u8 action_ste_selector)
+void mlx5hws_rule_free_action_ste(struct mlx5hws_action_ste_chunk *action_ste)
{
- struct mlx5hws_matcher *matcher = rule->matcher;
- struct mlx5hws_pool_chunk ste = {0};
- struct mlx5hws_pool *pool;
- u8 max_stes;
-
- if (mlx5hws_matcher_is_resizable(matcher)) {
- /* Free the original action pool if rule was resized */
- max_stes = rule->resize_info->max_stes;
- pool = rule->resize_info->action_ste_pool[action_ste_selector];
- } else {
- max_stes = matcher->action_ste[action_ste_selector].max_stes;
- pool = matcher->action_ste[action_ste_selector].pool;
- }
-
- /* This release is safe only when the rule match part was deleted */
- ste.order = ilog2(roundup_pow_of_two(max_stes));
- ste.offset = rule->action_ste_idx;
-
- mlx5hws_pool_chunk_free(pool, &ste);
-}
-
-static int hws_rule_alloc_action_ste(struct mlx5hws_rule *rule,
- struct mlx5hws_rule_attr *attr)
-{
- int action_ste_idx;
- int ret;
-
- ret = hws_rule_alloc_action_ste_idx(rule, 0);
- if (unlikely(ret))
- return ret;
-
- action_ste_idx = rule->action_ste_idx;
-
- ret = hws_rule_alloc_action_ste_idx(rule, 1);
- if (unlikely(ret)) {
- hws_rule_free_action_ste_idx(rule, 0);
- return ret;
- }
-
- /* Both pools have to return the same index */
- if (unlikely(rule->action_ste_idx != action_ste_idx)) {
- pr_warn("HWS: allocation of action STE failed - pool indexes mismatch\n");
- return -EINVAL;
- }
-
- return 0;
-}
+ if (!action_ste->action_tbl)
+ return;
-void mlx5hws_rule_free_action_ste(struct mlx5hws_rule *rule)
-{
- if (rule->action_ste_idx > -1) {
- hws_rule_free_action_ste_idx(rule, 1);
- hws_rule_free_action_ste_idx(rule, 0);
- }
+ /* This release is safe only when the rule match STE was deleted
+ * (when the rule is being deleted) or replaced with the new STE that
+ * isn't pointing to old action STEs (when the rule is being updated).
+ */
+ mlx5hws_action_ste_chunk_free(action_ste);
}
static void hws_rule_create_init(struct mlx5hws_rule *rule,
@@ -298,14 +235,17 @@ static void hws_rule_create_init(struct mlx5hws_rule *rule,
/* In update we use these rtc's */
rule->rtc_0 = 0;
rule->rtc_1 = 0;
- rule->action_ste_selector = 0;
+
+ rule->status = MLX5HWS_RULE_STATUS_CREATING;
} else {
- rule->action_ste_selector = !rule->action_ste_selector;
+ rule->status = MLX5HWS_RULE_STATUS_UPDATING;
+ /* Save the old action STE info so we can free it after writing
+ * new action STEs and a corresponding match STE.
+ */
+ rule->old_action_ste = rule->action_ste;
}
rule->pending_wqes = 0;
- rule->action_ste_idx = -1;
- rule->status = MLX5HWS_RULE_STATUS_CREATING;
/* Init default send STE attributes */
ste_attr->gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
@@ -315,8 +255,7 @@ static void hws_rule_create_init(struct mlx5hws_rule *rule,
/* Init default action apply */
apply->tbl_type = tbl->type;
- apply->common_res = &ctx->common_res[tbl->type];
- apply->jump_to_action_stc = matcher->action_ste[0].stc.offset;
+ apply->common_res = &ctx->common_res;
apply->require_dep = 0;
}
@@ -332,8 +271,6 @@ static void hws_rule_move_init(struct mlx5hws_rule *rule,
rule->rtc_1 = 0;
rule->pending_wqes = 0;
- rule->action_ste_idx = -1;
- rule->action_ste_selector = 0;
rule->status = MLX5HWS_RULE_STATUS_CREATING;
rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_WRITING;
}
@@ -394,21 +331,24 @@ static int hws_rule_create_hws(struct mlx5hws_rule *rule,
if (action_stes) {
/* Allocate action STEs for rules that need more than match STE */
- if (!is_update) {
- ret = hws_rule_alloc_action_ste(rule, attr);
- if (ret) {
- mlx5hws_err(ctx, "Failed to allocate action memory %d", ret);
- mlx5hws_send_abort_new_dep_wqe(queue);
- return ret;
- }
+ ret = mlx5hws_rule_alloc_action_ste(rule, attr->queue_id,
+ !!ste_attr.rtc_0,
+ !!ste_attr.rtc_1);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate action memory %d", ret);
+ mlx5hws_send_abort_new_dep_wqe(queue);
+ return ret;
}
+ apply.jump_to_action_stc =
+ rule->action_ste.action_tbl->stc.offset;
/* Skip RX/TX based on the dep_wqe init */
ste_attr.rtc_0 = dep_wqe->rtc_0 ?
- matcher->action_ste[rule->action_ste_selector].rtc_0_id : 0;
+ rule->action_ste.action_tbl->rtc_0_id : 0;
ste_attr.rtc_1 = dep_wqe->rtc_1 ?
- matcher->action_ste[rule->action_ste_selector].rtc_1_id : 0;
+ rule->action_ste.action_tbl->rtc_1_id : 0;
/* Action STEs are written to a specific index last to first */
- ste_attr.direct_index = rule->action_ste_idx + action_stes;
+ ste_attr.direct_index =
+ rule->action_ste.ste.offset + action_stes;
apply.next_direct_idx = ste_attr.direct_index;
} else {
apply.next_direct_idx = 0;
@@ -459,7 +399,7 @@ static int hws_rule_create_hws(struct mlx5hws_rule *rule,
if (!is_update)
hws_rule_save_delete_info(rule, &ste_attr);
- hws_rule_save_resize_info(rule, &ste_attr, is_update);
+ hws_rule_save_resize_info(rule, &ste_attr);
mlx5hws_send_engine_inc_rule(queue);
if (!attr->burst)
@@ -480,7 +420,10 @@ static void hws_rule_destroy_failed_hws(struct mlx5hws_rule *rule,
attr->user_data, MLX5HWS_RULE_STATUS_DELETED);
/* Rule failed now we can safely release action STEs */
- mlx5hws_rule_free_action_ste(rule);
+ mlx5hws_rule_free_action_ste(&rule->action_ste);
+
+ /* Perhaps the rule failed updating - release old action STEs as well */
+ mlx5hws_rule_free_action_ste(&rule->old_action_ste);
/* Clear complex tag */
hws_rule_clear_delete_info(rule);
@@ -517,7 +460,8 @@ static int hws_rule_destroy_hws(struct mlx5hws_rule *rule,
}
/* Rule is not completed yet */
- if (rule->status == MLX5HWS_RULE_STATUS_CREATING)
+ if (rule->status == MLX5HWS_RULE_STATUS_CREATING ||
+ rule->status == MLX5HWS_RULE_STATUS_UPDATING)
return -EBUSY;
/* Rule failed and doesn't require cleanup */
@@ -534,7 +478,7 @@ static int hws_rule_destroy_hws(struct mlx5hws_rule *rule,
hws_rule_gen_comp(queue, rule, false,
attr->user_data, MLX5HWS_RULE_STATUS_DELETED);
- mlx5hws_rule_free_action_ste(rule);
+ mlx5hws_rule_free_action_ste(&rule->action_ste);
mlx5hws_rule_clear_resize_info(rule);
return 0;
}
@@ -711,6 +655,124 @@ int mlx5hws_rule_move_hws_add(struct mlx5hws_rule *rule,
return 0;
}
+static u8 hws_rule_ethertype_to_matcher_ipv(u32 ethertype)
+{
+ switch (ethertype) {
+ case ETH_P_IP:
+ return MLX5HWS_MATCHER_IPV_4;
+ case ETH_P_IPV6:
+ return MLX5HWS_MATCHER_IPV_6;
+ default:
+ return MLX5HWS_MATCHER_IPV_UNSET;
+ }
+}
+
+static u8 hws_rule_ip_version_to_matcher_ipv(u32 ip_version)
+{
+ switch (ip_version) {
+ case 4:
+ return MLX5HWS_MATCHER_IPV_4;
+ case 6:
+ return MLX5HWS_MATCHER_IPV_6;
+ default:
+ return MLX5HWS_MATCHER_IPV_UNSET;
+ }
+}
+
+static int hws_rule_check_outer_ip_version(struct mlx5hws_matcher *matcher,
+ u32 *match_param)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ u8 outer_ipv_ether = MLX5HWS_MATCHER_IPV_UNSET;
+ u8 outer_ipv_ip = MLX5HWS_MATCHER_IPV_UNSET;
+ u8 outer_ipv, ver;
+
+ if (matcher->matches_outer_ethertype) {
+ ver = MLX5_GET(fte_match_param, match_param,
+ outer_headers.ethertype);
+ outer_ipv_ether = hws_rule_ethertype_to_matcher_ipv(ver);
+ }
+ if (matcher->matches_outer_ip_version) {
+ ver = MLX5_GET(fte_match_param, match_param,
+ outer_headers.ip_version);
+ outer_ipv_ip = hws_rule_ip_version_to_matcher_ipv(ver);
+ }
+
+ if (outer_ipv_ether != MLX5HWS_MATCHER_IPV_UNSET &&
+ outer_ipv_ip != MLX5HWS_MATCHER_IPV_UNSET &&
+ outer_ipv_ether != outer_ipv_ip) {
+ mlx5hws_err(ctx, "Rule matches on inconsistent outer ethertype and ip version\n");
+ return -EINVAL;
+ }
+
+ outer_ipv = outer_ipv_ether != MLX5HWS_MATCHER_IPV_UNSET ?
+ outer_ipv_ether : outer_ipv_ip;
+ if (outer_ipv != MLX5HWS_MATCHER_IPV_UNSET &&
+ matcher->outer_ip_version != MLX5HWS_MATCHER_IPV_UNSET &&
+ outer_ipv != matcher->outer_ip_version) {
+ mlx5hws_err(ctx, "Matcher and rule disagree on outer IP version\n");
+ return -EINVAL;
+ }
+ matcher->outer_ip_version = outer_ipv;
+
+ return 0;
+}
+
+static int hws_rule_check_inner_ip_version(struct mlx5hws_matcher *matcher,
+ u32 *match_param)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ u8 inner_ipv_ether = MLX5HWS_MATCHER_IPV_UNSET;
+ u8 inner_ipv_ip = MLX5HWS_MATCHER_IPV_UNSET;
+ u8 inner_ipv, ver;
+
+ if (matcher->matches_inner_ethertype) {
+ ver = MLX5_GET(fte_match_param, match_param,
+ inner_headers.ethertype);
+ inner_ipv_ether = hws_rule_ethertype_to_matcher_ipv(ver);
+ }
+ if (matcher->matches_inner_ip_version) {
+ ver = MLX5_GET(fte_match_param, match_param,
+ inner_headers.ip_version);
+ inner_ipv_ip = hws_rule_ip_version_to_matcher_ipv(ver);
+ }
+
+ if (inner_ipv_ether != MLX5HWS_MATCHER_IPV_UNSET &&
+ inner_ipv_ip != MLX5HWS_MATCHER_IPV_UNSET &&
+ inner_ipv_ether != inner_ipv_ip) {
+ mlx5hws_err(ctx, "Rule matches on inconsistent inner ethertype and ip version\n");
+ return -EINVAL;
+ }
+
+ inner_ipv = inner_ipv_ether != MLX5HWS_MATCHER_IPV_UNSET ?
+ inner_ipv_ether : inner_ipv_ip;
+ if (inner_ipv != MLX5HWS_MATCHER_IPV_UNSET &&
+ matcher->inner_ip_version != MLX5HWS_MATCHER_IPV_UNSET &&
+ inner_ipv != matcher->inner_ip_version) {
+ mlx5hws_err(ctx, "Matcher and rule disagree on inner IP version\n");
+ return -EINVAL;
+ }
+ matcher->inner_ip_version = inner_ipv;
+
+ return 0;
+}
+
+static int hws_rule_check_ip_version(struct mlx5hws_matcher *matcher,
+ u32 *match_param)
+{
+ int ret;
+
+ ret = hws_rule_check_outer_ip_version(matcher, match_param);
+ if (unlikely(ret))
+ return ret;
+
+ ret = hws_rule_check_inner_ip_version(matcher, match_param);
+ if (unlikely(ret))
+ return ret;
+
+ return 0;
+}
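The checks above reject a rule whose ethertype and ip_version fields imply different IP versions and pin the matcher to the version seen first. A standalone sketch of the same consistency rule with plain integers instead of fte_match_param fields; the IPV_* values here are illustrative stand-ins for the MLX5HWS_MATCHER_IPV_* enum:

#include <stdio.h>

enum { IPV_UNSET = 0, IPV_4 = 4, IPV_6 = 6 };

/* Returns 0 if the rule is consistent with itself and with the matcher,
 * -1 otherwise; on success the matcher adopts the rule's IP version.
 */
static int check_ip_version(int *matcher_ipv, int ipv_from_ethertype,
			    int ipv_from_ip_version)
{
	int ipv;

	/* Both fields present in the rule: they must agree. */
	if (ipv_from_ethertype != IPV_UNSET &&
	    ipv_from_ip_version != IPV_UNSET &&
	    ipv_from_ethertype != ipv_from_ip_version)
		return -1;

	ipv = ipv_from_ethertype != IPV_UNSET ?
	      ipv_from_ethertype : ipv_from_ip_version;

	/* The rule must also agree with what the matcher already saw. */
	if (ipv != IPV_UNSET && *matcher_ipv != IPV_UNSET &&
	    ipv != *matcher_ipv)
		return -1;

	*matcher_ipv = ipv;
	return 0;
}

int main(void)
{
	int matcher_ipv = IPV_UNSET;

	/* An IPv4 rule pins the matcher; a later IPv6 rule is rejected. */
	printf("%d\n", check_ip_version(&matcher_ipv, IPV_4, IPV_UNSET));	/* 0 */
	printf("%d\n", check_ip_version(&matcher_ipv, IPV_UNSET, IPV_6));	/* -1 */
	return 0;
}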
+
int mlx5hws_rule_create(struct mlx5hws_matcher *matcher,
u8 mt_idx,
u32 *match_param,
@@ -721,6 +783,10 @@ int mlx5hws_rule_create(struct mlx5hws_matcher *matcher,
{
int ret;
+ ret = hws_rule_check_ip_version(matcher, match_param);
+ if (unlikely(ret))
+ return ret;
+
rule_handle->matcher = matcher;
ret = hws_rule_enqueue_precheck_create(rule_handle, attr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h
index 495cdd17e9f3..d0f082b8dbf5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h
@@ -15,6 +15,8 @@ enum mlx5hws_rule_status {
MLX5HWS_RULE_STATUS_UNKNOWN,
MLX5HWS_RULE_STATUS_CREATING,
MLX5HWS_RULE_STATUS_CREATED,
+ MLX5HWS_RULE_STATUS_UPDATING,
+ MLX5HWS_RULE_STATUS_UPDATED,
MLX5HWS_RULE_STATUS_DELETING,
MLX5HWS_RULE_STATUS_DELETED,
MLX5HWS_RULE_STATUS_FAILING,
@@ -42,12 +44,10 @@ struct mlx5hws_rule_match_tag {
};
struct mlx5hws_rule_resize_info {
- struct mlx5hws_pool *action_ste_pool[2];
u32 rtc_0;
u32 rtc_1;
u32 rule_idx;
u8 state;
- u8 max_stes;
u8 ctrl_seg[MLX5HWS_WQE_SZ_GTA_CTRL]; /* Ctrl segment of STE: 48 bytes */
u8 data_seg[MLX5HWS_WQE_SZ_GTA_DATA]; /* Data segment of STE: 64 bytes */
};
@@ -58,18 +58,21 @@ struct mlx5hws_rule {
struct mlx5hws_rule_match_tag tag;
struct mlx5hws_rule_resize_info *resize_info;
};
+ struct mlx5hws_action_ste_chunk action_ste;
+ struct mlx5hws_action_ste_chunk old_action_ste;
u32 rtc_0; /* The RTC into which the STE was inserted */
u32 rtc_1; /* The RTC into which the STE was inserted */
- int action_ste_idx; /* STE array index */
u8 status; /* enum mlx5hws_rule_status */
- u8 action_ste_selector; /* For rule update - which action STE is in use */
u8 pending_wqes;
bool skip_delete; /* For complex rules - another rule with same tag
* still exists, so don't actually delete this rule.
*/
};
-void mlx5hws_rule_free_action_ste(struct mlx5hws_rule *rule);
+void mlx5hws_rule_skip(struct mlx5hws_matcher *matcher, u32 flow_source,
+ bool *skip_rx, bool *skip_tx);
+
+void mlx5hws_rule_free_action_ste(struct mlx5hws_action_ste_chunk *action_ste);
int mlx5hws_rule_move_hws_remove(struct mlx5hws_rule *rule,
void *queue, void *user_data);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
index 6d443e6ee8d9..7510c46e58a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#include "mlx5hws_internal.h"
+#include "internal.h"
#include "lib/clock.h"
enum { CQ_OK = 0, CQ_EMPTY = -1, CQ_POLL_ERR = -2 };
@@ -344,18 +344,133 @@ hws_send_engine_update_rule_resize(struct mlx5hws_send_engine *queue,
}
}
+static void hws_send_engine_dump_error_cqe(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_priv *priv,
+ struct mlx5_cqe64 *cqe)
+{
+ u8 wqe_opcode = cqe ? be32_to_cpu(cqe->sop_drop_qpn) >> 24 : 0;
+ struct mlx5hws_context *ctx = priv->rule->matcher->tbl->ctx;
+ u32 opcode = cqe ? get_cqe_opcode(cqe) : 0;
+ struct mlx5hws_rule *rule = priv->rule;
+
+ /* If something bad happens and lots of rules are failing, we don't
+ * want to pollute dmesg. Print only the first bad cqe per engine,
+ * the one that started the avalanche.
+ */
+ if (queue->error_cqe_printed)
+ return;
+
+ queue->error_cqe_printed = true;
+
+ if (mlx5hws_rule_move_in_progress(rule))
+ mlx5hws_err(ctx,
+ "--- rule 0x%08llx: error completion moving rule: phase %s, wqes left %d\n",
+ HWS_PTR_TO_ID(rule),
+ rule->resize_info->state ==
+ MLX5HWS_RULE_RESIZE_STATE_WRITING ? "WRITING" :
+ rule->resize_info->state ==
+ MLX5HWS_RULE_RESIZE_STATE_DELETING ? "DELETING" :
+ "UNKNOWN",
+ rule->pending_wqes);
+ else
+ mlx5hws_err(ctx,
+ "--- rule 0x%08llx: error completion %s (%d), wqes left %d\n",
+ HWS_PTR_TO_ID(rule),
+ rule->status ==
+ MLX5HWS_RULE_STATUS_CREATING ? "CREATING" :
+ rule->status ==
+ MLX5HWS_RULE_STATUS_DELETING ? "DELETING" :
+ rule->status ==
+ MLX5HWS_RULE_STATUS_FAILING ? "FAILING" :
+ rule->status ==
+ MLX5HWS_RULE_STATUS_UPDATING ? "UPDATING" : "NA",
+ rule->status,
+ rule->pending_wqes);
+
+ mlx5hws_err(ctx, " rule 0x%08llx: matcher 0x%llx %s\n",
+ HWS_PTR_TO_ID(rule),
+ HWS_PTR_TO_ID(rule->matcher),
+ (rule->matcher->flags & MLX5HWS_MATCHER_FLAGS_ISOLATED) ?
+ "(isolated)" : "");
+
+ if (!cqe) {
+ mlx5hws_err(ctx, " rule 0x%08llx: no CQE\n",
+ HWS_PTR_TO_ID(rule));
+ return;
+ }
+
+ mlx5hws_err(ctx, " rule 0x%08llx: cqe->opcode = %d %s\n",
+ HWS_PTR_TO_ID(rule), opcode,
+ opcode == MLX5_CQE_REQ ? "(MLX5_CQE_REQ)" :
+ opcode == MLX5_CQE_REQ_ERR ? "(MLX5_CQE_REQ_ERR)" : " ");
+
+ if (opcode == MLX5_CQE_REQ_ERR) {
+ struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe;
+
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |--- hw_error_syndrome = 0x%x\n",
+ HWS_PTR_TO_ID(rule),
+ err_cqe->rsvd1[16]);
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |--- hw_syndrome_type = 0x%x\n",
+ HWS_PTR_TO_ID(rule),
+ err_cqe->rsvd1[17] >> 4);
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |--- vendor_err_synd = 0x%x\n",
+ HWS_PTR_TO_ID(rule),
+ err_cqe->vendor_err_synd);
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |--- syndrome = 0x%x\n",
+ HWS_PTR_TO_ID(rule),
+ err_cqe->syndrome);
+ }
+
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: cqe->byte_cnt = 0x%08x\n",
+ HWS_PTR_TO_ID(rule), be32_to_cpu(cqe->byte_cnt));
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |-- UPDATE STATUS = %s\n",
+ HWS_PTR_TO_ID(rule),
+ (be32_to_cpu(cqe->byte_cnt) & 0x80000000) ?
+ "FAILURE" : "SUCCESS");
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |------- SYNDROME = %s\n",
+ HWS_PTR_TO_ID(rule),
+ ((be32_to_cpu(cqe->byte_cnt) & 0x00000003) == 1) ?
+ "SET_FLOW_FAIL" :
+ ((be32_to_cpu(cqe->byte_cnt) & 0x00000003) == 2) ?
+ "DISABLE_FLOW_FAIL" : "UNKNOWN");
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: cqe->sop_drop_qpn = 0x%08x\n",
+ HWS_PTR_TO_ID(rule), be32_to_cpu(cqe->sop_drop_qpn));
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |-send wqe opcode = 0x%02x %s\n",
+ HWS_PTR_TO_ID(rule), wqe_opcode,
+ wqe_opcode == MLX5HWS_WQE_OPCODE_TBL_ACCESS ?
+ "(MLX5HWS_WQE_OPCODE_TBL_ACCESS)" : "(UNKNOWN)");
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |------------ qpn = 0x%06x\n",
+ HWS_PTR_TO_ID(rule),
+ be32_to_cpu(cqe->sop_drop_qpn) & 0xffffff);
+}
+
static void hws_send_engine_update_rule(struct mlx5hws_send_engine *queue,
struct mlx5hws_send_ring_priv *priv,
u16 wqe_cnt,
- enum mlx5hws_flow_op_status *status)
+ enum mlx5hws_flow_op_status *status,
+ struct mlx5_cqe64 *cqe)
{
priv->rule->pending_wqes--;
- if (*status == MLX5HWS_FLOW_OP_ERROR) {
+ if (unlikely(*status == MLX5HWS_FLOW_OP_ERROR)) {
if (priv->retry_id) {
+ /* If there is a retry_id, then it's not an error yet,
+ * retry to insert this rule in the collision RTC.
+ */
hws_send_engine_retry_post_send(queue, priv, wqe_cnt);
return;
}
+ hws_send_engine_dump_error_cqe(queue, priv, cqe);
/* Some part of the rule failed */
priv->rule->status = MLX5HWS_RULE_STATUS_FAILING;
*priv->used_id = 0;
@@ -377,17 +492,25 @@ static void hws_send_engine_update_rule(struct mlx5hws_send_engine *queue,
*status = MLX5HWS_FLOW_OP_ERROR;
} else {
- /* Increase the status, this only works on good flow as the enum
- * is arrange it away creating -> created -> deleting -> deleted
+ /* Increase the status, this only works on good flow as
+ * the enum is arranged this way:
+ * - creating -> created
+ * - updating -> updated
+ * - deleting -> deleted
*/
priv->rule->status++;
*status = MLX5HWS_FLOW_OP_SUCCESS;
- /* Rule was deleted now we can safely release action STEs
- * and clear resize info
- */
if (priv->rule->status == MLX5HWS_RULE_STATUS_DELETED) {
- mlx5hws_rule_free_action_ste(priv->rule);
+ /* Rule was deleted, now we can safely release
+ * action STEs and clear resize info
+ */
+ mlx5hws_rule_free_action_ste(&priv->rule->action_ste);
mlx5hws_rule_clear_resize_info(priv->rule);
+ } else if (priv->rule->status == MLX5HWS_RULE_STATUS_UPDATED) {
+ /* Rule was updated, free the old action STEs */
+ mlx5hws_rule_free_action_ste(&priv->rule->old_action_ste);
+ /* Update completed - move the rule back to "created" */
+ priv->rule->status = MLX5HWS_RULE_STATUS_CREATED;
}
}
}
@@ -412,7 +535,8 @@ static void hws_send_engine_update(struct mlx5hws_send_engine *queue,
if (priv->user_data) {
if (priv->rule) {
- hws_send_engine_update_rule(queue, priv, wqe_cnt, &status);
+ hws_send_engine_update_rule(queue, priv, wqe_cnt,
+ &status, cqe);
/* Completion is provided on the last rule WQE */
if (priv->rule->pending_wqes)
return;
@@ -566,7 +690,7 @@ static int hws_send_ring_alloc_sq(struct mlx5_core_dev *mdev,
size_t buf_sz;
int err;
- sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+ sq->uar_map = mdev->priv.bfreg.map;
sq->mdev = mdev;
param.db_numa_node = numa_node;
@@ -633,13 +757,14 @@ static int hws_send_ring_create_sq(struct mlx5_core_dev *mdev, u32 pdn,
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+ MLX5_SET(sqc, sqc, non_wire, 1);
ts_format = mlx5_is_real_time_sq(mdev) ? MLX5_TIMESTAMP_FORMAT_REAL_TIME :
MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
MLX5_SET(sqc, sqc, ts_format, ts_format);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
- MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
+ MLX5_SET(wq, wq, uar_page, mdev->priv.bfreg.index);
MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
@@ -748,12 +873,6 @@ err_free_sqc:
return err;
}
-static void hws_cq_complete(struct mlx5_core_cq *mcq,
- struct mlx5_eqe *eqe)
-{
- pr_err("CQ completion CQ: #%u\n", mcq->cqn);
-}
-
static int hws_send_ring_alloc_cq(struct mlx5_core_dev *mdev,
int numa_node,
struct mlx5hws_send_engine *queue,
@@ -776,7 +895,6 @@ static int hws_send_ring_alloc_cq(struct mlx5_core_dev *mdev,
mcq->cqe_sz = 64;
mcq->set_ci_db = cq->wq_ctrl.db.db;
mcq->arm_db = cq->wq_ctrl.db.db + 1;
- mcq->comp = hws_cq_complete;
for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
@@ -815,7 +933,7 @@ static int hws_send_ring_create_cq(struct mlx5_core_dev *mdev,
(__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
- MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.bfreg.up->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
@@ -838,8 +956,7 @@ static int hws_send_ring_open_cq(struct mlx5_core_dev *mdev,
if (!cqc_data)
return -ENOMEM;
- MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index);
- MLX5_SET(cqc, cqc_data, cqe_sz, queue->num_entries);
+ MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.bfreg.up->index);
MLX5_SET(cqc, cqc_data, log_cq_size, ilog2(queue->num_entries));
err = hws_send_ring_alloc_cq(mdev, numa_node, queue, cqc_data, cq);
@@ -896,15 +1013,18 @@ close_cq:
return err;
}
-void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue)
+static void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue)
{
+ if (!queue->num_entries)
+ return; /* this queue wasn't initialized */
+
hws_send_ring_close(queue);
kfree(queue->completed.entries);
}
-int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
- struct mlx5hws_send_engine *queue,
- u16 queue_size)
+static int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
+ struct mlx5hws_send_engine *queue,
+ u16 queue_size)
{
int err;
@@ -990,6 +1110,7 @@ static int hws_bwc_send_queues_init(struct mlx5hws_context *ctx)
for (i = 0; i < bwc_queues; i++) {
mutex_init(&ctx->bwc_send_queue_locks[i]);
lockdep_register_key(ctx->bwc_lock_class_keys + i);
+ lockdep_set_class(ctx->bwc_send_queue_locks + i, ctx->bwc_lock_class_keys + i);
}
return 0;
@@ -1004,7 +1125,7 @@ int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
u16 queue_size)
{
int err = 0;
- u32 i;
+ int i = 0;
/* Open one extra queue for control path */
ctx->queues = queues + 1;
@@ -1020,7 +1141,13 @@ int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
goto free_bwc_locks;
}
- for (i = 0; i < ctx->queues; i++) {
+ /* If the native API isn't supported, skip the unused native queues:
+ * initialize only the BWC queues and the control queue.
+ */
+ if (!mlx5hws_context_native_supported(ctx))
+ i = mlx5hws_bwc_get_queue_id(ctx, 0);
+
+ for (; i < ctx->queues; i++) {
err = mlx5hws_send_queue_open(ctx, &ctx->send_queue[i], queue_size);
if (err)
goto close_send_queues;
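The completion handling at the top of this send-engine diff distinguishes a deleted rule (release its action STEs and clear resize info) from an updated one (release only the old action STEs and move the rule back to the created state). The stand-alone C sketch below mirrors that state handling with simplified stand-in types; the enum values and fields are illustrative, not the driver's definitions.

/* Build & run: cc -Wall -o rule_state rule_state.c && ./rule_state */
#include <stdio.h>

/* Simplified stand-ins for the driver's rule states (illustrative only). */
enum rule_status { RULE_CREATED, RULE_UPDATED, RULE_DELETED };

struct action_ste { int allocated; };

struct rule {
        enum rule_status status;
        struct action_ste action_ste;      /* current action STEs */
        struct action_ste old_action_ste;  /* previous STEs kept during an update */
};

static void free_action_ste(struct action_ste *ste)
{
        if (ste->allocated) {
                ste->allocated = 0;
                printf("freed action STEs\n");
        }
}

/* Called once the last completion for the rule has arrived. */
static void on_rule_completion(struct rule *r)
{
        if (r->status == RULE_DELETED) {
                /* Deletion finished: release the action STEs. */
                free_action_ste(&r->action_ste);
        } else if (r->status == RULE_UPDATED) {
                /* Update finished: only the old STEs are released,
                 * and the rule returns to the "created" state.
                 */
                free_action_ste(&r->old_action_ste);
                r->status = RULE_CREATED;
        }
}

int main(void)
{
        struct rule r = { .status = RULE_UPDATED,
                          .action_ste = { 1 }, .old_action_ste = { 1 } };

        on_rule_completion(&r);
        printf("status after update completion: %s\n",
               r.status == RULE_CREATED ? "created" : "other");
        return 0;
}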
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h
index b50825d6dc53..3fb8e99309b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h
@@ -140,6 +140,7 @@ struct mlx5hws_send_engine {
u16 used_entries;
u16 num_entries;
bool err;
+ bool error_cqe_printed;
struct mutex lock; /* Protects the send engine */
};
@@ -189,12 +190,6 @@ void mlx5hws_send_abort_new_dep_wqe(struct mlx5hws_send_engine *queue);
void mlx5hws_send_all_dep_wqe(struct mlx5hws_send_engine *queue);
-void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue);
-
-int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
- struct mlx5hws_send_engine *queue,
- u16 queue_size);
-
void mlx5hws_send_queues_close(struct mlx5hws_context *ctx);
int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
index 8c063a8d87d7..6113383ae47b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#include "mlx5hws_internal.h"
+#include "internal.h"
u32 mlx5hws_table_get_id(struct mlx5hws_table *tbl)
{
@@ -9,6 +9,7 @@ u32 mlx5hws_table_get_id(struct mlx5hws_table *tbl)
}
static void hws_table_init_next_ft_attr(struct mlx5hws_table *tbl,
+ u16 uid,
struct mlx5hws_cmd_ft_create_attr *ft_attr)
{
ft_attr->type = tbl->fw_ft_type;
@@ -16,7 +17,9 @@ static void hws_table_init_next_ft_attr(struct mlx5hws_table *tbl,
ft_attr->level = tbl->ctx->caps->fdb_ft.max_level - 1;
else
ft_attr->level = tbl->ctx->caps->nic_ft.max_level - 1;
+
ft_attr->rtc_valid = true;
+ ft_attr->uid = uid;
}
static void hws_table_set_cap_attr(struct mlx5hws_table *tbl,
@@ -37,6 +40,7 @@ static void hws_table_set_cap_attr(struct mlx5hws_table *tbl,
}
static int hws_table_up_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
+__must_hold(&tbl->ctx->ctrl_lock)
{
struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
struct mlx5hws_cmd_set_fte_attr fte_attr = {0};
@@ -48,8 +52,8 @@ static int hws_table_up_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
if (tbl->type != MLX5HWS_TABLE_TYPE_FDB)
return 0;
- if (ctx->common_res[tbl_type].default_miss) {
- ctx->common_res[tbl_type].default_miss->refcount++;
+ if (ctx->common_res.default_miss) {
+ ctx->common_res.default_miss->refcount++;
return 0;
}
@@ -70,29 +74,28 @@ static int hws_table_up_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
return -EINVAL;
}
- /* ctx->ctrl_lock must be held here */
- ctx->common_res[tbl_type].default_miss = default_miss;
- ctx->common_res[tbl_type].default_miss->refcount++;
+ ctx->common_res.default_miss = default_miss;
+ ctx->common_res.default_miss->refcount++;
return 0;
}
/* Called under ctx->ctrl_lock */
static void hws_table_down_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
+__must_hold(&tbl->ctx->ctrl_lock)
{
struct mlx5hws_cmd_forward_tbl *default_miss;
struct mlx5hws_context *ctx = tbl->ctx;
- u8 tbl_type = tbl->type;
if (tbl->type != MLX5HWS_TABLE_TYPE_FDB)
return;
- default_miss = ctx->common_res[tbl_type].default_miss;
+ default_miss = ctx->common_res.default_miss;
if (--default_miss->refcount)
return;
mlx5hws_cmd_forward_tbl_destroy(ctx->mdev, default_miss);
- ctx->common_res[tbl_type].default_miss = NULL;
+ ctx->common_res.default_miss = NULL;
}
static int hws_table_connect_to_default_miss_tbl(struct mlx5hws_table *tbl, u32 ft_id)
@@ -119,12 +122,12 @@ static int hws_table_connect_to_default_miss_tbl(struct mlx5hws_table *tbl, u32
int mlx5hws_table_create_default_ft(struct mlx5_core_dev *mdev,
struct mlx5hws_table *tbl,
- u32 *ft_id)
+ u16 uid, u32 *ft_id)
{
struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
int ret;
- hws_table_init_next_ft_attr(tbl, &ft_attr);
+ hws_table_init_next_ft_attr(tbl, uid, &ft_attr);
hws_table_set_cap_attr(tbl, &ft_attr);
ret = mlx5hws_cmd_flow_table_create(mdev, &ft_attr, ft_id);
@@ -189,7 +192,10 @@ static int hws_table_init(struct mlx5hws_table *tbl)
}
mutex_lock(&ctx->ctrl_lock);
- ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, &tbl->ft_id);
+ ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev,
+ tbl,
+ tbl->uid,
+ &tbl->ft_id);
if (ret) {
mlx5hws_err(tbl->ctx, "Failed to create flow table object\n");
mutex_unlock(&ctx->ctrl_lock);
@@ -239,6 +245,7 @@ struct mlx5hws_table *mlx5hws_table_create(struct mlx5hws_context *ctx,
tbl->ctx = ctx;
tbl->type = attr->type;
tbl->level = attr->level;
+ tbl->uid = attr->uid;
ret = hws_table_init(tbl);
if (ret) {
@@ -342,10 +349,10 @@ int mlx5hws_table_ft_set_next_rtc(struct mlx5hws_context *ctx,
return mlx5hws_cmd_flow_table_modify(ctx->mdev, &ft_attr, ft_id);
}
-static int hws_table_ft_set_next_ft(struct mlx5hws_context *ctx,
- u32 ft_id,
- u32 fw_ft_type,
- u32 next_ft_id)
+int mlx5hws_table_ft_set_next_ft(struct mlx5hws_context *ctx,
+ u32 ft_id,
+ u32 fw_ft_type,
+ u32 next_ft_id)
{
struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
@@ -389,10 +396,10 @@ int mlx5hws_table_connect_to_miss_table(struct mlx5hws_table *src_tbl,
if (dst_tbl) {
if (list_empty(&dst_tbl->matchers_list)) {
/* Connect src_tbl last_ft to dst_tbl start anchor */
- ret = hws_table_ft_set_next_ft(src_tbl->ctx,
- last_ft_id,
- src_tbl->fw_ft_type,
- dst_tbl->ft_id);
+ ret = mlx5hws_table_ft_set_next_ft(src_tbl->ctx,
+ last_ft_id,
+ src_tbl->fw_ft_type,
+ dst_tbl->ft_id);
if (ret)
return ret;
@@ -478,15 +485,9 @@ int mlx5hws_table_set_default_miss(struct mlx5hws_table *tbl,
if (old_miss_tbl)
list_del_init(&tbl->default_miss.next);
- old_miss_tbl = tbl->default_miss.miss_tbl;
- if (old_miss_tbl)
- list_del_init(&old_miss_tbl->default_miss.head);
-
if (miss_tbl)
list_add(&tbl->default_miss.next, &miss_tbl->default_miss.head);
- mutex_unlock(&ctx->ctrl_lock);
- return 0;
out:
mutex_unlock(&ctx->ctrl_lock);
return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h
index dd50420eec9e..1246f9bd8422 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h
@@ -18,6 +18,7 @@ struct mlx5hws_table {
enum mlx5hws_table_type type;
u32 fw_ft_type;
u32 level;
+ u16 uid;
struct list_head matchers_list;
struct list_head tbl_list_node;
struct mlx5hws_default_miss default_miss;
@@ -47,7 +48,7 @@ u32 mlx5hws_table_get_res_fw_ft_type(enum mlx5hws_table_type tbl_type,
int mlx5hws_table_create_default_ft(struct mlx5_core_dev *mdev,
struct mlx5hws_table *tbl,
- u32 *ft_id);
+ u16 uid, u32 *ft_id);
void mlx5hws_table_destroy_default_ft(struct mlx5hws_table *tbl,
u32 ft_id);
@@ -65,4 +66,9 @@ int mlx5hws_table_ft_set_next_rtc(struct mlx5hws_context *ctx,
u32 rtc_0_id,
u32 rtc_1_id);
+int mlx5hws_table_ft_set_next_ft(struct mlx5hws_context *ctx,
+ u32 ft_id,
+ u32 fw_ft_type,
+ u32 next_ft_id);
+
#endif /* MLX5HWS_TABLE_H_ */
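The table changes above replace the per-type default-miss array with a single shared object whose lifetime is reference counted under the context control lock: the first FDB table creates it, later tables only bump the refcount, and the last put destroys it. Below is a small self-contained C sketch of that get/put pattern; the structure and helper names are invented for illustration.

/* Build & run: cc -Wall -pthread -o miss_ref miss_ref.c && ./miss_ref */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the shared default-miss object. */
struct miss_tbl { int refcount; };

static pthread_mutex_t ctrl_lock = PTHREAD_MUTEX_INITIALIZER;
static struct miss_tbl *default_miss;   /* shared across tables */

/* Create on first use, otherwise just take a reference (caller holds ctrl_lock). */
static struct miss_tbl *miss_tbl_get_locked(void)
{
        if (default_miss) {
                default_miss->refcount++;
                return default_miss;
        }
        default_miss = calloc(1, sizeof(*default_miss));
        if (!default_miss)
                return NULL;
        default_miss->refcount = 1;
        return default_miss;
}

/* Drop a reference; destroy when the last user goes away. */
static void miss_tbl_put_locked(void)
{
        if (--default_miss->refcount)
                return;
        free(default_miss);
        default_miss = NULL;
}

int main(void)
{
        pthread_mutex_lock(&ctrl_lock);
        miss_tbl_get_locked();   /* first FDB table creates the object */
        miss_tbl_get_locked();   /* second FDB table reuses it */
        printf("refcount = %d\n", default_miss->refcount);
        miss_tbl_put_locked();
        miss_tbl_put_locked();   /* last put frees the shared object */
        printf("default_miss is %s\n", default_miss ? "alive" : "NULL");
        pthread_mutex_unlock(&ctrl_lock);
        return 0;
}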
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.c
index faf42421c43f..d8e382b9fa61 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#include "mlx5hws_internal.h"
+#include "internal.h"
int mlx5hws_vport_init_vports(struct mlx5hws_context *ctx)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.h
index 0912fc166b3a..0912fc166b3a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.h
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c
index 2ebb61ef3ea9..2ebb61ef3ea9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_arg.c
index 01ed6442095d..01ed6442095d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_arg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_arg.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_buddy.c
index fe228d948b47..fe228d948b47 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_buddy.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_cmd.c
index baefb9a3fa05..1ebb2b15c080 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_cmd.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019 Mellanox Technologies. */
#include "dr_types.h"
+#include "eswitch.h"
int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
bool other_vport,
@@ -34,34 +35,21 @@ int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_vport,
u16 vport_number, u16 *gvmi)
{
- bool ec_vf_func = other_vport ? mlx5_core_is_ec_vf_vport(mdev, vport_number) : false;
- u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
- int out_size;
- void *out;
int err;
- out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- out = kzalloc(out_size, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
-
- MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
- MLX5_SET(query_hca_cap_in, in, other_function, other_vport);
- MLX5_SET(query_hca_cap_in, in, function_id, mlx5_vport_to_func_id(mdev, vport_number, ec_vf_func));
- MLX5_SET(query_hca_cap_in, in, ec_vf_function, ec_vf_func);
- MLX5_SET(query_hca_cap_in, in, op_mod,
- MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 |
- HCA_CAP_OPMOD_GET_CUR);
+ if (!other_vport) {
+ /* self vhca_id */
+ *gvmi = MLX5_CAP_GEN(mdev, vhca_id);
+ return 0;
+ }
- err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
+ err = mlx5_vport_get_vhca_id(mdev, vport_number, gvmi);
if (err) {
- kfree(out);
+ mlx5_core_err(mdev, "Failed to get vport vhca id for vport %d\n",
+ vport_number);
return err;
}
- *gvmi = MLX5_GET(query_hca_cap_out, out, capability.cmd_hca_cap.vhca_id);
-
- kfree(out);
return 0;
}
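The dr_cmd.c change above avoids a full HCA-capability query when the caller asks for its own GVMI: the local vhca_id is read from cached capabilities, and only lookups for other vports go through a query helper. A minimal user-space sketch of that fast path follows, with made-up types and a fake query standing in for the firmware call.

/* Build & run: cc -Wall -o gvmi gvmi.c && ./gvmi */
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; the real driver reads HCA caps / issues a FW query. */
struct dev { uint16_t local_vhca_id; };

/* Pretend "firmware query" for another vport's vhca id. */
static int query_vport_vhca_id(struct dev *d, uint16_t vport, uint16_t *id)
{
        (void)d;
        *id = (uint16_t)(1000 + vport); /* fake answer for the sketch */
        return 0;
}

static int get_gvmi(struct dev *d, int other_vport, uint16_t vport, uint16_t *gvmi)
{
        if (!other_vport) {
                /* Local function: the id is already known from cached caps,
                 * so no query is needed.
                 */
                *gvmi = d->local_vhca_id;
                return 0;
        }
        return query_vport_vhca_id(d, vport, gvmi);
}

int main(void)
{
        struct dev d = { .local_vhca_id = 7 };
        uint16_t id;

        get_gvmi(&d, 0, 0, &id);
        printf("self gvmi: %u\n", id);
        get_gvmi(&d, 1, 3, &id);
        printf("vport 3 gvmi: %u\n", id);
        return 0;
}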
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c
index 030a5776c937..030a5776c937 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.h
index 57c6b363b870..57c6b363b870 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.h
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_definer.c
index d5ea97751945..d5ea97751945 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_definer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_definer.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
index 3d74109f8230..e8c67ed9f748 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
@@ -8,7 +8,7 @@
#define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \
((dmn)->info.caps.dmn_type##_sw_owner || \
((dmn)->info.caps.dmn_type##_sw_owner_v2 && \
- (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_7))
+ (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_8))
bool mlx5dr_domain_is_support_ptrn_arg(struct mlx5dr_domain *dmn)
{
@@ -297,7 +297,9 @@ dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
if (ret) {
mlx5dr_dbg(dmn, "Couldn't insert new vport into xarray (%d)\n", ret);
kvfree(vport_caps);
- return ERR_PTR(ret);
+ if (ret == -EBUSY)
+ return ERR_PTR(-EBUSY);
+ return NULL;
}
return vport_caps;
@@ -408,7 +410,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
switch (dmn->type) {
case MLX5DR_DOMAIN_TYPE_NIC_RX:
if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, rx))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
dmn->info.supp_sw_steering = true;
dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
@@ -417,7 +419,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
break;
case MLX5DR_DOMAIN_TYPE_NIC_TX:
if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, tx))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
dmn->info.supp_sw_steering = true;
dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
@@ -426,10 +428,10 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
break;
case MLX5DR_DOMAIN_TYPE_FDB:
if (!dmn->info.caps.eswitch_manager)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, fdb))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
@@ -514,30 +516,6 @@ def_xa_destroy:
return NULL;
}
-/* Assure synchronization of the device steering tables with updates made by SW
- * insertion.
- */
-int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
-{
- int ret = 0;
-
- if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_SW) {
- mlx5dr_domain_lock(dmn);
- ret = mlx5dr_send_ring_force_drain(dmn);
- mlx5dr_domain_unlock(dmn);
- if (ret) {
- mlx5dr_err(dmn, "Force drain failed flags: %d, ret: %d\n",
- flags, ret);
- return ret;
- }
- }
-
- if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_HW)
- ret = mlx5dr_cmd_sync_steering(dmn->mdev);
-
- return ret;
-}
-
int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
{
if (WARN_ON_ONCE(refcount_read(&dmn->refcount) > 1))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_fw.c
index f05ef0cd54ba..f05ef0cd54ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_fw.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_icm_pool.c
index 0b5af9f3f605..0b5af9f3f605 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_icm_pool.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_matcher.c
index 0726848eb3ff..0726848eb3ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_matcher.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ptrn.c
index 8ca534ef5d03..8ca534ef5d03 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ptrn.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_rule.c
index d1db04baa1fa..d1db04baa1fa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_rule.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
index 6fa06ba2d346..d034372fa047 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
@@ -1049,12 +1049,6 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
return 0;
}
-static void dr_cq_complete(struct mlx5_core_cq *mcq,
- struct mlx5_eqe *eqe)
-{
- pr_err("CQ completion CQ: #%u\n", mcq->cqn);
-}
-
static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
struct mlx5_uars_page *uar,
size_t ncqe)
@@ -1067,7 +1061,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
int inlen, err, eqn;
void *cqc, *in;
__be64 *pas;
- int vector;
u32 i;
cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@@ -1090,14 +1083,20 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
cqe->op_own = MLX5_CQE_INVALID << 4 | MLX5_CQE_OWNER_MASK;
}
+ cq->mcq.cqe_sz = 64;
+ cq->mcq.set_ci_db = cq->wq_ctrl.db.db;
+ cq->mcq.arm_db = cq->wq_ctrl.db.db + 1;
+ *cq->mcq.set_ci_db = 0;
+ cq->mcq.vector = 0;
+ cq->mdev = mdev;
+
inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
sizeof(u64) * cq->wq_ctrl.buf.npages;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
goto err_cqwq;
- vector = raw_smp_processor_id() % mlx5_comp_vectors_max(mdev);
- err = mlx5_comp_eqn_get(mdev, vector, &eqn);
+ err = mlx5_comp_eqn_get(mdev, 0, &eqn);
if (err) {
kvfree(in);
goto err_cqwq;
@@ -1114,28 +1113,12 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas);
- cq->mcq.comp = dr_cq_complete;
-
err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out));
kvfree(in);
if (err)
goto err_cqwq;
- cq->mcq.cqe_sz = 64;
- cq->mcq.set_ci_db = cq->wq_ctrl.db.db;
- cq->mcq.arm_db = cq->wq_ctrl.db.db + 1;
- *cq->mcq.set_ci_db = 0;
-
- /* set no-zero value, in order to avoid the HW to run db-recovery on
- * CQ that used in polling mode.
- */
- *cq->mcq.arm_db = cpu_to_be32(2 << 28);
-
- cq->mcq.vector = 0;
- cq->mcq.uar = uar;
- cq->mdev = mdev;
-
return cq;
err_cqwq:
@@ -1333,36 +1316,3 @@ void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn,
kfree(send_ring->sync_buff);
kfree(send_ring);
}
-
-int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn)
-{
- struct mlx5dr_send_ring *send_ring = dmn->send_ring;
- struct postsend_info send_info = {};
- u8 data[DR_STE_SIZE];
- int num_of_sends_req;
- int ret;
- int i;
-
- /* Sending this amount of requests makes sure we will get drain */
- num_of_sends_req = send_ring->signal_th * TH_NUMS_TO_DRAIN / 2;
-
- /* Send fake requests forcing the last to be signaled */
- send_info.write.addr = (uintptr_t)data;
- send_info.write.length = DR_STE_SIZE;
- send_info.write.lkey = 0;
- /* Using the sync_mr in order to write/read */
- send_info.remote_addr = (uintptr_t)send_ring->sync_mr->addr;
- send_info.rkey = send_ring->sync_mr->mkey;
-
- for (i = 0; i < num_of_sends_req; i++) {
- ret = dr_postsend_icm_data(dmn, &send_info);
- if (ret)
- return ret;
- }
-
- spin_lock(&send_ring->lock);
- ret = dr_handle_pending_wc(dmn, send_ring);
- spin_unlock(&send_ring->lock);
-
- return ret;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c
index e94fbb015efa..c8b8ff80c7c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c
@@ -555,7 +555,7 @@ void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes)
{
- ste_ctx->set_actions_tx(dmn, action_type_set, ste_ctx->actions_caps,
+ ste_ctx->set_actions_tx(ste_ctx, dmn, action_type_set, ste_ctx->actions_caps,
hw_ste_arr, attr, added_stes);
}
@@ -566,7 +566,7 @@ void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes)
{
- ste_ctx->set_actions_rx(dmn, action_type_set, ste_ctx->actions_caps,
+ ste_ctx->set_actions_rx(ste_ctx, dmn, action_type_set, ste_ctx->actions_caps,
hw_ste_arr, attr, added_stes);
}
@@ -1458,6 +1458,8 @@ struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
return mlx5dr_ste_get_ctx_v1();
else if (version == MLX5_STEERING_FORMAT_CONNECTX_7)
return mlx5dr_ste_get_ctx_v2();
+ else if (version == MLX5_STEERING_FORMAT_CONNECTX_8)
+ return mlx5dr_ste_get_ctx_v3();
return NULL;
}
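With ConnectX-8 mapped to a new v3 STE context above, context selection stays a simple dispatch on the steering format version. A self-contained sketch of that idea, using invented version constants and context objects rather than the driver's:

/* Build & run: cc -Wall -o ste_ctx ste_ctx.c && ./ste_ctx */
#include <stdio.h>

/* Illustrative format versions; the values are made up for the sketch. */
enum fmt { FMT_GEN0 = 0, FMT_GEN1 = 1, FMT_GEN2 = 2, FMT_GEN3 = 3 };

struct ste_ctx { const char *name; };

static struct ste_ctx ctx_v0 = { "v0" };
static struct ste_ctx ctx_v1 = { "v1" };
static struct ste_ctx ctx_v2 = { "v2" };
static struct ste_ctx ctx_v3 = { "v3" };

/* Newer device generations map onto newer STE context implementations;
 * unknown versions are rejected by returning NULL.
 */
static struct ste_ctx *get_ctx(enum fmt version)
{
        switch (version) {
        case FMT_GEN0: return &ctx_v0;
        case FMT_GEN1: return &ctx_v1;
        case FMT_GEN2: return &ctx_v2;
        case FMT_GEN3: return &ctx_v3;
        default:       return NULL;
        }
}

int main(void)
{
        struct ste_ctx *ctx = get_ctx(FMT_GEN3);

        printf("selected STE context: %s\n", ctx ? ctx->name : "(unsupported)");
        return 0;
}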
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h
index 54a6619c3ecb..3d5afc832fa5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h
@@ -160,13 +160,15 @@ struct mlx5dr_ste_ctx {
/* Actions */
u32 actions_caps;
- void (*set_actions_rx)(struct mlx5dr_domain *dmn,
+ void (*set_actions_rx)(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *hw_ste_arr,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes);
- void (*set_actions_tx)(struct mlx5dr_domain *dmn,
+ void (*set_actions_tx)(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *hw_ste_arr,
@@ -197,7 +199,21 @@ struct mlx5dr_ste_ctx {
u16 *used_hw_action_num);
int (*alloc_modify_hdr_chunk)(struct mlx5dr_action *action);
void (*dealloc_modify_hdr_chunk)(struct mlx5dr_action *action);
-
+ /* Actions bit set */
+ void (*set_encap)(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id, int size);
+ void (*set_push_vlan)(u8 *ste, u8 *d_action,
+ u32 vlan_hdr);
+ void (*set_pop_vlan)(u8 *hw_ste_p, u8 *s_action,
+ u8 vlans_num);
+ void (*set_rx_decap)(u8 *hw_ste_p, u8 *s_action);
+ void (*set_encap_l3)(u8 *hw_ste_p, u8 *frst_s_action,
+ u8 *scnd_d_action, u32 reformat_id,
+ int size);
+ void (*set_insert_hdr)(u8 *hw_ste_p, u8 *d_action, u32 reformat_id,
+ u8 anchor, u8 offset, int size);
+ void (*set_remove_hdr)(u8 *hw_ste_p, u8 *s_action, u8 anchor,
+ u8 offset, int size);
/* Send */
void (*prepare_for_postsend)(u8 *hw_ste_p, u32 ste_size);
};
@@ -205,5 +221,6 @@ struct mlx5dr_ste_ctx {
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v0(void);
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v1(void);
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v2(void);
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v3(void);
#endif /* _DR_STE_ */
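The new "Actions bit set" callbacks added to struct mlx5dr_ste_ctx let the shared v1 action-building code call back into version-specific low-level setters, so a newer format only overrides the encodings that changed. The sketch below shows the same function-pointer dispatch in plain C; the callback names and encodings are simplified stand-ins, not the driver's.

/* Build & run: cc -Wall -o actions actions.c && ./actions */
#include <stdio.h>

/* Minimal stand-in for the per-version callback table: the generic action
 * builder only talks to these low-level setters.
 */
struct ste_ops {
        void (*set_push_vlan)(const char *ste, unsigned int vlan_hdr);
        void (*set_pop_vlan)(const char *ste, unsigned char count);
};

static void v1_push_vlan(const char *ste, unsigned int vlan_hdr)
{
        printf("[%s] v1 push vlan 0x%x\n", ste, vlan_hdr);
}

static void v1_pop_vlan(const char *ste, unsigned char count)
{
        printf("[%s] v1 pop %u vlan(s)\n", ste, count);
}

static void v3_push_vlan(const char *ste, unsigned int vlan_hdr)
{
        printf("[%s] v3 push vlan 0x%x (newer encoding)\n", ste, vlan_hdr);
}

/* Generic builder shared by all versions: it never hard-codes a version. */
static void build_tx_actions(const struct ste_ops *ops, const char *ste)
{
        ops->set_pop_vlan(ste, 1);
        ops->set_push_vlan(ste, 0x88a8);
}

int main(void)
{
        struct ste_ops v1 = { v1_push_vlan, v1_pop_vlan };
        struct ste_ops v3 = { v3_push_vlan, v1_pop_vlan }; /* reuse what still fits */

        build_tx_actions(&v1, "ste-v1");
        build_tx_actions(&v3, "ste-v3");
        return 0;
}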
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c
index e9f6c7ed7a7b..42536bee55e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c
@@ -406,7 +406,8 @@ static void dr_ste_v0_arr_init_next(u8 **last_ste,
}
static void
-dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
+dr_ste_v0_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *last_ste,
@@ -476,7 +477,8 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
}
static void
-dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
+dr_ste_v0_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *last_ste,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
index 1d49704b9542..6447efbae00d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
@@ -5,136 +5,6 @@
#include "mlx5_ifc_dr_ste_v1.h"
#include "dr_ste_v1.h"
-#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \
- ((inner) ? DR_STE_V1_LU_TYPE_##lookup_type##_I : \
- DR_STE_V1_LU_TYPE_##lookup_type##_O)
-
-enum dr_ste_v1_entry_format {
- DR_STE_V1_TYPE_BWC_BYTE = 0x0,
- DR_STE_V1_TYPE_BWC_DW = 0x1,
- DR_STE_V1_TYPE_MATCH = 0x2,
- DR_STE_V1_TYPE_MATCH_RANGES = 0x7,
-};
-
-/* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ] */
-enum {
- DR_STE_V1_LU_TYPE_NOP = 0x0000,
- DR_STE_V1_LU_TYPE_ETHL2_TNL = 0x0002,
- DR_STE_V1_LU_TYPE_IBL3_EXT = 0x0102,
- DR_STE_V1_LU_TYPE_ETHL2_O = 0x0003,
- DR_STE_V1_LU_TYPE_IBL4 = 0x0103,
- DR_STE_V1_LU_TYPE_ETHL2_I = 0x0004,
- DR_STE_V1_LU_TYPE_SRC_QP_GVMI = 0x0104,
- DR_STE_V1_LU_TYPE_ETHL2_SRC_O = 0x0005,
- DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O = 0x0105,
- DR_STE_V1_LU_TYPE_ETHL2_SRC_I = 0x0006,
- DR_STE_V1_LU_TYPE_ETHL2_HEADERS_I = 0x0106,
- DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x0007,
- DR_STE_V1_LU_TYPE_IPV6_DES_O = 0x0107,
- DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x0008,
- DR_STE_V1_LU_TYPE_IPV6_DES_I = 0x0108,
- DR_STE_V1_LU_TYPE_ETHL4_O = 0x0009,
- DR_STE_V1_LU_TYPE_IPV6_SRC_O = 0x0109,
- DR_STE_V1_LU_TYPE_ETHL4_I = 0x000a,
- DR_STE_V1_LU_TYPE_IPV6_SRC_I = 0x010a,
- DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_O = 0x000b,
- DR_STE_V1_LU_TYPE_MPLS_O = 0x010b,
- DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_I = 0x000c,
- DR_STE_V1_LU_TYPE_MPLS_I = 0x010c,
- DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_O = 0x000d,
- DR_STE_V1_LU_TYPE_GRE = 0x010d,
- DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x000e,
- DR_STE_V1_LU_TYPE_GENERAL_PURPOSE = 0x010e,
- DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I = 0x000f,
- DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0 = 0x010f,
- DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1 = 0x0110,
- DR_STE_V1_LU_TYPE_FLEX_PARSER_OK = 0x0011,
- DR_STE_V1_LU_TYPE_FLEX_PARSER_0 = 0x0111,
- DR_STE_V1_LU_TYPE_FLEX_PARSER_1 = 0x0112,
- DR_STE_V1_LU_TYPE_ETHL4_MISC_O = 0x0113,
- DR_STE_V1_LU_TYPE_ETHL4_MISC_I = 0x0114,
- DR_STE_V1_LU_TYPE_INVALID = 0x00ff,
- DR_STE_V1_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE,
-};
-
-enum dr_ste_v1_header_anchors {
- DR_STE_HEADER_ANCHOR_START_OUTER = 0x00,
- DR_STE_HEADER_ANCHOR_1ST_VLAN = 0x02,
- DR_STE_HEADER_ANCHOR_IPV6_IPV4 = 0x07,
- DR_STE_HEADER_ANCHOR_INNER_MAC = 0x13,
- DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19,
-};
-
-enum dr_ste_v1_action_size {
- DR_STE_ACTION_SINGLE_SZ = 4,
- DR_STE_ACTION_DOUBLE_SZ = 8,
- DR_STE_ACTION_TRIPLE_SZ = 12,
-};
-
-enum dr_ste_v1_action_insert_ptr_attr {
- DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE = 0, /* Regular push header (e.g. push vlan) */
- DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP = 1, /* Encapsulation / Tunneling */
- DR_STE_V1_ACTION_INSERT_PTR_ATTR_ESP = 2, /* IPsec */
-};
-
-enum dr_ste_v1_action_id {
- DR_STE_V1_ACTION_ID_NOP = 0x00,
- DR_STE_V1_ACTION_ID_COPY = 0x05,
- DR_STE_V1_ACTION_ID_SET = 0x06,
- DR_STE_V1_ACTION_ID_ADD = 0x07,
- DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE = 0x08,
- DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER = 0x09,
- DR_STE_V1_ACTION_ID_INSERT_INLINE = 0x0a,
- DR_STE_V1_ACTION_ID_INSERT_POINTER = 0x0b,
- DR_STE_V1_ACTION_ID_FLOW_TAG = 0x0c,
- DR_STE_V1_ACTION_ID_QUEUE_ID_SEL = 0x0d,
- DR_STE_V1_ACTION_ID_ACCELERATED_LIST = 0x0e,
- DR_STE_V1_ACTION_ID_MODIFY_LIST = 0x0f,
- DR_STE_V1_ACTION_ID_ASO = 0x12,
- DR_STE_V1_ACTION_ID_TRAILER = 0x13,
- DR_STE_V1_ACTION_ID_COUNTER_ID = 0x14,
- DR_STE_V1_ACTION_ID_MAX = 0x21,
- /* use for special cases */
- DR_STE_V1_ACTION_ID_SPECIAL_ENCAP_L3 = 0x22,
-};
-
-enum {
- DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0 = 0x00,
- DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1 = 0x01,
- DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2 = 0x02,
- DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08,
- DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09,
- DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e,
- DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0 = 0x18,
- DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1 = 0x19,
- DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40,
- DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
- DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
- DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
- DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f,
- DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
- DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
- DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0 = 0x8c,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1 = 0x8d,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0 = 0x8e,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1 = 0x8f,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0 = 0x90,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1 = 0x91,
-};
-
-enum dr_ste_v1_aso_ctx_type {
- DR_STE_V1_ASO_CTX_TYPE_POLICERS = 0x2,
-};
-
static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = {
[MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
@@ -379,13 +249,12 @@ static void dr_ste_v1_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
MLX5_SET(ste_match_bwc_v1, hw_ste_p, counter_id, ctr_id);
}
-static void dr_ste_v1_set_reparse(u8 *hw_ste_p)
+void dr_ste_v1_set_reparse(u8 *hw_ste_p)
{
MLX5_SET(ste_match_bwc_v1, hw_ste_p, reparse, 1);
}
-static void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action,
- u32 reformat_id, int size)
+void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action, u32 reformat_id, int size)
{
MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, action_id,
DR_STE_V1_ACTION_ID_INSERT_POINTER);
@@ -397,10 +266,10 @@ static void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
- u32 reformat_id,
- u8 anchor, u8 offset,
- int size)
+void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id,
+ u8 anchor, u8 offset,
+ int size)
{
MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action,
action_id, DR_STE_V1_ACTION_ID_INSERT_POINTER);
@@ -417,9 +286,9 @@ static void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
- u8 anchor, u8 offset,
- int size)
+void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
+ u8 anchor, u8 offset,
+ int size)
{
MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
@@ -432,8 +301,7 @@ static void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action,
- u32 vlan_hdr)
+void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action, u32 vlan_hdr)
{
MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
action_id, DR_STE_V1_ACTION_ID_INSERT_INLINE);
@@ -446,7 +314,7 @@ static void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
+void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
{
MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
@@ -459,11 +327,8 @@ static void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_encap_l3(u8 *hw_ste_p,
- u8 *frst_s_action,
- u8 *scnd_d_action,
- u32 reformat_id,
- int size)
+void dr_ste_v1_set_encap_l3(u8 *hw_ste_p, u8 *frst_s_action, u8 *scnd_d_action,
+ u32 reformat_id, int size)
{
/* Remove L2 headers */
MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, action_id,
@@ -483,7 +348,7 @@ static void dr_ste_v1_set_encap_l3(u8 *hw_ste_p,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
+void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
{
MLX5_SET(ste_single_action_remove_header_v1, s_action, action_id,
DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
@@ -620,7 +485,8 @@ static void dr_ste_v1_arr_init_next_match_range(u8 **last_ste,
dr_ste_v1_set_entry_type(*last_ste, DR_STE_V1_TYPE_MATCH_RANGES);
}
-void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
+void dr_ste_v1_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *last_ste,
@@ -640,7 +506,7 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
+ ste_ctx->set_pop_vlan(last_ste, action, attr->vlans.count);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
@@ -677,8 +543,8 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action_sz = DR_STE_ACTION_TRIPLE_SZ;
allow_encap = true;
}
- dr_ste_v1_set_push_vlan(last_ste, action,
- attr->vlans.headers[i]);
+ ste_ctx->set_push_vlan(last_ste, action,
+ attr->vlans.headers[i]);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
}
@@ -691,9 +557,9 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action_sz = DR_STE_ACTION_TRIPLE_SZ;
allow_encap = true;
}
- dr_ste_v1_set_encap(last_ste, action,
- attr->reformat.id,
- attr->reformat.size);
+ ste_ctx->set_encap(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
} else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
@@ -706,10 +572,10 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
}
d_action = action + DR_STE_ACTION_SINGLE_SZ;
- dr_ste_v1_set_encap_l3(last_ste,
- action, d_action,
- attr->reformat.id,
- attr->reformat.size);
+ ste_ctx->set_encap_l3(last_ste,
+ action, d_action,
+ attr->reformat.id,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_TRIPLE_SZ;
action += DR_STE_ACTION_TRIPLE_SZ;
} else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
@@ -718,11 +584,11 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_insert_hdr(last_ste, action,
- attr->reformat.id,
- attr->reformat.param_0,
- attr->reformat.param_1,
- attr->reformat.size);
+ ste_ctx->set_insert_hdr(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
} else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) {
@@ -731,10 +597,10 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_remove_hdr(last_ste, action,
- attr->reformat.param_0,
- attr->reformat.param_1,
- attr->reformat.size);
+ ste_ctx->set_remove_hdr(last_ste, action,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
}
@@ -776,7 +642,8 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
-void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
+void dr_ste_v1_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *last_ste,
@@ -799,7 +666,7 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
allow_modify_hdr = false;
allow_ctr = false;
} else if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2]) {
- dr_ste_v1_set_rx_decap(last_ste, action);
+ ste_ctx->set_rx_decap(last_ste, action);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
allow_modify_hdr = false;
@@ -827,7 +694,7 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
+ ste_ctx->set_pop_vlan(last_ste, action, attr->vlans.count);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
allow_ctr = false;
@@ -868,8 +735,8 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_push_vlan(last_ste, action,
- attr->vlans.headers[i]);
+ ste_ctx->set_push_vlan(last_ste, action,
+ attr->vlans.headers[i]);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
}
@@ -895,9 +762,9 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_encap(last_ste, action,
- attr->reformat.id,
- attr->reformat.size);
+ ste_ctx->set_encap(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
allow_modify_hdr = false;
@@ -912,10 +779,10 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
d_action = action + DR_STE_ACTION_SINGLE_SZ;
- dr_ste_v1_set_encap_l3(last_ste,
- action, d_action,
- attr->reformat.id,
- attr->reformat.size);
+ ste_ctx->set_encap_l3(last_ste,
+ action, d_action,
+ attr->reformat.id,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_TRIPLE_SZ;
allow_modify_hdr = false;
} else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
@@ -925,11 +792,11 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_insert_hdr(last_ste, action,
- attr->reformat.id,
- attr->reformat.param_0,
- attr->reformat.param_1,
- attr->reformat.size);
+ ste_ctx->set_insert_hdr(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
allow_modify_hdr = false;
@@ -941,10 +808,10 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
allow_modify_hdr = true;
allow_ctr = true;
}
- dr_ste_v1_set_remove_hdr(last_ste, action,
- attr->reformat.param_0,
- attr->reformat.param_1,
- attr->reformat.size);
+ ste_ctx->set_remove_hdr(last_ste, action,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
}
@@ -1027,9 +894,6 @@ void dr_ste_v1_set_action_copy(u8 *d_action,
MLX5_SET(ste_double_action_copy_v1, d_action, source_right_shifter, src_shifter);
}
-#define DR_STE_DECAP_L3_ACTION_NUM 8
-#define DR_STE_L2_HDR_MAX_SZ 20
-
int dr_ste_v1_set_action_decap_l3_list(void *data,
u32 data_sz,
u8 *hw_action,
@@ -2330,7 +2194,14 @@ static struct mlx5dr_ste_ctx ste_ctx_v1 = {
.set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list,
.alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
.dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg,
-
+ /* Actions bit set */
+ .set_encap = &dr_ste_v1_set_encap,
+ .set_push_vlan = &dr_ste_v1_set_push_vlan,
+ .set_pop_vlan = &dr_ste_v1_set_pop_vlan,
+ .set_rx_decap = &dr_ste_v1_set_rx_decap,
+ .set_encap_l3 = &dr_ste_v1_set_encap_l3,
+ .set_insert_hdr = &dr_ste_v1_set_insert_hdr,
+ .set_remove_hdr = &dr_ste_v1_set_remove_hdr,
/* Send */
.prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h
new file mode 100644
index 000000000000..591c20c95a6a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef _DR_STE_V1_
+#define _DR_STE_V1_
+
+#include "dr_types.h"
+#include "dr_ste.h"
+
+#define DR_STE_DECAP_L3_ACTION_NUM 8
+#define DR_STE_L2_HDR_MAX_SZ 20
+#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \
+ ((inner) ? DR_STE_V1_LU_TYPE_##lookup_type##_I : \
+ DR_STE_V1_LU_TYPE_##lookup_type##_O)
+
+enum dr_ste_v1_entry_format {
+ DR_STE_V1_TYPE_BWC_BYTE = 0x0,
+ DR_STE_V1_TYPE_BWC_DW = 0x1,
+ DR_STE_V1_TYPE_MATCH = 0x2,
+ DR_STE_V1_TYPE_MATCH_RANGES = 0x7,
+};
+
+/* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ] */
+enum {
+ DR_STE_V1_LU_TYPE_NOP = 0x0000,
+ DR_STE_V1_LU_TYPE_ETHL2_TNL = 0x0002,
+ DR_STE_V1_LU_TYPE_IBL3_EXT = 0x0102,
+ DR_STE_V1_LU_TYPE_ETHL2_O = 0x0003,
+ DR_STE_V1_LU_TYPE_IBL4 = 0x0103,
+ DR_STE_V1_LU_TYPE_ETHL2_I = 0x0004,
+ DR_STE_V1_LU_TYPE_SRC_QP_GVMI = 0x0104,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_O = 0x0005,
+ DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O = 0x0105,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_I = 0x0006,
+ DR_STE_V1_LU_TYPE_ETHL2_HEADERS_I = 0x0106,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x0007,
+ DR_STE_V1_LU_TYPE_IPV6_DES_O = 0x0107,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x0008,
+ DR_STE_V1_LU_TYPE_IPV6_DES_I = 0x0108,
+ DR_STE_V1_LU_TYPE_ETHL4_O = 0x0009,
+ DR_STE_V1_LU_TYPE_IPV6_SRC_O = 0x0109,
+ DR_STE_V1_LU_TYPE_ETHL4_I = 0x000a,
+ DR_STE_V1_LU_TYPE_IPV6_SRC_I = 0x010a,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_O = 0x000b,
+ DR_STE_V1_LU_TYPE_MPLS_O = 0x010b,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_I = 0x000c,
+ DR_STE_V1_LU_TYPE_MPLS_I = 0x010c,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_O = 0x000d,
+ DR_STE_V1_LU_TYPE_GRE = 0x010d,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x000e,
+ DR_STE_V1_LU_TYPE_GENERAL_PURPOSE = 0x010e,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I = 0x000f,
+ DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0 = 0x010f,
+ DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1 = 0x0110,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_OK = 0x0011,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_0 = 0x0111,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_1 = 0x0112,
+ DR_STE_V1_LU_TYPE_ETHL4_MISC_O = 0x0113,
+ DR_STE_V1_LU_TYPE_ETHL4_MISC_I = 0x0114,
+ DR_STE_V1_LU_TYPE_INVALID = 0x00ff,
+ DR_STE_V1_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE,
+};
+
+enum dr_ste_v1_header_anchors {
+ DR_STE_HEADER_ANCHOR_START_OUTER = 0x00,
+ DR_STE_HEADER_ANCHOR_1ST_VLAN = 0x02,
+ DR_STE_HEADER_ANCHOR_IPV6_IPV4 = 0x07,
+ DR_STE_HEADER_ANCHOR_INNER_MAC = 0x13,
+ DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19,
+};
+
+enum dr_ste_v1_action_size {
+ DR_STE_ACTION_SINGLE_SZ = 4,
+ DR_STE_ACTION_DOUBLE_SZ = 8,
+ DR_STE_ACTION_TRIPLE_SZ = 12,
+};
+
+enum dr_ste_v1_action_insert_ptr_attr {
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE = 0, /* Regular push header (e.g. push vlan) */
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP = 1, /* Encapsulation / Tunneling */
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ESP = 2, /* IPsec */
+};
+
+enum dr_ste_v1_action_id {
+ DR_STE_V1_ACTION_ID_NOP = 0x00,
+ DR_STE_V1_ACTION_ID_COPY = 0x05,
+ DR_STE_V1_ACTION_ID_SET = 0x06,
+ DR_STE_V1_ACTION_ID_ADD = 0x07,
+ DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE = 0x08,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER = 0x09,
+ DR_STE_V1_ACTION_ID_INSERT_INLINE = 0x0a,
+ DR_STE_V1_ACTION_ID_INSERT_POINTER = 0x0b,
+ DR_STE_V1_ACTION_ID_FLOW_TAG = 0x0c,
+ DR_STE_V1_ACTION_ID_QUEUE_ID_SEL = 0x0d,
+ DR_STE_V1_ACTION_ID_ACCELERATED_LIST = 0x0e,
+ DR_STE_V1_ACTION_ID_MODIFY_LIST = 0x0f,
+ DR_STE_V1_ACTION_ID_ASO = 0x12,
+ DR_STE_V1_ACTION_ID_TRAILER = 0x13,
+ DR_STE_V1_ACTION_ID_COUNTER_ID = 0x14,
+ DR_STE_V1_ACTION_ID_MAX = 0x21,
+ /* use for special cases */
+ DR_STE_V1_ACTION_ID_SPECIAL_ENCAP_L3 = 0x22,
+};
+
+enum {
+ DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0 = 0x00,
+ DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1 = 0x01,
+ DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2 = 0x02,
+ DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08,
+ DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09,
+ DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e,
+ DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0 = 0x18,
+ DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1 = 0x19,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
+ DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
+ DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
+ DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f,
+ DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
+ DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
+ DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0 = 0x8c,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1 = 0x8d,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0 = 0x8e,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1 = 0x8f,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0 = 0x90,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1 = 0x91,
+};
+
+enum dr_ste_v1_aso_ctx_type {
+ DR_STE_V1_ASO_CTX_TYPE_POLICERS = 0x2,
+};
+
+bool dr_ste_v1_is_miss_addr_set(u8 *hw_ste_p);
+void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr);
+u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p);
+void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask);
+u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p);
+void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type);
+u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p);
+void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size);
+void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi);
+void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size);
+void dr_ste_v1_set_reparse(u8 *hw_ste_p);
+void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action, u32 reformat_id, int size);
+void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action, u32 vlan_hdr);
+void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num);
+void dr_ste_v1_set_encap_l3(u8 *hw_ste_p, u8 *frst_s_action, u8 *scnd_d_action,
+ u32 reformat_id, int size);
+void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action);
+void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action, u32 reformat_id,
+ u8 anchor, u8 offset, int size);
+void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action, u8 anchor,
+ u8 offset, int size);
+void dr_ste_v1_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_domain *dmn,
+ u8 *action_type_set, u32 actions_caps, u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
+void dr_ste_v1_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_domain *dmn,
+ u8 *action_type_set, u32 actions_caps, u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
+void dr_ste_v1_set_action_set(u8 *d_action, u8 hw_field, u8 shifter,
+ u8 length, u32 data);
+void dr_ste_v1_set_action_add(u8 *d_action, u8 hw_field, u8 shifter,
+ u8 length, u32 data);
+void dr_ste_v1_set_action_copy(u8 *d_action, u8 dst_hw_field, u8 dst_shifter,
+ u8 dst_len, u8 src_hw_field, u8 src_shifter);
+int dr_ste_v1_set_action_decap_l3_list(void *data, u32 data_sz, u8 *hw_action,
+ u32 hw_action_sz, u16 *used_hw_action_num);
+int dr_ste_v1_alloc_modify_hdr_ptrn_arg(struct mlx5dr_action *action);
+void dr_ste_v1_free_modify_hdr_ptrn_arg(struct mlx5dr_action *action);
+void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+
+#endif /* _DR_STE_V1_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c
new file mode 100644
index 000000000000..d0ebaf820d42
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include "dr_ste_v1.h"
+#include "dr_ste_v2.h"
+
+static struct mlx5dr_ste_ctx ste_ctx_v2 = {
+ /* Builders */
+ .build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init,
+ .build_eth_l3_ipv6_src_init = &dr_ste_v1_build_eth_l3_ipv6_src_init,
+ .build_eth_l3_ipv6_dst_init = &dr_ste_v1_build_eth_l3_ipv6_dst_init,
+ .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init,
+ .build_eth_l2_src_init = &dr_ste_v1_build_eth_l2_src_init,
+ .build_eth_l2_dst_init = &dr_ste_v1_build_eth_l2_dst_init,
+ .build_eth_l2_tnl_init = &dr_ste_v1_build_eth_l2_tnl_init,
+ .build_eth_l3_ipv4_misc_init = &dr_ste_v1_build_eth_l3_ipv4_misc_init,
+ .build_eth_ipv6_l3_l4_init = &dr_ste_v1_build_eth_ipv6_l3_l4_init,
+ .build_mpls_init = &dr_ste_v1_build_mpls_init,
+ .build_tnl_gre_init = &dr_ste_v1_build_tnl_gre_init,
+ .build_tnl_mpls_init = &dr_ste_v1_build_tnl_mpls_init,
+ .build_tnl_mpls_over_udp_init = &dr_ste_v1_build_tnl_mpls_over_udp_init,
+ .build_tnl_mpls_over_gre_init = &dr_ste_v1_build_tnl_mpls_over_gre_init,
+ .build_icmp_init = &dr_ste_v1_build_icmp_init,
+ .build_general_purpose_init = &dr_ste_v1_build_general_purpose_init,
+ .build_eth_l4_misc_init = &dr_ste_v1_build_eth_l4_misc_init,
+ .build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
+ .build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init,
+ .build_tnl_geneve_tlv_opt_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
+ .build_tnl_geneve_tlv_opt_exist_init =
+ &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init,
+ .build_register_0_init = &dr_ste_v1_build_register_0_init,
+ .build_register_1_init = &dr_ste_v1_build_register_1_init,
+ .build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init,
+ .build_flex_parser_0_init = &dr_ste_v1_build_flex_parser_0_init,
+ .build_flex_parser_1_init = &dr_ste_v1_build_flex_parser_1_init,
+ .build_tnl_gtpu_init = &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
+ .build_tnl_header_0_1_init = &dr_ste_v1_build_tnl_header_0_1_init,
+ .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
+ .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,
+
+ /* Getters and Setters */
+ .ste_init = &dr_ste_v1_init,
+ .set_next_lu_type = &dr_ste_v1_set_next_lu_type,
+ .get_next_lu_type = &dr_ste_v1_get_next_lu_type,
+ .is_miss_addr_set = &dr_ste_v1_is_miss_addr_set,
+ .set_miss_addr = &dr_ste_v1_set_miss_addr,
+ .get_miss_addr = &dr_ste_v1_get_miss_addr,
+ .set_hit_addr = &dr_ste_v1_set_hit_addr,
+ .set_byte_mask = &dr_ste_v1_set_byte_mask,
+ .get_byte_mask = &dr_ste_v1_get_byte_mask,
+
+ /* Actions */
+ .actions_caps = DR_STE_CTX_ACTION_CAP_TX_POP |
+ DR_STE_CTX_ACTION_CAP_RX_PUSH |
+ DR_STE_CTX_ACTION_CAP_RX_ENCAP,
+ .set_actions_rx = &dr_ste_v1_set_actions_rx,
+ .set_actions_tx = &dr_ste_v1_set_actions_tx,
+ .modify_field_arr_sz = ARRAY_SIZE(dr_ste_v2_action_modify_field_arr),
+ .modify_field_arr = dr_ste_v2_action_modify_field_arr,
+ .set_action_set = &dr_ste_v1_set_action_set,
+ .set_action_add = &dr_ste_v1_set_action_add,
+ .set_action_copy = &dr_ste_v1_set_action_copy,
+ .set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list,
+ .alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
+ .dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg,
+ /* Actions bit set */
+ .set_encap = &dr_ste_v1_set_encap,
+ .set_push_vlan = &dr_ste_v1_set_push_vlan,
+ .set_pop_vlan = &dr_ste_v1_set_pop_vlan,
+ .set_rx_decap = &dr_ste_v1_set_rx_decap,
+ .set_encap_l3 = &dr_ste_v1_set_encap_l3,
+ .set_insert_hdr = &dr_ste_v1_set_insert_hdr,
+ .set_remove_hdr = &dr_ste_v1_set_remove_hdr,
+ /* Send */
+ .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
+};
+
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v2(void)
+{
+ return &ste_ctx_v2;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h
index 808b013cf48c..d853fde49cfc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h
@@ -1,7 +1,8 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
-#include "dr_ste_v1.h"
+#ifndef _DR_STE_V2_
+#define _DR_STE_V2_
enum {
DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0 = 0x00,
@@ -164,71 +165,4 @@ static const struct mlx5dr_ste_action_modify_field dr_ste_v2_action_modify_field
},
};
-static struct mlx5dr_ste_ctx ste_ctx_v2 = {
- /* Builders */
- .build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init,
- .build_eth_l3_ipv6_src_init = &dr_ste_v1_build_eth_l3_ipv6_src_init,
- .build_eth_l3_ipv6_dst_init = &dr_ste_v1_build_eth_l3_ipv6_dst_init,
- .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init,
- .build_eth_l2_src_init = &dr_ste_v1_build_eth_l2_src_init,
- .build_eth_l2_dst_init = &dr_ste_v1_build_eth_l2_dst_init,
- .build_eth_l2_tnl_init = &dr_ste_v1_build_eth_l2_tnl_init,
- .build_eth_l3_ipv4_misc_init = &dr_ste_v1_build_eth_l3_ipv4_misc_init,
- .build_eth_ipv6_l3_l4_init = &dr_ste_v1_build_eth_ipv6_l3_l4_init,
- .build_mpls_init = &dr_ste_v1_build_mpls_init,
- .build_tnl_gre_init = &dr_ste_v1_build_tnl_gre_init,
- .build_tnl_mpls_init = &dr_ste_v1_build_tnl_mpls_init,
- .build_tnl_mpls_over_udp_init = &dr_ste_v1_build_tnl_mpls_over_udp_init,
- .build_tnl_mpls_over_gre_init = &dr_ste_v1_build_tnl_mpls_over_gre_init,
- .build_icmp_init = &dr_ste_v1_build_icmp_init,
- .build_general_purpose_init = &dr_ste_v1_build_general_purpose_init,
- .build_eth_l4_misc_init = &dr_ste_v1_build_eth_l4_misc_init,
- .build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
- .build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init,
- .build_tnl_geneve_tlv_opt_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
- .build_tnl_geneve_tlv_opt_exist_init =
- &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init,
- .build_register_0_init = &dr_ste_v1_build_register_0_init,
- .build_register_1_init = &dr_ste_v1_build_register_1_init,
- .build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init,
- .build_flex_parser_0_init = &dr_ste_v1_build_flex_parser_0_init,
- .build_flex_parser_1_init = &dr_ste_v1_build_flex_parser_1_init,
- .build_tnl_gtpu_init = &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
- .build_tnl_header_0_1_init = &dr_ste_v1_build_tnl_header_0_1_init,
- .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
- .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,
-
- /* Getters and Setters */
- .ste_init = &dr_ste_v1_init,
- .set_next_lu_type = &dr_ste_v1_set_next_lu_type,
- .get_next_lu_type = &dr_ste_v1_get_next_lu_type,
- .is_miss_addr_set = &dr_ste_v1_is_miss_addr_set,
- .set_miss_addr = &dr_ste_v1_set_miss_addr,
- .get_miss_addr = &dr_ste_v1_get_miss_addr,
- .set_hit_addr = &dr_ste_v1_set_hit_addr,
- .set_byte_mask = &dr_ste_v1_set_byte_mask,
- .get_byte_mask = &dr_ste_v1_get_byte_mask,
-
- /* Actions */
- .actions_caps = DR_STE_CTX_ACTION_CAP_TX_POP |
- DR_STE_CTX_ACTION_CAP_RX_PUSH |
- DR_STE_CTX_ACTION_CAP_RX_ENCAP,
- .set_actions_rx = &dr_ste_v1_set_actions_rx,
- .set_actions_tx = &dr_ste_v1_set_actions_tx,
- .modify_field_arr_sz = ARRAY_SIZE(dr_ste_v2_action_modify_field_arr),
- .modify_field_arr = dr_ste_v2_action_modify_field_arr,
- .set_action_set = &dr_ste_v1_set_action_set,
- .set_action_add = &dr_ste_v1_set_action_add,
- .set_action_copy = &dr_ste_v1_set_action_copy,
- .set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list,
- .alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
- .dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg,
-
- /* Send */
- .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
-};
-
-struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v2(void)
-{
- return &ste_ctx_v2;
-}
+#endif /* _DR_STE_V2_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c
new file mode 100644
index 000000000000..e468a9ae44e8
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include "dr_ste_v1.h"
+#include "dr_ste_v2.h"
+
+static void dr_ste_v3_set_encap(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id, int size)
+{
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_POINTER);
+ /* The hardware expects the size here in words (2-byte units) */
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, size, size / 2);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, pointer, reformat_id);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, attributes,
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v3_set_push_vlan(u8 *ste, u8 *d_action,
+ u32 vlan_hdr)
+{
+ MLX5_SET(ste_double_action_insert_with_inline_v3, d_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_INLINE);
+ /* The hardware expects the offset to the VLAN header in words (2-byte units) */
+ MLX5_SET(ste_double_action_insert_with_inline_v3, d_action, start_offset,
+ HDR_LEN_L2_MACS >> 1);
+ MLX5_SET(ste_double_action_insert_with_inline_v3, d_action, inline_data, vlan_hdr);
+ dr_ste_v1_set_reparse(ste);
+}
+
+static void dr_ste_v3_set_pop_vlan(u8 *hw_ste_p, u8 *s_action,
+ u8 vlans_num)
+{
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ start_anchor, DR_STE_HEADER_ANCHOR_1ST_VLAN);
+ /* The hardware expects the size here in words (2-byte units) */
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ remove_size, (HDR_LEN_L2_VLAN >> 1) * vlans_num);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v3_set_encap_l3(u8 *hw_ste_p,
+ u8 *frst_s_action,
+ u8 *scnd_d_action,
+ u32 reformat_id,
+ int size)
+{
+ /* Remove L2 headers */
+ MLX5_SET(ste_single_action_remove_header_v3, frst_s_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
+ MLX5_SET(ste_single_action_remove_header_v3, frst_s_action, end_anchor,
+ DR_STE_HEADER_ANCHOR_IPV6_IPV4);
+
+ /* Encapsulate with given reformat ID */
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, scnd_d_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_POINTER);
+ /* The hardware expects the size here in words (2-byte units) */
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, scnd_d_action, size, size / 2);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, scnd_d_action, pointer, reformat_id);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, scnd_d_action, attributes,
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v3_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
+{
+ MLX5_SET(ste_single_action_remove_header_v3, s_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
+ MLX5_SET(ste_single_action_remove_header_v3, s_action, decap, 1);
+ MLX5_SET(ste_single_action_remove_header_v3, s_action, vni_to_cqe, 1);
+ MLX5_SET(ste_single_action_remove_header_v3, s_action, end_anchor,
+ DR_STE_HEADER_ANCHOR_INNER_MAC);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v3_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id, u8 anchor,
+ u8 offset, int size)
+{
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ action_id, DR_STE_V1_ACTION_ID_INSERT_POINTER);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ start_anchor, anchor);
+
+ /* The hardware expects the size and offset here in words (2-byte units) */
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ size, size / 2);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ start_offset, offset / 2);
+
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ pointer, reformat_id);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ attributes, DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v3_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
+ u8 anchor, u8 offset, int size)
+{
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ start_anchor, anchor);
+
+ /* The hardware expects the size and offset here in words (2-byte units) */
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ remove_size, size / 2);
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ start_offset, offset / 2);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static int
+dr_ste_v3_set_action_decap_l3_list(void *data, u32 data_sz,
+ u8 *hw_action, u32 hw_action_sz,
+ uint16_t *used_hw_action_num)
+{
+ u8 padded_data[DR_STE_L2_HDR_MAX_SZ] = {};
+ void *data_ptr = padded_data;
+ u16 used_actions = 0;
+ u32 inline_data_sz;
+ u32 i;
+
+ if (hw_action_sz / DR_STE_ACTION_DOUBLE_SZ < DR_STE_DECAP_L3_ACTION_NUM)
+ return -EINVAL;
+
+ inline_data_sz =
+ MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v3, inline_data);
+
+ /* Left-pad the data so that its total length becomes a multiple of the inline action data size */
+ memcpy(padded_data + data_sz % inline_data_sz, data, data_sz);
+
+ /* Remove L2L3 outer headers */
+ MLX5_SET(ste_single_action_remove_header_v3, hw_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
+ MLX5_SET(ste_single_action_remove_header_v3, hw_action, decap, 1);
+ MLX5_SET(ste_single_action_remove_header_v3, hw_action, vni_to_cqe, 1);
+ MLX5_SET(ste_single_action_remove_header_v3, hw_action, end_anchor,
+ DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4);
+ hw_action += DR_STE_ACTION_DOUBLE_SZ;
+ used_actions++; /* Remove and NOP are a single double action */
+
+ /* Point to the last dword of the header */
+ data_ptr += (data_sz / inline_data_sz) * inline_data_sz;
+
+ /* Add the new header using the inline action, 4 bytes at a time. The
+ * header is written in reverse order towards the beginning of the
+ * packet to avoid incorrect parsing by the HW. Since the header is
+ * 14B or 18B, two extra bytes are padded and later removed.
+ */
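+ /* For example, for a 14-byte header with the 4-byte inline_data
+ * field: the padding above is 14 % 4 = 2 bytes, the loop below runs
+ * 14 / 4 + 1 = 4 times and writes 16 bytes, and the final
+ * remove-by-size action strips the 2 extra bytes (remove_size = 1
+ * word).
+ */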
+ for (i = 0; i < data_sz / inline_data_sz + 1; i++) {
+ void *addr_inline;
+
+ MLX5_SET(ste_double_action_insert_with_inline_v3, hw_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_INLINE);
+ /* The hardware expects the offset here in words (2-byte units) */
+ MLX5_SET(ste_double_action_insert_with_inline_v3, hw_action, start_offset, 0);
+
+ /* Copy bytes one by one to avoid endianness problem */
+ addr_inline = MLX5_ADDR_OF(ste_double_action_insert_with_inline_v3,
+ hw_action, inline_data);
+ memcpy(addr_inline, data_ptr - i * inline_data_sz, inline_data_sz);
+ hw_action += DR_STE_ACTION_DOUBLE_SZ;
+ used_actions++;
+ }
+
+ /* Remove first 2 extra bytes */
+ MLX5_SET(ste_single_action_remove_header_size_v3, hw_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
+ MLX5_SET(ste_single_action_remove_header_size_v3, hw_action, start_offset, 0);
+ /* The hardware expects the size here in words (2-byte units) */
+ MLX5_SET(ste_single_action_remove_header_size_v3, hw_action, remove_size, 1);
+ used_actions++;
+
+ *used_hw_action_num = used_actions;
+
+ return 0;
+}
+
+static struct mlx5dr_ste_ctx ste_ctx_v3 = {
+ /* Builders */
+ .build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init,
+ .build_eth_l3_ipv6_src_init = &dr_ste_v1_build_eth_l3_ipv6_src_init,
+ .build_eth_l3_ipv6_dst_init = &dr_ste_v1_build_eth_l3_ipv6_dst_init,
+ .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init,
+ .build_eth_l2_src_init = &dr_ste_v1_build_eth_l2_src_init,
+ .build_eth_l2_dst_init = &dr_ste_v1_build_eth_l2_dst_init,
+ .build_eth_l2_tnl_init = &dr_ste_v1_build_eth_l2_tnl_init,
+ .build_eth_l3_ipv4_misc_init = &dr_ste_v1_build_eth_l3_ipv4_misc_init,
+ .build_eth_ipv6_l3_l4_init = &dr_ste_v1_build_eth_ipv6_l3_l4_init,
+ .build_mpls_init = &dr_ste_v1_build_mpls_init,
+ .build_tnl_gre_init = &dr_ste_v1_build_tnl_gre_init,
+ .build_tnl_mpls_init = &dr_ste_v1_build_tnl_mpls_init,
+ .build_tnl_mpls_over_udp_init = &dr_ste_v1_build_tnl_mpls_over_udp_init,
+ .build_tnl_mpls_over_gre_init = &dr_ste_v1_build_tnl_mpls_over_gre_init,
+ .build_icmp_init = &dr_ste_v1_build_icmp_init,
+ .build_general_purpose_init = &dr_ste_v1_build_general_purpose_init,
+ .build_eth_l4_misc_init = &dr_ste_v1_build_eth_l4_misc_init,
+ .build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
+ .build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init,
+ .build_tnl_geneve_tlv_opt_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
+ .build_tnl_geneve_tlv_opt_exist_init =
+ &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init,
+ .build_register_0_init = &dr_ste_v1_build_register_0_init,
+ .build_register_1_init = &dr_ste_v1_build_register_1_init,
+ .build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init,
+ .build_flex_parser_0_init = &dr_ste_v1_build_flex_parser_0_init,
+ .build_flex_parser_1_init = &dr_ste_v1_build_flex_parser_1_init,
+ .build_tnl_gtpu_init = &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
+ .build_tnl_header_0_1_init = &dr_ste_v1_build_tnl_header_0_1_init,
+ .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
+ .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,
+
+ /* Getters and Setters */
+ .ste_init = &dr_ste_v1_init,
+ .set_next_lu_type = &dr_ste_v1_set_next_lu_type,
+ .get_next_lu_type = &dr_ste_v1_get_next_lu_type,
+ .is_miss_addr_set = &dr_ste_v1_is_miss_addr_set,
+ .set_miss_addr = &dr_ste_v1_set_miss_addr,
+ .get_miss_addr = &dr_ste_v1_get_miss_addr,
+ .set_hit_addr = &dr_ste_v1_set_hit_addr,
+ .set_byte_mask = &dr_ste_v1_set_byte_mask,
+ .get_byte_mask = &dr_ste_v1_get_byte_mask,
+
+ /* Actions */
+ .actions_caps = DR_STE_CTX_ACTION_CAP_TX_POP |
+ DR_STE_CTX_ACTION_CAP_RX_PUSH |
+ DR_STE_CTX_ACTION_CAP_RX_ENCAP,
+ .set_actions_rx = &dr_ste_v1_set_actions_rx,
+ .set_actions_tx = &dr_ste_v1_set_actions_tx,
+ .modify_field_arr_sz = ARRAY_SIZE(dr_ste_v2_action_modify_field_arr),
+ .modify_field_arr = dr_ste_v2_action_modify_field_arr,
+ .set_action_set = &dr_ste_v1_set_action_set,
+ .set_action_add = &dr_ste_v1_set_action_add,
+ .set_action_copy = &dr_ste_v1_set_action_copy,
+ .set_action_decap_l3_list = &dr_ste_v3_set_action_decap_l3_list,
+ .alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
+ .dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg,
+ /* Actions bit set */
+ .set_encap = &dr_ste_v3_set_encap,
+ .set_push_vlan = &dr_ste_v3_set_push_vlan,
+ .set_pop_vlan = &dr_ste_v3_set_pop_vlan,
+ .set_rx_decap = &dr_ste_v3_set_rx_decap,
+ .set_encap_l3 = &dr_ste_v3_set_encap_l3,
+ .set_insert_hdr = &dr_ste_v3_set_insert_hdr,
+ .set_remove_hdr = &dr_ste_v3_set_remove_hdr,
+ /* Send */
+ .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
+};
+
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v3(void)
+{
+ return &ste_ctx_v3;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_table.c
index 69294a66fd7f..69294a66fd7f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_table.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h
index 7618c6147f86..cc328292bf84 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h
@@ -1473,7 +1473,6 @@ struct mlx5dr_send_ring {
int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn);
void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn,
struct mlx5dr_send_ring *send_ring);
-int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn);
int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn,
struct mlx5dr_ste *ste,
u8 *data,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
index 4b349d4005e4..f367997ab61e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
@@ -521,7 +521,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
goto free_actions;
}
- id = dst->dest_attr.counter_id;
+ id = mlx5_fc_id(dst->dest_attr.counter);
tmp_action =
mlx5dr_action_create_flow_counter(id);
if (!tmp_action) {
@@ -833,15 +833,21 @@ static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns,
return steering_caps;
}
-int mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat)
+int
+mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *reformat_id)
{
+ struct mlx5dr_action *dr_action;
+
switch (pkt_reformat->reformat_type) {
case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
case MLX5_REFORMAT_TYPE_INSERT_HDR:
- return mlx5dr_action_get_pkt_reformat_id(pkt_reformat->fs_dr_action.dr_action);
+ dr_action = pkt_reformat->fs_dr_action.dr_action;
+ *reformat_id = mlx5dr_action_get_pkt_reformat_id(dr_action);
+ return 0;
}
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h
index 99a3b2eff6b8..f869f2daefbf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h
@@ -38,7 +38,9 @@ struct mlx5_fs_dr_table {
bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev);
-int mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat);
+int
+mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *reformat_id);
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void);
@@ -49,9 +51,11 @@ static inline const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void)
return NULL;
}
-static inline u32 mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat)
+static inline int
+mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *reformat_id)
{
- return 0;
+ return -EOPNOTSUPP;
}
static inline bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h
index fb078fa0f0cc..898c3618ff26 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h
@@ -600,4 +600,44 @@ struct mlx5_ifc_ste_double_action_aso_v1_bits {
};
};
+struct mlx5_ifc_ste_single_action_remove_header_v3_bits {
+ u8 action_id[0x8];
+ u8 start_anchor[0x7];
+ u8 end_anchor[0x7];
+ u8 reserved_at_16[0x1];
+ u8 outer_l4_remove[0x1];
+ u8 reserved_at_18[0x4];
+ u8 decap[0x1];
+ u8 vni_to_cqe[0x1];
+ u8 qos_profile[0x2];
+};
+
+struct mlx5_ifc_ste_single_action_remove_header_size_v3_bits {
+ u8 action_id[0x8];
+ u8 start_anchor[0x7];
+ u8 start_offset[0x8];
+ u8 outer_l4_remove[0x1];
+ u8 reserved_at_18[0x2];
+ u8 remove_size[0x6];
+};
+
+struct mlx5_ifc_ste_double_action_insert_with_inline_v3_bits {
+ u8 action_id[0x8];
+ u8 start_anchor[0x7];
+ u8 start_offset[0x8];
+ u8 reserved_at_17[0x9];
+
+ u8 inline_data[0x20];
+};
+
+struct mlx5_ifc_ste_double_action_insert_with_ptr_v3_bits {
+ u8 action_id[0x8];
+ u8 start_anchor[0x7];
+ u8 start_offset[0x8];
+ u8 size[0x6];
+ u8 attributes[0x3];
+
+ u8 pointer[0x20];
+};
+
#endif /* MLX5_IFC_DR_H */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr_ste_v1.h
index ca3b0f1453a7..ca3b0f1453a7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr_ste_v1.h
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h
index 3ac7dc67509f..fc8a2169d1a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h
@@ -45,8 +45,6 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type);
int mlx5dr_domain_destroy(struct mlx5dr_domain *domain);
-int mlx5dr_domain_sync(struct mlx5dr_domain *domain, u32 flags);
-
void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
struct mlx5dr_domain *peer_dmn,
u16 peer_vhca_id);
@@ -160,7 +158,7 @@ mlx5dr_is_supported(struct mlx5_core_dev *dev)
(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
(MLX5_CAP_GEN(dev, steering_format_version) <=
- MLX5_STEERING_FORMAT_CONNECTX_7)));
+ MLX5_STEERING_FORMAT_CONNECTX_8)));
}
/* buddy functions & structure */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 0d5f750faa45..306affbcfd3b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -36,6 +36,7 @@
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eswitch.h>
#include "mlx5_core.h"
+#include "eswitch.h"
#include "sf/sf.h"
/* Mutex to hold while enabling or disabling RoCE */
@@ -77,15 +78,14 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
}
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
- u32 *out)
+ bool other_vport, u32 *out)
{
u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
MLX5_SET(query_nic_vport_context_in, in, opcode,
MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
- if (vport)
- MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
+ MLX5_SET(query_nic_vport_context_in, in, other_vport, other_vport);
return mlx5_cmd_exec_inout(mdev, query_nic_vport_context, in, out);
}
@@ -96,7 +96,7 @@ int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
int err;
- err = mlx5_query_nic_vport_context(mdev, vport, out);
+ err = mlx5_query_nic_vport_context(mdev, vport, vport > 0, out);
if (!err)
*min_inline = MLX5_GET(query_nic_vport_context_out, out,
nic_vport_context.min_wqe_inline_mode);
@@ -218,7 +218,7 @@ int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, 0, false, out);
if (!err)
*mtu = MLX5_GET(query_nic_vport_context_out, out,
nic_vport_context.mtu);
@@ -428,7 +428,7 @@ int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, 0, false, out);
if (err)
goto out;
@@ -450,7 +450,7 @@ int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group)
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, 0, false, out);
if (err)
goto out;
@@ -461,23 +461,27 @@ out:
return err;
}
-int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
+int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+ u16 vport, bool other_vport, u64 *node_guid)
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ int err;
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
- mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, vport, other_vport, out);
+ if (err)
+ goto out;
*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
nic_vport_context.node_guid);
-
+out:
kvfree(out);
- return 0;
+ return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
@@ -519,19 +523,22 @@ int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ int err;
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
- mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, 0, false, out);
+ if (err)
+ goto out;
*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
nic_vport_context.qkey_violation_counter);
-
+out:
kvfree(out);
- return 0;
+ return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
@@ -797,7 +804,7 @@ int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, vport, out);
+ err = mlx5_query_nic_vport_context(mdev, vport, vport > 0, out);
if (err)
goto out;
@@ -901,7 +908,7 @@ int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status)
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, 0, false, out);
if (err)
goto out;
@@ -1183,27 +1190,101 @@ u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
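+/* Compose the software system image GUID: copy the 8-byte firmware system
+ * image GUID into buf, optionally followed by a one-byte load balance ID
+ * when the device reports both the load_balance_id and lag_per_mp_group
+ * capabilities. len is set to the number of bytes written.
+ */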
+void mlx5_query_nic_sw_system_image_guid(struct mlx5_core_dev *mdev, u8 *buf,
+ u8 *len)
+{
+ u64 fw_system_image_guid;
+
+ *len = 0;
+
+ fw_system_image_guid = mlx5_query_nic_system_image_guid(mdev);
+ if (!fw_system_image_guid)
+ return;
+
+ memcpy(buf, &fw_system_image_guid, sizeof(fw_system_image_guid));
+ *len += sizeof(fw_system_image_guid);
+
+ if (MLX5_CAP_GEN_2(mdev, load_balance_id) &&
+ MLX5_CAP_GEN_2(mdev, lag_per_mp_group))
+ buf[(*len)++] = MLX5_CAP_GEN_2(mdev, load_balance_id);
+}
+
+static bool mlx5_vport_use_vhca_id_as_func_id(struct mlx5_core_dev *dev,
+ u16 vport_num, u16 *vhca_id)
+{
+ if (!MLX5_CAP_GEN_2(dev, function_id_type_vhca_id))
+ return false;
+
+ return mlx5_esw_vport_vhca_id(dev->priv.eswitch, vport_num, vhca_id);
+}
+
int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 vport, void *out,
u16 opmod)
{
- bool ec_vf_func = mlx5_core_is_ec_vf_vport(dev, vport);
u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)] = {};
+ u16 vhca_id = 0, function_id = 0;
+ bool ec_vf_func = false;
+
+ /* If this vport refers to a vport on the EC PF (embedded CPU),
+ * let the FW know which domain we are querying, since vport numbers
+ * and function_ids are not unique across the different PF domains,
+ * unless we use the vhca_id as the function_id below.
+ */
+ ec_vf_func = mlx5_core_is_ec_vf_vport(dev, vport);
+ function_id = mlx5_vport_to_func_id(dev, vport, ec_vf_func);
+
+ if (mlx5_vport_use_vhca_id_as_func_id(dev, vport, &vhca_id)) {
+ MLX5_SET(query_hca_cap_in, in, function_id_type, 1);
+ function_id = vhca_id;
+ ec_vf_func = false;
+ mlx5_core_dbg(dev, "%s using vhca_id as function_id for vport %d vhca_id 0x%x\n",
+ __func__, vport, vhca_id);
+ }
opmod = (opmod << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01);
MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
- MLX5_SET(query_hca_cap_in, in, function_id, mlx5_vport_to_func_id(dev, vport, ec_vf_func));
MLX5_SET(query_hca_cap_in, in, other_function, true);
MLX5_SET(query_hca_cap_in, in, ec_vf_function, ec_vf_func);
+ MLX5_SET(query_hca_cap_in, in, function_id, function_id);
return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
}
EXPORT_SYMBOL_GPL(mlx5_vport_get_other_func_cap);
+int mlx5_vport_get_vhca_id(struct mlx5_core_dev *dev, u16 vport, u16 *vhca_id)
+{
+ int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *query_ctx;
+ void *hca_caps;
+ int err;
+
+ /* First, try to get the vhca_id via the eswitch */
+ if (mlx5_esw_vport_vhca_id(dev->priv.eswitch, vport, vhca_id))
+ return 0;
+
+ query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
+ if (!query_ctx)
+ return -ENOMEM;
+
+ err = mlx5_vport_get_other_func_general_cap(dev, vport, query_ctx);
+ if (err)
+ goto out_free;
+
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
+ *vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);
+
+out_free:
+ kfree(query_ctx);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_vport_get_vhca_id);
+
int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap,
u16 vport, u16 opmod)
{
- bool ec_vf_func = mlx5_core_is_ec_vf_vport(dev, vport);
int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ u16 vhca_id = 0, function_id = 0;
+ bool ec_vf_func = false;
void *set_hca_cap;
void *set_ctx;
int ret;
@@ -1212,14 +1293,29 @@ int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap
if (!set_ctx)
return -ENOMEM;
+ /* If this vport refers to a vport on the EC PF (embedded CPU),
+ * let the FW know which domain we are querying, since vport numbers
+ * and function_ids are not unique across the different PF domains,
+ * unless we use the vhca_id as the function_id below.
+ */
+ ec_vf_func = mlx5_core_is_ec_vf_vport(dev, vport);
+ function_id = mlx5_vport_to_func_id(dev, vport, ec_vf_func);
+
+ if (mlx5_vport_use_vhca_id_as_func_id(dev, vport, &vhca_id)) {
+ MLX5_SET(set_hca_cap_in, set_ctx, function_id_type, 1);
+ function_id = vhca_id;
+ ec_vf_func = false;
+ mlx5_core_dbg(dev, "%s using vhca_id as function_id for vport %d vhca_id 0x%x\n",
+ __func__, vport, vhca_id);
+ }
+
MLX5_SET(set_hca_cap_in, set_ctx, opcode, MLX5_CMD_OP_SET_HCA_CAP);
MLX5_SET(set_hca_cap_in, set_ctx, op_mod, opmod << 1);
set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
memcpy(set_hca_cap, hca_cap, MLX5_ST_SZ_BYTES(cmd_hca_cap));
- MLX5_SET(set_hca_cap_in, set_ctx, function_id,
- mlx5_vport_to_func_id(dev, vport, ec_vf_func));
MLX5_SET(set_hca_cap_in, set_ctx, other_function, true);
MLX5_SET(set_hca_cap_in, set_ctx, ec_vf_function, ec_vf_func);
+ MLX5_SET(set_hca_cap_in, set_ctx, function_id, function_id);
ret = mlx5_cmd_exec_in(dev, set_hca_cap, set_ctx);
kfree(set_ctx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wc.c b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
index 1bed75eca97d..815a7c97d6b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
@@ -7,6 +7,11 @@
#include "mlx5_core.h"
#include "wq.h"
+#if IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && IS_ENABLED(CONFIG_ARM64)
+#include <asm/neon.h>
+#include <asm/simd.h>
+#endif
+
#define TEST_WC_NUM_WQES 255
#define TEST_WC_LOG_CQ_SZ (order_base_2(TEST_WC_NUM_WQES))
#define TEST_WC_SQ_LOG_WQ_SZ TEST_WC_LOG_CQ_SZ
@@ -94,7 +99,7 @@ static int create_wc_cq(struct mlx5_wc_cq *cq, void *cqc_data)
MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
- MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.bfreg.up->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
@@ -116,7 +121,7 @@ static int mlx5_wc_create_cq(struct mlx5_core_dev *mdev, struct mlx5_wc_cq *cq)
return -ENOMEM;
MLX5_SET(cqc, cqc, log_cq_size, TEST_WC_LOG_CQ_SZ);
- MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.bfreg.up->index);
if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
@@ -255,7 +260,29 @@ static void mlx5_wc_destroy_sq(struct mlx5_wc_sq *sq)
mlx5_wq_destroy(&sq->wq_ctrl);
}
-static void mlx5_wc_post_nop(struct mlx5_wc_sq *sq, bool signaled)
+static void mlx5_iowrite64_copy(struct mlx5_wc_sq *sq, __be32 mmio_wqe[16],
+ size_t mmio_wqe_size, unsigned int offset)
+{
+#if IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && IS_ENABLED(CONFIG_ARM64)
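+ /* On arm64 with kernel-mode NEON available, copy the 64-byte WQE
+ * (four 16-byte vector registers) with a single ld1/st1 pair,
+ * presumably so the write-combining mapping sees one contiguous
+ * 64-byte store.
+ */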
+ if (cpu_has_neon()) {
+ scoped_ksimd() {
+ asm volatile(
+ ".arch_extension simd\n\t"
+ "ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [%0]\n\t"
+ "st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [%1]"
+ :
+ : "r"(mmio_wqe), "r"(sq->bfreg.map + offset)
+ : "memory", "v0", "v1", "v2", "v3");
+ }
+ return;
+ }
+#endif
+ __iowrite64_copy(sq->bfreg.map + offset, mmio_wqe,
+ mmio_wqe_size / 8);
+}
+
+static void mlx5_wc_post_nop(struct mlx5_wc_sq *sq, unsigned int *offset,
+ bool signaled)
{
int buf_size = (1 << MLX5_CAP_GEN(sq->cq.mdev, log_bf_reg_size)) / 2;
struct mlx5_wqe_ctrl_seg *ctrl;
@@ -288,10 +315,9 @@ static void mlx5_wc_post_nop(struct mlx5_wc_sq *sq, bool signaled)
*/
wmb();
- __iowrite64_copy(sq->bfreg.map + sq->bfreg.offset, mmio_wqe,
- sizeof(mmio_wqe) / 8);
+ mlx5_iowrite64_copy(sq, mmio_wqe, sizeof(mmio_wqe), *offset);
- sq->bfreg.offset ^= buf_size;
+ *offset ^= buf_size;
}
static int mlx5_wc_poll_cq(struct mlx5_wc_sq *sq)
@@ -332,6 +358,7 @@ static int mlx5_wc_poll_cq(struct mlx5_wc_sq *sq)
static void mlx5_core_test_wc(struct mlx5_core_dev *mdev)
{
+ unsigned int offset = 0;
unsigned long expires;
struct mlx5_wc_sq *sq;
int i, err;
@@ -358,9 +385,9 @@ static void mlx5_core_test_wc(struct mlx5_core_dev *mdev)
goto err_create_sq;
for (i = 0; i < TEST_WC_NUM_WQES - 1; i++)
- mlx5_wc_post_nop(sq, false);
+ mlx5_wc_post_nop(sq, &offset, false);
- mlx5_wc_post_nop(sq, true);
+ mlx5_wc_post_nop(sq, &offset, true);
expires = jiffies + TEST_WC_POLLING_MAX_TIME_JIFFIES;
do {
@@ -378,10 +405,14 @@ err_create_cq:
mlx5_free_bfreg(mdev, &sq->bfreg);
err_alloc_bfreg:
kfree(sq);
+
+ if (mdev->wc_state == MLX5_WC_STATE_UNSUPPORTED)
+ mlx5_core_warn(mdev, "Write combining is not supported\n");
}
bool mlx5_wc_support_get(struct mlx5_core_dev *mdev)
{
+ struct mutex *wc_state_lock = &mdev->wc_state_lock;
struct mlx5_core_dev *parent = NULL;
if (!MLX5_CAP_GEN(mdev, bf)) {
@@ -400,32 +431,31 @@ bool mlx5_wc_support_get(struct mlx5_core_dev *mdev)
*/
goto out;
- mutex_lock(&mdev->wc_state_lock);
-
- if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
- goto unlock;
-
#ifdef CONFIG_MLX5_SF
- if (mlx5_core_is_sf(mdev))
+ if (mlx5_core_is_sf(mdev)) {
parent = mdev->priv.parent_mdev;
+ wc_state_lock = &parent->wc_state_lock;
+ }
#endif
- if (parent) {
- mutex_lock(&parent->wc_state_lock);
+ mutex_lock(wc_state_lock);
+ if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
+ goto unlock;
+
+ if (parent) {
mlx5_core_test_wc(parent);
mlx5_core_dbg(mdev, "parent set wc_state=%d\n",
parent->wc_state);
mdev->wc_state = parent->wc_state;
- mutex_unlock(&parent->wc_state_lock);
+ } else {
+ mlx5_core_test_wc(mdev);
}
- mlx5_core_test_wc(mdev);
-
unlock:
- mutex_unlock(&mdev->wc_state_lock);
+ mutex_unlock(wc_state_lock);
out:
mlx5_core_dbg(mdev, "wc_state=%d\n", mdev->wc_state);
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index fb2e5b844c15..d1f8a72cae53 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -142,8 +142,10 @@ static int mlxbf_gige_open(struct net_device *netdev)
mlxbf_gige_cache_stats(priv);
err = mlxbf_gige_clean_port(priv);
- if (err)
+ if (err) {
+ dev_err(priv->dev, "open: clean_port failed: %pe\n", ERR_PTR(err));
return err;
+ }
/* Clear driver's valid_polarity to match hardware,
* since the above call to clean_port() resets the
@@ -154,19 +156,25 @@ static int mlxbf_gige_open(struct net_device *netdev)
phy_start(phydev);
err = mlxbf_gige_tx_init(priv);
- if (err)
+ if (err) {
+ dev_err(priv->dev, "open: tx_init failed: %pe\n", ERR_PTR(err));
goto phy_deinit;
+ }
err = mlxbf_gige_rx_init(priv);
- if (err)
+ if (err) {
+ dev_err(priv->dev, "open: rx_init failed: %pe\n", ERR_PTR(err));
goto tx_deinit;
+ }
netif_napi_add(netdev, &priv->napi, mlxbf_gige_poll);
napi_enable(&priv->napi);
netif_start_queue(netdev);
err = mlxbf_gige_request_irqs(priv);
- if (err)
+ if (err) {
+ dev_err(priv->dev, "open: request_irqs failed: %pe\n", ERR_PTR(err));
goto napi_deinit;
+ }
mlxbf_gige_enable_mac_rx_filter(priv, MLXBF_GIGE_BCAST_MAC_FILTER_IDX);
mlxbf_gige_enable_mac_rx_filter(priv, MLXBF_GIGE_LOCAL_MAC_FILTER_IDX);
@@ -418,8 +426,10 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
/* Attach MDIO device */
err = mlxbf_gige_mdio_probe(pdev, priv);
- if (err)
+ if (err) {
+ dev_err(priv->dev, "probe: mdio_probe failed: %pe\n", ERR_PTR(err));
return err;
+ }
priv->base = base;
priv->llu_base = llu_base;
@@ -438,7 +448,7 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
- dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
+ dev_err(&pdev->dev, "DMA configuration failed: %pe\n", ERR_PTR(err));
goto out;
}
@@ -447,8 +457,10 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
priv->llu_plu_irq = platform_get_irq(pdev, MLXBF_GIGE_LLU_PLU_INTR_IDX);
phy_irq = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(&pdev->dev), "phy", 0);
- if (phy_irq < 0) {
- dev_err(&pdev->dev, "Error getting PHY irq. Use polling instead");
+ if (phy_irq == -EPROBE_DEFER) {
+ err = -EPROBE_DEFER;
+ goto out;
+ } else if (phy_irq < 0) {
phy_irq = PHY_POLL;
}
@@ -466,7 +478,7 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
mlxbf_gige_link_cfgs[priv->hw_version].adjust_link,
mlxbf_gige_link_cfgs[priv->hw_version].phy_mode);
if (err) {
- dev_err(&pdev->dev, "Could not attach to PHY\n");
+ dev_err(&pdev->dev, "Could not attach to PHY: %pe\n", ERR_PTR(err));
goto out;
}
@@ -477,7 +489,7 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
err = register_netdev(netdev);
if (err) {
- dev_err(&pdev->dev, "Failed to register netdev\n");
+ dev_err(&pdev->dev, "Failed to register netdev: %pe\n", ERR_PTR(err));
phy_disconnect(phydev);
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
index 46245e0b2462..43c84900369a 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
@@ -14,7 +14,6 @@
#define MLXFW_FSM_STATE_WAIT_TIMEOUT_MS 30000
#define MLXFW_FSM_STATE_WAIT_ROUNDS \
(MLXFW_FSM_STATE_WAIT_TIMEOUT_MS / MLXFW_FSM_STATE_WAIT_CYCLE_MS)
-#define MLXFW_FSM_MAX_COMPONENT_SIZE (10 * (1 << 20))
static const int mlxfw_fsm_state_errno[] = {
[MLXFW_FSM_STATE_ERR_ERROR] = -EIO,
@@ -229,7 +228,6 @@ static int mlxfw_flash_component(struct mlxfw_dev *mlxfw_dev,
return err;
}
- comp_max_size = min_t(u32, comp_max_size, MLXFW_FSM_MAX_COMPONENT_SIZE);
if (comp->data_size > comp_max_size) {
MLXFW_ERR_MSG(mlxfw_dev, extack,
"Component size is bigger than limit", -EINVAL);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 4a79c0d7e7ad..83c7cf3bbea3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -35,6 +35,7 @@
#include "reg.h"
#include "resources.h"
#include "../mlxfw/mlxfw.h"
+#include "txheader.h"
static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);
@@ -677,7 +678,7 @@ struct mlxsw_reg_trans {
struct list_head bulk_list;
struct mlxsw_core *core;
struct sk_buff *tx_skb;
- struct mlxsw_tx_info tx_info;
+ struct mlxsw_txhdr_info txhdr_info;
struct delayed_work timeout_dw;
unsigned int retries;
u64 tid;
@@ -737,12 +738,11 @@ static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
if (!skb)
return -ENOMEM;
- trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
- skb->data + mlxsw_core->driver->txhdr_len,
- skb->len - mlxsw_core->driver->txhdr_len);
+ trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0, skb->data,
+ skb->len);
atomic_set(&trans->active, 1);
- err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
+ err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->txhdr_info);
if (err) {
dev_kfree_skb(skb);
return err;
@@ -886,7 +886,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
return 0;
- emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0);
+ emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_PERCPU, 0);
if (!emad_wq)
return -ENOMEM;
mlxsw_core->emad_wq = emad_wq;
@@ -944,7 +944,7 @@ static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
(MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
- sizeof(u32) + mlxsw_core->driver->txhdr_len);
+ sizeof(u32) + MLXSW_TXHDR_LEN);
if (mlxsw_core->emad.enable_string_tlv)
emad_len += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
if (mlxsw_core->emad.enable_latency_tlv)
@@ -984,8 +984,8 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
list_add_tail(&trans->bulk_list, bulk_list);
trans->core = mlxsw_core;
trans->tx_skb = skb;
- trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
- trans->tx_info.is_emad = true;
+ trans->txhdr_info.tx_info.local_port = MLXSW_PORT_CPU_PORT;
+ trans->txhdr_info.tx_info.is_emad = true;
INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
trans->tid = tid;
init_completion(&trans->completion);
@@ -995,7 +995,6 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
trans->type = type;
mlxsw_emad_construct(mlxsw_core, skb, reg, payload, type, trans->tid);
- mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);
spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
@@ -2044,7 +2043,7 @@ static int mlxsw_core_health_init(struct mlxsw_core *mlxsw_core)
return 0;
fw_fatal = devl_health_reporter_create(devlink, &mlxsw_core_health_fw_fatal_ops,
- 0, mlxsw_core);
+ mlxsw_core);
if (IS_ERR(fw_fatal)) {
dev_err(mlxsw_core->bus_info->dev, "Failed to create fw fatal reporter");
return PTR_ERR(fw_fatal);
@@ -2330,10 +2329,10 @@ bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+ const struct mlxsw_txhdr_info *txhdr_info)
{
return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
- tx_info);
+ txhdr_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit);
@@ -3382,7 +3381,7 @@ static int __init mlxsw_core_module_init(void)
if (err)
return err;
- mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0);
+ mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_PERCPU, 0);
if (!mlxsw_wq) {
err = -ENOMEM;
goto err_alloc_workqueue;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 6d11225594dd..1a871397a6df 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -72,7 +72,14 @@ struct mlxsw_tx_info {
bool is_emad;
};
+struct mlxsw_txhdr_info {
+ struct mlxsw_tx_info tx_info;
+ bool data;
+ u16 max_fid; /* Used for PTP packets which are sent as data. */
+};
+
struct mlxsw_rx_md_info {
+ struct napi_struct *napi;
u32 cookie_index;
u32 latency;
u32 tx_congestion;
@@ -94,7 +101,7 @@ struct mlxsw_rx_md_info {
bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
const struct mlxsw_tx_info *tx_info);
int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
+ const struct mlxsw_txhdr_info *txhdr_info);
void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core,
struct sk_buff *skb, u16 local_port);
@@ -425,8 +432,6 @@ struct mlxsw_driver {
int (*trap_policer_counter_get)(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_policer *policer,
u64 *p_drops);
- void (*txhdr_construct)(struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
int (*resources_register)(struct mlxsw_core *mlxsw_core);
int (*kvd_sizes_get)(struct mlxsw_core *mlxsw_core,
const struct mlxsw_config_profile *profile,
@@ -439,7 +444,6 @@ struct mlxsw_driver {
void (*ptp_transmitted)(struct mlxsw_core *mlxsw_core,
struct sk_buff *skb, u16 local_port);
- u8 txhdr_len;
const struct mlxsw_config_profile *profile;
bool sdq_supports_cqe_v2;
};
@@ -486,7 +490,7 @@ struct mlxsw_bus {
bool (*skb_transmit_busy)(void *bus_priv,
const struct mlxsw_tx_info *tx_info);
int (*skb_transmit)(void *bus_priv, struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
+ const struct mlxsw_txhdr_info *txhdr_info);
int (*cmd_exec)(void *bus_priv, u16 opcode, u8 opcode_mod,
u32 in_mod, bool out_mbox_direct,
char *in_mbox, size_t in_mbox_size,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
index b032d5a4b3b8..10f5bc4892fc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
@@ -601,6 +601,8 @@ int mlxsw_linecard_devlink_info_get(struct mlxsw_linecard *linecard,
err = devlink_info_version_fixed_put(req,
DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
info->psid);
+ if (err)
+ goto unlock;
sprintf(buf, "%u.%u.%u", info->fw_major, info->fw_minor,
info->fw_sub_minor);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index e746cd9c68ed..eac9a14a6058 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -205,11 +205,11 @@ static int mlxsw_thermal_get_temp(struct thermal_zone_device *tzdev,
return 0;
}
-static struct thermal_zone_params mlxsw_thermal_params = {
+static const struct thermal_zone_params mlxsw_thermal_params = {
.no_hwmon = true,
};
-static struct thermal_zone_device_ops mlxsw_thermal_ops = {
+static const struct thermal_zone_device_ops mlxsw_thermal_ops = {
.should_bind = mlxsw_thermal_should_bind,
.get_temp = mlxsw_thermal_get_temp,
};
@@ -252,7 +252,7 @@ static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
return 0;
}
-static struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
+static const struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
.should_bind = mlxsw_thermal_module_should_bind,
.get_temp = mlxsw_thermal_module_temp_get,
};
@@ -280,7 +280,7 @@ static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev,
return 0;
}
-static struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = {
+static const struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = {
.should_bind = mlxsw_thermal_module_should_bind,
.get_temp = mlxsw_thermal_gearbox_temp_get,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index 1e150ce1c73a..f9f565c1036d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -516,7 +516,7 @@ static bool mlxsw_i2c_skb_transmit_busy(void *bus_priv,
}
static int mlxsw_i2c_skb_transmit(void *bus_priv, struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+ const struct mlxsw_txhdr_info *txhdr_info)
{
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index d6f37456fb31..8769cba2c746 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -21,6 +21,7 @@
#include "cmd.h"
#include "port.h"
#include "resources.h"
+#include "txheader.h"
#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
@@ -155,7 +156,7 @@ static int mlxsw_pci_napi_devs_init(struct mlxsw_pci *mlxsw_pci)
}
strscpy(mlxsw_pci->napi_dev_rx->name, "mlxsw_rx",
sizeof(mlxsw_pci->napi_dev_rx->name));
- dev_set_threaded(mlxsw_pci->napi_dev_rx, true);
+ netif_threaded_enable(mlxsw_pci->napi_dev_rx);
return 0;
@@ -737,6 +738,7 @@ static void mlxsw_pci_cqe_rdq_md_init(struct sk_buff *skb, const char *cqe)
}
static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
+ struct napi_struct *napi,
struct mlxsw_pci_queue *q,
u16 consumer_counter_limit,
enum mlxsw_pci_cqe_v cqe_v, char *cqe)
@@ -807,6 +809,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
}
mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);
+ mlxsw_skb_cb(skb)->rx_md_info.napi = napi;
mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
@@ -869,7 +872,7 @@ static int mlxsw_pci_napi_poll_cq_rx(struct napi_struct *napi, int budget)
continue;
}
- mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
+ mlxsw_pci_cqe_rdq_handle(mlxsw_pci, napi, rdq,
wqe_counter, q->u.cq.v, cqe);
if (++work_done == budget)
@@ -2093,6 +2096,39 @@ static void mlxsw_pci_fini(void *bus_priv)
mlxsw_pci_free_irq_vectors(mlxsw_pci);
}
+static int mlxsw_pci_txhdr_construct(struct sk_buff *skb,
+ const struct mlxsw_txhdr_info *txhdr_info)
+{
+ const struct mlxsw_tx_info tx_info = txhdr_info->tx_info;
+ char *txhdr;
+
+ if (skb_cow_head(skb, MLXSW_TXHDR_LEN))
+ return -ENOMEM;
+
+ txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
+ memset(txhdr, 0, MLXSW_TXHDR_LEN);
+
+ mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
+ mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
+ mlxsw_tx_hdr_swid_set(txhdr, 0);
+
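+ /* PTP packets that must be sent as data enter the switch through the
+ * CPU port and are forwarded by the regular pipeline using the FID
+ * computed below; control packets are instead directed to a specific
+ * egress port via port_mid.
+ */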
+ if (unlikely(txhdr_info->data)) {
+ u16 fid = txhdr_info->max_fid + tx_info.local_port - 1;
+
+ mlxsw_tx_hdr_rx_is_router_set(txhdr, true);
+ mlxsw_tx_hdr_fid_valid_set(txhdr, true);
+ mlxsw_tx_hdr_fid_set(txhdr, fid);
+ mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA);
+ } else {
+ mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
+ mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
+ mlxsw_tx_hdr_port_mid_set(txhdr, tx_info.local_port);
+ mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
+ }
+
+ return 0;
+}
+
static struct mlxsw_pci_queue *
mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
const struct mlxsw_tx_info *tx_info)
@@ -2120,7 +2156,7 @@ static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
}
static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+ const struct mlxsw_txhdr_info *txhdr_info)
{
struct mlxsw_pci *mlxsw_pci = bus_priv;
struct mlxsw_pci_queue *q;
@@ -2129,13 +2165,17 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
int i;
int err;
+ err = mlxsw_pci_txhdr_construct(skb, txhdr_info);
+ if (err)
+ return err;
+
if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
err = skb_linearize(skb);
if (err)
return err;
}
- q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
+ q = mlxsw_pci_sdq_pick(mlxsw_pci, &txhdr_info->tx_info);
spin_lock_bh(&q->lock);
elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
if (!elem_info) {
@@ -2143,7 +2183,7 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
err = -EAGAIN;
goto unlock;
}
- mlxsw_skb_cb(skb)->tx_info = *tx_info;
+ mlxsw_skb_cb(skb)->tx_info = txhdr_info->tx_info;
elem_info->sdq.skb = skb;
wqe = elem_info->elem;
@@ -2174,6 +2214,8 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
mlxsw_pci_wqe_byte_count_set(wqe, i, 0);
+ mlxsw_pci_wqe_ipcs_set(wqe, skb->ip_summed == CHECKSUM_PARTIAL);
+
/* Everything is set up, ring producer doorbell to get HW going */
q->producer_counter++;
mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index 6bed495dcf0f..7fa94e5828de 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -90,6 +90,11 @@ MLXSW_ITEM32(pci, wqe, lp, 0x00, 30, 1);
*/
MLXSW_ITEM32(pci, wqe, type, 0x00, 23, 4);
+/* pci_wqe_ipcs
+ * Calculate IPv4 and TCP / UDP checksums.
+ */
+MLXSW_ITEM32(pci, wqe, ipcs, 0x00, 14, 1);
+
/* pci_wqe_byte_count
* Size of i-th scatter/gather entry, 0 if entry is unused.
*/
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 3f5e5d99251b..9a2d64a0a858 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -107,74 +107,6 @@ static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};
-/* tx_hdr_version
- * Tx header version.
- * Must be set to 1.
- */
-MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
-
-/* tx_hdr_ctl
- * Packet control type.
- * 0 - Ethernet control (e.g. EMADs, LACP)
- * 1 - Ethernet data
- */
-MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
-
-/* tx_hdr_proto
- * Packet protocol type. Must be set to 1 (Ethernet).
- */
-MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
-
-/* tx_hdr_rx_is_router
- * Packet is sent from the router. Valid for data packets only.
- */
-MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
-
-/* tx_hdr_fid_valid
- * Indicates if the 'fid' field is valid and should be used for
- * forwarding lookup. Valid for data packets only.
- */
-MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
-
-/* tx_hdr_swid
- * Switch partition ID. Must be set to 0.
- */
-MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
-
-/* tx_hdr_control_tclass
- * Indicates if the packet should use the control TClass and not one
- * of the data TClasses.
- */
-MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
-
-/* tx_hdr_etclass
- * Egress TClass to be used on the egress device on the egress port.
- */
-MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
-
-/* tx_hdr_port_mid
- * Destination local port for unicast packets.
- * Destination multicast ID for multicast packets.
- *
- * Control packets are directed to a specific egress port, while data
- * packets are transmitted through the CPU port (0) into the switch partition,
- * where forwarding rules are applied.
- */
-MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
-
-/* tx_hdr_fid
- * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
- * set, otherwise calculated based on the packet's VID using VID to FID mapping.
- * Valid for data packets only.
- */
-MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16);
-
-/* tx_hdr_type
- * 0 - Data packets
- * 6 - Control packets
- */
-MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
-
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index, bool clear,
u64 *packets, u64 *bytes)
@@ -233,61 +165,6 @@ void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
counter_index);
}
-void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
-{
- char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
-
- memset(txhdr, 0, MLXSW_TXHDR_LEN);
-
- mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
- mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
- mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
- mlxsw_tx_hdr_swid_set(txhdr, 0);
- mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
- mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
- mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
-}
-
-int
-mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
-{
- char *txhdr;
- u16 max_fid;
- int err;
-
- if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
- err = -ENOMEM;
- goto err_skb_cow_head;
- }
-
- if (!MLXSW_CORE_RES_VALID(mlxsw_core, FID)) {
- err = -EIO;
- goto err_res_valid;
- }
- max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID);
-
- txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
- memset(txhdr, 0, MLXSW_TXHDR_LEN);
-
- mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
- mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
- mlxsw_tx_hdr_rx_is_router_set(txhdr, true);
- mlxsw_tx_hdr_fid_valid_set(txhdr, true);
- mlxsw_tx_hdr_fid_set(txhdr, max_fid + tx_info->local_port - 1);
- mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA);
- return 0;
-
-err_res_valid:
-err_skb_cow_head:
- this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
- dev_kfree_skb_any(skb);
- return err;
-}
-
static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb)
{
unsigned int type;
@@ -299,30 +176,49 @@ static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb)
return !!ptp_parse_header(skb, type);
}
-static int mlxsw_sp_txhdr_handle(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+static void mlxsw_sp_txhdr_info_data_init(struct mlxsw_core *mlxsw_core,
+ struct sk_buff *skb,
+ struct mlxsw_txhdr_info *txhdr_info)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ /* Resource validation was done as part of PTP init. */
+ u16 max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID);
+
+ txhdr_info->data = true;
+ txhdr_info->max_fid = max_fid;
+}
- /* In Spectrum-2 and Spectrum-3, PTP events that require a time stamp
- * need special handling and cannot be transmitted as regular control
- * packets.
+static struct sk_buff *
+mlxsw_sp_vlan_tag_push(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb)
+{
+ /* In some Spectrum ASICs, in order for PTP event packets to have their
+ * correction field correctly set on the egress port they must be
+ * transmitted as data packets. Such packets ingress the ASIC via the
+ * CPU port and must have a VLAN tag, as the CPU port is not configured
+ * with a PVID. Push the default VLAN (4095), which is configured as
+ * egress untagged on all the ports.
*/
- if (unlikely(mlxsw_sp_skb_requires_ts(skb)))
- return mlxsw_sp->ptp_ops->txhdr_construct(mlxsw_core,
- mlxsw_sp_port, skb,
- tx_info);
+ if (skb_vlan_tagged(skb))
+ return skb;
- if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
- this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
- dev_kfree_skb_any(skb);
- return -ENOMEM;
- }
+ return vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
+ MLXSW_SP_DEFAULT_VID);
+}
- mlxsw_sp_txhdr_construct(skb, tx_info);
- return 0;
+static struct sk_buff *
+mlxsw_sp_txhdr_preparations(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ struct mlxsw_txhdr_info *txhdr_info)
+{
+ if (likely(!mlxsw_sp_skb_requires_ts(skb)))
+ return skb;
+
+ if (!mlxsw_sp->ptp_ops->tx_as_data)
+ return skb;
+
+ /* Special handling for PTP events that require a time stamp and cannot
+ * be transmitted as regular control packets.
+ */
+ mlxsw_sp_txhdr_info_data_init(mlxsw_sp->core, skb, txhdr_info);
+ return mlxsw_sp_vlan_tag_push(mlxsw_sp, skb);
}
enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
@@ -721,16 +617,16 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
- const struct mlxsw_tx_info tx_info = {
- .local_port = mlxsw_sp_port->local_port,
- .is_emad = false,
+ struct mlxsw_txhdr_info txhdr_info = {
+ .tx_info.local_port = mlxsw_sp_port->local_port,
+ .tx_info.is_emad = false,
};
u64 len;
int err;
memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
- if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
+ if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &txhdr_info.tx_info))
return NETDEV_TX_BUSY;
if (eth_skb_pad(skb)) {
@@ -738,10 +634,11 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- err = mlxsw_sp_txhdr_handle(mlxsw_sp->core, mlxsw_sp_port, skb,
- &tx_info);
- if (err)
+ skb = mlxsw_sp_txhdr_preparations(mlxsw_sp, skb, &txhdr_info);
+ if (!skb) {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
return NETDEV_TX_OK;
+ }
/* TX header is consumed by HW on the way so we shouldn't count its
* bytes as being sent.
@@ -751,7 +648,7 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
/* Due to a race we might fail here because of a full queue. In that
* unlikely case we simply drop the packet.
*/
- err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
+ err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &txhdr_info);
if (!err) {
pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
@@ -1262,63 +1159,31 @@ static int mlxsw_sp_set_features(struct net_device *dev,
return 0;
}
-static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct ifreq *ifr)
+static int mlxsw_sp_port_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
- int err;
-
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
- &config);
- if (err)
- return err;
-
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- return 0;
+ return mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
+ config, extack);
}
-static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct ifreq *ifr)
+static int mlxsw_sp_port_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config config;
- int err;
-
- err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
- &config);
- if (err)
- return err;
-
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- return 0;
+ return mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
+ config);
}
static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
- struct hwtstamp_config config = {0};
-
- mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
-}
-
-static int
-mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct kernel_hwtstamp_config config = {};
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
- case SIOCGHWTSTAMP:
- return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
- default:
- return -EOPNOTSUPP;
- }
+ mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config,
+ NULL);
}
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
@@ -1335,7 +1200,8 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
.ndo_set_features = mlxsw_sp_set_features,
- .ndo_eth_ioctl = mlxsw_sp_port_ioctl,
+ .ndo_hwtstamp_get = mlxsw_sp_port_hwtstamp_get,
+ .ndo_hwtstamp_set = mlxsw_sp_port_hwtstamp_set,
};
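Switching from .ndo_eth_ioctl to the dedicated .ndo_hwtstamp_get/.ndo_hwtstamp_set callbacks means the networking core now handles the SIOCGHWTSTAMP/SIOCSHWTSTAMP ioctls itself, including copying struct hwtstamp_config to and from user space; the driver only sees a struct kernel_hwtstamp_config plus an extack for error reporting. A generic sketch of the new callback shape (illustrative only, not mlxsw code):

	static int example_hwtstamp_set(struct net_device *dev,
					struct kernel_hwtstamp_config *config,
					struct netlink_ext_ack *extack)
	{
		if (config->tx_type != HWTSTAMP_TX_OFF &&
		    config->tx_type != HWTSTAMP_TX_ON) {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported TX timestamp type");
			return -ERANGE;
		}
		/* ... program the hardware, then report what is actually
		 * being timestamped back to the caller ...
		 */
		config->rx_filter = HWTSTAMP_FILTER_ALL;
		return 0;
	}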
static int
@@ -1677,10 +1543,12 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
netif_carrier_off(dev);
dev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_HW_TC;
- dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;
+ NETIF_F_HW_TC | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
dev->lltx = true;
- dev->netns_local = true;
+ dev->netns_immutable = true;
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = MLXSW_PORT_MAX_MTU - MLXSW_PORT_ETH_FRAME_HDR;
@@ -2449,7 +2317,7 @@ void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
u64_stats_update_end(&pcpu_stats->syncp);
skb->protocol = eth_type_trans(skb, skb->dev);
- netif_receive_skb(skb);
+ napi_gro_receive(mlxsw_skb_cb(skb)->rx_md_info.napi, skb);
}
static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port,
@@ -2507,11 +2375,11 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
ROUTER_EXP, false),
MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
ROUTER_EXP, false),
+ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_LINK_LOCAL, FORWARD,
+ ROUTER_EXP, false),
/* Multicast Router Traps */
MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
- /* NVE traps */
- MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
};
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
@@ -2792,7 +2660,6 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
.get_stats_count = mlxsw_sp1_get_stats_count,
.get_stats_strings = mlxsw_sp1_get_stats_strings,
.get_stats = mlxsw_sp1_get_stats,
- .txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
@@ -2811,7 +2678,7 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
.get_stats_count = mlxsw_sp2_get_stats_count,
.get_stats_strings = mlxsw_sp2_get_stats_strings,
.get_stats = mlxsw_sp2_get_stats,
- .txhdr_construct = mlxsw_sp2_ptp_txhdr_construct,
+ .tx_as_data = true,
};
static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
@@ -2830,7 +2697,6 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
.get_stats_count = mlxsw_sp2_get_stats_count,
.get_stats_strings = mlxsw_sp2_get_stats_strings,
.get_stats = mlxsw_sp2_get_stats,
- .txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
struct mlxsw_sp_sample_trigger_node {
@@ -3992,11 +3858,9 @@ static struct mlxsw_driver mlxsw_sp1_driver = {
.trap_policer_fini = mlxsw_sp_trap_policer_fini,
.trap_policer_set = mlxsw_sp_trap_policer_set,
.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
- .txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp1_resources_register,
.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
- .txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp1_config_profile,
.sdq_supports_cqe_v2 = false,
};
@@ -4030,10 +3894,8 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.trap_policer_fini = mlxsw_sp_trap_policer_fini,
.trap_policer_set = mlxsw_sp_trap_policer_set,
.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
- .txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
- .txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
.sdq_supports_cqe_v2 = true,
};
@@ -4067,10 +3929,8 @@ static struct mlxsw_driver mlxsw_sp3_driver = {
.trap_policer_fini = mlxsw_sp_trap_policer_fini,
.trap_policer_set = mlxsw_sp_trap_policer_set,
.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
- .txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
- .txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
.sdq_supports_cqe_v2 = true,
};
@@ -4102,10 +3962,8 @@ static struct mlxsw_driver mlxsw_sp4_driver = {
.trap_policer_fini = mlxsw_sp_trap_policer_fini,
.trap_policer_set = mlxsw_sp_trap_policer_set,
.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
- .txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
- .txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp4_config_profile,
.sdq_supports_cqe_v2 = true,
};
@@ -5343,25 +5201,13 @@ static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
return 0;
if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
return -EOPNOTSUPP;
- if (cu_info->linking) {
- if (!netif_running(dev))
- return 0;
- /* When the bridge is VLAN-aware, the VNI of the VxLAN
- * device needs to be mapped to a VLAN, but at this
- * point no VLANs are configured on the VxLAN device
- */
- if (br_vlan_enabled(upper_dev))
- return 0;
+ if (!netif_running(dev))
+ return 0;
+ if (cu_info->linking)
return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
dev, 0, extack);
- } else {
- /* VLANs were already flushed, which triggered the
- * necessary cleanup
- */
- if (br_vlan_enabled(upper_dev))
- return 0;
+ else
mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
- }
break;
case NETDEV_PRE_UP:
upper_dev = netdev_master_upper_dev_get(dev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 8d3c61287696..b03ff9e044f9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -233,9 +233,10 @@ struct mlxsw_sp_ptp_ops {
u16 local_port);
int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config);
int (*hwtstamp_set)(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void (*shaper_work)(struct work_struct *work);
int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp,
struct kernel_ethtool_ts_info *info);
@@ -243,10 +244,7 @@ struct mlxsw_sp_ptp_ops {
void (*get_stats_strings)(u8 **p);
void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
u64 *data, int data_index);
- int (*txhdr_construct)(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
+ bool tx_as_data;
};
struct mlxsw_sp_fid_core_ops {
@@ -354,7 +352,7 @@ struct mlxsw_sp_port {
struct mlxsw_sp_flow_block *eg_flow_block;
struct {
struct delayed_work shaper_dw;
- struct hwtstamp_config hwtstamp_config;
+ struct kernel_hwtstamp_config hwtstamp_config;
u16 ing_types;
u16 egr_types;
struct mlxsw_sp_ptp_port_stats stats;
@@ -664,10 +662,10 @@ bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *br_dev);
int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
const struct net_device *br_dev,
- const struct net_device *vxlan_dev, u16 vid,
+ struct net_device *vxlan_dev, u16 vid,
struct netlink_ext_ack *extack);
void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *vxlan_dev);
+ struct net_device *vxlan_dev);
extern struct notifier_block mlxsw_sp_switchdev_notifier;
/* spectrum.c */
@@ -711,12 +709,6 @@ int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
unsigned int *p_counter_index);
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index);
-void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
-int mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
bool mlxsw_sp_port_dev_check(const struct net_device *dev);
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
@@ -763,9 +755,6 @@ void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev);
-bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *dev);
-u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev);
u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
enum mlxsw_sp_l3proto ul_proto,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
index a54eedb69a3f..067f0055a55a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
@@ -212,7 +212,22 @@ static const u8 mlxsw_sp4_acl_bf_crc6_tab[256] = {
* This array defines key offsets for easy access when copying key blocks from
* entry key to Bloom filter chunk.
*/
-static const u8 chunk_key_offsets[MLXSW_BLOOM_KEY_CHUNKS] = {2, 20, 38};
+static char *
+mlxsw_sp_acl_bf_enc_key_get(struct mlxsw_sp_acl_atcam_entry *aentry,
+ u8 chunk_index)
+{
+ switch (chunk_index) {
+ case 0:
+ return &aentry->ht_key.enc_key[2];
+ case 1:
+ return &aentry->ht_key.enc_key[20];
+ case 2:
+ return &aentry->ht_key.enc_key[38];
+ default:
+ WARN_ON_ONCE(1);
+ return &aentry->ht_key.enc_key[0];
+ }
+}
static u16 mlxsw_sp2_acl_bf_crc16_byte(u16 crc, u8 c)
{
@@ -235,9 +250,10 @@ __mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
u8 key_offset, u8 chunk_key_len, u8 chunk_len)
{
struct mlxsw_afk_key_info *key_info = aregion->region->key_info;
- u8 chunk_index, chunk_count, block_count;
+ u8 chunk_index, chunk_count;
char *chunk = output;
__be16 erp_region_id;
+ u32 block_count;
block_count = mlxsw_afk_key_info_blocks_count_get(key_info);
chunk_count = 1 + ((block_count - 1) >> 2);
@@ -245,12 +261,13 @@ __mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
(aregion->region->id << 4));
for (chunk_index = max_chunks - chunk_count; chunk_index < max_chunks;
chunk_index++) {
+ char *enc_key;
+
memset(chunk, 0, pad_bytes);
memcpy(chunk + pad_bytes, &erp_region_id,
sizeof(erp_region_id));
- memcpy(chunk + key_offset,
- &aentry->ht_key.enc_key[chunk_key_offsets[chunk_index]],
- chunk_key_len);
+ enc_key = mlxsw_sp_acl_bf_enc_key_get(aentry, chunk_index);
+ memcpy(chunk + key_offset, enc_key, chunk_key_len);
chunk += chunk_len;
}
*len = chunk_count * chunk_len;
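The chunk_count expression above, 1 + ((block_count - 1) >> 2), is a ceiling division of block_count by 4, i.e. each Bloom filter chunk covers up to four key blocks:

	/* 1..4 blocks  -> 1 chunk
	 * 5..8 blocks  -> 2 chunks
	 * 9..12 blocks -> 3 chunks
	 */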
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
index 6fe185ea6732..1850a975b380 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
@@ -324,6 +324,10 @@ static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5b[] =
MLXSW_AFK_ELEMENT_INST_EXT_U32(SRC_SYS_PORT, 0x04, 0, 9, -1, true), /* RX_ACL_SYSTEM_PORT */
};
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_1b[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4),
+};
+
static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_5b[] = {
MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER, 0x04, 20, 12),
};
@@ -341,7 +345,7 @@ static const struct mlxsw_afk_block mlxsw_sp4_afk_blocks[] = {
MLXSW_AFK_BLOCK(0x14, mlxsw_sp_afk_element_info_mac_4),
MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x1A, mlxsw_sp_afk_element_info_mac_5b),
MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x38, mlxsw_sp_afk_element_info_ipv4_0),
- MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x39, mlxsw_sp_afk_element_info_ipv4_1),
+ MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x3F, mlxsw_sp_afk_element_info_ipv4_1b),
MLXSW_AFK_BLOCK(0x3A, mlxsw_sp_afk_element_info_ipv4_2),
MLXSW_AFK_BLOCK(0x36, mlxsw_sp_afk_element_info_ipv4_5b),
MLXSW_AFK_BLOCK(0x40, mlxsw_sp_afk_element_info_ipv6_0),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index b1d08e958bf9..69f9da9fb305 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -1489,7 +1489,8 @@ mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
static int
mlxsw_sp_acl_tcam_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
struct mlxsw_sp_acl_tcam *tcam;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
index 50e591420bd9..b1094aaffa5f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
@@ -170,8 +170,7 @@ void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
mlxsw_sp_counter_sub_pools_fini(mlxsw_sp);
- WARN_ON(find_first_bit(pool->usage, pool->pool_size) !=
- pool->pool_size);
+ WARN_ON(!bitmap_empty(pool->usage, pool->pool_size));
WARN_ON(atomic_read(&pool->active_entries_count));
bitmap_free(pool->usage);
devl_resource_occ_get_unregister(devlink,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 2bed8c86b7cf..0a8fb9c842d3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -262,7 +262,7 @@ err_port_pause_configure:
}
struct mlxsw_sp_port_hw_stats {
- char str[ETH_GSTRING_LEN];
+ char str[ETH_GSTRING_LEN] __nonstring;
u64 (*getter)(const char *payload);
bool cells_bytes;
};
@@ -768,7 +768,9 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev,
err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
if (err)
return;
- mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
+ err = mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
+ if (err)
+ return;
for (i = 0; i < len; i++) {
data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
if (!hw_stats[i].cells_bytes)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index f07955b5439f..353fd9ca89a6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -192,6 +192,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return -EOPNOTSUPP;
}
+ if (sample_act_count) {
+ NL_SET_ERR_MSG_MOD(extack, "Mirror action after sample action is not supported");
+ return -EOPNOTSUPP;
+ }
+
err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
block, out_dev,
extack);
@@ -265,6 +270,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return -EOPNOTSUPP;
}
+ if (mirror_act_count) {
+ NL_SET_ERR_MSG_MOD(extack, "Sample action after mirror action is not supported");
+ return -EOPNOTSUPP;
+ }
+
err = mlxsw_sp_acl_rulei_act_sample(mlxsw_sp, rulei,
block,
act->sample.psample_group,
@@ -820,8 +830,10 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
return -EINVAL;
rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
- if (!rule)
- return -EINVAL;
+ if (!rule) {
+ err = -EINVAL;
+ goto err_rule_get_stats;
+ }
err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
&drops, &lastuse, &used_hw_stats);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
index 69cd689dbc83..5afe6b155ef0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
@@ -1003,10 +1003,10 @@ static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp,
mr->mr_ops->route_stats(mlxsw_sp, mr_route->route_priv, &packets,
&bytes);
- if (mr_route->mfc->mfc_un.res.pkt != packets)
- mr_route->mfc->mfc_un.res.lastuse = jiffies;
- mr_route->mfc->mfc_un.res.pkt = packets;
- mr_route->mfc->mfc_un.res.bytes = bytes;
+ if (atomic_long_read(&mr_route->mfc->mfc_un.res.pkt) != packets)
+ WRITE_ONCE(mr_route->mfc->mfc_un.res.lastuse, jiffies);
+ atomic_long_set(&mr_route->mfc->mfc_un.res.pkt, packets);
+ atomic_long_set(&mr_route->mfc->mfc_un.res.bytes, bytes);
}
static void mlxsw_sp_mr_stats_update(struct work_struct *work)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index d94081c7658e..5b9f0844b8f6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -46,7 +46,7 @@ struct mlxsw_sp2_ptp_state {
refcount_t ptp_port_enabled_ref; /* Number of ports with time stamping
* enabled.
*/
- struct hwtstamp_config config;
+ struct kernel_hwtstamp_config config;
struct mutex lock; /* Protects 'config' and HW configuration. */
};
@@ -131,7 +131,7 @@ static u64 __mlxsw_sp1_ptp_read_frc(struct mlxsw_sp1_ptp_clock *clock,
return (u64) frc_l | (u64) frc_h2 << 32;
}
-static u64 mlxsw_sp1_ptp_read_frc(const struct cyclecounter *cc)
+static u64 mlxsw_sp1_ptp_read_frc(struct cyclecounter *cc)
{
struct mlxsw_sp1_ptp_clock *clock =
container_of(cc, struct mlxsw_sp1_ptp_clock, cycles);
@@ -1083,14 +1083,14 @@ void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common)
}
int mlxsw_sp1_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
*config = mlxsw_sp_port->ptp.hwtstamp_config;
return 0;
}
static int
-mlxsw_sp1_ptp_get_message_types(const struct hwtstamp_config *config,
+mlxsw_sp1_ptp_get_message_types(const struct kernel_hwtstamp_config *config,
u16 *p_ing_types, u16 *p_egr_types,
enum hwtstamp_rx_filters *p_rx_filter)
{
@@ -1246,7 +1246,8 @@ void mlxsw_sp1_ptp_shaper_work(struct work_struct *work)
}
int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
enum hwtstamp_rx_filters rx_filter;
u16 ing_types;
@@ -1270,7 +1271,7 @@ int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (err)
return err;
- /* Notify the ioctl caller what we are actually timestamping. */
+ /* Notify the caller what we are actually timestamping. */
config->rx_filter = rx_filter;
return 0;
@@ -1353,6 +1354,10 @@ struct mlxsw_sp_ptp_state *mlxsw_sp2_ptp_init(struct mlxsw_sp *mlxsw_sp)
struct mlxsw_sp2_ptp_state *ptp_state;
int err;
+ /* Max FID will be used in data path, check validity as part of init. */
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, FID))
+ return ERR_PTR(-EIO);
+
ptp_state = kzalloc(sizeof(*ptp_state), GFP_KERNEL);
if (!ptp_state)
return ERR_PTR(-ENOMEM);
@@ -1447,7 +1452,7 @@ void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
}
int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
struct mlxsw_sp2_ptp_state *ptp_state;
@@ -1461,7 +1466,7 @@ int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
-mlxsw_sp2_ptp_get_message_types(const struct hwtstamp_config *config,
+mlxsw_sp2_ptp_get_message_types(const struct kernel_hwtstamp_config *config,
u16 *p_ing_types, u16 *p_egr_types,
enum hwtstamp_rx_filters *p_rx_filter)
{
@@ -1538,7 +1543,7 @@ static int mlxsw_sp2_ptp_mtpcpc_set(struct mlxsw_sp *mlxsw_sp, bool ptp_trap_en,
static int mlxsw_sp2_ptp_enable(struct mlxsw_sp *mlxsw_sp, u16 ing_types,
u16 egr_types,
- struct hwtstamp_config new_config)
+ struct kernel_hwtstamp_config new_config)
{
struct mlxsw_sp2_ptp_state *ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
int err;
@@ -1552,7 +1557,7 @@ static int mlxsw_sp2_ptp_enable(struct mlxsw_sp *mlxsw_sp, u16 ing_types,
}
static int mlxsw_sp2_ptp_disable(struct mlxsw_sp *mlxsw_sp,
- struct hwtstamp_config new_config)
+ struct kernel_hwtstamp_config new_config)
{
struct mlxsw_sp2_ptp_state *ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
int err;
@@ -1567,7 +1572,7 @@ static int mlxsw_sp2_ptp_disable(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp2_ptp_configure_port(struct mlxsw_sp_port *mlxsw_sp_port,
u16 ing_types, u16 egr_types,
- struct hwtstamp_config new_config)
+ struct kernel_hwtstamp_config new_config)
{
struct mlxsw_sp2_ptp_state *ptp_state;
int err;
@@ -1588,7 +1593,7 @@ static int mlxsw_sp2_ptp_configure_port(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp2_ptp_deconfigure_port(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config new_config)
+ struct kernel_hwtstamp_config new_config)
{
struct mlxsw_sp2_ptp_state *ptp_state;
int err;
@@ -1610,11 +1615,12 @@ err_ptp_disable:
}
int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
+ struct kernel_hwtstamp_config new_config;
struct mlxsw_sp2_ptp_state *ptp_state;
enum hwtstamp_rx_filters rx_filter;
- struct hwtstamp_config new_config;
u16 new_ing_types, new_egr_types;
bool ptp_enabled;
int err;
@@ -1648,7 +1654,7 @@ int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port->ptp.ing_types = new_ing_types;
mlxsw_sp_port->ptp.egr_types = new_egr_types;
- /* Notify the ioctl caller what we are actually timestamping. */
+ /* Notify the caller what we are actually timestamping. */
config->rx_filter = rx_filter;
mutex_unlock(&ptp_state->lock);
@@ -1679,43 +1685,3 @@ int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-
-int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
-{
- if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
- this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
- dev_kfree_skb_any(skb);
- return -ENOMEM;
- }
-
- mlxsw_sp_txhdr_construct(skb, tx_info);
- return 0;
-}
-
-int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
-{
- /* In Spectrum-2 and Spectrum-3, in order for PTP event packets to have
- * their correction field correctly set on the egress port they must be
- * transmitted as data packets. Such packets ingress the ASIC via the
- * CPU port and must have a VLAN tag, as the CPU port is not configured
- * with a PVID. Push the default VLAN (4095), which is configured as
- * egress untagged on all the ports.
- */
- if (!skb_vlan_tagged(skb)) {
- skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
- MLXSW_SP_DEFAULT_VID);
- if (!skb) {
- this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
- return -ENOMEM;
- }
- }
-
- return mlxsw_sp_txhdr_ptp_data_construct(mlxsw_core, mlxsw_sp_port, skb,
- tx_info);
-}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
index c8aa1452fbb9..df37f1470830 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
@@ -34,10 +34,11 @@ void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
u64 timestamp);
int mlxsw_sp1_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config);
int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void mlxsw_sp1_ptp_shaper_work(struct work_struct *work);
@@ -49,11 +50,6 @@ void mlxsw_sp1_get_stats_strings(u8 **p);
void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
u64 *data, int data_index);
-int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
-
struct mlxsw_sp_ptp_clock *
mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev);
@@ -70,19 +66,15 @@ void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
struct sk_buff *skb, u16 local_port);
int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config);
int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
struct kernel_ethtool_ts_info *info);
-int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info);
-
#else
static inline struct mlxsw_sp_ptp_clock *
@@ -127,14 +119,15 @@ mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
static inline int
mlxsw_sp1_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
return -EOPNOTSUPP;
}
static inline int
mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
@@ -157,15 +150,6 @@ static inline void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
{
}
-static inline int
-mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
-{
- return -EOPNOTSUPP;
-}
-
static inline struct mlxsw_sp_ptp_clock *
mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
{
@@ -200,23 +184,15 @@ static inline void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
static inline int
mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
return -EOPNOTSUPP;
}
static inline int
mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 7d6d859cef3f..a2033837182e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -3014,6 +3014,9 @@ static int mlxsw_sp_neigh_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
.rif = rif,
};
+ if (!mlxsw_sp_dev_lower_is_port(mlxsw_sp_rif_dev(rif)))
+ return 0;
+
neigh_for_each(&arp_tbl, mlxsw_sp_neigh_rif_made_sync_each, &rms);
if (rms.err)
goto err_arp;
@@ -8184,41 +8187,6 @@ mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
return NULL;
}
-bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *dev)
-{
- struct mlxsw_sp_rif *rif;
-
- mutex_lock(&mlxsw_sp->router->lock);
- rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
- mutex_unlock(&mlxsw_sp->router->lock);
-
- return rif;
-}
-
-u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
-{
- struct mlxsw_sp_rif *rif;
- u16 vid = 0;
-
- mutex_lock(&mlxsw_sp->router->lock);
- rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
- if (!rif)
- goto out;
-
- /* We only return the VID for VLAN RIFs. Otherwise we return an
- * invalid value (0).
- */
- if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
- goto out;
-
- vid = mlxsw_sp_fid_8021q_vid(rif->fid);
-
-out:
- mutex_unlock(&mlxsw_sp->router->lock);
- return vid;
-}
-
static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
{
char ritr_pl[MLXSW_REG_RITR_LEN];
@@ -8417,19 +8385,6 @@ u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
return lb_rif->common.rif_index;
}
-u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
-{
- struct net_device *dev = mlxsw_sp_rif_dev(&lb_rif->common);
- u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(dev);
- struct mlxsw_sp_vr *ul_vr;
-
- ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
- if (WARN_ON(IS_ERR(ul_vr)))
- return 0;
-
- return ul_vr->id;
-}
-
u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
{
return lb_rif->ul_rif_id;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 0432c7cc6b07..313efab5c324 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -90,7 +90,6 @@ struct mlxsw_sp_ipip_entry;
struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
u16 rif_index);
u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif);
-u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif);
u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif);
u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev);
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 4b5fd71c897d..32d2e61f2b82 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -423,8 +423,7 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
- 0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0,
- 0);
+ 0, 0, tun->net, parms.link, tun->fwmark, 0, 0);
rt = ip_route_output_key(tun->net, &fl4);
if (IS_ERR(rt))
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 6397ff0dc951..a48bf342084d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -2929,23 +2929,8 @@ void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
}
-int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *br_dev,
- const struct net_device *vxlan_dev, u16 vid,
- struct netlink_ext_ack *extack)
-{
- struct mlxsw_sp_bridge_device *bridge_device;
-
- bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
- if (WARN_ON(!bridge_device))
- return -EINVAL;
-
- return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,
- extack);
-}
-
-void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *vxlan_dev)
+static void __mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *vxlan_dev)
{
struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
struct mlxsw_sp_fid *fid;
@@ -2963,6 +2948,47 @@ void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_fid_put(fid);
}
+int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev,
+ struct net_device *vxlan_dev, u16 vid,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ int err;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (WARN_ON(!bridge_device))
+ return -EINVAL;
+
+ mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(bridge_device->dev);
+ if (!mlxsw_sp_port)
+ return -EINVAL;
+
+ err = bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,
+ extack);
+ if (err)
+ return err;
+
+ err = switchdev_bridge_port_offload(vxlan_dev, mlxsw_sp_port->dev,
+ NULL, NULL, NULL, false, extack);
+ if (err)
+ goto err_bridge_port_offload;
+
+ return 0;
+
+err_bridge_port_offload:
+ __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+ return err;
+}
+
+void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *vxlan_dev)
+{
+ switchdev_bridge_port_unoffload(vxlan_dev, NULL, NULL, NULL);
+ __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+}
+
static void
mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
enum mlxsw_sp_l3proto *proto,
@@ -3867,7 +3893,7 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_fid_put(fid);
return -EINVAL;
}
- mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+ __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
mlxsw_sp_fid_put(fid);
return 0;
}
@@ -3883,7 +3909,7 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
	/* Fourth case: The new VLAN is PVID, which means the VLAN currently
* mapped to the VNI should be unmapped
*/
- mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+ __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
mlxsw_sp_fid_put(fid);
/* Fifth case: The new VLAN is also egress untagged, which means the
@@ -3923,7 +3949,7 @@ mlxsw_sp_switchdev_vxlan_vlan_del(struct mlxsw_sp *mlxsw_sp,
if (mlxsw_sp_fid_8021q_vid(fid) != vid)
goto out;
- mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+ __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
out:
mlxsw_sp_fid_put(fid);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 899c954e0e5f..b5c3f789c685 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -173,7 +173,7 @@ static void mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u16 local_port,
if (err)
return;
- netif_receive_skb(skb);
+ napi_gro_receive(mlxsw_skb_cb(skb)->rx_md_info.napi, skb);
}
static void mlxsw_sp_rx_mark_listener(struct sk_buff *skb, u16 local_port,
@@ -959,18 +959,18 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = {
},
{
.trap = MLXSW_SP_TRAP_CONTROL(ARP_REQUEST, NEIGH_DISCOVERY,
- MIRROR),
+ TRAP),
.listeners_arr = {
- MLXSW_SP_RXL_MARK(ROUTER_ARPBC, NEIGH_DISCOVERY,
- TRAP_TO_CPU, false),
+ MLXSW_SP_RXL_NO_MARK(ARPBC, NEIGH_DISCOVERY,
+ TRAP_TO_CPU, false),
},
},
{
.trap = MLXSW_SP_TRAP_CONTROL(ARP_RESPONSE, NEIGH_DISCOVERY,
- MIRROR),
+ TRAP),
.listeners_arr = {
- MLXSW_SP_RXL_MARK(ROUTER_ARPUC, NEIGH_DISCOVERY,
- TRAP_TO_CPU, false),
+ MLXSW_SP_RXL_NO_MARK(ARPUC, NEIGH_DISCOVERY,
+ TRAP_TO_CPU, false),
},
},
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 83477c8e6971..9962dc157901 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -29,6 +29,8 @@ enum {
MLXSW_TRAP_ID_FDB_MISMATCH = 0x3B,
MLXSW_TRAP_ID_FID_MISS = 0x3D,
MLXSW_TRAP_ID_DECAP_ECN0 = 0x40,
+ MLXSW_TRAP_ID_ARPBC = 0x50,
+ MLXSW_TRAP_ID_ARPUC = 0x51,
MLXSW_TRAP_ID_MTUERROR = 0x52,
MLXSW_TRAP_ID_TTLERROR = 0x53,
MLXSW_TRAP_ID_LBERROR = 0x54,
@@ -66,13 +68,10 @@ enum {
MLXSW_TRAP_ID_HOST_MISS_IPV6 = 0x92,
MLXSW_TRAP_ID_IPIP_DECAP_ERROR = 0xB1,
MLXSW_TRAP_ID_NVE_DECAP_ARP = 0xB8,
- MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD,
MLXSW_TRAP_ID_IPV4_BFD = 0xD0,
MLXSW_TRAP_ID_IPV6_BFD = 0xD1,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
- MLXSW_TRAP_ID_ROUTER_ARPBC = 0xE0,
- MLXSW_TRAP_ID_ROUTER_ARPUC = 0xE1,
MLXSW_TRAP_ID_DISCARD_NON_ROUTABLE = 0x11A,
MLXSW_TRAP_ID_DISCARD_ROUTER2 = 0x130,
MLXSW_TRAP_ID_DISCARD_ROUTER3 = 0x131,
@@ -95,6 +94,7 @@ enum {
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_SIP_BC = 0x16A,
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_DIP_LOCAL_NET = 0x16B,
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_DIP_LINK_LOCAL = 0x16C,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_LINK_LOCAL = 0x16D,
MLXSW_TRAP_ID_DISCARD_ROUTER_IRIF_EN = 0x178,
MLXSW_TRAP_ID_DISCARD_ROUTER_ERIF_EN = 0x179,
MLXSW_TRAP_ID_DISCARD_ROUTER_LPM4 = 0x17B,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/txheader.h b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
index da51dd9d5e44..e78cba5821b6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/txheader.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
@@ -4,6 +4,69 @@
#ifndef _MLXSW_TXHEADER_H
#define _MLXSW_TXHEADER_H
+/* tx_hdr_version
+ * Tx header version.
+ * Must be set to 1.
+ */
+MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
+
+/* tx_hdr_ctl
+ * Packet control type.
+ * 0 - Ethernet control (e.g. EMADs, LACP)
+ * 1 - Ethernet data
+ */
+MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
+
+/* tx_hdr_proto
+ * Packet protocol type. Must be set to 1 (Ethernet).
+ */
+MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
+
+/* tx_hdr_rx_is_router
+ * Packet is sent from the router. Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
+
+/* tx_hdr_fid_valid
+ * Indicates if the 'fid' field is valid and should be used for
+ * forwarding lookup. Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
+
+/* tx_hdr_swid
+ * Switch partition ID. Must be set to 0.
+ */
+MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
+
+/* tx_hdr_control_tclass
+ * Indicates if the packet should use the control TClass and not one
+ * of the data TClasses.
+ */
+MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
+
+/* tx_hdr_port_mid
+ * Destination local port for unicast packets.
+ * Destination multicast ID for multicast packets.
+ *
+ * Control packets are directed to a specific egress port, while data
+ * packets are transmitted through the CPU port (0) into the switch partition,
+ * where forwarding rules are applied.
+ */
+MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
+
+/* tx_hdr_fid
+ * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
+ * set, otherwise calculated based on the packet's VID using VID to FID mapping.
+ * Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16);
+
+/* tx_hdr_type
+ * 0 - Data packets
+ * 6 - Control packets
+ */
+MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
+
#define MLXSW_TXHDR_LEN 0x10
#define MLXSW_TXHDR_VERSION_0 0
#define MLXSW_TXHDR_VERSION_1 1
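For reference, these are the fields the control-packet constructor removed from spectrum.c earlier in this patch used to fill; shown here only as a condensed illustration of how the MLXSW_ITEM32() setters are meant to be combined:

	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);
	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);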
diff --git a/drivers/net/ethernet/meta/Kconfig b/drivers/net/ethernet/meta/Kconfig
index 831921b9d4d5..ca5c7ac2a5bc 100644
--- a/drivers/net/ethernet/meta/Kconfig
+++ b/drivers/net/ethernet/meta/Kconfig
@@ -19,14 +19,16 @@ if NET_VENDOR_META
config FBNIC
tristate "Meta Platforms Host Network Interface"
- depends on X86_64 || COMPILE_TEST
+ depends on 64BIT || COMPILE_TEST
depends on !S390
depends on MAX_SKB_FRAGS < 22
depends on PCI_MSI
depends on PTP_1588_CLOCK_OPTIONAL
select NET_DEVLINK
select PAGE_POOL
+ select PCS_XPCS
select PHYLINK
+ select PLDMFW
help
This driver supports Meta Platforms Host Network Interface.
diff --git a/drivers/net/ethernet/meta/fbnic/Makefile b/drivers/net/ethernet/meta/fbnic/Makefile
index cadd4dac6620..72c41af65364 100644
--- a/drivers/net/ethernet/meta/fbnic/Makefile
+++ b/drivers/net/ethernet/meta/fbnic/Makefile
@@ -7,9 +7,12 @@
obj-$(CONFIG_FBNIC) += fbnic.o
-fbnic-y := fbnic_devlink.o \
+fbnic-y := fbnic_csr.o \
+ fbnic_debugfs.o \
+ fbnic_devlink.o \
fbnic_ethtool.o \
fbnic_fw.o \
+ fbnic_fw_log.o \
fbnic_hw_stats.o \
fbnic_hwmon.o \
fbnic_irq.o \
@@ -18,6 +21,8 @@ fbnic-y := fbnic_devlink.o \
fbnic_pci.o \
fbnic_phylink.o \
fbnic_rpc.o \
+ fbnic_mdio.o \
+ fbnic_time.o \
fbnic_tlv.o \
fbnic_txrx.o \
- fbnic_time.o
+# End of objects
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h
index fec567c8fe4a..779a083b9215 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic.h
@@ -12,27 +12,41 @@
#include "fbnic_csr.h"
#include "fbnic_fw.h"
+#include "fbnic_fw_log.h"
#include "fbnic_hw_stats.h"
#include "fbnic_mac.h"
#include "fbnic_rpc.h"
+struct fbnic_napi_vector;
+
+#define FBNIC_MAX_NAPI_VECTORS 128u
+#define FBNIC_MBX_CMPL_SLOTS 4
+
struct fbnic_dev {
struct device *dev;
struct net_device *netdev;
+ struct dentry *dbg_fbd;
struct device *hwmon;
+ struct devlink_health_reporter *fw_reporter;
+ struct devlink_health_reporter *otp_reporter;
u32 __iomem *uc_addr0;
u32 __iomem *uc_addr4;
const struct fbnic_mac *mac;
unsigned int fw_msix_vector;
- unsigned int pcs_msix_vector;
+ unsigned int mac_msix_vector;
unsigned short num_irqs;
+ struct {
+ u8 users;
+ char name[IFNAMSIZ + 9];
+ } napi_irq[FBNIC_MAX_NAPI_VECTORS];
+
struct delayed_work service_task;
struct fbnic_fw_mbx mbx[FBNIC_IPC_MBX_INDICES];
struct fbnic_fw_cap fw_cap;
- struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_fw_completion *cmpl_data[FBNIC_MBX_CMPL_SLOTS];
/* Lock protecting Tx Mailbox queue to prevent possible races */
spinlock_t fw_tx_lock;
@@ -48,6 +62,13 @@ struct fbnic_dev {
struct fbnic_act_tcam act_tcam[FBNIC_RPC_TCAM_ACT_NUM_ENTRIES];
struct fbnic_mac_addr mac_addr[FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES];
u8 mac_addr_boundary;
+ u8 tce_tcam_last;
+
+ /* IP TCAM */
+ struct fbnic_ip_addr ip_src[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES];
+ struct fbnic_ip_addr ip_dst[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES];
+ struct fbnic_ip_addr ipo_src[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES];
+ struct fbnic_ip_addr ipo_dst[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES];
/* Number of TCQs/RCQs available on hardware */
u16 max_num_queues;
@@ -62,8 +83,21 @@ struct fbnic_dev {
/* Last @time_high refresh time in jiffies (to catch stalls) */
unsigned long last_read;
+ /* PMD specific data */
+ unsigned long end_of_pmd_training;
+ u8 pmd_state;
+
/* Local copy of hardware statistics */
struct fbnic_hw_stats hw_stats;
+
+ /* Firmware time since boot in milliseconds */
+ u64 firmware_time;
+ u64 prev_firmware_time;
+
+ struct fbnic_fw_log fw_log;
+
+ /* MDIO bus for PHYs */
+ struct mii_bus *mdio_bus;
};
/* Reserve entry 0 in the MSI-X "others" array until we have filled all
@@ -134,18 +168,29 @@ extern char fbnic_driver_name[];
void fbnic_devlink_free(struct fbnic_dev *fbd);
struct fbnic_dev *fbnic_devlink_alloc(struct pci_dev *pdev);
+int fbnic_devlink_health_create(struct fbnic_dev *fbd);
+void fbnic_devlink_health_destroy(struct fbnic_dev *fbd);
void fbnic_devlink_register(struct fbnic_dev *fbd);
void fbnic_devlink_unregister(struct fbnic_dev *fbd);
+void __printf(2, 3)
+fbnic_devlink_fw_report(struct fbnic_dev *fbd, const char *format, ...);
+void fbnic_devlink_otp_check(struct fbnic_dev *fbd, const char *msg);
-int fbnic_fw_enable_mbx(struct fbnic_dev *fbd);
-void fbnic_fw_disable_mbx(struct fbnic_dev *fbd);
+int fbnic_fw_request_mbx(struct fbnic_dev *fbd);
+void fbnic_fw_free_mbx(struct fbnic_dev *fbd);
void fbnic_hwmon_register(struct fbnic_dev *fbd);
void fbnic_hwmon_unregister(struct fbnic_dev *fbd);
-int fbnic_pcs_irq_enable(struct fbnic_dev *fbd);
-void fbnic_pcs_irq_disable(struct fbnic_dev *fbd);
+int fbnic_mac_request_irq(struct fbnic_dev *fbd);
+void fbnic_mac_free_irq(struct fbnic_dev *fbd);
+void fbnic_napi_name_irqs(struct fbnic_dev *fbd);
+int fbnic_napi_request_irq(struct fbnic_dev *fbd,
+ struct fbnic_napi_vector *nv);
+void fbnic_napi_free_irq(struct fbnic_dev *fbd,
+ struct fbnic_napi_vector *nv);
+void fbnic_synchronize_irq(struct fbnic_dev *fbd, int nr);
int fbnic_request_irq(struct fbnic_dev *dev, int nr, irq_handler_t handler,
unsigned long flags, const char *name, void *data);
void fbnic_free_irq(struct fbnic_dev *dev, int nr, void *data);
@@ -155,6 +200,21 @@ int fbnic_alloc_irqs(struct fbnic_dev *fbd);
void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
const size_t str_sz);
+void fbnic_dbg_fbd_init(struct fbnic_dev *fbd);
+void fbnic_dbg_fbd_exit(struct fbnic_dev *fbd);
+void fbnic_dbg_init(void);
+void fbnic_dbg_exit(void);
+
+void fbnic_rpc_reset_valid_entries(struct fbnic_dev *fbd);
+
+int fbnic_mdiobus_create(struct fbnic_dev *fbd);
+
+void fbnic_csr_get_regs(struct fbnic_dev *fbd, u32 *data, u32 *regs_version);
+int fbnic_csr_regs_len(struct fbnic_dev *fbd);
+
+void fbnic_config_txrx_usecs(struct fbnic_napi_vector *nv, u32 arm);
+void fbnic_config_rx_frames(struct fbnic_napi_vector *nv);
+
enum fbnic_boards {
fbnic_board_asic
};
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.c b/drivers/net/ethernet/meta/fbnic/fbnic_csr.c
new file mode 100644
index 000000000000..d9c0dc1c2af9
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include "fbnic.h"
+
+#define FBNIC_BOUNDS(section) { \
+ .start = FBNIC_CSR_START_##section, \
+ .end = FBNIC_CSR_END_##section + 1, \
+}
+
+struct fbnic_csr_bounds {
+ u32 start;
+ u32 end;
+};
+
+static const struct fbnic_csr_bounds fbnic_csr_sects[] = {
+ FBNIC_BOUNDS(INTR),
+ FBNIC_BOUNDS(INTR_CQ),
+ FBNIC_BOUNDS(QM_TX),
+ FBNIC_BOUNDS(QM_RX),
+ FBNIC_BOUNDS(TCE),
+ FBNIC_BOUNDS(TCE_RAM),
+ FBNIC_BOUNDS(TMI),
+ FBNIC_BOUNDS(PTP),
+ FBNIC_BOUNDS(RXB),
+ FBNIC_BOUNDS(RPC),
+ FBNIC_BOUNDS(FAB),
+ FBNIC_BOUNDS(MASTER),
+ FBNIC_BOUNDS(PCS),
+ FBNIC_BOUNDS(RSFEC),
+ FBNIC_BOUNDS(MAC_MAC),
+ FBNIC_BOUNDS(SIG),
+ FBNIC_BOUNDS(PCIE_SS_COMPHY),
+ FBNIC_BOUNDS(PUL_USER),
+ FBNIC_BOUNDS(QUEUE),
+ FBNIC_BOUNDS(RPC_RAM),
+};
+
+#define FBNIC_RPC_TCAM_ACT_DW_PER_ENTRY 14
+#define FBNIC_RPC_TCAM_ACT_NUM_ENTRIES 64
+
+#define FBNIC_RPC_TCAM_MACDA_DW_PER_ENTRY 4
+#define FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES 32
+
+#define FBNIC_RPC_TCAM_OUTER_IPSRC_DW_PER_ENTRY 9
+#define FBNIC_RPC_TCAM_OUTER_IPSRC_NUM_ENTRIES 8
+
+#define FBNIC_RPC_TCAM_OUTER_IPDST_DW_PER_ENTRY 9
+#define FBNIC_RPC_TCAM_OUTER_IPDST_NUM_ENTRIES 8
+
+#define FBNIC_RPC_TCAM_IPSRC_DW_PER_ENTRY 9
+#define FBNIC_RPC_TCAM_IPSRC_NUM_ENTRIES 8
+
+#define FBNIC_RPC_TCAM_IPDST_DW_PER_ENTRY 9
+#define FBNIC_RPC_TCAM_IPDST_NUM_ENTRIES 8
+
+#define FBNIC_RPC_RSS_TBL_DW_PER_ENTRY 2
+#define FBNIC_RPC_RSS_TBL_NUM_ENTRIES 256
+
+static void fbnic_csr_get_regs_rpc_ram(struct fbnic_dev *fbd, u32 **data_p)
+{
+ u32 start = FBNIC_CSR_START_RPC_RAM;
+ u32 end = FBNIC_CSR_END_RPC_RAM;
+ u32 *data = *data_p;
+ u32 i, j;
+
+ *(data++) = start;
+ *(data++) = end;
+
+ /* FBNIC_RPC_TCAM_ACT */
+ for (i = 0; i < FBNIC_RPC_TCAM_ACT_NUM_ENTRIES; i++) {
+ for (j = 0; j < FBNIC_RPC_TCAM_ACT_DW_PER_ENTRY; j++)
+ *(data++) = rd32(fbd, FBNIC_RPC_TCAM_ACT(i, j));
+ }
+
+ /* FBNIC_RPC_TCAM_MACDA */
+ for (i = 0; i < FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES; i++) {
+ for (j = 0; j < FBNIC_RPC_TCAM_MACDA_DW_PER_ENTRY; j++)
+ *(data++) = rd32(fbd, FBNIC_RPC_TCAM_MACDA(i, j));
+ }
+
+ /* FBNIC_RPC_TCAM_OUTER_IPSRC */
+ for (i = 0; i < FBNIC_RPC_TCAM_OUTER_IPSRC_NUM_ENTRIES; i++) {
+ for (j = 0; j < FBNIC_RPC_TCAM_OUTER_IPSRC_DW_PER_ENTRY; j++)
+ *(data++) = rd32(fbd, FBNIC_RPC_TCAM_OUTER_IPSRC(i, j));
+ }
+
+ /* FBNIC_RPC_TCAM_OUTER_IPDST */
+ for (i = 0; i < FBNIC_RPC_TCAM_OUTER_IPDST_NUM_ENTRIES; i++) {
+ for (j = 0; j < FBNIC_RPC_TCAM_OUTER_IPDST_DW_PER_ENTRY; j++)
+ *(data++) = rd32(fbd, FBNIC_RPC_TCAM_OUTER_IPDST(i, j));
+ }
+
+ /* FBNIC_RPC_TCAM_IPSRC */
+ for (i = 0; i < FBNIC_RPC_TCAM_IPSRC_NUM_ENTRIES; i++) {
+ for (j = 0; j < FBNIC_RPC_TCAM_IPSRC_DW_PER_ENTRY; j++)
+ *(data++) = rd32(fbd, FBNIC_RPC_TCAM_IPSRC(i, j));
+ }
+
+ /* FBNIC_RPC_TCAM_IPDST */
+ for (i = 0; i < FBNIC_RPC_TCAM_IPDST_NUM_ENTRIES; i++) {
+ for (j = 0; j < FBNIC_RPC_TCAM_IPDST_DW_PER_ENTRY; j++)
+ *(data++) = rd32(fbd, FBNIC_RPC_TCAM_IPDST(i, j));
+ }
+
+ /* FBNIC_RPC_RSS_TBL */
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_NUM_ENTRIES; i++) {
+ for (j = 0; j < FBNIC_RPC_RSS_TBL_DW_PER_ENTRY; j++)
+ *(data++) = rd32(fbd, FBNIC_RPC_RSS_TBL(i, j));
+ }
+
+ *data_p = data;
+}
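Given the per-table sizes defined above, the RPC_RAM dump always emits a fixed number of dwords:

	/*   2             start/end markers
	 * + 64 * 14  =  896  ACT TCAM
	 * + 32 *  4  =  128  MACDA TCAM
	 * +  4 * 8*9 =  288  outer/inner IP source/destination TCAMs
	 * + 256 * 2  =  512  RSS table
	 * = 1826 dwords in total
	 */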
+
+void fbnic_csr_get_regs(struct fbnic_dev *fbd, u32 *data, u32 *regs_version)
+{
+ const struct fbnic_csr_bounds *bound;
+ u32 *start = data;
+ int i, j;
+
+ *regs_version = 1u;
+
+ /* Skip RPC_RAM section which cannot be dumped linearly */
+ for (i = 0, bound = fbnic_csr_sects;
+ i < ARRAY_SIZE(fbnic_csr_sects) - 1; i++, ++bound) {
+ *(data++) = bound->start;
+ *(data++) = bound->end - 1;
+ for (j = bound->start; j < bound->end; j++)
+ *(data++) = rd32(fbd, j);
+ }
+
+ /* Dump the RPC_RAM as special case registers */
+ fbnic_csr_get_regs_rpc_ram(fbd, &data);
+
+ WARN_ON(data - start != fbnic_csr_regs_len(fbd));
+}
+
+int fbnic_csr_regs_len(struct fbnic_dev *fbd)
+{
+ int i, len = 0;
+
+ /* Dump includes start and end information of each section
+ * which results in an offset of 2
+ */
+ for (i = 0; i < ARRAY_SIZE(fbnic_csr_sects); i++)
+ len += fbnic_csr_sects[i].end - fbnic_csr_sects[i].start + 2;
+
+ return len;
+}
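Each section therefore contributes its register count plus two marker dwords, matching the start/end values written out by fbnic_csr_get_regs(). For a hypothetical section spanning FBNIC_CSR_START_FOO = 0x0100 to FBNIC_CSR_END_FOO = 0x010f, the bounds table stores end = 0x0110, and its contribution is:

	/* (0x0110 - 0x0100) + 2 = 16 register values + 2 markers = 18 dwords */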
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
index 79cdd231d327..422265dc7abd 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
@@ -12,13 +12,28 @@
#define DESC_BIT(nr) BIT_ULL(nr)
#define DESC_GENMASK(h, l) GENMASK_ULL(h, l)
+#define FW_VER_CODE(_major, _minor, _patch, _build) ( \
+ FIELD_PREP(FBNIC_FW_CAP_RESP_VERSION_MAJOR, _major) | \
+ FIELD_PREP(FBNIC_FW_CAP_RESP_VERSION_MINOR, _minor) | \
+ FIELD_PREP(FBNIC_FW_CAP_RESP_VERSION_PATCH, _patch) | \
+ FIELD_PREP(FBNIC_FW_CAP_RESP_VERSION_BUILD, _build))
+
/* Defines the minimum firmware version required by the driver */
-#define MIN_FW_MAJOR_VERSION 0
-#define MIN_FW_MINOR_VERSION 10
-#define MIN_FW_BUILD_VERSION 6
-#define MIN_FW_VERSION_CODE (MIN_FW_MAJOR_VERSION * (1u << 24) + \
- MIN_FW_MINOR_VERSION * (1u << 16) + \
- MIN_FW_BUILD_VERSION)
+#define MIN_FW_VER_CODE FW_VER_CODE(0, 10, 6, 0)
+
+/* Defines the minimum firmware version required for firmware logs */
+#define MIN_FW_VER_CODE_LOG FW_VER_CODE(0, 12, 9, 0)
+
+/* Driver can request that firmware sends all cached logs in bulk. Older
+ * firmware also exposes this feature, but it has a bug: it tries to send
+ * 30 log messages per mailbox message, which overflows and floods the
+ * mailbox and results in a kernel warning about corrupt mailbox messages.
+ *
+ * If the firmware is new enough, only request historical logs when the
+ * log buffer is empty, to prevent duplicate logs.
+ */
+#define MIN_FW_VER_CODE_HIST FW_VER_CODE(25, 5, 7, 0)
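FW_VER_CODE() packs major/minor/patch/build into one integer so that minimum-version gates reduce to plain integer comparisons, assuming the major number lands in the most significant field as the old MIN_FW_VERSION_CODE arithmetic did. A hypothetical example (the version source shown is illustrative, not the driver's actual API):

	u32 running_ver = FW_VER_CODE(0, 12, 9, 0);	/* e.g. parsed from the FW capabilities response */

	bool logs_ok = running_ver >= MIN_FW_VER_CODE_LOG;	/* true */
	bool hist_ok = running_ver >= MIN_FW_VER_CODE_HIST;	/* false */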
#define PCI_DEVICE_ID_META_FBNIC_ASIC 0x0013
@@ -397,6 +412,23 @@ enum {
#define FBNIC_TCE_DROP_CTRL_TTI_FRM_DROP_EN CSR_BIT(1)
#define FBNIC_TCE_DROP_CTRL_TTI_TBI_DROP_EN CSR_BIT(2)
+#define FBNIC_TCE_TTI_CM_DROP_PKTS 0x0403e /* 0x100f8 */
+#define FBNIC_TCE_TTI_CM_DROP_BYTE_L 0x0403f /* 0x100fc */
+#define FBNIC_TCE_TTI_CM_DROP_BYTE_H 0x04040 /* 0x10100 */
+#define FBNIC_TCE_TTI_FRAME_DROP_PKTS 0x04041 /* 0x10104 */
+#define FBNIC_TCE_TTI_FRAME_DROP_BYTE_L 0x04042 /* 0x10108 */
+#define FBNIC_TCE_TTI_FRAME_DROP_BYTE_H 0x04043 /* 0x1010c */
+#define FBNIC_TCE_TBI_DROP_PKTS 0x04044 /* 0x10110 */
+#define FBNIC_TCE_TBI_DROP_BYTE_L 0x04045 /* 0x10114 */
+
+#define FBNIC_TCE_TCAM_IDX2DEST_MAP 0x0404A /* 0x10128 */
+#define FBNIC_TCE_TCAM_IDX2DEST_MAP_DEST_ID_0 CSR_GENMASK(3, 0)
+enum {
+ FBNIC_TCE_TCAM_DEST_MAC = 1,
+ FBNIC_TCE_TCAM_DEST_BMC = 2,
+ FBNIC_TCE_TCAM_DEST_FW = 4,
+};
+
#define FBNIC_TCE_TXB_TX_BMC_Q_CTRL 0x0404B /* 0x1012c */
#define FBNIC_TCE_TXB_BMC_DWRR_CTRL 0x0404C /* 0x10130 */
#define FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM0 CSR_GENMASK(7, 0)
@@ -407,11 +439,48 @@ enum {
#define FBNIC_TCE_TXB_BMC_DWRR_CTRL_EXT 0x0404F /* 0x1013c */
#define FBNIC_CSR_END_TCE 0x04050 /* CSR section delimiter */
+/* TCE RAM registers */
+#define FBNIC_CSR_START_TCE_RAM 0x04200 /* CSR section delimiter */
+#define FBNIC_TCE_RAM_TCAM(m, n) \
+ (0x04200 + 0x8 * (n) + (m)) /* 0x10800 + 32*n + 4*m */
+#define FBNIC_TCE_RAM_TCAM_MASK CSR_GENMASK(15, 0)
+#define FBNIC_TCE_RAM_TCAM_VALUE CSR_GENMASK(31, 16)
+#define FBNIC_TCE_RAM_TCAM3(n) (0x04218 + (n)) /* 0x010860 + 4*n */
+#define FBNIC_TCE_RAM_TCAM3_DEST_MASK CSR_GENMASK(5, 3)
+#define FBNIC_TCE_RAM_TCAM3_MCQ_MASK CSR_BIT(7)
+#define FBNIC_TCE_RAM_TCAM3_VALIDATE CSR_BIT(31)
+#define FBNIC_CSR_END_TCE_RAM 0x0421F /* CSR section delimiter */
+
/* TMI registers */
#define FBNIC_CSR_START_TMI 0x04400 /* CSR section delimiter */
#define FBNIC_TMI_SOP_PROT_CTRL 0x04400 /* 0x11000 */
#define FBNIC_TMI_DROP_CTRL 0x04401 /* 0x11004 */
#define FBNIC_TMI_DROP_CTRL_EN CSR_BIT(0)
+#define FBNIC_TMI_DROP_PKTS 0x04402 /* 0x11008 */
+#define FBNIC_TMI_DROP_BYTE_L 0x04403 /* 0x1100c */
+#define FBNIC_TMI_ILLEGAL_PTP_REQS 0x04409 /* 0x11024 */
+#define FBNIC_TMI_GOOD_PTP_TS 0x0440a /* 0x11028 */
+#define FBNIC_TMI_BAD_PTP_TS 0x0440b /* 0x1102c */
+#define FBNIC_TMI_STAT_TX_PACKET_1519_2047_BYTES_L \
+ 0x04433 /* 0x110cc */
+#define FBNIC_TMI_STAT_TX_PACKET_1519_2047_BYTES_H \
+ 0x04434 /* 0x110d0 */
+#define FBNIC_TMI_STAT_TX_PACKET_2048_4095_BYTES_L \
+ 0x04435 /* 0x110d4 */
+#define FBNIC_TMI_STAT_TX_PACKET_2048_4095_BYTES_H \
+ 0x04436 /* 0x110d8 */
+#define FBNIC_TMI_STAT_TX_PACKET_4096_8191_BYTES_L \
+ 0x04437 /* 0x110dc */
+#define FBNIC_TMI_STAT_TX_PACKET_4096_8191_BYTES_H \
+ 0x04438 /* 0x110e0 */
+#define FBNIC_TMI_STAT_TX_PACKET_8192_9216_BYTES_L \
+ 0x04439 /* 0x110e4 */
+#define FBNIC_TMI_STAT_TX_PACKET_8192_9216_BYTES_H \
+ 0x0443a /* 0x110e8 */
+#define FBNIC_TMI_STAT_TX_PACKET_9217_MAX_BYTES_L \
+ 0x0443b /* 0x110ec */
+#define FBNIC_TMI_STAT_TX_PACKET_9217_MAX_BYTES_H \
+ 0x0443c /* 0x110f0 */
#define FBNIC_CSR_END_TMI 0x0443f /* CSR section delimiter */
/* Precision Time Protocol Registers */
@@ -439,7 +508,7 @@ enum {
#define FBNIC_PTP_ADD_VAL_NS 0x04806 /* 0x12018 */
#define FBNIC_PTP_ADD_VAL_NS_MASK CSR_GENMASK(15, 0)
-#define FBNIC_PTP_ADD_VAL_SUBNS 0x04807 /* 0x1201c */
+#define FBNIC_PTP_ADD_VAL_SUBNS 0x04807 /* 0x1201c */
#define FBNIC_PTP_CTR_VAL_HI 0x04808 /* 0x12020 */
#define FBNIC_PTP_CTR_VAL_LO 0x04809 /* 0x12024 */
@@ -465,6 +534,14 @@ enum {
FBNIC_RXB_FIFO_INDICES = 8
};
+enum {
+ FBNIC_RXB_INTF_NET = 0,
+ FBNIC_RXB_INTF_RBT = 1,
+ /* Unused */
+ /* Unused */
+ FBNIC_RXB_INTF_INDICES = 4
+};
+
#define FBNIC_RXB_CT_SIZE(n) (0x08000 + (n)) /* 0x20000 + 4*n */
#define FBNIC_RXB_CT_SIZE_CNT 8
#define FBNIC_RXB_CT_SIZE_HEADER CSR_GENMASK(5, 0)
@@ -585,8 +662,11 @@ enum {
FBNIC_RPC_ACT_TBL0_DEST_EI = 4,
};
+#define FBNIC_RPC_ACT_TBL0_Q_SEL CSR_BIT(4)
+#define FBNIC_RPC_ACT_TBL0_Q_ID CSR_GENMASK(15, 8)
#define FBNIC_RPC_ACT_TBL0_DMA_HINT CSR_GENMASK(24, 16)
#define FBNIC_RPC_ACT_TBL0_TS_ENA CSR_BIT(28)
+#define FBNIC_RPC_ACT_TBL0_ACT_TBL_IDX CSR_BIT(29)
#define FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID CSR_BIT(30)
#define FBNIC_RPC_ACT_TBL1_DEFAULT 0x0840b /* 0x2102c */
@@ -618,7 +698,37 @@ enum {
FBNIC_RPC_RSS_KEY_DWORD_LEN * 32 - \
FBNIC_RPC_RSS_KEY_BIT_LEN)
+#define FBNIC_RPC_CNTR_TCP_OPT_ERR 0x0849e /* 0x21278 */
+#define FBNIC_RPC_CNTR_UNKN_ETYPE 0x0849f /* 0x2127c */
+#define FBNIC_RPC_CNTR_IPV4_FRAG 0x084a0 /* 0x21280 */
+#define FBNIC_RPC_CNTR_IPV6_FRAG 0x084a1 /* 0x21284 */
+#define FBNIC_RPC_CNTR_IPV4_ESP 0x084a2 /* 0x21288 */
+#define FBNIC_RPC_CNTR_IPV6_ESP 0x084a3 /* 0x2128c */
+#define FBNIC_RPC_CNTR_UNKN_EXT_HDR 0x084a4 /* 0x21290 */
+#define FBNIC_RPC_CNTR_OUT_OF_HDR_ERR 0x084a5 /* 0x21294 */
+#define FBNIC_RPC_CNTR_OVR_SIZE_ERR 0x084a6 /* 0x21298 */
+
#define FBNIC_RPC_TCAM_MACDA_VALIDATE 0x0852d /* 0x214b4 */
+#define FBNIC_RPC_STAT_RX_PACKET_1519_2047_BYTES_L \
+ 0x0855f /* 0x2157c */
+#define FBNIC_RPC_STAT_RX_PACKET_1519_2047_BYTES_H \
+ 0x08560 /* 0x21580 */
+#define FBNIC_RPC_STAT_RX_PACKET_2048_4095_BYTES_L \
+ 0x08561 /* 0x21584 */
+#define FBNIC_RPC_STAT_RX_PACKET_2048_4095_BYTES_H \
+ 0x08562 /* 0x21588 */
+#define FBNIC_RPC_STAT_RX_PACKET_4096_8191_BYTES_L \
+ 0x08563 /* 0x2158c */
+#define FBNIC_RPC_STAT_RX_PACKET_4096_8191_BYTES_H \
+ 0x08564 /* 0x21590 */
+#define FBNIC_RPC_STAT_RX_PACKET_8192_9216_BYTES_L \
+ 0x08565 /* 0x21594 */
+#define FBNIC_RPC_STAT_RX_PACKET_8192_9216_BYTES_H \
+ 0x08566 /* 0x21598 */
+#define FBNIC_RPC_STAT_RX_PACKET_9217_MAX_BYTES_L \
+ 0x08567 /* 0x2159c */
+#define FBNIC_RPC_STAT_RX_PACKET_9217_MAX_BYTES_H \
+ 0x08568 /* 0x215a0 */
#define FBNIC_CSR_END_RPC 0x0856b /* CSR section delimiter */
/* RPC RAM Registers */
@@ -645,6 +755,18 @@ enum {
#define FBNIC_RPC_TCAM_MACDA_VALUE CSR_GENMASK(15, 0)
#define FBNIC_RPC_TCAM_MACDA_MASK CSR_GENMASK(31, 16)
+#define FBNIC_RPC_TCAM_OUTER_IPSRC(m, n)\
+ (0x08c00 + 0x08 * (n) + (m)) /* 0x023000 + 32*n + 4*m */
+#define FBNIC_RPC_TCAM_IP_ADDR_VALUE CSR_GENMASK(15, 0)
+#define FBNIC_RPC_TCAM_IP_ADDR_MASK CSR_GENMASK(31, 16)
+
+#define FBNIC_RPC_TCAM_OUTER_IPDST(m, n)\
+ (0x08c48 + 0x08 * (n) + (m)) /* 0x023120 + 32*n + 4*m */
+#define FBNIC_RPC_TCAM_IPSRC(m, n)\
+ (0x08c90 + 0x08 * (n) + (m)) /* 0x023240 + 32*n + 4*m */
+#define FBNIC_RPC_TCAM_IPDST(m, n)\
+ (0x08cd8 + 0x08 * (n) + (m)) /* 0x023360 + 32*n + 4*m */
+
#define FBNIC_RPC_RSS_TBL(n, m) \
(0x08d20 + 0x100 * (n) + (m)) /* 0x023480 + 1024*n + 4*m */
#define FBNIC_RPC_RSS_TBL_COUNT 2
@@ -663,6 +785,30 @@ enum {
#define FBNIC_MASTER_SPARE_0 0x0C41B /* 0x3106c */
#define FBNIC_CSR_END_MASTER 0x0C452 /* CSR section delimiter */
+/* MAC PCS registers */
+#define FBNIC_CSR_START_PCS 0x10000 /* CSR section delimiter */
+#define FBNIC_PCS_PAGE(n) (0x10000 + 0x400 * (n)) /* 0x40000 + 1024*n */
+#define FBNIC_PCS(reg, n) ((reg) + FBNIC_PCS_PAGE(n))
+#define FBNIC_CSR_END_PCS 0x10668 /* CSR section delimiter */
+
+#define FBNIC_CSR_START_RSFEC 0x10800 /* CSR section delimiter */
+
+/* We have 4 RSFEC engines present in our part, however we are only using 1.
+ * As such, only CCW(0) and NCCW(0) will ever be non-zero and the other
+ * registers can be ignored.
+ */
+#define FBNIC_RSFEC_CCW_LO(n) (0x10802 + 8 * (n)) /* 0x42008 + 32*n */
+#define FBNIC_RSFEC_CCW_HI(n) (0x10803 + 8 * (n)) /* 0x4200c + 32*n */
+#define FBNIC_RSFEC_NCCW_LO(n) (0x10804 + 8 * (n)) /* 0x42010 + 32*n */
+#define FBNIC_RSFEC_NCCW_HI(n) (0x10805 + 8 * (n)) /* 0x42014 + 32*n */
+
+#define FBNIC_PCS_MAX_LANES 4
+#define FBNIC_PCS_SYMBLERR_LO(n) \
+ (0x10880 + 2 * (n)) /* 0x42200 + 8*n */
+#define FBNIC_PCS_SYMBLERR_HI(n) \
+ (0x10881 + 2 * (n)) /* 0x42204 + 8*n */
+#define FBNIC_CSR_END_RSFEC 0x108c8 /* CSR section delimiter */
+
/* MAC MAC registers (ASIC only) */
#define FBNIC_CSR_START_MAC_MAC 0x11000 /* CSR section delimiter */
#define FBNIC_MAC_COMMAND_CONFIG 0x11002 /* 0x44008 */
@@ -700,18 +846,18 @@ enum {
#define FBNIC_CSR_END_SIG 0x1184e /* CSR section delimiter */
#define FBNIC_CSR_START_MAC_STAT 0x11a00
+#define FBNIC_MAC_STAT_RX_XOFF_STB_L 0x11a00 /* 0x46800 */
+#define FBNIC_MAC_STAT_RX_XOFF_STB_H 0x11a01 /* 0x46804 */
+#define FBNIC_MAC_STAT_TX_XOFF_STB_L 0x11a04 /* 0x46810 */
+#define FBNIC_MAC_STAT_TX_XOFF_STB_H 0x11a05 /* 0x46814 */
#define FBNIC_MAC_STAT_RX_BYTE_COUNT_L 0x11a08 /* 0x46820 */
#define FBNIC_MAC_STAT_RX_BYTE_COUNT_H 0x11a09 /* 0x46824 */
-#define FBNIC_MAC_STAT_RX_ALIGN_ERROR_L \
- 0x11a0a /* 0x46828 */
-#define FBNIC_MAC_STAT_RX_ALIGN_ERROR_H \
- 0x11a0b /* 0x4682c */
+#define FBNIC_MAC_STAT_RX_ALIGN_ERROR_L 0x11a0a /* 0x46828 */
+#define FBNIC_MAC_STAT_RX_ALIGN_ERROR_H 0x11a0b /* 0x4682c */
#define FBNIC_MAC_STAT_RX_TOOLONG_L 0x11a0e /* 0x46838 */
#define FBNIC_MAC_STAT_RX_TOOLONG_H 0x11a0f /* 0x4683c */
-#define FBNIC_MAC_STAT_RX_RECEIVED_OK_L \
- 0x11a12 /* 0x46848 */
-#define FBNIC_MAC_STAT_RX_RECEIVED_OK_H \
- 0x11a13 /* 0x4684c */
+#define FBNIC_MAC_STAT_RX_RECEIVED_OK_L 0x11a12 /* 0x46848 */
+#define FBNIC_MAC_STAT_RX_RECEIVED_OK_H 0x11a13 /* 0x4684c */
#define FBNIC_MAC_STAT_RX_PACKET_BAD_FCS_L \
0x11a14 /* 0x46850 */
#define FBNIC_MAC_STAT_RX_PACKET_BAD_FCS_H \
@@ -722,27 +868,138 @@ enum {
#define FBNIC_MAC_STAT_RX_MULTICAST_H 0x11a1d /* 0x46874 */
#define FBNIC_MAC_STAT_RX_BROADCAST_L 0x11a1e /* 0x46878 */
#define FBNIC_MAC_STAT_RX_BROADCAST_H 0x11a1f /* 0x4687c */
+#define FBNIC_MAC_STAT_RX_UNDERSIZE_L 0x11a24 /* 0x46890 */
+#define FBNIC_MAC_STAT_RX_UNDERSIZE_H 0x11a25 /* 0x46894 */
+#define FBNIC_MAC_STAT_RX_PACKET_64_BYTES_L \
+ 0x11a26 /* 0x46898 */
+#define FBNIC_MAC_STAT_RX_PACKET_64_BYTES_H \
+ 0x11a27 /* 0x4689c */
+#define FBNIC_MAC_STAT_RX_PACKET_65_127_BYTES_L \
+ 0x11a28 /* 0x468a0 */
+#define FBNIC_MAC_STAT_RX_PACKET_65_127_BYTES_H \
+ 0x11a29 /* 0x468a4 */
+#define FBNIC_MAC_STAT_RX_PACKET_128_255_BYTES_L \
+ 0x11a2a /* 0x468a8 */
+#define FBNIC_MAC_STAT_RX_PACKET_128_255_BYTES_H \
+ 0x11a2b /* 0x468ac */
+#define FBNIC_MAC_STAT_RX_PACKET_256_511_BYTES_L \
+ 0x11a2c /* 0x468b0 */
+#define FBNIC_MAC_STAT_RX_PACKET_256_511_BYTES_H \
+ 0x11a2d /* 0x468b4 */
+#define FBNIC_MAC_STAT_RX_PACKET_512_1023_BYTES_L \
+ 0x11a2e /* 0x468b8 */
+#define FBNIC_MAC_STAT_RX_PACKET_512_1023_BYTES_H \
+ 0x11a2f /* 0x468bc */
+#define FBNIC_MAC_STAT_RX_PACKET_1024_1518_BYTES_L \
+ 0x11a30 /* 0x468c0 */
+#define FBNIC_MAC_STAT_RX_PACKET_1024_1518_BYTES_H \
+ 0x11a31 /* 0x468c4 */
+#define FBNIC_MAC_STAT_RX_PACKET_1519_MAX_BYTES_L \
+ 0x11a32 /* 0x468c8 */
+#define FBNIC_MAC_STAT_RX_PACKET_1519_MAX_BYTES_H \
+ 0x11a33 /* 0x468cc */
+#define FBNIC_MAC_STAT_RX_OVERSIZE_L 0x11a34 /* 0x468d0 */
+#define FBNIC_MAC_STAT_RX_OVERSIZE_H 0x11a35 /* 0x468d4 */
+#define FBNIC_MAC_STAT_RX_JABBER_L 0x11a36 /* 0x468d8 */
+#define FBNIC_MAC_STAT_RX_JABBER_H 0x11a37 /* 0x468dc */
+#define FBNIC_MAC_STAT_RX_FRAGMENT_L 0x11a38 /* 0x468e0 */
+#define FBNIC_MAC_STAT_RX_FRAGMENT_H 0x11a39 /* 0x468e4 */
+#define FBNIC_MAC_STAT_RX_CONTROL_FRAMES_L \
+ 0x11a3c /* 0x468f0 */
+#define FBNIC_MAC_STAT_RX_CONTROL_FRAMES_H \
+ 0x11a3d /* 0x468f4 */
#define FBNIC_MAC_STAT_TX_BYTE_COUNT_L 0x11a3e /* 0x468f8 */
#define FBNIC_MAC_STAT_TX_BYTE_COUNT_H 0x11a3f /* 0x468fc */
#define FBNIC_MAC_STAT_TX_TRANSMITTED_OK_L \
0x11a42 /* 0x46908 */
#define FBNIC_MAC_STAT_TX_TRANSMITTED_OK_H \
0x11a43 /* 0x4690c */
-#define FBNIC_MAC_STAT_TX_IFOUTERRORS_L \
- 0x11a46 /* 0x46918 */
-#define FBNIC_MAC_STAT_TX_IFOUTERRORS_H \
- 0x11a47 /* 0x4691c */
+#define FBNIC_MAC_STAT_TX_IFOUTERRORS_L 0x11a46 /* 0x46918 */
+#define FBNIC_MAC_STAT_TX_IFOUTERRORS_H 0x11a47 /* 0x4691c */
#define FBNIC_MAC_STAT_TX_MULTICAST_L 0x11a4a /* 0x46928 */
#define FBNIC_MAC_STAT_TX_MULTICAST_H 0x11a4b /* 0x4692c */
#define FBNIC_MAC_STAT_TX_BROADCAST_L 0x11a4c /* 0x46930 */
#define FBNIC_MAC_STAT_TX_BROADCAST_H 0x11a4d /* 0x46934 */
+#define FBNIC_MAC_STAT_TX_PACKET_64_BYTES_L \
+ 0x11a4e /* 0x46938 */
+#define FBNIC_MAC_STAT_TX_PACKET_64_BYTES_H \
+ 0x11a4f /* 0x4693c */
+#define FBNIC_MAC_STAT_TX_PACKET_65_127_BYTES_L \
+ 0x11a50 /* 0x46940 */
+#define FBNIC_MAC_STAT_TX_PACKET_65_127_BYTES_H \
+ 0x11a51 /* 0x46944 */
+#define FBNIC_MAC_STAT_TX_PACKET_128_255_BYTES_L \
+ 0x11a52 /* 0x46948 */
+#define FBNIC_MAC_STAT_TX_PACKET_128_255_BYTES_H \
+ 0x11a53 /* 0x4694c */
+#define FBNIC_MAC_STAT_TX_PACKET_256_511_BYTES_L \
+ 0x11a54 /* 0x46950 */
+#define FBNIC_MAC_STAT_TX_PACKET_256_511_BYTES_H \
+ 0x11a55 /* 0x46954 */
+#define FBNIC_MAC_STAT_TX_PACKET_512_1023_BYTES_L \
+ 0x11a56 /* 0x46958 */
+#define FBNIC_MAC_STAT_TX_PACKET_512_1023_BYTES_H \
+ 0x11a57 /* 0x4695c */
+#define FBNIC_MAC_STAT_TX_PACKET_1024_1518_BYTES_L \
+ 0x11a58 /* 0x46960 */
+#define FBNIC_MAC_STAT_TX_PACKET_1024_1518_BYTES_H \
+ 0x11a59 /* 0x46964 */
+#define FBNIC_MAC_STAT_TX_PACKET_1519_MAX_BYTES_L \
+ 0x11a5a /* 0x46968 */
+#define FBNIC_MAC_STAT_TX_PACKET_1519_MAX_BYTES_H \
+ 0x11a5b /* 0x4696c */
+#define FBNIC_MAC_STAT_TX_CONTROL_FRAMES_L \
+ 0x11a5e /* 0x46978 */
+#define FBNIC_MAC_STAT_TX_CONTROL_FRAMES_H \
+ 0x11a5f /* 0x4697c */
+
+/* PCIE Comphy Registers */
+#define FBNIC_CSR_START_PCIE_SS_COMPHY 0x2442e /* CSR section delimiter */
+#define FBNIC_CSR_END_PCIE_SS_COMPHY 0x279d7 /* CSR section delimiter */
+
/* PUL User Registers */
#define FBNIC_CSR_START_PUL_USER 0x31000 /* CSR section delimiter */
#define FBNIC_PUL_OB_TLP_HDR_AW_CFG 0x3103d /* 0xc40f4 */
+#define FBNIC_PUL_OB_TLP_HDR_AW_CFG_FLUSH CSR_BIT(19)
#define FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME CSR_BIT(18)
#define FBNIC_PUL_OB_TLP_HDR_AR_CFG 0x3103e /* 0xc40f8 */
+#define FBNIC_PUL_OB_TLP_HDR_AR_CFG_FLUSH CSR_BIT(19)
#define FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME CSR_BIT(18)
-#define FBNIC_CSR_END_PUL_USER 0x31080 /* CSR section delimiter */
+#define FBNIC_PUL_USER_OB_RD_TLP_CNT_31_0 \
+ 0x3106e /* 0xc41b8 */
+#define FBNIC_PUL_USER_OB_RD_DWORD_CNT_31_0 \
+ 0x31070 /* 0xc41c0 */
+#define FBNIC_PUL_USER_OB_RD_DWORD_CNT_63_32 \
+ 0x31071 /* 0xc41c4 */
+#define FBNIC_PUL_USER_OB_WR_TLP_CNT_31_0 \
+ 0x31072 /* 0xc41c8 */
+#define FBNIC_PUL_USER_OB_WR_TLP_CNT_63_32 \
+ 0x31073 /* 0xc41cc */
+#define FBNIC_PUL_USER_OB_WR_DWORD_CNT_31_0 \
+ 0x31074 /* 0xc41d0 */
+#define FBNIC_PUL_USER_OB_WR_DWORD_CNT_63_32 \
+ 0x31075 /* 0xc41d4 */
+#define FBNIC_PUL_USER_OB_CPL_TLP_CNT_31_0 \
+ 0x31076 /* 0xc41d8 */
+#define FBNIC_PUL_USER_OB_CPL_TLP_CNT_63_32 \
+ 0x31077 /* 0xc41dc */
+#define FBNIC_PUL_USER_OB_CPL_DWORD_CNT_31_0 \
+ 0x31078 /* 0xc41e0 */
+#define FBNIC_PUL_USER_OB_CPL_DWORD_CNT_63_32 \
+ 0x31079 /* 0xc41e4 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_31_0 \
+ 0x3107a /* 0xc41e8 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_63_32 \
+ 0x3107b /* 0xc41ec */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_31_0 \
+ 0x3107c /* 0xc41f0 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_63_32 \
+ 0x3107d /* 0xc41f4 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_31_0 \
+ 0x3107e /* 0xc41f8 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_63_32 \
+ 0x3107f /* 0xc41fc */
+#define FBNIC_CSR_END_PUL_USER 0x310ea /* CSR section delimiter */
/* Queue Registers
*
@@ -773,6 +1030,12 @@ enum {
#define FBNIC_QUEUE_TWQ1_BAL 0x022 /* 0x088 */
#define FBNIC_QUEUE_TWQ1_BAH 0x023 /* 0x08c */
+/* Tx Work Queue Statistics Registers */
+#define FBNIC_QUEUE_TWQ0_PKT_CNT 0x062 /* 0x188 */
+#define FBNIC_QUEUE_TWQ0_ERR_CNT 0x063 /* 0x18c */
+#define FBNIC_QUEUE_TWQ1_PKT_CNT 0x072 /* 0x1c8 */
+#define FBNIC_QUEUE_TWQ1_ERR_CNT 0x073 /* 0x1cc */
+
/* Tx Completion Queue Registers */
#define FBNIC_QUEUE_TCQ_CTL 0x080 /* 0x200 */
#define FBNIC_QUEUE_TCQ_CTL_RESET CSR_BIT(0)
@@ -862,6 +1125,12 @@ enum {
FBNIC_QUEUE_RDE_CTL1_PAYLD_PACK_RSS = 2,
};
+/* Rx Per CQ Statistics Counters */
+#define FBNIC_QUEUE_RDE_PKT_CNT 0x2a2 /* 0xa88 */
+#define FBNIC_QUEUE_RDE_PKT_ERR_CNT 0x2a3 /* 0xa8c */
+#define FBNIC_QUEUE_RDE_CQ_DROP_CNT 0x2a4 /* 0xa90 */
+#define FBNIC_QUEUE_RDE_BDQ_DROP_CNT 0x2a5 /* 0xa94 */
+
/* Rx Interrupt Manager Registers */
#define FBNIC_QUEUE_RIM_CTL 0x2c0 /* 0xb00 */
#define FBNIC_QUEUE_RIM_CTL_MSIX_MASK CSR_GENMASK(7, 0)
@@ -911,4 +1180,22 @@ enum {
#define FBNIC_IPC_MBX_DESC_FW_CMPL DESC_BIT(1)
#define FBNIC_IPC_MBX_DESC_HOST_CMPL DESC_BIT(0)
+/* OTP Registers
+ * These registers are accessible via a BAR4 offset and are written by CMRT
+ * on boot. For the write status, the register is split in half, with the OTP
+ * Write Data status occupying the top 16 bits and the ECC status occupying
+ * the bottom 16 bits.
+ */
+#define FBNIC_NS_OTP_STATUS 0x0021d
+#define FBNIC_NS_OTP_WRITE_STATUS 0x0021e
+
+#define FBNIC_NS_OTP_WRITE_DATA_STATUS_MASK CSR_GENMASK(31, 16)
+#define FBNIC_NS_OTP_WRITE_ECC_STATUS_MASK CSR_GENMASK(15, 0)
+
+#define FBNIC_REGS_VERSION CSR_GENMASK(31, 16)
+#define FBNIC_REGS_HW_TYPE CSR_GENMASK(15, 8)
+enum {
+ FBNIC_CSR_VERSION_V1_0_ASIC = 1,
+};
+
#endif /* _FBNIC_CSR_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c b/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c
new file mode 100644
index 000000000000..b7238dd967fe
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/debugfs.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+#include <linux/seq_file.h>
+
+#include "fbnic.h"
+
+static struct dentry *fbnic_dbg_root;
+
+static void fbnic_dbg_desc_break(struct seq_file *s, int i)
+{
+ while (i--)
+ seq_putc(s, '-');
+
+ seq_putc(s, '\n');
+}
+
+static int fbnic_dbg_mac_addr_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+ char hdr[80];
+ int i;
+
+ /* Generate Header */
+ snprintf(hdr, sizeof(hdr), "%3s %s %-17s %s\n",
+ "Idx", "S", "TCAM Bitmap", "Addr/Mask");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ for (i = 0; i < FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES; i++) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ seq_printf(s, "%02d %d %64pb %pm\n",
+ i, mac_addr->state, mac_addr->act_tcam,
+ mac_addr->value.addr8);
+ seq_printf(s, " %pm\n",
+ mac_addr->mask.addr8);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_mac_addr);
+
+static int fbnic_dbg_tce_tcam_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+ int i, tcam_idx = 0;
+ char hdr[80];
+
+ /* Generate Header */
+ snprintf(hdr, sizeof(hdr), "%3s %s %-17s %s\n",
+ "Idx", "S", "TCAM Bitmap", "Addr/Mask");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ for (i = 0; i < ARRAY_SIZE(fbd->mac_addr); i++) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ /* Verify BMC bit is set */
+ if (!test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam))
+ continue;
+
+ if (tcam_idx == FBNIC_TCE_TCAM_NUM_ENTRIES)
+ break;
+
+ seq_printf(s, "%02d %d %64pb %pm\n",
+ tcam_idx, mac_addr->state, mac_addr->act_tcam,
+ mac_addr->value.addr8);
+ seq_printf(s, " %pm\n",
+ mac_addr->mask.addr8);
+ tcam_idx++;
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_tce_tcam);
+
+static int fbnic_dbg_act_tcam_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+ char hdr[80];
+ int i;
+
+ /* Generate Header */
+ snprintf(hdr, sizeof(hdr), "%3s %s %-55s %-4s %s\n",
+ "Idx", "S", "Value/Mask", "RSS", "Dest");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ for (i = 0; i < FBNIC_RPC_TCAM_ACT_NUM_ENTRIES; i++) {
+ struct fbnic_act_tcam *act_tcam = &fbd->act_tcam[i];
+
+ seq_printf(s, "%02d %d %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %08x\n",
+ i, act_tcam->state,
+ act_tcam->value.tcam[10], act_tcam->value.tcam[9],
+ act_tcam->value.tcam[8], act_tcam->value.tcam[7],
+ act_tcam->value.tcam[6], act_tcam->value.tcam[5],
+ act_tcam->value.tcam[4], act_tcam->value.tcam[3],
+ act_tcam->value.tcam[2], act_tcam->value.tcam[1],
+ act_tcam->value.tcam[0], act_tcam->rss_en_mask,
+ act_tcam->dest);
+ seq_printf(s, " %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
+ act_tcam->mask.tcam[10], act_tcam->mask.tcam[9],
+ act_tcam->mask.tcam[8], act_tcam->mask.tcam[7],
+ act_tcam->mask.tcam[6], act_tcam->mask.tcam[5],
+ act_tcam->mask.tcam[4], act_tcam->mask.tcam[3],
+ act_tcam->mask.tcam[2], act_tcam->mask.tcam[1],
+ act_tcam->mask.tcam[0]);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_act_tcam);
+
+static int fbnic_dbg_ip_addr_show(struct seq_file *s,
+ struct fbnic_ip_addr *ip_addr)
+{
+ char hdr[80];
+ int i;
+
+ /* Generate Header */
+ snprintf(hdr, sizeof(hdr), "%3s %s %-17s %s %s\n",
+ "Idx", "S", "TCAM Bitmap", "V", "Addr/Mask");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES; i++, ip_addr++) {
+ seq_printf(s, "%02d %d %64pb %d %pi6\n",
+ i, ip_addr->state, ip_addr->act_tcam,
+ ip_addr->version, &ip_addr->value);
+ seq_printf(s, " %pi6\n",
+ &ip_addr->mask);
+ }
+
+ return 0;
+}
+
+static int fbnic_dbg_ip_src_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+
+ return fbnic_dbg_ip_addr_show(s, fbd->ip_src);
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_ip_src);
+
+static int fbnic_dbg_ip_dst_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+
+ return fbnic_dbg_ip_addr_show(s, fbd->ip_dst);
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_ip_dst);
+
+static int fbnic_dbg_ipo_src_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+
+ return fbnic_dbg_ip_addr_show(s, fbd->ipo_src);
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_ipo_src);
+
+static int fbnic_dbg_ipo_dst_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+
+ return fbnic_dbg_ip_addr_show(s, fbd->ipo_dst);
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_ipo_dst);
+
+static int fbnic_dbg_fw_log_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+ struct fbnic_fw_log_entry *entry;
+ unsigned long flags;
+
+ if (!fbnic_fw_log_ready(fbd))
+ return -ENXIO;
+
+ spin_lock_irqsave(&fbd->fw_log.lock, flags);
+
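+ /* The timestamp is a millisecond counter; the divisions below split it
+ * into days, hours, minutes, seconds and milliseconds, e.g. a value of
+ * 93784005 ms breaks down as 1 day, 02:03:04.005.
+ */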
+ list_for_each_entry_reverse(entry, &fbd->fw_log.entries, list) {
+ seq_printf(s, FBNIC_FW_LOG_FMT, entry->index,
+ (entry->timestamp / (MSEC_PER_SEC * 60 * 60 * 24)),
+ (entry->timestamp / (MSEC_PER_SEC * 60 * 60)) % 24,
+ ((entry->timestamp / (MSEC_PER_SEC * 60) % 60)),
+ ((entry->timestamp / MSEC_PER_SEC) % 60),
+ (entry->timestamp % MSEC_PER_SEC),
+ entry->msg);
+ }
+
+ spin_unlock_irqrestore(&fbd->fw_log.lock, flags);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_fw_log);
+
+static int fbnic_dbg_pcie_stats_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+
+ rtnl_lock();
+ fbnic_get_hw_stats(fbd);
+
+ seq_printf(s, "ob_rd_tlp: %llu\n", fbd->hw_stats.pcie.ob_rd_tlp.value);
+ seq_printf(s, "ob_rd_dword: %llu\n",
+ fbd->hw_stats.pcie.ob_rd_dword.value);
+ seq_printf(s, "ob_wr_tlp: %llu\n", fbd->hw_stats.pcie.ob_wr_tlp.value);
+ seq_printf(s, "ob_wr_dword: %llu\n",
+ fbd->hw_stats.pcie.ob_wr_dword.value);
+ seq_printf(s, "ob_cpl_tlp: %llu\n",
+ fbd->hw_stats.pcie.ob_cpl_tlp.value);
+ seq_printf(s, "ob_cpl_dword: %llu\n",
+ fbd->hw_stats.pcie.ob_cpl_dword.value);
+ seq_printf(s, "ob_rd_no_tag: %llu\n",
+ fbd->hw_stats.pcie.ob_rd_no_tag.value);
+ seq_printf(s, "ob_rd_no_cpl_cred: %llu\n",
+ fbd->hw_stats.pcie.ob_rd_no_cpl_cred.value);
+ seq_printf(s, "ob_rd_no_np_cred: %llu\n",
+ fbd->hw_stats.pcie.ob_rd_no_np_cred.value);
+ rtnl_unlock();
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_pcie_stats);
+
+void fbnic_dbg_fbd_init(struct fbnic_dev *fbd)
+{
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+ const char *name = pci_name(pdev);
+
+ fbd->dbg_fbd = debugfs_create_dir(name, fbnic_dbg_root);
+ debugfs_create_file("pcie_stats", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_pcie_stats_fops);
+ debugfs_create_file("mac_addr", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_mac_addr_fops);
+ debugfs_create_file("tce_tcam", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_tce_tcam_fops);
+ debugfs_create_file("act_tcam", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_act_tcam_fops);
+ debugfs_create_file("ip_src", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_ip_src_fops);
+ debugfs_create_file("ip_dst", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_ip_dst_fops);
+ debugfs_create_file("ipo_src", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_ipo_src_fops);
+ debugfs_create_file("ipo_dst", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_ipo_dst_fops);
+ debugfs_create_file("fw_log", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_fw_log_fops);
+}
+
+void fbnic_dbg_fbd_exit(struct fbnic_dev *fbd)
+{
+ debugfs_remove_recursive(fbd->dbg_fbd);
+ fbd->dbg_fbd = NULL;
+}
+
+void fbnic_dbg_init(void)
+{
+ fbnic_dbg_root = debugfs_create_dir(fbnic_driver_name, NULL);
+}
+
+void fbnic_dbg_exit(void)
+{
+ debugfs_remove_recursive(fbnic_dbg_root);
+ fbnic_dbg_root = NULL;
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
index 0072d612215e..b62b1d5b1453 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
@@ -3,10 +3,13 @@
#include <linux/unaligned.h>
#include <linux/pci.h>
+#include <linux/pldmfw.h>
#include <linux/types.h>
#include <net/devlink.h>
#include "fbnic.h"
+#include "fbnic_fw.h"
+#include "fbnic_tlv.h"
#define FBNIC_SN_STR_LEN 24
@@ -109,10 +112,512 @@ static int fbnic_devlink_info_get(struct devlink *devlink,
return 0;
}
+static bool
+fbnic_pldm_match_record(struct pldmfw *context, struct pldmfw_record *record)
+{
+ struct pldmfw_desc_tlv *desc;
+ u32 anti_rollback_ver = 0;
+ struct devlink *devlink;
+ struct fbnic_dev *fbd;
+ struct pci_dev *pdev;
+
+ /* First, use the standard PCI matching function */
+ if (!pldmfw_op_pci_match_record(context, record))
+ return false;
+
+ pdev = to_pci_dev(context->dev);
+ fbd = pci_get_drvdata(pdev);
+ devlink = priv_to_devlink(fbd);
+
+ /* If PCI match is successful, check for vendor-specific descriptors */
+ list_for_each_entry(desc, &record->descs, entry) {
+ if (desc->type != PLDM_DESC_ID_VENDOR_DEFINED)
+ continue;
+
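+ /* Expected vendor-defined descriptor layout, as parsed below:
+ * byte 0: string type, byte 1: title string length (15),
+ * bytes 2-16: "AntiRollbackVer", bytes 17-20: anti-rollback
+ * version as a little-endian u32.
+ */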
+ if (desc->size < 21 || desc->data[0] != 1 ||
+ desc->data[1] != 15)
+ continue;
+
+ if (memcmp(desc->data + 2, "AntiRollbackVer", 15) != 0)
+ continue;
+
+ anti_rollback_ver = get_unaligned_le32(desc->data + 17);
+ break;
+ }
+
+ /* Compare versions and return error if they do not match */
+ if (anti_rollback_ver < fbd->fw_cap.anti_rollback_version) {
+ char buf[128];
+
+ snprintf(buf, sizeof(buf),
+ "New firmware anti-rollback version (0x%x) is older than device version (0x%x)!",
+ anti_rollback_ver, fbd->fw_cap.anti_rollback_version);
+ devlink_flash_update_status_notify(devlink, buf,
+ "Anti-Rollback", 0, 0);
+
+ return false;
+ }
+
+ return true;
+}
+
+static int
+fbnic_flash_start(struct fbnic_dev *fbd, struct pldmfw_component *component)
+{
+ struct fbnic_fw_completion *cmpl;
+ int err;
+
+ cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ);
+ if (!cmpl)
+ return -ENOMEM;
+
+ err = fbnic_fw_xmit_fw_start_upgrade(fbd, cmpl,
+ component->identifier,
+ component->component_size);
+ if (err)
+ goto cmpl_free;
+
+ /* Wait for firmware to ack firmware upgrade start */
+ if (wait_for_completion_timeout(&cmpl->done, 10 * HZ))
+ err = cmpl->result;
+ else
+ err = -ETIMEDOUT;
+
+ fbnic_mbx_clear_cmpl(fbd, cmpl);
+cmpl_free:
+ fbnic_fw_put_cmpl(cmpl);
+
+ return err;
+}
+
+static int
+fbnic_flash_component(struct pldmfw *context,
+ struct pldmfw_component *component)
+{
+ const u8 *data = component->component_data;
+ const u32 size = component->component_size;
+ struct fbnic_fw_completion *cmpl;
+ const char *component_name;
+ struct devlink *devlink;
+ struct fbnic_dev *fbd;
+ struct pci_dev *pdev;
+ u32 offset = 0;
+ u32 length = 0;
+ char buf[32];
+ int err;
+
+ pdev = to_pci_dev(context->dev);
+ fbd = pci_get_drvdata(pdev);
+ devlink = priv_to_devlink(fbd);
+
+ switch (component->identifier) {
+ case QSPI_SECTION_CMRT:
+ component_name = "boot1";
+ break;
+ case QSPI_SECTION_CONTROL_FW:
+ component_name = "boot2";
+ break;
+ case QSPI_SECTION_OPTION_ROM:
+ component_name = "option-rom";
+ break;
+ default:
+ snprintf(buf, sizeof(buf), "Unknown component ID %u!",
+ component->identifier);
+ devlink_flash_update_status_notify(devlink, buf, NULL, 0,
+ size);
+ return -EINVAL;
+ }
+
+ /* Once firmware receives the request to start upgrading, it responds
+ * with two messages:
+ * 1. An ACK that it received the message, with a possible error code
+ * indicating that an upgrade is not currently possible.
+ * 2. A request for the first chunk of data.
+ *
+ * Set up the write completion before issuing the start message so
+ * the driver can catch both messages.
+ */
+ cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ);
+ if (!cmpl)
+ return -ENOMEM;
+
+ err = fbnic_mbx_set_cmpl(fbd, cmpl);
+ if (err)
+ goto cmpl_free;
+
+ devlink_flash_update_timeout_notify(devlink, "Initializing",
+ component_name, 15);
+ err = fbnic_flash_start(fbd, component);
+ if (err)
+ goto err_no_msg;
+
+ while (offset < size) {
+ if (!wait_for_completion_timeout(&cmpl->done, 15 * HZ)) {
+ err = -ETIMEDOUT;
+ break;
+ }
+
+ err = cmpl->result;
+ if (err)
+ break;
+
+ /* Verify firmware is requesting the next chunk in the seq. */
+ if (cmpl->u.fw_update.offset != offset + length) {
+ err = -EFAULT;
+ break;
+ }
+
+ offset = cmpl->u.fw_update.offset;
+ length = cmpl->u.fw_update.length;
+
+ if (length > TLV_MAX_DATA || offset + length > size) {
+ err = -EFAULT;
+ break;
+ }
+
+ devlink_flash_update_status_notify(devlink, "Flashing",
+ component_name,
+ offset, size);
+
+ /* Mailbox will set length to 0 once it receives the finish
+ * message.
+ */
+ if (!length)
+ continue;
+
+ reinit_completion(&cmpl->done);
+ err = fbnic_fw_xmit_fw_write_chunk(fbd, data, offset, length,
+ 0);
+ if (err)
+ break;
+ }
+
+ if (err) {
+ fbnic_fw_xmit_fw_write_chunk(fbd, NULL, 0, 0, err);
+err_no_msg:
+ snprintf(buf, sizeof(buf), "Mailbox encountered error %d!",
+ err);
+ devlink_flash_update_status_notify(devlink, buf,
+ component_name, 0, 0);
+ }
+
+ fbnic_mbx_clear_cmpl(fbd, cmpl);
+cmpl_free:
+ fbnic_fw_put_cmpl(cmpl);
+
+ return err;
+}
+
+static const struct pldmfw_ops fbnic_pldmfw_ops = {
+ .match_record = fbnic_pldm_match_record,
+ .flash_component = fbnic_flash_component,
+};
+
+static int
+fbnic_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_dev *fbd = devlink_priv(devlink);
+ const struct firmware *fw = params->fw;
+ struct device *dev = fbd->dev;
+ struct pldmfw context;
+ char *err_msg;
+ int err;
+
+ context.ops = &fbnic_pldmfw_ops;
+ context.dev = dev;
+
+ err = pldmfw_flash_image(&context, fw);
+ if (err) {
+ switch (err) {
+ case -EINVAL:
+ err_msg = "Invalid image";
+ break;
+ case -EOPNOTSUPP:
+ err_msg = "Unsupported image";
+ break;
+ case -ENOMEM:
+ err_msg = "Out of memory";
+ break;
+ case -EFAULT:
+ err_msg = "Invalid header";
+ break;
+ case -ENOENT:
+ err_msg = "No matching record";
+ break;
+ case -ENODEV:
+ err_msg = "No matching device";
+ break;
+ case -ETIMEDOUT:
+ err_msg = "Timed out waiting for reply";
+ break;
+ default:
+ err_msg = "Unknown error";
+ break;
+ }
+
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Failed to flash PLDM Image: %s (error: %d)",
+ err_msg, err);
+ }
+
+ return err;
+}
+
static const struct devlink_ops fbnic_devlink_ops = {
- .info_get = fbnic_devlink_info_get,
+ .info_get = fbnic_devlink_info_get,
+ .flash_update = fbnic_devlink_flash_update,
};
+static int fbnic_fw_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_dev *fbd = devlink_health_reporter_priv(reporter);
+ u32 offset, index, index_count, length, size;
+ struct fbnic_fw_completion *fw_cmpl;
+ u8 *dump_data, **data;
+ int err;
+
+ fw_cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_COREDUMP_GET_INFO_RESP);
+ if (!fw_cmpl)
+ return -ENOMEM;
+
+ err = fbnic_fw_xmit_coredump_info_msg(fbd, fw_cmpl, true);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to transmit core dump info msg");
+ goto cmpl_free;
+ }
+ if (!wait_for_completion_timeout(&fw_cmpl->done, 2 * HZ)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Timed out waiting on core dump info");
+ err = -ETIMEDOUT;
+ goto cmpl_cleanup;
+ }
+
+ size = fw_cmpl->u.coredump_info.size;
+ err = fw_cmpl->result;
+
+ fbnic_mbx_clear_cmpl(fbd, fw_cmpl);
+ fbnic_fw_put_cmpl(fw_cmpl);
+
+ /* Handle error returned by firmware */
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware core dump returned error");
+ return err;
+ }
+ if (!size) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Firmware core dump returned size 0");
+ return -EIO;
+ }
+
+ /* Read the dump; we can only transfer TLV_MAX_DATA at a time */
+ index_count = DIV_ROUND_UP(size, TLV_MAX_DATA);
+
+ fw_cmpl = __fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_COREDUMP_READ_RESP,
+ sizeof(void *) * index_count + size);
+ if (!fw_cmpl)
+ return -ENOMEM;
+
+ /* Populate pointer table w/ pointer offsets */
+ dump_data = (void *)&fw_cmpl->u.coredump.data[index_count];
+ data = fw_cmpl->u.coredump.data;
+ fw_cmpl->u.coredump.size = size;
+ fw_cmpl->u.coredump.stride = TLV_MAX_DATA;
+
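+ /* The mailbox handler is expected to clear each data[] pointer once the
+ * corresponding chunk has been copied in, so a slot that is still
+ * non-NULL after the wait below means that chunk never arrived.
+ */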
+ for (index = 0; index < index_count; index++) {
+ /* First iteration installs completion */
+ struct fbnic_fw_completion *cmpl_arg = index ? NULL : fw_cmpl;
+
+ offset = index * TLV_MAX_DATA;
+ length = min(size - offset, TLV_MAX_DATA);
+
+ data[index] = dump_data + offset;
+ err = fbnic_fw_xmit_coredump_read_msg(fbd, cmpl_arg,
+ offset, length);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to transmit core dump msg");
+ if (cmpl_arg)
+ goto cmpl_free;
+ else
+ goto cmpl_cleanup;
+ }
+
+ if (wait_for_completion_timeout(&fw_cmpl->done, 2 * HZ)) {
+ reinit_completion(&fw_cmpl->done);
+ } else {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Timed out waiting on core dump (%d/%d)",
+ index + 1, index_count);
+ err = -ETIMEDOUT;
+ goto cmpl_cleanup;
+ }
+
+ /* If we didn't see the reply, record the chunk as incomplete */
+ if (fw_cmpl->u.coredump.data[index]) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "No data for core dump chunk (%d/%d)",
+ index + 1, index_count);
+ err = -EIO;
+ goto cmpl_cleanup;
+ }
+ }
+
+ devlink_fmsg_binary_pair_nest_start(fmsg, "FW coredump");
+
+ for (offset = 0; offset < size; offset += length) {
+ length = min_t(u32, size - offset, TLV_MAX_DATA);
+
+ devlink_fmsg_binary_put(fmsg, dump_data + offset, length);
+ }
+
+ devlink_fmsg_binary_pair_nest_end(fmsg);
+
+cmpl_cleanup:
+ fbnic_mbx_clear_cmpl(fbd, fw_cmpl);
+cmpl_free:
+ fbnic_fw_put_cmpl(fw_cmpl);
+
+ return err;
+}
+
+static int
+fbnic_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_dev *fbd = devlink_health_reporter_priv(reporter);
+ u32 sec, msec;
+
+ /* Device is most likely down, we're not exchanging heartbeats */
+ if (!fbd->prev_firmware_time)
+ return 0;
+
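+ /* firmware_time is reported in milliseconds; split it into whole seconds
+ * plus a millisecond remainder for the report.
+ */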
+ sec = div_u64_rem(fbd->firmware_time, MSEC_PER_SEC, &msec);
+
+ devlink_fmsg_pair_nest_start(fmsg, "last_heartbeat");
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_pair_nest_start(fmsg, "fw_uptime");
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_u32_pair_put(fmsg, "sec", sec);
+ devlink_fmsg_u32_pair_put(fmsg, "msec", msec);
+ devlink_fmsg_obj_nest_end(fmsg);
+ devlink_fmsg_pair_nest_end(fmsg);
+ devlink_fmsg_obj_nest_end(fmsg);
+ devlink_fmsg_pair_nest_end(fmsg);
+
+ return 0;
+}
+
+void __printf(2, 3)
+fbnic_devlink_fw_report(struct fbnic_dev *fbd, const char *format, ...)
+{
+ char msg[FBNIC_FW_LOG_MAX_SIZE];
+ va_list args;
+
+ va_start(args, format);
+ vsnprintf(msg, FBNIC_FW_LOG_MAX_SIZE, format, args);
+ va_end(args);
+
+ devlink_health_report(fbd->fw_reporter, msg, fbd);
+ if (fbnic_fw_log_ready(fbd))
+ fbnic_fw_log_write(fbd, 0, fbd->firmware_time, msg);
+}
+
+static const struct devlink_health_reporter_ops fbnic_fw_ops = {
+ .name = "fw",
+ .dump = fbnic_fw_reporter_dump,
+ .diagnose = fbnic_fw_reporter_diagnose,
+};
+
+static u32 fbnic_read_otp_status(struct fbnic_dev *fbd)
+{
+ return fbnic_fw_rd32(fbd, FBNIC_NS_OTP_STATUS);
+}
+
+static int
+fbnic_otp_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_dev *fbd = devlink_health_reporter_priv(reporter);
+ u32 otp_status, otp_write_status, m;
+
+ otp_status = fbnic_read_otp_status(fbd);
+ otp_write_status = fbnic_fw_rd32(fbd, FBNIC_NS_OTP_WRITE_STATUS);
+
+ /* Dump OTP status */
+ devlink_fmsg_pair_nest_start(fmsg, "OTP");
+ devlink_fmsg_obj_nest_start(fmsg);
+
+ devlink_fmsg_u32_pair_put(fmsg, "Status", otp_status);
+
+ /* Extract OTP Write Data status */
+ m = FBNIC_NS_OTP_WRITE_DATA_STATUS_MASK;
+ devlink_fmsg_u32_pair_put(fmsg, "Data",
+ FIELD_GET(m, otp_write_status));
+
+ /* Extract OTP Write ECC status */
+ m = FBNIC_NS_OTP_WRITE_ECC_STATUS_MASK;
+ devlink_fmsg_u32_pair_put(fmsg, "ECC",
+ FIELD_GET(m, otp_write_status));
+
+ devlink_fmsg_obj_nest_end(fmsg);
+ devlink_fmsg_pair_nest_end(fmsg);
+
+ return 0;
+}
+
+void fbnic_devlink_otp_check(struct fbnic_dev *fbd, const char *msg)
+{
+ /* Check if there is anything to report */
+ if (!fbnic_read_otp_status(fbd))
+ return;
+
+ devlink_health_report(fbd->otp_reporter, msg, fbd);
+ if (fbnic_fw_log_ready(fbd))
+ fbnic_fw_log_write(fbd, 0, fbd->firmware_time, msg);
+}
+
+static const struct devlink_health_reporter_ops fbnic_otp_ops = {
+ .name = "otp",
+ .dump = fbnic_otp_reporter_dump,
+};
+
+int fbnic_devlink_health_create(struct fbnic_dev *fbd)
+{
+ fbd->fw_reporter = devlink_health_reporter_create(priv_to_devlink(fbd),
+ &fbnic_fw_ops, fbd);
+ if (IS_ERR(fbd->fw_reporter)) {
+ dev_warn(fbd->dev,
+ "Failed to create FW fault reporter: %pe\n",
+ fbd->fw_reporter);
+ return PTR_ERR(fbd->fw_reporter);
+ }
+
+ fbd->otp_reporter = devlink_health_reporter_create(priv_to_devlink(fbd),
+ &fbnic_otp_ops, fbd);
+ if (IS_ERR(fbd->otp_reporter)) {
+ devlink_health_reporter_destroy(fbd->fw_reporter);
+ dev_warn(fbd->dev,
+ "Failed to create OTP fault reporter: %pe\n",
+ fbd->otp_reporter);
+ return PTR_ERR(fbd->otp_reporter);
+ }
+
+ return 0;
+}
+
+void fbnic_devlink_health_destroy(struct fbnic_dev *fbd)
+{
+ devlink_health_reporter_destroy(fbd->otp_reporter);
+ devlink_health_reporter_destroy(fbd->fw_reporter);
+}
+
void fbnic_devlink_free(struct fbnic_dev *fbd)
{
struct devlink *devlink = priv_to_devlink(fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
index 1117d5a32867..693ebdf38705 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
@@ -1,11 +1,1588 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
#include <linux/ethtool.h>
+#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
+#include <net/ipv6.h>
#include "fbnic.h"
#include "fbnic_netdev.h"
#include "fbnic_tlv.h"
+struct fbnic_stat {
+ u8 string[ETH_GSTRING_LEN];
+ unsigned int size;
+ unsigned int offset;
+};
+
+#define FBNIC_STAT_FIELDS(type, name, stat) { \
+ .string = name, \
+ .size = sizeof_field(struct type, stat), \
+ .offset = offsetof(struct type, stat), \
+}
+
+/* Hardware statistics not captured in rtnl_link_stats */
+#define FBNIC_HW_STAT(name, stat) \
+ FBNIC_STAT_FIELDS(fbnic_hw_stats, name, stat)
+
+static const struct fbnic_stat fbnic_gstrings_hw_stats[] = {
+ /* TTI */
+ FBNIC_HW_STAT("tti_cm_drop_frames", tti.cm_drop.frames),
+ FBNIC_HW_STAT("tti_cm_drop_bytes", tti.cm_drop.bytes),
+ FBNIC_HW_STAT("tti_frame_drop_frames", tti.frame_drop.frames),
+ FBNIC_HW_STAT("tti_frame_drop_bytes", tti.frame_drop.bytes),
+ FBNIC_HW_STAT("tti_tbi_drop_frames", tti.tbi_drop.frames),
+ FBNIC_HW_STAT("tti_tbi_drop_bytes", tti.tbi_drop.bytes),
+
+ /* TMI */
+ FBNIC_HW_STAT("ptp_illegal_req", tmi.ptp_illegal_req),
+ FBNIC_HW_STAT("ptp_good_ts", tmi.ptp_good_ts),
+ FBNIC_HW_STAT("ptp_bad_ts", tmi.ptp_bad_ts),
+
+ /* RPC */
+ FBNIC_HW_STAT("rpc_unkn_etype", rpc.unkn_etype),
+ FBNIC_HW_STAT("rpc_unkn_ext_hdr", rpc.unkn_ext_hdr),
+ FBNIC_HW_STAT("rpc_ipv4_frag", rpc.ipv4_frag),
+ FBNIC_HW_STAT("rpc_ipv6_frag", rpc.ipv6_frag),
+ FBNIC_HW_STAT("rpc_ipv4_esp", rpc.ipv4_esp),
+ FBNIC_HW_STAT("rpc_ipv6_esp", rpc.ipv6_esp),
+ FBNIC_HW_STAT("rpc_tcp_opt_err", rpc.tcp_opt_err),
+ FBNIC_HW_STAT("rpc_out_of_hdr_err", rpc.out_of_hdr_err),
+};
+
+#define FBNIC_HW_FIXED_STATS_LEN ARRAY_SIZE(fbnic_gstrings_hw_stats)
+
+#define FBNIC_RXB_ENQUEUE_STAT(name, stat) \
+ FBNIC_STAT_FIELDS(fbnic_rxb_enqueue_stats, name, stat)
+
+static const struct fbnic_stat fbnic_gstrings_rxb_enqueue_stats[] = {
+ FBNIC_RXB_ENQUEUE_STAT("rxb_integrity_err%u", integrity_err),
+ FBNIC_RXB_ENQUEUE_STAT("rxb_mac_err%u", mac_err),
+ FBNIC_RXB_ENQUEUE_STAT("rxb_parser_err%u", parser_err),
+ FBNIC_RXB_ENQUEUE_STAT("rxb_frm_err%u", frm_err),
+
+ FBNIC_RXB_ENQUEUE_STAT("rxb_drbo%u_frames", drbo.frames),
+ FBNIC_RXB_ENQUEUE_STAT("rxb_drbo%u_bytes", drbo.bytes),
+};
+
+#define FBNIC_HW_RXB_ENQUEUE_STATS_LEN \
+ ARRAY_SIZE(fbnic_gstrings_rxb_enqueue_stats)
+
+#define FBNIC_RXB_FIFO_STAT(name, stat) \
+ FBNIC_STAT_FIELDS(fbnic_rxb_fifo_stats, name, stat)
+
+static const struct fbnic_stat fbnic_gstrings_rxb_fifo_stats[] = {
+ FBNIC_RXB_FIFO_STAT("rxb_fifo%u_drop", trans_drop),
+ FBNIC_RXB_FIFO_STAT("rxb_fifo%u_dropped_frames", drop.frames),
+ FBNIC_RXB_FIFO_STAT("rxb_fifo%u_ecn", trans_ecn),
+ FBNIC_RXB_FIFO_STAT("rxb_fifo%u_level", level),
+};
+
+#define FBNIC_HW_RXB_FIFO_STATS_LEN ARRAY_SIZE(fbnic_gstrings_rxb_fifo_stats)
+
+#define FBNIC_RXB_DEQUEUE_STAT(name, stat) \
+ FBNIC_STAT_FIELDS(fbnic_rxb_dequeue_stats, name, stat)
+
+static const struct fbnic_stat fbnic_gstrings_rxb_dequeue_stats[] = {
+ FBNIC_RXB_DEQUEUE_STAT("rxb_intf%u_frames", intf.frames),
+ FBNIC_RXB_DEQUEUE_STAT("rxb_intf%u_bytes", intf.bytes),
+ FBNIC_RXB_DEQUEUE_STAT("rxb_pbuf%u_frames", pbuf.frames),
+ FBNIC_RXB_DEQUEUE_STAT("rxb_pbuf%u_bytes", pbuf.bytes),
+};
+
+#define FBNIC_HW_RXB_DEQUEUE_STATS_LEN \
+ ARRAY_SIZE(fbnic_gstrings_rxb_dequeue_stats)
+
+#define FBNIC_HW_Q_STAT(name, stat) \
+ FBNIC_STAT_FIELDS(fbnic_hw_q_stats, name, stat.value)
+
+static const struct fbnic_stat fbnic_gstrings_hw_q_stats[] = {
+ FBNIC_HW_Q_STAT("rde_%u_pkt_err", rde_pkt_err),
+ FBNIC_HW_Q_STAT("rde_%u_pkt_cq_drop", rde_pkt_cq_drop),
+ FBNIC_HW_Q_STAT("rde_%u_pkt_bdq_drop", rde_pkt_bdq_drop),
+};
+
+#define FBNIC_HW_Q_STATS_LEN ARRAY_SIZE(fbnic_gstrings_hw_q_stats)
+#define FBNIC_HW_STATS_LEN \
+ (FBNIC_HW_FIXED_STATS_LEN + \
+ FBNIC_HW_RXB_ENQUEUE_STATS_LEN * FBNIC_RXB_ENQUEUE_INDICES + \
+ FBNIC_HW_RXB_FIFO_STATS_LEN * FBNIC_RXB_FIFO_INDICES + \
+ FBNIC_HW_RXB_DEQUEUE_STATS_LEN * FBNIC_RXB_DEQUEUE_INDICES + \
+ FBNIC_HW_Q_STATS_LEN * FBNIC_MAX_QUEUES)
+
+#define FBNIC_QUEUE_STAT(name, stat) \
+ FBNIC_STAT_FIELDS(fbnic_ring, name, stat)
+
+static const struct fbnic_stat fbnic_gstrings_xdp_stats[] = {
+ FBNIC_QUEUE_STAT("xdp_tx_queue_%u_packets", stats.packets),
+ FBNIC_QUEUE_STAT("xdp_tx_queue_%u_bytes", stats.bytes),
+ FBNIC_QUEUE_STAT("xdp_tx_queue_%u_dropped", stats.dropped),
+};
+
+#define FBNIC_XDP_STATS_LEN ARRAY_SIZE(fbnic_gstrings_xdp_stats)
+
+#define FBNIC_STATS_LEN \
+ (FBNIC_HW_STATS_LEN + FBNIC_XDP_STATS_LEN * FBNIC_MAX_XDPQS)
+
+static void
+fbnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ fbnic_get_fw_ver_commit_str(fbd, drvinfo->fw_version,
+ sizeof(drvinfo->fw_version));
+}
+
+static int fbnic_get_regs_len(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ return fbnic_csr_regs_len(fbn->fbd) * sizeof(u32);
+}
+
+static void fbnic_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *data)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ fbnic_csr_get_regs(fbn->fbd, data, &regs->version);
+}
+
+static struct fbnic_net *fbnic_clone_create(struct fbnic_net *orig)
+{
+ struct fbnic_net *clone;
+
+ clone = kmemdup(orig, sizeof(*orig), GFP_KERNEL);
+ if (!clone)
+ return NULL;
+
+ memset(clone->tx, 0, sizeof(clone->tx));
+ memset(clone->rx, 0, sizeof(clone->rx));
+ memset(clone->napi, 0, sizeof(clone->napi));
+ return clone;
+}
+
+static void fbnic_clone_swap_cfg(struct fbnic_net *orig,
+ struct fbnic_net *clone)
+{
+ swap(clone->rcq_size, orig->rcq_size);
+ swap(clone->hpq_size, orig->hpq_size);
+ swap(clone->ppq_size, orig->ppq_size);
+ swap(clone->txq_size, orig->txq_size);
+ swap(clone->num_rx_queues, orig->num_rx_queues);
+ swap(clone->num_tx_queues, orig->num_tx_queues);
+ swap(clone->num_napi, orig->num_napi);
+ swap(clone->hds_thresh, orig->hds_thresh);
+}
+
+static void fbnic_aggregate_vector_counters(struct fbnic_net *fbn,
+ struct fbnic_napi_vector *nv)
+{
+ int i, j;
+
+ for (i = 0; i < nv->txt_count; i++) {
+ fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].sub0);
+ fbnic_aggregate_ring_xdp_counters(fbn, &nv->qt[i].sub1);
+ fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].cmpl);
+ }
+
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ fbnic_aggregate_ring_bdq_counters(fbn, &nv->qt[i].sub0);
+ fbnic_aggregate_ring_bdq_counters(fbn, &nv->qt[i].sub1);
+ fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].cmpl);
+ }
+}
+
+static void fbnic_clone_swap(struct fbnic_net *orig,
+ struct fbnic_net *clone)
+{
+ struct fbnic_dev *fbd = orig->fbd;
+ unsigned int i;
+
+ for (i = 0; i < max(clone->num_napi, orig->num_napi); i++)
+ fbnic_synchronize_irq(fbd, FBNIC_NON_NAPI_VECTORS + i);
+ for (i = 0; i < orig->num_napi; i++)
+ fbnic_aggregate_vector_counters(orig, orig->napi[i]);
+
+ fbnic_clone_swap_cfg(orig, clone);
+
+ for (i = 0; i < ARRAY_SIZE(orig->napi); i++)
+ swap(clone->napi[i], orig->napi[i]);
+ for (i = 0; i < ARRAY_SIZE(orig->tx); i++)
+ swap(clone->tx[i], orig->tx[i]);
+ for (i = 0; i < ARRAY_SIZE(orig->rx); i++)
+ swap(clone->rx[i], orig->rx[i]);
+}
+
+static void fbnic_clone_free(struct fbnic_net *clone)
+{
+ kfree(clone);
+}
+
+static int fbnic_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ ec->tx_coalesce_usecs = fbn->tx_usecs;
+ ec->rx_coalesce_usecs = fbn->rx_usecs;
+ ec->rx_max_coalesced_frames = fbn->rx_max_frames;
+
+ return 0;
+}
+
+static int fbnic_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ /* Verify against hardware limits */
+ if (ec->rx_coalesce_usecs > FIELD_MAX(FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT)) {
+ NL_SET_ERR_MSG_MOD(extack, "rx_usecs is above device max");
+ return -EINVAL;
+ }
+ if (ec->tx_coalesce_usecs > FIELD_MAX(FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT)) {
+ NL_SET_ERR_MSG_MOD(extack, "tx_usecs is above device max");
+ return -EINVAL;
+ }
+ if (ec->rx_max_coalesced_frames >
+ FIELD_MAX(FBNIC_QUEUE_RIM_THRESHOLD_RCD_MASK) /
+ FBNIC_MIN_RXD_PER_FRAME) {
+ NL_SET_ERR_MSG_MOD(extack, "rx_frames is above device max");
+ return -EINVAL;
+ }
+
+ fbn->tx_usecs = ec->tx_coalesce_usecs;
+ fbn->rx_usecs = ec->rx_coalesce_usecs;
+ fbn->rx_max_frames = ec->rx_max_coalesced_frames;
+
+ if (netif_running(netdev)) {
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
+
+ fbnic_config_txrx_usecs(nv, 0);
+ fbnic_config_rx_frames(nv);
+ }
+ }
+
+ return 0;
+}
+
+static void
+fbnic_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ ring->rx_max_pending = FBNIC_QUEUE_SIZE_MAX;
+ ring->rx_mini_max_pending = FBNIC_QUEUE_SIZE_MAX;
+ ring->rx_jumbo_max_pending = FBNIC_QUEUE_SIZE_MAX;
+ ring->tx_max_pending = FBNIC_QUEUE_SIZE_MAX;
+
+ ring->rx_pending = fbn->rcq_size;
+ ring->rx_mini_pending = fbn->hpq_size;
+ ring->rx_jumbo_pending = fbn->ppq_size;
+ ring->tx_pending = fbn->txq_size;
+
+ kernel_ring->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
+ kernel_ring->hds_thresh_max = FBNIC_HDS_THRESH_MAX;
+ kernel_ring->hds_thresh = fbn->hds_thresh;
+}
+
+static void fbnic_set_rings(struct fbnic_net *fbn,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring)
+{
+ fbn->rcq_size = ring->rx_pending;
+ fbn->hpq_size = ring->rx_mini_pending;
+ fbn->ppq_size = ring->rx_jumbo_pending;
+ fbn->txq_size = ring->tx_pending;
+ fbn->hds_thresh = kernel_ring->hds_thresh;
+}
+
+static int
+fbnic_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_net *clone;
+ int err;
+
+ ring->rx_pending = roundup_pow_of_two(ring->rx_pending);
+ ring->rx_mini_pending = roundup_pow_of_two(ring->rx_mini_pending);
+ ring->rx_jumbo_pending = roundup_pow_of_two(ring->rx_jumbo_pending);
+ ring->tx_pending = roundup_pow_of_two(ring->tx_pending);
+
+ /* These are absolute minimums that allow the device and driver to
+ * operate, but they do not necessarily guarantee reasonable performance.
+ * An Rx queue size below 128 and BDQs smaller than 64 are likely
+ * suboptimal at best.
+ */
+ if (ring->rx_pending < max(FBNIC_QUEUE_SIZE_MIN, FBNIC_RX_DESC_MIN) ||
+ ring->rx_mini_pending < FBNIC_QUEUE_SIZE_MIN ||
+ ring->rx_jumbo_pending < FBNIC_QUEUE_SIZE_MIN ||
+ ring->tx_pending < max(FBNIC_QUEUE_SIZE_MIN, FBNIC_TX_DESC_MIN)) {
+ NL_SET_ERR_MSG_MOD(extack, "requested ring size too small");
+ return -EINVAL;
+ }
+
+ if (kernel_ring->tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_DISABLED) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot disable TCP data split");
+ return -EINVAL;
+ }
+
+ /* If an XDP program is attached, we should check for potential frame
+ * splitting. If the new HDS threshold can cause splitting, only allow
+ * it if the attached XDP program can handle frags.
+ */
+ if (fbnic_check_split_frames(fbn->xdp_prog, netdev->mtu,
+ kernel_ring->hds_thresh)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Use higher HDS threshold or multi-buf capable program");
+ return -EINVAL;
+ }
+
+ if (!netif_running(netdev)) {
+ fbnic_set_rings(fbn, ring, kernel_ring);
+ return 0;
+ }
+
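+ /* The device is running: stage the new ring sizes in a clone, allocate
+ * all of its resources up front, then swap the clone in across a
+ * down/up cycle so an allocation failure never leaves the device
+ * without usable rings.
+ */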
+ clone = fbnic_clone_create(fbn);
+ if (!clone)
+ return -ENOMEM;
+
+ fbnic_set_rings(clone, ring, kernel_ring);
+
+ err = fbnic_alloc_napi_vectors(clone);
+ if (err)
+ goto err_free_clone;
+
+ err = fbnic_alloc_resources(clone);
+ if (err)
+ goto err_free_napis;
+
+ fbnic_down_noidle(fbn);
+ err = fbnic_wait_all_queues_idle(fbn->fbd, true);
+ if (err)
+ goto err_start_stack;
+
+ err = fbnic_set_netif_queues(clone);
+ if (err)
+ goto err_start_stack;
+
+ /* Nothing can fail past this point */
+ fbnic_flush(fbn);
+
+ fbnic_clone_swap(fbn, clone);
+
+ fbnic_up(fbn);
+
+ fbnic_free_resources(clone);
+ fbnic_free_napi_vectors(clone);
+ fbnic_clone_free(clone);
+
+ return 0;
+
+err_start_stack:
+ fbnic_flush(fbn);
+ fbnic_up(fbn);
+ fbnic_free_resources(clone);
+err_free_napis:
+ fbnic_free_napi_vectors(clone);
+err_free_clone:
+ fbnic_clone_free(clone);
+ return err;
+}
+
+static void fbnic_get_rxb_enqueue_strings(u8 **data, unsigned int idx)
+{
+ const struct fbnic_stat *stat;
+ int i;
+
+ stat = fbnic_gstrings_rxb_enqueue_stats;
+ for (i = 0; i < FBNIC_HW_RXB_ENQUEUE_STATS_LEN; i++, stat++)
+ ethtool_sprintf(data, stat->string, idx);
+}
+
+static void fbnic_get_rxb_fifo_strings(u8 **data, unsigned int idx)
+{
+ const struct fbnic_stat *stat;
+ int i;
+
+ stat = fbnic_gstrings_rxb_fifo_stats;
+ for (i = 0; i < FBNIC_HW_RXB_FIFO_STATS_LEN; i++, stat++)
+ ethtool_sprintf(data, stat->string, idx);
+}
+
+static void fbnic_get_rxb_dequeue_strings(u8 **data, unsigned int idx)
+{
+ const struct fbnic_stat *stat;
+ int i;
+
+ stat = fbnic_gstrings_rxb_dequeue_stats;
+ for (i = 0; i < FBNIC_HW_RXB_DEQUEUE_STATS_LEN; i++, stat++)
+ ethtool_sprintf(data, stat->string, idx);
+}
+
+static void fbnic_get_xdp_queue_strings(u8 **data, unsigned int idx)
+{
+ const struct fbnic_stat *stat;
+ int i;
+
+ stat = fbnic_gstrings_xdp_stats;
+ for (i = 0; i < FBNIC_XDP_STATS_LEN; i++, stat++)
+ ethtool_sprintf(data, stat->string, idx);
+}
+
+static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data)
+{
+ const struct fbnic_stat *stat;
+ int i, idx;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < FBNIC_HW_FIXED_STATS_LEN; i++)
+ ethtool_puts(&data, fbnic_gstrings_hw_stats[i].string);
+
+ for (i = 0; i < FBNIC_RXB_ENQUEUE_INDICES; i++)
+ fbnic_get_rxb_enqueue_strings(&data, i);
+
+ for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++)
+ fbnic_get_rxb_fifo_strings(&data, i);
+
+ for (i = 0; i < FBNIC_RXB_DEQUEUE_INDICES; i++)
+ fbnic_get_rxb_dequeue_strings(&data, i);
+
+ for (idx = 0; idx < FBNIC_MAX_QUEUES; idx++) {
+ stat = fbnic_gstrings_hw_q_stats;
+
+ for (i = 0; i < FBNIC_HW_Q_STATS_LEN; i++, stat++)
+ ethtool_sprintf(&data, stat->string, idx);
+ }
+
+ for (i = 0; i < FBNIC_MAX_XDPQS; i++)
+ fbnic_get_xdp_queue_strings(&data, i);
+ break;
+ }
+}
+
+static void fbnic_report_hw_stats(const struct fbnic_stat *stat,
+ const void *base, int len, u64 **data)
+{
+ while (len--) {
+ u8 *curr = (u8 *)base + stat->offset;
+
+ **data = *(u64 *)curr;
+
+ stat++;
+ (*data)++;
+ }
+}
+
+static void fbnic_get_xdp_queue_stats(struct fbnic_ring *ring, u64 **data)
+{
+ const struct fbnic_stat *stat;
+ int i;
+
+ if (!ring) {
+ *data += FBNIC_XDP_STATS_LEN;
+ return;
+ }
+
+ stat = fbnic_gstrings_xdp_stats;
+ for (i = 0; i < FBNIC_XDP_STATS_LEN; i++, stat++, (*data)++) {
+ u8 *p = (u8 *)ring + stat->offset;
+
+ **data = *(u64 *)p;
+ }
+}
+
+static void fbnic_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i;
+
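+ /* Values must be emitted in the same order the strings were laid out in
+ * fbnic_get_strings(): fixed HW stats, then RXB enqueue/FIFO/dequeue
+ * stats, per-queue HW stats and finally per-queue XDP stats.
+ */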
+ fbnic_get_hw_stats(fbn->fbd);
+
+ spin_lock(&fbd->hw_stats.lock);
+ fbnic_report_hw_stats(fbnic_gstrings_hw_stats, &fbd->hw_stats,
+ FBNIC_HW_FIXED_STATS_LEN, &data);
+
+ for (i = 0; i < FBNIC_RXB_ENQUEUE_INDICES; i++) {
+ const struct fbnic_rxb_enqueue_stats *enq;
+
+ enq = &fbd->hw_stats.rxb.enq[i];
+ fbnic_report_hw_stats(fbnic_gstrings_rxb_enqueue_stats,
+ enq, FBNIC_HW_RXB_ENQUEUE_STATS_LEN,
+ &data);
+ }
+
+ for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++) {
+ const struct fbnic_rxb_fifo_stats *fifo;
+
+ fifo = &fbd->hw_stats.rxb.fifo[i];
+ fbnic_report_hw_stats(fbnic_gstrings_rxb_fifo_stats,
+ fifo, FBNIC_HW_RXB_FIFO_STATS_LEN,
+ &data);
+ }
+
+ for (i = 0; i < FBNIC_RXB_DEQUEUE_INDICES; i++) {
+ const struct fbnic_rxb_dequeue_stats *deq;
+
+ deq = &fbd->hw_stats.rxb.deq[i];
+ fbnic_report_hw_stats(fbnic_gstrings_rxb_dequeue_stats,
+ deq, FBNIC_HW_RXB_DEQUEUE_STATS_LEN,
+ &data);
+ }
+
+ for (i = 0; i < FBNIC_MAX_QUEUES; i++) {
+ const struct fbnic_hw_q_stats *hw_q = &fbd->hw_stats.hw_q[i];
+
+ fbnic_report_hw_stats(fbnic_gstrings_hw_q_stats, hw_q,
+ FBNIC_HW_Q_STATS_LEN, &data);
+ }
+ spin_unlock(&fbd->hw_stats.lock);
+
+ for (i = 0; i < FBNIC_MAX_XDPQS; i++)
+ fbnic_get_xdp_queue_stats(fbn->tx[i + FBNIC_MAX_TXQS], &data);
+}
+
+static int fbnic_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return FBNIC_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int fbnic_get_rss_hash_idx(u32 flow_type)
+{
+ switch (flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
+ case TCP_V4_FLOW:
+ return FBNIC_TCP4_HASH_OPT;
+ case TCP_V6_FLOW:
+ return FBNIC_TCP6_HASH_OPT;
+ case UDP_V4_FLOW:
+ return FBNIC_UDP4_HASH_OPT;
+ case UDP_V6_FLOW:
+ return FBNIC_UDP6_HASH_OPT;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case IPV4_FLOW:
+ case IPV4_USER_FLOW:
+ return FBNIC_IPV4_HASH_OPT;
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV6_FLOW:
+ case IPV6_USER_FLOW:
+ return FBNIC_IPV6_HASH_OPT;
+ case ETHER_FLOW:
+ return FBNIC_ETHER_HASH_OPT;
+ }
+
+ return -1;
+}
+
+static int fbnic_get_cls_rule_all(struct fbnic_net *fbn,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i, cnt = 0;
+
+ /* Report maximum rule count */
+ cmd->data = FBNIC_RPC_ACT_TBL_NFC_ENTRIES;
+
+ for (i = 0; i < FBNIC_RPC_ACT_TBL_NFC_ENTRIES; i++) {
+ int idx = i + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
+ struct fbnic_act_tcam *act_tcam;
+
+ act_tcam = &fbd->act_tcam[idx];
+ if (act_tcam->state != FBNIC_TCAM_S_VALID)
+ continue;
+
+ if (rule_locs) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+
+ rule_locs[cnt] = i;
+ }
+
+ cnt++;
+ }
+
+ return cnt;
+}
+
+static int fbnic_get_cls_rule(struct fbnic_net *fbn, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp;
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_act_tcam *act_tcam;
+ int idx;
+
+ fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (fsp->location >= FBNIC_RPC_ACT_TBL_NFC_ENTRIES)
+ return -EINVAL;
+
+ idx = fsp->location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
+ act_tcam = &fbd->act_tcam[idx];
+
+ if (act_tcam->state != FBNIC_TCAM_S_VALID)
+ return -EINVAL;
+
+ /* Report maximum rule count */
+ cmd->data = FBNIC_RPC_ACT_TBL_NFC_ENTRIES;
+
+ /* Set flow type field */
+ if (!(act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_IP_VALID)) {
+ fsp->flow_type = ETHER_FLOW;
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
+ act_tcam->mask.tcam[1])) {
+ struct fbnic_mac_addr *mac_addr;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
+ act_tcam->value.tcam[1]);
+ mac_addr = &fbd->mac_addr[idx];
+
+ ether_addr_copy(fsp->h_u.ether_spec.h_dest,
+ mac_addr->value.addr8);
+ eth_broadcast_addr(fsp->m_u.ether_spec.h_dest);
+ }
+ } else if (act_tcam->value.tcam[1] &
+ FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID) {
+ fsp->flow_type = IPV6_USER_FLOW;
+ fsp->h_u.usr_ip6_spec.l4_proto = IPPROTO_IPV6;
+ fsp->m_u.usr_ip6_spec.l4_proto = 0xff;
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+ int i;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ipo_src[idx];
+
+ for (i = 0; i < 4; i++) {
+ fsp->h_u.usr_ip6_spec.ip6src[i] =
+ ip_addr->value.s6_addr32[i];
+ fsp->m_u.usr_ip6_spec.ip6src[i] =
+ ~ip_addr->mask.s6_addr32[i];
+ }
+ }
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+ int i;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ipo_dst[idx];
+
+ for (i = 0; i < 4; i++) {
+ fsp->h_u.usr_ip6_spec.ip6dst[i] =
+ ip_addr->value.s6_addr32[i];
+ fsp->m_u.usr_ip6_spec.ip6dst[i] =
+ ~ip_addr->mask.s6_addr32[i];
+ }
+ }
+ } else if ((act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_IP_IS_V6)) {
+ if (act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L4_VALID) {
+ if (act_tcam->value.tcam[1] &
+ FBNIC_RPC_TCAM_ACT1_L4_IS_UDP)
+ fsp->flow_type = UDP_V6_FLOW;
+ else
+ fsp->flow_type = TCP_V6_FLOW;
+ fsp->h_u.tcp_ip6_spec.psrc =
+ cpu_to_be16(act_tcam->value.tcam[3]);
+ fsp->m_u.tcp_ip6_spec.psrc =
+ cpu_to_be16(~act_tcam->mask.tcam[3]);
+ fsp->h_u.tcp_ip6_spec.pdst =
+ cpu_to_be16(act_tcam->value.tcam[4]);
+ fsp->m_u.tcp_ip6_spec.pdst =
+ cpu_to_be16(~act_tcam->mask.tcam[4]);
+ } else {
+ fsp->flow_type = IPV6_USER_FLOW;
+ }
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+ int i;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ip_src[idx];
+
+ for (i = 0; i < 4; i++) {
+ fsp->h_u.usr_ip6_spec.ip6src[i] =
+ ip_addr->value.s6_addr32[i];
+ fsp->m_u.usr_ip6_spec.ip6src[i] =
+ ~ip_addr->mask.s6_addr32[i];
+ }
+ }
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+ int i;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ip_dst[idx];
+
+ for (i = 0; i < 4; i++) {
+ fsp->h_u.usr_ip6_spec.ip6dst[i] =
+ ip_addr->value.s6_addr32[i];
+ fsp->m_u.usr_ip6_spec.ip6dst[i] =
+ ~ip_addr->mask.s6_addr32[i];
+ }
+ }
+ } else {
+ if (act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L4_VALID) {
+ if (act_tcam->value.tcam[1] &
+ FBNIC_RPC_TCAM_ACT1_L4_IS_UDP)
+ fsp->flow_type = UDP_V4_FLOW;
+ else
+ fsp->flow_type = TCP_V4_FLOW;
+ fsp->h_u.tcp_ip4_spec.psrc =
+ cpu_to_be16(act_tcam->value.tcam[3]);
+ fsp->m_u.tcp_ip4_spec.psrc =
+ cpu_to_be16(~act_tcam->mask.tcam[3]);
+ fsp->h_u.tcp_ip4_spec.pdst =
+ cpu_to_be16(act_tcam->value.tcam[4]);
+ fsp->m_u.tcp_ip4_spec.pdst =
+ cpu_to_be16(~act_tcam->mask.tcam[4]);
+ } else {
+ fsp->flow_type = IPV4_USER_FLOW;
+ fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ }
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ip_src[idx];
+
+ fsp->h_u.usr_ip4_spec.ip4src =
+ ip_addr->value.s6_addr32[3];
+ fsp->m_u.usr_ip4_spec.ip4src =
+ ~ip_addr->mask.s6_addr32[3];
+ }
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ip_dst[idx];
+
+ fsp->h_u.usr_ip4_spec.ip4dst =
+ ip_addr->value.s6_addr32[3];
+ fsp->m_u.usr_ip4_spec.ip4dst =
+ ~ip_addr->mask.s6_addr32[3];
+ }
+ }
+
+ /* Record action */
+ if (act_tcam->dest & FBNIC_RPC_ACT_TBL0_DROP)
+ fsp->ring_cookie = RX_CLS_FLOW_DISC;
+ else if (act_tcam->dest & FBNIC_RPC_ACT_TBL0_Q_SEL)
+ fsp->ring_cookie = FIELD_GET(FBNIC_RPC_ACT_TBL0_Q_ID,
+ act_tcam->dest);
+ else
+ fsp->flow_type |= FLOW_RSS;
+
+ cmd->rss_context = FIELD_GET(FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID,
+ act_tcam->dest);
+
+ return 0;
+}
+
+static int fbnic_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ int ret = -EOPNOTSUPP;
+ u32 special = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = fbn->num_rx_queues;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = fbnic_get_cls_rule(fbn, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ rule_locs = NULL;
+ special = RX_CLS_LOC_SPECIAL;
+ fallthrough;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = fbnic_get_cls_rule_all(fbn, cmd, rule_locs);
+ if (ret < 0)
+ break;
+
+ cmd->data |= special;
+ cmd->rule_cnt = ret;
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
+
+static int fbnic_cls_rule_any_loc(struct fbnic_dev *fbd)
+{
+ int i;
+
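+ /* Search from the highest NFC slot down and return the first free one */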
+ for (i = FBNIC_RPC_ACT_TBL_NFC_ENTRIES; i--;) {
+ int idx = i + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
+
+ if (fbd->act_tcam[idx].state != FBNIC_TCAM_S_VALID)
+ return i;
+ }
+
+ return -ENOSPC;
+}
+
+static int fbnic_set_cls_rule_ins(struct fbnic_net *fbn,
+ const struct ethtool_rxnfc *cmd)
+{
+ u16 flow_value = 0, flow_mask = 0xffff, ip_value = 0, ip_mask = 0xffff;
+ u16 sport = 0, sport_mask = ~0, dport = 0, dport_mask = ~0;
+ u16 misc = 0, misc_mask = ~0;
+ u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
+ FBNIC_RPC_ACT_TBL0_DEST_HOST);
+ struct fbnic_ip_addr *ip_src = NULL, *ip_dst = NULL;
+ struct fbnic_mac_addr *mac_addr = NULL;
+ struct ethtool_rx_flow_spec *fsp;
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_act_tcam *act_tcam;
+ struct in6_addr *addr6, *mask6;
+ struct in_addr *addr4, *mask4;
+ int hash_idx, location;
+ u32 flow_type;
+ int idx, j;
+
+ fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (fsp->location != RX_CLS_LOC_ANY)
+ return -EINVAL;
+ location = fbnic_cls_rule_any_loc(fbd);
+ if (location < 0)
+ return location;
+
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+ dest = FBNIC_RPC_ACT_TBL0_DROP;
+ } else if (fsp->flow_type & FLOW_RSS) {
+ if (cmd->rss_context == 1)
+ dest |= FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID;
+ } else {
+ u32 ring_idx = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+
+ if (ring_idx >= fbn->num_rx_queues)
+ return -EINVAL;
+
+ dest |= FBNIC_RPC_ACT_TBL0_Q_SEL |
+ FIELD_PREP(FBNIC_RPC_ACT_TBL0_Q_ID, ring_idx);
+ }
+
+ idx = location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
+ act_tcam = &fbd->act_tcam[idx];
+
+ /* Do not allow overwriting for now.
+ * To support overwriting rules we will need to add logic to free
+ * any IP or MACDA TCAMs that may be associated with the old rule.
+ */
+ if (act_tcam->state != FBNIC_TCAM_S_DISABLED)
+ return -EBUSY;
+
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_RSS);
+ hash_idx = fbnic_get_rss_hash_idx(flow_type);
+
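+ /* The TCP/UDP cases below share the IPv4/IPv6 address handling via
+ * gotos; the *_USER_FLOW cases jump to the same labels based on the
+ * protocol carried in the rule.
+ */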
+ switch (flow_type) {
+ case UDP_V4_FLOW:
+udp4_flow:
+ flow_value |= FBNIC_RPC_TCAM_ACT1_L4_IS_UDP;
+ fallthrough;
+ case TCP_V4_FLOW:
+tcp4_flow:
+ flow_value |= FBNIC_RPC_TCAM_ACT1_L4_VALID;
+ flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_L4_IS_UDP |
+ FBNIC_RPC_TCAM_ACT1_L4_VALID);
+
+ sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc);
+ sport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc);
+ dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst);
+ dport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst);
+ goto ip4_flow;
+ case IP_USER_FLOW:
+ if (!fsp->m_u.usr_ip4_spec.proto)
+ goto ip4_flow;
+ if (fsp->m_u.usr_ip4_spec.proto != 0xff)
+ return -EINVAL;
+ if (fsp->h_u.usr_ip4_spec.proto == IPPROTO_UDP)
+ goto udp4_flow;
+ if (fsp->h_u.usr_ip4_spec.proto == IPPROTO_TCP)
+ goto tcp4_flow;
+ return -EINVAL;
+ip4_flow:
+ addr4 = (struct in_addr *)&fsp->h_u.usr_ip4_spec.ip4src;
+ mask4 = (struct in_addr *)&fsp->m_u.usr_ip4_spec.ip4src;
+ if (mask4->s_addr) {
+ ip_src = __fbnic_ip4_sync(fbd, fbd->ip_src,
+ addr4, mask4);
+ if (!ip_src)
+ return -ENOSPC;
+
+ set_bit(idx, ip_src->act_tcam);
+ ip_value |= FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ ip_src - fbd->ip_src);
+ ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
+ FBNIC_RPC_TCAM_ACT0_IPSRC_IDX);
+ }
+
+ addr4 = (struct in_addr *)&fsp->h_u.usr_ip4_spec.ip4dst;
+ mask4 = (struct in_addr *)&fsp->m_u.usr_ip4_spec.ip4dst;
+ if (mask4->s_addr) {
+ ip_dst = __fbnic_ip4_sync(fbd, fbd->ip_dst,
+ addr4, mask4);
+ if (!ip_dst) {
+ if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
+ memset(ip_src, 0, sizeof(*ip_src));
+ return -ENOSPC;
+ }
+
+ set_bit(idx, ip_dst->act_tcam);
+ ip_value |= FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ ip_dst - fbd->ip_dst);
+ ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
+ FBNIC_RPC_TCAM_ACT0_IPDST_IDX);
+ }
+ flow_value |= FBNIC_RPC_TCAM_ACT1_IP_VALID |
+ FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
+ FBNIC_RPC_TCAM_ACT1_IP_VALID |
+ FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID);
+ break;
+ case UDP_V6_FLOW:
+udp6_flow:
+ flow_value |= FBNIC_RPC_TCAM_ACT1_L4_IS_UDP;
+ fallthrough;
+ case TCP_V6_FLOW:
+tcp6_flow:
+ flow_value |= FBNIC_RPC_TCAM_ACT1_L4_VALID;
+ flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_L4_IS_UDP |
+ FBNIC_RPC_TCAM_ACT1_L4_VALID);
+
+ sport = be16_to_cpu(fsp->h_u.tcp_ip6_spec.psrc);
+ sport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip6_spec.psrc);
+ dport = be16_to_cpu(fsp->h_u.tcp_ip6_spec.pdst);
+ dport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip6_spec.pdst);
+ goto ipv6_flow;
+ case IPV6_USER_FLOW:
+ if (!fsp->m_u.usr_ip6_spec.l4_proto)
+ goto ipv6_flow;
+
+ if (fsp->m_u.usr_ip6_spec.l4_proto != 0xff)
+ return -EINVAL;
+ if (fsp->h_u.usr_ip6_spec.l4_proto == IPPROTO_UDP)
+ goto udp6_flow;
+ if (fsp->h_u.usr_ip6_spec.l4_proto == IPPROTO_TCP)
+ goto tcp6_flow;
+ if (fsp->h_u.usr_ip6_spec.l4_proto != IPPROTO_IPV6)
+ return -EINVAL;
+
+ addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6src;
+ mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6src;
+ if (!ipv6_addr_any(mask6)) {
+ ip_src = __fbnic_ip6_sync(fbd, fbd->ipo_src,
+ addr6, mask6);
+ if (!ip_src)
+ return -ENOSPC;
+
+ set_bit(idx, ip_src->act_tcam);
+ ip_value |=
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
+ ip_src - fbd->ipo_src);
+ ip_mask &=
+ ~(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX);
+ }
+
+ addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6dst;
+ mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6dst;
+ if (!ipv6_addr_any(mask6)) {
+ ip_dst = __fbnic_ip6_sync(fbd, fbd->ipo_dst,
+ addr6, mask6);
+ if (!ip_dst) {
+ if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
+ memset(ip_src, 0, sizeof(*ip_src));
+ return -ENOSPC;
+ }
+
+ set_bit(idx, ip_dst->act_tcam);
+ ip_value |=
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
+ ip_dst - fbd->ipo_dst);
+ ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX);
+ }
+
+ flow_value |= FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID;
+ flow_mask &= FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID;
+ipv6_flow:
+ addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6src;
+ mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6src;
+ if (!ip_src && !ipv6_addr_any(mask6)) {
+ ip_src = __fbnic_ip6_sync(fbd, fbd->ip_src,
+ addr6, mask6);
+ if (!ip_src)
+ return -ENOSPC;
+
+ set_bit(idx, ip_src->act_tcam);
+ ip_value |= FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ ip_src - fbd->ip_src);
+ ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
+ FBNIC_RPC_TCAM_ACT0_IPSRC_IDX);
+ }
+
+ addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6dst;
+ mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6dst;
+ if (!ip_dst && !ipv6_addr_any(mask6)) {
+ ip_dst = __fbnic_ip6_sync(fbd, fbd->ip_dst,
+ addr6, mask6);
+ if (!ip_dst) {
+ if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
+ memset(ip_src, 0, sizeof(*ip_src));
+ return -ENOSPC;
+ }
+
+ set_bit(idx, ip_dst->act_tcam);
+ ip_value |= FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ ip_dst - fbd->ip_dst);
+ ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
+ FBNIC_RPC_TCAM_ACT0_IPDST_IDX);
+ }
+
+ flow_value |= FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
+ FBNIC_RPC_TCAM_ACT1_IP_VALID |
+ FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
+ FBNIC_RPC_TCAM_ACT1_IP_VALID |
+ FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID);
+ break;
+ case ETHER_FLOW:
+ if (!is_zero_ether_addr(fsp->m_u.ether_spec.h_dest)) {
+ u8 *addr = fsp->h_u.ether_spec.h_dest;
+ u8 *mask = fsp->m_u.ether_spec.h_dest;
+
+ /* Do not allow MAC addr of 0 */
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ /* Only support full MAC address to avoid
+ * conflicts with other MAC addresses.
+ */
+ if (!is_broadcast_ether_addr(mask))
+ return -EINVAL;
+
+ if (is_multicast_ether_addr(addr))
+ mac_addr = __fbnic_mc_sync(fbd, addr);
+ else
+ mac_addr = __fbnic_uc_sync(fbd, addr);
+
+ if (!mac_addr)
+ return -ENOSPC;
+
+ set_bit(idx, mac_addr->act_tcam);
+ flow_value |=
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
+ mac_addr - fbd->mac_addr);
+ flow_mask &= ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX;
+ }
+
+ flow_value |= FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ flow_mask &= ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Write action table values */
+ act_tcam->dest = dest;
+ act_tcam->rss_en_mask = fbnic_flow_hash_2_rss_en_mask(fbn, hash_idx);
+
+ /* Write IP Match value/mask to action_tcam[0] */
+ act_tcam->value.tcam[0] = ip_value;
+ act_tcam->mask.tcam[0] = ip_mask;
+
+ /* Write flow type value/mask to action_tcam[1] */
+ act_tcam->value.tcam[1] = flow_value;
+ act_tcam->mask.tcam[1] = flow_mask;
+
+ /* Write error, DSCP, extra L4 matches to action_tcam[2] */
+ act_tcam->value.tcam[2] = misc;
+ act_tcam->mask.tcam[2] = misc_mask;
+
+ /* Write source/destination port values */
+ act_tcam->value.tcam[3] = sport;
+ act_tcam->mask.tcam[3] = sport_mask;
+ act_tcam->value.tcam[4] = dport;
+ act_tcam->mask.tcam[4] = dport_mask;
+
+ for (j = 5; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++)
+ act_tcam->mask.tcam[j] = 0xffff;
+
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+ fsp->location = location;
+
+ if (netif_running(fbn->netdev)) {
+ fbnic_write_rules(fbd);
+ if (ip_src || ip_dst)
+ fbnic_write_ip_addr(fbd);
+ if (mac_addr)
+ fbnic_write_macda(fbd);
+ }
+
+ return 0;
+}
+
+static void fbnic_clear_nfc_macda(struct fbnic_net *fbn,
+ unsigned int tcam_idx)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;)
+ __fbnic_xc_unsync(&fbd->mac_addr[idx], tcam_idx);
+
+ /* Write updates to hardware */
+ if (netif_running(fbn->netdev))
+ fbnic_write_macda(fbd);
+}
+
+static void fbnic_clear_nfc_ip_addr(struct fbnic_net *fbn,
+ unsigned int tcam_idx)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->ip_src); idx--;)
+ __fbnic_ip_unsync(&fbd->ip_src[idx], tcam_idx);
+ for (idx = ARRAY_SIZE(fbd->ip_dst); idx--;)
+ __fbnic_ip_unsync(&fbd->ip_dst[idx], tcam_idx);
+ for (idx = ARRAY_SIZE(fbd->ipo_src); idx--;)
+ __fbnic_ip_unsync(&fbd->ipo_src[idx], tcam_idx);
+ for (idx = ARRAY_SIZE(fbd->ipo_dst); idx--;)
+ __fbnic_ip_unsync(&fbd->ipo_dst[idx], tcam_idx);
+
+ /* Write updates to hardware */
+ if (netif_running(fbn->netdev))
+ fbnic_write_ip_addr(fbd);
+}
+
+static int fbnic_set_cls_rule_del(struct fbnic_net *fbn,
+ const struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp;
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_act_tcam *act_tcam;
+ int idx;
+
+ fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (fsp->location >= FBNIC_RPC_ACT_TBL_NFC_ENTRIES)
+ return -EINVAL;
+
+ idx = fsp->location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
+ act_tcam = &fbd->act_tcam[idx];
+
+ if (act_tcam->state != FBNIC_TCAM_S_VALID)
+ return -EINVAL;
+
+ act_tcam->state = FBNIC_TCAM_S_DELETE;
+
+ if ((act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID) &&
+ (~act_tcam->mask.tcam[1] & FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX))
+ fbnic_clear_nfc_macda(fbn, idx);
+
+ if ((act_tcam->value.tcam[0] &
+ (FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
+ FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID)) &&
+ (~act_tcam->mask.tcam[0] &
+ (FBNIC_RPC_TCAM_ACT0_IPSRC_IDX |
+ FBNIC_RPC_TCAM_ACT0_IPDST_IDX |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX)))
+ fbnic_clear_nfc_ip_addr(fbn, idx);
+
+ if (netif_running(fbn->netdev))
+ fbnic_write_rules(fbd);
+
+ return 0;
+}
+
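+/* Entry point for ethtool's ntuple configuration interface
+ * (ethtool -N/--config-ntuple); only rule insertion and deletion are
+ * handled here.
+ */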
+static int fbnic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ ret = fbnic_set_cls_rule_ins(fbn, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = fbnic_set_cls_rule_del(fbn, cmd);
+ break;
+ }
+
+ return ret;
+}
+
+static u32 fbnic_get_rxfh_key_size(struct net_device *netdev)
+{
+ return FBNIC_RPC_RSS_KEY_BYTE_LEN;
+}
+
+static u32 fbnic_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return FBNIC_RPC_RSS_TBL_SIZE;
+}
+
+static int
+fbnic_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ unsigned int i;
+
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+
+ if (rxfh->key) {
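+ /* rss_key[] packs the key into 32-bit words with the first byte in
+ * the most significant bits; extract one byte per iteration from the
+ * top of the shifted word.
+ */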
+ for (i = 0; i < FBNIC_RPC_RSS_KEY_BYTE_LEN; i++) {
+ u32 rss_key = fbn->rss_key[i / 4] << ((i % 4) * 8);
+
+ rxfh->key[i] = rss_key >> 24;
+ }
+ }
+
+ if (rxfh->indir) {
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
+ rxfh->indir[i] = fbn->indir_tbl[0][i];
+ }
+
+ return 0;
+}
+
+static unsigned int
+fbnic_set_indir(struct fbnic_net *fbn, unsigned int idx, const u32 *indir)
+{
+ unsigned int i, changes = 0;
+
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) {
+ if (fbn->indir_tbl[idx][i] == indir[i])
+ continue;
+
+ fbn->indir_tbl[idx][i] = indir[i];
+ changes++;
+ }
+
+ return changes;
+}
+
+static int
+fbnic_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ unsigned int i, changes = 0;
+
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
+ return -EINVAL;
+
+ if (rxfh->key) {
+ u32 rss_key = 0;
+
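+ /* Repack the key bytes into 32-bit words, most significant byte
+ * first, and only check for a change once per completed word.
+ */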
+ for (i = FBNIC_RPC_RSS_KEY_BYTE_LEN; i--;) {
+ rss_key >>= 8;
+ rss_key |= (u32)(rxfh->key[i]) << 24;
+
+ if (i % 4)
+ continue;
+
+ if (fbn->rss_key[i / 4] == rss_key)
+ continue;
+
+ fbn->rss_key[i / 4] = rss_key;
+ changes++;
+ }
+ }
+
+ if (rxfh->indir)
+ changes += fbnic_set_indir(fbn, 0, rxfh->indir);
+
+ if (changes && netif_running(netdev))
+ fbnic_rss_reinit_hw(fbn->fbd, fbn);
+
+ return 0;
+}
+
+static int
+fbnic_get_rss_hash_opts(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
+{
+ int hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ if (hash_opt_idx < 0)
+ return -EINVAL;
+
+ /* Report options from rss_en table in fbn */
+ cmd->data = fbn->rss_flow_hash[hash_opt_idx];
+
+ return 0;
+}
+
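+/* Hash fields allowed per match depth: each level adds to the one below
+ * it, from MAC DA (L2) to IP addresses/flow label (L3) to L4 ports (L4).
+ */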
+#define FBNIC_L2_HASH_OPTIONS \
+ (RXH_L2DA | RXH_DISCARD)
+#define FBNIC_L3_HASH_OPTIONS \
+ (FBNIC_L2_HASH_OPTIONS | RXH_IP_SRC | RXH_IP_DST | RXH_IP6_FL)
+#define FBNIC_L4_HASH_OPTIONS \
+ (FBNIC_L3_HASH_OPTIONS | RXH_L4_B_0_1 | RXH_L4_B_2_3)
+
+static int
+fbnic_set_rss_hash_opts(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ int hash_opt_idx;
+
+ /* Verify the type requested is correct */
+ hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
+ if (hash_opt_idx < 0)
+ return -EINVAL;
+
+ /* Verify the fields asked for can actually be assigned based on type */
+ if (cmd->data & ~FBNIC_L4_HASH_OPTIONS ||
+ (hash_opt_idx > FBNIC_L4_HASH_OPT &&
+ cmd->data & ~FBNIC_L3_HASH_OPTIONS) ||
+ (hash_opt_idx > FBNIC_IP_HASH_OPT &&
+ cmd->data & ~FBNIC_L2_HASH_OPTIONS))
+ return -EINVAL;
+
+ fbn->rss_flow_hash[hash_opt_idx] = cmd->data;
+
+ if (netif_running(fbn->netdev)) {
+ fbnic_rss_reinit(fbn->fbd, fbn);
+ fbnic_write_rules(fbn->fbd);
+ }
+
+ return 0;
+}
+
+static int
+fbnic_modify_rxfh_context(struct net_device *netdev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ const u32 *indir = rxfh->indir;
+ unsigned int changes;
+
+ if (!indir)
+ indir = ethtool_rxfh_context_indir(ctx);
+
+ changes = fbnic_set_indir(fbn, rxfh->rss_context, indir);
+ if (changes && netif_running(netdev))
+ fbnic_rss_reinit_hw(fbn->fbd, fbn);
+
+ return 0;
+}
+
+static int
+fbnic_create_rxfh_context(struct net_device *netdev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) {
+ NL_SET_ERR_MSG_MOD(extack, "RSS hash function not supported");
+ return -EOPNOTSUPP;
+ }
+ ctx->hfunc = ETH_RSS_HASH_TOP;
+
+ if (!rxfh->indir) {
+ u32 *indir = ethtool_rxfh_context_indir(ctx);
+ unsigned int num_rx = fbn->num_rx_queues;
+ unsigned int i;
+
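+ /* No table was supplied, so spread entries evenly across the Rx
+ * queues as a default.
+ */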
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
+ indir[i] = ethtool_rxfh_indir_default(i, num_rx);
+ }
+
+ return fbnic_modify_rxfh_context(netdev, ctx, rxfh, extack);
+}
+
+static int
+fbnic_remove_rxfh_context(struct net_device *netdev,
+ struct ethtool_rxfh_context *ctx, u32 rss_context,
+ struct netlink_ext_ack *extack)
+{
+ /* Nothing to do, contexts are allocated statically */
+ return 0;
+}
+
+static void fbnic_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ ch->max_rx = fbd->max_num_queues;
+ ch->max_tx = fbd->max_num_queues;
+ ch->max_combined = min(ch->max_rx, ch->max_tx);
+ ch->max_other = FBNIC_NON_NAPI_VECTORS;
+
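+ /* Channels hosting both a Tx and an Rx queue are reported as
+ * combined: when either queue count exceeds the NAPI count this is
+ * the smaller of the two queue counts, otherwise only
+ * (rx + tx - napi) vectors carry both queue types.
+ */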
+ if (fbn->num_rx_queues > fbn->num_napi ||
+ fbn->num_tx_queues > fbn->num_napi)
+ ch->combined_count = min(fbn->num_rx_queues,
+ fbn->num_tx_queues);
+ else
+ ch->combined_count =
+ fbn->num_rx_queues + fbn->num_tx_queues - fbn->num_napi;
+ ch->rx_count = fbn->num_rx_queues - ch->combined_count;
+ ch->tx_count = fbn->num_tx_queues - ch->combined_count;
+ ch->other_count = FBNIC_NON_NAPI_VECTORS;
+}
+
+static void fbnic_set_queues(struct fbnic_net *fbn, struct ethtool_channels *ch,
+ unsigned int max_napis)
+{
+ fbn->num_rx_queues = ch->rx_count + ch->combined_count;
+ fbn->num_tx_queues = ch->tx_count + ch->combined_count;
+ fbn->num_napi = min(ch->rx_count + ch->tx_count + ch->combined_count,
+ max_napis);
+}
+
+static int fbnic_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ unsigned int max_napis, standalone;
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_net *clone;
+ int err;
+
+ max_napis = fbd->num_irqs - FBNIC_NON_NAPI_VECTORS;
+ standalone = ch->rx_count + ch->tx_count;
+
+ /* Limits for standalone queues:
+ * - each queue needs its own NAPI vector (num_napi >= rx + tx + combined)
+ * - rx, tx, and combined cannot all be non-zero at once
+ *   (if combined is used, either rx or tx must be 0)
+ */
+ if ((ch->rx_count && ch->tx_count && ch->combined_count) ||
+ (standalone && standalone + ch->combined_count > max_napis) ||
+ ch->rx_count + ch->combined_count > fbd->max_num_queues ||
+ ch->tx_count + ch->combined_count > fbd->max_num_queues ||
+ ch->other_count != FBNIC_NON_NAPI_VECTORS)
+ return -EINVAL;
+
+ if (!netif_running(netdev)) {
+ fbnic_set_queues(fbn, ch, max_napis);
+ fbnic_reset_indir_tbl(fbn);
+ return 0;
+ }
+
+ clone = fbnic_clone_create(fbn);
+ if (!clone)
+ return -ENOMEM;
+
+ fbnic_set_queues(clone, ch, max_napis);
+
+ err = fbnic_alloc_napi_vectors(clone);
+ if (err)
+ goto err_free_clone;
+
+ err = fbnic_alloc_resources(clone);
+ if (err)
+ goto err_free_napis;
+
+ fbnic_down_noidle(fbn);
+ err = fbnic_wait_all_queues_idle(fbn->fbd, true);
+ if (err)
+ goto err_start_stack;
+
+ err = fbnic_set_netif_queues(clone);
+ if (err)
+ goto err_start_stack;
+
+ /* Nothing can fail past this point */
+ fbnic_flush(fbn);
+
+ fbnic_clone_swap(fbn, clone);
+
+ /* Reset RSS indirection table */
+ fbnic_reset_indir_tbl(fbn);
+
+ fbnic_up(fbn);
+
+ fbnic_free_resources(clone);
+ fbnic_free_napi_vectors(clone);
+ fbnic_clone_free(clone);
+
+ return 0;
+
+err_start_stack:
+ fbnic_flush(fbn);
+ fbnic_up(fbn);
+ fbnic_free_resources(clone);
+err_free_napis:
+ fbnic_free_napi_vectors(clone);
+err_free_clone:
+ fbnic_clone_free(clone);
+ return err;
+}
+
static int
fbnic_get_ts_info(struct net_device *netdev,
struct kernel_ethtool_ts_info *tsinfo)
@@ -35,14 +1612,86 @@ fbnic_get_ts_info(struct net_device *netdev,
return 0;
}
-static void
-fbnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+static void fbnic_get_ts_stats(struct net_device *netdev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ u64 ts_packets, ts_lost;
+ struct fbnic_ring *ring;
+ unsigned int start;
+ int i;
+
+ ts_stats->pkts = fbn->tx_stats.twq.ts_packets;
+ ts_stats->lost = fbn->tx_stats.twq.ts_lost;
+ for (i = 0; i < fbn->num_tx_queues; i++) {
+ ring = fbn->tx[i];
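+ /* Retry until a consistent snapshot of the ring stats is read */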
+ do {
+ start = u64_stats_fetch_begin(&ring->stats.syncp);
+ ts_packets = ring->stats.twq.ts_packets;
+ ts_lost = ring->stats.twq.ts_lost;
+ } while (u64_stats_fetch_retry(&ring->stats.syncp, start));
+ ts_stats->pkts += ts_packets;
+ ts_stats->lost += ts_lost;
+ }
+}
+
+static int
+fbnic_get_module_eeprom_by_page(struct net_device *netdev,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
{
struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_fw_completion *fw_cmpl;
struct fbnic_dev *fbd = fbn->fbd;
+ int err;
- fbnic_get_fw_ver_commit_str(fbd, drvinfo->fw_version,
- sizeof(drvinfo->fw_version));
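+ /* 0x50 is the standard I2C address for SFP/QSFP module EEPROMs */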
+ if (page_data->i2c_address != 0x50) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid i2c address. Only 0x50 is supported");
+ return -EINVAL;
+ }
+
+ fw_cmpl = __fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_QSFP_READ_RESP,
+ page_data->length);
+ if (!fw_cmpl)
+ return -ENOMEM;
+
+ /* Initialize completion and queue it for FW to process */
+ fw_cmpl->u.qsfp.length = page_data->length;
+ fw_cmpl->u.qsfp.offset = page_data->offset;
+ fw_cmpl->u.qsfp.page = page_data->page;
+ fw_cmpl->u.qsfp.bank = page_data->bank;
+
+ err = fbnic_fw_xmit_qsfp_read_msg(fbd, fw_cmpl, page_data->page,
+ page_data->bank, page_data->offset,
+ page_data->length);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to transmit EEPROM read request");
+ goto exit_free;
+ }
+
+ if (!wait_for_completion_timeout(&fw_cmpl->done, 2 * HZ)) {
+ err = -ETIMEDOUT;
+ NL_SET_ERR_MSG_MOD(extack,
+ "Timed out waiting for firmware response");
+ goto exit_cleanup;
+ }
+
+ if (fw_cmpl->result) {
+ err = fw_cmpl->result;
+ NL_SET_ERR_MSG_MOD(extack, "Failed to read EEPROM");
+ goto exit_cleanup;
+ }
+
+ memcpy(page_data->data, fw_cmpl->u.qsfp.data, page_data->length);
+
+exit_cleanup:
+ fbnic_mbx_clear_cmpl(fbd, fw_cmpl);
+exit_free:
+ fbnic_fw_put_cmpl(fw_cmpl);
+
+ return err ? : page_data->length;
}
static void fbnic_set_counter(u64 *stat, struct fbnic_stat_counter *counter)
@@ -52,6 +1701,63 @@ static void fbnic_set_counter(u64 *stat, struct fbnic_stat_counter *counter)
}
static void
+fbnic_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_mac_stats *mac_stats;
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ mac_stats = &fbd->hw_stats.mac;
+
+ fbd->mac->get_pause_stats(fbd, false, &mac_stats->pause);
+
+ pause_stats->tx_pause_frames = mac_stats->pause.tx_pause_frames.value;
+ pause_stats->rx_pause_frames = mac_stats->pause.rx_pause_frames.value;
+}
+
+static void
+fbnic_get_fec_stats(struct net_device *netdev,
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_phy_stats *phy_stats;
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ fbnic_get_hw_stats32(fbd);
+ phy_stats = &fbd->hw_stats.phy;
+
+ spin_lock(&fbd->hw_stats.lock);
+ fec_stats->corrected_blocks.total =
+ phy_stats->fec.corrected_blocks.value;
+ fec_stats->uncorrectable_blocks.total =
+ phy_stats->fec.uncorrectable_blocks.value;
+ spin_unlock(&fbd->hw_stats.lock);
+}
+
+static void
+fbnic_get_eth_phy_stats(struct net_device *netdev,
+ struct ethtool_eth_phy_stats *eth_phy_stats)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_phy_stats *phy_stats;
+ struct fbnic_dev *fbd = fbn->fbd;
+ u64 total = 0;
+ int i;
+
+ fbnic_get_hw_stats32(fbd);
+ phy_stats = &fbd->hw_stats.phy;
+
+ spin_lock(&fbd->hw_stats.lock);
+ for (i = 0; i < FBNIC_PCS_MAX_LANES; i++)
+ total += phy_stats->pcs.SymbolErrorDuringCarrier.lanes[i].value;
+
+ eth_phy_stats->SymbolErrorDuringCarrier = total;
+ spin_unlock(&fbd->hw_stats.lock);
+}
+
+static void
fbnic_get_eth_mac_stats(struct net_device *netdev,
struct ethtool_eth_mac_stats *eth_mac_stats)
{
@@ -93,34 +1799,123 @@ fbnic_get_eth_mac_stats(struct net_device *netdev,
&mac_stats->eth_mac.FrameTooLongErrors);
}
-static void fbnic_get_ts_stats(struct net_device *netdev,
- struct ethtool_ts_stats *ts_stats)
+static void
+fbnic_get_eth_ctrl_stats(struct net_device *netdev,
+ struct ethtool_eth_ctrl_stats *eth_ctrl_stats)
{
struct fbnic_net *fbn = netdev_priv(netdev);
- u64 ts_packets, ts_lost;
- struct fbnic_ring *ring;
- unsigned int start;
+ struct fbnic_mac_stats *mac_stats;
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ mac_stats = &fbd->hw_stats.mac;
+
+ fbd->mac->get_eth_ctrl_stats(fbd, false, &mac_stats->eth_ctrl);
+
+ eth_ctrl_stats->MACControlFramesReceived =
+ mac_stats->eth_ctrl.MACControlFramesReceived.value;
+ eth_ctrl_stats->MACControlFramesTransmitted =
+ mac_stats->eth_ctrl.MACControlFramesTransmitted.value;
+}
+
+static const struct ethtool_rmon_hist_range fbnic_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 2047 },
+ { 2048, 4095 },
+ { 4096, 8191 },
+ { 8192, 9216 },
+ { 9217, FBNIC_MAX_JUMBO_FRAME_SIZE },
+ {}
+};
+
+static void
+fbnic_get_rmon_stats(struct net_device *netdev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_mac_stats *mac_stats;
+ struct fbnic_dev *fbd = fbn->fbd;
int i;
- ts_stats->pkts = fbn->tx_stats.ts_packets;
- ts_stats->lost = fbn->tx_stats.ts_lost;
- for (i = 0; i < fbn->num_tx_queues; i++) {
- ring = fbn->tx[i];
- do {
- start = u64_stats_fetch_begin(&ring->stats.syncp);
- ts_packets = ring->stats.ts_packets;
- ts_lost = ring->stats.ts_lost;
- } while (u64_stats_fetch_retry(&ring->stats.syncp, start));
- ts_stats->pkts += ts_packets;
- ts_stats->lost += ts_lost;
+ mac_stats = &fbd->hw_stats.mac;
+
+ fbd->mac->get_rmon_stats(fbd, false, &mac_stats->rmon);
+
+ rmon_stats->undersize_pkts =
+ mac_stats->rmon.undersize_pkts.value;
+ rmon_stats->oversize_pkts =
+ mac_stats->rmon.oversize_pkts.value;
+ rmon_stats->fragments =
+ mac_stats->rmon.fragments.value;
+ rmon_stats->jabbers =
+ mac_stats->rmon.jabbers.value;
+
+ for (i = 0; fbnic_rmon_ranges[i].high; i++) {
+ rmon_stats->hist[i] = mac_stats->rmon.hist[i].value;
+ rmon_stats->hist_tx[i] = mac_stats->rmon.hist_tx[i].value;
}
+
+ *ranges = fbnic_rmon_ranges;
+}
+
+static void fbnic_get_link_ext_stats(struct net_device *netdev,
+ struct ethtool_link_ext_stats *stats)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ stats->link_down_events = fbn->link_down_events;
}
static const struct ethtool_ops fbnic_ethtool_ops = {
- .get_drvinfo = fbnic_get_drvinfo,
- .get_ts_info = fbnic_get_ts_info,
- .get_ts_stats = fbnic_get_ts_stats,
- .get_eth_mac_stats = fbnic_get_eth_mac_stats,
+ .cap_link_lanes_supported = true,
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT |
+ ETHTOOL_RING_USE_HDS_THRS,
+ .rxfh_max_num_contexts = FBNIC_RPC_RSS_TBL_COUNT,
+ .get_drvinfo = fbnic_get_drvinfo,
+ .get_regs_len = fbnic_get_regs_len,
+ .get_regs = fbnic_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_link_ext_stats = fbnic_get_link_ext_stats,
+ .get_coalesce = fbnic_get_coalesce,
+ .set_coalesce = fbnic_set_coalesce,
+ .get_ringparam = fbnic_get_ringparam,
+ .set_ringparam = fbnic_set_ringparam,
+ .get_pause_stats = fbnic_get_pause_stats,
+ .get_pauseparam = fbnic_phylink_get_pauseparam,
+ .set_pauseparam = fbnic_phylink_set_pauseparam,
+ .get_strings = fbnic_get_strings,
+ .get_ethtool_stats = fbnic_get_ethtool_stats,
+ .get_sset_count = fbnic_get_sset_count,
+ .get_rxnfc = fbnic_get_rxnfc,
+ .set_rxnfc = fbnic_set_rxnfc,
+ .get_rxfh_key_size = fbnic_get_rxfh_key_size,
+ .get_rxfh_indir_size = fbnic_get_rxfh_indir_size,
+ .get_rxfh = fbnic_get_rxfh,
+ .set_rxfh = fbnic_set_rxfh,
+ .get_rxfh_fields = fbnic_get_rss_hash_opts,
+ .set_rxfh_fields = fbnic_set_rss_hash_opts,
+ .create_rxfh_context = fbnic_create_rxfh_context,
+ .modify_rxfh_context = fbnic_modify_rxfh_context,
+ .remove_rxfh_context = fbnic_remove_rxfh_context,
+ .get_channels = fbnic_get_channels,
+ .set_channels = fbnic_set_channels,
+ .get_ts_info = fbnic_get_ts_info,
+ .get_ts_stats = fbnic_get_ts_stats,
+ .get_link_ksettings = fbnic_phylink_ethtool_ksettings_get,
+ .get_fec_stats = fbnic_get_fec_stats,
+ .get_fecparam = fbnic_phylink_get_fecparam,
+ .get_module_eeprom_by_page = fbnic_get_module_eeprom_by_page,
+ .get_eth_phy_stats = fbnic_get_eth_phy_stats,
+ .get_eth_mac_stats = fbnic_get_eth_mac_stats,
+ .get_eth_ctrl_stats = fbnic_get_eth_ctrl_stats,
+ .get_rmon_stats = fbnic_get_rmon_stats,
};
void fbnic_set_ethtool_ops(struct net_device *dev)
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
index 8f7a2a19ddf8..d8d9b6cfde82 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
@@ -17,11 +17,29 @@ static void __fbnic_mbx_wr_desc(struct fbnic_dev *fbd, int mbx_idx,
{
u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
+ /* Write the upper 32b and then the lower 32b. Doing this allows the
+ * FW to read lower, upper, lower and verify that the state of the
+ * descriptor wasn't changed mid-transaction.
+ */
fw_wr32(fbd, desc_offset + 1, upper_32_bits(desc));
fw_wrfl(fbd);
fw_wr32(fbd, desc_offset, lower_32_bits(desc));
}
+static void __fbnic_mbx_invalidate_desc(struct fbnic_dev *fbd, int mbx_idx,
+ int desc_idx, u32 desc)
+{
+ u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
+
+ /* For initialization we write the lower 32b of the descriptor first.
+ * This way we can set the state to mark it invalid before we clear the
+ * upper 32b.
+ */
+ fw_wr32(fbd, desc_offset, desc);
+ fw_wrfl(fbd);
+ fw_wr32(fbd, desc_offset + 1, 0);
+}
+
static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx)
{
u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
@@ -33,29 +51,41 @@ static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx)
return desc;
}
-static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+static void fbnic_mbx_reset_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
int desc_idx;
+ /* Disable DMA transactions from the device and flush any
+ * transactions triggered during cleanup.
+ */
+ switch (mbx_idx) {
+ case FBNIC_IPC_MBX_RX_IDX:
+ wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
+ FBNIC_PUL_OB_TLP_HDR_AW_CFG_FLUSH);
+ break;
+ case FBNIC_IPC_MBX_TX_IDX:
+ wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
+ FBNIC_PUL_OB_TLP_HDR_AR_CFG_FLUSH);
+ break;
+ }
+
+ wrfl(fbd);
+
/* Initialize first descriptor to all 0s. Doing this gives us a
* solid stop for the firmware to hit when it is done looping
* through the ring.
*/
- __fbnic_mbx_wr_desc(fbd, mbx_idx, 0, 0);
-
- fw_wrfl(fbd);
+ __fbnic_mbx_invalidate_desc(fbd, mbx_idx, 0, 0);
/* We then fill the rest of the ring, starting at the end and moving
* back toward descriptor 0, with skip descriptors that have neither
* length nor address, telling the firmware it can skip past them to
* the one we initialized to 0.
*/
- for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;) {
- __fbnic_mbx_wr_desc(fbd, mbx_idx, desc_idx,
- FBNIC_IPC_MBX_DESC_FW_CMPL |
- FBNIC_IPC_MBX_DESC_HOST_CMPL);
- fw_wrfl(fbd);
- }
+ for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;)
+ __fbnic_mbx_invalidate_desc(fbd, mbx_idx, desc_idx,
+ FBNIC_IPC_MBX_DESC_FW_CMPL |
+ FBNIC_IPC_MBX_DESC_HOST_CMPL);
}
void fbnic_mbx_init(struct fbnic_dev *fbd)
@@ -65,6 +95,9 @@ void fbnic_mbx_init(struct fbnic_dev *fbd)
/* Initialize lock to protect Tx ring */
spin_lock_init(&fbd->fw_tx_lock);
+ /* Reset FW Capabilities */
+ memset(&fbd->fw_cap, 0, sizeof(fbd->fw_cap));
+
/* Reinitialize mailbox memory */
for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
memset(&fbd->mbx[i], 0, sizeof(struct fbnic_fw_mbx));
@@ -76,7 +109,7 @@ void fbnic_mbx_init(struct fbnic_dev *fbd)
wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
- fbnic_mbx_init_desc_ring(fbd, i);
+ fbnic_mbx_reset_desc_ring(fbd, i);
}
static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx,
@@ -97,11 +130,8 @@ static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx,
return -EBUSY;
addr = dma_map_single(fbd->dev, msg, PAGE_SIZE, direction);
- if (dma_mapping_error(fbd->dev, addr)) {
- free_page((unsigned long)msg);
-
+ if (dma_mapping_error(fbd->dev, addr))
return -ENOSPC;
- }
mbx->buf_info[tail].msg = msg;
mbx->buf_info[tail].addr = addr;
@@ -141,7 +171,7 @@ static void fbnic_mbx_clean_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
int i;
- fbnic_mbx_init_desc_ring(fbd, mbx_idx);
+ fbnic_mbx_reset_desc_ring(fbd, mbx_idx);
for (i = FBNIC_IPC_MBX_DESC_LEN; i--;)
fbnic_mbx_unmap_and_free_msg(fbd, mbx_idx, i);
@@ -171,7 +201,7 @@ static int fbnic_mbx_alloc_rx_msgs(struct fbnic_dev *fbd)
return -ENODEV;
/* Fill all but 1 unused descriptors in the Rx queue. */
- count = (head - tail - 1) % FBNIC_IPC_MBX_DESC_LEN;
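+ /* Masking with (len - 1) stays correct even if head has wrapped
+ * below tail; this relies on the descriptor ring length being a
+ * power of two.
+ */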
+ count = (head - tail - 1) & (FBNIC_IPC_MBX_DESC_LEN - 1);
while (!err && count--) {
struct fbnic_tlv_msg *msg;
@@ -207,6 +237,44 @@ static int fbnic_mbx_map_tlv_msg(struct fbnic_dev *fbd,
return err;
}
+static int fbnic_mbx_set_cmpl_slot(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data)
+{
+ struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
+ int free = -EXFULL;
+ int i;
+
+ if (!tx_mbx->ready)
+ return -ENODEV;
+
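+ /* Find a free slot while rejecting a duplicate outstanding request
+ * of the same message type.
+ */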
+ for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
+ if (!fbd->cmpl_data[i])
+ free = i;
+ else if (fbd->cmpl_data[i]->msg_type == cmpl_data->msg_type)
+ return -EEXIST;
+ }
+
+ if (free == -EXFULL)
+ return -EXFULL;
+
+ fbd->cmpl_data[free] = cmpl_data;
+
+ return 0;
+}
+
+static void fbnic_mbx_clear_cmpl_slot(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data)
+{
+ int i;
+
+ for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
+ if (fbd->cmpl_data[i] == cmpl_data) {
+ fbd->cmpl_data[i] = NULL;
+ break;
+ }
+ }
+}
+
static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd)
{
struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
@@ -228,6 +296,89 @@ static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd)
tx_mbx->head = head;
}
+int fbnic_mbx_set_cmpl(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data)
+{
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&fbd->fw_tx_lock, flags);
+ err = fbnic_mbx_set_cmpl_slot(fbd, cmpl_data);
+ spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
+
+ return err;
+}
+
+static int fbnic_mbx_map_req_w_cmpl(struct fbnic_dev *fbd,
+ struct fbnic_tlv_msg *msg,
+ struct fbnic_fw_completion *cmpl_data)
+{
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&fbd->fw_tx_lock, flags);
+ if (cmpl_data) {
+ err = fbnic_mbx_set_cmpl_slot(fbd, cmpl_data);
+ if (err)
+ goto unlock_mbx;
+ }
+
+ err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg,
+ le16_to_cpu(msg->hdr.len) * sizeof(u32), 1);
+
+ /* If we successfully reserved a completion slot but mapping the
+ * message failed, clear the completion data for the next caller.
+ */
+ if (err && cmpl_data)
+ fbnic_mbx_clear_cmpl_slot(fbd, cmpl_data);
+
+unlock_mbx:
+ spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
+
+ return err;
+}
+
+void fbnic_mbx_clear_cmpl(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *fw_cmpl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fbd->fw_tx_lock, flags);
+ fbnic_mbx_clear_cmpl_slot(fbd, fw_cmpl);
+ spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
+}
+
+static void fbnic_fw_release_cmpl_data(struct kref *kref)
+{
+ struct fbnic_fw_completion *cmpl_data;
+
+ cmpl_data = container_of(kref, struct fbnic_fw_completion,
+ ref_count);
+ kfree(cmpl_data);
+}
+
+static struct fbnic_fw_completion *
+fbnic_fw_get_cmpl_by_type(struct fbnic_dev *fbd, u32 msg_type)
+{
+ struct fbnic_fw_completion *cmpl_data = NULL;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&fbd->fw_tx_lock, flags);
+ for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
+ if (fbd->cmpl_data[i] &&
+ fbd->cmpl_data[i]->msg_type == msg_type) {
+ cmpl_data = fbd->cmpl_data[i];
+ kref_get(&cmpl_data->ref_count);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
+
+ return cmpl_data;
+}
+
/**
* fbnic_fw_xmit_simple_msg - Transmit a simple single TLV message w/o data
* @fbd: FBNIC device structure
@@ -235,11 +386,11 @@ static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd)
*
* Return:
* One of the following values:
- * -EOPNOTSUPP: Is not ASIC so mailbox is not supported
- * -ENODEV: Device I/O error
- * -ENOMEM: Failed to allocate message
- * -EBUSY: No space in mailbox
- * -ENOSPC: DMA mapping failed
+ * -EOPNOTSUPP: Is not ASIC so mailbox is not supported
+ * -ENODEV: Device I/O error
+ * -ENOMEM: Failed to allocate message
+ * -EBUSY: No space in mailbox
+ * -ENOSPC: DMA mapping failed
*
* This function sends a single TLV header indicating the host wants to take
* some action. However there are no other side effects which means that any
@@ -265,67 +416,41 @@ static int fbnic_fw_xmit_simple_msg(struct fbnic_dev *fbd, u32 msg_type)
return err;
}
-/**
- * fbnic_fw_xmit_cap_msg - Allocate and populate a FW capabilities message
- * @fbd: FBNIC device structure
- *
- * Return: NULL on failure to allocate, error pointer on error, or pointer
- * to new TLV test message.
- *
- * Sends a single TLV header indicating the host wants the firmware to
- * confirm the capabilities and version.
- **/
-static int fbnic_fw_xmit_cap_msg(struct fbnic_dev *fbd)
-{
- int err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ);
-
- /* Return 0 if we are not calling this on ASIC */
- return (err == -EOPNOTSUPP) ? 0 : err;
-}
-
-static void fbnic_mbx_postinit_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
- /* This is a one time init, so just exit if it is completed */
- if (mbx->ready)
- return;
-
mbx->ready = true;
switch (mbx_idx) {
case FBNIC_IPC_MBX_RX_IDX:
+ /* Enable DMA writes from the device */
+ wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
+ FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME);
+
/* Make sure we have a page for the FW to write to */
fbnic_mbx_alloc_rx_msgs(fbd);
break;
case FBNIC_IPC_MBX_TX_IDX:
- /* Force version to 1 if we successfully requested an update
- * from the firmware. This should be overwritten once we get
- * the actual version from the firmware in the capabilities
- * request message.
- */
- if (!fbnic_fw_xmit_cap_msg(fbd) &&
- !fbd->fw_cap.running.mgmt.version)
- fbd->fw_cap.running.mgmt.version = 1;
+ /* Enable DMA reads from the device */
+ wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
+ FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME);
break;
}
}
-static void fbnic_mbx_postinit(struct fbnic_dev *fbd)
+static bool fbnic_mbx_event(struct fbnic_dev *fbd)
{
- int i;
-
- /* We only need to do this on the first interrupt following init.
+ /* We only need to do this on the first interrupt following reset.
* This primes the mailbox so that we will have cleared all the
* skip descriptors.
*/
if (!(rd32(fbd, FBNIC_INTR_STATUS(0)) & (1u << FBNIC_FW_MSIX_ENTRY)))
- return;
+ return false;
wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
- for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
- fbnic_mbx_postinit_desc_ring(fbd, i);
+ return true;
}
/**
@@ -370,6 +495,11 @@ int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership)
fbd->last_heartbeat_request = req_time;
+ /* Set prev_firmware_time to 0 to avoid triggering firmware crash
+ * detection until we receive the second uptime in a heartbeat resp.
+ */
+ fbd->prev_firmware_time = 0;
+
/* Set heartbeat detection based on if we are taking ownership */
fbd->fw_heartbeat_enabled = take_ownership;
@@ -403,6 +533,7 @@ static const struct fbnic_tlv_index fbnic_fw_cap_resp_index[] = {
FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_UEFI_VERSION),
FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR,
FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION),
FBNIC_TLV_ATTR_LAST
};
@@ -437,29 +568,25 @@ static int fbnic_fw_parse_bmc_addrs(u8 bmc_mac_addr[][ETH_ALEN],
static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
{
- u32 active_slot = 0, all_multi = 0;
+ u32 all_multi = 0, version = 0;
struct fbnic_dev *fbd = opaque;
- u32 speed = 0, fec = 0;
- size_t commit_size = 0;
bool bmc_present;
int err;
- get_unsigned_result(FBNIC_FW_CAP_RESP_VERSION,
- fbd->fw_cap.running.mgmt.version);
-
+ version = fta_get_uint(results, FBNIC_FW_CAP_RESP_VERSION);
+ fbd->fw_cap.running.mgmt.version = version;
if (!fbd->fw_cap.running.mgmt.version)
return -EINVAL;
- if (fbd->fw_cap.running.mgmt.version < MIN_FW_VERSION_CODE) {
+ if (fbd->fw_cap.running.mgmt.version < MIN_FW_VER_CODE) {
+ char required_ver[FBNIC_FW_VER_MAX_SIZE];
char running_ver[FBNIC_FW_VER_MAX_SIZE];
fbnic_mk_fw_ver_str(fbd->fw_cap.running.mgmt.version,
running_ver);
- dev_err(fbd->dev, "Device firmware version(%s) is older than minimum required version(%02d.%02d.%02d)\n",
- running_ver,
- MIN_FW_MAJOR_VERSION,
- MIN_FW_MINOR_VERSION,
- MIN_FW_BUILD_VERSION);
+ fbnic_mk_fw_ver_str(MIN_FW_VER_CODE, required_ver);
+ dev_err(fbd->dev, "Device firmware version(%s) is older than minimum required version(%s)\n",
+ running_ver, required_ver);
/* Disable TX mailbox to prevent card use until firmware is
* updated.
*/
@@ -467,43 +594,41 @@ static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
return -EINVAL;
}
- get_string_result(FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR, commit_size,
- fbd->fw_cap.running.mgmt.commit,
- FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
- if (!commit_size)
+ if (fta_get_str(results, FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR,
+ fbd->fw_cap.running.mgmt.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE) <= 0)
dev_warn(fbd->dev, "Firmware did not send mgmt commit!\n");
- get_unsigned_result(FBNIC_FW_CAP_RESP_STORED_VERSION,
- fbd->fw_cap.stored.mgmt.version);
- get_string_result(FBNIC_FW_CAP_RESP_STORED_COMMIT_STR, commit_size,
- fbd->fw_cap.stored.mgmt.commit,
- FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
-
- get_unsigned_result(FBNIC_FW_CAP_RESP_CMRT_VERSION,
- fbd->fw_cap.running.bootloader.version);
- get_string_result(FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR, commit_size,
- fbd->fw_cap.running.bootloader.commit,
- FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
-
- get_unsigned_result(FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION,
- fbd->fw_cap.stored.bootloader.version);
- get_string_result(FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR, commit_size,
- fbd->fw_cap.stored.bootloader.commit,
- FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
-
- get_unsigned_result(FBNIC_FW_CAP_RESP_UEFI_VERSION,
- fbd->fw_cap.stored.undi.version);
- get_string_result(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR, commit_size,
- fbd->fw_cap.stored.undi.commit,
- FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
-
- get_unsigned_result(FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT, active_slot);
- fbd->fw_cap.active_slot = active_slot;
-
- get_unsigned_result(FBNIC_FW_CAP_RESP_FW_LINK_SPEED, speed);
- get_unsigned_result(FBNIC_FW_CAP_RESP_FW_LINK_FEC, fec);
- fbd->fw_cap.link_speed = speed;
- fbd->fw_cap.link_fec = fec;
+ version = fta_get_uint(results, FBNIC_FW_CAP_RESP_STORED_VERSION);
+ fbd->fw_cap.stored.mgmt.version = version;
+ fta_get_str(results, FBNIC_FW_CAP_RESP_STORED_COMMIT_STR,
+ fbd->fw_cap.stored.mgmt.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ version = fta_get_uint(results, FBNIC_FW_CAP_RESP_CMRT_VERSION);
+ fbd->fw_cap.running.bootloader.version = version;
+ fta_get_str(results, FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR,
+ fbd->fw_cap.running.bootloader.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ version = fta_get_uint(results, FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION);
+ fbd->fw_cap.stored.bootloader.version = version;
+ fta_get_str(results, FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR,
+ fbd->fw_cap.stored.bootloader.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ version = fta_get_uint(results, FBNIC_FW_CAP_RESP_UEFI_VERSION);
+ fbd->fw_cap.stored.undi.version = version;
+ fta_get_str(results, FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR,
+ fbd->fw_cap.stored.undi.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ fbd->fw_cap.active_slot =
+ fta_get_uint(results, FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT);
+ fbd->fw_cap.link_speed =
+ fta_get_uint(results, FBNIC_FW_CAP_RESP_FW_LINK_SPEED);
+ fbd->fw_cap.link_fec =
+ fta_get_uint(results, FBNIC_FW_CAP_RESP_FW_LINK_FEC);
bmc_present = !!results[FBNIC_FW_CAP_RESP_BMC_PRESENT];
if (bmc_present) {
@@ -518,7 +643,8 @@ static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
if (err)
return err;
- get_unsigned_result(FBNIC_FW_CAP_RESP_BMC_ALL_MULTI, all_multi);
+ all_multi =
+ fta_get_uint(results, FBNIC_FW_CAP_RESP_BMC_ALL_MULTI);
} else {
memset(fbd->fw_cap.bmc_mac_addr, 0,
sizeof(fbd->fw_cap.bmc_mac_addr));
@@ -529,10 +655,17 @@ static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
if (results[FBNIC_FW_CAP_RESP_BMC_ALL_MULTI] || !bmc_present)
fbd->fw_cap.all_multi = all_multi;
+ fbd->fw_cap.anti_rollback_version =
+ fta_get_uint(results, FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION);
+
+ /* Always assume we need a BMC reinit */
+ fbd->fw_cap.need_bmc_tcam_reinit = true;
+
return 0;
}
static const struct fbnic_tlv_index fbnic_ownership_resp_index[] = {
+ FBNIC_TLV_ATTR_U64(FBNIC_FW_OWNERSHIP_TIME),
FBNIC_TLV_ATTR_LAST
};
@@ -544,10 +677,14 @@ static int fbnic_fw_parse_ownership_resp(void *opaque,
/* Count the ownership response as a heartbeat reply */
fbd->last_heartbeat_response = jiffies;
+ /* Capture firmware time for logging and firmware crash check */
+ fbd->firmware_time = fta_get_uint(results, FBNIC_FW_OWNERSHIP_TIME);
+
return 0;
}
static const struct fbnic_tlv_index fbnic_heartbeat_resp_index[] = {
+ FBNIC_TLV_ATTR_U64(FBNIC_FW_HEARTBEAT_UPTIME),
FBNIC_TLV_ATTR_LAST
};
@@ -558,6 +695,9 @@ static int fbnic_fw_parse_heartbeat_resp(void *opaque,
fbd->last_heartbeat_response = jiffies;
+ /* Capture firmware time for logging and firmware crash check */
+ fbd->firmware_time = fta_get_uint(results, FBNIC_FW_HEARTBEAT_UPTIME);
+
return 0;
}
@@ -579,6 +719,7 @@ static int fbnic_fw_xmit_heartbeat_message(struct fbnic_dev *fbd)
goto free_message;
fbd->last_heartbeat_request = req_time;
+ fbd->prev_firmware_time = fbd->firmware_time;
return err;
@@ -639,7 +780,8 @@ void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd)
return;
/* Was the last heartbeat response long time ago? */
- if (!fbnic_fw_heartbeat_current(fbd)) {
+ if (!fbnic_fw_heartbeat_current(fbd) ||
+ fbd->firmware_time < fbd->prev_firmware_time) {
dev_warn(fbd->dev,
"Firmware did not respond to heartbeat message\n");
fbd->fw_heartbeat_enabled = false;
@@ -651,6 +793,769 @@ void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd)
dev_warn(fbd->dev, "Failed to send heartbeat message\n");
}
+/**
+ * fbnic_fw_xmit_coredump_info_msg - Create and transmit a coredump info message
+ * @fbd: FBNIC device structure
+ * @cmpl_data: Structure to store info in
+ * @force: Force coredump event if one hasn't already occurred
+ *
+ * Return: zero on success, negative errno on failure
+ *
+ * Asks the FW for info related to the coredump. If a coredump doesn't
+ * exist, one can optionally be forced by setting @force to true.
+ */
+int fbnic_fw_xmit_coredump_info_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ bool force)
+{
+ struct fbnic_tlv_msg *msg;
+ int err = 0;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_COREDUMP_GET_INFO_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ if (force) {
+ err = fbnic_tlv_attr_put_flag(msg, FBNIC_FW_COREDUMP_REQ_INFO_CREATE);
+ if (err)
+ goto free_msg;
+ }
+
+ err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
+ if (err)
+ goto free_msg;
+
+ return 0;
+
+free_msg:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_coredump_info_resp_index[] = {
+ FBNIC_TLV_ATTR_FLAG(FBNIC_FW_COREDUMP_INFO_AVAILABLE),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_COREDUMP_INFO_SIZE),
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_COREDUMP_INFO_ERROR),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int
+fbnic_fw_parse_coredump_info_resp(void *opaque, struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_dev *fbd = opaque;
+ u32 msg_type;
+ s32 err;
+
+ /* Verify we have a completion pointer to provide with data */
+ msg_type = FBNIC_TLV_MSG_ID_COREDUMP_GET_INFO_RESP;
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
+ if (!cmpl_data)
+ return -ENOSPC;
+
+ err = fta_get_sint(results, FBNIC_FW_COREDUMP_INFO_ERROR);
+ if (err)
+ goto msg_err;
+
+ if (!results[FBNIC_FW_COREDUMP_INFO_AVAILABLE]) {
+ err = -ENOENT;
+ goto msg_err;
+ }
+
+ cmpl_data->u.coredump_info.size =
+ fta_get_uint(results, FBNIC_FW_COREDUMP_INFO_SIZE);
+
+msg_err:
+ cmpl_data->result = err;
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return err;
+}
+
+/**
+ * fbnic_fw_xmit_coredump_read_msg - Create and transmit a coredump read request
+ * @fbd: FBNIC device structure
+ * @cmpl_data: Completion struct to store coredump
+ * @offset: Offset into coredump requested
+ * @length: Length of section of coredump to fetch
+ *
+ * Return: zero on success, negative errno on failure
+ *
+ * Asks the firmware to provide a section of the coredump back in a message.
+ * The response will have an offset and size matching the values provided.
+ */
+int fbnic_fw_xmit_coredump_read_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ u32 offset, u32 length)
+{
+ struct fbnic_tlv_msg *msg;
+ int err = 0;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_COREDUMP_READ_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ if (offset) {
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_COREDUMP_READ_OFFSET,
+ offset);
+ if (err)
+ goto free_message;
+ }
+
+ if (length) {
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_COREDUMP_READ_LENGTH,
+ length);
+ if (err)
+ goto free_message;
+ }
+
+ err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
+ if (err)
+ goto free_message;
+
+ return 0;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_coredump_resp_index[] = {
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_COREDUMP_READ_OFFSET),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_COREDUMP_READ_LENGTH),
+ FBNIC_TLV_ATTR_RAW_DATA(FBNIC_FW_COREDUMP_READ_DATA),
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_COREDUMP_READ_ERROR),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_coredump_resp(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ u32 index, last_offset, last_length;
+ struct fbnic_dev *fbd = opaque;
+ struct fbnic_tlv_msg *data_hdr;
+ u32 length, offset;
+ u32 msg_type;
+ s32 err;
+
+ /* Verify we have a completion pointer to provide with data */
+ msg_type = FBNIC_TLV_MSG_ID_COREDUMP_READ_RESP;
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
+ if (!cmpl_data)
+ return -ENOSPC;
+
+ err = fta_get_sint(results, FBNIC_FW_COREDUMP_READ_ERROR);
+ if (err)
+ goto msg_err;
+
+ data_hdr = results[FBNIC_FW_COREDUMP_READ_DATA];
+ if (!data_hdr) {
+ err = -ENODATA;
+ goto msg_err;
+ }
+
+ offset = fta_get_uint(results, FBNIC_FW_COREDUMP_READ_OFFSET);
+ length = fta_get_uint(results, FBNIC_FW_COREDUMP_READ_LENGTH);
+
+ if (length > le16_to_cpu(data_hdr->hdr.len) - sizeof(u32)) {
+ dev_err(fbd->dev, "length greater than size of message\n");
+ err = -EINVAL;
+ goto msg_err;
+ }
+
+ /* Only the last offset can have a length != stride */
+ last_length =
+ (cmpl_data->u.coredump.size % cmpl_data->u.coredump.stride) ? :
+ cmpl_data->u.coredump.stride;
+ last_offset = cmpl_data->u.coredump.size - last_length;
+
+ /* Verify offset and length */
+ if (offset % cmpl_data->u.coredump.stride || offset > last_offset) {
+ dev_err(fbd->dev, "offset %d out of range\n", offset);
+ err = -EINVAL;
+ } else if (length != ((offset == last_offset) ?
+ last_length : cmpl_data->u.coredump.stride)) {
+ dev_err(fbd->dev, "length %d out of range for offset %d\n",
+ length, offset);
+ err = -EINVAL;
+ }
+ if (err)
+ goto msg_err;
+
+ /* If data pointer is NULL it is already filled, just skip the copy */
+ index = offset / cmpl_data->u.coredump.stride;
+ if (!cmpl_data->u.coredump.data[index])
+ goto msg_err;
+
+ /* Copy data and mark index filled by setting pointer to NULL */
+ memcpy(cmpl_data->u.coredump.data[index],
+ fbnic_tlv_attr_get_value_ptr(data_hdr), length);
+ cmpl_data->u.coredump.data[index] = NULL;
+
+msg_err:
+ cmpl_data->result = err;
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return err;
+}
+
+int fbnic_fw_xmit_fw_start_upgrade(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ unsigned int id, unsigned int len)
+{
+ struct fbnic_tlv_msg *msg;
+ int err;
+
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ if (!len)
+ return -EINVAL;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_START_UPGRADE_SECTION, id);
+ if (err)
+ goto free_message;
+
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_START_UPGRADE_IMAGE_LENGTH,
+ len);
+ if (err)
+ goto free_message;
+
+ err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
+ if (err)
+ goto free_message;
+
+ return 0;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_fw_start_upgrade_resp_index[] = {
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_START_UPGRADE_ERROR),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_fw_start_upgrade_resp(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_dev *fbd = opaque;
+ u32 msg_type;
+ s32 err;
+
+ /* Verify we have a completion pointer */
+ msg_type = FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ;
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
+ if (!cmpl_data)
+ return -ENOSPC;
+
+ /* Check for errors */
+ err = fta_get_sint(results, FBNIC_FW_START_UPGRADE_ERROR);
+
+ cmpl_data->result = err;
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return 0;
+}
+
+int fbnic_fw_xmit_fw_write_chunk(struct fbnic_dev *fbd,
+ const u8 *data, u32 offset, u16 length,
+ int cancel_error)
+{
+ struct fbnic_tlv_msg *msg;
+ int err;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_RESP);
+ if (!msg)
+ return -ENOMEM;
+
+ /* Report error to FW to cancel upgrade */
+ if (cancel_error) {
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_ERROR,
+ cancel_error);
+ if (err)
+ goto free_message;
+ }
+
+ if (data) {
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_OFFSET,
+ offset);
+ if (err)
+ goto free_message;
+
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_LENGTH,
+ length);
+ if (err)
+ goto free_message;
+
+ err = fbnic_tlv_attr_put_value(msg, FBNIC_FW_WRITE_CHUNK_DATA,
+ data + offset, length);
+ if (err)
+ goto free_message;
+ }
+
+ err = fbnic_mbx_map_tlv_msg(fbd, msg);
+ if (err)
+ goto free_message;
+
+ return 0;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_fw_write_chunk_req_index[] = {
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_WRITE_CHUNK_OFFSET),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_WRITE_CHUNK_LENGTH),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_fw_write_chunk_req(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_dev *fbd = opaque;
+ u32 msg_type;
+ u32 offset;
+ u32 length;
+
+ /* Verify we have a completion pointer */
+ msg_type = FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ;
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
+ if (!cmpl_data)
+ return -ENOSPC;
+
+ /* Pull length/offset pair and mark it as complete */
+ offset = fta_get_uint(results, FBNIC_FW_WRITE_CHUNK_OFFSET);
+ length = fta_get_uint(results, FBNIC_FW_WRITE_CHUNK_LENGTH);
+ cmpl_data->u.fw_update.offset = offset;
+ cmpl_data->u.fw_update.length = length;
+
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return 0;
+}
+
+static const struct fbnic_tlv_index fbnic_fw_finish_upgrade_req_index[] = {
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_FINISH_UPGRADE_ERROR),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_fw_finish_upgrade_req(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_dev *fbd = opaque;
+ u32 msg_type;
+ s32 err;
+
+ /* Verify we have a completion pointer */
+ msg_type = FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ;
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
+ if (!cmpl_data)
+ return -ENOSPC;
+
+ /* Check for errors */
+ err = fta_get_sint(results, FBNIC_FW_FINISH_UPGRADE_ERROR);
+
+ /* Close out update by incrementing offset by length which should
+ * match the total size of the component. Set length to 0 since no
+ * new chunks will be requested.
+ */
+ cmpl_data->u.fw_update.offset += cmpl_data->u.fw_update.length;
+ cmpl_data->u.fw_update.length = 0;
+
+ cmpl_data->result = err;
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return 0;
+}
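
Taken together, the transmit helpers and parsers above imply a firmware-driven transfer; the sequencing below is reconstructed from the attribute tables and handlers, not stated normatively anywhere in the patch:

    host -> FW   FW_START_UPGRADE_REQ   (SECTION, IMAGE_LENGTH)
    FW -> host   FW_START_UPGRADE_RESP  (ERROR)
    FW -> host   FW_WRITE_CHUNK_REQ     (OFFSET, LENGTH)      repeated per chunk
    host -> FW   FW_WRITE_CHUNK_RESP    (OFFSET, LENGTH, DATA; or ERROR to cancel)
    FW -> host   FW_FINISH_UPGRADE_REQ  (ERROR)

fbnic_fw_parse_fw_finish_upgrade_req() then advances the completion's offset by the final length and zeroes the length, signalling the waiter that no further chunks will be requested.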
+
+/**
+ * fbnic_fw_xmit_qsfp_read_msg - Transmit a QSFP read request
+ * @fbd: FBNIC device structure
+ * @cmpl_data: Structure to store EEPROM response in
+ * @page: Refers to page number on page enabled QSFP modules
+ * @bank: Refers to a collection of pages
+ * @offset: Offset into QSFP EEPROM requested
+ * @length: Length of section of QSFP EEPROM to fetch
+ *
+ * Return: zero on success, negative value on failure
+ *
+ * Asks the firmware to provide a section of the QSFP EEPROM back in a
+ * message. The response will have an offset and size matching the values
+ * provided.
+ */
+int fbnic_fw_xmit_qsfp_read_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ u32 page, u32 bank, u32 offset, u32 length)
+{
+ struct fbnic_tlv_msg *msg;
+ int err = 0;
+
+ if (!length || length > TLV_MAX_DATA)
+ return -EINVAL;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_QSFP_READ_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_QSFP_BANK, bank);
+ if (err)
+ goto free_message;
+
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_QSFP_PAGE, page);
+ if (err)
+ goto free_message;
+
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_QSFP_OFFSET, offset);
+ if (err)
+ goto free_message;
+
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_QSFP_LENGTH, length);
+ if (err)
+ goto free_message;
+
+ err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
+ if (err)
+ goto free_message;
+
+ return 0;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
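
The response parser below checks the returned bank/page/offset/length against the values stored in the completion, so a caller is expected to fill in u.qsfp before transmitting. A minimal caller sketch under that assumption, using the completion helpers declared in fbnic_fw.h later in this patch; the function name, the fixed bank/page/offset of 0, the 2 second timeout, and the exact wait/cleanup ordering are illustrative, not taken from the driver:

static int example_qsfp_read(struct fbnic_dev *fbd, u8 *buf, u32 len)
{
	struct fbnic_fw_completion *cmpl;
	int err;

	/* Reserve room for the EEPROM data behind the completion */
	cmpl = __fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_QSFP_READ_RESP, len);
	if (!cmpl)
		return -ENOMEM;

	/* The response parser validates the reply against these fields */
	cmpl->u.qsfp.bank = 0;
	cmpl->u.qsfp.page = 0;
	cmpl->u.qsfp.offset = 0;
	cmpl->u.qsfp.length = len;

	err = fbnic_fw_xmit_qsfp_read_msg(fbd, cmpl, 0, 0, 0, len);
	if (err)
		goto exit_free;

	if (!wait_for_completion_timeout(&cmpl->done, 2 * HZ)) {
		err = -ETIMEDOUT;
		goto exit_cleanup;
	}

	err = cmpl->result;
	if (!err)
		memcpy(buf, cmpl->u.qsfp.data, len);

exit_cleanup:
	fbnic_mbx_clear_cmpl(fbd, cmpl);
exit_free:
	fbnic_fw_put_cmpl(cmpl);
	return err;
}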
+
+static const struct fbnic_tlv_index fbnic_qsfp_read_resp_index[] = {
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_QSFP_BANK),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_QSFP_PAGE),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_QSFP_OFFSET),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_QSFP_LENGTH),
+ FBNIC_TLV_ATTR_RAW_DATA(FBNIC_FW_QSFP_DATA),
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_QSFP_ERROR),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_qsfp_read_resp(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_dev *fbd = opaque;
+ struct fbnic_tlv_msg *data_hdr;
+ u32 length, offset, page, bank;
+ u8 *data;
+ s32 err;
+
+ /* Verify we have a completion pointer to provide with data */
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd,
+ FBNIC_TLV_MSG_ID_QSFP_READ_RESP);
+ if (!cmpl_data)
+ return -ENOSPC;
+
+ bank = fta_get_uint(results, FBNIC_FW_QSFP_BANK);
+ if (bank != cmpl_data->u.qsfp.bank) {
+ dev_warn(fbd->dev, "bank not equal to bank requested: %d vs %d\n",
+ bank, cmpl_data->u.qsfp.bank);
+ err = -EINVAL;
+ goto msg_err;
+ }
+
+ page = fta_get_uint(results, FBNIC_FW_QSFP_PAGE);
+ if (page != cmpl_data->u.qsfp.page) {
+ dev_warn(fbd->dev, "page not equal to page requested: %d vs %d\n",
+ page, cmpl_data->u.qsfp.page);
+ err = -EINVAL;
+ goto msg_err;
+ }
+
+ offset = fta_get_uint(results, FBNIC_FW_QSFP_OFFSET);
+ length = fta_get_uint(results, FBNIC_FW_QSFP_LENGTH);
+
+ if (length != cmpl_data->u.qsfp.length ||
+ offset != cmpl_data->u.qsfp.offset) {
+ dev_warn(fbd->dev,
+ "offset/length not equal to size requested: %d/%d vs %d/%d\n",
+ offset, length,
+ cmpl_data->u.qsfp.offset, cmpl_data->u.qsfp.length);
+ err = -EINVAL;
+ goto msg_err;
+ }
+
+ err = fta_get_sint(results, FBNIC_FW_QSFP_ERROR);
+ if (err)
+ goto msg_err;
+
+ data_hdr = results[FBNIC_FW_QSFP_DATA];
+ if (!data_hdr) {
+ err = -ENODATA;
+ goto msg_err;
+ }
+
+ /* Copy data */
+ data = fbnic_tlv_attr_get_value_ptr(data_hdr);
+ memcpy(cmpl_data->u.qsfp.data, data, length);
+msg_err:
+ cmpl_data->result = err;
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return err;
+}
+
+/**
+ * fbnic_fw_xmit_tsene_read_msg - Create and transmit a sensor read request
+ * @fbd: FBNIC device structure
+ * @cmpl_data: Completion data structure to store sensor response
+ *
+ * Asks the firmware to provide an update with the latest sensor data.
+ * The response will contain temperature and voltage readings.
+ *
+ * Return: 0 on success, negative error value on failure
+ */
+int fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data)
+{
+ struct fbnic_tlv_msg *msg;
+ int err;
+
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_TSENE_READ_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
+ if (err)
+ goto free_message;
+
+ return 0;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_tsene_read_resp_index[] = {
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_THERM),
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_VOLT),
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_ERROR),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_tsene_read_resp(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_dev *fbd = opaque;
+ s32 err_resp;
+ int err = 0;
+
+ /* Verify we have a completion pointer to provide with data */
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd,
+ FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
+ if (!cmpl_data)
+ return -ENOSPC;
+
+ err_resp = fta_get_sint(results, FBNIC_FW_TSENE_ERROR);
+ if (err_resp)
+ goto msg_err;
+
+ if (!results[FBNIC_FW_TSENE_THERM] || !results[FBNIC_FW_TSENE_VOLT]) {
+ err = -EINVAL;
+ goto msg_err;
+ }
+
+ cmpl_data->u.tsene.millidegrees =
+ fta_get_sint(results, FBNIC_FW_TSENE_THERM);
+ cmpl_data->u.tsene.millivolts =
+ fta_get_sint(results, FBNIC_FW_TSENE_VOLT);
+
+msg_err:
+ cmpl_data->result = err_resp ? : err;
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_fw_log_req_index[] = {
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_LOG_MSEC),
+ FBNIC_TLV_ATTR_U64(FBNIC_FW_LOG_INDEX),
+ FBNIC_TLV_ATTR_STRING(FBNIC_FW_LOG_MSG, FBNIC_FW_LOG_MAX_SIZE),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_LOG_LENGTH),
+ FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_LOG_MSEC_ARRAY),
+ FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_LOG_INDEX_ARRAY),
+ FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_LOG_MSG_ARRAY),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_process_log_array(struct fbnic_tlv_msg **results,
+ u16 length, u16 arr_type_idx,
+ u16 attr_type_idx,
+ struct fbnic_tlv_msg **tlv_array_out)
+{
+ struct fbnic_tlv_msg *attr;
+ int attr_len;
+ int err;
+
+ if (!results[attr_type_idx])
+ return -EINVAL;
+
+ tlv_array_out[0] = results[attr_type_idx];
+
+ if (!length)
+ return 0;
+
+ if (!results[arr_type_idx])
+ return -EINVAL;
+
+ attr = results[arr_type_idx];
+ attr_len = le16_to_cpu(attr->hdr.len) / sizeof(u32) - 1;
+ err = fbnic_tlv_attr_parse_array(&attr[1], attr_len, &tlv_array_out[1],
+ fbnic_fw_log_req_index,
+ attr_type_idx,
+ length);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int fbnic_fw_parse_logs(struct fbnic_dev *fbd,
+ struct fbnic_tlv_msg **msec_tlv,
+ struct fbnic_tlv_msg **index_tlv,
+ struct fbnic_tlv_msg **log_tlv,
+ int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ char log[FBNIC_FW_LOG_MAX_SIZE];
+ ssize_t len;
+ u64 index;
+ u32 msec;
+ int err;
+
+ if (!msec_tlv[i] || !index_tlv[i] || !log_tlv[i]) {
+ dev_warn(fbd->dev, "Received log message with missing attributes!\n");
+ return -EINVAL;
+ }
+
+ index = fbnic_tlv_attr_get_signed(index_tlv[i], 0);
+ msec = fbnic_tlv_attr_get_signed(msec_tlv[i], 0);
+ len = fbnic_tlv_attr_get_string(log_tlv[i], log,
+ FBNIC_FW_LOG_MAX_SIZE);
+ if (len < 0)
+ return len;
+
+ err = fbnic_fw_log_write(fbd, index, msec, log);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int fbnic_fw_parse_log_req(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_tlv_msg *index_tlv[FBNIC_FW_MAX_LOG_HISTORY];
+ struct fbnic_tlv_msg *msec_tlv[FBNIC_FW_MAX_LOG_HISTORY];
+ struct fbnic_tlv_msg *log_tlv[FBNIC_FW_MAX_LOG_HISTORY];
+ struct fbnic_dev *fbd = opaque;
+ u16 length;
+ int err;
+
+ length = fta_get_uint(results, FBNIC_FW_LOG_LENGTH);
+ if (length >= FBNIC_FW_MAX_LOG_HISTORY)
+ return -E2BIG;
+
+ err = fbnic_fw_process_log_array(results, length,
+ FBNIC_FW_LOG_MSEC_ARRAY,
+ FBNIC_FW_LOG_MSEC, msec_tlv);
+ if (err)
+ return err;
+
+ err = fbnic_fw_process_log_array(results, length,
+ FBNIC_FW_LOG_INDEX_ARRAY,
+ FBNIC_FW_LOG_INDEX, index_tlv);
+ if (err)
+ return err;
+
+ err = fbnic_fw_process_log_array(results, length,
+ FBNIC_FW_LOG_MSG_ARRAY,
+ FBNIC_FW_LOG_MSG, log_tlv);
+ if (err)
+ return err;
+
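+	/* Each output array holds the standalone attribute in slot 0 plus
+	 * "length" entries parsed from the history arrays, hence the
+	 * length + 1 below.
+	 */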
+ err = fbnic_fw_parse_logs(fbd, msec_tlv, index_tlv, log_tlv,
+ length + 1);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int fbnic_fw_xmit_send_logs(struct fbnic_dev *fbd, bool enable,
+ bool send_log_history)
+{
+ struct fbnic_tlv_msg *msg;
+ int err;
+
+ if (fbd->fw_cap.running.mgmt.version < MIN_FW_VER_CODE_LOG) {
+ dev_warn(fbd->dev, "Firmware version is too old to support firmware logs!\n");
+ return -EOPNOTSUPP;
+ }
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_LOG_SEND_LOGS_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ if (enable) {
+ err = fbnic_tlv_attr_put_flag(msg, FBNIC_SEND_LOGS);
+ if (err)
+ goto free_message;
+
+ /* Report request for version 1 of logs */
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_SEND_LOGS_VERSION,
+ FBNIC_FW_LOG_VERSION);
+ if (err)
+ goto free_message;
+
+ if (send_log_history) {
+ err = fbnic_tlv_attr_put_flag(msg,
+ FBNIC_SEND_LOGS_HISTORY);
+ if (err)
+ goto free_message;
+ }
+ }
+
+ err = fbnic_mbx_map_tlv_msg(fbd, msg);
+ if (err)
+ goto free_message;
+
+ return 0;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
FBNIC_TLV_PARSER(FW_CAP_RESP, fbnic_fw_cap_resp_index,
fbnic_fw_parse_cap_resp),
@@ -658,6 +1563,29 @@ static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
fbnic_fw_parse_ownership_resp),
FBNIC_TLV_PARSER(HEARTBEAT_RESP, fbnic_heartbeat_resp_index,
fbnic_fw_parse_heartbeat_resp),
+ FBNIC_TLV_PARSER(COREDUMP_GET_INFO_RESP,
+ fbnic_coredump_info_resp_index,
+ fbnic_fw_parse_coredump_info_resp),
+ FBNIC_TLV_PARSER(COREDUMP_READ_RESP, fbnic_coredump_resp_index,
+ fbnic_fw_parse_coredump_resp),
+ FBNIC_TLV_PARSER(FW_START_UPGRADE_RESP,
+ fbnic_fw_start_upgrade_resp_index,
+ fbnic_fw_parse_fw_start_upgrade_resp),
+ FBNIC_TLV_PARSER(FW_WRITE_CHUNK_REQ,
+ fbnic_fw_write_chunk_req_index,
+ fbnic_fw_parse_fw_write_chunk_req),
+ FBNIC_TLV_PARSER(FW_FINISH_UPGRADE_REQ,
+ fbnic_fw_finish_upgrade_req_index,
+ fbnic_fw_parse_fw_finish_upgrade_req),
+ FBNIC_TLV_PARSER(QSFP_READ_RESP,
+ fbnic_qsfp_read_resp_index,
+ fbnic_fw_parse_qsfp_read_resp),
+ FBNIC_TLV_PARSER(TSENE_READ_RESP,
+ fbnic_tsene_read_resp_index,
+ fbnic_fw_parse_tsene_read_resp),
+ FBNIC_TLV_PARSER(LOG_MSG_REQ,
+ fbnic_fw_log_req_index,
+ fbnic_fw_parse_log_req),
FBNIC_TLV_MSG_ERROR
};
@@ -726,7 +1654,7 @@ next_page:
void fbnic_mbx_poll(struct fbnic_dev *fbd)
{
- fbnic_mbx_postinit(fbd);
+ fbnic_mbx_event(fbd);
fbnic_mbx_process_tx_msgs(fbd);
fbnic_mbx_process_rx_msgs(fbd);
@@ -734,60 +1662,222 @@ void fbnic_mbx_poll(struct fbnic_dev *fbd)
int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
{
- struct fbnic_fw_mbx *tx_mbx;
- int attempts = 50;
+ struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
+ unsigned long timeout = jiffies + 10 * HZ + 1;
+ int err, i;
- /* Immediate fail if BAR4 isn't there */
- if (!fbnic_fw_present(fbd))
- return -ENODEV;
+ do {
+ if (!time_is_after_jiffies(timeout))
+ return -ETIMEDOUT;
- tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
- while (!tx_mbx->ready && --attempts) {
/* Force the firmware to trigger an interrupt response to
* avoid the mailbox getting stuck closed if the interrupt
* is reset.
*/
- fbnic_mbx_init_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX);
+ fbnic_mbx_reset_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX);
- msleep(200);
+ /* Immediate fail if BAR4 went away */
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ msleep(20);
+ } while (!fbnic_mbx_event(fbd));
+
+ /* FW has shown signs of life. Enable DMA and start Tx/Rx */
+ for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
+ fbnic_mbx_init_desc_ring(fbd, i);
+
+	/* Request an update from the firmware. This should overwrite
+	 * mgmt.version once we get the actual version back from the
+	 * firmware in the capabilities response message.
+	 */
+ err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ);
+ if (err)
+ goto clean_mbx;
+
+	/* Poll until we get a current management firmware version; use "1"
+	 * to indicate we have entered the polling state and are waiting
+	 * for a response.
+	 */
+ for (fbd->fw_cap.running.mgmt.version = 1;
+ fbd->fw_cap.running.mgmt.version < MIN_FW_VER_CODE;) {
+ if (!tx_mbx->ready)
+ err = -ENODEV;
+ if (err)
+ goto clean_mbx;
+ msleep(20);
fbnic_mbx_poll(fbd);
+
+ /* set err, but wait till mgmt.version check to report it */
+ if (!time_is_after_jiffies(timeout))
+ err = -ETIMEDOUT;
+ }
+
+ return 0;
+clean_mbx:
+ /* Cleanup Rx buffers and disable mailbox */
+ fbnic_mbx_clean(fbd);
+ return err;
+}
+
+static void __fbnic_fw_evict_cmpl(struct fbnic_fw_completion *cmpl_data)
+{
+ cmpl_data->result = -EPIPE;
+ complete(&cmpl_data->done);
+}
+
+static void fbnic_mbx_evict_all_cmpl(struct fbnic_dev *fbd)
+{
+ int i;
+
+ for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
+ struct fbnic_fw_completion *cmpl_data = fbd->cmpl_data[i];
+
+ if (cmpl_data)
+ __fbnic_fw_evict_cmpl(cmpl_data);
}
- return attempts ? 0 : -ETIMEDOUT;
+ memset(fbd->cmpl_data, 0, sizeof(fbd->cmpl_data));
}
void fbnic_mbx_flush_tx(struct fbnic_dev *fbd)
{
+ unsigned long timeout = jiffies + 10 * HZ + 1;
struct fbnic_fw_mbx *tx_mbx;
- int attempts = 50;
- u8 count = 0;
-
- /* Nothing to do if there is no mailbox */
- if (!fbnic_fw_present(fbd))
- return;
+ u8 tail;
 	/* Get the Tx mailbox so we can flush any outstanding messages */
tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
- /* Nothing to do if mailbox never got to ready */
- if (!tx_mbx->ready)
- return;
+ spin_lock_irq(&fbd->fw_tx_lock);
+
+ /* Clear ready to prevent any further attempts to transmit */
+ tx_mbx->ready = false;
+
+ /* Read tail to determine the last tail state for the ring */
+ tail = tx_mbx->tail;
+
+ /* Flush any completions as we are no longer processing Rx */
+ fbnic_mbx_evict_all_cmpl(fbd);
+
+ spin_unlock_irq(&fbd->fw_tx_lock);
 	/* Give the firmware time to process the packet;
- * we will wait up to 10 seconds which is 50 waits of 200ms.
+	 * we will wait up to 10 seconds, which is 500 waits of 20ms.
*/
do {
u8 head = tx_mbx->head;
- if (head == tx_mbx->tail)
+ /* Tx ring is empty once head == tail */
+ if (head == tail)
break;
- msleep(200);
+ msleep(20);
fbnic_mbx_process_tx_msgs(fbd);
+ } while (time_is_after_jiffies(timeout));
+}
+
+int fbnic_fw_xmit_rpc_macda_sync(struct fbnic_dev *fbd)
+{
+ struct fbnic_tlv_msg *mac_array;
+ int i, addr_count = 0, err;
+ struct fbnic_tlv_msg *msg;
+ u32 rx_flags = 0;
+
+ /* Nothing to do if there is no FW to sync with */
+ if (!fbd->mbx[FBNIC_IPC_MBX_TX_IDX].ready)
+ return 0;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_RPC_MAC_SYNC_REQ);
+ if (!msg)
+ return -ENOMEM;
- count += (tx_mbx->head - head) % FBNIC_IPC_MBX_DESC_LEN;
- } while (count < FBNIC_IPC_MBX_DESC_LEN && --attempts);
+ mac_array = fbnic_tlv_attr_nest_start(msg,
+ FBNIC_FW_RPC_MAC_SYNC_UC_ARRAY);
+ if (!mac_array)
+ goto free_message_nospc;
+
+ /* Populate the unicast MAC addrs and capture PROMISC/ALLMULTI flags */
+ for (addr_count = 0, i = FBNIC_RPC_TCAM_MACDA_PROMISC_IDX;
+ i >= fbd->mac_addr_boundary; i--) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ if (mac_addr->state != FBNIC_TCAM_S_VALID)
+ continue;
+ if (test_bit(FBNIC_MAC_ADDR_T_ALLMULTI, mac_addr->act_tcam))
+ rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_ALLMULTI;
+ if (test_bit(FBNIC_MAC_ADDR_T_PROMISC, mac_addr->act_tcam))
+ rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_PROMISC;
+ if (!test_bit(FBNIC_MAC_ADDR_T_UNICAST, mac_addr->act_tcam))
+ continue;
+ if (addr_count == FW_RPC_MAC_SYNC_UC_ARRAY_SIZE) {
+ rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_PROMISC;
+ continue;
+ }
+
+ err = fbnic_tlv_attr_put_value(mac_array,
+ FBNIC_FW_RPC_MAC_SYNC_MAC_ADDR,
+ mac_addr->value.addr8,
+ ETH_ALEN);
+ if (err)
+ goto free_message;
+ addr_count++;
+ }
+
+ /* Close array */
+ fbnic_tlv_attr_nest_stop(msg);
+
+ mac_array = fbnic_tlv_attr_nest_start(msg,
+ FBNIC_FW_RPC_MAC_SYNC_MC_ARRAY);
+ if (!mac_array)
+ goto free_message_nospc;
+
+ /* Repeat for multicast addrs, record BROADCAST/ALLMULTI flags */
+ for (addr_count = 0, i = FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX;
+ i < fbd->mac_addr_boundary; i++) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ if (mac_addr->state != FBNIC_TCAM_S_VALID)
+ continue;
+ if (test_bit(FBNIC_MAC_ADDR_T_BROADCAST, mac_addr->act_tcam))
+ rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_BROADCAST;
+ if (test_bit(FBNIC_MAC_ADDR_T_ALLMULTI, mac_addr->act_tcam))
+ rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_ALLMULTI;
+ if (!test_bit(FBNIC_MAC_ADDR_T_MULTICAST, mac_addr->act_tcam))
+ continue;
+ if (addr_count == FW_RPC_MAC_SYNC_MC_ARRAY_SIZE) {
+ rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_ALLMULTI;
+ continue;
+ }
+
+ err = fbnic_tlv_attr_put_value(mac_array,
+ FBNIC_FW_RPC_MAC_SYNC_MAC_ADDR,
+ mac_addr->value.addr8,
+ ETH_ALEN);
+ if (err)
+ goto free_message;
+ addr_count++;
+ }
+
+ /* Close array */
+ fbnic_tlv_attr_nest_stop(msg);
+
+ /* Report flags at end of list */
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_RPC_MAC_SYNC_RX_FLAGS,
+ rx_flags);
+ if (err)
+ goto free_message;
+
+ /* Send message off to FW notifying it of current RPC config */
+ err = fbnic_mbx_map_tlv_msg(fbd, msg);
+ if (err)
+ goto free_message;
+ return 0;
+free_message_nospc:
+ err = -ENOSPC;
+free_message:
+ free_page((unsigned long)msg);
+ return err;
}
void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
@@ -802,3 +1892,29 @@ void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
fbnic_mk_full_fw_ver_str(mgmt->version, delim, mgmt->commit,
fw_version, str_sz);
}
+
+struct fbnic_fw_completion *__fbnic_fw_alloc_cmpl(u32 msg_type,
+ size_t priv_size)
+{
+ struct fbnic_fw_completion *cmpl;
+
+ cmpl = kzalloc(sizeof(*cmpl) + priv_size, GFP_KERNEL);
+ if (!cmpl)
+ return NULL;
+
+ cmpl->msg_type = msg_type;
+ init_completion(&cmpl->done);
+ kref_init(&cmpl->ref_count);
+
+ return cmpl;
+}
+
+struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type)
+{
+ return __fbnic_fw_alloc_cmpl(msg_type, 0);
+}
+
+void fbnic_fw_put_cmpl(struct fbnic_fw_completion *fw_cmpl)
+{
+ kref_put(&fw_cmpl->ref_count, fbnic_fw_release_cmpl_data);
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
index 7cd8841920e4..1ecd777aaada 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
@@ -19,10 +19,23 @@ struct fbnic_fw_mbx {
};
// FW_VER_MAX_SIZE must match ETHTOOL_FWVERS_LEN
-#define FBNIC_FW_VER_MAX_SIZE 32
+#define FBNIC_FW_VER_MAX_SIZE 32
// Formatted version is in the format XX.YY.ZZ_RRR_COMMIT
#define FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE (FBNIC_FW_VER_MAX_SIZE - 13)
-#define FBNIC_FW_LOG_MAX_SIZE 256
+#define FBNIC_FW_LOG_VERSION 1
+#define FBNIC_FW_LOG_MAX_SIZE 256
+/*
+ * The maximum number of log entries which can fit in a single mailbox
+ * message. Firmware assumes each mailbox message is 4096B. The count is
+ * calculated as 4096 minus the headers for the message, the three arrays,
+ * and the length attribute (plus the length value itself), divided by the
+ * per-entry cost: one header in each of the three arrays plus the maximum
+ * LOG size and the sizes of MSEC and INDEX. Put another way:
+ *
+ * MAX_LOG_HISTORY = ((4096 - TLV_HDR_SZ * 5 - LENGTH_SZ)
+ *		      / (FBNIC_FW_LOG_MAX_SIZE + TLV_HDR_SZ * 3 + MSEC_SZ
+ *		      + INDEX_SZ))
+ */
+#define FBNIC_FW_MAX_LOG_HISTORY 14
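
As a rough sanity check (the individual attribute sizes here are assumptions, not values taken from the driver): with a 4-byte TLV header, a 4-byte length value, a 4-byte MSEC and an 8-byte INDEX, the formula gives (4096 - 4 * 5 - 4) / (256 + 4 * 3 + 4 + 8) = 4072 / 280 ≈ 14.5, which truncates to the 14 defined above.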
struct fbnic_fw_ver {
u32 version;
@@ -38,27 +51,83 @@ struct fbnic_fw_cap {
} stored;
u8 active_slot;
u8 bmc_mac_addr[4][ETH_ALEN];
- u8 bmc_present : 1;
- u8 all_multi : 1;
+ u8 bmc_present : 1;
+ u8 need_bmc_tcam_reinit : 1;
+ u8 need_bmc_macda_sync : 1;
+ u8 all_multi : 1;
u8 link_speed;
u8 link_fec;
+ u32 anti_rollback_version;
};
struct fbnic_fw_completion {
- struct {
- s32 millivolts;
- s32 millidegrees;
- } tsene;
+ u32 msg_type;
+ struct completion done;
+ struct kref ref_count;
+ int result;
+ union {
+ struct {
+ u32 size;
+ } coredump_info;
+ struct {
+ u32 size;
+ u16 stride;
+ u8 *data[];
+ } coredump;
+ struct {
+ u32 offset;
+ u32 length;
+ } fw_update;
+ struct {
+ u16 length;
+ u8 offset;
+ u8 page;
+ u8 bank;
+ u8 data[] __aligned(sizeof(u32)) __counted_by(length);
+ } qsfp;
+ struct {
+ s32 millivolts;
+ s32 millidegrees;
+ } tsene;
+ } u;
};
void fbnic_mbx_init(struct fbnic_dev *fbd);
void fbnic_mbx_clean(struct fbnic_dev *fbd);
+int fbnic_mbx_set_cmpl(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data);
+void fbnic_mbx_clear_cmpl(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data);
void fbnic_mbx_poll(struct fbnic_dev *fbd);
int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd);
void fbnic_mbx_flush_tx(struct fbnic_dev *fbd);
int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership);
int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll);
void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd);
+int fbnic_fw_xmit_coredump_info_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ bool force);
+int fbnic_fw_xmit_coredump_read_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ u32 offset, u32 length);
+int fbnic_fw_xmit_fw_start_upgrade(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ unsigned int id, unsigned int len);
+int fbnic_fw_xmit_fw_write_chunk(struct fbnic_dev *fbd,
+ const u8 *data, u32 offset, u16 length,
+ int cancel_error);
+int fbnic_fw_xmit_qsfp_read_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ u32 page, u32 bank, u32 offset, u32 length);
+int fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data);
+int fbnic_fw_xmit_send_logs(struct fbnic_dev *fbd, bool enable,
+ bool send_log_history);
+int fbnic_fw_xmit_rpc_macda_sync(struct fbnic_dev *fbd);
+struct fbnic_fw_completion *__fbnic_fw_alloc_cmpl(u32 msg_type,
+ size_t priv_size);
+struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type);
+void fbnic_fw_put_cmpl(struct fbnic_fw_completion *cmpl_data);
#define fbnic_mk_full_fw_ver_str(_rev_id, _delim, _commit, _str, _str_sz) \
do { \
@@ -74,6 +143,15 @@ do { \
#define fbnic_mk_fw_ver_str(_rev_id, _str) \
fbnic_mk_full_fw_ver_str(_rev_id, "", "", _str, sizeof(_str))
+enum {
+ QSPI_SECTION_CMRT = 0,
+ QSPI_SECTION_CONTROL_FW = 1,
+ QSPI_SECTION_UCODE = 2,
+ QSPI_SECTION_OPTION_ROM = 3,
+ QSPI_SECTION_USER = 4,
+ QSPI_SECTION_INVALID,
+};
+
#define FW_HEARTBEAT_PERIOD (10 * HZ)
enum {
@@ -83,6 +161,24 @@ enum {
FBNIC_TLV_MSG_ID_OWNERSHIP_RESP = 0x13,
FBNIC_TLV_MSG_ID_HEARTBEAT_REQ = 0x14,
FBNIC_TLV_MSG_ID_HEARTBEAT_RESP = 0x15,
+ FBNIC_TLV_MSG_ID_COREDUMP_GET_INFO_REQ = 0x18,
+ FBNIC_TLV_MSG_ID_COREDUMP_GET_INFO_RESP = 0x19,
+ FBNIC_TLV_MSG_ID_COREDUMP_READ_REQ = 0x20,
+ FBNIC_TLV_MSG_ID_COREDUMP_READ_RESP = 0x21,
+ FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ = 0x22,
+ FBNIC_TLV_MSG_ID_FW_START_UPGRADE_RESP = 0x23,
+ FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ = 0x24,
+ FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_RESP = 0x25,
+ FBNIC_TLV_MSG_ID_FW_FINISH_UPGRADE_REQ = 0x28,
+ FBNIC_TLV_MSG_ID_FW_FINISH_UPGRADE_RESP = 0x29,
+ FBNIC_TLV_MSG_ID_QSFP_READ_REQ = 0x38,
+ FBNIC_TLV_MSG_ID_QSFP_READ_RESP = 0x39,
+ FBNIC_TLV_MSG_ID_TSENE_READ_REQ = 0x3C,
+ FBNIC_TLV_MSG_ID_TSENE_READ_RESP = 0x3D,
+ FBNIC_TLV_MSG_ID_LOG_SEND_LOGS_REQ = 0x43,
+ FBNIC_TLV_MSG_ID_LOG_MSG_REQ = 0x44,
+ FBNIC_TLV_MSG_ID_LOG_MSG_RESP = 0x45,
+ FBNIC_TLV_MSG_ID_RPC_MAC_SYNC_REQ = 0x46,
};
#define FBNIC_FW_CAP_RESP_VERSION_MAJOR CSR_GENMASK(31, 24)
@@ -108,14 +204,15 @@ enum {
FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR = 0x10,
FBNIC_FW_CAP_RESP_UEFI_VERSION = 0x11,
FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR = 0x12,
+ FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION = 0x15,
FBNIC_FW_CAP_RESP_MSG_MAX
};
enum {
- FBNIC_FW_LINK_SPEED_25R1 = 1,
- FBNIC_FW_LINK_SPEED_50R2 = 2,
- FBNIC_FW_LINK_SPEED_50R1 = 3,
- FBNIC_FW_LINK_SPEED_100R2 = 4,
+ FBNIC_FW_LINK_MODE_25CR = 1,
+ FBNIC_FW_LINK_MODE_50CR2 = 2,
+ FBNIC_FW_LINK_MODE_50CR = 3,
+ FBNIC_FW_LINK_MODE_100CR2 = 4,
};
enum {
@@ -125,7 +222,105 @@ enum {
};
enum {
+ FBNIC_FW_QSFP_BANK = 0x0,
+ FBNIC_FW_QSFP_PAGE = 0x1,
+ FBNIC_FW_QSFP_OFFSET = 0x2,
+ FBNIC_FW_QSFP_LENGTH = 0x3,
+ FBNIC_FW_QSFP_ERROR = 0x4,
+ FBNIC_FW_QSFP_DATA = 0x5,
+ FBNIC_FW_QSFP_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_TSENE_THERM = 0x0,
+ FBNIC_FW_TSENE_VOLT = 0x1,
+ FBNIC_FW_TSENE_ERROR = 0x2,
+ FBNIC_FW_TSENE_MSG_MAX
+};
+
+enum {
FBNIC_FW_OWNERSHIP_FLAG = 0x0,
+ FBNIC_FW_OWNERSHIP_TIME = 0x1,
FBNIC_FW_OWNERSHIP_MSG_MAX
};
+
+enum {
+ FBNIC_FW_HEARTBEAT_UPTIME = 0x0,
+ FBNIC_FW_HEARTBEAT_NUMBER_OF_MESSAGES = 0x1,
+ FBNIC_FW_HEARTBEAT_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_COREDUMP_REQ_INFO_CREATE = 0x0,
+ FBNIC_FW_COREDUMP_REQ_INFO_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_COREDUMP_INFO_AVAILABLE = 0x0,
+ FBNIC_FW_COREDUMP_INFO_SIZE = 0x1,
+ FBNIC_FW_COREDUMP_INFO_ERROR = 0x2,
+ FBNIC_FW_COREDUMP_INFO_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_COREDUMP_READ_OFFSET = 0x0,
+ FBNIC_FW_COREDUMP_READ_LENGTH = 0x1,
+ FBNIC_FW_COREDUMP_READ_DATA = 0x2,
+ FBNIC_FW_COREDUMP_READ_ERROR = 0x3,
+ FBNIC_FW_COREDUMP_READ_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_START_UPGRADE_ERROR = 0x0,
+ FBNIC_FW_START_UPGRADE_SECTION = 0x1,
+ FBNIC_FW_START_UPGRADE_IMAGE_LENGTH = 0x2,
+ FBNIC_FW_START_UPGRADE_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_WRITE_CHUNK_OFFSET = 0x0,
+ FBNIC_FW_WRITE_CHUNK_LENGTH = 0x1,
+ FBNIC_FW_WRITE_CHUNK_DATA = 0x2,
+ FBNIC_FW_WRITE_CHUNK_ERROR = 0x3,
+ FBNIC_FW_WRITE_CHUNK_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_FINISH_UPGRADE_ERROR = 0x0,
+ FBNIC_FW_FINISH_UPGRADE_MSG_MAX
+};
+
+enum {
+ FBNIC_SEND_LOGS = 0x0,
+ FBNIC_SEND_LOGS_VERSION = 0x1,
+ FBNIC_SEND_LOGS_HISTORY = 0x2,
+ FBNIC_SEND_LOGS_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_LOG_MSEC = 0x0,
+ FBNIC_FW_LOG_INDEX = 0x1,
+ FBNIC_FW_LOG_MSG = 0x2,
+ FBNIC_FW_LOG_LENGTH = 0x3,
+ FBNIC_FW_LOG_MSEC_ARRAY = 0x4,
+ FBNIC_FW_LOG_INDEX_ARRAY = 0x5,
+ FBNIC_FW_LOG_MSG_ARRAY = 0x6,
+ FBNIC_FW_LOG_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_RPC_MAC_SYNC_RX_FLAGS = 0x0,
+ FBNIC_FW_RPC_MAC_SYNC_UC_ARRAY = 0x1,
+ FBNIC_FW_RPC_MAC_SYNC_MC_ARRAY = 0x2,
+ FBNIC_FW_RPC_MAC_SYNC_MAC_ADDR = 0x3,
+ FBNIC_FW_RPC_MAC_SYNC_MSG_MAX
+};
+
+#define FW_RPC_MAC_SYNC_RX_FLAGS_PROMISC 1
+#define FW_RPC_MAC_SYNC_RX_FLAGS_ALLMULTI 2
+#define FW_RPC_MAC_SYNC_RX_FLAGS_BROADCAST 4
+
+#define FW_RPC_MAC_SYNC_UC_ARRAY_SIZE 8
+#define FW_RPC_MAC_SYNC_MC_ARRAY_SIZE 8
+
#endif /* _FBNIC_FW_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c
new file mode 100644
index 000000000000..85a883dba385
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
+
+#include "fbnic.h"
+#include "fbnic_fw.h"
+#include "fbnic_fw_log.h"
+
+void fbnic_fw_log_enable(struct fbnic_dev *fbd, bool send_hist)
+{
+ int err;
+
+ if (!fbnic_fw_log_ready(fbd))
+ return;
+
+ if (fbd->fw_cap.running.mgmt.version < MIN_FW_VER_CODE_HIST)
+ send_hist = false;
+
+ err = fbnic_fw_xmit_send_logs(fbd, true, send_hist);
+ if (err && err != -EOPNOTSUPP)
+ dev_warn(fbd->dev, "Unable to enable firmware logs: %d\n", err);
+}
+
+void fbnic_fw_log_disable(struct fbnic_dev *fbd)
+{
+ int err;
+
+ err = fbnic_fw_xmit_send_logs(fbd, false, false);
+ if (err && err != -EOPNOTSUPP)
+ dev_warn(fbd->dev, "Unable to disable firmware logs: %d\n",
+ err);
+}
+
+int fbnic_fw_log_init(struct fbnic_dev *fbd)
+{
+ struct fbnic_fw_log *log = &fbd->fw_log;
+ void *data;
+
+ if (WARN_ON_ONCE(fbnic_fw_log_ready(fbd)))
+ return -EEXIST;
+
+ data = vmalloc(FBNIC_FW_LOG_SIZE);
+ if (!data)
+ return -ENOMEM;
+
+ spin_lock_init(&fbd->fw_log.lock);
+ INIT_LIST_HEAD(&log->entries);
+ log->size = FBNIC_FW_LOG_SIZE;
+ log->data_start = data;
+ log->data_end = data + FBNIC_FW_LOG_SIZE;
+
+ fbnic_fw_log_enable(fbd, true);
+
+ return 0;
+}
+
+void fbnic_fw_log_free(struct fbnic_dev *fbd)
+{
+ struct fbnic_fw_log *log = &fbd->fw_log;
+
+ if (!fbnic_fw_log_ready(fbd))
+ return;
+
+ fbnic_fw_log_disable(fbd);
+ INIT_LIST_HEAD(&log->entries);
+ log->size = 0;
+ vfree(log->data_start);
+ log->data_start = NULL;
+ log->data_end = NULL;
+}
+
+int fbnic_fw_log_write(struct fbnic_dev *fbd, u64 index, u32 timestamp,
+ const char *msg)
+{
+ struct fbnic_fw_log_entry *entry, *head, *tail, *next;
+ struct fbnic_fw_log *log = &fbd->fw_log;
+ size_t msg_len = strlen(msg) + 1;
+ unsigned long flags;
+ void *entry_end;
+
+ if (!fbnic_fw_log_ready(fbd)) {
+ dev_err(fbd->dev, "Firmware sent log entry without being requested!\n");
+ return -ENOSPC;
+ }
+
+ spin_lock_irqsave(&log->lock, flags);
+
+ if (list_empty(&log->entries)) {
+ entry = log->data_start;
+ } else {
+ head = list_first_entry(&log->entries, typeof(*head), list);
+ entry_end = head->msg + head->len + 1;
+ entry = PTR_ALIGN(entry_end, 8);
+ }
+
+ entry_end = entry->msg + msg_len + 1;
+
+ /* We've reached the end of the buffer, wrap around */
+ if (entry_end > log->data_end) {
+ entry = log->data_start;
+ entry_end = entry->msg + msg_len + 1;
+ }
+
+ /* Make room for entry by removing from tail. */
+ list_for_each_entry_safe_reverse(tail, next, &log->entries, list) {
+ if (entry <= tail && entry_end > (void *)tail)
+ list_del(&tail->list);
+ else
+ break;
+ }
+
+ entry->index = index;
+ entry->timestamp = timestamp;
+ entry->len = msg_len;
+ strscpy(entry->msg, msg, entry->len);
+ list_add(&entry->list, &log->entries);
+
+ spin_unlock_irqrestore(&log->lock, flags);
+
+ return 0;
+}
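
For reference, entries are list_add()'ed at the head, so the list runs newest to oldest. A minimal reader sketch under that assumption; the dump function and the breakdown of the millisecond timestamp into FBNIC_FW_LOG_FMT's DD:HH:MM:SS.MMM fields are illustrative, not part of the driver:

static void example_fw_log_dump(struct fbnic_dev *fbd)
{
	struct fbnic_fw_log *log = &fbd->fw_log;
	struct fbnic_fw_log_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(&log->lock, flags);
	/* Walk oldest to newest since new entries are added at the head */
	list_for_each_entry_reverse(entry, &log->entries, list) {
		u32 ms = entry->timestamp;

		dev_info(fbd->dev, FBNIC_FW_LOG_FMT,
			 (long long)entry->index,
			 (long)(ms / 86400000),		/* days */
			 (long)(ms / 3600000 % 24),	/* hours */
			 (long)(ms / 60000 % 60),	/* minutes */
			 (long)(ms / 1000 % 60),	/* seconds */
			 (long)(ms % 1000),		/* milliseconds */
			 entry->msg);
	}
	spin_unlock_irqrestore(&log->lock, flags);
}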
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.h
new file mode 100644
index 000000000000..50ec79003108
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_FW_LOG_H_
+#define _FBNIC_FW_LOG_H_
+
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+/* A 512K log buffer was chosen fairly arbitrarily */
+#define FBNIC_FW_LOG_SIZE (512 * 1024) /* bytes */
+
+/* Firmware log output is prepended with log index followed by a timestamp.
+ * The timestamp is similar to Zephyr's format DD:HH:MM:SS.MMM
+ */
+#define FBNIC_FW_LOG_FMT "[%5lld] [%02ld:%02ld:%02ld:%02ld.%03ld] %s\n"
+
+struct fbnic_dev;
+
+struct fbnic_fw_log_entry {
+ struct list_head list;
+ u64 index;
+ u32 timestamp;
+ u16 len;
+ char msg[] __counted_by(len);
+};
+
+struct fbnic_fw_log {
+ void *data_start;
+ void *data_end;
+ size_t size;
+ struct list_head entries;
+ /* Spin lock for accessing or modifying entries */
+ spinlock_t lock;
+};
+
+#define fbnic_fw_log_ready(_fbd) (!!(_fbd)->fw_log.data_start)
+
+void fbnic_fw_log_enable(struct fbnic_dev *fbd, bool send_hist);
+void fbnic_fw_log_disable(struct fbnic_dev *fbd);
+int fbnic_fw_log_init(struct fbnic_dev *fbd);
+void fbnic_fw_log_free(struct fbnic_dev *fbd);
+int fbnic_fw_log_write(struct fbnic_dev *fbd, u64 index, u32 timestamp,
+ const char *msg);
+#endif /* _FBNIC_FW_LOG_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c
index a0acc7606aa1..8b9b2076beec 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c
@@ -1,5 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/rtnetlink.h>
+
#include "fbnic.h"
+static void fbnic_hw_stat_rst32(struct fbnic_dev *fbd, u32 reg,
+ struct fbnic_stat_counter *stat)
+{
+ /* We do not touch the "value" field here.
+ * It gets zeroed out on fbd structure allocation.
+ * After that we want it to grow continuously
+ * through device resets and power state changes.
+ */
+ stat->u.old_reg_value_32 = rd32(fbd, reg);
+}
+
+static void fbnic_hw_stat_rd32(struct fbnic_dev *fbd, u32 reg,
+ struct fbnic_stat_counter *stat)
+{
+ u32 new_reg_value;
+
+ new_reg_value = rd32(fbd, reg);
+ stat->value += new_reg_value - stat->u.old_reg_value_32;
+ stat->u.old_reg_value_32 = new_reg_value;
+}
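
One property worth noting, implied by the unsigned arithmetic rather than spelled out in the patch: the delta scheme tolerates a 32-bit counter wrapping between reads. For example, if u.old_reg_value_32 is 0xFFFFFF F0 truncated to 0xFFFFFFF0 and the register has wrapped to 0x00000010, then new_reg_value - stat->u.old_reg_value_32 computes to 0x20 in 32-bit arithmetic, so stat->value still advances by the true increment.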
+
u64 fbnic_stat_rd64(struct fbnic_dev *fbd, u32 reg, u32 offset)
{
u32 prev_upper, upper, lower, diff;
@@ -25,3 +51,551 @@ u64 fbnic_stat_rd64(struct fbnic_dev *fbd, u32 reg, u32 offset)
*/
return ((u64)upper << 32);
}
+
+static void fbnic_hw_stat_rst64(struct fbnic_dev *fbd, u32 reg, s32 offset,
+ struct fbnic_stat_counter *stat)
+{
+ /* Record initial counter values and compute deltas from there to ensure
+ * stats start at 0 after reboot/reset. This avoids exposing absolute
+ * hardware counter values to userspace.
+ */
+ stat->u.old_reg_value_64 = fbnic_stat_rd64(fbd, reg, offset);
+}
+
+static void fbnic_hw_stat_rd64(struct fbnic_dev *fbd, u32 reg, s32 offset,
+ struct fbnic_stat_counter *stat)
+{
+ u64 new_reg_value;
+
+ new_reg_value = fbnic_stat_rd64(fbd, reg, offset);
+ stat->value += new_reg_value - stat->u.old_reg_value_64;
+ stat->u.old_reg_value_64 = new_reg_value;
+}
+
+static void fbnic_reset_tmi_stats(struct fbnic_dev *fbd,
+ struct fbnic_tmi_stats *tmi)
+{
+ fbnic_hw_stat_rst32(fbd, FBNIC_TMI_DROP_PKTS, &tmi->drop.frames);
+ fbnic_hw_stat_rst64(fbd, FBNIC_TMI_DROP_BYTE_L, 1, &tmi->drop.bytes);
+
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_TMI_ILLEGAL_PTP_REQS,
+ &tmi->ptp_illegal_req);
+ fbnic_hw_stat_rst32(fbd, FBNIC_TMI_GOOD_PTP_TS, &tmi->ptp_good_ts);
+ fbnic_hw_stat_rst32(fbd, FBNIC_TMI_BAD_PTP_TS, &tmi->ptp_bad_ts);
+}
+
+static void fbnic_get_tmi_stats32(struct fbnic_dev *fbd,
+ struct fbnic_tmi_stats *tmi)
+{
+ fbnic_hw_stat_rd32(fbd, FBNIC_TMI_DROP_PKTS, &tmi->drop.frames);
+
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_TMI_ILLEGAL_PTP_REQS,
+ &tmi->ptp_illegal_req);
+ fbnic_hw_stat_rd32(fbd, FBNIC_TMI_GOOD_PTP_TS, &tmi->ptp_good_ts);
+ fbnic_hw_stat_rd32(fbd, FBNIC_TMI_BAD_PTP_TS, &tmi->ptp_bad_ts);
+}
+
+static void fbnic_get_tmi_stats(struct fbnic_dev *fbd,
+ struct fbnic_tmi_stats *tmi)
+{
+ fbnic_hw_stat_rd64(fbd, FBNIC_TMI_DROP_BYTE_L, 1, &tmi->drop.bytes);
+}
+
+static void fbnic_reset_tti_stats(struct fbnic_dev *fbd,
+ struct fbnic_tti_stats *tti)
+{
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_TCE_TTI_CM_DROP_PKTS,
+ &tti->cm_drop.frames);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_TCE_TTI_CM_DROP_BYTE_L,
+ 1,
+ &tti->cm_drop.bytes);
+
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_TCE_TTI_FRAME_DROP_PKTS,
+ &tti->frame_drop.frames);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_TCE_TTI_FRAME_DROP_BYTE_L,
+ 1,
+ &tti->frame_drop.bytes);
+
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_TCE_TBI_DROP_PKTS,
+ &tti->tbi_drop.frames);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_TCE_TBI_DROP_BYTE_L,
+ 1,
+ &tti->tbi_drop.bytes);
+}
+
+static void fbnic_get_tti_stats32(struct fbnic_dev *fbd,
+ struct fbnic_tti_stats *tti)
+{
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_TCE_TTI_CM_DROP_PKTS,
+ &tti->cm_drop.frames);
+
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_TCE_TTI_FRAME_DROP_PKTS,
+ &tti->frame_drop.frames);
+
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_TCE_TBI_DROP_PKTS,
+ &tti->tbi_drop.frames);
+}
+
+static void fbnic_get_tti_stats(struct fbnic_dev *fbd,
+ struct fbnic_tti_stats *tti)
+{
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_TCE_TTI_CM_DROP_BYTE_L,
+ 1,
+ &tti->cm_drop.bytes);
+
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_TCE_TTI_FRAME_DROP_BYTE_L,
+ 1,
+ &tti->frame_drop.bytes);
+
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_TCE_TBI_DROP_BYTE_L,
+ 1,
+ &tti->tbi_drop.bytes);
+}
+
+static void fbnic_reset_rpc_stats(struct fbnic_dev *fbd,
+ struct fbnic_rpc_stats *rpc)
+{
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_RPC_CNTR_UNKN_ETYPE,
+ &rpc->unkn_etype);
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_RPC_CNTR_UNKN_EXT_HDR,
+ &rpc->unkn_ext_hdr);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RPC_CNTR_IPV4_FRAG, &rpc->ipv4_frag);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RPC_CNTR_IPV6_FRAG, &rpc->ipv6_frag);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RPC_CNTR_IPV4_ESP, &rpc->ipv4_esp);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RPC_CNTR_IPV6_ESP, &rpc->ipv6_esp);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RPC_CNTR_TCP_OPT_ERR, &rpc->tcp_opt_err);
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_RPC_CNTR_OUT_OF_HDR_ERR,
+ &rpc->out_of_hdr_err);
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_RPC_CNTR_OVR_SIZE_ERR,
+ &rpc->ovr_size_err);
+}
+
+static void fbnic_get_rpc_stats32(struct fbnic_dev *fbd,
+ struct fbnic_rpc_stats *rpc)
+{
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_RPC_CNTR_UNKN_ETYPE,
+ &rpc->unkn_etype);
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_RPC_CNTR_UNKN_EXT_HDR,
+ &rpc->unkn_ext_hdr);
+
+ fbnic_hw_stat_rd32(fbd, FBNIC_RPC_CNTR_IPV4_FRAG, &rpc->ipv4_frag);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RPC_CNTR_IPV6_FRAG, &rpc->ipv6_frag);
+
+ fbnic_hw_stat_rd32(fbd, FBNIC_RPC_CNTR_IPV4_ESP, &rpc->ipv4_esp);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RPC_CNTR_IPV6_ESP, &rpc->ipv6_esp);
+
+ fbnic_hw_stat_rd32(fbd, FBNIC_RPC_CNTR_TCP_OPT_ERR, &rpc->tcp_opt_err);
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_RPC_CNTR_OUT_OF_HDR_ERR,
+ &rpc->out_of_hdr_err);
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_RPC_CNTR_OVR_SIZE_ERR,
+ &rpc->ovr_size_err);
+}
+
+static void fbnic_reset_rxb_fifo_stats(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_fifo_stats *fifo)
+{
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_DROP_FRMS_STS(i),
+ &fifo->drop.frames);
+ fbnic_hw_stat_rst64(fbd, FBNIC_RXB_DROP_BYTES_STS_L(i), 1,
+ &fifo->drop.bytes);
+
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_TRUN_FRMS_STS(i),
+ &fifo->trunc.frames);
+ fbnic_hw_stat_rst64(fbd, FBNIC_RXB_TRUN_BYTES_STS_L(i), 1,
+ &fifo->trunc.bytes);
+
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_TRANS_DROP_STS(i),
+ &fifo->trans_drop);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_TRANS_ECN_STS(i),
+ &fifo->trans_ecn);
+
+ fifo->level.u.old_reg_value_32 = 0;
+}
+
+static void fbnic_reset_rxb_enq_stats(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_enqueue_stats *enq)
+{
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_DRBO_FRM_CNT_SRC(i),
+ &enq->drbo.frames);
+ fbnic_hw_stat_rst64(fbd, FBNIC_RXB_DRBO_BYTE_CNT_SRC_L(i), 4,
+ &enq->drbo.bytes);
+
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_INTEGRITY_ERR(i),
+ &enq->integrity_err);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_MAC_ERR(i),
+ &enq->mac_err);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_PARSER_ERR(i),
+ &enq->parser_err);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_FRM_ERR(i),
+ &enq->frm_err);
+}
+
+static void fbnic_reset_rxb_deq_stats(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_dequeue_stats *deq)
+{
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_INTF_FRM_CNT_DST(i),
+ &deq->intf.frames);
+ fbnic_hw_stat_rst64(fbd, FBNIC_RXB_INTF_BYTE_CNT_DST_L(i), 4,
+ &deq->intf.bytes);
+
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_PBUF_FRM_CNT_DST(i),
+ &deq->pbuf.frames);
+ fbnic_hw_stat_rst64(fbd, FBNIC_RXB_PBUF_BYTE_CNT_DST_L(i), 4,
+ &deq->pbuf.bytes);
+}
+
+static void fbnic_reset_rxb_stats(struct fbnic_dev *fbd,
+ struct fbnic_rxb_stats *rxb)
+{
+ int i;
+
+ for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++)
+ fbnic_reset_rxb_fifo_stats(fbd, i, &rxb->fifo[i]);
+
+ for (i = 0; i < FBNIC_RXB_INTF_INDICES; i++) {
+ fbnic_reset_rxb_enq_stats(fbd, i, &rxb->enq[i]);
+ fbnic_reset_rxb_deq_stats(fbd, i, &rxb->deq[i]);
+ }
+}
+
+static void fbnic_get_rxb_fifo_stats32(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_fifo_stats *fifo)
+{
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_DROP_FRMS_STS(i),
+ &fifo->drop.frames);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_TRUN_FRMS_STS(i),
+ &fifo->trunc.frames);
+
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_TRANS_DROP_STS(i),
+ &fifo->trans_drop);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_TRANS_ECN_STS(i),
+ &fifo->trans_ecn);
+
+ fifo->level.value = rd32(fbd, FBNIC_RXB_PBUF_FIFO_LEVEL(i));
+}
+
+static void fbnic_get_rxb_fifo_stats(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_fifo_stats *fifo)
+{
+ fbnic_hw_stat_rd64(fbd, FBNIC_RXB_DROP_BYTES_STS_L(i), 1,
+ &fifo->drop.bytes);
+ fbnic_hw_stat_rd64(fbd, FBNIC_RXB_TRUN_BYTES_STS_L(i), 1,
+ &fifo->trunc.bytes);
+
+ fbnic_get_rxb_fifo_stats32(fbd, i, fifo);
+}
+
+static void fbnic_get_rxb_enq_stats32(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_enqueue_stats *enq)
+{
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_DRBO_FRM_CNT_SRC(i),
+ &enq->drbo.frames);
+
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_INTEGRITY_ERR(i),
+ &enq->integrity_err);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_MAC_ERR(i),
+ &enq->mac_err);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_PARSER_ERR(i),
+ &enq->parser_err);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_FRM_ERR(i),
+ &enq->frm_err);
+}
+
+static void fbnic_get_rxb_enq_stats(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_enqueue_stats *enq)
+{
+ fbnic_hw_stat_rd64(fbd, FBNIC_RXB_DRBO_BYTE_CNT_SRC_L(i), 4,
+ &enq->drbo.bytes);
+
+ fbnic_get_rxb_enq_stats32(fbd, i, enq);
+}
+
+static void fbnic_get_rxb_deq_stats32(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_dequeue_stats *deq)
+{
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_INTF_FRM_CNT_DST(i),
+ &deq->intf.frames);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_PBUF_FRM_CNT_DST(i),
+ &deq->pbuf.frames);
+}
+
+static void fbnic_get_rxb_deq_stats(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_dequeue_stats *deq)
+{
+ fbnic_hw_stat_rd64(fbd, FBNIC_RXB_INTF_BYTE_CNT_DST_L(i), 4,
+ &deq->intf.bytes);
+ fbnic_hw_stat_rd64(fbd, FBNIC_RXB_PBUF_BYTE_CNT_DST_L(i), 4,
+ &deq->pbuf.bytes);
+
+ fbnic_get_rxb_deq_stats32(fbd, i, deq);
+}
+
+static void fbnic_get_rxb_stats32(struct fbnic_dev *fbd,
+ struct fbnic_rxb_stats *rxb)
+{
+ int i;
+
+ for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++)
+ fbnic_get_rxb_fifo_stats32(fbd, i, &rxb->fifo[i]);
+
+ for (i = 0; i < FBNIC_RXB_INTF_INDICES; i++) {
+ fbnic_get_rxb_enq_stats32(fbd, i, &rxb->enq[i]);
+ fbnic_get_rxb_deq_stats32(fbd, i, &rxb->deq[i]);
+ }
+}
+
+static void fbnic_get_rxb_stats(struct fbnic_dev *fbd,
+ struct fbnic_rxb_stats *rxb)
+{
+ int i;
+
+ for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++)
+ fbnic_get_rxb_fifo_stats(fbd, i, &rxb->fifo[i]);
+
+ for (i = 0; i < FBNIC_RXB_INTF_INDICES; i++) {
+ fbnic_get_rxb_enq_stats(fbd, i, &rxb->enq[i]);
+ fbnic_get_rxb_deq_stats(fbd, i, &rxb->deq[i]);
+ }
+}
+
+static void fbnic_reset_hw_rxq_stats(struct fbnic_dev *fbd,
+ struct fbnic_hw_q_stats *hw_q)
+{
+ int i;
+
+ for (i = 0; i < fbd->max_num_queues; i++, hw_q++) {
+ u32 base = FBNIC_QUEUE(i);
+
+ fbnic_hw_stat_rst32(fbd,
+ base + FBNIC_QUEUE_RDE_PKT_ERR_CNT,
+ &hw_q->rde_pkt_err);
+ fbnic_hw_stat_rst32(fbd,
+ base + FBNIC_QUEUE_RDE_CQ_DROP_CNT,
+ &hw_q->rde_pkt_cq_drop);
+ fbnic_hw_stat_rst32(fbd,
+ base + FBNIC_QUEUE_RDE_BDQ_DROP_CNT,
+ &hw_q->rde_pkt_bdq_drop);
+ }
+}
+
+static void fbnic_get_hw_rxq_stats32(struct fbnic_dev *fbd,
+ struct fbnic_hw_q_stats *hw_q)
+{
+ int i;
+
+ for (i = 0; i < fbd->max_num_queues; i++, hw_q++) {
+ u32 base = FBNIC_QUEUE(i);
+
+ fbnic_hw_stat_rd32(fbd,
+ base + FBNIC_QUEUE_RDE_PKT_ERR_CNT,
+ &hw_q->rde_pkt_err);
+ fbnic_hw_stat_rd32(fbd,
+ base + FBNIC_QUEUE_RDE_CQ_DROP_CNT,
+ &hw_q->rde_pkt_cq_drop);
+ fbnic_hw_stat_rd32(fbd,
+ base + FBNIC_QUEUE_RDE_BDQ_DROP_CNT,
+ &hw_q->rde_pkt_bdq_drop);
+ }
+}
+
+void fbnic_get_hw_q_stats(struct fbnic_dev *fbd,
+ struct fbnic_hw_q_stats *hw_q)
+{
+ spin_lock(&fbd->hw_stats.lock);
+ fbnic_get_hw_rxq_stats32(fbd, hw_q);
+ spin_unlock(&fbd->hw_stats.lock);
+}
+
+static void fbnic_reset_pcie_stats_asic(struct fbnic_dev *fbd,
+ struct fbnic_pcie_stats *pcie)
+{
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_PUL_USER_OB_RD_TLP_CNT_31_0,
+ 1,
+ &pcie->ob_rd_tlp);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_PUL_USER_OB_RD_DWORD_CNT_31_0,
+ 1,
+ &pcie->ob_rd_dword);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_PUL_USER_OB_CPL_TLP_CNT_31_0,
+ 1,
+ &pcie->ob_cpl_tlp);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_PUL_USER_OB_CPL_DWORD_CNT_31_0,
+ 1,
+ &pcie->ob_cpl_dword);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_PUL_USER_OB_WR_TLP_CNT_31_0,
+ 1,
+ &pcie->ob_wr_tlp);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_PUL_USER_OB_WR_DWORD_CNT_31_0,
+ 1,
+ &pcie->ob_wr_dword);
+
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_31_0,
+ 1,
+ &pcie->ob_rd_no_tag);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_31_0,
+ 1,
+ &pcie->ob_rd_no_cpl_cred);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_31_0,
+ 1,
+ &pcie->ob_rd_no_np_cred);
+}
+
+static void fbnic_get_pcie_stats_asic64(struct fbnic_dev *fbd,
+ struct fbnic_pcie_stats *pcie)
+{
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_PUL_USER_OB_RD_TLP_CNT_31_0,
+ 1,
+ &pcie->ob_rd_tlp);
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_PUL_USER_OB_RD_DWORD_CNT_31_0,
+ 1,
+ &pcie->ob_rd_dword);
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_PUL_USER_OB_WR_TLP_CNT_31_0,
+ 1,
+ &pcie->ob_wr_tlp);
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_PUL_USER_OB_WR_DWORD_CNT_31_0,
+ 1,
+ &pcie->ob_wr_dword);
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_PUL_USER_OB_CPL_TLP_CNT_31_0,
+ 1,
+ &pcie->ob_cpl_tlp);
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_PUL_USER_OB_CPL_DWORD_CNT_31_0,
+ 1,
+ &pcie->ob_cpl_dword);
+
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_31_0,
+ 1,
+ &pcie->ob_rd_no_tag);
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_31_0,
+ 1,
+ &pcie->ob_rd_no_cpl_cred);
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_31_0,
+ 1,
+ &pcie->ob_rd_no_np_cred);
+}
+
+static void fbnic_reset_phy_stats(struct fbnic_dev *fbd,
+ struct fbnic_phy_stats *phy_stats)
+{
+ const struct fbnic_mac *mac = fbd->mac;
+
+ mac->get_fec_stats(fbd, true, &phy_stats->fec);
+ mac->get_pcs_stats(fbd, true, &phy_stats->pcs);
+}
+
+static void fbnic_get_phy_stats32(struct fbnic_dev *fbd,
+ struct fbnic_phy_stats *phy_stats)
+{
+ const struct fbnic_mac *mac = fbd->mac;
+
+ mac->get_fec_stats(fbd, false, &phy_stats->fec);
+ mac->get_pcs_stats(fbd, false, &phy_stats->pcs);
+}
+
+static void fbnic_reset_hw_mac_stats(struct fbnic_dev *fbd,
+ struct fbnic_mac_stats *mac_stats)
+{
+ const struct fbnic_mac *mac = fbd->mac;
+
+ mac->get_eth_mac_stats(fbd, true, &mac_stats->eth_mac);
+ mac->get_pause_stats(fbd, true, &mac_stats->pause);
+ mac->get_eth_ctrl_stats(fbd, true, &mac_stats->eth_ctrl);
+ mac->get_rmon_stats(fbd, true, &mac_stats->rmon);
+}
+
+void fbnic_reset_hw_stats(struct fbnic_dev *fbd)
+{
+ spin_lock(&fbd->hw_stats.lock);
+ fbnic_reset_phy_stats(fbd, &fbd->hw_stats.phy);
+ fbnic_reset_tmi_stats(fbd, &fbd->hw_stats.tmi);
+ fbnic_reset_tti_stats(fbd, &fbd->hw_stats.tti);
+ fbnic_reset_rpc_stats(fbd, &fbd->hw_stats.rpc);
+ fbnic_reset_rxb_stats(fbd, &fbd->hw_stats.rxb);
+ fbnic_reset_hw_rxq_stats(fbd, fbd->hw_stats.hw_q);
+ fbnic_reset_pcie_stats_asic(fbd, &fbd->hw_stats.pcie);
+ spin_unlock(&fbd->hw_stats.lock);
+
+	/* Once registered, the only other access to MAC stats is via the
+	 * ethtool API, which is protected by the rtnl_lock. The call to
+	 * fbnic_reset_hw_stats() during PCI recovery is also protected
+	 * by the rtnl_lock; hence we don't need the spinlock to access
+	 * the MAC stats.
+	 */
+ if (fbd->netdev)
+ ASSERT_RTNL();
+ fbnic_reset_hw_mac_stats(fbd, &fbd->hw_stats.mac);
+}
+
+void fbnic_init_hw_stats(struct fbnic_dev *fbd)
+{
+ spin_lock_init(&fbd->hw_stats.lock);
+
+ fbnic_reset_hw_stats(fbd);
+}
+
+static void __fbnic_get_hw_stats32(struct fbnic_dev *fbd)
+{
+ fbnic_get_phy_stats32(fbd, &fbd->hw_stats.phy);
+ fbnic_get_tmi_stats32(fbd, &fbd->hw_stats.tmi);
+ fbnic_get_tti_stats32(fbd, &fbd->hw_stats.tti);
+ fbnic_get_rpc_stats32(fbd, &fbd->hw_stats.rpc);
+ fbnic_get_rxb_stats32(fbd, &fbd->hw_stats.rxb);
+ fbnic_get_hw_rxq_stats32(fbd, fbd->hw_stats.hw_q);
+}
+
+void fbnic_get_hw_stats32(struct fbnic_dev *fbd)
+{
+ spin_lock(&fbd->hw_stats.lock);
+ __fbnic_get_hw_stats32(fbd);
+ spin_unlock(&fbd->hw_stats.lock);
+}
+
+void fbnic_get_hw_stats(struct fbnic_dev *fbd)
+{
+ spin_lock(&fbd->hw_stats.lock);
+ __fbnic_get_hw_stats32(fbd);
+
+ fbnic_get_tmi_stats(fbd, &fbd->hw_stats.tmi);
+ fbnic_get_tti_stats(fbd, &fbd->hw_stats.tti);
+ fbnic_get_rxb_stats(fbd, &fbd->hw_stats.rxb);
+ fbnic_get_pcie_stats_asic64(fbd, &fbd->hw_stats.pcie);
+ spin_unlock(&fbd->hw_stats.lock);
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
index 30348904b510..aa3f429a9aed 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
@@ -1,4 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_HW_STATS_H_
+#define _FBNIC_HW_STATS_H_
+
#include <linux/ethtool.h>
+#include <linux/spinlock.h>
#include "fbnic_csr.h"
@@ -11,6 +18,44 @@ struct fbnic_stat_counter {
bool reported;
};
+struct fbnic_hw_stat {
+ struct fbnic_stat_counter frames;
+ struct fbnic_stat_counter bytes;
+};
+
+struct fbnic_fec_stats {
+ struct fbnic_stat_counter corrected_blocks, uncorrectable_blocks;
+};
+
+struct fbnic_pcs_stats {
+ struct {
+ struct fbnic_stat_counter lanes[FBNIC_PCS_MAX_LANES];
+ } SymbolErrorDuringCarrier;
+};
+
+/* Note: not updated by fbnic_get_hw_stats() */
+struct fbnic_eth_ctrl_stats {
+ struct fbnic_stat_counter MACControlFramesTransmitted;
+ struct fbnic_stat_counter MACControlFramesReceived;
+};
+
+/* Note: not updated by fbnic_get_hw_stats() */
+struct fbnic_rmon_stats {
+ struct fbnic_stat_counter undersize_pkts;
+ struct fbnic_stat_counter oversize_pkts;
+ struct fbnic_stat_counter fragments;
+ struct fbnic_stat_counter jabbers;
+
+ struct fbnic_stat_counter hist[ETHTOOL_RMON_HIST_MAX];
+ struct fbnic_stat_counter hist_tx[ETHTOOL_RMON_HIST_MAX];
+};
+
+/* Note: not updated by fbnic_get_hw_stats() */
+struct fbnic_pause_stats {
+ struct fbnic_stat_counter tx_pause_frames;
+ struct fbnic_stat_counter rx_pause_frames;
+};
+
struct fbnic_eth_mac_stats {
struct fbnic_stat_counter FramesTransmittedOK;
struct fbnic_stat_counter FramesReceivedOK;
@@ -27,14 +72,92 @@ struct fbnic_eth_mac_stats {
struct fbnic_stat_counter FrameTooLongErrors;
};
+struct fbnic_phy_stats {
+ struct fbnic_fec_stats fec;
+ struct fbnic_pcs_stats pcs;
+};
+
struct fbnic_mac_stats {
struct fbnic_eth_mac_stats eth_mac;
+ struct fbnic_pause_stats pause;
+ struct fbnic_eth_ctrl_stats eth_ctrl;
+ struct fbnic_rmon_stats rmon;
+};
+
+struct fbnic_tmi_stats {
+ struct fbnic_hw_stat drop;
+ struct fbnic_stat_counter ptp_illegal_req, ptp_good_ts, ptp_bad_ts;
+};
+
+struct fbnic_tti_stats {
+ struct fbnic_hw_stat cm_drop, frame_drop, tbi_drop;
+};
+
+struct fbnic_rpc_stats {
+ struct fbnic_stat_counter unkn_etype, unkn_ext_hdr;
+ struct fbnic_stat_counter ipv4_frag, ipv6_frag, ipv4_esp, ipv6_esp;
+ struct fbnic_stat_counter tcp_opt_err, out_of_hdr_err, ovr_size_err;
+};
+
+struct fbnic_rxb_enqueue_stats {
+ struct fbnic_hw_stat drbo;
+ struct fbnic_stat_counter integrity_err, mac_err;
+ struct fbnic_stat_counter parser_err, frm_err;
+};
+
+struct fbnic_rxb_fifo_stats {
+ struct fbnic_hw_stat drop, trunc;
+ struct fbnic_stat_counter trans_drop, trans_ecn;
+ struct fbnic_stat_counter level;
+};
+
+struct fbnic_rxb_dequeue_stats {
+ struct fbnic_hw_stat intf, pbuf;
+};
+
+struct fbnic_rxb_stats {
+ struct fbnic_rxb_enqueue_stats enq[FBNIC_RXB_ENQUEUE_INDICES];
+ struct fbnic_rxb_fifo_stats fifo[FBNIC_RXB_FIFO_INDICES];
+ struct fbnic_rxb_dequeue_stats deq[FBNIC_RXB_DEQUEUE_INDICES];
+};
+
+struct fbnic_hw_q_stats {
+ struct fbnic_stat_counter rde_pkt_err;
+ struct fbnic_stat_counter rde_pkt_cq_drop;
+ struct fbnic_stat_counter rde_pkt_bdq_drop;
+};
+
+struct fbnic_pcie_stats {
+ struct fbnic_stat_counter ob_rd_tlp, ob_rd_dword;
+ struct fbnic_stat_counter ob_wr_tlp, ob_wr_dword;
+ struct fbnic_stat_counter ob_cpl_tlp, ob_cpl_dword;
+
+ struct fbnic_stat_counter ob_rd_no_tag;
+ struct fbnic_stat_counter ob_rd_no_cpl_cred;
+ struct fbnic_stat_counter ob_rd_no_np_cred;
};
struct fbnic_hw_stats {
+ struct fbnic_phy_stats phy;
struct fbnic_mac_stats mac;
+ struct fbnic_tmi_stats tmi;
+ struct fbnic_tti_stats tti;
+ struct fbnic_rpc_stats rpc;
+ struct fbnic_rxb_stats rxb;
+ struct fbnic_hw_q_stats hw_q[FBNIC_MAX_QUEUES];
+ struct fbnic_pcie_stats pcie;
+
+ /* Lock protecting the access to hw stats */
+ spinlock_t lock;
};
u64 fbnic_stat_rd64(struct fbnic_dev *fbd, u32 reg, u32 offset);
+void fbnic_reset_hw_stats(struct fbnic_dev *fbd);
+void fbnic_init_hw_stats(struct fbnic_dev *fbd);
+void fbnic_get_hw_q_stats(struct fbnic_dev *fbd,
+ struct fbnic_hw_q_stats *hw_q);
+void fbnic_get_hw_stats32(struct fbnic_dev *fbd);
void fbnic_get_hw_stats(struct fbnic_dev *fbd);
+
+#endif /* _FBNIC_HW_STATS_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c b/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c
index bcd1086e3768..def8598aceec 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c
@@ -66,7 +66,7 @@ void fbnic_hwmon_register(struct fbnic_dev *fbd)
if (IS_ERR(fbd->hwmon)) {
dev_notice(fbd->dev,
"Failed to register hwmon device %pe\n",
- fbd->hwmon);
+ fbd->hwmon);
fbd->hwmon = NULL;
}
}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_irq.c b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c
index 914362195920..02e8b0b257fe 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_irq.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c
@@ -19,75 +19,111 @@ static irqreturn_t fbnic_fw_msix_intr(int __always_unused irq, void *data)
return IRQ_HANDLED;
}
+static int __fbnic_fw_enable_mbx(struct fbnic_dev *fbd, int vector)
+{
+ int err;
+
+ /* Initialize mailbox and attempt to poll it into ready state */
+ fbnic_mbx_init(fbd);
+ err = fbnic_mbx_poll_tx_ready(fbd);
+ if (err) {
+ dev_warn(fbd->dev, "FW mailbox did not enter ready state\n");
+ return err;
+ }
+
+ /* Enable interrupt and unmask the vector */
+ enable_irq(vector);
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
+
+ return 0;
+}
+
/**
- * fbnic_fw_enable_mbx - Configure and initialize Firmware Mailbox
+ * fbnic_fw_request_mbx - Configure and initialize Firmware Mailbox
* @fbd: Pointer to device to initialize
*
- * This function will initialize the firmware mailbox rings, enable the IRQ
- * and initialize the communication between the Firmware and the host. The
- * firmware is expected to respond to the initialization by sending an
- * interrupt essentially notifying the host that it has seen the
- * initialization and is now synced up.
+ * This function will allocate the IRQ and then reinitialize the mailbox,
+ * starting communication between the host and firmware.
*
* Return: non-zero on failure.
**/
-int fbnic_fw_enable_mbx(struct fbnic_dev *fbd)
+int fbnic_fw_request_mbx(struct fbnic_dev *fbd)
{
- u32 vector = fbd->fw_msix_vector;
- int err;
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+ int vector, err;
+
+ WARN_ON(fbd->fw_msix_vector);
+
+ vector = pci_irq_vector(pdev, FBNIC_FW_MSIX_ENTRY);
+ if (vector < 0)
+ return vector;
/* Request the IRQ for FW Mailbox vector. */
err = request_threaded_irq(vector, NULL, &fbnic_fw_msix_intr,
- IRQF_ONESHOT, dev_name(fbd->dev), fbd);
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
+ dev_name(fbd->dev), fbd);
if (err)
return err;
/* Initialize mailbox and attempt to poll it into ready state */
- fbnic_mbx_init(fbd);
- err = fbnic_mbx_poll_tx_ready(fbd);
- if (err) {
- dev_warn(fbd->dev, "FW mailbox did not enter ready state\n");
+ err = __fbnic_fw_enable_mbx(fbd, vector);
+ if (err)
free_irq(vector, fbd);
- return err;
- }
- /* Enable interrupts */
- fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
+ fbd->fw_msix_vector = vector;
- return 0;
+ return err;
}
/**
- * fbnic_fw_disable_mbx - Disable mailbox and place it in standby state
- * @fbd: Pointer to device to disable
+ * fbnic_fw_disable_mbx - Temporarily place mailbox in standby state
+ * @fbd: Pointer to device
*
- * This function will disable the mailbox interrupt, free any messages still
- * in the mailbox and place it into a standby state. The firmware is
- * expected to see the update and assume that the host is in the reset state.
+ * Shut down the mailbox by notifying the firmware to stop sending us logs, mask
+ * and synchronize the IRQ, and then clean up the rings.
**/
-void fbnic_fw_disable_mbx(struct fbnic_dev *fbd)
+static void fbnic_fw_disable_mbx(struct fbnic_dev *fbd)
{
- /* Disable interrupt and free vector */
- fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_FW_MSIX_ENTRY);
+ /* Disable interrupt and synchronize the IRQ */
+ disable_irq(fbd->fw_msix_vector);
- /* Free the vector */
- free_irq(fbd->fw_msix_vector, fbd);
+ /* Mask the vector */
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_FW_MSIX_ENTRY);
/* Make sure disabling logs message is sent, must be done here to
* avoid risk of completing without a running interrupt.
*/
fbnic_mbx_flush_tx(fbd);
-
- /* Reset the mailboxes to the initialized state */
fbnic_mbx_clean(fbd);
}
-static irqreturn_t fbnic_pcs_msix_intr(int __always_unused irq, void *data)
+/**
+ * fbnic_fw_free_mbx - Disable mailbox and place it in standby state
+ * @fbd: Pointer to device to disable
+ *
+ * This function will disable the mailbox interrupt, free any messages still
+ * in the mailbox and place it into a disabled state. The firmware is
+ * expected to see the update and assume that the host is in the reset state.
+ **/
+void fbnic_fw_free_mbx(struct fbnic_dev *fbd)
+{
+ /* Vector has already been freed */
+ if (!fbd->fw_msix_vector)
+ return;
+
+ fbnic_fw_disable_mbx(fbd);
+
+ /* Free the vector */
+ free_irq(fbd->fw_msix_vector, fbd);
+ fbd->fw_msix_vector = 0;
+}
+
+static irqreturn_t fbnic_mac_msix_intr(int __always_unused irq, void *data)
{
struct fbnic_dev *fbd = data;
struct fbnic_net *fbn;
- if (fbd->mac->pcs_get_link_event(fbd) == FBNIC_LINK_EVENT_NONE) {
+ if (fbd->mac->get_link_event(fbd) == FBNIC_LINK_EVENT_NONE) {
fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0),
1u << FBNIC_PCS_MSIX_ENTRY);
return IRQ_HANDLED;
@@ -95,55 +131,88 @@ static irqreturn_t fbnic_pcs_msix_intr(int __always_unused irq, void *data)
fbn = netdev_priv(fbd->netdev);
- phylink_pcs_change(&fbn->phylink_pcs, false);
+ /* Record link down events */
+ if (!fbd->mac->get_link(fbd, fbn->aui, fbn->fec))
+ phylink_pcs_change(fbn->pcs, false);
return IRQ_HANDLED;
}
/**
- * fbnic_pcs_irq_enable - Configure the MAC to enable it to advertise link
+ * fbnic_mac_request_irq - Configure the MAC to enable it to advertise link
* @fbd: Pointer to device to initialize
*
- * This function provides basic bringup for the MAC/PCS IRQ. For now the IRQ
+ * This function provides basic bringup for the MAC/PHY IRQ. For now the IRQ
* will remain disabled until we start the MAC/PCS/PHY logic via phylink.
*
* Return: non-zero on failure.
**/
-int fbnic_pcs_irq_enable(struct fbnic_dev *fbd)
+int fbnic_mac_request_irq(struct fbnic_dev *fbd)
{
- u32 vector = fbd->pcs_msix_vector;
- int err;
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+ int vector, err;
+
+ WARN_ON(fbd->mac_msix_vector);
- /* Request the IRQ for MAC link vector.
- * Map MAC cause to it, and unmask it
+ vector = pci_irq_vector(pdev, FBNIC_PCS_MSIX_ENTRY);
+ if (vector < 0)
+ return vector;
+
+ /* Request the IRQ for PCS link vector.
+ * Map PCS cause to it, and unmask it
*/
- err = request_irq(vector, &fbnic_pcs_msix_intr, 0,
+ err = request_irq(vector, &fbnic_mac_msix_intr, 0,
fbd->netdev->name, fbd);
if (err)
return err;
+ /* Map and enable interrupt, unmask vector after link is configured */
fbnic_wr32(fbd, FBNIC_INTR_MSIX_CTRL(FBNIC_INTR_MSIX_CTRL_PCS_IDX),
FBNIC_PCS_MSIX_ENTRY | FBNIC_INTR_MSIX_CTRL_ENABLE);
+ fbd->mac_msix_vector = vector;
+
return 0;
}
/**
- * fbnic_pcs_irq_disable - Teardown the MAC IRQ to prepare for stopping
+ * fbnic_mac_free_irq - Teardown the MAC IRQ to prepare for stopping
* @fbd: Pointer to device that is stopping
*
- * This function undoes the work done in fbnic_pcs_irq_enable and prepares
+ * This function undoes the work done in fbnic_mac_request_irq and prepares
* the device to no longer receive traffic on the host interface.
**/
-void fbnic_pcs_irq_disable(struct fbnic_dev *fbd)
+void fbnic_mac_free_irq(struct fbnic_dev *fbd)
{
+ /* Vector has already been freed */
+ if (!fbd->mac_msix_vector)
+ return;
+
/* Disable interrupt */
fbnic_wr32(fbd, FBNIC_INTR_MSIX_CTRL(FBNIC_INTR_MSIX_CTRL_PCS_IDX),
FBNIC_PCS_MSIX_ENTRY);
+ fbnic_wrfl(fbd);
+
+ /* Synchronize IRQ to prevent race that would unmask vector */
+ synchronize_irq(fbd->mac_msix_vector);
+
+ /* Mask the vector */
fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_PCS_MSIX_ENTRY);
/* Free the vector */
- free_irq(fbd->pcs_msix_vector, fbd);
+ free_irq(fbd->mac_msix_vector, fbd);
+ fbd->mac_msix_vector = 0;
+}
+
+void fbnic_synchronize_irq(struct fbnic_dev *fbd, int nr)
+{
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+ int irq = pci_irq_vector(pdev, nr);
+
+ if (irq < 0)
+ return;
+
+ synchronize_irq(irq);
}
int fbnic_request_irq(struct fbnic_dev *fbd, int nr, irq_handler_t handler,
@@ -169,13 +238,52 @@ void fbnic_free_irq(struct fbnic_dev *fbd, int nr, void *data)
free_irq(irq, data);
}
+void fbnic_napi_name_irqs(struct fbnic_dev *fbd)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(fbd->napi_irq); i++)
+ snprintf(fbd->napi_irq[i].name,
+ sizeof(fbd->napi_irq[i].name),
+ "%s-TxRx-%u", fbd->netdev->name, i);
+}
+
+int fbnic_napi_request_irq(struct fbnic_dev *fbd,
+ struct fbnic_napi_vector *nv)
+{
+ struct fbnic_net *fbn = netdev_priv(fbd->netdev);
+ int i = fbnic_napi_idx(nv);
+ int err;
+
+ if (!fbd->napi_irq[i].users) {
+ err = fbnic_request_irq(fbd, nv->v_idx,
+ fbnic_msix_clean_rings, 0,
+ fbd->napi_irq[i].name,
+ &fbn->napi[i]);
+ if (err)
+ return err;
+ }
+
+ fbd->napi_irq[i].users++;
+ return 0;
+}
+
+void fbnic_napi_free_irq(struct fbnic_dev *fbd,
+ struct fbnic_napi_vector *nv)
+{
+ struct fbnic_net *fbn = netdev_priv(fbd->netdev);
+ int i = fbnic_napi_idx(nv);
+
+ if (--fbd->napi_irq[i].users)
+ return;
+
+ fbnic_free_irq(fbd, nv->v_idx, &fbn->napi[i]);
+}
+
void fbnic_free_irqs(struct fbnic_dev *fbd)
{
struct pci_dev *pdev = to_pci_dev(fbd->dev);
- fbd->pcs_msix_vector = 0;
- fbd->fw_msix_vector = 0;
-
fbd->num_irqs = 0;
pci_free_irq_vectors(pdev);
@@ -201,8 +309,5 @@ int fbnic_alloc_irqs(struct fbnic_dev *fbd)
fbd->num_irqs = num_irqs;
- fbd->pcs_msix_vector = pci_irq_vector(pdev, FBNIC_PCS_MSIX_ENTRY);
- fbd->fw_msix_vector = pci_irq_vector(pdev, FBNIC_FW_MSIX_ENTRY);
-
return 0;
}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
index 80b82ff12c4d..fc7abea4ef5b 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
@@ -79,18 +79,20 @@ static void fbnic_mac_init_axi(struct fbnic_dev *fbd)
fbnic_init_readrq(fbd, FBNIC_QM_RNI_RBP_CTL, cls, readrq);
fbnic_init_mps(fbd, FBNIC_QM_RNI_RDE_CTL, cls, mps);
fbnic_init_mps(fbd, FBNIC_QM_RNI_RCM_CTL, cls, mps);
-
- /* Enable XALI AR/AW outbound */
- wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
- FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME);
- wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
- FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME);
}
static void fbnic_mac_init_qm(struct fbnic_dev *fbd)
{
+ u64 default_meta = FIELD_PREP(FBNIC_TWD_L2_HLEN_MASK, ETH_HLEN) |
+ FBNIC_TWD_FLAG_REQ_COMPLETION;
u32 clock_freq;
+ /* Configure default TWQ Metadata descriptor */
+ wr32(fbd, FBNIC_QM_TWQ_DEFAULT_META_L,
+ lower_32_bits(default_meta));
+ wr32(fbd, FBNIC_QM_TWQ_DEFAULT_META_H,
+ upper_32_bits(default_meta));
+
/* Configure TSO behavior */
wr32(fbd, FBNIC_QM_TQS_CTL0,
FIELD_PREP(FBNIC_QM_TQS_CTL0_LSO_TS_MASK,
@@ -432,14 +434,14 @@ static void fbnic_mac_tx_pause_config(struct fbnic_dev *fbd, bool tx_pause)
wr32(fbd, FBNIC_RXB_PAUSE_DROP_CTRL, rxb_pause_ctrl);
}
-static int fbnic_pcs_get_link_event_asic(struct fbnic_dev *fbd)
+static int fbnic_mac_get_link_event(struct fbnic_dev *fbd)
{
- u32 pcs_intr_mask = rd32(fbd, FBNIC_SIG_PCS_INTR_STS);
+ u32 intr_mask = rd32(fbd, FBNIC_SIG_PCS_INTR_STS);
- if (pcs_intr_mask & FBNIC_SIG_PCS_INTR_LINK_DOWN)
+ if (intr_mask & FBNIC_SIG_PCS_INTR_LINK_DOWN)
return FBNIC_LINK_EVENT_DOWN;
- return (pcs_intr_mask & FBNIC_SIG_PCS_INTR_LINK_UP) ?
+ return (intr_mask & FBNIC_SIG_PCS_INTR_LINK_UP) ?
FBNIC_LINK_EVENT_UP : FBNIC_LINK_EVENT_NONE;
}
@@ -458,15 +460,14 @@ static u32 __fbnic_mac_cmd_config_asic(struct fbnic_dev *fbd,
command_config |= FBNIC_MAC_COMMAND_CONFIG_RX_PAUSE_DIS;
/* Disable fault handling if no FEC is requested */
- if ((fbn->fec & FBNIC_FEC_MODE_MASK) == FBNIC_FEC_OFF)
+ if (fbn->fec == FBNIC_FEC_OFF)
command_config |= FBNIC_MAC_COMMAND_CONFIG_FLT_HDL_DIS;
return command_config;
}
-static bool fbnic_mac_get_pcs_link_status(struct fbnic_dev *fbd)
+static bool fbnic_mac_get_link_status(struct fbnic_dev *fbd, u8 aui, u8 fec)
{
- struct fbnic_net *fbn = netdev_priv(fbd->netdev);
u32 pcs_status, lane_mask = ~0;
pcs_status = rd32(fbd, FBNIC_SIG_PCS_OUT0);
@@ -474,15 +475,15 @@ static bool fbnic_mac_get_pcs_link_status(struct fbnic_dev *fbd)
return false;
/* Define the expected lane mask for the status bits we need to check */
- switch (fbn->link_mode & FBNIC_LINK_MODE_MASK) {
- case FBNIC_LINK_100R2:
+ switch (aui) {
+ case FBNIC_AUI_100GAUI2:
lane_mask = 0xf;
break;
- case FBNIC_LINK_50R1:
+ case FBNIC_AUI_50GAUI1:
lane_mask = 3;
break;
- case FBNIC_LINK_50R2:
- switch (fbn->fec & FBNIC_FEC_MODE_MASK) {
+ case FBNIC_AUI_LAUI2:
+ switch (fec) {
case FBNIC_FEC_OFF:
lane_mask = 0x63;
break;
@@ -494,13 +495,13 @@ static bool fbnic_mac_get_pcs_link_status(struct fbnic_dev *fbd)
break;
}
break;
- case FBNIC_LINK_25R1:
+ case FBNIC_AUI_25GAUI:
lane_mask = 1;
break;
}
/* Use an XOR to remove the bits we expect to see set */
- switch (fbn->fec & FBNIC_FEC_MODE_MASK) {
+ switch (fec) {
case FBNIC_FEC_OFF:
lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT0_BLOCK_LOCK,
pcs_status);
@@ -519,7 +520,46 @@ static bool fbnic_mac_get_pcs_link_status(struct fbnic_dev *fbd)
return !lane_mask;
}
-static bool fbnic_pcs_get_link_asic(struct fbnic_dev *fbd)
+static bool fbnic_pmd_update_state(struct fbnic_dev *fbd, bool signal_detect)
+{
+ /* Delay link up for 4 seconds to allow for link training.
+ * The state transitions for this are as follows:
+ *
+ * All states have the following two transitions in common:
+ * Loss of signal -> FBNIC_PMD_INITIALIZE
+ * The condition handled below (!signal_detect)
+ * Reconfiguration -> FBNIC_PMD_INITIALIZE
+ * Occurs when mac_prepare starts a PHY reconfig
+ * FBNIC_PMD_TRAINING:
+ * signal still detected && 4s have passed -> Report link up
+ * When link is brought up in link_up -> FBNIC_PMD_SEND_DATA
+ * FBNIC_PMD_INITIALIZE:
+ * signal detected -> FBNIC_PMD_TRAINING
+ */
+ if (!signal_detect) {
+ fbd->pmd_state = FBNIC_PMD_INITIALIZE;
+ return false;
+ }
+
+ switch (fbd->pmd_state) {
+ case FBNIC_PMD_TRAINING:
+ return time_before(fbd->end_of_pmd_training, jiffies);
+ case FBNIC_PMD_LINK_READY:
+ case FBNIC_PMD_SEND_DATA:
+ return true;
+ }
+
+ fbd->end_of_pmd_training = jiffies + 4 * HZ;
+
+ /* Ensure end_of_pmd_training is visible before the state change */
+ smp_wmb();
+
+ fbd->pmd_state = FBNIC_PMD_TRAINING;
+
+ return false;
+}
+
+static bool fbnic_mac_get_link(struct fbnic_dev *fbd, u8 aui, u8 fec)
{
bool link;
@@ -536,7 +576,8 @@ static bool fbnic_pcs_get_link_asic(struct fbnic_dev *fbd)
wr32(fbd, FBNIC_SIG_PCS_INTR_STS,
FBNIC_SIG_PCS_INTR_LINK_DOWN | FBNIC_SIG_PCS_INTR_LINK_UP);
- link = fbnic_mac_get_pcs_link_status(fbd);
+ link = fbnic_mac_get_link_status(fbd, aui, fec);
+ link = fbnic_pmd_update_state(fbd, link);
/* Enable interrupt to only capture changes in link state */
wr32(fbd, FBNIC_SIG_PCS_INTR_MASK,
@@ -546,73 +587,53 @@ static bool fbnic_pcs_get_link_asic(struct fbnic_dev *fbd)
return link;
}
-static void fbnic_pcs_get_fw_settings(struct fbnic_dev *fbd)
+void fbnic_mac_get_fw_settings(struct fbnic_dev *fbd, u8 *aui, u8 *fec)
{
- struct fbnic_net *fbn = netdev_priv(fbd->netdev);
- u8 link_mode = fbn->link_mode;
- u8 fec = fbn->fec;
-
- /* Update FEC first to reflect FW current mode */
- if (fbn->fec & FBNIC_FEC_AUTO) {
- switch (fbd->fw_cap.link_fec) {
- case FBNIC_FW_LINK_FEC_NONE:
- fec = FBNIC_FEC_OFF;
- break;
- case FBNIC_FW_LINK_FEC_RS:
- fec = FBNIC_FEC_RS;
- break;
- case FBNIC_FW_LINK_FEC_BASER:
- fec = FBNIC_FEC_BASER;
- break;
- default:
- return;
- }
-
- fbn->fec = fec;
+ /* Retrieve default speed from FW */
+ switch (fbd->fw_cap.link_speed) {
+ case FBNIC_FW_LINK_MODE_25CR:
+ *aui = FBNIC_AUI_25GAUI;
+ break;
+ case FBNIC_FW_LINK_MODE_50CR2:
+ *aui = FBNIC_AUI_LAUI2;
+ break;
+ case FBNIC_FW_LINK_MODE_50CR:
+ *aui = FBNIC_AUI_50GAUI1;
+ *fec = FBNIC_FEC_RS;
+ return;
+ case FBNIC_FW_LINK_MODE_100CR2:
+ *aui = FBNIC_AUI_100GAUI2;
+ *fec = FBNIC_FEC_RS;
+ return;
+ default:
+ *aui = FBNIC_AUI_UNKNOWN;
+ return;
}
- /* Do nothing if AUTO mode is not engaged */
- if (fbn->link_mode & FBNIC_LINK_AUTO) {
- switch (fbd->fw_cap.link_speed) {
- case FBNIC_FW_LINK_SPEED_25R1:
- link_mode = FBNIC_LINK_25R1;
- break;
- case FBNIC_FW_LINK_SPEED_50R2:
- link_mode = FBNIC_LINK_50R2;
- break;
- case FBNIC_FW_LINK_SPEED_50R1:
- link_mode = FBNIC_LINK_50R1;
- fec = FBNIC_FEC_RS;
- break;
- case FBNIC_FW_LINK_SPEED_100R2:
- link_mode = FBNIC_LINK_100R2;
- fec = FBNIC_FEC_RS;
- break;
- default:
- return;
- }
-
- fbn->link_mode = link_mode;
+ /* Update FEC first to reflect FW current mode */
+ switch (fbd->fw_cap.link_fec) {
+ case FBNIC_FW_LINK_FEC_NONE:
+ *fec = FBNIC_FEC_OFF;
+ break;
+ case FBNIC_FW_LINK_FEC_RS:
+ default:
+ *fec = FBNIC_FEC_RS;
+ break;
+ case FBNIC_FW_LINK_FEC_BASER:
+ *fec = FBNIC_FEC_BASER;
+ break;
}
}
-static int fbnic_pcs_enable_asic(struct fbnic_dev *fbd)
+static void fbnic_mac_prepare(struct fbnic_dev *fbd, u8 aui, u8 fec)
{
/* Mask and clear the PCS interrupt, will be enabled by link handler */
wr32(fbd, FBNIC_SIG_PCS_INTR_MASK, ~0);
wr32(fbd, FBNIC_SIG_PCS_INTR_STS, ~0);
- /* Pull in settings from FW */
- fbnic_pcs_get_fw_settings(fbd);
-
- return 0;
-}
-
-static void fbnic_pcs_disable_asic(struct fbnic_dev *fbd)
-{
- /* Mask and clear the PCS interrupt */
- wr32(fbd, FBNIC_SIG_PCS_INTR_MASK, ~0);
- wr32(fbd, FBNIC_SIG_PCS_INTR_STS, ~0);
+ /* If we don't have link tear it all down and start over */
+ if (!fbnic_mac_get_link_status(fbd, aui, fec))
+ fbd->pmd_state = FBNIC_PMD_INITIALIZE;
}
static void fbnic_mac_link_down_asic(struct fbnic_dev *fbd)
@@ -653,6 +674,50 @@ static void fbnic_mac_link_up_asic(struct fbnic_dev *fbd,
}
static void
+fbnic_pcs_rsfec_stat_rd32(struct fbnic_dev *fbd, u32 reg, bool reset,
+ struct fbnic_stat_counter *stat)
+{
+ u32 pcs_rsfec_stat;
+
+ /* The PCS/RSFEC registers are only 16b wide each. So what we will
+ * have after the 64b read is 0x0000xxxx0000xxxx. To make it usable
+ * as a full stat we will shift the upper bits into the lower set of
+ * 0s and then mask off the math at 32b.
+ *
+ * Read ordering must be lower reg followed by upper reg.
+ */
+ pcs_rsfec_stat = rd32(fbd, reg) & 0xffff;
+ pcs_rsfec_stat |= rd32(fbd, reg + 1) << 16;
+
+ /* RSFEC registers clear themselves upon being read, so there is no
+ * need to store the old_reg_value.
+ */
+ if (!reset)
+ stat->value += pcs_rsfec_stat;
+}
+
+static void
+fbnic_mac_get_fec_stats(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_fec_stats *s)
+{
+ fbnic_pcs_rsfec_stat_rd32(fbd, FBNIC_RSFEC_CCW_LO(0), reset,
+ &s->corrected_blocks);
+ fbnic_pcs_rsfec_stat_rd32(fbd, FBNIC_RSFEC_NCCW_LO(0), reset,
+ &s->uncorrectable_blocks);
+}
+
+static void
+fbnic_mac_get_pcs_stats(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_pcs_stats *s)
+{
+ int i;
+
+ for (i = 0; i < FBNIC_PCS_MAX_LANES; i++)
+ fbnic_pcs_rsfec_stat_rd32(fbd, FBNIC_PCS_SYMBLERR_LO(i), reset,
+ &s->SymbolErrorDuringCarrier.lanes[i]);
+}
+
+static void
fbnic_mac_get_eth_mac_stats(struct fbnic_dev *fbd, bool reset,
struct fbnic_eth_mac_stats *mac_stats)
{
@@ -686,34 +751,165 @@ fbnic_mac_get_eth_mac_stats(struct fbnic_dev *fbd, bool reset,
MAC_STAT_TX_BROADCAST);
}
-static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id, long *val)
+static void
+fbnic_mac_get_pause_stats(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_pause_stats *pause_stats)
+{
+ fbnic_mac_stat_rd64(fbd, reset, pause_stats->tx_pause_frames,
+ MAC_STAT_TX_XOFF_STB);
+ fbnic_mac_stat_rd64(fbd, reset, pause_stats->rx_pause_frames,
+ MAC_STAT_RX_XOFF_STB);
+}
+
+static void
+fbnic_mac_get_eth_ctrl_stats(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_eth_ctrl_stats *ctrl_stats)
{
- struct fbnic_fw_completion fw_cmpl;
+ fbnic_mac_stat_rd64(fbd, reset, ctrl_stats->MACControlFramesReceived,
+ MAC_STAT_RX_CONTROL_FRAMES);
+ fbnic_mac_stat_rd64(fbd, reset, ctrl_stats->MACControlFramesTransmitted,
+ MAC_STAT_TX_CONTROL_FRAMES);
+}
+
+static void
+fbnic_mac_get_rmon_stats(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_rmon_stats *rmon_stats)
+{
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->undersize_pkts,
+ MAC_STAT_RX_UNDERSIZE);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->oversize_pkts,
+ MAC_STAT_RX_OVERSIZE);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->fragments,
+ MAC_STAT_RX_FRAGMENT);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->jabbers,
+ MAC_STAT_RX_JABBER);
+
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[0],
+ MAC_STAT_RX_PACKET_64_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[1],
+ MAC_STAT_RX_PACKET_65_127_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[2],
+ MAC_STAT_RX_PACKET_128_255_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[3],
+ MAC_STAT_RX_PACKET_256_511_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[4],
+ MAC_STAT_RX_PACKET_512_1023_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[5],
+ MAC_STAT_RX_PACKET_1024_1518_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[6],
+ RPC_STAT_RX_PACKET_1519_2047_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[7],
+ RPC_STAT_RX_PACKET_2048_4095_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[8],
+ RPC_STAT_RX_PACKET_4096_8191_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[9],
+ RPC_STAT_RX_PACKET_8192_9216_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[10],
+ RPC_STAT_RX_PACKET_9217_MAX_BYTES);
+
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[0],
+ MAC_STAT_TX_PACKET_64_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[1],
+ MAC_STAT_TX_PACKET_65_127_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[2],
+ MAC_STAT_TX_PACKET_128_255_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[3],
+ MAC_STAT_TX_PACKET_256_511_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[4],
+ MAC_STAT_TX_PACKET_512_1023_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[5],
+ MAC_STAT_TX_PACKET_1024_1518_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[6],
+ TMI_STAT_TX_PACKET_1519_2047_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[7],
+ TMI_STAT_TX_PACKET_2048_4095_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[8],
+ TMI_STAT_TX_PACKET_4096_8191_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[9],
+ TMI_STAT_TX_PACKET_8192_9216_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[10],
+ TMI_STAT_TX_PACKET_9217_MAX_BYTES);
+}
+
+static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id,
+ long *val)
+{
+ struct fbnic_fw_completion *fw_cmpl;
+ int err = 0, retries = 5;
s32 *sensor;
+ fw_cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
+ if (!fw_cmpl)
+ return -ENOMEM;
+
switch (id) {
case FBNIC_SENSOR_TEMP:
- sensor = &fw_cmpl.tsene.millidegrees;
+ sensor = &fw_cmpl->u.tsene.millidegrees;
break;
case FBNIC_SENSOR_VOLTAGE:
- sensor = &fw_cmpl.tsene.millivolts;
+ sensor = &fw_cmpl->u.tsene.millivolts;
break;
default:
- return -EINVAL;
+ err = -EINVAL;
+ goto exit_free;
+ }
+
+ err = fbnic_fw_xmit_tsene_read_msg(fbd, fw_cmpl);
+ if (err) {
+ dev_err(fbd->dev,
+ "Failed to transmit TSENE read msg, err %d\n",
+ err);
+ goto exit_free;
+ }
+
+ /* Allow 2 seconds for reply, resend and try up to 5 times */
+ while (!wait_for_completion_timeout(&fw_cmpl->done, 2 * HZ)) {
+ retries--;
+
+ if (retries == 0) {
+ dev_err(fbd->dev,
+ "Timed out waiting for TSENE read\n");
+ err = -ETIMEDOUT;
+ goto exit_cleanup;
+ }
+
+ err = fbnic_fw_xmit_tsene_read_msg(fbd, NULL);
+ if (err) {
+ dev_err(fbd->dev,
+ "Failed to transmit TSENE read msg, err %d\n",
+ err);
+ goto exit_cleanup;
+ }
+ }
+
+ /* Handle error returned by firmware */
+ if (fw_cmpl->result) {
+ err = fw_cmpl->result;
+ dev_err(fbd->dev, "%s: Firmware returned error %d\n",
+ __func__, err);
+ goto exit_cleanup;
}
*val = *sensor;
+exit_cleanup:
+ fbnic_mbx_clear_cmpl(fbd, fw_cmpl);
+exit_free:
+ fbnic_fw_put_cmpl(fw_cmpl);
- return 0;
+ return err;
}
static const struct fbnic_mac fbnic_mac_asic = {
.init_regs = fbnic_mac_init_regs,
- .pcs_enable = fbnic_pcs_enable_asic,
- .pcs_disable = fbnic_pcs_disable_asic,
- .pcs_get_link = fbnic_pcs_get_link_asic,
- .pcs_get_link_event = fbnic_pcs_get_link_event_asic,
+ .get_link = fbnic_mac_get_link,
+ .get_link_event = fbnic_mac_get_link_event,
+ .prepare = fbnic_mac_prepare,
+ .get_fec_stats = fbnic_mac_get_fec_stats,
+ .get_pcs_stats = fbnic_mac_get_pcs_stats,
.get_eth_mac_stats = fbnic_mac_get_eth_mac_stats,
+ .get_pause_stats = fbnic_mac_get_pause_stats,
+ .get_eth_ctrl_stats = fbnic_mac_get_eth_ctrl_stats,
+ .get_rmon_stats = fbnic_mac_get_rmon_stats,
.link_down = fbnic_mac_link_down_asic,
.link_up = fbnic_mac_link_up_asic,
.get_sensor = fbnic_mac_get_sensor_asic,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
index 05a591653e09..f08fe8b7c497 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
@@ -10,6 +10,24 @@ struct fbnic_dev;
#define FBNIC_MAX_JUMBO_FRAME_SIZE 9742
+/* States loosely based on section 136.8.11.7.5 of IEEE 802.3-2022 Ethernet
+ * Standard. These are needed to track the state of the PHY, as there is a
+ * delay of several seconds from the time the link comes up until training
+ * completes, and we need to wait for that before reporting the link.
+ *
+ * Currently we treat training as a single block as this is managed by the
+ * firmware.
+ *
+ * We have FBNIC_PMD_SEND_DATA set to 0 as the expected default at driver load
+ * and we initialize the structure containing it to zero at allocation.
+ */
+enum {
+ FBNIC_PMD_SEND_DATA = 0x0,
+ FBNIC_PMD_INITIALIZE = 0x1,
+ FBNIC_PMD_TRAINING = 0x2,
+ FBNIC_PMD_LINK_READY = 0x3,
+};
+
enum {
FBNIC_LINK_EVENT_NONE = 0,
FBNIC_LINK_EVENT_UP = 1,
@@ -25,27 +43,24 @@ enum {
FBNIC_FEC_OFF = 0,
FBNIC_FEC_RS = 1,
FBNIC_FEC_BASER = 2,
- FBNIC_FEC_AUTO = 4,
};
-#define FBNIC_FEC_MODE_MASK (FBNIC_FEC_AUTO - 1)
-
-/* Treat the link modes as a set of modulation/lanes bitmask:
+/* Treat the AUI modes as a modulation/lanes bitmask:
* Bit 0: Lane Count, 0 = R1, 1 = R2
* Bit 1: Modulation, 0 = NRZ, 1 = PAM4
- * Bit 2: Retrieve link mode from FW
+ * Bit 2: Unknown Modulation/Lane Configuration
*/
enum {
- FBNIC_LINK_25R1 = 0,
- FBNIC_LINK_50R2 = 1,
- FBNIC_LINK_50R1 = 2,
- FBNIC_LINK_100R2 = 3,
- FBNIC_LINK_AUTO = 4,
+ FBNIC_AUI_25GAUI = 0, /* 25.7812GBd 25.78125 * 1 */
+ FBNIC_AUI_LAUI2 = 1, /* 51.5625GBd 25.78125 * 2 */
+ FBNIC_AUI_50GAUI1 = 2, /* 53.125GBd 53.125 * 1 */
+ FBNIC_AUI_100GAUI2 = 3, /* 106.25GBd 53.125 * 2 */
+ FBNIC_AUI_UNKNOWN = 4,
+ __FBNIC_AUI_MAX__
};
-#define FBNIC_LINK_MODE_R2 (FBNIC_LINK_50R2)
-#define FBNIC_LINK_MODE_PAM4 (FBNIC_LINK_50R1)
-#define FBNIC_LINK_MODE_MASK (FBNIC_LINK_AUTO - 1)
+#define FBNIC_AUI_MODE_R2 (FBNIC_AUI_LAUI2)
+#define FBNIC_AUI_MODE_PAM4 (FBNIC_AUI_50GAUI1)
enum fbnic_sensor_id {
FBNIC_SENSOR_TEMP, /* Temp in millidegrees Centigrade */
@@ -59,15 +74,15 @@ enum fbnic_sensor_id {
* void (*init_regs)(struct fbnic_dev *fbd);
* Initialize MAC registers to enable Tx/Rx paths and FIFOs.
*
- * void (*pcs_enable)(struct fbnic_dev *fbd);
- * Configure and enable PCS to enable link if not already enabled
- * void (*pcs_disable)(struct fbnic_dev *fbd);
- * Shutdown the link if we are the only consumer of it.
- * bool (*pcs_get_link)(struct fbnic_dev *fbd);
- * Check PCS link status
- * int (*pcs_get_link_event)(struct fbnic_dev *fbd)
+ * int (*get_link_event)(struct fbnic_dev *fbd)
* Get the current link event status, reports true if link has
* changed to either FBNIC_LINK_EVENT_DOWN or FBNIC_LINK_EVENT_UP
+ * bool (*get_link)(struct fbnic_dev *fbd, u8 aui, u8 fec);
+ * Check link status
+ *
+ * void (*prepare)(struct fbnic_dev *fbd, u8 aui, u8 fec);
+ * Prepare PHY for init by fetching settings, disabling interrupts,
+ * and sending an updated PHY config to FW if needed.
*
* void (*link_down)(struct fbnic_dev *fbd);
* Configure MAC for link down event
@@ -78,13 +93,23 @@ enum fbnic_sensor_id {
struct fbnic_mac {
void (*init_regs)(struct fbnic_dev *fbd);
- int (*pcs_enable)(struct fbnic_dev *fbd);
- void (*pcs_disable)(struct fbnic_dev *fbd);
- bool (*pcs_get_link)(struct fbnic_dev *fbd);
- int (*pcs_get_link_event)(struct fbnic_dev *fbd);
+ int (*get_link_event)(struct fbnic_dev *fbd);
+ bool (*get_link)(struct fbnic_dev *fbd, u8 aui, u8 fec);
+
+ void (*prepare)(struct fbnic_dev *fbd, u8 aui, u8 fec);
+ void (*get_fec_stats)(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_fec_stats *fec_stats);
+ void (*get_pcs_stats)(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_pcs_stats *pcs_stats);
void (*get_eth_mac_stats)(struct fbnic_dev *fbd, bool reset,
struct fbnic_eth_mac_stats *mac_stats);
+ void (*get_pause_stats)(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_pause_stats *pause_stats);
+ void (*get_eth_ctrl_stats)(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_eth_ctrl_stats *ctrl_stats);
+ void (*get_rmon_stats)(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_rmon_stats *rmon_stats);
void (*link_down)(struct fbnic_dev *fbd);
void (*link_up)(struct fbnic_dev *fbd, bool tx_pause, bool rx_pause);
@@ -93,4 +118,5 @@ struct fbnic_mac {
};
int fbnic_mac_init(struct fbnic_dev *fbd);
+void fbnic_mac_get_fw_settings(struct fbnic_dev *fbd, u8 *aui, u8 *fec);
#endif /* _FBNIC_MAC_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mdio.c b/drivers/net/ethernet/meta/fbnic/fbnic_mdio.c
new file mode 100644
index 000000000000..709041f7fc43
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mdio.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/mdio.h>
+#include <linux/pcs/pcs-xpcs.h>
+
+#include "fbnic.h"
+#include "fbnic_netdev.h"
+
+#define DW_VENDOR BIT(15)
+#define FBNIC_PCS_VENDOR BIT(9)
+#define FBNIC_PCS_ZERO_MASK (DW_VENDOR - FBNIC_PCS_VENDOR)
+
+static int
+fbnic_mdio_read_pmd(struct fbnic_dev *fbd, int addr, int regnum)
+{
+ u8 aui = FBNIC_AUI_UNKNOWN;
+ struct fbnic_net *fbn;
+ int ret = 0;
+
+ /* We don't need a second PMD; just one can handle both lanes */
+ if (addr)
+ return 0;
+
+ if (fbd->netdev) {
+ fbn = netdev_priv(fbd->netdev);
+ if (fbn->aui < FBNIC_AUI_UNKNOWN)
+ aui = fbn->aui;
+ }
+
+ switch (regnum) {
+ case MDIO_DEVID1:
+ ret = MP_FBNIC_XPCS_PMA_100G_ID >> 16;
+ break;
+ case MDIO_DEVID2:
+ ret = MP_FBNIC_XPCS_PMA_100G_ID & 0xffff;
+ break;
+ case MDIO_DEVS1:
+ ret = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS;
+ break;
+ case MDIO_STAT2:
+ ret = MDIO_STAT2_DEVPRST_VAL;
+ break;
+ case MDIO_PMA_RXDET:
+ /* If training isn't complete default to 0 */
+ if (fbd->pmd_state != FBNIC_PMD_SEND_DATA)
+ break;
+ /* Report either 1 or 2 lanes detected depending on config */
+ ret = (MDIO_PMD_RXDET_GLOBAL | MDIO_PMD_RXDET_0) |
+ ((aui & FBNIC_AUI_MODE_R2) *
+ (MDIO_PMD_RXDET_1 / FBNIC_AUI_MODE_R2));
+ break;
+ default:
+ break;
+ }
+
+ dev_dbg(fbd->dev,
+ "SWMII PMD Rd: Addr: %d RegNum: %d Value: 0x%04x\n",
+ addr, regnum, ret);
+
+ return ret;
+}
+
+static int
+fbnic_mdio_read_pcs(struct fbnic_dev *fbd, int addr, int regnum)
+{
+ int ret, offset = 0;
+
+ /* We will need access to both PCS instances to get config info */
+ if (addr >= 2)
+ return 0;
+
+ /* Report 0 for reserved registers */
+ if (regnum & FBNIC_PCS_ZERO_MASK)
+ return 0;
+
+ /* Intercept and return correct ID for PCS */
+ if (regnum == MDIO_DEVID1)
+ return DW_XPCS_ID >> 16;
+ if (regnum == MDIO_DEVID2)
+ return DW_XPCS_ID & 0xffff;
+ if (regnum == MDIO_DEVS1)
+ return MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS;
+
+ /* Swap vendor page bit for FBNIC PCS vendor page bit */
+ if (regnum & DW_VENDOR)
+ offset ^= DW_VENDOR | FBNIC_PCS_VENDOR;
+
+ ret = fbnic_rd32(fbd, FBNIC_PCS_PAGE(addr) + (regnum ^ offset));
+
+ dev_dbg(fbd->dev,
+ "SWMII PCS Rd: Addr: %d RegNum: %d Value: 0x%04x\n",
+ addr, regnum, ret);
+
+ return ret;
+}
+
+static int
+fbnic_mdio_read_c45(struct mii_bus *bus, int addr, int devnum, int regnum)
+{
+ struct fbnic_dev *fbd = bus->priv;
+
+ if (devnum == MDIO_MMD_PMAPMD)
+ return fbnic_mdio_read_pmd(fbd, addr, regnum);
+
+ if (devnum == MDIO_MMD_PCS)
+ return fbnic_mdio_read_pcs(fbd, addr, regnum);
+
+ return 0;
+}
+
+static void
+fbnic_mdio_write_pmd(struct fbnic_dev *fbd, int addr, int regnum, u16 val)
+{
+ dev_dbg(fbd->dev,
+ "SWMII PMD Wr: Addr: %d RegNum: %d Value: 0x%04x\n",
+ addr, regnum, val);
+}
+
+static void
+fbnic_mdio_write_pcs(struct fbnic_dev *fbd, int addr, int regnum, u16 val)
+{
+ dev_dbg(fbd->dev,
+ "SWMII PCS Wr: Addr: %d RegNum: %d Value: 0x%04x\n",
+ addr, regnum, val);
+
+ /* Allow access to both halves of PCS for 50R2 config */
+ if (addr > 2)
+ return;
+
+ /* Skip write for reserved registers */
+ if (regnum & FBNIC_PCS_ZERO_MASK)
+ return;
+
+ /* Swap vendor page bit for FBNIC PCS vendor page bit */
+ if (regnum & DW_VENDOR)
+ regnum ^= DW_VENDOR | FBNIC_PCS_VENDOR;
+
+ fbnic_wr32(fbd, FBNIC_PCS_PAGE(addr) + regnum, val);
+}
+
+static int
+fbnic_mdio_write_c45(struct mii_bus *bus, int addr, int devnum,
+ int regnum, u16 val)
+{
+ struct fbnic_dev *fbd = bus->priv;
+
+ if (devnum == MDIO_MMD_PMAPMD)
+ fbnic_mdio_write_pmd(fbd, addr, regnum, val);
+
+ if (devnum == MDIO_MMD_PCS)
+ fbnic_mdio_write_pcs(fbd, addr, regnum, val);
+
+ return 0;
+}
+
+/**
+ * fbnic_mdiobus_create - Create an MDIO bus to allow interfacing w/ PHYs
+ * @fbd: Pointer to FBNIC device structure to populate bus on
+ *
+ * Initialize an MDIO bus and place a pointer to it on the fbd struct. This bus
+ * will be used to interface with the PMA/PMD and PCS.
+ *
+ * Return: 0 on success, negative on failure
+ **/
+int fbnic_mdiobus_create(struct fbnic_dev *fbd)
+{
+ struct mii_bus *bus;
+ int err;
+
+ bus = devm_mdiobus_alloc(fbd->dev);
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "fbnic_mii_bus";
+ bus->read_c45 = &fbnic_mdio_read_c45;
+ bus->write_c45 = &fbnic_mdio_write_c45;
+
+ /* Disable PHY auto probing. We will add PCS manually */
+ bus->phy_mask = ~0;
+
+ bus->parent = fbd->dev;
+ bus->priv = fbd;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(fbd->dev));
+
+ err = devm_mdiobus_register(fbd->dev, bus);
+ if (err) {
+ dev_err(fbd->dev, "Failed to create MDIO bus: %d\n", err);
+ return err;
+ }
+
+ fbd->mdio_bus = bus;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
index c08798fad203..81c9d5c9a4b2 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -23,13 +23,7 @@ int __fbnic_open(struct fbnic_net *fbn)
if (err)
goto free_napi_vectors;
- err = netif_set_real_num_tx_queues(fbn->netdev,
- fbn->num_tx_queues);
- if (err)
- goto free_resources;
-
- err = netif_set_real_num_rx_queues(fbn->netdev,
- fbn->num_rx_queues);
+ err = fbnic_set_netif_queues(fbn);
if (err)
goto free_resources;
@@ -39,7 +33,7 @@ int __fbnic_open(struct fbnic_net *fbn)
dev_warn(fbd->dev,
"Error %d sending host ownership message to the firmware\n",
err);
- goto free_resources;
+ goto err_reset_queues;
}
err = fbnic_time_start(fbn);
@@ -50,18 +44,23 @@ int __fbnic_open(struct fbnic_net *fbn)
if (err)
goto time_stop;
- err = fbnic_pcs_irq_enable(fbd);
+ err = fbnic_mac_request_irq(fbd);
if (err)
goto time_stop;
+
/* Pull the BMC config and initialize the RPC */
fbnic_bmc_rpc_init(fbd);
fbnic_rss_reinit(fbd, fbn);
+ phylink_resume(fbn->phylink);
+
return 0;
time_stop:
fbnic_time_stop(fbn);
release_ownership:
fbnic_fw_xmit_ownership_msg(fbn->fbd, false);
+err_reset_queues:
+ fbnic_reset_netif_queues(fbn);
free_resources:
fbnic_free_resources(fbn);
free_napi_vectors:
@@ -74,6 +73,8 @@ static int fbnic_open(struct net_device *netdev)
struct fbnic_net *fbn = netdev_priv(netdev);
int err;
+ fbnic_napi_name_irqs(fbn->fbd);
+
err = __fbnic_open(fbn);
if (!err)
fbnic_up(fbn);
@@ -85,12 +86,15 @@ static int fbnic_stop(struct net_device *netdev)
{
struct fbnic_net *fbn = netdev_priv(netdev);
+ fbnic_mac_free_irq(fbn->fbd);
+ phylink_suspend(fbn->phylink, fbnic_bmc_present(fbn->fbd));
+
fbnic_down(fbn);
- fbnic_pcs_irq_disable(fbn->fbd);
fbnic_time_stop(fbn);
fbnic_fw_xmit_ownership_msg(fbn->fbd, false);
+ fbnic_reset_netif_queues(fbn);
fbnic_free_resources(fbn);
fbnic_free_napi_vectors(fbn);
@@ -179,11 +183,10 @@ static int fbnic_mc_unsync(struct net_device *netdev, const unsigned char *addr)
return ret;
}
-void __fbnic_set_rx_mode(struct net_device *netdev)
+void __fbnic_set_rx_mode(struct fbnic_dev *fbd)
{
- struct fbnic_net *fbn = netdev_priv(netdev);
bool uc_promisc = false, mc_promisc = false;
- struct fbnic_dev *fbd = fbn->fbd;
+ struct net_device *netdev = fbd->netdev;
struct fbnic_mac_addr *mac_addr;
int err;
@@ -220,49 +223,8 @@ void __fbnic_set_rx_mode(struct net_device *netdev)
uc_promisc |= !!(netdev->flags & IFF_PROMISC);
mc_promisc |= !!(netdev->flags & IFF_ALLMULTI) || uc_promisc;
- /* Populate last TCAM entry with promiscuous entry and 0/1 bit mask */
- mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_PROMISC_IDX];
- if (uc_promisc) {
- if (!is_zero_ether_addr(mac_addr->value.addr8) ||
- mac_addr->state != FBNIC_TCAM_S_VALID) {
- eth_zero_addr(mac_addr->value.addr8);
- eth_broadcast_addr(mac_addr->mask.addr8);
- clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
- mac_addr->act_tcam);
- set_bit(FBNIC_MAC_ADDR_T_PROMISC,
- mac_addr->act_tcam);
- mac_addr->state = FBNIC_TCAM_S_ADD;
- }
- } else if (mc_promisc &&
- (!fbnic_bmc_present(fbd) || !fbd->fw_cap.all_multi)) {
- /* We have to add a special handler for multicast as the
- * BMC may have an all-multi rule already in place. As such
- * adding a rule ourselves won't do any good so we will have
- * to modify the rules for the ALL MULTI below if the BMC
- * already has the rule in place.
- */
- if (!is_multicast_ether_addr(mac_addr->value.addr8) ||
- mac_addr->state != FBNIC_TCAM_S_VALID) {
- eth_zero_addr(mac_addr->value.addr8);
- eth_broadcast_addr(mac_addr->mask.addr8);
- mac_addr->value.addr8[0] ^= 1;
- mac_addr->mask.addr8[0] ^= 1;
- set_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
- mac_addr->act_tcam);
- clear_bit(FBNIC_MAC_ADDR_T_PROMISC,
- mac_addr->act_tcam);
- mac_addr->state = FBNIC_TCAM_S_ADD;
- }
- } else if (mac_addr->state == FBNIC_TCAM_S_VALID) {
- if (test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam)) {
- clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
- mac_addr->act_tcam);
- clear_bit(FBNIC_MAC_ADDR_T_PROMISC,
- mac_addr->act_tcam);
- } else {
- mac_addr->state = FBNIC_TCAM_S_DELETE;
- }
- }
+ /* Update the promiscuous rules */
+ fbnic_promisc_sync(fbd, uc_promisc, mc_promisc);
/* Add rules for BMC all multicast if it is enabled */
fbnic_bmc_rpc_all_multi_config(fbd, mc_promisc);
@@ -273,13 +235,17 @@ void __fbnic_set_rx_mode(struct net_device *netdev)
/* Write updates to hardware */
fbnic_write_rules(fbd);
fbnic_write_macda(fbd);
+ fbnic_write_tce_tcam(fbd);
}
static void fbnic_set_rx_mode(struct net_device *netdev)
{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+
/* No need to update the hardware if we are not running */
if (netif_running(netdev))
- __fbnic_set_rx_mode(netdev);
+ __fbnic_set_rx_mode(fbd);
}
static int fbnic_set_mac(struct net_device *netdev, void *p)
@@ -296,10 +262,9 @@ static int fbnic_set_mac(struct net_device *netdev, void *p)
return 0;
}
-void fbnic_clear_rx_mode(struct net_device *netdev)
+void fbnic_clear_rx_mode(struct fbnic_dev *fbd)
{
- struct fbnic_net *fbn = netdev_priv(netdev);
- struct fbnic_dev *fbd = fbn->fbd;
+ struct net_device *netdev = fbd->netdev;
int idx;
for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
@@ -405,18 +370,31 @@ static int fbnic_hwtstamp_set(struct net_device *netdev,
static void fbnic_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats64)
{
+ u64 rx_bytes, rx_packets, rx_dropped = 0, rx_errors = 0;
+ u64 rx_over = 0, rx_missed = 0, rx_length = 0;
u64 tx_bytes, tx_packets, tx_dropped = 0;
- u64 rx_bytes, rx_packets, rx_dropped = 0;
struct fbnic_net *fbn = netdev_priv(dev);
+ struct fbnic_dev *fbd = fbn->fbd;
struct fbnic_queue_stats *stats;
+
unsigned int start, i;
+ fbnic_get_hw_stats(fbd);
+
stats = &fbn->tx_stats;
tx_bytes = stats->bytes;
tx_packets = stats->packets;
tx_dropped = stats->dropped;
+ /* Record drops from Tx HW Datapath */
+ spin_lock(&fbd->hw_stats.lock);
+ tx_dropped += fbd->hw_stats.tmi.drop.frames.value +
+ fbd->hw_stats.tti.cm_drop.frames.value +
+ fbd->hw_stats.tti.frame_drop.frames.value +
+ fbd->hw_stats.tti.tbi_drop.frames.value;
+ spin_unlock(&fbd->hw_stats.lock);
+
stats64->tx_bytes = tx_bytes;
stats64->tx_packets = tx_packets;
stats64->tx_dropped = tx_dropped;
@@ -446,11 +424,37 @@ static void fbnic_get_stats64(struct net_device *dev,
rx_packets = stats->packets;
rx_dropped = stats->dropped;
+ spin_lock(&fbd->hw_stats.lock);
+ /* Record drops for the host FIFOs.
+ * 4: network to Host, 6: BMC to Host
+ * Exclude the BMC and MC FIFOs as those stats may contain drops
+ * due to unrelated items such as TCAM misses. They are still
+ * accessible through the ethtool stats.
+ */
+ i = FBNIC_RXB_FIFO_HOST;
+ rx_missed += fbd->hw_stats.rxb.fifo[i].drop.frames.value;
+ i = FBNIC_RXB_FIFO_BMC_TO_HOST;
+ rx_missed += fbd->hw_stats.rxb.fifo[i].drop.frames.value;
+
+ for (i = 0; i < fbd->max_num_queues; i++) {
+ /* Report packets dropped due to CQ/BDQ being full/empty */
+ rx_over += fbd->hw_stats.hw_q[i].rde_pkt_cq_drop.value;
+ rx_over += fbd->hw_stats.hw_q[i].rde_pkt_bdq_drop.value;
+
+ /* Report packets with errors */
+ rx_errors += fbd->hw_stats.hw_q[i].rde_pkt_err.value;
+ }
+ spin_unlock(&fbd->hw_stats.lock);
+
stats64->rx_bytes = rx_bytes;
stats64->rx_packets = rx_packets;
stats64->rx_dropped = rx_dropped;
+ stats64->rx_over_errors = rx_over;
+ stats64->rx_errors = rx_errors;
+ stats64->rx_missed_errors = rx_missed;
for (i = 0; i < fbn->num_rx_queues; i++) {
+ struct fbnic_ring *xdpr = fbn->tx[FBNIC_MAX_TXQS + i];
struct fbnic_ring *rxr = fbn->rx[i];
if (!rxr)
@@ -462,14 +466,66 @@ static void fbnic_get_stats64(struct net_device *dev,
rx_bytes = stats->bytes;
rx_packets = stats->packets;
rx_dropped = stats->dropped;
+ rx_length = stats->rx.length_errors;
} while (u64_stats_fetch_retry(&stats->syncp, start));
stats64->rx_bytes += rx_bytes;
stats64->rx_packets += rx_packets;
stats64->rx_dropped += rx_dropped;
+ stats64->rx_errors += rx_length;
+ stats64->rx_length_errors += rx_length;
+
+ if (!xdpr)
+ continue;
+
+ stats = &xdpr->stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ tx_bytes = stats->bytes;
+ tx_packets = stats->packets;
+ tx_dropped = stats->dropped;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ stats64->tx_bytes += tx_bytes;
+ stats64->tx_packets += tx_packets;
+ stats64->tx_dropped += tx_dropped;
}
}
+bool fbnic_check_split_frames(struct bpf_prog *prog, unsigned int mtu,
+ u32 hds_thresh)
+{
+ if (!prog)
+ return false;
+
+ if (prog->aux->xdp_has_frags)
+ return false;
+
+ return mtu + ETH_HLEN > hds_thresh;
+}
+
+static int fbnic_bpf(struct net_device *netdev, struct netdev_bpf *bpf)
+{
+ struct bpf_prog *prog = bpf->prog, *prev_prog;
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ if (bpf->command != XDP_SETUP_PROG)
+ return -EINVAL;
+
+ if (fbnic_check_split_frames(prog, netdev->mtu,
+ fbn->hds_thresh)) {
+ NL_SET_ERR_MSG_MOD(bpf->extack,
+ "MTU too high, or HDS threshold is too low for single buffer XDP");
+ return -EOPNOTSUPP;
+ }
+
+ prev_prog = xchg(&fbn->xdp_prog, prog);
+ if (prev_prog)
+ bpf_prog_put(prev_prog);
+
+ return 0;
+}
+
static const struct net_device_ops fbnic_netdev_ops = {
.ndo_open = fbnic_open,
.ndo_stop = fbnic_stop,
@@ -479,6 +535,7 @@ static const struct net_device_ops fbnic_netdev_ops = {
.ndo_set_mac_address = fbnic_set_mac,
.ndo_set_rx_mode = fbnic_set_rx_mode,
.ndo_get_stats64 = fbnic_get_stats64,
+ .ndo_bpf = fbnic_bpf,
.ndo_hwtstamp_get = fbnic_hwtstamp_get,
.ndo_hwtstamp_set = fbnic_hwtstamp_set,
};
@@ -486,24 +543,59 @@ static const struct net_device_ops fbnic_netdev_ops = {
static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
struct netdev_queue_stats_rx *rx)
{
+ u64 bytes, packets, alloc_fail, alloc_fail_bdq;
struct fbnic_net *fbn = netdev_priv(dev);
struct fbnic_ring *rxr = fbn->rx[idx];
+ struct fbnic_dev *fbd = fbn->fbd;
struct fbnic_queue_stats *stats;
+ u64 csum_complete, csum_none;
+ struct fbnic_q_triad *qt;
unsigned int start;
- u64 bytes, packets;
if (!rxr)
return;
+ /* fbn->rx points to completion queues */
+ qt = container_of(rxr, struct fbnic_q_triad, cmpl);
+
stats = &rxr->stats;
do {
start = u64_stats_fetch_begin(&stats->syncp);
bytes = stats->bytes;
packets = stats->packets;
+ alloc_fail = stats->rx.alloc_failed;
+ csum_complete = stats->rx.csum_complete;
+ csum_none = stats->rx.csum_none;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ stats = &qt->sub0.stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ alloc_fail_bdq = stats->bdq.alloc_failed;
} while (u64_stats_fetch_retry(&stats->syncp, start));
+ alloc_fail += alloc_fail_bdq;
+
+ stats = &qt->sub1.stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ alloc_fail_bdq = stats->bdq.alloc_failed;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+ alloc_fail += alloc_fail_bdq;
rx->bytes = bytes;
rx->packets = packets;
+ rx->alloc_fail = alloc_fail;
+ rx->csum_complete = csum_complete;
+ rx->csum_none = csum_none;
+
+ fbnic_get_hw_q_stats(fbd, fbd->hw_stats.hw_q);
+
+ spin_lock(&fbd->hw_stats.lock);
+ rx->hw_drop_overruns = fbd->hw_stats.hw_q[idx].rde_pkt_cq_drop.value +
+ fbd->hw_stats.hw_q[idx].rde_pkt_bdq_drop.value;
+ rx->hw_drops = fbd->hw_stats.hw_q[idx].rde_pkt_err.value +
+ rx->hw_drop_overruns;
+ spin_unlock(&fbd->hw_stats.lock);
}
static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
@@ -512,6 +604,8 @@ static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
struct fbnic_net *fbn = netdev_priv(dev);
struct fbnic_ring *txr = fbn->tx[idx];
struct fbnic_queue_stats *stats;
+ u64 stop, wake, csum, lso;
+ struct fbnic_ring *xdpr;
unsigned int start;
u64 bytes, packets;
@@ -523,10 +617,31 @@ static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
start = u64_stats_fetch_begin(&stats->syncp);
bytes = stats->bytes;
packets = stats->packets;
+ csum = stats->twq.csum_partial;
+ lso = stats->twq.lso;
+ stop = stats->twq.stop;
+ wake = stats->twq.wake;
} while (u64_stats_fetch_retry(&stats->syncp, start));
tx->bytes = bytes;
tx->packets = packets;
+ tx->needs_csum = csum + lso;
+ tx->hw_gso_wire_packets = lso;
+ tx->stop = stop;
+ tx->wake = wake;
+
+ xdpr = fbn->tx[FBNIC_MAX_TXQS + idx];
+ if (xdpr) {
+ stats = &xdpr->stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ bytes = stats->bytes;
+ packets = stats->packets;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ tx->bytes += bytes;
+ tx->packets += packets;
+ }
}
static void fbnic_get_base_stats(struct net_device *dev,
@@ -537,9 +652,17 @@ static void fbnic_get_base_stats(struct net_device *dev,
tx->bytes = fbn->tx_stats.bytes;
tx->packets = fbn->tx_stats.packets;
+ tx->needs_csum = fbn->tx_stats.twq.csum_partial + fbn->tx_stats.twq.lso;
+ tx->hw_gso_wire_packets = fbn->tx_stats.twq.lso;
+ tx->stop = fbn->tx_stats.twq.stop;
+ tx->wake = fbn->tx_stats.twq.wake;
rx->bytes = fbn->rx_stats.bytes;
rx->packets = fbn->rx_stats.packets;
+ rx->alloc_fail = fbn->rx_stats.rx.alloc_failed +
+ fbn->bdq_stats.bdq.alloc_failed;
+ rx->csum_complete = fbn->rx_stats.rx.csum_complete;
+ rx->csum_none = fbn->rx_stats.rx.csum_none;
}
static const struct netdev_stat_ops fbnic_stat_ops = {
@@ -574,10 +697,7 @@ void fbnic_reset_queues(struct fbnic_net *fbn,
**/
void fbnic_netdev_free(struct fbnic_dev *fbd)
{
- struct fbnic_net *fbn = netdev_priv(fbd->netdev);
-
- if (fbn->phylink)
- phylink_destroy(fbn->phylink);
+ fbnic_phylink_destroy(fbd->netdev);
free_netdev(fbd->netdev);
fbd->netdev = NULL;
@@ -590,7 +710,7 @@ void fbnic_netdev_free(struct fbnic_dev *fbd)
* Allocate and initialize the netdev and netdev private structure. Bind
* together the hardware, netdev, and pci data structures.
*
- * Return: 0 on success, negative on failure
+ * Return: Pointer to net_device on success, NULL on failure
**/
struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
{
@@ -607,6 +727,8 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
netdev->netdev_ops = &fbnic_netdev_ops;
netdev->stat_ops = &fbnic_stat_ops;
+ netdev->queue_mgmt_ops = &fbnic_queue_mgmt_ops;
+ netdev->netmem_tx = true;
fbnic_set_ethtool_ops(netdev);
@@ -614,13 +736,20 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
fbn->netdev = netdev;
fbn->fbd = fbd;
- INIT_LIST_HEAD(&fbn->napis);
fbn->txq_size = FBNIC_TXQ_SIZE_DEFAULT;
fbn->hpq_size = FBNIC_HPQ_SIZE_DEFAULT;
fbn->ppq_size = FBNIC_PPQ_SIZE_DEFAULT;
fbn->rcq_size = FBNIC_RCQ_SIZE_DEFAULT;
+ fbn->tx_usecs = FBNIC_TX_USECS_DEFAULT;
+ fbn->rx_usecs = FBNIC_RX_USECS_DEFAULT;
+ fbn->rx_max_frames = FBNIC_RX_FRAMES_DEFAULT;
+
+ /* Initialize the hds_thresh */
+ netdev->cfg->hds_thresh = FBNIC_HDS_THRESH_DEFAULT;
+ fbn->hds_thresh = FBNIC_HDS_THRESH_DEFAULT;
+
default_queues = netif_get_num_default_rss_queues();
if (default_queues > fbd->max_num_queues)
default_queues = fbd->max_num_queues;
@@ -631,15 +760,32 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
fbnic_rss_key_fill(fbn->rss_key);
fbnic_rss_init_en_mask(fbn);
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ netdev->gso_partial_features =
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
netdev->features |=
+ netdev->gso_partial_features |
+ FBNIC_TUN_GSO_FEATURES |
NETIF_F_RXHASH |
NETIF_F_SG |
NETIF_F_HW_CSUM |
- NETIF_F_RXCSUM;
+ NETIF_F_RXCSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_TSO6 |
+ NETIF_F_GSO_PARTIAL |
+ NETIF_F_GSO_UDP_L4;
netdev->hw_features |= netdev->features;
netdev->vlan_features |= netdev->features;
netdev->hw_enc_features |= netdev->features;
+ netdev->features |= NETIF_F_NTUPLE;
netdev->min_mtu = IPV6_MIN_MTU;
netdev->max_mtu = FBNIC_MAX_JUMBO_FRAME_SIZE - ETH_HLEN;
@@ -649,13 +795,11 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
*/
netdev->ethtool->wol_enabled = true;
- fbn->fec = FBNIC_FEC_AUTO | FBNIC_FEC_RS;
- fbn->link_mode = FBNIC_LINK_AUTO | FBNIC_LINK_50R2;
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
- if (fbnic_phylink_init(netdev)) {
+ if (fbnic_phylink_create(netdev)) {
fbnic_netdev_free(fbd);
return NULL;
}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
index b8417b300778..9129a658f8fa 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
@@ -4,17 +4,27 @@
#ifndef _FBNIC_NETDEV_H_
#define _FBNIC_NETDEV_H_
-#include <linux/types.h>
#include <linux/phylink.h>
+#include <linux/types.h>
#include "fbnic_csr.h"
#include "fbnic_rpc.h"
#include "fbnic_txrx.h"
+#define FBNIC_MAX_NAPI_VECTORS 128u
+#define FBNIC_MIN_RXD_PER_FRAME 2
+
+/* Natively supported tunnel GSO features (not thru GSO_PARTIAL) */
+#define FBNIC_TUN_GSO_FEATURES NETIF_F_GSO_IPXIP6
+
struct fbnic_net {
- struct fbnic_ring *tx[FBNIC_MAX_TXQS];
+ struct bpf_prog *xdp_prog;
+
+ struct fbnic_ring *tx[FBNIC_MAX_TXQS + FBNIC_MAX_XDPQS];
struct fbnic_ring *rx[FBNIC_MAX_RXQS];
+ struct fbnic_napi_vector *napi[FBNIC_MAX_NAPI_VECTORS];
+
struct net_device *netdev;
struct fbnic_dev *fbd;
@@ -23,15 +33,21 @@ struct fbnic_net {
u32 ppq_size;
u32 rcq_size;
+ u32 hds_thresh;
+
+ u16 rx_usecs;
+ u16 tx_usecs;
+
+ u32 rx_max_frames;
+
u16 num_napi;
struct phylink *phylink;
struct phylink_config phylink_config;
- struct phylink_pcs phylink_pcs;
+ struct phylink_pcs *pcs;
- /* TBD: Remove these when phylink supports FEC and lane config */
+ u8 aui;
u8 fec;
- u8 link_mode;
/* Cached top bits of the HW time counter for 40b -> 64b conversion */
u32 time_high;
@@ -52,17 +68,19 @@ struct fbnic_net {
/* Storage for stats after ring destruction */
struct fbnic_queue_stats tx_stats;
struct fbnic_queue_stats rx_stats;
+ struct fbnic_queue_stats bdq_stats;
u64 link_down_events;
- /* Time stampinn filter config */
+ /* Time stamping filter config */
struct kernel_hwtstamp_config hwtstamp_config;
- struct list_head napis;
+ bool tx_pause;
};
int __fbnic_open(struct fbnic_net *fbn);
void fbnic_up(struct fbnic_net *fbn);
void fbnic_down(struct fbnic_net *fbn);
+void fbnic_down_noidle(struct fbnic_net *fbn);
struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd);
void fbnic_netdev_free(struct fbnic_dev *fbd);
@@ -70,6 +88,7 @@ int fbnic_netdev_register(struct net_device *netdev);
void fbnic_netdev_unregister(struct net_device *netdev);
void fbnic_reset_queues(struct fbnic_net *fbn,
unsigned int tx, unsigned int rx);
+
void fbnic_set_ethtool_ops(struct net_device *dev);
int fbnic_ptp_setup(struct fbnic_dev *fbd);
@@ -78,8 +97,21 @@ void fbnic_time_init(struct fbnic_net *fbn);
int fbnic_time_start(struct fbnic_net *fbn);
void fbnic_time_stop(struct fbnic_net *fbn);
-void __fbnic_set_rx_mode(struct net_device *netdev);
-void fbnic_clear_rx_mode(struct net_device *netdev);
-
+void __fbnic_set_rx_mode(struct fbnic_dev *fbd);
+void fbnic_clear_rx_mode(struct fbnic_dev *fbd);
+
+void fbnic_phylink_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause);
+int fbnic_phylink_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause);
+int fbnic_phylink_ethtool_ksettings_get(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd);
+int fbnic_phylink_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam);
+int fbnic_phylink_create(struct net_device *netdev);
+void fbnic_phylink_destroy(struct net_device *netdev);
int fbnic_phylink_init(struct net_device *netdev);
+void fbnic_phylink_pmd_training_complete_notify(struct net_device *netdev);
+bool fbnic_check_split_frames(struct bpf_prog *prog,
+ unsigned int mtu, u32 hds_threshold);
#endif /* _FBNIC_NETDEV_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
index 2de5a6fde7e8..9240673c7533 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
@@ -6,9 +6,11 @@
#include <linux/pci.h>
#include <linux/rtnetlink.h>
#include <linux/types.h>
+#include <net/devlink.h>
#include "fbnic.h"
#include "fbnic_drvinfo.h"
+#include "fbnic_hw_stats.h"
#include "fbnic_netdev.h"
char fbnic_driver_name[] = DRV_NAME;
@@ -116,14 +118,12 @@ static void fbnic_service_task_start(struct fbnic_net *fbn)
struct fbnic_dev *fbd = fbn->fbd;
schedule_delayed_work(&fbd->service_task, HZ);
- phylink_resume(fbn->phylink);
}
static void fbnic_service_task_stop(struct fbnic_net *fbn)
{
struct fbnic_dev *fbd = fbn->fbd;
- phylink_suspend(fbn->phylink, fbnic_bmc_present(fbd));
cancel_delayed_work(&fbd->service_task);
}
@@ -135,7 +135,7 @@ void fbnic_up(struct fbnic_net *fbn)
fbnic_rss_reinit_hw(fbn->fbd, fbn);
- __fbnic_set_rx_mode(fbn->netdev);
+ __fbnic_set_rx_mode(fbn->fbd);
/* Enable Tx/Rx processing */
fbnic_napi_enable(fbn);
@@ -144,7 +144,7 @@ void fbnic_up(struct fbnic_net *fbn)
fbnic_service_task_start(fbn);
}
-static void fbnic_down_noidle(struct fbnic_net *fbn)
+void fbnic_down_noidle(struct fbnic_net *fbn)
{
fbnic_service_task_stop(fbn);
@@ -152,7 +152,7 @@ static void fbnic_down_noidle(struct fbnic_net *fbn)
fbnic_napi_disable(fbn);
netif_tx_disable(fbn->netdev);
- fbnic_clear_rx_mode(fbn->netdev);
+ fbnic_clear_rx_mode(fbn->fbd);
fbnic_clear_rules(fbn->fbd);
fbnic_rss_disable_hw(fbn->fbd);
fbnic_disable(fbn);
@@ -167,11 +167,25 @@ void fbnic_down(struct fbnic_net *fbn)
fbnic_flush(fbn);
}
+static int fbnic_fw_config_after_crash(struct fbnic_dev *fbd)
+{
+ if (fbnic_fw_xmit_ownership_msg(fbd, true)) {
+ dev_err(fbd->dev, "NIC failed to take ownership\n");
+
+ return -1;
+ }
+
+ fbnic_rpc_reset_valid_entries(fbd);
+ __fbnic_set_rx_mode(fbd);
+
+ return 0;
+}
+
static void fbnic_health_check(struct fbnic_dev *fbd)
{
struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
- /* As long as the heart is beating the FW is healty */
+ /* As long as the heart is beating the FW is healthy */
if (fbd->fw_heartbeat_enabled)
return;
@@ -182,30 +196,39 @@ static void fbnic_health_check(struct fbnic_dev *fbd)
if (tx_mbx->head != tx_mbx->tail)
return;
- /* TBD: Need to add a more thorough recovery here.
- * Specifically I need to verify what all the firmware will have
- * changed since we had setup and it rebooted. May just need to
- * perform a down/up. For now we will just reclaim ownership so
- * the heartbeat can catch the next fault.
- */
- fbnic_fw_xmit_ownership_msg(fbd, true);
+ fbnic_devlink_fw_report(fbd, "Firmware crash detected!");
+ fbnic_devlink_otp_check(fbd, "error detected after firmware recovery");
+
+ if (fbnic_fw_config_after_crash(fbd))
+ dev_err(fbd->dev, "Firmware recovery failed after crash\n");
}
static void fbnic_service_task(struct work_struct *work)
{
struct fbnic_dev *fbd = container_of(to_delayed_work(work),
struct fbnic_dev, service_task);
+ struct net_device *netdev = fbd->netdev;
+
+ if (netif_running(netdev))
+ fbnic_phylink_pmd_training_complete_notify(netdev);
rtnl_lock();
+ fbnic_get_hw_stats32(fbd);
+
fbnic_fw_check_heartbeat(fbd);
fbnic_health_check(fbd);
- if (netif_carrier_ok(fbd->netdev))
+ fbnic_bmc_rpc_check(fbd);
+
+ if (netif_carrier_ok(fbd->netdev)) {
+ netdev_lock(fbd->netdev);
fbnic_napi_depletion_check(fbd->netdev);
+ netdev_unlock(fbd->netdev);
+ }
- if (netif_running(fbd->netdev))
+ if (netif_running(netdev))
schedule_delayed_work(&fbd->service_task, HZ);
rtnl_unlock();
@@ -262,6 +285,10 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
}
+ err = fbnic_devlink_health_create(fbd);
+ if (err)
+ goto free_fbd;
+
/* Populate driver with hardware-specific info and handlers */
fbd->max_num_queues = info->max_num_queues;
@@ -272,7 +299,7 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = fbnic_alloc_irqs(fbd);
if (err)
- goto free_fbd;
+ goto err_destroy_health;
err = fbnic_mac_init(fbd);
if (err) {
@@ -280,14 +307,30 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto free_irqs;
}
- err = fbnic_fw_enable_mbx(fbd);
+ err = fbnic_fw_request_mbx(fbd);
if (err) {
dev_err(&pdev->dev,
"Firmware mailbox initialization failure\n");
goto free_irqs;
}
+	/* Send the request to enable FW logging to the host. Note that if
+	 * this fails we ignore the error and just display a message, as it
+	 * is possible the FW is simply too old to support logging and needs
+	 * to be updated.
+ */
+ err = fbnic_fw_log_init(fbd);
+ if (err)
+ dev_warn(fbd->dev,
+ "Unable to initialize firmware log buffer: %d\n",
+ err);
+
fbnic_devlink_register(fbd);
+ fbnic_devlink_otp_check(fbd, "error detected during probe");
+ fbnic_dbg_fbd_init(fbd);
+
+ /* Capture snapshot of hardware stats so netdev can calculate delta */
+ fbnic_init_hw_stats(fbd);
fbnic_hwmon_register(fbd);
@@ -296,6 +339,9 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto init_failure_mode;
}
+ if (fbnic_mdiobus_create(fbd))
+ goto init_failure_mode;
+
netdev = fbnic_netdev_alloc(fbd);
if (!netdev) {
dev_err(&pdev->dev, "Netdev allocation failed\n");
@@ -326,8 +372,9 @@ init_failure_mode:
return 0;
free_irqs:
fbnic_free_irqs(fbd);
+err_destroy_health:
+ fbnic_devlink_health_destroy(fbd);
free_fbd:
- pci_disable_device(pdev);
fbnic_devlink_free(fbd);
return err;
@@ -338,7 +385,7 @@ free_fbd:
* @pdev: PCI device information struct
*
* Called by the PCI subsystem to alert the driver that it should release
- * a PCI device. The could be caused by a Hot-Plug event, or because the
+ * a PCI device. This could be caused by a Hot-Plug event, or because the
* driver is going to be removed from memory.
**/
static void fbnic_remove(struct pci_dev *pdev)
@@ -355,11 +402,13 @@ static void fbnic_remove(struct pci_dev *pdev)
}
fbnic_hwmon_unregister(fbd);
+ fbnic_dbg_fbd_exit(fbd);
fbnic_devlink_unregister(fbd);
- fbnic_fw_disable_mbx(fbd);
+ fbnic_fw_log_free(fbd);
+ fbnic_fw_free_mbx(fbd);
fbnic_free_irqs(fbd);
- pci_disable_device(pdev);
+ fbnic_devlink_health_destroy(fbd);
fbnic_devlink_free(fbd);
}
@@ -372,16 +421,24 @@ static int fbnic_pm_suspend(struct device *dev)
goto null_uc_addr;
rtnl_lock();
+ netdev_lock(netdev);
netif_device_detach(netdev);
if (netif_running(netdev))
netdev->netdev_ops->ndo_stop(netdev);
+ netdev_unlock(netdev);
rtnl_unlock();
null_uc_addr:
- fbnic_fw_disable_mbx(fbd);
+ fbnic_fw_log_disable(fbd);
+
+ devl_lock(priv_to_devlink(fbd));
+
+ fbnic_fw_free_mbx(fbd);
+
+ devl_unlock(priv_to_devlink(fbd));
/* Free the IRQs so they aren't trying to occupy sleeping CPUs */
fbnic_free_irqs(fbd);
@@ -413,11 +470,22 @@ static int __fbnic_pm_resume(struct device *dev)
fbd->mac->init_regs(fbd);
+ devl_lock(priv_to_devlink(fbd));
+
/* Re-enable mailbox */
- err = fbnic_fw_enable_mbx(fbd);
+ err = fbnic_fw_request_mbx(fbd);
+ devl_unlock(priv_to_devlink(fbd));
if (err)
goto err_free_irqs;
+ /* Only send log history if log buffer is empty to prevent duplicate
+ * log entries.
+ */
+ fbnic_fw_log_enable(fbd, list_empty(&fbd->fw_log.entries));
+
+ /* Since the FW should be up, check if it reported OTP errors */
+ fbnic_devlink_otp_check(fbd, "error detected after PM resume");
+
/* No netdev means there isn't a network interface to bring up */
if (fbnic_init_failure(fbd))
return 0;
@@ -428,19 +496,23 @@ static int __fbnic_pm_resume(struct device *dev)
fbnic_reset_queues(fbn, fbn->num_tx_queues, fbn->num_rx_queues);
rtnl_lock();
+ netdev_lock(netdev);
- if (netif_running(netdev)) {
+ if (netif_running(netdev))
err = __fbnic_open(fbn);
- if (err)
- goto err_disable_mbx;
- }
+ netdev_unlock(netdev);
rtnl_unlock();
+ if (err)
+ goto err_free_mbx;
return 0;
-err_disable_mbx:
- rtnl_unlock();
- fbnic_fw_disable_mbx(fbd);
+err_free_mbx:
+ fbnic_fw_log_disable(fbd);
+
+ devl_lock(priv_to_devlink(fbd));
+ fbnic_fw_free_mbx(fbd);
+ devl_unlock(priv_to_devlink(fbd));
err_free_irqs:
fbnic_free_irqs(fbd);
err_invalidate_uc_addr:
@@ -455,6 +527,10 @@ static void __fbnic_pm_attach(struct device *dev)
struct net_device *netdev = fbd->netdev;
struct fbnic_net *fbn;
+ rtnl_lock();
+ fbnic_reset_hw_stats(fbd);
+ rtnl_unlock();
+
if (fbnic_init_failure(fbd))
return;
@@ -505,7 +581,6 @@ static pci_ers_result_t fbnic_err_slot_reset(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- pci_save_state(pdev);
if (pci_enable_device_mem(pdev)) {
dev_err(&pdev->dev,
@@ -552,9 +627,13 @@ static int __init fbnic_init_module(void)
{
int err;
+ fbnic_dbg_init();
+
err = pci_register_driver(&fbnic_driver);
- if (err)
+ if (err) {
+ fbnic_dbg_exit();
goto out;
+ }
pr_info(DRV_SUMMARY " (%s)", fbnic_driver.name);
out:
@@ -570,5 +649,7 @@ module_init(fbnic_init_module);
static void __exit fbnic_exit_module(void)
{
pci_unregister_driver(&fbnic_driver);
+
+ fbnic_dbg_exit();
}
module_exit(fbnic_exit_module);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
index 1a5e1e719b30..09c5225111be 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+#include <linux/pcs/pcs-xpcs.h>
#include <linux/phy.h>
#include <linux/phylink.h>
@@ -8,77 +9,99 @@
#include "fbnic_mac.h"
#include "fbnic_netdev.h"
-static struct fbnic_net *
-fbnic_pcs_to_net(struct phylink_pcs *pcs)
+static phy_interface_t fbnic_phylink_select_interface(u8 aui)
{
- return container_of(pcs, struct fbnic_net, phylink_pcs);
+ switch (aui) {
+ case FBNIC_AUI_100GAUI2:
+ return PHY_INTERFACE_MODE_100GBASEP;
+ case FBNIC_AUI_50GAUI1:
+ return PHY_INTERFACE_MODE_50GBASER;
+ case FBNIC_AUI_LAUI2:
+ return PHY_INTERFACE_MODE_LAUI;
+ case FBNIC_AUI_25GAUI:
+ return PHY_INTERFACE_MODE_25GBASER;
+ }
+
+ return PHY_INTERFACE_MODE_NA;
}
-static void
-fbnic_phylink_pcs_get_state(struct phylink_pcs *pcs,
- struct phylink_link_state *state)
+void fbnic_phylink_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
{
- struct fbnic_net *fbn = fbnic_pcs_to_net(pcs);
- struct fbnic_dev *fbd = fbn->fbd;
-
- /* For now we use hard-coded defaults and FW config to determine
- * the current values. In future patches we will add support for
- * reconfiguring these values and changing link settings.
- */
- switch (fbd->fw_cap.link_speed) {
- case FBNIC_FW_LINK_SPEED_25R1:
- state->speed = SPEED_25000;
- break;
- case FBNIC_FW_LINK_SPEED_50R2:
- state->speed = SPEED_50000;
- break;
- case FBNIC_FW_LINK_SPEED_100R2:
- state->speed = SPEED_100000;
- break;
- default:
- state->speed = SPEED_UNKNOWN;
- break;
- }
-
- state->duplex = DUPLEX_FULL;
+ struct fbnic_net *fbn = netdev_priv(netdev);
- state->link = fbd->mac->pcs_get_link(fbd);
+ phylink_ethtool_get_pauseparam(fbn->phylink, pause);
}
-static int
-fbnic_phylink_pcs_enable(struct phylink_pcs *pcs)
+int fbnic_phylink_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
{
- struct fbnic_net *fbn = fbnic_pcs_to_net(pcs);
- struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_net *fbn = netdev_priv(netdev);
- return fbd->mac->pcs_enable(fbd);
+ return phylink_ethtool_set_pauseparam(fbn->phylink, pause);
}
static void
-fbnic_phylink_pcs_disable(struct phylink_pcs *pcs)
+fbnic_phylink_get_supported_fec_modes(unsigned long *supported)
{
- struct fbnic_net *fbn = fbnic_pcs_to_net(pcs);
- struct fbnic_dev *fbd = fbn->fbd;
+	/* The NIC can support up to 8 possible combinations:
+	 * either 50G-CR or 100G-CR2, which only support RS FEC,
+	 * or 25G-CR or 50G-CR2, which support No FEC, RS, or Base-R.
+ */
+ if (phylink_test(supported, 100000baseCR2_Full) ||
+ phylink_test(supported, 50000baseCR_Full))
+ phylink_set(supported, FEC_RS);
+ if (phylink_test(supported, 50000baseCR2_Full) ||
+ phylink_test(supported, 25000baseCR_Full)) {
+ phylink_set(supported, FEC_BASER);
+ phylink_set(supported, FEC_NONE);
+ phylink_set(supported, FEC_RS);
+ }
+}
+
+int fbnic_phylink_ethtool_ksettings_get(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ int err;
+
+ err = phylink_ethtool_ksettings_get(fbn->phylink, cmd);
+ if (!err) {
+ unsigned long *supp = cmd->link_modes.supported;
- return fbd->mac->pcs_disable(fbd);
+ cmd->base.port = PORT_DA;
+ cmd->lanes = (fbn->aui & FBNIC_AUI_MODE_R2) ? 2 : 1;
+
+ fbnic_phylink_get_supported_fec_modes(supp);
+ }
+
+ return err;
}
-static int
-fbnic_phylink_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
- phy_interface_t interface,
- const unsigned long *advertising,
- bool permit_pause_to_mac)
+int fbnic_phylink_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam)
{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ if (fbn->fec & FBNIC_FEC_RS) {
+ fecparam->active_fec = ETHTOOL_FEC_RS;
+ fecparam->fec = ETHTOOL_FEC_RS;
+ } else if (fbn->fec & FBNIC_FEC_BASER) {
+ fecparam->active_fec = ETHTOOL_FEC_BASER;
+ fecparam->fec = ETHTOOL_FEC_BASER;
+ } else {
+ fecparam->active_fec = ETHTOOL_FEC_OFF;
+ fecparam->fec = ETHTOOL_FEC_OFF;
+ }
+
+ if (fbn->aui & FBNIC_AUI_MODE_PAM4)
+ fecparam->fec |= ETHTOOL_FEC_AUTO;
+
return 0;
}
-static const struct phylink_pcs_ops fbnic_phylink_pcs_ops = {
- .pcs_config = fbnic_phylink_pcs_config,
- .pcs_enable = fbnic_phylink_pcs_enable,
- .pcs_disable = fbnic_phylink_pcs_disable,
- .pcs_get_state = fbnic_phylink_pcs_get_state,
-};
-
static struct phylink_pcs *
fbnic_phylink_mac_select_pcs(struct phylink_config *config,
phy_interface_t interface)
@@ -86,7 +109,20 @@ fbnic_phylink_mac_select_pcs(struct phylink_config *config,
struct net_device *netdev = to_net_dev(config->dev);
struct fbnic_net *fbn = netdev_priv(netdev);
- return &fbn->phylink_pcs;
+ return fbn->pcs;
+}
+
+static int
+fbnic_phylink_mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t iface)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ fbd->mac->prepare(fbd, fbn->aui, fbn->fec);
+
+ return 0;
}
static void
@@ -95,6 +131,20 @@ fbnic_phylink_mac_config(struct phylink_config *config, unsigned int mode,
{
}
+static int
+fbnic_phylink_mac_finish(struct phylink_config *config, unsigned int mode,
+ phy_interface_t iface)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ /* Retest the link state and restart interrupts */
+ fbd->mac->get_link(fbd, fbn->aui, fbn->fec);
+
+ return 0;
+}
+
static void
fbnic_phylink_mac_link_down(struct phylink_config *config, unsigned int mode,
phy_interface_t interface)
@@ -118,44 +168,144 @@ fbnic_phylink_mac_link_up(struct phylink_config *config,
struct fbnic_net *fbn = netdev_priv(netdev);
struct fbnic_dev *fbd = fbn->fbd;
+ fbn->tx_pause = tx_pause;
+ fbnic_config_drop_mode(fbn, tx_pause);
+
fbd->mac->link_up(fbd, tx_pause, rx_pause);
}
static const struct phylink_mac_ops fbnic_phylink_mac_ops = {
.mac_select_pcs = fbnic_phylink_mac_select_pcs,
+ .mac_prepare = fbnic_phylink_mac_prepare,
.mac_config = fbnic_phylink_mac_config,
+ .mac_finish = fbnic_phylink_mac_finish,
.mac_link_down = fbnic_phylink_mac_link_down,
.mac_link_up = fbnic_phylink_mac_link_up,
};
-int fbnic_phylink_init(struct net_device *netdev)
+/**
+ * fbnic_phylink_create - Phylink device creation
+ * @netdev: Network Device struct to attach phylink device
+ *
+ * Initialize and attach a phylink instance to the device. The phylink
+ * device will make use of the netdev struct to track carrier and will
+ * eventually be used to expose the current state of the MAC and PCS
+ * setup.
+ *
+ * Return: 0 on success, negative on failure
+ **/
+int fbnic_phylink_create(struct net_device *netdev)
{
struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct phylink_pcs *pcs;
struct phylink *phylink;
+ int err;
+
+ pcs = xpcs_create_pcs_mdiodev(fbd->mdio_bus, 0);
+ if (IS_ERR(pcs)) {
+ err = PTR_ERR(pcs);
+ dev_err(fbd->dev, "Failed to create PCS device: %d\n", err);
+ return err;
+ }
- fbn->phylink_pcs.neg_mode = true;
- fbn->phylink_pcs.ops = &fbnic_phylink_pcs_ops;
+ fbn->pcs = pcs;
fbn->phylink_config.dev = &netdev->dev;
fbn->phylink_config.type = PHYLINK_NETDEV;
fbn->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
- MAC_10000FD | MAC_25000FD |
- MAC_40000FD | MAC_50000FD |
+ MAC_25000FD | MAC_50000FD |
MAC_100000FD;
fbn->phylink_config.default_an_inband = true;
- __set_bit(PHY_INTERFACE_MODE_XGMII,
+ __set_bit(PHY_INTERFACE_MODE_100GBASEP,
fbn->phylink_config.supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_XLGMII,
+ __set_bit(PHY_INTERFACE_MODE_50GBASER,
fbn->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_LAUI,
+ fbn->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_25GBASER,
+ fbn->phylink_config.supported_interfaces);
+
+ fbnic_mac_get_fw_settings(fbd, &fbn->aui, &fbn->fec);
phylink = phylink_create(&fbn->phylink_config, NULL,
- PHY_INTERFACE_MODE_XLGMII,
+ fbnic_phylink_select_interface(fbn->aui),
&fbnic_phylink_mac_ops);
- if (IS_ERR(phylink))
- return PTR_ERR(phylink);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ dev_err(netdev->dev.parent,
+ "Failed to create Phylink interface, err: %d\n", err);
+ xpcs_destroy_pcs(pcs);
+ return err;
+ }
fbn->phylink = phylink;
return 0;
}
+
+/**
+ * fbnic_phylink_destroy - Teardown phylink related interfaces
+ * @netdev: Network Device struct containing phylink device
+ *
+ * Detach and free resources related to phylink interface.
+ **/
+void fbnic_phylink_destroy(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ if (fbn->phylink)
+ phylink_destroy(fbn->phylink);
+ if (fbn->pcs)
+ xpcs_destroy_pcs(fbn->pcs);
+}
+
+/**
+ * fbnic_phylink_pmd_training_complete_notify - PMD training complete notifier
+ * @netdev: Netdev struct phylink device attached to
+ *
+ * When the link first comes up the PMD will have a period of 2 to 3 seconds
+ * where the link will flutter due to link training. To avoid spamming the
+ * kernel log with messages about this we add a delay of 4 seconds from the
+ * time of the last PCS report of link so that we are unlikely to see any
+ * further link loss events due to link training.
+ **/
+void fbnic_phylink_pmd_training_complete_notify(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ if (fbd->pmd_state != FBNIC_PMD_TRAINING)
+ return;
+
+ /* Prevent reading end_of_pmd_training until we verified state */
+ smp_rmb();
+
+ if (!time_before(READ_ONCE(fbd->end_of_pmd_training), jiffies))
+ return;
+
+ /* At this point we have verified that the link has been up for
+ * the full training duration. As a first step we will try
+ * transitioning to link ready.
+ */
+ if (cmpxchg(&fbd->pmd_state, FBNIC_PMD_TRAINING,
+ FBNIC_PMD_LINK_READY) != FBNIC_PMD_TRAINING)
+ return;
+
+ /* Perform a follow-up check to verify that the link didn't flap
+ * just before our transition by rechecking the training timer.
+ */
+ if (!time_before(READ_ONCE(fbd->end_of_pmd_training), jiffies))
+ return;
+
+ /* The training timeout has been completed. We are good to swap out
+ * link_ready for send_data assuming no other events have occurred
+ * that would have pulled us back into initialization or training.
+ */
+ if (cmpxchg(&fbd->pmd_state, FBNIC_PMD_LINK_READY,
+ FBNIC_PMD_SEND_DATA) != FBNIC_PMD_LINK_READY)
+ return;
+
+ phylink_pcs_change(fbn->pcs, false);
+}
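
The notifier above debounces link-training flaps with a two-step state change: TRAINING to LINK_READY, a recheck of the training deadline, then LINK_READY to SEND_DATA. Below is a minimal user-space sketch of that pattern, not part of the patch, using C11 atomics; pmd_state, end_of_training and the tick values are stand-ins for the driver fields, and the memory-ordering detail handled by smp_rmb() above is glossed over.

/* Sketch only: mirrors the two-step debounce above with C11 atomics. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { PMD_TRAINING, PMD_LINK_READY, PMD_SEND_DATA };

static _Atomic int pmd_state = PMD_TRAINING;
static _Atomic long end_of_training;	/* training deadline, in "ticks" */

static bool training_window_elapsed(long now)
{
	/* Stand-in for the !time_before() checks: only proceed once the
	 * deadline is strictly in the past.
	 */
	return atomic_load(&end_of_training) < now;
}

static void pmd_training_complete_notify(long now)
{
	int expected = PMD_TRAINING;

	if (!training_window_elapsed(now))
		return;

	/* First transition: TRAINING -> LINK_READY */
	if (!atomic_compare_exchange_strong(&pmd_state, &expected,
					    PMD_LINK_READY))
		return;

	/* Recheck the deadline in case the link flapped and pushed the
	 * training window out between the check and the state change.
	 */
	if (!training_window_elapsed(now))
		return;

	expected = PMD_LINK_READY;
	if (atomic_compare_exchange_strong(&pmd_state, &expected,
					   PMD_SEND_DATA))
		printf("link stable, start sending data\n");
}

int main(void)
{
	atomic_store(&end_of_training, 4);	/* 4-tick training window */
	pmd_training_complete_notify(3);	/* too early, no transition */
	pmd_training_complete_notify(5);	/* window elapsed, promotes state */
	return 0;
}
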
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
index 337b8b3aef2f..7f31e890031c 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
@@ -3,8 +3,10 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <net/ipv6.h>
#include "fbnic.h"
+#include "fbnic_fw.h"
#include "fbnic_netdev.h"
#include "fbnic_rpc.h"
@@ -13,10 +15,11 @@ void fbnic_reset_indir_tbl(struct fbnic_net *fbn)
unsigned int num_rx = fbn->num_rx_queues;
unsigned int i;
- for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) {
+ if (netif_is_rxfh_configured(fbn->netdev))
+ return;
+
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
fbn->indir_tbl[0][i] = ethtool_rxfh_indir_default(i, num_rx);
- fbn->indir_tbl[1][i] = ethtool_rxfh_indir_default(i, num_rx);
- }
}
void fbnic_rss_key_fill(u32 *buffer)
@@ -59,7 +62,7 @@ void fbnic_rss_disable_hw(struct fbnic_dev *fbd)
#define FBNIC_FH_2_RSSEM_BIT(_fh, _rssem, _val) \
FIELD_PREP(FBNIC_RPC_ACT_TBL1_RSS_ENA_##_rssem, \
FIELD_GET(RXH_##_fh, _val))
-static u16 fbnic_flow_hash_2_rss_en_mask(struct fbnic_net *fbn, int flow_type)
+u16 fbnic_flow_hash_2_rss_en_mask(struct fbnic_net *fbn, int flow_type)
{
u32 flow_hash = fbn->rss_flow_hash[flow_type];
u32 rss_en_mask = 0;
@@ -69,6 +72,8 @@ static u16 fbnic_flow_hash_2_rss_en_mask(struct fbnic_net *fbn, int flow_type)
rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(IP_DST, IP_DST, flow_hash);
rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(L4_B_0_1, L4_SRC, flow_hash);
rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(L4_B_2_3, L4_DST, flow_hash);
+ rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(IP6_FL, OV6_FL_LBL, flow_hash);
+ rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(IP6_FL, IV6_FL_LBL, flow_hash);
return rss_en_mask;
}
@@ -127,12 +132,9 @@ void fbnic_bmc_rpc_all_multi_config(struct fbnic_dev *fbd,
else
clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
mac_addr->act_tcam);
- } else if (!test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam) &&
- !is_zero_ether_addr(mac_addr->mask.addr8) &&
- mac_addr->state == FBNIC_TCAM_S_VALID) {
- clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI, mac_addr->act_tcam);
- clear_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam);
- mac_addr->state = FBNIC_TCAM_S_DELETE;
+ } else {
+ __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_BMC);
+ __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_ALLMULTI);
}
/* We have to add a special handler for multicast as the
@@ -234,8 +236,25 @@ void fbnic_bmc_rpc_init(struct fbnic_dev *fbd)
act_tcam->mask.tcam[j] = 0xffff;
act_tcam->state = FBNIC_TCAM_S_UPDATE;
+}
+
+void fbnic_bmc_rpc_check(struct fbnic_dev *fbd)
+{
+ int err;
+
+ if (fbd->fw_cap.need_bmc_tcam_reinit) {
+ fbnic_bmc_rpc_init(fbd);
+ __fbnic_set_rx_mode(fbd);
+ fbd->fw_cap.need_bmc_tcam_reinit = false;
+ }
- fbnic_bmc_rpc_all_multi_config(fbd, false);
+ if (fbd->fw_cap.need_bmc_macda_sync) {
+ err = fbnic_fw_xmit_rpc_macda_sync(fbd);
+ if (err)
+ dev_warn(fbd->dev,
+ "Writing MACDA table to FW failed, err: %d\n", err);
+ fbd->fw_cap.need_bmc_macda_sync = false;
+ }
}
#define FBNIC_ACT1_INIT(_l4, _udp, _ip, _v6) \
@@ -450,6 +469,50 @@ int __fbnic_xc_unsync(struct fbnic_mac_addr *mac_addr, unsigned int tcam_idx)
return 0;
}
+void fbnic_promisc_sync(struct fbnic_dev *fbd,
+ bool uc_promisc, bool mc_promisc)
+{
+ struct fbnic_mac_addr *mac_addr;
+
+ /* Populate last TCAM entry with promiscuous entry and 0/1 bit mask */
+ mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_PROMISC_IDX];
+ if (uc_promisc) {
+ if (!is_zero_ether_addr(mac_addr->value.addr8) ||
+ mac_addr->state != FBNIC_TCAM_S_VALID) {
+ eth_zero_addr(mac_addr->value.addr8);
+ eth_broadcast_addr(mac_addr->mask.addr8);
+ clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
+ mac_addr->act_tcam);
+ set_bit(FBNIC_MAC_ADDR_T_PROMISC,
+ mac_addr->act_tcam);
+ mac_addr->state = FBNIC_TCAM_S_ADD;
+ }
+ } else if (mc_promisc &&
+ (!fbnic_bmc_present(fbd) || !fbd->fw_cap.all_multi)) {
+		/* We have to add a special handler for multicast as the
+		 * BMC may have an all-multi rule already in place. In that
+		 * case adding a rule ourselves won't do any good, so we
+		 * will have to modify the ALL MULTI rules below instead.
+ */
+ if (!is_multicast_ether_addr(mac_addr->value.addr8) ||
+ mac_addr->state != FBNIC_TCAM_S_VALID) {
+ eth_zero_addr(mac_addr->value.addr8);
+ eth_broadcast_addr(mac_addr->mask.addr8);
+ mac_addr->value.addr8[0] ^= 1;
+ mac_addr->mask.addr8[0] ^= 1;
+ set_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
+ mac_addr->act_tcam);
+ clear_bit(FBNIC_MAC_ADDR_T_PROMISC,
+ mac_addr->act_tcam);
+ mac_addr->state = FBNIC_TCAM_S_ADD;
+ }
+ } else if (mac_addr->state == FBNIC_TCAM_S_VALID) {
+ __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_ALLMULTI);
+ __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_PROMISC);
+ }
+}
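
The multicast branch above ends up programming value 01:00:00:00:00:00 with mask fe:ff:ff:ff:ff:ff after the two XORs, i.e. "match any address with the group bit set". The following user-space sketch, not part of the patch, reproduces that arithmetic and the match test, assuming (as the inverted-mask comments later in this patch suggest) that a set mask bit means the hardware ignores that bit.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* TCAM-style compare with an inverted mask: a 1 bit is "don't care",
 * a 0 bit must match "value".
 */
static bool tcam_match(const unsigned char *addr, const unsigned char *value,
		       const unsigned char *mask)
{
	for (int i = 0; i < ETH_ALEN; i++)
		if ((addr[i] ^ value[i]) & (unsigned char)~mask[i])
			return false;
	return true;
}

int main(void)
{
	unsigned char value[ETH_ALEN], mask[ETH_ALEN];
	unsigned char mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 };
	unsigned char ucast[ETH_ALEN] = { 0x02, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e };

	memset(value, 0x00, ETH_ALEN);	/* eth_zero_addr(value)      */
	memset(mask, 0xff, ETH_ALEN);	/* eth_broadcast_addr(mask)  */
	value[0] ^= 1;			/* value = 01:00:00:00:00:00 */
	mask[0] ^= 1;			/* mask  = fe:ff:ff:ff:ff:ff */

	printf("multicast matched: %d\n", tcam_match(mcast, value, mask));
	printf("unicast matched:   %d\n", tcam_match(ucast, value, mask));
	return 0;
}
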
+
void fbnic_sift_macda(struct fbnic_dev *fbd)
{
int dest, src;
@@ -533,6 +596,21 @@ static void fbnic_clear_macda(struct fbnic_dev *fbd)
}
}
+static void fbnic_clear_valid_macda(struct fbnic_dev *fbd)
+{
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx];
+
+ if (mac_addr->state == FBNIC_TCAM_S_VALID) {
+ fbnic_clear_macda_entry(fbd, idx);
+
+ mac_addr->state = FBNIC_TCAM_S_UPDATE;
+ }
+ }
+}
+
static void fbnic_write_macda_entry(struct fbnic_dev *fbd, unsigned int idx,
struct fbnic_mac_addr *mac_addr)
{
@@ -554,7 +632,7 @@ static void fbnic_write_macda_entry(struct fbnic_dev *fbd, unsigned int idx,
void fbnic_write_macda(struct fbnic_dev *fbd)
{
- int idx;
+ int idx, updates = 0;
for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx];
@@ -563,6 +641,9 @@ void fbnic_write_macda(struct fbnic_dev *fbd)
if (!(mac_addr->state & FBNIC_TCAM_S_UPDATE))
continue;
+ /* Record update count */
+ updates++;
+
/* Clear by writing 0s. */
if (mac_addr->state == FBNIC_TCAM_S_DELETE) {
/* Invalidate entry and clear addr state info */
@@ -576,6 +657,14 @@ void fbnic_write_macda(struct fbnic_dev *fbd)
mac_addr->state = FBNIC_TCAM_S_VALID;
}
+
+ /* If reinitializing the BMC TCAM we are doing an initial update */
+ if (fbd->fw_cap.need_bmc_tcam_reinit)
+ updates++;
+
+ /* If needed notify firmware of changes to MACDA TCAM */
+ if (updates != 0 && fbnic_bmc_present(fbd))
+ fbd->fw_cap.need_bmc_macda_sync = true;
}
static void fbnic_clear_act_tcam(struct fbnic_dev *fbd, unsigned int idx)
@@ -587,13 +676,488 @@ static void fbnic_clear_act_tcam(struct fbnic_dev *fbd, unsigned int idx)
wr32(fbd, FBNIC_RPC_TCAM_ACT(idx, i), 0);
}
-void fbnic_clear_rules(struct fbnic_dev *fbd)
+static void fbnic_clear_tce_tcam_entry(struct fbnic_dev *fbd, unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_TCE_TCAM_WORD_LEN; i++)
+ wr32(fbd, FBNIC_TCE_RAM_TCAM(idx, i), 0);
+}
+
+static void fbnic_write_tce_tcam_dest(struct fbnic_dev *fbd, unsigned int idx,
+ struct fbnic_mac_addr *mac_addr)
+{
+ u32 dest = FBNIC_TCE_TCAM_DEST_BMC;
+ u32 idx2dest_map;
+
+ if (is_multicast_ether_addr(mac_addr->value.addr8))
+ dest |= FBNIC_TCE_TCAM_DEST_MAC;
+
+ idx2dest_map = rd32(fbd, FBNIC_TCE_TCAM_IDX2DEST_MAP);
+ idx2dest_map &= ~(FBNIC_TCE_TCAM_IDX2DEST_MAP_DEST_ID_0 << (4 * idx));
+ idx2dest_map |= dest << (4 * idx);
+
+ wr32(fbd, FBNIC_TCE_TCAM_IDX2DEST_MAP, idx2dest_map);
+}
+
+static void fbnic_write_tce_tcam_entry(struct fbnic_dev *fbd, unsigned int idx,
+ struct fbnic_mac_addr *mac_addr)
+{
+ __be16 *mask, *value;
+ int i;
+
+ mask = &mac_addr->mask.addr16[FBNIC_TCE_TCAM_WORD_LEN - 1];
+ value = &mac_addr->value.addr16[FBNIC_TCE_TCAM_WORD_LEN - 1];
+
+ for (i = 0; i < FBNIC_TCE_TCAM_WORD_LEN; i++)
+ wr32(fbd, FBNIC_TCE_RAM_TCAM(idx, i),
+ FIELD_PREP(FBNIC_TCE_RAM_TCAM_MASK, ntohs(*mask--)) |
+ FIELD_PREP(FBNIC_TCE_RAM_TCAM_VALUE, ntohs(*value--)));
+
+ wrfl(fbd);
+
+ wr32(fbd, FBNIC_TCE_RAM_TCAM3(idx), FBNIC_TCE_RAM_TCAM3_MCQ_MASK |
+ FBNIC_TCE_RAM_TCAM3_DEST_MASK |
+ FBNIC_TCE_RAM_TCAM3_VALIDATE);
+}
+
+static void __fbnic_write_tce_tcam_rev(struct fbnic_dev *fbd)
+{
+ int tcam_idx = FBNIC_TCE_TCAM_NUM_ENTRIES;
+ int mac_idx;
+
+ for (mac_idx = ARRAY_SIZE(fbd->mac_addr); mac_idx--;) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[mac_idx];
+
+ /* Verify BMC bit is set */
+ if (!test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam))
+ continue;
+
+ if (!tcam_idx) {
+ dev_err(fbd->dev, "TCE TCAM overflow\n");
+ return;
+ }
+
+ tcam_idx--;
+ fbnic_write_tce_tcam_dest(fbd, tcam_idx, mac_addr);
+ fbnic_write_tce_tcam_entry(fbd, tcam_idx, mac_addr);
+ }
+
+ while (tcam_idx)
+ fbnic_clear_tce_tcam_entry(fbd, --tcam_idx);
+
+ fbd->tce_tcam_last = tcam_idx;
+}
+
+static void __fbnic_write_tce_tcam(struct fbnic_dev *fbd)
+{
+ int tcam_idx = 0;
+ int mac_idx;
+
+ for (mac_idx = 0; mac_idx < ARRAY_SIZE(fbd->mac_addr); mac_idx++) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[mac_idx];
+
+ /* Verify BMC bit is set */
+ if (!test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam))
+ continue;
+
+ if (tcam_idx == FBNIC_TCE_TCAM_NUM_ENTRIES) {
+ dev_err(fbd->dev, "TCE TCAM overflow\n");
+ return;
+ }
+
+ fbnic_write_tce_tcam_dest(fbd, tcam_idx, mac_addr);
+ fbnic_write_tce_tcam_entry(fbd, tcam_idx, mac_addr);
+ tcam_idx++;
+ }
+
+ while (tcam_idx < FBNIC_TCE_TCAM_NUM_ENTRIES)
+ fbnic_clear_tce_tcam_entry(fbd, tcam_idx++);
+
+ fbd->tce_tcam_last = tcam_idx;
+}
+
+void fbnic_write_tce_tcam(struct fbnic_dev *fbd)
+{
+ if (fbd->tce_tcam_last)
+ __fbnic_write_tce_tcam_rev(fbd);
+ else
+ __fbnic_write_tce_tcam(fbd);
+}
+
+struct fbnic_ip_addr *__fbnic_ip4_sync(struct fbnic_dev *fbd,
+ struct fbnic_ip_addr *ip_addr,
+ const struct in_addr *addr,
+ const struct in_addr *mask)
+{
+ struct fbnic_ip_addr *avail_addr = NULL;
+ unsigned int i;
+
+ /* Scan from top of list to bottom, filling bottom up. */
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES; i++, ip_addr++) {
+ struct in6_addr *m = &ip_addr->mask;
+
+ if (ip_addr->state == FBNIC_TCAM_S_DISABLED) {
+ avail_addr = ip_addr;
+ continue;
+ }
+
+ if (ip_addr->version != 4)
+ continue;
+
+		 * Drop avail_addr if mask is a subset of our current mask.
+		 * This prevents us from inserting a longer prefix behind a
+ * shorter one.
+ *
+		 * The mask is stored as an inverted value, so as an example:
+ * m ffff ffff ffff ffff ffff ffff ffff 0000 0000
+ * mask 0000 0000 0000 0000 0000 0000 0000 ffff ffff
+ *
+ * "m" and "mask" represent typical IPv4 mask stored in
+ * the TCAM and those provided by the stack. The code below
+ * should return a non-zero result if there is a 0 stored
+ * anywhere in "m" where "mask" has a 0.
+ */
+ if (~m->s6_addr32[3] & ~mask->s_addr) {
+ avail_addr = NULL;
+ continue;
+ }
+
+ /* Check to see if the mask actually contains fewer bits than
+ * our new mask "m". The XOR below should only result in 0 if
+ * "m" is masking a bit that we are looking for in our new
+ * "mask", we eliminated the 0^0 case with the check above.
+ *
+ * If it contains fewer bits we need to stop here, otherwise
+ * we might be adding an unreachable rule.
+ */
+ if (~(m->s6_addr32[3] ^ mask->s_addr))
+ break;
+
+ if (ip_addr->value.s6_addr32[3] == addr->s_addr) {
+ avail_addr = ip_addr;
+ break;
+ }
+ }
+
+ if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) {
+ ipv6_addr_set(&avail_addr->value, 0, 0, 0, addr->s_addr);
+ ipv6_addr_set(&avail_addr->mask, htonl(~0), htonl(~0),
+ htonl(~0), ~mask->s_addr);
+ avail_addr->version = 4;
+
+ avail_addr->state = FBNIC_TCAM_S_ADD;
+ }
+
+ return avail_addr;
+}
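
The two bit tests above decide whether a new (addr, mask) pair may reuse a candidate slot, must forget it, or must stop scanning: because the stored TCAM mask is inverted, ~m & ~mask is non-zero exactly when the stored entry ignores a bit the new rule cares about. The sketch below, not from the driver, evaluates the same expressions on bare 32-bit values, ignoring network byte order purely for readability.

#include <stdint.h>
#include <stdio.h>

/* Same tests as __fbnic_ip4_sync(): "m" is a stored TCAM mask
 * (inverted, 1 = don't care), "mask" is the new mask from the stack
 * (1 = care).
 */
static const char *classify(uint32_t m, uint32_t mask)
{
	if (~m & ~mask)
		return "skip entry, forget candidate slot";
	if (~(m ^ mask))
		return "stop scan, avoid adding an unreachable rule";
	return "same width, compare addresses";
}

int main(void)
{
	uint32_t m24 = ~UINT32_C(0xffffff00);	/* stored /24, inverted */
	uint32_t m16 = ~UINT32_C(0xffff0000);	/* stored /16, inverted */

	printf("stored /24 vs new /16: %s\n", classify(m24, 0xffff0000));
	printf("stored /16 vs new /24: %s\n", classify(m16, 0xffffff00));
	printf("stored /24 vs new /24: %s\n", classify(m24, 0xffffff00));
	return 0;
}
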
+
+struct fbnic_ip_addr *__fbnic_ip6_sync(struct fbnic_dev *fbd,
+ struct fbnic_ip_addr *ip_addr,
+ const struct in6_addr *addr,
+ const struct in6_addr *mask)
+{
+ struct fbnic_ip_addr *avail_addr = NULL;
+ unsigned int i;
+
+ ip_addr = &ip_addr[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES - 1];
+
+ /* Scan from bottom of list to top, filling top down. */
+ for (i = FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES; i--; ip_addr--) {
+ struct in6_addr *m = &ip_addr->mask;
+
+ if (ip_addr->state == FBNIC_TCAM_S_DISABLED) {
+ avail_addr = ip_addr;
+ continue;
+ }
+
+ if (ip_addr->version != 6)
+ continue;
+
+ /* Drop avail_addr if mask is a superset of our current mask.
+ * This prevents us from inserting a longer prefix behind a
+ * shorter one.
+ *
+		 * The mask is stored as an inverted value, so as an example:
+ * m 0000 0000 0000 0000 0000 0000 0000 0000 0000
+ * mask ffff ffff ffff ffff ffff ffff ffff ffff ffff
+ *
+ * "m" and "mask" represent typical IPv6 mask stored in
+ * the TCAM and those provided by the stack. The code below
+ * should return a non-zero result which will cause us
+ * to drop the avail_addr value that might be cached
+ * to prevent us from dropping a v6 address behind it.
+ */
+ if ((m->s6_addr32[0] & mask->s6_addr32[0]) |
+ (m->s6_addr32[1] & mask->s6_addr32[1]) |
+ (m->s6_addr32[2] & mask->s6_addr32[2]) |
+ (m->s6_addr32[3] & mask->s6_addr32[3])) {
+ avail_addr = NULL;
+ continue;
+ }
+
+ /* The previous test eliminated any overlap between the
+ * two values so now we need to check for gaps.
+ *
+ * If the mask is equal to our current mask then it should
+ * result with m ^ mask = ffff ffff, if however the value
+ * stored in m is bigger then we should see a 0 appear
+ * somewhere in the mask.
+ */
+ if (~(m->s6_addr32[0] ^ mask->s6_addr32[0]) |
+ ~(m->s6_addr32[1] ^ mask->s6_addr32[1]) |
+ ~(m->s6_addr32[2] ^ mask->s6_addr32[2]) |
+ ~(m->s6_addr32[3] ^ mask->s6_addr32[3]))
+ break;
+
+ if (ipv6_addr_cmp(&ip_addr->value, addr))
+ continue;
+
+ avail_addr = ip_addr;
+ break;
+ }
+
+ if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) {
+ memcpy(&avail_addr->value, addr, sizeof(*addr));
+ ipv6_addr_set(&avail_addr->mask,
+ ~mask->s6_addr32[0], ~mask->s6_addr32[1],
+ ~mask->s6_addr32[2], ~mask->s6_addr32[3]);
+ avail_addr->version = 6;
+
+ avail_addr->state = FBNIC_TCAM_S_ADD;
+ }
+
+ return avail_addr;
+}
+
+int __fbnic_ip_unsync(struct fbnic_ip_addr *ip_addr, unsigned int tcam_idx)
+{
+ if (!test_and_clear_bit(tcam_idx, ip_addr->act_tcam))
+ return -ENOENT;
+
+ if (bitmap_empty(ip_addr->act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES))
+ ip_addr->state = FBNIC_TCAM_S_DELETE;
+
+ return 0;
+}
+
+static void fbnic_clear_ip_src_entry(struct fbnic_dev *fbd, unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_IPSRC(idx, i), 0);
+}
+
+static void fbnic_clear_ip_dst_entry(struct fbnic_dev *fbd, unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_IPDST(idx, i), 0);
+}
+
+static void fbnic_clear_ip_outer_src_entry(struct fbnic_dev *fbd,
+ unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPSRC(idx, i), 0);
+}
+
+static void fbnic_clear_ip_outer_dst_entry(struct fbnic_dev *fbd,
+ unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPDST(idx, i), 0);
+}
+
+static void fbnic_write_ip_src_entry(struct fbnic_dev *fbd, unsigned int idx,
+ struct fbnic_ip_addr *ip_addr)
+{
+ __be16 *mask, *value;
+ int i;
+
+ mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+ value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_IPSRC(idx, i),
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) |
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--)));
+ wrfl(fbd);
+
+ /* Bit 129 is used to flag for v4/v6 */
+ wr32(fbd, FBNIC_RPC_TCAM_IPSRC(idx, i),
+ (ip_addr->version == 6) | FBNIC_RPC_TCAM_VALIDATE);
+}
+
+static void fbnic_write_ip_dst_entry(struct fbnic_dev *fbd, unsigned int idx,
+ struct fbnic_ip_addr *ip_addr)
+{
+ __be16 *mask, *value;
+ int i;
+
+ mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+ value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_IPDST(idx, i),
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) |
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--)));
+ wrfl(fbd);
+
+ /* Bit 129 is used to flag for v4/v6 */
+ wr32(fbd, FBNIC_RPC_TCAM_IPDST(idx, i),
+ (ip_addr->version == 6) | FBNIC_RPC_TCAM_VALIDATE);
+}
+
+static void fbnic_write_ip_outer_src_entry(struct fbnic_dev *fbd,
+ unsigned int idx,
+ struct fbnic_ip_addr *ip_addr)
+{
+ __be16 *mask, *value;
+ int i;
+
+ mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+ value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPSRC(idx, i),
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) |
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--)));
+ wrfl(fbd);
+
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPSRC(idx, i), FBNIC_RPC_TCAM_VALIDATE);
+}
+
+static void fbnic_write_ip_outer_dst_entry(struct fbnic_dev *fbd,
+ unsigned int idx,
+ struct fbnic_ip_addr *ip_addr)
+{
+ __be16 *mask, *value;
+ int i;
+
+ mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+ value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPDST(idx, i),
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) |
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--)));
+ wrfl(fbd);
+
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPDST(idx, i), FBNIC_RPC_TCAM_VALIDATE);
+}
+
+void fbnic_write_ip_addr(struct fbnic_dev *fbd)
+{
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->ip_src); idx--;) {
+ struct fbnic_ip_addr *ip_addr = &fbd->ip_src[idx];
+
+ /* Check if update flag is set else skip. */
+ if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE))
+ continue;
+
+ /* Clear by writing 0s. */
+ if (ip_addr->state == FBNIC_TCAM_S_DELETE) {
+ /* Invalidate entry and clear addr state info */
+ fbnic_clear_ip_src_entry(fbd, idx);
+ memset(ip_addr, 0, sizeof(*ip_addr));
+
+ continue;
+ }
+
+ fbnic_write_ip_src_entry(fbd, idx, ip_addr);
+
+ ip_addr->state = FBNIC_TCAM_S_VALID;
+ }
+
+ /* Repeat process for other IP TCAMs */
+ for (idx = ARRAY_SIZE(fbd->ip_dst); idx--;) {
+ struct fbnic_ip_addr *ip_addr = &fbd->ip_dst[idx];
+
+ if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE))
+ continue;
+
+ if (ip_addr->state == FBNIC_TCAM_S_DELETE) {
+ fbnic_clear_ip_dst_entry(fbd, idx);
+ memset(ip_addr, 0, sizeof(*ip_addr));
+
+ continue;
+ }
+
+ fbnic_write_ip_dst_entry(fbd, idx, ip_addr);
+
+ ip_addr->state = FBNIC_TCAM_S_VALID;
+ }
+
+ for (idx = ARRAY_SIZE(fbd->ipo_src); idx--;) {
+ struct fbnic_ip_addr *ip_addr = &fbd->ipo_src[idx];
+
+ if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE))
+ continue;
+
+ if (ip_addr->state == FBNIC_TCAM_S_DELETE) {
+ fbnic_clear_ip_outer_src_entry(fbd, idx);
+ memset(ip_addr, 0, sizeof(*ip_addr));
+
+ continue;
+ }
+
+ fbnic_write_ip_outer_src_entry(fbd, idx, ip_addr);
+
+ ip_addr->state = FBNIC_TCAM_S_VALID;
+ }
+
+ for (idx = ARRAY_SIZE(fbd->ipo_dst); idx--;) {
+ struct fbnic_ip_addr *ip_addr = &fbd->ipo_dst[idx];
+
+ if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE))
+ continue;
+
+ if (ip_addr->state == FBNIC_TCAM_S_DELETE) {
+ fbnic_clear_ip_outer_dst_entry(fbd, idx);
+ memset(ip_addr, 0, sizeof(*ip_addr));
+
+ continue;
+ }
+
+ fbnic_write_ip_outer_dst_entry(fbd, idx, ip_addr);
+
+ ip_addr->state = FBNIC_TCAM_S_VALID;
+ }
+}
+
+static void fbnic_clear_valid_act_tcam(struct fbnic_dev *fbd)
{
- u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
- FBNIC_RPC_ACT_TBL0_DEST_BMC);
int i = FBNIC_RPC_TCAM_ACT_NUM_ENTRIES - 1;
struct fbnic_act_tcam *act_tcam;
+ /* Work from the bottom up deleting all other rules from hardware */
+ do {
+ act_tcam = &fbd->act_tcam[i];
+
+ if (act_tcam->state != FBNIC_TCAM_S_VALID)
+ continue;
+
+ fbnic_clear_act_tcam(fbd, i);
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+ } while (i--);
+}
+
+void fbnic_clear_rules(struct fbnic_dev *fbd)
+{
/* Clear MAC rules */
fbnic_clear_macda(fbd);
@@ -608,6 +1172,11 @@ void fbnic_clear_rules(struct fbnic_dev *fbd)
* the interface back up.
*/
if (fbnic_bmc_present(fbd)) {
+ u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
+ FBNIC_RPC_ACT_TBL0_DEST_BMC);
+ int i = FBNIC_RPC_TCAM_ACT_NUM_ENTRIES - 1;
+ struct fbnic_act_tcam *act_tcam;
+
act_tcam = &fbd->act_tcam[i];
if (act_tcam->state == FBNIC_TCAM_S_VALID &&
@@ -616,21 +1185,10 @@ void fbnic_clear_rules(struct fbnic_dev *fbd)
wr32(fbd, FBNIC_RPC_ACT_TBL1(i), 0);
act_tcam->state = FBNIC_TCAM_S_UPDATE;
-
- i--;
}
}
- /* Work from the bottom up deleting all other rules from hardware */
- do {
- act_tcam = &fbd->act_tcam[i];
-
- if (act_tcam->state != FBNIC_TCAM_S_VALID)
- continue;
-
- fbnic_clear_act_tcam(fbd, i);
- act_tcam->state = FBNIC_TCAM_S_UPDATE;
- } while (i--);
+ fbnic_clear_valid_act_tcam(fbd);
}
static void fbnic_delete_act_tcam(struct fbnic_dev *fbd, unsigned int idx)
@@ -680,3 +1238,9 @@ void fbnic_write_rules(struct fbnic_dev *fbd)
fbnic_update_act_tcam(fbd, i);
}
}
+
+void fbnic_rpc_reset_valid_entries(struct fbnic_dev *fbd)
+{
+ fbnic_clear_valid_act_tcam(fbd);
+ fbnic_clear_valid_macda(fbd);
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h
index d62935f722a2..3d4925b2ac75 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h
@@ -7,6 +7,8 @@
#include <uapi/linux/in6.h>
#include <linux/bitfield.h>
+struct in_addr;
+
/* The TCAM state definitions follow an expected ordering.
* They start out disabled, then move through the following states:
* Disabled 0 -> Add 2
@@ -32,9 +34,18 @@ enum {
#define FBNIC_RPC_TCAM_MACDA_WORD_LEN 3
#define FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES 32
+/* 8 IPSRC and IPDST TCAM Entries each
+ * 8 registers, Validate each
+ */
+#define FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN 8
+#define FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES 8
+
#define FBNIC_RPC_TCAM_ACT_WORD_LEN 11
#define FBNIC_RPC_TCAM_ACT_NUM_ENTRIES 64
+#define FBNIC_TCE_TCAM_WORD_LEN 3
+#define FBNIC_TCE_TCAM_NUM_ENTRIES 8
+
struct fbnic_mac_addr {
union {
unsigned char addr8[ETH_ALEN];
@@ -44,6 +55,13 @@ struct fbnic_mac_addr {
DECLARE_BITMAP(act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES);
};
+struct fbnic_ip_addr {
+ struct in6_addr mask, value;
+ unsigned char version;
+ unsigned char state;
+ DECLARE_BITMAP(act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES);
+};
+
struct fbnic_act_tcam {
struct {
u16 tcam[FBNIC_RPC_TCAM_ACT_WORD_LEN];
@@ -78,6 +96,11 @@ enum {
#define FBNIC_RPC_ACT_TBL_BMC_OFFSET 0
#define FBNIC_RPC_ACT_TBL_BMC_ALL_MULTI_OFFSET 1
+/* This should leave us with 48 total entries in the TCAM that can be used
+ * for NFC after also deducting the 14 needed for RSS table programming.
+ */
+#define FBNIC_RPC_ACT_TBL_NFC_OFFSET 2
+
/* We reserve the last 14 entries for RSS rules on the host. The BMC
* unicast rule will need to be populated above these and is expected to
* use MACDA TCAM entry 23 to store the BMC MAC address.
@@ -85,6 +108,9 @@ enum {
#define FBNIC_RPC_ACT_TBL_RSS_OFFSET \
(FBNIC_RPC_ACT_TBL_NUM_ENTRIES - FBNIC_RSS_EN_NUM_ENTRIES)
+#define FBNIC_RPC_ACT_TBL_NFC_ENTRIES \
+ (FBNIC_RPC_ACT_TBL_RSS_OFFSET - FBNIC_RPC_ACT_TBL_NFC_OFFSET)
+
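
For reference, the arithmetic behind the "48 total entries" comment above, assuming FBNIC_RPC_ACT_TBL_NUM_ENTRIES is 64 (matching FBNIC_RPC_TCAM_ACT_NUM_ENTRIES earlier in this header) and FBNIC_RSS_EN_NUM_ENTRIES is the 14 reserved RSS entries described below: FBNIC_RPC_ACT_TBL_RSS_OFFSET = 64 - 14 = 50, so FBNIC_RPC_ACT_TBL_NFC_ENTRIES = 50 - 2 = 48.
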
/* Flags used to identify the owner for this MAC filter. Note that any
* flags set for Broadcast thru Promisc indicate that the rule belongs
* to the RSS filters for the host.
@@ -158,6 +184,7 @@ struct fbnic_net;
void fbnic_bmc_rpc_init(struct fbnic_dev *fbd);
void fbnic_bmc_rpc_all_multi_config(struct fbnic_dev *fbd, bool enable_host);
+void fbnic_bmc_rpc_check(struct fbnic_dev *fbd);
void fbnic_reset_indir_tbl(struct fbnic_net *fbn);
void fbnic_rss_key_fill(u32 *buffer);
@@ -165,6 +192,7 @@ void fbnic_rss_init_en_mask(struct fbnic_net *fbn);
void fbnic_rss_disable_hw(struct fbnic_dev *fbd);
void fbnic_rss_reinit_hw(struct fbnic_dev *fbd, struct fbnic_net *fbn);
void fbnic_rss_reinit(struct fbnic_dev *fbd, struct fbnic_net *fbn);
+u16 fbnic_flow_hash_2_rss_en_mask(struct fbnic_net *fbn, int flow_type);
int __fbnic_xc_unsync(struct fbnic_mac_addr *mac_addr, unsigned int tcam_idx);
struct fbnic_mac_addr *__fbnic_uc_sync(struct fbnic_dev *fbd,
@@ -174,6 +202,20 @@ struct fbnic_mac_addr *__fbnic_mc_sync(struct fbnic_dev *fbd,
void fbnic_sift_macda(struct fbnic_dev *fbd);
void fbnic_write_macda(struct fbnic_dev *fbd);
+void fbnic_promisc_sync(struct fbnic_dev *fbd,
+ bool uc_promisc, bool mc_promisc);
+
+struct fbnic_ip_addr *__fbnic_ip4_sync(struct fbnic_dev *fbd,
+ struct fbnic_ip_addr *ip_addr,
+ const struct in_addr *addr,
+ const struct in_addr *mask);
+struct fbnic_ip_addr *__fbnic_ip6_sync(struct fbnic_dev *fbd,
+ struct fbnic_ip_addr *ip_addr,
+ const struct in6_addr *addr,
+ const struct in6_addr *mask);
+int __fbnic_ip_unsync(struct fbnic_ip_addr *ip_addr, unsigned int tcam_idx);
+void fbnic_write_ip_addr(struct fbnic_dev *fbd);
+
static inline int __fbnic_uc_unsync(struct fbnic_mac_addr *mac_addr)
{
return __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_UNICAST);
@@ -186,4 +228,5 @@ static inline int __fbnic_mc_unsync(struct fbnic_mac_addr *mac_addr)
void fbnic_clear_rules(struct fbnic_dev *fbd);
void fbnic_write_rules(struct fbnic_dev *fbd);
+void fbnic_write_tce_tcam(struct fbnic_dev *fbd);
#endif /* _FBNIC_RPC_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_time.c b/drivers/net/ethernet/meta/fbnic/fbnic_time.c
index 39d99677b71e..db7748189f45 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_time.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_time.c
@@ -253,7 +253,7 @@ static void fbnic_ptp_reset(struct fbnic_dev *fbd)
void fbnic_time_init(struct fbnic_net *fbn)
{
- /* This is not really a statistic, but the lockng primitive fits
+ /* This is not really a statistic, but the locking primitive fits
* our usecase perfectly, we need an atomic 8 bytes READ_ONCE() /
* WRITE_ONCE() behavior.
*/
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c
index 2a174ab062a3..517ed8b2f1cb 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c
@@ -196,13 +196,17 @@ int fbnic_tlv_attr_put_string(struct fbnic_tlv_msg *msg, u16 attr_id,
/**
* fbnic_tlv_attr_get_unsigned - Retrieve unsigned value from result
* @attr: Attribute to retrieve data from
+ * @def: The default value if attr is NULL
*
* Return: unsigned 64b value containing integer value
**/
-u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr)
+u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr, u64 def)
{
__le64 le64_value = 0;
+ if (!attr)
+ return def;
+
memcpy(&le64_value, &attr->value[0],
le16_to_cpu(attr->hdr.len) - sizeof(*attr));
@@ -212,15 +216,21 @@ u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr)
/**
* fbnic_tlv_attr_get_signed - Retrieve signed value from result
* @attr: Attribute to retrieve data from
+ * @def: The default value if attr is NULL
*
* Return: signed 64b value containing integer value
**/
-s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr)
+s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr, s64 def)
{
- int shift = (8 + sizeof(*attr) - le16_to_cpu(attr->hdr.len)) * 8;
__le64 le64_value = 0;
+ int shift;
s64 value;
+ if (!attr)
+ return def;
+
+ shift = (8 + sizeof(*attr) - le16_to_cpu(attr->hdr.len)) * 8;
+
/* Copy the value and adjust for byte ordering */
memcpy(&le64_value, &attr->value[0],
le16_to_cpu(attr->hdr.len) - sizeof(*attr));
@@ -233,19 +243,40 @@ s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr)
/**
* fbnic_tlv_attr_get_string - Retrieve string value from result
* @attr: Attribute to retrieve data from
- * @str: Pointer to an allocated string to store the data
- * @max_size: The maximum size which can be in str
+ * @dst: Pointer to an allocated string to store the data
+ * @dstsize: The maximum size which can be in dst
*
- * Return: the size of the string read from firmware
+ * Return: the size of the string read from firmware or negative error.
**/
-size_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *str,
- size_t max_size)
+ssize_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *dst,
+ size_t dstsize)
{
- max_size = min_t(size_t, max_size,
- (le16_to_cpu(attr->hdr.len) * 4) - sizeof(*attr));
- memcpy(str, &attr->value, max_size);
+ size_t srclen, len;
+ ssize_t ret;
+
+ if (!attr)
+ return -EINVAL;
+
+ if (dstsize == 0)
+ return -E2BIG;
+
+ srclen = le16_to_cpu(attr->hdr.len) - sizeof(*attr);
+ if (srclen > 0 && ((char *)attr->value)[srclen - 1] == '\0')
+ srclen--;
+
+ if (srclen >= dstsize) {
+ len = dstsize - 1;
+ ret = -E2BIG;
+ } else {
+ len = srclen;
+ ret = len;
+ }
+
+ memcpy(dst, &attr->value, len);
+ /* Zero pad end of dst. */
+ memset(dst + len, 0, dstsize - len);
- return max_size;
+ return ret;
}
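
The rewritten helper above trims a trailing NUL from the attribute payload, truncates into dst while reporting -E2BIG, and zero-pads the remainder so dst is always terminated. A small user-space sketch of those copy semantics follows; it is not part of the patch and uses a plain buffer in place of the TLV attribute.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

/* Mock of the copy logic in fbnic_tlv_attr_get_string(): srclen is the
 * attribute payload length (possibly including a trailing NUL), dst is
 * always NUL-terminated and zero-padded, truncation returns -E2BIG.
 */
static ssize_t copy_tlv_string(const char *src, size_t srclen,
			       char *dst, size_t dstsize)
{
	size_t len;
	ssize_t ret;

	if (dstsize == 0)
		return -E2BIG;

	if (srclen > 0 && src[srclen - 1] == '\0')
		srclen--;

	if (srclen >= dstsize) {
		len = dstsize - 1;
		ret = -E2BIG;
	} else {
		len = srclen;
		ret = (ssize_t)len;
	}

	memcpy(dst, src, len);
	memset(dst + len, 0, dstsize - len);	/* zero pad end of dst */
	return ret;
}

int main(void)
{
	char dst[8];

	/* Payload fits: returns the string length. */
	printf("%zd '%s'\n", copy_tlv_string("fw-1.2", 7, dst, sizeof(dst)), dst);
	/* Payload too long: truncated copy, -E2BIG returned. */
	printf("%zd '%s'\n",
	       copy_tlv_string("firmware-22.09", 15, dst, sizeof(dst)), dst);
	return 0;
}
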
/**
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h
index 67300ab44353..3508b46ebdd0 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h
@@ -80,7 +80,7 @@ struct fbnic_tlv_index {
enum fbnic_tlv_type type;
};
-#define TLV_MAX_DATA (PAGE_SIZE - 512)
+#define TLV_MAX_DATA ((PAGE_SIZE - 512) & 0xFFFF)
#define FBNIC_TLV_ATTR_ID_UNKNOWN USHRT_MAX
#define FBNIC_TLV_ATTR_STRING(id, len) { id, len, FBNIC_TLV_STRING }
#define FBNIC_TLV_ATTR_FLAG(id) { id, 0, FBNIC_TLV_FLAG }
@@ -114,34 +114,10 @@ static inline bool fbnic_tlv_attr_get_bool(struct fbnic_tlv_msg *attr)
return !!attr;
}
-u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr);
-s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr);
-size_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *str,
- size_t max_size);
-
-#define get_unsigned_result(id, location) \
-do { \
- struct fbnic_tlv_msg *result = results[id]; \
- if (result) \
- location = fbnic_tlv_attr_get_unsigned(result); \
-} while (0)
-
-#define get_signed_result(id, location) \
-do { \
- struct fbnic_tlv_msg *result = results[id]; \
- if (result) \
- location = fbnic_tlv_attr_get_signed(result); \
-} while (0)
-
-#define get_string_result(id, size, str, max_size) \
-do { \
- struct fbnic_tlv_msg *result = results[id]; \
- if (result) \
- size = fbnic_tlv_attr_get_string(result, str, max_size); \
-} while (0)
-
-#define get_bool(id) (!!(results[id]))
-
+u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr, u64 def);
+s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr, s64 def);
+ssize_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *dst,
+ size_t dstsize);
struct fbnic_tlv_msg *fbnic_tlv_msg_alloc(u16 msg_id);
int fbnic_tlv_attr_put_flag(struct fbnic_tlv_msg *msg, const u16 attr_id);
int fbnic_tlv_attr_put_value(struct fbnic_tlv_msg *msg, const u16 attr_id,
@@ -170,6 +146,13 @@ int fbnic_tlv_msg_parse(void *opaque, struct fbnic_tlv_msg *msg,
const struct fbnic_tlv_parser *parser);
int fbnic_tlv_parser_error(void *opaque, struct fbnic_tlv_msg **results);
+#define fta_get_uint(_results, _id) \
+ fbnic_tlv_attr_get_unsigned(_results[_id], 0)
+#define fta_get_sint(_results, _id) \
+ fbnic_tlv_attr_get_signed(_results[_id], 0)
+#define fta_get_str(_results, _id, _dst, _dstsize) \
+ fbnic_tlv_attr_get_string(_results[_id], _dst, _dstsize)
+
#define FBNIC_TLV_MSG_ERROR \
FBNIC_TLV_PARSER(UNKNOWN, NULL, fbnic_tlv_parser_error)
#endif /* _FBNIC_TLV_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
index b5050fabe8fe..13d508ce637f 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
@@ -2,10 +2,14 @@
/* Copyright (c) Meta Platforms, Inc. and affiliates. */
#include <linux/bitfield.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include <linux/iopoll.h>
#include <linux/pci.h>
#include <net/netdev_queues.h>
#include <net/page_pool/helpers.h>
+#include <net/tcp.h>
+#include <net/xdp.h>
#include "fbnic.h"
#include "fbnic_csr.h"
@@ -13,11 +17,19 @@
#include "fbnic_txrx.h"
enum {
+ FBNIC_XDP_PASS = 0,
+ FBNIC_XDP_CONSUME,
+ FBNIC_XDP_TX,
+ FBNIC_XDP_LEN_ERR,
+};
+
+enum {
FBNIC_XMIT_CB_TS = 0x01,
};
struct fbnic_xmit_cb {
u32 bytecount;
+ u16 gso_segs;
u8 desc_count;
u8 flags;
int hw_head;
@@ -25,6 +37,8 @@ struct fbnic_xmit_cb {
#define FBNIC_XMIT_CB(__skb) ((struct fbnic_xmit_cb *)((__skb)->cb))
+#define FBNIC_XMIT_NOUNMAP ((void *)1)
+
static u32 __iomem *fbnic_ring_csr_base(const struct fbnic_ring *ring)
{
unsigned long csr_base = (unsigned long)ring->doorbell;
@@ -113,6 +127,11 @@ static int fbnic_maybe_stop_tx(const struct net_device *dev,
res = netif_txq_maybe_stop(txq, fbnic_desc_unused(ring), size,
FBNIC_TX_DESC_WAKEUP);
+ if (!res) {
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.twq.stop++;
+ u64_stats_update_end(&ring->stats.syncp);
+ }
return !res;
}
@@ -174,8 +193,72 @@ static bool fbnic_tx_tstamp(struct sk_buff *skb)
}
static bool
+fbnic_tx_lso(struct fbnic_ring *ring, struct sk_buff *skb,
+ struct skb_shared_info *shinfo, __le64 *meta,
+ unsigned int *l2len, unsigned int *i3len)
+{
+ unsigned int l3_type, l4_type, l4len, hdrlen;
+ unsigned char *l4hdr;
+ __be16 payload_len;
+
+ if (unlikely(skb_cow_head(skb, 0)))
+ return true;
+
+ if (shinfo->gso_type & SKB_GSO_PARTIAL) {
+ l3_type = FBNIC_TWD_L3_TYPE_OTHER;
+ } else if (!skb->encapsulation) {
+ if (ip_hdr(skb)->version == 4)
+ l3_type = FBNIC_TWD_L3_TYPE_IPV4;
+ else
+ l3_type = FBNIC_TWD_L3_TYPE_IPV6;
+ } else {
+ unsigned int o3len;
+
+ o3len = skb_inner_network_header(skb) - skb_network_header(skb);
+ *i3len -= o3len;
+ *meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_L3_OHLEN_MASK,
+ o3len / 2));
+ l3_type = FBNIC_TWD_L3_TYPE_V6V6;
+ }
+
+ l4hdr = skb_checksum_start(skb);
+ payload_len = cpu_to_be16(skb->len - (l4hdr - skb->data));
+
+ if (shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+ struct tcphdr *tcph = (struct tcphdr *)l4hdr;
+
+ l4_type = FBNIC_TWD_L4_TYPE_TCP;
+ l4len = __tcp_hdrlen((struct tcphdr *)l4hdr);
+ csum_replace_by_diff(&tcph->check, (__force __wsum)payload_len);
+ } else {
+ struct udphdr *udph = (struct udphdr *)l4hdr;
+
+ l4_type = FBNIC_TWD_L4_TYPE_UDP;
+ l4len = sizeof(struct udphdr);
+ csum_replace_by_diff(&udph->check, (__force __wsum)payload_len);
+ }
+
+ hdrlen = (l4hdr - skb->data) + l4len;
+ *meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_L3_TYPE_MASK, l3_type) |
+ FIELD_PREP(FBNIC_TWD_L4_TYPE_MASK, l4_type) |
+ FIELD_PREP(FBNIC_TWD_L4_HLEN_MASK, l4len / 4) |
+ FIELD_PREP(FBNIC_TWD_MSS_MASK, shinfo->gso_size) |
+ FBNIC_TWD_FLAG_REQ_LSO);
+
+ FBNIC_XMIT_CB(skb)->bytecount += (shinfo->gso_segs - 1) * hdrlen;
+ FBNIC_XMIT_CB(skb)->gso_segs = shinfo->gso_segs;
+
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.twq.lso += shinfo->gso_segs;
+ u64_stats_update_end(&ring->stats.syncp);
+
+ return false;
+}
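
The bytecount adjustment above accounts for the headers the hardware will replicate for every segment after the first, so BQL and the byte counters reflect wire bytes rather than skb->len. A short worked example follows; it is not from the patch and the payload, MSS and header sizes are assumed values.

#include <stdio.h>

/* Worked example of the LSO accounting above: a 64 KiB TCP payload,
 * MSS 1448, and 66 bytes of Ethernet + IPv4 + TCP headers (hdrlen is
 * the l4 header offset plus l4len in the driver).
 */
int main(void)
{
	unsigned int payload = 65536, mss = 1448, hdrlen = 66;
	unsigned int gso_segs = (payload + mss - 1) / mss;	/* 46 */
	unsigned int skb_len = hdrlen + payload;

	/* bytecount starts at skb->len and grows by one extra header per
	 * additional segment, matching
	 * FBNIC_XMIT_CB(skb)->bytecount += (gso_segs - 1) * hdrlen.
	 */
	unsigned int bytecount = skb_len + (gso_segs - 1) * hdrlen;

	printf("segments=%u wire bytes=%u (skb->len was %u)\n",
	       gso_segs, bytecount, skb_len);
	return 0;
}
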
+
+static bool
fbnic_tx_offloads(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
unsigned int l2len, i3len;
if (fbnic_tx_tstamp(skb))
@@ -190,7 +273,15 @@ fbnic_tx_offloads(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
*meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_CSUM_OFFSET_MASK,
skb->csum_offset / 2));
- *meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_CSO);
+ if (shinfo->gso_size) {
+ if (fbnic_tx_lso(ring, skb, shinfo, meta, &l2len, &i3len))
+ return true;
+ } else {
+ *meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_CSO);
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.twq.csum_partial++;
+ u64_stats_update_end(&ring->stats.syncp);
+ }
*meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_L2_HLEN_MASK, l2len / 2) |
FIELD_PREP(FBNIC_TWD_L3_IHLEN_MASK, i3len / 2));
@@ -198,12 +289,15 @@ fbnic_tx_offloads(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
}
static void
-fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq)
+fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq,
+ u64 *csum_cmpl, u64 *csum_none)
{
skb_checksum_none_assert(skb);
- if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
+ if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
+ (*csum_none)++;
return;
+ }
if (FIELD_GET(FBNIC_RCD_META_L4_CSUM_UNNECESSARY, rcd)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -212,6 +306,7 @@ fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq)
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = (__force __wsum)csum;
+ (*csum_cmpl)++;
}
}
@@ -222,6 +317,7 @@ fbnic_tx_map(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
unsigned int tail = ring->tail, first;
unsigned int size, data_len;
skb_frag_t *frag;
+ bool is_net_iov;
dma_addr_t dma;
__le64 *twd;
@@ -237,6 +333,7 @@ fbnic_tx_map(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
if (size > FIELD_MAX(FBNIC_TWD_LEN_MASK))
goto dma_error;
+ is_net_iov = false;
dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
@@ -249,6 +346,8 @@ fbnic_tx_map(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
FIELD_PREP(FBNIC_TWD_LEN_MASK, size) |
FIELD_PREP(FBNIC_TWD_TYPE_MASK,
FBNIC_TWD_TYPE_AL));
+ if (is_net_iov)
+ ring->tx_buf[tail] = FBNIC_XMIT_NOUNMAP;
tail++;
tail &= ring->size_mask;
@@ -262,6 +361,7 @@ fbnic_tx_map(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
if (size > FIELD_MAX(FBNIC_TWD_LEN_MASK))
goto dma_error;
+ is_net_iov = skb_frag_is_net_iov(frag);
dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
}
@@ -297,6 +397,8 @@ dma_error:
twd = &ring->desc[tail];
if (tail == first)
fbnic_unmap_single_twd(dev, twd);
+ else if (ring->tx_buf[tail] == FBNIC_XMIT_NOUNMAP)
+ ring->tx_buf[tail] = NULL;
else
fbnic_unmap_page_twd(dev, twd);
}
@@ -329,7 +431,9 @@ fbnic_xmit_frame_ring(struct sk_buff *skb, struct fbnic_ring *ring)
/* Write all members within DWORD to condense this into 2 4B writes */
FBNIC_XMIT_CB(skb)->bytecount = skb->len;
+ FBNIC_XMIT_CB(skb)->gso_segs = 1;
FBNIC_XMIT_CB(skb)->desc_count = 0;
+ FBNIC_XMIT_CB(skb)->flags = 0;
if (fbnic_tx_offloads(ring, skb, meta))
goto err_free;
@@ -356,6 +460,59 @@ netdev_tx_t fbnic_xmit_frame(struct sk_buff *skb, struct net_device *dev)
return fbnic_xmit_frame_ring(skb, fbn->tx[q_map]);
}
+static netdev_features_t
+fbnic_features_check_encap_gso(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features, unsigned int l3len)
+{
+ netdev_features_t skb_gso_features;
+ struct ipv6hdr *ip6_hdr;
+ unsigned char l4_hdr;
+ unsigned int start;
+ __be16 frag_off;
+
+ /* Require MANGLEID for GSO_PARTIAL of IPv4.
+	 * In theory we could support TSO with a single, innermost v4 header
+	 * by pretending everything before it is L2, but that needs to be
+	 * parsed case by case, so leaving it for when the need arises.
+ */
+ if (!(features & NETIF_F_TSO_MANGLEID))
+ features &= ~NETIF_F_TSO;
+
+ skb_gso_features = skb_shinfo(skb)->gso_type;
+ skb_gso_features <<= NETIF_F_GSO_SHIFT;
+
+ /* We'd only clear the native GSO features, so don't bother validating
+ * if the match can only be on those supported thru GSO_PARTIAL.
+ */
+ if (!(skb_gso_features & FBNIC_TUN_GSO_FEATURES))
+ return features;
+
+ /* We can only do IPv6-in-IPv6, not v4-in-v6. It'd be nice
+ * to fall back to partial for this, or any failure below.
+ * This is just an optimization, UDPv4 will be caught later on.
+ */
+ if (skb_gso_features & NETIF_F_TSO)
+ return features & ~FBNIC_TUN_GSO_FEATURES;
+
+ /* Inner headers multiple of 2 */
+ if ((skb_inner_network_header(skb) - skb_network_header(skb)) % 2)
+ return features & ~FBNIC_TUN_GSO_FEATURES;
+
+ /* Encapsulated GSO packet, make 100% sure it's IPv6-in-IPv6. */
+ ip6_hdr = ipv6_hdr(skb);
+ if (ip6_hdr->version != 6)
+ return features & ~FBNIC_TUN_GSO_FEATURES;
+
+ l4_hdr = ip6_hdr->nexthdr;
+ start = (unsigned char *)ip6_hdr - skb->data + sizeof(struct ipv6hdr);
+ start = ipv6_skip_exthdr(skb, start, &l4_hdr, &frag_off);
+ if (frag_off || l4_hdr != IPPROTO_IPV6 ||
+ skb->data + start != skb_inner_network_header(skb))
+ return features & ~FBNIC_TUN_GSO_FEATURES;
+
+ return features;
+}
+
netdev_features_t
fbnic_features_check(struct sk_buff *skb, struct net_device *dev,
netdev_features_t features)
@@ -376,9 +533,12 @@ fbnic_features_check(struct sk_buff *skb, struct net_device *dev,
!FIELD_FIT(FBNIC_TWD_L2_HLEN_MASK, l2len / 2) ||
!FIELD_FIT(FBNIC_TWD_L3_IHLEN_MASK, l3len / 2) ||
!FIELD_FIT(FBNIC_TWD_CSUM_OFFSET_MASK, skb->csum_offset / 2))
- return features & ~NETIF_F_CSUM_MASK;
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
- return features;
+ if (likely(!skb->encapsulation) || !skb_is_gso(skb))
+ return features;
+
+ return fbnic_features_check_encap_gso(skb, dev, features, l3len);
}
static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
@@ -423,13 +583,17 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
desc_cnt--;
while (desc_cnt--) {
- fbnic_unmap_page_twd(nv->dev, &ring->desc[head]);
+ if (ring->tx_buf[head] != FBNIC_XMIT_NOUNMAP)
+ fbnic_unmap_page_twd(nv->dev,
+ &ring->desc[head]);
+ else
+ ring->tx_buf[head] = NULL;
head++;
head &= ring->size_mask;
}
total_bytes += FBNIC_XMIT_CB(skb)->bytecount;
- total_packets += 1;
+ total_packets += FBNIC_XMIT_CB(skb)->gso_segs;
napi_consume_skb(skb, napi_budget);
}
@@ -444,7 +608,7 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
if (unlikely(discard)) {
u64_stats_update_begin(&ring->stats.syncp);
ring->stats.dropped += total_packets;
- ring->stats.ts_lost += ts_lost;
+ ring->stats.twq.ts_lost += ts_lost;
u64_stats_update_end(&ring->stats.syncp);
netdev_tx_completed_queue(txq, total_packets, total_bytes);
@@ -456,9 +620,62 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
ring->stats.packets += total_packets;
u64_stats_update_end(&ring->stats.syncp);
- netif_txq_completed_wake(txq, total_packets, total_bytes,
- fbnic_desc_unused(ring),
- FBNIC_TX_DESC_WAKEUP);
+ if (!netif_txq_completed_wake(txq, total_packets, total_bytes,
+ fbnic_desc_unused(ring),
+ FBNIC_TX_DESC_WAKEUP)) {
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.twq.wake++;
+ u64_stats_update_end(&ring->stats.syncp);
+ }
+}
+
+static void fbnic_clean_twq1(struct fbnic_napi_vector *nv, bool pp_allow_direct,
+ struct fbnic_ring *ring, bool discard,
+ unsigned int hw_head)
+{
+ u64 total_bytes = 0, total_packets = 0;
+ unsigned int head = ring->head;
+
+ while (hw_head != head) {
+ struct page *page;
+ u64 twd;
+
+ if (unlikely(!(ring->desc[head] & FBNIC_TWD_TYPE(AL))))
+ goto next_desc;
+
+ twd = le64_to_cpu(ring->desc[head]);
+ page = ring->tx_buf[head];
+
+ /* TYPE_AL is 2, TYPE_LAST_AL is 3. So this trick gives
+ * us one increment per packet, with no branches.
+ */
+ total_packets += FIELD_GET(FBNIC_TWD_TYPE_MASK, twd) -
+ FBNIC_TWD_TYPE_AL;
+ total_bytes += FIELD_GET(FBNIC_TWD_LEN_MASK, twd);
+
+ page_pool_put_page(pp_page_to_nmdesc(page)->pp, page, -1,
+ pp_allow_direct);
+next_desc:
+ head++;
+ head &= ring->size_mask;
+ }
+
+ if (!total_bytes)
+ return;
+
+ ring->head = head;
+
+ if (discard) {
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.dropped += total_packets;
+ u64_stats_update_end(&ring->stats.syncp);
+ return;
+ }
+
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.bytes += total_bytes;
+ ring->stats.packets += total_packets;
+ u64_stats_update_end(&ring->stats.syncp);
}
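The type arithmetic in fbnic_clean_twq1() above works because TYPE_AL and TYPE_LAST_AL are consecutive values (2 and 3): subtracting TYPE_AL contributes 0 for a continuation descriptor and 1 for the final descriptor of a frame, so packets are counted without a branch. A small stand-alone sketch of the same accumulation, with stand-in type values:

    /* Hedged sketch with stand-in values; not the driver's real field layout. */
    #include <stdio.h>

    #define DEMO_TYPE_AL      2u   /* continuation descriptor          */
    #define DEMO_TYPE_LAST_AL 3u   /* last descriptor of a frame       */

    int main(void)
    {
            /* Two frames: one spanning three descriptors, one spanning one. */
            unsigned int types[] = { DEMO_TYPE_AL, DEMO_TYPE_AL, DEMO_TYPE_LAST_AL,
                                     DEMO_TYPE_LAST_AL };
            unsigned long total_packets = 0;
            size_t i;

            for (i = 0; i < sizeof(types) / sizeof(types[0]); i++)
                    total_packets += types[i] - DEMO_TYPE_AL;  /* +0 or +1, no branch */

            printf("packets: %lu\n", total_packets);  /* prints 2 */
            return 0;
    }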
static void fbnic_clean_tsq(struct fbnic_napi_vector *nv,
@@ -507,49 +724,70 @@ static void fbnic_clean_tsq(struct fbnic_napi_vector *nv,
skb_tstamp_tx(skb, &hwtstamp);
u64_stats_update_begin(&ring->stats.syncp);
- ring->stats.ts_packets++;
+ ring->stats.twq.ts_packets++;
u64_stats_update_end(&ring->stats.syncp);
}
static void fbnic_page_pool_init(struct fbnic_ring *ring, unsigned int idx,
- struct page *page)
+ netmem_ref netmem)
{
struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
- page_pool_fragment_page(page, PAGECNT_BIAS_MAX);
- rx_buf->pagecnt_bias = PAGECNT_BIAS_MAX;
- rx_buf->page = page;
+ page_pool_fragment_netmem(netmem, FBNIC_PAGECNT_BIAS_MAX);
+ rx_buf->pagecnt_bias = FBNIC_PAGECNT_BIAS_MAX;
+ rx_buf->netmem = netmem;
}
-static struct page *fbnic_page_pool_get(struct fbnic_ring *ring,
- unsigned int idx)
+static struct page *
+fbnic_page_pool_get_head(struct fbnic_q_triad *qt, unsigned int idx)
{
- struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
+ struct fbnic_rx_buf *rx_buf = &qt->sub0.rx_buf[idx];
rx_buf->pagecnt_bias--;
- return rx_buf->page;
+ /* sub0 is always fed system pages, from the NAPI-level page_pool */
+ return netmem_to_page(rx_buf->netmem);
+}
+
+static netmem_ref
+fbnic_page_pool_get_data(struct fbnic_q_triad *qt, unsigned int idx)
+{
+ struct fbnic_rx_buf *rx_buf = &qt->sub1.rx_buf[idx];
+
+ rx_buf->pagecnt_bias--;
+
+ return rx_buf->netmem;
}
static void fbnic_page_pool_drain(struct fbnic_ring *ring, unsigned int idx,
- struct fbnic_napi_vector *nv, int budget)
+ int budget)
{
struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
- struct page *page = rx_buf->page;
+ netmem_ref netmem = rx_buf->netmem;
- if (!page_pool_unref_page(page, rx_buf->pagecnt_bias))
- page_pool_put_unrefed_page(nv->page_pool, page, -1, !!budget);
+ if (!page_pool_unref_netmem(netmem, rx_buf->pagecnt_bias))
+ page_pool_put_unrefed_netmem(ring->page_pool, netmem, -1,
+ !!budget);
- rx_buf->page = NULL;
+ rx_buf->netmem = 0;
}
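The pagecnt_bias handling above (fragment the buffer by FBNIC_PAGECNT_BIAS_MAX up front, decrement the bias each time a reference is handed out, return the leftover bias in one call at drain time) avoids touching the page reference count on every frame. A simplified sketch of the bookkeeping, with plain counters standing in for the page_pool netmem helpers:

    /* Hedged sketch: plain integers stand in for page_pool reference counts. */
    #include <stdio.h>

    #define DEMO_BIAS_MAX 4096L   /* plays the role of FBNIC_PAGECNT_BIAS_MAX */

    struct demo_buf {
            long pool_refs;      /* references the pool believes are outstanding */
            long pagecnt_bias;   /* references the driver has not yet handed out */
    };

    static void demo_init(struct demo_buf *buf)
    {
            buf->pool_refs = DEMO_BIAS_MAX;    /* like page_pool_fragment_netmem() */
            buf->pagecnt_bias = DEMO_BIAS_MAX;
    }

    static void demo_get(struct demo_buf *buf)
    {
            buf->pagecnt_bias--;               /* one reference handed to the stack */
    }

    static void demo_drain(struct demo_buf *buf)
    {
            /* Return every reference that was never handed out; the buffer is
             * only truly free once the handed-out references also come back.
             */
            buf->pool_refs -= buf->pagecnt_bias;
            buf->pagecnt_bias = 0;
            printf("refs still outstanding: %ld\n", buf->pool_refs);
    }

    int main(void)
    {
            struct demo_buf buf;

            demo_init(&buf);
            demo_get(&buf);   /* e.g. one fragment attached to an skb */
            demo_get(&buf);
            demo_drain(&buf); /* prints 2: the two handed-out references remain */
            return 0;
    }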
static void fbnic_clean_twq(struct fbnic_napi_vector *nv, int napi_budget,
- struct fbnic_q_triad *qt, s32 ts_head, s32 head0)
+ struct fbnic_q_triad *qt, s32 ts_head, s32 head0,
+ s32 head1)
{
if (head0 >= 0)
fbnic_clean_twq0(nv, napi_budget, &qt->sub0, false, head0);
else if (ts_head >= 0)
fbnic_clean_twq0(nv, napi_budget, &qt->sub0, false, ts_head);
+
+ if (head1 >= 0) {
+ qt->cmpl.deferred_head = -1;
+ if (napi_budget)
+ fbnic_clean_twq1(nv, true, &qt->sub1, false, head1);
+ else
+ qt->cmpl.deferred_head = head1;
+ }
}
static void
@@ -557,6 +795,7 @@ fbnic_clean_tcq(struct fbnic_napi_vector *nv, struct fbnic_q_triad *qt,
int napi_budget)
{
struct fbnic_ring *cmpl = &qt->cmpl;
+ s32 head1 = cmpl->deferred_head;
s32 head0 = -1, ts_head = -1;
__le64 *raw_tcd, done;
u32 head = cmpl->head;
@@ -574,7 +813,10 @@ fbnic_clean_tcq(struct fbnic_napi_vector *nv, struct fbnic_q_triad *qt,
switch (FIELD_GET(FBNIC_TCD_TYPE_MASK, tcd)) {
case FBNIC_TCD_TYPE_0:
- if (!(tcd & FBNIC_TCD_TWQ1))
+ if (tcd & FBNIC_TCD_TWQ1)
+ head1 = FIELD_GET(FBNIC_TCD_TYPE0_HEAD1_MASK,
+ tcd);
+ else
head0 = FIELD_GET(FBNIC_TCD_TYPE0_HEAD0_MASK,
tcd);
/* Currently all err status bits are related to
@@ -607,11 +849,11 @@ fbnic_clean_tcq(struct fbnic_napi_vector *nv, struct fbnic_q_triad *qt,
}
/* Unmap and free processed buffers */
- fbnic_clean_twq(nv, napi_budget, qt, ts_head, head0);
+ fbnic_clean_twq(nv, napi_budget, qt, ts_head, head0, head1);
}
-static void fbnic_clean_bdq(struct fbnic_napi_vector *nv, int napi_budget,
- struct fbnic_ring *ring, unsigned int hw_head)
+static void fbnic_clean_bdq(struct fbnic_ring *ring, unsigned int hw_head,
+ int napi_budget)
{
unsigned int head = ring->head;
@@ -619,7 +861,7 @@ static void fbnic_clean_bdq(struct fbnic_napi_vector *nv, int napi_budget,
return;
do {
- fbnic_page_pool_drain(ring, head, nv, napi_budget);
+ fbnic_page_pool_drain(ring, head, napi_budget);
head++;
head &= ring->size_mask;
@@ -628,10 +870,10 @@ static void fbnic_clean_bdq(struct fbnic_napi_vector *nv, int napi_budget,
ring->head = head;
}
-static void fbnic_bd_prep(struct fbnic_ring *bdq, u16 id, struct page *page)
+static void fbnic_bd_prep(struct fbnic_ring *bdq, u16 id, netmem_ref netmem)
{
__le64 *bdq_desc = &bdq->desc[id * FBNIC_BD_FRAG_COUNT];
- dma_addr_t dma = page_pool_get_dma_addr(page);
+ dma_addr_t dma = page_pool_get_dma_addr_netmem(netmem);
u64 bd, i = FBNIC_BD_FRAG_COUNT;
bd = (FBNIC_BD_PAGE_ADDR_MASK & dma) |
@@ -646,10 +888,11 @@ static void fbnic_bd_prep(struct fbnic_ring *bdq, u16 id, struct page *page)
*bdq_desc = cpu_to_le64(bd);
bd += FIELD_PREP(FBNIC_BD_DESC_ADDR_MASK, 1) |
FIELD_PREP(FBNIC_BD_DESC_ID_MASK, 1);
+ bdq_desc++;
} while (--i);
}
-static void fbnic_fill_bdq(struct fbnic_napi_vector *nv, struct fbnic_ring *bdq)
+static void fbnic_fill_bdq(struct fbnic_ring *bdq)
{
unsigned int count = fbnic_desc_unused(bdq);
unsigned int i = bdq->tail;
@@ -658,14 +901,19 @@ static void fbnic_fill_bdq(struct fbnic_napi_vector *nv, struct fbnic_ring *bdq)
return;
do {
- struct page *page;
+ netmem_ref netmem;
+
+ netmem = page_pool_dev_alloc_netmems(bdq->page_pool);
+ if (!netmem) {
+ u64_stats_update_begin(&bdq->stats.syncp);
+ bdq->stats.bdq.alloc_failed++;
+ u64_stats_update_end(&bdq->stats.syncp);
- page = page_pool_dev_alloc_pages(nv->page_pool);
- if (!page)
break;
+ }
- fbnic_page_pool_init(bdq, i, page);
- fbnic_bd_prep(bdq, i, page);
+ fbnic_page_pool_init(bdq, i, netmem);
+ fbnic_bd_prep(bdq, i, netmem);
i++;
i &= bdq->size_mask;
@@ -712,7 +960,7 @@ static void fbnic_pkt_prepare(struct fbnic_napi_vector *nv, u64 rcd,
{
unsigned int hdr_pg_idx = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd);
unsigned int hdr_pg_off = FIELD_GET(FBNIC_RCD_AL_BUFF_OFF_MASK, rcd);
- struct page *page = fbnic_page_pool_get(&qt->sub0, hdr_pg_idx);
+ struct page *page = fbnic_page_pool_get_head(qt, hdr_pg_idx);
unsigned int len = FIELD_GET(FBNIC_RCD_AL_BUFF_LEN_MASK, rcd);
unsigned int frame_sz, hdr_pg_start, hdr_pg_end, headroom;
unsigned char *hdr_start;
@@ -727,7 +975,7 @@ static void fbnic_pkt_prepare(struct fbnic_napi_vector *nv, u64 rcd,
headroom = hdr_pg_off - hdr_pg_start + FBNIC_RX_PAD;
frame_sz = hdr_pg_end - hdr_pg_start;
- xdp_init_buff(&pkt->buff, frame_sz, NULL);
+ xdp_init_buff(&pkt->buff, frame_sz, &qt->xdp_rxq);
hdr_pg_start += (FBNIC_RCD_AL_BUFF_FRAG_MASK & rcd) *
FBNIC_BD_FRAG_SIZE;
@@ -738,13 +986,12 @@ static void fbnic_pkt_prepare(struct fbnic_napi_vector *nv, u64 rcd,
/* Build frame around buffer */
hdr_start = page_address(page) + hdr_pg_start;
-
+ net_prefetch(pkt->buff.data);
xdp_prepare_buff(&pkt->buff, hdr_start, headroom,
len - FBNIC_RX_PAD, true);
- pkt->data_truesize = 0;
- pkt->data_len = 0;
- pkt->nr_frags = 0;
+ pkt->hwtstamp = 0;
+ pkt->add_frag_failed = false;
}
static void fbnic_add_rx_frag(struct fbnic_napi_vector *nv, u64 rcd,
@@ -754,9 +1001,9 @@ static void fbnic_add_rx_frag(struct fbnic_napi_vector *nv, u64 rcd,
unsigned int pg_idx = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd);
unsigned int pg_off = FIELD_GET(FBNIC_RCD_AL_BUFF_OFF_MASK, rcd);
unsigned int len = FIELD_GET(FBNIC_RCD_AL_BUFF_LEN_MASK, rcd);
- struct page *page = fbnic_page_pool_get(&qt->sub1, pg_idx);
- struct skb_shared_info *shinfo;
+ netmem_ref netmem = fbnic_page_pool_get_data(qt, pg_idx);
unsigned int truesize;
+ bool added;
truesize = FIELD_GET(FBNIC_RCD_AL_PAGE_FIN, rcd) ?
FBNIC_BD_FRAG_SIZE - pg_off : ALIGN(len, 128);
@@ -765,88 +1012,171 @@ static void fbnic_add_rx_frag(struct fbnic_napi_vector *nv, u64 rcd,
FBNIC_BD_FRAG_SIZE;
/* Sync DMA buffer */
- dma_sync_single_range_for_cpu(nv->dev, page_pool_get_dma_addr(page),
- pg_off, truesize, DMA_BIDIRECTIONAL);
-
- /* Add page to xdp shared info */
- shinfo = xdp_get_shared_info_from_buff(&pkt->buff);
-
- /* We use gso_segs to store truesize */
- pkt->data_truesize += truesize;
-
- __skb_fill_page_desc_noacc(shinfo, pkt->nr_frags++, page, pg_off, len);
-
- /* Store data_len in gso_size */
- pkt->data_len += len;
+ page_pool_dma_sync_netmem_for_cpu(qt->sub1.page_pool, netmem,
+ pg_off, truesize);
+
+ added = xdp_buff_add_frag(&pkt->buff, netmem, pg_off, len, truesize);
+ if (unlikely(!added)) {
+ pkt->add_frag_failed = true;
+ netdev_err_once(nv->napi.dev,
+ "Failed to add fragment to xdp_buff\n");
+ }
}
-static void fbnic_put_pkt_buff(struct fbnic_napi_vector *nv,
+static void fbnic_put_pkt_buff(struct fbnic_q_triad *qt,
struct fbnic_pkt_buff *pkt, int budget)
{
- struct skb_shared_info *shinfo;
struct page *page;
- int nr_frags;
if (!pkt->buff.data_hard_start)
return;
- shinfo = xdp_get_shared_info_from_buff(&pkt->buff);
- nr_frags = pkt->nr_frags;
+ if (xdp_buff_has_frags(&pkt->buff)) {
+ struct skb_shared_info *shinfo;
+ netmem_ref netmem;
+ int nr_frags;
- while (nr_frags--) {
- page = skb_frag_page(&shinfo->frags[nr_frags]);
- page_pool_put_full_page(nv->page_pool, page, !!budget);
+ shinfo = xdp_get_shared_info_from_buff(&pkt->buff);
+ nr_frags = shinfo->nr_frags;
+
+ while (nr_frags--) {
+ netmem = skb_frag_netmem(&shinfo->frags[nr_frags]);
+ page_pool_put_full_netmem(qt->sub1.page_pool, netmem,
+ !!budget);
+ }
}
page = virt_to_page(pkt->buff.data_hard_start);
- page_pool_put_full_page(nv->page_pool, page, !!budget);
+ page_pool_put_full_page(qt->sub0.page_pool, page, !!budget);
}
static struct sk_buff *fbnic_build_skb(struct fbnic_napi_vector *nv,
struct fbnic_pkt_buff *pkt)
{
- unsigned int nr_frags = pkt->nr_frags;
- struct skb_shared_info *shinfo;
- unsigned int truesize;
struct sk_buff *skb;
- truesize = xdp_data_hard_end(&pkt->buff) + FBNIC_RX_TROOM -
- pkt->buff.data_hard_start;
-
- /* Build frame around buffer */
- skb = napi_build_skb(pkt->buff.data_hard_start, truesize);
- if (unlikely(!skb))
+ skb = xdp_build_skb_from_buff(&pkt->buff);
+ if (!skb)
return NULL;
- /* Push data pointer to start of data, put tail to end of data */
- skb_reserve(skb, pkt->buff.data - pkt->buff.data_hard_start);
- __skb_put(skb, pkt->buff.data_end - pkt->buff.data);
+ /* Add timestamp if present */
+ if (pkt->hwtstamp)
+ skb_hwtstamps(skb)->hwtstamp = pkt->hwtstamp;
- /* Add tracking for metadata at the start of the frame */
- skb_metadata_set(skb, pkt->buff.data - pkt->buff.data_meta);
+ return skb;
+}
- /* Add Rx frags */
- if (nr_frags) {
- /* Verify that shared info didn't move */
+static long fbnic_pkt_tx(struct fbnic_napi_vector *nv,
+ struct fbnic_pkt_buff *pkt)
+{
+ struct fbnic_ring *ring = &nv->qt[0].sub1;
+ int size, offset, nsegs = 1, data_len = 0;
+ unsigned int tail = ring->tail;
+ struct skb_shared_info *shinfo;
+ skb_frag_t *frag = NULL;
+ struct page *page;
+ dma_addr_t dma;
+ __le64 *twd;
+
+ if (unlikely(xdp_buff_has_frags(&pkt->buff))) {
shinfo = xdp_get_shared_info_from_buff(&pkt->buff);
- WARN_ON(skb_shinfo(skb) != shinfo);
+ nsegs += shinfo->nr_frags;
+ data_len = shinfo->xdp_frags_size;
+ frag = &shinfo->frags[0];
+ }
- skb->truesize += pkt->data_truesize;
- skb->data_len += pkt->data_len;
- shinfo->nr_frags = nr_frags;
- skb->len += pkt->data_len;
+ if (fbnic_desc_unused(ring) < nsegs) {
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.dropped++;
+ u64_stats_update_end(&ring->stats.syncp);
+ return -FBNIC_XDP_CONSUME;
}
- skb_mark_for_recycle(skb);
+ page = virt_to_page(pkt->buff.data_hard_start);
+ offset = offset_in_page(pkt->buff.data);
+ dma = page_pool_get_dma_addr(page);
- /* Set MAC header specific fields */
- skb->protocol = eth_type_trans(skb, nv->napi.dev);
+ size = pkt->buff.data_end - pkt->buff.data;
- /* Add timestamp if present */
- if (pkt->hwtstamp)
- skb_hwtstamps(skb)->hwtstamp = pkt->hwtstamp;
+ while (nsegs--) {
+ dma_sync_single_range_for_device(nv->dev, dma, offset, size,
+ DMA_BIDIRECTIONAL);
+ dma += offset;
- return skb;
+ ring->tx_buf[tail] = page;
+
+ twd = &ring->desc[tail];
+ *twd = cpu_to_le64(FIELD_PREP(FBNIC_TWD_ADDR_MASK, dma) |
+ FIELD_PREP(FBNIC_TWD_LEN_MASK, size) |
+ FIELD_PREP(FBNIC_TWD_TYPE_MASK,
+ FBNIC_TWD_TYPE_AL));
+
+ tail++;
+ tail &= ring->size_mask;
+
+ if (!data_len)
+ break;
+
+ offset = skb_frag_off(frag);
+ page = skb_frag_page(frag);
+ dma = page_pool_get_dma_addr(page);
+
+ size = skb_frag_size(frag);
+ data_len -= size;
+ frag++;
+ }
+
+ *twd |= FBNIC_TWD_TYPE(LAST_AL);
+
+ ring->tail = tail;
+
+ return -FBNIC_XDP_TX;
+}
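fbnic_pkt_tx() above advances the descriptor tail with `tail &= ring->size_mask`, which only works because the ring length is a power of two; the mask replaces a modulo and never branches. A tiny sketch of the wrap, assuming a hypothetical 8-entry ring:

    /* Hedged sketch: mask-based index wrap for a power-of-two ring. */
    #include <stdio.h>

    int main(void)
    {
            unsigned int size = 8;              /* hypothetical ring size (power of two) */
            unsigned int size_mask = size - 1;  /* 0x7 */
            unsigned int tail = 6;
            int i;

            for (i = 0; i < 4; i++) {
                    printf("tail = %u\n", tail);
                    tail++;
                    tail &= size_mask;          /* prints 6, 7, 0, 1 - wraps with no branch */
            }
            return 0;
    }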
+
+static void fbnic_pkt_commit_tail(struct fbnic_napi_vector *nv,
+ unsigned int pkt_tail)
+{
+ struct fbnic_ring *ring = &nv->qt[0].sub1;
+
+ /* Force DMA writes to flush before writing to tail */
+ dma_wmb();
+
+ writel(pkt_tail, ring->doorbell);
+}
+
+static struct sk_buff *fbnic_run_xdp(struct fbnic_napi_vector *nv,
+ struct fbnic_pkt_buff *pkt)
+{
+ struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
+ struct bpf_prog *xdp_prog;
+ int act;
+
+ xdp_prog = READ_ONCE(fbn->xdp_prog);
+ if (!xdp_prog)
+ goto xdp_pass;
+
+ /* Should never happen; config paths enforce HDS threshold > MTU */
+ if (xdp_buff_has_frags(&pkt->buff) && !xdp_prog->aux->xdp_has_frags)
+ return ERR_PTR(-FBNIC_XDP_LEN_ERR);
+
+ act = bpf_prog_run_xdp(xdp_prog, &pkt->buff);
+ switch (act) {
+ case XDP_PASS:
+xdp_pass:
+ return fbnic_build_skb(nv, pkt);
+ case XDP_TX:
+ return ERR_PTR(fbnic_pkt_tx(nv, pkt));
+ default:
+ bpf_warn_invalid_xdp_action(nv->napi.dev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(nv->napi.dev, xdp_prog, act);
+ fallthrough;
+ case XDP_DROP:
+ break;
+ }
+
+ return ERR_PTR(-FBNIC_XDP_CONSUME);
}
static enum pkt_hash_types fbnic_skb_hash_type(u64 rcd)
@@ -875,12 +1205,13 @@ static void fbnic_rx_tstamp(struct fbnic_napi_vector *nv, u64 rcd,
static void fbnic_populate_skb_fields(struct fbnic_napi_vector *nv,
u64 rcd, struct sk_buff *skb,
- struct fbnic_q_triad *qt)
+ struct fbnic_q_triad *qt,
+ u64 *csum_cmpl, u64 *csum_none)
{
struct net_device *netdev = nv->napi.dev;
struct fbnic_ring *rcq = &qt->cmpl;
- fbnic_rx_csum(rcd, skb, rcq);
+ fbnic_rx_csum(rcd, skb, rcq, csum_cmpl, csum_none);
if (netdev->features & NETIF_F_RXHASH)
skb_set_hash(skb,
@@ -898,10 +1229,11 @@ static bool fbnic_rcd_metadata_err(u64 rcd)
static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
struct fbnic_q_triad *qt, int budget)
{
- unsigned int packets = 0, bytes = 0, dropped = 0;
+ unsigned int packets = 0, bytes = 0, dropped = 0, alloc_failed = 0;
+ u64 csum_complete = 0, csum_none = 0, length_errors = 0;
+ s32 head0 = -1, head1 = -1, pkt_tail = -1;
struct fbnic_ring *rcq = &qt->cmpl;
struct fbnic_pkt_buff *pkt;
- s32 head0 = -1, head1 = -1;
__le64 *raw_rcd, done;
u32 head = rcq->head;
@@ -912,6 +1244,7 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
/* Walk the completion queue collecting the heads reported by NIC */
while (likely(packets < budget)) {
struct sk_buff *skb = ERR_PTR(-EINVAL);
+ u32 pkt_bytes;
u64 rcd;
if ((*raw_rcd & cpu_to_le64(FBNIC_RCD_DONE)) == done)
@@ -942,22 +1275,38 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
/* We currently ignore the action table index */
break;
case FBNIC_RCD_TYPE_META:
- if (likely(!fbnic_rcd_metadata_err(rcd)))
- skb = fbnic_build_skb(nv, pkt);
+ if (likely(!fbnic_rcd_metadata_err(rcd) &&
+ !pkt->add_frag_failed)) {
+ pkt_bytes = xdp_get_buff_len(&pkt->buff);
+ skb = fbnic_run_xdp(nv, pkt);
+ }
/* Populate skb and invalidate XDP */
if (!IS_ERR_OR_NULL(skb)) {
- fbnic_populate_skb_fields(nv, rcd, skb, qt);
-
- packets++;
- bytes += skb->len;
-
+ fbnic_populate_skb_fields(nv, rcd, skb, qt,
+ &csum_complete,
+ &csum_none);
napi_gro_receive(&nv->napi, skb);
+ } else if (skb == ERR_PTR(-FBNIC_XDP_TX)) {
+ pkt_tail = nv->qt[0].sub1.tail;
+ } else if (PTR_ERR(skb) == -FBNIC_XDP_CONSUME) {
+ fbnic_put_pkt_buff(qt, pkt, 1);
} else {
- dropped++;
- fbnic_put_pkt_buff(nv, pkt, 1);
+ if (!skb)
+ alloc_failed++;
+
+ if (skb == ERR_PTR(-FBNIC_XDP_LEN_ERR))
+ length_errors++;
+ else
+ dropped++;
+
+ fbnic_put_pkt_buff(qt, pkt, 1);
+ goto next_dont_count;
}
+ packets++;
+ bytes += pkt_bytes;
+next_dont_count:
pkt->buff.data_hard_start = NULL;
break;
@@ -974,19 +1323,24 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
u64_stats_update_begin(&rcq->stats.syncp);
rcq->stats.packets += packets;
rcq->stats.bytes += bytes;
- /* Re-add ethernet header length (removed in fbnic_build_skb) */
- rcq->stats.bytes += ETH_HLEN * packets;
rcq->stats.dropped += dropped;
+ rcq->stats.rx.alloc_failed += alloc_failed;
+ rcq->stats.rx.csum_complete += csum_complete;
+ rcq->stats.rx.csum_none += csum_none;
+ rcq->stats.rx.length_errors += length_errors;
u64_stats_update_end(&rcq->stats.syncp);
+ if (pkt_tail >= 0)
+ fbnic_pkt_commit_tail(nv, pkt_tail);
+
/* Unmap and free processed buffers */
if (head0 >= 0)
- fbnic_clean_bdq(nv, budget, &qt->sub0, head0);
- fbnic_fill_bdq(nv, &qt->sub0);
+ fbnic_clean_bdq(&qt->sub0, head0, budget);
+ fbnic_fill_bdq(&qt->sub0);
if (head1 >= 0)
- fbnic_clean_bdq(nv, budget, &qt->sub1, head1);
- fbnic_fill_bdq(nv, &qt->sub1);
+ fbnic_clean_bdq(&qt->sub1, head1, budget);
+ fbnic_fill_bdq(&qt->sub1);
/* Record the current head/tail of the queue */
if (rcq->head != head) {
@@ -1033,20 +1387,20 @@ static int fbnic_poll(struct napi_struct *napi, int budget)
if (likely(napi_complete_done(napi, work_done)))
fbnic_nv_irq_rearm(nv);
- return 0;
+ return work_done;
}
-static irqreturn_t fbnic_msix_clean_rings(int __always_unused irq, void *data)
+irqreturn_t fbnic_msix_clean_rings(int __always_unused irq, void *data)
{
- struct fbnic_napi_vector *nv = data;
+ struct fbnic_napi_vector *nv = *(void **)data;
napi_schedule_irqoff(&nv->napi);
return IRQ_HANDLED;
}
-static void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
- struct fbnic_ring *rxr)
+void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *rxr)
{
struct fbnic_queue_stats *stats = &rxr->stats;
@@ -1054,10 +1408,27 @@ static void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
fbn->rx_stats.bytes += stats->bytes;
fbn->rx_stats.packets += stats->packets;
fbn->rx_stats.dropped += stats->dropped;
+ fbn->rx_stats.rx.alloc_failed += stats->rx.alloc_failed;
+ fbn->rx_stats.rx.csum_complete += stats->rx.csum_complete;
+ fbn->rx_stats.rx.csum_none += stats->rx.csum_none;
+ fbn->rx_stats.rx.length_errors += stats->rx.length_errors;
+ /* Remember to add new stats here */
+ BUILD_BUG_ON(sizeof(fbn->rx_stats.rx) / 8 != 4);
}
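The BUILD_BUG_ON above is a field-count guard: every member of the rx stats group is a u64, so sizeof()/8 equals the number of counters, and the build breaks as soon as a counter is added without updating this aggregation helper. A minimal sketch of the same idiom outside the kernel, using C11 _Static_assert:

    /* Hedged sketch of the sizeof()/8 field-count idiom. */
    #include <stdint.h>

    struct demo_rx_stats {
            uint64_t alloc_failed;
            uint64_t csum_complete;
            uint64_t csum_none;
            uint64_t length_errors;
    };

    /* Adding a counter without bumping the expected count breaks the build,
     * which is the point: it forces the aggregation code to be revisited.
     */
    _Static_assert(sizeof(struct demo_rx_stats) / 8 == 4,
                   "update the aggregation code when adding rx counters");

    int main(void)
    {
            return 0;
    }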
-static void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
- struct fbnic_ring *txr)
+void fbnic_aggregate_ring_bdq_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *bdq)
+{
+ struct fbnic_queue_stats *stats = &bdq->stats;
+
+ /* Capture stats from queues before disassociating them */
+ fbn->bdq_stats.bdq.alloc_failed += stats->bdq.alloc_failed;
+ /* Remember to add new stats here */
+ BUILD_BUG_ON(sizeof(fbn->rx_stats.bdq) / 8 != 1);
+}
+
+void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *txr)
{
struct fbnic_queue_stats *stats = &txr->stats;
@@ -1065,8 +1436,28 @@ static void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
fbn->tx_stats.bytes += stats->bytes;
fbn->tx_stats.packets += stats->packets;
fbn->tx_stats.dropped += stats->dropped;
- fbn->tx_stats.ts_lost += stats->ts_lost;
- fbn->tx_stats.ts_packets += stats->ts_packets;
+ fbn->tx_stats.twq.csum_partial += stats->twq.csum_partial;
+ fbn->tx_stats.twq.lso += stats->twq.lso;
+ fbn->tx_stats.twq.ts_lost += stats->twq.ts_lost;
+ fbn->tx_stats.twq.ts_packets += stats->twq.ts_packets;
+ fbn->tx_stats.twq.stop += stats->twq.stop;
+ fbn->tx_stats.twq.wake += stats->twq.wake;
+ /* Remember to add new stats here */
+ BUILD_BUG_ON(sizeof(fbn->tx_stats.twq) / 8 != 6);
+}
+
+void fbnic_aggregate_ring_xdp_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *xdpr)
+{
+ struct fbnic_queue_stats *stats = &xdpr->stats;
+
+ if (!(xdpr->flags & FBNIC_RING_F_STATS))
+ return;
+
+ /* Capture stats from queues before disassociating them */
+ fbn->tx_stats.dropped += stats->dropped;
+ fbn->tx_stats.bytes += stats->bytes;
+ fbn->tx_stats.packets += stats->packets;
}
static void fbnic_remove_tx_ring(struct fbnic_net *fbn,
@@ -1082,6 +1473,19 @@ static void fbnic_remove_tx_ring(struct fbnic_net *fbn,
fbn->tx[txr->q_idx] = NULL;
}
+static void fbnic_remove_xdp_ring(struct fbnic_net *fbn,
+ struct fbnic_ring *xdpr)
+{
+ if (!(xdpr->flags & FBNIC_RING_F_STATS))
+ return;
+
+ fbnic_aggregate_ring_xdp_counters(fbn, xdpr);
+
+ /* Remove pointer to the Tx ring */
+ WARN_ON(fbn->tx[xdpr->q_idx] && fbn->tx[xdpr->q_idx] != xdpr);
+ fbn->tx[xdpr->q_idx] = NULL;
+}
+
static void fbnic_remove_rx_ring(struct fbnic_net *fbn,
struct fbnic_ring *rxr)
{
@@ -1095,66 +1499,70 @@ static void fbnic_remove_rx_ring(struct fbnic_net *fbn,
fbn->rx[rxr->q_idx] = NULL;
}
+static void fbnic_remove_bdq_ring(struct fbnic_net *fbn,
+ struct fbnic_ring *bdq)
+{
+ if (!(bdq->flags & FBNIC_RING_F_STATS))
+ return;
+
+ fbnic_aggregate_ring_bdq_counters(fbn, bdq);
+}
+
+static void fbnic_free_qt_page_pools(struct fbnic_q_triad *qt)
+{
+ page_pool_destroy(qt->sub0.page_pool);
+ page_pool_destroy(qt->sub1.page_pool);
+}
+
static void fbnic_free_napi_vector(struct fbnic_net *fbn,
struct fbnic_napi_vector *nv)
{
struct fbnic_dev *fbd = nv->fbd;
- u32 v_idx = nv->v_idx;
int i, j;
for (i = 0; i < nv->txt_count; i++) {
fbnic_remove_tx_ring(fbn, &nv->qt[i].sub0);
+ fbnic_remove_xdp_ring(fbn, &nv->qt[i].sub1);
fbnic_remove_tx_ring(fbn, &nv->qt[i].cmpl);
}
for (j = 0; j < nv->rxt_count; j++, i++) {
- fbnic_remove_rx_ring(fbn, &nv->qt[i].sub0);
- fbnic_remove_rx_ring(fbn, &nv->qt[i].sub1);
+ fbnic_remove_bdq_ring(fbn, &nv->qt[i].sub0);
+ fbnic_remove_bdq_ring(fbn, &nv->qt[i].sub1);
fbnic_remove_rx_ring(fbn, &nv->qt[i].cmpl);
}
- fbnic_free_irq(fbd, v_idx, nv);
- page_pool_destroy(nv->page_pool);
- netif_napi_del(&nv->napi);
- list_del(&nv->napis);
+ fbnic_napi_free_irq(fbd, nv);
+ netif_napi_del_locked(&nv->napi);
+ fbn->napi[fbnic_napi_idx(nv)] = NULL;
kfree(nv);
}
void fbnic_free_napi_vectors(struct fbnic_net *fbn)
{
- struct fbnic_napi_vector *nv, *temp;
-
- list_for_each_entry_safe(nv, temp, &fbn->napis, napis)
- fbnic_free_napi_vector(fbn, nv);
-}
-
-static void fbnic_name_napi_vector(struct fbnic_napi_vector *nv)
-{
- unsigned char *dev_name = nv->napi.dev->name;
+ int i;
- if (!nv->rxt_count)
- snprintf(nv->name, sizeof(nv->name), "%s-Tx-%u", dev_name,
- nv->v_idx - FBNIC_NON_NAPI_VECTORS);
- else
- snprintf(nv->name, sizeof(nv->name), "%s-TxRx-%u", dev_name,
- nv->v_idx - FBNIC_NON_NAPI_VECTORS);
+ for (i = 0; i < fbn->num_napi; i++)
+ if (fbn->napi[i])
+ fbnic_free_napi_vector(fbn, fbn->napi[i]);
}
-#define FBNIC_PAGE_POOL_FLAGS \
- (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV)
-
-static int fbnic_alloc_nv_page_pool(struct fbnic_net *fbn,
- struct fbnic_napi_vector *nv)
+static int
+fbnic_alloc_qt_page_pools(struct fbnic_net *fbn, struct fbnic_q_triad *qt,
+ unsigned int rxq_idx)
{
struct page_pool_params pp_params = {
.order = 0,
- .flags = FBNIC_PAGE_POOL_FLAGS,
- .pool_size = (fbn->hpq_size + fbn->ppq_size) * nv->rxt_count,
+ .flags = PP_FLAG_DMA_MAP |
+ PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = fbn->hpq_size + fbn->ppq_size,
.nid = NUMA_NO_NODE,
- .dev = nv->dev,
+ .dev = fbn->netdev->dev.parent,
.dma_dir = DMA_BIDIRECTIONAL,
.offset = 0,
- .max_len = PAGE_SIZE
+ .max_len = PAGE_SIZE,
+ .netdev = fbn->netdev,
+ .queue_idx = rxq_idx,
};
struct page_pool *pp;
@@ -1174,9 +1582,24 @@ static int fbnic_alloc_nv_page_pool(struct fbnic_net *fbn,
if (IS_ERR(pp))
return PTR_ERR(pp);
- nv->page_pool = pp;
+ qt->sub0.page_pool = pp;
+ if (netif_rxq_has_unreadable_mp(fbn->netdev, rxq_idx)) {
+ pp_params.flags |= PP_FLAG_ALLOW_UNREADABLE_NETMEM;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+
+ pp = page_pool_create(&pp_params);
+ if (IS_ERR(pp))
+ goto err_destroy_sub0;
+ } else {
+ page_pool_get(pp);
+ }
+ qt->sub1.page_pool = pp;
return 0;
+
+err_destroy_sub0:
+ page_pool_destroy(pp);
+ return PTR_ERR(pp);
}
static void fbnic_ring_init(struct fbnic_ring *ring, u32 __iomem *doorbell,
@@ -1186,6 +1609,7 @@ static void fbnic_ring_init(struct fbnic_ring *ring, u32 __iomem *doorbell,
ring->doorbell = doorbell;
ring->q_idx = q_idx;
ring->flags = flags;
+ ring->deferred_head = -1;
}
static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
@@ -1195,11 +1619,18 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
{
int txt_count = txq_count, rxt_count = rxq_count;
u32 __iomem *uc_addr = fbd->uc_addr0;
+ int xdp_count = 0, qt_count, err;
struct fbnic_napi_vector *nv;
struct fbnic_q_triad *qt;
- int qt_count, err;
u32 __iomem *db;
+ /* We need to reserve at least one Tx Queue Triad for an XDP ring */
+ if (rxq_count) {
+ xdp_count = 1;
+ if (!txt_count)
+ txt_count = 1;
+ }
+
qt_count = txt_count + rxq_count;
if (!qt_count)
return -EINVAL;
@@ -1222,42 +1653,34 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
nv->v_idx = v_idx;
/* Tie napi to netdev */
- list_add(&nv->napis, &fbn->napis);
- netif_napi_add(fbn->netdev, &nv->napi, fbnic_poll);
+ fbn->napi[fbnic_napi_idx(nv)] = nv;
+ netif_napi_add_config_locked(fbn->netdev, &nv->napi, fbnic_poll,
+ fbnic_napi_idx(nv));
/* Record IRQ to NAPI struct */
- netif_napi_set_irq(&nv->napi,
- pci_irq_vector(to_pci_dev(fbd->dev), nv->v_idx));
+ netif_napi_set_irq_locked(&nv->napi,
+ pci_irq_vector(to_pci_dev(fbd->dev),
+ nv->v_idx));
/* Tie nv back to PCIe dev */
nv->dev = fbd->dev;
- /* Allocate page pool */
- if (rxq_count) {
- err = fbnic_alloc_nv_page_pool(fbn, nv);
- if (err)
- goto napi_del;
- }
-
- /* Initialize vector name */
- fbnic_name_napi_vector(nv);
-
/* Request the IRQ for napi vector */
- err = fbnic_request_irq(fbd, v_idx, &fbnic_msix_clean_rings,
- IRQF_SHARED, nv->name, nv);
+ err = fbnic_napi_request_irq(fbd, nv);
if (err)
- goto pp_destroy;
+ goto napi_del;
/* Initialize queue triads */
qt = nv->qt;
while (txt_count) {
+ u8 flags = FBNIC_RING_F_CTX | FBNIC_RING_F_STATS;
+
/* Configure Tx queue */
db = &uc_addr[FBNIC_QUEUE(txq_idx) + FBNIC_QUEUE_TWQ0_TAIL];
/* Assign Tx queue to netdev if applicable */
if (txq_count > 0) {
- u8 flags = FBNIC_RING_F_CTX | FBNIC_RING_F_STATS;
fbnic_ring_init(&qt->sub0, db, txq_idx, flags);
fbn->tx[txq_idx] = &qt->sub0;
@@ -1267,6 +1690,28 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
FBNIC_RING_F_DISABLED);
}
+ /* Configure XDP queue */
+ db = &uc_addr[FBNIC_QUEUE(txq_idx) + FBNIC_QUEUE_TWQ1_TAIL];
+
+ /* Assign XDP queue to netdev if applicable
+ *
+ * The setup here is a bit different:
+ * 1. We only need one XDP Tx queue per NAPI vector.
+ * 2. We associate it with the first Rx queue index.
+ * 3. The hardware side is associated based on the Tx queue.
+ * 4. The netdev queue is offset by FBNIC_MAX_TXQS.
+ */
+ if (xdp_count > 0) {
+ unsigned int xdp_idx = FBNIC_MAX_TXQS + rxq_idx;
+
+ fbnic_ring_init(&qt->sub1, db, xdp_idx, flags);
+ fbn->tx[xdp_idx] = &qt->sub1;
+ xdp_count--;
+ } else {
+ fbnic_ring_init(&qt->sub1, db, 0,
+ FBNIC_RING_F_DISABLED);
+ }
+
/* Configure Tx completion queue */
db = &uc_addr[FBNIC_QUEUE(txq_idx) + FBNIC_QUEUE_TCQ_HEAD];
fbnic_ring_init(&qt->cmpl, db, 0, 0);
@@ -1282,11 +1727,13 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
while (rxt_count) {
/* Configure header queue */
db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_BDQ_HPQ_TAIL];
- fbnic_ring_init(&qt->sub0, db, 0, FBNIC_RING_F_CTX);
+ fbnic_ring_init(&qt->sub0, db, 0,
+ FBNIC_RING_F_CTX | FBNIC_RING_F_STATS);
/* Configure payload queue */
db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_BDQ_PPQ_TAIL];
- fbnic_ring_init(&qt->sub1, db, 0, FBNIC_RING_F_CTX);
+ fbnic_ring_init(&qt->sub1, db, 0,
+ FBNIC_RING_F_CTX | FBNIC_RING_F_STATS);
/* Configure Rx completion queue */
db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_RCQ_HEAD];
@@ -1303,11 +1750,9 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
return 0;
-pp_destroy:
- page_pool_destroy(nv->page_pool);
napi_del:
- netif_napi_del(&nv->napi);
- list_del(&nv->napis);
+ netif_napi_del_locked(&nv->napi);
+ fbn->napi[fbnic_napi_idx(nv)] = NULL;
kfree(nv);
return err;
}
@@ -1363,7 +1808,7 @@ int fbnic_alloc_napi_vectors(struct fbnic_net *fbn)
free_vectors:
fbnic_free_napi_vectors(fbn);
- return -ENOMEM;
+ return err;
}
static void fbnic_free_ring_resources(struct device *dev,
@@ -1520,6 +1965,12 @@ static void fbnic_free_qt_resources(struct fbnic_net *fbn,
fbnic_free_ring_resources(dev, &qt->cmpl);
fbnic_free_ring_resources(dev, &qt->sub1);
fbnic_free_ring_resources(dev, &qt->sub0);
+
+ if (xdp_rxq_info_is_reg(&qt->xdp_rxq)) {
+ xdp_rxq_info_unreg_mem_model(&qt->xdp_rxq);
+ xdp_rxq_info_unreg(&qt->xdp_rxq);
+ fbnic_free_qt_page_pools(qt);
+ }
}
static int fbnic_alloc_tx_qt_resources(struct fbnic_net *fbn,
@@ -1532,6 +1983,10 @@ static int fbnic_alloc_tx_qt_resources(struct fbnic_net *fbn,
if (err)
return err;
+ err = fbnic_alloc_tx_ring_resources(fbn, &qt->sub1);
+ if (err)
+ goto free_sub0;
+
err = fbnic_alloc_tx_ring_resources(fbn, &qt->cmpl);
if (err)
goto free_sub1;
@@ -1539,20 +1994,37 @@ static int fbnic_alloc_tx_qt_resources(struct fbnic_net *fbn,
return 0;
free_sub1:
+ fbnic_free_ring_resources(dev, &qt->sub1);
+free_sub0:
fbnic_free_ring_resources(dev, &qt->sub0);
return err;
}
static int fbnic_alloc_rx_qt_resources(struct fbnic_net *fbn,
+ struct fbnic_napi_vector *nv,
struct fbnic_q_triad *qt)
{
struct device *dev = fbn->netdev->dev.parent;
int err;
- err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub0);
+ err = fbnic_alloc_qt_page_pools(fbn, qt, qt->cmpl.q_idx);
if (err)
return err;
+ err = xdp_rxq_info_reg(&qt->xdp_rxq, fbn->netdev, qt->sub0.q_idx,
+ nv->napi.napi_id);
+ if (err)
+ goto free_page_pools;
+
+ err = xdp_rxq_info_reg_mem_model(&qt->xdp_rxq, MEM_TYPE_PAGE_POOL,
+ qt->sub0.page_pool);
+ if (err)
+ goto unreg_rxq;
+
+ err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub0);
+ if (err)
+ goto unreg_mm;
+
err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub1);
if (err)
goto free_sub0;
@@ -1567,19 +2039,21 @@ free_sub1:
fbnic_free_ring_resources(dev, &qt->sub1);
free_sub0:
fbnic_free_ring_resources(dev, &qt->sub0);
+unreg_mm:
+ xdp_rxq_info_unreg_mem_model(&qt->xdp_rxq);
+unreg_rxq:
+ xdp_rxq_info_unreg(&qt->xdp_rxq);
+free_page_pools:
+ fbnic_free_qt_page_pools(qt);
return err;
}
static void fbnic_free_nv_resources(struct fbnic_net *fbn,
struct fbnic_napi_vector *nv)
{
- int i, j;
-
- /* Free Tx Resources */
- for (i = 0; i < nv->txt_count; i++)
- fbnic_free_qt_resources(fbn, &nv->qt[i]);
+ int i;
- for (j = 0; j < nv->rxt_count; j++, i++)
+ for (i = 0; i < nv->txt_count + nv->rxt_count; i++)
fbnic_free_qt_resources(fbn, &nv->qt[i]);
}
@@ -1592,19 +2066,19 @@ static int fbnic_alloc_nv_resources(struct fbnic_net *fbn,
for (i = 0; i < nv->txt_count; i++) {
err = fbnic_alloc_tx_qt_resources(fbn, &nv->qt[i]);
if (err)
- goto free_resources;
+ goto free_qt_resources;
}
/* Allocate Rx Resources */
for (j = 0; j < nv->rxt_count; j++, i++) {
- err = fbnic_alloc_rx_qt_resources(fbn, &nv->qt[i]);
+ err = fbnic_alloc_rx_qt_resources(fbn, nv, &nv->qt[i]);
if (err)
- goto free_resources;
+ goto free_qt_resources;
}
return 0;
-free_resources:
+free_qt_resources:
while (i--)
fbnic_free_qt_resources(fbn, &nv->qt[i]);
return err;
@@ -1612,19 +2086,18 @@ free_resources:
void fbnic_free_resources(struct fbnic_net *fbn)
{
- struct fbnic_napi_vector *nv;
+ int i;
- list_for_each_entry(nv, &fbn->napis, napis)
- fbnic_free_nv_resources(fbn, nv);
+ for (i = 0; i < fbn->num_napi; i++)
+ fbnic_free_nv_resources(fbn, fbn->napi[i]);
}
int fbnic_alloc_resources(struct fbnic_net *fbn)
{
- struct fbnic_napi_vector *nv;
- int err = -ENODEV;
+ int i, err = -ENODEV;
- list_for_each_entry(nv, &fbn->napis, napis) {
- err = fbnic_alloc_nv_resources(fbn, nv);
+ for (i = 0; i < fbn->num_napi; i++) {
+ err = fbnic_alloc_nv_resources(fbn, fbn->napi[i]);
if (err)
goto free_resources;
}
@@ -1632,12 +2105,77 @@ int fbnic_alloc_resources(struct fbnic_net *fbn)
return 0;
free_resources:
- list_for_each_entry_continue_reverse(nv, &fbn->napis, napis)
- fbnic_free_nv_resources(fbn, nv);
+ while (i--)
+ fbnic_free_nv_resources(fbn, fbn->napi[i]);
return err;
}
+static void fbnic_set_netif_napi(struct fbnic_napi_vector *nv)
+{
+ int i, j;
+
+ /* Associate Tx queue with NAPI */
+ for (i = 0; i < nv->txt_count; i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx,
+ NETDEV_QUEUE_TYPE_TX, &nv->napi);
+ }
+
+ /* Associate Rx queue with NAPI */
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx,
+ NETDEV_QUEUE_TYPE_RX, &nv->napi);
+ }
+}
+
+static void fbnic_reset_netif_napi(struct fbnic_napi_vector *nv)
+{
+ int i, j;
+
+ /* Disassociate Tx queue from NAPI */
+ for (i = 0; i < nv->txt_count; i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx,
+ NETDEV_QUEUE_TYPE_TX, NULL);
+ }
+
+ /* Disassociate Rx queue from NAPI */
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx,
+ NETDEV_QUEUE_TYPE_RX, NULL);
+ }
+}
+
+int fbnic_set_netif_queues(struct fbnic_net *fbn)
+{
+ int i, err;
+
+ err = netif_set_real_num_queues(fbn->netdev, fbn->num_tx_queues,
+ fbn->num_rx_queues);
+ if (err)
+ return err;
+
+ for (i = 0; i < fbn->num_napi; i++)
+ fbnic_set_netif_napi(fbn->napi[i]);
+
+ return 0;
+}
+
+void fbnic_reset_netif_queues(struct fbnic_net *fbn)
+{
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++)
+ fbnic_reset_netif_napi(fbn->napi[i]);
+}
+
static void fbnic_disable_twq0(struct fbnic_ring *txr)
{
u32 twq_ctl = fbnic_ring_rd32(txr, FBNIC_QUEUE_TWQ0_CTL);
@@ -1647,6 +2185,15 @@ static void fbnic_disable_twq0(struct fbnic_ring *txr)
fbnic_ring_wr32(txr, FBNIC_QUEUE_TWQ0_CTL, twq_ctl);
}
+static void fbnic_disable_twq1(struct fbnic_ring *txr)
+{
+ u32 twq_ctl = fbnic_ring_rd32(txr, FBNIC_QUEUE_TWQ1_CTL);
+
+ twq_ctl &= ~FBNIC_QUEUE_TWQ_CTL_ENABLE;
+
+ fbnic_ring_wr32(txr, FBNIC_QUEUE_TWQ1_CTL, twq_ctl);
+}
+
static void fbnic_disable_tcq(struct fbnic_ring *txr)
{
fbnic_ring_wr32(txr, FBNIC_QUEUE_TCQ_CTL, 0);
@@ -1670,38 +2217,51 @@ static void fbnic_disable_rcq(struct fbnic_ring *rxr)
void fbnic_napi_disable(struct fbnic_net *fbn)
{
- struct fbnic_napi_vector *nv;
+ int i;
- list_for_each_entry(nv, &fbn->napis, napis) {
- napi_disable(&nv->napi);
+ for (i = 0; i < fbn->num_napi; i++) {
+ napi_disable_locked(&fbn->napi[i]->napi);
- fbnic_nv_irq_disable(nv);
+ fbnic_nv_irq_disable(fbn->napi[i]);
}
}
-void fbnic_disable(struct fbnic_net *fbn)
+static void __fbnic_nv_disable(struct fbnic_napi_vector *nv)
{
- struct fbnic_dev *fbd = fbn->fbd;
- struct fbnic_napi_vector *nv;
- int i, j;
+ int i, t;
- list_for_each_entry(nv, &fbn->napis, napis) {
- /* Disable Tx queue triads */
- for (i = 0; i < nv->txt_count; i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
+ /* Disable Tx queue triads */
+ for (t = 0; t < nv->txt_count; t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
- fbnic_disable_twq0(&qt->sub0);
- fbnic_disable_tcq(&qt->cmpl);
- }
+ fbnic_disable_twq0(&qt->sub0);
+ fbnic_disable_twq1(&qt->sub1);
+ fbnic_disable_tcq(&qt->cmpl);
+ }
- /* Disable Rx queue triads */
- for (j = 0; j < nv->rxt_count; j++, i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
+ /* Disable Rx queue triads */
+ for (i = 0; i < nv->rxt_count; i++, t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
- fbnic_disable_bdq(&qt->sub0, &qt->sub1);
- fbnic_disable_rcq(&qt->cmpl);
- }
+ fbnic_disable_bdq(&qt->sub0, &qt->sub1);
+ fbnic_disable_rcq(&qt->cmpl);
}
+}
+
+static void
+fbnic_nv_disable(struct fbnic_net *fbn, struct fbnic_napi_vector *nv)
+{
+ __fbnic_nv_disable(nv);
+ fbnic_wrfl(fbn->fbd);
+}
+
+void fbnic_disable(struct fbnic_net *fbn)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++)
+ __fbnic_nv_disable(fbn->napi[i]);
fbnic_wrfl(fbd);
}
@@ -1790,94 +2350,117 @@ int fbnic_wait_all_queues_idle(struct fbnic_dev *fbd, bool may_fail)
return err;
}
-void fbnic_flush(struct fbnic_net *fbn)
+static int
+fbnic_wait_queue_idle(struct fbnic_net *fbn, bool rx, unsigned int idx)
{
- struct fbnic_napi_vector *nv;
+ static const unsigned int tx_regs[] = {
+ FBNIC_QM_TWQ_IDLE(0), FBNIC_QM_TQS_IDLE(0),
+ FBNIC_QM_TDE_IDLE(0), FBNIC_QM_TCQ_IDLE(0),
+ }, rx_regs[] = {
+ FBNIC_QM_HPQ_IDLE(0), FBNIC_QM_PPQ_IDLE(0),
+ FBNIC_QM_RCQ_IDLE(0),
+ };
+ struct fbnic_dev *fbd = fbn->fbd;
+ unsigned int val, mask, off;
+ const unsigned int *regs;
+ unsigned int reg_cnt;
+ int i, err;
- list_for_each_entry(nv, &fbn->napis, napis) {
- int i, j;
+ regs = rx ? rx_regs : tx_regs;
+ reg_cnt = rx ? ARRAY_SIZE(rx_regs) : ARRAY_SIZE(tx_regs);
- /* Flush any processed Tx Queue Triads and drop the rest */
- for (i = 0; i < nv->txt_count; i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
- struct netdev_queue *tx_queue;
+ off = idx / 32;
+ mask = BIT(idx % 32);
- /* Clean the work queues of unprocessed work */
- fbnic_clean_twq0(nv, 0, &qt->sub0, true, qt->sub0.tail);
+ for (i = 0; i < reg_cnt; i++) {
+ err = read_poll_timeout_atomic(fbnic_rd32, val, val & mask,
+ 2, 500000, false,
+ fbd, regs[i] + off);
+ if (err) {
+ netdev_err(fbd->netdev,
+ "wait for queue %s%d idle failed 0x%04x(%d): %08x (mask: %08x)\n",
+ rx ? "Rx" : "Tx", idx, regs[i] + off, i,
+ val, mask);
+ return err;
+ }
+ }
- /* Reset completion queue descriptor ring */
- memset(qt->cmpl.desc, 0, qt->cmpl.size);
+ return 0;
+}
- /* Nothing else to do if Tx queue is disabled */
- if (qt->sub0.flags & FBNIC_RING_F_DISABLED)
- continue;
+static void fbnic_nv_flush(struct fbnic_napi_vector *nv)
+{
+ int j, t;
- /* Reset BQL associated with Tx queue */
- tx_queue = netdev_get_tx_queue(nv->napi.dev,
- qt->sub0.q_idx);
- netdev_tx_reset_queue(tx_queue);
+ /* Flush any processed Tx Queue Triads and drop the rest */
+ for (t = 0; t < nv->txt_count; t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
+ struct netdev_queue *tx_queue;
- /* Disassociate Tx queue from NAPI */
- netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx,
- NETDEV_QUEUE_TYPE_TX, NULL);
- }
+ /* Clean the work queues of unprocessed work */
+ fbnic_clean_twq0(nv, 0, &qt->sub0, true, qt->sub0.tail);
+ fbnic_clean_twq1(nv, false, &qt->sub1, true,
+ qt->sub1.tail);
- /* Flush any processed Rx Queue Triads and drop the rest */
- for (j = 0; j < nv->rxt_count; j++, i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
+ /* Reset completion queue descriptor ring */
+ memset(qt->cmpl.desc, 0, qt->cmpl.size);
- /* Clean the work queues of unprocessed work */
- fbnic_clean_bdq(nv, 0, &qt->sub0, qt->sub0.tail);
- fbnic_clean_bdq(nv, 0, &qt->sub1, qt->sub1.tail);
+ /* Nothing else to do if Tx queue is disabled */
+ if (qt->sub0.flags & FBNIC_RING_F_DISABLED)
+ continue;
+
+ /* Reset BQL associated with Tx queue */
+ tx_queue = netdev_get_tx_queue(nv->napi.dev,
+ qt->sub0.q_idx);
+ netdev_tx_reset_queue(tx_queue);
+ }
- /* Reset completion queue descriptor ring */
- memset(qt->cmpl.desc, 0, qt->cmpl.size);
+ /* Flush any processed Rx Queue Triads and drop the rest */
+ for (j = 0; j < nv->rxt_count; j++, t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
- fbnic_put_pkt_buff(nv, qt->cmpl.pkt, 0);
- qt->cmpl.pkt->buff.data_hard_start = NULL;
+ /* Clean the work queues of unprocessed work */
+ fbnic_clean_bdq(&qt->sub0, qt->sub0.tail, 0);
+ fbnic_clean_bdq(&qt->sub1, qt->sub1.tail, 0);
- /* Disassociate Rx queue from NAPI */
- netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx,
- NETDEV_QUEUE_TYPE_RX, NULL);
- }
+ /* Reset completion queue descriptor ring */
+ memset(qt->cmpl.desc, 0, qt->cmpl.size);
+
+ fbnic_put_pkt_buff(qt, qt->cmpl.pkt, 0);
+ memset(qt->cmpl.pkt, 0, sizeof(struct fbnic_pkt_buff));
}
}
-void fbnic_fill(struct fbnic_net *fbn)
+void fbnic_flush(struct fbnic_net *fbn)
{
- struct fbnic_napi_vector *nv;
-
- list_for_each_entry(nv, &fbn->napis, napis) {
- int i, j;
+ int i;
- /* Configure NAPI mapping for Tx */
- for (i = 0; i < nv->txt_count; i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
+ for (i = 0; i < fbn->num_napi; i++)
+ fbnic_nv_flush(fbn->napi[i]);
+}
- /* Nothing to do if Tx queue is disabled */
- if (qt->sub0.flags & FBNIC_RING_F_DISABLED)
- continue;
+static void fbnic_nv_fill(struct fbnic_napi_vector *nv)
+{
+ int j, t;
- /* Associate Tx queue with NAPI */
- netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx,
- NETDEV_QUEUE_TYPE_TX, &nv->napi);
- }
+ /* Populate pages in the BDQ rings to use for Rx */
+ for (j = 0, t = nv->txt_count; j < nv->rxt_count; j++, t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
- /* Configure NAPI mapping and populate pages
- * in the BDQ rings to use for Rx
- */
- for (j = 0; j < nv->rxt_count; j++, i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
+ /* Populate the header and payload BDQs */
+ fbnic_fill_bdq(&qt->sub0);
+ fbnic_fill_bdq(&qt->sub1);
+ }
+}
- /* Associate Rx queue with NAPI */
- netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx,
- NETDEV_QUEUE_TYPE_RX, &nv->napi);
+void fbnic_fill(struct fbnic_net *fbn)
+{
+ int i;
- /* Populate the header and payload BDQs */
- fbnic_fill_bdq(nv, &qt->sub0);
- fbnic_fill_bdq(nv, &qt->sub1);
- }
- }
+ for (i = 0; i < fbn->num_napi; i++)
+ fbnic_nv_fill(fbn->napi[i]);
}
static void fbnic_enable_twq0(struct fbnic_ring *twq)
@@ -1902,6 +2485,28 @@ static void fbnic_enable_twq0(struct fbnic_ring *twq)
fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_CTL, FBNIC_QUEUE_TWQ_CTL_ENABLE);
}
+static void fbnic_enable_twq1(struct fbnic_ring *twq)
+{
+ u32 log_size = fls(twq->size_mask);
+
+ if (!twq->size_mask)
+ return;
+
+ /* Reset head/tail */
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ1_CTL, FBNIC_QUEUE_TWQ_CTL_RESET);
+ twq->tail = 0;
+ twq->head = 0;
+
+ /* Store descriptor ring address and size */
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ1_BAL, lower_32_bits(twq->dma));
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ1_BAH, upper_32_bits(twq->dma));
+
+ /* Write lower 4 bits of log size as 64K ring size is 0 */
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ1_SIZE, log_size & 0xf);
+
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ1_CTL, FBNIC_QUEUE_TWQ_CTL_ENABLE);
+}
+
static void fbnic_enable_tcq(struct fbnic_napi_vector *nv,
struct fbnic_ring *tcq)
{
@@ -1970,11 +2575,15 @@ write_ctl:
}
static void fbnic_config_drop_mode_rcq(struct fbnic_napi_vector *nv,
- struct fbnic_ring *rcq)
+ struct fbnic_ring *rcq, bool tx_pause)
{
+ struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
u32 drop_mode, rcq_ctl;
- drop_mode = FBNIC_QUEUE_RDE_CTL0_DROP_IMMEDIATE;
+ if (!tx_pause && fbn->num_rx_queues > 1)
+ drop_mode = FBNIC_QUEUE_RDE_CTL0_DROP_IMMEDIATE;
+ else
+ drop_mode = FBNIC_QUEUE_RDE_CTL0_DROP_NEVER;
/* Specify packet layout */
rcq_ctl = FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_DROP_MODE_MASK, drop_mode) |
@@ -1984,17 +2593,83 @@ static void fbnic_config_drop_mode_rcq(struct fbnic_napi_vector *nv,
fbnic_ring_wr32(rcq, FBNIC_QUEUE_RDE_CTL0, rcq_ctl);
}
+void fbnic_config_drop_mode(struct fbnic_net *fbn, bool tx_pause)
+{
+ int i, t;
+
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
+
+ for (t = 0; t < nv->rxt_count; t++) {
+ struct fbnic_q_triad *qt = &nv->qt[nv->txt_count + t];
+
+ fbnic_config_drop_mode_rcq(nv, &qt->cmpl, tx_pause);
+ }
+ }
+}
+
+static void fbnic_config_rim_threshold(struct fbnic_ring *rcq, u16 nv_idx, u32 rx_desc)
+{
+ u32 threshold;
+
+ /* Set the threshold to half the ring size if rx_frames
+ * is not configured
+ */
+ threshold = rx_desc ? : rcq->size_mask / 2;
+
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_CTL, nv_idx);
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_THRESHOLD, threshold);
+}
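The `rx_desc ? : rcq->size_mask / 2` expression above uses the GNU C conditional with an omitted middle operand (a ?: b), which keeps the configured frame count when it is non-zero and otherwise falls back to half the ring size. A tiny sketch of the same default-selection pattern (builds with GCC or Clang):

    /* Hedged sketch of the a ?: b default-value idiom (GNU C extension). */
    #include <stdio.h>

    int main(void)
    {
            unsigned int size_mask = 1023;      /* hypothetical ring of 1024 entries */
            unsigned int configured = 0;        /* 0 means "not configured"          */
            unsigned int threshold = configured ? : size_mask / 2;

            printf("threshold = %u\n", threshold);  /* 511: half the ring */
            return 0;
    }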
+
+void fbnic_config_txrx_usecs(struct fbnic_napi_vector *nv, u32 arm)
+{
+ struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
+ struct fbnic_dev *fbd = nv->fbd;
+ u32 val = arm;
+
+ val |= FIELD_PREP(FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT, fbn->rx_usecs) |
+ FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT_UPD_EN;
+ val |= FIELD_PREP(FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT, fbn->tx_usecs) |
+ FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT_UPD_EN;
+
+ fbnic_wr32(fbd, FBNIC_INTR_CQ_REARM(nv->v_idx), val);
+}
+
+void fbnic_config_rx_frames(struct fbnic_napi_vector *nv)
+{
+ struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
+ int i;
+
+ for (i = nv->txt_count; i < nv->rxt_count + nv->txt_count; i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ fbnic_config_rim_threshold(&qt->cmpl, nv->v_idx,
+ fbn->rx_max_frames *
+ FBNIC_MIN_RXD_PER_FRAME);
+ }
+}
+
static void fbnic_enable_rcq(struct fbnic_napi_vector *nv,
struct fbnic_ring *rcq)
{
+ struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
u32 log_size = fls(rcq->size_mask);
- u32 rcq_ctl;
+ u32 hds_thresh = fbn->hds_thresh;
+ u32 rcq_ctl = 0;
+
+ fbnic_config_drop_mode_rcq(nv, rcq, fbn->tx_pause);
- fbnic_config_drop_mode_rcq(nv, rcq);
+ /* Force lower bound on MAX_HEADER_BYTES. Below this, all frames should
+ * be split at L4. It would also result in the frames being split at
+ * L2/L3 depending on the frame size.
+ */
+ if (fbn->hds_thresh < FBNIC_HDR_BYTES_MIN) {
+ rcq_ctl = FBNIC_QUEUE_RDE_CTL0_EN_HDR_SPLIT;
+ hds_thresh = FBNIC_HDR_BYTES_MIN;
+ }
- rcq_ctl = FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PADLEN_MASK, FBNIC_RX_PAD) |
- FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_MAX_HDR_MASK,
- FBNIC_RX_MAX_HDR) |
+ rcq_ctl |= FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PADLEN_MASK, FBNIC_RX_PAD) |
+ FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_MAX_HDR_MASK, hds_thresh) |
FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PAYLD_OFF_MASK,
FBNIC_RX_PAYLD_OFFSET) |
FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PAYLD_PG_CL_MASK,
@@ -2014,61 +2689,73 @@ static void fbnic_enable_rcq(struct fbnic_napi_vector *nv,
fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_SIZE, log_size & 0xf);
/* Store interrupt information for the completion queue */
- fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_CTL, nv->v_idx);
- fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_THRESHOLD, rcq->size_mask / 2);
+ fbnic_config_rim_threshold(rcq, nv->v_idx, fbn->rx_max_frames *
+ FBNIC_MIN_RXD_PER_FRAME);
fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_MASK, 0);
/* Enable queue */
fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_CTL, FBNIC_QUEUE_RCQ_CTL_ENABLE);
}
-void fbnic_enable(struct fbnic_net *fbn)
+static void __fbnic_nv_enable(struct fbnic_napi_vector *nv)
{
- struct fbnic_dev *fbd = fbn->fbd;
- struct fbnic_napi_vector *nv;
- int i, j;
+ int j, t;
- list_for_each_entry(nv, &fbn->napis, napis) {
- /* Setup Tx Queue Triads */
- for (i = 0; i < nv->txt_count; i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
+ /* Setup Tx Queue Triads */
+ for (t = 0; t < nv->txt_count; t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
- fbnic_enable_twq0(&qt->sub0);
- fbnic_enable_tcq(nv, &qt->cmpl);
- }
+ fbnic_enable_twq0(&qt->sub0);
+ fbnic_enable_twq1(&qt->sub1);
+ fbnic_enable_tcq(nv, &qt->cmpl);
+ }
- /* Setup Rx Queue Triads */
- for (j = 0; j < nv->rxt_count; j++, i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
+ /* Setup Rx Queue Triads */
+ for (j = 0; j < nv->rxt_count; j++, t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
- fbnic_enable_bdq(&qt->sub0, &qt->sub1);
- fbnic_config_drop_mode_rcq(nv, &qt->cmpl);
- fbnic_enable_rcq(nv, &qt->cmpl);
- }
+ page_pool_enable_direct_recycling(qt->sub0.page_pool,
+ &nv->napi);
+ page_pool_enable_direct_recycling(qt->sub1.page_pool,
+ &nv->napi);
+
+ fbnic_enable_bdq(&qt->sub0, &qt->sub1);
+ fbnic_enable_rcq(nv, &qt->cmpl);
}
+}
- fbnic_wrfl(fbd);
+static void fbnic_nv_enable(struct fbnic_net *fbn, struct fbnic_napi_vector *nv)
+{
+ __fbnic_nv_enable(nv);
+ fbnic_wrfl(fbn->fbd);
}
-static void fbnic_nv_irq_enable(struct fbnic_napi_vector *nv)
+void fbnic_enable(struct fbnic_net *fbn)
{
- struct fbnic_dev *fbd = nv->fbd;
- u32 val;
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i;
- val = FBNIC_INTR_CQ_REARM_INTR_UNMASK;
+ for (i = 0; i < fbn->num_napi; i++)
+ __fbnic_nv_enable(fbn->napi[i]);
- fbnic_wr32(fbd, FBNIC_INTR_CQ_REARM(nv->v_idx), val);
+ fbnic_wrfl(fbd);
+}
+
+static void fbnic_nv_irq_enable(struct fbnic_napi_vector *nv)
+{
+ fbnic_config_txrx_usecs(nv, FBNIC_INTR_CQ_REARM_INTR_UNMASK);
}
void fbnic_napi_enable(struct fbnic_net *fbn)
{
u32 irqs[FBNIC_MAX_MSIX_VECS / 32] = {};
struct fbnic_dev *fbd = fbn->fbd;
- struct fbnic_napi_vector *nv;
int i;
- list_for_each_entry(nv, &fbn->napis, napis) {
- napi_enable(&nv->napi);
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
+
+ napi_enable_locked(&nv->napi);
fbnic_nv_irq_enable(nv);
@@ -2096,17 +2783,18 @@ void fbnic_napi_depletion_check(struct net_device *netdev)
struct fbnic_net *fbn = netdev_priv(netdev);
u32 irqs[FBNIC_MAX_MSIX_VECS / 32] = {};
struct fbnic_dev *fbd = fbn->fbd;
- struct fbnic_napi_vector *nv;
- int i, j;
+ int i, j, t;
+
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
- list_for_each_entry(nv, &fbn->napis, napis) {
/* Find RQs which are completely out of pages */
- for (i = nv->txt_count, j = 0; j < nv->rxt_count; j++, i++) {
+ for (t = nv->txt_count, j = 0; j < nv->rxt_count; j++, t++) {
/* Assume 4 pages is always enough to fit a packet
* and therefore generate a completion and an IRQ.
*/
- if (fbnic_desc_used(&nv->qt[i].sub0) < 4 ||
- fbnic_desc_used(&nv->qt[i].sub1) < 4)
+ if (fbnic_desc_used(&nv->qt[t].sub0) < 4 ||
+ fbnic_desc_used(&nv->qt[t].sub1) < 4)
irqs[nv->v_idx / 32] |= BIT(nv->v_idx % 32);
}
}
@@ -2120,3 +2808,123 @@ void fbnic_napi_depletion_check(struct net_device *netdev)
fbnic_wrfl(fbd);
}
+
+static int fbnic_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ const struct fbnic_q_triad *real;
+ struct fbnic_q_triad *qt = qmem;
+ struct fbnic_napi_vector *nv;
+
+ if (!netif_running(dev))
+ return fbnic_alloc_qt_page_pools(fbn, qt, idx);
+
+ real = container_of(fbn->rx[idx], struct fbnic_q_triad, cmpl);
+ nv = fbn->napi[idx % fbn->num_napi];
+
+ fbnic_ring_init(&qt->sub0, real->sub0.doorbell, real->sub0.q_idx,
+ real->sub0.flags);
+ fbnic_ring_init(&qt->sub1, real->sub1.doorbell, real->sub1.q_idx,
+ real->sub1.flags);
+ fbnic_ring_init(&qt->cmpl, real->cmpl.doorbell, real->cmpl.q_idx,
+ real->cmpl.flags);
+
+ return fbnic_alloc_rx_qt_resources(fbn, nv, qt);
+}
+
+static void fbnic_queue_mem_free(struct net_device *dev, void *qmem)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ struct fbnic_q_triad *qt = qmem;
+
+ if (!netif_running(dev))
+ fbnic_free_qt_page_pools(qt);
+ else
+ fbnic_free_qt_resources(fbn, qt);
+}
+
+static void __fbnic_nv_restart(struct fbnic_net *fbn,
+ struct fbnic_napi_vector *nv)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i;
+
+ fbnic_nv_enable(fbn, nv);
+ fbnic_nv_fill(nv);
+
+ napi_enable_locked(&nv->napi);
+ fbnic_nv_irq_enable(nv);
+ fbnic_wr32(fbd, FBNIC_INTR_SET(nv->v_idx / 32), BIT(nv->v_idx % 32));
+ fbnic_wrfl(fbd);
+
+ for (i = 0; i < nv->txt_count; i++)
+ netif_wake_subqueue(fbn->netdev, nv->qt[i].sub0.q_idx);
+}
+
+static int fbnic_queue_start(struct net_device *dev, void *qmem, int idx)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ struct fbnic_napi_vector *nv;
+ struct fbnic_q_triad *real;
+
+ real = container_of(fbn->rx[idx], struct fbnic_q_triad, cmpl);
+ nv = fbn->napi[idx % fbn->num_napi];
+
+ fbnic_aggregate_ring_bdq_counters(fbn, &real->sub0);
+ fbnic_aggregate_ring_bdq_counters(fbn, &real->sub1);
+ fbnic_aggregate_ring_rx_counters(fbn, &real->cmpl);
+
+ memcpy(real, qmem, sizeof(*real));
+
+ __fbnic_nv_restart(fbn, nv);
+
+ return 0;
+}
+
+static int fbnic_queue_stop(struct net_device *dev, void *qmem, int idx)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ const struct fbnic_q_triad *real;
+ struct fbnic_napi_vector *nv;
+ int i, t;
+ int err;
+
+ real = container_of(fbn->rx[idx], struct fbnic_q_triad, cmpl);
+ nv = fbn->napi[idx % fbn->num_napi];
+
+ napi_disable_locked(&nv->napi);
+ fbnic_nv_irq_disable(nv);
+
+ for (i = 0; i < nv->txt_count; i++)
+ netif_stop_subqueue(dev, nv->qt[i].sub0.q_idx);
+ fbnic_nv_disable(fbn, nv);
+
+ for (t = 0; t < nv->txt_count + nv->rxt_count; t++) {
+ err = fbnic_wait_queue_idle(fbn, t >= nv->txt_count,
+ nv->qt[t].sub0.q_idx);
+ if (err)
+ goto err_restart;
+ }
+
+ fbnic_synchronize_irq(fbn->fbd, nv->v_idx);
+ fbnic_nv_flush(nv);
+
+ page_pool_disable_direct_recycling(real->sub0.page_pool);
+ page_pool_disable_direct_recycling(real->sub1.page_pool);
+
+ memcpy(qmem, real, sizeof(*real));
+
+ return 0;
+
+err_restart:
+ __fbnic_nv_restart(fbn, nv);
+ return err;
+}
+
+const struct netdev_queue_mgmt_ops fbnic_queue_mgmt_ops = {
+ .ndo_queue_mem_size = sizeof(struct fbnic_q_triad),
+ .ndo_queue_mem_alloc = fbnic_queue_mem_alloc,
+ .ndo_queue_mem_free = fbnic_queue_mem_free,
+ .ndo_queue_start = fbnic_queue_start,
+ .ndo_queue_stop = fbnic_queue_stop,
+};
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
index 8d626287c3f4..27776e844e29 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
@@ -24,20 +24,37 @@ struct fbnic_net;
#define FBNIC_TX_DESC_WAKEUP (FBNIC_MAX_SKB_DESC * 2)
#define FBNIC_TX_DESC_MIN roundup_pow_of_two(FBNIC_TX_DESC_WAKEUP)
+/* To receive the worst case packet we need:
+ * 1 descriptor for primary metadata
+ * + 1 descriptor for optional metadata
+ * + 1 descriptor for headers
+ * + 4 descriptors for payload
+ */
+#define FBNIC_MAX_RX_PKT_DESC 7
+#define FBNIC_RX_DESC_MIN roundup_pow_of_two(FBNIC_MAX_RX_PKT_DESC * 2)
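FBNIC_RX_DESC_MIN above is the worst-case descriptor count (1 + 1 + 1 + 4 = 7) doubled and rounded up to the next power of two, i.e. 16 entries. A short sketch of that arithmetic, with a plain helper standing in for the kernel's roundup_pow_of_two():

    /* Hedged sketch of the minimum-ring-size arithmetic. */
    #include <stdio.h>

    static unsigned int demo_roundup_pow_of_two(unsigned int n)
    {
            unsigned int p = 1;

            while (p < n)
                    p <<= 1;
            return p;
    }

    int main(void)
    {
            unsigned int max_rx_pkt_desc = 1 + 1 + 1 + 4;  /* metadata + optional metadata + header + payload */
            unsigned int rx_desc_min = demo_roundup_pow_of_two(max_rx_pkt_desc * 2);

            printf("worst case: %u descriptors, min ring: %u\n",
                   max_rx_pkt_desc, rx_desc_min);          /* 7 and 16 */
            return 0;
    }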
+
#define FBNIC_MAX_TXQS 128u
#define FBNIC_MAX_RXQS 128u
+#define FBNIC_MAX_XDPQS 128u
+
+/* These apply to TWQs, TCQ, RCQ */
+#define FBNIC_QUEUE_SIZE_MIN 16u
+#define FBNIC_QUEUE_SIZE_MAX SZ_64K
#define FBNIC_TXQ_SIZE_DEFAULT 1024
#define FBNIC_HPQ_SIZE_DEFAULT 256
#define FBNIC_PPQ_SIZE_DEFAULT 256
#define FBNIC_RCQ_SIZE_DEFAULT 1024
+#define FBNIC_TX_USECS_DEFAULT 35
+#define FBNIC_RX_USECS_DEFAULT 30
+#define FBNIC_RX_FRAMES_DEFAULT 0
#define FBNIC_RX_TROOM \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+#define FBNIC_RX_HROOM_PAD 128
#define FBNIC_RX_HROOM \
- (ALIGN(FBNIC_RX_TROOM + NET_SKB_PAD, 128) - FBNIC_RX_TROOM)
+ (ALIGN(FBNIC_RX_TROOM + FBNIC_RX_HROOM_PAD, 128) - FBNIC_RX_TROOM)
#define FBNIC_RX_PAD 0
-#define FBNIC_RX_MAX_HDR (1536 - FBNIC_RX_PAD)
#define FBNIC_RX_PAYLD_OFFSET 0
#define FBNIC_RX_PAYLD_PG_CL 0
@@ -45,29 +62,48 @@ struct fbnic_net;
#define FBNIC_RING_F_CTX BIT(1)
#define FBNIC_RING_F_STATS BIT(2) /* Ring's stats may be used */
+#define FBNIC_HDS_THRESH_MAX \
+ (4096 - FBNIC_RX_HROOM - FBNIC_RX_TROOM - FBNIC_RX_PAD)
+#define FBNIC_HDS_THRESH_DEFAULT \
+ (1536 - FBNIC_RX_PAD)
+#define FBNIC_HDR_BYTES_MIN 128
+
struct fbnic_pkt_buff {
struct xdp_buff buff;
ktime_t hwtstamp;
- u32 data_truesize;
- u16 data_len;
- u16 nr_frags;
+ bool add_frag_failed;
};
struct fbnic_queue_stats {
u64 packets;
u64 bytes;
+ union {
+ struct {
+ u64 csum_partial;
+ u64 lso;
+ u64 ts_packets;
+ u64 ts_lost;
+ u64 stop;
+ u64 wake;
+ } twq;
+ struct {
+ u64 alloc_failed;
+ u64 csum_complete;
+ u64 csum_none;
+ u64 length_errors;
+ } rx;
+ struct {
+ u64 alloc_failed;
+ } bdq;
+ };
u64 dropped;
- u64 ts_packets;
- u64 ts_lost;
struct u64_stats_sync syncp;
};
-/* Pagecnt bias is long max to reserve the last bit to catch overflow
- * cases where if we overcharge the bias it will flip over to be negative.
- */
-#define PAGECNT_BIAS_MAX LONG_MAX
+#define FBNIC_PAGECNT_BIAS_MAX PAGE_SIZE
+
struct fbnic_rx_buf {
- struct page *page;
+ netmem_ref netmem;
long pagecnt_bias;
};
@@ -88,6 +124,17 @@ struct fbnic_ring {
u32 head, tail; /* Head/Tail of ring */
+ union {
+ /* Rx BDQs only */
+ struct page_pool *page_pool;
+
+ /* Deferred_head is used to cache the head for TWQ1 if
+ * an attempt is made to clean TWQ1 with zero napi_budget.
+ * We do not use it for any other ring.
+ */
+ s32 deferred_head;
+ };
+
struct fbnic_queue_stats stats;
/* Slow path fields follow */
@@ -97,38 +144,47 @@ struct fbnic_ring {
struct fbnic_q_triad {
struct fbnic_ring sub0, sub1, cmpl;
+ struct xdp_rxq_info xdp_rxq;
};
struct fbnic_napi_vector {
struct napi_struct napi;
struct device *dev; /* Device for DMA unmapping */
- struct page_pool *page_pool;
struct fbnic_dev *fbd;
- char name[IFNAMSIZ + 9];
u16 v_idx;
u8 txt_count;
u8 rxt_count;
- struct list_head napis;
-
struct fbnic_q_triad qt[];
};
-#define FBNIC_MAX_TXQS 128u
-#define FBNIC_MAX_RXQS 128u
+extern const struct netdev_queue_mgmt_ops fbnic_queue_mgmt_ops;
netdev_tx_t fbnic_xmit_frame(struct sk_buff *skb, struct net_device *dev);
netdev_features_t
fbnic_features_check(struct sk_buff *skb, struct net_device *dev,
netdev_features_t features);
+void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *rxr);
+void fbnic_aggregate_ring_bdq_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *rxr);
+void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *txr);
+void fbnic_aggregate_ring_xdp_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *xdpr);
+
int fbnic_alloc_napi_vectors(struct fbnic_net *fbn);
void fbnic_free_napi_vectors(struct fbnic_net *fbn);
int fbnic_alloc_resources(struct fbnic_net *fbn);
void fbnic_free_resources(struct fbnic_net *fbn);
+int fbnic_set_netif_queues(struct fbnic_net *fbn);
+void fbnic_reset_netif_queues(struct fbnic_net *fbn);
+irqreturn_t fbnic_msix_clean_rings(int irq, void *data);
void fbnic_napi_enable(struct fbnic_net *fbn);
void fbnic_napi_disable(struct fbnic_net *fbn);
+void fbnic_config_drop_mode(struct fbnic_net *fbn, bool tx_pause);
void fbnic_enable(struct fbnic_net *fbn);
void fbnic_disable(struct fbnic_net *fbn);
void fbnic_flush(struct fbnic_net *fbn);
@@ -137,4 +193,9 @@ void fbnic_fill(struct fbnic_net *fbn);
void fbnic_napi_depletion_check(struct net_device *netdev);
int fbnic_wait_all_queues_idle(struct fbnic_dev *fbd, bool may_fail);
+static inline int fbnic_napi_idx(const struct fbnic_napi_vector *nv)
+{
+ return nv->v_idx - FBNIC_NON_NAPI_VECTORS;
+}
+
#endif /* _FBNIC_TXRX_H_ */
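One worked instance of the headroom/header-split arithmetic introduced above
may help. The 320-byte value for FBNIC_RX_TROOM is only an assumption for a
typical x86_64 build; SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) varies
with kernel configuration.

	/* Illustration only, assuming FBNIC_RX_TROOM evaluates to 320 bytes:
	 *
	 *   FBNIC_RX_HROOM           = ALIGN(320 + 128, 128) - 320 = 192
	 *   FBNIC_HDS_THRESH_MAX     = 4096 - 192 - 320 - 0        = 3584
	 *   FBNIC_HDS_THRESH_DEFAULT = 1536 - 0                    = 1536
	 *
	 * Headroom thus pads the buffer until headroom + tailroom is a multiple
	 * of 128, and the header-split threshold can never exceed what is left
	 * of a 4 KiB buffer after head/tail room and padding.
	 */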
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index c7b0b09c2b09..541c41a9077a 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -335,7 +335,7 @@ static void ks8842_reset_hw(struct ks8842_adapter *adapter)
/* When running in DMA Mode the RX interrupt is not enabled in
timberdale because RX data is received by DMA callbacks
it must still be enabled in the KS8842 because it indicates
- to timberdale when there is RX data for it's DMA FIFOs */
+ to timberdale when there is RX data for its DMA FIFOs */
iowrite16(ENABLED_IRQS_DMA_IP, adapter->hw_addr + REG_TIMB_IER);
ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
} else {
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index 3062cc0f9199..c862b13b447a 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -20,8 +20,6 @@
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include "ks8851.h"
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index dc1d9f774565..cdde19b8edc4 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -3951,7 +3951,7 @@ static void ksz_stop_timer(struct ksz_timer_info *info)
{
if (info->max) {
info->max = 0;
- del_timer_sync(&info->timer);
+ timer_delete_sync(&info->timer);
}
}
@@ -6304,7 +6304,8 @@ static void mib_read_work(struct work_struct *work)
static void mib_monitor(struct timer_list *t)
{
- struct dev_info *hw_priv = from_timer(hw_priv, t, mib_timer_info.timer);
+ struct dev_info *hw_priv = timer_container_of(hw_priv, t,
+ mib_timer_info.timer);
mib_read_work(&hw_priv->mib_read);
@@ -6331,7 +6332,8 @@ static void mib_monitor(struct timer_list *t)
*/
static void dev_monitor(struct timer_list *t)
{
- struct dev_priv *priv = from_timer(priv, t, monitor_timer_info.timer);
+ struct dev_priv *priv = timer_container_of(priv, t,
+ monitor_timer_info.timer);
struct net_device *dev = priv->mii_if.dev;
struct dev_info *hw_priv = priv->adapter;
struct ksz_hw *hw = &hw_priv->hw;
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
index 73832fb2bc32..ee046468652c 100644
--- a/drivers/net/ethernet/microchip/Kconfig
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -59,7 +59,6 @@ config LAN743X
source "drivers/net/ethernet/microchip/lan865x/Kconfig"
source "drivers/net/ethernet/microchip/lan966x/Kconfig"
-source "drivers/net/ethernet/microchip/lan969x/Kconfig"
source "drivers/net/ethernet/microchip/sparx5/Kconfig"
source "drivers/net/ethernet/microchip/vcap/Kconfig"
source "drivers/net/ethernet/microchip/fdma/Kconfig"
diff --git a/drivers/net/ethernet/microchip/Makefile b/drivers/net/ethernet/microchip/Makefile
index 7770df82200f..3c65baed9fd8 100644
--- a/drivers/net/ethernet/microchip/Makefile
+++ b/drivers/net/ethernet/microchip/Makefile
@@ -11,7 +11,6 @@ lan743x-objs := lan743x_main.o lan743x_ethtool.o lan743x_ptp.o
obj-$(CONFIG_LAN865X) += lan865x/
obj-$(CONFIG_LAN966X_SWITCH) += lan966x/
-obj-$(CONFIG_LAN969X_SWITCH) += lan969x/
obj-$(CONFIG_SPARX5_SWITCH) += sparx5/
obj-$(CONFIG_VCAP) += vcap/
obj-$(CONFIG_FDMA) += fdma/
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index 1a1cbd034eda..40002d9fe274 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -18,6 +18,8 @@
#define EEPROM_MAC_OFFSET (0x01)
#define MAX_EEPROM_SIZE (512)
#define MAX_OTP_SIZE (1024)
+#define MAX_HS_OTP_SIZE (8 * 1024)
+#define MAX_HS_EEPROM_SIZE (64 * 1024)
#define OTP_INDICATOR_1 (0xF3)
#define OTP_INDICATOR_2 (0xF7)
@@ -272,6 +274,9 @@ static int lan743x_hs_otp_read(struct lan743x_adapter *adapter, u32 offset,
int ret;
int i;
+ if (offset + length > MAX_HS_OTP_SIZE)
+ return -EINVAL;
+
ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
if (ret < 0)
return ret;
@@ -320,6 +325,9 @@ static int lan743x_hs_otp_write(struct lan743x_adapter *adapter, u32 offset,
int ret;
int i;
+ if (offset + length > MAX_HS_OTP_SIZE)
+ return -EINVAL;
+
ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
if (ret < 0)
return ret;
@@ -497,6 +505,9 @@ static int lan743x_hs_eeprom_read(struct lan743x_adapter *adapter,
u32 val;
int i;
+ if (offset + length > MAX_HS_EEPROM_SIZE)
+ return -EINVAL;
+
retval = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
if (retval < 0)
return retval;
@@ -539,6 +550,9 @@ static int lan743x_hs_eeprom_write(struct lan743x_adapter *adapter,
u32 val;
int i;
+ if (offset + length > MAX_HS_EEPROM_SIZE)
+ return -EINVAL;
+
retval = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
if (retval < 0)
return retval;
@@ -604,9 +618,9 @@ static int lan743x_ethtool_get_eeprom_len(struct net_device *netdev)
struct lan743x_adapter *adapter = netdev_priv(netdev);
if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP)
- return MAX_OTP_SIZE;
+ return adapter->is_pci11x1x ? MAX_HS_OTP_SIZE : MAX_OTP_SIZE;
- return MAX_EEPROM_SIZE;
+ return adapter->is_pci11x1x ? MAX_HS_EEPROM_SIZE : MAX_EEPROM_SIZE;
}
static int lan743x_ethtool_get_eeprom(struct net_device *netdev,
@@ -899,23 +913,29 @@ static int lan743x_ethtool_get_sset_count(struct net_device *netdev, int sset)
}
}
+static int lan743x_ethtool_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *fields)
+{
+ fields->data = 0;
+
+ switch (fields->flow_type) {
+	case TCP_V4_FLOW: case UDP_V4_FLOW:
+	case TCP_V6_FLOW: case UDP_V6_FLOW:
+ fields->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case IPV4_FLOW: case IPV6_FLOW:
+ fields->data |= RXH_IP_SRC | RXH_IP_DST;
+ return 0;
+ }
+
+ return 0;
+}
+
static int lan743x_ethtool_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *rxnfc,
u32 *rule_locs)
{
switch (rxnfc->cmd) {
- case ETHTOOL_GRXFH:
- rxnfc->data = 0;
- switch (rxnfc->flow_type) {
- case TCP_V4_FLOW:case UDP_V4_FLOW:
- case TCP_V6_FLOW:case UDP_V6_FLOW:
- rxnfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- fallthrough;
- case IPV4_FLOW: case IPV6_FLOW:
- rxnfc->data |= RXH_IP_SRC | RXH_IP_DST;
- return 0;
- }
- break;
case ETHTOOL_GRXRINGS:
rxnfc->data = LAN743X_USED_RX_CHANNELS;
return 0;
@@ -1055,9 +1075,6 @@ static int lan743x_ethtool_get_eee(struct net_device *netdev,
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
- eee->tx_lpi_timer = lan743x_csr_read(adapter,
- MAC_EEE_TX_LPI_REQ_DLY_CNT);
-
return phylink_ethtool_get_eee(adapter->phylink, eee);
}
@@ -1065,24 +1082,6 @@ static int lan743x_ethtool_set_eee(struct net_device *netdev,
struct ethtool_keee *eee)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
- u32 tx_lpi_timer;
-
- tx_lpi_timer = lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT);
- if (tx_lpi_timer != eee->tx_lpi_timer) {
- u32 mac_cr = lan743x_csr_read(adapter, MAC_CR);
-
- /* Software should only change this field when Energy Efficient
- * Ethernet Enable (EEEEN) is cleared.
- * This function will trigger an autonegotiation restart and
- * eee will be reenabled during link up if eee was negotiated.
- */
- lan743x_mac_eee_enable(adapter, false);
- lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT,
- eee->tx_lpi_timer);
-
- if (mac_cr & MAC_CR_EEE_EN_)
- lan743x_mac_eee_enable(adapter, true);
- }
return phylink_ethtool_set_eee(adapter->phylink, eee);
}
@@ -1375,6 +1374,7 @@ const struct ethtool_ops lan743x_ethtool_ops = {
.get_rxfh_indir_size = lan743x_ethtool_get_rxfh_indir_size,
.get_rxfh = lan743x_ethtool_get_rxfh,
.set_rxfh = lan743x_ethtool_set_rxfh,
+ .get_rxfh_fields = lan743x_ethtool_get_rxfh_fields,
.get_ts_info = lan743x_ethtool_get_ts_info,
.get_eee = lan743x_ethtool_get_eee,
.set_eee = lan743x_ethtool_set_eee,
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 4dc5adcda6a3..e4c542fc6c2b 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1330,7 +1330,7 @@ static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu)
}
/* PHY */
-static int lan743x_phy_reset(struct lan743x_adapter *adapter)
+static int lan743x_hw_reset_phy(struct lan743x_adapter *adapter)
{
u32 data;
@@ -1346,11 +1346,6 @@ static int lan743x_phy_reset(struct lan743x_adapter *adapter)
50000, 1000000);
}
-static int lan743x_phy_init(struct lan743x_adapter *adapter)
-{
- return lan743x_phy_reset(adapter);
-}
-
static void lan743x_phy_interface_select(struct lan743x_adapter *adapter)
{
u32 id_rev;
@@ -1729,6 +1724,7 @@ int lan743x_rx_set_tstamp_mode(struct lan743x_adapter *adapter,
default:
return -ERANGE;
}
+ adapter->rx_tstamp_filter = rx_filter;
return 0;
}
@@ -1815,6 +1811,7 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
if (nr_frags <= 0) {
tx->frame_data0 |= TX_DESC_DATA0_LS_;
tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+ tx->frame_last = tx->frame_first;
}
tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
@@ -1884,6 +1881,7 @@ static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
tx->frame_first = 0;
tx->frame_data0 = 0;
tx->frame_tail = 0;
+ tx->frame_last = 0;
return -ENOMEM;
}
@@ -1924,16 +1922,18 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx,
TX_DESC_DATA0_DTYPE_DATA_) {
tx->frame_data0 |= TX_DESC_DATA0_LS_;
tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+ tx->frame_last = tx->frame_tail;
}
- tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
- buffer_info = &tx->buffer_info[tx->frame_tail];
+ tx_descriptor = &tx->ring_cpu_ptr[tx->frame_last];
+ buffer_info = &tx->buffer_info[tx->frame_last];
buffer_info->skb = skb;
if (time_stamp)
buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED;
if (ignore_sync)
buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;
+ tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
tx->last_tail = tx->frame_tail;
@@ -2495,8 +2495,7 @@ static int lan743x_rx_process_buffer(struct lan743x_rx *rx)
/* save existing skb, allocate new skb and map to dma */
skb = buffer_info->skb;
- if (lan743x_rx_init_ring_element(rx, rx->last_head,
- GFP_ATOMIC | GFP_DMA)) {
+ if (lan743x_rx_init_ring_element(rx, rx->last_head, GFP_ATOMIC)) {
/* failed to allocate next skb.
* Memory is very low.
* Drop this packet and reuse buffer.
@@ -2966,7 +2965,7 @@ static int lan743x_phylink_2500basex_config(struct lan743x_adapter *adapter)
return lan743x_pcs_power_reset(adapter);
}
-void lan743x_mac_eee_enable(struct lan743x_adapter *adapter, bool enable)
+static void lan743x_mac_eee_enable(struct lan743x_adapter *adapter, bool enable)
{
u32 mac_cr;
@@ -3027,10 +3026,8 @@ static void lan743x_phylink_mac_link_down(struct phylink_config *config,
phy_interface_t interface)
{
struct net_device *netdev = to_net_dev(config->dev);
- struct lan743x_adapter *adapter = netdev_priv(netdev);
- netif_tx_stop_all_queues(to_net_dev(config->dev));
- lan743x_mac_eee_enable(adapter, false);
+ netif_tx_stop_all_queues(netdev);
}
static void lan743x_phylink_mac_link_up(struct phylink_config *config,
@@ -3072,16 +3069,40 @@ static void lan743x_phylink_mac_link_up(struct phylink_config *config,
cap & FLOW_CTRL_TX,
cap & FLOW_CTRL_RX);
- if (phydev)
- lan743x_mac_eee_enable(adapter, phydev->enable_tx_lpi);
-
netif_tx_wake_all_queues(netdev);
}
+static void lan743x_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ lan743x_mac_eee_enable(adapter, false);
+}
+
+static int lan743x_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ /* Software should only change this field when Energy Efficient
+ * Ethernet Enable (EEEEN) is cleared. We ensure that by clearing
+ * EEEEN during probe, and phylink itself guarantees that
+ * mac_disable_tx_lpi() will have been previously called.
+ */
+ lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT, timer);
+ lan743x_mac_eee_enable(adapter, true);
+
+ return 0;
+}
+
static const struct phylink_mac_ops lan743x_phylink_mac_ops = {
.mac_config = lan743x_phylink_mac_config,
.mac_link_down = lan743x_phylink_mac_link_down,
.mac_link_up = lan743x_phylink_mac_link_up,
+ .mac_disable_tx_lpi = lan743x_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = lan743x_mac_enable_tx_lpi,
};
static int lan743x_phylink_create(struct lan743x_adapter *adapter)
@@ -3095,6 +3116,9 @@ static int lan743x_phylink_create(struct lan743x_adapter *adapter)
adapter->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
+ adapter->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD;
+ adapter->phylink_config.lpi_timer_default =
+ lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT);
lan743x_phy_interface_select(adapter);
@@ -3120,6 +3144,10 @@ static int lan743x_phylink_create(struct lan743x_adapter *adapter)
phy_interface_set_rgmii(adapter->phylink_config.supported_interfaces);
}
+ memcpy(adapter->phylink_config.lpi_interfaces,
+ adapter->phylink_config.supported_interfaces,
+ sizeof(adapter->phylink_config.lpi_interfaces));
+
pl = phylink_create(&adapter->phylink_config, NULL,
adapter->phy_interface, &lan743x_phylink_mac_ops);
@@ -3319,8 +3347,6 @@ static int lan743x_netdev_ioctl(struct net_device *netdev,
if (!netif_running(netdev))
return -EINVAL;
- if (cmd == SIOCSHWTSTAMP)
- return lan743x_ptp_ioctl(netdev, ifr, cmd);
return phylink_mii_ioctl(adapter->phylink, ifr, cmd);
}
@@ -3415,6 +3441,8 @@ static const struct net_device_ops lan743x_netdev_ops = {
.ndo_change_mtu = lan743x_netdev_change_mtu,
.ndo_get_stats64 = lan743x_netdev_get_stats64,
.ndo_set_mac_address = lan743x_netdev_set_mac_address,
+ .ndo_hwtstamp_get = lan743x_ptp_hwtstamp_get,
+ .ndo_hwtstamp_set = lan743x_ptp_hwtstamp_set,
};
static void lan743x_hardware_cleanup(struct lan743x_adapter *adapter)
@@ -3462,6 +3490,7 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
struct pci_dev *pdev)
{
struct lan743x_tx *tx;
+ u32 sgmii_ctl;
int index;
int ret;
@@ -3474,6 +3503,15 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
spin_lock_init(&adapter->eth_syslock_spinlock);
mutex_init(&adapter->sgmii_rw_lock);
pci11x1x_set_rfe_rd_fifo_threshold(adapter);
+ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+ if (adapter->is_sgmii_en) {
+ sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_;
+ sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_;
+ } else {
+ sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_;
+ sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_;
+ }
+ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
} else {
adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS;
adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS;
@@ -3491,10 +3529,6 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
if (ret)
return ret;
- ret = lan743x_phy_init(adapter);
- if (ret)
- return ret;
-
ret = lan743x_ptp_init(adapter);
if (ret)
return ret;
@@ -3517,12 +3551,14 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
spin_lock_init(&tx->ring_lock);
}
+ /* Ensure EEEEN is clear */
+ lan743x_mac_eee_enable(adapter, false);
+
return 0;
}
static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
{
- u32 sgmii_ctl;
int ret;
adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
@@ -3534,10 +3570,6 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
adapter->mdiobus->priv = (void *)adapter;
if (adapter->is_pci11x1x) {
if (adapter->is_sgmii_en) {
- sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
- sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_;
- sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_;
- lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
netif_dbg(adapter, drv, adapter->netdev,
"SGMII operation\n");
adapter->mdiobus->read = lan743x_mdiobus_read_c22;
@@ -3548,10 +3580,6 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
netif_dbg(adapter, drv, adapter->netdev,
"lan743x-mdiobus-c45\n");
} else {
- sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
- sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_;
- sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_;
- lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
netif_dbg(adapter, drv, adapter->netdev,
"RGMII operation\n");
// Only C22 support when RGMII I/F
@@ -3637,6 +3665,10 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev,
if (ret)
goto cleanup_pci;
+ ret = lan743x_hw_reset_phy(adapter);
+ if (ret)
+ goto cleanup_pci;
+
ret = lan743x_hardware_init(adapter, pdev);
if (ret)
goto cleanup_pci;
@@ -3883,7 +3915,6 @@ static int lan743x_pm_resume(struct device *dev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- pci_save_state(pdev);
/* Restore HW_CFG that was saved during pm suspend */
if (adapter->is_pci11x1x)
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 8ef897c114d3..02a28b709163 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -980,6 +980,7 @@ struct lan743x_tx {
u32 frame_first;
u32 frame_data0;
u32 frame_tail;
+ u32 frame_last;
struct lan743x_tx_buffer_info *buffer_info;
@@ -1086,6 +1087,7 @@ struct lan743x_adapter {
phy_interface_t phy_interface;
struct phylink *phylink;
struct phylink_config phylink_config;
+ int rx_tstamp_filter;
};
#define LAN743X_COMPONENT_FLAG_RX(channel) BIT(20 + (channel))
@@ -1206,6 +1208,5 @@ void lan743x_hs_syslock_release(struct lan743x_adapter *adapter);
void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter,
bool tx_enable, bool rx_enable);
int lan743x_sgmii_read(struct lan743x_adapter *adapter, u8 mmd, u16 addr);
-void lan743x_mac_eee_enable(struct lan743x_adapter *adapter, bool enable);
#endif /* _LAN743X_H */
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index 4a777b449ecd..a3b48388b3fd 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -463,10 +463,6 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
struct lan743x_ptp_perout *perout = &ptp->perout[index];
int ret = 0;
- /* Reject requests with unsupported flags */
- if (perout_request->flags & ~PTP_PEROUT_DUTY_CYCLE)
- return -EOPNOTSUPP;
-
if (on) {
perout_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT,
perout_request->index);
@@ -1537,6 +1533,10 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter)
ptp->ptp_clock_info.n_per_out = LAN743X_PTP_N_EVENT_CHAN;
ptp->ptp_clock_info.n_pins = n_pins;
ptp->ptp_clock_info.pps = LAN743X_PTP_N_PPS;
+ ptp->ptp_clock_info.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
+ ptp->ptp_clock_info.supported_perout_flags = PTP_PEROUT_DUTY_CYCLE;
ptp->ptp_clock_info.pin_config = ptp->pin_config;
ptp->ptp_clock_info.adjfine = lan743x_ptpci_adjfine;
ptp->ptp_clock_info.adjtime = lan743x_ptpci_adjtime;
@@ -1736,23 +1736,32 @@ void lan743x_ptp_tx_timestamp_skb(struct lan743x_adapter *adapter,
lan743x_ptp_tx_ts_complete(adapter);
}
-int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+int lan743x_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
- struct hwtstamp_config config;
- int ret = 0;
- int index;
+ struct lan743x_tx *tx = &adapter->tx[0];
- if (!ifr) {
- netif_err(adapter, drv, adapter->netdev,
- "SIOCSHWTSTAMP, ifr == NULL\n");
- return -EINVAL;
- }
+ if (tx->ts_flags & TX_TS_FLAG_ONE_STEP_SYNC)
+ config->tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
+ else if (tx->ts_flags & TX_TS_FLAG_TIMESTAMPING_ENABLED)
+ config->tx_type = HWTSTAMP_TX_ON;
+ else
+ config->tx_type = HWTSTAMP_TX_OFF;
+
+ config->rx_filter = adapter->rx_tstamp_filter;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ return 0;
+}
- switch (config.tx_type) {
+int lan743x_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ int index;
+
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
for (index = 0; index < adapter->used_tx_channels;
index++)
@@ -1776,19 +1785,12 @@ int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
lan743x_ptp_set_sync_ts_insert(adapter, true);
break;
case HWTSTAMP_TX_ONESTEP_P2P:
- ret = -ERANGE;
- break;
+ return -ERANGE;
default:
netif_warn(adapter, drv, adapter->netdev,
- " tx_type = %d, UNKNOWN\n", config.tx_type);
- ret = -EINVAL;
- break;
+ " tx_type = %d, UNKNOWN\n", config->tx_type);
+ return -EINVAL;
}
- ret = lan743x_rx_set_tstamp_mode(adapter, config.rx_filter);
-
- if (!ret)
- return copy_to_user(ifr->ifr_data, &config,
- sizeof(config)) ? -EFAULT : 0;
- return ret;
+ return lan743x_rx_set_tstamp_mode(adapter, config->rx_filter);
}
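For context on why the copy_{from,to}_user() handling disappears above: with
ndo_hwtstamp_get/ndo_hwtstamp_set the user copies are expected to be done by
the core. A hedged sketch of that path, assuming the generic SIOCSHWTSTAMP
handling in net/core/dev_ioctl.c and the hwtstamp_config_to_kernel() /
hwtstamp_config_from_kernel() converters:

	/* Assumed core-side flow, shown for illustration -- not driver code */
	struct kernel_hwtstamp_config kcfg;
	struct hwtstamp_config ucfg;

	if (copy_from_user(&ucfg, ifr->ifr_data, sizeof(ucfg)))
		return -EFAULT;
	hwtstamp_config_to_kernel(&kcfg, &ucfg);	/* user -> kernel view */
	err = dev->netdev_ops->ndo_hwtstamp_set(dev, &kcfg, extack);
	if (err)
		return err;
	hwtstamp_config_from_kernel(&ucfg, &kcfg);	/* kernel -> user view */
	return copy_to_user(ifr->ifr_data, &ucfg, sizeof(ucfg)) ? -EFAULT : 0;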
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.h b/drivers/net/ethernet/microchip/lan743x_ptp.h
index 0d29914cd460..f33dc83c5700 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.h
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.h
@@ -18,9 +18,9 @@
*/
#define LAN743X_PTP_N_EVENT_CHAN 2
#define LAN743X_PTP_N_PEROUT LAN743X_PTP_N_EVENT_CHAN
-#define LAN743X_PTP_N_EXTTS 4
-#define LAN743X_PTP_N_PPS 0
#define PCI11X1X_PTP_IO_MAX_CHANNELS 8
+#define LAN743X_PTP_N_EXTTS PCI11X1X_PTP_IO_MAX_CHANNELS
+#define LAN743X_PTP_N_PPS 0
#define PTP_CMD_CTL_TIMEOUT_CNT 50
struct lan743x_adapter;
@@ -51,8 +51,11 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter);
void lan743x_ptp_close(struct lan743x_adapter *adapter);
void lan743x_ptp_update_latency(struct lan743x_adapter *adapter,
u32 link_speed);
-
-int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+int lan743x_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int lan743x_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
#define LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS (4)
diff --git a/drivers/net/ethernet/microchip/lan865x/lan865x.c b/drivers/net/ethernet/microchip/lan865x/lan865x.c
index dd436bdff0f8..0277d9737369 100644
--- a/drivers/net/ethernet/microchip/lan865x/lan865x.c
+++ b/drivers/net/ethernet/microchip/lan865x/lan865x.c
@@ -32,6 +32,10 @@
/* MAC Specific Addr 1 Top Reg */
#define LAN865X_REG_MAC_H_SADDR1 0x00010023
+/* MAC TSU Timer Increment Register */
+#define LAN865X_REG_MAC_TSU_TIMER_INCR 0x00010077
+#define MAC_TSU_TIMER_INCR_COUNT_NANOSECONDS 0x0028
+
struct lan865x_priv {
struct work_struct multicast_work;
struct net_device *netdev;
@@ -311,6 +315,8 @@ static int lan865x_net_open(struct net_device *netdev)
phy_start(netdev->phydev);
+ netif_start_queue(netdev);
+
return 0;
}
@@ -320,6 +326,8 @@ static const struct net_device_ops lan865x_netdev_ops = {
.ndo_start_xmit = lan865x_send_packet,
.ndo_set_rx_mode = lan865x_set_multicast_list,
.ndo_set_mac_address = lan865x_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
};
static int lan865x_probe(struct spi_device *spi)
@@ -344,6 +352,21 @@ static int lan865x_probe(struct spi_device *spi)
goto free_netdev;
}
+	/* LAN865x Rev.B0/B1 configuration parameters from AN1760.
+	 * Per the Configuration Application Note AN1760, Revision F
+	 * (DS60001760G - June 2024), available at
+	 * https://www.microchip.com/en-us/application-notes/an1760,
+	 * configure the MAC to timestamp at the end of the Start of Frame
+	 * Delimiter (SFD) and set the Timer Increment register to 40 ns so it
+	 * acts as a 25 MHz internal clock.
+	 */
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_TSU_TIMER_INCR,
+ MAC_TSU_TIMER_INCR_COUNT_NANOSECONDS);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to config TSU Timer Incr reg: %d\n",
+ ret);
+ goto oa_tc6_exit;
+ }
+
/* As per the point s3 in the below errata, SPI receive Ethernet frame
* transfer may halt when starting the next frame in the same data block
* (chunk) as the end of a previous frame. The RFA field should be
@@ -402,13 +425,16 @@ static void lan865x_remove(struct spi_device *spi)
free_netdev(priv->netdev);
}
-static const struct spi_device_id spidev_spi_ids[] = {
+static const struct spi_device_id lan865x_ids[] = {
{ .name = "lan8650" },
+ { .name = "lan8651" },
{},
};
+MODULE_DEVICE_TABLE(spi, lan865x_ids);
static const struct of_device_id lan865x_dt_ids[] = {
{ .compatible = "microchip,lan8650" },
+ { .compatible = "microchip,lan8651" },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, lan865x_dt_ids);
@@ -420,7 +446,7 @@ static struct spi_driver lan865x_driver = {
},
.probe = lan865x_probe,
.remove = lan865x_remove,
- .id_table = spidev_spi_ids,
+ .id_table = lan865x_ids,
};
module_spi_driver(lan865x_driver);
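A quick arithmetic check on the register value written in the probe hunk
above (no driver-specific assumptions involved):

	/* MAC_TSU_TIMER_INCR_COUNT_NANOSECONDS = 0x0028 = 40 decimal, so the
	 * TSU timer advances by 40 ns per tick:
	 *
	 *   1 / 40 ns = 25,000,000 Hz = 25 MHz
	 *
	 * which matches the 25 MHz internal clock called out in AN1760.
	 */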
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
index 2474dfd330f4..fe4e61405284 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
@@ -294,7 +294,7 @@ static void lan966x_stats_update(struct lan966x *lan966x)
{
int i, j;
- mutex_lock(&lan966x->stats_lock);
+ spin_lock(&lan966x->stats_lock);
for (i = 0; i < lan966x->num_phys_ports; i++) {
uint idx = i * lan966x->num_stats;
@@ -310,7 +310,7 @@ static void lan966x_stats_update(struct lan966x *lan966x)
}
}
- mutex_unlock(&lan966x->stats_lock);
+ spin_unlock(&lan966x->stats_lock);
}
static int lan966x_get_sset_count(struct net_device *dev, int sset)
@@ -365,7 +365,7 @@ static void lan966x_get_eth_mac_stats(struct net_device *dev,
idx = port->chip_port * lan966x->num_stats;
- mutex_lock(&lan966x->stats_lock);
+ spin_lock(&lan966x->stats_lock);
mac_stats->FramesTransmittedOK =
lan966x->stats[idx + SYS_COUNT_TX_UC] +
@@ -416,7 +416,7 @@ static void lan966x_get_eth_mac_stats(struct net_device *dev,
lan966x->stats[idx + SYS_COUNT_RX_LONG] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG];
- mutex_unlock(&lan966x->stats_lock);
+ spin_unlock(&lan966x->stats_lock);
}
static const struct ethtool_rmon_hist_range lan966x_rmon_ranges[] = {
@@ -442,7 +442,7 @@ static void lan966x_get_eth_rmon_stats(struct net_device *dev,
idx = port->chip_port * lan966x->num_stats;
- mutex_lock(&lan966x->stats_lock);
+ spin_lock(&lan966x->stats_lock);
rmon_stats->undersize_pkts =
lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
@@ -500,7 +500,7 @@ static void lan966x_get_eth_rmon_stats(struct net_device *dev,
lan966x->stats[idx + SYS_COUNT_TX_SZ_1024_1526] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_1024_1526];
- mutex_unlock(&lan966x->stats_lock);
+ spin_unlock(&lan966x->stats_lock);
*ranges = lan966x_rmon_ranges;
}
@@ -603,7 +603,7 @@ void lan966x_stats_get(struct net_device *dev,
idx = port->chip_port * lan966x->num_stats;
- mutex_lock(&lan966x->stats_lock);
+ spin_lock(&lan966x->stats_lock);
stats->rx_bytes = lan966x->stats[idx + SYS_COUNT_RX_OCT] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_OCT];
@@ -685,7 +685,7 @@ void lan966x_stats_get(struct net_device *dev,
stats->collisions = lan966x->stats[idx + SYS_COUNT_TX_COL];
- mutex_unlock(&lan966x->stats_lock);
+ spin_unlock(&lan966x->stats_lock);
}
int lan966x_stats_init(struct lan966x *lan966x)
@@ -701,7 +701,7 @@ int lan966x_stats_init(struct lan966x *lan966x)
return -ENOMEM;
/* Init stats worker */
- mutex_init(&lan966x->stats_lock);
+ spin_lock_init(&lan966x->stats_lock);
snprintf(queue_name, sizeof(queue_name), "%s-stats",
dev_name(lan966x->dev));
lan966x->stats_queue = create_singlethread_workqueue(queue_name);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 3234a960fcc3..47752d3fde0b 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -353,6 +353,11 @@ static void lan966x_ifh_set_rew_op(void *ifh, u64 rew_op)
lan966x_ifh_set(ifh, rew_op, IFH_POS_REW_CMD, IFH_WID_REW_CMD);
}
+static void lan966x_ifh_set_oam_type(void *ifh, u64 oam_type)
+{
+ lan966x_ifh_set(ifh, oam_type, IFH_POS_PDU_TYPE, IFH_WID_PDU_TYPE);
+}
+
static void lan966x_ifh_set_timestamp(void *ifh, u64 timestamp)
{
lan966x_ifh_set(ifh, timestamp, IFH_POS_TIMESTAMP, IFH_WID_TIMESTAMP);
@@ -380,6 +385,7 @@ static netdev_tx_t lan966x_port_xmit(struct sk_buff *skb,
return err;
lan966x_ifh_set_rew_op(ifh, LAN966X_SKB_CB(skb)->rew_op);
+ lan966x_ifh_set_oam_type(ifh, LAN966X_SKB_CB(skb)->pdu_type);
lan966x_ifh_set_timestamp(ifh, LAN966X_SKB_CB(skb)->ts_id);
}
@@ -828,7 +834,6 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
port->phylink_config.type = PHYLINK_NETDEV;
port->phylink_pcs.poll = true;
port->phylink_pcs.ops = &lan966x_phylink_pcs_ops;
- port->phylink_pcs.neg_mode = true;
port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD;
@@ -874,6 +879,7 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
lan966x_vlan_port_set_vlan_aware(port, 0);
lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
lan966x_vlan_port_apply(port);
+ lan966x_vlan_port_rew_host(port);
return 0;
}
@@ -1255,7 +1261,6 @@ cleanup_ports:
cancel_delayed_work_sync(&lan966x->stats_work);
destroy_workqueue(lan966x->stats_queue);
- mutex_destroy(&lan966x->stats_lock);
debugfs_remove_recursive(lan966x->debugfs_root);
@@ -1273,7 +1278,6 @@ static void lan966x_remove(struct platform_device *pdev)
cancel_delayed_work_sync(&lan966x->stats_work);
destroy_workqueue(lan966x->stats_queue);
- mutex_destroy(&lan966x->stats_lock);
lan966x_mac_purge_entries(lan966x);
lan966x_mdb_deinit(lan966x);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index 25cb2f61986f..eea286c29474 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -75,6 +75,10 @@
#define IFH_REW_OP_ONE_STEP_PTP 0x3
#define IFH_REW_OP_TWO_STEP_PTP 0x4
+#define IFH_PDU_TYPE_NONE 0
+#define IFH_PDU_TYPE_IPV4 7
+#define IFH_PDU_TYPE_IPV6 8
+
#define FDMA_RX_DCB_MAX_DBS 1
#define FDMA_TX_DCB_MAX_DBS 1
@@ -254,6 +258,7 @@ struct lan966x_phc {
struct lan966x_skb_cb {
u8 rew_op;
+ u8 pdu_type;
u16 ts_id;
unsigned long jiffies;
};
@@ -290,8 +295,8 @@ struct lan966x {
const struct lan966x_stat_layout *stats_layout;
u32 num_stats;
- /* workqueue for reading stats */
- struct mutex stats_lock;
+ /* lock for reading stats */
+ spinlock_t stats_lock;
u64 *stats;
struct delayed_work stats_work;
struct workqueue_struct *stats_queue;
@@ -443,7 +448,7 @@ int lan966x_stats_init(struct lan966x *lan966x);
void lan966x_port_config_down(struct lan966x_port *port);
void lan966x_port_config_up(struct lan966x_port *port);
-void lan966x_port_status_get(struct lan966x_port *port,
+void lan966x_port_status_get(struct lan966x_port *port, unsigned int neg_mode,
struct phylink_link_state *state);
int lan966x_port_pcs_set(struct lan966x_port *port,
struct lan966x_port_config *config);
@@ -492,6 +497,7 @@ void lan966x_vlan_port_apply(struct lan966x_port *port);
bool lan966x_vlan_cpu_member_cpu_vlan_mask(struct lan966x *lan966x, u16 vid);
void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port,
bool vlan_aware);
+void lan966x_vlan_port_rew_host(struct lan966x_port *port);
int lan966x_vlan_port_set_vid(struct lan966x_port *port,
u16 vid,
bool pvid,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
index 1d63903f9006..75188b99e4e7 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
@@ -88,11 +88,12 @@ static struct lan966x_port *lan966x_pcs_to_port(struct phylink_pcs *pcs)
}
static void lan966x_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct lan966x_port *port = lan966x_pcs_to_port(pcs);
- lan966x_port_status_get(port, state);
+ lan966x_port_status_get(port, neg_mode, state);
}
static int lan966x_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
index fdfa4040d9ee..cf7de0267c32 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
@@ -284,7 +284,7 @@ void lan966x_port_config_up(struct lan966x_port *port)
lan966x_port_link_up(port);
}
-void lan966x_port_status_get(struct lan966x_port *port,
+void lan966x_port_status_get(struct lan966x_port *port, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct lan966x *lan966x = port->lan966x;
@@ -314,7 +314,7 @@ void lan966x_port_status_get(struct lan966x_port *port,
bmsr |= BMSR_ANEGCOMPLETE;
lp_adv = DEV_PCS1G_ANEG_STATUS_LP_ADV_GET(val);
- phylink_mii_c22_pcs_decode_state(state, bmsr, lp_adv);
+ phylink_mii_c22_pcs_decode_state(state, neg_mode, bmsr, lp_adv);
} else {
if (!state->link)
return;
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
index 63905bb5a63a..8c40db90ee8f 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
@@ -1,11 +1,14 @@
// SPDX-License-Identifier: GPL-2.0+
#include <linux/ptp_classify.h>
+#include <linux/units.h>
#include "lan966x_main.h"
#include "vcap_api.h"
#include "vcap_api_client.h"
+#define LAN9X66_CLOCK_RATE 165617754
+
#define LAN966X_MAX_PTP_ID 512
/* Represents 1ppm adjustment in 2^59 format with 6.037735849ns as reference
@@ -322,34 +325,55 @@ void lan966x_ptp_hwtstamp_get(struct lan966x_port *port,
*cfg = phc->hwtstamp_config;
}
-static int lan966x_ptp_classify(struct lan966x_port *port, struct sk_buff *skb)
+static void lan966x_ptp_classify(struct lan966x_port *port, struct sk_buff *skb,
+ u8 *rew_op, u8 *pdu_type)
{
struct ptp_header *header;
u8 msgtype;
int type;
- if (port->ptp_tx_cmd == IFH_REW_OP_NOOP)
- return IFH_REW_OP_NOOP;
+ if (port->ptp_tx_cmd == IFH_REW_OP_NOOP) {
+ *rew_op = IFH_REW_OP_NOOP;
+ *pdu_type = IFH_PDU_TYPE_NONE;
+ return;
+ }
type = ptp_classify_raw(skb);
- if (type == PTP_CLASS_NONE)
- return IFH_REW_OP_NOOP;
+ if (type == PTP_CLASS_NONE) {
+ *rew_op = IFH_REW_OP_NOOP;
+ *pdu_type = IFH_PDU_TYPE_NONE;
+ return;
+ }
header = ptp_parse_header(skb, type);
- if (!header)
- return IFH_REW_OP_NOOP;
+ if (!header) {
+ *rew_op = IFH_REW_OP_NOOP;
+ *pdu_type = IFH_PDU_TYPE_NONE;
+ return;
+ }
+
+ if (type & PTP_CLASS_L2)
+ *pdu_type = IFH_PDU_TYPE_NONE;
+ if (type & PTP_CLASS_IPV4)
+ *pdu_type = IFH_PDU_TYPE_IPV4;
+ if (type & PTP_CLASS_IPV6)
+ *pdu_type = IFH_PDU_TYPE_IPV6;
- if (port->ptp_tx_cmd == IFH_REW_OP_TWO_STEP_PTP)
- return IFH_REW_OP_TWO_STEP_PTP;
+ if (port->ptp_tx_cmd == IFH_REW_OP_TWO_STEP_PTP) {
+ *rew_op = IFH_REW_OP_TWO_STEP_PTP;
+ return;
+ }
/* If it is sync and run 1 step then set the correct operation,
* otherwise run as 2 step
*/
msgtype = ptp_get_msgtype(header, type);
- if ((msgtype & 0xf) == 0)
- return IFH_REW_OP_ONE_STEP_PTP;
+ if ((msgtype & 0xf) == 0) {
+ *rew_op = IFH_REW_OP_ONE_STEP_PTP;
+ return;
+ }
- return IFH_REW_OP_TWO_STEP_PTP;
+ *rew_op = IFH_REW_OP_TWO_STEP_PTP;
}
static void lan966x_ptp_txtstamp_old_release(struct lan966x_port *port)
@@ -374,10 +398,12 @@ int lan966x_ptp_txtstamp_request(struct lan966x_port *port,
{
struct lan966x *lan966x = port->lan966x;
unsigned long flags;
+ u8 pdu_type;
u8 rew_op;
- rew_op = lan966x_ptp_classify(port, skb);
+ lan966x_ptp_classify(port, skb, &rew_op, &pdu_type);
LAN966X_SKB_CB(skb)->rew_op = rew_op;
+ LAN966X_SKB_CB(skb)->pdu_type = pdu_type;
if (rew_op != IFH_REW_OP_TWO_STEP_PTP)
return 0;
@@ -815,10 +841,6 @@ static int lan966x_ptp_perout(struct ptp_clock_info *ptp,
bool pps = false;
int pin;
- if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE |
- PTP_PEROUT_PHASE))
- return -EOPNOTSUPP;
-
pin = ptp_find_pin(phc->clock, PTP_PF_PEROUT, rq->perout.index);
if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM)
return -EINVAL;
@@ -917,12 +939,6 @@ static int lan966x_ptp_extts(struct ptp_clock_info *ptp,
if (lan966x->ptp_ext_irq <= 0)
return -EOPNOTSUPP;
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
pin = ptp_find_pin(phc->clock, PTP_PF_EXTTS, rq->extts.index);
if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM)
return -EINVAL;
@@ -978,6 +994,10 @@ static struct ptp_clock_info lan966x_ptp_clock_info = {
.n_per_out = LAN966X_PHC_PINS_NUM,
.n_ext_ts = LAN966X_PHC_PINS_NUM,
.n_pins = LAN966X_PHC_PINS_NUM,
+ .supported_extts_flags = PTP_RISING_EDGE |
+ PTP_STRICT_FLAGS,
+ .supported_perout_flags = PTP_PEROUT_DUTY_CYCLE |
+ PTP_PEROUT_PHASE,
};
static int lan966x_ptp_phc_init(struct lan966x *lan966x,
@@ -1109,5 +1129,5 @@ void lan966x_ptp_rxtstamp(struct lan966x *lan966x, struct sk_buff *skb,
u32 lan966x_ptp_get_period_ps(void)
{
/* This represents the system clock period in picoseconds */
- return 15125;
+ return PICO / LAN9X66_CLOCK_RATE;
}
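Working out the new return value above, with PICO being 10^12 from
<linux/units.h>:

	/* PICO / LAN9X66_CLOCK_RATE = 1,000,000,000,000 / 165,617,754 ~= 6038 ps,
	 * i.e. a system clock period of ~6.038 ns, consistent with the
	 * "6.037735849ns as reference" note earlier in this file. The old
	 * hard-coded 15125 ps would instead correspond to a ~66 MHz clock.
	 */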
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
index 1c88120eb291..bcb4db76b75c 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
@@ -297,6 +297,7 @@ static void lan966x_port_bridge_leave(struct lan966x_port *port,
lan966x_vlan_port_set_vlan_aware(port, false);
lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
lan966x_vlan_port_apply(port);
+ lan966x_vlan_port_rew_host(port);
}
int lan966x_port_changeupper(struct net_device *dev,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
index a1471e38d118..2a37fc1ba4bc 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
@@ -403,11 +403,11 @@ static void lan966x_es0_read_esdx_counter(struct lan966x *lan966x,
u32 counter;
id = id & 0xff; /* counter limit */
- mutex_lock(&lan966x->stats_lock);
+ spin_lock(&lan966x->stats_lock);
lan_wr(SYS_STAT_CFG_STAT_VIEW_SET(id), lan966x, SYS_STAT_CFG);
counter = lan_rd(lan966x, SYS_CNT(LAN966X_STAT_ESDX_GRN_PKTS)) +
lan_rd(lan966x, SYS_CNT(LAN966X_STAT_ESDX_YEL_PKTS));
- mutex_unlock(&lan966x->stats_lock);
+ spin_unlock(&lan966x->stats_lock);
if (counter)
admin->cache.counter = counter;
}
@@ -417,14 +417,14 @@ static void lan966x_es0_write_esdx_counter(struct lan966x *lan966x,
{
id = id & 0xff; /* counter limit */
- mutex_lock(&lan966x->stats_lock);
+ spin_lock(&lan966x->stats_lock);
lan_wr(SYS_STAT_CFG_STAT_VIEW_SET(id), lan966x, SYS_STAT_CFG);
lan_wr(0, lan966x, SYS_CNT(LAN966X_STAT_ESDX_GRN_BYTES));
lan_wr(admin->cache.counter, lan966x,
SYS_CNT(LAN966X_STAT_ESDX_GRN_PKTS));
lan_wr(0, lan966x, SYS_CNT(LAN966X_STAT_ESDX_YEL_BYTES));
lan_wr(0, lan966x, SYS_CNT(LAN966X_STAT_ESDX_YEL_PKTS));
- mutex_unlock(&lan966x->stats_lock);
+ spin_unlock(&lan966x->stats_lock);
}
static void lan966x_vcap_cache_write(struct net_device *dev,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
index fa34a739c748..7da22520724c 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
@@ -149,6 +149,27 @@ void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port,
port->vlan_aware = vlan_aware;
}
+/* When the interface is in host mode it should not be VLAN aware, but it
+ * must still insert any VLAN tag handed down by the network stack. That tag
+ * is carried in the skb rather than in the frame data, and the IFH is
+ * already configured to pick it up, so update the rewriter to insert the
+ * VLAN tag for every frame whose VID is non-zero.
+ */
+void lan966x_vlan_port_rew_host(struct lan966x_port *port)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 val;
+
+ /* Tag all frames except when VID=0*/
+ val = REW_TAG_CFG_TAG_CFG_SET(2);
+
+ /* Update only some bits in the register */
+ lan_rmw(val,
+ REW_TAG_CFG_TAG_CFG,
+ lan966x, REW_TAG_CFG(port->chip_port));
+}
+
void lan966x_vlan_port_apply(struct lan966x_port *port)
{
struct lan966x *lan966x = port->lan966x;
diff --git a/drivers/net/ethernet/microchip/lan969x/Kconfig b/drivers/net/ethernet/microchip/lan969x/Kconfig
deleted file mode 100644
index 728180d3fa33..000000000000
--- a/drivers/net/ethernet/microchip/lan969x/Kconfig
+++ /dev/null
@@ -1,5 +0,0 @@
-config LAN969X_SWITCH
- tristate "Lan969x switch driver"
- depends on SPARX5_SWITCH
- help
- This driver supports the lan969x family of network switch devices.
diff --git a/drivers/net/ethernet/microchip/lan969x/Makefile b/drivers/net/ethernet/microchip/lan969x/Makefile
deleted file mode 100644
index 82d318a7219c..000000000000
--- a/drivers/net/ethernet/microchip/lan969x/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the Microchip lan969x network device drivers.
-#
-
-obj-$(CONFIG_LAN969X_SWITCH) += lan969x-switch.o
-
-lan969x-switch-y := lan969x_regs.o lan969x.o lan969x_calendar.o
-
-# Provide include files
-ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/fdma
-ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap
diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig
index 3f04992eace6..a4d6706590d2 100644
--- a/drivers/net/ethernet/microchip/sparx5/Kconfig
+++ b/drivers/net/ethernet/microchip/sparx5/Kconfig
@@ -3,7 +3,7 @@ config SPARX5_SWITCH
depends on NET_SWITCHDEV
depends on HAS_IOMEM
depends on OF
- depends on ARCH_SPARX5 || COMPILE_TEST
+ depends on ARCH_SPARX5 || ARCH_LAN969X || COMPILE_TEST
depends on PTP_1588_CLOCK_OPTIONAL
depends on BRIDGE || BRIDGE=n
select PHYLINK
@@ -24,3 +24,10 @@ config SPARX5_DCB
DSCP and PCP.
If unsure, set to Y.
+
+config LAN969X_SWITCH
+ bool "Lan969x switch driver"
+ depends on SPARX5_SWITCH
+ select PAGE_POOL
+ help
+ This driver supports the lan969x family of network switch devices.
diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile
index 3435ca86dd70..d447f9e84d92 100644
--- a/drivers/net/ethernet/microchip/sparx5/Makefile
+++ b/drivers/net/ethernet/microchip/sparx5/Makefile
@@ -16,6 +16,14 @@ sparx5-switch-y := sparx5_main.o sparx5_packet.o \
sparx5-switch-$(CONFIG_SPARX5_DCB) += sparx5_dcb.o
sparx5-switch-$(CONFIG_DEBUG_FS) += sparx5_vcap_debugfs.o
+sparx5-switch-$(CONFIG_LAN969X_SWITCH) += lan969x/lan969x_regs.o \
+ lan969x/lan969x.o \
+ lan969x/lan969x_calendar.o \
+ lan969x/lan969x_vcap_ag_api.o \
+ lan969x/lan969x_vcap_impl.o \
+ lan969x/lan969x_rgmii.o \
+ lan969x/lan969x_fdma.o
+
# Provide include files
ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap
ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/fdma
diff --git a/drivers/net/ethernet/microchip/lan969x/lan969x.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c
index 79e5bcefbd73..f3a9c71bea36 100644
--- a/drivers/net/ethernet/microchip/lan969x/lan969x.c
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c
@@ -90,9 +90,12 @@ static const struct sparx5_main_io_resource lan969x_main_iomap[] = {
{ TARGET_DEV2G5 + 27, 0x30d8000, 1 }, /* 0xe30d8000 */
{ TARGET_DEV10G + 9, 0x30dc000, 1 }, /* 0xe30dc000 */
{ TARGET_PCS10G_BR + 9, 0x30e0000, 1 }, /* 0xe30e0000 */
+ { TARGET_DEVRGMII, 0x30e4000, 1 }, /* 0xe30e4000 */
+ { TARGET_DEVRGMII + 1, 0x30e8000, 1 }, /* 0xe30e8000 */
{ TARGET_DSM, 0x30ec000, 1 }, /* 0xe30ec000 */
{ TARGET_PORT_CONF, 0x30f0000, 1 }, /* 0xe30f0000 */
{ TARGET_ASM, 0x3200000, 1 }, /* 0xe3200000 */
+ { TARGET_HSIO_WRAP, 0x3408000, 1 }, /* 0xe3408000 */
};
static struct sparx5_sdlb_group lan969x_sdlb_groups[LAN969X_SDLB_GRP_CNT] = {
@@ -273,9 +276,9 @@ static irqreturn_t lan969x_ptp_irq_handler(int irq, void *args)
if (WARN_ON(!skb_match))
continue;
- spin_lock(&sparx5->ptp_ts_id_lock);
+ spin_lock_irqsave(&sparx5->ptp_ts_id_lock, flags);
sparx5->ptp_skbs--;
- spin_unlock(&sparx5->ptp_ts_id_lock);
+ spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags);
/* Get the h/w timestamp */
sparx5_get_hwtimestamp(sparx5, &ts, delay);
@@ -319,6 +322,9 @@ static const struct sparx5_consts lan969x_consts = {
.qres_max_prio_idx = 315,
.qres_max_colour_idx = 323,
.tod_pin = 4,
+ .vcaps = lan969x_vcaps,
+ .vcap_stats = &lan969x_vcap_stats,
+ .vcaps_cfg = lan969x_vcap_inst_cfg,
};
static const struct sparx5_ops lan969x_ops = {
@@ -326,6 +332,7 @@ static const struct sparx5_ops lan969x_ops = {
.is_port_5g = &lan969x_port_is_5g,
.is_port_10g = &lan969x_port_is_10g,
.is_port_25g = &lan969x_port_is_25g,
+ .is_port_rgmii = &lan969x_port_is_rgmii,
.get_port_dev_index = &lan969x_port_dev_mapping,
.get_port_dev_bit = &lan969x_get_dev_mode_bit,
.get_hsch_max_group_rate = &lan969x_get_hsch_max_group_rate,
@@ -333,6 +340,11 @@ static const struct sparx5_ops lan969x_ops = {
.set_port_mux = &lan969x_port_mux_set,
.ptp_irq_handler = &lan969x_ptp_irq_handler,
.dsm_calendar_calc = &lan969x_dsm_calendar_calc,
+ .port_config_rgmii = &lan969x_port_config_rgmii,
+ .fdma_init = &lan969x_fdma_init,
+ .fdma_deinit = &lan969x_fdma_deinit,
+ .fdma_poll = &lan969x_fdma_napi_poll,
+ .fdma_xmit = &lan969x_fdma_xmit,
};
const struct sparx5_match_data lan969x_desc = {
@@ -343,8 +355,3 @@ const struct sparx5_match_data lan969x_desc = {
.consts = &lan969x_consts,
.ops = &lan969x_ops,
};
-EXPORT_SYMBOL_GPL(lan969x_desc);
-
-MODULE_DESCRIPTION("Microchip lan969x switch driver");
-MODULE_AUTHOR("Daniel Machon <daniel.machon@microchip.com>");
-MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/net/ethernet/microchip/lan969x/lan969x.h b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h
index 7ce047ad9ca4..529fde3d4deb 100644
--- a/drivers/net/ethernet/microchip/lan969x/lan969x.h
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h
@@ -9,10 +9,18 @@
#include "../sparx5/sparx5_main.h"
#include "../sparx5/sparx5_regs.h"
+#include "../sparx5/sparx5_vcap_impl.h"
/* lan969x.c */
extern const struct sparx5_match_data lan969x_desc;
+/* lan969x_vcap_ag_api.c */
+extern const struct vcap_statistics lan969x_vcap_stats;
+extern const struct vcap_info lan969x_vcaps[];
+
+/* lan969x_vcap_impl.c */
+extern const struct sparx5_vcap_inst lan969x_vcap_inst_cfg[];
+
/* lan969x_regs.c */
extern const unsigned int lan969x_tsize[TSIZE_LAST];
extern const unsigned int lan969x_raddr[RADDR_LAST];
@@ -51,7 +59,24 @@ static inline bool lan969x_port_is_25g(int portno)
return false;
}
+static inline bool lan969x_port_is_rgmii(int portno)
+{
+ return portno == 28 || portno == 29;
+}
+
/* lan969x_calendar.c */
int lan969x_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi,
struct sparx5_calendar_data *data);
+
+/* lan969x_rgmii.c */
+int lan969x_port_config_rgmii(struct sparx5_port *port,
+ struct sparx5_port_config *conf);
+
+/* lan969x_fdma.c */
+int lan969x_fdma_init(struct sparx5 *sparx5);
+int lan969x_fdma_deinit(struct sparx5 *sparx5);
+int lan969x_fdma_napi_poll(struct napi_struct *napi, int weight);
+int lan969x_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
+ struct net_device *dev);
+
#endif
diff --git a/drivers/net/ethernet/microchip/lan969x/lan969x_calendar.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_calendar.c
index e857640df185..e857640df185 100644
--- a/drivers/net/ethernet/microchip/lan969x/lan969x_calendar.c
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_calendar.c
diff --git a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c
new file mode 100644
index 000000000000..1282f5c3ee6d
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip lan969x Switch driver
+ *
+ * Copyright (c) 2025 Microchip Technology Inc. and its subsidiaries.
+ */
+#include <net/page_pool/helpers.h>
+
+#include "../sparx5_main.h"
+#include "../sparx5_main_regs.h"
+#include "../sparx5_port.h"
+
+#include "fdma_api.h"
+#include "lan969x.h"
+
+#define FDMA_PRIV(fdma) ((struct sparx5 *)((fdma)->priv))
+
+static int lan969x_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
+ u64 *dataptr)
+{
+ *dataptr = FDMA_PRIV(fdma)->tx.dbs[dcb].dma_addr;
+
+ return 0;
+}
+
+static int lan969x_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
+ u64 *dataptr)
+{
+ struct sparx5_rx *rx = &FDMA_PRIV(fdma)->rx;
+ struct page *page;
+
+ page = page_pool_dev_alloc_pages(rx->page_pool);
+ if (unlikely(!page))
+ return -ENOMEM;
+
+ rx->page[dcb][db] = page;
+
+ *dataptr = page_pool_get_dma_addr(page);
+
+ return 0;
+}
+
+static int lan969x_fdma_get_next_dcb(struct sparx5_tx *tx)
+{
+ struct fdma *fdma = &tx->fdma;
+
+ for (int i = 0; i < fdma->n_dcbs; ++i)
+ if (!tx->dbs[i].used && !fdma_is_last(fdma, &fdma->dcbs[i]))
+ return i;
+
+ return -ENOSPC;
+}
+
+static void lan969x_fdma_tx_clear_buf(struct sparx5 *sparx5, int weight)
+{
+ struct fdma *fdma = &sparx5->tx.fdma;
+ struct sparx5_tx_buf *db;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&sparx5->tx_lock, flags);
+
+ for (i = 0; i < fdma->n_dcbs; ++i) {
+ db = &sparx5->tx.dbs[i];
+
+ if (!db->used)
+ continue;
+
+ if (!fdma_db_is_done(fdma_db_get(fdma, i, 0)))
+ continue;
+
+ db->dev->stats.tx_bytes += db->skb->len;
+ db->dev->stats.tx_packets++;
+ sparx5->tx.packets++;
+
+ dma_unmap_single(sparx5->dev,
+ db->dma_addr,
+ db->skb->len,
+ DMA_TO_DEVICE);
+
+ if (!db->ptp)
+ napi_consume_skb(db->skb, weight);
+
+ db->used = false;
+ }
+
+ spin_unlock_irqrestore(&sparx5->tx_lock, flags);
+}
+
+static void lan969x_fdma_free_pages(struct sparx5_rx *rx)
+{
+ struct fdma *fdma = &rx->fdma;
+
+ for (int i = 0; i < fdma->n_dcbs; ++i) {
+ for (int j = 0; j < fdma->n_dbs; ++j)
+ page_pool_put_full_page(rx->page_pool,
+ rx->page[i][j], false);
+ }
+}
+
+static struct sk_buff *lan969x_fdma_rx_get_frame(struct sparx5 *sparx5,
+ struct sparx5_rx *rx)
+{
+ const struct sparx5_consts *consts = sparx5->data->consts;
+ struct fdma *fdma = &rx->fdma;
+ struct sparx5_port *port;
+ struct frame_info fi;
+ struct sk_buff *skb;
+ struct fdma_db *db;
+ struct page *page;
+
+ db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
+ page = rx->page[fdma->dcb_index][fdma->db_index];
+
+ sparx5_ifh_parse(sparx5, page_address(page), &fi);
+ port = fi.src_port < consts->n_ports ? sparx5->ports[fi.src_port] :
+ NULL;
+ if (WARN_ON(!port))
+ goto free_page;
+
+ skb = build_skb(page_address(page), fdma->db_size);
+ if (unlikely(!skb))
+ goto free_page;
+
+ skb_mark_for_recycle(skb);
+ skb_put(skb, fdma_db_len_get(db));
+ skb_pull(skb, IFH_LEN * sizeof(u32));
+
+ skb->dev = port->ndev;
+
+ if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
+ skb_trim(skb, skb->len - ETH_FCS_LEN);
+
+ sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp);
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ if (test_bit(port->portno, sparx5->bridge_mask))
+ skb->offload_fwd_mark = 1;
+
+ skb->dev->stats.rx_bytes += skb->len;
+ skb->dev->stats.rx_packets++;
+
+ return skb;
+
+free_page:
+ page_pool_recycle_direct(rx->page_pool, page);
+
+ return NULL;
+}
+
+static int lan969x_fdma_rx_alloc(struct sparx5 *sparx5)
+{
+ struct sparx5_rx *rx = &sparx5->rx;
+ struct fdma *fdma = &rx->fdma;
+ int err;
+
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = fdma->n_dcbs * fdma->n_dbs,
+ .nid = NUMA_NO_NODE,
+ .dev = sparx5->dev,
+ .dma_dir = DMA_FROM_DEVICE,
+ .offset = 0,
+ .max_len = fdma->db_size -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+ };
+
+ rx->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rx->page_pool))
+ return PTR_ERR(rx->page_pool);
+
+ err = fdma_alloc_coherent(sparx5->dev, fdma);
+ if (err)
+ return err;
+
+ fdma_dcbs_init(fdma,
+ FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_INTR);
+
+ return 0;
+}
+
+static int lan969x_fdma_tx_alloc(struct sparx5 *sparx5)
+{
+ struct sparx5_tx *tx = &sparx5->tx;
+ struct fdma *fdma = &tx->fdma;
+ int err;
+
+ tx->dbs = kcalloc(fdma->n_dcbs,
+ sizeof(struct sparx5_tx_buf),
+ GFP_KERNEL);
+ if (!tx->dbs)
+ return -ENOMEM;
+
+ err = fdma_alloc_coherent(sparx5->dev, fdma);
+ if (err) {
+ kfree(tx->dbs);
+ return err;
+ }
+
+ fdma_dcbs_init(fdma,
+ FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_DONE);
+
+ return 0;
+}
+
+static void lan969x_fdma_rx_init(struct sparx5 *sparx5)
+{
+ struct fdma *fdma = &sparx5->rx.fdma;
+
+ fdma->channel_id = FDMA_XTR_CHANNEL;
+ fdma->n_dcbs = FDMA_DCB_MAX;
+ fdma->n_dbs = 1;
+ fdma->priv = sparx5;
+ fdma->size = fdma_get_size(fdma);
+ fdma->db_size = PAGE_SIZE;
+ fdma->ops.dataptr_cb = &lan969x_fdma_rx_dataptr_cb;
+ fdma->ops.nextptr_cb = &fdma_nextptr_cb;
+
+ /* Fetch a netdev for SKB and NAPI use, any will do */
+ for (int idx = 0; idx < sparx5->data->consts->n_ports; ++idx) {
+ struct sparx5_port *port = sparx5->ports[idx];
+
+ if (port && port->ndev) {
+ sparx5->rx.ndev = port->ndev;
+ break;
+ }
+ }
+}
+
+static void lan969x_fdma_tx_init(struct sparx5 *sparx5)
+{
+ struct fdma *fdma = &sparx5->tx.fdma;
+
+ fdma->channel_id = FDMA_INJ_CHANNEL;
+ fdma->n_dcbs = FDMA_DCB_MAX;
+ fdma->n_dbs = 1;
+ fdma->priv = sparx5;
+ fdma->size = fdma_get_size(fdma);
+ fdma->db_size = PAGE_SIZE;
+ fdma->ops.dataptr_cb = &lan969x_fdma_tx_dataptr_cb;
+ fdma->ops.nextptr_cb = &fdma_nextptr_cb;
+}
+
+int lan969x_fdma_napi_poll(struct napi_struct *napi, int weight)
+{
+ struct sparx5_rx *rx = container_of(napi, struct sparx5_rx, napi);
+ struct sparx5 *sparx5 = container_of(rx, struct sparx5, rx);
+ int old_dcb, dcb_reload, counter = 0;
+ struct fdma *fdma = &rx->fdma;
+ struct sk_buff *skb;
+
+ dcb_reload = fdma->dcb_index;
+
+ lan969x_fdma_tx_clear_buf(sparx5, weight);
+
+ /* Process RX data */
+ while (counter < weight) {
+ if (!fdma_has_frames(fdma))
+ break;
+
+ skb = lan969x_fdma_rx_get_frame(sparx5, rx);
+ if (!skb)
+ break;
+
+ napi_gro_receive(&rx->napi, skb);
+
+ fdma_db_advance(fdma);
+ counter++;
+ /* Check if the DCB can be reused */
+ if (fdma_dcb_is_reusable(fdma))
+ continue;
+
+ fdma_db_reset(fdma);
+ fdma_dcb_advance(fdma);
+ }
+
+ /* Allocate new pages and map them */
+ while (dcb_reload != fdma->dcb_index) {
+ old_dcb = dcb_reload;
+ dcb_reload++;
+ /* n_dcbs must be a power of 2 */
+ dcb_reload &= fdma->n_dcbs - 1;
+
+ fdma_dcb_add(fdma,
+ old_dcb,
+ FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_INTR);
+
+ sparx5_fdma_reload(sparx5, fdma);
+ }
+
+ if (counter < weight && napi_complete_done(napi, counter))
+ spx5_wr(0xff, sparx5, FDMA_INTR_DB_ENA);
+
+ return counter;
+}
+
+int lan969x_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
+ struct net_device *dev)
+{
+ int next_dcb, needed_headroom, needed_tailroom, err;
+ struct sparx5_tx *tx = &sparx5->tx;
+ struct fdma *fdma = &tx->fdma;
+ struct sparx5_tx_buf *db_buf;
+ u64 status;
+
+ next_dcb = lan969x_fdma_get_next_dcb(tx);
+ if (next_dcb < 0)
+ return -EBUSY;
+
+ needed_headroom = max_t(int, IFH_LEN * 4 - skb_headroom(skb), 0);
+ needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
+ if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
+ GFP_ATOMIC);
+ if (unlikely(err))
+ return err;
+ }
+
+ skb_push(skb, IFH_LEN * 4);
+ memcpy(skb->data, ifh, IFH_LEN * 4);
+ skb_put(skb, ETH_FCS_LEN);
+
+ db_buf = &tx->dbs[next_dcb];
+ db_buf->dma_addr = dma_map_single(sparx5->dev,
+ skb->data,
+ skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(sparx5->dev, db_buf->dma_addr))
+ return -ENOMEM;
+
+ db_buf->dev = dev;
+ db_buf->skb = skb;
+ db_buf->ptp = false;
+ db_buf->used = true;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ db_buf->ptp = true;
+
+ status = FDMA_DCB_STATUS_SOF |
+ FDMA_DCB_STATUS_EOF |
+ FDMA_DCB_STATUS_BLOCKO(0) |
+ FDMA_DCB_STATUS_BLOCKL(skb->len) |
+ FDMA_DCB_STATUS_INTR;
+
+ fdma_dcb_advance(fdma);
+ fdma_dcb_add(fdma, next_dcb, 0, status);
+
+ sparx5_fdma_reload(sparx5, fdma);
+
+ return NETDEV_TX_OK;
+}
+
+int lan969x_fdma_init(struct sparx5 *sparx5)
+{
+ struct sparx5_rx *rx = &sparx5->rx;
+ int err;
+
+ lan969x_fdma_rx_init(sparx5);
+ lan969x_fdma_tx_init(sparx5);
+ sparx5_fdma_injection_mode(sparx5);
+
+ err = dma_set_mask_and_coherent(sparx5->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(sparx5->dev, "Failed to set 64-bit FDMA mask");
+ return err;
+ }
+
+ err = lan969x_fdma_rx_alloc(sparx5);
+ if (err) {
+ dev_err(sparx5->dev, "Failed to allocate RX buffers: %d\n",
+ err);
+ return err;
+ }
+
+ err = lan969x_fdma_tx_alloc(sparx5);
+ if (err) {
+ fdma_free_coherent(sparx5->dev, &rx->fdma);
+ dev_err(sparx5->dev, "Failed to allocate TX buffers: %d\n",
+ err);
+ return err;
+ }
+
+ /* Reset FDMA state */
+ spx5_wr(FDMA_CTRL_NRESET_SET(0), sparx5, FDMA_CTRL);
+ spx5_wr(FDMA_CTRL_NRESET_SET(1), sparx5, FDMA_CTRL);
+
+ return err;
+}
+
+int lan969x_fdma_deinit(struct sparx5 *sparx5)
+{
+ struct sparx5_rx *rx = &sparx5->rx;
+ struct sparx5_tx *tx = &sparx5->tx;
+
+ sparx5_fdma_stop(sparx5);
+ fdma_free_coherent(sparx5->dev, &tx->fdma);
+ fdma_free_coherent(sparx5->dev, &rx->fdma);
+ lan969x_fdma_free_pages(rx);
+ page_pool_destroy(rx->page_pool);
+
+ return 0;
+}
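
A minimal, standalone sketch (not part of the patch above) of the ring-index wrap that lan969x_fdma_napi_poll() relies on: because n_dcbs is a power of two, "(idx + 1) & (n_dcbs - 1)" wraps the DCB index without a division. The numbers below are illustrative only.

#include <assert.h>
#include <stdio.h>

int main(void)
{
	const unsigned int n_dcbs = 64;	/* must be a power of two */
	unsigned int idx = 63;		/* last slot in the ring */

	/* power-of-two check: exactly one bit set */
	assert(n_dcbs && (n_dcbs & (n_dcbs - 1)) == 0);

	idx = (idx + 1) & (n_dcbs - 1);	/* wraps 63 -> 0 */
	printf("next DCB index: %u\n", idx);

	return 0;
}
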
diff --git a/drivers/net/ethernet/microchip/lan969x/lan969x_regs.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_regs.c
index ace4ba21eec4..ace4ba21eec4 100644
--- a/drivers/net/ethernet/microchip/lan969x/lan969x_regs.c
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_regs.c
diff --git a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_rgmii.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_rgmii.c
new file mode 100644
index 000000000000..4e422ca50828
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_rgmii.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip lan969x Switch driver
+ *
+ * Copyright (c) 2024 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "lan969x.h"
+
+/* Tx clock selectors */
+#define LAN969X_RGMII_TX_CLK_SEL_125MHZ 1 /* 1000Mbps */
+#define LAN969X_RGMII_TX_CLK_SEL_25MHZ 2 /* 100Mbps */
+#define LAN969X_RGMII_TX_CLK_SEL_2M5MHZ 3 /* 10Mbps */
+
+/* Port speed selectors */
+#define LAN969X_RGMII_SPEED_SEL_10 0 /* Select 10Mbps speed */
+#define LAN969X_RGMII_SPEED_SEL_100 1 /* Select 100Mbps speed */
+#define LAN969X_RGMII_SPEED_SEL_1000 2 /* Select 1000Mbps speed */
+
+/* Clock delay selectors */
+#define LAN969X_RGMII_CLK_DELAY_SEL_1_0_NS 2 /* Phase shift 45deg */
+#define LAN969X_RGMII_CLK_DELAY_SEL_1_7_NS 3 /* Phase shift 77deg */
+#define LAN969X_RGMII_CLK_DELAY_SEL_2_0_NS 4 /* Phase shift 90deg */
+#define LAN969X_RGMII_CLK_DELAY_SEL_2_5_NS 5 /* Phase shift 112deg */
+#define LAN969X_RGMII_CLK_DELAY_SEL_3_0_NS 6 /* Phase shift 135deg */
+#define LAN969X_RGMII_CLK_DELAY_SEL_3_3_NS 7 /* Phase shift 147deg */
+
+#define LAN969X_RGMII_PORT_START_IDX 28 /* Index of the first RGMII port */
+#define LAN969X_RGMII_IFG_TX 4 /* TX Inter Frame Gap value */
+#define LAN969X_RGMII_IFG_RX1 5 /* RX1 Inter Frame Gap value */
+#define LAN969X_RGMII_IFG_RX2 1 /* RX2 Inter Frame Gap value */
+
+#define RGMII_PORT_IDX(port) ((port)->portno - LAN969X_RGMII_PORT_START_IDX)
+
+/* Get the tx clock selector based on the port speed. */
+static int lan969x_rgmii_get_clk_sel(int speed)
+{
+ return (speed == SPEED_10 ? LAN969X_RGMII_TX_CLK_SEL_2M5MHZ :
+ speed == SPEED_100 ? LAN969X_RGMII_TX_CLK_SEL_25MHZ :
+ LAN969X_RGMII_TX_CLK_SEL_125MHZ);
+}
+
+/* Get the port speed selector based on the port speed. */
+static int lan969x_rgmii_get_speed_sel(int speed)
+{
+ return (speed == SPEED_10 ? LAN969X_RGMII_SPEED_SEL_10 :
+ speed == SPEED_100 ? LAN969X_RGMII_SPEED_SEL_100 :
+ LAN969X_RGMII_SPEED_SEL_1000);
+}
+
+/* Get the clock delay selector based on the clock delay in picoseconds. */
+static int lan969x_rgmii_get_clk_delay_sel(struct sparx5_port *port,
+ u32 delay_ps, u32 *clk_delay_sel)
+{
+ switch (delay_ps) {
+ case 0:
+ /* Hardware default selector. */
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_2_5_NS;
+ break;
+ case 1000:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_1_0_NS;
+ break;
+ case 1700:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_1_7_NS;
+ break;
+ case 2000:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_2_0_NS;
+ break;
+ case 2500:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_2_5_NS;
+ break;
+ case 3000:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_3_0_NS;
+ break;
+ case 3300:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_3_3_NS;
+ break;
+ default:
+ dev_err(port->sparx5->dev, "Invalid RGMII delay: %u", delay_ps);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Configure the RGMII tx clock frequency. */
+static void lan969x_rgmii_tx_clk_config(struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ u32 clk_sel = lan969x_rgmii_get_clk_sel(conf->speed);
+ u32 idx = RGMII_PORT_IDX(port);
+
+ /* Take the RGMII clock domain out of reset and set tx clock
+ * frequency.
+ */
+ spx5_rmw(HSIO_WRAP_RGMII_CFG_TX_CLK_CFG_SET(clk_sel) |
+ HSIO_WRAP_RGMII_CFG_RGMII_TX_RST_SET(0) |
+ HSIO_WRAP_RGMII_CFG_RGMII_RX_RST_SET(0),
+ HSIO_WRAP_RGMII_CFG_TX_CLK_CFG |
+ HSIO_WRAP_RGMII_CFG_RGMII_TX_RST |
+ HSIO_WRAP_RGMII_CFG_RGMII_RX_RST,
+ port->sparx5, HSIO_WRAP_RGMII_CFG(idx));
+}
+
+/* Configure the RGMII port device. */
+static void lan969x_rgmii_port_device_config(struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ u32 dtag, dotag, etype, speed_sel, idx = RGMII_PORT_IDX(port);
+
+ speed_sel = lan969x_rgmii_get_speed_sel(conf->speed);
+
+ etype = (port->vlan_type == SPX5_VLAN_PORT_TYPE_S_CUSTOM ?
+ port->custom_etype :
+ port->vlan_type == SPX5_VLAN_PORT_TYPE_C ?
+ ETH_P_8021Q : ETH_P_8021AD);
+
+ dtag = port->max_vlan_tags == SPX5_PORT_MAX_TAGS_TWO;
+ dotag = port->max_vlan_tags != SPX5_PORT_MAX_TAGS_NONE;
+
+ /* Enable the MAC. */
+ spx5_wr(DEVRGMII_MAC_ENA_CFG_RX_ENA_SET(1) |
+ DEVRGMII_MAC_ENA_CFG_TX_ENA_SET(1),
+ port->sparx5, DEVRGMII_MAC_ENA_CFG(idx));
+
+ /* Configure the Inter Frame Gap. */
+ spx5_wr(DEVRGMII_MAC_IFG_CFG_TX_IFG_SET(LAN969X_RGMII_IFG_TX) |
+ DEVRGMII_MAC_IFG_CFG_RX_IFG1_SET(LAN969X_RGMII_IFG_RX1) |
+ DEVRGMII_MAC_IFG_CFG_RX_IFG2_SET(LAN969X_RGMII_IFG_RX2),
+ port->sparx5, DEVRGMII_MAC_IFG_CFG(idx));
+
+ /* Configure port data rate. */
+ spx5_wr(DEVRGMII_DEV_RST_CTRL_SPEED_SEL_SET(speed_sel),
+ port->sparx5, DEVRGMII_DEV_RST_CTRL(idx));
+
+ /* Configure VLAN awareness. */
+ spx5_wr(DEVRGMII_MAC_TAGS_CFG_TAG_ID_SET(etype) |
+ DEVRGMII_MAC_TAGS_CFG_PB_ENA_SET(dtag) |
+ DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(dotag) |
+ DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(dotag),
+ port->sparx5,
+ DEVRGMII_MAC_TAGS_CFG(idx));
+}
+
+/* Configure the RGMII delay lines in the MAC.
+ *
+ * We use the "rx-internal-delay-ps" and "tx-internal-delay-ps" properties to
+ * configure the rx and tx delays for the MAC. If these properties are missing
+ * or set to zero, the MAC will not apply any delay.
+ *
+ * The PHY side delays are determined by the PHY mode
+ * (e.g. PHY_INTERFACE_MODE_RGMII_{ID, RXID, TXID}) and are ignored entirely
+ * by the MAC side.
+ */
+static int lan969x_rgmii_delay_config(struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ u32 tx_clk_sel, rx_clk_sel, tx_delay_ps = 0, rx_delay_ps = 0;
+ u32 idx = RGMII_PORT_IDX(port);
+ int err;
+
+ of_property_read_u32(port->of_node, "rx-internal-delay-ps",
+ &rx_delay_ps);
+
+ of_property_read_u32(port->of_node, "tx-internal-delay-ps",
+ &tx_delay_ps);
+
+ err = lan969x_rgmii_get_clk_delay_sel(port, rx_delay_ps, &rx_clk_sel);
+ if (err)
+ return err;
+
+ err = lan969x_rgmii_get_clk_delay_sel(port, tx_delay_ps, &tx_clk_sel);
+ if (err)
+ return err;
+
+ /* Configure rx delay. */
+ spx5_rmw(HSIO_WRAP_DLL_CFG_DLL_RST_SET(0) |
+ HSIO_WRAP_DLL_CFG_DLL_ENA_SET(1) |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_ENA_SET(!!rx_delay_ps) |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_SEL_SET(rx_clk_sel),
+ HSIO_WRAP_DLL_CFG_DLL_RST |
+ HSIO_WRAP_DLL_CFG_DLL_ENA |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_ENA |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_SEL,
+ port->sparx5, HSIO_WRAP_DLL_CFG(idx, 0));
+
+ /* Configure tx delay. */
+ spx5_rmw(HSIO_WRAP_DLL_CFG_DLL_RST_SET(0) |
+ HSIO_WRAP_DLL_CFG_DLL_ENA_SET(1) |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_ENA_SET(!!tx_delay_ps) |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_SEL_SET(tx_clk_sel),
+ HSIO_WRAP_DLL_CFG_DLL_RST |
+ HSIO_WRAP_DLL_CFG_DLL_ENA |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_ENA |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_SEL,
+ port->sparx5, HSIO_WRAP_DLL_CFG(idx, 1));
+
+ return 0;
+}
+
+/* Configure the GPIOs to be used as an RGMII interface. */
+static void lan969x_rgmii_gpio_config(struct sparx5_port *port)
+{
+ u32 idx = RGMII_PORT_IDX(port);
+
+ /* Enable the RGMII on the GPIOs. */
+ spx5_wr(HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG_SET(1), port->sparx5,
+ HSIO_WRAP_XMII_CFG(!idx));
+}
+
+int lan969x_port_config_rgmii(struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ int err;
+
+ err = lan969x_rgmii_delay_config(port, conf);
+ if (err)
+ return err;
+
+ lan969x_rgmii_tx_clk_config(port, conf);
+ lan969x_rgmii_gpio_config(port);
+ lan969x_rgmii_port_device_config(port, conf);
+
+ return 0;
+}
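
For reference, lan969x_rgmii_get_clk_delay_sel() above only accepts the discrete delays 0, 1000, 1700, 2000, 2500, 3000 and 3300 ps, with 0 falling back to the 2.5 ns hardware default. A small standalone sketch of that same mapping (not part of the patch; the selector numbers are copied from the LAN969X_RGMII_CLK_DELAY_SEL_* macros above, and the function name here is made up for illustration):

#include <stdio.h>

/* Returns the clock delay selector for a supported delay in picoseconds,
 * or -1 for an unsupported value, mirroring the kernel switch statement.
 */
static int clk_delay_sel(unsigned int delay_ps)
{
	switch (delay_ps) {
	case 0:    return 5;	/* hardware default, 2.5 ns */
	case 1000: return 2;
	case 1700: return 3;
	case 2000: return 4;
	case 2500: return 5;
	case 3000: return 6;
	case 3300: return 7;
	default:   return -1;	/* rejected, as in the kernel code */
	}
}

int main(void)
{
	printf("2000 ps -> selector %d\n", clk_delay_sel(2000));
	printf("1234 ps -> selector %d\n", clk_delay_sel(1234));
	return 0;
}
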
diff --git a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_ag_api.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_ag_api.c
new file mode 100644
index 000000000000..7acc5bcf337a
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_ag_api.c
@@ -0,0 +1,3843 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* Copyright (C) 2024 Microchip Technology Inc. and its subsidiaries.
+ * Microchip VCAP API
+ */
+
+/* This file is autogenerated by cml-utils 2024-10-07 11:10:56 +0200.
+ * Commit ID: b5ddc8e244eb2481a9524f1ddc630a8b41e7c391
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#include "lan969x.h"
+
+/* keyfields */
+static const struct vcap_field is0_normal_7tuple_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 10,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 16,
+ .width = 65,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 81,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 82,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 83,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 86,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 89,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 92,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 93,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 105,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 108,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 111,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 112,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 124,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 127,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI2] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 130,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 131,
+ .width = 12,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 144,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 192,
+ .width = 48,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 240,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 241,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 242,
+ .width = 16,
+ },
+ [VCAP_KF_IP_SNAP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 258,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 259,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 260,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 262,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 263,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 264,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 270,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 398,
+ .width = 128,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 526,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 527,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 528,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 544,
+ .width = 8,
+ },
+};
+
+static const struct vcap_field is0_normal_5tuple_ip4_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 10,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 15,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 17,
+ .width = 65,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 82,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 83,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 84,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 90,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 93,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 94,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 106,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 109,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 112,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 113,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 125,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 128,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI2] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 131,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 132,
+ .width = 12,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 145,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 146,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 147,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 149,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 150,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 151,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 157,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 189,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 221,
+ .width = 8,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 229,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 230,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 231,
+ .width = 8,
+ },
+ [VCAP_KF_IP_PAYLOAD_5TUPLE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 239,
+ .width = 32,
+ },
+};
+
+static const struct vcap_field is2_mac_etype_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 56,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 57,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 67,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 80,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 81,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 88,
+ .width = 1,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 89,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 137,
+ .width = 48,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 185,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 186,
+ .width = 16,
+ },
+ [VCAP_KF_L2_PAYLOAD_ETYPE] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 202,
+ .width = 64,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 266,
+ .width = 16,
+ },
+ [VCAP_KF_OAM_CCM_CNTS_EQ0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 282,
+ .width = 1,
+ },
+ [VCAP_KF_OAM_Y1731_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 283,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field is2_arp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 56,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 57,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 67,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 80,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 81,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 85,
+ .width = 48,
+ },
+ [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 133,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 134,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_LEN_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 135,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_TGT_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 136,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_SENDER_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 137,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 138,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 139,
+ .width = 2,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 141,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 173,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 205,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 206,
+ .width = 16,
+ },
+};
+
+static const struct vcap_field is2_ip4_tcp_udp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 56,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 57,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 67,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 80,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 81,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 88,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 89,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 90,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 92,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 93,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 95,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 103,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 135,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 167,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 168,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 169,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 185,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 201,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 217,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 218,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 219,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 220,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 221,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 222,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 223,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 224,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PAYLOAD] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 225,
+ .width = 64,
+ },
+};
+
+static const struct vcap_field is2_ip4_other_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 56,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 57,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 67,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 80,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 81,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 88,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 89,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 90,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 92,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 93,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 95,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 103,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 135,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 167,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 168,
+ .width = 8,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 176,
+ .width = 16,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U112,
+ .offset = 192,
+ .width = 96,
+ },
+};
+
+static const struct vcap_field is2_ip6_std_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 56,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 57,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 67,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 80,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 81,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 89,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 90,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 218,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 219,
+ .width = 8,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 227,
+ .width = 16,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 243,
+ .width = 40,
+ },
+};
+
+static const struct vcap_field is2_ip_7tuple_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 11,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 12,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 18,
+ .width = 65,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 83,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 98,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 111,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 112,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 115,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 118,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 119,
+ .width = 1,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 120,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 168,
+ .width = 48,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 218,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 219,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 220,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 228,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 356,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 484,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 485,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 486,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 487,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 503,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 519,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 535,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 536,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 537,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 538,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 539,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 540,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 541,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 542,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PAYLOAD] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 543,
+ .width = 64,
+ },
+};
+
+static const struct vcap_field es0_isdx_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_KF_IF_EGR_PORT_NO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 1,
+ .width = 6,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 7,
+ .width = 13,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 23,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 27,
+ .width = 1,
+ },
+ [VCAP_KF_PROT_ACTIVE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 38,
+ .width = 10,
+ },
+};
+
+static const struct vcap_field es2_mac_etype_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 28,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 41,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 44,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 76,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 77,
+ .width = 7,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 84,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 91,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 92,
+ .width = 1,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 96,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 144,
+ .width = 48,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 192,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 193,
+ .width = 16,
+ },
+ [VCAP_KF_L2_PAYLOAD_ETYPE] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 209,
+ .width = 64,
+ },
+ [VCAP_KF_OAM_CCM_CNTS_EQ0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 273,
+ .width = 1,
+ },
+ [VCAP_KF_OAM_Y1731_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 274,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field es2_arp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 28,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 41,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 44,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 76,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 77,
+ .width = 7,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 84,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 91,
+ .width = 1,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 95,
+ .width = 48,
+ },
+ [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 143,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 144,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_LEN_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 145,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_TGT_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 146,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_SENDER_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 147,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 148,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 149,
+ .width = 2,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 151,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 183,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 215,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field es2_ip4_tcp_udp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 28,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 41,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 44,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 76,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 77,
+ .width = 7,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 84,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 91,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 92,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 96,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 97,
+ .width = 2,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 99,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 100,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 101,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 109,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 141,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 173,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 174,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 175,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 191,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 207,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 223,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 224,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 225,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 226,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 227,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 228,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 229,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 230,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PAYLOAD] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 231,
+ .width = 64,
+ },
+};
+
+static const struct vcap_field es2_ip4_other_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 28,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 41,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 44,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 76,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 77,
+ .width = 7,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 84,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 91,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 92,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 96,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 97,
+ .width = 2,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 99,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 100,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 101,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 109,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 141,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 173,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 174,
+ .width = 8,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U112,
+ .offset = 182,
+ .width = 96,
+ },
+};
+
+static const struct vcap_field es2_ip_7tuple_keyfield[] = {
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 10,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 11,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 25,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 38,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 41,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 73,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 74,
+ .width = 7,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 81,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 85,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 88,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 89,
+ .width = 1,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 93,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 141,
+ .width = 48,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 191,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 192,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 193,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 201,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 329,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 457,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 458,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 459,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 460,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 476,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 492,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 508,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 509,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 510,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 511,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 512,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 513,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 514,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 515,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PAYLOAD] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 516,
+ .width = 64,
+ },
+};
+
+static const struct vcap_field es2_ip6_std_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 28,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 41,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 44,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 76,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 77,
+ .width = 7,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 84,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 91,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 92,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 96,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 97,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 225,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 226,
+ .width = 8,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 234,
+ .width = 16,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 250,
+ .width = 40,
+ },
+};
+
+/* keyfield_set */
+static const struct vcap_set is0_keyfield_set[] = {
+ [VCAP_KFS_NORMAL_7TUPLE] = {
+ .type_id = 0,
+ .sw_per_item = 12,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = {
+ .type_id = 2,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+};
+
+static const struct vcap_set is2_keyfield_set[] = {
+ [VCAP_KFS_MAC_ETYPE] = {
+ .type_id = 0,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_ARP] = {
+ .type_id = 3,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_TCP_UDP] = {
+ .type_id = 4,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_OTHER] = {
+ .type_id = 5,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP6_STD] = {
+ .type_id = 6,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP_7TUPLE] = {
+ .type_id = 1,
+ .sw_per_item = 12,
+ .sw_cnt = 1,
+ },
+};
+
+static const struct vcap_set es0_keyfield_set[] = {
+ [VCAP_KFS_ISDX] = {
+ .type_id = 0,
+ .sw_per_item = 1,
+ .sw_cnt = 1,
+ },
+};
+
+static const struct vcap_set es2_keyfield_set[] = {
+ [VCAP_KFS_MAC_ETYPE] = {
+ .type_id = 0,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_ARP] = {
+ .type_id = 1,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_TCP_UDP] = {
+ .type_id = 2,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_OTHER] = {
+ .type_id = 3,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP_7TUPLE] = {
+ .type_id = -1,
+ .sw_per_item = 12,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_IP6_STD] = {
+ .type_id = 4,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+};
+
+/* keyfield_set map */
+static const struct vcap_field *is0_keyfield_set_map[] = {
+ [VCAP_KFS_NORMAL_7TUPLE] = is0_normal_7tuple_keyfield,
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = is0_normal_5tuple_ip4_keyfield,
+};
+
+static const struct vcap_field *is2_keyfield_set_map[] = {
+ [VCAP_KFS_MAC_ETYPE] = is2_mac_etype_keyfield,
+ [VCAP_KFS_ARP] = is2_arp_keyfield,
+ [VCAP_KFS_IP4_TCP_UDP] = is2_ip4_tcp_udp_keyfield,
+ [VCAP_KFS_IP4_OTHER] = is2_ip4_other_keyfield,
+ [VCAP_KFS_IP6_STD] = is2_ip6_std_keyfield,
+ [VCAP_KFS_IP_7TUPLE] = is2_ip_7tuple_keyfield,
+};
+
+static const struct vcap_field *es0_keyfield_set_map[] = {
+ [VCAP_KFS_ISDX] = es0_isdx_keyfield,
+};
+
+static const struct vcap_field *es2_keyfield_set_map[] = {
+ [VCAP_KFS_MAC_ETYPE] = es2_mac_etype_keyfield,
+ [VCAP_KFS_ARP] = es2_arp_keyfield,
+ [VCAP_KFS_IP4_TCP_UDP] = es2_ip4_tcp_udp_keyfield,
+ [VCAP_KFS_IP4_OTHER] = es2_ip4_other_keyfield,
+ [VCAP_KFS_IP_7TUPLE] = es2_ip_7tuple_keyfield,
+ [VCAP_KFS_IP6_STD] = es2_ip6_std_keyfield,
+};
+
+/* keyfield_set map sizes */
+static int is0_keyfield_set_map_size[] = {
+ [VCAP_KFS_NORMAL_7TUPLE] = ARRAY_SIZE(is0_normal_7tuple_keyfield),
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = ARRAY_SIZE(is0_normal_5tuple_ip4_keyfield),
+};
+
+static int is2_keyfield_set_map_size[] = {
+ [VCAP_KFS_MAC_ETYPE] = ARRAY_SIZE(is2_mac_etype_keyfield),
+ [VCAP_KFS_ARP] = ARRAY_SIZE(is2_arp_keyfield),
+ [VCAP_KFS_IP4_TCP_UDP] = ARRAY_SIZE(is2_ip4_tcp_udp_keyfield),
+ [VCAP_KFS_IP4_OTHER] = ARRAY_SIZE(is2_ip4_other_keyfield),
+ [VCAP_KFS_IP6_STD] = ARRAY_SIZE(is2_ip6_std_keyfield),
+ [VCAP_KFS_IP_7TUPLE] = ARRAY_SIZE(is2_ip_7tuple_keyfield),
+};
+
+static int es0_keyfield_set_map_size[] = {
+ [VCAP_KFS_ISDX] = ARRAY_SIZE(es0_isdx_keyfield),
+};
+
+static int es2_keyfield_set_map_size[] = {
+ [VCAP_KFS_MAC_ETYPE] = ARRAY_SIZE(es2_mac_etype_keyfield),
+ [VCAP_KFS_ARP] = ARRAY_SIZE(es2_arp_keyfield),
+ [VCAP_KFS_IP4_TCP_UDP] = ARRAY_SIZE(es2_ip4_tcp_udp_keyfield),
+ [VCAP_KFS_IP4_OTHER] = ARRAY_SIZE(es2_ip4_other_keyfield),
+ [VCAP_KFS_IP_7TUPLE] = ARRAY_SIZE(es2_ip_7tuple_keyfield),
+ [VCAP_KFS_IP6_STD] = ARRAY_SIZE(es2_ip6_std_keyfield),
+};
+
+/* actionfields */
+static const struct vcap_field is0_classification_actionfield[] = {
+ [VCAP_AF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_DSCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_DSCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 6,
+ },
+ [VCAP_AF_QOS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 3,
+ },
+ [VCAP_AF_DP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_AF_DP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 2,
+ },
+ [VCAP_AF_DEI_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_AF_DEI_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 20,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 21,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 22,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_LOOKUP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 25,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_KEY] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 30,
+ .width = 7,
+ },
+ [VCAP_AF_CLS_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 37,
+ .width = 3,
+ },
+ [VCAP_AF_VID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 43,
+ .width = 13,
+ },
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 66,
+ .width = 1,
+ },
+ [VCAP_AF_ISDX_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 67,
+ .width = 10,
+ },
+ [VCAP_AF_PAG_OVERRIDE_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 107,
+ .width = 8,
+ },
+ [VCAP_AF_PAG_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 115,
+ .width = 8,
+ },
+ [VCAP_AF_NXT_IDX_CTRL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 167,
+ .width = 3,
+ },
+ [VCAP_AF_NXT_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 170,
+ .width = 10,
+ },
+};
+
+static const struct vcap_field is0_full_actionfield[] = {
+ [VCAP_AF_DSCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_DSCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 1,
+ .width = 6,
+ },
+ [VCAP_AF_QOS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 11,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 12,
+ .width = 3,
+ },
+ [VCAP_AF_DP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_AF_DP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 2,
+ },
+ [VCAP_AF_DEI_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 18,
+ .width = 1,
+ },
+ [VCAP_AF_DEI_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 20,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 21,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_LOOKUP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 24,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_KEY] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 26,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 7,
+ },
+ [VCAP_AF_CLS_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 36,
+ .width = 3,
+ },
+ [VCAP_AF_VID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 13,
+ },
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 65,
+ .width = 1,
+ },
+ [VCAP_AF_ISDX_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 66,
+ .width = 10,
+ },
+ [VCAP_AF_MASK_MODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 76,
+ .width = 3,
+ },
+ [VCAP_AF_PORT_MASK] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 79,
+ .width = 37,
+ },
+ [VCAP_AF_PAG_OVERRIDE_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 174,
+ .width = 8,
+ },
+ [VCAP_AF_PAG_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 182,
+ .width = 8,
+ },
+ [VCAP_AF_NXT_IDX_CTRL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 266,
+ .width = 3,
+ },
+ [VCAP_AF_NXT_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 269,
+ .width = 10,
+ },
+};
+
+static const struct vcap_field is0_class_reduced_actionfield[] = {
+ [VCAP_AF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 5,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 6,
+ .width = 3,
+ },
+ [VCAP_AF_DP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 9,
+ .width = 1,
+ },
+ [VCAP_AF_DP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 10,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_LOOKUP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 12,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_KEY] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 3,
+ },
+ [VCAP_AF_CLS_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 3,
+ },
+ [VCAP_AF_VID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 23,
+ .width = 13,
+ },
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 46,
+ .width = 1,
+ },
+ [VCAP_AF_ISDX_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 47,
+ .width = 10,
+ },
+ [VCAP_AF_NXT_IDX_CTRL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 89,
+ .width = 3,
+ },
+ [VCAP_AF_NXT_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 92,
+ .width = 10,
+ },
+};
+
+static const struct vcap_field is2_base_type_actionfield[] = {
+ [VCAP_AF_PIPELINE_FORCE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_PIPELINE_PT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 5,
+ },
+ [VCAP_AF_HIT_ME_ONCE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 7,
+ .width = 1,
+ },
+ [VCAP_AF_INTR_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 8,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_COPY_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 9,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_QUEUE_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 10,
+ .width = 3,
+ },
+ [VCAP_AF_LRN_DIS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_AF_RT_DIS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 17,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 5,
+ },
+ [VCAP_AF_IGNORE_PIPELINE_CTRL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_AF_MASK_MODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 3,
+ },
+ [VCAP_AF_PORT_MASK] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 30,
+ .width = 37,
+ },
+ [VCAP_AF_MIRROR_PROBE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 2,
+ },
+ [VCAP_AF_MATCH_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 131,
+ .width = 16,
+ },
+ [VCAP_AF_MATCH_ID_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 147,
+ .width = 16,
+ },
+ [VCAP_AF_CNT_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 163,
+ .width = 10,
+ },
+};
+
+static const struct vcap_field es0_es0_actionfield[] = {
+ [VCAP_AF_PUSH_OUTER_TAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_AF_PUSH_INNER_TAG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_AF_TAG_A_TPID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_A_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 6,
+ .width = 2,
+ },
+ [VCAP_AF_TAG_A_PCP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 8,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_A_DEI_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 11,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_B_TPID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_B_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 2,
+ },
+ [VCAP_AF_TAG_B_PCP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 19,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_B_DEI_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 22,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_C_TPID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 25,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_C_PCP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 28,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_C_DEI_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 31,
+ .width = 3,
+ },
+ [VCAP_AF_VID_A_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 34,
+ .width = 12,
+ },
+ [VCAP_AF_PCP_A_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 46,
+ .width = 3,
+ },
+ [VCAP_AF_DEI_A_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 49,
+ .width = 1,
+ },
+ [VCAP_AF_VID_B_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 50,
+ .width = 12,
+ },
+ [VCAP_AF_PCP_B_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 62,
+ .width = 3,
+ },
+ [VCAP_AF_DEI_B_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 65,
+ .width = 1,
+ },
+ [VCAP_AF_VID_C_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 66,
+ .width = 12,
+ },
+ [VCAP_AF_PCP_C_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 3,
+ },
+ [VCAP_AF_DEI_C_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 81,
+ .width = 1,
+ },
+ [VCAP_AF_POP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 82,
+ .width = 2,
+ },
+ [VCAP_AF_UNTAG_VID_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_AF_PUSH_CUSTOMER_TAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 85,
+ .width = 2,
+ },
+ [VCAP_AF_TAG_C_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 2,
+ },
+ [VCAP_AF_DSCP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 127,
+ .width = 3,
+ },
+ [VCAP_AF_DSCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 130,
+ .width = 6,
+ },
+ [VCAP_AF_ESDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 319,
+ .width = 10,
+ },
+ [VCAP_AF_FWD_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 438,
+ .width = 2,
+ },
+ [VCAP_AF_CPU_QU] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 440,
+ .width = 3,
+ },
+ [VCAP_AF_PIPELINE_PT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 443,
+ .width = 2,
+ },
+ [VCAP_AF_PIPELINE_ACT] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 445,
+ .width = 1,
+ },
+ [VCAP_AF_SWAP_MACS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 454,
+ .width = 1,
+ },
+ [VCAP_AF_LOOP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 455,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field es2_base_type_actionfield[] = {
+ [VCAP_AF_HIT_ME_ONCE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_INTR_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_FWD_MODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 2,
+ },
+ [VCAP_AF_COPY_QUEUE_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 14,
+ },
+ [VCAP_AF_COPY_PORT_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 6,
+ },
+ [VCAP_AF_MIRROR_PROBE_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 24,
+ .width = 2,
+ },
+ [VCAP_AF_CPU_COPY_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_QUEUE_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 3,
+ },
+ [VCAP_AF_POLICE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 30,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_REMARK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 31,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 32,
+ .width = 5,
+ },
+ [VCAP_AF_ES2_REW_CMD] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 37,
+ .width = 3,
+ },
+ [VCAP_AF_CNT_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 40,
+ .width = 9,
+ },
+ [VCAP_AF_IGNORE_PIPELINE_CTRL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 49,
+ .width = 1,
+ },
+};
+
+/* actionfield_set */
+static const struct vcap_set is0_actionfield_set[] = {
+ [VCAP_AFS_CLASSIFICATION] = {
+ .type_id = 1,
+ .sw_per_item = 2,
+ .sw_cnt = 6,
+ },
+ [VCAP_AFS_FULL] = {
+ .type_id = -1,
+ .sw_per_item = 3,
+ .sw_cnt = 4,
+ },
+ [VCAP_AFS_CLASS_REDUCED] = {
+ .type_id = 1,
+ .sw_per_item = 1,
+ .sw_cnt = 12,
+ },
+};
+
+static const struct vcap_set is2_actionfield_set[] = {
+ [VCAP_AFS_BASE_TYPE] = {
+ .type_id = -1,
+ .sw_per_item = 3,
+ .sw_cnt = 4,
+ },
+};
+
+static const struct vcap_set es0_actionfield_set[] = {
+ [VCAP_AFS_ES0] = {
+ .type_id = -1,
+ .sw_per_item = 1,
+ .sw_cnt = 1,
+ },
+};
+
+static const struct vcap_set es2_actionfield_set[] = {
+ [VCAP_AFS_BASE_TYPE] = {
+ .type_id = -1,
+ .sw_per_item = 3,
+ .sw_cnt = 4,
+ },
+};
+
+/* actionfield_set map */
+static const struct vcap_field *is0_actionfield_set_map[] = {
+ [VCAP_AFS_CLASSIFICATION] = is0_classification_actionfield,
+ [VCAP_AFS_FULL] = is0_full_actionfield,
+ [VCAP_AFS_CLASS_REDUCED] = is0_class_reduced_actionfield,
+};
+
+static const struct vcap_field *is2_actionfield_set_map[] = {
+ [VCAP_AFS_BASE_TYPE] = is2_base_type_actionfield,
+};
+
+static const struct vcap_field *es0_actionfield_set_map[] = {
+ [VCAP_AFS_ES0] = es0_es0_actionfield,
+};
+
+static const struct vcap_field *es2_actionfield_set_map[] = {
+ [VCAP_AFS_BASE_TYPE] = es2_base_type_actionfield,
+};
+
+/* actionfield_set map size */
+static int is0_actionfield_set_map_size[] = {
+ [VCAP_AFS_CLASSIFICATION] = ARRAY_SIZE(is0_classification_actionfield),
+ [VCAP_AFS_FULL] = ARRAY_SIZE(is0_full_actionfield),
+ [VCAP_AFS_CLASS_REDUCED] = ARRAY_SIZE(is0_class_reduced_actionfield),
+};
+
+static int is2_actionfield_set_map_size[] = {
+ [VCAP_AFS_BASE_TYPE] = ARRAY_SIZE(is2_base_type_actionfield),
+};
+
+static int es0_actionfield_set_map_size[] = {
+ [VCAP_AFS_ES0] = ARRAY_SIZE(es0_es0_actionfield),
+};
+
+static int es2_actionfield_set_map_size[] = {
+ [VCAP_AFS_BASE_TYPE] = ARRAY_SIZE(es2_base_type_actionfield),
+};
+
+/* Type Groups */
+static const struct vcap_typegroup is0_x12_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 5,
+ .value = 16,
+ },
+ {
+ .offset = 52,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 104,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 156,
+ .width = 3,
+ .value = 0,
+ },
+ {
+ .offset = 208,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 260,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 312,
+ .width = 4,
+ .value = 0,
+ },
+ {
+ .offset = 364,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 416,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 468,
+ .width = 3,
+ .value = 0,
+ },
+ {
+ .offset = 520,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 572,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x6_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 4,
+ .value = 8,
+ },
+ {
+ .offset = 52,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 104,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 156,
+ .width = 3,
+ .value = 0,
+ },
+ {
+ .offset = 208,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 260,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x3_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup is0_x2_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup is0_x1_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup is2_x12_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 156,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 312,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 468,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x6_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 156,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x3_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup is2_x1_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup es0_x1_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup es2_x12_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 156,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 312,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 468,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es2_x6_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 156,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es2_x3_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup es2_x1_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup *is0_keyfield_set_typegroups[] = {
+ [12] = is0_x12_keyfield_set_typegroups,
+ [6] = is0_x6_keyfield_set_typegroups,
+ [3] = is0_x3_keyfield_set_typegroups,
+ [2] = is0_x2_keyfield_set_typegroups,
+ [1] = is0_x1_keyfield_set_typegroups,
+ [13] = NULL,
+};
+
+static const struct vcap_typegroup *is2_keyfield_set_typegroups[] = {
+ [12] = is2_x12_keyfield_set_typegroups,
+ [6] = is2_x6_keyfield_set_typegroups,
+ [3] = is2_x3_keyfield_set_typegroups,
+ [1] = is2_x1_keyfield_set_typegroups,
+ [13] = NULL,
+};
+
+static const struct vcap_typegroup *es0_keyfield_set_typegroups[] = {
+ [1] = es0_x1_keyfield_set_typegroups,
+ [2] = NULL,
+};
+
+static const struct vcap_typegroup *es2_keyfield_set_typegroups[] = {
+ [12] = es2_x12_keyfield_set_typegroups,
+ [6] = es2_x6_keyfield_set_typegroups,
+ [3] = es2_x3_keyfield_set_typegroups,
+ [1] = es2_x1_keyfield_set_typegroups,
+ [13] = NULL,
+};
+
+static const struct vcap_typegroup is0_x3_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 103,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 206,
+ .width = 2,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x2_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 103,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x1_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 1,
+ .value = 1,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x3_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 95,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 190,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x1_actionfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup es0_x1_actionfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup es2_x3_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 19,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 38,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es2_x1_actionfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup *is0_actionfield_set_typegroups[] = {
+ [3] = is0_x3_actionfield_set_typegroups,
+ [2] = is0_x2_actionfield_set_typegroups,
+ [1] = is0_x1_actionfield_set_typegroups,
+ [13] = NULL,
+};
+
+static const struct vcap_typegroup *is2_actionfield_set_typegroups[] = {
+ [3] = is2_x3_actionfield_set_typegroups,
+ [1] = is2_x1_actionfield_set_typegroups,
+ [13] = NULL,
+};
+
+static const struct vcap_typegroup *es0_actionfield_set_typegroups[] = {
+ [1] = es0_x1_actionfield_set_typegroups,
+ [2] = NULL,
+};
+
+static const struct vcap_typegroup *es2_actionfield_set_typegroups[] = {
+ [3] = es2_x3_actionfield_set_typegroups,
+ [1] = es2_x1_actionfield_set_typegroups,
+ [13] = NULL,
+};
+
+/* Keyfieldset names */
+static const char * const vcap_keyfield_set_names[] = {
+ [VCAP_KFS_NO_VALUE] = "(None)",
+ [VCAP_KFS_ARP] = "VCAP_KFS_ARP",
+ [VCAP_KFS_ETAG] = "VCAP_KFS_ETAG",
+ [VCAP_KFS_IP4_OTHER] = "VCAP_KFS_IP4_OTHER",
+ [VCAP_KFS_IP4_TCP_UDP] = "VCAP_KFS_IP4_TCP_UDP",
+ [VCAP_KFS_IP4_VID] = "VCAP_KFS_IP4_VID",
+ [VCAP_KFS_IP6_OTHER] = "VCAP_KFS_IP6_OTHER",
+ [VCAP_KFS_IP6_STD] = "VCAP_KFS_IP6_STD",
+ [VCAP_KFS_IP6_TCP_UDP] = "VCAP_KFS_IP6_TCP_UDP",
+ [VCAP_KFS_IP6_VID] = "VCAP_KFS_IP6_VID",
+ [VCAP_KFS_IP_7TUPLE] = "VCAP_KFS_IP_7TUPLE",
+ [VCAP_KFS_ISDX] = "VCAP_KFS_ISDX",
+ [VCAP_KFS_LL_FULL] = "VCAP_KFS_LL_FULL",
+ [VCAP_KFS_MAC_ETYPE] = "VCAP_KFS_MAC_ETYPE",
+ [VCAP_KFS_MAC_LLC] = "VCAP_KFS_MAC_LLC",
+ [VCAP_KFS_MAC_SNAP] = "VCAP_KFS_MAC_SNAP",
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = "VCAP_KFS_NORMAL_5TUPLE_IP4",
+ [VCAP_KFS_NORMAL_7TUPLE] = "VCAP_KFS_NORMAL_7TUPLE",
+ [VCAP_KFS_OAM] = "VCAP_KFS_OAM",
+ [VCAP_KFS_PURE_5TUPLE_IP4] = "VCAP_KFS_PURE_5TUPLE_IP4",
+ [VCAP_KFS_SMAC_SIP4] = "VCAP_KFS_SMAC_SIP4",
+ [VCAP_KFS_SMAC_SIP6] = "VCAP_KFS_SMAC_SIP6",
+};
+
+/* Actionfieldset names */
+static const char * const vcap_actionfield_set_names[] = {
+ [VCAP_AFS_NO_VALUE] = "(None)",
+ [VCAP_AFS_BASE_TYPE] = "VCAP_AFS_BASE_TYPE",
+ [VCAP_AFS_CLASSIFICATION] = "VCAP_AFS_CLASSIFICATION",
+ [VCAP_AFS_CLASS_REDUCED] = "VCAP_AFS_CLASS_REDUCED",
+ [VCAP_AFS_ES0] = "VCAP_AFS_ES0",
+ [VCAP_AFS_FULL] = "VCAP_AFS_FULL",
+ [VCAP_AFS_SMAC_SIP] = "VCAP_AFS_SMAC_SIP",
+};
+
+/* Keyfield names */
+static const char * const vcap_keyfield_names[] = {
+ [VCAP_KF_NO_VALUE] = "(None)",
+ [VCAP_KF_8021BR_ECID_BASE] = "8021BR_ECID_BASE",
+ [VCAP_KF_8021BR_ECID_EXT] = "8021BR_ECID_EXT",
+ [VCAP_KF_8021BR_E_TAGGED] = "8021BR_E_TAGGED",
+ [VCAP_KF_8021BR_GRP] = "8021BR_GRP",
+ [VCAP_KF_8021BR_IGR_ECID_BASE] = "8021BR_IGR_ECID_BASE",
+ [VCAP_KF_8021BR_IGR_ECID_EXT] = "8021BR_IGR_ECID_EXT",
+ [VCAP_KF_8021Q_DEI0] = "8021Q_DEI0",
+ [VCAP_KF_8021Q_DEI1] = "8021Q_DEI1",
+ [VCAP_KF_8021Q_DEI2] = "8021Q_DEI2",
+ [VCAP_KF_8021Q_DEI_CLS] = "8021Q_DEI_CLS",
+ [VCAP_KF_8021Q_PCP0] = "8021Q_PCP0",
+ [VCAP_KF_8021Q_PCP1] = "8021Q_PCP1",
+ [VCAP_KF_8021Q_PCP2] = "8021Q_PCP2",
+ [VCAP_KF_8021Q_PCP_CLS] = "8021Q_PCP_CLS",
+ [VCAP_KF_8021Q_TPID] = "8021Q_TPID",
+ [VCAP_KF_8021Q_TPID0] = "8021Q_TPID0",
+ [VCAP_KF_8021Q_TPID1] = "8021Q_TPID1",
+ [VCAP_KF_8021Q_TPID2] = "8021Q_TPID2",
+ [VCAP_KF_8021Q_VID0] = "8021Q_VID0",
+ [VCAP_KF_8021Q_VID1] = "8021Q_VID1",
+ [VCAP_KF_8021Q_VID2] = "8021Q_VID2",
+ [VCAP_KF_8021Q_VID_CLS] = "8021Q_VID_CLS",
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = "8021Q_VLAN_TAGGED_IS",
+ [VCAP_KF_8021Q_VLAN_TAGS] = "8021Q_VLAN_TAGS",
+ [VCAP_KF_ACL_GRP_ID] = "ACL_GRP_ID",
+ [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = "ARP_ADDR_SPACE_OK_IS",
+ [VCAP_KF_ARP_LEN_OK_IS] = "ARP_LEN_OK_IS",
+ [VCAP_KF_ARP_OPCODE] = "ARP_OPCODE",
+ [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = "ARP_OPCODE_UNKNOWN_IS",
+ [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = "ARP_PROTO_SPACE_OK_IS",
+ [VCAP_KF_ARP_SENDER_MATCH_IS] = "ARP_SENDER_MATCH_IS",
+ [VCAP_KF_ARP_TGT_MATCH_IS] = "ARP_TGT_MATCH_IS",
+ [VCAP_KF_COSID_CLS] = "COSID_CLS",
+ [VCAP_KF_ES0_ISDX_KEY_ENA] = "ES0_ISDX_KEY_ENA",
+ [VCAP_KF_ETYPE] = "ETYPE",
+ [VCAP_KF_ETYPE_LEN_IS] = "ETYPE_LEN_IS",
+ [VCAP_KF_HOST_MATCH] = "HOST_MATCH",
+ [VCAP_KF_IF_EGR_PORT_MASK] = "IF_EGR_PORT_MASK",
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = "IF_EGR_PORT_MASK_RNG",
+ [VCAP_KF_IF_EGR_PORT_NO] = "IF_EGR_PORT_NO",
+ [VCAP_KF_IF_IGR_PORT] = "IF_IGR_PORT",
+ [VCAP_KF_IF_IGR_PORT_MASK] = "IF_IGR_PORT_MASK",
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = "IF_IGR_PORT_MASK_L3",
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = "IF_IGR_PORT_MASK_RNG",
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = "IF_IGR_PORT_MASK_SEL",
+ [VCAP_KF_IF_IGR_PORT_SEL] = "IF_IGR_PORT_SEL",
+ [VCAP_KF_IP4_IS] = "IP4_IS",
+ [VCAP_KF_IP_MC_IS] = "IP_MC_IS",
+ [VCAP_KF_IP_PAYLOAD_5TUPLE] = "IP_PAYLOAD_5TUPLE",
+ [VCAP_KF_IP_SNAP_IS] = "IP_SNAP_IS",
+ [VCAP_KF_ISDX_CLS] = "ISDX_CLS",
+ [VCAP_KF_ISDX_GT0_IS] = "ISDX_GT0_IS",
+ [VCAP_KF_L2_BC_IS] = "L2_BC_IS",
+ [VCAP_KF_L2_DMAC] = "L2_DMAC",
+ [VCAP_KF_L2_FRM_TYPE] = "L2_FRM_TYPE",
+ [VCAP_KF_L2_FWD_IS] = "L2_FWD_IS",
+ [VCAP_KF_L2_LLC] = "L2_LLC",
+ [VCAP_KF_L2_MC_IS] = "L2_MC_IS",
+ [VCAP_KF_L2_PAYLOAD0] = "L2_PAYLOAD0",
+ [VCAP_KF_L2_PAYLOAD1] = "L2_PAYLOAD1",
+ [VCAP_KF_L2_PAYLOAD2] = "L2_PAYLOAD2",
+ [VCAP_KF_L2_PAYLOAD_ETYPE] = "L2_PAYLOAD_ETYPE",
+ [VCAP_KF_L2_SMAC] = "L2_SMAC",
+ [VCAP_KF_L2_SNAP] = "L2_SNAP",
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = "L3_DIP_EQ_SIP_IS",
+ [VCAP_KF_L3_DPL_CLS] = "L3_DPL_CLS",
+ [VCAP_KF_L3_DSCP] = "L3_DSCP",
+ [VCAP_KF_L3_DST_IS] = "L3_DST_IS",
+ [VCAP_KF_L3_FRAGMENT] = "L3_FRAGMENT",
+ [VCAP_KF_L3_FRAGMENT_TYPE] = "L3_FRAGMENT_TYPE",
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = "L3_FRAG_INVLD_L4_LEN",
+ [VCAP_KF_L3_FRAG_OFS_GT0] = "L3_FRAG_OFS_GT0",
+ [VCAP_KF_L3_IP4_DIP] = "L3_IP4_DIP",
+ [VCAP_KF_L3_IP4_SIP] = "L3_IP4_SIP",
+ [VCAP_KF_L3_IP6_DIP] = "L3_IP6_DIP",
+ [VCAP_KF_L3_IP6_SIP] = "L3_IP6_SIP",
+ [VCAP_KF_L3_IP_PROTO] = "L3_IP_PROTO",
+ [VCAP_KF_L3_OPTIONS_IS] = "L3_OPTIONS_IS",
+ [VCAP_KF_L3_PAYLOAD] = "L3_PAYLOAD",
+ [VCAP_KF_L3_RT_IS] = "L3_RT_IS",
+ [VCAP_KF_L3_TOS] = "L3_TOS",
+ [VCAP_KF_L3_TTL_GT0] = "L3_TTL_GT0",
+ [VCAP_KF_L4_1588_DOM] = "L4_1588_DOM",
+ [VCAP_KF_L4_1588_VER] = "L4_1588_VER",
+ [VCAP_KF_L4_ACK] = "L4_ACK",
+ [VCAP_KF_L4_DPORT] = "L4_DPORT",
+ [VCAP_KF_L4_FIN] = "L4_FIN",
+ [VCAP_KF_L4_PAYLOAD] = "L4_PAYLOAD",
+ [VCAP_KF_L4_PSH] = "L4_PSH",
+ [VCAP_KF_L4_RNG] = "L4_RNG",
+ [VCAP_KF_L4_RST] = "L4_RST",
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = "L4_SEQUENCE_EQ0_IS",
+ [VCAP_KF_L4_SPORT] = "L4_SPORT",
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = "L4_SPORT_EQ_DPORT_IS",
+ [VCAP_KF_L4_SYN] = "L4_SYN",
+ [VCAP_KF_L4_URG] = "L4_URG",
+ [VCAP_KF_LOOKUP_FIRST_IS] = "LOOKUP_FIRST_IS",
+ [VCAP_KF_LOOKUP_GEN_IDX] = "LOOKUP_GEN_IDX",
+ [VCAP_KF_LOOKUP_GEN_IDX_SEL] = "LOOKUP_GEN_IDX_SEL",
+ [VCAP_KF_LOOKUP_PAG] = "LOOKUP_PAG",
+ [VCAP_KF_MIRROR_PROBE] = "MIRROR_PROBE",
+ [VCAP_KF_OAM_CCM_CNTS_EQ0] = "OAM_CCM_CNTS_EQ0",
+ [VCAP_KF_OAM_DETECTED] = "OAM_DETECTED",
+ [VCAP_KF_OAM_FLAGS] = "OAM_FLAGS",
+ [VCAP_KF_OAM_MEL_FLAGS] = "OAM_MEL_FLAGS",
+ [VCAP_KF_OAM_MEPID] = "OAM_MEPID",
+ [VCAP_KF_OAM_OPCODE] = "OAM_OPCODE",
+ [VCAP_KF_OAM_VER] = "OAM_VER",
+ [VCAP_KF_OAM_Y1731_IS] = "OAM_Y1731_IS",
+ [VCAP_KF_PROT_ACTIVE] = "PROT_ACTIVE",
+ [VCAP_KF_TCP_IS] = "TCP_IS",
+ [VCAP_KF_TCP_UDP_IS] = "TCP_UDP_IS",
+ [VCAP_KF_TYPE] = "TYPE",
+};
+
+/* Actionfield names */
+static const char * const vcap_actionfield_names[] = {
+ [VCAP_AF_NO_VALUE] = "(None)",
+ [VCAP_AF_ACL_ID] = "ACL_ID",
+ [VCAP_AF_CLS_VID_SEL] = "CLS_VID_SEL",
+ [VCAP_AF_CNT_ID] = "CNT_ID",
+ [VCAP_AF_COPY_PORT_NUM] = "COPY_PORT_NUM",
+ [VCAP_AF_COPY_QUEUE_NUM] = "COPY_QUEUE_NUM",
+ [VCAP_AF_CPU_COPY_ENA] = "CPU_COPY_ENA",
+ [VCAP_AF_CPU_QU] = "CPU_QU",
+ [VCAP_AF_CPU_QUEUE_NUM] = "CPU_QUEUE_NUM",
+ [VCAP_AF_DEI_A_VAL] = "DEI_A_VAL",
+ [VCAP_AF_DEI_B_VAL] = "DEI_B_VAL",
+ [VCAP_AF_DEI_C_VAL] = "DEI_C_VAL",
+ [VCAP_AF_DEI_ENA] = "DEI_ENA",
+ [VCAP_AF_DEI_VAL] = "DEI_VAL",
+ [VCAP_AF_DP_ENA] = "DP_ENA",
+ [VCAP_AF_DP_VAL] = "DP_VAL",
+ [VCAP_AF_DSCP_ENA] = "DSCP_ENA",
+ [VCAP_AF_DSCP_SEL] = "DSCP_SEL",
+ [VCAP_AF_DSCP_VAL] = "DSCP_VAL",
+ [VCAP_AF_ES2_REW_CMD] = "ES2_REW_CMD",
+ [VCAP_AF_ESDX] = "ESDX",
+ [VCAP_AF_FWD_KILL_ENA] = "FWD_KILL_ENA",
+ [VCAP_AF_FWD_MODE] = "FWD_MODE",
+ [VCAP_AF_FWD_SEL] = "FWD_SEL",
+ [VCAP_AF_HIT_ME_ONCE] = "HIT_ME_ONCE",
+ [VCAP_AF_HOST_MATCH] = "HOST_MATCH",
+ [VCAP_AF_IGNORE_PIPELINE_CTRL] = "IGNORE_PIPELINE_CTRL",
+ [VCAP_AF_INTR_ENA] = "INTR_ENA",
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = "ISDX_ADD_REPLACE_SEL",
+ [VCAP_AF_ISDX_ENA] = "ISDX_ENA",
+ [VCAP_AF_ISDX_VAL] = "ISDX_VAL",
+ [VCAP_AF_LOOP_ENA] = "LOOP_ENA",
+ [VCAP_AF_LRN_DIS] = "LRN_DIS",
+ [VCAP_AF_MAP_IDX] = "MAP_IDX",
+ [VCAP_AF_MAP_KEY] = "MAP_KEY",
+ [VCAP_AF_MAP_LOOKUP_SEL] = "MAP_LOOKUP_SEL",
+ [VCAP_AF_MASK_MODE] = "MASK_MODE",
+ [VCAP_AF_MATCH_ID] = "MATCH_ID",
+ [VCAP_AF_MATCH_ID_MASK] = "MATCH_ID_MASK",
+ [VCAP_AF_MIRROR_ENA] = "MIRROR_ENA",
+ [VCAP_AF_MIRROR_PROBE] = "MIRROR_PROBE",
+ [VCAP_AF_MIRROR_PROBE_ID] = "MIRROR_PROBE_ID",
+ [VCAP_AF_NXT_IDX] = "NXT_IDX",
+ [VCAP_AF_NXT_IDX_CTRL] = "NXT_IDX_CTRL",
+ [VCAP_AF_PAG_OVERRIDE_MASK] = "PAG_OVERRIDE_MASK",
+ [VCAP_AF_PAG_VAL] = "PAG_VAL",
+ [VCAP_AF_PCP_A_VAL] = "PCP_A_VAL",
+ [VCAP_AF_PCP_B_VAL] = "PCP_B_VAL",
+ [VCAP_AF_PCP_C_VAL] = "PCP_C_VAL",
+ [VCAP_AF_PCP_ENA] = "PCP_ENA",
+ [VCAP_AF_PCP_VAL] = "PCP_VAL",
+ [VCAP_AF_PIPELINE_ACT] = "PIPELINE_ACT",
+ [VCAP_AF_PIPELINE_FORCE_ENA] = "PIPELINE_FORCE_ENA",
+ [VCAP_AF_PIPELINE_PT] = "PIPELINE_PT",
+ [VCAP_AF_POLICE_ENA] = "POLICE_ENA",
+ [VCAP_AF_POLICE_IDX] = "POLICE_IDX",
+ [VCAP_AF_POLICE_REMARK] = "POLICE_REMARK",
+ [VCAP_AF_POLICE_VCAP_ONLY] = "POLICE_VCAP_ONLY",
+ [VCAP_AF_POP_VAL] = "POP_VAL",
+ [VCAP_AF_PORT_MASK] = "PORT_MASK",
+ [VCAP_AF_PUSH_CUSTOMER_TAG] = "PUSH_CUSTOMER_TAG",
+ [VCAP_AF_PUSH_INNER_TAG] = "PUSH_INNER_TAG",
+ [VCAP_AF_PUSH_OUTER_TAG] = "PUSH_OUTER_TAG",
+ [VCAP_AF_QOS_ENA] = "QOS_ENA",
+ [VCAP_AF_QOS_VAL] = "QOS_VAL",
+ [VCAP_AF_REW_OP] = "REW_OP",
+ [VCAP_AF_RT_DIS] = "RT_DIS",
+ [VCAP_AF_SWAP_MACS_ENA] = "SWAP_MACS_ENA",
+ [VCAP_AF_TAG_A_DEI_SEL] = "TAG_A_DEI_SEL",
+ [VCAP_AF_TAG_A_PCP_SEL] = "TAG_A_PCP_SEL",
+ [VCAP_AF_TAG_A_TPID_SEL] = "TAG_A_TPID_SEL",
+ [VCAP_AF_TAG_A_VID_SEL] = "TAG_A_VID_SEL",
+ [VCAP_AF_TAG_B_DEI_SEL] = "TAG_B_DEI_SEL",
+ [VCAP_AF_TAG_B_PCP_SEL] = "TAG_B_PCP_SEL",
+ [VCAP_AF_TAG_B_TPID_SEL] = "TAG_B_TPID_SEL",
+ [VCAP_AF_TAG_B_VID_SEL] = "TAG_B_VID_SEL",
+ [VCAP_AF_TAG_C_DEI_SEL] = "TAG_C_DEI_SEL",
+ [VCAP_AF_TAG_C_PCP_SEL] = "TAG_C_PCP_SEL",
+ [VCAP_AF_TAG_C_TPID_SEL] = "TAG_C_TPID_SEL",
+ [VCAP_AF_TAG_C_VID_SEL] = "TAG_C_VID_SEL",
+ [VCAP_AF_TYPE] = "TYPE",
+ [VCAP_AF_UNTAG_VID_ENA] = "UNTAG_VID_ENA",
+ [VCAP_AF_VID_A_VAL] = "VID_A_VAL",
+ [VCAP_AF_VID_B_VAL] = "VID_B_VAL",
+ [VCAP_AF_VID_C_VAL] = "VID_C_VAL",
+ [VCAP_AF_VID_VAL] = "VID_VAL",
+};
+
+/* VCAPs */
+const struct vcap_info lan969x_vcaps[] = {
+ [VCAP_TYPE_IS0] = {
+ .name = "is0",
+ .rows = 256,
+ .sw_count = 12,
+ .sw_width = 52,
+ .sticky_width = 1,
+ .act_width = 103,
+ .default_cnt = 70,
+ .require_cnt_dis = 0,
+ .version = 1,
+ .keyfield_set = is0_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(is0_keyfield_set),
+ .actionfield_set = is0_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(is0_actionfield_set),
+ .keyfield_set_map = is0_keyfield_set_map,
+ .keyfield_set_map_size = is0_keyfield_set_map_size,
+ .actionfield_set_map = is0_actionfield_set_map,
+ .actionfield_set_map_size = is0_actionfield_set_map_size,
+ .keyfield_set_typegroups = is0_keyfield_set_typegroups,
+ .actionfield_set_typegroups = is0_actionfield_set_typegroups,
+ },
+ [VCAP_TYPE_IS2] = {
+ .name = "is2",
+ .rows = 256,
+ .sw_count = 12,
+ .sw_width = 52,
+ .sticky_width = 1,
+ .act_width = 103,
+ .default_cnt = 38,
+ .require_cnt_dis = 0,
+ .version = 1,
+ .keyfield_set = is2_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(is2_keyfield_set),
+ .actionfield_set = is2_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(is2_actionfield_set),
+ .keyfield_set_map = is2_keyfield_set_map,
+ .keyfield_set_map_size = is2_keyfield_set_map_size,
+ .actionfield_set_map = is2_actionfield_set_map,
+ .actionfield_set_map_size = is2_actionfield_set_map_size,
+ .keyfield_set_typegroups = is2_keyfield_set_typegroups,
+ .actionfield_set_typegroups = is2_actionfield_set_typegroups,
+ },
+ [VCAP_TYPE_ES0] = {
+ .name = "es0",
+ .rows = 1536,
+ .sw_count = 1,
+ .sw_width = 51,
+ .sticky_width = 1,
+ .act_width = 469,
+ .default_cnt = 35,
+ .require_cnt_dis = 0,
+ .version = 1,
+ .keyfield_set = es0_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(es0_keyfield_set),
+ .actionfield_set = es0_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(es0_actionfield_set),
+ .keyfield_set_map = es0_keyfield_set_map,
+ .keyfield_set_map_size = es0_keyfield_set_map_size,
+ .actionfield_set_map = es0_actionfield_set_map,
+ .actionfield_set_map_size = es0_actionfield_set_map_size,
+ .keyfield_set_typegroups = es0_keyfield_set_typegroups,
+ .actionfield_set_typegroups = es0_actionfield_set_typegroups,
+ },
+ [VCAP_TYPE_ES2] = {
+ .name = "es2",
+ .rows = 256,
+ .sw_count = 12,
+ .sw_width = 52,
+ .sticky_width = 1,
+ .act_width = 19,
+ .default_cnt = 39,
+ .require_cnt_dis = 0,
+ .version = 1,
+ .keyfield_set = es2_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(es2_keyfield_set),
+ .actionfield_set = es2_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(es2_actionfield_set),
+ .keyfield_set_map = es2_keyfield_set_map,
+ .keyfield_set_map_size = es2_keyfield_set_map_size,
+ .actionfield_set_map = es2_actionfield_set_map,
+ .actionfield_set_map_size = es2_actionfield_set_map_size,
+ .keyfield_set_typegroups = es2_keyfield_set_typegroups,
+ .actionfield_set_typegroups = es2_actionfield_set_typegroups,
+ },
+};
+
+const struct vcap_statistics lan969x_vcap_stats = {
+ .name = "lan969x",
+ .count = 4,
+ .keyfield_set_names = vcap_keyfield_set_names,
+ .actionfield_set_names = vcap_actionfield_set_names,
+ .keyfield_names = vcap_keyfield_names,
+ .actionfield_names = vcap_actionfield_names,
+};
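For reference, the tables in this generated file are consumed through the common VCAP API structures (struct vcap_info and the per-keyset field maps). A minimal sketch of how a key field's bit layout could be looked up from them; the helper below is illustrative only and is not part of the driver:

static const struct vcap_field *
lan969x_find_keyfield_sketch(enum vcap_type vt, enum vcap_keyfield_set kfs,
			     enum vcap_key_field kf)
{
	/* Index the per-keyset field map generated above; slots that are
	 * not populated for a keyset have a zero width.
	 */
	const struct vcap_info *vcap = &lan969x_vcaps[vt];
	const struct vcap_field *fields;

	if (kfs >= vcap->keyfield_set_size)
		return NULL;
	fields = vcap->keyfield_set_map[kfs];
	if (!fields || kf >= vcap->keyfield_set_map_size[kfs])
		return NULL;
	return fields[kf].width ? &fields[kf] : NULL;
}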
diff --git a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_impl.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_impl.c
new file mode 100644
index 000000000000..543a1f2bf6bd
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_impl.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "vcap_api.h"
+#include "lan969x.h"
+
+const struct sparx5_vcap_inst lan969x_vcap_inst_cfg[] = {
+ {
+ .vtype = VCAP_TYPE_IS0, /* CLM-0 */
+ .vinst = 0,
+ .map_id = 1,
+ .lookups = SPARX5_IS0_LOOKUPS,
+ .lookups_per_instance = SPARX5_IS0_LOOKUPS / 3,
+ .first_cid = SPARX5_VCAP_CID_IS0_L0,
+ .last_cid = SPARX5_VCAP_CID_IS0_L2 - 1,
+ .blockno = 2,
+ .blocks = 1,
+ .ingress = true,
+ },
+ {
+ .vtype = VCAP_TYPE_IS0, /* CLM-1 */
+ .vinst = 1,
+ .map_id = 2,
+ .lookups = SPARX5_IS0_LOOKUPS,
+ .lookups_per_instance = SPARX5_IS0_LOOKUPS / 3,
+ .first_cid = SPARX5_VCAP_CID_IS0_L2,
+ .last_cid = SPARX5_VCAP_CID_IS0_L4 - 1,
+ .blockno = 3,
+ .blocks = 1,
+ .ingress = true,
+ },
+ {
+ .vtype = VCAP_TYPE_IS0, /* CLM-2 */
+ .vinst = 2,
+ .map_id = 3,
+ .lookups = SPARX5_IS0_LOOKUPS,
+ .lookups_per_instance = SPARX5_IS0_LOOKUPS / 3,
+ .first_cid = SPARX5_VCAP_CID_IS0_L4,
+ .last_cid = SPARX5_VCAP_CID_IS0_MAX,
+ .blockno = 4,
+ .blocks = 1,
+ .ingress = true,
+ },
+ {
+ .vtype = VCAP_TYPE_IS2, /* IS2-0 */
+ .vinst = 0,
+ .map_id = 4,
+ .lookups = SPARX5_IS2_LOOKUPS,
+ .lookups_per_instance = SPARX5_IS2_LOOKUPS / 2,
+ .first_cid = SPARX5_VCAP_CID_IS2_L0,
+ .last_cid = SPARX5_VCAP_CID_IS2_L2 - 1,
+ .blockno = 0,
+ .blocks = 1,
+ .ingress = true,
+ },
+ {
+ .vtype = VCAP_TYPE_IS2, /* IS2-1 */
+ .vinst = 1,
+ .map_id = 5,
+ .lookups = SPARX5_IS2_LOOKUPS,
+ .lookups_per_instance = SPARX5_IS2_LOOKUPS / 2,
+ .first_cid = SPARX5_VCAP_CID_IS2_L2,
+ .last_cid = SPARX5_VCAP_CID_IS2_MAX,
+ .blockno = 1,
+ .blocks = 1,
+ .ingress = true,
+ },
+ {
+ .vtype = VCAP_TYPE_ES0,
+ .lookups = SPARX5_ES0_LOOKUPS,
+ .lookups_per_instance = SPARX5_ES0_LOOKUPS,
+ .first_cid = SPARX5_VCAP_CID_ES0_L0,
+ .last_cid = SPARX5_VCAP_CID_ES0_MAX,
+ .count = 1536,
+ .ingress = false,
+ },
+ {
+ .vtype = VCAP_TYPE_ES2,
+ .lookups = SPARX5_ES2_LOOKUPS,
+ .lookups_per_instance = SPARX5_ES2_LOOKUPS,
+ .first_cid = SPARX5_VCAP_CID_ES2_L0,
+ .last_cid = SPARX5_VCAP_CID_ES2_MAX,
+ .count = 1024,
+ .ingress = false,
+ },
+};
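The instance table above partitions the TC chain ID space per VCAP instance through first_cid/last_cid. A minimal sketch of resolving a chain ID to its instance; the helper name is an assumption for illustration, not a driver function:

static const struct sparx5_vcap_inst *
lan969x_vcap_inst_by_cid_sketch(int cid)
{
	/* Return the instance whose chain ID range covers cid */
	for (int i = 0; i < ARRAY_SIZE(lan969x_vcap_inst_cfg); i++) {
		const struct sparx5_vcap_inst *inst = &lan969x_vcap_inst_cfg[i];

		if (cid >= inst->first_cid && cid <= inst->last_cid)
			return inst;
	}
	return NULL;
}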
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c b/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c
index 5fe941c66c17..5c46d81de530 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c
@@ -98,7 +98,6 @@ u32 sparx5_cal_speed_to_value(enum sparx5_cal_bw speed)
default: return 0;
}
}
-EXPORT_SYMBOL_GPL(sparx5_cal_speed_to_value);
static u32 sparx5_bandwidth_to_calendar(u32 bw)
{
@@ -150,7 +149,6 @@ enum sparx5_cal_bw sparx5_get_port_cal_speed(struct sparx5 *sparx5, u32 portno)
return SPX5_CAL_SPEED_NONE;
return sparx5_bandwidth_to_calendar(port->conf.bandwidth);
}
-EXPORT_SYMBOL_GPL(sparx5_get_port_cal_speed);
/* Auto configure the QSYS calendar based on port configuration */
int sparx5_config_auto_calendar(struct sparx5 *sparx5)
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
index 832f4ae57c83..049541eeaae0 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
@@ -1212,6 +1212,22 @@ static int sparx5_get_ts_info(struct net_device *dev,
return 0;
}
+static void sparx5_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+
+ phylink_ethtool_get_pauseparam(port->phylink, pause);
+}
+
+static int sparx5_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+
+ return phylink_ethtool_set_pauseparam(port->phylink, pause);
+}
+
const struct ethtool_ops sparx5_ethtool_ops = {
.get_sset_count = sparx5_get_sset_count,
.get_strings = sparx5_get_sset_strings,
@@ -1224,6 +1240,8 @@ const struct ethtool_ops sparx5_ethtool_ops = {
.get_eth_ctrl_stats = sparx5_get_eth_mac_ctrl_stats,
.get_rmon_stats = sparx5_get_eth_rmon_stats,
.get_ts_info = sparx5_get_ts_info,
+ .get_pauseparam = sparx5_get_pauseparam,
+ .set_pauseparam = sparx5_set_pauseparam,
};
int sparx_stats_init(struct sparx5 *sparx5)
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
index 0027144a2af2..dbe86f937b21 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
@@ -18,9 +18,6 @@
#include "sparx5_main.h"
#include "sparx5_port.h"
-#define FDMA_XTR_CHANNEL 6
-#define FDMA_INJ_CHANNEL 0
-
#define FDMA_XTR_BUFFER_SIZE 2048
#define FDMA_WEIGHT 4
@@ -133,7 +130,7 @@ static void sparx5_fdma_tx_deactivate(struct sparx5 *sparx5, struct sparx5_tx *t
sparx5, FDMA_CH_ACTIVATE);
}
-static void sparx5_fdma_reload(struct sparx5 *sparx5, struct fdma *fdma)
+void sparx5_fdma_reload(struct sparx5 *sparx5, struct fdma *fdma)
{
/* Reload the RX channel */
spx5_wr(BIT(fdma->channel_id), sparx5, FDMA_CH_RELOAD);
@@ -183,7 +180,7 @@ static bool sparx5_fdma_rx_get_frame(struct sparx5 *sparx5, struct sparx5_rx *rx
return true;
}
-static int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight)
+int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight)
{
struct sparx5_rx *rx = container_of(napi, struct sparx5_rx, napi);
struct sparx5 *sparx5 = container_of(rx, struct sparx5, rx);
@@ -213,11 +210,11 @@ static int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight)
return counter;
}
-int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb)
+int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
+ struct net_device *dev)
{
struct sparx5_tx *tx = &sparx5->tx;
struct fdma *fdma = &tx->fdma;
- static bool first_time = true;
void *virt_addr;
fdma_dcb_advance(fdma);
@@ -238,12 +235,8 @@ int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb)
FDMA_DCB_STATUS_BLOCKO(0) |
FDMA_DCB_STATUS_BLOCKL(skb->len + IFH_LEN * 4 + 4));
- if (first_time) {
- sparx5_fdma_tx_activate(sparx5, tx);
- first_time = false;
- } else {
- sparx5_fdma_reload(sparx5, fdma);
- }
+ sparx5_fdma_reload(sparx5, fdma);
+
return NETDEV_TX_OK;
}
@@ -260,10 +253,6 @@ static int sparx5_fdma_rx_alloc(struct sparx5 *sparx5)
fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
FDMA_DCB_STATUS_INTR);
- netif_napi_add_weight(rx->ndev, &rx->napi, sparx5_fdma_napi_callback,
- FDMA_WEIGHT);
- napi_enable(&rx->napi);
- sparx5_fdma_rx_activate(sparx5, rx);
return 0;
}
@@ -348,7 +337,7 @@ irqreturn_t sparx5_fdma_handler(int irq, void *args)
return IRQ_HANDLED;
}
-static void sparx5_fdma_injection_mode(struct sparx5 *sparx5)
+void sparx5_fdma_injection_mode(struct sparx5 *sparx5)
{
const int byte_swap = 1;
int portno;
@@ -410,7 +399,7 @@ static void sparx5_fdma_injection_mode(struct sparx5 *sparx5)
}
}
-int sparx5_fdma_start(struct sparx5 *sparx5)
+int sparx5_fdma_init(struct sparx5 *sparx5)
{
int err;
@@ -443,24 +432,55 @@ int sparx5_fdma_start(struct sparx5 *sparx5)
return err;
}
+int sparx5_fdma_deinit(struct sparx5 *sparx5)
+{
+ sparx5_fdma_stop(sparx5);
+ fdma_free_phys(&sparx5->rx.fdma);
+ fdma_free_phys(&sparx5->tx.fdma);
+
+ return 0;
+}
+
static u32 sparx5_fdma_port_ctrl(struct sparx5 *sparx5)
{
return spx5_rd(sparx5, FDMA_PORT_CTRL(0));
}
+int sparx5_fdma_start(struct sparx5 *sparx5)
+{
+ const struct sparx5_ops *ops = sparx5->data->ops;
+ struct sparx5_rx *rx = &sparx5->rx;
+ struct sparx5_tx *tx = &sparx5->tx;
+
+ netif_napi_add_weight(rx->ndev,
+ &rx->napi,
+ ops->fdma_poll,
+ FDMA_WEIGHT);
+
+ napi_enable(&rx->napi);
+
+ sparx5_fdma_rx_activate(sparx5, rx);
+ sparx5_fdma_tx_activate(sparx5, tx);
+
+ return 0;
+}
+
int sparx5_fdma_stop(struct sparx5 *sparx5)
{
+ struct sparx5_rx *rx = &sparx5->rx;
+ struct sparx5_tx *tx = &sparx5->tx;
u32 val;
- napi_disable(&sparx5->rx.napi);
+ napi_disable(&rx->napi);
+
/* Stop the fdma and channel interrupts */
- sparx5_fdma_rx_deactivate(sparx5, &sparx5->rx);
- sparx5_fdma_tx_deactivate(sparx5, &sparx5->tx);
+ sparx5_fdma_rx_deactivate(sparx5, rx);
+ sparx5_fdma_tx_deactivate(sparx5, tx);
+
/* Wait for the RX channel to stop */
read_poll_timeout(sparx5_fdma_port_ctrl, val,
FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_GET(val) == 0,
500, 10000, 0, sparx5);
- fdma_free_phys(&sparx5->rx.fdma);
- fdma_free_phys(&sparx5->tx.fdma);
+
return 0;
}
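The sparx5_fdma.c changes above split FDMA bring-up into an allocation step and an activation step, with polling and transmit routed through the per-platform ops. A rough sketch of the resulting ordering, assuming the surrounding probe/remove paths:

/*
 * probe:   ops->fdma_init(sparx5);     allocate the RX/TX FDMA rings
 *          sparx5_fdma_start(sparx5);  add NAPI (ops->fdma_poll) and
 *                                      activate the RX/TX channels
 *
 * remove:  ops->fdma_deinit(sparx5);   sparx5_fdma_stop() to disable
 *                                      NAPI and drain, then free rings
 */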
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index 4f2d5413a64f..582145713cfd 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -24,12 +24,14 @@
#include <linux/types.h>
#include <linux/reset.h>
-#include "../lan969x/lan969x.h" /* for lan969x match data */
+#include "lan969x/lan969x.h" /* for lan969x match data */
#include "sparx5_main_regs.h"
#include "sparx5_main.h"
#include "sparx5_port.h"
#include "sparx5_qos.h"
+#include "sparx5_vcap_ag_api.h"
+#include "sparx5_vcap_impl.h"
const struct sparx5_regs *regs;
@@ -311,10 +313,13 @@ static int sparx5_create_port(struct sparx5 *sparx5,
struct initial_port_config *config)
{
struct sparx5_port *spx5_port;
+ const struct sparx5_ops *ops;
struct net_device *ndev;
struct phylink *phylink;
int err;
+ ops = sparx5->data->ops;
+
ndev = sparx5_create_netdev(sparx5, config->portno);
if (IS_ERR(ndev)) {
dev_err(sparx5->dev, "Could not create net device: %02u\n",
@@ -333,7 +338,6 @@ static int sparx5_create_port(struct sparx5 *sparx5,
spx5_port->custom_etype = 0x8880; /* Vitesse */
spx5_port->phylink_pcs.poll = true;
spx5_port->phylink_pcs.ops = &sparx5_phylink_pcs_ops;
- spx5_port->phylink_pcs.neg_mode = true;
spx5_port->is_mrouter = false;
INIT_LIST_HEAD(&spx5_port->tc_templates);
sparx5->ports[config->portno] = spx5_port;
@@ -355,6 +359,9 @@ static int sparx5_create_port(struct sparx5 *sparx5,
MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD |
MAC_2500FD | MAC_5000FD | MAC_10000FD | MAC_25000FD;
+ if (ops->is_port_rgmii(spx5_port->portno))
+ phy_interface_set_rgmii(spx5_port->phylink_config.supported_interfaces);
+
__set_bit(PHY_INTERFACE_MODE_SGMII,
spx5_port->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_QSGMII,
@@ -388,6 +395,8 @@ static int sparx5_create_port(struct sparx5 *sparx5,
spx5_port->phylink = phylink;
+ spx5_port->ndev->dev.of_node = spx5_port->of_node;
+
return 0;
}
@@ -701,6 +710,11 @@ static int sparx5_start(struct sparx5 *sparx5)
/* Init masks */
sparx5_update_fwd(sparx5);
+ /* Init flood masks */
+ for (int pgid = sparx5_get_pgid(sparx5, PGID_UC_FLOOD);
+ pgid <= sparx5_get_pgid(sparx5, PGID_BCAST); pgid++)
+ sparx5_pgid_clear(sparx5, pgid);
+
/* CPU copy CPU pgids */
spx5_wr(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1), sparx5,
ANA_AC_PGID_MISC_CFG(sparx5_get_pgid(sparx5, PGID_CPU)));
@@ -768,26 +782,27 @@ static int sparx5_start(struct sparx5 *sparx5)
if (err)
return err;
- if (is_sparx5(sparx5)) {
- err = sparx5_vcap_init(sparx5);
- if (err) {
- sparx5_unregister_notifier_blocks(sparx5);
- return err;
- }
+ err = sparx5_vcap_init(sparx5);
+ if (err) {
+ sparx5_unregister_notifier_blocks(sparx5);
+ return err;
}
/* Start Frame DMA with fallback to register based INJ/XTR */
err = -ENXIO;
- if (sparx5->fdma_irq >= 0 && is_sparx5(sparx5)) {
- if (GCB_CHIP_ID_REV_ID_GET(sparx5->chip_id) > 0)
- err = devm_request_threaded_irq(sparx5->dev,
- sparx5->fdma_irq,
- NULL,
- sparx5_fdma_handler,
- IRQF_ONESHOT,
- "sparx5-fdma", sparx5);
- if (!err)
- err = sparx5_fdma_start(sparx5);
+ if (sparx5->fdma_irq >= 0) {
+ if (GCB_CHIP_ID_REV_ID_GET(sparx5->chip_id) > 0 ||
+ !is_sparx5(sparx5))
+ err = devm_request_irq(sparx5->dev,
+ sparx5->fdma_irq,
+ sparx5_fdma_handler,
+ 0,
+ "sparx5-fdma", sparx5);
+ if (!err) {
+ err = ops->fdma_init(sparx5);
+ if (!err)
+ sparx5_fdma_start(sparx5);
+ }
if (err)
sparx5->fdma_irq = -ENXIO;
} else {
@@ -831,6 +846,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
struct initial_port_config *configs, *config;
struct device_node *np = pdev->dev.of_node;
struct device_node *ports, *portnp;
+ const struct sparx5_ops *ops;
struct reset_control *reset;
struct sparx5 *sparx5;
int idx = 0, err = 0;
@@ -852,6 +868,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
return -EINVAL;
regs = sparx5->data->regs;
+ ops = sparx5->data->ops;
/* Do switch core reset if available */
reset = devm_reset_control_get_optional_shared(&pdev->dev, "switch");
@@ -881,7 +898,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
for_each_available_child_of_node(ports, portnp) {
struct sparx5_port_config *conf;
- struct phy *serdes;
+ struct phy *serdes = NULL;
u32 portno;
err = of_property_read_u32(portnp, "reg", &portno);
@@ -911,13 +928,17 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
conf->sd_sgpio = ~0;
else
sparx5->sd_sgpio_remapping = true;
- serdes = devm_of_phy_get(sparx5->dev, portnp, NULL);
- if (IS_ERR(serdes)) {
- err = dev_err_probe(sparx5->dev, PTR_ERR(serdes),
- "port %u: missing serdes\n",
- portno);
- of_node_put(portnp);
- goto cleanup_config;
+ /* There is no SerDes node for RGMII ports. */
+ if (!ops->is_port_rgmii(portno)) {
+ serdes = devm_of_phy_get(sparx5->dev, portnp, NULL);
+ if (IS_ERR(serdes)) {
+ err = dev_err_probe(sparx5->dev,
+ PTR_ERR(serdes),
+ "port %u: missing serdes\n",
+ portno);
+ of_node_put(portnp);
+ goto cleanup_config;
+ }
}
config->portno = portno;
config->node = portnp;
@@ -1015,6 +1036,7 @@ cleanup_pnode:
static void mchp_sparx5_remove(struct platform_device *pdev)
{
struct sparx5 *sparx5 = platform_get_drvdata(pdev);
+ const struct sparx5_ops *ops = sparx5->data->ops;
debugfs_remove_recursive(sparx5->debugfs_root);
if (sparx5->xtr_irq) {
@@ -1026,7 +1048,7 @@ static void mchp_sparx5_remove(struct platform_device *pdev)
sparx5->fdma_irq = -ENXIO;
}
sparx5_ptp_deinit(sparx5);
- sparx5_fdma_stop(sparx5);
+ ops->fdma_deinit(sparx5);
sparx5_cleanup_ports(sparx5);
sparx5_vcap_destroy(sparx5);
/* Unregister netdevs */
@@ -1063,6 +1085,9 @@ static const struct sparx5_consts sparx5_consts = {
.qres_max_prio_idx = 630,
.qres_max_colour_idx = 638,
.tod_pin = 4,
+ .vcaps = sparx5_vcaps,
+ .vcaps_cfg = sparx5_vcap_inst_cfg,
+ .vcap_stats = &sparx5_vcap_stats,
};
static const struct sparx5_ops sparx5_ops = {
@@ -1070,6 +1095,7 @@ static const struct sparx5_ops sparx5_ops = {
.is_port_5g = &sparx5_port_is_5g,
.is_port_10g = &sparx5_port_is_10g,
.is_port_25g = &sparx5_port_is_25g,
+ .is_port_rgmii = &sparx5_port_is_rgmii,
.get_port_dev_index = &sparx5_port_dev_mapping,
.get_port_dev_bit = &sparx5_port_dev_mapping,
.get_hsch_max_group_rate = &sparx5_get_hsch_max_group_rate,
@@ -1077,6 +1103,10 @@ static const struct sparx5_ops sparx5_ops = {
.set_port_mux = &sparx5_port_mux_set,
.ptp_irq_handler = &sparx5_ptp_irq_handler,
.dsm_calendar_calc = &sparx5_dsm_calendar_calc,
+ .fdma_init = &sparx5_fdma_init,
+ .fdma_deinit = &sparx5_fdma_deinit,
+ .fdma_poll = &sparx5_fdma_napi_callback,
+ .fdma_xmit = &sparx5_fdma_xmit,
};
static const struct sparx5_match_data sparx5_desc = {
@@ -1090,7 +1120,7 @@ static const struct sparx5_match_data sparx5_desc = {
static const struct of_device_id mchp_sparx5_match[] = {
{ .compatible = "microchip,sparx5-switch", .data = &sparx5_desc },
-#if IS_ENABLED(CONFIG_LAN969X_SWITCH)
+#ifdef CONFIG_LAN969X_SWITCH
{ .compatible = "microchip,lan9691-switch", .data = &lan969x_desc },
#endif
{ }
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index 146bdc938adc..fe7d8bcc0cd9 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -112,6 +112,8 @@ enum sparx5_feature {
#define XTR_QUEUE 0
#define INJ_QUEUE 0
+#define FDMA_XTR_CHANNEL 6
+#define FDMA_INJ_CHANNEL 0
#define FDMA_DCB_MAX 64
#define FDMA_RX_DCB_MAX_DBS 15
#define FDMA_TX_DCB_MAX_DBS 1
@@ -157,11 +159,25 @@ struct sparx5_calendar_data {
*/
struct sparx5_rx {
struct fdma fdma;
- struct sk_buff *skb[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
+ struct page_pool *page_pool;
+ union {
+ struct sk_buff *skb[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
+ struct page *page[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
+ };
dma_addr_t dma;
struct napi_struct napi;
struct net_device *ndev;
u64 packets;
+ u8 page_order;
+};
+
+/* Used to store information about TX buffers. */
+struct sparx5_tx_buf {
+ struct net_device *dev;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ bool used;
+ bool ptp;
};
/* Frame DMA transmit state:
@@ -169,6 +185,7 @@ struct sparx5_rx {
*/
struct sparx5_tx {
struct fdma fdma;
+ struct sparx5_tx_buf *dbs;
u64 packets;
u64 dropped;
};
@@ -303,6 +320,9 @@ struct sparx5_consts {
u32 qres_max_prio_idx; /* Maximum QRES prio index */
u32 qres_max_colour_idx; /* Maximum QRES colour index */
u32 tod_pin; /* PTP TOD pin */
+ const struct sparx5_vcap_inst *vcaps_cfg;
+ const struct vcap_info *vcaps;
+ const struct vcap_statistics *vcap_stats;
};
struct sparx5_ops {
@@ -310,6 +330,7 @@ struct sparx5_ops {
bool (*is_port_5g)(int portno);
bool (*is_port_10g)(int portno);
bool (*is_port_25g)(int portno);
+ bool (*is_port_rgmii)(int portno);
u32 (*get_port_dev_index)(struct sparx5 *sparx5, int port);
u32 (*get_port_dev_bit)(struct sparx5 *sparx5, int port);
u32 (*get_hsch_max_group_rate)(int grp);
@@ -320,6 +341,13 @@ struct sparx5_ops {
irqreturn_t (*ptp_irq_handler)(int irq, void *args);
int (*dsm_calendar_calc)(struct sparx5 *sparx5, u32 taxi,
struct sparx5_calendar_data *data);
+ int (*port_config_rgmii)(struct sparx5_port *port,
+ struct sparx5_port_config *conf);
+ int (*fdma_init)(struct sparx5 *sparx5);
+ int (*fdma_deinit)(struct sparx5 *sparx5);
+ int (*fdma_poll)(struct napi_struct *napi, int weight);
+ int (*fdma_xmit)(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
+ struct net_device *dev);
};
struct sparx5_main_io_resource {
@@ -430,10 +458,16 @@ int sparx5_manual_injection_mode(struct sparx5 *sparx5);
void sparx5_port_inj_timer_setup(struct sparx5_port *port);
/* sparx5_fdma.c */
+int sparx5_fdma_init(struct sparx5 *sparx5);
+int sparx5_fdma_deinit(struct sparx5 *sparx5);
int sparx5_fdma_start(struct sparx5 *sparx5);
int sparx5_fdma_stop(struct sparx5 *sparx5);
-int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb);
+int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight);
+int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
+ struct net_device *dev);
irqreturn_t sparx5_fdma_handler(int irq, void *args);
+void sparx5_fdma_reload(struct sparx5 *sparx5, struct fdma *fdma);
+void sparx5_fdma_injection_mode(struct sparx5 *sparx5);
/* sparx5_mactable.c */
void sparx5_mact_pull_work(struct work_struct *work);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
index 561344f19062..d9ef4ef137b8 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
@@ -37,6 +37,7 @@ enum sparx5_target {
TARGET_FDMA = 117,
TARGET_GCB = 118,
TARGET_HSCH = 119,
+ TARGET_HSIO_WRAP = 120,
TARGET_LRN = 122,
TARGET_PCEP = 129,
TARGET_PCS10G_BR = 132,
@@ -54,6 +55,7 @@ enum sparx5_target {
TARGET_VCAP_SUPER = 326,
TARGET_VOP = 327,
TARGET_XQS = 331,
+ TARGET_DEVRGMII = 392,
NUM_TARGETS = 517
};
@@ -5367,6 +5369,69 @@ extern const struct sparx5_regs *regs;
#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY_GET(x)\
FIELD_GET(HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY, x)
+/* LAN969X ONLY */
+/* HSIOWRAP:XMII_CFG:XMII_CFG */
+#define HSIO_WRAP_XMII_CFG(g) \
+ __REG(TARGET_HSIO_WRAP, 0, 1, 116, g, 2, 20, 0, 0, 1, 4)
+
+#define HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG GENMASK(2, 1)
+#define HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG_SET(x)\
+ FIELD_PREP(HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG, x)
+#define HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG_GET(x)\
+ FIELD_GET(HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG, x)
+
+/* LAN969X ONLY */
+/* HSIOWRAP:XMII_CFG:RGMII_CFG */
+#define HSIO_WRAP_RGMII_CFG(g) \
+ __REG(TARGET_HSIO_WRAP, 0, 1, 116, g, 2, 20, 4, 0, 1, 4)
+
+#define HSIO_WRAP_RGMII_CFG_TX_CLK_CFG GENMASK(4, 2)
+#define HSIO_WRAP_RGMII_CFG_TX_CLK_CFG_SET(x)\
+ FIELD_PREP(HSIO_WRAP_RGMII_CFG_TX_CLK_CFG, x)
+#define HSIO_WRAP_RGMII_CFG_TX_CLK_CFG_GET(x)\
+ FIELD_GET(HSIO_WRAP_RGMII_CFG_TX_CLK_CFG, x)
+
+#define HSIO_WRAP_RGMII_CFG_RGMII_TX_RST BIT(1)
+#define HSIO_WRAP_RGMII_CFG_RGMII_TX_RST_SET(x)\
+ FIELD_PREP(HSIO_WRAP_RGMII_CFG_RGMII_TX_RST, x)
+#define HSIO_WRAP_RGMII_CFG_RGMII_TX_RST_GET(x)\
+ FIELD_GET(HSIO_WRAP_RGMII_CFG_RGMII_TX_RST, x)
+
+#define HSIO_WRAP_RGMII_CFG_RGMII_RX_RST BIT(0)
+#define HSIO_WRAP_RGMII_CFG_RGMII_RX_RST_SET(x)\
+ FIELD_PREP(HSIO_WRAP_RGMII_CFG_RGMII_RX_RST, x)
+#define HSIO_WRAP_RGMII_CFG_RGMII_RX_RST_GET(x)\
+ FIELD_GET(HSIO_WRAP_RGMII_CFG_RGMII_RX_RST, x)
+
+/* LAN969X ONLY */
+/* HSIOWRAP:XMII_CFG:DLL_CFG */
+#define HSIO_WRAP_DLL_CFG(g, r) \
+ __REG(TARGET_HSIO_WRAP, 0, 1, 116, g, 2, 20, 12, r, 2, 4)
+
+#define HSIO_WRAP_DLL_CFG_DLL_ENA BIT(19)
+#define HSIO_WRAP_DLL_CFG_DLL_ENA_SET(x)\
+ FIELD_PREP(HSIO_WRAP_DLL_CFG_DLL_ENA, x)
+#define HSIO_WRAP_DLL_CFG_DLL_ENA_GET(x)\
+ FIELD_GET(HSIO_WRAP_DLL_CFG_DLL_ENA, x)
+
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_ENA BIT(18)
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_ENA_SET(x)\
+ FIELD_PREP(HSIO_WRAP_DLL_CFG_DLL_CLK_ENA, x)
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_ENA_GET(x)\
+ FIELD_GET(HSIO_WRAP_DLL_CFG_DLL_CLK_ENA, x)
+
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_SEL GENMASK(17, 15)
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_SEL_SET(x)\
+ FIELD_PREP(HSIO_WRAP_DLL_CFG_DLL_CLK_SEL, x)
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_SEL_GET(x)\
+ FIELD_GET(HSIO_WRAP_DLL_CFG_DLL_CLK_SEL, x)
+
+#define HSIO_WRAP_DLL_CFG_DLL_RST BIT(0)
+#define HSIO_WRAP_DLL_CFG_DLL_RST_SET(x)\
+ FIELD_PREP(HSIO_WRAP_DLL_CFG_DLL_RST, x)
+#define HSIO_WRAP_DLL_CFG_DLL_RST_GET(x)\
+ FIELD_GET(HSIO_WRAP_DLL_CFG_DLL_RST, x)
+
/* LRN:COMMON:COMMON_ACCESS_CTRL */
#define LRN_COMMON_ACCESS_CTRL \
__REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 0, 0, 1, 4)
@@ -8110,4 +8175,84 @@ extern const struct sparx5_regs *regs;
#define XQS_CNT(g) \
__REG(TARGET_XQS, 0, 1, 0, g, 1024, 4, 0, 0, 1, 4)
+/* LAN969X ONLY */
+/* DEV1G:DEV_CFG_STATUS:DEV_RST_CTRL */
+#define DEVRGMII_DEV_RST_CTRL(t) \
+ __REG(TARGET_DEVRGMII, t, 2, 0, 0, 1, 36, 0, 0, 1, 4)
+
+#define DEVRGMII_DEV_RST_CTRL_SPEED_SEL GENMASK(22, 20)
+#define DEVRGMII_DEV_RST_CTRL_SPEED_SEL_SET(x)\
+ FIELD_PREP(DEVRGMII_DEV_RST_CTRL_SPEED_SEL, x)
+#define DEVRGMII_DEV_RST_CTRL_SPEED_SEL_GET(x)\
+ FIELD_GET(DEVRGMII_DEV_RST_CTRL_SPEED_SEL, x)
+
+/* LAN969X ONLY */
+/* DEV1G:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEVRGMII_MAC_ENA_CFG(t) \
+ __REG(TARGET_DEVRGMII, t, 2, 36, 0, 1, 36, 0, 0, 1, 4)
+
+#define DEVRGMII_MAC_ENA_CFG_RX_ENA BIT(4)
+#define DEVRGMII_MAC_ENA_CFG_RX_ENA_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_ENA_CFG_RX_ENA, x)
+#define DEVRGMII_MAC_ENA_CFG_RX_ENA_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_ENA_CFG_RX_ENA, x)
+
+#define DEVRGMII_MAC_ENA_CFG_TX_ENA BIT(0)
+#define DEVRGMII_MAC_ENA_CFG_TX_ENA_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_ENA_CFG_TX_ENA, x)
+#define DEVRGMII_MAC_ENA_CFG_TX_ENA_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_ENA_CFG_TX_ENA, x)
+
+/* LAN969X ONLY */
+/* DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG */
+#define DEVRGMII_MAC_TAGS_CFG(t) \
+ __REG(TARGET_DEVRGMII, t, 2, 36, 0, 1, 36, 12, 0, 1, 4)
+
+#define DEVRGMII_MAC_TAGS_CFG_TAG_ID GENMASK(31, 16)
+#define DEVRGMII_MAC_TAGS_CFG_TAG_ID_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_TAGS_CFG_TAG_ID, x)
+#define DEVRGMII_MAC_TAGS_CFG_TAG_ID_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_TAGS_CFG_TAG_ID, x)
+
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA BIT(3)
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, x)
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, x)
+
+#define DEVRGMII_MAC_TAGS_CFG_PB_ENA GENMASK(2, 1)
+#define DEVRGMII_MAC_TAGS_CFG_PB_ENA_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_TAGS_CFG_PB_ENA, x)
+#define DEVRGMII_MAC_TAGS_CFG_PB_ENA_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_TAGS_CFG_PB_ENA, x)
+
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0)
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA, x)
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA, x)
+
+/* LAN969X ONLY */
+/* DEV1G:MAC_CFG_STATUS:MAC_IFG_CFG */
+#define DEVRGMII_MAC_IFG_CFG(t) \
+ __REG(TARGET_DEVRGMII, t, 2, 36, 0, 1, 36, 24, 0, 1, 4)
+
+#define DEVRGMII_MAC_IFG_CFG_TX_IFG GENMASK(12, 8)
+#define DEVRGMII_MAC_IFG_CFG_TX_IFG_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_IFG_CFG_TX_IFG, x)
+#define DEVRGMII_MAC_IFG_CFG_TX_IFG_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_IFG_CFG_TX_IFG, x)
+
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG2 GENMASK(7, 4)
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG2_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_IFG_CFG_RX_IFG2, x)
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG2_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_IFG_CFG_RX_IFG2, x)
+
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG1 GENMASK(3, 0)
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG1_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_IFG_CFG_RX_IFG1, x)
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG1_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_IFG_CFG_RX_IFG1, x)
+
#endif /* _SPARX5_MAIN_REGS_H_ */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c
index 9806729e9c62..76097761fa97 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c
@@ -12,7 +12,6 @@
#define SPX5_MIRROR_DISABLED 0
#define SPX5_MIRROR_EGRESS 1
#define SPX5_MIRROR_INGRESS 2
-#define SPX5_MIRROR_MONITOR_PORT_DEFAULT 65
#define SPX5_QFWD_MP_OFFSET 9 /* Mirror port offset in the QFWD register */
/* Convert from bool ingress/egress to mirror direction */
@@ -200,7 +199,7 @@ void sparx5_mirror_del(struct sparx5_mall_entry *entry)
sparx5_mirror_monitor_set(sparx5,
mirror_idx,
- SPX5_MIRROR_MONITOR_PORT_DEFAULT);
+ sparx5->data->consts->n_ports);
}
void sparx5_mirror_stats(struct sparx5_mall_entry *entry,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
index b6f635d85820..f713656f1fae 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -232,9 +232,12 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
struct net_device_stats *stats = &dev->stats;
struct sparx5_port *port = netdev_priv(dev);
struct sparx5 *sparx5 = port->sparx5;
+ const struct sparx5_ops *ops;
u32 ifh[IFH_LEN];
netdev_tx_t ret;
+ ops = sparx5->data->ops;
+
memset(ifh, 0, IFH_LEN * 4);
sparx5_set_port_ifh(sparx5, ifh, port->portno);
@@ -254,7 +257,7 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
spin_lock(&sparx5->tx_lock);
if (sparx5->fdma_irq > 0)
- ret = sparx5_fdma_xmit(sparx5, ifh, skb);
+ ret = ops->fdma_xmit(sparx5, ifh, skb, dev);
else
ret = sparx5_inject(sparx5, ifh, skb, dev);
spin_unlock(&sparx5->tx_lock);
@@ -264,6 +267,12 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
if (ret < 0)
goto drop;
+ if (!is_sparx5(sparx5))
+		/* On lan969x, when TX_OK is returned, stats and SKB consumption
+		 * are handled in the TX completion loop, so don't go any further.
+		 */
+ return NETDEV_TX_OK;
+
stats->tx_bytes += skb->len;
stats->tx_packets++;
sparx5->tx.packets++;
@@ -366,6 +375,6 @@ irqreturn_t sparx5_xtr_handler(int irq, void *_sparx5)
void sparx5_port_inj_timer_setup(struct sparx5_port *port)
{
- hrtimer_init(&port->inj_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- port->inj_timer.function = sparx5_injection_timeout;
+ hrtimer_setup(&port->inj_timer, sparx5_injection_timeout, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
index f8562c1a894d..cfb4b2e17ace 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
@@ -32,7 +32,19 @@ sparx5_phylink_mac_select_pcs(struct phylink_config *config,
{
struct sparx5_port *port = netdev_priv(to_net_dev(config->dev));
- return &port->phylink_pcs;
+ /* Return the PCS for all the modes that require it. */
+ switch (interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_5GBASER:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_25GBASER:
+ return &port->phylink_pcs;
+ default:
+ return NULL;
+ }
}
static void sparx5_phylink_mac_config(struct phylink_config *config,
@@ -77,7 +89,7 @@ static struct sparx5_port *sparx5_pcs_to_port(struct phylink_pcs *pcs)
return container_of(pcs, struct sparx5_port, phylink_pcs);
}
-static void sparx5_pcs_get_state(struct phylink_pcs *pcs,
+static void sparx5_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct sparx5_port *port = sparx5_pcs_to_port(pcs);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
index 1401761c6251..04bc8fffaf96 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
@@ -257,6 +257,15 @@ static int sparx5_port_verify_speed(struct sparx5 *sparx5,
conf->speed != SPEED_25000))
return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
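+		/* RGMII only supports 10/100/1000 Mbps */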
+ if (conf->speed != SPEED_1000 &&
+ conf->speed != SPEED_100 &&
+ conf->speed != SPEED_10)
+ return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
+ break;
default:
return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
}
@@ -994,6 +1003,7 @@ int sparx5_port_config(struct sparx5 *sparx5,
struct sparx5_port *port,
struct sparx5_port_config *conf)
{
+ bool rgmii = phy_interface_mode_is_rgmii(conf->phy_mode);
bool high_speed_dev = sparx5_is_baser(conf->portmode);
const struct sparx5_ops *ops = sparx5->data->ops;
int err, urgency, stop_wm;
@@ -1002,8 +1012,14 @@ int sparx5_port_config(struct sparx5 *sparx5,
if (err)
return err;
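+	/* RGMII ports are configured via the chip-specific port_config_rgmii op */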
+ if (rgmii) {
+ err = ops->port_config_rgmii(port, conf);
+ if (err)
+ return err;
+ }
+
/* high speed device is already configured */
- if (!high_speed_dev)
+ if (!rgmii && !high_speed_dev)
sparx5_port_config_low_set(sparx5, port, conf);
/* Configure flow control */
@@ -1067,24 +1083,6 @@ int sparx5_port_init(struct sparx5 *sparx5,
if (err)
return err;
- /* Configure MAC vlan awareness */
- err = sparx5_port_max_tags_set(sparx5, port);
- if (err)
- return err;
-
- /* Set Max Length */
- spx5_rmw(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
- DEV2G5_MAC_MAXLEN_CFG_MAX_LEN,
- sparx5,
- DEV2G5_MAC_MAXLEN_CFG(port->portno));
-
- /* 1G/2G5: Signal Detect configuration */
- spx5_wr(DEV2G5_PCS1G_SD_CFG_SD_POL_SET(sd_pol) |
- DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(sd_sel) |
- DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(sd_ena),
- sparx5,
- DEV2G5_PCS1G_SD_CFG(port->portno));
-
/* Set Pause WM hysteresis */
spx5_rmw(QSYS_PAUSE_CFG_PAUSE_START_SET(pause_start) |
QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop) |
@@ -1108,6 +1106,27 @@ int sparx5_port_init(struct sparx5 *sparx5,
ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS,
sparx5, ANA_CL_FILTER_CTRL(port->portno));
+ if (ops->is_port_rgmii(port->portno))
+ return 0; /* RGMII device - nothing more to configure */
+
+ /* Configure MAC vlan awareness */
+ err = sparx5_port_max_tags_set(sparx5, port);
+ if (err)
+ return err;
+
+ /* Set Max Length */
+ spx5_rmw(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
+ DEV2G5_MAC_MAXLEN_CFG_MAX_LEN,
+ sparx5,
+ DEV2G5_MAC_MAXLEN_CFG(port->portno));
+
+ /* 1G/2G5: Signal Detect configuration */
+ spx5_wr(DEV2G5_PCS1G_SD_CFG_SD_POL_SET(sd_pol) |
+ DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(sd_sel) |
+ DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(sd_ena),
+ sparx5,
+ DEV2G5_PCS1G_SD_CFG(port->portno));
+
if (conf->portmode == PHY_INTERFACE_MODE_QSGMII ||
conf->portmode == PHY_INTERFACE_MODE_SGMII) {
err = sparx5_serdes_set(sparx5, port, conf);
@@ -1151,7 +1170,7 @@ int sparx5_port_init(struct sparx5 *sparx5,
spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
DEV10G_MAC_MAXLEN_CFG_MAX_LEN,
devinst,
- DEV10G_MAC_ENA_CFG(0));
+ DEV10G_MAC_MAXLEN_CFG(0));
/* Handle Signal Detect in 10G PCS */
spx5_inst_wr(PCS10G_BR_PCS_SD_CFG_SD_POL_SET(sd_pol) |
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
index 9b9bcc6834bc..c8a37468a3d1 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
@@ -40,6 +40,11 @@ static inline bool sparx5_port_is_25g(int portno)
return portno >= 56 && portno <= 63;
}
+static inline bool sparx5_port_is_rgmii(int portno)
+{
+ return false;
+}
+
static inline u32 sparx5_to_high_dev(struct sparx5 *sparx5, int port)
{
const struct sparx5_ops *ops = sparx5->data->ops;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
index 1c2903700a9c..2f168700f63c 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
@@ -303,7 +303,6 @@ void sparx5_get_hwtimestamp(struct sparx5 *sparx5,
spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
}
-EXPORT_SYMBOL_GPL(sparx5_get_hwtimestamp);
irqreturn_t sparx5_ptp_irq_handler(int irq, void *args)
{
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
index bc9ecb9392cd..0a71abbd3da5 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
@@ -176,6 +176,7 @@ static int sparx5_port_bridge_join(struct sparx5_port *port,
struct net_device *bridge,
struct netlink_ext_ack *extack)
{
+ struct switchdev_brport_flags flags = {0};
struct sparx5 *sparx5 = port->sparx5;
struct net_device *ndev = port->ndev;
int err;
@@ -205,6 +206,11 @@ static int sparx5_port_bridge_join(struct sparx5_port *port,
*/
__dev_mc_unsync(ndev, sparx5_mc_unsync);
+ /* Enable uc/mc/bc flooding */
+ flags.mask = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
+ flags.val = flags.mask;
+ sparx5_port_attr_bridge_flags(port, flags);
+
return 0;
err_switchdev_offload:
@@ -215,6 +221,7 @@ err_switchdev_offload:
static void sparx5_port_bridge_leave(struct sparx5_port *port,
struct net_device *bridge)
{
+ struct switchdev_brport_flags flags = {0};
struct sparx5 *sparx5 = port->sparx5;
switchdev_bridge_port_unoffload(port->ndev, NULL, NULL, NULL);
@@ -234,6 +241,11 @@ static void sparx5_port_bridge_leave(struct sparx5_port *port,
 	/* Port enters host mode, therefore restore the mc list */
__dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync);
+
+ /* Disable uc/mc/bc flooding */
+ flags.mask = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
+ flags.val = 0;
+ sparx5_port_attr_bridge_flags(port, flags);
}
static int sparx5_port_changeupper(struct net_device *dev,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.h b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.h
index 7d106f1276fe..e68f5639a40a 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.h
@@ -10,6 +10,8 @@
#ifndef __SPARX5_VCAP_AG_API_H__
#define __SPARX5_VCAP_AG_API_H__
+#include "vcap_api.h"
+
/* VCAPs */
extern const struct vcap_info sparx5_vcaps[];
extern const struct vcap_statistics sparx5_vcap_stats;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
index 967c8621c250..25066ddb8d4d 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
@@ -17,7 +17,6 @@
#define SUPER_VCAP_BLK_SIZE 3072 /* addresses per Super VCAP block */
#define STREAMSIZE (64 * 4) /* bytes in the VCAP cache area */
-#define SPARX5_IS2_LOOKUPS 4
#define VCAP_IS2_KEYSEL(_ena, _noneth, _v4_mc, _v4_uc, _v6_mc, _v6_uc, _arp) \
(ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA_SET(_ena) | \
ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL_SET(_noneth) | \
@@ -27,7 +26,6 @@
ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL_SET(_v6_uc) | \
ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL_SET(_arp))
-#define SPARX5_IS0_LOOKUPS 6
#define VCAP_IS0_KEYSEL(_ena, _etype, _ipv4, _ipv6, _mpls_uc, _mpls_mc, _mlbs) \
(ANA_CL_ADV_CL_CFG_LOOKUP_ENA_SET(_ena) | \
ANA_CL_ADV_CL_CFG_ETYPE_CLM_KEY_SEL_SET(_etype) | \
@@ -37,31 +35,17 @@
ANA_CL_ADV_CL_CFG_MPLS_MC_CLM_KEY_SEL_SET(_mpls_mc) | \
ANA_CL_ADV_CL_CFG_MLBS_CLM_KEY_SEL_SET(_mlbs))
-#define SPARX5_ES0_LOOKUPS 1
#define VCAP_ES0_KEYSEL(_key) (REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA_SET(_key))
#define SPARX5_STAT_ESDX_GRN_PKTS 0x300
#define SPARX5_STAT_ESDX_YEL_PKTS 0x301
-#define SPARX5_ES2_LOOKUPS 2
#define VCAP_ES2_KEYSEL(_ena, _arp, _ipv4, _ipv6) \
(EACL_VCAP_ES2_KEY_SEL_KEY_ENA_SET(_ena) | \
EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL_SET(_arp) | \
EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL_SET(_ipv4) | \
EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL_SET(_ipv6))
-static struct sparx5_vcap_inst {
- enum vcap_type vtype; /* type of vcap */
- int vinst; /* instance number within the same type */
- int lookups; /* number of lookups in this vcap type */
- int lookups_per_instance; /* number of lookups in this instance */
- int first_cid; /* first chain id in this vcap */
- int last_cid; /* last chain id in this vcap */
- int count; /* number of available addresses, not in super vcap */
- int map_id; /* id in the super vcap block mapping (if applicable) */
- int blockno; /* starting block in super vcap (if applicable) */
- int blocks; /* number of blocks in super vcap (if applicable) */
- bool ingress; /* is vcap in the ingress path */
-} sparx5_vcap_inst_cfg[] = {
+const struct sparx5_vcap_inst sparx5_vcap_inst_cfg[] = {
{
.vtype = VCAP_TYPE_IS0, /* CLM-0 */
.vinst = 0,
@@ -1793,6 +1777,7 @@ void sparx5_vcap_set_port_keyset(struct net_device *ndev,
static void sparx5_vcap_is0_port_key_selection(struct sparx5 *sparx5,
struct vcap_admin *admin)
{
+ const struct sparx5_consts *consts = sparx5->data->consts;
int portno, lookup;
u32 keysel;
@@ -1804,7 +1789,7 @@ static void sparx5_vcap_is0_port_key_selection(struct sparx5 *sparx5,
VCAP_IS0_PS_MPLS_FOLLOW_ETYPE,
VCAP_IS0_PS_MLBS_FOLLOW_ETYPE);
for (lookup = 0; lookup < admin->lookups; ++lookup) {
- for (portno = 0; portno < SPX5_PORTS; ++portno) {
+ for (portno = 0; portno < consts->n_ports; ++portno) {
spx5_wr(keysel, sparx5,
ANA_CL_ADV_CL_CFG(portno, lookup));
spx5_rmw(ANA_CL_ADV_CL_CFG_LOOKUP_ENA,
@@ -1819,6 +1804,7 @@ static void sparx5_vcap_is0_port_key_selection(struct sparx5 *sparx5,
static void sparx5_vcap_is2_port_key_selection(struct sparx5 *sparx5,
struct vcap_admin *admin)
{
+ const struct sparx5_consts *consts = sparx5->data->consts;
int portno, lookup;
u32 keysel;
@@ -1829,13 +1815,13 @@ static void sparx5_vcap_is2_port_key_selection(struct sparx5 *sparx5,
VCAP_IS2_PS_IPV6_UC_IP_7TUPLE,
VCAP_IS2_PS_ARP_ARP);
for (lookup = 0; lookup < admin->lookups; ++lookup) {
- for (portno = 0; portno < SPX5_PORTS; ++portno) {
+ for (portno = 0; portno < consts->n_ports; ++portno) {
spx5_wr(keysel, sparx5,
ANA_ACL_VCAP_S2_KEY_SEL(portno, lookup));
}
}
/* IS2 lookups are in bit 0:3 */
- for (portno = 0; portno < SPX5_PORTS; ++portno)
+ for (portno = 0; portno < consts->n_ports; ++portno)
spx5_rmw(ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(0xf),
ANA_ACL_VCAP_S2_CFG_SEC_ENA,
sparx5,
@@ -1846,11 +1832,12 @@ static void sparx5_vcap_is2_port_key_selection(struct sparx5 *sparx5,
static void sparx5_vcap_es0_port_key_selection(struct sparx5 *sparx5,
struct vcap_admin *admin)
{
+ const struct sparx5_consts *consts = sparx5->data->consts;
int portno;
u32 keysel;
keysel = VCAP_ES0_KEYSEL(VCAP_ES0_PS_FORCE_ISDX_LOOKUPS);
- for (portno = 0; portno < SPX5_PORTS; ++portno)
+ for (portno = 0; portno < consts->n_ports; ++portno)
spx5_rmw(keysel, REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA,
sparx5, REW_RTAG_ETAG_CTRL(portno));
@@ -1862,6 +1849,7 @@ static void sparx5_vcap_es0_port_key_selection(struct sparx5 *sparx5,
static void sparx5_vcap_es2_port_key_selection(struct sparx5 *sparx5,
struct vcap_admin *admin)
{
+ const struct sparx5_consts *consts = sparx5->data->consts;
int portno, lookup;
u32 keysel;
@@ -1869,7 +1857,7 @@ static void sparx5_vcap_es2_port_key_selection(struct sparx5 *sparx5,
VCAP_ES2_PS_IPV4_IP4_TCP_UDP_OTHER,
VCAP_ES2_PS_IPV6_IP_7TUPLE);
for (lookup = 0; lookup < admin->lookups; ++lookup)
- for (portno = 0; portno < SPX5_PORTS; ++portno)
+ for (portno = 0; portno < consts->n_ports; ++portno)
spx5_wr(keysel, sparx5,
EACL_VCAP_ES2_KEY_SEL(portno, lookup));
}
@@ -1901,19 +1889,20 @@ static void sparx5_vcap_port_key_selection(struct sparx5 *sparx5,
static void sparx5_vcap_port_key_deselection(struct sparx5 *sparx5,
struct vcap_admin *admin)
{
+ const struct sparx5_consts *consts = sparx5->data->consts;
int portno, lookup;
switch (admin->vtype) {
case VCAP_TYPE_IS0:
for (lookup = 0; lookup < admin->lookups; ++lookup)
- for (portno = 0; portno < SPX5_PORTS; ++portno)
+ for (portno = 0; portno < consts->n_ports; ++portno)
spx5_rmw(ANA_CL_ADV_CL_CFG_LOOKUP_ENA_SET(0),
ANA_CL_ADV_CL_CFG_LOOKUP_ENA,
sparx5,
ANA_CL_ADV_CL_CFG(portno, lookup));
break;
case VCAP_TYPE_IS2:
- for (portno = 0; portno < SPX5_PORTS; ++portno)
+ for (portno = 0; portno < consts->n_ports; ++portno)
spx5_rmw(ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(0),
ANA_ACL_VCAP_S2_CFG_SEC_ENA,
sparx5,
@@ -1925,7 +1914,7 @@ static void sparx5_vcap_port_key_deselection(struct sparx5 *sparx5,
break;
case VCAP_TYPE_ES2:
for (lookup = 0; lookup < admin->lookups; ++lookup)
- for (portno = 0; portno < SPX5_PORTS; ++portno)
+ for (portno = 0; portno < consts->n_ports; ++portno)
spx5_rmw(EACL_VCAP_ES2_KEY_SEL_KEY_ENA_SET(0),
EACL_VCAP_ES2_KEY_SEL_KEY_ENA,
sparx5,
@@ -2042,6 +2031,7 @@ static void sparx5_vcap_block_alloc(struct sparx5 *sparx5,
/* Allocate a vcap control and vcap instances and configure the system */
int sparx5_vcap_init(struct sparx5 *sparx5)
{
+ const struct sparx5_consts *consts = sparx5->data->consts;
const struct sparx5_vcap_inst *cfg;
struct vcap_control *ctrl;
struct vcap_admin *admin;
@@ -2063,14 +2053,14 @@ int sparx5_vcap_init(struct sparx5 *sparx5)
sparx5->vcap_ctrl = ctrl;
/* select the sparx5 VCAP model */
- ctrl->vcaps = sparx5_vcaps;
- ctrl->stats = &sparx5_vcap_stats;
+ ctrl->vcaps = consts->vcaps;
+ ctrl->stats = consts->vcap_stats;
/* Setup callbacks to allow the API to use the VCAP HW */
ctrl->ops = &sparx5_vcap_ops;
INIT_LIST_HEAD(&ctrl->list);
for (idx = 0; idx < ARRAY_SIZE(sparx5_vcap_inst_cfg); ++idx) {
- cfg = &sparx5_vcap_inst_cfg[idx];
+ cfg = &consts->vcaps_cfg[idx];
admin = sparx5_vcap_admin_alloc(sparx5, ctrl, cfg);
if (IS_ERR(admin)) {
err = PTR_ERR(admin);
@@ -2085,7 +2075,7 @@ int sparx5_vcap_init(struct sparx5 *sparx5)
list_add_tail(&admin->list, &ctrl->list);
}
dir = vcap_debugfs(sparx5->dev, sparx5->debugfs_root, ctrl);
- for (idx = 0; idx < SPX5_PORTS; ++idx)
+ for (idx = 0; idx < consts->n_ports; ++idx)
if (sparx5->ports[idx])
vcap_port_debugfs(sparx5->dev, dir, ctrl,
sparx5->ports[idx]->ndev);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h
index 2684d9199b05..d0a42406bf26 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h
@@ -16,6 +16,11 @@
#include "vcap_api.h"
#include "vcap_api_client.h"
+#define SPARX5_IS2_LOOKUPS 4
+#define SPARX5_IS0_LOOKUPS 6
+#define SPARX5_ES0_LOOKUPS 1
+#define SPARX5_ES2_LOOKUPS 2
+
#define SPARX5_VCAP_CID_IS0_L0 VCAP_CID_INGRESS_L0 /* IS0/CLM lookup 0 */
#define SPARX5_VCAP_CID_IS0_L1 VCAP_CID_INGRESS_L1 /* IS0/CLM lookup 1 */
#define SPARX5_VCAP_CID_IS0_L2 VCAP_CID_INGRESS_L2 /* IS0/CLM lookup 2 */
@@ -40,6 +45,22 @@
#define SPARX5_VCAP_CID_ES2_MAX \
(VCAP_CID_EGRESS_STAGE2_L1 + VCAP_CID_LOOKUP_SIZE - 1) /* ES2 Max */
+struct sparx5_vcap_inst {
+ enum vcap_type vtype; /* type of vcap */
+ int vinst; /* instance number within the same type */
+ int lookups; /* number of lookups in this vcap type */
+ int lookups_per_instance; /* number of lookups in this instance */
+ int first_cid; /* first chain id in this vcap */
+ int last_cid; /* last chain id in this vcap */
+ int count; /* number of available addresses, not in super vcap */
+ int map_id; /* id in the super vcap block mapping (if applicable) */
+ int blockno; /* starting block in super vcap (if applicable) */
+ int blocks; /* number of blocks in super vcap (if applicable) */
+ bool ingress; /* is vcap in the ingress path */
+};
+
+extern const struct sparx5_vcap_inst sparx5_vcap_inst_cfg[];
+
/* IS0 port keyset selection control */
/* IS0 ethernet, IPv4, IPv6 traffic type keyset generation */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
index d42097aa60a0..494782871903 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
@@ -167,16 +167,6 @@ void sparx5_update_fwd(struct sparx5 *sparx5)
/* Divide up fwd mask in 32 bit words */
bitmap_to_arr32(mask, sparx5->bridge_fwd_mask, SPX5_PORTS);
- /* Update flood masks */
- for (port = sparx5_get_pgid(sparx5, PGID_UC_FLOOD);
- port <= sparx5_get_pgid(sparx5, PGID_BCAST); port++) {
- spx5_wr(mask[0], sparx5, ANA_AC_PGID_CFG(port));
- if (is_sparx5(sparx5)) {
- spx5_wr(mask[1], sparx5, ANA_AC_PGID_CFG1(port));
- spx5_wr(mask[2], sparx5, ANA_AC_PGID_CFG2(port));
- }
- }
-
/* Update SRC masks */
for (port = 0; port < sparx5->data->consts->n_ports; port++) {
if (test_bit(port, sparx5->bridge_fwd_mask)) {
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
index 7251121ab196..16eb3de60eb6 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
@@ -366,12 +366,13 @@ static void vcap_api_iterator_init_test(struct kunit *test)
struct vcap_typegroup typegroups[] = {
{ .offset = 0, .width = 2, .value = 2, },
{ .offset = 156, .width = 1, .value = 0, },
- { .offset = 0, .width = 0, .value = 0, },
+ { }
};
struct vcap_typegroup typegroups2[] = {
{ .offset = 0, .width = 3, .value = 4, },
{ .offset = 49, .width = 2, .value = 0, },
{ .offset = 98, .width = 2, .value = 0, },
+ { }
};
vcap_iter_init(&iter, 52, typegroups, 86);
@@ -399,6 +400,7 @@ static void vcap_api_iterator_next_test(struct kunit *test)
{ .offset = 147, .width = 3, .value = 0, },
{ .offset = 196, .width = 2, .value = 0, },
{ .offset = 245, .width = 1, .value = 0, },
+ { }
};
int idx;
@@ -433,7 +435,7 @@ static void vcap_api_encode_typegroups_test(struct kunit *test)
{ .offset = 147, .width = 3, .value = 5, },
{ .offset = 196, .width = 2, .value = 2, },
{ .offset = 245, .width = 5, .value = 27, },
- { .offset = 0, .width = 0, .value = 0, },
+ { }
};
vcap_encode_typegroups(stream, 49, typegroups, false);
@@ -463,6 +465,7 @@ static void vcap_api_encode_bit_test(struct kunit *test)
{ .offset = 147, .width = 3, .value = 5, },
{ .offset = 196, .width = 2, .value = 2, },
{ .offset = 245, .width = 1, .value = 0, },
+ { }
};
vcap_iter_init(&iter, 49, typegroups, 44);
@@ -489,7 +492,7 @@ static void vcap_api_encode_field_test(struct kunit *test)
{ .offset = 147, .width = 3, .value = 5, },
{ .offset = 196, .width = 2, .value = 2, },
{ .offset = 245, .width = 5, .value = 27, },
- { .offset = 0, .width = 0, .value = 0, },
+ { }
};
struct vcap_field rf = {
.type = VCAP_FIELD_U32,
@@ -538,7 +541,7 @@ static void vcap_api_encode_short_field_test(struct kunit *test)
{ .offset = 0, .width = 3, .value = 7, },
{ .offset = 21, .width = 2, .value = 3, },
{ .offset = 42, .width = 1, .value = 1, },
- { .offset = 0, .width = 0, .value = 0, },
+ { }
};
struct vcap_field rf = {
.type = VCAP_FIELD_U32,
@@ -608,7 +611,7 @@ static void vcap_api_encode_keyfield_test(struct kunit *test)
struct vcap_typegroup tgt[] = {
{ .offset = 0, .width = 2, .value = 2, },
{ .offset = 156, .width = 1, .value = 1, },
- { .offset = 0, .width = 0, .value = 0, },
+ { }
};
vcap_test_api_init(&admin);
@@ -671,7 +674,7 @@ static void vcap_api_encode_max_keyfield_test(struct kunit *test)
struct vcap_typegroup tgt[] = {
{ .offset = 0, .width = 2, .value = 2, },
{ .offset = 156, .width = 1, .value = 1, },
- { .offset = 0, .width = 0, .value = 0, },
+ { }
};
u32 keyres[] = {
0x928e8a84,
@@ -732,7 +735,7 @@ static void vcap_api_encode_actionfield_test(struct kunit *test)
{ .offset = 0, .width = 2, .value = 2, },
{ .offset = 21, .width = 1, .value = 1, },
{ .offset = 42, .width = 1, .value = 0, },
- { .offset = 0, .width = 0, .value = 0, },
+ { }
};
vcap_encode_actionfield(&rule, &caf, &rf, tgt);
diff --git a/drivers/net/ethernet/microsoft/Kconfig b/drivers/net/ethernet/microsoft/Kconfig
index 901fbffbf718..3f36ee6a8ece 100644
--- a/drivers/net/ethernet/microsoft/Kconfig
+++ b/drivers/net/ethernet/microsoft/Kconfig
@@ -22,6 +22,7 @@ config MICROSOFT_MANA
depends on PCI_HYPERV
select AUXILIARY_BUS
select PAGE_POOL
+ select NET_SHAPER
help
This driver supports Microsoft Azure Network Adapter (MANA).
So far, the driver is only supported on X86_64.
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index e97af7ac2bb2..efb4e412ec7e 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -6,11 +6,29 @@
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
+#include <linux/msi.h>
+#include <linux/irqdomain.h>
+#include <linux/export.h>
#include <net/mana/mana.h>
+#include <net/mana/hw_channel.h>
struct dentry *mana_debugfs_root;
+struct mana_dev_recovery {
+ struct list_head list;
+ struct pci_dev *pdev;
+ enum gdma_eqe_type type;
+};
+
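+/* Delayed work that recovers devices whose probe failed; entries are
+ * queued in mana_gd_probe().
+ */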
+static struct mana_dev_recovery_work {
+ struct list_head dev_list;
+ struct delayed_work work;
+
+ /* Lock for dev_list above */
+ spinlock_t lock;
+} mana_dev_recovery_work;
+
static u32 mana_gd_r32(struct gdma_context *g, u64 offset)
{
return readl(g->bar0_va + offset);
@@ -31,6 +49,9 @@ static void mana_gd_init_pf_regs(struct pci_dev *pdev)
gc->db_page_base = gc->bar0_va +
mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF);
+ gc->phys_db_page_base = gc->bar0_pa +
+ mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF);
+
sriov_base_off = mana_gd_r64(gc, GDMA_SRIOV_REG_CFG_BASE_OFF);
sriov_base_va = gc->bar0_va + sriov_base_off;
@@ -63,6 +84,24 @@ static void mana_gd_init_registers(struct pci_dev *pdev)
mana_gd_init_vf_regs(pdev);
}
+/* Suppress logging when we set timeout to zero */
+bool mana_need_log(struct gdma_context *gc, int err)
+{
+ struct hw_channel_context *hwc;
+
+ if (err != -ETIMEDOUT)
+ return true;
+
+ if (!gc)
+ return true;
+
+ hwc = gc->hwc.driver_data;
+ if (hwc && hwc->hwc_timeout == 0)
+ return false;
+
+ return true;
+}
+
static int mana_gd_query_max_resources(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
@@ -80,8 +119,15 @@ static int mana_gd_query_max_resources(struct pci_dev *pdev)
return err ? err : -EPROTO;
}
- if (gc->num_msix_usable > resp.max_msix)
- gc->num_msix_usable = resp.max_msix;
+ if (!pci_msix_can_alloc_dyn(pdev)) {
+ if (gc->num_msix_usable > resp.max_msix)
+ gc->num_msix_usable = resp.max_msix;
+ } else {
+		/* If dynamic allocation is enabled, the HWC MSI-X vector has
+		 * already been allocated.
+		 */
+ gc->num_msix_usable = min(resp.max_msix, num_online_cpus() + 1);
+ }
if (gc->num_msix_usable <= 1)
return -ENOSPC;
@@ -134,9 +180,10 @@ static int mana_gd_detect_devices(struct pci_dev *pdev)
struct gdma_list_devices_resp resp = {};
struct gdma_general_req req = {};
struct gdma_dev_id dev;
- u32 i, max_num_devs;
+ int found_dev = 0;
u16 dev_type;
int err;
+ u32 i;
mana_gd_init_req_hdr(&req.hdr, GDMA_LIST_DEVICES, sizeof(req),
sizeof(resp));
@@ -148,12 +195,17 @@ static int mana_gd_detect_devices(struct pci_dev *pdev)
return err ? err : -EPROTO;
}
- max_num_devs = min_t(u32, MAX_NUM_GDMA_DEVICES, resp.num_of_devs);
-
- for (i = 0; i < max_num_devs; i++) {
+ for (i = 0; i < GDMA_DEV_LIST_SIZE &&
+ found_dev < resp.num_of_devs; i++) {
dev = resp.devs[i];
dev_type = dev.type;
+ /* Skip empty devices */
+ if (dev.as_uint32 == 0)
+ continue;
+
+ found_dev++;
+
/* HWC is already detected in mana_hwc_create_channel(). */
if (dev_type == GDMA_DEVICE_HWC)
continue;
@@ -177,7 +229,7 @@ int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
return mana_hwc_send_request(hwc, req_len, req, resp_len, resp);
}
-EXPORT_SYMBOL_NS(mana_gd_send_request, NET_MANA);
+EXPORT_SYMBOL_NS(mana_gd_send_request, "NET_MANA");
int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
struct gdma_mem_info *gmi)
@@ -260,8 +312,9 @@ static int mana_gd_disable_queue(struct gdma_queue *queue)
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err || resp.hdr.status) {
- dev_err(gc->dev, "Failed to disable queue: %d, 0x%x\n", err,
- resp.hdr.status);
+ if (mana_need_log(gc, err))
+ dev_err(gc->dev, "Failed to disable queue: %d, 0x%x\n", err,
+ resp.hdr.status);
return err ? err : -EPROTO;
}
@@ -331,6 +384,7 @@ void mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue)
mana_gd_ring_doorbell(gc, queue->gdma_dev->doorbell, queue->type,
queue->id, queue->head * GDMA_WQE_BU_SIZE, 0);
}
+EXPORT_SYMBOL_NS(mana_gd_wq_ring_doorbell, "NET_MANA");
void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)
{
@@ -343,12 +397,173 @@ void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)
mana_gd_ring_doorbell(gc, cq->gdma_dev->doorbell, cq->type, cq->id,
head, arm_bit);
}
+EXPORT_SYMBOL_NS(mana_gd_ring_cq, "NET_MANA");
+
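+/* Delay, in seconds, applied while servicing a device (FPGA reconfig, reset or recovery) */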
+#define MANA_SERVICE_PERIOD 10
+
+static void mana_serv_rescan(struct pci_dev *pdev)
+{
+ struct pci_bus *parent;
+
+ pci_lock_rescan_remove();
+
+ parent = pdev->bus;
+ if (!parent) {
+ dev_err(&pdev->dev, "MANA service: no parent bus\n");
+ goto out;
+ }
+
+ pci_stop_and_remove_bus_device(pdev);
+ pci_rescan_bus(parent);
+
+out:
+ pci_unlock_rescan_remove();
+}
+
+static void mana_serv_fpga(struct pci_dev *pdev)
+{
+ struct pci_bus *bus, *parent;
+
+ pci_lock_rescan_remove();
+
+ bus = pdev->bus;
+ if (!bus) {
+ dev_err(&pdev->dev, "MANA service: no bus\n");
+ goto out;
+ }
+
+ parent = bus->parent;
+ if (!parent) {
+ dev_err(&pdev->dev, "MANA service: no parent bus\n");
+ goto out;
+ }
+
+ pci_stop_and_remove_bus_device(bus->self);
+
+ msleep(MANA_SERVICE_PERIOD * 1000);
+
+ pci_rescan_bus(parent);
+
+out:
+ pci_unlock_rescan_remove();
+}
+
+static void mana_serv_reset(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ struct hw_channel_context *hwc;
+ int ret;
+
+ if (!gc) {
+ /* Perform PCI rescan on device if GC is not set up */
+ dev_err(&pdev->dev, "MANA service: GC not setup, rescanning\n");
+ mana_serv_rescan(pdev);
+ return;
+ }
+
+ hwc = gc->hwc.driver_data;
+ if (!hwc) {
+ dev_err(&pdev->dev, "MANA service: no HWC\n");
+ goto out;
+ }
+
+ /* HWC is not responding in this case, so don't wait */
+ hwc->hwc_timeout = 0;
+
+ dev_info(&pdev->dev, "MANA reset cycle start\n");
+
+ mana_gd_suspend(pdev, PMSG_SUSPEND);
+
+ msleep(MANA_SERVICE_PERIOD * 1000);
+
+ ret = mana_gd_resume(pdev);
+ if (ret == -ETIMEDOUT || ret == -EPROTO) {
+ /* Perform PCI rescan on device if we failed on HWC */
+ dev_err(&pdev->dev, "MANA service: resume failed, rescanning\n");
+ mana_serv_rescan(pdev);
+ goto out;
+ }
+
+ if (ret)
+ dev_info(&pdev->dev, "MANA reset cycle failed err %d\n", ret);
+ else
+ dev_info(&pdev->dev, "MANA reset cycle completed\n");
+
+out:
+ gc->in_service = false;
+}
+
+struct mana_serv_work {
+ struct work_struct serv_work;
+ struct pci_dev *pdev;
+ enum gdma_eqe_type type;
+};
+
+static void mana_do_service(enum gdma_eqe_type type, struct pci_dev *pdev)
+{
+ switch (type) {
+ case GDMA_EQE_HWC_FPGA_RECONFIG:
+ mana_serv_fpga(pdev);
+ break;
+
+ case GDMA_EQE_HWC_RESET_REQUEST:
+ mana_serv_reset(pdev);
+ break;
+
+ default:
+ dev_err(&pdev->dev, "MANA service: unknown type %d\n", type);
+ break;
+ }
+}
+
+static void mana_recovery_delayed_func(struct work_struct *w)
+{
+ struct mana_dev_recovery_work *work;
+ struct mana_dev_recovery *dev;
+ unsigned long flags;
+
+ work = container_of(w, struct mana_dev_recovery_work, work.work);
+
+ spin_lock_irqsave(&work->lock, flags);
+
+ while (!list_empty(&work->dev_list)) {
+ dev = list_first_entry(&work->dev_list,
+ struct mana_dev_recovery, list);
+ list_del(&dev->list);
+ spin_unlock_irqrestore(&work->lock, flags);
+
+ mana_do_service(dev->type, dev->pdev);
+ pci_dev_put(dev->pdev);
+ kfree(dev);
+
+ spin_lock_irqsave(&work->lock, flags);
+ }
+
+ spin_unlock_irqrestore(&work->lock, flags);
+}
+
+static void mana_serv_func(struct work_struct *w)
+{
+ struct mana_serv_work *mns_wk;
+ struct pci_dev *pdev;
+
+ mns_wk = container_of(w, struct mana_serv_work, serv_work);
+ pdev = mns_wk->pdev;
+
+ if (pdev)
+ mana_do_service(mns_wk->type, pdev);
+
+ pci_dev_put(pdev);
+ kfree(mns_wk);
+ module_put(THIS_MODULE);
+}
static void mana_gd_process_eqe(struct gdma_queue *eq)
{
u32 head = eq->head % (eq->queue_size / GDMA_EQE_SIZE);
struct gdma_context *gc = eq->gdma_dev->gdma_context;
struct gdma_eqe *eq_eqe_ptr = eq->queue_mem_ptr;
+ struct mana_serv_work *mns_wk;
union gdma_eqe_info eqe_info;
enum gdma_eqe_type type;
struct gdma_event event;
@@ -383,7 +598,9 @@ static void mana_gd_process_eqe(struct gdma_queue *eq)
case GDMA_EQE_HWC_INIT_EQ_ID_DB:
case GDMA_EQE_HWC_INIT_DATA:
case GDMA_EQE_HWC_INIT_DONE:
+ case GDMA_EQE_HWC_SOC_SERVICE:
case GDMA_EQE_RNIC_QP_FATAL:
+ case GDMA_EQE_HWC_SOC_RECONFIG_DATA:
if (!eq->eq.callback)
break;
@@ -392,6 +609,46 @@ static void mana_gd_process_eqe(struct gdma_queue *eq)
eq->eq.callback(eq->eq.context, eq, &event);
break;
+ case GDMA_EQE_HWC_FPGA_RECONFIG:
+ case GDMA_EQE_HWC_RESET_REQUEST:
+ dev_info(gc->dev, "Recv MANA service type:%d\n", type);
+
+ if (!test_and_set_bit(GC_PROBE_SUCCEEDED, &gc->flags)) {
+ /*
+			 * The device is still probing and we received a hardware
+			 * reset event; the probe function will detect that the
+			 * flag has changed and perform the service procedure.
+ */
+ dev_info(gc->dev,
+ "Service is to be processed in probe\n");
+ break;
+ }
+
+ if (gc->in_service) {
+ dev_info(gc->dev, "Already in service\n");
+ break;
+ }
+
+ if (!try_module_get(THIS_MODULE)) {
+ dev_info(gc->dev, "Module is unloading\n");
+ break;
+ }
+
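+		/* EQE handling runs in IRQ context, hence GFP_ATOMIC */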
+ mns_wk = kzalloc(sizeof(*mns_wk), GFP_ATOMIC);
+ if (!mns_wk) {
+ module_put(THIS_MODULE);
+ break;
+ }
+
+ dev_info(gc->dev, "Start MANA service type:%d\n", type);
+ gc->in_service = true;
+ mns_wk->pdev = to_pci_dev(gc->dev);
+ mns_wk->type = type;
+ pci_dev_get(mns_wk->pdev);
+ INIT_WORK(&mns_wk->serv_work, mana_serv_func);
+ schedule_work(&mns_wk->serv_work);
+ break;
+
default:
break;
}
@@ -474,7 +731,9 @@ static int mana_gd_register_irq(struct gdma_queue *queue,
}
queue->eq.msix_index = msi_index;
- gic = &gc->irq_contexts[msi_index];
+ gic = xa_load(&gc->irq_contexts, msi_index);
+ if (WARN_ON(!gic))
+ return -EINVAL;
spin_lock_irqsave(&gic->lock, flags);
list_add_rcu(&queue->entry, &gic->eq_list);
@@ -483,7 +742,7 @@ static int mana_gd_register_irq(struct gdma_queue *queue,
return 0;
}
-static void mana_gd_deregiser_irq(struct gdma_queue *queue)
+static void mana_gd_deregister_irq(struct gdma_queue *queue)
{
struct gdma_dev *gd = queue->gdma_dev;
struct gdma_irq_context *gic;
@@ -499,7 +758,10 @@ static void mana_gd_deregiser_irq(struct gdma_queue *queue)
if (WARN_ON(msix_index >= gc->num_msix_usable))
return;
- gic = &gc->irq_contexts[msix_index];
+ gic = xa_load(&gc->irq_contexts, msix_index);
+ if (WARN_ON(!gic))
+ return;
+
spin_lock_irqsave(&gic->lock, flags);
list_for_each_entry_rcu(eq, &gic->eq_list, entry) {
if (queue == eq) {
@@ -533,7 +795,8 @@ int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq)
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err) {
- dev_err(dev, "test_eq failed: %d\n", err);
+ if (mana_need_log(gc, err))
+ dev_err(dev, "test_eq failed: %d\n", err);
goto out;
}
@@ -568,11 +831,11 @@ static void mana_gd_destroy_eq(struct gdma_context *gc, bool flush_evenets,
if (flush_evenets) {
err = mana_gd_test_eq(gc, queue);
- if (err)
+ if (err && mana_need_log(gc, err))
dev_warn(gc->dev, "Failed to flush EQ: %d\n", err);
}
- mana_gd_deregiser_irq(queue);
+ mana_gd_deregister_irq(queue);
if (queue->eq.disable_needed)
mana_gd_disable_queue(queue);
@@ -666,8 +929,11 @@ int mana_gd_create_hwc_queue(struct gdma_dev *gd,
gmi = &queue->mem_info;
err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "GDMA queue type: %d, size: %u, gdma memory allocation err: %d\n",
+ spec->type, spec->queue_size, err);
goto free_q;
+ }
queue->head = 0;
queue->tail = 0;
@@ -688,6 +954,8 @@ int mana_gd_create_hwc_queue(struct gdma_dev *gd,
*queue_ptr = queue;
return 0;
out:
+ dev_err(gc->dev, "Failed to create queue type %d of size %u, err: %d\n",
+ spec->type, spec->queue_size, err);
mana_gd_free_memory(gmi);
free_q:
kfree(queue);
@@ -709,14 +977,15 @@ int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle)
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err || resp.hdr.status) {
- dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
- err, resp.hdr.status);
+ if (mana_need_log(gc, err))
+ dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
+ err, resp.hdr.status);
return -EPROTO;
}
return 0;
}
-EXPORT_SYMBOL_NS(mana_gd_destroy_dma_region, NET_MANA);
+EXPORT_SYMBOL_NS(mana_gd_destroy_dma_region, "NET_MANA");
static int mana_gd_create_dma_region(struct gdma_dev *gd,
struct gdma_mem_info *gmi)
@@ -770,7 +1039,13 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
}
gmi->dma_region_handle = resp.dma_region_handle;
+ dev_dbg(gc->dev, "Created DMA region handle 0x%llx\n",
+ gmi->dma_region_handle);
out:
+ if (err)
+ dev_dbg(gc->dev,
+ "Failed to create DMA region of length: %u, page_type: %d, status: 0x%x, err: %d\n",
+ length, req->gdma_page_type, resp.hdr.status, err);
kfree(req);
return err;
}
@@ -793,8 +1068,11 @@ int mana_gd_create_mana_eq(struct gdma_dev *gd,
gmi = &queue->mem_info;
err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "GDMA queue type: %d, size: %u, gdma memory allocation err: %d\n",
+ spec->type, spec->queue_size, err);
goto free_q;
+ }
err = mana_gd_create_dma_region(gd, gmi);
if (err)
@@ -815,12 +1093,14 @@ int mana_gd_create_mana_eq(struct gdma_dev *gd,
*queue_ptr = queue;
return 0;
out:
+ dev_err(gc->dev, "Failed to create queue type %d of size: %u, err: %d\n",
+ spec->type, spec->queue_size, err);
mana_gd_free_memory(gmi);
free_q:
kfree(queue);
return err;
}
-EXPORT_SYMBOL_NS(mana_gd_create_mana_eq, NET_MANA);
+EXPORT_SYMBOL_NS(mana_gd_create_mana_eq, "NET_MANA");
int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
const struct gdma_queue_spec *spec,
@@ -841,8 +1121,11 @@ int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
gmi = &queue->mem_info;
err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "GDMA queue type: %d, size: %u, memory allocation err: %d\n",
+ spec->type, spec->queue_size, err);
goto free_q;
+ }
err = mana_gd_create_dma_region(gd, gmi);
if (err)
@@ -862,11 +1145,14 @@ int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
*queue_ptr = queue;
return 0;
out:
+ dev_err(gc->dev, "Failed to create queue type %d of size: %u, err: %d\n",
+ spec->type, spec->queue_size, err);
mana_gd_free_memory(gmi);
free_q:
kfree(queue);
return err;
}
+EXPORT_SYMBOL_NS(mana_gd_create_mana_wq_cq, "NET_MANA");
void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
{
@@ -897,7 +1183,7 @@ void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
mana_gd_free_memory(gmi);
kfree(queue);
}
-EXPORT_SYMBOL_NS(mana_gd_destroy_queue, NET_MANA);
+EXPORT_SYMBOL_NS(mana_gd_destroy_queue, "NET_MANA");
int mana_gd_verify_vf_version(struct pci_dev *pdev)
{
@@ -934,6 +1220,7 @@ int mana_gd_verify_vf_version(struct pci_dev *pdev)
err, resp.hdr.status);
return err ? err : -EPROTO;
}
+ gc->pf_cap_flags1 = resp.pf_cap_flags1;
if (resp.pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG) {
err = mana_gd_query_hwc_timeout(pdev, &hwc->hwc_timeout);
if (err) {
@@ -974,7 +1261,6 @@ int mana_gd_register_device(struct gdma_dev *gd)
return 0;
}
-EXPORT_SYMBOL_NS(mana_gd_register_device, NET_MANA);
int mana_gd_deregister_device(struct gdma_dev *gd)
{
@@ -993,8 +1279,9 @@ int mana_gd_deregister_device(struct gdma_dev *gd)
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err || resp.hdr.status) {
- dev_err(gc->dev, "Failed to deregister device: %d, 0x%x\n",
- err, resp.hdr.status);
+ if (mana_need_log(gc, err))
+ dev_err(gc->dev, "Failed to deregister device: %d, 0x%x\n",
+ err, resp.hdr.status);
if (!err)
err = -EPROTO;
}
@@ -1005,7 +1292,6 @@ int mana_gd_deregister_device(struct gdma_dev *gd)
return err;
}
-EXPORT_SYMBOL_NS(mana_gd_deregister_device, NET_MANA);
u32 mana_gd_wq_avail_space(struct gdma_queue *wq)
{
@@ -1041,7 +1327,7 @@ static u32 mana_gd_write_client_oob(const struct gdma_wqe_request *wqe_req,
header->inline_oob_size_div4 = client_oob_size / sizeof(u32);
if (oob_in_sgl) {
- WARN_ON_ONCE(!pad_data || wqe_req->num_sge < 2);
+ WARN_ON_ONCE(wqe_req->num_sge < 2);
header->client_oob_in_sgl = 1;
@@ -1097,7 +1383,6 @@ int mana_gd_post_work_request(struct gdma_queue *wq,
struct gdma_posted_wqe_info *wqe_info)
{
u32 client_oob_size = wqe_req->inline_oob_size;
- struct gdma_context *gc;
u32 sgl_data_size;
u32 max_wqe_size;
u32 wqe_size;
@@ -1127,11 +1412,8 @@ int mana_gd_post_work_request(struct gdma_queue *wq,
if (wqe_size > max_wqe_size)
return -EINVAL;
- if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq)) {
- gc = wq->gdma_dev->gdma_context;
- dev_err(gc->dev, "unsuccessful flow control!\n");
+ if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq))
return -ENOSPC;
- }
if (wqe_info)
wqe_info->wqe_size_in_bu = wqe_size / GDMA_WQE_BU_SIZE;
@@ -1148,6 +1430,7 @@ int mana_gd_post_work_request(struct gdma_queue *wq,
return 0;
}
+EXPORT_SYMBOL_NS(mana_gd_post_work_request, "NET_MANA");
int mana_gd_post_and_ring(struct gdma_queue *queue,
const struct gdma_wqe_request *wqe_req,
@@ -1157,8 +1440,11 @@ int mana_gd_post_and_ring(struct gdma_queue *queue,
int err;
err = mana_gd_post_work_request(queue, wqe_req, wqe_info);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "Failed to post work req from queue type %d of size %u (err=%d)\n",
+ queue->type, queue->queue_size, err);
return err;
+ }
mana_gd_wq_ring_doorbell(gc, queue);
@@ -1218,6 +1504,7 @@ int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe)
return cqe_idx;
}
+EXPORT_SYMBOL_NS(mana_gd_poll_cq, "NET_MANA");
static irqreturn_t mana_gd_intr(int irq, void *arg)
{
@@ -1253,7 +1540,49 @@ void mana_gd_free_res_map(struct gdma_resource *r)
r->size = 0;
}
-static int irq_setup(unsigned int *irqs, unsigned int len, int node)
+/*
+ * Spread on CPUs with the following heuristics:
+ *
+ * 1. No more than one IRQ per CPU, if possible;
+ * 2. NUMA locality is the second priority;
+ * 3. Sibling dislocality is the last priority.
+ *
+ * Let's consider this topology:
+ *
+ * Node 0 1
+ * Core 0 1 2 3
+ * CPU 0 1 2 3 4 5 6 7
+ *
+ * The most performant IRQ distribution based on the above topology
+ * and heuristics may look like this:
+ *
+ * IRQ Nodes Cores CPUs
+ * 0 1 0 0-1
+ * 1 1 1 2-3
+ * 2 1 0 0-1
+ * 3 1 1 2-3
+ * 4 2 2 4-5
+ * 5 2 3 6-7
+ * 6 2 2 4-5
+ * 7 2 3 6-7
+ *
+ * The heuristic is implemented as follows.
+ *
+ * The outer for_each() loop resets the 'weight' to the actual number
+ * of CPUs in the hop. Then the inner for_each() loop decrements it by
+ * the number of sibling groups (cores) while assigning the first set
+ * of IRQs to each group. IRQs 0 and 1 above are distributed this way.
+ *
+ * Now, because NUMA locality is more important, we walk the same set
+ * of siblings again and assign the 2nd set of IRQs (2 and 3); this is
+ * done by the middle while() loop. We keep doing this until the number
+ * of IRQs assigned on this hop equals the number of CPUs in the hop
+ * (weight == 0); then we switch to the next hop and do the same thing.
+ */
+
+static int irq_setup(unsigned int *irqs, unsigned int len, int node,
+ bool skip_first_cpu)
{
const struct cpumask *next, *prev = cpu_none_mask;
cpumask_var_t cpus __free(free_cpumask_var);
@@ -1268,11 +1597,18 @@ static int irq_setup(unsigned int *irqs, unsigned int len, int node)
while (weight > 0) {
cpumask_andnot(cpus, next, prev);
for_each_cpu(cpu, cpus) {
+ cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
+ --weight;
+
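+			/* A skipped group gets no IRQ (it already serves the
+			 * HWC IRQ) but still counts against the hop's weight.
+			 */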
+ if (unlikely(skip_first_cpu)) {
+ skip_first_cpu = false;
+ continue;
+ }
+
if (len-- == 0)
goto done;
+
irq_set_affinity_and_hint(*irqs++, topology_sibling_cpumask(cpu));
- cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
- --weight;
}
}
prev = next;
@@ -1282,47 +1618,108 @@ done:
return 0;
}
-static int mana_gd_setup_irqs(struct pci_dev *pdev)
+static int mana_gd_setup_dyn_irqs(struct pci_dev *pdev, int nvec)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
- unsigned int max_queues_per_port;
struct gdma_irq_context *gic;
- unsigned int max_irqs, cpu;
- int start_irq_index = 1;
- int nvec, *irqs, irq;
- int err, i = 0, j;
+ bool skip_first_cpu = false;
+ int *irqs, irq, err, i;
- cpus_read_lock();
- max_queues_per_port = num_online_cpus();
- if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
- max_queues_per_port = MANA_MAX_NUM_QUEUES;
+ irqs = kmalloc_array(nvec, sizeof(int), GFP_KERNEL);
+ if (!irqs)
+ return -ENOMEM;
+
+ /*
+	 * PCI IRQ vectors are processed starting at index 1, since the
+	 * vector at index 0 is already in use for the HWC. The irqs[]
+	 * array, however, is populated from index 0, as it is passed to
+	 * irq_setup() below.
+ */
+ for (i = 1; i <= nvec; i++) {
+ gic = kzalloc(sizeof(*gic), GFP_KERNEL);
+ if (!gic) {
+ err = -ENOMEM;
+ goto free_irq;
+ }
+ gic->handler = mana_gd_process_eq_events;
+ INIT_LIST_HEAD(&gic->eq_list);
+ spin_lock_init(&gic->lock);
- /* Need 1 interrupt for the Hardware communication Channel (HWC) */
- max_irqs = max_queues_per_port + 1;
+ snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s",
+ i - 1, pci_name(pdev));
- nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX);
- if (nvec < 0) {
- cpus_read_unlock();
- return nvec;
+ /* one pci vector is already allocated for HWC */
+ irqs[i - 1] = pci_irq_vector(pdev, i);
+ if (irqs[i - 1] < 0) {
+ err = irqs[i - 1];
+ goto free_current_gic;
+ }
+
+ err = request_irq(irqs[i - 1], mana_gd_intr, 0, gic->name, gic);
+ if (err)
+ goto free_current_gic;
+
+ xa_store(&gc->irq_contexts, i, gic, GFP_KERNEL);
}
- if (nvec <= num_online_cpus())
- start_irq_index = 0;
- irqs = kmalloc_array((nvec - start_irq_index), sizeof(int), GFP_KERNEL);
- if (!irqs) {
- err = -ENOMEM;
- goto free_irq_vector;
+ /*
+	 * When calling irq_setup() for dynamically added IRQs, if the number
+	 * of online CPUs is greater than or equal to the number of allocated
+	 * MSI-X vectors, skip the first CPU sibling group, since it is
+	 * already affinitized to the HWC IRQ.
+ */
+ cpus_read_lock();
+ if (gc->num_msix_usable <= num_online_cpus())
+ skip_first_cpu = true;
+
+ err = irq_setup(irqs, nvec, gc->numa_node, skip_first_cpu);
+ if (err) {
+ cpus_read_unlock();
+ goto free_irq;
}
- gc->irq_contexts = kcalloc(nvec, sizeof(struct gdma_irq_context),
- GFP_KERNEL);
- if (!gc->irq_contexts) {
- err = -ENOMEM;
- goto free_irq_vector;
+ cpus_read_unlock();
+ kfree(irqs);
+ return 0;
+
+free_current_gic:
+ kfree(gic);
+free_irq:
+ for (i -= 1; i > 0; i--) {
+ irq = pci_irq_vector(pdev, i);
+ gic = xa_load(&gc->irq_contexts, i);
+ if (WARN_ON(!gic))
+ continue;
+
+ irq_update_affinity_hint(irq, NULL);
+ free_irq(irq, gic);
+ xa_erase(&gc->irq_contexts, i);
+ kfree(gic);
}
+ kfree(irqs);
+ return err;
+}
+
+static int mana_gd_setup_irqs(struct pci_dev *pdev, int nvec)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ struct gdma_irq_context *gic;
+ int *irqs, *start_irqs, irq;
+ unsigned int cpu;
+ int err, i;
+
+ irqs = kmalloc_array(nvec, sizeof(int), GFP_KERNEL);
+ if (!irqs)
+ return -ENOMEM;
+
+ start_irqs = irqs;
for (i = 0; i < nvec; i++) {
- gic = &gc->irq_contexts[i];
+ gic = kzalloc(sizeof(*gic), GFP_KERNEL);
+ if (!gic) {
+ err = -ENOMEM;
+ goto free_irq;
+ }
+
gic->handler = mana_gd_process_eq_events;
INIT_LIST_HEAD(&gic->eq_list);
spin_lock_init(&gic->lock);
@@ -1334,67 +1731,128 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s",
i - 1, pci_name(pdev));
- irq = pci_irq_vector(pdev, i);
- if (irq < 0) {
- err = irq;
- goto free_irq;
+ irqs[i] = pci_irq_vector(pdev, i);
+ if (irqs[i] < 0) {
+ err = irqs[i];
+ goto free_current_gic;
}
- if (!i) {
- err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
- if (err)
- goto free_irq;
-
- /* If number of IRQ is one extra than number of online CPUs,
- * then we need to assign IRQ0 (hwc irq) and IRQ1 to
- * same CPU.
- * Else we will use different CPUs for IRQ0 and IRQ1.
- * Also we are using cpumask_local_spread instead of
- * cpumask_first for the node, because the node can be
- * mem only.
- */
- if (start_irq_index) {
- cpu = cpumask_local_spread(i, gc->numa_node);
- irq_set_affinity_and_hint(irq, cpumask_of(cpu));
- } else {
- irqs[start_irq_index] = irq;
- }
- } else {
- irqs[i - start_irq_index] = irq;
- err = request_irq(irqs[i - start_irq_index], mana_gd_intr, 0,
- gic->name, gic);
- if (err)
- goto free_irq;
- }
+ err = request_irq(irqs[i], mana_gd_intr, 0, gic->name, gic);
+ if (err)
+ goto free_current_gic;
+
+ xa_store(&gc->irq_contexts, i, gic, GFP_KERNEL);
}
- err = irq_setup(irqs, (nvec - start_irq_index), gc->numa_node);
- if (err)
+	/* If the number of IRQs is one more than the number of online CPUs,
+	 * assign IRQ0 (the HWC IRQ) and IRQ1 to the same CPU.
+	 * Otherwise, use different CPUs for IRQ0 and IRQ1.
+	 * Also, use cpumask_local_spread() instead of cpumask_first() for
+	 * the node, because the node can be memory-only.
+	 */
+ cpus_read_lock();
+ if (nvec > num_online_cpus()) {
+ cpu = cpumask_local_spread(0, gc->numa_node);
+ irq_set_affinity_and_hint(irqs[0], cpumask_of(cpu));
+ irqs++;
+ nvec -= 1;
+ }
+
+ err = irq_setup(irqs, nvec, gc->numa_node, false);
+ if (err) {
+ cpus_read_unlock();
goto free_irq;
+ }
- gc->max_num_msix = nvec;
- gc->num_msix_usable = nvec;
cpus_read_unlock();
+ kfree(start_irqs);
return 0;
+free_current_gic:
+ kfree(gic);
free_irq:
- for (j = i - 1; j >= 0; j--) {
- irq = pci_irq_vector(pdev, j);
- gic = &gc->irq_contexts[j];
+ for (i -= 1; i >= 0; i--) {
+ irq = pci_irq_vector(pdev, i);
+ gic = xa_load(&gc->irq_contexts, i);
+ if (WARN_ON(!gic))
+ continue;
irq_update_affinity_hint(irq, NULL);
free_irq(irq, gic);
+ xa_erase(&gc->irq_contexts, i);
+ kfree(gic);
}
- kfree(gc->irq_contexts);
- kfree(irqs);
- gc->irq_contexts = NULL;
-free_irq_vector:
- cpus_read_unlock();
- pci_free_irq_vectors(pdev);
+ kfree(start_irqs);
return err;
}
+static int mana_gd_setup_hwc_irqs(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ unsigned int max_irqs, min_irqs;
+ int nvec, err;
+
+ if (pci_msix_can_alloc_dyn(pdev)) {
+ max_irqs = 1;
+ min_irqs = 1;
+ } else {
+ /* Need 1 interrupt for HWC */
+ max_irqs = min(num_online_cpus(), MANA_MAX_NUM_QUEUES) + 1;
+ min_irqs = 2;
+ }
+
+ nvec = pci_alloc_irq_vectors(pdev, min_irqs, max_irqs, PCI_IRQ_MSIX);
+ if (nvec < 0)
+ return nvec;
+
+ err = mana_gd_setup_irqs(pdev, nvec);
+ if (err) {
+ pci_free_irq_vectors(pdev);
+ return err;
+ }
+
+ gc->num_msix_usable = nvec;
+ gc->max_num_msix = nvec;
+
+ return 0;
+}
+
+static int mana_gd_setup_remaining_irqs(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ struct msi_map irq_map;
+ int max_irqs, i, err;
+
+ if (!pci_msix_can_alloc_dyn(pdev))
+		/* remaining IRQs were already allocated along with the HWC IRQ */
+ return 0;
+
+	/* allocate only the remaining IRQs */
+ max_irqs = gc->num_msix_usable - 1;
+
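+	/* Dynamically allocate one MSI-X vector per remaining IRQ; index 0
+	 * is already the HWC vector.
+	 */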
+ for (i = 1; i <= max_irqs; i++) {
+ irq_map = pci_msix_alloc_irq_at(pdev, i, NULL);
+ if (!irq_map.virq) {
+ err = irq_map.index;
+			/* the caller will clean up all allocated
+			 * IRQs after the HWC is destroyed
+ */
+ return err;
+ }
+ }
+
+ err = mana_gd_setup_dyn_irqs(pdev, max_irqs);
+ if (err)
+ return err;
+
+ gc->max_num_msix = gc->max_num_msix + max_irqs;
+
+ return 0;
+}
+
static void mana_gd_remove_irqs(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
@@ -1409,19 +1867,21 @@ static void mana_gd_remove_irqs(struct pci_dev *pdev)
if (irq < 0)
continue;
- gic = &gc->irq_contexts[i];
+ gic = xa_load(&gc->irq_contexts, i);
+ if (WARN_ON(!gic))
+ continue;
/* Need to clear the hint before free_irq */
irq_update_affinity_hint(irq, NULL);
free_irq(irq, gic);
+ xa_erase(&gc->irq_contexts, i);
+ kfree(gic);
}
pci_free_irq_vectors(pdev);
gc->max_num_msix = 0;
gc->num_msix_usable = 0;
- kfree(gc->irq_contexts);
- gc->irq_contexts = NULL;
}
static int mana_gd_setup(struct pci_dev *pdev)
@@ -1432,9 +1892,16 @@ static int mana_gd_setup(struct pci_dev *pdev)
mana_gd_init_registers(pdev);
mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);
- err = mana_gd_setup_irqs(pdev);
- if (err)
- return err;
+ gc->service_wq = alloc_ordered_workqueue("gdma_service_wq", 0);
+ if (!gc->service_wq)
+ return -ENOMEM;
+
+ err = mana_gd_setup_hwc_irqs(pdev);
+ if (err) {
+ dev_err(gc->dev, "Failed to setup IRQs for HWC creation: %d\n",
+ err);
+ goto free_workqueue;
+ }
err = mana_hwc_create_channel(gc);
if (err)
@@ -1448,16 +1915,26 @@ static int mana_gd_setup(struct pci_dev *pdev)
if (err)
goto destroy_hwc;
+ err = mana_gd_setup_remaining_irqs(pdev);
+ if (err) {
+ dev_err(gc->dev, "Failed to setup remaining IRQs: %d", err);
+ goto destroy_hwc;
+ }
+
err = mana_gd_detect_devices(pdev);
if (err)
goto destroy_hwc;
+ dev_dbg(&pdev->dev, "mana gdma setup successful\n");
return 0;
destroy_hwc:
mana_hwc_destroy_channel(gc);
remove_irq:
mana_gd_remove_irqs(pdev);
+free_workqueue:
+ destroy_workqueue(gc->service_wq);
+ dev_err(&pdev->dev, "%s failed (error %d)\n", __func__, err);
return err;
}
@@ -1468,6 +1945,9 @@ static void mana_gd_cleanup(struct pci_dev *pdev)
mana_hwc_destroy_channel(gc);
mana_gd_remove_irqs(pdev);
+
+ destroy_workqueue(gc->service_wq);
+ dev_dbg(&pdev->dev, "mana gdma cleanup successful\n");
}
static bool mana_is_pf(unsigned short dev_id)
@@ -1486,8 +1966,10 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
BUILD_BUG_ON(2 * MAX_PORTS_IN_MANA_DEV * GDMA_EQE_SIZE > EQ_SIZE);
err = pci_enable_device(pdev);
- if (err)
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable pci device (err=%d)\n", err);
return -ENXIO;
+ }
pci_set_master(pdev);
@@ -1496,9 +1978,10 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto disable_dev;
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (err)
+ if (err) {
+ dev_err(&pdev->dev, "DMA set mask failed: %d\n", err);
goto release_region;
-
+ }
dma_set_max_seg_size(&pdev->dev, UINT_MAX);
err = -ENOMEM;
@@ -1518,6 +2001,7 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
gc->is_pf = mana_is_pf(pdev->device);
gc->bar0_va = bar0_va;
gc->dev = &pdev->dev;
+ xa_init(&gc->irq_contexts);
if (gc->is_pf)
gc->mana_pci_debugfs = debugfs_create_dir("0", mana_debugfs_root);
@@ -1533,8 +2017,25 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto cleanup_gd;
+ err = mana_rdma_probe(&gc->mana_ib);
+ if (err)
+ goto cleanup_mana;
+
+ /*
+	 * If a hardware reset event occurred over the HWC during probe,
+	 * roll back and perform the hardware reset procedure.
+ */
+ if (test_and_set_bit(GC_PROBE_SUCCEEDED, &gc->flags)) {
+ err = -EPROTO;
+ goto cleanup_mana_rdma;
+ }
+
return 0;
+cleanup_mana_rdma:
+ mana_rdma_remove(&gc->mana_ib);
+cleanup_mana:
+ mana_remove(&gc->mana, false);
cleanup_gd:
mana_gd_cleanup(pdev);
unmap_bar:
@@ -1545,6 +2046,8 @@ unmap_bar:
* adapter-MTU file and apc->mana_pci_debugfs folder.
*/
debugfs_remove_recursive(gc->mana_pci_debugfs);
+ gc->mana_pci_debugfs = NULL;
+ xa_destroy(&gc->irq_contexts);
pci_iounmap(pdev, bar0_va);
free_gc:
pci_set_drvdata(pdev, NULL);
@@ -1554,6 +2057,35 @@ release_region:
disable_dev:
pci_disable_device(pdev);
dev_err(&pdev->dev, "gdma probe failed: err = %d\n", err);
+
+ /*
+	 * The hardware could be in recovery mode, where the HWC returns
+	 * -ETIMEDOUT or -EPROTO from mana_gd_setup(), mana_probe() or
+	 * mana_rdma_probe(), or we received a hardware reset event over the
+	 * HWC interrupt. In this case,
+ * perform the device recovery procedure after MANA_SERVICE_PERIOD
+ * seconds.
+ */
+ if (err == -ETIMEDOUT || err == -EPROTO) {
+ struct mana_dev_recovery *dev;
+ unsigned long flags;
+
+ dev_info(&pdev->dev, "Start MANA recovery mode\n");
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return err;
+
+ dev->pdev = pci_dev_get(pdev);
+ dev->type = GDMA_EQE_HWC_RESET_REQUEST;
+
+ spin_lock_irqsave(&mana_dev_recovery_work.lock, flags);
+ list_add_tail(&dev->list, &mana_dev_recovery_work.dev_list);
+ spin_unlock_irqrestore(&mana_dev_recovery_work.lock, flags);
+
+ schedule_delayed_work(&mana_dev_recovery_work.work,
+ secs_to_jiffies(MANA_SERVICE_PERIOD));
+ }
+
return err;
}
@@ -1561,25 +2093,33 @@ static void mana_gd_remove(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
+ mana_rdma_remove(&gc->mana_ib);
mana_remove(&gc->mana, false);
mana_gd_cleanup(pdev);
debugfs_remove_recursive(gc->mana_pci_debugfs);
+ gc->mana_pci_debugfs = NULL;
+
+ xa_destroy(&gc->irq_contexts);
+
pci_iounmap(pdev, gc->bar0_va);
vfree(gc);
pci_release_regions(pdev);
pci_disable_device(pdev);
+
+ dev_dbg(&pdev->dev, "mana gdma remove successful\n");
}
/* The 'state' parameter is not used. */
-static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
+int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
+ mana_rdma_remove(&gc->mana_ib);
mana_remove(&gc->mana, true);
mana_gd_cleanup(pdev);
@@ -1591,7 +2131,7 @@ static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
* fail -- if this happens, it's safer to just report an error than try to undo
* what has been done.
*/
-static int mana_gd_resume(struct pci_dev *pdev)
+int mana_gd_resume(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
int err;
@@ -1604,6 +2144,10 @@ static int mana_gd_resume(struct pci_dev *pdev)
if (err)
return err;
+ err = mana_rdma_probe(&gc->mana_ib);
+ if (err)
+ return err;
+
return 0;
}
@@ -1614,12 +2158,15 @@ static void mana_gd_shutdown(struct pci_dev *pdev)
dev_info(&pdev->dev, "Shutdown was called\n");
+ mana_rdma_remove(&gc->mana_ib);
mana_remove(&gc->mana, true);
mana_gd_cleanup(pdev);
debugfs_remove_recursive(gc->mana_pci_debugfs);
+ gc->mana_pci_debugfs = NULL;
+
pci_disable_device(pdev);
}
@@ -1643,20 +2190,43 @@ static int __init mana_driver_init(void)
{
int err;
+ INIT_LIST_HEAD(&mana_dev_recovery_work.dev_list);
+ spin_lock_init(&mana_dev_recovery_work.lock);
+ INIT_DELAYED_WORK(&mana_dev_recovery_work.work, mana_recovery_delayed_func);
+
mana_debugfs_root = debugfs_create_dir("mana", NULL);
err = pci_register_driver(&mana_driver);
- if (err)
+ if (err) {
debugfs_remove(mana_debugfs_root);
+ mana_debugfs_root = NULL;
+ }
return err;
}
static void __exit mana_driver_exit(void)
{
- debugfs_remove(mana_debugfs_root);
+ struct mana_dev_recovery *dev;
+ unsigned long flags;
+
+ disable_delayed_work_sync(&mana_dev_recovery_work.work);
+
+ spin_lock_irqsave(&mana_dev_recovery_work.lock, flags);
+ while (!list_empty(&mana_dev_recovery_work.dev_list)) {
+ dev = list_first_entry(&mana_dev_recovery_work.dev_list,
+ struct mana_dev_recovery, list);
+ list_del(&dev->list);
+ pci_dev_put(dev->pdev);
+ kfree(dev);
+ }
+ spin_unlock_irqrestore(&mana_dev_recovery_work.lock, flags);
pci_unregister_driver(&mana_driver);
+
+ debugfs_remove(mana_debugfs_root);
+
+ mana_debugfs_root = NULL;
}
module_init(mana_driver_init);
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index a00f915c5188..aa4e2731e2ba 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2021, Microsoft Corporation. */
#include <net/mana/gdma.h>
+#include <net/mana/mana.h>
#include <net/mana/hw_channel.h>
#include <linux/vmalloc.h>
@@ -112,11 +113,14 @@ out:
static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
struct gdma_event *event)
{
+ union hwc_init_soc_service_type service_data;
struct hw_channel_context *hwc = ctx;
struct gdma_dev *gd = hwc->gdma_dev;
union hwc_init_type_data type_data;
union hwc_init_eq_id_db eq_db;
+ struct mana_context *ac;
u32 type, val;
+ int ret;
switch (event->type) {
case GDMA_EQE_HWC_INIT_EQ_ID_DB:
@@ -193,13 +197,41 @@ static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
hwc->hwc_timeout = val;
break;
+ case HWC_DATA_HW_LINK_CONNECT:
+ case HWC_DATA_HW_LINK_DISCONNECT:
+ ac = gd->gdma_context->mana.driver_data;
+ if (!ac)
+ break;
+
+ WRITE_ONCE(ac->link_event, type);
+ schedule_work(&ac->link_change_work);
+
+ break;
+
default:
dev_warn(hwc->dev, "Received unknown reconfig type %u\n", type);
break;
}
break;
+ case GDMA_EQE_HWC_SOC_SERVICE:
+ service_data.as_uint32 = event->details[0];
+ type = service_data.type;
+ switch (type) {
+ case GDMA_SERVICE_TYPE_RDMA_SUSPEND:
+ case GDMA_SERVICE_TYPE_RDMA_RESUME:
+ ret = mana_rdma_service_event(gd->gdma_context, type);
+ if (ret)
+ dev_err(hwc->dev, "Failed to schedule adev service event: %d\n",
+ ret);
+ break;
+ default:
+ dev_warn(hwc->dev, "Received unknown SOC service type %u\n", type);
+ break;
+ }
+
+ break;
default:
dev_warn(hwc->dev, "Received unknown gdma event %u\n", event->type);
/* Ignore unknown events, which should never happen. */
@@ -440,7 +472,8 @@ static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
gmi = &dma_buf->mem_info;
err = mana_gd_alloc_memory(gc, buf_size, gmi);
if (err) {
- dev_err(hwc->dev, "Failed to allocate DMA buffer: %d\n", err);
+ dev_err(hwc->dev, "Failed to allocate DMA buffer size: %u, err %d\n",
+ buf_size, err);
goto out;
}
@@ -529,6 +562,9 @@ static int mana_hwc_create_wq(struct hw_channel_context *hwc,
out:
if (err)
mana_hwc_destroy_wq(hwc, hwc_wq);
+
+ dev_err(hwc->dev, "Failed to create HWC queue size= %u type= %d err= %d\n",
+ queue_size, q_type, err);
return err;
}
@@ -856,7 +892,14 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
if (!wait_for_completion_timeout(&ctx->comp_event,
(msecs_to_jiffies(hwc->hwc_timeout)))) {
- dev_err(hwc->dev, "HWC: Request timed out!\n");
+ if (hwc->hwc_timeout != 0)
+ dev_err(hwc->dev, "HWC: Request timed out: %u ms\n",
+ hwc->hwc_timeout);
+
+	/* Reduce further waiting if the HWC is not responding */
+ if (hwc->hwc_timeout > 1)
+ hwc->hwc_timeout = 1;
+
err = -ETIMEDOUT;
goto out;
}
@@ -867,8 +910,13 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
}
if (ctx->status_code && ctx->status_code != GDMA_STATUS_MORE_ENTRIES) {
- dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
- ctx->status_code);
+ if (ctx->status_code == GDMA_STATUS_CMD_UNSUPPORTED) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ if (req_msg->req.msg_type != MANA_QUERY_PHY_STAT)
+ dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
+ ctx->status_code);
err = -EPROTO;
goto out;
}
diff --git a/drivers/net/ethernet/microsoft/mana/mana_bpf.c b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
index 23b1521c0df9..7697c9b52ed3 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_bpf.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
@@ -91,7 +91,7 @@ u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
goto out;
xdp_init_buff(xdp, PAGE_SIZE, &rxq->xdp_rxq);
- xdp_prepare_buff(xdp, buf_va, XDP_PACKET_HEADROOM, pkt_len, false);
+ xdp_prepare_buff(xdp, buf_va, XDP_PACKET_HEADROOM, pkt_len, true);
act = bpf_prog_run_xdp(prog, xdp);
@@ -174,6 +174,7 @@ static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog,
struct mana_port_context *apc = netdev_priv(ndev);
struct bpf_prog *old_prog;
struct gdma_context *gc;
+ int err;
gc = apc->ac->gdma_dev->gdma_context;
@@ -195,11 +196,45 @@ static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog,
*/
apc->bpf_prog = prog;
- if (old_prog)
- bpf_prog_put(old_prog);
+ if (apc->port_is_up) {
+ /* Re-create rxq's after xdp prog was loaded or unloaded.
+		/* Re-create the rxq's after an xdp prog is loaded or
+		 * unloaded, e.g. to switch from full pages to smaller
+		 * page fragments when the xdp prog is unloaded, and
+		 * vice versa.
+		 */
+ /* Pre-allocate buffers to prevent failure in mana_attach */
+ err = mana_pre_alloc_rxbufs(apc, ndev->mtu, apc->num_queues);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "XDP: Insufficient memory for tx/rx re-config");
+ return err;
+ }
+
+ err = mana_detach(ndev, false);
+ if (err) {
+ netdev_err(ndev,
+ "mana_detach failed at xdp set: %d\n", err);
+ NL_SET_ERR_MSG_MOD(extack,
+ "XDP: Re-config failed at detach");
+ goto err_dealloc_rxbuffs;
+ }
+
+ err = mana_attach(ndev);
+ if (err) {
+ netdev_err(ndev,
+ "mana_attach failed at xdp set: %d\n", err);
+ NL_SET_ERR_MSG_MOD(extack,
+ "XDP: Re-config failed at attach");
+ goto err_dealloc_rxbuffs;
+ }
- if (apc->port_is_up)
mana_chn_setxdp(apc, prog);
+ mana_pre_dealloc_rxbufs(apc);
+ }
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
if (prog)
ndev->max_mtu = MANA_XDP_MTU_MAX;
@@ -207,6 +242,11 @@ static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog,
ndev->max_mtu = gc->adapter_mtu - ETH_HLEN;
return 0;
+
+err_dealloc_rxbuffs:
+ apc->bpf_prog = old_prog;
+ mana_pre_dealloc_rxbufs(apc);
+ return err;
}
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 57ac732e7707..1ad154f9db1a 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -10,14 +10,18 @@
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/pci.h>
+#include <linux/export.h>
+#include <linux/skbuff.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
+#include <net/netdev_lock.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>
#include <net/mana/mana.h>
#include <net/mana/mana_auxiliary.h>
+#include <net/mana/hw_channel.h>
static DEFINE_IDA(mana_adev_ida);
@@ -46,25 +50,44 @@ static const struct file_operations mana_dbg_q_fops = {
.read = mana_dbg_q_read,
};
+static bool mana_en_need_log(struct mana_port_context *apc, int err)
+{
+ if (apc && apc->ac && apc->ac->gdma_dev &&
+ apc->ac->gdma_dev->gdma_context)
+ return mana_need_log(apc->ac->gdma_dev->gdma_context, err);
+ else
+ return true;
+}
+
+static void mana_put_rx_page(struct mana_rxq *rxq, struct page *page,
+ bool from_pool)
+{
+ if (from_pool)
+ page_pool_put_full_page(rxq->page_pool, page, false);
+ else
+ put_page(page);
+}
+
/* Microsoft Azure Network Adapter (MANA) functions */
static int mana_open(struct net_device *ndev)
{
struct mana_port_context *apc = netdev_priv(ndev);
int err;
-
err = mana_alloc_queues(ndev);
- if (err)
+
+ if (err) {
+ netdev_err(ndev, "%s failed to allocate queues: %d\n", __func__, err);
return err;
+ }
apc->port_is_up = true;
/* Ensure port state updated before txq state */
smp_wmb();
- netif_carrier_on(ndev);
netif_tx_wake_all_queues(ndev);
-
+ netdev_dbg(ndev, "%s successful\n", __func__);
return 0;
}
@@ -78,6 +101,46 @@ static int mana_close(struct net_device *ndev)
return mana_detach(ndev, true);
}
+static void mana_link_state_handle(struct work_struct *w)
+{
+ struct mana_context *ac;
+ struct net_device *ndev;
+ u32 link_event;
+ bool link_up;
+ int i;
+
+ ac = container_of(w, struct mana_context, link_change_work);
+
+ rtnl_lock();
+
+ link_event = READ_ONCE(ac->link_event);
+
+ if (link_event == HWC_DATA_HW_LINK_CONNECT)
+ link_up = true;
+ else if (link_event == HWC_DATA_HW_LINK_DISCONNECT)
+ link_up = false;
+ else
+ goto out;
+
+ /* Process all ports */
+ for (i = 0; i < ac->num_ports; i++) {
+ ndev = ac->ports[i];
+ if (!ndev)
+ continue;
+
+ if (link_up) {
+ netif_carrier_on(ndev);
+
+ __netdev_notify_peers(ndev);
+ } else {
+ netif_carrier_off(ndev);
+ }
+ }
+
+out:
+ rtnl_unlock();
+}
+
static bool mana_can_tx(struct gdma_queue *wq)
{
return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
@@ -176,6 +239,9 @@ static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
return 0;
frag_err:
+ if (net_ratelimit())
+ netdev_err(apc->ndev, "Failed to map skb of size %u to DMA\n",
+ skb->len);
for (i = sg_i - 1; i >= hsg; i--)
dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
DMA_TO_DEVICE);
@@ -245,10 +311,10 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct netdev_queue *net_txq;
struct mana_stats_tx *tx_stats;
struct gdma_queue *gdma_sq;
+ int err, len, num_gso_seg;
unsigned int csum_type;
struct mana_txq *txq;
struct mana_cq *cq;
- int err, len;
if (unlikely(!apc->port_is_up))
goto tx_drop;
@@ -256,11 +322,29 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (skb_cow_head(skb, MANA_HEADROOM))
goto tx_drop_count;
+ if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
+ goto tx_drop_count;
+
txq = &apc->tx_qp[txq_idx].txq;
gdma_sq = txq->gdma_sq;
cq = &apc->tx_qp[txq_idx].tx_cq;
tx_stats = &txq->stats;
+ BUILD_BUG_ON(MAX_TX_WQE_SGL_ENTRIES != MANA_MAX_TX_WQE_SGL_ENTRIES);
+ if (MAX_SKB_FRAGS + 2 > MAX_TX_WQE_SGL_ENTRIES &&
+ skb_shinfo(skb)->nr_frags + 2 > MAX_TX_WQE_SGL_ENTRIES) {
+		/* A GSO skb exceeding the hardware SGE limit is not expected here;
+		 * such skbs are handled in the mana_features_check() callback.
+		 */
+ if (skb_linearize(skb)) {
+ netdev_warn_once(ndev, "Failed to linearize skb with nr_frags=%d and is_gso=%d\n",
+ skb_shinfo(skb)->nr_frags,
+ skb_is_gso(skb));
+ goto tx_drop_count;
+ }
+ apc->eth_stats.tx_linear_pkt_cnt++;
+ }
+
pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
@@ -374,8 +458,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
}
- WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES);
-
if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
pkg.wqe_req.sgl = pkg.sgl_array;
} else {
@@ -398,6 +480,7 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_queue_tail(&txq->pending_skbs, skb);
len = skb->len;
+ num_gso_seg = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
net_txq = netdev_get_tx_queue(ndev, txq_idx);
err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
@@ -409,9 +492,9 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (err) {
(void)skb_dequeue_tail(&txq->pending_skbs);
+ mana_unmap_skb(skb, apc);
netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
- err = NETDEV_TX_BUSY;
- goto tx_busy;
+ goto free_sgl_ptr;
}
err = NETDEV_TX_OK;
@@ -422,13 +505,15 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* skb may be freed after mana_gd_post_work_request. Do not use it. */
skb = NULL;
+	/* Populate the packet and byte counters based on post-GSO packet
+	 * calculations.
+	 */
tx_stats = &txq->stats;
u64_stats_update_begin(&tx_stats->syncp);
- tx_stats->packets++;
- tx_stats->bytes += len;
+ tx_stats->packets += num_gso_seg;
+ tx_stats->bytes += len + ((num_gso_seg - 1) * gso_hs);
u64_stats_update_end(&tx_stats->syncp);
-tx_busy:
if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
netif_tx_wake_queue(net_txq);
apc->eth_stats.wake_queue++;
@@ -446,6 +531,25 @@ tx_drop:
return NETDEV_TX_OK;
}
+#if (MAX_SKB_FRAGS + 2 > MANA_MAX_TX_WQE_SGL_ENTRIES)
+static netdev_features_t mana_features_check(struct sk_buff *skb,
+ struct net_device *ndev,
+ netdev_features_t features)
+{
+ if (skb_shinfo(skb)->nr_frags + 2 > MAX_TX_WQE_SGL_ENTRIES) {
+ /* Exceeds HW SGE limit.
+ * GSO case:
+ * Disable GSO so the stack will software-segment the skb
+ * into smaller skbs that fit the SGE budget.
+ * Non-GSO case:
+ * The xmit path will attempt skb_linearize() as a fallback.
+ */
+ features &= ~NETIF_F_GSO_MASK;
+ }
+ return features;
+}
+#endif
+
static void mana_get_stats64(struct net_device *ndev,
struct rtnl_link_stats64 *st)
{
@@ -462,6 +566,11 @@ static void mana_get_stats64(struct net_device *ndev,
netdev_stats_to_stats64(st, &ndev->stats);
+ if (apc->ac->hwc_timeout_occurred)
+ netdev_warn_once(ndev, "HWC timeout occurred\n");
+
+ st->rx_missed_errors = apc->ac->hc_stats.hc_rx_discards_no_wqe;
+
for (q = 0; q < num_queues; q++) {
rx_stats = &apc->rxqs[q]->stats;
@@ -607,21 +716,40 @@ static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da)
}
/* Get RX buffer's data size, alloc size, XDP headroom based on MTU */
-static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
- u32 *headroom)
+static void mana_get_rxbuf_cfg(struct mana_port_context *apc,
+ int mtu, u32 *datasize, u32 *alloc_size,
+ u32 *headroom, u32 *frag_count)
{
- if (mtu > MANA_XDP_MTU_MAX)
- *headroom = 0; /* no support for XDP */
- else
- *headroom = XDP_PACKET_HEADROOM;
+ u32 len, buf_size;
- *alloc_size = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD + *headroom);
+ /* Calculate datasize first (consistent across all cases) */
+ *datasize = mtu + ETH_HLEN;
- /* Using page pool in this case, so alloc_size is PAGE_SIZE */
- if (*alloc_size < PAGE_SIZE)
- *alloc_size = PAGE_SIZE;
+ /* For xdp and jumbo frames make sure only one packet fits per page */
+ if (mtu + MANA_RXBUF_PAD > PAGE_SIZE / 2 || mana_xdp_get(apc)) {
+ if (mana_xdp_get(apc)) {
+ *headroom = XDP_PACKET_HEADROOM;
+ *alloc_size = PAGE_SIZE;
+ } else {
+ *headroom = 0; /* no support for XDP */
+ *alloc_size = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD +
+ *headroom);
+ }
- *datasize = mtu + ETH_HLEN;
+ *frag_count = 1;
+ return;
+ }
+
+ /* Standard MTU case - optimize for multiple packets per page */
+ *headroom = 0;
+
+ /* Calculate base buffer size needed */
+ len = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD + *headroom);
+ buf_size = ALIGN(len, MANA_RX_FRAG_ALIGNMENT);
+
+ /* Calculate how many packets can fit in a page */
+ *frag_count = PAGE_SIZE / buf_size;
+ *alloc_size = buf_size;
}
int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_queues)
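For reference, a minimal userspace sketch of the buffers-per-page arithmetic in the standard-MTU branch of mana_get_rxbuf_cfg() above. The pad and alignment constants are placeholder values rather than the driver's exact ones, and ALIGN_UP() stands in for the kernel's ALIGN()/SKB_DATA_ALIGN() helpers.

#include <stdio.h>

#define PAGE_SIZE_BYTES   4096u
#define RXBUF_PAD          334u  /* placeholder for MANA_RXBUF_PAD */
#define FRAG_ALIGNMENT      64u  /* placeholder for MANA_RX_FRAG_ALIGNMENT */
#define ALIGN_UP(x, a)    (((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned int mtu = 1500;
	unsigned int buf_size = ALIGN_UP(mtu + RXBUF_PAD, FRAG_ALIGNMENT);
	unsigned int frag_count = PAGE_SIZE_BYTES / buf_size;

	/* With these placeholder values: buf_size=1856, two buffers per page */
	printf("buf_size=%u, frags per page=%u\n", buf_size, frag_count);
	return 0;
}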
@@ -633,8 +761,9 @@ int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_qu
void *va;
int i;
- mana_get_rxbuf_cfg(new_mtu, &mpc->rxbpre_datasize,
- &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom);
+ mana_get_rxbuf_cfg(mpc, new_mtu, &mpc->rxbpre_datasize,
+ &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom,
+ &mpc->rxbpre_frag_count);
dev = mpc->ac->gdma_dev->gdma_context->dev;
@@ -652,30 +781,16 @@ int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_qu
mpc->rxbpre_total = 0;
for (i = 0; i < num_rxb; i++) {
- if (mpc->rxbpre_alloc_size > PAGE_SIZE) {
- va = netdev_alloc_frag(mpc->rxbpre_alloc_size);
- if (!va)
- goto error;
-
- page = virt_to_head_page(va);
- /* Check if the frag falls back to single page */
- if (compound_order(page) <
- get_order(mpc->rxbpre_alloc_size)) {
- put_page(page);
- goto error;
- }
- } else {
- page = dev_alloc_page();
- if (!page)
- goto error;
+ page = dev_alloc_pages(get_order(mpc->rxbpre_alloc_size));
+ if (!page)
+ goto error;
- va = page_to_virt(page);
- }
+ va = page_to_virt(page);
da = dma_map_single(dev, va + mpc->rxbpre_headroom,
mpc->rxbpre_datasize, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, da)) {
- put_page(virt_to_head_page(va));
+ put_page(page);
goto error;
}
@@ -687,6 +802,7 @@ int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_qu
return 0;
error:
+ netdev_err(mpc->ndev, "Failed to pre-allocate RX buffers for %d queues\n", num_queues);
mana_pre_dealloc_rxbufs(mpc);
return -ENOMEM;
}
@@ -723,27 +839,102 @@ out:
return err;
}
+static int mana_shaper_set(struct net_shaper_binding *binding,
+ const struct net_shaper *shaper,
+ struct netlink_ext_ack *extack)
+{
+ struct mana_port_context *apc = netdev_priv(binding->netdev);
+ u32 old_speed, rate;
+ int err;
+
+ if (shaper->handle.scope != NET_SHAPER_SCOPE_NETDEV) {
+ NL_SET_ERR_MSG_MOD(extack, "net shaper scope should be netdev");
+ return -EINVAL;
+ }
+
+ if (apc->handle.id && shaper->handle.id != apc->handle.id) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot create multiple shapers");
+ return -EOPNOTSUPP;
+ }
+
+ if (!shaper->bw_max || (shaper->bw_max % 100000000)) {
+ NL_SET_ERR_MSG_MOD(extack, "Please use multiples of 100Mbps for bandwidth");
+ return -EINVAL;
+ }
+
+ rate = div_u64(shaper->bw_max, 1000); /* Convert bps to Kbps */
+ rate = div_u64(rate, 1000); /* Convert Kbps to Mbps */
+
+ /* Get current speed */
+ err = mana_query_link_cfg(apc);
+ old_speed = (err) ? SPEED_UNKNOWN : apc->speed;
+
+ if (!err) {
+ err = mana_set_bw_clamp(apc, rate, TRI_STATE_TRUE);
+ apc->speed = (err) ? old_speed : rate;
+ apc->handle = (err) ? apc->handle : shaper->handle;
+ }
+
+ return err;
+}
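For reference, a standalone userspace sketch of the bandwidth validation and bps-to-Mbps conversion that mana_shaper_set() above performs before calling mana_set_bw_clamp(); bw_max_to_mbps() is an illustrative stand-in, not a kernel helper.

#include <stdio.h>

static int bw_max_to_mbps(unsigned long long bw_max_bps, unsigned int *mbps)
{
	/* Reject zero and anything that is not a multiple of 100 Mbps */
	if (!bw_max_bps || (bw_max_bps % 100000000ULL))
		return -1;

	*mbps = (unsigned int)(bw_max_bps / 1000 / 1000); /* bps -> Kbps -> Mbps */
	return 0;
}

int main(void)
{
	unsigned int rate;

	if (!bw_max_to_mbps(300000000ULL, &rate))
		printf("300000000 bps -> %u Mbps\n", rate);   /* prints 300 */

	if (bw_max_to_mbps(250000000ULL, &rate))
		printf("250000000 bps rejected: not a multiple of 100 Mbps\n");

	return 0;
}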
+
+static int mana_shaper_del(struct net_shaper_binding *binding,
+ const struct net_shaper_handle *handle,
+ struct netlink_ext_ack *extack)
+{
+ struct mana_port_context *apc = netdev_priv(binding->netdev);
+ int err;
+
+ err = mana_set_bw_clamp(apc, 0, TRI_STATE_FALSE);
+
+ if (!err) {
+ /* Reset mana port context parameters */
+ apc->handle.id = 0;
+ apc->handle.scope = NET_SHAPER_SCOPE_UNSPEC;
+ apc->speed = apc->max_speed;
+ }
+
+ return err;
+}
+
+static void mana_shaper_cap(struct net_shaper_binding *binding,
+ enum net_shaper_scope scope,
+ unsigned long *flags)
+{
+ *flags = BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MAX) |
+ BIT(NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS);
+}
+
+static const struct net_shaper_ops mana_shaper_ops = {
+ .set = mana_shaper_set,
+ .delete = mana_shaper_del,
+ .capabilities = mana_shaper_cap,
+};
+
static const struct net_device_ops mana_devops = {
.ndo_open = mana_open,
.ndo_stop = mana_close,
.ndo_select_queue = mana_select_queue,
+#if (MAX_SKB_FRAGS + 2 > MANA_MAX_TX_WQE_SGL_ENTRIES)
+ .ndo_features_check = mana_features_check,
+#endif
.ndo_start_xmit = mana_start_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_get_stats64 = mana_get_stats64,
.ndo_bpf = mana_bpf,
.ndo_xdp_xmit = mana_xdp_xmit,
.ndo_change_mtu = mana_change_mtu,
+ .net_shaper_ops = &mana_shaper_ops,
};
static void mana_cleanup_port_context(struct mana_port_context *apc)
{
/*
- * at this point all dir/files under the vport directory
- * are already cleaned up.
- * We are sure the apc->mana_port_debugfs remove will not
- * cause any freed memory access issues
+	 * make sure subsequent cleanup attempts don't end up removing an
+	 * already-freed dentry pointer
*/
debugfs_remove(apc->mana_port_debugfs);
+ apc->mana_port_debugfs = NULL;
kfree(apc->rxqs);
apc->rxqs = NULL;
}
@@ -779,8 +970,13 @@ static int mana_send_request(struct mana_context *ac, void *in_buf,
err = mana_gd_send_request(gc, in_len, in_buf, out_len,
out_buf);
if (err || resp->status) {
- dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
- err, resp->status);
+ if (err == -EOPNOTSUPP)
+ return err;
+
+ if (req->req.msg_type != MANA_QUERY_PHY_STAT &&
+ mana_need_log(gc, err))
+ dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
+ err, resp->status);
return err ? err : -EPROTO;
}
@@ -855,8 +1051,10 @@ static void mana_pf_deregister_hw_vport(struct mana_port_context *apc)
err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
sizeof(resp));
if (err) {
- netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
- err);
+ if (mana_en_need_log(apc, err))
+ netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
+ err);
+
return;
}
@@ -911,8 +1109,10 @@ static void mana_pf_deregister_filter(struct mana_port_context *apc)
err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
sizeof(resp));
if (err) {
- netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
- err);
+ if (mana_en_need_log(apc, err))
+ netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
+ err);
+
return;
}
@@ -926,7 +1126,7 @@ static void mana_pf_deregister_filter(struct mana_port_context *apc)
static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
u32 proto_minor_ver, u32 proto_micro_ver,
- u16 *max_num_vports)
+ u16 *max_num_vports, u8 *bm_hostmode)
{
struct gdma_context *gc = ac->gdma_dev->gdma_context;
struct mana_query_device_cfg_resp resp = {};
@@ -937,7 +1137,7 @@ static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
sizeof(req), sizeof(resp));
- req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
+ req.hdr.resp.msg_version = GDMA_MESSAGE_V3;
req.proto_major_ver = proto_major_ver;
req.proto_minor_ver = proto_minor_ver;
@@ -961,11 +1161,16 @@ static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
*max_num_vports = resp.max_num_vports;
- if (resp.hdr.response.msg_version == GDMA_MESSAGE_V2)
+ if (resp.hdr.response.msg_version >= GDMA_MESSAGE_V2)
gc->adapter_mtu = resp.adapter_mtu;
else
gc->adapter_mtu = ETH_FRAME_LEN;
+ if (resp.hdr.response.msg_version >= GDMA_MESSAGE_V3)
+ *bm_hostmode = resp.bm_hostmode;
+ else
+ *bm_hostmode = 0;
+
debugfs_create_u16("adapter-MTU", 0400, gc->mana_pci_debugfs, &gc->adapter_mtu);
return 0;
@@ -1022,7 +1227,7 @@ void mana_uncfg_vport(struct mana_port_context *apc)
WARN_ON(apc->vport_use_count < 0);
mutex_unlock(&apc->vport_mutex);
}
-EXPORT_SYMBOL_NS(mana_uncfg_vport, NET_MANA);
+EXPORT_SYMBOL_NS(mana_uncfg_vport, "NET_MANA");
int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
u32 doorbell_pg_id)
@@ -1092,7 +1297,7 @@ out:
return err;
}
-EXPORT_SYMBOL_NS(mana_cfg_vport, NET_MANA);
+EXPORT_SYMBOL_NS(mana_cfg_vport, "NET_MANA");
static int mana_cfg_vport_steering(struct mana_port_context *apc,
enum TRI_STATE rx,
@@ -1137,7 +1342,9 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc,
err = mana_send_request(apc->ac, req, req_buf_size, &resp,
sizeof(resp));
if (err) {
- netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
+ if (mana_en_need_log(apc, err))
+ netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
+
goto out;
}
@@ -1161,6 +1368,95 @@ out:
return err;
}
+int mana_query_link_cfg(struct mana_port_context *apc)
+{
+ struct net_device *ndev = apc->ndev;
+ struct mana_query_link_config_resp resp = {};
+ struct mana_query_link_config_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_LINK_CONFIG,
+ sizeof(req), sizeof(resp));
+
+ req.vport = apc->port_handle;
+ req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+
+ if (err) {
+ if (err == -EOPNOTSUPP) {
+ netdev_info_once(ndev, "MANA_QUERY_LINK_CONFIG not supported\n");
+ return err;
+ }
+ netdev_err(ndev, "Failed to query link config: %d\n", err);
+ return err;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_LINK_CONFIG,
+ sizeof(resp));
+
+ if (err || resp.hdr.status) {
+ netdev_err(ndev, "Failed to query link config: %d, 0x%x\n", err,
+ resp.hdr.status);
+ if (!err)
+ err = -EOPNOTSUPP;
+ return err;
+ }
+
+ if (resp.qos_unconfigured) {
+ err = -EINVAL;
+ return err;
+ }
+ apc->speed = resp.link_speed_mbps;
+ apc->max_speed = resp.qos_speed_mbps;
+ return 0;
+}
+
+int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed,
+ int enable_clamping)
+{
+ struct mana_set_bw_clamp_resp resp = {};
+ struct mana_set_bw_clamp_req req = {};
+ struct net_device *ndev = apc->ndev;
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_SET_BW_CLAMP,
+ sizeof(req), sizeof(resp));
+ req.vport = apc->port_handle;
+ req.link_speed_mbps = speed;
+ req.enable_clamping = enable_clamping;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+
+ if (err) {
+ if (err == -EOPNOTSUPP) {
+ netdev_info_once(ndev, "MANA_SET_BW_CLAMP not supported\n");
+ return err;
+ }
+		netdev_err(ndev, "Failed to set bandwidth clamp for speed %u, err = %d\n",
+ speed, err);
+ return err;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_SET_BW_CLAMP,
+ sizeof(resp));
+
+ if (err || resp.hdr.status) {
+ netdev_err(ndev, "Failed to set bandwidth clamp: %d, 0x%x\n", err,
+ resp.hdr.status);
+ if (!err)
+ err = -EOPNOTSUPP;
+ return err;
+ }
+
+ if (resp.qos_unconfigured)
+ netdev_info(ndev, "QoS is unconfigured\n");
+
+ return 0;
+}
+
int mana_create_wq_obj(struct mana_port_context *apc,
mana_handle_t vport,
u32 wq_type, struct mana_obj_spec *wq_spec,
@@ -1214,7 +1510,7 @@ int mana_create_wq_obj(struct mana_port_context *apc,
out:
return err;
}
-EXPORT_SYMBOL_NS(mana_create_wq_obj, NET_MANA);
+EXPORT_SYMBOL_NS(mana_create_wq_obj, "NET_MANA");
void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
mana_handle_t wq_obj)
@@ -1232,7 +1528,9 @@ void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
sizeof(resp));
if (err) {
- netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
+ if (mana_en_need_log(apc, err))
+ netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
+
return;
}
@@ -1242,7 +1540,7 @@ void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err,
resp.hdr.status);
}
-EXPORT_SYMBOL_NS(mana_destroy_wq_obj, NET_MANA);
+EXPORT_SYMBOL_NS(mana_destroy_wq_obj, "NET_MANA");
static void mana_destroy_eq(struct mana_context *ac)
{
@@ -1254,6 +1552,7 @@ static void mana_destroy_eq(struct mana_context *ac)
return;
debugfs_remove_recursive(ac->mana_eqs_debugfs);
+ ac->mana_eqs_debugfs = NULL;
for (i = 0; i < gc->max_num_queues; i++) {
eq = ac->eqs[i].eq;
@@ -1304,8 +1603,10 @@ static int mana_create_eq(struct mana_context *ac)
for (i = 0; i < gc->max_num_queues; i++) {
spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "Failed to create EQ %d : %d\n", i, err);
goto out;
+ }
mana_create_eq_debugfs(ac, i);
}
@@ -1385,7 +1686,7 @@ static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
return 0;
}
-static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
+void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
{
struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
@@ -1547,8 +1848,12 @@ static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va,
return NULL;
if (xdp->data_hard_start) {
+ u32 metasize = xdp->data - xdp->data_meta;
+
skb_reserve(skb, xdp->data - xdp->data_hard_start);
skb_put(skb, xdp->data_end - xdp->data);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
return skb;
}
@@ -1646,8 +1951,11 @@ drop_xdp:
drop:
if (from_pool) {
- page_pool_recycle_direct(rxq->page_pool,
- virt_to_head_page(buf_va));
+ if (rxq->frag_count == 1)
+ page_pool_recycle_direct(rxq->page_pool,
+ virt_to_head_page(buf_va));
+ else
+ page_pool_free_va(rxq->page_pool, buf_va, true);
} else {
WARN_ON_ONCE(rxq->xdp_save_va);
/* Save for reuse */
@@ -1660,51 +1968,49 @@ drop:
}
static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
- dma_addr_t *da, bool *from_pool, bool is_napi)
+ dma_addr_t *da, bool *from_pool)
{
struct page *page;
+ u32 offset;
void *va;
-
*from_pool = false;
- /* Reuse XDP dropped page if available */
- if (rxq->xdp_save_va) {
- va = rxq->xdp_save_va;
- rxq->xdp_save_va = NULL;
- } else if (rxq->alloc_size > PAGE_SIZE) {
- if (is_napi)
- va = napi_alloc_frag(rxq->alloc_size);
- else
- va = netdev_alloc_frag(rxq->alloc_size);
+	/* Don't use fragments for jumbo frames or XDP, where there is only
+	 * one buffer per page.
+	 */
+ if (rxq->frag_count == 1) {
+ /* Reuse XDP dropped page if available */
+ if (rxq->xdp_save_va) {
+ va = rxq->xdp_save_va;
+ page = virt_to_head_page(va);
+ rxq->xdp_save_va = NULL;
+ } else {
+ page = page_pool_dev_alloc_pages(rxq->page_pool);
+ if (!page)
+ return NULL;
- if (!va)
- return NULL;
+ *from_pool = true;
+ va = page_to_virt(page);
+ }
- page = virt_to_head_page(va);
- /* Check if the frag falls back to single page */
- if (compound_order(page) < get_order(rxq->alloc_size)) {
- put_page(page);
+ *da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, *da)) {
+ mana_put_rx_page(rxq, page, *from_pool);
return NULL;
}
- } else {
- page = page_pool_dev_alloc_pages(rxq->page_pool);
- if (!page)
- return NULL;
- *from_pool = true;
- va = page_to_virt(page);
+ return va;
}
- *da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, *da)) {
- if (*from_pool)
- page_pool_put_full_page(rxq->page_pool, page, false);
- else
- put_page(virt_to_head_page(va));
-
+ page = page_pool_dev_alloc_frag(rxq->page_pool, &offset,
+ rxq->alloc_size);
+ if (!page)
return NULL;
- }
+
+ va = page_to_virt(page) + offset;
+ *da = page_pool_get_dma_addr(page) + offset + rxq->headroom;
+ *from_pool = true;
return va;
}
@@ -1718,12 +2024,12 @@ static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
dma_addr_t da;
void *va;
- va = mana_get_rxfrag(rxq, dev, &da, &from_pool, true);
+ va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
if (!va)
return;
-
- dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize,
- DMA_FROM_DEVICE);
+ if (!rxoob->from_pool || rxq->frag_count == 1)
+ dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize,
+ DMA_FROM_DEVICE);
*old_buf = rxoob->buf_va;
*old_fp = rxoob->from_pool;
@@ -1914,12 +2220,13 @@ static void mana_destroy_txq(struct mana_port_context *apc)
for (i = 0; i < apc->num_queues; i++) {
debugfs_remove_recursive(apc->tx_qp[i].mana_tx_debugfs);
+ apc->tx_qp[i].mana_tx_debugfs = NULL;
napi = &apc->tx_qp[i].tx_cq.napi;
if (apc->tx_qp[i].txq.napi_initialized) {
napi_synchronize(napi);
- napi_disable(napi);
- netif_napi_del(napi);
+ napi_disable_locked(napi);
+ netif_napi_del_locked(napi);
apc->tx_qp[i].txq.napi_initialized = false;
}
mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
@@ -2071,8 +2378,9 @@ static int mana_create_txq(struct mana_port_context *apc,
mana_create_txq_debugfs(apc, i);
- netif_napi_add_tx(net, &cq->napi, mana_poll);
- napi_enable(&cq->napi);
+ set_bit(NAPI_STATE_NO_BUSY_POLL, &cq->napi.state);
+ netif_napi_add_locked(net, &cq->napi, mana_poll);
+ napi_enable_locked(&cq->napi);
txq->napi_initialized = true;
mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
@@ -2080,6 +2388,8 @@ static int mana_create_txq(struct mana_port_context *apc,
return 0;
out:
+ netdev_err(net, "Failed to create %d TX queues, %d\n",
+ apc->num_queues, err);
mana_destroy_txq(apc);
return err;
}
@@ -2099,15 +2409,15 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
return;
debugfs_remove_recursive(rxq->mana_rx_debugfs);
+ rxq->mana_rx_debugfs = NULL;
napi = &rxq->rx_cq.napi;
if (napi_initialized) {
napi_synchronize(napi);
- napi_disable(napi);
-
- netif_napi_del(napi);
+ napi_disable_locked(napi);
+ netif_napi_del_locked(napi);
}
xdp_rxq_info_unreg(&rxq->xdp_rxq);
@@ -2124,15 +2434,15 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
if (!rx_oob->buf_va)
continue;
- dma_unmap_single(dev, rx_oob->sgl[0].address,
- rx_oob->sgl[0].size, DMA_FROM_DEVICE);
-
page = virt_to_head_page(rx_oob->buf_va);
- if (rx_oob->from_pool)
- page_pool_put_full_page(rxq->page_pool, page, false);
- else
- put_page(page);
+ if (rxq->frag_count == 1 || !rx_oob->from_pool) {
+ dma_unmap_single(dev, rx_oob->sgl[0].address,
+ rx_oob->sgl[0].size, DMA_FROM_DEVICE);
+ mana_put_rx_page(rxq, page, rx_oob->from_pool);
+ } else {
+ page_pool_free_va(rxq->page_pool, rx_oob->buf_va, true);
+ }
rx_oob->buf_va = NULL;
}
@@ -2156,7 +2466,7 @@ static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
if (mpc->rxbufs_pre)
va = mana_get_rxbuf_pre(rxq, &da);
else
- va = mana_get_rxfrag(rxq, dev, &da, &from_pool, false);
+ va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
if (!va)
return -ENOMEM;
@@ -2238,10 +2548,22 @@ static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
struct page_pool_params pprm = {};
int ret;
- pprm.pool_size = mpc->rx_queue_size;
+ pprm.pool_size = mpc->rx_queue_size / rxq->frag_count + 1;
pprm.nid = gc->numa_node;
pprm.napi = &rxq->rx_cq.napi;
pprm.netdev = rxq->ndev;
+ pprm.order = get_order(rxq->alloc_size);
+ pprm.queue_idx = rxq->rxq_idx;
+ pprm.dev = gc->dev;
+
+	/* Let the page pool do the DMA mapping when pages are shared by
+	 * multiple rx buffer fragments.
+	 */
+ if (rxq->frag_count > 1) {
+ pprm.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pprm.max_len = PAGE_SIZE;
+ pprm.dma_dir = DMA_FROM_DEVICE;
+ }
rxq->page_pool = page_pool_create(&pprm);
@@ -2280,9 +2602,8 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
rxq->rxq_idx = rxq_idx;
rxq->rxobj = INVALID_MANA_HANDLE;
- mana_get_rxbuf_cfg(ndev->mtu, &rxq->datasize, &rxq->alloc_size,
- &rxq->headroom);
-
+ mana_get_rxbuf_cfg(apc, ndev->mtu, &rxq->datasize, &rxq->alloc_size,
+ &rxq->headroom, &rxq->frag_count);
/* Create page pool for RX queue */
err = mana_create_page_pool(rxq, gc);
if (err) {
@@ -2357,14 +2678,14 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
gc->cq_table[cq->gdma_id] = cq->gdma_cq;
- netif_napi_add_weight(ndev, &cq->napi, mana_poll, 1);
+ netif_napi_add_weight_locked(ndev, &cq->napi, mana_poll, 1);
WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
cq->napi.napi_id));
WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
rxq->page_pool));
- napi_enable(&cq->napi);
+ napi_enable_locked(&cq->napi);
mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
out:
@@ -2415,6 +2736,7 @@ static int mana_add_rx_queues(struct mana_port_context *apc,
rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
if (!rxq) {
err = -ENOMEM;
+ netdev_err(ndev, "Failed to create rxq %d : %d\n", i, err);
goto out;
}
@@ -2448,7 +2770,7 @@ static void mana_destroy_vport(struct mana_port_context *apc)
mana_destroy_txq(apc);
mana_uncfg_vport(apc);
- if (gd->gdma_context->is_pf)
+ if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode)
mana_pf_deregister_hw_vport(apc);
}
@@ -2460,7 +2782,7 @@ static int mana_create_vport(struct mana_port_context *apc,
apc->default_rxobj = INVALID_MANA_HANDLE;
- if (gd->gdma_context->is_pf) {
+ if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) {
err = mana_pf_register_hw_vport(apc);
if (err)
return err;
@@ -2527,15 +2849,17 @@ int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
return 0;
}
-void mana_query_gf_stats(struct mana_port_context *apc)
+int mana_query_gf_stats(struct mana_context *ac)
{
+ struct gdma_context *gc = ac->gdma_dev->gdma_context;
struct mana_query_gf_stat_resp resp = {};
struct mana_query_gf_stat_req req = {};
- struct net_device *ndev = apc->ndev;
+ struct device *dev = gc->dev;
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_GF_STAT,
sizeof(req), sizeof(resp));
+ req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
req.req_stats = STATISTICS_FLAGS_RX_DISCARDS_NO_WQE |
STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED |
STATISTICS_FLAGS_HC_RX_BYTES |
@@ -2564,52 +2888,136 @@ void mana_query_gf_stats(struct mana_port_context *apc)
STATISTICS_FLAGS_HC_TX_BCAST_BYTES |
STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR;
- err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ err = mana_send_request(ac, &req, sizeof(req), &resp,
sizeof(resp));
if (err) {
- netdev_err(ndev, "Failed to query GF stats: %d\n", err);
- return;
+ dev_err(dev, "Failed to query GF stats: %d\n", err);
+ return err;
}
err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_GF_STAT,
sizeof(resp));
if (err || resp.hdr.status) {
- netdev_err(ndev, "Failed to query GF stats: %d, 0x%x\n", err,
- resp.hdr.status);
- return;
+ dev_err(dev, "Failed to query GF stats: %d, 0x%x\n", err,
+ resp.hdr.status);
+ return err;
}
- apc->eth_stats.hc_rx_discards_no_wqe = resp.rx_discards_nowqe;
- apc->eth_stats.hc_rx_err_vport_disabled = resp.rx_err_vport_disabled;
- apc->eth_stats.hc_rx_bytes = resp.hc_rx_bytes;
- apc->eth_stats.hc_rx_ucast_pkts = resp.hc_rx_ucast_pkts;
- apc->eth_stats.hc_rx_ucast_bytes = resp.hc_rx_ucast_bytes;
- apc->eth_stats.hc_rx_bcast_pkts = resp.hc_rx_bcast_pkts;
- apc->eth_stats.hc_rx_bcast_bytes = resp.hc_rx_bcast_bytes;
- apc->eth_stats.hc_rx_mcast_pkts = resp.hc_rx_mcast_pkts;
- apc->eth_stats.hc_rx_mcast_bytes = resp.hc_rx_mcast_bytes;
- apc->eth_stats.hc_tx_err_gf_disabled = resp.tx_err_gf_disabled;
- apc->eth_stats.hc_tx_err_vport_disabled = resp.tx_err_vport_disabled;
- apc->eth_stats.hc_tx_err_inval_vportoffset_pkt =
+ ac->hc_stats.hc_rx_discards_no_wqe = resp.rx_discards_nowqe;
+ ac->hc_stats.hc_rx_err_vport_disabled = resp.rx_err_vport_disabled;
+ ac->hc_stats.hc_rx_bytes = resp.hc_rx_bytes;
+ ac->hc_stats.hc_rx_ucast_pkts = resp.hc_rx_ucast_pkts;
+ ac->hc_stats.hc_rx_ucast_bytes = resp.hc_rx_ucast_bytes;
+ ac->hc_stats.hc_rx_bcast_pkts = resp.hc_rx_bcast_pkts;
+ ac->hc_stats.hc_rx_bcast_bytes = resp.hc_rx_bcast_bytes;
+ ac->hc_stats.hc_rx_mcast_pkts = resp.hc_rx_mcast_pkts;
+ ac->hc_stats.hc_rx_mcast_bytes = resp.hc_rx_mcast_bytes;
+ ac->hc_stats.hc_tx_err_gf_disabled = resp.tx_err_gf_disabled;
+ ac->hc_stats.hc_tx_err_vport_disabled = resp.tx_err_vport_disabled;
+ ac->hc_stats.hc_tx_err_inval_vportoffset_pkt =
resp.tx_err_inval_vport_offset_pkt;
- apc->eth_stats.hc_tx_err_vlan_enforcement =
+ ac->hc_stats.hc_tx_err_vlan_enforcement =
resp.tx_err_vlan_enforcement;
- apc->eth_stats.hc_tx_err_eth_type_enforcement =
+ ac->hc_stats.hc_tx_err_eth_type_enforcement =
resp.tx_err_ethtype_enforcement;
- apc->eth_stats.hc_tx_err_sa_enforcement = resp.tx_err_SA_enforcement;
- apc->eth_stats.hc_tx_err_sqpdid_enforcement =
+ ac->hc_stats.hc_tx_err_sa_enforcement = resp.tx_err_SA_enforcement;
+ ac->hc_stats.hc_tx_err_sqpdid_enforcement =
resp.tx_err_SQPDID_enforcement;
- apc->eth_stats.hc_tx_err_cqpdid_enforcement =
+ ac->hc_stats.hc_tx_err_cqpdid_enforcement =
resp.tx_err_CQPDID_enforcement;
- apc->eth_stats.hc_tx_err_mtu_violation = resp.tx_err_mtu_violation;
- apc->eth_stats.hc_tx_err_inval_oob = resp.tx_err_inval_oob;
- apc->eth_stats.hc_tx_bytes = resp.hc_tx_bytes;
- apc->eth_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts;
- apc->eth_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes;
- apc->eth_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts;
- apc->eth_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes;
- apc->eth_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts;
- apc->eth_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes;
- apc->eth_stats.hc_tx_err_gdma = resp.tx_err_gdma;
+ ac->hc_stats.hc_tx_err_mtu_violation = resp.tx_err_mtu_violation;
+ ac->hc_stats.hc_tx_err_inval_oob = resp.tx_err_inval_oob;
+ ac->hc_stats.hc_tx_bytes = resp.hc_tx_bytes;
+ ac->hc_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts;
+ ac->hc_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes;
+ ac->hc_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts;
+ ac->hc_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes;
+ ac->hc_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts;
+ ac->hc_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes;
+ ac->hc_stats.hc_tx_err_gdma = resp.tx_err_gdma;
+
+ return 0;
+}
+
+void mana_query_phy_stats(struct mana_port_context *apc)
+{
+ struct mana_query_phy_stat_resp resp = {};
+ struct mana_query_phy_stat_req req = {};
+ struct net_device *ndev = apc->ndev;
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_PHY_STAT,
+ sizeof(req), sizeof(resp));
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err)
+ return;
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_PHY_STAT,
+ sizeof(resp));
+ if (err || resp.hdr.status) {
+ netdev_err(ndev,
+ "Failed to query PHY stats: %d, resp:0x%x\n",
+ err, resp.hdr.status);
+ return;
+ }
+
+ /* Aggregate drop counters */
+ apc->phy_stats.rx_pkt_drop_phy = resp.rx_pkt_drop_phy;
+ apc->phy_stats.tx_pkt_drop_phy = resp.tx_pkt_drop_phy;
+
+ /* Per TC traffic Counters */
+ apc->phy_stats.rx_pkt_tc0_phy = resp.rx_pkt_tc0_phy;
+ apc->phy_stats.tx_pkt_tc0_phy = resp.tx_pkt_tc0_phy;
+ apc->phy_stats.rx_pkt_tc1_phy = resp.rx_pkt_tc1_phy;
+ apc->phy_stats.tx_pkt_tc1_phy = resp.tx_pkt_tc1_phy;
+ apc->phy_stats.rx_pkt_tc2_phy = resp.rx_pkt_tc2_phy;
+ apc->phy_stats.tx_pkt_tc2_phy = resp.tx_pkt_tc2_phy;
+ apc->phy_stats.rx_pkt_tc3_phy = resp.rx_pkt_tc3_phy;
+ apc->phy_stats.tx_pkt_tc3_phy = resp.tx_pkt_tc3_phy;
+ apc->phy_stats.rx_pkt_tc4_phy = resp.rx_pkt_tc4_phy;
+ apc->phy_stats.tx_pkt_tc4_phy = resp.tx_pkt_tc4_phy;
+ apc->phy_stats.rx_pkt_tc5_phy = resp.rx_pkt_tc5_phy;
+ apc->phy_stats.tx_pkt_tc5_phy = resp.tx_pkt_tc5_phy;
+ apc->phy_stats.rx_pkt_tc6_phy = resp.rx_pkt_tc6_phy;
+ apc->phy_stats.tx_pkt_tc6_phy = resp.tx_pkt_tc6_phy;
+ apc->phy_stats.rx_pkt_tc7_phy = resp.rx_pkt_tc7_phy;
+ apc->phy_stats.tx_pkt_tc7_phy = resp.tx_pkt_tc7_phy;
+
+ /* Per TC byte Counters */
+ apc->phy_stats.rx_byte_tc0_phy = resp.rx_byte_tc0_phy;
+ apc->phy_stats.tx_byte_tc0_phy = resp.tx_byte_tc0_phy;
+ apc->phy_stats.rx_byte_tc1_phy = resp.rx_byte_tc1_phy;
+ apc->phy_stats.tx_byte_tc1_phy = resp.tx_byte_tc1_phy;
+ apc->phy_stats.rx_byte_tc2_phy = resp.rx_byte_tc2_phy;
+ apc->phy_stats.tx_byte_tc2_phy = resp.tx_byte_tc2_phy;
+ apc->phy_stats.rx_byte_tc3_phy = resp.rx_byte_tc3_phy;
+ apc->phy_stats.tx_byte_tc3_phy = resp.tx_byte_tc3_phy;
+ apc->phy_stats.rx_byte_tc4_phy = resp.rx_byte_tc4_phy;
+ apc->phy_stats.tx_byte_tc4_phy = resp.tx_byte_tc4_phy;
+ apc->phy_stats.rx_byte_tc5_phy = resp.rx_byte_tc5_phy;
+ apc->phy_stats.tx_byte_tc5_phy = resp.tx_byte_tc5_phy;
+ apc->phy_stats.rx_byte_tc6_phy = resp.rx_byte_tc6_phy;
+ apc->phy_stats.tx_byte_tc6_phy = resp.tx_byte_tc6_phy;
+ apc->phy_stats.rx_byte_tc7_phy = resp.rx_byte_tc7_phy;
+ apc->phy_stats.tx_byte_tc7_phy = resp.tx_byte_tc7_phy;
+
+ /* Per TC pause Counters */
+ apc->phy_stats.rx_pause_tc0_phy = resp.rx_pause_tc0_phy;
+ apc->phy_stats.tx_pause_tc0_phy = resp.tx_pause_tc0_phy;
+ apc->phy_stats.rx_pause_tc1_phy = resp.rx_pause_tc1_phy;
+ apc->phy_stats.tx_pause_tc1_phy = resp.tx_pause_tc1_phy;
+ apc->phy_stats.rx_pause_tc2_phy = resp.rx_pause_tc2_phy;
+ apc->phy_stats.tx_pause_tc2_phy = resp.tx_pause_tc2_phy;
+ apc->phy_stats.rx_pause_tc3_phy = resp.rx_pause_tc3_phy;
+ apc->phy_stats.tx_pause_tc3_phy = resp.tx_pause_tc3_phy;
+ apc->phy_stats.rx_pause_tc4_phy = resp.rx_pause_tc4_phy;
+ apc->phy_stats.tx_pause_tc4_phy = resp.tx_pause_tc4_phy;
+ apc->phy_stats.rx_pause_tc5_phy = resp.rx_pause_tc5_phy;
+ apc->phy_stats.tx_pause_tc5_phy = resp.tx_pause_tc5_phy;
+ apc->phy_stats.rx_pause_tc6_phy = resp.rx_pause_tc6_phy;
+ apc->phy_stats.tx_pause_tc6_phy = resp.tx_pause_tc6_phy;
+ apc->phy_stats.rx_pause_tc7_phy = resp.rx_pause_tc7_phy;
+ apc->phy_stats.tx_pause_tc7_phy = resp.tx_pause_tc7_phy;
}
static int mana_init_port(struct net_device *ndev)
@@ -2660,12 +3068,18 @@ int mana_alloc_queues(struct net_device *ndev)
int err;
err = mana_create_vport(apc, ndev);
- if (err)
+ if (err) {
+ netdev_err(ndev, "Failed to create vPort %u : %d\n", apc->port_idx, err);
return err;
+ }
err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
- if (err)
+ if (err) {
+ netdev_err(ndev,
+			   "netif_set_real_num_tx_queues() failed for ndev with num_queues %u : %d\n",
+ apc->num_queues, err);
goto destroy_vport;
+ }
err = mana_add_rx_queues(apc, ndev);
if (err)
@@ -2674,16 +3088,22 @@ int mana_alloc_queues(struct net_device *ndev)
apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
- if (err)
+ if (err) {
+ netdev_err(ndev,
+			   "netif_set_real_num_rx_queues() failed for ndev with num_queues %u : %d\n",
+ apc->num_queues, err);
goto destroy_vport;
+ }
mana_rss_table_init(apc);
err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
- if (err)
+ if (err) {
+ netdev_err(ndev, "Failed to configure RSS table: %d\n", err);
goto destroy_vport;
+ }
- if (gd->gdma_context->is_pf) {
+ if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) {
err = mana_pf_register_filter(apc);
if (err)
goto destroy_vport;
@@ -2722,9 +3142,6 @@ int mana_attach(struct net_device *ndev)
/* Ensure port state updated before txq state */
smp_wmb();
- if (apc->port_is_up)
- netif_carrier_on(ndev);
-
netif_device_attach(ndev);
return 0;
@@ -2745,7 +3162,7 @@ static int mana_dealloc_queues(struct net_device *ndev)
mana_chn_setxdp(apc, NULL);
- if (gd->gdma_context->is_pf)
+ if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode)
mana_pf_deregister_filter(apc);
/* No packet can be transmitted now since apc->port_is_up is false.
@@ -2794,11 +3211,10 @@ static int mana_dealloc_queues(struct net_device *ndev)
apc->rss_state = TRI_STATE_FALSE;
err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
- if (err) {
+ if (err && mana_en_need_log(apc, err))
netdev_err(ndev, "Failed to disable vPort: %d\n", err);
- return err;
- }
+	/* Even in the error case, we still need to clean up the vPort */
mana_destroy_vport(apc);
return 0;
@@ -2818,12 +3234,13 @@ int mana_detach(struct net_device *ndev, bool from_close)
smp_wmb();
netif_tx_disable(ndev);
- netif_carrier_off(ndev);
if (apc->port_st_save) {
err = mana_dealloc_queues(ndev);
- if (err)
+ if (err) {
+ netdev_err(ndev, "%s failed to deallocate queues: %d\n", __func__, err);
return err;
+ }
}
if (!from_close) {
@@ -2872,6 +3289,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
ndev->dev_port = port_idx;
SET_NETDEV_DEV(ndev, gc->dev);
+ netif_set_tso_max_size(ndev, GSO_MAX_SIZE);
+
netif_carrier_off(ndev);
netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
@@ -2903,6 +3322,10 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
goto free_indir;
}
+ netif_carrier_on(ndev);
+
+ debugfs_create_u32("current_speed", 0400, apc->mana_port_debugfs, &apc->speed);
+
return 0;
free_indir:
@@ -2935,7 +3358,7 @@ static void remove_adev(struct gdma_dev *gd)
gd->adev = NULL;
}
-static int add_adev(struct gdma_dev *gd)
+static int add_adev(struct gdma_dev *gd, const char *name)
{
struct auxiliary_device *adev;
struct mana_adev *madev;
@@ -2951,7 +3374,7 @@ static int add_adev(struct gdma_dev *gd)
goto idx_fail;
adev->id = ret;
- adev->name = "rdma";
+ adev->name = name;
adev->dev.parent = gd->gdma_context->dev;
adev->dev.release = adev_release;
madev->mdev = gd;
@@ -2967,6 +3390,8 @@ static int add_adev(struct gdma_dev *gd)
goto add_fail;
gd->adev = adev;
+ dev_dbg(gd->gdma_context->dev,
+ "Auxiliary device added successfully\n");
return 0;
add_fail:
@@ -2981,11 +3406,94 @@ idx_fail:
return ret;
}
+static void mana_rdma_service_handle(struct work_struct *work)
+{
+ struct mana_service_work *serv_work =
+ container_of(work, struct mana_service_work, work);
+ struct gdma_dev *gd = serv_work->gdma_dev;
+ struct device *dev = gd->gdma_context->dev;
+ int ret;
+
+ if (READ_ONCE(gd->rdma_teardown))
+ goto out;
+
+ switch (serv_work->event) {
+ case GDMA_SERVICE_TYPE_RDMA_SUSPEND:
+ if (!gd->adev || gd->is_suspended)
+ break;
+
+ remove_adev(gd);
+ gd->is_suspended = true;
+ break;
+
+ case GDMA_SERVICE_TYPE_RDMA_RESUME:
+ if (!gd->is_suspended)
+ break;
+
+ ret = add_adev(gd, "rdma");
+ if (ret)
+ dev_err(dev, "Failed to add adev on resume: %d\n", ret);
+ else
+ gd->is_suspended = false;
+ break;
+
+ default:
+ dev_warn(dev, "unknown adev service event %u\n",
+ serv_work->event);
+ break;
+ }
+
+out:
+ kfree(serv_work);
+}
+
+int mana_rdma_service_event(struct gdma_context *gc, enum gdma_service_type event)
+{
+ struct gdma_dev *gd = &gc->mana_ib;
+ struct mana_service_work *serv_work;
+
+ if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) {
+ /* RDMA device is not detected on pci */
+ return 0;
+ }
+
+ serv_work = kzalloc(sizeof(*serv_work), GFP_ATOMIC);
+ if (!serv_work)
+ return -ENOMEM;
+
+ serv_work->event = event;
+ serv_work->gdma_dev = gd;
+
+ INIT_WORK(&serv_work->work, mana_rdma_service_handle);
+ queue_work(gc->service_wq, &serv_work->work);
+
+ return 0;
+}
+
+#define MANA_GF_STATS_PERIOD (2 * HZ)
+
+static void mana_gf_stats_work_handler(struct work_struct *work)
+{
+ struct mana_context *ac =
+ container_of(to_delayed_work(work), struct mana_context, gf_stats_work);
+ int err;
+
+ err = mana_query_gf_stats(ac);
+ if (err == -ETIMEDOUT) {
+ /* HWC timeout detected - reset stats and stop rescheduling */
+ ac->hwc_timeout_occurred = true;
+ memset(&ac->hc_stats, 0, sizeof(ac->hc_stats));
+ return;
+ }
+ schedule_delayed_work(&ac->gf_stats_work, MANA_GF_STATS_PERIOD);
+}
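For reference, a minimal userspace sketch of the re-arm-until-timeout pattern used by mana_gf_stats_work_handler() above; query_stats() stands in for the HWC stats query and the loop stands in for the 2-second delayed work.

#include <stdio.h>
#include <errno.h>

static int query_stats(int iteration)
{
	/* Pretend the device stops answering on the fourth poll */
	return (iteration >= 3) ? -ETIMEDOUT : 0;
}

int main(void)
{
	for (int i = 0; ; i++) {
		int err = query_stats(i);

		if (err == -ETIMEDOUT) {
			/* The real handler also clears cached stats here */
			printf("poll %d timed out, stop rescheduling\n", i);
			break;
		}
		printf("poll %d ok\n", i);
	}
	return 0;
}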
+
int mana_probe(struct gdma_dev *gd, bool resuming)
{
struct gdma_context *gc = gd->gdma_context;
struct mana_context *ac = gd->driver_data;
struct device *dev = gc->dev;
+ u8 bm_hostmode = 0;
u16 num_ports = 0;
int err;
int i;
@@ -3008,16 +3516,22 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
}
err = mana_create_eq(ac);
- if (err)
+ if (err) {
+ dev_err(dev, "Failed to create EQs: %d\n", err);
goto out;
+ }
err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
- MANA_MICRO_VERSION, &num_ports);
+ MANA_MICRO_VERSION, &num_ports, &bm_hostmode);
if (err)
goto out;
+ ac->bm_hostmode = bm_hostmode;
+
if (!resuming) {
ac->num_ports = num_ports;
+
+ INIT_WORK(&ac->link_change_work, mana_link_state_handle);
} else {
if (ac->num_ports != num_ports) {
dev_err(dev, "The number of vPorts changed: %d->%d\n",
@@ -3025,6 +3539,8 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
err = -EPROTO;
goto out;
}
+
+ enable_work(&ac->link_change_work);
}
if (ac->num_ports == 0)
@@ -3063,10 +3579,20 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
}
}
- err = add_adev(gd);
+ err = add_adev(gd, "eth");
+
+ INIT_DELAYED_WORK(&ac->gf_stats_work, mana_gf_stats_work_handler);
+ schedule_delayed_work(&ac->gf_stats_work, MANA_GF_STATS_PERIOD);
+
out:
- if (err)
+ if (err) {
mana_remove(gd, false);
+ } else {
+ dev_dbg(dev, "gd=%p, id=%u, num_ports=%d, type=%u, instance=%u\n",
+ gd, gd->dev_id.as_uint32, ac->num_ports,
+ gd->dev_id.type, gd->dev_id.instance);
+ dev_dbg(dev, "%s succeeded\n", __func__);
+ }
return err;
}
@@ -3081,6 +3607,9 @@ void mana_remove(struct gdma_dev *gd, bool suspending)
int err;
int i;
+ disable_work_sync(&ac->link_change_work);
+ cancel_delayed_work_sync(&ac->gf_stats_work);
+
/* adev currently doesn't support suspending, always remove it */
if (gd->adev)
remove_adev(gd);
@@ -3128,23 +3657,68 @@ out:
gd->driver_data = NULL;
gd->gdma_context = NULL;
kfree(ac);
+ dev_dbg(dev, "%s succeeded\n", __func__);
+}
+
+int mana_rdma_probe(struct gdma_dev *gd)
+{
+ int err = 0;
+
+ if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) {
+ /* RDMA device is not detected on pci */
+ return err;
+ }
+
+ err = mana_gd_register_device(gd);
+ if (err)
+ return err;
+
+ err = add_adev(gd, "rdma");
+ if (err)
+ mana_gd_deregister_device(gd);
+
+ return err;
+}
+
+void mana_rdma_remove(struct gdma_dev *gd)
+{
+ struct gdma_context *gc = gd->gdma_context;
+
+ if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) {
+ /* RDMA device is not detected on pci */
+ return;
+ }
+
+ WRITE_ONCE(gd->rdma_teardown, true);
+ flush_workqueue(gc->service_wq);
+
+ if (gd->adev)
+ remove_adev(gd);
+
+ mana_gd_deregister_device(gd);
}
-struct net_device *mana_get_primary_netdev_rcu(struct mana_context *ac, u32 port_index)
+struct net_device *mana_get_primary_netdev(struct mana_context *ac,
+ u32 port_index,
+ netdevice_tracker *tracker)
{
struct net_device *ndev;
- RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
- "Taking primary netdev without holding the RCU read lock");
if (port_index >= ac->num_ports)
return NULL;
- /* When mana is used in netvsc, the upper netdevice should be returned. */
- if (ac->ports[port_index]->flags & IFF_SLAVE)
- ndev = netdev_master_upper_dev_get_rcu(ac->ports[port_index]);
- else
+ rcu_read_lock();
+
+ /* If mana is used in netvsc, the upper netdevice should be returned. */
+ ndev = netdev_master_upper_dev_get_rcu(ac->ports[port_index]);
+
+ /* If there is no upper device, use the parent Ethernet device */
+ if (!ndev)
ndev = ac->ports[port_index];
+ netdev_hold(ndev, tracker, GFP_ATOMIC);
+ rcu_read_unlock();
+
return ndev;
}
-EXPORT_SYMBOL_NS(mana_get_primary_netdev_rcu, NET_MANA);
+EXPORT_SYMBOL_NS(mana_get_primary_netdev, "NET_MANA");
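For reference, a hypothetical in-kernel caller of the reworked mana_get_primary_netdev() above (not part of this patch, and assuming the driver's usual headers): the helper now takes a reference on the returned netdev via the tracker, which the caller must drop with netdev_put() when done.

static int example_use_primary_netdev(struct mana_context *ac)
{
	netdevice_tracker tracker;
	struct net_device *ndev;

	/* Takes a reference on the returned device (or its upper netvsc dev) */
	ndev = mana_get_primary_netdev(ac, 0, &tracker);
	if (!ndev)
		return -ENODEV;

	netdev_info(ndev, "primary netdev: %s\n", ndev->name);

	netdev_put(ndev, &tracker);	/* release the tracked reference */
	return 0;
}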
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index c419626073f5..0e2f4343ac67 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -7,72 +7,132 @@
#include <net/mana/mana.h>
-static const struct {
+struct mana_stats_desc {
char name[ETH_GSTRING_LEN];
u16 offset;
-} mana_eth_stats[] = {
+};
+
+static const struct mana_stats_desc mana_eth_stats[] = {
{"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
{"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
- {"hc_rx_discards_no_wqe", offsetof(struct mana_ethtool_stats,
+ {"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)},
+ {"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
+ tx_cqe_unknown_type)},
+ {"tx_linear_pkt_cnt", offsetof(struct mana_ethtool_stats,
+ tx_linear_pkt_cnt)},
+ {"rx_coalesced_err", offsetof(struct mana_ethtool_stats,
+ rx_coalesced_err)},
+ {"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
+ rx_cqe_unknown_type)},
+};
+
+static const struct mana_stats_desc mana_hc_stats[] = {
+ {"hc_rx_discards_no_wqe", offsetof(struct mana_ethtool_hc_stats,
hc_rx_discards_no_wqe)},
- {"hc_rx_err_vport_disabled", offsetof(struct mana_ethtool_stats,
+ {"hc_rx_err_vport_disabled", offsetof(struct mana_ethtool_hc_stats,
hc_rx_err_vport_disabled)},
- {"hc_rx_bytes", offsetof(struct mana_ethtool_stats, hc_rx_bytes)},
- {"hc_rx_ucast_pkts", offsetof(struct mana_ethtool_stats,
+ {"hc_rx_bytes", offsetof(struct mana_ethtool_hc_stats, hc_rx_bytes)},
+ {"hc_rx_ucast_pkts", offsetof(struct mana_ethtool_hc_stats,
hc_rx_ucast_pkts)},
- {"hc_rx_ucast_bytes", offsetof(struct mana_ethtool_stats,
+ {"hc_rx_ucast_bytes", offsetof(struct mana_ethtool_hc_stats,
hc_rx_ucast_bytes)},
- {"hc_rx_bcast_pkts", offsetof(struct mana_ethtool_stats,
+ {"hc_rx_bcast_pkts", offsetof(struct mana_ethtool_hc_stats,
hc_rx_bcast_pkts)},
- {"hc_rx_bcast_bytes", offsetof(struct mana_ethtool_stats,
+ {"hc_rx_bcast_bytes", offsetof(struct mana_ethtool_hc_stats,
hc_rx_bcast_bytes)},
- {"hc_rx_mcast_pkts", offsetof(struct mana_ethtool_stats,
- hc_rx_mcast_pkts)},
- {"hc_rx_mcast_bytes", offsetof(struct mana_ethtool_stats,
+ {"hc_rx_mcast_pkts", offsetof(struct mana_ethtool_hc_stats,
+ hc_rx_mcast_pkts)},
+ {"hc_rx_mcast_bytes", offsetof(struct mana_ethtool_hc_stats,
hc_rx_mcast_bytes)},
- {"hc_tx_err_gf_disabled", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_err_gf_disabled", offsetof(struct mana_ethtool_hc_stats,
hc_tx_err_gf_disabled)},
- {"hc_tx_err_vport_disabled", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_err_vport_disabled", offsetof(struct mana_ethtool_hc_stats,
hc_tx_err_vport_disabled)},
{"hc_tx_err_inval_vportoffset_pkt",
- offsetof(struct mana_ethtool_stats,
+ offsetof(struct mana_ethtool_hc_stats,
hc_tx_err_inval_vportoffset_pkt)},
- {"hc_tx_err_vlan_enforcement", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_err_vlan_enforcement", offsetof(struct mana_ethtool_hc_stats,
hc_tx_err_vlan_enforcement)},
{"hc_tx_err_eth_type_enforcement",
- offsetof(struct mana_ethtool_stats, hc_tx_err_eth_type_enforcement)},
- {"hc_tx_err_sa_enforcement", offsetof(struct mana_ethtool_stats,
+ offsetof(struct mana_ethtool_hc_stats, hc_tx_err_eth_type_enforcement)},
+ {"hc_tx_err_sa_enforcement", offsetof(struct mana_ethtool_hc_stats,
hc_tx_err_sa_enforcement)},
{"hc_tx_err_sqpdid_enforcement",
- offsetof(struct mana_ethtool_stats, hc_tx_err_sqpdid_enforcement)},
+ offsetof(struct mana_ethtool_hc_stats, hc_tx_err_sqpdid_enforcement)},
{"hc_tx_err_cqpdid_enforcement",
- offsetof(struct mana_ethtool_stats, hc_tx_err_cqpdid_enforcement)},
- {"hc_tx_err_mtu_violation", offsetof(struct mana_ethtool_stats,
+ offsetof(struct mana_ethtool_hc_stats, hc_tx_err_cqpdid_enforcement)},
+ {"hc_tx_err_mtu_violation", offsetof(struct mana_ethtool_hc_stats,
hc_tx_err_mtu_violation)},
- {"hc_tx_err_inval_oob", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_err_inval_oob", offsetof(struct mana_ethtool_hc_stats,
hc_tx_err_inval_oob)},
- {"hc_tx_err_gdma", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_err_gdma", offsetof(struct mana_ethtool_hc_stats,
hc_tx_err_gdma)},
- {"hc_tx_bytes", offsetof(struct mana_ethtool_stats, hc_tx_bytes)},
- {"hc_tx_ucast_pkts", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_bytes", offsetof(struct mana_ethtool_hc_stats, hc_tx_bytes)},
+ {"hc_tx_ucast_pkts", offsetof(struct mana_ethtool_hc_stats,
hc_tx_ucast_pkts)},
- {"hc_tx_ucast_bytes", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_ucast_bytes", offsetof(struct mana_ethtool_hc_stats,
hc_tx_ucast_bytes)},
- {"hc_tx_bcast_pkts", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_bcast_pkts", offsetof(struct mana_ethtool_hc_stats,
hc_tx_bcast_pkts)},
- {"hc_tx_bcast_bytes", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_bcast_bytes", offsetof(struct mana_ethtool_hc_stats,
hc_tx_bcast_bytes)},
- {"hc_tx_mcast_pkts", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_mcast_pkts", offsetof(struct mana_ethtool_hc_stats,
hc_tx_mcast_pkts)},
- {"hc_tx_mcast_bytes", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_mcast_bytes", offsetof(struct mana_ethtool_hc_stats,
hc_tx_mcast_bytes)},
- {"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)},
- {"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
- tx_cqe_unknown_type)},
- {"rx_coalesced_err", offsetof(struct mana_ethtool_stats,
- rx_coalesced_err)},
- {"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
- rx_cqe_unknown_type)},
+};
+
+static const struct mana_stats_desc mana_phy_stats[] = {
+ { "hc_rx_pkt_drop_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_drop_phy) },
+ { "hc_tx_pkt_drop_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_drop_phy) },
+ { "hc_tc0_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc0_phy) },
+ { "hc_tc0_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc0_phy) },
+ { "hc_tc0_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc0_phy) },
+ { "hc_tc0_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc0_phy) },
+ { "hc_tc1_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc1_phy) },
+ { "hc_tc1_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc1_phy) },
+ { "hc_tc1_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc1_phy) },
+ { "hc_tc1_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc1_phy) },
+ { "hc_tc2_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc2_phy) },
+ { "hc_tc2_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc2_phy) },
+ { "hc_tc2_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc2_phy) },
+ { "hc_tc2_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc2_phy) },
+ { "hc_tc3_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc3_phy) },
+ { "hc_tc3_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc3_phy) },
+ { "hc_tc3_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc3_phy) },
+ { "hc_tc3_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc3_phy) },
+ { "hc_tc4_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc4_phy) },
+ { "hc_tc4_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc4_phy) },
+ { "hc_tc4_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc4_phy) },
+ { "hc_tc4_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc4_phy) },
+ { "hc_tc5_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc5_phy) },
+ { "hc_tc5_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc5_phy) },
+ { "hc_tc5_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc5_phy) },
+ { "hc_tc5_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc5_phy) },
+ { "hc_tc6_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc6_phy) },
+ { "hc_tc6_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc6_phy) },
+ { "hc_tc6_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc6_phy) },
+ { "hc_tc6_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc6_phy) },
+ { "hc_tc7_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc7_phy) },
+ { "hc_tc7_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc7_phy) },
+ { "hc_tc7_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc7_phy) },
+ { "hc_tc7_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc7_phy) },
+ { "hc_tc0_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc0_phy) },
+ { "hc_tc0_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc0_phy) },
+ { "hc_tc1_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc1_phy) },
+ { "hc_tc1_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc1_phy) },
+ { "hc_tc2_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc2_phy) },
+ { "hc_tc2_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc2_phy) },
+ { "hc_tc3_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc3_phy) },
+ { "hc_tc3_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc3_phy) },
+ { "hc_tc4_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc4_phy) },
+ { "hc_tc4_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc4_phy) },
+ { "hc_tc5_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc5_phy) },
+ { "hc_tc5_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc5_phy) },
+ { "hc_tc6_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc6_phy) },
+ { "hc_tc6_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc6_phy) },
+ { "hc_tc7_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc7_phy) },
+ { "hc_tc7_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc7_phy) },
};
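
All three descriptor tables above pair an ethtool string with an offsetof() into the corresponding stats structure, so the dump path can walk them generically. A small sketch of how one descriptor row resolves to a counter value; the helper name is illustrative and not part of the driver:

/* Illustrative only: resolve one descriptor against a stats blob. */
static u64 example_read_stat(const void *stats_base,
			     const struct mana_stats_desc *desc)
{
	return *(const u64 *)((const u8 *)stats_base + desc->offset);
}
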
static int mana_get_sset_count(struct net_device *ndev, int stringset)
@@ -83,8 +143,8 @@ static int mana_get_sset_count(struct net_device *ndev, int stringset)
if (stringset != ETH_SS_STATS)
return -EINVAL;
- return ARRAY_SIZE(mana_eth_stats) + num_queues *
- (MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT);
+	return ARRAY_SIZE(mana_eth_stats) + ARRAY_SIZE(mana_phy_stats) +
+	       ARRAY_SIZE(mana_hc_stats) +
+	       num_queues * (MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT);
}
static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
@@ -95,10 +155,15 @@ static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
if (stringset != ETH_SS_STATS)
return;
-
for (i = 0; i < ARRAY_SIZE(mana_eth_stats); i++)
ethtool_puts(&data, mana_eth_stats[i].name);
+ for (i = 0; i < ARRAY_SIZE(mana_hc_stats); i++)
+ ethtool_puts(&data, mana_hc_stats[i].name);
+
+ for (i = 0; i < ARRAY_SIZE(mana_phy_stats); i++)
+ ethtool_puts(&data, mana_phy_stats[i].name);
+
for (i = 0; i < num_queues; i++) {
ethtool_sprintf(&data, "rx_%d_packets", i);
ethtool_sprintf(&data, "rx_%d_bytes", i);
@@ -128,6 +193,8 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
struct mana_port_context *apc = netdev_priv(ndev);
unsigned int num_queues = apc->num_queues;
void *eth_stats = &apc->eth_stats;
+ void *hc_stats = &apc->ac->hc_stats;
+ void *phy_stats = &apc->phy_stats;
struct mana_stats_rx *rx_stats;
struct mana_stats_tx *tx_stats;
unsigned int start;
@@ -148,12 +215,22 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
if (!apc->port_is_up)
return;
- /* we call mana function to update stats from GDMA */
- mana_query_gf_stats(apc);
+
+	/* Call this mana function to get the phy stats from GDMA; these
+	 * include the aggregate tx/rx drop counters and the per-TC (traffic
+	 * class) tx/rx and pause counters.
+ */
+ mana_query_phy_stats(apc);
for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++)
data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset);
+ for (q = 0; q < ARRAY_SIZE(mana_hc_stats); q++)
+ data[i++] = *(u64 *)(hc_stats + mana_hc_stats[q].offset);
+
+ for (q = 0; q < ARRAY_SIZE(mana_phy_stats); q++)
+ data[i++] = *(u64 *)(phy_stats + mana_phy_stats[q].offset);
+
for (q = 0; q < num_queues; q++) {
rx_stats = &apc->rxqs[q]->stats;
@@ -427,6 +504,12 @@ out:
static int mana_get_link_ksettings(struct net_device *ndev,
struct ethtool_link_ksettings *cmd)
{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ int err;
+
+ err = mana_query_link_cfg(apc);
+ cmd->base.speed = (err) ? SPEED_UNKNOWN : apc->max_speed;
+
cmd->base.duplex = DUPLEX_FULL;
cmd->base.port = PORT_OTHER;
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 3d72aa7b1305..08bee56aea35 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -830,6 +830,7 @@ EXPORT_SYMBOL(ocelot_vlan_prepare);
int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
bool untagged)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
int err;
/* Ignore VID 0 added to our RX filter by the 8021q module, since
@@ -849,6 +850,11 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
ocelot_bridge_vlan_find(ocelot, vid));
if (err)
return err;
+ } else if (ocelot_port->pvid_vlan &&
+ ocelot_bridge_vlan_find(ocelot, vid) == ocelot_port->pvid_vlan) {
+ err = ocelot_port_set_pvid(ocelot, port, NULL);
+ if (err)
+ return err;
}
/* Untagged egress vlan clasification */
@@ -1432,7 +1438,7 @@ void ocelot_ifh_set_basic(void *ifh, struct ocelot *ocelot, int port,
memset(ifh, 0, OCELOT_TAG_LEN);
ocelot_ifh_set_bypass(ifh, 1);
- ocelot_ifh_set_src(ifh, BIT_ULL(ocelot->num_phys_ports));
+ ocelot_ifh_set_src(ifh, ocelot->num_phys_ports);
ocelot_ifh_set_dest(ifh, BIT_ULL(port));
ocelot_ifh_set_qos_class(ifh, qos_class);
ocelot_ifh_set_tag_type(ifh, tag_type);
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 7c9540a71725..469784d3a1a6 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -730,7 +730,7 @@ static void ocelot_get_stats64(struct net_device *dev,
static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr,
- u16 vid, u16 flags,
+ u16 vid, u16 flags, bool *notified,
struct netlink_ext_ack *extack)
{
struct ocelot_port_private *priv = netdev_priv(dev);
@@ -744,7 +744,7 @@ static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
static int ocelot_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
- struct netlink_ext_ack *extack)
+ bool *notified, struct netlink_ext_ack *extack)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
@@ -758,12 +758,13 @@ static int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid,
bool is_static, void *data)
{
struct ocelot_dump_ctx *dump = data;
+ struct ndo_fdb_dump_context *ctx = (void *)dump->cb->ctx;
u32 portid = NETLINK_CB(dump->cb->skb).portid;
u32 seq = dump->cb->nlh->nlmsg_seq;
struct nlmsghdr *nlh;
struct ndmsg *ndm;
- if (dump->idx < dump->cb->args[2])
+ if (dump->idx < ctx->fdb_idx)
goto skip;
nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
@@ -869,23 +870,30 @@ static int ocelot_set_features(struct net_device *dev,
static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
+}
+
+static int ocelot_port_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg)
+{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
int port = priv->port.index;
- /* If the attached PHY device isn't capable of timestamping operations,
- * use our own (when possible).
- */
- if (!phy_has_hwtstamp(dev->phydev) && ocelot->ptp) {
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return ocelot_hwstamp_set(ocelot, port, ifr);
- case SIOCGHWTSTAMP:
- return ocelot_hwstamp_get(ocelot, port, ifr);
- }
- }
+ ocelot_hwstamp_get(ocelot, port, cfg);
- return phy_mii_ioctl(dev->phydev, ifr, cmd);
+ return 0;
+}
+
+static int ocelot_port_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->port.index;
+
+ return ocelot_hwstamp_set(ocelot, port, cfg, extack);
}
static int ocelot_change_mtu(struct net_device *dev, int new_mtu)
@@ -916,6 +924,8 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_set_features = ocelot_set_features,
.ndo_setup_tc = ocelot_setup_tc,
.ndo_eth_ioctl = ocelot_ioctl,
+ .ndo_hwtstamp_get = ocelot_port_hwtstamp_get,
+ .ndo_hwtstamp_set = ocelot_port_hwtstamp_set,
};
struct net_device *ocelot_port_to_netdev(struct ocelot *ocelot, int port)
@@ -992,6 +1002,16 @@ static int ocelot_port_get_ts_info(struct net_device *dev,
return ocelot_get_ts_info(ocelot, port, info);
}
+static void ocelot_port_ts_stats(struct net_device *dev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->port.index;
+
+ ocelot_port_get_ts_stats(ocelot, port, ts_stats);
+}
+
static const struct ethtool_ops ocelot_ethtool_ops = {
.get_strings = ocelot_port_get_strings,
.get_ethtool_stats = ocelot_port_get_ethtool_stats,
@@ -999,6 +1019,7 @@ static const struct ethtool_ops ocelot_ethtool_ops = {
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_ts_info = ocelot_port_get_ts_info,
+ .get_ts_stats = ocelot_port_ts_stats,
};
static void ocelot_port_attr_stp_state_set(struct ocelot *ocelot, int port,
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
index e172638b0601..88b5422cc2a0 100644
--- a/drivers/net/ethernet/mscc/ocelot_ptp.c
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
@@ -14,6 +14,8 @@
#include <soc/mscc/ocelot.h>
#include "ocelot.h"
+#define OCELOT_PTP_TX_TSTAMP_TIMEOUT (5 * HZ)
+
int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
@@ -209,11 +211,6 @@ int ocelot_ptp_enable(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
- /* Reject requests with unsupported flags */
- if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE |
- PTP_PEROUT_PHASE))
- return -EOPNOTSUPP;
-
pin = ptp_find_pin(ocelot->ptp_clock, PTP_PF_PEROUT,
rq->perout.index);
if (pin == 0)
@@ -495,58 +492,64 @@ static int ocelot_traps_to_ptp_rx_filter(unsigned int proto)
return HWTSTAMP_FILTER_NONE;
}
-int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr)
+static int ocelot_ptp_tx_type_to_cmd(int tx_type, int *ptp_cmd)
+{
+ switch (tx_type) {
+ case HWTSTAMP_TX_ON:
+ *ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ /* IFH_REW_OP_ONE_STEP_PTP updates the correctionField,
+ * what we need to update is the originTimestamp.
+ */
+ *ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
+ break;
+ case HWTSTAMP_TX_OFF:
+ *ptp_cmd = 0;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+void ocelot_hwstamp_get(struct ocelot *ocelot, int port,
+ struct kernel_hwtstamp_config *cfg)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
- struct hwtstamp_config cfg = {};
switch (ocelot_port->ptp_cmd) {
case IFH_REW_OP_TWO_STEP_PTP:
- cfg.tx_type = HWTSTAMP_TX_ON;
+ cfg->tx_type = HWTSTAMP_TX_ON;
break;
case IFH_REW_OP_ORIGIN_PTP:
- cfg.tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
+ cfg->tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
break;
default:
- cfg.tx_type = HWTSTAMP_TX_OFF;
+ cfg->tx_type = HWTSTAMP_TX_OFF;
break;
}
- cfg.rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto);
-
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ cfg->rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto);
}
EXPORT_SYMBOL(ocelot_hwstamp_get);
-int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
+int ocelot_hwstamp_set(struct ocelot *ocelot, int port,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
bool l2 = false, l4 = false;
- struct hwtstamp_config cfg;
+ int ptp_cmd;
int err;
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
-
/* Tx type sanity check */
- switch (cfg.tx_type) {
- case HWTSTAMP_TX_ON:
- ocelot_port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
- break;
- case HWTSTAMP_TX_ONESTEP_SYNC:
- /* IFH_REW_OP_ONE_STEP_PTP updates the correctional field, we
- * need to update the origin time.
- */
- ocelot_port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
- break;
- case HWTSTAMP_TX_OFF:
- ocelot_port->ptp_cmd = 0;
- break;
- default:
- return -ERANGE;
- }
+ err = ocelot_ptp_tx_type_to_cmd(cfg->tx_type, &ptp_cmd);
+ if (err)
+ return err;
- switch (cfg.rx_filter) {
+ switch (cfg->rx_filter) {
case HWTSTAMP_FILTER_NONE:
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
@@ -573,9 +576,11 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
if (err)
return err;
- cfg.rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto);
+ ocelot_port->ptp_cmd = ptp_cmd;
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ cfg->rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto);
+
+ return 0;
}
EXPORT_SYMBOL(ocelot_hwstamp_set);
@@ -603,34 +608,92 @@ int ocelot_get_ts_info(struct ocelot *ocelot, int port,
}
EXPORT_SYMBOL(ocelot_get_ts_info);
-static int ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
- struct sk_buff *clone)
+static struct sk_buff *ocelot_port_dequeue_ptp_tx_skb(struct ocelot *ocelot,
+ int port, u8 ts_id,
+ u32 seqid)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
- unsigned long flags;
+ struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
+ struct ptp_header *hdr;
- spin_lock_irqsave(&ocelot->ts_id_lock, flags);
+ spin_lock(&ocelot->ts_id_lock);
- if (ocelot_port->ptp_skbs_in_flight == OCELOT_MAX_PTP_ID ||
- ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) {
- spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
- return -EBUSY;
+ skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) {
+ if (OCELOT_SKB_CB(skb)->ts_id != ts_id)
+ continue;
+
+ /* Check that the timestamp ID is for the expected PTP
+ * sequenceId. We don't have to test ptp_parse_header() against
+ * NULL, because we've pre-validated the packet's ptp_class.
+ */
+ hdr = ptp_parse_header(skb, OCELOT_SKB_CB(skb)->ptp_class);
+ if (seqid != ntohs(hdr->sequence_id))
+ continue;
+
+ __skb_unlink(skb, &ocelot_port->tx_skbs);
+ ocelot->ptp_skbs_in_flight--;
+ skb_match = skb;
+ break;
}
- skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
- /* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */
- OCELOT_SKB_CB(clone)->ts_id = ocelot_port->ts_id;
+ spin_unlock(&ocelot->ts_id_lock);
- ocelot_port->ts_id++;
- if (ocelot_port->ts_id == OCELOT_MAX_PTP_ID)
- ocelot_port->ts_id = 0;
+ return skb_match;
+}
- ocelot_port->ptp_skbs_in_flight++;
+static int ocelot_port_queue_ptp_tx_skb(struct ocelot *ocelot, int port,
+ struct sk_buff *clone)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ DECLARE_BITMAP(ts_id_in_flight, OCELOT_MAX_PTP_ID);
+ struct sk_buff *skb, *skb_tmp;
+ unsigned long n;
+
+ spin_lock(&ocelot->ts_id_lock);
+
+ /* To get a better chance of acquiring a timestamp ID, first flush the
+ * stale packets still waiting in the TX timestamping queue. They are
+ * probably lost.
+ */
+ skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) {
+ if (time_before(OCELOT_SKB_CB(skb)->ptp_tx_time +
+ OCELOT_PTP_TX_TSTAMP_TIMEOUT, jiffies)) {
+ u64_stats_update_begin(&ocelot_port->ts_stats->syncp);
+ ocelot_port->ts_stats->lost++;
+ u64_stats_update_end(&ocelot_port->ts_stats->syncp);
+
+ dev_dbg_ratelimited(ocelot->dev,
+ "port %d invalidating stale timestamp ID %u which seems lost\n",
+ port, OCELOT_SKB_CB(skb)->ts_id);
+
+ __skb_unlink(skb, &ocelot_port->tx_skbs);
+ kfree_skb(skb);
+ ocelot->ptp_skbs_in_flight--;
+ } else {
+ __set_bit(OCELOT_SKB_CB(skb)->ts_id, ts_id_in_flight);
+ }
+ }
+
+ if (ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) {
+ spin_unlock(&ocelot->ts_id_lock);
+ return -EBUSY;
+ }
+
+ n = find_first_zero_bit(ts_id_in_flight, OCELOT_MAX_PTP_ID);
+ if (n == OCELOT_MAX_PTP_ID) {
+ spin_unlock(&ocelot->ts_id_lock);
+ return -EBUSY;
+ }
+
+ /* Found an available timestamp ID, use it */
+ OCELOT_SKB_CB(clone)->ts_id = n;
+ OCELOT_SKB_CB(clone)->ptp_tx_time = jiffies;
ocelot->ptp_skbs_in_flight++;
+ __skb_queue_tail(&ocelot_port->tx_skbs, clone);
- skb_queue_tail(&ocelot_port->tx_skbs, clone);
+ spin_unlock(&ocelot->ts_id_lock);
- spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
+ dev_dbg_ratelimited(ocelot->dev, "port %d timestamp id %lu\n", port, n);
return 0;
}
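
For readers unfamiliar with the jiffies helpers used in the flush loop above: time_before(a, b) from <linux/jiffies.h> is a wraparound-safe comparison, so the check reads as "this clone has now been queued for longer than OCELOT_PTP_TX_TSTAMP_TIMEOUT". A sketch of that predicate on its own; the helper name is illustrative and not part of the patch:

/* Illustrative only: true once a queued PTP clone is considered stale. */
static bool example_ptp_clone_is_stale(struct sk_buff *skb)
{
	unsigned long deadline = OCELOT_SKB_CB(skb)->ptp_tx_time +
				 OCELOT_PTP_TX_TSTAMP_TIMEOUT;

	return time_before(deadline, jiffies);
}
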
@@ -668,13 +731,20 @@ int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
return 0;
ptp_class = ptp_classify_raw(skb);
- if (ptp_class == PTP_CLASS_NONE)
- return -EINVAL;
+ if (ptp_class == PTP_CLASS_NONE) {
+ err = -EINVAL;
+ goto error;
+ }
/* Store ptp_cmd in OCELOT_SKB_CB(skb)->ptp_cmd */
if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
if (ocelot_ptp_is_onestep_sync(skb, ptp_class)) {
OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
+
+ u64_stats_update_begin(&ocelot_port->ts_stats->syncp);
+ ocelot_port->ts_stats->onestep_pkts_unconfirmed++;
+ u64_stats_update_end(&ocelot_port->ts_stats->syncp);
+
return 0;
}
@@ -684,18 +754,30 @@ int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
*clone = skb_clone_sk(skb);
- if (!(*clone))
- return -ENOMEM;
+ if (!(*clone)) {
+ err = -ENOMEM;
+ goto error;
+ }
- err = ocelot_port_add_txtstamp_skb(ocelot, port, *clone);
- if (err)
- return err;
+ /* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */
+ err = ocelot_port_queue_ptp_tx_skb(ocelot, port, *clone);
+ if (err) {
+ kfree_skb(*clone);
+ goto error;
+ }
+ skb_shinfo(*clone)->tx_flags |= SKBTX_IN_PROGRESS;
OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
OCELOT_SKB_CB(*clone)->ptp_class = ptp_class;
}
return 0;
+
+error:
+ u64_stats_update_begin(&ocelot_port->ts_stats->syncp);
+ ocelot_port->ts_stats->err++;
+ u64_stats_update_end(&ocelot_port->ts_stats->syncp);
+ return err;
}
EXPORT_SYMBOL(ocelot_port_txtstamp_request);
@@ -726,28 +808,16 @@ static void ocelot_get_hwtimestamp(struct ocelot *ocelot,
spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
}
-static bool ocelot_validate_ptp_skb(struct sk_buff *clone, u16 seqid)
-{
- struct ptp_header *hdr;
-
- hdr = ptp_parse_header(clone, OCELOT_SKB_CB(clone)->ptp_class);
- if (WARN_ON(!hdr))
- return false;
-
- return seqid == ntohs(hdr->sequence_id);
-}
-
void ocelot_get_txtstamp(struct ocelot *ocelot)
{
int budget = OCELOT_PTP_QUEUE_SZ;
while (budget--) {
- struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
struct skb_shared_hwtstamps shhwtstamps;
+ struct ocelot_port *ocelot_port;
u32 val, id, seqid, txport;
- struct ocelot_port *port;
+ struct sk_buff *skb_match;
struct timespec64 ts;
- unsigned long flags;
val = ocelot_read(ocelot, SYS_PTP_STATUS);
@@ -761,38 +831,26 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
id = SYS_PTP_STATUS_PTP_MESS_ID_X(val);
txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val);
seqid = SYS_PTP_STATUS_PTP_MESS_SEQ_ID(val);
-
- port = ocelot->ports[txport];
-
- spin_lock(&ocelot->ts_id_lock);
- port->ptp_skbs_in_flight--;
- ocelot->ptp_skbs_in_flight--;
- spin_unlock(&ocelot->ts_id_lock);
+ ocelot_port = ocelot->ports[txport];
/* Retrieve its associated skb */
-try_again:
- spin_lock_irqsave(&port->tx_skbs.lock, flags);
-
- skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
- if (OCELOT_SKB_CB(skb)->ts_id != id)
- continue;
- __skb_unlink(skb, &port->tx_skbs);
- skb_match = skb;
- break;
+ skb_match = ocelot_port_dequeue_ptp_tx_skb(ocelot, txport, id,
+ seqid);
+ if (!skb_match) {
+ u64_stats_update_begin(&ocelot_port->ts_stats->syncp);
+ ocelot_port->ts_stats->err++;
+ u64_stats_update_end(&ocelot_port->ts_stats->syncp);
+
+ dev_dbg_ratelimited(ocelot->dev,
+ "port %d received TX timestamp (seqid %d, ts id %u) for packet previously declared stale\n",
+ txport, seqid, id);
+
+ goto next_ts;
}
- spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
-
- if (WARN_ON(!skb_match))
- continue;
-
- if (!ocelot_validate_ptp_skb(skb_match, seqid)) {
- dev_err_ratelimited(ocelot->dev,
- "port %d received stale TX timestamp for seqid %d, discarding\n",
- txport, seqid);
- dev_kfree_skb_any(skb);
- goto try_again;
- }
+ u64_stats_update_begin(&ocelot_port->ts_stats->syncp);
+ ocelot_port->ts_stats->pkts++;
+ u64_stats_update_end(&ocelot_port->ts_stats->syncp);
/* Get the h/w timestamp */
ocelot_get_hwtimestamp(ocelot, &ts);
@@ -802,7 +860,7 @@ try_again:
shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
skb_complete_tx_timestamp(skb_match, &shhwtstamps);
- /* Next ts */
+next_ts:
ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
}
}
diff --git a/drivers/net/ethernet/mscc/ocelot_stats.c b/drivers/net/ethernet/mscc/ocelot_stats.c
index c018783757fb..d2be1be37716 100644
--- a/drivers/net/ethernet/mscc/ocelot_stats.c
+++ b/drivers/net/ethernet/mscc/ocelot_stats.c
@@ -821,6 +821,26 @@ void ocelot_port_get_eth_phy_stats(struct ocelot *ocelot, int port,
}
EXPORT_SYMBOL_GPL(ocelot_port_get_eth_phy_stats);
+void ocelot_port_get_ts_stats(struct ocelot *ocelot, int port,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_ts_stats *stats = ocelot_port->ts_stats;
+ unsigned int start;
+
+ if (!ocelot->ptp)
+ return;
+
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ ts_stats->pkts = stats->pkts;
+ ts_stats->onestep_pkts_unconfirmed = stats->onestep_pkts_unconfirmed;
+ ts_stats->lost = stats->lost;
+ ts_stats->err = stats->err;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+}
+EXPORT_SYMBOL_GPL(ocelot_port_get_ts_stats);
+
void ocelot_port_get_stats64(struct ocelot *ocelot, int port,
struct rtnl_link_stats64 *stats)
{
@@ -960,6 +980,23 @@ int ocelot_stats_init(struct ocelot *ocelot)
if (!ocelot->stats)
return -ENOMEM;
+ if (ocelot->ptp) {
+ for (int port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ if (!ocelot_port)
+ continue;
+
+ ocelot_port->ts_stats = devm_kzalloc(ocelot->dev,
+ sizeof(*ocelot_port->ts_stats),
+ GFP_KERNEL);
+ if (!ocelot_port->ts_stats)
+ return -ENOMEM;
+
+ u64_stats_init(&ocelot_port->ts_stats->syncp);
+ }
+ }
+
snprintf(queue_name, sizeof(queue_name), "%s-stats",
dev_name(ocelot->dev));
ocelot->stats_queue = create_singlethread_workqueue(queue_name);
@@ -984,6 +1021,6 @@ int ocelot_stats_init(struct ocelot *ocelot)
void ocelot_stats_deinit(struct ocelot *ocelot)
{
- cancel_delayed_work(&ocelot->stats_work);
+ disable_delayed_work_sync(&ocelot->stats_work);
destroy_workqueue(ocelot->stats_queue);
}
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 055b55651a49..498eec8ae61d 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -108,6 +108,8 @@ static struct ptp_clock_info ocelot_ptp_clock_info = {
.n_ext_ts = 0,
.n_per_out = OCELOT_PTP_PINS_NUM,
.n_pins = OCELOT_PTP_PINS_NUM,
+ .supported_perout_flags = PTP_PEROUT_DUTY_CYCLE |
+ PTP_PEROUT_PHASE,
.pps = 0,
.gettime64 = ocelot_ptp_gettime64,
.settime64 = ocelot_ptp_settime64,
diff --git a/drivers/net/ethernet/mucse/Kconfig b/drivers/net/ethernet/mucse/Kconfig
new file mode 100644
index 000000000000..0b3e853d625f
--- /dev/null
+++ b/drivers/net/ethernet/mucse/Kconfig
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Mucse network device configuration
+#
+
+config NET_VENDOR_MUCSE
+ bool "Mucse devices"
+ default y
+ help
+ If you have a network (Ethernet) card from Mucse(R), say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Mucse(R) cards. If you say Y, you will
+ be asked for your specific card in the following questions.
+
+if NET_VENDOR_MUCSE
+
+config MGBE
+ tristate "Mucse(R) 1GbE PCI Express adapters support"
+ depends on PCI
+ help
+	  This driver supports the Mucse(R) 1GbE PCI Express family of
+ adapters.
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/device_drivers/ethernet/mucse/rnpgbe.rst>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called rnpgbe.
+
+endif # NET_VENDOR_MUCSE
+
diff --git a/drivers/net/ethernet/mucse/Makefile b/drivers/net/ethernet/mucse/Makefile
new file mode 100644
index 000000000000..675173fa05f7
--- /dev/null
+++ b/drivers/net/ethernet/mucse/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright(c) 2020 - 2025 MUCSE Corporation.
+#
+# Makefile for the MUCSE(R) network device drivers
+#
+
+obj-$(CONFIG_MGBE) += rnpgbe/
diff --git a/drivers/net/ethernet/mucse/rnpgbe/Makefile b/drivers/net/ethernet/mucse/rnpgbe/Makefile
new file mode 100644
index 000000000000..de8bcb7772ab
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright(c) 2020 - 2025 MUCSE Corporation.
+#
+# Makefile for the MUCSE(R) 1GbE PCI Express ethernet driver
+#
+
+obj-$(CONFIG_MGBE) += rnpgbe.o
+rnpgbe-objs := rnpgbe_main.o\
+ rnpgbe_chip.o\
+ rnpgbe_mbx.o\
+ rnpgbe_mbx_fw.o
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
new file mode 100644
index 000000000000..5b024f9f7e17
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2020 - 2025 Mucse Corporation. */
+
+#ifndef _RNPGBE_H
+#define _RNPGBE_H
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+
+enum rnpgbe_boards {
+ board_n500,
+ board_n210
+};
+
+struct mucse_mbx_info {
+ u32 timeout_us;
+ u32 delay_us;
+ u16 fw_req;
+ u16 fw_ack;
+	/* lock to serialize mailbox access */
+ struct mutex lock;
+ /* fw <--> pf mbx */
+ u32 fwpf_shm_base;
+ u32 pf2fw_mbx_ctrl;
+ u32 fwpf_mbx_mask;
+ u32 fwpf_ctrl_base;
+};
+
+/* Firmware notification modes; more modes (e.g., portup, link_report)
+ * will be added in the future.
+ */
+enum {
+ mucse_fw_powerup,
+};
+
+struct mucse_hw {
+ void __iomem *hw_addr;
+ struct pci_dev *pdev;
+ struct mucse_mbx_info mbx;
+ int port;
+ u8 pfvfnum;
+};
+
+struct mucse_stats {
+ u64 tx_dropped;
+};
+
+struct mucse {
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct mucse_hw hw;
+ struct mucse_stats stats;
+};
+
+int rnpgbe_get_permanent_mac(struct mucse_hw *hw, u8 *perm_addr);
+int rnpgbe_reset_hw(struct mucse_hw *hw);
+int rnpgbe_send_notify(struct mucse_hw *hw,
+ bool enable,
+ int mode);
+int rnpgbe_init_hw(struct mucse_hw *hw, int board_type);
+
+/* Device IDs */
+#define PCI_VENDOR_ID_MUCSE 0x8848
+#define RNPGBE_DEVICE_ID_N500_QUAD_PORT 0x8308
+#define RNPGBE_DEVICE_ID_N500_DUAL_PORT 0x8318
+#define RNPGBE_DEVICE_ID_N210 0x8208
+#define RNPGBE_DEVICE_ID_N210L 0x820a
+
+#define mucse_hw_wr32(hw, reg, val) \
+ writel((val), (hw)->hw_addr + (reg))
+#endif /* _RNPGBE_H */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c
new file mode 100644
index 000000000000..ebc7b3750157
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2020 - 2025 Mucse Corporation. */
+
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+
+#include "rnpgbe.h"
+#include "rnpgbe_hw.h"
+#include "rnpgbe_mbx.h"
+#include "rnpgbe_mbx_fw.h"
+
+/**
+ * rnpgbe_get_permanent_mac - Get permanent mac
+ * @hw: hw information structure
+ * @perm_addr: pointer to store perm_addr
+ *
+ * rnpgbe_get_permanent_mac tries to get mac from hw
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+int rnpgbe_get_permanent_mac(struct mucse_hw *hw, u8 *perm_addr)
+{
+ struct device *dev = &hw->pdev->dev;
+ int err;
+
+ err = mucse_mbx_get_macaddr(hw, hw->pfvfnum, perm_addr, hw->port);
+ if (err) {
+ dev_err(dev, "Failed to get MAC from FW %d\n", err);
+ return err;
+ }
+
+ if (!is_valid_ether_addr(perm_addr)) {
+ dev_err(dev, "Failed to get valid MAC from FW\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * rnpgbe_reset_hw - Do a hardware reset
+ * @hw: hw information structure
+ *
+ * rnpgbe_reset_hw calls fw to do a hardware
+ * reset, and cleans some regs to default.
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+int rnpgbe_reset_hw(struct mucse_hw *hw)
+{
+ mucse_hw_wr32(hw, RNPGBE_DMA_AXI_EN, 0);
+ return mucse_mbx_reset_hw(hw);
+}
+
+/**
+ * rnpgbe_send_notify - Send a status notification to fw
+ * @hw: hw information structure
+ * @enable: true or false status
+ * @mode: status mode
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+int rnpgbe_send_notify(struct mucse_hw *hw,
+ bool enable,
+ int mode)
+{
+	int err;
+
+	/* Keep the switch to support more modes in the future */
+ switch (mode) {
+ case mucse_fw_powerup:
+ err = mucse_mbx_powerup(hw, enable);
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+/**
+ * rnpgbe_init_n500 - Setup n500 hw info
+ * @hw: hw information structure
+ *
+ * rnpgbe_init_n500 initializes the private
+ * structure for n500
+ **/
+static void rnpgbe_init_n500(struct mucse_hw *hw)
+{
+ struct mucse_mbx_info *mbx = &hw->mbx;
+
+ mbx->fwpf_ctrl_base = MUCSE_N500_FWPF_CTRL_BASE;
+ mbx->fwpf_shm_base = MUCSE_N500_FWPF_SHM_BASE;
+}
+
+/**
+ * rnpgbe_init_n210 - Setup n210 hw info
+ * @hw: hw information structure
+ *
+ * rnpgbe_init_n210 initializes the private
+ * structure for n210
+ **/
+static void rnpgbe_init_n210(struct mucse_hw *hw)
+{
+ struct mucse_mbx_info *mbx = &hw->mbx;
+
+ mbx->fwpf_ctrl_base = MUCSE_N210_FWPF_CTRL_BASE;
+ mbx->fwpf_shm_base = MUCSE_N210_FWPF_SHM_BASE;
+}
+
+/**
+ * rnpgbe_init_hw - Setup hw info according to board_type
+ * @hw: hw information structure
+ * @board_type: board type
+ *
+ * rnpgbe_init_hw initializes all hw data
+ *
+ * Return: 0 on success, -EINVAL on failure
+ **/
+int rnpgbe_init_hw(struct mucse_hw *hw, int board_type)
+{
+ struct mucse_mbx_info *mbx = &hw->mbx;
+
+ hw->port = 0;
+
+ mbx->pf2fw_mbx_ctrl = MUCSE_GBE_PFFW_MBX_CTRL_OFFSET;
+ mbx->fwpf_mbx_mask = MUCSE_GBE_FWPF_MBX_MASK_OFFSET;
+
+ switch (board_type) {
+ case board_n500:
+ rnpgbe_init_n500(hw);
+ break;
+ case board_n210:
+ rnpgbe_init_n210(hw);
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* init_params with mbx base */
+ mucse_init_mbx_params_pf(hw);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h
new file mode 100644
index 000000000000..e77e6bc3d3e3
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2020 - 2025 Mucse Corporation. */
+
+#ifndef _RNPGBE_HW_H
+#define _RNPGBE_HW_H
+
+#define MUCSE_N500_FWPF_CTRL_BASE 0x28b00
+#define MUCSE_N500_FWPF_SHM_BASE 0x2d000
+#define MUCSE_GBE_PFFW_MBX_CTRL_OFFSET 0x5500
+#define MUCSE_GBE_FWPF_MBX_MASK_OFFSET 0x5700
+#define MUCSE_N210_FWPF_CTRL_BASE 0x29400
+#define MUCSE_N210_FWPF_SHM_BASE 0x2d900
+
+#define RNPGBE_DMA_AXI_EN 0x0010
+
+#define RNPGBE_MAX_QUEUES 8
+#endif /* _RNPGBE_HW_H */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c
new file mode 100644
index 000000000000..316f941629d4
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2020 - 2025 Mucse Corporation. */
+
+#include <linux/pci.h>
+#include <net/rtnetlink.h>
+#include <linux/etherdevice.h>
+
+#include "rnpgbe.h"
+#include "rnpgbe_hw.h"
+#include "rnpgbe_mbx_fw.h"
+
+static const char rnpgbe_driver_name[] = "rnpgbe";
+
+/* rnpgbe_pci_tbl - PCI Device ID Table
+ *
+ * { PCI_VDEVICE(Vendor ID, Device ID),
+ * private_data (used for different hw chip) }
+ */
+static const struct pci_device_id rnpgbe_pci_tbl[] = {
+ { PCI_VDEVICE(MUCSE, RNPGBE_DEVICE_ID_N210), board_n210 },
+ { PCI_VDEVICE(MUCSE, RNPGBE_DEVICE_ID_N210L), board_n210 },
+ { PCI_VDEVICE(MUCSE, RNPGBE_DEVICE_ID_N500_DUAL_PORT), board_n500 },
+ { PCI_VDEVICE(MUCSE, RNPGBE_DEVICE_ID_N500_QUAD_PORT), board_n500 },
+ /* required last entry */
+ {0, },
+};
+
+/**
+ * rnpgbe_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP).
+ *
+ * Return: 0
+ **/
+static int rnpgbe_open(struct net_device *netdev)
+{
+ return 0;
+}
+
+/**
+ * rnpgbe_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS.
+ *
+ * Return: 0, this is not allowed to fail
+ **/
+static int rnpgbe_close(struct net_device *netdev)
+{
+ return 0;
+}
+
+/**
+ * rnpgbe_xmit_frame - Transmit an skb
+ * @skb: skb structure to be sent
+ * @netdev: network interface device structure
+ *
+ * Return: NETDEV_TX_OK
+ **/
+static netdev_tx_t rnpgbe_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct mucse *mucse = netdev_priv(netdev);
+
+ dev_kfree_skb_any(skb);
+ mucse->stats.tx_dropped++;
+
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops rnpgbe_netdev_ops = {
+ .ndo_open = rnpgbe_open,
+ .ndo_stop = rnpgbe_close,
+ .ndo_start_xmit = rnpgbe_xmit_frame,
+};
+
+/**
+ * rnpgbe_add_adapter - Add netdev for this pci_dev
+ * @pdev: PCI device information structure
+ * @board_type: board type
+ *
+ * rnpgbe_add_adapter initializes a netdev for this pci_dev
+ * structure: it maps the BAR, sets up the private structure,
+ * and performs a hardware reset.
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+static int rnpgbe_add_adapter(struct pci_dev *pdev,
+ int board_type)
+{
+ struct net_device *netdev;
+ u8 perm_addr[ETH_ALEN];
+ void __iomem *hw_addr;
+ struct mucse *mucse;
+ struct mucse_hw *hw;
+ int err, err_notify;
+
+ netdev = alloc_etherdev_mq(sizeof(struct mucse), RNPGBE_MAX_QUEUES);
+ if (!netdev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ mucse = netdev_priv(netdev);
+ mucse->netdev = netdev;
+ mucse->pdev = pdev;
+ pci_set_drvdata(pdev, mucse);
+
+ hw = &mucse->hw;
+ hw_addr = devm_ioremap(&pdev->dev,
+ pci_resource_start(pdev, 2),
+ pci_resource_len(pdev, 2));
+ if (!hw_addr) {
+ err = -EIO;
+ goto err_free_net;
+ }
+
+ hw->hw_addr = hw_addr;
+ hw->pdev = pdev;
+
+ err = rnpgbe_init_hw(hw, board_type);
+ if (err) {
+ dev_err(&pdev->dev, "Init hw err %d\n", err);
+ goto err_free_net;
+ }
+	/* Step 1: Send power-up notification to firmware (no response expected).
+ * This informs firmware to initialize hardware power state, but
+ * firmware only acknowledges receipt without returning data. Must be
+ * done before synchronization as firmware may be in low-power idle
+ * state initially.
+ */
+ err_notify = rnpgbe_send_notify(hw, true, mucse_fw_powerup);
+ if (err_notify) {
+ dev_warn(&pdev->dev, "Send powerup to hw failed %d\n",
+ err_notify);
+ dev_warn(&pdev->dev, "Maybe low performance\n");
+ }
+ /* Step 2: Synchronize mailbox communication with firmware (requires
+	 * response). After power-up, confirm firmware is ready to process
+ * requests with responses. This ensures subsequent request/response
+ * interactions work reliably.
+ */
+ err = mucse_mbx_sync_fw(hw);
+ if (err) {
+ dev_err(&pdev->dev, "Sync fw failed! %d\n", err);
+ goto err_powerdown;
+ }
+
+ netdev->netdev_ops = &rnpgbe_netdev_ops;
+ err = rnpgbe_reset_hw(hw);
+ if (err) {
+ dev_err(&pdev->dev, "Hw reset failed %d\n", err);
+ goto err_powerdown;
+ }
+
+ err = rnpgbe_get_permanent_mac(hw, perm_addr);
+ if (!err) {
+ eth_hw_addr_set(netdev, perm_addr);
+ } else if (err == -EINVAL) {
+ dev_warn(&pdev->dev, "Using random MAC\n");
+ eth_hw_addr_random(netdev);
+ } else if (err) {
+ dev_err(&pdev->dev, "get perm_addr failed %d\n", err);
+ goto err_powerdown;
+ }
+
+ err = register_netdev(netdev);
+ if (err)
+ goto err_powerdown;
+
+ return 0;
+err_powerdown:
+	/* only notify powerdown if powerup succeeded */
+ if (!err_notify) {
+ err_notify = rnpgbe_send_notify(hw, false, mucse_fw_powerup);
+ if (err_notify)
+ dev_warn(&pdev->dev, "Send powerdown to hw failed %d\n",
+ err_notify);
+ }
+err_free_net:
+ free_netdev(netdev);
+ return err;
+}
+
+/**
+ * rnpgbe_probe - Device initialization routine
+ * @pdev: PCI device information struct
+ * @id: entry in rnpgbe_pci_tbl
+ *
+ * rnpgbe_probe initializes a PF adapter identified by a pci_dev
+ * structure.
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+static int rnpgbe_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int board_type = id->driver_data;
+ int err;
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(56));
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting %d\n", err);
+ goto err_disable_dev;
+ }
+
+ err = pci_request_mem_regions(pdev, rnpgbe_driver_name);
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_request_selected_regions failed %d\n", err);
+ goto err_disable_dev;
+ }
+
+ pci_set_master(pdev);
+ err = pci_save_state(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_save_state failed %d\n", err);
+ goto err_free_regions;
+ }
+
+ err = rnpgbe_add_adapter(pdev, board_type);
+ if (err)
+ goto err_free_regions;
+
+ return 0;
+err_free_regions:
+ pci_release_mem_regions(pdev);
+err_disable_dev:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * rnpgbe_rm_adapter - Remove netdev for this mucse structure
+ * @pdev: PCI device information struct
+ *
+ * rnpgbe_rm_adapter removes the netdev for this mucse structure
+ **/
+static void rnpgbe_rm_adapter(struct pci_dev *pdev)
+{
+	struct mucse *mucse = pci_get_drvdata(pdev);
+	struct net_device *netdev;
+	struct mucse_hw *hw;
+	int err;
+
+	if (!mucse)
+		return;
+
+	hw = &mucse->hw;
+	netdev = mucse->netdev;
+ unregister_netdev(netdev);
+ err = rnpgbe_send_notify(hw, false, mucse_fw_powerup);
+ if (err)
+ dev_warn(&pdev->dev, "Send powerdown to hw failed %d\n", err);
+ free_netdev(netdev);
+}
+
+/**
+ * rnpgbe_remove - Device removal routine
+ * @pdev: PCI device information struct
+ *
+ * rnpgbe_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void rnpgbe_remove(struct pci_dev *pdev)
+{
+ rnpgbe_rm_adapter(pdev);
+ pci_release_mem_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+/**
+ * rnpgbe_dev_shutdown - Device shutdown routine
+ * @pdev: PCI device information struct
+ **/
+static void rnpgbe_dev_shutdown(struct pci_dev *pdev)
+{
+ struct mucse *mucse = pci_get_drvdata(pdev);
+ struct net_device *netdev = mucse->netdev;
+
+ rtnl_lock();
+ netif_device_detach(netdev);
+ if (netif_running(netdev))
+ rnpgbe_close(netdev);
+ rtnl_unlock();
+ pci_disable_device(pdev);
+}
+
+/**
+ * rnpgbe_shutdown - Device shutdown routine
+ * @pdev: PCI device information struct
+ *
+ * rnpgbe_shutdown is called by the PCI subsystem to alert the driver
+ * that the OS is shutting down. The device should set up its wakeup
+ * state here.
+ **/
+static void rnpgbe_shutdown(struct pci_dev *pdev)
+{
+ rnpgbe_dev_shutdown(pdev);
+}
+
+static struct pci_driver rnpgbe_driver = {
+ .name = rnpgbe_driver_name,
+ .id_table = rnpgbe_pci_tbl,
+ .probe = rnpgbe_probe,
+ .remove = rnpgbe_remove,
+ .shutdown = rnpgbe_shutdown,
+};
+
+module_pci_driver(rnpgbe_driver);
+
+MODULE_DEVICE_TABLE(pci, rnpgbe_pci_tbl);
+MODULE_AUTHOR("Yibo Dong, <dong100@mucse.com>");
+MODULE_DESCRIPTION("Mucse(R) 1 Gigabit PCI Express Network Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c
new file mode 100644
index 000000000000..de5e29230b3c
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2025 Mucse Corporation. */
+
+#include <linux/errno.h>
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
+
+#include "rnpgbe_mbx.h"
+
+/**
+ * mbx_data_rd32 - Reads reg with base mbx->fwpf_shm_base
+ * @mbx: pointer to the MBX structure
+ * @reg: register offset
+ *
+ * Return: register value
+ **/
+static u32 mbx_data_rd32(struct mucse_mbx_info *mbx, u32 reg)
+{
+ struct mucse_hw *hw = container_of(mbx, struct mucse_hw, mbx);
+
+ return readl(hw->hw_addr + mbx->fwpf_shm_base + reg);
+}
+
+/**
+ * mbx_data_wr32 - Writes value to reg with base mbx->fwpf_shm_base
+ * @mbx: pointer to the MBX structure
+ * @reg: register offset
+ * @value: value to be written
+ *
+ **/
+static void mbx_data_wr32(struct mucse_mbx_info *mbx, u32 reg, u32 value)
+{
+ struct mucse_hw *hw = container_of(mbx, struct mucse_hw, mbx);
+
+ writel(value, hw->hw_addr + mbx->fwpf_shm_base + reg);
+}
+
+/**
+ * mbx_ctrl_rd32 - Reads reg with base mbx->fwpf_ctrl_base
+ * @mbx: pointer to the MBX structure
+ * @reg: register offset
+ *
+ * Return: register value
+ **/
+static u32 mbx_ctrl_rd32(struct mucse_mbx_info *mbx, u32 reg)
+{
+ struct mucse_hw *hw = container_of(mbx, struct mucse_hw, mbx);
+
+ return readl(hw->hw_addr + mbx->fwpf_ctrl_base + reg);
+}
+
+/**
+ * mbx_ctrl_wr32 - Writes value to reg with base mbx->fwpf_ctrl_base
+ * @mbx: pointer to the MBX structure
+ * @reg: register offset
+ * @value: value to be written
+ *
+ **/
+static void mbx_ctrl_wr32(struct mucse_mbx_info *mbx, u32 reg, u32 value)
+{
+ struct mucse_hw *hw = container_of(mbx, struct mucse_hw, mbx);
+
+ writel(value, hw->hw_addr + mbx->fwpf_ctrl_base + reg);
+}
+
+/**
+ * mucse_mbx_get_lock_pf - Write ctrl and read back lock status
+ * @hw: pointer to the HW structure
+ *
+ * Return: register value after write
+ **/
+static u32 mucse_mbx_get_lock_pf(struct mucse_hw *hw)
+{
+ struct mucse_mbx_info *mbx = &hw->mbx;
+ u32 reg = MUCSE_MBX_PF2FW_CTRL(mbx);
+
+ mbx_ctrl_wr32(mbx, reg, MUCSE_MBX_PFU);
+
+ return mbx_ctrl_rd32(mbx, reg);
+}
+
+/**
+ * mucse_obtain_mbx_lock_pf - Obtain mailbox lock
+ * @hw: pointer to the HW structure
+ *
+ * Pair with mucse_release_mbx_lock_pf()
+ * This function may be used in an irq handler.
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+static int mucse_obtain_mbx_lock_pf(struct mucse_hw *hw)
+{
+ struct mucse_mbx_info *mbx = &hw->mbx;
+ u32 val;
+
+ return read_poll_timeout_atomic(mucse_mbx_get_lock_pf,
+ val, val & MUCSE_MBX_PFU,
+ mbx->delay_us,
+ mbx->timeout_us,
+ false, hw);
+}
+
+/**
+ * mucse_release_mbx_lock_pf - Release mailbox lock
+ * @hw: pointer to the HW structure
+ * @req: send a request or not
+ *
+ * Pair with mucse_obtain_mbx_lock_pf():
+ * - Releases the mailbox lock by clearing MUCSE_MBX_PFU bit
+ * - Simultaneously sends the request by setting MUCSE_MBX_REQ bit
+ * if req is true
+ * (Both bits are in the same mailbox control register,
+ * so operations are combined)
+ **/
+static void mucse_release_mbx_lock_pf(struct mucse_hw *hw, bool req)
+{
+ struct mucse_mbx_info *mbx = &hw->mbx;
+ u32 reg = MUCSE_MBX_PF2FW_CTRL(mbx);
+
+ mbx_ctrl_wr32(mbx, reg, req ? MUCSE_MBX_REQ : 0);
+}
+
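
The two helpers above are always used bracket-style around the shared-memory window. A minimal sketch of the pattern, mirroring what mucse_read_mbx_pf() and mucse_write_mbx_pf() below do; the function itself is illustrative and not part of the driver:

/* Illustrative only: post a single word to fw under the mailbox lock. */
static int example_post_one_word(struct mucse_hw *hw, u32 word)
{
	int err;

	err = mucse_obtain_mbx_lock_pf(hw);
	if (err)
		return err;

	mbx_data_wr32(&hw->mbx, MUCSE_MBX_FWPF_SHM, word);
	/* The real write path also bumps the PF request counter here. */
	mucse_release_mbx_lock_pf(hw, true);

	return 0;
}
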
+/**
+ * mucse_mbx_get_fwreq - Read fw req from reg
+ * @mbx: pointer to the mbx structure
+ *
+ * Return: the fwreq value
+ **/
+static u16 mucse_mbx_get_fwreq(struct mucse_mbx_info *mbx)
+{
+ u32 val = mbx_data_rd32(mbx, MUCSE_MBX_FW2PF_CNT);
+
+ return FIELD_GET(GENMASK_U32(15, 0), val);
+}
+
+/**
+ * mucse_mbx_inc_pf_ack - Increase ack
+ * @hw: pointer to the HW structure
+ *
+ * mucse_mbx_inc_pf_ack reads pf_ack from hw, then writes
+ * the incremented value back
+ **/
+static void mucse_mbx_inc_pf_ack(struct mucse_hw *hw)
+{
+ struct mucse_mbx_info *mbx = &hw->mbx;
+ u16 ack;
+ u32 val;
+
+ val = mbx_data_rd32(mbx, MUCSE_MBX_PF2FW_CNT);
+ ack = FIELD_GET(GENMASK_U32(31, 16), val);
+ ack++;
+ val &= ~GENMASK_U32(31, 16);
+ val |= FIELD_PREP(GENMASK_U32(31, 16), ack);
+ mbx_data_wr32(mbx, MUCSE_MBX_PF2FW_CNT, val);
+}
+
+/**
+ * mucse_read_mbx_pf - Read a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: the message buffer
+ * @size: length of buffer
+ *
+ * mucse_read_mbx_pf copies a message from the mbx buffer to the caller's
+ * memory buffer. The presumption is that the caller knows that there was
+ * a message due to a fw request so no polling for message is needed.
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+static int mucse_read_mbx_pf(struct mucse_hw *hw, u32 *msg, u16 size)
+{
+ const int size_in_words = size / sizeof(u32);
+ struct mucse_mbx_info *mbx = &hw->mbx;
+ int err;
+
+ err = mucse_obtain_mbx_lock_pf(hw);
+ if (err)
+ return err;
+
+ for (int i = 0; i < size_in_words; i++)
+ msg[i] = mbx_data_rd32(mbx, MUCSE_MBX_FWPF_SHM + 4 * i);
+	/* Hw requires the data register to be written last */
+ mbx_data_wr32(mbx, MUCSE_MBX_FWPF_SHM, 0);
+ /* flush reqs as we have read this request data */
+ hw->mbx.fw_req = mucse_mbx_get_fwreq(mbx);
+ mucse_mbx_inc_pf_ack(hw);
+ mucse_release_mbx_lock_pf(hw, false);
+
+ return 0;
+}
+
+/**
+ * mucse_check_for_msg_pf - Check to see if the fw has sent mail
+ * @hw: pointer to the HW structure
+ *
+ * Return: 0 if the fw has posted a new message, else -EIO
+ **/
+static int mucse_check_for_msg_pf(struct mucse_hw *hw)
+{
+ struct mucse_mbx_info *mbx = &hw->mbx;
+ u16 fw_req;
+
+ fw_req = mucse_mbx_get_fwreq(mbx);
+	/* The chip's register is reset to 0 when the rc sends the reset
+	 * mbx command; return -EIO in that state. Otherwise,
+	 * fw_req == hw->mbx.fw_req means there is no new msg.
+	 */
+ if (fw_req == 0 || fw_req == hw->mbx.fw_req)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * mucse_poll_for_msg - Wait for message notification
+ * @hw: pointer to the HW structure
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+static int mucse_poll_for_msg(struct mucse_hw *hw)
+{
+ struct mucse_mbx_info *mbx = &hw->mbx;
+ int val;
+
+ return read_poll_timeout(mucse_check_for_msg_pf,
+ val, !val, mbx->delay_us,
+ mbx->timeout_us,
+ false, hw);
+}
+
+/**
+ * mucse_poll_and_read_mbx - Wait for message notification and receive message
+ * @hw: pointer to the HW structure
+ * @msg: the message buffer
+ * @size: length of buffer
+ *
+ * Return: 0 if it successfully received a message notification and
+ * copied it into the receive buffer, negative errno on failure
+ **/
+int mucse_poll_and_read_mbx(struct mucse_hw *hw, u32 *msg, u16 size)
+{
+ int err;
+
+ err = mucse_poll_for_msg(hw);
+ if (err)
+ return err;
+
+ return mucse_read_mbx_pf(hw, msg, size);
+}
+
+/**
+ * mucse_mbx_get_fwack - Read fw ack from reg
+ * @mbx: pointer to the MBX structure
+ *
+ * Return: the fwack value
+ **/
+static u16 mucse_mbx_get_fwack(struct mucse_mbx_info *mbx)
+{
+ u32 val = mbx_data_rd32(mbx, MUCSE_MBX_FW2PF_CNT);
+
+ return FIELD_GET(GENMASK_U32(31, 16), val);
+}
+
+/**
+ * mucse_mbx_inc_pf_req - Increase req
+ * @hw: pointer to the HW structure
+ *
+ * mucse_mbx_inc_pf_req reads pf_req from hw, then writes
+ * the incremented value back
+ **/
+static void mucse_mbx_inc_pf_req(struct mucse_hw *hw)
+{
+ struct mucse_mbx_info *mbx = &hw->mbx;
+ u16 req;
+ u32 val;
+
+ val = mbx_data_rd32(mbx, MUCSE_MBX_PF2FW_CNT);
+ req = FIELD_GET(GENMASK_U32(15, 0), val);
+ req++;
+ val &= ~GENMASK_U32(15, 0);
+ val |= FIELD_PREP(GENMASK_U32(15, 0), req);
+ mbx_data_wr32(mbx, MUCSE_MBX_PF2FW_CNT, val);
+}
+
+/**
+ * mucse_write_mbx_pf - Place a message in the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: the message buffer
+ * @size: length of buffer
+ *
+ * Return: 0 if it successfully copied message into the buffer,
+ * negative errno on failure
+ **/
+static int mucse_write_mbx_pf(struct mucse_hw *hw, u32 *msg, u16 size)
+{
+ const int size_in_words = size / sizeof(u32);
+ struct mucse_mbx_info *mbx = &hw->mbx;
+ int err;
+
+ err = mucse_obtain_mbx_lock_pf(hw);
+ if (err)
+ return err;
+
+ for (int i = 0; i < size_in_words; i++)
+ mbx_data_wr32(mbx, MUCSE_MBX_FWPF_SHM + i * 4, msg[i]);
+
+ /* flush acks as we are overwriting the message buffer */
+ hw->mbx.fw_ack = mucse_mbx_get_fwack(mbx);
+ mucse_mbx_inc_pf_req(hw);
+ mucse_release_mbx_lock_pf(hw, true);
+
+ return 0;
+}
+
+/**
+ * mucse_check_for_ack_pf - Check to see if the fw has ACKed
+ * @hw: pointer to the HW structure
+ *
+ * Return: 0 if the fw has sent a new ack, else -EIO
+ **/
+static int mucse_check_for_ack_pf(struct mucse_hw *hw)
+{
+ struct mucse_mbx_info *mbx = &hw->mbx;
+ u16 fw_ack;
+
+ fw_ack = mucse_mbx_get_fwack(mbx);
+	/* The chip's register is reset to 0 when the rc sends the reset
+	 * mbx command; return -EIO in that state. Otherwise,
+	 * fw_ack == hw->mbx.fw_ack means there is no new ack.
+	 */
+ if (fw_ack == 0 || fw_ack == hw->mbx.fw_ack)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * mucse_poll_for_ack - Wait for message acknowledgment
+ * @hw: pointer to the HW structure
+ *
+ * Return: 0 if it successfully received a message acknowledgment,
+ * else negative errno
+ **/
+static int mucse_poll_for_ack(struct mucse_hw *hw)
+{
+ struct mucse_mbx_info *mbx = &hw->mbx;
+ int val;
+
+ return read_poll_timeout(mucse_check_for_ack_pf,
+ val, !val, mbx->delay_us,
+ mbx->timeout_us,
+ false, hw);
+}
+
+/**
+ * mucse_write_and_wait_ack_mbx - Write a message to the mailbox, wait for ack
+ * @hw: pointer to the HW structure
+ * @msg: the message buffer
+ * @size: length of buffer
+ *
+ * Return: 0 if it successfully copied message into the buffer and
+ * received an ack to that message within the configured timeout period
+ **/
+int mucse_write_and_wait_ack_mbx(struct mucse_hw *hw, u32 *msg, u16 size)
+{
+ int err;
+
+ err = mucse_write_mbx_pf(hw, msg, size);
+ if (err)
+ return err;
+
+ return mucse_poll_for_ack(hw);
+}
+
+/**
+ * mucse_mbx_reset - Reset mbx info, sync info from regs
+ * @hw: pointer to the HW structure
+ *
+ * mucse_mbx_reset syncs fw_req/fw_ack from the counter register and
+ * resets the mailbox control and mask registers.
+ **/
+static void mucse_mbx_reset(struct mucse_hw *hw)
+{
+ struct mucse_mbx_info *mbx = &hw->mbx;
+ u32 val;
+
+ val = mbx_data_rd32(mbx, MUCSE_MBX_FW2PF_CNT);
+ hw->mbx.fw_req = FIELD_GET(GENMASK_U32(15, 0), val);
+ hw->mbx.fw_ack = FIELD_GET(GENMASK_U32(31, 16), val);
+ mbx_ctrl_wr32(mbx, MUCSE_MBX_PF2FW_CTRL(mbx), 0);
+ mbx_ctrl_wr32(mbx, MUCSE_MBX_FWPF_MASK(mbx), GENMASK_U32(31, 16));
+}
+
+/**
+ * mucse_init_mbx_params_pf - Set initial values for pf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for pf mailbox
+ */
+void mucse_init_mbx_params_pf(struct mucse_hw *hw)
+{
+ struct mucse_mbx_info *mbx = &hw->mbx;
+
+ mbx->delay_us = 100;
+ mbx->timeout_us = 4 * USEC_PER_SEC;
+ mutex_init(&mbx->lock);
+ mucse_mbx_reset(hw);
+}
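
The FW2PF/PF2FW counter registers used above pack two 16-bit counters (request in bits 15:0, ack in bits 31:16) into one 32-bit word, which the driver manipulates with FIELD_GET()/FIELD_PREP(). A minimal, standalone sketch of that read-modify-write pattern (register names and values here are hypothetical, not the driver's):

	#include <linux/bitfield.h>
	#include <linux/bits.h>
	#include <linux/types.h>

	#define DEMO_CNT_REQ	GENMASK(15, 0)	/* request counter, low half */
	#define DEMO_CNT_ACK	GENMASK(31, 16)	/* ack counter, high half */

	/* Return a copy of the packed counter word with the req half incremented. */
	static u32 demo_cnt_inc_req(u32 val)
	{
		u16 req = FIELD_GET(DEMO_CNT_REQ, val) + 1;

		val &= ~DEMO_CNT_REQ;
		return val | FIELD_PREP(DEMO_CNT_REQ, req);
	}

	/* Extract the ack half, as mucse_mbx_get_fwack() does for FW2PF_CNT. */
	static u16 demo_cnt_get_ack(u32 val)
	{
		return FIELD_GET(DEMO_CNT_ACK, val);
	}
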
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h
new file mode 100644
index 000000000000..e6fcc8d1d3ca
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2020 - 2025 Mucse Corporation. */
+
+#ifndef _RNPGBE_MBX_H
+#define _RNPGBE_MBX_H
+
+#include "rnpgbe.h"
+
+#define MUCSE_MBX_FW2PF_CNT 0
+#define MUCSE_MBX_PF2FW_CNT 4
+#define MUCSE_MBX_FWPF_SHM 8
+#define MUCSE_MBX_PF2FW_CTRL(mbx) ((mbx)->pf2fw_mbx_ctrl)
+#define MUCSE_MBX_FWPF_MASK(mbx) ((mbx)->fwpf_mbx_mask)
+#define MUCSE_MBX_REQ BIT(0) /* Request a req to mailbox */
+#define MUCSE_MBX_PFU BIT(3) /* PF owns the mailbox buffer */
+
+int mucse_write_and_wait_ack_mbx(struct mucse_hw *hw, u32 *msg, u16 size);
+void mucse_init_mbx_params_pf(struct mucse_hw *hw);
+int mucse_poll_and_read_mbx(struct mucse_hw *hw, u32 *msg, u16 size);
+#endif /* _RNPGBE_MBX_H */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c
new file mode 100644
index 000000000000..8c8bd5e8e1db
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2020 - 2025 Mucse Corporation. */
+
+#include <linux/if_ether.h>
+#include <linux/bitfield.h>
+
+#include "rnpgbe.h"
+#include "rnpgbe_mbx.h"
+#include "rnpgbe_mbx_fw.h"
+
+/**
+ * mucse_fw_send_cmd_wait_resp - Send cmd req and wait for response
+ * @hw: pointer to the HW structure
+ * @req: pointer to the cmd req structure
+ * @reply: pointer to the fw reply structure
+ *
+ * mucse_fw_send_cmd_wait_resp sends the req to the pf-fw mailbox and
+ * waits for the reply from fw.
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+static int mucse_fw_send_cmd_wait_resp(struct mucse_hw *hw,
+ struct mbx_fw_cmd_req *req,
+ struct mbx_fw_cmd_reply *reply)
+{
+ int len = le16_to_cpu(req->datalen);
+ int retry_cnt = 3;
+ int err;
+
+ mutex_lock(&hw->mbx.lock);
+ err = mucse_write_and_wait_ack_mbx(hw, (u32 *)req, len);
+ if (err)
+ goto out;
+ do {
+ err = mucse_poll_and_read_mbx(hw, (u32 *)reply,
+ sizeof(*reply));
+ if (err)
+ goto out;
+ /* mucse_write_and_wait_ack_mbx returning 0 means the fw has
+ * received the request; poll for a reply carrying the expected
+ * opcode up to 'retry_cnt' times.
+ */
+ } while (--retry_cnt >= 0 && reply->opcode != req->opcode);
+out:
+ mutex_unlock(&hw->mbx.lock);
+ if (!err && retry_cnt < 0)
+ return -ETIMEDOUT;
+ if (!err && reply->error_code)
+ return -EIO;
+
+ return err;
+}
+
+/**
+ * mucse_mbx_get_info - Get hw info from fw
+ * @hw: pointer to the HW structure
+ *
+ * mucse_mbx_get_info queries the firmware for hardware information.
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+static int mucse_mbx_get_info(struct mucse_hw *hw)
+{
+ struct mbx_fw_cmd_req req = {
+ .datalen = cpu_to_le16(MUCSE_MBX_REQ_HDR_LEN),
+ .opcode = cpu_to_le16(GET_HW_INFO),
+ };
+ struct mbx_fw_cmd_reply reply = {};
+ int err;
+
+ err = mucse_fw_send_cmd_wait_resp(hw, &req, &reply);
+ if (!err)
+ hw->pfvfnum = FIELD_GET(GENMASK_U16(7, 0),
+ le16_to_cpu(reply.hw_info.pfnum));
+
+ return err;
+}
+
+/**
+ * mucse_mbx_sync_fw - Try to sync with fw
+ * @hw: pointer to the HW structure
+ *
+ * mucse_mbx_sync_fw tries to sync with the fw. It is only called from
+ * probe; if it fails, the network device is not registered.
+ * The sync is retried several times on timeout.
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+int mucse_mbx_sync_fw(struct mucse_hw *hw)
+{
+ int try_cnt = 3;
+ int err;
+
+ do {
+ err = mucse_mbx_get_info(hw);
+ } while (err == -ETIMEDOUT && try_cnt--);
+
+ return err;
+}
+
+/**
+ * mucse_mbx_powerup - Notify fw of powerup/powerdown
+ * @hw: pointer to the HW structure
+ * @is_powerup: true for powerup, false for powerdown
+ *
+ * mucse_mbx_powerup tells the fw to switch to its normal working
+ * frequency when is_powerup is true, and to reduce the working
+ * frequency when it is false.
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+int mucse_mbx_powerup(struct mucse_hw *hw, bool is_powerup)
+{
+ struct mbx_fw_cmd_req req = {
+ .datalen = cpu_to_le16(sizeof(req.powerup) +
+ MUCSE_MBX_REQ_HDR_LEN),
+ .opcode = cpu_to_le16(POWER_UP),
+ .powerup = {
+ /* fw needs this to reply with the correct cmd */
+ .version = cpu_to_le32(GENMASK_U32(31, 0)),
+ .status = cpu_to_le32(is_powerup ? 1 : 0),
+ },
+ };
+ int len, err;
+
+ len = le16_to_cpu(req.datalen);
+ mutex_lock(&hw->mbx.lock);
+ err = mucse_write_and_wait_ack_mbx(hw, (u32 *)&req, len);
+ mutex_unlock(&hw->mbx.lock);
+
+ return err;
+}
+
+/**
+ * mucse_mbx_reset_hw - Posts a mbx req to reset hw
+ * @hw: pointer to the HW structure
+ *
+ * mucse_mbx_reset_hw posts a mbx req to the firmware to reset the hw.
+ * mucse_fw_send_cmd_wait_resp is used to wait until the hw reset
+ * completes.
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+int mucse_mbx_reset_hw(struct mucse_hw *hw)
+{
+ struct mbx_fw_cmd_req req = {
+ .datalen = cpu_to_le16(MUCSE_MBX_REQ_HDR_LEN),
+ .opcode = cpu_to_le16(RESET_HW),
+ };
+ struct mbx_fw_cmd_reply reply = {};
+
+ return mucse_fw_send_cmd_wait_resp(hw, &req, &reply);
+}
+
+/**
+ * mucse_mbx_get_macaddr - Posts a mbx req to request macaddr
+ * @hw: pointer to the HW structure
+ * @pfvfnum: index of pf/vf num
+ * @mac_addr: pointer to store mac_addr
+ * @port: port index
+ *
+ * mucse_mbx_get_macaddr posts a mbx req to firmware to get mac_addr.
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+int mucse_mbx_get_macaddr(struct mucse_hw *hw, int pfvfnum,
+ u8 *mac_addr,
+ int port)
+{
+ struct mbx_fw_cmd_req req = {
+ .datalen = cpu_to_le16(sizeof(req.get_mac_addr) +
+ MUCSE_MBX_REQ_HDR_LEN),
+ .opcode = cpu_to_le16(GET_MAC_ADDRESS),
+ .get_mac_addr = {
+ .port_mask = cpu_to_le32(BIT(port)),
+ .pfvf_num = cpu_to_le32(pfvfnum),
+ },
+ };
+ struct mbx_fw_cmd_reply reply = {};
+ int err;
+
+ err = mucse_fw_send_cmd_wait_resp(hw, &req, &reply);
+ if (err)
+ return err;
+
+ if (le32_to_cpu(reply.mac_addr.ports) & BIT(port))
+ memcpy(mac_addr, reply.mac_addr.addrs[port].mac, ETH_ALEN);
+ else
+ return -ENODATA;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h
new file mode 100644
index 000000000000..fb24fc12b613
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2020 - 2025 Mucse Corporation. */
+
+#ifndef _RNPGBE_MBX_FW_H
+#define _RNPGBE_MBX_FW_H
+
+#include <linux/types.h>
+
+#include "rnpgbe.h"
+
+#define MUCSE_MBX_REQ_HDR_LEN 24
+
+enum MUCSE_FW_CMD {
+ GET_HW_INFO = 0x0601,
+ GET_MAC_ADDRESS = 0x0602,
+ RESET_HW = 0x0603,
+ POWER_UP = 0x0803,
+};
+
+struct mucse_hw_info {
+ u8 link_stat;
+ u8 port_mask;
+ __le32 speed;
+ __le16 phy_type;
+ __le16 nic_mode;
+ __le16 pfnum;
+ __le32 fw_version;
+ __le32 axi_mhz;
+ union {
+ u8 port_id[4];
+ __le32 port_ids;
+ };
+ __le32 bd_uid;
+ __le32 phy_id;
+ __le32 wol_status;
+ __le32 ext_info;
+} __packed;
+
+struct mbx_fw_cmd_req {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 ret_value;
+ __le32 cookie_lo;
+ __le32 cookie_hi;
+ __le32 reply_lo;
+ __le32 reply_hi;
+ union {
+ u8 data[32];
+ struct {
+ __le32 version;
+ __le32 status;
+ } powerup;
+ struct {
+ __le32 port_mask;
+ __le32 pfvf_num;
+ } get_mac_addr;
+ };
+} __packed;
+
+struct mbx_fw_cmd_reply {
+ __le16 flags;
+ __le16 opcode;
+ __le16 error_code;
+ __le16 datalen;
+ __le32 cookie_lo;
+ __le32 cookie_hi;
+ union {
+ u8 data[40];
+ struct mac_addr {
+ __le32 ports;
+ struct _addr {
+ /* for macaddr:01:02:03:04:05:06
+ * mac-hi=0x01020304 mac-lo=0x05060000
+ */
+ u8 mac[8];
+ } addrs[4];
+ } mac_addr;
+ struct mucse_hw_info hw_info;
+ };
+} __packed;
+
+int mucse_mbx_sync_fw(struct mucse_hw *hw);
+int mucse_mbx_powerup(struct mucse_hw *hw, bool is_powerup);
+int mucse_mbx_reset_hw(struct mucse_hw *hw);
+int mucse_mbx_get_macaddr(struct mucse_hw *hw, int pfvfnum,
+ u8 *mac_addr, int port);
+#endif /* _RNPGBE_MBX_FW_H */
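
Taken together, the rnpgbe mailbox pieces give the PF a simple request/response channel to firmware. A rough, hypothetical sketch of how a probe path might chain the exported helpers (the surrounding structure and error handling are illustrative, not taken from the driver):

	/* Illustrative only: assumes a populated struct mucse_hw and a valid port. */
	static int demo_probe_fw_setup(struct mucse_hw *hw, u8 *mac, int port)
	{
		int err;

		mucse_init_mbx_params_pf(hw);	/* set poll delays, init lock, reset mbx */

		err = mucse_mbx_sync_fw(hw);	/* handshake with fw, learns hw->pfvfnum */
		if (err)
			return err;

		err = mucse_mbx_powerup(hw, true);
		if (err)
			return err;

		return mucse_mbx_get_macaddr(hw, hw->pfvfnum, mac, port);
	}
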
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index b7d9657a7af3..7be30a8df268 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -2482,7 +2482,7 @@ static int myri10ge_close(struct net_device *dev)
if (mgp->ss[0].tx.req_bytes == NULL)
return 0;
- del_timer_sync(&mgp->watchdog_timer);
+ timer_delete_sync(&mgp->watchdog_timer);
mgp->running = MYRI10GE_ETH_STOPPING;
for (i = 0; i < mgp->num_slices; i++)
napi_disable(&mgp->ss[i].napi);
@@ -3416,10 +3416,6 @@ static void myri10ge_watchdog(struct work_struct *work)
* nic was resumed from power saving mode.
*/
pci_restore_state(mgp->pdev);
-
- /* save state again for accounting reasons */
- pci_save_state(mgp->pdev);
-
} else {
/* if we get back -1's from our slot, perhaps somebody
* powered off our card. Don't try to reset it in
@@ -3478,7 +3474,7 @@ static void myri10ge_watchdog_timer(struct timer_list *t)
u32 rx_pause_cnt;
u16 cmd;
- mgp = from_timer(mgp, t, watchdog_timer);
+ mgp = timer_container_of(mgp, t, watchdog_timer);
rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
busy_slice_cnt = 0;
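
The myri10ge changes above (and the natsemi/ns83820/s2io hunks that follow) are part of the tree-wide move from from_timer()/del_timer_sync() to timer_container_of()/timer_delete_sync(); the semantics are unchanged, only the names are new. A minimal sketch of the new-style timer pattern, with made-up structure and function names:

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	struct demo_priv {
		struct timer_list watchdog;
	};

	static void demo_watchdog(struct timer_list *t)
	{
		struct demo_priv *priv = timer_container_of(priv, t, watchdog);

		/* ... poke the hardware ... */
		mod_timer(&priv->watchdog, jiffies + HZ);
	}

	static void demo_start(struct demo_priv *priv)
	{
		timer_setup(&priv->watchdog, demo_watchdog, 0);
		mod_timer(&priv->watchdog, jiffies + HZ);
	}

	static void demo_stop(struct demo_priv *priv)
	{
		timer_delete_sync(&priv->watchdog);	/* waits for a running callback */
	}
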
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index ad0c14849115..b253734dbc80 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -846,7 +846,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
SET_NETDEV_DEV(dev, &pdev->dev);
- i = pci_request_regions(pdev, DRV_NAME);
+ i = pcim_request_all_regions(pdev, DRV_NAME);
if (i)
goto err_pci_request_regions;
@@ -1786,7 +1786,7 @@ static void init_registers(struct net_device *dev)
*/
static void netdev_timer(struct timer_list *t)
{
- struct netdev_private *np = from_timer(np, t, timer);
+ struct netdev_private *np = timer_container_of(np, t, timer);
struct net_device *dev = np->dev;
void __iomem * ioaddr = ns_ioaddr(dev);
int next_tick = NATSEMI_TIMER_FREQ;
@@ -3179,7 +3179,7 @@ static int netdev_close(struct net_device *dev)
* the final WOL settings?
*/
- del_timer_sync(&np->timer);
+ timer_delete_sync(&np->timer);
disable_irq(irq);
spin_lock_irq(&np->lock);
natsemi_irq_disable(dev);
@@ -3278,7 +3278,7 @@ static int __maybe_unused natsemi_suspend(struct device *dev_d)
if (netif_running (dev)) {
const int irq = np->pci_dev->irq;
- del_timer_sync(&np->timer);
+ timer_delete_sync(&np->timer);
disable_irq(irq);
spin_lock_irq(&np->lock);
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index bea969dfa536..cdbf82affa7b 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -820,7 +820,7 @@ static void rx_irq(struct net_device *ndev)
struct ns83820 *dev = PRIV(ndev);
struct rx_info *info = &dev->rx_info;
unsigned next_rx;
- int rx_rc, len;
+ int len;
u32 cmdsts;
__le32 *desc;
unsigned long flags;
@@ -881,8 +881,10 @@ static void rx_irq(struct net_device *ndev)
if (likely(CMDSTS_OK & cmdsts)) {
#endif
skb_put(skb, len);
- if (unlikely(!skb))
+ if (unlikely(!skb)) {
+ ndev->stats.rx_dropped++;
goto netdev_mangle_me_harder_failed;
+ }
if (cmdsts & CMDSTS_DEST_MULTI)
ndev->stats.multicast++;
ndev->stats.rx_packets++;
@@ -901,15 +903,12 @@ static void rx_irq(struct net_device *ndev)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_IPV6), tag);
}
#endif
- rx_rc = netif_rx(skb);
- if (NET_RX_DROP == rx_rc) {
-netdev_mangle_me_harder_failed:
- ndev->stats.rx_dropped++;
- }
+ netif_rx(skb);
} else {
dev_kfree_skb_irq(skb);
}
+netdev_mangle_me_harder_failed:
nr++;
next_rx = info->next_rx;
desc = info->descs + (DESC_SIZE * next_rx);
@@ -1527,7 +1526,7 @@ static int ns83820_stop(struct net_device *ndev)
struct ns83820 *dev = PRIV(ndev);
/* FIXME: protect against interrupt handler? */
- del_timer_sync(&dev->tx_watchdog);
+ timer_delete_sync(&dev->tx_watchdog);
ns83820_disable_interrupts(dev);
@@ -1587,7 +1586,7 @@ static void ns83820_tx_timeout(struct net_device *ndev, unsigned int txqueue)
static void ns83820_tx_watch(struct timer_list *t)
{
- struct ns83820 *dev = from_timer(dev, t, tx_watchdog);
+ struct ns83820 *dev = timer_container_of(dev, t, tx_watchdog);
struct net_device *ndev = dev->ndev;
#if defined(DEBUG)
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index f8016dc25e0a..1e55ccb4822b 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -3425,7 +3425,6 @@ static void s2io_reset(struct s2io_nic *sp)
/* Restore the PCI state saved during initialization. */
pci_restore_state(sp->pdev);
- pci_save_state(sp->pdev);
pci_read_config_word(sp->pdev, 0x2, &val16);
if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
break;
@@ -4195,7 +4194,7 @@ pci_map_failed:
static void
s2io_alarm_handle(struct timer_list *t)
{
- struct s2io_nic *sp = from_timer(sp, t, alarm_timer);
+ struct s2io_nic *sp = timer_container_of(sp, t, alarm_timer);
struct net_device *dev = sp->dev;
s2io_handle_errors(dev);
@@ -4707,7 +4706,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
/*
* rx_traffic_int reg is an R1 register, writing all 1's
* will ensure that the actual interrupt causing bit
- * get's cleared and hence a read can be avoided.
+ * gets cleared and hence a read can be avoided.
*/
if (reason & GEN_INTR_RXTRAFFIC)
writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
@@ -4721,7 +4720,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
/*
* tx_traffic_int reg is an R1 register, writing all 1's
- * will ensure that the actual interrupt causing bit get's
+ * will ensure that the actual interrupt causing bit gets
* cleared and hence a read can be avoided.
*/
if (reason & GEN_INTR_TXTRAFFIC)
@@ -7019,7 +7018,7 @@ static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
if (!is_s2io_card_up(sp))
return;
- del_timer_sync(&sp->alarm_timer);
+ timer_delete_sync(&sp->alarm_timer);
/* If s2io_set_link task is executing, wait till it completes. */
while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
msleep(50);
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
index 2ec62c8d86e1..59486fe2ad18 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -20,6 +20,8 @@ nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
struct sk_buff *skb;
skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
+ if (!skb)
+ return NULL;
skb_put(skb, size);
return skb;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index 9d97cd281f18..c03558adda91 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -458,7 +458,8 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
map_id_full = be64_to_cpu(cbe->map_ptr);
map_id = map_id_full;
- if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
+ if (size_add(pkt_size, data_size) > INT_MAX ||
+ len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
return -EINVAL;
if (cbe->hdr.ver != NFP_CCM_ABI_VERSION)
return -EINVAL;
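
size_add() from <linux/overflow.h> saturates to SIZE_MAX instead of wrapping, so the added check above cannot be bypassed by an addition that overflows. A small, self-contained illustration of the same guard (names are hypothetical):

	#include <linux/overflow.h>
	#include <linux/limits.h>
	#include <linux/types.h>

	static bool demo_lengths_fit(u32 pkt_size, u32 data_size,
				     size_t len, size_t hdr)
	{
		/* size_add() returns SIZE_MAX on overflow, so the comparison
		 * below fails safely instead of passing on a wrapped sum.
		 */
		if (size_add(pkt_size, data_size) > INT_MAX)
			return false;

		return len >= hdr + pkt_size + data_size;
	}
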
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
index 515069d5637b..9e7c285eaa6b 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
@@ -266,17 +266,17 @@ static void set_sha2_512hmac(struct nfp_ipsec_cfg_add_sa *cfg, int *trunc_len)
}
}
-static int nfp_net_xfrm_add_state(struct xfrm_state *x,
+static int nfp_net_xfrm_add_state(struct net_device *dev,
+ struct xfrm_state *x,
struct netlink_ext_ack *extack)
{
- struct net_device *netdev = x->xso.real_dev;
struct nfp_ipsec_cfg_mssg msg = {};
int i, key_len, trunc_len, err = 0;
struct nfp_ipsec_cfg_add_sa *cfg;
struct nfp_net *nn;
unsigned int saidx;
- nn = netdev_priv(netdev);
+ nn = netdev_priv(dev);
cfg = &msg.cfg_add_sa;
/* General */
@@ -546,17 +546,16 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x,
return 0;
}
-static void nfp_net_xfrm_del_state(struct xfrm_state *x)
+static void nfp_net_xfrm_del_state(struct net_device *dev, struct xfrm_state *x)
{
struct nfp_ipsec_cfg_mssg msg = {
.cmd = NFP_IPSEC_CFG_MSSG_INV_SA,
.sa_idx = x->xso.offload_handle - 1,
};
- struct net_device *netdev = x->xso.real_dev;
struct nfp_net *nn;
int err;
- nn = netdev_priv(netdev);
+ nn = netdev_priv(dev);
err = nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_IPSEC, &msg,
sizeof(msg), nfp_net_ipsec_cfg);
if (err)
@@ -565,20 +564,9 @@ static void nfp_net_xfrm_del_state(struct xfrm_state *x)
xa_erase(&nn->xa_ipsec, x->xso.offload_handle - 1);
}
-static bool nfp_net_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
-{
- if (x->props.family == AF_INET)
- /* Offload with IPv4 options is not supported yet */
- return ip_hdr(skb)->ihl == 5;
-
- /* Offload with IPv6 extension headers is not support yet */
- return !(ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr));
-}
-
static const struct xfrmdev_ops nfp_net_ipsec_xfrmdev_ops = {
.xdo_dev_state_add = nfp_net_xfrm_add_state,
.xdo_dev_state_delete = nfp_net_xfrm_del_state,
- .xdo_dev_offload_ok = nfp_net_ipsec_offload_ok,
};
void nfp_net_ipsec_init(struct nfp_net *nn)
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/tls.c b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
index f80f1a6953fa..f252ecdcd2cd 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/tls.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
@@ -495,14 +495,13 @@ int nfp_net_tls_rx_resync_req(struct net_device *netdev,
switch (ipv6h->version) {
case 4:
- sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
- iph->saddr, th->source, iph->daddr,
- th->dest, netdev->ifindex);
+ sk = inet_lookup_established(net, iph->saddr, th->source,
+ iph->daddr, th->dest,
+ netdev->ifindex);
break;
#if IS_ENABLED(CONFIG_IPV6)
case 6:
- sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
- &ipv6h->saddr, th->source,
+ sk = __inet6_lookup_established(net, &ipv6h->saddr, th->source,
&ipv6h->daddr, ntohs(th->dest),
netdev->ifindex, 0);
break;
diff --git a/drivers/net/ethernet/netronome/nfp/devlink_param.c b/drivers/net/ethernet/netronome/nfp/devlink_param.c
index 0e1a3800f371..85e3b19e6165 100644
--- a/drivers/net/ethernet/netronome/nfp/devlink_param.c
+++ b/drivers/net/ethernet/netronome/nfp/devlink_param.c
@@ -81,7 +81,8 @@ static const struct nfp_devlink_param_u8_arg nfp_devlink_u8_args[] = {
static int
nfp_devlink_param_u8_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
const struct nfp_devlink_param_u8_arg *arg;
struct nfp_pf *pf = devlink_priv(devlink);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 80e4675582bf..dde60c4572fa 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -564,8 +564,8 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
/* Init ring buffer and unallocated stats_ids. */
priv->stats_ids.free_list.buf =
- vmalloc(array_size(NFP_FL_STATS_ELEM_RS,
- priv->stats_ring_size));
+ vmalloc_array(priv->stats_ring_size,
+ NFP_FL_STATS_ELEM_RS);
if (!priv->stats_ids.free_list.buf)
goto err_free_last_used;
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
index f1c6c47564b1..91a227929a5f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
@@ -779,7 +779,7 @@ nfp_nfd3_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
case NFP_NET_META_CSUM:
meta->csum_type = CHECKSUM_COMPLETE;
meta->csum =
- (__force __wsum)__get_unaligned_cpu32(data);
+ (__force __wsum)get_unaligned((u32 *)data);
data += 4;
break;
case NFP_NET_META_RESYNC_INFO:
@@ -1169,14 +1169,10 @@ int nfp_nfd3_poll(struct napi_struct *napi, int budget)
if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) {
struct dim_sample dim_sample = {};
- unsigned int start;
u64 pkts, bytes;
- do {
- start = u64_stats_fetch_begin(&r_vec->rx_sync);
- pkts = r_vec->rx_pkts;
- bytes = r_vec->rx_bytes;
- } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
+ pkts = r_vec->rx_pkts;
+ bytes = r_vec->rx_bytes;
dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
net_dim(&r_vec->rx_dim, &dim_sample);
@@ -1184,14 +1180,10 @@ int nfp_nfd3_poll(struct napi_struct *napi, int budget)
if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) {
struct dim_sample dim_sample = {};
- unsigned int start;
u64 pkts, bytes;
- do {
- start = u64_stats_fetch_begin(&r_vec->tx_sync);
- pkts = r_vec->tx_pkts;
- bytes = r_vec->tx_bytes;
- } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
+ pkts = r_vec->tx_pkts;
+ bytes = r_vec->tx_bytes;
dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
net_dim(&r_vec->tx_dim, &dim_sample);
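
get_unaligned() (exposed via <linux/unaligned.h> in current kernels) replaces the removed __get_unaligned_cpu32() helper and infers the access width from the pointer type. A brief sketch of reading u32 fields from a byte-addressed, possibly unaligned metadata buffer (function and field names are made up):

	#include <linux/unaligned.h>
	#include <linux/types.h>

	static void demo_parse_meta(const u8 *data, u32 *csum, u32 *mark)
	{
		/* data may not be 4-byte aligned; get_unaligned() handles that */
		*csum = get_unaligned((const u32 *)data);
		data += 4;
		*mark = get_unaligned((const u32 *)data);
	}
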
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
index ebeb6ab4465c..ee0db3d5fd66 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
@@ -779,7 +779,7 @@ nfp_nfdk_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
case NFP_NET_META_CSUM:
meta->csum_type = CHECKSUM_COMPLETE;
meta->csum =
- (__force __wsum)__get_unaligned_cpu32(data);
+ (__force __wsum)get_unaligned((u32 *)data);
data += 4;
break;
case NFP_NET_META_RESYNC_INFO:
@@ -1279,14 +1279,10 @@ int nfp_nfdk_poll(struct napi_struct *napi, int budget)
if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) {
struct dim_sample dim_sample = {};
- unsigned int start;
u64 pkts, bytes;
- do {
- start = u64_stats_fetch_begin(&r_vec->rx_sync);
- pkts = r_vec->rx_pkts;
- bytes = r_vec->rx_bytes;
- } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
+ pkts = r_vec->rx_pkts;
+ bytes = r_vec->rx_bytes;
dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
net_dim(&r_vec->rx_dim, &dim_sample);
@@ -1294,14 +1290,10 @@ int nfp_nfdk_poll(struct napi_struct *napi, int budget)
if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) {
struct dim_sample dim_sample = {};
- unsigned int start;
u64 pkts, bytes;
- do {
- start = u64_stats_fetch_begin(&r_vec->tx_sync);
- pkts = r_vec->tx_pkts;
- bytes = r_vec->tx_bytes;
- } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
+ pkts = r_vec->tx_pkts;
+ bytes = r_vec->tx_bytes;
dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
net_dim(&r_vec->tx_dim, &dim_sample);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c b/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
index 0d6c59d6d4ae..ea6a288c0d5e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
@@ -83,42 +83,12 @@ nfp_hwmon_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
return 0;
}
-static u32 nfp_chip_config[] = {
- HWMON_C_REGISTER_TZ,
- 0
-};
-
-static const struct hwmon_channel_info nfp_chip = {
- .type = hwmon_chip,
- .config = nfp_chip_config,
-};
-
-static u32 nfp_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT,
- 0
-};
-
-static const struct hwmon_channel_info nfp_temp = {
- .type = hwmon_temp,
- .config = nfp_temp_config,
-};
-
-static u32 nfp_power_config[] = {
- HWMON_P_INPUT | HWMON_P_MAX,
- HWMON_P_INPUT,
- HWMON_P_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info nfp_power = {
- .type = hwmon_power,
- .config = nfp_power_config,
-};
-
static const struct hwmon_channel_info * const nfp_hwmon_info[] = {
- &nfp_chip,
- &nfp_temp,
- &nfp_power,
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT),
+ HWMON_CHANNEL_INFO(power, HWMON_P_INPUT | HWMON_P_MAX,
+ HWMON_P_INPUT,
+ HWMON_P_INPUT),
NULL
};
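
HWMON_CHANNEL_INFO() builds the static struct hwmon_channel_info and its config array in a single expression, which is what allows the three hand-rolled tables above to be dropped. A minimal, hypothetical sensor description using the same macro (the ops below just return canned data):

	#include <linux/hwmon.h>

	static umode_t demo_is_visible(const void *data, enum hwmon_sensor_types type,
				       u32 attr, int channel)
	{
		return 0444;
	}

	static int demo_read(struct device *dev, enum hwmon_sensor_types type,
			     u32 attr, int channel, long *val)
	{
		*val = 42000;	/* dummy reading, millidegrees */
		return 0;
	}

	static const struct hwmon_ops demo_hwmon_ops = {
		.is_visible = demo_is_visible,
		.read = demo_read,
	};

	static const struct hwmon_channel_info * const demo_hwmon_info[] = {
		HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
		HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX),
		NULL
	};

	static const struct hwmon_chip_info demo_chip_info = {
		.ops = &demo_hwmon_ops,
		.info = demo_hwmon_info,
	};
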
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 71301dbd8fb5..48390b2fd44d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -797,7 +797,7 @@ static int nfp_pci_probe(struct pci_dev *pdev,
pf->pdev = pdev;
pf->dev_info = dev_info;
- pf->wq = alloc_workqueue("nfp-%s", 0, 2, pci_name(pdev));
+ pf->wq = alloc_workqueue("nfp-%s", WQ_PERCPU, 2, pci_name(pdev));
if (!pf->wq) {
err = -ENOMEM;
goto err_pci_priv_unset;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 6e0929af0f72..9ef72f294117 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -159,7 +159,7 @@ static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
static void nfp_net_reconfig_timer(struct timer_list *t)
{
- struct nfp_net *nn = from_timer(nn, t, reconfig_timer);
+ struct nfp_net *nn = timer_container_of(nn, t, reconfig_timer);
spin_lock_bh(&nn->reconfig_lock);
@@ -227,7 +227,7 @@ static void nfp_net_reconfig_sync_enter(struct nfp_net *nn)
spin_unlock_bh(&nn->reconfig_lock);
if (cancelled_timer) {
- del_timer_sync(&nn->reconfig_timer);
+ timer_delete_sync(&nn->reconfig_timer);
nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
}
@@ -829,7 +829,7 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
return err;
}
- irq_set_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask);
+ irq_update_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask);
nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, r_vec->irq_vector,
r_vec->irq_entry);
@@ -840,7 +840,7 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
static void
nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
{
- irq_set_affinity_hint(r_vec->irq_vector, NULL);
+ irq_update_affinity_hint(r_vec->irq_vector, NULL);
nfp_net_napi_del(&nn->dp, r_vec);
free_irq(r_vec->irq_vector, r_vec);
}
@@ -2394,8 +2394,7 @@ static int nfp_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
static const struct udp_tunnel_nic_info nfp_udp_tunnels = {
.sync_table = nfp_udp_tunnel_sync,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
.tables = {
{
.n_entries = NFP_NET_N_VXLAN_PORTS,
@@ -2538,7 +2537,7 @@ nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,
nn->dp.num_r_vecs, num_online_cpus());
nn->max_r_vecs = nn->dp.num_r_vecs;
- nn->dp.xsk_pools = kcalloc(nn->max_r_vecs, sizeof(nn->dp.xsk_pools),
+ nn->dp.xsk_pools = kcalloc(nn->max_r_vecs, sizeof(*nn->dp.xsk_pools),
GFP_KERNEL);
if (!nn->dp.xsk_pools) {
err = -ENOMEM;
@@ -2558,14 +2557,16 @@ nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,
err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
&nn->tlv_caps);
if (err)
- goto err_free_nn;
+ goto err_free_xsk_pools;
err = nfp_ccm_mbox_alloc(nn);
if (err)
- goto err_free_nn;
+ goto err_free_xsk_pools;
return nn;
+err_free_xsk_pools:
+ kfree(nn->dp.xsk_pools);
err_free_nn:
if (nn->dp.netdev)
free_netdev(nn->dp.netdev);
@@ -2779,7 +2780,7 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
break;
}
- netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
+ netdev->watchdog_timeo = secs_to_jiffies(5);
/* MTU range: 68 - hw-specific max */
netdev->min_mtu = ETH_MIN_MTU;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index d8b735ccf899..d843d1e19715 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -77,7 +77,7 @@ DEFINE_SHOW_ATTRIBUTE(nfp_rx_q);
static int nfp_tx_q_show(struct seq_file *file, void *data);
DEFINE_SHOW_ATTRIBUTE(nfp_tx_q);
-static int nfp_tx_q_show(struct seq_file *file, void *data)
+static int __nfp_tx_q_show(struct seq_file *file, void *data, bool is_xdp)
{
struct nfp_net_r_vector *r_vec = file->private;
struct nfp_net_tx_ring *tx_ring;
@@ -86,10 +86,10 @@ static int nfp_tx_q_show(struct seq_file *file, void *data)
rtnl_lock();
- if (debugfs_real_fops(file->file) == &nfp_tx_q_fops)
- tx_ring = r_vec->tx_ring;
- else
+ if (is_xdp)
tx_ring = r_vec->xdp_ring;
+ else
+ tx_ring = r_vec->tx_ring;
if (!r_vec->nfp_net || !tx_ring)
goto out;
nn = r_vec->nfp_net;
@@ -115,9 +115,14 @@ out:
return 0;
}
+static int nfp_tx_q_show(struct seq_file *file, void *data)
+{
+ return __nfp_tx_q_show(file, data, false);
+}
+
static int nfp_xdp_q_show(struct seq_file *file, void *data)
{
- return nfp_tx_q_show(file, data);
+ return __nfp_tx_q_show(file, data, true);
}
DEFINE_SHOW_ATTRIBUTE(nfp_xdp_q);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index fbca8d0efd85..16c828dd5c1a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -1303,9 +1303,10 @@ static u32 ethtool_flow_to_nfp_flag(u32 flow_type)
return xlate_ethtool_to_nfp[flow_type];
}
-static int nfp_net_get_rss_hash_opts(struct nfp_net *nn,
- struct ethtool_rxnfc *cmd)
+static int nfp_net_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct nfp_net *nn = netdev_priv(netdev);
u32 nfp_rss_flag;
cmd->data = 0;
@@ -1451,16 +1452,16 @@ static int nfp_net_get_rxnfc(struct net_device *netdev,
case ETHTOOL_GRXCLSRLALL:
cmd->data = NFP_FS_MAX_ENTRY;
return nfp_net_get_fs_loc(nn, rule_locs);
- case ETHTOOL_GRXFH:
- return nfp_net_get_rss_hash_opts(nn, cmd);
default:
return -EOPNOTSUPP;
}
}
-static int nfp_net_set_rss_hash_opt(struct nfp_net *nn,
- struct ethtool_rxnfc *nfc)
+static int nfp_net_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct nfp_net *nn = netdev_priv(netdev);
u32 new_rss_cfg = nn->rss_cfg;
u32 nfp_rss_flag;
int err;
@@ -1763,8 +1764,6 @@ static int nfp_net_set_rxnfc(struct net_device *netdev,
struct nfp_net *nn = netdev_priv(netdev);
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- return nfp_net_set_rss_hash_opt(nn, cmd);
case ETHTOOL_SRXCLSRLINS:
return nfp_net_fs_add(nn, cmd);
case ETHTOOL_SRXCLSRLDEL:
@@ -1789,7 +1788,7 @@ static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev)
struct nfp_net *nn = netdev_priv(netdev);
if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
- return -EOPNOTSUPP;
+ return 0;
return nfp_net_rss_key_sz(nn);
}
@@ -2506,6 +2505,8 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.get_rxfh_key_size = nfp_net_get_rxfh_key_size,
.get_rxfh = nfp_net_get_rxfh,
.set_rxfh = nfp_net_set_rxfh,
+ .get_rxfh_fields = nfp_net_get_rxfh_fields,
+ .set_rxfh_fields = nfp_net_set_rxfh_fields,
.get_regs_len = nfp_net_get_regs_len,
.get_regs = nfp_net_get_regs,
.set_dump = nfp_app_set_dump,
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 720f577929db..19aa1f1538aa 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1120,20 +1120,6 @@ static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
}
}
-static void nv_napi_enable(struct net_device *dev)
-{
- struct fe_priv *np = get_nvpriv(dev);
-
- napi_enable(&np->napi);
-}
-
-static void nv_napi_disable(struct net_device *dev)
-{
- struct fe_priv *np = get_nvpriv(dev);
-
- napi_disable(&np->napi);
-}
-
#define MII_READ (-1)
/* mii_rw: read/write a register on the PHY.
*
@@ -1905,7 +1891,7 @@ packet_dropped:
/* If rx bufs are exhausted called after 50ms to attempt to refresh */
static void nv_do_rx_refill(struct timer_list *t)
{
- struct fe_priv *np = from_timer(np, t, oom_kick);
+ struct fe_priv *np = timer_container_of(np, t, oom_kick);
/* Just reschedule NAPI rx processing */
napi_schedule(&np->napi);
@@ -3114,7 +3100,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
* Changing the MTU is a rare event, it shouldn't matter.
*/
nv_disable_irq(dev);
- nv_napi_disable(dev);
+ napi_disable(&np->napi);
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
spin_lock(&np->lock);
@@ -3143,7 +3129,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
spin_unlock(&np->lock);
netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
- nv_napi_enable(dev);
+ napi_enable(&np->napi);
nv_enable_irq(dev);
}
return 0;
@@ -4154,7 +4140,7 @@ static void nv_free_irq(struct net_device *dev)
static void nv_do_nic_poll(struct timer_list *t)
{
- struct fe_priv *np = from_timer(np, t, nic_poll);
+ struct fe_priv *np = timer_container_of(np, t, nic_poll);
struct net_device *dev = np->dev;
u8 __iomem *base = get_hwbase(dev);
u32 mask = 0;
@@ -4273,7 +4259,7 @@ static void nv_do_stats_poll(struct timer_list *t)
__acquires(&netdev_priv(dev)->hwstats_lock)
__releases(&netdev_priv(dev)->hwstats_lock)
{
- struct fe_priv *np = from_timer(np, t, stats_poll);
+ struct fe_priv *np = timer_container_of(np, t, stats_poll);
struct net_device *dev = np->dev;
/* If lock is currently taken, the stats are being refreshed
@@ -4731,7 +4717,7 @@ static int nv_set_ringparam(struct net_device *dev,
if (netif_running(dev)) {
nv_disable_irq(dev);
- nv_napi_disable(dev);
+ napi_disable(&np->napi);
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
spin_lock(&np->lock);
@@ -4784,7 +4770,7 @@ static int nv_set_ringparam(struct net_device *dev,
spin_unlock(&np->lock);
netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
- nv_napi_enable(dev);
+ napi_enable(&np->napi);
nv_enable_irq(dev);
}
return 0;
@@ -5277,7 +5263,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
if (test->flags & ETH_TEST_FL_OFFLINE) {
if (netif_running(dev)) {
netif_stop_queue(dev);
- nv_napi_disable(dev);
+ napi_disable(&np->napi);
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
spin_lock_irq(&np->lock);
@@ -5334,7 +5320,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
/* restart rx engine */
nv_start_rxtx(dev);
netif_start_queue(dev);
- nv_napi_enable(dev);
+ napi_enable(&np->napi);
nv_enable_hw_interrupts(dev, np->irqmask);
}
}
@@ -5576,6 +5562,7 @@ static int nv_open(struct net_device *dev)
/* ask for interrupts */
nv_enable_hw_interrupts(dev, np->irqmask);
+ netdev_lock(dev);
spin_lock_irq(&np->lock);
writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
writel(0, base + NvRegMulticastAddrB);
@@ -5594,7 +5581,7 @@ static int nv_open(struct net_device *dev)
ret = nv_update_linkspeed(dev);
nv_start_rxtx(dev);
netif_start_queue(dev);
- nv_napi_enable(dev);
+ napi_enable_locked(&np->napi);
if (ret) {
netif_carrier_on(dev);
@@ -5611,6 +5598,7 @@ static int nv_open(struct net_device *dev)
round_jiffies(jiffies + STATS_INTERVAL));
spin_unlock_irq(&np->lock);
+ netdev_unlock(dev);
/* If the loopback feature was set while the device was down, make sure
* that it's set correctly now.
@@ -5632,12 +5620,12 @@ static int nv_close(struct net_device *dev)
spin_lock_irq(&np->lock);
np->in_shutdown = 1;
spin_unlock_irq(&np->lock);
- nv_napi_disable(dev);
+ napi_disable(&np->napi);
synchronize_irq(np->pci_dev->irq);
- del_timer_sync(&np->oom_kick);
- del_timer_sync(&np->nic_poll);
- del_timer_sync(&np->stats_poll);
+ timer_delete_sync(&np->oom_kick);
+ timer_delete_sync(&np->nic_poll);
+ timer_delete_sync(&np->stats_poll);
netif_stop_queue(dev);
spin_lock_irq(&np->lock);
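
nv_open() now takes the netdev instance lock explicitly, so NAPI has to be enabled with the _locked variant; plain napi_enable() acquires the same lock internally and would deadlock here. A condensed sketch of the pattern, with an illustrative private structure:

	#include <linux/netdevice.h>

	struct demo_priv {
		struct napi_struct napi;
	};

	static int demo_open(struct net_device *dev)
	{
		struct demo_priv *np = netdev_priv(dev);

		netdev_lock(dev);
		/* ... program the hardware under the instance lock ... */
		napi_enable_locked(&np->napi);	/* lock is already held here */
		netdev_unlock(dev);

		return 0;
	}
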
diff --git a/drivers/net/ethernet/oa_tc6.c b/drivers/net/ethernet/oa_tc6.c
index f9c0dcd965c2..91a906a7918a 100644
--- a/drivers/net/ethernet/oa_tc6.c
+++ b/drivers/net/ethernet/oa_tc6.c
@@ -113,6 +113,7 @@ struct oa_tc6 {
struct mii_bus *mdiobus;
struct spi_device *spi;
struct mutex spi_ctrl_lock; /* Protects spi control transfer */
+ spinlock_t tx_skb_lock; /* Protects tx skb handling */
void *spi_ctrl_tx_buf;
void *spi_ctrl_rx_buf;
void *spi_data_tx_buf;
@@ -1004,8 +1005,10 @@ static u16 oa_tc6_prepare_spi_tx_buf_for_tx_skbs(struct oa_tc6 *tc6)
for (used_tx_credits = 0; used_tx_credits < tc6->tx_credits;
used_tx_credits++) {
if (!tc6->ongoing_tx_skb) {
+ spin_lock_bh(&tc6->tx_skb_lock);
tc6->ongoing_tx_skb = tc6->waiting_tx_skb;
tc6->waiting_tx_skb = NULL;
+ spin_unlock_bh(&tc6->tx_skb_lock);
}
if (!tc6->ongoing_tx_skb)
break;
@@ -1111,8 +1114,9 @@ static int oa_tc6_spi_thread_handler(void *data)
/* This kthread will be waken up if there is a tx skb or mac-phy
* interrupt to perform spi transfer with tx chunks.
*/
- wait_event_interruptible(tc6->spi_wq, tc6->waiting_tx_skb ||
- tc6->int_flag ||
+ wait_event_interruptible(tc6->spi_wq, tc6->int_flag ||
+ (tc6->waiting_tx_skb &&
+ tc6->tx_credits) ||
kthread_should_stop());
if (kthread_should_stop())
@@ -1209,7 +1213,9 @@ netdev_tx_t oa_tc6_start_xmit(struct oa_tc6 *tc6, struct sk_buff *skb)
return NETDEV_TX_OK;
}
+ spin_lock_bh(&tc6->tx_skb_lock);
tc6->waiting_tx_skb = skb;
+ spin_unlock_bh(&tc6->tx_skb_lock);
/* Wake spi kthread to perform spi transfer */
wake_up_interruptible(&tc6->spi_wq);
@@ -1239,10 +1245,12 @@ struct oa_tc6 *oa_tc6_init(struct spi_device *spi, struct net_device *netdev)
tc6->netdev = netdev;
SET_NETDEV_DEV(netdev, &spi->dev);
mutex_init(&tc6->spi_ctrl_lock);
+ spin_lock_init(&tc6->tx_skb_lock);
/* Set the SPI controller to pump at realtime priority */
tc6->spi->rt = true;
- spi_setup(tc6->spi);
+ if (spi_setup(tc6->spi) < 0)
+ return NULL;
tc6->spi_ctrl_tx_buf = devm_kzalloc(&tc6->spi->dev,
OA_TC6_CTRL_SPI_BUF_SIZE,
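
The new tx_skb_lock makes the waiting_tx_skb handoff atomic between the transmit path (BH context) and the SPI worker thread. A stripped-down sketch of that producer/consumer handoff, using hypothetical names:

	#include <linux/spinlock.h>
	#include <linux/skbuff.h>

	struct demo_tx {
		spinlock_t lock;		/* protects the two pointers below */
		struct sk_buff *waiting;	/* set by the xmit path */
		struct sk_buff *ongoing;	/* consumed by the worker */
	};

	/* producer: called from ndo_start_xmit (BH context) */
	static void demo_queue_skb(struct demo_tx *tx, struct sk_buff *skb)
	{
		spin_lock_bh(&tx->lock);
		tx->waiting = skb;
		spin_unlock_bh(&tx->lock);
	}

	/* consumer: called from the worker thread */
	static struct sk_buff *demo_take_skb(struct demo_tx *tx)
	{
		struct sk_buff *skb;

		spin_lock_bh(&tx->lock);
		skb = tx->waiting;
		tx->waiting = NULL;
		spin_unlock_bh(&tx->lock);

		return skb;
	}
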
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 4ac29cd59f2b..62f05f4569b1 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -198,23 +198,21 @@ pch_tx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED);
}
-static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+static int pch_gbe_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config cfg;
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev;
u8 station[20];
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
-
/* Get ieee1588's dev information */
pdev = adapter->ptp_pdev;
- if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
+ if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
- switch (cfg.rx_filter) {
+ switch (cfg->rx_filter) {
case HWTSTAMP_FILTER_NONE:
adapter->hwts_rx_en = 0;
break;
@@ -223,17 +221,17 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
pch_ch_control_write(pdev, SLAVE_MODE | CAP_MODE0);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- adapter->hwts_rx_en = 1;
+ adapter->hwts_rx_en = cfg->rx_filter;
pch_ch_control_write(pdev, MASTER_MODE | CAP_MODE0);
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- adapter->hwts_rx_en = 1;
+ adapter->hwts_rx_en = cfg->rx_filter;
pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
strcpy(station, PTP_L4_MULTICAST_SA);
pch_set_station_address(station, pdev);
break;
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- adapter->hwts_rx_en = 1;
+ adapter->hwts_rx_en = cfg->rx_filter;
pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
strcpy(station, PTP_L2_MULTICAST_SA);
pch_set_station_address(station, pdev);
@@ -242,12 +240,23 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
return -ERANGE;
}
- adapter->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON;
+ adapter->hwts_tx_en = cfg->tx_type == HWTSTAMP_TX_ON;
/* Clear out any old time stamps. */
pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED);
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
+}
+
+static int pch_gbe_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *cfg)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ cfg->tx_type = adapter->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg->rx_filter = adapter->hwts_rx_en;
+
+ return 0;
}
static inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
@@ -1014,8 +1023,8 @@ static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
*/
static void pch_gbe_watchdog(struct timer_list *t)
{
- struct pch_gbe_adapter *adapter = from_timer(adapter, t,
- watchdog_timer);
+ struct pch_gbe_adapter *adapter = timer_container_of(adapter, t,
+ watchdog_timer);
struct net_device *netdev = adapter->netdev;
struct pch_gbe_hw *hw = &adapter->hw;
@@ -1916,7 +1925,7 @@ void pch_gbe_down(struct pch_gbe_adapter *adapter)
pch_gbe_irq_disable(adapter);
pch_gbe_free_irq(adapter);
- del_timer_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
netdev->tx_queue_len = adapter->tx_queue_len;
netif_carrier_off(netdev);
@@ -2234,9 +2243,6 @@ static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
netdev_dbg(netdev, "cmd : 0x%04x\n", cmd);
- if (cmd == SIOCSHWTSTAMP)
- return hwtstamp_ioctl(netdev, ifr, cmd);
-
return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
}
@@ -2328,6 +2334,8 @@ static const struct net_device_ops pch_gbe_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = pch_gbe_netpoll,
#endif
+ .ndo_hwtstamp_get = pch_gbe_hwtstamp_get,
+ .ndo_hwtstamp_set = pch_gbe_hwtstamp_set,
};
static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev,
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index a36d422b5173..b0de7e9f12a5 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -1025,7 +1025,7 @@ static inline int hamachi_tx(struct net_device *dev)
static void hamachi_timer(struct timer_list *t)
{
- struct hamachi_private *hmp = from_timer(hmp, t, timer);
+ struct hamachi_private *hmp = timer_container_of(hmp, t, timer);
struct net_device *dev = hmp->mii_if.dev;
void __iomem *ioaddr = hmp->base;
int next_tick = 10*HZ;
@@ -1712,7 +1712,7 @@ static int hamachi_close(struct net_device *dev)
free_irq(hmp->pci_dev->irq, dev);
- del_timer_sync(&hmp->timer);
+ timer_delete_sync(&hmp->timer);
/* Free all the skbuffs in the Rx queue. */
for (i = 0; i < RX_RING_SIZE; i++) {
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index c0515dc63246..1e25ac13a7d8 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -652,7 +652,7 @@ err_free_irq:
static void yellowfin_timer(struct timer_list *t)
{
- struct yellowfin_private *yp = from_timer(yp, t, timer);
+ struct yellowfin_private *yp = timer_container_of(yp, t, timer);
struct net_device *dev = pci_get_drvdata(yp->pci_dev);
void __iomem *ioaddr = yp->base;
int next_tick = 60*HZ;
@@ -1222,7 +1222,7 @@ static int yellowfin_close(struct net_device *dev)
iowrite32(0x80000000, ioaddr + RxCtrl);
iowrite32(0x80000000, ioaddr + TxCtrl);
- del_timer(&yp->timer);
+ timer_delete(&yp->timer);
#if defined(__i386__)
if (yellowfin_debug > 2) {
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index cb4e12df7719..fe58024b5901 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -934,7 +934,8 @@ static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
static void pasemi_mac_tx_timer(struct timer_list *t)
{
- struct pasemi_mac_txring *txring = from_timer(txring, t, clean_timer);
+ struct pasemi_mac_txring *txring = timer_container_of(txring, t,
+ clean_timer);
struct pasemi_mac *mac = txring->mac;
pasemi_mac_clean_tx(txring);
@@ -1288,7 +1289,7 @@ static int pasemi_mac_close(struct net_device *dev)
phy_disconnect(dev->phydev);
}
- del_timer_sync(&mac->tx->clean_timer);
+ timer_delete_sync(&mac->tx->clean_timer);
netif_stop_queue(dev);
napi_disable(&mac->napi);
diff --git a/drivers/net/ethernet/pensando/Kconfig b/drivers/net/ethernet/pensando/Kconfig
index 01fe76786f77..c99758adf3ad 100644
--- a/drivers/net/ethernet/pensando/Kconfig
+++ b/drivers/net/ethernet/pensando/Kconfig
@@ -24,6 +24,7 @@ config IONIC
select NET_DEVLINK
select DIMLIB
select PAGE_POOL
+ select AUXILIARY_BUS
help
This enables the support for the Pensando family of Ethernet
adapters. More specific information on this driver can be
diff --git a/drivers/net/ethernet/pensando/ionic/Makefile b/drivers/net/ethernet/pensando/ionic/Makefile
index 4e7642a2d25f..a598972fef41 100644
--- a/drivers/net/ethernet/pensando/ionic/Makefile
+++ b/drivers/net/ethernet/pensando/ionic/Makefile
@@ -5,5 +5,5 @@ obj-$(CONFIG_IONIC) := ionic.o
ionic-y := ionic_main.o ionic_bus_pci.o ionic_devlink.o ionic_dev.o \
ionic_debugfs.o ionic_lif.o ionic_rx_filter.o ionic_ethtool.o \
- ionic_txrx.o ionic_stats.o ionic_fw.o
+ ionic_txrx.o ionic_stats.o ionic_fw.o ionic_aux.o
ionic-$(CONFIG_PTP_1588_CLOCK) += ionic_phc.o
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index 1c61390677f7..85198e6a806e 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -18,8 +18,6 @@ struct ionic_lif;
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF 0x1002
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003
-#define IONIC_ASIC_TYPE_ELBA 2
-
#define DEVCMD_TIMEOUT 5
#define IONIC_ADMINQ_TIME_SLICE msecs_to_jiffies(100)
@@ -59,7 +57,6 @@ struct ionic {
DECLARE_BITMAP(intrs, IONIC_INTR_CTRL_REGS_MAX);
cpumask_var_t *affinity_masks;
struct delayed_work doorbell_check_dwork;
- struct work_struct nb_work;
struct notifier_block nb;
struct rw_semaphore vf_op_lock; /* lock for VF operations */
struct ionic_vf *vfs;
@@ -68,16 +65,9 @@ struct ionic {
int watchdog_period;
};
-struct ionic_admin_ctx {
- struct completion work;
- union ionic_adminq_cmd cmd;
- union ionic_adminq_comp comp;
-};
-
int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx,
const int err, const bool do_msg);
-int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode,
u8 status, int err);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_api.h b/drivers/net/ethernet/pensando/ionic/ionic_api.h
new file mode 100644
index 000000000000..bd88666836b8
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_api.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#ifndef _IONIC_API_H_
+#define _IONIC_API_H_
+
+#include <linux/auxiliary_bus.h>
+#include "ionic_if.h"
+#include "ionic_regs.h"
+
+/**
+ * struct ionic_aux_dev - Auxiliary device information
+ * @lif: Logical interface
+ * @idx: Index identifier
+ * @adev: Auxiliary device
+ */
+struct ionic_aux_dev {
+ struct ionic_lif *lif;
+ int idx;
+ struct auxiliary_device adev;
+};
+
+/**
+ * struct ionic_admin_ctx - Admin command context
+ * @work: Work completion wait queue element
+ * @cmd: Admin command (64B) to be copied to the queue
+ * @comp: Admin completion (16B) copied from the queue
+ */
+struct ionic_admin_ctx {
+ struct completion work;
+ union ionic_adminq_cmd cmd;
+ union ionic_adminq_comp comp;
+};
+
+#define IONIC_INTR_INDEX_NOT_ASSIGNED -1
+#define IONIC_INTR_NAME_MAX_SZ 32
+
+/**
+ * struct ionic_intr_info - Interrupt information
+ * @name: Name identifier
+ * @rearm_count: Interrupt rearm count
+ * @index: Interrupt index position
+ * @vector: Interrupt number
+ * @dim_coal_hw: Interrupt coalesce value in hardware units
+ * @affinity_mask: CPU affinity mask
+ * @aff_notify: context for notification of IRQ affinity changes
+ */
+struct ionic_intr_info {
+ char name[IONIC_INTR_NAME_MAX_SZ];
+ u64 rearm_count;
+ unsigned int index;
+ unsigned int vector;
+ u32 dim_coal_hw;
+ cpumask_var_t *affinity_mask;
+ struct irq_affinity_notify aff_notify;
+};
+
+/**
+ * ionic_adminq_post_wait - Post an admin command and wait for response
+ * @lif: Logical interface
+ * @ctx: API admin command context
+ *
+ * Post the command to an admin queue in the ethernet driver. If this command
+ * succeeds, then the command has been posted, but that does not indicate a
+ * completion. If this command returns success, then the completion callback
+ * will eventually be called.
+ *
+ * Return: zero or negative error status
+ */
+int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
+
+/**
+ * ionic_error_to_errno - Transform ionic_if errors to os errno
+ * @code: Ionic error number
+ *
+ * Return: Negative OS error number or zero
+ */
+int ionic_error_to_errno(enum ionic_status_code code);
+
+/**
+ * ionic_request_rdma_reset - request reset or disable the device or lif
+ * @lif: Logical interface
+ *
+ * The reset itself runs asynchronously on the device; this call only
+ * waits until the reset request completes or times out.
+ */
+void ionic_request_rdma_reset(struct ionic_lif *lif);
+
+/**
+ * ionic_intr_alloc - Reserve a device interrupt
+ * @lif: Logical interface
+ * @intr: Reserved ionic interrupt structure
+ *
+ * Reserve an interrupt index and get irq number for that index.
+ *
+ * Return: zero or negative error status
+ */
+int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr);
+
+/**
+ * ionic_intr_free - Release a device interrupt index
+ * @lif: Logical interface
+ * @intr: Interrupt index
+ *
+ * Mark the interrupt index unused so that it can be reserved again.
+ */
+void ionic_intr_free(struct ionic_lif *lif, int intr);
+
+/**
+ * ionic_get_cmb - Reserve cmb pages
+ * @lif: Logical interface
+ * @pgid: First page index
+ * @pgaddr: First page bus addr (contiguous)
+ * @order: Log base two number of pages (PAGE_SIZE)
+ * @stride_log2: Size of stride to determine CMB pool
+ * @expdb: Will be set to true if this CMB region has expdb enabled
+ *
+ * Return: zero or negative error status
+ */
+int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr,
+ int order, u8 stride_log2, bool *expdb);
+
+/**
+ * ionic_put_cmb - Release cmb pages
+ * @lif: Logical interface
+ * @pgid: First page index
+ * @order: Log base two number of pages (PAGE_SIZE)
+ */
+void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order);
+
+#endif /* _IONIC_API_H_ */
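
A hedged sketch of how a consumer might drive ionic_adminq_post_wait() with the on-stack context defined above; the specific command union member and attribute used here are illustrative rather than confirmed by this header:

	#include <linux/completion.h>

	/* Assumes ionic_api.h and ionic_if.h are in scope; the lif_setattr
	 * member, attribute and state field are illustrative only.
	 */
	static int demo_set_lif_state(struct ionic_lif *lif, u16 lif_index, u8 state)
	{
		struct ionic_admin_ctx ctx = {
			.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
			.cmd.lif_setattr = {
				.opcode = IONIC_CMD_LIF_SETATTR,
				.index = cpu_to_le16(lif_index),
				.attr = IONIC_LIF_ATTR_STATE,
				.state = state,
			},
		};

		return ionic_adminq_post_wait(lif, &ctx);
	}
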
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_aux.c b/drivers/net/ethernet/pensando/ionic/ionic_aux.c
new file mode 100644
index 000000000000..a2be338eb3e5
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_aux.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/kernel.h>
+#include "ionic.h"
+#include "ionic_lif.h"
+#include "ionic_aux.h"
+
+static DEFINE_IDA(aux_ida);
+
+static void ionic_auxbus_release(struct device *dev)
+{
+ struct ionic_aux_dev *ionic_adev;
+
+ ionic_adev = container_of(dev, struct ionic_aux_dev, adev.dev);
+ ida_free(&aux_ida, ionic_adev->adev.id);
+ kfree(ionic_adev);
+}
+
+int ionic_auxbus_register(struct ionic_lif *lif)
+{
+ struct ionic_aux_dev *ionic_adev;
+ struct auxiliary_device *aux_dev;
+ int err, id;
+
+ if (!(le64_to_cpu(lif->ionic->ident.lif.capabilities) & IONIC_LIF_CAP_RDMA))
+ return 0;
+
+ ionic_adev = kzalloc(sizeof(*ionic_adev), GFP_KERNEL);
+ if (!ionic_adev)
+ return -ENOMEM;
+
+ aux_dev = &ionic_adev->adev;
+
+ id = ida_alloc(&aux_ida, GFP_KERNEL);
+ if (id < 0) {
+ dev_err(lif->ionic->dev, "Failed to allocate aux id: %d\n", id);
+ kfree(ionic_adev);
+ return id;
+ }
+
+ aux_dev->id = id;
+ aux_dev->name = "rdma";
+ aux_dev->dev.parent = &lif->ionic->pdev->dev;
+ aux_dev->dev.release = ionic_auxbus_release;
+ ionic_adev->lif = lif;
+ err = auxiliary_device_init(aux_dev);
+ if (err) {
+ dev_err(lif->ionic->dev, "Failed to initialize %s aux device: %d\n",
+ aux_dev->name, err);
+ ida_free(&aux_ida, id);
+ kfree(ionic_adev);
+ return err;
+ }
+
+ err = auxiliary_device_add(aux_dev);
+ if (err) {
+ dev_err(lif->ionic->dev, "Failed to add %s aux device: %d\n",
+ aux_dev->name, err);
+ auxiliary_device_uninit(aux_dev);
+ return err;
+ }
+
+ lif->ionic_adev = ionic_adev;
+ return 0;
+}
+
+void ionic_auxbus_unregister(struct ionic_lif *lif)
+{
+ mutex_lock(&lif->adev_lock);
+ if (!lif->ionic_adev)
+ goto out;
+
+ auxiliary_device_delete(&lif->ionic_adev->adev);
+ auxiliary_device_uninit(&lif->ionic_adev->adev);
+
+ lif->ionic_adev = NULL;
+out:
+ mutex_unlock(&lif->adev_lock);
+}
+
+void ionic_request_rdma_reset(struct ionic_lif *lif)
+{
+ struct ionic *ionic = lif->ionic;
+ int err;
+
+ union ionic_dev_cmd cmd = {
+ .cmd.opcode = IONIC_CMD_RDMA_RESET_LIF,
+ .cmd.lif_index = cpu_to_le16(lif->index),
+ };
+
+ mutex_lock(&ionic->dev_cmd_lock);
+
+ ionic_dev_cmd_go(&ionic->idev, &cmd);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+
+ mutex_unlock(&ionic->dev_cmd_lock);
+
+ if (err)
+ pr_warn("%s request_reset: error %d\n", __func__, err);
+}
+EXPORT_SYMBOL_NS(ionic_request_rdma_reset, "NET_IONIC");
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_aux.h b/drivers/net/ethernet/pensando/ionic/ionic_aux.h
new file mode 100644
index 000000000000..f5528a9f187d
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_aux.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#ifndef _IONIC_AUX_H_
+#define _IONIC_AUX_H_
+
+int ionic_auxbus_register(struct ionic_lif *lif);
+void ionic_auxbus_unregister(struct ionic_lif *lif);
+
+#endif /* _IONIC_AUX_H_ */
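
The auxiliary device registered by the ethernet driver is named "rdma", so a consumer binds to it with an ordinary auxiliary_driver; the id string below assumes the registering module is called "ionic", which is an assumption rather than something stated in this patch. A rough sketch:

	#include <linux/auxiliary_bus.h>
	#include <linux/module.h>
	/* struct ionic_aux_dev comes from ionic_api.h */

	static int demo_rdma_probe(struct auxiliary_device *adev,
				   const struct auxiliary_device_id *id)
	{
		struct ionic_aux_dev *ionic_adev =
			container_of(adev, struct ionic_aux_dev, adev);

		/* ionic_adev->lif gives access to the ethernet lif */
		return 0;
	}

	static void demo_rdma_remove(struct auxiliary_device *adev)
	{
	}

	static const struct auxiliary_device_id demo_rdma_id_table[] = {
		{ .name = "ionic.rdma" },	/* assumption: parent module is "ionic" */
		{},
	};
	MODULE_DEVICE_TABLE(auxiliary, demo_rdma_id_table);

	static struct auxiliary_driver demo_rdma_driver = {
		.name = "demo_rdma",
		.probe = demo_rdma_probe,
		.remove = demo_rdma_remove,
		.id_table = demo_rdma_id_table,
	};
	module_auxiliary_driver(demo_rdma_driver);
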
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index b93791d6b593..70d86c5f52fb 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -9,6 +9,7 @@
#include "ionic.h"
#include "ionic_bus.h"
#include "ionic_lif.h"
+#include "ionic_aux.h"
#include "ionic_debugfs.h"
/* Supported devices */
@@ -271,6 +272,8 @@ static int ionic_setup_one(struct ionic *ionic)
}
ionic_debugfs_add_ident(ionic);
+ ionic_map_cmb(ionic);
+
err = ionic_init(ionic);
if (err) {
dev_err(dev, "Cannot init device: %d, aborting\n", err);
@@ -375,6 +378,8 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_deregister_devlink;
}
+ ionic_auxbus_register(ionic->lif);
+
mod_timer(&ionic->watchdog_timer,
round_jiffies(jiffies + ionic->watchdog_period));
ionic_queue_doorbell_check(ionic, IONIC_NAPI_DEADLINE);
@@ -394,6 +399,7 @@ err_out_free_irqs:
err_out_pci:
ionic_dev_teardown(ionic);
ionic_clear_pci(ionic);
+ ionic_debugfs_del_dev(ionic);
err_out:
mutex_destroy(&ionic->dev_cmd_lock);
ionic_devlink_free(ionic);
@@ -408,12 +414,14 @@ static void ionic_remove(struct pci_dev *pdev)
timer_shutdown_sync(&ionic->watchdog_timer);
if (ionic->lif) {
+ cancel_work_sync(&ionic->lif->deferred.work);
/* prevent adminq cmds if already known as down */
if (test_and_clear_bit(IONIC_LIF_F_FW_RESET, ionic->lif->state))
set_bit(IONIC_LIF_F_FW_STOPPING, ionic->lif->state);
if (ionic->lif->doorbell_wa)
cancel_delayed_work_sync(&ionic->doorbell_check_dwork);
+ ionic_auxbus_unregister(ionic->lif);
ionic_lif_unregister(ionic->lif);
ionic_devlink_unregister(ionic);
ionic_lif_deinit(ionic->lif);
@@ -440,9 +448,10 @@ static void ionic_reset_prepare(struct pci_dev *pdev)
set_bit(IONIC_LIF_F_FW_RESET, lif->state);
- del_timer_sync(&ionic->watchdog_timer);
+ timer_delete_sync(&ionic->watchdog_timer);
cancel_work_sync(&lif->deferred.work);
+ ionic_auxbus_unregister(ionic->lif);
mutex_lock(&lif->queue_lock);
ionic_stop_queues_reconfig(lif);
ionic_txrx_free(lif);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 9e42d599840d..ab27e9225c1e 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -13,7 +13,7 @@
static void ionic_watchdog_cb(struct timer_list *t)
{
- struct ionic *ionic = from_timer(ionic, t, watchdog_timer);
+ struct ionic *ionic = timer_container_of(ionic, t, watchdog_timer);
struct ionic_lif *lif = ionic->lif;
struct ionic_deferred_work *work;
int hb;
@@ -199,13 +199,201 @@ void ionic_init_devinfo(struct ionic *ionic)
dev_dbg(ionic->dev, "fw_version %s\n", idev->dev_info.fw_version);
}
+static void ionic_map_disc_cmb(struct ionic *ionic)
+{
+ struct ionic_identity *ident = &ionic->ident;
+ u32 length_reg0, length, offset, num_regions;
+ struct ionic_dev_bar *bar = ionic->bars;
+ struct ionic_dev *idev = &ionic->idev;
+ struct device *dev = ionic->dev;
+ int err, sz, i;
+ u64 end;
+
+ mutex_lock(&ionic->dev_cmd_lock);
+
+ ionic_dev_cmd_discover_cmb(idev);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ if (!err) {
+ sz = min(sizeof(ident->cmb_layout),
+ sizeof(idev->dev_cmd_regs->data));
+ memcpy_fromio(&ident->cmb_layout,
+ &idev->dev_cmd_regs->data, sz);
+ }
+ mutex_unlock(&ionic->dev_cmd_lock);
+
+ if (err) {
+ dev_warn(dev, "Cannot discover CMB layout, disabling CMB\n");
+ return;
+ }
+
+ bar += 2;
+
+ num_regions = le32_to_cpu(ident->cmb_layout.num_regions);
+ if (!num_regions || num_regions > IONIC_MAX_CMB_REGIONS) {
+ dev_warn(dev, "Invalid number of CMB entries (%d)\n",
+ num_regions);
+ return;
+ }
+
+ dev_dbg(dev, "ionic_cmb_layout_identity num_regions %d flags %x:\n",
+ num_regions, ident->cmb_layout.flags);
+
+ for (i = 0; i < num_regions; i++) {
+ offset = le32_to_cpu(ident->cmb_layout.region[i].offset);
+ length = le32_to_cpu(ident->cmb_layout.region[i].length);
+ end = offset + length;
+
+ dev_dbg(dev, "CMB entry %d: bar_num %u cmb_type %u offset %x length %u\n",
+ i, ident->cmb_layout.region[i].bar_num,
+ ident->cmb_layout.region[i].cmb_type,
+ offset, length);
+
+ if (end > (bar->len >> IONIC_CMB_SHIFT_64K)) {
+ dev_warn(dev, "Out of bounds CMB region %d offset %x length %u\n",
+ i, offset, length);
+ return;
+ }
+ }
+
+ /* if first entry matches PCI config, expdb is not supported */
+ if (ident->cmb_layout.region[0].bar_num == bar->res_index &&
+ le32_to_cpu(ident->cmb_layout.region[0].length) == bar->len &&
+ !ident->cmb_layout.region[0].offset) {
+ dev_warn(dev, "No CMB mapping discovered\n");
+ return;
+ }
+
+ /* process first entry for regular mapping */
+ length_reg0 = le32_to_cpu(ident->cmb_layout.region[0].length);
+ if (!length_reg0) {
+ dev_warn(dev, "region len = 0. No CMB mapping discovered\n");
+ return;
+ }
+
+ /* Verify first entry size matches expected 8MB size (in 64KB pages) */
+ if (length_reg0 != IONIC_BAR2_CMB_ENTRY_SIZE >> IONIC_CMB_SHIFT_64K) {
+ dev_warn(dev, "Unexpected CMB size in entry 0: %u pages\n",
+ length_reg0);
+ return;
+ }
+
+ sz = BITS_TO_LONGS((length_reg0 << IONIC_CMB_SHIFT_64K) /
+ PAGE_SIZE) * sizeof(long);
+ idev->cmb_inuse = kzalloc(sz, GFP_KERNEL);
+ if (!idev->cmb_inuse) {
+ dev_warn(dev, "No memory for CMB, disabling\n");
+ idev->phy_cmb_pages = 0;
+ idev->phy_cmb_expdb64_pages = 0;
+ idev->phy_cmb_expdb128_pages = 0;
+ idev->phy_cmb_expdb256_pages = 0;
+ idev->phy_cmb_expdb512_pages = 0;
+ idev->cmb_npages = 0;
+ return;
+ }
+
+ for (i = 0; i < num_regions; i++) {
+ /* check that this region matches the first region's length,
+ * which keeps the implementation simple
+ */
+ if (le32_to_cpu(ident->cmb_layout.region[i].length) !=
+ length_reg0)
+ continue;
+
+ offset = le32_to_cpu(ident->cmb_layout.region[i].offset);
+
+ switch (ident->cmb_layout.region[i].cmb_type) {
+ case IONIC_CMB_TYPE_DEVMEM:
+ idev->phy_cmb_pages = bar->bus_addr + offset;
+ idev->cmb_npages =
+ (length_reg0 << IONIC_CMB_SHIFT_64K) / PAGE_SIZE;
+ dev_dbg(dev, "regular cmb mapping: bar->bus_addr %pa region[%d].length %u\n",
+ &bar->bus_addr, i, length);
+ dev_dbg(dev, "idev->phy_cmb_pages %pad, idev->cmb_npages %u\n",
+ &idev->phy_cmb_pages, idev->cmb_npages);
+ break;
+
+ case IONIC_CMB_TYPE_EXPDB64:
+ idev->phy_cmb_expdb64_pages =
+ bar->bus_addr + (offset << IONIC_CMB_SHIFT_64K);
+ dev_dbg(dev, "idev->phy_cmb_expdb64_pages %pad\n",
+ &idev->phy_cmb_expdb64_pages);
+ break;
+
+ case IONIC_CMB_TYPE_EXPDB128:
+ idev->phy_cmb_expdb128_pages =
+ bar->bus_addr + (offset << IONIC_CMB_SHIFT_64K);
+ dev_dbg(dev, "idev->phy_cmb_expdb128_pages %pad\n",
+ &idev->phy_cmb_expdb128_pages);
+ break;
+
+ case IONIC_CMB_TYPE_EXPDB256:
+ idev->phy_cmb_expdb256_pages =
+ bar->bus_addr + (offset << IONIC_CMB_SHIFT_64K);
+ dev_dbg(dev, "idev->phy_cmb_expdb256_pages %pad\n",
+ &idev->phy_cmb_expdb256_pages);
+ break;
+
+ case IONIC_CMB_TYPE_EXPDB512:
+ idev->phy_cmb_expdb512_pages =
+ bar->bus_addr + (offset << IONIC_CMB_SHIFT_64K);
+ dev_dbg(dev, "idev->phy_cmb_expdb512_pages %pad\n",
+ &idev->phy_cmb_expdb512_pages);
+ break;
+
+ default:
+ dev_warn(dev, "[%d] Invalid cmb_type (%d)\n",
+ i, ident->cmb_layout.region[i].cmb_type);
+ break;
+ }
+ }
+}
+
+static void ionic_map_classic_cmb(struct ionic *ionic)
+{
+ struct ionic_dev_bar *bar = ionic->bars;
+ struct ionic_dev *idev = &ionic->idev;
+ struct device *dev = ionic->dev;
+ int sz;
+
+ bar += 2;
+ /* classic CMB mapping */
+ idev->phy_cmb_pages = bar->bus_addr;
+ idev->cmb_npages = bar->len / PAGE_SIZE;
+ dev_dbg(dev, "classic cmb mapping: bar->bus_addr %pa bar->len %lu\n",
+ &bar->bus_addr, bar->len);
+ dev_dbg(dev, "idev->phy_cmb_pages %pad, idev->cmb_npages %u\n",
+ &idev->phy_cmb_pages, idev->cmb_npages);
+
+ sz = BITS_TO_LONGS(idev->cmb_npages) * sizeof(long);
+ idev->cmb_inuse = kzalloc(sz, GFP_KERNEL);
+ if (!idev->cmb_inuse) {
+ idev->phy_cmb_pages = 0;
+ idev->cmb_npages = 0;
+ }
+}
+
+void ionic_map_cmb(struct ionic *ionic)
+{
+ struct pci_dev *pdev = ionic->pdev;
+ struct device *dev = ionic->dev;
+
+ if (!(pci_resource_flags(pdev, 4) & IORESOURCE_MEM)) {
+ dev_dbg(dev, "No CMB, disabling\n");
+ return;
+ }
+
+ if (ionic->ident.dev.capabilities & cpu_to_le64(IONIC_DEV_CAP_DISC_CMB))
+ ionic_map_disc_cmb(ionic);
+ else
+ ionic_map_classic_cmb(ionic);
+}
+
int ionic_dev_setup(struct ionic *ionic)
{
struct ionic_dev_bar *bar = ionic->bars;
unsigned int num_bars = ionic->num_bars;
struct ionic_dev *idev = &ionic->idev;
struct device *dev = ionic->dev;
- int size;
u32 sig;
int err;
@@ -255,16 +443,11 @@ int ionic_dev_setup(struct ionic *ionic)
mutex_init(&idev->cmb_inuse_lock);
if (num_bars < 3 || !ionic->bars[IONIC_PCI_BAR_CMB].len) {
idev->cmb_inuse = NULL;
+ idev->phy_cmb_pages = 0;
+ idev->cmb_npages = 0;
return 0;
}
- idev->phy_cmb_pages = bar->bus_addr;
- idev->cmb_npages = bar->len / PAGE_SIZE;
- size = BITS_TO_LONGS(idev->cmb_npages) * sizeof(long);
- idev->cmb_inuse = kzalloc(size, GFP_KERNEL);
- if (!idev->cmb_inuse)
- dev_warn(dev, "No memory for CMB, disabling\n");
-
return 0;
}
@@ -277,7 +460,15 @@ void ionic_dev_teardown(struct ionic *ionic)
idev->phy_cmb_pages = 0;
idev->cmb_npages = 0;
- destroy_workqueue(ionic->wq);
+ idev->phy_cmb_expdb64_pages = 0;
+ idev->phy_cmb_expdb128_pages = 0;
+ idev->phy_cmb_expdb256_pages = 0;
+ idev->phy_cmb_expdb512_pages = 0;
+
+ if (ionic->wq) {
+ destroy_workqueue(ionic->wq);
+ ionic->wq = NULL;
+ }
mutex_destroy(&idev->cmb_inuse_lock);
}
@@ -421,9 +612,9 @@ do_check_time:
if (fw_hb_ready != idev->fw_hb_ready) {
idev->fw_hb_ready = fw_hb_ready;
if (!fw_hb_ready)
- dev_info(ionic->dev, "FW heartbeat stalled at %d\n", fw_hb);
+ dev_info(ionic->dev, "FW heartbeat stalled at %u\n", fw_hb);
else
- dev_info(ionic->dev, "FW heartbeat restored at %d\n", fw_hb);
+ dev_info(ionic->dev, "FW heartbeat restored at %u\n", fw_hb);
}
if (!fw_hb_ready)
@@ -695,28 +886,79 @@ void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq,
ionic_dev_cmd_go(idev, &cmd);
}
+void ionic_dev_cmd_discover_cmb(struct ionic_dev *idev)
+{
+ union ionic_dev_cmd cmd = {
+ .discover_cmb.opcode = IONIC_CMD_DISCOVER_CMB,
+ };
+
+ ionic_dev_cmd_go(idev, &cmd);
+}
+
int ionic_db_page_num(struct ionic_lif *lif, int pid)
{
return (lif->hw_index * lif->dbid_count) + pid;
}
-int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr, int order)
+int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr,
+ int order, u8 stride_log2, bool *expdb)
{
struct ionic_dev *idev = &lif->ionic->idev;
- int ret;
+ void __iomem *nonexpdb_pgptr;
+ phys_addr_t nonexpdb_pgaddr;
+ int i, idx;
mutex_lock(&idev->cmb_inuse_lock);
- ret = bitmap_find_free_region(idev->cmb_inuse, idev->cmb_npages, order);
+ idx = bitmap_find_free_region(idev->cmb_inuse, idev->cmb_npages, order);
mutex_unlock(&idev->cmb_inuse_lock);
- if (ret < 0)
- return ret;
+ if (idx < 0)
+ return idx;
+
+ *pgid = (u32)idx;
+
+ if (idev->phy_cmb_expdb64_pages &&
+ stride_log2 == IONIC_EXPDB_64B_WQE_LG2) {
+ *pgaddr = idev->phy_cmb_expdb64_pages + idx * PAGE_SIZE;
+ if (expdb)
+ *expdb = true;
+ } else if (idev->phy_cmb_expdb128_pages &&
+ stride_log2 == IONIC_EXPDB_128B_WQE_LG2) {
+ *pgaddr = idev->phy_cmb_expdb128_pages + idx * PAGE_SIZE;
+ if (expdb)
+ *expdb = true;
+ } else if (idev->phy_cmb_expdb256_pages &&
+ stride_log2 == IONIC_EXPDB_256B_WQE_LG2) {
+ *pgaddr = idev->phy_cmb_expdb256_pages + idx * PAGE_SIZE;
+ if (expdb)
+ *expdb = true;
+ } else if (idev->phy_cmb_expdb512_pages &&
+ stride_log2 == IONIC_EXPDB_512B_WQE_LG2) {
+ *pgaddr = idev->phy_cmb_expdb512_pages + idx * PAGE_SIZE;
+ if (expdb)
+ *expdb = true;
+ } else {
+ *pgaddr = idev->phy_cmb_pages + idx * PAGE_SIZE;
+ if (expdb)
+ *expdb = false;
+ }
- *pgid = ret;
- *pgaddr = idev->phy_cmb_pages + ret * PAGE_SIZE;
+ /* clear the requested CMB region, 1 PAGE_SIZE ioremap at a time */
+ nonexpdb_pgaddr = idev->phy_cmb_pages + idx * PAGE_SIZE;
+ for (i = 0; i < (1 << order); i++) {
+ nonexpdb_pgptr =
+ ioremap_wc(nonexpdb_pgaddr + i * PAGE_SIZE, PAGE_SIZE);
+ if (!nonexpdb_pgptr) {
+ ionic_put_cmb(lif, *pgid, order);
+ return -ENOMEM;
+ }
+ memset_io(nonexpdb_pgptr, 0, PAGE_SIZE);
+ iounmap(nonexpdb_pgptr);
+ }
return 0;
}
+EXPORT_SYMBOL_NS(ionic_get_cmb, "NET_IONIC");
void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order)
{
@@ -726,6 +968,7 @@ void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order)
bitmap_release_region(idev->cmb_inuse, pgid, order);
mutex_unlock(&idev->cmb_inuse_lock);
}
+EXPORT_SYMBOL_NS(ionic_put_cmb, "NET_IONIC");
int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
struct ionic_intr_info *intr,
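ionic_get_cmb()/ionic_put_cmb() above manage controller memory as power-of-two runs of pages tracked in a single bitmap (serialized in the driver by cmb_inuse_lock, omitted here). A minimal sketch of that allocation scheme, assuming a hypothetical example_cmb context:

#include <linux/bitmap.h>

struct example_cmb {
	unsigned long *inuse;	/* one bit per CMB page */
	unsigned int npages;
	phys_addr_t base;	/* start of the CMB aperture */
};

static int example_cmb_get(struct example_cmb *cmb, int order,
			   u32 *pgid, phys_addr_t *pgaddr)
{
	int idx;

	/* find and mark 2^order naturally aligned free pages */
	idx = bitmap_find_free_region(cmb->inuse, cmb->npages, order);
	if (idx < 0)
		return idx;

	*pgid = idx;
	*pgaddr = cmb->base + (phys_addr_t)idx * PAGE_SIZE;
	return 0;
}

static void example_cmb_put(struct example_cmb *cmb, u32 pgid, int order)
{
	/* return the same 2^order pages to the pool */
	bitmap_release_region(cmb->inuse, pgid, order);
}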
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index c8c710cfe70c..35566f97eaea 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -12,6 +12,7 @@
#include "ionic_if.h"
#include "ionic_regs.h"
+#include "ionic_api.h"
#define IONIC_MAX_TX_DESC 8192
#define IONIC_MAX_RX_DESC 16384
@@ -34,6 +35,11 @@
#define IONIC_RX_MIN_DOORBELL_DEADLINE (HZ / 100) /* 10ms */
#define IONIC_RX_MAX_DOORBELL_DEADLINE (HZ * 4) /* 4s */
+#define IONIC_EXPDB_64B_WQE_LG2 6
+#define IONIC_EXPDB_128B_WQE_LG2 7
+#define IONIC_EXPDB_256B_WQE_LG2 8
+#define IONIC_EXPDB_512B_WQE_LG2 9
+
struct ionic_dev_bar {
void __iomem *vaddr;
phys_addr_t bus_addr;
@@ -170,6 +176,11 @@ struct ionic_dev {
dma_addr_t phy_cmb_pages;
u32 cmb_npages;
+ dma_addr_t phy_cmb_expdb64_pages;
+ dma_addr_t phy_cmb_expdb128_pages;
+ dma_addr_t phy_cmb_expdb256_pages;
+ dma_addr_t phy_cmb_expdb512_pages;
+
u32 port_info_sz;
struct ionic_port_info *port_info;
dma_addr_t port_info_pa;
@@ -273,19 +284,6 @@ struct ionic_queue {
char name[IONIC_QUEUE_NAME_MAX_SZ];
} ____cacheline_aligned_in_smp;
-#define IONIC_INTR_INDEX_NOT_ASSIGNED -1
-#define IONIC_INTR_NAME_MAX_SZ 32
-
-struct ionic_intr_info {
- char name[IONIC_INTR_NAME_MAX_SZ];
- u64 rearm_count;
- unsigned int index;
- unsigned int vector;
- u32 dim_coal_hw;
- cpumask_var_t *affinity_mask;
- struct irq_affinity_notify aff_notify;
-};
-
struct ionic_cq {
struct ionic_lif *lif;
struct ionic_queue *bound_q;
@@ -363,8 +361,8 @@ void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq,
int ionic_db_page_num(struct ionic_lif *lif, int pid);
-int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr, int order);
-void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order);
+void ionic_dev_cmd_discover_cmb(struct ionic_dev *idev);
+void ionic_map_cmb(struct ionic *ionic);
int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
struct ionic_intr_info *intr,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index dda22fa4448c..2d9efadb5d2a 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -158,6 +158,20 @@ static int ionic_get_link_ksettings(struct net_device *netdev,
25000baseCR_Full);
copper_seen++;
break;
+ case IONIC_XCVR_PID_QSFP_50G_CR2_FC:
+ case IONIC_XCVR_PID_QSFP_50G_CR2:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 50000baseCR2_Full);
+ copper_seen++;
+ break;
+ case IONIC_XCVR_PID_QSFP_200G_CR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported, 200000baseCR4_Full);
+ copper_seen++;
+ break;
+ case IONIC_XCVR_PID_QSFP_400G_CR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported, 400000baseCR4_Full);
+ copper_seen++;
+ break;
case IONIC_XCVR_PID_SFP_10GBASE_AOC:
case IONIC_XCVR_PID_SFP_10GBASE_CU:
ethtool_link_ksettings_add_link_mode(ks, supported,
@@ -196,6 +210,31 @@ static int ionic_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseSR_Full);
break;
+ case IONIC_XCVR_PID_QSFP_200G_AOC:
+ case IONIC_XCVR_PID_QSFP_200G_SR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 200000baseSR4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_200G_FR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 200000baseLR4_ER4_FR4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_200G_DR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 200000baseDR4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_400G_FR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 400000baseLR4_ER4_FR4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_400G_DR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 400000baseDR4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_400G_SR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 400000baseSR4_Full);
+ break;
case IONIC_XCVR_PID_SFP_10GBASE_SR:
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseSR_Full);
@@ -909,63 +948,20 @@ static int ionic_get_tunable(struct net_device *netdev,
return 0;
}
-static int ionic_get_module_info(struct net_device *netdev,
- struct ethtool_modinfo *modinfo)
-
-{
- struct ionic_lif *lif = netdev_priv(netdev);
- struct ionic_dev *idev = &lif->ionic->idev;
- struct ionic_xcvr_status *xcvr;
- struct sfp_eeprom_base *sfp;
-
- xcvr = &idev->port_info->status.xcvr;
- sfp = (struct sfp_eeprom_base *) xcvr->sprom;
-
- /* report the module data type and length */
- switch (sfp->phys_id) {
- case SFF8024_ID_SFP:
- modinfo->type = ETH_MODULE_SFF_8079;
- modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
- break;
- case SFF8024_ID_QSFP_8436_8636:
- case SFF8024_ID_QSFP28_8636:
- modinfo->type = ETH_MODULE_SFF_8436;
- modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
- break;
- default:
- netdev_info(netdev, "unknown xcvr type 0x%02x\n",
- xcvr->sprom[0]);
- modinfo->type = 0;
- modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
- break;
- }
-
- return 0;
-}
-
-static int ionic_get_module_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *ee,
- u8 *data)
+static int ionic_do_module_copy(u8 *dst, u8 *src, u32 len)
{
- struct ionic_lif *lif = netdev_priv(netdev);
- struct ionic_dev *idev = &lif->ionic->idev;
- struct ionic_xcvr_status *xcvr;
- char tbuf[sizeof(xcvr->sprom)];
+ char tbuf[sizeof_field(struct ionic_xcvr_status, sprom)];
int count = 10;
- u32 len;
/* The NIC keeps the module prom up-to-date in the DMA space
* so we can simply copy the module bytes into the data buffer.
*/
- xcvr = &idev->port_info->status.xcvr;
- len = min_t(u32, sizeof(xcvr->sprom), ee->len);
-
do {
- memcpy(data, xcvr->sprom, len);
- memcpy(tbuf, xcvr->sprom, len);
+ memcpy(dst, src, len);
+ memcpy(tbuf, src, len);
/* Let's make sure we got a consistent copy */
- if (!memcmp(data, tbuf, len))
+ if (!memcmp(dst, tbuf, len))
break;
} while (--count);
@@ -976,6 +972,48 @@ static int ionic_get_module_eeprom(struct net_device *netdev,
return 0;
}
+static int ionic_get_module_eeprom_by_page(struct net_device *netdev,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_dev *idev = &lif->ionic->idev;
+ int err;
+ u8 *src;
+
+ if (!page_data->length)
+ return -EINVAL;
+
+ if (page_data->bank != 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Only bank 0 is supported");
+ return -EINVAL;
+ }
+
+ switch (page_data->page) {
+ case 0:
+ src = &idev->port_info->status.xcvr.sprom[page_data->offset];
+ break;
+ case 1:
+ src = &idev->port_info->sprom_page1[page_data->offset - 128];
+ break;
+ case 2:
+ src = &idev->port_info->sprom_page2[page_data->offset - 128];
+ break;
+ case 17:
+ src = &idev->port_info->sprom_page17[page_data->offset - 128];
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ memset(page_data->data, 0, page_data->length);
+ err = ionic_do_module_copy(page_data->data, src, page_data->length);
+ if (err)
+ return err;
+
+ return page_data->length;
+}
+
static int ionic_get_ts_info(struct net_device *netdev,
struct kernel_ethtool_ts_info *info)
{
@@ -1121,8 +1159,7 @@ static const struct ethtool_ops ionic_ethtool_ops = {
.set_rxfh = ionic_set_rxfh,
.get_tunable = ionic_get_tunable,
.set_tunable = ionic_set_tunable,
- .get_module_info = ionic_get_module_info,
- .get_module_eeprom = ionic_get_module_eeprom,
+ .get_module_eeprom_by_page = ionic_get_module_eeprom_by_page,
.get_pauseparam = ionic_get_pauseparam,
.set_pauseparam = ionic_set_pauseparam,
.get_fecparam = ionic_get_fecparam,
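The driver now implements the page-oriented ethtool callback instead of get_module_info/get_module_eeprom: the core supplies page, bank, offset and length, and the driver returns the number of bytes copied or a negative errno. A minimal sketch of that contract follows; example_priv and its cached module_eeprom buffer are assumptions, and real hardware (as in the ionic code above) must also rebase upper pages, which start at offset 128.

#include <linux/ethtool.h>
#include <linux/netdevice.h>

struct example_priv {
	u8 module_eeprom[256];	/* hypothetical cached copy of page 0 */
};

static int example_get_module_eeprom_by_page(struct net_device *netdev,
					     const struct ethtool_module_eeprom *page_data,
					     struct netlink_ext_ack *extack)
{
	struct example_priv *priv = netdev_priv(netdev);

	if (page_data->bank) {
		NL_SET_ERR_MSG_MOD(extack, "Only bank 0 is supported");
		return -EINVAL;
	}

	if (page_data->page)	/* only the base page in this sketch */
		return -EOPNOTSUPP;

	memcpy(page_data->data, &priv->module_eeprom[page_data->offset],
	       page_data->length);

	/* success is reported as the number of bytes written to page_data->data */
	return page_data->length;
}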
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index 9c85c0706c6e..47559c909c8b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -56,6 +56,9 @@ enum ionic_cmd_opcode {
IONIC_CMD_VF_SETATTR = 61,
IONIC_CMD_VF_CTRL = 62,
+ /* CMB command */
+ IONIC_CMD_DISCOVER_CMB = 80,
+
/* QoS commands */
IONIC_CMD_QOS_CLASS_IDENTIFY = 240,
IONIC_CMD_QOS_CLASS_INIT = 241,
@@ -269,9 +272,11 @@ union ionic_drv_identity {
/**
* enum ionic_dev_capability - Device capabilities
* @IONIC_DEV_CAP_VF_CTRL: Device supports VF ctrl operations
+ * @IONIC_DEV_CAP_DISC_CMB: Device supports CMB discovery operations
*/
enum ionic_dev_capability {
IONIC_DEV_CAP_VF_CTRL = BIT(0),
+ IONIC_DEV_CAP_DISC_CMB = BIT(1),
};
/**
@@ -395,6 +400,7 @@ enum ionic_logical_qtype {
* @IONIC_Q_F_4X_DESC: Quadruple main descriptor size
* @IONIC_Q_F_4X_CQ_DESC: Quadruple cq descriptor size
* @IONIC_Q_F_4X_SG_DESC: Quadruple sg descriptor size
+ * @IONIC_QIDENT_F_EXPDB: Queue supports express doorbell
*/
enum ionic_q_feature {
IONIC_QIDENT_F_CQ = BIT_ULL(0),
@@ -407,6 +413,7 @@ enum ionic_q_feature {
IONIC_Q_F_4X_DESC = BIT_ULL(7),
IONIC_Q_F_4X_CQ_DESC = BIT_ULL(8),
IONIC_Q_F_4X_SG_DESC = BIT_ULL(9),
+ IONIC_QIDENT_F_EXPDB = BIT_ULL(10),
};
/**
@@ -495,6 +502,16 @@ union ionic_lif_config {
};
/**
+ * enum ionic_lif_rdma_cap_stats - LIF stat type
+ * @IONIC_LIF_RDMA_STAT_GLOBAL: Global stats
+ * @IONIC_LIF_RDMA_STAT_QP: Queue pair stats
+ */
+enum ionic_lif_rdma_cap_stats {
+ IONIC_LIF_RDMA_STAT_GLOBAL = BIT(0),
+ IONIC_LIF_RDMA_STAT_QP = BIT(1),
+};
+
+/**
* struct ionic_lif_identity - LIF identity information (type-specific)
*
* @capabilities: LIF capabilities
@@ -513,10 +530,10 @@ union ionic_lif_config {
* @eth.config: LIF config struct with features, mtu, mac, q counts
*
* @rdma: RDMA identify structure
- * @rdma.version: RDMA version of opcodes and queue descriptors
+ * @rdma.version: RDMA capability version
* @rdma.qp_opcodes: Number of RDMA queue pair opcodes supported
* @rdma.admin_opcodes: Number of RDMA admin opcodes supported
- * @rdma.rsvd: reserved byte(s)
+ * @rdma.minor_version: RDMA capability minor version
* @rdma.npts_per_lif: Page table size per LIF
* @rdma.nmrs_per_lif: Number of memory regions per LIF
* @rdma.nahs_per_lif: Number of address handles per LIF
@@ -526,12 +543,17 @@ union ionic_lif_config {
* @rdma.rrq_stride: Remote RQ work request stride
* @rdma.rsq_stride: Remote SQ work request stride
* @rdma.dcqcn_profiles: Number of DCQCN profiles
- * @rdma.rsvd_dimensions: reserved byte(s)
+ * @rdma.udma_shift: Log2 number of queues per queue group
+ * @rdma.rsvd_dimensions: Reserved byte
+ * @rdma.page_size_cap: Supported page sizes
* @rdma.aq_qtype: RDMA Admin Qtype
* @rdma.sq_qtype: RDMA Send Qtype
* @rdma.rq_qtype: RDMA Receive Qtype
* @rdma.cq_qtype: RDMA Completion Qtype
* @rdma.eq_qtype: RDMA Event Qtype
+ * @rdma.stats_type: Supported statistics type
+ * (enum ionic_lif_rdma_cap_stats)
+ * @rdma.rsvd1: Reserved byte(s)
* @words: word access to struct contents
*/
union ionic_lif_identity {
@@ -557,7 +579,7 @@ union ionic_lif_identity {
u8 version;
u8 qp_opcodes;
u8 admin_opcodes;
- u8 rsvd;
+ u8 minor_version;
__le32 npts_per_lif;
__le32 nmrs_per_lif;
__le32 nahs_per_lif;
@@ -567,12 +589,16 @@ union ionic_lif_identity {
u8 rrq_stride;
u8 rsq_stride;
u8 dcqcn_profiles;
- u8 rsvd_dimensions[10];
+ u8 udma_shift;
+ u8 rsvd_dimensions;
+ __le64 page_size_cap;
struct ionic_lif_logical_qtype aq_qtype;
struct ionic_lif_logical_qtype sq_qtype;
struct ionic_lif_logical_qtype rq_qtype;
struct ionic_lif_logical_qtype cq_qtype;
struct ionic_lif_logical_qtype eq_qtype;
+ __le16 stats_type;
+ u8 rsvd1[162];
} __packed rdma;
} __packed;
__le32 words[478];
@@ -1074,7 +1100,7 @@ struct ionic_rxq_sg_desc {
* first IPv4 header. If the receive packet
* contains both a tunnel IPv4 header and a
* transport IPv4 header, the device validates the
- * checksum for the both IPv4 headers.
+ * checksum for both IPv4 headers.
*
* IONIC_RXQ_COMP_CSUM_F_IP_BAD:
* The IPv4 checksum calculated by the device did
@@ -1277,7 +1303,10 @@ enum ionic_xcvr_pid {
IONIC_XCVR_PID_SFP_25GBASE_CR_S = 3,
IONIC_XCVR_PID_SFP_25GBASE_CR_L = 4,
IONIC_XCVR_PID_SFP_25GBASE_CR_N = 5,
-
+ IONIC_XCVR_PID_QSFP_50G_CR2_FC = 6,
+ IONIC_XCVR_PID_QSFP_50G_CR2 = 7,
+ IONIC_XCVR_PID_QSFP_200G_CR4 = 8,
+ IONIC_XCVR_PID_QSFP_400G_CR4 = 9,
/* Fiber */
IONIC_XCVR_PID_QSFP_100G_AOC = 50,
IONIC_XCVR_PID_QSFP_100G_ACC = 51,
@@ -1303,6 +1332,15 @@ enum ionic_xcvr_pid {
IONIC_XCVR_PID_SFP_25GBASE_ACC = 71,
IONIC_XCVR_PID_SFP_10GBASE_T = 72,
IONIC_XCVR_PID_SFP_1000BASE_T = 73,
+ IONIC_XCVR_PID_QSFP_200G_AOC = 74,
+ IONIC_XCVR_PID_QSFP_200G_FR4 = 75,
+ IONIC_XCVR_PID_QSFP_200G_DR4 = 76,
+ IONIC_XCVR_PID_QSFP_200G_SR4 = 77,
+ IONIC_XCVR_PID_QSFP_200G_ACC = 78,
+ IONIC_XCVR_PID_QSFP_400G_FR4 = 79,
+ IONIC_XCVR_PID_QSFP_400G_DR4 = 80,
+ IONIC_XCVR_PID_QSFP_400G_SR4 = 81,
+ IONIC_XCVR_PID_QSFP_400G_VR4 = 82,
};
/**
@@ -1404,6 +1442,8 @@ struct ionic_xcvr_status {
*/
union ionic_port_config {
struct {
+#define IONIC_SPEED_400G 400000 /* 400G in Mbps */
+#define IONIC_SPEED_200G 200000 /* 200G in Mbps */
#define IONIC_SPEED_100G 100000 /* 100G in Mbps */
#define IONIC_SPEED_50G 50000 /* 50G in Mbps */
#define IONIC_SPEED_40G 40000 /* 40G in Mbps */
@@ -2181,6 +2221,80 @@ struct ionic_vf_ctrl_comp {
};
/**
+ * struct ionic_discover_cmb_cmd - CMB discovery command
+ * @opcode: Opcode for the command
+ * @rsvd: Reserved bytes
+ */
+struct ionic_discover_cmb_cmd {
+ u8 opcode;
+ u8 rsvd[63];
+};
+
+/**
+ * struct ionic_discover_cmb_comp - CMB discovery command completion
+ * @status: Status of the command (enum ionic_status_code)
+ * @rsvd: Reserved bytes
+ */
+struct ionic_discover_cmb_comp {
+ u8 status;
+ u8 rsvd[15];
+};
+
+#define IONIC_MAX_CMB_REGIONS 16
+#define IONIC_CMB_SHIFT_64K 16
+
+enum ionic_cmb_type {
+ IONIC_CMB_TYPE_DEVMEM = 0,
+ IONIC_CMB_TYPE_EXPDB64 = 1,
+ IONIC_CMB_TYPE_EXPDB128 = 2,
+ IONIC_CMB_TYPE_EXPDB256 = 3,
+ IONIC_CMB_TYPE_EXPDB512 = 4,
+};
+
+/**
+ * union ionic_cmb_region - Configuration for CMB region
+ * @bar_num: CMB mapping number from FW
+ * @cmb_type: Type of CMB this region describes (enum ionic_cmb_type)
+ * @rsvd: Reserved
+ * @offset: Offset within BAR in 64KB pages
+ * @length: Length of the CMB region
+ * @words: 32-bit words for direct access to the entire region
+ */
+union ionic_cmb_region {
+ struct {
+ u8 bar_num;
+ u8 cmb_type;
+ u8 rsvd[6];
+ __le32 offset;
+ __le32 length;
+ } __packed;
+ __le32 words[4];
+};
+
+/**
+ * union ionic_discover_cmb_identity - CMB layout identity structure
+ * @num_regions: Number of CMB regions, up to 16
+ * @flags: Feature and capability bits (bit 0: express
+ * doorbell, bit 1: 4K alignment indicator,
+ * bits 31-24: version information)
+ * @region: CMB mapping regions; entry 0 is the regular
+ * mapping, entries 1-7 cover WQE sizes 64,
+ * 128, 256, 512, 1024, 2048 and 4096 bytes
+ * @words: Full union buffer size
+ */
+union ionic_discover_cmb_identity {
+ struct {
+ __le32 num_regions;
+#define IONIC_CMB_FLAG_EXPDB BIT(0)
+#define IONIC_CMB_FLAG_4KALIGN BIT(1)
+#define IONIC_CMB_FLAG_VERSION 0xff000000
+ __le32 flags;
+ union ionic_cmb_region region[IONIC_MAX_CMB_REGIONS];
+ };
+ __le32 words[478];
+};
+
+/**
* struct ionic_qos_identify_cmd - QoS identify command
* @opcode: opcode
* @ver: Highest version of identify supported by driver
@@ -2825,6 +2939,10 @@ union ionic_port_identity {
* @status: Port status data
* @stats: Port statistics data
* @mgmt_stats: Port management statistics data
+ * @sprom_epage: Extended Transceiver sprom
+ * @sprom_page1: Extended Transceiver sprom, page 1
+ * @sprom_page2: Extended Transceiver sprom, page 2
+ * @sprom_page17: Extended Transceiver sprom, page 17
* @rsvd: reserved byte(s)
* @pb_stats: uplink pb drop stats
*/
@@ -2835,8 +2953,17 @@ struct ionic_port_info {
struct ionic_port_stats stats;
struct ionic_mgmt_port_stats mgmt_stats;
};
- /* room for pb_stats to start at 2k offset */
- u8 rsvd[760];
+ union {
+ u8 sprom_epage[384];
+ struct {
+ u8 sprom_page1[128];
+ u8 sprom_page2[128];
+ u8 sprom_page17[128];
+ };
+ };
+ u8 rsvd[376];
+
+ /* pb_stats must start at 2k offset */
struct ionic_port_pb_stats pb_stats;
};
@@ -3027,6 +3154,8 @@ union ionic_dev_cmd {
struct ionic_vf_getattr_cmd vf_getattr;
struct ionic_vf_ctrl_cmd vf_ctrl;
+ struct ionic_discover_cmb_cmd discover_cmb;
+
struct ionic_lif_identify_cmd lif_identify;
struct ionic_lif_init_cmd lif_init;
struct ionic_lif_reset_cmd lif_reset;
@@ -3066,6 +3195,8 @@ union ionic_dev_cmd_comp {
struct ionic_vf_getattr_comp vf_getattr;
struct ionic_vf_ctrl_comp vf_ctrl;
+ struct ionic_discover_cmb_comp discover_cmb;
+
struct ionic_lif_identify_comp lif_identify;
struct ionic_lif_init_comp lif_init;
ionic_lif_reset_comp lif_reset;
@@ -3207,9 +3338,16 @@ union ionic_adminq_comp {
#define IONIC_BAR0_DEV_CMD_DATA_REGS_OFFSET 0x0c00
#define IONIC_BAR0_INTR_STATUS_OFFSET 0x1000
#define IONIC_BAR0_INTR_CTRL_OFFSET 0x2000
+
+/* BAR2 */
+#define IONIC_BAR2_CMB_ENTRY_SIZE 0x800000
#define IONIC_DEV_CMD_DONE 0x00000001
-#define IONIC_ASIC_TYPE_CAPRI 0
+#define IONIC_ASIC_TYPE_NONE 0
+#define IONIC_ASIC_TYPE_CAPRI 1
+#define IONIC_ASIC_TYPE_ELBA 2
+#define IONIC_ASIC_TYPE_GIGLIO 3
+#define IONIC_ASIC_TYPE_SALINA 4
/**
* struct ionic_doorbell - Doorbell register layout
@@ -3256,6 +3394,7 @@ struct ionic_identity {
union ionic_port_identity port;
union ionic_qos_identity qos;
union ionic_q_identity txq;
+ union ionic_discover_cmb_identity cmb_layout;
};
#endif /* _IONIC_IF_H_ */
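A consumer of the new discovery structures converts region offsets and lengths from 64KB units to bytes with IONIC_CMB_SHIFT_64K and reads the version from the top byte of flags. The sketch below only illustrates that decoding; example_dump_cmb_layout is a hypothetical helper, not part of the patch.

static void example_dump_cmb_layout(struct device *dev,
				    const union ionic_discover_cmb_identity *ident)
{
	u32 nregions = le32_to_cpu(ident->num_regions);
	u32 flags = le32_to_cpu(ident->flags);
	unsigned int i;

	dev_info(dev, "CMB layout v%u expdb=%d 4k_align=%d\n",
		 (flags & IONIC_CMB_FLAG_VERSION) >> 24,
		 !!(flags & IONIC_CMB_FLAG_EXPDB),
		 !!(flags & IONIC_CMB_FLAG_4KALIGN));

	for (i = 0; i < min_t(u32, nregions, IONIC_MAX_CMB_REGIONS); i++) {
		const union ionic_cmb_region *r = &ident->region[i];

		/* offset and length are carried in 64KB pages */
		dev_info(dev, "region %u: bar %u type %u offset %llu len %llu bytes\n",
			 i, r->bar_num, r->cmb_type,
			 (u64)le32_to_cpu(r->offset) << IONIC_CMB_SHIFT_64K,
			 (u64)le32_to_cpu(r->length) << IONIC_CMB_SHIFT_64K);
	}
}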
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 40496587b2b3..058eea86e141 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -19,6 +19,7 @@
#include "ionic_bus.h"
#include "ionic_dev.h"
#include "ionic_lif.h"
+#include "ionic_aux.h"
#include "ionic_txrx.h"
#include "ionic_ethtool.h"
#include "ionic_debugfs.h"
@@ -243,29 +244,36 @@ static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
0, intr->name, &qcq->napi);
}
-static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
+int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
{
struct ionic *ionic = lif->ionic;
- int index;
+ int index, err;
index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
- if (index == ionic->nintrs) {
- netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
- __func__, index, ionic->nintrs);
+ if (index == ionic->nintrs)
return -ENOSPC;
- }
set_bit(index, ionic->intrs);
ionic_intr_init(&ionic->idev, intr, index);
+ err = ionic_bus_get_irq(ionic, intr->index);
+ if (err < 0) {
+ clear_bit(index, ionic->intrs);
+ return err;
+ }
+
+ intr->vector = err;
+
return 0;
}
+EXPORT_SYMBOL_NS(ionic_intr_alloc, "NET_IONIC");
-static void ionic_intr_free(struct ionic *ionic, int index)
+void ionic_intr_free(struct ionic_lif *lif, int index)
{
- if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
- clear_bit(index, ionic->intrs);
+ if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < lif->ionic->nintrs)
+ clear_bit(index, lif->ionic->intrs);
}
+EXPORT_SYMBOL_NS(ionic_intr_free, "NET_IONIC");
static void ionic_irq_aff_notify(struct irq_affinity_notify *notify,
const cpumask_t *mask)
@@ -400,7 +408,7 @@ static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
irq_set_affinity_hint(qcq->intr.vector, NULL);
devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi);
qcq->intr.vector = 0;
- ionic_intr_free(lif->ionic, qcq->intr.index);
+ ionic_intr_free(lif, qcq->intr.index);
qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
}
@@ -510,13 +518,6 @@ static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qc
goto err_out;
}
- err = ionic_bus_get_irq(lif->ionic, qcq->intr.index);
- if (err < 0) {
- netdev_warn(lif->netdev, "no vector for %s: %d\n",
- qcq->q.name, err);
- goto err_out_free_intr;
- }
- qcq->intr.vector = err;
ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_SET);
@@ -545,7 +546,7 @@ static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qc
return 0;
err_out_free_intr:
- ionic_intr_free(lif->ionic, qcq->intr.index);
+ ionic_intr_free(lif, qcq->intr.index);
err_out:
return err;
}
@@ -672,7 +673,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
new->cmb_order = order_base_2(new->cmb_q_size / PAGE_SIZE);
err = ionic_get_cmb(lif, &new->cmb_pgid, &new->cmb_q_base_pa,
- new->cmb_order);
+ new->cmb_order, 0, NULL);
if (err) {
netdev_err(lif->netdev,
"Cannot allocate queue order %d from cmb: err %d\n",
@@ -740,7 +741,7 @@ err_out_free_q:
err_out_free_irq:
if (flags & IONIC_QCQ_F_INTR) {
devm_free_irq(dev, new->intr.vector, &new->napi);
- ionic_intr_free(lif->ionic, new->intr.index);
+ ionic_intr_free(lif, new->intr.index);
}
err_out_free_page_pool:
page_pool_destroy(new->q.page_pool);
@@ -2334,20 +2335,6 @@ static int ionic_stop(struct net_device *netdev)
return 0;
}
-static int ionic_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- struct ionic_lif *lif = netdev_priv(netdev);
-
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return ionic_lif_hwstamp_set(lif, ifr);
- case SIOCGHWTSTAMP:
- return ionic_lif_hwstamp_get(lif, ifr);
- default:
- return -EOPNOTSUPP;
- }
-}
-
static int ionic_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivf)
{
@@ -2811,7 +2798,6 @@ static int ionic_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
static const struct net_device_ops ionic_netdev_ops = {
.ndo_open = ionic_open,
.ndo_stop = ionic_stop,
- .ndo_eth_ioctl = ionic_eth_ioctl,
.ndo_start_xmit = ionic_start_xmit,
.ndo_bpf = ionic_xdp,
.ndo_xdp_xmit = ionic_xdp_xmit,
@@ -2832,6 +2818,8 @@ static const struct net_device_ops ionic_netdev_ops = {
.ndo_get_vf_config = ionic_get_vf_config,
.ndo_set_vf_link_state = ionic_set_vf_link_state,
.ndo_get_vf_stats = ionic_get_vf_stats,
+ .ndo_hwtstamp_get = ionic_hwstamp_get,
+ .ndo_hwtstamp_set = ionic_hwstamp_set,
};
static int ionic_cmb_reconfig(struct ionic_lif *lif,
@@ -3265,7 +3253,7 @@ int ionic_lif_alloc(struct ionic *ionic)
lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU,
le32_to_cpu(lif->identity->eth.min_frame_size));
lif->netdev->max_mtu =
- le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN;
+ le32_to_cpu(lif->identity->eth.max_frame_size) - VLAN_ETH_HLEN;
lif->neqs = ionic->neqs_per_lif;
lif->nxqs = ionic->ntxqs_per_lif;
@@ -3293,6 +3281,7 @@ int ionic_lif_alloc(struct ionic *ionic)
mutex_init(&lif->queue_lock);
mutex_init(&lif->config_lock);
+ mutex_init(&lif->adev_lock);
spin_lock_init(&lif->adminq_lock);
@@ -3349,6 +3338,7 @@ err_out_free_lif_info:
lif->info = NULL;
lif->info_pa = 0;
err_out_free_mutex:
+ mutex_destroy(&lif->adev_lock);
mutex_destroy(&lif->config_lock);
mutex_destroy(&lif->queue_lock);
err_out_free_netdev:
@@ -3384,6 +3374,7 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
netif_device_detach(lif->netdev);
+ ionic_auxbus_unregister(ionic->lif);
mutex_lock(&lif->queue_lock);
if (test_bit(IONIC_LIF_F_UP, lif->state)) {
dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
@@ -3446,6 +3437,8 @@ int ionic_restart_lif(struct ionic_lif *lif)
netif_device_attach(lif->netdev);
ionic_queue_doorbell_check(ionic, IONIC_NAPI_DEADLINE);
+ ionic_auxbus_register(ionic->lif);
+
return 0;
err_txrx_free:
@@ -3526,12 +3519,9 @@ void ionic_lif_free(struct ionic_lif *lif)
lif->info = NULL;
lif->info_pa = 0;
- /* unmap doorbell page */
- ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
- lif->kern_dbpage = NULL;
-
mutex_destroy(&lif->config_lock);
mutex_destroy(&lif->queue_lock);
+ mutex_destroy(&lif->adev_lock);
/* free netdev & lif */
ionic_debugfs_del_lif(lif);
@@ -3555,6 +3545,9 @@ void ionic_lif_deinit(struct ionic_lif *lif)
ionic_lif_qcq_deinit(lif, lif->notifyqcq);
ionic_lif_qcq_deinit(lif, lif->adminqcq);
+ ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
+ lif->kern_dbpage = NULL;
+
ionic_lif_reset(lif);
}
@@ -3804,10 +3797,6 @@ err_out_adminq_deinit:
return err;
}
-static void ionic_lif_notify_work(struct work_struct *ws)
-{
-}
-
static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
{
struct ionic_admin_ctx ctx = {
@@ -3858,8 +3847,6 @@ int ionic_lif_register(struct ionic_lif *lif)
ionic_lif_register_phc(lif);
- INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work);
-
lif->ionic->nb.notifier_call = ionic_lif_notify;
err = register_netdevice_notifier(&lif->ionic->nb);
@@ -3869,8 +3856,8 @@ int ionic_lif_register(struct ionic_lif *lif)
/* only register LIF0 for now */
err = register_netdev(lif->netdev);
if (err) {
- dev_err(lif->ionic->dev, "Cannot register net device, aborting\n");
- ionic_lif_unregister_phc(lif);
+ dev_err(lif->ionic->dev, "Cannot register net device: %d, aborting\n", err);
+ ionic_lif_unregister(lif);
return err;
}
@@ -3885,7 +3872,6 @@ void ionic_lif_unregister(struct ionic_lif *lif)
{
if (lif->ionic->nb.notifier_call) {
unregister_netdevice_notifier(&lif->ionic->nb);
- cancel_work_sync(&lif->ionic->nb_work);
lif->ionic->nb.notifier_call = NULL;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index e01756fb7fdd..8e10f66dc50e 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -6,10 +6,11 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
-#include <uapi/linux/net_tstamp.h>
+#include <linux/net_tstamp.h>
#include <linux/dim.h>
#include <linux/pci.h>
#include "ionic_rx_filter.h"
+#include "ionic_api.h"
#define IONIC_ADMINQ_LENGTH 16 /* must be a power of two */
#define IONIC_NOTIFYQ_LENGTH 64 /* must be a power of two */
@@ -225,6 +226,8 @@ struct ionic_lif {
dma_addr_t info_pa;
u32 info_sz;
struct ionic_qtype_info qtype_info[IONIC_QTYPE_MAX];
+ struct ionic_aux_dev *ionic_adev;
+ struct mutex adev_lock; /* lock for aux_dev actions */
u8 rss_hash_key[IONIC_RSS_HASH_KEY_SIZE];
u8 *rss_ind_tbl;
@@ -251,7 +254,7 @@ struct ionic_phc {
struct timecounter tc;
struct mutex config_lock; /* lock for ts_config */
- struct hwtstamp_config ts_config;
+ struct kernel_hwtstamp_config ts_config;
u64 ts_config_rx_filt;
u32 ts_config_tx_mode;
@@ -359,8 +362,11 @@ int ionic_lif_size(struct ionic *ionic);
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
void ionic_lif_hwstamp_replay(struct ionic_lif *lif);
void ionic_lif_hwstamp_recreate_queues(struct ionic_lif *lif);
-int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr);
-int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr);
+int ionic_hwstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+int ionic_hwstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
ktime_t ionic_lif_phc_ktime(struct ionic_lif *lif, u64 counter);
void ionic_lif_register_phc(struct ionic_lif *lif);
void ionic_lif_unregister_phc(struct ionic_lif *lif);
@@ -370,12 +376,15 @@ void ionic_lif_free_phc(struct ionic_lif *lif);
static inline void ionic_lif_hwstamp_replay(struct ionic_lif *lif) {}
static inline void ionic_lif_hwstamp_recreate_queues(struct ionic_lif *lif) {}
-static inline int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr)
+static inline int ionic_hwstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
-static inline int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr)
+static inline int ionic_hwstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
return -EOPNOTSUPP;
}
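The hwtstamp handling moves from ndo_eth_ioctl with SIOCSHWTSTAMP/SIOCGHWTSTAMP to the ndo_hwtstamp_get/set hooks, so the core performs the user copies and the driver works directly on a kernel_hwtstamp_config with an extack for error reporting. A minimal sketch of a driver using these hooks (example_* names are hypothetical and most ndo fields are omitted):

#include <linux/net_tstamp.h>
#include <linux/netdevice.h>

static int example_hwtstamp_set(struct net_device *netdev,
				struct kernel_hwtstamp_config *config,
				struct netlink_ext_ack *extack)
{
	if (config->tx_type != HWTSTAMP_TX_OFF &&
	    config->tx_type != HWTSTAMP_TX_ON) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported TX timestamp mode");
		return -ERANGE;
	}

	/* program the hardware here and remember the accepted config */
	return 0;
}

static int example_hwtstamp_get(struct net_device *netdev,
				struct kernel_hwtstamp_config *config)
{
	/* report the last accepted configuration */
	config->tx_type = HWTSTAMP_TX_OFF;
	config->rx_filter = HWTSTAMP_FILTER_NONE;
	return 0;
}

static const struct net_device_ops example_netdev_ops = {
	/* other ndo hooks omitted */
	.ndo_hwtstamp_get = example_hwtstamp_get,
	.ndo_hwtstamp_set = example_hwtstamp_set,
};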
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 0f817c3f92d8..14dc055be3e9 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -72,7 +72,7 @@ static const char *ionic_error_to_str(enum ionic_status_code code)
}
}
-static int ionic_error_to_errno(enum ionic_status_code code)
+int ionic_error_to_errno(enum ionic_status_code code)
{
switch (code) {
case IONIC_RC_SUCCESS:
@@ -81,8 +81,9 @@ static int ionic_error_to_errno(enum ionic_status_code code)
case IONIC_RC_EQTYPE:
case IONIC_RC_EQID:
case IONIC_RC_EINVAL:
- case IONIC_RC_ENOSUPP:
return -EINVAL;
+ case IONIC_RC_ENOSUPP:
+ return -EOPNOTSUPP;
case IONIC_RC_EPERM:
return -EPERM;
case IONIC_RC_ENOENT:
@@ -113,6 +114,7 @@ static int ionic_error_to_errno(enum ionic_status_code code)
return -EIO;
}
}
+EXPORT_SYMBOL_NS(ionic_error_to_errno, "NET_IONIC");
static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode)
{
@@ -479,6 +481,7 @@ int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
{
return __ionic_adminq_post_wait(lif, ctx, true);
}
+EXPORT_SYMBOL_NS(ionic_adminq_post_wait, "NET_IONIC");
int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
{
@@ -515,9 +518,9 @@ static int __ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds,
unsigned long start_time;
unsigned long max_wait;
unsigned long duration;
- int done = 0;
bool fw_up;
int opcode;
+ bool done;
int err;
/* Wait for dev cmd to complete, retrying if we get EAGAIN,
@@ -525,6 +528,7 @@ static int __ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds,
*/
max_wait = jiffies + (max_seconds * HZ);
try_again:
+ done = false;
opcode = idev->opcode;
start_time = jiffies;
for (fw_up = ionic_is_fw_running(idev);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_phc.c b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
index 7505efdff8e9..05b44fc482f8 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_phc.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
@@ -65,11 +65,12 @@ static u64 ionic_hwstamp_rx_filt(int config_rx_filter)
}
static int ionic_lif_hwstamp_set_ts_config(struct ionic_lif *lif,
- struct hwtstamp_config *new_ts)
+ struct kernel_hwtstamp_config *new_ts,
+ struct netlink_ext_ack *extack)
{
+ struct kernel_hwtstamp_config *config;
+ struct kernel_hwtstamp_config ts = {};
struct ionic *ionic = lif->ionic;
- struct hwtstamp_config *config;
- struct hwtstamp_config ts;
int tx_mode = 0;
u64 rx_filt = 0;
int err, err2;
@@ -99,12 +100,16 @@ static int ionic_lif_hwstamp_set_ts_config(struct ionic_lif *lif,
tx_mode = ionic_hwstamp_tx_mode(config->tx_type);
if (tx_mode < 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "TX time stamping mode isn't supported");
err = tx_mode;
goto err_queues;
}
mask = cpu_to_le64(BIT_ULL(tx_mode));
if ((ionic->ident.lif.eth.hwstamp_tx_modes & mask) != mask) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "TX time stamping mode isn't supported");
err = -ERANGE;
goto err_queues;
}
@@ -124,32 +129,47 @@ static int ionic_lif_hwstamp_set_ts_config(struct ionic_lif *lif,
if (tx_mode) {
err = ionic_lif_create_hwstamp_txq(lif);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Error creating TX timestamp queue");
goto err_queues;
+ }
}
if (rx_filt) {
err = ionic_lif_create_hwstamp_rxq(lif);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Error creating RX timestamp queue");
goto err_queues;
+ }
}
if (tx_mode != lif->phc->ts_config_tx_mode) {
err = ionic_lif_set_hwstamp_txmode(lif, tx_mode);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Error enabling TX timestamp mode");
goto err_txmode;
+ }
}
if (rx_filt != lif->phc->ts_config_rx_filt) {
err = ionic_lif_set_hwstamp_rxfilt(lif, rx_filt);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Error enabling RX timestamp mode");
goto err_rxfilt;
+ }
}
if (rx_all != (lif->phc->ts_config.rx_filter == HWTSTAMP_FILTER_ALL)) {
err = ionic_lif_config_hwstamp_rxq_all(lif, rx_all);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Error enabling RX timestamp mode");
goto err_rxall;
+ }
}
memcpy(&lif->phc->ts_config, config, sizeof(*config));
@@ -183,28 +203,24 @@ err_queues:
return err;
}
-int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr)
+int ionic_hwstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
+ struct ionic_lif *lif = netdev_priv(netdev);
int err;
if (!lif->phc || !lif->phc->ptp)
return -EOPNOTSUPP;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
mutex_lock(&lif->queue_lock);
- err = ionic_lif_hwstamp_set_ts_config(lif, &config);
+ err = ionic_lif_hwstamp_set_ts_config(lif, config, extack);
mutex_unlock(&lif->queue_lock);
if (err) {
netdev_info(lif->netdev, "hwstamp set failed: %d\n", err);
return err;
}
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
-
return 0;
}
@@ -216,7 +232,7 @@ void ionic_lif_hwstamp_replay(struct ionic_lif *lif)
return;
mutex_lock(&lif->queue_lock);
- err = ionic_lif_hwstamp_set_ts_config(lif, NULL);
+ err = ionic_lif_hwstamp_set_ts_config(lif, NULL, NULL);
mutex_unlock(&lif->queue_lock);
if (err)
netdev_info(lif->netdev, "hwstamp replay failed: %d\n", err);
@@ -246,19 +262,18 @@ void ionic_lif_hwstamp_recreate_queues(struct ionic_lif *lif)
mutex_unlock(&lif->phc->config_lock);
}
-int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr)
+int ionic_hwstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config config;
+ struct ionic_lif *lif = netdev_priv(netdev);
if (!lif->phc || !lif->phc->ptp)
return -EOPNOTSUPP;
mutex_lock(&lif->phc->config_lock);
- memcpy(&config, &lif->phc->ts_config, sizeof(config));
+ memcpy(config, &lif->phc->ts_config, sizeof(*config));
mutex_unlock(&lif->phc->config_lock);
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
return 0;
}
@@ -290,7 +305,7 @@ static u64 ionic_hwstamp_read(struct ionic *ionic,
return (u64)tick_low | ((u64)tick_high << 32);
}
-static u64 ionic_cc_read(const struct cyclecounter *cc)
+static u64 ionic_cc_read(struct cyclecounter *cc)
{
struct ionic_phc *phc = container_of(cc, struct ionic_phc, cc);
struct ionic *ionic = phc->lif->ionic;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 2ac59564ded1..301ebee2fdc5 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -29,6 +29,10 @@ static void ionic_tx_clean(struct ionic_queue *q,
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell)
{
+ /* Ensure TX descriptor writes reach memory before NIC reads them.
+ * Prevents device from fetching stale descriptors.
+ */
+ dma_wmb();
ionic_q_post(q, ring_dbell);
}
@@ -321,7 +325,7 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
len, DMA_TO_DEVICE);
} else /* XDP_REDIRECT */ {
dma_addr = ionic_tx_map_single(q, frame->data, len);
- if (!dma_addr)
+ if (dma_addr == DMA_MAPPING_ERROR)
return -EIO;
}
@@ -357,7 +361,7 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
} else {
dma_addr = ionic_tx_map_frag(q, frag, 0,
skb_frag_size(frag));
- if (dma_mapping_error(q->dev, dma_addr)) {
+ if (dma_addr == DMA_MAPPING_ERROR) {
ionic_tx_desc_unmap_bufs(q, desc_info);
return -EIO;
}
@@ -1083,7 +1087,7 @@ static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
net_warn_ratelimited("%s: DMA single map failed on %s!\n",
dev_name(dev), q->name);
q_to_tx_stats(q)->dma_map_err++;
- return 0;
+ return DMA_MAPPING_ERROR;
}
return dma_addr;
}
@@ -1100,7 +1104,7 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
dev_name(dev), q->name);
q_to_tx_stats(q)->dma_map_err++;
- return 0;
+ return DMA_MAPPING_ERROR;
}
return dma_addr;
}
@@ -1116,7 +1120,7 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
int frag_idx;
dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
- if (!dma_addr)
+ if (dma_addr == DMA_MAPPING_ERROR)
return -EIO;
buf_info->dma_addr = dma_addr;
buf_info->len = skb_headlen(skb);
@@ -1126,7 +1130,7 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
nfrags = skb_shinfo(skb)->nr_frags;
for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) {
dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
- if (!dma_addr)
+ if (dma_addr == DMA_MAPPING_ERROR)
goto dma_fail;
buf_info->dma_addr = dma_addr;
buf_info->len = skb_frag_size(frag);
@@ -1444,19 +1448,6 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
bool encap;
int err;
- desc_info = &q->tx_info[q->head_idx];
-
- if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
- return -EIO;
-
- len = skb->len;
- mss = skb_shinfo(skb)->gso_size;
- outer_csum = (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
- SKB_GSO_GRE_CSUM |
- SKB_GSO_IPXIP4 |
- SKB_GSO_IPXIP6 |
- SKB_GSO_UDP_TUNNEL |
- SKB_GSO_UDP_TUNNEL_CSUM));
has_vlan = !!skb_vlan_tag_present(skb);
vlan_tci = skb_vlan_tag_get(skb);
encap = skb->encapsulation;
@@ -1470,12 +1461,21 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
err = ionic_tx_tcp_inner_pseudo_csum(skb);
else
err = ionic_tx_tcp_pseudo_csum(skb);
- if (unlikely(err)) {
- /* clean up mapping from ionic_tx_map_skb */
- ionic_tx_desc_unmap_bufs(q, desc_info);
+ if (unlikely(err))
return err;
- }
+ desc_info = &q->tx_info[q->head_idx];
+ if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
+ return -EIO;
+
+ len = skb->len;
+ mss = skb_shinfo(skb)->gso_size;
+ outer_csum = (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
+ SKB_GSO_GRE_CSUM |
+ SKB_GSO_IPXIP4 |
+ SKB_GSO_IPXIP6 |
+ SKB_GSO_UDP_TUNNEL |
+ SKB_GSO_UDP_TUNNEL_CSUM));
if (encap)
hdrlen = skb_inner_tcp_all_headers(skb);
else
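The ionic_txrx.c changes stop using 0 as the failed-mapping sentinel and return DMA_MAPPING_ERROR instead, since 0 can be a legitimate bus address. A small sketch of the resulting map-and-check pattern, with hypothetical example_* helpers:

#include <linux/dma-mapping.h>

static dma_addr_t example_map_single(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr))
		return DMA_MAPPING_ERROR;	/* all-ones sentinel, never a valid address */

	return dma_addr;
}

static int example_use_mapping(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma_addr = example_map_single(dev, buf, len);

	if (dma_addr == DMA_MAPPING_ERROR)
		return -EIO;

	/* ... hand dma_addr to the hardware ... */

	dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
	return 0;
}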
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 9cff0a8ffb2c..e8ff661fa4a5 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2832,7 +2832,7 @@ netxen_sysfs_validate_crb(struct netxen_adapter *adapter,
static ssize_t
netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2860,7 +2860,7 @@ netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj,
static ssize_t
netxen_sysfs_write_crb(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2901,7 +2901,7 @@ netxen_sysfs_validate_mem(struct netxen_adapter *adapter,
static ssize_t
netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2922,7 +2922,7 @@ netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj,
}
static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2959,7 +2959,7 @@ static const struct bin_attribute bin_attr_mem = {
static ssize_t
netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index b7def3b54937..016b575861b9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -939,7 +939,6 @@ u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc);
u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc);
/* doorbell recovery mechanism */
-void qed_db_recovery_dp(struct qed_hwfn *p_hwfn);
void qed_db_recovery_execute(struct qed_hwfn *p_hwfn);
bool qed_edpm_enabled(struct qed_hwfn *p_hwfn);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h
index f6cd1b3efdfd..27e91d0d39f8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h
@@ -1305,37 +1305,6 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
char *results_buf);
/**
- * qed_print_mcp_trace_results_cont(): Prints MCP Trace results, and
- * keeps the MCP trace meta data allocated, to support continuous MCP Trace
- * parsing. After the continuous parsing ends, mcp_trace_free_meta_data should
- * be called to free the meta data.
- *
- * @p_hwfn: HW device data.
- * @dump_buf: MVP trace dump buffer, starting from the header.
- * @results_buf: Buffer for printing the mcp trace results.
- *
- * Return: Error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- char *results_buf);
-
-/**
- * qed_print_mcp_trace_line(): Prints MCP Trace results for a single line
- *
- * @p_hwfn: HW device data.
- * @dump_buf: MCP trace dump buffer, starting from the header.
- * @num_dumped_bytes: Number of bytes that were dumped.
- * @results_buf: Buffer for printing the mcp trace results.
- *
- * Return: Error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
- u8 *dump_buf,
- u32 num_dumped_bytes,
- char *results_buf);
-
-/**
* qed_mcp_trace_free_meta_data(): Frees the MCP Trace meta data.
* Should be called after continuous MCP Trace parsing.
*
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 464a72afb758..1f0cea3cae92 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -4462,10 +4462,11 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
goto out;
}
- /* Add override window info to buffer */
+ /* Add override window info to buffer, preventing buffer overflow */
override_window_dwords =
- qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
- PROTECTION_OVERRIDE_ELEMENT_DWORDS;
+ min(qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
+ PROTECTION_OVERRIDE_ELEMENT_DWORDS,
+ PROTECTION_OVERRIDE_DEPTH_DWORDS);
if (override_window_dwords) {
addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
offset += qed_grc_dump_addr_range(p_hwfn,
@@ -7614,31 +7615,6 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
results_buf, &parsed_buf_size, true);
}
-enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- char *results_buf)
-{
- u32 parsed_buf_size;
-
- return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf,
- &parsed_buf_size, false);
-}
-
-enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
- u8 *dump_buf,
- u32 num_dumped_bytes,
- char *results_buf)
-{
- u32 parsed_results_bytes;
-
- return qed_parse_mcp_trace_buf(p_hwfn,
- dump_buf,
- num_dumped_bytes,
- 0,
- num_dumped_bytes,
- results_buf, &parsed_results_bytes);
-}
-
/* Frees the specified MCP Trace meta data */
void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 86a93cac2647..f3d2b2b3bad5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -255,25 +255,6 @@ static void qed_db_recovery_teardown(struct qed_hwfn *p_hwfn)
p_hwfn->db_recovery_info.db_recovery_counter = 0;
}
-/* Print the content of the doorbell recovery mechanism */
-void qed_db_recovery_dp(struct qed_hwfn *p_hwfn)
-{
- struct qed_db_recovery_entry *db_entry = NULL;
-
- DP_NOTICE(p_hwfn,
- "Displaying doorbell recovery database. Counter was %d\n",
- p_hwfn->db_recovery_info.db_recovery_counter);
-
- /* Protect the list */
- spin_lock_bh(&p_hwfn->db_recovery_info.lock);
- list_for_each_entry(db_entry,
- &p_hwfn->db_recovery_info.list, list_entry) {
- qed_db_recovery_dp_entry(p_hwfn, db_entry, "Printing");
- }
-
- spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
-}
-
/* Ring the doorbell of a single doorbell recovery entry */
static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
struct qed_db_recovery_entry *db_entry)
@@ -2235,7 +2216,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
}
/* CID map / ILT shadow table / T2
- * The talbes sizes are determined by the computations above
+ * The table sizes are determined by the computations above
*/
rc = qed_cxt_tables_alloc(p_hwfn);
if (rc)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.c b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
index 1adc7fbb3f2f..0c5278c0598c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_devlink.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
@@ -87,20 +87,21 @@ qed_fw_fatal_reporter_recover(struct devlink_health_reporter *reporter,
return 0;
}
+#define QED_REPORTER_FW_GRACEFUL_PERIOD 0
+
static const struct devlink_health_reporter_ops qed_fw_fatal_reporter_ops = {
.name = "fw_fatal",
.recover = qed_fw_fatal_reporter_recover,
.dump = qed_fw_fatal_reporter_dump,
+ .default_graceful_period = QED_REPORTER_FW_GRACEFUL_PERIOD,
};
-#define QED_REPORTER_FW_GRACEFUL_PERIOD 0
-
void qed_fw_reporters_create(struct devlink *devlink)
{
struct qed_devlink *dl = devlink_priv(devlink);
- dl->fw_reporter = devlink_health_reporter_create(devlink, &qed_fw_fatal_reporter_ops,
- QED_REPORTER_FW_GRACEFUL_PERIOD, dl);
+ dl->fw_reporter = devlink_health_reporter_create(devlink,
+ &qed_fw_fatal_reporter_ops, dl);
if (IS_ERR(dl->fw_reporter)) {
DP_NOTICE(dl->cdev, "Failed to create fw reporter, err = %ld\n",
PTR_ERR(dl->fw_reporter));
@@ -120,7 +121,8 @@ void qed_fw_reporters_destroy(struct devlink *devlink)
}
static int qed_dl_param_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct qed_devlink *qed_dl = devlink_priv(dl);
struct qed_dev *cdev;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index ed1a84542ad2..10e355397cee 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -2666,58 +2666,6 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
-/**
- * qed_calc_session_ctx_validation(): Calcualte validation byte for
- * session context.
- *
- * @p_ctx_mem: Pointer to context memory.
- * @ctx_size: Context size.
- * @ctx_type: Context type.
- * @cid: Context cid.
- *
- * Return: Void.
- */
-void qed_calc_session_ctx_validation(void *p_ctx_mem,
- u16 ctx_size, u8 ctx_type, u32 cid);
-
-/**
- * qed_calc_task_ctx_validation(): Calcualte validation byte for task
- * context.
- *
- * @p_ctx_mem: Pointer to context memory.
- * @ctx_size: Context size.
- * @ctx_type: Context type.
- * @tid: Context tid.
- *
- * Return: Void.
- */
-void qed_calc_task_ctx_validation(void *p_ctx_mem,
- u16 ctx_size, u8 ctx_type, u32 tid);
-
-/**
- * qed_memset_session_ctx(): Memset session context to 0 while
- * preserving validation bytes.
- *
- * @p_ctx_mem: Pointer to context memory.
- * @ctx_size: Size to initialzie.
- * @ctx_type: Context type.
- *
- * Return: Void.
- */
-void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
-
-/**
- * qed_memset_task_ctx(): Memset task context to 0 while preserving
- * validation bytes.
- *
- * @p_ctx_mem: Pointer to context memory.
- * @ctx_size: size to initialzie.
- * @ctx_type: context type.
- *
- * Return: Void.
- */
-void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
-
#define NUM_STORMS 6
/**
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index 9e5f0dbc8a07..9907973399dc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -69,17 +69,6 @@ int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
return 0;
}
-void qed_ptt_invalidate(struct qed_hwfn *p_hwfn)
-{
- struct qed_ptt *p_ptt;
- int i;
-
- for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
- p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
- p_ptt->pxp.offset = QED_BAR_INVALID_OFFSET;
- }
-}
-
void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
{
kfree(p_hwfn->p_ptt_pool);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
index e535983ce21b..3c98f58a184f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -62,15 +62,6 @@ enum _dmae_cmd_crc_mask {
void qed_gtt_init(struct qed_hwfn *p_hwfn);
/**
- * qed_ptt_invalidate(): Forces all ptt entries to be re-configured
- *
- * @p_hwfn: HW device data.
- *
- * Return: Void.
- */
-void qed_ptt_invalidate(struct qed_hwfn *p_hwfn);
-
-/**
* qed_ptt_pool_alloc(): Allocate and initialize PTT pool.
*
* @p_hwfn: HW device data.
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index 407029a36fa1..aa20bb8caa9a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -18,16 +18,6 @@
#define CDU_VALIDATION_DEFAULT_CFG CDU_CONTEXT_VALIDATION_DEFAULT_CFG
-static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES] = {
- {400, 336, 352, 368, 304, 384, 416, 352}, /* region 3 offsets */
- {528, 496, 416, 512, 448, 512, 544, 480}, /* region 4 offsets */
- {608, 544, 496, 576, 576, 592, 624, 560} /* region 5 offsets */
-};
-
-static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = {
- {240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */
-};
-
/* General constants */
#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
QM_PQ_ELEMENT_SIZE, \
@@ -1576,134 +1566,6 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
}
-DECLARE_CRC8_TABLE(cdu_crc8_table);
-
-/* Calculate and return CDU validation byte per connection type/region/cid */
-static u8 qed_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
-{
- const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
- u8 crc, validation_byte = 0;
- static u8 crc8_table_valid; /* automatically initialized to 0 */
- u32 validation_string = 0;
- __be32 data_to_crc;
-
- if (!crc8_table_valid) {
- crc8_populate_msb(cdu_crc8_table, 0x07);
- crc8_table_valid = 1;
- }
-
- /* The CRC is calculated on the String-to-compress:
- * [31:8] = {CID[31:20],CID[11:0]}
- * [7:4] = Region
- * [3:0] = Type
- */
- if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
- validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);
-
- if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
- validation_string |= ((region & 0xF) << 4);
-
- if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
- validation_string |= (conn_type & 0xF);
-
- /* Convert to big-endian and calculate CRC8 */
- data_to_crc = cpu_to_be32(validation_string);
- crc = crc8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc),
- CRC8_INIT_VALUE);
-
- /* The validation byte [7:0] is composed:
- * for type A validation
- * [7] = active configuration bit
- * [6:0] = crc[6:0]
- *
- * for type B validation
- * [7] = active configuration bit
- * [6:3] = connection_type[3:0]
- * [2:0] = crc[2:0]
- */
- validation_byte |=
- ((validation_cfg >>
- CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;
-
- if ((validation_cfg >>
- CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
- validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
- else
- validation_byte |= crc & 0x7F;
-
- return validation_byte;
-}
-
-/* Calcualte and set validation bytes for session context */
-void qed_calc_session_ctx_validation(void *p_ctx_mem,
- u16 ctx_size, u8 ctx_type, u32 cid)
-{
- u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
-
- p_ctx = (u8 * const)p_ctx_mem;
- x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
- t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
- u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
-
- memset(p_ctx, 0, ctx_size);
-
- *x_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 3, cid);
- *t_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 4, cid);
- *u_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 5, cid);
-}
-
-/* Calcualte and set validation bytes for task context */
-void qed_calc_task_ctx_validation(void *p_ctx_mem,
- u16 ctx_size, u8 ctx_type, u32 tid)
-{
- u8 *p_ctx, *region1_val_ptr;
-
- p_ctx = (u8 * const)p_ctx_mem;
- region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
-
- memset(p_ctx, 0, ctx_size);
-
- *region1_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 1, tid);
-}
-
-/* Memset session context to 0 while preserving validation bytes */
-void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
-{
- u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
- u8 x_val, t_val, u_val;
-
- p_ctx = (u8 * const)p_ctx_mem;
- x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
- t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
- u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
-
- x_val = *x_val_ptr;
- t_val = *t_val_ptr;
- u_val = *u_val_ptr;
-
- memset(p_ctx, 0, ctx_size);
-
- *x_val_ptr = x_val;
- *t_val_ptr = t_val;
- *u_val_ptr = u_val;
-}
-
-/* Memset task context to 0 while preserving validation bytes */
-void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
-{
- u8 *p_ctx, *region1_val_ptr;
- u8 region1_val;
-
- p_ctx = (u8 * const)p_ctx_mem;
- region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
-
- region1_val = *region1_val_ptr;
-
- memset(p_ctx, 0, ctx_size);
-
- *region1_val_ptr = region1_val;
-}
-
/* Enable and configure context validation */
void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
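The removed qed_calc_cdu_validation_byte() above documents how the validation byte is built: a CRC-8 (polynomial 0x07, MSB-first) over a 32-bit string composed of {CID[31:20], CID[11:0]}, the region and the connection type. The standalone userspace sketch below reproduces that calculation for the default case where CID, region and type are all included, the active bit is set and type-A validation is used; the bit layout is taken from the comments in the removed code, while the CRC-8 routine and the 0xFF seed (assumed to match the kernel's CRC8_INIT_VALUE) are generic stand-ins for the kernel's crc8() helper.

#include <stdint.h>
#include <stdio.h>

#define CRC8_INIT 0xFF	/* assumed value of the kernel's CRC8_INIT_VALUE */

/* Generic MSB-first CRC-8 with polynomial 0x07, matching the table built by
 * crc8_populate_msb(cdu_crc8_table, 0x07) in the removed kernel code.
 */
static uint8_t crc8_msb(const uint8_t *buf, int len)
{
	uint8_t crc = CRC8_INIT;

	while (len--) {
		crc ^= *buf++;
		for (int bit = 0; bit < 8; bit++)
			crc = (crc & 0x80) ? (uint8_t)((crc << 1) ^ 0x07)
					   : (uint8_t)(crc << 1);
	}
	return crc;
}

/* Validation byte for the default configuration: CID, region and type all
 * included, active bit set, type-A layout (bit 7 = active, bits 6:0 = CRC).
 */
static uint8_t calc_cdu_validation_byte(uint8_t conn_type, uint8_t region, uint32_t cid)
{
	uint32_t s = (cid & 0xFFF00000) | ((cid & 0xFFF) << 8) |
		     ((uint32_t)(region & 0xF) << 4) | (conn_type & 0xF);
	uint8_t be[4] = { (uint8_t)(s >> 24), (uint8_t)(s >> 16),
			  (uint8_t)(s >> 8), (uint8_t)s };	/* big-endian */

	return 0x80 | (crc8_msb(be, sizeof(be)) & 0x7F);
}

int main(void)
{
	printf("0x%02x\n", calc_cdu_validation_byte(1, 3, 0x1234));
	return 0;
}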
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index f915c423fe70..d4685ad4b169 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -454,7 +454,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
static void qed_free_cdev(struct qed_dev *cdev)
{
- kfree((void *)cdev);
+ kfree(cdev);
}
static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
@@ -1214,7 +1214,8 @@ static int qed_slowpath_wq_start(struct qed_dev *cdev)
hwfn = &cdev->hwfns[i];
hwfn->slowpath_wq = alloc_workqueue("slowpath-%02x:%02x.%02x",
- 0, 0, cdev->pdev->bus->number,
+ WQ_PERCPU, 0,
+ cdev->pdev->bus->number,
PCI_SLOT(cdev->pdev->devfn),
hwfn->abs_pf_id);
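The workqueue hunk passes WQ_PERCPU explicitly instead of 0 for the flags argument, presumably so the per-CPU placement is stated rather than implied by an empty flags value. A short sketch of the updated call shape, with bus/slot/pf_id standing in for the cdev fields used above:

/* Sketch only: same semantics as flags == 0 today, but the per-CPU intent
 * is now visible at the call site; max_active stays 0 (default).
 */
struct workqueue_struct *wq;

wq = alloc_workqueue("slowpath-%02x:%02x.%02x", WQ_PERCPU, 0,
		     bus, slot, pf_id);
if (!wq)
	return -ENOMEM;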
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 26a714bfad4e..c7f497c36f66 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -3301,7 +3301,9 @@ int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
+ if (((rsp & FW_MSG_CODE_MASK) == FW_MSG_CODE_UNSUPPORTED))
+ rc = -EOPNOTSUPP;
+ else if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
rc = -EINVAL;
return rc;
@@ -3356,6 +3358,7 @@ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
p_ptt, &nvm_info.num_images);
if (rc == -EOPNOTSUPP) {
DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n");
+ nvm_info.num_images = 0;
goto out;
} else if (rc || !nvm_info.num_images) {
DP_ERR(p_hwfn, "Failed getting number of images\n");
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
index f55eed092f25..7d78f072b0a1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
@@ -242,7 +242,7 @@ static int qed_mfw_get_tlv_group(u8 tlv_type, u8 *tlv_group)
}
/* Returns size of the data buffer or, -1 in case TLV data is not available. */
-static int
+static noinline_for_stack int
qed_mfw_get_gen_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
struct qed_mfw_tlv_generic *p_drv_buf,
struct qed_tlv_parsed_buf *p_buf)
@@ -304,7 +304,7 @@ qed_mfw_get_gen_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
return -1;
}
-static int
+static noinline_for_stack int
qed_mfw_get_eth_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
struct qed_mfw_tlv_eth *p_drv_buf,
struct qed_tlv_parsed_buf *p_buf)
@@ -438,7 +438,7 @@ qed_mfw_get_tlv_time_value(struct qed_mfw_tlv_time *p_time,
return QED_MFW_TLV_TIME_SIZE;
}
-static int
+static noinline_for_stack int
qed_mfw_get_fcoe_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
struct qed_mfw_tlv_fcoe *p_drv_buf,
struct qed_tlv_parsed_buf *p_buf)
@@ -1073,7 +1073,7 @@ qed_mfw_get_fcoe_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
return -1;
}
-static int
+static noinline_for_stack int
qed_mfw_get_iscsi_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
struct qed_mfw_tlv_iscsi *p_drv_buf,
struct qed_tlv_parsed_buf *p_buf)
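The four TLV value helpers above are now marked noinline_for_stack, which keeps the compiler from inlining them into the single dispatch function so their large local buffers do not accumulate in one stack frame. A small sketch of the pattern; the helper name and scratch buffer are hypothetical.

/* Sketch: each helper keeps its own (potentially large) locals in a separate
 * frame instead of being folded into the caller, bounding the dispatcher's
 * worst-case stack depth.
 */
static noinline_for_stack int example_get_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
						    struct qed_tlv_parsed_buf *p_buf)
{
	char scratch[256];	/* stays in this frame only */

	/* ... format the requested TLV into scratch and p_buf ... */
	(void)scratch;
	return -1;
}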
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
index 5d725f59db24..8be567a6ad44 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
@@ -183,9 +183,6 @@ void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_buffer,
list_entry);
- if (!p_buffer)
- break;
-
list_move_tail(&p_buffer->list_entry,
&p_ooo_info->free_buffers_list);
}
@@ -218,9 +215,6 @@ void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_buffer,
list_entry);
- if (!p_buffer)
- break;
-
list_move_tail(&p_buffer->list_entry,
&p_ooo_info->free_buffers_list);
}
@@ -255,9 +249,6 @@ void qed_ooo_free(struct qed_hwfn *p_hwfn)
p_buffer = list_first_entry(&p_ooo_info->free_buffers_list,
struct qed_ooo_buffer, list_entry);
- if (!p_buffer)
- break;
-
list_del(&p_buffer->list_entry);
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
p_buffer->rx_buffer_size,
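The removed "if (!p_buffer) break;" checks above were dead code: list_first_entry() never returns NULL, it merely computes the container of the first node, and each loop already guards with !list_empty(). A sketch of the drain idiom that remains, paraphrasing the qed_ooo free path rather than quoting it:

/* Sketch: the emptiness test is the loop condition, so list_first_entry()
 * is only ever called on a non-empty list and needs no NULL check.
 */
while (!list_empty(&p_ooo_info->free_buffers_list)) {
	struct qed_ooo_buffer *p_buffer;

	p_buffer = list_first_entry(&p_ooo_info->free_buffers_list,
				    struct qed_ooo_buffer, list_entry);
	list_del(&p_buffer->list_entry);
	/* ... release p_buffer's DMA buffer and the tracking struct ... */
}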
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
index 295ce435a1a4..4df8a97b717e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
@@ -307,7 +307,7 @@ static int qed_ptp_hw_adjfreq(struct qed_dev *cdev, s32 ppb)
} else if (ppb == 1) {
/* This is a special case as its the only value which wouldn't
* fit in a s64 variable. In order to prevent castings simple
- * handle it seperately.
+ * handle it separately.
*/
best_val = 4;
best_period = 0xee6b27f;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index fa167b1aa019..5222a035fd19 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -3033,7 +3033,7 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
u16 length;
int rc;
- /* Valiate PF can send such a request */
+ /* Validate PF can send such a request */
if (!vf->vport_instance) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
@@ -3312,7 +3312,7 @@ static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
goto out;
}
- /* Determine if the unicast filtering is acceptible by PF */
+ /* Determine if the unicast filtering is acceptable by PF */
if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) &&
(params.type == QED_FILTER_VLAN ||
params.type == QED_FILTER_MAC_VLAN)) {
@@ -3729,7 +3729,7 @@ qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
if (rc) {
- DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n",
+ DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
vfid);
return rc;
}
@@ -4480,7 +4480,7 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
/* Failure to acquire the ptt in 100g creates an odd error
- * where the first engine has already relased IOV.
+ * where the first engine has already released IOV.
*/
if (!ptt) {
DP_ERR(hwfn, "Failed to acquire ptt\n");
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index c553da16d4b1..23982704273c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -504,7 +504,7 @@ static int qede_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
typeof(cmd->link_modes) *link_modes = &cmd->link_modes;
- struct ethtool_link_settings_hdr *base = &cmd->base;
+ struct ethtool_link_settings *base = &cmd->base;
struct qede_dev *edev = netdev_priv(dev);
struct qed_link_output current_link;
@@ -537,7 +537,7 @@ static int qede_get_link_ksettings(struct net_device *dev,
static int qede_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd)
{
- const struct ethtool_link_settings_hdr *base = &cmd->base;
+ const struct ethtool_link_settings *base = &cmd->base;
const struct ethtool_forced_speed_map *map;
struct qede_dev *edev = netdev_priv(dev);
struct qed_link_output current_link;
@@ -1168,8 +1168,11 @@ static int qede_set_phys_id(struct net_device *dev,
return 0;
}
-static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
+static int qede_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
{
+ struct qede_dev *edev = netdev_priv(dev);
+
info->data = RXH_IP_SRC | RXH_IP_DST;
switch (info->flow_type) {
@@ -1206,9 +1209,6 @@ static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
case ETHTOOL_GRXRINGS:
info->data = QEDE_RSS_COUNT(edev);
break;
- case ETHTOOL_GRXFH:
- rc = qede_get_rss_flags(edev, info);
- break;
case ETHTOOL_GRXCLSRLCNT:
info->rule_cnt = qede_get_arfs_filter_count(edev);
info->data = QEDE_RFS_MAX_FLTR;
@@ -1227,14 +1227,17 @@ static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
return rc;
}
-static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
+static int qede_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *info,
+ struct netlink_ext_ack *extack)
{
struct qed_update_vport_params *vport_update_params;
+ struct qede_dev *edev = netdev_priv(dev);
u8 set_caps = 0, clr_caps = 0;
int rc = 0;
DP_VERBOSE(edev, QED_MSG_DEBUG,
- "Set rss flags command parameters: flow type = %d, data = %llu\n",
+ "Set rss flags command parameters: flow type = %d, data = %u\n",
info->flow_type, info->data);
switch (info->flow_type) {
@@ -1337,9 +1340,6 @@ static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
int rc;
switch (info->cmd) {
- case ETHTOOL_SRXFH:
- rc = qede_set_rss_flags(edev, info);
- break;
case ETHTOOL_SRXCLSRLINS:
rc = qede_add_cls_rule(edev, info);
break;
@@ -2293,6 +2293,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
.get_rxfh_key_size = qede_get_rxfh_key_size,
.get_rxfh = qede_get_rxfh,
.set_rxfh = qede_set_rxfh,
+ .get_rxfh_fields = qede_get_rxfh_fields,
+ .set_rxfh_fields = qede_set_rxfh_fields,
.get_ts_info = qede_get_ts_info,
.get_channels = qede_get_channels,
.set_channels = qede_set_channels,
@@ -2335,6 +2337,8 @@ static const struct ethtool_ops qede_vf_ethtool_ops = {
.get_rxfh_key_size = qede_get_rxfh_key_size,
.get_rxfh = qede_get_rxfh,
.set_rxfh = qede_set_rxfh,
+ .get_rxfh_fields = qede_get_rxfh_fields,
+ .set_rxfh_fields = qede_set_rxfh_fields,
.get_channels = qede_get_channels,
.set_channels = qede_set_channels,
.get_per_queue_coalesce = qede_get_per_coalesce,
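The qede_ethtool.c hunks drop the ETHTOOL_GRXFH/SRXFH cases from the rxnfc handlers and expose the same flow-hash configuration through the dedicated .get_rxfh_fields/.set_rxfh_fields ethtool callbacks, which receive struct ethtool_rxfh_fields (plus an extack on set) instead of struct ethtool_rxnfc. A compressed sketch of the new wiring; the example_* names are placeholders and the bodies are stubs.

/* Sketch of the new hook shape: flow-hash field configuration moves out of
 * (get|set)_rxnfc into dedicated callbacks registered in ethtool_ops.
 */
static int example_get_rxfh_fields(struct net_device *dev,
				   struct ethtool_rxfh_fields *info)
{
	info->data = RXH_IP_SRC | RXH_IP_DST;	/* plus L4 ports per flow_type */
	return 0;
}

static int example_set_rxfh_fields(struct net_device *dev,
				   const struct ethtool_rxfh_fields *info,
				   struct netlink_ext_ack *extack)
{
	/* translate info->flow_type / info->data into device RSS settings */
	return 0;
}

static const struct ethtool_ops example_ethtool_ops = {
	.get_rxfh_fields = example_get_rxfh_fields,
	.set_rxfh_fields = example_set_rxfh_fields,
};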
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index 985026dd816f..7e341e026489 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -987,20 +987,17 @@ static int qede_udp_tunnel_sync(struct net_device *dev, unsigned int table)
static const struct udp_tunnel_nic_info qede_udp_tunnels_both = {
.sync_table = qede_udp_tunnel_sync,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
},
}, qede_udp_tunnels_vxlan = {
.sync_table = qede_udp_tunnel_sync,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
},
}, qede_udp_tunnels_geneve = {
.sync_table = qede_udp_tunnel_sync,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
},
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 847fa62c80df..e338bfc8b7b2 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -4,6 +4,7 @@
* Copyright (c) 2019-2020 Marvell International Ltd.
*/
+#include <linux/array_size.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
@@ -960,7 +961,7 @@ static inline void qede_tpa_cont(struct qede_dev *edev,
{
int i;
- for (i = 0; cqe->len_list[i]; i++)
+ for (i = 0; cqe->len_list[i] && i < ARRAY_SIZE(cqe->len_list); i++)
qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
le16_to_cpu(cqe->len_list[i]));
@@ -985,7 +986,7 @@ static int qede_tpa_end(struct qede_dev *edev,
dma_unmap_page(rxq->dev, tpa_info->buffer.mapping,
PAGE_SIZE, rxq->data_direction);
- for (i = 0; cqe->len_list[i]; i++)
+ for (i = 0; cqe->len_list[i] && i < ARRAY_SIZE(cqe->len_list); i++)
qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
le16_to_cpu(cqe->len_list[i]));
if (unlikely(i > 1))
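Both TPA loops above now bound the index by ARRAY_SIZE(cqe->len_list) in addition to the zero-length terminator. Note that, as written, the terminator test is evaluated before the bound, so a fully populated list would still read one element past the array before the loop stops; a defensively ordered variant checks the bound first. The standalone sketch below shows that ordering; LEN_LIST_SIZE and the sample data are hypothetical stand-ins for the CQE field.

#include <stdint.h>
#include <stdio.h>

#define LEN_LIST_SIZE 4	/* hypothetical; stands in for ARRAY_SIZE(cqe->len_list) */

int main(void)
{
	uint16_t len_list[LEN_LIST_SIZE] = { 64, 128, 0, 0 };
	size_t i;

	/* Bound checked before the element is read, so a fully populated list
	 * never dereferences one past the end of the array.
	 */
	for (i = 0; i < LEN_LIST_SIZE && len_list[i]; i++)
		printf("frag %zu: %u bytes\n", i, len_list[i]);

	return 0;
}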
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 99df00c30b8c..66ab1b9d65a1 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -203,7 +203,7 @@ static struct pci_driver qede_pci_driver = {
};
static struct qed_eth_cb_ops qede_ll_ops = {
- {
+ .common = {
#ifdef CONFIG_RFS_ACCEL
.arfs_filter_op = qede_arfs_filter_op,
#endif
@@ -506,25 +506,6 @@ static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
}
#endif
-static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct qede_dev *edev = netdev_priv(dev);
-
- if (!netif_running(dev))
- return -EAGAIN;
-
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return qede_ptp_hw_ts(edev, ifr);
- default:
- DP_VERBOSE(edev, QED_MSG_DEBUG,
- "default IOCTL cmd 0x%x\n", cmd);
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
static void qede_fp_sb_dump(struct qede_dev *edev, struct qede_fastpath *fp)
{
char *p_sb = (char *)fp->sb_info->sb_virt;
@@ -717,7 +698,6 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_set_mac_address = qede_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = qede_change_mtu,
- .ndo_eth_ioctl = qede_ioctl,
.ndo_tx_timeout = qede_tx_timeout,
#ifdef CONFIG_QED_SRIOV
.ndo_set_vf_mac = qede_set_vf_mac,
@@ -742,6 +722,8 @@ static const struct net_device_ops qede_netdev_ops = {
#endif
.ndo_xdp_xmit = qede_xdp_transmit,
.ndo_setup_tc = qede_setup_tc_offload,
+ .ndo_hwtstamp_get = qede_hwtstamp_get,
+ .ndo_hwtstamp_set = qede_hwtstamp_set,
};
static const struct net_device_ops qede_netdev_vf_ops = {
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 9d6399a5c780..d351be5fbda1 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -181,7 +181,7 @@ static void qede_ptp_task(struct work_struct *work)
}
/* Read the PHC. This API is invoked with ptp_lock held. */
-static u64 qede_ptp_read_cc(const struct cyclecounter *cc)
+static u64 qede_ptp_read_cc(struct cyclecounter *cc)
{
struct qede_dev *edev;
struct qede_ptp *ptp;
@@ -199,18 +199,15 @@ static u64 qede_ptp_read_cc(const struct cyclecounter *cc)
return phc_cycles;
}
-static int qede_ptp_cfg_filters(struct qede_dev *edev)
+static void qede_ptp_cfg_filters(struct qede_dev *edev)
{
enum qed_ptp_hwtstamp_tx_type tx_type = QED_PTP_HWTSTAMP_TX_ON;
enum qed_ptp_filter_type rx_filter = QED_PTP_FILTER_NONE;
struct qede_ptp *ptp = edev->ptp;
- if (!ptp)
- return -EIO;
-
if (!ptp->hw_ts_ioctl_called) {
DP_INFO(edev, "TS IOCTL not called\n");
- return 0;
+ return;
}
switch (ptp->tx_type) {
@@ -223,11 +220,6 @@ static int qede_ptp_cfg_filters(struct qede_dev *edev)
clear_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags);
tx_type = QED_PTP_HWTSTAMP_TX_OFF;
break;
-
- case HWTSTAMP_TX_ONESTEP_SYNC:
- case HWTSTAMP_TX_ONESTEP_P2P:
- DP_ERR(edev, "One-step timestamping is not supported\n");
- return -ERANGE;
}
spin_lock_bh(&ptp->lock);
@@ -286,39 +278,65 @@ static int qede_ptp_cfg_filters(struct qede_dev *edev)
ptp->ops->cfg_filters(edev->cdev, rx_filter, tx_type);
spin_unlock_bh(&ptp->lock);
-
- return 0;
}
-int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr)
+int qede_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
+ struct qede_dev *edev = netdev_priv(netdev);
struct qede_ptp *ptp;
- int rc;
+
+ if (!netif_running(netdev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device is down");
+ return -EAGAIN;
+ }
ptp = edev->ptp;
- if (!ptp)
+ if (!ptp) {
+ NL_SET_ERR_MSG_MOD(extack, "HW timestamping is not supported");
return -EIO;
-
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ }
DP_VERBOSE(edev, QED_MSG_DEBUG,
- "HWTSTAMP IOCTL: Requested tx_type = %d, requested rx_filters = %d\n",
- config.tx_type, config.rx_filter);
+ "HWTSTAMP SET: Requested tx_type = %d, requested rx_filters = %d\n",
+ config->tx_type, config->rx_filter);
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_ON:
+ case HWTSTAMP_TX_OFF:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "One-step timestamping is not supported");
+ return -ERANGE;
+ }
ptp->hw_ts_ioctl_called = 1;
- ptp->tx_type = config.tx_type;
- ptp->rx_filter = config.rx_filter;
+ ptp->tx_type = config->tx_type;
+ ptp->rx_filter = config->rx_filter;
- rc = qede_ptp_cfg_filters(edev);
- if (rc)
- return rc;
+ qede_ptp_cfg_filters(edev);
+
+ config->rx_filter = ptp->rx_filter;
+
+ return 0;
+}
- config.rx_filter = ptp->rx_filter;
+int qede_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+ struct qede_ptp *ptp;
- return copy_to_user(ifr->ifr_data, &config,
- sizeof(config)) ? -EFAULT : 0;
+ ptp = edev->ptp;
+ if (!ptp)
+ return -EIO;
+
+ config->tx_type = ptp->tx_type;
+ config->rx_filter = ptp->rx_filter;
+
+ return 0;
}
int qede_ptp_get_ts_info(struct qede_dev *edev, struct kernel_ethtool_ts_info *info)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.h b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
index adafc894797e..88f168395812 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.h
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
@@ -14,7 +14,11 @@
void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb);
void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb);
-int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req);
+int qede_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int qede_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void qede_ptp_disable(struct qede_dev *edev);
int qede_ptp_enable(struct qede_dev *edev);
int qede_ptp_get_ts_info(struct qede_dev *edev, struct kernel_ethtool_ts_info *ts);
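The qede hunks above retire the driver-private SIOCSHWTSTAMP ioctl path: qede_ptp_hw_ts() is replaced by ndo_hwtstamp_get()/ndo_hwtstamp_set(), which hand the driver a kernel_hwtstamp_config already copied from user space (plus an extack for error reporting), so the copy_from_user/copy_to_user boilerplate disappears. A minimal sketch of the shape, with example_* placeholders and stubbed bodies:

/* Sketch of the ndo-based timestamping hooks: the core handles the user-space
 * copies, the driver only validates and applies the configuration.
 */
static int example_hwtstamp_set(struct net_device *dev,
				struct kernel_hwtstamp_config *config,
				struct netlink_ext_ack *extack)
{
	if (config->tx_type != HWTSTAMP_TX_ON &&
	    config->tx_type != HWTSTAMP_TX_OFF) {
		NL_SET_ERR_MSG_MOD(extack, "One-step timestamping is not supported");
		return -ERANGE;
	}
	/* program the hardware filters from config->rx_filter here */
	return 0;
}

static int example_hwtstamp_get(struct net_device *dev,
				struct kernel_hwtstamp_config *config)
{
	config->tx_type = HWTSTAMP_TX_OFF;	/* report current settings */
	config->rx_filter = HWTSTAMP_FILTER_NONE;
	return 0;
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_hwtstamp_get = example_hwtstamp_get,
	.ndo_hwtstamp_set = example_hwtstamp_set,
};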
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index fc78bc959ded..fca94a69c777 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -1501,7 +1501,7 @@ static int ql_finish_auto_neg(struct ql3_adapter *qdev)
"Remote error detected. Calling ql_port_start()\n");
/*
* ql_port_start() is shared code and needs
- * to lock the PHY on it's own.
+ * to lock the PHY on its own.
*/
ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
if (ql_port_start(qdev)) /* Restart port */
@@ -3420,7 +3420,7 @@ static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
pci_disable_msi(qdev->pdev);
}
- del_timer_sync(&qdev->adapter_timer);
+ timer_delete_sync(&qdev->adapter_timer);
napi_disable(&qdev->napi);
@@ -3735,7 +3735,7 @@ static void ql_get_board_info(struct ql3_adapter *qdev)
static void ql3xxx_timer(struct timer_list *t)
{
- struct ql3_adapter *qdev = from_timer(qdev, t, adapter_timer);
+ struct ql3_adapter *qdev = timer_container_of(qdev, t, adapter_timer);
queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
}
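The qla3xxx hunks are mechanical renames: del_timer_sync() becomes timer_delete_sync() and from_timer() becomes timer_container_of(), with unchanged semantics. A short sketch of the renamed helpers, reusing the adapter fields visible in the hunks:

/* Sketch: timer_container_of() recovers the enclosing structure from the
 * timer_list pointer; timer_delete_sync() waits for a running callback to
 * finish before removing the timer.
 */
static void example_timer_fn(struct timer_list *t)
{
	struct ql3_adapter *qdev = timer_container_of(qdev, t, adapter_timer);

	queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
}

static void example_teardown(struct ql3_adapter *qdev)
{
	timer_delete_sync(&qdev->adapter_timer);
}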
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index d7cdea8f604d..91e7b38143ea 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -4215,7 +4215,6 @@ static pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *pdev)
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
int err = 0;
- pdev->error_state = pci_channel_io_normal;
err = pci_enable_device(pdev);
if (err)
goto disconnect;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index b733374b4dc5..6145252d8ff8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -2051,7 +2051,7 @@ static void qlcnic_83xx_init_hw(struct qlcnic_adapter *p_dev)
dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
}
-/* POST FW related definations*/
+/* POST FW related definitions*/
#define QLC_83XX_POST_SIGNATURE_REG 0x41602014
#define QLC_83XX_POST_MODE_REG 0x41602018
#define QLC_83XX_POST_FAST_MODE 0
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index b3588a1ebc25..e051d8c7a28d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -367,7 +367,7 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *netdev,
- const unsigned char *addr, u16 vid,
+ const unsigned char *addr, u16 vid, bool *notified,
struct netlink_ext_ack *extack)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
@@ -394,7 +394,7 @@ static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *netdev,
const unsigned char *addr, u16 vid, u16 flags,
- struct netlink_ext_ack *extack)
+ bool *notified, struct netlink_ext_ack *extack)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int err = 0;
@@ -486,7 +486,6 @@ static int qlcnic_udp_tunnel_sync(struct net_device *dev, unsigned int table)
static const struct udp_tunnel_nic_info qlcnic_udp_tunnels = {
.sync_table = qlcnic_udp_tunnel_sync,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
},
@@ -3767,8 +3766,6 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
- pdev->error_state = pci_channel_io_normal;
-
err = pci_enable_device(pdev);
if (err)
return err;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index f9dd50152b1e..d57b976b9040 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -454,8 +454,10 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
num_vlans = sriov->num_allowed_vlans;
sriov->allowed_vlans = kcalloc(num_vlans, sizeof(u16), GFP_KERNEL);
- if (!sriov->allowed_vlans)
+ if (!sriov->allowed_vlans) {
+ qlcnic_sriov_free_vlans(adapter);
return -ENOMEM;
+ }
vlans = (u16 *)&cmd->rsp.arg[3];
for (i = 0; i < num_vlans; i++)
@@ -1482,8 +1484,11 @@ static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_o
}
cmd_op = (cmd.rsp.arg[0] & 0xff);
- if (cmd.rsp.arg[0] >> 25 == 2)
- return 2;
+ if (cmd.rsp.arg[0] >> 25 == 2) {
+ ret = 2;
+ goto out;
+ }
+
if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
set_bit(QLC_BC_VF_STATE, &vf->state);
else
@@ -2167,8 +2172,10 @@ int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
vf = &sriov->vf_info[i];
vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
sizeof(*vf->sriov_vlans), GFP_KERNEL);
- if (!vf->sriov_vlans)
+ if (!vf->sriov_vlans) {
+ qlcnic_sriov_free_vlans(adapter);
return -ENOMEM;
+ }
}
return 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 74125188beb8..5296d9a6ee83 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -264,7 +264,7 @@ static int qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
}
static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -281,7 +281,7 @@ static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
}
static ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -310,7 +310,7 @@ static int qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
}
static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -332,7 +332,7 @@ static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
}
static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -396,7 +396,7 @@ static int validate_pm_config(struct qlcnic_adapter *adapter,
static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -446,7 +446,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -539,7 +539,7 @@ static int validate_esw_config(struct qlcnic_adapter *adapter,
static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -623,7 +623,7 @@ out:
static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -675,7 +675,7 @@ static int validate_npar_config(struct qlcnic_adapter *adapter,
static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -722,7 +722,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -769,7 +769,7 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -804,7 +804,7 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -839,7 +839,7 @@ static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -868,7 +868,7 @@ static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -898,7 +898,7 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -938,7 +938,7 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -1115,7 +1115,7 @@ static int qlcnic_83xx_sysfs_flash_write(struct qlcnic_adapter *adapter,
static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -1217,7 +1217,6 @@ static const struct bin_attribute bin_attr_pci_config = {
.attr = { .name = "pci_config", .mode = 0644 },
.size = 0,
.read = qlcnic_sysfs_read_pci_config,
- .write = NULL,
};
static const struct bin_attribute bin_attr_port_stats = {
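The qlcnic sysfs hunks constify the bin_attribute pointer taken by every read/write handler and drop a redundant ".write = NULL" initializer (unnamed members of a designated initializer are already zero). A sketch of the resulting pattern; example_read and example_bin_attr are placeholders for a read-only attribute.

/* Sketch of the constified sysfs binary-attribute pattern: handlers take a
 * const attribute pointer, and unset members such as .write are omitted.
 */
static ssize_t example_read(struct file *filp, struct kobject *kobj,
			    const struct bin_attribute *attr, char *buf,
			    loff_t offset, size_t size)
{
	return 0;
}

static const struct bin_attribute example_bin_attr = {
	.attr = { .name = "pci_config", .mode = 0444 },
	.read = example_read,
};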
diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig
index 9210ff360fdc..ba7efb108637 100644
--- a/drivers/net/ethernet/qualcomm/Kconfig
+++ b/drivers/net/ethernet/qualcomm/Kconfig
@@ -52,7 +52,6 @@ config QCOM_EMAC
depends on HAS_DMA && HAS_IOMEM
select CRC32
select PHYLIB
- select MDIO_DEVRES
help
This driver supports the Qualcomm Technologies, Inc. Gigabit
Ethernet Media Access Controller (EMAC). The controller
@@ -61,6 +60,21 @@ config QCOM_EMAC
low power, Receive-Side Scaling (RSS), and IEEE 1588-2008
Precision Clock Synchronization Protocol.
+config QCOM_PPE
+ tristate "Qualcomm Technologies, Inc. PPE Ethernet support"
+ depends on COMMON_CLK && HAS_IOMEM && OF
+ depends on ARCH_QCOM || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ This driver supports the Qualcomm Technologies, Inc. packet
+ process engine (PPE) available with IPQ SoC. The PPE includes
+ the Ethernet MACs, Ethernet DMA (EDMA) and switch core that
+ supports L3 flow offload, L2 switch function, RSS and tunnel
+ offload.
+
+ To compile this driver as a module, choose M here. The module
+ will be called qcom-ppe.
+
source "drivers/net/ethernet/qualcomm/rmnet/Kconfig"
endif # NET_VENDOR_QUALCOMM
diff --git a/drivers/net/ethernet/qualcomm/Makefile b/drivers/net/ethernet/qualcomm/Makefile
index 9250976dd884..166a59aea363 100644
--- a/drivers/net/ethernet/qualcomm/Makefile
+++ b/drivers/net/ethernet/qualcomm/Makefile
@@ -11,4 +11,5 @@ qcauart-objs := qca_uart.o
obj-y += emac/
+obj-$(CONFIG_QCOM_PPE) += ppe/
obj-$(CONFIG_RMNET) += rmnet/
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
index a508ebc4b206..28b3a7071e58 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
@@ -419,7 +419,7 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
goto error_put_device;
}
- /* v2 SGMII has a per-lane digital digital, so parse it if it exists */
+ /* v2 SGMII has a per-lane digital, so parse it if it exists */
res = platform_get_resource(sgmii_pdev, IORESOURCE_MEM, 1);
if (res) {
phy->digital = ioremap(res->start, resource_size(res));
diff --git a/drivers/net/ethernet/qualcomm/ppe/Makefile b/drivers/net/ethernet/qualcomm/ppe/Makefile
new file mode 100644
index 000000000000..9e60b2400c16
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the device driver of PPE (Packet Process Engine) in IPQ SoC
+#
+
+obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
+qcom-ppe-objs := ppe.o ppe_config.o ppe_debugfs.o
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe.c b/drivers/net/ethernet/qualcomm/ppe/ppe.c
new file mode 100644
index 000000000000..be747510d947
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+/* PPE platform device probe, DTSI parser and PPE clock initializations. */
+
+#include <linux/clk.h>
+#include <linux/interconnect.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include "ppe.h"
+#include "ppe_config.h"
+#include "ppe_debugfs.h"
+
+#define PPE_PORT_MAX 8
+#define PPE_CLK_RATE 353000000
+
+/* ICC clocks for enabling PPE device. The avg_bw and peak_bw with value 0
+ * will be updated by the clock rate of PPE.
+ */
+static const struct icc_bulk_data ppe_icc_data[] = {
+ {
+ .name = "ppe",
+ .avg_bw = 0,
+ .peak_bw = 0,
+ },
+ {
+ .name = "ppe_cfg",
+ .avg_bw = 0,
+ .peak_bw = 0,
+ },
+ {
+ .name = "qos_gen",
+ .avg_bw = 6000,
+ .peak_bw = 6000,
+ },
+ {
+ .name = "timeout_ref",
+ .avg_bw = 6000,
+ .peak_bw = 6000,
+ },
+ {
+ .name = "nssnoc_memnoc",
+ .avg_bw = 533333,
+ .peak_bw = 533333,
+ },
+ {
+ .name = "memnoc_nssnoc",
+ .avg_bw = 533333,
+ .peak_bw = 533333,
+ },
+ {
+ .name = "memnoc_nssnoc_1",
+ .avg_bw = 533333,
+ .peak_bw = 533333,
+ },
+};
+
+static const struct regmap_range ppe_readable_ranges[] = {
+ regmap_reg_range(0x0, 0x1ff), /* Global */
+ regmap_reg_range(0x400, 0x5ff), /* LPI CSR */
+ regmap_reg_range(0x1000, 0x11ff), /* GMAC0 */
+ regmap_reg_range(0x1200, 0x13ff), /* GMAC1 */
+ regmap_reg_range(0x1400, 0x15ff), /* GMAC2 */
+ regmap_reg_range(0x1600, 0x17ff), /* GMAC3 */
+ regmap_reg_range(0x1800, 0x19ff), /* GMAC4 */
+ regmap_reg_range(0x1a00, 0x1bff), /* GMAC5 */
+ regmap_reg_range(0xb000, 0xefff), /* PRX CSR */
+ regmap_reg_range(0xf000, 0x1efff), /* IPE */
+ regmap_reg_range(0x20000, 0x5ffff), /* PTX CSR */
+ regmap_reg_range(0x60000, 0x9ffff), /* IPE L2 CSR */
+ regmap_reg_range(0xb0000, 0xeffff), /* IPO CSR */
+ regmap_reg_range(0x100000, 0x17ffff), /* IPE PC */
+ regmap_reg_range(0x180000, 0x1bffff), /* PRE IPO CSR */
+ regmap_reg_range(0x1d0000, 0x1dffff), /* Tunnel parser */
+ regmap_reg_range(0x1e0000, 0x1effff), /* Ingress parse */
+ regmap_reg_range(0x200000, 0x2fffff), /* IPE L3 */
+ regmap_reg_range(0x300000, 0x3fffff), /* IPE tunnel */
+ regmap_reg_range(0x400000, 0x4fffff), /* Scheduler */
+ regmap_reg_range(0x500000, 0x503fff), /* XGMAC0 */
+ regmap_reg_range(0x504000, 0x507fff), /* XGMAC1 */
+ regmap_reg_range(0x508000, 0x50bfff), /* XGMAC2 */
+ regmap_reg_range(0x50c000, 0x50ffff), /* XGMAC3 */
+ regmap_reg_range(0x510000, 0x513fff), /* XGMAC4 */
+ regmap_reg_range(0x514000, 0x517fff), /* XGMAC5 */
+ regmap_reg_range(0x600000, 0x6fffff), /* BM */
+ regmap_reg_range(0x800000, 0x9fffff), /* QM */
+ regmap_reg_range(0xb00000, 0xbef800), /* EDMA */
+};
+
+static const struct regmap_access_table ppe_reg_table = {
+ .yes_ranges = ppe_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ppe_readable_ranges),
+};
+
+static const struct regmap_config regmap_config_ipq9574 = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .rd_table = &ppe_reg_table,
+ .wr_table = &ppe_reg_table,
+ .max_register = 0xbef800,
+ .fast_io = true,
+};
+
+static int ppe_clock_init_and_reset(struct ppe_device *ppe_dev)
+{
+ unsigned long ppe_rate = ppe_dev->clk_rate;
+ struct device *dev = ppe_dev->dev;
+ struct reset_control *rstc;
+ struct clk_bulk_data *clks;
+ struct clk *clk;
+ int ret, i;
+
+ for (i = 0; i < ppe_dev->num_icc_paths; i++) {
+ ppe_dev->icc_paths[i].name = ppe_icc_data[i].name;
+ ppe_dev->icc_paths[i].avg_bw = ppe_icc_data[i].avg_bw ? :
+ Bps_to_icc(ppe_rate);
+
+ /* PPE does not have an explicit peak bandwidth requirement,
+ * so set the peak bandwidth to be equal to the average
+ * bandwidth.
+ */
+ ppe_dev->icc_paths[i].peak_bw = ppe_icc_data[i].peak_bw ? :
+ Bps_to_icc(ppe_rate);
+ }
+
+ ret = devm_of_icc_bulk_get(dev, ppe_dev->num_icc_paths,
+ ppe_dev->icc_paths);
+ if (ret)
+ return ret;
+
+ ret = icc_bulk_set_bw(ppe_dev->num_icc_paths, ppe_dev->icc_paths);
+ if (ret)
+ return ret;
+
+ /* The PPE clocks have a common parent clock. Setting the clock
+ * rate of "ppe" ensures the clock rate of all PPE clocks is
+ * configured to the same rate.
+ */
+ clk = devm_clk_get(dev, "ppe");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = clk_set_rate(clk, ppe_rate);
+ if (ret)
+ return ret;
+
+ ret = devm_clk_bulk_get_all_enabled(dev, &clks);
+ if (ret < 0)
+ return ret;
+
+ /* Reset the PPE. */
+ rstc = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(rstc))
+ return PTR_ERR(rstc);
+
+ ret = reset_control_assert(rstc);
+ if (ret)
+ return ret;
+
+ /* A 10 ms delay while reset is asserted is necessary to reset the PPE. */

+ usleep_range(10000, 11000);
+
+ return reset_control_deassert(rstc);
+}
+
+static int qcom_ppe_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ppe_device *ppe_dev;
+ void __iomem *base;
+ int ret, num_icc;
+
+ num_icc = ARRAY_SIZE(ppe_icc_data);
+ ppe_dev = devm_kzalloc(dev, struct_size(ppe_dev, icc_paths, num_icc),
+ GFP_KERNEL);
+ if (!ppe_dev)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return dev_err_probe(dev, PTR_ERR(base), "PPE ioremap failed\n");
+
+ ppe_dev->regmap = devm_regmap_init_mmio(dev, base, &regmap_config_ipq9574);
+ if (IS_ERR(ppe_dev->regmap))
+ return dev_err_probe(dev, PTR_ERR(ppe_dev->regmap),
+ "PPE initialize regmap failed\n");
+ ppe_dev->dev = dev;
+ ppe_dev->clk_rate = PPE_CLK_RATE;
+ ppe_dev->num_ports = PPE_PORT_MAX;
+ ppe_dev->num_icc_paths = num_icc;
+
+ ret = ppe_clock_init_and_reset(ppe_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "PPE clock config failed\n");
+
+ ret = ppe_hw_config(ppe_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "PPE HW config failed\n");
+
+ ppe_debugfs_setup(ppe_dev);
+ platform_set_drvdata(pdev, ppe_dev);
+
+ return 0;
+}
+
+static void qcom_ppe_remove(struct platform_device *pdev)
+{
+ struct ppe_device *ppe_dev;
+
+ ppe_dev = platform_get_drvdata(pdev);
+ ppe_debugfs_teardown(ppe_dev);
+}
+
+static const struct of_device_id qcom_ppe_of_match[] = {
+ { .compatible = "qcom,ipq9574-ppe" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_ppe_of_match);
+
+static struct platform_driver qcom_ppe_driver = {
+ .driver = {
+ .name = "qcom_ppe",
+ .of_match_table = qcom_ppe_of_match,
+ },
+ .probe = qcom_ppe_probe,
+ .remove = qcom_ppe_remove,
+};
+module_platform_driver(qcom_ppe_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. IPQ PPE driver");
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe.h b/drivers/net/ethernet/qualcomm/ppe/ppe.h
new file mode 100644
index 000000000000..27458f0bc206
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef __PPE_H__
+#define __PPE_H__
+
+#include <linux/compiler.h>
+#include <linux/interconnect.h>
+
+struct device;
+struct regmap;
+struct dentry;
+
+/**
+ * struct ppe_device - PPE device private data.
+ * @dev: PPE device structure.
+ * @regmap: PPE register map.
+ * @clk_rate: PPE clock rate.
+ * @num_ports: Number of PPE ports.
+ * @debugfs_root: Debugfs root entry.
+ * @num_icc_paths: Number of interconnect paths.
+ * @icc_paths: Interconnect path array.
+ *
+ * PPE device is the instance of PPE hardware, which is used to
+ * configure PPE packet process modules such as BM (buffer management),
+ * QM (queue management), and scheduler.
+ */
+struct ppe_device {
+ struct device *dev;
+ struct regmap *regmap;
+ unsigned long clk_rate;
+ unsigned int num_ports;
+ struct dentry *debugfs_root;
+ unsigned int num_icc_paths;
+ struct icc_bulk_data icc_paths[] __counted_by(num_icc_paths);
+};
+#endif
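struct ppe_device above mainly carries a regmap plus clock, port and interconnect bookkeeping; other PPE modules (such as the ppe_config.c code that follows) program the hardware through that regmap. A hedged sketch of the typical access pattern using the generic regmap helpers; EXAMPLE_REG, EXAMPLE_EN and example_ppe_enable are made-up placeholders, not real PPE registers or driver functions.

/* Illustrative only: how a PPE sub-module would typically touch hardware
 * via ppe_dev->regmap.
 */
#define EXAMPLE_REG	0x0	/* hypothetical register offset */
#define EXAMPLE_EN	BIT(0)	/* hypothetical enable bit */

static int example_ppe_enable(struct ppe_device *ppe_dev)
{
	u32 val;
	int ret;

	ret = regmap_read(ppe_dev->regmap, EXAMPLE_REG, &val);
	if (ret)
		return ret;

	/* Set the enable bit without disturbing the other fields. */
	return regmap_update_bits(ppe_dev->regmap, EXAMPLE_REG,
				  EXAMPLE_EN, EXAMPLE_EN);
}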
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
new file mode 100644
index 000000000000..e9a0e22907a6
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
@@ -0,0 +1,2034 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+/* PPE HW initialization configs such as BM(buffer management),
+ * QM(queue management) and scheduler configs.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+#include "ppe.h"
+#include "ppe_config.h"
+#include "ppe_regs.h"
+
+#define PPE_QUEUE_SCH_PRI_NUM 8
+
+/**
+ * struct ppe_bm_port_config - PPE BM port configuration.
+ * @port_id_start: The first BM port ID to configure.
+ * @port_id_end: The last BM port ID to configure.
+ * @pre_alloc: BM port dedicated buffer number.
+ * @in_fly_buf: Buffer number for receiving the packet after pause frame sent.
+ * @ceil: Ceil to generate the back pressure.
+ * @weight: Weight value.
+ * @resume_offset: Resume offset from the threshold value.
+ * @resume_ceil: Ceil to resume from the back pressure state.
+ * @dynamic: Dynamic threshold used or not.
+ *
+ * This is for configuring the threshold that impacts the port
+ * flow control.
+ */
+struct ppe_bm_port_config {
+ unsigned int port_id_start;
+ unsigned int port_id_end;
+ unsigned int pre_alloc;
+ unsigned int in_fly_buf;
+ unsigned int ceil;
+ unsigned int weight;
+ unsigned int resume_offset;
+ unsigned int resume_ceil;
+ bool dynamic;
+};
+
+/**
+ * struct ppe_qm_queue_config - PPE queue config.
+ * @queue_start: PPE start of queue ID.
+ * @queue_end: PPE end of queue ID.
+ * @prealloc_buf: Queue dedicated buffer number.
+ * @ceil: Ceil to start drop packet from queue.
+ * @weight: Weight value.
+ * @resume_offset: Resume offset from the threshold.
+ * @dynamic: Threshold value is decided dynamically or statically.
+ *
+ * Queue configuration decides the threshold to drop packet from PPE
+ * hardware queue.
+ */
+struct ppe_qm_queue_config {
+ unsigned int queue_start;
+ unsigned int queue_end;
+ unsigned int prealloc_buf;
+ unsigned int ceil;
+ unsigned int weight;
+ unsigned int resume_offset;
+ bool dynamic;
+};
+
+/**
+ * enum ppe_scheduler_direction - PPE scheduler direction for packet.
+ * @PPE_SCH_INGRESS: Scheduler for the packet on ingress,
+ * @PPE_SCH_EGRESS: Scheduler for the packet on egress,
+ */
+enum ppe_scheduler_direction {
+ PPE_SCH_INGRESS = 0,
+ PPE_SCH_EGRESS = 1,
+};
+
+/**
+ * struct ppe_scheduler_bm_config - PPE arbitration for buffer config.
+ * @valid: Arbitration entry valid or not.
+ * @dir: Arbitration entry for egress or ingress.
+ * @port: Port ID to use arbitration entry.
+ * @backup_port_valid: Backup port valid or not.
+ * @backup_port: Backup port ID to use.
+ *
+ * Configure the scheduler settings for accessing and releasing the PPE buffers.
+ */
+struct ppe_scheduler_bm_config {
+ bool valid;
+ enum ppe_scheduler_direction dir;
+ unsigned int port;
+ bool backup_port_valid;
+ unsigned int backup_port;
+};
+
+/**
+ * struct ppe_scheduler_qm_config - PPE arbitration for scheduler config.
+ * @ensch_port_bmp: Port bit map for enqueue scheduler.
+ * @ensch_port: Port ID to enqueue scheduler.
+ * @desch_port: Port ID to dequeue scheduler.
+ * @desch_backup_port_valid: Dequeue for the backup port valid or not.
+ * @desch_backup_port: Backup port ID to dequeue scheduler.
+ *
+ * Configure the scheduler settings for enqueuing and dequeuing packets on
+ * the PPE port.
+ */
+struct ppe_scheduler_qm_config {
+ unsigned int ensch_port_bmp;
+ unsigned int ensch_port;
+ unsigned int desch_port;
+ bool desch_backup_port_valid;
+ unsigned int desch_backup_port;
+};
+
+/**
+ * struct ppe_scheduler_port_config - PPE port scheduler config.
+ * @port: Port ID to be scheduled.
+ * @flow_level: Scheduler flow level or not.
+ * @node_id: Node ID, for level 0, queue ID is used.
+ * @loop_num: Loop number of scheduler config.
+ * @pri_max: Max priority configured.
+ * @flow_id: Strict priority ID.
+ * @drr_node_id: Node ID for scheduler.
+ *
+ * PPE port scheduler configuration which decides the priority in the
+ * packet scheduler for the egress port.
+ */
+struct ppe_scheduler_port_config {
+ unsigned int port;
+ bool flow_level;
+ unsigned int node_id;
+ unsigned int loop_num;
+ unsigned int pri_max;
+ unsigned int flow_id;
+ unsigned int drr_node_id;
+};
+
+/**
+ * struct ppe_port_schedule_resource - PPE port scheduler resource.
+ * @ucastq_start: Unicast queue start ID.
+ * @ucastq_end: Unicast queue end ID.
+ * @mcastq_start: Multicast queue start ID.
+ * @mcastq_end: Multicast queue end ID.
+ * @flow_id_start: Flow start ID.
+ * @flow_id_end: Flow end ID.
+ * @l0node_start: Scheduler node start ID for queue level.
+ * @l0node_end: Scheduler node end ID for queue level.
+ * @l1node_start: Scheduler node start ID for flow level.
+ * @l1node_end: Scheduler node end ID for flow level.
+ *
+ * PPE scheduler resource allocated among the PPE ports.
+ */
+struct ppe_port_schedule_resource {
+ unsigned int ucastq_start;
+ unsigned int ucastq_end;
+ unsigned int mcastq_start;
+ unsigned int mcastq_end;
+ unsigned int flow_id_start;
+ unsigned int flow_id_end;
+ unsigned int l0node_start;
+ unsigned int l0node_end;
+ unsigned int l1node_start;
+ unsigned int l1node_end;
+};
+
+/* There are a total of 2048 buffers available in the PPE, out of which some
+ * buffers are reserved for specific purposes per PPE port. The remaining
+ * pool of 1550 buffers is assigned to the general 'group0', which is shared
+ * among all ports of the PPE.
+ */
+static const int ipq9574_ppe_bm_group_config = 1550;
+
+/* The buffer configurations per PPE port. There are 15 BM ports and
+ * 4 BM groups supported by PPE. BM port (0-7) is for EDMA port 0,
+ * BM port (8-13) is for PPE physical port 1-6 and BM port 14 is for
+ * EIP port.
+ */
+static const struct ppe_bm_port_config ipq9574_ppe_bm_port_config[] = {
+ {
+ /* Buffer configuration for the BM port ID 0 of EDMA. */
+ .port_id_start = 0,
+ .port_id_end = 0,
+ .pre_alloc = 0,
+ .in_fly_buf = 100,
+ .ceil = 1146,
+ .weight = 7,
+ .resume_offset = 8,
+ .resume_ceil = 0,
+ .dynamic = true,
+ },
+ {
+ /* Buffer configuration for the BM port ID 1-7 of EDMA. */
+ .port_id_start = 1,
+ .port_id_end = 7,
+ .pre_alloc = 0,
+ .in_fly_buf = 100,
+ .ceil = 250,
+ .weight = 4,
+ .resume_offset = 36,
+ .resume_ceil = 0,
+ .dynamic = true,
+ },
+ {
+ /* Buffer configuration for the BM port ID 8-13 of PPE ports. */
+ .port_id_start = 8,
+ .port_id_end = 13,
+ .pre_alloc = 0,
+ .in_fly_buf = 128,
+ .ceil = 250,
+ .weight = 4,
+ .resume_offset = 36,
+ .resume_ceil = 0,
+ .dynamic = true,
+ },
+ {
+ /* Buffer configuration for the BM port ID 14 of EIP. */
+ .port_id_start = 14,
+ .port_id_end = 14,
+ .pre_alloc = 0,
+ .in_fly_buf = 40,
+ .ceil = 250,
+ .weight = 4,
+ .resume_offset = 36,
+ .resume_ceil = 0,
+ .dynamic = true,
+ },
+};
+
+/* QM fetches the packet from PPE buffer management for transmitting the
+ * packet out. The QM group configuration limits the total number of buffers
+ * enqueued by all PPE hardware queues.
+ * There are a total of 2048 buffers available, out of which some buffers are
+ * dedicated to hardware exception handlers. The remaining buffers are
+ * assigned to the general 'group0', which is the group assigned to all
+ * queues by default.
+ */
+static const int ipq9574_ppe_qm_group_config = 2000;
+
+/* Default QM settings for unicast and multicast queues for IPQ9574. */
+static const struct ppe_qm_queue_config ipq9574_ppe_qm_queue_config[] = {
+ {
+ /* QM settings for unicast queues 0 to 255. */
+ .queue_start = 0,
+ .queue_end = 255,
+ .prealloc_buf = 0,
+ .ceil = 1200,
+ .weight = 7,
+ .resume_offset = 36,
+ .dynamic = true,
+ },
+ {
+ /* QM settings for multicast queues 256 to 299. */
+ .queue_start = 256,
+ .queue_end = 299,
+ .prealloc_buf = 0,
+ .ceil = 250,
+ .weight = 0,
+ .resume_offset = 36,
+ .dynamic = false,
+ },
+};
+
+/* PPE scheduler configuration for BM includes multiple entries. Each entry
+ * indicates the primary port to be assigned the buffers for the ingress or
+ * to release the buffers for the egress. Backup port ID will be used when
+ * the primary port ID is down.
+ */
+static const struct ppe_scheduler_bm_config ipq9574_ppe_sch_bm_config[] = {
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 1, false, 0},
+ {true, PPE_SCH_EGRESS, 1, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 7, false, 0},
+ {true, PPE_SCH_EGRESS, 7, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 1, false, 0},
+ {true, PPE_SCH_EGRESS, 1, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 2, false, 0},
+ {true, PPE_SCH_EGRESS, 2, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 1, false, 0},
+ {true, PPE_SCH_EGRESS, 1, false, 0},
+ {true, PPE_SCH_INGRESS, 3, false, 0},
+ {true, PPE_SCH_EGRESS, 3, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 7, false, 0},
+ {true, PPE_SCH_EGRESS, 7, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 1, false, 0},
+ {true, PPE_SCH_EGRESS, 1, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 4, false, 0},
+ {true, PPE_SCH_EGRESS, 4, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 1, false, 0},
+ {true, PPE_SCH_EGRESS, 1, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 2, false, 0},
+ {true, PPE_SCH_EGRESS, 2, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 7, false, 0},
+ {true, PPE_SCH_EGRESS, 7, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 1, false, 0},
+ {true, PPE_SCH_EGRESS, 1, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 3, false, 0},
+ {true, PPE_SCH_EGRESS, 3, false, 0},
+ {true, PPE_SCH_INGRESS, 1, false, 0},
+ {true, PPE_SCH_EGRESS, 1, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 4, false, 0},
+ {true, PPE_SCH_EGRESS, 4, false, 0},
+ {true, PPE_SCH_INGRESS, 7, false, 0},
+ {true, PPE_SCH_EGRESS, 7, false, 0},
+};
+
+/* The PPE scheduler configuration for QM includes multiple entries. Each
+ * entry contains the ports to be dispatched for enqueueing and dequeueing.
+ * The backup dequeue port is used when the primary dequeue port is down.
+ */
+static const struct ppe_scheduler_qm_config ipq9574_ppe_sch_qm_config[] = {
+ {0x98, 6, 0, true, 1},
+ {0x94, 5, 6, true, 3},
+ {0x86, 0, 5, true, 4},
+ {0x8C, 1, 6, true, 0},
+ {0x1C, 7, 5, true, 1},
+ {0x98, 2, 6, true, 0},
+ {0x1C, 5, 7, true, 1},
+ {0x34, 3, 6, true, 0},
+ {0x8C, 4, 5, true, 1},
+ {0x98, 2, 6, true, 0},
+ {0x8C, 5, 4, true, 1},
+ {0xA8, 0, 6, true, 2},
+ {0x98, 5, 1, true, 0},
+ {0x98, 6, 5, true, 2},
+ {0x89, 1, 6, true, 4},
+ {0xA4, 3, 0, true, 1},
+ {0x8C, 5, 6, true, 4},
+ {0xA8, 0, 2, true, 1},
+ {0x98, 6, 5, true, 0},
+ {0xC4, 4, 3, true, 1},
+ {0x94, 6, 5, true, 0},
+ {0x1C, 7, 6, true, 1},
+ {0x98, 2, 5, true, 0},
+ {0x1C, 6, 7, true, 1},
+ {0x1C, 5, 6, true, 0},
+ {0x94, 3, 5, true, 1},
+ {0x8C, 4, 6, true, 0},
+ {0x94, 1, 5, true, 3},
+ {0x94, 6, 1, true, 0},
+ {0xD0, 3, 5, true, 2},
+ {0x98, 6, 0, true, 1},
+ {0x94, 5, 6, true, 3},
+ {0x94, 1, 5, true, 0},
+ {0x98, 2, 6, true, 1},
+ {0x8C, 4, 5, true, 0},
+ {0x1C, 7, 6, true, 1},
+ {0x8C, 0, 5, true, 4},
+ {0x89, 1, 6, true, 2},
+ {0x98, 5, 0, true, 1},
+ {0x94, 6, 5, true, 3},
+ {0x92, 0, 6, true, 2},
+ {0x98, 1, 5, true, 0},
+ {0x98, 6, 2, true, 1},
+ {0xD0, 0, 5, true, 3},
+ {0x94, 6, 0, true, 1},
+ {0x8C, 5, 6, true, 4},
+ {0x8C, 1, 5, true, 0},
+ {0x1C, 6, 7, true, 1},
+ {0x1C, 5, 6, true, 0},
+ {0xB0, 2, 3, true, 1},
+ {0xC4, 4, 5, true, 0},
+ {0x8C, 6, 4, true, 1},
+ {0xA4, 3, 6, true, 0},
+ {0x1C, 5, 7, true, 1},
+ {0x4C, 0, 5, true, 4},
+ {0x8C, 6, 0, true, 1},
+ {0x34, 7, 6, true, 3},
+ {0x94, 5, 0, true, 1},
+ {0x98, 6, 5, true, 2},
+};
+
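+/* PPE scheduler configuration applied per port. Each port has one flow
+ * level entry and one or more queue level entries, which are programmed by
+ * ppe_node_scheduler_config().
+ */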
+static const struct ppe_scheduler_port_config ppe_port_sch_config[] = {
+ {
+ .port = 0,
+ .flow_level = true,
+ .node_id = 0,
+ .loop_num = 1,
+ .pri_max = 1,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 0,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 8,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 16,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 24,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 32,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 40,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 48,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 56,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 256,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 264,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 1,
+ .flow_level = true,
+ .node_id = 36,
+ .loop_num = 2,
+ .pri_max = 0,
+ .flow_id = 1,
+ .drr_node_id = 8,
+ },
+ {
+ .port = 1,
+ .flow_level = false,
+ .node_id = 144,
+ .loop_num = 16,
+ .pri_max = 8,
+ .flow_id = 36,
+ .drr_node_id = 48,
+ },
+ {
+ .port = 1,
+ .flow_level = false,
+ .node_id = 272,
+ .loop_num = 4,
+ .pri_max = 4,
+ .flow_id = 36,
+ .drr_node_id = 48,
+ },
+ {
+ .port = 2,
+ .flow_level = true,
+ .node_id = 40,
+ .loop_num = 2,
+ .pri_max = 0,
+ .flow_id = 2,
+ .drr_node_id = 12,
+ },
+ {
+ .port = 2,
+ .flow_level = false,
+ .node_id = 160,
+ .loop_num = 16,
+ .pri_max = 8,
+ .flow_id = 40,
+ .drr_node_id = 64,
+ },
+ {
+ .port = 2,
+ .flow_level = false,
+ .node_id = 276,
+ .loop_num = 4,
+ .pri_max = 4,
+ .flow_id = 40,
+ .drr_node_id = 64,
+ },
+ {
+ .port = 3,
+ .flow_level = true,
+ .node_id = 44,
+ .loop_num = 2,
+ .pri_max = 0,
+ .flow_id = 3,
+ .drr_node_id = 16,
+ },
+ {
+ .port = 3,
+ .flow_level = false,
+ .node_id = 176,
+ .loop_num = 16,
+ .pri_max = 8,
+ .flow_id = 44,
+ .drr_node_id = 80,
+ },
+ {
+ .port = 3,
+ .flow_level = false,
+ .node_id = 280,
+ .loop_num = 4,
+ .pri_max = 4,
+ .flow_id = 44,
+ .drr_node_id = 80,
+ },
+ {
+ .port = 4,
+ .flow_level = true,
+ .node_id = 48,
+ .loop_num = 2,
+ .pri_max = 0,
+ .flow_id = 4,
+ .drr_node_id = 20,
+ },
+ {
+ .port = 4,
+ .flow_level = false,
+ .node_id = 192,
+ .loop_num = 16,
+ .pri_max = 8,
+ .flow_id = 48,
+ .drr_node_id = 96,
+ },
+ {
+ .port = 4,
+ .flow_level = false,
+ .node_id = 284,
+ .loop_num = 4,
+ .pri_max = 4,
+ .flow_id = 48,
+ .drr_node_id = 96,
+ },
+ {
+ .port = 5,
+ .flow_level = true,
+ .node_id = 52,
+ .loop_num = 2,
+ .pri_max = 0,
+ .flow_id = 5,
+ .drr_node_id = 24,
+ },
+ {
+ .port = 5,
+ .flow_level = false,
+ .node_id = 208,
+ .loop_num = 16,
+ .pri_max = 8,
+ .flow_id = 52,
+ .drr_node_id = 112,
+ },
+ {
+ .port = 5,
+ .flow_level = false,
+ .node_id = 288,
+ .loop_num = 4,
+ .pri_max = 4,
+ .flow_id = 52,
+ .drr_node_id = 112,
+ },
+ {
+ .port = 6,
+ .flow_level = true,
+ .node_id = 56,
+ .loop_num = 2,
+ .pri_max = 0,
+ .flow_id = 6,
+ .drr_node_id = 28,
+ },
+ {
+ .port = 6,
+ .flow_level = false,
+ .node_id = 224,
+ .loop_num = 16,
+ .pri_max = 8,
+ .flow_id = 56,
+ .drr_node_id = 128,
+ },
+ {
+ .port = 6,
+ .flow_level = false,
+ .node_id = 292,
+ .loop_num = 4,
+ .pri_max = 4,
+ .flow_id = 56,
+ .drr_node_id = 128,
+ },
+ {
+ .port = 7,
+ .flow_level = true,
+ .node_id = 60,
+ .loop_num = 2,
+ .pri_max = 0,
+ .flow_id = 7,
+ .drr_node_id = 32,
+ },
+ {
+ .port = 7,
+ .flow_level = false,
+ .node_id = 240,
+ .loop_num = 16,
+ .pri_max = 8,
+ .flow_id = 60,
+ .drr_node_id = 144,
+ },
+ {
+ .port = 7,
+ .flow_level = false,
+ .node_id = 296,
+ .loop_num = 4,
+ .pri_max = 4,
+ .flow_id = 60,
+ .drr_node_id = 144,
+ },
+};
+
+/* The scheduler resource is applied to each PPE port. The resource
+ * includes the unicast and multicast queues, flow nodes and DRR nodes.
+ */
+static const struct ppe_port_schedule_resource ppe_scheduler_res[] = {
+ { .ucastq_start = 0,
+ .ucastq_end = 63,
+ .mcastq_start = 256,
+ .mcastq_end = 271,
+ .flow_id_start = 0,
+ .flow_id_end = 0,
+ .l0node_start = 0,
+ .l0node_end = 7,
+ .l1node_start = 0,
+ .l1node_end = 0,
+ },
+ { .ucastq_start = 144,
+ .ucastq_end = 159,
+ .mcastq_start = 272,
+ .mcastq_end = 275,
+ .flow_id_start = 36,
+ .flow_id_end = 39,
+ .l0node_start = 48,
+ .l0node_end = 63,
+ .l1node_start = 8,
+ .l1node_end = 11,
+ },
+ { .ucastq_start = 160,
+ .ucastq_end = 175,
+ .mcastq_start = 276,
+ .mcastq_end = 279,
+ .flow_id_start = 40,
+ .flow_id_end = 43,
+ .l0node_start = 64,
+ .l0node_end = 79,
+ .l1node_start = 12,
+ .l1node_end = 15,
+ },
+ { .ucastq_start = 176,
+ .ucastq_end = 191,
+ .mcastq_start = 280,
+ .mcastq_end = 283,
+ .flow_id_start = 44,
+ .flow_id_end = 47,
+ .l0node_start = 80,
+ .l0node_end = 95,
+ .l1node_start = 16,
+ .l1node_end = 19,
+ },
+ { .ucastq_start = 192,
+ .ucastq_end = 207,
+ .mcastq_start = 284,
+ .mcastq_end = 287,
+ .flow_id_start = 48,
+ .flow_id_end = 51,
+ .l0node_start = 96,
+ .l0node_end = 111,
+ .l1node_start = 20,
+ .l1node_end = 23,
+ },
+ { .ucastq_start = 208,
+ .ucastq_end = 223,
+ .mcastq_start = 288,
+ .mcastq_end = 291,
+ .flow_id_start = 52,
+ .flow_id_end = 55,
+ .l0node_start = 112,
+ .l0node_end = 127,
+ .l1node_start = 24,
+ .l1node_end = 27,
+ },
+ { .ucastq_start = 224,
+ .ucastq_end = 239,
+ .mcastq_start = 292,
+ .mcastq_end = 295,
+ .flow_id_start = 56,
+ .flow_id_end = 59,
+ .l0node_start = 128,
+ .l0node_end = 143,
+ .l1node_start = 28,
+ .l1node_end = 31,
+ },
+ { .ucastq_start = 240,
+ .ucastq_end = 255,
+ .mcastq_start = 296,
+ .mcastq_end = 299,
+ .flow_id_start = 60,
+ .flow_id_end = 63,
+ .l0node_start = 144,
+ .l0node_end = 159,
+ .l1node_start = 32,
+ .l1node_end = 35,
+ },
+ { .ucastq_start = 64,
+ .ucastq_end = 143,
+ .mcastq_start = 0,
+ .mcastq_end = 0,
+ .flow_id_start = 1,
+ .flow_id_end = 35,
+ .l0node_start = 8,
+ .l0node_end = 47,
+ .l1node_start = 1,
+ .l1node_end = 7,
+ },
+};
+
+/* Set the PPE queue level scheduler configuration. */
+static int ppe_scheduler_l0_queue_map_set(struct ppe_device *ppe_dev,
+ int node_id, int port,
+ struct ppe_scheduler_cfg scheduler_cfg)
+{
+ u32 val, reg;
+ int ret;
+
+ reg = PPE_L0_FLOW_MAP_TBL_ADDR + node_id * PPE_L0_FLOW_MAP_TBL_INC;
+ val = FIELD_PREP(PPE_L0_FLOW_MAP_TBL_FLOW_ID, scheduler_cfg.flow_id);
+ val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_C_PRI, scheduler_cfg.pri);
+ val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_E_PRI, scheduler_cfg.pri);
+ val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_C_NODE_WT, scheduler_cfg.drr_node_wt);
+ val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_E_NODE_WT, scheduler_cfg.drr_node_wt);
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ reg = PPE_L0_C_FLOW_CFG_TBL_ADDR +
+ (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
+ PPE_L0_C_FLOW_CFG_TBL_INC;
+ val = FIELD_PREP(PPE_L0_C_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
+ val |= FIELD_PREP(PPE_L0_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.unit_is_packet);
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ reg = PPE_L0_E_FLOW_CFG_TBL_ADDR +
+ (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
+ PPE_L0_E_FLOW_CFG_TBL_INC;
+ val = FIELD_PREP(PPE_L0_E_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
+ val |= FIELD_PREP(PPE_L0_E_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.unit_is_packet);
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ reg = PPE_L0_FLOW_PORT_MAP_TBL_ADDR + node_id * PPE_L0_FLOW_PORT_MAP_TBL_INC;
+ val = FIELD_PREP(PPE_L0_FLOW_PORT_MAP_TBL_PORT_NUM, port);
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ reg = PPE_L0_COMP_CFG_TBL_ADDR + node_id * PPE_L0_COMP_CFG_TBL_INC;
+ val = FIELD_PREP(PPE_L0_COMP_CFG_TBL_NODE_METER_LEN, scheduler_cfg.frame_mode);
+
+ return regmap_update_bits(ppe_dev->regmap, reg,
+ PPE_L0_COMP_CFG_TBL_NODE_METER_LEN,
+ val);
+}
+
+/* Set the PPE flow level scheduler configuration. */
+static int ppe_scheduler_l1_queue_map_set(struct ppe_device *ppe_dev,
+ int node_id, int port,
+ struct ppe_scheduler_cfg scheduler_cfg)
+{
+ u32 val, reg;
+ int ret;
+
+ val = FIELD_PREP(PPE_L1_FLOW_MAP_TBL_FLOW_ID, scheduler_cfg.flow_id);
+ val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_C_PRI, scheduler_cfg.pri);
+ val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_E_PRI, scheduler_cfg.pri);
+ val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_C_NODE_WT, scheduler_cfg.drr_node_wt);
+ val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_E_NODE_WT, scheduler_cfg.drr_node_wt);
+ reg = PPE_L1_FLOW_MAP_TBL_ADDR + node_id * PPE_L1_FLOW_MAP_TBL_INC;
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(PPE_L1_C_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
+ val |= FIELD_PREP(PPE_L1_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.unit_is_packet);
+ reg = PPE_L1_C_FLOW_CFG_TBL_ADDR +
+ (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
+ PPE_L1_C_FLOW_CFG_TBL_INC;
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(PPE_L1_E_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
+ val |= FIELD_PREP(PPE_L1_E_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.unit_is_packet);
+ reg = PPE_L1_E_FLOW_CFG_TBL_ADDR +
+ (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
+ PPE_L1_E_FLOW_CFG_TBL_INC;
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(PPE_L1_FLOW_PORT_MAP_TBL_PORT_NUM, port);
+ reg = PPE_L1_FLOW_PORT_MAP_TBL_ADDR + node_id * PPE_L1_FLOW_PORT_MAP_TBL_INC;
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ reg = PPE_L1_COMP_CFG_TBL_ADDR + node_id * PPE_L1_COMP_CFG_TBL_INC;
+ val = FIELD_PREP(PPE_L1_COMP_CFG_TBL_NODE_METER_LEN, scheduler_cfg.frame_mode);
+
+ return regmap_update_bits(ppe_dev->regmap, reg, PPE_L1_COMP_CFG_TBL_NODE_METER_LEN, val);
+}
+
+/**
+ * ppe_queue_scheduler_set - Configure scheduler for PPE hardware queue
+ * @ppe_dev: PPE device
+ * @node_id: PPE queue ID or flow ID
+ * @flow_level: Flow level scheduler or queue level scheduler
+ * @port: PPE port ID to which the scheduler configuration applies
+ * @scheduler_cfg: PPE scheduler configuration
+ *
+ * PPE scheduler configuration supports queue level and flow level on
+ * the PPE egress port.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
+ int node_id, bool flow_level, int port,
+ struct ppe_scheduler_cfg scheduler_cfg)
+{
+ if (flow_level)
+ return ppe_scheduler_l1_queue_map_set(ppe_dev, node_id,
+ port, scheduler_cfg);
+
+ return ppe_scheduler_l0_queue_map_set(ppe_dev, node_id,
+ port, scheduler_cfg);
+}
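+
+/* For illustration only: a sketch of how a caller might configure the first
+ * queue level (L0) node of PPE port 1, mirroring the port 1 entry of
+ * ppe_port_sch_config above; any other combination of values is purely an
+ * assumption about the caller.
+ *
+ *	struct ppe_scheduler_cfg cfg = {
+ *		.flow_id = 36,
+ *		.pri = 0,
+ *		.drr_node_id = 48,
+ *		.drr_node_wt = 1,
+ *		.unit_is_packet = false,
+ *		.frame_mode = PPE_SCH_WITH_FRAME_CRC,
+ *	};
+ *
+ *	err = ppe_queue_scheduler_set(ppe_dev, 144, false, 1, cfg);
+ */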
+
+/**
+ * ppe_queue_ucast_base_set - Set PPE unicast queue base ID and profile ID
+ * @ppe_dev: PPE device
+ * @queue_dst: PPE queue destination configuration
+ * @queue_base: PPE queue base ID
+ * @profile_id: Profile ID
+ *
+ * The PPE unicast queue base ID and profile ID are configured based on the
+ * destination information, which can be the service code, the CPU code or
+ * the destination port.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_queue_ucast_base_set(struct ppe_device *ppe_dev,
+ struct ppe_queue_ucast_dest queue_dst,
+ int queue_base, int profile_id)
+{
+ int index, profile_size;
+ u32 val, reg;
+
+ profile_size = queue_dst.src_profile << 8;
+ if (queue_dst.service_code_en)
+ index = PPE_QUEUE_BASE_SERVICE_CODE + profile_size +
+ queue_dst.service_code;
+ else if (queue_dst.cpu_code_en)
+ index = PPE_QUEUE_BASE_CPU_CODE + profile_size +
+ queue_dst.cpu_code;
+ else
+ index = profile_size + queue_dst.dest_port;
+
+ val = FIELD_PREP(PPE_UCAST_QUEUE_MAP_TBL_PROFILE_ID, profile_id);
+ val |= FIELD_PREP(PPE_UCAST_QUEUE_MAP_TBL_QUEUE_ID, queue_base);
+ reg = PPE_UCAST_QUEUE_MAP_TBL_ADDR + index * PPE_UCAST_QUEUE_MAP_TBL_INC;
+
+ return regmap_write(ppe_dev->regmap, reg, val);
+}
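+
+/* For illustration only: with a zero source profile and only the CPU code
+ * enabled, as done for ARP replies in ppe_queue_dest_init() below, the
+ * table index resolves to PPE_QUEUE_BASE_CPU_CODE + cpu_code:
+ *
+ *	struct ppe_queue_ucast_dest dst = {
+ *		.cpu_code_en = true,
+ *		.cpu_code = 101,
+ *	};
+ *
+ *	err = ppe_queue_ucast_base_set(ppe_dev, dst, queue_base, 0);
+ *
+ * which programs entry 1024 + 101 = 1125 of the unicast queue map table.
+ */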
+
+/**
+ * ppe_queue_ucast_offset_pri_set - Set PPE unicast queue offset based on priority
+ * @ppe_dev: PPE device
+ * @profile_id: Profile ID
+ * @priority: PPE internal priority to be used to set queue offset
+ * @queue_offset: Queue offset used for calculating the destination queue ID
+ *
+ * The PPE unicast queue offset is configured based on the PPE
+ * internal priority.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_queue_ucast_offset_pri_set(struct ppe_device *ppe_dev,
+ int profile_id,
+ int priority,
+ int queue_offset)
+{
+ u32 val, reg;
+ int index;
+
+ index = (profile_id << 4) + priority;
+ val = FIELD_PREP(PPE_UCAST_PRIORITY_MAP_TBL_CLASS, queue_offset);
+ reg = PPE_UCAST_PRIORITY_MAP_TBL_ADDR + index * PPE_UCAST_PRIORITY_MAP_TBL_INC;
+
+ return regmap_write(ppe_dev->regmap, reg, val);
+}
+
+/**
+ * ppe_queue_ucast_offset_hash_set - Set PPE unicast queue offset based on hash
+ * @ppe_dev: PPE device
+ * @profile_id: Profile ID
+ * @rss_hash: Packet hash value to be used to set queue offset
+ * @queue_offset: Queue offset used for calculating the destination queue ID
+ *
+ * The PPE unicast queue offset is configured based on the RSS hash value.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_queue_ucast_offset_hash_set(struct ppe_device *ppe_dev,
+ int profile_id,
+ int rss_hash,
+ int queue_offset)
+{
+ u32 val, reg;
+ int index;
+
+ index = (profile_id << 8) + rss_hash;
+ val = FIELD_PREP(PPE_UCAST_HASH_MAP_TBL_HASH, queue_offset);
+ reg = PPE_UCAST_HASH_MAP_TBL_ADDR + index * PPE_UCAST_HASH_MAP_TBL_INC;
+
+ return regmap_write(ppe_dev->regmap, reg, val);
+}
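+
+/* For illustration only: a sketch of how the two offset helpers are used by
+ * ppe_queue_dest_init() below, assuming the hardware derives the destination
+ * queue from the programmed queue base plus the offset selected by the
+ * internal priority or by the RSS hash:
+ *
+ *	for (pri = 0; pri < PPE_QUEUE_INTER_PRI_NUM; pri++)
+ *		ppe_queue_ucast_offset_pri_set(ppe_dev, profile_id, pri,
+ *					       min(pri, pri_max));
+ *
+ *	for (hash = 0; hash < PPE_QUEUE_HASH_NUM; hash++)
+ *		ppe_queue_ucast_offset_hash_set(ppe_dev, profile_id, hash, 0);
+ */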
+
+/**
+ * ppe_port_resource_get - Get PPE resource per port
+ * @ppe_dev: PPE device
+ * @port: PPE port
+ * @type: Resource type
+ * @res_start: Resource start ID returned
+ * @res_end: Resource end ID returned
+ *
+ * PPE resources are assigned per PPE port and are acquired by the QoS
+ * scheduler.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_port_resource_get(struct ppe_device *ppe_dev, int port,
+ enum ppe_resource_type type,
+ int *res_start, int *res_end)
+{
+ struct ppe_port_schedule_resource res;
+
+	/* The reserved resource, indexed by the maximum PPE port ID, is
+	 * also allowed to be acquired.
+	 */
+ if (port > ppe_dev->num_ports)
+ return -EINVAL;
+
+ res = ppe_scheduler_res[port];
+ switch (type) {
+ case PPE_RES_UCAST:
+ *res_start = res.ucastq_start;
+ *res_end = res.ucastq_end;
+ break;
+ case PPE_RES_MCAST:
+ *res_start = res.mcastq_start;
+ *res_end = res.mcastq_end;
+ break;
+ case PPE_RES_FLOW_ID:
+ *res_start = res.flow_id_start;
+ *res_end = res.flow_id_end;
+ break;
+ case PPE_RES_L0_NODE:
+ *res_start = res.l0node_start;
+ *res_end = res.l0node_end;
+ break;
+ case PPE_RES_L1_NODE:
+ *res_start = res.l1node_start;
+ *res_end = res.l1node_end;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
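+
+/* For illustration only: a sketch of looking up the unicast queue range
+ * owned by a PPE port, as ppe_queue_dest_init() does below:
+ *
+ *	int start, end, num_queues;
+ *
+ *	err = ppe_port_resource_get(ppe_dev, port, PPE_RES_UCAST, &start, &end);
+ *	if (!err)
+ *		num_queues = end - start + 1;
+ *
+ * For port 1 this returns queues 144 to 159 per ppe_scheduler_res above.
+ */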
+
+/**
+ * ppe_sc_config_set - Set PPE service code configuration
+ * @ppe_dev: PPE device
+ * @sc: Service ID, 0-255 supported by PPE
+ * @cfg: Service code configuration
+ *
+ * The PPE service code is used during the packet processing stages to
+ * perform or bypass selected operations on the packet.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_sc_config_set(struct ppe_device *ppe_dev, int sc, struct ppe_sc_cfg cfg)
+{
+ u32 val, reg, servcode_val[2] = {};
+ unsigned long bitmap_value;
+ int ret;
+
+ val = FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_PORT_ID_VALID, cfg.dest_port_valid);
+ val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_PORT_ID, cfg.dest_port);
+ val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_DIRECTION, cfg.is_src);
+
+ bitmap_value = bitmap_read(cfg.bitmaps.egress, 0, PPE_SC_BYPASS_EGRESS_SIZE);
+ val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_BYPASS_BITMAP, bitmap_value);
+ val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_RX_CNT_EN,
+ test_bit(PPE_SC_BYPASS_COUNTER_RX, cfg.bitmaps.counter));
+ val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_TX_CNT_EN,
+ test_bit(PPE_SC_BYPASS_COUNTER_TX, cfg.bitmaps.counter));
+ reg = PPE_IN_L2_SERVICE_TBL_ADDR + PPE_IN_L2_SERVICE_TBL_INC * sc;
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ bitmap_value = bitmap_read(cfg.bitmaps.ingress, 0, PPE_SC_BYPASS_INGRESS_SIZE);
+ PPE_SERVICE_SET_BYPASS_BITMAP(servcode_val, bitmap_value);
+ PPE_SERVICE_SET_RX_CNT_EN(servcode_val,
+ test_bit(PPE_SC_BYPASS_COUNTER_RX_VLAN, cfg.bitmaps.counter));
+ reg = PPE_SERVICE_TBL_ADDR + PPE_SERVICE_TBL_INC * sc;
+
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ servcode_val, ARRAY_SIZE(servcode_val));
+ if (ret)
+ return ret;
+
+ reg = PPE_EG_SERVICE_TBL_ADDR + PPE_EG_SERVICE_TBL_INC * sc;
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ servcode_val, ARRAY_SIZE(servcode_val));
+ if (ret)
+ return ret;
+
+ PPE_EG_SERVICE_SET_NEXT_SERVCODE(servcode_val, cfg.next_service_code);
+ PPE_EG_SERVICE_SET_UPDATE_ACTION(servcode_val, cfg.eip_field_update_bitmap);
+ PPE_EG_SERVICE_SET_HW_SERVICE(servcode_val, cfg.eip_hw_service);
+ PPE_EG_SERVICE_SET_OFFSET_SEL(servcode_val, cfg.eip_offset_sel);
+ PPE_EG_SERVICE_SET_TX_CNT_EN(servcode_val,
+ test_bit(PPE_SC_BYPASS_COUNTER_TX_VLAN, cfg.bitmaps.counter));
+
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ servcode_val, ARRAY_SIZE(servcode_val));
+ if (ret)
+ return ret;
+
+ bitmap_value = bitmap_read(cfg.bitmaps.tunnel, 0, PPE_SC_BYPASS_TUNNEL_SIZE);
+ val = FIELD_PREP(PPE_TL_SERVICE_TBL_BYPASS_BITMAP, bitmap_value);
+ reg = PPE_TL_SERVICE_TBL_ADDR + PPE_TL_SERVICE_TBL_INC * sc;
+
+ return regmap_write(ppe_dev->regmap, reg, val);
+}
+
+/**
+ * ppe_counter_enable_set - Enable PPE port counters
+ * @ppe_dev: PPE device
+ * @port: PPE port ID
+ *
+ * Enable PPE counters on the given port for the unicast, multicast and
+ * VLAN packets received and transmitted by the PPE.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_counter_enable_set(struct ppe_device *ppe_dev, int port)
+{
+ u32 reg, mru_mtu_val[3];
+ int ret;
+
+ reg = PPE_MRU_MTU_CTRL_TBL_ADDR + PPE_MRU_MTU_CTRL_TBL_INC * port;
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
+ if (ret)
+ return ret;
+
+ PPE_MRU_MTU_CTRL_SET_RX_CNT_EN(mru_mtu_val, true);
+ PPE_MRU_MTU_CTRL_SET_TX_CNT_EN(mru_mtu_val, true);
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
+ if (ret)
+ return ret;
+
+ reg = PPE_MC_MTU_CTRL_TBL_ADDR + PPE_MC_MTU_CTRL_TBL_INC * port;
+ ret = regmap_set_bits(ppe_dev->regmap, reg, PPE_MC_MTU_CTRL_TBL_TX_CNT_EN);
+ if (ret)
+ return ret;
+
+ reg = PPE_PORT_EG_VLAN_TBL_ADDR + PPE_PORT_EG_VLAN_TBL_INC * port;
+
+ return regmap_set_bits(ppe_dev->regmap, reg, PPE_PORT_EG_VLAN_TBL_TX_COUNTING_EN);
+}
+
+static int ppe_rss_hash_ipv4_config(struct ppe_device *ppe_dev, int index,
+ struct ppe_rss_hash_cfg cfg)
+{
+ u32 reg, val;
+
+ switch (index) {
+ case 0:
+ val = cfg.hash_sip_mix[0];
+ break;
+ case 1:
+ val = cfg.hash_dip_mix[0];
+ break;
+ case 2:
+ val = cfg.hash_protocol_mix;
+ break;
+ case 3:
+ val = cfg.hash_dport_mix;
+ break;
+ case 4:
+ val = cfg.hash_sport_mix;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ reg = PPE_RSS_HASH_MIX_IPV4_ADDR + index * PPE_RSS_HASH_MIX_IPV4_INC;
+
+ return regmap_update_bits(ppe_dev->regmap, reg,
+ PPE_RSS_HASH_MIX_IPV4_VAL,
+ FIELD_PREP(PPE_RSS_HASH_MIX_IPV4_VAL, val));
+}
+
+static int ppe_rss_hash_ipv6_config(struct ppe_device *ppe_dev, int index,
+ struct ppe_rss_hash_cfg cfg)
+{
+ u32 reg, val;
+
+ switch (index) {
+ case 0 ... 3:
+ val = cfg.hash_sip_mix[index];
+ break;
+ case 4 ... 7:
+ val = cfg.hash_dip_mix[index - 4];
+ break;
+ case 8:
+ val = cfg.hash_protocol_mix;
+ break;
+ case 9:
+ val = cfg.hash_dport_mix;
+ break;
+ case 10:
+ val = cfg.hash_sport_mix;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ reg = PPE_RSS_HASH_MIX_ADDR + index * PPE_RSS_HASH_MIX_INC;
+
+ return regmap_update_bits(ppe_dev->regmap, reg,
+ PPE_RSS_HASH_MIX_VAL,
+ FIELD_PREP(PPE_RSS_HASH_MIX_VAL, val));
+}
+
+/**
+ * ppe_rss_hash_config_set - Configure the PPE RSS hash settings for received packets
+ * @ppe_dev: PPE device.
+ * @mode: RSS hash mode to configure, for IPv4 packets, IPv6 packets or both.
+ * @cfg: RSS hash configuration.
+ *
+ * PPE RSS hash settings are configured separately for IPv4 and IPv6 packets.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_rss_hash_config_set(struct ppe_device *ppe_dev, int mode,
+ struct ppe_rss_hash_cfg cfg)
+{
+ u32 val, reg;
+ int i, ret;
+
+ if (mode & PPE_RSS_HASH_MODE_IPV4) {
+ val = FIELD_PREP(PPE_RSS_HASH_MASK_IPV4_HASH_MASK, cfg.hash_mask);
+ val |= FIELD_PREP(PPE_RSS_HASH_MASK_IPV4_FRAGMENT, cfg.hash_fragment_mode);
+ ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_MASK_IPV4_ADDR, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(PPE_RSS_HASH_SEED_IPV4_VAL, cfg.hash_seed);
+ ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_SEED_IPV4_ADDR, val);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < PPE_RSS_HASH_MIX_IPV4_ENTRIES; i++) {
+ ret = ppe_rss_hash_ipv4_config(ppe_dev, i, cfg);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < PPE_RSS_HASH_FIN_IPV4_ENTRIES; i++) {
+ val = FIELD_PREP(PPE_RSS_HASH_FIN_IPV4_INNER, cfg.hash_fin_inner[i]);
+ val |= FIELD_PREP(PPE_RSS_HASH_FIN_IPV4_OUTER, cfg.hash_fin_outer[i]);
+ reg = PPE_RSS_HASH_FIN_IPV4_ADDR + i * PPE_RSS_HASH_FIN_IPV4_INC;
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if (mode & PPE_RSS_HASH_MODE_IPV6) {
+ val = FIELD_PREP(PPE_RSS_HASH_MASK_HASH_MASK, cfg.hash_mask);
+ val |= FIELD_PREP(PPE_RSS_HASH_MASK_FRAGMENT, cfg.hash_fragment_mode);
+ ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_MASK_ADDR, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(PPE_RSS_HASH_SEED_VAL, cfg.hash_seed);
+ ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_SEED_ADDR, val);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < PPE_RSS_HASH_MIX_ENTRIES; i++) {
+ ret = ppe_rss_hash_ipv6_config(ppe_dev, i, cfg);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < PPE_RSS_HASH_FIN_ENTRIES; i++) {
+ val = FIELD_PREP(PPE_RSS_HASH_FIN_INNER, cfg.hash_fin_inner[i]);
+ val |= FIELD_PREP(PPE_RSS_HASH_FIN_OUTER, cfg.hash_fin_outer[i]);
+ reg = PPE_RSS_HASH_FIN_ADDR + i * PPE_RSS_HASH_FIN_INC;
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ppe_ring_queue_map_set - Set the PPE queue to Ethernet DMA ring mapping
+ * @ppe_dev: PPE device
+ * @ring_id: Ethernet DMA ring ID
+ * @queue_map: Bitmap of the queue IDs mapped to the given Ethernet DMA ring
+ *
+ * Configure the mapping from a set of PPE queues to a given Ethernet DMA ring.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_ring_queue_map_set(struct ppe_device *ppe_dev, int ring_id, u32 *queue_map)
+{
+ u32 reg, queue_bitmap_val[PPE_RING_TO_QUEUE_BITMAP_WORD_CNT];
+
+ memcpy(queue_bitmap_val, queue_map, sizeof(queue_bitmap_val));
+ reg = PPE_RING_Q_MAP_TBL_ADDR + PPE_RING_Q_MAP_TBL_INC * ring_id;
+
+ return regmap_bulk_write(ppe_dev->regmap, reg,
+ queue_bitmap_val,
+ ARRAY_SIZE(queue_bitmap_val));
+}
+
+static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id,
+ const struct ppe_bm_port_config port_cfg)
+{
+ u32 reg, val, bm_fc_val[2];
+ int ret;
+
+ reg = PPE_BM_PORT_FC_CFG_TBL_ADDR + PPE_BM_PORT_FC_CFG_TBL_INC * bm_port_id;
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ bm_fc_val, ARRAY_SIZE(bm_fc_val));
+ if (ret)
+ return ret;
+
+ /* Configure BM flow control related threshold. */
+ PPE_BM_PORT_FC_SET_WEIGHT(bm_fc_val, port_cfg.weight);
+ PPE_BM_PORT_FC_SET_RESUME_OFFSET(bm_fc_val, port_cfg.resume_offset);
+ PPE_BM_PORT_FC_SET_RESUME_THRESHOLD(bm_fc_val, port_cfg.resume_ceil);
+ PPE_BM_PORT_FC_SET_DYNAMIC(bm_fc_val, port_cfg.dynamic);
+ PPE_BM_PORT_FC_SET_REACT_LIMIT(bm_fc_val, port_cfg.in_fly_buf);
+ PPE_BM_PORT_FC_SET_PRE_ALLOC(bm_fc_val, port_cfg.pre_alloc);
+
+ /* Configure low/high bits of the ceiling for the BM port. */
+ val = FIELD_GET(GENMASK(2, 0), port_cfg.ceil);
+ PPE_BM_PORT_FC_SET_CEILING_LOW(bm_fc_val, val);
+ val = FIELD_GET(GENMASK(10, 3), port_cfg.ceil);
+ PPE_BM_PORT_FC_SET_CEILING_HIGH(bm_fc_val, val);
+
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ bm_fc_val, ARRAY_SIZE(bm_fc_val));
+ if (ret)
+ return ret;
+
+ /* Assign the default group ID 0 to the BM port. */
+ val = FIELD_PREP(PPE_BM_PORT_GROUP_ID_SHARED_GROUP_ID, 0);
+ reg = PPE_BM_PORT_GROUP_ID_ADDR + PPE_BM_PORT_GROUP_ID_INC * bm_port_id;
+ ret = regmap_update_bits(ppe_dev->regmap, reg,
+ PPE_BM_PORT_GROUP_ID_SHARED_GROUP_ID,
+ val);
+ if (ret)
+ return ret;
+
+ /* Enable BM port flow control. */
+ reg = PPE_BM_PORT_FC_MODE_ADDR + PPE_BM_PORT_FC_MODE_INC * bm_port_id;
+
+ return regmap_set_bits(ppe_dev->regmap, reg, PPE_BM_PORT_FC_MODE_EN);
+}
+
+/* Configure the buffer threshold for the port flow control function. */
+static int ppe_config_bm(struct ppe_device *ppe_dev)
+{
+ const struct ppe_bm_port_config *port_cfg;
+ unsigned int i, bm_port_id, port_cfg_cnt;
+ u32 reg, val;
+ int ret;
+
+	/* Configure the allocated buffer number only for group 0.
+	 * The buffer numbers of groups 1-3 are already cleared to 0
+	 * by the PPE reset done during the probe of the PPE driver.
+	 */
+ reg = PPE_BM_SHARED_GROUP_CFG_ADDR;
+ val = FIELD_PREP(PPE_BM_SHARED_GROUP_CFG_SHARED_LIMIT,
+ ipq9574_ppe_bm_group_config);
+ ret = regmap_update_bits(ppe_dev->regmap, reg,
+ PPE_BM_SHARED_GROUP_CFG_SHARED_LIMIT,
+ val);
+ if (ret)
+ goto bm_config_fail;
+
+ /* Configure buffer thresholds for the BM ports. */
+ port_cfg = ipq9574_ppe_bm_port_config;
+ port_cfg_cnt = ARRAY_SIZE(ipq9574_ppe_bm_port_config);
+ for (i = 0; i < port_cfg_cnt; i++) {
+ for (bm_port_id = port_cfg[i].port_id_start;
+ bm_port_id <= port_cfg[i].port_id_end; bm_port_id++) {
+ ret = ppe_config_bm_threshold(ppe_dev, bm_port_id,
+ port_cfg[i]);
+ if (ret)
+ goto bm_config_fail;
+ }
+ }
+
+ return 0;
+
+bm_config_fail:
+ dev_err(ppe_dev->dev, "PPE BM config error %d\n", ret);
+ return ret;
+}
+
+/* Configure the PPE hardware queue depth, which is decided by the queue
+ * threshold.
+ */
+static int ppe_config_qm(struct ppe_device *ppe_dev)
+{
+ const struct ppe_qm_queue_config *queue_cfg;
+ int ret, i, queue_id, queue_cfg_count;
+ u32 reg, multicast_queue_cfg[5];
+ u32 unicast_queue_cfg[4];
+ u32 group_cfg[3];
+
+ /* Assign the buffer number to the group 0 by default. */
+ reg = PPE_AC_GRP_CFG_TBL_ADDR;
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ group_cfg, ARRAY_SIZE(group_cfg));
+ if (ret)
+ goto qm_config_fail;
+
+ PPE_AC_GRP_SET_BUF_LIMIT(group_cfg, ipq9574_ppe_qm_group_config);
+
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ group_cfg, ARRAY_SIZE(group_cfg));
+ if (ret)
+ goto qm_config_fail;
+
+ queue_cfg = ipq9574_ppe_qm_queue_config;
+ queue_cfg_count = ARRAY_SIZE(ipq9574_ppe_qm_queue_config);
+ for (i = 0; i < queue_cfg_count; i++) {
+ queue_id = queue_cfg[i].queue_start;
+
+ /* Configure threshold for dropping packets separately for
+ * unicast and multicast PPE queues.
+ */
+ while (queue_id <= queue_cfg[i].queue_end) {
+ if (queue_id < PPE_AC_UNICAST_QUEUE_CFG_TBL_ENTRIES) {
+ reg = PPE_AC_UNICAST_QUEUE_CFG_TBL_ADDR +
+ PPE_AC_UNICAST_QUEUE_CFG_TBL_INC * queue_id;
+
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ unicast_queue_cfg,
+ ARRAY_SIZE(unicast_queue_cfg));
+ if (ret)
+ goto qm_config_fail;
+
+ PPE_AC_UNICAST_QUEUE_SET_EN(unicast_queue_cfg, true);
+ PPE_AC_UNICAST_QUEUE_SET_GRP_ID(unicast_queue_cfg, 0);
+ PPE_AC_UNICAST_QUEUE_SET_PRE_LIMIT(unicast_queue_cfg,
+ queue_cfg[i].prealloc_buf);
+ PPE_AC_UNICAST_QUEUE_SET_DYNAMIC(unicast_queue_cfg,
+ queue_cfg[i].dynamic);
+ PPE_AC_UNICAST_QUEUE_SET_WEIGHT(unicast_queue_cfg,
+ queue_cfg[i].weight);
+ PPE_AC_UNICAST_QUEUE_SET_THRESHOLD(unicast_queue_cfg,
+ queue_cfg[i].ceil);
+ PPE_AC_UNICAST_QUEUE_SET_GRN_RESUME(unicast_queue_cfg,
+ queue_cfg[i].resume_offset);
+
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ unicast_queue_cfg,
+ ARRAY_SIZE(unicast_queue_cfg));
+ if (ret)
+ goto qm_config_fail;
+ } else {
+ reg = PPE_AC_MULTICAST_QUEUE_CFG_TBL_ADDR +
+ PPE_AC_MULTICAST_QUEUE_CFG_TBL_INC * queue_id;
+
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ multicast_queue_cfg,
+ ARRAY_SIZE(multicast_queue_cfg));
+ if (ret)
+ goto qm_config_fail;
+
+ PPE_AC_MULTICAST_QUEUE_SET_EN(multicast_queue_cfg, true);
+ PPE_AC_MULTICAST_QUEUE_SET_GRN_GRP_ID(multicast_queue_cfg, 0);
+ PPE_AC_MULTICAST_QUEUE_SET_GRN_PRE_LIMIT(multicast_queue_cfg,
+ queue_cfg[i].prealloc_buf);
+ PPE_AC_MULTICAST_QUEUE_SET_GRN_THRESHOLD(multicast_queue_cfg,
+ queue_cfg[i].ceil);
+ PPE_AC_MULTICAST_QUEUE_SET_GRN_RESUME(multicast_queue_cfg,
+ queue_cfg[i].resume_offset);
+
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ multicast_queue_cfg,
+ ARRAY_SIZE(multicast_queue_cfg));
+ if (ret)
+ goto qm_config_fail;
+ }
+
+ /* Enable enqueue. */
+ reg = PPE_ENQ_OPR_TBL_ADDR + PPE_ENQ_OPR_TBL_INC * queue_id;
+ ret = regmap_clear_bits(ppe_dev->regmap, reg,
+ PPE_ENQ_OPR_TBL_ENQ_DISABLE);
+ if (ret)
+ goto qm_config_fail;
+
+ /* Enable dequeue. */
+ reg = PPE_DEQ_OPR_TBL_ADDR + PPE_DEQ_OPR_TBL_INC * queue_id;
+ ret = regmap_clear_bits(ppe_dev->regmap, reg,
+ PPE_DEQ_OPR_TBL_DEQ_DISABLE);
+ if (ret)
+ goto qm_config_fail;
+
+ queue_id++;
+ }
+ }
+
+ /* Enable queue counter for all PPE hardware queues. */
+ ret = regmap_set_bits(ppe_dev->regmap, PPE_EG_BRIDGE_CONFIG_ADDR,
+ PPE_EG_BRIDGE_CONFIG_QUEUE_CNT_EN);
+ if (ret)
+ goto qm_config_fail;
+
+ return 0;
+
+qm_config_fail:
+ dev_err(ppe_dev->dev, "PPE QM config error %d\n", ret);
+ return ret;
+}
+
+static int ppe_node_scheduler_config(struct ppe_device *ppe_dev,
+ const struct ppe_scheduler_port_config config)
+{
+ struct ppe_scheduler_cfg sch_cfg;
+ int ret, i;
+
+ for (i = 0; i < config.loop_num; i++) {
+ if (!config.pri_max) {
+ /* Round robin scheduler without priority. */
+ sch_cfg.flow_id = config.flow_id;
+ sch_cfg.pri = 0;
+ sch_cfg.drr_node_id = config.drr_node_id;
+ } else {
+ sch_cfg.flow_id = config.flow_id + (i / config.pri_max);
+ sch_cfg.pri = i % config.pri_max;
+ sch_cfg.drr_node_id = config.drr_node_id + i;
+ }
+
+		/* The scheduler weight must be greater than 0. */
+		sch_cfg.drr_node_wt = 1;
+		/* Schedule in byte-based units. */
+		sch_cfg.unit_is_packet = false;
+		/* Account for the frame plus CRC. */
+		sch_cfg.frame_mode = PPE_SCH_WITH_FRAME_CRC;
+
+ ret = ppe_queue_scheduler_set(ppe_dev, config.node_id + i,
+ config.flow_level,
+ config.port,
+ sch_cfg);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Initialize scheduler settings for PPE buffer utilization and for
+ * dispatching packets on the PPE queues.
+ */
+static int ppe_config_scheduler(struct ppe_device *ppe_dev)
+{
+ const struct ppe_scheduler_port_config *port_cfg;
+ const struct ppe_scheduler_qm_config *qm_cfg;
+ const struct ppe_scheduler_bm_config *bm_cfg;
+ int ret, i, count;
+ u32 val, reg;
+
+ count = ARRAY_SIZE(ipq9574_ppe_sch_bm_config);
+ bm_cfg = ipq9574_ppe_sch_bm_config;
+
+ /* Configure the depth of BM scheduler entries. */
+ val = FIELD_PREP(PPE_BM_SCH_CTRL_SCH_DEPTH, count);
+ val |= FIELD_PREP(PPE_BM_SCH_CTRL_SCH_OFFSET, 0);
+ val |= FIELD_PREP(PPE_BM_SCH_CTRL_SCH_EN, 1);
+
+ ret = regmap_write(ppe_dev->regmap, PPE_BM_SCH_CTRL_ADDR, val);
+ if (ret)
+ goto sch_config_fail;
+
+	/* Configure each BM scheduler entry with the valid ingress port and
+	 * egress port; the second port takes effect when the specified port
+	 * is inactive.
+	 */
+ for (i = 0; i < count; i++) {
+ val = FIELD_PREP(PPE_BM_SCH_CFG_TBL_VALID, bm_cfg[i].valid);
+ val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_DIR, bm_cfg[i].dir);
+ val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_PORT_NUM, bm_cfg[i].port);
+ val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_SECOND_PORT_VALID,
+ bm_cfg[i].backup_port_valid);
+ val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_SECOND_PORT,
+ bm_cfg[i].backup_port);
+
+ reg = PPE_BM_SCH_CFG_TBL_ADDR + i * PPE_BM_SCH_CFG_TBL_INC;
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ goto sch_config_fail;
+ }
+
+ count = ARRAY_SIZE(ipq9574_ppe_sch_qm_config);
+ qm_cfg = ipq9574_ppe_sch_qm_config;
+
+ /* Configure the depth of QM scheduler entries. */
+ val = FIELD_PREP(PPE_PSCH_SCH_DEPTH_CFG_SCH_DEPTH, count);
+ ret = regmap_write(ppe_dev->regmap, PPE_PSCH_SCH_DEPTH_CFG_ADDR, val);
+ if (ret)
+ goto sch_config_fail;
+
+	/* Configure each QM scheduler entry with the enqueue port and the
+	 * dequeue port; the second port takes effect when the specified
+	 * dequeue port is inactive.
+	 */
+ for (i = 0; i < count; i++) {
+ val = FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_ENS_PORT_BITMAP,
+ qm_cfg[i].ensch_port_bmp);
+ val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_ENS_PORT,
+ qm_cfg[i].ensch_port);
+ val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_DES_PORT,
+ qm_cfg[i].desch_port);
+ val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_DES_SECOND_PORT_EN,
+ qm_cfg[i].desch_backup_port_valid);
+ val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_DES_SECOND_PORT,
+ qm_cfg[i].desch_backup_port);
+
+ reg = PPE_PSCH_SCH_CFG_TBL_ADDR + i * PPE_PSCH_SCH_CFG_TBL_INC;
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ goto sch_config_fail;
+ }
+
+ count = ARRAY_SIZE(ppe_port_sch_config);
+ port_cfg = ppe_port_sch_config;
+
+ /* Configure scheduler per PPE queue or flow. */
+ for (i = 0; i < count; i++) {
+ if (port_cfg[i].port >= ppe_dev->num_ports)
+ break;
+
+ ret = ppe_node_scheduler_config(ppe_dev, port_cfg[i]);
+ if (ret)
+ goto sch_config_fail;
+ }
+
+ return 0;
+
+sch_config_fail:
+ dev_err(ppe_dev->dev, "PPE scheduler arbitration config error %d\n", ret);
+ return ret;
+}
+
+/* Configure the PPE queue destination for each PPE port. */
+static int ppe_queue_dest_init(struct ppe_device *ppe_dev)
+{
+ int ret, port_id, index, q_base, q_offset, res_start, res_end, pri_max;
+ struct ppe_queue_ucast_dest queue_dst;
+
+ for (port_id = 0; port_id < ppe_dev->num_ports; port_id++) {
+ memset(&queue_dst, 0, sizeof(queue_dst));
+
+ ret = ppe_port_resource_get(ppe_dev, port_id, PPE_RES_UCAST,
+ &res_start, &res_end);
+ if (ret)
+ return ret;
+
+ q_base = res_start;
+ queue_dst.dest_port = port_id;
+
+		/* Configure the queue base ID and the profile ID, which is
+		 * the same as the physical port ID.
+		 */
+ ret = ppe_queue_ucast_base_set(ppe_dev, queue_dst,
+ q_base, port_id);
+ if (ret)
+ return ret;
+
+ /* Queue priority range supported by each PPE port */
+ ret = ppe_port_resource_get(ppe_dev, port_id, PPE_RES_L0_NODE,
+ &res_start, &res_end);
+ if (ret)
+ return ret;
+
+ pri_max = res_end - res_start;
+
+		/* Redirect ARP reply packets to the CPU port with the maximum
+		 * priority, which keeps ARP replies directed to the CPU
+		 * (CPU code 101) on the highest priority EDMA queue.
+		 */
+ if (port_id == 0) {
+ memset(&queue_dst, 0, sizeof(queue_dst));
+
+ queue_dst.cpu_code_en = true;
+ queue_dst.cpu_code = 101;
+ ret = ppe_queue_ucast_base_set(ppe_dev, queue_dst,
+ q_base + pri_max,
+ 0);
+ if (ret)
+ return ret;
+ }
+
+ /* Initialize the queue offset of internal priority. */
+ for (index = 0; index < PPE_QUEUE_INTER_PRI_NUM; index++) {
+ q_offset = index > pri_max ? pri_max : index;
+
+ ret = ppe_queue_ucast_offset_pri_set(ppe_dev, port_id,
+ index, q_offset);
+ if (ret)
+ return ret;
+ }
+
+		/* Initialize the queue offset of the RSS hash to 0 to avoid
+		 * a random hardware value that would lead to an unexpected
+		 * destination queue being generated.
+		 */
+ for (index = 0; index < PPE_QUEUE_HASH_NUM; index++) {
+ ret = ppe_queue_ucast_offset_hash_set(ppe_dev, port_id,
+ index, 0);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* Initialize the service code 1 used by CPU port. */
+static int ppe_servcode_init(struct ppe_device *ppe_dev)
+{
+ struct ppe_sc_cfg sc_cfg = {};
+
+ bitmap_zero(sc_cfg.bitmaps.counter, PPE_SC_BYPASS_COUNTER_SIZE);
+ bitmap_zero(sc_cfg.bitmaps.tunnel, PPE_SC_BYPASS_TUNNEL_SIZE);
+
+ bitmap_fill(sc_cfg.bitmaps.ingress, PPE_SC_BYPASS_INGRESS_SIZE);
+ clear_bit(PPE_SC_BYPASS_INGRESS_FAKE_MAC_HEADER, sc_cfg.bitmaps.ingress);
+ clear_bit(PPE_SC_BYPASS_INGRESS_SERVICE_CODE, sc_cfg.bitmaps.ingress);
+ clear_bit(PPE_SC_BYPASS_INGRESS_FAKE_L2_PROTO, sc_cfg.bitmaps.ingress);
+
+ bitmap_fill(sc_cfg.bitmaps.egress, PPE_SC_BYPASS_EGRESS_SIZE);
+ clear_bit(PPE_SC_BYPASS_EGRESS_ACL_POST_ROUTING_CHECK, sc_cfg.bitmaps.egress);
+
+ return ppe_sc_config_set(ppe_dev, PPE_EDMA_SC_BYPASS_ID, sc_cfg);
+}
+
+/* Initialize PPE port configurations. */
+static int ppe_port_config_init(struct ppe_device *ppe_dev)
+{
+ u32 reg, val, mru_mtu_val[3];
+ int i, ret;
+
+ /* MTU and MRU settings are not required for CPU port 0. */
+ for (i = 1; i < ppe_dev->num_ports; i++) {
+ /* Enable Ethernet port counter */
+ ret = ppe_counter_enable_set(ppe_dev, i);
+ if (ret)
+ return ret;
+
+ reg = PPE_MRU_MTU_CTRL_TBL_ADDR + PPE_MRU_MTU_CTRL_TBL_INC * i;
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
+ if (ret)
+ return ret;
+
+ /* Drop the packet when the packet size is more than the MTU
+ * and redirect the packet to the CPU port when the received
+ * packet size is more than the MRU of the physical interface.
+ */
+ PPE_MRU_MTU_CTRL_SET_MRU_CMD(mru_mtu_val, PPE_ACTION_REDIRECT_TO_CPU);
+ PPE_MRU_MTU_CTRL_SET_MTU_CMD(mru_mtu_val, PPE_ACTION_DROP);
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
+ if (ret)
+ return ret;
+
+ reg = PPE_MC_MTU_CTRL_TBL_ADDR + PPE_MC_MTU_CTRL_TBL_INC * i;
+ val = FIELD_PREP(PPE_MC_MTU_CTRL_TBL_MTU_CMD, PPE_ACTION_DROP);
+ ret = regmap_update_bits(ppe_dev->regmap, reg,
+ PPE_MC_MTU_CTRL_TBL_MTU_CMD,
+ val);
+ if (ret)
+ return ret;
+ }
+
+ /* Enable CPU port counters. */
+ return ppe_counter_enable_set(ppe_dev, 0);
+}
+
+/* Initialize the PPE RSS configuration for IPv4 and IPv6 packet receive.
+ * The RSS settings control the calculation of the RSS hash value generated
+ * during packet receive. This hash is then used to generate the queue offset
+ * that determines the queue used to transmit the packet.
+ */
+static int ppe_rss_hash_init(struct ppe_device *ppe_dev)
+{
+ u16 fins[PPE_RSS_HASH_TUPLES] = { 0x205, 0x264, 0x227, 0x245, 0x201 };
+ u8 ips[PPE_RSS_HASH_IP_LENGTH] = { 0x13, 0xb, 0x13, 0xb };
+ struct ppe_rss_hash_cfg hash_cfg;
+ int i, ret;
+
+ hash_cfg.hash_seed = get_random_u32();
+ hash_cfg.hash_mask = 0xfff;
+
+ /* Use 5 tuple as RSS hash key for the first fragment of TCP, UDP
+ * and UDP-Lite packets.
+ */
+ hash_cfg.hash_fragment_mode = false;
+
+	/* The final common seed configs used to calculate the RSS hash value,
+	 * which is available for both IPv4 and IPv6 packets.
+	 */
+ for (i = 0; i < ARRAY_SIZE(fins); i++) {
+ hash_cfg.hash_fin_inner[i] = fins[i] & 0x1f;
+ hash_cfg.hash_fin_outer[i] = fins[i] >> 5;
+ }
+
+ /* RSS seeds for IP protocol, L4 destination & source port and
+ * destination & source IP used to calculate the RSS hash value.
+ */
+ hash_cfg.hash_protocol_mix = 0x13;
+ hash_cfg.hash_dport_mix = 0xb;
+ hash_cfg.hash_sport_mix = 0x13;
+ hash_cfg.hash_dip_mix[0] = 0xb;
+ hash_cfg.hash_sip_mix[0] = 0x13;
+
+ /* Configure RSS seed configs for IPv4 packet. */
+ ret = ppe_rss_hash_config_set(ppe_dev, PPE_RSS_HASH_MODE_IPV4, hash_cfg);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(ips); i++) {
+ hash_cfg.hash_sip_mix[i] = ips[i];
+ hash_cfg.hash_dip_mix[i] = ips[i];
+ }
+
+ /* Configure RSS seed configs for IPv6 packet. */
+ return ppe_rss_hash_config_set(ppe_dev, PPE_RSS_HASH_MODE_IPV6, hash_cfg);
+}
+
+/* Initialize the mapping from the PPE queues assigned to CPU port 0 to
+ * Ethernet DMA ring 0.
+ */
+static int ppe_queues_to_ring_init(struct ppe_device *ppe_dev)
+{
+ u32 queue_bmap[PPE_RING_TO_QUEUE_BITMAP_WORD_CNT] = {};
+ int ret, queue_id, queue_max;
+
+ ret = ppe_port_resource_get(ppe_dev, 0, PPE_RES_UCAST,
+ &queue_id, &queue_max);
+ if (ret)
+ return ret;
+
+ for (; queue_id <= queue_max; queue_id++)
+ queue_bmap[queue_id / 32] |= BIT_MASK(queue_id % 32);
+
+ return ppe_ring_queue_map_set(ppe_dev, 0, queue_bmap);
+}
+
+/* Initialize PPE bridge settings to only enable L2 frame receive and
+ * transmit between CPU port and PPE Ethernet ports.
+ */
+static int ppe_bridge_init(struct ppe_device *ppe_dev)
+{
+ u32 reg, mask, port_cfg[4], vsi_cfg[2];
+ int ret, i;
+
+ /* Configure the following settings for CPU port0:
+ * a.) Enable Bridge TX
+ * b.) Disable FDB new address learning
+ * c.) Disable station move address learning
+ */
+ mask = PPE_PORT_BRIDGE_TXMAC_EN;
+ mask |= PPE_PORT_BRIDGE_NEW_LRN_EN;
+ mask |= PPE_PORT_BRIDGE_STA_MOVE_LRN_EN;
+ ret = regmap_update_bits(ppe_dev->regmap,
+ PPE_PORT_BRIDGE_CTRL_ADDR,
+ mask,
+ PPE_PORT_BRIDGE_TXMAC_EN);
+ if (ret)
+ return ret;
+
+ for (i = 1; i < ppe_dev->num_ports; i++) {
+ /* Enable invalid VSI forwarding for all the physical ports
+ * to CPU port0, in case no VSI is assigned to the physical
+ * port.
+ */
+ reg = PPE_L2_VP_PORT_TBL_ADDR + PPE_L2_VP_PORT_TBL_INC * i;
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ port_cfg, ARRAY_SIZE(port_cfg));
+ if (ret)
+ return ret;
+
+ PPE_L2_PORT_SET_INVALID_VSI_FWD_EN(port_cfg, true);
+ PPE_L2_PORT_SET_DST_INFO(port_cfg, 0);
+
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ port_cfg, ARRAY_SIZE(port_cfg));
+ if (ret)
+ return ret;
+ }
+
+		/* Set the VSI forward membership to include only CPU port0.
+		 * FDB learning and forwarding will take place only after
+		 * switchdev support is added later to create the VSI and to
+		 * join the physical ports as members of the VSI.
+		 */
+ reg = PPE_VSI_TBL_ADDR + PPE_VSI_TBL_INC * i;
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ vsi_cfg, ARRAY_SIZE(vsi_cfg));
+ if (ret)
+ return ret;
+
+ PPE_VSI_SET_MEMBER_PORT_BITMAP(vsi_cfg, BIT(0));
+ PPE_VSI_SET_UUC_BITMAP(vsi_cfg, BIT(0));
+ PPE_VSI_SET_UMC_BITMAP(vsi_cfg, BIT(0));
+ PPE_VSI_SET_BC_BITMAP(vsi_cfg, BIT(0));
+ PPE_VSI_SET_NEW_ADDR_LRN_EN(vsi_cfg, true);
+ PPE_VSI_SET_NEW_ADDR_FWD_CMD(vsi_cfg, PPE_ACTION_FORWARD);
+ PPE_VSI_SET_STATION_MOVE_LRN_EN(vsi_cfg, true);
+ PPE_VSI_SET_STATION_MOVE_FWD_CMD(vsi_cfg, PPE_ACTION_FORWARD);
+
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ vsi_cfg, ARRAY_SIZE(vsi_cfg));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int ppe_hw_config(struct ppe_device *ppe_dev)
+{
+ int ret;
+
+ ret = ppe_config_bm(ppe_dev);
+ if (ret)
+ return ret;
+
+ ret = ppe_config_qm(ppe_dev);
+ if (ret)
+ return ret;
+
+ ret = ppe_config_scheduler(ppe_dev);
+ if (ret)
+ return ret;
+
+ ret = ppe_queue_dest_init(ppe_dev);
+ if (ret)
+ return ret;
+
+ ret = ppe_servcode_init(ppe_dev);
+ if (ret)
+ return ret;
+
+ ret = ppe_port_config_init(ppe_dev);
+ if (ret)
+ return ret;
+
+ ret = ppe_rss_hash_init(ppe_dev);
+ if (ret)
+ return ret;
+
+ ret = ppe_queues_to_ring_init(ppe_dev);
+ if (ret)
+ return ret;
+
+ return ppe_bridge_init(ppe_dev);
+}
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
new file mode 100644
index 000000000000..4bb45ca40144
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
@@ -0,0 +1,317 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef __PPE_CONFIG_H__
+#define __PPE_CONFIG_H__
+
+#include <linux/types.h>
+
+#include "ppe.h"
+
+/* There are different table index ranges for configuring the queue base ID,
+ * depending on whether it is indexed by destination port, CPU code or
+ * service code.
+ */
+#define PPE_QUEUE_BASE_DEST_PORT 0
+#define PPE_QUEUE_BASE_CPU_CODE 1024
+#define PPE_QUEUE_BASE_SERVICE_CODE 2048
+
+#define PPE_QUEUE_INTER_PRI_NUM 16
+#define PPE_QUEUE_HASH_NUM 256
+
+/* The service code used by the EDMA port to transmit packets to the PPE. */
+#define PPE_EDMA_SC_BYPASS_ID 1
+
+/* The PPE RSS hash is configured separately for IPv4 and IPv6 packets. */
+#define PPE_RSS_HASH_MODE_IPV4 BIT(0)
+#define PPE_RSS_HASH_MODE_IPV6 BIT(1)
+#define PPE_RSS_HASH_IP_LENGTH 4
+#define PPE_RSS_HASH_TUPLES 5
+
+/* The PPE supports 300 queues, with each bit representing one queue. */
+#define PPE_RING_TO_QUEUE_BITMAP_WORD_CNT 10
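+/* Ten 32-bit words provide 320 bits, enough to cover the 300 queues. */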
+
+/**
+ * enum ppe_scheduler_frame_mode - PPE scheduler frame mode.
+ * @PPE_SCH_WITH_IPG_PREAMBLE_FRAME_CRC: The scheduled frame includes IPG,
+ * preamble, Ethernet packet and CRC.
+ * @PPE_SCH_WITH_FRAME_CRC: The scheduled frame includes Ethernet frame and CRC
+ * excluding IPG and preamble.
+ * @PPE_SCH_WITH_L3_PAYLOAD: The scheduled frame includes layer 3 packet data.
+ */
+enum ppe_scheduler_frame_mode {
+ PPE_SCH_WITH_IPG_PREAMBLE_FRAME_CRC = 0,
+ PPE_SCH_WITH_FRAME_CRC = 1,
+ PPE_SCH_WITH_L3_PAYLOAD = 2,
+};
+
+/**
+ * struct ppe_scheduler_cfg - PPE scheduler configuration.
+ * @flow_id: PPE flow ID.
+ * @pri: Scheduler priority.
+ * @drr_node_id: Node ID for scheduled traffic.
+ * @drr_node_wt: Weight for scheduled traffic.
+ * @unit_is_packet: Packet based or byte based unit for scheduled traffic.
+ * @frame_mode: Packet mode to be scheduled.
+ *
+ * PPE scheduler supports commit rate and exceed rate configurations.
+ */
+struct ppe_scheduler_cfg {
+ int flow_id;
+ int pri;
+ int drr_node_id;
+ int drr_node_wt;
+ bool unit_is_packet;
+ enum ppe_scheduler_frame_mode frame_mode;
+};
+
+/**
+ * enum ppe_resource_type - PPE resource type.
+ * @PPE_RES_UCAST: Unicast queue resource.
+ * @PPE_RES_MCAST: Multicast queue resource.
+ * @PPE_RES_L0_NODE: Level 0 for queue based node resource.
+ * @PPE_RES_L1_NODE: Level 1 for flow based node resource.
+ * @PPE_RES_FLOW_ID: Flow based node resource.
+ */
+enum ppe_resource_type {
+ PPE_RES_UCAST,
+ PPE_RES_MCAST,
+ PPE_RES_L0_NODE,
+ PPE_RES_L1_NODE,
+ PPE_RES_FLOW_ID,
+};
+
+/**
+ * struct ppe_queue_ucast_dest - PPE unicast queue destination.
+ * @src_profile: Source profile.
+ * @service_code_en: Enable service code to map the queue base ID.
+ * @service_code: Service code.
+ * @cpu_code_en: Enable CPU code to map the queue base ID.
+ * @cpu_code: CPU code.
+ * @dest_port: Destination port.
+ *
+ * PPE egress queue ID is decided by the service code if enabled, otherwise
+ * by the CPU code if enabled, or by destination port if both service code
+ * and CPU code are disabled.
+ */
+struct ppe_queue_ucast_dest {
+ int src_profile;
+ bool service_code_en;
+ int service_code;
+ bool cpu_code_en;
+ int cpu_code;
+ int dest_port;
+};
+
+/* Hardware bitmaps for bypassing features of the ingress packet. */
+enum ppe_sc_ingress_type {
+ PPE_SC_BYPASS_INGRESS_VLAN_TAG_FMT_CHECK = 0,
+ PPE_SC_BYPASS_INGRESS_VLAN_MEMBER_CHECK = 1,
+ PPE_SC_BYPASS_INGRESS_VLAN_TRANSLATE = 2,
+ PPE_SC_BYPASS_INGRESS_MY_MAC_CHECK = 3,
+ PPE_SC_BYPASS_INGRESS_DIP_LOOKUP = 4,
+ PPE_SC_BYPASS_INGRESS_FLOW_LOOKUP = 5,
+ PPE_SC_BYPASS_INGRESS_FLOW_ACTION = 6,
+ PPE_SC_BYPASS_INGRESS_ACL = 7,
+ PPE_SC_BYPASS_INGRESS_FAKE_MAC_HEADER = 8,
+ PPE_SC_BYPASS_INGRESS_SERVICE_CODE = 9,
+ PPE_SC_BYPASS_INGRESS_WRONG_PKT_FMT_L2 = 10,
+ PPE_SC_BYPASS_INGRESS_WRONG_PKT_FMT_L3_IPV4 = 11,
+ PPE_SC_BYPASS_INGRESS_WRONG_PKT_FMT_L3_IPV6 = 12,
+ PPE_SC_BYPASS_INGRESS_WRONG_PKT_FMT_L4 = 13,
+ PPE_SC_BYPASS_INGRESS_FLOW_SERVICE_CODE = 14,
+ PPE_SC_BYPASS_INGRESS_ACL_SERVICE_CODE = 15,
+ PPE_SC_BYPASS_INGRESS_FAKE_L2_PROTO = 16,
+ PPE_SC_BYPASS_INGRESS_PPPOE_TERMINATION = 17,
+ PPE_SC_BYPASS_INGRESS_DEFAULT_VLAN = 18,
+ PPE_SC_BYPASS_INGRESS_DEFAULT_PCP = 19,
+ PPE_SC_BYPASS_INGRESS_VSI_ASSIGN = 20,
+ /* Values 21-23 are not specified by hardware. */
+ PPE_SC_BYPASS_INGRESS_VLAN_ASSIGN_FAIL = 24,
+ PPE_SC_BYPASS_INGRESS_SOURCE_GUARD = 25,
+ PPE_SC_BYPASS_INGRESS_MRU_MTU_CHECK = 26,
+ PPE_SC_BYPASS_INGRESS_FLOW_SRC_CHECK = 27,
+ PPE_SC_BYPASS_INGRESS_FLOW_QOS = 28,
+ /* This must be last as it determines the size of the BITMAP. */
+ PPE_SC_BYPASS_INGRESS_SIZE,
+};
+
+/* Hardware bitmaps for bypassing features of the egress packet. */
+enum ppe_sc_egress_type {
+ PPE_SC_BYPASS_EGRESS_VLAN_MEMBER_CHECK = 0,
+ PPE_SC_BYPASS_EGRESS_VLAN_TRANSLATE = 1,
+ PPE_SC_BYPASS_EGRESS_VLAN_TAG_FMT_CTRL = 2,
+ PPE_SC_BYPASS_EGRESS_FDB_LEARN = 3,
+ PPE_SC_BYPASS_EGRESS_FDB_REFRESH = 4,
+ PPE_SC_BYPASS_EGRESS_L2_SOURCE_SECURITY = 5,
+ PPE_SC_BYPASS_EGRESS_MANAGEMENT_FWD = 6,
+ PPE_SC_BYPASS_EGRESS_BRIDGING_FWD = 7,
+ PPE_SC_BYPASS_EGRESS_IN_STP_FLTR = 8,
+ PPE_SC_BYPASS_EGRESS_EG_STP_FLTR = 9,
+ PPE_SC_BYPASS_EGRESS_SOURCE_FLTR = 10,
+ PPE_SC_BYPASS_EGRESS_POLICER = 11,
+ PPE_SC_BYPASS_EGRESS_L2_PKT_EDIT = 12,
+ PPE_SC_BYPASS_EGRESS_L3_PKT_EDIT = 13,
+ PPE_SC_BYPASS_EGRESS_ACL_POST_ROUTING_CHECK = 14,
+ PPE_SC_BYPASS_EGRESS_PORT_ISOLATION = 15,
+ PPE_SC_BYPASS_EGRESS_PRE_ACL_QOS = 16,
+ PPE_SC_BYPASS_EGRESS_POST_ACL_QOS = 17,
+ PPE_SC_BYPASS_EGRESS_DSCP_QOS = 18,
+ PPE_SC_BYPASS_EGRESS_PCP_QOS = 19,
+ PPE_SC_BYPASS_EGRESS_PREHEADER_QOS = 20,
+ PPE_SC_BYPASS_EGRESS_FAKE_MAC_DROP = 21,
+ PPE_SC_BYPASS_EGRESS_TUNL_CONTEXT = 22,
+ PPE_SC_BYPASS_EGRESS_FLOW_POLICER = 23,
+ /* This must be last as it determines the size of the BITMAP. */
+ PPE_SC_BYPASS_EGRESS_SIZE,
+};
+
+/* Hardware bitmaps for bypassing the packet counters. */
+enum ppe_sc_counter_type {
+ PPE_SC_BYPASS_COUNTER_RX_VLAN = 0,
+ PPE_SC_BYPASS_COUNTER_RX = 1,
+ PPE_SC_BYPASS_COUNTER_TX_VLAN = 2,
+ PPE_SC_BYPASS_COUNTER_TX = 3,
+ /* This must be last as it determines the size of the BITMAP. */
+ PPE_SC_BYPASS_COUNTER_SIZE,
+};
+
+/* Hardware bitmaps for bypassing features of the tunnel packet. */
+enum ppe_sc_tunnel_type {
+ PPE_SC_BYPASS_TUNNEL_SERVICE_CODE = 0,
+ PPE_SC_BYPASS_TUNNEL_TUNNEL_HANDLE = 1,
+ PPE_SC_BYPASS_TUNNEL_L3_IF_CHECK = 2,
+ PPE_SC_BYPASS_TUNNEL_VLAN_CHECK = 3,
+ PPE_SC_BYPASS_TUNNEL_DMAC_CHECK = 4,
+ PPE_SC_BYPASS_TUNNEL_UDP_CSUM_0_CHECK = 5,
+ PPE_SC_BYPASS_TUNNEL_TBL_DE_ACCE_CHECK = 6,
+ PPE_SC_BYPASS_TUNNEL_PPPOE_MC_TERM_CHECK = 7,
+ PPE_SC_BYPASS_TUNNEL_TTL_EXCEED_CHECK = 8,
+ PPE_SC_BYPASS_TUNNEL_MAP_SRC_CHECK = 9,
+ PPE_SC_BYPASS_TUNNEL_MAP_DST_CHECK = 10,
+ PPE_SC_BYPASS_TUNNEL_LPM_DST_LOOKUP = 11,
+ PPE_SC_BYPASS_TUNNEL_LPM_LOOKUP = 12,
+ PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_L2 = 13,
+ PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_L3_IPV4 = 14,
+ PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_L3_IPV6 = 15,
+ PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_L4 = 16,
+ PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_TUNNEL = 17,
+ /* Values 18-19 are not specified by hardware. */
+ PPE_SC_BYPASS_TUNNEL_PRE_IPO = 20,
+ /* This must be last as it determines the size of the BITMAP. */
+ PPE_SC_BYPASS_TUNNEL_SIZE,
+};
+
+/**
+ * struct ppe_sc_bypass - PPE service bypass bitmaps
+ * @ingress: Bitmap of features that can be bypassed on the ingress packet.
+ * @egress: Bitmap of features that can be bypassed on the egress packet.
+ * @counter: Bitmap of features that can be bypassed on the counter type.
+ * @tunnel: Bitmap of features that can be bypassed on the tunnel packet.
+ */
+struct ppe_sc_bypass {
+ DECLARE_BITMAP(ingress, PPE_SC_BYPASS_INGRESS_SIZE);
+ DECLARE_BITMAP(egress, PPE_SC_BYPASS_EGRESS_SIZE);
+ DECLARE_BITMAP(counter, PPE_SC_BYPASS_COUNTER_SIZE);
+ DECLARE_BITMAP(tunnel, PPE_SC_BYPASS_TUNNEL_SIZE);
+};
+
+/**
+ * struct ppe_sc_cfg - PPE service code configuration.
+ * @dest_port_valid: Generate destination port or not.
+ * @dest_port: Destination port ID.
+ * @bitmaps: Bitmap of bypass features.
+ * @is_src: Destination port acts as source port, packet sent to CPU.
+ * @next_service_code: New service code generated.
+ * @eip_field_update_bitmap: Fields updated as actions taken for EIP.
+ * @eip_hw_service: Selected hardware functions for EIP.
+ * @eip_offset_sel: Packet offset selection, using packet's layer 4 offset
+ * or using packet's layer 3 offset for EIP.
+ *
+ * The service code is generated while the packet passes through the PPE.
+ */
+struct ppe_sc_cfg {
+ bool dest_port_valid;
+ int dest_port;
+ struct ppe_sc_bypass bitmaps;
+ bool is_src;
+ int next_service_code;
+ int eip_field_update_bitmap;
+ int eip_hw_service;
+ int eip_offset_sel;
+};
+
+/**
+ * enum ppe_action_type - PPE action of the received packet.
+ * @PPE_ACTION_FORWARD: Packet forwarded per L2/L3 process.
+ * @PPE_ACTION_DROP: Packet dropped by PPE.
+ * @PPE_ACTION_COPY_TO_CPU: Packet copied to CPU port per multicast queue.
+ * @PPE_ACTION_REDIRECT_TO_CPU: Packet redirected to CPU port per unicast queue.
+ */
+enum ppe_action_type {
+ PPE_ACTION_FORWARD = 0,
+ PPE_ACTION_DROP = 1,
+ PPE_ACTION_COPY_TO_CPU = 2,
+ PPE_ACTION_REDIRECT_TO_CPU = 3,
+};
+
+/**
+ * struct ppe_rss_hash_cfg - PPE RSS hash configuration.
+ * @hash_mask: Mask of the generated hash value.
+ * @hash_fragment_mode: Hash generation mode for the first fragment of TCP,
+ * UDP and UDP-Lite packets, to use either 3 tuple or 5 tuple for RSS hash
+ * key computation.
+ * @hash_seed: Seed to generate RSS hash.
+ * @hash_sip_mix: Source IP selection.
+ * @hash_dip_mix: Destination IP selection.
+ * @hash_protocol_mix: Protocol selection.
+ * @hash_sport_mix: Source L4 port selection.
+ * @hash_dport_mix: Destination L4 port selection.
+ * @hash_fin_inner: RSS hash value first selection.
+ * @hash_fin_outer: RSS hash value second selection.
+ *
+ * PPE RSS hash value is generated for the packet based on the RSS hash
+ * configured.
+ */
+struct ppe_rss_hash_cfg {
+ u32 hash_mask;
+ bool hash_fragment_mode;
+ u32 hash_seed;
+ u8 hash_sip_mix[PPE_RSS_HASH_IP_LENGTH];
+ u8 hash_dip_mix[PPE_RSS_HASH_IP_LENGTH];
+ u8 hash_protocol_mix;
+ u8 hash_sport_mix;
+ u8 hash_dport_mix;
+ u8 hash_fin_inner[PPE_RSS_HASH_TUPLES];
+ u8 hash_fin_outer[PPE_RSS_HASH_TUPLES];
+};
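+
+/* Illustrative sketch only (field values are placeholders): an RSS hash
+ * configuration is passed by value to ppe_rss_hash_config_set(), for example:
+ *
+ *	struct ppe_rss_hash_cfg hash_cfg = {
+ *		.hash_mask = 0xfff,
+ *		.hash_seed = get_random_u32(),
+ *	};
+ *
+ *	err = ppe_rss_hash_config_set(ppe_dev, mode, hash_cfg);
+ */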
+
+int ppe_hw_config(struct ppe_device *ppe_dev);
+int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
+ int node_id, bool flow_level, int port,
+ struct ppe_scheduler_cfg scheduler_cfg);
+int ppe_queue_ucast_base_set(struct ppe_device *ppe_dev,
+ struct ppe_queue_ucast_dest queue_dst,
+ int queue_base,
+ int profile_id);
+int ppe_queue_ucast_offset_pri_set(struct ppe_device *ppe_dev,
+ int profile_id,
+ int priority,
+ int queue_offset);
+int ppe_queue_ucast_offset_hash_set(struct ppe_device *ppe_dev,
+ int profile_id,
+ int rss_hash,
+ int queue_offset);
+int ppe_port_resource_get(struct ppe_device *ppe_dev, int port,
+ enum ppe_resource_type type,
+ int *res_start, int *res_end);
+int ppe_sc_config_set(struct ppe_device *ppe_dev, int sc,
+ struct ppe_sc_cfg cfg);
+int ppe_counter_enable_set(struct ppe_device *ppe_dev, int port);
+int ppe_rss_hash_config_set(struct ppe_device *ppe_dev, int mode,
+ struct ppe_rss_hash_cfg hash_cfg);
+int ppe_ring_queue_map_set(struct ppe_device *ppe_dev,
+ int ring_id,
+ u32 *queue_map);
+#endif
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c b/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c
new file mode 100644
index 000000000000..fd959a76ff43
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c
@@ -0,0 +1,847 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+/* PPE debugfs routines for displaying PPE counters useful for debugging. */
+
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
+#include <linux/dev_printk.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/seq_file.h>
+
+#include "ppe.h"
+#include "ppe_config.h"
+#include "ppe_debugfs.h"
+#include "ppe_regs.h"
+
+#define PPE_PKT_CNT_TBL_SIZE 3
+#define PPE_DROP_PKT_CNT_TBL_SIZE 5
+
+#define PPE_W0_PKT_CNT GENMASK(31, 0)
+#define PPE_W2_DROP_PKT_CNT_LOW GENMASK(31, 8)
+#define PPE_W3_DROP_PKT_CNT_HIGH GENMASK(7, 0)
+
+#define PPE_GET_PKT_CNT(tbl_cnt) \
+ FIELD_GET(PPE_W0_PKT_CNT, *(tbl_cnt))
+#define PPE_GET_DROP_PKT_CNT_LOW(tbl_cnt) \
+ FIELD_GET(PPE_W2_DROP_PKT_CNT_LOW, *((tbl_cnt) + 0x2))
+#define PPE_GET_DROP_PKT_CNT_HIGH(tbl_cnt) \
+ FIELD_GET(PPE_W3_DROP_PKT_CNT_HIGH, *((tbl_cnt) + 0x3))
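+
+/* In the 5-word counter tables, word 0 holds the 32-bit packet count, while
+ * the 32-bit drop count is split across words 2 and 3: bits 31:8 of word 2
+ * carry its low 24 bits and bits 7:0 of word 3 carry its high 8 bits.
+ */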
+
+/**
+ * enum ppe_cnt_size_type - PPE counter size type
+ * @PPE_PKT_CNT_SIZE_1WORD: Counter size with single register
+ * @PPE_PKT_CNT_SIZE_3WORD: Counter size with table of 3 words
+ * @PPE_PKT_CNT_SIZE_5WORD: Counter size with table of 5 words
+ *
+ * PPE uses registers of different sizes to record the packet counters: a
+ * single register, or a register table of 3 or 5 words. The counter with a
+ * 5-word table also records the drop counter. Some other counter types
+ * occupy fewer than 32 bits and are not covered by this enumeration.
+ */
+enum ppe_cnt_size_type {
+ PPE_PKT_CNT_SIZE_1WORD,
+ PPE_PKT_CNT_SIZE_3WORD,
+ PPE_PKT_CNT_SIZE_5WORD,
+};
+
+/**
+ * enum ppe_cnt_type - PPE counter type.
+ * @PPE_CNT_BM: Packet counter processed by BM.
+ * @PPE_CNT_PARSE: Packet counter parsed on ingress.
+ * @PPE_CNT_PORT_RX: Packet counter on the ingress port.
+ * @PPE_CNT_VLAN_RX: VLAN packet counter received.
+ * @PPE_CNT_L2_FWD: Packet counter processed by L2 forwarding.
+ * @PPE_CNT_CPU_CODE: Packet counter marked with various CPU codes.
+ * @PPE_CNT_VLAN_TX: VLAN packet counter transmitted.
+ * @PPE_CNT_PORT_TX: Packet counter on the egress port.
+ * @PPE_CNT_QM: Packet counter processed by QM.
+ */
+enum ppe_cnt_type {
+ PPE_CNT_BM,
+ PPE_CNT_PARSE,
+ PPE_CNT_PORT_RX,
+ PPE_CNT_VLAN_RX,
+ PPE_CNT_L2_FWD,
+ PPE_CNT_CPU_CODE,
+ PPE_CNT_VLAN_TX,
+ PPE_CNT_PORT_TX,
+ PPE_CNT_QM,
+};
+
+/**
+ * struct ppe_debugfs_entry - PPE debugfs entry.
+ * @name: Debugfs file name.
+ * @counter_type: PPE packet counter type.
+ * @ppe: PPE device.
+ *
+ * The PPE debugfs entry is used to create the debugfs file and passed
+ * to debugfs_create_file() as private data.
+ */
+struct ppe_debugfs_entry {
+ const char *name;
+ enum ppe_cnt_type counter_type;
+ struct ppe_device *ppe;
+};
+
+static const struct ppe_debugfs_entry debugfs_files[] = {
+ {
+ .name = "bm",
+ .counter_type = PPE_CNT_BM,
+ },
+ {
+ .name = "parse",
+ .counter_type = PPE_CNT_PARSE,
+ },
+ {
+ .name = "port_rx",
+ .counter_type = PPE_CNT_PORT_RX,
+ },
+ {
+ .name = "vlan_rx",
+ .counter_type = PPE_CNT_VLAN_RX,
+ },
+ {
+ .name = "l2_forward",
+ .counter_type = PPE_CNT_L2_FWD,
+ },
+ {
+ .name = "cpu_code",
+ .counter_type = PPE_CNT_CPU_CODE,
+ },
+ {
+ .name = "vlan_tx",
+ .counter_type = PPE_CNT_VLAN_TX,
+ },
+ {
+ .name = "port_tx",
+ .counter_type = PPE_CNT_PORT_TX,
+ },
+ {
+ .name = "qm",
+ .counter_type = PPE_CNT_QM,
+ },
+};
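+
+/* Each entry above becomes a file under the "ppe" debugfs directory
+ * (typically /sys/kernel/debug/ppe/). Reading a file prints the counters;
+ * writing any value to it clears them.
+ */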
+
+static int ppe_pkt_cnt_get(struct ppe_device *ppe_dev, u32 reg,
+ enum ppe_cnt_size_type cnt_type,
+ u32 *cnt, u32 *drop_cnt)
+{
+ u32 drop_pkt_cnt[PPE_DROP_PKT_CNT_TBL_SIZE];
+ u32 pkt_cnt[PPE_PKT_CNT_TBL_SIZE];
+ u32 value;
+ int ret;
+
+ switch (cnt_type) {
+ case PPE_PKT_CNT_SIZE_1WORD:
+ ret = regmap_read(ppe_dev->regmap, reg, &value);
+ if (ret)
+ return ret;
+
+ *cnt = value;
+ break;
+ case PPE_PKT_CNT_SIZE_3WORD:
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ pkt_cnt, ARRAY_SIZE(pkt_cnt));
+ if (ret)
+ return ret;
+
+ *cnt = PPE_GET_PKT_CNT(pkt_cnt);
+ break;
+ case PPE_PKT_CNT_SIZE_5WORD:
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ drop_pkt_cnt, ARRAY_SIZE(drop_pkt_cnt));
+ if (ret)
+ return ret;
+
+ *cnt = PPE_GET_PKT_CNT(drop_pkt_cnt);
+
+ /* Drop counter with low 24 bits. */
+ value = PPE_GET_DROP_PKT_CNT_LOW(drop_pkt_cnt);
+ *drop_cnt = FIELD_PREP(GENMASK(23, 0), value);
+
+ /* Drop counter with high 8 bits. */
+ value = PPE_GET_DROP_PKT_CNT_HIGH(drop_pkt_cnt);
+ *drop_cnt |= FIELD_PREP(GENMASK(31, 24), value);
+ break;
+ }
+
+ return 0;
+}
+
+static void ppe_tbl_pkt_cnt_clear(struct ppe_device *ppe_dev, u32 reg,
+ enum ppe_cnt_size_type cnt_type)
+{
+ u32 drop_pkt_cnt[PPE_DROP_PKT_CNT_TBL_SIZE] = {};
+ u32 pkt_cnt[PPE_PKT_CNT_TBL_SIZE] = {};
+
+ switch (cnt_type) {
+ case PPE_PKT_CNT_SIZE_1WORD:
+ regmap_write(ppe_dev->regmap, reg, 0);
+ break;
+ case PPE_PKT_CNT_SIZE_3WORD:
+ regmap_bulk_write(ppe_dev->regmap, reg,
+ pkt_cnt, ARRAY_SIZE(pkt_cnt));
+ break;
+ case PPE_PKT_CNT_SIZE_5WORD:
+ regmap_bulk_write(ppe_dev->regmap, reg,
+ drop_pkt_cnt, ARRAY_SIZE(drop_pkt_cnt));
+ break;
+ }
+}
+
+static int ppe_bm_counter_get(struct ppe_device *ppe_dev, struct seq_file *seq)
+{
+ u32 reg, val, pkt_cnt, pkt_cnt1;
+ int ret, i, tag;
+
+ seq_printf(seq, "%-24s", "BM SILENT_DROP:");
+ tag = 0;
+ for (i = 0; i < PPE_DROP_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_DROP_CNT_TBL_ADDR + i * PPE_DROP_CNT_TBL_INC;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD,
+ &pkt_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u(%s=%04d)", pkt_cnt, "port", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ /* The number of packets dropped because hardware buffers were
+	 * only partially available for the packet.
+ */
+ seq_printf(seq, "%-24s", "BM OVERFLOW_DROP:");
+ tag = 0;
+ for (i = 0; i < PPE_DROP_STAT_TBL_ENTRIES; i++) {
+ reg = PPE_DROP_STAT_TBL_ADDR + PPE_DROP_STAT_TBL_INC * i;
+
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &pkt_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u(%s=%04d)", pkt_cnt, "port", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+	/* The number of currently occupied buffers that can't be flushed. */
+ seq_printf(seq, "%-24s", "BM USED/REACT:");
+ tag = 0;
+ for (i = 0; i < PPE_BM_USED_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_BM_USED_CNT_TBL_ADDR + i * PPE_BM_USED_CNT_TBL_INC;
+ ret = regmap_read(ppe_dev->regmap, reg, &val);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ /* The number of PPE buffers used for caching the received
+		 * packets before the pause frame is sent.
+ */
+ pkt_cnt = FIELD_GET(PPE_BM_USED_CNT_VAL, val);
+
+ reg = PPE_BM_REACT_CNT_TBL_ADDR + i * PPE_BM_REACT_CNT_TBL_INC;
+ ret = regmap_read(ppe_dev->regmap, reg, &val);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ /* The number of PPE buffers used for caching the received
+		 * packets after the pause frame has been sent.
+ */
+ pkt_cnt1 = FIELD_GET(PPE_BM_REACT_CNT_VAL, val);
+
+ if (pkt_cnt > 0 || pkt_cnt1 > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u/%u(%s=%04d)", pkt_cnt, pkt_cnt1,
+ "port", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* The number of packets processed by the ingress parser module of PPE. */
+static int ppe_parse_pkt_counter_get(struct ppe_device *ppe_dev,
+ struct seq_file *seq)
+{
+ u32 reg, cnt = 0, tunnel_cnt = 0;
+ int i, ret, tag = 0;
+
+ seq_printf(seq, "%-24s", "PARSE TPRX/IPRX:");
+ for (i = 0; i < PPE_IPR_PKT_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_TPR_PKT_CNT_TBL_ADDR + i * PPE_TPR_PKT_CNT_TBL_INC;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD,
+ &tunnel_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ reg = PPE_IPR_PKT_CNT_TBL_ADDR + i * PPE_IPR_PKT_CNT_TBL_INC;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD,
+ &cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (tunnel_cnt > 0 || cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u/%u(%s=%04d)", tunnel_cnt, cnt,
+ "port", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* The number of packets received or dropped on the ingress port. */
+static int ppe_port_rx_counter_get(struct ppe_device *ppe_dev,
+ struct seq_file *seq)
+{
+ u32 reg, pkt_cnt = 0, drop_cnt = 0;
+ int ret, i, tag;
+
+ seq_printf(seq, "%-24s", "PORT RX/RX_DROP:");
+ tag = 0;
+ for (i = 0; i < PPE_PHY_PORT_RX_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_PHY_PORT_RX_CNT_TBL_ADDR + PPE_PHY_PORT_RX_CNT_TBL_INC * i;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD,
+ &pkt_cnt, &drop_cnt);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u/%u(%s=%04d)", pkt_cnt, drop_cnt,
+ "port", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ seq_printf(seq, "%-24s", "VPORT RX/RX_DROP:");
+ tag = 0;
+ for (i = 0; i < PPE_PORT_RX_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_PORT_RX_CNT_TBL_ADDR + PPE_PORT_RX_CNT_TBL_INC * i;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD,
+ &pkt_cnt, &drop_cnt);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u/%u(%s=%04d)", pkt_cnt, drop_cnt,
+ "port", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* The number of packets received or dropped by layer 2 processing. */
+static int ppe_l2_counter_get(struct ppe_device *ppe_dev,
+ struct seq_file *seq)
+{
+ u32 reg, pkt_cnt = 0, drop_cnt = 0;
+ int ret, i, tag = 0;
+
+ seq_printf(seq, "%-24s", "L2 RX/RX_DROP:");
+ for (i = 0; i < PPE_PRE_L2_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_PRE_L2_CNT_TBL_ADDR + PPE_PRE_L2_CNT_TBL_INC * i;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD,
+ &pkt_cnt, &drop_cnt);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u/%u(%s=%04d)", pkt_cnt, drop_cnt,
+ "vsi", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* The number of VLAN packets received by PPE. */
+static int ppe_vlan_rx_counter_get(struct ppe_device *ppe_dev,
+ struct seq_file *seq)
+{
+ u32 reg, pkt_cnt = 0;
+ int ret, i, tag = 0;
+
+ seq_printf(seq, "%-24s", "VLAN RX:");
+ for (i = 0; i < PPE_VLAN_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_VLAN_CNT_TBL_ADDR + PPE_VLAN_CNT_TBL_INC * i;
+
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &pkt_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u(%s=%04d)", pkt_cnt, "vsi", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* The number of packets handed to CPU by PPE. */
+static int ppe_cpu_code_counter_get(struct ppe_device *ppe_dev,
+ struct seq_file *seq)
+{
+ u32 reg, pkt_cnt = 0;
+ int ret, i;
+
+ seq_printf(seq, "%-24s", "CPU CODE:");
+ for (i = 0; i < PPE_DROP_CPU_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_DROP_CPU_CNT_TBL_ADDR + PPE_DROP_CPU_CNT_TBL_INC * i;
+
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &pkt_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (!pkt_cnt)
+ continue;
+
+		/* There are 256 CPU codes saved in the first 256 entries
+		 * of the register table, plus 128 drop codes for each of the
+		 * eight PPE ports (0-7), so the total number of entries is
+		 * 256 + 8 * 128 = 1280.
+ */
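+		/* For example, entry 256 corresponds to port 0 with drop
+		 * code 0, entry 257 to port 1 with drop code 0, and entry
+		 * 264 to port 0 with drop code 1.
+		 */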
+ if (i < 256)
+ seq_printf(seq, "%10u(cpucode:%d)", pkt_cnt, i);
+ else
+ seq_printf(seq, "%10u(port=%04d),dropcode:%d", pkt_cnt,
+ (i - 256) % 8, (i - 256) / 8);
+ seq_putc(seq, '\n');
+ seq_printf(seq, "%-24s", "");
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* The number of packets forwarded per VLAN in the egress direction. */
+static int ppe_vlan_tx_counter_get(struct ppe_device *ppe_dev,
+ struct seq_file *seq)
+{
+ u32 reg, pkt_cnt = 0;
+ int ret, i, tag = 0;
+
+ seq_printf(seq, "%-24s", "VLAN TX:");
+ for (i = 0; i < PPE_EG_VSI_COUNTER_TBL_ENTRIES; i++) {
+ reg = PPE_EG_VSI_COUNTER_TBL_ADDR + PPE_EG_VSI_COUNTER_TBL_INC * i;
+
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &pkt_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u(%s=%04d)", pkt_cnt, "vsi", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* The number of packets transmitted or dropped on the egress port. */
+static int ppe_port_tx_counter_get(struct ppe_device *ppe_dev,
+ struct seq_file *seq)
+{
+ u32 reg, pkt_cnt = 0, drop_cnt = 0;
+ int ret, i, tag;
+
+ seq_printf(seq, "%-24s", "VPORT TX/TX_DROP:");
+ tag = 0;
+ for (i = 0; i < PPE_VPORT_TX_COUNTER_TBL_ENTRIES; i++) {
+ reg = PPE_VPORT_TX_COUNTER_TBL_ADDR + PPE_VPORT_TX_COUNTER_TBL_INC * i;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &pkt_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ reg = PPE_VPORT_TX_DROP_CNT_TBL_ADDR + PPE_VPORT_TX_DROP_CNT_TBL_INC * i;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &drop_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0 || drop_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u/%u(%s=%04d)", pkt_cnt, drop_cnt,
+ "port", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ seq_printf(seq, "%-24s", "PORT TX/TX_DROP:");
+ tag = 0;
+ for (i = 0; i < PPE_PORT_TX_COUNTER_TBL_ENTRIES; i++) {
+ reg = PPE_PORT_TX_COUNTER_TBL_ADDR + PPE_PORT_TX_COUNTER_TBL_INC * i;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &pkt_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ reg = PPE_PORT_TX_DROP_CNT_TBL_ADDR + PPE_PORT_TX_DROP_CNT_TBL_INC * i;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &drop_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0 || drop_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u/%u(%s=%04d)", pkt_cnt, drop_cnt,
+ "port", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* The number of packets transmitted, pending or dropped on the PPE queues. */
+static int ppe_queue_counter_get(struct ppe_device *ppe_dev,
+ struct seq_file *seq)
+{
+ u32 reg, val, pkt_cnt = 0, pend_cnt = 0, drop_cnt = 0;
+ int ret, i, tag = 0;
+
+ seq_printf(seq, "%-24s", "QUEUE TX/PEND/DROP:");
+ for (i = 0; i < PPE_QUEUE_TX_COUNTER_TBL_ENTRIES; i++) {
+ reg = PPE_QUEUE_TX_COUNTER_TBL_ADDR + PPE_QUEUE_TX_COUNTER_TBL_INC * i;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &pkt_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (i < PPE_AC_UNICAST_QUEUE_CFG_TBL_ENTRIES) {
+ reg = PPE_AC_UNICAST_QUEUE_CNT_TBL_ADDR +
+ PPE_AC_UNICAST_QUEUE_CNT_TBL_INC * i;
+ ret = regmap_read(ppe_dev->regmap, reg, &val);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ pend_cnt = FIELD_GET(PPE_AC_UNICAST_QUEUE_CNT_TBL_PEND_CNT, val);
+
+ reg = PPE_UNICAST_DROP_CNT_TBL_ADDR +
+ PPE_AC_UNICAST_QUEUE_CNT_TBL_INC *
+ (i * PPE_UNICAST_DROP_TYPES + PPE_UNICAST_DROP_FORCE_OFFSET);
+
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &drop_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+ } else {
+ int mq_offset = i - PPE_AC_UNICAST_QUEUE_CFG_TBL_ENTRIES;
+
+ reg = PPE_AC_MULTICAST_QUEUE_CNT_TBL_ADDR +
+ PPE_AC_MULTICAST_QUEUE_CNT_TBL_INC * mq_offset;
+ ret = regmap_read(ppe_dev->regmap, reg, &val);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ pend_cnt = FIELD_GET(PPE_AC_MULTICAST_QUEUE_CNT_TBL_PEND_CNT, val);
+
+ if (mq_offset < PPE_P0_MULTICAST_QUEUE_NUM) {
+ reg = PPE_CPU_PORT_MULTICAST_FORCE_DROP_CNT_TBL_ADDR(mq_offset);
+ } else {
+ mq_offset -= PPE_P0_MULTICAST_QUEUE_NUM;
+
+ reg = PPE_P1_MULTICAST_DROP_CNT_TBL_ADDR;
+ reg += (mq_offset / PPE_MULTICAST_QUEUE_NUM) *
+ PPE_MULTICAST_QUEUE_PORT_ADDR_INC;
+ reg += (mq_offset % PPE_MULTICAST_QUEUE_NUM) *
+ PPE_MULTICAST_DROP_CNT_TBL_INC *
+ PPE_MULTICAST_DROP_TYPES;
+ }
+
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &drop_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (pkt_cnt > 0 || pend_cnt > 0 || drop_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u/%u/%u(%s=%04d)",
+ pkt_cnt, pend_cnt, drop_cnt, "queue", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* Display the various packet counters of PPE. */
+static int ppe_packet_counter_show(struct seq_file *seq, void *v)
+{
+ struct ppe_debugfs_entry *entry = seq->private;
+ struct ppe_device *ppe_dev = entry->ppe;
+ int ret;
+
+ switch (entry->counter_type) {
+ case PPE_CNT_BM:
+ ret = ppe_bm_counter_get(ppe_dev, seq);
+ break;
+ case PPE_CNT_PARSE:
+ ret = ppe_parse_pkt_counter_get(ppe_dev, seq);
+ break;
+ case PPE_CNT_PORT_RX:
+ ret = ppe_port_rx_counter_get(ppe_dev, seq);
+ break;
+ case PPE_CNT_VLAN_RX:
+ ret = ppe_vlan_rx_counter_get(ppe_dev, seq);
+ break;
+ case PPE_CNT_L2_FWD:
+ ret = ppe_l2_counter_get(ppe_dev, seq);
+ break;
+ case PPE_CNT_CPU_CODE:
+ ret = ppe_cpu_code_counter_get(ppe_dev, seq);
+ break;
+ case PPE_CNT_VLAN_TX:
+ ret = ppe_vlan_tx_counter_get(ppe_dev, seq);
+ break;
+ case PPE_CNT_PORT_TX:
+ ret = ppe_port_tx_counter_get(ppe_dev, seq);
+ break;
+ case PPE_CNT_QM:
+ ret = ppe_queue_counter_get(ppe_dev, seq);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* Flush the various packet counters of PPE. */
+static ssize_t ppe_packet_counter_write(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct ppe_debugfs_entry *entry = file_inode(file)->i_private;
+ struct ppe_device *ppe_dev = entry->ppe;
+ u32 reg;
+ int i;
+
+ switch (entry->counter_type) {
+ case PPE_CNT_BM:
+ for (i = 0; i < PPE_DROP_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_DROP_CNT_TBL_ADDR + i * PPE_DROP_CNT_TBL_INC;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD);
+ }
+
+ for (i = 0; i < PPE_DROP_STAT_TBL_ENTRIES; i++) {
+ reg = PPE_DROP_STAT_TBL_ADDR + PPE_DROP_STAT_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+ }
+
+ break;
+ case PPE_CNT_PARSE:
+ for (i = 0; i < PPE_IPR_PKT_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_IPR_PKT_CNT_TBL_ADDR + i * PPE_IPR_PKT_CNT_TBL_INC;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD);
+
+ reg = PPE_TPR_PKT_CNT_TBL_ADDR + i * PPE_TPR_PKT_CNT_TBL_INC;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD);
+ }
+
+ break;
+ case PPE_CNT_PORT_RX:
+ for (i = 0; i < PPE_PORT_RX_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_PORT_RX_CNT_TBL_ADDR + PPE_PORT_RX_CNT_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD);
+ }
+
+ for (i = 0; i < PPE_PHY_PORT_RX_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_PHY_PORT_RX_CNT_TBL_ADDR + PPE_PHY_PORT_RX_CNT_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD);
+ }
+
+ break;
+ case PPE_CNT_VLAN_RX:
+ for (i = 0; i < PPE_VLAN_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_VLAN_CNT_TBL_ADDR + PPE_VLAN_CNT_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+ }
+
+ break;
+ case PPE_CNT_L2_FWD:
+ for (i = 0; i < PPE_PRE_L2_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_PRE_L2_CNT_TBL_ADDR + PPE_PRE_L2_CNT_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD);
+ }
+
+ break;
+ case PPE_CNT_CPU_CODE:
+ for (i = 0; i < PPE_DROP_CPU_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_DROP_CPU_CNT_TBL_ADDR + PPE_DROP_CPU_CNT_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+ }
+
+ break;
+ case PPE_CNT_VLAN_TX:
+ for (i = 0; i < PPE_EG_VSI_COUNTER_TBL_ENTRIES; i++) {
+ reg = PPE_EG_VSI_COUNTER_TBL_ADDR + PPE_EG_VSI_COUNTER_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+ }
+
+ break;
+ case PPE_CNT_PORT_TX:
+ for (i = 0; i < PPE_PORT_TX_COUNTER_TBL_ENTRIES; i++) {
+ reg = PPE_PORT_TX_DROP_CNT_TBL_ADDR + PPE_PORT_TX_DROP_CNT_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+
+ reg = PPE_PORT_TX_COUNTER_TBL_ADDR + PPE_PORT_TX_COUNTER_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+ }
+
+ for (i = 0; i < PPE_VPORT_TX_COUNTER_TBL_ENTRIES; i++) {
+ reg = PPE_VPORT_TX_COUNTER_TBL_ADDR + PPE_VPORT_TX_COUNTER_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+
+ reg = PPE_VPORT_TX_DROP_CNT_TBL_ADDR + PPE_VPORT_TX_DROP_CNT_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+ }
+
+ break;
+ case PPE_CNT_QM:
+ for (i = 0; i < PPE_QUEUE_TX_COUNTER_TBL_ENTRIES; i++) {
+ reg = PPE_QUEUE_TX_COUNTER_TBL_ADDR + PPE_QUEUE_TX_COUNTER_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ return count;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(ppe_packet_counter);
+
+void ppe_debugfs_setup(struct ppe_device *ppe_dev)
+{
+ struct ppe_debugfs_entry *entry;
+ int i;
+
+ ppe_dev->debugfs_root = debugfs_create_dir("ppe", NULL);
+ if (IS_ERR(ppe_dev->debugfs_root))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_files); i++) {
+ entry = devm_kzalloc(ppe_dev->dev, sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return;
+
+ entry->ppe = ppe_dev;
+ entry->counter_type = debugfs_files[i].counter_type;
+
+ debugfs_create_file(debugfs_files[i].name, 0444,
+ ppe_dev->debugfs_root, entry,
+ &ppe_packet_counter_fops);
+ }
+}
+
+void ppe_debugfs_teardown(struct ppe_device *ppe_dev)
+{
+ debugfs_remove_recursive(ppe_dev->debugfs_root);
+ ppe_dev->debugfs_root = NULL;
+}
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.h b/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.h
new file mode 100644
index 000000000000..81f49a709123
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+/* PPE debugfs counters setup. */
+
+#ifndef __PPE_DEBUGFS_H__
+#define __PPE_DEBUGFS_H__
+
+#include "ppe.h"
+
+void ppe_debugfs_setup(struct ppe_device *ppe_dev);
+void ppe_debugfs_teardown(struct ppe_device *ppe_dev);
+
+#endif
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
new file mode 100644
index 000000000000..746dfbb5a682
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
@@ -0,0 +1,591 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+/* PPE hardware register and table declarations. */
+#ifndef __PPE_REGS_H__
+#define __PPE_REGS_H__
+
+#include <linux/bitfield.h>
+
+/* PPE scheduler configurations for buffer manager block. */
+#define PPE_BM_SCH_CTRL_ADDR 0xb000
+#define PPE_BM_SCH_CTRL_INC 4
+#define PPE_BM_SCH_CTRL_SCH_DEPTH GENMASK(7, 0)
+#define PPE_BM_SCH_CTRL_SCH_OFFSET GENMASK(14, 8)
+#define PPE_BM_SCH_CTRL_SCH_EN BIT(31)
+
+/* PPE drop counters. */
+#define PPE_DROP_CNT_TBL_ADDR 0xb024
+#define PPE_DROP_CNT_TBL_ENTRIES 8
+#define PPE_DROP_CNT_TBL_INC 4
+
+/* BM port drop counters. */
+#define PPE_DROP_STAT_TBL_ADDR 0xe000
+#define PPE_DROP_STAT_TBL_ENTRIES 30
+#define PPE_DROP_STAT_TBL_INC 0x10
+
+/* Egress VLAN counters. */
+#define PPE_EG_VSI_COUNTER_TBL_ADDR 0x41000
+#define PPE_EG_VSI_COUNTER_TBL_ENTRIES 64
+#define PPE_EG_VSI_COUNTER_TBL_INC 0x10
+
+/* Port TX counters. */
+#define PPE_PORT_TX_COUNTER_TBL_ADDR 0x45000
+#define PPE_PORT_TX_COUNTER_TBL_ENTRIES 8
+#define PPE_PORT_TX_COUNTER_TBL_INC 0x10
+
+/* Virtual port TX counters. */
+#define PPE_VPORT_TX_COUNTER_TBL_ADDR 0x47000
+#define PPE_VPORT_TX_COUNTER_TBL_ENTRIES 256
+#define PPE_VPORT_TX_COUNTER_TBL_INC 0x10
+
+/* Queue counters. */
+#define PPE_QUEUE_TX_COUNTER_TBL_ADDR 0x4a000
+#define PPE_QUEUE_TX_COUNTER_TBL_ENTRIES 300
+#define PPE_QUEUE_TX_COUNTER_TBL_INC 0x10
+
+/* RSS settings control how the RSS hash value is calculated for packets
+ * received towards the ARM cores. The hash is then used to generate the queue
+ * offset, which selects the queue used to deliver the packet to the ARM cores.
+ */
+#define PPE_RSS_HASH_MASK_ADDR 0xb4318
+#define PPE_RSS_HASH_MASK_HASH_MASK GENMASK(20, 0)
+#define PPE_RSS_HASH_MASK_FRAGMENT BIT(28)
+
+#define PPE_RSS_HASH_SEED_ADDR 0xb431c
+#define PPE_RSS_HASH_SEED_VAL GENMASK(31, 0)
+
+#define PPE_RSS_HASH_MIX_ADDR 0xb4320
+#define PPE_RSS_HASH_MIX_ENTRIES 11
+#define PPE_RSS_HASH_MIX_INC 4
+#define PPE_RSS_HASH_MIX_VAL GENMASK(4, 0)
+
+#define PPE_RSS_HASH_FIN_ADDR 0xb4350
+#define PPE_RSS_HASH_FIN_ENTRIES 5
+#define PPE_RSS_HASH_FIN_INC 4
+#define PPE_RSS_HASH_FIN_INNER GENMASK(4, 0)
+#define PPE_RSS_HASH_FIN_OUTER GENMASK(9, 5)
+
+#define PPE_RSS_HASH_MASK_IPV4_ADDR 0xb4380
+#define PPE_RSS_HASH_MASK_IPV4_HASH_MASK GENMASK(20, 0)
+#define PPE_RSS_HASH_MASK_IPV4_FRAGMENT BIT(28)
+
+#define PPE_RSS_HASH_SEED_IPV4_ADDR 0xb4384
+#define PPE_RSS_HASH_SEED_IPV4_VAL GENMASK(31, 0)
+
+#define PPE_RSS_HASH_MIX_IPV4_ADDR 0xb4390
+#define PPE_RSS_HASH_MIX_IPV4_ENTRIES 5
+#define PPE_RSS_HASH_MIX_IPV4_INC 4
+#define PPE_RSS_HASH_MIX_IPV4_VAL GENMASK(4, 0)
+
+#define PPE_RSS_HASH_FIN_IPV4_ADDR 0xb43b0
+#define PPE_RSS_HASH_FIN_IPV4_ENTRIES 5
+#define PPE_RSS_HASH_FIN_IPV4_INC 4
+#define PPE_RSS_HASH_FIN_IPV4_INNER GENMASK(4, 0)
+#define PPE_RSS_HASH_FIN_IPV4_OUTER GENMASK(9, 5)
+
+#define PPE_BM_SCH_CFG_TBL_ADDR 0xc000
+#define PPE_BM_SCH_CFG_TBL_ENTRIES 128
+#define PPE_BM_SCH_CFG_TBL_INC 0x10
+#define PPE_BM_SCH_CFG_TBL_PORT_NUM GENMASK(3, 0)
+#define PPE_BM_SCH_CFG_TBL_DIR BIT(4)
+#define PPE_BM_SCH_CFG_TBL_VALID BIT(5)
+#define PPE_BM_SCH_CFG_TBL_SECOND_PORT_VALID BIT(6)
+#define PPE_BM_SCH_CFG_TBL_SECOND_PORT GENMASK(11, 8)
+
+/* PPE service code configuration for the ingress direction functions,
+ * including bypass configuration for relevant PPE switch core functions
+ * such as flow entry lookup bypass.
+ */
+#define PPE_SERVICE_TBL_ADDR 0x15000
+#define PPE_SERVICE_TBL_ENTRIES 256
+#define PPE_SERVICE_TBL_INC 0x10
+#define PPE_SERVICE_W0_BYPASS_BITMAP GENMASK(31, 0)
+#define PPE_SERVICE_W1_RX_CNT_EN BIT(0)
+
+#define PPE_SERVICE_SET_BYPASS_BITMAP(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_SERVICE_W0_BYPASS_BITMAP, tbl_cfg, value)
+#define PPE_SERVICE_SET_RX_CNT_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_SERVICE_W1_RX_CNT_EN, (tbl_cfg) + 0x1, value)
+
+/* PPE port egress VLAN configurations. */
+#define PPE_PORT_EG_VLAN_TBL_ADDR 0x20020
+#define PPE_PORT_EG_VLAN_TBL_ENTRIES 8
+#define PPE_PORT_EG_VLAN_TBL_INC 4
+#define PPE_PORT_EG_VLAN_TBL_VLAN_TYPE BIT(0)
+#define PPE_PORT_EG_VLAN_TBL_CTAG_MODE GENMASK(2, 1)
+#define PPE_PORT_EG_VLAN_TBL_STAG_MODE GENMASK(4, 3)
+#define PPE_PORT_EG_VLAN_TBL_VSI_TAG_MODE_EN BIT(5)
+#define PPE_PORT_EG_VLAN_TBL_PCP_PROP_CMD BIT(6)
+#define PPE_PORT_EG_VLAN_TBL_DEI_PROP_CMD BIT(7)
+#define PPE_PORT_EG_VLAN_TBL_TX_COUNTING_EN BIT(8)
+
+/* PPE queue counters enable/disable control. */
+#define PPE_EG_BRIDGE_CONFIG_ADDR 0x20044
+#define PPE_EG_BRIDGE_CONFIG_QUEUE_CNT_EN BIT(2)
+
+/* PPE service code configuration on the egress direction. */
+#define PPE_EG_SERVICE_TBL_ADDR 0x43000
+#define PPE_EG_SERVICE_TBL_ENTRIES 256
+#define PPE_EG_SERVICE_TBL_INC 0x10
+#define PPE_EG_SERVICE_W0_UPDATE_ACTION GENMASK(31, 0)
+#define PPE_EG_SERVICE_W1_NEXT_SERVCODE GENMASK(7, 0)
+#define PPE_EG_SERVICE_W1_HW_SERVICE GENMASK(13, 8)
+#define PPE_EG_SERVICE_W1_OFFSET_SEL BIT(14)
+#define PPE_EG_SERVICE_W1_TX_CNT_EN BIT(15)
+
+#define PPE_EG_SERVICE_SET_UPDATE_ACTION(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_EG_SERVICE_W0_UPDATE_ACTION, tbl_cfg, value)
+#define PPE_EG_SERVICE_SET_NEXT_SERVCODE(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_EG_SERVICE_W1_NEXT_SERVCODE, (tbl_cfg) + 0x1, value)
+#define PPE_EG_SERVICE_SET_HW_SERVICE(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_EG_SERVICE_W1_HW_SERVICE, (tbl_cfg) + 0x1, value)
+#define PPE_EG_SERVICE_SET_OFFSET_SEL(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_EG_SERVICE_W1_OFFSET_SEL, (tbl_cfg) + 0x1, value)
+#define PPE_EG_SERVICE_SET_TX_CNT_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_EG_SERVICE_W1_TX_CNT_EN, (tbl_cfg) + 0x1, value)
+
+/* PPE port bridge configuration */
+#define PPE_PORT_BRIDGE_CTRL_ADDR 0x60300
+#define PPE_PORT_BRIDGE_CTRL_ENTRIES 8
+#define PPE_PORT_BRIDGE_CTRL_INC 4
+#define PPE_PORT_BRIDGE_NEW_LRN_EN BIT(0)
+#define PPE_PORT_BRIDGE_STA_MOVE_LRN_EN BIT(3)
+#define PPE_PORT_BRIDGE_TXMAC_EN BIT(16)
+
+/* PPE port control configurations for the traffic to the multicast queues. */
+#define PPE_MC_MTU_CTRL_TBL_ADDR 0x60a00
+#define PPE_MC_MTU_CTRL_TBL_ENTRIES 8
+#define PPE_MC_MTU_CTRL_TBL_INC 4
+#define PPE_MC_MTU_CTRL_TBL_MTU GENMASK(13, 0)
+#define PPE_MC_MTU_CTRL_TBL_MTU_CMD GENMASK(15, 14)
+#define PPE_MC_MTU_CTRL_TBL_TX_CNT_EN BIT(16)
+
+/* PPE VSI configurations */
+#define PPE_VSI_TBL_ADDR 0x63800
+#define PPE_VSI_TBL_ENTRIES 64
+#define PPE_VSI_TBL_INC 0x10
+#define PPE_VSI_W0_MEMBER_PORT_BITMAP GENMASK(7, 0)
+#define PPE_VSI_W0_UUC_BITMAP GENMASK(15, 8)
+#define PPE_VSI_W0_UMC_BITMAP GENMASK(23, 16)
+#define PPE_VSI_W0_BC_BITMAP GENMASK(31, 24)
+#define PPE_VSI_W1_NEW_ADDR_LRN_EN BIT(0)
+#define PPE_VSI_W1_NEW_ADDR_FWD_CMD GENMASK(2, 1)
+#define PPE_VSI_W1_STATION_MOVE_LRN_EN BIT(3)
+#define PPE_VSI_W1_STATION_MOVE_FWD_CMD GENMASK(5, 4)
+
+#define PPE_VSI_SET_MEMBER_PORT_BITMAP(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_VSI_W0_MEMBER_PORT_BITMAP, tbl_cfg, value)
+#define PPE_VSI_SET_UUC_BITMAP(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_VSI_W0_UUC_BITMAP, tbl_cfg, value)
+#define PPE_VSI_SET_UMC_BITMAP(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_VSI_W0_UMC_BITMAP, tbl_cfg, value)
+#define PPE_VSI_SET_BC_BITMAP(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_VSI_W0_BC_BITMAP, tbl_cfg, value)
+#define PPE_VSI_SET_NEW_ADDR_LRN_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_VSI_W1_NEW_ADDR_LRN_EN, (tbl_cfg) + 0x1, value)
+#define PPE_VSI_SET_NEW_ADDR_FWD_CMD(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_VSI_W1_NEW_ADDR_FWD_CMD, (tbl_cfg) + 0x1, value)
+#define PPE_VSI_SET_STATION_MOVE_LRN_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_VSI_W1_STATION_MOVE_LRN_EN, (tbl_cfg) + 0x1, value)
+#define PPE_VSI_SET_STATION_MOVE_FWD_CMD(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_VSI_W1_STATION_MOVE_FWD_CMD, (tbl_cfg) + 0x1, value)
+
+/* PPE port control configurations for the traffic to the unicast queues. */
+#define PPE_MRU_MTU_CTRL_TBL_ADDR 0x65000
+#define PPE_MRU_MTU_CTRL_TBL_ENTRIES 256
+#define PPE_MRU_MTU_CTRL_TBL_INC 0x10
+#define PPE_MRU_MTU_CTRL_W0_MRU GENMASK(13, 0)
+#define PPE_MRU_MTU_CTRL_W0_MRU_CMD GENMASK(15, 14)
+#define PPE_MRU_MTU_CTRL_W0_MTU GENMASK(29, 16)
+#define PPE_MRU_MTU_CTRL_W0_MTU_CMD GENMASK(31, 30)
+#define PPE_MRU_MTU_CTRL_W1_RX_CNT_EN BIT(0)
+#define PPE_MRU_MTU_CTRL_W1_TX_CNT_EN BIT(1)
+#define PPE_MRU_MTU_CTRL_W1_SRC_PROFILE GENMASK(3, 2)
+#define PPE_MRU_MTU_CTRL_W1_INNER_PREC_LOW BIT(31)
+#define PPE_MRU_MTU_CTRL_W2_INNER_PREC_HIGH GENMASK(1, 0)
+
+#define PPE_MRU_MTU_CTRL_SET_MRU(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_MRU_MTU_CTRL_W0_MRU, tbl_cfg, value)
+#define PPE_MRU_MTU_CTRL_SET_MRU_CMD(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_MRU_MTU_CTRL_W0_MRU_CMD, tbl_cfg, value)
+#define PPE_MRU_MTU_CTRL_SET_MTU(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_MRU_MTU_CTRL_W0_MTU, tbl_cfg, value)
+#define PPE_MRU_MTU_CTRL_SET_MTU_CMD(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_MRU_MTU_CTRL_W0_MTU_CMD, tbl_cfg, value)
+#define PPE_MRU_MTU_CTRL_SET_RX_CNT_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_MRU_MTU_CTRL_W1_RX_CNT_EN, (tbl_cfg) + 0x1, value)
+#define PPE_MRU_MTU_CTRL_SET_TX_CNT_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_MRU_MTU_CTRL_W1_TX_CNT_EN, (tbl_cfg) + 0x1, value)
+
+/* PPE service code configuration for destination port and counter. */
+#define PPE_IN_L2_SERVICE_TBL_ADDR 0x66000
+#define PPE_IN_L2_SERVICE_TBL_ENTRIES 256
+#define PPE_IN_L2_SERVICE_TBL_INC 0x10
+#define PPE_IN_L2_SERVICE_TBL_DST_PORT_ID_VALID BIT(0)
+#define PPE_IN_L2_SERVICE_TBL_DST_PORT_ID GENMASK(4, 1)
+#define PPE_IN_L2_SERVICE_TBL_DST_DIRECTION BIT(5)
+#define PPE_IN_L2_SERVICE_TBL_DST_BYPASS_BITMAP GENMASK(29, 6)
+#define PPE_IN_L2_SERVICE_TBL_RX_CNT_EN BIT(30)
+#define PPE_IN_L2_SERVICE_TBL_TX_CNT_EN BIT(31)
+
+/* L2 Port configurations */
+#define PPE_L2_VP_PORT_TBL_ADDR 0x98000
+#define PPE_L2_VP_PORT_TBL_ENTRIES 256
+#define PPE_L2_VP_PORT_TBL_INC 0x10
+#define PPE_L2_VP_PORT_W0_INVALID_VSI_FWD_EN BIT(0)
+#define PPE_L2_VP_PORT_W0_DST_INFO GENMASK(9, 2)
+
+#define PPE_L2_PORT_SET_INVALID_VSI_FWD_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_L2_VP_PORT_W0_INVALID_VSI_FWD_EN, tbl_cfg, value)
+#define PPE_L2_PORT_SET_DST_INFO(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_L2_VP_PORT_W0_DST_INFO, tbl_cfg, value)
+
+/* Port RX and RX drop counters. */
+#define PPE_PORT_RX_CNT_TBL_ADDR 0x150000
+#define PPE_PORT_RX_CNT_TBL_ENTRIES 256
+#define PPE_PORT_RX_CNT_TBL_INC 0x20
+
+/* Physical port RX and RX drop counters. */
+#define PPE_PHY_PORT_RX_CNT_TBL_ADDR 0x156000
+#define PPE_PHY_PORT_RX_CNT_TBL_ENTRIES 8
+#define PPE_PHY_PORT_RX_CNT_TBL_INC 0x20
+
+/* Counters for packets sent to the CPU port. */
+#define PPE_DROP_CPU_CNT_TBL_ADDR 0x160000
+#define PPE_DROP_CPU_CNT_TBL_ENTRIES 1280
+#define PPE_DROP_CPU_CNT_TBL_INC 0x10
+
+/* VLAN counters. */
+#define PPE_VLAN_CNT_TBL_ADDR 0x178000
+#define PPE_VLAN_CNT_TBL_ENTRIES 64
+#define PPE_VLAN_CNT_TBL_INC 0x10
+
+/* PPE L2 counters. */
+#define PPE_PRE_L2_CNT_TBL_ADDR 0x17c000
+#define PPE_PRE_L2_CNT_TBL_ENTRIES 64
+#define PPE_PRE_L2_CNT_TBL_INC 0x20
+
+/* Port TX drop counters. */
+#define PPE_PORT_TX_DROP_CNT_TBL_ADDR 0x17d000
+#define PPE_PORT_TX_DROP_CNT_TBL_ENTRIES 8
+#define PPE_PORT_TX_DROP_CNT_TBL_INC 0x10
+
+/* Virtual port TX drop counters. */
+#define PPE_VPORT_TX_DROP_CNT_TBL_ADDR 0x17e000
+#define PPE_VPORT_TX_DROP_CNT_TBL_ENTRIES 256
+#define PPE_VPORT_TX_DROP_CNT_TBL_INC 0x10
+
+/* Counters for tunnel packets. */
+#define PPE_TPR_PKT_CNT_TBL_ADDR 0x1d0080
+#define PPE_TPR_PKT_CNT_TBL_ENTRIES 8
+#define PPE_TPR_PKT_CNT_TBL_INC 4
+
+/* Counters for all received packets. */
+#define PPE_IPR_PKT_CNT_TBL_ADDR 0x1e0080
+#define PPE_IPR_PKT_CNT_TBL_ENTRIES 8
+#define PPE_IPR_PKT_CNT_TBL_INC 4
+
+/* PPE service code configuration for the tunnel packet. */
+#define PPE_TL_SERVICE_TBL_ADDR 0x306000
+#define PPE_TL_SERVICE_TBL_ENTRIES 256
+#define PPE_TL_SERVICE_TBL_INC 4
+#define PPE_TL_SERVICE_TBL_BYPASS_BITMAP GENMASK(31, 0)
+
+/* Port scheduler global config. */
+#define PPE_PSCH_SCH_DEPTH_CFG_ADDR 0x400000
+#define PPE_PSCH_SCH_DEPTH_CFG_INC 4
+#define PPE_PSCH_SCH_DEPTH_CFG_SCH_DEPTH GENMASK(7, 0)
+
+/* PPE queue level scheduler configurations. */
+#define PPE_L0_FLOW_MAP_TBL_ADDR 0x402000
+#define PPE_L0_FLOW_MAP_TBL_ENTRIES 300
+#define PPE_L0_FLOW_MAP_TBL_INC 0x10
+#define PPE_L0_FLOW_MAP_TBL_FLOW_ID GENMASK(5, 0)
+#define PPE_L0_FLOW_MAP_TBL_C_PRI GENMASK(8, 6)
+#define PPE_L0_FLOW_MAP_TBL_E_PRI GENMASK(11, 9)
+#define PPE_L0_FLOW_MAP_TBL_C_NODE_WT GENMASK(21, 12)
+#define PPE_L0_FLOW_MAP_TBL_E_NODE_WT GENMASK(31, 22)
+
+#define PPE_L0_C_FLOW_CFG_TBL_ADDR 0x404000
+#define PPE_L0_C_FLOW_CFG_TBL_ENTRIES 512
+#define PPE_L0_C_FLOW_CFG_TBL_INC 0x10
+#define PPE_L0_C_FLOW_CFG_TBL_NODE_ID GENMASK(7, 0)
+#define PPE_L0_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT BIT(8)
+
+#define PPE_L0_E_FLOW_CFG_TBL_ADDR 0x406000
+#define PPE_L0_E_FLOW_CFG_TBL_ENTRIES 512
+#define PPE_L0_E_FLOW_CFG_TBL_INC 0x10
+#define PPE_L0_E_FLOW_CFG_TBL_NODE_ID GENMASK(7, 0)
+#define PPE_L0_E_FLOW_CFG_TBL_NODE_CREDIT_UNIT BIT(8)
+
+#define PPE_L0_FLOW_PORT_MAP_TBL_ADDR 0x408000
+#define PPE_L0_FLOW_PORT_MAP_TBL_ENTRIES 300
+#define PPE_L0_FLOW_PORT_MAP_TBL_INC 0x10
+#define PPE_L0_FLOW_PORT_MAP_TBL_PORT_NUM GENMASK(3, 0)
+
+#define PPE_L0_COMP_CFG_TBL_ADDR 0x428000
+#define PPE_L0_COMP_CFG_TBL_ENTRIES 300
+#define PPE_L0_COMP_CFG_TBL_INC 0x10
+#define PPE_L0_COMP_CFG_TBL_SHAPER_METER_LEN GENMASK(1, 0)
+#define PPE_L0_COMP_CFG_TBL_NODE_METER_LEN GENMASK(3, 2)
+
+/* PPE queue to Ethernet DMA ring mapping table. */
+#define PPE_RING_Q_MAP_TBL_ADDR 0x42a000
+#define PPE_RING_Q_MAP_TBL_ENTRIES 24
+#define PPE_RING_Q_MAP_TBL_INC 0x40
+
+/* Table addresses for per-queue dequeue setting. */
+#define PPE_DEQ_OPR_TBL_ADDR 0x430000
+#define PPE_DEQ_OPR_TBL_ENTRIES 300
+#define PPE_DEQ_OPR_TBL_INC 0x10
+#define PPE_DEQ_OPR_TBL_DEQ_DISABLE BIT(0)
+
+/* PPE flow level scheduler configurations. */
+#define PPE_L1_FLOW_MAP_TBL_ADDR 0x440000
+#define PPE_L1_FLOW_MAP_TBL_ENTRIES 64
+#define PPE_L1_FLOW_MAP_TBL_INC 0x10
+#define PPE_L1_FLOW_MAP_TBL_FLOW_ID GENMASK(3, 0)
+#define PPE_L1_FLOW_MAP_TBL_C_PRI GENMASK(6, 4)
+#define PPE_L1_FLOW_MAP_TBL_E_PRI GENMASK(9, 7)
+#define PPE_L1_FLOW_MAP_TBL_C_NODE_WT GENMASK(19, 10)
+#define PPE_L1_FLOW_MAP_TBL_E_NODE_WT GENMASK(29, 20)
+
+#define PPE_L1_C_FLOW_CFG_TBL_ADDR 0x442000
+#define PPE_L1_C_FLOW_CFG_TBL_ENTRIES 64
+#define PPE_L1_C_FLOW_CFG_TBL_INC 0x10
+#define PPE_L1_C_FLOW_CFG_TBL_NODE_ID GENMASK(5, 0)
+#define PPE_L1_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT BIT(6)
+
+#define PPE_L1_E_FLOW_CFG_TBL_ADDR 0x444000
+#define PPE_L1_E_FLOW_CFG_TBL_ENTRIES 64
+#define PPE_L1_E_FLOW_CFG_TBL_INC 0x10
+#define PPE_L1_E_FLOW_CFG_TBL_NODE_ID GENMASK(5, 0)
+#define PPE_L1_E_FLOW_CFG_TBL_NODE_CREDIT_UNIT BIT(6)
+
+#define PPE_L1_FLOW_PORT_MAP_TBL_ADDR 0x446000
+#define PPE_L1_FLOW_PORT_MAP_TBL_ENTRIES 64
+#define PPE_L1_FLOW_PORT_MAP_TBL_INC 0x10
+#define PPE_L1_FLOW_PORT_MAP_TBL_PORT_NUM GENMASK(3, 0)
+
+#define PPE_L1_COMP_CFG_TBL_ADDR 0x46a000
+#define PPE_L1_COMP_CFG_TBL_ENTRIES 64
+#define PPE_L1_COMP_CFG_TBL_INC 0x10
+#define PPE_L1_COMP_CFG_TBL_SHAPER_METER_LEN GENMASK(1, 0)
+#define PPE_L1_COMP_CFG_TBL_NODE_METER_LEN GENMASK(3, 2)
+
+/* PPE port scheduler configurations for egress. */
+#define PPE_PSCH_SCH_CFG_TBL_ADDR 0x47a000
+#define PPE_PSCH_SCH_CFG_TBL_ENTRIES 128
+#define PPE_PSCH_SCH_CFG_TBL_INC 0x10
+#define PPE_PSCH_SCH_CFG_TBL_DES_PORT GENMASK(3, 0)
+#define PPE_PSCH_SCH_CFG_TBL_ENS_PORT GENMASK(7, 4)
+#define PPE_PSCH_SCH_CFG_TBL_ENS_PORT_BITMAP GENMASK(15, 8)
+#define PPE_PSCH_SCH_CFG_TBL_DES_SECOND_PORT_EN BIT(16)
+#define PPE_PSCH_SCH_CFG_TBL_DES_SECOND_PORT GENMASK(20, 17)
+
+/* There are 15 BM ports and 4 BM groups supported by PPE.
+ * BM ports 0-7 are for EDMA port 0, BM ports 8-13 are for
+ * PPE physical ports 1-6, and BM port 14 is for the EIP port.
+ */
+#define PPE_BM_PORT_FC_MODE_ADDR 0x600100
+#define PPE_BM_PORT_FC_MODE_ENTRIES 15
+#define PPE_BM_PORT_FC_MODE_INC 0x4
+#define PPE_BM_PORT_FC_MODE_EN BIT(0)
+
+#define PPE_BM_PORT_GROUP_ID_ADDR 0x600180
+#define PPE_BM_PORT_GROUP_ID_ENTRIES 15
+#define PPE_BM_PORT_GROUP_ID_INC 0x4
+#define PPE_BM_PORT_GROUP_ID_SHARED_GROUP_ID GENMASK(1, 0)
+
+/* Counters for PPE buffers used to cache received packets. */
+#define PPE_BM_USED_CNT_TBL_ADDR 0x6001c0
+#define PPE_BM_USED_CNT_TBL_ENTRIES 15
+#define PPE_BM_USED_CNT_TBL_INC 0x4
+#define PPE_BM_USED_CNT_VAL GENMASK(10, 0)
+
+/* Counters for PPE buffers used for packets received after the pause frame is sent. */
+#define PPE_BM_REACT_CNT_TBL_ADDR 0x600240
+#define PPE_BM_REACT_CNT_TBL_ENTRIES 15
+#define PPE_BM_REACT_CNT_TBL_INC 0x4
+#define PPE_BM_REACT_CNT_VAL GENMASK(8, 0)
+
+#define PPE_BM_SHARED_GROUP_CFG_ADDR 0x600290
+#define PPE_BM_SHARED_GROUP_CFG_ENTRIES 4
+#define PPE_BM_SHARED_GROUP_CFG_INC 0x4
+#define PPE_BM_SHARED_GROUP_CFG_SHARED_LIMIT GENMASK(10, 0)
+
+#define PPE_BM_PORT_FC_CFG_TBL_ADDR 0x601000
+#define PPE_BM_PORT_FC_CFG_TBL_ENTRIES 15
+#define PPE_BM_PORT_FC_CFG_TBL_INC 0x10
+#define PPE_BM_PORT_FC_W0_REACT_LIMIT GENMASK(8, 0)
+#define PPE_BM_PORT_FC_W0_RESUME_THRESHOLD GENMASK(17, 9)
+#define PPE_BM_PORT_FC_W0_RESUME_OFFSET GENMASK(28, 18)
+#define PPE_BM_PORT_FC_W0_CEILING_LOW GENMASK(31, 29)
+#define PPE_BM_PORT_FC_W1_CEILING_HIGH GENMASK(7, 0)
+#define PPE_BM_PORT_FC_W1_WEIGHT GENMASK(10, 8)
+#define PPE_BM_PORT_FC_W1_DYNAMIC BIT(11)
+#define PPE_BM_PORT_FC_W1_PRE_ALLOC GENMASK(22, 12)
+
+#define PPE_BM_PORT_FC_SET_REACT_LIMIT(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_BM_PORT_FC_W0_REACT_LIMIT, tbl_cfg, value)
+#define PPE_BM_PORT_FC_SET_RESUME_THRESHOLD(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_BM_PORT_FC_W0_RESUME_THRESHOLD, tbl_cfg, value)
+#define PPE_BM_PORT_FC_SET_RESUME_OFFSET(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_BM_PORT_FC_W0_RESUME_OFFSET, tbl_cfg, value)
+#define PPE_BM_PORT_FC_SET_CEILING_LOW(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_BM_PORT_FC_W0_CEILING_LOW, tbl_cfg, value)
+#define PPE_BM_PORT_FC_SET_CEILING_HIGH(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_BM_PORT_FC_W1_CEILING_HIGH, (tbl_cfg) + 0x1, value)
+#define PPE_BM_PORT_FC_SET_WEIGHT(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_BM_PORT_FC_W1_WEIGHT, (tbl_cfg) + 0x1, value)
+#define PPE_BM_PORT_FC_SET_DYNAMIC(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_BM_PORT_FC_W1_DYNAMIC, (tbl_cfg) + 0x1, value)
+#define PPE_BM_PORT_FC_SET_PRE_ALLOC(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_BM_PORT_FC_W1_PRE_ALLOC, (tbl_cfg) + 0x1, value)
+
+/* The queue base configurations based on destination port,
+ * service code or CPU code.
+ */
+#define PPE_UCAST_QUEUE_MAP_TBL_ADDR 0x810000
+#define PPE_UCAST_QUEUE_MAP_TBL_ENTRIES 3072
+#define PPE_UCAST_QUEUE_MAP_TBL_INC 0x10
+#define PPE_UCAST_QUEUE_MAP_TBL_PROFILE_ID GENMASK(3, 0)
+#define PPE_UCAST_QUEUE_MAP_TBL_QUEUE_ID GENMASK(11, 4)
+
+/* The queue offset configurations based on RSS hash value. */
+#define PPE_UCAST_HASH_MAP_TBL_ADDR 0x830000
+#define PPE_UCAST_HASH_MAP_TBL_ENTRIES 4096
+#define PPE_UCAST_HASH_MAP_TBL_INC 0x10
+#define PPE_UCAST_HASH_MAP_TBL_HASH GENMASK(7, 0)
+
+/* The queue offset configurations based on PPE internal priority. */
+#define PPE_UCAST_PRIORITY_MAP_TBL_ADDR 0x842000
+#define PPE_UCAST_PRIORITY_MAP_TBL_ENTRIES 256
+#define PPE_UCAST_PRIORITY_MAP_TBL_INC 0x10
+#define PPE_UCAST_PRIORITY_MAP_TBL_CLASS GENMASK(3, 0)
+
+/* PPE unicast queue (0-255) configurations. */
+#define PPE_AC_UNICAST_QUEUE_CFG_TBL_ADDR 0x848000
+#define PPE_AC_UNICAST_QUEUE_CFG_TBL_ENTRIES 256
+#define PPE_AC_UNICAST_QUEUE_CFG_TBL_INC 0x10
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_EN BIT(0)
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_WRED_EN BIT(1)
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_FC_EN BIT(2)
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_CLR_AWARE BIT(3)
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_GRP_ID GENMASK(5, 4)
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_PRE_LIMIT GENMASK(16, 6)
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_DYNAMIC BIT(17)
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_WEIGHT GENMASK(20, 18)
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_THRESHOLD GENMASK(31, 21)
+#define PPE_AC_UNICAST_QUEUE_CFG_W3_GRN_RESUME GENMASK(23, 13)
+
+#define PPE_AC_UNICAST_QUEUE_SET_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_UNICAST_QUEUE_CFG_W0_EN, tbl_cfg, value)
+#define PPE_AC_UNICAST_QUEUE_SET_GRP_ID(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_UNICAST_QUEUE_CFG_W0_GRP_ID, tbl_cfg, value)
+#define PPE_AC_UNICAST_QUEUE_SET_PRE_LIMIT(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_UNICAST_QUEUE_CFG_W0_PRE_LIMIT, tbl_cfg, value)
+#define PPE_AC_UNICAST_QUEUE_SET_DYNAMIC(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_UNICAST_QUEUE_CFG_W0_DYNAMIC, tbl_cfg, value)
+#define PPE_AC_UNICAST_QUEUE_SET_WEIGHT(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_UNICAST_QUEUE_CFG_W0_WEIGHT, tbl_cfg, value)
+#define PPE_AC_UNICAST_QUEUE_SET_THRESHOLD(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_UNICAST_QUEUE_CFG_W0_THRESHOLD, tbl_cfg, value)
+#define PPE_AC_UNICAST_QUEUE_SET_GRN_RESUME(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_UNICAST_QUEUE_CFG_W3_GRN_RESUME, (tbl_cfg) + 0x3, value)
+
+/* PPE multicast queue (256-299) configurations. */
+#define PPE_AC_MULTICAST_QUEUE_CFG_TBL_ADDR 0x84a000
+#define PPE_AC_MULTICAST_QUEUE_CFG_TBL_ENTRIES 44
+#define PPE_AC_MULTICAST_QUEUE_CFG_TBL_INC 0x10
+#define PPE_AC_MULTICAST_QUEUE_CFG_W0_EN BIT(0)
+#define PPE_AC_MULTICAST_QUEUE_CFG_W0_FC_EN BIT(1)
+#define PPE_AC_MULTICAST_QUEUE_CFG_W0_CLR_AWARE BIT(2)
+#define PPE_AC_MULTICAST_QUEUE_CFG_W0_GRP_ID GENMASK(4, 3)
+#define PPE_AC_MULTICAST_QUEUE_CFG_W0_PRE_LIMIT GENMASK(15, 5)
+#define PPE_AC_MULTICAST_QUEUE_CFG_W0_THRESHOLD GENMASK(26, 16)
+#define PPE_AC_MULTICAST_QUEUE_CFG_W2_RESUME GENMASK(17, 7)
+
+#define PPE_AC_MULTICAST_QUEUE_SET_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_MULTICAST_QUEUE_CFG_W0_EN, tbl_cfg, value)
+#define PPE_AC_MULTICAST_QUEUE_SET_GRN_GRP_ID(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_MULTICAST_QUEUE_CFG_W0_GRP_ID, tbl_cfg, value)
+#define PPE_AC_MULTICAST_QUEUE_SET_GRN_PRE_LIMIT(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_MULTICAST_QUEUE_CFG_W0_PRE_LIMIT, tbl_cfg, value)
+#define PPE_AC_MULTICAST_QUEUE_SET_GRN_THRESHOLD(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_MULTICAST_QUEUE_CFG_W0_THRESHOLD, tbl_cfg, value)
+#define PPE_AC_MULTICAST_QUEUE_SET_GRN_RESUME(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_MULTICAST_QUEUE_CFG_W2_RESUME, (tbl_cfg) + 0x2, value)
+
+/* PPE admission control group (0-3) configurations */
+#define PPE_AC_GRP_CFG_TBL_ADDR 0x84c000
+#define PPE_AC_GRP_CFG_TBL_ENTRIES 0x4
+#define PPE_AC_GRP_CFG_TBL_INC 0x10
+#define PPE_AC_GRP_W0_AC_EN BIT(0)
+#define PPE_AC_GRP_W0_AC_FC_EN BIT(1)
+#define PPE_AC_GRP_W0_CLR_AWARE BIT(2)
+#define PPE_AC_GRP_W0_THRESHOLD_LOW GENMASK(31, 25)
+#define PPE_AC_GRP_W1_THRESHOLD_HIGH GENMASK(3, 0)
+#define PPE_AC_GRP_W1_BUF_LIMIT GENMASK(14, 4)
+#define PPE_AC_GRP_W2_RESUME_GRN GENMASK(15, 5)
+#define PPE_AC_GRP_W2_PRE_ALLOC GENMASK(26, 16)
+
+#define PPE_AC_GRP_SET_BUF_LIMIT(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_GRP_W1_BUF_LIMIT, (tbl_cfg) + 0x1, value)
+
+/* Counters for packets handled by unicast queues (0-255). */
+#define PPE_AC_UNICAST_QUEUE_CNT_TBL_ADDR 0x84e000
+#define PPE_AC_UNICAST_QUEUE_CNT_TBL_ENTRIES 256
+#define PPE_AC_UNICAST_QUEUE_CNT_TBL_INC 0x10
+#define PPE_AC_UNICAST_QUEUE_CNT_TBL_PEND_CNT GENMASK(12, 0)
+
+/* Counters for packets handled by multicast queues (256-299). */
+#define PPE_AC_MULTICAST_QUEUE_CNT_TBL_ADDR 0x852000
+#define PPE_AC_MULTICAST_QUEUE_CNT_TBL_ENTRIES 44
+#define PPE_AC_MULTICAST_QUEUE_CNT_TBL_INC 0x10
+#define PPE_AC_MULTICAST_QUEUE_CNT_TBL_PEND_CNT GENMASK(12, 0)
+
+/* Table addresses for per-queue enqueue setting. */
+#define PPE_ENQ_OPR_TBL_ADDR 0x85c000
+#define PPE_ENQ_OPR_TBL_ENTRIES 300
+#define PPE_ENQ_OPR_TBL_INC 0x10
+#define PPE_ENQ_OPR_TBL_ENQ_DISABLE BIT(0)
+
+/* The unicast drop counters include the possible WRED drops for the green,
+ * yellow and red categories.
+ */
+#define PPE_UNICAST_DROP_CNT_TBL_ADDR 0x9e0000
+#define PPE_UNICAST_DROP_CNT_TBL_ENTRIES 1536
+#define PPE_UNICAST_DROP_CNT_TBL_INC 0x10
+#define PPE_UNICAST_DROP_TYPES 6
+#define PPE_UNICAST_DROP_FORCE_OFFSET 3
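+
+/* As read by the PPE debugfs code, each unicast queue owns
+ * PPE_UNICAST_DROP_TYPES consecutive entries; the forced-drop counter of
+ * queue "q" is at PPE_UNICAST_DROP_CNT_TBL_ADDR +
+ * (q * PPE_UNICAST_DROP_TYPES + PPE_UNICAST_DROP_FORCE_OFFSET) *
+ * PPE_UNICAST_DROP_CNT_TBL_INC.
+ */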
+
+/* There are 16 multicast queues dedicated to CPU port 0. The multicast drop
+ * counters include the forced drops for green, yellow and red category packets.
+ */
+#define PPE_P0_MULTICAST_DROP_CNT_TBL_ADDR 0x9f0000
+#define PPE_P0_MULTICAST_DROP_CNT_TBL_ENTRIES 48
+#define PPE_P0_MULTICAST_DROP_CNT_TBL_INC 0x10
+#define PPE_P0_MULTICAST_QUEUE_NUM 16
+
+/* Each PPE physical port has four dedicated multicast queues. With three
+ * drop types (forced drops for green, yellow and red category packets) per
+ * queue, this gives a total of 12 drop counter entries per port.
+ */
+#define PPE_MULTICAST_QUEUE_PORT_ADDR_INC 0x1000
+#define PPE_MULTICAST_DROP_CNT_TBL_INC 0x10
+#define PPE_MULTICAST_DROP_TYPES 3
+#define PPE_MULTICAST_QUEUE_NUM 4
+#define PPE_MULTICAST_DROP_CNT_TBL_ENTRIES 12
+
+#define PPE_CPU_PORT_MULTICAST_FORCE_DROP_CNT_TBL_ADDR(mq_offset) \
+ (PPE_P0_MULTICAST_DROP_CNT_TBL_ADDR + \
+ (mq_offset) * PPE_P0_MULTICAST_DROP_CNT_TBL_INC * \
+ PPE_MULTICAST_DROP_TYPES)
+
+#define PPE_P1_MULTICAST_DROP_CNT_TBL_ADDR \
+ (PPE_P0_MULTICAST_DROP_CNT_TBL_ADDR + PPE_MULTICAST_QUEUE_PORT_ADDR_INC)
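+
+/* As read by the PPE debugfs code, the forced-drop counter of multicast
+ * queue "n" (0-3) on physical port "p" (1-7) is at
+ * PPE_P1_MULTICAST_DROP_CNT_TBL_ADDR +
+ * (p - 1) * PPE_MULTICAST_QUEUE_PORT_ADDR_INC +
+ * n * PPE_MULTICAST_DROP_TYPES * PPE_MULTICAST_DROP_CNT_TBL_INC.
+ */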
+#endif
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index ef9c02b000e4..38a779f4b866 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -54,7 +54,7 @@ MODULE_PARM_DESC(qcaspi_burst_len, "Number of data bytes per burst. Use 1-5000."
#define QCASPI_PLUGGABLE_MIN 0
#define QCASPI_PLUGGABLE_MAX 1
-static int qcaspi_pluggable = QCASPI_PLUGGABLE_MIN;
+static int qcaspi_pluggable = QCASPI_PLUGGABLE_MAX;
module_param(qcaspi_pluggable, int, 0);
MODULE_PARM_DESC(qcaspi_pluggable, "Pluggable SPI connection (yes/no).");
@@ -818,7 +818,6 @@ qcaspi_netdev_init(struct net_device *dev)
dev->mtu = QCAFRM_MAX_MTU;
dev->type = ARPHRD_ETHER;
- qca->clkspeed = qcaspi_clkspeed;
qca->burst_len = qcaspi_burst_len;
qca->spi_thread = NULL;
qca->buffer_size = (QCAFRM_MAX_MTU + VLAN_ETH_HLEN + QCAFRM_HEADER_LEN +
@@ -909,17 +908,15 @@ qca_spi_probe(struct spi_device *spi)
legacy_mode = of_property_read_bool(spi->dev.of_node,
"qca,legacy-mode");
- if (qcaspi_clkspeed == 0) {
- if (spi->max_speed_hz)
- qcaspi_clkspeed = spi->max_speed_hz;
- else
- qcaspi_clkspeed = QCASPI_CLK_SPEED;
- }
+ if (qcaspi_clkspeed)
+ spi->max_speed_hz = qcaspi_clkspeed;
+ else if (!spi->max_speed_hz)
+ spi->max_speed_hz = QCASPI_CLK_SPEED;
- if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) ||
- (qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) {
- dev_err(&spi->dev, "Invalid clkspeed: %d\n",
- qcaspi_clkspeed);
+ if (spi->max_speed_hz < QCASPI_CLK_SPEED_MIN ||
+ spi->max_speed_hz > QCASPI_CLK_SPEED_MAX) {
+ dev_err(&spi->dev, "Invalid clkspeed: %u\n",
+ spi->max_speed_hz);
return -EINVAL;
}
@@ -944,14 +941,13 @@ qca_spi_probe(struct spi_device *spi)
return -EINVAL;
}
- dev_info(&spi->dev, "ver=%s, clkspeed=%d, burst_len=%d, pluggable=%d\n",
+ dev_info(&spi->dev, "ver=%s, clkspeed=%u, burst_len=%d, pluggable=%d\n",
QCASPI_DRV_VERSION,
- qcaspi_clkspeed,
+ spi->max_speed_hz,
qcaspi_burst_len,
qcaspi_pluggable);
spi->mode = SPI_MODE_3;
- spi->max_speed_hz = qcaspi_clkspeed;
if (spi_setup(spi) < 0) {
dev_err(&spi->dev, "Unable to setup SPI device\n");
return -EFAULT;
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
index 7ba5c9e2f61c..90b290f94c27 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.h
+++ b/drivers/net/ethernet/qualcomm/qca_spi.h
@@ -89,7 +89,6 @@ struct qcaspi {
#endif
/* user configurable options */
- u32 clkspeed;
u8 legacy_mode;
u16 burst_len;
};
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index f3bea196a8f9..ba8763cac9d9 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -117,11 +117,14 @@ static void rmnet_unregister_bridge(struct rmnet_port *port)
rmnet_unregister_real_device(bridge_dev);
}
-static int rmnet_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int rmnet_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
u32 data_format = RMNET_FLAGS_INGRESS_DEAGGREGATION;
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct net_device *real_dev;
int mode = RMNET_EPMODE_VND;
struct rmnet_endpoint *ep;
@@ -134,7 +137,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
return -EINVAL;
}
- real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+ real_dev = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK]));
if (!real_dev) {
NL_SET_ERR_MSG_MOD(extack, "link does not exist");
return -ENODEV;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index a5e3d1a88305..8b4640c5d61e 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -686,8 +686,8 @@ void rmnet_map_update_ul_agg_config(struct rmnet_port *port, u32 size,
void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
{
- hrtimer_init(&port->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- port->hrtimer.function = rmnet_map_flush_tx_packet_queue;
+ hrtimer_setup(&port->hrtimer, rmnet_map_flush_tx_packet_queue, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
spin_lock_init(&port->agg_lock);
rmnet_map_update_ul_agg_config(port, 4096, 1, 800);
INIT_WORK(&port->agg_wq, rmnet_map_flush_tx_packet_work);
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 9ce0e8a64ba8..a73dcaffa8c5 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -1684,6 +1684,7 @@ static void rtl8139_tx_timeout_task (struct work_struct *work)
if (tmp8 & CmdTxEnb)
RTL_W8 (ChipCmd, CmdRxEnb);
+ netdev_lock(dev);
spin_lock_bh(&tp->rx_lock);
/* Disable interrupts by clearing the interrupt mask. */
RTL_W16 (IntrMask, 0x0000);
@@ -1694,11 +1695,12 @@ static void rtl8139_tx_timeout_task (struct work_struct *work)
spin_unlock_irq(&tp->lock);
/* ...and finally, reset everything */
- napi_enable(&tp->napi);
+ napi_enable_locked(&tp->napi);
rtl8139_hw_start(dev);
netif_wake_queue(dev);
spin_unlock_bh(&tp->rx_lock);
+ netdev_unlock(dev);
}
static void rtl8139_tx_timeout(struct net_device *dev, unsigned int txqueue)
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
index 8a8ea51c639e..272c83bfdc6c 100644
--- a/drivers/net/ethernet/realtek/Kconfig
+++ b/drivers/net/ethernet/realtek/Kconfig
@@ -58,7 +58,7 @@ config 8139TOO
config 8139TOO_PIO
bool "Use PIO instead of MMIO"
default y
- depends on 8139TOO
+ depends on 8139TOO && !NO_IOPORT_MAP
help
This instructs the driver to use programmed I/O ports (PIO) instead
of PCI shared memory (MMIO). This can possibly solve some problems
@@ -114,7 +114,8 @@ config R8169
will be called r8169. This is recommended.
config R8169_LEDS
- def_bool R8169 && LEDS_TRIGGER_NETDEV
+ bool "Support for controlling the NIC LEDs"
+ depends on R8169 && LEDS_TRIGGER_NETDEV
depends on !(R8169=y && LEDS_CLASS=m)
help
Optional support for controlling the NIC LED's with the netdev
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index 6cbcb3164367..0d65434982a2 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -717,7 +717,7 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance)
problem where the adapter forgets its ethernet address. */
static void atp_timed_checker(struct timer_list *t)
{
- struct net_local *lp = from_timer(lp, t, timer);
+ struct net_local *lp = timer_container_of(lp, t, timer);
struct net_device *dev = lp->dev;
long ioaddr = dev->base_addr;
int tickssofar = jiffies - lp->last_rx_time;
@@ -832,7 +832,7 @@ net_close(struct net_device *dev)
netif_stop_queue(dev);
- del_timer_sync(&lp->timer);
+ timer_delete_sync(&lp->timer);
/* Flush the Tx and disable Rx here. */
lp->addr_mode = CMR2h_OFF;
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
index be4c9622618d..2c1a0c21af8d 100644
--- a/drivers/net/ethernet/realtek/r8169.h
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -23,7 +23,7 @@ enum mac_version {
RTL_GIGA_MAC_VER_08,
RTL_GIGA_MAC_VER_09,
RTL_GIGA_MAC_VER_10,
- RTL_GIGA_MAC_VER_11,
+ /* support for RTL_GIGA_MAC_VER_11 has been removed */
/* RTL_GIGA_MAC_VER_12 was handled the same as VER_17 */
/* RTL_GIGA_MAC_VER_13 was merged with VER_10 */
RTL_GIGA_MAC_VER_14,
@@ -64,14 +64,15 @@ enum mac_version {
/* support for RTL_GIGA_MAC_VER_50 has been removed */
RTL_GIGA_MAC_VER_51,
RTL_GIGA_MAC_VER_52,
- RTL_GIGA_MAC_VER_53,
/* support for RTL_GIGA_MAC_VER_60 has been removed */
RTL_GIGA_MAC_VER_61,
RTL_GIGA_MAC_VER_63,
RTL_GIGA_MAC_VER_64,
- RTL_GIGA_MAC_VER_65,
RTL_GIGA_MAC_VER_66,
- RTL_GIGA_MAC_NONE
+ RTL_GIGA_MAC_VER_70,
+ RTL_GIGA_MAC_VER_80,
+ RTL_GIGA_MAC_NONE,
+ RTL_GIGA_MAC_VER_LAST = RTL_GIGA_MAC_NONE - 1
};
struct rtl8169_private;
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index b84587841e31..405e91eb3141 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -16,7 +16,6 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
-#include <linux/hwmon.h>
#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
@@ -57,8 +56,13 @@
#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw"
#define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw"
#define FIRMWARE_8125D_1 "rtl_nic/rtl8125d-1.fw"
+#define FIRMWARE_8125D_2 "rtl_nic/rtl8125d-2.fw"
+#define FIRMWARE_8125K_1 "rtl_nic/rtl8125k-1.fw"
+#define FIRMWARE_8125BP_2 "rtl_nic/rtl8125bp-2.fw"
+#define FIRMWARE_9151A_1 "rtl_nic/rtl9151a-1.fw"
#define FIRMWARE_8126A_2 "rtl_nic/rtl8126a-2.fw"
#define FIRMWARE_8126A_3 "rtl_nic/rtl8126a-3.fw"
+#define FIRMWARE_8127A_1 "rtl_nic/rtl8127a-1.fw"
#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
@@ -88,61 +92,121 @@
#define JUMBO_6K (6 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
#define JUMBO_7K (7 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
#define JUMBO_9K (9 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
+#define JUMBO_16K (SZ_16K - VLAN_ETH_HLEN - ETH_FCS_LEN)
-static const struct {
+static const struct rtl_chip_info {
+ u16 mask;
+ u16 val;
+ enum mac_version mac_version;
const char *name;
const char *fw_name;
} rtl_chip_infos[] = {
- /* PCI devices. */
- [RTL_GIGA_MAC_VER_02] = {"RTL8169s" },
- [RTL_GIGA_MAC_VER_03] = {"RTL8110s" },
- [RTL_GIGA_MAC_VER_04] = {"RTL8169sb/8110sb" },
- [RTL_GIGA_MAC_VER_05] = {"RTL8169sc/8110sc" },
- [RTL_GIGA_MAC_VER_06] = {"RTL8169sc/8110sc" },
- /* PCI-E devices. */
- [RTL_GIGA_MAC_VER_07] = {"RTL8102e" },
- [RTL_GIGA_MAC_VER_08] = {"RTL8102e" },
- [RTL_GIGA_MAC_VER_09] = {"RTL8102e/RTL8103e" },
- [RTL_GIGA_MAC_VER_10] = {"RTL8101e/RTL8100e" },
- [RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b" },
- [RTL_GIGA_MAC_VER_14] = {"RTL8401" },
- [RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b" },
- [RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp" },
- [RTL_GIGA_MAC_VER_19] = {"RTL8168c/8111c" },
- [RTL_GIGA_MAC_VER_20] = {"RTL8168c/8111c" },
- [RTL_GIGA_MAC_VER_21] = {"RTL8168c/8111c" },
- [RTL_GIGA_MAC_VER_22] = {"RTL8168c/8111c" },
- [RTL_GIGA_MAC_VER_23] = {"RTL8168cp/8111cp" },
- [RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp" },
- [RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d", FIRMWARE_8168D_1},
- [RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d", FIRMWARE_8168D_2},
- [RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp" },
- [RTL_GIGA_MAC_VER_29] = {"RTL8105e", FIRMWARE_8105E_1},
- [RTL_GIGA_MAC_VER_30] = {"RTL8105e", FIRMWARE_8105E_1},
- [RTL_GIGA_MAC_VER_31] = {"RTL8168dp/8111dp" },
- [RTL_GIGA_MAC_VER_32] = {"RTL8168e/8111e", FIRMWARE_8168E_1},
- [RTL_GIGA_MAC_VER_33] = {"RTL8168e/8111e", FIRMWARE_8168E_2},
- [RTL_GIGA_MAC_VER_34] = {"RTL8168evl/8111evl", FIRMWARE_8168E_3},
- [RTL_GIGA_MAC_VER_35] = {"RTL8168f/8111f", FIRMWARE_8168F_1},
- [RTL_GIGA_MAC_VER_36] = {"RTL8168f/8111f", FIRMWARE_8168F_2},
- [RTL_GIGA_MAC_VER_37] = {"RTL8402", FIRMWARE_8402_1 },
- [RTL_GIGA_MAC_VER_38] = {"RTL8411", FIRMWARE_8411_1 },
- [RTL_GIGA_MAC_VER_39] = {"RTL8106e", FIRMWARE_8106E_1},
- [RTL_GIGA_MAC_VER_40] = {"RTL8168g/8111g", FIRMWARE_8168G_2},
- [RTL_GIGA_MAC_VER_42] = {"RTL8168gu/8111gu", FIRMWARE_8168G_3},
- [RTL_GIGA_MAC_VER_43] = {"RTL8106eus", FIRMWARE_8106E_2},
- [RTL_GIGA_MAC_VER_44] = {"RTL8411b", FIRMWARE_8411_2 },
- [RTL_GIGA_MAC_VER_46] = {"RTL8168h/8111h", FIRMWARE_8168H_2},
- [RTL_GIGA_MAC_VER_48] = {"RTL8107e", FIRMWARE_8107E_2},
- [RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" },
- [RTL_GIGA_MAC_VER_52] = {"RTL8168fp/RTL8117", FIRMWARE_8168FP_3},
- [RTL_GIGA_MAC_VER_53] = {"RTL8168fp/RTL8117", },
- [RTL_GIGA_MAC_VER_61] = {"RTL8125A", FIRMWARE_8125A_3},
- /* reserve 62 for CFG_METHOD_4 in the vendor driver */
- [RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2},
- [RTL_GIGA_MAC_VER_64] = {"RTL8125D", FIRMWARE_8125D_1},
- [RTL_GIGA_MAC_VER_65] = {"RTL8126A", FIRMWARE_8126A_2},
- [RTL_GIGA_MAC_VER_66] = {"RTL8126A", FIRMWARE_8126A_3},
+ /* 8127A family. */
+ { 0x7cf, 0x6c9, RTL_GIGA_MAC_VER_80, "RTL8127A", FIRMWARE_8127A_1 },
+
+ /* 8126A family. */
+ { 0x7cf, 0x64a, RTL_GIGA_MAC_VER_70, "RTL8126A", FIRMWARE_8126A_3 },
+ { 0x7cf, 0x649, RTL_GIGA_MAC_VER_70, "RTL8126A", FIRMWARE_8126A_2 },
+
+ /* 8125BP family. */
+ { 0x7cf, 0x681, RTL_GIGA_MAC_VER_66, "RTL8125BP", FIRMWARE_8125BP_2 },
+
+ /* 8125D family. */
+ { 0x7cf, 0x68b, RTL_GIGA_MAC_VER_64, "RTL9151A", FIRMWARE_9151A_1 },
+ { 0x7cf, 0x68a, RTL_GIGA_MAC_VER_64, "RTL8125K", FIRMWARE_8125K_1 },
+ { 0x7cf, 0x689, RTL_GIGA_MAC_VER_64, "RTL8125D", FIRMWARE_8125D_2 },
+ { 0x7cf, 0x688, RTL_GIGA_MAC_VER_64, "RTL8125D", FIRMWARE_8125D_1 },
+
+ /* 8125B family. */
+ { 0x7cf, 0x641, RTL_GIGA_MAC_VER_63, "RTL8125B", FIRMWARE_8125B_2 },
+
+ /* 8125A family. */
+ { 0x7cf, 0x609, RTL_GIGA_MAC_VER_61, "RTL8125A", FIRMWARE_8125A_3 },
+
+ /* RTL8117 */
+ { 0x7cf, 0x54b, RTL_GIGA_MAC_VER_52, "RTL8168fp/RTL8117" },
+ { 0x7cf, 0x54a, RTL_GIGA_MAC_VER_52, "RTL8168fp/RTL8117",
+ FIRMWARE_8168FP_3 },
+
+ /* 8168EP family. */
+ { 0x7cf, 0x502, RTL_GIGA_MAC_VER_51, "RTL8168ep/8111ep" },
+
+ /* 8168H family. */
+ { 0x7cf, 0x541, RTL_GIGA_MAC_VER_46, "RTL8168h/8111h",
+ FIRMWARE_8168H_2 },
+ /* Realtek calls it RTL8168M, but it's handled like RTL8168H */
+ { 0x7cf, 0x6c0, RTL_GIGA_MAC_VER_46, "RTL8168M", FIRMWARE_8168H_2 },
+
+ /* 8168G family. */
+ { 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44, "RTL8411b", FIRMWARE_8411_2 },
+ { 0x7cf, 0x509, RTL_GIGA_MAC_VER_42, "RTL8168gu/8111gu",
+ FIRMWARE_8168G_3 },
+ { 0x7cf, 0x4c0, RTL_GIGA_MAC_VER_40, "RTL8168g/8111g",
+ FIRMWARE_8168G_2 },
+
+ /* 8168F family. */
+ { 0x7c8, 0x488, RTL_GIGA_MAC_VER_38, "RTL8411", FIRMWARE_8411_1 },
+ { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36, "RTL8168f/8111f",
+ FIRMWARE_8168F_2 },
+ { 0x7cf, 0x480, RTL_GIGA_MAC_VER_35, "RTL8168f/8111f",
+ FIRMWARE_8168F_1 },
+
+ /* 8168E family. */
+ { 0x7c8, 0x2c8, RTL_GIGA_MAC_VER_34, "RTL8168evl/8111evl",
+ FIRMWARE_8168E_3 },
+ { 0x7cf, 0x2c1, RTL_GIGA_MAC_VER_32, "RTL8168e/8111e",
+ FIRMWARE_8168E_1 },
+ { 0x7c8, 0x2c0, RTL_GIGA_MAC_VER_33, "RTL8168e/8111e",
+ FIRMWARE_8168E_2 },
+
+ /* 8168D family. */
+ { 0x7cf, 0x281, RTL_GIGA_MAC_VER_25, "RTL8168d/8111d",
+ FIRMWARE_8168D_1 },
+ { 0x7c8, 0x280, RTL_GIGA_MAC_VER_26, "RTL8168d/8111d",
+ FIRMWARE_8168D_2 },
+
+ /* 8168DP family. */
+ { 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28, "RTL8168dp/8111dp" },
+ { 0x7cf, 0x28b, RTL_GIGA_MAC_VER_31, "RTL8168dp/8111dp" },
+
+ /* 8168C family. */
+ { 0x7cf, 0x3c9, RTL_GIGA_MAC_VER_23, "RTL8168cp/8111cp" },
+ { 0x7cf, 0x3c8, RTL_GIGA_MAC_VER_18, "RTL8168cp/8111cp" },
+ { 0x7c8, 0x3c8, RTL_GIGA_MAC_VER_24, "RTL8168cp/8111cp" },
+ { 0x7cf, 0x3c0, RTL_GIGA_MAC_VER_19, "RTL8168c/8111c" },
+ { 0x7cf, 0x3c2, RTL_GIGA_MAC_VER_20, "RTL8168c/8111c" },
+ { 0x7cf, 0x3c3, RTL_GIGA_MAC_VER_21, "RTL8168c/8111c" },
+ { 0x7c8, 0x3c0, RTL_GIGA_MAC_VER_22, "RTL8168c/8111c" },
+
+ /* 8168B family. */
+ { 0x7c8, 0x380, RTL_GIGA_MAC_VER_17, "RTL8168b/8111b" },
+ /* This one is very old and rare, support has been removed.
+ * { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11, "RTL8168b/8111b" },
+ */
+
+ /* 8101 family. */
+ { 0x7c8, 0x448, RTL_GIGA_MAC_VER_39, "RTL8106e", FIRMWARE_8106E_1 },
+ { 0x7c8, 0x440, RTL_GIGA_MAC_VER_37, "RTL8402", FIRMWARE_8402_1 },
+ { 0x7cf, 0x409, RTL_GIGA_MAC_VER_29, "RTL8105e", FIRMWARE_8105E_1 },
+ { 0x7c8, 0x408, RTL_GIGA_MAC_VER_30, "RTL8105e", FIRMWARE_8105E_1 },
+ { 0x7cf, 0x349, RTL_GIGA_MAC_VER_08, "RTL8102e" },
+ { 0x7cf, 0x249, RTL_GIGA_MAC_VER_08, "RTL8102e" },
+ { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07, "RTL8102e" },
+ { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07, "RTL8102e" },
+ { 0x7cf, 0x240, RTL_GIGA_MAC_VER_14, "RTL8401" },
+ { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09, "RTL8102e/RTL8103e" },
+ { 0x7c8, 0x248, RTL_GIGA_MAC_VER_09, "RTL8102e/RTL8103e" },
+ { 0x7c8, 0x340, RTL_GIGA_MAC_VER_10, "RTL8101e/RTL8100e" },
+
+ /* 8110 family. */
+ { 0xfc8, 0x980, RTL_GIGA_MAC_VER_06, "RTL8169sc/8110sc" },
+ { 0xfc8, 0x180, RTL_GIGA_MAC_VER_05, "RTL8169sc/8110sc" },
+ { 0xfc8, 0x100, RTL_GIGA_MAC_VER_04, "RTL8169sb/8110sb" },
+ { 0xfc8, 0x040, RTL_GIGA_MAC_VER_03, "RTL8110s" },
+ { 0xfc8, 0x008, RTL_GIGA_MAC_VER_02, "RTL8169s" },
+
+ /* Catch-all */
+ { 0x000, 0x000, RTL_GIGA_MAC_NONE }
};
static const struct pci_device_id rtl8169_pci_tbl[] = {
@@ -156,8 +220,6 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_VDEVICE(REALTEK, 0x8168) },
{ PCI_VDEVICE(NCUBE, 0x8168) },
{ PCI_VDEVICE(REALTEK, 0x8169) },
- { PCI_VENDOR_ID_DLINK, 0x4300,
- PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0 },
{ PCI_VDEVICE(DLINK, 0x4300) },
{ PCI_VDEVICE(DLINK, 0x4302) },
{ PCI_VDEVICE(AT, 0xc107) },
@@ -166,7 +228,10 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
{ 0x0001, 0x8168, PCI_ANY_ID, 0x2410 },
{ PCI_VDEVICE(REALTEK, 0x8125) },
{ PCI_VDEVICE(REALTEK, 0x8126) },
+ { PCI_VDEVICE(REALTEK, 0x8127) },
{ PCI_VDEVICE(REALTEK, 0x3000) },
+ { PCI_VDEVICE(REALTEK, 0x5000) },
+ { PCI_VDEVICE(REALTEK, 0x0e10) },
{}
};
@@ -347,6 +412,8 @@ enum rtl8125_registers {
TxPoll_8125 = 0x90,
LEDSEL3 = 0x96,
MAC0_BKP = 0x19e0,
+ RSS_CTRL_8125 = 0x4500,
+ Q_NUM_CTRL_8125 = 0x4800,
EEE_TXIDLE_TIMER_8125 = 0x6048,
};
@@ -621,7 +688,6 @@ struct rtl8169_tc_offsets {
enum rtl_flag {
RTL_FLAG_TASK_RESET_PENDING,
- RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE,
RTL_FLAG_TASK_TX_TIMEOUT,
RTL_FLAG_MAX
};
@@ -630,6 +696,7 @@ enum rtl_dash_type {
RTL_DASH_NONE,
RTL_DASH_DP,
RTL_DASH_EP,
+ RTL_DASH_25_BP,
};
struct rtl8169_private {
@@ -660,13 +727,9 @@ struct rtl8169_private {
struct work_struct work;
} wk;
- raw_spinlock_t config25_lock;
raw_spinlock_t mac_ocp_lock;
struct mutex led_lock; /* serialize LED ctrl RMW access */
- raw_spinlock_t cfg9346_usage_lock;
- int cfg9346_usage_count;
-
unsigned supports_gmii:1;
unsigned aspm_manageable:1;
unsigned dash_enabled:1;
@@ -710,8 +773,13 @@ MODULE_FIRMWARE(FIRMWARE_8107E_2);
MODULE_FIRMWARE(FIRMWARE_8125A_3);
MODULE_FIRMWARE(FIRMWARE_8125B_2);
MODULE_FIRMWARE(FIRMWARE_8125D_1);
+MODULE_FIRMWARE(FIRMWARE_8125D_2);
+MODULE_FIRMWARE(FIRMWARE_8125K_1);
+MODULE_FIRMWARE(FIRMWARE_8125BP_2);
+MODULE_FIRMWARE(FIRMWARE_9151A_1);
MODULE_FIRMWARE(FIRMWARE_8126A_2);
MODULE_FIRMWARE(FIRMWARE_8126A_3);
+MODULE_FIRMWARE(FIRMWARE_8127A_1);
static inline struct device *tp_to_dev(struct rtl8169_private *tp)
{
@@ -720,22 +788,12 @@ static inline struct device *tp_to_dev(struct rtl8169_private *tp)
static void rtl_lock_config_regs(struct rtl8169_private *tp)
{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&tp->cfg9346_usage_lock, flags);
- if (!--tp->cfg9346_usage_count)
- RTL_W8(tp, Cfg9346, Cfg9346_Lock);
- raw_spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags);
+ RTL_W8(tp, Cfg9346, Cfg9346_Lock);
}
static void rtl_unlock_config_regs(struct rtl8169_private *tp)
{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&tp->cfg9346_usage_lock, flags);
- if (!tp->cfg9346_usage_count++)
- RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
- raw_spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags);
+ RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
}
static void rtl_pci_commit(struct rtl8169_private *tp)
@@ -746,24 +804,32 @@ static void rtl_pci_commit(struct rtl8169_private *tp)
static void rtl_mod_config2(struct rtl8169_private *tp, u8 clear, u8 set)
{
- unsigned long flags;
u8 val;
- raw_spin_lock_irqsave(&tp->config25_lock, flags);
val = RTL_R8(tp, Config2);
RTL_W8(tp, Config2, (val & ~clear) | set);
- raw_spin_unlock_irqrestore(&tp->config25_lock, flags);
}
static void rtl_mod_config5(struct rtl8169_private *tp, u8 clear, u8 set)
{
- unsigned long flags;
u8 val;
- raw_spin_lock_irqsave(&tp->config25_lock, flags);
val = RTL_R8(tp, Config5);
RTL_W8(tp, Config5, (val & ~clear) | set);
- raw_spin_unlock_irqrestore(&tp->config25_lock, flags);
+}
+
+static void r8169_mod_reg8_cond(struct rtl8169_private *tp, int reg,
+ u8 bits, bool cond)
+{
+ u8 val, old_val;
+
+ old_val = RTL_R8(tp, reg);
+ if (cond)
+ val = old_val | bits;
+ else
+ val = old_val & ~bits;
+ if (val != old_val)
+ RTL_W8(tp, reg, val);
}
static bool rtl_is_8125(struct rtl8169_private *tp)
@@ -775,7 +841,7 @@ static bool rtl_is_8168evl_up(struct rtl8169_private *tp)
{
return tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
tp->mac_version != RTL_GIGA_MAC_VER_39 &&
- tp->mac_version <= RTL_GIGA_MAC_VER_53;
+ tp->mac_version <= RTL_GIGA_MAC_VER_52;
}
static bool rtl_supports_eee(struct rtl8169_private *tp)
@@ -943,9 +1009,7 @@ void r8169_get_led_name(struct rtl8169_private *tp, int idx,
static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type)
{
/* based on RTL8168FP_OOBMAC_BASE in vendor driver */
- if (type == ERIAR_OOB &&
- (tp->mac_version == RTL_GIGA_MAC_VER_52 ||
- tp->mac_version == RTL_GIGA_MAC_VER_53))
+ if (type == ERIAR_OOB && tp->mac_version == RTL_GIGA_MAC_VER_52)
*cmd |= 0xf70 << 18;
}
@@ -1234,7 +1298,7 @@ static void rtl_writephy(struct rtl8169_private *tp, int location, int val)
case RTL_GIGA_MAC_VER_31:
r8168dp_2_mdio_write(tp, location, val);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_LAST:
r8168g_mdio_write(tp, location, val);
break;
default:
@@ -1249,7 +1313,7 @@ static int rtl_readphy(struct rtl8169_private *tp, int location)
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return r8168dp_2_mdio_read(tp, location);
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_LAST:
return r8168g_mdio_read(tp, location);
default:
return r8169_mdio_read(tp, location);
@@ -1364,10 +1428,19 @@ static void rtl8168ep_driver_start(struct rtl8169_private *tp)
rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30);
}
+static void rtl8125bp_driver_start(struct rtl8169_private *tp)
+{
+ r8168ep_ocp_write(tp, 0x01, 0x14, OOB_CMD_DRIVER_START);
+ r8168ep_ocp_write(tp, 0x01, 0x18, 0x00);
+ r8168ep_ocp_write(tp, 0x01, 0x10, 0x01);
+}
+
static void rtl8168_driver_start(struct rtl8169_private *tp)
{
if (tp->dash_type == RTL_DASH_DP)
rtl8168dp_driver_start(tp);
+ else if (tp->dash_type == RTL_DASH_25_BP)
+ rtl8125bp_driver_start(tp);
else
rtl8168ep_driver_start(tp);
}
@@ -1388,10 +1461,19 @@ static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10);
}
+static void rtl8125bp_driver_stop(struct rtl8169_private *tp)
+{
+ r8168ep_ocp_write(tp, 0x01, 0x14, OOB_CMD_DRIVER_STOP);
+ r8168ep_ocp_write(tp, 0x01, 0x18, 0x00);
+ r8168ep_ocp_write(tp, 0x01, 0x10, 0x01);
+}
+
static void rtl8168_driver_stop(struct rtl8169_private *tp)
{
if (tp->dash_type == RTL_DASH_DP)
rtl8168dp_driver_stop(tp);
+ else if (tp->dash_type == RTL_DASH_25_BP)
+ rtl8125bp_driver_stop(tp);
else
rtl8168ep_driver_stop(tp);
}
@@ -1414,6 +1496,7 @@ static bool rtl_dash_is_enabled(struct rtl8169_private *tp)
case RTL_DASH_DP:
return r8168dp_check_dash(tp);
case RTL_DASH_EP:
+ case RTL_DASH_25_BP:
return r8168ep_check_dash(tp);
default:
return false;
@@ -1426,8 +1509,10 @@ static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return RTL_DASH_DP;
- case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53:
+ case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_52:
return RTL_DASH_EP;
+ case RTL_GIGA_MAC_VER_66:
+ return RTL_DASH_25_BP;
default:
return RTL_DASH_NONE;
}
@@ -1436,16 +1521,17 @@ static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp)
static void rtl_set_d3_pll_down(struct rtl8169_private *tp, bool enable)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
- case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30:
- case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_66:
- if (enable)
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~D3_NO_PLL_DOWN);
- else
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | D3_NO_PLL_DOWN);
+ case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_24:
+ case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+ case RTL_GIGA_MAC_VER_38:
+ break;
+ case RTL_GIGA_MAC_VER_80:
+ r8169_mod_reg8_cond(tp, PMCH, D3_NO_PLL_DOWN, true);
break;
default:
+ r8169_mod_reg8_cond(tp, PMCH, D3HOT_NO_PLL_DOWN, true);
+ r8169_mod_reg8_cond(tp, PMCH, D3COLD_NO_PLL_DOWN, !enable);
break;
}
}
@@ -1556,61 +1642,40 @@ static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
{
- static const struct {
- u32 opt;
- u16 reg;
- u8 mask;
- } cfg[] = {
- { WAKE_PHY, Config3, LinkUp },
- { WAKE_UCAST, Config5, UWF },
- { WAKE_BCAST, Config5, BWF },
- { WAKE_MCAST, Config5, MWF },
- { WAKE_ANY, Config5, LanWake },
- { WAKE_MAGIC, Config3, MagicPacket }
- };
- unsigned int i, tmp = ARRAY_SIZE(cfg);
- unsigned long flags;
- u8 options;
-
rtl_unlock_config_regs(tp);
if (rtl_is_8168evl_up(tp)) {
- tmp--;
if (wolopts & WAKE_MAGIC)
rtl_eri_set_bits(tp, 0x0dc, MagicPacket_v2);
else
rtl_eri_clear_bits(tp, 0x0dc, MagicPacket_v2);
} else if (rtl_is_8125(tp)) {
- tmp--;
if (wolopts & WAKE_MAGIC)
r8168_mac_ocp_modify(tp, 0xc0b6, 0, BIT(0));
else
r8168_mac_ocp_modify(tp, 0xc0b6, BIT(0), 0);
+ } else {
+ r8169_mod_reg8_cond(tp, Config3, MagicPacket,
+ wolopts & WAKE_MAGIC);
}
- raw_spin_lock_irqsave(&tp->config25_lock, flags);
- for (i = 0; i < tmp; i++) {
- options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask;
- if (wolopts & cfg[i].opt)
- options |= cfg[i].mask;
- RTL_W8(tp, cfg[i].reg, options);
- }
- raw_spin_unlock_irqrestore(&tp->config25_lock, flags);
+ r8169_mod_reg8_cond(tp, Config3, LinkUp, wolopts & WAKE_PHY);
+ if (rtl_is_8125(tp))
+ r8168_mac_ocp_modify(tp, 0xe0c6, 0x3f,
+ wolopts & WAKE_PHY ? 0x13 : 0);
+ r8169_mod_reg8_cond(tp, Config5, UWF, wolopts & WAKE_UCAST);
+ r8169_mod_reg8_cond(tp, Config5, BWF, wolopts & WAKE_BCAST);
+ r8169_mod_reg8_cond(tp, Config5, MWF, wolopts & WAKE_MCAST);
+ r8169_mod_reg8_cond(tp, Config5, LanWake, wolopts);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
- options = RTL_R8(tp, Config1) & ~PMEnable;
- if (wolopts)
- options |= PMEnable;
- RTL_W8(tp, Config1, options);
+ r8169_mod_reg8_cond(tp, Config1, PMEnable, wolopts);
break;
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_66:
- if (wolopts)
- rtl_mod_config2(tp, 0, PME_SIGNAL);
- else
- rtl_mod_config2(tp, PME_SIGNAL, 0);
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_LAST:
+ r8169_mod_reg8_cond(tp, Config2, PME_SIGNAL, wolopts);
break;
default:
break;
@@ -2082,7 +2147,7 @@ static void rtl_set_eee_txidle_timer(struct rtl8169_private *tp)
tp->tx_lpi_timer = timer_val;
r8168_mac_ocp_write(tp, 0xe048, timer_val);
break;
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
tp->tx_lpi_timer = timer_val;
RTL_W16(tp, EEE_TXIDLE_TIMER_8125, timer_val);
break;
@@ -2271,147 +2336,30 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_eth_ctrl_stats = rtl8169_get_eth_ctrl_stats,
};
-static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
+static const struct rtl_chip_info *rtl8169_get_chip_version(u16 xid, bool gmii)
{
- /*
- * The driver currently handles the 8168Bf and the 8168Be identically
- * but they can be identified more specifically through the test below
- * if needed:
- *
- * (RTL_R32(tp, TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
- *
- * Same thing for the 8101Eb and the 8101Ec:
- *
- * (RTL_R32(tp, TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
- */
- static const struct rtl_mac_info {
- u16 mask;
- u16 val;
- enum mac_version ver;
- } mac_info[] = {
- /* 8126A family. */
- { 0x7cf, 0x64a, RTL_GIGA_MAC_VER_66 },
- { 0x7cf, 0x649, RTL_GIGA_MAC_VER_65 },
-
- /* 8125D family. */
- { 0x7cf, 0x688, RTL_GIGA_MAC_VER_64 },
-
- /* 8125B family. */
- { 0x7cf, 0x641, RTL_GIGA_MAC_VER_63 },
-
- /* 8125A family. */
- { 0x7cf, 0x609, RTL_GIGA_MAC_VER_61 },
- /* It seems only XID 609 made it to the mass market.
- * { 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 },
- * { 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 },
- */
-
- /* RTL8117 */
- { 0x7cf, 0x54b, RTL_GIGA_MAC_VER_53 },
- { 0x7cf, 0x54a, RTL_GIGA_MAC_VER_52 },
-
- /* 8168EP family. */
- { 0x7cf, 0x502, RTL_GIGA_MAC_VER_51 },
- /* It seems this chip version never made it to
- * the wild. Let's disable detection.
- * { 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 },
- * { 0x7cf, 0x500, RTL_GIGA_MAC_VER_49 },
- */
-
- /* 8168H family. */
- { 0x7cf, 0x541, RTL_GIGA_MAC_VER_46 },
- /* It seems this chip version never made it to
- * the wild. Let's disable detection.
- * { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45 },
- */
- /* Realtek calls it RTL8168M, but it's handled like RTL8168H */
- { 0x7cf, 0x6c0, RTL_GIGA_MAC_VER_46 },
-
- /* 8168G family. */
- { 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44 },
- { 0x7cf, 0x509, RTL_GIGA_MAC_VER_42 },
- /* It seems this chip version never made it to
- * the wild. Let's disable detection.
- * { 0x7cf, 0x4c1, RTL_GIGA_MAC_VER_41 },
- */
- { 0x7cf, 0x4c0, RTL_GIGA_MAC_VER_40 },
-
- /* 8168F family. */
- { 0x7c8, 0x488, RTL_GIGA_MAC_VER_38 },
- { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 },
- { 0x7cf, 0x480, RTL_GIGA_MAC_VER_35 },
-
- /* 8168E family. */
- { 0x7c8, 0x2c8, RTL_GIGA_MAC_VER_34 },
- { 0x7cf, 0x2c1, RTL_GIGA_MAC_VER_32 },
- { 0x7c8, 0x2c0, RTL_GIGA_MAC_VER_33 },
-
- /* 8168D family. */
- { 0x7cf, 0x281, RTL_GIGA_MAC_VER_25 },
- { 0x7c8, 0x280, RTL_GIGA_MAC_VER_26 },
-
- /* 8168DP family. */
- /* It seems this early RTL8168dp version never made it to
- * the wild. Support has been removed.
- * { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 },
- */
- { 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28 },
- { 0x7cf, 0x28b, RTL_GIGA_MAC_VER_31 },
-
- /* 8168C family. */
- { 0x7cf, 0x3c9, RTL_GIGA_MAC_VER_23 },
- { 0x7cf, 0x3c8, RTL_GIGA_MAC_VER_18 },
- { 0x7c8, 0x3c8, RTL_GIGA_MAC_VER_24 },
- { 0x7cf, 0x3c0, RTL_GIGA_MAC_VER_19 },
- { 0x7cf, 0x3c2, RTL_GIGA_MAC_VER_20 },
- { 0x7cf, 0x3c3, RTL_GIGA_MAC_VER_21 },
- { 0x7c8, 0x3c0, RTL_GIGA_MAC_VER_22 },
-
- /* 8168B family. */
- { 0x7c8, 0x380, RTL_GIGA_MAC_VER_17 },
- /* This one is very old and rare, let's see if anybody complains.
- * { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 },
- */
-
- /* 8101 family. */
- { 0x7c8, 0x448, RTL_GIGA_MAC_VER_39 },
- { 0x7c8, 0x440, RTL_GIGA_MAC_VER_37 },
- { 0x7cf, 0x409, RTL_GIGA_MAC_VER_29 },
- { 0x7c8, 0x408, RTL_GIGA_MAC_VER_30 },
- { 0x7cf, 0x349, RTL_GIGA_MAC_VER_08 },
- { 0x7cf, 0x249, RTL_GIGA_MAC_VER_08 },
- { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 },
- { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 },
- { 0x7cf, 0x240, RTL_GIGA_MAC_VER_14 },
- { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 },
- { 0x7c8, 0x248, RTL_GIGA_MAC_VER_09 },
- { 0x7c8, 0x340, RTL_GIGA_MAC_VER_10 },
-
- /* 8110 family. */
- { 0xfc8, 0x980, RTL_GIGA_MAC_VER_06 },
- { 0xfc8, 0x180, RTL_GIGA_MAC_VER_05 },
- { 0xfc8, 0x100, RTL_GIGA_MAC_VER_04 },
- { 0xfc8, 0x040, RTL_GIGA_MAC_VER_03 },
- { 0xfc8, 0x008, RTL_GIGA_MAC_VER_02 },
-
- /* Catch-all */
- { 0x000, 0x000, RTL_GIGA_MAC_NONE }
+ /* Chips combining a 1Gbps MAC with a 100Mbps PHY */
+ static const struct rtl_chip_info rtl8106eus_info = {
+ .mac_version = RTL_GIGA_MAC_VER_43,
+ .name = "RTL8106eus",
+ .fw_name = FIRMWARE_8106E_2,
};
- const struct rtl_mac_info *p = mac_info;
- enum mac_version ver;
+ static const struct rtl_chip_info rtl8107e_info = {
+ .mac_version = RTL_GIGA_MAC_VER_48,
+ .name = "RTL8107e",
+ .fw_name = FIRMWARE_8107E_2,
+ };
+ const struct rtl_chip_info *p = rtl_chip_infos;
while ((xid & p->mask) != p->val)
p++;
- ver = p->ver;
- if (ver != RTL_GIGA_MAC_NONE && !gmii) {
- if (ver == RTL_GIGA_MAC_VER_42)
- ver = RTL_GIGA_MAC_VER_43;
- else if (ver == RTL_GIGA_MAC_VER_46)
- ver = RTL_GIGA_MAC_VER_48;
- }
+ if (p->mac_version == RTL_GIGA_MAC_VER_42 && !gmii)
+ return &rtl8106eus_info;
+ if (p->mac_version == RTL_GIGA_MAC_VER_46 && !gmii)
+ return &rtl8107e_info;
- return ver;
+ return p;
}
static void rtl_release_firmware(struct rtl8169_private *tp)
@@ -2440,26 +2388,6 @@ void r8169_apply_firmware(struct rtl8169_private *tp)
}
}
-static void rtl8168_config_eee_mac(struct rtl8169_private *tp)
-{
- /* Adjust EEE LED frequency */
- if (tp->mac_version != RTL_GIGA_MAC_VER_38)
- RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
-
- rtl_eri_set_bits(tp, 0x1b0, 0x0003);
-}
-
-static void rtl8125a_config_eee_mac(struct rtl8169_private *tp)
-{
- r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0));
- r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1));
-}
-
-static void rtl8125b_config_eee_mac(struct rtl8169_private *tp)
-{
- r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0));
-}
-
static void rtl_rar_exgmac_set(struct rtl8169_private *tp, const u8 *addr)
{
rtl_eri_write(tp, 0xe0, ERIAR_MASK_1111, get_unaligned_le32(addr));
@@ -2555,13 +2483,13 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_38:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
case RTL_GIGA_MAC_VER_61:
RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST);
break;
- case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_LAST:
RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST |
RX_PAUSE_SLOT_ON);
break;
@@ -2576,86 +2504,31 @@ static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0;
}
-static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
-{
- RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
- RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1);
-}
-
-static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
-{
- RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
- RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1);
-}
-
-static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
-{
- RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
-}
-
-static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
-{
- RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
-}
-
-static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
-{
- RTL_W8(tp, MaxTxPacketSize, 0x24);
- RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
- RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01);
-}
-
-static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
-{
- RTL_W8(tp, MaxTxPacketSize, 0x3f);
- RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
- RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01);
-}
-
-static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
-{
- RTL_W8(tp, Config4, RTL_R8(tp, Config4) | (1 << 0));
-}
-
-static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
-{
- RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0));
-}
-
static void rtl_jumbo_config(struct rtl8169_private *tp)
{
bool jumbo = tp->dev->mtu > ETH_DATA_LEN;
int readrq = 4096;
+ if (jumbo && tp->mac_version >= RTL_GIGA_MAC_VER_17 &&
+ tp->mac_version <= RTL_GIGA_MAC_VER_26)
+ readrq = 512;
+
rtl_unlock_config_regs(tp);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_17:
- if (jumbo) {
- readrq = 512;
- r8168b_1_hw_jumbo_enable(tp);
- } else {
- r8168b_1_hw_jumbo_disable(tp);
- }
+ r8169_mod_reg8_cond(tp, Config4, BIT(0), jumbo);
break;
case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
- if (jumbo) {
- readrq = 512;
- r8168c_hw_jumbo_enable(tp);
- } else {
- r8168c_hw_jumbo_disable(tp);
- }
+ r8169_mod_reg8_cond(tp, Config3, Jumbo_En0, jumbo);
+ r8169_mod_reg8_cond(tp, Config4, Jumbo_En1, jumbo);
break;
case RTL_GIGA_MAC_VER_28:
- if (jumbo)
- r8168dp_hw_jumbo_enable(tp);
- else
- r8168dp_hw_jumbo_disable(tp);
+ r8169_mod_reg8_cond(tp, Config3, Jumbo_En0, jumbo);
break;
case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
- if (jumbo)
- r8168e_hw_jumbo_enable(tp);
- else
- r8168e_hw_jumbo_disable(tp);
+ RTL_W8(tp, MaxTxPacketSize, jumbo ? 0x24 : 0x3f);
+ r8169_mod_reg8_cond(tp, Config3, Jumbo_En0, jumbo);
+ r8169_mod_reg8_cond(tp, Config4, BIT(0), jumbo);
break;
default:
break;
@@ -2741,14 +2614,14 @@ DECLARE_RTL_COND(rtl_rxtx_empty_cond_2)
static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
break;
case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_61:
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
break;
- case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_LAST:
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond_2, 100, 42);
@@ -2909,10 +2782,45 @@ static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
RTL_R32(tp, CSIDR) : ~0;
}
+static void rtl_csi_mod(struct rtl8169_private *tp, int addr,
+ u32 mask, u32 set)
+{
+ u32 val;
+
+ WARN(addr % 4, "Invalid CSI address %#x\n", addr);
+
+ netdev_notice_once(tp->dev,
+ "No native access to PCI extended config space, falling back to CSI\n");
+
+ val = rtl_csi_read(tp, addr);
+ rtl_csi_write(tp, addr, (val & ~mask) | set);
+}
+
+static void rtl_disable_zrxdc_timeout(struct rtl8169_private *tp)
+{
+ struct pci_dev *pdev = tp->pci_dev;
+ int rc;
+ u8 val;
+
+#define RTL_GEN3_RELATED_OFF 0x0890
+#define RTL_GEN3_ZRXDC_NONCOMPL 0x1
+ if (pdev->cfg_size > RTL_GEN3_RELATED_OFF) {
+ rc = pci_read_config_byte(pdev, RTL_GEN3_RELATED_OFF, &val);
+ if (rc == PCIBIOS_SUCCESSFUL) {
+ val &= ~RTL_GEN3_ZRXDC_NONCOMPL;
+ rc = pci_write_config_byte(pdev, RTL_GEN3_RELATED_OFF,
+ val);
+ if (rc == PCIBIOS_SUCCESSFUL)
+ return;
+ }
+ }
+
+ rtl_csi_mod(tp, RTL_GEN3_RELATED_OFF, RTL_GEN3_ZRXDC_NONCOMPL, 0);
+}
+
static void rtl_set_aspm_entry_latency(struct rtl8169_private *tp, u8 val)
{
struct pci_dev *pdev = tp->pci_dev;
- u32 csi;
/* According to Realtek the value at config space address 0x070f
* controls the L0s/L1 entrance latency. We try standard ECAM access
@@ -2924,10 +2832,7 @@ static void rtl_set_aspm_entry_latency(struct rtl8169_private *tp, u8 val)
pci_write_config_byte(pdev, 0x070f, val) == PCIBIOS_SUCCESSFUL)
return;
- netdev_notice_once(tp->dev,
- "No native access to PCI extended config space, falling back to CSI\n");
- csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
- rtl_csi_write(tp, 0x070c, csi | val << 24);
+ rtl_csi_mod(tp, 0x070c, 0xff000000, val << 24);
}
static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp)
@@ -2991,7 +2896,7 @@ static void rtl_enable_exit_l1(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_38:
rtl_eri_set_bits(tp, 0xd4, 0x0c00);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_LAST:
r8168_mac_ocp_modify(tp, 0xc0ac, 0, 0x1f80);
break;
default:
@@ -3005,7 +2910,7 @@ static void rtl_disable_exit_l1(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
rtl_eri_clear_bits(tp, 0xd4, 0x1f00);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_LAST:
r8168_mac_ocp_modify(tp, 0xc0ac, 0x1f80, 0);
break;
default:
@@ -3031,8 +2936,8 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
rtl_mod_config5(tp, 0, ASPM_en);
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_65:
- case RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_70:
+ case RTL_GIGA_MAC_VER_80:
val8 = RTL_R8(tp, INT_CFG0_8125) | INT_CFG0_CLKREQEN;
RTL_W8(tp, INT_CFG0_8125, val8);
break;
@@ -3043,7 +2948,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
/* reset ephy tx/rx disable timer */
r8168_mac_ocp_modify(tp, 0xe094, 0xff00, 0);
/* chip can trigger L1.2 */
@@ -3055,7 +2960,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
} else {
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, 0);
break;
default:
@@ -3063,8 +2968,8 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
}
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_65:
- case RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_70:
+ case RTL_GIGA_MAC_VER_80:
val8 = RTL_R8(tp, INT_CFG0_8125) & ~INT_CFG0_CLKREQEN;
RTL_W8(tp, INT_CFG0_8125, val8);
break;
@@ -3260,8 +3165,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
- rtl8168_config_eee_mac(tp);
-
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
rtl_mod_config5(tp, Spi_en, 0);
@@ -3286,8 +3189,6 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
rtl_mod_config5(tp, Spi_en, 0);
-
- rtl8168_config_eee_mac(tp);
}
static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
@@ -3337,8 +3238,6 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
- rtl8168_config_eee_mac(tp);
-
rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06);
rtl_eri_clear_bits(tp, 0x1b0, BIT(12));
@@ -3479,8 +3378,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
- rtl8168_config_eee_mac(tp);
-
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
@@ -3499,7 +3396,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini);
}
- r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070);
+ r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0000);
r8168_mac_ocp_modify(tp, 0xe052, 0x6000, 0x8008);
r8168_mac_ocp_modify(tp, 0xe0d6, 0x01ff, 0x017f);
r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f);
@@ -3528,8 +3425,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
- rtl8168_config_eee_mac(tp);
-
rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
@@ -3585,8 +3480,6 @@ static void rtl_hw_start_8117(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
- rtl8168_config_eee_mac(tp);
-
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
@@ -3604,7 +3497,7 @@ static void rtl_hw_start_8117(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini);
}
- r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070);
+ r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0000);
r8168_mac_ocp_write(tp, 0xea80, 0x0003);
r8168_mac_ocp_modify(tp, 0xe052, 0x0000, 0x0009);
r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f);
@@ -3766,8 +3659,8 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
rtl_pcie_state_l2l3_disable(tp);
RTL_W16(tp, 0x382, 0x221b);
- RTL_W8(tp, 0x4500, 0);
- RTL_W16(tp, 0x4800, 0);
+ RTL_W32(tp, RSS_CTRL_8125, 0);
+ RTL_W16(tp, Q_NUM_CTRL_8125, 0);
/* disable UPS */
r8168_mac_ocp_modify(tp, 0xd40a, 0x0010, 0x0000);
@@ -3784,12 +3677,13 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
/* disable new tx descriptor format */
r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000);
- if (tp->mac_version == RTL_GIGA_MAC_VER_65 ||
- tp->mac_version == RTL_GIGA_MAC_VER_66)
+ if (tp->mac_version == RTL_GIGA_MAC_VER_70 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_80)
RTL_W8(tp, 0xD8, RTL_R8(tp, 0xD8) & ~0x02);
- if (tp->mac_version == RTL_GIGA_MAC_VER_65 ||
- tp->mac_version == RTL_GIGA_MAC_VER_66)
+ if (tp->mac_version == RTL_GIGA_MAC_VER_80)
+ r8168_mac_ocp_modify(tp, 0xe614, 0x0f00, 0x0f00);
+ else if (tp->mac_version == RTL_GIGA_MAC_VER_70)
r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400);
else if (tp->mac_version == RTL_GIGA_MAC_VER_63)
r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0200);
@@ -3804,11 +3698,11 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xc0b4, 0x0000, 0x000c);
r8168_mac_ocp_modify(tp, 0xeb6a, 0x00ff, 0x0033);
r8168_mac_ocp_modify(tp, 0xeb50, 0x03e0, 0x0040);
- r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030);
+ r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0000);
r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000);
r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001);
- if (tp->mac_version == RTL_GIGA_MAC_VER_65 ||
- tp->mac_version == RTL_GIGA_MAC_VER_66)
+ if (tp->mac_version == RTL_GIGA_MAC_VER_70 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_80)
r8168_mac_ocp_modify(tp, 0xea1c, 0x0300, 0x0000);
else
r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000);
@@ -3826,11 +3720,6 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
rtl_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10);
- if (tp->mac_version == RTL_GIGA_MAC_VER_61)
- rtl8125a_config_eee_mac(tp);
- else
- rtl8125b_config_eee_mac(tp);
-
rtl_disable_rxdvgate(tp);
}
@@ -3881,6 +3770,13 @@ static void rtl_hw_start_8125d(struct rtl8169_private *tp)
static void rtl_hw_start_8126a(struct rtl8169_private *tp)
{
+ rtl_disable_zrxdc_timeout(tp);
+ rtl_set_def_aspm_entry_latency(tp);
+ rtl_hw_start_8125_common(tp);
+}
+
+static void rtl_hw_start_8127a(struct rtl8169_private *tp)
+{
rtl_set_def_aspm_entry_latency(tp);
rtl_hw_start_8125_common(tp);
}
@@ -3892,7 +3788,6 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_08] = rtl_hw_start_8102e_3,
[RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2,
[RTL_GIGA_MAC_VER_10] = NULL,
- [RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168b,
[RTL_GIGA_MAC_VER_14] = rtl_hw_start_8401,
[RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168b,
[RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1,
@@ -3924,12 +3819,12 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_48] = rtl_hw_start_8168h_1,
[RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3,
[RTL_GIGA_MAC_VER_52] = rtl_hw_start_8117,
- [RTL_GIGA_MAC_VER_53] = rtl_hw_start_8117,
[RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2,
[RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b,
[RTL_GIGA_MAC_VER_64] = rtl_hw_start_8125d,
- [RTL_GIGA_MAC_VER_65] = rtl_hw_start_8126a,
- [RTL_GIGA_MAC_VER_66] = rtl_hw_start_8126a,
+ [RTL_GIGA_MAC_VER_66] = rtl_hw_start_8125d,
+ [RTL_GIGA_MAC_VER_70] = rtl_hw_start_8126a,
+ [RTL_GIGA_MAC_VER_80] = rtl_hw_start_8127a,
};
if (hw_configs[tp->mac_version])
@@ -3946,12 +3841,15 @@ static void rtl_hw_start_8125(struct rtl8169_private *tp)
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_61:
case RTL_GIGA_MAC_VER_64:
+ case RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_80:
for (i = 0xa00; i < 0xb00; i += 4)
RTL_W32(tp, i, 0);
+ if (tp->mac_version == RTL_GIGA_MAC_VER_80)
+ RTL_W16(tp, INT_CFG1_8125, 0x0000);
break;
case RTL_GIGA_MAC_VER_63:
- case RTL_GIGA_MAC_VER_65:
- case RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_70:
for (i = 0xa00; i < 0xa80; i += 4)
RTL_W32(tp, i, 0);
RTL_W16(tp, INT_CFG1_8125, 0x0000);
@@ -4183,7 +4081,7 @@ static void rtl8169_cleanup(struct rtl8169_private *tp)
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_LAST:
rtl_enable_rxdvgate(tp);
fsleep(2000);
break;
@@ -4340,7 +4238,7 @@ static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp,
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
padto = max_t(unsigned int, padto, ETH_ZLEN);
break;
default:
@@ -4768,12 +4666,6 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
if (status & LinkChg)
phy_mac_interrupt(tp->phydev);
- if (unlikely(status & RxFIFOOver &&
- tp->mac_version == RTL_GIGA_MAC_VER_11)) {
- netif_stop_queue(tp->dev);
- rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
- }
-
rtl_irq_disable(tp);
napi_schedule(&tp->napi);
out:
@@ -4811,8 +4703,6 @@ static void rtl_task(struct work_struct *work)
reset:
rtl_reset_work(tp);
netif_wake_queue(tp->dev);
- } else if (test_and_clear_bit(RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, tp->wk.flags)) {
- rtl_reset_work(tp);
}
}
@@ -4832,6 +4722,41 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
return work_done;
}
+static void rtl_enable_tx_lpi(struct rtl8169_private *tp, bool enable)
+{
+ if (!rtl_supports_eee(tp))
+ return;
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_52:
+ /* Adjust EEE LED frequency */
+ if (tp->mac_version != RTL_GIGA_MAC_VER_38)
+ RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
+ if (enable)
+ rtl_eri_set_bits(tp, 0x1b0, 0x0003);
+ else
+ rtl_eri_clear_bits(tp, 0x1b0, 0x0003);
+ break;
+ case RTL_GIGA_MAC_VER_61:
+ if (enable) {
+ r8168_mac_ocp_modify(tp, 0xe040, 0, 0x0003);
+ r8168_mac_ocp_modify(tp, 0xeb62, 0, 0x0006);
+ } else {
+ r8168_mac_ocp_modify(tp, 0xe040, 0x0003, 0);
+ r8168_mac_ocp_modify(tp, 0xeb62, 0x0006, 0);
+ }
+ break;
+ case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_LAST:
+ if (enable)
+ r8168_mac_ocp_modify(tp, 0xe040, 0, 0x0003);
+ else
+ r8168_mac_ocp_modify(tp, 0xe040, 0x0003, 0);
+ break;
+ default:
+ break;
+ }
+}
+
static void r8169_phylink_handler(struct net_device *ndev)
{
struct rtl8169_private *tp = netdev_priv(ndev);
@@ -4839,6 +4764,7 @@ static void r8169_phylink_handler(struct net_device *ndev)
if (netif_carrier_ok(ndev)) {
rtl_link_chg_patch(tp);
+ rtl_enable_tx_lpi(tp, tp->phydev->enable_tx_lpi);
pm_request_resume(d);
} else {
pm_runtime_idle(d);
@@ -5082,9 +5008,8 @@ static int rtl8169_resume(struct device *device)
if (!device_may_wakeup(tp_to_dev(tp)))
clk_prepare_enable(tp->clk);
- /* Reportedly at least Asus X453MA truncates packets otherwise */
- if (tp->mac_version == RTL_GIGA_MAC_VER_37)
- rtl_init_rxcfg(tp);
+ /* Some chip versions may truncate packets without this initialization */
+ rtl_init_rxcfg(tp);
return rtl8169_runtime_resume(device);
}
@@ -5136,10 +5061,8 @@ static void rtl_shutdown(struct pci_dev *pdev)
/* Restore original MAC address */
rtl_rar_set(tp, tp->dev->perm_addr);
- if (system_state == SYSTEM_POWER_OFF && !tp->dash_enabled) {
- pci_wake_from_d3(pdev, tp->saved_wolopts);
- pci_set_power_state(pdev, PCI_D3hot);
- }
+ if (system_state == SYSTEM_POWER_OFF && !tp->dash_enabled)
+ pci_prepare_to_sleep(pdev);
}
static void rtl_remove_one(struct pci_dev *pdev)
@@ -5191,9 +5114,6 @@ static void rtl_set_irq_mask(struct rtl8169_private *tp)
if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
tp->irq_mask |= SYSErr | RxFIFOOver;
- else if (tp->mac_version == RTL_GIGA_MAC_VER_11)
- /* special workaround needed */
- tp->irq_mask |= RxFIFOOver;
}
static int rtl_alloc_irq(struct rtl8169_private *tp)
@@ -5266,6 +5186,33 @@ static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr,
return 0;
}
+static int r8169_mdio_read_reg_c45(struct mii_bus *mii_bus, int addr,
+ int devnum, int regnum)
+{
+ struct rtl8169_private *tp = mii_bus->priv;
+
+ if (addr > 0)
+ return -ENODEV;
+
+ if (devnum == MDIO_MMD_VEND2 && regnum > MDIO_STAT2)
+ return r8168_phy_ocp_read(tp, regnum);
+
+ return 0;
+}
+
+static int r8169_mdio_write_reg_c45(struct mii_bus *mii_bus, int addr,
+ int devnum, int regnum, u16 val)
+{
+ struct rtl8169_private *tp = mii_bus->priv;
+
+ if (addr > 0 || devnum != MDIO_MMD_VEND2 || regnum <= MDIO_STAT2)
+ return -ENODEV;
+
+ r8168_phy_ocp_write(tp, regnum, val);
+
+ return 0;
+}
+
static int r8169_mdio_register(struct rtl8169_private *tp)
{
struct pci_dev *pdev = tp->pci_dev;
@@ -5289,12 +5236,18 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
new_bus->priv = tp;
new_bus->parent = &pdev->dev;
new_bus->irq[0] = PHY_MAC_INTERRUPT;
+ new_bus->phy_mask = GENMASK(31, 1);
snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x-%x",
pci_domain_nr(pdev->bus), pci_dev_id(pdev));
new_bus->read = r8169_mdio_read_reg;
new_bus->write = r8169_mdio_write_reg;
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_40) {
+ new_bus->read_c45 = r8169_mdio_read_reg_c45;
+ new_bus->write_c45 = r8169_mdio_write_reg_c45;
+ }
+
ret = devm_mdiobus_register(&pdev->dev, new_bus);
if (ret)
return ret;
@@ -5316,6 +5269,11 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
phy_support_eee(tp->phydev);
phy_support_asym_pause(tp->phydev);
+ /* mimic behavior of r8125/r8126 vendor drivers */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_61)
+ phy_disable_eee_mode(tp->phydev,
+ ETHTOOL_LINK_MODE_2500baseT_Full_BIT);
+
/* PHY will be woken up in rtl_open() */
phy_suspend(tp->phydev);
@@ -5357,13 +5315,13 @@ static void rtl_hw_init_8125(struct rtl8169_private *tp)
static void rtl_hw_initialize(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53:
+ case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_52:
rtl8168ep_stop_cmac(tp);
fallthrough;
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
rtl_hw_init_8168g(tp);
break;
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
rtl_hw_init_8125(tp);
break;
default:
@@ -5382,12 +5340,14 @@ static int rtl_jumbo_max(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
return JUMBO_7K;
/* RTL8168b */
- case RTL_GIGA_MAC_VER_11:
case RTL_GIGA_MAC_VER_17:
return JUMBO_4K;
/* RTL8168c */
case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
return JUMBO_6K;
+ /* RTL8125/8126 */
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
+ return JUMBO_16K;
default:
return JUMBO_9K;
}
@@ -5422,55 +5382,18 @@ done:
/* register is set if system vendor successfully tested ASPM 1.2 */
static bool rtl_aspm_is_safe(struct rtl8169_private *tp)
{
- if (tp->mac_version >= RTL_GIGA_MAC_VER_61 &&
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_46 &&
r8168_mac_ocp_read(tp, 0xc0b2) & 0xf)
return true;
return false;
}
-static umode_t r8169_hwmon_is_visible(const void *drvdata,
- enum hwmon_sensor_types type,
- u32 attr, int channel)
-{
- return 0444;
-}
-
-static int r8169_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
- u32 attr, int channel, long *val)
-{
- struct rtl8169_private *tp = dev_get_drvdata(dev);
- int val_raw;
-
- val_raw = phy_read_paged(tp->phydev, 0xbd8, 0x12) & 0x3ff;
- if (val_raw >= 512)
- val_raw -= 1024;
-
- *val = 1000 * val_raw / 2;
-
- return 0;
-}
-
-static const struct hwmon_ops r8169_hwmon_ops = {
- .is_visible = r8169_hwmon_is_visible,
- .read = r8169_hwmon_read,
-};
-
-static const struct hwmon_channel_info * const r8169_hwmon_info[] = {
- HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
- NULL
-};
-
-static const struct hwmon_chip_info r8169_hwmon_chip_info = {
- .ops = &r8169_hwmon_ops,
- .info = r8169_hwmon_info,
-};
-
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ const struct rtl_chip_info *chip;
struct rtl8169_private *tp;
int jumbo_max, region, rc;
- enum mac_version chipset;
struct net_device *dev;
u32 txconfig;
u16 xid;
@@ -5487,8 +5410,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 0 : 1;
tp->ocp_base = OCP_STD_PHY_BASE;
- raw_spin_lock_init(&tp->cfg9346_usage_lock);
- raw_spin_lock_init(&tp->config25_lock);
raw_spin_lock_init(&tp->mac_ocp_lock);
mutex_init(&tp->led_lock);
@@ -5510,11 +5431,10 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (region < 0)
return dev_err_probe(&pdev->dev, -ENODEV, "no MMIO resource found\n");
- rc = pcim_iomap_regions(pdev, BIT(region), KBUILD_MODNAME);
- if (rc < 0)
- return dev_err_probe(&pdev->dev, rc, "cannot remap MMIO, aborting\n");
-
- tp->mmio_addr = pcim_iomap_table(pdev)[region];
+ tp->mmio_addr = pcim_iomap_region(pdev, region, KBUILD_MODNAME);
+ if (IS_ERR(tp->mmio_addr))
+ return dev_err_probe(&pdev->dev, PTR_ERR(tp->mmio_addr),
+ "cannot remap MMIO, aborting\n");
txconfig = RTL_R32(tp, TxConfig);
if (txconfig == ~0U)
@@ -5523,22 +5443,34 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
xid = (txconfig >> 20) & 0xfcf;
/* Identify chip attached to board */
- chipset = rtl8169_get_mac_version(xid, tp->supports_gmii);
- if (chipset == RTL_GIGA_MAC_NONE)
+ chip = rtl8169_get_chip_version(xid, tp->supports_gmii);
+ if (chip->mac_version == RTL_GIGA_MAC_NONE)
return dev_err_probe(&pdev->dev, -ENODEV,
"unknown chip XID %03x, contact r8169 maintainers (see MAINTAINERS file)\n",
xid);
- tp->mac_version = chipset;
+ tp->mac_version = chip->mac_version;
+ tp->fw_name = chip->fw_name;
/* Disable ASPM L1 as that cause random device stop working
* problems as well as full system hangs for some PCIe devices users.
*/
- if (rtl_aspm_is_safe(tp))
+ if (rtl_aspm_is_safe(tp)) {
+ dev_info(&pdev->dev, "System vendor flags ASPM as safe\n");
rc = 0;
- else
+ } else {
rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
+ }
tp->aspm_manageable = !rc;
+ /* Fiber mode on RTL8127AF isn't supported */
+ if (rtl_is_8125(tp)) {
+ u16 data = r8168_mac_ocp_read(tp, 0xd006);
+
+ if ((data & 0xff) == 0x07)
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "Fiber mode not supported\n");
+ }
+
tp->dash_type = rtl_get_dash_type(tp);
tp->dash_enabled = rtl_dash_is_enabled(tp);
@@ -5633,8 +5565,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rtl_set_irq_mask(tp);
- tp->fw_name = rtl_chip_infos[chipset].fw_name;
-
tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters),
&tp->counters_phys_addr,
GFP_KERNEL);
@@ -5647,12 +5577,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- /* The temperature sensor is available from RTl8125B */
- if (IS_REACHABLE(CONFIG_HWMON) && tp->mac_version >= RTL_GIGA_MAC_VER_63)
- /* ignore errors */
- devm_hwmon_device_register_with_info(&pdev->dev, "nic_temp", tp,
- &r8169_hwmon_chip_info,
- NULL);
rc = register_netdev(dev);
if (rc)
return rc;
@@ -5665,7 +5589,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n",
- rtl_chip_infos[chipset].name, dev->dev_addr, xid, tp->irq);
+ chip->name, dev->dev_addr, xid, tp->irq);
if (jumbo_max)
netdev_info(dev, "jumbo features [frames: %d bytes, tx checksumming: %s]\n",
diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
index 1d5b33f6c4b5..032d9d2cfa2a 100644
--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
+++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
@@ -50,6 +50,15 @@ static void r8168g_phy_param(struct phy_device *phydev, u16 parm,
phy_restore_page(phydev, oldpage, 0);
}
+static void rtl8125_phy_param(struct phy_device *phydev, u16 parm,
+ u16 mask, u16 val)
+{
+ phy_lock_mdio_bus(phydev);
+ __phy_write_mmd(phydev, MDIO_MMD_VEND2, 0xb87c, parm);
+ __phy_modify_mmd(phydev, MDIO_MMD_VEND2, 0xb87e, mask, val);
+ phy_unlock_mdio_bus(phydev);
+}
+
struct phy_reg {
u16 reg;
u16 val;
@@ -96,15 +105,7 @@ static void rtl8125_common_config_eee_phy(struct phy_device *phydev)
phy_modify_paged(phydev, 0xa4a, 0x11, 0x0200, 0x0000);
}
-static void rtl8125a_config_eee_phy(struct phy_device *phydev)
-{
- rtl8168g_config_eee_phy(phydev);
- /* disable EEE at 2.5Gbps */
- phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000);
- rtl8125_common_config_eee_phy(phydev);
-}
-
-static void rtl8125b_config_eee_phy(struct phy_device *phydev)
+static void rtl8125_config_eee_phy(struct phy_device *phydev)
{
rtl8168g_config_eee_phy(phydev);
rtl8125_common_config_eee_phy(phydev);
@@ -284,15 +285,6 @@ static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp,
rtl_writephy_batch(phydev, phy_reg_init);
}
-static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp,
- struct phy_device *phydev)
-{
- phy_write(phydev, 0x1f, 0x0001);
- phy_set_bits(phydev, 0x16, BIT(0));
- phy_write(phydev, 0x10, 0xf41b);
- phy_write(phydev, 0x1f, 0x0000);
-}
-
static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp,
struct phy_device *phydev)
{
@@ -1021,12 +1013,8 @@ static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp,
phy_write_paged(phydev, 0xac5, 0x16, 0x01ff);
phy_modify_paged(phydev, 0xac8, 0x15, 0x00f0, 0x0030);
- phy_write(phydev, 0x1f, 0x0b87);
- phy_write(phydev, 0x16, 0x80a2);
- phy_write(phydev, 0x17, 0x0153);
- phy_write(phydev, 0x16, 0x809c);
- phy_write(phydev, 0x17, 0x0153);
- phy_write(phydev, 0x1f, 0x0000);
+ rtl8125_phy_param(phydev, 0x80a2, 0xffff, 0x0153);
+ rtl8125_phy_param(phydev, 0x809c, 0xffff, 0x0153);
phy_write(phydev, 0x1f, 0x0a43);
phy_write(phydev, 0x13, 0x81B3);
@@ -1066,7 +1054,7 @@ static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp,
rtl8168g_enable_gphy_10m(phydev);
rtl8168g_disable_aldps(phydev);
- rtl8125a_config_eee_phy(phydev);
+ rtl8125_config_eee_phy(phydev);
}
static void rtl8125b_hw_phy_config(struct rtl8169_private *tp,
@@ -1078,14 +1066,9 @@ static void rtl8125b_hw_phy_config(struct rtl8169_private *tp,
phy_modify_paged(phydev, 0xac4, 0x13, 0x00f0, 0x0090);
phy_modify_paged(phydev, 0xad3, 0x10, 0x0003, 0x0001);
- phy_write(phydev, 0x1f, 0x0b87);
- phy_write(phydev, 0x16, 0x80f5);
- phy_write(phydev, 0x17, 0x760e);
- phy_write(phydev, 0x16, 0x8107);
- phy_write(phydev, 0x17, 0x360e);
- phy_write(phydev, 0x16, 0x8551);
- phy_modify(phydev, 0x17, 0xff00, 0x0800);
- phy_write(phydev, 0x1f, 0x0000);
+ rtl8125_phy_param(phydev, 0x80f5, 0xffff, 0x760e);
+ rtl8125_phy_param(phydev, 0x8107, 0xffff, 0x360e);
+ rtl8125_phy_param(phydev, 0x8551, 0xff00, 0x0800);
phy_modify_paged(phydev, 0xbf0, 0x10, 0xe000, 0xa000);
phy_modify_paged(phydev, 0xbf4, 0x13, 0x0f00, 0x0300);
@@ -1106,7 +1089,7 @@ static void rtl8125b_hw_phy_config(struct rtl8169_private *tp,
rtl8125_legacy_force_mode(phydev);
rtl8168g_disable_aldps(phydev);
- rtl8125b_config_eee_phy(phydev);
+ rtl8125_config_eee_phy(phydev);
}
static void rtl8125d_hw_phy_config(struct rtl8169_private *tp,
@@ -1116,7 +1099,25 @@ static void rtl8125d_hw_phy_config(struct rtl8169_private *tp,
rtl8168g_enable_gphy_10m(phydev);
rtl8125_legacy_force_mode(phydev);
rtl8168g_disable_aldps(phydev);
- rtl8125b_config_eee_phy(phydev);
+ rtl8125_config_eee_phy(phydev);
+}
+
+static void rtl8125bp_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ r8169_apply_firmware(tp);
+ rtl8168g_enable_gphy_10m(phydev);
+
+ r8168g_phy_param(phydev, 0x8010, 0x0800, 0x0000);
+
+ rtl8125_phy_param(phydev, 0x8088, 0xff00, 0x9000);
+ rtl8125_phy_param(phydev, 0x808f, 0xff00, 0x9000);
+
+ r8168g_phy_param(phydev, 0x8174, 0x2000, 0x1800);
+
+ rtl8125_legacy_force_mode(phydev);
+ rtl8168g_disable_aldps(phydev);
+ rtl8125_config_eee_phy(phydev);
}
static void rtl8126a_hw_phy_config(struct rtl8169_private *tp,
@@ -1129,6 +1130,171 @@ static void rtl8126a_hw_phy_config(struct rtl8169_private *tp,
rtl8125_common_config_eee_phy(phydev);
}
+static void rtl8127a_1_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ r8169_apply_firmware(tp);
+ rtl8168g_enable_gphy_10m(phydev);
+
+ r8168g_phy_param(phydev, 0x8415, 0xff00, 0x9300);
+ r8168g_phy_param(phydev, 0x81a3, 0xff00, 0x0f00);
+ r8168g_phy_param(phydev, 0x81ae, 0xff00, 0x0f00);
+ r8168g_phy_param(phydev, 0x81b9, 0xff00, 0xb900);
+ rtl8125_phy_param(phydev, 0x83b0, 0x0e00, 0x0000);
+ rtl8125_phy_param(phydev, 0x83C5, 0x0e00, 0x0000);
+ rtl8125_phy_param(phydev, 0x83da, 0x0e00, 0x0000);
+ rtl8125_phy_param(phydev, 0x83ef, 0x0e00, 0x0000);
+ phy_modify_paged(phydev, 0x0bf3, 0x14, 0x01f0, 0x0160);
+ phy_modify_paged(phydev, 0x0bf3, 0x15, 0x001f, 0x0014);
+ phy_modify_paged(phydev, 0x0bf2, 0x14, 0x6000, 0x0000);
+ phy_modify_paged(phydev, 0x0bf2, 0x16, 0xc000, 0x0000);
+ phy_modify_paged(phydev, 0x0bf2, 0x14, 0x1fff, 0x0187);
+ phy_modify_paged(phydev, 0x0bf2, 0x15, 0x003f, 0x0003);
+
+ r8168g_phy_param(phydev, 0x8173, 0xffff, 0x8620);
+ r8168g_phy_param(phydev, 0x8175, 0xffff, 0x8671);
+ r8168g_phy_param(phydev, 0x817c, 0x0000, 0x2000);
+ r8168g_phy_param(phydev, 0x8187, 0x0000, 0x2000);
+ r8168g_phy_param(phydev, 0x8192, 0x0000, 0x2000);
+ r8168g_phy_param(phydev, 0x819d, 0x0000, 0x2000);
+ r8168g_phy_param(phydev, 0x81a8, 0x2000, 0x0000);
+ r8168g_phy_param(phydev, 0x81b3, 0x2000, 0x0000);
+ r8168g_phy_param(phydev, 0x81be, 0x0000, 0x2000);
+ r8168g_phy_param(phydev, 0x817d, 0xff00, 0xa600);
+ r8168g_phy_param(phydev, 0x8188, 0xff00, 0xa600);
+ r8168g_phy_param(phydev, 0x8193, 0xff00, 0xa600);
+ r8168g_phy_param(phydev, 0x819e, 0xff00, 0xa600);
+ r8168g_phy_param(phydev, 0x81a9, 0xff00, 0x1400);
+ r8168g_phy_param(phydev, 0x81b4, 0xff00, 0x1400);
+ r8168g_phy_param(phydev, 0x81bf, 0xff00, 0xa600);
+
+ phy_modify_paged(phydev, 0x0aea, 0x15, 0x0028, 0x0000);
+
+ rtl8125_phy_param(phydev, 0x84f0, 0xffff, 0x201c);
+ rtl8125_phy_param(phydev, 0x84f2, 0xffff, 0x3117);
+
+ phy_write_paged(phydev, 0x0aec, 0x13, 0x0000);
+ phy_write_paged(phydev, 0x0ae2, 0x10, 0xffff);
+ phy_write_paged(phydev, 0x0aec, 0x17, 0xffff);
+ phy_write_paged(phydev, 0x0aed, 0x11, 0xffff);
+ phy_write_paged(phydev, 0x0aec, 0x14, 0x0000);
+ phy_modify_paged(phydev, 0x0aed, 0x10, 0x0001, 0x0000);
+ phy_write_paged(phydev, 0x0adb, 0x14, 0x0150);
+ rtl8125_phy_param(phydev, 0x8197, 0xff00, 0x5000);
+ rtl8125_phy_param(phydev, 0x8231, 0xff00, 0x5000);
+ rtl8125_phy_param(phydev, 0x82cb, 0xff00, 0x5000);
+ rtl8125_phy_param(phydev, 0x82cd, 0xff00, 0x5700);
+ rtl8125_phy_param(phydev, 0x8233, 0xff00, 0x5700);
+ rtl8125_phy_param(phydev, 0x8199, 0xff00, 0x5700);
+
+ rtl8125_phy_param(phydev, 0x815a, 0xffff, 0x0150);
+ rtl8125_phy_param(phydev, 0x81f4, 0xffff, 0x0150);
+ rtl8125_phy_param(phydev, 0x828e, 0xffff, 0x0150);
+ rtl8125_phy_param(phydev, 0x81b1, 0xffff, 0x0000);
+ rtl8125_phy_param(phydev, 0x824b, 0xffff, 0x0000);
+ rtl8125_phy_param(phydev, 0x82e5, 0xffff, 0x0000);
+
+ rtl8125_phy_param(phydev, 0x84f7, 0xff00, 0x2800);
+ phy_modify_paged(phydev, 0x0aec, 0x11, 0x0000, 0x1000);
+ rtl8125_phy_param(phydev, 0x81b3, 0xff00, 0xad00);
+ rtl8125_phy_param(phydev, 0x824d, 0xff00, 0xad00);
+ rtl8125_phy_param(phydev, 0x82e7, 0xff00, 0xad00);
+ phy_modify_paged(phydev, 0x0ae4, 0x17, 0x000f, 0x0001);
+ rtl8125_phy_param(phydev, 0x82ce, 0xf000, 0x4000);
+
+ rtl8125_phy_param(phydev, 0x84ac, 0xffff, 0x0000);
+ rtl8125_phy_param(phydev, 0x84ae, 0xffff, 0x0000);
+ rtl8125_phy_param(phydev, 0x84b0, 0xffff, 0xf818);
+ rtl8125_phy_param(phydev, 0x84b2, 0xff00, 0x6000);
+
+ rtl8125_phy_param(phydev, 0x8ffc, 0xffff, 0x6008);
+ rtl8125_phy_param(phydev, 0x8ffe, 0xffff, 0xf450);
+
+ rtl8125_phy_param(phydev, 0x8015, 0x0000, 0x0200);
+ rtl8125_phy_param(phydev, 0x8016, 0x0800, 0x0000);
+ rtl8125_phy_param(phydev, 0x8fe6, 0xff00, 0x0800);
+ rtl8125_phy_param(phydev, 0x8fe4, 0xffff, 0x2114);
+
+ rtl8125_phy_param(phydev, 0x8647, 0xffff, 0xa7b1);
+ rtl8125_phy_param(phydev, 0x8649, 0xffff, 0xbbca);
+ rtl8125_phy_param(phydev, 0x864b, 0xff00, 0xdc00);
+
+ rtl8125_phy_param(phydev, 0x8154, 0xc000, 0x4000);
+ rtl8125_phy_param(phydev, 0x8158, 0xc000, 0x0000);
+
+ rtl8125_phy_param(phydev, 0x826c, 0xffff, 0xffff);
+ rtl8125_phy_param(phydev, 0x826e, 0xffff, 0xffff);
+
+ rtl8125_phy_param(phydev, 0x8872, 0xff00, 0x0e00);
+ r8168g_phy_param(phydev, 0x8012, 0x0000, 0x0800);
+ r8168g_phy_param(phydev, 0x8012, 0x0000, 0x4000);
+ phy_modify_paged(phydev, 0x0b57, 0x13, 0x0000, 0x0001);
+ r8168g_phy_param(phydev, 0x834a, 0xff00, 0x0700);
+ rtl8125_phy_param(phydev, 0x8217, 0x3f00, 0x2a00);
+ r8168g_phy_param(phydev, 0x81b1, 0xff00, 0x0b00);
+ rtl8125_phy_param(phydev, 0x8fed, 0xff00, 0x4e00);
+
+ rtl8125_phy_param(phydev, 0x88ac, 0xff00, 0x2300);
+ phy_modify_paged(phydev, 0x0bf0, 0x16, 0x0000, 0x3800);
+ rtl8125_phy_param(phydev, 0x88de, 0xff00, 0x0000);
+ rtl8125_phy_param(phydev, 0x80b4, 0xffff, 0x5195);
+
+ r8168g_phy_param(phydev, 0x8370, 0xffff, 0x8671);
+ r8168g_phy_param(phydev, 0x8372, 0xffff, 0x86c8);
+
+ r8168g_phy_param(phydev, 0x8401, 0xffff, 0x86c8);
+ r8168g_phy_param(phydev, 0x8403, 0xffff, 0x86da);
+ r8168g_phy_param(phydev, 0x8406, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x8408, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x840a, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x840c, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x840e, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x8410, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x8412, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x8414, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x8416, 0x1800, 0x1000);
+
+ r8168g_phy_param(phydev, 0x82bd, 0xffff, 0x1f40);
+
+ phy_modify_paged(phydev, 0x0bfb, 0x12, 0x07ff, 0x0328);
+ phy_write_paged(phydev, 0x0bfb, 0x13, 0x3e14);
+
+ r8168g_phy_param(phydev, 0x81c4, 0xffff, 0x003b);
+ r8168g_phy_param(phydev, 0x81c6, 0xffff, 0x0086);
+ r8168g_phy_param(phydev, 0x81c8, 0xffff, 0x00b7);
+ r8168g_phy_param(phydev, 0x81ca, 0xffff, 0x00db);
+ r8168g_phy_param(phydev, 0x81cc, 0xffff, 0x00fe);
+ r8168g_phy_param(phydev, 0x81ce, 0xffff, 0x00fe);
+ r8168g_phy_param(phydev, 0x81d0, 0xffff, 0x00fe);
+ r8168g_phy_param(phydev, 0x81d2, 0xffff, 0x00fe);
+ r8168g_phy_param(phydev, 0x81d4, 0xffff, 0x00c3);
+ r8168g_phy_param(phydev, 0x81d6, 0xffff, 0x0078);
+ r8168g_phy_param(phydev, 0x81d8, 0xffff, 0x0047);
+ r8168g_phy_param(phydev, 0x81da, 0xffff, 0x0023);
+
+ rtl8125_phy_param(phydev, 0x88d7, 0xffff, 0x01a0);
+ rtl8125_phy_param(phydev, 0x88d9, 0xffff, 0x01a0);
+ rtl8125_phy_param(phydev, 0x8ffa, 0xffff, 0x002a);
+
+ rtl8125_phy_param(phydev, 0x8fee, 0xffff, 0xffdf);
+ rtl8125_phy_param(phydev, 0x8ff0, 0xffff, 0xffff);
+ rtl8125_phy_param(phydev, 0x8ff2, 0xffff, 0x0a4a);
+ rtl8125_phy_param(phydev, 0x8ff4, 0xffff, 0xaa5a);
+ rtl8125_phy_param(phydev, 0x8ff6, 0xffff, 0x0a4a);
+
+ rtl8125_phy_param(phydev, 0x8ff8, 0xffff, 0xaa5a);
+ rtl8125_phy_param(phydev, 0x88d5, 0xff00, 0x0200);
+
+ r8168g_phy_param(phydev, 0x84bb, 0xff00, 0x0a00);
+ r8168g_phy_param(phydev, 0x84c0, 0xff00, 0x1600);
+
+ phy_modify_paged(phydev, 0x0a43, 0x10, 0x0000, 0x0003);
+
+ rtl8125_legacy_force_mode(phydev);
+ rtl8168g_disable_aldps(phydev);
+ rtl8125_common_config_eee_phy(phydev);
+}
+
void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
enum mac_version ver)
{
@@ -1144,7 +1310,6 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
[RTL_GIGA_MAC_VER_08] = rtl8102e_hw_phy_config,
[RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config,
[RTL_GIGA_MAC_VER_10] = NULL,
- [RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config,
[RTL_GIGA_MAC_VER_14] = rtl8401_hw_phy_config,
[RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config,
[RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config,
@@ -1176,12 +1341,12 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
[RTL_GIGA_MAC_VER_48] = rtl8168h_2_hw_phy_config,
[RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config,
[RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config,
- [RTL_GIGA_MAC_VER_53] = rtl8117_hw_phy_config,
[RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config,
[RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config,
[RTL_GIGA_MAC_VER_64] = rtl8125d_hw_phy_config,
- [RTL_GIGA_MAC_VER_65] = rtl8126a_hw_phy_config,
- [RTL_GIGA_MAC_VER_66] = rtl8126a_hw_phy_config,
+ [RTL_GIGA_MAC_VER_66] = rtl8125bp_hw_phy_config,
+ [RTL_GIGA_MAC_VER_70] = rtl8126a_hw_phy_config,
+ [RTL_GIGA_MAC_VER_80] = rtl8127a_1_hw_phy_config,
};
if (phy_configs[ver])
diff --git a/drivers/net/ethernet/realtek/rtase/rtase.h b/drivers/net/ethernet/realtek/rtase/rtase.h
index 583c33930f88..b9209eb6ea73 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase.h
+++ b/drivers/net/ethernet/realtek/rtase/rtase.h
@@ -9,7 +9,11 @@
#ifndef RTASE_H
#define RTASE_H
-#define RTASE_HW_VER_MASK 0x7C800000
+#define RTASE_HW_VER_MASK 0x7C800000
+#define RTASE_HW_VER_906X_7XA 0x00800000
+#define RTASE_HW_VER_906X_7XC 0x04000000
+#define RTASE_HW_VER_907XD_V1 0x04800000
+#define RTASE_HW_VER_907XD_VA 0x08000000
#define RTASE_RX_DMA_BURST_256 4
#define RTASE_TX_DMA_BURST_UNLIMITED 7
@@ -166,11 +170,12 @@ enum rtase_registers {
#define RTASE_TC_MODE_MASK GENMASK(11, 10)
RTASE_TOKSEL = 0x2046,
+ RTASE_TXQCRDT_0 = 0x2500,
RTASE_RFIFONFULL = 0x4406,
RTASE_INT_MITI_TX = 0x0A00,
RTASE_INT_MITI_RX = 0x0A80,
- RTASE_VLAN_ENTRY_0 = 0xAC80,
+ RTASE_VLAN_ENTRY_0 = 0xAC80,
};
enum rtase_desc_status_bit {
@@ -236,7 +241,7 @@ union rtase_rx_desc {
#define RTASE_RX_RES BIT(20)
#define RTASE_RX_RUNT BIT(19)
#define RTASE_RX_RWT BIT(18)
-#define RTASE_RX_CRC BIT(16)
+#define RTASE_RX_CRC BIT(17)
#define RTASE_RX_V6F BIT(31)
#define RTASE_RX_V4F BIT(30)
#define RTASE_RX_UDPT BIT(29)
@@ -255,6 +260,12 @@ union rtase_rx_desc {
#define RTASE_VLAN_TAG_MASK GENMASK(15, 0)
#define RTASE_RX_PKT_SIZE_MASK GENMASK(13, 0)
+/* txqos hardware definitions */
+#define RTASE_1T_CLOCK 64
+#define RTASE_1T_POWER 10000000
+#define RTASE_IDLESLOPE_INT_SHIFT 25
+#define RTASE_IDLESLOPE_INT_MASK GENMASK(31, 25)
+
#define RTASE_IVEC_NAME_SIZE (IFNAMSIZ + 10)
struct rtase_int_vector {
@@ -277,6 +288,7 @@ struct rtase_ring {
u32 cur_idx;
u32 dirty_idx;
u16 index;
+ u8 type;
struct sk_buff *skbuff[RTASE_NUM_DESC];
void *data_buf[RTASE_NUM_DESC];
@@ -290,6 +302,13 @@ struct rtase_ring {
u64 alloc_fail;
};
+struct rtase_txqos {
+ int hicredit;
+ int locredit;
+ int idleslope;
+ int sendslope;
+};
+
struct rtase_stats {
u64 tx_dropped;
u64 rx_dropped;
@@ -309,6 +328,7 @@ struct rtase_private {
struct page_pool *page_pool;
struct rtase_ring tx_ring[RTASE_NUM_TX_QUEUE];
+ struct rtase_txqos tx_qos[RTASE_NUM_TX_QUEUE];
struct rtase_ring rx_ring[RTASE_NUM_RX_QUEUE];
struct rtase_counters *tally_vaddr;
dma_addr_t tally_paddr;
@@ -327,6 +347,8 @@ struct rtase_private {
u16 int_nums;
u16 tx_int_mit;
u16 rx_int_mit;
+
+ u32 hw_ver;
};
#define RTASE_LSO_64K 64000
diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c
index f8777b7663d3..ef13109c49cf 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase_main.c
+++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c
@@ -326,6 +326,7 @@ static void rtase_tx_desc_init(struct rtase_private *tp, u16 idx)
ring->cur_idx = 0;
ring->dirty_idx = 0;
ring->index = idx;
+ ring->type = NETDEV_QUEUE_TYPE_TX;
ring->alloc_fail = 0;
for (i = 0; i < RTASE_NUM_DESC; i++) {
@@ -345,6 +346,9 @@ static void rtase_tx_desc_init(struct rtase_private *tp, u16 idx)
ring->ivec = &tp->int_vector[0];
list_add_tail(&ring->ring_entry, &tp->int_vector[0].ring_list);
}
+
+ netif_queue_set_napi(tp->dev, ring->index,
+ ring->type, &ring->ivec->napi);
}
static void rtase_map_to_asic(union rtase_rx_desc *desc, dma_addr_t mapping,
@@ -590,6 +594,7 @@ static void rtase_rx_desc_init(struct rtase_private *tp, u16 idx)
ring->cur_idx = 0;
ring->dirty_idx = 0;
ring->index = idx;
+ ring->type = NETDEV_QUEUE_TYPE_RX;
ring->alloc_fail = 0;
for (i = 0; i < RTASE_NUM_DESC; i++)
@@ -597,6 +602,8 @@ static void rtase_rx_desc_init(struct rtase_private *tp, u16 idx)
ring->ring_handler = rx_handler;
ring->ivec = &tp->int_vector[idx];
+ netif_queue_set_napi(tp->dev, ring->index,
+ ring->type, &ring->ivec->napi);
list_add_tail(&ring->ring_entry, &tp->int_vector[idx].ring_list);
}
@@ -1114,7 +1121,7 @@ static int rtase_open(struct net_device *dev)
/* request other interrupts to handle multiqueue */
for (i = 1; i < tp->int_nums; i++) {
ivec = &tp->int_vector[i];
- snprintf(ivec->name, sizeof(ivec->name), "%s_int%i",
+ snprintf(ivec->name, sizeof(ivec->name), "%s_int%u",
tp->dev->name, i);
ret = request_irq(ivec->irq, rtase_q_interrupt, 0,
ivec->name, ivec);
@@ -1161,8 +1168,12 @@ static void rtase_down(struct net_device *dev)
ivec = &tp->int_vector[i];
napi_disable(&ivec->napi);
list_for_each_entry_safe(ring, tmp, &ivec->ring_list,
- ring_entry)
+ ring_entry) {
+ netif_queue_set_napi(tp->dev, ring->index,
+ ring->type, NULL);
+
list_del(&ring->ring_entry);
+ }
}
netif_tx_disable(dev);
@@ -1501,7 +1512,10 @@ static void rtase_wait_for_quiescence(const struct net_device *dev)
static void rtase_sw_reset(struct net_device *dev)
{
struct rtase_private *tp = netdev_priv(dev);
+ struct rtase_ring *ring, *tmp;
+ struct rtase_int_vector *ivec;
int ret;
+ u32 i;
netif_stop_queue(dev);
netif_carrier_off(dev);
@@ -1512,6 +1526,17 @@ static void rtase_sw_reset(struct net_device *dev)
rtase_tx_clear(tp);
rtase_rx_clear(tp);
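+	/* Drop the queue/NAPI associations before the rings are torn down;
+	 * they are re-established when rtase_init_ring() sets the rings up
+	 * again below.
+	 */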
+ for (i = 0; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ list_for_each_entry_safe(ring, tmp, &ivec->ring_list,
+ ring_entry) {
+ netif_queue_set_napi(tp->dev, ring->index,
+ ring->type, NULL);
+
+ list_del(&ring->ring_entry);
+ }
+ }
+
ret = rtase_init_ring(dev);
if (ret) {
netdev_err(dev, "unable to init ring\n");
@@ -1651,6 +1676,65 @@ static void rtase_get_stats64(struct net_device *dev,
stats->rx_length_errors = tp->stats.rx_length_errors;
}
+static void rtase_set_hw_cbs(const struct rtase_private *tp, u32 queue)
+{
+ u32 idle = tp->tx_qos[queue].idleslope * RTASE_1T_CLOCK;
+ u32 val, i;
+
+ val = u32_encode_bits(idle / RTASE_1T_POWER, RTASE_IDLESLOPE_INT_MASK);
+ idle %= RTASE_1T_POWER;
+
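+	/* Binary long division: convert the remainder of
+	 * idleslope * RTASE_1T_CLOCK / RTASE_1T_POWER into the
+	 * RTASE_IDLESLOPE_INT_SHIFT fractional bits below the integer part,
+	 * one bit per iteration.
+	 */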
+ for (i = 1; i <= RTASE_IDLESLOPE_INT_SHIFT; i++) {
+ idle *= 2;
+ if ((idle / RTASE_1T_POWER) == 1)
+ val |= BIT(RTASE_IDLESLOPE_INT_SHIFT - i);
+
+ idle %= RTASE_1T_POWER;
+ }
+
+ rtase_w32(tp, RTASE_TXQCRDT_0 + queue * 4, val);
+}
+
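+/* Credit-based shaper (IEEE 802.1Qav) offload. The cbs qdisc hands over
+ * idleslope/sendslope (in kbit/s) and hicredit/locredit (in bytes); only
+ * the idleslope is programmed into the per-queue credit register here,
+ * the remaining parameters are kept for bookkeeping.
+ */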
+static int rtase_setup_tc_cbs(struct rtase_private *tp,
+ const struct tc_cbs_qopt_offload *qopt)
+{
+ int queue = qopt->queue;
+
+ if (queue < 0 || queue >= tp->func_tx_queue_num)
+ return -EINVAL;
+
+ if (!qopt->enable) {
+ tp->tx_qos[queue].hicredit = 0;
+ tp->tx_qos[queue].locredit = 0;
+ tp->tx_qos[queue].idleslope = 0;
+ tp->tx_qos[queue].sendslope = 0;
+
+ rtase_w32(tp, RTASE_TXQCRDT_0 + queue * 4, 0);
+ } else {
+ tp->tx_qos[queue].hicredit = qopt->hicredit;
+ tp->tx_qos[queue].locredit = qopt->locredit;
+ tp->tx_qos[queue].idleslope = qopt->idleslope;
+ tp->tx_qos[queue].sendslope = qopt->sendslope;
+
+ rtase_set_hw_cbs(tp, queue);
+ }
+
+ return 0;
+}
+
+static int rtase_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+
+ switch (type) {
+ case TC_SETUP_QDISC_CBS:
+ return rtase_setup_tc_cbs(tp, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static netdev_features_t rtase_fix_features(struct net_device *dev,
netdev_features_t features)
{
@@ -1686,6 +1770,7 @@ static const struct net_device_ops rtase_netdev_ops = {
.ndo_change_mtu = rtase_change_mtu,
.ndo_tx_timeout = rtase_tx_timeout,
.ndo_get_stats64 = rtase_get_stats64,
+ .ndo_setup_tc = rtase_setup_tc,
.ndo_fix_features = rtase_fix_features,
.ndo_set_features = rtase_set_features,
};
@@ -1714,10 +1799,22 @@ static int rtase_get_settings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
u32 supported = SUPPORTED_MII | SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ const struct rtase_private *tp = netdev_priv(dev);
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
supported);
- cmd->base.speed = SPEED_5000;
+
+ switch (tp->hw_ver) {
+ case RTASE_HW_VER_906X_7XA:
+ case RTASE_HW_VER_906X_7XC:
+ cmd->base.speed = SPEED_5000;
+ break;
+ case RTASE_HW_VER_907XD_V1:
+ case RTASE_HW_VER_907XD_VA:
+ cmd->base.speed = SPEED_10000;
+ break;
+ }
+
cmd->base.duplex = DUPLEX_FULL;
cmd->base.port = PORT_MII;
cmd->base.autoneg = AUTONEG_DISABLE;
@@ -1789,6 +1886,18 @@ static void rtase_init_netdev_ops(struct net_device *dev)
dev->ethtool_ops = &rtase_ethtool_ops;
}
+static void rtase_init_napi(struct rtase_private *tp)
+{
+ u16 i;
+
+ for (i = 0; i < tp->int_nums; i++) {
+ netif_napi_add_config(tp->dev, &tp->int_vector[i].napi,
+ tp->int_vector[i].poll, i);
+ netif_napi_set_irq(&tp->int_vector[i].napi,
+ tp->int_vector[i].irq);
+ }
+}
+
static void rtase_reset_interrupt(struct pci_dev *pdev,
const struct rtase_private *tp)
{
@@ -1816,7 +1925,7 @@ static int rtase_alloc_msix(struct pci_dev *pdev, struct rtase_private *tp)
for (i = 0; i < tp->int_nums; i++) {
irq = pci_irq_vector(pdev, i);
- if (!irq) {
+ if (irq < 0) {
pci_disable_msix(pdev);
return irq;
}
@@ -1874,9 +1983,6 @@ static void rtase_init_int_vector(struct rtase_private *tp)
memset(tp->int_vector[0].name, 0x0, sizeof(tp->int_vector[0].name));
INIT_LIST_HEAD(&tp->int_vector[0].ring_list);
- netif_napi_add(tp->dev, &tp->int_vector[0].napi,
- tp->int_vector[0].poll);
-
/* interrupt vector 1 ~ 3 */
for (i = 1; i < tp->int_nums; i++) {
tp->int_vector[i].tp = tp;
@@ -1890,9 +1996,6 @@ static void rtase_init_int_vector(struct rtase_private *tp)
memset(tp->int_vector[i].name, 0x0,
sizeof(tp->int_vector[0].name));
INIT_LIST_HEAD(&tp->int_vector[i].ring_list);
-
- netif_napi_add(tp->dev, &tp->int_vector[i].napi,
- tp->int_vector[i].poll);
}
}
@@ -1901,10 +2004,10 @@ static u16 rtase_calc_time_mitigation(u32 time_us)
u8 msb, time_count, time_unit;
u16 int_miti;
- time_us = min_t(int, time_us, RTASE_MITI_MAX_TIME);
+ time_us = min(time_us, RTASE_MITI_MAX_TIME);
- msb = fls(time_us);
- if (msb >= RTASE_MITI_COUNT_BIT_NUM) {
+ if (time_us > RTASE_MITI_TIME_COUNT_MASK) {
+ msb = fls(time_us);
time_unit = msb - RTASE_MITI_COUNT_BIT_NUM;
time_count = time_us >> (msb - RTASE_MITI_COUNT_BIT_NUM);
} else {
@@ -1923,7 +2026,7 @@ static u16 rtase_calc_packet_num_mitigation(u16 pkt_num)
u8 msb, pkt_num_count, pkt_num_unit;
u16 int_miti;
- pkt_num = min_t(int, pkt_num, RTASE_MITI_MAX_PKT_NUM);
+ pkt_num = min(pkt_num, RTASE_MITI_MAX_PKT_NUM);
if (pkt_num > 60) {
pkt_num_unit = RTASE_MITI_MAX_PKT_NUM_IDX;
@@ -1972,20 +2075,22 @@ static void rtase_init_software_variable(struct pci_dev *pdev,
tp->dev->max_mtu = RTASE_MAX_JUMBO_SIZE;
}
-static bool rtase_check_mac_version_valid(struct rtase_private *tp)
+static int rtase_check_mac_version_valid(struct rtase_private *tp)
{
- u32 hw_ver = rtase_r32(tp, RTASE_TX_CONFIG_0) & RTASE_HW_VER_MASK;
- bool known_ver = false;
+ int ret = -ENODEV;
- switch (hw_ver) {
- case 0x00800000:
- case 0x04000000:
- case 0x04800000:
- known_ver = true;
+ tp->hw_ver = rtase_r32(tp, RTASE_TX_CONFIG_0) & RTASE_HW_VER_MASK;
+
+ switch (tp->hw_ver) {
+ case RTASE_HW_VER_906X_7XA:
+ case RTASE_HW_VER_906X_7XC:
+ case RTASE_HW_VER_907XD_V1:
+ case RTASE_HW_VER_907XD_VA:
+ ret = 0;
break;
}
- return known_ver;
+ return ret;
}
static int rtase_init_board(struct pci_dev *pdev, struct net_device **dev_out,
@@ -2004,7 +2109,7 @@ static int rtase_init_board(struct pci_dev *pdev, struct net_device **dev_out,
SET_NETDEV_DEV(dev, &pdev->dev);
ret = pci_enable_device(pdev);
- if (ret < 0)
+ if (ret)
goto err_out_free_dev;
/* make sure PCI base addr 1 is MMIO */
@@ -2020,7 +2125,7 @@ static int rtase_init_board(struct pci_dev *pdev, struct net_device **dev_out,
}
ret = pci_request_regions(pdev, KBUILD_MODNAME);
- if (ret < 0)
+ if (ret)
goto err_out_disable;
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
@@ -2096,7 +2201,7 @@ static int rtase_init_one(struct pci_dev *pdev,
dev_dbg(&pdev->dev, "Automotive Switch Ethernet driver loaded\n");
ret = rtase_init_board(pdev, &dev, &ioaddr);
- if (ret != 0)
+ if (ret)
return ret;
tp = netdev_priv(dev);
@@ -2105,19 +2210,25 @@ static int rtase_init_one(struct pci_dev *pdev,
tp->pdev = pdev;
/* identify chip attached to board */
- if (!rtase_check_mac_version_valid(tp))
- return dev_err_probe(&pdev->dev, -ENODEV,
- "unknown chip version, contact rtase maintainers (see MAINTAINERS file)\n");
+ ret = rtase_check_mac_version_valid(tp);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "unknown chip version: 0x%08x, contact rtase maintainers (see MAINTAINERS file)\n",
+ tp->hw_ver);
+ goto err_out_release_board;
+ }
rtase_init_software_variable(pdev, tp);
rtase_init_hardware(tp);
ret = rtase_alloc_interrupt(pdev, tp);
- if (ret < 0) {
+ if (ret) {
dev_err(&pdev->dev, "unable to alloc MSIX/MSI\n");
- goto err_out_1;
+ goto err_out_del_napi;
}
+ rtase_init_napi(tp);
+
rtase_init_netdev_ops(dev);
dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
@@ -2148,7 +2259,7 @@ static int rtase_init_one(struct pci_dev *pdev,
GFP_KERNEL);
if (!tp->tally_vaddr) {
ret = -ENOMEM;
- goto err_out;
+ goto err_out_free_dma;
}
rtase_tally_counter_clear(tp);
@@ -2158,14 +2269,14 @@ static int rtase_init_one(struct pci_dev *pdev,
netif_carrier_off(dev);
ret = register_netdev(dev);
- if (ret != 0)
- goto err_out;
+ if (ret)
+ goto err_out_free_dma;
netdev_dbg(dev, "%pM, IRQ %d\n", dev->dev_addr, dev->irq);
return 0;
-err_out:
+err_out_free_dma:
if (tp->tally_vaddr) {
dma_free_coherent(&pdev->dev,
sizeof(*tp->tally_vaddr),
@@ -2175,12 +2286,13 @@ err_out:
tp->tally_vaddr = NULL;
}
-err_out_1:
+err_out_del_napi:
for (i = 0; i < tp->int_nums; i++) {
ivec = &tp->int_vector[i];
netif_napi_del(&ivec->napi);
}
+err_out_release_board:
rtase_release_board(pdev, dev, ioaddr);
return ret;
diff --git a/drivers/net/ethernet/renesas/Makefile b/drivers/net/ethernet/renesas/Makefile
index f65fc76f8b4d..d63e0c61bb68 100644
--- a/drivers/net/ethernet/renesas/Makefile
+++ b/drivers/net/ethernet/renesas/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_SH_ETH) += sh_eth.o
ravb-objs := ravb_main.o ravb_ptp.o
obj-$(CONFIG_RAVB) += ravb.o
+rswitch-objs := rswitch_main.o rswitch_l2.o
obj-$(CONFIG_RENESAS_ETHER_SWITCH) += rswitch.o
obj-$(CONFIG_RENESAS_GEN4_PTP) += rcar_gen4_ptp.o
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 7b48060c250b..5e56ec9b1013 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -35,16 +35,6 @@
/* Driver's parameters */
#define RAVB_ALIGN 128
-/* Hardware time stamp */
-#define RAVB_TXTSTAMP_VALID 0x00000001 /* TX timestamp valid */
-#define RAVB_TXTSTAMP_ENABLED 0x00000010 /* Enable TX timestamping */
-
-#define RAVB_RXTSTAMP_VALID 0x00000001 /* RX timestamp valid */
-#define RAVB_RXTSTAMP_TYPE 0x00000006 /* RX type mask */
-#define RAVB_RXTSTAMP_TYPE_V2_L2_EVENT 0x00000002
-#define RAVB_RXTSTAMP_TYPE_ALL 0x00000006
-#define RAVB_RXTSTAMP_ENABLED 0x00000010 /* Enable RX timestamping */
-
enum ravb_reg {
/* AVB-DMAC registers */
CCC = 0x0000,
@@ -1017,7 +1007,6 @@ enum CSR2_BIT {
#define CSR2_CSUM_ENABLE (CSR2_RTCP4 | CSR2_RUDP4 | CSR2_RICMP4 | \
CSR2_RTCP6 | CSR2_RUDP6 | CSR2_RICMP6)
-#define DBAT_ENTRY_NUM 22
#define RX_QUEUE_OFFSET 4
#define NUM_RX_QUEUE 2
#define NUM_TX_QUEUE 2
@@ -1062,6 +1051,7 @@ struct ravb_hw_info {
u32 rx_max_frame_size;
u32 rx_buffer_size;
u32 rx_desc_size;
+ u32 dbat_entry_num;
unsigned aligned_tx: 1;
unsigned coalesce_irqs:1; /* Needs software IRQ coalescing */
@@ -1114,8 +1104,8 @@ struct ravb_private {
u32 rx_over_errors;
u32 rx_fifo_errors;
struct net_device_stats stats[NUM_RX_QUEUE];
- u32 tstamp_tx_ctrl;
- u32 tstamp_rx_ctrl;
+ enum hwtstamp_tx_types tstamp_tx_ctrl;
+ enum hwtstamp_rx_filters tstamp_rx_ctrl;
struct list_head ts_skb_list;
u32 ts_skb_tag;
struct ravb_ptp ptp;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index ac0f093f647a..57b0db314fb5 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -802,7 +802,6 @@ static int ravb_rx_gbeth(struct net_device *ndev, int budget, int q)
const struct ravb_hw_info *info = priv->info;
struct net_device_stats *stats;
struct ravb_rx_desc *desc;
- struct sk_buff *skb;
int rx_packets = 0;
u8 desc_status;
u16 desc_len;
@@ -815,6 +814,8 @@ static int ravb_rx_gbeth(struct net_device *ndev, int budget, int q)
stats = &priv->stats[q];
for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
+ struct sk_buff *skb = NULL;
+
entry = priv->cur_rx[q] % priv->num_rx_ring[q];
desc = &priv->rx_ring[q].desc[entry];
if (rx_packets == budget || desc->die_dt == DT_FEMPTY)
@@ -945,6 +946,30 @@ refill:
return rx_packets;
}
+static void ravb_rx_rcar_hwstamp(struct ravb_private *priv, int q,
+ struct ravb_ex_rx_desc *desc,
+ struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct timespec64 ts;
+ bool get_ts;
+
+ if (q == RAVB_NC)
+ get_ts = priv->tstamp_rx_ctrl != HWTSTAMP_FILTER_NONE;
+ else
+ get_ts = priv->tstamp_rx_ctrl == HWTSTAMP_FILTER_ALL;
+
+ if (!get_ts)
+ return;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
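+	/* The descriptor carries a 48-bit seconds value split across two
+	 * fields (ts_sh = bits 47:32, ts_sl = bits 31:0) plus nanoseconds
+	 * in ts_n.
+	 */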
+ ts.tv_sec = ((u64)le16_to_cpu(desc->ts_sh) << 32)
+ | le32_to_cpu(desc->ts_sl);
+ ts.tv_nsec = le32_to_cpu(desc->ts_n);
+ shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
+}
+
/* Packet receive function for Ethernet AVB */
static int ravb_rx_rcar(struct net_device *ndev, int budget, int q)
{
@@ -954,7 +979,6 @@ static int ravb_rx_rcar(struct net_device *ndev, int budget, int q)
struct ravb_ex_rx_desc *desc;
unsigned int limit, i;
struct sk_buff *skb;
- struct timespec64 ts;
int rx_packets = 0;
u8 desc_status;
u16 pkt_len;
@@ -991,7 +1015,6 @@ static int ravb_rx_rcar(struct net_device *ndev, int budget, int q)
if (desc_status & MSC_CEEF)
stats->rx_missed_errors++;
} else {
- u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;
struct ravb_rx_buffer *rx_buff;
void *rx_addr;
@@ -1009,19 +1032,8 @@ static int ravb_rx_rcar(struct net_device *ndev, int budget, int q)
break;
}
skb_mark_for_recycle(skb);
- get_ts &= (q == RAVB_NC) ?
- RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
- ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
- if (get_ts) {
- struct skb_shared_hwtstamps *shhwtstamps;
-
- shhwtstamps = skb_hwtstamps(skb);
- memset(shhwtstamps, 0, sizeof(*shhwtstamps));
- ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) <<
- 32) | le32_to_cpu(desc->ts_sl);
- ts.tv_nsec = le32_to_cpu(desc->ts_n);
- shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
- }
+
+ ravb_rx_rcar_hwstamp(priv, q, desc, skb);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
@@ -1974,7 +1986,6 @@ out_ptp_stop:
out_set_reset:
ravb_set_opmode(ndev, CCC_OPC_RESET);
out_rpm_put:
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
out_napi_off:
if (info->nc_queues)
@@ -2210,15 +2221,35 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_tx_timestamp(skb);
}
- /* Descriptor type must be set after all the above writes */
- dma_wmb();
+
if (num_tx_desc > 1) {
desc->die_dt = DT_FEND;
desc--;
+ /* When using multi-descriptors, DT_FEND needs to get written
+ * before DT_FSTART, but the compiler may reorder the memory
+ * writes in an attempt to optimize the code.
+ * Use a dma_wmb() barrier to make sure DT_FEND and DT_FSTART
+ * are written exactly in the order shown in the code.
+ * This is particularly important for cases where the DMA engine
+ * is already running when we are running this code. If the DMA
+ * sees DT_FSTART without the corresponding DT_FEND it will enter
+ * an error condition.
+ */
+ dma_wmb();
desc->die_dt = DT_FSTART;
} else {
+ /* Descriptor type must be set after all the above writes */
+ dma_wmb();
desc->die_dt = DT_FSINGLE;
}
+
+ /* Before ringing the doorbell we need to make sure that the latest
+ * writes have been committed to memory, otherwise it could delay
+	 * things until the doorbell is rung again.
+	 * This replaces the read operation mentioned in the HW
+	 * manuals.
+ */
+ dma_wmb();
ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
priv->cur_tx[q] += num_tx_desc;
@@ -2383,95 +2414,55 @@ static int ravb_close(struct net_device *ndev)
if (error)
return error;
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
}
-static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
+static int ravb_hwtstamp_get(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config)
{
struct ravb_private *priv = netdev_priv(ndev);
- struct hwtstamp_config config;
- config.flags = 0;
- config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
- HWTSTAMP_TX_OFF;
- switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) {
- case RAVB_RXTSTAMP_TYPE_V2_L2_EVENT:
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
- break;
- case RAVB_RXTSTAMP_TYPE_ALL:
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- break;
- default:
- config.rx_filter = HWTSTAMP_FILTER_NONE;
- }
+ config->flags = 0;
+ config->tx_type = priv->tstamp_tx_ctrl;
+ config->rx_filter = priv->tstamp_rx_ctrl;
- return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
/* Control hardware time stamping */
-static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
+static int ravb_hwtstamp_set(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct ravb_private *priv = netdev_priv(ndev);
- struct hwtstamp_config config;
- u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED;
- u32 tstamp_tx_ctrl;
+ enum hwtstamp_rx_filters tstamp_rx_ctrl;
+ enum hwtstamp_tx_types tstamp_tx_ctrl;
- if (copy_from_user(&config, req->ifr_data, sizeof(config)))
- return -EFAULT;
-
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
- tstamp_tx_ctrl = 0;
- break;
case HWTSTAMP_TX_ON:
- tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED;
+ tstamp_tx_ctrl = config->tx_type;
break;
default:
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
- tstamp_rx_ctrl = 0;
- break;
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
+ tstamp_rx_ctrl = config->rx_filter;
break;
default:
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ tstamp_rx_ctrl = HWTSTAMP_FILTER_ALL;
}
priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
- return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
-}
-
-/* ioctl to device function */
-static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
-{
- struct phy_device *phydev = ndev->phydev;
-
- if (!netif_running(ndev))
- return -EINVAL;
-
- if (!phydev)
- return -ENODEV;
-
- switch (cmd) {
- case SIOCGHWTSTAMP:
- return ravb_hwtstamp_get(ndev, req);
- case SIOCSHWTSTAMP:
- return ravb_hwtstamp_set(ndev, req);
- }
-
- return phy_mii_ioctl(phydev, req, cmd);
+ return 0;
}
static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
@@ -2607,11 +2598,13 @@ static const struct net_device_ops ravb_netdev_ops = {
.ndo_get_stats = ravb_get_stats,
.ndo_set_rx_mode = ravb_set_rx_mode,
.ndo_tx_timeout = ravb_tx_timeout,
- .ndo_eth_ioctl = ravb_do_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_change_mtu = ravb_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_set_features = ravb_set_features,
+ .ndo_hwtstamp_get = ravb_hwtstamp_get,
+ .ndo_hwtstamp_set = ravb_hwtstamp_set,
};
/* MDIO bus init function */
@@ -2693,6 +2686,7 @@ static const struct ravb_hw_info ravb_gen2_hw_info = {
.rx_buffer_size = SZ_2K +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
+ .dbat_entry_num = 22,
.aligned_tx = 1,
.gptp = 1,
.nc_queues = 1,
@@ -2716,6 +2710,7 @@ static const struct ravb_hw_info ravb_gen3_hw_info = {
.rx_buffer_size = SZ_2K +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
+ .dbat_entry_num = 22,
.internal_delay = 1,
.tx_counters = 1,
.multi_irqs = 1,
@@ -2742,6 +2737,7 @@ static const struct ravb_hw_info ravb_gen4_hw_info = {
.rx_buffer_size = SZ_2K +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
+ .dbat_entry_num = 22,
.internal_delay = 1,
.tx_counters = 1,
.multi_irqs = 1,
@@ -2763,10 +2759,12 @@ static const struct ravb_hw_info ravb_rzv2m_hw_info = {
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .tx_max_frame_size = SZ_2K,
.rx_max_frame_size = SZ_2K,
.rx_buffer_size = SZ_2K +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
+ .dbat_entry_num = 22,
.multi_irqs = 1,
.err_mgmt_irqs = 1,
.gptp = 1,
@@ -2792,6 +2790,7 @@ static const struct ravb_hw_info gbeth_hw_info = {
.rx_max_frame_size = SZ_8K,
.rx_buffer_size = SZ_2K,
.rx_desc_size = sizeof(struct ravb_rx_desc),
+ .dbat_entry_num = 2,
.aligned_tx = 1,
.coalesce_irqs = 1,
.tx_counters = 1,
@@ -2919,13 +2918,14 @@ static int ravb_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(rstc),
"failed to get cpg reset\n");
+ info = of_device_get_match_data(&pdev->dev);
+
ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
- NUM_TX_QUEUE, NUM_RX_QUEUE);
+ info->nc_queues ? NUM_TX_QUEUE : 1,
+ info->nc_queues ? NUM_RX_QUEUE : 1);
if (!ndev)
return -ENOMEM;
- info = of_device_get_match_data(&pdev->dev);
-
ndev->features = info->net_features;
ndev->hw_features = info->net_hw_features;
ndev->vlan_features = info->vlan_features;
@@ -3023,7 +3023,7 @@ static int ravb_probe(struct platform_device *pdev)
ravb_parse_delay_mode(np, ndev);
/* Allocate descriptor base address table */
- priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
+ priv->desc_bat_size = sizeof(struct ravb_desc) * info->dbat_entry_num;
priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
&priv->desc_bat_dma, GFP_KERNEL);
if (!priv->desc_bat) {
@@ -3033,7 +3033,7 @@ static int ravb_probe(struct platform_device *pdev)
error = -ENOMEM;
goto out_rpm_put;
}
- for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
+ for (q = RAVB_BE; q < info->dbat_entry_num; q++)
priv->desc_bat[q].die_dt = DT_EOS;
/* Initialise HW timestamp list */
@@ -3074,7 +3074,7 @@ static int ravb_probe(struct platform_device *pdev)
if (info->coalesce_irqs) {
netdev_sw_irq_coalesce_default_on(ndev);
if (num_present_cpus() == 1)
- dev_set_threaded(ndev, true);
+ netif_threaded_enable(ndev);
}
/* Network device register */
@@ -3088,7 +3088,6 @@ static int ravb_probe(struct platform_device *pdev)
netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
(u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
- pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
@@ -3216,10 +3215,15 @@ static int ravb_suspend(struct device *dev)
netif_device_detach(ndev);
- if (priv->wol_enabled)
- return ravb_wol_setup(ndev);
+ rtnl_lock();
+ if (priv->wol_enabled) {
+ ret = ravb_wol_setup(ndev);
+ rtnl_unlock();
+ return ret;
+ }
ret = ravb_close(ndev);
+ rtnl_unlock();
if (ret)
return ret;
@@ -3244,19 +3248,20 @@ static int ravb_resume(struct device *dev)
if (!netif_running(ndev))
return 0;
+ rtnl_lock();
/* If WoL is enabled restore the interface. */
- if (priv->wol_enabled) {
+ if (priv->wol_enabled)
ret = ravb_wol_restore(ndev);
- if (ret)
- return ret;
- } else {
+ else
ret = pm_runtime_force_resume(dev);
- if (ret)
- return ret;
+ if (ret) {
+ rtnl_unlock();
+ return ret;
}
/* Reopening the interface will restore the device to the working state. */
ret = ravb_open(ndev);
+ rtnl_unlock();
if (ret < 0)
goto out_rpm_put;
@@ -3266,10 +3271,8 @@ static int ravb_resume(struct device *dev)
return 0;
out_rpm_put:
- if (!priv->wol_enabled) {
- pm_runtime_mark_last_busy(dev);
+ if (!priv->wol_enabled)
pm_runtime_put_autosuspend(dev);
- }
return ret;
}
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
index 6e4ef7af27bf..226c6c0ab945 100644
--- a/drivers/net/ethernet/renesas/ravb_ptp.c
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -176,13 +176,6 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
struct net_device *ndev = priv->ndev;
unsigned long flags;
- /* Reject requests with unsupported flags */
- if (req->flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
if (req->index)
return -EINVAL;
@@ -213,10 +206,6 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
unsigned long flags;
int error = 0;
- /* Reject requests with unsupported flags */
- if (req->flags)
- return -EOPNOTSUPP;
-
if (req->index)
return -EINVAL;
@@ -288,6 +277,7 @@ static const struct ptp_clock_info ravb_ptp_info = {
.max_adj = 50000000,
.n_ext_ts = N_EXT_TS,
.n_per_out = N_PER_OUT,
+ .supported_extts_flags = PTP_RISING_EDGE | PTP_FALLING_EDGE,
.adjfine = ravb_ptp_adjfine,
.adjtime = ravb_ptp_adjtime,
.gettime64 = ravb_ptp_gettime64,
diff --git a/drivers/net/ethernet/renesas/rcar_gen4_ptp.c b/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
index 72e7fcc56693..d0979abd36de 100644
--- a/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
+++ b/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
@@ -12,25 +12,24 @@
#include <linux/slab.h>
#include "rcar_gen4_ptp.h"
-#define ptp_to_priv(ptp) container_of(ptp, struct rcar_gen4_ptp_private, info)
-static const struct rcar_gen4_ptp_reg_offset gen4_offs = {
- .enable = PTPTMEC,
- .disable = PTPTMDC,
- .increment = PTPTIVC0,
- .config_t0 = PTPTOVC00,
- .config_t1 = PTPTOVC10,
- .config_t2 = PTPTOVC20,
- .monitor_t0 = PTPGPTPTM00,
- .monitor_t1 = PTPGPTPTM10,
- .monitor_t2 = PTPGPTPTM20,
-};
+#define PTPTMEC_REG 0x0010
+#define PTPTMDC_REG 0x0014
+#define PTPTIVC0_REG 0x0020
+#define PTPTOVC00_REG 0x0030
+#define PTPTOVC10_REG 0x0034
+#define PTPTOVC20_REG 0x0038
+#define PTPGPTPTM00_REG 0x0050
+#define PTPGPTPTM10_REG 0x0054
+#define PTPGPTPTM20_REG 0x0058
+
+#define ptp_to_priv(ptp) container_of(ptp, struct rcar_gen4_ptp_private, info)
static int rcar_gen4_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct rcar_gen4_ptp_private *ptp_priv = ptp_to_priv(ptp);
- bool neg_adj = scaled_ppm < 0 ? true : false;
s64 addend = ptp_priv->default_addend;
+ bool neg_adj = scaled_ppm < 0;
s64 diff;
if (neg_adj)
@@ -38,20 +37,21 @@ static int rcar_gen4_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
diff = div_s64(addend * scaled_ppm_to_ppb(scaled_ppm), NSEC_PER_SEC);
addend = neg_adj ? addend - diff : addend + diff;
- iowrite32(addend, ptp_priv->addr + ptp_priv->offs->increment);
+ iowrite32(addend, ptp_priv->addr + PTPTIVC0_REG);
return 0;
}
-/* Caller must hold the lock */
static void _rcar_gen4_ptp_gettime(struct ptp_clock_info *ptp,
struct timespec64 *ts)
{
struct rcar_gen4_ptp_private *ptp_priv = ptp_to_priv(ptp);
- ts->tv_nsec = ioread32(ptp_priv->addr + ptp_priv->offs->monitor_t0);
- ts->tv_sec = ioread32(ptp_priv->addr + ptp_priv->offs->monitor_t1) |
- ((s64)ioread32(ptp_priv->addr + ptp_priv->offs->monitor_t2) << 32);
+ lockdep_assert_held(&ptp_priv->lock);
+
+ ts->tv_nsec = ioread32(ptp_priv->addr + PTPGPTPTM00_REG);
+ ts->tv_sec = ioread32(ptp_priv->addr + PTPGPTPTM10_REG) |
+ ((s64)ioread32(ptp_priv->addr + PTPGPTPTM20_REG) << 32);
}
static int rcar_gen4_ptp_gettime(struct ptp_clock_info *ptp,
@@ -73,14 +73,14 @@ static void _rcar_gen4_ptp_settime(struct ptp_clock_info *ptp,
{
struct rcar_gen4_ptp_private *ptp_priv = ptp_to_priv(ptp);
- iowrite32(1, ptp_priv->addr + ptp_priv->offs->disable);
- iowrite32(0, ptp_priv->addr + ptp_priv->offs->config_t2);
- iowrite32(0, ptp_priv->addr + ptp_priv->offs->config_t1);
- iowrite32(0, ptp_priv->addr + ptp_priv->offs->config_t0);
- iowrite32(1, ptp_priv->addr + ptp_priv->offs->enable);
- iowrite32(ts->tv_sec >> 32, ptp_priv->addr + ptp_priv->offs->config_t2);
- iowrite32(ts->tv_sec, ptp_priv->addr + ptp_priv->offs->config_t1);
- iowrite32(ts->tv_nsec, ptp_priv->addr + ptp_priv->offs->config_t0);
+ iowrite32(1, ptp_priv->addr + PTPTMDC_REG);
+ iowrite32(0, ptp_priv->addr + PTPTOVC20_REG);
+ iowrite32(0, ptp_priv->addr + PTPTOVC10_REG);
+ iowrite32(0, ptp_priv->addr + PTPTOVC00_REG);
+ iowrite32(1, ptp_priv->addr + PTPTMEC_REG);
+ iowrite32(ts->tv_sec >> 32, ptp_priv->addr + PTPTOVC20_REG);
+ iowrite32(ts->tv_sec, ptp_priv->addr + PTPTOVC10_REG);
+ iowrite32(ts->tv_nsec, ptp_priv->addr + PTPTOVC00_REG);
}
static int rcar_gen4_ptp_settime(struct ptp_clock_info *ptp,
@@ -130,17 +130,6 @@ static struct ptp_clock_info rcar_gen4_ptp_info = {
.enable = rcar_gen4_ptp_enable,
};
-static int rcar_gen4_ptp_set_offs(struct rcar_gen4_ptp_private *ptp_priv,
- enum rcar_gen4_ptp_reg_layout layout)
-{
- if (layout != RCAR_GEN4_PTP_REG_LAYOUT)
- return -EINVAL;
-
- ptp_priv->offs = &gen4_offs;
-
- return 0;
-}
-
static s64 rcar_gen4_ptp_rate_to_increment(u32 rate)
{
/* Timer increment in ns.
@@ -151,27 +140,20 @@ static s64 rcar_gen4_ptp_rate_to_increment(u32 rate)
return div_s64(1000000000LL << 27, rate);
}
-int rcar_gen4_ptp_register(struct rcar_gen4_ptp_private *ptp_priv,
- enum rcar_gen4_ptp_reg_layout layout, u32 rate)
+int rcar_gen4_ptp_register(struct rcar_gen4_ptp_private *ptp_priv, u32 rate)
{
- int ret;
-
if (ptp_priv->initialized)
return 0;
spin_lock_init(&ptp_priv->lock);
- ret = rcar_gen4_ptp_set_offs(ptp_priv, layout);
- if (ret)
- return ret;
-
ptp_priv->default_addend = rcar_gen4_ptp_rate_to_increment(rate);
- iowrite32(ptp_priv->default_addend, ptp_priv->addr + ptp_priv->offs->increment);
+ iowrite32(ptp_priv->default_addend, ptp_priv->addr + PTPTIVC0_REG);
ptp_priv->clock = ptp_clock_register(&ptp_priv->info, NULL);
if (IS_ERR(ptp_priv->clock))
return PTR_ERR(ptp_priv->clock);
- iowrite32(0x01, ptp_priv->addr + ptp_priv->offs->enable);
+ iowrite32(0x01, ptp_priv->addr + PTPTMEC_REG);
ptp_priv->initialized = true;
return 0;
@@ -180,7 +162,7 @@ EXPORT_SYMBOL_GPL(rcar_gen4_ptp_register);
int rcar_gen4_ptp_unregister(struct rcar_gen4_ptp_private *ptp_priv)
{
- iowrite32(1, ptp_priv->addr + ptp_priv->offs->disable);
+ iowrite32(1, ptp_priv->addr + PTPTMDC_REG);
return ptp_clock_unregister(ptp_priv->clock);
}
diff --git a/drivers/net/ethernet/renesas/rcar_gen4_ptp.h b/drivers/net/ethernet/renesas/rcar_gen4_ptp.h
index e22da5acd53d..9a9c232c854e 100644
--- a/drivers/net/ethernet/renesas/rcar_gen4_ptp.h
+++ b/drivers/net/ethernet/renesas/rcar_gen4_ptp.h
@@ -9,60 +9,16 @@
#include <linux/ptp_clock_kernel.h>
-#define RCAR_GEN4_GPTP_OFFSET_S4 0x00018000
-
-enum rcar_gen4_ptp_reg_layout {
- RCAR_GEN4_PTP_REG_LAYOUT
-};
-
-/* driver's definitions */
-#define RCAR_GEN4_RXTSTAMP_ENABLED BIT(0)
-#define RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT BIT(1)
-#define RCAR_GEN4_RXTSTAMP_TYPE_ALL (RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT | BIT(2))
-#define RCAR_GEN4_RXTSTAMP_TYPE RCAR_GEN4_RXTSTAMP_TYPE_ALL
-
-#define RCAR_GEN4_TXTSTAMP_ENABLED BIT(0)
-
-#define PTPRO 0
-
-enum rcar_gen4_ptp_reg {
- PTPTMEC = PTPRO + 0x0010,
- PTPTMDC = PTPRO + 0x0014,
- PTPTIVC0 = PTPRO + 0x0020,
- PTPTOVC00 = PTPRO + 0x0030,
- PTPTOVC10 = PTPRO + 0x0034,
- PTPTOVC20 = PTPRO + 0x0038,
- PTPGPTPTM00 = PTPRO + 0x0050,
- PTPGPTPTM10 = PTPRO + 0x0054,
- PTPGPTPTM20 = PTPRO + 0x0058,
-};
-
-struct rcar_gen4_ptp_reg_offset {
- u16 enable;
- u16 disable;
- u16 increment;
- u16 config_t0;
- u16 config_t1;
- u16 config_t2;
- u16 monitor_t0;
- u16 monitor_t1;
- u16 monitor_t2;
-};
-
struct rcar_gen4_ptp_private {
void __iomem *addr;
struct ptp_clock *clock;
struct ptp_clock_info info;
- const struct rcar_gen4_ptp_reg_offset *offs;
spinlock_t lock; /* For multiple registers access */
- u32 tstamp_tx_ctrl;
- u32 tstamp_rx_ctrl;
s64 default_addend;
bool initialized;
};
-int rcar_gen4_ptp_register(struct rcar_gen4_ptp_private *ptp_priv,
- enum rcar_gen4_ptp_reg_layout layout, u32 rate);
+int rcar_gen4_ptp_register(struct rcar_gen4_ptp_private *ptp_priv, u32 rate);
int rcar_gen4_ptp_unregister(struct rcar_gen4_ptp_private *ptp_priv);
struct rcar_gen4_ptp_private *rcar_gen4_ptp_alloc(struct platform_device *pdev);
diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
index 72e3ff596d31..aa605304fed0 100644
--- a/drivers/net/ethernet/renesas/rswitch.h
+++ b/drivers/net/ethernet/renesas/rswitch.h
@@ -1,18 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Renesas Ethernet Switch device driver
*
- * Copyright (C) 2022 Renesas Electronics Corporation
+ * Copyright (C) 2022-2025 Renesas Electronics Corporation
*/
#ifndef __RSWITCH_H__
#define __RSWITCH_H__
#include <linux/platform_device.h>
+#include <linux/phy.h>
+
#include "rcar_gen4_ptp.h"
#define RSWITCH_MAX_NUM_QUEUES 128
+#define RSWITCH_NUM_AGENTS 5
#define RSWITCH_NUM_PORTS 3
+
+#define rswitch_for_all_ports(_priv, _rdev) \
+ list_for_each_entry(_rdev, &_priv->port_list, list)
+
#define rswitch_for_each_enabled_port(priv, i) \
for (i = 0; i < RSWITCH_NUM_PORTS; i++) \
if (priv->rdev[i]->disabled) \
@@ -724,35 +731,28 @@ enum rswitch_etha_mode {
#define EAVCC_VEM_SC_TAG (0x3 << 16)
-#define MPIC_PIS_MII 0x00
-#define MPIC_PIS_GMII 0x02
-#define MPIC_PIS_XGMII 0x04
-#define MPIC_LSC_SHIFT 3
-#define MPIC_LSC_100M (1 << MPIC_LSC_SHIFT)
-#define MPIC_LSC_1G (2 << MPIC_LSC_SHIFT)
-#define MPIC_LSC_2_5G (3 << MPIC_LSC_SHIFT)
-
-#define MDIO_READ_C45 0x03
-#define MDIO_WRITE_C45 0x01
+#define MPIC_PIS GENMASK(2, 0)
+#define MPIC_PIS_GMII 2
+#define MPIC_PIS_XGMII 4
+#define MPIC_LSC GENMASK(5, 3)
+#define MPIC_LSC_100M 1
+#define MPIC_LSC_1G 2
+#define MPIC_LSC_2_5G 3
+#define MPIC_PSMCS GENMASK(22, 16)
+#define MPIC_PSMHT GENMASK(26, 24)
#define MPSM_PSME BIT(0)
-#define MPSM_MFF_C45 BIT(2)
-#define MPSM_PRD_SHIFT 16
-#define MPSM_PRD_MASK GENMASK(31, MPSM_PRD_SHIFT)
-
-/* Completion flags */
-#define MMIS1_PAACS BIT(2) /* Address */
-#define MMIS1_PWACS BIT(1) /* Write */
-#define MMIS1_PRACS BIT(0) /* Read */
-#define MMIS1_CLEAR_FLAGS 0xf
-
-#define MPIC_PSMCS_SHIFT 16
-#define MPIC_PSMCS_MASK GENMASK(22, MPIC_PSMCS_SHIFT)
-#define MPIC_PSMCS(val) ((val) << MPIC_PSMCS_SHIFT)
-
-#define MPIC_PSMHT_SHIFT 24
-#define MPIC_PSMHT_MASK GENMASK(26, MPIC_PSMHT_SHIFT)
-#define MPIC_PSMHT(val) ((val) << MPIC_PSMHT_SHIFT)
+#define MPSM_MFF BIT(2)
+#define MPSM_MMF_C22 0
+#define MPSM_MMF_C45 1
+#define MPSM_PDA GENMASK(7, 3)
+#define MPSM_PRA GENMASK(12, 8)
+#define MPSM_POP GENMASK(14, 13)
+#define MPSM_POP_ADDRESS 0
+#define MPSM_POP_WRITE 1
+#define MPSM_POP_READ_C22 2
+#define MPSM_POP_READ_C45 3
+#define MPSM_PRD GENMASK(31, 16)
#define MLVC_PLV BIT(16)
@@ -806,6 +806,7 @@ enum rswitch_gwca_mode {
#define CABPPFLC_INIT_VALUE 0x00800080
/* MFWD */
+#define FWPC0(i) (FWPC00 + (i) * 0x10)
#define FWPC0_LTHTA BIT(0)
#define FWPC0_IP4UE BIT(3)
#define FWPC0_IP4TE BIT(4)
@@ -814,23 +815,42 @@ enum rswitch_gwca_mode {
#define FWPC0_IP4EA BIT(10)
#define FWPC0_IPDSA BIT(12)
#define FWPC0_IPHLA BIT(18)
-#define FWPC0_MACSDA BIT(20)
+#define FWPC0_MACDSA BIT(20)
+#define FWPC0_MACSSA BIT(23)
#define FWPC0_MACHLA BIT(26)
#define FWPC0_MACHMA BIT(27)
#define FWPC0_VLANSA BIT(28)
-#define FWPC0(i) (FWPC00 + (i) * 0x10)
-#define FWPC0_DEFAULT (FWPC0_LTHTA | FWPC0_IP4UE | FWPC0_IP4TE | \
- FWPC0_IP4OE | FWPC0_L2SE | FWPC0_IP4EA | \
- FWPC0_IPDSA | FWPC0_IPHLA | FWPC0_MACSDA | \
- FWPC0_MACHLA | FWPC0_MACHMA | FWPC0_VLANSA)
#define FWPC1(i) (FWPC10 + (i) * 0x10)
+#define FWCP1_LTHFW GENMASK(16 + (RSWITCH_NUM_AGENTS - 1), 16)
#define FWPC1_DDE BIT(0)
-#define FWPBFC(i) (FWPBFC0 + (i) * 0x10)
+#define FWPC2(i) (FWPC20 + (i) * 0x10)
+#define FWCP2_LTWFW GENMASK(16 + (RSWITCH_NUM_AGENTS - 1), 16)
+#define FWCP2_LTWFW_MASK GENMASK(16 + (RSWITCH_NUM_AGENTS - 1), 16)
+
+#define FWPBFC(i) (FWPBFC0 + (i) * 0x10)
+#define FWPBFC_PBDV GENMASK(RSWITCH_NUM_AGENTS - 1, 0)
#define FWPBFCSDC(j, i) (FWPBFCSDC00 + (i) * 0x10 + (j) * 0x04)
+#define FWMACHEC_MACHMUE_MASK GENMASK(26, 16)
+
+#define FWMACTIM_MACTIOG BIT(0)
+#define FWMACTIM_MACTR BIT(1)
+
+#define FWMACAGUSPC_MACAGUSP GENMASK(9, 0)
+#define FWMACAGC_MACAGT GENMASK(15, 0)
+#define FWMACAGC_MACAGE BIT(16)
+#define FWMACAGC_MACAGSL BIT(17)
+#define FWMACAGC_MACAGPM BIT(18)
+#define FWMACAGC_MACDES BIT(24)
+#define FWMACAGC_MACAGOG BIT(28)
+#define FWMACAGC_MACDESOG BIT(29)
+
+#define RSW_AGEING_CLK_PER_US 0x140
+#define RSW_AGEING_TIME 300
+
/* TOP */
#define TPEMIMC7(queue) (TPEMIMC70 + (queue) * 4)
@@ -972,14 +992,6 @@ struct rswitch_gwca_queue {
};
};
-struct rswitch_gwca_ts_info {
- struct sk_buff *skb;
- struct list_head list;
-
- int port;
- u8 tag;
-};
-
#define RSWITCH_NUM_IRQ_REGS (RSWITCH_MAX_NUM_QUEUES / BITS_PER_TYPE(u32))
struct rswitch_gwca {
unsigned int index;
@@ -989,14 +1001,13 @@ struct rswitch_gwca {
struct rswitch_gwca_queue *queues;
int num_queues;
struct rswitch_gwca_queue ts_queue;
- struct list_head ts_info_list;
DECLARE_BITMAP(used, RSWITCH_MAX_NUM_QUEUES);
u32 tx_irq_bits[RSWITCH_NUM_IRQ_REGS];
u32 rx_irq_bits[RSWITCH_NUM_IRQ_REGS];
- int speed;
};
#define NUM_QUEUES_PER_NDEV 2
+#define TS_TAGS_PER_PORT 256
struct rswitch_device {
struct rswitch_private *priv;
struct net_device *ndev;
@@ -1004,13 +1015,22 @@ struct rswitch_device {
void __iomem *addr;
struct rswitch_gwca_queue *tx_queue;
struct rswitch_gwca_queue *rx_queue;
- u8 ts_tag;
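+	/* One in-flight TX timestamp skb slot per possible 8-bit timestamp
+	 * tag value, with a bitmap tracking which tags are in use.
+	 */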
+ struct sk_buff *ts_skb[TS_TAGS_PER_PORT];
+ DECLARE_BITMAP(ts_skb_used, TS_TAGS_PER_PORT);
bool disabled;
+ struct list_head list;
+
int port;
struct rswitch_etha *etha;
struct device_node *np_port;
struct phy *serdes;
+
+ struct net_device *brdev; /* master bridge device */
+ unsigned int learning_requested : 1;
+ unsigned int learning_offloaded : 1;
+ unsigned int forwarding_requested : 1;
+ unsigned int forwarding_offloaded : 1;
};
struct rswitch_mfwd_mac_table_entry {
@@ -1035,11 +1055,20 @@ struct rswitch_private {
struct rswitch_etha etha[RSWITCH_NUM_PORTS];
struct rswitch_mfwd mfwd;
+ struct list_head port_list;
+
spinlock_t lock; /* lock interrupt registers' control */
struct clk *clk;
bool etha_no_runtime_change;
bool gwca_halt;
+ struct net_device *offload_brdev;
+
+ enum hwtstamp_tx_types tstamp_tx_ctrl;
+ enum hwtstamp_rx_filters tstamp_rx_ctrl;
};
+bool is_rdev(const struct net_device *ndev);
+void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set);
+
#endif /* #ifndef __RSWITCH_H__ */
diff --git a/drivers/net/ethernet/renesas/rswitch_l2.c b/drivers/net/ethernet/renesas/rswitch_l2.c
new file mode 100644
index 000000000000..4a69ec77d69c
--- /dev/null
+++ b/drivers/net/ethernet/renesas/rswitch_l2.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Renesas Ethernet Switch device driver
+ *
+ * Copyright (C) 2025 Renesas Electronics Corporation
+ */
+
+#include <linux/err.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/kernel.h>
+#include <net/switchdev.h>
+
+#include "rswitch.h"
+#include "rswitch_l2.h"
+
+static bool rdev_for_l2_offload(struct rswitch_device *rdev)
+{
+ return rdev->priv->offload_brdev &&
+ rdev->brdev == rdev->priv->offload_brdev &&
+ (test_bit(rdev->port, rdev->priv->opened_ports));
+}
+
+static void rswitch_change_l2_hw_offloading(struct rswitch_device *rdev,
+ bool start, bool learning)
+{
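+	/* Learning is controlled via the FWPC0_MACSSA/MACHLA/MACHMA bits,
+	 * forwarding via FWPC0_MACDSA; the bits are set to start offloading
+	 * and cleared to stop it.
+	 */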
+ u32 bits = learning ? FWPC0_MACSSA | FWPC0_MACHLA | FWPC0_MACHMA : FWPC0_MACDSA;
+ u32 clear = start ? 0 : bits;
+ u32 set = start ? bits : 0;
+
+ if ((learning && rdev->learning_offloaded == start) ||
+ (!learning && rdev->forwarding_offloaded == start))
+ return;
+
+ rswitch_modify(rdev->priv->addr, FWPC0(rdev->port), clear, set);
+
+ if (learning)
+ rdev->learning_offloaded = start;
+ else
+ rdev->forwarding_offloaded = start;
+
+ netdev_info(rdev->ndev, "%s hw %s\n", start ? "starting" : "stopping",
+ learning ? "learning" : "forwarding");
+}
+
+static void rswitch_update_l2_hw_learning(struct rswitch_private *priv)
+{
+ struct rswitch_device *rdev;
+ bool learning_needed;
+
+ rswitch_for_all_ports(priv, rdev) {
+ if (rdev_for_l2_offload(rdev))
+ learning_needed = rdev->learning_requested;
+ else
+ learning_needed = false;
+
+ rswitch_change_l2_hw_offloading(rdev, learning_needed, true);
+ }
+}
+
+static void rswitch_update_l2_hw_forwarding(struct rswitch_private *priv)
+{
+ struct rswitch_device *rdev;
+ unsigned int fwd_mask;
+
+ /* calculate fwd_mask with zeroes in bits corresponding to ports that
+ * shall participate in hardware forwarding
+ */
+ fwd_mask = GENMASK(RSWITCH_NUM_AGENTS - 1, 0);
+
+ rswitch_for_all_ports(priv, rdev) {
+ if (rdev_for_l2_offload(rdev) && rdev->forwarding_requested)
+ fwd_mask &= ~BIT(rdev->port);
+ }
+
+ rswitch_for_all_ports(priv, rdev) {
+ if ((rdev_for_l2_offload(rdev) && rdev->forwarding_requested) ||
+ rdev->forwarding_offloaded) {
+ /* Update allowed offload destinations even for ports
+ * with L2 offload enabled earlier.
+ *
+ * Do not allow L2 forwarding to self for hw port.
+ */
+ iowrite32(FIELD_PREP(FWCP2_LTWFW_MASK, fwd_mask | BIT(rdev->port)),
+ priv->addr + FWPC2(rdev->port));
+ }
+
+ if (rdev_for_l2_offload(rdev) &&
+ rdev->forwarding_requested &&
+ !rdev->forwarding_offloaded) {
+ rswitch_change_l2_hw_offloading(rdev, true, false);
+ } else if (rdev->forwarding_offloaded) {
+ rswitch_change_l2_hw_offloading(rdev, false, false);
+ }
+ }
+}
+
+void rswitch_update_l2_offload(struct rswitch_private *priv)
+{
+ rswitch_update_l2_hw_learning(priv);
+ rswitch_update_l2_hw_forwarding(priv);
+}
+
+static void rswitch_update_offload_brdev(struct rswitch_private *priv)
+{
+ struct net_device *offload_brdev = NULL;
+ struct rswitch_device *rdev, *rdev2;
+
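+	/* Hardware L2 offload is only meaningful when at least two switch
+	 * ports are enslaved to the same bridge; look for such a bridge.
+	 */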
+ rswitch_for_all_ports(priv, rdev) {
+ if (!rdev->brdev)
+ continue;
+ rswitch_for_all_ports(priv, rdev2) {
+ if (rdev2 == rdev)
+ break;
+ if (rdev2->brdev == rdev->brdev) {
+ offload_brdev = rdev->brdev;
+ break;
+ }
+ }
+ if (offload_brdev)
+ break;
+ }
+
+	if (offload_brdev && priv->offload_brdev &&
+	    offload_brdev != priv->offload_brdev)
+		dev_dbg(&priv->pdev->dev,
+			"changing l2 offload from %s to %s\n",
+			netdev_name(priv->offload_brdev),
+			netdev_name(offload_brdev));
+	else if (offload_brdev && !priv->offload_brdev)
+		dev_dbg(&priv->pdev->dev, "starting l2 offload for %s\n",
+			netdev_name(offload_brdev));
+	else if (!offload_brdev && priv->offload_brdev)
+		dev_dbg(&priv->pdev->dev, "stopping l2 offload for %s\n",
+			netdev_name(priv->offload_brdev));
+
+ priv->offload_brdev = offload_brdev;
+
+ rswitch_update_l2_offload(priv);
+}
+
+static bool rswitch_port_check(const struct net_device *ndev)
+{
+ return is_rdev(ndev);
+}
+
+static void rswitch_port_update_brdev(struct net_device *ndev,
+ struct net_device *brdev)
+{
+ struct rswitch_device *rdev;
+
+ if (!is_rdev(ndev))
+ return;
+
+ rdev = netdev_priv(ndev);
+ rdev->brdev = brdev;
+ rswitch_update_offload_brdev(rdev->priv);
+}
+
+static int rswitch_port_update_stp_state(struct net_device *ndev, u8 stp_state)
+{
+ struct rswitch_device *rdev;
+
+ if (!is_rdev(ndev))
+ return -ENODEV;
+
+ rdev = netdev_priv(ndev);
+ rdev->learning_requested = (stp_state == BR_STATE_LEARNING ||
+ stp_state == BR_STATE_FORWARDING);
+ rdev->forwarding_requested = (stp_state == BR_STATE_FORWARDING);
+ rswitch_update_l2_offload(rdev->priv);
+
+ return 0;
+}
+
+static int rswitch_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info;
+ struct net_device *brdev;
+
+ if (!rswitch_port_check(ndev))
+ return NOTIFY_DONE;
+ if (event != NETDEV_CHANGEUPPER)
+ return NOTIFY_DONE;
+
+ info = ptr;
+
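+	/* Only track a port being linked to or unlinked from a bridge
+	 * master; other upper devices are ignored.
+	 */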
+ if (netif_is_bridge_master(info->upper_dev)) {
+ brdev = info->linking ? info->upper_dev : NULL;
+ rswitch_port_update_brdev(ndev, brdev);
+ }
+
+ return NOTIFY_OK;
+}
+
+static int rswitch_update_ageing_time(struct net_device *ndev, clock_t time)
+{
+ struct rswitch_device *rdev = netdev_priv(ndev);
+ u32 reg_val;
+
+ if (!is_rdev(ndev))
+ return -ENODEV;
+
+ if (!FIELD_FIT(FWMACAGC_MACAGT, time))
+ return -EINVAL;
+
+ reg_val = FIELD_PREP(FWMACAGC_MACAGT, time);
+ reg_val |= FWMACAGC_MACAGE | FWMACAGC_MACAGSL;
+ iowrite32(reg_val, rdev->priv->addr + FWMACAGC);
+
+ return 0;
+}
+
+static int rswitch_port_attr_set(struct net_device *ndev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ return rswitch_port_update_stp_state(ndev, attr->u.stp_state);
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ return rswitch_update_ageing_time(ndev, attr->u.ageing_time);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int rswitch_switchdev_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = switchdev_notifier_info_to_dev(ptr);
+ int ret;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ ret = switchdev_handle_port_attr_set(ndev, ptr,
+ rswitch_port_check,
+ rswitch_port_attr_set);
+ return notifier_from_errno(ret);
+ }
+
+ if (!rswitch_port_check(ndev))
+ return NOTIFY_DONE;
+
+ return notifier_from_errno(-EOPNOTSUPP);
+}
+
+static int rswitch_switchdev_blocking_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = switchdev_notifier_info_to_dev(ptr);
+ int ret;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ return -EOPNOTSUPP;
+ case SWITCHDEV_PORT_OBJ_DEL:
+ return -EOPNOTSUPP;
+ case SWITCHDEV_PORT_ATTR_SET:
+ ret = switchdev_handle_port_attr_set(ndev, ptr,
+ rswitch_port_check,
+ rswitch_port_attr_set);
+ break;
+ default:
+ if (!rswitch_port_check(ndev))
+ return NOTIFY_DONE;
+ ret = -EOPNOTSUPP;
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static struct notifier_block rswitch_netdevice_nb = {
+ .notifier_call = rswitch_netdevice_event,
+};
+
+static struct notifier_block rswitch_switchdev_nb = {
+ .notifier_call = rswitch_switchdev_event,
+};
+
+static struct notifier_block rswitch_switchdev_blocking_nb = {
+ .notifier_call = rswitch_switchdev_blocking_event,
+};
+
+int rswitch_register_notifiers(void)
+{
+ int ret;
+
+ ret = register_netdevice_notifier(&rswitch_netdevice_nb);
+ if (ret)
+ goto register_netdevice_notifier_failed;
+
+ ret = register_switchdev_notifier(&rswitch_switchdev_nb);
+ if (ret)
+ goto register_switchdev_notifier_failed;
+
+ ret = register_switchdev_blocking_notifier(&rswitch_switchdev_blocking_nb);
+ if (ret)
+ goto register_switchdev_blocking_notifier_failed;
+
+ return 0;
+
+register_switchdev_blocking_notifier_failed:
+ unregister_switchdev_notifier(&rswitch_switchdev_nb);
+register_switchdev_notifier_failed:
+ unregister_netdevice_notifier(&rswitch_netdevice_nb);
+register_netdevice_notifier_failed:
+
+ return ret;
+}
+
+void rswitch_unregister_notifiers(void)
+{
+ unregister_switchdev_blocking_notifier(&rswitch_switchdev_blocking_nb);
+ unregister_switchdev_notifier(&rswitch_switchdev_nb);
+ unregister_netdevice_notifier(&rswitch_netdevice_nb);
+}
diff --git a/drivers/net/ethernet/renesas/rswitch_l2.h b/drivers/net/ethernet/renesas/rswitch_l2.h
new file mode 100644
index 000000000000..57050ede8f31
--- /dev/null
+++ b/drivers/net/ethernet/renesas/rswitch_l2.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Renesas Ethernet Switch device driver
+ *
+ * Copyright (C) 2025 Renesas Electronics Corporation
+ */
+
+#ifndef __RSWITCH_L2_H__
+#define __RSWITCH_L2_H__
+
+void rswitch_update_l2_offload(struct rswitch_private *priv);
+
+int rswitch_register_notifiers(void);
+void rswitch_unregister_notifiers(void);
+
+#endif /* #ifndef __RSWITCH_L2_H__ */
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch_main.c
index 8d18dae4d8fb..e14b21148f27 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch_main.c
@@ -1,15 +1,18 @@
// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet Switch device driver
*
- * Copyright (C) 2022 Renesas Electronics Corporation
+ * Copyright (C) 2022-2025 Renesas Electronics Corporation
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/ip.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
@@ -25,6 +28,9 @@
#include <linux/sys_soc.h>
#include "rswitch.h"
+#include "rswitch_l2.h"
+
+#define RSWITCH_GPTP_OFFSET_S4 0x00018000
static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected)
{
@@ -34,7 +40,7 @@ static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected
1, RSWITCH_TIMEOUT_US);
}
-static void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set)
+void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set)
{
iowrite32((ioread32(addr + reg) & ~clear) | set, addr + reg);
}
@@ -109,27 +115,56 @@ static void rswitch_top_init(struct rswitch_private *priv)
}
/* Forwarding engine block (MFWD) */
-static void rswitch_fwd_init(struct rswitch_private *priv)
+static int rswitch_fwd_init(struct rswitch_private *priv)
{
+ u32 all_ports_mask = GENMASK(RSWITCH_NUM_AGENTS - 1, 0);
unsigned int i;
-
- /* For ETHA */
- for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
- iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(i));
+ u32 reg_val;
+
+ /* Start with empty configuration */
+ for (i = 0; i < RSWITCH_NUM_AGENTS; i++) {
+ /* Disable all port features */
+ iowrite32(0, priv->addr + FWPC0(i));
+ /* Disallow L3 forwarding and direct descriptor forwarding */
+ iowrite32(FIELD_PREP(FWCP1_LTHFW, all_ports_mask),
+ priv->addr + FWPC1(i));
+ /* Disallow L2 forwarding */
+ iowrite32(FIELD_PREP(FWCP2_LTWFW, all_ports_mask),
+ priv->addr + FWPC2(i));
+ /* Disallow port-based forwarding */
iowrite32(0, priv->addr + FWPBFC(i));
}
- for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+ /* Configure MAC table aging */
+ rswitch_modify(priv->addr, FWMACAGUSPC, FWMACAGUSPC_MACAGUSP,
+ FIELD_PREP(FWMACAGUSPC_MACAGUSP, RSW_AGEING_CLK_PER_US));
+
+ reg_val = FIELD_PREP(FWMACAGC_MACAGT, RSW_AGEING_TIME);
+ reg_val |= FWMACAGC_MACAGE | FWMACAGC_MACAGSL;
+ iowrite32(reg_val, priv->addr + FWMACAGC);
+
+ /* For enabled ETHA ports, set up port-based forwarding */
+ rswitch_for_each_enabled_port(priv, i) {
+ /* Port-based forwarding from port i to GWCA port */
+ rswitch_modify(priv->addr, FWPBFC(i), FWPBFC_PBDV,
+ FIELD_PREP(FWPBFC_PBDV, BIT(priv->gwca.index)));
+ /* Within GWCA port, forward to Rx queue for port i */
iowrite32(priv->rdev[i]->rx_queue->index,
priv->addr + FWPBFCSDC(GWCA_INDEX, i));
- iowrite32(BIT(priv->gwca.index), priv->addr + FWPBFC(i));
}
- /* For GWCA */
- iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(priv->gwca.index));
- iowrite32(FWPC1_DDE, priv->addr + FWPC1(priv->gwca.index));
- iowrite32(0, priv->addr + FWPBFC(priv->gwca.index));
- iowrite32(GENMASK(RSWITCH_NUM_PORTS - 1, 0), priv->addr + FWPBFC(priv->gwca.index));
+ /* For GWCA port, allow direct descriptor forwarding */
+ rswitch_modify(priv->addr, FWPC1(priv->gwca.index), FWPC1_DDE, FWPC1_DDE);
+
+ /* Initialize hardware L2 forwarding table */
+
+ /* Allow entire table to be used for "unsecure" entries */
+ rswitch_modify(priv->addr, FWMACHEC, 0, FWMACHEC_MACHMUE_MASK);
+
+ /* Initialize MAC hash table */
+ iowrite32(FWMACTIM_MACTIOG, priv->addr + FWMACTIM);
+
+ return rswitch_reg_wait(priv->addr, FWMACTIM, FWMACTIM_MACTIOG, 0);
}
/* Gateway CPU agent block (GWCA) */
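The reworked rswitch_fwd_init() above builds its register values with the <linux/bitfield.h> helpers rather than open-coded shifts and masks. As a minimal sketch of that idiom (the register layout, macro names and function below are invented for illustration and are not part of the driver):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical 32-bit register: bits 25:16 hold an ageing time, bit 1 is an
 * enable flag.  GENMASK() describes the field, FIELD_FIT() range-checks a
 * value against it, and FIELD_PREP() shifts the value into position.
 */
#define EX_AGC_AGT	GENMASK(25, 16)
#define EX_AGC_AGE	BIT(1)

static int ex_build_ageing_reg(u32 time, u32 *reg_val)
{
	if (!FIELD_FIT(EX_AGC_AGT, time))	/* value does not fit in the field */
		return -EINVAL;

	*reg_val = FIELD_PREP(EX_AGC_AGT, time) | EX_AGC_AGE;
	return 0;
}

The same pattern appears in rswitch_update_ageing_time() in rswitch_l2.c earlier in this patch, which range-checks the bridge ageing time with FIELD_FIT() before writing FWMACAGC.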
@@ -547,7 +582,6 @@ static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
desc = &gq->ts_ring[gq->ring_size];
desc->desc.die_dt = DT_LINKFIX;
rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
- INIT_LIST_HEAD(&priv->gwca.ts_info_list);
return 0;
}
@@ -811,7 +845,7 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
if (!skb)
goto out;
- get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
+ get_ts = rdev->priv->tstamp_rx_ctrl != HWTSTAMP_FILTER_NONE;
if (get_ts) {
struct skb_shared_hwtstamps *shhwtstamps;
struct timespec64 ts;
@@ -862,13 +896,10 @@ static void rswitch_tx_free(struct net_device *ndev)
struct rswitch_ext_desc *desc;
struct sk_buff *skb;
- for (; rswitch_get_num_cur_queues(gq) > 0;
- gq->dirty = rswitch_next_queue_index(gq, false, 1)) {
- desc = &gq->tx_ring[gq->dirty];
- if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
- break;
-
+ desc = &gq->tx_ring[gq->dirty];
+ while ((desc->desc.die_dt & DT_MASK) == DT_FEMPTY) {
dma_rmb();
+
skb = gq->skbs[gq->dirty];
if (skb) {
rdev->ndev->stats.tx_packets++;
@@ -879,7 +910,10 @@ static void rswitch_tx_free(struct net_device *ndev)
dev_kfree_skb_any(gq->skbs[gq->dirty]);
gq->skbs[gq->dirty] = NULL;
}
+
desc->desc.die_dt = DT_EEMPTY;
+ gq->dirty = rswitch_next_queue_index(gq, false, 1);
+ desc = &gq->tx_ring[gq->dirty];
}
}
@@ -908,8 +942,10 @@ retry:
if (napi_complete_done(napi, budget - quota)) {
spin_lock_irqsave(&priv->lock, flags);
- rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
- rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
+ if (test_bit(rdev->port, priv->opened_ports)) {
+ rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
+ rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
+ }
spin_unlock_irqrestore(&priv->lock, flags);
}
@@ -1001,9 +1037,10 @@ static int rswitch_gwca_request_irqs(struct rswitch_private *priv)
static void rswitch_ts(struct rswitch_private *priv)
{
struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
- struct rswitch_gwca_ts_info *ts_info, *ts_info2;
struct skb_shared_hwtstamps shhwtstamps;
struct rswitch_ts_desc *desc;
+ struct rswitch_device *rdev;
+ struct sk_buff *ts_skb;
struct timespec64 ts;
unsigned int num;
u32 tag, port;
@@ -1013,23 +1050,28 @@ static void rswitch_ts(struct rswitch_private *priv)
dma_rmb();
port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl));
- tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl));
-
- list_for_each_entry_safe(ts_info, ts_info2, &priv->gwca.ts_info_list, list) {
- if (!(ts_info->port == port && ts_info->tag == tag))
- continue;
-
- memset(&shhwtstamps, 0, sizeof(shhwtstamps));
- ts.tv_sec = __le32_to_cpu(desc->ts_sec);
- ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
- shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
- skb_tstamp_tx(ts_info->skb, &shhwtstamps);
- dev_consume_skb_irq(ts_info->skb);
- list_del(&ts_info->list);
- kfree(ts_info);
- break;
- }
+ if (unlikely(port >= RSWITCH_NUM_PORTS))
+ goto next;
+ rdev = priv->rdev[port];
+ tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl));
+ if (unlikely(tag >= TS_TAGS_PER_PORT))
+ goto next;
+ ts_skb = xchg(&rdev->ts_skb[tag], NULL);
+ smp_mb(); /* order rdev->ts_skb[] read before bitmap update */
+ clear_bit(tag, rdev->ts_skb_used);
+
+ if (unlikely(!ts_skb))
+ goto next;
+
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ ts.tv_sec = __le32_to_cpu(desc->ts_sec);
+ ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
+ shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
+ skb_tstamp_tx(ts_skb, &shhwtstamps);
+ dev_consume_skb_irq(ts_skb);
+
+next:
gq->cur = rswitch_next_queue_index(gq, true, 1);
desc = &gq->ts_ring[gq->cur];
}
@@ -1114,32 +1156,47 @@ static int rswitch_etha_wait_link_verification(struct rswitch_etha *etha)
static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac)
{
- u32 val;
+ u32 pis, lsc;
rswitch_etha_write_mac_address(etha, mac);
+ switch (etha->phy_interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ pis = MPIC_PIS_GMII;
+ break;
+ case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_5GBASER:
+ pis = MPIC_PIS_XGMII;
+ break;
+ default:
+ pis = FIELD_GET(MPIC_PIS, ioread32(etha->addr + MPIC));
+ break;
+ }
+
switch (etha->speed) {
case 100:
- val = MPIC_LSC_100M;
+ lsc = MPIC_LSC_100M;
break;
case 1000:
- val = MPIC_LSC_1G;
+ lsc = MPIC_LSC_1G;
break;
case 2500:
- val = MPIC_LSC_2_5G;
+ lsc = MPIC_LSC_2_5G;
break;
default:
- return;
+ lsc = FIELD_GET(MPIC_LSC, ioread32(etha->addr + MPIC));
+ break;
}
- iowrite32(MPIC_PIS_GMII | val, etha->addr + MPIC);
+ rswitch_modify(etha->addr, MPIC, MPIC_PIS | MPIC_LSC,
+ FIELD_PREP(MPIC_PIS, pis) | FIELD_PREP(MPIC_LSC, lsc));
}
static void rswitch_etha_enable_mii(struct rswitch_etha *etha)
{
- rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
- MPIC_PSMCS(etha->psmcs) | MPIC_PSMHT(0x06));
- rswitch_modify(etha->addr, MPSM, 0, MPSM_MFF_C45);
+ rswitch_modify(etha->addr, MPIC, MPIC_PSMCS | MPIC_PSMHT,
+ FIELD_PREP(MPIC_PSMCS, etha->psmcs) |
+ FIELD_PREP(MPIC_PSMHT, 0x06));
}
static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac)
@@ -1168,42 +1225,29 @@ static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac)
return rswitch_etha_change_mode(etha, EAMC_OPC_OPERATION);
}
-static int rswitch_etha_set_access(struct rswitch_etha *etha, bool read,
- int phyad, int devad, int regad, int data)
+static int rswitch_etha_mpsm_op(struct rswitch_etha *etha, bool read,
+ unsigned int mmf, unsigned int pda,
+ unsigned int pra, unsigned int pop,
+ unsigned int prd)
{
- int pop = read ? MDIO_READ_C45 : MDIO_WRITE_C45;
u32 val;
int ret;
- if (devad == 0xffffffff)
- return -ENODEV;
-
- writel(MMIS1_CLEAR_FLAGS, etha->addr + MMIS1);
-
- val = MPSM_PSME | MPSM_MFF_C45;
- iowrite32((regad << 16) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);
+ val = MPSM_PSME |
+ FIELD_PREP(MPSM_MFF, mmf) |
+ FIELD_PREP(MPSM_PDA, pda) |
+ FIELD_PREP(MPSM_PRA, pra) |
+ FIELD_PREP(MPSM_POP, pop) |
+ FIELD_PREP(MPSM_PRD, prd);
+ iowrite32(val, etha->addr + MPSM);
- ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);
+ ret = rswitch_reg_wait(etha->addr, MPSM, MPSM_PSME, 0);
if (ret)
return ret;
- rswitch_modify(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);
-
if (read) {
- writel((pop << 13) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);
-
- ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
- if (ret)
- return ret;
-
- ret = (ioread32(etha->addr + MPSM) & MPSM_PRD_MASK) >> 16;
-
- rswitch_modify(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
- } else {
- iowrite32((data << 16) | (pop << 13) | (devad << 8) | (phyad << 3) | val,
- etha->addr + MPSM);
-
- ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PWACS, MMIS1_PWACS);
+ val = ioread32(etha->addr + MPSM);
+ ret = FIELD_GET(MPSM_PRD, val);
}
return ret;
@@ -1213,16 +1257,47 @@ static int rswitch_etha_mii_read_c45(struct mii_bus *bus, int addr, int devad,
int regad)
{
struct rswitch_etha *etha = bus->priv;
+ int ret;
- return rswitch_etha_set_access(etha, true, addr, devad, regad, 0);
+ ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
+ MPSM_POP_ADDRESS, regad);
+ if (ret)
+ return ret;
+
+ return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C45, addr, devad,
+ MPSM_POP_READ_C45, 0);
}
static int rswitch_etha_mii_write_c45(struct mii_bus *bus, int addr, int devad,
int regad, u16 val)
{
struct rswitch_etha *etha = bus->priv;
+ int ret;
+
+ ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
+ MPSM_POP_ADDRESS, regad);
+ if (ret)
+ return ret;
+
+ return rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
+ MPSM_POP_WRITE, val);
+}
+
+static int rswitch_etha_mii_read_c22(struct mii_bus *bus, int phyad, int regad)
+{
+ struct rswitch_etha *etha = bus->priv;
+
+ return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C22, phyad, regad,
+ MPSM_POP_READ_C22, 0);
+}
+
+static int rswitch_etha_mii_write_c22(struct mii_bus *bus, int phyad,
+ int regad, u16 val)
+{
+ struct rswitch_etha *etha = bus->priv;
- return rswitch_etha_set_access(etha, false, addr, devad, regad, val);
+ return rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C22, phyad, regad,
+ MPSM_POP_WRITE, val);
}
/* Call of_node_put(port) after done */
@@ -1237,17 +1312,14 @@ static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev)
if (!ports)
return NULL;
- for_each_child_of_node(ports, port) {
+ for_each_available_child_of_node(ports, port) {
err = of_property_read_u32(port, "reg", &index);
if (err < 0) {
port = NULL;
goto out;
}
- if (index == rdev->etha->index) {
- if (!of_device_is_available(port))
- port = NULL;
+ if (index == rdev->etha->index)
break;
- }
}
out:
@@ -1307,6 +1379,8 @@ static int rswitch_mii_register(struct rswitch_device *rdev)
mii_bus->priv = rdev->etha;
mii_bus->read_c45 = rswitch_etha_mii_read_c45;
mii_bus->write_c45 = rswitch_etha_mii_write_c45;
+ mii_bus->read = rswitch_etha_mii_read_c22;
+ mii_bus->write = rswitch_etha_mii_write_c22;
mii_bus->parent = &rdev->priv->pdev->dev;
mdio_np = of_get_child_by_name(rdev->np_port, "mdio");
@@ -1527,7 +1601,7 @@ static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
{
unsigned int i;
- for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+ rswitch_for_each_enabled_port(priv, i) {
phy_exit(priv->rdev[i]->serdes);
rswitch_ether_port_deinit_one(priv->rdev[i]);
}
@@ -1538,54 +1612,61 @@ static int rswitch_open(struct net_device *ndev)
struct rswitch_device *rdev = netdev_priv(ndev);
unsigned long flags;
- phy_start(ndev->phydev);
+ if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
+ iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
napi_enable(&rdev->napi);
- netif_start_queue(ndev);
spin_lock_irqsave(&rdev->priv->lock, flags);
+ bitmap_set(rdev->priv->opened_ports, rdev->port, 1);
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
spin_unlock_irqrestore(&rdev->priv->lock, flags);
- if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
- iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
+ phy_start(ndev->phydev);
- bitmap_set(rdev->priv->opened_ports, rdev->port, 1);
+ netif_start_queue(ndev);
+
+ if (rdev->brdev)
+ rswitch_update_l2_offload(rdev->priv);
return 0;
-};
+}
static int rswitch_stop(struct net_device *ndev)
{
struct rswitch_device *rdev = netdev_priv(ndev);
- struct rswitch_gwca_ts_info *ts_info, *ts_info2;
+ struct sk_buff *ts_skb;
unsigned long flags;
+ unsigned int tag;
netif_tx_stop_all_queues(ndev);
- bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);
- if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
- iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);
-
- list_for_each_entry_safe(ts_info, ts_info2, &rdev->priv->gwca.ts_info_list, list) {
- if (ts_info->port != rdev->port)
- continue;
- dev_kfree_skb_irq(ts_info->skb);
- list_del(&ts_info->list);
- kfree(ts_info);
- }
+ phy_stop(ndev->phydev);
spin_lock_irqsave(&rdev->priv->lock, flags);
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
+ bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);
spin_unlock_irqrestore(&rdev->priv->lock, flags);
- phy_stop(ndev->phydev);
napi_disable(&rdev->napi);
+ if (rdev->brdev)
+ rswitch_update_l2_offload(rdev->priv);
+
+ if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
+ iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);
+
+ for_each_set_bit(tag, rdev->ts_skb_used, TS_TAGS_PER_PORT) {
+ ts_skb = xchg(&rdev->ts_skb[tag], NULL);
+ clear_bit(tag, rdev->ts_skb_used);
+ if (ts_skb)
+ dev_kfree_skb(ts_skb);
+ }
+
return 0;
-};
+}
static bool rswitch_ext_desc_set_info1(struct rswitch_device *rdev,
struct sk_buff *skb,
@@ -1594,20 +1675,17 @@ static bool rswitch_ext_desc_set_info1(struct rswitch_device *rdev,
desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) |
INFO1_IPV(GWCA_IPV_NUM) | INFO1_FMT);
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
- struct rswitch_gwca_ts_info *ts_info;
+ unsigned int tag;
- ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC);
- if (!ts_info)
+ tag = find_first_zero_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT);
+ if (tag == TS_TAGS_PER_PORT)
return false;
+ smp_mb(); /* order bitmap read before rdev->ts_skb[] write */
+ rdev->ts_skb[tag] = skb_get(skb);
+ set_bit(tag, rdev->ts_skb_used);
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- rdev->ts_tag++;
- desc->info1 |= cpu_to_le64(INFO1_TSUN(rdev->ts_tag) | INFO1_TXC);
-
- ts_info->skb = skb_get(skb);
- ts_info->port = rdev->port;
- ts_info->tag = rdev->ts_tag;
- list_add_tail(&ts_info->list, &rdev->priv->gwca.ts_info_list);
+ desc->info1 |= cpu_to_le64(INFO1_TSUN(tag) | INFO1_TXC);
skb_tx_timestamp(skb);
}
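Together with the rswitch_ts() changes earlier in this patch, the hunk above replaces the old ts_info linked list with a fixed per-port array of skb slots indexed by timestamp tag plus a usage bitmap: the transmit path claims a free tag and parks the skb, and the completion path takes the skb back with xchg() before releasing the tag. A condensed, hypothetical sketch of that claim/complete pattern (structure and function names invented; the driver additionally inserts smp_mb() barriers between the slot and bitmap updates, omitted here for brevity):

#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

#define EX_TAGS 16

struct ex_port {
	struct sk_buff *ts_skb[EX_TAGS];	/* one slot per outstanding tag */
	DECLARE_BITMAP(ts_skb_used, EX_TAGS);
};

/* Transmit side: claim a tag and hold the skb until the hardware reports
 * its timestamp.  Returns the tag, or -EBUSY if all tags are in flight.
 */
static int ex_claim_tag(struct ex_port *p, struct sk_buff *skb)
{
	unsigned int tag = find_first_zero_bit(p->ts_skb_used, EX_TAGS);

	if (tag == EX_TAGS)
		return -EBUSY;

	p->ts_skb[tag] = skb_get(skb);	/* take a reference for the slot */
	set_bit(tag, p->ts_skb_used);	/* publish the slot as in use */
	return tag;
}

/* Completion side: atomically take the skb out of its slot, then free the
 * tag for reuse; the caller delivers the timestamp and drops the reference.
 */
static struct sk_buff *ex_complete_tag(struct ex_port *p, unsigned int tag)
{
	struct sk_buff *skb = xchg(&p->ts_skb[tag], NULL);

	clear_bit(tag, p->ts_skb_used);
	return skb;
}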
@@ -1681,8 +1759,11 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
if (dma_mapping_error(ndev->dev.parent, dma_addr_orig))
goto err_kfree;
- gq->skbs[gq->cur] = skb;
- gq->unmap_addrs[gq->cur] = dma_addr_orig;
+ /* Store the skb at the last descriptor so it is not freed before the hardware has completed the send */
+ gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = skb;
+ gq->unmap_addrs[(gq->cur + nr_desc - 1) % gq->ring_size] = dma_addr_orig;
+
+ dma_wmb();
/* DT_FSTART should be set at last. So, this is reverse order. */
for (i = nr_desc; i-- > 0; ) {
@@ -1694,14 +1775,13 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
goto err_unmap;
}
- wmb(); /* gq->cur must be incremented after die_dt was set */
-
gq->cur = rswitch_next_queue_index(gq, true, nr_desc);
rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));
return ret;
err_unmap:
+ gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = NULL;
dma_unmap_single(ndev->dev.parent, dma_addr_orig, skb->len, DMA_TO_DEVICE);
err_kfree:
@@ -1715,88 +1795,77 @@ static struct net_device_stats *rswitch_get_stats(struct net_device *ndev)
return &ndev->stats;
}
-static int rswitch_hwstamp_get(struct net_device *ndev, struct ifreq *req)
+static int rswitch_hwstamp_get(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config)
{
struct rswitch_device *rdev = netdev_priv(ndev);
- struct rcar_gen4_ptp_private *ptp_priv;
- struct hwtstamp_config config;
-
- ptp_priv = rdev->priv->ptp_priv;
+ struct rswitch_private *priv = rdev->priv;
- config.flags = 0;
- config.tx_type = ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
- HWTSTAMP_TX_OFF;
- switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) {
- case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT:
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
- break;
- case RCAR_GEN4_RXTSTAMP_TYPE_ALL:
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- break;
- default:
- config.rx_filter = HWTSTAMP_FILTER_NONE;
- break;
- }
+ config->flags = 0;
+ config->tx_type = priv->tstamp_tx_ctrl;
+ config->rx_filter = priv->tstamp_rx_ctrl;
- return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
+ return 0;
}
-static int rswitch_hwstamp_set(struct net_device *ndev, struct ifreq *req)
+static int rswitch_hwstamp_set(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct rswitch_device *rdev = netdev_priv(ndev);
- u32 tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED;
- struct hwtstamp_config config;
- u32 tstamp_tx_ctrl;
-
- if (copy_from_user(&config, req->ifr_data, sizeof(config)))
- return -EFAULT;
+ enum hwtstamp_rx_filters tstamp_rx_ctrl;
+ enum hwtstamp_tx_types tstamp_tx_ctrl;
- if (config.flags)
+ if (config->flags)
return -EINVAL;
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
- tstamp_tx_ctrl = 0;
- break;
case HWTSTAMP_TX_ON:
- tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED;
+ tstamp_tx_ctrl = config->tx_type;
break;
default:
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
- tstamp_rx_ctrl = 0;
- break;
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
+ tstamp_rx_ctrl = config->rx_filter;
break;
default:
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ tstamp_rx_ctrl = HWTSTAMP_FILTER_ALL;
break;
}
- rdev->priv->ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
- rdev->priv->ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
+ rdev->priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
+ rdev->priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
- return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
+ return 0;
}
-static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
+static int rswitch_get_port_parent_id(struct net_device *ndev,
+ struct netdev_phys_item_id *ppid)
{
- if (!netif_running(ndev))
- return -EINVAL;
+ struct rswitch_device *rdev = netdev_priv(ndev);
+ const char *name;
- switch (cmd) {
- case SIOCGHWTSTAMP:
- return rswitch_hwstamp_get(ndev, req);
- case SIOCSHWTSTAMP:
- return rswitch_hwstamp_set(ndev, req);
- default:
- return phy_mii_ioctl(ndev->phydev, req, cmd);
- }
+ name = dev_name(&rdev->priv->pdev->dev);
+ ppid->id_len = min_t(size_t, strlen(name), sizeof(ppid->id));
+ memcpy(ppid->id, name, ppid->id_len);
+
+ return 0;
+}
+
+static int rswitch_get_phys_port_name(struct net_device *ndev,
+ char *name, size_t len)
+{
+ struct rswitch_device *rdev = netdev_priv(ndev);
+
+ snprintf(name, len, "tsn%d", rdev->port);
+
+ return 0;
}
static const struct net_device_ops rswitch_netdev_ops = {
@@ -1804,11 +1873,20 @@ static const struct net_device_ops rswitch_netdev_ops = {
.ndo_stop = rswitch_stop,
.ndo_start_xmit = rswitch_start_xmit,
.ndo_get_stats = rswitch_get_stats,
- .ndo_eth_ioctl = rswitch_eth_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
+ .ndo_get_port_parent_id = rswitch_get_port_parent_id,
+ .ndo_get_phys_port_name = rswitch_get_phys_port_name,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
+ .ndo_hwtstamp_get = rswitch_hwstamp_get,
+ .ndo_hwtstamp_set = rswitch_hwstamp_set,
};
+bool is_rdev(const struct net_device *ndev)
+{
+ return (ndev->netdev_ops == &rswitch_netdev_ops);
+}
+
static int rswitch_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info)
{
struct rswitch_device *rdev = netdev_priv(ndev);
@@ -1889,7 +1967,6 @@ static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index
rdev->np_port = rswitch_get_port_node(rdev);
rdev->disabled = !rdev->np_port;
err = of_get_ethdev_address(rdev->np_port, ndev);
- of_node_put(rdev->np_port);
if (err) {
if (is_valid_ether_addr(rdev->etha->mac_addr))
eth_hw_addr_set(ndev, rdev->etha->mac_addr);
@@ -1901,9 +1978,6 @@ static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index
if (err < 0)
goto out_get_params;
- if (rdev->priv->gwca.speed < rdev->etha->speed)
- rdev->priv->gwca.speed = rdev->etha->speed;
-
err = rswitch_rxdmac_alloc(ndev);
if (err < 0)
goto out_rxdmac;
@@ -1912,6 +1986,8 @@ static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index
if (err < 0)
goto out_txdmac;
+ list_add_tail(&rdev->list, &priv->port_list);
+
return 0;
out_txdmac:
@@ -1919,6 +1995,7 @@ out_txdmac:
out_rxdmac:
out_get_params:
+ of_node_put(rdev->np_port);
netif_napi_del(&rdev->napi);
free_netdev(ndev);
@@ -1930,8 +2007,10 @@ static void rswitch_device_free(struct rswitch_private *priv, unsigned int index
struct rswitch_device *rdev = priv->rdev[index];
struct net_device *ndev = rdev->ndev;
+ list_del(&rdev->list);
rswitch_txdmac_free(ndev);
rswitch_rxdmac_free(ndev);
+ of_node_put(rdev->np_port);
netif_napi_del(&rdev->napi);
free_netdev(ndev);
}
@@ -1975,10 +2054,11 @@ static int rswitch_init(struct rswitch_private *priv)
}
}
- rswitch_fwd_init(priv);
+ err = rswitch_fwd_init(priv);
+ if (err < 0)
+ goto err_fwd_init;
- err = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT,
- clk_get_rate(priv->clk));
+ err = rcar_gen4_ptp_register(priv->ptp_priv, clk_get_rate(priv->clk));
if (err < 0)
goto err_ptp_register;
@@ -2024,6 +2104,7 @@ err_gwca_ts_request_irq:
err_gwca_request_irq:
rcar_gen4_ptp_unregister(priv->ptp_priv);
+err_fwd_init:
err_ptp_register:
for (i = 0; i < RSWITCH_NUM_PORTS; i++)
rswitch_device_free(priv, i);
@@ -2058,6 +2139,7 @@ static int renesas_eth_sw_probe(struct platform_device *pdev)
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+
spin_lock_init(&priv->lock);
priv->clk = devm_clk_get(&pdev->dev, NULL);
@@ -2078,7 +2160,7 @@ static int renesas_eth_sw_probe(struct platform_device *pdev)
if (IS_ERR(priv->addr))
return PTR_ERR(priv->addr);
- priv->ptp_priv->addr = priv->addr + RCAR_GEN4_GPTP_OFFSET_S4;
+ priv->ptp_priv->addr = priv->addr + RSWITCH_GPTP_OFFSET_S4;
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
if (ret < 0) {
@@ -2095,6 +2177,8 @@ static int renesas_eth_sw_probe(struct platform_device *pdev)
if (!priv->gwca.queues)
return -ENOMEM;
+ INIT_LIST_HEAD(&priv->port_list);
+
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
@@ -2105,6 +2189,15 @@ static int renesas_eth_sw_probe(struct platform_device *pdev)
return ret;
}
+ if (list_empty(&priv->port_list))
+ dev_warn(&pdev->dev, "could not initialize any ports\n");
+
+ ret = rswitch_register_notifiers();
+ if (ret) {
+ dev_err(&pdev->dev, "could not register notifiers\n");
+ return ret;
+ }
+
device_set_wakeup_capable(&pdev->dev, 1);
return ret;
@@ -2138,6 +2231,7 @@ static void renesas_eth_sw_remove(struct platform_device *pdev)
{
struct rswitch_private *priv = platform_get_drvdata(pdev);
+ rswitch_unregister_notifiers();
rswitch_deinit(priv);
pm_runtime_put(&pdev->dev);
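Both this rswitch change and the rtsn.c change that follows convert the drivers from the old SIOC{G,S}HWTSTAMP ioctl plumbing to the ndo_hwtstamp_get()/ndo_hwtstamp_set() callbacks and store the requested modes directly as hwtstamp enum values. A minimal sketch of that callback pair for a hypothetical driver (the core now handles the ioctl and copy_to/from_user details):

#include <linux/net_tstamp.h>
#include <linux/netdevice.h>

struct ex_priv {
	enum hwtstamp_tx_types tx_type;
	enum hwtstamp_rx_filters rx_filter;
};

static int ex_hwtstamp_get(struct net_device *ndev,
			   struct kernel_hwtstamp_config *cfg)
{
	struct ex_priv *priv = netdev_priv(ndev);

	cfg->tx_type = priv->tx_type;
	cfg->rx_filter = priv->rx_filter;
	return 0;
}

static int ex_hwtstamp_set(struct net_device *ndev,
			   struct kernel_hwtstamp_config *cfg,
			   struct netlink_ext_ack *extack)
{
	struct ex_priv *priv = netdev_priv(ndev);

	if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	/* A real driver would also validate cfg->rx_filter here; the drivers
	 * above round unsupported filters up to HWTSTAMP_FILTER_ALL.
	 */
	priv->tx_type = cfg->tx_type;
	priv->rx_filter = cfg->rx_filter;
	return 0;
}

These callbacks are wired up through .ndo_hwtstamp_get/.ndo_hwtstamp_set in struct net_device_ops, as the rswitch hunk above does.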
diff --git a/drivers/net/ethernet/renesas/rtsn.c b/drivers/net/ethernet/renesas/rtsn.c
index 6b3f7fca8d15..fdb1e7b7fb06 100644
--- a/drivers/net/ethernet/renesas/rtsn.c
+++ b/drivers/net/ethernet/renesas/rtsn.c
@@ -62,6 +62,9 @@ struct rtsn_private {
int tx_data_irq;
int rx_data_irq;
+
+ u32 tstamp_tx_ctrl;
+ u32 tstamp_rx_ctrl;
};
static u32 rtsn_read(struct rtsn_private *priv, enum rtsn_reg reg)
@@ -162,8 +165,7 @@ static int rtsn_rx(struct net_device *ndev, int budget)
unsigned int i;
bool get_ts;
- get_ts = priv->ptp_priv->tstamp_rx_ctrl &
- RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
+ get_ts = priv->tstamp_rx_ctrl != HWTSTAMP_FILTER_NONE;
ndescriptors = priv->dirty_rx + priv->num_rx_ring - priv->cur_rx;
rx_packets = 0;
@@ -1122,31 +1124,16 @@ static int rtsn_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
static int rtsn_hwtstamp_get(struct net_device *ndev,
struct kernel_hwtstamp_config *config)
{
- struct rcar_gen4_ptp_private *ptp_priv;
struct rtsn_private *priv;
if (!netif_running(ndev))
return -ENODEV;
priv = netdev_priv(ndev);
- ptp_priv = priv->ptp_priv;
config->flags = 0;
-
- config->tx_type =
- ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
-
- switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) {
- case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT:
- config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
- break;
- case RCAR_GEN4_RXTSTAMP_TYPE_ALL:
- config->rx_filter = HWTSTAMP_FILTER_ALL;
- break;
- default:
- config->rx_filter = HWTSTAMP_FILTER_NONE;
- break;
- }
+ config->tx_type = priv->tstamp_tx_ctrl;
+ config->rx_filter = priv->tstamp_rx_ctrl;
return 0;
}
@@ -1155,26 +1142,22 @@ static int rtsn_hwtstamp_set(struct net_device *ndev,
struct kernel_hwtstamp_config *config,
struct netlink_ext_ack *extack)
{
- struct rcar_gen4_ptp_private *ptp_priv;
+ enum hwtstamp_rx_filters tstamp_rx_ctrl;
+ enum hwtstamp_tx_types tstamp_tx_ctrl;
struct rtsn_private *priv;
- u32 tstamp_rx_ctrl;
- u32 tstamp_tx_ctrl;
if (!netif_running(ndev))
return -ENODEV;
priv = netdev_priv(ndev);
- ptp_priv = priv->ptp_priv;
if (config->flags)
return -EINVAL;
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
- tstamp_tx_ctrl = 0;
- break;
case HWTSTAMP_TX_ON:
- tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED;
+ tstamp_tx_ctrl = config->tx_type;
break;
default:
return -ERANGE;
@@ -1182,21 +1165,17 @@ static int rtsn_hwtstamp_set(struct net_device *ndev,
switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
- tstamp_rx_ctrl = 0;
- break;
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED |
- RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
+ tstamp_rx_ctrl = config->rx_filter;
break;
default:
config->rx_filter = HWTSTAMP_FILTER_ALL;
- tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED |
- RCAR_GEN4_RXTSTAMP_TYPE_ALL;
+ tstamp_rx_ctrl = HWTSTAMP_FILTER_ALL;
break;
}
- ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
- ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
+ priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
+ priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
return 0;
}
@@ -1259,7 +1238,12 @@ static int rtsn_probe(struct platform_device *pdev)
priv = netdev_priv(ndev);
priv->pdev = pdev;
priv->ndev = ndev;
+
priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
+ if (!priv->ptp_priv) {
+ ret = -ENOMEM;
+ goto error_free;
+ }
spin_lock_init(&priv->lock);
platform_set_drvdata(pdev, priv);
@@ -1325,8 +1309,7 @@ static int rtsn_probe(struct platform_device *pdev)
device_set_wakeup_capable(&pdev->dev, 1);
- ret = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT,
- clk_get_rate(priv->clk));
+ ret = rcar_gen4_ptp_register(priv->ptp_priv, clk_get_rate(priv->clk));
if (ret)
goto error_pm;
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 8887b8921009..6fb0ffc1c844 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2233,7 +2233,7 @@ static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
pm_runtime_get_sync(&mdp->pdev->dev);
__sh_eth_get_regs(ndev, buf);
- pm_runtime_put_sync(&mdp->pdev->dev);
+ pm_runtime_put(&mdp->pdev->dev);
}
static u32 sh_eth_get_msglevel(struct net_device *ndev)
@@ -2360,6 +2360,7 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static void sh_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -2386,6 +2387,7 @@ static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
return 0;
}
+#endif
static const struct ethtool_ops sh_eth_ethtool_ops = {
.get_regs_len = sh_eth_get_regs_len,
@@ -2401,8 +2403,10 @@ static const struct ethtool_ops sh_eth_ethtool_ops = {
.set_ringparam = sh_eth_set_ringparam,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+#ifdef CONFIG_PM_SLEEP
.get_wol = sh_eth_get_wol,
.set_wol = sh_eth_set_wol,
+#endif
};
/* network device open function */
@@ -2447,7 +2451,7 @@ out_free_irq:
free_irq(ndev->irq, ndev);
out_napi_off:
napi_disable(&mdp->napi);
- pm_runtime_put_sync(&mdp->pdev->dev);
+ pm_runtime_put(&mdp->pdev->dev);
return ret;
}
@@ -3443,8 +3447,6 @@ static void sh_eth_drv_remove(struct platform_device *pdev)
free_netdev(ndev);
}
-#ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
static int sh_eth_wol_setup(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -3494,10 +3496,12 @@ static int sh_eth_suspend(struct device *dev)
netif_device_detach(ndev);
+ rtnl_lock();
if (mdp->wol_enabled)
ret = sh_eth_wol_setup(ndev);
else
ret = sh_eth_close(ndev);
+ rtnl_unlock();
return ret;
}
@@ -3511,10 +3515,12 @@ static int sh_eth_resume(struct device *dev)
if (!netif_running(ndev))
return 0;
+ rtnl_lock();
if (mdp->wol_enabled)
ret = sh_eth_wol_restore(ndev);
else
ret = sh_eth_open(ndev);
+ rtnl_unlock();
if (ret < 0)
return ret;
@@ -3523,28 +3529,8 @@ static int sh_eth_resume(struct device *dev)
return ret;
}
-#endif
-
-static int sh_eth_runtime_nop(struct device *dev)
-{
- /* Runtime PM callback shared between ->runtime_suspend()
- * and ->runtime_resume(). Simply returns success.
- *
- * This driver re-initializes all registers after
- * pm_runtime_get_sync() anyway so there is no need
- * to save and restore registers here.
- */
- return 0;
-}
-static const struct dev_pm_ops sh_eth_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(sh_eth_suspend, sh_eth_resume)
- SET_RUNTIME_PM_OPS(sh_eth_runtime_nop, sh_eth_runtime_nop, NULL)
-};
-#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
-#else
-#define SH_ETH_PM_OPS NULL
-#endif
+static DEFINE_SIMPLE_DEV_PM_OPS(sh_eth_dev_pm_ops, sh_eth_suspend, sh_eth_resume);
static const struct platform_device_id sh_eth_id_table[] = {
{ "sh7619-ether", (kernel_ulong_t)&sh7619_data },
@@ -3564,7 +3550,7 @@ static struct platform_driver sh_eth_driver = {
.id_table = sh_eth_id_table,
.driver = {
.name = CARDNAME,
- .pm = SH_ETH_PM_OPS,
+ .pm = pm_sleep_ptr(&sh_eth_dev_pm_ops),
.of_match_table = of_match_ptr(sh_eth_match_table),
},
};
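The sh_eth hunks above drop the hand-rolled #ifdef CONFIG_PM blocks and runtime-PM stubs in favour of the generic helpers: DEFINE_SIMPLE_DEV_PM_OPS() always defines the ops structure, and pm_sleep_ptr() evaluates to NULL when CONFIG_PM_SLEEP is off, so the compiler can discard the unused callbacks without any preprocessor guards. A minimal sketch for a hypothetical platform driver:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int ex_suspend(struct device *dev)
{
	/* quiesce the hardware */
	return 0;
}

static int ex_resume(struct device *dev)
{
	/* bring the hardware back up */
	return 0;
}

/* Defines a dev_pm_ops with only the system-sleep callbacks populated. */
static DEFINE_SIMPLE_DEV_PM_OPS(ex_pm_ops, ex_suspend, ex_resume);

static struct platform_driver ex_driver = {
	.driver = {
		.name = "ex-driver",
		/* NULL when CONFIG_PM_SLEEP is disabled, letting the unused
		 * callbacks be eliminated at compile time.
		 */
		.pm = pm_sleep_ptr(&ex_pm_ops),
	},
};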
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 84fa911c78db..36af94a2e062 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2502,7 +2502,7 @@ static void rocker_carrier_init(const struct rocker_port *rocker_port)
u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
bool link_up;
- link_up = link_status & (1 << rocker_port->pport);
+ link_up = link_status & (1ULL << rocker_port->pport);
if (link_up)
netif_carrier_on(rocker_port->dev);
else
@@ -2576,7 +2576,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
rocker_carrier_init(rocker_port);
dev->features |= NETIF_F_SG;
- dev->netns_local = true;
+ dev->netns_immutable = true;
/* MTU range: 68 - 9000 */
dev->min_mtu = ROCKER_PORT_MIN_MTU;
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 826990459fa4..61e50517c05b 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1933,7 +1933,7 @@ static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port,
spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);
/* Check if adding and already exists, or removing and can't find */
- if (!found != !removing) {
+ if (!found == removing) {
kfree(fdb);
if (!found && removing)
return 0;
@@ -1982,7 +1982,7 @@ err_out:
static void ofdpa_fdb_cleanup(struct timer_list *t)
{
- struct ofdpa *ofdpa = from_timer(ofdpa, t, fdb_cleanup_timer);
+ struct ofdpa *ofdpa = timer_container_of(ofdpa, t, fdb_cleanup_timer);
struct ofdpa_port *ofdpa_port;
struct ofdpa_fdb_tbl_entry *entry;
struct hlist_node *tmp;
@@ -2386,7 +2386,7 @@ static void ofdpa_fini(struct rocker *rocker)
struct hlist_node *tmp;
int bkt;
- del_timer_sync(&ofdpa->fdb_cleanup_timer);
+ timer_delete_sync(&ofdpa->fdb_cleanup_timer);
flush_workqueue(rocker->rocker_owq);
spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
index 4a439b34114d..ad73733644f9 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -308,8 +308,8 @@ static int sxgbe_set_coalesce(struct net_device *dev,
return 0;
}
-static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv,
- struct ethtool_rxnfc *cmd)
+static int sxgbe_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
cmd->data = 0;
@@ -344,26 +344,11 @@ static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv,
return 0;
}
-static int sxgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
- u32 *rule_locs)
+static int sxgbe_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_GRXFH:
- ret = sxgbe_get_rss_hash_opts(priv, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
-static int sxgbe_set_rss_hash_opt(struct sxgbe_priv_data *priv,
- struct ethtool_rxnfc *cmd)
-{
u32 reg_val = 0;
/* RSS does not support anything other than hashing
@@ -421,22 +406,6 @@ static int sxgbe_set_rss_hash_opt(struct sxgbe_priv_data *priv,
return 0;
}
-static int sxgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
-{
- struct sxgbe_priv_data *priv = netdev_priv(dev);
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = sxgbe_set_rss_hash_opt(priv, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
static void sxgbe_get_regs(struct net_device *dev,
struct ethtool_regs *regs, void *space)
{
@@ -489,8 +458,8 @@ static const struct ethtool_ops sxgbe_ethtool_ops = {
.get_channels = sxgbe_get_channels,
.get_coalesce = sxgbe_get_coalesce,
.set_coalesce = sxgbe_set_coalesce,
- .get_rxnfc = sxgbe_get_rxnfc,
- .set_rxnfc = sxgbe_set_rxnfc,
+ .get_rxfh_fields = sxgbe_get_rxfh_fields,
+ .set_rxfh_fields = sxgbe_set_rxfh_fields,
.get_regs = sxgbe_get_regs,
.get_regs_len = sxgbe_get_regs_len,
.get_eee = sxgbe_get_eee,
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 12c8396b6942..849c5a6c2af1 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -91,7 +91,7 @@ void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv)
{
/* Exit and disable EEE in case of we are in LPI state. */
priv->hw->mac->reset_eee_mode(priv->ioaddr);
- del_timer_sync(&priv->eee_ctrl_timer);
+ timer_delete_sync(&priv->eee_ctrl_timer);
priv->tx_path_in_lpi_mode = false;
}
@@ -104,7 +104,8 @@ void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv)
*/
static void sxgbe_eee_ctrl_timer(struct timer_list *t)
{
- struct sxgbe_priv_data *priv = from_timer(priv, t, eee_ctrl_timer);
+ struct sxgbe_priv_data *priv = timer_container_of(priv, t,
+ eee_ctrl_timer);
sxgbe_enable_eee_mode(priv);
mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
@@ -1012,7 +1013,7 @@ static void sxgbe_disable_mtl_engine(struct sxgbe_priv_data *priv)
*/
static void sxgbe_tx_timer(struct timer_list *t)
{
- struct sxgbe_tx_queue *p = from_timer(p, t, txtimer);
+ struct sxgbe_tx_queue *p = timer_container_of(p, t, txtimer);
sxgbe_tx_queue_clean(p);
}
@@ -1044,7 +1045,7 @@ static void sxgbe_tx_del_timer(struct sxgbe_priv_data *priv)
SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
struct sxgbe_tx_queue *p = priv->txq[queue_num];
- del_timer_sync(&p->txtimer);
+ timer_delete_sync(&p->txtimer);
}
}
@@ -1208,7 +1209,7 @@ static int sxgbe_release(struct net_device *dev)
struct sxgbe_priv_data *priv = netdev_priv(dev);
if (priv->eee_enabled)
- del_timer_sync(&priv->eee_ctrl_timer);
+ timer_delete_sync(&priv->eee_ctrl_timer);
/* Stop and disconnect the PHY */
if (dev->phydev) {
@@ -1520,8 +1521,10 @@ static int sxgbe_rx(struct sxgbe_priv_data *priv, int limit)
skb = priv->rxq[qnum]->rx_skbuff[entry];
- if (unlikely(!skb))
+ if (unlikely(!skb)) {
netdev_err(priv->dev, "rx descriptor is not consistent\n");
+ break;
+ }
prefetch(skb->data - NET_IP_ALIGN);
priv->rxq[qnum]->rx_skbuff[entry] = NULL;
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index 9319a2675e7b..20dad39b5ab9 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -170,7 +170,7 @@ ether3_setbuffer(struct net_device *dev, buffer_rw_t read, int start)
*/
static void ether3_ledoff(struct timer_list *t)
{
- struct dev_priv *private = from_timer(private, t, timer);
+ struct dev_priv *private = timer_container_of(private, t, timer);
struct net_device *dev = private->dev;
ether3_outw(priv(dev)->regs.config2 |= CFG2_CTRLO, REG_CONFIG2);
@@ -181,7 +181,7 @@ static void ether3_ledoff(struct timer_list *t)
*/
static inline void ether3_ledon(struct net_device *dev)
{
- del_timer(&priv(dev)->timer);
+ timer_delete(&priv(dev)->timer);
priv(dev)->timer.expires = jiffies + HZ / 50; /* leave on for 1/50th second */
add_timer(&priv(dev)->timer);
if (priv(dev)->regs.config2 & CFG2_CTRLO)
@@ -454,7 +454,7 @@ static void ether3_timeout(struct net_device *dev, unsigned int txqueue)
{
unsigned long flags;
- del_timer(&priv(dev)->timer);
+ timer_delete(&priv(dev)->timer);
local_irq_save(flags);
printk(KERN_ERR "%s: transmit timed out, network cable problem?\n", dev->name);
@@ -851,7 +851,7 @@ static void ether3_remove(struct expansion_card *ec)
ecard_set_drvdata(ec, NULL);
unregister_netdev(dev);
- del_timer_sync(&priv(dev)->timer);
+ timer_delete_sync(&priv(dev)->timer);
free_netdev(dev);
ecard_release_resources(ec);
}
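The rocker, sxgbe and seeq hunks above also pick up the tree-wide timer API renames: from_timer() becomes timer_container_of(), and del_timer()/del_timer_sync() become timer_delete()/timer_delete_sync(); behaviour is unchanged. A small, hypothetical example of the resulting idiom:

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/types.h>

struct ex_dev {
	struct timer_list led_timer;
	bool led_on;
};

static void ex_led_off(struct timer_list *t)
{
	/* Recover the containing structure from the timer_list pointer. */
	struct ex_dev *ed = timer_container_of(ed, t, led_timer);

	ed->led_on = false;
}

static void ex_led_on(struct ex_dev *ed)
{
	ed->led_on = true;
	mod_timer(&ed->led_timer, jiffies + HZ / 50);	/* off again shortly */
}

static void ex_init(struct ex_dev *ed)
{
	timer_setup(&ed->led_timer, ex_led_off, 0);
}

static void ex_teardown(struct ex_dev *ed)
{
	/* Wait for a running callback to finish before tearing down. */
	timer_delete_sync(&ed->led_timer);
}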
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 3eb55dcfa8a6..c4c43434f314 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -38,8 +38,9 @@ config SFC_MTD
default y
help
This exposes the on-board flash and/or EEPROM as MTD devices
- (e.g. /dev/mtd1). This is required to update the firmware or
- the boot configuration under Linux.
+ (e.g. /dev/mtd1). This is required to update the boot
+ configuration under Linux, or to use some older userland tools to
+ update the firmware.
config SFC_MCDI_MON
bool "Solarflare SFC9100-family hwmon support"
depends on SFC && HWMON && !(SFC=y && HWMON=m)
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 8f446b9bd5ee..d99039ec468d 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -7,7 +7,7 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \
mcdi_functions.o mcdi_filters.o mcdi_mon.o \
ef100.o ef100_nic.o ef100_netdev.o \
ef100_ethtool.o ef100_rx.o ef100_tx.o \
- efx_devlink.o
+ efx_devlink.o efx_reflash.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o ef100_rep.o \
mae.o tc.o tc_bindings.o tc_counters.o \
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index de131fc5fa0b..fcec81f862ec 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1751,7 +1751,7 @@ static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
#endif
}
-static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
+static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 **names)
{
DECLARE_BITMAP(mask, EF10_STAT_COUNT);
@@ -3501,7 +3501,7 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
const struct efx_ef10_nvram_type_info *info;
- size_t size, erase_size, outlen;
+ size_t size, erase_size, write_size, outlen;
int type_idx = 0;
bool protected;
int rc;
@@ -3516,7 +3516,8 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
if (info->port != efx_port_num(efx))
return -ENODEV;
- rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
+ rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &write_size,
+ &protected);
if (rc)
return rc;
if (protected &&
@@ -3561,6 +3562,8 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
if (!erase_size)
part->common.mtd.flags |= MTD_NO_ERASE;
+ part->common.mtd.writesize = write_size;
+
return 0;
}
@@ -3982,7 +3985,6 @@ static int efx_ef10_udp_tnl_unset_port(struct net_device *dev,
static const struct udp_tunnel_nic_info efx_ef10_udp_tunnels = {
.set_port = efx_ef10_udp_tnl_set_port,
.unset_port = efx_ef10_udp_tnl_unset_port,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
.tables = {
{
.n_entries = 16,
@@ -4416,6 +4418,7 @@ const struct efx_nic_type efx_x4_nic_type = {
.can_rx_scatter = true,
.always_rx_scatter = true,
.option_descriptors = true,
+ .flash_auto_partition = true,
.min_interrupt_mode = EFX_INT_MODE_MSIX,
.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
.offload_features = EF10_OFFLOAD_FEATURES,
diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c
index 5c2551369812..6c3b74000d3b 100644
--- a/drivers/net/ethernet/sfc/ef100_ethtool.c
+++ b/drivers/net/ethernet/sfc/ef100_ethtool.c
@@ -59,6 +59,7 @@ const struct ethtool_ops ef100_ethtool_ops = {
.get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
.get_rxfh_key_size = efx_ethtool_get_rxfh_key_size,
.rxfh_per_ctx_key = true,
+ .cap_rss_rxnfc_adds = true,
.rxfh_priv_size = sizeof(struct efx_rss_context_priv),
.get_rxfh = efx_ethtool_get_rxfh,
.set_rxfh = efx_ethtool_set_rxfh,
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c
index 7f7d560cb2b4..3a06e3b1bd6b 100644
--- a/drivers/net/ethernet/sfc/ef100_netdev.c
+++ b/drivers/net/ethernet/sfc/ef100_netdev.c
@@ -450,9 +450,9 @@ int ef100_probe_netdev(struct efx_probe_data *probe_data)
net_dev->hw_enc_features |= efx->type->offload_features;
net_dev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_ALL_TSO;
- netif_set_tso_max_segs(net_dev,
- ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT);
- efx->mdio.dev = net_dev;
+ nic_data = efx->nic_data;
+ netif_set_tso_max_size(efx->net_dev, nic_data->tso_max_payload_len);
+ netif_set_tso_max_segs(efx->net_dev, nic_data->tso_max_payload_num_segs);
rc = efx_ef100_init_datapath_caps(efx);
if (rc < 0)
@@ -478,7 +478,6 @@ int ef100_probe_netdev(struct efx_probe_data *probe_data)
/* Don't fail init if RSS setup doesn't work. */
efx_mcdi_push_default_indir_table(efx, efx->n_rx_channels);
- nic_data = efx->nic_data;
rc = ef100_get_mac_address(efx, net_dev->perm_addr, CLIENT_HANDLE_SELF,
efx->type->is_vf);
if (rc)
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index 6da06931187d..3ad95a4c8af2 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -583,7 +583,7 @@ static const struct efx_hw_stat_desc ef100_stat_desc[EF100_STAT_COUNT] = {
EFX_GENERIC_SW_STAT(rx_noskb_drops),
};
-static size_t ef100_describe_stats(struct efx_nic *efx, u8 *names)
+static size_t ef100_describe_stats(struct efx_nic *efx, u8 **names)
{
DECLARE_BITMAP(mask, EF100_STAT_COUNT) = {};
@@ -887,8 +887,7 @@ static int ef100_process_design_param(struct efx_nic *efx,
case ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS:
/* We always put HDR_NUM_SEGS=1 in our TSO descriptors */
if (!reader->value) {
- netif_err(efx, probe, efx->net_dev,
- "TSO_MAX_HDR_NUM_SEGS < 1\n");
+ pci_err(efx->pci_dev, "TSO_MAX_HDR_NUM_SEGS < 1\n");
return -EOPNOTSUPP;
}
return 0;
@@ -901,32 +900,28 @@ static int ef100_process_design_param(struct efx_nic *efx,
*/
if (!reader->value || reader->value > EFX_MIN_DMAQ_SIZE ||
EFX_MIN_DMAQ_SIZE % (u32)reader->value) {
- netif_err(efx, probe, efx->net_dev,
- "%s size granularity is %llu, can't guarantee safety\n",
- reader->type == ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY ? "RXQ" : "TXQ",
- reader->value);
+ pci_err(efx->pci_dev,
+ "%s size granularity is %llu, can't guarantee safety\n",
+ reader->type == ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY ? "RXQ" : "TXQ",
+ reader->value);
return -EOPNOTSUPP;
}
return 0;
case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN:
nic_data->tso_max_payload_len = min_t(u64, reader->value,
GSO_LEGACY_MAX_SIZE);
- netif_set_tso_max_size(efx->net_dev,
- nic_data->tso_max_payload_len);
return 0;
case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS:
nic_data->tso_max_payload_num_segs = min_t(u64, reader->value, 0xffff);
- netif_set_tso_max_segs(efx->net_dev,
- nic_data->tso_max_payload_num_segs);
return 0;
case ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES:
nic_data->tso_max_frames = min_t(u64, reader->value, 0xffff);
return 0;
case ESE_EF100_DP_GZ_COMPAT:
if (reader->value) {
- netif_err(efx, probe, efx->net_dev,
- "DP_COMPAT has unknown bits %#llx, driver not compatible with this hw\n",
- reader->value);
+ pci_err(efx->pci_dev,
+ "DP_COMPAT has unknown bits %#llx, driver not compatible with this hw\n",
+ reader->value);
return -EOPNOTSUPP;
}
return 0;
@@ -946,10 +941,10 @@ static int ef100_process_design_param(struct efx_nic *efx,
* So the value of this shouldn't matter.
*/
if (reader->value != ESE_EF100_DP_GZ_VI_STRIDES_DEFAULT)
- netif_dbg(efx, probe, efx->net_dev,
- "NIC has other than default VI_STRIDES (mask "
- "%#llx), early probing might use wrong one\n",
- reader->value);
+ pci_dbg(efx->pci_dev,
+ "NIC has other than default VI_STRIDES (mask "
+ "%#llx), early probing might use wrong one\n",
+ reader->value);
return 0;
case ESE_EF100_DP_GZ_RX_MAX_RUNT:
/* Driver doesn't look at L2_STATUS:LEN_ERR bit, so we don't
@@ -961,9 +956,9 @@ static int ef100_process_design_param(struct efx_nic *efx,
/* Host interface says "Drivers should ignore design parameters
* that they do not recognise."
*/
- netif_dbg(efx, probe, efx->net_dev,
- "Ignoring unrecognised design parameter %u\n",
- reader->type);
+ pci_dbg(efx->pci_dev,
+ "Ignoring unrecognised design parameter %u\n",
+ reader->type);
return 0;
}
}
@@ -999,13 +994,13 @@ static int ef100_check_design_params(struct efx_nic *efx)
*/
if (reader.state != EF100_TLV_TYPE) {
if (reader.state == EF100_TLV_TYPE_CONT)
- netif_err(efx, probe, efx->net_dev,
- "truncated design parameter (incomplete type %u)\n",
- reader.type);
+ pci_err(efx->pci_dev,
+ "truncated design parameter (incomplete type %u)\n",
+ reader.type);
else
- netif_err(efx, probe, efx->net_dev,
- "truncated design parameter %u\n",
- reader.type);
+ pci_err(efx->pci_dev,
+ "truncated design parameter %u\n",
+ reader.type);
rc = -EIO;
}
out:
diff --git a/drivers/net/ethernet/sfc/ef100_tx.c b/drivers/net/ethernet/sfc/ef100_tx.c
index e6b6be549581..03005757c060 100644
--- a/drivers/net/ethernet/sfc/ef100_tx.c
+++ b/drivers/net/ethernet/sfc/ef100_tx.c
@@ -189,6 +189,7 @@ static void ef100_make_tso_desc(struct efx_nic *efx,
{
bool gso_partial = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
unsigned int len, ip_offset, tcp_offset, payload_segs;
+ u32 mangleid_outer = ESE_GZ_TX_DESC_IP4_ID_INC_MOD16;
u32 mangleid = ESE_GZ_TX_DESC_IP4_ID_INC_MOD16;
unsigned int outer_ip_offset, outer_l4_offset;
u16 vlan_tci = skb_vlan_tag_get(skb);
@@ -200,8 +201,17 @@ static void ef100_make_tso_desc(struct efx_nic *efx,
bool outer_csum;
u32 paylen;
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID)
- mangleid = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
+ if (encap) {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID_INNER)
+ mangleid = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID)
+ mangleid_outer = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
+ } else {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID)
+ mangleid = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
+ mangleid_outer = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
+ }
+
if (efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_TX)
vlan_enable = skb_vlan_tag_present(skb);
@@ -245,8 +255,7 @@ static void ef100_make_tso_desc(struct efx_nic *efx,
ESF_GZ_TX_TSO_OUTER_L4_OFF_W, outer_l4_offset >> 1,
ESF_GZ_TX_TSO_ED_OUTER_UDP_LEN, udp_encap && !gso_partial,
ESF_GZ_TX_TSO_ED_OUTER_IP_LEN, encap && !gso_partial,
- ESF_GZ_TX_TSO_ED_OUTER_IP4_ID, encap ? mangleid :
- ESE_GZ_TX_DESC_IP4_ID_NO_OP,
+ ESF_GZ_TX_TSO_ED_OUTER_IP4_ID, mangleid_outer,
ESF_GZ_TX_TSO_VLAN_INSERT_EN, vlan_enable,
ESF_GZ_TX_TSO_VLAN_INSERT_TCI, vlan_tci
);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 90bb7db15519..112e55b98ed3 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -418,14 +418,6 @@ unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs)
return usecs * 1000 / efx->timer_quantum_ns;
}
-unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks)
-{
- /* We must round up when converting ticks to microseconds
- * because we round down when converting the other way.
- */
- return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000);
-}
-
/* Set interrupt moderation parameters */
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
unsigned int rx_usecs, bool rx_adaptive,
@@ -484,28 +476,6 @@ void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
/**************************************************************************
*
- * ioctls
- *
- *************************************************************************/
-
-/* Net device ioctl
- * Context: process, rtnl_lock() held.
- */
-static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
-{
- struct efx_nic *efx = efx_netdev_priv(net_dev);
- struct mii_ioctl_data *data = if_mii(ifr);
-
- /* Convert phy_id from older PRTAD/DEVAD format */
- if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
- (data->phy_id & 0xfc00) == 0x0400)
- data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
-
- return mdio_mii_ioctl(&efx->mdio, data, cmd);
-}
-
-/**************************************************************************
- *
* Kernel net device interface
*
*************************************************************************/
@@ -601,7 +571,6 @@ static const struct net_device_ops efx_netdev_ops = {
.ndo_tx_timeout = efx_watchdog,
.ndo_start_xmit = efx_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_eth_ioctl = efx_ioctl,
.ndo_change_mtu = efx_change_mtu,
.ndo_set_mac_address = efx_set_mac_address,
.ndo_set_rx_mode = efx_set_rx_mode,
@@ -1209,7 +1178,6 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
rc = efx_init_struct(efx, pci_dev);
if (rc)
goto fail1;
- efx->mdio.dev = net_dev;
pci_info(pci_dev, "Solarflare NIC detected\n");
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 7a6cab883d66..45e191686625 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -168,7 +168,6 @@ extern const struct ethtool_ops efx_ethtool_ops;
/* Global */
unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs);
-unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks);
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
unsigned int rx_usecs, bool rx_adaptive,
bool rx_may_override_tx);
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index 06b4f52713ef..ed3a96ebc7f3 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -216,8 +216,8 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
if (efx_separate_tx_channels) {
efx->n_tx_channels =
- min(max(n_channels / 2, 1U),
- efx->max_tx_channels);
+ clamp(n_channels / 2, 1U,
+ efx->max_tx_channels);
efx->tx_channel_offset =
n_channels - efx->n_tx_channels;
efx->n_rx_channels =
@@ -1281,7 +1281,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
time = jiffies - channel->rfs_last_expiry;
/* Would our quota be >= 20? */
if (channel->rfs_filter_count * time >= 600 * HZ)
- mod_delayed_work(system_wq, &channel->filter_work, 0);
+ mod_delayed_work(system_percpu_wq, &channel->filter_work, 0);
#endif
/* There is no race here; although napi_disable() will
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index 13cf647051af..e8fdbb62d872 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -635,22 +635,6 @@ int __efx_reconfigure_port(struct efx_nic *efx)
return rc;
}
-/* Reinitialise the MAC to pick up new PHY settings, even if the port is
- * disabled.
- */
-int efx_reconfigure_port(struct efx_nic *efx)
-{
- int rc;
-
- EFX_ASSERT_RESET_SERIALISED(efx);
-
- mutex_lock(&efx->mac_lock);
- rc = __efx_reconfigure_port(efx);
- mutex_unlock(&efx->mac_lock);
-
- return rc;
-}
-
/**************************************************************************
*
* Device reset and suspend
@@ -1019,6 +1003,7 @@ int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev)
INIT_LIST_HEAD(&efx->vf_reps);
INIT_WORK(&efx->mac_work, efx_mac_work);
init_waitqueue_head(&efx->flush_wq);
+ mutex_init(&efx->reflash_mutex);
efx->tx_queues_per_channel = 1;
efx->rxq_entries = EFX_DEFAULT_DMAQ_SIZE;
@@ -1273,9 +1258,6 @@ out:
/* For simplicity and reliability, we always require a slot reset and try to
* reset the hardware when a pci error affecting the device is detected.
- * We leave both the link_reset and mmio_enabled callback unimplemented:
- * with our request for slot reset the mmio_enabled callback will never be
- * called, and the link_reset callback is not used by AER or EEH mechanisms.
*/
const struct pci_error_handlers efx_err_handlers = {
.error_detected = efx_io_error_detected,
diff --git a/drivers/net/ethernet/sfc/efx_common.h b/drivers/net/ethernet/sfc/efx_common.h
index 2c54dac3e662..19a8ca530969 100644
--- a/drivers/net/ethernet/sfc/efx_common.h
+++ b/drivers/net/ethernet/sfc/efx_common.h
@@ -40,7 +40,6 @@ void efx_destroy_reset_workqueue(void);
void efx_start_monitor(struct efx_nic *efx);
int __efx_reconfigure_port(struct efx_nic *efx);
-int efx_reconfigure_port(struct efx_nic *efx);
#define EFX_ASSERT_RESET_SERIALISED(efx) \
do { \
diff --git a/drivers/net/ethernet/sfc/efx_devlink.c b/drivers/net/ethernet/sfc/efx_devlink.c
index 3cd750820fdd..d842c60dfc10 100644
--- a/drivers/net/ethernet/sfc/efx_devlink.c
+++ b/drivers/net/ethernet/sfc/efx_devlink.c
@@ -19,6 +19,7 @@
#include "mae.h"
#include "ef100_rep.h"
#endif
+#include "efx_reflash.h"
struct efx_devlink {
struct efx_nic *efx;
@@ -615,7 +616,19 @@ static int efx_devlink_info_get(struct devlink *devlink,
return 0;
}
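+/* devlink .flash_update handler: hand the firmware image to the reflash code */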
+static int efx_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct efx_devlink *devlink_private = devlink_priv(devlink);
+ struct efx_nic *efx = devlink_private->efx;
+
+ return efx_reflash_flash_firmware(efx, params->fw, extack);
+}
+
static const struct devlink_ops sfc_devlink_ops = {
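+ /* No optional flash update parameters (e.g. overwrite mask) are accepted */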
+ .supported_flash_update_params = 0,
+ .flash_update = efx_devlink_flash_update,
.info_get = efx_devlink_info_get,
};
diff --git a/drivers/net/ethernet/sfc/efx_reflash.c b/drivers/net/ethernet/sfc/efx_reflash.c
new file mode 100644
index 000000000000..b12e95f1c80a
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx_reflash.c
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for AMD network controllers and boards
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/crc32.h>
+#include <net/devlink.h>
+#include "efx_reflash.h"
+#include "net_driver.h"
+#include "fw_formats.h"
+#include "mcdi_pcol.h"
+#include "mcdi.h"
+
+/* Try to parse a Reflash header at the specified offset */
+static bool efx_reflash_parse_reflash_header(const struct firmware *fw,
+ size_t header_offset, u32 *type,
+ u32 *subtype, const u8 **data,
+ size_t *data_size)
+{
+ size_t header_end, trailer_offset, trailer_end;
+ u32 magic, version, payload_size, header_len;
+ const u8 *header, *trailer;
+ u32 expected_crc, crc;
+
+ if (check_add_overflow(header_offset, EFX_REFLASH_HEADER_LENGTH_OFST +
+ EFX_REFLASH_HEADER_LENGTH_LEN,
+ &header_end))
+ return false;
+ if (fw->size < header_end)
+ return false;
+
+ header = fw->data + header_offset;
+ magic = get_unaligned_le32(header + EFX_REFLASH_HEADER_MAGIC_OFST);
+ if (magic != EFX_REFLASH_HEADER_MAGIC_VALUE)
+ return false;
+
+ version = get_unaligned_le32(header + EFX_REFLASH_HEADER_VERSION_OFST);
+ if (version != EFX_REFLASH_HEADER_VERSION_VALUE)
+ return false;
+
+ payload_size = get_unaligned_le32(header + EFX_REFLASH_HEADER_PAYLOAD_SIZE_OFST);
+ header_len = get_unaligned_le32(header + EFX_REFLASH_HEADER_LENGTH_OFST);
+ if (check_add_overflow(header_offset, header_len, &trailer_offset) ||
+ check_add_overflow(trailer_offset, payload_size, &trailer_offset) ||
+ check_add_overflow(trailer_offset, EFX_REFLASH_TRAILER_LEN,
+ &trailer_end))
+ return false;
+ if (fw->size < trailer_end)
+ return false;
+
+ trailer = fw->data + trailer_offset;
+ expected_crc = get_unaligned_le32(trailer + EFX_REFLASH_TRAILER_CRC_OFST);
+ /* Addition could overflow u32, but not size_t since we already
+ * checked trailer_offset didn't overflow. So cast to size_t first.
+ */
+ crc = crc32_le(0, header, (size_t)header_len + payload_size);
+ if (crc != expected_crc)
+ return false;
+
+ *type = get_unaligned_le32(header + EFX_REFLASH_HEADER_FIRMWARE_TYPE_OFST);
+ *subtype = get_unaligned_le32(header + EFX_REFLASH_HEADER_FIRMWARE_SUBTYPE_OFST);
+ if (*type == EFX_REFLASH_FIRMWARE_TYPE_BUNDLE) {
+ /* All the bundle data is written verbatim to NVRAM */
+ *data = fw->data;
+ *data_size = fw->size;
+ } else {
+ /* Other payload types strip the reflash header and trailer
+ * from the data written to NVRAM
+ */
+ *data = header + header_len;
+ *data_size = payload_size;
+ }
+
+ return true;
+}
+
+/* Map from FIRMWARE_TYPE to NVRAM_PARTITION_TYPE */
+static int efx_reflash_partition_type(u32 type, u32 subtype,
+ u32 *partition_type,
+ u32 *partition_subtype)
+{
+ int rc = 0;
+
+ switch (type) {
+ case EFX_REFLASH_FIRMWARE_TYPE_BOOTROM:
+ *partition_type = NVRAM_PARTITION_TYPE_EXPANSION_ROM;
+ *partition_subtype = subtype;
+ break;
+ case EFX_REFLASH_FIRMWARE_TYPE_BUNDLE:
+ *partition_type = NVRAM_PARTITION_TYPE_BUNDLE;
+ *partition_subtype = subtype;
+ break;
+ default:
+ /* Not supported */
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+/* Try to parse a SmartNIC image header at the specified offset */
+static bool efx_reflash_parse_snic_header(const struct firmware *fw,
+ size_t header_offset,
+ u32 *partition_type,
+ u32 *partition_subtype,
+ const u8 **data, size_t *data_size)
+{
+ u32 magic, version, payload_size, header_len, expected_crc, crc;
+ size_t header_end, payload_end;
+ const u8 *header;
+
+ if (check_add_overflow(header_offset, EFX_SNICIMAGE_HEADER_MINLEN,
+ &header_end) ||
+ fw->size < header_end)
+ return false;
+
+ header = fw->data + header_offset;
+ magic = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_MAGIC_OFST);
+ if (magic != EFX_SNICIMAGE_HEADER_MAGIC_VALUE)
+ return false;
+
+ version = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_VERSION_OFST);
+ if (version != EFX_SNICIMAGE_HEADER_VERSION_VALUE)
+ return false;
+
+ header_len = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_LENGTH_OFST);
+ if (check_add_overflow(header_offset, header_len, &header_end))
+ return false;
+ payload_size = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_PAYLOAD_SIZE_OFST);
+ if (check_add_overflow(header_end, payload_size, &payload_end) ||
+ fw->size < payload_end)
+ return false;
+
+ expected_crc = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_CRC_OFST);
+
+ /* Calculate CRC omitting the expected CRC field itself */
+ crc = crc32_le(~0, header, EFX_SNICIMAGE_HEADER_CRC_OFST);
+ crc = ~crc32_le(crc,
+ header + EFX_SNICIMAGE_HEADER_CRC_OFST +
+ EFX_SNICIMAGE_HEADER_CRC_LEN,
+ header_len + payload_size - EFX_SNICIMAGE_HEADER_CRC_OFST -
+ EFX_SNICIMAGE_HEADER_CRC_LEN);
+ if (crc != expected_crc)
+ return false;
+
+ *partition_type =
+ get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_PARTITION_TYPE_OFST);
+ *partition_subtype =
+ get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_PARTITION_SUBTYPE_OFST);
+ *data = fw->data;
+ *data_size = fw->size;
+ return true;
+}
+
+/* Try to parse a SmartNIC bundle header at the specified offset */
+static bool efx_reflash_parse_snic_bundle_header(const struct firmware *fw,
+ size_t header_offset,
+ u32 *partition_type,
+ u32 *partition_subtype,
+ const u8 **data,
+ size_t *data_size)
+{
+ u32 magic, version, bundle_type, header_len, expected_crc, crc;
+ size_t header_end;
+ const u8 *header;
+
+ if (check_add_overflow(header_offset, EFX_SNICBUNDLE_HEADER_LEN,
+ &header_end))
+ return false;
+ if (fw->size < header_end)
+ return false;
+
+ header = fw->data + header_offset;
+ magic = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_MAGIC_OFST);
+ if (magic != EFX_SNICBUNDLE_HEADER_MAGIC_VALUE)
+ return false;
+
+ version = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_VERSION_OFST);
+ if (version != EFX_SNICBUNDLE_HEADER_VERSION_VALUE)
+ return false;
+
+ bundle_type = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_BUNDLE_TYPE_OFST);
+ if (bundle_type != NVRAM_PARTITION_TYPE_BUNDLE)
+ return false;
+
+ header_len = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_LENGTH_OFST);
+ if (header_len != EFX_SNICBUNDLE_HEADER_LEN)
+ return false;
+
+ expected_crc = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_CRC_OFST);
+ crc = ~crc32_le(~0, header, EFX_SNICBUNDLE_HEADER_CRC_OFST);
+ if (crc != expected_crc)
+ return false;
+
+ *partition_type = NVRAM_PARTITION_TYPE_BUNDLE;
+ *partition_subtype = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_BUNDLE_SUBTYPE_OFST);
+ *data = fw->data;
+ *data_size = fw->size;
+ return true;
+}
+
+/* Try to find a valid firmware payload in the firmware data.
+ * When we recognise a valid header, we parse it for the partition type
+ * (so we know where to ask the MC to write it to) and the location of
+ * the data blob to write.
+ */
+static int efx_reflash_parse_firmware_data(const struct firmware *fw,
+ u32 *partition_type,
+ u32 *partition_subtype,
+ const u8 **data, size_t *data_size)
+{
+ size_t header_offset;
+ u32 type, subtype;
+
+ /* Some packaging formats (such as CMS/PKCS#7 signed images)
+ * prepend a header for which finding the size is a non-trivial
+ * task, so step through the firmware data until we find a valid
+ * header.
+ *
+ * The checks are intended to reject firmware data that is clearly not
+ * in the expected format. They do not need to be exhaustive as the
+ * running firmware will perform its own comprehensive validity and
+ * compatibility checks during the update procedure.
+ *
+ * Firmware packages may contain multiple reflash images, e.g. a
+ * bundle containing one or more other images. Only check the
+ * outermost container by stopping after the first candidate image
+ * found, even if it is for an unsupported partition type.
+ */
+ for (header_offset = 0; header_offset < fw->size; header_offset++) {
+ if (efx_reflash_parse_snic_bundle_header(fw, header_offset,
+ partition_type,
+ partition_subtype,
+ data, data_size))
+ return 0;
+
+ if (efx_reflash_parse_snic_header(fw, header_offset,
+ partition_type,
+ partition_subtype, data,
+ data_size))
+ return 0;
+
+ if (efx_reflash_parse_reflash_header(fw, header_offset, &type,
+ &subtype, data, data_size))
+ return efx_reflash_partition_type(type, subtype,
+ partition_type,
+ partition_subtype);
+ }
+
+ return -EINVAL;
+}
+
+/* Limit the number of status updates during the erase or write phases */
+#define EFX_DEVLINK_STATUS_UPDATE_COUNT 50
+
+/* Expected timeout for efx_mcdi_nvram_update_finish_polled() */
+#define EFX_DEVLINK_UPDATE_FINISH_TIMEOUT 900
+
+/* Ideal erase chunk size. This balances minimising the number of MCDI
+ * requests needed to erase an entire partition against the risk of tripping
+ * the MCDI RPC timeout.
+ */
+#define EFX_NVRAM_ERASE_IDEAL_CHUNK_SIZE (64 * 1024)
+
+static int efx_reflash_erase_partition(struct efx_nic *efx,
+ struct netlink_ext_ack *extack,
+ struct devlink *devlink, u32 type,
+ size_t partition_size,
+ size_t align)
+{
+ size_t chunk, offset, next_update;
+ int rc;
+
+ /* Partitions that cannot be erased or do not require erase before
+ * write are advertised with an erase alignment/sector size of zero.
+ */
+ if (align == 0)
+ /* Nothing to do */
+ return 0;
+
+ if (partition_size % align)
+ return -EINVAL;
+
+ /* Erase the entire NVRAM partition a chunk at a time to avoid
+ * potentially tripping the MCDI RPC timeout.
+ */
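+ /* Use whole erase sectors, as close to the ideal chunk size as the alignment allows */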
+ if (align >= EFX_NVRAM_ERASE_IDEAL_CHUNK_SIZE)
+ chunk = align;
+ else
+ chunk = rounddown(EFX_NVRAM_ERASE_IDEAL_CHUNK_SIZE, align);
+
+ for (offset = 0, next_update = 0; offset < partition_size; offset += chunk) {
+ if (offset >= next_update) {
+ devlink_flash_update_status_notify(devlink, "Erasing",
+ NULL, offset,
+ partition_size);
+ next_update += partition_size / EFX_DEVLINK_STATUS_UPDATE_COUNT;
+ }
+
+ chunk = min_t(size_t, partition_size - offset, chunk);
+ rc = efx_mcdi_nvram_erase(efx, type, offset, chunk);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Erase failed for NVRAM partition %#x at %#zx-%#zx",
+ type, offset, offset + chunk - 1);
+ return rc;
+ }
+ }
+
+ devlink_flash_update_status_notify(devlink, "Erasing", NULL,
+ partition_size, partition_size);
+
+ return 0;
+}
+
+static int efx_reflash_write_partition(struct efx_nic *efx,
+ struct netlink_ext_ack *extack,
+ struct devlink *devlink, u32 type,
+ const u8 *data, size_t data_size,
+ size_t align)
+{
+ size_t write_max, chunk, offset, next_update;
+ int rc;
+
+ if (align == 0)
+ return -EINVAL;
+
+ /* Write the NVRAM partition in chunks that are the largest multiple
+ * of the partition's required write alignment that will fit into the
+ * MCDI NVRAM_WRITE RPC payload.
+ */
+ if (efx->type->mcdi_max_ver < 2)
+ write_max = MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN *
+ MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM;
+ else
+ write_max = MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN *
+ MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM_MCDI2;
+ chunk = rounddown(write_max, align);
+
+ for (offset = 0, next_update = 0; offset + chunk <= data_size; offset += chunk) {
+ if (offset >= next_update) {
+ devlink_flash_update_status_notify(devlink, "Writing",
+ NULL, offset,
+ data_size);
+ next_update += data_size / EFX_DEVLINK_STATUS_UPDATE_COUNT;
+ }
+
+ rc = efx_mcdi_nvram_write(efx, type, offset, data + offset, chunk);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Write failed for NVRAM partition %#x at %#zx-%#zx",
+ type, offset, offset + chunk - 1);
+ return rc;
+ }
+ }
+
+ /* Round up left over data to satisfy write alignment */
+ if (offset < data_size) {
+ size_t remaining = data_size - offset;
+ u8 *buf;
+
+ if (offset >= next_update)
+ devlink_flash_update_status_notify(devlink, "Writing",
+ NULL, offset,
+ data_size);
+
+ chunk = roundup(remaining, align);
+ buf = kmalloc(chunk, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy(buf, data + offset, remaining);
+ memset(buf + remaining, 0xFF, chunk - remaining);
+ rc = efx_mcdi_nvram_write(efx, type, offset, buf, chunk);
+ kfree(buf);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Write failed for NVRAM partition %#x at %#zx-%#zx",
+ type, offset, offset + chunk - 1);
+ return rc;
+ }
+ }
+
+ devlink_flash_update_status_notify(devlink, "Writing", NULL, data_size,
+ data_size);
+
+ return 0;
+}
+
+int efx_reflash_flash_firmware(struct efx_nic *efx, const struct firmware *fw,
+ struct netlink_ext_ack *extack)
+{
+ size_t data_size, size, erase_align, write_align;
+ struct devlink *devlink = efx->devlink;
+ u32 type, data_subtype, subtype;
+ const u8 *data;
+ bool protected;
+ int rc;
+
+ if (!efx_has_cap(efx, BUNDLE_UPDATE)) {
+ NL_SET_ERR_MSG_MOD(extack, "NVRAM bundle updates are not supported by the firmware");
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&efx->reflash_mutex);
+
+ devlink_flash_update_status_notify(devlink, "Checking update", NULL, 0, 0);
+
+ if (efx->type->flash_auto_partition) {
+ /* NIC wants entire FW file including headers;
+ * FW will validate 'subtype' if there is one
+ */
+ type = NVRAM_PARTITION_TYPE_AUTO;
+ data = fw->data;
+ data_size = fw->size;
+ } else {
+ rc = efx_reflash_parse_firmware_data(fw, &type, &data_subtype, &data,
+ &data_size);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Firmware image validation check failed");
+ goto out_unlock;
+ }
+
+ rc = efx_mcdi_nvram_metadata(efx, type, &subtype, NULL, NULL, 0);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Metadata query for NVRAM partition %#x failed",
+ type);
+ goto out_unlock;
+ }
+
+ if (subtype != data_subtype) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Firmware image is not appropriate for this adapter");
+ rc = -EINVAL;
+ goto out_unlock;
+ }
+ }
+
+ rc = efx_mcdi_nvram_info(efx, type, &size, &erase_align, &write_align,
+ &protected);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Info query for NVRAM partition %#x failed",
+ type);
+ goto out_unlock;
+ }
+
+ if (protected) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "NVRAM partition %#x is protected",
+ type);
+ rc = -EPERM;
+ goto out_unlock;
+ }
+
+ if (write_align == 0) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "NVRAM partition %#x is not writable",
+ type);
+ rc = -EACCES;
+ goto out_unlock;
+ }
+
+ if (erase_align != 0 && size % erase_align) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "NVRAM partition %#x has a bad partition table entry, can't erase it",
+ type);
+ rc = -EACCES;
+ goto out_unlock;
+ }
+
+ if (data_size > size) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Firmware image is too big for NVRAM partition %#x",
+ type);
+ rc = -EFBIG;
+ goto out_unlock;
+ }
+
+ devlink_flash_update_status_notify(devlink, "Starting update", NULL, 0, 0);
+
+ rc = efx_mcdi_nvram_update_start(efx, type);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Update start request for NVRAM partition %#x failed",
+ type);
+ goto out_unlock;
+ }
+
+ rc = efx_reflash_erase_partition(efx, extack, devlink, type, size,
+ erase_align);
+ if (rc)
+ goto out_update_finish;
+
+ rc = efx_reflash_write_partition(efx, extack, devlink, type, data,
+ data_size, write_align);
+ if (rc)
+ goto out_update_finish;
+
+ devlink_flash_update_timeout_notify(devlink, "Finishing update", NULL,
+ EFX_DEVLINK_UPDATE_FINISH_TIMEOUT);
+
+out_update_finish:
+ if (rc)
+ /* Don't obscure the return code from an earlier failure */
+ efx_mcdi_nvram_update_finish(efx, type, EFX_UPDATE_FINISH_ABORT);
+ else
+ rc = efx_mcdi_nvram_update_finish_polled(efx, type);
+out_unlock:
+ mutex_unlock(&efx->reflash_mutex);
+ devlink_flash_update_status_notify(devlink, rc ? "Update failed" :
+ "Update complete",
+ NULL, 0, 0);
+ return rc;
+}
diff --git a/drivers/net/ethernet/sfc/efx_reflash.h b/drivers/net/ethernet/sfc/efx_reflash.h
new file mode 100644
index 000000000000..3dffac565161
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx_reflash.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for AMD network controllers and boards
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef _EFX_REFLASH_H
+#define _EFX_REFLASH_H
+
+#include "net_driver.h"
+#include <linux/firmware.h>
+
+int efx_reflash_flash_firmware(struct efx_nic *efx, const struct firmware *fw,
+ struct netlink_ext_ack *extack);
+
+#endif /* _EFX_REFLASH_H */
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index bb1930818beb..18fe5850a978 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -217,7 +217,8 @@ static int efx_ethtool_set_wol(struct net_device *net_dev,
}
static void efx_ethtool_get_fec_stats(struct net_device *net_dev,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
@@ -262,10 +263,13 @@ const struct ethtool_ops efx_ethtool_ops = {
.set_rxnfc = efx_ethtool_set_rxnfc,
.get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
.get_rxfh_key_size = efx_ethtool_get_rxfh_key_size,
+ .rxfh_per_ctx_fields = true,
.rxfh_per_ctx_key = true,
+ .cap_rss_rxnfc_adds = true,
.rxfh_priv_size = sizeof(struct efx_rss_context_priv),
.get_rxfh = efx_ethtool_get_rxfh,
.set_rxfh = efx_ethtool_set_rxfh,
+ .get_rxfh_fields = efx_ethtool_get_rxfh_fields,
.create_rxfh_context = efx_ethtool_create_rxfh_context,
.modify_rxfh_context = efx_ethtool_modify_rxfh_context,
.remove_rxfh_context = efx_ethtool_remove_rxfh_context,
diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c
index ae32e08540fa..fa303e171d98 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/ethtool_common.c
@@ -395,7 +395,7 @@ int efx_ethtool_fill_self_tests(struct efx_nic *efx,
return n;
}
-static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
+static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 **strings)
{
size_t n_stats = 0;
struct efx_channel *channel;
@@ -403,24 +403,22 @@ static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
efx_for_each_channel(channel, efx) {
if (efx_channel_has_tx_queues(channel)) {
n_stats++;
- if (strings != NULL) {
- snprintf(strings, ETH_GSTRING_LEN,
- "tx-%u.tx_packets",
- channel->tx_queue[0].queue /
- EFX_MAX_TXQ_PER_CHANNEL);
+ if (!strings)
+ continue;
- strings += ETH_GSTRING_LEN;
- }
+ ethtool_sprintf(strings, "tx-%u.tx_packets",
+ channel->tx_queue[0].queue /
+ EFX_MAX_TXQ_PER_CHANNEL);
}
}
efx_for_each_channel(channel, efx) {
if (efx_channel_has_rx_queue(channel)) {
n_stats++;
- if (strings != NULL) {
- snprintf(strings, ETH_GSTRING_LEN,
- "rx-%d.rx_packets", channel->channel);
- strings += ETH_GSTRING_LEN;
- }
+ if (!strings)
+ continue;
+
+ ethtool_sprintf(strings, "rx-%d.rx_packets",
+ channel->channel);
}
}
if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
@@ -428,11 +426,11 @@ static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
n_stats++;
- if (strings) {
- snprintf(strings, ETH_GSTRING_LEN,
- "tx-xdp-cpu-%hu.tx_packets", xdp);
- strings += ETH_GSTRING_LEN;
- }
+ if (!strings)
+ continue;
+
+ ethtool_sprintf(strings, "tx-xdp-cpu-%hu.tx_packets",
+ xdp);
}
}
@@ -464,15 +462,11 @@ void efx_ethtool_get_strings(struct net_device *net_dev,
switch (string_set) {
case ETH_SS_STATS:
- strings += (efx->type->describe_stats(efx, strings) *
- ETH_GSTRING_LEN);
+ efx->type->describe_stats(efx, &strings);
for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
- strscpy(strings + i * ETH_GSTRING_LEN,
- efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
- strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
- strings += (efx_describe_per_queue_stats(efx, strings) *
- ETH_GSTRING_LEN);
- efx_ptp_describe_stats(efx, strings);
+ ethtool_puts(&strings, efx_sw_stat_desc[i].name);
+ efx_describe_per_queue_stats(efx, &strings);
+ efx_ptp_describe_stats(efx, &strings);
break;
case ETH_SS_TEST:
efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
@@ -806,6 +800,56 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
return rc;
}
+int efx_ethtool_get_rxfh_fields(struct net_device *net_dev,
+ struct ethtool_rxfh_fields *info)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+ struct efx_rss_context_priv *ctx;
+ __u64 data;
+ int rc = 0;
+
+ ctx = &efx->rss_context.priv;
+
+ if (info->rss_context) {
+ ctx = efx_find_rss_context_entry(efx, info->rss_context);
+ if (!ctx)
+ return -ENOENT;
+ }
+
+ data = 0;
+ if (!efx_rss_active(ctx)) /* No RSS */
+ goto out_setdata_unlock;
+
+ switch (info->flow_type) {
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ if (ctx->rx_hash_udp_4tuple)
+ data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
+ RXH_IP_SRC | RXH_IP_DST);
+ else
+ data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
+ RXH_IP_SRC | RXH_IP_DST);
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ break;
+ }
+out_setdata_unlock:
+ info->data = data;
+ return rc;
+}
+
int efx_ethtool_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info, u32 *rule_locs)
{
@@ -818,55 +862,6 @@ int efx_ethtool_get_rxnfc(struct net_device *net_dev,
info->data = efx->n_rx_channels;
return 0;
- case ETHTOOL_GRXFH: {
- struct efx_rss_context_priv *ctx = &efx->rss_context.priv;
- __u64 data;
-
- mutex_lock(&net_dev->ethtool->rss_lock);
- if (info->flow_type & FLOW_RSS && info->rss_context) {
- ctx = efx_find_rss_context_entry(efx, info->rss_context);
- if (!ctx) {
- rc = -ENOENT;
- goto out_unlock;
- }
- }
-
- data = 0;
- if (!efx_rss_active(ctx)) /* No RSS */
- goto out_setdata_unlock;
-
- switch (info->flow_type & ~FLOW_RSS) {
- case UDP_V4_FLOW:
- case UDP_V6_FLOW:
- if (ctx->rx_hash_udp_4tuple)
- data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
- RXH_IP_SRC | RXH_IP_DST);
- else
- data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case TCP_V4_FLOW:
- case TCP_V6_FLOW:
- data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
- RXH_IP_SRC | RXH_IP_DST);
- break;
- case SCTP_V4_FLOW:
- case SCTP_V6_FLOW:
- case AH_ESP_V4_FLOW:
- case AH_ESP_V6_FLOW:
- case IPV4_FLOW:
- case IPV6_FLOW:
- data = RXH_IP_SRC | RXH_IP_DST;
- break;
- default:
- break;
- }
-out_setdata_unlock:
- info->data = data;
-out_unlock:
- mutex_unlock(&net_dev->ethtool->rss_lock);
- return rc;
- }
-
case ETHTOOL_GRXCLSRLCNT:
info->data = efx_filter_get_rx_id_limit(efx);
if (info->data == 0)
diff --git a/drivers/net/ethernet/sfc/ethtool_common.h b/drivers/net/ethernet/sfc/ethtool_common.h
index fc52e891637d..24db4fccbe78 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.h
+++ b/drivers/net/ethernet/sfc/ethtool_common.h
@@ -49,6 +49,8 @@ int efx_ethtool_get_rxfh(struct net_device *net_dev,
int efx_ethtool_set_rxfh(struct net_device *net_dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack);
+int efx_ethtool_get_rxfh_fields(struct net_device *net_dev,
+ struct ethtool_rxfh_fields *info);
int efx_ethtool_create_rxfh_context(struct net_device *net_dev,
struct ethtool_rxfh_context *ctx,
const struct ethtool_rxfh_param *rxfh,
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index 8925745f1c17..6ea41f6c9ef5 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -1394,9 +1394,8 @@ static int ef4_probe_interrupts(struct ef4_nic *efx)
if (n_channels > extra_channels)
n_channels -= extra_channels;
if (ef4_separate_tx_channels) {
- efx->n_tx_channels = min(max(n_channels / 2,
- 1U),
- efx->max_tx_channels);
+ efx->n_tx_channels = clamp(n_channels / 2, 1U,
+ efx->max_tx_channels);
efx->n_rx_channels = max(n_channels -
efx->n_tx_channels,
1U);
@@ -1886,14 +1885,6 @@ unsigned int ef4_usecs_to_ticks(struct ef4_nic *efx, unsigned int usecs)
return usecs * 1000 / efx->timer_quantum_ns;
}
-unsigned int ef4_ticks_to_usecs(struct ef4_nic *efx, unsigned int ticks)
-{
- /* We must round up when converting ticks to microseconds
- * because we round down when converting the other way.
- */
- return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000);
-}
-
/* Set interrupt moderation parameters */
int ef4_init_irq_moderation(struct ef4_nic *efx, unsigned int tx_usecs,
unsigned int rx_usecs, bool rx_adaptive,
@@ -3136,9 +3127,6 @@ out:
/* For simplicity and reliability, we always require a slot reset and try to
* reset the hardware when a pci error affecting the device is detected.
- * We leave both the link_reset and mmio_enabled callback unimplemented:
- * with our request for slot reset the mmio_enabled callback will never be
- * called, and the link_reset callback is not used by AER or EEH mechanisms.
*/
static const struct pci_error_handlers ef4_err_handlers = {
.error_detected = ef4_io_error_detected,
diff --git a/drivers/net/ethernet/sfc/falcon/efx.h b/drivers/net/ethernet/sfc/falcon/efx.h
index d3b4646545fa..52508f2c8cb2 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.h
+++ b/drivers/net/ethernet/sfc/falcon/efx.h
@@ -198,7 +198,6 @@ int ef4_try_recovery(struct ef4_nic *efx);
/* Global */
void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type);
unsigned int ef4_usecs_to_ticks(struct ef4_nic *efx, unsigned int usecs);
-unsigned int ef4_ticks_to_usecs(struct ef4_nic *efx, unsigned int ticks);
int ef4_init_irq_moderation(struct ef4_nic *efx, unsigned int tx_usecs,
unsigned int rx_usecs, bool rx_adaptive,
bool rx_may_override_tx);
diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c
index f4db683b80f7..27d1cd6f24ca 100644
--- a/drivers/net/ethernet/sfc/falcon/ethtool.c
+++ b/drivers/net/ethernet/sfc/falcon/ethtool.c
@@ -353,7 +353,7 @@ static int ef4_ethtool_fill_self_tests(struct ef4_nic *efx,
return n;
}
-static size_t ef4_describe_per_queue_stats(struct ef4_nic *efx, u8 *strings)
+static size_t ef4_describe_per_queue_stats(struct ef4_nic *efx, u8 **strings)
{
size_t n_stats = 0;
struct ef4_channel *channel;
@@ -361,24 +361,22 @@ static size_t ef4_describe_per_queue_stats(struct ef4_nic *efx, u8 *strings)
ef4_for_each_channel(channel, efx) {
if (ef4_channel_has_tx_queues(channel)) {
n_stats++;
- if (strings != NULL) {
- snprintf(strings, ETH_GSTRING_LEN,
- "tx-%u.tx_packets",
- channel->tx_queue[0].queue /
- EF4_TXQ_TYPES);
+ if (!strings)
+ continue;
- strings += ETH_GSTRING_LEN;
- }
+ ethtool_sprintf(strings, "tx-%u.tx_packets",
+ channel->tx_queue[0].queue /
+ EF4_TXQ_TYPES);
}
}
ef4_for_each_channel(channel, efx) {
if (ef4_channel_has_rx_queue(channel)) {
n_stats++;
- if (strings != NULL) {
- snprintf(strings, ETH_GSTRING_LEN,
- "rx-%d.rx_packets", channel->channel);
- strings += ETH_GSTRING_LEN;
- }
+ if (!strings)
+ continue;
+
+ ethtool_sprintf(strings, "rx-%d.rx_packets",
+ channel->channel);
}
}
return n_stats;
@@ -409,14 +407,10 @@ static void ef4_ethtool_get_strings(struct net_device *net_dev,
switch (string_set) {
case ETH_SS_STATS:
- strings += (efx->type->describe_stats(efx, strings) *
- ETH_GSTRING_LEN);
+ efx->type->describe_stats(efx, &strings);
for (i = 0; i < EF4_ETHTOOL_SW_STAT_COUNT; i++)
- strscpy(strings + i * ETH_GSTRING_LEN,
- ef4_sw_stat_desc[i].name, ETH_GSTRING_LEN);
- strings += EF4_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
- strings += (ef4_describe_per_queue_stats(efx, strings) *
- ETH_GSTRING_LEN);
+ ethtool_puts(&strings, ef4_sw_stat_desc[i].name);
+ ef4_describe_per_queue_stats(efx, &strings);
break;
case ETH_SS_TEST:
ef4_ethtool_fill_self_tests(efx, NULL, strings, NULL);
@@ -950,6 +944,37 @@ static int ef4_ethtool_get_class_rule(struct ef4_nic *efx,
}
static int
+ef4_ethtool_get_rxfh_fields(struct net_device *net_dev,
+ struct ethtool_rxfh_fields *info)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+
+ info->data = 0;
+ /* Falcon A0 and A1 had a 4-tuple hash for TCP and UDP, but it was
+ * broken so we do not enable it.
+ * Falcon B0 adds a Toeplitz hash, 4-tuple for TCP and 2-tuple for
+ * other IPv4, including UDP.
+ * See falcon_init_rx_cfg().
+ */
+ if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0)
+ return 0;
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case IPV4_FLOW:
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int
ef4_ethtool_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info, u32 *rule_locs)
{
@@ -960,29 +985,6 @@ ef4_ethtool_get_rxnfc(struct net_device *net_dev,
info->data = efx->n_rx_channels;
return 0;
- case ETHTOOL_GRXFH: {
- unsigned min_revision = 0;
-
- info->data = 0;
- switch (info->flow_type) {
- case TCP_V4_FLOW:
- info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- fallthrough;
- case UDP_V4_FLOW:
- case SCTP_V4_FLOW:
- case AH_ESP_V4_FLOW:
- case IPV4_FLOW:
- info->data |= RXH_IP_SRC | RXH_IP_DST;
- min_revision = EF4_REV_FALCON_B0;
- break;
- default:
- break;
- }
- if (ef4_nic_rev(efx) < min_revision)
- info->data = 0;
- return 0;
- }
-
case ETHTOOL_GRXCLSRLCNT:
info->data = ef4_filter_get_rx_id_limit(efx);
if (info->data == 0)
@@ -1349,6 +1351,7 @@ const struct ethtool_ops ef4_ethtool_ops = {
.get_rxfh_indir_size = ef4_ethtool_get_rxfh_indir_size,
.get_rxfh = ef4_ethtool_get_rxfh,
.set_rxfh = ef4_ethtool_set_rxfh,
+ .get_rxfh_fields = ef4_ethtool_get_rxfh_fields,
.get_module_info = ef4_ethtool_get_module_info,
.get_module_eeprom = ef4_ethtool_get_module_eeprom,
.get_link_ksettings = ef4_ethtool_get_link_ksettings,
diff --git a/drivers/net/ethernet/sfc/falcon/falcon.c b/drivers/net/ethernet/sfc/falcon/falcon.c
index 36114ce88034..c44df8e4dd30 100644
--- a/drivers/net/ethernet/sfc/falcon/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon/falcon.c
@@ -1453,8 +1453,8 @@ static void falcon_stats_complete(struct ef4_nic *efx)
static void falcon_stats_timer_func(struct timer_list *t)
{
- struct falcon_nic_data *nic_data = from_timer(nic_data, t,
- stats_timer);
+ struct falcon_nic_data *nic_data = timer_container_of(nic_data, t,
+ stats_timer);
struct ef4_nic *efx = nic_data->efx;
spin_lock(&efx->stats_lock);
@@ -2564,7 +2564,7 @@ static void falcon_remove_nic(struct ef4_nic *efx)
efx->nic_data = NULL;
}
-static size_t falcon_describe_nic_stats(struct ef4_nic *efx, u8 *names)
+static size_t falcon_describe_nic_stats(struct ef4_nic *efx, u8 **names)
{
return ef4_nic_describe_stats(falcon_stat_desc, FALCON_STAT_COUNT,
falcon_stat_mask, names);
@@ -2657,7 +2657,7 @@ void falcon_stop_nic_stats(struct ef4_nic *efx)
++nic_data->stats_disable_count;
spin_unlock_bh(&efx->stats_lock);
- del_timer_sync(&nic_data->stats_timer);
+ timer_delete_sync(&nic_data->stats_timer);
/* Wait enough time for the most recent transfer to
* complete. */
diff --git a/drivers/net/ethernet/sfc/falcon/farch.c b/drivers/net/ethernet/sfc/falcon/farch.c
index c64623c2e80c..01017c41338e 100644
--- a/drivers/net/ethernet/sfc/falcon/farch.c
+++ b/drivers/net/ethernet/sfc/falcon/farch.c
@@ -1631,28 +1631,6 @@ void ef4_farch_rx_push_indir_table(struct ef4_nic *efx)
}
}
-/* Looks at available SRAM resources and works out how many queues we
- * can support, and where things like descriptor caches should live.
- *
- * SRAM is split up as follows:
- * 0 buftbl entries for channels
- * efx->vf_buftbl_base buftbl entries for SR-IOV
- * efx->rx_dc_base RX descriptor caches
- * efx->tx_dc_base TX descriptor caches
- */
-void ef4_farch_dimension_resources(struct ef4_nic *efx, unsigned sram_lim_qw)
-{
- unsigned vi_count;
-
- /* Account for the buffer table entries backing the datapath channels
- * and the descriptor caches for those channels.
- */
- vi_count = max(efx->n_channels, efx->n_tx_channels * EF4_TXQ_TYPES);
-
- efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
- efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
-}
-
u32 ef4_farch_fpga_ver(struct ef4_nic *efx)
{
ef4_oword_t altera_build;
diff --git a/drivers/net/ethernet/sfc/falcon/net_driver.h b/drivers/net/ethernet/sfc/falcon/net_driver.h
index a2c7139f2b32..7ab0db44720d 100644
--- a/drivers/net/ethernet/sfc/falcon/net_driver.h
+++ b/drivers/net/ethernet/sfc/falcon/net_driver.h
@@ -1057,7 +1057,7 @@ struct ef4_nic_type {
void (*finish_flush)(struct ef4_nic *efx);
void (*prepare_flr)(struct ef4_nic *efx);
void (*finish_flr)(struct ef4_nic *efx);
- size_t (*describe_stats)(struct ef4_nic *efx, u8 *names);
+ size_t (*describe_stats)(struct ef4_nic *efx, u8 **names);
size_t (*update_stats)(struct ef4_nic *efx, u64 *full_stats,
struct rtnl_link_stats64 *core_stats);
void (*start_stats)(struct ef4_nic *efx);
diff --git a/drivers/net/ethernet/sfc/falcon/nic.c b/drivers/net/ethernet/sfc/falcon/nic.c
index 78c851b5a56f..a6304686bc90 100644
--- a/drivers/net/ethernet/sfc/falcon/nic.c
+++ b/drivers/net/ethernet/sfc/falcon/nic.c
@@ -444,18 +444,15 @@ void ef4_nic_get_regs(struct ef4_nic *efx, void *buf)
* bits in the first @count bits of @mask for which a name is defined.
*/
size_t ef4_nic_describe_stats(const struct ef4_hw_stat_desc *desc, size_t count,
- const unsigned long *mask, u8 *names)
+ const unsigned long *mask, u8 **names)
{
size_t visible = 0;
size_t index;
for_each_set_bit(index, mask, count) {
if (desc[index].name) {
- if (names) {
- strscpy(names, desc[index].name,
- ETH_GSTRING_LEN);
- names += ETH_GSTRING_LEN;
- }
+ if (names)
+ ethtool_puts(names, desc[index].name);
++visible;
}
}
@@ -511,14 +508,3 @@ void ef4_nic_update_stats(const struct ef4_hw_stat_desc *desc, size_t count,
}
}
}
-
-void ef4_nic_fix_nodesc_drop_stat(struct ef4_nic *efx, u64 *rx_nodesc_drops)
-{
- /* if down, or this is the first update after coming up */
- if (!(efx->net_dev->flags & IFF_UP) || !efx->rx_nodesc_drops_prev_state)
- efx->rx_nodesc_drops_while_down +=
- *rx_nodesc_drops - efx->rx_nodesc_drops_total;
- efx->rx_nodesc_drops_total = *rx_nodesc_drops;
- efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
- *rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
-}
diff --git a/drivers/net/ethernet/sfc/falcon/nic.h b/drivers/net/ethernet/sfc/falcon/nic.h
index ada6e036fd97..bc6ef937d0fe 100644
--- a/drivers/net/ethernet/sfc/falcon/nic.h
+++ b/drivers/net/ethernet/sfc/falcon/nic.h
@@ -477,7 +477,6 @@ void ef4_farch_finish_flr(struct ef4_nic *efx);
void falcon_start_nic_stats(struct ef4_nic *efx);
void falcon_stop_nic_stats(struct ef4_nic *efx);
int falcon_reset_xaui(struct ef4_nic *efx);
-void ef4_farch_dimension_resources(struct ef4_nic *efx, unsigned sram_lim_qw);
void ef4_farch_init_common(struct ef4_nic *efx);
void ef4_farch_rx_push_indir_table(struct ef4_nic *efx);
@@ -498,14 +497,10 @@ size_t ef4_nic_get_regs_len(struct ef4_nic *efx);
void ef4_nic_get_regs(struct ef4_nic *efx, void *buf);
size_t ef4_nic_describe_stats(const struct ef4_hw_stat_desc *desc, size_t count,
- const unsigned long *mask, u8 *names);
+ const unsigned long *mask, u8 **names);
void ef4_nic_update_stats(const struct ef4_hw_stat_desc *desc, size_t count,
const unsigned long *mask, u64 *stats,
const void *dma_buf, bool accumulate);
-void ef4_nic_fix_nodesc_drop_stat(struct ef4_nic *efx, u64 *stat);
-
-#define EF4_MAX_FLUSH_TIME 5000
-
void ef4_farch_generate_event(struct ef4_nic *efx, unsigned int evq,
ef4_qword_t *event);
diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c
index 6bbdb5d2eebf..f69fcf6caca8 100644
--- a/drivers/net/ethernet/sfc/falcon/rx.c
+++ b/drivers/net/ethernet/sfc/falcon/rx.c
@@ -382,7 +382,8 @@ void ef4_fast_push_rx_descriptors(struct ef4_rx_queue *rx_queue, bool atomic)
void ef4_rx_slow_fill(struct timer_list *t)
{
- struct ef4_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
+ struct ef4_rx_queue *rx_queue = timer_container_of(rx_queue, t,
+ slow_fill);
/* Post an event to cause NAPI to run and refill the queue */
ef4_nic_generate_fill_event(rx_queue);
@@ -791,7 +792,7 @@ void ef4_fini_rx_queue(struct ef4_rx_queue *rx_queue)
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
"shutting down RX queue %d\n", ef4_rx_queue_index(rx_queue));
- del_timer_sync(&rx_queue->slow_fill);
+ timer_delete_sync(&rx_queue->slow_fill);
/* Release RX buffers from the current read ptr to the write ptr */
if (rx_queue->buffer) {
diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c
index b9369483758c..e6e80b039ca2 100644
--- a/drivers/net/ethernet/sfc/falcon/tx.c
+++ b/drivers/net/ethernet/sfc/falcon/tx.c
@@ -40,14 +40,6 @@ static inline u8 *ef4_tx_get_copy_buffer(struct ef4_tx_queue *tx_queue,
return (u8 *)page_buf->addr + offset;
}
-u8 *ef4_tx_get_copy_buffer_limited(struct ef4_tx_queue *tx_queue,
- struct ef4_tx_buffer *buffer, size_t len)
-{
- if (len > EF4_TX_CB_SIZE)
- return NULL;
- return ef4_tx_get_copy_buffer(tx_queue, buffer);
-}
-
static void ef4_dequeue_buffer(struct ef4_tx_queue *tx_queue,
struct ef4_tx_buffer *buffer,
unsigned int *pkts_compl,
diff --git a/drivers/net/ethernet/sfc/falcon/tx.h b/drivers/net/ethernet/sfc/falcon/tx.h
index 2a88c59cbbbe..868ed8a861ab 100644
--- a/drivers/net/ethernet/sfc/falcon/tx.h
+++ b/drivers/net/ethernet/sfc/falcon/tx.h
@@ -15,9 +15,6 @@
unsigned int ef4_tx_limit_len(struct ef4_tx_queue *tx_queue,
dma_addr_t dma_addr, unsigned int len);
-u8 *ef4_tx_get_copy_buffer_limited(struct ef4_tx_queue *tx_queue,
- struct ef4_tx_buffer *buffer, size_t len);
-
int ef4_enqueue_skb_tso(struct ef4_tx_queue *tx_queue, struct sk_buff *skb,
bool *data_mapped);
diff --git a/drivers/net/ethernet/sfc/fw_formats.h b/drivers/net/ethernet/sfc/fw_formats.h
new file mode 100644
index 000000000000..cbc350c96013
--- /dev/null
+++ b/drivers/net/ethernet/sfc/fw_formats.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for AMD network controllers and boards
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef _EFX_FW_FORMATS_H
+#define _EFX_FW_FORMATS_H
+
+/* Header layouts of firmware update images recognised by Efx NICs.
+ * The sources-of-truth for these layouts are AMD internal documents
+ * and sfregistry headers, neither of which is available externally
+ * or usable directly by the driver.
+ *
+ * While each format includes a 'magic number', these are at different
+ * offsets in the various formats, and a legal header for one format
+ * could have the right value in whichever field occupies that offset
+ * to match another format's magic.
+ * Besides, some packaging formats (such as CMS/PKCS#7 signed images)
+ * prepend a header for which finding the size is a non-trivial task;
+ * rather than trying to parse those headers, we search byte-by-byte
+ * through the provided firmware image looking for a valid header.
+ * Thus, format recognition has to include validation of the checksum
+ * field, even though the firmware will validate that itself before
+ * applying the image.
+ */
+
+/* EF10 (Medford2, X2) "reflash" header format. Defined in SF-121352-AN */
+#define EFX_REFLASH_HEADER_MAGIC_OFST 0
+#define EFX_REFLASH_HEADER_MAGIC_LEN 4
+#define EFX_REFLASH_HEADER_MAGIC_VALUE 0x106F1A5
+
+#define EFX_REFLASH_HEADER_VERSION_OFST 4
+#define EFX_REFLASH_HEADER_VERSION_LEN 4
+#define EFX_REFLASH_HEADER_VERSION_VALUE 4
+
+#define EFX_REFLASH_HEADER_FIRMWARE_TYPE_OFST 8
+#define EFX_REFLASH_HEADER_FIRMWARE_TYPE_LEN 4
+#define EFX_REFLASH_FIRMWARE_TYPE_BOOTROM 0x2
+#define EFX_REFLASH_FIRMWARE_TYPE_BUNDLE 0xd
+
+#define EFX_REFLASH_HEADER_FIRMWARE_SUBTYPE_OFST 12
+#define EFX_REFLASH_HEADER_FIRMWARE_SUBTYPE_LEN 4
+
+#define EFX_REFLASH_HEADER_PAYLOAD_SIZE_OFST 16
+#define EFX_REFLASH_HEADER_PAYLOAD_SIZE_LEN 4
+
+#define EFX_REFLASH_HEADER_LENGTH_OFST 20
+#define EFX_REFLASH_HEADER_LENGTH_LEN 4
+
+/* Reflash trailer */
+#define EFX_REFLASH_TRAILER_CRC_OFST 0
+#define EFX_REFLASH_TRAILER_CRC_LEN 4
+
+#define EFX_REFLASH_TRAILER_LEN \
+ (EFX_REFLASH_TRAILER_CRC_OFST + EFX_REFLASH_TRAILER_CRC_LEN)
+
+/* EF100 "SmartNIC image" header format.
+ * Defined in sfregistry "src/layout/snic_image_hdr.h".
+ */
+#define EFX_SNICIMAGE_HEADER_MAGIC_OFST 16
+#define EFX_SNICIMAGE_HEADER_MAGIC_LEN 4
+#define EFX_SNICIMAGE_HEADER_MAGIC_VALUE 0x541C057A
+
+#define EFX_SNICIMAGE_HEADER_VERSION_OFST 20
+#define EFX_SNICIMAGE_HEADER_VERSION_LEN 4
+#define EFX_SNICIMAGE_HEADER_VERSION_VALUE 1
+
+#define EFX_SNICIMAGE_HEADER_LENGTH_OFST 24
+#define EFX_SNICIMAGE_HEADER_LENGTH_LEN 4
+
+#define EFX_SNICIMAGE_HEADER_PARTITION_TYPE_OFST 36
+#define EFX_SNICIMAGE_HEADER_PARTITION_TYPE_LEN 4
+
+#define EFX_SNICIMAGE_HEADER_PARTITION_SUBTYPE_OFST 40
+#define EFX_SNICIMAGE_HEADER_PARTITION_SUBTYPE_LEN 4
+
+#define EFX_SNICIMAGE_HEADER_PAYLOAD_SIZE_OFST 60
+#define EFX_SNICIMAGE_HEADER_PAYLOAD_SIZE_LEN 4
+
+#define EFX_SNICIMAGE_HEADER_CRC_OFST 64
+#define EFX_SNICIMAGE_HEADER_CRC_LEN 4
+
+#define EFX_SNICIMAGE_HEADER_MINLEN 256
+
+/* EF100 "SmartNIC bundle" header format. Defined in SF-122606-TC */
+#define EFX_SNICBUNDLE_HEADER_MAGIC_OFST 0
+#define EFX_SNICBUNDLE_HEADER_MAGIC_LEN 4
+#define EFX_SNICBUNDLE_HEADER_MAGIC_VALUE 0xB1001001
+
+#define EFX_SNICBUNDLE_HEADER_VERSION_OFST 4
+#define EFX_SNICBUNDLE_HEADER_VERSION_LEN 4
+#define EFX_SNICBUNDLE_HEADER_VERSION_VALUE 1
+
+#define EFX_SNICBUNDLE_HEADER_BUNDLE_TYPE_OFST 8
+#define EFX_SNICBUNDLE_HEADER_BUNDLE_TYPE_LEN 4
+
+#define EFX_SNICBUNDLE_HEADER_BUNDLE_SUBTYPE_OFST 12
+#define EFX_SNICBUNDLE_HEADER_BUNDLE_SUBTYPE_LEN 4
+
+#define EFX_SNICBUNDLE_HEADER_LENGTH_OFST 20
+#define EFX_SNICBUNDLE_HEADER_LENGTH_LEN 4
+
+#define EFX_SNICBUNDLE_HEADER_CRC_OFST 224
+#define EFX_SNICBUNDLE_HEADER_CRC_LEN 4
+
+#define EFX_SNICBUNDLE_HEADER_LEN \
+ (EFX_SNICBUNDLE_HEADER_CRC_OFST + EFX_SNICBUNDLE_HEADER_CRC_LEN)
+
+#endif /* _EFX_FW_FORMATS_H */
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 4cc7b501135f..ef374a8e05c3 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -217,28 +217,4 @@ _efx_writed_page(struct efx_nic *efx, const efx_dword_t *value,
(reg) != 0xa1c), \
page)
-/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug
- * in the BIU means that writes to TIMER_COMMAND[0] invalidate the
- * collector register.
- */
-static inline void _efx_writed_page_locked(struct efx_nic *efx,
- const efx_dword_t *value,
- unsigned int reg,
- unsigned int page)
-{
- unsigned long flags __attribute__ ((unused));
-
- if (page == 0) {
- spin_lock_irqsave(&efx->biu_lock, flags);
- efx_writed(efx, value, efx_paged_reg(efx, page, reg));
- spin_unlock_irqrestore(&efx->biu_lock, flags);
- } else {
- efx_writed(efx, value, efx_paged_reg(efx, page, reg));
- }
-}
-#define efx_writed_page_locked(efx, value, reg, page) \
- _efx_writed_page_locked(efx, value, \
- reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \
- page)
-
#endif /* EFX_IO_H */
diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c
index 10709d828a63..7cfd9000f79d 100644
--- a/drivers/net/ethernet/sfc/mae.c
+++ b/drivers/net/ethernet/sfc/mae.c
@@ -76,17 +76,6 @@ void efx_mae_mport_uplink(struct efx_nic *efx __always_unused, u32 *out)
*out = EFX_DWORD_VAL(mport);
}
-void efx_mae_mport_vf(struct efx_nic *efx __always_unused, u32 vf_id, u32 *out)
-{
- efx_dword_t mport;
-
- EFX_POPULATE_DWORD_3(mport,
- MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_FUNC,
- MAE_MPORT_SELECTOR_FUNC_PF_ID, MAE_MPORT_SELECTOR_FUNC_PF_ID_CALLER,
- MAE_MPORT_SELECTOR_FUNC_VF_ID, vf_id);
- *out = EFX_DWORD_VAL(mport);
-}
-
/* Constructs an mport selector from an mport ID, because they're not the same */
void efx_mae_mport_mport(struct efx_nic *efx __always_unused, u32 mport_id, u32 *out)
{
@@ -766,7 +755,7 @@ int efx_mae_match_check_caps_lhs(struct efx_nic *efx,
rc = efx_mae_match_check_cap_typ(supported_fields[MAE_FIELD_INGRESS_PORT],
ingress_port_mask_type);
if (rc) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "No support for %s mask in field %s\n",
+ NL_SET_ERR_MSG_FMT_MOD(extack, "No support for %s mask in field %s",
mask_type_name(ingress_port_mask_type),
"ingress_port");
return rc;
@@ -1101,6 +1090,9 @@ void efx_mae_remove_mport(void *desc, void *arg)
kfree(mport);
}
+/*
+ * Takes ownership of @desc, even if it returns an error
+ */
static int efx_mae_process_mport(struct efx_nic *efx,
struct mae_mport_desc *desc)
{
@@ -1111,6 +1103,7 @@ static int efx_mae_process_mport(struct efx_nic *efx,
if (!IS_ERR_OR_NULL(mport)) {
netif_err(efx, drv, efx->net_dev,
"mport with id %u does exist!!!\n", desc->mport_id);
+ kfree(desc);
return -EEXIST;
}
diff --git a/drivers/net/ethernet/sfc/mae.h b/drivers/net/ethernet/sfc/mae.h
index 8df30bc4f3ba..db79912c86d8 100644
--- a/drivers/net/ethernet/sfc/mae.h
+++ b/drivers/net/ethernet/sfc/mae.h
@@ -23,7 +23,6 @@ int efx_mae_free_mport(struct efx_nic *efx, u32 id);
void efx_mae_mport_wire(struct efx_nic *efx, u32 *out);
void efx_mae_mport_uplink(struct efx_nic *efx, u32 *out);
-void efx_mae_mport_vf(struct efx_nic *efx, u32 vf_id, u32 *out);
void efx_mae_mport_mport(struct efx_nic *efx, u32 mport_id, u32 *out);
int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id);
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 76578502226e..5e9b8def5e42 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -530,7 +530,7 @@ static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
* of it aborting the next request.
*/
if (!timeout)
- del_timer_sync(&mcdi->async_timer);
+ timer_delete_sync(&mcdi->async_timer);
spin_lock(&mcdi->async_lock);
async = list_first_entry(&mcdi->async_list,
@@ -605,7 +605,7 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
static void efx_mcdi_timeout_async(struct timer_list *t)
{
- struct efx_mcdi_iface *mcdi = from_timer(mcdi, t, async_timer);
+ struct efx_mcdi_iface *mcdi = timer_container_of(mcdi, t, async_timer);
efx_mcdi_complete_async(mcdi, true);
}
@@ -1051,15 +1051,6 @@ efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
cookie, false);
}
-int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
- const efx_dword_t *inbuf, size_t inlen,
- size_t outlen, efx_mcdi_async_completer *complete,
- unsigned long cookie)
-{
- return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
- cookie, true);
-}
-
int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual)
@@ -1068,14 +1059,6 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
outlen_actual, false, NULL, NULL);
}
-int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, size_t inlen,
- efx_dword_t *outbuf, size_t outlen,
- size_t *outlen_actual)
-{
- return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
- outlen_actual, true, NULL, NULL);
-}
-
void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
size_t inlen, efx_dword_t *outbuf,
size_t outlen, int rc)
@@ -1139,7 +1122,7 @@ void efx_mcdi_flush_async(struct efx_nic *efx)
/* We must be in poll or fail mode so no more requests can be queued */
BUG_ON(mcdi->mode == MCDI_MODE_EVENTS);
- del_timer_sync(&mcdi->async_timer);
+ timer_delete_sync(&mcdi->async_timer);
/* If a request is still running, make sure we give the MC
* time to complete it so that the response won't overwrite our
@@ -1642,12 +1625,15 @@ fail:
return rc;
}
+#define EFX_MCDI_NVRAM_DEFAULT_WRITE_LEN 128
+
int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
size_t *size_out, size_t *erase_size_out,
- bool *protected_out)
+ size_t *write_size_out, bool *protected_out)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_V2_OUT_LEN);
+ size_t write_size = 0;
size_t outlen;
int rc;
@@ -1662,6 +1648,12 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
goto fail;
}
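+ /* Only a V2 response reports the write granularity; otherwise assume the default */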
+ if (outlen >= MC_CMD_NVRAM_INFO_V2_OUT_LEN)
+ write_size = MCDI_DWORD(outbuf, NVRAM_INFO_V2_OUT_WRITESIZE);
+ else
+ write_size = EFX_MCDI_NVRAM_DEFAULT_WRITE_LEN;
+
+ *write_size_out = write_size;
*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
*protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
@@ -1982,33 +1974,6 @@ efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
}
-int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
-{
- MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
- size_t outlen;
- int rc;
-
- rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- goto fail;
-
- if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
- rc = -EIO;
- goto fail;
- }
-
- *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);
-
- return 0;
-
-fail:
- *id_out = -1;
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
-}
-
-
int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
@@ -2021,38 +1986,6 @@ int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
return rc;
}
-int efx_mcdi_flush_rxqs(struct efx_nic *efx)
-{
- struct efx_channel *channel;
- struct efx_rx_queue *rx_queue;
- MCDI_DECLARE_BUF(inbuf,
- MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
- int rc, count;
-
- BUILD_BUG_ON(EFX_MAX_CHANNELS >
- MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
-
- count = 0;
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_rx_queue(rx_queue, channel) {
- if (rx_queue->flush_pending) {
- rx_queue->flush_pending = false;
- atomic_dec(&efx->rxq_flush_pending);
- MCDI_SET_ARRAY_DWORD(
- inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
- count, efx_rx_queue_index(rx_queue));
- count++;
- }
- }
- }
-
- rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
- MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL);
- WARN_ON(rc < 0);
-
- return rc;
-}
-
int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
{
int rc;
@@ -2239,11 +2172,9 @@ out_free:
return rc;
}
-#ifdef CONFIG_SFC_MTD
-
#define EFX_MCDI_NVRAM_LEN_MAX 128
-static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
+int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN);
int rc;
@@ -2261,6 +2192,8 @@ static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
return rc;
}
+#ifdef CONFIG_SFC_MTD
+
static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
loff_t offset, u8 *buffer, size_t length)
{
@@ -2285,13 +2218,20 @@ static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
return 0;
}
-static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
- loff_t offset, const u8 *buffer, size_t length)
+#endif /* CONFIG_SFC_MTD */
+
+int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
+ loff_t offset, const u8 *buffer, size_t length)
{
- MCDI_DECLARE_BUF(inbuf,
- MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
+ efx_dword_t *inbuf;
+ size_t inlen;
int rc;
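+ /* The write length is no longer capped at EFX_MCDI_NVRAM_LEN_MAX, so
+ * size the MCDI request buffer to fit the requested length.
+ */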
+ inlen = ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4);
+ inbuf = kzalloc(inlen, GFP_KERNEL);
+ if (!inbuf)
+ return -ENOMEM;
+
MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
@@ -2299,14 +2239,14 @@ static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
- ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
- NULL, 0, NULL);
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, inlen, NULL, 0, NULL);
+ kfree(inbuf);
+
return rc;
}
-static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
- loff_t offset, size_t length)
+int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, loff_t offset,
+ size_t length)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
int rc;
@@ -2322,7 +2262,8 @@ static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
return rc;
}
-static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
+int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type,
+ enum efx_update_finish_mode mode)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN);
@@ -2330,22 +2271,41 @@ static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
int rc, rc2;
MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
- /* Always set this flag. Old firmware ignores it */
- MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_FINISH_V2_IN_FLAGS,
+
+ /* Old firmware doesn't support background update finish and abort
+ * operations. Fall back to waiting if the requested mode is not
+ * supported.
+ */
+ if (!efx_has_cap(efx, NVRAM_UPDATE_POLL_VERIFY_RESULT) ||
+ (!efx_has_cap(efx, NVRAM_UPDATE_ABORT_SUPPORTED) &&
+ mode == EFX_UPDATE_FINISH_ABORT))
+ mode = EFX_UPDATE_FINISH_WAIT;
+
+ MCDI_POPULATE_DWORD_4(inbuf, NVRAM_UPDATE_FINISH_V2_IN_FLAGS,
NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT,
- 1);
+ (mode != EFX_UPDATE_FINISH_ABORT),
+ NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND,
+ (mode == EFX_UPDATE_FINISH_BACKGROUND),
+ NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT,
+ (mode == EFX_UPDATE_FINISH_POLL),
+ NVRAM_UPDATE_FINISH_V2_IN_FLAG_ABORT,
+ (mode == EFX_UPDATE_FINISH_ABORT));
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (!rc && outlen >= MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN) {
rc2 = MCDI_DWORD(outbuf, NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE);
- if (rc2 != MC_CMD_NVRAM_VERIFY_RC_SUCCESS)
+ if (rc2 != MC_CMD_NVRAM_VERIFY_RC_SUCCESS &&
+ rc2 != MC_CMD_NVRAM_VERIFY_RC_PENDING)
netif_err(efx, drv, efx->net_dev,
"NVRAM update failed verification with code 0x%x\n",
rc2);
switch (rc2) {
case MC_CMD_NVRAM_VERIFY_RC_SUCCESS:
break;
+ case MC_CMD_NVRAM_VERIFY_RC_PENDING:
+ rc = -EAGAIN;
+ break;
case MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED:
case MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED:
case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED:
@@ -2360,6 +2320,8 @@ static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
case MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES:
case MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS:
case MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH:
+ case MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED:
+ case MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE:
rc = -EPERM;
break;
default:
@@ -2372,6 +2334,42 @@ static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
return rc;
}
+#define EFX_MCDI_NVRAM_UPDATE_FINISH_INITIAL_POLL_DELAY_MS 5
+#define EFX_MCDI_NVRAM_UPDATE_FINISH_MAX_POLL_DELAY_MS 5000
+#define EFX_MCDI_NVRAM_UPDATE_FINISH_RETRIES 185
+
+int efx_mcdi_nvram_update_finish_polled(struct efx_nic *efx, unsigned int type)
+{
+ unsigned int delay = EFX_MCDI_NVRAM_UPDATE_FINISH_INITIAL_POLL_DELAY_MS;
+ unsigned int retry = 0;
+ int rc;
+
+ /* NVRAM updates can take a long time (e.g. up to 1 minute for bundle
+ * images). Polling for NVRAM update completion ensures that other MCDI
+ * commands can be issued before the background NVRAM update completes.
+ *
+ * The initial call either completes the update synchronously, or
+ * returns -EAGAIN to indicate processing is continuing. In the latter
+ * case, we poll for at least 900 seconds, at increasing intervals
+ * (5ms, 50ms, 500ms, 5s).
+ */
+ rc = efx_mcdi_nvram_update_finish(efx, type, EFX_UPDATE_FINISH_BACKGROUND);
+ while (rc == -EAGAIN) {
+ if (retry > EFX_MCDI_NVRAM_UPDATE_FINISH_RETRIES)
+ return -ETIMEDOUT;
+ retry++;
+
+ msleep(delay);
+ if (delay < EFX_MCDI_NVRAM_UPDATE_FINISH_MAX_POLL_DELAY_MS)
+ delay *= 10;
+
+ rc = efx_mcdi_nvram_update_finish(efx, type, EFX_UPDATE_FINISH_POLL);
+ }
+ return rc;
+}
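Worked check (illustrative, not from the patch): the constants above bound the
worst-case wait in efx_mcdi_nvram_update_finish_polled(). The delay ramps
5 ms -> 50 ms -> 500 ms and then stays at 5 s, and the loop performs 186
sleeps before returning -ETIMEDOUT, so (assuming msleep() sleeps for at least
the requested time) the total is roughly

    5 + 50 + 500 + 183 * 5000 = 915555 ms  (about 915 seconds)

which is where the "at least 900 seconds" figure in the comment comes from.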
+
+#ifdef CONFIG_SFC_MTD
+
int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
size_t len, size_t *retlen, u8 *buffer)
{
@@ -2465,7 +2463,8 @@ int efx_mcdi_mtd_sync(struct mtd_info *mtd)
if (part->updating) {
part->updating = false;
- rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
+ rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type,
+ EFX_UPDATE_FINISH_WAIT);
}
return rc;
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index ea612c619874..3755cd3fe1e6 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -155,9 +155,6 @@ int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual);
-int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd,
- size_t inlen, efx_dword_t *outbuf,
- size_t outlen, size_t *outlen_actual);
typedef void efx_mcdi_async_completer(struct efx_nic *efx,
unsigned long cookie, int rc,
@@ -167,11 +164,6 @@ int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
const efx_dword_t *inbuf, size_t inlen, size_t outlen,
efx_mcdi_async_completer *complete,
unsigned long cookie);
-int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
- const efx_dword_t *inbuf, size_t inlen,
- size_t outlen,
- efx_mcdi_async_completer *complete,
- unsigned long cookie);
void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
size_t inlen, efx_dword_t *outbuf,
@@ -400,7 +392,7 @@ int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq);
int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
size_t *size_out, size_t *erase_size_out,
- bool *protected_out);
+ size_t *write_size_out, bool *protected_out);
int efx_new_mcdi_nvram_test_all(struct efx_nic *efx);
int efx_mcdi_nvram_metadata(struct efx_nic *efx, unsigned int type,
u32 *subtype, u16 version[4], char *desc,
@@ -410,10 +402,8 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx);
int efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac,
int *id_out);
-int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
-int efx_mcdi_flush_rxqs(struct efx_nic *efx);
void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
void efx_mcdi_mac_start_stats(struct efx_nic *efx);
void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
@@ -434,6 +424,26 @@ static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {}
#endif
+int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type);
+int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
+ loff_t offset, const u8 *buffer, size_t length);
+int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
+ loff_t offset, size_t length);
+int efx_mcdi_nvram_metadata(struct efx_nic *efx, unsigned int type,
+ u32 *subtype, u16 version[4], char *desc,
+ size_t descsize);
+
+enum efx_update_finish_mode {
+ EFX_UPDATE_FINISH_WAIT,
+ EFX_UPDATE_FINISH_BACKGROUND,
+ EFX_UPDATE_FINISH_POLL,
+ EFX_UPDATE_FINISH_ABORT,
+};
+
+int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type,
+ enum efx_update_finish_mode mode);
+int efx_mcdi_nvram_update_finish_polled(struct efx_nic *efx, unsigned int type);
+
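A minimal usage sketch for the declarations above (illustrative only, not part
of the patch): the function name, chunk size and error handling are
assumptions, and a real caller would honour the write granularity reported by
efx_mcdi_nvram_info().

static int example_nvram_update(struct efx_nic *efx, unsigned int type,
				const u8 *image, size_t len)
{
	size_t offset = 0;
	int rc;

	rc = efx_mcdi_nvram_update_start(efx, type);
	if (rc)
		return rc;

	rc = efx_mcdi_nvram_erase(efx, type, 0, len);

	while (!rc && offset < len) {
		/* Hypothetical 128-byte chunks to stay within one MCDI request */
		size_t chunk = min_t(size_t, len - offset, 128);

		rc = efx_mcdi_nvram_write(efx, type, offset,
					  image + offset, chunk);
		offset += chunk;
	}

	if (rc) {
		/* Best effort; falls back to a plain wait on old firmware */
		efx_mcdi_nvram_update_finish(efx, type, EFX_UPDATE_FINISH_ABORT);
		return rc;
	}

	/* Background finish plus polling keeps the MCDI channel free while
	 * the firmware verifies the new image.
	 */
	return efx_mcdi_nvram_update_finish_polled(efx, type);
}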
#ifdef CONFIG_SFC_MTD
int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len,
size_t *retlen, u8 *buffer);
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index cd297e19cddc..b9866e389e6d 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -72,19 +72,19 @@
* | \------- Error
* \------------------------------ Resync (always set)
*
- * The client writes it's request into MC shared memory, and rings the
- * doorbell. Each request is completed by either by the MC writing
+ * The client writes its request into MC shared memory, and rings the
+ * doorbell. Each request is completed either by the MC writing
* back into shared memory, or by writing out an event.
*
* All MCDI commands support completion by shared memory response. Each
* request may also contain additional data (accounted for by HEADER.LEN),
- * and some response's may also contain additional data (again, accounted
+ * and some responses may also contain additional data (again, accounted
* for by HEADER.LEN).
*
* Some MCDI commands support completion by event, in which any associated
* response data is included in the event.
*
- * The protocol requires one response to be delivered for every request, a
+ * The protocol requires one response to be delivered for every request; a
* request should not be sent unless the response for the previous request
* has been received (either by polling shared memory, or by receiving
* an event).
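Illustrative sketch (not driver code): the "one response per request" rule
described above amounts to serialising MCDI traffic per channel. All names
below are hypothetical; the real implementation is efx_mcdi_rpc() and friends.

struct example_mcdi_chan {
	struct mutex lock;		/* at most one request outstanding */
};

static int example_mcdi_rpc(struct example_mcdi_chan *chan,
			    int (*post_request)(void),   /* write shared memory, ring doorbell */
			    int (*await_response)(void)) /* poll shared memory or wait for event */
{
	int rc;

	mutex_lock(&chan->lock);
	rc = post_request();
	if (!rc)
		/* Must complete before the next request may be posted */
		rc = await_response();
	mutex_unlock(&chan->lock);
	return rc;
}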
@@ -165,6 +165,7 @@
#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc
+
#define MC_CMD_ERR_CODE_OFST 0
#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4
@@ -321,7 +322,7 @@
/* enum: The requesting client is not a function */
#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c
/* enum: The requested operation might require the command to be passed between
- * MCs, and thetransport doesn't support that. Should only ever been seen over
+ * MCs, and the transport doesn't support that. Should only ever be seen over
* the UART.
*/
#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d
@@ -358,7 +359,7 @@
* sub-variant switching.
*/
#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
-/* enum: The clock whose frequency you've attempted to set set doesn't exist on
+/* enum: The clock whose frequency you've attempted to set doesn't exist on
* this NIC
*/
#define MC_CMD_ERR_NO_CLOCK 0x1015
@@ -387,25 +388,6 @@
*/
#define MC_CMD_ERR_PIOBUFS_PRESENT 0x101b
-/* MC_CMD_RESOURCE_SPECIFIER enum */
-/* enum: Any */
-#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
-#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe /* enum */
-
-/* MC_CMD_FPGA_FLASH_INDEX enum */
-#define MC_CMD_FPGA_FLASH_PRIMARY 0x0 /* enum */
-#define MC_CMD_FPGA_FLASH_SECONDARY 0x1 /* enum */
-
-/* MC_CMD_EXTERNAL_MAE_LINK_MODE enum */
-/* enum: Legacy mode as described in XN-200039-TC. */
-#define MC_CMD_EXTERNAL_MAE_LINK_MODE_LEGACY 0x0
-/* enum: Switchdev mode as described in XN-200039-TC. */
-#define MC_CMD_EXTERNAL_MAE_LINK_MODE_SWITCHDEV 0x1
-/* enum: Bootstrap mode as described in XN-200039-TC. */
-#define MC_CMD_EXTERNAL_MAE_LINK_MODE_BOOTSTRAP 0x2
-/* enum: Link-mode change is in-progress as described in XN-200039-TC. */
-#define MC_CMD_EXTERNAL_MAE_LINK_MODE_PENDING 0xf
-
/* PCIE_INTERFACE enum: From EF100 onwards, SFC products can have multiple PCIe
* interfaces. There is a need to refer to interfaces explicitly from drivers
* (for example, a management driver on one interface administering a function
@@ -424,6 +406,14 @@
* an on-NIC ARM module is expected to be connected.
*/
#define PCIE_INTERFACE_NIC_EMBEDDED 0x1
+/* enum: The PCIe logical interface 0. It is an alias for HOST_PRIMARY. */
+#define PCIE_INTERFACE_PCIE_HOST_INTF_0 0x0
+/* enum: The PCIe logical interface 1. */
+#define PCIE_INTERFACE_PCIE_HOST_INTF_1 0x2
+/* enum: The PCIe logical interface 2. */
+#define PCIE_INTERFACE_PCIE_HOST_INTF_2 0x3
+/* enum: The PCIe logical interface 3. */
+#define PCIE_INTERFACE_PCIE_HOST_INTF_3 0x4
/* enum: For MCDI commands issued over a PCIe interface, this value is
* translated into the interface over which the command was issued. Not
* meaningful for other MCDI transports.
@@ -640,7 +630,11 @@
* be allocated by different counter blocks, so e.g. AR counter 42 is different
* from CT counter 42. Generation counts are also type-specific. This value is
* also present in the header of streaming counter packets, in the IDENTIFIER
- * field (see packetiser packet format definitions).
+ * field (see packetiser packet format definitions). Also note that LACP
+ * counter IDs are not allocated individually; instead, the counter IDs are
+ * directly tied to the LACP balance table indices. These in turn are allocated
+ * in large contiguous blocks as a LAG config. Calling MAE_COUNTER_ALLOC/FREE
+ * with an LACP counter type will return EPERM.
*/
/* enum: Action Rule counters - can be referenced in AR response. */
#define MAE_COUNTER_TYPE_AR 0x0
@@ -648,6 +642,14 @@
#define MAE_COUNTER_TYPE_CT 0x1
/* enum: Outer Rule counters - can be referenced in OR response. */
#define MAE_COUNTER_TYPE_OR 0x2
+/* enum: LACP counters - linked to LACP balance table entries. */
+#define MAE_COUNTER_TYPE_LACP 0x3
+
+/* MAE_COUNTER_ID enum: ID of allocated counter or counter list. */
+/* enum: A counter ID that is guaranteed never to represent a real counter or
+ * counter list.
+ */
+#define MAE_COUNTER_ID_NULL 0xffffffff
/* TABLE_ID enum: Unique IDs for tables. The 32-bit ID values have been
* structured with bits [31:24] reserved (0), [23:16] indicating which major
@@ -656,7 +658,9 @@
* variations of the same table. (All of the tables currently defined within
* the streaming engines are listed here, but this does not imply that they are
* all supported - MC_CMD_TABLE_LIST returns the list of actually supported
- * tables.)
+ * tables.) The DPU offload engines' enumerators follow a deliberate pattern:
+ * 0x01010000 + is_dpu_net * 0x10000 + is_wr_or_tx * 0x8000 + is_lite_pipe *
+ * 0x1000 + oe_engine_type * 0x100 + oe_instance_within_pipe * 0x10
*/
/* enum: Outer_Rule_Table in the MAE - refer to SF-123102-TC. */
#define TABLE_ID_OUTER_RULE_TABLE 0x10000
@@ -694,45 +698,70 @@
#define TABLE_ID_RSS_CONTEXT_TABLE 0x20200
/* enum: Indirection_Table in VNIC Rx - refer to SF-123102-TC. */
#define TABLE_ID_INDIRECTION_TABLE 0x20300
-
-/* TABLE_COMPRESSED_VLAN enum: Compressed VLAN TPID as used by some field
- * types; can be calculated by (((ether_type_msb >> 2) & 0x4) ^ 0x4) |
- * (ether_type_msb & 0x3);
- */
-#define TABLE_COMPRESSED_VLAN_TPID_8100 0x5 /* enum */
-#define TABLE_COMPRESSED_VLAN_TPID_88A8 0x4 /* enum */
-#define TABLE_COMPRESSED_VLAN_TPID_9100 0x1 /* enum */
-#define TABLE_COMPRESSED_VLAN_TPID_9200 0x2 /* enum */
-#define TABLE_COMPRESSED_VLAN_TPID_9300 0x3 /* enum */
-
-/* TABLE_NAT_DIR enum: NAT direction. */
-#define TABLE_NAT_DIR_SOURCE 0x0 /* enum */
-#define TABLE_NAT_DIR_DEST 0x1 /* enum */
-
-/* TABLE_RSS_KEY_MODE enum: Defines how the value for Toeplitz hashing for RSS
- * is constructed as a concatenation (indicated here by "++") of packet header
- * fields.
- */
-/* enum: IP src addr ++ IP dst addr */
-#define TABLE_RSS_KEY_MODE_SA_DA 0x0
-/* enum: IP src addr ++ IP dst addr ++ TCP/UDP src port ++ TCP/UDP dst port */
-#define TABLE_RSS_KEY_MODE_SA_DA_SP_DP 0x1
-/* enum: IP src addr */
-#define TABLE_RSS_KEY_MODE_SA 0x2
-/* enum: IP dst addr */
-#define TABLE_RSS_KEY_MODE_DA 0x3
-/* enum: IP src addr ++ TCP/UDP src port */
-#define TABLE_RSS_KEY_MODE_SA_SP 0x4
-/* enum: IP dest addr ++ TCP dest port */
-#define TABLE_RSS_KEY_MODE_DA_DP 0x5
-/* enum: Nothing (produces input of 0, resulting in output hash of 0) */
-#define TABLE_RSS_KEY_MODE_NONE 0x7
-
-/* TABLE_RSS_SPREAD_MODE enum: RSS spreading mode. */
-/* enum: RSS uses Indirection_Table lookup. */
-#define TABLE_RSS_SPREAD_MODE_INDIRECTION 0x0
-/* enum: RSS uses even spreading calculation. */
-#define TABLE_RSS_SPREAD_MODE_EVEN 0x1
+/* enum: DPU.host read pipe first CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_HOST_RD_CRC0_OE_PROFILE 0x1010000
+/* enum: DPU.host read pipe second CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_HOST_RD_CRC1_OE_PROFILE 0x1010010
+/* enum: DPU.host write pipe first CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_HOST_WR_CRC0_OE_PROFILE 0x1018000
+/* enum: DPU.host write pipe second CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_HOST_WR_CRC1_OE_PROFILE 0x1018010
+/* enum: DPU.net 'full' receive pipe CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RX_CRC0_OE_PROFILE 0x1020000
+/* enum: DPU.net 'full' receive pipe first checksum offload engine profiles -
+ * refer to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RX_CSUM0_OE_PROFILE 0x1020100
+/* enum: DPU.net 'full' receive pipe second checksum offload engine profiles -
+ * refer to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RX_CSUM1_OE_PROFILE 0x1020110
+/* enum: DPU.net 'full' receive pipe AES-GCM offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RX_AES_GCM0_OE_PROFILE 0x1020200
+/* enum: DPU.net 'lite' receive pipe CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RXLITE_CRC0_OE_PROFILE 0x1021000
+/* enum: DPU.net 'lite' receive pipe checksum offload engine profiles - refer
+ * to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RXLITE_CSUM0_OE_PROFILE 0x1021100
+/* enum: DPU.net 'full' transmit pipe CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TX_CRC0_OE_PROFILE 0x1028000
+/* enum: DPU.net 'full' transmit pipe first checksum offload engine profiles -
+ * refer to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TX_CSUM0_OE_PROFILE 0x1028100
+/* enum: DPU.net 'full' transmit pipe second checksum offload engine profiles -
+ * refer to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TX_CSUM1_OE_PROFILE 0x1028110
+/* enum: DPU.net 'full' transmit pipe AES-GCM offload engine profiles - refer
+ * to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TX_AES_GCM0_OE_PROFILE 0x1028200
+/* enum: DPU.net 'lite' transmit pipe CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TXLITE_CRC0_OE_PROFILE 0x1029000
+/* enum: DPU.net 'lite' transmit pipe checksum offload engine profiles - refer
+ * to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TXLITE_CSUM0_OE_PROFILE 0x1029100
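Worked example (illustrative, not from the patch): applying the enumerator
pattern described above to TABLE_ID_DPU_NET_TX_CSUM1_OE_PROFILE, where the
values listed suggest engine type 0 for CRC, 1 for checksum and 2 for AES-GCM:

    0x01010000
  + 1 * 0x10000   (is_dpu_net: DPU.net rather than DPU.host)
  + 1 * 0x8000    (is_wr_or_tx: transmit pipe)
  + 0 * 0x1000    (is_lite_pipe: 'full' pipe)
  + 1 * 0x100     (oe_engine_type: checksum engine)
  + 1 * 0x10      (oe_instance_within_pipe: second instance)
  = 0x01028110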
/* TABLE_FIELD_ID enum: Unique IDs for fields. Related concepts have been
* loosely grouped together into blocks with gaps for expansion, but the values
@@ -1026,6 +1055,16 @@
#define TABLE_FIELD_ID_BAL_TBL_BASE_DIV64 0xde
/* enum: Length of balance table region: 0=>64, 1=>128, 2=>256. */
#define TABLE_FIELD_ID_BAL_TBL_LEN_ID 0xdf
+/* enum: LACP LAG ID (i.e. the low 3 bits of LACP LAG mport ID), indexing
+ * LACP_LAG_Config_Table. Refer to SF-123102-TC.
+ */
+#define TABLE_FIELD_ID_LACP_LAG_ID 0xe0
+/* enum: Address in LACP_Balance_Table. The balance table is partitioned
+ * between LAGs according to the settings in LACP_LAG_Config_Table and then
+ * indexed by the LACP hash, providing the mapping to destination mports. Refer
+ * to SF-123102-TC.
+ */
+#define TABLE_FIELD_ID_BAL_TBL_ADDR 0xe1
/* enum: UDP port to match for UDP-based encapsulations; required to be 0 for
* other encapsulation types.
*/
@@ -1082,6 +1121,55 @@
#define TABLE_FIELD_ID_INDIR_TBL_LEN_ID 0x105
/* enum: An offset to be applied to the base destination queue ID. */
#define TABLE_FIELD_ID_INDIR_OFFSET 0x106
+/* enum: DPU offload engine profile ID to address. */
+#define TABLE_FIELD_ID_OE_PROFILE 0x3e8
+/* enum: Width of the CRC to calculate - see CRC_VARIANT enum. */
+#define TABLE_FIELD_ID_CRC_VARIANT 0x3f2
+/* enum: If set, reflect the bits of each input byte, bit 7 is LSB, bit 0 is
+ * MSB. If clear, bit 7 is MSB, bit 0 is LSB.
+ */
+#define TABLE_FIELD_ID_CRC_REFIN 0x3f3
+/* enum: If set, reflect the bits of each output byte, bit 7 is LSB, bit 0 is
+ * MSB. If clear, bit 7 is MSB, bit 0 is LSB.
+ */
+#define TABLE_FIELD_ID_CRC_REFOUT 0x3f4
+/* enum: If set, invert every bit of the output value. */
+#define TABLE_FIELD_ID_CRC_INVOUT 0x3f5
+/* enum: The CRC polynomial to use for checksumming, in normal form. */
+#define TABLE_FIELD_ID_CRC_POLY 0x3f6
+/* enum: Operation for the checksum engine to perform - see DPU_CSUM_OP enum.
+ */
+#define TABLE_FIELD_ID_CSUM_OP 0x410
+/* enum: Byte offset of checksum relative to region_start (for VALIDATE_*
+ * operations only).
+ */
+#define TABLE_FIELD_ID_CSUM_OFFSET 0x411
+/* enum: Indicates there is additional data on OPR bus that needs to be
+ * incorporated into the payload checksum.
+ */
+#define TABLE_FIELD_ID_CSUM_OPR_ADDITIONAL_DATA 0x412
+/* enum: Log2 data size of additional data on OPR bus. */
+#define TABLE_FIELD_ID_CSUM_OPR_DATA_SIZE_LOG2 0x413
+/* enum: 4 byte offset of where to find the additional data on the OPR bus. */
+#define TABLE_FIELD_ID_CSUM_OPR_4B_OFF 0x414
+/* enum: Operation type for the AES-GCM core - see GCM_OP_CODE enum. */
+#define TABLE_FIELD_ID_GCM_OP_CODE 0x41a
+/* enum: Key length - AES_KEY_LEN enum. */
+#define TABLE_FIELD_ID_GCM_KEY_LEN 0x41b
+/* enum: OPR 4 byte offset for ICV or GHASH output (only in BULK_* mode) or
+ * IPSEC decrypt output.
+ */
+#define TABLE_FIELD_ID_GCM_OPR_4B_OFFSET 0x41c
+/* enum: If OP_CODE is BULK_*, indicates Emit GHASH (Fragment mode). Else,
+ * indicates IPSEC-ESN mode.
+ */
+#define TABLE_FIELD_ID_GCM_EMIT_GHASH_ISESN 0x41d
+/* enum: Replay Protection Enable. */
+#define TABLE_FIELD_ID_GCM_REPLAY_PROTECT_EN 0x41e
+/* enum: IPSEC Encrypt ESP trailer NEXT_HEADER byte. */
+#define TABLE_FIELD_ID_GCM_NEXT_HDR 0x41f
+/* enum: Replay Window Size. */
+#define TABLE_FIELD_ID_GCM_REPLAY_WIN_SIZE 0x420
/* MCDI_EVENT structuredef: The structure of an MCDI_EVENT on Siena/EF10/EF100
* platforms
@@ -1138,6 +1226,24 @@
#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_OFST 0
#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8
+#define MCDI_EVENT_PORT_LINKCHANGE_PORT_HANDLE_OFST 0
+#define MCDI_EVENT_PORT_LINKCHANGE_PORT_HANDLE_LBN 0
+#define MCDI_EVENT_PORT_LINKCHANGE_PORT_HANDLE_WIDTH 24
+#define MCDI_EVENT_PORT_LINKCHANGE_SEQ_NUM_OFST 0
+#define MCDI_EVENT_PORT_LINKCHANGE_SEQ_NUM_LBN 24
+#define MCDI_EVENT_PORT_LINKCHANGE_SEQ_NUM_WIDTH 7
+#define MCDI_EVENT_PORT_LINKCHANGE_LINK_UP_OFST 0
+#define MCDI_EVENT_PORT_LINKCHANGE_LINK_UP_LBN 31
+#define MCDI_EVENT_PORT_LINKCHANGE_LINK_UP_WIDTH 1
+#define MCDI_EVENT_PORT_MODULECHANGE_PORT_HANDLE_OFST 0
+#define MCDI_EVENT_PORT_MODULECHANGE_PORT_HANDLE_LBN 0
+#define MCDI_EVENT_PORT_MODULECHANGE_PORT_HANDLE_WIDTH 24
+#define MCDI_EVENT_PORT_MODULECHANGE_SEQ_NUM_OFST 0
+#define MCDI_EVENT_PORT_MODULECHANGE_SEQ_NUM_LBN 24
+#define MCDI_EVENT_PORT_MODULECHANGE_SEQ_NUM_WIDTH 7
+#define MCDI_EVENT_PORT_MODULECHANGE_MDI_CONNECTED_OFST 0
+#define MCDI_EVENT_PORT_MODULECHANGE_MDI_CONNECTED_LBN 31
+#define MCDI_EVENT_PORT_MODULECHANGE_MDI_CONNECTED_WIDTH 1
#define MCDI_EVENT_SENSOREVT_MONITOR_OFST 0
#define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0
#define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8
@@ -1237,7 +1343,7 @@
#define MCDI_EVENT_AOE_FPGA_LOAD_FAILED 0xe
/* enum: Notify that invalid flash type detected */
#define MCDI_EVENT_AOE_INVALID_FPGA_FLASH_TYPE 0xf
-/* enum: Notify that the attempt to run FPGA Controller firmware timedout */
+/* enum: Notify that the attempt to run FPGA Controller firmware timed out */
#define MCDI_EVENT_AOE_FC_RUN_TIMEDOUT 0x10
/* enum: Failure to probe one or more FPGA boot flash chips */
#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_INVALID 0x11
@@ -1255,7 +1361,7 @@
#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_WIDTH 8
/* enum: FC Assert happened, but the register information is not available */
#define MCDI_EVENT_AOE_ERR_FC_ASSERT_SEEN 0x0
-/* enum: The register information for FC Assert is ready for readinng by driver
+/* enum: The register information for FC Assert is ready for reading by driver
*/
#define MCDI_EVENT_AOE_ERR_FC_ASSERT_DATA_READY 0x1
#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_OFST 0
@@ -1364,6 +1470,12 @@
#define MCDI_EVENT_MODULECHANGE_SEQ_OFST 0
#define MCDI_EVENT_MODULECHANGE_SEQ_LBN 30
#define MCDI_EVENT_MODULECHANGE_SEQ_WIDTH 2
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_VI_ID_OFST 0
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_VI_ID_LBN 0
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_VI_ID_WIDTH 16
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_ID_OFST 0
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_ID_LBN 16
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_ID_WIDTH 16
#define MCDI_EVENT_DATA_LBN 0
#define MCDI_EVENT_DATA_WIDTH 32
/* Alias for PTP_DATA. */
@@ -1500,6 +1612,31 @@
* change to the journal.
*/
#define MCDI_EVENT_CODE_MPORT_JOURNAL_CHANGE 0x27
+/* enum: Notification that a source queue is enabled and attached to its proxy
+ * sink queue. SRC field contains the handle of the affected descriptor proxy
+ * function. DATA field contains the relative source queue number and absolute
+ * VI ID.
+ */
+#define MCDI_EVENT_CODE_DESC_PROXY_FUNC_QUEUE_START 0x28
+/* enum: Notification of a change in link state and/or link speed of a network
+ * port link. This event applies to a network port identified by a handle,
+ * PORT_HANDLE, which is discovered by the driver using the MC_CMD_ENUM_PORTS
+ * command.
+ */
+#define MCDI_EVENT_CODE_PORT_LINKCHANGE 0x29
+/* enum: Notification of a change in the state of an MDI (external connector)
+ * of a network port. This typically corresponds to module plug/unplug for
+ * modular interfaces (e.g., SFP/QSFP and similar) or cable connect/disconnect.
+ * This event applies to a network port identified by a handle, PORT_HANDLE,
+ * which is discovered by the driver using the MC_CMD_ENUM_PORTS command.
+ */
+#define MCDI_EVENT_CODE_PORT_MODULECHANGE 0x2a
+/* enum: Notification that the port enumeration journal has changed since it
+ * was last read and updates can be read using the MC_CMD_ENUM_PORTS command.
+ * The firmware may moderate the events so that an event is not sent for every
+ * change to the journal.
+ */
+#define MCDI_EVENT_CODE_ENUM_PORTS_CHANGE 0x2b
/* enum: Artificial event generated by host and posted via MC for test
* purposes.
*/
@@ -1512,6 +1649,14 @@
#define MCDI_EVENT_LINKCHANGE_DATA_LEN 4
#define MCDI_EVENT_LINKCHANGE_DATA_LBN 0
#define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32
+#define MCDI_EVENT_PORT_LINKCHANGE_DATA_OFST 0
+#define MCDI_EVENT_PORT_LINKCHANGE_DATA_LEN 4
+#define MCDI_EVENT_PORT_LINKCHANGE_DATA_LBN 0
+#define MCDI_EVENT_PORT_LINKCHANGE_DATA_WIDTH 32
+#define MCDI_EVENT_PORT_MODULECHANGE_DATA_OFST 0
+#define MCDI_EVENT_PORT_MODULECHANGE_DATA_LEN 4
+#define MCDI_EVENT_PORT_MODULECHANGE_DATA_LBN 0
+#define MCDI_EVENT_PORT_MODULECHANGE_DATA_WIDTH 32
#define MCDI_EVENT_SENSOREVT_DATA_OFST 0
#define MCDI_EVENT_SENSOREVT_DATA_LEN 4
#define MCDI_EVENT_SENSOREVT_DATA_LBN 0
@@ -1668,247 +1813,6 @@
#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_LBN 0
#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_WIDTH 32
-/* FCDI_EVENT structuredef */
-#define FCDI_EVENT_LEN 8
-#define FCDI_EVENT_CONT_LBN 32
-#define FCDI_EVENT_CONT_WIDTH 1
-#define FCDI_EVENT_LEVEL_LBN 33
-#define FCDI_EVENT_LEVEL_WIDTH 3
-/* enum: Info. */
-#define FCDI_EVENT_LEVEL_INFO 0x0
-/* enum: Warning. */
-#define FCDI_EVENT_LEVEL_WARN 0x1
-/* enum: Error. */
-#define FCDI_EVENT_LEVEL_ERR 0x2
-/* enum: Fatal. */
-#define FCDI_EVENT_LEVEL_FATAL 0x3
-#define FCDI_EVENT_DATA_OFST 0
-#define FCDI_EVENT_DATA_LEN 4
-#define FCDI_EVENT_LINK_STATE_STATUS_OFST 0
-#define FCDI_EVENT_LINK_STATE_STATUS_LBN 0
-#define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1
-#define FCDI_EVENT_LINK_DOWN 0x0 /* enum */
-#define FCDI_EVENT_LINK_UP 0x1 /* enum */
-#define FCDI_EVENT_DATA_LBN 0
-#define FCDI_EVENT_DATA_WIDTH 32
-#define FCDI_EVENT_SRC_LBN 36
-#define FCDI_EVENT_SRC_WIDTH 8
-#define FCDI_EVENT_EV_CODE_LBN 60
-#define FCDI_EVENT_EV_CODE_WIDTH 4
-#define FCDI_EVENT_CODE_LBN 44
-#define FCDI_EVENT_CODE_WIDTH 8
-/* enum: The FC was rebooted. */
-#define FCDI_EVENT_CODE_REBOOT 0x1
-/* enum: Bad assert. */
-#define FCDI_EVENT_CODE_ASSERT 0x2
-/* enum: DDR3 test result. */
-#define FCDI_EVENT_CODE_DDR_TEST_RESULT 0x3
-/* enum: Link status. */
-#define FCDI_EVENT_CODE_LINK_STATE 0x4
-/* enum: A timed read is ready to be serviced. */
-#define FCDI_EVENT_CODE_TIMED_READ 0x5
-/* enum: One or more PPS IN events */
-#define FCDI_EVENT_CODE_PPS_IN 0x6
-/* enum: Tick event from PTP clock */
-#define FCDI_EVENT_CODE_PTP_TICK 0x7
-/* enum: ECC error counters */
-#define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8
-/* enum: Current status of PTP */
-#define FCDI_EVENT_CODE_PTP_STATUS 0x9
-/* enum: Port id config to map MC-FC port idx */
-#define FCDI_EVENT_CODE_PORT_CONFIG 0xa
-/* enum: Boot result or error code */
-#define FCDI_EVENT_CODE_BOOT_RESULT 0xb
-#define FCDI_EVENT_REBOOT_SRC_LBN 36
-#define FCDI_EVENT_REBOOT_SRC_WIDTH 8
-#define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */
-#define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */
-#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
-#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LEN 4
-#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
-#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
-#define FCDI_EVENT_ASSERT_TYPE_LBN 36
-#define FCDI_EVENT_ASSERT_TYPE_WIDTH 8
-#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36
-#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8
-#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0
-#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LEN 4
-#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0
-#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32
-#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
-#define FCDI_EVENT_LINK_STATE_DATA_LEN 4
-#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
-#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
-#define FCDI_EVENT_PTP_STATE_OFST 0
-#define FCDI_EVENT_PTP_STATE_LEN 4
-#define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
-#define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
-#define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
-#define FCDI_EVENT_PTP_STATE_LBN 0
-#define FCDI_EVENT_PTP_STATE_WIDTH 32
-#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
-#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
-#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
-#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LEN 4
-#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
-#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
-/* Index of MC port being referred to */
-#define FCDI_EVENT_PORT_CONFIG_SRC_LBN 36
-#define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
-/* FC Port index that matches the MC port index in SRC */
-#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
-#define FCDI_EVENT_PORT_CONFIG_DATA_LEN 4
-#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
-#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
-#define FCDI_EVENT_BOOT_RESULT_OFST 0
-#define FCDI_EVENT_BOOT_RESULT_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */
-#define FCDI_EVENT_BOOT_RESULT_LBN 0
-#define FCDI_EVENT_BOOT_RESULT_WIDTH 32
-
-/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
- * to the MC. Note that this structure | is overlayed over a normal FCDI event
- * such that bits 32-63 containing | event code, level, source etc remain the
- * same. In this case the data | field of the header is defined to be the
- * number of timestamps
- */
-#define FCDI_EXTENDED_EVENT_PPS_LENMIN 16
-#define FCDI_EXTENDED_EVENT_PPS_LENMAX 248
-#define FCDI_EXTENDED_EVENT_PPS_LENMAX_MCDI2 1016
-#define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_NUM(len) (((len)-8)/8)
-/* Number of timestamps following */
-#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
-#define FCDI_EXTENDED_EVENT_PPS_COUNT_LEN 4
-#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
-#define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32
-/* Seconds field of a timestamp record */
-#define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8
-#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LEN 4
-#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64
-#define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32
-/* Nanoseconds field of a timestamp record */
-#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12
-#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LEN 4
-#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
-#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
-/* Timestamp records comprising the event */
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST 8
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN 8
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_OFST 8
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_LEN 4
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_LBN 64
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_WIDTH 32
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_LEN 4
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_LBN 96
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_WIDTH 32
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM_MCDI2 126
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64
-
-/* MUM_EVENT structuredef */
-#define MUM_EVENT_LEN 8
-#define MUM_EVENT_CONT_LBN 32
-#define MUM_EVENT_CONT_WIDTH 1
-#define MUM_EVENT_LEVEL_LBN 33
-#define MUM_EVENT_LEVEL_WIDTH 3
-/* enum: Info. */
-#define MUM_EVENT_LEVEL_INFO 0x0
-/* enum: Warning. */
-#define MUM_EVENT_LEVEL_WARN 0x1
-/* enum: Error. */
-#define MUM_EVENT_LEVEL_ERR 0x2
-/* enum: Fatal. */
-#define MUM_EVENT_LEVEL_FATAL 0x3
-#define MUM_EVENT_DATA_OFST 0
-#define MUM_EVENT_DATA_LEN 4
-#define MUM_EVENT_SENSOR_ID_OFST 0
-#define MUM_EVENT_SENSOR_ID_LBN 0
-#define MUM_EVENT_SENSOR_ID_WIDTH 8
-/* Enum values, see field(s): */
-/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
-#define MUM_EVENT_SENSOR_STATE_OFST 0
-#define MUM_EVENT_SENSOR_STATE_LBN 8
-#define MUM_EVENT_SENSOR_STATE_WIDTH 8
-#define MUM_EVENT_PORT_PHY_READY_OFST 0
-#define MUM_EVENT_PORT_PHY_READY_LBN 0
-#define MUM_EVENT_PORT_PHY_READY_WIDTH 1
-#define MUM_EVENT_PORT_PHY_LINK_UP_OFST 0
-#define MUM_EVENT_PORT_PHY_LINK_UP_LBN 1
-#define MUM_EVENT_PORT_PHY_LINK_UP_WIDTH 1
-#define MUM_EVENT_PORT_PHY_TX_LOL_OFST 0
-#define MUM_EVENT_PORT_PHY_TX_LOL_LBN 2
-#define MUM_EVENT_PORT_PHY_TX_LOL_WIDTH 1
-#define MUM_EVENT_PORT_PHY_RX_LOL_OFST 0
-#define MUM_EVENT_PORT_PHY_RX_LOL_LBN 3
-#define MUM_EVENT_PORT_PHY_RX_LOL_WIDTH 1
-#define MUM_EVENT_PORT_PHY_TX_LOS_OFST 0
-#define MUM_EVENT_PORT_PHY_TX_LOS_LBN 4
-#define MUM_EVENT_PORT_PHY_TX_LOS_WIDTH 1
-#define MUM_EVENT_PORT_PHY_RX_LOS_OFST 0
-#define MUM_EVENT_PORT_PHY_RX_LOS_LBN 5
-#define MUM_EVENT_PORT_PHY_RX_LOS_WIDTH 1
-#define MUM_EVENT_PORT_PHY_TX_FAULT_OFST 0
-#define MUM_EVENT_PORT_PHY_TX_FAULT_LBN 6
-#define MUM_EVENT_PORT_PHY_TX_FAULT_WIDTH 1
-#define MUM_EVENT_DATA_LBN 0
-#define MUM_EVENT_DATA_WIDTH 32
-#define MUM_EVENT_SRC_LBN 36
-#define MUM_EVENT_SRC_WIDTH 8
-#define MUM_EVENT_EV_CODE_LBN 60
-#define MUM_EVENT_EV_CODE_WIDTH 4
-#define MUM_EVENT_CODE_LBN 44
-#define MUM_EVENT_CODE_WIDTH 8
-/* enum: The MUM was rebooted. */
-#define MUM_EVENT_CODE_REBOOT 0x1
-/* enum: Bad assert. */
-#define MUM_EVENT_CODE_ASSERT 0x2
-/* enum: Sensor failure. */
-#define MUM_EVENT_CODE_SENSOR 0x3
-/* enum: Link fault has been asserted, or has cleared. */
-#define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4
-#define MUM_EVENT_SENSOR_DATA_OFST 0
-#define MUM_EVENT_SENSOR_DATA_LEN 4
-#define MUM_EVENT_SENSOR_DATA_LBN 0
-#define MUM_EVENT_SENSOR_DATA_WIDTH 32
-#define MUM_EVENT_PORT_PHY_FLAGS_OFST 0
-#define MUM_EVENT_PORT_PHY_FLAGS_LEN 4
-#define MUM_EVENT_PORT_PHY_FLAGS_LBN 0
-#define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32
-#define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0
-#define MUM_EVENT_PORT_PHY_COPPER_LEN_LEN 4
-#define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0
-#define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32
-#define MUM_EVENT_PORT_PHY_CAPS_OFST 0
-#define MUM_EVENT_PORT_PHY_CAPS_LEN 4
-#define MUM_EVENT_PORT_PHY_CAPS_LBN 0
-#define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32
-#define MUM_EVENT_PORT_PHY_TECH_OFST 0
-#define MUM_EVENT_PORT_PHY_TECH_LEN 4
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE_EQUALIZED 0x3 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LIMITING 0x4 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LINEAR 0x5 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_BASE_T 0x6 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_LOOPBACK_PASSIVE 0x7 /* enum */
-#define MUM_EVENT_PORT_PHY_TECH_LBN 0
-#define MUM_EVENT_PORT_PHY_TECH_WIDTH 32
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_LBN 36
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_WIDTH 4
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_FLAGS 0x0 /* enum */
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_COPPER_LEN 0x1 /* enum */
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_CAPS 0x2 /* enum */
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_TECH 0x3 /* enum */
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_MAX 0x4 /* enum */
-#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_LBN 40
-#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_WIDTH 4
-
/***********************************/
/* MC_CMD_READ32
@@ -1969,90 +1873,6 @@
/***********************************/
-/* MC_CMD_COPYCODE
- * Copy MC code between two locations and jump. Note - this command really
- * belongs to INSECURE category but is required by shmboot. The command handler
- * has additional checks to reject insecure calls.
- */
-#define MC_CMD_COPYCODE 0x3
-#undef MC_CMD_0x3_PRIVILEGE_CTG
-
-#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_COPYCODE_IN msgrequest */
-#define MC_CMD_COPYCODE_IN_LEN 16
-/* Source address
- *
- * The main image should be entered via a copy of a single word from and to a
- * magic address, which controls various aspects of the boot. The magic address
- * is a bitfield, with each bit as documented below.
- */
-#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
-#define MC_CMD_COPYCODE_IN_SRC_ADDR_LEN 4
-/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */
-#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
-/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and
- * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED (see below)
- */
-#define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0
-/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT,
- * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED and BOOT_MAGIC_IGNORE_CONFIG (see
- * below)
- */
-#define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_WIDTH 1
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_WIDTH 1
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_WIDTH 1
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_LBN 4
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_LBN 6
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1
-/* Destination address */
-#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
-#define MC_CMD_COPYCODE_IN_DEST_ADDR_LEN 4
-#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
-#define MC_CMD_COPYCODE_IN_NUMWORDS_LEN 4
-/* Address of where to jump after copy. */
-#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
-#define MC_CMD_COPYCODE_IN_JUMP_LEN 4
-/* enum: Control should return to the caller rather than jumping */
-#define MC_CMD_COPYCODE_JUMP_NONE 0x1
-
-/* MC_CMD_COPYCODE_OUT msgresponse */
-#define MC_CMD_COPYCODE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_SET_FUNC
- * Select function for function-specific commands.
- */
-#define MC_CMD_SET_FUNC 0x4
-#undef MC_CMD_0x4_PRIVILEGE_CTG
-
-#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_SET_FUNC_IN msgrequest */
-#define MC_CMD_SET_FUNC_IN_LEN 4
-/* Set function */
-#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
-#define MC_CMD_SET_FUNC_IN_FUNC_LEN 4
-
-/* MC_CMD_SET_FUNC_OUT msgresponse */
-#define MC_CMD_SET_FUNC_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_GET_BOOT_STATUS
* Get the instruction address from which the MC booted.
*/
@@ -2259,6 +2079,7 @@
/* Log destination */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_LEN 4
+/* enum property: bitmask */
/* enum: UART. */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
/* enum: Event queue. */
@@ -2304,6 +2125,9 @@
/* MC_CMD_GET_VERSION_OUT msgresponse */
#define MC_CMD_GET_VERSION_OUT_LEN 32
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -2326,6 +2150,9 @@
/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -2356,6 +2183,9 @@
* (depending on which components exist on a particular adapter)
*/
#define MC_CMD_GET_VERSION_V2_OUT_LEN 304
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -2495,6 +2325,9 @@
* (depending on which components exist on a particular adapter)
*/
#define MC_CMD_GET_VERSION_V3_OUT_LEN 328
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -2641,6 +2474,9 @@
* version information
*/
#define MC_CMD_GET_VERSION_V4_OUT_LEN 392
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -2803,6 +2639,9 @@
* and board version information
*/
#define MC_CMD_GET_VERSION_V5_OUT_LEN 424
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -3065,8 +2904,18 @@
* subscribers.
*/
#define MC_CMD_PTP_OP_SET_SYNC_STATUS 0x1b
-/* enum: Above this for future use. */
-#define MC_CMD_PTP_OP_MAX 0x1c
+/* enum: X4 and later adapters should use this instead of
+ * PTP_OP_TIME_EVENT_SUBSCRIBE. Subscribe to receive periodic time events
+ * indicating the current NIC time
+ */
+#define MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE_V2 0x1c
+/* enum: For X4 and later NICs. Packet timestamps and time sync events carry
+ * IS_SET and IN_SYNC flags that indicate whether the time has been set and
+ * whether it is in sync with the host. Once set, the IN_SYNC flag is cleared
+ * by hardware after a software-configurable timeout. The host driver needs to
+ * query the currently configured timeout and the maximum supported interval;
+ * this MCDI operation returns both.
+ */
+#define MC_CMD_PTP_OP_GET_SYNC_TIMEOUT 0x1d
/* MC_CMD_PTP_IN_ENABLE msgrequest */
#define MC_CMD_PTP_IN_ENABLE_LEN 16
@@ -3507,6 +3356,22 @@
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_LEN 4
+/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2 msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Event queue ID */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_QUEUE_ID_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_QUEUE_ID_LEN 4
+/* Space for flags. */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_FLAGS_OFST 12
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_FLAGS_LEN 4
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_REPORT_SYNC_STATUS_OFST 12
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_REPORT_SYNC_STATUS_LBN 31
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_REPORT_SYNC_STATUS_WIDTH 1
+
/* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */
#define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
@@ -3540,6 +3405,13 @@
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_LEN 4
+/* MC_CMD_PTP_IN_GET_SYNC_TIMEOUT msgrequest */
+#define MC_CMD_PTP_IN_GET_SYNC_TIMEOUT_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
/* MC_CMD_PTP_OUT msgresponse */
#define MC_CMD_PTP_OUT_LEN 0
@@ -3939,416 +3811,14 @@
/* MC_CMD_PTP_OUT_SET_SYNC_STATUS msgresponse */
#define MC_CMD_PTP_OUT_SET_SYNC_STATUS_LEN 0
-
-/***********************************/
-/* MC_CMD_CSR_READ32
- * Read 32bit words from the indirect memory map.
- */
-#define MC_CMD_CSR_READ32 0xc
-#undef MC_CMD_0xc_PRIVILEGE_CTG
-
-#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_CSR_READ32_IN msgrequest */
-#define MC_CMD_CSR_READ32_IN_LEN 12
-/* Address */
-#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
-#define MC_CMD_CSR_READ32_IN_ADDR_LEN 4
-#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
-#define MC_CMD_CSR_READ32_IN_STEP_LEN 4
-#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
-#define MC_CMD_CSR_READ32_IN_NUMWORDS_LEN 4
-
-/* MC_CMD_CSR_READ32_OUT msgresponse */
-#define MC_CMD_CSR_READ32_OUT_LENMIN 4
-#define MC_CMD_CSR_READ32_OUT_LENMAX 252
-#define MC_CMD_CSR_READ32_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_CSR_READ32_OUT_BUFFER_NUM(len) (((len)-0)/4)
-/* The last dword is the status, not a value read */
-#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
-#define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4
-#define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1
-#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM 63
-#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM_MCDI2 255
-
-
-/***********************************/
-/* MC_CMD_CSR_WRITE32
- * Write 32bit dwords to the indirect memory map.
- */
-#define MC_CMD_CSR_WRITE32 0xd
-#undef MC_CMD_0xd_PRIVILEGE_CTG
-
-#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_CSR_WRITE32_IN msgrequest */
-#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
-#define MC_CMD_CSR_WRITE32_IN_LENMAX 252
-#define MC_CMD_CSR_WRITE32_IN_LENMAX_MCDI2 1020
-#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_NUM(len) (((len)-8)/4)
-/* Address */
-#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
-#define MC_CMD_CSR_WRITE32_IN_ADDR_LEN 4
-#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
-#define MC_CMD_CSR_WRITE32_IN_STEP_LEN 4
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM 61
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM_MCDI2 253
-
-/* MC_CMD_CSR_WRITE32_OUT msgresponse */
-#define MC_CMD_CSR_WRITE32_OUT_LEN 4
-#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
-#define MC_CMD_CSR_WRITE32_OUT_STATUS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_HP
- * These commands are used for HP related features. They are grouped under one
- * MCDI command to avoid creating too many MCDI commands.
- */
-#define MC_CMD_HP 0x54
-#undef MC_CMD_0x54_PRIVILEGE_CTG
-
-#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_HP_IN msgrequest */
-#define MC_CMD_HP_IN_LEN 16
-/* HP OCSD sub-command. When address is not NULL, request activation of OCSD at
- * the specified address with the specified interval.When address is NULL,
- * INTERVAL is interpreted as a command: 0: stop OCSD / 1: Report OCSD current
- * state / 2: (debug) Show temperature reported by one of the supported
- * sensors.
- */
-#define MC_CMD_HP_IN_SUBCMD_OFST 0
-#define MC_CMD_HP_IN_SUBCMD_LEN 4
-/* enum: OCSD (Option Card Sensor Data) sub-command. */
-#define MC_CMD_HP_IN_OCSD_SUBCMD 0x0
-/* enum: Last known valid HP sub-command. */
-#define MC_CMD_HP_IN_LAST_SUBCMD 0x0
-/* The address to the array of sensor fields. (Or NULL to use a sub-command.)
- */
-#define MC_CMD_HP_IN_OCSD_ADDR_OFST 4
-#define MC_CMD_HP_IN_OCSD_ADDR_LEN 8
-#define MC_CMD_HP_IN_OCSD_ADDR_LO_OFST 4
-#define MC_CMD_HP_IN_OCSD_ADDR_LO_LEN 4
-#define MC_CMD_HP_IN_OCSD_ADDR_LO_LBN 32
-#define MC_CMD_HP_IN_OCSD_ADDR_LO_WIDTH 32
-#define MC_CMD_HP_IN_OCSD_ADDR_HI_OFST 8
-#define MC_CMD_HP_IN_OCSD_ADDR_HI_LEN 4
-#define MC_CMD_HP_IN_OCSD_ADDR_HI_LBN 64
-#define MC_CMD_HP_IN_OCSD_ADDR_HI_WIDTH 32
-/* The requested update interval, in seconds. (Or the sub-command if ADDR is
- * NULL.)
- */
-#define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12
-#define MC_CMD_HP_IN_OCSD_INTERVAL_LEN 4
-
-/* MC_CMD_HP_OUT msgresponse */
-#define MC_CMD_HP_OUT_LEN 4
-#define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0
-#define MC_CMD_HP_OUT_OCSD_STATUS_LEN 4
-/* enum: OCSD stopped for this card. */
-#define MC_CMD_HP_OUT_OCSD_STOPPED 0x1
-/* enum: OCSD was successfully started with the address provided. */
-#define MC_CMD_HP_OUT_OCSD_STARTED 0x2
-/* enum: OCSD was already started for this card. */
-#define MC_CMD_HP_OUT_OCSD_ALREADY_STARTED 0x3
-
-
-/***********************************/
-/* MC_CMD_STACKINFO
- * Get stack information.
- */
-#define MC_CMD_STACKINFO 0xf
-#undef MC_CMD_0xf_PRIVILEGE_CTG
-
-#define MC_CMD_0xf_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_STACKINFO_IN msgrequest */
-#define MC_CMD_STACKINFO_IN_LEN 0
-
-/* MC_CMD_STACKINFO_OUT msgresponse */
-#define MC_CMD_STACKINFO_OUT_LENMIN 12
-#define MC_CMD_STACKINFO_OUT_LENMAX 252
-#define MC_CMD_STACKINFO_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num))
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_NUM(len) (((len)-0)/12)
-/* (thread ptr, stack size, free space) for each thread in system */
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM 21
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM_MCDI2 85
-
-
-/***********************************/
-/* MC_CMD_MDIO_READ
- * MDIO register read.
- */
-#define MC_CMD_MDIO_READ 0x10
-#undef MC_CMD_0x10_PRIVILEGE_CTG
-
-#define MC_CMD_0x10_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_MDIO_READ_IN msgrequest */
-#define MC_CMD_MDIO_READ_IN_LEN 16
-/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
- * external devices.
- */
-#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
-#define MC_CMD_MDIO_READ_IN_BUS_LEN 4
-/* enum: Internal. */
-#define MC_CMD_MDIO_BUS_INTERNAL 0x0
-/* enum: External. */
-#define MC_CMD_MDIO_BUS_EXTERNAL 0x1
-/* Port address */
-#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
-#define MC_CMD_MDIO_READ_IN_PRTAD_LEN 4
-/* Device Address or clause 22. */
-#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
-#define MC_CMD_MDIO_READ_IN_DEVAD_LEN 4
-/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you
- * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
- */
-#define MC_CMD_MDIO_CLAUSE22 0x20
-/* Address */
-#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
-#define MC_CMD_MDIO_READ_IN_ADDR_LEN 4
-
-/* MC_CMD_MDIO_READ_OUT msgresponse */
-#define MC_CMD_MDIO_READ_OUT_LEN 8
-/* Value */
-#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
-#define MC_CMD_MDIO_READ_OUT_VALUE_LEN 4
-/* Status the MDIO commands return the raw status bits from the MDIO block. A
- * "good" transaction should have the DONE bit set and all other bits clear.
- */
-#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
-#define MC_CMD_MDIO_READ_OUT_STATUS_LEN 4
-/* enum: Good. */
-#define MC_CMD_MDIO_STATUS_GOOD 0x8
-
-
-/***********************************/
-/* MC_CMD_MDIO_WRITE
- * MDIO register write.
- */
-#define MC_CMD_MDIO_WRITE 0x11
-#undef MC_CMD_0x11_PRIVILEGE_CTG
-
-#define MC_CMD_0x11_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_MDIO_WRITE_IN msgrequest */
-#define MC_CMD_MDIO_WRITE_IN_LEN 20
-/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
- * external devices.
- */
-#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
-#define MC_CMD_MDIO_WRITE_IN_BUS_LEN 4
-/* enum: Internal. */
-/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */
-/* enum: External. */
-/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
-/* Port address */
-#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
-#define MC_CMD_MDIO_WRITE_IN_PRTAD_LEN 4
-/* Device Address or clause 22. */
-#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
-#define MC_CMD_MDIO_WRITE_IN_DEVAD_LEN 4
-/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you
- * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
- */
-/* MC_CMD_MDIO_CLAUSE22 0x20 */
-/* Address */
-#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
-#define MC_CMD_MDIO_WRITE_IN_ADDR_LEN 4
-/* Value */
-#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
-#define MC_CMD_MDIO_WRITE_IN_VALUE_LEN 4
-
-/* MC_CMD_MDIO_WRITE_OUT msgresponse */
-#define MC_CMD_MDIO_WRITE_OUT_LEN 4
-/* Status; the MDIO commands return the raw status bits from the MDIO block. A
- * "good" transaction should have the DONE bit set and all other bits clear.
- */
-#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
-#define MC_CMD_MDIO_WRITE_OUT_STATUS_LEN 4
-/* enum: Good. */
-/* MC_CMD_MDIO_STATUS_GOOD 0x8 */
-
-
-/***********************************/
-/* MC_CMD_DBI_WRITE
- * Write DBI register(s).
- */
-#define MC_CMD_DBI_WRITE 0x12
-#undef MC_CMD_0x12_PRIVILEGE_CTG
-
-#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_DBI_WRITE_IN msgrequest */
-#define MC_CMD_DBI_WRITE_IN_LENMIN 12
-#define MC_CMD_DBI_WRITE_IN_LENMAX 252
-#define MC_CMD_DBI_WRITE_IN_LENMAX_MCDI2 1020
-#define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num))
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_NUM(len) (((len)-0)/12)
-/* Each write op consists of an address (offset 0), byte enable/VF/CS2 (offset
- * 32) and value (offset 64). See MC_CMD_DBIWROP_TYPEDEF.
- */
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM 21
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM_MCDI2 85
-
-/* MC_CMD_DBI_WRITE_OUT msgresponse */
-#define MC_CMD_DBI_WRITE_OUT_LEN 0
-
-/* MC_CMD_DBIWROP_TYPEDEF structuredef */
-#define MC_CMD_DBIWROP_TYPEDEF_LEN 12
-#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
-#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LEN 4
-#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
-#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
-#define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4
-#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LEN 4
-#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_OFST 4
-#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16
-#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16
-#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_OFST 4
-#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15
-#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_WIDTH 1
-#define MC_CMD_DBIWROP_TYPEDEF_CS2_OFST 4
-#define MC_CMD_DBIWROP_TYPEDEF_CS2_LBN 14
-#define MC_CMD_DBIWROP_TYPEDEF_CS2_WIDTH 1
-#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32
-#define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32
-#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
-#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LEN 4
-#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
-#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_PORT_READ32
- * Read a 32-bit register from the indirect port register map. The port to
- * access is implied by the Shared memory channel used.
- */
-#define MC_CMD_PORT_READ32 0x14
-
-/* MC_CMD_PORT_READ32_IN msgrequest */
-#define MC_CMD_PORT_READ32_IN_LEN 4
-/* Address */
-#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
-#define MC_CMD_PORT_READ32_IN_ADDR_LEN 4
-
-/* MC_CMD_PORT_READ32_OUT msgresponse */
-#define MC_CMD_PORT_READ32_OUT_LEN 8
-/* Value */
-#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
-#define MC_CMD_PORT_READ32_OUT_VALUE_LEN 4
-/* Status */
-#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
-#define MC_CMD_PORT_READ32_OUT_STATUS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PORT_WRITE32
- * Write a 32-bit register to the indirect port register map. The port to
- * access is implied by the Shared memory channel used.
- */
-#define MC_CMD_PORT_WRITE32 0x15
-
-/* MC_CMD_PORT_WRITE32_IN msgrequest */
-#define MC_CMD_PORT_WRITE32_IN_LEN 8
-/* Address */
-#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
-#define MC_CMD_PORT_WRITE32_IN_ADDR_LEN 4
-/* Value */
-#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
-#define MC_CMD_PORT_WRITE32_IN_VALUE_LEN 4
-
-/* MC_CMD_PORT_WRITE32_OUT msgresponse */
-#define MC_CMD_PORT_WRITE32_OUT_LEN 4
-/* Status */
-#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
-#define MC_CMD_PORT_WRITE32_OUT_STATUS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PORT_READ128
- * Read a 128-bit register from the indirect port register map. The port to
- * access is implied by the Shared memory channel used.
- */
-#define MC_CMD_PORT_READ128 0x16
-
-/* MC_CMD_PORT_READ128_IN msgrequest */
-#define MC_CMD_PORT_READ128_IN_LEN 4
-/* Address */
-#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
-#define MC_CMD_PORT_READ128_IN_ADDR_LEN 4
-
-/* MC_CMD_PORT_READ128_OUT msgresponse */
-#define MC_CMD_PORT_READ128_OUT_LEN 20
-/* Value */
-#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0
-#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
-/* Status */
-#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
-#define MC_CMD_PORT_READ128_OUT_STATUS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PORT_WRITE128
- * Write a 128-bit register to the indirect port register map. The port to
- * access is implied by the Shared memory channel used.
- */
-#define MC_CMD_PORT_WRITE128 0x17
-
-/* MC_CMD_PORT_WRITE128_IN msgrequest */
-#define MC_CMD_PORT_WRITE128_IN_LEN 20
-/* Address */
-#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
-#define MC_CMD_PORT_WRITE128_IN_ADDR_LEN 4
-/* Value */
-#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
-#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
-
-/* MC_CMD_PORT_WRITE128_OUT msgresponse */
-#define MC_CMD_PORT_WRITE128_OUT_LEN 4
-/* Status */
-#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
-#define MC_CMD_PORT_WRITE128_OUT_STATUS_LEN 4
-
-/* MC_CMD_CAPABILITIES structuredef */
-#define MC_CMD_CAPABILITIES_LEN 4
-/* Small buf table. */
-#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0
-#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 1
-/* Turbo mode (for Maranello). */
-#define MC_CMD_CAPABILITIES_TURBO_LBN 1
-#define MC_CMD_CAPABILITIES_TURBO_WIDTH 1
-/* Turbo mode active (for Maranello). */
-#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 2
-#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 1
-/* PTP offload. */
-#define MC_CMD_CAPABILITIES_PTP_LBN 3
-#define MC_CMD_CAPABILITIES_PTP_WIDTH 1
-/* AOE mode. */
-#define MC_CMD_CAPABILITIES_AOE_LBN 4
-#define MC_CMD_CAPABILITIES_AOE_WIDTH 1
-/* AOE mode active. */
-#define MC_CMD_CAPABILITIES_AOE_ACTIVE_LBN 5
-#define MC_CMD_CAPABILITIES_AOE_ACTIVE_WIDTH 1
-/* AOE mode active. */
-#define MC_CMD_CAPABILITIES_FC_ACTIVE_LBN 6
-#define MC_CMD_CAPABILITIES_FC_ACTIVE_WIDTH 1
-#define MC_CMD_CAPABILITIES_RESERVED_LBN 7
-#define MC_CMD_CAPABILITIES_RESERVED_WIDTH 25
+/* MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT msgresponse */
+#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_LEN 8
+/* Current value set in NIC, in seconds */
+#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_CURRENT_OFST 0
+#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_CURRENT_LEN 4
+/* Maximum supported by NIC, in seconds */
+#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_MAXIMUM_OFST 4
+#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_MAXIMUM_LEN 4
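An editorial sketch (not part of the patch) of how a caller might decode this response, assuming the MCDI payload is a little-endian byte buffer and that this header is included for the _OFST constants:

#include <stdint.h>
#include <stddef.h>

/* Read one little-endian 32-bit MCDI dword at a byte offset. */
static uint32_t mcdi_le32(const uint8_t *buf, size_t ofst)
{
	return (uint32_t)buf[ofst] |
	       ((uint32_t)buf[ofst + 1] << 8) |
	       ((uint32_t)buf[ofst + 2] << 16) |
	       ((uint32_t)buf[ofst + 3] << 24);
}

static void get_sync_timeout_example(const uint8_t *resp)
{
	/* Current and maximum PTP sync timeout, both in seconds. */
	uint32_t cur = mcdi_le32(resp, MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_CURRENT_OFST);
	uint32_t max = mcdi_le32(resp, MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_MAXIMUM_OFST);
	(void)cur;
	(void)max;
}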
/***********************************/
@@ -4427,112 +3897,6 @@
/***********************************/
-/* MC_CMD_DBI_READX
- * Read DBI register(s) -- extended functionality
- */
-#define MC_CMD_DBI_READX 0x19
-#undef MC_CMD_0x19_PRIVILEGE_CTG
-
-#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_DBI_READX_IN msgrequest */
-#define MC_CMD_DBI_READX_IN_LENMIN 8
-#define MC_CMD_DBI_READX_IN_LENMAX 248
-#define MC_CMD_DBI_READX_IN_LENMAX_MCDI2 1016
-#define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num))
-#define MC_CMD_DBI_READX_IN_DBIRDOP_NUM(len) (((len)-0)/8)
-/* Each Read op consists of an address (offset 0), VF/CS2) */
-#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0
-#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8
-#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0
-#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_LEN 4
-#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_LBN 0
-#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_WIDTH 32
-#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_OFST 4
-#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_LEN 4
-#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_LBN 32
-#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_WIDTH 32
-#define MC_CMD_DBI_READX_IN_DBIRDOP_MINNUM 1
-#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM 31
-#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM_MCDI2 127
-
-/* MC_CMD_DBI_READX_OUT msgresponse */
-#define MC_CMD_DBI_READX_OUT_LENMIN 4
-#define MC_CMD_DBI_READX_OUT_LENMAX 252
-#define MC_CMD_DBI_READX_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_DBI_READX_OUT_VALUE_NUM(len) (((len)-0)/4)
-/* Value */
-#define MC_CMD_DBI_READX_OUT_VALUE_OFST 0
-#define MC_CMD_DBI_READX_OUT_VALUE_LEN 4
-#define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1
-#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63
-#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM_MCDI2 255
-
-/* MC_CMD_DBIRDOP_TYPEDEF structuredef */
-#define MC_CMD_DBIRDOP_TYPEDEF_LEN 8
-#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0
-#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LEN 4
-#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0
-#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32
-#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4
-#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LEN 4
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_OFST 4
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_OFST 4
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_WIDTH 1
-#define MC_CMD_DBIRDOP_TYPEDEF_CS2_OFST 4
-#define MC_CMD_DBIRDOP_TYPEDEF_CS2_LBN 14
-#define MC_CMD_DBIRDOP_TYPEDEF_CS2_WIDTH 1
-#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LBN 32
-#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_SET_RAND_SEED
- * Set the 16byte seed for the MC pseudo-random generator.
- */
-#define MC_CMD_SET_RAND_SEED 0x1a
-#undef MC_CMD_0x1a_PRIVILEGE_CTG
-
-#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_SET_RAND_SEED_IN msgrequest */
-#define MC_CMD_SET_RAND_SEED_IN_LEN 16
-/* Seed value. */
-#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0
-#define MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16
-
-/* MC_CMD_SET_RAND_SEED_OUT msgresponse */
-#define MC_CMD_SET_RAND_SEED_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_LTSSM_HIST
- * Retrieve the history of the LTSSM, if the build supports it.
- */
-#define MC_CMD_LTSSM_HIST 0x1b
-
-/* MC_CMD_LTSSM_HIST_IN msgrequest */
-#define MC_CMD_LTSSM_HIST_IN_LEN 0
-
-/* MC_CMD_LTSSM_HIST_OUT msgresponse */
-#define MC_CMD_LTSSM_HIST_OUT_LENMIN 0
-#define MC_CMD_LTSSM_HIST_OUT_LENMAX 252
-#define MC_CMD_LTSSM_HIST_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_LTSSM_HIST_OUT_DATA_NUM(len) (((len)-0)/4)
-/* variable number of LTSSM values, as bytes. The history is read-to-clear. */
-#define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0
-#define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4
-#define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0
-#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM 63
-#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM_MCDI2 255
-
-
-/***********************************/
/* MC_CMD_DRV_ATTACH
* Inform MCPU that this port is managed on the host (i.e. driver active). For
* Huntington, also request the preferred datapath firmware to use if possible
@@ -4705,6 +4069,7 @@
/* Flags associated with this function */
#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_LEN 4
+/* enum property: bitshift */
/* enum: Labels the lowest-numbered function visible to the OS */
#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0
/* enum: The function can control the link state of the physical port it is
@@ -4732,22 +4097,6 @@
/***********************************/
-/* MC_CMD_SHMUART
- * Route UART output to circular buffer in shared memory instead.
- */
-#define MC_CMD_SHMUART 0x1f
-
-/* MC_CMD_SHMUART_IN msgrequest */
-#define MC_CMD_SHMUART_IN_LEN 4
-/* ??? */
-#define MC_CMD_SHMUART_IN_FLAG_OFST 0
-#define MC_CMD_SHMUART_IN_FLAG_LEN 4
-
-/* MC_CMD_SHMUART_OUT msgresponse */
-#define MC_CMD_SHMUART_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_PORT_RESET
* Generic per-port reset. There is no equivalent for per-board reset. Locks
* required: None; Return code: 0, ETIME. NOTE: This command is deprecated -
@@ -4790,100 +4139,6 @@
/***********************************/
-/* MC_CMD_PCIE_CREDITS
- * Read instantaneous and minimum flow control thresholds.
- */
-#define MC_CMD_PCIE_CREDITS 0x21
-
-/* MC_CMD_PCIE_CREDITS_IN msgrequest */
-#define MC_CMD_PCIE_CREDITS_IN_LEN 8
-/* poll period. 0 is disabled */
-#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
-#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_LEN 4
-/* wipe statistics */
-#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
-#define MC_CMD_PCIE_CREDITS_IN_WIPE_LEN 4
-
-/* MC_CMD_PCIE_CREDITS_OUT msgresponse */
-#define MC_CMD_PCIE_CREDITS_OUT_LEN 16
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_OFST 0
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_OFST 2
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_OFST 4
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_OFST 6
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_OFST 8
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_OFST 10
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_OFST 12
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_OFST 14
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_LEN 2
-
-
-/***********************************/
-/* MC_CMD_RXD_MONITOR
- * Get histogram of RX queue fill level.
- */
-#define MC_CMD_RXD_MONITOR 0x22
-
-/* MC_CMD_RXD_MONITOR_IN msgrequest */
-#define MC_CMD_RXD_MONITOR_IN_LEN 12
-#define MC_CMD_RXD_MONITOR_IN_QID_OFST 0
-#define MC_CMD_RXD_MONITOR_IN_QID_LEN 4
-#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4
-#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_LEN 4
-#define MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8
-#define MC_CMD_RXD_MONITOR_IN_WIPE_LEN 4
-
-/* MC_CMD_RXD_MONITOR_OUT msgresponse */
-#define MC_CMD_RXD_MONITOR_OUT_LEN 80
-#define MC_CMD_RXD_MONITOR_OUT_QID_OFST 0
-#define MC_CMD_RXD_MONITOR_OUT_QID_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48
-#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_LEN 4
-
-
-/***********************************/
/* MC_CMD_PUTS
* Copy the given ASCII string out onto UART and/or out of the network port.
*/
@@ -4931,6 +4186,54 @@
/* MC_CMD_GET_PHY_CFG_IN msgrequest */
#define MC_CMD_GET_PHY_CFG_IN_LEN 0
+/* MC_CMD_GET_PHY_CFG_IN_V2 msgrequest */
+#define MC_CMD_GET_PHY_CFG_IN_V2_LEN 8
+/* Target port to request PHY state for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details
+ */
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LEN 8
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LO_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LO_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LO_LBN 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LO_WIDTH 32
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_HI_OFST 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_HI_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_HI_LBN 32
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 3
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 2
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LINK_END_OFST 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LINK_END_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LEN 8
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LO_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LO_LBN 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_HI_OFST 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_HI_LBN 32
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_HI_WIDTH 32
+
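A hedged, editorial sketch of filling in the TARGET field of this request. The MPORT_SELECTOR and LINK_END encodings come from the MAE_LINK_ENDPOINT_SELECTOR structure referenced above (not reproduced here); the helper name and the idea of passing pre-encoded selector values are illustrative assumptions, not a documented recipe:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Write one little-endian 32-bit MCDI dword at a byte offset. */
static void mcdi_put_le32(uint8_t *buf, size_t ofst, uint32_t val)
{
	buf[ofst + 0] = (uint8_t)(val & 0xff);
	buf[ofst + 1] = (uint8_t)((val >> 8) & 0xff);
	buf[ofst + 2] = (uint8_t)((val >> 16) & 0xff);
	buf[ofst + 3] = (uint8_t)((val >> 24) & 0xff);
}

/* Build a GET_PHY_CFG_IN_V2 request targeting one MAE link endpoint. */
static void build_get_phy_cfg_v2(uint8_t *req, uint32_t mport_selector,
				 uint32_t link_end)
{
	memset(req, 0, MC_CMD_GET_PHY_CFG_IN_V2_LEN);
	mcdi_put_le32(req, MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_OFST,
		      mport_selector);
	mcdi_put_le32(req, MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LINK_END_OFST,
		      link_end);
}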
/* MC_CMD_GET_PHY_CFG_OUT msgresponse */
#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
/* flags */
@@ -5026,6 +4329,9 @@
#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_OFST 8
#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN 21
#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_WIDTH 1
+#define MC_CMD_PHY_CAP_200000FDX_OFST 8
+#define MC_CMD_PHY_CAP_200000FDX_LBN 22
+#define MC_CMD_PHY_CAP_200000FDX_WIDTH 1
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_LEN 4
@@ -5059,6 +4365,7 @@
#define MC_CMD_MEDIA_DSFP 0x8
#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_LEN 4
+/* enum property: bitshift */
/* enum: Native clause 22 */
#define MC_CMD_MMD_CLAUSE22 0x0
#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
@@ -5084,7 +4391,7 @@
#define MC_CMD_START_BIST 0x25
#undef MC_CMD_0x25_PRIVILEGE_CTG
-#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_START_BIST_IN msgrequest */
#define MC_CMD_START_BIST_IN_LEN 4
@@ -5124,7 +4431,7 @@
#define MC_CMD_POLL_BIST 0x26
#undef MC_CMD_0x26_PRIVILEGE_CTG
-#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_POLL_BIST_IN msgrequest */
#define MC_CMD_POLL_BIST_IN_LEN 0
@@ -5282,33 +4589,6 @@
/***********************************/
-/* MC_CMD_FLUSH_RX_QUEUES
- * Flush receive queue(s). If SRIOV is enabled (via MC_CMD_SRIOV), then RXQ
- * flushes should be initiated via this MCDI operation, rather than via
- * directly writing FLUSH_CMD.
- *
- * The flush is completed (either done/fail) asynchronously (after this command
- * returns). The driver must still wait for flush done/failure events as usual.
- */
-#define MC_CMD_FLUSH_RX_QUEUES 0x27
-
-/* MC_CMD_FLUSH_RX_QUEUES_IN msgrequest */
-#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMIN 4
-#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX 252
-#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX_MCDI2 1020
-#define MC_CMD_FLUSH_RX_QUEUES_IN_LEN(num) (0+4*(num))
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_NUM(len) (((len)-0)/4)
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_OFST 0
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN 4
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MINNUM 1
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM 63
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM_MCDI2 255
-
-/* MC_CMD_FLUSH_RX_QUEUES_OUT msgresponse */
-#define MC_CMD_FLUSH_RX_QUEUES_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_GET_LOOPBACK_MODES
* Returns a bitmask of loopback modes available at each speed.
*/
@@ -5320,6 +4600,54 @@
/* MC_CMD_GET_LOOPBACK_MODES_IN msgrequest */
#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
+/* MC_CMD_GET_LOOPBACK_MODES_IN_V2 msgrequest */
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_LEN 8
+/* Target port to request loopback modes for. Uses MAE_LINK_ENDPOINT_SELECTOR
+ * which identifies a real or virtual network port by MAE port and link end.
+ * See the structure definition for more details
+ */
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LO_LBN 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_HI_OFST 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_HI_LBN 32
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 3
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 2
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LINK_END_OFST 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LINK_END_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LO_LBN 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_HI_OFST 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_HI_LBN 32
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_HI_WIDTH 32
+
/* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 40
/* Supported loopbacks. */
@@ -5333,6 +4661,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_LBN 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_WIDTH 32
+/* enum property: bitshift */
/* enum: None. */
#define MC_CMD_LOOPBACK_NONE 0x0
/* enum: Data. */
@@ -5422,6 +4751,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_LBN 96
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5435,6 +4765,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_LBN 160
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5448,6 +4779,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_LBN 224
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5461,6 +4793,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_LBN 288
#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
@@ -5479,6 +4812,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_LBN 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_WIDTH 32
+/* enum property: bitshift */
/* enum: None. */
/* MC_CMD_LOOPBACK_NONE 0x0 */
/* enum: Data. */
@@ -5568,6 +4902,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_LBN 96
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5581,6 +4916,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_LBN 160
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5594,6 +4930,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_LBN 224
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5607,6 +4944,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_LBN 288
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported 25G loopbacks. */
@@ -5620,6 +4958,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_LBN 352
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported 50 loopbacks. */
@@ -5633,6 +4972,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_LBN 416
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported 100G loopbacks. */
@@ -5646,6 +4986,214 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_LBN 480
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+
+/* MC_CMD_GET_LOOPBACK_MODES_OUT_V3 msgresponse: Supported loopback modes for
+ * newer NICs with 200G support
+ */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_LEN 72
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LO_LBN 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_HI_OFST 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_HI_LBN 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_HI_WIDTH 32
+/* enum property: bitshift */
+/* enum: None. */
+/* MC_CMD_LOOPBACK_NONE 0x0 */
+/* enum: Data. */
+/* MC_CMD_LOOPBACK_DATA 0x1 */
+/* enum: GMAC. */
+/* MC_CMD_LOOPBACK_GMAC 0x2 */
+/* enum: XGMII. */
+/* MC_CMD_LOOPBACK_XGMII 0x3 */
+/* enum: XGXS. */
+/* MC_CMD_LOOPBACK_XGXS 0x4 */
+/* enum: XAUI. */
+/* MC_CMD_LOOPBACK_XAUI 0x5 */
+/* enum: GMII. */
+/* MC_CMD_LOOPBACK_GMII 0x6 */
+/* enum: SGMII. */
+/* MC_CMD_LOOPBACK_SGMII 0x7 */
+/* enum: XGBR. */
+/* MC_CMD_LOOPBACK_XGBR 0x8 */
+/* enum: XFI. */
+/* MC_CMD_LOOPBACK_XFI 0x9 */
+/* enum: XAUI Far. */
+/* MC_CMD_LOOPBACK_XAUI_FAR 0xa */
+/* enum: GMII Far. */
+/* MC_CMD_LOOPBACK_GMII_FAR 0xb */
+/* enum: SGMII Far. */
+/* MC_CMD_LOOPBACK_SGMII_FAR 0xc */
+/* enum: XFI Far. */
+/* MC_CMD_LOOPBACK_XFI_FAR 0xd */
+/* enum: GPhy. */
+/* MC_CMD_LOOPBACK_GPHY 0xe */
+/* enum: PhyXS. */
+/* MC_CMD_LOOPBACK_PHYXS 0xf */
+/* enum: PCS. */
+/* MC_CMD_LOOPBACK_PCS 0x10 */
+/* enum: PMA-PMD. */
+/* MC_CMD_LOOPBACK_PMAPMD 0x11 */
+/* enum: Cross-Port. */
+/* MC_CMD_LOOPBACK_XPORT 0x12 */
+/* enum: XGMII-Wireside. */
+/* MC_CMD_LOOPBACK_XGMII_WS 0x13 */
+/* enum: XAUI Wireside. */
+/* MC_CMD_LOOPBACK_XAUI_WS 0x14 */
+/* enum: XAUI Wireside Far. */
+/* MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 */
+/* enum: XAUI Wireside near. */
+/* MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 */
+/* enum: GMII Wireside. */
+/* MC_CMD_LOOPBACK_GMII_WS 0x17 */
+/* enum: XFI Wireside. */
+/* MC_CMD_LOOPBACK_XFI_WS 0x18 */
+/* enum: XFI Wireside Far. */
+/* MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 */
+/* enum: PhyXS Wireside. */
+/* MC_CMD_LOOPBACK_PHYXS_WS 0x1a */
+/* enum: PMA lanes MAC-Serdes. */
+/* MC_CMD_LOOPBACK_PMA_INT 0x1b */
+/* enum: KR Serdes Parallel (Encoder). */
+/* MC_CMD_LOOPBACK_SD_NEAR 0x1c */
+/* enum: KR Serdes Serial. */
+/* MC_CMD_LOOPBACK_SD_FAR 0x1d */
+/* enum: PMA lanes MAC-Serdes Wireside. */
+/* MC_CMD_LOOPBACK_PMA_INT_WS 0x1e */
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+/* MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f */
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+/* MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 */
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+/* MC_CMD_LOOPBACK_SD_FEP_WS 0x21 */
+/* enum: KR Serdes Serial Wireside. */
+/* MC_CMD_LOOPBACK_SD_FES_WS 0x22 */
+/* enum: Near side of AOE Siena side port */
+/* MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 */
+/* enum: Medford Wireside datapath loopback */
+/* MC_CMD_LOOPBACK_DATA_WS 0x24 */
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+/* MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LO_LBN 64
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_HI_OFST 12
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_HI_LBN 96
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LO_LBN 128
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_HI_OFST 20
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_HI_LBN 160
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LO_LBN 192
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_HI_OFST 28
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_HI_LBN 224
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LO_LBN 256
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_HI_OFST 36
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_HI_LBN 288
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 25G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LO_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LO_LBN 320
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_HI_OFST 44
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_HI_LBN 352
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 50G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LO_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LO_LBN 384
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_HI_OFST 52
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_HI_LBN 416
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 100G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LO_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LO_LBN 448
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_HI_OFST 60
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_HI_LBN 480
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 200G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_OFST 64
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_OFST 64
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_LBN 512
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_OFST 68
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_LBN 544
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
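The per-speed fields in this V3 response are 64-bit masks split into LO/HI dwords, with the MC_CMD_LOOPBACK_* enum values acting as bit positions ("enum property: bitshift"). A small editorial sketch of testing whether one mode is supported at 200G, assuming a little-endian response buffer and this header for the constants:

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

static uint32_t mcdi_le32(const uint8_t *buf, size_t ofst)
{
	return (uint32_t)buf[ofst] |
	       ((uint32_t)buf[ofst + 1] << 8) |
	       ((uint32_t)buf[ofst + 2] << 16) |
	       ((uint32_t)buf[ofst + 3] << 24);
}

/* Is loopback mode 'mode' (an MC_CMD_LOOPBACK_* value) supported at 200G? */
static bool loopback_200g_supported(const uint8_t *resp, unsigned int mode)
{
	uint64_t mask;

	mask = (uint64_t)mcdi_le32(resp, MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_OFST) |
	       ((uint64_t)mcdi_le32(resp, MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_OFST) << 32);
	return mode < 64 && (mask & (UINT64_C(1) << mode)) != 0;
}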
@@ -5673,13 +5221,835 @@
#define FEC_TYPE_TYPE_LEN 4
/* enum: No FEC */
#define MC_CMD_FEC_NONE 0x0
-/* enum: Clause 74 BASE-R FEC (a.k.a Firecode) */
+/* enum: IEEE 802.3, Clause 74 BASE-R FEC (a.k.a Firecode) */
#define MC_CMD_FEC_BASER 0x1
-/* enum: Clause 91/Clause 108 Reed-Solomon FEC */
+/* enum: IEEE 802.3, Clause 91/Clause 108 Reed-Solomon FEC */
#define MC_CMD_FEC_RS 0x2
+/* enum: IEEE 802.3, Clause 161, interleaved RS-FEC sublayer for 100GBASE-R
+ * PHYs
+ */
+#define MC_CMD_FEC_IEEE_RS_INT 0x3
+/* enum: Ethernet Consortium, Low Latency RS-FEC. RS(272, 258). Replaces FEC
+ * specified in Clause 119 for 100/200G PHY. Replaces FEC specified in Clause
+ * 134 for 50G PHY.
+ */
+#define MC_CMD_FEC_ETCS_RS_LL 0x4
+/* enum: FEC mode selected automatically */
+#define MC_CMD_FEC_AUTO 0x5
#define FEC_TYPE_TYPE_LBN 0
#define FEC_TYPE_TYPE_WIDTH 32
+/* MC_CMD_ETH_TECH structuredef: Ethernet technology as defined by IEEE802.3,
+ * Ethernet Technology Consortium, proprietary technologies. The driver must
+ * not use technologies labelled NONE and AUTO.
+ */
+#define MC_CMD_ETH_TECH_LEN 16
+/* The enums in this field can be used either as bitwise indices into a tech
+ * mask (e.g. see MC_CMD_ETH_AN_FIELDS/TECH_MASK for example) or as regular
+ * enums (e.g. see MC_CMD_LINK_CTRL_IN/ADVERTISED_TECH_ABILITIES_MASK). This
+ * structure must be updated to add new technologies when projects that need
+ * them arise. An incomplete list of possible expansion in the future include:
+ * 100GBASE_KP4, 800GBASE_CR8, 800GBASE_KR8, 800GBASE_DR8, 800GBASE_SR8
+ * 800GBASE_VR8
+ */
+#define MC_CMD_ETH_TECH_TECH_OFST 0
+#define MC_CMD_ETH_TECH_TECH_LEN 16
+/* enum: 1000BASE-KX - 1000BASE-X PCS/PMA over an electrical backplane PMD. See
+ * IEEE 802.3 Clause 70
+ */
+#define MC_CMD_ETH_TECH_1000BASEKX 0x0
+/* enum: 10GBASE-R - PCS/PMA over an electrical backplane PMD. Refer to IEEE
+ * 802.3 Clause 72
+ */
+#define MC_CMD_ETH_TECH_10GBASE_KR 0x1
+/* enum: 40GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 84.
+ */
+#define MC_CMD_ETH_TECH_40GBASE_KR4 0x2
+/* enum: 40GBASE-R PCS/PMA over 4 lane shielded copper balanced cable PMD. See
+ * IEEE 802.3 Clause 85
+ */
+#define MC_CMD_ETH_TECH_40GBASE_CR4 0x3
+/* enum: 40GBASE-R PCS/PMA over 4 lane multimode fiber PMD as specified in
+ * Clause 86
+ */
+#define MC_CMD_ETH_TECH_40GBASE_SR4 0x4
+/* enum: 40GBASE-R PCS/PMA over 4 WDM lane single mode fiber PMD with long
+ * reach. See IEEE 802.3 Clause 87
+ */
+#define MC_CMD_ETH_TECH_40GBASE_LR4 0x5
+/* enum: 25GBASE-R PCS/PMA over shielded balanced copper cable PMD. See IEEE
+ * 802.3 Clause 110
+ */
+#define MC_CMD_ETH_TECH_25GBASE_CR 0x6
+/* enum: 25GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 111
+ */
+#define MC_CMD_ETH_TECH_25GBASE_KR 0x7
+/* enum: 25GBASE-R PCS/PMA over multimode fiber PMD. Refer to IEEE 802.3 Clause
+ * 112
+ */
+#define MC_CMD_ETH_TECH_25GBASE_SR 0x8
+/* enum: An Ethernet Physical layer operating at 50 Gb/s on twin-axial copper
+ * cable. Refer to Ethernet Technology Consortium 25/50G Ethernet Spec.
+ */
+#define MC_CMD_ETH_TECH_50GBASE_CR2 0x9
+/* enum: An Ethernet Physical layer operating at 50 Gb/s on copper backplane.
+ * Refer to Ethernet Technology Consortium 25/50G Ethernet Spec.
+ */
+#define MC_CMD_ETH_TECH_50GBASE_KR2 0xa
+/* enum: 100GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 93
+ */
+#define MC_CMD_ETH_TECH_100GBASE_KR4 0xb
+/* enum: 100GBASE-R PCS/PMA over 4 lane multimode fiber PMD. See IEEE 802.3
+ * Clause 95
+ */
+#define MC_CMD_ETH_TECH_100GBASE_SR4 0xc
+/* enum: 100GBASE-R PCS/PMA over 4 lane shielded copper balanced cable PMD. See
+ * IEEE 802.3 Clause 92
+ */
+#define MC_CMD_ETH_TECH_100GBASE_CR4 0xd
+/* enum: 100GBASE-R PCS/PMA over 4 WDM lane single mode fiber PMD, with
+ * long/extended reach. See IEEE 802.3 Clause 88
+ */
+#define MC_CMD_ETH_TECH_100GBASE_LR4_ER4 0xe
+/* enum: An Ethernet Physical layer operating at 50 Gb/s on short reach fiber.
+ * Refer to Ethernet Technology Consortium 25/50G Ethernet Spec.
+ */
+#define MC_CMD_ETH_TECH_50GBASE_SR2 0xf
+/* enum: 1000BASEX PCS/PMA. See IEEE 802.3 Clause 36 over undefined PMD, duplex
+ * mode unknown
+ */
+#define MC_CMD_ETH_TECH_1000BASEX 0x10
+/* enum: Non-standardised. 10G direct attach */
+#define MC_CMD_ETH_TECH_10GBASE_CR 0x11
+/* enum: 10GBASE-SR fiber over 850nm optics. See IEEE 802.3 Clause 52 */
+#define MC_CMD_ETH_TECH_10GBASE_SR 0x12
+/* enum: 10GBASE-LR fiber over 1310nm optics. See IEEE 802.3 Clause 52 */
+#define MC_CMD_ETH_TECH_10GBASE_LR 0x13
+/* enum: 10GBASE-LRM fiber over 1310 nm optics. See IEEE 802.3 Clause 68 */
+#define MC_CMD_ETH_TECH_10GBASE_LRM 0x14
+/* enum: 10GBASE-ER fiber over 1550nm optics. See IEEE 802.3 Clause 52 */
+#define MC_CMD_ETH_TECH_10GBASE_ER 0x15
+/* enum: 50GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 137
+ */
+#define MC_CMD_ETH_TECH_50GBASE_KR 0x16
+/* enum: 50GBASE-SR PCS/PMA over multimode fiber PMD as specified in Clause 138
+ */
+#define MC_CMD_ETH_TECH_50GBASE_SR 0x17
+/* enum: 50GBASE-CR PCS/PMA over shielded copper balanced cable PMD. See IEEE
+ * 802.3 Clause 136
+ */
+#define MC_CMD_ETH_TECH_50GBASE_CR 0x18
+/* enum: 50GBASE-R PCS/PMA over single mode fiber PMD as specified in Clause
+ * 139.
+ */
+#define MC_CMD_ETH_TECH_50GBASE_LR_ER_FR 0x19
+/* enum: 100 Gb/s PHY using 100GBASE-R encoding over single-mode fiber with
+ * reach up to at least 500 m (see IEEE 802.3 Clause 140)
+ */
+#define MC_CMD_ETH_TECH_50GBASE_DR 0x1a
+/* enum: 100GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 137
+ */
+#define MC_CMD_ETH_TECH_100GBASE_KR2 0x1b
+/* enum: 100GBASE-R PCS/PMA over 2 lane multimode fiber PMD. See IEEE 802.3
+ * Clause 138
+ */
+#define MC_CMD_ETH_TECH_100GBASE_SR2 0x1c
+/* enum: 100GBASE-R PCS/PMA over 2 lane shielded copper balanced cable PMD. See
+ * IEEE 802.3 Clause 136
+ */
+#define MC_CMD_ETH_TECH_100GBASE_CR2 0x1d
+/* enum: Unknown source */
+#define MC_CMD_ETH_TECH_100GBASE_LR2_ER2_FR2 0x1e
+/* enum: Unknown source */
+#define MC_CMD_ETH_TECH_100GBASE_DR2 0x1f
+/* enum: 200GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 137
+ */
+#define MC_CMD_ETH_TECH_200GBASE_KR4 0x20
+/* enum: 200GBASE-R PCS/PMA over 4 lane multimode fiber PMD. See IEEE 802.3
+ * Clause 138
+ */
+#define MC_CMD_ETH_TECH_200GBASE_SR4 0x21
+/* enum: 200GBASE-R PCS/PMA over 4 WDM lane single-mode fiber PMD as specified
+ * in Clause 122
+ */
+#define MC_CMD_ETH_TECH_200GBASE_LR4_ER4_FR4 0x22
+/* enum: 200GBASE-R PCS/PMA over 4-lane single-mode fiber PMD. See IEEE 802.3
+ * Clause 121
+ */
+#define MC_CMD_ETH_TECH_200GBASE_DR4 0x23
+/* enum: 200GBASE-R PCS/PMA over 4 lane shielded copper balanced cable PMD as
+ * specified in Clause 136
+ */
+#define MC_CMD_ETH_TECH_200GBASE_CR4 0x24
+/* enum: Ethernet Technology Consortium 400G AN Spec. 400GBASE-KR8 PMD uses
+ * 802.3 Clause 137, but the number of PMD lanes is 8.
+ */
+#define MC_CMD_ETH_TECH_400GBASE_KR8 0x25
+/* enum: 400GBASE-R PCS/PMA over 8-lane multimode fiber PMD. See IEEE 802.3
+ * Clause 138
+ */
+#define MC_CMD_ETH_TECH_400GBASE_SR8 0x26
+/* enum: 400GBASE-R PCS/PMA over 8 WDM lane single-mode fiber PMD. See IEEE
+ * 802.3 Clause 122
+ */
+#define MC_CMD_ETH_TECH_400GBASE_LR8_ER8_FR8 0x27
+/* enum: Unknown source */
+#define MC_CMD_ETH_TECH_400GBASE_DR8 0x28
+/* enum: Ethernet Technology Consortium 400G AN Spec. 400GBASE-CR8 PMD uses
+ * IEEE 802.3 Clause 136, but the number of PMD lanes is 8.
+ */
+#define MC_CMD_ETH_TECH_400GBASE_CR8 0x29
+/* enum: 100GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3ck
+ * Clause 163.
+ */
+#define MC_CMD_ETH_TECH_100GBASE_KR 0x2a
+/* enum: IEEE 802.3ck. 100G PHY with PMD as specified in Clause 167 over short
+ * reach fiber
+ */
+#define MC_CMD_ETH_TECH_100GBASE_SR 0x2b
+/* enum: 100G PMD together with single-mode fiber medium. See IEEE 802.3 Clause
+ * 140
+ */
+#define MC_CMD_ETH_TECH_100GBASE_LR_ER_FR 0x2c
+/* enum: 100GBASE-R PCS/PMA over shielded balanced copper cable PMD. See
+ * Clause 162 of IEEE 802.3ck.
+ */
+#define MC_CMD_ETH_TECH_100GBASE_CR 0x2d
+/* enum: 100G PMD together with single-mode fiber medium. See IEEE 802.3 Clause
+ * 140
+ */
+#define MC_CMD_ETH_TECH_100GBASE_DR 0x2e
+/* enum: 200GBASE-R PCS/PMA over an electrical backplane PMD as specified in
+ * Clause 163 IEEE 802.3ck
+ */
+#define MC_CMD_ETH_TECH_200GBASE_KR2 0x2f
+/* enum: 200G PHY with PMD as specified in Clause 167 over short reach fiber
+ * IEEE 802.3ck
+ */
+#define MC_CMD_ETH_TECH_200GBASE_SR2 0x30
+/* enum: Unknown source */
+#define MC_CMD_ETH_TECH_200GBASE_LR2_ER2_FR2 0x31
+/* enum: Unknown source */
+#define MC_CMD_ETH_TECH_200GBASE_DR2 0x32
+/* enum: 200GBASE-R PCS/PMA over 2 lane shielded balanced copper cable PMD as
+ * specified in Clause 162 IEEE 802.3ck.
+ */
+#define MC_CMD_ETH_TECH_200GBASE_CR2 0x33
+/* enum: 400GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 163 IEEE 802.3ck.
+ */
+#define MC_CMD_ETH_TECH_400GBASE_KR4 0x34
+/* enum: 400G PHY with PMD over short reach fiber. See Clause 167 of IEEE
+ * 802.3ck.
+ */
+#define MC_CMD_ETH_TECH_400GBASE_SR4 0x35
+/* enum: 400GBASE-R PCS/PMA over 4 WDM lane single-mode fiber PMD. See IEEE
+ * 802.3 Clause 151
+ */
+#define MC_CMD_ETH_TECH_400GBASE_LR4_ER4_FR4 0x36
+/* enum: 400GBASE-R PCS/PMA over 4-lane single-mode fiber PMD as specified in
+ * Clause 124
+ */
+#define MC_CMD_ETH_TECH_400GBASE_DR4 0x37
+/* enum: 400GBASE-R PCS/PMA over 4 lane shielded balanced copper cable PMD as
+ * specified in Clause 162 of IEEE 802.3ck.
+ */
+#define MC_CMD_ETH_TECH_400GBASE_CR4 0x38
+/* enum: Automatic tech mode. The driver must not use this. */
+#define MC_CMD_ETH_TECH_AUTO 0x39
+/* enum: See IEEE 802.3cc-2017 Clause 114 */
+#define MC_CMD_ETH_TECH_25GBASE_LR_ER 0x3a
+/* enum: Up to 7 m over twinaxial copper cable assembly (10 lanes, 10 Gbit/s
+ * each) See IEEE 802.3ba-2010 Clause 85
+ */
+#define MC_CMD_ETH_TECH_100GBASE_CR10 0x3b
+/* enum: Invalid tech mode. The driver must not use this. */
+#define MC_CMD_ETH_TECH_NONE 0x7f
+#define MC_CMD_ETH_TECH_TECH_LBN 0
+#define MC_CMD_ETH_TECH_TECH_WIDTH 128
+
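An editorial sketch of manipulating the 128-bit technology mask described above when the MC_CMD_ETH_TECH_* values are used as bit indices (their other use, as plain enum values, needs no helper). The layout assumed here is the usual MCDI little-endian ordering, i.e. bit N lives in byte N/8 of the 16-byte field:

#include <stdint.h>
#include <stdbool.h>

#define ETH_TECH_MASK_LEN 16	/* MC_CMD_ETH_TECH_TECH_LEN */

/* Set technology bit 'tech' (an MC_CMD_ETH_TECH_* value) in a tech mask. */
static void eth_tech_mask_set(uint8_t mask[ETH_TECH_MASK_LEN], unsigned int tech)
{
	if (tech < 8 * ETH_TECH_MASK_LEN)
		mask[tech / 8] |= (uint8_t)(1u << (tech % 8));
}

/* Test technology bit 'tech' in a tech mask. */
static bool eth_tech_mask_test(const uint8_t mask[ETH_TECH_MASK_LEN], unsigned int tech)
{
	if (tech >= 8 * ETH_TECH_MASK_LEN)
		return false;
	return (mask[tech / 8] & (1u << (tech % 8))) != 0;
}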
+/* MC_CMD_LINK_STATUS_FLAGS structuredef */
+#define MC_CMD_LINK_STATUS_FLAGS_LEN 8
+/* Flags used to report the current configuration/state of the link. */
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_OFST 0
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LEN 8
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LO_OFST 0
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LO_LEN 4
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LO_LBN 0
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LO_WIDTH 32
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_HI_OFST 4
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_HI_LEN 4
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_HI_LBN 32
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_HI_WIDTH 32
+/* enum property: bitshift */
+/* enum: Whether we have overall link up */
+#define MC_CMD_LINK_STATUS_FLAGS_LINK_UP 0x0
+/* enum: If set, the PHY has no external RX link synchronisation */
+#define MC_CMD_LINK_STATUS_FLAGS_NO_PHY_LINK 0x1
+/* enum: If set, PMD/MDI is not connected (e.g. cable disconnected, module cage
+ * empty)
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_MDI_DISCONNECTED 0x2
+/* enum: Set on error while decoding module data (e.g. module EEPROM does not
+ * contain valid values, has checksum errors, etc.)
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_BAD 0x3
+/* enum: Set when module unsupported (e.g. unsupported link rate or link
+ * technology)
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_UNSUPPORTED 0x4
+/* enum: Set on error while communicating with the module (e.g. I2C errors
+ * while reading EEPROM)
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_COMMS_FAULT 0x5
+/* enum: Set on module overcurrent/overvoltage condition */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_POWER_FAULT 0x6
+/* enum: Set on module overtemperature condition */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_THERMAL_FAULT 0x7
+/* enum: If set, the module is indicating Loss of Signal */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_LOS 0x8
+/* enum: If set, PMA is indicating loss of CDR lock (clock sync) */
+#define MC_CMD_LINK_STATUS_FLAGS_PMA_NO_CDR_LOCK 0x9
+/* enum: If set, PMA is indicating loss of analog signal */
+#define MC_CMD_LINK_STATUS_FLAGS_PMA_LOS 0xa
+/* enum: If set, PCS is indicating loss of block lock */
+#define MC_CMD_LINK_STATUS_FLAGS_PCS_NO_BLOCK_LOCK 0xb
+/* enum: If set, PCS is indicating loss of alignment marker lock on one or more
+ * lanes
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_PCS_NO_AM_LOCK 0xc
+/* enum: If set, PCS is indicating loss of overall alignment lock */
+#define MC_CMD_LINK_STATUS_FLAGS_PCS_NO_ALIGN_LOCK 0xd
+/* enum: If set, PCS is indicating high bit error rate condition. */
+#define MC_CMD_LINK_STATUS_FLAGS_PCS_HI_BER 0xe
+/* enum: If set, FEC is indicating loss of FEC lock */
+#define MC_CMD_LINK_STATUS_FLAGS_FEC_NO_LOCK 0xf
+/* enum: If set, indicates that the number of symbol errors in a 8192-codeword
+ * window has exceeded the threshold K (417).
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_FEC_HI_SER 0x10
+/* enum: If set, the receiver has detected the local FEC has degraded. */
+#define MC_CMD_LINK_STATUS_FLAGS_FEC_LOCAL_DEGRADED 0x11
+/* enum: If set, the receiver has detected the remote FEC has degraded. */
+#define MC_CMD_LINK_STATUS_FLAGS_FEC_RM_DEGRADED 0x12
+/* enum: If set, the number of symbol errors is over an internal threshold. */
+#define MC_CMD_LINK_STATUS_FLAGS_FEC_DEGRADED_SER 0x13
+/* enum: If set, autonegotiation has detected an auto-negotiation capable link
+ * partner
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_AN_ABLE 0x14
+/* enum: If set, autonegotiation base page exchange has failed */
+#define MC_CMD_LINK_STATUS_FLAGS_AN_BP_FAILED 0x15
+/* enum: If set, autonegotiation next page exchange has failed */
+#define MC_CMD_LINK_STATUS_FLAGS_AN_NP_FAILED 0x16
+/* enum: If set, autonegotiation has failed to negotiate a common set of
+ * capabilities
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_AN_NO_HCD 0x17
+/* enum: If set, local end link training has failed to establish link training
+ * frame lock on one or more lanes
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_LT_NO_LOCAL_FRAME_LOCK 0x18
+/* enum: If set, remote end link training has failed to establish link training
+ * frame lock on one or more lanes
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_LT_NO_RM_FRAME_LOCK 0x19
+/* enum: If set, remote end has failed to assert Receiver Ready (link training
+ * success) within the designated timeout
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_LT_NO_RX_READY 0x1a
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LBN 0
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_WIDTH 64
+
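Like the other "bitshift" enums, the MC_CMD_LINK_STATUS_FLAGS_* values above are bit positions within the 64-bit STATUS_FLAGS field. A short, hedged sketch of checking a couple of conditions once the LO/HI dwords have been combined, assuming this header is included:

#include <stdint.h>
#include <stdbool.h>

/* 'flags' is STATUS_FLAGS_LO | ((uint64_t)STATUS_FLAGS_HI << 32). */
static bool link_is_up(uint64_t flags)
{
	return (flags & (UINT64_C(1) << MC_CMD_LINK_STATUS_FLAGS_LINK_UP)) != 0;
}

static bool module_missing(uint64_t flags)
{
	return (flags & (UINT64_C(1) << MC_CMD_LINK_STATUS_FLAGS_PMD_MDI_DISCONNECTED)) != 0;
}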
+/* MC_CMD_PAUSE_MODE structuredef */
+#define MC_CMD_PAUSE_MODE_LEN 1
+#define MC_CMD_PAUSE_MODE_TYPE_OFST 0
+#define MC_CMD_PAUSE_MODE_TYPE_LEN 1
+/* enum: See IEEE 802.3 Clause 73.6.6 */
+#define MC_CMD_PAUSE_MODE_AN_PAUSE 0x0
+/* enum: See IEEE 802.3 Clause 73.6.6 */
+#define MC_CMD_PAUSE_MODE_AN_ASYM_DIR 0x1
+#define MC_CMD_PAUSE_MODE_TYPE_LBN 0
+#define MC_CMD_PAUSE_MODE_TYPE_WIDTH 8
+
+/* MC_CMD_ETH_AN_FIELDS structuredef: Fields used for IEEE 802.3 Clause 73
+ * Auto-Negotiation. Warning - This is fixed size and cannot be extended. This
+ * structure is used to define autonegotiable abilities (advertised, link
+ * partner and supported abilities).
+ */
+#define MC_CMD_ETH_AN_FIELDS_LEN 25
+/* Mask of Ethernet technologies. The bit indices in this mask are taken from
+ * the TECH field in the MC_CMD_ETH_TECH structure.
+ */
+#define MC_CMD_ETH_AN_FIELDS_TECH_MASK_OFST 0
+#define MC_CMD_ETH_AN_FIELDS_TECH_MASK_LEN 16
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+#define MC_CMD_ETH_AN_FIELDS_TECH_MASK_LBN 0
+#define MC_CMD_ETH_AN_FIELDS_TECH_MASK_WIDTH 128
+/* Mask of supported FEC modes */
+#define MC_CMD_ETH_AN_FIELDS_FEC_MASK_OFST 16
+#define MC_CMD_ETH_AN_FIELDS_FEC_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+#define MC_CMD_ETH_AN_FIELDS_FEC_MASK_LBN 128
+#define MC_CMD_ETH_AN_FIELDS_FEC_MASK_WIDTH 32
+/* Mask of requested FEC modes */
+#define MC_CMD_ETH_AN_FIELDS_FEC_REQ_OFST 20
+#define MC_CMD_ETH_AN_FIELDS_FEC_REQ_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+#define MC_CMD_ETH_AN_FIELDS_FEC_REQ_LBN 160
+#define MC_CMD_ETH_AN_FIELDS_FEC_REQ_WIDTH 32
+/* Bitmask of negotiated pause modes */
+#define MC_CMD_ETH_AN_FIELDS_PAUSE_MASK_OFST 24
+#define MC_CMD_ETH_AN_FIELDS_PAUSE_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_PAUSE_MODE/TYPE */
+#define MC_CMD_ETH_AN_FIELDS_PAUSE_MASK_LBN 192
+#define MC_CMD_ETH_AN_FIELDS_PAUSE_MASK_WIDTH 8
+
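An editorial sketch of pulling the pieces out of one MC_CMD_ETH_AN_FIELDS blob, for example the link-partner abilities reported by MC_CMD_LINK_STATE below. The offsets are the _OFST values defined above, relative to the start of the structure; a little-endian layout is assumed:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct eth_an_fields {
	uint8_t tech_mask[16];	/* bit indices: MC_CMD_ETH_TECH_* */
	uint32_t fec_mask;	/* bit indices: FEC_TYPE/TYPE */
	uint32_t fec_req;	/* bit indices: FEC_TYPE/TYPE */
	uint8_t pause_mask;	/* bit indices: MC_CMD_PAUSE_MODE_* */
};

static uint32_t mcdi_le32(const uint8_t *buf, size_t ofst)
{
	return (uint32_t)buf[ofst] |
	       ((uint32_t)buf[ofst + 1] << 8) |
	       ((uint32_t)buf[ofst + 2] << 16) |
	       ((uint32_t)buf[ofst + 3] << 24);
}

static void eth_an_fields_parse(const uint8_t *p, struct eth_an_fields *out)
{
	memcpy(out->tech_mask, p + MC_CMD_ETH_AN_FIELDS_TECH_MASK_OFST,
	       MC_CMD_ETH_AN_FIELDS_TECH_MASK_LEN);
	out->fec_mask = mcdi_le32(p, MC_CMD_ETH_AN_FIELDS_FEC_MASK_OFST);
	out->fec_req = mcdi_le32(p, MC_CMD_ETH_AN_FIELDS_FEC_REQ_OFST);
	out->pause_mask = p[MC_CMD_ETH_AN_FIELDS_PAUSE_MASK_OFST];
}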
+/* MC_CMD_LOOPBACK_V2 structuredef: Loopback modes for use with the new
+ * MC_CMD_LINK_CTRL and MC_CMD_LINK_STATE. These loopback modes are not
+ * supported in other getlink/setlink commands.
+ */
+#define MC_CMD_LOOPBACK_V2_LEN 4
+#define MC_CMD_LOOPBACK_V2_MODE_OFST 0
+#define MC_CMD_LOOPBACK_V2_MODE_LEN 4
+/* enum: No loopback */
+#define MC_CMD_LOOPBACK_V2_NONE 0x0
+/* enum: Let firmware choose a supported loopback mode */
+#define MC_CMD_LOOPBACK_V2_AUTO 0x1
+/* enum: Loopback after the MAC */
+#define MC_CMD_LOOPBACK_V2_POST_MAC 0x2
+/* enum: Loopback after the PCS */
+#define MC_CMD_LOOPBACK_V2_POST_PCS 0x3
+/* enum: Loopback after the PMA */
+#define MC_CMD_LOOPBACK_V2_POST_PMA 0x4
+/* enum: Loopback after the MDI Wireside */
+#define MC_CMD_LOOPBACK_V2_POST_MDI_WS 0x5
+/* enum: Loopback after the PMA Wireside */
+#define MC_CMD_LOOPBACK_V2_POST_PMA_WS 0x6
+/* enum: Loopback after the PCS Wireside */
+#define MC_CMD_LOOPBACK_V2_POST_PCS_WS 0x7
+/* enum: Loopback after the MAC Wireside */
+#define MC_CMD_LOOPBACK_V2_POST_MAC_WS 0x8
+/* enum: Loopback after the MAC FIFOs (before the MAC) */
+#define MC_CMD_LOOPBACK_V2_PRE_MAC 0x9
+#define MC_CMD_LOOPBACK_V2_MODE_LBN 0
+#define MC_CMD_LOOPBACK_V2_MODE_WIDTH 32
+
+/* MC_CMD_FCNTL structuredef */
+#define MC_CMD_FCNTL_LEN 4
+#define MC_CMD_FCNTL_MASK_OFST 0
+#define MC_CMD_FCNTL_MASK_LEN 4
+/* enum: Flow control is off. */
+#define MC_CMD_FCNTL_OFF 0x0
+/* enum: Respond to flow control. */
+#define MC_CMD_FCNTL_RESPOND 0x1
+/* enum: Respond to and Issue flow control. */
+#define MC_CMD_FCNTL_BIDIR 0x2
+/* enum: Auto negotiate flow control. */
+#define MC_CMD_FCNTL_AUTO 0x3
+/* enum: Priority flow control. This is only supported on KSB. */
+#define MC_CMD_FCNTL_QBB 0x4
+/* enum: Issue flow control. */
+#define MC_CMD_FCNTL_GENERATE 0x5
+#define MC_CMD_FCNTL_MASK_LBN 0
+#define MC_CMD_FCNTL_MASK_WIDTH 32
+
+/* MC_CMD_LINK_FLAGS structuredef */
+#define MC_CMD_LINK_FLAGS_LEN 4
+/* The enums defined in this field are used as indices into the
+ * MC_CMD_LINK_FLAGS bitmask.
+ */
+#define MC_CMD_LINK_FLAGS_MASK_OFST 0
+#define MC_CMD_LINK_FLAGS_MASK_LEN 4
+/* enum property: bitshift */
+/* enum: Enable auto-negotiation. If AN is enabled, link technology and FEC
+ * mode are determined by advertised capabilities and requested FEC modes,
+ * combined with link partner capabilities. If AN is disabled, link technology
+ * is forced to LINK_TECHNOLOGY and FEC mode is forced to FEC_MODE. Not valid
+ * if loopback is enabled
+ */
+#define MC_CMD_LINK_FLAGS_AUTONEG_EN 0x0
+/* enum: Enable parallel detect. In addition to AN, try to sense partner forced
+ * speed/FEC mode (when partner AN disabled). Only valid if AN is enabled.
+ */
+#define MC_CMD_LINK_FLAGS_PARALLEL_DETECT_EN 0x1
+/* enum: Force link down, in electrical idle. */
+#define MC_CMD_LINK_FLAGS_LINK_DISABLE 0x2
+/* enum: Ignore the sequence number and always apply. */
+#define MC_CMD_LINK_FLAGS_IGNORE_MODULE_SEQ 0x3
+#define MC_CMD_LINK_FLAGS_MASK_LBN 0
+#define MC_CMD_LINK_FLAGS_MASK_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_LINK_CTRL
+ * Write the unified MAC/PHY link configuration. Locks required: None. Return
+ * code: 0, EINVAL, ETIME, EAGAIN
+ */
+#define MC_CMD_LINK_CTRL 0x6b
+#undef MC_CMD_0x6b_PRIVILEGE_CTG
+
+#define MC_CMD_0x6b_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_LINK_CTRL_IN msgrequest */
+#define MC_CMD_LINK_CTRL_IN_LEN 40
+/* Handle to the port to set link state for. */
+#define MC_CMD_LINK_CTRL_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_LINK_CTRL_IN_PORT_HANDLE_LEN 4
+/* Control flags */
+#define MC_CMD_LINK_CTRL_IN_CONTROL_FLAGS_OFST 4
+#define MC_CMD_LINK_CTRL_IN_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_FLAGS/MASK */
+/* Reserved for future expansion, and included to provide padding for alignment
+ * purposes.
+ */
+#define MC_CMD_LINK_CTRL_IN_RESERVED_OFST 8
+#define MC_CMD_LINK_CTRL_IN_RESERVED_LEN 8
+#define MC_CMD_LINK_CTRL_IN_RESERVED_LO_OFST 8
+#define MC_CMD_LINK_CTRL_IN_RESERVED_LO_LEN 4
+#define MC_CMD_LINK_CTRL_IN_RESERVED_LO_LBN 64
+#define MC_CMD_LINK_CTRL_IN_RESERVED_LO_WIDTH 32
+#define MC_CMD_LINK_CTRL_IN_RESERVED_HI_OFST 12
+#define MC_CMD_LINK_CTRL_IN_RESERVED_HI_LEN 4
+#define MC_CMD_LINK_CTRL_IN_RESERVED_HI_LBN 96
+#define MC_CMD_LINK_CTRL_IN_RESERVED_HI_WIDTH 32
+/* Technology abilities to advertise during auto-negotiation */
+#define MC_CMD_LINK_CTRL_IN_ADVERTISED_TECH_ABILITIES_MASK_OFST 16
+#define MC_CMD_LINK_CTRL_IN_ADVERTISED_TECH_ABILITIES_MASK_LEN 16
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* Pause abilities to advertise during auto-negotiation. Valid when auto-
+ * negotiation is enabled and MC_CMD_SET_MAC_IN/FCTL is set to
+ * MC_CMD_FCNTL_AUTO. If auto-negotiation is disabled the driver must
+ * explicitly configure pause mode with MC_CMD_SET_MAC.
+ */
+#define MC_CMD_LINK_CTRL_IN_ADVERTISED_PAUSE_ABILITIES_MASK_OFST 32
+#define MC_CMD_LINK_CTRL_IN_ADVERTISED_PAUSE_ABILITIES_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_PAUSE_MODE/TYPE */
+/* When auto-negotiation is enabled, this is the FEC mode to request. Note that
+ * a weaker FEC mode may get negotiated, depending on what the link partner
+ * supports. The driver should subsequently use MC_CMD_GET_LINK to check the
+ * actual negotiated FEC mode. When auto-negotiation is disabled, this is the
+ * forced FEC mode.
+ */
+#define MC_CMD_LINK_CTRL_IN_FEC_MODE_OFST 33
+#define MC_CMD_LINK_CTRL_IN_FEC_MODE_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+/* This is only to be used when auto-negotiation is disabled (forced speed or
+ * loopback mode). If the specified value does not align with the values
+ * defined in the enum MC_CMD_ETH_TECH/TECH, it is considered invalid.
+ */
+#define MC_CMD_LINK_CTRL_IN_LINK_TECHNOLOGY_OFST 36
+#define MC_CMD_LINK_CTRL_IN_LINK_TECHNOLOGY_LEN 2
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* The sequence number of the last MODULECHANGE event. If this doesn't match,
+ * fail with EAGAIN.
+ */
+#define MC_CMD_LINK_CTRL_IN_MODULE_SEQ_OFST 38
+#define MC_CMD_LINK_CTRL_IN_MODULE_SEQ_LEN 1
+/* Loopback Mode. Only valid when auto-negotiation is disabled. */
+#define MC_CMD_LINK_CTRL_IN_LOOPBACK_OFST 39
+#define MC_CMD_LINK_CTRL_IN_LOOPBACK_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+
+/* MC_CMD_LINK_CTRL_OUT msgresponse */
+#define MC_CMD_LINK_CTRL_OUT_LEN 0
+
+
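A hedged, editorial sketch of populating an MC_CMD_LINK_CTRL request for plain auto-negotiation, using only offsets and enum values defined above. The port handle, the single advertised technology and the module sequence handling are assumptions chosen for illustration; a real driver would take them from its port enumeration and its MODULECHANGE event state, and would also fill the pause abilities when flow control is auto-negotiated.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

static void mcdi_put_le32(uint8_t *buf, size_t ofst, uint32_t val)
{
	buf[ofst + 0] = (uint8_t)(val & 0xff);
	buf[ofst + 1] = (uint8_t)((val >> 8) & 0xff);
	buf[ofst + 2] = (uint8_t)((val >> 16) & 0xff);
	buf[ofst + 3] = (uint8_t)((val >> 24) & 0xff);
}

/* Request autoneg with RS-FEC preferred, advertising 25GBASE-KR only. */
static void build_link_ctrl_autoneg(uint8_t *req, uint32_t port_handle,
				    uint8_t module_seq)
{
	memset(req, 0, MC_CMD_LINK_CTRL_IN_LEN);
	mcdi_put_le32(req, MC_CMD_LINK_CTRL_IN_PORT_HANDLE_OFST, port_handle);
	/* CONTROL_FLAGS uses MC_CMD_LINK_FLAGS_* values as bit positions. */
	mcdi_put_le32(req, MC_CMD_LINK_CTRL_IN_CONTROL_FLAGS_OFST,
		      1u << MC_CMD_LINK_FLAGS_AUTONEG_EN);
	/* ADVERTISED_TECH_ABILITIES_MASK: MC_CMD_ETH_TECH_* values as bits. */
	req[MC_CMD_LINK_CTRL_IN_ADVERTISED_TECH_ABILITIES_MASK_OFST +
	    MC_CMD_ETH_TECH_25GBASE_KR / 8] |=
		(uint8_t)(1u << (MC_CMD_ETH_TECH_25GBASE_KR % 8));
	/* Requested FEC mode when autonegotiating (single value, not a mask). */
	req[MC_CMD_LINK_CTRL_IN_FEC_MODE_OFST] = MC_CMD_FEC_RS;
	/* Sequence number of the last MODULECHANGE event seen by the driver. */
	req[MC_CMD_LINK_CTRL_IN_MODULE_SEQ_OFST] = module_seq;
}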
+/***********************************/
+/* MC_CMD_LINK_STATE
+ */
+#define MC_CMD_LINK_STATE 0x6c
+#undef MC_CMD_0x6c_PRIVILEGE_CTG
+
+#define MC_CMD_0x6c_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_LINK_STATE_IN msgrequest */
+#define MC_CMD_LINK_STATE_IN_LEN 4
+/* Handle to the port to get link state for. */
+#define MC_CMD_LINK_STATE_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_LINK_STATE_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_LINK_STATE_OUT msgresponse */
+#define MC_CMD_LINK_STATE_OUT_LEN 114
+/* Flags used to report the current configuration/state of the link. */
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_OFST 0
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LEN 8
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LO_OFST 0
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LO_LEN 4
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LO_LBN 0
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LO_WIDTH 32
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_HI_OFST 4
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_HI_LEN 4
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_HI_LBN 32
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_HI_WIDTH 32
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_STATUS_FLAGS/STATUS_FLAGS */
+/* Configured technology. If the specified value does not align with the values
+ * defined in the enum MC_CMD_ETH_TECH/TECH, it is considered invalid.
+ */
+#define MC_CMD_LINK_STATE_OUT_LINK_TECHNOLOGY_OFST 8
+#define MC_CMD_LINK_STATE_OUT_LINK_TECHNOLOGY_LEN 2
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* Configured FEC mode */
+#define MC_CMD_LINK_STATE_OUT_FEC_MODE_OFST 10
+#define MC_CMD_LINK_STATE_OUT_FEC_MODE_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+/* Bitmask of auto-negotiated pause modes */
+#define MC_CMD_LINK_STATE_OUT_PAUSE_MASK_OFST 11
+#define MC_CMD_LINK_STATE_OUT_PAUSE_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_PAUSE_MODE/TYPE */
+/* Configured loopback mode */
+#define MC_CMD_LINK_STATE_OUT_LOOPBACK_OFST 12
+#define MC_CMD_LINK_STATE_OUT_LOOPBACK_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+/* Abilities requested by the driver to advertise during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_OFST 16
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_LEN 32
+/* See structuredef: MC_CMD_ETH_AN_FIELDS */
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_TECH_MASK_OFST 16
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_TECH_MASK_LEN 16
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_FEC_MASK_OFST 32
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_FEC_MASK_LEN 4
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_FEC_REQ_OFST 36
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_FEC_REQ_LEN 4
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_PAUSE_MASK_OFST 40
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_PAUSE_MASK_LEN 1
+/* Abilities advertised by the link partner during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_OFST 48
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_LEN 32
+/* See structuredef: MC_CMD_ETH_AN_FIELDS */
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_TECH_MASK_OFST 48
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_TECH_MASK_LEN 16
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_FEC_MASK_OFST 64
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_FEC_MASK_LEN 4
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_FEC_REQ_OFST 68
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_FEC_REQ_LEN 4
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_PAUSE_MASK_OFST 72
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_PAUSE_MASK_LEN 1
+/* Abilities supported by the local device (including cable abilities). For
+ * fixed local device capabilities see MC_CMD_GET_LOCAL_DEVICE_INFO.
+ */
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_OFST 80
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_LEN 28
+/* See structuredef: MC_CMD_ETH_AN_FIELDS */
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_TECH_MASK_OFST 80
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_TECH_MASK_LEN 16
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_FEC_MASK_OFST 96
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_FEC_MASK_LEN 4
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_FEC_REQ_OFST 100
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_FEC_REQ_LEN 4
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_PAUSE_MASK_OFST 104
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_PAUSE_MASK_LEN 1
+/* Control flags */
+#define MC_CMD_LINK_STATE_OUT_CONTROL_FLAGS_OFST 108
+#define MC_CMD_LINK_STATE_OUT_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_FLAGS/MASK */
+/* Sequence number to synchronize link change events */
+#define MC_CMD_LINK_STATE_OUT_PORT_LINKCHANGE_SEQ_NUM_OFST 112
+#define MC_CMD_LINK_STATE_OUT_PORT_LINKCHANGE_SEQ_NUM_LEN 1
+/* Sequence number to synchronize module change events */
+#define MC_CMD_LINK_STATE_OUT_PORT_MODULECHANGE_SEQ_NUM_OFST 113
+#define MC_CMD_LINK_STATE_OUT_PORT_MODULECHANGE_SEQ_NUM_LEN 1
+
+/* MC_CMD_LINK_STATE_OUT_V2 msgresponse: Updated LINK_STATE_OUT with
+ * LOCAL_AN_SUPPORT
+ */
+#define MC_CMD_LINK_STATE_OUT_V2_LEN 120
+/* Flags used to report the current configuration/state of the link. */
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_OFST 0
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LEN 8
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LO_OFST 0
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LO_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LO_LBN 0
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LO_WIDTH 32
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_HI_OFST 4
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_HI_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_HI_LBN 32
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_HI_WIDTH 32
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_STATUS_FLAGS/STATUS_FLAGS */
+/* Configured technology. If the specified value does not align with the values
+ * defined in the enum MC_CMD_ETH_TECH/TECH, it is considered invalid.
+ */
+#define MC_CMD_LINK_STATE_OUT_V2_LINK_TECHNOLOGY_OFST 8
+#define MC_CMD_LINK_STATE_OUT_V2_LINK_TECHNOLOGY_LEN 2
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* Configured FEC mode */
+#define MC_CMD_LINK_STATE_OUT_V2_FEC_MODE_OFST 10
+#define MC_CMD_LINK_STATE_OUT_V2_FEC_MODE_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+/* Bitmask of auto-negotiated pause modes */
+#define MC_CMD_LINK_STATE_OUT_V2_PAUSE_MASK_OFST 11
+#define MC_CMD_LINK_STATE_OUT_V2_PAUSE_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_PAUSE_MODE/TYPE */
+/* Configured loopback mode */
+#define MC_CMD_LINK_STATE_OUT_V2_LOOPBACK_OFST 12
+#define MC_CMD_LINK_STATE_OUT_V2_LOOPBACK_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+/* Abilities requested by the driver to advertise during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_V2_ADVERTISED_ABILITIES_OFST 16
+#define MC_CMD_LINK_STATE_OUT_V2_ADVERTISED_ABILITIES_LEN 32
+/* Abilities advertised by the link partner during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_V2_LINK_PARTNER_ABILITIES_OFST 48
+#define MC_CMD_LINK_STATE_OUT_V2_LINK_PARTNER_ABILITIES_LEN 32
+/* Abilities supported by the local device (including cable abilities). For
+ * fixed local device capabilities see MC_CMD_GET_LOCAL_DEVICE_INFO.
+ */
+#define MC_CMD_LINK_STATE_OUT_V2_SUPPORTED_ABILITIES_OFST 80
+#define MC_CMD_LINK_STATE_OUT_V2_SUPPORTED_ABILITIES_LEN 28
+/* Control flags */
+#define MC_CMD_LINK_STATE_OUT_V2_CONTROL_FLAGS_OFST 108
+#define MC_CMD_LINK_STATE_OUT_V2_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_FLAGS/MASK */
+/* Sequence number to synchronize link change events */
+#define MC_CMD_LINK_STATE_OUT_V2_PORT_LINKCHANGE_SEQ_NUM_OFST 112
+#define MC_CMD_LINK_STATE_OUT_V2_PORT_LINKCHANGE_SEQ_NUM_LEN 1
+/* Sequence number to synchronize module change events */
+#define MC_CMD_LINK_STATE_OUT_V2_PORT_MODULECHANGE_SEQ_NUM_OFST 113
+#define MC_CMD_LINK_STATE_OUT_V2_PORT_MODULECHANGE_SEQ_NUM_LEN 1
+/* Reports the auto-negotiation supported by the local device. This depends on
+ * the port and module properties.
+ */
+#define MC_CMD_LINK_STATE_OUT_V2_LOCAL_AN_SUPPORT_OFST 116
+#define MC_CMD_LINK_STATE_OUT_V2_LOCAL_AN_SUPPORT_LEN 4
+/* Enum values, see field(s): */
+/* AN_TYPE/TYPE */
+
+/* MC_CMD_LINK_STATE_OUT_V3 msgresponse: Updated LINK_STATE_OUT_V2 for explicit
+ * reporting of the link speed and duplex mode.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_LEN 128
+/* Flags used to report the current configuration/state of the link. */
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_OFST 0
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LEN 8
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LO_OFST 0
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LO_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LO_LBN 0
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LO_WIDTH 32
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_HI_OFST 4
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_HI_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_HI_LBN 32
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_HI_WIDTH 32
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_STATUS_FLAGS/STATUS_FLAGS */
+/* Configured technology. If the specified value does not align with the values
+ * defined in the enum MC_CMD_ETH_TECH/TECH, it is considered invalid.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_TECHNOLOGY_OFST 8
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_TECHNOLOGY_LEN 2
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* Configured FEC mode */
+#define MC_CMD_LINK_STATE_OUT_V3_FEC_MODE_OFST 10
+#define MC_CMD_LINK_STATE_OUT_V3_FEC_MODE_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+/* Bitmask of auto-negotiated pause modes */
+#define MC_CMD_LINK_STATE_OUT_V3_PAUSE_MASK_OFST 11
+#define MC_CMD_LINK_STATE_OUT_V3_PAUSE_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_PAUSE_MODE/TYPE */
+/* Configured loopback mode */
+#define MC_CMD_LINK_STATE_OUT_V3_LOOPBACK_OFST 12
+#define MC_CMD_LINK_STATE_OUT_V3_LOOPBACK_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+/* Abilities requested by the driver to advertise during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_V3_ADVERTISED_ABILITIES_OFST 16
+#define MC_CMD_LINK_STATE_OUT_V3_ADVERTISED_ABILITIES_LEN 32
+/* Abilities advertised by the link partner during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_PARTNER_ABILITIES_OFST 48
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_PARTNER_ABILITIES_LEN 32
+/* Abilities supported by the local device (including cable abilities). For
+ * fixed local device capabilities see MC_CMD_GET_LOCAL_DEVICE_INFO.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_SUPPORTED_ABILITIES_OFST 80
+#define MC_CMD_LINK_STATE_OUT_V3_SUPPORTED_ABILITIES_LEN 28
+/* Control flags */
+#define MC_CMD_LINK_STATE_OUT_V3_CONTROL_FLAGS_OFST 108
+#define MC_CMD_LINK_STATE_OUT_V3_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_FLAGS/MASK */
+/* Sequence number to synchronize link change events */
+#define MC_CMD_LINK_STATE_OUT_V3_PORT_LINKCHANGE_SEQ_NUM_OFST 112
+#define MC_CMD_LINK_STATE_OUT_V3_PORT_LINKCHANGE_SEQ_NUM_LEN 1
+/* Sequence number to synchronize module change events */
+#define MC_CMD_LINK_STATE_OUT_V3_PORT_MODULECHANGE_SEQ_NUM_OFST 113
+#define MC_CMD_LINK_STATE_OUT_V3_PORT_MODULECHANGE_SEQ_NUM_LEN 1
+/* Reports the auto-negotiation supported by the local device. This depends on
+ * the port and module properties.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_LOCAL_AN_SUPPORT_OFST 116
+#define MC_CMD_LINK_STATE_OUT_V3_LOCAL_AN_SUPPORT_LEN 4
+/* Enum values, see field(s): */
+/* AN_TYPE/TYPE */
+/* Auto-negotiated speed in Mbit/s. The link may still be down even if this
+ * reads non-zero. The LINK_SPEED field is intended for drivers without the
+ * most up-to-date MCDI definitions, which cannot deduce the link speed from
+ * the reported LINK_TECHNOLOGY field.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_SPEED_OFST 120
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_SPEED_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V3_FLAGS_OFST 124
+#define MC_CMD_LINK_STATE_OUT_V3_FLAGS_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V3_FULL_DUPLEX_OFST 124
+#define MC_CMD_LINK_STATE_OUT_V3_FULL_DUPLEX_LBN 0
+#define MC_CMD_LINK_STATE_OUT_V3_FULL_DUPLEX_WIDTH 1
+
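/*
 * Illustrative sketch (editor's note, not part of this patch): reading the
 * negotiated technology and FEC mode back with MC_CMD_LINK_STATE. The MCDI
 * helpers and efx_mcdi_rpc() are the sfc driver's existing wrappers; the
 * port_handle argument and the error-handling policy are assumptions made
 * for the example.
 */
static int example_get_link_state(struct efx_nic *efx, u32 port_handle,
				  u16 *technology, u8 *fec_mode)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_STATE_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_LINK_STATE_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, LINK_STATE_IN_PORT_HANDLE, port_handle);
	rc = efx_mcdi_rpc(efx, MC_CMD_LINK_STATE, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_LINK_STATE_OUT_LEN)
		return -EIO;

	/* LINK_TECHNOLOGY is a 2-byte field (MC_CMD_ETH_TECH/TECH values) */
	*technology = MCDI_WORD(outbuf, LINK_STATE_OUT_LINK_TECHNOLOGY);
	/* FEC_MODE is a 1-byte field (FEC_TYPE/TYPE values) */
	*fec_mode = *MCDI_PTR(outbuf, LINK_STATE_OUT_FEC_MODE);

	return 0;
}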
/***********************************/
/* MC_CMD_GET_LINK
@@ -5694,6 +6064,54 @@
/* MC_CMD_GET_LINK_IN msgrequest */
#define MC_CMD_GET_LINK_IN_LEN 0
+/* MC_CMD_GET_LINK_IN_V2 msgrequest */
+#define MC_CMD_GET_LINK_IN_V2_LEN 8
+/* Target port to request link state for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details.
+ */
+#define MC_CMD_GET_LINK_IN_V2_TARGET_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LEN 8
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LO_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LO_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LO_LBN 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LO_WIDTH 32
+#define MC_CMD_GET_LINK_IN_V2_TARGET_HI_OFST 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_HI_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_HI_LBN 32
+#define MC_CMD_GET_LINK_IN_V2_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 3
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 2
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LINK_END_OFST 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LINK_END_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LEN 8
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LO_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LO_LBN 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_HI_OFST 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_HI_LBN 32
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_HI_WIDTH 32
+
/* MC_CMD_GET_LINK_OUT msgresponse */
#define MC_CMD_GET_LINK_OUT_LEN 28
/* Near-side advertised capabilities. Refer to
@@ -5745,6 +6163,7 @@
/* This returns the negotiated flow control value. */
#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
#define MC_CMD_GET_LINK_OUT_FCNTL_LEN 4
+/* enum property: value */
/* Enum values, see field(s): */
/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
@@ -5813,6 +6232,7 @@
/* This returns the negotiated flow control value. */
#define MC_CMD_GET_LINK_OUT_V2_FCNTL_OFST 20
#define MC_CMD_GET_LINK_OUT_V2_FCNTL_LEN 4
+/* enum property: value */
/* Enum values, see field(s): */
/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
#define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_OFST 24
@@ -5969,6 +6389,95 @@
#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_LBN 7
#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_WIDTH 1
+/* MC_CMD_SET_LINK_IN_V3 msgrequest */
+#define MC_CMD_SET_LINK_IN_V3_LEN 28
+/* Near-side advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_SET_LINK_IN_V3_CAP_OFST 0
+#define MC_CMD_SET_LINK_IN_V3_CAP_LEN 4
+/* Flags */
+#define MC_CMD_SET_LINK_IN_V3_FLAGS_OFST 4
+#define MC_CMD_SET_LINK_IN_V3_FLAGS_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_LOWPOWER_OFST 4
+#define MC_CMD_SET_LINK_IN_V3_LOWPOWER_LBN 0
+#define MC_CMD_SET_LINK_IN_V3_LOWPOWER_WIDTH 1
+#define MC_CMD_SET_LINK_IN_V3_POWEROFF_OFST 4
+#define MC_CMD_SET_LINK_IN_V3_POWEROFF_LBN 1
+#define MC_CMD_SET_LINK_IN_V3_POWEROFF_WIDTH 1
+#define MC_CMD_SET_LINK_IN_V3_TXDIS_OFST 4
+#define MC_CMD_SET_LINK_IN_V3_TXDIS_LBN 2
+#define MC_CMD_SET_LINK_IN_V3_TXDIS_WIDTH 1
+#define MC_CMD_SET_LINK_IN_V3_LINKDOWN_OFST 4
+#define MC_CMD_SET_LINK_IN_V3_LINKDOWN_LBN 3
+#define MC_CMD_SET_LINK_IN_V3_LINKDOWN_WIDTH 1
+/* Loopback mode. */
+#define MC_CMD_SET_LINK_IN_V3_LOOPBACK_MODE_OFST 8
+#define MC_CMD_SET_LINK_IN_V3_LOOPBACK_MODE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+/* A loopback speed of 0 is supported and means "choose any available speed".
+ */
+#define MC_CMD_SET_LINK_IN_V3_LOOPBACK_SPEED_OFST 12
+#define MC_CMD_SET_LINK_IN_V3_LOOPBACK_SPEED_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_OFST 16
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_LEN 1
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_NUMBER_OFST 16
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_NUMBER_LBN 0
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_NUMBER_WIDTH 7
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_IGNORE_OFST 16
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_IGNORE_LBN 7
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_IGNORE_WIDTH 1
+/* Padding */
+#define MC_CMD_SET_LINK_IN_V3_RESERVED_OFST 17
+#define MC_CMD_SET_LINK_IN_V3_RESERVED_LEN 3
+/* Target port to set link state for. Uses MAE_LINK_ENDPOINT_SELECTOR, which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details.
+ */
+#define MC_CMD_SET_LINK_IN_V3_TARGET_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LEN 8
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LO_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LO_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LO_LBN 160
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LO_WIDTH 32
+#define MC_CMD_SET_LINK_IN_V3_TARGET_HI_OFST 24
+#define MC_CMD_SET_LINK_IN_V3_TARGET_HI_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_HI_LBN 192
+#define MC_CMD_SET_LINK_IN_V3_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FLAT_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_TYPE_OFST 23
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 160
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 180
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 176
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 22
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LINK_END_OFST 24
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LINK_END_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LEN 8
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LO_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LO_LBN 160
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_HI_OFST 24
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_HI_LBN 192
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_HI_WIDTH 32
+
/* MC_CMD_SET_LINK_OUT msgresponse */
#define MC_CMD_SET_LINK_OUT_LEN 0
@@ -6034,17 +6543,17 @@
#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
#define MC_CMD_SET_MAC_IN_FCNTL_LEN 4
/* enum: Flow control is off. */
-#define MC_CMD_FCNTL_OFF 0x0
+/* MC_CMD_FCNTL_OFF 0x0 */
/* enum: Respond to flow control. */
-#define MC_CMD_FCNTL_RESPOND 0x1
+/* MC_CMD_FCNTL_RESPOND 0x1 */
/* enum: Respond to and Issue flow control. */
-#define MC_CMD_FCNTL_BIDIR 0x2
-/* enum: Auto neg flow control. */
-#define MC_CMD_FCNTL_AUTO 0x3
-/* enum: Priority flow control (eftest builds only). */
-#define MC_CMD_FCNTL_QBB 0x4
+/* MC_CMD_FCNTL_BIDIR 0x2 */
+/* enum: Auto negotiate flow control. */
+/* MC_CMD_FCNTL_AUTO 0x3 */
+/* enum: Priority flow control. This is only supported on KSB. */
+/* MC_CMD_FCNTL_QBB 0x4 */
/* enum: Issue flow control. */
-#define MC_CMD_FCNTL_GENERATE 0x5
+/* MC_CMD_FCNTL_GENERATE 0x5 */
#define MC_CMD_SET_MAC_IN_FLAGS_OFST 24
#define MC_CMD_SET_MAC_IN_FLAGS_LEN 4
#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_OFST 24
@@ -6086,9 +6595,9 @@
/* MC_CMD_FCNTL_RESPOND 0x1 */
/* enum: Respond to and Issue flow control. */
/* MC_CMD_FCNTL_BIDIR 0x2 */
-/* enum: Auto neg flow control. */
+/* enum: Auto negotiate flow control. */
/* MC_CMD_FCNTL_AUTO 0x3 */
-/* enum: Priority flow control (eftest builds only). */
+/* enum: Priority flow control. This is only supported on KSB. */
/* MC_CMD_FCNTL_QBB 0x4 */
/* enum: Issue flow control. */
/* MC_CMD_FCNTL_GENERATE 0x5 */
@@ -6155,9 +6664,9 @@
/* MC_CMD_FCNTL_RESPOND 0x1 */
/* enum: Respond to and Issue flow control. */
/* MC_CMD_FCNTL_BIDIR 0x2 */
-/* enum: Auto neg flow control. */
+/* enum: Auto negotiate flow control. */
/* MC_CMD_FCNTL_AUTO 0x3 */
-/* enum: Priority flow control (eftest builds only). */
+/* enum: Priority flow control. This is only supported on KSB. */
/* MC_CMD_FCNTL_QBB 0x4 */
/* enum: Issue flow control. */
/* MC_CMD_FCNTL_GENERATE 0x5 */
@@ -6188,19 +6697,9 @@
#define MC_CMD_SET_MAC_V3_IN_CFG_FCS_OFST 28
#define MC_CMD_SET_MAC_V3_IN_CFG_FCS_LBN 4
#define MC_CMD_SET_MAC_V3_IN_CFG_FCS_WIDTH 1
-/* Identifies the MAC to update by the specifying the end of a logical MAE
- * link. Setting TARGET to MAE_LINK_ENDPOINT_COMPAT is equivalent to using the
- * previous version of the command (MC_CMD_SET_MAC_EXT). Not all possible
- * combinations of MPORT_END and MPORT_SELECTOR in TARGET will work in all
- * circumstances. 1. Some will always work (e.g. a VF can always address its
- * logical MAC using MPORT_SELECTOR=ASSIGNED,LINK_END=VNIC), 2. Some are not
- * meaningful and will always fail with EINVAL (e.g. attempting to address the
- * VNIC end of a link to a physical port), 3. Some are meaningful but require
- * the MCDI client to have the required permission and fail with EPERM
- * otherwise (e.g. trying to set the MAC on a VF the caller cannot administer),
- * and 4. Some could be implementation-specific and fail with ENOTSUP if not
- * available (no examples exist right now). See SF-123581-TC section 4.3 for
- * more details.
+/* Target port to set MAC state for. Uses MAE_LINK_ENDPOINT_SELECTOR, which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details.
+ */
#define MC_CMD_SET_MAC_V3_IN_TARGET_OFST 32
#define MC_CMD_SET_MAC_V3_IN_TARGET_LEN 8
@@ -6212,6 +6711,7 @@
#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_LEN 4
#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_LBN 288
#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_OFST 32
#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_LEN 4
#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FLAT_OFST 32
@@ -6405,6 +6905,98 @@
#define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16
#define MC_CMD_MAC_STATS_IN_PORT_ID_LEN 4
+/* MC_CMD_MAC_STATS_V2_IN msgrequest */
+#define MC_CMD_MAC_STATS_V2_IN_LEN 28
+/* DMA address of the host buffer to receive the statistics block (see
+ * DMA_LEN below).
+ */
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_OFST 0
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LEN 8
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LO_LBN 0
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LO_WIDTH 32
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_HI_LBN 32
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_HI_WIDTH 32
+#define MC_CMD_MAC_STATS_V2_IN_CMD_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_CMD_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_DMA_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_DMA_LBN 0
+#define MC_CMD_MAC_STATS_V2_IN_DMA_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_CLEAR_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_CLEAR_LBN 1
+#define MC_CMD_MAC_STATS_V2_IN_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CHANGE_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_ENABLE_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CLEAR_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CLEAR_LBN 4
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_NOEVENT_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_NOEVENT_LBN 5
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIOD_MS_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIOD_MS_LBN 16
+#define MC_CMD_MAC_STATS_V2_IN_PERIOD_MS_WIDTH 16
+/* DMA length. Should be set to MAC_STATS_NUM_STATS * sizeof(uint64_t), as
+ * returned by MC_CMD_GET_CAPABILITIES_V4_OUT. For legacy firmware not
+ * supporting MC_CMD_GET_CAPABILITIES_V4_OUT, DMA_LEN should be set to
+ * MC_CMD_MAC_NSTATS * sizeof(uint64_t).
+ */
+#define MC_CMD_MAC_STATS_V2_IN_DMA_LEN_OFST 12
+#define MC_CMD_MAC_STATS_V2_IN_DMA_LEN_LEN 4
+/* port id so vadapter stats can be provided */
+#define MC_CMD_MAC_STATS_V2_IN_PORT_ID_OFST 16
+#define MC_CMD_MAC_STATS_V2_IN_PORT_ID_LEN 4
+/* Target port to request statistics for. Uses MAE_LINK_ENDPOINT_SELECTOR,
+ * which identifies a real or virtual network port by MAE port and link end.
+ * See the structure definition for more details.
+ */
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LEN 8
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LO_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LO_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LO_LBN 160
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LO_WIDTH 32
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_HI_OFST 24
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_HI_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_HI_LBN 192
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FLAT_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_TYPE_OFST 23
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 160
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 180
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 176
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 22
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LINK_END_OFST 24
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LINK_END_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LEN 8
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LO_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LO_LBN 160
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_HI_OFST 24
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_HI_LBN 192
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_HI_WIDTH 32
+
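/*
 * Illustrative sketch (editor's note, not part of this patch): applying the
 * DMA_LEN rule described above when building MC_CMD_MAC_STATS_V2_IN.
 * num_stats is assumed to come from MC_CMD_GET_CAPABILITIES_V4_OUT
 * (MAC_STATS_NUM_STATS), falling back to MC_CMD_MAC_NSTATS on legacy
 * firmware; dma_addr is an already-mapped host buffer. The TARGET field is
 * left at zero, which this sketch assumes selects the caller's own port.
 */
static void example_fill_mac_stats_v2(efx_dword_t *inbuf, dma_addr_t dma_addr,
				      unsigned int num_stats, u32 port_id)
{
	MCDI_SET_QWORD(inbuf, MAC_STATS_V2_IN_DMA_ADDR, dma_addr);
	MCDI_SET_DWORD(inbuf, MAC_STATS_V2_IN_DMA_LEN,
		       num_stats * sizeof(u64));
	MCDI_SET_DWORD(inbuf, MAC_STATS_V2_IN_PORT_ID, port_id);
	/* One-shot DMA of the statistics block, no periodic updates */
	MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_V2_IN_CMD,
			      MAC_STATS_V2_IN_DMA, 1);
}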
/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0
@@ -6421,6 +7013,7 @@
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_LBN 32
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
+/* enum property: index */
#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */
#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */
#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */
@@ -6583,6 +7176,7 @@
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_LBN 32
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V2
+/* enum property: index */
/* enum: Start of FEC stats buffer space, Medford2 and up */
#define MC_CMD_MAC_FEC_DMABUF_START 0x61
/* enum: Number of uncorrected FEC codewords on link (RS-FEC only for Medford2)
@@ -6622,6 +7216,7 @@
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_LBN 32
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V3
+/* enum property: index */
/* enum: Start of CTPIO stats buffer space, Medford2 and up */
#define MC_CMD_MAC_CTPIO_DMABUF_START 0x68
/* enum: Number of CTPIO fallbacks because a DMA packet was in progress on the
@@ -6702,6 +7297,7 @@
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_LBN 32
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V4
+/* enum property: index */
/* enum: Start of V4 stats buffer space */
#define MC_CMD_MAC_V4_DMABUF_START 0x79
/* enum: RXDP counter: Number of packets truncated because scattering was
@@ -6723,112 +7319,35 @@
/* Other enum values, see field(s): */
/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA/STATISTICS */
-
-/***********************************/
-/* MC_CMD_SRIOV
- * to be documented
- */
-#define MC_CMD_SRIOV 0x30
-
-/* MC_CMD_SRIOV_IN msgrequest */
-#define MC_CMD_SRIOV_IN_LEN 12
-#define MC_CMD_SRIOV_IN_ENABLE_OFST 0
-#define MC_CMD_SRIOV_IN_ENABLE_LEN 4
-#define MC_CMD_SRIOV_IN_VI_BASE_OFST 4
-#define MC_CMD_SRIOV_IN_VI_BASE_LEN 4
-#define MC_CMD_SRIOV_IN_VF_COUNT_OFST 8
-#define MC_CMD_SRIOV_IN_VF_COUNT_LEN 4
-
-/* MC_CMD_SRIOV_OUT msgresponse */
-#define MC_CMD_SRIOV_OUT_LEN 8
-#define MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0
-#define MC_CMD_SRIOV_OUT_VI_SCALE_LEN 4
-#define MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4
-#define MC_CMD_SRIOV_OUT_VF_TOTAL_LEN 4
-
-/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
-/* this is only used for the first record */
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LEN 8
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST 8
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_LBN 64
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST 12
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_LBN 96
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_OFST 20
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LEN 8
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST 20
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_LBN 160
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST 24
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_LBN 192
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_MEMCPY
- * DMA write data into (Rid,Addr), either by dma reading (Rid,Addr), or by data
- * embedded directly in the command.
- *
- * A common pattern is for a client to use generation counts to signal a dma
- * update of a datastructure. To facilitate this, this MCDI operation can
- * contain multiple requests which are executed in strict order. Requests take
- * the form of duplicating the entire MCDI request continuously (including the
- * requests record, which is ignored in all but the first structure)
- *
- * The source data can either come from a DMA from the host, or it can be
- * embedded within the request directly, thereby eliminating a DMA read. To
- * indicate this, the client sets FROM_RID=%RID_INLINE, ADDR_HI=0, and
- * ADDR_LO=offset, and inserts the data at %offset from the start of the
- * payload. It's the callers responsibility to ensure that the embedded data
- * doesn't overlap the records.
- *
- * Returns: 0, EINVAL (invalid RID)
- */
-#define MC_CMD_MEMCPY 0x31
-
-/* MC_CMD_MEMCPY_IN msgrequest */
-#define MC_CMD_MEMCPY_IN_LENMIN 32
-#define MC_CMD_MEMCPY_IN_LENMAX 224
-#define MC_CMD_MEMCPY_IN_LENMAX_MCDI2 992
-#define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num))
-#define MC_CMD_MEMCPY_IN_RECORD_NUM(len) (((len)-0)/32)
-/* see MC_CMD_MEMCPY_RECORD_TYPEDEF */
-#define MC_CMD_MEMCPY_IN_RECORD_OFST 0
-#define MC_CMD_MEMCPY_IN_RECORD_LEN 32
-#define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1
-#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM 7
-#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM_MCDI2 31
-
-/* MC_CMD_MEMCPY_OUT msgresponse */
-#define MC_CMD_MEMCPY_OUT_LEN 0
+/* MC_CMD_MAC_STATS_V5_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V5_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V5_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V5*64))>>3)
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LO_LEN 4
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LO_LBN 0
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LO_WIDTH 32
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_HI_LEN 4
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_HI_LBN 32
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V5
+/* enum property: index */
+/* enum: Start of V5 stats buffer space */
+#define MC_CMD_MAC_V5_DMABUF_START 0x7c
+/* enum: Link toggle counter: Number of times the link has toggled between
+ * up/down and down/up
+ */
+#define MC_CMD_MAC_LINK_TOGGLES 0x7c
+/* enum: This includes the space at offset 125, which holds the final
+ * GENERATION_END in a MAC_STATS_V5 response and is otherwise unused.
+ */
+#define MC_CMD_MAC_NSTATS_V5 0x7e
+/* Other enum values, see field(s): */
+/* MC_CMD_MAC_STATS_V4_OUT_NO_DMA/STATISTICS */
/***********************************/
@@ -6984,6 +7503,7 @@
#define MC_CMD_WOL_FILTER_RESET_IN_LEN 4
#define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
#define MC_CMD_WOL_FILTER_RESET_IN_MASK_LEN 4
+/* enum property: bitmask */
#define MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */
#define MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */
@@ -6992,23 +7512,6 @@
/***********************************/
-/* MC_CMD_SET_MCAST_HASH
- * Set the MCAST hash value without otherwise reconfiguring the MAC
- */
-#define MC_CMD_SET_MCAST_HASH 0x35
-
-/* MC_CMD_SET_MCAST_HASH_IN msgrequest */
-#define MC_CMD_SET_MCAST_HASH_IN_LEN 32
-#define MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0
-#define MC_CMD_SET_MCAST_HASH_IN_HASH0_LEN 16
-#define MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16
-#define MC_CMD_SET_MCAST_HASH_IN_HASH1_LEN 16
-
-/* MC_CMD_SET_MCAST_HASH_OUT msgresponse */
-#define MC_CMD_SET_MCAST_HASH_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_NVRAM_TYPES
* Return bitfield indicating available types of virtual NVRAM partitions.
* Locks required: none. Returns: 0
@@ -7026,6 +7529,7 @@
/* Bit mask of supported types. */
#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
#define MC_CMD_NVRAM_TYPES_OUT_TYPES_LEN 4
+/* enum property: bitshift */
/* enum: Disabled callisto. */
#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0
/* enum: MC firmware. */
@@ -7152,6 +7656,12 @@
#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_OFST 12
#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7
#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITE_ONLY_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITE_ONLY_LBN 8
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITE_ONLY_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_SEQUENTIAL_WRITE_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_SEQUENTIAL_WRITE_LBN 9
+#define MC_CMD_NVRAM_INFO_V2_OUT_SEQUENTIAL_WRITE_WIDTH 1
#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16
#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_LEN 4
#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20
@@ -7499,6 +8009,128 @@
#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_COPY_FAILED 0x19
/* enum: The update operation is in-progress. */
#define MC_CMD_NVRAM_VERIFY_RC_PENDING 0x1a
+/* enum: The update was an invalid user configuration file. */
+#define MC_CMD_NVRAM_VERIFY_RC_BAD_CONFIG 0x1b
+/* enum: The write was to the AUTO partition but the data was not recognised as
+ * a valid partition.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_UNKNOWN_TYPE 0x1c
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT msgresponse */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_LEN 88
+/* Result of NVRAM update completion processing. Result codes that indicate an
+ * internal build failure, and are therefore not expected to be seen by
+ * customers in the field, are marked with the prefix 'Internal-error'.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_RESULT_CODE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_RESULT_CODE_LEN 4
+/* enum: Invalid return code; only non-zero values are defined. Defined as
+ * unknown for backwards compatibility with NVRAM_UPDATE_FINISH_OUT.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_UNKNOWN 0x0 */
+/* enum: Verify succeeded without any errors. */
+/* MC_CMD_NVRAM_VERIFY_RC_SUCCESS 0x1 */
+/* enum: CMS format verification failed due to an internal error. */
+/* MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED 0x2 */
+/* enum: Invalid CMS format in image metadata. */
+/* MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT 0x3 */
+/* enum: Message digest verification failed due to an internal error. */
+/* MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED 0x4 */
+/* enum: Error in message digest calculated over the reflash-header, payload
+ * and reflash-trailer.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST 0x5 */
+/* enum: Signature verification failed due to an internal error. */
+/* MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED 0x6 */
+/* enum: There are no valid signatures in the image. */
+/* MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES 0x7 */
+/* enum: Trusted approvers verification failed due to an internal error. */
+/* MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED 0x8 */
+/* enum: The Trusted approver's list is empty. */
+/* MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS 0x9 */
+/* enum: Signature chain verification failed due to an internal error. */
+/* MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED 0xa */
+/* enum: The signers of the signatures in the image are not listed in the
+ * Trusted approver's list.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH 0xb */
+/* enum: The image contains a test-signed certificate, but the adapter accepts
+ * only production signed images.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED 0xc */
+/* enum: The image has a lower security level than the current firmware. */
+/* MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE 0xd */
+/* enum: Internal-error. The signed image is missing the 'contents' section,
+ * where the 'contents' section holds the actual image payload to be applied.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_CONTENT_NOT_FOUND 0xe */
+/* enum: Internal-error. The bundle header is invalid. */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_CONTENT_HEADER_INVALID 0xf */
+/* enum: Internal-error. The bundle does not have a valid reflash image layout.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_REFLASH_IMAGE_INVALID 0x10 */
+/* enum: Internal-error. The bundle has an inconsistent layout of components or
+ * incorrect checksum.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_IMAGE_LAYOUT_INVALID 0x11 */
+/* enum: Internal-error. The bundle manifest is inconsistent with components in
+ * the bundle.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_INVALID 0x12 */
+/* enum: Internal-error. The number of components in a bundle do not match the
+ * number of components advertised by the bundle manifest.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_NUM_COMPONENTS_MISMATCH 0x13 */
+/* enum: Internal-error. The bundle contains too many components for the MC
+ * firmware to process
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_TOO_MANY_COMPONENTS 0x14 */
+/* enum: Internal-error. The bundle manifest has an invalid/inconsistent
+ * component.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_INVALID 0x15 */
+/* enum: Internal-error. The hash of a component does not match the hash stored
+ * in the bundle manifest.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_HASH_MISMATCH 0x16 */
+/* enum: Internal-error. Component hash calculation failed. */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_HASH_FAILED 0x17 */
+/* enum: Internal-error. The component does not have a valid reflash image
+ * layout.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_REFLASH_IMAGE_INVALID 0x18 */
+/* enum: The bundle processing code failed to copy a component to its target
+ * partition.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_COPY_FAILED 0x19 */
+/* enum: The update operation is in-progress. */
+/* MC_CMD_NVRAM_VERIFY_RC_PENDING 0x1a */
+/* enum: The update was an invalid user configuration file. */
+/* MC_CMD_NVRAM_VERIFY_RC_BAD_CONFIG 0x1b */
+/* enum: The write was to the AUTO partition but the data was not recognised as
+ * a valid partition.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_UNKNOWN_TYPE 0x1c */
+/* If the update was a user configuration, what action(s) the user must take to
+ * apply the new configuration.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_ACTIONS_REQUIRED_OFST 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_ACTIONS_REQUIRED_LEN 4
+/* enum: No action required. */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_NONE 0x0
+/* enum: The MC firmware must be rebooted (eg with MC_CMD_REBOOT). */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_FIRMWARE_REBOOT 0x1
+/* enum: The host must be rebooted. */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_HOST_REBOOT 0x2
+/* enum: The firmware and host must be rebooted (in either order). */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_FIRMWARE_AND_HOST_REBOOT 0x3
+/* enum: The host must be fully powered off. */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_HOST_POWERCYCLE 0x4
+/* If the update failed with MC_CMD_NVRAM_VERIFY_RC_BAD_CONFIG, a null-
+ * terminated US-ASCII string suitable for showing to the user.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_ERROR_STRING_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_ERROR_STRING_LEN 80
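
/*
 * Illustrative sketch (editor's note, not part of this patch): decoding the
 * ACTIONS_REQUIRED field of MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT. outbuf is
 * assumed to hold a response of at least V3 length; the logging policy is an
 * assumption made for the example.
 */
static void example_report_update_actions(struct efx_nic *efx,
					  const efx_dword_t *outbuf)
{
	switch (MCDI_DWORD(outbuf,
			   NVRAM_UPDATE_FINISH_V3_OUT_ACTIONS_REQUIRED)) {
	case MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_NONE:
		break;
	case MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_FIRMWARE_REBOOT:
		netif_info(efx, hw, efx->net_dev,
			   "firmware reboot required to apply update\n");
		break;
	case MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_HOST_REBOOT:
	case MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_FIRMWARE_AND_HOST_REBOOT:
		netif_info(efx, hw, efx->net_dev,
			   "host reboot required to apply update\n");
		break;
	case MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_HOST_POWERCYCLE:
		netif_info(efx, hw, efx->net_dev,
			   "host power cycle required to apply update\n");
		break;
	}
}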
/***********************************/
@@ -7522,7 +8154,7 @@
#define MC_CMD_REBOOT 0x3d
#undef MC_CMD_0x3d_PRIVILEGE_CTG
-#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_REBOOT_IN msgrequest */
#define MC_CMD_REBOOT_IN_LEN 4
@@ -7535,65 +8167,6 @@
/***********************************/
-/* MC_CMD_SCHEDINFO
- * Request scheduler info. Locks required: NONE. Returns: An array of
- * (timeslice,maximum overrun), one for each thread, in ascending order of
- * thread address.
- */
-#define MC_CMD_SCHEDINFO 0x3e
-#undef MC_CMD_0x3e_PRIVILEGE_CTG
-
-#define MC_CMD_0x3e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SCHEDINFO_IN msgrequest */
-#define MC_CMD_SCHEDINFO_IN_LEN 0
-
-/* MC_CMD_SCHEDINFO_OUT msgresponse */
-#define MC_CMD_SCHEDINFO_OUT_LENMIN 4
-#define MC_CMD_SCHEDINFO_OUT_LENMAX 252
-#define MC_CMD_SCHEDINFO_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_SCHEDINFO_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_SCHEDINFO_OUT_DATA_NUM(len) (((len)-0)/4)
-#define MC_CMD_SCHEDINFO_OUT_DATA_OFST 0
-#define MC_CMD_SCHEDINFO_OUT_DATA_LEN 4
-#define MC_CMD_SCHEDINFO_OUT_DATA_MINNUM 1
-#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM 63
-#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM_MCDI2 255
-
-
-/***********************************/
-/* MC_CMD_REBOOT_MODE
- * Set the mode for the next MC reboot. Locks required: NONE. Sets the reboot
- * mode to the specified value. Returns the old mode.
- */
-#define MC_CMD_REBOOT_MODE 0x3f
-#undef MC_CMD_0x3f_PRIVILEGE_CTG
-
-#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_REBOOT_MODE_IN msgrequest */
-#define MC_CMD_REBOOT_MODE_IN_LEN 4
-#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
-#define MC_CMD_REBOOT_MODE_IN_VALUE_LEN 4
-/* enum: Normal. */
-#define MC_CMD_REBOOT_MODE_NORMAL 0x0
-/* enum: Power-on Reset. */
-#define MC_CMD_REBOOT_MODE_POR 0x2
-/* enum: Snapper. */
-#define MC_CMD_REBOOT_MODE_SNAPPER 0x3
-/* enum: snapper fake POR */
-#define MC_CMD_REBOOT_MODE_SNAPPER_POR 0x4
-#define MC_CMD_REBOOT_MODE_IN_FAKE_OFST 0
-#define MC_CMD_REBOOT_MODE_IN_FAKE_LBN 7
-#define MC_CMD_REBOOT_MODE_IN_FAKE_WIDTH 1
-
-/* MC_CMD_REBOOT_MODE_OUT msgresponse */
-#define MC_CMD_REBOOT_MODE_OUT_LEN 4
-#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
-#define MC_CMD_REBOOT_MODE_OUT_VALUE_LEN 4
-
-
-/***********************************/
/* MC_CMD_SENSOR_INFO
* Returns information about every available sensor.
*
@@ -8061,6 +8634,54 @@
/* MC_CMD_GET_PHY_STATE_IN msgrequest */
#define MC_CMD_GET_PHY_STATE_IN_LEN 0
+/* MC_CMD_GET_PHY_STATE_IN_V2 msgrequest */
+#define MC_CMD_GET_PHY_STATE_IN_V2_LEN 8
+/* Target port to request PHY state for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details.
+ */
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LEN 8
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LO_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LO_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LO_LBN 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LO_WIDTH 32
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_HI_OFST 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_HI_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_HI_LBN 32
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 3
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 2
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LINK_END_OFST 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LINK_END_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LEN 8
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LO_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LO_LBN 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_HI_OFST 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_HI_LBN 32
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_HI_WIDTH 32
+
/* MC_CMD_GET_PHY_STATE_OUT msgresponse */
#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
@@ -8072,22 +8693,6 @@
/***********************************/
-/* MC_CMD_SETUP_8021QBB
- * 802.1Qbb control. 8 Tx queues that map to priorities 0 - 7. Use all 1s to
- * disable 802.Qbb for a given priority.
- */
-#define MC_CMD_SETUP_8021QBB 0x44
-
-/* MC_CMD_SETUP_8021QBB_IN msgrequest */
-#define MC_CMD_SETUP_8021QBB_IN_LEN 32
-#define MC_CMD_SETUP_8021QBB_IN_TXQS_OFST 0
-#define MC_CMD_SETUP_8021QBB_IN_TXQS_LEN 32
-
-/* MC_CMD_SETUP_8021QBB_OUT msgresponse */
-#define MC_CMD_SETUP_8021QBB_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_WOL_FILTER_GET
* Retrieve ID of any WoL filters. Locks required: None. Returns: 0, ENOSYS
*/
@@ -8106,133 +8711,6 @@
/***********************************/
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD
- * Add a protocol offload to NIC for lights-out state. Locks required: None.
- * Returns: 0, ENOSYS
- */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
-#undef MC_CMD_0x46_PRIVILEGE_CTG
-
-#define MC_CMD_0x46_PRIVILEGE_CTG SRIOV_CTG_LINK
-
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX_MCDI2 1020
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num))
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_NUM(len) (((len)-4)/4)
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
-#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */
-#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_LEN 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MINNUM 1
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM 62
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM_MCDI2 254
-
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_LEN 4
-
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_LEN 16
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_OFST 26
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_LEN 16
-
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_LEN 4
-
-
-/***********************************/
-/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD
- * Remove a protocol offload from NIC for lights-out state. Locks required:
- * None. Returns: 0, ENOSYS
- */
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
-#undef MC_CMD_0x47_PRIVILEGE_CTG
-
-#define MC_CMD_0x47_PRIVILEGE_CTG SRIOV_CTG_LINK
-
-/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_LEN 4
-
-/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_MAC_RESET_RESTORE
- * Restore MAC after block reset. Locks required: None. Returns: 0.
- */
-#define MC_CMD_MAC_RESET_RESTORE 0x48
-
-/* MC_CMD_MAC_RESET_RESTORE_IN msgrequest */
-#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0
-
-/* MC_CMD_MAC_RESET_RESTORE_OUT msgresponse */
-#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_TESTASSERT
- * Deliberately trigger an assert-detonation in the firmware for testing
- * purposes (i.e. to allow tests that the driver copes gracefully). Locks
- * required: None Returns: 0
- */
-#define MC_CMD_TESTASSERT 0x49
-#undef MC_CMD_0x49_PRIVILEGE_CTG
-
-#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_TESTASSERT_IN msgrequest */
-#define MC_CMD_TESTASSERT_IN_LEN 0
-
-/* MC_CMD_TESTASSERT_OUT msgresponse */
-#define MC_CMD_TESTASSERT_OUT_LEN 0
-
-/* MC_CMD_TESTASSERT_V2_IN msgrequest */
-#define MC_CMD_TESTASSERT_V2_IN_LEN 4
-/* How to provoke the assertion */
-#define MC_CMD_TESTASSERT_V2_IN_TYPE_OFST 0
-#define MC_CMD_TESTASSERT_V2_IN_TYPE_LEN 4
-/* enum: Assert using the FAIL_ASSERTION_WITH_USEFUL_VALUES macro. Unless
- * you're testing firmware, this is what you want.
- */
-#define MC_CMD_TESTASSERT_V2_IN_FAIL_ASSERTION_WITH_USEFUL_VALUES 0x0
-/* enum: Assert using assert(0); */
-#define MC_CMD_TESTASSERT_V2_IN_ASSERT_FALSE 0x1
-/* enum: Deliberately trigger a watchdog */
-#define MC_CMD_TESTASSERT_V2_IN_WATCHDOG 0x2
-/* enum: Deliberately trigger a trap by loading from an invalid address */
-#define MC_CMD_TESTASSERT_V2_IN_LOAD_TRAP 0x3
-/* enum: Deliberately trigger a trap by storing to an invalid address */
-#define MC_CMD_TESTASSERT_V2_IN_STORE_TRAP 0x4
-/* enum: Jump to an invalid address */
-#define MC_CMD_TESTASSERT_V2_IN_JUMP_TRAP 0x5
-
-/* MC_CMD_TESTASSERT_V2_OUT msgresponse */
-#define MC_CMD_TESTASSERT_V2_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_WORKAROUND
* Enable/Disable a given workaround. The mcfw will return EINVAL if it doesn't
* understand the given workaround number - which should not be treated as a
@@ -8324,6 +8802,62 @@
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_BANK_LBN 16
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_BANK_WIDTH 16
+/* MC_CMD_GET_PHY_MEDIA_INFO_IN_V2 msgrequest */
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_LEN 12
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_PAGE_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_PAGE_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_PAGE_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_PAGE_LBN 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_PAGE_WIDTH 16
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_BANK_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_BANK_LBN 16
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_BANK_WIDTH 16
+/* Target port to request PHY state for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details
+ */
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LEN 8
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LO_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LO_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LO_LBN 32
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LO_WIDTH 32
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_HI_OFST 8
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_HI_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_HI_LBN 64
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 7
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 32
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 52
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 48
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 6
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LINK_END_OFST 8
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LINK_END_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LEN 8
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LO_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LO_LBN 32
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_HI_OFST 8
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_HI_LBN 64
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_HI_WIDTH 32
+
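/* A minimal usage sketch (not part of the MCDI definitions above, and only an
 * illustration): packing the DSFP page and bank into the first dword of
 * MC_CMD_GET_PHY_MEDIA_INFO_IN_V2, following the LBN/WIDTH values given for
 * DSFP_PAGE (bits 0..15) and DSFP_BANK (bits 16..31). The helper name and
 * variable names are illustrative only.
 */
static inline u32 phy_media_info_v2_page(u16 dsfp_page, u16 dsfp_bank)
{
	/* DSFP_PAGE occupies bits 0..15, DSFP_BANK bits 16..31 */
	return (u32)dsfp_page | ((u32)dsfp_bank << 16);
}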
/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252
@@ -8348,7 +8882,7 @@
#define MC_CMD_NVRAM_TEST 0x4c
#undef MC_CMD_0x4c_PRIVILEGE_CTG
-#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_NVRAM_TEST_IN msgrequest */
#define MC_CMD_NVRAM_TEST_IN_LEN 4
@@ -8370,103 +8904,6 @@
/***********************************/
-/* MC_CMD_MRSFP_TWEAK
- * Read status and/or set parameters for the 'mrsfp' driver in mr_rusty builds.
- * I2C I/O expander bits are always read; if equaliser parameters are supplied,
- * they are configured first. Locks required: None. Return code: 0, EINVAL.
- */
-#define MC_CMD_MRSFP_TWEAK 0x4d
-
-/* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
-/* 0-6 low->high de-emph. */
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_LEN 4
-/* 0-8 low->high ref.V */
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_LEN 4
-/* 0-8 0-8 low->high boost */
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_LEN 4
-/* 0-8 low->high ref.V */
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_LEN 4
-
-/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
-#define MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0
-
-/* MC_CMD_MRSFP_TWEAK_OUT msgresponse */
-#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
-/* input bits */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_LEN 4
-/* output bits */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_LEN 4
-/* direction */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_LEN 4
-/* enum: Out. */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0
-/* enum: In. */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1
-
-
-/***********************************/
-/* MC_CMD_SENSOR_SET_LIMS
- * Adjusts the sensor limits. This is a warranty-voiding operation. Returns:
- * ENOENT if the sensor specified does not exist, EINVAL if the limits are out
- * of range.
- */
-#define MC_CMD_SENSOR_SET_LIMS 0x4e
-#undef MC_CMD_0x4e_PRIVILEGE_CTG
-
-#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
-#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
-#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
-#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
-/* interpretation is sensor-specific. */
-#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
-#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_LEN 4
-/* interpretation is sensor-specific. */
-#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
-#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_LEN 4
-/* interpretation is sensor-specific. */
-#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
-#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_LEN 4
-/* interpretation is sensor-specific. */
-#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
-#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_LEN 4
-
-/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
-#define MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_GET_RESOURCE_LIMITS
- */
-#define MC_CMD_GET_RESOURCE_LIMITS 0x4f
-
-/* MC_CMD_GET_RESOURCE_LIMITS_IN msgrequest */
-#define MC_CMD_GET_RESOURCE_LIMITS_IN_LEN 0
-
-/* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_LEN 4
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_LEN 4
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_LEN 4
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_LEN 4
-
-
-/***********************************/
/* MC_CMD_NVRAM_PARTITIONS
* Reads the list of available virtual NVRAM partition types. Locks required:
* none. Returns: 0, EINVAL (bad type).
@@ -8582,806 +9019,6 @@
#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_LEN 4
-
-/***********************************/
-/* MC_CMD_CLP
- * Perform a CLP related operation, see SF-110495-PS for details of CLP
- * processing. This command has been extended to accommodate the requirements of
- * different manufacturers which are to be found in SF-119187-TC, SF-119186-TC,
- * SF-120509-TC and SF-117282-PS.
- */
-#define MC_CMD_CLP 0x56
-#undef MC_CMD_0x56_PRIVILEGE_CTG
-
-#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_CLP_IN msgrequest */
-#define MC_CMD_CLP_IN_LEN 4
-/* Sub operation */
-#define MC_CMD_CLP_IN_OP_OFST 0
-#define MC_CMD_CLP_IN_OP_LEN 4
-/* enum: Return to factory default settings */
-#define MC_CMD_CLP_OP_DEFAULT 0x1
-/* enum: Set MAC address */
-#define MC_CMD_CLP_OP_SET_MAC 0x2
-/* enum: Get MAC address */
-#define MC_CMD_CLP_OP_GET_MAC 0x3
-/* enum: Set UEFI/GPXE boot mode */
-#define MC_CMD_CLP_OP_SET_BOOT 0x4
-/* enum: Get UEFI/GPXE boot mode */
-#define MC_CMD_CLP_OP_GET_BOOT 0x5
-
-/* MC_CMD_CLP_OUT msgresponse */
-#define MC_CMD_CLP_OUT_LEN 0
-
-/* MC_CMD_CLP_IN_DEFAULT msgrequest */
-#define MC_CMD_CLP_IN_DEFAULT_LEN 4
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-
-/* MC_CMD_CLP_OUT_DEFAULT msgresponse */
-#define MC_CMD_CLP_OUT_DEFAULT_LEN 0
-
-/* MC_CMD_CLP_IN_SET_MAC msgrequest */
-#define MC_CMD_CLP_IN_SET_MAC_LEN 12
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-/* The MAC address assigned to port. A zero MAC address of 00:00:00:00:00:00
- * restores the permanent (factory-programmed) MAC address associated with the
- * port. A non-zero MAC address persists until a PCIe reset or a power cycle.
- */
-#define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
-#define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
-/* Padding */
-#define MC_CMD_CLP_IN_SET_MAC_RESERVED_OFST 10
-#define MC_CMD_CLP_IN_SET_MAC_RESERVED_LEN 2
-
-/* MC_CMD_CLP_OUT_SET_MAC msgresponse */
-#define MC_CMD_CLP_OUT_SET_MAC_LEN 0
-
-/* MC_CMD_CLP_IN_SET_MAC_V2 msgrequest */
-#define MC_CMD_CLP_IN_SET_MAC_V2_LEN 16
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-/* The MAC address assigned to port. A zero MAC address of 00:00:00:00:00:00
- * restores the permanent (factory-programmed) MAC address associated with the
- * port. A non-zero MAC address persists until a PCIe reset or a power cycle.
- */
-#define MC_CMD_CLP_IN_SET_MAC_V2_ADDR_OFST 4
-#define MC_CMD_CLP_IN_SET_MAC_V2_ADDR_LEN 6
-/* Padding */
-#define MC_CMD_CLP_IN_SET_MAC_V2_RESERVED_OFST 10
-#define MC_CMD_CLP_IN_SET_MAC_V2_RESERVED_LEN 2
-#define MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_OFST 12
-#define MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_LEN 4
-#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_OFST 12
-#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_LBN 0
-#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_WIDTH 1
-
-/* MC_CMD_CLP_IN_GET_MAC msgrequest */
-#define MC_CMD_CLP_IN_GET_MAC_LEN 4
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-
-/* MC_CMD_CLP_IN_GET_MAC_V2 msgrequest */
-#define MC_CMD_CLP_IN_GET_MAC_V2_LEN 8
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-#define MC_CMD_CLP_IN_GET_MAC_V2_FLAGS_OFST 4
-#define MC_CMD_CLP_IN_GET_MAC_V2_FLAGS_LEN 4
-#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_OFST 4
-#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_LBN 0
-#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_WIDTH 1
-
-/* MC_CMD_CLP_OUT_GET_MAC msgresponse */
-#define MC_CMD_CLP_OUT_GET_MAC_LEN 8
-/* MAC address assigned to port */
-#define MC_CMD_CLP_OUT_GET_MAC_ADDR_OFST 0
-#define MC_CMD_CLP_OUT_GET_MAC_ADDR_LEN 6
-/* Padding */
-#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_OFST 6
-#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_LEN 2
-
-/* MC_CMD_CLP_IN_SET_BOOT msgrequest */
-#define MC_CMD_CLP_IN_SET_BOOT_LEN 5
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-/* Boot flag */
-#define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4
-#define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1
-
-/* MC_CMD_CLP_OUT_SET_BOOT msgresponse */
-#define MC_CMD_CLP_OUT_SET_BOOT_LEN 0
-
-/* MC_CMD_CLP_IN_GET_BOOT msgrequest */
-#define MC_CMD_CLP_IN_GET_BOOT_LEN 4
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-
-/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */
-#define MC_CMD_CLP_OUT_GET_BOOT_LEN 4
-/* Boot flag */
-#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_OFST 0
-#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_LEN 1
-/* Padding */
-#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_OFST 1
-#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_LEN 3
-
-
-/***********************************/
-/* MC_CMD_MUM
- * Perform a MUM operation
- */
-#define MC_CMD_MUM 0x57
-#undef MC_CMD_0x57_PRIVILEGE_CTG
-
-#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_MUM_IN msgrequest */
-#define MC_CMD_MUM_IN_LEN 4
-#define MC_CMD_MUM_IN_OP_HDR_OFST 0
-#define MC_CMD_MUM_IN_OP_HDR_LEN 4
-#define MC_CMD_MUM_IN_OP_OFST 0
-#define MC_CMD_MUM_IN_OP_LBN 0
-#define MC_CMD_MUM_IN_OP_WIDTH 8
-/* enum: NULL MCDI command to MUM */
-#define MC_CMD_MUM_OP_NULL 0x1
-/* enum: Get MUM version */
-#define MC_CMD_MUM_OP_GET_VERSION 0x2
-/* enum: Issue raw I2C command to MUM */
-#define MC_CMD_MUM_OP_RAW_CMD 0x3
-/* enum: Read from registers on devices connected to MUM. */
-#define MC_CMD_MUM_OP_READ 0x4
-/* enum: Write to registers on devices connected to MUM. */
-#define MC_CMD_MUM_OP_WRITE 0x5
-/* enum: Control UART logging. */
-#define MC_CMD_MUM_OP_LOG 0x6
-/* enum: Operations on MUM GPIO lines */
-#define MC_CMD_MUM_OP_GPIO 0x7
-/* enum: Get sensor readings from MUM */
-#define MC_CMD_MUM_OP_READ_SENSORS 0x8
-/* enum: Initiate clock programming on the MUM */
-#define MC_CMD_MUM_OP_PROGRAM_CLOCKS 0x9
-/* enum: Initiate FPGA load from flash on the MUM */
-#define MC_CMD_MUM_OP_FPGA_LOAD 0xa
-/* enum: Request sensor reading from MUM ADC resulting from earlier request via
- * MUM ATB
- */
-#define MC_CMD_MUM_OP_READ_ATB_SENSOR 0xb
-/* enum: Send commands relating to the QSFP ports via the MUM for PHY
- * operations
- */
-#define MC_CMD_MUM_OP_QSFP 0xc
-/* enum: Request discrete and SODIMM DDR info (type, size, speed grade, voltage
- * level) from MUM
- */
-#define MC_CMD_MUM_OP_READ_DDR_INFO 0xd
-
-/* MC_CMD_MUM_IN_NULL msgrequest */
-#define MC_CMD_MUM_IN_NULL_LEN 4
-/* MUM cmd header */
-#define MC_CMD_MUM_IN_CMD_OFST 0
-#define MC_CMD_MUM_IN_CMD_LEN 4
-
-/* MC_CMD_MUM_IN_GET_VERSION msgrequest */
-#define MC_CMD_MUM_IN_GET_VERSION_LEN 4
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-
-/* MC_CMD_MUM_IN_READ msgrequest */
-#define MC_CMD_MUM_IN_READ_LEN 16
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* ID of (device connected to MUM) to read from registers of */
-#define MC_CMD_MUM_IN_READ_DEVICE_OFST 4
-#define MC_CMD_MUM_IN_READ_DEVICE_LEN 4
-/* enum: Hittite HMC1035 clock generator on Sorrento board */
-#define MC_CMD_MUM_DEV_HITTITE 0x1
-/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */
-#define MC_CMD_MUM_DEV_HITTITE_NIC 0x2
-/* 32-bit address to read from */
-#define MC_CMD_MUM_IN_READ_ADDR_OFST 8
-#define MC_CMD_MUM_IN_READ_ADDR_LEN 4
-/* Number of words to read. */
-#define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12
-#define MC_CMD_MUM_IN_READ_NUMWORDS_LEN 4
-
-/* MC_CMD_MUM_IN_WRITE msgrequest */
-#define MC_CMD_MUM_IN_WRITE_LENMIN 16
-#define MC_CMD_MUM_IN_WRITE_LENMAX 252
-#define MC_CMD_MUM_IN_WRITE_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num))
-#define MC_CMD_MUM_IN_WRITE_BUFFER_NUM(len) (((len)-12)/4)
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* ID of (device connected to MUM) to write to registers of */
-#define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4
-#define MC_CMD_MUM_IN_WRITE_DEVICE_LEN 4
-/* enum: Hittite HMC1035 clock generator on Sorrento board */
-/* MC_CMD_MUM_DEV_HITTITE 0x1 */
-/* 32-bit address to write to */
-#define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8
-#define MC_CMD_MUM_IN_WRITE_ADDR_LEN 4
-/* Words to write */
-#define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12
-#define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4
-#define MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM 1
-#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM 60
-#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM_MCDI2 252
-
-/* MC_CMD_MUM_IN_RAW_CMD msgrequest */
-#define MC_CMD_MUM_IN_RAW_CMD_LENMIN 17
-#define MC_CMD_MUM_IN_RAW_CMD_LENMAX 252
-#define MC_CMD_MUM_IN_RAW_CMD_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num))
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_NUM(len) (((len)-16)/1)
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* MUM I2C cmd code */
-#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4
-#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_LEN 4
-/* Number of bytes to write */
-#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8
-#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_LEN 4
-/* Number of bytes to read */
-#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12
-#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_LEN 4
-/* Bytes to write */
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MINNUM 1
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM 236
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM_MCDI2 1004
-
-/* MC_CMD_MUM_IN_LOG msgrequest */
-#define MC_CMD_MUM_IN_LOG_LEN 8
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_LOG_OP_OFST 4
-#define MC_CMD_MUM_IN_LOG_OP_LEN 4
-#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */
-
-/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */
-#define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* MC_CMD_MUM_IN_LOG_OP_OFST 4 */
-/* MC_CMD_MUM_IN_LOG_OP_LEN 4 */
-/* Enable/disable debug output to UART */
-#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8
-#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO msgrequest */
-#define MC_CMD_MUM_IN_GPIO_LEN 8
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_HDR_LEN 4
-#define MC_CMD_MUM_IN_GPIO_OPCODE_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0
-#define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8
-#define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE 0x1 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OUT_READ 0x2 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE 0x3 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ 0x4 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OP 0x5 /* enum */
-
-/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */
-#define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_LEN 4
-/* The first 32-bit word to be written to the GPIO OUT register. */
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_LEN 4
-/* The second 32-bit word to be written to the GPIO OUT register. */
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_LEN 4
-/* The first 32-bit word to be written to the GPIO OUT ENABLE register. */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_LEN 4
-/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OP msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OP_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_HDR_LEN 4
-#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8
-#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE 0x1 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG 0x2 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE 0x3 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN 16
-#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_WIDTH 8
-
-/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_LEN 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8
-
-/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_LEN 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8
-
-/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_LEN 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8
-
-/* MC_CMD_MUM_IN_READ_SENSORS msgrequest */
-#define MC_CMD_MUM_IN_READ_SENSORS_LEN 8
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4
-#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_LEN 4
-#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_OFST 4
-#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0
-#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8
-#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_OFST 4
-#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8
-#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_WIDTH 8
-
-/* MC_CMD_MUM_IN_PROGRAM_CLOCKS msgrequest */
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* Bit-mask of clocks to be programmed */
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_LEN 4
-#define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */
-#define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */
-#define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */
-/* Control flags for clock programming */
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_LEN 4
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_OFST 8
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_OFST 8
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_WIDTH 1
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_OFST 8
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_LBN 2
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_WIDTH 1
-
-/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */
-#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* Enable/Disable FPGA config from flash */
-#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4
-#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_LEN 4
-
-/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */
-#define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-
-/* MC_CMD_MUM_IN_QSFP msgrequest */
-#define MC_CMD_MUM_IN_QSFP_LEN 12
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_OPCODE_OFST 4
-#define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0
-#define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4
-#define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE 0x1 /* enum */
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP 0x2 /* enum */
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO 0x3 /* enum */
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */
-#define MC_CMD_MUM_IN_QSFP_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_IDX_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */
-#define MC_CMD_MUM_IN_QSFP_INIT_LEN 16
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_INIT_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_INIT_IDX_LEN 4
-#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12
-#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_LEN 4
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_LEN 4
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_LEN 4
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_LEN 4
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_LEN 4
-
-/* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */
-#define MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-
-/* MC_CMD_MUM_OUT msgresponse */
-#define MC_CMD_MUM_OUT_LEN 0
-
-/* MC_CMD_MUM_OUT_NULL msgresponse */
-#define MC_CMD_MUM_OUT_NULL_LEN 0
-
-/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */
-#define MC_CMD_MUM_OUT_GET_VERSION_LEN 12
-#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0
-#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_LEN 4
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_LEN 4
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_LBN 32
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_WIDTH 32
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_LEN 4
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_LBN 64
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_WIDTH 32
-
-/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */
-#define MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1
-#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX 252
-#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_OUT_RAW_CMD_LEN(num) (0+1*(num))
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_NUM(len) (((len)-0)/1)
-/* returned data */
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_OFST 0
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_LEN 1
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MINNUM 1
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM 252
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM_MCDI2 1020
-
-/* MC_CMD_MUM_OUT_READ msgresponse */
-#define MC_CMD_MUM_OUT_READ_LENMIN 4
-#define MC_CMD_MUM_OUT_READ_LENMAX 252
-#define MC_CMD_MUM_OUT_READ_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_OUT_READ_LEN(num) (0+4*(num))
-#define MC_CMD_MUM_OUT_READ_BUFFER_NUM(len) (((len)-0)/4)
-#define MC_CMD_MUM_OUT_READ_BUFFER_OFST 0
-#define MC_CMD_MUM_OUT_READ_BUFFER_LEN 4
-#define MC_CMD_MUM_OUT_READ_BUFFER_MINNUM 1
-#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM 63
-#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM_MCDI2 255
-
-/* MC_CMD_MUM_OUT_WRITE msgresponse */
-#define MC_CMD_MUM_OUT_WRITE_LEN 0
-
-/* MC_CMD_MUM_OUT_LOG msgresponse */
-#define MC_CMD_MUM_OUT_LOG_LEN 0
-
-/* MC_CMD_MUM_OUT_LOG_OP_UART msgresponse */
-#define MC_CMD_MUM_OUT_LOG_OP_UART_LEN 0
-
-/* MC_CMD_MUM_OUT_GPIO_IN_READ msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8
-/* The first 32-bit word read from the GPIO IN register. */
-#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0
-#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_LEN 4
-/* The second 32-bit word read from the GPIO IN register. */
-#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4
-#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_LEN 4
-
-/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0
-
-/* MC_CMD_MUM_OUT_GPIO_OUT_READ msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8
-/* The first 32-bit word read from the GPIO OUT register. */
-#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0
-#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_LEN 4
-/* The second 32-bit word read from the GPIO OUT register. */
-#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4
-#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_LEN 4
-
-/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0
-
-/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_LEN 4
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_LEN 4
-
-/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_LEN 4
-
-/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0
-
-/* MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG_LEN 0
-
-/* MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE_LEN 0
-
-/* MC_CMD_MUM_OUT_READ_SENSORS msgresponse */
-#define MC_CMD_MUM_OUT_READ_SENSORS_LENMIN 4
-#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX 252
-#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_OUT_READ_SENSORS_LEN(num) (0+4*(num))
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_NUM(len) (((len)-0)/4)
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST 0
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_LEN 4
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MINNUM 1
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM 63
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM_MCDI2 255
-#define MC_CMD_MUM_OUT_READ_SENSORS_READING_OFST 0
-#define MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0
-#define MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH 16
-#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_OFST 0
-#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16
-#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH 8
-#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_OFST 0
-#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24
-#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH 8
-
-/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */
-#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4
-#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0
-#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_LEN 4
-
-/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */
-#define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0
-
-/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */
-#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4
-#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0
-#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_LEN 4
-
-/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0
-
-/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_OFST 4
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_OFST 4
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_WIDTH 1
-
-/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0
-#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_LEN 4
-
-/* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX 252
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num))
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_NUM(len) (((len)-4)/1)
-/* in bytes */
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM 248
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM_MCDI2 1016
-
-/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8
-#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0
-#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4
-#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_LEN 4
-
-/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
-#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_LEN 4
-
-/* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX 248
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX_MCDI2 1016
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num))
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_NUM(len) (((len)-8)/8)
-/* Discrete (soldered) DDR resistor strap info */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_LEN 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_OFST 0
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_OFST 0
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16
-/* Number of SODIMM info records */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_LEN 4
-/* Array of SODIMM info records */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_LEN 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_LBN 64
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_WIDTH 32
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_OFST 12
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_LEN 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_LBN 96
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_WIDTH 32
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MINNUM 2
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM 30
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM_MCDI2 126
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_LBN 0
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_WIDTH 8
-/* enum: SODIMM bank 1 (Top SODIMM for Sorrento) */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK1 0x0
-/* enum: SODIMM bank 2 (Bottom SODIMM for Sorrento) */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK2 0x1
-/* enum: Total number of SODIMM banks */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_BANKS 0x2
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_LBN 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_WIDTH 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_LBN 16
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_WIDTH 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_LBN 20
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_WIDTH 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_POWERED 0x0 /* enum */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V25 0x1 /* enum */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V35 0x2 /* enum */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V5 0x3 /* enum */
-/* enum: Values 5-15 are reserved for future usage */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V8 0x4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_LBN 24
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_WIDTH 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_LBN 32
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_WIDTH 16
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_LBN 48
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_WIDTH 4
-/* enum: No module present */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_ABSENT 0x0
-/* enum: Module present supported and powered on */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_POWERED 0x1
-/* enum: Module present but bad type */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_TYPE 0x2
-/* enum: Module present but incompatible voltage */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_VOLTAGE 0x3
-/* enum: Module present but unknown SPD */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SPD 0x4
-/* enum: Module present but slot cannot support it */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SLOT 0x5
-/* enum: Modules may or may not be present, but cannot establish contact by I2C
- */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_REACHABLE 0x6
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_LBN 52
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_WIDTH 12
-
/* MC_CMD_DYNAMIC_SENSORS_LIMITS structuredef: Set of sensor limits. This
* should match the equivalent structure in the sensor_query SPHINX service.
*/
@@ -9500,27 +9137,22 @@
* and a generation count for this version of the sensor table. On systems
* advertising the DYNAMIC_SENSORS capability bit, this replaces the
* MC_CMD_READ_SENSORS command. On multi-MC systems this may include sensors
- * added by the NMC.
- *
- * Sensor handles are persistent for the lifetime of the sensor and are used to
- * identify sensors in MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS and
- * MC_CMD_DYNAMIC_SENSORS_GET_VALUES.
- *
- * The generation count is maintained by the MC, is persistent across reboots
- * and will be incremented each time the sensor table is modified. When the
- * table is modified, a CODE_DYNAMIC_SENSORS_CHANGE event will be generated
- * containing the new generation count. The driver should compare this against
- * the current generation count, and if it is different, call
- * MC_CMD_DYNAMIC_SENSORS_LIST again to update it's copy of the sensor table.
- *
- * The sensor count is provided to allow a future path to supporting more than
+ * added by the NMC. Sensor handles are persistent for the lifetime of the
+ * sensor and are used to identify sensors in
+ * MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS and
+ * MC_CMD_DYNAMIC_SENSORS_GET_VALUES. The generation count is maintained by the
+ * MC, is persistent across reboots and will be incremented each time the
+ * sensor table is modified. When the table is modified, a
+ * CODE_DYNAMIC_SENSORS_CHANGE event will be generated containing the new
+ * generation count. The driver should compare this against the current
+ * generation count, and if it is different, call MC_CMD_DYNAMIC_SENSORS_LIST
+ * again to update its copy of the sensor table. The sensor count is provided
+ * to allow a future path to supporting more than
* MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM_MCDI2 sensors, i.e.
* the maximum number that will fit in a single response. As this is a fairly
* large number (253) it is not anticipated that this will be needed in the
- * near future, so can currently be ignored.
- *
- * On Riverhead this command is implemented as a wrapper for `list` in the
- * sensor_query SPHINX service.
+ * near future, so can currently be ignored. On Riverhead this command is
+ * implemented as a wrapper for `list` in the sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_LIST 0x66
#undef MC_CMD_0x66_PRIVILEGE_CTG
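/* A minimal usage sketch (not part of the patch): one way a driver might react
 * to the CODE_DYNAMIC_SENSORS_CHANGE event described above, re-reading the
 * sensor table only when the generation count has moved. struct my_nic,
 * mcdi_dynamic_sensors_list() and rebuild_sensor_cache() are hypothetical
 * placeholders, not sfc driver APIs.
 */
static void handle_dynamic_sensors_change(struct my_nic *nic, u32 new_generation)
{
	/* Nothing to do if our cached table already matches this generation. */
	if (new_generation == nic->sensor_generation)
		return;

	/* Re-issue MC_CMD_DYNAMIC_SENSORS_LIST; it returns the handle list,
	 * the sensor count and the current generation count.
	 */
	if (!mcdi_dynamic_sensors_list(nic, nic->sensor_handles,
				       &nic->sensor_count,
				       &nic->sensor_generation))
		rebuild_sensor_cache(nic);
}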
@@ -9557,15 +9189,13 @@
/***********************************/
/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS
* Get descriptions for a set of sensors, specified as an array of sensor
- * handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST
- *
- * Any handles which do not correspond to a sensor currently managed by the MC
- * will be dropped from from the response. This may happen when a sensor table
- * update is in progress, and effectively means the set of usable sensors is
- * the intersection between the sets of sensors known to the driver and the MC.
- *
- * On Riverhead this command is implemented as a wrapper for
- * `get_descriptions` in the sensor_query SPHINX service.
+ * handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST. Any handles which do not
+ * correspond to a sensor currently managed by the MC will be dropped from
+ * the response. This may happen when a sensor table update is in progress, and
+ * effectively means the set of usable sensors is the intersection between the
+ * sets of sensors known to the driver and the MC. On Riverhead this command is
+ * implemented as a wrapper for `get_descriptions` in the sensor_query SPHINX
+ * service.
*/
#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS 0x67
#undef MC_CMD_0x67_PRIVILEGE_CTG
@@ -9602,19 +9232,15 @@
/***********************************/
/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS
* Read the state and value for a set of sensors, specified as an array of
- * sensor handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST.
- *
- * In the case of a broken sensor, then the state of the response's
- * MC_CMD_DYNAMIC_SENSORS_VALUE entry will be set to BROKEN, and any value
- * provided should be treated as erroneous.
- *
- * Any handles which do not correspond to a sensor currently managed by the MC
- * will be dropped from from the response. This may happen when a sensor table
- * update is in progress, and effectively means the set of usable sensors is
- * the intersection between the sets of sensors known to the driver and the MC.
- *
- * On Riverhead this command is implemented as a wrapper for `get_readings`
- * in the sensor_query SPHINX service.
+ * sensor handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST. In the case of a
+ * broken sensor, then the state of the response's MC_CMD_DYNAMIC_SENSORS_VALUE
+ * entry will be set to BROKEN, and any value provided should be treated as
+ * erroneous. Any handles which do not correspond to a sensor currently managed
+ * by the MC will be dropped from the response. This may happen when a
+ * sensor table update is in progress, and effectively means the set of usable
+ * sensors is the intersection between the sets of sensors known to the driver
+ * and the MC. On Riverhead this command is implemented as a wrapper for
+ * `get_readings` in the sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS 0x68
#undef MC_CMD_0x68_PRIVILEGE_CTG
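/* A minimal usage sketch (not part of the patch): consuming GET_READINGS
 * output along the lines described above. The reading layout, the
 * SENSOR_STATE_BROKEN constant and the helper functions are assumptions for
 * illustration only. Handles absent from the response are treated as dropped
 * by the MC; a BROKEN state means the reported value must be ignored.
 */
struct sensor_reading { u32 handle; u32 value; u32 state; };

static void consume_sensor_readings(struct my_nic *nic,
				    const struct sensor_reading *vals,
				    size_t count)
{
	size_t i;

	for (i = 0; i < count; i++) {
		if (vals[i].state == SENSOR_STATE_BROKEN)
			continue;	/* value is erroneous; skip it */
		update_cached_sensor(nic, vals[i].handle,
				     vals[i].value, vals[i].state);
	}
	/* Any cached handle not present in vals[] was dropped by the MC. */
	prune_missing_sensors(nic, vals, count);
}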
@@ -9647,45 +9273,1286 @@
#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MAXNUM 21
#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MAXNUM_MCDI2 85
+/* MC_CMD_MAC_FLAGS structuredef */
+#define MC_CMD_MAC_FLAGS_LEN 4
+/* The enums defined in this field are used as indices into the
+ * MC_CMD_MAC_FLAGS bitmask.
+ */
+#define MC_CMD_MAC_FLAGS_MASK_OFST 0
+#define MC_CMD_MAC_FLAGS_MASK_LEN 4
+/* enum property: bitshift */
+/* enum: Include the FCS in the packet data delivered to the host. Ignored if
+ * RX_INCLUDE_FCS is not set in capabilities.
+ */
+#define MC_CMD_MAC_FLAGS_FLAG_INCLUDE_FCS 0x0
+#define MC_CMD_MAC_FLAGS_MASK_LBN 0
+#define MC_CMD_MAC_FLAGS_MASK_WIDTH 32
+
+/* MC_CMD_TRANSMISSION_MODE structuredef */
+#define MC_CMD_TRANSMISSION_MODE_LEN 4
+#define MC_CMD_TRANSMISSION_MODE_MASK_OFST 0
+#define MC_CMD_TRANSMISSION_MODE_MASK_LEN 4
+/* enum property: value */
+#define MC_CMD_TRANSMISSION_MODE_PROMSC_MODE 0x0 /* enum */
+#define MC_CMD_TRANSMISSION_MODE_UNCST_MODE 0x1 /* enum */
+#define MC_CMD_TRANSMISSION_MODE_BRDCST_MODE 0x2 /* enum */
+#define MC_CMD_TRANSMISSION_MODE_MASK_LBN 0
+#define MC_CMD_TRANSMISSION_MODE_MASK_WIDTH 32
+
+/* MC_CMD_MAC_CONFIG_OPTIONS structuredef */
+#define MC_CMD_MAC_CONFIG_OPTIONS_LEN 4
+#define MC_CMD_MAC_CONFIG_OPTIONS_MASK_OFST 0
+#define MC_CMD_MAC_CONFIG_OPTIONS_MASK_LEN 4
+/* enum property: bitmask */
+/* enum: Configure the MAC address. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_ADDR 0x0
+/* enum: Configure the maximum frame length. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_MAX_FRAME_LEN 0x1
+/* enum: Configure flow control. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_FCNTL 0x2
+/* enum: Configure the transmission mode. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_TRANSMISSION_MODE 0x3
+/* enum: Configure FCS. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_INCLUDE_FCS 0x4
+#define MC_CMD_MAC_CONFIG_OPTIONS_MASK_LBN 0
+#define MC_CMD_MAC_CONFIG_OPTIONS_MASK_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_MAC_CTRL
+ * Set MAC configuration. Return code: 0, EINVAL, ENOTSUP
+ */
+#define MC_CMD_MAC_CTRL 0x1df
+#undef MC_CMD_0x1df_PRIVILEGE_CTG
+
+#define MC_CMD_0x1df_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_MAC_CTRL_IN msgrequest */
+#define MC_CMD_MAC_CTRL_IN_LEN 32
+/* Handle for selected network port. */
+#define MC_CMD_MAC_CTRL_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_MAC_CTRL_IN_PORT_HANDLE_LEN 4
+/* Select which parameters to configure. A parameter will only be modified if
+ * the corresponding control flag is set.
+ */
+#define MC_CMD_MAC_CTRL_IN_CONTROL_FLAGS_OFST 4
+#define MC_CMD_MAC_CTRL_IN_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_CONFIG_OPTIONS/MASK */
+/* MAC address of the device. */
+#define MC_CMD_MAC_CTRL_IN_ADDR_OFST 8
+#define MC_CMD_MAC_CTRL_IN_ADDR_LEN 8
+#define MC_CMD_MAC_CTRL_IN_ADDR_LO_OFST 8
+#define MC_CMD_MAC_CTRL_IN_ADDR_LO_LEN 4
+#define MC_CMD_MAC_CTRL_IN_ADDR_LO_LBN 64
+#define MC_CMD_MAC_CTRL_IN_ADDR_LO_WIDTH 32
+#define MC_CMD_MAC_CTRL_IN_ADDR_HI_OFST 12
+#define MC_CMD_MAC_CTRL_IN_ADDR_HI_LEN 4
+#define MC_CMD_MAC_CTRL_IN_ADDR_HI_LBN 96
+#define MC_CMD_MAC_CTRL_IN_ADDR_HI_WIDTH 32
+/* Includes the ethernet header, optional VLAN tags, payload and FCS. */
+#define MC_CMD_MAC_CTRL_IN_MAX_FRAME_LEN_OFST 16
+#define MC_CMD_MAC_CTRL_IN_MAX_FRAME_LEN_LEN 4
+/* Settings for flow control. */
+#define MC_CMD_MAC_CTRL_IN_FCNTL_OFST 20
+#define MC_CMD_MAC_CTRL_IN_FCNTL_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_FCNTL/MASK */
+/* Configure the MAC to transmit in one of promiscuous, unicast or broadcast
+ * mode.
+ */
+#define MC_CMD_MAC_CTRL_IN_TRANSMISSION_MODE_OFST 24
+#define MC_CMD_MAC_CTRL_IN_TRANSMISSION_MODE_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_TRANSMISSION_MODE/MASK */
+/* Flags to control and expand the configuration of the MAC. */
+#define MC_CMD_MAC_CTRL_IN_FLAGS_OFST 28
+#define MC_CMD_MAC_CTRL_IN_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_FLAGS/MASK */
+
+/* MC_CMD_MAC_CTRL_IN_V2 msgrequest: Updated MAC_CTRL with QBB mask */
+#define MC_CMD_MAC_CTRL_IN_V2_LEN 33
+/* Handle for selected network port. */
+#define MC_CMD_MAC_CTRL_IN_V2_PORT_HANDLE_OFST 0
+#define MC_CMD_MAC_CTRL_IN_V2_PORT_HANDLE_LEN 4
+/* Select which parameters to configure. A parameter will only be modified if
+ * the corresponding control flag is set.
+ */
+#define MC_CMD_MAC_CTRL_IN_V2_CONTROL_FLAGS_OFST 4
+#define MC_CMD_MAC_CTRL_IN_V2_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_CONFIG_OPTIONS/MASK */
+/* MAC address of the device. */
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_OFST 8
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LEN 8
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LO_OFST 8
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LO_LEN 4
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LO_LBN 64
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LO_WIDTH 32
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_HI_OFST 12
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_HI_LEN 4
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_HI_LBN 96
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_HI_WIDTH 32
+/* Includes the ethernet header, optional VLAN tags, payload and FCS. */
+#define MC_CMD_MAC_CTRL_IN_V2_MAX_FRAME_LEN_OFST 16
+#define MC_CMD_MAC_CTRL_IN_V2_MAX_FRAME_LEN_LEN 4
+/* Settings for flow control. */
+#define MC_CMD_MAC_CTRL_IN_V2_FCNTL_OFST 20
+#define MC_CMD_MAC_CTRL_IN_V2_FCNTL_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_FCNTL/MASK */
+/* Configure the MAC to transmit in one of promiscuous, unicast or broadcast
+ * mode.
+ */
+#define MC_CMD_MAC_CTRL_IN_V2_TRANSMISSION_MODE_OFST 24
+#define MC_CMD_MAC_CTRL_IN_V2_TRANSMISSION_MODE_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_TRANSMISSION_MODE/MASK */
+/* Flags to control and expand the configuration of the MAC. */
+#define MC_CMD_MAC_CTRL_IN_V2_FLAGS_OFST 28
+#define MC_CMD_MAC_CTRL_IN_V2_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_FLAGS/MASK */
+/* Priority-based flow control mask (QBB). PRIO7 corresponds to the highest
+ * priority, and PRIO0 to the lowest. This field is only used when CFG_FCNTL is
+ * set and FCNTL is QBB
+ */
+#define MC_CMD_MAC_CTRL_IN_V2_PRIO_FCNTL_MASK_OFST 32
+#define MC_CMD_MAC_CTRL_IN_V2_PRIO_FCNTL_MASK_LEN 1
+/* enum property: bitmask */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO0 0x0 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO1 0x1 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO2 0x2 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO3 0x3 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO4 0x4 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO5 0x5 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO6 0x6 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO7 0x7 /* enum */
+
+/* MC_CMD_MAC_CTRL_OUT msgresponse */
+#define MC_CMD_MAC_CTRL_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_MAC_STATE
+ * Read the MAC state. Return code: 0, ETIME.
+ */
+#define MC_CMD_MAC_STATE 0x1e0
+#undef MC_CMD_0x1e0_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e0_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_MAC_STATE_IN msgrequest */
+#define MC_CMD_MAC_STATE_IN_LEN 4
+/* Handle for selected network port. */
+#define MC_CMD_MAC_STATE_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_MAC_STATE_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_MAC_STATE_OUT msgresponse */
+#define MC_CMD_MAC_STATE_OUT_LEN 32
+/* The configured maximum frame length of the MAC. */
+#define MC_CMD_MAC_STATE_OUT_MAX_FRAME_LEN_OFST 0
+#define MC_CMD_MAC_STATE_OUT_MAX_FRAME_LEN_LEN 4
+/* This returns the negotiated flow control value. */
+#define MC_CMD_MAC_STATE_OUT_FCNTL_OFST 4
+#define MC_CMD_MAC_STATE_OUT_FCNTL_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_FCNTL/MASK */
+/* MAC address of the device. */
+#define MC_CMD_MAC_STATE_OUT_ADDR_OFST 8
+#define MC_CMD_MAC_STATE_OUT_ADDR_LEN 8
+#define MC_CMD_MAC_STATE_OUT_ADDR_LO_OFST 8
+#define MC_CMD_MAC_STATE_OUT_ADDR_LO_LEN 4
+#define MC_CMD_MAC_STATE_OUT_ADDR_LO_LBN 64
+#define MC_CMD_MAC_STATE_OUT_ADDR_LO_WIDTH 32
+#define MC_CMD_MAC_STATE_OUT_ADDR_HI_OFST 12
+#define MC_CMD_MAC_STATE_OUT_ADDR_HI_LEN 4
+#define MC_CMD_MAC_STATE_OUT_ADDR_HI_LBN 96
+#define MC_CMD_MAC_STATE_OUT_ADDR_HI_WIDTH 32
+/* Flags indicating MAC faults. */
+#define MC_CMD_MAC_STATE_OUT_MAC_FAULT_FLAGS_OFST 16
+#define MC_CMD_MAC_STATE_OUT_MAC_FAULT_FLAGS_LEN 4
+/* enum property: bitshift */
+/* enum: Indicates a local MAC fault. */
+#define MC_CMD_MAC_STATE_OUT_LOCAL 0x0
+/* enum: Indicates a remote MAC fault. */
+#define MC_CMD_MAC_STATE_OUT_REMOTE 0x1
+/* enum: Indicates a pending reconfiguration of the MAC. */
+#define MC_CMD_MAC_STATE_OUT_PENDING_RECONFIG 0x2
+/* The flags that were used to configure the MAC. This is a copy of the FLAGS
+ * field in the MC_CMD_MAC_CTRL_IN command.
+ */
+#define MC_CMD_MAC_STATE_OUT_FLAGS_OFST 20
+#define MC_CMD_MAC_STATE_OUT_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_FLAGS/MASK */
+/* The transmission mode that was used to configure the MAC. This is a copy of
+ * the TRANSMISSION_MODE field in the MC_CMD_MAC_CTRL_IN command.
+ */
+#define MC_CMD_MAC_STATE_OUT_TRANSMISSION_MODE_OFST 24
+#define MC_CMD_MAC_STATE_OUT_TRANSMISSION_MODE_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_TRANSMISSION_MODE/MASK */
+/* The control flags that were used to configure the MAC. This is a copy of the
+ * CONTROL field in the MC_CMD_MAC_CTRL_IN command.
+ */
+#define MC_CMD_MAC_STATE_OUT_CONTROL_FLAGS_OFST 28
+#define MC_CMD_MAC_STATE_OUT_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_CONFIG_OPTIONS/MASK */
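/* A minimal usage sketch (not part of the patch): MAC_FAULT_FLAGS is a
 * "bitshift" enum, i.e. each enum value names a bit position in the flags
 * word, so a fault is tested like this. The helper name is illustrative only.
 */
static inline bool mac_state_local_fault(u32 mac_fault_flags)
{
	return mac_fault_flags & (1U << MC_CMD_MAC_STATE_OUT_LOCAL);
}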
+
+
+/***********************************/
+/* MC_CMD_GET_ASSIGNED_PORT_HANDLE
+ * Obtain a handle that can be operated on to configure and query the status of
+ * the link. ENOENT is returned when no port is assigned to the client. Return
+ * code: 0, ENOENT
+ */
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE 0x1e2
+#undef MC_CMD_0x1e2_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_ASSIGNED_PORT_HANDLE_IN msgrequest */
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE_IN_LEN 0
+
+/* MC_CMD_GET_ASSIGNED_PORT_HANDLE_OUT msgresponse */
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE_OUT_LEN 4
+/* Handle for assigned port. */
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE_OUT_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE_OUT_PORT_HANDLE_LEN 4
+
+/* MC_CMD_STAT_ID structuredef */
+#define MC_CMD_STAT_ID_LEN 4
+#define MC_CMD_STAT_ID_SOURCE_ID_OFST 0
+#define MC_CMD_STAT_ID_SOURCE_ID_LEN 2
+/* enum property: index */
+/* enum: Internal markers (generation start and end markers) */
+#define MC_CMD_STAT_ID_MARKER 0x1
+/* enum: Network port MAC statistics. */
+#define MC_CMD_STAT_ID_MAC 0x2
+/* enum: Network port PHY statistics. */
+#define MC_CMD_STAT_ID_PHY 0x3
+#define MC_CMD_STAT_ID_SOURCE_ID_LBN 0
+#define MC_CMD_STAT_ID_SOURCE_ID_WIDTH 16
+#define MC_CMD_STAT_ID_MARKER_STAT_ID_OFST 2
+#define MC_CMD_STAT_ID_MARKER_STAT_ID_LEN 2
+/* enum property: index */
+/* enum: This value is used to mark the start of a generation of statistics for
+ * DMA synchronization. It is incremented whenever a new set of statistics is
+ * transferred. Always the first entry in the DMA buffer.
+ */
+#define MC_CMD_STAT_ID_GENERATION_START 0x1
+/* enum: This value is used to mark the end of a generation of statistics for
+ * DMA synchronization. Always the last entry in the DMA buffer and set to the
+ * same value as GENERATION_START. The host driver must compare the
+ * GENERATION_START and GENERATION_END values to verify that the DMA buffer is
+ * consistent after copying the DMA buffer. If they do not match, it means
+ * that a new DMA transfer has started while the host driver was copying the
+ * DMA buffer. In this case, the host driver must repeat the copy operation.
+ */
+#define MC_CMD_STAT_ID_GENERATION_END 0x2
+#define MC_CMD_STAT_ID_MARKER_STAT_ID_LBN 16
+#define MC_CMD_STAT_ID_MARKER_STAT_ID_WIDTH 16
+#define MC_CMD_STAT_ID_MAC_STAT_ID_OFST 2
+#define MC_CMD_STAT_ID_MAC_STAT_ID_LEN 2
+/* enum property: index */
+/* enum: Total number of packets transmitted (includes pause frames). */
+#define MC_CMD_STAT_ID_TX_PKTS 0x1
+/* enum: Pause frames transmitted. */
+#define MC_CMD_STAT_ID_TX_PAUSE_PKTS 0x2
+/* enum: Control frames transmitted. */
+#define MC_CMD_STAT_ID_TX_CONTROL_PKTS 0x3
+/* enum: Unicast packets transmitted (includes pause frames). */
+#define MC_CMD_STAT_ID_TX_UNICAST_PKTS 0x4
+/* enum: Multicast packets transmitted (includes pause frames). */
+#define MC_CMD_STAT_ID_TX_MULTICAST_PKTS 0x5
+/* enum: Broadcast packets transmitted (includes pause frames). */
+#define MC_CMD_STAT_ID_TX_BROADCAST_PKTS 0x6
+/* enum: Bytes transmitted (includes pause frames). */
+#define MC_CMD_STAT_ID_TX_BYTES 0x7
+/* enum: Bytes transmitted with bad CRC. */
+#define MC_CMD_STAT_ID_TX_BAD_BYTES 0x8
+/* enum: Bytes transmitted with good CRC. */
+#define MC_CMD_STAT_ID_TX_GOOD_BYTES 0x9
+/* enum: Packets transmitted with length less than 64 bytes. */
+#define MC_CMD_STAT_ID_TX_LT64_PKTS 0xa
+/* enum: Packets transmitted with length equal to 64 bytes. */
+#define MC_CMD_STAT_ID_TX_64_PKTS 0xb
+/* enum: Packets transmitted with length between 65 and 127 bytes. */
+#define MC_CMD_STAT_ID_TX_65_TO_127_PKTS 0xc
+/* enum: Packets transmitted with length between 128 and 255 bytes. */
+#define MC_CMD_STAT_ID_TX_128_TO_255_PKTS 0xd
+/* enum: Packets transmitted with length between 256 and 511 bytes. */
+#define MC_CMD_STAT_ID_TX_256_TO_511_PKTS 0xe
+/* enum: Packets transmitted with length between 512 and 1023 bytes. */
+#define MC_CMD_STAT_ID_TX_512_TO_1023_PKTS 0xf
+/* enum: Packets transmitted with length between 1024 and 1518 bytes. */
+#define MC_CMD_STAT_ID_TX_1024_TO_15XX_PKTS 0x10
+/* enum: Packets transmitted with length between 1519 and 9216 bytes. */
+#define MC_CMD_STAT_ID_TX_15XX_TO_JUMBO_PKTS 0x11
+/* enum: Packets transmitted with length greater than 9216 bytes. */
+#define MC_CMD_STAT_ID_TX_GTJUMBO_PKTS 0x12
+/* enum: Packets transmitted with bad FCS. */
+#define MC_CMD_STAT_ID_TX_BAD_FCS_PKTS 0x13
+/* enum: Packets transmitted with good FCS. */
+#define MC_CMD_STAT_ID_TX_GOOD_FCS_PKTS 0x14
+/* enum: Packets received. */
+#define MC_CMD_STAT_ID_RX_PKTS 0x15
+/* enum: Pause frames received. */
+#define MC_CMD_STAT_ID_RX_PAUSE_PKTS 0x16
+/* enum: Total number of good packets received. */
+#define MC_CMD_STAT_ID_RX_GOOD_PKTS 0x17
+/* enum: Total number of BAD packets received. */
+#define MC_CMD_STAT_ID_RX_BAD_PKTS 0x18
+/* enum: Total number of control frames received. */
+#define MC_CMD_STAT_ID_RX_CONTROL_PKTS 0x19
+/* enum: Total number of unicast packets received. */
+#define MC_CMD_STAT_ID_RX_UNICAST_PKTS 0x1a
+/* enum: Total number of multicast packets received. */
+#define MC_CMD_STAT_ID_RX_MULTICAST_PKTS 0x1b
+/* enum: Total number of broadcast packets received. */
+#define MC_CMD_STAT_ID_RX_BROADCAST_PKTS 0x1c
+/* enum: Total number of bytes received. */
+#define MC_CMD_STAT_ID_RX_BYTES 0x1d
+/* enum: Total number of bytes received with bad CRC. */
+#define MC_CMD_STAT_ID_RX_BAD_BYTES 0x1e
+/* enum: Total number of bytes received with GOOD CRC. */
+#define MC_CMD_STAT_ID_RX_GOOD_BYTES 0x1f
+/* enum: Packets received with length equal to 64 bytes. */
+#define MC_CMD_STAT_ID_RX_64_PKTS 0x20
+/* enum: Packets received with length between 65 and 127 bytes. */
+#define MC_CMD_STAT_ID_RX_65_TO_127_PKTS 0x21
+/* enum: Packets received with length between 128 and 255 bytes. */
+#define MC_CMD_STAT_ID_RX_128_TO_255_PKTS 0x22
+/* enum: Packets received with length between 256 and 511 bytes. */
+#define MC_CMD_STAT_ID_RX_256_TO_511_PKTS 0x23
+/* enum: Packets received with length between 512 and 1023 bytes. */
+#define MC_CMD_STAT_ID_RX_512_TO_1023_PKTS 0x24
+/* enum: Packets received with length between 1024 and 1518 bytes. */
+#define MC_CMD_STAT_ID_RX_1024_TO_15XX_PKTS 0x25
+/* enum: Packets received with length between 1519 and 9216 bytes. */
+#define MC_CMD_STAT_ID_RX_15XX_TO_JUMBO_PKTS 0x26
+/* enum: Packets received with length greater than 9216 bytes. */
+#define MC_CMD_STAT_ID_RX_GTJUMBO_PKTS 0x27
+/* enum: Packets received with length less than 64 bytes. */
+#define MC_CMD_STAT_ID_RX_UNDERSIZE_PKTS 0x28
+/* enum: Packets received with bad FCS. */
+#define MC_CMD_STAT_ID_RX_BAD_FCS_PKTS 0x29
+/* enum: Packets received with GOOD FCS. */
+#define MC_CMD_STAT_ID_RX_GOOD_FCS_PKTS 0x2a
+/* enum: Packets received with overflow. */
+#define MC_CMD_STAT_ID_RX_OVERFLOW_PKTS 0x2b
+/* enum: Packets received with symbol error. */
+#define MC_CMD_STAT_ID_RX_SYMBOL_ERROR_PKTS 0x2c
+/* enum: Packets received with alignment error. */
+#define MC_CMD_STAT_ID_RX_ALIGN_ERROR_PKTS 0x2d
+/* enum: Packets received with length error. */
+#define MC_CMD_STAT_ID_RX_LENGTH_ERROR_PKTS 0x2e
+/* enum: Packets received with internal error. */
+#define MC_CMD_STAT_ID_RX_INTERNAL_ERROR_PKTS 0x2f
+/* enum: Packets received with jabber. These packets are larger than the
+ * allowed maximum receive unit (MRU). This indicates that a packet either has
+ * a bad CRC or has an RX error.
+ */
+#define MC_CMD_STAT_ID_RX_JABBER_PKTS 0x30
+/* enum: Packets dropped due to having no descriptor. This is a datapath stat
+ */
+#define MC_CMD_STAT_ID_RX_NODESC_DROPS 0x31
+/* enum: Packets received with lanes 0 and 1 character error. */
+#define MC_CMD_STAT_ID_RX_LANES01_CHAR_ERR 0x32
+/* enum: Packets received with lanes 2 and 3 character error. */
+#define MC_CMD_STAT_ID_RX_LANES23_CHAR_ERR 0x33
+/* enum: Packets received with lanes 0 and 1 disparity error. */
+#define MC_CMD_STAT_ID_RX_LANES01_DISP_ERR 0x34
+/* enum: Packets received with lanes 2 and 3 disparity error. */
+#define MC_CMD_STAT_ID_RX_LANES23_DISP_ERR 0x35
+/* enum: Packets received with match fault. */
+#define MC_CMD_STAT_ID_RX_MATCH_FAULT 0x36
+#define MC_CMD_STAT_ID_MAC_STAT_ID_LBN 16
+#define MC_CMD_STAT_ID_MAC_STAT_ID_WIDTH 16
+/* Include FEC stats. */
+#define MC_CMD_STAT_ID_PHY_STAT_ID_OFST 2
+#define MC_CMD_STAT_ID_PHY_STAT_ID_LEN 2
+/* enum property: index */
+/* enum: Number of uncorrected FEC codewords on link (RS-FEC only from Medford2
+ * onwards)
+ */
+#define MC_CMD_STAT_ID_FEC_UNCORRECTED_ERRORS 0x1
+/* enum: Number of corrected FEC codewords on link (RS-FEC only from Medford2
+ * onwards)
+ */
+#define MC_CMD_STAT_ID_FEC_CORRECTED_ERRORS 0x2
+/* enum: Number of corrected 10-bit symbol errors, lane 0 (RS-FEC only) */
+#define MC_CMD_STAT_ID_FEC_CORRECTED_SYMBOLS_LANE0 0x3
+/* enum: Number of corrected 10-bit symbol errors, lane 1 (RS-FEC only) */
+#define MC_CMD_STAT_ID_FEC_CORRECTED_SYMBOLS_LANE1 0x4
+/* enum: Number of corrected 10-bit symbol errors, lane 2 (RS-FEC only) */
+#define MC_CMD_STAT_ID_FEC_CORRECTED_SYMBOLS_LANE2 0x5
+/* enum: Number of corrected 10-bit symbol errors, lane 3 (RS-FEC only) */
+#define MC_CMD_STAT_ID_FEC_CORRECTED_SYMBOLS_LANE3 0x6
+#define MC_CMD_STAT_ID_PHY_STAT_ID_LBN 16
+#define MC_CMD_STAT_ID_PHY_STAT_ID_WIDTH 16
+
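/*
 * Illustrative sketch (not part of this patch): taking a consistent snapshot
 * of a stats DMA buffer using the GENERATION_START/GENERATION_END markers
 * described above.  The buffer is assumed to be laid out as reported by
 * MC_CMD_MAC_STATISTICS_DESCRIPTOR: n_counters 64-bit little-endian values
 * with the start marker first and the end marker last.  Memory barriers and
 * volatile accesses are elided for brevity.
 */
#include <stdint.h>
#include <string.h>
#include <stdbool.h>

static bool copy_stats_snapshot(const uint64_t *dma_buf, uint64_t *snapshot,
                                size_t n_counters)
{
        int retries = 100;

        while (retries--) {
                memcpy(snapshot, dma_buf, n_counters * sizeof(*snapshot));

                /* GENERATION_END must equal GENERATION_START, otherwise a new
                 * DMA transfer started while we were copying and the copy
                 * must be repeated.
                 */
                if (snapshot[n_counters - 1] == snapshot[0])
                        return true;
        }
        return false;
}
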
+/* MC_CMD_STAT_DESC structuredef: Structure describing the layout and size of
+ * the stats DMA buffer descriptor.
+ */
+#define MC_CMD_STAT_DESC_LEN 8
+/* Unique identifier of the statistic. Formatted as MC_CMD_STAT_ID */
+#define MC_CMD_STAT_DESC_STAT_ID_OFST 0
+#define MC_CMD_STAT_DESC_STAT_ID_LEN 4
+#define MC_CMD_STAT_DESC_STAT_ID_LBN 0
+#define MC_CMD_STAT_DESC_STAT_ID_WIDTH 32
+/* See structuredef: MC_CMD_STAT_ID */
+#define MC_CMD_STAT_DESC_STAT_ID_SOURCE_ID_OFST 0
+#define MC_CMD_STAT_DESC_STAT_ID_SOURCE_ID_LEN 2
+#define MC_CMD_STAT_DESC_STAT_ID_SOURCE_ID_LBN 0
+#define MC_CMD_STAT_DESC_STAT_ID_SOURCE_ID_WIDTH 16
+#define MC_CMD_STAT_DESC_STAT_ID_MARKER_STAT_ID_OFST 2
+#define MC_CMD_STAT_DESC_STAT_ID_MARKER_STAT_ID_LEN 2
+#define MC_CMD_STAT_DESC_STAT_ID_MARKER_STAT_ID_LBN 16
+#define MC_CMD_STAT_DESC_STAT_ID_MARKER_STAT_ID_WIDTH 16
+#define MC_CMD_STAT_DESC_STAT_ID_MAC_STAT_ID_OFST 2
+#define MC_CMD_STAT_DESC_STAT_ID_MAC_STAT_ID_LEN 2
+#define MC_CMD_STAT_DESC_STAT_ID_MAC_STAT_ID_LBN 16
+#define MC_CMD_STAT_DESC_STAT_ID_MAC_STAT_ID_WIDTH 16
+#define MC_CMD_STAT_DESC_STAT_ID_PHY_STAT_ID_OFST 2
+#define MC_CMD_STAT_DESC_STAT_ID_PHY_STAT_ID_LEN 2
+#define MC_CMD_STAT_DESC_STAT_ID_PHY_STAT_ID_LBN 16
+#define MC_CMD_STAT_DESC_STAT_ID_PHY_STAT_ID_WIDTH 16
+/* Index of the statistic in the DMA buffer. */
+#define MC_CMD_STAT_DESC_STAT_INDEX_OFST 4
+#define MC_CMD_STAT_DESC_STAT_INDEX_LEN 2
+#define MC_CMD_STAT_DESC_STAT_INDEX_LBN 32
+#define MC_CMD_STAT_DESC_STAT_INDEX_WIDTH 16
+/* Reserved for future extension (e.g. flags field) - currently always 0. */
+#define MC_CMD_STAT_DESC_RESERVED_OFST 6
+#define MC_CMD_STAT_DESC_RESERVED_LEN 2
+#define MC_CMD_STAT_DESC_RESERVED_LBN 48
+#define MC_CMD_STAT_DESC_RESERVED_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_MAC_STATISTICS_DESCRIPTOR
+ * Get a list of descriptors that describe the layout and size of the stats
+ * buffer required for retrieving statistics for a given port. Each entry in
+ * the list is formatted as MC_CMD_STAT_DESC and provides the ID of each stat
+ * and its location and size in the buffer. It also gives the overall minimum
+ * size of the DMA buffer required when DMA mode is used. Note that the first
+ * and last entries in the list are reserved for the generation start
+ * (MC_CMD_MARKER_STAT_GENERATION_START) and end
+ * (MC_CMD_MARKER_STAT_GENERATION_END) markers respectively, to be used for DMA
+ * synchronisation as described in the documentation for the relevant enum
+ * entries. The entries are present in the buffer even if DMA mode is not used.
+ * Provisions are made (but currently unused) for extending the size of the
+ * descriptors, extending the size of the list beyond the maximum MCDI response
+ * size, as well as the dynamic runtime updates of the list. Returns: 0 on
+ * success, ENOENT on non-existent port handle
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR 0x1e3
+#undef MC_CMD_0x1e3_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN msgrequest */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_LEN 8
+/* Handle of port to get MAC statistics descriptors for. */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_PORT_HANDLE_LEN 4
+/* Offset of the first entry to return, for cases where not all entries fit in
+ * the MCDI response. Should be set to 0 on initial request, and on subsequent
+ * requests updated by the number of entries already returned, as long as the
+ * MORE_ENTRIES flag is set.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_OFFSET_OFST 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_OFFSET_LEN 4
+
+/* MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT msgresponse */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_LENMIN 28
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_LENMAX 252
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_LEN(num) (20+8*(num))
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_NUM(len) (((len)-20)/8)
+/* Generation number of the stats buffer. This is incremented each time the
+ * buffer is updated, and is used to verify the consistency of the buffer
+ * contents. Reserved for future extension (dynamic list updates). Currently
+ * always set to 0.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_GENERATION_OFST 0
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_GENERATION_LEN 4
+/* Minimum size in bytes of the DMA buffer required to retrieve all statistics
+ * for the port. This is the sum of the sizes of all the statistics plus the
+ * size of the generation markers. Drivers will typically round this value up
+ * to the granularity of the host DMA allocation units.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_DMA_BUFFER_SIZE_OFST 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_DMA_BUFFER_SIZE_LEN 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_FLAGS_OFST 8
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_FLAGS_LEN 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_MORE_ENTRIES_OFST 8
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_MORE_ENTRIES_LBN 0
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_MORE_ENTRIES_WIDTH 1
+/* Size of the individual descriptor entry in the list. Determines the entry
+ * stride in the list. Currently always set to the size of MC_CMD_STAT_DESC;
+ * larger values may be used in the future to extend the descriptor by
+ * appending new data to the end of the existing structure.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_SIZE_OFST 12
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_SIZE_LEN 4
+/* Number of entries returned in the descriptor list. */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_COUNT_OFST 16
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_COUNT_LEN 4
+/* List of descriptors. Each entry is formatted as MC_CMD_STAT_DESC and
+ * provides the ID of each stat and its location and size in the buffer. The
+ * first and last entries are reserved for the generation start and end markers
+ * respectively.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_OFST 20
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LEN 8
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LO_OFST 20
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LO_LEN 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LO_LBN 160
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LO_WIDTH 32
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_HI_OFST 24
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_HI_LEN 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_HI_LBN 192
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_HI_WIDTH 32
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_MINNUM 1
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_MAXNUM 29
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_MAXNUM_MCDI2 125
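
/*
 * Illustrative sketch (not part of this patch): walking the descriptor list
 * returned by MC_CMD_MAC_STATISTICS_DESCRIPTOR, honouring OFFSET,
 * MORE_ENTRIES and the ENTRY_SIZE stride described above.  mcdi_rpc() is a
 * hypothetical transport helper (returns the response length or a negative
 * error) and put_le32() is the store counterpart of get_le32() from the
 * MC_CMD_MAC_STATE sketch; neither is an existing MCDI API.
 */
#include <stdint.h>
#include <string.h>

uint32_t get_le32(const uint8_t *p);
int mcdi_rpc(unsigned int cmd, const void *req, size_t req_len,
             void *resp, size_t resp_len);

void put_le32(uint8_t *p, uint32_t v)
{
        p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
}

static int walk_stat_descriptors(uint32_t port_handle)
{
        uint8_t req[MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_LEN];
        uint8_t resp[MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_LENMAX];
        uint32_t offset = 0;
        uint32_t more;

        do {
                uint32_t entry_size, entry_count, i;
                int len;

                put_le32(req + MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_PORT_HANDLE_OFST,
                         port_handle);
                /* OFFSET advances by the number of entries already returned. */
                put_le32(req + MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_OFFSET_OFST,
                         offset);

                len = mcdi_rpc(MC_CMD_MAC_STATISTICS_DESCRIPTOR, req,
                               sizeof(req), resp, sizeof(resp));
                if (len < 0)
                        return len;

                entry_size = get_le32(resp +
                        MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_SIZE_OFST);
                entry_count = get_le32(resp +
                        MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_COUNT_OFST);
                more = get_le32(resp +
                        MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_FLAGS_OFST) &
                        (1u << MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_MORE_ENTRIES_LBN);

                for (i = 0; i < entry_count; i++) {
                        const uint8_t *desc = resp +
                                MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_OFST +
                                i * entry_size; /* ENTRY_SIZE is the stride */

                        /* First dword is the MC_CMD_STAT_ID; the STAT_INDEX
                         * field gives the counter's slot in the stats buffer.
                         */
                        uint32_t stat_id = get_le32(desc + MC_CMD_STAT_DESC_STAT_ID_OFST);
                        (void)stat_id;
                }

                offset += entry_count;
        } while (more);

        return 0;
}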
+
+
+/***********************************/
+/* MC_CMD_MAC_STATISTICS
+ * Get generic MAC statistics. This call retrieves unified statistics managed
+ * by the MC. The MC will populate and provide all supported statistics in the
+ * format as returned by MC_CMD_MAC_STATISTICS_DESCRIPTOR. Refer to the
+ * aforementioned command for the format and contents of the stats DMA buffer.
+ * To ensure consistent and accurate results, it is essential for the driver to
+ * initialize the DMA buffer with zeros when DMA mode is used. Returns: 0 on
+ * success, ETIME if the DMA buffer is not ready, ENOENT on non-existent port
+ * handle, and EINVAL on invalid parameters (DMA buffer too small)
+ */
+#define MC_CMD_MAC_STATISTICS 0x1e4
+#undef MC_CMD_0x1e4_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MAC_STATISTICS_IN msgrequest */
+#define MC_CMD_MAC_STATISTICS_IN_LEN 20
+/* Handle of port to get MAC statistics for. */
+#define MC_CMD_MAC_STATISTICS_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_MAC_STATISTICS_IN_PORT_HANDLE_LEN 4
+/* Contains options for querying the MAC statistics. */
+#define MC_CMD_MAC_STATISTICS_IN_CMD_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_CMD_LEN 4
+#define MC_CMD_MAC_STATISTICS_IN_DMA_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_DMA_LBN 0
+#define MC_CMD_MAC_STATISTICS_IN_DMA_WIDTH 1
+#define MC_CMD_MAC_STATISTICS_IN_CLEAR_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_CLEAR_LBN 1
+#define MC_CMD_MAC_STATISTICS_IN_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_CHANGE_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_ENABLE_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_NOEVENT_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_NOEVENT_LBN 4
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_MAC_STATISTICS_IN_PERIOD_MS_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_PERIOD_MS_LBN 16
+#define MC_CMD_MAC_STATISTICS_IN_PERIOD_MS_WIDTH 16
+/* This is the address of the DMA buffer to use for transfer of the statistics.
+ * Only valid if the DMA flag is set to 1.
+ */
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_OFST 8
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LO_OFST 8
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LO_LBN 64
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LO_WIDTH 32
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_HI_OFST 12
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_HI_LBN 96
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_HI_WIDTH 32
+/* This is the length of the DMA buffer to use for the transfer of the
+ * statistics. The buffer should be at least DMA_BUFFER_SIZE long, as returned
+ * by MC_CMD_MAC_STATISTICS_DESCRIPTOR. If the supplied buffer is too small,
+ * the command will fail with EINVAL. Only valid if the DMA flag is set to 1.
+ */
+#define MC_CMD_MAC_STATISTICS_IN_DMA_LEN_OFST 16
+#define MC_CMD_MAC_STATISTICS_IN_DMA_LEN_LEN 4
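
/*
 * Illustrative sketch (not part of this patch): requesting an immediate DMA
 * transfer of the statistics with MC_CMD_MAC_STATISTICS.  The DMA buffer is
 * zero-initialised as required above and must be at least DMA_BUFFER_SIZE
 * bytes (from MC_CMD_MAC_STATISTICS_DESCRIPTOR).  'dma_addr' is the
 * device-visible address of 'dma_buf'; the DMA mapping itself is outside
 * this sketch.  put_le32() and mcdi_rpc() are the hypothetical helpers
 * assumed in the earlier sketches.
 */
#include <stdint.h>
#include <string.h>

void put_le32(uint8_t *p, uint32_t v);
int mcdi_rpc(unsigned int cmd, const void *req, size_t req_len,
             void *resp, size_t resp_len);

static int request_mac_stats_dma(uint32_t port_handle, void *dma_buf,
                                 uint64_t dma_addr, uint32_t dma_len)
{
        uint8_t req[MC_CMD_MAC_STATISTICS_IN_LEN];

        memset(dma_buf, 0, dma_len);    /* required before DMA mode is used */
        memset(req, 0, sizeof(req));

        put_le32(req + MC_CMD_MAC_STATISTICS_IN_PORT_HANDLE_OFST, port_handle);
        /* CMD dword: set only the DMA flag (bit 0) for an immediate transfer. */
        put_le32(req + MC_CMD_MAC_STATISTICS_IN_CMD_OFST,
                 1u << MC_CMD_MAC_STATISTICS_IN_DMA_LBN);
        put_le32(req + MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LO_OFST,
                 (uint32_t)dma_addr);
        put_le32(req + MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_HI_OFST,
                 (uint32_t)(dma_addr >> 32));
        put_le32(req + MC_CMD_MAC_STATISTICS_IN_DMA_LEN_OFST, dma_len);

        return mcdi_rpc(MC_CMD_MAC_STATISTICS, req, sizeof(req), NULL, 0);
}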
+
+/* MC_CMD_MAC_STATISTICS_OUT msgresponse */
+#define MC_CMD_MAC_STATISTICS_OUT_LENMIN 5
+#define MC_CMD_MAC_STATISTICS_OUT_LENMAX 252
+#define MC_CMD_MAC_STATISTICS_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_MAC_STATISTICS_OUT_LEN(num) (4+1*(num))
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_NUM(len) (((len)-4)/1)
+/* length of the data in bytes */
+#define MC_CMD_MAC_STATISTICS_OUT_DATALEN_OFST 0
+#define MC_CMD_MAC_STATISTICS_OUT_DATALEN_LEN 4
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_OFST 4
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_LEN 1
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_MINNUM 1
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_MAXNUM 248
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_MAXNUM_MCDI2 1016
+
+/* NET_PORT_HANDLE_DESC structuredef: Network port descriptor containing a port
+ * handle and attributes used, for example, in MC_CMD_ENUM_PORTS.
+ */
+#define NET_PORT_HANDLE_DESC_LEN 53
+/* The handle to identify the port */
+#define NET_PORT_HANDLE_DESC_PORT_HANDLE_OFST 0
+#define NET_PORT_HANDLE_DESC_PORT_HANDLE_LEN 4
+#define NET_PORT_HANDLE_DESC_PORT_HANDLE_LBN 0
+#define NET_PORT_HANDLE_DESC_PORT_HANDLE_WIDTH 32
+/* Includes the type of port e.g. physical, virtual or MAE MPORT and other
+ * properties relevant to the port.
+ */
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_OFST 4
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LEN 8
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LO_OFST 4
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LO_LEN 4
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LO_LBN 32
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LO_WIDTH 32
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_HI_OFST 8
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_HI_LEN 4
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_HI_LBN 64
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_HI_WIDTH 32
+#define NET_PORT_HANDLE_DESC_PORT_TYPE_OFST 4
+#define NET_PORT_HANDLE_DESC_PORT_TYPE_LBN 0
+#define NET_PORT_HANDLE_DESC_PORT_TYPE_WIDTH 3
+#define NET_PORT_HANDLE_DESC_PHYSICAL 0x0 /* enum */
+#define NET_PORT_HANDLE_DESC_VIRTUAL 0x1 /* enum */
+#define NET_PORT_HANDLE_DESC_MPORT 0x2 /* enum */
+#define NET_PORT_HANDLE_DESC_IS_ZOMBIE_OFST 4
+#define NET_PORT_HANDLE_DESC_IS_ZOMBIE_LBN 8
+#define NET_PORT_HANDLE_DESC_IS_ZOMBIE_WIDTH 1
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LBN 32
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_WIDTH 64
+/* The dynamic change that led to the port enumeration */
+#define NET_PORT_HANDLE_DESC_ENTRY_SRC_OFST 12
+#define NET_PORT_HANDLE_DESC_ENTRY_SRC_LEN 1
+/* enum: Indicates that the ENTRY_SRC field has not been initialized. */
+#define NET_PORT_HANDLE_DESC_UNKNOWN 0x0
+/* enum: The port was enumerated at start of day. */
+#define NET_PORT_HANDLE_DESC_PRESENT 0x1
+/* enum: The port was dynamically added. */
+#define NET_PORT_HANDLE_DESC_ADDED 0x2
+/* enum: The port was dynamically deleted. */
+#define NET_PORT_HANDLE_DESC_DELETED 0x3
+#define NET_PORT_HANDLE_DESC_ENTRY_SRC_LBN 96
+#define NET_PORT_HANDLE_DESC_ENTRY_SRC_WIDTH 8
+/* This is an opaque 40 byte label exposed to users as a unique identifier of
+ * the port. It is represented as a zero-terminated ASCII string and assigned
+ * by the port administrator which is typically either the firmware for a
+ * physical port or the host software responsible for creating the virtual
+ * port. The label is conveyed to the driver after assignment, which, unlike
+ * the port administrator, does not need to know how to interpret the label.
+ */
+#define NET_PORT_HANDLE_DESC_PORT_LABEL_OFST 13
+#define NET_PORT_HANDLE_DESC_PORT_LABEL_LEN 40
+#define NET_PORT_HANDLE_DESC_PORT_LABEL_LBN 104
+#define NET_PORT_HANDLE_DESC_PORT_LABEL_WIDTH 320
+
+
+/***********************************/
+/* MC_CMD_ENUM_PORTS
+ * This command returns handles for all ports present in the system. The PCIe
+ * function type of each port (either physical or virtual) is also reported.
+ * After a start-of-day port enumeration, firmware keeps track of all available
+ * ports upon creation or deletion and updates the ports if there is a change.
+ * This command is cleared after a control interface reset (e.g. FLR,
+ * ENTITY_RESET), in which case it must be called again to reenumerate the
+ * ports. The command is also clear-on-read and repeated calls will drain the
+ * buffer.
+ */
+#define MC_CMD_ENUM_PORTS 0x1e5
+#undef MC_CMD_0x1e5_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e5_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_ENUM_PORTS_IN msgrequest */
+#define MC_CMD_ENUM_PORTS_IN_LEN 0
+
+/* MC_CMD_ENUM_PORTS_OUT msgresponse */
+#define MC_CMD_ENUM_PORTS_OUT_LENMIN 12
+#define MC_CMD_ENUM_PORTS_OUT_LENMAX 252
+#define MC_CMD_ENUM_PORTS_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_ENUM_PORTS_OUT_LEN(num) (12+1*(num))
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_NUM(len) (((len)-12)/1)
+/* Any unused flags are reserved and must be ignored. */
+#define MC_CMD_ENUM_PORTS_OUT_FLAGS_OFST 0
+#define MC_CMD_ENUM_PORTS_OUT_FLAGS_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_MORE_OFST 0
+#define MC_CMD_ENUM_PORTS_OUT_MORE_LBN 0
+#define MC_CMD_ENUM_PORTS_OUT_MORE_WIDTH 1
+/* The number of NET_PORT_HANDLE_DESC structures in PORT_HANDLES. */
+#define MC_CMD_ENUM_PORTS_OUT_PORT_COUNT_OFST 4
+#define MC_CMD_ENUM_PORTS_OUT_PORT_COUNT_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_SIZEOF_NET_PORT_HANDLE_DESC_OFST 8
+#define MC_CMD_ENUM_PORTS_OUT_SIZEOF_NET_PORT_HANDLE_DESC_LEN 4
+/* Array of NET_PORT_HANDLE_DESC structures. Callers must use the
+ * SIZEOF_NET_PORT_HANDLE_DESC field as the array stride between entries.
+ * This may also allow for tail padding for alignment. Fields beyond
+ * SIZEOF_NET_PORT_HANDLE_DESC are not present.
+ */
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_OFST 12
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_LEN 1
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_MINNUM 0
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_MAXNUM 240
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_MAXNUM_MCDI2 1008
+/* See structuredef: NET_PORT_HANDLE_DESC */
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_HANDLE_OFST 12
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_HANDLE_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_OFST 16
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LEN 8
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LO_OFST 16
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LO_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LO_LBN 128
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LO_WIDTH 32
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_HI_OFST 20
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_HI_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_HI_LBN 160
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_HI_WIDTH 32
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_TYPE_LBN 128
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_TYPE_WIDTH 3
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_IS_ZOMBIE_LBN 136
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_IS_ZOMBIE_WIDTH 1
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_ENTRY_SRC_OFST 24
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_ENTRY_SRC_LEN 1
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_LABEL_OFST 25
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_LABEL_LEN 40
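
/*
 * Illustrative sketch (not part of this patch): iterating the
 * NET_PORT_HANDLE_DESC entries returned by MC_CMD_ENUM_PORTS, using
 * SIZEOF_NET_PORT_HANDLE_DESC as the array stride as required above.
 * get_le32() is the helper from the MC_CMD_MAC_STATE sketch.
 */
#include <stdint.h>
#include <stdio.h>

uint32_t get_le32(const uint8_t *p);

static void walk_enum_ports(const uint8_t *resp, size_t resp_len)
{
        uint32_t count = get_le32(resp + MC_CMD_ENUM_PORTS_OUT_PORT_COUNT_OFST);
        uint32_t stride = get_le32(resp +
                MC_CMD_ENUM_PORTS_OUT_SIZEOF_NET_PORT_HANDLE_DESC_OFST);
        const uint8_t *desc = resp + MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_OFST;
        uint32_t i;

        for (i = 0; i < count; i++, desc += stride) {
                uint32_t handle = get_le32(desc + NET_PORT_HANDLE_DESC_PORT_HANDLE_OFST);
                /* PORT_LABEL is a zero-terminated ASCII string of at most
                 * 40 bytes assigned by the port administrator.
                 */
                const char *label =
                        (const char *)(desc + NET_PORT_HANDLE_DESC_PORT_LABEL_OFST);

                printf("port handle %#x label %.40s\n", handle, label);
        }
        (void)resp_len;
}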
+
+
+/***********************************/
+/* MC_CMD_GET_TRANSCEIVER_PROPERTIES
+ * Read properties of the transceiver associated with the port. Can be either
+ * for a fixed onboard transceiver or an inserted module. The returned data is
+ * interpreted from the transceiver hardware and may be fixed up by the
+ * firmware. Use MC_CMD_GET_MODULE_DATA to get raw undecoded data.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES 0x1e6
+#undef MC_CMD_0x1e6_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e6_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_TRANSCEIVER_PROPERTIES_IN msgrequest */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_IN_LEN 4
+/* Handle to port to get transceiver properties from. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT msgresponse */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_LEN 89
+/* Supported technology abilities. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_TECH_ABILITIES_MASK_OFST 0
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_TECH_ABILITIES_MASK_LEN 16
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* Reserved for future expansion to accommodate new Ethernet technologies. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_RESERVED_OFST 16
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_RESERVED_LEN 16
+/* Preferred FEC modes. This is a function of the cable type and length. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_PREFERRED_FEC_MASK_OFST 32
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_PREFERRED_FEC_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+/* SFF-8024 code reported by the module. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_CODE_OFST 36
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_CODE_LEN 2
+/* Medium. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_MEDIUM_OFST 38
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_MEDIUM_LEN 1
+/* enum property: value */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_UNKNOWN 0x0 /* enum */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_COPPER 0x1 /* enum */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_OPTICAL 0x2 /* enum */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_BACKPLANE 0x3 /* enum */
+/* Identifies the tech */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_MEDIA_SUBTYPE_OFST 39
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_MEDIA_SUBTYPE_LEN 1
+/* enum property: value */
+/* MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_UNKNOWN 0x0 */
+/* enum: Ethernet over twisted-pair copper cables for distances up to 100
+ * meters.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_BASET 0x1
+/* enum: Ethernet over twin-axial, balanced copper cable. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_CR 0x2
+/* enum: Ethernet over backplane for connections on the same board. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_KX 0x3
+/* enum: Ethernet over a single backplane lane for connections between
+ * different boards.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_KR 0x4
+/* enum: Ethernet over copper backplane. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_KP 0x5
+/* enum: Ethernet over fiber optic. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_BASEX 0x6
+/* enum: Short range ethernet over multimode fiber optic (See IEEE 802.3 Clause
+ * 49 and 52).
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_SR 0x7
+/* enum: Long range, extended range or far reach ethernet used with single mode
+ * fiber optics.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_LR_ER_FR 0x8
+/* enum: Long reach multimode ethernet over multimode optical fiber. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_LRM 0x9
+/* enum: Very short reach PAM4 ethernet over multimode optical fiber (see IEEE
+ * 802.3db).
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VR 0xa
+/* enum: BASE-R encoding and PAM4 over single-mode fiber with reach up to at
+ * least 500 meters (802.3 Clause 121 and 124)
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_DR 0xb
+/* String of the vendor name as interpreted by NMC firmware. NMC firmware
+ * applies workarounds for known buggy transceivers. The vendor name is
+ * presented as 16 bytes of ASCII characters padded with spaces. It can also be
+ * represented as 16 bytes of zeros if the field is unspecified for the
+ * connected module. See SFF-8472/CMIS specifications for details.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_OFST 40
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_LEN 1
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_NUM 16
+/* The vendor part number as interpreted by NMC firmware. The field is presented
+ * as 16 bytes of ASCII chars padded with spaces. It can also be 16 bytes of
+ * zeros if the field is unspecified for the connected module.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_PN_OFST 56
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_PN_LEN 1
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_PN_NUM 16
+/* Serial number of the module presented as 16 bytes of ASCII characters padded
+ * with spaces. It can also be 16 bytes of zeros if the field is unspecified
+ * for the connected module. See SFF-8472/CMIS specifications for details.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_SERIAL_NUMBER_OFST 72
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_SERIAL_NUMBER_LEN 1
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_SERIAL_NUMBER_NUM 16
+/* This reports the number of module changes detected by the NMC firmware. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_PORT_MODULECHANGE_SEQ_NUM_OFST 88
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_PORT_MODULECHANGE_SEQ_NUM_LEN 1
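
/*
 * Illustrative sketch (not part of this patch): converting the 16-byte,
 * space-padded VENDOR_NAME/VENDOR_PN/SERIAL_NUMBER fields above into
 * NUL-terminated C strings.  copy_padded_field() is a hypothetical helper,
 * not an existing MCDI API.
 */
#include <stdint.h>
#include <string.h>

static void copy_padded_field(char *dst, const uint8_t *src, size_t len)
{
        /* Strip trailing padding; the field may also be all zeros if it is
         * unspecified for the connected module.
         */
        while (len && (src[len - 1] == ' ' || src[len - 1] == '\0'))
                len--;
        memcpy(dst, src, len);
        dst[len] = '\0';
}

/* Usage: 'name' must hold at least VENDOR_NAME_NUM + 1 bytes, e.g.
 *
 *   char name[MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_NUM + 1];
 *
 *   copy_padded_field(name,
 *           resp + MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_OFST,
 *           MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_NUM);
 */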
+
+
+/***********************************/
+/* MC_CMD_GET_FIXED_PORT_PROPERTIES
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES 0x1e7
+#undef MC_CMD_0x1e7_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e7_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_FIXED_PORT_PROPERTIES_IN msgrequest: In this context, the port
+ * consists of the MAC and the PHY, and excludes any modules inserted into the
+ * cage. This information is fixed for a given board but not for a given ASIC.
+ * This command reports properties for the port as it is currently configured,
+ * and not its hardware capabilities, which can be better than the current
+ * configuration.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_IN_LEN 4
+/* Handle of the port from which to retrieve properties. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT msgresponse */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_LEN 36
+/* Supported capabilities of the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_OFST 0
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_LEN 25
+/* See structuredef: MC_CMD_ETH_AN_FIELDS */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_TECH_MASK_OFST 0
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_TECH_MASK_LEN 16
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_FEC_MASK_OFST 16
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_FEC_MASK_LEN 4
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_FEC_REQ_OFST 20
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_FEC_REQ_LEN 4
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_PAUSE_MASK_OFST 24
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_PAUSE_MASK_LEN 1
+/* Number of lanes supported by the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_NUM_LANES_OFST 25
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_NUM_LANES_LEN 1
+/* Bitmask of supported loopback modes. Where the response to this command
+ * includes the LOOPBACK_MODES_MASK_V2 field, that field should be used in
+ * preference to ensure that all available loopback modes are seen.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_LOOPBACK_MODES_MASK_OFST 26
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_LOOPBACK_MODES_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+/* This field serves as a cage index that uniquely identifies the cage to which
+ * the module is connected. This is useful when splitter cables that have
+ * multiple ports on a single cage are used.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MDI_INDEX_OFST 27
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MDI_INDEX_LEN 1
+/* This bitmask is used to specify the lanes within the cage identified by
+ * MDI_INDEX that are allocated to the port.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MDI_LANE_MASK_OFST 28
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MDI_LANE_MASK_LEN 1
+/* Maximum frame length supported by the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MAX_FRAME_LEN_OFST 32
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MAX_FRAME_LEN_LEN 4
+
+/* MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2 msgresponse */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LEN 48
+/* Supported capabilities of the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_ABILITIES_OFST 0
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_ABILITIES_LEN 25
+/* Number of lanes supported by the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_NUM_LANES_OFST 25
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_NUM_LANES_LEN 1
+/* Bitmask of supported loopback modes. Where the response to this command
+ * includes the LOOPBACK_MODES_MASK_V2 field, that field should be used in
+ * preference to ensure that all available loopback modes are seen.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_OFST 26
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+/* This field serves as a cage index that uniquely identifies the cage to which
+ * the module is connected. This is useful when splitter cables that have
+ * multiple ports on a single cage are used.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MDI_INDEX_OFST 27
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MDI_INDEX_LEN 1
+/* This bitmask is used to specify the lanes within the cage identified by
+ * MDI_INDEX that are allocated to the port.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MDI_LANE_MASK_OFST 28
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MDI_LANE_MASK_LEN 1
+/* Maximum frame length supported by the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MAX_FRAME_LEN_OFST 32
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MAX_FRAME_LEN_LEN 4
+/* Bitmask of supported loopback modes. This field replaces the
+ * LOOPBACK_MODES_MASK field which is defined under version 1 of this command.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_OFST 40
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LEN 8
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LO_OFST 40
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LO_LEN 4
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LO_LBN 320
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LO_WIDTH 32
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_HI_OFST 44
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_HI_LEN 4
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_HI_LBN 352
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
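
/*
 * Illustrative sketch (not part of this patch): preferring the 64-bit
 * LOOPBACK_MODES_MASK_V2 field when the response is long enough to carry the
 * V2 layout, falling back to the original single-byte mask otherwise.
 * 'resp'/'resp_len' are the raw MCDI response bytes and length; get_le32()
 * is the helper from the MC_CMD_MAC_STATE sketch.
 */
#include <stdint.h>
#include <stddef.h>

uint32_t get_le32(const uint8_t *p);

static uint64_t fixed_port_loopback_modes(const uint8_t *resp, size_t resp_len)
{
        if (resp_len >= MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LEN) {
                uint64_t lo = get_le32(resp +
                        MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LO_OFST);
                uint64_t hi = get_le32(resp +
                        MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_HI_OFST);

                return lo | (hi << 32);
        }
        /* V1 response: only the original single-byte mask is available. */
        return resp[MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_LOOPBACK_MODES_MASK_OFST];
}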
+
+
+/***********************************/
+/* MC_CMD_GET_MODULE_DATA
+ * Read media-specific data from the PHY (e.g. SFP/SFP+ module ID information
+ * for SFP+ PHYs). This command returns raw data from the module's EEPROM and
+ * it is not interpreted by the MC. Use MC_CMD_GET_TRANSCEIVER_PROPERTIES to
+ * get interpreted data. Return code: 0, ENOENT
+ */
+#define MC_CMD_GET_MODULE_DATA 0x1e8
+#undef MC_CMD_0x1e8_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e8_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_MODULE_DATA_IN msgrequest */
+#define MC_CMD_GET_MODULE_DATA_IN_LEN 16
+/* Handle to identify the port from which to request module properties. */
+#define MC_CMD_GET_MODULE_DATA_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_MODULE_DATA_IN_PORT_HANDLE_LEN 4
+/* 7 bit I2C address of the device. DEPRECATED: This field is replaced by
+ * MODULE_ADDR in V2. Use V2 of this command for proper alignment and easier
+ * access.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_DEVADDR_LBN 32
+#define MC_CMD_GET_MODULE_DATA_IN_DEVADDR_WIDTH 7
+/* 0 if the page does not support banked access, non-zero otherwise. Non-zero
+ * BANK is valid if OFFSET is in the range 80h - ffh, i.e. in the Upper Memory
+ * region.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_BANK_OFST 6
+#define MC_CMD_GET_MODULE_DATA_IN_BANK_LEN 2
+/* 0 if paged access is not supported, non-zero otherwise. Non-zero PAGE is
+ * valid if OFFSET is in the range 80h - ffh.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_PAGE_OFST 8
+#define MC_CMD_GET_MODULE_DATA_IN_PAGE_LEN 2
+/* Offset in the range 00h - 7fh to access lower memory. Offset in the range
+ * 80h - ffh to access upper memory
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_OFFSET_OFST 10
+#define MC_CMD_GET_MODULE_DATA_IN_OFFSET_LEN 1
+#define MC_CMD_GET_MODULE_DATA_IN_LENGTH_OFST 12
+#define MC_CMD_GET_MODULE_DATA_IN_LENGTH_LEN 4
+
+/* MC_CMD_GET_MODULE_DATA_IN_V2 msgrequest: Updated MC_CMD_GET_MODULE_DATA with
+ * 8-bit wide ADDRESSING field. This new field provides a correctly aligned
+ * container for the 7-bit DEVADDR field from V1, now renamed MODULE_ADDR, to
+ * ensure proper alignment.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_LEN 16
+/* Handle to identify the port from which to request module properties. */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_MODULE_DATA_IN_V2_PORT_HANDLE_LEN 4
+/* 7 bit I2C address of the device. DEPRECATED: This field is replaced by
+ * MODULE_ADDR in V2. Use V2 of this command for proper alignment and easier
+ * access.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_DEVADDR_LBN 32
+#define MC_CMD_GET_MODULE_DATA_IN_V2_DEVADDR_WIDTH 7
+/* 0 if the page does not support banked access, non-zero otherwise. Non-zero
+ * BANK is valid if OFFSET is in the range 80h - ffh, i.e. in the Upper Memory
+ * region.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_BANK_OFST 6
+#define MC_CMD_GET_MODULE_DATA_IN_V2_BANK_LEN 2
+/* 0 if paged access is not supported, non-zero otherwise. Non-zero PAGE is
+ * valid if OFFSET is in the range 80h - ffh.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_PAGE_OFST 8
+#define MC_CMD_GET_MODULE_DATA_IN_V2_PAGE_LEN 2
+/* Offset in the range 00h - 7fh to access lower memory. Offset in the range
+ * 80h - ffh to access upper memory
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_OFFSET_OFST 10
+#define MC_CMD_GET_MODULE_DATA_IN_V2_OFFSET_LEN 1
+#define MC_CMD_GET_MODULE_DATA_IN_V2_LENGTH_OFST 12
+#define MC_CMD_GET_MODULE_DATA_IN_V2_LENGTH_LEN 4
+/* Container for 7 bit I2C addresses. */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_ADDRESSING_OFST 4
+#define MC_CMD_GET_MODULE_DATA_IN_V2_ADDRESSING_LEN 1
+#define MC_CMD_GET_MODULE_DATA_IN_V2_MODULE_ADDR_OFST 4
+#define MC_CMD_GET_MODULE_DATA_IN_V2_MODULE_ADDR_LBN 0
+#define MC_CMD_GET_MODULE_DATA_IN_V2_MODULE_ADDR_WIDTH 7
+
+/* MC_CMD_GET_MODULE_DATA_OUT msgresponse */
+#define MC_CMD_GET_MODULE_DATA_OUT_LENMIN 5
+#define MC_CMD_GET_MODULE_DATA_OUT_LENMAX 252
+#define MC_CMD_GET_MODULE_DATA_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_GET_MODULE_DATA_OUT_LEN(num) (4+1*(num))
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_NUM(len) (((len)-4)/1)
+/* length of the data in bytes */
+#define MC_CMD_GET_MODULE_DATA_OUT_DATALEN_OFST 0
+#define MC_CMD_GET_MODULE_DATA_OUT_DATALEN_LEN 4
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_OFST 4
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_LEN 1
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_MINNUM 1
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_MAXNUM 248
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_MAXNUM_MCDI2 1016
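
/*
 * Illustrative sketch (not part of this patch): reading 'length' bytes of raw
 * module EEPROM data from lower memory (OFFSET below 80h, so BANK and PAGE
 * stay 0) using the V2 request layout above.  get_le32()/put_le32() and
 * mcdi_rpc() are the hypothetical helpers assumed in the earlier sketches,
 * not existing MCDI APIs.
 */
#include <stdint.h>
#include <string.h>

uint32_t get_le32(const uint8_t *p);
void put_le32(uint8_t *p, uint32_t v);
int mcdi_rpc(unsigned int cmd, const void *req, size_t req_len,
             void *resp, size_t resp_len);

static int read_module_lower_memory(uint32_t port_handle, uint8_t i2c_addr,
                                    uint8_t offset, uint32_t length,
                                    uint8_t *out, size_t out_len)
{
        uint8_t req[MC_CMD_GET_MODULE_DATA_IN_V2_LEN] = { 0 };
        uint8_t resp[MC_CMD_GET_MODULE_DATA_OUT_LENMAX];
        uint32_t datalen;
        int len;

        put_le32(req + MC_CMD_GET_MODULE_DATA_IN_V2_PORT_HANDLE_OFST, port_handle);
        /* MODULE_ADDR is the 7-bit I2C address inside the byte-wide
         * ADDRESSING container.
         */
        req[MC_CMD_GET_MODULE_DATA_IN_V2_ADDRESSING_OFST] = i2c_addr & 0x7f;
        req[MC_CMD_GET_MODULE_DATA_IN_V2_OFFSET_OFST] = offset;
        put_le32(req + MC_CMD_GET_MODULE_DATA_IN_V2_LENGTH_OFST, length);

        len = mcdi_rpc(MC_CMD_GET_MODULE_DATA, req, sizeof(req),
                       resp, sizeof(resp));
        if (len < 0)
                return len;

        /* DATALEN reports how many DATA bytes follow in the response. */
        datalen = get_le32(resp + MC_CMD_GET_MODULE_DATA_OUT_DATALEN_OFST);
        if (datalen > out_len)
                datalen = out_len;
        memcpy(out, resp + MC_CMD_GET_MODULE_DATA_OUT_DATA_OFST, datalen);

        return (int)datalen;
}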
+
+/* EVENT_MASK structuredef */
+#define EVENT_MASK_LEN 4
+#define EVENT_MASK_TYPE_OFST 0
+#define EVENT_MASK_TYPE_LEN 4
+/* enum: PORT_LINKCHANGE event is enabled */
+#define EVENT_MASK_PORT_LINKCHANGE 0x0
+/* enum: PORT_MODULECHANGE event is enabled */
+#define EVENT_MASK_PORT_MODULECHANGE 0x1
+#define EVENT_MASK_TYPE_LBN 0
+#define EVENT_MASK_TYPE_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_SET_NETPORT_EVENTS_MASK
+ */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK 0x1e9
+#undef MC_CMD_0x1e9_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e9_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_SET_NETPORT_EVENTS_MASK_IN msgrequest: Enable or disable delivery of
+ * specified network port events for a given port identified by PORT_HANDLE. At
+ * start of day, or after any control interface reset (FLR, ENTITY_RESET,
+ * etc.), all event delivery is disabled for all ports associated with the
+ * control interface.
+ */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_LEN 8
+/* Handle to port to set event delivery mask. */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_LEN 4
+/* Bitmask of events to enable. Event delivery is enabled when corresponding
+ * bit is 1, disabled when 0.
+ */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_EVENT_MASK_OFST 4
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_EVENT_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* EVENT_MASK/TYPE */
+
+/* MC_CMD_SET_NETPORT_EVENTS_MASK_OUT msgresponse */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_OUT_LEN 0
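
/*
 * Illustrative sketch (not part of this patch): enabling link-change and
 * module-change event delivery for a port.  EVENT_MASK/TYPE is a "bitshift"
 * enum, so each value is a bit position within the mask.  put_le32() and
 * mcdi_rpc() are the hypothetical helpers assumed in the earlier sketches.
 */
#include <stdint.h>
#include <stddef.h>

void put_le32(uint8_t *p, uint32_t v);
int mcdi_rpc(unsigned int cmd, const void *req, size_t req_len,
             void *resp, size_t resp_len);

static int enable_netport_events(uint32_t port_handle)
{
        uint8_t req[MC_CMD_SET_NETPORT_EVENTS_MASK_IN_LEN];
        uint32_t mask = (1u << EVENT_MASK_PORT_LINKCHANGE) |
                        (1u << EVENT_MASK_PORT_MODULECHANGE);

        put_le32(req + MC_CMD_SET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_OFST,
                 port_handle);
        put_le32(req + MC_CMD_SET_NETPORT_EVENTS_MASK_IN_EVENT_MASK_OFST, mask);

        return mcdi_rpc(MC_CMD_SET_NETPORT_EVENTS_MASK, req, sizeof(req),
                        NULL, 0);
}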
+
/***********************************/
-/* MC_CMD_EVENT_CTRL
- * Configure which categories of unsolicited events the driver expects to
- * receive (Riverhead).
- */
-#define MC_CMD_EVENT_CTRL 0x69
-#undef MC_CMD_0x69_PRIVILEGE_CTG
-
-#define MC_CMD_0x69_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_EVENT_CTRL_IN msgrequest */
-#define MC_CMD_EVENT_CTRL_IN_LENMIN 0
-#define MC_CMD_EVENT_CTRL_IN_LENMAX 252
-#define MC_CMD_EVENT_CTRL_IN_LENMAX_MCDI2 1020
-#define MC_CMD_EVENT_CTRL_IN_LEN(num) (0+4*(num))
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_NUM(len) (((len)-0)/4)
-/* Array of event categories for which the driver wishes to receive events. */
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_OFST 0
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_LEN 4
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MINNUM 0
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MAXNUM 63
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MAXNUM_MCDI2 255
-/* enum: Driver wishes to receive LINKCHANGE events. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_LINKCHANGE 0x0
-/* enum: Driver wishes to receive SENSOR_CHANGE and SENSOR_STATE_CHANGE events.
- */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_SENSOREVT 0x1
-/* enum: Driver wishes to receive receive errors. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_RX_ERR 0x2
-/* enum: Driver wishes to receive transmit errors. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_TX_ERR 0x3
-/* enum: Driver wishes to receive firmware alerts. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_FWALERT 0x4
-/* enum: Driver wishes to receive reboot events. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_MC_REBOOT 0x5
-
-/* MC_CMD_EVENT_CTRL_OUT msgrequest */
-#define MC_CMD_EVENT_CTRL_OUT_LEN 0
+/* MC_CMD_GET_NETPORT_EVENTS_MASK
+ */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK 0x1ea
+#undef MC_CMD_0x1ea_PRIVILEGE_CTG
+
+#define MC_CMD_0x1ea_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_NETPORT_EVENTS_MASK_IN msgrequest: Get the event delivery mask
+ * for a given port identified by PORT_HANDLE.
+ */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_IN_LEN 4
+/* Handle of the port to get the event delivery mask for. */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_GET_NETPORT_EVENTS_MASK_OUT msgresponse */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_OUT_LEN 4
+/* Bitmask of events enabled. Event delivery is enabled when corresponding bit
+ * is 1, disabled when 0.
+ */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_OUT_EVENT_MASK_OFST 0
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_OUT_EVENT_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* EVENT_MASK/TYPE */
+
+
+/***********************************/
+/* MC_CMD_GET_SUPPORTED_NETPORT_EVENTS
+ */
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS 0x1eb
+#undef MC_CMD_0x1eb_PRIVILEGE_CTG
+
+#define MC_CMD_0x1eb_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_IN msgrequest: Get network port events
+ * supported by the platform. Information returned is fixed for a given NIC
+ * platform.
+ */
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_IN_LEN 0
+
+/* MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_OUT msgresponse */
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_OUT_LEN 4
+/* Bitmask of event types supported by the platform. A bit set to 1 indicates
+ * that delivery of the corresponding event type can be enabled.
+ */
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_OUT_EVENT_MASK_OFST 0
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_OUT_EVENT_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* EVENT_MASK/TYPE */
+
+
+/***********************************/
+/* MC_CMD_GET_NETPORT_STATISTICS
+ * Get generic MAC statistics. This call retrieves unified statistics managed
+ * by the MC. The MC will populate and provide all supported statistics in the
+ * format as returned by MC_CMD_MAC_STATISTICS_DESCRIPTOR. Refer to the
+ * aforementioned command for the format and contents of the stats DMA buffer.
+ * To ensure consistent and accurate results, it is essential for the driver to
+ * initialize the DMA buffer with zeros when DMA mode is used. Returns: 0 on
+ * success, ETIME if the DMA buffer is not ready, ENOENT on non-existent port
+ * handle, and EINVAL on invalid parameters (DMA buffer too small)
+ */
+#define MC_CMD_GET_NETPORT_STATISTICS 0x1fa
+#undef MC_CMD_0x1fa_PRIVILEGE_CTG
+
+#define MC_CMD_0x1fa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_NETPORT_STATISTICS_IN msgrequest */
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_LEN 20
+/* Handle of port to get MAC statistics for. */
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PORT_HANDLE_LEN 4
+/* Contains options for querying the MAC statistics. */
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_CMD_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_CMD_LEN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_LBN 0
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_WIDTH 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_CLEAR_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_CLEAR_LBN 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_CLEAR_WIDTH 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_CHANGE_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_ENABLE_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_NOEVENT_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_NOEVENT_LBN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIOD_MS_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIOD_MS_LBN 16
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIOD_MS_WIDTH 16
+/* Specifies the physical address of the DMA buffer to use for statistics
+ * transfer. This field must contain a valid address under either of these
+ * conditions: 1. DMA flag is set (immediate DMA requested) 2. Both
+ * PERIODIC_CHANGE and PERIODIC_ENABLE are set (periodic DMA configured)
+ */
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_OFST 8
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LO_OFST 8
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LO_LBN 64
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LO_WIDTH 32
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_HI_OFST 12
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_HI_LBN 96
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_HI_WIDTH 32
+/* Specifies the length of the DMA buffer in bytes for statistics transfer. The
+ * buffer size must be at least DMA_BUFFER_SIZE bytes (as returned by
+ * MC_CMD_MAC_STATISTICS_DESCRIPTOR). Providing an insufficient buffer size
+ * will result in an EINVAL error. This field must contain a valid length under
+ * either of these conditions: 1. DMA flag is set (immediate DMA requested) 2.
+ * Both PERIODIC_CHANGE and PERIODIC_ENABLE are set (periodic DMA configured)
+ */
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_LEN_OFST 16
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_LEN_LEN 4
+
+/* MC_CMD_GET_NETPORT_STATISTICS_OUT msgresponse */
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_LENMIN 0
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_LENMAX 248
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_LENMAX_MCDI2 1016
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_LEN(num) (0+8*(num))
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_NUM(len) (((len)-0)/8)
+/* Statistics buffer. Zero-length if DMA mode is used. The statistics buffer is
+ * an array of 8-byte counter values, containing the generation start marker,
+ * stats counters, and generation end marker. The index of each counter in the
+ * array is reported by the MAC_STATISTICS_DESCRIPTOR command. The same layout
+ * is used for the DMA buffer for DMA mode stats.
+ */
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_OFST 0
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LEN 8
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LO_OFST 0
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LO_LEN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LO_LBN 0
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LO_WIDTH 32
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_HI_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_HI_LEN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_HI_LBN 32
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_HI_WIDTH 32
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_MINNUM 0
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_MAXNUM 31
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_MAXNUM_MCDI2 127
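
/*
 * Illustrative sketch (not part of this patch): reading one counter from the
 * MCDI-response form of MC_CMD_GET_NETPORT_STATISTICS_OUT.  The response is
 * an array of 64-bit little-endian counters; 'stat_index' is the STAT_INDEX
 * that MC_CMD_MAC_STATISTICS_DESCRIPTOR reported for the wanted
 * MC_CMD_STAT_ID.  get_le32() is the helper from the MC_CMD_MAC_STATE sketch.
 */
#include <stdint.h>
#include <stddef.h>

uint32_t get_le32(const uint8_t *p);

static uint64_t read_netport_stat(const uint8_t *resp, size_t resp_len,
                                  unsigned int stat_index)
{
        size_t off = (size_t)stat_index * MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LEN;

        if (off + MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LEN > resp_len)
                return 0;       /* counter not present in this response */

        return (uint64_t)get_le32(resp + off) |
               ((uint64_t)get_le32(resp + off + 4) << 32);
}
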
/* EVB_PORT_ID structuredef */
#define EVB_PORT_ID_LEN 4
@@ -9706,44 +10573,6 @@
#define EVB_PORT_ID_PORT_ID_LBN 0
#define EVB_PORT_ID_PORT_ID_WIDTH 32
-/* EVB_VLAN_TAG structuredef */
-#define EVB_VLAN_TAG_LEN 2
-/* The VLAN tag value */
-#define EVB_VLAN_TAG_VLAN_ID_LBN 0
-#define EVB_VLAN_TAG_VLAN_ID_WIDTH 12
-#define EVB_VLAN_TAG_MODE_LBN 12
-#define EVB_VLAN_TAG_MODE_WIDTH 4
-/* enum: Insert the VLAN. */
-#define EVB_VLAN_TAG_INSERT 0x0
-/* enum: Replace the VLAN if already present. */
-#define EVB_VLAN_TAG_REPLACE 0x1
-
-/* BUFTBL_ENTRY structuredef */
-#define BUFTBL_ENTRY_LEN 12
-/* the owner ID */
-#define BUFTBL_ENTRY_OID_OFST 0
-#define BUFTBL_ENTRY_OID_LEN 2
-#define BUFTBL_ENTRY_OID_LBN 0
-#define BUFTBL_ENTRY_OID_WIDTH 16
-/* the page parameter as one of ESE_DZ_SMC_PAGE_SIZE_ */
-#define BUFTBL_ENTRY_PGSZ_OFST 2
-#define BUFTBL_ENTRY_PGSZ_LEN 2
-#define BUFTBL_ENTRY_PGSZ_LBN 16
-#define BUFTBL_ENTRY_PGSZ_WIDTH 16
-/* the raw 64-bit address field from the SMC, not adjusted for page size */
-#define BUFTBL_ENTRY_RAWADDR_OFST 4
-#define BUFTBL_ENTRY_RAWADDR_LEN 8
-#define BUFTBL_ENTRY_RAWADDR_LO_OFST 4
-#define BUFTBL_ENTRY_RAWADDR_LO_LEN 4
-#define BUFTBL_ENTRY_RAWADDR_LO_LBN 32
-#define BUFTBL_ENTRY_RAWADDR_LO_WIDTH 32
-#define BUFTBL_ENTRY_RAWADDR_HI_OFST 8
-#define BUFTBL_ENTRY_RAWADDR_HI_LEN 4
-#define BUFTBL_ENTRY_RAWADDR_HI_LBN 64
-#define BUFTBL_ENTRY_RAWADDR_HI_WIDTH 32
-#define BUFTBL_ENTRY_RAWADDR_LBN 32
-#define BUFTBL_ENTRY_RAWADDR_WIDTH 64
-
/* NVRAM_PARTITION_TYPE structuredef */
#define NVRAM_PARTITION_TYPE_LEN 2
#define NVRAM_PARTITION_TYPE_ID_OFST 0
@@ -9787,6 +10616,8 @@
#define NVRAM_PARTITION_TYPE_NMC_LOG 0x700
/* enum: Non-volatile log output of second core on dual-core device */
#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701
+/* enum: RAM (volatile) log output partition */
+#define NVRAM_PARTITION_TYPE_RAM_LOG 0x702
/* enum: Device state dump output partition */
#define NVRAM_PARTITION_TYPE_DUMP 0x800
/* enum: Crash log partition for NMC firmware */
@@ -9923,6 +10754,16 @@
#define NVRAM_PARTITION_TYPE_SUC_SOC_CONFIG 0x1f07
/* enum: System-on-Chip update information. */
#define NVRAM_PARTITION_TYPE_SOC_UPDATE 0x2003
+/* enum: Virtual partition. Write-only. Writes will actually be sent to an
+ * appropriate partition (for instance BUNDLE if the data starts with the magic
+ * number for a bundle update), or discarded with an error if not recognised as
+ * a supported type.
+ */
+#define NVRAM_PARTITION_TYPE_AUTO 0x2100
+/* enum: MC/NMC (first stage) bootloader firmware. (For X4, see XN-202072-PS
+ * and XN-202084-SW section 3.1).
+ */
+#define NVRAM_PARTITION_TYPE_BOOTLOADER 0x2200
/* enum: Start of reserved value range (firmware may use for any purpose) */
#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
/* enum: End of reserved value range (firmware may use for any purpose) */
@@ -9981,116 +10822,6 @@
#define LICENSED_APP_ID_ID_LBN 0
#define LICENSED_APP_ID_ID_WIDTH 32
-/* LICENSED_FEATURES structuredef */
-#define LICENSED_FEATURES_LEN 8
-/* Bitmask of licensed firmware features */
-#define LICENSED_FEATURES_MASK_OFST 0
-#define LICENSED_FEATURES_MASK_LEN 8
-#define LICENSED_FEATURES_MASK_LO_OFST 0
-#define LICENSED_FEATURES_MASK_LO_LEN 4
-#define LICENSED_FEATURES_MASK_LO_LBN 0
-#define LICENSED_FEATURES_MASK_LO_WIDTH 32
-#define LICENSED_FEATURES_MASK_HI_OFST 4
-#define LICENSED_FEATURES_MASK_HI_LEN 4
-#define LICENSED_FEATURES_MASK_HI_LBN 32
-#define LICENSED_FEATURES_MASK_HI_WIDTH 32
-#define LICENSED_FEATURES_RX_CUT_THROUGH_OFST 0
-#define LICENSED_FEATURES_RX_CUT_THROUGH_LBN 0
-#define LICENSED_FEATURES_RX_CUT_THROUGH_WIDTH 1
-#define LICENSED_FEATURES_PIO_OFST 0
-#define LICENSED_FEATURES_PIO_LBN 1
-#define LICENSED_FEATURES_PIO_WIDTH 1
-#define LICENSED_FEATURES_EVQ_TIMER_OFST 0
-#define LICENSED_FEATURES_EVQ_TIMER_LBN 2
-#define LICENSED_FEATURES_EVQ_TIMER_WIDTH 1
-#define LICENSED_FEATURES_CLOCK_OFST 0
-#define LICENSED_FEATURES_CLOCK_LBN 3
-#define LICENSED_FEATURES_CLOCK_WIDTH 1
-#define LICENSED_FEATURES_RX_TIMESTAMPS_OFST 0
-#define LICENSED_FEATURES_RX_TIMESTAMPS_LBN 4
-#define LICENSED_FEATURES_RX_TIMESTAMPS_WIDTH 1
-#define LICENSED_FEATURES_TX_TIMESTAMPS_OFST 0
-#define LICENSED_FEATURES_TX_TIMESTAMPS_LBN 5
-#define LICENSED_FEATURES_TX_TIMESTAMPS_WIDTH 1
-#define LICENSED_FEATURES_RX_SNIFF_OFST 0
-#define LICENSED_FEATURES_RX_SNIFF_LBN 6
-#define LICENSED_FEATURES_RX_SNIFF_WIDTH 1
-#define LICENSED_FEATURES_TX_SNIFF_OFST 0
-#define LICENSED_FEATURES_TX_SNIFF_LBN 7
-#define LICENSED_FEATURES_TX_SNIFF_WIDTH 1
-#define LICENSED_FEATURES_PROXY_FILTER_OPS_OFST 0
-#define LICENSED_FEATURES_PROXY_FILTER_OPS_LBN 8
-#define LICENSED_FEATURES_PROXY_FILTER_OPS_WIDTH 1
-#define LICENSED_FEATURES_EVENT_CUT_THROUGH_OFST 0
-#define LICENSED_FEATURES_EVENT_CUT_THROUGH_LBN 9
-#define LICENSED_FEATURES_EVENT_CUT_THROUGH_WIDTH 1
-#define LICENSED_FEATURES_MASK_LBN 0
-#define LICENSED_FEATURES_MASK_WIDTH 64
-
-/* LICENSED_V3_APPS structuredef */
-#define LICENSED_V3_APPS_LEN 8
-/* Bitmask of licensed applications */
-#define LICENSED_V3_APPS_MASK_OFST 0
-#define LICENSED_V3_APPS_MASK_LEN 8
-#define LICENSED_V3_APPS_MASK_LO_OFST 0
-#define LICENSED_V3_APPS_MASK_LO_LEN 4
-#define LICENSED_V3_APPS_MASK_LO_LBN 0
-#define LICENSED_V3_APPS_MASK_LO_WIDTH 32
-#define LICENSED_V3_APPS_MASK_HI_OFST 4
-#define LICENSED_V3_APPS_MASK_HI_LEN 4
-#define LICENSED_V3_APPS_MASK_HI_LBN 32
-#define LICENSED_V3_APPS_MASK_HI_WIDTH 32
-#define LICENSED_V3_APPS_ONLOAD_OFST 0
-#define LICENSED_V3_APPS_ONLOAD_LBN 0
-#define LICENSED_V3_APPS_ONLOAD_WIDTH 1
-#define LICENSED_V3_APPS_PTP_OFST 0
-#define LICENSED_V3_APPS_PTP_LBN 1
-#define LICENSED_V3_APPS_PTP_WIDTH 1
-#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_OFST 0
-#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_LBN 2
-#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_WIDTH 1
-#define LICENSED_V3_APPS_SOLARSECURE_OFST 0
-#define LICENSED_V3_APPS_SOLARSECURE_LBN 3
-#define LICENSED_V3_APPS_SOLARSECURE_WIDTH 1
-#define LICENSED_V3_APPS_PERF_MONITOR_OFST 0
-#define LICENSED_V3_APPS_PERF_MONITOR_LBN 4
-#define LICENSED_V3_APPS_PERF_MONITOR_WIDTH 1
-#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_OFST 0
-#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_LBN 5
-#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_WIDTH 1
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_OFST 0
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_LBN 6
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_WIDTH 1
-#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_OFST 0
-#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_LBN 7
-#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_WIDTH 1
-#define LICENSED_V3_APPS_TCP_DIRECT_OFST 0
-#define LICENSED_V3_APPS_TCP_DIRECT_LBN 8
-#define LICENSED_V3_APPS_TCP_DIRECT_WIDTH 1
-#define LICENSED_V3_APPS_LOW_LATENCY_OFST 0
-#define LICENSED_V3_APPS_LOW_LATENCY_LBN 9
-#define LICENSED_V3_APPS_LOW_LATENCY_WIDTH 1
-#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_OFST 0
-#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_LBN 10
-#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_WIDTH 1
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_OFST 0
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_LBN 11
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_OFST 0
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_LBN 12
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_WIDTH 1
-#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_OFST 0
-#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN 13
-#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_WIDTH 1
-#define LICENSED_V3_APPS_DSHBRD_OFST 0
-#define LICENSED_V3_APPS_DSHBRD_LBN 14
-#define LICENSED_V3_APPS_DSHBRD_WIDTH 1
-#define LICENSED_V3_APPS_SCATRD_OFST 0
-#define LICENSED_V3_APPS_SCATRD_LBN 15
-#define LICENSED_V3_APPS_SCATRD_WIDTH 1
-#define LICENSED_V3_APPS_MASK_LBN 0
-#define LICENSED_V3_APPS_MASK_WIDTH 64
-
/* LICENSED_V3_FEATURES structuredef */
#define LICENSED_V3_FEATURES_LEN 8
/* Bitmask of licensed firmware features */
@@ -10199,44 +10930,6 @@
#define RSS_MODE_HASH_SELECTOR_LBN 0
#define RSS_MODE_HASH_SELECTOR_WIDTH 8
-/* CTPIO_STATS_MAP structuredef */
-#define CTPIO_STATS_MAP_LEN 4
-/* The (function relative) VI number */
-#define CTPIO_STATS_MAP_VI_OFST 0
-#define CTPIO_STATS_MAP_VI_LEN 2
-#define CTPIO_STATS_MAP_VI_LBN 0
-#define CTPIO_STATS_MAP_VI_WIDTH 16
-/* The target bucket for the VI */
-#define CTPIO_STATS_MAP_BUCKET_OFST 2
-#define CTPIO_STATS_MAP_BUCKET_LEN 2
-#define CTPIO_STATS_MAP_BUCKET_LBN 16
-#define CTPIO_STATS_MAP_BUCKET_WIDTH 16
-
-
-/***********************************/
-/* MC_CMD_READ_REGS
- * Get a dump of the MCPU registers
- */
-#define MC_CMD_READ_REGS 0x50
-#undef MC_CMD_0x50_PRIVILEGE_CTG
-
-#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_READ_REGS_IN msgrequest */
-#define MC_CMD_READ_REGS_IN_LEN 0
-
-/* MC_CMD_READ_REGS_OUT msgresponse */
-#define MC_CMD_READ_REGS_OUT_LEN 308
-/* Whether the corresponding register entry contains a valid value */
-#define MC_CMD_READ_REGS_OUT_MASK_OFST 0
-#define MC_CMD_READ_REGS_OUT_MASK_LEN 16
-/* Same order as MIPS GDB (r0-r31, sr, lo, hi, bad, cause, 32 x float, fsr,
- * fir, fp)
- */
-#define MC_CMD_READ_REGS_OUT_REGS_OFST 16
-#define MC_CMD_READ_REGS_OUT_REGS_LEN 4
-#define MC_CMD_READ_REGS_OUT_REGS_NUM 73
-
/***********************************/
/* MC_CMD_INIT_EVQ
@@ -10640,25 +11333,6 @@
#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RXQ_FORCE_EV_MERGING_LBN 3
#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RXQ_FORCE_EV_MERGING_WIDTH 1
-/* QUEUE_CRC_MODE structuredef */
-#define QUEUE_CRC_MODE_LEN 1
-#define QUEUE_CRC_MODE_MODE_LBN 0
-#define QUEUE_CRC_MODE_MODE_WIDTH 4
-/* enum: No CRC. */
-#define QUEUE_CRC_MODE_NONE 0x0
-/* enum: CRC Fiber channel over ethernet. */
-#define QUEUE_CRC_MODE_FCOE 0x1
-/* enum: CRC (digest) iSCSI header only. */
-#define QUEUE_CRC_MODE_ISCSI_HDR 0x2
-/* enum: CRC (digest) iSCSI header and payload. */
-#define QUEUE_CRC_MODE_ISCSI 0x3
-/* enum: CRC Fiber channel over IP over ethernet. */
-#define QUEUE_CRC_MODE_FCOIPOE 0x4
-/* enum: CRC MPA. */
-#define QUEUE_CRC_MODE_MPA 0x5
-#define QUEUE_CRC_MODE_SPARE_LBN 4
-#define QUEUE_CRC_MODE_SPARE_WIDTH 4
-
/***********************************/
/* MC_CMD_INIT_RXQ
@@ -10827,6 +11501,9 @@
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_OFST 16
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_LBN 20
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SUPPRESS_RX_EVENTS_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SUPPRESS_RX_EVENTS_LBN 21
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SUPPRESS_RX_EVENTS_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_LEN 4
@@ -10933,6 +11610,9 @@
#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_OFST 16
#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_LBN 20
#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SUPPRESS_RX_EVENTS_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SUPPRESS_RX_EVENTS_LBN 21
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SUPPRESS_RX_EVENTS_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_OFST 20
#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_LEN 4
@@ -11068,6 +11748,9 @@
#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_OFST 16
#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_LBN 20
#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SUPPRESS_RX_EVENTS_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SUPPRESS_RX_EVENTS_LBN 21
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SUPPRESS_RX_EVENTS_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_V4_IN_OWNER_ID_OFST 20
#define MC_CMD_INIT_RXQ_V4_IN_OWNER_ID_LEN 4
@@ -11216,6 +11899,9 @@
#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_OFST 16
#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_LBN 20
#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SUPPRESS_RX_EVENTS_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SUPPRESS_RX_EVENTS_LBN 21
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SUPPRESS_RX_EVENTS_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_V5_IN_OWNER_ID_OFST 20
#define MC_CMD_INIT_RXQ_V5_IN_OWNER_ID_LEN 4
@@ -11610,320 +12296,6 @@
/* MC_CMD_PROXY_CMD_OUT msgresponse */
#define MC_CMD_PROXY_CMD_OUT_LEN 0
-/* MC_PROXY_STATUS_BUFFER structuredef: Host memory status buffer used to
- * manage proxied requests
- */
-#define MC_PROXY_STATUS_BUFFER_LEN 16
-/* Handle allocated by the firmware for this proxy transaction */
-#define MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0
-#define MC_PROXY_STATUS_BUFFER_HANDLE_LEN 4
-/* enum: An invalid handle. */
-#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0
-#define MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0
-#define MC_PROXY_STATUS_BUFFER_HANDLE_WIDTH 32
-/* The requesting physical function number */
-#define MC_PROXY_STATUS_BUFFER_PF_OFST 4
-#define MC_PROXY_STATUS_BUFFER_PF_LEN 2
-#define MC_PROXY_STATUS_BUFFER_PF_LBN 32
-#define MC_PROXY_STATUS_BUFFER_PF_WIDTH 16
-/* The requesting virtual function number. Set to VF_NULL if the target is a
- * PF.
- */
-#define MC_PROXY_STATUS_BUFFER_VF_OFST 6
-#define MC_PROXY_STATUS_BUFFER_VF_LEN 2
-#define MC_PROXY_STATUS_BUFFER_VF_LBN 48
-#define MC_PROXY_STATUS_BUFFER_VF_WIDTH 16
-/* The target function RID. */
-#define MC_PROXY_STATUS_BUFFER_RID_OFST 8
-#define MC_PROXY_STATUS_BUFFER_RID_LEN 2
-#define MC_PROXY_STATUS_BUFFER_RID_LBN 64
-#define MC_PROXY_STATUS_BUFFER_RID_WIDTH 16
-/* The status of the proxy as described in MC_CMD_PROXY_COMPLETE. */
-#define MC_PROXY_STATUS_BUFFER_STATUS_OFST 10
-#define MC_PROXY_STATUS_BUFFER_STATUS_LEN 2
-#define MC_PROXY_STATUS_BUFFER_STATUS_LBN 80
-#define MC_PROXY_STATUS_BUFFER_STATUS_WIDTH 16
-/* If a request is authorized rather than carried out by the host, this is the
- * elevated privilege mask granted to the requesting function.
- */
-#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12
-#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LEN 4
-#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96
-#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_PROXY_CONFIGURE
- * Enable/disable authorization of MCDI requests from unprivileged functions by
- * a designated admin function
- */
-#define MC_CMD_PROXY_CONFIGURE 0x58
-#undef MC_CMD_0x58_PRIVILEGE_CTG
-
-#define MC_CMD_0x58_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PROXY_CONFIGURE_IN msgrequest */
-#define MC_CMD_PROXY_CONFIGURE_IN_LEN 108
-#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0
-#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_OFST 0
-#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0
-#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size REQUEST_BLOCK_SIZE.
- */
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_OFST 4
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_OFST 4
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_LBN 32
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_LBN 64
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2 */
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_LEN 4
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size REPLY_BLOCK_SIZE.
- */
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_OFST 16
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_OFST 16
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_LBN 128
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_LBN 160
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2 */
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_LEN 4
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if
- * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
- */
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_OFST 28
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_OFST 28
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_LBN 224
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_LBN 256
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2, or zero if this buffer is not provided */
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_LEN 4
-/* Applies to all three buffers */
-#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40
-#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_LEN 4
-/* A bit mask defining which MCDI operations may be proxied */
-#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
-#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
-
-/* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_OFST 0
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size REQUEST_BLOCK_SIZE.
- */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_OFST 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_OFST 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_LBN 32
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_LBN 64
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2 */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_LEN 4
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size REPLY_BLOCK_SIZE.
- */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_OFST 16
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_OFST 16
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_LBN 128
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_LBN 160
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2 */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_LEN 4
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if
- * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
- */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_OFST 28
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_OFST 28
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_LBN 224
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_LBN 256
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2, or zero if this buffer is not provided */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_LEN 4
-/* Applies to all three buffers */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_LEN 4
-/* A bit mask defining which MCDI operations may be proxied */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_LEN 4
-
-/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
-#define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_PROXY_COMPLETE
- * Tells FW that a requested proxy operation has either been completed (by
- * using MC_CMD_PROXY_CMD) or authorized/declined. May only be sent by the
- * function that enabled proxying/authorization (by using
- * MC_CMD_PROXY_CONFIGURE).
- */
-#define MC_CMD_PROXY_COMPLETE 0x5f
-#undef MC_CMD_0x5f_PRIVILEGE_CTG
-
-#define MC_CMD_0x5f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PROXY_COMPLETE_IN msgrequest */
-#define MC_CMD_PROXY_COMPLETE_IN_LEN 12
-#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0
-#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_LEN 4
-#define MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4
-#define MC_CMD_PROXY_COMPLETE_IN_STATUS_LEN 4
-/* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply
- * is stored in the REPLY_BUFF.
- */
-#define MC_CMD_PROXY_COMPLETE_IN_COMPLETE 0x0
-/* enum: The operation has been authorized. The originating function may now
- * try again.
- */
-#define MC_CMD_PROXY_COMPLETE_IN_AUTHORIZED 0x1
-/* enum: The operation has been declined. */
-#define MC_CMD_PROXY_COMPLETE_IN_DECLINED 0x2
-/* enum: The authorization failed because the relevant application did not
- * respond in time.
- */
-#define MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3
-#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8
-#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_LEN 4
-
-/* MC_CMD_PROXY_COMPLETE_OUT msgresponse */
-#define MC_CMD_PROXY_COMPLETE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_ALLOC_BUFTBL_CHUNK
- * Allocate a set of buffer table entries using the specified owner ID. This
- * operation allocates the required buffer table entries (and fails if it
- * cannot do so). The buffer table entries will initially be zeroed.
- */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87
-#undef MC_CMD_0x87_PRIVILEGE_CTG
-
-#define MC_CMD_0x87_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
-
-/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
-/* Owner ID to use */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_LEN 4
-/* Size of buffer table pages to use, in bytes (note that only a few values are
- * legal on any specific hardware).
- */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_LEN 4
-
-/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_LEN 4
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_LEN 4
-/* Buffer table IDs for use in DMA descriptors. */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PROGRAM_BUFTBL_ENTRIES
- * Reprogram a set of buffer table entries in the specified chunk.
- */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88
-#undef MC_CMD_0x88_PRIVILEGE_CTG
-
-#define MC_CMD_0x88_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
-
-/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX_MCDI2 268
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_NUM(len) (((len)-12)/8)
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_LEN 4
-/* ID */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
-/* Num entries */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
-/* Buffer table entry address */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_LEN 4
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_LBN 96
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_WIDTH 32
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_LEN 4
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_LBN 128
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_WIDTH 32
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM_MCDI2 32
-
-/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_FREE_BUFTBL_CHUNK
- */
-#define MC_CMD_FREE_BUFTBL_CHUNK 0x89
-#undef MC_CMD_0x89_PRIVILEGE_CTG
-
-#define MC_CMD_0x89_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
-
-/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
-#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
-#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
-#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_LEN 4
-
-/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
-#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
-
/***********************************/
/* MC_CMD_FILTER_OP
@@ -12822,6 +13194,10 @@
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES 0x5
/* enum: read the supported encapsulation types for the VNIC */
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_VNIC_ENCAP_TYPES 0x6
+/* enum: read the supported RX filter matches for low-latency queues (as
+ * allocated by MC_CMD_ALLOC_LL_QUEUES)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_LL_RX_MATCHES 0x7
/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
@@ -12860,6 +13236,48 @@
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
+/* MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT msgresponse:
+ * GET_PARSER_DISP_INFO response format for OP_GET_SECURITY_RULE_INFO.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_LEN 36
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* a version number representing the set of rule lookups that are implemented
+ * by the currently running firmware
+ */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_OFST 4
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_LEN 4
+/* enum: implements lookup sequences described in SF-114946-SW draft C */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_SF_114946_SW_C 0x0
+/* the number of nodes in the subnet map */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_OFST 8
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_LEN 4
+/* the number of entries in one subnet map node */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_OFST 12
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_LEN 4
+/* minimum valid value for a subnet ID in a subnet map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_OFST 16
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_LEN 4
+/* maximum valid value for a subnet ID in a subnet map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_OFST 20
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_LEN 4
+/* the number of entries in the local and remote port range maps */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_OFST 24
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_LEN 4
+/* minimum valid value for a portrange ID in a port range map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_OFST 28
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_LEN 4
+/* maximum valid value for a portrange ID in a port range map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_OFST 32
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_LEN 4
+
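/* Illustrative sketch (not part of this header or of the patch): reading one
 * of the fixed-offset dwords of a GET_PARSER_DISP_SECURITY_RULE_INFO_OUT
 * response from a raw response buffer, assuming MCDI's little-endian wire
 * layout and the kernel u8/u32 types. The helper is hypothetical; drivers
 * normally use their own MCDI response accessors instead.
 */
static inline u32 rule_info_dword(const u8 *resp, unsigned int ofst)
{
	return resp[ofst] | (resp[ofst + 1] << 8) |
	       (resp[ofst + 2] << 16) | ((u32)resp[ofst + 3] << 24);
}
/* e.g. subnet_id_max = rule_info_dword(resp,
 *	MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_OFST);
 */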
/* MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT msgresponse: This response is
* returned if a MC_CMD_GET_PARSER_DISP_INFO_IN request is sent with OP value
* OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES. It contains information about the
@@ -12914,136 +13332,6 @@
/***********************************/
-/* MC_CMD_PARSER_DISP_RW
- * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging.
- * Please note that this interface is only of use to debug tools which have
- * knowledge of firmware and hardware data structures; nothing here is intended
- * for use by normal driver code. Note that although this command is in the
- * Admin privilege group, in tamperproof adapters, only read operations are
- * permitted.
- */
-#define MC_CMD_PARSER_DISP_RW 0xe5
-#undef MC_CMD_0xe5_PRIVILEGE_CTG
-
-#define MC_CMD_0xe5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PARSER_DISP_RW_IN msgrequest */
-#define MC_CMD_PARSER_DISP_RW_IN_LEN 32
-/* identifies the target of the operation */
-#define MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0
-#define MC_CMD_PARSER_DISP_RW_IN_TARGET_LEN 4
-/* enum: RX dispatcher CPU */
-#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0
-/* enum: TX dispatcher CPU */
-#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1
-/* enum: Lookup engine (with original metadata format). Deprecated; used only
- * by cmdclient as a fallback for very old Huntington firmware, and not
- * supported in firmware beyond v6.4.0.1005. Use LUE_VERSIONED_METADATA
- * instead.
- */
-#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
-/* enum: Lookup engine (with requested metadata format) */
-#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3
-/* enum: RX0 dispatcher CPU (alias for RX_DICPU; Medford has 2 RX DICPUs) */
-#define MC_CMD_PARSER_DISP_RW_IN_RX0_DICPU 0x0
-/* enum: RX1 dispatcher CPU (only valid for Medford) */
-#define MC_CMD_PARSER_DISP_RW_IN_RX1_DICPU 0x4
-/* enum: Miscellaneous other state (only valid for Medford) */
-#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5
-/* identifies the type of operation requested */
-#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
-#define MC_CMD_PARSER_DISP_RW_IN_OP_LEN 4
-/* enum: Read a word of DICPU DMEM or a LUE entry */
-#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0
-/* enum: Write a word of DICPU DMEM or a LUE entry. Not permitted on
- * tamperproof adapters.
- */
-#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1
-/* enum: Read-modify-write a word of DICPU DMEM (not valid for LUE). Not
- * permitted on tamperproof adapters.
- */
-#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2
-/* data memory address (DICPU targets) or LUE index (LUE targets) */
-#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
-#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_LEN 4
-/* selector (for MISC_STATE target) */
-#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8
-#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_LEN 4
-/* enum: Port to datapath mapping */
-#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1
-/* value to write (for DMEM writes) */
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_LEN 4
-/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_LEN 4
-/* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_LEN 4
-/* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */
-#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12
-#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_LEN 4
-/* value to write (for LUE writes) */
-#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
-#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
-
-/* MC_CMD_PARSER_DISP_RW_OUT msgresponse */
-#define MC_CMD_PARSER_DISP_RW_OUT_LEN 52
-/* value read (for DMEM reads) */
-#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0
-#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_LEN 4
-/* value read (for LUE reads) */
-#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0
-#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20
-/* up to 8 32-bit words of additional soft state from the LUE manager (the
- * exact content is firmware-dependent and intended only for debug use)
- */
-#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_OFST 20
-#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_LEN 32
-/* datapath(s) used for each port (for MISC_STATE PORT_DP_MAPPING selector) */
-#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_OFST 0
-#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_LEN 4
-#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_NUM 4
-#define MC_CMD_PARSER_DISP_RW_OUT_DP0 0x1 /* enum */
-#define MC_CMD_PARSER_DISP_RW_OUT_DP1 0x2 /* enum */
-
-
-/***********************************/
-/* MC_CMD_GET_PF_COUNT
- * Get number of PFs on the device.
- */
-#define MC_CMD_GET_PF_COUNT 0xb6
-#undef MC_CMD_0xb6_PRIVILEGE_CTG
-
-#define MC_CMD_0xb6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_PF_COUNT_IN msgrequest */
-#define MC_CMD_GET_PF_COUNT_IN_LEN 0
-
-/* MC_CMD_GET_PF_COUNT_OUT msgresponse */
-#define MC_CMD_GET_PF_COUNT_OUT_LEN 1
-/* Identifies the number of PFs on the device. */
-#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST 0
-#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_LEN 1
-
-
-/***********************************/
-/* MC_CMD_SET_PF_COUNT
- * Set number of PFs on the device.
- */
-#define MC_CMD_SET_PF_COUNT 0xb7
-
-/* MC_CMD_SET_PF_COUNT_IN msgrequest */
-#define MC_CMD_SET_PF_COUNT_IN_LEN 4
-/* New number of PFs on the device. */
-#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0
-#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_LEN 4
-
-/* MC_CMD_SET_PF_COUNT_OUT msgresponse */
-#define MC_CMD_SET_PF_COUNT_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_GET_PORT_ASSIGNMENT
* Get port assignment for current PCI function.
*/
@@ -13069,25 +13357,6 @@
/***********************************/
-/* MC_CMD_SET_PORT_ASSIGNMENT
- * Set port assignment for current PCI function.
- */
-#define MC_CMD_SET_PORT_ASSIGNMENT 0xb9
-#undef MC_CMD_0xb9_PRIVILEGE_CTG
-
-#define MC_CMD_0xb9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */
-#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
-/* Identifies the port assignment for this function. */
-#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0
-#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_LEN 4
-
-/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */
-#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_ALLOC_VIS
* Allocate VIs for current PCI function.
*/
@@ -13184,263 +13453,6 @@
/***********************************/
-/* MC_CMD_SET_SRIOV_CFG
- * Set SRIOV config for this PF.
- */
-#define MC_CMD_SET_SRIOV_CFG 0xbb
-#undef MC_CMD_0xbb_PRIVILEGE_CTG
-
-#define MC_CMD_0xbb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */
-#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20
-/* Number of VFs currently enabled. */
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_LEN 4
-/* Max number of VFs before sriov stride and offset may need to be changed. */
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_LEN 4
-#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
-#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_LEN 4
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_OFST 8
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
-/* RID offset of first VF from PF, or 0 for no change, or
- * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset.
- */
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_LEN 4
-/* RID offset of each subsequent VF from the previous, 0 for no change, or
- * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride.
- */
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_LEN 4
-
-/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
-#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_GET_VI_ALLOC_INFO
- * Get information about number of VI's and base VI number allocated to this
- * function. This message is not available to dynamic clients created by
- * MC_CMD_CLIENT_ALLOC.
- */
-#define MC_CMD_GET_VI_ALLOC_INFO 0x8d
-#undef MC_CMD_0x8d_PRIVILEGE_CTG
-
-#define MC_CMD_0x8d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */
-#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
-
-/* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12
-/* The number of VIs allocated on this function */
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_LEN 4
-/* The base absolute VI number allocated to this function. Required to
- * correctly interpret wakeup events.
- */
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_LEN 4
-/* Function's port vi_shift value (always 0 on Huntington) */
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_LEN 4
-
-
-/***********************************/
-/* MC_CMD_DUMP_VI_STATE
- * For CmdClient use. Dump pertinent information on a specific absolute VI. The
- * VI must be owned by the calling client or one of its ancestors; usership of
- * the VI (as set by MC_CMD_SET_VI_USER) is not sufficient.
- */
-#define MC_CMD_DUMP_VI_STATE 0x8e
-#undef MC_CMD_0x8e_PRIVILEGE_CTG
-
-#define MC_CMD_0x8e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_DUMP_VI_STATE_IN msgrequest */
-#define MC_CMD_DUMP_VI_STATE_IN_LEN 4
-/* The VI number to query. */
-#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0
-#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_LEN 4
-
-/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
-#define MC_CMD_DUMP_VI_STATE_OUT_LEN 100
-/* The PF part of the function owning this VI. */
-#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_OFST 0
-#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_LEN 2
-/* The VF part of the function owning this VI. */
-#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_OFST 2
-#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_LEN 2
-/* Base of VIs allocated to this function. */
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_OFST 4
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_LEN 2
-/* Count of VIs allocated to the owner function. */
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_OFST 6
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_LEN 2
-/* Base interrupt vector allocated to this function. */
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_OFST 8
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_LEN 2
-/* Number of interrupt vectors allocated to this function. */
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_OFST 10
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_LEN 2
-/* Raw evq ptr table data. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_OFST 12
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_OFST 12
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_LBN 96
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_OFST 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_LBN 128
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_WIDTH 32
-/* Raw evq timer table data. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_OFST 20
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_OFST 20
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_LBN 160
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_LBN 192
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_WIDTH 32
-/* Combined metadata field. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_OFST 28
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_OFST 28
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_OFST 28
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_LBN 24
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_WIDTH 8
-/* TXDPCPU raw table data for queue. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_OFST 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_OFST 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_LBN 256
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_OFST 36
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_LBN 288
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_WIDTH 32
-/* TXDPCPU raw table data for queue. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_OFST 40
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_OFST 40
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_LBN 320
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_OFST 44
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_LBN 352
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_WIDTH 32
-/* TXDPCPU raw table data for queue. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_OFST 48
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_OFST 48
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_LBN 384
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_OFST 52
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_LBN 416
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_WIDTH 32
-/* Combined metadata field. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_LBN 448
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_LBN 480
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_LBN 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_LBN 24
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_LBN 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_LBN 40
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_WIDTH 24
-/* RXDPCPU raw table data for queue. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_OFST 64
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_OFST 64
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_LBN 512
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_OFST 68
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_LBN 544
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_WIDTH 32
-/* RXDPCPU raw table data for queue. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_OFST 72
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_OFST 72
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_LBN 576
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_OFST 76
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_LBN 608
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_WIDTH 32
-/* Reserved, currently 0. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_OFST 80
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_OFST 80
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_LBN 640
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_OFST 84
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_LBN 672
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_WIDTH 32
-/* Combined metadata field. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_LBN 704
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_LBN 736
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_LBN 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_LBN 24
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8
-/* Current user, as assigned by MC_CMD_SET_VI_USER. */
-#define MC_CMD_DUMP_VI_STATE_OUT_USER_CLIENT_ID_OFST 96
-#define MC_CMD_DUMP_VI_STATE_OUT_USER_CLIENT_ID_LEN 4
-
-
-/***********************************/
/* MC_CMD_ALLOC_PIOBUF
* Allocate a push I/O buffer for later use with a tx queue.
*/
@@ -13491,354 +13503,102 @@
/* MC_CMD_GET_VI_TLP_PROCESSING_IN msgrequest */
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
-/* VI number to get information for. */
+/* Queue handle, encodes queue type and VI number to get information for. */
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
-/* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4
-/* Transaction processing steering hint 1 for use with the Rx Queue. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_OFST 0
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_LEN 1
-/* Transaction processing steering hint 2 for use with the Ev Queue. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_OFST 1
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_LEN 1
-/* Use Relaxed ordering model for TLPs on this VI. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_LBN 16
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_WIDTH 1
-/* Use ID based ordering for TLPs on this VI. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_LBN 17
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_WIDTH 1
-/* Set no snoop bit for TLPs on this VI. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_LBN 18
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_WIDTH 1
-/* Enable TPH for TLPs on this VI. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_LEN 4
+/* MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT msgresponse: This message has the same
+ * layout as GET_VI_TLP_PROCESSING_OUT, but with corrected field ordering to
+ * simplify use in drivers.
+ */
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_LEN 4
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_DATA_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_DATA_LEN 4
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG1_RX_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG1_RX_LBN 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG1_RX_WIDTH 8
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG2_EV_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG2_EV_LBN 8
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG2_EV_WIDTH 8
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_LBN 16
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_PACKET_DATA_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_PACKET_DATA_LBN 16
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_PACKET_DATA_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_ID_BASED_ORDERING_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_ID_BASED_ORDERING_LBN 17
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_ID_BASED_ORDERING_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_NO_SNOOP_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_NO_SNOOP_LBN 18
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_NO_SNOOP_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_ON_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_ON_LBN 19
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_ON_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_SYNC_DATA_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_SYNC_DATA_LBN 20
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_SYNC_DATA_WIDTH 1
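
/* Illustrative sketch (not part of this header or of the patch): one way a
 * driver could decode the single DATA dword of GET_VI_TLP_PROCESSING_V2_OUT
 * using the LBN/WIDTH definitions above. The helper name is hypothetical and
 * the kernel u32 type is assumed; the widths above are at most 8, so the
 * mask computation cannot overflow.
 */
static inline u32 tlp_v2_out_field(u32 data, unsigned int lbn,
				   unsigned int width)
{
	/* LBNs in this structure are relative to the DATA dword at offset 0 */
	return (data >> lbn) & ((1u << width) - 1);
}
/* e.g. tph_tag1 = tlp_v2_out_field(data,
 *	MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG1_RX_LBN,
 *	MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG1_RX_WIDTH);
 */
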
/***********************************/
/* MC_CMD_SET_VI_TLP_PROCESSING
* Set TLP steering and ordering information for a VI. The caller must have the
* GRP_FUNC_DMA privilege and must be the currently-assigned user of this VI or
- * an ancestor of the current user (see MC_CMD_SET_VI_USER).
+ * an ancestor of the current user (see MC_CMD_SET_VI_USER). Note that LL
+ * queues require this to be called after allocation but before initialisation
+ * of the queue. TLP options of a queue are fixed once the queue is
+ * initialised: they take the current global values unless overridden
+ * beforehand using this command. At LL queue allocation, all overrides are
+ * cleared.
*/
#define MC_CMD_SET_VI_TLP_PROCESSING 0xb1
#undef MC_CMD_0xb1_PRIVILEGE_CTG
#define MC_CMD_0xb1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-/* MC_CMD_SET_VI_TLP_PROCESSING_IN msgrequest */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
-/* VI number to set information for. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
-/* Transaction processing steering hint 1 for use with the Rx Queue. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1
-/* Transaction processing steering hint 2 for use with the Ev Queue. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_OFST 5
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_LEN 1
-/* Use Relaxed ordering model for TLPs on this VI. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_LBN 48
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_WIDTH 1
-/* Use ID based ordering for TLPs on this VI. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_LBN 49
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_WIDTH 1
-/* Set the no snoop bit for TLPs on this VI. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_LBN 50
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_WIDTH 1
-/* Enable TPH for TLPs on this VI. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_LEN 4
+/* MC_CMD_SET_VI_TLP_PROCESSING_V2_IN msgrequest: This message has the same
+ * layout as SET_VI_TLP_PROCESSING_IN, but with corrected field ordering to
+ * simplify use in drivers.
+ */
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_LEN 8
+/* Queue handle, encodes queue type and VI number to set information for. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_INSTANCE_OFST 0
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_INSTANCE_LEN 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_DATA_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_DATA_LEN 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG1_RX_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG1_RX_LBN 0
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG1_RX_WIDTH 8
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG2_EV_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG2_EV_LBN 8
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG2_EV_WIDTH 8
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_LBN 16
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_PACKET_DATA_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_PACKET_DATA_LBN 16
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_PACKET_DATA_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_ID_BASED_ORDERING_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_ID_BASED_ORDERING_LBN 17
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_ID_BASED_ORDERING_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_NO_SNOOP_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_NO_SNOOP_LBN 18
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_NO_SNOOP_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_ON_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_ON_LBN 19
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_ON_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_SYNC_DATA_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_SYNC_DATA_LBN 20
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_SYNC_DATA_WIDTH 1
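
/* Illustrative sketch (not part of this header or of the patch): packing the
 * two host-endian dwords of a SET_VI_TLP_PROCESSING_V2_IN request. The
 * function and its parameters are hypothetical; a real driver would go
 * through its own MCDI request helpers (including little-endian conversion)
 * and, for LL queues, would issue this after queue allocation but before
 * queue initialisation, as noted above.
 */
static inline void tlp_v2_in_build(u32 *req, u32 queue_handle,
				   u32 tph_tag1_rx, bool relaxed_ordering)
{
	u32 data = 0;

	data |= (tph_tag1_rx & 0xff) <<
		MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG1_RX_LBN;
	if (relaxed_ordering)
		data |= 1u << MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_LBN;

	req[MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_INSTANCE_OFST / 4] = queue_handle;
	req[MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_DATA_OFST / 4] = data;
}
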
/* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */
#define MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0
/***********************************/
-/* MC_CMD_GET_TLP_PROCESSING_GLOBALS
- * Get global PCIe steering and transaction processing configuration.
- */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc
-#undef MC_CMD_0xbc_PRIVILEGE_CTG
-
-#define MC_CMD_0xbc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
-/* enum: MISC. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0
-/* enum: IDO. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1
-/* enum: RO. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2
-/* enum: TPH Type. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3
-
-/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
-/* Amalgamated TLP info word. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_LEN 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_WIDTH 31
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_LBN 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_LBN 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_LBN 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_LBN 3
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_LBN 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_WIDTH 28
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_LBN 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_LBN 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_LBN 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_LBN 3
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_WIDTH 29
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_LBN 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_LBN 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_WIDTH 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_LBN 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_WIDTH 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_LBN 6
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_WIDTH 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_LBN 8
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_WIDTH 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_LBN 9
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_WIDTH 23
-
-
-/***********************************/
-/* MC_CMD_SET_TLP_PROCESSING_GLOBALS
- * Set global PCIe steering and transaction processing configuration.
- */
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd
-#undef MC_CMD_0xbd_PRIVILEGE_CTG
-
-#define MC_CMD_0xbd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
-/* Amalgamated TLP info word. */
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_LEN 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_LBN 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_LBN 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_LBN 3
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_LBN 0
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_LBN 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_LBN 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_LBN 0
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_LBN 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_WIDTH 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_LBN 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_WIDTH 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_LBN 6
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_WIDTH 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_LBN 8
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_WIDTH 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_LBN 10
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_WIDTH 22
-
-/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_SATELLITE_DOWNLOAD
- * Download a new set of images to the satellite CPUs from the host.
- */
-#define MC_CMD_SATELLITE_DOWNLOAD 0x91
-#undef MC_CMD_0x91_PRIVILEGE_CTG
-
-#define MC_CMD_0x91_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs
- * are subtle, and so downloads must proceed in a number of phases.
- *
- * 1) PHASE_RESET with a target of TARGET_ALL and chunk ID/length of 0.
- *
- * 2) PHASE_IMEMS for each of the IMEM targets (target IDs 0-11). Each download
- * may consist of multiple chunks. The final chunk (with CHUNK_ID_LAST) should
- * be a checksum (a simple 32-bit sum) of the transferred data. An individual
- * download may be aborted using CHUNK_ID_ABORT.
- *
- * 3) PHASE_VECTORS for each of the vector table targets (target IDs 12-15),
- * similar to PHASE_IMEMS.
- *
- * 4) PHASE_READY with a target of TARGET_ALL and chunk ID/length of 0.
- *
- * After any error (a requested abort is not considered to be an error) the
- * sequence must be restarted from PHASE_RESET.
- */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMIN 20
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX 252
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX_MCDI2 1020
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_LEN(num) (16+4*(num))
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_NUM(len) (((len)-16)/4)
-/* Download phase. (Note: the IDLE phase is used internally and is never valid
- * in a command from the host.)
- */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_LEN 4
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */
-/* Target for download. (These match the blob numbers defined in
- * mc_flash_layout.h.)
- */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_LEN 4
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb
-/* enum: Valid in phase 3 (PHASE_VECTORS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc
-/* enum: Valid in phase 3 (PHASE_VECTORS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd
-/* enum: Valid in phase 3 (PHASE_VECTORS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe
-/* enum: Valid in phase 3 (PHASE_VECTORS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf
-/* enum: Valid in phases 1 (PHASE_RESET) and 4 (PHASE_READY) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff
-/* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LEN 4
-/* enum: Last chunk, containing checksum rather than data */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff
-/* enum: Abort download of this item */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe
-/* Length of this chunk in bytes */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_LEN 4
-/* Data for this chunk */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MINNUM 1
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM 59
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM_MCDI2 251
-
-/* MC_CMD_SATELLITE_DOWNLOAD_OUT msgresponse */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8
-/* Same as MC_CMD_ERR field, but included as 0 in success cases */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_LEN 4
-/* Extra status information */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_LEN 4
-/* enum: Code download OK, completed. */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0
-/* enum: Code download aborted as requested. */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1
-/* enum: Code download OK so far, send next chunk. */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2
-/* enum: Download phases out of sequence */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100
-/* enum: Bad target for this phase */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101
-/* enum: Chunk ID out of sequence */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200
-/* enum: Chunk length zero or too large */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201
-/* enum: Checksum was incorrect */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300
-
-
-/***********************************/
/* MC_CMD_GET_CAPABILITIES
- * Get device capabilities.
- *
- * This is supplementary to the MC_CMD_GET_BOARD_CFG command, and intended to
- * reference inherent device capabilities as opposed to current NVRAM config.
+ * Get device capabilities. This is supplementary to the MC_CMD_GET_BOARD_CFG
+ * command, and intended to reference inherent device capabilities as opposed
+ * to current NVRAM config.
*/
#define MC_CMD_GET_CAPABILITIES 0xbe
#undef MC_CMD_0xbe_PRIVILEGE_CTG
@@ -14490,7 +14250,10 @@
/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -14900,7 +14663,10 @@
/* MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -15335,7 +15101,10 @@
/* MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -15778,7 +15547,10 @@
/* MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -16226,7 +15998,10 @@
/* MC_CMD_GET_CAPABILITIES_V6_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -16685,7 +16460,10 @@
/* MC_CMD_GET_CAPABILITIES_V7_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -16796,9 +16574,21 @@
#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CXL_CONFIG_ENABLE_WIDTH 1
/* MC_CMD_GET_CAPABILITIES_V8_OUT msgresponse */
#define MC_CMD_GET_CAPABILITIES_V8_OUT_LEN 160
@@ -17189,7 +16979,10 @@
/* MC_CMD_GET_CAPABILITIES_V8_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -17300,9 +17093,21 @@
#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CXL_CONFIG_ENABLE_WIDTH 1
/* These bits are reserved for communicating test-specific capabilities to
* host-side test software. All production drivers should treat this field as
* opaque.
@@ -17707,7 +17512,10 @@
/* MC_CMD_GET_CAPABILITIES_V9_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -17818,9 +17626,21 @@
#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CXL_CONFIG_ENABLE_WIDTH 1
/* These bits are reserved for communicating test-specific capabilities to
* host-side test software. All production drivers should treat this field as
* opaque.
@@ -18260,7 +18080,10 @@
/* MC_CMD_GET_CAPABILITIES_V10_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V10_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -18371,9 +18194,21 @@
#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CXL_CONFIG_ENABLE_WIDTH 1
/* These bits are reserved for communicating test-specific capabilities to
* host-side test software. All production drivers should treat this field as
* opaque.
@@ -18438,6 +18273,1182 @@
#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_QUEUE_SIZES_OFST 188
#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_QUEUE_SIZES_LEN 4
+/* MC_CMD_GET_CAPABILITIES_V11_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LEN 196
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS2_LEN 4
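
The note above that FLAGS2 is "not present on older firmware (check the length)" implies the usual guard for optional trailing MCDI fields: compare the response length against the field's offset before decoding it. A minimal sketch follows, assuming a raw little-endian response buffer; the buffer, its length, and the helper names are illustrative and not part of this header.

```c
#include <stddef.h>
#include <stdint.h>

/* Offset/length of FLAGS2 in GET_CAPABILITIES_V11_OUT, mirroring the
 * definitions above (copied locally so the sketch stands alone). */
#define V11_OUT_FLAGS2_OFST 20
#define V11_OUT_FLAGS2_LEN  4

/* Read a 32-bit little-endian word from a response buffer (little-endian
 * encoding is assumed here for illustration). */
static uint32_t mcdi_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* Return FLAGS2, or 0 if the firmware response is too short to contain it
 * (i.e. older firmware that predates the second flags word). */
static uint32_t get_caps_flags2(const uint8_t *resp, size_t resp_len)
{
	if (resp_len < V11_OUT_FLAGS2_OFST + V11_OUT_FLAGS2_LEN)
		return 0;	/* field not present on this firmware */
	return mcdi_le32(resp + V11_OUT_FLAGS2_OFST);
}
```

The same length-check pattern applies to every "not present on older firmware" field in this response.
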
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex PF-to-port mapping scheme is in use. A
+ * future driver should look for a new field supporting the new scheme; the
+ * current/old driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
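
As a rough illustration of how a consumer might interpret the per-PF assignment bytes and the special values above, here is a hypothetical helper; the constant and function names are invented for the sketch, and only the numeric values come from the enums above.

```c
#include <stdint.h>
#include <stdio.h>

/* Special values for PFS_TO_PORTS_ASSIGNMENT, mirroring the enums above. */
#define PF_ACCESS_NOT_PERMITTED 0xff
#define PF_NOT_PRESENT          0xfe
#define PF_NOT_ASSIGNED         0xfd
#define PF_INCOMPATIBLE         0xfc

/* Hypothetical helper: report how one PF's assignment byte reads. Per the
 * comment above, an old driver treats INCOMPATIBLE_ASSIGNMENT the same as
 * PF_NOT_ASSIGNED. */
static void describe_pf_assignment(unsigned int pf, uint8_t val)
{
	switch (val) {
	case PF_ACCESS_NOT_PERMITTED:
		printf("PF %u: no permission to query\n", pf);
		break;
	case PF_NOT_PRESENT:
		printf("PF %u: not present\n", pf);
		break;
	case PF_NOT_ASSIGNED:
	case PF_INCOMPATIBLE:
		printf("PF %u: not assigned to an external port\n", pf);
		break;
	default:
		printf("PF %u: external port %u\n", pf, (unsigned)val);
		break;
	}
}
```
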
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V11_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V11_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of the RX descriptor cache, expressed as a binary logarithm. The
+ * actual size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of the TX descriptor cache, expressed as a binary logarithm. The
+ * actual size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_DESC_CACHE_SIZE_LEN 1
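
The descriptor-cache fields above store a base-2 logarithm, so recovering the entry count is a single shift. A tiny sketch, assuming the reported value is small enough for a 32-bit shift:

```c
#include <stdint.h>

/* RX/TX_DESC_CACHE_SIZE hold log2 of the cache size, so the actual entry
 * count is 2 ^ field (assumed to fit in 32 bits). */
static uint32_t desc_cache_entries(uint8_t log2_size)
{
	return 1u << log2_size;
}
```
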
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI-to-address mapping. Cut-through PIO (CTPIO) is not
+ * available with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_64K 0x2
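
A sketch of how the VI window mode might translate into per-VI addresses, following the enum comments above (window size per mode, PIO at offset 4k, CTPIO at offset 12k, CTPIO unmapped in 8k mode); the helper names are hypothetical.

```c
#include <stddef.h>
#include <stdint.h>

/* VI window modes, mirroring the enum values above. */
enum vi_window_mode { VI_WINDOW_8K = 0, VI_WINDOW_16K = 1, VI_WINDOW_64K = 2 };

/* Window size implied by each mode. */
static size_t vi_window_size(enum vi_window_mode mode)
{
	switch (mode) {
	case VI_WINDOW_8K:  return  8 * 1024;
	case VI_WINDOW_16K: return 16 * 1024;
	case VI_WINDOW_64K: return 64 * 1024;
	}
	return 0;
}

/* Hypothetical helper: byte offset of VI 'vi' within the VI aperture. */
static size_t vi_base(enum vi_window_mode mode, unsigned int vi)
{
	return (size_t)vi * vi_window_size(mode);
}

/* Hypothetical helper: CTPIO offset within a VI window, or 0 if the mode
 * does not map CTPIO at all (the 8k case, per the comments above). */
static size_t vi_ctpio_offset(enum vi_window_mode mode)
{
	return mode == VI_WINDOW_8K ? 0 : 12 * 1024;
}
```

Under the layout described above, a mapped VI aperture pointer plus vi_base() would give the region for a given VI, with PIO a further 4k in.
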
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_NUM_STATS_LEN 2
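
The MAC stats comment above implies a simple sizing rule for the DMA buffer: room for at least MAC_STATS_NUM_STATS 64-bit values, or the returned array is truncated. A one-line sketch:

```c
#include <stddef.h>
#include <stdint.h>

/* Minimum DMA buffer length needed to receive all MAC stats (including the
 * final GENERATION_END entry), per the comment above. */
static size_t mac_stats_buf_len(uint16_t num_stats)
{
	return (size_t)num_stats * sizeof(uint64_t);
}
```
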
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
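
A hedged sketch of how a driver might scan the guaranteed-buffer-size array described above, treating zero entries as unused and an all-zero array as "no restriction"; the helper is hypothetical.

```c
#include <stdint.h>
#include <stdio.h>

#define GUARANTEED_RX_BUFFER_SIZES_NUM 16	/* mirrors the define above */

/* Print the RX buffer sizes the system guarantees to be usable concurrently;
 * an all-zero array means there is no restriction. */
static void print_guaranteed_rx_buffer_sizes(
		const uint32_t sizes[GUARANTEED_RX_BUFFER_SIZES_NUM])
{
	int any = 0;

	for (int i = 0; i < GUARANTEED_RX_BUFFER_SIZES_NUM; i++) {
		if (sizes[i]) {
			printf("guaranteed RX buffer size: %u bytes\n",
			       (unsigned)sizes[i]);
			any = 1;
		}
	}
	if (!any)
		printf("no restriction on concurrent RX buffer sizes\n");
}
```
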
+/* Third word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS3_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS3_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_WOL_ETHERWAKE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_WOL_ETHERWAKE_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_WOL_ETHERWAKE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_EVEN_SPREADING_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_EVEN_SPREADING_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_EVEN_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_SUPPORTED_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VDPA_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VDPA_SUPPORTED_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VDPA_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CXL_CONFIG_ENABLE_WIDTH 1
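The _OFST/_LBN/_WIDTH triples above describe bitfields inside the little-endian response: OFST is the byte offset of the containing 32-bit word, LBN the bit number within that word, and WIDTH the field width in bits. A minimal sketch of how a driver might test one of these FLAGS3 capability bits follows; the helper name and the resp/resp_len parameters are illustrative and not part of the MCDI headers, and an older firmware that lacks the word is detected purely by the response being too short to contain it.

#include <endian.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Extract one single-bit capability flag from a raw MCDI response buffer. */
static bool mcdi_cap_flag(const uint8_t *resp, size_t resp_len,
                          unsigned int ofst, unsigned int lbn,
                          unsigned int width)
{
        uint32_t dword;

        if (resp_len < ofst + 4)        /* flag word absent on older firmware */
                return false;
        memcpy(&dword, resp + ofst, sizeof(dword));     /* response is LE */
        return (le32toh(dword) >> lbn) & ((1u << width) - 1);
}

/* e.g. bool mae = mcdi_cap_flag(resp, resp_len,
 *         MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_SUPPORTED_OFST,
 *         MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_SUPPORTED_LBN,
 *         MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_SUPPORTED_WIDTH); */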
+/* These bits are reserved for communicating test-specific capabilities to
+ * host-side test software. All production drivers should treat this field as
+ * opaque.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LEN 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LO_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LO_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LO_LBN 1216
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LO_WIDTH 32
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_HI_OFST 156
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_HI_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_HI_LBN 1248
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_HI_WIDTH 32
+/* The minimum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_OFST 160
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_OFST 164
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum number of queues that can be used by an RSS context in exclusive
+ * mode. In exclusive mode the context has a configurable indirection table and
+ * a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_INDIRECTION_QUEUES_OFST 168
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_INDIRECTION_QUEUES_LEN 4
+/* The maximum number of queues that can be used by an RSS context in even-
+ * spreading mode. In even-spreading mode the context has no indirection table
+ * but it does have a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_OFST 172
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_LEN 4
+/* The total number of RSS contexts supported. Note that the number of
+ * available contexts using indirection tables is also limited by the
+ * availability of indirection table space allocated from a common pool.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_NUM_CONTEXTS_OFST 176
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_NUM_CONTEXTS_LEN 4
+/* The total amount of indirection table space that can be shared between RSS
+ * contexts.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_TABLE_POOL_SIZE_OFST 180
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_TABLE_POOL_SIZE_LEN 4
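The comments above require indirection table sizes to be powers of 2 between RSS_MIN_INDIRECTION_TABLE_SIZE and RSS_MAX_INDIRECTION_TABLE_SIZE, drawn from a shared pool of RSS_TABLE_POOL_SIZE entries. A small sketch, assuming min/max have already been read from those fields, of rounding a requested queue count up to an acceptable table size:

#include <stdint.h>

/* Round the requested queue count up to a power of two and clamp it to the
 * advertised [min, max] indirection table sizes. Illustrative only. */
static uint32_t rss_table_size(uint32_t queues, uint32_t min, uint32_t max)
{
        uint32_t size = 1;

        while (size < queues)
                size <<= 1;             /* next power of two >= queues */
        if (size < min)
                size = min;
        if (size > max)
                size = max;
        return size;
}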
+/* A bitmap of the queue sizes the device can provide, where bit N being set
+ * indicates that 2**N is a valid size. The device may be limited in the number
+ * of different queue sizes that can exist simultaneously, so a bit being set
+ * here does not guarantee that an attempt to create a queue of that size will
+ * succeed.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SUPPORTED_QUEUE_SIZES_OFST 184
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SUPPORTED_QUEUE_SIZES_LEN 4
+/* A bitmap of queue sizes that are always available, in the same format as
+ * SUPPORTED_QUEUE_SIZES. Attempting to create a queue with one of these sizes
+ * will never fail due to unavailability of the requested size.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_QUEUE_SIZES_OFST 188
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_QUEUE_SIZES_LEN 4
+/* Number of available indirect memory maps. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INDIRECT_MAP_INDEX_COUNT_OFST 192
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INDIRECT_MAP_INDEX_COUNT_LEN 4
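The SUPPORTED_QUEUE_SIZES and GUARANTEED_QUEUE_SIZES bitmaps above encode allowed ring sizes as powers of two: bit N set means a queue of 2**N entries is usable. A hedged sketch of picking the smallest advertised size that fits a requested entry count (helper name and parameters are illustrative):

#include <stdbool.h>
#include <stdint.h>

/* Return log2 of the smallest supported queue size >= entries, or -1 if
 * none fits; also report whether that size is in the guaranteed bitmap. */
static int pick_queue_size_log2(uint32_t supported, uint32_t guaranteed,
                                uint32_t entries, bool *is_guaranteed)
{
        for (unsigned int n = 0; n < 32; n++) {
                if (!(supported & (1u << n)))
                        continue;
                if ((1u << n) >= entries) {
                        *is_guaranteed = guaranteed & (1u << n);
                        return n;       /* queue holds 2**n entries */
                }
        }
        return -1;                      /* no supported size is large enough */
}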
+
+/* MC_CMD_GET_CAPABILITIES_V12_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LEN 204
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
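PFS_TO_PORTS_ASSIGNMENT is one byte per PF (16 entries), where an ordinary value is the external port number and the special values above mark inaccessible, absent or unassigned PFs. A purely illustrative sketch of walking the array, treating INCOMPATIBLE_ASSIGNMENT as not-assigned as the comment instructs:

#include <stdint.h>
#include <stdio.h>

/* Report each PF's external port from the 16-byte assignment array. */
static void dump_pf_ports(const uint8_t assign[16])
{
        for (unsigned int pf = 0; pf < 16; pf++) {
                switch (assign[pf]) {
                case 0xff:      /* ACCESS_NOT_PERMITTED */
                        printf("PF%u: no access\n", pf);
                        break;
                case 0xfe:      /* PF_NOT_PRESENT */
                        break;
                case 0xfd:      /* PF_NOT_ASSIGNED */
                case 0xfc:      /* INCOMPATIBLE_ASSIGNMENT: treat as unassigned */
                        printf("PF%u: not assigned\n", pf);
                        break;
                default:
                        printf("PF%u: external port %u\n", pf, assign[pf]);
                }
        }
}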
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V12_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V12_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_64K 0x2
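The VI_WINDOW_MODE enums above define how much BAR address space each VI occupies (8K, 16K or 64K), with PIO at offset 4K inside the window and CTPIO at offset 12K (unavailable in 8K mode). A sketch, assuming the mode value has already been validated as 0-2, of locating a VI's window:

#include <stdint.h>

/* BAR offset of a VI's window for a given VI_WINDOW_MODE value (0..2). */
static uint64_t vi_window_base(unsigned int vi, unsigned int window_mode)
{
        static const uint64_t window_size[] = {
                [0x0] = 8 * 1024,       /* VI_WINDOW_MODE_8K */
                [0x1] = 16 * 1024,      /* VI_WINDOW_MODE_16K */
                [0x2] = 64 * 1024,      /* VI_WINDOW_MODE_64K */
        };

        return (uint64_t)vi * window_size[window_mode];
}

/* PIO region for a VI:   vi_window_base(vi, mode) + 4096
 * CTPIO region (16K/64K modes only): vi_window_base(vi, mode) + 12288 */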
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_NUM_STATS_LEN 2
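Per the comment above, MAC_STATS_NUM_STATS counts 64-bit entries including GENERATION_END, so the MAC stats DMA buffer should hold at least that many 8-byte values. A minimal sizing sketch (a real driver would allocate DMA-coherent memory rather than calloc):

#include <stdint.h>
#include <stdlib.h>

/* Allocate a buffer large enough for the full MAC stats array. */
static uint64_t *alloc_mac_stats(uint16_t num_stats, size_t *len_out)
{
        *len_out = (size_t)num_stats * sizeof(uint64_t);
        return calloc(num_stats, sizeof(uint64_t));     /* shorter => truncated */
}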
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
+/* Third word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS3_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS3_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_WOL_ETHERWAKE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_WOL_ETHERWAKE_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_WOL_ETHERWAKE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_EVEN_SPREADING_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_EVEN_SPREADING_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_EVEN_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_SUPPORTED_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VDPA_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VDPA_SUPPORTED_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VDPA_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CXL_CONFIG_ENABLE_WIDTH 1
+/* These bits are reserved for communicating test-specific capabilities to
+ * host-side test software. All production drivers should treat this field as
+ * opaque.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LEN 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LO_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LO_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LO_LBN 1216
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LO_WIDTH 32
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_HI_OFST 156
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_HI_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_HI_LBN 1248
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_HI_WIDTH 32
+/* The minimum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_OFST 160
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_OFST 164
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum number of queues that can be used by an RSS context in exclusive
+ * mode. In exclusive mode the context has a configurable indirection table and
+ * a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_INDIRECTION_QUEUES_OFST 168
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_INDIRECTION_QUEUES_LEN 4
+/* The maximum number of queues that can be used by an RSS context in even-
+ * spreading mode. In even-spreading mode the context has no indirection table
+ * but it does have a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_OFST 172
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_LEN 4
+/* The total number of RSS contexts supported. Note that the number of
+ * available contexts using indirection tables is also limited by the
+ * availability of indirection table space allocated from a common pool.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_NUM_CONTEXTS_OFST 176
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_NUM_CONTEXTS_LEN 4
+/* The total amount of indirection table space that can be shared between RSS
+ * contexts.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_TABLE_POOL_SIZE_OFST 180
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_TABLE_POOL_SIZE_LEN 4
+/* A bitmap of the queue sizes the device can provide, where bit N being set
+ * indicates that 2**N is a valid size. The device may be limited in the number
+ * of different queue sizes that can exist simultaneously, so a bit being set
+ * here does not guarantee that an attempt to create a queue of that size will
+ * succeed.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SUPPORTED_QUEUE_SIZES_OFST 184
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SUPPORTED_QUEUE_SIZES_LEN 4
+/* A bitmap of queue sizes that are always available, in the same format as
+ * SUPPORTED_QUEUE_SIZES. Attempting to create a queue with one of these sizes
+ * will never fail due to unavailability of the requested size.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_QUEUE_SIZES_OFST 188
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_QUEUE_SIZES_LEN 4
+/* Number of available indirect memory maps. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INDIRECT_MAP_INDEX_COUNT_OFST 192
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INDIRECT_MAP_INDEX_COUNT_LEN 4
+/* Number of VIs available for external ports 4-7. Information for ports 0-3 is
+ * in NUM_VIS_PER_PORT in GET_CAPABILITIES_V2_OUT.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT2_OFST 196
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT2_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT2_NUM 4
+
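Several fields above are annotated "Not present on older firmware (check the length)": the GET_CAPABILITIES response grows with each version, and a field is only meaningful if the actual response is long enough to cover it. A hypothetical helper expressing that check with the _OFST/_LEN constants defined above:

#include <stdbool.h>
#include <stddef.h>

/* True if the response is long enough to contain the named field. */
static bool mcdi_field_present(size_t resp_len, size_t field_ofst,
                               size_t field_len)
{
        return resp_len >= field_ofst + field_len;
}

/* e.g. NUM_VIS_PER_PORT2 (ports 4-7) is only valid when
 * mcdi_field_present(resp_len,
 *         MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT2_OFST,
 *         MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT2_LEN *
 *         MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT2_NUM) */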
/***********************************/
/* MC_CMD_V2_EXTN
@@ -18468,168 +19479,13 @@
* are not defined.
*/
#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_TSA 0x1
-
-
-/***********************************/
-/* MC_CMD_TCM_BUCKET_ALLOC
- * Allocate a pacer bucket (for qau rp or a snapper test)
+/* enum: MCDI command used for platform management. Typically, these commands
+ * are used for low-level operations directed at the platform as a whole (e.g.
+ * MMIO device enumeration) rather than individual functions and use a
+ * dedicated comms channel (e.g. RPmsg/IPI). May be handled by the same or
+ * different CPU as MCDI_MESSAGE_TYPE_MC.
*/
-#define MC_CMD_TCM_BUCKET_ALLOC 0xb2
-#undef MC_CMD_0xb2_PRIVILEGE_CTG
-
-#define MC_CMD_0xb2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TCM_BUCKET_ALLOC_IN msgrequest */
-#define MC_CMD_TCM_BUCKET_ALLOC_IN_LEN 0
-
-/* MC_CMD_TCM_BUCKET_ALLOC_OUT msgresponse */
-#define MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4
-/* the bucket id */
-#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0
-#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_LEN 4
-
-
-/***********************************/
-/* MC_CMD_TCM_BUCKET_FREE
- * Free a pacer bucket
- */
-#define MC_CMD_TCM_BUCKET_FREE 0xb3
-#undef MC_CMD_0xb3_PRIVILEGE_CTG
-
-#define MC_CMD_0xb3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TCM_BUCKET_FREE_IN msgrequest */
-#define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
-/* the bucket id */
-#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0
-#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_LEN 4
-
-/* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */
-#define MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_TCM_BUCKET_INIT
- * Initialise pacer bucket with a given rate
- */
-#define MC_CMD_TCM_BUCKET_INIT 0xb4
-#undef MC_CMD_0xb4_PRIVILEGE_CTG
-
-#define MC_CMD_0xb4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TCM_BUCKET_INIT_IN msgrequest */
-#define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
-/* the bucket id */
-#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0
-#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_LEN 4
-/* the rate in mbps */
-#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
-#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_LEN 4
-
-/* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12
-/* the bucket id */
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_LEN 4
-/* the rate in mbps */
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_LEN 4
-/* the desired maximum fill level */
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_LEN 4
-
-/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
-#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_TCM_TXQ_INIT
- * Initialise txq in pacer with given options or set options
- */
-#define MC_CMD_TCM_TXQ_INIT 0xb5
-#undef MC_CMD_0xb5_PRIVILEGE_CTG
-
-#define MC_CMD_0xb5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TCM_TXQ_INIT_IN msgrequest */
-#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28
-/* the txq id */
-#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
-#define MC_CMD_TCM_TXQ_INIT_IN_QID_LEN 4
-/* the static priority associated with the txq */
-#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
-#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_LEN 4
-/* bitmask of the priority queues this txq is inserted into when inserted. */
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_LEN 4
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_WIDTH 1
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_LBN 2
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1
-/* the reaction point (RP) bucket */
-#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
-#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_LEN 4
-/* an already reserved bucket (typically set to bucket associated with outer
- * vswitch)
- */
-#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16
-#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_LEN 4
-/* an already reserved bucket (typically set to bucket associated with inner
- * vswitch)
- */
-#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20
-#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_LEN 4
-/* the min bucket (typically for ETS/minimum bandwidth) */
-#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
-#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_LEN 4
-
-/* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32
-/* the txq id */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_LEN 4
-/* the static priority associated with the txq */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_LEN 4
-/* bitmask of the priority queues this txq is inserted into when inserted. */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_LEN 4
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_WIDTH 1
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_LBN 2
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1
-/* the reaction point (RP) bucket */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_LEN 4
-/* an already reserved bucket (typically set to bucket associated with outer
- * vswitch)
- */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_LEN 4
-/* an already reserved bucket (typically set to bucket associated with inner
- * vswitch)
- */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_LEN 4
-/* the min bucket (typically for ETS/minimum bandwidth) */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_LEN 4
-/* the static priority associated with the txq */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_LEN 4
-
-/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
-#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
+#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_PLATFORM 0x2
/***********************************/
@@ -18740,27 +19596,6 @@
/***********************************/
-/* MC_CMD_VSWITCH_QUERY
- * read some config of v-switch. For now this command is an empty placeholder.
- * It may be used to check if a v-switch is connected to a given EVB port (if
- * not, then the command returns ENOENT).
- */
-#define MC_CMD_VSWITCH_QUERY 0x63
-#undef MC_CMD_0x63_PRIVILEGE_CTG
-
-#define MC_CMD_0x63_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_VSWITCH_QUERY_IN msgrequest */
-#define MC_CMD_VSWITCH_QUERY_IN_LEN 4
-/* The port to which the v-switch is connected. */
-#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
-#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
-
-/* MC_CMD_VSWITCH_QUERY_OUT msgresponse */
-#define MC_CMD_VSWITCH_QUERY_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_VPORT_ALLOC
* allocate a v-port.
*/
@@ -18936,28 +19771,6 @@
/***********************************/
-/* MC_CMD_VADAPTOR_GET_MAC
- * read the MAC address assigned to a v-adaptor.
- */
-#define MC_CMD_VADAPTOR_GET_MAC 0x5e
-#undef MC_CMD_0x5e_PRIVILEGE_CTG
-
-#define MC_CMD_0x5e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_VADAPTOR_GET_MAC_IN msgrequest */
-#define MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4
-/* The port to which the v-adaptor is connected. */
-#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
-#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
-
-/* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */
-#define MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6
-/* The MAC address assigned to this v-adaptor */
-#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_OFST 0
-#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_LEN 6
-
-
-/***********************************/
/* MC_CMD_VADAPTOR_QUERY
* read some config of v-adaptor.
*/
@@ -19014,86 +19827,6 @@
/***********************************/
-/* MC_CMD_RDWR_A64_REGIONS
- * Assign the 64 bit region addresses.
- */
-#define MC_CMD_RDWR_A64_REGIONS 0x9b
-#undef MC_CMD_0x9b_PRIVILEGE_CTG
-
-#define MC_CMD_0x9b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
-#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_LEN 4
-/* Write enable bits 0-3, set to write, clear to read. */
-#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
-#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
-#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST 16
-#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_LEN 1
-
-/* MC_CMD_RDWR_A64_REGIONS_OUT msgresponse: This data always included
- * regardless of state of write bits in the request.
- */
-#define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_LEN 4
-
-
-/***********************************/
-/* MC_CMD_ONLOAD_STACK_ALLOC
- * Allocate an Onload stack ID.
- */
-#define MC_CMD_ONLOAD_STACK_ALLOC 0x9c
-#undef MC_CMD_0x9c_PRIVILEGE_CTG
-
-#define MC_CMD_0x9c_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
-
-/* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */
-#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
-/* The handle of the owning upstream port */
-#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
-#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
-
-/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
-#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
-/* The handle of the new Onload stack */
-#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0
-#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_LEN 4
-
-
-/***********************************/
-/* MC_CMD_ONLOAD_STACK_FREE
- * Free an Onload stack ID.
- */
-#define MC_CMD_ONLOAD_STACK_FREE 0x9d
-#undef MC_CMD_0x9d_PRIVILEGE_CTG
-
-#define MC_CMD_0x9d_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
-
-/* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */
-#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
-/* The handle of the Onload stack */
-#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0
-#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_LEN 4
-
-/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
-#define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_RSS_CONTEXT_ALLOC
* Allocate an RSS context.
*/
@@ -19305,93 +20038,6 @@
/***********************************/
-/* MC_CMD_RSS_CONTEXT_WRITE_TABLE
- * Write a portion of a selectable-size indirection table for an RSS context.
- * This command must be used instead of MC_CMD_RSS_CONTEXT_SET_TABLE if the
- * RSS_SELECTABLE_TABLE_SIZE bit is set in MC_CMD_GET_CAPABILITIES.
- */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE 0x13e
-#undef MC_CMD_0x13e_PRIVILEGE_CTG
-
-#define MC_CMD_0x13e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN msgrequest */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMIN 8
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMAX 252
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMAX_MCDI2 1020
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LEN(num) (4+4*(num))
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_NUM(len) (((len)-4)/4)
-/* The handle of the RSS context */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_RSS_CONTEXT_ID_OFST 0
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_RSS_CONTEXT_ID_LEN 4
-/* An array of index-value pairs to be written to the table. Structure is
- * MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY.
- */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_OFST 4
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_LEN 4
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MINNUM 1
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MAXNUM 62
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MAXNUM_MCDI2 254
-
-/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_OUT msgresponse */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_OUT_LEN 0
-
-/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY structuredef */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_LEN 4
-/* The index of the table entry to be written. */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_OFST 0
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LEN 2
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LBN 0
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_WIDTH 16
-/* The value to write into the table entry. */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_OFST 2
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LEN 2
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LBN 16
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_WIDTH 16
-
-
-/***********************************/
-/* MC_CMD_RSS_CONTEXT_READ_TABLE
- * Read a portion of a selectable-size indirection table for an RSS context.
- * This command must be used instead of MC_CMD_RSS_CONTEXT_GET_TABLE if the
- * RSS_SELECTABLE_TABLE_SIZE bit is set in MC_CMD_GET_CAPABILITIES.
- */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE 0x13f
-#undef MC_CMD_0x13f_PRIVILEGE_CTG
-
-#define MC_CMD_0x13f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_RSS_CONTEXT_READ_TABLE_IN msgrequest */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMIN 6
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMAX 252
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMAX_MCDI2 1020
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LEN(num) (4+2*(num))
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_NUM(len) (((len)-4)/2)
-/* The handle of the RSS context */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_RSS_CONTEXT_ID_OFST 0
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_RSS_CONTEXT_ID_LEN 4
-/* An array containing the indices of the entries to be read. */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_OFST 4
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_LEN 2
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MINNUM 1
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MAXNUM 124
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MAXNUM_MCDI2 508
-
-/* MC_CMD_RSS_CONTEXT_READ_TABLE_OUT msgresponse */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMIN 2
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMAX 252
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LEN(num) (0+2*(num))
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_NUM(len) (((len)-0)/2)
-/* A buffer containing the requested entries read from the table. */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_OFST 0
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_LEN 2
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MINNUM 1
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MAXNUM 126
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MAXNUM_MCDI2 510
-
-
-/***********************************/
/* MC_CMD_RSS_CONTEXT_SET_FLAGS
* Set various control flags for an RSS context.
*/
@@ -19525,158 +20171,6 @@
/***********************************/
-/* MC_CMD_DOT1P_MAPPING_ALLOC
- * Allocate a .1p mapping.
- */
-#define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4
-#undef MC_CMD_0xa4_PRIVILEGE_CTG
-
-#define MC_CMD_0xa4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DOT1P_MAPPING_ALLOC_IN msgrequest */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
-/* The handle of the owning upstream port */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
-#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
-/* Number of queues spanned by this mapping, in the range 1-64; valid fixed
- * offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and
- * referenced RSS contexts must span no more than this number.
- */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4
-#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_LEN 4
-
-/* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
-/* The handle of the new .1p mapping. This should be considered opaque to the
- * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
- * handle.
- */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
-#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_LEN 4
-/* enum: guaranteed invalid .1p mapping handle value */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff
-
-
-/***********************************/
-/* MC_CMD_DOT1P_MAPPING_FREE
- * Free a .1p mapping.
- */
-#define MC_CMD_DOT1P_MAPPING_FREE 0xa5
-#undef MC_CMD_0xa5_PRIVILEGE_CTG
-
-#define MC_CMD_0xa5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DOT1P_MAPPING_FREE_IN msgrequest */
-#define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
-/* The handle of the .1p mapping */
-#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0
-#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_LEN 4
-
-/* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */
-#define MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DOT1P_MAPPING_SET_TABLE
- * Set the mapping table for a .1p mapping.
- */
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6
-#undef MC_CMD_0xa6_PRIVILEGE_CTG
-
-#define MC_CMD_0xa6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DOT1P_MAPPING_SET_TABLE_IN msgrequest */
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
-/* The handle of the .1p mapping */
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
-/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
- * handle)
- */
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_OFST 4
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_LEN 32
-
-/* MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT msgresponse */
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DOT1P_MAPPING_GET_TABLE
- * Get the mapping table for a .1p mapping.
- */
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7
-#undef MC_CMD_0xa7_PRIVILEGE_CTG
-
-#define MC_CMD_0xa7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DOT1P_MAPPING_GET_TABLE_IN msgrequest */
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
-/* The handle of the .1p mapping */
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
-
-/* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36
-/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
- * handle)
- */
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_OFST 4
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_LEN 32
-
-
-/***********************************/
-/* MC_CMD_GET_VECTOR_CFG
- * Get Interrupt Vector config for this PF.
- */
-#define MC_CMD_GET_VECTOR_CFG 0xbf
-#undef MC_CMD_0xbf_PRIVILEGE_CTG
-
-#define MC_CMD_0xbf_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_VECTOR_CFG_IN msgrequest */
-#define MC_CMD_GET_VECTOR_CFG_IN_LEN 0
-
-/* MC_CMD_GET_VECTOR_CFG_OUT msgresponse */
-#define MC_CMD_GET_VECTOR_CFG_OUT_LEN 12
-/* Base absolute interrupt vector number. */
-#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0
-#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_LEN 4
-/* Number of interrupt vectors allocated to this PF. */
-#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4
-#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_LEN 4
-/* Number of interrupt vectors to allocate per VF. */
-#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8
-#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_LEN 4
-
-
-/***********************************/
-/* MC_CMD_SET_VECTOR_CFG
- * Set Interrupt Vector config for this PF.
- */
-#define MC_CMD_SET_VECTOR_CFG 0xc0
-#undef MC_CMD_0xc0_PRIVILEGE_CTG
-
-#define MC_CMD_0xc0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_SET_VECTOR_CFG_IN msgrequest */
-#define MC_CMD_SET_VECTOR_CFG_IN_LEN 12
-/* Base absolute interrupt vector number, or MC_CMD_RESOURCE_INSTANCE_ANY to
- * let the system find a suitable base.
- */
-#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0
-#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_LEN 4
-/* Number of interrupt vectors to allocate to this PF. */
-#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4
-#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_LEN 4
-/* Number of interrupt vectors to allocate per VF. */
-#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8
-#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_LEN 4
-
-/* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */
-#define MC_CMD_SET_VECTOR_CFG_OUT_LEN 0
-
-
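MC_CMD_SET_VECTOR_CFG_IN above is a fixed 12-byte request made of three 4-byte fields at byte offsets 0, 4 and 8. A stand-alone sketch of assembling such a payload, assuming little-endian encoding of each dword (the helpers are illustrative, not driver API):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Offsets copied from MC_CMD_SET_VECTOR_CFG_IN above. */
#define SET_VECTOR_CFG_IN_LEN              12
#define SET_VECTOR_CFG_IN_VEC_BASE_OFST     0
#define SET_VECTOR_CFG_IN_VECS_PER_PF_OFST  4
#define SET_VECTOR_CFG_IN_VECS_PER_VF_OFST  8

/* Store a 32-bit value little-endian at a byte offset in the payload. */
static void put_le32(uint8_t *buf, size_t ofst, uint32_t val)
{
	buf[ofst + 0] = (uint8_t)(val >>  0);
	buf[ofst + 1] = (uint8_t)(val >>  8);
	buf[ofst + 2] = (uint8_t)(val >> 16);
	buf[ofst + 3] = (uint8_t)(val >> 24);
}

int main(void)
{
	uint8_t req[SET_VECTOR_CFG_IN_LEN];
	size_t i;

	memset(req, 0, sizeof(req));
	put_le32(req, SET_VECTOR_CFG_IN_VEC_BASE_OFST, 0);	/* VEC_BASE */
	put_le32(req, SET_VECTOR_CFG_IN_VECS_PER_PF_OFST, 64);	/* VECS_PER_PF */
	put_le32(req, SET_VECTOR_CFG_IN_VECS_PER_VF_OFST, 4);	/* VECS_PER_VF */

	for (i = 0; i < sizeof(req); i++)
		printf("%02x%s", req[i], (i % 4 == 3) ? "\n" : " ");
	return 0;
}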
-/***********************************/
/* MC_CMD_VPORT_ADD_MAC_ADDRESS
* Add a MAC address to a v-port
*/
@@ -19810,124 +20304,6 @@
/***********************************/
-/* MC_CMD_EVB_PORT_QUERY
- * Read some config of a v-port.
- */
-#define MC_CMD_EVB_PORT_QUERY 0x62
-#undef MC_CMD_0x62_PRIVILEGE_CTG
-
-#define MC_CMD_0x62_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_EVB_PORT_QUERY_IN msgrequest */
-#define MC_CMD_EVB_PORT_QUERY_IN_LEN 4
-/* The handle of the v-port */
-#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0
-#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_LEN 4
-
-/* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */
-#define MC_CMD_EVB_PORT_QUERY_OUT_LEN 8
-/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
-#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0
-#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_LEN 4
-/* The number of VLAN tags that may be used on a v-adaptor connected to this
- * EVB port.
- */
-#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4
-#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_DUMP_BUFTBL_ENTRIES
- * Dump buffer table entries, mainly for command client debug use. Dumps
- * absolute entries, and does not use chunk handles. All entries must be in
- * range, and used for q page mapping, Although the latter restriction may be
- * lifted in future.
- */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
-#undef MC_CMD_0xab_PRIVILEGE_CTG
-
-#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
-/* Index of the first buffer table entry. */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
-/* Number of buffer table entries to dump. */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
-
-/* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_NUM(len) (((len)-0)/12)
-/* Raw buffer table entries, layed out as BUFTBL_ENTRY. */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM 21
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM_MCDI2 85
-
-
-/***********************************/
-/* MC_CMD_SET_RXDP_CONFIG
- * Set global RXDP configuration settings
- */
-#define MC_CMD_SET_RXDP_CONFIG 0xc1
-#undef MC_CMD_0xc1_PRIVILEGE_CTG
-
-#define MC_CMD_0xc1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
-#define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
-#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
-#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_LEN 4
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_OFST 0
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_OFST 0
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_WIDTH 2
-/* enum: pad to 64 bytes */
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64 0x0
-/* enum: pad to 128 bytes (Medford only) */
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 0x1
-/* enum: pad to 256 bytes (Medford only) */
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256 0x2
-
-/* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */
-#define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_GET_RXDP_CONFIG
- * Get global RXDP configuration settings
- */
-#define MC_CMD_GET_RXDP_CONFIG 0xc2
-#undef MC_CMD_0xc2_PRIVILEGE_CTG
-
-#define MC_CMD_0xc2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */
-#define MC_CMD_GET_RXDP_CONFIG_IN_LEN 0
-
-/* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */
-#define MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4
-#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
-#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_LEN 4
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_OFST 0
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_OFST 0
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_WIDTH 2
-/* Enum values, see field(s): */
-/* MC_CMD_SET_RXDP_CONFIG/MC_CMD_SET_RXDP_CONFIG_IN/PAD_HOST_LEN */
-
-
-/***********************************/
/* MC_CMD_GET_CLOCK
* Return the system and PDCPU clock frequencies.
*/
@@ -19950,210 +20326,6 @@
/***********************************/
-/* MC_CMD_SET_CLOCK
- * Control the system and DPCPU clock frequencies. Changes are lost on reboot.
- */
-#define MC_CMD_SET_CLOCK 0xad
-#undef MC_CMD_0xad_PRIVILEGE_CTG
-
-#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_SET_CLOCK_IN msgrequest */
-#define MC_CMD_SET_CLOCK_IN_LEN 28
-/* Requested frequency in MHz for system clock domain */
-#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
-#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_LEN 4
-/* enum: Leave the system clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for inter-core clock domain */
-#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
-#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_LEN 4
-/* enum: Leave the inter-core clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for DPCPU clock domain */
-#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
-#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_LEN 4
-/* enum: Leave the DPCPU clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for PCS clock domain */
-#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12
-#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_LEN 4
-/* enum: Leave the PCS clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for MC clock domain */
-#define MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16
-#define MC_CMD_SET_CLOCK_IN_MC_FREQ_LEN 4
-/* enum: Leave the MC clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for rmon clock domain */
-#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20
-#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_LEN 4
-/* enum: Leave the rmon clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for vswitch clock domain */
-#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24
-#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_LEN 4
-/* enum: Leave the vswitch clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0
-
-/* MC_CMD_SET_CLOCK_OUT msgresponse */
-#define MC_CMD_SET_CLOCK_OUT_LEN 28
-/* Resulting system frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
-#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_LEN 4
-/* enum: The system clock domain doesn't exist */
-#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0
-/* Resulting inter-core frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
-#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_LEN 4
-/* enum: The inter-core clock domain doesn't exist / isn't used */
-#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0
-/* Resulting DPCPU frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
-#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_LEN 4
-/* enum: The dpcpu clock domain doesn't exist */
-#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0
-/* Resulting PCS frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12
-#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_LEN 4
-/* enum: The PCS clock domain doesn't exist / isn't controlled */
-#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0
-/* Resulting MC frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16
-#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_LEN 4
-/* enum: The MC clock domain doesn't exist / isn't controlled */
-#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0
-/* Resulting rmon frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20
-#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_LEN 4
-/* enum: The rmon clock domain doesn't exist / isn't controlled */
-#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0
-/* Resulting vswitch frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24
-#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_LEN 4
-/* enum: The vswitch clock domain doesn't exist / isn't controlled */
-#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0
-
-
-/***********************************/
-/* MC_CMD_DPCPU_RPC
- * Send an arbitrary DPCPU message.
- */
-#define MC_CMD_DPCPU_RPC 0xae
-#undef MC_CMD_0xae_PRIVILEGE_CTG
-
-#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_DPCPU_RPC_IN msgrequest */
-#define MC_CMD_DPCPU_RPC_IN_LEN 36
-#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
-#define MC_CMD_DPCPU_RPC_IN_CPU_LEN 4
-/* enum: RxDPCPU0 */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0
-/* enum: TxDPCPU0 */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1
-/* enum: TxDPCPU1 */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2
-/* enum: RxDPCPU1 (Medford only) */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX1 0x3
-/* enum: RxDPCPU (will be for the calling function; for now, just an alias of
- * DPCPU_RX0)
- */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x80
-/* enum: TxDPCPU (will be for the calling function; for now, just an alias of
- * DPCPU_TX0)
- */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX 0x81
-/* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be
- * initialised to zero
- */
-#define MC_CMD_DPCPU_RPC_IN_DATA_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_DATA_LEN 32
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN 8
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH 8
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_LBN 16
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_LBN 16
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_LBN 48
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_LBN 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_WIDTH 240
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_LBN 48
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_LBN 64
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_LBN 80
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_LBN 16
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_LBN 64
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_WDATA_OFST 12
-#define MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24
-/* Register data to write. Only valid in write/write-read. */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_LEN 4
-/* Register address. */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_LEN 4
-
-/* MC_CMD_DPCPU_RPC_OUT msgresponse */
-#define MC_CMD_DPCPU_RPC_OUT_LEN 36
-#define MC_CMD_DPCPU_RPC_OUT_RC_OFST 0
-#define MC_CMD_DPCPU_RPC_OUT_RC_LEN 4
-/* DATA */
-#define MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4
-#define MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32
-#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_OFST 4
-#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_LBN 32
-#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_WIDTH 16
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_OFST 4
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_LBN 48
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_WIDTH 16
-#define MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12
-#define MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_LEN 4
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_LEN 4
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_LEN 4
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_LEN 4
-
-
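The DPCPU_RPC request above carries a 32-byte DATA area at byte offset 4 whose sub-fields are addressed by bit number (LBN) and bit width rather than by byte offset. A sketch of extracting one such field (HDR_CMD_REQ_COUNT, LBN 48, WIDTH 16), under the assumption that LBN counts bits from the start of the DATA area and that the payload is little-endian:

#include <stdint.h>
#include <stdio.h>

/* Values copied from the MC_CMD_DPCPU_RPC_IN definitions above. */
#define DPCPU_RPC_IN_DATA_OFST               4
#define DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_LBN   48
#define DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_WIDTH 16

/* Read a (lbn, width) bitfield, width <= 32, from a little-endian buffer,
 * where lbn is counted from the byte at offset ofst.
 */
static uint32_t get_le_bits(const uint8_t *buf, size_t ofst,
			    unsigned int lbn, unsigned int width)
{
	unsigned int first = lbn / 8;			/* first byte holding the field */
	unsigned int last = (lbn + width - 1) / 8;	/* last byte holding the field */
	uint64_t acc = 0;
	unsigned int i;

	for (i = first; i <= last; i++)
		acc |= (uint64_t)buf[ofst + i] << (8 * (i - first));

	return (uint32_t)((acc >> (lbn % 8)) &
			  (width < 32 ? (1u << width) - 1 : 0xffffffffu));
}

int main(void)
{
	uint8_t msg[36] = { 0 };	/* MC_CMD_DPCPU_RPC_IN_LEN is 36 */

	/* REQ_COUNT occupies DATA bits 48-63, i.e. message bytes 10 and 11. */
	msg[DPCPU_RPC_IN_DATA_OFST + 6] = 0x34;
	msg[DPCPU_RPC_IN_DATA_OFST + 7] = 0x12;
	printf("REQ_COUNT = 0x%04x\n",	/* prints 0x1234 */
	       get_le_bits(msg, DPCPU_RPC_IN_DATA_OFST,
			   DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_LBN,
			   DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_WIDTH));
	return 0;
}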
-/***********************************/
/* MC_CMD_TRIGGER_INTERRUPT
* Trigger an interrupt by prodding the BIU.
*/
@@ -20173,66 +20345,6 @@
/***********************************/
-/* MC_CMD_SHMBOOT_OP
- * Special operations to support (for now) shmboot.
- */
-#define MC_CMD_SHMBOOT_OP 0xe6
-#undef MC_CMD_0xe6_PRIVILEGE_CTG
-
-#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SHMBOOT_OP_IN msgrequest */
-#define MC_CMD_SHMBOOT_OP_IN_LEN 4
-/* Identifies the operation to perform */
-#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0
-#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_LEN 4
-/* enum: Copy slave_data section to the slave core. (Greenport only) */
-#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0
-
-/* MC_CMD_SHMBOOT_OP_OUT msgresponse */
-#define MC_CMD_SHMBOOT_OP_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_CAP_BLK_READ
- * Read multiple 64bit words from capture block memory
- */
-#define MC_CMD_CAP_BLK_READ 0xe7
-#undef MC_CMD_0xe7_PRIVILEGE_CTG
-
-#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_CAP_BLK_READ_IN msgrequest */
-#define MC_CMD_CAP_BLK_READ_IN_LEN 12
-#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
-#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_LEN 4
-#define MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4
-#define MC_CMD_CAP_BLK_READ_IN_ADDR_LEN 4
-#define MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8
-#define MC_CMD_CAP_BLK_READ_IN_COUNT_LEN 4
-
-/* MC_CMD_CAP_BLK_READ_OUT msgresponse */
-#define MC_CMD_CAP_BLK_READ_OUT_LENMIN 8
-#define MC_CMD_CAP_BLK_READ_OUT_LENMAX 248
-#define MC_CMD_CAP_BLK_READ_OUT_LENMAX_MCDI2 1016
-#define MC_CMD_CAP_BLK_READ_OUT_LEN(num) (0+8*(num))
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_NUM(len) (((len)-0)/8)
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_OFST 0
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LEN 8
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_OFST 0
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_LEN 4
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_LBN 0
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_WIDTH 32
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_OFST 4
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_LEN 4
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_LBN 32
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_WIDTH 32
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MINNUM 1
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM 31
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM_MCDI2 127
-
-
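The 8-byte BUFFER elements returned by MC_CMD_CAP_BLK_READ above are described as two 32-bit halves, BUFFER_LO at offset 0 and BUFFER_HI at offset 4 of each element. A sketch of reassembling one element into a 64-bit value, assuming little-endian dwords and using the offsets copied from the definitions above:

#include <stdint.h>
#include <stdio.h>

/* Offsets copied from MC_CMD_CAP_BLK_READ_OUT above. */
#define CAP_BLK_READ_OUT_BUFFER_LEN     8
#define CAP_BLK_READ_OUT_BUFFER_LO_OFST 0
#define CAP_BLK_READ_OUT_BUFFER_HI_OFST 4

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

/* Return element 'idx' of the BUFFER array in a response payload. */
static uint64_t cap_blk_read_buffer(const uint8_t *resp, unsigned int idx)
{
	const uint8_t *elem = resp + idx * CAP_BLK_READ_OUT_BUFFER_LEN;
	uint64_t lo = get_le32(elem + CAP_BLK_READ_OUT_BUFFER_LO_OFST);
	uint64_t hi = get_le32(elem + CAP_BLK_READ_OUT_BUFFER_HI_OFST);

	return lo | hi << 32;
}

int main(void)
{
	uint8_t resp[16] = { 0x78, 0x56, 0x34, 0x12, 0xef, 0xcd, 0xab, 0x89 };

	/* prints 0x89abcdef12345678 */
	printf("buffer[0] = 0x%016llx\n",
	       (unsigned long long)cap_blk_read_buffer(resp, 0));
	return 0;
}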
-/***********************************/
/* MC_CMD_DUMP_DO
* Take a dump of the DUT state
*/
@@ -20380,34 +20492,6 @@
/***********************************/
-/* MC_CMD_SET_PSU
- * Adjusts power supply parameters. This is a warranty-voiding operation.
- * Returns: ENOENT if the parameter or rail specified does not exist, EINVAL if
- * the parameter is out of range.
- */
-#define MC_CMD_SET_PSU 0xea
-#undef MC_CMD_0xea_PRIVILEGE_CTG
-
-#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_SET_PSU_IN msgrequest */
-#define MC_CMD_SET_PSU_IN_LEN 12
-#define MC_CMD_SET_PSU_IN_PARAM_OFST 0
-#define MC_CMD_SET_PSU_IN_PARAM_LEN 4
-#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */
-#define MC_CMD_SET_PSU_IN_RAIL_OFST 4
-#define MC_CMD_SET_PSU_IN_RAIL_LEN 4
-#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */
-#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */
-/* desired value, eg voltage in mV */
-#define MC_CMD_SET_PSU_IN_VALUE_OFST 8
-#define MC_CMD_SET_PSU_IN_VALUE_LEN 4
-
-/* MC_CMD_SET_PSU_OUT msgresponse */
-#define MC_CMD_SET_PSU_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_GET_FUNCTION_INFO
* Get function information. PF and VF number.
*/
@@ -20448,7 +20532,7 @@
#define MC_CMD_ENABLE_OFFLINE_BIST 0xed
#undef MC_CMD_0xed_PRIVILEGE_CTG
-#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */
#define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0
@@ -20458,137 +20542,13 @@
/***********************************/
-/* MC_CMD_UART_SEND_DATA
- * Send checksummed[sic] block of data over the uart. Response is a placeholder
- * should we wish to make this reliable; currently requests are fire-and-
- * forget.
- */
-#define MC_CMD_UART_SEND_DATA 0xee
-#undef MC_CMD_0xee_PRIVILEGE_CTG
-
-#define MC_CMD_0xee_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_UART_SEND_DATA_OUT msgrequest */
-#define MC_CMD_UART_SEND_DATA_OUT_LENMIN 16
-#define MC_CMD_UART_SEND_DATA_OUT_LENMAX 252
-#define MC_CMD_UART_SEND_DATA_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num))
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_NUM(len) (((len)-16)/1)
-/* CRC32 over OFFSET, LENGTH, RESERVED, DATA */
-#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0
-#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_LEN 4
-/* Offset at which to write the data */
-#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4
-#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_LEN 4
-/* Length of data */
-#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8
-#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_LEN 4
-/* Reserved for future use */
-#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12
-#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_LEN 4
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM 236
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM_MCDI2 1004
-
-/* MC_CMD_UART_SEND_DATA_IN msgresponse */
-#define MC_CMD_UART_SEND_DATA_IN_LEN 0
-
-
-/***********************************/
-/* MC_CMD_UART_RECV_DATA
- * Request checksummed[sic] block of data over the uart. Only a placeholder,
- * subject to change and not currently implemented.
- */
-#define MC_CMD_UART_RECV_DATA 0xef
-#undef MC_CMD_0xef_PRIVILEGE_CTG
-
-#define MC_CMD_0xef_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_UART_RECV_DATA_OUT msgrequest */
-#define MC_CMD_UART_RECV_DATA_OUT_LEN 16
-/* CRC32 over OFFSET, LENGTH, RESERVED */
-#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0
-#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_LEN 4
-/* Offset from which to read the data */
-#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4
-#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_LEN 4
-/* Length of data */
-#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8
-#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_LEN 4
-/* Reserved for future use */
-#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12
-#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_LEN 4
-
-/* MC_CMD_UART_RECV_DATA_IN msgresponse */
-#define MC_CMD_UART_RECV_DATA_IN_LENMIN 16
-#define MC_CMD_UART_RECV_DATA_IN_LENMAX 252
-#define MC_CMD_UART_RECV_DATA_IN_LENMAX_MCDI2 1020
-#define MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num))
-#define MC_CMD_UART_RECV_DATA_IN_DATA_NUM(len) (((len)-16)/1)
-/* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */
-#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0
-#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_LEN 4
-/* Offset at which to write the data */
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_LEN 4
-/* Length of data */
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_LEN 4
-/* Reserved for future use */
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_LEN 4
-#define MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16
-#define MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1
-#define MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0
-#define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM 236
-#define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM_MCDI2 1004
-
-
-/***********************************/
-/* MC_CMD_READ_FUSES
- * Read data programmed into the device One-Time-Programmable (OTP) Fuses
- */
-#define MC_CMD_READ_FUSES 0xf0
-#undef MC_CMD_0xf0_PRIVILEGE_CTG
-
-#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_READ_FUSES_IN msgrequest */
-#define MC_CMD_READ_FUSES_IN_LEN 8
-/* Offset in OTP to read */
-#define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0
-#define MC_CMD_READ_FUSES_IN_OFFSET_LEN 4
-/* Length of data to read in bytes */
-#define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4
-#define MC_CMD_READ_FUSES_IN_LENGTH_LEN 4
-
-/* MC_CMD_READ_FUSES_OUT msgresponse */
-#define MC_CMD_READ_FUSES_OUT_LENMIN 4
-#define MC_CMD_READ_FUSES_OUT_LENMAX 252
-#define MC_CMD_READ_FUSES_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
-#define MC_CMD_READ_FUSES_OUT_DATA_NUM(len) (((len)-4)/1)
-/* Length of returned OTP data in bytes */
-#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
-#define MC_CMD_READ_FUSES_OUT_LENGTH_LEN 4
-/* Returned data */
-#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4
-#define MC_CMD_READ_FUSES_OUT_DATA_LEN 1
-#define MC_CMD_READ_FUSES_OUT_DATA_MINNUM 0
-#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM 248
-#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM_MCDI2 1016
-
-
-/***********************************/
/* MC_CMD_KR_TUNE
* Get or set KR Serdes RXEQ and TX Driver settings
*/
#define MC_CMD_KR_TUNE 0xf1
#undef MC_CMD_0xf1_PRIVILEGE_CTG
-#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_KR_TUNE_IN msgrequest */
#define MC_CMD_KR_TUNE_IN_LENMIN 4
@@ -21138,262 +21098,6 @@
/***********************************/
-/* MC_CMD_PCIE_TUNE
- * Get or set PCIE Serdes RXEQ and TX Driver settings
- */
-#define MC_CMD_PCIE_TUNE 0xf2
-#undef MC_CMD_0xf2_PRIVILEGE_CTG
-
-#define MC_CMD_0xf2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PCIE_TUNE_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_IN_LENMIN 4
-#define MC_CMD_PCIE_TUNE_IN_LENMAX 252
-#define MC_CMD_PCIE_TUNE_IN_LENMAX_MCDI2 1020
-#define MC_CMD_PCIE_TUNE_IN_LEN(num) (4+4*(num))
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_NUM(len) (((len)-4)/4)
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_LEN 1
-/* enum: Get current RXEQ settings */
-#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0
-/* enum: Override RXEQ settings */
-#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1
-/* enum: Get current TX Driver settings */
-#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2
-/* enum: Override TX Driver settings */
-#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3
-/* enum: Start PCIe Serdes Eye diagram plot on a given lane. */
-#define MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5
-/* enum: Poll PCIe Serdes Eye diagram plot. Returns one row of BER data. The
- * caller should call this command repeatedly after starting eye plot, until no
- * more data is returned.
- */
-#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6
-/* enum: Enable the SERDES BIST and set it to generate a 200MHz square wave */
-#define MC_CMD_PCIE_TUNE_IN_BIST_SQUARE_WAVE 0x7
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3
-/* Arguments specific to the operation */
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_OFST 4
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_LEN 4
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MINNUM 0
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM 62
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM_MCDI2 254
-
-/* MC_CMD_PCIE_TUNE_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_OUT_LEN 0
-
-/* MC_CMD_PCIE_TUNE_RXEQ_GET_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_LEN 4
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
-
-/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMIN 4
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX 252
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4)
-/* RXEQ Parameter */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: Attenuation (0-15) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0
-/* enum: CTLE Boost (0-15) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1
-/* enum: DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2
-/* enum: DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3
-/* enum: DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4
-/* enum: DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5
-/* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6
-/* enum: DFE DLev */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_DLEV 0x7
-/* enum: Figure of Merit */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_FOM 0x8
-/* enum: CTLE EQ Capacitor (HF Gain) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
-/* enum: CTLE EQ Resistor (DC Gain) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 5
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_8 0x8 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_9 0x9 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_10 0xa /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_11 0xb /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_12 0xc /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_13 0xd /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 13
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 14
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 10
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
-
-/* MC_CMD_PCIE_TUNE_RXEQ_SET_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMIN 8
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX 252
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX_MCDI2 1020
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num))
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_NUM(len) (((len)-4)/4)
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_LEN 1
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_LEN 3
-/* RXEQ Parameter */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LEN 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM_MCDI2 254
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8
-/* Enum values, see field(s): */
-/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_ID */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 5
-/* Enum values, see field(s): */
-/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 13
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_LBN 14
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 2
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8
-
-/* MC_CMD_PCIE_TUNE_RXEQ_SET_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_OUT_LEN 0
-
-/* MC_CMD_PCIE_TUNE_TXEQ_GET_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_LEN 4
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
-
-/* MC_CMD_PCIE_TUNE_TXEQ_GET_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMIN 4
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX 252
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4)
-/* TXEQ Parameter */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: TxMargin (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0
-/* enum: TxSwing (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1
-/* enum: De-emphasis coefficient C(-1) (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2
-/* enum: De-emphasis coefficient C(0) (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3
-/* enum: De-emphasis coefficient C(+1) (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 4
-/* Enum values, see field(s): */
-/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_LBN 12
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 12
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
-
-/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LEN 8
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_LEN 4
-
-/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0
-
-/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_LEN 4
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
-
-/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_NUM(len) (((len)-0)/2)
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM_MCDI2 510
-
-/* MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN_LEN 0
-
-/* MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_OUT msgrequest */
-#define MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_OUT_LEN 0
-
-
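Each PARAM dword of MC_CMD_PCIE_TUNE_RXEQ_GET_OUT above multiplexes several sub-fields: the parameter ID in bits 0-7, the lane in bits 8-12, the autocal flag in bit 13 and the current value in bits 24-31. A sketch of decoding one such dword with the LBN/WIDTH values copied from the definitions above (the example value is made up):

#include <stdint.h>
#include <stdio.h>

/* Values copied from MC_CMD_PCIE_TUNE_RXEQ_GET_OUT above. */
#define RXEQ_PARAM_ID_LBN        0
#define RXEQ_PARAM_ID_WIDTH      8
#define RXEQ_PARAM_LANE_LBN      8
#define RXEQ_PARAM_LANE_WIDTH    5
#define RXEQ_PARAM_AUTOCAL_LBN   13
#define RXEQ_PARAM_AUTOCAL_WIDTH 1
#define RXEQ_PARAM_CURRENT_LBN   24
#define RXEQ_PARAM_CURRENT_WIDTH 8

/* Extract an (lbn, width) sub-field from a single 32-bit parameter word. */
static uint32_t field(uint32_t dword, unsigned int lbn, unsigned int width)
{
	return (dword >> lbn) & ((1u << width) - 1);
}

int main(void)
{
	uint32_t param = 0x2f002101;	/* e.g. BOOST, lane 1, autocal on, value 0x2f */

	printf("id=%u lane=%u autocal=%u current=0x%02x\n",
	       field(param, RXEQ_PARAM_ID_LBN, RXEQ_PARAM_ID_WIDTH),
	       field(param, RXEQ_PARAM_LANE_LBN, RXEQ_PARAM_LANE_WIDTH),
	       field(param, RXEQ_PARAM_AUTOCAL_LBN, RXEQ_PARAM_AUTOCAL_WIDTH),
	       field(param, RXEQ_PARAM_CURRENT_LBN, RXEQ_PARAM_CURRENT_WIDTH));
	return 0;
}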
-/***********************************/
/* MC_CMD_LICENSING
* Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
* - not used for V3 licensing
@@ -21532,56 +21236,6 @@
/***********************************/
-/* MC_CMD_LICENSING_GET_ID_V3
- * Get ID and type from the NVRAM_PARTITION_TYPE_LICENSE application license
- * partition - V3 licensing (Medford)
- */
-#define MC_CMD_LICENSING_GET_ID_V3 0xd1
-#undef MC_CMD_0xd1_PRIVILEGE_CTG
-
-#define MC_CMD_0xd1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_LICENSING_GET_ID_V3_IN msgrequest */
-#define MC_CMD_LICENSING_GET_ID_V3_IN_LEN 0
-
-/* MC_CMD_LICENSING_GET_ID_V3_OUT msgresponse */
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN 8
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX 252
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num))
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_NUM(len) (((len)-8)/1)
-/* type of license (eg 3) */
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_LEN 4
-/* length of the license ID (in bytes) */
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_LEN 4
-/* the unique license ID of the adapter */
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MINNUM 0
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM 244
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM_MCDI2 1012
-
-
-/***********************************/
-/* MC_CMD_MC2MC_PROXY
- * Execute an arbitrary MCDI command on the slave MC of a dual-core device.
- * This will fail on a single-core system.
- */
-#define MC_CMD_MC2MC_PROXY 0xf4
-#undef MC_CMD_0xf4_PRIVILEGE_CTG
-
-#define MC_CMD_0xf4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_MC2MC_PROXY_IN msgrequest */
-#define MC_CMD_MC2MC_PROXY_IN_LEN 0
-
-/* MC_CMD_MC2MC_PROXY_OUT msgresponse */
-#define MC_CMD_MC2MC_PROXY_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_GET_LICENSED_APP_STATE
* Query the state of an individual licensed application. (Note that the actual
* state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation
@@ -21610,424 +21264,6 @@
/***********************************/
-/* MC_CMD_GET_LICENSED_V3_APP_STATE
- * Query the state of an individual licensed application. (Note that the actual
- * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
- * operation or a reboot of the MC.) Used for V3 licensing (Medford)
- */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE 0xd2
-#undef MC_CMD_0xd2_PRIVILEGE_CTG
-
-#define MC_CMD_0xd2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_LICENSED_V3_APP_STATE_IN msgrequest */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN 8
-/* application ID to query (LICENSED_V3_APPS_xxx) expressed as a single bit
- * mask
- */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_OFST 0
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LEN 8
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_OFST 0
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_LEN 4
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_LBN 0
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_WIDTH 32
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_OFST 4
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_LEN 4
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_LBN 32
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_WIDTH 32
-
-/* MC_CMD_GET_LICENSED_V3_APP_STATE_OUT msgresponse */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
-/* state of this application */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_LEN 4
-/* enum: no (or invalid) license is present for the application */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0
-/* enum: a valid license is present for the application */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1
-
-
-/***********************************/
-/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES
- * Query the state of one or more licensed features. (Note that the actual
- * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
- * operation or a reboot of the MC.) Used for V3 licensing (Medford)
- */
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES 0xd3
-#undef MC_CMD_0xd3_PRIVILEGE_CTG
-
-#define MC_CMD_0xd3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN msgrequest */
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_LEN 8
-/* features to query (LICENSED_V3_FEATURES_xxx) expressed as a mask with one or
- * more bits set
- */
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_OFST 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LEN 8
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_OFST 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_LEN 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_LBN 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_WIDTH 32
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_OFST 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_LEN 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_LBN 32
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_WIDTH 32
-
-/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT msgresponse */
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN 8
-/* states of these features - bit set for licensed, clear for not licensed */
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_OFST 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LEN 8
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_OFST 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_LEN 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_LBN 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_WIDTH 32
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_OFST 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_LEN 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_LBN 32
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_LICENSED_APP_OP
- * Perform an action for an individual licensed application - not used for V3
- * licensing.
- */
-#define MC_CMD_LICENSED_APP_OP 0xf6
-#undef MC_CMD_0xf6_PRIVILEGE_CTG
-
-#define MC_CMD_0xf6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_LICENSED_APP_OP_IN msgrequest */
-#define MC_CMD_LICENSED_APP_OP_IN_LENMIN 8
-#define MC_CMD_LICENSED_APP_OP_IN_LENMAX 252
-#define MC_CMD_LICENSED_APP_OP_IN_LENMAX_MCDI2 1020
-#define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_NUM(len) (((len)-8)/4)
-/* application ID */
-#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
-#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_LEN 4
-/* the type of operation requested */
-#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
-#define MC_CMD_LICENSED_APP_OP_IN_OP_LEN 4
-/* enum: validate application */
-#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
-/* enum: mask application */
-#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1
-/* arguments specific to this particular operation */
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MINNUM 0
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM 61
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM_MCDI2 253
-
-/* MC_CMD_LICENSED_APP_OP_OUT msgresponse */
-#define MC_CMD_LICENSED_APP_OP_OUT_LENMIN 0
-#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX 252
-#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_LICENSED_APP_OP_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_NUM(len) (((len)-0)/4)
-/* result specific to this particular operation */
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_OFST 0
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_LEN 4
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MINNUM 0
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM 63
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM_MCDI2 255
-
-/* MC_CMD_LICENSED_APP_OP_VALIDATE_IN msgrequest */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
-/* application ID */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_LEN 4
-/* the type of operation requested */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_LEN 4
-/* validation challenge */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
-
-/* MC_CMD_LICENSED_APP_OP_VALIDATE_OUT msgresponse */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
-/* feature expiry (time_t) */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_LEN 4
-/* validation response */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
-
-/* MC_CMD_LICENSED_APP_OP_MASK_IN msgrequest */
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
-/* application ID */
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_LEN 4
-/* the type of operation requested */
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_LEN 4
-/* flag */
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_LEN 4
-
-/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
-#define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_LICENSED_V3_VALIDATE_APP
- * Perform validation for an individual licensed application - V3 licensing
- * (Medford)
- */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP 0xd4
-#undef MC_CMD_0xd4_PRIVILEGE_CTG
-
-#define MC_CMD_0xd4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_LICENSED_V3_VALIDATE_APP_IN msgrequest */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_LEN 56
-/* challenge for validation (384 bits) */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_OFST 0
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_LEN 48
-/* application ID expressed as a single bit mask */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_OFST 48
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LEN 8
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_OFST 48
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_LEN 4
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_LBN 384
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_WIDTH 32
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_OFST 52
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_LEN 4
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_LBN 416
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_WIDTH 32
-
-/* MC_CMD_LICENSED_V3_VALIDATE_APP_OUT msgresponse */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN 116
-/* validation response to challenge in the form of ECDSA signature consisting
- * of two 384-bit integers, r and s, in big-endian order. The signature signs a
- * SHA-384 digest of a message constructed from the concatenation of the input
- * message and the remaining fields of this output message, e.g. challenge[48
- * bytes] ... expiry_time[4 bytes] ...
- */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_OFST 0
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 96
-/* application expiry time */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 96
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_LEN 4
-/* application expiry units */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 100
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_LEN 4
-/* enum: expiry units are accounting units */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0
-/* enum: expiry units are calendar days */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1
-/* base MAC address of the NIC stored in NVRAM (note that this is a constant
- * value for a given NIC regardless of which function is calling; effectively this
- * is PF0 base MAC address)
- */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_OFST 104
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_LEN 6
-/* MAC address of v-adaptor associated with the client. If no such v-adaptor
- * exists, then the field is filled with 0xFF.
- */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_OFST 110
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_LEN 6
-
-
-/***********************************/
-/* MC_CMD_LICENSED_V3_MASK_FEATURES
- * Mask features - V3 licensing (Medford)
- */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5
-#undef MC_CMD_0xd5_PRIVILEGE_CTG
-
-#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12
-/* mask to be applied to features to be changed */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_OFST 0
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LEN 8
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_OFST 0
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_LEN 4
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_LBN 0
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_WIDTH 32
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_LEN 4
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_LBN 32
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_WIDTH 32
-/* whether to turn on or turn off the masked features */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_LEN 4
-/* enum: turn the features off */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0
-/* enum: turn the features back on */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1
-
-/* MC_CMD_LICENSED_V3_MASK_FEATURES_OUT msgresponse */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_LICENSING_V3_TEMPORARY
- * Perform operations to support installation of a single temporary license in
- * the adapter, in addition to those found in the licensing partition. See
- * SF-116124-SW for an overview of how this could be used. The license is
- * stored in MC persistent data and so will survive an MC reboot, but will be
- * erased when the adapter is power cycled.
- */
-#define MC_CMD_LICENSING_V3_TEMPORARY 0xd6
-#undef MC_CMD_0xd6_PRIVILEGE_CTG
-
-#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_LICENSING_V3_TEMPORARY_IN msgrequest */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_LEN 4
-/* operation code */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_OFST 0
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_LEN 4
-/* enum: install a new license, overwriting any existing temporary license.
- * This is an asynchronous operation owing to the time taken to validate an
- * ECDSA license
- */
-#define MC_CMD_LICENSING_V3_TEMPORARY_SET 0x0
-/* enum: clear the license immediately rather than waiting for the next power
- * cycle
- */
-#define MC_CMD_LICENSING_V3_TEMPORARY_CLEAR 0x1
-/* enum: get the status of the asynchronous MC_CMD_LICENSING_V3_TEMPORARY_SET
- * operation
- */
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS 0x2
-
-/* MC_CMD_LICENSING_V3_TEMPORARY_IN_SET msgrequest */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN 164
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST 0
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_LEN 4
-/* ECDSA license and signature */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN 160
-
-/* MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR msgrequest */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_LEN 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_OFST 0
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_LEN 4
-
-/* MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS msgrequest */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST 0
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_LEN 4
-
-/* MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS msgresponse */
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN 12
-/* status code */
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST 0
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_LEN 4
-/* enum: finished validating and installing license */
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0
-/* enum: license validation and installation in progress */
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS 0x1
-/* enum: licensing error. More specific error messages are not provided to
- * avoid exposing details of the licensing system to the client
- */
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_ERROR 0x2
-/* bitmask of licensed features */
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_OFST 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LEN 8
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_OFST 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_LEN 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_LBN 32
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_WIDTH 32
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_OFST 8
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_LEN 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_LBN 64
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_WIDTH 32
-
-
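Since the SET operation is asynchronous, a caller is expected to poll with the STATUS op until the MC stops reporting IN_PROGRESS. A minimal sketch of that loop, assuming the usual little-endian MCDI encoding; mcdi_rpc_fn is a hypothetical transport callback, not a real driver interface:

#include <stdint.h>
#include <string.h>

/* Hypothetical MCDI transport callback: send a request, fill the response. */
typedef int (*mcdi_rpc_fn)(unsigned int cmd, const uint8_t *req, size_t req_len,
                           uint8_t *resp, size_t resp_len);

/* Illustrative helper: read a little-endian 32-bit value at a byte offset. */
static uint32_t read_le32(const uint8_t *buf, unsigned int ofst)
{
        return (uint32_t)buf[ofst] | ((uint32_t)buf[ofst + 1] << 8) |
               ((uint32_t)buf[ofst + 2] << 16) | ((uint32_t)buf[ofst + 3] << 24);
}

/* Poll MC_CMD_LICENSING_V3_TEMPORARY with the STATUS op until validation of a
 * previously submitted temporary license finishes. Returns the final status
 * (OK or ERROR), or -1 on a transport failure.
 */
static int poll_temporary_license(mcdi_rpc_fn rpc)
{
        uint8_t req[MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN] = { 0 };
        uint8_t resp[MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN];
        uint32_t status;

        /* OP is a little-endian dword; the STATUS op code fits in its low byte
         * and the remaining bytes stay zero.
         */
        req[MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST] =
                MC_CMD_LICENSING_V3_TEMPORARY_STATUS;
        do {
                if (rpc(MC_CMD_LICENSING_V3_TEMPORARY, req, sizeof(req),
                        resp, sizeof(resp)) < 0)
                        return -1;
                status = read_le32(resp,
                        MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST);
        } while (status == MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS);

        return (int)status;
}
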
-/***********************************/
-/* MC_CMD_SET_PORT_SNIFF_CONFIG
- * Configure RX port sniffing for the physical port associated with the calling
- * function. Only a privileged function may change the port sniffing
- * configuration. A copy of all traffic delivered to the host (non-promiscuous
- * mode) or all traffic arriving at the port (promiscuous mode) may be
- * delivered to a specific queue, or a set of queues with RSS.
- */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7
-#undef MC_CMD_0xf7_PRIVILEGE_CTG
-
-#define MC_CMD_0xf7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_PORT_SNIFF_CONFIG_IN msgrequest */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
-/* configuration flags */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_OFST 0
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_OFST 0
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1
-/* receive queue handle (for RSS mode, this is the base queue) */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
-/* receive mode */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
-/* enum: receive to just the specified queue */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
-/* enum: receive to multiple queues using RSS context */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
-/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
- * that these handles should be considered opaque to the host, although a value
- * of 0xFFFFFFFF is guaranteed never to be a valid handle.
- */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
-
-/* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0
-
-
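For illustration, a minimal sketch of packing this request to enable promiscuous sniffing into a single queue, assuming the usual little-endian MCDI encoding; put_le32 and build_port_sniff_request are illustrative names, not existing driver helpers:

#include <stdint.h>
#include <string.h>

/* Illustrative helper: store a 32-bit value little-endian at a byte offset. */
static void put_le32(uint8_t *buf, unsigned int ofst, uint32_t val)
{
        buf[ofst + 0] = val & 0xff;
        buf[ofst + 1] = (val >> 8) & 0xff;
        buf[ofst + 2] = (val >> 16) & 0xff;
        buf[ofst + 3] = (val >> 24) & 0xff;
}

/* Build an MC_CMD_SET_PORT_SNIFF_CONFIG request that enables promiscuous port
 * sniffing to a single RX queue (no RSS). ENABLE and PROMISCUOUS are one-bit
 * flags inside the FLAGS dword, placed at the LBN positions defined above.
 */
static void build_port_sniff_request(uint8_t *inbuf, uint32_t rx_queue)
{
        uint32_t flags = (1u << MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN) |
                         (1u << MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN);

        memset(inbuf, 0, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN);
        put_le32(inbuf, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST, flags);
        put_le32(inbuf, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST, rx_queue);
        put_le32(inbuf, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST,
                 MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE);
        /* RX_CONTEXT is only meaningful for RX_MODE_RSS; left as zero here. */
}

A caller would then submit the buffer as command 0xf7 and expect a zero-length response on success.
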
-/***********************************/
-/* MC_CMD_GET_PORT_SNIFF_CONFIG
- * Obtain the current RX port sniffing configuration for the physical port
- * associated with the calling function. Only a privileged function may read
- * the configuration.
- */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8
-#undef MC_CMD_0xf8_PRIVILEGE_CTG
-
-#define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0
-
-/* MC_CMD_GET_PORT_SNIFF_CONFIG_OUT msgresponse */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16
-/* configuration flags */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_OFST 0
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_OFST 0
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1
-/* receiving queue handle (for RSS mode, this is the base queue) */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
-/* receive mode */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
-/* enum: receiving to just the specified queue */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
-/* enum: receiving to multiple queues using RSS context */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
-/* RSS context (for RX_MODE_RSS) */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
-
-
-/***********************************/
/* MC_CMD_SET_PARSER_DISP_CONFIG
* Change configuration related to the parser-dispatcher subsystem.
*/
@@ -22073,305 +21309,6 @@
/***********************************/
-/* MC_CMD_GET_PARSER_DISP_CONFIG
- * Read configuration related to the parser-dispatcher subsystem.
- */
-#define MC_CMD_GET_PARSER_DISP_CONFIG 0xfa
-#undef MC_CMD_0xfa_PRIVILEGE_CTG
-
-#define MC_CMD_0xfa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_PARSER_DISP_CONFIG_IN msgrequest */
-#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8
-/* the type of configuration setting to read */
-#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
-#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */
-/* handle for the entity to query: queue handle, EVB port ID, etc. depending on
- * the type of configuration setting being read
- */
-#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
-#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
-
-/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX 252
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_NUM(len) (((len)-0)/4)
-/* current value: the details depend on the type of configuration setting being
- * read
- */
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_OFST 0
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN 4
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MINNUM 1
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM 63
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM_MCDI2 255
-
-
-/***********************************/
-/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG
- * Configure TX port sniffing for the physical port associated with the calling
- * function. Only a privileged function may change the port sniffing
- * configuration. A copy of all traffic transmitted through the port may be
- * delivered to a specific queue, or a set of queues with RSS. Note that these
- * packets are delivered with transmit timestamps in the packet prefix, not
- * receive timestamps, so it is likely that the queue(s) will need to be
- * dedicated as TX sniff receivers.
- */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG 0xfb
-#undef MC_CMD_0xfb_PRIVILEGE_CTG
-
-#define MC_CMD_0xfb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16
-/* configuration flags */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_OFST 0
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
-/* receive queue handle (for RSS mode, this is the base queue) */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
-/* receive mode */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
-/* enum: receive to just the specified queue */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
-/* enum: receive to multiple queues using RSS context */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
-/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
- * that these handles should be considered opaque to the host, although a value
- * of 0xFFFFFFFF is guaranteed never to be a valid handle.
- */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
-
-/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG
- * Obtain the current TX port sniffing configuration for the physical port
- * associated with the calling function. Only a privileged function may read
- * the configuration.
- */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc
-#undef MC_CMD_0xfc_PRIVILEGE_CTG
-
-#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN_LEN 0
-
-/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16
-/* configuration flags */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_OFST 0
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
-/* receiving queue handle (for RSS mode, this is the base queue) */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
-/* receive mode */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
-/* enum: receiving to just the specified queue */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
-/* enum: receiving to multiple queues using RSS context */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
-/* RSS context (for RX_MODE_RSS) */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
-
-
-/***********************************/
-/* MC_CMD_RMON_STATS_RX_ERRORS
- * Per-queue RX error stats.
- */
-#define MC_CMD_RMON_STATS_RX_ERRORS 0xfe
-#undef MC_CMD_0xfe_PRIVILEGE_CTG
-
-#define MC_CMD_0xfe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_RMON_STATS_RX_ERRORS_IN msgrequest */
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8
-/* The rx queue to get stats for. */
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_LEN 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_LEN 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_OFST 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_LEN 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_LEN 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_LEN 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_LEN 4
-
-
-/***********************************/
-/* MC_CMD_GET_PCIE_RESOURCE_INFO
- * Find out about available PCIE resources
- */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd
-#undef MC_CMD_0xfd_PRIVILEGE_CTG
-
-#define MC_CMD_0xfd_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0
-
-/* MC_CMD_GET_PCIE_RESOURCE_INFO_OUT msgresponse */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28
-/* The maximum number of PFs the device can expose */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_LEN 4
-/* The maximum number of VFs the device can expose in total */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_LEN 4
-/* The maximum number of MSI-X vectors the device can provide in total */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_LEN 4
-/* the number of MSI-X vectors the device will allocate by default to each PF
- */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_LEN 4
-/* the number of MSI-X vectors the device will allocate by default to each VF
- */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_LEN 4
-/* the maximum number of MSI-X vectors the device can allocate to any one PF */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_LEN 4
-/* the maximum number of MSI-X vectors the device can allocate to any one VF */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_GET_PORT_MODES
- * Find out about available port modes
- */
-#define MC_CMD_GET_PORT_MODES 0xff
-#undef MC_CMD_0xff_PRIVILEGE_CTG
-
-#define MC_CMD_0xff_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_PORT_MODES_IN msgrequest */
-#define MC_CMD_GET_PORT_MODES_IN_LEN 0
-
-/* MC_CMD_GET_PORT_MODES_OUT msgresponse */
-#define MC_CMD_GET_PORT_MODES_OUT_LEN 12
-/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*)
- * that are supported for customer use in production firmware.
- */
-#define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
-#define MC_CMD_GET_PORT_MODES_OUT_MODES_LEN 4
-/* Default (canonical) board mode */
-#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
-#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_LEN 4
-/* Current board mode */
-#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
-#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_LEN 4
-
-/* MC_CMD_GET_PORT_MODES_OUT_V2 msgresponse */
-#define MC_CMD_GET_PORT_MODES_OUT_V2_LEN 16
-/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*)
- * that are supported for customer use in production firmware.
- */
-#define MC_CMD_GET_PORT_MODES_OUT_V2_MODES_OFST 0
-#define MC_CMD_GET_PORT_MODES_OUT_V2_MODES_LEN 4
-/* Default (canonical) board mode */
-#define MC_CMD_GET_PORT_MODES_OUT_V2_DEFAULT_MODE_OFST 4
-#define MC_CMD_GET_PORT_MODES_OUT_V2_DEFAULT_MODE_LEN 4
-/* Current board mode */
-#define MC_CMD_GET_PORT_MODES_OUT_V2_CURRENT_MODE_OFST 8
-#define MC_CMD_GET_PORT_MODES_OUT_V2_CURRENT_MODE_LEN 4
-/* Bitmask of engineering port modes available on the board (indexed by
- * TLV_PORT_MODE_*). A superset of MC_CMD_GET_PORT_MODES_OUT/MODES that
- * contains all modes implemented in firmware for a particular board. Modes
- * listed in MODES are considered production modes and should be exposed in
- * userland tools. Modes listed in ENGINEERING_MODES, but not in MODES,
- * should be considered hidden (not to be exposed in userland tools) and for
- * engineering use only. There are no other semantic differences and any mode
- * listed in either MODES or ENGINEERING_MODES can be set on the board.
- */
-#define MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_OFST 12
-#define MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_LEN 4
-
-
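A small sketch of how the V2 response could be interpreted, assuming CURRENT_MODE carries the TLV_PORT_MODE_* index by which the MODES bitmasks are keyed (an assumption here, since the TLV_PORT_MODE_* values are defined elsewhere); the function name is illustrative:

#include <stdint.h>

/* Decide whether the board's current mode is engineering-only: present in
 * ENGINEERING_MODES but not in the production MODES bitmask, and therefore
 * not to be exposed in userland tools.
 */
static int current_mode_is_engineering_only(uint32_t modes,
                                            uint32_t engineering_modes,
                                            uint32_t current_mode)
{
        uint32_t bit;

        if (current_mode >= 32)
                return 0;       /* out of range for a 32-bit mode bitmask */
        bit = 1u << current_mode;
        return (engineering_modes & bit) && !(modes & bit);
}
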
-/***********************************/
-/* MC_CMD_OVERRIDE_PORT_MODE
- * Override flash config port mode for subsequent MC reboot(s). Override data
- * is stored in the persistent data section of DMEM and activated on next MC
- * warm reboot. A cold reboot resets the override. It is assumed that a
- * sufficient number of PFs are available and that port mapping is valid for
- * the new port mode, as the override does not affect PF configuration.
- */
-#define MC_CMD_OVERRIDE_PORT_MODE 0x137
-#undef MC_CMD_0x137_PRIVILEGE_CTG
-
-#define MC_CMD_0x137_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_OVERRIDE_PORT_MODE_IN msgrequest */
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_LEN 8
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_OFST 0
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_LEN 4
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_OFST 0
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_LBN 0
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_WIDTH 1
-/* New mode (TLV_PORT_MODE_*) to set, if override enabled */
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_OFST 4
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_LEN 4
-
-/* MC_CMD_OVERRIDE_PORT_MODE_OUT msgresponse */
-#define MC_CMD_OVERRIDE_PORT_MODE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_READ_ATB
- * Sample voltages on the ATB
- */
-#define MC_CMD_READ_ATB 0x100
-#undef MC_CMD_0x100_PRIVILEGE_CTG
-
-#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_READ_ATB_IN msgrequest */
-#define MC_CMD_READ_ATB_IN_LEN 16
-#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0
-#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_LEN 4
-#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */
-#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */
-#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */
-#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4
-#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_LEN 4
-#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8
-#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_LEN 4
-#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12
-#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_LEN 4
-
-/* MC_CMD_READ_ATB_OUT msgresponse */
-#define MC_CMD_READ_ATB_OUT_LEN 4
-#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0
-#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_LEN 4
-
-
-/***********************************/
/* MC_CMD_GET_WORKAROUNDS
* Read the list of all implemented and all currently enabled workarounds. The
* enums here must correspond with those in MC_CMD_WORKAROUND.
@@ -22538,447 +21475,6 @@
#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_LEN 4
-
-/***********************************/
-/* MC_CMD_GET_SNAPSHOT_LENGTH
- * Obtain the current range of allowable values for the SNAPSHOT_LENGTH
- * parameter to MC_CMD_INIT_RXQ.
- */
-#define MC_CMD_GET_SNAPSHOT_LENGTH 0x101
-#undef MC_CMD_0x101_PRIVILEGE_CTG
-
-#define MC_CMD_0x101_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_SNAPSHOT_LENGTH_IN msgrequest */
-#define MC_CMD_GET_SNAPSHOT_LENGTH_IN_LEN 0
-
-/* MC_CMD_GET_SNAPSHOT_LENGTH_OUT msgresponse */
-#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8
-/* Minimum acceptable snapshot length. */
-#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0
-#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_LEN 4
-/* Maximum acceptable snapshot length. */
-#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4
-#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_LEN 4
-
-
-/***********************************/
-/* MC_CMD_FUSE_DIAGS
- * Additional fuse diagnostics
- */
-#define MC_CMD_FUSE_DIAGS 0x102
-#undef MC_CMD_0x102_PRIVILEGE_CTG
-
-#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_FUSE_DIAGS_IN msgrequest */
-#define MC_CMD_FUSE_DIAGS_IN_LEN 0
-
-/* MC_CMD_FUSE_DIAGS_OUT msgresponse */
-#define MC_CMD_FUSE_DIAGS_OUT_LEN 48
-/* Total number of mismatched bits between pairs in area 0 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_LEN 4
-/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_LEN 4
-/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_LEN 4
-/* Checksum of data after logical OR of pairs in area 0 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_LEN 4
-/* Total number of mismatched bits between pairs in area 1 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_LEN 4
-/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_LEN 4
-/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_LEN 4
-/* Checksum of data after logical OR of pairs in area 1 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_LEN 4
-/* Total number of mismatched bits between pairs in area 2 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_LEN 4
-/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_LEN 4
-/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_LEN 4
-/* Checksum of data after logical OR of pairs in area 2 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PRIVILEGE_MODIFY
- * Modify the privileges of a set of PCIe functions. Note that this operation
- * only affects non-admin functions unless the admin privilege itself is
- * included in one of the masks provided.
- */
-#define MC_CMD_PRIVILEGE_MODIFY 0x60
-#undef MC_CMD_0x60_PRIVILEGE_CTG
-
-#define MC_CMD_0x60_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PRIVILEGE_MODIFY_IN msgrequest */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
-/* The groups of functions to have their privilege masks modified. */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_LEN 4
-#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */
-/* For VFS_OF_PF specify the PF, for ONE specify the target function */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_LEN 4
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_OFST 4
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_OFST 4
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_WIDTH 16
-/* Privileges to be added to the target functions. For privilege definitions
- * refer to the command MC_CMD_PRIVILEGE_MASK
- */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
-#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_LEN 4
-/* Privileges to be removed from the target functions. For privilege
- * definitions refer to the command MC_CMD_PRIVILEGE_MASK
- */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12
-#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_LEN 4
-
-/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */
-#define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0
-
-
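As a small sketch, the FUNCTION dword for the VFS_OF_PF and ONE groups can be assembled from the PF/VF sub-fields defined above; privilege_modify_function is an illustrative name, not a driver helper:

#include <stdint.h>

/* Pack the PF index (bits 0-15) and VF index (bits 16-31) into the FUNCTION
 * dword of an MC_CMD_PRIVILEGE_MODIFY request, per the LBN/WIDTH definitions
 * above.
 */
static uint32_t privilege_modify_function(uint16_t pf, uint16_t vf)
{
        return ((uint32_t)pf << MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN) |
               ((uint32_t)vf << MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN);
}
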
-/***********************************/
-/* MC_CMD_XPM_READ_BYTES
- * Read XPM memory
- */
-#define MC_CMD_XPM_READ_BYTES 0x103
-#undef MC_CMD_0x103_PRIVILEGE_CTG
-
-#define MC_CMD_0x103_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_XPM_READ_BYTES_IN msgrequest */
-#define MC_CMD_XPM_READ_BYTES_IN_LEN 8
-/* Start address (byte) */
-#define MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0
-#define MC_CMD_XPM_READ_BYTES_IN_ADDR_LEN 4
-/* Count (bytes) */
-#define MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4
-#define MC_CMD_XPM_READ_BYTES_IN_COUNT_LEN 4
-
-/* MC_CMD_XPM_READ_BYTES_OUT msgresponse */
-#define MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0
-#define MC_CMD_XPM_READ_BYTES_OUT_LENMAX 252
-#define MC_CMD_XPM_READ_BYTES_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_XPM_READ_BYTES_OUT_LEN(num) (0+1*(num))
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_NUM(len) (((len)-0)/1)
-/* Data */
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_OFST 0
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_LEN 1
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MINNUM 0
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM 252
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM_MCDI2 1020
-
-
-/***********************************/
-/* MC_CMD_XPM_WRITE_BYTES
- * Write XPM memory
- */
-#define MC_CMD_XPM_WRITE_BYTES 0x104
-#undef MC_CMD_0x104_PRIVILEGE_CTG
-
-#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */
-#define MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8
-#define MC_CMD_XPM_WRITE_BYTES_IN_LENMAX 252
-#define MC_CMD_XPM_WRITE_BYTES_IN_LENMAX_MCDI2 1020
-#define MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num))
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_NUM(len) (((len)-8)/1)
-/* Start address (byte) */
-#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0
-#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_LEN 4
-/* Count (bytes) */
-#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4
-#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_LEN 4
-/* Data */
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MINNUM 0
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM 244
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM_MCDI2 1012
-
-/* MC_CMD_XPM_WRITE_BYTES_OUT msgresponse */
-#define MC_CMD_XPM_WRITE_BYTES_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_XPM_READ_SECTOR
- * Read XPM sector
- */
-#define MC_CMD_XPM_READ_SECTOR 0x105
-#undef MC_CMD_0x105_PRIVILEGE_CTG
-
-#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_READ_SECTOR_IN msgrequest */
-#define MC_CMD_XPM_READ_SECTOR_IN_LEN 8
-/* Sector index */
-#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0
-#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_LEN 4
-/* Sector size */
-#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4
-#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_LEN 4
-
-/* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */
-#define MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4
-#define MC_CMD_XPM_READ_SECTOR_OUT_LENMAX 36
-#define MC_CMD_XPM_READ_SECTOR_OUT_LENMAX_MCDI2 36
-#define MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num))
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_NUM(len) (((len)-4)/1)
-/* Sector type */
-#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0
-#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_LEN 4
-#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_DATA 0x3 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */
-/* Sector data */
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MINNUM 0
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM 32
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM_MCDI2 32
-
-
-/***********************************/
-/* MC_CMD_XPM_WRITE_SECTOR
- * Write XPM sector
- */
-#define MC_CMD_XPM_WRITE_SECTOR 0x106
-#undef MC_CMD_0x106_PRIVILEGE_CTG
-
-#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */
-#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12
-#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX 44
-#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX_MCDI2 44
-#define MC_CMD_XPM_WRITE_SECTOR_IN_LEN(num) (12+1*(num))
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_NUM(len) (((len)-12)/1)
-/* If writing fails due to an uncorrectable error, try up to RETRIES following
- * sectors (or until no more space available). If 0, only one write attempt is
- * made. Note that uncorrectable errors are unlikely, thanks to the XPM
- * self-repair mechanism.
- */
-#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_OFST 0
-#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_LEN 1
-#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_OFST 1
-#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3
-/* Sector type */
-#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
-#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
-/* Sector size */
-#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
-#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_LEN 4
-/* Sector data */
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MINNUM 0
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM 32
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM_MCDI2 32
-
-/* MC_CMD_XPM_WRITE_SECTOR_OUT msgresponse */
-#define MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4
-/* New sector index */
-#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0
-#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_LEN 4
-
-
-/***********************************/
-/* MC_CMD_XPM_INVALIDATE_SECTOR
- * Invalidate XPM sector
- */
-#define MC_CMD_XPM_INVALIDATE_SECTOR 0x107
-#undef MC_CMD_0x107_PRIVILEGE_CTG
-
-#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */
-#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4
-/* Sector index */
-#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0
-#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_LEN 4
-
-/* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */
-#define MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_XPM_BLANK_CHECK
- * Blank-check XPM memory and report bad locations
- */
-#define MC_CMD_XPM_BLANK_CHECK 0x108
-#undef MC_CMD_0x108_PRIVILEGE_CTG
-
-#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */
-#define MC_CMD_XPM_BLANK_CHECK_IN_LEN 8
-/* Start address (byte) */
-#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0
-#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_LEN 4
-/* Count (bytes) */
-#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4
-#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_LEN 4
-
-/* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */
-#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4
-#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX 252
-#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num))
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_NUM(len) (((len)-4)/2)
-/* Total number of bad (non-blank) locations */
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_LEN 4
-/* Addresses of bad locations (may be fewer than BAD_COUNT if they do not all
- * fit into the MCDI response)
- */
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_OFST 4
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN 2
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MINNUM 0
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM 124
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM_MCDI2 508
-
-
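Because the response is variable-length, the number of returned addresses has to be derived from the actual response length with the _NUM(len) macro. A sketch, assuming little-endian 16-bit entries; the function and parameter names are illustrative:

#include <stddef.h>
#include <stdint.h>

/* Walk the bad-address array in an MC_CMD_XPM_BLANK_CHECK response. The
 * _NUM(len) macro above recovers how many 16-bit address entries were actually
 * returned, which may be fewer than BAD_COUNT if they did not all fit.
 */
static size_t count_reported_bad_addrs(const uint8_t *resp, size_t resp_len,
                                       uint16_t *addrs, size_t max_addrs)
{
        size_t n = MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_NUM(resp_len);
        size_t i;

        if (n > max_addrs)
                n = max_addrs;
        for (i = 0; i < n; i++) {
                const uint8_t *p = resp +
                        MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_OFST +
                        i * MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN;

                addrs[i] = (uint16_t)(p[0] | (p[1] << 8)); /* little-endian */
        }
        return n;
}
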
-/***********************************/
-/* MC_CMD_XPM_REPAIR
- * Blank-check and repair XPM memory
- */
-#define MC_CMD_XPM_REPAIR 0x109
-#undef MC_CMD_0x109_PRIVILEGE_CTG
-
-#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_REPAIR_IN msgrequest */
-#define MC_CMD_XPM_REPAIR_IN_LEN 8
-/* Start address (byte) */
-#define MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0
-#define MC_CMD_XPM_REPAIR_IN_ADDR_LEN 4
-/* Count (bytes) */
-#define MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4
-#define MC_CMD_XPM_REPAIR_IN_COUNT_LEN 4
-
-/* MC_CMD_XPM_REPAIR_OUT msgresponse */
-#define MC_CMD_XPM_REPAIR_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_XPM_DECODER_TEST
- * Test XPM memory address decoders for gross manufacturing defects. Can only
- * be performed on an unprogrammed part.
- */
-#define MC_CMD_XPM_DECODER_TEST 0x10a
-#undef MC_CMD_0x10a_PRIVILEGE_CTG
-
-#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_DECODER_TEST_IN msgrequest */
-#define MC_CMD_XPM_DECODER_TEST_IN_LEN 0
-
-/* MC_CMD_XPM_DECODER_TEST_OUT msgresponse */
-#define MC_CMD_XPM_DECODER_TEST_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_XPM_WRITE_TEST
- * XPM memory write test. Test XPM write logic for gross manufacturing defects
- * by writing to a dedicated test row. There are 16 locations in the test row
- * and the test can only be performed on locations that have not been
- * previously used (i.e. can be run at most 16 times). The test will pick the
- * first available location to use, or fail with ENOSPC if none are left.
- */
-#define MC_CMD_XPM_WRITE_TEST 0x10b
-#undef MC_CMD_0x10b_PRIVILEGE_CTG
-
-#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_WRITE_TEST_IN msgrequest */
-#define MC_CMD_XPM_WRITE_TEST_IN_LEN 0
-
-/* MC_CMD_XPM_WRITE_TEST_OUT msgresponse */
-#define MC_CMD_XPM_WRITE_TEST_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_EXEC_SIGNED
- * Check the CMAC of the contents of IMEM and DMEM against the value supplied
- * and if correct begin execution from the start of IMEM. The caller supplies a
- * key ID, the length of IMEM and DMEM to validate and the expected CMAC. CMAC
- * computation runs from the start of IMEM, and from the start of DMEM + 16k,
- * to match flash booting. The command will respond with EINVAL if the CMAC
- * does not match; otherwise it will respond with success before jumping to IMEM.
- */
-#define MC_CMD_EXEC_SIGNED 0x10c
-#undef MC_CMD_0x10c_PRIVILEGE_CTG
-
-#define MC_CMD_0x10c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_EXEC_SIGNED_IN msgrequest */
-#define MC_CMD_EXEC_SIGNED_IN_LEN 28
-/* the length of code to include in the CMAC */
-#define MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0
-#define MC_CMD_EXEC_SIGNED_IN_CODELEN_LEN 4
-/* the length of data to include in the CMAC */
-#define MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4
-#define MC_CMD_EXEC_SIGNED_IN_DATALEN_LEN 4
-/* the XPM sector containing the key to use */
-#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8
-#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_LEN 4
-/* the expected CMAC value */
-#define MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12
-#define MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16
-
-/* MC_CMD_EXEC_SIGNED_OUT msgresponse */
-#define MC_CMD_EXEC_SIGNED_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_PREPARE_SIGNED
- * Prepare to upload a signed image. This will scrub the specified length of
- * the data region, which must be at least as large as the DATALEN supplied to
- * MC_CMD_EXEC_SIGNED.
- */
-#define MC_CMD_PREPARE_SIGNED 0x10d
-#undef MC_CMD_0x10d_PRIVILEGE_CTG
-
-#define MC_CMD_0x10d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PREPARE_SIGNED_IN msgrequest */
-#define MC_CMD_PREPARE_SIGNED_IN_LEN 4
-/* the length of data area to clear */
-#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0
-#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_LEN 4
-
-/* MC_CMD_PREPARE_SIGNED_OUT msgresponse */
-#define MC_CMD_PREPARE_SIGNED_OUT_LEN 0
-
-
/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
/* UDP port (the standard ports are named below but any port may be used) */
@@ -23049,110 +21545,6 @@
/***********************************/
-/* MC_CMD_RX_BALANCING
- * Configure a port upconverter to distribute packets across both RX engines.
- * Packets are distributed based on a table whose entries give the destination
- * vFIFO. The table index is a hash of the IPv4 source and destination
- * addresses and the VLAN priority.
- */
-#define MC_CMD_RX_BALANCING 0x118
-#undef MC_CMD_0x118_PRIVILEGE_CTG
-
-#define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_RX_BALANCING_IN msgrequest */
-#define MC_CMD_RX_BALANCING_IN_LEN 16
-/* The RX port whose upconverter table will be modified */
-#define MC_CMD_RX_BALANCING_IN_PORT_OFST 0
-#define MC_CMD_RX_BALANCING_IN_PORT_LEN 4
-/* The VLAN priority associated to the table index and vFIFO */
-#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 4
-#define MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 4
-/* The resulting bit of SRC^DST for indexing the table */
-#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 8
-#define MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 4
-/* The RX engine to which the vFIFO in the table entry will point */
-#define MC_CMD_RX_BALANCING_IN_ENG_OFST 12
-#define MC_CMD_RX_BALANCING_IN_ENG_LEN 4
-
-/* MC_CMD_RX_BALANCING_OUT msgresponse */
-#define MC_CMD_RX_BALANCING_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_NVRAM_PRIVATE_APPEND
- * Append a single TLV to the MC_USAGE_TLV partition. Returns MC_CMD_ERR_EEXIST
- * if the tag is already present.
- */
-#define MC_CMD_NVRAM_PRIVATE_APPEND 0x11c
-#undef MC_CMD_0x11c_PRIVILEGE_CTG
-
-#define MC_CMD_0x11c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_NVRAM_PRIVATE_APPEND_IN msgrequest */
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMIN 9
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMAX 252
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMAX_MCDI2 1020
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LEN(num) (8+1*(num))
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_NUM(len) (((len)-8)/1)
-/* The tag to be appended */
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_OFST 0
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_LEN 4
-/* The length of the data */
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_OFST 4
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_LEN 4
-/* The data to be contained in the TLV structure */
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_OFST 8
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_LEN 1
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MINNUM 1
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MAXNUM 244
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MAXNUM_MCDI2 1012
-
-/* MC_CMD_NVRAM_PRIVATE_APPEND_OUT msgresponse */
-#define MC_CMD_NVRAM_PRIVATE_APPEND_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_XPM_VERIFY_CONTENTS
- * Verify that the contents of the XPM memory are correct (Medford only). This
- * is used during manufacture to check that the XPM memory has been programmed
- * correctly at ATE.
- */
-#define MC_CMD_XPM_VERIFY_CONTENTS 0x11b
-#undef MC_CMD_0x11b_PRIVILEGE_CTG
-
-#define MC_CMD_0x11b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_XPM_VERIFY_CONTENTS_IN msgrequest */
-#define MC_CMD_XPM_VERIFY_CONTENTS_IN_LEN 4
-/* Data type to be checked */
-#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_OFST 0
-#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_LEN 4
-
-/* MC_CMD_XPM_VERIFY_CONTENTS_OUT msgresponse */
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMIN 12
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMAX 252
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LEN(num) (12+1*(num))
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_NUM(len) (((len)-12)/1)
-/* Number of sectors found (test builds only) */
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_OFST 0
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_LEN 4
-/* Number of bytes found (test builds only) */
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_OFST 4
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_LEN 4
-/* Length of signature */
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_OFST 8
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_LEN 4
-/* Signature */
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_OFST 12
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_LEN 1
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MINNUM 0
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MAXNUM 240
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MAXNUM_MCDI2 1008
-
-
-/***********************************/
/* MC_CMD_SET_EVQ_TMR
* Update the timer load, timer reload and timer mode values for a given EVQ.
* The requested timer values (in TMR_LOAD_REQ_NS and TMR_RELOAD_REQ_NS) will
@@ -23262,576 +21654,6 @@
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_OFST 32
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_LEN 4
-
-/***********************************/
-/* MC_CMD_ALLOCATE_TX_VFIFO_CP
- * When TX_vFIFO_ULL mode is in use, common pools can be allocated from the
- * unused switch buffers.
- */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP 0x11d
-#undef MC_CMD_0x11d_PRIVILEGE_CTG
-
-#define MC_CMD_0x11d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN msgrequest */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_LEN 20
-/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index. The calling client must be the currently-assigned user of
- * this VI (see MC_CMD_SET_VI_USER).
- */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST 0
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_LEN 4
-/* Will the common pool be used as TX_vFIFO_ULL (1) */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_OFST 4
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_LEN 4
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED 0x1 /* enum */
-/* enum: Using this interface without TX_vFIFO_ULL is not supported for now */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED 0x0
-/* Number of buffers to reserve for the common pool */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_OFST 8
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_LEN 4
-/* TX datapath to which the Common Pool is connected. */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_OFST 12
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_LEN 4
-/* enum: Extracts information from function */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1
-/* Network port or RX Engine to which the common pool connects. */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_OFST 16
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_LEN 4
-/* enum: Extracts information from function */
-/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0 0x0 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT1 0x1 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT2 0x2 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT3 0x3 /* enum */
-/* enum: To enable Switch loopback with Rx engine 0 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE0 0x4
-/* enum: To enable Switch loopback with Rx engine 1 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE1 0x5
-
-/* MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT msgresponse */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_LEN 4
-/* ID of the common pool allocated */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_OFST 0
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_LEN 4
-
-
-/***********************************/
-/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO
- * When TX_vFIFO_ULL mode is in use, vFIFOs can be allocated from previously
- * allocated common pools.
- */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO 0x11e
-#undef MC_CMD_0x11e_PRIVILEGE_CTG
-
-#define MC_CMD_0x11e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN msgrequest */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LEN 20
-/* The previously allocated common pool with which the new vFIFO will be
- * associated
- */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_OFST 0
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_LEN 4
-/* Port or RX engine with which to associate the vFIFO egress */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_OFST 4
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_LEN 4
-/* enum: Extracts information from common pool */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE -0x1
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0 0x0 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT1 0x1 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT2 0x2 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT3 0x3 /* enum */
-/* enum: To enable Switch loopback with Rx engine 0 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE0 0x4
-/* enum: To enable Switch loopback with Rx engine 1 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1 0x5
-/* Minimum number of buffers that the pool must have */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_OFST 8
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_LEN 4
-/* enum: Do not check the space available */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM 0x0
-/* Will the vFIFO be used as TX_vFIFO_ULL */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_OFST 12
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_LEN 4
-/* Network priority of the vFIFO, if applicable */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_OFST 16
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_LEN 4
-/* enum: Search for the lowest unused priority */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE -0x1
-
-/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT msgresponse */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_LEN 8
-/* Short vFIFO ID */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_OFST 0
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_LEN 4
-/* Network priority of the vFIFO */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_OFST 4
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_LEN 4
-
-
-/***********************************/
-/* MC_CMD_TEARDOWN_TX_VFIFO_VF
- * This interface clears the configuration of the given vFIFO and leaves it
- * ready to be re-used.
- */
-#define MC_CMD_TEARDOWN_TX_VFIFO_VF 0x11f
-#undef MC_CMD_0x11f_PRIVILEGE_CTG
-
-#define MC_CMD_0x11f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TEARDOWN_TX_VFIFO_VF_IN msgrequest */
-#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_LEN 4
-/* Short vFIFO ID */
-#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_OFST 0
-#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_LEN 4
-
-/* MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT msgresponse */
-#define MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DEALLOCATE_TX_VFIFO_CP
- * This interface clears the configuration of the given common pool and leaves
- * it ready to be re-used.
- */
-#define MC_CMD_DEALLOCATE_TX_VFIFO_CP 0x121
-#undef MC_CMD_0x121_PRIVILEGE_CTG
-
-#define MC_CMD_0x121_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN msgrequest */
-#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_LEN 4
-/* Common pool ID given when pool allocated */
-#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_OFST 0
-#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_LEN 4
-
-/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT msgresponse */
-#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS
- * This interface allows the host to find out how many common pool buffers are
- * not yet assigned.
- */
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS 0x124
-#undef MC_CMD_0x124_PRIVILEGE_CTG
-
-#define MC_CMD_0x124_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN msgrequest */
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN_LEN 0
-
-/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT msgresponse */
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_LEN 8
-/* Available buffers for the ENG to NET vFIFOs. */
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_OFST 0
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_LEN 4
-/* Available buffers for the ENG to ENG and NET to ENG vFIFOs. */
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_OFST 4
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_LEN 4
-
-
-/***********************************/
-/* MC_CMD_SUC_VERSION
- * Get the version of the SUC
- */
-#define MC_CMD_SUC_VERSION 0x134
-#undef MC_CMD_0x134_PRIVILEGE_CTG
-
-#define MC_CMD_0x134_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_SUC_VERSION_IN msgrequest */
-#define MC_CMD_SUC_VERSION_IN_LEN 0
-
-/* MC_CMD_SUC_VERSION_OUT msgresponse */
-#define MC_CMD_SUC_VERSION_OUT_LEN 24
-/* The SUC firmware version as four numbers - a.b.c.d */
-#define MC_CMD_SUC_VERSION_OUT_VERSION_OFST 0
-#define MC_CMD_SUC_VERSION_OUT_VERSION_LEN 4
-#define MC_CMD_SUC_VERSION_OUT_VERSION_NUM 4
-/* The date, in seconds since the Unix epoch, when the firmware image was
- * built.
- */
-#define MC_CMD_SUC_VERSION_OUT_BUILD_DATE_OFST 16
-#define MC_CMD_SUC_VERSION_OUT_BUILD_DATE_LEN 4
-/* The ID of the SUC chip. This is specific to the platform but typically
- * indicates family, memory sizes, etc. See SF-116728-SW for further details.
- */
-#define MC_CMD_SUC_VERSION_OUT_CHIP_ID_OFST 20
-#define MC_CMD_SUC_VERSION_OUT_CHIP_ID_LEN 4
-
-/* MC_CMD_SUC_BOOT_VERSION_IN msgrequest: Get the version of the SUC boot
- * loader.
- */
-#define MC_CMD_SUC_BOOT_VERSION_IN_LEN 4
-#define MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_OFST 0
-#define MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_LEN 4
-/* enum: Requests the SUC boot version. */
-#define MC_CMD_SUC_VERSION_GET_BOOT_VERSION 0xb007700b
-
-/* MC_CMD_SUC_BOOT_VERSION_OUT msgresponse */
-#define MC_CMD_SUC_BOOT_VERSION_OUT_LEN 4
-/* The SUC boot version */
-#define MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_OFST 0
-#define MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_LEN 4
-
-
-/***********************************/
-/* MC_CMD_GET_RX_PREFIX_ID
- * This command is part of the mechanism for configuring the format of the RX
- * packet prefix. It takes as input a bitmask of the fields the host would like
- * to be in the prefix. If the hardware supports RX prefixes with that
- * combination of fields, then this command returns a list of prefix-ids,
- * opaque identifiers suitable for use in the RX_PREFIX_ID field of a
- * MC_CMD_INIT_RXQ_V5_IN message. If the combination of fields is not
- * supported, returns ENOTSUP. If the firmware can't create any new prefix-ids
- * due to resource constraints, returns ENOSPC.
- */
-#define MC_CMD_GET_RX_PREFIX_ID 0x13b
-#undef MC_CMD_0x13b_PRIVILEGE_CTG
-
-#define MC_CMD_0x13b_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_RX_PREFIX_ID_IN msgrequest */
-#define MC_CMD_GET_RX_PREFIX_ID_IN_LEN 8
-/* Field bitmask. */
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LEN 8
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_LEN 4
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_LBN 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_WIDTH 32
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_OFST 4
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_LEN 4
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_LBN 32
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_WIDTH 32
-#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_LBN 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_LBN 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_LBN 2
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_LBN 3
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_LBN 4
-#define MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_LBN 5
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_LBN 6
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_LBN 7
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_LBN 7
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_LBN 8
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_LBN 9
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_LBN 10
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_LBN 11
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_WIDTH 1
-
-/* MC_CMD_GET_RX_PREFIX_ID_OUT msgresponse */
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMIN 8
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMAX 252
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_LEN(num) (4+4*(num))
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_NUM(len) (((len)-4)/4)
-/* Number of prefix-ids returned */
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_NUM_RX_PREFIX_IDS_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_NUM_RX_PREFIX_IDS_LEN 4
-/* Opaque prefix identifiers which can be passed into MC_CMD_INIT_RXQ_V5 or
- * MC_CMD_QUERY_PREFIX_ID
- */
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_OFST 4
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_LEN 4
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MINNUM 1
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MAXNUM 62
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MAXNUM_MCDI2 254
-
-/* RX_PREFIX_FIELD_INFO structuredef: Information about a single RX prefix
- * field
- */
-#define RX_PREFIX_FIELD_INFO_LEN 4
-/* The offset of the field from the start of the prefix, in bits */
-#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_OFST 0
-#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_LEN 2
-#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_LBN 0
-#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_WIDTH 16
-/* The width of the field, in bits */
-#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_OFST 2
-#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_LEN 1
-#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_LBN 16
-#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_WIDTH 8
-/* The type of the field. These enum values are in the same order as the fields
- * in the MC_CMD_GET_RX_PREFIX_ID_IN bitmask
- */
-#define RX_PREFIX_FIELD_INFO_TYPE_OFST 3
-#define RX_PREFIX_FIELD_INFO_TYPE_LEN 1
-#define RX_PREFIX_FIELD_INFO_LENGTH 0x0 /* enum */
-#define RX_PREFIX_FIELD_INFO_RSS_HASH_VALID 0x1 /* enum */
-#define RX_PREFIX_FIELD_INFO_USER_FLAG 0x2 /* enum */
-#define RX_PREFIX_FIELD_INFO_CLASS 0x3 /* enum */
-#define RX_PREFIX_FIELD_INFO_PARTIAL_TSTAMP 0x4 /* enum */
-#define RX_PREFIX_FIELD_INFO_RSS_HASH 0x5 /* enum */
-#define RX_PREFIX_FIELD_INFO_USER_MARK 0x6 /* enum */
-#define RX_PREFIX_FIELD_INFO_INGRESS_MPORT 0x7 /* enum */
-#define RX_PREFIX_FIELD_INFO_INGRESS_VPORT 0x7 /* enum */
-#define RX_PREFIX_FIELD_INFO_CSUM_FRAME 0x8 /* enum */
-#define RX_PREFIX_FIELD_INFO_VLAN_STRIP_TCI 0x9 /* enum */
-#define RX_PREFIX_FIELD_INFO_VLAN_STRIPPED 0xa /* enum */
-#define RX_PREFIX_FIELD_INFO_VSWITCH_STATUS 0xb /* enum */
-#define RX_PREFIX_FIELD_INFO_TYPE_LBN 24
-#define RX_PREFIX_FIELD_INFO_TYPE_WIDTH 8
-
-/* RX_PREFIX_FIXED_RESPONSE structuredef: Information about an RX prefix in
- * which every field has a fixed offset and width
- */
-#define RX_PREFIX_FIXED_RESPONSE_LENMIN 4
-#define RX_PREFIX_FIXED_RESPONSE_LENMAX 252
-#define RX_PREFIX_FIXED_RESPONSE_LENMAX_MCDI2 1020
-#define RX_PREFIX_FIXED_RESPONSE_LEN(num) (4+4*(num))
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_NUM(len) (((len)-4)/4)
-/* Length of the RX prefix in bytes */
-#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_OFST 0
-#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_LEN 1
-#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_LBN 0
-#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_WIDTH 8
-/* Number of fields present in the prefix */
-#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_OFST 1
-#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_LEN 1
-#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_LBN 8
-#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_WIDTH 8
-#define RX_PREFIX_FIXED_RESPONSE_RESERVED_OFST 2
-#define RX_PREFIX_FIXED_RESPONSE_RESERVED_LEN 2
-#define RX_PREFIX_FIXED_RESPONSE_RESERVED_LBN 16
-#define RX_PREFIX_FIXED_RESPONSE_RESERVED_WIDTH 16
-/* Array of RX_PREFIX_FIELD_INFO structures, of length FIELD_COUNT */
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_OFST 4
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_LEN 4
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_MINNUM 0
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_MAXNUM 62
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_MAXNUM_MCDI2 254
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_LBN 32
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_WIDTH 32
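
/* A minimal sketch (not taken from the driver) of how the OFFSET_BITS and
 * WIDTH_BITS values reported in RX_PREFIX_FIELD_INFO could be used to pull a
 * field out of the raw RX prefix bytes. Bits are numbered little-endian, as
 * elsewhere in MCDI, and field widths above 64 bits are out of scope here.
 */
#include <stdint.h>

static uint64_t rx_prefix_extract(const uint8_t *prefix,
				  unsigned int offset_bits,
				  unsigned int width_bits)
{
	uint64_t value = 0;
	unsigned int i;

	for (i = 0; i < width_bits; i++) {
		unsigned int bit = offset_bits + i;

		value |= (uint64_t)((prefix[bit / 8] >> (bit % 8)) & 1) << i;
	}
	return value;
}
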
-
-
-/***********************************/
-/* MC_CMD_QUERY_RX_PREFIX_ID
- * This command takes an RX prefix id (obtained from MC_CMD_GET_RX_PREFIX_ID)
- * and returns a description of the RX prefix of packets delivered to an RXQ
- * created with that prefix id
- */
-#define MC_CMD_QUERY_RX_PREFIX_ID 0x13c
-#undef MC_CMD_0x13c_PRIVILEGE_CTG
-
-#define MC_CMD_0x13c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_QUERY_RX_PREFIX_ID_IN msgrequest */
-#define MC_CMD_QUERY_RX_PREFIX_ID_IN_LEN 4
-/* Prefix id to query */
-#define MC_CMD_QUERY_RX_PREFIX_ID_IN_RX_PREFIX_ID_OFST 0
-#define MC_CMD_QUERY_RX_PREFIX_ID_IN_RX_PREFIX_ID_LEN 4
-
-/* MC_CMD_QUERY_RX_PREFIX_ID_OUT msgresponse */
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMIN 4
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMAX 252
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LEN(num) (4+1*(num))
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_NUM(len) (((len)-4)/1)
-/* An enum describing the structure of this response. */
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_OFST 0
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_LEN 1
-/* enum: The response is of format RX_PREFIX_FIXED_RESPONSE */
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_FIXED 0x0
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESERVED_OFST 1
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESERVED_LEN 3
-/* The response. Its format is as defined by the RESPONSE_TYPE value */
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_OFST 4
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_LEN 1
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MINNUM 0
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MAXNUM 248
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MAXNUM_MCDI2 1016
-
-
-/***********************************/
-/* MC_CMD_BUNDLE
- * A command to perform various bundle-related operations on insecure cards.
- */
-#define MC_CMD_BUNDLE 0x13d
-#undef MC_CMD_0x13d_PRIVILEGE_CTG
-
-#define MC_CMD_0x13d_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_BUNDLE_IN msgrequest */
-#define MC_CMD_BUNDLE_IN_LEN 4
-/* Sub-command code */
-#define MC_CMD_BUNDLE_IN_OP_OFST 0
-#define MC_CMD_BUNDLE_IN_OP_LEN 4
-/* enum: Get the current host access mode set on component partitions. */
-#define MC_CMD_BUNDLE_IN_OP_COMPONENT_ACCESS_GET 0x0
-/* enum: Set the host access mode set on component partitions. */
-#define MC_CMD_BUNDLE_IN_OP_COMPONENT_ACCESS_SET 0x1
-
-/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN msgrequest: Retrieve the current
- * access mode on component partitions such as MC_FIRMWARE, SUC_FIRMWARE and
- * EXPANSION_UEFI. This command only works on engineering (insecure) cards. On
- * secure adapters, this command returns MC_CMD_ERR_EPERM.
- */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_LEN 4
-/* Sub-command code. Must be OP_COMPONENT_ACCESS_GET. */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_OP_OFST 0
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_OP_LEN 4
-
-/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT msgresponse: Returns the access
- * control mode.
- */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_LEN 4
-/* Access mode of component partitions. */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_ACCESS_MODE_OFST 0
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_ACCESS_MODE_LEN 4
-/* enum: Component partitions are read-only from the host. */
-#define MC_CMD_BUNDLE_COMPONENTS_READ_ONLY 0x0
-/* enum: Component partitions can be read from and written to by the host. */
-#define MC_CMD_BUNDLE_COMPONENTS_READ_WRITE 0x1
-
-/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN msgrequest: The component
- * partitions such as MC_FIRMWARE, SUC_FIRMWARE, EXPANSION_UEFI are set as
- * read-only on firmware built with bundle support. This command marks these
- * partitions as read/writeable. The access status set by this command does not
- * persist across MC reboots. This command only works on engineering (insecure)
- * cards. On secure adapters, this command returns MC_CMD_ERR_EPERM.
- */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_LEN 8
-/* Sub-command code. Must be OP_COMPONENT_ACCESS_SET. */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_OP_OFST 0
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_OP_LEN 4
-/* Access mode of component partitions. */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_ACCESS_MODE_OFST 4
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_ACCESS_MODE_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT/ACCESS_MODE */
-
-/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_OUT msgresponse */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_GET_VPD
- * Read all VPD starting from a given address
- */
-#define MC_CMD_GET_VPD 0x165
-#undef MC_CMD_0x165_PRIVILEGE_CTG
-
-#define MC_CMD_0x165_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_VPD_IN msgrequest */
-#define MC_CMD_GET_VPD_IN_LEN 4
-/* VPD address to start from. If the VPD is longer than the MCDI buffer
- * (unlikely), the user can make multiple calls with different starting
- * addresses.
- */
-#define MC_CMD_GET_VPD_IN_ADDR_OFST 0
-#define MC_CMD_GET_VPD_IN_ADDR_LEN 4
-
-/* MC_CMD_GET_VPD_OUT msgresponse */
-#define MC_CMD_GET_VPD_OUT_LENMIN 0
-#define MC_CMD_GET_VPD_OUT_LENMAX 252
-#define MC_CMD_GET_VPD_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_GET_VPD_OUT_LEN(num) (0+1*(num))
-#define MC_CMD_GET_VPD_OUT_DATA_NUM(len) (((len)-0)/1)
-/* VPD data returned. */
-#define MC_CMD_GET_VPD_OUT_DATA_OFST 0
-#define MC_CMD_GET_VPD_OUT_DATA_LEN 1
-#define MC_CMD_GET_VPD_OUT_DATA_MINNUM 0
-#define MC_CMD_GET_VPD_OUT_DATA_MAXNUM 252
-#define MC_CMD_GET_VPD_OUT_DATA_MAXNUM_MCDI2 1020
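
/* A minimal sketch of the chunked read pattern the ADDR field enables: keep
 * issuing MC_CMD_GET_VPD with the next start address until an empty response
 * indicates the end of the VPD. mcdi_rpc() is a hypothetical transport helper
 * standing in for the driver's real MCDI plumbing.
 */
#include <stddef.h>
#include <stdint.h>

int mcdi_rpc(unsigned int cmd, const uint8_t *req, size_t req_len,
	     uint8_t *resp, size_t resp_max, size_t *resp_len);

static int read_all_vpd(uint8_t *buf, size_t buf_len, size_t *vpd_len)
{
	size_t offset = 0;

	while (offset < buf_len) {
		uint8_t req[MC_CMD_GET_VPD_IN_LEN];
		size_t resp_len = 0;
		int rc;

		/* ADDR is a little-endian 32-bit dword at request offset 0. */
		req[0] = offset & 0xff;
		req[1] = (offset >> 8) & 0xff;
		req[2] = (offset >> 16) & 0xff;
		req[3] = (offset >> 24) & 0xff;

		rc = mcdi_rpc(MC_CMD_GET_VPD, req, sizeof(req),
			      buf + offset, buf_len - offset, &resp_len);
		if (rc)
			return rc;
		if (resp_len == 0)
			break;			/* no more VPD to read */
		offset += resp_len;
	}
	*vpd_len = offset;
	return 0;
}
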
-
-
-/***********************************/
-/* MC_CMD_GET_NCSI_INFO
- * Provide information about the NC-SI stack
- */
-#define MC_CMD_GET_NCSI_INFO 0x167
-#undef MC_CMD_0x167_PRIVILEGE_CTG
-
-#define MC_CMD_0x167_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_NCSI_INFO_IN msgrequest */
-#define MC_CMD_GET_NCSI_INFO_IN_LEN 8
-/* Operation to be performed */
-#define MC_CMD_GET_NCSI_INFO_IN_OP_OFST 0
-#define MC_CMD_GET_NCSI_INFO_IN_OP_LEN 4
-/* enum: Information on the link settings. */
-#define MC_CMD_GET_NCSI_INFO_IN_OP_LINK 0x0
-/* enum: Statistics associated with the channel */
-#define MC_CMD_GET_NCSI_INFO_IN_OP_STATISTICS 0x1
-/* The NC-SI channel on which the operation is to be performed */
-#define MC_CMD_GET_NCSI_INFO_IN_CHANNEL_OFST 4
-#define MC_CMD_GET_NCSI_INFO_IN_CHANNEL_LEN 4
-
-/* MC_CMD_GET_NCSI_INFO_LINK_OUT msgresponse */
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_LEN 12
-/* Settings as received from BMC. */
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_SETTINGS_OFST 0
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_SETTINGS_LEN 4
-/* Advertised capabilities applied to channel. */
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ADV_CAP_OFST 4
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ADV_CAP_LEN 4
-/* General status */
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATUS_OFST 8
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATUS_LEN 4
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_OFST 8
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_LBN 0
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_WIDTH 2
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_OFST 8
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_LBN 2
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_WIDTH 1
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_OFST 8
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_LBN 3
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_WIDTH 1
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_OFST 8
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_LBN 4
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_WIDTH 1
-
-/* MC_CMD_GET_NCSI_INFO_STATISTICS_OUT msgresponse */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_LEN 28
-/* The number of NC-SI commands received. */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMDS_RX_OFST 0
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMDS_RX_LEN 4
-/* The number of NC-SI commands dropped. */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_PKTS_DROPPED_OFST 4
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_PKTS_DROPPED_LEN 4
-/* The number of invalid NC-SI commands received. */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_TYPE_ERRS_OFST 8
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_TYPE_ERRS_LEN 4
-/* The number of checksum errors seen. */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_CSUM_ERRS_OFST 12
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_CSUM_ERRS_LEN 4
-/* The number of NC-SI requests received. */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_RX_PKTS_OFST 16
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_RX_PKTS_LEN 4
-/* The number of NC-SI responses sent (includes AENs) */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_TX_PKTS_OFST 20
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_TX_PKTS_LEN 4
-/* The number of NC-SI AENs sent */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_AENS_SENT_OFST 24
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_AENS_SENT_LEN 4
-
/* CLIENT_HANDLE structuredef: A client is an abstract entity that can make
* requests of the device and that can own resources managed by the device.
* Examples of clients include PCIe functions and dynamic clients. A client
@@ -23848,59 +21670,10 @@
#define CLIENT_HANDLE_OPAQUE_LBN 0
#define CLIENT_HANDLE_OPAQUE_WIDTH 32
-/* CLOCK_INFO structuredef: Information about a single hardware clock */
-#define CLOCK_INFO_LEN 28
-/* Enumeration that uniquely identifies the clock */
-#define CLOCK_INFO_CLOCK_ID_OFST 0
-#define CLOCK_INFO_CLOCK_ID_LEN 2
-/* enum: The Riverhead CMC (card MC) */
-#define CLOCK_INFO_CLOCK_CMC 0x0
-/* enum: The Riverhead NMC (network MC) */
-#define CLOCK_INFO_CLOCK_NMC 0x1
-/* enum: The Riverhead SDNET slice main logic */
-#define CLOCK_INFO_CLOCK_SDNET 0x2
-/* enum: The Riverhead SDNET LUT */
-#define CLOCK_INFO_CLOCK_SDNET_LUT 0x3
-/* enum: The Riverhead SDNET control logic */
-#define CLOCK_INFO_CLOCK_SDNET_CTRL 0x4
-/* enum: The Riverhead Streaming SubSystem */
-#define CLOCK_INFO_CLOCK_SSS 0x5
-/* enum: The Riverhead network MAC and associated CSR registers */
-#define CLOCK_INFO_CLOCK_MAC 0x6
-#define CLOCK_INFO_CLOCK_ID_LBN 0
-#define CLOCK_INFO_CLOCK_ID_WIDTH 16
-/* Assorted flags */
-#define CLOCK_INFO_FLAGS_OFST 2
-#define CLOCK_INFO_FLAGS_LEN 2
-#define CLOCK_INFO_SETTABLE_OFST 2
-#define CLOCK_INFO_SETTABLE_LBN 0
-#define CLOCK_INFO_SETTABLE_WIDTH 1
-#define CLOCK_INFO_FLAGS_LBN 16
-#define CLOCK_INFO_FLAGS_WIDTH 16
-/* The frequency in Hz */
-#define CLOCK_INFO_FREQUENCY_OFST 4
-#define CLOCK_INFO_FREQUENCY_LEN 8
-#define CLOCK_INFO_FREQUENCY_LO_OFST 4
-#define CLOCK_INFO_FREQUENCY_LO_LEN 4
-#define CLOCK_INFO_FREQUENCY_LO_LBN 32
-#define CLOCK_INFO_FREQUENCY_LO_WIDTH 32
-#define CLOCK_INFO_FREQUENCY_HI_OFST 8
-#define CLOCK_INFO_FREQUENCY_HI_LEN 4
-#define CLOCK_INFO_FREQUENCY_HI_LBN 64
-#define CLOCK_INFO_FREQUENCY_HI_WIDTH 32
-#define CLOCK_INFO_FREQUENCY_LBN 32
-#define CLOCK_INFO_FREQUENCY_WIDTH 64
-/* Human-readable ASCII name for clock, with NUL termination */
-#define CLOCK_INFO_NAME_OFST 12
-#define CLOCK_INFO_NAME_LEN 1
-#define CLOCK_INFO_NAME_NUM 16
-#define CLOCK_INFO_NAME_LBN 96
-#define CLOCK_INFO_NAME_WIDTH 8
-
/* SCHED_CREDIT_CHECK_RESULT structuredef */
#define SCHED_CREDIT_CHECK_RESULT_LEN 16
-/* The instance of the scheduler. Refer to XN-200389-AW for the location of
- * these schedulers in the hardware.
+/* The instance of the scheduler. Refer to XN-200389-AW (snic/hnic) and
+ * XN-200425-TC (cdx) for the location of these schedulers in the hardware.
*/
#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_OFST 0
#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_LEN 1
@@ -23914,6 +21687,16 @@
#define SCHED_CREDIT_CHECK_RESULT_DMAC_H2C 0x7 /* enum */
#define SCHED_CREDIT_CHECK_RESULT_HUB_NET_B 0x8 /* enum */
#define SCHED_CREDIT_CHECK_RESULT_HUB_NET_REPLAY 0x9 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_ADAPTER_C2H_C 0xa /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_A2_H2C_C 0xb /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_A3_SOFT_ADAPTOR_C 0xc /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_A4_DPU_WRITE_C 0xd /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_JRC_RRU 0xe /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_CDM_SINK 0xf /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_PCIE_SINK 0x10 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_UPORT_SINK 0x11 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_PSX_SINK 0x12 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_A5_DPU_READ_C 0x13 /* enum */
#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_LBN 0
#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_WIDTH 8
/* The type of node that this result refers to. */
@@ -23923,6 +21706,10 @@
#define SCHED_CREDIT_CHECK_RESULT_DEST 0x0
/* enum: Source node */
#define SCHED_CREDIT_CHECK_RESULT_SOURCE 0x1
+/* enum: Destination node credit type 1 (new to the Keystone schedulers, see
+ * SF-120268-TC)
+ */
+#define SCHED_CREDIT_CHECK_RESULT_DEST_CREDIT1 0x2
#define SCHED_CREDIT_CHECK_RESULT_NODE_TYPE_LBN 8
#define SCHED_CREDIT_CHECK_RESULT_NODE_TYPE_WIDTH 8
/* Level of node in scheduler hierarchy (level 0 is the bottom of the
@@ -23950,592 +21737,6 @@
/***********************************/
-/* MC_CMD_GET_CLOCKS_INFO
- * Get information about the device clocks
- */
-#define MC_CMD_GET_CLOCKS_INFO 0x166
-#undef MC_CMD_0x166_PRIVILEGE_CTG
-
-#define MC_CMD_0x166_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_CLOCKS_INFO_IN msgrequest */
-#define MC_CMD_GET_CLOCKS_INFO_IN_LEN 0
-
-/* MC_CMD_GET_CLOCKS_INFO_OUT msgresponse */
-#define MC_CMD_GET_CLOCKS_INFO_OUT_LENMIN 0
-#define MC_CMD_GET_CLOCKS_INFO_OUT_LENMAX 252
-#define MC_CMD_GET_CLOCKS_INFO_OUT_LENMAX_MCDI2 1008
-#define MC_CMD_GET_CLOCKS_INFO_OUT_LEN(num) (0+28*(num))
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_NUM(len) (((len)-0)/28)
-/* An array of CLOCK_INFO structures. */
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_OFST 0
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_LEN 28
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MINNUM 0
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MAXNUM 9
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MAXNUM_MCDI2 36
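
/* A minimal sketch of walking a MC_CMD_GET_CLOCKS_INFO response: the entry
 * count comes from the response length via the _INFOS_NUM() macro above, and
 * each CLOCK_INFO frequency is reassembled from its little-endian LO/HI
 * dwords. read_le32() is a hypothetical helper loading an unaligned
 * little-endian 32-bit value.
 */
#include <stdint.h>
#include <stdio.h>

uint32_t read_le32(const uint8_t *p);	/* hypothetical LE load helper */

static void dump_clocks(const uint8_t *resp, size_t resp_len)
{
	unsigned int n = MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_NUM(resp_len);
	unsigned int i;

	for (i = 0; i < n; i++) {
		const uint8_t *info = resp +
			MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_OFST +
			i * CLOCK_INFO_LEN;
		uint64_t freq;

		freq = read_le32(info + CLOCK_INFO_FREQUENCY_LO_OFST) |
		       (uint64_t)read_le32(info + CLOCK_INFO_FREQUENCY_HI_OFST) << 32;
		printf("clock %u: %llu Hz (%.*s)\n",
		       read_le32(info + CLOCK_INFO_CLOCK_ID_OFST) & 0xffffu,
		       (unsigned long long)freq, CLOCK_INFO_NAME_NUM,
		       (const char *)(info + CLOCK_INFO_NAME_OFST));
	}
}
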
-
-
-/***********************************/
-/* MC_CMD_VNIC_ENCAP_RULE_ADD
- * Add a rule for detecting encapsulations in the VNIC stage. Currently this
- * only affects checksum validation in VNIC RX - on TX the send descriptor
- * explicitly specifies encapsulation. These rules are per-VNIC, i.e. only
- * apply to the current driver. If a rule matches, then the packet is
- * considered to have the corresponding encapsulation type, and the inner
- * packet is parsed. It is up to the driver to ensure that overlapping rules
- * are not inserted. (If a packet would match multiple rules, a random one of
- * them will be used.) A rule with the exact same match criteria may not be
- * inserted twice (EALREADY). Only a limited number of MATCH_FLAGS values are
- * supported, use MC_CMD_GET_PARSER_DISP_INFO with OP
- * OP_GET_SUPPORTED_VNIC_ENCAP_RULE_MATCHES to get a list of supported
- * combinations. Each driver may only have a limited set of active rules -
- * returns ENOSPC if the caller's table is full.
- */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD 0x16d
-#undef MC_CMD_0x16d_PRIVILEGE_CTG
-
-#define MC_CMD_0x16d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_VNIC_ENCAP_RULE_ADD_IN msgrequest */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_LEN 36
-/* Set to MAE_MPORT_SELECTOR_ASSIGNED. In the future this may be relaxed. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_OFST 0
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_LEN 4
-/* Any non-zero bits other than the ones named below or an unsupported
- * combination will cause the NIC to return EOPNOTSUPP. In the future more
- * flags may be added.
- */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_LEN 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_LBN 0
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_LBN 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_LBN 2
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_LBN 3
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_LBN 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_WIDTH 1
-/* Only if MATCH_ETHER_TYPE is set. Ethertype value as bytes in network order.
- * Currently only IPv4 (0x0800) and IPv6 (0x86DD) ethertypes may be used.
- */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_OFST 8
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_LEN 2
-/* Only if MATCH_OUTER_VLAN is set. VID value as bytes in network order.
- * (Deprecated)
- */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_LBN 80
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WIDTH 12
-/* Only if MATCH_OUTER_VLAN is set. Aligned wrapper for OUTER_VLAN_VID. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WORD_OFST 10
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WORD_LEN 2
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_OFST 10
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_LBN 0
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_WIDTH 12
-/* Only if MATCH_DST_IP is set. IP address as bytes in network order. In the
- * case of IPv4, the IP should be in the first 4 bytes and all other bytes
- * should be zero.
- */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_IP_OFST 12
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_IP_LEN 16
-/* Only if MATCH_IP_PROTO is set. Currently only UDP proto (17) may be used. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_OFST 28
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_LEN 1
-/* Actions that should be applied to packets that match the rule. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ACTION_FLAGS_OFST 29
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ACTION_FLAGS_LEN 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_OFST 29
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_LBN 0
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_OFST 29
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_LBN 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_OFST 29
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_LBN 2
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_WIDTH 1
-/* Only if MATCH_DST_PORT is set. Port number as bytes in network order. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_OFST 30
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_LEN 2
-/* Resulting encapsulation type, as per MAE_MCDI_ENCAP_TYPE enumeration. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_OFST 32
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_LEN 4
-
-/* MC_CMD_VNIC_ENCAP_RULE_ADD_OUT msgresponse */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_LEN 4
-/* Handle to inserted rule. Used for removing the rule. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_HANDLE_OFST 0
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_HANDLE_LEN 4
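
/* A minimal sketch of filling in MC_CMD_VNIC_ENCAP_RULE_ADD_IN for a rule
 * matching UDP packets to the IANA VXLAN port. MAE_MPORT_SELECTOR_ASSIGNED
 * and MAE_MCDI_ENCAP_TYPE_VXLAN are assumed to come from elsewhere in this
 * header; write_le32()/write_be16() are hypothetical store helpers for the
 * little-endian and network-order fields respectively.
 */
#include <stdint.h>
#include <string.h>

void write_le32(uint8_t *p, uint32_t v);	/* hypothetical LE store helper */
void write_be16(uint8_t *p, uint16_t v);	/* hypothetical BE store helper */

static void build_vxlan_encap_rule(uint8_t req[MC_CMD_VNIC_ENCAP_RULE_ADD_IN_LEN])
{
	uint32_t match_flags =
		(1u << MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_LBN) |
		(1u << MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_LBN);

	memset(req, 0, MC_CMD_VNIC_ENCAP_RULE_ADD_IN_LEN);
	write_le32(req + MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_OFST,
		   MAE_MPORT_SELECTOR_ASSIGNED);
	write_le32(req + MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_OFST,
		   match_flags);
	req[MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_OFST] = 17;	/* UDP */
	write_be16(req + MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_OFST, 4789);
	write_le32(req + MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_OFST,
		   MAE_MCDI_ENCAP_TYPE_VXLAN);
}
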
-
-
-/***********************************/
-/* MC_CMD_VNIC_ENCAP_RULE_REMOVE
- * Remove a VNIC encapsulation rule. Packets which would have previously
- * matched the rule will then be considered as unencapsulated. Returns EALREADY
- * if the input HANDLE doesn't correspond to an existing rule.
- */
-#define MC_CMD_VNIC_ENCAP_RULE_REMOVE 0x16e
-#undef MC_CMD_0x16e_PRIVILEGE_CTG
-
-#define MC_CMD_0x16e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN msgrequest */
-#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_LEN 4
-/* Handle which was returned by MC_CMD_VNIC_ENCAP_RULE_ADD. */
-#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_HANDLE_OFST 0
-#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_HANDLE_LEN 4
-
-/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT msgresponse */
-#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT_LEN 0
-
-/* UUID structuredef: An RFC4122 standard UUID. The values here are stored in
- * the endianness specified by the RFC; users should ignore the broken-out
- * fields and instead do straight memory copies to ensure correct ordering.
- */
-#define UUID_LEN 16
-#define UUID_TIME_LOW_OFST 0
-#define UUID_TIME_LOW_LEN 4
-#define UUID_TIME_LOW_LBN 0
-#define UUID_TIME_LOW_WIDTH 32
-#define UUID_TIME_MID_OFST 4
-#define UUID_TIME_MID_LEN 2
-#define UUID_TIME_MID_LBN 32
-#define UUID_TIME_MID_WIDTH 16
-#define UUID_TIME_HI_LBN 52
-#define UUID_TIME_HI_WIDTH 12
-#define UUID_VERSION_LBN 48
-#define UUID_VERSION_WIDTH 4
-#define UUID_RESERVED_LBN 64
-#define UUID_RESERVED_WIDTH 2
-#define UUID_CLK_SEQ_LBN 66
-#define UUID_CLK_SEQ_WIDTH 14
-#define UUID_NODE_OFST 10
-#define UUID_NODE_LEN 6
-#define UUID_NODE_LBN 80
-#define UUID_NODE_WIDTH 48
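
/* A minimal sketch following the guidance above: treat the UUID as an opaque
 * 16-byte blob and copy it whole, rather than assembling it from the
 * broken-out TIME_LOW/TIME_MID/... fields.
 */
#include <stdint.h>
#include <string.h>

static void copy_uuid(uint8_t dst[UUID_LEN], const uint8_t *rfc4122_uuid)
{
	/* Bytes are already in RFC 4122 order; no per-field byte swapping. */
	memcpy(dst, rfc4122_uuid, UUID_LEN);
}
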
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_ALLOC
- * Create a handle to a datapath plugin's extension. This involves finding a
- * currently-loaded plugin offering the given functionality (as identified by
- * the UUID) and allocating a handle to track the usage of it. Plugin
- * functionality is identified by 'extension' rather than any other identifier
- * so that a single plugin bitfile may offer more than one piece of independent
- * functionality. If two bitfiles are loaded which both offer the same
- * extension, then the metadata is interrogated further to determine which is
- * the newest and that is the one opened. See SF-123625-SW for architectural
- * detail on datapath plugins.
- */
-#define MC_CMD_PLUGIN_ALLOC 0x1ad
-#undef MC_CMD_0x1ad_PRIVILEGE_CTG
-
-#define MC_CMD_0x1ad_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_ALLOC_IN msgrequest */
-#define MC_CMD_PLUGIN_ALLOC_IN_LEN 24
-/* The functionality requested of the plugin, as a UUID structure */
-#define MC_CMD_PLUGIN_ALLOC_IN_UUID_OFST 0
-#define MC_CMD_PLUGIN_ALLOC_IN_UUID_LEN 16
-/* Additional options for opening the handle */
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAGS_OFST 16
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAGS_LEN 4
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_OFST 16
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_LBN 0
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_WIDTH 1
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_OFST 16
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_LBN 1
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_WIDTH 1
-/* Load the extension only if it is in the specified administrative group.
- * Specify ANY to load the extension wherever it is found (if there are
- * multiple choices then the extension with the highest MINOR_VER/PATCH_VER
- * will be loaded). See MC_CMD_PLUGIN_GET_META_GLOBAL for a description of
- * administrative groups.
- */
-#define MC_CMD_PLUGIN_ALLOC_IN_ADMIN_GROUP_OFST 20
-#define MC_CMD_PLUGIN_ALLOC_IN_ADMIN_GROUP_LEN 2
-/* enum: Load the extension from any ADMIN_GROUP. */
-#define MC_CMD_PLUGIN_ALLOC_IN_ANY 0xffff
-/* Reserved */
-#define MC_CMD_PLUGIN_ALLOC_IN_RESERVED_OFST 22
-#define MC_CMD_PLUGIN_ALLOC_IN_RESERVED_LEN 2
-
-/* MC_CMD_PLUGIN_ALLOC_OUT msgresponse */
-#define MC_CMD_PLUGIN_ALLOC_OUT_LEN 4
-/* Unique identifier of this usage */
-#define MC_CMD_PLUGIN_ALLOC_OUT_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_ALLOC_OUT_HANDLE_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_FREE
- * Delete a handle to a plugin's extension.
- */
-#define MC_CMD_PLUGIN_FREE 0x1ae
-#undef MC_CMD_0x1ae_PRIVILEGE_CTG
-
-#define MC_CMD_0x1ae_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_FREE_IN msgrequest */
-#define MC_CMD_PLUGIN_FREE_IN_LEN 4
-/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
-#define MC_CMD_PLUGIN_FREE_IN_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_FREE_IN_HANDLE_LEN 4
-
-/* MC_CMD_PLUGIN_FREE_OUT msgresponse */
-#define MC_CMD_PLUGIN_FREE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_GET_META_GLOBAL
- * Returns the global metadata applying to the whole plugin extension. See the
- * other metadata calls for subtypes of data.
- */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL 0x1af
-#undef MC_CMD_0x1af_PRIVILEGE_CTG
-
-#define MC_CMD_0x1af_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_GET_META_GLOBAL_IN msgrequest */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_LEN 4
-/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_HANDLE_LEN 4
-
-/* MC_CMD_PLUGIN_GET_META_GLOBAL_OUT msgresponse */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_LEN 36
-/* Unique identifier of this plugin extension. This is identical to the value
- * which was requested when the handle was allocated.
- */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_UUID_OFST 0
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_UUID_LEN 16
-/* semver sub-version of this plugin extension */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MINOR_VER_OFST 16
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MINOR_VER_LEN 2
-/* semver micro-version of this plugin extension */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PATCH_VER_OFST 18
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PATCH_VER_LEN 2
-/* Number of different messages which can be sent to this extension */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_NUM_MSGS_OFST 20
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_NUM_MSGS_LEN 4
-/* Byte offset within the VI window of the plugin's mapped CSR window. */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_OFFSET_OFST 24
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_OFFSET_LEN 2
-/* Number of bytes mapped through to the plugin's CSRs. 0 if that feature was
- * not requested by the plugin (in which case MAPPED_CSR_OFFSET and
- * MAPPED_CSR_FLAGS are ignored).
- */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_SIZE_OFST 26
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_SIZE_LEN 2
-/* Flags indicating how to perform the CSR window mapping. */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAGS_OFST 28
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAGS_LEN 4
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_OFST 28
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_LBN 0
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_WIDTH 1
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_OFST 28
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_LBN 1
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_WIDTH 1
-/* Identifier of the set of extensions which all change state together.
- * Extensions having the same ADMIN_GROUP will always load and unload at the
- * same time. ADMIN_GROUP values themselves are arbitrary (but they contain a
- * generation number as an implementation detail to ensure that they're not
- * reused rapidly).
- */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_ADMIN_GROUP_OFST 32
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_ADMIN_GROUP_LEN 1
-/* Bitshift in MC_CMD_DEVEL_CLIENT_PRIVILEGE_MODIFY's MASK parameters
- * corresponding to this extension, i.e. set the bit 1<<PRIVILEGE_BIT to permit
- * access to this extension.
- */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PRIVILEGE_BIT_OFST 33
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PRIVILEGE_BIT_LEN 1
-/* Reserved */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_RESERVED_OFST 34
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_RESERVED_LEN 2
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_GET_META_PUBLISHER
- * Returns metadata supplied by the plugin author which describes this
- * extension in a human-readable way. Contrast with
- * MC_CMD_PLUGIN_GET_META_GLOBAL, which returns information needed for software
- * to operate.
- */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER 0x1b0
-#undef MC_CMD_0x1b0_PRIVILEGE_CTG
-
-#define MC_CMD_0x1b0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_GET_META_PUBLISHER_IN msgrequest */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_LEN 12
-/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_HANDLE_LEN 4
-/* Category of data to return */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_SUBTYPE_OFST 4
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_SUBTYPE_LEN 4
-/* enum: Top-level information about the extension. The returned data is an
- * array of key/value pairs using the keys in RFC5013 (Dublin Core) to describe
- * the extension. The data is a back-to-back list of zero-terminated strings;
- * the even-numbered fields (0,2,4,...) are keys and their following odd-
- * numbered fields are the corresponding values. Both keys and values are
- * nominally UTF-8. Per RFC5013, the same key may be repeated any number of
- * times. Note that all information (including the key/value structure itself
- * and the UTF-8 encoding) may have been provided by the plugin author, so
- * callers must be cautious about parsing it. Callers should parse only the
- * top-level structure to separate out the keys and values; the contents of the
- * values are not expected to be machine-readable.
- */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_EXTENSION_KVS 0x0
-/* Byte position of the data to be returned within the full data block of the
- * given SUBTYPE.
- */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_OFFSET_OFST 8
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_OFFSET_LEN 4
-
-/* MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT msgresponse */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LENMIN 4
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LENMAX 252
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LEN(num) (4+1*(num))
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_NUM(len) (((len)-4)/1)
-/* Full length of the data block of the requested SUBTYPE, in bytes. */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_TOTAL_SIZE_OFST 0
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_TOTAL_SIZE_LEN 4
-/* The information requested by SUBTYPE. */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_OFST 4
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_LEN 1
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_MINNUM 0
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_MAXNUM 248
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_MAXNUM_MCDI2 1016
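
/* A minimal sketch of walking the EXTENSION_KVS data once it has been fully
 * fetched (possibly over several requests using the OFFSET field): the blob
 * is a back-to-back sequence of NUL-terminated strings, alternating key and
 * value. The callback type is an assumption for illustration.
 */
#include <stddef.h>
#include <string.h>

static void walk_extension_kvs(const char *data, size_t len,
			       void (*cb)(const char *key, const char *value))
{
	size_t pos = 0;

	while (pos < len) {
		const char *key = data + pos;
		size_t key_len = strnlen(key, len - pos);
		const char *value;
		size_t value_len;

		if (pos + key_len == len)
			break;			/* key not NUL-terminated */
		pos += key_len + 1;
		value = data + pos;
		value_len = strnlen(value, len - pos);
		if (pos + value_len == len)
			break;			/* value not NUL-terminated */
		cb(key, value);
		pos += value_len + 1;
	}
}
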
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_GET_META_MSG
- * Returns the simple metadata for a specific plugin request message. This
- * supplies information necessary for the host to know how to build an
- * MC_CMD_PLUGIN_REQ request.
- */
-#define MC_CMD_PLUGIN_GET_META_MSG 0x1b1
-#undef MC_CMD_0x1b1_PRIVILEGE_CTG
-
-#define MC_CMD_0x1b1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_GET_META_MSG_IN msgrequest */
-#define MC_CMD_PLUGIN_GET_META_MSG_IN_LEN 8
-/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
-#define MC_CMD_PLUGIN_GET_META_MSG_IN_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_GET_META_MSG_IN_HANDLE_LEN 4
-/* Unique message ID to obtain */
-#define MC_CMD_PLUGIN_GET_META_MSG_IN_ID_OFST 4
-#define MC_CMD_PLUGIN_GET_META_MSG_IN_ID_LEN 4
-
-/* MC_CMD_PLUGIN_GET_META_MSG_OUT msgresponse */
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_LEN 44
-/* Unique message ID. This is the same value as the input parameter; it exists
- * to allow future MCDI extensions which enumerate all messages.
- */
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_ID_OFST 0
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_ID_LEN 4
-/* Packed index number of this message, assigned by the MC to give each message
- * a unique ID in an array to allow for more efficient storage/management.
- */
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_INDEX_OFST 4
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_INDEX_LEN 4
-/* Short human-readable codename for this message. This is conventionally
- * formatted as a C identifier in the basic ASCII character set with any spare
- * bytes at the end set to 0, however this convention is not enforced by the MC
- * so consumers must check for all potential malformations before using it for
- * a trusted purpose.
- */
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_NAME_OFST 8
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_NAME_LEN 32
-/* Number of bytes of data which must be passed from the host kernel to the MC
- * for this message's payload, and which are passed back again in the response.
- * The MC's plugin metadata loader will have validated that the number of bytes
- * specified here will fit in to MC_CMD_PLUGIN_REQ_IN_DATA in a single MCDI
- * message.
- */
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_DATA_SIZE_OFST 40
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_DATA_SIZE_LEN 4
-
-/* PLUGIN_EXTENSION structuredef: Used within MC_CMD_PLUGIN_GET_ALL to describe
- * an individual extension.
- */
-#define PLUGIN_EXTENSION_LEN 20
-#define PLUGIN_EXTENSION_UUID_OFST 0
-#define PLUGIN_EXTENSION_UUID_LEN 16
-#define PLUGIN_EXTENSION_UUID_LBN 0
-#define PLUGIN_EXTENSION_UUID_WIDTH 128
-#define PLUGIN_EXTENSION_ADMIN_GROUP_OFST 16
-#define PLUGIN_EXTENSION_ADMIN_GROUP_LEN 1
-#define PLUGIN_EXTENSION_ADMIN_GROUP_LBN 128
-#define PLUGIN_EXTENSION_ADMIN_GROUP_WIDTH 8
-#define PLUGIN_EXTENSION_FLAG_ENABLED_LBN 136
-#define PLUGIN_EXTENSION_FLAG_ENABLED_WIDTH 1
-#define PLUGIN_EXTENSION_RESERVED_LBN 137
-#define PLUGIN_EXTENSION_RESERVED_WIDTH 23
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_GET_ALL
- * Returns a list of all plugin extensions currently loaded and available. The
- * UUIDs returned can be passed to MC_CMD_PLUGIN_ALLOC in order to obtain more
- * detailed metadata via the MC_CMD_PLUGIN_GET_META_* family of requests. The
- * ADMIN_GROUP field collects how extensions are grouped in to units which are
- * loaded/unloaded together; extensions with the same value are in the same
- * group.
- */
-#define MC_CMD_PLUGIN_GET_ALL 0x1b2
-#undef MC_CMD_0x1b2_PRIVILEGE_CTG
-
-#define MC_CMD_0x1b2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_GET_ALL_IN msgrequest */
-#define MC_CMD_PLUGIN_GET_ALL_IN_LEN 4
-/* Additional options for querying. Note that if neither FLAG_INCLUDE_ENABLED
- * nor FLAG_INCLUDE_DISABLED is specified then the result set will be empty.
- */
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAGS_OFST 0
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAGS_LEN 4
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_ENABLED_OFST 0
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_ENABLED_LBN 0
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_ENABLED_WIDTH 1
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_DISABLED_OFST 0
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_DISABLED_LBN 1
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_DISABLED_WIDTH 1
-
-/* MC_CMD_PLUGIN_GET_ALL_OUT msgresponse */
-#define MC_CMD_PLUGIN_GET_ALL_OUT_LENMIN 0
-#define MC_CMD_PLUGIN_GET_ALL_OUT_LENMAX 240
-#define MC_CMD_PLUGIN_GET_ALL_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PLUGIN_GET_ALL_OUT_LEN(num) (0+20*(num))
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_NUM(len) (((len)-0)/20)
-/* The list of available plugin extensions, as an array of PLUGIN_EXTENSION
- * structs.
- */
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_OFST 0
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_LEN 20
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_MINNUM 0
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_MAXNUM 12
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_MAXNUM_MCDI2 51
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_REQ
- * Send a command to a plugin. A plugin may define an arbitrary number of
- * 'messages' which it allows applications on the host system to send, each
- * identified by a 32-bit ID.
- */
-#define MC_CMD_PLUGIN_REQ 0x1b3
-#undef MC_CMD_0x1b3_PRIVILEGE_CTG
-
-#define MC_CMD_0x1b3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_REQ_IN msgrequest */
-#define MC_CMD_PLUGIN_REQ_IN_LENMIN 8
-#define MC_CMD_PLUGIN_REQ_IN_LENMAX 252
-#define MC_CMD_PLUGIN_REQ_IN_LENMAX_MCDI2 1020
-#define MC_CMD_PLUGIN_REQ_IN_LEN(num) (8+1*(num))
-#define MC_CMD_PLUGIN_REQ_IN_DATA_NUM(len) (((len)-8)/1)
-/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
-#define MC_CMD_PLUGIN_REQ_IN_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_REQ_IN_HANDLE_LEN 4
-/* Message ID defined by the plugin author */
-#define MC_CMD_PLUGIN_REQ_IN_ID_OFST 4
-#define MC_CMD_PLUGIN_REQ_IN_ID_LEN 4
-/* Data blob being the parameter to the message. This must be of the length
- * specified by MC_CMD_PLUGIN_GET_META_MSG_OUT_DATA_SIZE.
- */
-#define MC_CMD_PLUGIN_REQ_IN_DATA_OFST 8
-#define MC_CMD_PLUGIN_REQ_IN_DATA_LEN 1
-#define MC_CMD_PLUGIN_REQ_IN_DATA_MINNUM 0
-#define MC_CMD_PLUGIN_REQ_IN_DATA_MAXNUM 244
-#define MC_CMD_PLUGIN_REQ_IN_DATA_MAXNUM_MCDI2 1012
-
-/* MC_CMD_PLUGIN_REQ_OUT msgresponse */
-#define MC_CMD_PLUGIN_REQ_OUT_LENMIN 0
-#define MC_CMD_PLUGIN_REQ_OUT_LENMAX 252
-#define MC_CMD_PLUGIN_REQ_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PLUGIN_REQ_OUT_LEN(num) (0+1*(num))
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_NUM(len) (((len)-0)/1)
-/* The input data, as transformed and/or updated by the plugin's eBPF. Will be
- * the same size as the input DATA parameter.
- */
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_OFST 0
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_LEN 1
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_MINNUM 0
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_MAXNUM 252
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_MAXNUM_MCDI2 1020
-
-/* DESC_ADDR_REGION structuredef: Describes a contiguous region of DESC_ADDR
- * space that maps to a contiguous region of TRGT_ADDR space. Addresses
- * DESC_ADDR in the range [DESC_ADDR_BASE:DESC_ADDR_BASE + 1 <<
- * WINDOW_SIZE_LOG2) map to TRGT_ADDR = DESC_ADDR - DESC_ADDR_BASE +
- * TRGT_ADDR_BASE.
- */
-#define DESC_ADDR_REGION_LEN 32
-/* The start of the region in DESC_ADDR space. */
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_OFST 0
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LEN 8
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_OFST 0
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_LEN 4
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_LBN 0
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_WIDTH 32
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_OFST 4
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_LEN 4
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_LBN 32
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_WIDTH 32
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LBN 0
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_WIDTH 64
-/* The start of the region in TRGT_ADDR space. Drivers can set this via
- * MC_CMD_SET_DESC_ADDR_REGIONS.
- */
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_OFST 8
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LEN 8
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_OFST 8
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_LEN 4
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_LBN 64
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_WIDTH 32
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_OFST 12
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_LEN 4
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_LBN 96
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_WIDTH 32
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LBN 64
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_WIDTH 64
-/* The size of the region. */
-#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_OFST 16
-#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_LEN 4
-#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_LBN 128
-#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_WIDTH 32
-/* The alignment restriction on TRGT_ADDR. TRGT_ADDR values set by the driver
- * must be a multiple of 1 << TRGT_ADDR_ALIGN_LOG2.
- */
-#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_OFST 20
-#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_LEN 4
-#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_LBN 160
-#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_WIDTH 32
-#define DESC_ADDR_REGION_RSVD_OFST 24
-#define DESC_ADDR_REGION_RSVD_LEN 8
-#define DESC_ADDR_REGION_RSVD_LO_OFST 24
-#define DESC_ADDR_REGION_RSVD_LO_LEN 4
-#define DESC_ADDR_REGION_RSVD_LO_LBN 192
-#define DESC_ADDR_REGION_RSVD_LO_WIDTH 32
-#define DESC_ADDR_REGION_RSVD_HI_OFST 28
-#define DESC_ADDR_REGION_RSVD_HI_LEN 4
-#define DESC_ADDR_REGION_RSVD_HI_LBN 224
-#define DESC_ADDR_REGION_RSVD_HI_WIDTH 32
-#define DESC_ADDR_REGION_RSVD_LBN 192
-#define DESC_ADDR_REGION_RSVD_WIDTH 64
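
/* A minimal sketch of the translation described above: a DESC_ADDR that falls
 * inside a region maps to TRGT_ADDR by substituting the region's target base
 * for its descriptor base. Returns false if the address is outside the
 * region's window.
 */
#include <stdbool.h>
#include <stdint.h>

static bool desc_addr_to_trgt_addr(uint64_t desc_addr, uint64_t desc_addr_base,
				   uint64_t trgt_addr_base,
				   unsigned int window_size_log2,
				   uint64_t *trgt_addr)
{
	uint64_t window = (uint64_t)1 << window_size_log2;

	if (desc_addr < desc_addr_base || desc_addr - desc_addr_base >= window)
		return false;
	*trgt_addr = desc_addr - desc_addr_base + trgt_addr_base;
	return true;
}
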
-
-
-/***********************************/
/* MC_CMD_GET_DESC_ADDR_INFO
* Returns a description of the mapping from DESC_ADDR to TRGT_ADDR for the calling function's address space.
*/
@@ -24836,122 +22037,6 @@
/***********************************/
-/* MC_CMD_GET_BOARD_ATTR
- * Retrieve physical build-level board attributes as configured at
- * manufacturing stage. Fields originate from EEPROM and per-platform constants
- * in firmware. Fields are used in development to identify/differentiate
- * boards based on build levels/parameters, and also in manufacturing to
- * cross-check that "what was programmed in manufacturing" is the same as
- * "what firmware thinks has been programmed", as there are two layers of
- * translation within firmware before the attributes reach this MCDI handler.
- * Some parameters are
- * retrieved as part of other commands and therefore not replicated here. See
- * GET_VERSION_OUT.
- */
-#define MC_CMD_GET_BOARD_ATTR 0x1c6
-#undef MC_CMD_0x1c6_PRIVILEGE_CTG
-
-#define MC_CMD_0x1c6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_BOARD_ATTR_IN msgrequest */
-#define MC_CMD_GET_BOARD_ATTR_IN_LEN 0
-
-/* MC_CMD_GET_BOARD_ATTR_OUT msgresponse */
-#define MC_CMD_GET_BOARD_ATTR_OUT_LEN 16
-/* Defines board capabilities and validity of attributes returned in this
- * response-message.
- */
-#define MC_CMD_GET_BOARD_ATTR_OUT_FLAGS_OFST 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_FLAGS_LEN 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_FAN_OFST 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_FAN_LBN 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_FAN_WIDTH 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_SOC_OFST 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_SOC_LBN 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_SOC_WIDTH 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_AUX_POWER_OFST 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_AUX_POWER_LBN 2
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_AUX_POWER_WIDTH 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_ATTRIBUTES_OFST 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_ATTRIBUTES_LEN 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_SOC_EE_OFST 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_SOC_EE_LBN 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_SOC_EE_WIDTH 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_SUC_EE_OFST 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_SUC_EE_LBN 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_SUC_EE_WIDTH 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_FPGA_VOLTAGES_SUPPORTED_OFST 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_FPGA_VOLTAGES_SUPPORTED_LBN 16
-#define MC_CMD_GET_BOARD_ATTR_OUT_FPGA_VOLTAGES_SUPPORTED_WIDTH 8
-/* enum: The FPGA voltage on the adapter can be set to low */
-#define MC_CMD_FPGA_VOLTAGE_LOW 0x0
-/* enum: The FPGA voltage on the adapter can be set to regular */
-#define MC_CMD_FPGA_VOLTAGE_REG 0x1
-/* enum: The FPGA voltage on the adapter can be set to high */
-#define MC_CMD_FPGA_VOLTAGE_HIGH 0x2
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_COUNT_OFST 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_COUNT_LBN 24
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_COUNT_WIDTH 8
-/* An array of cage types on the board */
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_OFST 8
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_LEN 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_NUM 8
-/* enum: The cages are not known */
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_UNKNOWN 0x0
-/* enum: The cages are SFP/SFP+ */
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_SFP 0x1
-/* enum: The cages are QSFP/QSFP+ */
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_QSFP 0x2
-
-
-/***********************************/
-/* MC_CMD_GET_SOC_STATE
- * Retrieve current state of the System-on-Chip. This command is valid when
- * MC_CMD_GET_BOARD_ATTR:HAS_SOC is set.
- */
-#define MC_CMD_GET_SOC_STATE 0x1c7
-#undef MC_CMD_0x1c7_PRIVILEGE_CTG
-
-#define MC_CMD_0x1c7_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_SOC_STATE_IN msgrequest */
-#define MC_CMD_GET_SOC_STATE_IN_LEN 0
-
-/* MC_CMD_GET_SOC_STATE_OUT msgresponse */
-#define MC_CMD_GET_SOC_STATE_OUT_LEN 12
-/* Status flags for the SoC */
-#define MC_CMD_GET_SOC_STATE_OUT_FLAGS_OFST 0
-#define MC_CMD_GET_SOC_STATE_OUT_FLAGS_LEN 4
-#define MC_CMD_GET_SOC_STATE_OUT_SHOULD_THROTTLE_OFST 0
-#define MC_CMD_GET_SOC_STATE_OUT_SHOULD_THROTTLE_LBN 0
-#define MC_CMD_GET_SOC_STATE_OUT_SHOULD_THROTTLE_WIDTH 1
-#define MC_CMD_GET_SOC_STATE_OUT_OS_RECOVERY_REQUIRED_OFST 0
-#define MC_CMD_GET_SOC_STATE_OUT_OS_RECOVERY_REQUIRED_LBN 1
-#define MC_CMD_GET_SOC_STATE_OUT_OS_RECOVERY_REQUIRED_WIDTH 1
-#define MC_CMD_GET_SOC_STATE_OUT_WDT_FIRED_OFST 0
-#define MC_CMD_GET_SOC_STATE_OUT_WDT_FIRED_LBN 2
-#define MC_CMD_GET_SOC_STATE_OUT_WDT_FIRED_WIDTH 1
-/* Status fields for the SoC */
-#define MC_CMD_GET_SOC_STATE_OUT_ATTRIBUTES_OFST 4
-#define MC_CMD_GET_SOC_STATE_OUT_ATTRIBUTES_LEN 4
-#define MC_CMD_GET_SOC_STATE_OUT_RUN_STATE_OFST 4
-#define MC_CMD_GET_SOC_STATE_OUT_RUN_STATE_LBN 0
-#define MC_CMD_GET_SOC_STATE_OUT_RUN_STATE_WIDTH 8
-/* enum: Power on (set by SUC on power up) */
-#define MC_CMD_GET_SOC_STATE_OUT_SOC_BOOT 0x0
-/* enum: Running bootloader */
-#define MC_CMD_GET_SOC_STATE_OUT_SOC_BOOTLOADER 0x1
-/* enum: Bootloader has started OS. OS is booting */
-#define MC_CMD_GET_SOC_STATE_OUT_SOC_OS_START 0x2
-/* enum: OS is running */
-#define MC_CMD_GET_SOC_STATE_OUT_SOC_OS_RUNNING 0x3
-/* enum: Maintenance OS is running */
-#define MC_CMD_GET_SOC_STATE_OUT_SOC_OS_MAINTENANCE 0x4
-/* Number of SoC resets since power on */
-#define MC_CMD_GET_SOC_STATE_OUT_RESET_COUNT_OFST 8
-#define MC_CMD_GET_SOC_STATE_OUT_RESET_COUNT_LEN 4
-
-
-/***********************************/
/* MC_CMD_CHECK_SCHEDULER_CREDITS
* For debugging purposes. For each source and destination node in the hardware
* schedulers, check whether the number of credits is as it should be. This
@@ -25010,76 +22095,6 @@
/***********************************/
-/* MC_CMD_TXQ_STATS
- * Query per-TXQ statistics.
- */
-#define MC_CMD_TXQ_STATS 0x1d5
-#undef MC_CMD_0x1d5_PRIVILEGE_CTG
-
-#define MC_CMD_0x1d5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TXQ_STATS_IN msgrequest */
-#define MC_CMD_TXQ_STATS_IN_LEN 8
-/* Instance of TXQ to retrieve statistics for */
-#define MC_CMD_TXQ_STATS_IN_INSTANCE_OFST 0
-#define MC_CMD_TXQ_STATS_IN_INSTANCE_LEN 4
-/* Flags for the request */
-#define MC_CMD_TXQ_STATS_IN_FLAGS_OFST 4
-#define MC_CMD_TXQ_STATS_IN_FLAGS_LEN 4
-#define MC_CMD_TXQ_STATS_IN_CLEAR_OFST 4
-#define MC_CMD_TXQ_STATS_IN_CLEAR_LBN 0
-#define MC_CMD_TXQ_STATS_IN_CLEAR_WIDTH 1
-
-/* MC_CMD_TXQ_STATS_OUT msgresponse */
-#define MC_CMD_TXQ_STATS_OUT_LENMIN 0
-#define MC_CMD_TXQ_STATS_OUT_LENMAX 248
-#define MC_CMD_TXQ_STATS_OUT_LENMAX_MCDI2 1016
-#define MC_CMD_TXQ_STATS_OUT_LEN(num) (0+8*(num))
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_NUM(len) (((len)-0)/8)
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_OFST 0
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LEN 8
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_OFST 0
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_LEN 4
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_LBN 0
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_WIDTH 32
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_OFST 4
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_LEN 4
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_LBN 32
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_WIDTH 32
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_MINNUM 0
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_MAXNUM 31
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_MAXNUM_MCDI2 127
-#define MC_CMD_TXQ_STATS_CTPIO_MAX_FILL 0x0 /* enum */
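
/* A minimal sketch of reading one statistic (e.g. CTPIO_MAX_FILL) out of a
 * MC_CMD_TXQ_STATS response; each statistic is a little-endian 64-bit value
 * indexed by the enum above. read_le32() is a hypothetical helper loading an
 * unaligned little-endian 32-bit value.
 */
#include <stddef.h>
#include <stdint.h>

uint32_t read_le32(const uint8_t *p);	/* hypothetical LE load helper */

static int txq_stat_read(const uint8_t *resp, size_t resp_len,
			 unsigned int stat, uint64_t *value)
{
	const uint8_t *p;

	if (stat >= (unsigned int)MC_CMD_TXQ_STATS_OUT_STATISTICS_NUM(resp_len))
		return -1;		/* statistic not present in response */
	p = resp + MC_CMD_TXQ_STATS_OUT_STATISTICS_OFST +
	    stat * MC_CMD_TXQ_STATS_OUT_STATISTICS_LEN;
	*value = read_le32(p) | (uint64_t)read_le32(p + 4) << 32;
	return 0;
}
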
-
-/* FUNCTION_PERSONALITY structuredef: The meanings of the personalities are
- * defined in SF-120734-TC with more information in SF-122717-TC.
- */
-#define FUNCTION_PERSONALITY_LEN 4
-#define FUNCTION_PERSONALITY_ID_OFST 0
-#define FUNCTION_PERSONALITY_ID_LEN 4
-/* enum: Function has no assigned personality */
-#define FUNCTION_PERSONALITY_NULL 0x0
-/* enum: Function has an EF100-style function control window and VI windows
- * with both EF100 and vDPA doorbells.
- */
-#define FUNCTION_PERSONALITY_EF100 0x1
-/* enum: Function has virtio net device configuration registers and doorbells
- * for virtio queue pairs.
- */
-#define FUNCTION_PERSONALITY_VIRTIO_NET 0x2
-/* enum: Function has virtio block device configuration registers and a
- * doorbell for a single virtqueue.
- */
-#define FUNCTION_PERSONALITY_VIRTIO_BLK 0x3
-/* enum: Function is a Xilinx acceleration device - management function */
-#define FUNCTION_PERSONALITY_ACCEL_MGMT 0x4
-/* enum: Function is a Xilinx acceleration device - user function */
-#define FUNCTION_PERSONALITY_ACCEL_USR 0x5
-#define FUNCTION_PERSONALITY_ID_LBN 0
-#define FUNCTION_PERSONALITY_ID_WIDTH 32
-
-
-/***********************************/
/* MC_CMD_VIRTIO_GET_FEATURES
* Get a list of the virtio features supported by the device.
*/
@@ -25162,37 +22177,6 @@
/***********************************/
-/* MC_CMD_VIRTIO_GET_CAPABILITIES
- * Get virtio capabilities supported by the device. Returns general virtio
- * capabilities and limitations of the hardware / firmware implementation
- * (hardware device as a whole), rather than that of individual configured
- * virtio devices. At present, only the absolute maximum number of queues
- * allowed on multi-queue devices is returned. Response is expected to be
- * extended as necessary in the future.
- */
-#define MC_CMD_VIRTIO_GET_CAPABILITIES 0x1d3
-#undef MC_CMD_0x1d3_PRIVILEGE_CTG
-
-#define MC_CMD_0x1d3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_VIRTIO_GET_CAPABILITIES_IN msgrequest */
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_IN_LEN 4
-/* Type of device to get capabilities for. Matches the device id as defined by
- * the virtio spec.
- */
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_IN_DEVICE_ID_OFST 0
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_IN_DEVICE_ID_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_VIRTIO_GET_FEATURES/MC_CMD_VIRTIO_GET_FEATURES_IN/DEVICE_ID */
-
-/* MC_CMD_VIRTIO_GET_CAPABILITIES_OUT msgresponse */
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_OUT_LEN 4
-/* Maximum number of queues supported for a single device instance */
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_OUT_MAX_QUEUES_OFST 0
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_OUT_MAX_QUEUES_LEN 4
-
-
-/***********************************/
/* MC_CMD_VIRTIO_INIT_QUEUE
* Create a virtio virtqueue. Fails with EALREADY if the queue already exists.
* Fails with ENOSUP if a feature is requested that isn't supported. Fails with
@@ -25474,866 +22458,6 @@
#define PCIE_FUNCTION_INTF_LBN 32
#define PCIE_FUNCTION_INTF_WIDTH 32
-/* QUEUE_ID structuredef: Structure representing an absolute queue identifier
- * (absolute VI number + VI relative queue number). On Keystone, a VI can
- * contain multiple queues (at present, up to 2), each with separate controls
- * for direction. This structure is required to uniquely identify the absolute
- * source queue for descriptor proxy functions.
- */
-#define QUEUE_ID_LEN 4
-/* Absolute VI number */
-#define QUEUE_ID_ABS_VI_OFST 0
-#define QUEUE_ID_ABS_VI_LEN 2
-#define QUEUE_ID_ABS_VI_LBN 0
-#define QUEUE_ID_ABS_VI_WIDTH 16
-/* Relative queue number within the VI */
-#define QUEUE_ID_REL_QUEUE_LBN 16
-#define QUEUE_ID_REL_QUEUE_WIDTH 1
-#define QUEUE_ID_RESERVED_LBN 17
-#define QUEUE_ID_RESERVED_WIDTH 15
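
/* A minimal sketch of packing a QUEUE_ID into its 32-bit on-the-wire form
 * from the bit positions above (little-endian bit numbering).
 */
#include <stdint.h>

static uint32_t queue_id_pack(uint16_t abs_vi, unsigned int rel_queue)
{
	return (uint32_t)abs_vi << QUEUE_ID_ABS_VI_LBN |
	       (uint32_t)(rel_queue & 1) << QUEUE_ID_REL_QUEUE_LBN;
}
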
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_CREATE
- * Descriptor proxy functions are abstract devices that forward all requests
- * submitted to the host PCIe function (descriptors submitted to Virtio or
- * EF100 queues) to be handled on another function (most commonly on the
- * embedded Application Processor), via EF100 descriptor proxy, memory-to-
- * memory and descriptor-to-completion mechanisms. Primary user is Virtio-blk
- * subsystem, see SF-122927-TC. This function allocates a new descriptor proxy
- * function on the host and assigns a user-defined label. The actual function
- * configuration is not persisted until the caller configures it with
- * MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN and commits with
- * MC_CMD_DESC_PROXY_FUNC_COMMIT_IN.
- */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE 0x172
-#undef MC_CMD_0x172_PRIVILEGE_CTG
-
-#define MC_CMD_0x172_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_CREATE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LEN 52
-/* PCIe Function ID to allocate (as struct PCIE_FUNCTION). Set to
- * {PF_ANY,VF_ANY,interface} for "any available function" Set to
- * {PF_ANY,VF_NULL,interface} for "any available PF"
- */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LEN 8
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_LBN 0
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_LBN 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_PF_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_PF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_VF_OFST 2
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_VF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_INTF_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_INTF_LEN 4
-/* The personality to set. The meanings of the personalities are defined in
- * SF-120734-TC with more information in SF-122717-TC. At present, we only
- * support proxying for VIRTIO_BLK
- */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_PERSONALITY_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_PERSONALITY_LEN 4
-/* Enum values, see field(s): */
-/* FUNCTION_PERSONALITY/ID */
-/* User-defined label (zero-terminated ASCII string) to uniquely identify the
- * function
- */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LABEL_OFST 12
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LABEL_LEN 40
-
-/* MC_CMD_DESC_PROXY_FUNC_CREATE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_LEN 12
-/* Handle to the descriptor proxy function */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_HANDLE_LEN 4
-/* Allocated function ID (as struct PCIE_FUNCTION) */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LEN 8
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_LBN 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_LBN 64
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_PF_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_PF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_VF_OFST 6
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_VF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_INTF_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_INTF_LEN 4
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_DESTROY
- * Remove an existing descriptor proxy function. The underlying function
- * personality and configuration revert to the factory default. Function
- * configuration is committed immediately to the specified store and any function
- * ownership is released.
- */
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY 0x173
-#undef MC_CMD_0x173_PRIVILEGE_CTG
-
-#define MC_CMD_0x173_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_DESTROY_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_LEN 44
-/* User-defined label (zero-terminated ASCII string) to uniquely identify the
- * function
- */
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_LABEL_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_LABEL_LEN 40
-/* Store from which to remove function configuration */
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_STORE_OFST 40
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_STORE_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_DESC_PROXY_FUNC_COMMIT/MC_CMD_DESC_PROXY_FUNC_COMMIT_IN/STORE */
-
-/* MC_CMD_DESC_PROXY_FUNC_DESTROY_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_OUT_LEN 0
-
-/* VIRTIO_BLK_CONFIG structuredef: Virtio block device configuration. See
- * Virtio specification v1.1, Sections 5.2.3 and 6 for definition of feature
- * bits. See Virtio specification v1.1, Section 5.2.4 (struct
- * virtio_blk_config) for definition of remaining configuration fields
- */
-#define VIRTIO_BLK_CONFIG_LEN 68
-/* Virtio block device features to advertise, per Virtio 1.1, 5.2.3 and 6 */
-#define VIRTIO_BLK_CONFIG_FEATURES_OFST 0
-#define VIRTIO_BLK_CONFIG_FEATURES_LEN 8
-#define VIRTIO_BLK_CONFIG_FEATURES_LO_OFST 0
-#define VIRTIO_BLK_CONFIG_FEATURES_LO_LEN 4
-#define VIRTIO_BLK_CONFIG_FEATURES_LO_LBN 0
-#define VIRTIO_BLK_CONFIG_FEATURES_LO_WIDTH 32
-#define VIRTIO_BLK_CONFIG_FEATURES_HI_OFST 4
-#define VIRTIO_BLK_CONFIG_FEATURES_HI_LEN 4
-#define VIRTIO_BLK_CONFIG_FEATURES_HI_LBN 32
-#define VIRTIO_BLK_CONFIG_FEATURES_HI_WIDTH 32
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_LBN 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SIZE_MAX_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SIZE_MAX_LBN 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SIZE_MAX_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SEG_MAX_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SEG_MAX_LBN 2
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SEG_MAX_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_GEOMETRY_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_GEOMETRY_LBN 4
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_GEOMETRY_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_RO_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_RO_LBN 5
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_RO_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_LBN 6
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SCSI_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SCSI_LBN 7
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SCSI_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_FLUSH_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_FLUSH_LBN 9
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_FLUSH_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_TOPOLOGY_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_TOPOLOGY_LBN 10
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_TOPOLOGY_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_CONFIG_WCE_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_CONFIG_WCE_LBN 11
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_CONFIG_WCE_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_MQ_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_MQ_LBN 12
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_MQ_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_DISCARD_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_DISCARD_LBN 13
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_DISCARD_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_WRITE_ZEROES_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_WRITE_ZEROES_LBN 14
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_WRITE_ZEROES_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_INDIRECT_DESC_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_INDIRECT_DESC_LBN 28
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_INDIRECT_DESC_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_EVENT_IDX_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_EVENT_IDX_LBN 29
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_EVENT_IDX_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_LBN 32
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ACCESS_PLATFORM_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ACCESS_PLATFORM_LBN 33
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ACCESS_PLATFORM_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_PACKED_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_PACKED_LBN 34
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_PACKED_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_IN_ORDER_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_IN_ORDER_LBN 35
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_IN_ORDER_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ORDER_PLATFORM_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ORDER_PLATFORM_LBN 36
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ORDER_PLATFORM_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_SR_IOV_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_SR_IOV_LBN 37
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_SR_IOV_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_NOTIFICATION_DATA_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_NOTIFICATION_DATA_LBN 38
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_NOTIFICATION_DATA_WIDTH 1
-#define VIRTIO_BLK_CONFIG_FEATURES_LBN 0
-#define VIRTIO_BLK_CONFIG_FEATURES_WIDTH 64
-/* The capacity of the device (expressed in 512-byte sectors) */
-#define VIRTIO_BLK_CONFIG_CAPACITY_OFST 8
-#define VIRTIO_BLK_CONFIG_CAPACITY_LEN 8
-#define VIRTIO_BLK_CONFIG_CAPACITY_LO_OFST 8
-#define VIRTIO_BLK_CONFIG_CAPACITY_LO_LEN 4
-#define VIRTIO_BLK_CONFIG_CAPACITY_LO_LBN 64
-#define VIRTIO_BLK_CONFIG_CAPACITY_LO_WIDTH 32
-#define VIRTIO_BLK_CONFIG_CAPACITY_HI_OFST 12
-#define VIRTIO_BLK_CONFIG_CAPACITY_HI_LEN 4
-#define VIRTIO_BLK_CONFIG_CAPACITY_HI_LBN 96
-#define VIRTIO_BLK_CONFIG_CAPACITY_HI_WIDTH 32
-#define VIRTIO_BLK_CONFIG_CAPACITY_LBN 64
-#define VIRTIO_BLK_CONFIG_CAPACITY_WIDTH 64
-/* Maximum size of any single segment. Only valid when VIRTIO_BLK_F_SIZE_MAX is
- * set.
- */
-#define VIRTIO_BLK_CONFIG_SIZE_MAX_OFST 16
-#define VIRTIO_BLK_CONFIG_SIZE_MAX_LEN 4
-#define VIRTIO_BLK_CONFIG_SIZE_MAX_LBN 128
-#define VIRTIO_BLK_CONFIG_SIZE_MAX_WIDTH 32
-/* Maximum number of segments in a request. Only valid when
- * VIRTIO_BLK_F_SEG_MAX is set.
- */
-#define VIRTIO_BLK_CONFIG_SEG_MAX_OFST 20
-#define VIRTIO_BLK_CONFIG_SEG_MAX_LEN 4
-#define VIRTIO_BLK_CONFIG_SEG_MAX_LBN 160
-#define VIRTIO_BLK_CONFIG_SEG_MAX_WIDTH 32
-/* Disk-style geometry - cylinders. Only valid when VIRTIO_BLK_F_GEOMETRY is
- * set.
- */
-#define VIRTIO_BLK_CONFIG_CYLINDERS_OFST 24
-#define VIRTIO_BLK_CONFIG_CYLINDERS_LEN 2
-#define VIRTIO_BLK_CONFIG_CYLINDERS_LBN 192
-#define VIRTIO_BLK_CONFIG_CYLINDERS_WIDTH 16
-/* Disk-style geometry - heads. Only valid when VIRTIO_BLK_F_GEOMETRY is set.
- */
-#define VIRTIO_BLK_CONFIG_HEADS_OFST 26
-#define VIRTIO_BLK_CONFIG_HEADS_LEN 1
-#define VIRTIO_BLK_CONFIG_HEADS_LBN 208
-#define VIRTIO_BLK_CONFIG_HEADS_WIDTH 8
-/* Disk-style geometry - sectors. Only valid when VIRTIO_BLK_F_GEOMETRY is set.
- */
-#define VIRTIO_BLK_CONFIG_SECTORS_OFST 27
-#define VIRTIO_BLK_CONFIG_SECTORS_LEN 1
-#define VIRTIO_BLK_CONFIG_SECTORS_LBN 216
-#define VIRTIO_BLK_CONFIG_SECTORS_WIDTH 8
-/* Block size of disk. Only valid when VIRTIO_BLK_F_BLK_SIZE is set. */
-#define VIRTIO_BLK_CONFIG_BLK_SIZE_OFST 28
-#define VIRTIO_BLK_CONFIG_BLK_SIZE_LEN 4
-#define VIRTIO_BLK_CONFIG_BLK_SIZE_LBN 224
-#define VIRTIO_BLK_CONFIG_BLK_SIZE_WIDTH 32
-/* Block topology - number of logical blocks per physical block (log2). Only
- * valid when VIRTIO_BLK_F_TOPOLOGY is set.
- */
-#define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_OFST 32
-#define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_LEN 1
-#define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_LBN 256
-#define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_WIDTH 8
-/* Block topology - offset of first aligned logical block. Only valid when
- * VIRTIO_BLK_F_TOPOLOGY is set.
- */
-#define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_OFST 33
-#define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_LEN 1
-#define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_LBN 264
-#define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_WIDTH 8
-/* Block topology - suggested minimum I/O size in blocks. Only valid when
- * VIRTIO_BLK_F_TOPOLOGY is set.
- */
-#define VIRTIO_BLK_CONFIG_MIN_IO_SIZE_OFST 34
-#define VIRTIO_BLK_CONFIG_MIN_IO_SIZE_LEN 2
-#define VIRTIO_BLK_CONFIG_MIN_IO_SIZE_LBN 272
-#define VIRTIO_BLK_CONFIG_MIN_IO_SIZE_WIDTH 16
-/* Block topology - optimal (suggested maximum) I/O size in blocks. Only valid
- * when VIRTIO_BLK_F_TOPOLOGY is set.
- */
-#define VIRTIO_BLK_CONFIG_OPT_IO_SIZE_OFST 36
-#define VIRTIO_BLK_CONFIG_OPT_IO_SIZE_LEN 4
-#define VIRTIO_BLK_CONFIG_OPT_IO_SIZE_LBN 288
-#define VIRTIO_BLK_CONFIG_OPT_IO_SIZE_WIDTH 32
-/* Unused, set to zero. Note that virtio_blk_config.writeback is volatile and
- * not carried in config data.
- */
-#define VIRTIO_BLK_CONFIG_UNUSED0_OFST 40
-#define VIRTIO_BLK_CONFIG_UNUSED0_LEN 2
-#define VIRTIO_BLK_CONFIG_UNUSED0_LBN 320
-#define VIRTIO_BLK_CONFIG_UNUSED0_WIDTH 16
-/* Number of queues. Only valid if the VIRTIO_BLK_F_MQ feature is negotiated.
- */
-#define VIRTIO_BLK_CONFIG_NUM_QUEUES_OFST 42
-#define VIRTIO_BLK_CONFIG_NUM_QUEUES_LEN 2
-#define VIRTIO_BLK_CONFIG_NUM_QUEUES_LBN 336
-#define VIRTIO_BLK_CONFIG_NUM_QUEUES_WIDTH 16
-/* Maximum discard sectors size, in 512-byte units. Only valid if
- * VIRTIO_BLK_F_DISCARD is set.
- */
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_OFST 44
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_LEN 4
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_LBN 352
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_WIDTH 32
-/* Maximum discard segment number. Only valid if VIRTIO_BLK_F_DISCARD is set.
- */
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_OFST 48
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_LEN 4
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_LBN 384
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_WIDTH 32
-/* Discard sector alignment, in 512-byte units. Only valid if
- * VIRTIO_BLK_F_DISCARD is set.
- */
-#define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_OFST 52
-#define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_LEN 4
-#define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_LBN 416
-#define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_WIDTH 32
-/* Maximum write zeroes sectors size, in 512-byte units. Only valid if
- * VIRTIO_BLK_F_WRITE_ZEROES is set.
- */
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_OFST 56
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_LEN 4
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_LBN 448
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_WIDTH 32
-/* Maximum write zeroes segment number. Only valid if VIRTIO_BLK_F_WRITE_ZEROES
- * is set.
- */
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_OFST 60
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_LEN 4
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_LBN 480
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_WIDTH 32
-/* Write zeroes request can result in deallocating one or more sectors. Only
- * valid if VIRTIO_BLK_F_WRITE_ZEROES is set.
- */
-#define VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_OFST 64
-#define VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_LEN 1
-#define VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_LBN 512
-#define VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_WIDTH 8
-/* Unused, set to zero. */
-#define VIRTIO_BLK_CONFIG_UNUSED1_OFST 65
-#define VIRTIO_BLK_CONFIG_UNUSED1_LEN 3
-#define VIRTIO_BLK_CONFIG_UNUSED1_LBN 520
-#define VIRTIO_BLK_CONFIG_UNUSED1_WIDTH 24
-
-
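The feature flags above are numbered by _LBN from the least-significant end of the 64-bit FEATURES field, so VIRTIO_F_VERSION_1 (LBN 32), for example, lands in bit 0 of FEATURES_HI. A minimal sketch of setting such flags in a hand-packed, little-endian VIRTIO_BLK_CONFIG buffer (an illustration only; the driver has its own MCDI bitfield accessors):

#include <stdint.h>

/* Set the single-bit flag at little-endian bit number 'lbn' within the field
 * starting at byte offset 'ofst'.
 */
static void set_flag_le(uint8_t *buf, unsigned int ofst, unsigned int lbn)
{
        buf[ofst + lbn / 8] |= 1u << (lbn % 8);
}

/* Example: advertise VIRTIO_BLK_F_BLK_SIZE (LBN 6) and VIRTIO_F_VERSION_1
 * (LBN 32) in the FEATURES field of a VIRTIO_BLK_CONFIG buffer.
 */
static void set_example_features(uint8_t cfg[VIRTIO_BLK_CONFIG_LEN])
{
        set_flag_le(cfg, VIRTIO_BLK_CONFIG_FEATURES_OFST,
                    VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_LBN);
        set_flag_le(cfg, VIRTIO_BLK_CONFIG_FEATURES_OFST,
                    VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_LBN);
}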
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_CONFIG_SET
- * Set configuration for an existing descriptor proxy function. Configuration
- * data must match function personality. The actual function configuration is
- * not persisted until the caller commits with MC_CMD_DESC_PROXY_FUNC_COMMIT_IN
- */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET 0x174
-#undef MC_CMD_0x174_PRIVILEGE_CTG
-
-#define MC_CMD_0x174_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LENMIN 20
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LENMAX 252
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LENMAX_MCDI2 1020
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LEN(num) (20+1*(num))
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_NUM(len) (((len)-20)/1)
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_HANDLE_LEN 4
-/* Reserved for future extension, set to zero. */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_RESERVED_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_RESERVED_LEN 16
-/* Configuration data. Format of configuration data is determined implicitly
- * from function personality referred to by HANDLE. Currently, only supported
- * format is VIRTIO_BLK_CONFIG.
- */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_OFST 20
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_LEN 1
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_MINNUM 0
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_MAXNUM 232
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_MAXNUM_MCDI2 1000
-
-/* MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_OUT_LEN 0
-
-
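A sketch of sending such a configuration blob (for instance a packed VIRTIO_BLK_CONFIG) for an open proxy function. mcdi_rpc() is a placeholder for whatever MCDI transport the caller uses, and put_le32() is the helper from the CREATE sketch above; neither is sfc driver API.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Placeholder MCDI transport: send 'in', optionally receive 'out'. */
int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t inlen,
             uint8_t *out, size_t outlen);

static int desc_proxy_config_set(uint32_t handle, const uint8_t *cfg,
                                 size_t cfg_len)
{
        uint8_t in[MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LENMAX] = { 0 };

        if (cfg_len > MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_MAXNUM)
                return -1;
        put_le32(in, MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_HANDLE_OFST, handle);
        memcpy(in + MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_OFST,
               cfg, cfg_len);
        return mcdi_rpc(MC_CMD_DESC_PROXY_FUNC_CONFIG_SET, in,
                        MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LEN(cfg_len),
                        NULL, 0);
}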
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_COMMIT
- * Commit function configuration to non-volatile or volatile store. Once
- * the configuration is applied to hardware (which may happen immediately or on
- * the next function/device reset), a DESC_PROXY_FUNC_CONFIG_SET MCDI event will
- * be delivered to the caller's MCDI event queue.
- */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT 0x175
-#undef MC_CMD_0x175_PRIVILEGE_CTG
-
-#define MC_CMD_0x175_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_COMMIT_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_LEN 8
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_HANDLE_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_STORE_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_STORE_LEN 4
-/* enum: Store into non-volatile (dynamic) config */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_NON_VOLATILE 0x0
-/* enum: Store into volatile (ephemeral) config */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_VOLATILE 0x1
-
-/* MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_LEN 4
-/* Generation count to be delivered in an event once configuration becomes live
- */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_CONFIG_GENERATION_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_CONFIG_GENERATION_LEN 4
-
-
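A hedged sketch of the commit step: pack HANDLE and STORE, issue the command, and keep the returned generation count so the later DESC_PROXY_FUNC_CONFIG_SET event can be matched against it. mcdi_rpc() and put_le32() are the placeholders from the sketches above; rd32() is their read-side counterpart.

/* Little-endian read helper, the counterpart of put_le32() above. */
static uint32_t rd32(const uint8_t *buf, unsigned int ofst)
{
        return buf[ofst] | (buf[ofst + 1] << 8) | (buf[ofst + 2] << 16) |
               ((uint32_t)buf[ofst + 3] << 24);
}

static int desc_proxy_commit_volatile(uint32_t handle, uint32_t *generation)
{
        uint8_t in[MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_LEN] = { 0 };
        uint8_t out[MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_LEN] = { 0 };
        int rc;

        put_le32(in, MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_HANDLE_OFST, handle);
        put_le32(in, MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_STORE_OFST,
                 MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_VOLATILE);
        rc = mcdi_rpc(MC_CMD_DESC_PROXY_FUNC_COMMIT, in, sizeof(in),
                      out, sizeof(out));
        if (rc)
                return rc;
        *generation = rd32(out,
                           MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_CONFIG_GENERATION_OFST);
        return 0;
}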
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_OPEN
- * Retrieve a handle for an existing descriptor proxy function. Returns an
- * integer handle, valid until the function is deallocated, the MC is rebooted
- * or a power cycle occurs. Returns ENODEV if no function with the given label
- * exists.
- */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN 0x176
-#undef MC_CMD_0x176_PRIVILEGE_CTG
-
-#define MC_CMD_0x176_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_OPEN_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LEN 40
-/* User-defined label (zero-terminated ASCII string) to uniquely identify the
- * function
- */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LABEL_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LABEL_LEN 40
-
-/* MC_CMD_DESC_PROXY_FUNC_OPEN_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMIN 40
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMAX 252
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LEN(num) (40+1*(num))
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_NUM(len) (((len)-40)/1)
-/* Handle to the descriptor proxy function */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_HANDLE_LEN 4
-/* PCIe Function ID (as struct PCIE_FUNCTION) */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LEN 8
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_LBN 32
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_LBN 64
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_PF_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_PF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_VF_OFST 6
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_VF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_INTF_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_INTF_LEN 4
-/* Function personality */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PERSONALITY_OFST 12
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PERSONALITY_LEN 4
-/* Enum values, see field(s): */
-/* FUNCTION_PERSONALITY/ID */
-/* Function configuration state */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_STATUS_OFST 16
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_STATUS_LEN 4
-/* enum: Function configuration is visible to the host (live) */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LIVE 0x0
-/* enum: Function configuration is pending reset */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PENDING 0x1
-/* enum: Function configuration is missing (created, but no configuration
- * committed)
- */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_UNCONFIGURED 0x2
-/* Generation count to be delivered in an event once the configuration becomes
- * live (if status is "pending")
- */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_GENERATION_OFST 20
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_GENERATION_LEN 4
-/* Reserved for future extension, set to zero. */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_RESERVED_OFST 24
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_RESERVED_LEN 16
-/* Configuration data corresponding to function personality. Currently, only
- * supported format is VIRTIO_BLK_CONFIG. Not valid if status is UNCONFIGURED.
- */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_OFST 40
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_LEN 1
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_MINNUM 0
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_MAXNUM 212
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_MAXNUM_MCDI2 980
-
-
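A sketch of opening a function by label and checking whether its configuration is live, pending or missing, again built on the placeholder mcdi_rpc() transport and the helpers from the sketches above:

static int desc_proxy_open(const char *label, uint32_t *handle,
                           uint32_t *status)
{
        uint8_t in[MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LEN] = { 0 };
        uint8_t out[MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMAX] = { 0 };
        int rc;

        /* LABEL is a zero-terminated ASCII string; the buffer is pre-zeroed. */
        strncpy((char *)in + MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LABEL_OFST, label,
                MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LABEL_LEN - 1);
        rc = mcdi_rpc(MC_CMD_DESC_PROXY_FUNC_OPEN, in, sizeof(in),
                      out, sizeof(out));
        if (rc)
                return rc;      /* ENODEV if no function has this label */
        *handle = rd32(out, MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_HANDLE_OFST);
        /* LIVE (0x0), PENDING (0x1) or UNCONFIGURED (0x2), as defined above. */
        *status = rd32(out, MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_STATUS_OFST);
        return 0;
}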
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_CLOSE
- * Releases a handle for an open descriptor proxy function. If proxying was
- * enabled on the device, the caller is expected to gracefully stop it using
- * MC_CMD_DESC_PROXY_FUNC_DISABLE prior to calling this function. Closing an
- * active device without disabling proxying will result in a forced close, which
- * will put the device into a failed state and signal the host driver of the
- * error (for virtio, the DEVICE_NEEDS_RESET flag would be set on the host side).
- */
-#define MC_CMD_DESC_PROXY_FUNC_CLOSE 0x1a1
-#undef MC_CMD_0x1a1_PRIVILEGE_CTG
-
-#define MC_CMD_0x1a1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_CLOSE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_CLOSE_IN_LEN 4
-/* Handle to the descriptor proxy function */
-#define MC_CMD_DESC_PROXY_FUNC_CLOSE_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CLOSE_IN_HANDLE_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_CLOSE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_CLOSE_OUT_LEN 0
-
-/* DESC_PROXY_FUNC_MAP structuredef */
-#define DESC_PROXY_FUNC_MAP_LEN 52
-/* PCIe function ID (as struct PCIE_FUNCTION) */
-#define DESC_PROXY_FUNC_MAP_FUNC_OFST 0
-#define DESC_PROXY_FUNC_MAP_FUNC_LEN 8
-#define DESC_PROXY_FUNC_MAP_FUNC_LO_OFST 0
-#define DESC_PROXY_FUNC_MAP_FUNC_LO_LEN 4
-#define DESC_PROXY_FUNC_MAP_FUNC_LO_LBN 0
-#define DESC_PROXY_FUNC_MAP_FUNC_LO_WIDTH 32
-#define DESC_PROXY_FUNC_MAP_FUNC_HI_OFST 4
-#define DESC_PROXY_FUNC_MAP_FUNC_HI_LEN 4
-#define DESC_PROXY_FUNC_MAP_FUNC_HI_LBN 32
-#define DESC_PROXY_FUNC_MAP_FUNC_HI_WIDTH 32
-#define DESC_PROXY_FUNC_MAP_FUNC_LBN 0
-#define DESC_PROXY_FUNC_MAP_FUNC_WIDTH 64
-#define DESC_PROXY_FUNC_MAP_FUNC_PF_OFST 0
-#define DESC_PROXY_FUNC_MAP_FUNC_PF_LEN 2
-#define DESC_PROXY_FUNC_MAP_FUNC_PF_LBN 0
-#define DESC_PROXY_FUNC_MAP_FUNC_PF_WIDTH 16
-#define DESC_PROXY_FUNC_MAP_FUNC_VF_OFST 2
-#define DESC_PROXY_FUNC_MAP_FUNC_VF_LEN 2
-#define DESC_PROXY_FUNC_MAP_FUNC_VF_LBN 16
-#define DESC_PROXY_FUNC_MAP_FUNC_VF_WIDTH 16
-#define DESC_PROXY_FUNC_MAP_FUNC_INTF_OFST 4
-#define DESC_PROXY_FUNC_MAP_FUNC_INTF_LEN 4
-#define DESC_PROXY_FUNC_MAP_FUNC_INTF_LBN 32
-#define DESC_PROXY_FUNC_MAP_FUNC_INTF_WIDTH 32
-/* Function personality */
-#define DESC_PROXY_FUNC_MAP_PERSONALITY_OFST 8
-#define DESC_PROXY_FUNC_MAP_PERSONALITY_LEN 4
-/* Enum values, see field(s): */
-/* FUNCTION_PERSONALITY/ID */
-#define DESC_PROXY_FUNC_MAP_PERSONALITY_LBN 64
-#define DESC_PROXY_FUNC_MAP_PERSONALITY_WIDTH 32
-/* User-defined label (zero-terminated ASCII string) to uniquely identify the
- * function
- */
-#define DESC_PROXY_FUNC_MAP_LABEL_OFST 12
-#define DESC_PROXY_FUNC_MAP_LABEL_LEN 40
-#define DESC_PROXY_FUNC_MAP_LABEL_LBN 96
-#define DESC_PROXY_FUNC_MAP_LABEL_WIDTH 320
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_ENUM
- * Enumerate existing descriptor proxy functions
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENUM 0x177
-#undef MC_CMD_0x177_PRIVILEGE_CTG
-
-#define MC_CMD_0x177_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_ENUM_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_IN_LEN 4
-/* Starting index, set to 0 on first request. See
- * MC_CMD_DESC_PROXY_FUNC_ENUM_OUT/FLAGS.
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_IN_START_IDX_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_IN_START_IDX_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_ENUM_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMIN 4
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMAX 212
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMAX_MCDI2 992
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LEN(num) (4+52*(num))
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_NUM(len) (((len)-4)/52)
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FLAGS_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FLAGS_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_LBN 0
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_WIDTH 1
-/* Function map, as array of DESC_PROXY_FUNC_MAP */
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_LEN 52
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_MINNUM 0
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_MAXNUM 4
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_MAXNUM_MCDI2 19
-
-
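The FLAGS/MORE_DATA bit and START_IDX implement a simple pagination scheme: each response carries up to FUNC_MAP_MAXNUM 52-byte DESC_PROXY_FUNC_MAP entries, and the caller keeps re-issuing the request while MORE_DATA is set. A hedged sketch of that loop follows; mcdi_rpc_var() is a placeholder transport variant that reports the actual response length, and advancing START_IDX by the number of entries received is an assumption about the continuation semantics.

/* Placeholder transport variant that reports the actual response length. */
int mcdi_rpc_var(unsigned int cmd, const uint8_t *in, size_t inlen,
                 uint8_t *out, size_t outlen, size_t *outlen_actual);

/* Visit every descriptor proxy function, following MORE_DATA pagination. */
static int desc_proxy_enum(void (*visit)(const uint8_t *func_map_entry))
{
        uint8_t in[MC_CMD_DESC_PROXY_FUNC_ENUM_IN_LEN] = { 0 };
        uint8_t out[MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMAX];
        uint32_t start_idx = 0;
        size_t outlen, i, count;
        int rc;

        do {
                put_le32(in, MC_CMD_DESC_PROXY_FUNC_ENUM_IN_START_IDX_OFST,
                         start_idx);
                rc = mcdi_rpc_var(MC_CMD_DESC_PROXY_FUNC_ENUM, in, sizeof(in),
                                  out, sizeof(out), &outlen);
                if (rc)
                        return rc;
                count = MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_NUM(outlen);
                for (i = 0; i < count; i++)
                        visit(out +
                              MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_OFST +
                              i * MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_LEN);
                /* Assumed continuation: advance by the entries received. */
                start_idx += count;
        } while (rd32(out, MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FLAGS_OFST) &
                 (1u << MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_LBN));
        return 0;
}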
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE
- * Enable descriptor proxying for function into target event queue. Returns VI
- * allocation info for the proxy source function, so that the caller can map
- * absolute VI IDs from descriptor proxy events back to the originating
- * function. This is a legacy function that only supports single queue proxy
- * devices. It is also limited in that it can only be called after host driver
- * attach (once VI allocation is known) and will return MC_CMD_ERR_ENOTCONN
- * otherwise. For new code, see MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE which
- * supports multi-queue devices and has no dependency on host driver attach.
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE 0x178
-#undef MC_CMD_0x178_PRIVILEGE_CTG
-
-#define MC_CMD_0x178_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_LEN 8
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_HANDLE_LEN 4
-/* Descriptor proxy sink queue (caller function relative). Must be extended
- * width event queue
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_TARGET_EVQ_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_TARGET_EVQ_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_LEN 8
-/* The number of VIs allocated on the function */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_COUNT_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_COUNT_LEN 4
-/* The base absolute VI number allocated to the function. */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_BASE_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_BASE_LEN 4
-
-
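The VI_COUNT/VI_BASE pair exists so that absolute VI numbers carried in descriptor proxy events can be translated back to queues on the proxied function. A small worked example under that reading:

/* Translate an absolute VI number from a descriptor proxy event back to a
 * function-relative queue index, or return -1 if the VI does not belong to
 * the function described by vi_base/vi_count.
 */
static int proxy_abs_vi_to_rel(uint32_t abs_vi, uint32_t vi_base,
                               uint32_t vi_count)
{
        if (abs_vi < vi_base || abs_vi - vi_base >= vi_count)
                return -1;
        return abs_vi - vi_base;
}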
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE
- * Enable descriptor proxying for a source queue on a host function into target
- * event queue. Source queue number is a relative virtqueue number on the
- * source function (0 to max_virtqueues-1). For a multi-queue device, the
- * caller must enable all source queues individually. To retrieve absolute VI
- * information for the source function (so that VI IDs from descriptor proxy
- * events can be mapped back to source function / queue) see
- * MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE 0x1d0
-#undef MC_CMD_0x1d0_PRIVILEGE_CTG
-
-#define MC_CMD_0x1d0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_LEN 12
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_HANDLE_LEN 4
-/* Source relative queue number to enable proxying on */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_SOURCE_QUEUE_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_SOURCE_QUEUE_LEN 4
-/* Descriptor proxy sink queue (caller function relative). Must be extended
- * width event queue
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_TARGET_EVQ_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_TARGET_EVQ_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE
- * Disable descriptor proxying for function. For multi-queue functions,
- * disables all queues.
- */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE 0x179
-#undef MC_CMD_0x179_PRIVILEGE_CTG
-
-#define MC_CMD_0x179_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_IN_LEN 4
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_IN_HANDLE_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE
- * Disable descriptor proxying for a specific source queue on a function.
- */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE 0x1d1
-#undef MC_CMD_0x1d1_PRIVILEGE_CTG
-
-#define MC_CMD_0x1d1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_LEN 8
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_HANDLE_LEN 4
-/* Source relative queue number to disable proxying on */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_SOURCE_QUEUE_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_SOURCE_QUEUE_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_GET_VI_INFO
- * Returns absolute VI allocation information for the descriptor proxy source
- * function referenced by HANDLE, so that the caller can map absolute VI IDs
- * from descriptor proxy events back to the originating function and queue. The
- * call is only valid after the host driver for the source function has
- * attached (after receiving a driver attach event for the descriptor proxy
- * function) and will fail with ENOTCONN otherwise.
- */
-#define MC_CMD_DESC_PROXY_GET_VI_INFO 0x1d2
-#undef MC_CMD_0x1d2_PRIVILEGE_CTG
-
-#define MC_CMD_0x1d2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_GET_VI_INFO_IN msgrequest */
-#define MC_CMD_DESC_PROXY_GET_VI_INFO_IN_LEN 4
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_GET_VI_INFO_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_GET_VI_INFO_IN_HANDLE_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LENMIN 0
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LENMAX 252
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_NUM(len) (((len)-0)/4)
-/* VI information (VI ID + VI relative queue number) for each of the source
- * queues (in order from 0 to max_virtqueues-1), as array of QUEUE_ID
- * structures.
- */
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_MINNUM 0
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_MAXNUM 63
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_MAXNUM_MCDI2 255
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_ABS_VI_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_ABS_VI_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_REL_QUEUE_LBN 16
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_REL_QUEUE_WIDTH 1
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_RESERVED_LBN 17
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_RESERVED_WIDTH 15
-
-
-/***********************************/
-/* MC_CMD_GET_ADDR_SPC_ID
- * Get Address space identifier for use in mem2mem descriptors for a given
- * target. See SF-120734-TC for details on ADDR_SPC_IDs and mem2mem
- * descriptors.
- */
-#define MC_CMD_GET_ADDR_SPC_ID 0x1a0
-#undef MC_CMD_0x1a0_PRIVILEGE_CTG
-
-#define MC_CMD_0x1a0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_GET_ADDR_SPC_ID_IN msgrequest */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_LEN 16
-/* Resource type to get ADDR_SPC_ID for */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_TYPE_OFST 0
-#define MC_CMD_GET_ADDR_SPC_ID_IN_TYPE_LEN 4
-/* enum: Address space ID for host/AP memory DMA over the same interface this
- * MCDI was called on
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_SELF 0x0
-/* enum: Address space ID for host/AP memory DMA via PCI interface and function
- * specified by FUNC
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_PCI_FUNC 0x1
-/* enum: Address space ID for host/AP memory DMA via PCI interface and function
- * specified by FUNC with PASID value specified by PASID
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_PCI_FUNC_PASID 0x2
-/* enum: Address space ID for host/AP memory DMA via PCI interface and function
- * specified by FUNC with PASID value of relative VI specified by VI
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_REL_VI 0x3
-/* enum: Address space ID for host/AP memory DMA via PCI interface, function
- * and PASID value of absolute VI specified by VI
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_ABS_VI 0x4
-/* enum: Address space ID for host memory DMA via PCI interface and function of
- * descriptor proxy function specified by HANDLE
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_DESC_PROXY_HANDLE 0x5
-/* enum: Address space ID for DMA to/from MC memory */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_MC_MEM 0x6
-/* enum: Address space ID for DMA to/from other SmartNIC memory (on-chip, DDR)
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_NIC_MEM 0x7
-/* PCIe Function ID (as struct PCIE_FUNCTION). Only valid if TYPE is PCI_FUNC,
- * PCI_FUNC_PASID or REL_VI.
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_OFST 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LEN 8
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_OFST 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_LEN 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_LBN 32
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_WIDTH 32
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_OFST 8
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_LEN 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_LBN 64
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_WIDTH 32
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_PF_OFST 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_PF_LEN 2
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_VF_OFST 6
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_VF_LEN 2
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_INTF_OFST 8
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_INTF_LEN 4
-/* PASID value. Only valid if TYPE is PCI_FUNC_PASID. */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_PASID_OFST 12
-#define MC_CMD_GET_ADDR_SPC_ID_IN_PASID_LEN 4
-/* Relative or absolute VI number. Only valid if TYPE is REL_VI or ABS_VI */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_VI_OFST 12
-#define MC_CMD_GET_ADDR_SPC_ID_IN_VI_LEN 4
-/* Descriptor proxy function handle. Only valid if TYPE is DESC_PROXY_HANDLE.
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_HANDLE_OFST 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_HANDLE_LEN 4
-
-/* MC_CMD_GET_ADDR_SPC_ID_OUT msgresponse */
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_LEN 8
-/* Address Space ID for the requested target. Only the lower 36 bits are valid
- * in the current SmartNIC implementation.
- */
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_OFST 0
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LEN 8
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_OFST 0
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_LEN 4
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_LBN 0
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_WIDTH 32
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_OFST 4
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_LEN 4
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_LBN 32
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_WIDTH 32
-
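A sketch of requesting an address space ID for a VI relative to a given PCIe function, reusing the placeholder transport and little-endian helpers from the earlier sketches. Per the response comment, only the low 36 bits of the returned ID are meaningful on current hardware.

static int get_addr_spc_id_rel_vi(uint16_t pf, uint16_t vf, uint32_t intf,
                                  uint32_t vi, uint64_t *addr_spc_id)
{
        uint8_t in[MC_CMD_GET_ADDR_SPC_ID_IN_LEN] = { 0 };
        uint8_t out[MC_CMD_GET_ADDR_SPC_ID_OUT_LEN] = { 0 };
        int rc;

        put_le32(in, MC_CMD_GET_ADDR_SPC_ID_IN_TYPE_OFST,
                 MC_CMD_GET_ADDR_SPC_ID_IN_REL_VI);
        /* FUNC is required alongside VI when TYPE is REL_VI. */
        put_le16(in, MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_PF_OFST, pf);
        put_le16(in, MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_VF_OFST, vf);
        put_le32(in, MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_INTF_OFST, intf);
        put_le32(in, MC_CMD_GET_ADDR_SPC_ID_IN_VI_OFST, vi);
        rc = mcdi_rpc(MC_CMD_GET_ADDR_SPC_ID, in, sizeof(in),
                      out, sizeof(out));
        if (rc)
                return rc;
        *addr_spc_id =
                rd32(out, MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_OFST) |
                ((uint64_t)rd32(out,
                        MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_OFST) << 32);
        return 0;
}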
/***********************************/
/* MC_CMD_GET_CLIENT_HANDLE
@@ -26359,10 +22483,11 @@
* INTF=CALLER, PF=PF_NULL, VF=... to refer to a VF child of the calling PF or
* a sibling VF of the calling VF. - INTF=CALLER, PF=..., VF=VF_NULL to refer
* to a PF on the calling interface - INTF=CALLER, PF=..., VF=... to refer to a
- * VF on the calling interface - INTF=..., PF=..., VF=VF_NULL to refer to a PF
+ * VF on the calling interface - INTF=..., PF=PF_NULL, VF=VF_NULL to refer to
+ * the named interface itself - INTF=..., PF=..., VF=VF_NULL to refer to a PF
* on a named interface - INTF=..., PF=..., VF=... to refer to a VF on a named
* interface where ... refers to a small integer for the VF/PF fields, and to
- * values from the PCIE_INTERFACE enum for for the INTF field. It's only
+ * values from the PCIE_INTERFACE enum for the INTF field. It's only
* meaningful to use INTF=CALLER within a structure that's an argument to
* MC_CMD_DEVEL_GET_CLIENT_HANDLE.
*/
@@ -26380,6 +22505,7 @@
* backwards compatibility only, callers should use PCIE_INTERFACE_CALLER.
*/
#define MC_CMD_GET_CLIENT_HANDLE_IN_PCIE_FUNCTION_INTF_NULL 0xffffffff
+/* See structuredef: PCIE_FUNCTION */
#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_PF_OFST 4
#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_PF_LEN 2
#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_VF_OFST 6
@@ -27350,7 +23476,7 @@
/* MAE_MPORT_SELECTOR structuredef: MPORTS are identified by an opaque unsigned
* integer value (mport_id) that is guaranteed to be representable within
 * 32-bits or within any NIC interface field that needs to store the value
- * (whichever is narrowers). This selector structure provides a stable way to
+ * (whichever is narrower). This selector structure provides a stable way to
* refer to m-ports.
*/
#define MAE_MPORT_SELECTOR_LEN 4
@@ -27425,10 +23551,22 @@
#define MAE_MPORT_SELECTOR_FLAT_WIDTH 32
/* MAE_LINK_ENDPOINT_SELECTOR structuredef: Structure that identifies a real or
- * virtual network port by MAE port and link end
+ * virtual network port by MAE port and link end. Intended to be used by
+ * network port MCDI commands. Setting FLAT to MAE_LINK_ENDPOINT_COMPAT is
+ * equivalent to using the previous version of the command. Not all possible
+ * combinations of MPORT_END and MPORT_SELECTOR in MAE_LINK_ENDPOINT_SELECTOR
+ * will work in all circumstances. 1. Some will always work (e.g. a VF can
+ * always address its logical MAC using MPORT_SELECTOR=ASSIGNED,LINK_END=VNIC),
+ * 2. Some are not meaningful and will always fail with EINVAL (e.g. attempting
+ * to address the VNIC end of a link to a physical port), 3. Some are
+ * meaningful but require the MCDI client to have the required permission and
+ * fail with EPERM otherwise (e.g. trying to set the MAC on a VF the caller
+ * cannot administer), and 4. Some could be implementation-specific and fail
+ * with ENOTSUP if not available (no examples exist right now). See
+ * SF-123581-TC section 4.3 for more details.
*/
#define MAE_LINK_ENDPOINT_SELECTOR_LEN 8
-/* The MAE MPORT of interest */
+/* Identifier for the MAE MPORT of interest */
#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_OFST 0
#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_LEN 4
#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_LBN 0
@@ -27829,6 +23967,8 @@
#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_MAXNUM_MCDI2 253
/* enum: A counter ID that is guaranteed never to represent a real counter */
#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NULL 0xffffffff
+/* Other enum values, see field(s): */
+/* MAE_COUNTER_ID */
/***********************************/
@@ -28266,6 +24406,24 @@
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SUPPRESS_SELF_DELIVERY_OFST 0
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SUPPRESS_SELF_DELIVERY_LBN 14
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SUPPRESS_SELF_DELIVERY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_C_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_C_PL_LBN 15
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_C_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_D_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_D_PL_LBN 16
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_D_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_LBN 17
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_NET_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_NET_CHAN_LBN 18
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_NET_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_PLUGIN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_PLUGIN_LBN 19
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_PLUGIN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_INC_L4_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_INC_L4_LBN 20
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_INC_L4_WIDTH 1
/* If VLAN_PUSH >= 1, TCI value to be inserted as outermost VLAN. */
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN0_TCI_BE_OFST 4
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN0_TCI_BE_LEN 2
@@ -28291,19 +24449,23 @@
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DELIVER_OFST 20
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DELIVER_LEN 4
/* Allows an action set to trigger several counter updates. Set to
- * COUNTER_LIST_ID_NULL to request no counter action.
+ * MAE_COUNTER_ID_NULL to request no counter action.
*/
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID_OFST 24
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
/* If a driver only wished to update one counter within this action set, then
* it can supply a COUNTER_ID instead of allocating a single-element counter
* list. The ID must have been allocated with COUNTER_TYPE=AR. This field
- * should be set to COUNTER_ID_NULL if this behaviour is not required. It is
- * not valid to supply a non-NULL value for both COUNTER_LIST_ID and
+ * should be set to MAE_COUNTER_ID_NULL if this behaviour is not required. It
+ * is not valid to supply a non-NULL value for both COUNTER_LIST_ID and
* COUNTER_ID.
*/
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_ID_OFST 28
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_VALUE_OFST 32
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_VALUE_LEN 4
/* Set to MAC_ID_NULL to request no source MAC replacement. */
@@ -28347,6 +24509,24 @@
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_OFST 0
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_LBN 14
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_C_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_C_PL_LBN 15
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_C_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_D_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_D_PL_LBN 16
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_D_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_LBN 17
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_NET_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_NET_CHAN_LBN 18
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_NET_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_PLUGIN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_PLUGIN_LBN 19
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_PLUGIN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_INC_L4_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_INC_L4_LBN 20
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_INC_L4_WIDTH 1
/* If VLAN_PUSH >= 1, TCI value to be inserted as outermost VLAN. */
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN0_TCI_BE_OFST 4
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN0_TCI_BE_LEN 2
@@ -28372,19 +24552,23 @@
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DELIVER_OFST 20
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DELIVER_LEN 4
/* Allows an action set to trigger several counter updates. Set to
- * COUNTER_LIST_ID_NULL to request no counter action.
+ * MAE_COUNTER_ID_NULL to request no counter action.
*/
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_LIST_ID_OFST 24
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_LIST_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
/* If a driver only wished to update one counter within this action set, then
* it can supply a COUNTER_ID instead of allocating a single-element counter
* list. The ID must have been allocated with COUNTER_TYPE=AR. This field
- * should be set to COUNTER_ID_NULL if this behaviour is not required. It is
- * not valid to supply a non-NULL value for both COUNTER_LIST_ID and
+ * should be set to MAE_COUNTER_ID_NULL if this behaviour is not required. It
+ * is not valid to supply a non-NULL value for both COUNTER_LIST_ID and
* COUNTER_ID.
*/
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_ID_OFST 28
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_VALUE_OFST 32
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_VALUE_LEN 4
/* Set to MAC_ID_NULL to request no source MAC replacement. */
@@ -28437,6 +24621,172 @@
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_1_TO_CE_LBN 6
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_1_TO_CE_WIDTH 1
+/* MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN msgrequest: Only supported if
+ * MAE_ACTION_SET_ALLOC_V3_SUPPORTED is advertised in
+ * MC_CMD_GET_CAPABILITIES_V10_OUT.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LEN 53
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAGS_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAGS_LEN 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_PUSH_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_PUSH_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_PUSH_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_POP_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_POP_LBN 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_POP_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DECAP_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DECAP_LBN 8
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DECAP_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_LBN 9
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAG_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAG_LBN 10
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAG_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_NAT_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_NAT_LBN 11
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_NAT_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DECR_IP_TTL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DECR_IP_TTL_LBN 12
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DECR_IP_TTL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_SRC_MPORT_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_SRC_MPORT_LBN 13
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_SRC_MPORT_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SUPPRESS_SELF_DELIVERY_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SUPPRESS_SELF_DELIVERY_LBN 14
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SUPPRESS_SELF_DELIVERY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_C_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_C_PL_LBN 15
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_C_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_D_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_D_PL_LBN 16
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_D_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_LBN 17
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_NET_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_NET_CHAN_LBN 18
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_NET_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_PLUGIN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_PLUGIN_LBN 19
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_PLUGIN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_INC_L4_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_INC_L4_LBN 20
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_INC_L4_WIDTH 1
+/* If VLAN_PUSH >= 1, TCI value to be inserted as outermost VLAN. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN0_TCI_BE_OFST 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN0_TCI_BE_LEN 2
+/* If VLAN_PUSH >= 1, TPID value to be inserted as outermost VLAN. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN0_PROTO_BE_OFST 6
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN0_PROTO_BE_LEN 2
+/* If VLAN_PUSH == 2, inner TCI value to be inserted. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN1_TCI_BE_OFST 8
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN1_TCI_BE_LEN 2
+/* If VLAN_PUSH == 2, inner TPID value to be inserted. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN1_PROTO_BE_OFST 10
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN1_PROTO_BE_LEN 2
+/* Reserved. Ignored by firmware. Should be set to zero or 0xffffffff. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RSVD_OFST 12
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RSVD_LEN 4
+/* Set to ENCAP_HEADER_ID_NULL to request no encap action */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ENCAP_HEADER_ID_OFST 16
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ENCAP_HEADER_ID_LEN 4
+/* An m-port selector identifying the m-port that the modified packet should be
+ * delivered to. Set to MPORT_SELECTOR_NULL to request no delivery of the
+ * packet.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DELIVER_OFST 20
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DELIVER_LEN 4
+/* Allows an action set to trigger several counter updates. Set to
+ * MAE_COUNTER_ID_NULL to request no counter action.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_LIST_ID_OFST 24
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_LIST_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
+/* If a driver only wished to update one counter within this action set, then
+ * it can supply a COUNTER_ID instead of allocating a single-element counter
+ * list. The ID must have been allocated with COUNTER_TYPE=AR. This field
+ * should be set to MAE_COUNTER_ID_NULL if this behaviour is not required. It
+ * is not valid to supply a non-NULL value for both COUNTER_LIST_ID and
+ * COUNTER_ID.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_ID_OFST 28
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_VALUE_OFST 32
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_VALUE_LEN 4
+/* Set to MAC_ID_NULL to request no source MAC replacement. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SRC_MAC_ID_OFST 36
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SRC_MAC_ID_LEN 4
+/* Set to MAC_ID_NULL to request no destination MAC replacement. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DST_MAC_ID_OFST 40
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DST_MAC_ID_LEN 4
+/* Source m-port ID to be reported for DO_SET_SRC_MPORT action. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_REPORTED_SRC_MPORT_OFST 44
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_REPORTED_SRC_MPORT_LEN 4
+/* Actions for modifying the Differentiated Services Code-Point (DSCP) bits
+ * within IPv4 and IPv6 headers.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_CONTROL_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_CONTROL_LEN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_ENCAP_COPY_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_ENCAP_COPY_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_ENCAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_DECAP_COPY_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_DECAP_COPY_LBN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_DECAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_DSCP_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_DSCP_LBN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_DSCP_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_VALUE_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_VALUE_LBN 3
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_VALUE_WIDTH 6
+/* Actions for modifying the Explicit Congestion Notification (ECN) bits within
+ * IPv4 and IPv6 headers.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_CONTROL_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_CONTROL_LEN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_ENCAP_COPY_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_ENCAP_COPY_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_ENCAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_DECAP_COPY_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_DECAP_COPY_LBN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_DECAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_ECN_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_ECN_LBN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_ECN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_VALUE_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_VALUE_LBN 3
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_VALUE_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_0_TO_CE_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_0_TO_CE_LBN 5
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_0_TO_CE_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_1_TO_CE_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_1_TO_CE_LBN 6
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_1_TO_CE_WIDTH 1
+/* Actions for overwriting CH_ROUTE subfields. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OVERWRITE_OFST 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OVERWRITE_LEN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_C_PL_OFST 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_C_PL_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_C_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_D_PL_OFST 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_D_PL_LBN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_D_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_PL_CHAN_OFST 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_PL_CHAN_LBN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_PL_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OUT_HOST_CHAN_OFST 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OUT_HOST_CHAN_LBN 3
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OUT_HOST_CHAN_WIDTH 1
+/* Override outgoing CH_VC to network port for DO_SET_NET_CHAN action. Cannot
+ * be used in conjunction with DO_SET_SRC_MPORT action.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_NET_CHAN_OFST 52
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_NET_CHAN_LEN 1
+
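As an illustration of how the V3 FLAGS bitfields compose, the sketch below builds the first dword of a request that pushes one VLAN and decrements the IP TTL, and points both counter fields at the null counter ID. This is a hand-rolled example, not driver code; the null counter value is assumed to match the all-ones MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NULL shown earlier, and a real caller would also fill in DELIVER, ENCAP_HEADER_ID and the MAC ID fields with their respective NULL or allocated IDs.

/* Assumed null counter ID (all-ones, as for COUNTER_ALLOC's NULL value). */
#define EXAMPLE_MAE_COUNTER_ID_NULL 0xffffffffu

static void mae_v3_example(uint8_t in[MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LEN])
{
        uint32_t flags = 0;

        memset(in, 0, MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LEN);
        /* VLAN_PUSH is a 2-bit field at LBN 0: push a single VLAN. */
        flags |= 1u << MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_PUSH_LBN;
        /* DO_DECR_IP_TTL is a single-bit flag at LBN 12. */
        flags |= 1u << MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DECR_IP_TTL_LBN;
        put_le32(in, MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAGS_OFST, flags);
        /* No counter action on this action set. */
        put_le32(in, MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_LIST_ID_OFST,
                 EXAMPLE_MAE_COUNTER_ID_NULL);
        put_le32(in, MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_ID_OFST,
                 EXAMPLE_MAE_COUNTER_ID_NULL);
}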
/* MC_CMD_MAE_ACTION_SET_ALLOC_OUT msgresponse */
#define MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN 4
/* The MSB of the AS_ID is guaranteed to be clear if the ID is not
@@ -28680,58 +25030,6 @@
#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_MAXNUM 32
#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_MAXNUM_MCDI2 32
-
-/***********************************/
-/* MC_CMD_MAE_OUTER_RULE_UPDATE
- * Atomically change the response of an Outer Rule.
- */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE 0x17d
-#undef MC_CMD_0x17d_PRIVILEGE_CTG
-
-#define MC_CMD_0x17d_PRIVILEGE_CTG SRIOV_CTG_MAE
-
-/* MC_CMD_MAE_OUTER_RULE_UPDATE_IN msgrequest */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_LEN 16
-/* ID of outer rule to update */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_OR_ID_OFST 0
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_OR_ID_LEN 4
-/* Packets matching the rule will be parsed with this encapsulation. */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ENCAP_TYPE_OFST 4
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ENCAP_TYPE_LEN 4
-/* Enum values, see field(s): */
-/* MAE_MCDI_ENCAP_TYPE */
-/* This field controls the actions that are performed when a rule is hit. */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ACTION_CONTROL_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ACTION_CONTROL_LEN 4
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_LBN 0
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_WIDTH 1
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_LBN 1
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_WIDTH 2
-/* Enum values, see field(s): */
-/* MAE_CT_VNI_MODE */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_LBN 3
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_WIDTH 1
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_LBN 4
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_WIDTH 1
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_LBN 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_WIDTH 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_LBN 16
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_WIDTH 16
-/* ID of counter to increment when the rule is hit. Only used if the DO_COUNT
- * flag is set. The ID must have been allocated with COUNTER_TYPE=OR.
- */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_COUNTER_ID_OFST 12
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_COUNTER_ID_LEN 4
-
-/* MC_CMD_MAE_OUTER_RULE_UPDATE_OUT msgresponse */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_OUT_LEN 0
-
/* MAE_ACTION_RULE_RESPONSE structuredef */
#define MAE_ACTION_RULE_RESPONSE_LEN 16
#define MAE_ACTION_RULE_RESPONSE_ASL_ID_OFST 0
@@ -29122,142 +25420,6 @@
#define MAE_MPORT_DESC_VNIC_PLUGIN_TBD_LBN 352
#define MAE_MPORT_DESC_VNIC_PLUGIN_TBD_WIDTH 32
-/* MAE_MPORT_DESC_V2 structuredef */
-#define MAE_MPORT_DESC_V2_LEN 56
-#define MAE_MPORT_DESC_V2_MPORT_ID_OFST 0
-#define MAE_MPORT_DESC_V2_MPORT_ID_LEN 4
-#define MAE_MPORT_DESC_V2_MPORT_ID_LBN 0
-#define MAE_MPORT_DESC_V2_MPORT_ID_WIDTH 32
-/* Reserved for future purposes, contains information independent of caller */
-#define MAE_MPORT_DESC_V2_FLAGS_OFST 4
-#define MAE_MPORT_DESC_V2_FLAGS_LEN 4
-#define MAE_MPORT_DESC_V2_FLAGS_LBN 32
-#define MAE_MPORT_DESC_V2_FLAGS_WIDTH 32
-#define MAE_MPORT_DESC_V2_CALLER_FLAGS_OFST 8
-#define MAE_MPORT_DESC_V2_CALLER_FLAGS_LEN 4
-#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_OFST 8
-#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_LBN 0
-#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_WIDTH 1
-#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_OFST 8
-#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_LBN 1
-#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_WIDTH 1
-#define MAE_MPORT_DESC_V2_CAN_DELETE_OFST 8
-#define MAE_MPORT_DESC_V2_CAN_DELETE_LBN 2
-#define MAE_MPORT_DESC_V2_CAN_DELETE_WIDTH 1
-#define MAE_MPORT_DESC_V2_IS_ZOMBIE_OFST 8
-#define MAE_MPORT_DESC_V2_IS_ZOMBIE_LBN 3
-#define MAE_MPORT_DESC_V2_IS_ZOMBIE_WIDTH 1
-#define MAE_MPORT_DESC_V2_CALLER_FLAGS_LBN 64
-#define MAE_MPORT_DESC_V2_CALLER_FLAGS_WIDTH 32
-/* Not the ideal name; it's really the type of thing connected to the m-port */
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_OFST 12
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_LEN 4
-/* enum: Connected to a MAC... */
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_NET_PORT 0x0
-/* enum: Adds metadata and delivers to another m-port */
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_ALIAS 0x1
-/* enum: Connected to a VNIC. */
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_VNIC 0x2
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_LBN 96
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_WIDTH 32
-/* 128-bit value available to drivers for m-port identification. */
-#define MAE_MPORT_DESC_V2_UUID_OFST 16
-#define MAE_MPORT_DESC_V2_UUID_LEN 16
-#define MAE_MPORT_DESC_V2_UUID_LBN 128
-#define MAE_MPORT_DESC_V2_UUID_WIDTH 128
-/* Big wadge of space reserved for other common properties */
-#define MAE_MPORT_DESC_V2_RESERVED_OFST 32
-#define MAE_MPORT_DESC_V2_RESERVED_LEN 8
-#define MAE_MPORT_DESC_V2_RESERVED_LO_OFST 32
-#define MAE_MPORT_DESC_V2_RESERVED_LO_LEN 4
-#define MAE_MPORT_DESC_V2_RESERVED_LO_LBN 256
-#define MAE_MPORT_DESC_V2_RESERVED_LO_WIDTH 32
-#define MAE_MPORT_DESC_V2_RESERVED_HI_OFST 36
-#define MAE_MPORT_DESC_V2_RESERVED_HI_LEN 4
-#define MAE_MPORT_DESC_V2_RESERVED_HI_LBN 288
-#define MAE_MPORT_DESC_V2_RESERVED_HI_WIDTH 32
-#define MAE_MPORT_DESC_V2_RESERVED_LBN 256
-#define MAE_MPORT_DESC_V2_RESERVED_WIDTH 64
-/* Logical port index. Only valid when type NET Port. */
-#define MAE_MPORT_DESC_V2_NET_PORT_IDX_OFST 40
-#define MAE_MPORT_DESC_V2_NET_PORT_IDX_LEN 4
-#define MAE_MPORT_DESC_V2_NET_PORT_IDX_LBN 320
-#define MAE_MPORT_DESC_V2_NET_PORT_IDX_WIDTH 32
-/* The m-port delivered to */
-#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_OFST 40
-#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_LEN 4
-#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_LBN 320
-#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_WIDTH 32
-/* The type of thing that owns the VNIC */
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_OFST 40
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_LEN 4
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_FUNCTION 0x1 /* enum */
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_PLUGIN 0x2 /* enum */
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_LBN 320
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_WIDTH 32
-/* The PCIe interface on which the function lives. CJK: We need an enumeration
- * of interfaces that we extend as new interface (types) appear. This belongs
- * elsewhere and should be referenced from here
- */
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_OFST 44
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_LEN 4
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_LBN 352
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_WIDTH 32
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_OFST 48
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_LEN 2
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_LBN 384
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_WIDTH 16
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_OFST 50
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_LEN 2
-/* enum: Indicates that the function is a PF */
-#define MAE_MPORT_DESC_V2_VF_IDX_NULL 0xffff
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_LBN 400
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_WIDTH 16
-/* Reserved. Should be ignored for now. */
-#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_OFST 44
-#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_LEN 4
-#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_LBN 352
-#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_WIDTH 32
-/* A client handle for the VNIC's owner. Only valid for type VNIC. */
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_OFST 52
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_LEN 4
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_LBN 416
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_MAE_MPORT_ENUMERATE
- * Deprecated in favour of MAE_MPORT_READ_JOURNAL. Support for this command
- * will be removed at some future point.
- */
-#define MC_CMD_MAE_MPORT_ENUMERATE 0x17c
-#undef MC_CMD_0x17c_PRIVILEGE_CTG
-
-#define MC_CMD_0x17c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_MAE_MPORT_ENUMERATE_IN msgrequest */
-#define MC_CMD_MAE_MPORT_ENUMERATE_IN_LEN 0
-
-/* MC_CMD_MAE_MPORT_ENUMERATE_OUT msgresponse */
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMIN 8
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMAX 252
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LEN(num) (8+1*(num))
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_NUM(len) (((len)-8)/1)
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_COUNT_OFST 0
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_COUNT_LEN 4
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_SIZEOF_MPORT_DESC_OFST 4
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_SIZEOF_MPORT_DESC_LEN 4
-/* Any array of MAE_MPORT_DESC structures. The MAE_MPORT_DESC structure may
- * grow in future version of this command. Drivers should use a stride of
- * SIZEOF_MPORT_DESC. Fields beyond SIZEOF_MPORT_DESC are not present.
- */
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_OFST 8
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_LEN 1
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MINNUM 0
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MAXNUM 244
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MAXNUM_MCDI2 1012
-
/***********************************/
/* MC_CMD_MAE_MPORT_READ_JOURNAL
@@ -29570,73 +25732,6 @@
/***********************************/
-/* MC_CMD_TABLE_UPDATE
- * Update an existing entry in a table with a new response value. May return
- * EINVAL for unknown table ID or other bad request parameters, ENOENT if the
- * entry does not already exist, or EPERM if the operation is not permitted. In
- * case of an error, the additional MCDI error argument field returns the raw
- * error code from the underlying CAM driver.
- */
-#define MC_CMD_TABLE_UPDATE 0x1ce
-#undef MC_CMD_0x1ce_PRIVILEGE_CTG
-
-#define MC_CMD_0x1ce_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TABLE_UPDATE_IN msgrequest */
-#define MC_CMD_TABLE_UPDATE_IN_LENMIN 16
-#define MC_CMD_TABLE_UPDATE_IN_LENMAX 252
-#define MC_CMD_TABLE_UPDATE_IN_LENMAX_MCDI2 1020
-#define MC_CMD_TABLE_UPDATE_IN_LEN(num) (12+4*(num))
-#define MC_CMD_TABLE_UPDATE_IN_DATA_NUM(len) (((len)-12)/4)
-/* Table identifier. */
-#define MC_CMD_TABLE_UPDATE_IN_TABLE_ID_OFST 0
-#define MC_CMD_TABLE_UPDATE_IN_TABLE_ID_LEN 4
-/* Enum values, see field(s): */
-/* TABLE_ID */
-/* Width in bits of supplied key data (must match table properties). */
-#define MC_CMD_TABLE_UPDATE_IN_KEY_WIDTH_OFST 4
-#define MC_CMD_TABLE_UPDATE_IN_KEY_WIDTH_LEN 2
-/* Width in bits of supplied mask data (0 for direct/BCAM tables, or for STCAM
- * when allocated MASK_ID is used instead).
- */
-#define MC_CMD_TABLE_UPDATE_IN_MASK_WIDTH_OFST 6
-#define MC_CMD_TABLE_UPDATE_IN_MASK_WIDTH_LEN 2
-/* Width in bits of supplied response data (for INSERT and UPDATE operations
- * this must match the table properties; for DELETE operations, no response
- * data is required and this must be 0).
- */
-#define MC_CMD_TABLE_UPDATE_IN_RESP_WIDTH_OFST 8
-#define MC_CMD_TABLE_UPDATE_IN_RESP_WIDTH_LEN 2
-/* Mask ID for STCAM table - used instead of mask data if the table descriptor
- * reports ALLOC_MASKS==1. Otherwise set to 0.
- */
-#define MC_CMD_TABLE_UPDATE_IN_MASK_ID_OFST 6
-#define MC_CMD_TABLE_UPDATE_IN_MASK_ID_LEN 2
-/* Priority for TCAM or STCAM, in range 0..N_PRIORITIES-1, otherwise 0. */
-#define MC_CMD_TABLE_UPDATE_IN_PRIORITY_OFST 8
-#define MC_CMD_TABLE_UPDATE_IN_PRIORITY_LEN 2
-/* (32-bit alignment padding - set to 0) */
-#define MC_CMD_TABLE_UPDATE_IN_RESERVED_OFST 10
-#define MC_CMD_TABLE_UPDATE_IN_RESERVED_LEN 2
-/* Sequence of key, mask (if MASK_WIDTH > 0), and response (if RESP_WIDTH > 0)
- * data values. Each of these items is logically treated as a single wide N-bit
- * value, in which the individual fields have been placed within that value per
- * the LBN and WIDTH information from the table field descriptors. The wide
- * N-bit value is padded with 0 bits at the MSB end if necessary to make a
- * multiple of 32 bits. The value is then packed into this command as a
- * sequence of 32-bit words, bits [31:0] first, then bits [63:32], etc.
- */
-#define MC_CMD_TABLE_UPDATE_IN_DATA_OFST 12
-#define MC_CMD_TABLE_UPDATE_IN_DATA_LEN 4
-#define MC_CMD_TABLE_UPDATE_IN_DATA_MINNUM 1
-#define MC_CMD_TABLE_UPDATE_IN_DATA_MAXNUM 60
-#define MC_CMD_TABLE_UPDATE_IN_DATA_MAXNUM_MCDI2 252
-
-/* MC_CMD_TABLE_UPDATE_OUT msgresponse */
-#define MC_CMD_TABLE_UPDATE_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_TABLE_DELETE
* Delete an existing entry in a table. May return EINVAL for unknown table ID
* or other bad request parameters, ENOENT if the entry does not exist, or
@@ -29702,5 +25797,124 @@
/* MC_CMD_TABLE_DELETE_OUT msgresponse */
#define MC_CMD_TABLE_DELETE_OUT_LEN 0
+/* MC_CMD_QUEUE_HANDLE structuredef: On X4, to distinguish between full-
+ * featured (X2-style) VIs and low-latency (X3-style) queues, we use the top
+ * bits of the queue handle to specify the queue type in all MCDI calls which
+ * refer to VIs/queues. These bits should be masked off when indexing into a
+ * queue in the BAR.
+ */
+#define MC_CMD_QUEUE_HANDLE_LEN 4
+/* Combined queue number and type. This is the ID returned by and passed into
+ * MCDI calls that use queues.
+ */
+#define MC_CMD_QUEUE_HANDLE_QUEUE_HANDLE_OFST 0
+#define MC_CMD_QUEUE_HANDLE_QUEUE_HANDLE_LEN 4
+#define MC_CMD_QUEUE_HANDLE_QUEUE_NUM_OFST 0
+#define MC_CMD_QUEUE_HANDLE_QUEUE_NUM_LBN 0
+#define MC_CMD_QUEUE_HANDLE_QUEUE_NUM_WIDTH 24
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_OFST 0
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LBN 24
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_WIDTH 8
+/* enum: Indicates that the queue instance is a full-featured VI */
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_FF_VI 0x0
+/* enum: Indicates that the queue instance is a LL TXQ */
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LL_TXQ 0x1
+/* enum: Indicates that the queue instance is a LL RXQ */
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LL_RXQ 0x2
+/* enum: Indicates that the queue instance is a LL EVQ */
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LL_EVQ 0x3
+#define MC_CMD_QUEUE_HANDLE_QUEUE_HANDLE_LBN 0
+#define MC_CMD_QUEUE_HANDLE_QUEUE_HANDLE_WIDTH 32
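As an aside, the handle layout above can be unpacked directly with the NUM/TYPE constants just defined. The sketch below is illustrative only; the helper names are invented for this example and are not part of the driver.

/* Hypothetical helpers: split an MC_CMD_QUEUE_HANDLE into the 24-bit queue
 * number (used when indexing into the BAR) and the 8-bit queue type.
 */
static inline u32 example_ll_queue_num(u32 handle)
{
	return handle & ((1U << MC_CMD_QUEUE_HANDLE_QUEUE_NUM_WIDTH) - 1);
}

static inline u32 example_ll_queue_type(u32 handle)
{
	return handle >> MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LBN;
}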
+
+
+/***********************************/
+/* MC_CMD_ALLOC_LL_QUEUES
+ * Allocate low latency (X3-style) queues for the current PCI function. May be
+ * called more than once to allocate additional queues.
+ */
+#define MC_CMD_ALLOC_LL_QUEUES 0x1dd
+#undef MC_CMD_0x1dd_PRIVILEGE_CTG
+
+#define MC_CMD_0x1dd_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_ALLOC_LL_QUEUES_IN msgrequest */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_LEN 24
+/* The minimum number of TXQs that is acceptable */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_TXQ_COUNT_OFST 0
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_TXQ_COUNT_LEN 4
+/* The maximum number of TXQs that would be useful */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_TXQ_COUNT_OFST 4
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_TXQ_COUNT_LEN 4
+/* The minimum number of RXQs that is acceptable */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_RXQ_COUNT_OFST 8
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_RXQ_COUNT_LEN 4
+/* The maximum number of RXQs that would be useful */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_RXQ_COUNT_OFST 12
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_RXQ_COUNT_LEN 4
+/* The minimum number of EVQs that is acceptable */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_EVQ_COUNT_OFST 16
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_EVQ_COUNT_LEN 4
+/* The maximum number of EVQs that would be useful */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_EVQ_COUNT_OFST 20
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_EVQ_COUNT_LEN 4
+
+/* MC_CMD_ALLOC_LL_QUEUES_OUT msgresponse */
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_LENMIN 16
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_LENMAX 252
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_LEN(num) (12+4*(num))
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_NUM(len) (((len)-12)/4)
+/* The number of TXQs allocated in this request */
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_TXQ_COUNT_OFST 0
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_TXQ_COUNT_LEN 4
+/* The number of RXQs allocated in this request */
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_RXQ_COUNT_OFST 4
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_RXQ_COUNT_LEN 4
+/* The number of EVQs allocated in this request */
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_EVQ_COUNT_OFST 8
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_EVQ_COUNT_LEN 4
+/* A list of allocated queues, returned as MC_CMD_QUEUE_HANDLEs, not
+ * necessarily contiguous. TXQs come first in the list, followed by RXQs and
+ * then EVQs. The type of each queue is indicated by the top bits (see the
+ * QUEUE_TYPE enum).
+ */
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_OFST 12
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_LEN 4
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_MINNUM 1
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_MAXNUM 60
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_MAXNUM_MCDI2 252
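To make the request/response shape concrete, here is a minimal sketch of allocating low-latency queues. It assumes the driver's usual MCDI helpers (MCDI_DECLARE_BUF, MCDI_SET_DWORD, MCDI_ARRAY_DWORD) and efx_mcdi_rpc(); the function name, the min/max counts and the omitted error/partial-allocation handling are placeholders for illustration, not driver code.

static int example_alloc_ll_queues(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_LL_QUEUES_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_LL_QUEUES_OUT_LENMAX);
	unsigned int i, n;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, ALLOC_LL_QUEUES_IN_MIN_TXQ_COUNT, 1);
	MCDI_SET_DWORD(inbuf, ALLOC_LL_QUEUES_IN_MAX_TXQ_COUNT, 4);
	MCDI_SET_DWORD(inbuf, ALLOC_LL_QUEUES_IN_MIN_RXQ_COUNT, 1);
	MCDI_SET_DWORD(inbuf, ALLOC_LL_QUEUES_IN_MAX_RXQ_COUNT, 4);
	MCDI_SET_DWORD(inbuf, ALLOC_LL_QUEUES_IN_MIN_EVQ_COUNT, 1);
	MCDI_SET_DWORD(inbuf, ALLOC_LL_QUEUES_IN_MAX_EVQ_COUNT, 4);

	rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_LL_QUEUES, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;

	/* Handles are ordered TXQs, then RXQs, then EVQs; the top bits of
	 * each handle carry the QUEUE_TYPE.
	 */
	n = MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_NUM(outlen);
	for (i = 0; i < n; i++) {
		u32 handle = MCDI_ARRAY_DWORD(outbuf,
					      ALLOC_LL_QUEUES_OUT_QUEUES, i);

		(void)handle;	/* record the handle for later use/freeing */
	}
	return 0;
}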
+
+
+/***********************************/
+/* MC_CMD_FREE_LL_QUEUES
+ * Free low latency (X3-style) queues for the current PCI function.
+ */
+#define MC_CMD_FREE_LL_QUEUES 0x1de
+#undef MC_CMD_0x1de_PRIVILEGE_CTG
+
+#define MC_CMD_0x1de_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FREE_LL_QUEUES_IN msgrequest */
+#define MC_CMD_FREE_LL_QUEUES_IN_LENMIN 8
+#define MC_CMD_FREE_LL_QUEUES_IN_LENMAX 252
+#define MC_CMD_FREE_LL_QUEUES_IN_LENMAX_MCDI2 1020
+#define MC_CMD_FREE_LL_QUEUES_IN_LEN(num) (4+4*(num))
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_NUM(len) (((len)-4)/4)
+/* The number of queues to free. */
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUE_COUNT_OFST 0
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUE_COUNT_LEN 4
+/* A list of queues to free, as a list of MC_CMD_QUEUE_HANDLEs. They must have
+ * all been previously allocated by MC_CMD_ALLOC_LL_QUEUES. The type of each
+ * queue should be indicated by the top bits.
+ */
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_OFST 4
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_LEN 4
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_MINNUM 1
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_MAXNUM 62
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_MAXNUM_MCDI2 254
+
+/* MC_CMD_FREE_LL_QUEUES_OUT msgresponse */
+#define MC_CMD_FREE_LL_QUEUES_OUT_LEN 0
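Freeing follows the same pattern. A sketch, again with a hypothetical function name and assuming the MCDI_SET_ARRAY_DWORD helper, for a caller holding an array of previously returned handles (count must not exceed the MAXNUM limit above):

static int example_free_ll_queues(struct efx_nic *efx,
				  const u32 *handles, unsigned int count)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_LL_QUEUES_IN_LENMAX);
	unsigned int i;

	MCDI_SET_DWORD(inbuf, FREE_LL_QUEUES_IN_QUEUE_COUNT, count);
	for (i = 0; i < count; i++)
		MCDI_SET_ARRAY_DWORD(inbuf, FREE_LL_QUEUES_IN_QUEUES, i,
				     handles[i]);

	return efx_mcdi_rpc(efx, MC_CMD_FREE_LL_QUEUES, inbuf,
			    MC_CMD_FREE_LL_QUEUES_IN_LEN(count),
			    NULL, 0, NULL);
}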
+
#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index ad4694fa3dda..7b236d291d8c 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -17,58 +17,6 @@
#include "selftest.h"
#include "mcdi_port_common.h"
-static int efx_mcdi_mdio_read(struct net_device *net_dev,
- int prtad, int devad, u16 addr)
-{
- struct efx_nic *efx = efx_netdev_priv(net_dev);
- MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN);
- size_t outlen;
- int rc;
-
- MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, efx->mdio_bus);
- MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad);
- MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad);
- MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- return rc;
-
- if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) !=
- MC_CMD_MDIO_STATUS_GOOD)
- return -EIO;
-
- return (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
-}
-
-static int efx_mcdi_mdio_write(struct net_device *net_dev,
- int prtad, int devad, u16 addr, u16 value)
-{
- struct efx_nic *efx = efx_netdev_priv(net_dev);
- MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_WRITE_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_WRITE_OUT_LEN);
- size_t outlen;
- int rc;
-
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, efx->mdio_bus);
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad);
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad);
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr);
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_VALUE, value);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- return rc;
-
- if (MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS) !=
- MC_CMD_MDIO_STATUS_GOOD)
- return -EIO;
-
- return 0;
-}
u32 efx_mcdi_phy_get_caps(struct efx_nic *efx)
{
@@ -97,12 +45,7 @@ int efx_mcdi_port_probe(struct efx_nic *efx)
{
int rc;
- /* Set up MDIO structure for PHY */
- efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
- efx->mdio.mdio_read = efx_mcdi_mdio_read;
- efx->mdio.mdio_write = efx_mcdi_mdio_write;
-
- /* Fill out MDIO structure, loopback modes, and initial link state */
+ /* Fill out loopback modes and initial link state */
rc = efx_mcdi_phy_probe(efx);
if (rc != 0)
return rc;
diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.c b/drivers/net/ethernet/sfc/mcdi_port_common.c
index 76ea26722ca4..dae684194ac8 100644
--- a/drivers/net/ethernet/sfc/mcdi_port_common.c
+++ b/drivers/net/ethernet/sfc/mcdi_port_common.c
@@ -448,15 +448,6 @@ int efx_mcdi_phy_probe(struct efx_nic *efx)
efx->phy_data = phy_data;
efx->phy_type = phy_data->type;
- efx->mdio_bus = phy_data->channel;
- efx->mdio.prtad = phy_data->port;
- efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22);
- efx->mdio.mode_support = 0;
- if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22))
- efx->mdio.mode_support |= MDIO_SUPPORTS_C22;
- if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22))
- efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
-
caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
mcdi_to_ethtool_linkset(phy_data->media, caps,
@@ -546,8 +537,6 @@ void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx, struct ethtool_link_ks
cmd->base.port = mcdi_to_ethtool_media(phy_cfg->media);
cmd->base.phy_address = phy_cfg->port;
cmd->base.autoneg = !!(efx->link_advertising[0] & ADVERTISED_Autoneg);
- cmd->base.mdio_support = (efx->mdio.mode_support &
- (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22));
mcdi_to_ethtool_linkset(phy_cfg->media, phy_cfg->supported_cap,
cmd->link_modes.supported);
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index b54662d32f55..b98c259f672d 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -15,7 +15,7 @@
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/timer.h>
-#include <linux/mdio.h>
+#include <linux/mii.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/device.h>
@@ -404,7 +404,6 @@ struct efx_rx_page_state {
* @old_rx_packets: Value of @rx_packets as of last efx_init_rx_queue()
* @old_rx_bytes: Value of @rx_bytes as of last efx_init_rx_queue()
* @xdp_rxq_info: XDP specific RX queue information.
- * @xdp_rxq_info_valid: Is xdp_rxq_info valid data?.
*/
struct efx_rx_queue {
struct efx_nic *efx;
@@ -443,7 +442,6 @@ struct efx_rx_queue {
unsigned long old_rx_packets;
unsigned long old_rx_bytes;
struct xdp_rxq_info xdp_rxq_info;
- bool xdp_rxq_info_valid;
};
enum efx_sync_events_state {
@@ -831,6 +829,7 @@ struct efx_arfs_rule {
/**
* struct efx_async_filter_insertion - Request to asynchronously insert a filter
* @net_dev: Reference to the netdevice
+ * @net_dev_tracker: reference tracker entry for @net_dev
* @spec: The filter to insert
* @work: Workitem for this request
* @rxq_index: Identifies the channel for which this request was made
@@ -838,6 +837,7 @@ struct efx_arfs_rule {
*/
struct efx_async_filter_insertion {
struct net_device *net_dev;
+ netdevice_tracker net_dev_tracker;
struct efx_filter_spec spec;
struct work_struct work;
u16 rxq_index;
@@ -954,8 +954,6 @@ struct efx_mae;
* @stats_buffer: DMA buffer for statistics
* @phy_type: PHY type
* @phy_data: PHY private data (including PHY-specific stats)
- * @mdio: PHY MDIO interface
- * @mdio_bus: PHY MDIO bus ID (only used by Siena)
* @phy_mode: PHY operating mode. Serialised by @mac_lock.
* @link_advertising: Autonegotiation advertising flags
* @fec_config: Forward Error Correction configuration flags. For bit positions
@@ -1004,6 +1002,7 @@ struct efx_mae;
* @dl_port: devlink port associated with the PF
* @mem_bar: The BAR that is mapped into membase.
* @reg_base: Offset from the start of the bar to the function control window.
+ * @reflash_mutex: Mutex for serialising firmware reflash operations.
* @monitor_work: Hardware monitor workitem
* @biu_lock: BIU (bus interface unit) lock
* @last_irq_cpu: Last CPU to handle a possible test interrupt. This
@@ -1129,8 +1128,6 @@ struct efx_nic {
unsigned int phy_type;
void *phy_data;
- struct mdio_if_info mdio;
- unsigned int mdio_bus;
enum efx_phy_mode phy_mode;
__ETHTOOL_DECLARE_LINK_MODE_MASK(link_advertising);
@@ -1189,6 +1186,7 @@ struct efx_nic {
struct devlink_port *dl_port;
unsigned int mem_bar;
u32 reg_base;
+ struct mutex reflash_mutex;
/* The following fields may be written more often */
@@ -1381,6 +1379,8 @@ struct efx_udp_tunnel {
* @can_rx_scatter: NIC is able to scatter packets to multiple buffers
* @always_rx_scatter: NIC will always scatter packets to multiple buffers
* @option_descriptors: NIC supports TX option descriptors
+ * @flash_auto_partition: firmware flash uses AUTO partition, driver does
+ * not need to perform image parsing
* @min_interrupt_mode: Lowest capability interrupt mode supported
* from &enum efx_int_mode.
* @timer_period_max: Maximum period of interrupt timer (in ticks)
@@ -1408,7 +1408,7 @@ struct efx_nic_type {
int (*fini_dmaq)(struct efx_nic *efx);
void (*prepare_flr)(struct efx_nic *efx);
void (*finish_flr)(struct efx_nic *efx);
- size_t (*describe_stats)(struct efx_nic *efx, u8 *names);
+ size_t (*describe_stats)(struct efx_nic *efx, u8 **names);
size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
struct rtnl_link_stats64 *core_stats);
size_t (*update_stats_atomic)(struct efx_nic *efx, u64 *full_stats,
@@ -1557,6 +1557,7 @@ struct efx_nic_type {
bool can_rx_scatter;
bool always_rx_scatter;
bool option_descriptors;
+ bool flash_auto_partition;
unsigned int min_interrupt_mode;
unsigned int timer_period_max;
netdev_features_t offload_features;
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index a33ed473cc8a..80aa5e9c732a 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -299,18 +299,15 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)
* bits in the first @count bits of @mask for which a name is defined.
*/
size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
- const unsigned long *mask, u8 *names)
+ const unsigned long *mask, u8 **names)
{
size_t visible = 0;
size_t index;
for_each_set_bit(index, mask, count) {
if (desc[index].name) {
- if (names) {
- strscpy(names, desc[index].name,
- ETH_GSTRING_LEN);
- names += ETH_GSTRING_LEN;
- }
+ if (names)
+ ethtool_puts(names, desc[index].name);
++visible;
}
}
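For context on the u8 **names change running through this patch: ethtool_puts() copies a name into the current ETH_GSTRING_LEN slot and advances the caller's cursor, so describe-style callbacks no longer do fixed-stride pointer arithmetic themselves. A minimal caller-side sketch (buffer sizing and the literal stat name are illustrative only):

	u8 *strings = buf;	/* buf sized for the full ETH_SS_STATS set */

	efx->type->describe_stats(efx, &strings);  /* appends NIC stat names */
	ethtool_puts(&strings, "rx_noskb_drops");  /* appends one more name  */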
diff --git a/drivers/net/ethernet/sfc/nic_common.h b/drivers/net/ethernet/sfc/nic_common.h
index 7ec4ac7b7ff5..821d91efda19 100644
--- a/drivers/net/ethernet/sfc/nic_common.h
+++ b/drivers/net/ethernet/sfc/nic_common.h
@@ -241,7 +241,7 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf);
#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
- const unsigned long *mask, u8 *names);
+ const unsigned long *mask, u8 **names);
int efx_nic_copy_stats(struct efx_nic *efx, __le64 *dest);
void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
const unsigned long *mask, u64 *stats,
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index aaacdcfa54ae..4c7222bf26be 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -399,7 +399,7 @@ static const unsigned long efx_ptp_stat_mask[] = {
[0 ... BITS_TO_LONGS(PTP_STAT_COUNT) - 1] = ~0UL,
};
-size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings)
+size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 **strings)
{
if (!efx->ptp_data)
return 0;
@@ -1800,11 +1800,6 @@ int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
return NETDEV_TX_OK;
}
-int efx_ptp_get_mode(struct efx_nic *efx)
-{
- return efx->ptp_data->mode;
-}
-
int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
unsigned int new_mode)
{
diff --git a/drivers/net/ethernet/sfc/ptp.h b/drivers/net/ethernet/sfc/ptp.h
index 6946203499ef..cb9b077921e8 100644
--- a/drivers/net/ethernet/sfc/ptp.h
+++ b/drivers/net/ethernet/sfc/ptp.h
@@ -26,12 +26,11 @@ int efx_ptp_get_ts_config(struct efx_nic *efx,
void efx_ptp_get_ts_info(struct efx_nic *efx,
struct kernel_ethtool_ts_info *ts_info);
bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
-int efx_ptp_get_mode(struct efx_nic *efx);
int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
unsigned int new_mode);
int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
-size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings);
+size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 **strings);
size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats);
void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev);
void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index ab358fe13e1d..5306f4c44be4 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -269,8 +269,6 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
"Failure to initialise XDP queue information rc=%d\n",
rc);
efx->xdp_rxq_info_failed = true;
- } else {
- rx_queue->xdp_rxq_info_valid = true;
}
/* Set up RX descriptor ring */
@@ -285,7 +283,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
"shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
- del_timer_sync(&rx_queue->slow_fill);
+ timer_delete_sync(&rx_queue->slow_fill);
if (rx_queue->grant_credits)
flush_work(&rx_queue->grant_work);
@@ -302,10 +300,8 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
efx_fini_rx_recycle_ring(rx_queue);
- if (rx_queue->xdp_rxq_info_valid)
+ if (xdp_rxq_info_is_reg(&rx_queue->xdp_rxq_info))
xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);
-
- rx_queue->xdp_rxq_info_valid = false;
}
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
@@ -352,7 +348,8 @@ void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
void efx_rx_slow_fill(struct timer_list *t)
{
- struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
+ struct efx_rx_queue *rx_queue = timer_container_of(rx_queue, t,
+ slow_fill);
/* Post an event to cause NAPI to run and refill the queue */
efx_nic_generate_fill_event(rx_queue);
@@ -897,7 +894,7 @@ static void efx_filter_rfs_work(struct work_struct *data)
/* Release references */
clear_bit(slot_idx, &efx->rps_slot_map);
- dev_put(req->net_dev);
+ netdev_put(req->net_dev, &req->net_dev_tracker);
}
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
@@ -989,7 +986,8 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
}
/* Queue the request */
- dev_hold(req->net_dev = net_dev);
+ req->net_dev = net_dev;
+ netdev_hold(req->net_dev, &req->net_dev_tracker, GFP_ATOMIC);
INIT_WORK(&req->work, efx_filter_rfs_work);
req->rxq_index = rxq_index;
req->flow_id = flow_id;
diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c
index d120b3c83ac0..fc075ab6b7b5 100644
--- a/drivers/net/ethernet/sfc/siena/efx_channels.c
+++ b/drivers/net/ethernet/sfc/siena/efx_channels.c
@@ -217,8 +217,8 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
if (efx_siena_separate_tx_channels) {
efx->n_tx_channels =
- min(max(n_channels / 2, 1U),
- efx->max_tx_channels);
+ clamp(n_channels / 2, 1U,
+ efx->max_tx_channels);
efx->tx_channel_offset =
n_channels - efx->n_tx_channels;
efx->n_rx_channels =
@@ -1300,7 +1300,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
time = jiffies - channel->rfs_last_expiry;
/* Would our quota be >= 20? */
if (channel->rfs_filter_count * time >= 600 * HZ)
- mod_delayed_work(system_wq, &channel->filter_work, 0);
+ mod_delayed_work(system_percpu_wq, &channel->filter_work, 0);
#endif
/* There is no race here; although napi_disable() will
diff --git a/drivers/net/ethernet/sfc/siena/efx_common.c b/drivers/net/ethernet/sfc/siena/efx_common.c
index a0966f879664..35036cc902fe 100644
--- a/drivers/net/ethernet/sfc/siena/efx_common.c
+++ b/drivers/net/ethernet/sfc/siena/efx_common.c
@@ -1285,9 +1285,6 @@ out:
/* For simplicity and reliability, we always require a slot reset and try to
* reset the hardware when a pci error affecting the device is detected.
- * We leave both the link_reset and mmio_enabled callback unimplemented:
- * with our request for slot reset the mmio_enabled callback will never be
- * called, and the link_reset callback is not used by AER or EEH mechanisms.
*/
const struct pci_error_handlers efx_siena_err_handlers = {
.error_detected = efx_io_error_detected,
diff --git a/drivers/net/ethernet/sfc/siena/ethtool.c b/drivers/net/ethernet/sfc/siena/ethtool.c
index c5ad84db9613..8c3ebd0617fb 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool.c
+++ b/drivers/net/ethernet/sfc/siena/ethtool.c
@@ -217,7 +217,8 @@ static int efx_ethtool_set_wol(struct net_device *net_dev,
}
static void efx_ethtool_get_fec_stats(struct net_device *net_dev,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -264,6 +265,7 @@ const struct ethtool_ops efx_siena_ethtool_ops = {
.get_rxfh_key_size = efx_siena_ethtool_get_rxfh_key_size,
.get_rxfh = efx_siena_ethtool_get_rxfh,
.set_rxfh = efx_siena_ethtool_set_rxfh,
+ .get_rxfh_fields = efx_siena_ethtool_get_rxfh_fields,
.get_ts_info = efx_ethtool_get_ts_info,
.get_module_info = efx_siena_ethtool_get_module_info,
.get_module_eeprom = efx_siena_ethtool_get_module_eeprom,
diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.c b/drivers/net/ethernet/sfc/siena/ethtool_common.c
index 075fef64de68..47cd16a113cf 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/siena/ethtool_common.c
@@ -395,7 +395,7 @@ fail:
test->flags |= ETH_TEST_FL_FAILED;
}
-static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
+static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 **strings)
{
size_t n_stats = 0;
struct efx_channel *channel;
@@ -403,24 +403,22 @@ static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
efx_for_each_channel(channel, efx) {
if (efx_channel_has_tx_queues(channel)) {
n_stats++;
- if (strings != NULL) {
- snprintf(strings, ETH_GSTRING_LEN,
- "tx-%u.tx_packets",
- channel->tx_queue[0].queue /
- EFX_MAX_TXQ_PER_CHANNEL);
+ if (!strings)
+ continue;
- strings += ETH_GSTRING_LEN;
- }
+ ethtool_sprintf(strings, "tx-%u.tx_packets",
+ channel->tx_queue[0].queue /
+ EFX_MAX_TXQ_PER_CHANNEL);
}
}
efx_for_each_channel(channel, efx) {
if (efx_channel_has_rx_queue(channel)) {
n_stats++;
- if (strings != NULL) {
- snprintf(strings, ETH_GSTRING_LEN,
- "rx-%d.rx_packets", channel->channel);
- strings += ETH_GSTRING_LEN;
- }
+ if (!strings)
+ continue;
+
+ ethtool_sprintf(strings, "rx-%d.rx_packets",
+ channel->channel);
}
}
if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
@@ -428,11 +426,11 @@ static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
n_stats++;
- if (strings) {
- snprintf(strings, ETH_GSTRING_LEN,
- "tx-xdp-cpu-%hu.tx_packets", xdp);
- strings += ETH_GSTRING_LEN;
- }
+ if (!strings)
+ continue;
+
+ ethtool_sprintf(strings, "tx-xdp-cpu-%hu.tx_packets",
+ xdp);
}
}
@@ -464,15 +462,11 @@ void efx_siena_ethtool_get_strings(struct net_device *net_dev,
switch (string_set) {
case ETH_SS_STATS:
- strings += (efx->type->describe_stats(efx, strings) *
- ETH_GSTRING_LEN);
+ efx->type->describe_stats(efx, &strings);
for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
- strscpy(strings + i * ETH_GSTRING_LEN,
- efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
- strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
- strings += (efx_describe_per_queue_stats(efx, strings) *
- ETH_GSTRING_LEN);
- efx_siena_ptp_describe_stats(efx, strings);
+ ethtool_puts(&strings, efx_sw_stat_desc[i].name);
+ efx_describe_per_queue_stats(efx, &strings);
+ efx_siena_ptp_describe_stats(efx, &strings);
break;
case ETH_SS_TEST:
efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
@@ -807,6 +801,46 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
return rc;
}
+int efx_siena_ethtool_get_rxfh_fields(struct net_device *net_dev,
+ struct ethtool_rxfh_fields *info)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ __u64 data;
+
+ data = 0;
+ if (!efx_rss_active(&efx->rss_context)) /* No RSS */
+ goto out_setdata;
+
+ switch (info->flow_type) {
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ if (efx->rss_context.rx_hash_udp_4tuple)
+ data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
+ RXH_IP_SRC | RXH_IP_DST);
+ else
+ data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
+ RXH_IP_SRC | RXH_IP_DST);
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ break;
+ }
+out_setdata:
+ info->data = data;
+ return 0;
+}
+
int efx_siena_ethtool_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info, u32 *rule_locs)
{
@@ -819,43 +853,6 @@ int efx_siena_ethtool_get_rxnfc(struct net_device *net_dev,
info->data = efx->n_rx_channels;
return 0;
- case ETHTOOL_GRXFH: {
- __u64 data;
-
- data = 0;
- if (!efx_rss_active(&efx->rss_context)) /* No RSS */
- goto out_setdata;
-
- switch (info->flow_type) {
- case UDP_V4_FLOW:
- case UDP_V6_FLOW:
- if (efx->rss_context.rx_hash_udp_4tuple)
- data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
- RXH_IP_SRC | RXH_IP_DST);
- else
- data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case TCP_V4_FLOW:
- case TCP_V6_FLOW:
- data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
- RXH_IP_SRC | RXH_IP_DST);
- break;
- case SCTP_V4_FLOW:
- case SCTP_V6_FLOW:
- case AH_ESP_V4_FLOW:
- case AH_ESP_V6_FLOW:
- case IPV4_FLOW:
- case IPV6_FLOW:
- data = RXH_IP_SRC | RXH_IP_DST;
- break;
- default:
- break;
- }
-out_setdata:
- info->data = data;
- return rc;
- }
-
case ETHTOOL_GRXCLSRLCNT:
info->data = efx_filter_get_rx_id_limit(efx);
if (info->data == 0)
diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.h b/drivers/net/ethernet/sfc/siena/ethtool_common.h
index d674bab0f65b..278d69e920d9 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool_common.h
+++ b/drivers/net/ethernet/sfc/siena/ethtool_common.h
@@ -46,6 +46,8 @@ int efx_siena_ethtool_get_rxfh(struct net_device *net_dev,
int efx_siena_ethtool_set_rxfh(struct net_device *net_dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack);
+int efx_siena_ethtool_get_rxfh_fields(struct net_device *net_dev,
+ struct ethtool_rxfh_fields *info);
int efx_siena_ethtool_reset(struct net_device *net_dev, u32 *flags);
int efx_siena_ethtool_get_module_eeprom(struct net_device *net_dev,
struct ethtool_eeprom *ee,
diff --git a/drivers/net/ethernet/sfc/siena/farch.c b/drivers/net/ethernet/sfc/siena/farch.c
index 89ccd65c978b..562a038e38a7 100644
--- a/drivers/net/ethernet/sfc/siena/farch.c
+++ b/drivers/net/ethernet/sfc/siena/farch.c
@@ -1708,7 +1708,7 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
if (efx->vf_count > vf_limit) {
netif_err(efx, probe, efx->net_dev,
- "Reducing VF count from from %d to %d\n",
+ "Reducing VF count from %d to %d\n",
efx->vf_count, vf_limit);
efx->vf_count = vf_limit;
}
diff --git a/drivers/net/ethernet/sfc/siena/mcdi.c b/drivers/net/ethernet/sfc/siena/mcdi.c
index 3f7899daa86a..c8f0fb43e285 100644
--- a/drivers/net/ethernet/sfc/siena/mcdi.c
+++ b/drivers/net/ethernet/sfc/siena/mcdi.c
@@ -534,7 +534,7 @@ static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
* of it aborting the next request.
*/
if (!timeout)
- del_timer_sync(&mcdi->async_timer);
+ timer_delete_sync(&mcdi->async_timer);
spin_lock(&mcdi->async_lock);
async = list_first_entry(&mcdi->async_list,
@@ -609,7 +609,7 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
static void efx_mcdi_timeout_async(struct timer_list *t)
{
- struct efx_mcdi_iface *mcdi = from_timer(mcdi, t, async_timer);
+ struct efx_mcdi_iface *mcdi = timer_container_of(mcdi, t, async_timer);
efx_mcdi_complete_async(mcdi, true);
}
@@ -1145,7 +1145,7 @@ void efx_siena_mcdi_flush_async(struct efx_nic *efx)
/* We must be in poll or fail mode so no more requests can be queued */
BUG_ON(mcdi->mode == MCDI_MODE_EVENTS);
- del_timer_sync(&mcdi->async_timer);
+ timer_delete_sync(&mcdi->async_timer);
/* If a request is still running, make sure we give the MC
* time to complete it so that the response won't overwrite our
diff --git a/drivers/net/ethernet/sfc/siena/mcdi_pcol.h b/drivers/net/ethernet/sfc/siena/mcdi_pcol.h
index a3cc8b7ec732..b81b0aa460d2 100644
--- a/drivers/net/ethernet/sfc/siena/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/siena/mcdi_pcol.h
@@ -6704,16 +6704,16 @@
#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
-/* interpretation is is sensor-specific. */
+/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_LEN 4
-/* interpretation is is sensor-specific. */
+/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_LEN 4
-/* interpretation is is sensor-specific. */
+/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_LEN 4
-/* interpretation is is sensor-specific. */
+/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_LEN 4
@@ -7823,7 +7823,7 @@
* handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST
*
* Any handles which do not correspond to a sensor currently managed by the MC
- * will be dropped from from the response. This may happen when a sensor table
+ * will be dropped from the response. This may happen when a sensor table
* update is in progress, and effectively means the set of usable sensors is
* the intersection between the sets of sensors known to the driver and the MC.
*
@@ -7872,7 +7872,7 @@
* provided should be treated as erroneous.
*
* Any handles which do not correspond to a sensor currently managed by the MC
- * will be dropped from from the response. This may happen when a sensor table
+ * will be dropped from the response. This may happen when a sensor table
* update is in progress, and effectively means the set of usable sensors is
* the intersection between the sets of sensors known to the driver and the MC.
*
diff --git a/drivers/net/ethernet/sfc/siena/net_driver.h b/drivers/net/ethernet/sfc/siena/net_driver.h
index 3fa7c652ae9b..4cf556782133 100644
--- a/drivers/net/ethernet/sfc/siena/net_driver.h
+++ b/drivers/net/ethernet/sfc/siena/net_driver.h
@@ -384,7 +384,6 @@ struct efx_rx_page_state {
* @recycle_count: RX buffer recycle counter.
* @slow_fill: Timer used to defer efx_nic_generate_fill_event().
* @xdp_rxq_info: XDP specific RX queue information.
- * @xdp_rxq_info_valid: Is xdp_rxq_info valid data?.
*/
struct efx_rx_queue {
struct efx_nic *efx;
@@ -417,7 +416,6 @@ struct efx_rx_queue {
/* Statistics to supplement MAC stats */
unsigned long rx_packets;
struct xdp_rxq_info xdp_rxq_info;
- bool xdp_rxq_info_valid;
};
enum efx_sync_events_state {
@@ -753,6 +751,7 @@ struct efx_arfs_rule {
/**
* struct efx_async_filter_insertion - Request to asynchronously insert a filter
* @net_dev: Reference to the netdevice
+ * @net_dev_tracker: reference tracker entry for @net_dev
* @spec: The filter to insert
* @work: Workitem for this request
* @rxq_index: Identifies the channel for which this request was made
@@ -760,6 +759,7 @@ struct efx_arfs_rule {
*/
struct efx_async_filter_insertion {
struct net_device *net_dev;
+ netdevice_tracker net_dev_tracker;
struct efx_filter_spec spec;
struct work_struct work;
u16 rxq_index;
@@ -1307,7 +1307,7 @@ struct efx_nic_type {
void (*finish_flush)(struct efx_nic *efx);
void (*prepare_flr)(struct efx_nic *efx);
void (*finish_flr)(struct efx_nic *efx);
- size_t (*describe_stats)(struct efx_nic *efx, u8 *names);
+ size_t (*describe_stats)(struct efx_nic *efx, u8 **names);
size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
struct rtnl_link_stats64 *core_stats);
size_t (*update_stats_atomic)(struct efx_nic *efx, u64 *full_stats,
diff --git a/drivers/net/ethernet/sfc/siena/nic.c b/drivers/net/ethernet/sfc/siena/nic.c
index 0ea0433a6230..32fce70085e3 100644
--- a/drivers/net/ethernet/sfc/siena/nic.c
+++ b/drivers/net/ethernet/sfc/siena/nic.c
@@ -449,20 +449,20 @@ void efx_siena_get_regs(struct efx_nic *efx, void *buf)
* Returns the number of visible statistics, i.e. the number of set
* bits in the first @count bits of @mask for which a name is defined.
*/
-size_t efx_siena_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
- const unsigned long *mask, u8 *names)
+size_t efx_siena_describe_stats(const struct efx_hw_stat_desc *desc,
+ size_t count, const unsigned long *mask,
+ u8 **names)
{
size_t visible = 0;
size_t index;
for_each_set_bit(index, mask, count) {
if (desc[index].name) {
- if (names) {
- strscpy(names, desc[index].name,
- ETH_GSTRING_LEN);
- names += ETH_GSTRING_LEN;
- }
++visible;
+ if (!names)
+ continue;
+
+ ethtool_puts(names, desc[index].name);
}
}
diff --git a/drivers/net/ethernet/sfc/siena/nic_common.h b/drivers/net/ethernet/sfc/siena/nic_common.h
index 3af0405eeaa4..b7fbe198008d 100644
--- a/drivers/net/ethernet/sfc/siena/nic_common.h
+++ b/drivers/net/ethernet/sfc/siena/nic_common.h
@@ -239,8 +239,9 @@ void efx_siena_get_regs(struct efx_nic *efx, void *buf);
#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
-size_t efx_siena_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
- const unsigned long *mask, u8 *names);
+size_t efx_siena_describe_stats(const struct efx_hw_stat_desc *desc,
+ size_t count, const unsigned long *mask,
+ u8 **names);
void efx_siena_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
const unsigned long *mask, u64 *stats,
const void *dma_buf, bool accumulate);
diff --git a/drivers/net/ethernet/sfc/siena/ptp.c b/drivers/net/ethernet/sfc/siena/ptp.c
index 85005196b4c5..062c77c92077 100644
--- a/drivers/net/ethernet/sfc/siena/ptp.c
+++ b/drivers/net/ethernet/sfc/siena/ptp.c
@@ -393,7 +393,7 @@ static const unsigned long efx_ptp_stat_mask[] = {
[0 ... BITS_TO_LONGS(PTP_STAT_COUNT) - 1] = ~0UL,
};
-size_t efx_siena_ptp_describe_stats(struct efx_nic *efx, u8 *strings)
+size_t efx_siena_ptp_describe_stats(struct efx_nic *efx, u8 **strings)
{
if (!efx->ptp_data)
return 0;
diff --git a/drivers/net/ethernet/sfc/siena/ptp.h b/drivers/net/ethernet/sfc/siena/ptp.h
index b6133e7c5608..54840036ab67 100644
--- a/drivers/net/ethernet/sfc/siena/ptp.h
+++ b/drivers/net/ethernet/sfc/siena/ptp.h
@@ -28,7 +28,7 @@ int efx_siena_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
unsigned int new_mode);
int efx_siena_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
void efx_siena_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
-size_t efx_siena_ptp_describe_stats(struct efx_nic *efx, u8 *strings);
+size_t efx_siena_ptp_describe_stats(struct efx_nic *efx, u8 **strings);
size_t efx_siena_ptp_update_stats(struct efx_nic *efx, u64 *stats);
void efx_siena_time_sync_event(struct efx_channel *channel, efx_qword_t *ev);
void __efx_siena_rx_skb_attach_timestamp(struct efx_channel *channel,
diff --git a/drivers/net/ethernet/sfc/siena/rx_common.c b/drivers/net/ethernet/sfc/siena/rx_common.c
index 082e35c6caaa..4ae09505e417 100644
--- a/drivers/net/ethernet/sfc/siena/rx_common.c
+++ b/drivers/net/ethernet/sfc/siena/rx_common.c
@@ -268,8 +268,6 @@ void efx_siena_init_rx_queue(struct efx_rx_queue *rx_queue)
"Failure to initialise XDP queue information rc=%d\n",
rc);
efx->xdp_rxq_info_failed = true;
- } else {
- rx_queue->xdp_rxq_info_valid = true;
}
/* Set up RX descriptor ring */
@@ -284,7 +282,7 @@ void efx_siena_fini_rx_queue(struct efx_rx_queue *rx_queue)
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
"shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
- del_timer_sync(&rx_queue->slow_fill);
+ timer_delete_sync(&rx_queue->slow_fill);
/* Release RX buffers from the current read ptr to the write ptr */
if (rx_queue->buffer) {
@@ -299,10 +297,8 @@ void efx_siena_fini_rx_queue(struct efx_rx_queue *rx_queue)
efx_fini_rx_recycle_ring(rx_queue);
- if (rx_queue->xdp_rxq_info_valid)
+ if (xdp_rxq_info_is_reg(&rx_queue->xdp_rxq_info))
xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);
-
- rx_queue->xdp_rxq_info_valid = false;
}
void efx_siena_remove_rx_queue(struct efx_rx_queue *rx_queue)
@@ -349,7 +345,8 @@ void efx_siena_free_rx_buffers(struct efx_rx_queue *rx_queue,
void efx_siena_rx_slow_fill(struct timer_list *t)
{
- struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
+ struct efx_rx_queue *rx_queue = timer_container_of(rx_queue, t,
+ slow_fill);
/* Post an event to cause NAPI to run and refill the queue */
efx_nic_generate_fill_event(rx_queue);
@@ -888,7 +885,7 @@ static void efx_filter_rfs_work(struct work_struct *data)
/* Release references */
clear_bit(slot_idx, &efx->rps_slot_map);
- dev_put(req->net_dev);
+ netdev_put(req->net_dev, &req->net_dev_tracker);
}
int efx_siena_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
@@ -980,7 +977,8 @@ int efx_siena_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
}
/* Queue the request */
- dev_hold(req->net_dev = net_dev);
+ req->net_dev = net_dev;
+ netdev_hold(req->net_dev, &req->net_dev_tracker, GFP_ATOMIC);
INIT_WORK(&req->work, efx_filter_rfs_work);
req->rxq_index = rxq_index;
req->flow_id = flow_id;
diff --git a/drivers/net/ethernet/sfc/siena/siena.c b/drivers/net/ethernet/sfc/siena/siena.c
index ca33dc08e555..49f0c8a1a90a 100644
--- a/drivers/net/ethernet/sfc/siena/siena.c
+++ b/drivers/net/ethernet/sfc/siena/siena.c
@@ -545,7 +545,7 @@ static const unsigned long siena_stat_mask[] = {
[0 ... BITS_TO_LONGS(SIENA_STAT_COUNT) - 1] = ~0UL,
};
-static size_t siena_describe_nic_stats(struct efx_nic *efx, u8 *names)
+static size_t siena_describe_nic_stats(struct efx_nic *efx, u8 **names)
{
return efx_siena_describe_stats(siena_stat_desc, SIENA_STAT_COUNT,
siena_stat_mask, names);
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
index 0d93164988fc..fa94aa3cd5fe 100644
--- a/drivers/net/ethernet/sfc/tc.c
+++ b/drivers/net/ethernet/sfc/tc.c
@@ -1043,7 +1043,7 @@ static int efx_tc_flower_handle_lhs_actions(struct efx_nic *efx,
return -EOPNOTSUPP;
}
if (fa->ct.action) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled ct.action %u for LHS rule\n",
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled ct.action %u for LHS rule",
fa->ct.action);
return -EOPNOTSUPP;
}
@@ -1056,7 +1056,7 @@ static int efx_tc_flower_handle_lhs_actions(struct efx_nic *efx,
act->zone = ct_zone;
break;
default:
- NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u for LHS rule\n",
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u for LHS rule",
fa->id);
return -EOPNOTSUPP;
}
@@ -1581,7 +1581,7 @@ static int efx_tc_flower_replace_foreign_lhs(struct efx_nic *efx,
type = efx_tc_indr_netdev_type(net_dev);
if (type == EFX_ENCAP_TYPE_NONE) {
- NL_SET_ERR_MSG_MOD(extack, "Egress encap match on unsupported tunnel device\n");
+ NL_SET_ERR_MSG_MOD(extack, "Egress encap match on unsupported tunnel device");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/sfc/tc_conntrack.c b/drivers/net/ethernet/sfc/tc_conntrack.c
index d90206f27161..c0603f54cec3 100644
--- a/drivers/net/ethernet/sfc/tc_conntrack.c
+++ b/drivers/net/ethernet/sfc/tc_conntrack.c
@@ -16,7 +16,7 @@ static int efx_tc_flow_block(enum tc_setup_type type, void *type_data,
void *cb_priv);
static const struct rhashtable_params efx_tc_ct_zone_ht_params = {
- .key_len = offsetof(struct efx_tc_ct_zone, linkage),
+ .key_len = sizeof_field(struct efx_tc_ct_zone, zone),
.key_offset = 0,
.head_offset = offsetof(struct efx_tc_ct_zone, linkage),
};
diff --git a/drivers/net/ethernet/sfc/tc_encap_actions.c b/drivers/net/ethernet/sfc/tc_encap_actions.c
index 87443f9dfd22..eef06e48185d 100644
--- a/drivers/net/ethernet/sfc/tc_encap_actions.c
+++ b/drivers/net/ethernet/sfc/tc_encap_actions.c
@@ -11,6 +11,8 @@
#include "tc_encap_actions.h"
#include "tc.h"
#include "mae.h"
+#include <net/flow.h>
+#include <net/inet_dscp.h>
#include <net/vxlan.h>
#include <net/geneve.h>
#include <net/netevent.h>
@@ -99,7 +101,7 @@ static int efx_bind_neigh(struct efx_nic *efx,
case EFX_ENCAP_TYPE_GENEVE:
flow4.flowi4_proto = IPPROTO_UDP;
flow4.fl4_dport = encap->key.tp_dst;
- flow4.flowi4_tos = encap->key.tos;
+ flow4.flowi4_dscp = inet_dsfield_to_dscp(encap->key.tos);
flow4.daddr = encap->key.u.ipv4.dst;
flow4.saddr = encap->key.u.ipv4.src;
break;
@@ -442,7 +444,7 @@ static void efx_tc_update_encap(struct efx_nic *efx,
rule = container_of(acts, struct efx_tc_flow_rule, acts);
if (rule->fallback)
fallback = rule->fallback;
- else /* fallback fallback: deliver to PF */
+ else /* fallback of the fallback: deliver to PF */
fallback = &efx->tc->facts.pf;
rc = efx_mae_update_rule(efx, fallback->fw_id,
rule->fw_id);
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 822ec6564b2d..4dff19b6ef17 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -49,14 +49,6 @@ static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
return (u8 *)page_buf->addr + offset;
}
-u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
- struct efx_tx_buffer *buffer, size_t len)
-{
- if (len > EFX_TX_CB_SIZE)
- return NULL;
- return efx_tx_get_copy_buffer(tx_queue, buffer);
-}
-
static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
/* We need to consider all queues that the net core sees as one */
diff --git a/drivers/net/ethernet/sfc/tx.h b/drivers/net/ethernet/sfc/tx.h
index f2c4d2f89919..f882749af8c3 100644
--- a/drivers/net/ethernet/sfc/tx.h
+++ b/drivers/net/ethernet/sfc/tx.h
@@ -15,9 +15,6 @@
unsigned int efx_tx_limit_len(struct efx_tx_queue *tx_queue,
dma_addr_t dma_addr, unsigned int len);
-u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
- struct efx_tx_buffer *buffer, size_t len);
-
/* What TXQ type will satisfy the checksum offloads required for this skb? */
static inline unsigned int efx_tx_csum_type_skb(struct sk_buff *skb)
{
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 4535579018c9..39731069d99e 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -570,7 +570,7 @@ static inline void ioc3_setup_duplex(struct ioc3_private *ip)
static void ioc3_timer(struct timer_list *t)
{
- struct ioc3_private *ip = from_timer(ip, t, ioc3_timer);
+ struct ioc3_private *ip = timer_container_of(ip, t, ioc3_timer);
/* Print the link status if it has changed */
mii_check_media(&ip->mii, 1, 0);
@@ -718,7 +718,7 @@ static void ioc3_init(struct net_device *dev)
struct ioc3_private *ip = netdev_priv(dev);
struct ioc3_ethregs *regs = ip->regs;
- del_timer_sync(&ip->ioc3_timer); /* Kill if running */
+ timer_delete_sync(&ip->ioc3_timer); /* Kill if running */
writel(EMCR_RST, &regs->emcr); /* Reset */
readl(&regs->emcr); /* Flush WB */
@@ -801,7 +801,7 @@ static int ioc3_close(struct net_device *dev)
{
struct ioc3_private *ip = netdev_priv(dev);
- del_timer_sync(&ip->ioc3_timer);
+ timer_delete_sync(&ip->ioc3_timer);
netif_stop_queue(dev);
@@ -950,7 +950,7 @@ static int ioc3eth_probe(struct platform_device *pdev)
return 0;
out_stop:
- del_timer_sync(&ip->ioc3_timer);
+ timer_delete_sync(&ip->ioc3_timer);
if (ip->rxr)
dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr,
ip->rxr_dma);
@@ -971,7 +971,7 @@ static void ioc3eth_remove(struct platform_device *pdev)
dma_free_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1, ip->tx_ring, ip->txr_dma);
unregister_netdev(dev);
- del_timer_sync(&ip->ioc3_timer);
+ timer_delete_sync(&ip->ioc3_timer);
free_netdev(dev);
}
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index dda4e488c77a..15e46e6ac262 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -758,7 +758,7 @@ static irqreturn_t sis190_irq(int irq, void *__dev)
if (status & LinkChange) {
netif_info(tp, intr, dev, "link change\n");
- del_timer(&tp->timer);
+ timer_delete(&tp->timer);
schedule_work(&tp->phy_task);
}
@@ -1023,7 +1023,7 @@ out_unlock:
static void sis190_phy_timer(struct timer_list *t)
{
- struct sis190_private *tp = from_timer(tp, t, timer);
+ struct sis190_private *tp = timer_container_of(tp, t, timer);
struct net_device *dev = tp->dev;
if (likely(netif_running(dev)))
@@ -1034,7 +1034,7 @@ static inline void sis190_delete_timer(struct net_device *dev)
{
struct sis190_private *tp = netdev_priv(dev);
- del_timer_sync(&tp->timer);
+ timer_delete_sync(&tp->timer);
}
static inline void sis190_request_timer(struct net_device *dev)
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 85b850372efe..b461918dc5f4 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -468,7 +468,7 @@ static int sis900_probe(struct pci_dev *pci_dev,
SET_NETDEV_DEV(net_dev, &pci_dev->dev);
/* We do a request_region() to register /proc/ioports info. */
- ret = pci_request_regions(pci_dev, "sis900");
+ ret = pcim_request_all_regions(pci_dev, "sis900");
if (ret)
goto err_out;
@@ -1314,7 +1314,8 @@ static void sis630_set_eq(struct net_device *net_dev, u8 revision)
static void sis900_timer(struct timer_list *t)
{
- struct sis900_private *sis_priv = from_timer(sis_priv, t, timer);
+ struct sis900_private *sis_priv = timer_container_of(sis_priv, t,
+ timer);
struct net_device *net_dev = sis_priv->mii_info.dev;
struct mii_phy *mii_phy = sis_priv->mii;
static const int next_tick = 5*HZ;
@@ -1983,7 +1984,7 @@ static int sis900_close(struct net_device *net_dev)
/* Stop the chip's Tx and Rx Status Machine */
sw32(cr, RxDIS | TxDIS | sr32(cr));
- del_timer(&sis_priv->timer);
+ timer_delete(&sis_priv->timer);
free_irq(pdev->irq, net_dev);
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 013e90d69182..45f703fe0e5a 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -850,7 +850,7 @@ static void check_media(struct net_device *dev)
static void epic_timer(struct timer_list *t)
{
- struct epic_private *ep = from_timer(ep, t, timer);
+ struct epic_private *ep = timer_container_of(ep, t, timer);
struct net_device *dev = ep->mii.dev;
void __iomem *ioaddr = ep->ioaddr;
int next_tick = 5*HZ;
@@ -1292,7 +1292,7 @@ static int epic_close(struct net_device *dev)
netdev_dbg(dev, "Shutting down ethercard, status was %2.2x.\n",
er32(INTSTAT));
- del_timer_sync(&ep->timer);
+ timer_delete_sync(&ep->timer);
epic_disable_int(dev, ep);
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 86e3ec25df07..cc0c75694351 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1105,7 +1105,7 @@ static int smc_close(struct net_device *dev)
outw(CTL_POWERDOWN, ioaddr + CONTROL );
link->open--;
- del_timer_sync(&smc->media);
+ timer_delete_sync(&smc->media);
return 0;
} /* smc_close */
@@ -1713,7 +1713,7 @@ static void smc_reset(struct net_device *dev)
static void media_check(struct timer_list *t)
{
- struct smc_private *smc = from_timer(smc, t, media);
+ struct smc_private *smc = timer_container_of(smc, t, media);
struct net_device *dev = smc->mii_if.dev;
unsigned int ioaddr = dev->base_addr;
u_short i, media, saved_bank;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index f539813878f5..3ebd0664c697 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -43,7 +43,6 @@
#include <linux/smsc911x.h>
#include <linux/device.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include <linux/acpi.h>
#include <linux/pm_runtime.h>
@@ -2163,10 +2162,20 @@ static const struct net_device_ops smsc911x_netdev_ops = {
static void smsc911x_read_mac_address(struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
- u32 mac_high16 = smsc911x_mac_read(pdata, ADDRH);
- u32 mac_low32 = smsc911x_mac_read(pdata, ADDRL);
+ u32 mac_high16, mac_low32;
u8 addr[ETH_ALEN];
+ mac_high16 = smsc911x_mac_read(pdata, ADDRH);
+ mac_low32 = smsc911x_mac_read(pdata, ADDRL);
+
+ /* The first mac_read in some setups can incorrectly read 0. Re-read it
+ * to get the full MAC if this is observed.
+ */
+ if (mac_high16 == 0) {
+ SMSC_TRACE(pdata, probe, "Re-read MAC ADDRH\n");
+ mac_high16 = smsc911x_mac_read(pdata, ADDRH);
+ }
+
addr[0] = (u8)(mac_low32);
addr[1] = (u8)(mac_low32 >> 8);
addr[2] = (u8)(mac_low32 >> 16);
@@ -2351,7 +2360,7 @@ static void smsc911x_drv_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
}
-/* standard register acces */
+/* standard register access */
static const struct smsc911x_ops standard_smsc911x_ops = {
.reg_read = __smsc911x_reg_read,
.reg_write = __smsc911x_reg_write,
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index dc99821c6226..ee890de69ffe 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -970,7 +970,7 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
struct netsec_desc *desc = &dring->desc[idx];
struct page *page = virt_to_page(desc->addr);
- u32 xdp_result = NETSEC_XDP_PASS;
+ u32 metasize, xdp_result = NETSEC_XDP_PASS;
struct sk_buff *skb = NULL;
u16 pkt_len, desc_len;
dma_addr_t dma_handle;
@@ -1019,7 +1019,7 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
prefetch(desc->addr);
xdp_prepare_buff(&xdp, desc->addr, NETSEC_RXBUF_HEADROOM,
- pkt_len, false);
+ pkt_len, true);
if (xdp_prog) {
xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp);
@@ -1048,6 +1048,9 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
skb_reserve(skb, xdp.data - xdp.data_hard_start);
skb_put(skb, xdp.data_end - xdp.data);
+ metasize = xdp.data - xdp.data_meta;
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb->protocol = eth_type_trans(skb, priv->ndev);
if (priv->rx_cksum_offload_flag &&
diff --git a/drivers/net/ethernet/spacemit/Kconfig b/drivers/net/ethernet/spacemit/Kconfig
new file mode 100644
index 000000000000..85ef61a9b4ef
--- /dev/null
+++ b/drivers/net/ethernet/spacemit/Kconfig
@@ -0,0 +1,29 @@
+config NET_VENDOR_SPACEMIT
+ bool "SpacemiT devices"
+ default y
+ depends on ARCH_SPACEMIT || COMPILE_TEST
+ help
+ If you have a network (Ethernet) device belonging to this class,
+ say Y.
+
+ Note that the answer to this question does not directly affect
+ the kernel: saying N will just cause the configurator to skip all
+ the questions regarding SpacemiT devices. If you say Y, you will
+ be asked for your specific chipset/driver in the following questions.
+
+if NET_VENDOR_SPACEMIT
+
+config SPACEMIT_K1_EMAC
+ tristate "SpacemiT K1 Ethernet MAC driver"
+ depends on ARCH_SPACEMIT || COMPILE_TEST
+ depends on MFD_SYSCON
+ depends on OF
+ default m if ARCH_SPACEMIT
+ select PHYLIB
+ help
+ This driver supports the Ethernet MAC in the SpacemiT K1 SoC.
+
+ To compile this driver as a module, choose M here: the module
+ will be called k1_emac.
+
+endif # NET_VENDOR_SPACEMIT
diff --git a/drivers/net/ethernet/spacemit/Makefile b/drivers/net/ethernet/spacemit/Makefile
new file mode 100644
index 000000000000..d29efd997a4f
--- /dev/null
+++ b/drivers/net/ethernet/spacemit/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the SpacemiT network device drivers.
+#
+
+obj-$(CONFIG_SPACEMIT_K1_EMAC) += k1_emac.o
diff --git a/drivers/net/ethernet/spacemit/k1_emac.c b/drivers/net/ethernet/spacemit/k1_emac.c
new file mode 100644
index 000000000000..220eb5ce7583
--- /dev/null
+++ b/drivers/net/ethernet/spacemit/k1_emac.c
@@ -0,0 +1,2162 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SpacemiT K1 Ethernet driver
+ *
+ * Copyright (C) 2023-2025 SpacemiT (Hangzhou) Technology Co. Ltd
+ * Copyright (C) 2025 Vivian Wang <wangruikang@iscas.ac.cn>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/rtnetlink.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+
+#include "k1_emac.h"
+
+#define DRIVER_NAME "k1_emac"
+
+#define EMAC_DEFAULT_BUFSIZE 1536
+#define EMAC_RX_BUF_2K 2048
+#define EMAC_RX_BUF_4K 4096
+
+/* Tuning parameters from SpacemiT */
+#define EMAC_TX_FRAMES 64
+#define EMAC_TX_COAL_TIMEOUT 40000
+#define EMAC_RX_FRAMES 64
+#define EMAC_RX_COAL_TIMEOUT (600 * 312)
+
+#define DEFAULT_FC_PAUSE_TIME 0xffff
+#define DEFAULT_FC_FIFO_HIGH 1600
+#define DEFAULT_TX_ALMOST_FULL 0x1f8
+#define DEFAULT_TX_THRESHOLD 1518
+#define DEFAULT_RX_THRESHOLD 12
+#define DEFAULT_TX_RING_NUM 1024
+#define DEFAULT_RX_RING_NUM 1024
+#define DEFAULT_DMA_BURST MREGBIT_BURST_16WORD
+#define HASH_TABLE_SIZE 64
+
+struct desc_buf {
+ u64 dma_addr;
+ void *buff_addr;
+ u16 dma_len;
+ u8 map_as_page;
+};
+
+struct emac_tx_desc_buffer {
+ struct sk_buff *skb;
+ struct desc_buf buf[2];
+};
+
+struct emac_rx_desc_buffer {
+ struct sk_buff *skb;
+ u64 dma_addr;
+ void *buff_addr;
+ u16 dma_len;
+ u8 map_as_page;
+};
+
+/**
+ * struct emac_desc_ring - Software-side information for one descriptor ring
+ * Same structure used for both RX and TX
+ * @desc_addr: Virtual address to the descriptor ring memory
+ * @desc_dma_addr: DMA address of the descriptor ring
+ * @total_size: Size of ring in bytes
+ * @total_cnt: Number of descriptors
+ * @head: Next descriptor to associate a buffer with
+ * @tail: Next descriptor to check status bit
+ * @rx_desc_buf: Array of descriptors for RX
+ * @tx_desc_buf: Array of descriptors for TX, with max of two buffers each
+ */
+struct emac_desc_ring {
+ void *desc_addr;
+ dma_addr_t desc_dma_addr;
+ u32 total_size;
+ u32 total_cnt;
+ u32 head;
+ u32 tail;
+ union {
+ struct emac_rx_desc_buffer *rx_desc_buf;
+ struct emac_tx_desc_buffer *tx_desc_buf;
+ };
+};
+
+struct emac_priv {
+ void __iomem *iobase;
+ u32 dma_buf_sz;
+ struct emac_desc_ring tx_ring;
+ struct emac_desc_ring rx_ring;
+
+ struct net_device *ndev;
+ struct napi_struct napi;
+ struct platform_device *pdev;
+ struct clk *bus_clk;
+ struct clk *ref_clk;
+ struct regmap *regmap_apmu;
+ u32 regmap_apmu_offset;
+ int irq;
+
+ phy_interface_t phy_interface;
+
+ union emac_hw_tx_stats tx_stats, tx_stats_off;
+ union emac_hw_rx_stats rx_stats, rx_stats_off;
+
+ u32 tx_count_frames;
+ u32 tx_coal_frames;
+ u32 tx_coal_timeout;
+ struct work_struct tx_timeout_task;
+
+ struct timer_list txtimer;
+ struct timer_list stats_timer;
+
+ u32 tx_delay;
+ u32 rx_delay;
+
+ bool flow_control_autoneg;
+ u8 flow_control;
+
+ /* Softirq-safe, hold while touching hardware statistics */
+ spinlock_t stats_lock;
+};
+
+static void emac_wr(struct emac_priv *priv, u32 reg, u32 val)
+{
+ writel(val, priv->iobase + reg);
+}
+
+static u32 emac_rd(struct emac_priv *priv, u32 reg)
+{
+ return readl(priv->iobase + reg);
+}
+
+static int emac_phy_interface_config(struct emac_priv *priv)
+{
+ u32 val = 0, mask = REF_CLK_SEL | RGMII_TX_CLK_SEL | PHY_INTF_RGMII;
+
+ if (phy_interface_mode_is_rgmii(priv->phy_interface))
+ val |= PHY_INTF_RGMII;
+
+ regmap_update_bits(priv->regmap_apmu,
+ priv->regmap_apmu_offset + APMU_EMAC_CTRL_REG,
+ mask, val);
+
+ return 0;
+}
+
+/*
+ * Where the hardware expects a MAC address, it is laid out across three
+ * consecutive registers (high, med, low), two bytes per register, in the
+ * format written below.
+ */
+
+static void emac_set_mac_addr_reg(struct emac_priv *priv,
+ const unsigned char *addr,
+ u32 reg)
+{
+ emac_wr(priv, reg + sizeof(u32) * 0, addr[1] << 8 | addr[0]);
+ emac_wr(priv, reg + sizeof(u32) * 1, addr[3] << 8 | addr[2]);
+ emac_wr(priv, reg + sizeof(u32) * 2, addr[5] << 8 | addr[4]);
+}
+
+static void emac_set_mac_addr(struct emac_priv *priv, const unsigned char *addr)
+{
+ /* We use only one address, so set the same for flow control as well */
+ emac_set_mac_addr_reg(priv, addr, MAC_ADDRESS1_HIGH);
+ emac_set_mac_addr_reg(priv, addr, MAC_FC_SOURCE_ADDRESS_HIGH);
+}
+
+static void emac_reset_hw(struct emac_priv *priv)
+{
+ /* Disable all interrupts */
+ emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
+ emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0);
+
+ /* Disable transmit and receive units */
+ emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0);
+ emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0);
+
+ /* Disable DMA */
+ emac_wr(priv, DMA_CONTROL, 0x0);
+}
+
+static void emac_init_hw(struct emac_priv *priv)
+{
+ /* Destination address for 802.3x Ethernet flow control */
+ u8 fc_dest_addr[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x01 };
+
+ u32 rxirq = 0, dma = 0;
+
+ regmap_set_bits(priv->regmap_apmu,
+ priv->regmap_apmu_offset + APMU_EMAC_CTRL_REG,
+ AXI_SINGLE_ID);
+
+ /* Disable transmit and receive units */
+ emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0);
+ emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0);
+
+ /* Enable MAC address 1 filtering */
+ emac_wr(priv, MAC_ADDRESS_CONTROL, MREGBIT_MAC_ADDRESS1_ENABLE);
+
+ /* Zero initialize the multicast hash table */
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0);
+
+ /* Configure thresholds */
+ emac_wr(priv, MAC_TRANSMIT_FIFO_ALMOST_FULL, DEFAULT_TX_ALMOST_FULL);
+ emac_wr(priv, MAC_TRANSMIT_PACKET_START_THRESHOLD,
+ DEFAULT_TX_THRESHOLD);
+ emac_wr(priv, MAC_RECEIVE_PACKET_START_THRESHOLD, DEFAULT_RX_THRESHOLD);
+
+ /* Configure flow control (enabled in emac_adjust_link() later) */
+ emac_set_mac_addr_reg(priv, fc_dest_addr, MAC_FC_SOURCE_ADDRESS_HIGH);
+ emac_wr(priv, MAC_FC_PAUSE_HIGH_THRESHOLD, DEFAULT_FC_FIFO_HIGH);
+ emac_wr(priv, MAC_FC_HIGH_PAUSE_TIME, DEFAULT_FC_PAUSE_TIME);
+ emac_wr(priv, MAC_FC_PAUSE_LOW_THRESHOLD, 0);
+
+ /* RX IRQ mitigation */
+ rxirq = FIELD_PREP(MREGBIT_RECEIVE_IRQ_FRAME_COUNTER_MASK,
+ EMAC_RX_FRAMES);
+ rxirq |= FIELD_PREP(MREGBIT_RECEIVE_IRQ_TIMEOUT_COUNTER_MASK,
+ EMAC_RX_COAL_TIMEOUT);
+ rxirq |= MREGBIT_RECEIVE_IRQ_MITIGATION_ENABLE;
+ emac_wr(priv, DMA_RECEIVE_IRQ_MITIGATION_CTRL, rxirq);
+
+ /* Disable and set DMA config */
+ emac_wr(priv, DMA_CONTROL, 0x0);
+
+ emac_wr(priv, DMA_CONFIGURATION, MREGBIT_SOFTWARE_RESET);
+ usleep_range(9000, 10000);
+ emac_wr(priv, DMA_CONFIGURATION, 0x0);
+ usleep_range(9000, 10000);
+
+ dma |= MREGBIT_STRICT_BURST;
+ dma |= MREGBIT_DMA_64BIT_MODE;
+ dma |= DEFAULT_DMA_BURST;
+
+ emac_wr(priv, DMA_CONFIGURATION, dma);
+}
+
+static void emac_dma_start_transmit(struct emac_priv *priv)
+{
+ /* The actual value written does not matter */
+ emac_wr(priv, DMA_TRANSMIT_POLL_DEMAND, 1);
+}
+
+static void emac_enable_interrupt(struct emac_priv *priv)
+{
+ u32 val;
+
+ val = emac_rd(priv, DMA_INTERRUPT_ENABLE);
+ val |= MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
+ val |= MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE;
+ emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
+}
+
+static void emac_disable_interrupt(struct emac_priv *priv)
+{
+ u32 val;
+
+ val = emac_rd(priv, DMA_INTERRUPT_ENABLE);
+ val &= ~MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
+ val &= ~MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE;
+ emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
+}
+
+static u32 emac_tx_avail(struct emac_priv *priv)
+{
+ struct emac_desc_ring *tx_ring = &priv->tx_ring;
+ u32 avail;
+
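+ /*
+ * One descriptor is always left unused, so that head == tail
+ * unambiguously means the ring is empty.
+ */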
+ if (tx_ring->tail > tx_ring->head)
+ avail = tx_ring->tail - tx_ring->head - 1;
+ else
+ avail = tx_ring->total_cnt - tx_ring->head + tx_ring->tail - 1;
+
+ return avail;
+}
+
+static void emac_tx_coal_timer_resched(struct emac_priv *priv)
+{
+ mod_timer(&priv->txtimer,
+ jiffies + usecs_to_jiffies(priv->tx_coal_timeout));
+}
+
+static void emac_tx_coal_timer(struct timer_list *t)
+{
+ struct emac_priv *priv = timer_container_of(priv, t, txtimer);
+
+ napi_schedule(&priv->napi);
+}
+
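+/*
+ * TX interrupt coalescing: only request a completion interrupt once every
+ * tx_coal_frames packets; in between, re-arm the coalescing timer so that
+ * completed descriptors still get cleaned even if no further packets are sent.
+ */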
+static bool emac_tx_should_interrupt(struct emac_priv *priv, u32 pkt_num)
+{
+ priv->tx_count_frames += pkt_num;
+ if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
+ emac_tx_coal_timer_resched(priv);
+ return false;
+ }
+
+ priv->tx_count_frames = 0;
+ return true;
+}
+
+static void emac_free_tx_buf(struct emac_priv *priv, int i)
+{
+ struct emac_tx_desc_buffer *tx_buf;
+ struct emac_desc_ring *tx_ring;
+ struct desc_buf *buf;
+ int j;
+
+ tx_ring = &priv->tx_ring;
+ tx_buf = &tx_ring->tx_desc_buf[i];
+
+ for (j = 0; j < 2; j++) {
+ buf = &tx_buf->buf[j];
+ if (!buf->dma_addr)
+ continue;
+
+ if (buf->map_as_page)
+ dma_unmap_page(&priv->pdev->dev, buf->dma_addr,
+ buf->dma_len, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(&priv->pdev->dev,
+ buf->dma_addr, buf->dma_len,
+ DMA_TO_DEVICE);
+
+ buf->dma_addr = 0;
+ buf->map_as_page = false;
+ buf->buff_addr = NULL;
+ }
+
+ if (tx_buf->skb) {
+ dev_kfree_skb_any(tx_buf->skb);
+ tx_buf->skb = NULL;
+ }
+}
+
+static void emac_clean_tx_desc_ring(struct emac_priv *priv)
+{
+ struct emac_desc_ring *tx_ring = &priv->tx_ring;
+ u32 i;
+
+ for (i = 0; i < tx_ring->total_cnt; i++)
+ emac_free_tx_buf(priv, i);
+
+ tx_ring->head = 0;
+ tx_ring->tail = 0;
+}
+
+static void emac_clean_rx_desc_ring(struct emac_priv *priv)
+{
+ struct emac_rx_desc_buffer *rx_buf;
+ struct emac_desc_ring *rx_ring;
+ u32 i;
+
+ rx_ring = &priv->rx_ring;
+
+ for (i = 0; i < rx_ring->total_cnt; i++) {
+ rx_buf = &rx_ring->rx_desc_buf[i];
+
+ if (!rx_buf->skb)
+ continue;
+
+ dma_unmap_single(&priv->pdev->dev, rx_buf->dma_addr,
+ rx_buf->dma_len, DMA_FROM_DEVICE);
+
+ dev_kfree_skb(rx_buf->skb);
+ rx_buf->skb = NULL;
+ }
+
+ rx_ring->tail = 0;
+ rx_ring->head = 0;
+}
+
+static int emac_alloc_tx_resources(struct emac_priv *priv)
+{
+ struct emac_desc_ring *tx_ring = &priv->tx_ring;
+ struct platform_device *pdev = priv->pdev;
+
+ tx_ring->tx_desc_buf = kcalloc(tx_ring->total_cnt,
+ sizeof(*tx_ring->tx_desc_buf),
+ GFP_KERNEL);
+
+ if (!tx_ring->tx_desc_buf)
+ return -ENOMEM;
+
+ tx_ring->total_size = tx_ring->total_cnt * sizeof(struct emac_desc);
+ tx_ring->total_size = ALIGN(tx_ring->total_size, PAGE_SIZE);
+
+ tx_ring->desc_addr = dma_alloc_coherent(&pdev->dev, tx_ring->total_size,
+ &tx_ring->desc_dma_addr,
+ GFP_KERNEL);
+ if (!tx_ring->desc_addr) {
+ kfree(tx_ring->tx_desc_buf);
+ return -ENOMEM;
+ }
+
+ tx_ring->head = 0;
+ tx_ring->tail = 0;
+
+ return 0;
+}
+
+static int emac_alloc_rx_resources(struct emac_priv *priv)
+{
+ struct emac_desc_ring *rx_ring = &priv->rx_ring;
+ struct platform_device *pdev = priv->pdev;
+
+ rx_ring->rx_desc_buf = kcalloc(rx_ring->total_cnt,
+ sizeof(*rx_ring->rx_desc_buf),
+ GFP_KERNEL);
+ if (!rx_ring->rx_desc_buf)
+ return -ENOMEM;
+
+ rx_ring->total_size = rx_ring->total_cnt * sizeof(struct emac_desc);
+
+ rx_ring->total_size = ALIGN(rx_ring->total_size, PAGE_SIZE);
+
+ rx_ring->desc_addr = dma_alloc_coherent(&pdev->dev, rx_ring->total_size,
+ &rx_ring->desc_dma_addr,
+ GFP_KERNEL);
+ if (!rx_ring->desc_addr) {
+ kfree(rx_ring->rx_desc_buf);
+ return -ENOMEM;
+ }
+
+ rx_ring->head = 0;
+ rx_ring->tail = 0;
+
+ return 0;
+}
+
+static void emac_free_tx_resources(struct emac_priv *priv)
+{
+ struct emac_desc_ring *tr = &priv->tx_ring;
+ struct device *dev = &priv->pdev->dev;
+
+ emac_clean_tx_desc_ring(priv);
+
+ kfree(tr->tx_desc_buf);
+ tr->tx_desc_buf = NULL;
+
+ dma_free_coherent(dev, tr->total_size, tr->desc_addr,
+ tr->desc_dma_addr);
+ tr->desc_addr = NULL;
+}
+
+static void emac_free_rx_resources(struct emac_priv *priv)
+{
+ struct emac_desc_ring *rr = &priv->rx_ring;
+ struct device *dev = &priv->pdev->dev;
+
+ emac_clean_rx_desc_ring(priv);
+
+ kfree(rr->rx_desc_buf);
+ rr->rx_desc_buf = NULL;
+
+ dma_free_coherent(dev, rr->total_size, rr->desc_addr,
+ rr->desc_dma_addr);
+ rr->desc_addr = NULL;
+}
+
+static int emac_tx_clean_desc(struct emac_priv *priv)
+{
+ struct net_device *ndev = priv->ndev;
+ struct emac_desc_ring *tx_ring;
+ struct emac_desc *tx_desc;
+ u32 i;
+
+ netif_tx_lock(ndev);
+
+ tx_ring = &priv->tx_ring;
+
+ i = tx_ring->tail;
+
+ while (i != tx_ring->head) {
+ tx_desc = &((struct emac_desc *)tx_ring->desc_addr)[i];
+
+ /* Stop checking if desc is still owned by DMA */
+ if (READ_ONCE(tx_desc->desc0) & TX_DESC_0_OWN)
+ break;
+
+ emac_free_tx_buf(priv, i);
+ memset(tx_desc, 0, sizeof(struct emac_desc));
+
+ if (++i == tx_ring->total_cnt)
+ i = 0;
+ }
+
+ tx_ring->tail = i;
+
+ if (unlikely(netif_queue_stopped(ndev) &&
+ emac_tx_avail(priv) > tx_ring->total_cnt / 4))
+ netif_wake_queue(ndev);
+
+ netif_tx_unlock(ndev);
+
+ return 0;
+}
+
+static bool emac_rx_frame_good(struct emac_priv *priv, struct emac_desc *desc)
+{
+ const char *msg;
+ u32 len;
+
+ len = FIELD_GET(RX_DESC_0_FRAME_PACKET_LENGTH_MASK, desc->desc0);
+
+ if (WARN_ON_ONCE(!(desc->desc0 & RX_DESC_0_LAST_DESCRIPTOR)))
+ msg = "Not last descriptor"; /* This would be a bug */
+ else if (desc->desc0 & RX_DESC_0_FRAME_RUNT)
+ msg = "Runt frame";
+ else if (desc->desc0 & RX_DESC_0_FRAME_CRC_ERR)
+ msg = "Frame CRC error";
+ else if (desc->desc0 & RX_DESC_0_FRAME_MAX_LEN_ERR)
+ msg = "Frame exceeds max length";
+ else if (desc->desc0 & RX_DESC_0_FRAME_JABBER_ERR)
+ msg = "Frame jabber error";
+ else if (desc->desc0 & RX_DESC_0_FRAME_LENGTH_ERR)
+ msg = "Frame length error";
+ else if (len <= ETH_FCS_LEN || len > priv->dma_buf_sz)
+ msg = "Frame length unacceptable";
+ else
+ return true; /* All good */
+
+ dev_dbg_ratelimited(&priv->ndev->dev, "RX error: %s", msg);
+
+ return false;
+}
+
+static void emac_alloc_rx_desc_buffers(struct emac_priv *priv)
+{
+ struct emac_desc_ring *rx_ring = &priv->rx_ring;
+ struct emac_desc rx_desc, *rx_desc_addr;
+ struct net_device *ndev = priv->ndev;
+ struct emac_rx_desc_buffer *rx_buf;
+ struct sk_buff *skb;
+ u32 i;
+
+ i = rx_ring->head;
+ rx_buf = &rx_ring->rx_desc_buf[i];
+
+ while (!rx_buf->skb) {
+ skb = netdev_alloc_skb_ip_align(ndev, priv->dma_buf_sz);
+ if (!skb)
+ break;
+
+ skb->dev = ndev;
+
+ rx_buf->skb = skb;
+ rx_buf->dma_len = priv->dma_buf_sz;
+ rx_buf->dma_addr = dma_map_single(&priv->pdev->dev, skb->data,
+ priv->dma_buf_sz,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&priv->pdev->dev, rx_buf->dma_addr)) {
+ dev_err_ratelimited(&ndev->dev, "Mapping skb failed\n");
+ goto err_free_skb;
+ }
+
+ rx_desc_addr = &((struct emac_desc *)rx_ring->desc_addr)[i];
+
+ memset(&rx_desc, 0, sizeof(rx_desc));
+
+ rx_desc.buffer_addr_1 = rx_buf->dma_addr;
+ rx_desc.desc1 = FIELD_PREP(RX_DESC_1_BUFFER_SIZE_1_MASK,
+ rx_buf->dma_len);
+
+ if (++i == rx_ring->total_cnt) {
+ rx_desc.desc1 |= RX_DESC_1_END_RING;
+ i = 0;
+ }
+
+ *rx_desc_addr = rx_desc;
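+ /* Make sure all descriptor fields are written before setting OWN */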
+ dma_wmb();
+ WRITE_ONCE(rx_desc_addr->desc0, rx_desc.desc0 | RX_DESC_0_OWN);
+
+ rx_buf = &rx_ring->rx_desc_buf[i];
+ }
+
+ rx_ring->head = i;
+ return;
+
+err_free_skb:
+ dev_kfree_skb_any(skb);
+ rx_buf->skb = NULL;
+}
+
+/* Returns number of packets received */
+static int emac_rx_clean_desc(struct emac_priv *priv, int budget)
+{
+ struct net_device *ndev = priv->ndev;
+ struct emac_rx_desc_buffer *rx_buf;
+ struct emac_desc_ring *rx_ring;
+ struct sk_buff *skb = NULL;
+ struct emac_desc *rx_desc;
+ u32 got = 0, skb_len, i;
+
+ rx_ring = &priv->rx_ring;
+
+ i = rx_ring->tail;
+
+ while (budget--) {
+ rx_desc = &((struct emac_desc *)rx_ring->desc_addr)[i];
+
+ /* Stop checking if rx_desc still owned by DMA */
+ if (READ_ONCE(rx_desc->desc0) & RX_DESC_0_OWN)
+ break;
+
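+ /* Only read the rest of the descriptor after the OWN check */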
+ dma_rmb();
+
+ rx_buf = &rx_ring->rx_desc_buf[i];
+
+ if (!rx_buf->skb)
+ break;
+
+ got++;
+
+ dma_unmap_single(&priv->pdev->dev, rx_buf->dma_addr,
+ rx_buf->dma_len, DMA_FROM_DEVICE);
+
+ if (likely(emac_rx_frame_good(priv, rx_desc))) {
+ skb = rx_buf->skb;
+
+ skb_len = FIELD_GET(RX_DESC_0_FRAME_PACKET_LENGTH_MASK,
+ rx_desc->desc0);
+ skb_len -= ETH_FCS_LEN;
+
+ skb_put(skb, skb_len);
+ skb->dev = ndev;
+ ndev->hard_header_len = ETH_HLEN;
+
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ napi_gro_receive(&priv->napi, skb);
+
+ memset(rx_desc, 0, sizeof(struct emac_desc));
+ rx_buf->skb = NULL;
+ } else {
+ dev_kfree_skb_irq(rx_buf->skb);
+ rx_buf->skb = NULL;
+ }
+
+ if (++i == rx_ring->total_cnt)
+ i = 0;
+ }
+
+ rx_ring->tail = i;
+
+ emac_alloc_rx_desc_buffers(priv);
+
+ return got;
+}
+
+static int emac_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct emac_priv *priv = container_of(napi, struct emac_priv, napi);
+ int work_done;
+
+ emac_tx_clean_desc(priv);
+
+ work_done = emac_rx_clean_desc(priv, budget);
+ if (work_done < budget && napi_complete_done(napi, work_done))
+ emac_enable_interrupt(priv);
+
+ return work_done;
+}
+
+/*
+ * For convenience, skb->data is fragment 0, frags[0] is fragment 1, etc.
+ *
+ * Each descriptor can hold up to two fragments, called buffer 1 and 2. For each
+ * fragment f, if f % 2 == 0, it uses buffer 1, otherwise it uses buffer 2.
+ */
+
+static int emac_tx_map_frag(struct device *dev, struct emac_desc *tx_desc,
+ struct emac_tx_desc_buffer *tx_buf,
+ struct sk_buff *skb, u32 frag_idx)
+{
+ bool map_as_page, buf_idx;
+ const skb_frag_t *frag;
+ phys_addr_t addr;
+ u32 len;
+ int ret;
+
+ buf_idx = frag_idx % 2;
+
+ if (frag_idx == 0) {
+ /* Non-fragmented part */
+ len = skb_headlen(skb);
+ addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
+ map_as_page = false;
+ } else {
+ /* Fragment */
+ frag = &skb_shinfo(skb)->frags[frag_idx - 1];
+ len = skb_frag_size(frag);
+ addr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
+ map_as_page = true;
+ }
+
+ ret = dma_mapping_error(dev, addr);
+ if (ret)
+ return ret;
+
+ tx_buf->buf[buf_idx].dma_addr = addr;
+ tx_buf->buf[buf_idx].dma_len = len;
+ tx_buf->buf[buf_idx].map_as_page = map_as_page;
+
+ if (buf_idx == 0) {
+ tx_desc->buffer_addr_1 = addr;
+ tx_desc->desc1 |= FIELD_PREP(TX_DESC_1_BUFFER_SIZE_1_MASK, len);
+ } else {
+ tx_desc->buffer_addr_2 = addr;
+ tx_desc->desc1 |= FIELD_PREP(TX_DESC_1_BUFFER_SIZE_2_MASK, len);
+ }
+
+ return 0;
+}
+
+static void emac_tx_mem_map(struct emac_priv *priv, struct sk_buff *skb)
+{
+ struct emac_desc_ring *tx_ring = &priv->tx_ring;
+ struct emac_desc tx_desc, *tx_desc_addr;
+ struct device *dev = &priv->pdev->dev;
+ struct emac_tx_desc_buffer *tx_buf;
+ u32 head, old_head, frag_num, f;
+ bool buf_idx;
+
+ frag_num = skb_shinfo(skb)->nr_frags;
+ head = tx_ring->head;
+ old_head = head;
+
+ for (f = 0; f < frag_num + 1; f++) {
+ buf_idx = f % 2;
+
+ /*
+ * If using buffer 1, initialize a new desc. Otherwise, use
+ * buffer 2 of previous fragment's desc.
+ */
+ if (!buf_idx) {
+ tx_buf = &tx_ring->tx_desc_buf[head];
+ tx_desc_addr =
+ &((struct emac_desc *)tx_ring->desc_addr)[head];
+ memset(&tx_desc, 0, sizeof(tx_desc));
+
+ /*
+ * Give ownership for all but first desc initially. For
+ * first desc, give at the end so DMA cannot start
+ * reading uninitialized descs.
+ */
+ if (head != old_head)
+ tx_desc.desc0 |= TX_DESC_0_OWN;
+
+ if (++head == tx_ring->total_cnt) {
+ /* Just used last desc in ring */
+ tx_desc.desc1 |= TX_DESC_1_END_RING;
+ head = 0;
+ }
+ }
+
+ if (emac_tx_map_frag(dev, &tx_desc, tx_buf, skb, f)) {
+ dev_err_ratelimited(&priv->ndev->dev,
+ "Map TX frag %d failed\n", f);
+ goto err_free_skb;
+ }
+
+ if (f == 0)
+ tx_desc.desc1 |= TX_DESC_1_FIRST_SEGMENT;
+
+ if (f == frag_num) {
+ tx_desc.desc1 |= TX_DESC_1_LAST_SEGMENT;
+ tx_buf->skb = skb;
+ if (emac_tx_should_interrupt(priv, frag_num + 1))
+ tx_desc.desc1 |=
+ TX_DESC_1_INTERRUPT_ON_COMPLETION;
+ }
+
+ *tx_desc_addr = tx_desc;
+ }
+
+ /* All descriptors are ready, give ownership for first desc */
+ tx_desc_addr = &((struct emac_desc *)tx_ring->desc_addr)[old_head];
+ dma_wmb();
+ WRITE_ONCE(tx_desc_addr->desc0, tx_desc_addr->desc0 | TX_DESC_0_OWN);
+
+ emac_dma_start_transmit(priv);
+
+ tx_ring->head = head;
+
+ return;
+
+err_free_skb:
+ dev_dstats_tx_dropped(priv->ndev);
+ dev_kfree_skb_any(skb);
+}
+
+static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ struct device *dev = &priv->pdev->dev;
+
+ if (unlikely(emac_tx_avail(priv) < nfrags + 1)) {
+ if (!netif_queue_stopped(ndev)) {
+ netif_stop_queue(ndev);
+ dev_err_ratelimited(dev, "TX ring full, stop TX queue\n");
+ }
+ return NETDEV_TX_BUSY;
+ }
+
+ emac_tx_mem_map(priv, skb);
+
+ /* Make sure there is space in the ring for the next TX. */
+ if (unlikely(emac_tx_avail(priv) <= MAX_SKB_FRAGS + 2))
+ netif_stop_queue(ndev);
+
+ return NETDEV_TX_OK;
+}
+
+static int emac_set_mac_address(struct net_device *ndev, void *addr)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ int ret = eth_mac_addr(ndev, addr);
+
+ if (ret)
+ return ret;
+
+ /* If running, set now; otherwise it will be set in emac_up(). */
+ if (netif_running(ndev))
+ emac_set_mac_addr(priv, ndev->dev_addr);
+
+ return 0;
+}
+
+static void emac_mac_multicast_filter_clear(struct emac_priv *priv)
+{
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0);
+}
+
+/*
+ * The upper 6 bits of the Ethernet CRC of the MAC address are used as the hash
+ * when matching multicast addresses.
+ */
+static u32 emac_ether_addr_hash(u8 addr[ETH_ALEN])
+{
+ u32 crc32 = ether_crc(ETH_ALEN, addr);
+
+ return crc32 >> 26;
+}
+
+/* Configure Multicast and Promiscuous modes */
+static void emac_set_rx_mode(struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct netdev_hw_addr *ha;
+ u32 mc_filter[4] = { 0 };
+ u32 hash, reg, bit, val;
+
+ val = emac_rd(priv, MAC_ADDRESS_CONTROL);
+
+ val &= ~MREGBIT_PROMISCUOUS_MODE;
+
+ if (ndev->flags & IFF_PROMISC) {
+ /* Enable promisc mode */
+ val |= MREGBIT_PROMISCUOUS_MODE;
+ } else if ((ndev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(ndev) > HASH_TABLE_SIZE)) {
+ /* Accept all multicast frames by setting every bit */
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0xffff);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0xffff);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0xffff);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0xffff);
+ } else if (!netdev_mc_empty(ndev)) {
+ emac_mac_multicast_filter_clear(priv);
+ netdev_for_each_mc_addr(ha, ndev) {
+ /*
+ * The hash table is an array of 4 16-bit registers. It
+ * is treated like an array of 64 bits (bits[hash]).
+ */
+ hash = emac_ether_addr_hash(ha->addr);
+ reg = hash / 16;
+ bit = hash % 16;
+ mc_filter[reg] |= BIT(bit);
+ }
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, mc_filter[0]);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, mc_filter[1]);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, mc_filter[2]);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, mc_filter[3]);
+ }
+
+ emac_wr(priv, MAC_ADDRESS_CONTROL, val);
+}
+
+static int emac_change_mtu(struct net_device *ndev, int mtu)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ u32 frame_len;
+
+ if (netif_running(ndev)) {
+ netdev_err(ndev, "must be stopped to change MTU\n");
+ return -EBUSY;
+ }
+
+ frame_len = mtu + ETH_HLEN + ETH_FCS_LEN;
+
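+ /* Pick the smallest supported RX buffer size that fits the new frame length */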
+ if (frame_len <= EMAC_DEFAULT_BUFSIZE)
+ priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE;
+ else if (frame_len <= EMAC_RX_BUF_2K)
+ priv->dma_buf_sz = EMAC_RX_BUF_2K;
+ else
+ priv->dma_buf_sz = EMAC_RX_BUF_4K;
+
+ ndev->mtu = mtu;
+
+ return 0;
+}
+
+static void emac_tx_timeout(struct net_device *ndev, unsigned int txqueue)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ schedule_work(&priv->tx_timeout_task);
+}
+
+static int emac_mii_read(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ struct emac_priv *priv = bus->priv;
+ u32 cmd = 0, val;
+ int ret;
+
+ cmd |= FIELD_PREP(MREGBIT_PHY_ADDRESS, phy_addr);
+ cmd |= FIELD_PREP(MREGBIT_REGISTER_ADDRESS, regnum);
+ cmd |= MREGBIT_START_MDIO_TRANS | MREGBIT_MDIO_READ_WRITE;
+
+ emac_wr(priv, MAC_MDIO_DATA, 0x0);
+ emac_wr(priv, MAC_MDIO_CONTROL, cmd);
+
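+ /* The controller clears the start bit once the MDIO transaction completes */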
+ ret = readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
+ !(val & MREGBIT_START_MDIO_TRANS), 100, 10000);
+
+ if (ret)
+ return ret;
+
+ val = emac_rd(priv, MAC_MDIO_DATA);
+ return FIELD_GET(MREGBIT_MDIO_DATA, val);
+}
+
+static int emac_mii_write(struct mii_bus *bus, int phy_addr, int regnum,
+ u16 value)
+{
+ struct emac_priv *priv = bus->priv;
+ u32 cmd = 0, val;
+ int ret;
+
+ emac_wr(priv, MAC_MDIO_DATA, value);
+
+ cmd |= FIELD_PREP(MREGBIT_PHY_ADDRESS, phy_addr);
+ cmd |= FIELD_PREP(MREGBIT_REGISTER_ADDRESS, regnum);
+ cmd |= MREGBIT_START_MDIO_TRANS;
+
+ emac_wr(priv, MAC_MDIO_CONTROL, cmd);
+
+ ret = readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
+ !(val & MREGBIT_START_MDIO_TRANS), 100, 10000);
+
+ return ret;
+}
+
+static int emac_mdio_init(struct emac_priv *priv)
+{
+ struct device *dev = &priv->pdev->dev;
+ struct device_node *mii_np;
+ struct mii_bus *mii;
+ int ret;
+
+ mii = devm_mdiobus_alloc(dev);
+ if (!mii)
+ return -ENOMEM;
+
+ mii->priv = priv;
+ mii->name = "k1_emac_mii";
+ mii->read = emac_mii_read;
+ mii->write = emac_mii_write;
+ mii->parent = dev;
+ mii->phy_mask = ~0;
+ snprintf(mii->id, MII_BUS_ID_SIZE, "%s", priv->pdev->name);
+
+ mii_np = of_get_available_child_by_name(dev->of_node, "mdio-bus");
+
+ ret = devm_of_mdiobus_register(dev, mii, mii_np);
+ if (ret)
+ dev_err_probe(dev, ret, "Failed to register mdio bus\n");
+
+ of_node_put(mii_np);
+ return ret;
+}
+
+static void emac_set_tx_fc(struct emac_priv *priv, bool enable)
+{
+ u32 val;
+
+ val = emac_rd(priv, MAC_FC_CONTROL);
+
+ FIELD_MODIFY(MREGBIT_FC_GENERATION_ENABLE, &val, enable);
+ FIELD_MODIFY(MREGBIT_AUTO_FC_GENERATION_ENABLE, &val, enable);
+
+ emac_wr(priv, MAC_FC_CONTROL, val);
+}
+
+static void emac_set_rx_fc(struct emac_priv *priv, bool enable)
+{
+ u32 val = emac_rd(priv, MAC_FC_CONTROL);
+
+ FIELD_MODIFY(MREGBIT_FC_DECODE_ENABLE, &val, enable);
+
+ emac_wr(priv, MAC_FC_CONTROL, val);
+}
+
+static void emac_set_fc(struct emac_priv *priv, u8 fc)
+{
+ emac_set_tx_fc(priv, fc & FLOW_CTRL_TX);
+ emac_set_rx_fc(priv, fc & FLOW_CTRL_RX);
+ priv->flow_control = fc;
+}
+
+static void emac_set_fc_autoneg(struct emac_priv *priv)
+{
+ struct phy_device *phydev = priv->ndev->phydev;
+ u32 local_adv, remote_adv;
+ u8 fc;
+
+ local_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
+
+ remote_adv = 0;
+
+ if (phydev->pause)
+ remote_adv |= LPA_PAUSE_CAP;
+
+ if (phydev->asym_pause)
+ remote_adv |= LPA_PAUSE_ASYM;
+
+ fc = mii_resolve_flowctrl_fdx(local_adv, remote_adv);
+
+ priv->flow_control_autoneg = true;
+
+ emac_set_fc(priv, fc);
+}
+
+/*
+ * Even though this MAC supports gigabit operation, it only provides 32-bit
+ * statistics counters. The most overflow-prone counters are the "bytes" ones,
+ * which at gigabit overflow about twice a minute.
+ *
+ * Therefore, we maintain the high 32 bits of counters ourselves, incrementing
+ * every time statistics seem to go backwards. Also, update periodically to
+ * catch overflows when we are not otherwise checking the statistics often
+ * enough.
+ */
+
+#define EMAC_STATS_TIMER_PERIOD 20
+
+static int emac_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res,
+ u32 control_reg, u32 high_reg, u32 low_reg)
+{
+ u32 val, high, low;
+ int ret;
+
+ /* The "read" bit is the same for TX and RX */
+
+ val = MREGBIT_START_TX_COUNTER_READ | cnt;
+ emac_wr(priv, control_reg, val);
+ val = emac_rd(priv, control_reg);
+
+ ret = readl_poll_timeout_atomic(priv->iobase + control_reg, val,
+ !(val & MREGBIT_START_TX_COUNTER_READ),
+ 100, 10000);
+
+ if (ret) {
+ netdev_err(priv->ndev, "Read stat timeout\n");
+ return ret;
+ }
+
+ high = emac_rd(priv, high_reg);
+ low = emac_rd(priv, low_reg);
+ *res = high << 16 | lower_16_bits(low);
+
+ return 0;
+}
+
+static int emac_tx_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res)
+{
+ return emac_read_stat_cnt(priv, cnt, res, MAC_TX_STATCTR_CONTROL,
+ MAC_TX_STATCTR_DATA_HIGH,
+ MAC_TX_STATCTR_DATA_LOW);
+}
+
+static int emac_rx_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res)
+{
+ return emac_read_stat_cnt(priv, cnt, res, MAC_RX_STATCTR_CONTROL,
+ MAC_RX_STATCTR_DATA_HIGH,
+ MAC_RX_STATCTR_DATA_LOW);
+}
+
+static void emac_update_counter(u64 *counter, u32 new_low)
+{
+ u32 old_low = lower_32_bits(*counter);
+ u64 high = upper_32_bits(*counter);
+
+ if (old_low > new_low) {
+ /* Overflowed, increment high 32 bits */
+ high++;
+ }
+
+ *counter = (high << 32) | new_low;
+}
+
+static void emac_stats_update(struct emac_priv *priv)
+{
+ u64 *tx_stats_off = priv->tx_stats_off.array;
+ u64 *rx_stats_off = priv->rx_stats_off.array;
+ u64 *tx_stats = priv->tx_stats.array;
+ u64 *rx_stats = priv->rx_stats.array;
+ u32 i, res, offset;
+
+ assert_spin_locked(&priv->stats_lock);
+
+ if (!netif_running(priv->ndev) || !netif_device_present(priv->ndev)) {
+ /* Not up, don't try to update */
+ return;
+ }
+
+ for (i = 0; i < sizeof(priv->tx_stats) / sizeof(*tx_stats); i++) {
+ /*
+ * If reading stats times out, everything is broken and there's
+ * nothing we can do. Reading statistics also can't return an
+ * error, so just return without updating and without
+ * rescheduling.
+ */
+ if (emac_tx_read_stat_cnt(priv, i, &res))
+ return;
+
+ /*
+ * Re-initializing while bringing interface up resets counters
+ * to zero, so to provide continuity, we add the values saved
+ * last time we did emac_down() to the new hardware-provided
+ * value.
+ */
+ offset = lower_32_bits(tx_stats_off[i]);
+ emac_update_counter(&tx_stats[i], res + offset);
+ }
+
+ /* Similar remarks as TX stats */
+ for (i = 0; i < sizeof(priv->rx_stats) / sizeof(*rx_stats); i++) {
+ if (emac_rx_read_stat_cnt(priv, i, &res))
+ return;
+ offset = lower_32_bits(rx_stats_off[i]);
+ emac_update_counter(&rx_stats[i], res + offset);
+ }
+
+ mod_timer(&priv->stats_timer, jiffies + EMAC_STATS_TIMER_PERIOD * HZ);
+}
+
+static void emac_stats_timer(struct timer_list *t)
+{
+ struct emac_priv *priv = timer_container_of(priv, t, stats_timer);
+
+ spin_lock(&priv->stats_lock);
+
+ emac_stats_update(priv);
+
+ spin_unlock(&priv->stats_lock);
+}
+
+static const struct ethtool_rmon_hist_range emac_rmon_hist_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 4096 },
+ { /* sentinel */ },
+};
+
+/* Like dev_fetch_dstats(), but we only use tx_drops */
+static u64 emac_get_stat_tx_drops(struct emac_priv *priv)
+{
+ const struct pcpu_dstats *stats;
+ u64 tx_drops, total = 0;
+ unsigned int start;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ stats = per_cpu_ptr(priv->ndev->dstats, cpu);
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ tx_drops = u64_stats_read(&stats->tx_drops);
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ total += tx_drops;
+ }
+
+ return total;
+}
+
+static void emac_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ union emac_hw_tx_stats *tx_stats;
+ union emac_hw_rx_stats *rx_stats;
+
+ tx_stats = &priv->tx_stats;
+ rx_stats = &priv->rx_stats;
+
+ /* This is the only software counter */
+ storage->tx_dropped = emac_get_stat_tx_drops(priv);
+
+ spin_lock_bh(&priv->stats_lock);
+
+ emac_stats_update(priv);
+
+ storage->tx_packets = tx_stats->stats.tx_ok_pkts;
+ storage->tx_bytes = tx_stats->stats.tx_ok_bytes;
+ storage->tx_errors = tx_stats->stats.tx_err_pkts;
+
+ storage->rx_packets = rx_stats->stats.rx_ok_pkts;
+ storage->rx_bytes = rx_stats->stats.rx_ok_bytes;
+ storage->rx_errors = rx_stats->stats.rx_err_total_pkts;
+ storage->rx_crc_errors = rx_stats->stats.rx_crc_err_pkts;
+ storage->rx_frame_errors = rx_stats->stats.rx_align_err_pkts;
+ storage->rx_length_errors = rx_stats->stats.rx_len_err_pkts;
+
+ storage->collisions = tx_stats->stats.tx_singleclsn_pkts;
+ storage->collisions += tx_stats->stats.tx_multiclsn_pkts;
+ storage->collisions += tx_stats->stats.tx_excessclsn_pkts;
+
+ storage->rx_missed_errors = rx_stats->stats.rx_drp_fifo_full_pkts;
+ storage->rx_missed_errors += rx_stats->stats.rx_truncate_fifo_full_pkts;
+
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+static void emac_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ union emac_hw_rx_stats *rx_stats;
+
+ rx_stats = &priv->rx_stats;
+
+ *ranges = emac_rmon_hist_ranges;
+
+ spin_lock_bh(&priv->stats_lock);
+
+ emac_stats_update(priv);
+
+ rmon_stats->undersize_pkts = rx_stats->stats.rx_len_undersize_pkts;
+ rmon_stats->oversize_pkts = rx_stats->stats.rx_len_oversize_pkts;
+ rmon_stats->fragments = rx_stats->stats.rx_len_fragment_pkts;
+ rmon_stats->jabbers = rx_stats->stats.rx_len_jabber_pkts;
+
+ /* Only RX has histogram stats */
+
+ rmon_stats->hist[0] = rx_stats->stats.rx_64_pkts;
+ rmon_stats->hist[1] = rx_stats->stats.rx_65_127_pkts;
+ rmon_stats->hist[2] = rx_stats->stats.rx_128_255_pkts;
+ rmon_stats->hist[3] = rx_stats->stats.rx_256_511_pkts;
+ rmon_stats->hist[4] = rx_stats->stats.rx_512_1023_pkts;
+ rmon_stats->hist[5] = rx_stats->stats.rx_1024_1518_pkts;
+ rmon_stats->hist[6] = rx_stats->stats.rx_1519_plus_pkts;
+
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+static void emac_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ union emac_hw_tx_stats *tx_stats;
+ union emac_hw_rx_stats *rx_stats;
+
+ tx_stats = &priv->tx_stats;
+ rx_stats = &priv->rx_stats;
+
+ spin_lock_bh(&priv->stats_lock);
+
+ emac_stats_update(priv);
+
+ mac_stats->MulticastFramesXmittedOK = tx_stats->stats.tx_multicast_pkts;
+ mac_stats->BroadcastFramesXmittedOK = tx_stats->stats.tx_broadcast_pkts;
+
+ mac_stats->MulticastFramesReceivedOK =
+ rx_stats->stats.rx_multicast_pkts;
+ mac_stats->BroadcastFramesReceivedOK =
+ rx_stats->stats.rx_broadcast_pkts;
+
+ mac_stats->SingleCollisionFrames = tx_stats->stats.tx_singleclsn_pkts;
+ mac_stats->MultipleCollisionFrames = tx_stats->stats.tx_multiclsn_pkts;
+ mac_stats->LateCollisions = tx_stats->stats.tx_lateclsn_pkts;
+ mac_stats->FramesAbortedDueToXSColls =
+ tx_stats->stats.tx_excessclsn_pkts;
+
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+static void emac_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ union emac_hw_tx_stats *tx_stats;
+ union emac_hw_rx_stats *rx_stats;
+
+ tx_stats = &priv->tx_stats;
+ rx_stats = &priv->rx_stats;
+
+ spin_lock_bh(&priv->stats_lock);
+
+ emac_stats_update(priv);
+
+ pause_stats->tx_pause_frames = tx_stats->stats.tx_pause_pkts;
+ pause_stats->rx_pause_frames = rx_stats->stats.rx_pause_pkts;
+
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+/* Other statistics that are not derivable from standard statistics */
+
+#define EMAC_ETHTOOL_STAT(type, name) \
+ { offsetof(type, stats.name) / sizeof(u64), #name }
+
+static const struct emac_ethtool_stats {
+ size_t offset;
+ char str[ETH_GSTRING_LEN];
+} emac_ethtool_rx_stats[] = {
+ EMAC_ETHTOOL_STAT(union emac_hw_rx_stats, rx_drp_fifo_full_pkts),
+ EMAC_ETHTOOL_STAT(union emac_hw_rx_stats, rx_truncate_fifo_full_pkts),
+};
+
+static int emac_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(emac_ethtool_rx_stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void emac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(emac_ethtool_rx_stats); i++) {
+ memcpy(data, emac_ethtool_rx_stats[i].str,
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static void emac_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ u64 *rx_stats = (u64 *)&priv->rx_stats;
+ int i;
+
+ spin_lock_bh(&priv->stats_lock);
+
+ emac_stats_update(priv);
+
+ for (i = 0; i < ARRAY_SIZE(emac_ethtool_rx_stats); i++)
+ data[i] = rx_stats[emac_ethtool_rx_stats[i].offset];
+
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+static int emac_ethtool_get_regs_len(struct net_device *dev)
+{
+ return (EMAC_DMA_REG_CNT + EMAC_MAC_REG_CNT) * sizeof(u32);
+}
+
+static void emac_ethtool_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *space)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ u32 *reg_space = space;
+ int i;
+
+ regs->version = 1;
+
+ for (i = 0; i < EMAC_DMA_REG_CNT; i++)
+ reg_space[i] = emac_rd(priv, DMA_CONFIGURATION + i * 4);
+
+ for (i = 0; i < EMAC_MAC_REG_CNT; i++)
+ reg_space[i + EMAC_DMA_REG_CNT] =
+ emac_rd(priv, MAC_GLOBAL_CONTROL + i * 4);
+}
+
+static void emac_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+
+ pause->autoneg = priv->flow_control_autoneg;
+ pause->tx_pause = !!(priv->flow_control & FLOW_CTRL_TX);
+ pause->rx_pause = !!(priv->flow_control & FLOW_CTRL_RX);
+}
+
+static int emac_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ u8 fc = 0;
+
+ if (!netif_running(dev))
+ return -ENETDOWN;
+
+ priv->flow_control_autoneg = pause->autoneg;
+
+ if (pause->autoneg) {
+ emac_set_fc_autoneg(priv);
+ } else {
+ if (pause->tx_pause)
+ fc |= FLOW_CTRL_TX;
+
+ if (pause->rx_pause)
+ fc |= FLOW_CTRL_RX;
+
+ emac_set_fc(priv, fc);
+ }
+
+ return 0;
+}
+
+static void emac_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ info->n_stats = ARRAY_SIZE(emac_ethtool_rx_stats);
+}
+
+static void emac_tx_timeout_task(struct work_struct *work)
+{
+ struct net_device *ndev;
+ struct emac_priv *priv;
+
+ priv = container_of(work, struct emac_priv, tx_timeout_task);
+ ndev = priv->ndev;
+
+ rtnl_lock();
+
+ /* No need to reset if already down */
+ if (!netif_running(ndev)) {
+ rtnl_unlock();
+ return;
+ }
+
+ netdev_err(ndev, "MAC reset due to TX timeout\n");
+
+ netif_trans_update(ndev); /* prevent tx timeout */
+ dev_close(ndev);
+ dev_open(ndev, NULL);
+
+ rtnl_unlock();
+}
+
+static void emac_sw_init(struct emac_priv *priv)
+{
+ priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE;
+
+ priv->tx_ring.total_cnt = DEFAULT_TX_RING_NUM;
+ priv->rx_ring.total_cnt = DEFAULT_RX_RING_NUM;
+
+ spin_lock_init(&priv->stats_lock);
+
+ INIT_WORK(&priv->tx_timeout_task, emac_tx_timeout_task);
+
+ priv->tx_coal_frames = EMAC_TX_FRAMES;
+ priv->tx_coal_timeout = EMAC_TX_COAL_TIMEOUT;
+
+ timer_setup(&priv->txtimer, emac_tx_coal_timer, 0);
+ timer_setup(&priv->stats_timer, emac_stats_timer, 0);
+}
+
+static irqreturn_t emac_interrupt_handler(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct emac_priv *priv = netdev_priv(ndev);
+ bool should_schedule = false;
+ u32 clr = 0;
+ u32 status;
+
+ status = emac_rd(priv, DMA_STATUS_IRQ);
+
+ if (status & MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ) {
+ clr |= MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ;
+ should_schedule = true;
+ }
+
+ if (status & MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ)
+ clr |= MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ;
+
+ if (status & MREGBIT_TRANSMIT_DMA_STOPPED_IRQ)
+ clr |= MREGBIT_TRANSMIT_DMA_STOPPED_IRQ;
+
+ if (status & MREGBIT_RECEIVE_TRANSFER_DONE_IRQ) {
+ clr |= MREGBIT_RECEIVE_TRANSFER_DONE_IRQ;
+ should_schedule = true;
+ }
+
+ if (status & MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ)
+ clr |= MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ;
+
+ if (status & MREGBIT_RECEIVE_DMA_STOPPED_IRQ)
+ clr |= MREGBIT_RECEIVE_DMA_STOPPED_IRQ;
+
+ if (status & MREGBIT_RECEIVE_MISSED_FRAME_IRQ)
+ clr |= MREGBIT_RECEIVE_MISSED_FRAME_IRQ;
+
+ if (should_schedule) {
+ if (napi_schedule_prep(&priv->napi)) {
+ emac_disable_interrupt(priv);
+ __napi_schedule_irqoff(&priv->napi);
+ }
+ }
+
+ emac_wr(priv, DMA_STATUS_IRQ, clr);
+
+ return IRQ_HANDLED;
+}
+
+static void emac_configure_tx(struct emac_priv *priv)
+{
+ u32 val;
+
+ /* Set base address */
+ val = (u32)priv->tx_ring.desc_dma_addr;
+ emac_wr(priv, DMA_TRANSMIT_BASE_ADDRESS, val);
+
+ /* Set TX inter-frame gap value, enable transmit */
+ val = emac_rd(priv, MAC_TRANSMIT_CONTROL);
+ val &= ~MREGBIT_IFG_LEN;
+ val |= MREGBIT_TRANSMIT_ENABLE;
+ val |= MREGBIT_TRANSMIT_AUTO_RETRY;
+ emac_wr(priv, MAC_TRANSMIT_CONTROL, val);
+
+ emac_wr(priv, DMA_TRANSMIT_AUTO_POLL_COUNTER, 0x0);
+
+ /* Start TX DMA */
+ val = emac_rd(priv, DMA_CONTROL);
+ val |= MREGBIT_START_STOP_TRANSMIT_DMA;
+ emac_wr(priv, DMA_CONTROL, val);
+}
+
+static void emac_configure_rx(struct emac_priv *priv)
+{
+ u32 val;
+
+ /* Set base address */
+ val = (u32)priv->rx_ring.desc_dma_addr;
+ emac_wr(priv, DMA_RECEIVE_BASE_ADDRESS, val);
+
+ /* Enable receive */
+ val = emac_rd(priv, MAC_RECEIVE_CONTROL);
+ val |= MREGBIT_RECEIVE_ENABLE;
+ val |= MREGBIT_STORE_FORWARD;
+ emac_wr(priv, MAC_RECEIVE_CONTROL, val);
+
+ /* Start RX DMA */
+ val = emac_rd(priv, DMA_CONTROL);
+ val |= MREGBIT_START_STOP_RECEIVE_DMA;
+ emac_wr(priv, DMA_CONTROL, val);
+}
+
+static void emac_adjust_link(struct net_device *dev)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
+ u32 ctrl;
+
+ if (phydev->link) {
+ ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
+
+ /* Update duplex and speed from PHY */
+
+ FIELD_MODIFY(MREGBIT_FULL_DUPLEX_MODE, &ctrl,
+ phydev->duplex == DUPLEX_FULL);
+
+ ctrl &= ~MREGBIT_SPEED;
+
+ switch (phydev->speed) {
+ case SPEED_1000:
+ ctrl |= MREGBIT_SPEED_1000M;
+ break;
+ case SPEED_100:
+ ctrl |= MREGBIT_SPEED_100M;
+ break;
+ case SPEED_10:
+ ctrl |= MREGBIT_SPEED_10M;
+ break;
+ default:
+ netdev_err(dev, "Unknown speed: %d\n", phydev->speed);
+ phydev->speed = SPEED_UNKNOWN;
+ break;
+ }
+
+ emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
+
+ emac_set_fc_autoneg(priv);
+ }
+
+ phy_print_status(phydev);
+}
+
+static void emac_update_delay_line(struct emac_priv *priv)
+{
+ u32 mask = 0, val = 0;
+
+ mask |= EMAC_RX_DLINE_EN;
+ mask |= EMAC_RX_DLINE_STEP_MASK | EMAC_RX_DLINE_CODE_MASK;
+ mask |= EMAC_TX_DLINE_EN;
+ mask |= EMAC_TX_DLINE_STEP_MASK | EMAC_TX_DLINE_CODE_MASK;
+
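+ /* The delay line is only enabled for RGMII modes; otherwise leave it off */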
+ if (phy_interface_mode_is_rgmii(priv->phy_interface)) {
+ val |= EMAC_RX_DLINE_EN;
+ val |= FIELD_PREP(EMAC_RX_DLINE_STEP_MASK,
+ EMAC_DLINE_STEP_15P6);
+ val |= FIELD_PREP(EMAC_RX_DLINE_CODE_MASK, priv->rx_delay);
+
+ val |= EMAC_TX_DLINE_EN;
+ val |= FIELD_PREP(EMAC_TX_DLINE_STEP_MASK,
+ EMAC_DLINE_STEP_15P6);
+ val |= FIELD_PREP(EMAC_TX_DLINE_CODE_MASK, priv->tx_delay);
+ }
+
+ regmap_update_bits(priv->regmap_apmu,
+ priv->regmap_apmu_offset + APMU_EMAC_DLINE_REG,
+ mask, val);
+}
+
+static int emac_phy_connect(struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct device *dev = &priv->pdev->dev;
+ struct phy_device *phydev;
+ struct device_node *np;
+ int ret;
+
+ ret = of_get_phy_mode(dev->of_node, &priv->phy_interface);
+ if (ret) {
+ netdev_err(ndev, "No phy-mode found");
+ return ret;
+ }
+
+ switch (priv->phy_interface) {
+ case PHY_INTERFACE_MODE_RMII:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ break;
+ default:
+ netdev_err(ndev, "Unsupported PHY interface %s",
+ phy_modes(priv->phy_interface));
+ return -EINVAL;
+ }
+
+ np = of_parse_phandle(dev->of_node, "phy-handle", 0);
+ if (!np && of_phy_is_fixed_link(dev->of_node))
+ np = of_node_get(dev->of_node);
+
+ if (!np) {
+ netdev_err(ndev, "No PHY specified");
+ return -ENODEV;
+ }
+
+ ret = emac_phy_interface_config(priv);
+ if (ret)
+ goto err_node_put;
+
+ phydev = of_phy_connect(ndev, np, &emac_adjust_link, 0,
+ priv->phy_interface);
+ if (!phydev) {
+ netdev_err(ndev, "Could not attach to PHY\n");
+ ret = -ENODEV;
+ goto err_node_put;
+ }
+
+ phy_support_asym_pause(phydev);
+
+ phydev->mac_managed_pm = true;
+
+ emac_update_delay_line(priv);
+
+err_node_put:
+ of_node_put(np);
+ return ret;
+}
+
+static int emac_up(struct emac_priv *priv)
+{
+ struct platform_device *pdev = priv->pdev;
+ struct net_device *ndev = priv->ndev;
+ int ret;
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ ret = emac_phy_connect(ndev);
+ if (ret) {
+ dev_err(&pdev->dev, "emac_phy_connect failed\n");
+ goto err_pm_put;
+ }
+
+ emac_init_hw(priv);
+
+ emac_set_mac_addr(priv, ndev->dev_addr);
+ emac_configure_tx(priv);
+ emac_configure_rx(priv);
+
+ emac_alloc_rx_desc_buffers(priv);
+
+ phy_start(ndev->phydev);
+
+ ret = request_irq(priv->irq, emac_interrupt_handler, IRQF_SHARED,
+ ndev->name, ndev);
+ if (ret) {
+ dev_err(&pdev->dev, "request_irq failed\n");
+ goto err_reset_disconnect_phy;
+ }
+
+ /* Don't enable MAC interrupts */
+ emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
+
+ /* Enable DMA interrupts */
+ emac_wr(priv, DMA_INTERRUPT_ENABLE,
+ MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE |
+ MREGBIT_TRANSMIT_DMA_STOPPED_INTR_ENABLE |
+ MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE |
+ MREGBIT_RECEIVE_DMA_STOPPED_INTR_ENABLE |
+ MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE);
+
+ napi_enable(&priv->napi);
+
+ netif_start_queue(ndev);
+
+ mod_timer(&priv->stats_timer, jiffies);
+
+ return 0;
+
+err_reset_disconnect_phy:
+ emac_reset_hw(priv);
+ phy_disconnect(ndev->phydev);
+
+err_pm_put:
+ pm_runtime_put_sync(&pdev->dev);
+ return ret;
+}
+
+static int emac_down(struct emac_priv *priv)
+{
+ struct platform_device *pdev = priv->pdev;
+ struct net_device *ndev = priv->ndev;
+
+ netif_stop_queue(ndev);
+
+ phy_disconnect(ndev->phydev);
+
+ emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
+ emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0);
+
+ free_irq(priv->irq, ndev);
+
+ napi_disable(&priv->napi);
+
+ timer_delete_sync(&priv->txtimer);
+ cancel_work_sync(&priv->tx_timeout_task);
+
+ timer_delete_sync(&priv->stats_timer);
+
+ emac_reset_hw(priv);
+
+ /* Update and save current stats, see emac_stats_update() for usage */
+
+ spin_lock_bh(&priv->stats_lock);
+
+ emac_stats_update(priv);
+
+ priv->tx_stats_off = priv->tx_stats;
+ priv->rx_stats_off = priv->rx_stats;
+
+ spin_unlock_bh(&priv->stats_lock);
+
+ pm_runtime_put_sync(&pdev->dev);
+ return 0;
+}
+
+/* Called when net interface is brought up. */
+static int emac_open(struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct device *dev = &priv->pdev->dev;
+ int ret;
+
+ ret = emac_alloc_tx_resources(priv);
+ if (ret) {
+ dev_err(dev, "Cannot allocate TX resources\n");
+ return ret;
+ }
+
+ ret = emac_alloc_rx_resources(priv);
+ if (ret) {
+ dev_err(dev, "Cannot allocate RX resources\n");
+ goto err_free_tx;
+ }
+
+ ret = emac_up(priv);
+ if (ret) {
+ dev_err(dev, "Error when bringing interface up\n");
+ goto err_free_rx;
+ }
+ return 0;
+
+err_free_rx:
+ emac_free_rx_resources(priv);
+err_free_tx:
+ emac_free_tx_resources(priv);
+
+ return ret;
+}
+
+/* Called when interface is brought down. */
+static int emac_stop(struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ emac_down(priv);
+ emac_free_tx_resources(priv);
+ emac_free_rx_resources(priv);
+
+ return 0;
+}
+
+static const struct ethtool_ops emac_ethtool_ops = {
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_drvinfo = emac_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+
+ .get_regs = emac_ethtool_get_regs,
+ .get_regs_len = emac_ethtool_get_regs_len,
+
+ .get_rmon_stats = emac_get_rmon_stats,
+ .get_pause_stats = emac_get_pause_stats,
+ .get_eth_mac_stats = emac_get_eth_mac_stats,
+
+ .get_sset_count = emac_get_sset_count,
+ .get_strings = emac_get_strings,
+ .get_ethtool_stats = emac_get_ethtool_stats,
+
+ .get_pauseparam = emac_get_pauseparam,
+ .set_pauseparam = emac_set_pauseparam,
+};
+
+static const struct net_device_ops emac_netdev_ops = {
+ .ndo_open = emac_open,
+ .ndo_stop = emac_stop,
+ .ndo_start_xmit = emac_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = emac_set_mac_address,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
+ .ndo_change_mtu = emac_change_mtu,
+ .ndo_tx_timeout = emac_tx_timeout,
+ .ndo_set_rx_mode = emac_set_rx_mode,
+ .ndo_get_stats64 = emac_get_stats64,
+};
+
+/* Currently we always use 15.6 ps/step for the delay line */
+
+static u32 delay_ps_to_unit(u32 ps)
+{
+ return DIV_ROUND_CLOSEST(ps * 10, 156);
+}
+
+static u32 delay_unit_to_ps(u32 unit)
+{
+ return DIV_ROUND_CLOSEST(unit * 156, 10);
+}
+
+#define EMAC_MAX_DELAY_UNIT FIELD_MAX(EMAC_TX_DLINE_CODE_MASK)
+
+/* Minus one just to be safe from rounding errors */
+#define EMAC_MAX_DELAY_PS (delay_unit_to_ps(EMAC_MAX_DELAY_UNIT - 1))
+
+static int emac_config_dt(struct platform_device *pdev, struct emac_priv *priv)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ u8 mac_addr[ETH_ALEN] = { 0 };
+ int ret;
+
+ priv->iobase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->iobase))
+ return dev_err_probe(dev, PTR_ERR(priv->iobase),
+ "ioremap failed\n");
+
+ priv->regmap_apmu =
+ syscon_regmap_lookup_by_phandle_args(np, "spacemit,apmu", 1,
+ &priv->regmap_apmu_offset);
+
+ if (IS_ERR(priv->regmap_apmu))
+ return dev_err_probe(dev, PTR_ERR(priv->regmap_apmu),
+ "failed to get syscon\n");
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq < 0)
+ return priv->irq;
+
+ ret = of_get_mac_address(np, mac_addr);
+ if (ret) {
+ if (ret == -EPROBE_DEFER)
+ return dev_err_probe(dev, ret,
+ "Can't get MAC address\n");
+
+ dev_info(&pdev->dev, "Using random MAC address\n");
+ eth_hw_addr_random(priv->ndev);
+ } else {
+ eth_hw_addr_set(priv->ndev, mac_addr);
+ }
+
+ priv->tx_delay = 0;
+ priv->rx_delay = 0;
+
+ of_property_read_u32(np, "tx-internal-delay-ps", &priv->tx_delay);
+ of_property_read_u32(np, "rx-internal-delay-ps", &priv->rx_delay);
+
+ if (priv->tx_delay > EMAC_MAX_DELAY_PS) {
+ dev_err(&pdev->dev,
+ "tx-internal-delay-ps too large: max %d, got %d",
+ EMAC_MAX_DELAY_PS, priv->tx_delay);
+ return -EINVAL;
+ }
+
+ if (priv->rx_delay > EMAC_MAX_DELAY_PS) {
+ dev_err(&pdev->dev,
+ "rx-internal-delay-ps too large: max %d, got %d",
+ EMAC_MAX_DELAY_PS, priv->rx_delay);
+ return -EINVAL;
+ }
+
+ priv->tx_delay = delay_ps_to_unit(priv->tx_delay);
+ priv->rx_delay = delay_ps_to_unit(priv->rx_delay);
+
+ return 0;
+}
+
+static void emac_phy_deregister_fixed_link(void *data)
+{
+ struct device_node *of_node = data;
+
+ of_phy_deregister_fixed_link(of_node);
+}
+
+static int emac_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct reset_control *reset;
+ struct net_device *ndev;
+ struct emac_priv *priv;
+ int ret;
+
+ ndev = devm_alloc_etherdev(dev, sizeof(struct emac_priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ ndev->hw_features = NETIF_F_SG;
+ ndev->features |= ndev->hw_features;
+
+ ndev->max_mtu = EMAC_RX_BUF_4K - (ETH_HLEN + ETH_FCS_LEN);
+ ndev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
+
+ priv = netdev_priv(ndev);
+ priv->ndev = ndev;
+ priv->pdev = pdev;
+ platform_set_drvdata(pdev, priv);
+
+ ret = emac_config_dt(pdev, priv);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Configuration failed\n");
+
+ ndev->watchdog_timeo = 5 * HZ;
+ ndev->base_addr = (unsigned long)priv->iobase;
+ ndev->irq = priv->irq;
+
+ ndev->ethtool_ops = &emac_ethtool_ops;
+ ndev->netdev_ops = &emac_netdev_ops;
+
+ devm_pm_runtime_enable(&pdev->dev);
+
+ priv->bus_clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(priv->bus_clk))
+ return dev_err_probe(dev, PTR_ERR(priv->bus_clk),
+ "Failed to get clock\n");
+
+ reset = devm_reset_control_get_optional_exclusive_deasserted(&pdev->dev,
+ NULL);
+ if (IS_ERR(reset))
+ return dev_err_probe(dev, PTR_ERR(reset),
+ "Failed to get reset\n");
+
+ if (of_phy_is_fixed_link(dev->of_node)) {
+ ret = of_phy_register_fixed_link(dev->of_node);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register fixed-link\n");
+
+ ret = devm_add_action_or_reset(dev,
+ emac_phy_deregister_fixed_link,
+ dev->of_node);
+
+ if (ret) {
+ dev_err(dev, "devm_add_action_or_reset failed\n");
+ return ret;
+ }
+ }
+
+ emac_sw_init(priv);
+
+ ret = emac_mdio_init(priv);
+ if (ret)
+ goto err_timer_delete;
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ ret = devm_register_netdev(dev, ndev);
+ if (ret) {
+ dev_err(dev, "devm_register_netdev failed\n");
+ goto err_timer_delete;
+ }
+
+ netif_napi_add(ndev, &priv->napi, emac_rx_poll);
+ netif_carrier_off(ndev);
+
+ return 0;
+
+err_timer_delete:
+ timer_delete_sync(&priv->txtimer);
+ timer_delete_sync(&priv->stats_timer);
+
+ return ret;
+}
+
+static void emac_remove(struct platform_device *pdev)
+{
+ struct emac_priv *priv = platform_get_drvdata(pdev);
+
+ timer_shutdown_sync(&priv->txtimer);
+ cancel_work_sync(&priv->tx_timeout_task);
+
+ timer_shutdown_sync(&priv->stats_timer);
+
+ emac_reset_hw(priv);
+}
+
+static int emac_resume(struct device *dev)
+{
+ struct emac_priv *priv = dev_get_drvdata(dev);
+ struct net_device *ndev = priv->ndev;
+ int ret;
+
+ ret = clk_prepare_enable(priv->bus_clk);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable bus clock: %d\n", ret);
+ return ret;
+ }
+
+ if (!netif_running(ndev))
+ return 0;
+
+ ret = emac_open(ndev);
+ if (ret) {
+ clk_disable_unprepare(priv->bus_clk);
+ return ret;
+ }
+
+ netif_device_attach(ndev);
+
+ mod_timer(&priv->stats_timer, jiffies);
+
+ return 0;
+}
+
+static int emac_suspend(struct device *dev)
+{
+ struct emac_priv *priv = dev_get_drvdata(dev);
+ struct net_device *ndev = priv->ndev;
+
+ if (!ndev || !netif_running(ndev)) {
+ clk_disable_unprepare(priv->bus_clk);
+ return 0;
+ }
+
+ emac_stop(ndev);
+
+ clk_disable_unprepare(priv->bus_clk);
+ netif_device_detach(ndev);
+ return 0;
+}
+
+static const struct dev_pm_ops emac_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(emac_suspend, emac_resume)
+};
+
+static const struct of_device_id emac_of_match[] = {
+ { .compatible = "spacemit,k1-emac" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, emac_of_match);
+
+static struct platform_driver emac_driver = {
+ .probe = emac_probe,
+ .remove = emac_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(emac_of_match),
+ .pm = &emac_pm_ops,
+ },
+};
+module_platform_driver(emac_driver);
+
+MODULE_DESCRIPTION("SpacemiT K1 Ethernet driver");
+MODULE_AUTHOR("Vivian Wang <wangruikang@iscas.ac.cn>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/spacemit/k1_emac.h b/drivers/net/ethernet/spacemit/k1_emac.h
new file mode 100644
index 000000000000..577efe66573e
--- /dev/null
+++ b/drivers/net/ethernet/spacemit/k1_emac.h
@@ -0,0 +1,416 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SpacemiT K1 Ethernet hardware definitions
+ *
+ * Copyright (C) 2023-2025 SpacemiT (Hangzhou) Technology Co. Ltd
+ * Copyright (C) 2025 Vivian Wang <wangruikang@iscas.ac.cn>
+ */
+
+#ifndef _K1_EMAC_H_
+#define _K1_EMAC_H_
+
+#include <linux/stddef.h>
+
+/* APMU syscon registers */
+
+#define APMU_EMAC_CTRL_REG 0x0
+
+#define PHY_INTF_RGMII BIT(2)
+
+/*
+ * Only valid for RMII mode
+ * 0: Ref clock from External PHY
+ * 1: Ref clock from SoC
+ */
+#define REF_CLK_SEL BIT(3)
+
+/*
+ * Function clock select
+ * 0: 208 MHz
+ * 1: 312 MHz
+ */
+#define FUNC_CLK_SEL BIT(4)
+
+/* Only valid for RMII, invert TX clk */
+#define RMII_TX_CLK_SEL BIT(6)
+
+/* Only valid for RMII, invert RX clk */
+#define RMII_RX_CLK_SEL BIT(7)
+
+/*
+ * Only valid for RGMII
+ * 0: TX clk from RX clk
+ * 1: TX clk from SoC
+ */
+#define RGMII_TX_CLK_SEL BIT(8)
+
+#define PHY_IRQ_EN BIT(12)
+#define AXI_SINGLE_ID BIT(13)
+
+#define APMU_EMAC_DLINE_REG 0x4
+
+#define EMAC_RX_DLINE_EN BIT(0)
+#define EMAC_RX_DLINE_STEP_MASK GENMASK(5, 4)
+#define EMAC_RX_DLINE_CODE_MASK GENMASK(15, 8)
+
+#define EMAC_TX_DLINE_EN BIT(16)
+#define EMAC_TX_DLINE_STEP_MASK GENMASK(21, 20)
+#define EMAC_TX_DLINE_CODE_MASK GENMASK(31, 24)
+
+#define EMAC_DLINE_STEP_15P6 0 /* 15.6 ps/step */
+#define EMAC_DLINE_STEP_24P4 1 /* 24.4 ps/step */
+#define EMAC_DLINE_STEP_29P7 2 /* 29.7 ps/step */
+#define EMAC_DLINE_STEP_35P1 3 /* 35.1 ps/step */
+
+/* DMA register set */
+#define DMA_CONFIGURATION 0x0000
+#define DMA_CONTROL 0x0004
+#define DMA_STATUS_IRQ 0x0008
+#define DMA_INTERRUPT_ENABLE 0x000c
+
+#define DMA_TRANSMIT_AUTO_POLL_COUNTER 0x0010
+#define DMA_TRANSMIT_POLL_DEMAND 0x0014
+#define DMA_RECEIVE_POLL_DEMAND 0x0018
+
+#define DMA_TRANSMIT_BASE_ADDRESS 0x001c
+#define DMA_RECEIVE_BASE_ADDRESS 0x0020
+#define DMA_MISSED_FRAME_COUNTER 0x0024
+#define DMA_STOP_FLUSH_COUNTER 0x0028
+
+#define DMA_RECEIVE_IRQ_MITIGATION_CTRL 0x002c
+
+#define DMA_CURRENT_TRANSMIT_DESCRIPTOR_POINTER 0x0030
+#define DMA_CURRENT_TRANSMIT_BUFFER_POINTER 0x0034
+#define DMA_CURRENT_RECEIVE_DESCRIPTOR_POINTER 0x0038
+#define DMA_CURRENT_RECEIVE_BUFFER_POINTER 0x003c
+
+/* MAC Register set */
+#define MAC_GLOBAL_CONTROL 0x0100
+#define MAC_TRANSMIT_CONTROL 0x0104
+#define MAC_RECEIVE_CONTROL 0x0108
+#define MAC_MAXIMUM_FRAME_SIZE 0x010c
+#define MAC_TRANSMIT_JABBER_SIZE 0x0110
+#define MAC_RECEIVE_JABBER_SIZE 0x0114
+#define MAC_ADDRESS_CONTROL 0x0118
+#define MAC_MDIO_CLK_DIV 0x011c
+#define MAC_ADDRESS1_HIGH 0x0120
+#define MAC_ADDRESS1_MED 0x0124
+#define MAC_ADDRESS1_LOW 0x0128
+#define MAC_ADDRESS2_HIGH 0x012c
+#define MAC_ADDRESS2_MED 0x0130
+#define MAC_ADDRESS2_LOW 0x0134
+#define MAC_ADDRESS3_HIGH 0x0138
+#define MAC_ADDRESS3_MED 0x013c
+#define MAC_ADDRESS3_LOW 0x0140
+#define MAC_ADDRESS4_HIGH 0x0144
+#define MAC_ADDRESS4_MED 0x0148
+#define MAC_ADDRESS4_LOW 0x014c
+#define MAC_MULTICAST_HASH_TABLE1 0x0150
+#define MAC_MULTICAST_HASH_TABLE2 0x0154
+#define MAC_MULTICAST_HASH_TABLE3 0x0158
+#define MAC_MULTICAST_HASH_TABLE4 0x015c
+#define MAC_FC_CONTROL 0x0160
+#define MAC_FC_PAUSE_FRAME_GENERATE 0x0164
+#define MAC_FC_SOURCE_ADDRESS_HIGH 0x0168
+#define MAC_FC_SOURCE_ADDRESS_MED 0x016c
+#define MAC_FC_SOURCE_ADDRESS_LOW 0x0170
+#define MAC_FC_DESTINATION_ADDRESS_HIGH 0x0174
+#define MAC_FC_DESTINATION_ADDRESS_MED 0x0178
+#define MAC_FC_DESTINATION_ADDRESS_LOW 0x017c
+#define MAC_FC_PAUSE_TIME_VALUE 0x0180
+#define MAC_FC_HIGH_PAUSE_TIME 0x0184
+#define MAC_FC_LOW_PAUSE_TIME 0x0188
+#define MAC_FC_PAUSE_HIGH_THRESHOLD 0x018c
+#define MAC_FC_PAUSE_LOW_THRESHOLD 0x0190
+#define MAC_MDIO_CONTROL 0x01a0
+#define MAC_MDIO_DATA 0x01a4
+#define MAC_RX_STATCTR_CONTROL 0x01a8
+#define MAC_RX_STATCTR_DATA_HIGH 0x01ac
+#define MAC_RX_STATCTR_DATA_LOW 0x01b0
+#define MAC_TX_STATCTR_CONTROL 0x01b4
+#define MAC_TX_STATCTR_DATA_HIGH 0x01b8
+#define MAC_TX_STATCTR_DATA_LOW 0x01bc
+#define MAC_TRANSMIT_FIFO_ALMOST_FULL 0x01c0
+#define MAC_TRANSMIT_PACKET_START_THRESHOLD 0x01c4
+#define MAC_RECEIVE_PACKET_START_THRESHOLD 0x01c8
+#define MAC_STATUS_IRQ 0x01e0
+#define MAC_INTERRUPT_ENABLE 0x01e4
+
+/* Used for register dump */
+#define EMAC_DMA_REG_CNT 16
+#define EMAC_MAC_REG_CNT 124
+
+/* DMA_CONFIGURATION (0x0000) */
+
+/*
+ * 0-DMA controller in normal operation mode,
+ * 1-DMA controller reset to default state,
+ * clearing all internal state information
+ */
+#define MREGBIT_SOFTWARE_RESET BIT(0)
+
+#define MREGBIT_BURST_1WORD BIT(1)
+#define MREGBIT_BURST_2WORD BIT(2)
+#define MREGBIT_BURST_4WORD BIT(3)
+#define MREGBIT_BURST_8WORD BIT(4)
+#define MREGBIT_BURST_16WORD BIT(5)
+#define MREGBIT_BURST_32WORD BIT(6)
+#define MREGBIT_BURST_64WORD BIT(7)
+#define MREGBIT_BURST_LENGTH GENMASK(7, 1)
+#define MREGBIT_DESCRIPTOR_SKIP_LENGTH GENMASK(12, 8)
+
+/* When set, receive and transmit DMA handle descriptors in big-endian byte order */
+#define MREGBIT_DESCRIPTOR_BYTE_ORDERING BIT(13)
+
+#define MREGBIT_BIG_LITLE_ENDIAN BIT(14)
+#define MREGBIT_TX_RX_ARBITRATION BIT(15)
+#define MREGBIT_WAIT_FOR_DONE BIT(16)
+#define MREGBIT_STRICT_BURST BIT(17)
+#define MREGBIT_DMA_64BIT_MODE BIT(18)
+
+/* DMA_CONTROL (0x0004) */
+#define MREGBIT_START_STOP_TRANSMIT_DMA BIT(0)
+#define MREGBIT_START_STOP_RECEIVE_DMA BIT(1)
+
+/* DMA_STATUS_IRQ (0x0008) */
+#define MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ BIT(0)
+#define MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ BIT(1)
+#define MREGBIT_TRANSMIT_DMA_STOPPED_IRQ BIT(2)
+#define MREGBIT_RECEIVE_TRANSFER_DONE_IRQ BIT(4)
+#define MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ BIT(5)
+#define MREGBIT_RECEIVE_DMA_STOPPED_IRQ BIT(6)
+#define MREGBIT_RECEIVE_MISSED_FRAME_IRQ BIT(7)
+#define MREGBIT_MAC_IRQ BIT(8)
+#define MREGBIT_TRANSMIT_DMA_STATE GENMASK(18, 16)
+#define MREGBIT_RECEIVE_DMA_STATE GENMASK(23, 20)
+
+/* DMA_INTERRUPT_ENABLE (0x000c) */
+#define MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE BIT(0)
+#define MREGBIT_TRANSMIT_DES_UNAVAILABLE_INTR_ENABLE BIT(1)
+#define MREGBIT_TRANSMIT_DMA_STOPPED_INTR_ENABLE BIT(2)
+#define MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE BIT(4)
+#define MREGBIT_RECEIVE_DES_UNAVAILABLE_INTR_ENABLE BIT(5)
+#define MREGBIT_RECEIVE_DMA_STOPPED_INTR_ENABLE BIT(6)
+#define MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE BIT(7)
+#define MREGBIT_MAC_INTR_ENABLE BIT(8)
+
+/* DMA_RECEIVE_IRQ_MITIGATION_CTRL (0x002c) */
+#define MREGBIT_RECEIVE_IRQ_FRAME_COUNTER_MASK GENMASK(7, 0)
+#define MREGBIT_RECEIVE_IRQ_TIMEOUT_COUNTER_MASK GENMASK(27, 8)
+#define MREGBIT_RECEIVE_IRQ_FRAME_COUNTER_MODE BIT(30)
+#define MREGBIT_RECEIVE_IRQ_MITIGATION_ENABLE BIT(31)
+
+/* MAC_GLOBAL_CONTROL (0x0100) */
+#define MREGBIT_SPEED GENMASK(1, 0)
+#define MREGBIT_SPEED_10M 0x0
+#define MREGBIT_SPEED_100M BIT(0)
+#define MREGBIT_SPEED_1000M BIT(1)
+#define MREGBIT_FULL_DUPLEX_MODE BIT(2)
+#define MREGBIT_RESET_RX_STAT_COUNTERS BIT(3)
+#define MREGBIT_RESET_TX_STAT_COUNTERS BIT(4)
+#define MREGBIT_UNICAST_WAKEUP_MODE BIT(8)
+#define MREGBIT_MAGIC_PACKET_WAKEUP_MODE BIT(9)
+
+/* MAC_TRANSMIT_CONTROL (0x0104) */
+#define MREGBIT_TRANSMIT_ENABLE BIT(0)
+#define MREGBIT_INVERT_FCS BIT(1)
+#define MREGBIT_DISABLE_FCS_INSERT BIT(2)
+#define MREGBIT_TRANSMIT_AUTO_RETRY BIT(3)
+#define MREGBIT_IFG_LEN GENMASK(6, 4)
+#define MREGBIT_PREAMBLE_LENGTH GENMASK(9, 7)
+
+/* MAC_RECEIVE_CONTROL (0x0108) */
+#define MREGBIT_RECEIVE_ENABLE BIT(0)
+#define MREGBIT_DISABLE_FCS_CHECK BIT(1)
+#define MREGBIT_STRIP_FCS BIT(2)
+#define MREGBIT_STORE_FORWARD BIT(3)
+#define MREGBIT_STATUS_FIRST BIT(4)
+#define MREGBIT_PASS_BAD_FRAMES BIT(5)
+#define MREGBIT_ACOOUNT_VLAN BIT(6)
+
+/* MAC_MAXIMUM_FRAME_SIZE (0x010c) */
+#define MREGBIT_MAX_FRAME_SIZE GENMASK(13, 0)
+
+/* MAC_TRANSMIT_JABBER_SIZE (0x0110) */
+#define MREGBIT_TRANSMIT_JABBER_SIZE GENMASK(15, 0)
+
+/* MAC_RECEIVE_JABBER_SIZE (0x0114) */
+#define MREGBIT_RECEIVE_JABBER_SIZE GENMASK(15, 0)
+
+/* MAC_ADDRESS_CONTROL (0x0118) */
+#define MREGBIT_MAC_ADDRESS1_ENABLE BIT(0)
+#define MREGBIT_MAC_ADDRESS2_ENABLE BIT(1)
+#define MREGBIT_MAC_ADDRESS3_ENABLE BIT(2)
+#define MREGBIT_MAC_ADDRESS4_ENABLE BIT(3)
+#define MREGBIT_INVERSE_MAC_ADDRESS1_ENABLE BIT(4)
+#define MREGBIT_INVERSE_MAC_ADDRESS2_ENABLE BIT(5)
+#define MREGBIT_INVERSE_MAC_ADDRESS3_ENABLE BIT(6)
+#define MREGBIT_INVERSE_MAC_ADDRESS4_ENABLE BIT(7)
+#define MREGBIT_PROMISCUOUS_MODE BIT(8)
+
+/* MAC_FC_CONTROL (0x0160) */
+#define MREGBIT_FC_DECODE_ENABLE BIT(0)
+#define MREGBIT_FC_GENERATION_ENABLE BIT(1)
+#define MREGBIT_AUTO_FC_GENERATION_ENABLE BIT(2)
+#define MREGBIT_MULTICAST_MODE BIT(3)
+#define MREGBIT_BLOCK_PAUSE_FRAMES BIT(4)
+
+/* MAC_FC_PAUSE_FRAME_GENERATE (0x0164) */
+#define MREGBIT_GENERATE_PAUSE_FRAME BIT(0)
+
+/* MAC_FC_PAUSE_TIME_VALUE (0x0180) */
+#define MREGBIT_MAC_FC_PAUSE_TIME GENMASK(15, 0)
+
+/* MAC_MDIO_CONTROL (0x01a0) */
+#define MREGBIT_PHY_ADDRESS GENMASK(4, 0)
+#define MREGBIT_REGISTER_ADDRESS GENMASK(9, 5)
+#define MREGBIT_MDIO_READ_WRITE BIT(10)
+#define MREGBIT_START_MDIO_TRANS BIT(15)
+
+/* MAC_MDIO_DATA (0x01a4) */
+#define MREGBIT_MDIO_DATA GENMASK(15, 0)
+
+/* MAC_RX_STATCTR_CONTROL (0x01a8) */
+#define MREGBIT_RX_COUNTER_NUMBER GENMASK(4, 0)
+#define MREGBIT_START_RX_COUNTER_READ BIT(15)
+
+/* MAC_RX_STATCTR_DATA_HIGH (0x01ac) */
+#define MREGBIT_RX_STATCTR_DATA_HIGH GENMASK(15, 0)
+/* MAC_RX_STATCTR_DATA_LOW (0x01b0) */
+#define MREGBIT_RX_STATCTR_DATA_LOW GENMASK(15, 0)
+
+/* MAC_TX_STATCTR_CONTROL (0x01b4) */
+#define MREGBIT_TX_COUNTER_NUMBER GENMASK(4, 0)
+#define MREGBIT_START_TX_COUNTER_READ BIT(15)
+
+/* MAC_TX_STATCTR_DATA_HIGH (0x01b8) */
+#define MREGBIT_TX_STATCTR_DATA_HIGH GENMASK(15, 0)
+/* MAC_TX_STATCTR_DATA_LOW (0x01bc) */
+#define MREGBIT_TX_STATCTR_DATA_LOW GENMASK(15, 0)
+
+/* MAC_TRANSMIT_FIFO_ALMOST_FULL (0x01c0) */
+#define MREGBIT_TX_FIFO_AF GENMASK(13, 0)
+
+/* MAC_TRANSMIT_PACKET_START_THRESHOLD (0x01c4) */
+#define MREGBIT_TX_PACKET_START_THRESHOLD GENMASK(13, 0)
+
+/* MAC_RECEIVE_PACKET_START_THRESHOLD (0x01c8) */
+#define MREGBIT_RX_PACKET_START_THRESHOLD GENMASK(13, 0)
+
+/* MAC_STATUS_IRQ (0x01e0) */
+#define MREGBIT_MAC_UNDERRUN_IRQ BIT(0)
+#define MREGBIT_MAC_JABBER_IRQ BIT(1)
+
+/* MAC_INTERRUPT_ENABLE (0x01e4) */
+#define MREGBIT_MAC_UNDERRUN_INTERRUPT_ENABLE BIT(0)
+#define MREGBIT_JABBER_INTERRUPT_ENABLE BIT(1)
+
+/* RX DMA descriptor */
+
+#define RX_DESC_0_FRAME_PACKET_LENGTH_MASK GENMASK(13, 0)
+#define RX_DESC_0_FRAME_ALIGN_ERR BIT(14)
+#define RX_DESC_0_FRAME_RUNT BIT(15)
+#define RX_DESC_0_FRAME_ETHERNET_TYPE BIT(16)
+#define RX_DESC_0_FRAME_VLAN BIT(17)
+#define RX_DESC_0_FRAME_MULTICAST BIT(18)
+#define RX_DESC_0_FRAME_BROADCAST BIT(19)
+#define RX_DESC_0_FRAME_CRC_ERR BIT(20)
+#define RX_DESC_0_FRAME_MAX_LEN_ERR BIT(21)
+#define RX_DESC_0_FRAME_JABBER_ERR BIT(22)
+#define RX_DESC_0_FRAME_LENGTH_ERR BIT(23)
+#define RX_DESC_0_FRAME_MAC_ADDR1_MATCH BIT(24)
+#define RX_DESC_0_FRAME_MAC_ADDR2_MATCH BIT(25)
+#define RX_DESC_0_FRAME_MAC_ADDR3_MATCH BIT(26)
+#define RX_DESC_0_FRAME_MAC_ADDR4_MATCH BIT(27)
+#define RX_DESC_0_FRAME_PAUSE_CTRL BIT(28)
+#define RX_DESC_0_LAST_DESCRIPTOR BIT(29)
+#define RX_DESC_0_FIRST_DESCRIPTOR BIT(30)
+#define RX_DESC_0_OWN BIT(31)
+
+#define RX_DESC_1_BUFFER_SIZE_1_MASK GENMASK(11, 0)
+#define RX_DESC_1_BUFFER_SIZE_2_MASK GENMASK(23, 12)
+ /* [24] reserved */
+#define RX_DESC_1_SECOND_ADDRESS_CHAINED BIT(25)
+#define RX_DESC_1_END_RING BIT(26)
+ /* [29:27] reserved */
+#define RX_DESC_1_RX_TIMESTAMP BIT(30)
+#define RX_DESC_1_PTP_PKT BIT(31)
+
+/* TX DMA descriptor */
+
+ /* [29:0] unused */
+#define TX_DESC_0_TX_TIMESTAMP BIT(30)
+#define TX_DESC_0_OWN BIT(31)
+
+#define TX_DESC_1_BUFFER_SIZE_1_MASK GENMASK(11, 0)
+#define TX_DESC_1_BUFFER_SIZE_2_MASK GENMASK(23, 12)
+#define TX_DESC_1_FORCE_EOP_ERROR BIT(24)
+#define TX_DESC_1_SECOND_ADDRESS_CHAINED BIT(25)
+#define TX_DESC_1_END_RING BIT(26)
+#define TX_DESC_1_DISABLE_PADDING BIT(27)
+#define TX_DESC_1_ADD_CRC_DISABLE BIT(28)
+#define TX_DESC_1_FIRST_SEGMENT BIT(29)
+#define TX_DESC_1_LAST_SEGMENT BIT(30)
+#define TX_DESC_1_INTERRUPT_ON_COMPLETION BIT(31)
+
+struct emac_desc {
+ u32 desc0;
+ u32 desc1;
+ u32 buffer_addr_1;
+ u32 buffer_addr_2;
+};
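+
+/*
+ * Layout sketch (inferred from the bit definitions above, not from a
+ * datasheet): desc0 carries status and the OWN bit, desc1 carries control
+ * flags and the two buffer sizes, and buffer_addr_1/2 point at the data
+ * buffers. The last descriptor of a ring sets RX_DESC_1_END_RING /
+ * TX_DESC_1_END_RING so the DMA wraps back to the base address programmed
+ * in DMA_TRANSMIT_BASE_ADDRESS / DMA_RECEIVE_BASE_ADDRESS.
+ */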
+
+/* Keep stats in this order, index used for accessing hardware */
+
+union emac_hw_tx_stats {
+ struct individual_tx_stats {
+ u64 tx_ok_pkts;
+ u64 tx_total_pkts;
+ u64 tx_ok_bytes;
+ u64 tx_err_pkts;
+ u64 tx_singleclsn_pkts;
+ u64 tx_multiclsn_pkts;
+ u64 tx_lateclsn_pkts;
+ u64 tx_excessclsn_pkts;
+ u64 tx_unicast_pkts;
+ u64 tx_multicast_pkts;
+ u64 tx_broadcast_pkts;
+ u64 tx_pause_pkts;
+ } stats;
+
+ u64 array[sizeof(struct individual_tx_stats) / sizeof(u64)];
+};
+
+union emac_hw_rx_stats {
+ struct individual_rx_stats {
+ u64 rx_ok_pkts;
+ u64 rx_total_pkts;
+ u64 rx_crc_err_pkts;
+ u64 rx_align_err_pkts;
+ u64 rx_err_total_pkts;
+ u64 rx_ok_bytes;
+ u64 rx_total_bytes;
+ u64 rx_unicast_pkts;
+ u64 rx_multicast_pkts;
+ u64 rx_broadcast_pkts;
+ u64 rx_pause_pkts;
+ u64 rx_len_err_pkts;
+ u64 rx_len_undersize_pkts;
+ u64 rx_len_oversize_pkts;
+ u64 rx_len_fragment_pkts;
+ u64 rx_len_jabber_pkts;
+ u64 rx_64_pkts;
+ u64 rx_65_127_pkts;
+ u64 rx_128_255_pkts;
+ u64 rx_256_511_pkts;
+ u64 rx_512_1023_pkts;
+ u64 rx_1024_1518_pkts;
+ u64 rx_1519_plus_pkts;
+ u64 rx_drp_fifo_full_pkts;
+ u64 rx_truncate_fifo_full_pkts;
+ } stats;
+
+ u64 array[sizeof(struct individual_rx_stats) / sizeof(u64)];
+};
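+
+/*
+ * Note (inferred from the ordering comment above): each union gives two
+ * views of the same storage, named fields for reporting and a flat array[]
+ * indexed by the hardware counter number, so counters can be read in a
+ * loop through MAC_TX_STATCTR_CONTROL / MAC_RX_STATCTR_CONTROL while the
+ * individual fields keep descriptive names.
+ */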
+
+#endif /* _K1_EMAC_H_ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 05cc07b8f48c..907fe2e927f0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -3,12 +3,14 @@ config STMMAC_ETH
tristate "STMicroelectronics Multi-Gigabit Ethernet driver"
depends on HAS_IOMEM && HAS_DMA
depends on PTP_1588_CLOCK_OPTIONAL
+ depends on ETHTOOL_NETLINK
select MII
select PCS_XPCS
select PAGE_POOL
select PHYLINK
select CRC32
select RESET_CONTROLLER
+ select NET_DEVLINK
help
This is the driver for the Ethernet IPs built around a
Synopsys IP Core.
@@ -66,6 +68,15 @@ config DWMAC_ANARION
This selects the Anarion SoC glue layer support for the stmmac driver.
+config DWMAC_EIC7700
+ tristate "Support for Eswin eic7700 ethernet driver"
+ depends on OF && HAS_DMA && ARCH_ESWIN || COMPILE_TEST
+ help
+ This driver supports the Eswin EIC7700 Ethernet controller,
+ which integrates Synopsys DesignWare QoS features. It enables
+ high-speed networking with DMA acceleration and is optimized
+ for embedded systems.
+
config DWMAC_INGENIC
tristate "Ingenic MAC support"
default MACH_INGENIC
@@ -131,6 +142,19 @@ config DWMAC_QCOM_ETHQOS
This selects the Qualcomm ETHQOS glue layer support for the
stmmac device driver.
+config DWMAC_RENESAS_GBETH
+ tristate "Renesas RZ/V2H(P) GBETH and RZ/T2H, RZ/N2H GMAC support"
+ default ARCH_RENESAS
+ depends on OF && (ARCH_RENESAS || COMPILE_TEST)
+ select PCS_RZN1_MIIC
+ help
+ Support for Gigabit Ethernet Interface (GBETH) / Ethernet MAC (GMAC)
+ on Renesas SoCs.
+
+ This selects Renesas SoC glue layer support for the stmmac device
+ driver. This driver is used for the RZ/V2H(P) family, RZ/T2H and
+ RZ/N2H SoCs.
+
config DWMAC_ROCKCHIP
tristate "Rockchip dwmac support"
default ARCH_ROCKCHIP
@@ -154,6 +178,18 @@ config DWMAC_RZN1
the stmmac device driver. This support can make use of a custom MII
converter PCS device.
+config DWMAC_S32
+ tristate "NXP S32G/S32R GMAC support"
+ default ARCH_S32
+ depends on OF && (ARCH_S32 || COMPILE_TEST)
+ help
+ Support for the Ethernet controller on NXP S32CC SoCs.
+
+ This selects NXP SoC glue layer support for the stmmac
+ device driver. This driver is used for the S32CC series
+ SoCs' GMAC Ethernet controller, i.e. S32G2xx, S32G3xx and
+ S32R45.
+
config DWMAC_SOCFPGA
tristate "SOCFPGA dwmac support"
default ARCH_INTEL_SOCFPGA
@@ -169,6 +205,17 @@ config DWMAC_SOCFPGA
for the stmmac device driver. This driver is used for
arria5 and cyclone5 FPGA SoCs.
+config DWMAC_SOPHGO
+ tristate "Sophgo dwmac support"
+ depends on OF && (ARCH_SOPHGO || COMPILE_TEST)
+ default m if ARCH_SOPHGO
+ help
+ Support for Ethernet controllers on Sophgo RISC-V SoCs.
+
+ This selects the Sophgo SoC specific glue layer support
+ for the stmmac device driver. This driver is used for the
+ ethernet controllers on various Sophgo SoCs.
+
config DWMAC_STARFIVE
tristate "StarFive dwmac support"
depends on OF && (ARCH_STARFIVE || COMPILE_TEST)
@@ -228,6 +275,28 @@ config DWMAC_SUN8I
stmmac device driver. This driver is used for H3/A83T/A64
EMAC ethernet controller.
+config DWMAC_SUN55I
+ tristate "Allwinner sun55i GMAC200 support"
+ default ARCH_SUNXI
+ depends on OF && (ARCH_SUNXI || COMPILE_TEST)
+ select MDIO_BUS_MUX
+ help
+ Support for Allwinner A523/T527 GMAC200 ethernet controllers.
+
+ This selects Allwinner SoC glue layer support for the
+ stmmac device driver. This driver is used for A523/T527
+ GMAC200 ethernet controller.
+
+config DWMAC_THEAD
+ tristate "T-HEAD dwmac support"
+ depends on OF && (ARCH_THEAD || COMPILE_TEST)
+ help
+ Support for Ethernet controllers on T-HEAD RISC-V SoCs.
+
+ This selects the T-HEAD platform specific glue layer support for
+ the stmmac device driver. This driver is used for T-HEAD TH1520
+ ethernet controller.
+
config DWMAC_IMX8
tristate "NXP IMX8 DWMAC support"
default ARCH_MXC
@@ -280,11 +349,17 @@ config DWMAC_VISCONTI
endif
+config STMMAC_LIBPCI
+ tristate
+ help
+ This option enables the PCI bus helpers for the stmmac driver.
+
config DWMAC_INTEL
tristate "Intel GMAC support"
default X86
depends on X86 && STMMAC_ETH && PCI
depends on COMMON_CLK
+ depends on ACPI
help
This selects the Intel platform specific bus support for the
stmmac driver. This driver is used for Intel Quark/EHL/TGL.
@@ -292,16 +367,18 @@ config DWMAC_INTEL
config DWMAC_LOONGSON
tristate "Loongson PCI DWMAC support"
default MACH_LOONGSON64
- depends on (MACH_LOONGSON64 || COMPILE_TEST) && STMMAC_ETH && PCI
+ depends on (MACH_LOONGSON64 || COMPILE_TEST) && PCI
depends on COMMON_CLK
+ select STMMAC_LIBPCI
help
This selects the LOONGSON PCI bus support for the stmmac driver,
Support for ethernet controller on Loongson-2K1000 SoC and LS7A1000 bridge.
config STMMAC_PCI
tristate "STMMAC PCI bus support"
- depends on STMMAC_ETH && PCI
+ depends on PCI
depends on COMMON_CLK
+ select STMMAC_LIBPCI
help
This selects the platform specific bus support for the stmmac driver.
This driver was tested on XLINX XC2V3000 FF1152AMT0221
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 7e46dca90628..7bf528731034 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -6,28 +6,34 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \
dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o hwif.o \
stmmac_tc.o dwxgmac2_core.o dwxgmac2_dma.o dwxgmac2_descs.o \
- stmmac_xdp.o stmmac_est.o stmmac_fpe.o \
- $(stmmac-y)
+ stmmac_xdp.o stmmac_est.o stmmac_fpe.o stmmac_vlan.o \
+ stmmac_pcs.o $(stmmac-y)
stmmac-$(CONFIG_STMMAC_SELFTESTS) += stmmac_selftests.o
# Ordering matters. Generic driver must be last.
obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
obj-$(CONFIG_DWMAC_ANARION) += dwmac-anarion.o
+obj-$(CONFIG_DWMAC_EIC7700) += dwmac-eic7700.o
obj-$(CONFIG_DWMAC_INGENIC) += dwmac-ingenic.o
obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o
obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o
obj-$(CONFIG_DWMAC_MEDIATEK) += dwmac-mediatek.o
obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o
obj-$(CONFIG_DWMAC_QCOM_ETHQOS) += dwmac-qcom-ethqos.o
+obj-$(CONFIG_DWMAC_RENESAS_GBETH) += dwmac-renesas-gbeth.o
obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
obj-$(CONFIG_DWMAC_RZN1) += dwmac-rzn1.o
+obj-$(CONFIG_DWMAC_S32) += dwmac-s32.o
obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o
+obj-$(CONFIG_DWMAC_SOPHGO) += dwmac-sophgo.o
obj-$(CONFIG_DWMAC_STARFIVE) += dwmac-starfive.o
obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
obj-$(CONFIG_DWMAC_STM32) += dwmac-stm32.o
obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
obj-$(CONFIG_DWMAC_SUN8I) += dwmac-sun8i.o
+obj-$(CONFIG_DWMAC_SUN55I) += dwmac-sun55i.o
+obj-$(CONFIG_DWMAC_THEAD) += dwmac-thead.o
obj-$(CONFIG_DWMAC_DWC_QOS_ETH) += dwmac-dwc-qos-eth.o
obj-$(CONFIG_DWMAC_INTEL_PLAT) += dwmac-intel-plat.o
obj-$(CONFIG_DWMAC_LOONGSON1) += dwmac-loongson1.o
@@ -38,6 +44,7 @@ obj-$(CONFIG_DWMAC_VISCONTI) += dwmac-visconti.o
stmmac-platform-objs:= stmmac_platform.o
dwmac-altr-socfpga-objs := dwmac-socfpga.o
+obj-$(CONFIG_STMMAC_LIBPCI) += stmmac_libpci.o
obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o
obj-$(CONFIG_DWMAC_INTEL) += dwmac-intel.o
obj-$(CONFIG_DWMAC_LOONGSON) += dwmac-loongson.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index fb55efd52240..120a009c9992 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -83,14 +83,13 @@ static int jumbo_frm(struct stmmac_tx_queue *tx_q, struct sk_buff *skb,
return entry;
}
-static unsigned int is_jumbo_frm(int len, int enh_desc)
+static bool is_jumbo_frm(unsigned int len, bool enh_desc)
{
- unsigned int ret = 0;
+ bool ret = false;
if ((enh_desc && (len > BUF_SIZE_8KiB)) ||
- (!enh_desc && (len > BUF_SIZE_2KiB))) {
- ret = 1;
- }
+ (!enh_desc && (len > BUF_SIZE_2KiB)))
+ ret = true;
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 684489156dce..49df46be3669 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -26,6 +26,9 @@
#include "hwif.h"
#include "mmc.h"
+#define DWMAC_SNPSVER GENMASK_U32(7, 0)
+#define DWMAC_USERVER GENMASK_U32(15, 8)
+
/* Synopsys Core versions */
#define DWMAC_CORE_3_40 0x34
#define DWMAC_CORE_3_50 0x35
@@ -43,6 +46,11 @@
#define DWXGMAC_ID 0x76
#define DWXLGMAC_ID 0x27
+static inline bool dwmac_is_xmac(enum dwmac_core_type core_type)
+{
+ return core_type == DWMAC_CORE_GMAC4 || core_type == DWMAC_CORE_XGMAC;
+}
+
#define STMMAC_CHAN0 0 /* Always supported and default for all chips */
/* TX and RX Descriptor Length, these need to be power of two.
@@ -101,8 +109,8 @@ struct stmmac_rxq_stats {
/* Updates on each CPU protected by not allowing nested irqs. */
struct stmmac_pcpu_stats {
struct u64_stats_sync syncp;
- u64_stats_t rx_normal_irq_n[MTL_MAX_TX_QUEUES];
- u64_stats_t tx_normal_irq_n[MTL_MAX_RX_QUEUES];
+ u64_stats_t rx_normal_irq_n[MTL_MAX_RX_QUEUES];
+ u64_stats_t tx_normal_irq_n[MTL_MAX_TX_QUEUES];
};
/* Extra statistic and debug information exposed by ethtool */
@@ -192,9 +200,6 @@ struct stmmac_extra_stats {
unsigned long irq_pcs_ane_n;
unsigned long irq_pcs_link_n;
unsigned long irq_rgmii_n;
- unsigned long pcs_link;
- unsigned long pcs_duplex;
- unsigned long pcs_speed;
/* debug register */
unsigned long mtl_tx_status_fifo_full;
unsigned long mtl_tx_fifo_not_empty;
@@ -228,6 +233,7 @@ struct stmmac_extra_stats {
unsigned long mtl_est_btrlm;
unsigned long max_sdu_txq_drop[MTL_MAX_TX_QUEUES];
unsigned long mtl_est_txq_hlbf[MTL_MAX_TX_QUEUES];
+ unsigned long mtl_est_txq_hlbs[MTL_MAX_TX_QUEUES];
/* per queue statistics */
struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES];
struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES];
@@ -257,6 +263,8 @@ struct stmmac_safety_stats {
#define CSR_F_150M 150000000
#define CSR_F_250M 250000000
#define CSR_F_300M 300000000
+#define CSR_F_500M 500000000
+#define CSR_F_800M 800000000
#define MAC_CSR_H_FRQ_MASK 0x20
@@ -270,7 +278,6 @@ struct stmmac_safety_stats {
#define FLOW_AUTO (FLOW_TX | FLOW_RX)
/* PCS defines */
-#define STMMAC_PCS_RGMII (1 << 0)
#define STMMAC_PCS_SGMII (1 << 1)
#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
@@ -306,6 +313,16 @@ struct stmmac_safety_stats {
#define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY iface */
#define DEFAULT_DMA_PBL 8
+/* phy_intf_sel_i and ACTPHYIF encodings */
+#define PHY_INTF_SEL_GMII_MII 0
+#define PHY_INTF_SEL_RGMII 1
+#define PHY_INTF_SEL_SGMII 2
+#define PHY_INTF_SEL_TBI 3
+#define PHY_INTF_SEL_RMII 4
+#define PHY_INTF_SEL_RTBI 5
+#define PHY_INTF_SEL_SMII 6
+#define PHY_INTF_SEL_REVMII 7
+
/* MSI defines */
#define STMMAC_MSI_VEC_MAX 32
@@ -394,17 +411,6 @@ enum request_irq_err {
#define CORE_IRQ_MTL_RX_OVERFLOW BIT(8)
-/* Physical Coding Sublayer */
-struct rgmii_adv {
- unsigned int pause;
- unsigned int duplex;
- unsigned int lp_pause;
- unsigned int lp_duplex;
-};
-
-#define STMMAC_PCS_PAUSE 1
-#define STMMAC_PCS_ASYM_PAUSE 2
-
/* DMA HW capabilities */
struct dma_features {
unsigned int mbps_10_100;
@@ -528,6 +534,33 @@ struct dma_features {
#define STMMAC_DEFAULT_TWT_LS 0x1E
#define STMMAC_ET_MAX 0xFFFFF
+/* Common LPI register bits */
+#define LPI_CTRL_STATUS_LPITCSE BIT(21) /* LPI Tx Clock Stop Enable, gmac4, xgmac2 only */
+#define LPI_CTRL_STATUS_LPIATE BIT(20) /* LPI Timer Enable, gmac4 only */
+#define LPI_CTRL_STATUS_LPITXA BIT(19) /* Enable LPI TX Automate */
+#define LPI_CTRL_STATUS_PLSEN BIT(18) /* Enable PHY Link Status */
+#define LPI_CTRL_STATUS_PLS BIT(17) /* PHY Link Status */
+#define LPI_CTRL_STATUS_LPIEN BIT(16) /* LPI Enable */
+#define LPI_CTRL_STATUS_RLPIST BIT(9) /* Receive LPI state, gmac1000 only? */
+#define LPI_CTRL_STATUS_TLPIST BIT(8) /* Transmit LPI state, gmac1000 only? */
+#define LPI_CTRL_STATUS_RLPIEX BIT(3) /* Receive LPI Exit */
+#define LPI_CTRL_STATUS_RLPIEN BIT(2) /* Receive LPI Entry */
+#define LPI_CTRL_STATUS_TLPIEX BIT(1) /* Transmit LPI Exit */
+#define LPI_CTRL_STATUS_TLPIEN BIT(0) /* Transmit LPI Entry */
+
+/* Common definitions for AXI Master Bus Mode */
+#define DMA_AXI_AAL BIT(12)
+#define DMA_AXI_BLEN256 BIT(7)
+#define DMA_AXI_BLEN128 BIT(6)
+#define DMA_AXI_BLEN64 BIT(5)
+#define DMA_AXI_BLEN32 BIT(4)
+#define DMA_AXI_BLEN16 BIT(3)
+#define DMA_AXI_BLEN8 BIT(2)
+#define DMA_AXI_BLEN4 BIT(1)
+#define DMA_AXI_BLEN_MASK GENMASK(7, 1)
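+
+/*
+ * For example (derived from the bit layout above): a device tree
+ * "snps,burst-map" of 0x7 expands to FIELD_PREP(DMA_AXI_BLEN_MASK, 0x7) ==
+ * DMA_AXI_BLEN4 | DMA_AXI_BLEN8 | DMA_AXI_BLEN16, i.e. 4-, 8- and 16-beat
+ * bursts enabled.
+ */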
+
+void stmmac_axi_blen_to_mask(u32 *regval, const u32 *blen, size_t len);
+
#define STMMAC_CHAIN_MODE 0x1
#define STMMAC_RING_MODE 0x2
@@ -543,14 +576,8 @@ struct dma_features {
#define STMMAC_VLAN_INSERT 0x2
#define STMMAC_VLAN_REPLACE 0x3
-extern const struct stmmac_desc_ops enh_desc_ops;
-extern const struct stmmac_desc_ops ndesc_ops;
-
struct mac_device_info;
-extern const struct stmmac_hwtimestamp stmmac_ptp;
-extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
-
struct mac_link {
u32 caps;
u32 speed_mask;
@@ -592,6 +619,7 @@ struct mac_device_info {
const struct stmmac_tc_ops *tc;
const struct stmmac_mmc_ops *mmc;
const struct stmmac_est_ops *est;
+ const struct stmmac_vlan_ops *vlan;
struct dw_xpcs *xpcs;
struct phylink_pcs *phylink_pcs;
struct mii_regs mii; /* MII register Addresses */
@@ -602,14 +630,18 @@ struct mac_device_info {
unsigned int mcast_bits_log2;
unsigned int rx_csum;
unsigned int pcs;
- unsigned int pmt;
- unsigned int ps;
unsigned int xlgmac;
unsigned int num_vlan;
u32 vlan_filter[32];
bool vlan_fail_q_en;
u8 vlan_fail_q;
bool hw_vlan_en;
+ bool reverse_sgmii_enable;
+
+ /* This spinlock protects read-modify-write of the interrupt
+ * mask/enable registers.
+ */
+ spinlock_t irq_ctrl_lock;
};
struct stmmac_rx_routing {
@@ -637,8 +669,4 @@ void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable);
void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
-extern const struct stmmac_mode_ops ring_mode_ops;
-extern const struct stmmac_mode_ops chain_mode_ops;
-extern const struct stmmac_desc_ops dwmac4_desc_ops;
-
#endif /* __COMMON_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
index ef99ef3f1ab4..5e0fc31762d9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
@@ -34,7 +34,7 @@ static void gmac_write_reg(struct anarion_gmac *gmac, uint8_t reg, uint32_t val)
writel(val, gmac->ctl_block + reg);
}
-static int anarion_gmac_init(struct platform_device *pdev, void *priv)
+static int anarion_gmac_init(struct device *dev, void *priv)
{
uint32_t sw_config;
struct anarion_gmac *gmac = priv;
@@ -52,25 +52,25 @@ static int anarion_gmac_init(struct platform_device *pdev, void *priv)
return 0;
}
-static void anarion_gmac_exit(struct platform_device *pdev, void *priv)
+static void anarion_gmac_exit(struct device *dev, void *priv)
{
struct anarion_gmac *gmac = priv;
gmac_write_reg(gmac, GMAC_RESET_CONTROL_REG, 1);
}
-static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev)
+static struct anarion_gmac *
+anarion_config_dt(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat)
{
struct anarion_gmac *gmac;
- phy_interface_t phy_mode;
void __iomem *ctl_block;
- int err;
ctl_block = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(ctl_block)) {
- err = PTR_ERR(ctl_block);
- dev_err(&pdev->dev, "Cannot get reset region (%d)!\n", err);
- return ERR_PTR(err);
+ dev_err(&pdev->dev, "Cannot get reset region (%pe)!\n",
+ ctl_block);
+ return ERR_CAST(ctl_block);
}
gmac = devm_kzalloc(&pdev->dev, sizeof(*gmac), GFP_KERNEL);
@@ -79,21 +79,11 @@ static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev)
gmac->ctl_block = ctl_block;
- err = of_get_phy_mode(pdev->dev.of_node, &phy_mode);
- if (err)
- return ERR_PTR(err);
-
- switch (phy_mode) {
- case PHY_INTERFACE_MODE_RGMII:
- fallthrough;
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
+ if (phy_interface_mode_is_rgmii(plat_dat->phy_interface)) {
gmac->phy_intf_sel = GMAC_CONFIG_INTF_RGMII;
- break;
- default:
- dev_err(&pdev->dev, "Unsupported phy-mode (%d)\n",
- phy_mode);
+ } else {
+ dev_err(&pdev->dev, "Unsupported phy-mode (%s)\n",
+ phy_modes(plat_dat->phy_interface));
return ERR_PTR(-ENOTSUPP);
}
@@ -111,20 +101,19 @@ static int anarion_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- gmac = anarion_config_dt(pdev);
- if (IS_ERR(gmac))
- return PTR_ERR(gmac);
-
plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
+ gmac = anarion_config_dt(pdev, plat_dat);
+ if (IS_ERR(gmac))
+ return PTR_ERR(gmac);
+
plat_dat->init = anarion_gmac_init;
plat_dat->exit = anarion_gmac_exit;
- anarion_gmac_init(pdev, gmac);
plat_dat->bsp_priv = gmac;
- return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
static const struct of_device_id anarion_dwmac_match[] = {
@@ -135,7 +124,6 @@ MODULE_DEVICE_TABLE(of, anarion_dwmac_match);
static struct platform_driver anarion_dwmac_driver = {
.probe = anarion_dwmac_probe,
- .remove = stmmac_pltfr_remove,
.driver = {
.name = "anarion-dwmac",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index 83290e707df5..d043bad4a862 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -29,10 +29,6 @@ struct tegra_eqos {
void __iomem *regs;
struct reset_control *rst;
- struct clk *clk_master;
- struct clk *clk_slave;
- struct clk *clk_tx;
- struct clk *clk_rx;
struct gpio_desc *reset;
};
@@ -42,11 +38,11 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
{
struct device *dev = &pdev->dev;
u32 burst_map = 0;
- u32 bit_index = 0;
- u32 a_index = 0;
if (!plat_dat->axi) {
- plat_dat->axi = kzalloc(sizeof(struct stmmac_axi), GFP_KERNEL);
+ plat_dat->axi = devm_kzalloc(&pdev->dev,
+ sizeof(struct stmmac_axi),
+ GFP_KERNEL);
if (!plat_dat->axi)
return -ENOMEM;
@@ -85,33 +81,11 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
}
device_property_read_u32(dev, "snps,burst-map", &burst_map);
- /* converts burst-map bitmask to burst array */
- for (bit_index = 0; bit_index < 7; bit_index++) {
- if (burst_map & (1 << bit_index)) {
- switch (bit_index) {
- case 0:
- plat_dat->axi->axi_blen[a_index] = 4; break;
- case 1:
- plat_dat->axi->axi_blen[a_index] = 8; break;
- case 2:
- plat_dat->axi->axi_blen[a_index] = 16; break;
- case 3:
- plat_dat->axi->axi_blen[a_index] = 32; break;
- case 4:
- plat_dat->axi->axi_blen[a_index] = 64; break;
- case 5:
- plat_dat->axi->axi_blen[a_index] = 128; break;
- case 6:
- plat_dat->axi->axi_blen[a_index] = 256; break;
- default:
- break;
- }
- a_index++;
- }
- }
+ plat_dat->axi->axi_blen_regval = FIELD_PREP(DMA_AXI_BLEN_MASK,
+ burst_map);
/* dwc-qos needs GMAC4, AAL, TSO and PMT */
- plat_dat->has_gmac4 = 1;
+ plat_dat->core_type = DWMAC_CORE_GMAC4;
plat_dat->dma_cfg->aal = 1;
plat_dat->flags |= STMMAC_FLAG_TSO_EN;
plat_dat->pmt = 1;
@@ -123,49 +97,9 @@ static int dwc_qos_probe(struct platform_device *pdev,
struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *stmmac_res)
{
- int err;
-
- plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk");
- if (IS_ERR(plat_dat->stmmac_clk)) {
- dev_err(&pdev->dev, "apb_pclk clock not found.\n");
- return PTR_ERR(plat_dat->stmmac_clk);
- }
-
- err = clk_prepare_enable(plat_dat->stmmac_clk);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to enable apb_pclk clock: %d\n",
- err);
- return err;
- }
-
- plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk");
- if (IS_ERR(plat_dat->pclk)) {
- dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
- err = PTR_ERR(plat_dat->pclk);
- goto disable;
- }
-
- err = clk_prepare_enable(plat_dat->pclk);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to enable phy_ref clock: %d\n",
- err);
- goto disable;
- }
+ plat_dat->pclk = stmmac_pltfr_find_clk(plat_dat, "phy_ref_clk");
return 0;
-
-disable:
- clk_disable_unprepare(plat_dat->stmmac_clk);
- return err;
-}
-
-static void dwc_qos_remove(struct platform_device *pdev)
-{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct stmmac_priv *priv = netdev_priv(ndev);
-
- clk_disable_unprepare(priv->plat->pclk);
- clk_disable_unprepare(priv->plat->stmmac_clk);
}
#define SDMEMCOMPPADCTRL 0x8800
@@ -178,35 +112,34 @@ static void dwc_qos_remove(struct platform_device *pdev)
#define AUTO_CAL_STATUS 0x880c
#define AUTO_CAL_STATUS_ACTIVE BIT(31)
-static void tegra_eqos_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static void tegra_eqos_fix_speed(void *bsp_priv, int speed, unsigned int mode)
{
- struct tegra_eqos *eqos = priv;
- unsigned long rate = 125000000;
+ struct tegra_eqos *eqos = bsp_priv;
bool needs_calibration = false;
+ struct stmmac_priv *priv;
u32 value;
int err;
switch (speed) {
case SPEED_1000:
- needs_calibration = true;
- rate = 125000000;
- break;
-
case SPEED_100:
needs_calibration = true;
- rate = 25000000;
- break;
+ fallthrough;
case SPEED_10:
- rate = 2500000;
break;
default:
- dev_err(eqos->dev, "invalid speed %u\n", speed);
+ dev_err(eqos->dev, "invalid speed %d\n", speed);
break;
}
if (needs_calibration) {
+ priv = netdev_priv(dev_get_drvdata(eqos->dev));
+
+ /* Calibration should be done with the MDIO bus idle */
+ stmmac_mdio_lock(priv);
+
/* calibrate */
value = readl(eqos->regs + SDMEMCOMPPADCTRL);
value |= SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD;
@@ -240,33 +173,17 @@ static void tegra_eqos_fix_speed(void *priv, unsigned int speed, unsigned int mo
value = readl(eqos->regs + SDMEMCOMPPADCTRL);
value &= ~SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD;
writel(value, eqos->regs + SDMEMCOMPPADCTRL);
+
+ stmmac_mdio_unlock(priv);
} else {
value = readl(eqos->regs + AUTO_CAL_CONFIG);
value &= ~AUTO_CAL_CONFIG_ENABLE;
writel(value, eqos->regs + AUTO_CAL_CONFIG);
}
-
- err = clk_set_rate(eqos->clk_tx, rate);
- if (err < 0)
- dev_err(eqos->dev, "failed to set TX rate: %d\n", err);
-}
-
-static int tegra_eqos_init(struct platform_device *pdev, void *priv)
-{
- struct tegra_eqos *eqos = priv;
- unsigned long rate;
- u32 value;
-
- rate = clk_get_rate(eqos->clk_slave);
-
- value = (rate / 1000000) - 1;
- writel(value, eqos->regs + GMAC_1US_TIC_COUNTER);
-
- return 0;
}
static int tegra_eqos_probe(struct platform_device *pdev,
- struct plat_stmmacenet_data *data,
+ struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *res)
{
struct device *dev = &pdev->dev;
@@ -283,59 +200,19 @@ static int tegra_eqos_probe(struct platform_device *pdev,
if (!is_of_node(dev->fwnode))
goto bypass_clk_reset_gpio;
- eqos->clk_master = devm_clk_get(&pdev->dev, "master_bus");
- if (IS_ERR(eqos->clk_master)) {
- err = PTR_ERR(eqos->clk_master);
- goto error;
- }
-
- err = clk_prepare_enable(eqos->clk_master);
- if (err < 0)
- goto error;
-
- eqos->clk_slave = devm_clk_get(&pdev->dev, "slave_bus");
- if (IS_ERR(eqos->clk_slave)) {
- err = PTR_ERR(eqos->clk_slave);
- goto disable_master;
- }
-
- data->stmmac_clk = eqos->clk_slave;
-
- err = clk_prepare_enable(eqos->clk_slave);
- if (err < 0)
- goto disable_master;
-
- eqos->clk_rx = devm_clk_get(&pdev->dev, "rx");
- if (IS_ERR(eqos->clk_rx)) {
- err = PTR_ERR(eqos->clk_rx);
- goto disable_slave;
- }
-
- err = clk_prepare_enable(eqos->clk_rx);
- if (err < 0)
- goto disable_slave;
-
- eqos->clk_tx = devm_clk_get(&pdev->dev, "tx");
- if (IS_ERR(eqos->clk_tx)) {
- err = PTR_ERR(eqos->clk_tx);
- goto disable_rx;
- }
-
- err = clk_prepare_enable(eqos->clk_tx);
- if (err < 0)
- goto disable_rx;
+ plat_dat->clk_tx_i = stmmac_pltfr_find_clk(plat_dat, "tx");
eqos->reset = devm_gpiod_get(&pdev->dev, "phy-reset", GPIOD_OUT_HIGH);
if (IS_ERR(eqos->reset)) {
err = PTR_ERR(eqos->reset);
- goto disable_tx;
+ return err;
}
usleep_range(2000, 4000);
gpiod_set_value(eqos->reset, 0);
/* MDIO bus was already reset just above */
- data->mdio_bus_data->needs_reset = false;
+ plat_dat->mdio_bus_data->needs_reset = false;
eqos->rst = devm_reset_control_get(&pdev->dev, "eqos");
if (IS_ERR(eqos->rst)) {
@@ -356,29 +233,18 @@ static int tegra_eqos_probe(struct platform_device *pdev,
usleep_range(2000, 4000);
bypass_clk_reset_gpio:
- data->fix_mac_speed = tegra_eqos_fix_speed;
- data->init = tegra_eqos_init;
- data->bsp_priv = eqos;
- data->flags |= STMMAC_FLAG_SPH_DISABLE;
-
- err = tegra_eqos_init(pdev, eqos);
- if (err < 0)
- goto reset;
+ plat_dat->fix_mac_speed = tegra_eqos_fix_speed;
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
+ plat_dat->bsp_priv = eqos;
+ plat_dat->flags |= STMMAC_FLAG_SPH_DISABLE |
+ STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP |
+ STMMAC_FLAG_USE_PHY_WOL;
return 0;
-reset:
- reset_control_assert(eqos->rst);
+
reset_phy:
gpiod_set_value(eqos->reset, 1);
-disable_tx:
- clk_disable_unprepare(eqos->clk_tx);
-disable_rx:
- clk_disable_unprepare(eqos->clk_rx);
-disable_slave:
- clk_disable_unprepare(eqos->clk_slave);
-disable_master:
- clk_disable_unprepare(eqos->clk_master);
-error:
+
return err;
}
@@ -388,27 +254,29 @@ static void tegra_eqos_remove(struct platform_device *pdev)
reset_control_assert(eqos->rst);
gpiod_set_value(eqos->reset, 1);
- clk_disable_unprepare(eqos->clk_tx);
- clk_disable_unprepare(eqos->clk_rx);
- clk_disable_unprepare(eqos->clk_slave);
- clk_disable_unprepare(eqos->clk_master);
}
struct dwc_eth_dwmac_data {
int (*probe)(struct platform_device *pdev,
- struct plat_stmmacenet_data *data,
+ struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *res);
void (*remove)(struct platform_device *pdev);
+ const char *stmmac_clk_name;
};
static const struct dwc_eth_dwmac_data dwc_qos_data = {
.probe = dwc_qos_probe,
- .remove = dwc_qos_remove,
+ .stmmac_clk_name = "apb_pclk",
};
static const struct dwc_eth_dwmac_data tegra_eqos_data = {
.probe = tegra_eqos_probe,
.remove = tegra_eqos_remove,
+ .stmmac_clk_name = "slave_bus",
+};
+
+static const struct dwc_eth_dwmac_data fsd_eqos_data = {
+ .stmmac_clk_name = "slave_bus",
};
static int dwc_eth_dwmac_probe(struct platform_device *pdev)
@@ -439,7 +307,16 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
- ret = data->probe(pdev, plat_dat, &stmmac_res);
+ ret = devm_clk_bulk_get_all_enabled(&pdev->dev, &plat_dat->clks);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Failed to retrieve and enable all required clocks\n");
+ plat_dat->num_clks = ret;
+
+ plat_dat->stmmac_clk = stmmac_pltfr_find_clk(plat_dat,
+ data->stmmac_clk_name);
+
+ if (data->probe)
+ ret = data->probe(pdev, plat_dat, &stmmac_res);
if (ret < 0) {
dev_err_probe(&pdev->dev, ret, "failed to probe subdriver\n");
return ret;
@@ -456,7 +333,8 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
return ret;
remove:
- data->remove(pdev);
+ if (data->remove)
+ data->remove(pdev);
return ret;
}
@@ -467,12 +345,14 @@ static void dwc_eth_dwmac_remove(struct platform_device *pdev)
stmmac_dvr_remove(&pdev->dev);
- data->remove(pdev);
+ if (data->remove)
+ data->remove(pdev);
}
static const struct of_device_id dwc_eth_dwmac_match[] = {
{ .compatible = "snps,dwc-qos-ethernet-4.10", .data = &dwc_qos_data },
{ .compatible = "nvidia,tegra186-eqos", .data = &tegra_eqos_data },
+ { .compatible = "tesla,fsd-ethqos", .data = &fsd_eqos_data },
{ }
};
MODULE_DEVICE_TABLE(of, dwc_eth_dwmac_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-eic7700.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-eic7700.c
new file mode 100644
index 000000000000..bcb8e000e720
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-eic7700.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Eswin DWC Ethernet linux driver
+ *
+ * Copyright 2025, Beijing ESWIN Computing Technology Co., Ltd.
+ *
+ * Authors:
+ * Zhi Li <lizhi2@eswincomputing.com>
+ * Shuang Liang <liangshuang@eswincomputing.com>
+ * Shangjuan Wei <weishangjuan@eswincomputing.com>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/pm_runtime.h>
+#include <linux/stmmac.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+
+#include "stmmac_platform.h"
+
+/* eth_phy_ctrl_offset eth0:0x100 */
+#define EIC7700_ETH_TX_CLK_SEL BIT(16)
+#define EIC7700_ETH_PHY_INTF_SELI BIT(0)
+
+/* eth_axi_lp_ctrl_offset eth0:0x108 */
+#define EIC7700_ETH_CSYSREQ_VAL BIT(0)
+
+/*
+ * TX/RX Clock Delay Bit Masks:
+ * - TX Delay: bits [14:8] — TX_CLK delay (unit: 0.1ns per bit)
+ * - RX Delay: bits [30:24] — RX_CLK delay (unit: 0.1ns per bit)
+ */
+#define EIC7700_ETH_TX_ADJ_DELAY GENMASK(14, 8)
+#define EIC7700_ETH_RX_ADJ_DELAY GENMASK(30, 24)
+
+#define EIC7700_MAX_DELAY_UNIT 0x7F
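+
+/*
+ * For illustration (derived from the masks above and the probe code below):
+ * an "rx-internal-delay-ps" of 2000 becomes min(2000 / 100, 0x7F) == 20
+ * delay units, placed into bits [30:24] with
+ * FIELD_PREP(EIC7700_ETH_RX_ADJ_DELAY, 20).
+ */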
+
+static const char * const eic7700_clk_names[] = {
+ "tx", "axi", "cfg",
+};
+
+struct eic7700_qos_priv {
+ struct plat_stmmacenet_data *plat_dat;
+};
+
+static int eic7700_clks_config(void *priv, bool enabled)
+{
+ struct eic7700_qos_priv *dwc = (struct eic7700_qos_priv *)priv;
+ struct plat_stmmacenet_data *plat = dwc->plat_dat;
+ int ret = 0;
+
+ if (enabled)
+ ret = clk_bulk_prepare_enable(plat->num_clks, plat->clks);
+ else
+ clk_bulk_disable_unprepare(plat->num_clks, plat->clks);
+
+ return ret;
+}
+
+static int eic7700_dwmac_init(struct device *dev, void *priv)
+{
+ struct eic7700_qos_priv *dwc = priv;
+
+ return eic7700_clks_config(dwc, true);
+}
+
+static void eic7700_dwmac_exit(struct device *dev, void *priv)
+{
+ struct eic7700_qos_priv *dwc = priv;
+
+ eic7700_clks_config(dwc, false);
+}
+
+static int eic7700_dwmac_suspend(struct device *dev, void *priv)
+{
+ return pm_runtime_force_suspend(dev);
+}
+
+static int eic7700_dwmac_resume(struct device *dev, void *priv)
+{
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ dev_err(dev, "%s failed: %d\n", __func__, ret);
+
+ return ret;
+}
+
+static int eic7700_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct eic7700_qos_priv *dwc_priv;
+ struct regmap *eic7700_hsp_regmap;
+ u32 eth_axi_lp_ctrl_offset;
+ u32 eth_phy_ctrl_offset;
+ u32 eth_phy_ctrl_regset;
+ u32 eth_rxd_dly_offset;
+ u32 eth_dly_param = 0;
+ u32 delay_ps;
+ int i, ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to get resources\n");
+
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return dev_err_probe(&pdev->dev, PTR_ERR(plat_dat),
+ "dt configuration failed\n");
+
+ dwc_priv = devm_kzalloc(&pdev->dev, sizeof(*dwc_priv), GFP_KERNEL);
+ if (!dwc_priv)
+ return -ENOMEM;
+
+ /* Read rx-internal-delay-ps and update rx_clk delay */
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "rx-internal-delay-ps", &delay_ps)) {
+ u32 val = min(delay_ps / 100, EIC7700_MAX_DELAY_UNIT);
+
+ eth_dly_param &= ~EIC7700_ETH_RX_ADJ_DELAY;
+ eth_dly_param |= FIELD_PREP(EIC7700_ETH_RX_ADJ_DELAY, val);
+ } else {
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "missing required property rx-internal-delay-ps\n");
+ }
+
+ /* Read tx-internal-delay-ps and update tx_clk delay */
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "tx-internal-delay-ps", &delay_ps)) {
+ u32 val = min(delay_ps / 100, EIC7700_MAX_DELAY_UNIT);
+
+ eth_dly_param &= ~EIC7700_ETH_TX_ADJ_DELAY;
+ eth_dly_param |= FIELD_PREP(EIC7700_ETH_TX_ADJ_DELAY, val);
+ } else {
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "missing required property tx-internal-delay-ps\n");
+ }
+
+ eic7700_hsp_regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "eswin,hsp-sp-csr");
+ if (IS_ERR(eic7700_hsp_regmap))
+ return dev_err_probe(&pdev->dev,
+ PTR_ERR(eic7700_hsp_regmap),
+ "Failed to get hsp-sp-csr regmap\n");
+
+ ret = of_property_read_u32_index(pdev->dev.of_node,
+ "eswin,hsp-sp-csr",
+ 1, &eth_phy_ctrl_offset);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "can't get eth_phy_ctrl_offset\n");
+
+ regmap_read(eic7700_hsp_regmap, eth_phy_ctrl_offset,
+ &eth_phy_ctrl_regset);
+ eth_phy_ctrl_regset |=
+ (EIC7700_ETH_TX_CLK_SEL | EIC7700_ETH_PHY_INTF_SELI);
+ regmap_write(eic7700_hsp_regmap, eth_phy_ctrl_offset,
+ eth_phy_ctrl_regset);
+
+ ret = of_property_read_u32_index(pdev->dev.of_node,
+ "eswin,hsp-sp-csr",
+ 2, &eth_axi_lp_ctrl_offset);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "can't get eth_axi_lp_ctrl_offset\n");
+
+ regmap_write(eic7700_hsp_regmap, eth_axi_lp_ctrl_offset,
+ EIC7700_ETH_CSYSREQ_VAL);
+
+ ret = of_property_read_u32_index(pdev->dev.of_node,
+ "eswin,hsp-sp-csr",
+ 3, &eth_rxd_dly_offset);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "can't get eth_rxd_dly_offset\n");
+
+ regmap_write(eic7700_hsp_regmap, eth_rxd_dly_offset,
+ eth_dly_param);
+
+ plat_dat->num_clks = ARRAY_SIZE(eic7700_clk_names);
+ plat_dat->clks = devm_kcalloc(&pdev->dev,
+ plat_dat->num_clks,
+ sizeof(*plat_dat->clks),
+ GFP_KERNEL);
+ if (!plat_dat->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(eic7700_clk_names); i++)
+ plat_dat->clks[i].id = eic7700_clk_names[i];
+
+ ret = devm_clk_bulk_get_optional(&pdev->dev,
+ plat_dat->num_clks,
+ plat_dat->clks);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to get clocks\n");
+
+ plat_dat->clk_tx_i = stmmac_pltfr_find_clk(plat_dat, "tx");
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
+ plat_dat->clks_config = eic7700_clks_config;
+ plat_dat->bsp_priv = dwc_priv;
+ dwc_priv->plat_dat = plat_dat;
+ plat_dat->init = eic7700_dwmac_init;
+ plat_dat->exit = eic7700_dwmac_exit;
+ plat_dat->suspend = eic7700_dwmac_suspend;
+ plat_dat->resume = eic7700_dwmac_resume;
+
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
+}
+
+static const struct of_device_id eic7700_dwmac_match[] = {
+ { .compatible = "eswin,eic7700-qos-eth" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, eic7700_dwmac_match);
+
+static struct platform_driver eic7700_dwmac_driver = {
+ .probe = eic7700_dwmac_probe,
+ .driver = {
+ .name = "eic7700-eth-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = eic7700_dwmac_match,
+ },
+};
+module_platform_driver(eic7700_dwmac_driver);
+
+MODULE_AUTHOR("Zhi Li <lizhi2@eswincomputing.com>");
+MODULE_AUTHOR("Shuang Liang <liangshuang@eswincomputing.com>");
+MODULE_AUTHOR("Shangjuan Wei <weishangjuan@eswincomputing.com>");
+MODULE_DESCRIPTION("Eswin eic7700 qos ethernet driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
index 598eff926815..b9218c07eb6b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
@@ -56,6 +56,7 @@ static const struct of_device_id dwmac_generic_match[] = {
{ .compatible = "snps,dwmac-3.610"},
{ .compatible = "snps,dwmac-3.70a"},
{ .compatible = "snps,dwmac-3.710"},
+ { .compatible = "snps,dwmac-3.72a"},
{ .compatible = "snps,dwmac-4.00"},
{ .compatible = "snps,dwmac-4.10a"},
{ .compatible = "snps,dwmac"},
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index 641f3cd019a3..db288fbd5a4d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -23,33 +23,32 @@
#include "stmmac_platform.h"
#define GPR_ENET_QOS_INTF_MODE_MASK GENMASK(21, 16)
-#define GPR_ENET_QOS_INTF_SEL_MII (0x0 << 16)
-#define GPR_ENET_QOS_INTF_SEL_RMII (0x4 << 16)
-#define GPR_ENET_QOS_INTF_SEL_RGMII (0x1 << 16)
+#define GPR_ENET_QOS_INTF_SEL_MASK GENMASK(20, 16)
#define GPR_ENET_QOS_CLK_GEN_EN (0x1 << 19)
#define GPR_ENET_QOS_CLK_TX_CLK_SEL (0x1 << 20)
#define GPR_ENET_QOS_RGMII_EN (0x1 << 21)
#define MX93_GPR_ENET_QOS_INTF_MODE_MASK GENMASK(3, 0)
-#define MX93_GPR_ENET_QOS_INTF_MASK GENMASK(3, 1)
-#define MX93_GPR_ENET_QOS_INTF_SEL_MII (0x0 << 1)
-#define MX93_GPR_ENET_QOS_INTF_SEL_RMII (0x4 << 1)
-#define MX93_GPR_ENET_QOS_INTF_SEL_RGMII (0x1 << 1)
+#define MX93_GPR_ENET_QOS_INTF_SEL_MASK GENMASK(3, 1)
#define MX93_GPR_ENET_QOS_CLK_GEN_EN (0x1 << 0)
+#define MX93_GPR_ENET_QOS_CLK_SEL_MASK BIT_MASK(0)
+#define MX93_GPR_CLK_SEL_OFFSET (4)
#define DMA_BUS_MODE 0x00001000
#define DMA_BUS_MODE_SFT_RESET (0x1 << 0)
#define RMII_RESET_SPEED (0x3 << 14)
#define CTRL_SPEED_MASK GENMASK(15, 14)
+struct imx_priv_data;
+
struct imx_dwmac_ops {
u32 addr_width;
u32 flags;
bool mac_rgmii_txclk_auto_adj;
- int (*fix_soc_reset)(void *priv, void __iomem *ioaddr);
- int (*set_intf_mode)(struct plat_stmmacenet_data *plat_dat);
- void (*fix_mac_speed)(void *priv, unsigned int speed, unsigned int mode);
+ int (*fix_soc_reset)(struct stmmac_priv *priv, void __iomem *ioaddr);
+ int (*set_intf_mode)(struct imx_priv_data *dwmac, u8 phy_intf_sel);
+ void (*fix_mac_speed)(void *priv, int speed, unsigned int mode);
};
struct imx_priv_data {
@@ -65,71 +64,46 @@ struct imx_priv_data {
struct plat_stmmacenet_data *plat_dat;
};
-static int imx8mp_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
+static int imx8mp_set_intf_mode(struct imx_priv_data *dwmac, u8 phy_intf_sel)
{
- struct imx_priv_data *dwmac = plat_dat->bsp_priv;
- int val;
-
- switch (plat_dat->mac_interface) {
- case PHY_INTERFACE_MODE_MII:
- val = GPR_ENET_QOS_INTF_SEL_MII;
- break;
- case PHY_INTERFACE_MODE_RMII:
- val = GPR_ENET_QOS_INTF_SEL_RMII;
- val |= (dwmac->rmii_refclk_ext ? 0 : GPR_ENET_QOS_CLK_TX_CLK_SEL);
- break;
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- val = GPR_ENET_QOS_INTF_SEL_RGMII |
- GPR_ENET_QOS_RGMII_EN;
- break;
- default:
- pr_debug("imx dwmac doesn't support %d interface\n",
- plat_dat->mac_interface);
- return -EINVAL;
- }
+ unsigned int val;
+
+ val = FIELD_PREP(GPR_ENET_QOS_INTF_SEL_MASK, phy_intf_sel) |
+ GPR_ENET_QOS_CLK_GEN_EN;
+
+ if (phy_intf_sel == PHY_INTF_SEL_RMII && !dwmac->rmii_refclk_ext)
+ val |= GPR_ENET_QOS_CLK_TX_CLK_SEL;
+ else if (phy_intf_sel == PHY_INTF_SEL_RGMII)
+ val |= GPR_ENET_QOS_RGMII_EN;
- val |= GPR_ENET_QOS_CLK_GEN_EN;
return regmap_update_bits(dwmac->intf_regmap, dwmac->intf_reg_off,
GPR_ENET_QOS_INTF_MODE_MASK, val);
};
static int
-imx8dxl_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
+imx8dxl_set_intf_mode(struct imx_priv_data *dwmac, u8 phy_intf_sel)
{
- int ret = 0;
-
/* TBD: depends on imx8dxl scu interfaces to be upstreamed */
- return ret;
+ return 0;
}
-static int imx93_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
+static int imx93_set_intf_mode(struct imx_priv_data *dwmac, u8 phy_intf_sel)
{
- struct imx_priv_data *dwmac = plat_dat->bsp_priv;
- int val;
-
- switch (plat_dat->mac_interface) {
- case PHY_INTERFACE_MODE_MII:
- val = MX93_GPR_ENET_QOS_INTF_SEL_MII;
- break;
- case PHY_INTERFACE_MODE_RMII:
- val = MX93_GPR_ENET_QOS_INTF_SEL_RMII;
- break;
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- val = MX93_GPR_ENET_QOS_INTF_SEL_RGMII;
- break;
- default:
- dev_dbg(dwmac->dev, "imx dwmac doesn't support %d interface\n",
- plat_dat->mac_interface);
- return -EINVAL;
+ unsigned int val;
+ int ret;
+
+ if (phy_intf_sel == PHY_INTF_SEL_RMII && dwmac->rmii_refclk_ext) {
+ ret = regmap_clear_bits(dwmac->intf_regmap,
+ dwmac->intf_reg_off +
+ MX93_GPR_CLK_SEL_OFFSET,
+ MX93_GPR_ENET_QOS_CLK_SEL_MASK);
+ if (ret)
+ return ret;
}
- val |= MX93_GPR_ENET_QOS_CLK_GEN_EN;
+ val = FIELD_PREP(MX93_GPR_ENET_QOS_INTF_SEL_MASK, phy_intf_sel) |
+ MX93_GPR_ENET_QOS_CLK_GEN_EN;
+
return regmap_update_bits(dwmac->intf_regmap, dwmac->intf_reg_off,
MX93_GPR_ENET_QOS_INTF_MODE_MASK, val);
};
@@ -160,54 +134,48 @@ static int imx_dwmac_clks_config(void *priv, bool enabled)
return ret;
}
-static int imx_dwmac_init(struct platform_device *pdev, void *priv)
+static int imx_set_phy_intf_sel(void *bsp_priv, u8 phy_intf_sel)
{
- struct plat_stmmacenet_data *plat_dat;
- struct imx_priv_data *dwmac = priv;
- int ret;
+ struct imx_priv_data *dwmac = bsp_priv;
- plat_dat = dwmac->plat_dat;
+ if (!dwmac->ops->set_intf_mode)
+ return 0;
- if (dwmac->ops->set_intf_mode) {
- ret = dwmac->ops->set_intf_mode(plat_dat);
- if (ret)
- return ret;
- }
+ if (phy_intf_sel != PHY_INTF_SEL_GMII_MII &&
+ phy_intf_sel != PHY_INTF_SEL_RGMII &&
+ phy_intf_sel != PHY_INTF_SEL_RMII)
+ return -EINVAL;
- return 0;
+ return dwmac->ops->set_intf_mode(dwmac, phy_intf_sel);
}
-static void imx_dwmac_exit(struct platform_device *pdev, void *priv)
+static int imx_dwmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
- /* nothing to do now */
+ if (interface == PHY_INTERFACE_MODE_RMII ||
+ interface == PHY_INTERFACE_MODE_MII)
+ return 0;
+
+ return stmmac_set_clk_tx_rate(bsp_priv, clk_tx_i, interface, speed);
}
-static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static void imx_dwmac_fix_speed(void *priv, int speed, unsigned int mode)
{
struct plat_stmmacenet_data *plat_dat;
struct imx_priv_data *dwmac = priv;
- unsigned long rate;
+ long rate;
int err;
plat_dat = dwmac->plat_dat;
if (dwmac->ops->mac_rgmii_txclk_auto_adj ||
- (plat_dat->mac_interface == PHY_INTERFACE_MODE_RMII) ||
- (plat_dat->mac_interface == PHY_INTERFACE_MODE_MII))
+ (plat_dat->phy_interface == PHY_INTERFACE_MODE_RMII) ||
+ (plat_dat->phy_interface == PHY_INTERFACE_MODE_MII))
return;
- switch (speed) {
- case SPEED_1000:
- rate = 125000000;
- break;
- case SPEED_100:
- rate = 25000000;
- break;
- case SPEED_10:
- rate = 2500000;
- break;
- default:
- dev_err(dwmac->dev, "invalid speed %u\n", speed);
+ rate = rgmii_clock(speed);
+ if (rate < 0) {
+ dev_err(dwmac->dev, "invalid speed %d\n", speed);
return;
}
@@ -216,7 +184,7 @@ static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mod
dev_err(dwmac->dev, "failed to set tx rate %lu\n", rate);
}
-static void imx93_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static void imx93_dwmac_fix_speed(void *priv, int speed, unsigned int mode)
{
struct imx_priv_data *dwmac = priv;
unsigned int iface;
@@ -230,8 +198,8 @@ static void imx93_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int m
if (regmap_read(dwmac->intf_regmap, dwmac->intf_reg_off, &iface))
return;
- iface &= MX93_GPR_ENET_QOS_INTF_MASK;
- if (iface != MX93_GPR_ENET_QOS_INTF_SEL_RGMII)
+ if (FIELD_GET(MX93_GPR_ENET_QOS_INTF_SEL_MASK, iface) !=
+ PHY_INTF_SEL_RGMII)
return;
old_ctrl = readl(dwmac->base_addr + MAC_CTRL_REG);
@@ -244,6 +212,7 @@ static void imx93_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int m
readl(dwmac->base_addr + MAC_CTRL_REG);
usleep_range(10, 20);
+ iface &= MX93_GPR_ENET_QOS_INTF_SEL_MASK;
iface |= MX93_GPR_ENET_QOS_CLK_GEN_EN;
regmap_update_bits(dwmac->intf_regmap, dwmac->intf_reg_off,
MX93_GPR_ENET_QOS_INTF_MODE_MASK, iface);
@@ -251,16 +220,16 @@ static void imx93_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int m
writel(old_ctrl, dwmac->base_addr + MAC_CTRL_REG);
}
-static int imx_dwmac_mx93_reset(void *priv, void __iomem *ioaddr)
+static int imx_dwmac_mx93_reset(struct stmmac_priv *priv, void __iomem *ioaddr)
{
- struct plat_stmmacenet_data *plat_dat = priv;
+ struct plat_stmmacenet_data *plat_dat = priv->plat;
u32 value = readl(ioaddr + DMA_BUS_MODE);
/* DMA SW reset */
value |= DMA_BUS_MODE_SFT_RESET;
writel(value, ioaddr + DMA_BUS_MODE);
- if (plat_dat->mac_interface == PHY_INTERFACE_MODE_RMII) {
+ if (plat_dat->phy_interface == PHY_INTERFACE_MODE_RMII) {
usleep_range(100, 200);
writel(RMII_RESET_SPEED, ioaddr + MAC_CTRL_REG);
}
@@ -287,6 +256,7 @@ imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev)
dwmac->clk_mem = NULL;
if (of_machine_is_compatible("fsl,imx8dxl") ||
+ of_machine_is_compatible("fsl,imx91") ||
of_machine_is_compatible("fsl,imx93")) {
dwmac->clk_mem = devm_clk_get(dev, "mem");
if (IS_ERR(dwmac->clk_mem)) {
@@ -296,20 +266,17 @@ imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev)
}
if (of_machine_is_compatible("fsl,imx8mp") ||
+ of_machine_is_compatible("fsl,imx91") ||
of_machine_is_compatible("fsl,imx93")) {
/* Binding doc describes the property:
- * is required by i.MX8MP, i.MX93.
+ * is required by i.MX8MP, i.MX91, i.MX93.
* is optional for i.MX8DXL.
*/
- dwmac->intf_regmap = syscon_regmap_lookup_by_phandle(np, "intf_mode");
+ dwmac->intf_regmap =
+ syscon_regmap_lookup_by_phandle_args(np, "intf_mode", 1,
+ &dwmac->intf_reg_off);
if (IS_ERR(dwmac->intf_regmap))
return PTR_ERR(dwmac->intf_regmap);
-
- err = of_property_read_u32_index(np, "intf_mode", 1, &dwmac->intf_reg_off);
- if (err) {
- dev_err(dev, "Can't get intf mode reg offset (%d)\n", err);
- return err;
- }
}
return err;
@@ -358,10 +325,8 @@ static int imx_dwmac_probe(struct platform_device *pdev)
plat_dat->tx_queues_cfg[i].tbs_en = 1;
plat_dat->host_dma_width = dwmac->ops->addr_width;
- plat_dat->init = imx_dwmac_init;
- plat_dat->exit = imx_dwmac_exit;
+ plat_dat->set_phy_intf_sel = imx_set_phy_intf_sel;
plat_dat->clks_config = imx_dwmac_clks_config;
- plat_dat->fix_mac_speed = imx_dwmac_fix_speed;
plat_dat->bsp_priv = dwmac;
dwmac->plat_dat = plat_dat;
dwmac->base_addr = stmmac_res.addr;
@@ -370,24 +335,19 @@ static int imx_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = imx_dwmac_init(pdev, dwmac);
- if (ret)
- goto err_dwmac_init;
-
- if (dwmac->ops->fix_mac_speed)
+ if (dwmac->ops->fix_mac_speed) {
plat_dat->fix_mac_speed = dwmac->ops->fix_mac_speed;
+ } else if (!dwmac->ops->mac_rgmii_txclk_auto_adj) {
+ plat_dat->clk_tx_i = dwmac->clk_tx;
+ plat_dat->set_clk_tx_rate = imx_dwmac_set_clk_tx_rate;
+ }
+
dwmac->plat_dat->fix_soc_reset = dwmac->ops->fix_soc_reset;
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ ret = stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
if (ret)
- goto err_drv_probe;
-
- return 0;
+ imx_dwmac_clks_config(dwmac, false);
-err_drv_probe:
- imx_dwmac_exit(pdev, plat_dat->bsp_priv);
-err_dwmac_init:
- imx_dwmac_clks_config(dwmac, false);
return ret;
}
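The i.MX hunks above replace per-interface magic values with FIELD_PREP()/FIELD_GET() over MX93_GPR_ENET_QOS_INTF_SEL_MASK, so the selector is packed and unpacked against a single mask definition. A minimal stand-alone sketch of that bitfield pattern follows; the GENMASK/FIELD_PREP/FIELD_GET macros only mimic <linux/bitfield.h>, and the selector value 1 is a placeholder rather than the real PHY_INTF_SEL_RMII constant.

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)        ((0xffffffffu >> (31 - (h))) & (0xffffffffu << (l)))
#define FIELD_PREP(mask, v)  (((uint32_t)(v) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, r)   (((uint32_t)(r) & (mask)) >> __builtin_ctz(mask))

#define MX93_GPR_ENET_QOS_INTF_SEL_MASK  GENMASK(3, 1)
#define MX93_GPR_ENET_QOS_CLK_GEN_EN     (0x1 << 0)

int main(void)
{
	uint32_t val;

	/* Compose the GPR value the way imx93_set_intf_mode() now does. */
	val = FIELD_PREP(MX93_GPR_ENET_QOS_INTF_SEL_MASK, 1 /* placeholder selector */) |
	      MX93_GPR_ENET_QOS_CLK_GEN_EN;
	printf("GPR value : 0x%x\n", val);

	/* Extract the selector again, as imx93_dwmac_fix_speed() now does. */
	printf("intf sel  : %u\n", FIELD_GET(MX93_GPR_ENET_QOS_INTF_SEL_MASK, val));
	return 0;
}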
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
index 066783d66422..8e4a30c11db0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
@@ -35,10 +35,6 @@
#define MACPHYC_RX_DELAY_MASK GENMASK(10, 4)
#define MACPHYC_SOFT_RST_MASK GENMASK(3, 3)
#define MACPHYC_PHY_INFT_MASK GENMASK(2, 0)
-#define MACPHYC_PHY_INFT_RMII 0x4
-#define MACPHYC_PHY_INFT_RGMII 0x1
-#define MACPHYC_PHY_INFT_GMII 0x0
-#define MACPHYC_PHY_INFT_MII 0x0
#define MACPHYC_TX_DELAY_PS_MAX 2496
#define MACPHYC_TX_DELAY_PS_MIN 20
@@ -56,6 +52,7 @@ enum ingenic_mac_version {
struct ingenic_mac {
const struct ingenic_soc_info *soc_info;
+ struct plat_stmmacenet_data *plat_dat;
struct device *dev;
struct regmap *regmap;
@@ -67,167 +64,93 @@ struct ingenic_soc_info {
enum ingenic_mac_version version;
u32 mask;
- int (*set_mode)(struct plat_stmmacenet_data *plat_dat);
-};
-
-static int ingenic_mac_init(struct plat_stmmacenet_data *plat_dat)
-{
- struct ingenic_mac *mac = plat_dat->bsp_priv;
- int ret;
-
- if (mac->soc_info->set_mode) {
- ret = mac->soc_info->set_mode(plat_dat);
- if (ret)
- return ret;
- }
+ int (*set_mode)(struct ingenic_mac *mac, u8 phy_intf_sel);
- return 0;
-}
+ u8 valid_phy_intf_sel;
+};
-static int jz4775_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
+static int jz4775_mac_set_mode(struct ingenic_mac *mac, u8 phy_intf_sel)
{
- struct ingenic_mac *mac = plat_dat->bsp_priv;
unsigned int val;
- switch (plat_dat->mac_interface) {
- case PHY_INTERFACE_MODE_MII:
- val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) |
- FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_MII);
- dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_MII\n");
- break;
-
- case PHY_INTERFACE_MODE_GMII:
- val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) |
- FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_GMII);
- dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_GMII\n");
- break;
-
- case PHY_INTERFACE_MODE_RMII:
- val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) |
- FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
- dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
- break;
-
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) |
- FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RGMII);
- dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RGMII\n");
- break;
-
- default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
- return -EINVAL;
- }
+ val = FIELD_PREP(MACPHYC_PHY_INFT_MASK, phy_intf_sel) |
+ FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT);
/* Update MAC PHY control register */
return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, val);
}
-static int x1000_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
+static int x1000_mac_set_mode(struct ingenic_mac *mac, u8 phy_intf_sel)
{
- struct ingenic_mac *mac = plat_dat->bsp_priv;
-
- switch (plat_dat->mac_interface) {
- case PHY_INTERFACE_MODE_RMII:
- dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
- break;
-
- default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
- return -EINVAL;
- }
-
/* Update MAC PHY control register */
return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, 0);
}
-static int x1600_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
+static int x1600_mac_set_mode(struct ingenic_mac *mac, u8 phy_intf_sel)
{
- struct ingenic_mac *mac = plat_dat->bsp_priv;
unsigned int val;
- switch (plat_dat->mac_interface) {
- case PHY_INTERFACE_MODE_RMII:
- val = FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
- dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
- break;
-
- default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
- return -EINVAL;
- }
+ val = FIELD_PREP(MACPHYC_PHY_INFT_MASK, phy_intf_sel);
/* Update MAC PHY control register */
return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, val);
}
-static int x1830_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
+static int x1830_mac_set_mode(struct ingenic_mac *mac, u8 phy_intf_sel)
{
- struct ingenic_mac *mac = plat_dat->bsp_priv;
unsigned int val;
- switch (plat_dat->mac_interface) {
- case PHY_INTERFACE_MODE_RMII:
- val = FIELD_PREP(MACPHYC_MODE_SEL_MASK, MACPHYC_MODE_SEL_RMII) |
- FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
- dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
- break;
-
- default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
- return -EINVAL;
- }
+ val = FIELD_PREP(MACPHYC_MODE_SEL_MASK, MACPHYC_MODE_SEL_RMII) |
+ FIELD_PREP(MACPHYC_PHY_INFT_MASK, phy_intf_sel);
/* Update MAC PHY control register */
return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, val);
}
-static int x2000_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
+static int x2000_mac_set_mode(struct ingenic_mac *mac, u8 phy_intf_sel)
{
- struct ingenic_mac *mac = plat_dat->bsp_priv;
unsigned int val;
- switch (plat_dat->mac_interface) {
- case PHY_INTERFACE_MODE_RMII:
- val = FIELD_PREP(MACPHYC_TX_SEL_MASK, MACPHYC_TX_SEL_ORIGIN) |
- FIELD_PREP(MACPHYC_RX_SEL_MASK, MACPHYC_RX_SEL_ORIGIN) |
- FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
- dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
- break;
-
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- val = FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RGMII);
+ val = FIELD_PREP(MACPHYC_PHY_INFT_MASK, phy_intf_sel);
+ if (phy_intf_sel == PHY_INTF_SEL_RMII) {
+ val |= FIELD_PREP(MACPHYC_TX_SEL_MASK, MACPHYC_TX_SEL_ORIGIN) |
+ FIELD_PREP(MACPHYC_RX_SEL_MASK, MACPHYC_RX_SEL_ORIGIN);
+ } else if (phy_intf_sel == PHY_INTF_SEL_RGMII) {
if (mac->tx_delay == 0)
val |= FIELD_PREP(MACPHYC_TX_SEL_MASK, MACPHYC_TX_SEL_ORIGIN);
else
val |= FIELD_PREP(MACPHYC_TX_SEL_MASK, MACPHYC_TX_SEL_DELAY) |
- FIELD_PREP(MACPHYC_TX_DELAY_MASK, (mac->tx_delay + 9750) / 19500 - 1);
+ FIELD_PREP(MACPHYC_TX_DELAY_MASK, (mac->tx_delay + 9750) / 19500 - 1);
if (mac->rx_delay == 0)
val |= FIELD_PREP(MACPHYC_RX_SEL_MASK, MACPHYC_RX_SEL_ORIGIN);
else
val |= FIELD_PREP(MACPHYC_RX_SEL_MASK, MACPHYC_RX_SEL_DELAY) |
FIELD_PREP(MACPHYC_RX_DELAY_MASK, (mac->rx_delay + 9750) / 19500 - 1);
-
- dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RGMII\n");
- break;
-
- default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
- return -EINVAL;
}
/* Update MAC PHY control register */
return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, val);
}
+static int ingenic_set_phy_intf_sel(void *bsp_priv, u8 phy_intf_sel)
+{
+ struct ingenic_mac *mac = bsp_priv;
+
+ if (!mac->soc_info->set_mode)
+ return 0;
+
+ if (phy_intf_sel >= BITS_PER_BYTE ||
+ ~mac->soc_info->valid_phy_intf_sel & BIT(phy_intf_sel))
+ return -EINVAL;
+
+ dev_dbg(mac->dev, "MAC PHY control register: interface %s\n",
+ phy_modes(mac->plat_dat->phy_interface));
+
+ return mac->soc_info->set_mode(mac, phy_intf_sel);
+}
+
static int ingenic_mac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
@@ -284,49 +207,22 @@ static int ingenic_mac_probe(struct platform_device *pdev)
mac->soc_info = data;
mac->dev = &pdev->dev;
+ mac->plat_dat = plat_dat;
plat_dat->bsp_priv = mac;
+ plat_dat->set_phy_intf_sel = ingenic_set_phy_intf_sel;
- ret = ingenic_mac_init(plat_dat);
- if (ret)
- return ret;
-
- return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int ingenic_mac_suspend(struct device *dev)
-{
- int ret;
-
- ret = stmmac_suspend(dev);
-
- return ret;
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
-static int ingenic_mac_resume(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- int ret;
-
- ret = ingenic_mac_init(priv->plat);
- if (ret)
- return ret;
-
- ret = stmmac_resume(dev);
-
- return ret;
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(ingenic_mac_pm_ops, ingenic_mac_suspend, ingenic_mac_resume);
-
static struct ingenic_soc_info jz4775_soc_info = {
.version = ID_JZ4775,
.mask = MACPHYC_TXCLK_SEL_MASK | MACPHYC_SOFT_RST_MASK | MACPHYC_PHY_INFT_MASK,
.set_mode = jz4775_mac_set_mode,
+ .valid_phy_intf_sel = BIT(PHY_INTF_SEL_GMII_MII) |
+ BIT(PHY_INTF_SEL_RGMII) |
+ BIT(PHY_INTF_SEL_RMII),
};
static struct ingenic_soc_info x1000_soc_info = {
@@ -334,6 +230,7 @@ static struct ingenic_soc_info x1000_soc_info = {
.mask = MACPHYC_SOFT_RST_MASK,
.set_mode = x1000_mac_set_mode,
+ .valid_phy_intf_sel = BIT(PHY_INTF_SEL_RMII),
};
static struct ingenic_soc_info x1600_soc_info = {
@@ -341,6 +238,7 @@ static struct ingenic_soc_info x1600_soc_info = {
.mask = MACPHYC_SOFT_RST_MASK | MACPHYC_PHY_INFT_MASK,
.set_mode = x1600_mac_set_mode,
+ .valid_phy_intf_sel = BIT(PHY_INTF_SEL_RMII),
};
static struct ingenic_soc_info x1830_soc_info = {
@@ -348,6 +246,7 @@ static struct ingenic_soc_info x1830_soc_info = {
.mask = MACPHYC_MODE_SEL_MASK | MACPHYC_SOFT_RST_MASK | MACPHYC_PHY_INFT_MASK,
.set_mode = x1830_mac_set_mode,
+ .valid_phy_intf_sel = BIT(PHY_INTF_SEL_RMII),
};
static struct ingenic_soc_info x2000_soc_info = {
@@ -356,6 +255,8 @@ static struct ingenic_soc_info x2000_soc_info = {
MACPHYC_RX_DELAY_MASK | MACPHYC_SOFT_RST_MASK | MACPHYC_PHY_INFT_MASK,
.set_mode = x2000_mac_set_mode,
+ .valid_phy_intf_sel = BIT(PHY_INTF_SEL_RGMII) |
+ BIT(PHY_INTF_SEL_RMII),
};
static const struct of_device_id ingenic_mac_of_matches[] = {
@@ -370,10 +271,9 @@ MODULE_DEVICE_TABLE(of, ingenic_mac_of_matches);
static struct platform_driver ingenic_mac_driver = {
.probe = ingenic_mac_probe,
- .remove = stmmac_pltfr_remove,
.driver = {
.name = "ingenic-mac",
- .pm = pm_ptr(&ingenic_mac_pm_ops),
+ .pm = &stmmac_pltfr_pm_ops,
.of_match_table = ingenic_mac_of_matches,
},
};
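The new ingenic_set_phy_intf_sel() above rejects a requested selector unless its bit is set in the SoC's valid_phy_intf_sel bitmap, which lets every set_mode() callback drop its own switch over supported interfaces. A minimal stand-alone sketch of that check, using placeholder selector values rather than the real PHY_INTF_SEL_* constants:

#include <stdio.h>

#define BIT(n)  (1u << (n))

int main(void)
{
	/* Example: a SoC that accepts a single selector, like x1000 above. */
	unsigned int valid = BIT(4);           /* placeholder "RMII" selector bit */
	unsigned int requests[] = { 0, 1, 4 };

	for (int i = 0; i < 3; i++) {
		unsigned int sel = requests[i];
		int ok = sel < 8 && (valid & BIT(sel));

		printf("selector %u -> %s\n", sel, ok ? "accepted" : "-EINVAL");
	}
	return 0;
}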
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
index 230e79658c54..4ea7b0a803d7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
@@ -22,45 +22,12 @@ struct intel_dwmac {
};
struct intel_dwmac_data {
- void (*fix_mac_speed)(void *priv, unsigned int speed, unsigned int mode);
unsigned long ptp_ref_clk_rate;
unsigned long tx_clk_rate;
bool tx_clk_en;
};
-static void kmb_eth_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
-{
- struct intel_dwmac *dwmac = priv;
- unsigned long rate;
- int ret;
-
- rate = clk_get_rate(dwmac->tx_clk);
-
- switch (speed) {
- case SPEED_1000:
- rate = 125000000;
- break;
-
- case SPEED_100:
- rate = 25000000;
- break;
-
- case SPEED_10:
- rate = 2500000;
- break;
-
- default:
- dev_err(dwmac->dev, "Invalid speed\n");
- break;
- }
-
- ret = clk_set_rate(dwmac->tx_clk, rate);
- if (ret)
- dev_err(dwmac->dev, "Failed to configure tx clock rate\n");
-}
-
static const struct intel_dwmac_data kmb_data = {
- .fix_mac_speed = kmb_eth_fix_mac_speed,
.ptp_ref_clk_rate = 200000000,
.tx_clk_rate = 125000000,
.tx_clk_en = true,
@@ -97,30 +64,36 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
dwmac->dev = &pdev->dev;
dwmac->tx_clk = NULL;
+ /*
+ * This cannot return NULL at this point because the driver's
+ * compatibility with the device has already been validated in
+ * platform_match().
+ */
dwmac->data = device_get_match_data(&pdev->dev);
- if (dwmac->data) {
- if (dwmac->data->fix_mac_speed)
- plat_dat->fix_mac_speed = dwmac->data->fix_mac_speed;
-
- /* Enable TX clock */
- if (dwmac->data->tx_clk_en) {
- dwmac->tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
- if (IS_ERR(dwmac->tx_clk))
- return PTR_ERR(dwmac->tx_clk);
-
- clk_prepare_enable(dwmac->tx_clk);
-
- /* Check and configure TX clock rate */
- rate = clk_get_rate(dwmac->tx_clk);
- if (dwmac->data->tx_clk_rate &&
- rate != dwmac->data->tx_clk_rate) {
- rate = dwmac->data->tx_clk_rate;
- ret = clk_set_rate(dwmac->tx_clk, rate);
- if (ret) {
- dev_err(&pdev->dev,
- "Failed to set tx_clk\n");
- return ret;
- }
+
+ /* Enable TX clock */
+ if (dwmac->data->tx_clk_en) {
+ dwmac->tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
+ if (IS_ERR(dwmac->tx_clk))
+ return PTR_ERR(dwmac->tx_clk);
+
+ ret = clk_prepare_enable(dwmac->tx_clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to enable tx_clk\n");
+ return ret;
+ }
+
+ /* Check and configure TX clock rate */
+ rate = clk_get_rate(dwmac->tx_clk);
+ if (dwmac->data->tx_clk_rate &&
+ rate != dwmac->data->tx_clk_rate) {
+ rate = dwmac->data->tx_clk_rate;
+ ret = clk_set_rate(dwmac->tx_clk, rate);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to set tx_clk\n");
+ goto err_tx_clk_disable;
}
}
@@ -133,28 +106,25 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"Failed to set clk_ptp_ref\n");
- return ret;
+ goto err_tx_clk_disable;
}
}
}
+ plat_dat->clk_tx_i = dwmac->tx_clk;
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
plat_dat->bsp_priv = dwmac;
- plat_dat->eee_usecs_rate = plat_dat->clk_ptp_rate;
-
- if (plat_dat->eee_usecs_rate > 0) {
- u32 tx_lpi_usec;
-
- tx_lpi_usec = (plat_dat->eee_usecs_rate / 1000000) - 1;
- writel(tx_lpi_usec, stmmac_res.addr + GMAC_1US_TIC_COUNTER);
- }
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret) {
- clk_disable_unprepare(dwmac->tx_clk);
- return ret;
- }
+ if (ret)
+ goto err_tx_clk_disable;
return 0;
+
+err_tx_clk_disable:
+ if (dwmac->data->tx_clk_en)
+ clk_disable_unprepare(dwmac->tx_clk);
+ return ret;
}
static void intel_eth_plat_remove(struct platform_device *pdev)
@@ -162,7 +132,8 @@ static void intel_eth_plat_remove(struct platform_device *pdev)
struct intel_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
stmmac_pltfr_remove(pdev);
- clk_disable_unprepare(dwmac->tx_clk);
+ if (dwmac->data->tx_clk_en)
+ clk_disable_unprepare(dwmac->tx_clk);
}
static struct platform_driver intel_eth_plat_driver = {
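The deleted kmb_eth_fix_mac_speed() above open-coded the link-speed to TX-clock mapping; this series leans on rgmii_clock() and stmmac_set_clk_tx_rate() for that instead. A stand-alone sketch of the mapping the removed switch encoded (1000 Mb/s -> 125 MHz, 100 Mb/s -> 25 MHz, 10 Mb/s -> 2.5 MHz); rgmii_clock_sketch() is a hypothetical stand-in, not the kernel's rgmii_clock().

#include <stdio.h>

static long rgmii_clock_sketch(int speed)
{
	switch (speed) {
	case 1000:
		return 125000000;	/* 125 MHz */
	case 100:
		return 25000000;	/* 25 MHz */
	case 10:
		return 2500000;		/* 2.5 MHz */
	default:
		return -1;		/* invalid speed */
	}
}

int main(void)
{
	int speeds[] = { 1000, 100, 10, 42 };

	for (int i = 0; i < 4; i++)
		printf("%4d Mb/s -> %ld Hz\n", speeds[i], rgmii_clock_sketch(speeds[i]));
	return 0;
}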
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 48acba5eb178..aad1be1ec4c1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -5,15 +5,30 @@
#include <linux/clk-provider.h>
#include <linux/pci.h>
#include <linux/dmi.h>
+#include <linux/platform_data/x86/intel_pmc_ipc.h>
#include "dwmac-intel.h"
#include "dwmac4.h"
#include "stmmac.h"
#include "stmmac_ptp.h"
+struct pmc_serdes_regs {
+ u8 index;
+ u32 val;
+};
+
+struct pmc_serdes_reg_info {
+ const struct pmc_serdes_regs *regs;
+ u8 num_regs;
+};
+
struct intel_priv_data {
int mdio_adhoc_addr; /* mdio address for serdes & etc */
unsigned long crossts_adj;
bool is_pse;
+ const int *tsn_lane_regs;
+ int max_tsn_lane_regs;
+ struct pmc_serdes_reg_info pid_1g;
+ struct pmc_serdes_reg_info pid_2p5g;
};
/* This struct is used to associate PCI Function of MAC controller on a board,
@@ -35,6 +50,45 @@ struct stmmac_pci_info {
int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
};
+static const struct pmc_serdes_regs pid_modphy3_1g_regs[] = {
+ { PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_1G },
+ { PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_1G },
+ { PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_1G },
+ { PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_1G },
+ { PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_1G },
+ {}
+};
+
+static const struct pmc_serdes_regs pid_modphy3_2p5g_regs[] = {
+ { PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_2P5G },
+ { PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_2P5G },
+ { PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_2P5G },
+ { PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_2P5G },
+ { PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G },
+ {}
+};
+
+static const struct pmc_serdes_regs pid_modphy1_1g_regs[] = {
+ { PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_1G },
+ { PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_1G },
+ { PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_1G },
+ { PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_1G },
+ { PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_1G },
+ {}
+};
+
+static const struct pmc_serdes_regs pid_modphy1_2p5g_regs[] = {
+ { PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_2P5G },
+ { PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_2P5G },
+ { PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_2P5G },
+ { PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_2P5G },
+ { PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G },
+ {}
+};
+
+static const int ehl_tsn_lane_regs[] = {7, 8, 9, 10, 11};
+static const int adln_tsn_lane_regs[] = {6};
+
static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
const struct dmi_system_id *dmi_list)
{
@@ -93,7 +147,7 @@ static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
data &= ~SERDES_RATE_MASK;
data &= ~SERDES_PCLK_MASK;
- if (priv->plat->max_speed == 2500)
+ if (priv->plat->phy_interface == PHY_INTERFACE_MODE_2500BASEX)
data |= SERDES_RATE_PCIE_GEN2 << SERDES_RATE_PCIE_SHIFT |
SERDES_PCLK_37p5MHZ << SERDES_PCLK_SHIFT;
else
@@ -230,28 +284,28 @@ static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
}
}
-static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data)
+static void tgl_get_interfaces(struct stmmac_priv *priv, void *bsp_priv,
+ unsigned long *interfaces)
{
- struct intel_priv_data *intel_priv = intel_data;
- struct stmmac_priv *priv = netdev_priv(ndev);
- int serdes_phy_addr = 0;
- u32 data = 0;
-
- serdes_phy_addr = intel_priv->mdio_adhoc_addr;
+ struct intel_priv_data *intel_priv = bsp_priv;
+ phy_interface_t interface;
+ int data;
/* Determine the link speed mode: 2.5Gbps/1Gbps */
- data = mdiobus_read(priv->mii, serdes_phy_addr,
- SERDES_GCR);
+ data = mdiobus_read(priv->mii, intel_priv->mdio_adhoc_addr, SERDES_GCR);
+ if (data < 0)
+ return;
- if (((data & SERDES_LINK_MODE_MASK) >> SERDES_LINK_MODE_SHIFT) ==
- SERDES_LINK_MODE_2G5) {
+ if (FIELD_GET(SERDES_LINK_MODE_MASK, data) == SERDES_LINK_MODE_2G5) {
dev_info(priv->device, "Link Speed Mode: 2.5Gbps\n");
- priv->plat->max_speed = 2500;
- priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX;
priv->plat->mdio_bus_data->default_an_inband = false;
+ interface = PHY_INTERFACE_MODE_2500BASEX;
} else {
- priv->plat->max_speed = 1000;
+ interface = PHY_INTERFACE_MODE_SGMII;
}
+
+ __set_bit(interface, interfaces);
+ priv->plat->phy_interface = interface;
}
/* Program PTP Clock Frequency for different variant of
@@ -317,9 +371,6 @@ static int intel_crosststamp(ktime_t *device,
u32 acr_value;
int i;
- if (!boot_cpu_has(X86_FEATURE_ART))
- return -EOPNOTSUPP;
-
intel_priv = priv->plat->bsp_priv;
/* Both internal crosstimestamping and external triggered event
@@ -379,6 +430,12 @@ static int intel_crosststamp(ktime_t *device,
return -ETIMEDOUT;
}
+ *system = (struct system_counterval_t) {
+ .cycles = 0,
+ .cs_id = CSID_X86_ART,
+ .use_nsecs = false,
+ };
+
num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) &
GMAC_TIMESTAMP_ATSNS_MASK) >>
GMAC_TIMESTAMP_ATSNS_SHIFT;
@@ -394,7 +451,7 @@ static int intel_crosststamp(ktime_t *device,
}
system->cycles *= intel_priv->crossts_adj;
- system->cs_id = CSID_X86_ART;
+
priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
return 0;
@@ -415,33 +472,103 @@ static void intel_mgbe_pse_crossts_adj(struct intel_priv_data *intel_priv,
}
}
-static void common_default_data(struct plat_stmmacenet_data *plat)
+static int intel_tsn_lane_is_available(struct net_device *ndev,
+ struct intel_priv_data *intel_priv)
{
- plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
- plat->has_gmac = 1;
- plat->force_sf_dma_mode = 1;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct pmc_ipc_cmd tmp = {};
+ struct pmc_ipc_rbuf rbuf = {};
+ int ret = 0, i, j;
+ const int max_fia_regs = 5;
- plat->mdio_bus_data->needs_reset = true;
+ tmp.cmd = IPC_SOC_REGISTER_ACCESS;
+ tmp.sub_cmd = IPC_SOC_SUB_CMD_READ;
- /* Set default value for multicast hash bins */
- plat->multicast_filter_bins = HASH_TABLE_SIZE;
+ for (i = 0; i < max_fia_regs; i++) {
+ tmp.wbuf[0] = R_PCH_FIA_15_PCR_LOS1_REG_BASE + i;
- /* Set default value for unicast filter entries */
- plat->unicast_filter_entries = 1;
+ ret = intel_pmc_ipc(&tmp, &rbuf);
+ if (ret < 0) {
+ netdev_info(priv->dev, "Failed to read from PMC.\n");
+ return ret;
+ }
- /* Set the maxmtu to a default of JUMBO_LEN */
- plat->maxmtu = JUMBO_LEN;
+ for (j = 0; j < intel_priv->max_tsn_lane_regs; j++)
+ if ((rbuf.buf[0] >>
+ (4 * (intel_priv->tsn_lane_regs[j] % 8)) &
+ B_PCH_FIA_PCR_L0O) == 0xB)
+ return 0;
+ }
- /* Set default number of RX and TX queues to use */
- plat->tx_queues_to_use = 1;
- plat->rx_queues_to_use = 1;
+ return -EINVAL;
+}
- /* Disable Priority config by default */
- plat->tx_queues_cfg[0].use_prio = false;
- plat->rx_queues_cfg[0].use_prio = false;
+static int intel_set_reg_access(const struct pmc_serdes_regs *regs, int max_regs)
+{
+ int ret = 0, i;
- /* Disable RX queues routing by default */
- plat->rx_queues_cfg[0].pkt_route = 0x0;
+ for (i = 0; i < max_regs; i++) {
+ struct pmc_ipc_cmd tmp = {};
+ struct pmc_ipc_rbuf rbuf = {};
+
+ tmp.cmd = IPC_SOC_REGISTER_ACCESS;
+ tmp.sub_cmd = IPC_SOC_SUB_CMD_WRITE;
+ tmp.wbuf[0] = (u32)regs[i].index;
+ tmp.wbuf[1] = regs[i].val;
+
+ ret = intel_pmc_ipc(&tmp, &rbuf);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int intel_mac_finish(struct net_device *ndev,
+ void *intel_data,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct intel_priv_data *intel_priv = intel_data;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ const struct pmc_serdes_regs *regs;
+ int max_regs = 0;
+ int ret = 0;
+
+ ret = intel_tsn_lane_is_available(ndev, intel_priv);
+ if (ret < 0) {
+ netdev_info(priv->dev, "No TSN lane available to set the registers.\n");
+ return ret;
+ }
+
+ if (interface == PHY_INTERFACE_MODE_2500BASEX) {
+ regs = intel_priv->pid_2p5g.regs;
+ max_regs = intel_priv->pid_2p5g.num_regs;
+ } else {
+ regs = intel_priv->pid_1g.regs;
+ max_regs = intel_priv->pid_1g.num_regs;
+ }
+
+ ret = intel_set_reg_access(regs, max_regs);
+ if (ret < 0)
+ return ret;
+
+ priv->plat->phy_interface = interface;
+
+ intel_serdes_powerdown(ndev, intel_priv);
+ intel_serdes_powerup(ndev, intel_priv);
+
+ return ret;
+}
+
+static void common_default_data(struct plat_stmmacenet_data *plat)
+{
+ /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+ plat->clk_csr = STMMAC_CSR_20_35M;
+ plat->core_type = DWMAC_CORE_GMAC;
+ plat->force_sf_dma_mode = 1;
+
+ plat->mdio_bus_data->needs_reset = true;
}
static struct phylink_pcs *intel_mgbe_select_pcs(struct stmmac_priv *priv,
@@ -464,9 +591,8 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->pdev = pdev;
plat->phy_addr = -1;
- plat->clk_csr = 5;
- plat->has_gmac = 0;
- plat->has_gmac4 = 1;
+ plat->clk_csr = STMMAC_CSR_250_300M;
+ plat->core_type = DWMAC_CORE_GMAC4;
plat->force_sf_dma_mode = 0;
plat->flags |= (STMMAC_FLAG_TSO_EN | STMMAC_FLAG_SPH_DISABLE);
@@ -483,22 +609,12 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
- for (i = 0; i < plat->rx_queues_to_use; i++) {
+ for (i = 0; i < plat->rx_queues_to_use; i++)
plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
- plat->rx_queues_cfg[i].chan = i;
-
- /* Disable Priority config by default */
- plat->rx_queues_cfg[i].use_prio = false;
-
- /* Disable RX queues routing by default */
- plat->rx_queues_cfg[i].pkt_route = 0x0;
- }
for (i = 0; i < plat->tx_queues_to_use; i++) {
plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
- /* Disable Priority config by default */
- plat->tx_queues_cfg[i].use_prio = false;
/* Default TX Q0 to use TSO and rest TXQ for TBS */
if (i > 0)
plat->tx_queues_cfg[i].tbs_en = 1;
@@ -534,12 +650,10 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->axi->axi_xit_frm = 0;
plat->axi->axi_wr_osr_lmt = 1;
plat->axi->axi_rd_osr_lmt = 1;
- plat->axi->axi_blen[0] = 4;
- plat->axi->axi_blen[1] = 8;
- plat->axi->axi_blen[2] = 16;
+ plat->axi->axi_blen_regval = DMA_AXI_BLEN4 | DMA_AXI_BLEN8 |
+ DMA_AXI_BLEN16;
plat->ptp_max_adj = plat->clk_ptp_rate;
- plat->eee_usecs_rate = plat->clk_ptp_rate;
/* Set system clock */
sprintf(clk_name, "%s-%s", "stmmac", pci_name(pdev));
@@ -561,15 +675,6 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->ptp_clk_freq_config = intel_mgbe_ptp_clk_freq_config;
- /* Set default value for multicast hash bins */
- plat->multicast_filter_bins = HASH_TABLE_SIZE;
-
- /* Set default value for unicast filter entries */
- plat->unicast_filter_entries = 1;
-
- /* Set the maxmtu to a default of JUMBO_LEN */
- plat->maxmtu = JUMBO_LEN;
-
plat->flags |= STMMAC_FLAG_VLAN_FAIL_Q_EN;
/* Use the last Rx queue */
@@ -607,7 +712,9 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->int_snapshot_num = AUX_SNAPSHOT1;
- plat->crosststamp = intel_crosststamp;
+ if (boot_cpu_has(X86_FEATURE_ART))
+ plat->crosststamp = intel_crosststamp;
+
plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
/* Setup MSI vector offset specific to Intel mGbE controller */
@@ -624,6 +731,8 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
static int ehl_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
plat->rx_queues_to_use = 8;
plat->tx_queues_to_use = 8;
plat->flags |= STMMAC_FLAG_USE_PHY_WOL;
@@ -639,20 +748,29 @@ static int ehl_common_data(struct pci_dev *pdev,
plat->safety_feat_cfg->prtyen = 0;
plat->safety_feat_cfg->tmouten = 0;
+ intel_priv->tsn_lane_regs = ehl_tsn_lane_regs;
+ intel_priv->max_tsn_lane_regs = ARRAY_SIZE(ehl_tsn_lane_regs);
+
return intel_mgbe_common_data(pdev, plat);
}
static int ehl_sgmii_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
plat->bus_id = 1;
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
- plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
-
+ plat->mac_finish = intel_mac_finish;
plat->clk_ptp_rate = 204800000;
+ intel_priv->pid_1g.regs = pid_modphy3_1g_regs;
+ intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy3_1g_regs);
+ intel_priv->pid_2p5g.regs = pid_modphy3_2p5g_regs;
+ intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy3_2p5g_regs);
+
return ehl_common_data(pdev, plat);
}
@@ -705,10 +823,18 @@ static struct stmmac_pci_info ehl_pse0_rgmii1g_info = {
static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
- plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
+ plat->mac_finish = intel_mac_finish;
+
+ intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
+ intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
+ intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
+ intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);
+
return ehl_pse0_common_data(pdev, plat);
}
@@ -746,10 +872,18 @@ static struct stmmac_pci_info ehl_pse1_rgmii1g_info = {
static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
- plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
+ plat->mac_finish = intel_mac_finish;
+
+ intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
+ intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
+ intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
+ intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);
+
return ehl_pse1_common_data(pdev, plat);
}
@@ -763,7 +897,7 @@ static int tgl_common_data(struct pci_dev *pdev,
plat->rx_queues_to_use = 6;
plat->tx_queues_to_use = 4;
plat->clk_ptp_rate = 204800000;
- plat->speed_mode_2500 = intel_speed_mode_2500;
+ plat->get_interfaces = tgl_get_interfaces;
plat->safety_feat_cfg->tsoee = 1;
plat->safety_feat_cfg->mrxpee = 0;
@@ -782,7 +916,6 @@ static int tgl_sgmii_phy0_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 1;
- plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
return tgl_common_data(pdev, plat);
@@ -796,7 +929,6 @@ static int tgl_sgmii_phy1_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 2;
- plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
return tgl_common_data(pdev, plat);
@@ -810,7 +942,6 @@ static int adls_sgmii_phy0_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 1;
- plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
/* SerDes power up and power down are done in BIOS for ADL */
@@ -825,7 +956,6 @@ static int adls_sgmii_phy1_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 2;
- plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
/* SerDes power up and power down are done in BIOS for ADL */
@@ -835,6 +965,55 @@ static int adls_sgmii_phy1_data(struct pci_dev *pdev,
static struct stmmac_pci_info adls_sgmii1g_phy1_info = {
.setup = adls_sgmii_phy1_data,
};
+
+static int adln_common_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
+ plat->rx_queues_to_use = 6;
+ plat->tx_queues_to_use = 4;
+ plat->clk_ptp_rate = 204800000;
+
+ plat->safety_feat_cfg->tsoee = 1;
+ plat->safety_feat_cfg->mrxpee = 0;
+ plat->safety_feat_cfg->mestee = 1;
+ plat->safety_feat_cfg->mrxee = 1;
+ plat->safety_feat_cfg->mtxee = 1;
+ plat->safety_feat_cfg->epsi = 0;
+ plat->safety_feat_cfg->edpp = 0;
+ plat->safety_feat_cfg->prtyen = 0;
+ plat->safety_feat_cfg->tmouten = 0;
+
+ intel_priv->tsn_lane_regs = adln_tsn_lane_regs;
+ intel_priv->max_tsn_lane_regs = ARRAY_SIZE(adln_tsn_lane_regs);
+
+ return intel_mgbe_common_data(pdev, plat);
+}
+
+static int adln_sgmii_phy0_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
+ plat->bus_id = 1;
+ plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+ plat->serdes_powerup = intel_serdes_powerup;
+ plat->serdes_powerdown = intel_serdes_powerdown;
+ plat->mac_finish = intel_mac_finish;
+
+ intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
+ intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
+ intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
+ intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);
+
+ return adln_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info adln_sgmii1g_phy0_info = {
+ .setup = adln_sgmii_phy0_data,
+};
+
static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
{
.func = 6,
@@ -1011,6 +1190,37 @@ static int stmmac_config_multi_msi(struct pci_dev *pdev,
return 0;
}
+static int intel_eth_pci_suspend(struct device *dev, void *bsp_priv)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ ret = pci_save_state(pdev);
+ if (ret)
+ return ret;
+
+ pci_wake_from_d3(pdev, true);
+ pci_set_power_state(pdev, PCI_D3hot);
+ return 0;
+}
+
+static int intel_eth_pci_resume(struct device *dev, void *bsp_priv)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ pci_restore_state(pdev);
+ pci_set_power_state(pdev, PCI_D0);
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ return 0;
+}
+
/**
* intel_eth_pci_probe
*
@@ -1036,7 +1246,7 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
if (!intel_priv)
return -ENOMEM;
- plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+ plat = stmmac_plat_dat_alloc(&pdev->dev);
if (!plat)
return -ENOMEM;
@@ -1072,6 +1282,9 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
pci_set_master(pdev);
plat->bsp_priv = intel_priv;
+ plat->suspend = intel_eth_pci_suspend;
+ plat->resume = intel_eth_pci_resume;
+
intel_priv->mdio_adhoc_addr = INTEL_MGBE_ADHOC_ADDR;
intel_priv->crossts_adj = 1;
@@ -1094,13 +1307,6 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
memset(&res, 0, sizeof(res));
res.addr = pcim_iomap_table(pdev)[0];
- if (plat->eee_usecs_rate > 0) {
- u32 tx_lpi_usec;
-
- tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1;
- writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER);
- }
-
ret = stmmac_config_multi_msi(pdev, plat, &res);
if (ret) {
ret = stmmac_config_single_msi(pdev, plat, &res);
@@ -1142,44 +1348,6 @@ static void intel_eth_pci_remove(struct pci_dev *pdev)
clk_unregister_fixed_rate(priv->plat->stmmac_clk);
}
-static int __maybe_unused intel_eth_pci_suspend(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- ret = stmmac_suspend(dev);
- if (ret)
- return ret;
-
- ret = pci_save_state(pdev);
- if (ret)
- return ret;
-
- pci_wake_from_d3(pdev, true);
- pci_set_power_state(pdev, PCI_D3hot);
- return 0;
-}
-
-static int __maybe_unused intel_eth_pci_resume(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- pci_restore_state(pdev);
- pci_set_power_state(pdev, PCI_D0);
-
- ret = pcim_enable_device(pdev);
- if (ret)
- return ret;
-
- pci_set_master(pdev);
-
- return stmmac_resume(dev);
-}
-
-static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
- intel_eth_pci_resume);
-
#define PCI_DEVICE_ID_INTEL_QUARK 0x0937
#define PCI_DEVICE_ID_INTEL_EHL_RGMII1G 0x4b30
#define PCI_DEVICE_ID_INTEL_EHL_SGMII1G 0x4b31
@@ -1217,8 +1385,8 @@ static const struct pci_device_id intel_eth_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1, &tgl_sgmii1g_phy1_info) },
{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) },
{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) },
- { PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &tgl_sgmii1g_phy0_info) },
- { PCI_DEVICE_DATA(INTEL, RPLP_SGMII1G, &tgl_sgmii1g_phy0_info) },
+ { PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &adln_sgmii1g_phy0_info) },
+ { PCI_DEVICE_DATA(INTEL, RPLP_SGMII1G, &adln_sgmii1g_phy0_info) },
{}
};
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
@@ -1229,7 +1397,7 @@ static struct pci_driver intel_eth_pci_driver = {
.probe = intel_eth_pci_probe,
.remove = intel_eth_pci_remove,
.driver = {
- .pm = &intel_eth_pm_ops,
+ .pm = &stmmac_simple_pm_ops,
},
};
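intel_tsn_lane_is_available() above reads the PCH FIA lane-ownership registers over PMC IPC and treats a 4-bit owner field of 0xB as a lane owned by the TSN controller. A stand-alone sketch of that nibble decode; the register value and the second lane number below are made up for illustration (only lane 6 for ADL-N is taken from the table above).

#include <stdio.h>

#define B_PCH_FIA_PCR_L0O  0xfu	/* 4-bit lane owner field, GENMASK(3, 0) */

int main(void)
{
	unsigned int fia_reg = 0x00B00000u;	/* made up: lane 5's owner field is 0xB */
	int lanes[] = { 6, 5 };			/* lanes to probe; ADL-N uses lane 6 */

	for (int j = 0; j < 2; j++) {
		unsigned int owner = (fia_reg >> (4 * (lanes[j] % 8))) & B_PCH_FIA_PCR_L0O;

		printf("lane %d owner: 0x%x%s\n", lanes[j], owner,
		       owner == 0xB ? " (usable TSN lane)" : "");
	}
	return 0;
}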
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
index 0a37987478c1..7511c224b312 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
@@ -21,7 +21,6 @@
#define SERDES_RATE_MASK GENMASK(9, 8)
#define SERDES_PCLK_MASK GENMASK(14, 12) /* PCLK rate to PHY */
#define SERDES_LINK_MODE_MASK GENMASK(2, 1)
-#define SERDES_LINK_MODE_SHIFT 1
#define SERDES_PWR_ST_SHIFT 4
#define SERDES_PWR_ST_P0 0x0
#define SERDES_PWR_ST_P3 0x3
@@ -50,4 +49,33 @@
#define PCH_PTP_CLK_FREQ_19_2MHZ (GMAC_GPO0)
#define PCH_PTP_CLK_FREQ_200MHZ (0)
+/* Modphy Register index */
+#define R_PCH_FIA_15_PCR_LOS1_REG_BASE 8
+#define R_PCH_FIA_15_PCR_LOS2_REG_BASE 9
+#define R_PCH_FIA_15_PCR_LOS3_REG_BASE 10
+#define R_PCH_FIA_15_PCR_LOS4_REG_BASE 11
+#define R_PCH_FIA_15_PCR_LOS5_REG_BASE 12
+#define B_PCH_FIA_PCR_L0O GENMASK(3, 0)
+#define PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0 13
+#define PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2 14
+#define PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7 15
+#define PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10 16
+#define PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30 17
+#define PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0 18
+#define PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2 19
+#define PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7 20
+#define PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10 21
+#define PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30 22
+
+#define B_MODPHY_PCR_LCPLL_DWORD0_1G 0x46AAAA41
+#define N_MODPHY_PCR_LCPLL_DWORD2_1G 0x00000139
+#define N_MODPHY_PCR_LCPLL_DWORD7_1G 0x002A0003
+#define N_MODPHY_PCR_LPPLL_DWORD10_1G 0x00170008
+#define N_MODPHY_PCR_CMN_ANA_DWORD30_1G 0x0000D4AC
+#define B_MODPHY_PCR_LCPLL_DWORD0_2P5G 0x58555551
+#define N_MODPHY_PCR_LCPLL_DWORD2_2P5G 0x0000012D
+#define N_MODPHY_PCR_LCPLL_DWORD7_2P5G 0x001F0003
+#define N_MODPHY_PCR_LPPLL_DWORD10_2P5G 0x00170008
+#define N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G 0x8200ACAC
+
#endif /* __DWMAC_INTEL_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 61227dcf56dc..c05f85534f0c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -112,7 +112,7 @@ struct ipq806x_gmac {
phy_interface_t phy_mode;
};
-static int get_clk_div_sgmii(struct ipq806x_gmac *gmac, unsigned int speed)
+static int get_clk_div_sgmii(struct ipq806x_gmac *gmac, int speed)
{
struct device *dev = &gmac->pdev->dev;
int div;
@@ -138,7 +138,7 @@ static int get_clk_div_sgmii(struct ipq806x_gmac *gmac, unsigned int speed)
return div;
}
-static int get_clk_div_rgmii(struct ipq806x_gmac *gmac, unsigned int speed)
+static int get_clk_div_rgmii(struct ipq806x_gmac *gmac, int speed)
{
struct device *dev = &gmac->pdev->dev;
int div;
@@ -164,7 +164,7 @@ static int get_clk_div_rgmii(struct ipq806x_gmac *gmac, unsigned int speed)
return div;
}
-static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed)
+static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, int speed)
{
uint32_t clk_bits, val;
int div;
@@ -211,16 +211,12 @@ static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed)
return 0;
}
-static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
+static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac,
+ struct plat_stmmacenet_data *plat_dat)
{
struct device *dev = &gmac->pdev->dev;
- int ret;
- ret = of_get_phy_mode(dev->of_node, &gmac->phy_mode);
- if (ret) {
- dev_err(dev, "missing phy mode property\n");
- return -EINVAL;
- }
+ gmac->phy_mode = plat_dat->phy_interface;
if (of_property_read_u32(dev->of_node, "qcom,id", &gmac->id) < 0) {
dev_err(dev, "missing qcom id property\n");
@@ -260,11 +256,12 @@ static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
return PTR_ERR_OR_ZERO(gmac->qsgmii_csr);
}
-static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+static int ipq806x_gmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
- struct ipq806x_gmac *gmac = priv;
+ struct ipq806x_gmac *gmac = bsp_priv;
- ipq806x_gmac_set_speed(gmac, speed);
+ return ipq806x_gmac_set_speed(gmac, speed);
}
static int
@@ -397,7 +394,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
gmac->pdev = pdev;
- err = ipq806x_gmac_of_parse(gmac);
+ err = ipq806x_gmac_of_parse(gmac, plat_dat);
if (err) {
dev_err(dev, "device tree parsing error\n");
return err;
@@ -476,9 +473,9 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
return err;
}
- plat_dat->has_gmac = true;
+ plat_dat->core_type = DWMAC_CORE_GMAC;
plat_dat->bsp_priv = gmac;
- plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
+ plat_dat->set_clk_tx_rate = ipq806x_gmac_set_clk_tx_rate;
plat_dat->multicast_filter_bins = 0;
plat_dat->tx_fifo_size = 8192;
plat_dat->rx_fifo_size = 8192;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index bfe6e2d631bd..107a7c84ace8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -8,9 +8,12 @@
#include <linux/device.h>
#include <linux/of_irq.h>
#include "stmmac.h"
+#include "stmmac_libpci.h"
#include "dwmac_dma.h"
#include "dwmac1000.h"
+#define DRIVER_NAME "dwmac-loongson-pci"
+
/* Normal Loongson Tx Summary */
#define DMA_INTR_ENA_NIE_TX_LOONGSON 0x00040000
/* Normal Loongson Rx Summary */
@@ -64,12 +67,14 @@
DMA_STATUS_TPS | DMA_STATUS_TI | \
DMA_STATUS_MSK_COMMON_LOONGSON)
-#define PCI_DEVICE_ID_LOONGSON_GMAC 0x7a03
+#define PCI_DEVICE_ID_LOONGSON_GMAC1 0x7a03
+#define PCI_DEVICE_ID_LOONGSON_GMAC2 0x7a23
#define PCI_DEVICE_ID_LOONGSON_GNET 0x7a13
-#define DWMAC_CORE_LS_MULTICHAN 0x10 /* Loongson custom ID */
-#define CHANNEL_NUM 8
+#define DWMAC_CORE_MULTICHAN_V1 0x10 /* Loongson custom ID 0x10 */
+#define DWMAC_CORE_MULTICHAN_V2 0x12 /* Loongson custom ID 0x12 */
struct loongson_data {
+ u32 multichan;
u32 loongson_id;
struct device *dev;
};
@@ -81,64 +86,53 @@ struct stmmac_pci_info {
static void loongson_default_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct loongson_data *ld = plat->bsp_priv;
+
/* Get bus_id, this can be overwritten later */
plat->bus_id = pci_dev_id(pdev);
- plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
- plat->has_gmac = 1;
+ /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+ plat->clk_csr = STMMAC_CSR_20_35M;
+ plat->core_type = DWMAC_CORE_GMAC;
plat->force_sf_dma_mode = 1;
- /* Set default value for multicast hash bins */
+ /* Increase the default value for multicast hash bins */
plat->multicast_filter_bins = 256;
- plat->mac_interface = PHY_INTERFACE_MODE_NA;
-
- /* Set default value for unicast filter entries */
- plat->unicast_filter_entries = 1;
-
- /* Set the maxmtu to a default of JUMBO_LEN */
- plat->maxmtu = JUMBO_LEN;
-
- /* Disable Priority config by default */
- plat->tx_queues_cfg[0].use_prio = false;
- plat->rx_queues_cfg[0].use_prio = false;
-
- /* Disable RX queues routing by default */
- plat->rx_queues_cfg[0].pkt_route = 0x0;
-
plat->clk_ref_rate = 125000000;
plat->clk_ptp_rate = 125000000;
- /* Default to phy auto-detection */
- plat->phy_addr = -1;
-
plat->dma_cfg->pbl = 32;
plat->dma_cfg->pblx8 = true;
-}
-
-static int loongson_gmac_data(struct pci_dev *pdev,
- struct plat_stmmacenet_data *plat)
-{
- struct loongson_data *ld;
- int i;
-
- ld = plat->bsp_priv;
-
- loongson_default_data(pdev, plat);
- if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) {
- plat->rx_queues_to_use = CHANNEL_NUM;
- plat->tx_queues_to_use = CHANNEL_NUM;
+ switch (ld->loongson_id) {
+ case DWMAC_CORE_MULTICHAN_V1:
+ ld->multichan = 1;
+ plat->rx_queues_to_use = 8;
+ plat->tx_queues_to_use = 8;
/* Only channel 0 supports checksum,
* so turn off checksum to enable multiple channels.
*/
- for (i = 1; i < CHANNEL_NUM; i++)
+ for (int i = 1; i < 8; i++)
plat->tx_queues_cfg[i].coe_unsupported = 1;
- } else {
- plat->tx_queues_to_use = 1;
- plat->rx_queues_to_use = 1;
+
+ break;
+ case DWMAC_CORE_MULTICHAN_V2:
+ ld->multichan = 1;
+ plat->rx_queues_to_use = 4;
+ plat->tx_queues_to_use = 4;
+ break;
+ default:
+ ld->multichan = 0;
+ break;
}
+}
+
+static int loongson_gmac_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ loongson_default_data(pdev, plat);
plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
@@ -149,8 +143,7 @@ static struct stmmac_pci_info loongson_gmac_pci_info = {
.setup = loongson_gmac_data,
};
-static void loongson_gnet_fix_speed(void *priv, unsigned int speed,
- unsigned int mode)
+static void loongson_gnet_fix_speed(void *priv, int speed, unsigned int mode)
{
struct loongson_data *ld = (struct loongson_data *)priv;
struct net_device *ndev = dev_get_drvdata(ld->dev);
@@ -171,27 +164,8 @@ static void loongson_gnet_fix_speed(void *priv, unsigned int speed,
static int loongson_gnet_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
- struct loongson_data *ld;
- int i;
-
- ld = plat->bsp_priv;
-
loongson_default_data(pdev, plat);
- if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) {
- plat->rx_queues_to_use = CHANNEL_NUM;
- plat->tx_queues_to_use = CHANNEL_NUM;
-
- /* Only channel 0 supports checksum,
- * so turn off checksum to enable multiple channels.
- */
- for (i = 1; i < CHANNEL_NUM; i++)
- plat->tx_queues_cfg[i].coe_unsupported = 1;
- } else {
- plat->tx_queues_to_use = 1;
- plat->rx_queues_to_use = 1;
- }
-
plat->phy_interface = PHY_INTERFACE_MODE_GMII;
plat->mdio_bus_data->phy_mask = ~(u32)BIT(2);
plat->fix_mac_speed = loongson_gnet_fix_speed;
@@ -329,10 +303,9 @@ static int loongson_dwmac_dma_interrupt(struct stmmac_priv *priv,
return ret;
}
-static struct mac_device_info *loongson_dwmac_setup(void *apriv)
+static int loongson_dwmac_setup(void *apriv, struct mac_device_info *mac)
{
struct stmmac_priv *priv = apriv;
- struct mac_device_info *mac;
struct stmmac_dma_ops *dma;
struct loongson_data *ld;
struct pci_dev *pdev;
@@ -340,23 +313,19 @@ static struct mac_device_info *loongson_dwmac_setup(void *apriv)
ld = priv->plat->bsp_priv;
pdev = to_pci_dev(priv->device);
- mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
- if (!mac)
- return NULL;
-
dma = devm_kzalloc(priv->device, sizeof(*dma), GFP_KERNEL);
if (!dma)
- return NULL;
+ return -ENOMEM;
/* The Loongson GMAC and GNET devices are based on the DW GMAC
- * v3.50a and v3.73a IP-cores. But the HW designers have changed the
- * GMAC_VERSION.SNPSVER field to the custom 0x10 value on the
- * network controllers with the multi-channels feature
+ * v3.50a and v3.73a IP-cores. But the HW designers have changed
+ * the GMAC_VERSION.SNPSVER field to the custom 0x10/0x12 value
+ * on the network controllers with the multi-channels feature
* available to emphasize the differences: multiple DMA-channels,
* AV feature and GMAC_INT_STATUS CSR flags layout. Get back the
* original value so the correct HW-interface would be selected.
*/
- if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) {
+ if (ld->multichan) {
priv->synopsys_id = DWMAC_CORE_3_70;
*dma = dwmac1000_dma_ops;
dma->init_chan = loongson_dwmac_dma_init_channel;
@@ -377,13 +346,13 @@ static struct mac_device_info *loongson_dwmac_setup(void *apriv)
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
- /* Loongson GMAC doesn't support the flow control. LS2K2000
- * GNET doesn't support the half-duplex link mode.
+ /* Loongson GMAC doesn't support the flow control. Loongson GNET
+ * without multi-channel doesn't support the half-duplex link mode.
*/
- if (pdev->device == PCI_DEVICE_ID_LOONGSON_GMAC) {
+ if (pdev->device != PCI_DEVICE_ID_LOONGSON_GNET) {
mac->link.caps = MAC_10 | MAC_100 | MAC_1000;
} else {
- if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
+ if (ld->multichan)
mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000;
else
@@ -405,16 +374,18 @@ static struct mac_device_info *loongson_dwmac_setup(void *apriv)
mac->mii.clk_csr_shift = 2;
mac->mii.clk_csr_mask = GENMASK(5, 2);
- return mac;
+ return 0;
}
static int loongson_dwmac_msi_config(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat,
struct stmmac_resources *res)
{
- int i, ret, vecs;
+ int i, ch_num, ret, vecs;
- vecs = roundup_pow_of_two(CHANNEL_NUM * 2 + 1);
+ ch_num = min(plat->tx_queues_to_use, plat->rx_queues_to_use);
+
+ vecs = roundup_pow_of_two(ch_num * 2 + 1);
ret = pci_alloc_irq_vectors(pdev, vecs, vecs, PCI_IRQ_MSI);
if (ret < 0) {
dev_warn(&pdev->dev, "Failed to allocate MSI IRQs\n");
@@ -423,14 +394,12 @@ static int loongson_dwmac_msi_config(struct pci_dev *pdev,
res->irq = pci_irq_vector(pdev, 0);
- for (i = 0; i < plat->rx_queues_to_use; i++) {
- res->rx_irq[CHANNEL_NUM - 1 - i] =
- pci_irq_vector(pdev, 1 + i * 2);
+ for (i = 0; i < ch_num; i++) {
+ res->rx_irq[ch_num - 1 - i] = pci_irq_vector(pdev, 1 + i * 2);
}
- for (i = 0; i < plat->tx_queues_to_use; i++) {
- res->tx_irq[CHANNEL_NUM - 1 - i] =
- pci_irq_vector(pdev, 2 + i * 2);
+ for (i = 0; i < ch_num; i++) {
+ res->tx_irq[ch_num - 1 - i] = pci_irq_vector(pdev, 2 + i * 2);
}
plat->flags |= STMMAC_FLAG_MULTI_MSI_EN;
@@ -516,15 +485,33 @@ static int loongson_dwmac_acpi_config(struct pci_dev *pdev,
return 0;
}
+/* Loongson's DWMAC device may take nearly two seconds to complete DMA reset */
+static int loongson_dwmac_fix_reset(struct stmmac_priv *priv, void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_BUS_MODE);
+
+ if (value & DMA_BUS_MODE_SFT_RESET) {
+ netdev_err(priv->dev, "the PHY clock is missing\n");
+ return -EINVAL;
+ }
+
+ value |= DMA_BUS_MODE_SFT_RESET;
+ writel(value, ioaddr + DMA_BUS_MODE);
+
+ return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
+ !(value & DMA_BUS_MODE_SFT_RESET),
+ 10000, 2000000);
+}
+
static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct plat_stmmacenet_data *plat;
+ struct stmmac_resources res = {};
struct stmmac_pci_info *info;
- struct stmmac_resources res;
struct loongson_data *ld;
- int ret, i;
+ int ret;
- plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+ plat = stmmac_plat_dat_alloc(&pdev->dev);
if (!plat)
return -ENOMEM;
@@ -552,20 +539,16 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
pci_set_master(pdev);
/* Get the base address of device */
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- if (pci_resource_len(pdev, i) == 0)
- continue;
- ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
- if (ret)
- goto err_disable_device;
- break;
- }
-
- memset(&res, 0, sizeof(res));
- res.addr = pcim_iomap_table(pdev)[0];
+ res.addr = pcim_iomap_region(pdev, 0, DRIVER_NAME);
+ ret = PTR_ERR_OR_ZERO(res.addr);
+ if (ret)
+ goto err_disable_device;
plat->bsp_priv = ld;
- plat->setup = loongson_dwmac_setup;
+ plat->mac_setup = loongson_dwmac_setup;
+ plat->fix_soc_reset = loongson_dwmac_fix_reset;
+ plat->suspend = stmmac_pci_plat_suspend;
+ plat->resume = stmmac_pci_plat_resume;
ld->dev = &pdev->dev;
ld->loongson_id = readl(res.addr + GMAC_VERSION) & 0xff;
@@ -574,6 +557,9 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
if (ret)
goto err_disable_device;
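+ /* Scale the advertised Tx/Rx FIFO sizes by the number of queues in
+ * use (16 KiB per queue).
+ */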
+ plat->tx_fifo_size = SZ_16K * plat->tx_queues_to_use;
+ plat->rx_fifo_size = SZ_16K * plat->rx_queues_to_use;
+
if (dev_of_node(&pdev->dev))
ret = loongson_dwmac_dt_config(pdev, plat, &res);
else
@@ -582,7 +568,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
goto err_disable_device;
/* Use the common MAC IRQ if per-channel MSIs allocation failed */
- if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
+ if (ld->multichan)
loongson_dwmac_msi_config(pdev, plat, &res);
ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
@@ -594,7 +580,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
err_plat_clear:
if (dev_of_node(&pdev->dev))
loongson_dwmac_dt_clear(pdev, plat);
- if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
+ if (ld->multichan)
loongson_dwmac_msi_clear(pdev);
err_disable_device:
pci_disable_device(pdev);
@@ -606,7 +592,6 @@ static void loongson_dwmac_remove(struct pci_dev *pdev)
struct net_device *ndev = dev_get_drvdata(&pdev->dev);
struct stmmac_priv *priv = netdev_priv(ndev);
struct loongson_data *ld;
- int i;
ld = priv->plat->bsp_priv;
stmmac_dvr_remove(&pdev->dev);
@@ -614,71 +599,27 @@ static void loongson_dwmac_remove(struct pci_dev *pdev)
if (dev_of_node(&pdev->dev))
loongson_dwmac_dt_clear(pdev, priv->plat);
- if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
+ if (ld->multichan)
loongson_dwmac_msi_clear(pdev);
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- if (pci_resource_len(pdev, i) == 0)
- continue;
- pcim_iounmap_regions(pdev, BIT(i));
- break;
- }
-
pci_disable_device(pdev);
}
-static int __maybe_unused loongson_dwmac_suspend(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- ret = stmmac_suspend(dev);
- if (ret)
- return ret;
-
- ret = pci_save_state(pdev);
- if (ret)
- return ret;
-
- pci_disable_device(pdev);
- pci_wake_from_d3(pdev, true);
- return 0;
-}
-
-static int __maybe_unused loongson_dwmac_resume(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- pci_restore_state(pdev);
- pci_set_power_state(pdev, PCI_D0);
-
- ret = pci_enable_device(pdev);
- if (ret)
- return ret;
-
- pci_set_master(pdev);
-
- return stmmac_resume(dev);
-}
-
-static SIMPLE_DEV_PM_OPS(loongson_dwmac_pm_ops, loongson_dwmac_suspend,
- loongson_dwmac_resume);
-
static const struct pci_device_id loongson_dwmac_id_table[] = {
- { PCI_DEVICE_DATA(LOONGSON, GMAC, &loongson_gmac_pci_info) },
+ { PCI_DEVICE_DATA(LOONGSON, GMAC1, &loongson_gmac_pci_info) },
+ { PCI_DEVICE_DATA(LOONGSON, GMAC2, &loongson_gmac_pci_info) },
{ PCI_DEVICE_DATA(LOONGSON, GNET, &loongson_gnet_pci_info) },
{}
};
MODULE_DEVICE_TABLE(pci, loongson_dwmac_id_table);
static struct pci_driver loongson_dwmac_driver = {
- .name = "dwmac-loongson-pci",
+ .name = DRIVER_NAME,
.id_table = loongson_dwmac_id_table,
.probe = loongson_dwmac_probe,
.remove = loongson_dwmac_remove,
.driver = {
- .pm = &loongson_dwmac_pm_ops,
+ .pm = &stmmac_simple_pm_ops,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c
index 3e86810717d3..de9aba756aac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c
@@ -38,30 +38,54 @@
#define GMAC_SHUT BIT(6)
#define PHY_INTF_SELI GENMASK(30, 28)
-#define PHY_INTF_MII FIELD_PREP(PHY_INTF_SELI, 0)
-#define PHY_INTF_RMII FIELD_PREP(PHY_INTF_SELI, 4)
struct ls1x_dwmac {
struct plat_stmmacenet_data *plat_dat;
struct regmap *regmap;
+ unsigned int id;
};
-static int ls1b_dwmac_syscon_init(struct platform_device *pdev, void *priv)
+struct ls1x_data {
+ int (*setup)(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat);
+ int (*init)(struct device *dev, void *bsp_priv);
+};
+
+static int ls1b_dwmac_setup(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat)
{
- struct ls1x_dwmac *dwmac = priv;
- struct plat_stmmacenet_data *plat = dwmac->plat_dat;
- struct regmap *regmap = dwmac->regmap;
+ struct ls1x_dwmac *dwmac = plat_dat->bsp_priv;
struct resource *res;
- unsigned long reg_base;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
+ /* This shouldn't fail - stmmac_get_platform_resources()
+ * already mapped this resource.
+ */
dev_err(&pdev->dev, "Could not get IO_MEM resources\n");
return -EINVAL;
}
- reg_base = (unsigned long)res->start;
- if (reg_base == LS1B_GMAC0_BASE) {
+ if (res->start == LS1B_GMAC0_BASE) {
+ dwmac->id = 0;
+ } else if (res->start == LS1B_GMAC1_BASE) {
+ dwmac->id = 1;
+ } else {
+ dev_err(&pdev->dev, "Invalid Ethernet MAC base address %pR",
+ res);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ls1b_dwmac_syscon_init(struct device *dev, void *priv)
+{
+ struct ls1x_dwmac *dwmac = priv;
+ struct plat_stmmacenet_data *plat = dwmac->plat_dat;
+ struct regmap *regmap = dwmac->regmap;
+
+ if (dwmac->id == 0) {
switch (plat->phy_interface) {
case PHY_INTERFACE_MODE_RGMII_ID:
regmap_update_bits(regmap, LS1X_SYSCON0,
@@ -74,13 +98,13 @@ static int ls1b_dwmac_syscon_init(struct platform_device *pdev, void *priv)
GMAC0_USE_TXCLK | GMAC0_USE_PWM01);
break;
default:
- dev_err(&pdev->dev, "Unsupported PHY mode %u\n",
+ dev_err(dev, "Unsupported PHY mode %u\n",
plat->phy_interface);
return -EOPNOTSUPP;
}
regmap_update_bits(regmap, LS1X_SYSCON0, GMAC0_SHUT, 0);
- } else if (reg_base == LS1B_GMAC1_BASE) {
+ } else if (dwmac->id == 1) {
regmap_update_bits(regmap, LS1X_SYSCON0,
GMAC1_USE_UART1 | GMAC1_USE_UART0,
GMAC1_USE_UART1 | GMAC1_USE_UART0);
@@ -98,42 +122,34 @@ static int ls1b_dwmac_syscon_init(struct platform_device *pdev, void *priv)
GMAC1_USE_TXCLK | GMAC1_USE_PWM23);
break;
default:
- dev_err(&pdev->dev, "Unsupported PHY mode %u\n",
+ dev_err(dev, "Unsupported PHY mode %u\n",
plat->phy_interface);
return -EOPNOTSUPP;
}
regmap_update_bits(regmap, LS1X_SYSCON1, GMAC1_SHUT, 0);
- } else {
- dev_err(&pdev->dev, "Invalid Ethernet MAC base address %lx",
- reg_base);
- return -EINVAL;
}
return 0;
}
-static int ls1c_dwmac_syscon_init(struct platform_device *pdev, void *priv)
+static int ls1c_dwmac_syscon_init(struct device *dev, void *priv)
{
struct ls1x_dwmac *dwmac = priv;
struct plat_stmmacenet_data *plat = dwmac->plat_dat;
struct regmap *regmap = dwmac->regmap;
+ int phy_intf_sel;
- switch (plat->phy_interface) {
- case PHY_INTERFACE_MODE_MII:
- regmap_update_bits(regmap, LS1X_SYSCON1, PHY_INTF_SELI,
- PHY_INTF_MII);
- break;
- case PHY_INTERFACE_MODE_RMII:
- regmap_update_bits(regmap, LS1X_SYSCON1, PHY_INTF_SELI,
- PHY_INTF_RMII);
- break;
- default:
- dev_err(&pdev->dev, "Unsupported PHY-mode %u\n",
+ phy_intf_sel = stmmac_get_phy_intf_sel(plat->phy_interface);
+ if (phy_intf_sel != PHY_INTF_SEL_GMII_MII &&
+ phy_intf_sel != PHY_INTF_SEL_RMII) {
+ dev_err(dev, "Unsupported PHY-mode %u\n",
plat->phy_interface);
return -EOPNOTSUPP;
}
+ regmap_update_bits(regmap, LS1X_SYSCON1, PHY_INTF_SELI,
+ FIELD_PREP(PHY_INTF_SELI, phy_intf_sel));
regmap_update_bits(regmap, LS1X_SYSCON0, GMAC0_SHUT, 0);
return 0;
@@ -143,9 +159,9 @@ static int ls1x_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
+ const struct ls1x_data *data;
struct regmap *regmap;
struct ls1x_dwmac *dwmac;
- int (*init)(struct platform_device *pdev, void *priv);
int ret;
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
@@ -159,8 +175,8 @@ static int ls1x_dwmac_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(regmap),
"Unable to find syscon\n");
- init = of_device_get_match_data(&pdev->dev);
- if (!init) {
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
dev_err(&pdev->dev, "No of match data provided\n");
return -EINVAL;
}
@@ -175,21 +191,36 @@ static int ls1x_dwmac_probe(struct platform_device *pdev)
"dt configuration failed\n");
plat_dat->bsp_priv = dwmac;
- plat_dat->init = init;
+ plat_dat->init = data->init;
dwmac->plat_dat = plat_dat;
dwmac->regmap = regmap;
+ if (data->setup) {
+ ret = data->setup(pdev, plat_dat);
+ if (ret)
+ return ret;
+ }
+
return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
+static const struct ls1x_data ls1b_dwmac_data = {
+ .setup = ls1b_dwmac_setup,
+ .init = ls1b_dwmac_syscon_init,
+};
+
+static const struct ls1x_data ls1c_dwmac_data = {
+ .init = ls1c_dwmac_syscon_init,
+};
+
static const struct of_device_id ls1x_dwmac_match[] = {
{
.compatible = "loongson,ls1b-gmac",
- .data = &ls1b_dwmac_syscon_init,
+ .data = &ls1b_dwmac_data,
},
{
.compatible = "loongson,ls1c-emac",
- .data = &ls1c_dwmac_syscon_init,
+ .data = &ls1c_dwmac_data,
},
{ }
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
index 22653ffd2a04..c68d7de1f8ac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
@@ -21,16 +21,29 @@
/* Register defines for CREG syscon */
#define LPC18XX_CREG_CREG6 0x12c
-# define LPC18XX_CREG_CREG6_ETHMODE_MASK 0x7
-# define LPC18XX_CREG_CREG6_ETHMODE_MII 0x0
-# define LPC18XX_CREG_CREG6_ETHMODE_RMII 0x4
+# define LPC18XX_CREG_CREG6_ETHMODE_MASK GENMASK(2, 0)
+
+static int lpc18xx_set_phy_intf_sel(void *bsp_priv, u8 phy_intf_sel)
+{
+ struct regmap *reg = bsp_priv;
+
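+ /* The CREG6 ETHMODE field only supports MII and RMII modes */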
+ if (phy_intf_sel != PHY_INTF_SEL_GMII_MII &&
+ phy_intf_sel != PHY_INTF_SEL_RMII)
+ return -EINVAL;
+
+ regmap_update_bits(reg, LPC18XX_CREG_CREG6,
+ LPC18XX_CREG_CREG6_ETHMODE_MASK,
+ FIELD_PREP(LPC18XX_CREG_CREG6_ETHMODE_MASK,
+ phy_intf_sel));
+
+ return 0;
+}
static int lpc18xx_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
- struct regmap *reg;
- u8 ethmode;
+ struct regmap *regmap;
int ret;
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
@@ -41,25 +54,16 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev)
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
- plat_dat->has_gmac = true;
+ plat_dat->core_type = DWMAC_CORE_GMAC;
- reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
- if (IS_ERR(reg)) {
+ regmap = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
+ if (IS_ERR(regmap)) {
dev_err(&pdev->dev, "syscon lookup failed\n");
- return PTR_ERR(reg);
- }
-
- if (plat_dat->mac_interface == PHY_INTERFACE_MODE_MII) {
- ethmode = LPC18XX_CREG_CREG6_ETHMODE_MII;
- } else if (plat_dat->mac_interface == PHY_INTERFACE_MODE_RMII) {
- ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII;
- } else {
- dev_err(&pdev->dev, "Only MII and RMII mode supported\n");
- return -EINVAL;
+ return PTR_ERR(regmap);
}
- regmap_update_bits(reg, LPC18XX_CREG_CREG6,
- LPC18XX_CREG_CREG6_ETHMODE_MASK, ethmode);
+ plat_dat->bsp_priv = regmap;
+ plat_dat->set_phy_intf_sel = lpc18xx_set_phy_intf_sel;
return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index f8ca81675407..1f2d7d19ca56 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -17,9 +17,6 @@
/* Peri Configuration register for mt2712 */
#define PERI_ETH_PHY_INTF_SEL 0x418
-#define PHY_INTF_MII 0
-#define PHY_INTF_RGMII 1
-#define PHY_INTF_RMII 4
#define RMII_CLK_SRC_RXC BIT(4)
#define RMII_CLK_SRC_INTERNAL BIT(5)
@@ -88,7 +85,8 @@ struct mediatek_dwmac_plat_data {
};
struct mediatek_dwmac_variant {
- int (*dwmac_set_phy_interface)(struct mediatek_dwmac_plat_data *plat);
+ int (*dwmac_set_phy_interface)(struct mediatek_dwmac_plat_data *plat,
+ u8 phy_intf_sel);
int (*dwmac_set_delay)(struct mediatek_dwmac_plat_data *plat);
/* clock ids to be requested */
@@ -109,29 +107,16 @@ static const char * const mt8195_dwmac_clk_l[] = {
"axi", "apb", "mac_cg", "mac_main", "ptp_ref"
};
-static int mt2712_set_interface(struct mediatek_dwmac_plat_data *plat)
+static int mt2712_set_interface(struct mediatek_dwmac_plat_data *plat,
+ u8 phy_intf_sel)
{
- int rmii_clk_from_mac = plat->rmii_clk_from_mac ? RMII_CLK_SRC_INTERNAL : 0;
- int rmii_rxc = plat->rmii_rxc ? RMII_CLK_SRC_RXC : 0;
- u32 intf_val = 0;
+ u32 intf_val = phy_intf_sel;
- /* select phy interface in top control domain */
- switch (plat->phy_mode) {
- case PHY_INTERFACE_MODE_MII:
- intf_val |= PHY_INTF_MII;
- break;
- case PHY_INTERFACE_MODE_RMII:
- intf_val |= (PHY_INTF_RMII | rmii_rxc | rmii_clk_from_mac);
- break;
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_ID:
- intf_val |= PHY_INTF_RGMII;
- break;
- default:
- dev_err(plat->dev, "phy interface not supported\n");
- return -EINVAL;
+ if (phy_intf_sel == PHY_INTF_SEL_RMII) {
+ if (plat->rmii_clk_from_mac)
+ intf_val |= RMII_CLK_SRC_INTERNAL;
+ if (plat->rmii_rxc)
+ intf_val |= RMII_CLK_SRC_RXC;
}
regmap_write(plat->peri_regmap, PERI_ETH_PHY_INTF_SEL, intf_val);
@@ -288,30 +273,16 @@ static const struct mediatek_dwmac_variant mt2712_gmac_variant = {
.tx_delay_max = 17600,
};
-static int mt8195_set_interface(struct mediatek_dwmac_plat_data *plat)
+static int mt8195_set_interface(struct mediatek_dwmac_plat_data *plat,
+ u8 phy_intf_sel)
{
- int rmii_clk_from_mac = plat->rmii_clk_from_mac ? MT8195_RMII_CLK_SRC_INTERNAL : 0;
- int rmii_rxc = plat->rmii_rxc ? MT8195_RMII_CLK_SRC_RXC : 0;
- u32 intf_val = 0;
+ u32 intf_val = FIELD_PREP(MT8195_ETH_INTF_SEL, phy_intf_sel);
- /* select phy interface in top control domain */
- switch (plat->phy_mode) {
- case PHY_INTERFACE_MODE_MII:
- intf_val |= FIELD_PREP(MT8195_ETH_INTF_SEL, PHY_INTF_MII);
- break;
- case PHY_INTERFACE_MODE_RMII:
- intf_val |= (rmii_rxc | rmii_clk_from_mac);
- intf_val |= FIELD_PREP(MT8195_ETH_INTF_SEL, PHY_INTF_RMII);
- break;
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_ID:
- intf_val |= FIELD_PREP(MT8195_ETH_INTF_SEL, PHY_INTF_RGMII);
- break;
- default:
- dev_err(plat->dev, "phy interface not supported\n");
- return -EINVAL;
+ if (phy_intf_sel == PHY_INTF_SEL_RMII) {
+ if (plat->rmii_clk_from_mac)
+ intf_val |= MT8195_RMII_CLK_SRC_INTERNAL;
+ if (plat->rmii_rxc)
+ intf_val |= MT8195_RMII_CLK_SRC_RXC;
}
/* MT8195 only support external PHY */
@@ -456,7 +427,6 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
{
struct mac_delay_struct *mac_delay = &plat->mac_delay;
u32 tx_delay_ps, rx_delay_ps;
- int err;
plat->peri_regmap = syscon_regmap_lookup_by_phandle(plat->np, "mediatek,pericfg");
if (IS_ERR(plat->peri_regmap)) {
@@ -464,12 +434,6 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
return PTR_ERR(plat->peri_regmap);
}
- err = of_get_phy_mode(plat->np, &plat->phy_mode);
- if (err) {
- dev_err(plat->dev, "not find phy-mode\n");
- return err;
- }
-
if (!of_property_read_u32(plat->np, "mediatek,tx-delay-ps", &tx_delay_ps)) {
if (tx_delay_ps < plat->variant->tx_delay_max) {
mac_delay->tx_delay = tx_delay_ps;
@@ -530,16 +494,24 @@ static int mediatek_dwmac_clk_init(struct mediatek_dwmac_plat_data *plat)
return ret;
}
-static int mediatek_dwmac_init(struct platform_device *pdev, void *priv)
+static int mediatek_dwmac_init(struct device *dev, void *priv)
{
struct mediatek_dwmac_plat_data *plat = priv;
const struct mediatek_dwmac_variant *variant = plat->variant;
- int ret;
+ int phy_intf_sel, ret;
if (variant->dwmac_set_phy_interface) {
- ret = variant->dwmac_set_phy_interface(plat);
+ phy_intf_sel = stmmac_get_phy_intf_sel(plat->phy_mode);
+ if (phy_intf_sel != PHY_INTF_SEL_GMII_MII &&
+ phy_intf_sel != PHY_INTF_SEL_RGMII &&
+ phy_intf_sel != PHY_INTF_SEL_RMII) {
+ dev_err(plat->dev, "phy interface not supported\n");
+ return phy_intf_sel < 0 ? phy_intf_sel : -EINVAL;
+ }
+
+ ret = variant->dwmac_set_phy_interface(plat, phy_intf_sel);
if (ret) {
- dev_err(plat->dev, "failed to set phy interface, err = %d\n", ret);
+ dev_err(dev, "failed to set phy interface, err = %d\n", ret);
return ret;
}
}
@@ -547,7 +519,7 @@ static int mediatek_dwmac_init(struct platform_device *pdev, void *priv)
if (variant->dwmac_set_delay) {
ret = variant->dwmac_set_delay(plat);
if (ret) {
- dev_err(plat->dev, "failed to set delay value, err = %d\n", ret);
+ dev_err(dev, "failed to set delay value, err = %d\n", ret);
return ret;
}
}
@@ -587,16 +559,16 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev,
{
int i;
- plat->mac_interface = priv_plat->phy_mode;
+ priv_plat->phy_mode = plat->phy_interface;
if (priv_plat->mac_wol)
- plat->flags |= STMMAC_FLAG_USE_PHY_WOL;
- else
plat->flags &= ~STMMAC_FLAG_USE_PHY_WOL;
+ else
+ plat->flags |= STMMAC_FLAG_USE_PHY_WOL;
plat->riwt_off = 1;
plat->maxmtu = ETH_DATA_LEN;
plat->host_dma_width = priv_plat->variant->dma_bit_mask;
plat->bsp_priv = priv_plat;
- plat->init = mediatek_dwmac_init;
+ plat->resume = mediatek_dwmac_init;
plat->clks_config = mediatek_dwmac_clks_config;
plat->safety_feat_cfg = devm_kzalloc(&pdev->dev,
@@ -661,7 +633,7 @@ static int mediatek_dwmac_probe(struct platform_device *pdev)
return PTR_ERR(plat_dat);
mediatek_dwmac_common_data(pdev, plat_dat, priv_plat);
- mediatek_dwmac_init(pdev, priv_plat);
+ mediatek_dwmac_init(&pdev->dev, priv_plat);
ret = mediatek_dwmac_clks_config(priv_plat, true);
if (ret)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
index 5469fa1b429e..07c504d07604 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -22,9 +22,10 @@ struct meson_dwmac {
void __iomem *reg;
};
-static void meson6_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+static int meson6_dwmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
- struct meson_dwmac *dwmac = priv;
+ struct meson_dwmac *dwmac = bsp_priv;
unsigned int val;
val = readl(dwmac->reg);
@@ -39,6 +40,8 @@ static void meson6_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned
}
writel(val, dwmac->reg);
+
+ return 0;
}
static int meson6_dwmac_probe(struct platform_device *pdev)
@@ -65,7 +68,7 @@ static int meson6_dwmac_probe(struct platform_device *pdev)
return PTR_ERR(dwmac->reg);
plat_dat->bsp_priv = dwmac;
- plat_dat->fix_mac_speed = meson6_dwmac_fix_mac_speed;
+ plat_dat->set_clk_tx_rate = meson6_dwmac_set_clk_tx_rate;
return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 9c2d62d133ad..e4d5c41294f4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -26,8 +26,6 @@
#define PRG_ETH0_RGMII_MODE BIT(0)
#define PRG_ETH0_EXT_PHY_MODE_MASK GENMASK(2, 0)
-#define PRG_ETH0_EXT_RGMII_MODE 1
-#define PRG_ETH0_EXT_RMII_MODE 4
/* mux to choose between fclk_div2 (bit unset) and mpll2 (bit set) */
#define PRG_ETH0_CLK_M250_SEL_MASK GENMASK(4, 4)
@@ -238,28 +236,20 @@ static int meson8b_set_phy_mode(struct meson8b_dwmac *dwmac)
static int meson_axg_set_phy_mode(struct meson8b_dwmac *dwmac)
{
- switch (dwmac->phy_mode) {
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- /* enable RGMII mode */
- meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
- PRG_ETH0_EXT_PHY_MODE_MASK,
- PRG_ETH0_EXT_RGMII_MODE);
- break;
- case PHY_INTERFACE_MODE_RMII:
- /* disable RGMII mode -> enables RMII mode */
- meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
- PRG_ETH0_EXT_PHY_MODE_MASK,
- PRG_ETH0_EXT_RMII_MODE);
- break;
- default:
+ int phy_intf_sel;
+
+ phy_intf_sel = stmmac_get_phy_intf_sel(dwmac->phy_mode);
+ if (phy_intf_sel != PHY_INTF_SEL_RGMII &&
+ phy_intf_sel != PHY_INTF_SEL_RMII) {
dev_err(dwmac->dev, "fail to set phy-mode %s\n",
phy_modes(dwmac->phy_mode));
- return -EINVAL;
+ return phy_intf_sel < 0 ? phy_intf_sel : -EINVAL;
}
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_EXT_PHY_MODE_MASK,
+ FIELD_PREP(PRG_ETH0_EXT_PHY_MODE_MASK,
+ phy_intf_sel));
+
return 0;
}
@@ -417,11 +407,7 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
return PTR_ERR(dwmac->regs);
dwmac->dev = &pdev->dev;
- ret = of_get_phy_mode(pdev->dev.of_node, &dwmac->phy_mode);
- if (ret) {
- dev_err(&pdev->dev, "missing phy-mode property\n");
- return ret;
- }
+ dwmac->phy_mode = plat_dat->phy_interface;
/* use 2ns as fallback since this value was previously hardcoded */
if (of_property_read_u32(pdev->dev.of_node, "amlogic,tx-delay-ns",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 901a3c1959fa..0826a7bd32ff 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -76,10 +76,6 @@
#define RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL BIT(6)
#define RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN BIT(5)
-/* MAC_CTRL_REG bits */
-#define ETHQOS_MAC_CTRL_SPEED_MODE BIT(14)
-#define ETHQOS_MAC_CTRL_PORT_SEL BIT(15)
-
/* EMAC_WRAPPER_SGMII_PHY_CNTRL1 bits */
#define SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN BIT(3)
@@ -96,7 +92,6 @@ struct ethqos_emac_driver_data {
bool rgmii_config_loopback_en;
bool has_emac_ge_3;
const char *link_clk_name;
- bool has_integrated_pcs;
u32 dma_addr_width;
struct dwmac4_addrs dwmac4_addrs;
bool needs_sgmii_loopback;
@@ -106,12 +101,11 @@ struct qcom_ethqos {
struct platform_device *pdev;
void __iomem *rgmii_base;
void __iomem *mac_base;
- int (*configure_func)(struct qcom_ethqos *ethqos);
+ int (*configure_func)(struct qcom_ethqos *ethqos, int speed);
unsigned int link_clk_rate;
struct clk *link_clk;
struct phy *serdes_phy;
- unsigned int speed;
int serdes_speed;
phy_interface_t phy_mode;
@@ -122,27 +116,39 @@ struct qcom_ethqos {
bool needs_sgmii_loopback;
};
-static int rgmii_readl(struct qcom_ethqos *ethqos, unsigned int offset)
+static u32 rgmii_readl(struct qcom_ethqos *ethqos, unsigned int offset)
{
return readl(ethqos->rgmii_base + offset);
}
-static void rgmii_writel(struct qcom_ethqos *ethqos,
- int value, unsigned int offset)
+static void rgmii_writel(struct qcom_ethqos *ethqos, u32 value,
+ unsigned int offset)
{
writel(value, ethqos->rgmii_base + offset);
}
-static void rgmii_updatel(struct qcom_ethqos *ethqos,
- int mask, int val, unsigned int offset)
+static void rgmii_updatel(struct qcom_ethqos *ethqos, u32 mask, u32 val,
+ unsigned int offset)
{
- unsigned int temp;
+ u32 temp;
temp = rgmii_readl(ethqos, offset);
temp = (temp & ~(mask)) | val;
rgmii_writel(ethqos, temp, offset);
}
+static void rgmii_setmask(struct qcom_ethqos *ethqos, u32 mask,
+ unsigned int offset)
+{
+ rgmii_updatel(ethqos, mask, mask, offset);
+}
+
+static void rgmii_clrmask(struct qcom_ethqos *ethqos, u32 mask,
+ unsigned int offset)
+{
+ rgmii_updatel(ethqos, mask, 0, offset);
+}
+
static void rgmii_dump(void *priv)
{
struct qcom_ethqos *ethqos = priv;
@@ -169,30 +175,17 @@ static void rgmii_dump(void *priv)
rgmii_readl(ethqos, EMAC_SYSTEM_LOW_POWER_DEBUG));
}
-/* Clock rates */
-#define RGMII_1000_NOM_CLK_FREQ (250 * 1000 * 1000UL)
-#define RGMII_ID_MODE_100_LOW_SVS_CLK_FREQ (50 * 1000 * 1000UL)
-#define RGMII_ID_MODE_10_LOW_SVS_CLK_FREQ (5 * 1000 * 1000UL)
-
static void
-ethqos_update_link_clk(struct qcom_ethqos *ethqos, unsigned int speed)
+ethqos_update_link_clk(struct qcom_ethqos *ethqos, int speed)
{
+ long rate;
+
if (!phy_interface_mode_is_rgmii(ethqos->phy_mode))
return;
- switch (speed) {
- case SPEED_1000:
- ethqos->link_clk_rate = RGMII_1000_NOM_CLK_FREQ;
- break;
-
- case SPEED_100:
- ethqos->link_clk_rate = RGMII_ID_MODE_100_LOW_SVS_CLK_FREQ;
- break;
-
- case SPEED_10:
- ethqos->link_clk_rate = RGMII_ID_MODE_10_LOW_SVS_CLK_FREQ;
- break;
- }
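+ /* rgmii_clock() returns the nominal RGMII rate for the speed
+ * (125/25/2.5 MHz); the link clock runs at twice that rate.
+ */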
+ rate = rgmii_clock(speed);
+ if (rate > 0)
+ ethqos->link_clk_rate = rate * 2;
clk_set_rate(ethqos->link_clk, ethqos->link_clk_rate);
}
@@ -213,8 +206,7 @@ qcom_ethqos_set_sgmii_loopback(struct qcom_ethqos *ethqos, bool enable)
static void ethqos_set_func_clk_en(struct qcom_ethqos *ethqos)
{
qcom_ethqos_set_sgmii_loopback(ethqos, true);
- rgmii_updatel(ethqos, RGMII_CONFIG_FUNC_CLK_EN,
- RGMII_CONFIG_FUNC_CLK_EN, RGMII_IO_MACRO_CONFIG);
+ rgmii_setmask(ethqos, RGMII_CONFIG_FUNC_CLK_EN, RGMII_IO_MACRO_CONFIG);
}
static const struct ethqos_emac_por emac_v2_3_0_por[] = {
@@ -296,7 +288,6 @@ static const struct ethqos_emac_driver_data emac_v4_0_0_data = {
.rgmii_config_loopback_en = false,
.has_emac_ge_3 = true,
.link_clk_name = "phyaux",
- .has_integrated_pcs = true,
.needs_sgmii_loopback = true,
.dma_addr_width = 36,
.dwmac4_addrs = {
@@ -320,69 +311,55 @@ static const struct ethqos_emac_driver_data emac_v4_0_0_data = {
static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
{
struct device *dev = &ethqos->pdev->dev;
- unsigned int val;
- int retry = 1000;
+ u32 val;
/* Set CDR_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CDR_EN,
- SDCC_DLL_CONFIG_CDR_EN, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG_CDR_EN, SDCC_HC_REG_DLL_CONFIG);
/* Set CDR_EXT_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CDR_EXT_EN,
- SDCC_DLL_CONFIG_CDR_EXT_EN, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG_CDR_EXT_EN,
+ SDCC_HC_REG_DLL_CONFIG);
/* Clear CK_OUT_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
- 0, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_clrmask(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
+ SDCC_HC_REG_DLL_CONFIG);
/* Set DLL_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_EN,
- SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG);
if (!ethqos->has_emac_ge_3) {
- rgmii_updatel(ethqos, SDCC_DLL_MCLK_GATING_EN,
- 0, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_clrmask(ethqos, SDCC_DLL_MCLK_GATING_EN,
+ SDCC_HC_REG_DLL_CONFIG);
- rgmii_updatel(ethqos, SDCC_DLL_CDR_FINE_PHASE,
- 0, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_clrmask(ethqos, SDCC_DLL_CDR_FINE_PHASE,
+ SDCC_HC_REG_DLL_CONFIG);
}
/* Wait for CK_OUT_EN clear */
- do {
- val = rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG);
- val &= SDCC_DLL_CONFIG_CK_OUT_EN;
- if (!val)
- break;
- mdelay(1);
- retry--;
- } while (retry > 0);
- if (!retry)
+ if (read_poll_timeout_atomic(rgmii_readl, val,
+ !(val & SDCC_DLL_CONFIG_CK_OUT_EN),
+ 1000, 1000000, false,
+ ethqos, SDCC_HC_REG_DLL_CONFIG))
dev_err(dev, "Clear CK_OUT_EN timedout\n");
/* Set CK_OUT_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
- SDCC_DLL_CONFIG_CK_OUT_EN, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
+ SDCC_HC_REG_DLL_CONFIG);
/* Wait for CK_OUT_EN set */
- retry = 1000;
- do {
- val = rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG);
- val &= SDCC_DLL_CONFIG_CK_OUT_EN;
- if (val)
- break;
- mdelay(1);
- retry--;
- } while (retry > 0);
- if (!retry)
+ if (read_poll_timeout_atomic(rgmii_readl, val,
+ val & SDCC_DLL_CONFIG_CK_OUT_EN,
+ 1000, 1000000, false,
+ ethqos, SDCC_HC_REG_DLL_CONFIG))
dev_err(dev, "Set CK_OUT_EN timedout\n");
/* Set DDR_CAL_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_CAL_EN,
- SDCC_DLL_CONFIG2_DDR_CAL_EN, SDCC_HC_REG_DLL_CONFIG2);
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG2_DDR_CAL_EN,
+ SDCC_HC_REG_DLL_CONFIG2);
if (!ethqos->has_emac_ge_3) {
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DLL_CLOCK_DIS,
- 0, SDCC_HC_REG_DLL_CONFIG2);
+ rgmii_clrmask(ethqos, SDCC_DLL_CONFIG2_DLL_CLOCK_DIS,
+ SDCC_HC_REG_DLL_CONFIG2);
rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_MCLK_FREQ_CALC,
0x1A << 10, SDCC_HC_REG_DLL_CONFIG2);
@@ -390,15 +367,14 @@ static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SEL,
BIT(2), SDCC_HC_REG_DLL_CONFIG2);
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW,
- SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW,
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW,
SDCC_HC_REG_DLL_CONFIG2);
}
return 0;
}
-static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
+static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos, int speed)
{
struct device *dev = &ethqos->pdev->dev;
int phase_shift;
@@ -412,8 +388,8 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
phase_shift = RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN;
/* Disable loopback mode */
- rgmii_updatel(ethqos, RGMII_CONFIG2_TX_TO_RX_LOOPBACK_EN,
- 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_TX_TO_RX_LOOPBACK_EN,
+ RGMII_IO_MACRO_CONFIG2);
/* Determine if this platform wants loopback enabled after programming */
if (ethqos->rgmii_config_loopback_en)
@@ -422,29 +398,26 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
loopback = 0;
/* Select RGMII, write 0 to interface select */
- rgmii_updatel(ethqos, RGMII_CONFIG_INTF_SEL,
- 0, RGMII_IO_MACRO_CONFIG);
+ rgmii_clrmask(ethqos, RGMII_CONFIG_INTF_SEL, RGMII_IO_MACRO_CONFIG);
- switch (ethqos->speed) {
+ switch (speed) {
case SPEED_1000:
- rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
- RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
- 0, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
- RGMII_CONFIG_POS_NEG_DATA_SEL,
+ rgmii_setmask(ethqos, RGMII_CONFIG_DDR_MODE,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_clrmask(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP,
- RGMII_CONFIG_PROG_SWAP, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
- 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_setmask(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_setmask(ethqos, RGMII_CONFIG_PROG_SWAP,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
+ RGMII_IO_MACRO_CONFIG2);
rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
phase_shift, RGMII_IO_MACRO_CONFIG2);
- rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
- 0, RGMII_IO_MACRO_CONFIG2);
- rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
- RGMII_CONFIG2_RX_PROG_SWAP,
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
+ RGMII_IO_MACRO_CONFIG2);
+ rgmii_setmask(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
RGMII_IO_MACRO_CONFIG2);
/* PRG_RCLK_DLY = TCXO period * TCXO_CYCLES_CNT / 2 * RX delay ns,
@@ -459,104 +432,95 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_RCLK_DLY,
57, SDCC_HC_REG_DDR_CONFIG);
}
- rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_DLY_EN,
- SDCC_DDR_CONFIG_PRG_DLY_EN,
+ rgmii_setmask(ethqos, SDCC_DDR_CONFIG_PRG_DLY_EN,
SDCC_HC_REG_DDR_CONFIG);
rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
loopback, RGMII_IO_MACRO_CONFIG);
break;
case SPEED_100:
- rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
- RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
- RGMII_CONFIG_BYPASS_TX_ID_EN,
+ rgmii_setmask(ethqos, RGMII_CONFIG_DDR_MODE,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_setmask(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_clrmask(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_clrmask(ethqos, RGMII_CONFIG_PROG_SWAP,
RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
- 0, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP,
- 0, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
- 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
+ RGMII_IO_MACRO_CONFIG2);
rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
phase_shift, RGMII_IO_MACRO_CONFIG2);
rgmii_updatel(ethqos, RGMII_CONFIG_MAX_SPD_PRG_2,
BIT(6), RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
- 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
+ RGMII_IO_MACRO_CONFIG2);
if (ethqos->has_emac_ge_3)
- rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
- RGMII_CONFIG2_RX_PROG_SWAP,
+ rgmii_setmask(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
RGMII_IO_MACRO_CONFIG2);
else
- rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
- 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
+ RGMII_IO_MACRO_CONFIG2);
/* Write 0x5 to PRG_RCLK_DLY_CODE */
rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_CODE,
(BIT(29) | BIT(27)), SDCC_HC_REG_DDR_CONFIG);
- rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
- SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
+ rgmii_setmask(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
SDCC_HC_REG_DDR_CONFIG);
- rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
- SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
+ rgmii_setmask(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
SDCC_HC_REG_DDR_CONFIG);
rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
loopback, RGMII_IO_MACRO_CONFIG);
break;
case SPEED_10:
- rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
- RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
- RGMII_CONFIG_BYPASS_TX_ID_EN,
+ rgmii_setmask(ethqos, RGMII_CONFIG_DDR_MODE,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_setmask(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
- 0, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP,
- 0, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
- 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_clrmask(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_clrmask(ethqos, RGMII_CONFIG_PROG_SWAP,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
+ RGMII_IO_MACRO_CONFIG2);
rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
phase_shift, RGMII_IO_MACRO_CONFIG2);
rgmii_updatel(ethqos, RGMII_CONFIG_MAX_SPD_PRG_9,
BIT(12) | GENMASK(9, 8),
RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
- 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
+ RGMII_IO_MACRO_CONFIG2);
if (ethqos->has_emac_ge_3)
- rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
- RGMII_CONFIG2_RX_PROG_SWAP,
+ rgmii_setmask(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
RGMII_IO_MACRO_CONFIG2);
else
- rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
- 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
+ RGMII_IO_MACRO_CONFIG2);
/* Write 0x5 to PRG_RCLK_DLY_CODE */
rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_CODE,
(BIT(29) | BIT(27)), SDCC_HC_REG_DDR_CONFIG);
- rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
- SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
+ rgmii_setmask(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
SDCC_HC_REG_DDR_CONFIG);
- rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
- SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
+ rgmii_setmask(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
SDCC_HC_REG_DDR_CONFIG);
rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
loopback, RGMII_IO_MACRO_CONFIG);
break;
default:
- dev_err(dev, "Invalid speed %d\n", ethqos->speed);
+ dev_err(dev, "Invalid speed %d\n", speed);
return -EINVAL;
}
return 0;
}
-static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos)
+static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos, int speed)
{
struct device *dev = &ethqos->pdev->dev;
- volatile unsigned int dll_lock;
- unsigned int i, retry = 1000;
+ unsigned int i;
+ u32 val;
/* Reset to POR values and enable clk */
for (i = 0; i < ethqos->num_por; i++)
@@ -567,15 +531,15 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos)
/* Initialize the DLL first */
/* Set DLL_RST */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_RST,
- SDCC_DLL_CONFIG_DLL_RST, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG_DLL_RST,
+ SDCC_HC_REG_DLL_CONFIG);
/* Set PDN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_PDN,
- SDCC_DLL_CONFIG_PDN, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG_PDN,
+ SDCC_HC_REG_DLL_CONFIG);
if (ethqos->has_emac_ge_3) {
- if (ethqos->speed == SPEED_1000) {
+ if (speed == SPEED_1000) {
rgmii_writel(ethqos, 0x1800000, SDCC_TEST_CTL);
rgmii_writel(ethqos, 0x2C010800, SDCC_USR_CTL);
rgmii_writel(ethqos, 0xA001, SDCC_HC_REG_DLL_CONFIG2);
@@ -586,21 +550,18 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos)
}
/* Clear DLL_RST */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_RST, 0,
- SDCC_HC_REG_DLL_CONFIG);
+ rgmii_clrmask(ethqos, SDCC_DLL_CONFIG_DLL_RST, SDCC_HC_REG_DLL_CONFIG);
/* Clear PDN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_PDN, 0,
- SDCC_HC_REG_DLL_CONFIG);
+ rgmii_clrmask(ethqos, SDCC_DLL_CONFIG_PDN, SDCC_HC_REG_DLL_CONFIG);
- if (ethqos->speed != SPEED_100 && ethqos->speed != SPEED_10) {
+ if (speed != SPEED_100 && speed != SPEED_10) {
/* Set DLL_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_EN,
- SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG_DLL_EN,
+ SDCC_HC_REG_DLL_CONFIG);
/* Set CK_OUT_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
- SDCC_DLL_CONFIG_CK_OUT_EN,
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
SDCC_HC_REG_DLL_CONFIG);
/* Set USR_CTL bit 26 with mask of 3 bits */
@@ -609,21 +570,17 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos)
SDCC_USR_CTL);
/* wait for DLL LOCK */
- do {
- mdelay(1);
- dll_lock = rgmii_readl(ethqos, SDC4_STATUS);
- if (dll_lock & SDC4_STATUS_DLL_LOCK)
- break;
- retry--;
- } while (retry > 0);
- if (!retry)
+ if (read_poll_timeout_atomic(rgmii_readl, val,
+ val & SDC4_STATUS_DLL_LOCK,
+ 1000, 1000000, true,
+ ethqos, SDC4_STATUS))
dev_err(dev, "Timeout while waiting for DLL lock\n");
}
- if (ethqos->speed == SPEED_1000)
+ if (speed == SPEED_1000)
ethqos_dll_configure(ethqos);
- ethqos_rgmii_macro_init(ethqos);
+ ethqos_rgmii_macro_init(ethqos, speed);
return 0;
}
@@ -636,77 +593,61 @@ static void ethqos_set_serdes_speed(struct qcom_ethqos *ethqos, int speed)
}
}
+static void ethqos_pcs_set_inband(struct stmmac_priv *priv, bool enable)
+{
+ stmmac_pcs_ctrl_ane(priv, enable, 0);
+}
+
/* On interface toggle MAC registers gets reset.
* Configure MAC block for SGMII on ethernet phy link up
*/
-static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
+static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos, int speed)
{
struct net_device *dev = platform_get_drvdata(ethqos->pdev);
struct stmmac_priv *priv = netdev_priv(dev);
- int val;
- val = readl(ethqos->mac_base + MAC_CTRL_REG);
-
- switch (ethqos->speed) {
+ switch (speed) {
case SPEED_2500:
- val &= ~ETHQOS_MAC_CTRL_PORT_SEL;
- rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
- RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
+ rgmii_setmask(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
RGMII_IO_MACRO_CONFIG2);
ethqos_set_serdes_speed(ethqos, SPEED_2500);
- stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 0, 0, 0);
+ ethqos_pcs_set_inband(priv, false);
break;
case SPEED_1000:
- val &= ~ETHQOS_MAC_CTRL_PORT_SEL;
- rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
- RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
+ rgmii_setmask(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
RGMII_IO_MACRO_CONFIG2);
ethqos_set_serdes_speed(ethqos, SPEED_1000);
- stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0);
+ ethqos_pcs_set_inband(priv, true);
break;
case SPEED_100:
- val |= ETHQOS_MAC_CTRL_PORT_SEL | ETHQOS_MAC_CTRL_SPEED_MODE;
ethqos_set_serdes_speed(ethqos, SPEED_1000);
- stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0);
+ ethqos_pcs_set_inband(priv, true);
break;
case SPEED_10:
- val |= ETHQOS_MAC_CTRL_PORT_SEL;
- val &= ~ETHQOS_MAC_CTRL_SPEED_MODE;
rgmii_updatel(ethqos, RGMII_CONFIG_SGMII_CLK_DVDR,
FIELD_PREP(RGMII_CONFIG_SGMII_CLK_DVDR,
SGMII_10M_RX_CLK_DVDR),
RGMII_IO_MACRO_CONFIG);
ethqos_set_serdes_speed(ethqos, SPEED_1000);
- stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0);
+ ethqos_pcs_set_inband(priv, true);
break;
}
- writel(val, ethqos->mac_base + MAC_CTRL_REG);
-
- return val;
-}
-
-static void qcom_ethqos_speed_mode_2500(struct net_device *ndev, void *data)
-{
- struct stmmac_priv *priv = netdev_priv(ndev);
-
- priv->plat->max_speed = 2500;
- priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX;
+ return 0;
}
-static int ethqos_configure(struct qcom_ethqos *ethqos)
+static int ethqos_configure(struct qcom_ethqos *ethqos, int speed)
{
- return ethqos->configure_func(ethqos);
+ return ethqos->configure_func(ethqos, speed);
}
-static void ethqos_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+static void ethqos_fix_mac_speed(void *priv, int speed, unsigned int mode)
{
struct qcom_ethqos *ethqos = priv;
qcom_ethqos_set_sgmii_loopback(ethqos, false);
- ethqos->speed = speed;
ethqos_update_link_clk(ethqos, speed);
- ethqos_configure(ethqos);
+ ethqos_configure(ethqos, speed);
}
static int qcom_ethqos_serdes_powerup(struct net_device *ndev, void *priv)
@@ -722,7 +663,7 @@ static int qcom_ethqos_serdes_powerup(struct net_device *ndev, void *priv)
if (ret)
return ret;
- return phy_set_speed(ethqos->serdes_phy, ethqos->speed);
+ return phy_set_speed(ethqos->serdes_phy, ethqos->serdes_speed);
}
static void qcom_ethqos_serdes_powerdown(struct net_device *ndev, void *priv)
@@ -777,7 +718,7 @@ static void ethqos_ptp_clk_freq_config(struct stmmac_priv *priv)
netdev_err(priv->dev, "Failed to max out clk_ptp_ref: %d\n", err);
plat_dat->clk_ptp_rate = clk_get_rate(plat_dat->clk_ptp_ref);
- netdev_dbg(priv->dev, "PTP rate %d\n", plat_dat->clk_ptp_rate);
+ netdev_dbg(priv->dev, "PTP rate %lu\n", plat_dat->clk_ptp_rate);
}
static int qcom_ethqos_probe(struct platform_device *pdev)
@@ -807,9 +748,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
if (!ethqos)
return -ENOMEM;
- ret = of_get_phy_mode(np, &ethqos->phy_mode);
- if (ret)
- return dev_err_probe(dev, ret, "Failed to get phy mode\n");
+ ethqos->phy_mode = plat_dat->phy_interface;
switch (ethqos->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
@@ -818,8 +757,6 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
ethqos->configure_func = ethqos_configure_rgmii;
break;
case PHY_INTERFACE_MODE_2500BASEX:
- plat_dat->speed_mode_2500 = qcom_ethqos_speed_mode_2500;
- fallthrough;
case PHY_INTERFACE_MODE_SGMII:
ethqos->configure_func = ethqos_configure_sgmii;
break;
@@ -862,7 +799,6 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(ethqos->serdes_phy),
"Failed to get serdes phy\n");
- ethqos->speed = SPEED_1000;
ethqos->serdes_speed = SPEED_1000;
ethqos_update_link_clk(ethqos, SPEED_1000);
ethqos_set_func_clk_en(ethqos);
@@ -871,7 +807,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
plat_dat->fix_mac_speed = ethqos_fix_mac_speed;
plat_dat->dump_debug_regs = rgmii_dump;
plat_dat->ptp_clk_freq_config = ethqos_ptp_clk_freq_config;
- plat_dat->has_gmac4 = 1;
+ plat_dat->core_type = DWMAC_CORE_GMAC4;
if (ethqos->has_emac_ge_3)
plat_dat->dwmac4_addrs = &data->dwmac4_addrs;
plat_dat->pmt = 1;
@@ -879,8 +815,6 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
plat_dat->flags |= STMMAC_FLAG_TSO_EN;
if (of_device_is_compatible(np, "qcom,qcs404-ethqos"))
plat_dat->flags |= STMMAC_FLAG_RX_CLK_RUNS_IN_LPI;
- if (data->has_integrated_pcs)
- plat_dat->flags |= STMMAC_FLAG_HAS_INTEGRATED_PCS;
if (data->dma_addr_width)
plat_dat->host_dma_width = data->dma_addr_width;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
new file mode 100644
index 000000000000..be7f5eb2cdcf
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * dwmac-renesas-gbeth.c - DWMAC Specific Glue layer for Renesas GBETH
+ *
+ * The Rx and Tx clocks are supplied as follows for the GBETH IP.
+ *
+ * Rx / Tx
+ * -------+------------- on / off -------
+ * |
+ * | Rx-180 / Tx-180
+ * +---- not ---- on / off -------
+ *
+ * Copyright (C) 2025 Renesas Electronics Corporation
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pcs-rzn1-miic.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/types.h>
+
+#include "stmmac_platform.h"
+
+/**
+ * struct renesas_gbeth_of_data - OF data for Renesas GBETH
+ *
+ * @clks: Array of clock names
+ * @num_clks: Number of clocks
+ * @stmmac_flags: Flags for the stmmac driver
+ * @handle_reset: Flag to indicate if reset control is
+ * handled by the glue driver or core driver.
+ * @set_clk_tx_rate: Flag to indicate if Tx clock is fixed or
+ * set_clk_tx_rate is needed.
+ * @has_pcs: Flag to indicate if the MAC has a PCS
+ */
+struct renesas_gbeth_of_data {
+ const char * const *clks;
+ u8 num_clks;
+ u32 stmmac_flags;
+ bool handle_reset;
+ bool set_clk_tx_rate;
+ bool has_pcs;
+};
+
+struct renesas_gbeth {
+ const struct renesas_gbeth_of_data *of_data;
+ struct plat_stmmacenet_data *plat_dat;
+ struct reset_control *rstc;
+ struct device *dev;
+};
+
+static const char *const renesas_gbeth_clks[] = {
+ "tx", "tx-180", "rx", "rx-180",
+};
+
+static const char *const renesas_gmac_clks[] = {
+ "tx",
+};
+
+static int renesas_gmac_pcs_init(struct stmmac_priv *priv)
+{
+ struct device_node *np = priv->device->of_node;
+ struct device_node *pcs_node;
+ struct phylink_pcs *pcs;
+
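+ /* An optional "pcs-handle" phandle points at the MIIC converter;
+ * when present it is used as the MAC's phylink PCS.
+ */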
+ pcs_node = of_parse_phandle(np, "pcs-handle", 0);
+ if (pcs_node) {
+ pcs = miic_create(priv->device, pcs_node);
+ of_node_put(pcs_node);
+ if (IS_ERR(pcs))
+ return PTR_ERR(pcs);
+
+ priv->hw->phylink_pcs = pcs;
+ }
+
+ return 0;
+}
+
+static void renesas_gmac_pcs_exit(struct stmmac_priv *priv)
+{
+ if (priv->hw->phylink_pcs)
+ miic_destroy(priv->hw->phylink_pcs);
+}
+
+static struct phylink_pcs *renesas_gmac_select_pcs(struct stmmac_priv *priv,
+ phy_interface_t interface)
+{
+ return priv->hw->phylink_pcs;
+}
+
+static int renesas_gbeth_init(struct device *dev, void *priv)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct renesas_gbeth *gbeth = priv;
+ int ret;
+
+ plat_dat = gbeth->plat_dat;
+
+ ret = reset_control_deassert(gbeth->rstc);
+ if (ret) {
+ dev_err(gbeth->dev, "Reset deassert failed\n");
+ return ret;
+ }
+
+ ret = clk_bulk_prepare_enable(plat_dat->num_clks,
+ plat_dat->clks);
+ if (ret)
+ reset_control_assert(gbeth->rstc);
+
+ return ret;
+}
+
+static void renesas_gbeth_exit(struct device *dev, void *priv)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct renesas_gbeth *gbeth = priv;
+ int ret;
+
+ plat_dat = gbeth->plat_dat;
+
+ clk_bulk_disable_unprepare(plat_dat->num_clks, plat_dat->clks);
+
+ ret = reset_control_assert(gbeth->rstc);
+ if (ret)
+ dev_err(gbeth->dev, "Reset assert failed\n");
+}
+
+static int renesas_gbeth_probe(struct platform_device *pdev)
+{
+ const struct renesas_gbeth_of_data *of_data;
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct device *dev = &pdev->dev;
+ struct renesas_gbeth *gbeth;
+ unsigned int i;
+ int err;
+
+ err = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (err)
+ return dev_err_probe(dev, err,
+ "failed to get resources\n");
+
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return dev_err_probe(dev, PTR_ERR(plat_dat),
+ "dt configuration failed\n");
+
+ gbeth = devm_kzalloc(dev, sizeof(*gbeth), GFP_KERNEL);
+ if (!gbeth)
+ return -ENOMEM;
+
+ of_data = of_device_get_match_data(&pdev->dev);
+ gbeth->of_data = of_data;
+
+ plat_dat->num_clks = of_data->num_clks;
+ plat_dat->clks = devm_kcalloc(dev, plat_dat->num_clks,
+ sizeof(*plat_dat->clks), GFP_KERNEL);
+ if (!plat_dat->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < plat_dat->num_clks; i++)
+ plat_dat->clks[i].id = of_data->clks[i];
+
+ err = devm_clk_bulk_get(dev, plat_dat->num_clks, plat_dat->clks);
+ if (err < 0)
+ return err;
+
+ plat_dat->clk_tx_i = stmmac_pltfr_find_clk(plat_dat, "tx");
+ if (!plat_dat->clk_tx_i)
+ return dev_err_probe(dev, -EINVAL,
+ "error finding tx clock\n");
+
+ if (of_data->handle_reset) {
+ gbeth->rstc = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(gbeth->rstc))
+ return PTR_ERR(gbeth->rstc);
+ }
+
+ gbeth->dev = dev;
+ gbeth->plat_dat = plat_dat;
+ plat_dat->bsp_priv = gbeth;
+ if (of_data->set_clk_tx_rate)
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
+ plat_dat->init = renesas_gbeth_init;
+ plat_dat->exit = renesas_gbeth_exit;
+ plat_dat->flags |= STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP |
+ gbeth->of_data->stmmac_flags;
+ if (of_data->has_pcs) {
+ plat_dat->pcs_init = renesas_gmac_pcs_init;
+ plat_dat->pcs_exit = renesas_gmac_pcs_exit;
+ plat_dat->select_pcs = renesas_gmac_select_pcs;
+ }
+
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
+}
+
+static const struct renesas_gbeth_of_data renesas_gbeth_of_data = {
+ .clks = renesas_gbeth_clks,
+ .num_clks = ARRAY_SIZE(renesas_gbeth_clks),
+ .handle_reset = true,
+ .set_clk_tx_rate = true,
+ .stmmac_flags = STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY |
+ STMMAC_FLAG_SPH_DISABLE,
+};
+
+static const struct renesas_gbeth_of_data renesas_gmac_of_data = {
+ .clks = renesas_gmac_clks,
+ .num_clks = ARRAY_SIZE(renesas_gmac_clks),
+ .stmmac_flags = STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY,
+ .has_pcs = true,
+};
+
+static const struct of_device_id renesas_gbeth_match[] = {
+ { .compatible = "renesas,r9a09g077-gbeth", .data = &renesas_gmac_of_data },
+ { .compatible = "renesas,rzv2h-gbeth", .data = &renesas_gbeth_of_data },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, renesas_gbeth_match);
+
+static struct platform_driver renesas_gbeth_driver = {
+ .probe = renesas_gbeth_probe,
+ .driver = {
+ .name = "renesas-gbeth",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = renesas_gbeth_match,
+ },
+};
+module_platform_driver(renesas_gbeth_driver);
+
+MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>");
+MODULE_DESCRIPTION("Renesas GBETH DWMAC Specific Glue layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 8cb374668b74..0a95f54e725e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -8,6 +8,7 @@
*/
#include <linux/stmmac.h>
+#include <linux/hw_bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/phy.h>
@@ -24,15 +25,26 @@
#include "stmmac_platform.h"
struct rk_priv_data;
+
+struct rk_reg_speed_data {
+ unsigned int rgmii_10;
+ unsigned int rgmii_100;
+ unsigned int rgmii_1000;
+ unsigned int rmii_10;
+ unsigned int rmii_100;
+};
+
struct rk_gmac_ops {
void (*set_to_rgmii)(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay);
void (*set_to_rmii)(struct rk_priv_data *bsp_priv);
- void (*set_rgmii_speed)(struct rk_priv_data *bsp_priv, int speed);
- void (*set_rmii_speed)(struct rk_priv_data *bsp_priv, int speed);
+ int (*set_speed)(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed);
void (*set_clock_selection)(struct rk_priv_data *bsp_priv, bool input,
bool enable);
void (*integrated_phy_powerup)(struct rk_priv_data *bsp_priv);
+ void (*integrated_phy_powerdown)(struct rk_priv_data *bsp_priv);
+ bool php_grf_required;
bool regs_valid;
u32 regs[];
};
@@ -56,11 +68,10 @@ enum rk_clocks_index {
};
struct rk_priv_data {
- struct platform_device *pdev;
+ struct device *dev;
phy_interface_t phy_iface;
int id;
struct regulator *regulator;
- bool suspended;
const struct rk_gmac_ops *ops;
bool clk_enabled;
@@ -69,7 +80,6 @@ struct rk_priv_data {
struct clk_bulk_data *clks;
int num_clks;
- struct clk *clk_mac;
struct clk *clk_phy;
struct reset_control *phy_reset;
@@ -81,73 +91,191 @@ struct rk_priv_data {
struct regmap *php_grf;
};
-#define HIWORD_UPDATE(val, mask, shift) \
- ((val) << (shift) | (mask) << ((shift) + 16))
+static int rk_set_reg_speed(struct rk_priv_data *bsp_priv,
+ const struct rk_reg_speed_data *rsd,
+ unsigned int reg, phy_interface_t interface,
+ int speed)
+{
+ unsigned int val;
+
+ if (phy_interface_mode_is_rgmii(interface)) {
+ if (speed == SPEED_10) {
+ val = rsd->rgmii_10;
+ } else if (speed == SPEED_100) {
+ val = rsd->rgmii_100;
+ } else if (speed == SPEED_1000) {
+ val = rsd->rgmii_1000;
+ } else {
+ /* Phylink will not allow inappropriate speeds for
+ * interface modes, so this should never happen.
+ */
+ return -EINVAL;
+ }
+ } else if (interface == PHY_INTERFACE_MODE_RMII) {
+ if (speed == SPEED_10) {
+ val = rsd->rmii_10;
+ } else if (speed == SPEED_100) {
+ val = rsd->rmii_100;
+ } else {
+ /* Phylink will not allow inappropriate speeds for
+ * interface modes, so this should never happen.
+ */
+ return -EINVAL;
+ }
+ } else {
+ /* This should never happen, as .get_interfaces() limits
+ * the interface modes that are supported to RGMII and/or
+ * RMII.
+ */
+ return -EINVAL;
+ }
+
+ regmap_write(bsp_priv->grf, reg, val);
+
+ return 0;
+
+
+static int rk_set_clk_mac_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
+{
+ struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
+ long rate;
+
+ rate = rgmii_clock(speed);
+ if (rate < 0)
+ return rate;
+
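+ /* Only RGMII and RMII can be selected via the PRG_ETH0 external
+ * PHY mode field.
+ */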
+ return clk_set_rate(clk_mac_speed, rate);
+}
-#define GRF_BIT(nr) (BIT(nr) | BIT(nr+16))
-#define GRF_CLR_BIT(nr) (BIT(nr+16))
+#define GRF_FIELD(hi, lo, val) \
+ FIELD_PREP_WM16(GENMASK_U16(hi, lo), val)
+#define GRF_FIELD_CONST(hi, lo, val) \
+ FIELD_PREP_WM16_CONST(GENMASK_U16(hi, lo), val)
+
+#define GRF_BIT(nr) (BIT(nr) | BIT(nr+16))
+#define GRF_CLR_BIT(nr) (BIT(nr+16))
#define DELAY_ENABLE(soc, tx, rx) \
(((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \
((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE))
+#define RK_GRF_MACPHY_CON0 0xb00
+#define RK_GRF_MACPHY_CON1 0xb04
+#define RK_GRF_MACPHY_CON2 0xb08
+#define RK_GRF_MACPHY_CON3 0xb0c
+
+#define RK_MACPHY_ENABLE GRF_BIT(0)
+#define RK_MACPHY_DISABLE GRF_CLR_BIT(0)
+#define RK_MACPHY_CFG_CLK_50M GRF_BIT(14)
+#define RK_GMAC2PHY_RMII_MODE GRF_FIELD(7, 6, 1)
+#define RK_GRF_CON2_MACPHY_ID GRF_FIELD(15, 0, 0x1234)
+#define RK_GRF_CON3_MACPHY_ID GRF_FIELD(5, 0, 0x35)
+
+static void rk_gmac_integrated_ephy_powerup(struct rk_priv_data *priv)
+{
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_CFG_CLK_50M);
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_GMAC2PHY_RMII_MODE);
+
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON2, RK_GRF_CON2_MACPHY_ID);
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON3, RK_GRF_CON3_MACPHY_ID);
+
+ if (priv->phy_reset) {
+ /* PHY needs to be disabled before trying to reset it */
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE);
+ reset_control_assert(priv->phy_reset);
+ usleep_range(10, 20);
+ reset_control_deassert(priv->phy_reset);
+ usleep_range(10, 20);
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_ENABLE);
+ msleep(30);
+ }
+}
+
+static void rk_gmac_integrated_ephy_powerdown(struct rk_priv_data *priv)
+{
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE);
+ if (priv->phy_reset)
+ reset_control_assert(priv->phy_reset);
+}
+
+#define RK_FEPHY_SHUTDOWN GRF_BIT(1)
+#define RK_FEPHY_POWERUP GRF_CLR_BIT(1)
+#define RK_FEPHY_INTERNAL_RMII_SEL GRF_BIT(6)
+#define RK_FEPHY_24M_CLK_SEL GRF_FIELD(9, 8, 3)
+#define RK_FEPHY_PHY_ID GRF_BIT(11)
+
+static void rk_gmac_integrated_fephy_powerup(struct rk_priv_data *priv,
+ unsigned int reg)
+{
+ reset_control_assert(priv->phy_reset);
+ usleep_range(20, 30);
+
+ regmap_write(priv->grf, reg,
+ RK_FEPHY_POWERUP |
+ RK_FEPHY_INTERNAL_RMII_SEL |
+ RK_FEPHY_24M_CLK_SEL |
+ RK_FEPHY_PHY_ID);
+ usleep_range(10000, 12000);
+
+ reset_control_deassert(priv->phy_reset);
+ usleep_range(50000, 60000);
+}
+
+static void rk_gmac_integrated_fephy_powerdown(struct rk_priv_data *priv,
+ unsigned int reg)
+{
+ regmap_write(priv->grf, reg, RK_FEPHY_SHUTDOWN);
+}
+
#define PX30_GRF_GMAC_CON1 0x0904
/* PX30_GRF_GMAC_CON1 */
-#define PX30_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | \
- GRF_BIT(6))
+#define PX30_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define PX30_GMAC_SPEED_10M GRF_CLR_BIT(2)
#define PX30_GMAC_SPEED_100M GRF_BIT(2)
static void px30_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
- PX30_GMAC_PHY_INTF_SEL_RMII);
+ PX30_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII));
}
-static void px30_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static int px30_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
- struct device *dev = &bsp_priv->pdev->dev;
- int ret;
+ struct device *dev = bsp_priv->dev;
+ unsigned int con1;
+ long rate;
if (!clk_mac_speed) {
dev_err(dev, "%s: Missing clk_mac_speed clock\n", __func__);
- return;
+ return -EINVAL;
}
if (speed == 10) {
- regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
- PX30_GMAC_SPEED_10M);
-
- ret = clk_set_rate(clk_mac_speed, 2500000);
- if (ret)
- dev_err(dev, "%s: set clk_mac_speed rate 2500000 failed: %d\n",
- __func__, ret);
+ con1 = PX30_GMAC_SPEED_10M;
+ rate = 2500000;
} else if (speed == 100) {
- regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
- PX30_GMAC_SPEED_100M);
-
- ret = clk_set_rate(clk_mac_speed, 25000000);
- if (ret)
- dev_err(dev, "%s: set clk_mac_speed rate 25000000 failed: %d\n",
- __func__, ret);
-
+ con1 = PX30_GMAC_SPEED_100M;
+ rate = 25000000;
} else {
dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ return -EINVAL;
}
+
+ regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1, con1);
+
+ return clk_set_rate(clk_mac_speed, rate);
}
static const struct rk_gmac_ops px30_ops = {
.set_to_rmii = px30_set_to_rmii,
- .set_rmii_speed = px30_set_rmii_speed,
+ .set_speed = px30_set_speed,
};
#define RK3128_GRF_MAC_CON0 0x0168
@@ -158,38 +286,28 @@ static const struct rk_gmac_ops px30_ops = {
#define RK3128_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14)
#define RK3128_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
#define RK3128_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
-#define RK3128_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
-#define RK3128_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3128_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(13, 7, val)
+#define RK3128_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
/* RK3128_GRF_MAC_CON1 */
-#define RK3128_GMAC_PHY_INTF_SEL_RGMII \
- (GRF_BIT(6) | GRF_CLR_BIT(7) | GRF_CLR_BIT(8))
-#define RK3128_GMAC_PHY_INTF_SEL_RMII \
- (GRF_CLR_BIT(6) | GRF_CLR_BIT(7) | GRF_BIT(8))
+#define RK3128_GMAC_PHY_INTF_SEL(val) GRF_FIELD(8, 6, val)
#define RK3128_GMAC_FLOW_CTRL GRF_BIT(9)
#define RK3128_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(9)
#define RK3128_GMAC_SPEED_10M GRF_CLR_BIT(10)
#define RK3128_GMAC_SPEED_100M GRF_BIT(10)
#define RK3128_GMAC_RMII_CLK_25M GRF_BIT(11)
#define RK3128_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(11)
-#define RK3128_GMAC_CLK_125M (GRF_CLR_BIT(12) | GRF_CLR_BIT(13))
-#define RK3128_GMAC_CLK_25M (GRF_BIT(12) | GRF_BIT(13))
-#define RK3128_GMAC_CLK_2_5M (GRF_CLR_BIT(12) | GRF_BIT(13))
+#define RK3128_GMAC_CLK_125M GRF_FIELD_CONST(13, 12, 0)
+#define RK3128_GMAC_CLK_25M GRF_FIELD_CONST(13, 12, 3)
+#define RK3128_GMAC_CLK_2_5M GRF_FIELD_CONST(13, 12, 2)
#define RK3128_GMAC_RMII_MODE GRF_BIT(14)
#define RK3128_GMAC_RMII_MODE_CLR GRF_CLR_BIT(14)
static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
- RK3128_GMAC_PHY_INTF_SEL_RGMII |
+ RK3128_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3128_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON0,
DELAY_ENABLE(RK3128, tx_delay, rx_delay) |
@@ -199,66 +317,30 @@ static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3128_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
- RK3128_GMAC_PHY_INTF_SEL_RMII | RK3128_GMAC_RMII_MODE);
+ RK3128_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
+ RK3128_GMAC_RMII_MODE);
}
-static void rk3128_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
- RK3128_GMAC_CLK_2_5M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
- RK3128_GMAC_CLK_25M);
- else if (speed == 1000)
- regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
- RK3128_GMAC_CLK_125M);
- else
- dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
-}
+static const struct rk_reg_speed_data rk3128_reg_speed_data = {
+ .rgmii_10 = RK3128_GMAC_CLK_2_5M,
+ .rgmii_100 = RK3128_GMAC_CLK_25M,
+ .rgmii_1000 = RK3128_GMAC_CLK_125M,
+ .rmii_10 = RK3128_GMAC_RMII_CLK_2_5M | RK3128_GMAC_SPEED_10M,
+ .rmii_100 = RK3128_GMAC_RMII_CLK_25M | RK3128_GMAC_SPEED_100M,
+};
-static void rk3128_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static int rk3128_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
- if (speed == 10) {
- regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
- RK3128_GMAC_RMII_CLK_2_5M |
- RK3128_GMAC_SPEED_10M);
- } else if (speed == 100) {
- regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
- RK3128_GMAC_RMII_CLK_25M |
- RK3128_GMAC_SPEED_100M);
- } else {
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
- }
+ return rk_set_reg_speed(bsp_priv, &rk3128_reg_speed_data,
+ RK3128_GRF_MAC_CON1, interface, speed);
}
static const struct rk_gmac_ops rk3128_ops = {
.set_to_rgmii = rk3128_set_to_rgmii,
.set_to_rmii = rk3128_set_to_rmii,
- .set_rgmii_speed = rk3128_set_rgmii_speed,
- .set_rmii_speed = rk3128_set_rmii_speed,
+ .set_speed = rk3128_set_speed,
};
#define RK3228_GRF_MAC_CON0 0x0900
@@ -267,23 +349,20 @@ static const struct rk_gmac_ops rk3128_ops = {
#define RK3228_GRF_CON_MUX 0x50
/* RK3228_GRF_MAC_CON0 */
-#define RK3228_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
-#define RK3228_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3228_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(13, 7, val)
+#define RK3228_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
/* RK3228_GRF_MAC_CON1 */
-#define RK3228_GMAC_PHY_INTF_SEL_RGMII \
- (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
-#define RK3228_GMAC_PHY_INTF_SEL_RMII \
- (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
+#define RK3228_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define RK3228_GMAC_FLOW_CTRL GRF_BIT(3)
#define RK3228_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
#define RK3228_GMAC_SPEED_10M GRF_CLR_BIT(2)
#define RK3228_GMAC_SPEED_100M GRF_BIT(2)
#define RK3228_GMAC_RMII_CLK_25M GRF_BIT(7)
#define RK3228_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(7)
-#define RK3228_GMAC_CLK_125M (GRF_CLR_BIT(8) | GRF_CLR_BIT(9))
-#define RK3228_GMAC_CLK_25M (GRF_BIT(8) | GRF_BIT(9))
-#define RK3228_GMAC_CLK_2_5M (GRF_CLR_BIT(8) | GRF_BIT(9))
+#define RK3228_GMAC_CLK_125M GRF_FIELD_CONST(9, 8, 0)
+#define RK3228_GMAC_CLK_25M GRF_FIELD_CONST(9, 8, 3)
+#define RK3228_GMAC_CLK_2_5M GRF_FIELD_CONST(9, 8, 2)
#define RK3228_GMAC_RMII_MODE GRF_BIT(10)
#define RK3228_GMAC_RMII_MODE_CLR GRF_CLR_BIT(10)
#define RK3228_GMAC_TXCLK_DLY_ENABLE GRF_BIT(0)
@@ -297,15 +376,8 @@ static const struct rk_gmac_ops rk3128_ops = {
static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
- RK3228_GMAC_PHY_INTF_SEL_RGMII |
+ RK3228_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3228_GMAC_RMII_MODE_CLR |
DELAY_ENABLE(RK3228, tx_delay, rx_delay));
@@ -316,95 +388,59 @@ static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3228_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
- RK3228_GMAC_PHY_INTF_SEL_RMII |
+ RK3228_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
RK3228_GMAC_RMII_MODE);
/* set MAC to RMII mode */
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, GRF_BIT(11));
}
-static void rk3228_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
- RK3228_GMAC_CLK_2_5M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
- RK3228_GMAC_CLK_25M);
- else if (speed == 1000)
- regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
- RK3228_GMAC_CLK_125M);
- else
- dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
-}
+static const struct rk_reg_speed_data rk3228_reg_speed_data = {
+ .rgmii_10 = RK3228_GMAC_CLK_2_5M,
+ .rgmii_100 = RK3228_GMAC_CLK_25M,
+ .rgmii_1000 = RK3228_GMAC_CLK_125M,
+ .rmii_10 = RK3228_GMAC_RMII_CLK_2_5M | RK3228_GMAC_SPEED_10M,
+ .rmii_100 = RK3228_GMAC_RMII_CLK_25M | RK3228_GMAC_SPEED_100M,
+};
-static void rk3228_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static int rk3228_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
- RK3228_GMAC_RMII_CLK_2_5M |
- RK3228_GMAC_SPEED_10M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
- RK3228_GMAC_RMII_CLK_25M |
- RK3228_GMAC_SPEED_100M);
- else
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ return rk_set_reg_speed(bsp_priv, &rk3228_reg_speed_data,
+ RK3228_GRF_MAC_CON1, interface, speed);
}
static void rk3228_integrated_phy_powerup(struct rk_priv_data *priv)
{
regmap_write(priv->grf, RK3228_GRF_CON_MUX,
RK3228_GRF_CON_MUX_GMAC_INTEGRATED_PHY);
+
+ rk_gmac_integrated_ephy_powerup(priv);
}
static const struct rk_gmac_ops rk3228_ops = {
.set_to_rgmii = rk3228_set_to_rgmii,
.set_to_rmii = rk3228_set_to_rmii,
- .set_rgmii_speed = rk3228_set_rgmii_speed,
- .set_rmii_speed = rk3228_set_rmii_speed,
- .integrated_phy_powerup = rk3228_integrated_phy_powerup,
+ .set_speed = rk3228_set_speed,
+ .integrated_phy_powerup = rk3228_integrated_phy_powerup,
+ .integrated_phy_powerdown = rk_gmac_integrated_ephy_powerdown,
};
#define RK3288_GRF_SOC_CON1 0x0248
#define RK3288_GRF_SOC_CON3 0x0250
/*RK3288_GRF_SOC_CON1*/
-#define RK3288_GMAC_PHY_INTF_SEL_RGMII (GRF_BIT(6) | GRF_CLR_BIT(7) | \
- GRF_CLR_BIT(8))
-#define RK3288_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(6) | GRF_CLR_BIT(7) | \
- GRF_BIT(8))
+#define RK3288_GMAC_PHY_INTF_SEL(val) GRF_FIELD(8, 6, val)
#define RK3288_GMAC_FLOW_CTRL GRF_BIT(9)
#define RK3288_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(9)
#define RK3288_GMAC_SPEED_10M GRF_CLR_BIT(10)
#define RK3288_GMAC_SPEED_100M GRF_BIT(10)
#define RK3288_GMAC_RMII_CLK_25M GRF_BIT(11)
#define RK3288_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(11)
-#define RK3288_GMAC_CLK_125M (GRF_CLR_BIT(12) | GRF_CLR_BIT(13))
-#define RK3288_GMAC_CLK_25M (GRF_BIT(12) | GRF_BIT(13))
-#define RK3288_GMAC_CLK_2_5M (GRF_CLR_BIT(12) | GRF_BIT(13))
+#define RK3288_GMAC_CLK_125M GRF_FIELD_CONST(13, 12, 0)
+#define RK3288_GMAC_CLK_25M GRF_FIELD_CONST(13, 12, 3)
+#define RK3288_GMAC_CLK_2_5M GRF_FIELD_CONST(13, 12, 2)
#define RK3288_GMAC_RMII_MODE GRF_BIT(14)
#define RK3288_GMAC_RMII_MODE_CLR GRF_CLR_BIT(14)
@@ -413,21 +449,14 @@ static const struct rk_gmac_ops rk3228_ops = {
#define RK3288_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14)
#define RK3288_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
#define RK3288_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
-#define RK3288_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
-#define RK3288_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3288_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(13, 7, val)
+#define RK3288_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
static void rk3288_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- RK3288_GMAC_PHY_INTF_SEL_RGMII |
+ RK3288_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3288_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON3,
DELAY_ENABLE(RK3288, tx_delay, rx_delay) |
@@ -437,73 +466,36 @@ static void rk3288_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3288_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- RK3288_GMAC_PHY_INTF_SEL_RMII | RK3288_GMAC_RMII_MODE);
+ RK3288_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
+ RK3288_GMAC_RMII_MODE);
}
-static void rk3288_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- RK3288_GMAC_CLK_2_5M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- RK3288_GMAC_CLK_25M);
- else if (speed == 1000)
- regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- RK3288_GMAC_CLK_125M);
- else
- dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
-}
+static const struct rk_reg_speed_data rk3288_reg_speed_data = {
+ .rgmii_10 = RK3288_GMAC_CLK_2_5M,
+ .rgmii_100 = RK3288_GMAC_CLK_25M,
+ .rgmii_1000 = RK3288_GMAC_CLK_125M,
+ .rmii_10 = RK3288_GMAC_RMII_CLK_2_5M | RK3288_GMAC_SPEED_10M,
+ .rmii_100 = RK3288_GMAC_RMII_CLK_25M | RK3288_GMAC_SPEED_100M,
+};
-static void rk3288_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static int rk3288_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
- if (speed == 10) {
- regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- RK3288_GMAC_RMII_CLK_2_5M |
- RK3288_GMAC_SPEED_10M);
- } else if (speed == 100) {
- regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- RK3288_GMAC_RMII_CLK_25M |
- RK3288_GMAC_SPEED_100M);
- } else {
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
- }
+ return rk_set_reg_speed(bsp_priv, &rk3288_reg_speed_data,
+ RK3288_GRF_SOC_CON1, interface, speed);
}
static const struct rk_gmac_ops rk3288_ops = {
.set_to_rgmii = rk3288_set_to_rgmii,
.set_to_rmii = rk3288_set_to_rmii,
- .set_rgmii_speed = rk3288_set_rgmii_speed,
- .set_rmii_speed = rk3288_set_rmii_speed,
+ .set_speed = rk3288_set_speed,
};
#define RK3308_GRF_MAC_CON0 0x04a0
/* RK3308_GRF_MAC_CON0 */
-#define RK3308_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(2) | GRF_CLR_BIT(3) | \
- GRF_BIT(4))
+#define RK3308_GMAC_PHY_INTF_SEL(val) GRF_FIELD(4, 2, val)
#define RK3308_GMAC_FLOW_CTRL GRF_BIT(3)
#define RK3308_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
#define RK3308_GMAC_SPEED_10M GRF_CLR_BIT(0)
@@ -511,40 +503,25 @@ static const struct rk_gmac_ops rk3288_ops = {
static void rk3308_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0,
- RK3308_GMAC_PHY_INTF_SEL_RMII);
+ RK3308_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII));
}
-static void rk3308_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
+static const struct rk_reg_speed_data rk3308_reg_speed_data = {
+ .rmii_10 = RK3308_GMAC_SPEED_10M,
+ .rmii_100 = RK3308_GMAC_SPEED_100M,
+};
- if (speed == 10) {
- regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0,
- RK3308_GMAC_SPEED_10M);
- } else if (speed == 100) {
- regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0,
- RK3308_GMAC_SPEED_100M);
- } else {
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
- }
+static int rk3308_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
+{
+ return rk_set_reg_speed(bsp_priv, &rk3308_reg_speed_data,
+ RK3308_GRF_MAC_CON0, interface, speed);
}
static const struct rk_gmac_ops rk3308_ops = {
.set_to_rmii = rk3308_set_to_rmii,
- .set_rmii_speed = rk3308_set_rmii_speed,
+ .set_speed = rk3308_set_speed,
};
#define RK3328_GRF_MAC_CON0 0x0900
@@ -553,29 +530,24 @@ static const struct rk_gmac_ops rk3308_ops = {
#define RK3328_GRF_MACPHY_CON1 0xb04
/* RK3328_GRF_MAC_CON0 */
-#define RK3328_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
-#define RK3328_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3328_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(13, 7, val)
+#define RK3328_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
/* RK3328_GRF_MAC_CON1 */
-#define RK3328_GMAC_PHY_INTF_SEL_RGMII \
- (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
-#define RK3328_GMAC_PHY_INTF_SEL_RMII \
- (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
+#define RK3328_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define RK3328_GMAC_FLOW_CTRL GRF_BIT(3)
#define RK3328_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
#define RK3328_GMAC_SPEED_10M GRF_CLR_BIT(2)
#define RK3328_GMAC_SPEED_100M GRF_BIT(2)
#define RK3328_GMAC_RMII_CLK_25M GRF_BIT(7)
#define RK3328_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(7)
-#define RK3328_GMAC_CLK_125M (GRF_CLR_BIT(11) | GRF_CLR_BIT(12))
-#define RK3328_GMAC_CLK_25M (GRF_BIT(11) | GRF_BIT(12))
-#define RK3328_GMAC_CLK_2_5M (GRF_CLR_BIT(11) | GRF_BIT(12))
+#define RK3328_GMAC_CLK_125M GRF_FIELD_CONST(12, 11, 0)
+#define RK3328_GMAC_CLK_25M GRF_FIELD_CONST(12, 11, 3)
+#define RK3328_GMAC_CLK_2_5M GRF_FIELD_CONST(12, 11, 2)
#define RK3328_GMAC_RMII_MODE GRF_BIT(9)
#define RK3328_GMAC_RMII_MODE_CLR GRF_CLR_BIT(9)
#define RK3328_GMAC_TXCLK_DLY_ENABLE GRF_BIT(0)
-#define RK3328_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(0)
#define RK3328_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1)
-#define RK3328_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(0)
/* RK3328_GRF_MACPHY_CON1 */
#define RK3328_MACPHY_RMII_MODE GRF_BIT(9)
@@ -583,15 +555,8 @@ static const struct rk_gmac_ops rk3308_ops = {
static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
- RK3328_GMAC_PHY_INTF_SEL_RGMII |
+ RK3328_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3328_GMAC_RMII_MODE_CLR |
RK3328_GMAC_RXCLK_DLY_ENABLE |
RK3328_GMAC_TXCLK_DLY_ENABLE);
@@ -603,100 +568,68 @@ static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3328_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
unsigned int reg;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
reg = bsp_priv->integrated_phy ? RK3328_GRF_MAC_CON2 :
RK3328_GRF_MAC_CON1;
regmap_write(bsp_priv->grf, reg,
- RK3328_GMAC_PHY_INTF_SEL_RMII |
+ RK3328_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
RK3328_GMAC_RMII_MODE);
}
-static void rk3328_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
- RK3328_GMAC_CLK_2_5M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
- RK3328_GMAC_CLK_25M);
- else if (speed == 1000)
- regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
- RK3328_GMAC_CLK_125M);
- else
- dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
-}
+static const struct rk_reg_speed_data rk3328_reg_speed_data = {
+ .rgmii_10 = RK3328_GMAC_CLK_2_5M,
+ .rgmii_100 = RK3328_GMAC_CLK_25M,
+ .rgmii_1000 = RK3328_GMAC_CLK_125M,
+ .rmii_10 = RK3328_GMAC_RMII_CLK_2_5M | RK3328_GMAC_SPEED_10M,
+ .rmii_100 = RK3328_GMAC_RMII_CLK_25M | RK3328_GMAC_SPEED_100M,
+};
-static void rk3328_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static int rk3328_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
unsigned int reg;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
- reg = bsp_priv->integrated_phy ? RK3328_GRF_MAC_CON2 :
- RK3328_GRF_MAC_CON1;
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, reg,
- RK3328_GMAC_RMII_CLK_2_5M |
- RK3328_GMAC_SPEED_10M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, reg,
- RK3328_GMAC_RMII_CLK_25M |
- RK3328_GMAC_SPEED_100M);
+ if (interface == PHY_INTERFACE_MODE_RMII && bsp_priv->integrated_phy)
+ reg = RK3328_GRF_MAC_CON2;
else
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ reg = RK3328_GRF_MAC_CON1;
+
+ return rk_set_reg_speed(bsp_priv, &rk3328_reg_speed_data, reg,
+ interface, speed);
}
static void rk3328_integrated_phy_powerup(struct rk_priv_data *priv)
{
regmap_write(priv->grf, RK3328_GRF_MACPHY_CON1,
RK3328_MACPHY_RMII_MODE);
+
+ rk_gmac_integrated_ephy_powerup(priv);
}
static const struct rk_gmac_ops rk3328_ops = {
.set_to_rgmii = rk3328_set_to_rgmii,
.set_to_rmii = rk3328_set_to_rmii,
- .set_rgmii_speed = rk3328_set_rgmii_speed,
- .set_rmii_speed = rk3328_set_rmii_speed,
- .integrated_phy_powerup = rk3328_integrated_phy_powerup,
+ .set_speed = rk3328_set_speed,
+ .integrated_phy_powerup = rk3328_integrated_phy_powerup,
+ .integrated_phy_powerdown = rk_gmac_integrated_ephy_powerdown,
};
#define RK3366_GRF_SOC_CON6 0x0418
#define RK3366_GRF_SOC_CON7 0x041c
/* RK3366_GRF_SOC_CON6 */
-#define RK3366_GMAC_PHY_INTF_SEL_RGMII (GRF_BIT(9) | GRF_CLR_BIT(10) | \
- GRF_CLR_BIT(11))
-#define RK3366_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(9) | GRF_CLR_BIT(10) | \
- GRF_BIT(11))
+#define RK3366_GMAC_PHY_INTF_SEL(val) GRF_FIELD(11, 9, val)
#define RK3366_GMAC_FLOW_CTRL GRF_BIT(8)
#define RK3366_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(8)
#define RK3366_GMAC_SPEED_10M GRF_CLR_BIT(7)
#define RK3366_GMAC_SPEED_100M GRF_BIT(7)
#define RK3366_GMAC_RMII_CLK_25M GRF_BIT(3)
#define RK3366_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(3)
-#define RK3366_GMAC_CLK_125M (GRF_CLR_BIT(4) | GRF_CLR_BIT(5))
-#define RK3366_GMAC_CLK_25M (GRF_BIT(4) | GRF_BIT(5))
-#define RK3366_GMAC_CLK_2_5M (GRF_CLR_BIT(4) | GRF_BIT(5))
+#define RK3366_GMAC_CLK_125M GRF_FIELD_CONST(5, 4, 0)
+#define RK3366_GMAC_CLK_25M GRF_FIELD_CONST(5, 4, 3)
+#define RK3366_GMAC_CLK_2_5M GRF_FIELD_CONST(5, 4, 2)
#define RK3366_GMAC_RMII_MODE GRF_BIT(6)
#define RK3366_GMAC_RMII_MODE_CLR GRF_CLR_BIT(6)
@@ -705,21 +638,14 @@ static const struct rk_gmac_ops rk3328_ops = {
#define RK3366_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(7)
#define RK3366_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
#define RK3366_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
-#define RK3366_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
-#define RK3366_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3366_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val)
+#define RK3366_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
static void rk3366_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
- RK3366_GMAC_PHY_INTF_SEL_RGMII |
+ RK3366_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3366_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON7,
DELAY_ENABLE(RK3366, tx_delay, rx_delay) |
@@ -729,85 +655,46 @@ static void rk3366_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3366_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
- RK3366_GMAC_PHY_INTF_SEL_RMII | RK3366_GMAC_RMII_MODE);
+ RK3366_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
+ RK3366_GMAC_RMII_MODE);
}
-static void rk3366_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
- RK3366_GMAC_CLK_2_5M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
- RK3366_GMAC_CLK_25M);
- else if (speed == 1000)
- regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
- RK3366_GMAC_CLK_125M);
- else
- dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
-}
+static const struct rk_reg_speed_data rk3366_reg_speed_data = {
+ .rgmii_10 = RK3366_GMAC_CLK_2_5M,
+ .rgmii_100 = RK3366_GMAC_CLK_25M,
+ .rgmii_1000 = RK3366_GMAC_CLK_125M,
+ .rmii_10 = RK3366_GMAC_RMII_CLK_2_5M | RK3366_GMAC_SPEED_10M,
+ .rmii_100 = RK3366_GMAC_RMII_CLK_25M | RK3366_GMAC_SPEED_100M,
+};
-static void rk3366_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static int rk3366_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
- if (speed == 10) {
- regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
- RK3366_GMAC_RMII_CLK_2_5M |
- RK3366_GMAC_SPEED_10M);
- } else if (speed == 100) {
- regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
- RK3366_GMAC_RMII_CLK_25M |
- RK3366_GMAC_SPEED_100M);
- } else {
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
- }
+ return rk_set_reg_speed(bsp_priv, &rk3366_reg_speed_data,
+ RK3366_GRF_SOC_CON6, interface, speed);
}
static const struct rk_gmac_ops rk3366_ops = {
.set_to_rgmii = rk3366_set_to_rgmii,
.set_to_rmii = rk3366_set_to_rmii,
- .set_rgmii_speed = rk3366_set_rgmii_speed,
- .set_rmii_speed = rk3366_set_rmii_speed,
+ .set_speed = rk3366_set_speed,
};
#define RK3368_GRF_SOC_CON15 0x043c
#define RK3368_GRF_SOC_CON16 0x0440
/* RK3368_GRF_SOC_CON15 */
-#define RK3368_GMAC_PHY_INTF_SEL_RGMII (GRF_BIT(9) | GRF_CLR_BIT(10) | \
- GRF_CLR_BIT(11))
-#define RK3368_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(9) | GRF_CLR_BIT(10) | \
- GRF_BIT(11))
+#define RK3368_GMAC_PHY_INTF_SEL(val) GRF_FIELD(11, 9, val)
#define RK3368_GMAC_FLOW_CTRL GRF_BIT(8)
#define RK3368_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(8)
#define RK3368_GMAC_SPEED_10M GRF_CLR_BIT(7)
#define RK3368_GMAC_SPEED_100M GRF_BIT(7)
#define RK3368_GMAC_RMII_CLK_25M GRF_BIT(3)
#define RK3368_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(3)
-#define RK3368_GMAC_CLK_125M (GRF_CLR_BIT(4) | GRF_CLR_BIT(5))
-#define RK3368_GMAC_CLK_25M (GRF_BIT(4) | GRF_BIT(5))
-#define RK3368_GMAC_CLK_2_5M (GRF_CLR_BIT(4) | GRF_BIT(5))
+#define RK3368_GMAC_CLK_125M GRF_FIELD_CONST(5, 4, 0)
+#define RK3368_GMAC_CLK_25M GRF_FIELD_CONST(5, 4, 3)
+#define RK3368_GMAC_CLK_2_5M GRF_FIELD_CONST(5, 4, 2)
#define RK3368_GMAC_RMII_MODE GRF_BIT(6)
#define RK3368_GMAC_RMII_MODE_CLR GRF_CLR_BIT(6)
@@ -816,21 +703,14 @@ static const struct rk_gmac_ops rk3366_ops = {
#define RK3368_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(7)
#define RK3368_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
#define RK3368_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
-#define RK3368_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
-#define RK3368_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3368_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val)
+#define RK3368_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
static void rk3368_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
- RK3368_GMAC_PHY_INTF_SEL_RGMII |
+ RK3368_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3368_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON16,
DELAY_ENABLE(RK3368, tx_delay, rx_delay) |
@@ -840,85 +720,46 @@ static void rk3368_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3368_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
- RK3368_GMAC_PHY_INTF_SEL_RMII | RK3368_GMAC_RMII_MODE);
+ RK3368_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
+ RK3368_GMAC_RMII_MODE);
}
-static void rk3368_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
- RK3368_GMAC_CLK_2_5M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
- RK3368_GMAC_CLK_25M);
- else if (speed == 1000)
- regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
- RK3368_GMAC_CLK_125M);
- else
- dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
-}
+static const struct rk_reg_speed_data rk3368_reg_speed_data = {
+ .rgmii_10 = RK3368_GMAC_CLK_2_5M,
+ .rgmii_100 = RK3368_GMAC_CLK_25M,
+ .rgmii_1000 = RK3368_GMAC_CLK_125M,
+ .rmii_10 = RK3368_GMAC_RMII_CLK_2_5M | RK3368_GMAC_SPEED_10M,
+ .rmii_100 = RK3368_GMAC_RMII_CLK_25M | RK3368_GMAC_SPEED_100M,
+};
-static void rk3368_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static int rk3368_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
- if (speed == 10) {
- regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
- RK3368_GMAC_RMII_CLK_2_5M |
- RK3368_GMAC_SPEED_10M);
- } else if (speed == 100) {
- regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
- RK3368_GMAC_RMII_CLK_25M |
- RK3368_GMAC_SPEED_100M);
- } else {
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
- }
+ return rk_set_reg_speed(bsp_priv, &rk3368_reg_speed_data,
+ RK3368_GRF_SOC_CON15, interface, speed);
}
static const struct rk_gmac_ops rk3368_ops = {
.set_to_rgmii = rk3368_set_to_rgmii,
.set_to_rmii = rk3368_set_to_rmii,
- .set_rgmii_speed = rk3368_set_rgmii_speed,
- .set_rmii_speed = rk3368_set_rmii_speed,
+ .set_speed = rk3368_set_speed,
};
#define RK3399_GRF_SOC_CON5 0xc214
#define RK3399_GRF_SOC_CON6 0xc218
/* RK3399_GRF_SOC_CON5 */
-#define RK3399_GMAC_PHY_INTF_SEL_RGMII (GRF_BIT(9) | GRF_CLR_BIT(10) | \
- GRF_CLR_BIT(11))
-#define RK3399_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(9) | GRF_CLR_BIT(10) | \
- GRF_BIT(11))
+#define RK3399_GMAC_PHY_INTF_SEL(val) GRF_FIELD(11, 9, val)
#define RK3399_GMAC_FLOW_CTRL GRF_BIT(8)
#define RK3399_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(8)
#define RK3399_GMAC_SPEED_10M GRF_CLR_BIT(7)
#define RK3399_GMAC_SPEED_100M GRF_BIT(7)
#define RK3399_GMAC_RMII_CLK_25M GRF_BIT(3)
#define RK3399_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(3)
-#define RK3399_GMAC_CLK_125M (GRF_CLR_BIT(4) | GRF_CLR_BIT(5))
-#define RK3399_GMAC_CLK_25M (GRF_BIT(4) | GRF_BIT(5))
-#define RK3399_GMAC_CLK_2_5M (GRF_CLR_BIT(4) | GRF_BIT(5))
+#define RK3399_GMAC_CLK_125M GRF_FIELD_CONST(5, 4, 0)
+#define RK3399_GMAC_CLK_25M GRF_FIELD_CONST(5, 4, 3)
+#define RK3399_GMAC_CLK_2_5M GRF_FIELD_CONST(5, 4, 2)
#define RK3399_GMAC_RMII_MODE GRF_BIT(6)
#define RK3399_GMAC_RMII_MODE_CLR GRF_CLR_BIT(6)
@@ -927,21 +768,14 @@ static const struct rk_gmac_ops rk3368_ops = {
#define RK3399_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(7)
#define RK3399_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
#define RK3399_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
-#define RK3399_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
-#define RK3399_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3399_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val)
+#define RK3399_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
static void rk3399_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
- RK3399_GMAC_PHY_INTF_SEL_RGMII |
+ RK3399_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3399_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON6,
DELAY_ENABLE(RK3399, tx_delay, rx_delay) |
@@ -951,66 +785,226 @@ static void rk3399_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3399_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
+ regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
+ RK3399_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
+ RK3399_GMAC_RMII_MODE);
+}
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
+static const struct rk_reg_speed_data rk3399_reg_speed_data = {
+ .rgmii_10 = RK3399_GMAC_CLK_2_5M,
+ .rgmii_100 = RK3399_GMAC_CLK_25M,
+ .rgmii_1000 = RK3399_GMAC_CLK_125M,
+ .rmii_10 = RK3399_GMAC_RMII_CLK_2_5M | RK3399_GMAC_SPEED_10M,
+ .rmii_100 = RK3399_GMAC_RMII_CLK_25M | RK3399_GMAC_SPEED_100M,
+};
- regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
- RK3399_GMAC_PHY_INTF_SEL_RMII | RK3399_GMAC_RMII_MODE);
+static int rk3399_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
+{
+ return rk_set_reg_speed(bsp_priv, &rk3399_reg_speed_data,
+ RK3399_GRF_SOC_CON5, interface, speed);
}
-static void rk3399_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static const struct rk_gmac_ops rk3399_ops = {
+ .set_to_rgmii = rk3399_set_to_rgmii,
+ .set_to_rmii = rk3399_set_to_rmii,
+ .set_speed = rk3399_set_speed,
+};
+
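+/* RK3506: GMAC0 is controlled via GRF_SOC_CON8, GMAC1 via GRF_SOC_CON11 */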
+#define RK3506_GRF_SOC_CON8 0x0020
+#define RK3506_GRF_SOC_CON11 0x002c
+
+#define RK3506_GMAC_RMII_MODE GRF_BIT(1)
+
+#define RK3506_GMAC_CLK_RMII_DIV2 GRF_BIT(3)
+#define RK3506_GMAC_CLK_RMII_DIV20 GRF_CLR_BIT(3)
+
+#define RK3506_GMAC_CLK_SELECT_CRU GRF_CLR_BIT(5)
+#define RK3506_GMAC_CLK_SELECT_IO GRF_BIT(5)
+
+#define RK3506_GMAC_CLK_RMII_GATE GRF_BIT(2)
+#define RK3506_GMAC_CLK_RMII_NOGATE GRF_CLR_BIT(2)
+
+static void rk3506_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
+ unsigned int id = bsp_priv->id, offset;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
+ offset = (id == 1) ? RK3506_GRF_SOC_CON11 : RK3506_GRF_SOC_CON8;
+ regmap_write(bsp_priv->grf, offset, RK3506_GMAC_RMII_MODE);
+}
+
+static const struct rk_reg_speed_data rk3506_reg_speed_data = {
+ .rmii_10 = RK3506_GMAC_CLK_RMII_DIV20,
+ .rmii_100 = RK3506_GMAC_CLK_RMII_DIV2,
+};
+
+static int rk3506_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
+{
+ unsigned int id = bsp_priv->id, offset;
+
+ offset = (id == 1) ? RK3506_GRF_SOC_CON11 : RK3506_GRF_SOC_CON8;
+ return rk_set_reg_speed(bsp_priv, &rk3506_reg_speed_data,
+ offset, interface, speed);
+}
+
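+/*
+ * Select the RMII reference clock source ('input' picks the IO pad,
+ * otherwise the CRU) and gate or ungate it.
+ */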
+static void rk3506_set_clock_selection(struct rk_priv_data *bsp_priv,
+ bool input, bool enable)
+{
+ unsigned int value, offset, id = bsp_priv->id;
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
- RK3399_GMAC_CLK_2_5M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
- RK3399_GMAC_CLK_25M);
- else if (speed == 1000)
- regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
- RK3399_GMAC_CLK_125M);
+ offset = (id == 1) ? RK3506_GRF_SOC_CON11 : RK3506_GRF_SOC_CON8;
+
+ value = input ? RK3506_GMAC_CLK_SELECT_IO :
+ RK3506_GMAC_CLK_SELECT_CRU;
+ value |= enable ? RK3506_GMAC_CLK_RMII_NOGATE :
+ RK3506_GMAC_CLK_RMII_GATE;
+ regmap_write(bsp_priv->grf, offset, value);
+}
+
+static const struct rk_gmac_ops rk3506_ops = {
+ .set_to_rmii = rk3506_set_to_rmii,
+ .set_speed = rk3506_set_speed,
+ .set_clock_selection = rk3506_set_clock_selection,
+ .regs_valid = true,
+ .regs = {
+ 0xff4c8000, /* gmac0 */
+ 0xff4d0000, /* gmac1 */
+ 0x0, /* sentinel */
+ },
+};
+
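+/* RK3528: GMAC0 is configured through the VO GRF, GMAC1 through the VPU GRF */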
+#define RK3528_VO_GRF_GMAC_CON 0x0018
+#define RK3528_VO_GRF_MACPHY_CON0 0x001c
+#define RK3528_VO_GRF_MACPHY_CON1 0x0020
+#define RK3528_VPU_GRF_GMAC_CON5 0x0018
+#define RK3528_VPU_GRF_GMAC_CON6 0x001c
+
+#define RK3528_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
+#define RK3528_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
+#define RK3528_GMAC_TXCLK_DLY_ENABLE GRF_BIT(14)
+#define RK3528_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14)
+
+#define RK3528_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(15, 8, val)
+#define RK3528_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(7, 0, val)
+
+#define RK3528_GMAC0_PHY_INTF_SEL_RMII GRF_BIT(1)
+#define RK3528_GMAC1_PHY_INTF_SEL_RGMII GRF_CLR_BIT(8)
+#define RK3528_GMAC1_PHY_INTF_SEL_RMII GRF_BIT(8)
+
+#define RK3528_GMAC1_CLK_SELECT_CRU GRF_CLR_BIT(12)
+#define RK3528_GMAC1_CLK_SELECT_IO GRF_BIT(12)
+
+#define RK3528_GMAC0_CLK_RMII_DIV2 GRF_BIT(3)
+#define RK3528_GMAC0_CLK_RMII_DIV20 GRF_CLR_BIT(3)
+#define RK3528_GMAC1_CLK_RMII_DIV2 GRF_BIT(10)
+#define RK3528_GMAC1_CLK_RMII_DIV20 GRF_CLR_BIT(10)
+
+#define RK3528_GMAC1_CLK_RGMII_DIV1 GRF_FIELD_CONST(11, 10, 0)
+#define RK3528_GMAC1_CLK_RGMII_DIV5 GRF_FIELD_CONST(11, 10, 3)
+#define RK3528_GMAC1_CLK_RGMII_DIV50 GRF_FIELD_CONST(11, 10, 2)
+
+#define RK3528_GMAC0_CLK_RMII_GATE GRF_BIT(2)
+#define RK3528_GMAC0_CLK_RMII_NOGATE GRF_CLR_BIT(2)
+#define RK3528_GMAC1_CLK_RMII_GATE GRF_BIT(9)
+#define RK3528_GMAC1_CLK_RMII_NOGATE GRF_CLR_BIT(9)
+
+static void rk3528_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
+ RK3528_GMAC1_PHY_INTF_SEL_RGMII);
+
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
+ DELAY_ENABLE(RK3528, tx_delay, rx_delay));
+
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON6,
+ RK3528_GMAC_CLK_RX_DL_CFG(rx_delay) |
+ RK3528_GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rk3528_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ if (bsp_priv->id == 1)
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
+ RK3528_GMAC1_PHY_INTF_SEL_RMII);
else
- dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
+ regmap_write(bsp_priv->grf, RK3528_VO_GRF_GMAC_CON,
+ RK3528_GMAC0_PHY_INTF_SEL_RMII |
+ RK3528_GMAC0_CLK_RMII_DIV2);
}
-static void rk3399_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static const struct rk_reg_speed_data rk3528_gmac0_reg_speed_data = {
+ .rmii_10 = RK3528_GMAC0_CLK_RMII_DIV20,
+ .rmii_100 = RK3528_GMAC0_CLK_RMII_DIV2,
+};
+
+static const struct rk_reg_speed_data rk3528_gmac1_reg_speed_data = {
+ .rgmii_10 = RK3528_GMAC1_CLK_RGMII_DIV50,
+ .rgmii_100 = RK3528_GMAC1_CLK_RGMII_DIV5,
+ .rgmii_1000 = RK3528_GMAC1_CLK_RGMII_DIV1,
+ .rmii_10 = RK3528_GMAC1_CLK_RMII_DIV20,
+ .rmii_100 = RK3528_GMAC1_CLK_RMII_DIV2,
+};
+
+static int rk3528_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
+ const struct rk_reg_speed_data *rsd;
+ unsigned int reg;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
+ if (bsp_priv->id == 1) {
+ rsd = &rk3528_gmac1_reg_speed_data;
+ reg = RK3528_VPU_GRF_GMAC_CON5;
+ } else {
+ rsd = &rk3528_gmac0_reg_speed_data;
+ reg = RK3528_VO_GRF_GMAC_CON;
}
- if (speed == 10) {
- regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
- RK3399_GMAC_RMII_CLK_2_5M |
- RK3399_GMAC_SPEED_10M);
- } else if (speed == 100) {
- regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
- RK3399_GMAC_RMII_CLK_25M |
- RK3399_GMAC_SPEED_100M);
+ return rk_set_reg_speed(bsp_priv, rsd, reg, interface, speed);
+}
+
+static void rk3528_set_clock_selection(struct rk_priv_data *bsp_priv,
+ bool input, bool enable)
+{
+ unsigned int val;
+
+ if (bsp_priv->id == 1) {
+ val = input ? RK3528_GMAC1_CLK_SELECT_IO :
+ RK3528_GMAC1_CLK_SELECT_CRU;
+ val |= enable ? RK3528_GMAC1_CLK_RMII_NOGATE :
+ RK3528_GMAC1_CLK_RMII_GATE;
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5, val);
} else {
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ val = enable ? RK3528_GMAC0_CLK_RMII_NOGATE :
+ RK3528_GMAC0_CLK_RMII_GATE;
+ regmap_write(bsp_priv->grf, RK3528_VO_GRF_GMAC_CON, val);
}
}
-static const struct rk_gmac_ops rk3399_ops = {
- .set_to_rgmii = rk3399_set_to_rgmii,
- .set_to_rmii = rk3399_set_to_rmii,
- .set_rgmii_speed = rk3399_set_rgmii_speed,
- .set_rmii_speed = rk3399_set_rmii_speed,
+static void rk3528_integrated_phy_powerup(struct rk_priv_data *bsp_priv)
+{
+ rk_gmac_integrated_fephy_powerup(bsp_priv, RK3528_VO_GRF_MACPHY_CON0);
+}
+
+static void rk3528_integrated_phy_powerdown(struct rk_priv_data *bsp_priv)
+{
+ rk_gmac_integrated_fephy_powerdown(bsp_priv, RK3528_VO_GRF_MACPHY_CON0);
+}
+
+static const struct rk_gmac_ops rk3528_ops = {
+ .set_to_rgmii = rk3528_set_to_rgmii,
+ .set_to_rmii = rk3528_set_to_rmii,
+ .set_speed = rk3528_set_speed,
+ .set_clock_selection = rk3528_set_clock_selection,
+ .integrated_phy_powerup = rk3528_integrated_phy_powerup,
+ .integrated_phy_powerdown = rk3528_integrated_phy_powerdown,
+ .regs_valid = true,
+ .regs = {
+ 0xffbd0000, /* gmac0 */
+ 0xffbe0000, /* gmac1 */
+ 0x0, /* sentinel */
+ },
};
#define RK3568_GRF_GMAC0_CON0 0x0380
@@ -1019,10 +1013,7 @@ static const struct rk_gmac_ops rk3399_ops = {
#define RK3568_GRF_GMAC1_CON1 0x038c
/* RK3568_GRF_GMAC0_CON1 && RK3568_GRF_GMAC1_CON1 */
-#define RK3568_GMAC_PHY_INTF_SEL_RGMII \
- (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
-#define RK3568_GMAC_PHY_INTF_SEL_RMII \
- (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
+#define RK3568_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define RK3568_GMAC_FLOW_CTRL GRF_BIT(3)
#define RK3568_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
#define RK3568_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1)
@@ -1031,20 +1022,14 @@ static const struct rk_gmac_ops rk3399_ops = {
#define RK3568_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(0)
/* RK3568_GRF_GMAC0_CON0 && RK3568_GRF_GMAC1_CON0 */
-#define RK3568_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
-#define RK3568_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3568_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val)
+#define RK3568_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
static void rk3568_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
u32 con0, con1;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
con0 = (bsp_priv->id == 1) ? RK3568_GRF_GMAC1_CON0 :
RK3568_GRF_GMAC0_CON0;
con1 = (bsp_priv->id == 1) ? RK3568_GRF_GMAC1_CON1 :
@@ -1055,59 +1040,25 @@ static void rk3568_set_to_rgmii(struct rk_priv_data *bsp_priv,
RK3568_GMAC_CLK_TX_DL_CFG(tx_delay));
regmap_write(bsp_priv->grf, con1,
- RK3568_GMAC_PHY_INTF_SEL_RGMII |
+ RK3568_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3568_GMAC_RXCLK_DLY_ENABLE |
RK3568_GMAC_TXCLK_DLY_ENABLE);
}
static void rk3568_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
u32 con1;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
con1 = (bsp_priv->id == 1) ? RK3568_GRF_GMAC1_CON1 :
RK3568_GRF_GMAC0_CON1;
- regmap_write(bsp_priv->grf, con1, RK3568_GMAC_PHY_INTF_SEL_RMII);
-}
-
-static void rk3568_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
- struct device *dev = &bsp_priv->pdev->dev;
- unsigned long rate;
- int ret;
-
- switch (speed) {
- case 10:
- rate = 2500000;
- break;
- case 100:
- rate = 25000000;
- break;
- case 1000:
- rate = 125000000;
- break;
- default:
- dev_err(dev, "unknown speed value for GMAC speed=%d", speed);
- return;
- }
-
- ret = clk_set_rate(clk_mac_speed, rate);
- if (ret)
- dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
- __func__, rate, ret);
+ regmap_write(bsp_priv->grf, con1,
+ RK3568_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII));
}
static const struct rk_gmac_ops rk3568_ops = {
.set_to_rgmii = rk3568_set_to_rgmii,
.set_to_rmii = rk3568_set_to_rmii,
- .set_rgmii_speed = rk3568_set_gmac_speed,
- .set_rmii_speed = rk3568_set_gmac_speed,
+ .set_speed = rk_set_clk_mac_speed,
.regs_valid = true,
.regs = {
0xfe2a0000, /* gmac0 */
@@ -1127,8 +1078,8 @@ static const struct rk_gmac_ops rk3568_ops = {
#define RK3576_GMAC_TXCLK_DLY_ENABLE GRF_BIT(7)
#define RK3576_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(7)
-#define RK3576_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
-#define RK3576_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3576_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val)
+#define RK3576_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
/* SDGMAC_GRF */
#define RK3576_GRF_GMAC_CON0 0X0020
@@ -1143,12 +1094,9 @@ static const struct rk_gmac_ops rk3568_ops = {
#define RK3576_GMAC_CLK_RMII_DIV2 GRF_BIT(5)
#define RK3576_GMAC_CLK_RMII_DIV20 GRF_CLR_BIT(5)
-#define RK3576_GMAC_CLK_RGMII_DIV1 \
- (GRF_CLR_BIT(6) | GRF_CLR_BIT(5))
-#define RK3576_GMAC_CLK_RGMII_DIV5 \
- (GRF_BIT(6) | GRF_BIT(5))
-#define RK3576_GMAC_CLK_RGMII_DIV50 \
- (GRF_BIT(6) | GRF_CLR_BIT(5))
+#define RK3576_GMAC_CLK_RGMII_DIV1 GRF_FIELD_CONST(6, 5, 0)
+#define RK3576_GMAC_CLK_RGMII_DIV5 GRF_FIELD_CONST(6, 5, 3)
+#define RK3576_GMAC_CLK_RGMII_DIV50 GRF_FIELD_CONST(6, 5, 2)
#define RK3576_GMAC_CLK_RMII_GATE GRF_BIT(4)
#define RK3576_GMAC_CLK_RMII_NOGATE GRF_CLR_BIT(4)
@@ -1156,14 +1104,8 @@ static const struct rk_gmac_ops rk3568_ops = {
static void rk3576_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
unsigned int offset_con;
- if (IS_ERR(bsp_priv->grf) || IS_ERR(bsp_priv->php_grf)) {
- dev_err(dev, "Missing rockchip,grf or rockchip,php-grf property\n");
- return;
- }
-
offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
RK3576_GRF_GMAC_CON0;
@@ -1189,56 +1131,32 @@ static void rk3576_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3576_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
unsigned int offset_con;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
RK3576_GRF_GMAC_CON0;
regmap_write(bsp_priv->grf, offset_con, RK3576_GMAC_RMII_MODE);
}
-static void rk3576_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
- unsigned int val = 0, offset_con;
+static const struct rk_reg_speed_data rk3576_reg_speed_data = {
+ .rgmii_10 = RK3576_GMAC_CLK_RGMII_DIV50,
+ .rgmii_100 = RK3576_GMAC_CLK_RGMII_DIV5,
+ .rgmii_1000 = RK3576_GMAC_CLK_RGMII_DIV1,
+ .rmii_10 = RK3576_GMAC_CLK_RMII_DIV20,
+ .rmii_100 = RK3576_GMAC_CLK_RMII_DIV2,
+};
- switch (speed) {
- case 10:
- if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
- val = RK3576_GMAC_CLK_RMII_DIV20;
- else
- val = RK3576_GMAC_CLK_RGMII_DIV50;
- break;
- case 100:
- if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
- val = RK3576_GMAC_CLK_RMII_DIV2;
- else
- val = RK3576_GMAC_CLK_RGMII_DIV5;
- break;
- case 1000:
- if (bsp_priv->phy_iface != PHY_INTERFACE_MODE_RMII)
- val = RK3576_GMAC_CLK_RGMII_DIV1;
- else
- goto err;
- break;
- default:
- goto err;
- }
+static int rk3576_set_gmac_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
+{
+ unsigned int offset_con;
offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
RK3576_GRF_GMAC_CON0;
- regmap_write(bsp_priv->grf, offset_con, val);
-
- return;
-err:
- dev_err(dev, "unknown speed value for GMAC speed=%d", speed);
+	return rk_set_reg_speed(bsp_priv, &rk3576_reg_speed_data, offset_con,
+ interface, speed);
}
static void rk3576_set_clock_selection(struct rk_priv_data *bsp_priv, bool input,
@@ -1260,9 +1178,9 @@ static void rk3576_set_clock_selection(struct rk_priv_data *bsp_priv, bool input
static const struct rk_gmac_ops rk3576_ops = {
.set_to_rgmii = rk3576_set_to_rgmii,
.set_to_rmii = rk3576_set_to_rmii,
- .set_rgmii_speed = rk3576_set_gmac_speed,
- .set_rmii_speed = rk3576_set_gmac_speed,
+ .set_speed = rk3576_set_gmac_speed,
.set_clock_selection = rk3576_set_clock_selection,
+ .php_grf_required = true,
.regs_valid = true,
.regs = {
0x2a220000, /* gmac0 */
@@ -1281,17 +1199,15 @@ static const struct rk_gmac_ops rk3576_ops = {
#define RK3588_GMAC_TXCLK_DLY_ENABLE(id) GRF_BIT(2 * (id) + 2)
#define RK3588_GMAC_TXCLK_DLY_DISABLE(id) GRF_CLR_BIT(2 * (id) + 2)
-#define RK3588_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 8)
-#define RK3588_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 0)
+#define RK3588_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(15, 8, val)
+#define RK3588_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(7, 0, val)
/* php_grf */
#define RK3588_GRF_GMAC_CON0 0X0008
#define RK3588_GRF_CLK_CON1 0X0070
-#define RK3588_GMAC_PHY_INTF_SEL_RGMII(id) \
- (GRF_BIT(3 + (id) * 6) | GRF_CLR_BIT(4 + (id) * 6) | GRF_CLR_BIT(5 + (id) * 6))
-#define RK3588_GMAC_PHY_INTF_SEL_RMII(id) \
- (GRF_CLR_BIT(3 + (id) * 6) | GRF_CLR_BIT(4 + (id) * 6) | GRF_BIT(5 + (id) * 6))
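+/* The per-controller interface-select fields are spaced 6 bits apart */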
+#define RK3588_GMAC_PHY_INTF_SEL(id, val) \
+ (GRF_FIELD(5, 3, val) << ((id) * 6))
#define RK3588_GMAC_CLK_RMII_MODE(id) GRF_BIT(5 * (id))
#define RK3588_GMAC_CLK_RGMII_MODE(id) GRF_CLR_BIT(5 * (id))
@@ -1303,11 +1219,11 @@ static const struct rk_gmac_ops rk3576_ops = {
#define RK3588_GMA_CLK_RMII_DIV20(id) GRF_CLR_BIT(5 * (id) + 2)
#define RK3588_GMAC_CLK_RGMII_DIV1(id) \
- (GRF_CLR_BIT(5 * (id) + 2) | GRF_CLR_BIT(5 * (id) + 3))
+ (GRF_FIELD_CONST(3, 2, 0) << ((id) * 5))
#define RK3588_GMAC_CLK_RGMII_DIV5(id) \
- (GRF_BIT(5 * (id) + 2) | GRF_BIT(5 * (id) + 3))
+ (GRF_FIELD_CONST(3, 2, 3) << ((id) * 5))
#define RK3588_GMAC_CLK_RGMII_DIV50(id) \
- (GRF_CLR_BIT(5 * (id) + 2) | GRF_BIT(5 * (id) + 3))
+ (GRF_FIELD_CONST(3, 2, 2) << ((id) * 5))
#define RK3588_GMAC_CLK_RMII_GATE(id) GRF_BIT(5 * (id) + 1)
#define RK3588_GMAC_CLK_RMII_NOGATE(id) GRF_CLR_BIT(5 * (id) + 1)
@@ -1315,19 +1231,13 @@ static const struct rk_gmac_ops rk3576_ops = {
static void rk3588_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
u32 offset_con, id = bsp_priv->id;
- if (IS_ERR(bsp_priv->grf) || IS_ERR(bsp_priv->php_grf)) {
- dev_err(dev, "Missing rockchip,grf or rockchip,php_grf property\n");
- return;
- }
-
offset_con = bsp_priv->id == 1 ? RK3588_GRF_GMAC_CON9 :
RK3588_GRF_GMAC_CON8;
regmap_write(bsp_priv->php_grf, RK3588_GRF_GMAC_CON0,
- RK3588_GMAC_PHY_INTF_SEL_RGMII(id));
+ RK3588_GMAC_PHY_INTF_SEL(id, PHY_INTF_SEL_RGMII));
regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1,
RK3588_GMAC_CLK_RGMII_MODE(id));
@@ -1343,40 +1253,33 @@ static void rk3588_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3588_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->php_grf)) {
- dev_err(dev, "%s: Missing rockchip,php_grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->php_grf, RK3588_GRF_GMAC_CON0,
- RK3588_GMAC_PHY_INTF_SEL_RMII(bsp_priv->id));
+ RK3588_GMAC_PHY_INTF_SEL(bsp_priv->id, PHY_INTF_SEL_RMII));
regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1,
RK3588_GMAC_CLK_RMII_MODE(bsp_priv->id));
}
-static void rk3588_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
+static int rk3588_set_gmac_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
unsigned int val = 0, id = bsp_priv->id;
switch (speed) {
case 10:
- if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+ if (interface == PHY_INTERFACE_MODE_RMII)
val = RK3588_GMA_CLK_RMII_DIV20(id);
else
val = RK3588_GMAC_CLK_RGMII_DIV50(id);
break;
case 100:
- if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+ if (interface == PHY_INTERFACE_MODE_RMII)
val = RK3588_GMA_CLK_RMII_DIV2(id);
else
val = RK3588_GMAC_CLK_RGMII_DIV5(id);
break;
case 1000:
- if (bsp_priv->phy_iface != PHY_INTERFACE_MODE_RMII)
+ if (interface != PHY_INTERFACE_MODE_RMII)
val = RK3588_GMAC_CLK_RGMII_DIV1(id);
else
goto err;
@@ -1387,9 +1290,9 @@ static void rk3588_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1, val);
- return;
+ return 0;
err:
- dev_err(dev, "unknown speed value for GMAC speed=%d", speed);
+ return -EINVAL;
}
static void rk3588_set_clock_selection(struct rk_priv_data *bsp_priv, bool input,
@@ -1407,9 +1310,9 @@ static void rk3588_set_clock_selection(struct rk_priv_data *bsp_priv, bool input
static const struct rk_gmac_ops rk3588_ops = {
.set_to_rgmii = rk3588_set_to_rgmii,
.set_to_rmii = rk3588_set_to_rmii,
- .set_rgmii_speed = rk3588_set_gmac_speed,
- .set_rmii_speed = rk3588_set_gmac_speed,
+ .set_speed = rk3588_set_gmac_speed,
.set_clock_selection = rk3588_set_clock_selection,
+ .php_grf_required = true,
.regs_valid = true,
.regs = {
0xfe1b0000, /* gmac0 */
@@ -1421,8 +1324,7 @@ static const struct rk_gmac_ops rk3588_ops = {
#define RV1108_GRF_GMAC_CON0 0X0900
/* RV1108_GRF_GMAC_CON0 */
-#define RV1108_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | \
- GRF_BIT(6))
+#define RV1108_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define RV1108_GMAC_FLOW_CTRL GRF_BIT(3)
#define RV1108_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
#define RV1108_GMAC_SPEED_10M GRF_CLR_BIT(2)
@@ -1432,42 +1334,25 @@ static const struct rk_gmac_ops rk3588_ops = {
static void rv1108_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0,
- RV1108_GMAC_PHY_INTF_SEL_RMII);
+ RV1108_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII));
}
-static void rv1108_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
+static const struct rk_reg_speed_data rv1108_reg_speed_data = {
+ .rmii_10 = RV1108_GMAC_RMII_CLK_2_5M | RV1108_GMAC_SPEED_10M,
+ .rmii_100 = RV1108_GMAC_RMII_CLK_25M | RV1108_GMAC_SPEED_100M,
+};
- if (speed == 10) {
- regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0,
- RV1108_GMAC_RMII_CLK_2_5M |
- RV1108_GMAC_SPEED_10M);
- } else if (speed == 100) {
- regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0,
- RV1108_GMAC_RMII_CLK_25M |
- RV1108_GMAC_SPEED_100M);
- } else {
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
- }
+static int rv1108_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
+{
+ return rk_set_reg_speed(bsp_priv, &rv1108_reg_speed_data,
+ RV1108_GRF_GMAC_CON0, interface, speed);
}
static const struct rk_gmac_ops rv1108_ops = {
.set_to_rmii = rv1108_set_to_rmii,
- .set_rmii_speed = rv1108_set_rmii_speed,
+ .set_speed = rv1108_set_speed,
};
#define RV1126_GRF_GMAC_CON0 0X0070
@@ -1475,10 +1360,7 @@ static const struct rk_gmac_ops rv1108_ops = {
#define RV1126_GRF_GMAC_CON2 0X0078
/* RV1126_GRF_GMAC_CON0 */
-#define RV1126_GMAC_PHY_INTF_SEL_RGMII \
- (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
-#define RV1126_GMAC_PHY_INTF_SEL_RMII \
- (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
+#define RV1126_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define RV1126_GMAC_FLOW_CTRL GRF_BIT(7)
#define RV1126_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(7)
#define RV1126_GMAC_M0_RXCLK_DLY_ENABLE GRF_BIT(1)
@@ -1491,24 +1373,17 @@ static const struct rk_gmac_ops rv1108_ops = {
#define RV1126_GMAC_M1_TXCLK_DLY_DISABLE GRF_CLR_BIT(2)
/* RV1126_GRF_GMAC_CON1 */
-#define RV1126_GMAC_M0_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
-#define RV1126_GMAC_M0_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RV1126_GMAC_M0_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val)
+#define RV1126_GMAC_M0_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
/* RV1126_GRF_GMAC_CON2 */
-#define RV1126_GMAC_M1_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
-#define RV1126_GMAC_M1_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RV1126_GMAC_M1_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val)
+#define RV1126_GMAC_M1_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
static void rv1126_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0,
- RV1126_GMAC_PHY_INTF_SEL_RGMII |
+ RV1126_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RV1126_GMAC_M0_RXCLK_DLY_ENABLE |
RV1126_GMAC_M0_TXCLK_DLY_ENABLE |
RV1126_GMAC_M1_RXCLK_DLY_ENABLE |
@@ -1525,126 +1400,21 @@ static void rv1126_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rv1126_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0,
- RV1126_GMAC_PHY_INTF_SEL_RMII);
-}
-
-static void rv1126_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
- struct device *dev = &bsp_priv->pdev->dev;
- unsigned long rate;
- int ret;
-
- switch (speed) {
- case 10:
- rate = 2500000;
- break;
- case 100:
- rate = 25000000;
- break;
- case 1000:
- rate = 125000000;
- break;
- default:
- dev_err(dev, "unknown speed value for RGMII speed=%d", speed);
- return;
- }
-
- ret = clk_set_rate(clk_mac_speed, rate);
- if (ret)
- dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
- __func__, rate, ret);
-}
-
-static void rv1126_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
- struct device *dev = &bsp_priv->pdev->dev;
- unsigned long rate;
- int ret;
-
- switch (speed) {
- case 10:
- rate = 2500000;
- break;
- case 100:
- rate = 25000000;
- break;
- default:
- dev_err(dev, "unknown speed value for RGMII speed=%d", speed);
- return;
- }
-
- ret = clk_set_rate(clk_mac_speed, rate);
- if (ret)
- dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
- __func__, rate, ret);
+ RV1126_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII));
}
static const struct rk_gmac_ops rv1126_ops = {
.set_to_rgmii = rv1126_set_to_rgmii,
.set_to_rmii = rv1126_set_to_rmii,
- .set_rgmii_speed = rv1126_set_rgmii_speed,
- .set_rmii_speed = rv1126_set_rmii_speed,
+ .set_speed = rk_set_clk_mac_speed,
};
-#define RK_GRF_MACPHY_CON0 0xb00
-#define RK_GRF_MACPHY_CON1 0xb04
-#define RK_GRF_MACPHY_CON2 0xb08
-#define RK_GRF_MACPHY_CON3 0xb0c
-
-#define RK_MACPHY_ENABLE GRF_BIT(0)
-#define RK_MACPHY_DISABLE GRF_CLR_BIT(0)
-#define RK_MACPHY_CFG_CLK_50M GRF_BIT(14)
-#define RK_GMAC2PHY_RMII_MODE (GRF_BIT(6) | GRF_CLR_BIT(7))
-#define RK_GRF_CON2_MACPHY_ID HIWORD_UPDATE(0x1234, 0xffff, 0)
-#define RK_GRF_CON3_MACPHY_ID HIWORD_UPDATE(0x35, 0x3f, 0)
-
-static void rk_gmac_integrated_phy_powerup(struct rk_priv_data *priv)
-{
- if (priv->ops->integrated_phy_powerup)
- priv->ops->integrated_phy_powerup(priv);
-
- regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_CFG_CLK_50M);
- regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_GMAC2PHY_RMII_MODE);
-
- regmap_write(priv->grf, RK_GRF_MACPHY_CON2, RK_GRF_CON2_MACPHY_ID);
- regmap_write(priv->grf, RK_GRF_MACPHY_CON3, RK_GRF_CON3_MACPHY_ID);
-
- if (priv->phy_reset) {
- /* PHY needs to be disabled before trying to reset it */
- regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE);
- if (priv->phy_reset)
- reset_control_assert(priv->phy_reset);
- usleep_range(10, 20);
- if (priv->phy_reset)
- reset_control_deassert(priv->phy_reset);
- usleep_range(10, 20);
- regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_ENABLE);
- msleep(30);
- }
-}
-
-static void rk_gmac_integrated_phy_powerdown(struct rk_priv_data *priv)
-{
- regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE);
- if (priv->phy_reset)
- reset_control_assert(priv->phy_reset);
-}
-
static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat)
{
struct rk_priv_data *bsp_priv = plat->bsp_priv;
- struct device *dev = &bsp_priv->pdev->dev;
int phy_iface = bsp_priv->phy_iface;
+ struct device *dev = bsp_priv->dev;
int i, j, ret;
bsp_priv->clk_enabled = false;
@@ -1671,16 +1441,10 @@ static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat)
if (ret)
return dev_err_probe(dev, ret, "Failed to get clocks\n");
- /* "stmmaceth" will be enabled by the core */
- bsp_priv->clk_mac = devm_clk_get(dev, "stmmaceth");
- ret = PTR_ERR_OR_ZERO(bsp_priv->clk_mac);
- if (ret)
- return dev_err_probe(dev, ret, "Cannot get stmmaceth clock\n");
-
if (bsp_priv->clock_input) {
dev_info(dev, "clock input from PHY\n");
} else if (phy_iface == PHY_INTERFACE_MODE_RMII) {
- clk_set_rate(bsp_priv->clk_mac, 50000000);
+ clk_set_rate(plat->stmmac_clk, 50000000);
}
if (plat->phy_node && bsp_priv->integrated_phy) {
@@ -1718,14 +1482,15 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
}
} else {
if (bsp_priv->clk_enabled) {
+ if (bsp_priv->ops && bsp_priv->ops->set_clock_selection) {
+ bsp_priv->ops->set_clock_selection(bsp_priv,
+ bsp_priv->clock_input, false);
+ }
+
clk_bulk_disable_unprepare(bsp_priv->num_clks,
bsp_priv->clks);
clk_disable_unprepare(bsp_priv->clk_phy);
- if (bsp_priv->ops && bsp_priv->ops->set_clock_selection)
- bsp_priv->ops->set_clock_selection(bsp_priv,
- bsp_priv->clock_input, false);
-
bsp_priv->clk_enabled = false;
}
}
@@ -1736,8 +1501,8 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
{
struct regulator *ldo = bsp_priv->regulator;
+ struct device *dev = bsp_priv->dev;
int ret;
- struct device *dev = &bsp_priv->pdev->dev;
if (enable) {
ret = regulator_enable(ldo);
@@ -1767,7 +1532,7 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
if (!bsp_priv)
return ERR_PTR(-ENOMEM);
- of_get_phy_mode(dev->of_node, &bsp_priv->phy_iface);
+ bsp_priv->phy_iface = plat->phy_interface;
bsp_priv->ops = ops;
/* Some SoCs have multiple MAC controllers, which need
@@ -1830,8 +1595,22 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
bsp_priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
"rockchip,grf");
- bsp_priv->php_grf = syscon_regmap_lookup_by_phandle(dev->of_node,
- "rockchip,php-grf");
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err_probe(dev, PTR_ERR(bsp_priv->grf),
+ "failed to lookup rockchip,grf\n");
+ return ERR_CAST(bsp_priv->grf);
+ }
+
+ if (ops->php_grf_required) {
+ bsp_priv->php_grf =
+ syscon_regmap_lookup_by_phandle(dev->of_node,
+ "rockchip,php-grf");
+ if (IS_ERR(bsp_priv->php_grf)) {
+ dev_err_probe(dev, PTR_ERR(bsp_priv->php_grf),
+ "failed to lookup rockchip,php-grf\n");
+ return ERR_CAST(bsp_priv->php_grf);
+ }
+ }
if (plat->phy_node) {
bsp_priv->integrated_phy = of_property_read_bool(plat->phy_node,
@@ -1847,7 +1626,7 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
dev_info(dev, "integrated PHY? (%s).\n",
bsp_priv->integrated_phy ? "yes" : "no");
- bsp_priv->pdev = pdev;
+ bsp_priv->dev = dev;
return bsp_priv;
}
@@ -1867,7 +1646,7 @@ static int rk_gmac_check_ops(struct rk_priv_data *bsp_priv)
return -EINVAL;
break;
default:
- dev_err(&bsp_priv->pdev->dev,
+ dev_err(bsp_priv->dev,
"unsupported interface %d", bsp_priv->phy_iface);
}
return 0;
@@ -1875,8 +1654,8 @@ static int rk_gmac_check_ops(struct rk_priv_data *bsp_priv)
static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
{
+ struct device *dev = bsp_priv->dev;
int ret;
- struct device *dev = &bsp_priv->pdev->dev;
ret = rk_gmac_check_ops(bsp_priv);
if (ret)
@@ -1921,43 +1700,82 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
pm_runtime_get_sync(dev);
- if (bsp_priv->integrated_phy)
- rk_gmac_integrated_phy_powerup(bsp_priv);
+ if (bsp_priv->integrated_phy && bsp_priv->ops->integrated_phy_powerup)
+ bsp_priv->ops->integrated_phy_powerup(bsp_priv);
return 0;
}
static void rk_gmac_powerdown(struct rk_priv_data *gmac)
{
- if (gmac->integrated_phy)
- rk_gmac_integrated_phy_powerdown(gmac);
+ if (gmac->integrated_phy && gmac->ops->integrated_phy_powerdown)
+ gmac->ops->integrated_phy_powerdown(gmac);
- pm_runtime_put_sync(&gmac->pdev->dev);
+ pm_runtime_put_sync(gmac->dev);
phy_power_on(gmac, false);
gmac_clk_enable(gmac, false);
}
-static void rk_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static void rk_get_interfaces(struct stmmac_priv *priv, void *bsp_priv,
+ unsigned long *interfaces)
{
- struct rk_priv_data *bsp_priv = priv;
- struct device *dev = &bsp_priv->pdev->dev;
+ struct rk_priv_data *rk = bsp_priv;
- switch (bsp_priv->phy_iface) {
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- if (bsp_priv->ops->set_rgmii_speed)
- bsp_priv->ops->set_rgmii_speed(bsp_priv, speed);
- break;
- case PHY_INTERFACE_MODE_RMII:
- if (bsp_priv->ops->set_rmii_speed)
- bsp_priv->ops->set_rmii_speed(bsp_priv, speed);
- break;
- default:
- dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
- }
+ if (rk->ops->set_to_rgmii)
+ phy_interface_set_rgmii(interfaces);
+
+ if (rk->ops->set_to_rmii)
+ __set_bit(PHY_INTERFACE_MODE_RMII, interfaces);
+}
+
+static int rk_set_clk_tx_rate(void *bsp_priv_, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
+{
+ struct rk_priv_data *bsp_priv = bsp_priv_;
+
+ if (bsp_priv->ops->set_speed)
+ return bsp_priv->ops->set_speed(bsp_priv, interface, speed);
+
+ return -EINVAL;
+}
+
+static int rk_gmac_suspend(struct device *dev, void *bsp_priv_)
+{
+ struct rk_priv_data *bsp_priv = bsp_priv_;
+
+ /* Keep the PHY up if we use Wake-on-LAN. */
+ if (!device_may_wakeup(dev))
+ rk_gmac_powerdown(bsp_priv);
+
+ return 0;
+}
+
+static int rk_gmac_resume(struct device *dev, void *bsp_priv_)
+{
+ struct rk_priv_data *bsp_priv = bsp_priv_;
+
+ /* The PHY was up for Wake-on-LAN. */
+ if (!device_may_wakeup(dev))
+ rk_gmac_powerup(bsp_priv);
+
+ return 0;
+}
+
+static int rk_gmac_init(struct device *dev, void *bsp_priv)
+{
+ return rk_gmac_powerup(bsp_priv);
+}
+
+static void rk_gmac_exit(struct device *dev, void *bsp_priv_)
+{
+ struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(dev));
+ struct rk_priv_data *bsp_priv = bsp_priv_;
+
+ rk_gmac_powerdown(bsp_priv);
+
+ if (priv->plat->phy_node && bsp_priv->integrated_phy)
+ clk_put(bsp_priv->clk_phy);
}
static int rk_gmac_probe(struct platform_device *pdev)
@@ -1984,9 +1802,18 @@ static int rk_gmac_probe(struct platform_device *pdev)
/* If the stmmac is not already selected as gmac4,
* then make sure we fallback to gmac.
*/
- if (!plat_dat->has_gmac4)
- plat_dat->has_gmac = true;
- plat_dat->fix_mac_speed = rk_fix_speed;
+ if (plat_dat->core_type != DWMAC_CORE_GMAC4) {
+ plat_dat->core_type = DWMAC_CORE_GMAC;
+ plat_dat->rx_fifo_size = 4096;
+ plat_dat->tx_fifo_size = 2048;
+ }
+
+ plat_dat->get_interfaces = rk_get_interfaces;
+ plat_dat->set_clk_tx_rate = rk_set_clk_tx_rate;
+ plat_dat->init = rk_gmac_init;
+ plat_dat->exit = rk_gmac_exit;
+ plat_dat->suspend = rk_gmac_suspend;
+ plat_dat->resume = rk_gmac_resume;
plat_dat->bsp_priv = rk_gmac_setup(pdev, plat_dat, data);
if (IS_ERR(plat_dat->bsp_priv))
@@ -1996,62 +1823,9 @@ static int rk_gmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = rk_gmac_powerup(plat_dat->bsp_priv);
- if (ret)
- return ret;
-
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- goto err_gmac_powerdown;
-
- return 0;
-
-err_gmac_powerdown:
- rk_gmac_powerdown(plat_dat->bsp_priv);
-
- return ret;
-}
-
-static void rk_gmac_remove(struct platform_device *pdev)
-{
- struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(&pdev->dev);
-
- stmmac_dvr_remove(&pdev->dev);
-
- rk_gmac_powerdown(bsp_priv);
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
-#ifdef CONFIG_PM_SLEEP
-static int rk_gmac_suspend(struct device *dev)
-{
- struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(dev);
- int ret = stmmac_suspend(dev);
-
- /* Keep the PHY up if we use Wake-on-Lan. */
- if (!device_may_wakeup(dev)) {
- rk_gmac_powerdown(bsp_priv);
- bsp_priv->suspended = true;
- }
-
- return ret;
-}
-
-static int rk_gmac_resume(struct device *dev)
-{
- struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(dev);
-
- /* The PHY was up for Wake-on-Lan. */
- if (bsp_priv->suspended) {
- rk_gmac_powerup(bsp_priv);
- bsp_priv->suspended = false;
- }
-
- return stmmac_resume(dev);
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume);
-
static const struct of_device_id rk_gmac_dwmac_match[] = {
{ .compatible = "rockchip,px30-gmac", .data = &px30_ops },
{ .compatible = "rockchip,rk3128-gmac", .data = &rk3128_ops },
@@ -2062,6 +1836,8 @@ static const struct of_device_id rk_gmac_dwmac_match[] = {
{ .compatible = "rockchip,rk3366-gmac", .data = &rk3366_ops },
{ .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops },
{ .compatible = "rockchip,rk3399-gmac", .data = &rk3399_ops },
+ { .compatible = "rockchip,rk3506-gmac", .data = &rk3506_ops },
+ { .compatible = "rockchip,rk3528-gmac", .data = &rk3528_ops },
{ .compatible = "rockchip,rk3568-gmac", .data = &rk3568_ops },
{ .compatible = "rockchip,rk3576-gmac", .data = &rk3576_ops },
{ .compatible = "rockchip,rk3588-gmac", .data = &rk3588_ops },
@@ -2073,10 +1849,9 @@ MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_match);
static struct platform_driver rk_gmac_dwmac_driver = {
.probe = rk_gmac_probe,
- .remove = rk_gmac_remove,
.driver = {
.name = "rk_gmac-dwmac",
- .pm = &rk_gmac_pm_ops,
+ .pm = &stmmac_simple_pm_ops,
.of_match_table = rk_gmac_dwmac_match,
},
};
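
Note on the Rockchip conversion above: the per-interface GRF_BIT()/GRF_CLR_BIT() groupings and the HIWORD_UPDATE() users are folded into a single GRF_FIELD(msb, lsb, val) macro, and the value written into bits 6..4 is the generic DW MAC phy_intf_sel encoding that the old per-bit defines already spelled out (001b for RGMII, 100b for RMII). The real GRF_FIELD()/PHY_INTF_SEL_* definitions are introduced earlier in the series and are not visible in this hunk; the sketch below only illustrates the usual Rockchip hiword-mask construction they are assumed to follow, and is not the series' literal code.

	/* Illustrative sketch only, assuming the conventional Rockchip GRF
	 * hiword-mask idiom: the upper 16 bits select which of the lower
	 * 16 bits take effect, so no read-modify-write is needed.
	 */
	#define HIWORD_UPDATE(val, mask, shift) \
		(((val) << (shift)) | ((mask) << ((shift) + 16)))

	#define GRF_FIELD(msb, lsb, val) \
		HIWORD_UPDATE(val, GENMASK((msb) - (lsb), 0), (lsb))

	/* DW MAC phy_intf_sel values implied by the old per-bit defines */
	#define PHY_INTF_SEL_RGMII	1	/* bits 6..4 = 001b */
	#define PHY_INTF_SEL_RMII	4	/* bits 6..4 = 100b */

	/* GRF_FIELD(6, 4, PHY_INTF_SEL_RMII) then produces the same value as
	 * the removed GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6).
	 */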
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
new file mode 100644
index 000000000000..5a485ee98fa7
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NXP S32G/R GMAC glue layer
+ *
+ * Copyright 2019-2024 NXP
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/ethtool.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/of_address.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+#include <linux/platform_device.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+
+#define GMAC_INTF_RATE_125M 125000000 /* 125MHz */
+
+/* SoC PHY interface control register */
+#define S32_PHY_INTF_SEL_MII 0x00
+#define S32_PHY_INTF_SEL_SGMII 0x01
+#define S32_PHY_INTF_SEL_RGMII 0x02
+#define S32_PHY_INTF_SEL_RMII 0x08
+
+struct s32_priv_data {
+ void __iomem *ioaddr;
+ void __iomem *ctrl_sts;
+ struct device *dev;
+ phy_interface_t *intf_mode;
+ struct clk *tx_clk;
+ struct clk *rx_clk;
+};
+
+static int s32_gmac_write_phy_intf_select(struct s32_priv_data *gmac)
+{
+ writel(S32_PHY_INTF_SEL_RGMII, gmac->ctrl_sts);
+
+ dev_dbg(gmac->dev, "PHY mode set to %s\n", phy_modes(*gmac->intf_mode));
+
+ return 0;
+}
+
+static int s32_gmac_init(struct device *dev, void *priv)
+{
+ struct s32_priv_data *gmac = priv;
+ int ret;
+
+ /* Set initial TX interface clock */
+ ret = clk_prepare_enable(gmac->tx_clk);
+ if (ret) {
+ dev_err(dev, "Can't enable tx clock\n");
+ return ret;
+ }
+ ret = clk_set_rate(gmac->tx_clk, GMAC_INTF_RATE_125M);
+ if (ret) {
+ dev_err(dev, "Can't set tx clock\n");
+ goto err_tx_disable;
+ }
+
+ /* Set initial RX interface clock */
+ ret = clk_prepare_enable(gmac->rx_clk);
+ if (ret) {
+ dev_err(dev, "Can't enable rx clock\n");
+ goto err_tx_disable;
+ }
+ ret = clk_set_rate(gmac->rx_clk, GMAC_INTF_RATE_125M);
+ if (ret) {
+ dev_err(dev, "Can't set rx clock\n");
+ goto err_txrx_disable;
+ }
+
+ /* Set interface mode */
+ ret = s32_gmac_write_phy_intf_select(gmac);
+ if (ret) {
+ dev_err(dev, "Can't set PHY interface mode\n");
+ goto err_txrx_disable;
+ }
+
+ return 0;
+
+err_txrx_disable:
+ clk_disable_unprepare(gmac->rx_clk);
+err_tx_disable:
+ clk_disable_unprepare(gmac->tx_clk);
+ return ret;
+}
+
+static void s32_gmac_exit(struct device *dev, void *priv)
+{
+ struct s32_priv_data *gmac = priv;
+
+ clk_disable_unprepare(gmac->tx_clk);
+ clk_disable_unprepare(gmac->rx_clk);
+}
+
+static int s32_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat;
+ struct device *dev = &pdev->dev;
+ struct stmmac_resources res;
+ struct s32_priv_data *gmac;
+ int ret;
+
+ gmac = devm_kzalloc(&pdev->dev, sizeof(*gmac), GFP_KERNEL);
+ if (!gmac)
+ return -ENOMEM;
+
+ gmac->dev = &pdev->dev;
+
+ ret = stmmac_get_platform_resources(pdev, &res);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to get platform resources\n");
+
+ plat = devm_stmmac_probe_config_dt(pdev, res.mac);
+ if (IS_ERR(plat))
+ return dev_err_probe(dev, PTR_ERR(plat),
+ "dt configuration failed\n");
+
+ /* PHY interface mode control reg */
+ gmac->ctrl_sts = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
+ if (IS_ERR(gmac->ctrl_sts))
+ return dev_err_probe(dev, PTR_ERR(gmac->ctrl_sts),
+ "S32CC config region is missing\n");
+
+ /* tx clock */
+ gmac->tx_clk = devm_clk_get(&pdev->dev, "tx");
+ if (IS_ERR(gmac->tx_clk))
+ return dev_err_probe(dev, PTR_ERR(gmac->tx_clk),
+ "tx clock not found\n");
+
+ /* rx clock */
+ gmac->rx_clk = devm_clk_get(&pdev->dev, "rx");
+ if (IS_ERR(gmac->rx_clk))
+ return dev_err_probe(dev, PTR_ERR(gmac->rx_clk),
+ "rx clock not found\n");
+
+ gmac->intf_mode = &plat->phy_interface;
+ gmac->ioaddr = res.addr;
+
+ /* S32CC core feature set */
+ plat->core_type = DWMAC_CORE_GMAC4;
+ plat->pmt = 1;
+ plat->flags |= STMMAC_FLAG_SPH_DISABLE;
+ plat->rx_fifo_size = 20480;
+ plat->tx_fifo_size = 20480;
+
+ plat->init = s32_gmac_init;
+ plat->exit = s32_gmac_exit;
+
+ plat->clk_tx_i = gmac->tx_clk;
+ plat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
+
+ plat->bsp_priv = gmac;
+
+ return stmmac_pltfr_probe(pdev, plat, &res);
+}
+
+static const struct of_device_id s32_dwmac_match[] = {
+ { .compatible = "nxp,s32g2-dwmac" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, s32_dwmac_match);
+
+static struct platform_driver s32_dwmac_driver = {
+ .probe = s32_dwmac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "s32-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = s32_dwmac_match,
+ },
+};
+module_platform_driver(s32_dwmac_driver);
+
+MODULE_AUTHOR("Jan Petrous (OSS) <jan.petrous@oss.nxp.com>");
+MODULE_DESCRIPTION("NXP S32G/R common chassis GMAC driver");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 0745117d5872..a2b52d2c4eb6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -5,6 +5,7 @@
*/
#include <linux/mfd/altera-sysmgr.h>
+#include <linux/clocksource_ids.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_net.h>
@@ -15,8 +16,10 @@
#include <linux/reset.h>
#include <linux/stmmac.h>
+#include "dwxgmac2.h"
#include "stmmac.h"
#include "stmmac_platform.h"
+#include "stmmac_ptp.h"
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1
@@ -41,15 +44,24 @@
#define SGMII_ADAPTER_ENABLE 0x0000
#define SGMII_ADAPTER_DISABLE 0x0001
+#define SMTG_MDIO_ADDR 0x15
+#define SMTG_TSC_WORD0 0xC
+#define SMTG_TSC_WORD1 0xD
+#define SMTG_TSC_WORD2 0xE
+#define SMTG_TSC_WORD3 0xF
+#define SMTG_TSC_SHIFT 16
+
struct socfpga_dwmac;
struct socfpga_dwmac_ops {
int (*set_phy_mode)(struct socfpga_dwmac *dwmac_priv);
+ void (*setup_plat_dat)(struct socfpga_dwmac *dwmac_priv);
};
struct socfpga_dwmac {
u32 reg_offset;
u32 reg_shift;
struct device *dev;
+ struct plat_stmmacenet_data *plat_dat;
struct regmap *sys_mgr_base_addr;
struct reset_control *stmmac_rst;
struct reset_control *stmmac_ocp_rst;
@@ -58,17 +70,15 @@ struct socfpga_dwmac {
void __iomem *sgmii_adapter_base;
bool f2h_ptp_ref_clk;
const struct socfpga_dwmac_ops *ops;
- struct mdio_device *pcs_mdiodev;
};
-static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+static void socfpga_dwmac_fix_mac_speed(void *bsp_priv, int speed,
+ unsigned int mode)
{
- struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv;
+ struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)bsp_priv;
+ struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(dwmac->dev));
void __iomem *splitter_base = dwmac->splitter_base;
void __iomem *sgmii_adapter_base = dwmac->sgmii_adapter_base;
- struct device *dev = dwmac->dev;
- struct net_device *ndev = dev_get_drvdata(dev);
- struct phy_device *phy_dev = ndev->phydev;
u32 val;
if (sgmii_adapter_base)
@@ -95,7 +105,9 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned
writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG);
}
- if (phy_dev && sgmii_adapter_base)
+ if ((priv->plat->phy_interface == PHY_INTERFACE_MODE_SGMII ||
+ priv->plat->phy_interface == PHY_INTERFACE_MODE_1000BASEX) &&
+ sgmii_adapter_base)
writew(SGMII_ADAPTER_ENABLE,
sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
}
@@ -233,10 +245,7 @@ err_node_put:
static int socfpga_get_plat_phymode(struct socfpga_dwmac *dwmac)
{
- struct net_device *ndev = dev_get_drvdata(dwmac->dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
-
- return priv->plat->mac_interface;
+ return dwmac->plat_dat->phy_interface;
}
static void socfpga_sgmii_config(struct socfpga_dwmac *dwmac, bool enable)
@@ -258,6 +267,7 @@ static int socfpga_set_phy_mode_common(int phymode, u32 *val)
case PHY_INTERFACE_MODE_MII:
case PHY_INTERFACE_MODE_GMII:
case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
*val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
break;
case PHY_INTERFACE_MODE_RMII:
@@ -269,6 +279,112 @@ static int socfpga_set_phy_mode_common(int phymode, u32 *val)
return 0;
}
+static void get_smtgtime(struct mii_bus *mii, int smtg_addr, u64 *smtg_time)
+{
+ u64 ns;
+
+ ns = mdiobus_read(mii, smtg_addr, SMTG_TSC_WORD3);
+ ns <<= SMTG_TSC_SHIFT;
+ ns |= mdiobus_read(mii, smtg_addr, SMTG_TSC_WORD2);
+ ns <<= SMTG_TSC_SHIFT;
+ ns |= mdiobus_read(mii, smtg_addr, SMTG_TSC_WORD1);
+ ns <<= SMTG_TSC_SHIFT;
+ ns |= mdiobus_read(mii, smtg_addr, SMTG_TSC_WORD0);
+
+ *smtg_time = ns;
+}
+
+static int smtg_crosststamp(ktime_t *device, struct system_counterval_t *system,
+ void *ctx)
+{
+ struct stmmac_priv *priv = (struct stmmac_priv *)ctx;
+ u32 num_snapshot, gpio_value, acr_value;
+ void __iomem *ptpaddr = priv->ptpaddr;
+ void __iomem *ioaddr = priv->hw->pcsr;
+ unsigned long flags;
+ u64 smtg_time = 0;
+ u64 ptp_time = 0;
+ int i, ret;
+ u32 v;
+
+ /* Internal cross-timestamping and externally triggered event
+ * timestamping cannot run concurrently.
+ */
+ if (priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN)
+ return -EBUSY;
+
+ mutex_lock(&priv->aux_ts_lock);
+ /* Enable Internal snapshot trigger */
+ acr_value = readl(ptpaddr + PTP_ACR);
+ acr_value &= ~PTP_ACR_MASK;
+ switch (priv->plat->int_snapshot_num) {
+ case AUX_SNAPSHOT0:
+ acr_value |= PTP_ACR_ATSEN0;
+ break;
+ case AUX_SNAPSHOT1:
+ acr_value |= PTP_ACR_ATSEN1;
+ break;
+ case AUX_SNAPSHOT2:
+ acr_value |= PTP_ACR_ATSEN2;
+ break;
+ case AUX_SNAPSHOT3:
+ acr_value |= PTP_ACR_ATSEN3;
+ break;
+ default:
+ mutex_unlock(&priv->aux_ts_lock);
+ return -EINVAL;
+ }
+ writel(acr_value, ptpaddr + PTP_ACR);
+
+ /* Clear FIFO */
+ acr_value = readl(ptpaddr + PTP_ACR);
+ acr_value |= PTP_ACR_ATSFC;
+ writel(acr_value, ptpaddr + PTP_ACR);
+ /* Release the mutex */
+ mutex_unlock(&priv->aux_ts_lock);
+
+ /* Trigger the internal snapshot signal. Create a rising edge by toggling
+ * GPO0 low and then back to high.
+ */
+ gpio_value = readl(ioaddr + XGMAC_GPIO_STATUS);
+ gpio_value &= ~XGMAC_GPIO_GPO0;
+ writel(gpio_value, ioaddr + XGMAC_GPIO_STATUS);
+ gpio_value |= XGMAC_GPIO_GPO0;
+ writel(gpio_value, ioaddr + XGMAC_GPIO_STATUS);
+
+ /* Poll for time sync operation done */
+ ret = readl_poll_timeout(priv->ioaddr + XGMAC_INT_STATUS, v,
+ (v & XGMAC_INT_TSIS), 100, 10000);
+ if (ret) {
+ netdev_err(priv->dev, "%s: Wait for time sync operation timeout\n",
+ __func__);
+ return ret;
+ }
+
+ *system = (struct system_counterval_t) {
+ .cycles = 0,
+ .cs_id = CSID_ARM_ARCH_COUNTER,
+ .use_nsecs = false,
+ };
+
+ num_snapshot = (readl(ioaddr + XGMAC_TIMESTAMP_STATUS) &
+ XGMAC_TIMESTAMP_ATSNS_MASK) >>
+ XGMAC_TIMESTAMP_ATSNS_SHIFT;
+
+ /* Repeat until the timestamps are from the last FIFO segment */
+ for (i = 0; i < num_snapshot; i++) {
+ read_lock_irqsave(&priv->ptp_lock, flags);
+ stmmac_get_ptptime(priv, ptpaddr, &ptp_time);
+ *device = ns_to_ktime(ptp_time);
+ read_unlock_irqrestore(&priv->ptp_lock, flags);
+ }
+
+ get_smtgtime(priv->mii, SMTG_MDIO_ADDR, &smtg_time);
+ system->cycles = smtg_time;
+
+ return 0;
+}
+
static int socfpga_gen5_set_phy_mode(struct socfpga_dwmac *dwmac)
{
struct regmap *sys_mgr_base_addr = dwmac->sys_mgr_base_addr;
@@ -435,6 +551,50 @@ static struct phylink_pcs *socfpga_dwmac_select_pcs(struct stmmac_priv *priv,
return priv->hw->phylink_pcs;
}
+static int socfpga_dwmac_init(struct device *dev, void *bsp_priv)
+{
+ struct socfpga_dwmac *dwmac = bsp_priv;
+
+ return dwmac->ops->set_phy_mode(dwmac);
+}
+
+static void socfpga_gen5_setup_plat_dat(struct socfpga_dwmac *dwmac)
+{
+ struct plat_stmmacenet_data *plat_dat = dwmac->plat_dat;
+
+ plat_dat->core_type = DWMAC_CORE_GMAC;
+
+ /* Rx watchdog timer in dwmac is buggy in this hw */
+ plat_dat->riwt_off = 1;
+}
+
+static void socfpga_agilex5_setup_plat_dat(struct socfpga_dwmac *dwmac)
+{
+ struct plat_stmmacenet_data *plat_dat = dwmac->plat_dat;
+
+ plat_dat->core_type = DWMAC_CORE_XGMAC;
+
+ /* Enable TSO */
+ plat_dat->flags |= STMMAC_FLAG_TSO_EN;
+
+ /* Enable TBS */
+ switch (plat_dat->tx_queues_to_use) {
+ case 8:
+ plat_dat->tx_queues_cfg[7].tbs_en = true;
+ fallthrough;
+ case 7:
+ plat_dat->tx_queues_cfg[6].tbs_en = true;
+ break;
+ default:
+ /* Tx queues 0 - 5 don't support TBS on Agilex5 */
+ break;
+ }
+
+ /* Hw supported cross-timestamp */
+ plat_dat->int_snapshot_num = AUX_SNAPSHOT0;
+ plat_dat->crosststamp = smtg_crosststamp;
+}
+
static int socfpga_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
@@ -442,8 +602,6 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
int ret;
struct socfpga_dwmac *dwmac;
- struct net_device *ndev;
- struct stmmac_priv *stpriv;
const struct socfpga_dwmac_ops *ops;
ops = device_get_match_data(&pdev->dev);
@@ -479,113 +637,54 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
return ret;
}
+ /* The socfpga driver needs to control the stmmac reset to set the phy
+ * mode. Create a copy of the core reset handle so it can be used by
+ * the driver later.
+ */
+ dwmac->stmmac_rst = plat_dat->stmmac_rst;
dwmac->ops = ops;
+ dwmac->plat_dat = plat_dat;
+
plat_dat->bsp_priv = dwmac;
plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
+ plat_dat->init = socfpga_dwmac_init;
plat_dat->pcs_init = socfpga_dwmac_pcs_init;
plat_dat->pcs_exit = socfpga_dwmac_pcs_exit;
plat_dat->select_pcs = socfpga_dwmac_select_pcs;
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- return ret;
-
- ndev = platform_get_drvdata(pdev);
- stpriv = netdev_priv(ndev);
-
- /* The socfpga driver needs to control the stmmac reset to set the phy
- * mode. Create a copy of the core reset handle so it can be used by
- * the driver later.
- */
- dwmac->stmmac_rst = stpriv->plat->stmmac_rst;
-
- ret = ops->set_phy_mode(dwmac);
- if (ret)
- goto err_dvr_remove;
-
- return 0;
-
-err_dvr_remove:
- stmmac_dvr_remove(&pdev->dev);
-
- return ret;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int socfpga_dwmac_resume(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- struct socfpga_dwmac *dwmac_priv = get_stmmac_bsp_priv(dev);
-
- dwmac_priv->ops->set_phy_mode(priv->plat->bsp_priv);
-
- /* Before the enet controller is suspended, the phy is suspended.
- * This causes the phy clock to be gated. The enet controller is
- * resumed before the phy, so the clock is still gated "off" when
- * the enet controller is resumed. This code makes sure the phy
- * is "resumed" before reinitializing the enet controller since
- * the enet controller depends on an active phy clock to complete
- * a DMA reset. A DMA reset will "time out" if executed
- * with no phy clock input on the Synopsys enet controller.
- * Verified through Synopsys Case #8000711656.
- *
- * Note that the phy clock is also gated when the phy is isolated.
- * Phy "suspend" and "isolate" controls are located in phy basic
- * control register 0, and can be modified by the phy driver
- * framework.
- */
- if (ndev->phydev)
- phy_resume(ndev->phydev);
+ ops->setup_plat_dat(dwmac);
- return stmmac_resume(dev);
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
-#endif /* CONFIG_PM_SLEEP */
-
-static int __maybe_unused socfpga_dwmac_runtime_suspend(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
-
- stmmac_bus_clks_config(priv, false);
-
- return 0;
-}
-
-static int __maybe_unused socfpga_dwmac_runtime_resume(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
-
- return stmmac_bus_clks_config(priv, true);
-}
-
-static const struct dev_pm_ops socfpga_dwmac_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(stmmac_suspend, socfpga_dwmac_resume)
- SET_RUNTIME_PM_OPS(socfpga_dwmac_runtime_suspend, socfpga_dwmac_runtime_resume, NULL)
-};
static const struct socfpga_dwmac_ops socfpga_gen5_ops = {
.set_phy_mode = socfpga_gen5_set_phy_mode,
+ .setup_plat_dat = socfpga_gen5_setup_plat_dat,
};
static const struct socfpga_dwmac_ops socfpga_gen10_ops = {
.set_phy_mode = socfpga_gen10_set_phy_mode,
+ .setup_plat_dat = socfpga_gen5_setup_plat_dat,
+};
+
+static const struct socfpga_dwmac_ops socfpga_agilex5_ops = {
+ .set_phy_mode = socfpga_gen10_set_phy_mode,
+ .setup_plat_dat = socfpga_agilex5_setup_plat_dat,
};
static const struct of_device_id socfpga_dwmac_match[] = {
{ .compatible = "altr,socfpga-stmmac", .data = &socfpga_gen5_ops },
{ .compatible = "altr,socfpga-stmmac-a10-s10", .data = &socfpga_gen10_ops },
+ { .compatible = "altr,socfpga-stmmac-agilex5", .data = &socfpga_agilex5_ops },
{ }
};
MODULE_DEVICE_TABLE(of, socfpga_dwmac_match);
static struct platform_driver socfpga_dwmac_driver = {
.probe = socfpga_dwmac_probe,
- .remove = stmmac_pltfr_remove,
.driver = {
.name = "socfpga-dwmac",
- .pm = &socfpga_dwmac_pm_ops,
+ .pm = &stmmac_pltfr_pm_ops,
.of_match_table = socfpga_dwmac_match,
},
};
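
The Agilex5 cross-timestamp path added above reads the SMTG time counter over MDIO as four 16-bit words; SMTG_TSC_WORD3 holds the most significant bits and is read first. A small standalone illustration of the word assembly performed by get_smtgtime(), using made-up register values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Made-up contents of SMTG_TSC_WORD0..WORD3 */
		uint16_t word[4] = { 0x1234, 0x5678, 0x9abc, 0x00de };
		uint64_t ns = 0;

		/* Same order as get_smtgtime(): start with WORD3, then shift
		 * left by 16 and OR in each lower word.
		 */
		for (int i = 3; i >= 0; i--)
			ns = (ns << 16) | word[i];

		printf("0x%016llx\n", (unsigned long long)ns); /* 0x00de9abc56781234 */
		return 0;
	}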
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c
new file mode 100644
index 000000000000..44d4ceb8415f
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Sophgo DWMAC platform driver
+ *
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+
+#include "stmmac_platform.h"
+
+struct sophgo_dwmac_data {
+ bool has_internal_rx_delay;
+};
+
+static int sophgo_sg2044_dwmac_init(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat,
+ struct stmmac_resources *stmmac_res)
+{
+ plat_dat->clk_tx_i = devm_clk_get_enabled(&pdev->dev, "tx");
+ if (IS_ERR(plat_dat->clk_tx_i))
+ return dev_err_probe(&pdev->dev, PTR_ERR(plat_dat->clk_tx_i),
+ "failed to get tx clock\n");
+
+ plat_dat->flags |= STMMAC_FLAG_SPH_DISABLE;
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
+ plat_dat->multicast_filter_bins = 0;
+
+ return 0;
+}
+
+static int sophgo_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ const struct sophgo_dwmac_data *data;
+ struct stmmac_resources stmmac_res;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get platform resources\n");
+
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return dev_err_probe(dev, PTR_ERR(plat_dat),
+ "failed to parse DT parameters\n");
+
+ ret = sophgo_sg2044_dwmac_init(pdev, plat_dat, &stmmac_res);
+ if (ret)
+ return ret;
+
+ data = device_get_match_data(&pdev->dev);
+ if (data && data->has_internal_rx_delay) {
+ plat_dat->phy_interface = phy_fix_phy_mode_for_mac_delays(plat_dat->phy_interface,
+ false, true);
+ if (plat_dat->phy_interface == PHY_INTERFACE_MODE_NA)
+ return -EINVAL;
+ }
+
+ return stmmac_dvr_probe(dev, plat_dat, &stmmac_res);
+}
+
+static const struct sophgo_dwmac_data sg2042_dwmac_data = {
+ .has_internal_rx_delay = true,
+};
+
+static const struct of_device_id sophgo_dwmac_match[] = {
+ { .compatible = "sophgo,sg2042-dwmac", .data = &sg2042_dwmac_data },
+ { .compatible = "sophgo,sg2044-dwmac" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sophgo_dwmac_match);
+
+static struct platform_driver sophgo_dwmac_driver = {
+ .probe = sophgo_dwmac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "sophgo-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = sophgo_dwmac_match,
+ },
+};
+module_platform_driver(sophgo_dwmac_driver);
+
+MODULE_AUTHOR("Inochi Amaoto <inochiama@gmail.com>");
+MODULE_DESCRIPTION("Sophgo DWMAC platform driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
index 421666279dd3..16b955a6d77b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
@@ -15,8 +15,6 @@
#include "stmmac_platform.h"
-#define STARFIVE_DWMAC_PHY_INFT_RGMII 0x1
-#define STARFIVE_DWMAC_PHY_INFT_RMII 0x4
#define STARFIVE_DWMAC_PHY_INFT_FIELD 0x7U
#define JH7100_SYSMAIN_REGISTER49_DLYCHAIN 0xc8
@@ -27,62 +25,23 @@ struct starfive_dwmac_data {
struct starfive_dwmac {
struct device *dev;
- struct clk *clk_tx;
const struct starfive_dwmac_data *data;
};
-static void starfive_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
-{
- struct starfive_dwmac *dwmac = priv;
- unsigned long rate;
- int err;
-
- rate = clk_get_rate(dwmac->clk_tx);
-
- switch (speed) {
- case SPEED_1000:
- rate = 125000000;
- break;
- case SPEED_100:
- rate = 25000000;
- break;
- case SPEED_10:
- rate = 2500000;
- break;
- default:
- dev_err(dwmac->dev, "invalid speed %u\n", speed);
- break;
- }
-
- err = clk_set_rate(dwmac->clk_tx, rate);
- if (err)
- dev_err(dwmac->dev, "failed to set tx rate %lu\n", rate);
-}
-
static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat)
{
struct starfive_dwmac *dwmac = plat_dat->bsp_priv;
struct regmap *regmap;
unsigned int args[2];
- unsigned int mode;
+ int phy_intf_sel;
int err;
- switch (plat_dat->mac_interface) {
- case PHY_INTERFACE_MODE_RMII:
- mode = STARFIVE_DWMAC_PHY_INFT_RMII;
- break;
-
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- mode = STARFIVE_DWMAC_PHY_INFT_RGMII;
- break;
-
- default:
- dev_err(dwmac->dev, "unsupported interface %d\n",
- plat_dat->mac_interface);
- return -EINVAL;
+ phy_intf_sel = stmmac_get_phy_intf_sel(plat_dat->phy_interface);
+ if (phy_intf_sel != PHY_INTF_SEL_RGMII &&
+ phy_intf_sel != PHY_INTF_SEL_RMII) {
+ dev_err(dwmac->dev, "unsupported interface %s\n",
+ phy_modes(plat_dat->phy_interface));
+ return phy_intf_sel < 0 ? phy_intf_sel : -EINVAL;
}
regmap = syscon_regmap_lookup_by_phandle_args(dwmac->dev->of_node,
@@ -94,7 +53,7 @@ static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat)
/* args[0]:offset args[1]: shift */
err = regmap_update_bits(regmap, args[0],
STARFIVE_DWMAC_PHY_INFT_FIELD << args[1],
- mode << args[1]);
+ phy_intf_sel << args[1]);
if (err)
return dev_err_probe(dwmac->dev, err, "error setting phy mode\n");
@@ -133,9 +92,9 @@ static int starfive_dwmac_probe(struct platform_device *pdev)
dwmac->data = device_get_match_data(&pdev->dev);
- dwmac->clk_tx = devm_clk_get_enabled(&pdev->dev, "tx");
- if (IS_ERR(dwmac->clk_tx))
- return dev_err_probe(&pdev->dev, PTR_ERR(dwmac->clk_tx),
+ plat_dat->clk_tx_i = devm_clk_get_enabled(&pdev->dev, "tx");
+ if (IS_ERR(plat_dat->clk_tx_i))
+ return dev_err_probe(&pdev->dev, PTR_ERR(plat_dat->clk_tx_i),
"error getting tx clock\n");
clk_gtx = devm_clk_get_enabled(&pdev->dev, "gtx");
@@ -150,9 +109,10 @@ static int starfive_dwmac_probe(struct platform_device *pdev)
* internally, because rgmii_rxin will be adaptively adjusted.
*/
if (!device_property_read_bool(&pdev->dev, "starfive,tx-use-rgmii-clk"))
- plat_dat->fix_mac_speed = starfive_dwmac_fix_mac_speed;
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
dwmac->dev = &pdev->dev;
+ plat_dat->flags |= STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP;
plat_dat->bsp_priv = dwmac;
plat_dat->dma_cfg->dche = true;
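
Several conversions in this series (rv1126, starfive, sophgo, s32) replace a hand-rolled fix_mac_speed() callback with plat->clk_tx_i plus plat->set_clk_tx_rate = stmmac_set_clk_tx_rate, letting the core derive the transmit clock from the negotiated speed. The mapping those removed helpers implemented by hand is the standard RGMII one (1000 Mb/s to 125 MHz, 100 to 25 MHz, 10 to 2.5 MHz), which rgmii_clock() provides. The sketch below only illustrates that idea using the set_clk_tx_rate signature visible in the Rockchip hunk; the actual stmmac_set_clk_tx_rate() lives in the stmmac core and may differ in detail.

	/* Sketch only: the speed -> TX clock mapping, not the core's
	 * actual stmmac_set_clk_tx_rate() implementation.
	 */
	static int example_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
					   phy_interface_t interface, int speed)
	{
		/* rgmii_clock(): 1000 -> 125000000, 100 -> 25000000,
		 * 10 -> 2500000, negative errno otherwise.
		 */
		long rate = rgmii_clock(speed);

		if (rate < 0)
			return rate;

		return clk_set_rate(clk_tx_i, rate);
	}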
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index a6ff02d905a9..f50547b67fbc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -21,17 +21,9 @@
#include "stmmac_platform.h"
-#define DWMAC_125MHZ 125000000
#define DWMAC_50MHZ 50000000
-#define DWMAC_25MHZ 25000000
-#define DWMAC_2_5MHZ 2500000
-#define IS_PHY_IF_MODE_RGMII(iface) (iface == PHY_INTERFACE_MODE_RGMII || \
- iface == PHY_INTERFACE_MODE_RGMII_ID || \
- iface == PHY_INTERFACE_MODE_RGMII_RXID || \
- iface == PHY_INTERFACE_MODE_RGMII_TXID)
-
-#define IS_PHY_IF_MODE_GBIT(iface) (IS_PHY_IF_MODE_RGMII(iface) || \
+#define IS_PHY_IF_MODE_GBIT(iface) (phy_interface_mode_is_rgmii(iface) || \
iface == PHY_INTERFACE_MODE_GMII)
/* STiH4xx register definitions (STiH407/STiH410 families)
@@ -85,13 +77,9 @@
* 001-RGMII
* 010-SGMII
* 100-RMII
+ * These are the DW MAC phy_intf_sel values.
*/
#define MII_PHY_SEL_MASK GENMASK(4, 2)
-#define ETH_PHY_SEL_RMII BIT(4)
-#define ETH_PHY_SEL_SGMII BIT(3)
-#define ETH_PHY_SEL_RGMII BIT(2)
-#define ETH_PHY_SEL_GMII 0x0
-#define ETH_PHY_SEL_MII 0x0
struct sti_dwmac {
phy_interface_t interface; /* MII interface */
@@ -102,21 +90,12 @@ struct sti_dwmac {
int clk_sel_reg; /* GMAC ext clk selection register */
struct regmap *regmap;
bool gmac_en;
- u32 speed;
- void (*fix_retime_src)(void *priv, unsigned int speed, unsigned int mode);
+ int speed;
+ void (*fix_retime_src)(void *priv, int speed, unsigned int mode);
};
struct sti_dwmac_of_data {
- void (*fix_retime_src)(void *priv, unsigned int speed, unsigned int mode);
-};
-
-static u32 phy_intf_sels[] = {
- [PHY_INTERFACE_MODE_MII] = ETH_PHY_SEL_MII,
- [PHY_INTERFACE_MODE_GMII] = ETH_PHY_SEL_GMII,
- [PHY_INTERFACE_MODE_RGMII] = ETH_PHY_SEL_RGMII,
- [PHY_INTERFACE_MODE_RGMII_ID] = ETH_PHY_SEL_RGMII,
- [PHY_INTERFACE_MODE_SGMII] = ETH_PHY_SEL_SGMII,
- [PHY_INTERFACE_MODE_RMII] = ETH_PHY_SEL_RMII,
+ void (*fix_retime_src)(void *priv, int speed, unsigned int mode);
};
enum {
@@ -135,12 +114,12 @@ static u32 stih4xx_tx_retime_val[] = {
| STIH4XX_ETH_SEL_INTERNAL_NOTEXT_PHYCLK,
};
-static void stih4xx_fix_retime_src(void *priv, u32 spd, unsigned int mode)
+static void stih4xx_fix_retime_src(void *priv, int spd, unsigned int mode)
{
struct sti_dwmac *dwmac = priv;
u32 src = dwmac->tx_retime_src;
u32 reg = dwmac->ctrl_reg;
- u32 freq = 0;
+ long freq = 0;
if (dwmac->interface == PHY_INTERFACE_MODE_MII) {
src = TX_RETIME_SRC_TXCLK;
@@ -151,40 +130,44 @@ static void stih4xx_fix_retime_src(void *priv, u32 spd, unsigned int mode)
src = TX_RETIME_SRC_CLKGEN;
freq = DWMAC_50MHZ;
}
- } else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) {
+ } else if (phy_interface_mode_is_rgmii(dwmac->interface)) {
/* On GiGa clk source can be either ext or from clkgen */
- if (spd == SPEED_1000) {
- freq = DWMAC_125MHZ;
- } else {
+ freq = rgmii_clock(spd);
+
+ if (spd != SPEED_1000 && freq > 0)
/* Switch to clkgen for these speeds */
src = TX_RETIME_SRC_CLKGEN;
- if (spd == SPEED_100)
- freq = DWMAC_25MHZ;
- else if (spd == SPEED_10)
- freq = DWMAC_2_5MHZ;
- }
}
- if (src == TX_RETIME_SRC_CLKGEN && freq)
+ if (src == TX_RETIME_SRC_CLKGEN && freq > 0)
clk_set_rate(dwmac->clk, freq);
regmap_update_bits(dwmac->regmap, reg, STIH4XX_RETIME_SRC_MASK,
stih4xx_tx_retime_val[src]);
}
-static int sti_dwmac_set_mode(struct sti_dwmac *dwmac)
+static int sti_set_phy_intf_sel(void *bsp_priv, u8 phy_intf_sel)
{
- struct regmap *regmap = dwmac->regmap;
- int iface = dwmac->interface;
- u32 reg = dwmac->ctrl_reg;
- u32 val;
+ struct sti_dwmac *dwmac = bsp_priv;
+ struct regmap *regmap;
+ u32 reg, val;
+
+ regmap = dwmac->regmap;
+ reg = dwmac->ctrl_reg;
if (dwmac->gmac_en)
regmap_update_bits(regmap, reg, EN_MASK, EN);
- regmap_update_bits(regmap, reg, MII_PHY_SEL_MASK, phy_intf_sels[iface]);
+ if (phy_intf_sel != PHY_INTF_SEL_GMII_MII &&
+ phy_intf_sel != PHY_INTF_SEL_RGMII &&
+ phy_intf_sel != PHY_INTF_SEL_SGMII &&
+ phy_intf_sel != PHY_INTF_SEL_RMII)
+ phy_intf_sel = PHY_INTF_SEL_GMII_MII;
+
+ regmap_update_bits(regmap, reg, MII_PHY_SEL_MASK,
+ FIELD_PREP(MII_PHY_SEL_MASK, phy_intf_sel));
- val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII;
+ val = (dwmac->interface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII;
regmap_update_bits(regmap, reg, ENMII_MASK, val);
dwmac->fix_retime_src(dwmac, dwmac->speed, 0);
@@ -193,7 +176,8 @@ static int sti_dwmac_set_mode(struct sti_dwmac *dwmac)
}
static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
- struct platform_device *pdev)
+ struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat)
{
struct resource *res;
struct device *dev = &pdev->dev;
@@ -207,22 +191,12 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
if (res)
dwmac->clk_sel_reg = res->start;
- regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon");
+ regmap = syscon_regmap_lookup_by_phandle_args(np, "st,syscon",
+ 1, &dwmac->ctrl_reg);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- err = of_property_read_u32_index(np, "st,syscon", 1, &dwmac->ctrl_reg);
- if (err) {
- dev_err(dev, "Can't get sysconfig ctrl offset (%d)\n", err);
- return err;
- }
-
- err = of_get_phy_mode(np, &dwmac->interface);
- if (err && err != -ENODEV) {
- dev_err(dev, "Can't get phy-mode\n");
- return err;
- }
-
+ dwmac->interface = plat_dat->phy_interface;
dwmac->regmap = regmap;
dwmac->gmac_en = of_property_read_bool(np, "st,gmac_en");
dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk");
@@ -255,6 +229,20 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
return 0;
}
+static int sti_dwmac_init(struct device *dev, void *bsp_priv)
+{
+ struct sti_dwmac *dwmac = bsp_priv;
+
+ return clk_prepare_enable(dwmac->clk);
+}
+
+static void sti_dwmac_exit(struct device *dev, void *bsp_priv)
+{
+ struct sti_dwmac *dwmac = bsp_priv;
+
+ clk_disable_unprepare(dwmac->clk);
+}
+
static int sti_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
@@ -281,7 +269,7 @@ static int sti_dwmac_probe(struct platform_device *pdev)
if (!dwmac)
return -ENOMEM;
- ret = sti_dwmac_parse_data(dwmac, pdev);
+ ret = sti_dwmac_parse_data(dwmac, pdev, plat_dat);
if (ret) {
dev_err(&pdev->dev, "Unable to parse OF data\n");
return ret;
@@ -290,62 +278,14 @@ static int sti_dwmac_probe(struct platform_device *pdev)
dwmac->fix_retime_src = data->fix_retime_src;
plat_dat->bsp_priv = dwmac;
+ plat_dat->set_phy_intf_sel = sti_set_phy_intf_sel;
plat_dat->fix_mac_speed = data->fix_retime_src;
+ plat_dat->init = sti_dwmac_init;
+ plat_dat->exit = sti_dwmac_exit;
- ret = clk_prepare_enable(dwmac->clk);
- if (ret)
- return ret;
-
- ret = sti_dwmac_set_mode(dwmac);
- if (ret)
- goto disable_clk;
-
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- goto disable_clk;
-
- return 0;
-
-disable_clk:
- clk_disable_unprepare(dwmac->clk);
-
- return ret;
-}
-
-static void sti_dwmac_remove(struct platform_device *pdev)
-{
- struct sti_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
-
- stmmac_dvr_remove(&pdev->dev);
-
- clk_disable_unprepare(dwmac->clk);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int sti_dwmac_suspend(struct device *dev)
-{
- struct sti_dwmac *dwmac = get_stmmac_bsp_priv(dev);
- int ret = stmmac_suspend(dev);
-
- clk_disable_unprepare(dwmac->clk);
-
- return ret;
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
-static int sti_dwmac_resume(struct device *dev)
-{
- struct sti_dwmac *dwmac = get_stmmac_bsp_priv(dev);
-
- clk_prepare_enable(dwmac->clk);
- sti_dwmac_set_mode(dwmac);
-
- return stmmac_resume(dev);
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(sti_dwmac_pm_ops, sti_dwmac_suspend,
- sti_dwmac_resume);
-
static const struct sti_dwmac_of_data stih4xx_dwmac_data = {
.fix_retime_src = stih4xx_fix_retime_src,
};
@@ -358,10 +298,9 @@ MODULE_DEVICE_TABLE(of, sti_dwmac_match);
static struct platform_driver sti_dwmac_driver = {
.probe = sti_dwmac_probe,
- .remove = sti_dwmac_remove,
.driver = {
.name = "sti-dwmac",
- .pm = &sti_dwmac_pm_ops,
+ .pm = &stmmac_pltfr_pm_ops,
.of_match_table = sti_dwmac_match,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 1e8bac665cc9..e1b260ed4790 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -47,23 +47,18 @@
*------------------------------------------
*/
#define SYSCFG_PMCR_ETH_SEL_MII BIT(20)
-#define SYSCFG_PMCR_ETH_SEL_RGMII BIT(21)
-#define SYSCFG_PMCR_ETH_SEL_RMII BIT(23)
-#define SYSCFG_PMCR_ETH_SEL_GMII 0
+#define SYSCFG_PMCR_PHY_INTF_SEL_MASK GENMASK(23, 21)
#define SYSCFG_MCU_ETH_SEL_MII 0
#define SYSCFG_MCU_ETH_SEL_RMII 1
/* STM32MP2 register definitions */
#define SYSCFG_MP2_ETH_MASK GENMASK(31, 0)
+#define SYSCFG_ETHCR_ETH_SEL_MASK GENMASK(6, 4)
#define SYSCFG_ETHCR_ETH_PTP_CLK_SEL BIT(2)
#define SYSCFG_ETHCR_ETH_CLK_SEL BIT(1)
#define SYSCFG_ETHCR_ETH_REF_CLK_SEL BIT(0)
-#define SYSCFG_ETHCR_ETH_SEL_MII 0
-#define SYSCFG_ETHCR_ETH_SEL_RGMII BIT(4)
-#define SYSCFG_ETHCR_ETH_SEL_RMII BIT(6)
-
/* STM32MPx register definitions
*
* Below table summarizes the clock requirement and clock sources for
@@ -119,7 +114,7 @@ struct stm32_ops {
u32 syscfg_clr_off;
};
-static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac, bool resume)
+static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac)
{
int ret;
@@ -127,11 +122,9 @@ static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac, bool resume)
if (ret)
goto err_clk_tx;
- if (!dwmac->ops->clk_rx_enable_in_suspend || !resume) {
- ret = clk_prepare_enable(dwmac->clk_rx);
- if (ret)
- goto err_clk_rx;
- }
+ ret = clk_prepare_enable(dwmac->clk_rx);
+ if (ret)
+ goto err_clk_rx;
ret = clk_prepare_enable(dwmac->syscfg_clk);
if (ret)
@@ -148,15 +141,14 @@ static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac, bool resume)
err_clk_eth_ck:
clk_disable_unprepare(dwmac->syscfg_clk);
err_syscfg_clk:
- if (!dwmac->ops->clk_rx_enable_in_suspend || !resume)
- clk_disable_unprepare(dwmac->clk_rx);
+ clk_disable_unprepare(dwmac->clk_rx);
err_clk_rx:
clk_disable_unprepare(dwmac->clk_tx);
err_clk_tx:
return ret;
}
-static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat, bool resume)
+static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
{
struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
int ret;
@@ -167,14 +159,14 @@ static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat, bool resume)
return ret;
}
- return stm32_dwmac_clk_enable(dwmac, resume);
+ return stm32_dwmac_clk_enable(dwmac);
}
static int stm32mp1_select_ethck_external(struct plat_stmmacenet_data *plat_dat)
{
struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
- switch (plat_dat->mac_interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
dwmac->enable_eth_ck = dwmac->ext_phyclk;
return 0;
@@ -196,7 +188,7 @@ static int stm32mp1_select_ethck_external(struct plat_stmmacenet_data *plat_dat)
default:
dwmac->enable_eth_ck = false;
dev_err(dwmac->dev, "Mode %s not supported",
- phy_modes(plat_dat->mac_interface));
+ phy_modes(plat_dat->phy_interface));
return -EINVAL;
}
}
@@ -209,7 +201,7 @@ static int stm32mp1_validate_ethck_rate(struct plat_stmmacenet_data *plat_dat)
if (!dwmac->enable_eth_ck)
return 0;
- switch (plat_dat->mac_interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
case PHY_INTERFACE_MODE_GMII:
if (clk_rate == ETH_CK_F_25M)
@@ -231,17 +223,20 @@ static int stm32mp1_validate_ethck_rate(struct plat_stmmacenet_data *plat_dat)
}
dev_err(dwmac->dev, "Mode %s does not match eth-ck frequency %d Hz",
- phy_modes(plat_dat->mac_interface), clk_rate);
+ phy_modes(plat_dat->phy_interface), clk_rate);
return -EINVAL;
}
-static int stm32mp1_configure_pmcr(struct plat_stmmacenet_data *plat_dat)
+static int stm32mp1_configure_pmcr(struct plat_stmmacenet_data *plat_dat,
+ u8 phy_intf_sel)
{
struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
u32 reg = dwmac->mode_reg;
- int val = 0;
+ int val;
- switch (plat_dat->mac_interface) {
+ val = FIELD_PREP(SYSCFG_PMCR_PHY_INTF_SEL_MASK, phy_intf_sel);
+
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
/*
* STM32MP15xx supports both MII and GMII, STM32MP13xx MII only.
@@ -253,12 +248,10 @@ static int stm32mp1_configure_pmcr(struct plat_stmmacenet_data *plat_dat)
val |= SYSCFG_PMCR_ETH_SEL_MII;
break;
case PHY_INTERFACE_MODE_GMII:
- val = SYSCFG_PMCR_ETH_SEL_GMII;
if (dwmac->enable_eth_ck)
val |= SYSCFG_PMCR_ETH_CLK_SEL;
break;
case PHY_INTERFACE_MODE_RMII:
- val = SYSCFG_PMCR_ETH_SEL_RMII;
if (dwmac->enable_eth_ck)
val |= SYSCFG_PMCR_ETH_REF_CLK_SEL;
break;
@@ -266,18 +259,17 @@ static int stm32mp1_configure_pmcr(struct plat_stmmacenet_data *plat_dat)
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
- val = SYSCFG_PMCR_ETH_SEL_RGMII;
if (dwmac->enable_eth_ck)
val |= SYSCFG_PMCR_ETH_CLK_SEL;
break;
default:
dev_err(dwmac->dev, "Mode %s not supported",
- phy_modes(plat_dat->mac_interface));
+ phy_modes(plat_dat->phy_interface));
/* Do not manage others interfaces */
return -EINVAL;
}
- dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->mac_interface));
+ dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->phy_interface));
/* Shift value at correct ethernet MAC offset in SYSCFG_PMCSETR */
val <<= ffs(dwmac->mode_mask) - ffs(SYSCFG_MP1_ETH_MASK);
@@ -291,18 +283,20 @@ static int stm32mp1_configure_pmcr(struct plat_stmmacenet_data *plat_dat)
dwmac->mode_mask, val);
}
-static int stm32mp2_configure_syscfg(struct plat_stmmacenet_data *plat_dat)
+static int stm32mp2_configure_syscfg(struct plat_stmmacenet_data *plat_dat,
+ u8 phy_intf_sel)
{
struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
u32 reg = dwmac->mode_reg;
- int val = 0;
+ int val;
- switch (plat_dat->mac_interface) {
+ val = FIELD_PREP(SYSCFG_ETHCR_ETH_SEL_MASK, phy_intf_sel);
+
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
/* ETH_REF_CLK_SEL bit in SYSCFG register is not applicable in MII mode */
break;
case PHY_INTERFACE_MODE_RMII:
- val = SYSCFG_ETHCR_ETH_SEL_RMII;
if (dwmac->enable_eth_ck) {
/* Internal clock ETH_CLK of 50MHz from RCC is used */
val |= SYSCFG_ETHCR_ETH_REF_CLK_SEL;
@@ -312,8 +306,6 @@ static int stm32mp2_configure_syscfg(struct plat_stmmacenet_data *plat_dat)
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
- val = SYSCFG_ETHCR_ETH_SEL_RGMII;
- fallthrough;
case PHY_INTERFACE_MODE_GMII:
if (dwmac->enable_eth_ck) {
/* Internal clock ETH_CLK of 125MHz from RCC is used */
@@ -322,12 +314,12 @@ static int stm32mp2_configure_syscfg(struct plat_stmmacenet_data *plat_dat)
break;
default:
dev_err(dwmac->dev, "Mode %s not supported",
- phy_modes(plat_dat->mac_interface));
+ phy_modes(plat_dat->phy_interface));
/* Do not manage others interfaces */
return -EINVAL;
}
- dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->mac_interface));
+ dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->phy_interface));
/* Select PTP (IEEE1588) clock selection from RCC (ck_ker_ethxptp) */
val |= SYSCFG_ETHCR_ETH_PTP_CLK_SEL;
@@ -340,7 +332,7 @@ static int stm32mp2_configure_syscfg(struct plat_stmmacenet_data *plat_dat)
static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
{
struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
- int ret;
+ int phy_intf_sel, ret;
ret = stm32mp1_select_ethck_external(plat_dat);
if (ret)
@@ -350,10 +342,19 @@ static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
if (ret)
return ret;
+ phy_intf_sel = stmmac_get_phy_intf_sel(plat_dat->phy_interface);
+ if (phy_intf_sel != PHY_INTF_SEL_GMII_MII &&
+ phy_intf_sel != PHY_INTF_SEL_RGMII &&
+ phy_intf_sel != PHY_INTF_SEL_RMII) {
+ dev_err(dwmac->dev, "Mode %s not supported\n",
+ phy_modes(plat_dat->phy_interface));
+ return phy_intf_sel < 0 ? phy_intf_sel : -EINVAL;
+ }
+
if (!dwmac->ops->is_mp2)
- return stm32mp1_configure_pmcr(plat_dat);
+ return stm32mp1_configure_pmcr(plat_dat, phy_intf_sel);
else
- return stm32mp2_configure_syscfg(plat_dat);
+ return stm32mp2_configure_syscfg(plat_dat, phy_intf_sel);
}
static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat)
@@ -362,7 +363,7 @@ static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat)
u32 reg = dwmac->mode_reg;
int val;
- switch (plat_dat->mac_interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
val = SYSCFG_MCU_ETH_SEL_MII;
break;
@@ -371,23 +372,21 @@ static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat)
break;
default:
dev_err(dwmac->dev, "Mode %s not supported",
- phy_modes(plat_dat->mac_interface));
+ phy_modes(plat_dat->phy_interface));
/* Do not manage others interfaces */
return -EINVAL;
}
- dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->mac_interface));
+ dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->phy_interface));
return regmap_update_bits(dwmac->regmap, reg,
SYSCFG_MCU_ETH_MASK, val << 23);
}
-static void stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac, bool suspend)
+static void stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac)
{
clk_disable_unprepare(dwmac->clk_tx);
- if (!dwmac->ops->clk_rx_enable_in_suspend || !suspend)
- clk_disable_unprepare(dwmac->clk_rx);
-
+ clk_disable_unprepare(dwmac->clk_rx);
clk_disable_unprepare(dwmac->syscfg_clk);
if (dwmac->enable_eth_ck)
clk_disable_unprepare(dwmac->clk_eth_ck);
@@ -419,16 +418,11 @@ static int stm32_dwmac_parse_data(struct stm32_dwmac *dwmac,
}
/* Get mode register */
- dwmac->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon");
+ dwmac->regmap = syscon_regmap_lookup_by_phandle_args(np, "st,syscon",
+ 1, &dwmac->mode_reg);
if (IS_ERR(dwmac->regmap))
return PTR_ERR(dwmac->regmap);
- err = of_property_read_u32_index(np, "st,syscon", 1, &dwmac->mode_reg);
- if (err) {
- dev_err(dev, "Can't get sysconfig mode offset (%d)\n", err);
- return err;
- }
-
if (dwmac->ops->is_mp2)
return 0;
@@ -508,6 +502,26 @@ static int stm32mp1_parse_data(struct stm32_dwmac *dwmac,
return err;
}
+static int stm32_dwmac_suspend(struct device *dev, void *bsp_priv)
+{
+ struct stm32_dwmac *dwmac = bsp_priv;
+
+ stm32_dwmac_clk_disable(dwmac);
+
+ return dwmac->ops->suspend ? dwmac->ops->suspend(dwmac) : 0;
+}
+
+static int stm32_dwmac_resume(struct device *dev, void *bsp_priv)
+{
+ struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(dev));
+ struct stm32_dwmac *dwmac = bsp_priv;
+
+ if (dwmac->ops->resume)
+ dwmac->ops->resume(dwmac);
+
+ return stm32_dwmac_init(priv->plat);
+}
+
static int stm32_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
@@ -543,20 +557,37 @@ static int stm32_dwmac_probe(struct platform_device *pdev)
return ret;
}
+ plat_dat->flags |= STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP;
plat_dat->bsp_priv = dwmac;
+ plat_dat->suspend = stm32_dwmac_suspend;
+ plat_dat->resume = stm32_dwmac_resume;
- ret = stm32_dwmac_init(plat_dat, false);
+ ret = stm32_dwmac_init(plat_dat);
if (ret)
return ret;
+ /* If this platform requires the clock to be running in suspend,
+ * prepare and enable the receive clock an additional time to keep
+ * it running.
+ */
+ if (dwmac->ops->clk_rx_enable_in_suspend) {
+ ret = clk_prepare_enable(dwmac->clk_rx);
+ if (ret)
+ goto err_clk_disable;
+ }
+
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
- goto err_clk_disable;
+ goto err_clk_disable_suspend;
return 0;
+err_clk_disable_suspend:
+ if (dwmac->ops->clk_rx_enable_in_suspend)
+ clk_disable_unprepare(dwmac->clk_rx);
+
err_clk_disable:
- stm32_dwmac_clk_disable(dwmac, false);
+ stm32_dwmac_clk_disable(dwmac);
return ret;
}
@@ -569,7 +600,15 @@ static void stm32_dwmac_remove(struct platform_device *pdev)
stmmac_dvr_remove(&pdev->dev);
- stm32_dwmac_clk_disable(dwmac, false);
+ /* If this platform requires the clock to be running in suspend,
+ * we need to disable and unprepare the receive clock an additional
+ * time to balance the extra clk_prepare_enable() in the probe
+ * function.
+ */
+ if (dwmac->ops->clk_rx_enable_in_suspend)
+ clk_disable_unprepare(dwmac->clk_rx);
+
+ stm32_dwmac_clk_disable(dwmac);
if (dwmac->irq_pwr_wakeup >= 0) {
dev_pm_clear_wake_irq(&pdev->dev);
@@ -587,50 +626,6 @@ static void stm32mp1_resume(struct stm32_dwmac *dwmac)
clk_disable_unprepare(dwmac->clk_ethstp);
}
-#ifdef CONFIG_PM_SLEEP
-static int stm32_dwmac_suspend(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- struct stm32_dwmac *dwmac = priv->plat->bsp_priv;
-
- int ret;
-
- ret = stmmac_suspend(dev);
- if (ret)
- return ret;
-
- stm32_dwmac_clk_disable(dwmac, true);
-
- if (dwmac->ops->suspend)
- ret = dwmac->ops->suspend(dwmac);
-
- return ret;
-}
-
-static int stm32_dwmac_resume(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- struct stm32_dwmac *dwmac = priv->plat->bsp_priv;
- int ret;
-
- if (dwmac->ops->resume)
- dwmac->ops->resume(dwmac);
-
- ret = stm32_dwmac_init(priv->plat, true);
- if (ret)
- return ret;
-
- ret = stmmac_resume(dev);
-
- return ret;
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(stm32_dwmac_pm_ops,
- stm32_dwmac_suspend, stm32_dwmac_resume);
-
static struct stm32_ops stm32mcu_dwmac_data = {
.set_mode = stm32mcu_set_mode
};
@@ -678,7 +673,7 @@ static struct platform_driver stm32_dwmac_driver = {
.remove = stm32_dwmac_remove,
.driver = {
.name = "stm32-dwmac",
- .pm = &stm32_dwmac_pm_ops,
+ .pm = &stmmac_simple_pm_ops,
.of_match_table = stm32_dwmac_match,
},
};
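
The stm32 hunks above lean on the common clock framework's enable refcounting: the extra clk_prepare_enable() taken in probe keeps clk_rx running while the shared suspend path drops only one reference. Below is a minimal editorial sketch of that accounting, using a plain counter in place of the real clk API; the function names are local stand-ins, and the assumption that stm32_dwmac_init() takes the first clk_rx reference is inferred, since that part of the function is outside this hunk.

#include <stdio.h>

static int clk_rx_refcount;	/* stand-in for the CCF enable count */

static void clk_prepare_enable(void)    { clk_rx_refcount++; }
static void clk_disable_unprepare(void) { clk_rx_refcount--; }

int main(void)
{
	clk_prepare_enable();		/* stm32_dwmac_init(): normal RX clock enable */
	clk_prepare_enable();		/* probe(): extra reference when
					 * clk_rx_enable_in_suspend is set */

	clk_disable_unprepare();	/* suspend: stm32_dwmac_clk_disable() */
	printf("in suspend: refcount = %d (clock stays on)\n", clk_rx_refcount);

	clk_prepare_enable();		/* resume: stm32_dwmac_init() re-enables */
	clk_disable_unprepare();	/* remove(): drop the extra reference */
	clk_disable_unprepare();	/* remove(): stm32_dwmac_clk_disable() */
	printf("after remove: refcount = %d (clock off)\n", clk_rx_refcount);
	return 0;
}

The point of the model is that the clock only actually turns off when the count reaches zero, which is exactly why remove() has to drop the extra reference before calling stm32_dwmac_clk_disable().
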
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun55i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun55i.c
new file mode 100644
index 000000000000..862df173d963
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun55i.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * dwmac-sun55i.c - Allwinner sun55i GMAC200 specific glue layer
+ *
+ * Copyright (C) 2025 Chen-Yu Tsai <wens@csie.org>
+ *
+ * syscon parts taken from dwmac-sun8i.c, which is
+ *
+ * Copyright (C) 2017 Corentin Labbe <clabbe.montjoie@gmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/stmmac.h>
+
+#include "stmmac.h"
+#include "stmmac_platform.h"
+
+#define SYSCON_REG 0x34
+
+/* RMII specific bits */
+#define SYSCON_RMII_EN BIT(13) /* 1: enable RMII (overrides EPIT) */
+/* Generic system control EMAC_CLK bits */
+#define SYSCON_ETXDC_MASK GENMASK(12, 10)
+#define SYSCON_ERXDC_MASK GENMASK(9, 5)
+/* EMAC PHY Interface Type */
+#define SYSCON_EPIT BIT(2) /* 1: RGMII, 0: MII */
+#define SYSCON_ETCS_MASK GENMASK(1, 0)
+#define SYSCON_ETCS_MII 0x0
+#define SYSCON_ETCS_EXT_GMII 0x1
+#define SYSCON_ETCS_INT_GMII 0x2
+
+static int sun55i_gmac200_set_syscon(struct device *dev,
+ struct plat_stmmacenet_data *plat)
+{
+ struct device_node *node = dev->of_node;
+ struct regmap *regmap;
+ u32 val, reg = 0;
+ int ret;
+
+ regmap = syscon_regmap_lookup_by_phandle(node, "syscon");
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap), "Unable to map syscon\n");
+
+ if (!of_property_read_u32(node, "tx-internal-delay-ps", &val)) {
+ if (val % 100)
+ return dev_err_probe(dev, -EINVAL,
+ "tx-delay must be a multiple of 100ps\n");
+ val /= 100;
+ dev_dbg(dev, "set tx-delay to %x\n", val);
+ if (!FIELD_FIT(SYSCON_ETXDC_MASK, val))
+ return dev_err_probe(dev, -EINVAL,
+ "TX clock delay exceeds maximum (%u00ps > %lu00ps)\n",
+ val, FIELD_MAX(SYSCON_ETXDC_MASK));
+
+ reg |= FIELD_PREP(SYSCON_ETXDC_MASK, val);
+ }
+
+ if (!of_property_read_u32(node, "rx-internal-delay-ps", &val)) {
+ if (val % 100)
+ return dev_err_probe(dev, -EINVAL,
+ "rx-delay must be a multiple of 100ps\n");
+ val /= 100;
+ dev_dbg(dev, "set rx-delay to %x\n", val);
+ if (!FIELD_FIT(SYSCON_ERXDC_MASK, val))
+ return dev_err_probe(dev, -EINVAL,
+ "RX clock delay exceeds maximum (%u00ps > %lu00ps)\n",
+ val, FIELD_MAX(SYSCON_ERXDC_MASK));
+
+ reg |= FIELD_PREP(SYSCON_ERXDC_MASK, val);
+ }
+
+ switch (plat->phy_interface) {
+ case PHY_INTERFACE_MODE_MII:
+ /* default */
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ reg |= SYSCON_EPIT | SYSCON_ETCS_INT_GMII;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ reg |= SYSCON_RMII_EN;
+ break;
+ default:
+ return dev_err_probe(dev, -EINVAL, "Unsupported interface mode: %s",
+ phy_modes(plat->phy_interface));
+ }
+
+ ret = regmap_write(regmap, SYSCON_REG, reg);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to write to syscon\n");
+
+ return 0;
+}
+
+static int sun55i_gmac200_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct device *dev = &pdev->dev;
+ struct clk *clk;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ /* The vendor BSP disables Split Header (SPH), so disable it here too */
+ plat_dat->flags |= STMMAC_FLAG_SPH_DISABLE;
+ plat_dat->host_dma_width = 32;
+
+ ret = sun55i_gmac200_set_syscon(dev, plat_dat);
+ if (ret)
+ return ret;
+
+ clk = devm_clk_get_enabled(dev, "mbus");
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk),
+ "Failed to get or enable MBUS clock\n");
+
+ ret = devm_regulator_get_enable_optional(dev, "phy");
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get or enable PHY supply\n");
+
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
+}
+
+static const struct of_device_id sun55i_gmac200_match[] = {
+ { .compatible = "allwinner,sun55i-a523-gmac200" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sun55i_gmac200_match);
+
+static struct platform_driver sun55i_gmac200_driver = {
+ .probe = sun55i_gmac200_probe,
+ .driver = {
+ .name = "dwmac-sun55i",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = sun55i_gmac200_match,
+ },
+};
+module_platform_driver(sun55i_gmac200_driver);
+
+MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
+MODULE_DESCRIPTION("Allwinner sun55i GMAC200 specific glue layer");
+MODULE_LICENSE("GPL");
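
As a quick sanity check of the delay handling in sun55i_gmac200_set_syscon() above: the devicetree delays are given in picoseconds, divided down to 100 ps steps and packed into SYSCON_REG. A small editorial example with hypothetical devicetree values (600 ps TX, 1500 ps RX), using plain shifts in place of FIELD_PREP():

#include <stdint.h>
#include <stdio.h>

/* Shift positions of the fields defined above:
 * SYSCON_ETXDC_MASK = GENMASK(12, 10), SYSCON_ERXDC_MASK = GENMASK(9, 5). */
#define ETXDC_SHIFT 10
#define ERXDC_SHIFT 5

int main(void)
{
	uint32_t tx_ps = 600;	/* hypothetical tx-internal-delay-ps */
	uint32_t rx_ps = 1500;	/* hypothetical rx-internal-delay-ps */
	uint32_t reg = 0;

	/* 100 ps granularity: 600 ps -> 6 (fits in 3 bits, max 7),
	 * 1500 ps -> 15 (fits in 5 bits, max 31). */
	reg |= (tx_ps / 100) << ETXDC_SHIFT;
	reg |= (rx_ps / 100) << ERXDC_SHIFT;

	printf("SYSCON_REG delay bits: 0x%04x\n", reg);	/* 0x19e0 */
	return 0;
}

For an RGMII PHY the driver then ORs in SYSCON_EPIT | SYSCON_ETCS_INT_GMII on top of these delay bits before the single regmap_write() to SYSCON_REG.
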
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 4b7b2582a120..8aa496ac85cc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -31,10 +31,6 @@
*/
/* struct emac_variant - Describe dwmac-sun8i hardware variant
- * @default_syscon_value: The default value of the EMAC register in syscon
- * This value is used for disabling properly EMAC
- * and used as a good starting value in case of the
- * boot process(uboot) leave some stuff.
* @syscon_field reg_field for the syscon's gmac register
* @soc_has_internal_phy: Does the MAC embed an internal PHY
* @support_mii: Does the MAC handle MII
@@ -48,7 +44,6 @@
* value of zero indicates this is not supported.
*/
struct emac_variant {
- u32 default_syscon_value;
const struct reg_field *syscon_field;
bool soc_has_internal_phy;
bool support_mii;
@@ -94,7 +89,6 @@ static const struct reg_field sun8i_ccu_reg_field = {
};
static const struct emac_variant emac_variant_h3 = {
- .default_syscon_value = 0x58000,
.syscon_field = &sun8i_syscon_reg_field,
.soc_has_internal_phy = true,
.support_mii = true,
@@ -105,14 +99,12 @@ static const struct emac_variant emac_variant_h3 = {
};
static const struct emac_variant emac_variant_v3s = {
- .default_syscon_value = 0x38000,
.syscon_field = &sun8i_syscon_reg_field,
.soc_has_internal_phy = true,
.support_mii = true
};
static const struct emac_variant emac_variant_a83t = {
- .default_syscon_value = 0,
.syscon_field = &sun8i_syscon_reg_field,
.soc_has_internal_phy = false,
.support_mii = true,
@@ -122,7 +114,6 @@ static const struct emac_variant emac_variant_a83t = {
};
static const struct emac_variant emac_variant_r40 = {
- .default_syscon_value = 0,
.syscon_field = &sun8i_ccu_reg_field,
.support_mii = true,
.support_rgmii = true,
@@ -130,7 +121,6 @@ static const struct emac_variant emac_variant_r40 = {
};
static const struct emac_variant emac_variant_a64 = {
- .default_syscon_value = 0,
.syscon_field = &sun8i_syscon_reg_field,
.soc_has_internal_phy = false,
.support_mii = true,
@@ -141,7 +131,6 @@ static const struct emac_variant emac_variant_a64 = {
};
static const struct emac_variant emac_variant_h6 = {
- .default_syscon_value = 0x50000,
.syscon_field = &sun8i_syscon_reg_field,
/* The "Internal PHY" of H6 is not on the die. It's on the
* co-packaged AC200 chip instead.
@@ -582,16 +571,16 @@ static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = {
static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv);
-static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
+static int sun8i_dwmac_init(struct device *dev, void *priv)
{
- struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
struct sunxi_priv_data *gmac = priv;
int ret;
if (gmac->regulator) {
ret = regulator_enable(gmac->regulator);
if (ret) {
- dev_err(&pdev->dev, "Fail to enable regulator\n");
+ dev_err(dev, "Fail to enable regulator\n");
return ret;
}
}
@@ -933,25 +922,11 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
struct sunxi_priv_data *gmac = plat->bsp_priv;
struct device_node *node = dev->of_node;
int ret;
- u32 reg, val;
-
- ret = regmap_field_read(gmac->regmap_field, &val);
- if (ret) {
- dev_err(dev, "Fail to read from regmap field.\n");
- return ret;
- }
-
- reg = gmac->variant->default_syscon_value;
- if (reg != val)
- dev_warn(dev,
- "Current syscon value is not the default %x (expect %x)\n",
- val, reg);
+ u32 reg = 0, val;
if (gmac->variant->soc_has_internal_phy) {
if (of_property_read_bool(node, "allwinner,leds-active-low"))
reg |= H3_EPHY_LED_POL;
- else
- reg &= ~H3_EPHY_LED_POL;
/* Force EPHY xtal frequency to 24MHz. */
reg |= H3_EPHY_CLK_SEL;
@@ -964,12 +939,7 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
/* of_mdio_parse_addr returns a valid (0 ~ 31) PHY
* address. No need to mask it again.
*/
- reg |= 1 << H3_EPHY_ADDR_SHIFT;
- } else {
- /* For SoCs without internal PHY the PHY selection bit should be
- * set to 0 (external PHY).
- */
- reg &= ~H3_EPHY_SELECT;
+ reg |= ret << H3_EPHY_ADDR_SHIFT;
}
if (!of_property_read_u32(node, "allwinner,tx-delay-ps", &val)) {
@@ -980,8 +950,6 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
val /= 100;
dev_dbg(dev, "set tx-delay to %x\n", val);
if (val <= gmac->variant->tx_delay_max) {
- reg &= ~(gmac->variant->tx_delay_max <<
- SYSCON_ETXDC_SHIFT);
reg |= (val << SYSCON_ETXDC_SHIFT);
} else {
dev_err(dev, "Invalid TX clock delay: %d\n",
@@ -998,8 +966,6 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
val /= 100;
dev_dbg(dev, "set rx-delay to %x\n", val);
if (val <= gmac->variant->rx_delay_max) {
- reg &= ~(gmac->variant->rx_delay_max <<
- SYSCON_ERXDC_SHIFT);
reg |= (val << SYSCON_ERXDC_SHIFT);
} else {
dev_err(dev, "Invalid RX clock delay: %d\n",
@@ -1008,12 +974,7 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
}
}
- /* Clear interface mode bits */
- reg &= ~(SYSCON_ETCS_MASK | SYSCON_EPIT);
- if (gmac->variant->support_rmii)
- reg &= ~SYSCON_RMII_EN;
-
- switch (plat->mac_interface) {
+ switch (plat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
/* default */
break;
@@ -1028,7 +989,7 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
break;
default:
dev_err(dev, "Unsupported interface mode: %s",
- phy_modes(plat->mac_interface));
+ phy_modes(plat->phy_interface));
return -EINVAL;
}
@@ -1039,12 +1000,12 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
static void sun8i_dwmac_unset_syscon(struct sunxi_priv_data *gmac)
{
- u32 reg = gmac->variant->default_syscon_value;
-
- regmap_field_write(gmac->regmap_field, reg);
+ if (gmac->variant->soc_has_internal_phy)
+ regmap_field_write(gmac->regmap_field,
+ (H3_EPHY_SHUTDOWN | H3_EPHY_SELECT));
}
-static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
+static void sun8i_dwmac_exit(struct device *dev, void *priv)
{
struct sunxi_priv_data *gmac = priv;
@@ -1079,15 +1040,10 @@ static const struct stmmac_ops sun8i_dwmac_ops = {
.set_mac_loopback = sun8i_dwmac_set_mac_loopback,
};
-static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
+static int sun8i_dwmac_setup(void *ppriv, struct mac_device_info *mac)
{
- struct mac_device_info *mac;
struct stmmac_priv *priv = ppriv;
- mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
- if (!mac)
- return NULL;
-
mac->pcsr = priv->ioaddr;
mac->mac = &sun8i_dwmac_ops;
mac->dma = &sun8i_dwmac_dma_ops;
@@ -1118,7 +1074,7 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
/* Synopsys Id is not available */
priv->synopsys_id = 0;
- return mac;
+ return 0;
}
static struct regmap *sun8i_dwmac_get_syscon_from_dev(struct device_node *node)
@@ -1155,11 +1111,10 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
struct stmmac_resources stmmac_res;
struct sunxi_priv_data *gmac;
struct device *dev = &pdev->dev;
- phy_interface_t interface;
- int ret;
struct stmmac_priv *priv;
struct net_device *ndev;
struct regmap *regmap;
+ int ret;
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
if (ret)
@@ -1219,10 +1174,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
return ret;
}
- ret = of_get_phy_mode(dev->of_node, &interface);
- if (ret)
- return -EINVAL;
-
plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
@@ -1230,14 +1181,13 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
/* platform data specifying hardware features and callbacks.
* hardware features were copied from Allwinner drivers.
*/
- plat_dat->mac_interface = interface;
plat_dat->rx_coe = STMMAC_RX_COE_TYPE2;
plat_dat->tx_coe = 1;
plat_dat->flags |= STMMAC_FLAG_HAS_SUN8I;
plat_dat->bsp_priv = gmac;
plat_dat->init = sun8i_dwmac_init;
plat_dat->exit = sun8i_dwmac_exit;
- plat_dat->setup = sun8i_dwmac_setup;
+ plat_dat->mac_setup = sun8i_dwmac_setup;
plat_dat->tx_fifo_size = 4096;
plat_dat->rx_fifo_size = 16384;
@@ -1245,14 +1195,10 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = sun8i_dwmac_init(pdev, plat_dat->bsp_priv);
+ ret = stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
if (ret)
goto dwmac_syscon;
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- goto dwmac_exit;
-
ndev = dev_get_drvdata(&pdev->dev);
priv = netdev_priv(ndev);
@@ -1289,9 +1235,7 @@ dwmac_mux:
clk_put(gmac->ephy_clk);
dwmac_remove:
pm_runtime_put_noidle(&pdev->dev);
- stmmac_dvr_remove(&pdev->dev);
-dwmac_exit:
- sun8i_dwmac_exit(pdev, gmac);
+ stmmac_pltfr_remove(pdev);
dwmac_syscon:
sun8i_dwmac_unset_syscon(gmac);
@@ -1321,7 +1265,7 @@ static void sun8i_dwmac_shutdown(struct platform_device *pdev)
struct stmmac_priv *priv = netdev_priv(ndev);
struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
- sun8i_dwmac_exit(pdev, gmac);
+ sun8i_dwmac_exit(&pdev->dev, gmac);
}
static const struct of_device_id sun8i_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 9ae318436c4a..52593ba3a3a3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -27,7 +27,7 @@ struct sunxi_priv_data {
#define SUN7I_GMAC_GMII_RGMII_RATE 125000000
#define SUN7I_GMAC_MII_RATE 25000000
-static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
+static int sun7i_gmac_init(struct device *dev, void *priv)
{
struct sunxi_priv_data *gmac = priv;
int ret = 0;
@@ -58,7 +58,7 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
return ret;
}
-static void sun7i_gmac_exit(struct platform_device *pdev, void *priv)
+static void sun7i_gmac_exit(struct device *dev, void *priv)
{
struct sunxi_priv_data *gmac = priv;
@@ -72,28 +72,28 @@ static void sun7i_gmac_exit(struct platform_device *pdev, void *priv)
regulator_disable(gmac->regulator);
}
-static void sun7i_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static int sun7i_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
- struct sunxi_priv_data *gmac = priv;
-
- /* only GMII mode requires us to reconfigure the clock lines */
- if (gmac->interface != PHY_INTERFACE_MODE_GMII)
- return;
-
- if (gmac->clk_enabled) {
- clk_disable(gmac->tx_clk);
- gmac->clk_enabled = 0;
- }
- clk_unprepare(gmac->tx_clk);
-
- if (speed == 1000) {
- clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE);
- clk_prepare_enable(gmac->tx_clk);
- gmac->clk_enabled = 1;
- } else {
- clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE);
- clk_prepare(gmac->tx_clk);
+ struct sunxi_priv_data *gmac = bsp_priv;
+
+ if (interface == PHY_INTERFACE_MODE_GMII) {
+ if (gmac->clk_enabled) {
+ clk_disable(gmac->tx_clk);
+ gmac->clk_enabled = 0;
+ }
+ clk_unprepare(gmac->tx_clk);
+
+ if (speed == 1000) {
+ clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE);
+ clk_prepare_enable(gmac->tx_clk);
+ gmac->clk_enabled = 1;
+ } else {
+ clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE);
+ clk_prepare(gmac->tx_clk);
+ }
}
+ return 0;
}
static int sun7i_gmac_probe(struct platform_device *pdev)
@@ -116,11 +116,7 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
if (!gmac)
return -ENOMEM;
- ret = of_get_phy_mode(dev->of_node, &gmac->interface);
- if (ret && ret != -ENODEV) {
- dev_err(dev, "Can't get phy-mode\n");
- return ret;
- }
+ gmac->interface = plat_dat->phy_interface;
gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
if (IS_ERR(gmac->tx_clk)) {
@@ -140,28 +136,15 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
/* platform data specifying hardware features and callbacks.
* hardware features were copied from Allwinner drivers. */
plat_dat->tx_coe = 1;
- plat_dat->has_gmac = true;
+ plat_dat->core_type = DWMAC_CORE_GMAC;
plat_dat->bsp_priv = gmac;
plat_dat->init = sun7i_gmac_init;
plat_dat->exit = sun7i_gmac_exit;
- plat_dat->fix_mac_speed = sun7i_fix_speed;
+ plat_dat->set_clk_tx_rate = sun7i_set_clk_tx_rate;
plat_dat->tx_fifo_size = 4096;
plat_dat->rx_fifo_size = 16384;
- ret = sun7i_gmac_init(pdev, plat_dat->bsp_priv);
- if (ret)
- return ret;
-
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- goto err_gmac_exit;
-
- return 0;
-
-err_gmac_exit:
- sun7i_gmac_exit(pdev, plat_dat->bsp_priv);
-
- return ret;
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
static const struct of_device_id sun7i_dwmac_match[] = {
@@ -172,7 +155,6 @@ MODULE_DEVICE_TABLE(of, sun7i_dwmac_match);
static struct platform_driver sun7i_dwmac_driver = {
.probe = sun7i_gmac_probe,
- .remove = stmmac_pltfr_remove,
.driver = {
.name = "sun7i-dwmac",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
index 3827997d2132..d765acbe3754 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/module.h>
@@ -19,6 +20,8 @@ struct tegra_mgbe {
struct reset_control *rst_mac;
struct reset_control *rst_pcs;
+ u32 iommu_sid;
+
void __iomem *hv;
void __iomem *regs;
void __iomem *xpcs;
@@ -50,7 +53,6 @@ struct tegra_mgbe {
#define MGBE_WRAP_COMMON_INTR_ENABLE 0x8704
#define MAC_SBD_INTR BIT(2)
#define MGBE_WRAP_AXI_ASID0_CTRL 0x8400
-#define MGBE_SID 0x6
static int __maybe_unused tegra_mgbe_suspend(struct device *dev)
{
@@ -84,7 +86,7 @@ static int __maybe_unused tegra_mgbe_resume(struct device *dev)
writel(MAC_SBD_INTR, mgbe->regs + MGBE_WRAP_COMMON_INTR_ENABLE);
/* Program SID */
- writel(MGBE_SID, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
+ writel(mgbe->iommu_sid, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_STATUS);
if ((value & XPCS_WRAP_UPHY_STATUS_TX_P_UP) == 0) {
@@ -241,6 +243,12 @@ static int tegra_mgbe_probe(struct platform_device *pdev)
if (IS_ERR(mgbe->xpcs))
return PTR_ERR(mgbe->xpcs);
+ /* get controller's stream id from iommu property in device tree */
+ if (!tegra_dev_iommu_get_stream_id(mgbe->dev, &mgbe->iommu_sid)) {
+ dev_err(mgbe->dev, "failed to get iommu stream id\n");
+ return -EINVAL;
+ }
+
res.addr = mgbe->regs;
res.irq = irq;
@@ -300,7 +308,7 @@ static int tegra_mgbe_probe(struct platform_device *pdev)
goto disable_clks;
}
- plat->has_xgmac = 1;
+ plat->core_type = DWMAC_CORE_XGMAC;
plat->flags |= STMMAC_FLAG_TSO_EN;
plat->pmt = 1;
plat->bsp_priv = mgbe;
@@ -346,7 +354,7 @@ static int tegra_mgbe_probe(struct platform_device *pdev)
writel(MAC_SBD_INTR, mgbe->regs + MGBE_WRAP_COMMON_INTR_ENABLE);
/* Program SID */
- writel(MGBE_SID, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
+ writel(mgbe->iommu_sid, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
plat->flags |= STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
new file mode 100644
index 000000000000..e291028ba56e
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * T-HEAD DWMAC platform driver
+ *
+ * Copyright (C) 2021 Alibaba Group Holding Limited.
+ * Copyright (C) 2023 Jisheng Zhang <jszhang@kernel.org>
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/platform_device.h>
+
+#include "stmmac_platform.h"
+
+#define GMAC_CLK_EN 0x00
+#define GMAC_TX_CLK_EN BIT(1)
+#define GMAC_TX_CLK_N_EN BIT(2)
+#define GMAC_TX_CLK_OUT_EN BIT(3)
+#define GMAC_RX_CLK_EN BIT(4)
+#define GMAC_RX_CLK_N_EN BIT(5)
+#define GMAC_EPHY_REF_CLK_EN BIT(6)
+#define GMAC_RXCLK_DELAY_CTRL 0x04
+#define GMAC_RXCLK_BYPASS BIT(15)
+#define GMAC_RXCLK_INVERT BIT(14)
+#define GMAC_RXCLK_DELAY GENMASK(4, 0)
+#define GMAC_TXCLK_DELAY_CTRL 0x08
+#define GMAC_TXCLK_BYPASS BIT(15)
+#define GMAC_TXCLK_INVERT BIT(14)
+#define GMAC_TXCLK_DELAY GENMASK(4, 0)
+#define GMAC_PLLCLK_DIV 0x0c
+#define GMAC_PLLCLK_DIV_EN BIT(31)
+#define GMAC_PLLCLK_DIV_NUM GENMASK(7, 0)
+#define GMAC_GTXCLK_SEL 0x18
+#define GMAC_GTXCLK_SEL_PLL BIT(0)
+#define GMAC_INTF_CTRL 0x1c
+#define PHY_INTF_MASK BIT(0)
+#define PHY_INTF_RGMII FIELD_PREP(PHY_INTF_MASK, 1)
+#define PHY_INTF_MII_GMII FIELD_PREP(PHY_INTF_MASK, 0)
+#define GMAC_TXCLK_OEN 0x20
+#define TXCLK_DIR_MASK BIT(0)
+#define TXCLK_DIR_OUTPUT FIELD_PREP(TXCLK_DIR_MASK, 0)
+#define TXCLK_DIR_INPUT FIELD_PREP(TXCLK_DIR_MASK, 1)
+
+struct thead_dwmac {
+ struct plat_stmmacenet_data *plat;
+ void __iomem *apb_base;
+ struct device *dev;
+};
+
+static int thead_dwmac_set_phy_if(struct plat_stmmacenet_data *plat)
+{
+ struct thead_dwmac *dwmac = plat->bsp_priv;
+ u32 phyif;
+
+ switch (plat->phy_interface) {
+ case PHY_INTERFACE_MODE_MII:
+ phyif = PHY_INTF_MII_GMII;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ phyif = PHY_INTF_RGMII;
+ break;
+ default:
+ dev_err(dwmac->dev, "unsupported phy interface %s\n",
+ phy_modes(plat->phy_interface));
+ return -EINVAL;
+ }
+
+ writel(phyif, dwmac->apb_base + GMAC_INTF_CTRL);
+ return 0;
+}
+
+static int thead_dwmac_set_txclk_dir(struct plat_stmmacenet_data *plat)
+{
+ struct thead_dwmac *dwmac = plat->bsp_priv;
+ u32 txclk_dir;
+
+ switch (plat->phy_interface) {
+ case PHY_INTERFACE_MODE_MII:
+ txclk_dir = TXCLK_DIR_INPUT;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ txclk_dir = TXCLK_DIR_OUTPUT;
+ break;
+ default:
+ dev_err(dwmac->dev, "unsupported phy interface %s\n",
+ phy_modes(plat->phy_interface));
+ return -EINVAL;
+ }
+
+ writel(txclk_dir, dwmac->apb_base + GMAC_TXCLK_OEN);
+ return 0;
+}
+
+static int thead_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
+{
+ struct thead_dwmac *dwmac = bsp_priv;
+ struct plat_stmmacenet_data *plat;
+ unsigned long rate;
+ long tx_rate;
+ u32 div, reg;
+
+ plat = dwmac->plat;
+
+ switch (plat->phy_interface) {
+ /* For MII, rxc/txc is provided by phy */
+ case PHY_INTERFACE_MODE_MII:
+ return 0;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ rate = clk_get_rate(plat->stmmac_clk);
+
+ writel(0, dwmac->apb_base + GMAC_PLLCLK_DIV);
+
+ tx_rate = rgmii_clock(speed);
+ if (tx_rate < 0) {
+ dev_err(dwmac->dev, "invalid speed %d\n", speed);
+ return tx_rate;
+ }
+
+ div = rate / tx_rate;
+ if (rate != tx_rate * div) {
+ dev_err(dwmac->dev, "invalid gmac rate %lu\n", rate);
+ return -EINVAL;
+ }
+
+ reg = FIELD_PREP(GMAC_PLLCLK_DIV_EN, 1) |
+ FIELD_PREP(GMAC_PLLCLK_DIV_NUM, div);
+ writel(reg, dwmac->apb_base + GMAC_PLLCLK_DIV);
+ return 0;
+
+ default:
+ dev_err(dwmac->dev, "unsupported phy interface %s\n",
+ phy_modes(plat->phy_interface));
+ return -EINVAL;
+ }
+}
+
+static int thead_dwmac_enable_clk(struct plat_stmmacenet_data *plat)
+{
+ struct thead_dwmac *dwmac = plat->bsp_priv;
+ u32 reg, div;
+
+ switch (plat->phy_interface) {
+ case PHY_INTERFACE_MODE_MII:
+ reg = GMAC_RX_CLK_EN | GMAC_TX_CLK_EN;
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ /* use pll */
+ div = clk_get_rate(plat->stmmac_clk) / rgmii_clock(SPEED_1000);
+ reg = FIELD_PREP(GMAC_PLLCLK_DIV_EN, 1) |
+ FIELD_PREP(GMAC_PLLCLK_DIV_NUM, div);
+
+ writel(0, dwmac->apb_base + GMAC_PLLCLK_DIV);
+ writel(reg, dwmac->apb_base + GMAC_PLLCLK_DIV);
+
+ writel(GMAC_GTXCLK_SEL_PLL, dwmac->apb_base + GMAC_GTXCLK_SEL);
+ reg = GMAC_TX_CLK_EN | GMAC_TX_CLK_N_EN | GMAC_TX_CLK_OUT_EN |
+ GMAC_RX_CLK_EN | GMAC_RX_CLK_N_EN;
+ break;
+
+ default:
+ dev_err(dwmac->dev, "unsupported phy interface %s\n",
+ phy_modes(plat->phy_interface));
+ return -EINVAL;
+ }
+
+ writel(reg, dwmac->apb_base + GMAC_CLK_EN);
+ return 0;
+}
+
+static int thead_dwmac_init(struct device *dev, void *priv)
+{
+ struct thead_dwmac *dwmac = priv;
+ unsigned int reg;
+ int ret;
+
+ ret = thead_dwmac_set_phy_if(dwmac->plat);
+ if (ret)
+ return ret;
+
+ ret = thead_dwmac_set_txclk_dir(dwmac->plat);
+ if (ret)
+ return ret;
+
+ reg = readl(dwmac->apb_base + GMAC_RXCLK_DELAY_CTRL);
+ reg &= ~(GMAC_RXCLK_DELAY);
+ reg |= FIELD_PREP(GMAC_RXCLK_DELAY, 0);
+ writel(reg, dwmac->apb_base + GMAC_RXCLK_DELAY_CTRL);
+
+ reg = readl(dwmac->apb_base + GMAC_TXCLK_DELAY_CTRL);
+ reg &= ~(GMAC_TXCLK_DELAY);
+ reg |= FIELD_PREP(GMAC_TXCLK_DELAY, 0);
+ writel(reg, dwmac->apb_base + GMAC_TXCLK_DELAY_CTRL);
+
+ return thead_dwmac_enable_clk(dwmac->plat);
+}
+
+static int thead_dwmac_probe(struct platform_device *pdev)
+{
+ struct stmmac_resources stmmac_res;
+ struct plat_stmmacenet_data *plat;
+ struct thead_dwmac *dwmac;
+ struct clk *apb_clk;
+ void __iomem *apb;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to get resources\n");
+
+ plat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat))
+ return dev_err_probe(&pdev->dev, PTR_ERR(plat),
+ "dt configuration failed\n");
+
+ /*
+ * The APB clock is essential for accessing glue registers. However,
+ * old devicetrees don't describe it correctly. We continue to probe
+ * and emit a warning if it isn't present.
+ */
+ apb_clk = devm_clk_get_enabled(&pdev->dev, "apb");
+ if (PTR_ERR(apb_clk) == -ENOENT)
+ dev_warn(&pdev->dev,
+ "cannot get apb clock, link may break after speed changes\n");
+ else if (IS_ERR(apb_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(apb_clk),
+ "failed to get apb clock\n");
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac)
+ return -ENOMEM;
+
+ apb = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(apb))
+ return dev_err_probe(&pdev->dev, PTR_ERR(apb),
+ "failed to remap gmac apb registers\n");
+
+ dwmac->dev = &pdev->dev;
+ dwmac->plat = plat;
+ dwmac->apb_base = apb;
+ plat->bsp_priv = dwmac;
+ plat->set_clk_tx_rate = thead_set_clk_tx_rate;
+ plat->init = thead_dwmac_init;
+
+ return devm_stmmac_pltfr_probe(pdev, plat, &stmmac_res);
+}
+
+static const struct of_device_id thead_dwmac_match[] = {
+ { .compatible = "thead,th1520-gmac" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, thead_dwmac_match);
+
+static struct platform_driver thead_dwmac_driver = {
+ .probe = thead_dwmac_probe,
+ .driver = {
+ .name = "thead-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = thead_dwmac_match,
+ },
+};
+module_platform_driver(thead_dwmac_driver);
+
+MODULE_AUTHOR("Jisheng Zhang <jszhang@kernel.org>");
+MODULE_AUTHOR("Drew Fustini <drew@pdp7.com>");
+MODULE_DESCRIPTION("T-HEAD DWMAC platform driver");
+MODULE_LICENSE("GPL");
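
The divider math in thead_set_clk_tx_rate() above only accepts GMAC clock rates that divide evenly into the RGMII reference rate (125/25/2.5 MHz). A short editorial check with an assumed 500 MHz stmmac_clk; the actual rate on TH1520 boards may differ, and rgmii_clock() below is a local stand-in modelled on the kernel helper of the same name:

#include <stdio.h>

/* Local stand-in for the kernel's rgmii_clock() helper (values in Hz). */
static long rgmii_clock(int speed)
{
	switch (speed) {
	case 1000: return 125000000;
	case 100:  return 25000000;
	case 10:   return 2500000;
	default:   return -1;
	}
}

int main(void)
{
	unsigned long rate = 500000000;	/* assumed GMAC PLL rate */

	for (int speed = 10; speed <= 1000; speed *= 10) {
		long tx_rate = rgmii_clock(speed);
		unsigned long div = rate / tx_rate;

		if (rate != tx_rate * div)
			printf("speed %4d: %lu Hz is not an integer multiple -> -EINVAL\n",
			       speed, rate);
		else
			printf("speed %4d: GMAC_PLLCLK_DIV_NUM = %lu\n", speed, div);
	}
	return 0;
}

With 500 MHz every speed divides cleanly (div = 200, 20 and 4); a rate such as 300 MHz would trip the "invalid gmac rate" check at 1000 Mb/s, which is exactly the error path in the driver.
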
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
index eccf7f537467..9497b13a5753 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
@@ -42,69 +42,45 @@
#define ETHER_CLK_SEL_RX_TX_CLK_EN (ETHER_CLK_SEL_RX_CLK_EN | ETHER_CLK_SEL_TX_CLK_EN)
-#define ETHER_CONFIG_INTF_MII 0
-#define ETHER_CONFIG_INTF_RGMII BIT(0)
-#define ETHER_CONFIG_INTF_RMII BIT(2)
-
struct visconti_eth {
void __iomem *reg;
- u32 phy_intf_sel;
struct clk *phy_ref_clk;
struct device *dev;
- spinlock_t lock; /* lock to protect register update */
};
-static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+static int visconti_eth_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
- struct visconti_eth *dwmac = priv;
- struct net_device *netdev = dev_get_drvdata(dwmac->dev);
- unsigned int val, clk_sel_val = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&dwmac->lock, flags);
-
- /* adjust link */
- val = readl(dwmac->reg + MAC_CTRL_REG);
- val &= ~(GMAC_CONFIG_PS | GMAC_CONFIG_FES);
-
- switch (speed) {
- case SPEED_1000:
- if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RGMII)
- clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_125M;
- break;
- case SPEED_100:
- if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RGMII)
- clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_25M;
- if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RMII)
- clk_sel_val = ETHER_CLK_SEL_DIV_SEL_2;
- val |= GMAC_CONFIG_PS | GMAC_CONFIG_FES;
- break;
- case SPEED_10:
- if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RGMII)
- clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_2P5M;
- if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RMII)
- clk_sel_val = ETHER_CLK_SEL_DIV_SEL_20;
- val |= GMAC_CONFIG_PS;
- break;
- default:
- /* No bit control */
- netdev_err(netdev, "Unsupported speed request (%d)", speed);
- spin_unlock_irqrestore(&dwmac->lock, flags);
- return;
- }
-
- writel(val, dwmac->reg + MAC_CTRL_REG);
-
- /* Stop internal clock */
- val = readl(dwmac->reg + REG_ETHER_CLOCK_SEL);
- val &= ~(ETHER_CLK_SEL_RMII_CLK_EN | ETHER_CLK_SEL_RX_TX_CLK_EN);
- val |= ETHER_CLK_SEL_TX_O_E_N_IN;
- writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+ struct visconti_eth *dwmac = bsp_priv;
+ unsigned long clk_sel, val;
+
+ if (phy_interface_mode_is_rgmii(interface)) {
+ switch (speed) {
+ case SPEED_1000:
+ clk_sel = ETHER_CLK_SEL_FREQ_SEL_125M;
+ break;
+
+ case SPEED_100:
+ clk_sel = ETHER_CLK_SEL_FREQ_SEL_25M;
+ break;
+
+ case SPEED_10:
+ clk_sel = ETHER_CLK_SEL_FREQ_SEL_2P5M;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Stop internal clock */
+ val = readl(dwmac->reg + REG_ETHER_CLOCK_SEL);
+ val &= ~(ETHER_CLK_SEL_RMII_CLK_EN |
+ ETHER_CLK_SEL_RX_TX_CLK_EN);
+ val |= ETHER_CLK_SEL_TX_O_E_N_IN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
- /* Set Clock-Mux, Start clock, Set TX_O direction */
- switch (dwmac->phy_intf_sel) {
- case ETHER_CONFIG_INTF_RGMII:
- val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC;
+ /* Set Clock-Mux, Start clock, Set TX_O direction */
+ val = clk_sel | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC;
writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
val |= ETHER_CLK_SEL_RX_TX_CLK_EN;
@@ -112,11 +88,32 @@ static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed, unsigned
val &= ~ETHER_CLK_SEL_TX_O_E_N_IN;
writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
- break;
- case ETHER_CONFIG_INTF_RMII:
- val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_DIV |
- ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV | ETHER_CLK_SEL_TX_O_E_N_IN |
- ETHER_CLK_SEL_RMII_CLK_SEL_RX_C;
+ } else if (interface == PHY_INTERFACE_MODE_RMII) {
+ switch (speed) {
+ case SPEED_100:
+ clk_sel = ETHER_CLK_SEL_DIV_SEL_2;
+ break;
+
+ case SPEED_10:
+ clk_sel = ETHER_CLK_SEL_DIV_SEL_20;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Stop internal clock */
+ val = readl(dwmac->reg + REG_ETHER_CLOCK_SEL);
+ val &= ~(ETHER_CLK_SEL_RMII_CLK_EN |
+ ETHER_CLK_SEL_RX_TX_CLK_EN);
+ val |= ETHER_CLK_SEL_TX_O_E_N_IN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ /* Set Clock-Mux, Start clock, Set TX_O direction */
+ val = clk_sel | ETHER_CLK_SEL_RX_CLK_EXT_SEL_DIV |
+ ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV |
+ ETHER_CLK_SEL_TX_O_E_N_IN |
+ ETHER_CLK_SEL_RMII_CLK_SEL_RX_C;
writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
val |= ETHER_CLK_SEL_RMII_CLK_RST;
@@ -124,46 +121,42 @@ static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed, unsigned
val |= ETHER_CLK_SEL_RMII_CLK_EN | ETHER_CLK_SEL_RX_TX_CLK_EN;
writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
- break;
- case ETHER_CONFIG_INTF_MII:
- default:
- val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC |
- ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC | ETHER_CLK_SEL_TX_O_E_N_IN;
+ } else {
+ /* Stop internal clock */
+ val = readl(dwmac->reg + REG_ETHER_CLOCK_SEL);
+ val &= ~(ETHER_CLK_SEL_RMII_CLK_EN |
+ ETHER_CLK_SEL_RX_TX_CLK_EN);
+ val |= ETHER_CLK_SEL_TX_O_E_N_IN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ /* Set Clock-Mux, Start clock, Set TX_O direction */
+ val = ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC |
+ ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC |
+ ETHER_CLK_SEL_TX_O_E_N_IN;
writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
val |= ETHER_CLK_SEL_RX_TX_CLK_EN;
writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
- break;
}
- spin_unlock_irqrestore(&dwmac->lock, flags);
+ return 0;
}
static int visconti_eth_init_hw(struct platform_device *pdev, struct plat_stmmacenet_data *plat_dat)
{
struct visconti_eth *dwmac = plat_dat->bsp_priv;
- unsigned int reg_val, clk_sel_val;
-
- switch (plat_dat->phy_interface) {
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- dwmac->phy_intf_sel = ETHER_CONFIG_INTF_RGMII;
- break;
- case PHY_INTERFACE_MODE_MII:
- dwmac->phy_intf_sel = ETHER_CONFIG_INTF_MII;
- break;
- case PHY_INTERFACE_MODE_RMII:
- dwmac->phy_intf_sel = ETHER_CONFIG_INTF_RMII;
- break;
- default:
+ unsigned int clk_sel_val;
+ int phy_intf_sel;
+
+ phy_intf_sel = stmmac_get_phy_intf_sel(plat_dat->phy_interface);
+ if (phy_intf_sel != PHY_INTF_SEL_GMII_MII &&
+ phy_intf_sel != PHY_INTF_SEL_RGMII &&
+ phy_intf_sel != PHY_INTF_SEL_RMII) {
dev_err(&pdev->dev, "Unsupported phy-mode (%d)\n", plat_dat->phy_interface);
return -EOPNOTSUPP;
}
- reg_val = dwmac->phy_intf_sel;
- writel(reg_val, dwmac->reg + REG_ETHER_CONTROL);
+ writel(phy_intf_sel, dwmac->reg + REG_ETHER_CONTROL);
/* Enable TX/RX clock */
clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_125M;
@@ -173,8 +166,8 @@ static int visconti_eth_init_hw(struct platform_device *pdev, struct plat_stmmac
dwmac->reg + REG_ETHER_CLOCK_SEL);
/* release internal-reset */
- reg_val |= ETHER_ETH_CONTROL_RESET;
- writel(reg_val, dwmac->reg + REG_ETHER_CONTROL);
+ phy_intf_sel |= ETHER_ETH_CONTROL_RESET;
+ writel(phy_intf_sel, dwmac->reg + REG_ETHER_CONTROL);
return 0;
}
@@ -228,11 +221,10 @@ static int visconti_eth_dwmac_probe(struct platform_device *pdev)
if (!dwmac)
return -ENOMEM;
- spin_lock_init(&dwmac->lock);
dwmac->reg = stmmac_res.addr;
dwmac->dev = &pdev->dev;
plat_dat->bsp_priv = dwmac;
- plat_dat->fix_mac_speed = visconti_eth_fix_mac_speed;
+ plat_dat->set_clk_tx_rate = visconti_eth_set_clk_tx_rate;
ret = visconti_eth_clock_probe(pdev, plat_dat);
if (ret)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 4296ddda8aaa..697bba641e05 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -38,11 +38,10 @@
#define GMAC_INT_DISABLE_PCSAN BIT(2)
#define GMAC_INT_DISABLE_PMT BIT(3)
#define GMAC_INT_DISABLE_TIMESTAMP BIT(9)
-#define GMAC_INT_DISABLE_PCS (GMAC_INT_DISABLE_RGMII | \
+#define GMAC_INT_DEFAULT_MASK (GMAC_INT_DISABLE_RGMII | \
GMAC_INT_DISABLE_PCSLINK | \
- GMAC_INT_DISABLE_PCSAN)
-#define GMAC_INT_DEFAULT_MASK (GMAC_INT_DISABLE_TIMESTAMP | \
- GMAC_INT_DISABLE_PCS)
+ GMAC_INT_DISABLE_PCSAN | \
+ GMAC_INT_DISABLE_TIMESTAMP)
/* PMT Control and Status */
#define GMAC_PMT 0x0000002c
@@ -59,22 +58,11 @@ enum power_event {
/* Energy Efficient Ethernet (EEE)
*
* LPI status, timer and control register offset
+ * For LPI control and status bit definitions, see common.h.
*/
#define LPI_CTRL_STATUS 0x0030
#define LPI_TIMER_CTRL 0x0034
-/* LPI control and status defines */
-#define LPI_CTRL_STATUS_LPITXA 0x00080000 /* Enable LPI TX Automate */
-#define LPI_CTRL_STATUS_PLSEN 0x00040000 /* Enable PHY Link Status */
-#define LPI_CTRL_STATUS_PLS 0x00020000 /* PHY Link Status */
-#define LPI_CTRL_STATUS_LPIEN 0x00010000 /* LPI Enable */
-#define LPI_CTRL_STATUS_RLPIST 0x00000200 /* Receive LPI state */
-#define LPI_CTRL_STATUS_TLPIST 0x00000100 /* Transmit LPI state */
-#define LPI_CTRL_STATUS_RLPIEX 0x00000008 /* Receive LPI Exit */
-#define LPI_CTRL_STATUS_RLPIEN 0x00000004 /* Receive LPI Entry */
-#define LPI_CTRL_STATUS_TLPIEX 0x00000002 /* Transmit LPI Exit */
-#define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */
-
/* GMAC HW ADDR regs */
#define GMAC_ADDR_HIGH(reg) ((reg > 15) ? 0x00000800 + (reg - 16) * 8 : \
0x00000040 + (reg * 8))
@@ -329,5 +317,17 @@ enum rtc_control {
#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
#define GMAC_EXTHASH_BASE 0x500
+/* PTP and timestamping registers */
+
+#define GMAC3_X_ATSNS GENMASK(29, 25)
+#define GMAC3_X_ATSNS_SHIFT 25
+
+#define GMAC_PTP_TCR_ATSFC BIT(24)
+#define GMAC_PTP_TCR_ATSEN0 BIT(25)
+
+#define GMAC3_X_TIMESTAMP_STATUS 0x28
+#define GMAC_PTP_ATNR 0x30
+#define GMAC_PTP_ATSR 0x34
+
extern const struct stmmac_dma_ops dwmac1000_dma_ops;
#endif /* __DWMAC1000_H__ */
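
The new GMAC3_X_ATSNS field above is what dwmac1000_timestamp_interrupt(), later in this patch, uses to learn how many auxiliary snapshots are queued. A one-line worked example of the extraction, with a hypothetical status value and the GENMASK(29, 25) value spelled out as a constant:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the macros above: GENMASK(29, 25) and its shift. */
#define GMAC3_X_ATSNS        0x3e000000u
#define GMAC3_X_ATSNS_SHIFT  25

int main(void)
{
	/* Hypothetical value read from GMAC3_X_TIMESTAMP_STATUS */
	uint32_t ts_status = 0x04000000;
	uint32_t num_snapshot = (ts_status & GMAC3_X_ATSNS) >> GMAC3_X_ATSNS_SHIFT;

	printf("%u auxiliary snapshots pending\n", num_snapshot);	/* 2 */
	return 0;
}
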
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index d413d76a8936..a2ae136d2c0e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -16,51 +16,41 @@
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/io.h>
+#include <linux/string_choices.h>
#include "stmmac.h"
#include "stmmac_pcs.h"
+#include "stmmac_ptp.h"
#include "dwmac1000.h"
+static int dwmac1000_pcs_init(struct stmmac_priv *priv)
+{
+ if (!priv->dma_cap.pcs)
+ return 0;
+
+ return stmmac_integrated_pcs_init(priv, GMAC_PCS_BASE,
+ GMAC_INT_DISABLE_PCSLINK |
+ GMAC_INT_DISABLE_PCSAN);
+}
+
static void dwmac1000_core_init(struct mac_device_info *hw,
struct net_device *dev)
{
void __iomem *ioaddr = hw->pcsr;
- u32 value = readl(ioaddr + GMAC_CONTROL);
int mtu = dev->mtu;
+ u32 value;
/* Configure GMAC core */
- value |= GMAC_CORE_INIT;
+ value = readl(ioaddr + GMAC_CONTROL);
if (mtu > 1500)
value |= GMAC_CONTROL_2K;
if (mtu > 2000)
value |= GMAC_CONTROL_JE;
- if (hw->ps) {
- value |= GMAC_CONTROL_TE;
-
- value &= ~hw->link.speed_mask;
- switch (hw->ps) {
- case SPEED_1000:
- value |= hw->link.speed1000;
- break;
- case SPEED_100:
- value |= hw->link.speed100;
- break;
- case SPEED_10:
- value |= hw->link.speed10;
- break;
- }
- }
-
- writel(value, ioaddr + GMAC_CONTROL);
+ writel(value | GMAC_CORE_INIT, ioaddr + GMAC_CONTROL);
/* Mask GMAC interrupts */
- value = GMAC_INT_DEFAULT_MASK;
-
- if (hw->pcs)
- value &= ~GMAC_INT_DISABLE_PCS;
-
- writel(value, ioaddr + GMAC_INT_MASK);
+ writel(GMAC_INT_DEFAULT_MASK, ioaddr + GMAC_INT_MASK);
#ifdef STMMAC_VLAN_TAG_USED
/* Tag detection without filtering */
@@ -68,6 +58,20 @@ static void dwmac1000_core_init(struct mac_device_info *hw,
#endif
}
+static void dwmac1000_irq_modify(struct mac_device_info *hw, u32 disable,
+ u32 enable)
+{
+ void __iomem *int_mask = hw->pcsr + GMAC_INT_MASK;
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&hw->irq_ctrl_lock, flags);
+ value = readl(int_mask) | disable;
+ value &= ~enable;
+ writel(value, int_mask);
+ spin_unlock_irqrestore(&hw->irq_ctrl_lock, flags);
+}
+
static int dwmac1000_rx_ipc_enable(struct mac_device_info *hw)
{
void __iomem *ioaddr = hw->pcsr;
@@ -261,39 +265,6 @@ static void dwmac1000_pmt(struct mac_device_info *hw, unsigned long mode)
writel(pmt, ioaddr + GMAC_PMT);
}
-/* RGMII or SMII interface */
-static void dwmac1000_rgsmii(void __iomem *ioaddr, struct stmmac_extra_stats *x)
-{
- u32 status;
-
- status = readl(ioaddr + GMAC_RGSMIIIS);
- x->irq_rgmii_n++;
-
- /* Check the link status */
- if (status & GMAC_RGSMIIIS_LNKSTS) {
- int speed_value;
-
- x->pcs_link = 1;
-
- speed_value = ((status & GMAC_RGSMIIIS_SPEED) >>
- GMAC_RGSMIIIS_SPEED_SHIFT);
- if (speed_value == GMAC_RGSMIIIS_SPEED_125)
- x->pcs_speed = SPEED_1000;
- else if (speed_value == GMAC_RGSMIIIS_SPEED_25)
- x->pcs_speed = SPEED_100;
- else
- x->pcs_speed = SPEED_10;
-
- x->pcs_duplex = (status & GMAC_RGSMIIIS_LNKMOD_MASK);
-
- pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
- x->pcs_duplex ? "Full" : "Half");
- } else {
- x->pcs_link = 0;
- pr_info("Link is Down\n");
- }
-}
-
static int dwmac1000_irq_status(struct mac_device_info *hw,
struct stmmac_extra_stats *x)
{
@@ -335,37 +306,27 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
- if (intr_status & PCS_RGSMIIIS_IRQ)
- dwmac1000_rgsmii(ioaddr, x);
-
return ret;
}
-static void dwmac1000_set_eee_mode(struct mac_device_info *hw,
- bool en_tx_lpi_clockgating)
+static int dwmac1000_set_lpi_mode(struct mac_device_info *hw,
+ enum stmmac_lpi_mode mode,
+ bool en_tx_lpi_clockgating, u32 et)
{
void __iomem *ioaddr = hw->pcsr;
u32 value;
- /*TODO - en_tx_lpi_clockgating treatment */
+ if (mode == STMMAC_LPI_TIMER)
+ return -EOPNOTSUPP;
- /* Enable the link status receive on RGMII, SGMII ore SMII
- * receive path and instruct the transmit to enter in LPI
- * state.
- */
value = readl(ioaddr + LPI_CTRL_STATUS);
- value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
+ if (mode == STMMAC_LPI_FORCED)
+ value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
+ else
+ value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA);
writel(value, ioaddr + LPI_CTRL_STATUS);
-}
-
-static void dwmac1000_reset_eee_mode(struct mac_device_info *hw)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
- value = readl(ioaddr + LPI_CTRL_STATUS);
- value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA);
- writel(value, ioaddr + LPI_CTRL_STATUS);
+ return 0;
}
static void dwmac1000_set_eee_pls(struct mac_device_info *hw, int link)
@@ -398,15 +359,10 @@ static void dwmac1000_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
writel(value, ioaddr + LPI_TIMER_CTRL);
}
-static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
- bool loopback)
-{
- dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
-}
-
-static void dwmac1000_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
+static void dwmac1000_ctrl_ane(struct stmmac_priv *priv, bool ane,
+ bool srgmi_ral)
{
- dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
+ dwmac_ctrl_ane(priv->ioaddr, GMAC_PCS_BASE, ane, srgmi_ral);
}
static void dwmac1000_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -498,7 +454,9 @@ static void dwmac1000_set_mac_loopback(void __iomem *ioaddr, bool enable)
}
const struct stmmac_ops dwmac1000_ops = {
+ .pcs_init = dwmac1000_pcs_init,
.core_init = dwmac1000_core_init,
+ .irq_modify = dwmac1000_irq_modify,
.set_mac = stmmac_set_mac,
.rx_ipc = dwmac1000_rx_ipc_enable,
.dump_regs = dwmac1000_dump_regs,
@@ -508,13 +466,11 @@ const struct stmmac_ops dwmac1000_ops = {
.pmt = dwmac1000_pmt,
.set_umac_addr = dwmac1000_set_umac_addr,
.get_umac_addr = dwmac1000_get_umac_addr,
- .set_eee_mode = dwmac1000_set_eee_mode,
- .reset_eee_mode = dwmac1000_reset_eee_mode,
+ .set_lpi_mode = dwmac1000_set_lpi_mode,
.set_eee_timer = dwmac1000_set_eee_timer,
.set_eee_pls = dwmac1000_set_eee_pls,
.debug = dwmac1000_debug,
.pcs_ctrl_ane = dwmac1000_ctrl_ane,
- .pcs_get_adv_lp = dwmac1000_get_adv_lp,
.set_mac_loopback = dwmac1000_set_mac_loopback,
};
@@ -551,3 +507,103 @@ int dwmac1000_setup(struct stmmac_priv *priv)
return 0;
}
+
+/* DWMAC 1000 HW Timestamping ops */
+
+void dwmac1000_get_ptptime(void __iomem *ptpaddr, u64 *ptp_time)
+{
+ u64 ns;
+
+ ns = readl(ptpaddr + GMAC_PTP_ATNR);
+ ns += (u64)readl(ptpaddr + GMAC_PTP_ATSR) * NSEC_PER_SEC;
+
+ *ptp_time = ns;
+}
+
+void dwmac1000_timestamp_interrupt(struct stmmac_priv *priv)
+{
+ struct ptp_clock_event event;
+ u32 ts_status, num_snapshot;
+ unsigned long flags;
+ u64 ptp_time;
+ int i;
+
+ /* Reading the status register clears the timestamp interrupt */
+ ts_status = readl(priv->ptpaddr + GMAC3_X_TIMESTAMP_STATUS);
+
+ if (!(priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN))
+ return;
+
+ num_snapshot = (ts_status & GMAC3_X_ATSNS) >> GMAC3_X_ATSNS_SHIFT;
+
+ for (i = 0; i < num_snapshot; i++) {
+ read_lock_irqsave(&priv->ptp_lock, flags);
+ stmmac_get_ptptime(priv, priv->ptpaddr, &ptp_time);
+ read_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = 0;
+ event.timestamp = ptp_time;
+ ptp_clock_event(priv->ptp_clock, &event);
+ }
+}
+
+/* DWMAC 1000 ptp_clock_info ops */
+
+static void dwmac1000_timestamp_interrupt_cfg(struct stmmac_priv *priv, bool en)
+{
+ void __iomem *ioaddr = priv->ioaddr;
+
+ u32 intr_mask = readl(ioaddr + GMAC_INT_MASK);
+
+ if (en)
+ intr_mask &= ~GMAC_INT_DISABLE_TIMESTAMP;
+ else
+ intr_mask |= GMAC_INT_DISABLE_TIMESTAMP;
+
+ writel(intr_mask, ioaddr + GMAC_INT_MASK);
+}
+
+int dwmac1000_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct stmmac_priv *priv =
+ container_of(ptp, struct stmmac_priv, ptp_clock_ops);
+ void __iomem *ptpaddr = priv->ptpaddr;
+ int ret = -EOPNOTSUPP;
+ u32 tcr_val;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ mutex_lock(&priv->aux_ts_lock);
+ tcr_val = readl(ptpaddr + PTP_TCR);
+
+ if (on) {
+ tcr_val |= GMAC_PTP_TCR_ATSEN0;
+ tcr_val |= GMAC_PTP_TCR_ATSFC;
+ priv->plat->flags |= STMMAC_FLAG_EXT_SNAPSHOT_EN;
+ } else {
+ tcr_val &= ~GMAC_PTP_TCR_ATSEN0;
+ priv->plat->flags &= ~STMMAC_FLAG_EXT_SNAPSHOT_EN;
+ }
+
+ netdev_dbg(priv->dev, "Auxiliary Snapshot %s.\n",
+ str_enabled_disabled(on));
+ writel(tcr_val, ptpaddr + PTP_TCR);
+
+ /* wait for auxts fifo clear to finish */
+ ret = readl_poll_timeout(ptpaddr + PTP_TCR, tcr_val,
+ !(tcr_val & GMAC_PTP_TCR_ATSFC),
+ 10, 10000);
+
+ mutex_unlock(&priv->aux_ts_lock);
+
+ dwmac1000_timestamp_interrupt_cfg(priv, on);
+ break;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 118a22406a2e..5877fec9f6c3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -19,7 +19,6 @@
static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
{
u32 value = readl(ioaddr + DMA_AXI_BUS_MODE);
- int i;
pr_info("dwmac1000: Master AXI performs %s burst length\n",
!(value & DMA_AXI_UNDEF) ? "fixed" : "any");
@@ -39,33 +38,10 @@ static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
/* Depending on the UNDEF bit the Master AXI will perform any burst
* length according to the BLEN programmed (by default all BLEN are
- * set).
+ * set). Note that the UNDEF bit is readonly, and is the inverse of
+ * Bus Mode bit 16.
*/
- for (i = 0; i < AXI_BLEN; i++) {
- switch (axi->axi_blen[i]) {
- case 256:
- value |= DMA_AXI_BLEN256;
- break;
- case 128:
- value |= DMA_AXI_BLEN128;
- break;
- case 64:
- value |= DMA_AXI_BLEN64;
- break;
- case 32:
- value |= DMA_AXI_BLEN32;
- break;
- case 16:
- value |= DMA_AXI_BLEN16;
- break;
- case 8:
- value |= DMA_AXI_BLEN8;
- break;
- case 4:
- value |= DMA_AXI_BLEN4;
- break;
- }
- }
+ value = (value & ~DMA_AXI_BLEN_MASK) | axi->axi_blen_regval;
writel(value, ioaddr + DMA_AXI_BUS_MODE);
}
@@ -159,10 +135,10 @@ static void dwmac1000_dma_operation_mode_rx(struct stmmac_priv *priv,
if (mode == SF_DMA_MODE) {
pr_debug("GMAC: enable RX store and forward mode\n");
- csr6 |= DMA_CONTROL_RSF;
+ csr6 |= DMA_CONTROL_RSF | DMA_CONTROL_DFF;
} else {
pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
- csr6 &= ~DMA_CONTROL_RSF;
+ csr6 &= ~(DMA_CONTROL_RSF | DMA_CONTROL_DFF);
csr6 &= DMA_CONTROL_TC_RX_MASK;
if (mode <= 32)
csr6 |= DMA_CONTROL_RTC_32;
@@ -286,6 +262,7 @@ const struct stmmac_dma_ops dwmac1000_dma_ops = {
.dma_rx_mode = dwmac1000_dma_operation_mode_rx,
.dma_tx_mode = dwmac1000_dma_operation_mode_tx,
.enable_dma_transmission = dwmac_enable_dma_transmission,
+ .enable_dma_reception = dwmac_enable_dma_reception,
.enable_dma_irq = dwmac_enable_dma_irq,
.disable_dma_irq = dwmac_disable_dma_irq,
.start_tx = dwmac_dma_start_tx,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 0c050324997a..3cb733781e1e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -17,11 +17,7 @@
#define GMAC_EXT_CONFIG 0x00000004
#define GMAC_PACKET_FILTER 0x00000008
#define GMAC_HASH_TAB(x) (0x10 + (x) * 4)
-#define GMAC_VLAN_TAG 0x00000050
-#define GMAC_VLAN_TAG_DATA 0x00000054
-#define GMAC_VLAN_HASH_TABLE 0x00000058
#define GMAC_RX_FLOW_CTRL 0x00000090
-#define GMAC_VLAN_INCL 0x00000060
#define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4)
#define GMAC_TXQ_PRTY_MAP0 0x98
#define GMAC_TXQ_PRTY_MAP1 0x9C
@@ -31,7 +27,6 @@
#define GMAC_RXQ_CTRL3 0x000000ac
#define GMAC_INT_STATUS 0x000000b0
#define GMAC_INT_EN 0x000000b4
-#define GMAC_1US_TIC_COUNTER 0x000000dc
#define GMAC_PCS_BASE 0x000000e0
#define GMAC_PHYIF_CONTROL_STATUS 0x000000f8
#define GMAC_PMT 0x000000c0
@@ -82,42 +77,6 @@
#define GMAC_MAX_PERFECT_ADDRESSES 128
-/* MAC VLAN */
-#define GMAC_VLAN_EDVLP BIT(26)
-#define GMAC_VLAN_VTHM BIT(25)
-#define GMAC_VLAN_DOVLTC BIT(20)
-#define GMAC_VLAN_ESVL BIT(18)
-#define GMAC_VLAN_ETV BIT(16)
-#define GMAC_VLAN_VID GENMASK(15, 0)
-#define GMAC_VLAN_VLTI BIT(20)
-#define GMAC_VLAN_CSVL BIT(19)
-#define GMAC_VLAN_VLC GENMASK(17, 16)
-#define GMAC_VLAN_VLC_SHIFT 16
-#define GMAC_VLAN_VLHT GENMASK(15, 0)
-
-/* MAC VLAN Tag */
-#define GMAC_VLAN_TAG_VID GENMASK(15, 0)
-#define GMAC_VLAN_TAG_ETV BIT(16)
-
-/* MAC VLAN Tag Control */
-#define GMAC_VLAN_TAG_CTRL_OB BIT(0)
-#define GMAC_VLAN_TAG_CTRL_CT BIT(1)
-#define GMAC_VLAN_TAG_CTRL_OFS_MASK GENMASK(6, 2)
-#define GMAC_VLAN_TAG_CTRL_OFS_SHIFT 2
-#define GMAC_VLAN_TAG_CTRL_EVLS_MASK GENMASK(22, 21)
-#define GMAC_VLAN_TAG_CTRL_EVLS_SHIFT 21
-#define GMAC_VLAN_TAG_CTRL_EVLRXS BIT(24)
-
-#define GMAC_VLAN_TAG_STRIP_NONE (0x0 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT)
-#define GMAC_VLAN_TAG_STRIP_PASS (0x1 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT)
-#define GMAC_VLAN_TAG_STRIP_FAIL (0x2 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT)
-#define GMAC_VLAN_TAG_STRIP_ALL (0x3 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT)
-
-/* MAC VLAN Tag Data/Filter */
-#define GMAC_VLAN_TAG_DATA_VID GENMASK(15, 0)
-#define GMAC_VLAN_TAG_DATA_VEN BIT(16)
-#define GMAC_VLAN_TAG_DATA_ETV BIT(17)
-
/* MAC RX Queue Enable */
#define GMAC_RX_QUEUE_CLEAR(queue) ~(GENMASK(1, 0) << ((queue) * 2))
#define GMAC_RX_AV_QUEUE_ENABLE(queue) BIT((queue) * 2)
@@ -147,9 +106,6 @@
#define GMAC_INT_LPI_EN BIT(5)
#define GMAC_INT_TSIE BIT(12)
-#define GMAC_PCS_IRQ_DEFAULT (GMAC_INT_RGSMIIS | GMAC_INT_PCS_LINK | \
- GMAC_INT_PCS_ANE)
-
#define GMAC_INT_DEFAULT_ENABLE (GMAC_INT_PMT_EN | GMAC_INT_LPI_EN | \
GMAC_INT_TSIE)
@@ -177,23 +133,13 @@ enum power_event {
/* Energy Efficient Ethernet (EEE) for GMAC4
*
* LPI status, timer and control register offset
+ * For LPI control and status bit definitions, see common.h.
*/
#define GMAC4_LPI_CTRL_STATUS 0xd0
#define GMAC4_LPI_TIMER_CTRL 0xd4
#define GMAC4_LPI_ENTRY_TIMER 0xd8
#define GMAC4_MAC_ONEUS_TIC_COUNTER 0xdc
-/* LPI control and status defines */
-#define GMAC4_LPI_CTRL_STATUS_LPITCSE BIT(21) /* LPI Tx Clock Stop Enable */
-#define GMAC4_LPI_CTRL_STATUS_LPIATE BIT(20) /* LPI Timer Enable */
-#define GMAC4_LPI_CTRL_STATUS_LPITXA BIT(19) /* Enable LPI TX Automate */
-#define GMAC4_LPI_CTRL_STATUS_PLS BIT(17) /* PHY Link Status */
-#define GMAC4_LPI_CTRL_STATUS_LPIEN BIT(16) /* LPI Enable */
-#define GMAC4_LPI_CTRL_STATUS_RLPIEX BIT(3) /* Receive LPI Exit */
-#define GMAC4_LPI_CTRL_STATUS_RLPIEN BIT(2) /* Receive LPI Entry */
-#define GMAC4_LPI_CTRL_STATUS_TLPIEX BIT(1) /* Transmit LPI Exit */
-#define GMAC4_LPI_CTRL_STATUS_TLPIEN BIT(0) /* Transmit LPI Entry */
-
/* MAC Debug bitmap */
#define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17)
#define GMAC_DEBUG_TFCSTS_SHIFT 17
@@ -392,9 +338,10 @@ static inline u32 mtl_chanx_base_addr(const struct dwmac4_addrs *addrs,
#define MTL_OP_MODE_RFA_SHIFT 8
#define MTL_OP_MODE_EHFC BIT(7)
+#define MTL_OP_MODE_DIS_TCP_EF BIT(6)
-#define MTL_OP_MODE_RTC_MASK 0x18
-#define MTL_OP_MODE_RTC_SHIFT 3
+#define MTL_OP_MODE_RTC_MASK GENMASK(1, 0)
+#define MTL_OP_MODE_RTC_SHIFT 0
#define MTL_OP_MODE_RTC_32 (1 << MTL_OP_MODE_RTC_SHIFT)
#define MTL_OP_MODE_RTC_64 0
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index c25781874aa7..a4282fd7c3c7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -18,54 +18,55 @@
#include "stmmac.h"
#include "stmmac_fpe.h"
#include "stmmac_pcs.h"
+#include "stmmac_vlan.h"
#include "dwmac4.h"
#include "dwmac5.h"
+static int dwmac4_pcs_init(struct stmmac_priv *priv)
+{
+ if (!priv->dma_cap.pcs)
+ return 0;
+
+ return stmmac_integrated_pcs_init(priv, GMAC_PCS_BASE,
+ GMAC_INT_PCS_LINK | GMAC_INT_PCS_ANE);
+}
+
static void dwmac4_core_init(struct mac_device_info *hw,
struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
void __iomem *ioaddr = hw->pcsr;
- u32 value = readl(ioaddr + GMAC_CONFIG);
- u32 clk_rate;
-
- value |= GMAC_CORE_INIT;
-
- if (hw->ps) {
- value |= GMAC_CONFIG_TE;
-
- value &= hw->link.speed_mask;
- switch (hw->ps) {
- case SPEED_1000:
- value |= hw->link.speed1000;
- break;
- case SPEED_100:
- value |= hw->link.speed100;
- break;
- case SPEED_10:
- value |= hw->link.speed10;
- break;
- }
- }
+ unsigned long clk_rate;
+ u32 value;
- writel(value, ioaddr + GMAC_CONFIG);
+ value = readl(ioaddr + GMAC_CONFIG);
+ writel(value | GMAC_CORE_INIT, ioaddr + GMAC_CONFIG);
/* Configure LPI 1us counter to number of CSR clock ticks in 1us - 1 */
clk_rate = clk_get_rate(priv->plat->stmmac_clk);
writel((clk_rate / 1000000) - 1, ioaddr + GMAC4_MAC_ONEUS_TIC_COUNTER);
/* Enable GMAC interrupts */
- value = GMAC_INT_DEFAULT_ENABLE;
-
- if (hw->pcs)
- value |= GMAC_PCS_IRQ_DEFAULT;
-
- writel(value, ioaddr + GMAC_INT_EN);
+ writel(GMAC_INT_DEFAULT_ENABLE, ioaddr + GMAC_INT_EN);
if (GMAC_INT_DEFAULT_ENABLE & GMAC_INT_TSIE)
init_waitqueue_head(&priv->tstamp_busy_wait);
}
+static void dwmac4_irq_modify(struct mac_device_info *hw, u32 disable,
+ u32 enable)
+{
+ void __iomem *int_mask = hw->pcsr + GMAC_INT_EN;
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&hw->irq_ctrl_lock, flags);
+ value = readl(int_mask) & ~disable;
+ value |= enable;
+ writel(value, int_mask);
+ spin_unlock_irqrestore(&hw->irq_ctrl_lock, flags);
+}
+
static void dwmac4_update_caps(struct stmmac_priv *priv)
{
if (priv->plat->tx_queues_to_use > 1)
@@ -376,33 +377,46 @@ static void dwmac4_get_umac_addr(struct mac_device_info *hw,
GMAC_ADDR_LOW(reg_n));
}
-static void dwmac4_set_eee_mode(struct mac_device_info *hw,
- bool en_tx_lpi_clockgating)
+static int dwmac4_set_lpi_mode(struct mac_device_info *hw,
+ enum stmmac_lpi_mode mode,
+ bool en_tx_lpi_clockgating, u32 et)
{
void __iomem *ioaddr = hw->pcsr;
- u32 value;
+ u32 value, mask;
- /* Enable the link status receive on RGMII, SGMII ore SMII
- * receive path and instruct the transmit to enter in LPI
- * state.
- */
- value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
- value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;
+ if (mode == STMMAC_LPI_DISABLE) {
+ value = 0;
+ } else {
+ value = LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
- if (en_tx_lpi_clockgating)
- value |= GMAC4_LPI_CTRL_STATUS_LPITCSE;
+ if (mode == STMMAC_LPI_TIMER) {
+ /* Return ERANGE if the timer is larger than the
+ * register field.
+ */
+ if (et > STMMAC_ET_MAX)
+ return -ERANGE;
- writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
-}
+ /* Set the hardware LPI entry timer */
+ writel(et, ioaddr + GMAC4_LPI_ENTRY_TIMER);
-static void dwmac4_reset_eee_mode(struct mac_device_info *hw)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
+ /* Interpret a zero LPI entry timer to mean
+ * immediate entry into LPI mode.
+ */
+ if (et)
+ value |= LPI_CTRL_STATUS_LPIATE;
+ }
- value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
- value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA);
+ if (en_tx_lpi_clockgating)
+ value |= LPI_CTRL_STATUS_LPITCSE;
+ }
+
+ mask = LPI_CTRL_STATUS_LPIATE | LPI_CTRL_STATUS_LPIEN |
+ LPI_CTRL_STATUS_LPITXA | LPI_CTRL_STATUS_LPITCSE;
+
+ value |= readl(ioaddr + GMAC4_LPI_CTRL_STATUS) & ~mask;
writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
+
+ return 0;
}
static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
@@ -413,34 +427,13 @@ static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
if (link)
- value |= GMAC4_LPI_CTRL_STATUS_PLS;
+ value |= LPI_CTRL_STATUS_PLS;
else
- value &= ~GMAC4_LPI_CTRL_STATUS_PLS;
+ value &= ~LPI_CTRL_STATUS_PLS;
writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}
-static void dwmac4_set_eee_lpi_entry_timer(struct mac_device_info *hw, int et)
-{
- void __iomem *ioaddr = hw->pcsr;
- int value = et & STMMAC_ET_MAX;
- int regval;
-
- /* Program LPI entry timer value into register */
- writel(value, ioaddr + GMAC4_LPI_ENTRY_TIMER);
-
- /* Enable/disable LPI entry timer */
- regval = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
- regval |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;
-
- if (et)
- regval |= GMAC4_LPI_CTRL_STATUS_LPIATE;
- else
- regval &= ~GMAC4_LPI_CTRL_STATUS_LPIATE;
-
- writel(regval, ioaddr + GMAC4_LPI_CTRL_STATUS);
-}
-
static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
{
void __iomem *ioaddr = hw->pcsr;
@@ -456,165 +449,6 @@ static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
}
-static void dwmac4_write_single_vlan(struct net_device *dev, u16 vid)
-{
- void __iomem *ioaddr = (void __iomem *)dev->base_addr;
- u32 val;
-
- val = readl(ioaddr + GMAC_VLAN_TAG);
- val &= ~GMAC_VLAN_TAG_VID;
- val |= GMAC_VLAN_TAG_ETV | vid;
-
- writel(val, ioaddr + GMAC_VLAN_TAG);
-}
-
-static int dwmac4_write_vlan_filter(struct net_device *dev,
- struct mac_device_info *hw,
- u8 index, u32 data)
-{
- void __iomem *ioaddr = (void __iomem *)dev->base_addr;
- int ret;
- u32 val;
-
- if (index >= hw->num_vlan)
- return -EINVAL;
-
- writel(data, ioaddr + GMAC_VLAN_TAG_DATA);
-
- val = readl(ioaddr + GMAC_VLAN_TAG);
- val &= ~(GMAC_VLAN_TAG_CTRL_OFS_MASK |
- GMAC_VLAN_TAG_CTRL_CT |
- GMAC_VLAN_TAG_CTRL_OB);
- val |= (index << GMAC_VLAN_TAG_CTRL_OFS_SHIFT) | GMAC_VLAN_TAG_CTRL_OB;
-
- writel(val, ioaddr + GMAC_VLAN_TAG);
-
- ret = readl_poll_timeout(ioaddr + GMAC_VLAN_TAG, val,
- !(val & GMAC_VLAN_TAG_CTRL_OB),
- 1000, 500000);
- if (ret) {
- netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n");
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev,
- struct mac_device_info *hw,
- __be16 proto, u16 vid)
-{
- int index = -1;
- u32 val = 0;
- int i, ret;
-
- if (vid > 4095)
- return -EINVAL;
-
- /* Single Rx VLAN Filter */
- if (hw->num_vlan == 1) {
- /* For single VLAN filter, VID 0 means VLAN promiscuous */
- if (vid == 0) {
- netdev_warn(dev, "Adding VLAN ID 0 is not supported\n");
- return -EPERM;
- }
-
- if (hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) {
- netdev_err(dev, "Only single VLAN ID supported\n");
- return -EPERM;
- }
-
- hw->vlan_filter[0] = vid;
- dwmac4_write_single_vlan(dev, vid);
-
- return 0;
- }
-
- /* Extended Rx VLAN Filter Enable */
- val |= GMAC_VLAN_TAG_DATA_ETV | GMAC_VLAN_TAG_DATA_VEN | vid;
-
- for (i = 0; i < hw->num_vlan; i++) {
- if (hw->vlan_filter[i] == val)
- return 0;
- else if (!(hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN))
- index = i;
- }
-
- if (index == -1) {
- netdev_err(dev, "MAC_VLAN_Tag_Filter full (size: %0u)\n",
- hw->num_vlan);
- return -EPERM;
- }
-
- ret = dwmac4_write_vlan_filter(dev, hw, index, val);
-
- if (!ret)
- hw->vlan_filter[index] = val;
-
- return ret;
-}
-
-static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
- struct mac_device_info *hw,
- __be16 proto, u16 vid)
-{
- int i, ret = 0;
-
- /* Single Rx VLAN Filter */
- if (hw->num_vlan == 1) {
- if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) {
- hw->vlan_filter[0] = 0;
- dwmac4_write_single_vlan(dev, 0);
- }
- return 0;
- }
-
- /* Extended Rx VLAN Filter Enable */
- for (i = 0; i < hw->num_vlan; i++) {
- if ((hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VID) == vid) {
- ret = dwmac4_write_vlan_filter(dev, hw, i, 0);
-
- if (!ret)
- hw->vlan_filter[i] = 0;
- else
- return ret;
- }
- }
-
- return ret;
-}
-
-static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev,
- struct mac_device_info *hw)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
- u32 hash;
- u32 val;
- int i;
-
- /* Single Rx VLAN Filter */
- if (hw->num_vlan == 1) {
- dwmac4_write_single_vlan(dev, hw->vlan_filter[0]);
- return;
- }
-
- /* Extended Rx VLAN Filter Enable */
- for (i = 0; i < hw->num_vlan; i++) {
- if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
- val = hw->vlan_filter[i];
- dwmac4_write_vlan_filter(dev, hw, i, val);
- }
- }
-
- hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
- if (hash & GMAC_VLAN_VLHT) {
- value = readl(ioaddr + GMAC_VLAN_TAG);
- value |= GMAC_VLAN_VTHM;
- writel(value, ioaddr + GMAC_VLAN_TAG);
- }
-}
-
static void dwmac4_set_filter(struct mac_device_info *hw,
struct net_device *dev)
{
@@ -749,48 +583,9 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
}
}
-static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
- bool loopback)
-{
- dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
-}
-
-static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
+static void dwmac4_ctrl_ane(struct stmmac_priv *priv, bool ane, bool srgmi_ral)
{
- dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
-}
-
-/* RGMII or SMII interface */
-static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
-{
- u32 status;
-
- status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS);
- x->irq_rgmii_n++;
-
- /* Check the link status */
- if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) {
- int speed_value;
-
- x->pcs_link = 1;
-
- speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >>
- GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT);
- if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125)
- x->pcs_speed = SPEED_1000;
- else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25)
- x->pcs_speed = SPEED_100;
- else
- x->pcs_speed = SPEED_10;
-
- x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD);
-
- pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
- x->pcs_duplex ? "Full" : "Half");
- } else {
- x->pcs_link = 0;
- pr_info("Link is Down\n");
- }
+ dwmac_ctrl_ane(priv->ioaddr, GMAC_PCS_BASE, ane, srgmi_ral);
}
static int dwmac4_irq_mtl_status(struct stmmac_priv *priv,
@@ -849,23 +644,21 @@ static int dwmac4_irq_status(struct mac_device_info *hw,
/* Clear LPI interrupt by reading MAC_LPI_Control_Status */
u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
- if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) {
+ if (status & LPI_CTRL_STATUS_TLPIEN) {
ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
x->irq_tx_path_in_lpi_mode_n++;
}
- if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) {
+ if (status & LPI_CTRL_STATUS_TLPIEX) {
ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
x->irq_tx_path_exit_lpi_mode_n++;
}
- if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN)
+ if (status & LPI_CTRL_STATUS_RLPIEN)
x->irq_rx_path_in_lpi_mode_n++;
- if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX)
+ if (status & LPI_CTRL_STATUS_RLPIEX)
x->irq_rx_path_exit_lpi_mode_n++;
}
dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
- if (intr_status & PCS_RGSMIIIS_IRQ)
- dwmac4_phystatus(ioaddr, x);
return ret;
}
@@ -973,45 +766,6 @@ static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
writel(value, ioaddr + GMAC_CONFIG);
}
-static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
- u16 perfect_match, bool is_double)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
-
- writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE);
-
- value = readl(ioaddr + GMAC_VLAN_TAG);
-
- if (hash) {
- value |= GMAC_VLAN_VTHM | GMAC_VLAN_ETV;
- if (is_double) {
- value |= GMAC_VLAN_EDVLP;
- value |= GMAC_VLAN_ESVL;
- value |= GMAC_VLAN_DOVLTC;
- }
-
- writel(value, ioaddr + GMAC_VLAN_TAG);
- } else if (perfect_match) {
- u32 value = GMAC_VLAN_ETV;
-
- if (is_double) {
- value |= GMAC_VLAN_EDVLP;
- value |= GMAC_VLAN_ESVL;
- value |= GMAC_VLAN_DOVLTC;
- }
-
- writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG);
- } else {
- value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV);
- value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL);
- value &= ~GMAC_VLAN_DOVLTC;
- value &= ~GMAC_VLAN_VID;
-
- writel(value, ioaddr + GMAC_VLAN_TAG);
- }
-}
-
static void dwmac4_sarc_configure(void __iomem *ioaddr, int val)
{
u32 value = readl(ioaddr + GMAC_CONFIG);
@@ -1022,19 +776,6 @@ static void dwmac4_sarc_configure(void __iomem *ioaddr, int val)
writel(value, ioaddr + GMAC_CONFIG);
}
-static void dwmac4_enable_vlan(struct mac_device_info *hw, u32 type)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
-
- value = readl(ioaddr + GMAC_VLAN_INCL);
- value |= GMAC_VLAN_VLTI;
- value |= GMAC_VLAN_CSVL; /* Only use SVLAN */
- value &= ~GMAC_VLAN_VLC;
- value |= (type << GMAC_VLAN_VLC_SHIFT) & GMAC_VLAN_VLC;
- writel(value, ioaddr + GMAC_VLAN_INCL);
-}
-
static void dwmac4_set_arp_offload(struct mac_device_info *hw, bool en,
u32 addr)
{
@@ -1151,37 +892,10 @@ static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
return 0;
}
-static void dwmac4_rx_hw_vlan(struct mac_device_info *hw,
- struct dma_desc *rx_desc, struct sk_buff *skb)
-{
- if (hw->desc->get_rx_vlan_valid(rx_desc)) {
- u16 vid = hw->desc->get_rx_vlan_tci(rx_desc);
-
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
- }
-}
-
-static void dwmac4_set_hw_vlan_mode(struct mac_device_info *hw)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value = readl(ioaddr + GMAC_VLAN_TAG);
-
- value &= ~GMAC_VLAN_TAG_CTRL_EVLS_MASK;
-
- if (hw->hw_vlan_en)
- /* Always strip VLAN on Receive */
- value |= GMAC_VLAN_TAG_STRIP_ALL;
- else
- /* Do not strip VLAN on Receive */
- value |= GMAC_VLAN_TAG_STRIP_NONE;
-
- /* Enable outer VLAN Tag in Rx DMA descriptor */
- value |= GMAC_VLAN_TAG_CTRL_EVLRXS;
- writel(value, ioaddr + GMAC_VLAN_TAG);
-}
-
const struct stmmac_ops dwmac4_ops = {
+ .pcs_init = dwmac4_pcs_init,
.core_init = dwmac4_core_init,
+ .irq_modify = dwmac4_irq_modify,
.update_caps = dwmac4_update_caps,
.set_mac = stmmac_set_mac,
.rx_ipc = dwmac4_rx_ipc_enable,
@@ -1201,31 +915,23 @@ const struct stmmac_ops dwmac4_ops = {
.pmt = dwmac4_pmt,
.set_umac_addr = dwmac4_set_umac_addr,
.get_umac_addr = dwmac4_get_umac_addr,
- .set_eee_mode = dwmac4_set_eee_mode,
- .reset_eee_mode = dwmac4_reset_eee_mode,
- .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
+ .set_lpi_mode = dwmac4_set_lpi_mode,
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
- .pcs_get_adv_lp = dwmac4_get_adv_lp,
.debug = dwmac4_debug,
.set_filter = dwmac4_set_filter,
.set_mac_loopback = dwmac4_set_mac_loopback,
- .update_vlan_hash = dwmac4_update_vlan_hash,
.sarc_configure = dwmac4_sarc_configure,
- .enable_vlan = dwmac4_enable_vlan,
.set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
- .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
- .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
- .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
- .rx_hw_vlan = dwmac4_rx_hw_vlan,
- .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
};
const struct stmmac_ops dwmac410_ops = {
+ .pcs_init = dwmac4_pcs_init,
.core_init = dwmac4_core_init,
+ .irq_modify = dwmac4_irq_modify,
.update_caps = dwmac4_update_caps,
.set_mac = stmmac_dwmac4_set_mac,
.rx_ipc = dwmac4_rx_ipc_enable,
@@ -1245,33 +951,25 @@ const struct stmmac_ops dwmac410_ops = {
.pmt = dwmac4_pmt,
.set_umac_addr = dwmac4_set_umac_addr,
.get_umac_addr = dwmac4_get_umac_addr,
- .set_eee_mode = dwmac4_set_eee_mode,
- .reset_eee_mode = dwmac4_reset_eee_mode,
- .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
+ .set_lpi_mode = dwmac4_set_lpi_mode,
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
- .pcs_get_adv_lp = dwmac4_get_adv_lp,
.debug = dwmac4_debug,
.set_filter = dwmac4_set_filter,
.flex_pps_config = dwmac5_flex_pps_config,
.set_mac_loopback = dwmac4_set_mac_loopback,
- .update_vlan_hash = dwmac4_update_vlan_hash,
.sarc_configure = dwmac4_sarc_configure,
- .enable_vlan = dwmac4_enable_vlan,
.set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
.fpe_map_preemption_class = dwmac5_fpe_map_preemption_class,
- .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
- .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
- .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
- .rx_hw_vlan = dwmac4_rx_hw_vlan,
- .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
};
const struct stmmac_ops dwmac510_ops = {
+ .pcs_init = dwmac4_pcs_init,
.core_init = dwmac4_core_init,
+ .irq_modify = dwmac4_irq_modify,
.update_caps = dwmac4_update_caps,
.set_mac = stmmac_dwmac4_set_mac,
.rx_ipc = dwmac4_rx_ipc_enable,
@@ -1291,13 +989,10 @@ const struct stmmac_ops dwmac510_ops = {
.pmt = dwmac4_pmt,
.set_umac_addr = dwmac4_set_umac_addr,
.get_umac_addr = dwmac4_get_umac_addr,
- .set_eee_mode = dwmac4_set_eee_mode,
- .reset_eee_mode = dwmac4_reset_eee_mode,
- .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
+ .set_lpi_mode = dwmac4_set_lpi_mode,
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
- .pcs_get_adv_lp = dwmac4_get_adv_lp,
.debug = dwmac4_debug,
.set_filter = dwmac4_set_filter,
.safety_feat_config = dwmac5_safety_feat_config,
@@ -1306,51 +1001,13 @@ const struct stmmac_ops dwmac510_ops = {
.rxp_config = dwmac5_rxp_config,
.flex_pps_config = dwmac5_flex_pps_config,
.set_mac_loopback = dwmac4_set_mac_loopback,
- .update_vlan_hash = dwmac4_update_vlan_hash,
.sarc_configure = dwmac4_sarc_configure,
- .enable_vlan = dwmac4_enable_vlan,
.set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
.fpe_map_preemption_class = dwmac5_fpe_map_preemption_class,
- .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
- .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
- .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
- .rx_hw_vlan = dwmac4_rx_hw_vlan,
- .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
};
-static u32 dwmac4_get_num_vlan(void __iomem *ioaddr)
-{
- u32 val, num_vlan;
-
- val = readl(ioaddr + GMAC_HW_FEATURE3);
- switch (val & GMAC_HW_FEAT_NRVF) {
- case 0:
- num_vlan = 1;
- break;
- case 1:
- num_vlan = 4;
- break;
- case 2:
- num_vlan = 8;
- break;
- case 3:
- num_vlan = 16;
- break;
- case 4:
- num_vlan = 24;
- break;
- case 5:
- num_vlan = 32;
- break;
- default:
- num_vlan = 1;
- }
-
- return num_vlan;
-}
-
int dwmac4_setup(struct stmmac_priv *priv)
{
struct mac_device_info *mac = priv->hw;
@@ -1382,7 +1039,7 @@ int dwmac4_setup(struct stmmac_priv *priv)
mac->mii.reg_mask = GENMASK(20, 16);
mac->mii.clk_csr_shift = 8;
mac->mii.clk_csr_mask = GENMASK(11, 8);
- mac->num_vlan = dwmac4_get_num_vlan(priv->ioaddr);
+ mac->num_vlan = stmmac_get_num_vlan(priv->ioaddr);
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index a5fb31eb0192..aac68dc28dc1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -110,16 +110,20 @@ static int dwmac4_wrback_get_rx_status(struct stmmac_extra_stats *x,
message_type = (rdes1 & ERDES4_MSG_TYPE_MASK) >> 8;
- if (rdes1 & RDES1_IP_HDR_ERROR)
+ if (rdes1 & RDES1_IP_HDR_ERROR) {
x->ip_hdr_err++;
+ ret |= csum_none;
+ }
if (rdes1 & RDES1_IP_CSUM_BYPASSED)
x->ip_csum_bypassed++;
if (rdes1 & RDES1_IPV4_HEADER)
x->ipv4_pkt_rcvd++;
if (rdes1 & RDES1_IPV6_HEADER)
x->ipv6_pkt_rcvd++;
- if (rdes1 & RDES1_IP_PAYLOAD_ERROR)
+ if (rdes1 & RDES1_IP_PAYLOAD_ERROR) {
x->ip_payload_err++;
+ ret |= csum_none;
+ }
if (message_type == RDES_EXT_NO_PTP)
x->no_ptp_rx_msg_type_ext++;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
index 1ce6f43d545a..806555976496 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
@@ -144,4 +144,7 @@
/* TDS3 use for both format (read and write back) */
#define RDES3_OWN BIT(31)
+extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
+extern const struct stmmac_desc_ops dwmac4_desc_ops;
+
#endif /* __DWMAC4_DESCS_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index 22a044d93e17..7b513324cfb0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -18,7 +18,6 @@
static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
{
u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
- int i;
pr_info("dwmac4: Master AXI performs %s burst length\n",
(value & DMA_SYS_BUS_FB) ? "fixed" : "any");
@@ -38,33 +37,10 @@ static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
/* Depending on the UNDEF bit the Master AXI will perform any burst
* length according to the BLEN programmed (by default all BLEN are
- * set).
+ * set). Note that the UNDEF bit is readonly, and is the inverse of
+ * Bus Mode bit 16.
*/
- for (i = 0; i < AXI_BLEN; i++) {
- switch (axi->axi_blen[i]) {
- case 256:
- value |= DMA_AXI_BLEN256;
- break;
- case 128:
- value |= DMA_AXI_BLEN128;
- break;
- case 64:
- value |= DMA_AXI_BLEN64;
- break;
- case 32:
- value |= DMA_AXI_BLEN32;
- break;
- case 16:
- value |= DMA_AXI_BLEN16;
- break;
- case 8:
- value |= DMA_AXI_BLEN8;
- break;
- case 4:
- value |= DMA_AXI_BLEN4;
- break;
- }
- }
+ value = (value & ~DMA_AXI_BLEN_MASK) | axi->axi_blen_regval;
writel(value, ioaddr + DMA_SYS_BUS_MODE);
}
@@ -268,13 +244,15 @@ static void dwmac4_dma_rx_chan_op_mode(struct stmmac_priv *priv,
mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(dwmac4_addrs, channel));
+ mtl_rx_op |= MTL_OP_MODE_DIS_TCP_EF;
+
if (mode == SF_DMA_MODE) {
pr_debug("GMAC: enable RX store and forward mode\n");
mtl_rx_op |= MTL_OP_MODE_RSF;
} else {
pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
mtl_rx_op &= ~MTL_OP_MODE_RSF;
- mtl_rx_op &= MTL_OP_MODE_RTC_MASK;
+ mtl_rx_op &= ~MTL_OP_MODE_RTC_MASK;
if (mode <= 32)
mtl_rx_op |= MTL_OP_MODE_RTC_32;
else if (mode <= 64)
@@ -343,7 +321,7 @@ static void dwmac4_dma_tx_chan_op_mode(struct stmmac_priv *priv,
} else {
pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode);
mtl_tx_op &= ~MTL_OP_MODE_TSF;
- mtl_tx_op &= MTL_OP_MODE_TTC_MASK;
+ mtl_tx_op &= ~MTL_OP_MODE_TTC_MASK;
/* Set the transmit threshold */
if (mode <= 32)
mtl_tx_op |= MTL_OP_MODE_TTC_32;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
index 4f980dcd3958..f27126f05551 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -69,15 +69,8 @@
#define DMA_SYS_BUS_MB BIT(14)
#define DMA_AXI_1KBBE BIT(13)
-#define DMA_SYS_BUS_AAL BIT(12)
+#define DMA_SYS_BUS_AAL DMA_AXI_AAL
#define DMA_SYS_BUS_EAME BIT(11)
-#define DMA_AXI_BLEN256 BIT(7)
-#define DMA_AXI_BLEN128 BIT(6)
-#define DMA_AXI_BLEN64 BIT(5)
-#define DMA_AXI_BLEN32 BIT(4)
-#define DMA_AXI_BLEN16 BIT(3)
-#define DMA_AXI_BLEN8 BIT(2)
-#define DMA_AXI_BLEN4 BIT(1)
#define DMA_SYS_BUS_FB BIT(0)
#define DMA_BURST_LEN_DEFAULT (DMA_AXI_BLEN256 | DMA_AXI_BLEN128 | \
@@ -85,8 +78,6 @@
DMA_AXI_BLEN16 | DMA_AXI_BLEN8 | \
DMA_AXI_BLEN4)
-#define DMA_AXI_BURST_LEN_MASK 0x000000FE
-
/* DMA TBS Control */
#define DMA_TBS_FTOS GENMASK(31, 8)
#define DMA_TBS_FTOV BIT(0)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index 0d185e54eb7e..57c03d491774 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -185,8 +185,6 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
x->rx_buf_unav_irq++;
if (unlikely(intr_status & DMA_CHAN_STATUS_RPS))
x->rx_process_stopped_irq++;
- if (unlikely(intr_status & DMA_CHAN_STATUS_RWT))
- x->rx_watchdog_irq++;
if (unlikely(intr_status & DMA_CHAN_STATUS_ETI))
x->tx_early_irq++;
if (unlikely(intr_status & DMA_CHAN_STATUS_TPS)) {
@@ -198,6 +196,10 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
ret = tx_hard_error;
}
}
+
+ if (unlikely(intr_status & DMA_CHAN_STATUS_RWT))
+ x->rx_watchdog_irq++;
+
/* TX/RX NORMAL interrupts */
if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
u64_stats_update_begin(&stats->syncp);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 5d9c18f5bbf5..054ecb20ce3f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -68,23 +68,14 @@ static inline u32 dma_chan_base_addr(u32 base, u32 chan)
#define DMA_AXI_OSR_MAX 0xf
#define DMA_AXI_MAX_OSR_LIMIT ((DMA_AXI_OSR_MAX << DMA_AXI_WR_OSR_LMT_SHIFT) | \
(DMA_AXI_OSR_MAX << DMA_AXI_RD_OSR_LMT_SHIFT))
-#define DMA_AXI_1KBBE BIT(13)
-#define DMA_AXI_AAL BIT(12)
-#define DMA_AXI_BLEN256 BIT(7)
-#define DMA_AXI_BLEN128 BIT(6)
-#define DMA_AXI_BLEN64 BIT(5)
-#define DMA_AXI_BLEN32 BIT(4)
-#define DMA_AXI_BLEN16 BIT(3)
-#define DMA_AXI_BLEN8 BIT(2)
-#define DMA_AXI_BLEN4 BIT(1)
#define DMA_BURST_LEN_DEFAULT (DMA_AXI_BLEN256 | DMA_AXI_BLEN128 | \
DMA_AXI_BLEN64 | DMA_AXI_BLEN32 | \
DMA_AXI_BLEN16 | DMA_AXI_BLEN8 | \
DMA_AXI_BLEN4)
-#define DMA_AXI_UNDEF BIT(0)
+#define DMA_AXI_1KBBE BIT(13)
-#define DMA_AXI_BURST_LEN_MASK 0x000000FE
+#define DMA_AXI_UNDEF BIT(0)
#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
@@ -178,6 +169,7 @@ static inline u32 dma_chan_base_addr(u32 base, u32 chan)
#define NUM_DWMAC4_DMA_REGS 27
void dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan);
+void dwmac_enable_dma_reception(void __iomem *ioaddr, u32 chan);
void dwmac_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx);
void dwmac_disable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 4846bf49c576..97a803d68e3a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -33,6 +33,11 @@ void dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan)
writel(1, ioaddr + DMA_CHAN_XMT_POLL_DEMAND(chan));
}
+void dwmac_enable_dma_reception(void __iomem *ioaddr, u32 chan)
+{
+ writel(1, ioaddr + DMA_CHAN_RCV_POLL_DEMAND(chan));
+}
+
void dwmac_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx)
{
@@ -251,7 +256,7 @@ void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr)
void stmmac_set_mac_addr(void __iomem *ioaddr, const u8 addr[6],
unsigned int high, unsigned int low)
{
- unsigned long data;
+ u32 data;
data = (addr[5] << 8) | addr[4];
/* For MAC Addr registers we have to set the Address Enable (AE)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index a04a79003692..fecda3034d36 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -57,19 +57,6 @@
#define XGMAC_FILTER_PR BIT(0)
#define XGMAC_HASH_TABLE(x) (0x00000010 + (x) * 4)
#define XGMAC_MAX_HASH_TABLE 8
-#define XGMAC_VLAN_TAG 0x00000050
-#define XGMAC_VLAN_EDVLP BIT(26)
-#define XGMAC_VLAN_VTHM BIT(25)
-#define XGMAC_VLAN_DOVLTC BIT(20)
-#define XGMAC_VLAN_ESVL BIT(18)
-#define XGMAC_VLAN_ETV BIT(16)
-#define XGMAC_VLAN_VID GENMASK(15, 0)
-#define XGMAC_VLAN_HASH_TABLE 0x00000058
-#define XGMAC_VLAN_INCL 0x00000060
-#define XGMAC_VLAN_VLTI BIT(20)
-#define XGMAC_VLAN_CSVL BIT(19)
-#define XGMAC_VLAN_VLC GENMASK(17, 16)
-#define XGMAC_VLAN_VLC_SHIFT 16
#define XGMAC_RXQ_CTRL0 0x000000a0
#define XGMAC_RXQEN(x) GENMASK((x) * 2 + 1, (x) * 2)
#define XGMAC_RXQEN_SHIFT(x) ((x) * 2)
@@ -92,6 +79,7 @@
#define XGMAC_PSRQ(x) GENMASK((x) * 8 + 7, (x) * 8)
#define XGMAC_PSRQ_SHIFT(x) ((x) * 8)
#define XGMAC_INT_STATUS 0x000000b0
+#define XGMAC_INT_TSIS BIT(12)
#define XGMAC_LPIIS BIT(5)
#define XGMAC_PMTIS BIT(4)
#define XGMAC_INT_EN 0x000000b4
@@ -112,14 +100,7 @@
#define XGMAC_MGKPKTEN BIT(1)
#define XGMAC_PWRDWN BIT(0)
#define XGMAC_LPI_CTRL 0x000000d0
-#define XGMAC_TXCGE BIT(21)
-#define XGMAC_LPITXA BIT(19)
-#define XGMAC_PLS BIT(17)
-#define XGMAC_LPITXEN BIT(16)
-#define XGMAC_RLPIEX BIT(3)
-#define XGMAC_RLPIEN BIT(2)
-#define XGMAC_TLPIEX BIT(1)
-#define XGMAC_TLPIEN BIT(0)
+/* For definitions, see LPI_CTRL_STATUS_xxx in common.h */
#define XGMAC_LPI_TIMER_CTRL 0x000000d4
#define XGMAC_HW_FEATURE0 0x0000011c
#define XGMAC_HWFEAT_EDMA BIT(31)
@@ -193,6 +174,8 @@
#define XGMAC_MDIO_ADDR 0x00000200
#define XGMAC_MDIO_DATA 0x00000204
#define XGMAC_MDIO_C22P 0x00000220
+#define XGMAC_GPIO_STATUS 0x0000027c
+#define XGMAC_GPIO_GPO0 BIT(16)
#define XGMAC_ADDRx_HIGH(x) (0x00000300 + (x) * 0x8)
#define XGMAC_ADDR_MAX 32
#define XGMAC_AE BIT(31)
@@ -240,6 +223,8 @@
#define XGMAC_OB BIT(0)
#define XGMAC_RSS_DATA 0x00000c8c
#define XGMAC_TIMESTAMP_STATUS 0x00000d20
+#define XGMAC_TIMESTAMP_ATSNS_MASK GENMASK(29, 25)
+#define XGMAC_TIMESTAMP_ATSNS_SHIFT 25
#define XGMAC_TXTSC BIT(15)
#define XGMAC_TXTIMESTAMP_NSEC 0x00000d30
#define XGMAC_TXTSSTSLO GENMASK(30, 0)
@@ -353,16 +338,9 @@
#define XGMAC_RD_OSR_LMT_SHIFT 16
#define XGMAC_EN_LPI BIT(15)
#define XGMAC_LPI_XIT_PKT BIT(14)
-#define XGMAC_AAL BIT(12)
+#define XGMAC_AAL DMA_AXI_AAL
#define XGMAC_EAME BIT(11)
-#define XGMAC_BLEN GENMASK(7, 1)
-#define XGMAC_BLEN256 BIT(7)
-#define XGMAC_BLEN128 BIT(6)
-#define XGMAC_BLEN64 BIT(5)
-#define XGMAC_BLEN32 BIT(4)
-#define XGMAC_BLEN16 BIT(3)
-#define XGMAC_BLEN8 BIT(2)
-#define XGMAC_BLEN4 BIT(1)
+/* XGMAC_BLEN* are now defined as DMA_AXI_BLEN* in common.h */
#define XGMAC_UNDEF BIT(0)
#define XGMAC_TX_EDMA_CTRL 0x00003040
#define XGMAC_TDPS GENMASK(29, 0)
@@ -484,6 +462,7 @@
#define XGMAC_RDES3_RSV BIT(26)
#define XGMAC_RDES3_L34T GENMASK(23, 20)
#define XGMAC_RDES3_L34T_SHIFT 20
+#define XGMAC_RDES3_ET_LT GENMASK(19, 16)
#define XGMAC_L34T_IP4TCP 0x1
#define XGMAC_L34T_IP4UDP 0x2
#define XGMAC_L34T_IP6TCP 0x9
@@ -493,4 +472,20 @@
#define XGMAC_RDES3_TSD BIT(6)
#define XGMAC_RDES3_TSA BIT(4)
+/* RDES0 (write back format) */
+#define XGMAC_RDES0_VLAN_TAG_MASK GENMASK(15, 0)
+
+/* Error Type or L2 Type(ET/LT) Field Number */
+#define XGMAC_ET_LT_VLAN_STAG 8
+#define XGMAC_ET_LT_VLAN_CTAG 9
+#define XGMAC_ET_LT_DVLAN_CTAG_CTAG 10
+#define XGMAC_ET_LT_DVLAN_STAG_STAG 11
+#define XGMAC_ET_LT_DVLAN_CTAG_STAG 12
+#define XGMAC_ET_LT_DVLAN_STAG_CTAG 13
+
+extern const struct stmmac_ops dwxgmac210_ops;
+extern const struct stmmac_ops dwxlgmac2_ops;
+extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
+extern const struct stmmac_desc_ops dwxgmac210_desc_ops;
+
#endif /* __STMMAC_DWXGMAC2_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 9a60a6e8f633..b40b3ea50e25 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -10,6 +10,7 @@
#include "stmmac.h"
#include "stmmac_fpe.h"
#include "stmmac_ptp.h"
+#include "stmmac_vlan.h"
#include "dwxlgmac2.h"
#include "dwxgmac2.h"
@@ -22,30 +23,31 @@ static void dwxgmac2_core_init(struct mac_device_info *hw,
tx = readl(ioaddr + XGMAC_TX_CONFIG);
rx = readl(ioaddr + XGMAC_RX_CONFIG);
- tx |= XGMAC_CORE_INIT_TX;
- rx |= XGMAC_CORE_INIT_RX;
+ writel(tx | XGMAC_CORE_INIT_TX, ioaddr + XGMAC_TX_CONFIG);
+ writel(rx | XGMAC_CORE_INIT_RX, ioaddr + XGMAC_RX_CONFIG);
+ writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
+}
- if (hw->ps) {
- tx |= XGMAC_CONFIG_TE;
- tx &= ~hw->link.speed_mask;
+static void dwxgmac2_irq_modify(struct mac_device_info *hw, u32 disable,
+ u32 enable)
+{
+ void __iomem *int_mask = hw->pcsr + XGMAC_INT_EN;
+ unsigned long flags;
+ u32 value;
- switch (hw->ps) {
- case SPEED_10000:
- tx |= hw->link.xgmii.speed10000;
- break;
- case SPEED_2500:
- tx |= hw->link.speed2500;
- break;
- case SPEED_1000:
- default:
- tx |= hw->link.speed1000;
- break;
- }
- }
+ spin_lock_irqsave(&hw->irq_ctrl_lock, flags);
+ value = readl(int_mask) & ~disable;
+ value |= enable;
+ writel(value, int_mask);
+ spin_unlock_irqrestore(&hw->irq_ctrl_lock, flags);
+}
- writel(tx, ioaddr + XGMAC_TX_CONFIG);
- writel(rx, ioaddr + XGMAC_RX_CONFIG);
- writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
+static void dwxgmac2_update_caps(struct stmmac_priv *priv)
+{
+ if (!priv->dma_cap.mbps_10_100)
+ priv->hw->link.caps &= ~(MAC_10 | MAC_100);
+ else if (!priv->dma_cap.half_duplex)
+ priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD);
}
static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
@@ -316,17 +318,17 @@ static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
if (stat & XGMAC_LPIIS) {
u32 lpi = readl(ioaddr + XGMAC_LPI_CTRL);
- if (lpi & XGMAC_TLPIEN) {
+ if (lpi & LPI_CTRL_STATUS_TLPIEN) {
ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
x->irq_tx_path_in_lpi_mode_n++;
}
- if (lpi & XGMAC_TLPIEX) {
+ if (lpi & LPI_CTRL_STATUS_TLPIEX) {
ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
x->irq_tx_path_exit_lpi_mode_n++;
}
- if (lpi & XGMAC_RLPIEN)
+ if (lpi & LPI_CTRL_STATUS_RLPIEN)
x->irq_rx_path_in_lpi_mode_n++;
- if (lpi & XGMAC_RLPIEX)
+ if (lpi & LPI_CTRL_STATUS_RLPIEX)
x->irq_rx_path_exit_lpi_mode_n++;
}
@@ -425,29 +427,28 @@ static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
addr[5] = (hi_addr >> 8) & 0xff;
}
-static void dwxgmac2_set_eee_mode(struct mac_device_info *hw,
- bool en_tx_lpi_clockgating)
+static int dwxgmac2_set_lpi_mode(struct mac_device_info *hw,
+ enum stmmac_lpi_mode mode,
+ bool en_tx_lpi_clockgating, u32 et)
{
void __iomem *ioaddr = hw->pcsr;
u32 value;
- value = readl(ioaddr + XGMAC_LPI_CTRL);
-
- value |= XGMAC_LPITXEN | XGMAC_LPITXA;
- if (en_tx_lpi_clockgating)
- value |= XGMAC_TXCGE;
-
- writel(value, ioaddr + XGMAC_LPI_CTRL);
-}
-
-static void dwxgmac2_reset_eee_mode(struct mac_device_info *hw)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
+ if (mode == STMMAC_LPI_TIMER)
+ return -EOPNOTSUPP;
value = readl(ioaddr + XGMAC_LPI_CTRL);
- value &= ~(XGMAC_LPITXEN | XGMAC_LPITXA | XGMAC_TXCGE);
+ if (mode == STMMAC_LPI_FORCED) {
+ value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
+ if (en_tx_lpi_clockgating)
+ value |= LPI_CTRL_STATUS_LPITCSE;
+ } else {
+ value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA |
+ LPI_CTRL_STATUS_LPITCSE);
+ }
writel(value, ioaddr + XGMAC_LPI_CTRL);
+
+ return 0;
}
static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
@@ -457,9 +458,9 @@ static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
value = readl(ioaddr + XGMAC_LPI_CTRL);
if (link)
- value |= XGMAC_PLS;
+ value |= LPI_CTRL_STATUS_PLS;
else
- value &= ~XGMAC_PLS;
+ value &= ~LPI_CTRL_STATUS_PLS;
writel(value, ioaddr + XGMAC_LPI_CTRL);
}
@@ -615,76 +616,6 @@ static int dwxgmac2_rss_configure(struct mac_device_info *hw,
return 0;
}
-static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
- u16 perfect_match, bool is_double)
-{
- void __iomem *ioaddr = hw->pcsr;
-
- writel(hash, ioaddr + XGMAC_VLAN_HASH_TABLE);
-
- if (hash) {
- u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
-
- value |= XGMAC_FILTER_VTFE;
-
- writel(value, ioaddr + XGMAC_PACKET_FILTER);
-
- value = readl(ioaddr + XGMAC_VLAN_TAG);
-
- value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
- if (is_double) {
- value |= XGMAC_VLAN_EDVLP;
- value |= XGMAC_VLAN_ESVL;
- value |= XGMAC_VLAN_DOVLTC;
- } else {
- value &= ~XGMAC_VLAN_EDVLP;
- value &= ~XGMAC_VLAN_ESVL;
- value &= ~XGMAC_VLAN_DOVLTC;
- }
-
- value &= ~XGMAC_VLAN_VID;
- writel(value, ioaddr + XGMAC_VLAN_TAG);
- } else if (perfect_match) {
- u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
-
- value |= XGMAC_FILTER_VTFE;
-
- writel(value, ioaddr + XGMAC_PACKET_FILTER);
-
- value = readl(ioaddr + XGMAC_VLAN_TAG);
-
- value &= ~XGMAC_VLAN_VTHM;
- value |= XGMAC_VLAN_ETV;
- if (is_double) {
- value |= XGMAC_VLAN_EDVLP;
- value |= XGMAC_VLAN_ESVL;
- value |= XGMAC_VLAN_DOVLTC;
- } else {
- value &= ~XGMAC_VLAN_EDVLP;
- value &= ~XGMAC_VLAN_ESVL;
- value &= ~XGMAC_VLAN_DOVLTC;
- }
-
- value &= ~XGMAC_VLAN_VID;
- writel(value | perfect_match, ioaddr + XGMAC_VLAN_TAG);
- } else {
- u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
-
- value &= ~XGMAC_FILTER_VTFE;
-
- writel(value, ioaddr + XGMAC_PACKET_FILTER);
-
- value = readl(ioaddr + XGMAC_VLAN_TAG);
-
- value &= ~(XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV);
- value &= ~(XGMAC_VLAN_EDVLP | XGMAC_VLAN_ESVL);
- value &= ~XGMAC_VLAN_DOVLTC;
- value &= ~XGMAC_VLAN_VID;
-
- writel(value, ioaddr + XGMAC_VLAN_TAG);
- }
-}
-
struct dwxgmac3_error_desc {
bool valid;
const char *desc;
@@ -1301,19 +1232,6 @@ static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val)
writel(value, ioaddr + XGMAC_TX_CONFIG);
}
-static void dwxgmac2_enable_vlan(struct mac_device_info *hw, u32 type)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
-
- value = readl(ioaddr + XGMAC_VLAN_INCL);
- value |= XGMAC_VLAN_VLTI;
- value |= XGMAC_VLAN_CSVL; /* Only use SVLAN */
- value &= ~XGMAC_VLAN_VLC;
- value |= (type << XGMAC_VLAN_VLC_SHIFT) & XGMAC_VLAN_VLC;
- writel(value, ioaddr + XGMAC_VLAN_INCL);
-}
-
static int dwxgmac2_filter_wait(struct mac_device_info *hw)
{
void __iomem *ioaddr = hw->pcsr;
@@ -1507,6 +1425,8 @@ static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
const struct stmmac_ops dwxgmac210_ops = {
.core_init = dwxgmac2_core_init,
+ .irq_modify = dwxgmac2_irq_modify,
+ .update_caps = dwxgmac2_update_caps,
.set_mac = dwxgmac2_set_mac,
.rx_ipc = dwxgmac2_rx_ipc,
.rx_queue_enable = dwxgmac2_rx_queue_enable,
@@ -1525,8 +1445,7 @@ const struct stmmac_ops dwxgmac210_ops = {
.pmt = dwxgmac2_pmt,
.set_umac_addr = dwxgmac2_set_umac_addr,
.get_umac_addr = dwxgmac2_get_umac_addr,
- .set_eee_mode = dwxgmac2_set_eee_mode,
- .reset_eee_mode = dwxgmac2_reset_eee_mode,
+ .set_lpi_mode = dwxgmac2_set_lpi_mode,
.set_eee_timer = dwxgmac2_set_eee_timer,
.set_eee_pls = dwxgmac2_set_eee_pls,
.debug = NULL,
@@ -1536,12 +1455,10 @@ const struct stmmac_ops dwxgmac210_ops = {
.safety_feat_dump = dwxgmac3_safety_feat_dump,
.set_mac_loopback = dwxgmac2_set_mac_loopback,
.rss_configure = dwxgmac2_rss_configure,
- .update_vlan_hash = dwxgmac2_update_vlan_hash,
.rxp_config = dwxgmac3_rxp_config,
.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
.flex_pps_config = dwxgmac2_flex_pps_config,
.sarc_configure = dwxgmac2_sarc_configure,
- .enable_vlan = dwxgmac2_enable_vlan,
.config_l3_filter = dwxgmac2_config_l3_filter,
.config_l4_filter = dwxgmac2_config_l4_filter,
.set_arp_offload = dwxgmac2_set_arp_offload,
@@ -1564,6 +1481,7 @@ static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
const struct stmmac_ops dwxlgmac2_ops = {
.core_init = dwxgmac2_core_init,
+ .irq_modify = dwxgmac2_irq_modify,
.set_mac = dwxgmac2_set_mac,
.rx_ipc = dwxgmac2_rx_ipc,
.rx_queue_enable = dwxlgmac2_rx_queue_enable,
@@ -1582,8 +1500,7 @@ const struct stmmac_ops dwxlgmac2_ops = {
.pmt = dwxgmac2_pmt,
.set_umac_addr = dwxgmac2_set_umac_addr,
.get_umac_addr = dwxgmac2_get_umac_addr,
- .set_eee_mode = dwxgmac2_set_eee_mode,
- .reset_eee_mode = dwxgmac2_reset_eee_mode,
+ .set_lpi_mode = dwxgmac2_set_lpi_mode,
.set_eee_timer = dwxgmac2_set_eee_timer,
.set_eee_pls = dwxgmac2_set_eee_pls,
.debug = NULL,
@@ -1593,12 +1510,10 @@ const struct stmmac_ops dwxlgmac2_ops = {
.safety_feat_dump = dwxgmac3_safety_feat_dump,
.set_mac_loopback = dwxgmac2_set_mac_loopback,
.rss_configure = dwxgmac2_rss_configure,
- .update_vlan_hash = dwxgmac2_update_vlan_hash,
.rxp_config = dwxgmac3_rxp_config,
.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
.flex_pps_config = dwxgmac2_flex_pps_config,
.sarc_configure = dwxgmac2_sarc_configure,
- .enable_vlan = dwxgmac2_enable_vlan,
.config_l3_filter = dwxgmac2_config_l3_filter,
.config_l4_filter = dwxgmac2_config_l4_filter,
.set_arp_offload = dwxgmac2_set_arp_offload,
@@ -1621,8 +1536,8 @@ int dwxgmac2_setup(struct stmmac_priv *priv)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
- MAC_1000FD | MAC_2500FD | MAC_5000FD |
- MAC_10000FD;
+ MAC_10 | MAC_100 | MAC_1000FD |
+ MAC_2500FD | MAC_5000FD | MAC_10000FD;
mac->link.duplex = 0;
mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
@@ -1641,6 +1556,7 @@ int dwxgmac2_setup(struct stmmac_priv *priv)
mac->mii.reg_mask = GENMASK(15, 0);
mac->mii.clk_csr_shift = 19;
mac->mii.clk_csr_mask = GENMASK(21, 19);
+ mac->num_vlan = stmmac_get_num_vlan(priv->ioaddr);
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index 389aad7b5c1e..a2980482fcce 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -4,6 +4,7 @@
* stmmac XGMAC support.
*/
+#include <linux/bitfield.h>
#include <linux/stmmac.h>
#include "common.h"
#include "dwxgmac2.h"
@@ -69,6 +70,21 @@ static int dwxgmac2_get_tx_ls(struct dma_desc *p)
return (le32_to_cpu(p->des3) & XGMAC_RDES3_LD) > 0;
}
+static u16 dwxgmac2_wrback_get_rx_vlan_tci(struct dma_desc *p)
+{
+ return le32_to_cpu(p->des0) & XGMAC_RDES0_VLAN_TAG_MASK;
+}
+
+static bool dwxgmac2_wrback_get_rx_vlan_valid(struct dma_desc *p)
+{
+ u32 et_lt;
+
+ et_lt = FIELD_GET(XGMAC_RDES3_ET_LT, le32_to_cpu(p->des3));
+
+ return et_lt >= XGMAC_ET_LT_VLAN_STAG &&
+ et_lt <= XGMAC_ET_LT_DVLAN_STAG_CTAG;
+}
+
static int dwxgmac2_get_rx_frame_len(struct dma_desc *p, int rx_coe)
{
return (le32_to_cpu(p->des3) & XGMAC_RDES3_PL);
@@ -351,6 +367,8 @@ const struct stmmac_desc_ops dwxgmac210_desc_ops = {
.set_tx_owner = dwxgmac2_set_tx_owner,
.set_rx_owner = dwxgmac2_set_rx_owner,
.get_tx_ls = dwxgmac2_get_tx_ls,
+ .get_rx_vlan_tci = dwxgmac2_wrback_get_rx_vlan_tci,
+ .get_rx_vlan_valid = dwxgmac2_wrback_get_rx_vlan_valid,
.get_rx_frame_len = dwxgmac2_get_rx_frame_len,
.enable_tx_timestamp = dwxgmac2_enable_tx_timestamp,
.get_tx_timestamp_status = dwxgmac2_get_tx_timestamp_status,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 7840bc403788..cc1bdc0975d5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -84,7 +84,6 @@ static void dwxgmac2_dma_init_tx_chan(struct stmmac_priv *priv,
static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
{
u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
- int i;
if (axi->axi_lpi_en)
value |= XGMAC_EN_LPI;
@@ -102,32 +101,12 @@ static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
if (!axi->axi_fb)
value |= XGMAC_UNDEF;
- value &= ~XGMAC_BLEN;
- for (i = 0; i < AXI_BLEN; i++) {
- switch (axi->axi_blen[i]) {
- case 256:
- value |= XGMAC_BLEN256;
- break;
- case 128:
- value |= XGMAC_BLEN128;
- break;
- case 64:
- value |= XGMAC_BLEN64;
- break;
- case 32:
- value |= XGMAC_BLEN32;
- break;
- case 16:
- value |= XGMAC_BLEN16;
- break;
- case 8:
- value |= XGMAC_BLEN8;
- break;
- case 4:
- value |= XGMAC_BLEN4;
- break;
- }
- }
+ /* Depending on the UNDEF bit the Master AXI will perform any burst
+ * length according to the BLEN programmed (by default all BLEN are
+ * set). Note that the UNDEF bit is readonly, and is the inverse of
+ * Bus Mode bit 16.
+ */
+ value = (value & ~DMA_AXI_BLEN_MASK) | axi->axi_blen_regval;
writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
writel(XGMAC_TDPS, ioaddr + XGMAC_TX_EDMA_CTRL);
@@ -203,10 +182,6 @@ static void dwxgmac2_dma_rx_mode(struct stmmac_priv *priv, void __iomem *ioaddr,
}
writel(value, ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
-
- /* Enable MTL RX overflow */
- value = readl(ioaddr + XGMAC_MTL_QINTEN(channel));
- writel(value | XGMAC_RXOIE, ioaddr + XGMAC_MTL_QINTEN(channel));
}
static void dwxgmac2_dma_tx_mode(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -364,19 +339,17 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
}
/* TX/RX NORMAL interrupts */
- if (likely(intr_status & XGMAC_NIS)) {
- if (likely(intr_status & XGMAC_RI)) {
- u64_stats_update_begin(&stats->syncp);
- u64_stats_inc(&stats->rx_normal_irq_n[chan]);
- u64_stats_update_end(&stats->syncp);
- ret |= handle_rx;
- }
- if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
- u64_stats_update_begin(&stats->syncp);
- u64_stats_inc(&stats->tx_normal_irq_n[chan]);
- u64_stats_update_end(&stats->syncp);
- ret |= handle_tx;
- }
+ if (likely(intr_status & XGMAC_RI)) {
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
+ ret |= handle_rx;
+ }
+ if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
+ ret |= handle_tx;
}
/* Clear interrupts */
@@ -388,8 +361,11 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
struct dma_features *dma_cap)
{
+ struct stmmac_priv *priv;
u32 hw_cap;
+ priv = container_of(dma_cap, struct stmmac_priv, dma_cap);
+
/* MAC HW feature 0 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0);
dma_cap->edma = (hw_cap & XGMAC_HWFEAT_EDMA) >> 31;
@@ -412,6 +388,8 @@ static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
dma_cap->vlhash = (hw_cap & XGMAC_HWFEAT_VLHASH) >> 4;
dma_cap->half_duplex = (hw_cap & XGMAC_HWFEAT_HDSEL) >> 3;
dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1;
+ if (dma_cap->mbps_1000 && priv->synopsys_id >= DWXGMAC_CORE_2_20)
+ dma_cap->mbps_10_100 = 1;
/* MAC HW feature 1 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index cfc50289aed6..014f7cd79a3c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -9,32 +9,46 @@
#include "stmmac_fpe.h"
#include "stmmac_ptp.h"
#include "stmmac_est.h"
+#include "stmmac_vlan.h"
+#include "dwmac4_descs.h"
+#include "dwxgmac2.h"
-static u32 stmmac_get_id(struct stmmac_priv *priv, u32 id_reg)
+struct stmmac_version {
+ u8 snpsver;
+ u8 dev_id;
+};
+
+static void stmmac_get_version(struct stmmac_priv *priv,
+ struct stmmac_version *ver)
{
- u32 reg = readl(priv->ioaddr + id_reg);
+ enum dwmac_core_type core_type = priv->plat->core_type;
+ unsigned int version_offset;
+ u32 version;
- if (!reg) {
- dev_info(priv->device, "Version ID not available\n");
- return 0x0;
- }
+ ver->snpsver = 0;
+ ver->dev_id = 0;
- dev_info(priv->device, "User ID: 0x%x, Synopsys ID: 0x%x\n",
- (unsigned int)(reg & GENMASK(15, 8)) >> 8,
- (unsigned int)(reg & GENMASK(7, 0)));
- return reg & GENMASK(7, 0);
-}
+ if (core_type == DWMAC_CORE_MAC100)
+ return;
-static u32 stmmac_get_dev_id(struct stmmac_priv *priv, u32 id_reg)
-{
- u32 reg = readl(priv->ioaddr + id_reg);
+ if (core_type == DWMAC_CORE_GMAC)
+ version_offset = GMAC_VERSION;
+ else
+ version_offset = GMAC4_VERSION;
- if (!reg) {
+ version = readl(priv->ioaddr + version_offset);
+ if (version == 0) {
dev_info(priv->device, "Version ID not available\n");
- return 0x0;
+ return;
}
- return (reg & GENMASK(15, 8)) >> 8;
+ dev_info(priv->device, "User ID: 0x%x, Synopsys ID: 0x%x\n",
+ FIELD_GET(DWMAC_USERVER, version),
+ FIELD_GET(DWMAC_SNPSVER, version));
+
+ ver->snpsver = FIELD_GET(DWMAC_SNPSVER, version);
+ if (core_type == DWMAC_CORE_XGMAC)
+ ver->dev_id = FIELD_GET(DWMAC_USERVER, version);
}
static void stmmac_dwmac_mode_quirk(struct stmmac_priv *priv)
@@ -89,23 +103,19 @@ static int stmmac_dwxlgmac_quirks(struct stmmac_priv *priv)
return 0;
}
-int stmmac_reset(struct stmmac_priv *priv, void __iomem *ioaddr)
+int stmmac_reset(struct stmmac_priv *priv)
{
- struct plat_stmmacenet_data *plat = priv ? priv->plat : NULL;
-
- if (!priv)
- return -EINVAL;
+ struct plat_stmmacenet_data *plat = priv->plat;
+ void __iomem *ioaddr = priv->ioaddr;
if (plat && plat->fix_soc_reset)
- return plat->fix_soc_reset(plat, ioaddr);
+ return plat->fix_soc_reset(priv, ioaddr);
return stmmac_do_callback(priv, dma, reset, ioaddr);
}
static const struct stmmac_hwif_entry {
- bool gmac;
- bool gmac4;
- bool xgmac;
+ enum dwmac_core_type core_type;
u32 min_id;
u32 dev_id;
const struct stmmac_regs_off regs;
@@ -113,18 +123,18 @@ static const struct stmmac_hwif_entry {
const void *dma;
const void *mac;
const void *hwtimestamp;
+ const void *ptp;
const void *mode;
const void *tc;
const void *mmc;
const void *est;
+ const void *vlan;
int (*setup)(struct stmmac_priv *priv);
int (*quirks)(struct stmmac_priv *priv);
} stmmac_hw[] = {
/* NOTE: New HW versions shall go to the end of this table */
{
- .gmac = false,
- .gmac4 = false,
- .xgmac = false,
+ .core_type = DWMAC_CORE_MAC100,
.min_id = 0,
.regs = {
.ptp_off = PTP_GMAC3_X_OFFSET,
@@ -133,16 +143,15 @@ static const struct stmmac_hwif_entry {
.desc = NULL,
.dma = &dwmac100_dma_ops,
.mac = &dwmac100_ops,
- .hwtimestamp = &stmmac_ptp,
+ .hwtimestamp = &dwmac1000_ptp,
+ .ptp = &dwmac1000_ptp_clock_ops,
.mode = NULL,
.tc = NULL,
.mmc = &dwmac_mmc_ops,
.setup = dwmac100_setup,
.quirks = stmmac_dwmac1_quirks,
}, {
- .gmac = true,
- .gmac4 = false,
- .xgmac = false,
+ .core_type = DWMAC_CORE_GMAC,
.min_id = 0,
.regs = {
.ptp_off = PTP_GMAC3_X_OFFSET,
@@ -151,16 +160,15 @@ static const struct stmmac_hwif_entry {
.desc = NULL,
.dma = &dwmac1000_dma_ops,
.mac = &dwmac1000_ops,
- .hwtimestamp = &stmmac_ptp,
+ .hwtimestamp = &dwmac1000_ptp,
+ .ptp = &dwmac1000_ptp_clock_ops,
.mode = NULL,
.tc = NULL,
.mmc = &dwmac_mmc_ops,
.setup = dwmac1000_setup,
.quirks = stmmac_dwmac1_quirks,
}, {
- .gmac = false,
- .gmac4 = true,
- .xgmac = false,
+ .core_type = DWMAC_CORE_GMAC4,
.min_id = 0,
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
@@ -170,7 +178,9 @@ static const struct stmmac_hwif_entry {
.desc = &dwmac4_desc_ops,
.dma = &dwmac4_dma_ops,
.mac = &dwmac4_ops,
+ .vlan = &dwmac_vlan_ops,
.hwtimestamp = &stmmac_ptp,
+ .ptp = &stmmac_ptp_clock_ops,
.mode = NULL,
.tc = &dwmac4_tc_ops,
.mmc = &dwmac_mmc_ops,
@@ -178,9 +188,7 @@ static const struct stmmac_hwif_entry {
.setup = dwmac4_setup,
.quirks = stmmac_dwmac4_quirks,
}, {
- .gmac = false,
- .gmac4 = true,
- .xgmac = false,
+ .core_type = DWMAC_CORE_GMAC4,
.min_id = DWMAC_CORE_4_00,
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
@@ -191,7 +199,9 @@ static const struct stmmac_hwif_entry {
.desc = &dwmac4_desc_ops,
.dma = &dwmac4_dma_ops,
.mac = &dwmac410_ops,
+ .vlan = &dwmac_vlan_ops,
.hwtimestamp = &stmmac_ptp,
+ .ptp = &stmmac_ptp_clock_ops,
.mode = &dwmac4_ring_mode_ops,
.tc = &dwmac510_tc_ops,
.mmc = &dwmac_mmc_ops,
@@ -199,9 +209,7 @@ static const struct stmmac_hwif_entry {
.setup = dwmac4_setup,
.quirks = NULL,
}, {
- .gmac = false,
- .gmac4 = true,
- .xgmac = false,
+ .core_type = DWMAC_CORE_GMAC4,
.min_id = DWMAC_CORE_4_10,
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
@@ -212,7 +220,9 @@ static const struct stmmac_hwif_entry {
.desc = &dwmac4_desc_ops,
.dma = &dwmac410_dma_ops,
.mac = &dwmac410_ops,
+ .vlan = &dwmac_vlan_ops,
.hwtimestamp = &stmmac_ptp,
+ .ptp = &stmmac_ptp_clock_ops,
.mode = &dwmac4_ring_mode_ops,
.tc = &dwmac510_tc_ops,
.mmc = &dwmac_mmc_ops,
@@ -220,9 +230,7 @@ static const struct stmmac_hwif_entry {
.setup = dwmac4_setup,
.quirks = NULL,
}, {
- .gmac = false,
- .gmac4 = true,
- .xgmac = false,
+ .core_type = DWMAC_CORE_GMAC4,
.min_id = DWMAC_CORE_5_10,
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
@@ -233,7 +241,9 @@ static const struct stmmac_hwif_entry {
.desc = &dwmac4_desc_ops,
.dma = &dwmac410_dma_ops,
.mac = &dwmac510_ops,
+ .vlan = &dwmac_vlan_ops,
.hwtimestamp = &stmmac_ptp,
+ .ptp = &stmmac_ptp_clock_ops,
.mode = &dwmac4_ring_mode_ops,
.tc = &dwmac510_tc_ops,
.mmc = &dwmac_mmc_ops,
@@ -241,9 +251,7 @@ static const struct stmmac_hwif_entry {
.setup = dwmac4_setup,
.quirks = NULL,
}, {
- .gmac = false,
- .gmac4 = false,
- .xgmac = true,
+ .core_type = DWMAC_CORE_XGMAC,
.min_id = DWXGMAC_CORE_2_10,
.dev_id = DWXGMAC_ID,
.regs = {
@@ -255,17 +263,17 @@ static const struct stmmac_hwif_entry {
.desc = &dwxgmac210_desc_ops,
.dma = &dwxgmac210_dma_ops,
.mac = &dwxgmac210_ops,
+ .vlan = &dwxgmac210_vlan_ops,
.hwtimestamp = &stmmac_ptp,
+ .ptp = &stmmac_ptp_clock_ops,
.mode = NULL,
- .tc = &dwxgmac_tc_ops,
+ .tc = &dwmac510_tc_ops,
.mmc = &dwxgmac_mmc_ops,
.est = &dwmac510_est_ops,
.setup = dwxgmac2_setup,
.quirks = NULL,
}, {
- .gmac = false,
- .gmac4 = false,
- .xgmac = true,
+ .core_type = DWMAC_CORE_XGMAC,
.min_id = DWXLGMAC_CORE_2_00,
.dev_id = DWXLGMAC_ID,
.regs = {
@@ -277,9 +285,11 @@ static const struct stmmac_hwif_entry {
.desc = &dwxgmac210_desc_ops,
.dma = &dwxgmac210_dma_ops,
.mac = &dwxlgmac2_ops,
+ .vlan = &dwxlgmac2_vlan_ops,
.hwtimestamp = &stmmac_ptp,
+ .ptp = &stmmac_ptp_clock_ops,
.mode = NULL,
- .tc = &dwxgmac_tc_ops,
+ .tc = &dwmac510_tc_ops,
.mmc = &dwxgmac_mmc_ops,
.est = &dwmac510_est_ops,
.setup = dwxlgmac2_setup,
@@ -287,97 +297,114 @@ static const struct stmmac_hwif_entry {
},
};
+static const struct stmmac_hwif_entry *
+stmmac_hwif_find(enum dwmac_core_type core_type, u8 snpsver, u8 dev_id)
+{
+ const struct stmmac_hwif_entry *entry;
+ int i;
+
+ for (i = ARRAY_SIZE(stmmac_hw) - 1; i >= 0; i--) {
+ entry = &stmmac_hw[i];
+
+ if (core_type != entry->core_type)
+ continue;
+ /* Use synopsys_id var because some setups can override this */
+ if (snpsver < entry->min_id)
+ continue;
+ if (core_type == DWMAC_CORE_XGMAC &&
+ dev_id != entry->dev_id)
+ continue;
+
+ return entry;
+ }
+
+ return NULL;
+}
+
int stmmac_hwif_init(struct stmmac_priv *priv)
{
- bool needs_xgmac = priv->plat->has_xgmac;
- bool needs_gmac4 = priv->plat->has_gmac4;
- bool needs_gmac = priv->plat->has_gmac;
+ enum dwmac_core_type core_type = priv->plat->core_type;
const struct stmmac_hwif_entry *entry;
+ struct stmmac_version version;
struct mac_device_info *mac;
bool needs_setup = true;
- u32 id, dev_id = 0;
- int i, ret;
-
- if (needs_gmac) {
- id = stmmac_get_id(priv, GMAC_VERSION);
- } else if (needs_gmac4 || needs_xgmac) {
- id = stmmac_get_id(priv, GMAC4_VERSION);
- if (needs_xgmac)
- dev_id = stmmac_get_dev_id(priv, GMAC4_VERSION);
- } else {
- id = 0;
- }
+ int ret;
+
+ stmmac_get_version(priv, &version);
/* Save ID for later use */
- priv->synopsys_id = id;
+ priv->synopsys_id = version.snpsver;
/* Lets assume some safe values first */
- priv->ptpaddr = priv->ioaddr +
- (needs_gmac4 ? PTP_GMAC4_OFFSET : PTP_GMAC3_X_OFFSET);
- priv->mmcaddr = priv->ioaddr +
- (needs_gmac4 ? MMC_GMAC4_OFFSET : MMC_GMAC3_X_OFFSET);
- if (needs_gmac4)
+ if (core_type == DWMAC_CORE_GMAC4) {
+ priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
+ priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
priv->estaddr = priv->ioaddr + EST_GMAC4_OFFSET;
- else if (needs_xgmac)
- priv->estaddr = priv->ioaddr + EST_XGMAC_OFFSET;
-
- /* Check for HW specific setup first */
- if (priv->plat->setup) {
- mac = priv->plat->setup(priv);
- needs_setup = false;
} else {
- mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
+ priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
+ priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
+ if (core_type == DWMAC_CORE_XGMAC)
+ priv->estaddr = priv->ioaddr + EST_XGMAC_OFFSET;
}
+ mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
if (!mac)
return -ENOMEM;
+ /* Check for HW specific setup first */
+ if (priv->plat->mac_setup) {
+ ret = priv->plat->mac_setup(priv, mac);
+ if (ret)
+ return ret;
+
+ needs_setup = false;
+ }
+
+ spin_lock_init(&mac->irq_ctrl_lock);
+
/* Fallback to generic HW */
- for (i = ARRAY_SIZE(stmmac_hw) - 1; i >= 0; i--) {
- entry = &stmmac_hw[i];
- if (needs_gmac ^ entry->gmac)
- continue;
- if (needs_gmac4 ^ entry->gmac4)
- continue;
- if (needs_xgmac ^ entry->xgmac)
- continue;
- /* Use synopsys_id var because some setups can override this */
- if (priv->synopsys_id < entry->min_id)
- continue;
- if (needs_xgmac && (dev_id ^ entry->dev_id))
- continue;
+ /* Use synopsys_id var because some setups can override this */
+ entry = stmmac_hwif_find(core_type, priv->synopsys_id, version.dev_id);
+ if (!entry) {
+ dev_err(priv->device,
+ "Failed to find HW IF (id=0x%x, gmac=%d/%d)\n",
+ version.snpsver, core_type == DWMAC_CORE_GMAC,
+ core_type == DWMAC_CORE_GMAC4);
- /* Only use generic HW helpers if needed */
- mac->desc = mac->desc ? : entry->desc;
- mac->dma = mac->dma ? : entry->dma;
- mac->mac = mac->mac ? : entry->mac;
- mac->ptp = mac->ptp ? : entry->hwtimestamp;
- mac->mode = mac->mode ? : entry->mode;
- mac->tc = mac->tc ? : entry->tc;
- mac->mmc = mac->mmc ? : entry->mmc;
- mac->est = mac->est ? : entry->est;
-
- priv->hw = mac;
- priv->fpe_cfg.reg = entry->regs.fpe_reg;
- priv->ptpaddr = priv->ioaddr + entry->regs.ptp_off;
- priv->mmcaddr = priv->ioaddr + entry->regs.mmc_off;
- if (entry->est)
- priv->estaddr = priv->ioaddr + entry->regs.est_off;
-
- /* Entry found */
- if (needs_setup) {
- ret = entry->setup(priv);
- if (ret)
- return ret;
- }
+ return -EINVAL;
+ }
- /* Save quirks, if needed for posterior use */
- priv->hwif_quirks = entry->quirks;
- return 0;
+ /* Only use generic HW helpers if needed */
+ mac->desc = mac->desc ? : entry->desc;
+ mac->dma = mac->dma ? : entry->dma;
+ mac->mac = mac->mac ? : entry->mac;
+ mac->ptp = mac->ptp ? : entry->hwtimestamp;
+ mac->mode = mac->mode ? : entry->mode;
+ mac->tc = mac->tc ? : entry->tc;
+ mac->mmc = mac->mmc ? : entry->mmc;
+ mac->est = mac->est ? : entry->est;
+ mac->vlan = mac->vlan ? : entry->vlan;
+
+ priv->hw = mac;
+ priv->fpe_cfg.reg = entry->regs.fpe_reg;
+ priv->ptpaddr = priv->ioaddr + entry->regs.ptp_off;
+ priv->mmcaddr = priv->ioaddr + entry->regs.mmc_off;
+ memcpy(&priv->ptp_clock_ops, entry->ptp,
+ sizeof(struct ptp_clock_info));
+
+ if (entry->est)
+ priv->estaddr = priv->ioaddr + entry->regs.est_off;
+
+ /* Entry found */
+ if (needs_setup) {
+ ret = entry->setup(priv);
+ if (ret)
+ return ret;
}
- dev_err(priv->device, "Failed to find HW IF (id=0x%x, gmac=%d/%d)\n",
- id, needs_gmac, needs_gmac4);
- return -EINVAL;
+ /* Save quirks, if needed for posterior use */
+ priv->hwif_quirks = entry->quirks;
+
+ return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 64f8ed67dcc4..df6e8a567b1f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -201,6 +201,7 @@ struct stmmac_dma_ops {
void (*dma_diagnostic_fr)(struct stmmac_extra_stats *x,
void __iomem *ioaddr);
void (*enable_dma_transmission)(void __iomem *ioaddr, u32 chan);
+ void (*enable_dma_reception)(void __iomem *ioaddr, u32 chan);
void (*enable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx);
void (*disable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -261,6 +262,8 @@ struct stmmac_dma_ops {
stmmac_do_void_callback(__priv, dma, dma_diagnostic_fr, __args)
#define stmmac_enable_dma_transmission(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, enable_dma_transmission, __args)
+#define stmmac_enable_dma_reception(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, enable_dma_reception, __args)
#define stmmac_enable_dma_irq(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, enable_dma_irq, __priv, __args)
#define stmmac_disable_dma_irq(__priv, __args...) \
@@ -300,18 +303,27 @@ struct stmmac_dma_ops {
struct mac_device_info;
struct net_device;
-struct rgmii_adv;
struct stmmac_tc_entry;
struct stmmac_pps_cfg;
struct stmmac_rss;
struct stmmac_est;
+enum stmmac_lpi_mode {
+ STMMAC_LPI_DISABLE,
+ STMMAC_LPI_FORCED,
+ STMMAC_LPI_TIMER,
+};
+
/* Helpers to program the MAC core */
struct stmmac_ops {
+ /* Initialise any PCS instances */
+ int (*pcs_init)(struct stmmac_priv *priv);
/* MAC core initialization */
void (*core_init)(struct mac_device_info *hw, struct net_device *dev);
/* Update MAC capabilities */
void (*update_caps)(struct stmmac_priv *priv);
+ /* Change the interrupt enable setting. Enable takes precedence. */
+ void (*irq_modify)(struct mac_device_info *hw, u32 disable, u32 enable);
/* Enable the MAC RX/TX */
void (*set_mac)(void __iomem *ioaddr, bool enable);
/* Enable and verify that the IPC module is supported */
@@ -360,19 +372,17 @@ struct stmmac_ops {
unsigned int reg_n);
void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr,
unsigned int reg_n);
- void (*set_eee_mode)(struct mac_device_info *hw,
- bool en_tx_lpi_clockgating);
- void (*reset_eee_mode)(struct mac_device_info *hw);
- void (*set_eee_lpi_entry_timer)(struct mac_device_info *hw, int et);
+ int (*set_lpi_mode)(struct mac_device_info *hw,
+ enum stmmac_lpi_mode mode,
+ bool en_tx_lpi_clockgating, u32 et);
void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
void (*set_eee_pls)(struct mac_device_info *hw, int link);
void (*debug)(struct stmmac_priv *priv, void __iomem *ioaddr,
struct stmmac_extra_stats *x, u32 rx_queues,
u32 tx_queues);
/* PCS calls */
- void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral,
- bool loopback);
- void (*pcs_get_adv_lp)(void __iomem *ioaddr, struct rgmii_adv *adv);
+ void (*pcs_ctrl_ane)(struct stmmac_priv *priv, bool ane,
+ bool srgmi_ral);
/* Safety Features */
int (*safety_feat_config)(void __iomem *ioaddr, unsigned int asp,
struct stmmac_safety_feature_cfg *safety_cfg);
@@ -393,21 +403,6 @@ struct stmmac_ops {
/* RSS */
int (*rss_configure)(struct mac_device_info *hw,
struct stmmac_rss *cfg, u32 num_rxq);
- /* VLAN */
- void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash,
- u16 perfect_match, bool is_double);
- void (*enable_vlan)(struct mac_device_info *hw, u32 type);
- void (*rx_hw_vlan)(struct mac_device_info *hw, struct dma_desc *rx_desc,
- struct sk_buff *skb);
- void (*set_hw_vlan_mode)(struct mac_device_info *hw);
- int (*add_hw_vlan_rx_fltr)(struct net_device *dev,
- struct mac_device_info *hw,
- __be16 proto, u16 vid);
- int (*del_hw_vlan_rx_fltr)(struct net_device *dev,
- struct mac_device_info *hw,
- __be16 proto, u16 vid);
- void (*restore_hw_vlan_rx_fltr)(struct net_device *dev,
- struct mac_device_info *hw);
/* TX Timestamp */
int (*get_mac_tx_timestamp)(struct mac_device_info *hw, u64 *ts);
/* Source Address Insertion / Replacement */
@@ -425,10 +420,14 @@ struct stmmac_ops {
u32 pclass);
};
+#define stmmac_mac_pcs_init(__priv) \
+ stmmac_do_callback(__priv, mac, pcs_init, __priv)
#define stmmac_core_init(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, core_init, __args)
#define stmmac_mac_update_caps(__priv) \
stmmac_do_void_callback(__priv, mac, update_caps, __priv)
+#define stmmac_mac_irq_modify(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, irq_modify, (__priv)->hw, __args)
#define stmmac_mac_set(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, set_mac, __args)
#define stmmac_rx_ipc(__priv, __args...) \
@@ -467,12 +466,8 @@ struct stmmac_ops {
stmmac_do_void_callback(__priv, mac, set_umac_addr, __args)
#define stmmac_get_umac_addr(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, get_umac_addr, __args)
-#define stmmac_set_eee_mode(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, set_eee_mode, __args)
-#define stmmac_reset_eee_mode(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, reset_eee_mode, __args)
-#define stmmac_set_eee_lpi_timer(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, set_eee_lpi_entry_timer, __args)
+#define stmmac_set_lpi_mode(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, set_lpi_mode, __args)
#define stmmac_set_eee_timer(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, set_eee_timer, __args)
#define stmmac_set_eee_pls(__priv, __args...) \
@@ -480,9 +475,7 @@ struct stmmac_ops {
#define stmmac_mac_debug(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, debug, __priv, __args)
#define stmmac_pcs_ctrl_ane(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, pcs_ctrl_ane, __args)
-#define stmmac_pcs_get_adv_lp(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, pcs_get_adv_lp, __args)
+ stmmac_do_void_callback(__priv, mac, pcs_ctrl_ane, __priv, __args)
#define stmmac_safety_feat_config(__priv, __args...) \
stmmac_do_callback(__priv, mac, safety_feat_config, __args)
#define stmmac_safety_feat_irq_status(__priv, __args...) \
@@ -497,20 +490,6 @@ struct stmmac_ops {
stmmac_do_void_callback(__priv, mac, set_mac_loopback, __args)
#define stmmac_rss_configure(__priv, __args...) \
stmmac_do_callback(__priv, mac, rss_configure, __args)
-#define stmmac_update_vlan_hash(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, update_vlan_hash, __args)
-#define stmmac_enable_vlan(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, enable_vlan, __args)
-#define stmmac_rx_hw_vlan(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, rx_hw_vlan, __args)
-#define stmmac_set_hw_vlan_mode(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, set_hw_vlan_mode, __args)
-#define stmmac_add_hw_vlan_rx_fltr(__priv, __args...) \
- stmmac_do_callback(__priv, mac, add_hw_vlan_rx_fltr, __args)
-#define stmmac_del_hw_vlan_rx_fltr(__priv, __args...) \
- stmmac_do_callback(__priv, mac, del_hw_vlan_rx_fltr, __args)
-#define stmmac_restore_hw_vlan_rx_fltr(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, restore_hw_vlan_rx_fltr, __args)
#define stmmac_get_mac_tx_timestamp(__priv, __args...) \
stmmac_do_callback(__priv, mac, get_mac_tx_timestamp, __args)
#define stmmac_sarc_configure(__priv, __args...) \
@@ -565,7 +544,7 @@ struct stmmac_rx_queue;
struct stmmac_mode_ops {
void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
unsigned int extend_desc);
- unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
+ bool (*is_jumbo_frm)(unsigned int len, bool enh_desc);
int (*jumbo_frm)(struct stmmac_tx_queue *tx_q, struct sk_buff *skb,
int csum);
int (*set_16kib_bfsize)(int mtu);
@@ -658,6 +637,39 @@ struct stmmac_est_ops {
#define stmmac_est_irq_status(__priv, __args...) \
stmmac_do_void_callback(__priv, est, irq_status, __args)
+struct stmmac_vlan_ops {
+ /* VLAN */
+ void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash,
+ u16 perfect_match, bool is_double);
+ void (*enable_vlan)(struct mac_device_info *hw, u32 type);
+ void (*rx_hw_vlan)(struct mac_device_info *hw, struct dma_desc *rx_desc,
+ struct sk_buff *skb);
+ void (*set_hw_vlan_mode)(struct mac_device_info *hw);
+ int (*add_hw_vlan_rx_fltr)(struct net_device *dev,
+ struct mac_device_info *hw,
+ __be16 proto, u16 vid);
+ int (*del_hw_vlan_rx_fltr)(struct net_device *dev,
+ struct mac_device_info *hw,
+ __be16 proto, u16 vid);
+ void (*restore_hw_vlan_rx_fltr)(struct net_device *dev,
+ struct mac_device_info *hw);
+};
+
+#define stmmac_update_vlan_hash(__priv, __args...) \
+ stmmac_do_void_callback(__priv, vlan, update_vlan_hash, __args)
+#define stmmac_enable_vlan(__priv, __args...) \
+ stmmac_do_void_callback(__priv, vlan, enable_vlan, __args)
+#define stmmac_rx_hw_vlan(__priv, __args...) \
+ stmmac_do_void_callback(__priv, vlan, rx_hw_vlan, __args)
+#define stmmac_set_hw_vlan_mode(__priv, __args...) \
+ stmmac_do_void_callback(__priv, vlan, set_hw_vlan_mode, __args)
+#define stmmac_add_hw_vlan_rx_fltr(__priv, __args...) \
+ stmmac_do_callback(__priv, vlan, add_hw_vlan_rx_fltr, __args)
+#define stmmac_del_hw_vlan_rx_fltr(__priv, __args...) \
+ stmmac_do_callback(__priv, vlan, del_hw_vlan_rx_fltr, __args)
+#define stmmac_restore_hw_vlan_rx_fltr(__priv, __args...) \
+ stmmac_do_void_callback(__priv, vlan, restore_hw_vlan_rx_fltr, __args)
+
struct stmmac_regs_off {
const struct stmmac_fpe_reg *fpe_reg;
u32 ptp_off;
@@ -665,6 +677,15 @@ struct stmmac_regs_off {
u32 est_off;
};
+extern const struct stmmac_desc_ops enh_desc_ops;
+extern const struct stmmac_desc_ops ndesc_ops;
+
+extern const struct stmmac_hwtimestamp stmmac_ptp;
+extern const struct stmmac_hwtimestamp dwmac1000_ptp;
+
+extern const struct stmmac_mode_ops ring_mode_ops;
+extern const struct stmmac_mode_ops chain_mode_ops;
+
extern const struct stmmac_ops dwmac100_ops;
extern const struct stmmac_dma_ops dwmac100_dma_ops;
extern const struct stmmac_ops dwmac1000_ops;
@@ -676,19 +697,11 @@ extern const struct stmmac_dma_ops dwmac410_dma_ops;
extern const struct stmmac_ops dwmac510_ops;
extern const struct stmmac_tc_ops dwmac4_tc_ops;
extern const struct stmmac_tc_ops dwmac510_tc_ops;
-extern const struct stmmac_tc_ops dwxgmac_tc_ops;
-extern const struct stmmac_ops dwxgmac210_ops;
-extern const struct stmmac_ops dwxlgmac2_ops;
-extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
-extern const struct stmmac_desc_ops dwxgmac210_desc_ops;
-extern const struct stmmac_mmc_ops dwmac_mmc_ops;
-extern const struct stmmac_mmc_ops dwxgmac_mmc_ops;
-extern const struct stmmac_est_ops dwmac510_est_ops;
#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
#define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */
-int stmmac_reset(struct stmmac_priv *priv, void __iomem *ioaddr);
+int stmmac_reset(struct stmmac_priv *priv);
int stmmac_hwif_init(struct stmmac_priv *priv);
#endif /* __STMMAC_HWIF_H__ */
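
Also worth noting, as a sketch rather than part of the patch: the new stmmac_vlan_ops table above follows the same convention as the rest of hwif.h, where the per-core implementation is reached through a wrapper that only makes the indirect call when the hook is populated, so cores without the feature simply leave it NULL. In spirit (made-up names below, not the actual stmmac_do_*_callback() macro bodies):

/* Illustrative sketch only: a guarded indirect call through an optional
 * ops table, which is the behaviour the stmmac wrapper macros provide.
 * Every name below is hypothetical. */
#include <stdio.h>

struct vlan_ops {
	void (*enable_vlan)(unsigned int type);
};

struct hw {
	const struct vlan_ops *vlan;
};

static void demo_enable_vlan(unsigned int type)
{
	printf("enable VLAN filtering, type %u\n", type);
}

static int call_enable_vlan(struct hw *hw, unsigned int type)
{
	/* Cores without VLAN offload just leave the table or hook NULL */
	if (!hw->vlan || !hw->vlan->enable_vlan)
		return -1;

	hw->vlan->enable_vlan(type);
	return 0;
}

int main(void)
{
	static const struct vlan_ops ops = { .enable_vlan = demo_enable_vlan };
	struct hw with_vlan = { .vlan = &ops };
	struct hw without_vlan = { .vlan = NULL };

	call_enable_vlan(&with_vlan, 1);
	printf("missing hook -> %d\n", call_enable_vlan(&without_vlan, 1));
	return 0;
}
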
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 5d1ea3e07459..1cba39fb2c44 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -139,4 +139,7 @@ struct stmmac_counters {
unsigned int mmc_rx_fpe_fragment_cntr;
};
+extern const struct stmmac_mmc_ops dwmac_mmc_ops;
+extern const struct stmmac_mmc_ops dwxgmac_mmc_ops;
+
#endif /* __MMC_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index d218412ca832..382d94a3b972 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -91,14 +91,9 @@ static int jumbo_frm(struct stmmac_tx_queue *tx_q, struct sk_buff *skb,
return entry;
}
-static unsigned int is_jumbo_frm(int len, int enh_desc)
+static bool is_jumbo_frm(unsigned int len, bool enh_desc)
{
- unsigned int ret = 0;
-
- if (len >= BUF_SIZE_4KiB)
- ret = 1;
-
- return ret;
+ return len >= BUF_SIZE_4KiB;
}
static void refill_desc3(struct stmmac_rx_queue *rx_q, struct dma_desc *p)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 1d86439b8a14..012b0a477255 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -25,6 +25,8 @@
#include <net/xdp.h>
#include <uapi/linux/bpf.h>
+struct stmmac_pcs;
+
struct stmmac_resources {
void __iomem *addr;
u8 mac[ETH_ALEN];
@@ -106,6 +108,8 @@ struct stmmac_metadata_request {
struct stmmac_priv *priv;
struct dma_desc *tx_desc;
bool *set_ic;
+ struct dma_edesc *edesc;
+ int tbs;
};
struct stmmac_xsk_tx_complete {
@@ -126,7 +130,7 @@ struct stmmac_rx_queue {
unsigned int cur_rx;
unsigned int dirty_rx;
unsigned int buf_alloc_num;
- u32 rx_zeroc_thresh;
+ unsigned int napi_skb_frag_size;
dma_addr_t dma_rx_phy;
u32 rx_tail_addr;
unsigned int state_saved;
@@ -147,21 +151,9 @@ struct stmmac_channel {
};
struct stmmac_fpe_cfg {
- /* Serialize access to MAC Merge state between ethtool requests
- * and link state updates.
- */
- spinlock_t lock;
-
+ struct ethtool_mmsv mmsv;
const struct stmmac_fpe_reg *reg;
- u32 fpe_csr; /* MAC_FPE_CTRL_STS reg cache */
-
- enum ethtool_mm_verify_status status;
- struct timer_list verify_timer;
- bool verify_enabled;
- int verify_retries;
- bool pmac_enabled;
- u32 verify_time;
- bool tx_enabled;
+ u32 fpe_csr; /* MAC_FPE_CTRL_STS reg cache */
};
struct stmmac_tc_entry {
@@ -257,18 +249,17 @@ struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
u32 tx_coal_frames[MTL_MAX_TX_QUEUES];
u32 tx_coal_timer[MTL_MAX_TX_QUEUES];
- u32 rx_coal_frames[MTL_MAX_TX_QUEUES];
+ u32 rx_coal_frames[MTL_MAX_RX_QUEUES];
int hwts_tx_en;
bool tx_path_in_lpi_mode;
bool tso;
- int sph;
- int sph_cap;
+ bool sph_active;
+ bool sph_capable;
u32 sarc_type;
-
- unsigned int rx_copybreak;
- u32 rx_riwt[MTL_MAX_TX_QUEUES];
+ u32 rx_riwt[MTL_MAX_RX_QUEUES];
int hwts_rx_en;
+ bool tsfupdt_coarse;
void __iomem *ioaddr;
struct net_device *dev;
@@ -282,11 +273,11 @@ struct stmmac_priv {
/* Generic channel for NAPI */
struct stmmac_channel channel[STMMAC_CH_MAX];
- int speed;
- unsigned int flow_ctrl;
- unsigned int pause;
+ unsigned int pause_time;
struct mii_bus *mii;
+ struct stmmac_pcs *integrated_pcs;
+
struct phylink_config phylink_config;
struct phylink *phylink;
@@ -301,22 +292,21 @@ struct stmmac_priv {
int hw_cap_support;
int synopsys_id;
u32 msg_enable;
+ /* Our MAC Wake-on-Lan options */
int wolopts;
int wol_irq;
- bool wol_irq_disabled;
- int clk_csr;
+ u32 gmii_address_bus_config;
struct timer_list eee_ctrl_timer;
int lpi_irq;
- int eee_enabled;
- int eee_active;
- int tx_lpi_timer;
- int tx_lpi_enabled;
- int eee_tw_timer;
+ u32 tx_lpi_timer;
+ bool tx_lpi_clk_stop;
+ bool eee_enabled;
+ bool eee_active;
bool eee_sw_timer_en;
unsigned int mode;
unsigned int chain_mode;
int extend_desc;
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_ops;
unsigned int default_addend;
@@ -346,7 +336,7 @@ struct stmmac_priv {
char int_name_sfty[IFNAMSIZ + 10];
char int_name_sfty_ce[IFNAMSIZ + 10];
char int_name_sfty_ue[IFNAMSIZ + 10];
- char int_name_rx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 14];
+ char int_name_rx_irq[MTL_MAX_RX_QUEUES][IFNAMSIZ + 14];
char int_name_tx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 18];
#ifdef CONFIG_DEBUG_FS
@@ -380,6 +370,8 @@ struct stmmac_priv {
/* XDP BPF Program */
unsigned long *af_xdp_zc_qps;
struct bpf_prog *xdp_prog;
+
+ struct devlink *devlink;
};
enum stmmac_state {
@@ -389,43 +381,40 @@ enum stmmac_state {
STMMAC_SERVICE_SCHED,
};
+extern const struct dev_pm_ops stmmac_simple_pm_ops;
+
int stmmac_mdio_unregister(struct net_device *ndev);
int stmmac_mdio_register(struct net_device *ndev);
int stmmac_mdio_reset(struct mii_bus *mii);
+void stmmac_mdio_lock(struct stmmac_priv *priv);
+void stmmac_mdio_unlock(struct stmmac_priv *priv);
int stmmac_pcs_setup(struct net_device *ndev);
void stmmac_pcs_clean(struct net_device *ndev);
void stmmac_set_ethtool_ops(struct net_device *netdev);
-int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags);
void stmmac_ptp_register(struct stmmac_priv *priv);
void stmmac_ptp_unregister(struct stmmac_priv *priv);
int stmmac_xdp_open(struct net_device *dev);
void stmmac_xdp_release(struct net_device *dev);
+int stmmac_get_phy_intf_sel(phy_interface_t interface);
int stmmac_resume(struct device *dev);
int stmmac_suspend(struct device *dev);
void stmmac_dvr_remove(struct device *dev);
int stmmac_dvr_probe(struct device *device,
struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *res);
-void stmmac_disable_eee_mode(struct stmmac_priv *priv);
-bool stmmac_eee_init(struct stmmac_priv *priv);
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt);
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size);
-int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled);
+int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed);
+
+struct plat_stmmacenet_data *stmmac_plat_dat_alloc(struct device *dev);
static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv)
{
return !!priv->xdp_prog;
}
-static inline unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
-{
- if (stmmac_xdp_is_enabled(priv))
- return XDP_PACKET_HEADROOM;
-
- return 0;
-}
-
void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue);
void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue);
void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
index c9693f77e1f6..afc516059b89 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
@@ -32,6 +32,11 @@ static int est_configure(struct stmmac_priv *priv, struct stmmac_est *cfg,
int i, ret = 0;
u32 ctrl;
+ if (!ptp_rate) {
+		netdev_warn(priv->dev, "Invalid PTP rate\n");
+ return -EINVAL;
+ }
+
ret |= est_write(est_addr, EST_BTR_LOW, cfg->btr[0], false);
ret |= est_write(est_addr, EST_BTR_HIGH, cfg->btr[1], false);
ret |= est_write(est_addr, EST_TER, cfg->ter, false);
@@ -48,7 +53,7 @@ static int est_configure(struct stmmac_priv *priv, struct stmmac_est *cfg,
}
ctrl = readl(est_addr + EST_CONTROL);
- if (priv->plat->has_xgmac) {
+ if (priv->plat->core_type == DWMAC_CORE_XGMAC) {
ctrl &= ~EST_XGMAC_PTOV;
ctrl |= ((NSEC_PER_SEC / ptp_rate) * EST_XGMAC_PTOV_MUL) <<
EST_XGMAC_PTOV_SHIFT;
@@ -58,7 +63,7 @@ static int est_configure(struct stmmac_priv *priv, struct stmmac_est *cfg,
EST_GMAC5_PTOV_SHIFT;
}
if (cfg->enable)
- ctrl |= EST_EEST | EST_SSWL;
+ ctrl |= EST_EEST | EST_SSWL | EST_DFBS;
else
ctrl &= ~EST_EEST;
@@ -104,6 +109,10 @@ static void est_irq_status(struct stmmac_priv *priv, struct net_device *dev,
x->mtl_est_hlbs++;
+ for (i = 0; i < txqcnt; i++)
+ if (value & BIT(i))
+ x->mtl_est_txq_hlbs[i]++;
+
/* Clear Interrupt */
writel(value, est_addr + EST_SCH_ERR);
@@ -126,10 +135,9 @@ static void est_irq_status(struct stmmac_priv *priv, struct net_device *dev,
x->mtl_est_hlbf++;
- for (i = 0; i < txqcnt; i++) {
+ for (i = 0; i < txqcnt; i++)
if (feqn & BIT(i))
x->mtl_est_txq_hlbf[i]++;
- }
/* Clear Interrupt */
writel(feqn, est_addr + EST_FRM_SZ_ERR);
@@ -140,7 +148,7 @@ static void est_irq_status(struct stmmac_priv *priv, struct net_device *dev,
}
if (status & EST_BTRE) {
- if (priv->plat->has_xgmac) {
+ if (priv->plat->core_type == DWMAC_CORE_XGMAC) {
btrl = FIELD_GET(EST_XGMAC_BTRL, status);
btrl_max = FIELD_MAX(EST_XGMAC_BTRL);
} else {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h
index 7a858c566e7e..f70221c9c84a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h
@@ -16,6 +16,7 @@
#define EST_XGMAC_PTOV_MUL 9
#define EST_SSWL BIT(1)
#define EST_EEST BIT(0)
+#define EST_DFBS BIT(5)
#define EST_STATUS 0x00000008
#define EST_GMAC5_BTRL GENMASK(11, 8)
@@ -62,3 +63,5 @@
#define EST_SRWO BIT(0)
#define EST_GCL_DATA 0x00000034
+
+extern const struct stmmac_est_ops dwmac510_est_ops;
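
Two small points from the stmmac_est.c hunks above, illustrated separately from the patch: est_configure() now rejects a zero PTP clock rate before it is used as a divisor, and est_irq_status() pulls the BTR-low field out of the status word with FIELD_GET()/FIELD_MAX() from <linux/bitfield.h>. A standalone equivalent of that field extraction, assuming only the GENMASK(11, 8) layout of EST_GMAC5_BTRL shown above:

/* Illustrative sketch only: what FIELD_GET()/FIELD_MAX() compute for a
 * contiguous bit-field mask. The mask mirrors EST_GMAC5_BTRL (bits 11:8);
 * everything else is made up. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_BTRL_MASK	0x00000f00u	/* GENMASK(11, 8) */

static unsigned int field_get(uint32_t mask, uint32_t reg)
{
	/* shift down by the mask's least significant set bit (gcc/clang builtin) */
	return (reg & mask) >> __builtin_ctz(mask);
}

static unsigned int field_max(uint32_t mask)
{
	return mask >> __builtin_ctz(mask);	/* largest value the field can hold */
}

int main(void)
{
	uint32_t status = 0x00000500;	/* BTRL field contains 5 */

	printf("btrl=%u, btrl_max=%u\n",
	       field_get(DEMO_BTRL_MASK, status),
	       field_max(DEMO_BTRL_MASK));
	return 0;
}
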
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 1d77389ce953..b155e71aac51 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -37,7 +37,7 @@
#define ETHTOOL_DMA_OFFSET 55
struct stmmac_stats {
- char stat_string[ETH_GSTRING_LEN];
+ char stat_string[ETH_GSTRING_LEN] __nonstring;
int sizeof_stat;
int stat_offset;
};
@@ -303,9 +303,10 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
{
struct stmmac_priv *priv = netdev_priv(dev);
- if (priv->plat->has_gmac || priv->plat->has_gmac4)
+ if (priv->plat->core_type == DWMAC_CORE_GMAC ||
+ priv->plat->core_type == DWMAC_CORE_GMAC4)
strscpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
- else if (priv->plat->has_xgmac)
+ else if (priv->plat->core_type == DWMAC_CORE_XGMAC)
strscpy(info->driver, XGMAC_ETHTOOL_NAME, sizeof(info->driver));
else
strscpy(info->driver, MAC100_ETHTOOL_NAME,
@@ -322,84 +323,6 @@ static int stmmac_ethtool_get_link_ksettings(struct net_device *dev,
{
struct stmmac_priv *priv = netdev_priv(dev);
- if (!(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS) &&
- (priv->hw->pcs & STMMAC_PCS_RGMII ||
- priv->hw->pcs & STMMAC_PCS_SGMII)) {
- struct rgmii_adv adv;
- u32 supported, advertising, lp_advertising;
-
- if (!priv->xstats.pcs_link) {
- cmd->base.speed = SPEED_UNKNOWN;
- cmd->base.duplex = DUPLEX_UNKNOWN;
- return 0;
- }
- cmd->base.duplex = priv->xstats.pcs_duplex;
-
- cmd->base.speed = priv->xstats.pcs_speed;
-
- /* Get and convert ADV/LP_ADV from the HW AN registers */
- if (stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv))
- return -EOPNOTSUPP; /* should never happen indeed */
-
- /* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */
-
- ethtool_convert_link_mode_to_legacy_u32(
- &supported, cmd->link_modes.supported);
- ethtool_convert_link_mode_to_legacy_u32(
- &advertising, cmd->link_modes.advertising);
- ethtool_convert_link_mode_to_legacy_u32(
- &lp_advertising, cmd->link_modes.lp_advertising);
-
- if (adv.pause & STMMAC_PCS_PAUSE)
- advertising |= ADVERTISED_Pause;
- if (adv.pause & STMMAC_PCS_ASYM_PAUSE)
- advertising |= ADVERTISED_Asym_Pause;
- if (adv.lp_pause & STMMAC_PCS_PAUSE)
- lp_advertising |= ADVERTISED_Pause;
- if (adv.lp_pause & STMMAC_PCS_ASYM_PAUSE)
- lp_advertising |= ADVERTISED_Asym_Pause;
-
- /* Reg49[3] always set because ANE is always supported */
- cmd->base.autoneg = ADVERTISED_Autoneg;
- supported |= SUPPORTED_Autoneg;
- advertising |= ADVERTISED_Autoneg;
- lp_advertising |= ADVERTISED_Autoneg;
-
- if (adv.duplex) {
- supported |= (SUPPORTED_1000baseT_Full |
- SUPPORTED_100baseT_Full |
- SUPPORTED_10baseT_Full);
- advertising |= (ADVERTISED_1000baseT_Full |
- ADVERTISED_100baseT_Full |
- ADVERTISED_10baseT_Full);
- } else {
- supported |= (SUPPORTED_1000baseT_Half |
- SUPPORTED_100baseT_Half |
- SUPPORTED_10baseT_Half);
- advertising |= (ADVERTISED_1000baseT_Half |
- ADVERTISED_100baseT_Half |
- ADVERTISED_10baseT_Half);
- }
- if (adv.lp_duplex)
- lp_advertising |= (ADVERTISED_1000baseT_Full |
- ADVERTISED_100baseT_Full |
- ADVERTISED_10baseT_Full);
- else
- lp_advertising |= (ADVERTISED_1000baseT_Half |
- ADVERTISED_100baseT_Half |
- ADVERTISED_10baseT_Half);
- cmd->base.port = PORT_OTHER;
-
- ethtool_convert_legacy_u32_to_link_mode(
- cmd->link_modes.supported, supported);
- ethtool_convert_legacy_u32_to_link_mode(
- cmd->link_modes.advertising, advertising);
- ethtool_convert_legacy_u32_to_link_mode(
- cmd->link_modes.lp_advertising, lp_advertising);
-
- return 0;
- }
-
return phylink_ethtool_ksettings_get(priv->phylink, cmd);
}
@@ -409,20 +332,6 @@ stmmac_ethtool_set_link_ksettings(struct net_device *dev,
{
struct stmmac_priv *priv = netdev_priv(dev);
- if (!(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS) &&
- (priv->hw->pcs & STMMAC_PCS_RGMII ||
- priv->hw->pcs & STMMAC_PCS_SGMII)) {
- /* Only support ANE */
- if (cmd->base.autoneg != AUTONEG_ENABLE)
- return -EINVAL;
-
- mutex_lock(&priv->lock);
- stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
- mutex_unlock(&priv->lock);
-
- return 0;
- }
-
return phylink_ethtool_ksettings_set(priv->phylink, cmd);
}
@@ -443,9 +352,9 @@ static int stmmac_ethtool_get_regs_len(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
- if (priv->plat->has_xgmac)
+ if (priv->plat->core_type == DWMAC_CORE_XGMAC)
return XGMAC_REGSIZE * 4;
- else if (priv->plat->has_gmac4)
+ else if (priv->plat->core_type == DWMAC_CORE_GMAC4)
return GMAC4_REG_SPACE_SIZE;
return REG_SPACE_SIZE;
}
@@ -460,12 +369,12 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
stmmac_dump_dma_regs(priv, priv->ioaddr, reg_space);
/* Copy DMA registers to where ethtool expects them */
- if (priv->plat->has_gmac4) {
+ if (priv->plat->core_type == DWMAC_CORE_GMAC4) {
/* GMAC4 dumps its DMA registers at its DMA_CHAN_BASE_ADDR */
memcpy(&reg_space[ETHTOOL_DMA_OFFSET],
&reg_space[GMAC4_DMA_CHAN_BASE_ADDR / 4],
NUM_DWMAC4_DMA_REGS * 4);
- } else if (!priv->plat->has_xgmac) {
+ } else if (priv->plat->core_type != DWMAC_CORE_XGMAC) {
memcpy(&reg_space[ETHTOOL_DMA_OFFSET],
&reg_space[DMA_BUS_MODE / 4],
NUM_DWMAC1000_DMA_REGS * 4);
@@ -515,15 +424,8 @@ stmmac_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct stmmac_priv *priv = netdev_priv(netdev);
- struct rgmii_adv adv_lp;
- if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) {
- pause->autoneg = 1;
- if (!adv_lp.pause)
- return;
- } else {
- phylink_ethtool_get_pauseparam(priv->phylink, pause);
- }
+ phylink_ethtool_get_pauseparam(priv->phylink, pause);
}
static int
@@ -531,16 +433,8 @@ stmmac_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct stmmac_priv *priv = netdev_priv(netdev);
- struct rgmii_adv adv_lp;
- if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) {
- pause->autoneg = 1;
- if (!adv_lp.pause)
- return -EOPNOTSUPP;
- return 0;
- } else {
- return phylink_ethtool_set_pauseparam(priv->phylink, pause);
- }
+ return phylink_ethtool_set_pauseparam(priv->phylink, pause);
}
static u64 stmmac_get_rx_normal_irq_n(struct stmmac_priv *priv, int q)
@@ -654,7 +548,7 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
(*(u32 *)p);
}
}
- if (priv->eee_enabled) {
+ if (priv->dma_cap.eee) {
int val = phylink_get_eee_err(priv->phylink);
if (val)
priv->xstats.phy_eee_wakeup_error_n = val;
@@ -830,64 +724,14 @@ static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct stmmac_priv *priv = netdev_priv(dev);
- if (!priv->plat->pmt)
- return phylink_ethtool_get_wol(priv->phylink, wol);
-
- mutex_lock(&priv->lock);
- if (device_can_wakeup(priv->device)) {
- wol->supported = WAKE_MAGIC | WAKE_UCAST;
- if (priv->hw_cap_support && !priv->dma_cap.pmt_magic_frame)
- wol->supported &= ~WAKE_MAGIC;
- wol->wolopts = priv->wolopts;
- }
- mutex_unlock(&priv->lock);
+ return phylink_ethtool_get_wol(priv->phylink, wol);
}
static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct stmmac_priv *priv = netdev_priv(dev);
- u32 support = WAKE_MAGIC | WAKE_UCAST;
-
- if (!device_can_wakeup(priv->device))
- return -EOPNOTSUPP;
- if (!priv->plat->pmt) {
- int ret = phylink_ethtool_set_wol(priv->phylink, wol);
-
- if (!ret)
- device_set_wakeup_enable(priv->device, !!wol->wolopts);
- return ret;
- }
-
- /* By default almost all GMAC devices support the WoL via
- * magic frame but we can disable it if the HW capability
- * register shows no support for pmt_magic_frame. */
- if ((priv->hw_cap_support) && (!priv->dma_cap.pmt_magic_frame))
- wol->wolopts &= ~WAKE_MAGIC;
-
- if (wol->wolopts & ~support)
- return -EINVAL;
-
- if (wol->wolopts) {
- pr_info("stmmac: wakeup enable\n");
- device_set_wakeup_enable(priv->device, 1);
- /* Avoid unbalanced enable_irq_wake calls */
- if (priv->wol_irq_disabled)
- enable_irq_wake(priv->wol_irq);
- priv->wol_irq_disabled = false;
- } else {
- device_set_wakeup_enable(priv->device, 0);
- /* Avoid unbalanced disable_irq_wake calls */
- if (!priv->wol_irq_disabled)
- disable_irq_wake(priv->wol_irq);
- priv->wol_irq_disabled = true;
- }
-
- mutex_lock(&priv->lock);
- priv->wolopts = wol->wolopts;
- mutex_unlock(&priv->lock);
-
- return 0;
+ return phylink_ethtool_set_wol(priv->phylink, wol);
}
static int stmmac_ethtool_op_get_eee(struct net_device *dev,
@@ -895,12 +739,6 @@ static int stmmac_ethtool_op_get_eee(struct net_device *dev,
{
struct stmmac_priv *priv = netdev_priv(dev);
- if (!priv->dma_cap.eee)
- return -EOPNOTSUPP;
-
- edata->tx_lpi_timer = priv->tx_lpi_timer;
- edata->tx_lpi_enabled = priv->tx_lpi_enabled;
-
return phylink_ethtool_get_eee(priv->phylink, edata);
}
@@ -908,29 +746,8 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
struct ethtool_keee *edata)
{
struct stmmac_priv *priv = netdev_priv(dev);
- int ret;
- if (!priv->dma_cap.eee)
- return -EOPNOTSUPP;
-
- if (priv->tx_lpi_enabled != edata->tx_lpi_enabled)
- netdev_warn(priv->dev,
- "Setting EEE tx-lpi is not supported\n");
-
- if (!edata->eee_enabled)
- stmmac_disable_eee_mode(priv);
-
- ret = phylink_ethtool_set_eee(priv->phylink, edata);
- if (ret)
- return ret;
-
- if (edata->eee_enabled &&
- priv->tx_lpi_timer != edata->tx_lpi_timer) {
- priv->tx_lpi_timer = edata->tx_lpi_timer;
- stmmac_eee_init(priv);
- }
-
- return 0;
+ return phylink_ethtool_set_eee(priv->phylink, edata);
}
static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
@@ -1227,77 +1044,20 @@ static int stmmac_get_ts_info(struct net_device *dev,
return ethtool_op_get_ts_info(dev, info);
}
-static int stmmac_get_tunable(struct net_device *dev,
- const struct ethtool_tunable *tuna, void *data)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
- int ret = 0;
-
- switch (tuna->id) {
- case ETHTOOL_RX_COPYBREAK:
- *(u32 *)data = priv->rx_copybreak;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static int stmmac_set_tunable(struct net_device *dev,
- const struct ethtool_tunable *tuna,
- const void *data)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
- int ret = 0;
-
- switch (tuna->id) {
- case ETHTOOL_RX_COPYBREAK:
- priv->rx_copybreak = *(u32 *)data;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
static int stmmac_get_mm(struct net_device *ndev,
struct ethtool_mm_state *state)
{
struct stmmac_priv *priv = netdev_priv(ndev);
- unsigned long flags;
u32 frag_size;
if (!stmmac_fpe_supported(priv))
return -EOPNOTSUPP;
- spin_lock_irqsave(&priv->fpe_cfg.lock, flags);
-
- state->max_verify_time = STMMAC_FPE_MM_MAX_VERIFY_TIME_MS;
- state->verify_enabled = priv->fpe_cfg.verify_enabled;
- state->pmac_enabled = priv->fpe_cfg.pmac_enabled;
- state->verify_time = priv->fpe_cfg.verify_time;
- state->tx_enabled = priv->fpe_cfg.tx_enabled;
- state->verify_status = priv->fpe_cfg.status;
state->rx_min_frag_size = ETH_ZLEN;
-
- /* FPE active if common tx_enabled and
- * (verification success or disabled(forced))
- */
- if (state->tx_enabled &&
- (state->verify_status == ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED ||
- state->verify_status == ETHTOOL_MM_VERIFY_STATUS_DISABLED))
- state->tx_active = true;
- else
- state->tx_active = false;
-
frag_size = stmmac_fpe_get_add_frag_size(priv);
state->tx_min_frag_size = ethtool_mm_frag_size_add_to_min(frag_size);
- spin_unlock_irqrestore(&priv->fpe_cfg.lock, flags);
+ ethtool_mmsv_get_mm(&priv->fpe_cfg.mmsv, state);
return 0;
}
@@ -1306,8 +1066,6 @@ static int stmmac_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
struct netlink_ext_ack *extack)
{
struct stmmac_priv *priv = netdev_priv(ndev);
- struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
- unsigned long flags;
u32 frag_size;
int err;
@@ -1316,23 +1074,8 @@ static int stmmac_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
if (err)
return err;
- /* Wait for the verification that's currently in progress to finish */
- timer_shutdown_sync(&fpe_cfg->verify_timer);
-
- spin_lock_irqsave(&fpe_cfg->lock, flags);
-
- fpe_cfg->verify_enabled = cfg->verify_enabled;
- fpe_cfg->pmac_enabled = cfg->pmac_enabled;
- fpe_cfg->verify_time = cfg->verify_time;
- fpe_cfg->tx_enabled = cfg->tx_enabled;
-
- if (!cfg->verify_enabled)
- fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
-
stmmac_fpe_set_add_frag_size(priv, frag_size);
- stmmac_fpe_apply(priv);
-
- spin_unlock_irqrestore(&fpe_cfg->lock, flags);
+ ethtool_mmsv_set_mm(&priv->fpe_cfg.mmsv, cfg);
return 0;
}
@@ -1390,8 +1133,6 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.set_per_queue_coalesce = stmmac_set_per_queue_coalesce,
.get_channels = stmmac_get_channels,
.set_channels = stmmac_set_channels,
- .get_tunable = stmmac_get_tunable,
- .set_tunable = stmmac_set_tunable,
.get_link_ksettings = stmmac_ethtool_get_link_ksettings,
.set_link_ksettings = stmmac_ethtool_set_link_ksettings,
.get_mm = stmmac_get_mm,
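
With the in-house RGMII/SGMII PCS paths dropped above, the WoL, EEE, pause and link-settings handlers all reduce to one-line calls into phylink, which now owns that policy. The resulting handler shape is sketched below; the phylink_ethtool_*() calls and the ethtool_ops fields are the real API, while the surrounding driver (my_priv and friends) is hypothetical and not part of the patch:

/* Illustrative sketch only: ethtool handlers that simply forward to
 * phylink, as the stmmac handlers above now do. "my_priv" is made up. */
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/phylink.h>

struct my_priv {
	struct phylink *phylink;
};

static void my_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct my_priv *priv = netdev_priv(dev);

	/* WoL policy now lives entirely in phylink / the PHY driver */
	phylink_ethtool_get_wol(priv->phylink, wol);
}

static int my_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct my_priv *priv = netdev_priv(dev);

	return phylink_ethtool_set_wol(priv->phylink, wol);
}

static const struct ethtool_ops my_ethtool_ops = {
	.get_wol = my_get_wol,
	.set_wol = my_set_wol,
};
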
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c
index 3a4bee029c7f..c54c70224351 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c
@@ -27,12 +27,6 @@
#define STMMAC_MAC_FPE_CTRL_STS_SVER BIT(1)
#define STMMAC_MAC_FPE_CTRL_STS_EFPE BIT(0)
-/* FPE link-partner hand-shaking mPacket type */
-enum stmmac_mpacket_type {
- MPACKET_VERIFY = 0,
- MPACKET_RESPONSE = 1,
-};
-
struct stmmac_fpe_reg {
const u32 mac_fpe_reg; /* offset of MAC_FPE_CTRL_STS */
const u32 mtl_fpe_reg; /* offset of MTL_FPE_CTRL_STS */
@@ -48,10 +42,10 @@ bool stmmac_fpe_supported(struct stmmac_priv *priv)
priv->hw->mac->fpe_map_preemption_class;
}
-static void stmmac_fpe_configure(struct stmmac_priv *priv, bool tx_enable,
- bool pmac_enable)
+static void stmmac_fpe_configure_tx(struct ethtool_mmsv *mmsv, bool tx_enable)
{
- struct stmmac_fpe_cfg *cfg = &priv->fpe_cfg;
+ struct stmmac_fpe_cfg *cfg = container_of(mmsv, struct stmmac_fpe_cfg, mmsv);
+ struct stmmac_priv *priv = container_of(cfg, struct stmmac_priv, fpe_cfg);
const struct stmmac_fpe_reg *reg = cfg->reg;
u32 num_rxq = priv->plat->rx_queues_to_use;
void __iomem *ioaddr = priv->ioaddr;
@@ -68,7 +62,18 @@ static void stmmac_fpe_configure(struct stmmac_priv *priv, bool tx_enable,
cfg->fpe_csr = 0;
}
writel(cfg->fpe_csr, ioaddr + reg->mac_fpe_reg);
+}
+
+static void stmmac_fpe_configure_pmac(struct ethtool_mmsv *mmsv, bool pmac_enable)
+{
+ struct stmmac_fpe_cfg *cfg = container_of(mmsv, struct stmmac_fpe_cfg, mmsv);
+ struct stmmac_priv *priv = container_of(cfg, struct stmmac_priv, fpe_cfg);
+ const struct stmmac_fpe_reg *reg = cfg->reg;
+ void __iomem *ioaddr = priv->ioaddr;
+ unsigned long flags;
+ u32 value;
+ spin_lock_irqsave(&priv->hw->irq_ctrl_lock, flags);
value = readl(ioaddr + reg->int_en_reg);
if (pmac_enable) {
@@ -83,49 +88,48 @@ static void stmmac_fpe_configure(struct stmmac_priv *priv, bool tx_enable,
}
writel(value, ioaddr + reg->int_en_reg);
+ spin_unlock_irqrestore(&priv->hw->irq_ctrl_lock, flags);
}
-static void stmmac_fpe_send_mpacket(struct stmmac_priv *priv,
- enum stmmac_mpacket_type type)
+static void stmmac_fpe_send_mpacket(struct ethtool_mmsv *mmsv,
+ enum ethtool_mpacket type)
{
- const struct stmmac_fpe_reg *reg = priv->fpe_cfg.reg;
+ struct stmmac_fpe_cfg *cfg = container_of(mmsv, struct stmmac_fpe_cfg, mmsv);
+ struct stmmac_priv *priv = container_of(cfg, struct stmmac_priv, fpe_cfg);
+ const struct stmmac_fpe_reg *reg = cfg->reg;
void __iomem *ioaddr = priv->ioaddr;
- u32 value = priv->fpe_cfg.fpe_csr;
+ u32 value = cfg->fpe_csr;
- if (type == MPACKET_VERIFY)
+ if (type == ETHTOOL_MPACKET_VERIFY)
value |= STMMAC_MAC_FPE_CTRL_STS_SVER;
- else if (type == MPACKET_RESPONSE)
+ else if (type == ETHTOOL_MPACKET_RESPONSE)
value |= STMMAC_MAC_FPE_CTRL_STS_SRSP;
writel(value, ioaddr + reg->mac_fpe_reg);
}
+static const struct ethtool_mmsv_ops stmmac_mmsv_ops = {
+ .configure_tx = stmmac_fpe_configure_tx,
+ .configure_pmac = stmmac_fpe_configure_pmac,
+ .send_mpacket = stmmac_fpe_send_mpacket,
+};
+
static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
{
struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
+ struct ethtool_mmsv *mmsv = &fpe_cfg->mmsv;
- /* This is interrupt context, just spin_lock() */
- spin_lock(&fpe_cfg->lock);
-
- if (!fpe_cfg->pmac_enabled || status == FPE_EVENT_UNKNOWN)
- goto unlock_out;
+ if (status == FPE_EVENT_UNKNOWN)
+ return;
- /* LP has sent verify mPacket */
if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER)
- stmmac_fpe_send_mpacket(priv, MPACKET_RESPONSE);
+ ethtool_mmsv_event_handle(mmsv, ETHTOOL_MMSV_LP_SENT_VERIFY_MPACKET);
- /* Local has sent verify mPacket */
- if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER &&
- fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED)
- fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_VERIFYING;
+ if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER)
+ ethtool_mmsv_event_handle(mmsv, ETHTOOL_MMSV_LD_SENT_VERIFY_MPACKET);
- /* LP has sent response mPacket */
- if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP &&
- fpe_cfg->status == ETHTOOL_MM_VERIFY_STATUS_VERIFYING)
- fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED;
-
-unlock_out:
- spin_unlock(&fpe_cfg->lock);
+ if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
+ ethtool_mmsv_event_handle(mmsv, ETHTOOL_MMSV_LP_SENT_RESPONSE_MPACKET);
}
void stmmac_fpe_irq_status(struct stmmac_priv *priv)
@@ -164,119 +168,16 @@ void stmmac_fpe_irq_status(struct stmmac_priv *priv)
stmmac_fpe_event_status(priv, status);
}
-/**
- * stmmac_fpe_verify_timer - Timer for MAC Merge verification
- * @t: timer_list struct containing private info
- *
- * Verify the MAC Merge capability in the local TX direction, by
- * transmitting Verify mPackets up to 3 times. Wait until link
- * partner responds with a Response mPacket, otherwise fail.
- */
-static void stmmac_fpe_verify_timer(struct timer_list *t)
-{
- struct stmmac_fpe_cfg *fpe_cfg = from_timer(fpe_cfg, t, verify_timer);
- struct stmmac_priv *priv = container_of(fpe_cfg, struct stmmac_priv,
- fpe_cfg);
- unsigned long flags;
- bool rearm = false;
-
- spin_lock_irqsave(&fpe_cfg->lock, flags);
-
- switch (fpe_cfg->status) {
- case ETHTOOL_MM_VERIFY_STATUS_INITIAL:
- case ETHTOOL_MM_VERIFY_STATUS_VERIFYING:
- if (fpe_cfg->verify_retries != 0) {
- stmmac_fpe_send_mpacket(priv, MPACKET_VERIFY);
- rearm = true;
- } else {
- fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_FAILED;
- }
-
- fpe_cfg->verify_retries--;
- break;
-
- case ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED:
- stmmac_fpe_configure(priv, true, true);
- break;
-
- default:
- break;
- }
-
- if (rearm) {
- mod_timer(&fpe_cfg->verify_timer,
- jiffies + msecs_to_jiffies(fpe_cfg->verify_time));
- }
-
- spin_unlock_irqrestore(&fpe_cfg->lock, flags);
-}
-
-static void stmmac_fpe_verify_timer_arm(struct stmmac_fpe_cfg *fpe_cfg)
-{
- if (fpe_cfg->pmac_enabled && fpe_cfg->tx_enabled &&
- fpe_cfg->verify_enabled &&
- fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_FAILED &&
- fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED) {
- timer_setup(&fpe_cfg->verify_timer, stmmac_fpe_verify_timer, 0);
- mod_timer(&fpe_cfg->verify_timer, jiffies);
- }
-}
-
void stmmac_fpe_init(struct stmmac_priv *priv)
{
- priv->fpe_cfg.verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES;
- priv->fpe_cfg.verify_time = STMMAC_FPE_MM_MAX_VERIFY_TIME_MS;
- priv->fpe_cfg.status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
- timer_setup(&priv->fpe_cfg.verify_timer, stmmac_fpe_verify_timer, 0);
- spin_lock_init(&priv->fpe_cfg.lock);
+ ethtool_mmsv_init(&priv->fpe_cfg.mmsv, priv->dev,
+ &stmmac_mmsv_ops);
if ((!priv->fpe_cfg.reg || !priv->hw->mac->fpe_map_preemption_class) &&
priv->dma_cap.fpesel)
dev_info(priv->device, "FPE is not supported by driver.\n");
}
-void stmmac_fpe_apply(struct stmmac_priv *priv)
-{
- struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
-
- /* If verification is disabled, configure FPE right away.
- * Otherwise let the timer code do it.
- */
- if (!fpe_cfg->verify_enabled) {
- stmmac_fpe_configure(priv, fpe_cfg->tx_enabled,
- fpe_cfg->pmac_enabled);
- } else {
- fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_INITIAL;
- fpe_cfg->verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES;
-
- if (netif_running(priv->dev))
- stmmac_fpe_verify_timer_arm(fpe_cfg);
- }
-}
-
-void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
-{
- struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
- unsigned long flags;
-
- timer_shutdown_sync(&fpe_cfg->verify_timer);
-
- spin_lock_irqsave(&fpe_cfg->lock, flags);
-
- if (is_up && fpe_cfg->pmac_enabled) {
- /* VERIFY process requires pmac enabled when NIC comes up */
- stmmac_fpe_configure(priv, false, true);
-
- /* New link => maybe new partner => new verification process */
- stmmac_fpe_apply(priv);
- } else {
- /* No link => turn off EFPE */
- stmmac_fpe_configure(priv, false, false);
- }
-
- spin_unlock_irqrestore(&fpe_cfg->lock, flags);
-}
-
int stmmac_fpe_get_add_frag_size(struct stmmac_priv *priv)
{
const struct stmmac_fpe_reg *reg = priv->fpe_cfg.reg;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h
index b884eac7142d..3fc46acf7001 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h
@@ -9,15 +9,10 @@
#include <linux/types.h>
#include <linux/netdevice.h>
-#define STMMAC_FPE_MM_MAX_VERIFY_RETRIES 3
-#define STMMAC_FPE_MM_MAX_VERIFY_TIME_MS 128
-
struct stmmac_priv;
-void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up);
bool stmmac_fpe_supported(struct stmmac_priv *priv);
void stmmac_fpe_init(struct stmmac_priv *priv);
-void stmmac_fpe_apply(struct stmmac_priv *priv);
void stmmac_fpe_irq_status(struct stmmac_priv *priv);
int stmmac_fpe_get_add_frag_size(struct stmmac_priv *priv);
void stmmac_fpe_set_add_frag_size(struct stmmac_priv *priv, u32 add_frag_size);
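
The new mmsv callbacks in stmmac_fpe.c above recover the driver context from the embedded ethtool_mmsv member with two container_of() steps (mmsv -> fpe_cfg -> priv), instead of being handed a stmmac_priv pointer directly. A standalone illustration of that recovery, separate from the patch, using an offsetof()-based container_of() and made-up structures:

/* Illustrative sketch only: recovering an outer structure from a pointer
 * to an embedded member, as stmmac_fpe_configure_tx() does above. */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct mmsv	{ int verify_enabled; };
struct fpe_cfg	{ struct mmsv mmsv; unsigned int fpe_csr; };
struct priv	{ int id; struct fpe_cfg fpe_cfg; };

static void configure_tx(struct mmsv *mmsv, int enable)
{
	/* First hop: embedded member -> enclosing fpe_cfg */
	struct fpe_cfg *cfg = container_of(mmsv, struct fpe_cfg, mmsv);
	/* Second hop: fpe_cfg -> enclosing driver private data */
	struct priv *priv = container_of(cfg, struct priv, fpe_cfg);

	printf("priv %d: tx %s\n", priv->id, enable ? "on" : "off");
}

int main(void)
{
	struct priv p = { .id = 7 };

	configure_tx(&p.fpe_cfg.mmsv, 1);
	return 0;
}
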
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 5ef52ef2698f..bb110124f21e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -18,9 +18,22 @@
#include "dwmac4.h"
#include "stmmac.h"
+#define STMMAC_HWTS_CFG_MASK (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
+ PTP_TCR_TSINIT | PTP_TCR_TSUPDT | \
+ PTP_TCR_TSCTRLSSR | PTP_TCR_SNAPTYPSEL_1 | \
+ PTP_TCR_TSIPV4ENA | PTP_TCR_TSIPV6ENA | \
+ PTP_TCR_TSEVNTENA | PTP_TCR_TSMSTRENA | \
+ PTP_TCR_TSVER2ENA | PTP_TCR_TSIPENA | \
+ PTP_TCR_TSTRIG | PTP_TCR_TSENALL)
+
static void config_hw_tstamping(void __iomem *ioaddr, u32 data)
{
- writel(data, ioaddr + PTP_TCR);
+ u32 regval = readl(ioaddr + PTP_TCR);
+
+ regval &= ~STMMAC_HWTS_CFG_MASK;
+ regval |= data;
+
+ writel(regval, ioaddr + PTP_TCR);
}
static void config_sub_second_increment(void __iomem *ioaddr,
@@ -122,7 +135,6 @@ static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
static int config_addend(void __iomem *ioaddr, u32 addend)
{
u32 value;
- int limit;
writel(addend, ioaddr + PTP_TAR);
/* issue command to update the addend value */
@@ -131,23 +143,15 @@ static int config_addend(void __iomem *ioaddr, u32 addend)
writel(value, ioaddr + PTP_TCR);
/* wait for present addend update to complete */
- limit = 10;
- while (limit--) {
- if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSADDREG))
- break;
- mdelay(10);
- }
- if (limit < 0)
- return -EBUSY;
-
- return 0;
+ return readl_poll_timeout_atomic(ioaddr + PTP_TCR, value,
+ !(value & PTP_TCR_TSADDREG),
+ 10, 100000);
}
static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
int add_sub, int gmac4)
{
u32 value;
- int limit;
if (add_sub) {
/* If the new sec value needs to be subtracted with
@@ -174,16 +178,9 @@ static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
writel(value, ioaddr + PTP_TCR);
/* wait for present system time adjust/update to complete */
- limit = 10;
- while (limit--) {
- if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSUPDT))
- break;
- mdelay(10);
- }
- if (limit < 0)
- return -EBUSY;
-
- return 0;
+ return readl_poll_timeout_atomic(ioaddr + PTP_TCR, value,
+ !(value & PTP_TCR_TSUPDT),
+ 10, 100000);
}
static void get_systime(void __iomem *ioaddr, u64 *systime)
@@ -209,7 +206,7 @@ static void get_ptptime(void __iomem *ptpaddr, u64 *ptp_time)
u64 ns;
ns = readl(ptpaddr + PTP_ATNR);
- ns += readl(ptpaddr + PTP_ATSR) * NSEC_PER_SEC;
+ ns += (u64)readl(ptpaddr + PTP_ATSR) * NSEC_PER_SEC;
*ptp_time = ns;
}
@@ -269,3 +266,14 @@ const struct stmmac_hwtimestamp stmmac_ptp = {
.timestamp_interrupt = timestamp_interrupt,
.hwtstamp_correct_latency = hwtstamp_correct_latency,
};
+
+const struct stmmac_hwtimestamp dwmac1000_ptp = {
+ .config_hw_tstamping = config_hw_tstamping,
+ .init_systime = init_systime,
+ .config_sub_second_increment = config_sub_second_increment,
+ .config_addend = config_addend,
+ .adjust_systime = adjust_systime,
+ .get_systime = get_systime,
+ .get_ptptime = dwmac1000_get_ptptime,
+ .timestamp_interrupt = dwmac1000_timestamp_interrupt,
+};
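
config_addend() and adjust_systime() above swap their hand-rolled mdelay() loops for readl_poll_timeout_atomic() from <linux/iopoll.h>, which busy-polls a register until the condition holds or the timeout expires (here 100 ms, polling every 10 µs) and returns 0 on success or -ETIMEDOUT otherwise. Roughly the shape that helper provides, sketched as self-contained user-space C with a fake register (everything below is illustrative, not the kernel implementation):

/* Illustrative sketch only: poll until a bit clears or a timeout expires. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Fake "register" and helpers so the sketch is self-contained. */
static uint32_t fake_reg = 0x1;		/* bit 0 starts set */
static int polls;

static uint32_t reg_read(void)
{
	if (++polls == 3)		/* pretend the hardware finishes on read #3 */
		fake_reg = 0;
	return fake_reg;
}

static void udelay(unsigned long us) { (void)us; }	/* stand-in busy-wait */

static int poll_bit_clear(uint32_t bit, unsigned long delay_us,
			  unsigned long timeout_us)
{
	unsigned long waited = 0;

	for (;;) {
		if (!(reg_read() & bit))	/* condition met */
			return 0;
		if (waited >= timeout_us)	/* give up */
			return -ETIMEDOUT;
		udelay(delay_us);
		waited += delay_us;
	}
}

int main(void)
{
	printf("poll result: %d after %d reads\n",
	       poll_bit_clear(0x1, 10, 100000), polls);
	return 0;
}
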
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.c
new file mode 100644
index 000000000000..5c5dd502f79a
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PCI bus helpers for STMMAC driver
+ * Copyright (C) 2025 Yao Zi <ziyao@disroot.org>
+ */
+
+#include <linux/device.h>
+#include <linux/pci.h>
+
+#include "stmmac_libpci.h"
+
+int stmmac_pci_plat_suspend(struct device *dev, void *bsp_priv)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ ret = pci_save_state(pdev);
+ if (ret)
+ return ret;
+
+ pci_disable_device(pdev);
+ pci_wake_from_d3(pdev, true);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stmmac_pci_plat_suspend);
+
+int stmmac_pci_plat_resume(struct device *dev, void *bsp_priv)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ pci_restore_state(pdev);
+ pci_set_power_state(pdev, PCI_D0);
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stmmac_pci_plat_resume);
+
+MODULE_DESCRIPTION("STMMAC PCI helper library");
+MODULE_AUTHOR("Yao Zi <ziyao@disroot.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.h
new file mode 100644
index 000000000000..71553184f982
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2025 Yao Zi <ziyao@disroot.org>
+ */
+
+#ifndef __STMMAC_LIBPCI_H__
+#define __STMMAC_LIBPCI_H__
+
+int stmmac_pci_plat_suspend(struct device *dev, void *bsp_priv);
+int stmmac_pci_plat_resume(struct device *dev, void *bsp_priv);
+
+#endif /* __STMMAC_LIBPCI_H__ */
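
The two helpers above give PCI-based dwmac glue a shared way to park the device in D3 with wake enabled on suspend and to re-enable bus mastering on resume. A hypothetical caller might wire them into its dev_pm_ops as below; only stmmac_pci_plat_suspend()/stmmac_pci_plat_resume() come from the patch, the wrapper names are invented, and a real glue driver would pass its own bsp_priv rather than NULL:

/* Hypothetical glue sketch, not part of the patch: forwarding a PCI
 * dwmac device's PM callbacks to the new library helpers. */
#include <linux/device.h>
#include <linux/pm.h>

#include "stmmac_libpci.h"

static int demo_dwmac_suspend(struct device *dev)
{
	return stmmac_pci_plat_suspend(dev, NULL);	/* bsp_priv unused by the helper */
}

static int demo_dwmac_resume(struct device *dev)
{
	return stmmac_pci_plat_resume(dev, NULL);
}

static DEFINE_SIMPLE_DEV_PM_OPS(demo_dwmac_pm_ops,
				demo_dwmac_suspend, demo_dwmac_resume);
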
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 12f0db0e8830..da206b24aaed 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -29,6 +29,7 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
@@ -39,12 +40,14 @@
#include <linux/phylink.h>
#include <linux/udp.h>
#include <linux/bpf_trace.h>
+#include <net/devlink.h>
#include <net/page_pool/helpers.h>
#include <net/pkt_cls.h>
#include <net/xdp_sock_drv.h>
#include "stmmac_ptp.h"
#include "stmmac_fpe.h"
#include "stmmac.h"
+#include "stmmac_pcs.h"
#include "stmmac_xdp.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
@@ -56,8 +59,7 @@
* with fine resolution and binary rollover. This avoid non-monotonic behavior
* (clock jumps) when changing timestamping settings at runtime.
*/
-#define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
- PTP_TCR_TSCTRLSSR)
+#define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCTRLSSR)
#define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
@@ -77,7 +79,6 @@ module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");
#define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
-#define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4)
/* Limit to make sure XDP TX and slow path can coexist */
#define STMMAC_XSK_TX_BUDGET_MAX 256
@@ -89,33 +90,32 @@ MODULE_PARM_DESC(phyaddr, "Physical device address");
#define STMMAC_XDP_TX BIT(1)
#define STMMAC_XDP_REDIRECT BIT(2)
-static int flow_ctrl = FLOW_AUTO;
+static int flow_ctrl = 0xdead;
module_param(flow_ctrl, int, 0644);
-MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
+MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off] (obsolete)");
static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
-MODULE_PARM_DESC(pause, "Flow Control Pause Time");
+MODULE_PARM_DESC(pause, "Flow Control Pause Time (units of 512 bit times)");
#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");
+/* This is unused */
#define DEFAULT_BUFSIZE 1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");
-#define STMMAC_RX_COPYBREAK 256
-
static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
#define STMMAC_DEFAULT_LPI_TIMER 1000
-static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
-module_param(eee_timer, int, 0644);
+static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
+module_param(eee_timer, uint, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
@@ -148,37 +148,84 @@ static void stmmac_exit_fs(struct net_device *dev);
#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
-int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
+struct stmmac_devlink_priv {
+ struct stmmac_priv *stmmac_priv;
+};
+
+enum stmmac_dl_param_id {
+ STMMAC_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ STMMAC_DEVLINK_PARAM_ID_TS_COARSE,
+};
+
+/**
+ * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock
+ * @bsp_priv: BSP private data structure (unused)
+ * @clk_tx_i: the transmit clock
+ * @interface: the selected interface mode
+ * @speed: the speed that the MAC will be operating at
+ *
+ * Set the transmit clock rate for the MAC, normally 2.5MHz for 10Mbps,
+ * 25MHz for 100Mbps and 125MHz for 1Gbps. This is suitable for at least
+ * MII, GMII, RGMII and RMII interface modes. Platforms can hook this into
+ * the plat_data->set_clk_tx_rate method directly, call it via their own
+ * implementation, or implement their own method should they have more
+ * complex requirements. It is intended to only be used in this method.
+ *
+ * plat_data->clk_tx_i must be filled in.
+ */
+int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
- int ret = 0;
+ long rate = rgmii_clock(speed);
- if (enabled) {
- ret = clk_prepare_enable(priv->plat->stmmac_clk);
- if (ret)
- return ret;
- ret = clk_prepare_enable(priv->plat->pclk);
- if (ret) {
- clk_disable_unprepare(priv->plat->stmmac_clk);
- return ret;
- }
- if (priv->plat->clks_config) {
- ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
- if (ret) {
- clk_disable_unprepare(priv->plat->stmmac_clk);
- clk_disable_unprepare(priv->plat->pclk);
- return ret;
- }
+ /* Silently ignore unsupported speeds as rgmii_clock() only
+ * supports 10, 100 and 1000Mbps. We do not want to spit
+ * errors for 2500 and higher speeds here.
+ */
+ if (rate < 0)
+ return 0;
+
+ return clk_set_rate(clk_tx_i, rate);
+}
+EXPORT_SYMBOL_GPL(stmmac_set_clk_tx_rate);
+
+/**
+ * stmmac_axi_blen_to_mask() - convert a burst length array to reg value
+ * @regval: pointer to a u32 for the resulting register value
+ * @blen: pointer to an array of u32 containing the burst length values in bytes
+ * @len: the number of entries in the @blen array
+ */
+void stmmac_axi_blen_to_mask(u32 *regval, const u32 *blen, size_t len)
+{
+ size_t i;
+ u32 val;
+
+ for (val = i = 0; i < len; i++) {
+ u32 burst = blen[i];
+
+ /* Burst values of zero must be skipped. */
+ if (!burst)
+ continue;
+
+ /* The valid range for the burst length is 4 to 256 inclusive,
+ * and it must be a power of two.
+ */
+ if (burst < 4 || burst > 256 || !is_power_of_2(burst)) {
+ pr_err("stmmac: invalid burst length %u at index %zu\n",
+ burst, i);
+ continue;
}
- } else {
- clk_disable_unprepare(priv->plat->stmmac_clk);
- clk_disable_unprepare(priv->plat->pclk);
- if (priv->plat->clks_config)
- priv->plat->clks_config(priv->plat->bsp_priv, enabled);
+
+ /* Since burst is a power of two, and the register field starts
+ * with burst = 4, shift right by two bits so bit 0 of the field
+ * corresponds with the minimum value.
+ */
+ val |= burst >> 2;
}
- return ret;
+ *regval = FIELD_PREP(DMA_AXI_BLEN_MASK, val);
}
-EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
+EXPORT_SYMBOL_GPL(stmmac_axi_blen_to_mask);
/**
* stmmac_verify_args - verify the driver parameters.
@@ -189,16 +236,11 @@ static void stmmac_verify_args(void)
{
if (unlikely(watchdog < 0))
watchdog = TX_TIMEO;
- if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
- buf_sz = DEFAULT_BUFSIZE;
- if (unlikely(flow_ctrl > 1))
- flow_ctrl = FLOW_AUTO;
- else if (likely(flow_ctrl < 0))
- flow_ctrl = FLOW_OFF;
if (unlikely((pause < 0) || (pause > 0xffff)))
pause = PAUSE_TIME;
- if (eee_timer < 0)
- eee_timer = STMMAC_DEFAULT_LPI_TIMER;
+
+ if (flow_ctrl != 0xdead)
+ pr_warn("stmmac: module parameter 'flow_ctrl' is obsolete - please remove from your module configuration\n");
}
static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
@@ -287,73 +329,6 @@ static void stmmac_global_err(struct stmmac_priv *priv)
stmmac_service_event_schedule(priv);
}
-/**
- * stmmac_clk_csr_set - dynamically set the MDC clock
- * @priv: driver private structure
- * Description: this is to dynamically set the MDC clock according to the csr
- * clock input.
- * Note:
- * If a specific clk_csr value is passed from the platform
- * this means that the CSR Clock Range selection cannot be
- * changed at run-time and it is fixed (as reported in the driver
- * documentation). Viceversa the driver will try to set the MDC
- * clock dynamically according to the actual clock input.
- */
-static void stmmac_clk_csr_set(struct stmmac_priv *priv)
-{
- u32 clk_rate;
-
- clk_rate = clk_get_rate(priv->plat->stmmac_clk);
-
- /* Platform provided default clk_csr would be assumed valid
- * for all other cases except for the below mentioned ones.
- * For values higher than the IEEE 802.3 specified frequency
- * we can not estimate the proper divider as it is not known
- * the frequency of clk_csr_i. So we do not change the default
- * divider.
- */
- if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
- if (clk_rate < CSR_F_35M)
- priv->clk_csr = STMMAC_CSR_20_35M;
- else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
- priv->clk_csr = STMMAC_CSR_35_60M;
- else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
- priv->clk_csr = STMMAC_CSR_60_100M;
- else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
- priv->clk_csr = STMMAC_CSR_100_150M;
- else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
- priv->clk_csr = STMMAC_CSR_150_250M;
- else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
- priv->clk_csr = STMMAC_CSR_250_300M;
- }
-
- if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
- if (clk_rate > 160000000)
- priv->clk_csr = 0x03;
- else if (clk_rate > 80000000)
- priv->clk_csr = 0x02;
- else if (clk_rate > 40000000)
- priv->clk_csr = 0x01;
- else
- priv->clk_csr = 0;
- }
-
- if (priv->plat->has_xgmac) {
- if (clk_rate > 400000000)
- priv->clk_csr = 0x5;
- else if (clk_rate > 350000000)
- priv->clk_csr = 0x4;
- else if (clk_rate > 300000000)
- priv->clk_csr = 0x3;
- else if (clk_rate > 250000000)
- priv->clk_csr = 0x2;
- else if (clk_rate > 150000000)
- priv->clk_csr = 0x1;
- else
- priv->clk_csr = 0x0;
- }
-}
-
static void print_pkt(unsigned char *buf, int len)
{
pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
@@ -391,23 +366,7 @@ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
return dirty;
}
-static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
-{
- int tx_lpi_timer;
-
- /* Clear/set the SW EEE timer flag based on LPI ET enablement */
- priv->eee_sw_timer_en = en ? 0 : 1;
- tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
- stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
-}
-
-/**
- * stmmac_enable_eee_mode - check and enter in LPI mode
- * @priv: driver private structure
- * Description: this function is to verify and enter in LPI mode in case of
- * EEE.
- */
-static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
+static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 queue;
@@ -417,31 +376,45 @@ static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
if (tx_q->dirty_tx != tx_q->cur_tx)
- return -EBUSY; /* still unfinished work */
+ return true; /* still unfinished work */
}
- /* Check and enter in LPI mode */
- if (!priv->tx_path_in_lpi_mode)
- stmmac_set_eee_mode(priv, priv->hw,
- priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
- return 0;
+ return false;
+}
+
+static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv)
+{
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}
/**
- * stmmac_disable_eee_mode - disable and exit from LPI mode
+ * stmmac_try_to_start_sw_lpi - check and enter in LPI mode
* @priv: driver private structure
- * Description: this function is to exit and disable EEE in case of
- * LPI state is true. This is called by the xmit.
+ * Description: if the transmit path is idle, enter LPI mode for EEE;
+ * otherwise restart the software LPI timer and retry later.
*/
-void stmmac_disable_eee_mode(struct stmmac_priv *priv)
+static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
{
- if (!priv->eee_sw_timer_en) {
- stmmac_lpi_entry_timer_config(priv, 0);
+ if (stmmac_eee_tx_busy(priv)) {
+ stmmac_restart_sw_lpi_timer(priv);
return;
}
- stmmac_reset_eee_mode(priv, priv->hw);
- del_timer_sync(&priv->eee_ctrl_timer);
+ /* Check and enter in LPI mode */
+ if (!priv->tx_path_in_lpi_mode)
+ stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_FORCED,
+ priv->tx_lpi_clk_stop, 0);
+}
+
+/**
+ * stmmac_stop_sw_lpi - stop transmitting LPI
+ * @priv: driver private structure
+ * Description: when using software-controlled LPI, stop transmitting the LPI pattern.
+ */
+static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
+{
+ timer_delete_sync(&priv->eee_ctrl_timer);
+ stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
priv->tx_path_in_lpi_mode = false;
}
@@ -454,69 +427,9 @@ void stmmac_disable_eee_mode(struct stmmac_priv *priv)
*/
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
- struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
+ struct stmmac_priv *priv = timer_container_of(priv, t, eee_ctrl_timer);
- if (stmmac_enable_eee_mode(priv))
- mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
-}
-
-/**
- * stmmac_eee_init - init EEE
- * @priv: driver private structure
- * Description:
- * if the GMAC supports the EEE (from the HW cap reg) and the phy device
- * can also manage EEE, this function enable the LPI state and start related
- * timer.
- */
-bool stmmac_eee_init(struct stmmac_priv *priv)
-{
- int eee_tw_timer = priv->eee_tw_timer;
-
- /* Check if MAC core supports the EEE feature. */
- if (!priv->dma_cap.eee)
- return false;
-
- mutex_lock(&priv->lock);
-
- /* Check if it needs to be deactivated */
- if (!priv->eee_active) {
- if (priv->eee_enabled) {
- netdev_dbg(priv->dev, "disable EEE\n");
- stmmac_lpi_entry_timer_config(priv, 0);
- del_timer_sync(&priv->eee_ctrl_timer);
- stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
- if (priv->hw->xpcs)
- xpcs_config_eee(priv->hw->xpcs,
- priv->plat->mult_fact_100ns,
- false);
- }
- mutex_unlock(&priv->lock);
- return false;
- }
-
- if (priv->eee_active && !priv->eee_enabled) {
- timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
- stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
- eee_tw_timer);
- if (priv->hw->xpcs)
- xpcs_config_eee(priv->hw->xpcs,
- priv->plat->mult_fact_100ns,
- true);
- }
-
- if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
- del_timer_sync(&priv->eee_ctrl_timer);
- priv->tx_path_in_lpi_mode = false;
- stmmac_lpi_entry_timer_config(priv, 1);
- } else {
- stmmac_lpi_entry_timer_config(priv, 0);
- mod_timer(&priv->eee_ctrl_timer,
- STMMAC_LPI_T(priv->tx_lpi_timer));
- }
-
- mutex_unlock(&priv->lock);
- netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
- return true;
+ stmmac_try_to_start_sw_lpi(priv);
}
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
@@ -580,7 +493,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
if (!priv->hwts_rx_en)
return;
/* For GMAC4, the valid timestamp is from CTX next desc. */
- if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
+ if (dwmac_is_xmac(priv->plat->core_type))
desc = np;
/* Check if timestamp is available */
@@ -598,21 +511,49 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
}
}
+static void stmmac_update_subsecond_increment(struct stmmac_priv *priv)
+{
+ bool xmac = dwmac_is_xmac(priv->plat->core_type);
+ u32 sec_inc = 0;
+ u64 temp = 0;
+
+ stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
+
+ /* program Sub Second Increment reg */
+ stmmac_config_sub_second_increment(priv, priv->ptpaddr,
+ priv->plat->clk_ptp_rate,
+ xmac, &sec_inc);
+ temp = div_u64(1000000000ULL, sec_inc);
+
+ /* Store sub second increment for later use */
+ priv->sub_second_inc = sec_inc;
+
+ /* Calculate the default addend value:
+ * addend = 2^32 * freq_div_ratio / clk_ptp_rate
+ * where freq_div_ratio = 1e9 ns / sec_inc, i.e. the rate at which
+ * the 32-bit accumulator must overflow to advance the clock.
+ */
+ temp = (u64)(temp << 32);
+ priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
+ stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
+}
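
As a worked illustration of the addend calculation above, the arithmetic can be checked in isolation. The 250 MHz PTP reference clock and the doubled-period sub-second increment (as used by the fine-correction method) are assumed example values, not taken from this patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed example: 250 MHz PTP reference clock, fine-correction
	 * method where the sub-second increment register holds twice the
	 * clock period (2e9 / 250e6 = 8 ns).
	 */
	uint64_t clk_ptp_rate = 250000000ULL;
	uint64_t sec_inc = 2000000000ULL / clk_ptp_rate;	/* 8 ns */
	uint64_t freq_div_ratio = 1000000000ULL / sec_inc;	/* 125 MHz */
	uint64_t addend = (freq_div_ratio << 32) / clk_ptp_rate;

	/* addend = 2^32 * freq_div_ratio / clk_ptp_rate = 0x80000000 here,
	 * i.e. the 32-bit accumulator overflows on every second clock cycle.
	 */
	printf("sec_inc=%llu ns, addend=0x%llx\n",
	       (unsigned long long)sec_inc, (unsigned long long)addend);
	return 0;
}
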
+
/**
* stmmac_hwtstamp_set - control hardware timestamping.
* @dev: device pointer.
- * @ifr: An IOCTL specific structure, that can contain a pointer to
- * a proprietary structure used to pass information to the driver.
+ * @config: the timestamping configuration.
+ * @extack: netlink extended ack structure for error reporting.
* Description:
* This function configures the MAC to enable/disable both outgoing(TX)
* and incoming(RX) packets time stamping based on user input.
* Return Value:
* 0 on success and an appropriate -ve integer on failure.
*/
-static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+static int stmmac_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct stmmac_priv *priv = netdev_priv(dev);
- struct hwtstamp_config config;
u32 ptp_v2 = 0;
u32 tstamp_all = 0;
u32 ptp_over_ipv4_udp = 0;
@@ -623,34 +564,36 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
u32 ts_event_en = 0;
if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
- netdev_alert(priv->dev, "No support for HW time stamping\n");
+ NL_SET_ERR_MSG_MOD(extack, "No support for HW time stamping");
priv->hwts_tx_en = 0;
priv->hwts_rx_en = 0;
return -EOPNOTSUPP;
}
- if (copy_from_user(&config, ifr->ifr_data,
- sizeof(config)))
- return -EFAULT;
+ if (!netif_running(dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot change timestamping configuration while down");
+ return -ENODEV;
+ }
netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
- __func__, config.flags, config.tx_type, config.rx_filter);
+ __func__, config->flags, config->tx_type, config->rx_filter);
- if (config.tx_type != HWTSTAMP_TX_OFF &&
- config.tx_type != HWTSTAMP_TX_ON)
+ if (config->tx_type != HWTSTAMP_TX_OFF &&
+ config->tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
if (priv->adv_ts) {
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
/* time stamp no incoming packet at all */
- config.rx_filter = HWTSTAMP_FILTER_NONE;
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
/* PTP v1, UDP, any kind of event packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
/* 'xmac' hardware can support Sync, Pdelay_Req and
* Pdelay_resp by setting bit14 and bits17/16 to 01
* This leaves Delay_Req timestamps out.
@@ -664,7 +607,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
/* PTP v1, UDP, Sync packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
/* take time stamp for SYNC messages only */
ts_event_en = PTP_TCR_TSEVNTENA;
@@ -674,7 +617,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
/* PTP v1, UDP, Delay_req packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
/* take time stamp for Delay_Req messages only */
ts_master_en = PTP_TCR_TSMSTRENA;
ts_event_en = PTP_TCR_TSEVNTENA;
@@ -685,7 +628,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
/* PTP v2, UDP, any kind of event packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for all event messages */
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
@@ -696,7 +639,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
/* PTP v2, UDP, Sync packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for SYNC messages only */
ts_event_en = PTP_TCR_TSEVNTENA;
@@ -707,7 +650,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
/* PTP v2, UDP, Delay_req packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for Delay_Req messages only */
ts_master_en = PTP_TCR_TSMSTRENA;
@@ -719,7 +662,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
/* PTP v2/802.AS1 any layer, any kind of event packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
if (priv->synopsys_id < DWMAC_CORE_4_10)
@@ -731,7 +674,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
/* PTP v2/802.AS1, any layer, Sync packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for SYNC messages only */
ts_event_en = PTP_TCR_TSEVNTENA;
@@ -743,7 +686,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
/* PTP v2/802.AS1, any layer, Delay_req packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for Delay_Req messages only */
ts_master_en = PTP_TCR_TSMSTRENA;
@@ -757,7 +700,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
/* time stamp any incoming packet */
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
tstamp_all = PTP_TCR_TSENALL;
break;
@@ -765,20 +708,22 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
return -ERANGE;
}
} else {
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
- config.rx_filter = HWTSTAMP_FILTER_NONE;
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
break;
default:
/* PTP v1, UDP, any kind of event packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
}
}
- priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
- priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
+ priv->hwts_rx_en = config->rx_filter != HWTSTAMP_FILTER_NONE;
+ priv->hwts_tx_en = config->tx_type == HWTSTAMP_TX_ON;
priv->systime_flags = STMMAC_HWTS_ACTIVE;
+ if (!priv->tsfupdt_coarse)
+ priv->systime_flags |= PTP_TCR_TSCFUPDT;
if (priv->hwts_tx_en || priv->hwts_rx_en) {
priv->systime_flags |= tstamp_all | ptp_v2 |
@@ -789,31 +734,30 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
- memcpy(&priv->tstamp_config, &config, sizeof(config));
+ priv->tstamp_config = *config;
- return copy_to_user(ifr->ifr_data, &config,
- sizeof(config)) ? -EFAULT : 0;
+ return 0;
}
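
With the driver converted to the kernel_hwtstamp_config callbacks above, the request is still expected to originate from the standard SIOCSHWTSTAMP ioctl, which the networking core translates before calling into the driver. A minimal userspace sketch follows; the interface name "eth0" and the chosen tx_type/rx_filter are assumed example values:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed interface */
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");
		close(fd);
		return 1;
	}

	/* The driver may have downgraded rx_filter to what the MAC supports */
	printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
	close(fd);
	return 0;
}
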
/**
* stmmac_hwtstamp_get - read hardware timestamping.
* @dev: device pointer.
- * @ifr: An IOCTL specific structure, that can contain a pointer to
- * a proprietary structure used to pass information to the driver.
+ * @config: the timestamping configuration.
* Description:
* This function obtain the current hardware timestamping settings
* as requested.
*/
-static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+static int stmmac_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
{
struct stmmac_priv *priv = netdev_priv(dev);
- struct hwtstamp_config *config = &priv->tstamp_config;
if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
return -EOPNOTSUPP;
- return copy_to_user(ifr->ifr_data, config,
- sizeof(*config)) ? -EFAULT : 0;
+ *config = priv->tstamp_config;
+
+ return 0;
}
/**
@@ -826,36 +770,20 @@ static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
* Will be rerun after resuming from suspend, case in which the timestamping
* flags updated by stmmac_hwtstamp_set() also need to be restored.
*/
-int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
+static int stmmac_init_tstamp_counter(struct stmmac_priv *priv,
+ u32 systime_flags)
{
- bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
struct timespec64 now;
- u32 sec_inc = 0;
- u64 temp = 0;
- if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
- return -EOPNOTSUPP;
+ if (!priv->plat->clk_ptp_rate) {
+ netdev_err(priv->dev, "Invalid PTP clock rate");
+ return -EINVAL;
+ }
stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
priv->systime_flags = systime_flags;
- /* program Sub Second Increment reg */
- stmmac_config_sub_second_increment(priv, priv->ptpaddr,
- priv->plat->clk_ptp_rate,
- xmac, &sec_inc);
- temp = div_u64(1000000000ULL, sec_inc);
-
- /* Store sub second increment for later use */
- priv->sub_second_inc = sec_inc;
-
- /* calculate default added value:
- * formula is :
- * addend = (2^32)/freq_div_ratio;
- * where, freq_div_ratio = 1e9ns/sec_inc
- */
- temp = (u64)(temp << 32);
- priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
- stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
+ stmmac_update_subsecond_increment(priv);
/* initialize system time */
ktime_get_real_ts64(&now);
@@ -865,26 +793,33 @@ int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
return 0;
}
-EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
/**
- * stmmac_init_ptp - init PTP
+ * stmmac_init_timestamping - initialise timestamping
* @priv: driver private structure
* Description: this is to verify if the HW supports the PTPv1 or PTPv2.
* This is done by looking at the HW cap. register.
* This function also registers the ptp driver.
*/
-static int stmmac_init_ptp(struct stmmac_priv *priv)
+static int stmmac_init_timestamping(struct stmmac_priv *priv)
{
- bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+ bool xmac = dwmac_is_xmac(priv->plat->core_type);
int ret;
if (priv->plat->ptp_clk_freq_config)
priv->plat->ptp_clk_freq_config(priv);
- ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
- if (ret)
+ if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) {
+ netdev_info(priv->dev, "PTP not supported by HW\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE |
+ PTP_TCR_TSCFUPDT);
+ if (ret) {
+ netdev_warn(priv->dev, "PTP init failed\n");
return ret;
+ }
priv->adv_ts = 0;
/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
@@ -910,24 +845,40 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
return 0;
}
+static void stmmac_setup_ptp(struct stmmac_priv *priv)
+{
+ int ret;
+
+ ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
+ if (ret < 0)
+ netdev_warn(priv->dev,
+ "failed to enable PTP reference clock: %pe\n",
+ ERR_PTR(ret));
+
+ if (stmmac_init_timestamping(priv) == 0)
+ stmmac_ptp_register(priv);
+}
+
static void stmmac_release_ptp(struct stmmac_priv *priv)
{
- clk_disable_unprepare(priv->plat->clk_ptp_ref);
stmmac_ptp_unregister(priv);
+ clk_disable_unprepare(priv->plat->clk_ptp_ref);
}
/**
* stmmac_mac_flow_ctrl - Configure flow control in all queues
* @priv: driver private structure
* @duplex: duplex passed to the next function
+ * @flow_ctrl: desired flow control modes
* Description: It is used for configuring the flow control in all queues
*/
-static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
+static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex,
+ unsigned int flow_ctrl)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
- stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
- priv->pause, tx_cnt);
+ stmmac_flow_ctrl(priv, priv->hw, duplex, flow_ctrl, priv->pause_time,
+ tx_cnt);
}
static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
@@ -958,6 +909,13 @@ static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
return pcs;
}
+ /* The PCS control register is only relevant for SGMII, TBI and RTBI
+ * modes. We no longer support TBI or RTBI, so only configure this
+ * register when operating in SGMII mode with the integrated PCS.
+ */
+ if (priv->hw->pcs & STMMAC_PCS_SGMII && priv->integrated_pcs)
+ return &priv->integrated_pcs->pcs;
+
return NULL;
}
@@ -967,19 +925,29 @@ static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
/* Nothing to do, xpcs_config() handles everything */
}
+static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ if (priv->plat->mac_finish)
+ priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, interface);
+
+ return 0;
+}
+
static void stmmac_mac_link_down(struct phylink_config *config,
unsigned int mode, phy_interface_t interface)
{
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
stmmac_mac_set(priv, priv->ioaddr, false);
- priv->eee_active = false;
- priv->tx_lpi_enabled = false;
- priv->eee_enabled = stmmac_eee_init(priv);
- stmmac_set_eee_pls(priv, priv->hw, false);
+ if (priv->dma_cap.eee)
+ stmmac_set_eee_pls(priv, priv->hw, false);
if (stmmac_fpe_supported(priv))
- stmmac_fpe_link_state_handle(priv, false);
+ ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, false);
}
static void stmmac_mac_link_up(struct phylink_config *config,
@@ -989,7 +957,9 @@ static void stmmac_mac_link_up(struct phylink_config *config,
bool tx_pause, bool rx_pause)
{
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+ unsigned int flow_ctrl;
u32 old_ctrl, ctrl;
+ int ret;
if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
priv->plat->serdes_powerup)
@@ -1057,8 +1027,6 @@ static void stmmac_mac_link_up(struct phylink_config *config,
}
}
- priv->speed = speed;
-
if (priv->plat->fix_mac_speed)
priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
@@ -1069,42 +1037,124 @@ static void stmmac_mac_link_up(struct phylink_config *config,
/* Flow Control operation */
if (rx_pause && tx_pause)
- priv->flow_ctrl = FLOW_AUTO;
+ flow_ctrl = FLOW_AUTO;
else if (rx_pause && !tx_pause)
- priv->flow_ctrl = FLOW_RX;
+ flow_ctrl = FLOW_RX;
else if (!rx_pause && tx_pause)
- priv->flow_ctrl = FLOW_TX;
+ flow_ctrl = FLOW_TX;
else
- priv->flow_ctrl = FLOW_OFF;
+ flow_ctrl = FLOW_OFF;
- stmmac_mac_flow_ctrl(priv, duplex);
+ stmmac_mac_flow_ctrl(priv, duplex, flow_ctrl);
if (ctrl != old_ctrl)
writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
+ if (priv->plat->set_clk_tx_rate) {
+ ret = priv->plat->set_clk_tx_rate(priv->plat->bsp_priv,
+ priv->plat->clk_tx_i,
+ interface, speed);
+ if (ret < 0)
+ netdev_err(priv->dev,
+ "failed to configure %s transmit clock for %dMbps: %pe\n",
+ phy_modes(interface), speed, ERR_PTR(ret));
+ }
+
stmmac_mac_set(priv, priv->ioaddr, true);
- if (phy && priv->dma_cap.eee) {
- priv->eee_active =
- phy_init_eee(phy, !(priv->plat->flags &
- STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
- priv->eee_enabled = stmmac_eee_init(priv);
- priv->tx_lpi_enabled = priv->eee_enabled;
+ if (priv->dma_cap.eee)
stmmac_set_eee_pls(priv, priv->hw, true);
- }
if (stmmac_fpe_supported(priv))
- stmmac_fpe_link_state_handle(priv, true);
+ ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, true);
if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
stmmac_hwtstamp_correct_latency(priv, priv);
}
+static void stmmac_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+ priv->eee_active = false;
+
+ mutex_lock(&priv->lock);
+
+ priv->eee_enabled = false;
+
+ netdev_dbg(priv->dev, "disable EEE\n");
+ priv->eee_sw_timer_en = false;
+ timer_delete_sync(&priv->eee_ctrl_timer);
+ stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
+ priv->tx_path_in_lpi_mode = false;
+
+ stmmac_set_eee_timer(priv, priv->hw, 0, STMMAC_DEFAULT_TWT_LS);
+ mutex_unlock(&priv->lock);
+}
+
+static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+ int ret;
+
+ priv->tx_lpi_timer = timer;
+ priv->eee_active = true;
+
+ mutex_lock(&priv->lock);
+
+ priv->eee_enabled = true;
+
+ /* Update the transmit clock stop according to PHY capability if
+ * the platform allows
+ */
+ if (priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP)
+ priv->tx_lpi_clk_stop = tx_clk_stop;
+
+ stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
+ STMMAC_DEFAULT_TWT_LS);
+
+ /* Try to configure the hardware timer. */
+ ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER,
+ priv->tx_lpi_clk_stop, priv->tx_lpi_timer);
+
+ if (ret) {
+ /* Hardware timer mode not supported, or value out of range.
+ * Fall back to using software LPI mode
+ */
+ priv->eee_sw_timer_en = true;
+ stmmac_restart_sw_lpi_timer(priv);
+ }
+
+ mutex_unlock(&priv->lock);
+ netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
+
+ return 0;
+}
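
With LPI handling delegated to phylink as above, the tx_lpi_timer value handed to this callback is expected to come from the ethtool EEE settings. A small userspace sketch of the legacy ioctl path is below; the interface name and the 1000 us timer are assumed example values, and modern tooling would typically use the ethtool netlink API instead:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_eee eee = { .cmd = ETHTOOL_GEEE };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed interface */
	ifr.ifr_data = (char *)&eee;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GEEE");
		close(fd);
		return 1;
	}

	/* Request EEE with a 1000 us LPI entry timer; on link-up phylink
	 * passes this value to the driver's mac_enable_tx_lpi() callback.
	 */
	eee.cmd = ETHTOOL_SEEE;
	eee.eee_enabled = 1;
	eee.tx_lpi_enabled = 1;
	eee.tx_lpi_timer = 1000;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SEEE");

	close(fd);
	return 0;
}
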
+
+static int stmmac_mac_wol_set(struct phylink_config *config, u32 wolopts,
+ const u8 *sopass)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+ device_set_wakeup_enable(priv->device, !!wolopts);
+
+ mutex_lock(&priv->lock);
+ priv->wolopts = wolopts;
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
.mac_get_caps = stmmac_mac_get_caps,
.mac_select_pcs = stmmac_mac_select_pcs,
.mac_config = stmmac_mac_config,
+ .mac_finish = stmmac_mac_finish,
.mac_link_down = stmmac_mac_link_down,
.mac_link_up = stmmac_mac_link_up,
+ .mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
+ .mac_wol_set = stmmac_mac_wol_set,
};
/**
@@ -1116,18 +1166,26 @@ static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
*/
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
- int interface = priv->plat->mac_interface;
-
- if (priv->dma_cap.pcs) {
- if ((interface == PHY_INTERFACE_MODE_RGMII) ||
- (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
- (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
- (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
- netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
- priv->hw->pcs = STMMAC_PCS_RGMII;
- } else if (interface == PHY_INTERFACE_MODE_SGMII) {
- netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
- priv->hw->pcs = STMMAC_PCS_SGMII;
+ int interface = priv->plat->phy_interface;
+ int speed = priv->plat->mac_port_sel_speed;
+
+ if (priv->dma_cap.pcs && interface == PHY_INTERFACE_MODE_SGMII) {
+ netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
+ priv->hw->pcs = STMMAC_PCS_SGMII;
+
+ switch (speed) {
+ case SPEED_10:
+ case SPEED_100:
+ case SPEED_1000:
+ priv->hw->reverse_sgmii_enable = true;
+ break;
+
+ default:
+ dev_warn(priv->device, "invalid port speed\n");
+ fallthrough;
+ case 0:
+ priv->hw->reverse_sgmii_enable = false;
+ break;
}
}
}
@@ -1143,13 +1201,19 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
static int stmmac_init_phy(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ int mode = priv->plat->phy_interface;
struct fwnode_handle *phy_fwnode;
struct fwnode_handle *fwnode;
+ struct ethtool_keee eee;
int ret;
if (!phylink_expects_phy(priv->phylink))
return 0;
+ if (priv->hw->xpcs &&
+ xpcs_get_an_mode(priv->hw->xpcs, mode) == DW_AN_C73)
+ return 0;
+
fwnode = priv->plat->port_node;
if (!fwnode)
fwnode = dev_fwnode(priv->device);
@@ -1183,52 +1247,110 @@ static int stmmac_init_phy(struct net_device *dev)
ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
}
- if (!priv->plat->pmt) {
- struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+ if (ret) {
+ netdev_err(priv->dev, "cannot attach to PHY (error: %pe)\n",
+ ERR_PTR(ret));
+ return ret;
+ }
- phylink_ethtool_get_wol(priv->phylink, &wol);
- device_set_wakeup_capable(priv->device, !!wol.supported);
- device_set_wakeup_enable(priv->device, !!wol.wolopts);
+ /* Configure phylib's copy of the LPI timer. Normally,
+ * phylink_config.lpi_timer_default would do this, but there is a
+ * chance that userspace could change the eee_timer setting via sysfs
+ * before the first open. Thus, preserve existing behaviour.
+ */
+ if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
+ eee.tx_lpi_timer = priv->tx_lpi_timer;
+ phylink_ethtool_set_eee(priv->phylink, &eee);
}
- return ret;
+ return 0;
}
-static int stmmac_phy_setup(struct stmmac_priv *priv)
+static int stmmac_phylink_setup(struct stmmac_priv *priv)
{
struct stmmac_mdio_bus_data *mdio_bus_data;
- int mode = priv->plat->phy_interface;
+ struct phylink_config *config;
struct fwnode_handle *fwnode;
+ struct phylink_pcs *pcs;
struct phylink *phylink;
- priv->phylink_config.dev = &priv->dev->dev;
- priv->phylink_config.type = PHYLINK_NETDEV;
- priv->phylink_config.mac_managed_pm = true;
+ config = &priv->phylink_config;
+
+ config->dev = &priv->dev->dev;
+ config->type = PHYLINK_NETDEV;
+ config->mac_managed_pm = true;
/* Stmmac always requires an RX clock for hardware initialization */
- priv->phylink_config.mac_requires_rxc = true;
+ config->mac_requires_rxc = true;
+
+ /* Disable EEE RX clock stop to ensure VLAN register access works
+ * correctly.
+ */
+ if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI) &&
+ !(priv->dev->features & NETIF_F_VLAN_FEATURES))
+ config->eee_rx_clk_stop_enable = true;
+
+ /* Set the default transmit clock stop bit based on the platform glue */
+ priv->tx_lpi_clk_stop = priv->plat->flags &
+ STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;
mdio_bus_data = priv->plat->mdio_bus_data;
if (mdio_bus_data)
- priv->phylink_config.default_an_inband =
- mdio_bus_data->default_an_inband;
+ config->default_an_inband = mdio_bus_data->default_an_inband;
+
+ /* Get the PHY interface modes (at the PHY end of the link) that
+ * are supported by the platform.
+ */
+ if (priv->plat->get_interfaces)
+ priv->plat->get_interfaces(priv, priv->plat->bsp_priv,
+ config->supported_interfaces);
- /* Set the platform/firmware specified interface mode. Note, phylink
- * deals with the PHY interface mode, not the MAC interface mode.
+ /* If the supported interfaces have not already been provided, fall
+ * back to the platform/firmware specified phy_interface mode as a
+ * last resort.
*/
- __set_bit(mode, priv->phylink_config.supported_interfaces);
+ if (phy_interface_empty(config->supported_interfaces))
+ __set_bit(priv->plat->phy_interface,
+ config->supported_interfaces);
/* If we have an xpcs, it defines which PHY interfaces are supported. */
if (priv->hw->xpcs)
- xpcs_get_interfaces(priv->hw->xpcs,
- priv->phylink_config.supported_interfaces);
+ pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
+ else
+ pcs = priv->hw->phylink_pcs;
+
+ if (pcs)
+ phy_interface_or(config->supported_interfaces,
+ config->supported_interfaces,
+ pcs->supported_interfaces);
+
+ if (priv->dma_cap.eee) {
+ /* Assume all supported interfaces also support LPI */
+ memcpy(config->lpi_interfaces, config->supported_interfaces,
+ sizeof(config->lpi_interfaces));
+
+ /* All full duplex speeds above 100Mbps are supported */
+ config->lpi_capabilities = ~(MAC_1000FD - 1) | MAC_100FD;
+ config->lpi_timer_default = eee_timer * 1000;
+ config->eee_enabled_default = true;
+ }
+
+ config->wol_phy_speed_ctrl = true;
+ if (priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL) {
+ config->wol_phy_legacy = true;
+ } else {
+ if (priv->dma_cap.pmt_remote_wake_up)
+ config->wol_mac_support |= WAKE_UCAST;
+ if (priv->dma_cap.pmt_magic_frame)
+ config->wol_mac_support |= WAKE_MAGIC;
+ }
fwnode = priv->plat->port_node;
if (!fwnode)
fwnode = dev_fwnode(priv->device);
- phylink = phylink_create(&priv->phylink_config, fwnode,
- mode, &stmmac_phylink_mac_ops);
+ phylink = phylink_create(config, fwnode, priv->plat->phy_interface,
+ &stmmac_phylink_mac_ops);
if (IS_ERR(phylink))
return PTR_ERR(phylink);
@@ -1304,9 +1426,17 @@ static void stmmac_display_rings(struct stmmac_priv *priv,
stmmac_display_tx_rings(priv, dma_conf);
}
-static int stmmac_set_bfsize(int mtu, int bufsize)
+static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
+{
+ if (stmmac_xdp_is_enabled(priv))
+ return XDP_PACKET_HEADROOM;
+
+ return NET_SKB_PAD;
+}
+
+static int stmmac_set_bfsize(int mtu)
{
- int ret = bufsize;
+ int ret;
if (mtu >= BUF_SIZE_8KiB)
ret = BUF_SIZE_16KiB;
@@ -1435,7 +1565,7 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
buf->page_offset = stmmac_rx_offset(priv);
}
- if (priv->sph && !buf->sec_page) {
+ if (priv->sph_active && !buf->sec_page) {
buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
if (!buf->sec_page)
return -ENOMEM;
@@ -2000,22 +2130,31 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
struct stmmac_channel *ch = &priv->channel[queue];
bool xdp_prog = stmmac_xdp_is_enabled(priv);
struct page_pool_params pp_params = { 0 };
- unsigned int num_pages;
+ unsigned int dma_buf_sz_pad, num_pages;
unsigned int napi_id;
int ret;
+ dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);
+
rx_q->queue_index = queue;
rx_q->priv_data = priv;
+ rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;
pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
pp_params.pool_size = dma_conf->dma_rx_size;
- num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
- pp_params.order = ilog2(num_pages);
+ pp_params.order = order_base_2(num_pages);
pp_params.nid = dev_to_node(priv->device);
pp_params.dev = priv->device;
pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
pp_params.offset = stmmac_rx_offset(priv);
- pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
+ pp_params.max_len = dma_conf->dma_buf_sz;
+
+ if (priv->sph_active) {
+ pp_params.offset = 0;
+ pp_params.max_len += stmmac_rx_offset(priv);
+ }
rx_q->page_pool = page_pool_create(&pp_params);
if (IS_ERR(rx_q->page_pool)) {
@@ -2353,7 +2492,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
txfifosz = priv->dma_cap.tx_fifo_size;
/* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
- if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
+ if (dwmac_is_xmac(priv->plat->core_type)) {
rxfifosz /= rx_channels_count;
txfifosz /= tx_channels_count;
}
@@ -2442,9 +2581,20 @@ static u64 stmmac_xsk_fill_timestamp(void *_priv)
return 0;
}
+static void stmmac_xsk_request_launch_time(u64 launch_time, void *_priv)
+{
+ struct timespec64 ts = ns_to_timespec64(launch_time);
+ struct stmmac_metadata_request *meta_req = _priv;
+
+ if (meta_req->tbs & STMMAC_TBS_EN)
+ stmmac_set_desc_tbs(meta_req->priv, meta_req->edesc, ts.tv_sec,
+ ts.tv_nsec);
+}
+
static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
.tmo_request_timestamp = stmmac_xsk_request_timestamp,
.tmo_fill_timestamp = stmmac_xsk_fill_timestamp,
+ .tmo_request_launch_time = stmmac_xsk_request_launch_time,
};
static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
@@ -2452,6 +2602,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
+ bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
struct xsk_buff_pool *pool = tx_q->xsk_pool;
unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc = NULL;
@@ -2464,7 +2615,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
budget = min(budget, stmmac_tx_avail(priv, queue));
- while (budget-- > 0) {
+ for (; budget > 0; budget--) {
struct stmmac_metadata_request meta_req;
struct xsk_tx_metadata *meta = NULL;
dma_addr_t dma_addr;
@@ -2528,6 +2679,8 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
meta_req.priv = priv;
meta_req.tx_desc = tx_desc;
meta_req.set_ic = &set_ic;
+ meta_req.tbs = tx_q->tbs;
+ meta_req.edesc = &tx_q->dma_entx[entry];
xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
&meta_req);
if (set_ic) {
@@ -2537,7 +2690,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
}
stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
- true, priv->mode, true, true,
+ csum, priv->mode, true, true,
xdp_desc.len);
stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
@@ -2754,11 +2907,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
xmits = budget;
}
- if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
- priv->eee_sw_timer_en) {
- if (stmmac_enable_eee_mode(priv))
- mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
- }
+ if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode)
+ stmmac_restart_sw_lpi_timer(priv);
/* We still have pending packets, let's call for a new scheduling */
if (tx_q->dirty_tx != tx_q->cur_tx)
@@ -2974,6 +3124,56 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv)
}
}
+int stmmac_get_phy_intf_sel(phy_interface_t interface)
+{
+ int phy_intf_sel = -EINVAL;
+
+ if (interface == PHY_INTERFACE_MODE_MII ||
+ interface == PHY_INTERFACE_MODE_GMII)
+ phy_intf_sel = PHY_INTF_SEL_GMII_MII;
+ else if (phy_interface_mode_is_rgmii(interface))
+ phy_intf_sel = PHY_INTF_SEL_RGMII;
+ else if (interface == PHY_INTERFACE_MODE_SGMII)
+ phy_intf_sel = PHY_INTF_SEL_SGMII;
+ else if (interface == PHY_INTERFACE_MODE_RMII)
+ phy_intf_sel = PHY_INTF_SEL_RMII;
+ else if (interface == PHY_INTERFACE_MODE_REVMII)
+ phy_intf_sel = PHY_INTF_SEL_REVMII;
+
+ return phy_intf_sel;
+}
+EXPORT_SYMBOL_GPL(stmmac_get_phy_intf_sel);
+
+static int stmmac_prereset_configure(struct stmmac_priv *priv)
+{
+ struct plat_stmmacenet_data *plat_dat = priv->plat;
+ phy_interface_t interface;
+ int phy_intf_sel, ret;
+
+ if (!plat_dat->set_phy_intf_sel)
+ return 0;
+
+ interface = plat_dat->phy_interface;
+ phy_intf_sel = stmmac_get_phy_intf_sel(interface);
+ if (phy_intf_sel < 0) {
+ netdev_err(priv->dev,
+ "failed to get phy_intf_sel for %s: %pe\n",
+ phy_modes(interface), ERR_PTR(phy_intf_sel));
+ return phy_intf_sel;
+ }
+
+ ret = plat_dat->set_phy_intf_sel(plat_dat->bsp_priv, phy_intf_sel);
+ if (ret == -EINVAL)
+ netdev_err(priv->dev, "platform does not support %s\n",
+ phy_modes(interface));
+ else if (ret < 0)
+ netdev_err(priv->dev,
+ "platform failed to set interface %s: %pe\n",
+ phy_modes(interface), ERR_PTR(ret));
+
+ return ret;
+}
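
The pre-reset hook above only takes effect when the platform glue provides a set_phy_intf_sel() callback. A hypothetical sketch of such a callback is shown below; the foo_dwmac structure, register offset, field mask and the exact callback prototype are invented for illustration and are not part of this patch:

#include <linux/bits.h>
#include <linux/regmap.h>

struct foo_dwmac {
	struct regmap *syscon;	/* system controller exposing the GMAC control register */
};

#define FOO_GMAC_CTRL		0x04		/* invented register offset */
#define FOO_PHY_INTF_SEL_MASK	GENMASK(2, 0)	/* invented field position */

/* Prototype assumed to mirror the call in stmmac_prereset_configure() */
static int foo_set_phy_intf_sel(void *bsp_priv, int phy_intf_sel)
{
	struct foo_dwmac *dwmac = bsp_priv;

	/* Latch the PHY interface selector before the DMA reset completes */
	return regmap_update_bits(dwmac->syscon, FOO_GMAC_CTRL,
				  FOO_PHY_INTF_SEL_MASK, phy_intf_sel);
}

The glue driver would then point plat_dat->set_phy_intf_sel at this helper (and register its bsp_priv) during probe.
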
+
/**
* stmmac_init_dma_engine - DMA init.
* @priv: driver private structure
@@ -2993,16 +3193,20 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
int ret = 0;
if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
- dev_err(priv->device, "Invalid DMA configuration\n");
+ netdev_err(priv->dev, "Invalid DMA configuration\n");
return -EINVAL;
}
if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
priv->plat->dma_cfg->atds = 1;
- ret = stmmac_reset(priv, priv->ioaddr);
+ ret = stmmac_prereset_configure(priv);
+ if (ret)
+ return ret;
+
+ ret = stmmac_reset(priv);
if (ret) {
- dev_err(priv->device, "Failed to reset the dma\n");
+ netdev_err(priv->dev, "Failed to reset the dma\n");
return ret;
}
@@ -3120,8 +3324,7 @@ static void stmmac_init_coalesce(struct stmmac_priv *priv)
priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
- hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- tx_q->txtimer.function = stmmac_tx_timer;
+ hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
for (chan = 0; chan < rx_channel_count; chan++)
@@ -3346,7 +3549,6 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
/**
* stmmac_hw_setup - setup mac in a usable state.
* @dev : pointer to the device structure.
- * @ptp_register: register PTP if set
* Description:
* this is the main function to setup the HW in a usable state because the
* dma engine is reset, the core registers are configured (e.g. AXI,
@@ -3356,7 +3558,7 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
*/
-static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
+static int stmmac_hw_setup(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 rx_cnt = priv->plat->rx_queues_to_use;
@@ -3369,9 +3571,18 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
if (priv->hw->phylink_pcs)
phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
+ /* Note that clk_rx_i must be running for reset to complete. This
+ * clock may also be required when setting the MAC address.
+ *
+ * Block the receive clock stop for LPI mode at the PHY in case
+ * the link is established with EEE mode active.
+ */
+ phylink_rx_clk_stop_block(priv->phylink);
+
/* DMA initialization and SW reset */
ret = stmmac_init_dma_engine(priv);
if (ret < 0) {
+ phylink_rx_clk_stop_unblock(priv->phylink);
netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
__func__);
return ret;
@@ -3379,19 +3590,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
/* Copy the MAC addr into the HW */
stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
-
- /* PS and related bits will be programmed according to the speed */
- if (priv->hw->pcs) {
- int speed = priv->plat->mac_port_sel_speed;
-
- if ((speed == SPEED_10) || (speed == SPEED_100) ||
- (speed == SPEED_1000)) {
- priv->hw->ps = speed;
- } else {
- dev_warn(priv->device, "invalid port speed\n");
- priv->hw->ps = 0;
- }
- }
+ phylink_rx_clk_stop_unblock(priv->phylink);
/* Initialize the MAC Core */
stmmac_core_init(priv, priv->hw, dev);
@@ -3417,28 +3616,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
stmmac_mmc_setup(priv);
- if (ptp_register) {
- ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
- if (ret < 0)
- netdev_warn(priv->dev,
- "failed to enable PTP reference clock: %pe\n",
- ERR_PTR(ret));
- }
-
- ret = stmmac_init_ptp(priv);
- if (ret == -EOPNOTSUPP)
- netdev_info(priv->dev, "PTP not supported by HW\n");
- else if (ret)
- netdev_warn(priv->dev, "PTP init failed\n");
- else if (ptp_register)
- stmmac_ptp_register(priv);
-
- priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
-
- /* Convert the timer from msec to usec */
- if (!priv->tx_lpi_timer)
- priv->tx_lpi_timer = eee_timer * 1000;
-
if (priv->use_riwt) {
u32 queue;
@@ -3451,9 +3628,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
}
}
- if (priv->hw->pcs)
- stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
-
/* set TX and RX rings length */
stmmac_set_rings_length(priv);
@@ -3471,7 +3645,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
}
/* Enable Split Header */
- sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+ sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;
for (chan = 0; chan < rx_cnt; chan++)
stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
@@ -3495,18 +3669,13 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
/* Start the ball rolling... */
stmmac_start_all_dma(priv);
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_hw_vlan_mode(priv, priv->hw);
+ phylink_rx_clk_stop_unblock(priv->phylink);
return 0;
}
-static void stmmac_hw_teardown(struct net_device *dev)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
-
- clk_disable_unprepare(priv->plat->clk_ptp_ref);
-}
-
static void stmmac_free_irq(struct net_device *dev,
enum request_irq_err irq_err, int irq_idx)
{
@@ -3567,7 +3736,6 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
enum request_irq_err irq_err;
- cpumask_t cpu_mask;
int irq_idx = 0;
char *int_name;
int ret;
@@ -3589,7 +3757,6 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
/* Request the Wake IRQ in case of another line
* is used for WoL
*/
- priv->wol_irq_disabled = true;
if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
int_name = priv->int_name_wol;
sprintf(int_name, "%s:%s", dev->name, "wol");
@@ -3696,9 +3863,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
irq_idx = i;
goto irq_error;
}
- cpumask_clear(&cpu_mask);
- cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
- irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
+ irq_set_affinity_hint(priv->rx_irq[i],
+ cpumask_of(i % num_online_cpus()));
}
/* Request Tx MSI irq */
@@ -3721,9 +3887,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
irq_idx = i;
goto irq_error;
}
- cpumask_clear(&cpu_mask);
- cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
- irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
+ irq_set_affinity_hint(priv->tx_irq[i],
+ cpumask_of(i % num_online_cpus()));
}
return 0;
@@ -3835,12 +4000,13 @@ stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
return ERR_PTR(-ENOMEM);
}
+ /* Returns BUF_SIZE_16KiB if mtu > 8KiB on dwmac4 or in ring mode, otherwise 0 */
bfsize = stmmac_set_16kib_bfsize(priv, mtu);
if (bfsize < 0)
bfsize = 0;
if (bfsize < BUF_SIZE_16KiB)
- bfsize = stmmac_set_bfsize(mtu, 0);
+ bfsize = stmmac_set_bfsize(mtu);
dma_conf->dma_buf_sz = bfsize;
/* Chose the tx/rx size from the already defined one in the
@@ -3900,28 +4066,9 @@ static int __stmmac_open(struct net_device *dev,
struct stmmac_dma_conf *dma_conf)
{
struct stmmac_priv *priv = netdev_priv(dev);
- int mode = priv->plat->phy_interface;
u32 chan;
int ret;
- ret = pm_runtime_resume_and_get(priv->device);
- if (ret < 0)
- return ret;
-
- if ((!priv->hw->xpcs ||
- xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
- ret = stmmac_init_phy(dev);
- if (ret) {
- netdev_err(priv->dev,
- "%s: Cannot attach to PHY (error: %d)\n",
- __func__, ret);
- goto init_phy_error;
- }
- }
-
- priv->rx_copybreak = STMMAC_RX_COPYBREAK;
-
- buf_sz = dma_conf->dma_buf_sz;
for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
@@ -3939,17 +4086,17 @@ static int __stmmac_open(struct net_device *dev,
}
}
- ret = stmmac_hw_setup(dev, true);
+ ret = stmmac_hw_setup(dev);
if (ret < 0) {
netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
goto init_error;
}
+ stmmac_setup_ptp(priv);
+
stmmac_init_coalesce(priv);
phylink_start(priv->phylink);
- /* We may have called phylink_speed_down before */
- phylink_speed_up(priv->phylink);
ret = stmmac_request_irq(dev);
if (ret)
@@ -3967,11 +4114,8 @@ irq_error:
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
- stmmac_hw_teardown(dev);
+ stmmac_release_ptp(priv);
init_error:
- phylink_disconnect_phy(priv->phylink);
-init_phy_error:
- pm_runtime_put(priv->device);
return ret;
}
@@ -3981,34 +4125,50 @@ static int stmmac_open(struct net_device *dev)
struct stmmac_dma_conf *dma_conf;
int ret;
+ /* Initialise the tx lpi timer, converting from msec to usec */
+ if (!priv->tx_lpi_timer)
+ priv->tx_lpi_timer = eee_timer * 1000;
+
dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
if (IS_ERR(dma_conf))
return PTR_ERR(dma_conf);
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
+ goto err_dma_resources;
+
+ ret = stmmac_init_phy(dev);
+ if (ret)
+ goto err_runtime_pm;
+
ret = __stmmac_open(dev, dma_conf);
if (ret)
- free_dma_desc_resources(priv, dma_conf);
+ goto err_disconnect_phy;
kfree(dma_conf);
+
+ /* We may have called phylink_speed_down before */
+ phylink_speed_up(priv->phylink);
+
+ return ret;
+
+err_disconnect_phy:
+ phylink_disconnect_phy(priv->phylink);
+err_runtime_pm:
+ pm_runtime_put(priv->device);
+err_dma_resources:
+ free_dma_desc_resources(priv, dma_conf);
+ kfree(dma_conf);
return ret;
}
-/**
- * stmmac_release - close entry point of the driver
- * @dev : device pointer.
- * Description:
- * This is the stop entry point of the driver.
- */
-static int stmmac_release(struct net_device *dev)
+static void __stmmac_release(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 chan;
- if (device_may_wakeup(priv->device))
- phylink_speed_down(priv->phylink, false);
/* Stop and disconnect the PHY */
phylink_stop(priv->phylink);
- phylink_disconnect_phy(priv->phylink);
stmmac_disable_all_queues(priv);
@@ -4020,20 +4180,12 @@ static int stmmac_release(struct net_device *dev)
/* Free the IRQ lines */
stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
- if (priv->eee_enabled) {
- priv->tx_path_in_lpi_mode = false;
- del_timer_sync(&priv->eee_ctrl_timer);
- }
-
/* Stop TX/RX DMA and clear the descriptors */
stmmac_stop_all_dma(priv);
/* Release and free the Rx/Tx resources */
free_dma_desc_resources(priv, &priv->dma_conf);
- /* Disable the MAC Rx/Tx */
- stmmac_mac_set(priv, priv->ioaddr, false);
-
/* Powerdown Serdes if there is */
if (priv->plat->serdes_powerdown)
priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
@@ -4041,8 +4193,29 @@ static int stmmac_release(struct net_device *dev)
stmmac_release_ptp(priv);
if (stmmac_fpe_supported(priv))
- timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
+ ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
+}
+/**
+ * stmmac_release - close entry point of the driver
+ * @dev : device pointer.
+ * Description:
+ * This is the stop entry point of the driver.
+ */
+static int stmmac_release(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ /* If the PHY or MAC has WoL enabled, then the PHY will not be
+ * suspended when phylink_stop() is called below. Set the PHY
+ * to its slowest speed to save power.
+ */
+ if (device_may_wakeup(priv->device))
+ phylink_speed_down(priv->phylink, false);
+
+ __stmmac_release(dev);
+
+ phylink_disconnect_phy(priv->phylink);
pm_runtime_put(priv->device);
return 0;
@@ -4051,18 +4224,11 @@ static int stmmac_release(struct net_device *dev)
static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
struct stmmac_tx_queue *tx_q)
{
- u16 tag = 0x0, inner_tag = 0x0;
- u32 inner_type = 0x0;
struct dma_desc *p;
+ u16 tag = 0x0;
- if (!priv->dma_cap.vlins)
+ if (!priv->dma_cap.vlins || !skb_vlan_tag_present(skb))
return false;
- if (!skb_vlan_tag_present(skb))
- return false;
- if (skb->vlan_proto == htons(ETH_P_8021AD)) {
- inner_tag = skb_vlan_tag_get(skb);
- inner_type = STMMAC_VLAN_INSERT;
- }
tag = skb_vlan_tag_get(skb);
@@ -4071,7 +4237,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
else
p = &tx_q->dma_tx[tx_q->cur_tx];
- if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
+ if (stmmac_set_desc_vlan_tag(priv, p, tag, 0x0, 0x0))
return false;
stmmac_set_tx_owner(priv, p);
@@ -4113,11 +4279,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
desc = &tx_q->dma_tx[tx_q->cur_tx];
curr_addr = des + (total_len - tmp_len);
- if (priv->dma_cap.addr64 <= 32)
- desc->des0 = cpu_to_le32(curr_addr);
- else
- stmmac_set_desc_addr(priv, desc, curr_addr);
-
+ stmmac_set_desc_addr(priv, desc, curr_addr);
buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
TSO_MAX_BUFF_SIZE : tmp_len;
@@ -4163,17 +4325,27 @@ static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
* First Descriptor
* --------
* | DES0 |---> buffer1 = L2/L3/L4 header
- * | DES1 |---> TCP Payload (can continue on next descr...)
- * | DES2 |---> buffer 1 and 2 len
+ * | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
+ * | | width is 32-bit, but we never use it.
+ * | | Also can be used as the most-significant 8-bits or 16-bits of
+ * | | buffer1 address pointer if the DMA AXI address width is 40-bit
+ * | | or 48-bit, and we always use it.
+ * | DES2 |---> buffer1 len
* | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
* --------
+ * --------
+ * | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
+ * | DES1 |---> same as the First Descriptor
+ * | DES2 |---> buffer1 len
+ * | DES3 |
+ * --------
* |
* ...
* |
* --------
- * | DES0 | --| Split TCP Payload on Buffers 1 and 2
- * | DES1 | --|
- * | DES2 | --> buffer 1 and 2 len
+ * | DES0 |---> buffer1 = Split TCP Payload
+ * | DES1 |---> same as the First Descriptor
+ * | DES2 |---> buffer1 len
* | DES3 |
* --------
*
@@ -4183,15 +4355,14 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dma_desc *desc, *first, *mss_desc = NULL;
struct stmmac_priv *priv = netdev_priv(dev);
- int tmp_pay_len = 0, first_tx, nfrags;
unsigned int first_entry, tx_packets;
struct stmmac_txq_stats *txq_stats;
struct stmmac_tx_queue *tx_q;
u32 pay_len, mss, queue;
+ int i, first_tx, nfrags;
u8 proto_hdr_len, hdr;
dma_addr_t des;
bool set_ic;
- int i;
/* Always insert VLAN tag to SKB payload for TSO frames.
*
@@ -4276,23 +4447,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
if (dma_mapping_error(priv->device, des))
goto dma_map_err;
- if (priv->dma_cap.addr64 <= 32) {
- first->des0 = cpu_to_le32(des);
-
- /* Fill start of payload in buff2 of first descriptor */
- if (pay_len)
- first->des1 = cpu_to_le32(des + proto_hdr_len);
-
- /* If needed take extra descriptors to fill the remaining payload */
- tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
- } else {
- stmmac_set_desc_addr(priv, first, des);
- tmp_pay_len = pay_len;
- des += proto_hdr_len;
- pay_len = 0;
- }
-
- stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
+ stmmac_set_desc_addr(priv, first, des);
+ stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
+ (nfrags == 0), queue);
/* In case two or more DMA transmit descriptors are allocated for this
* non-paged SKB data, the DMA buffer address should be saved to
@@ -4386,8 +4543,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
- skb_tx_timestamp(skb);
-
if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
priv->hwts_tx_en)) {
/* declare that device is doing timestamping */
@@ -4396,11 +4551,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Complete the first descriptor before granting the DMA */
- stmmac_prepare_tso_tx_desc(priv, first, 1,
- proto_hdr_len,
- pay_len,
- 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
- hdr / 4, (skb->len - proto_hdr_len));
+ stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
+ tx_q->tx_skbuff_dma[first_entry].last_segment,
+ hdr / 4, (skb->len - proto_hdr_len));
/* If context desc is used to change MSS */
if (mss_desc) {
@@ -4422,6 +4575,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
}
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
+ skb_tx_timestamp(skb);
stmmac_flush_tx_descriptors(priv, queue);
stmmac_tx_timer_arm(priv, queue);
@@ -4467,41 +4621,48 @@ static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
*/
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
- unsigned int first_entry, tx_packets, enh_desc;
+ bool enh_desc, has_vlan, set_ic, is_jumbo = false;
struct stmmac_priv *priv = netdev_priv(dev);
unsigned int nopaged_len = skb_headlen(skb);
- int i, csum_insertion = 0, is_jumbo = 0;
u32 queue = skb_get_queue_mapping(skb);
int nfrags = skb_shinfo(skb)->nr_frags;
+ unsigned int first_entry, tx_packets;
int gso = skb_shinfo(skb)->gso_type;
struct stmmac_txq_stats *txq_stats;
struct dma_edesc *tbs_desc = NULL;
struct dma_desc *desc, *first;
struct stmmac_tx_queue *tx_q;
- bool has_vlan, set_ic;
+ int i, csum_insertion = 0;
int entry, first_tx;
dma_addr_t des;
+ u32 sdu_len;
tx_q = &priv->dma_conf.tx_queue[queue];
txq_stats = &priv->xstats.txq_stats[queue];
first_tx = tx_q->cur_tx;
if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
- stmmac_disable_eee_mode(priv);
+ stmmac_stop_sw_lpi(priv);
/* Manage oversized TCP frames for GMAC4 device */
if (skb_is_gso(skb) && priv->tso) {
if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
return stmmac_tso_xmit(skb, dev);
- if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
+ if (priv->plat->core_type == DWMAC_CORE_GMAC4 &&
+ (gso & SKB_GSO_UDP_L4))
return stmmac_tso_xmit(skb, dev);
}
if (priv->est && priv->est->enable &&
- priv->est->max_sdu[queue] &&
- skb->len > priv->est->max_sdu[queue]){
- priv->xstats.max_sdu_txq_drop[queue]++;
- goto max_sdu_err;
+ priv->est->max_sdu[queue]) {
+ sdu_len = skb->len;
+ /* Add VLAN tag length if VLAN tag insertion offload is requested */
+ if (priv->dma_cap.vlins && skb_vlan_tag_present(skb))
+ sdu_len += VLAN_HLEN;
+ if (sdu_len > priv->est->max_sdu[queue]) {
+ priv->xstats.max_sdu_txq_drop[queue]++;
+ goto max_sdu_err;
+ }
}
if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
@@ -4665,8 +4826,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
- skb_tx_timestamp(skb);
-
/* Ready to fill the first descriptor and set the OWN bit w/o any
* problems because all the descriptors are actually ready to be
* passed to the DMA engine.
@@ -4713,7 +4872,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
-
+ skb_tx_timestamp(skb);
stmmac_flush_tx_descriptors(priv, queue);
stmmac_tx_timer_arm(priv, queue);
@@ -4778,7 +4937,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
break;
}
- if (priv->sph && !buf->sec_page) {
+ if (priv->sph_active && !buf->sec_page) {
buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
if (!buf->sec_page)
break;
@@ -4789,7 +4948,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
stmmac_set_desc_addr(priv, p, buf->addr);
- if (priv->sph)
+ if (priv->sph_active)
stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
else
stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
@@ -4814,6 +4973,8 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
rx_q->rx_tail_addr = rx_q->dma_rx_phy +
(rx_q->dirty_rx * sizeof(struct dma_desc));
stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
+ /* Wake up Rx DMA from the suspend state if required */
+ stmmac_enable_dma_reception(priv, priv->ioaddr, queue);
}
static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
@@ -4824,12 +4985,12 @@ static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
int coe = priv->hw->rx_csum;
/* Not first descriptor, buffer is always zero */
- if (priv->sph && len)
+ if (priv->sph_active && len)
return 0;
/* First descriptor, get split header length */
stmmac_get_rx_header_len(priv, p, &hlen);
- if (priv->sph && hlen) {
+ if (priv->sph_active && hlen) {
priv->xstats.rx_split_hdr_pkt_n++;
return hlen;
}
@@ -4852,7 +5013,7 @@ static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
unsigned int plen = 0;
/* Not split header, buffer is not available */
- if (!priv->sph)
+ if (!priv->sph_active)
return 0;
/* Not last descriptor */
@@ -4870,6 +5031,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
{
struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc;
dma_addr_t dma_addr;
@@ -4921,7 +5083,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
stmmac_set_desc_addr(priv, tx_desc, dma_addr);
stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
- true, priv->mode, true, true,
+ csum, priv->mode, true, true,
xdpf->len);
tx_q->tx_count_frames++;
@@ -5234,10 +5396,10 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
len = 0;
}
+read_again:
if (count >= limit)
break;
-read_again:
buf1_len = 0;
entry = next_entry;
buf = &rx_q->buf_pool[entry];
@@ -5394,10 +5556,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
struct sk_buff *skb = NULL;
struct stmmac_xdp_buff ctx;
int xdp_status = 0;
- int buf_sz;
+ int bufsz;
dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
- buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
+ bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
if (netif_msg_rx_status(priv)) {
@@ -5468,7 +5630,7 @@ read_again:
if (priv->extend_desc)
stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
if (unlikely(status == discard_frame)) {
- page_pool_recycle_direct(rx_q->page_pool, buf->page);
+ page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
buf->page = NULL;
error = 1;
if (!priv->hwts_rx_en)
@@ -5486,10 +5648,6 @@ read_again:
/* Buffer is good. Go on. */
- prefetch(page_address(buf->page) + buf->page_offset);
- if (buf->sec_page)
- prefetch(page_address(buf->sec_page));
-
buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
len += buf1_len;
buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
@@ -5511,8 +5669,10 @@ read_again:
dma_sync_single_for_cpu(priv->device, buf->addr,
buf1_len, dma_dir);
+ net_prefetch(page_address(buf->page) +
+ buf->page_offset);
- xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
+ xdp_init_buff(&ctx.xdp, bufsz, &rx_q->xdp_rxq);
xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
buf->page_offset, buf1_len, true);
@@ -5564,22 +5724,26 @@ read_again:
}
if (!skb) {
+ unsigned int head_pad_len;
+
/* XDP program may expand or reduce tail */
buf1_len = ctx.xdp.data_end - ctx.xdp.data;
- skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
+ skb = napi_build_skb(page_address(buf->page),
+ rx_q->napi_skb_frag_size);
if (!skb) {
+ page_pool_recycle_direct(rx_q->page_pool,
+ buf->page);
rx_dropped++;
count++;
goto drain_data;
}
/* XDP program may adjust header */
- skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
+ head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start;
+ skb_reserve(skb, head_pad_len);
skb_put(skb, buf1_len);
-
- /* Data payload copied into SKB, page ready for recycle */
- page_pool_recycle_direct(rx_q->page_pool, buf->page);
+ skb_mark_for_recycle(skb);
buf->page = NULL;
} else if (buf1_len) {
dma_sync_single_for_cpu(priv->device, buf->addr,
@@ -5587,9 +5751,6 @@ read_again:
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
buf->page, buf->page_offset, buf1_len,
priv->dma_conf.dma_buf_sz);
-
- /* Data payload appended into SKB */
- skb_mark_for_recycle(skb);
buf->page = NULL;
}
@@ -5599,9 +5760,6 @@ read_again:
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
buf->sec_page, 0, buf2_len,
priv->dma_conf.dma_buf_sz);
-
- /* Data payload appended into SKB */
- skb_mark_for_recycle(skb);
buf->sec_page = NULL;
}
@@ -5624,7 +5782,8 @@ drain_data:
skb->protocol = eth_type_trans(skb, priv->dev);
- if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
+ if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb) ||
+ (status & csum_none))
skb_checksum_none_assert(skb);
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -5800,6 +5959,9 @@ static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
* whenever multicast addresses must be enabled/disabled.
* Return value:
* void.
+ *
+ * FIXME: This may need RXC to be running, but it may be called with BH
+ * disabled, which means we can't call phylink_rx_clk_stop*().
*/
static void stmmac_set_rx_mode(struct net_device *dev)
{
@@ -5853,7 +6015,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
return PTR_ERR(dma_conf);
}
- stmmac_release(dev);
+ __stmmac_release(dev);
ret = __stmmac_open(dev, dma_conf);
if (ret) {
@@ -5919,8 +6081,8 @@ static int stmmac_set_features(struct net_device *netdev,
*/
stmmac_rx_ipc(priv, priv->hw);
- if (priv->sph_cap) {
- bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+ if (priv->sph_capable) {
+ bool sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;
u32 chan;
for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
@@ -5932,7 +6094,9 @@ static int stmmac_set_features(struct net_device *netdev,
else
priv->hw->hw_vlan_en = false;
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_hw_vlan_mode(priv, priv->hw);
+ phylink_rx_clk_stop_unblock(priv->phylink);
return 0;
}
@@ -5945,7 +6109,7 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv)
u32 queue;
bool xmac;
- xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+ xmac = dwmac_is_xmac(priv->plat->core_type);
queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
if (priv->irq_wake)
@@ -5959,7 +6123,7 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv)
stmmac_fpe_irq_status(priv);
/* To handle GMAC own interrupts */
- if ((priv->plat->has_gmac) || xmac) {
+ if (priv->plat->core_type == DWMAC_CORE_GMAC || xmac) {
int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
if (unlikely(status)) {
@@ -5973,15 +6137,6 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv)
for (queue = 0; queue < queues_count; queue++)
stmmac_host_mtl_irq_status(priv, priv->hw, queue);
- /* PCS link status */
- if (priv->hw->pcs &&
- !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
- if (priv->xstats.pcs_link)
- netif_carrier_on(priv->dev);
- else
- netif_carrier_off(priv->dev);
- }
-
stmmac_timestamp_interrupt(priv, priv);
}
}
@@ -6118,12 +6273,6 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
case SIOCSMIIREG:
ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
break;
- case SIOCSHWTSTAMP:
- ret = stmmac_hwtstamp_set(dev, rq);
- break;
- case SIOCGHWTSTAMP:
- ret = stmmac_hwtstamp_get(dev, rq);
- break;
default:
break;
}
@@ -6216,7 +6365,9 @@ static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
if (ret)
goto set_mac_error;
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
+ phylink_rx_clk_stop_unblock(priv->phylink);
set_mac_error:
pm_runtime_put(priv->device);
@@ -6333,7 +6484,7 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
(priv->dma_cap.mbps_1000) ? "Y" : "N");
seq_printf(seq, "\tHalf duplex: %s\n",
(priv->dma_cap.half_duplex) ? "Y" : "N");
- if (priv->plat->has_xgmac) {
+ if (priv->plat->core_type == DWMAC_CORE_XGMAC) {
seq_printf(seq,
"\tNumber of Additional MAC address registers: %d\n",
priv->dma_cap.multi_addr);
@@ -6357,7 +6508,7 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
(priv->dma_cap.time_stamp) ? "Y" : "N");
seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
(priv->dma_cap.atime_stamp) ? "Y" : "N");
- if (priv->plat->has_xgmac)
+ if (priv->plat->core_type == DWMAC_CORE_XGMAC)
seq_printf(seq, "\tTimestamp System Time Source: %s\n",
dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
@@ -6366,7 +6517,7 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
seq_printf(seq, "\tChecksum Offload in TX: %s\n",
(priv->dma_cap.tx_coe) ? "Y" : "N");
if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
- priv->plat->has_xgmac) {
+ priv->plat->core_type == DWMAC_CORE_XGMAC) {
seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
(priv->dma_cap.rx_coe) ? "Y" : "N");
} else {
@@ -6484,11 +6635,7 @@ static int stmmac_device_event(struct notifier_block *unused,
switch (event) {
case NETDEV_CHANGENAME:
- if (priv->dbgfs_dir)
- priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
- priv->dbgfs_dir,
- stmmac_fs_dir,
- dev->name);
+ debugfs_change_name(priv->dbgfs_dir, "%s", dev->name);
break;
}
done:
@@ -6576,6 +6723,9 @@ static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
}
+/* FIXME: This may need RXC to be running, but it may be called with BH
+ * disabled, which means we can't call phylink_rx_clk_stop*().
+ */
static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -6607,6 +6757,9 @@ err_pm_put:
return ret;
}
+/* FIXME: This may need RXC to be running, but it may be called with BH
+ * disabled, which means we can't call phylink_rx_clk_stop*().
+ */
static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -6878,7 +7031,7 @@ int stmmac_xdp_open(struct net_device *dev)
}
/* Adjust Split header */
- sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+ sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;
/* DMA RX Channel Configuration */
for (chan = 0; chan < rx_cnt; chan++) {
@@ -6918,8 +7071,7 @@ int stmmac_xdp_open(struct net_device *dev)
stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
tx_q->tx_tail_addr, chan);
- hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- tx_q->txtimer.function = stmmac_tx_timer;
+ hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
/* Enable the MAC Rx/Tx */
@@ -6944,7 +7096,6 @@ irq_error:
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
- stmmac_hw_teardown(dev);
init_error:
free_dma_desc_resources(priv, &priv->dma_conf);
dma_desc_error:
@@ -7059,6 +7210,8 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_bpf = stmmac_bpf,
.ndo_xdp_xmit = stmmac_xdp_xmit,
.ndo_xsk_wakeup = stmmac_xsk_wakeup,
+ .ndo_hwtstamp_get = stmmac_hwtstamp_get,
+ .ndo_hwtstamp_set = stmmac_hwtstamp_set,
};
static void stmmac_reset_subtask(struct stmmac_priv *priv)
@@ -7127,7 +7280,6 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
priv->plat->enh_desc = priv->dma_cap.enh_desc;
priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
- priv->hw->pmt = priv->plat->pmt;
if (priv->dma_cap.hash_tb_sz) {
priv->hw->multicast_filter_bins =
(BIT(priv->dma_cap.hash_tb_sz) << 5);
@@ -7165,11 +7317,42 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
if (priv->plat->pmt) {
dev_info(priv->device, "Wake-Up On Lan supported\n");
device_set_wakeup_capable(priv->device, 1);
+ devm_pm_set_wake_irq(priv->device, priv->wol_irq);
}
if (priv->dma_cap.tsoen)
dev_info(priv->device, "TSO supported\n");
+ if (priv->dma_cap.number_rx_queues &&
+ priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
+ dev_warn(priv->device,
+ "Number of Rx queues (%u) exceeds dma capability\n",
+ priv->plat->rx_queues_to_use);
+ priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
+ }
+ if (priv->dma_cap.number_tx_queues &&
+ priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
+ dev_warn(priv->device,
+ "Number of Tx queues (%u) exceeds dma capability\n",
+ priv->plat->tx_queues_to_use);
+ priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
+ }
+
+ if (priv->dma_cap.rx_fifo_size &&
+ priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
+ dev_warn(priv->device,
+ "Rx FIFO size (%u) exceeds dma capability\n",
+ priv->plat->rx_fifo_size);
+ priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
+ }
+ if (priv->dma_cap.tx_fifo_size &&
+ priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
+ dev_warn(priv->device,
+ "Tx FIFO size (%u) exceeds dma capability\n",
+ priv->plat->tx_fifo_size);
+ priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
+ }
+
priv->hw->vlan_fail_q_en =
(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
@@ -7186,13 +7369,21 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
* has to be disable and this can be done by passing the
* riwt_off field from the platform.
*/
- if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
- (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
+ if ((priv->synopsys_id >= DWMAC_CORE_3_50 ||
+ priv->plat->core_type == DWMAC_CORE_XGMAC) &&
+ !priv->plat->riwt_off) {
priv->use_riwt = 1;
dev_info(priv->device,
"Enable RX Mitigation via HW Watchdog Timer\n");
}
+ /* Unimplemented PCS init (as indicated by stmmac_do_callback()
+ * perversely returning -EINVAL) is non-fatal.
+ */
+ ret = stmmac_mac_pcs_init(priv);
+ if (ret != -EINVAL)
+ return ret;
+
return 0;
}
@@ -7301,7 +7492,7 @@ static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
return -ENODATA;
/* For GMAC4, the valid timestamp is from CTX next desc. */
- if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
+ if (dwmac_is_xmac(priv->plat->core_type))
desc_contains_ts = ndesc;
/* Check if timestamp is available */
@@ -7319,19 +7510,133 @@ static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
.xmo_rx_timestamp = stmmac_xdp_rx_timestamp,
};
-/**
- * stmmac_dvr_probe
- * @device: device pointer
- * @plat_dat: platform data pointer
- * @res: stmmac resource pointer
- * Description: this is the main probe function used to
- * call the alloc_etherdev, allocate the priv structure.
- * Return:
- * returns 0 on success, otherwise errno.
- */
-int stmmac_dvr_probe(struct device *device,
- struct plat_stmmacenet_data *plat_dat,
- struct stmmac_resources *res)
+static int stmmac_dl_ts_coarse_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct stmmac_devlink_priv *dl_priv = devlink_priv(dl);
+ struct stmmac_priv *priv = dl_priv->stmmac_priv;
+
+ priv->tsfupdt_coarse = ctx->val.vbool;
+
+ if (priv->tsfupdt_coarse)
+ priv->systime_flags &= ~PTP_TCR_TSCFUPDT;
+ else
+ priv->systime_flags |= PTP_TCR_TSCFUPDT;
+
+ /* In Coarse mode, we can use a smaller subsecond increment, so
+ * reconfigure the systime, subsecond increment and addend.
+ */
+ stmmac_update_subsecond_increment(priv);
+
+ return 0;
+}
+
+static int stmmac_dl_ts_coarse_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct stmmac_devlink_priv *dl_priv = devlink_priv(dl);
+ struct stmmac_priv *priv = dl_priv->stmmac_priv;
+
+ ctx->val.vbool = priv->tsfupdt_coarse;
+
+ return 0;
+}
+
+static const struct devlink_param stmmac_devlink_params[] = {
+ DEVLINK_PARAM_DRIVER(STMMAC_DEVLINK_PARAM_ID_TS_COARSE, "phc_coarse_adj",
+ DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ stmmac_dl_ts_coarse_get,
+ stmmac_dl_ts_coarse_set, NULL),
+};
+
+/* None of the generic devlink parameters are implemented */
+static const struct devlink_ops stmmac_devlink_ops = {};
+
+static int stmmac_register_devlink(struct stmmac_priv *priv)
+{
+ struct stmmac_devlink_priv *dl_priv;
+ int ret;
+
+ /* For now, what is exposed over devlink is only relevant when
+ * timestamping is available and we have a valid ptp clock rate
+ */
+ if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp) ||
+ !priv->plat->clk_ptp_rate)
+ return 0;
+
+ priv->devlink = devlink_alloc(&stmmac_devlink_ops, sizeof(*dl_priv),
+ priv->device);
+ if (!priv->devlink)
+ return -ENOMEM;
+
+ dl_priv = devlink_priv(priv->devlink);
+ dl_priv->stmmac_priv = priv;
+
+ ret = devlink_params_register(priv->devlink, stmmac_devlink_params,
+ ARRAY_SIZE(stmmac_devlink_params));
+ if (ret)
+ goto dl_free;
+
+ devlink_register(priv->devlink);
+ return 0;
+
+dl_free:
+ devlink_free(priv->devlink);
+
+ return ret;
+}
+
+static void stmmac_unregister_devlink(struct stmmac_priv *priv)
+{
+ if (!priv->devlink)
+ return;
+
+ devlink_unregister(priv->devlink);
+ devlink_params_unregister(priv->devlink, stmmac_devlink_params,
+ ARRAY_SIZE(stmmac_devlink_params));
+ devlink_free(priv->devlink);
+}
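The "phc_coarse_adj" parameter registered above is a runtime devlink parameter; a hypothetical user-space toggle via iproute2's devlink tool is sketched below. Only the parameter name and the runtime cmode come from this patch, the PCI device path is assumed.

/* Hypothetical invocation (device path assumed):
 *
 *   devlink dev param set pci/0000:01:00.0 name phc_coarse_adj \
 *           value true cmode runtime
 */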
+
+struct plat_stmmacenet_data *stmmac_plat_dat_alloc(struct device *dev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ int i;
+
+ plat_dat = devm_kzalloc(dev, sizeof(*plat_dat), GFP_KERNEL);
+ if (!plat_dat)
+ return NULL;
+
+ /* Set the defaults:
+ * - phy autodetection
+ * - determine GMII_Address CR field from CSR clock
+ * - allow MTU up to JUMBO_LEN
+ * - hash table size
+ * - one unicast filter entry
+ */
+ plat_dat->phy_addr = -1;
+ plat_dat->clk_csr = -1;
+ plat_dat->maxmtu = JUMBO_LEN;
+ plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
+ plat_dat->unicast_filter_entries = 1;
+
+ /* Set the mtl defaults */
+ plat_dat->tx_queues_to_use = 1;
+ plat_dat->rx_queues_to_use = 1;
+
+ /* Setup the default RX queue channel map */
+ for (i = 0; i < ARRAY_SIZE(plat_dat->rx_queues_cfg); i++)
+ plat_dat->rx_queues_cfg[i].chan = i;
+
+ return plat_dat;
+}
+EXPORT_SYMBOL_GPL(stmmac_plat_dat_alloc);
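A minimal sketch of how a bus glue driver is expected to consume this helper, mirroring the PCI probe change further below; the overrides shown are illustrative, taken from the GMAC5 PCI defaults rather than from any particular platform:

	plat = stmmac_plat_dat_alloc(&pdev->dev);
	if (!plat)
		return -ENOMEM;

	/* Override only what differs from the defaults set above */
	plat->core_type = DWMAC_CORE_GMAC4;
	plat->tx_queues_to_use = 4;
	plat->rx_queues_to_use = 4;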
+
+static int __stmmac_dvr_probe(struct device *device,
+ struct plat_stmmacenet_data *plat_dat,
+ struct stmmac_resources *res)
{
struct net_device *ndev = NULL;
struct stmmac_priv *priv;
@@ -7362,7 +7667,7 @@ int stmmac_dvr_probe(struct device *device,
return -ENOMEM;
stmmac_set_ethtool_ops(ndev);
- priv->pause = pause;
+ priv->pause_time = pause;
priv->plat = plat_dat;
priv->ioaddr = res->addr;
priv->dev->base_addr = (unsigned long)res->addr;
@@ -7402,6 +7707,8 @@ int stmmac_dvr_probe(struct device *device,
INIT_WORK(&priv->service_task, stmmac_service_task);
+ timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
+
/* Override with kernel parameters if supplied XXX CRS XXX
* this needs to have multiple instances
*/
@@ -7455,7 +7762,7 @@ int stmmac_dvr_probe(struct device *device,
if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
- if (priv->plat->has_gmac4)
+ if (priv->plat->core_type == DWMAC_CORE_GMAC4)
ndev->hw_features |= NETIF_F_GSO_UDP_L4;
priv->tso = true;
dev_info(priv->device, "TSO feature enabled\n");
@@ -7464,8 +7771,8 @@ int stmmac_dvr_probe(struct device *device,
if (priv->dma_cap.sphen &&
!(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
ndev->hw_features |= NETIF_F_GRO;
- priv->sph_cap = true;
- priv->sph = priv->sph_cap;
+ priv->sph_capable = true;
+ priv->sph_active = priv->sph_capable;
dev_info(priv->device, "SPH feature enabled\n");
}
@@ -7508,7 +7815,7 @@ int stmmac_dvr_probe(struct device *device,
#ifdef STMMAC_VLAN_TAG_USED
/* Both mac100 and gmac support receive VLAN tag detection */
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
- if (priv->plat->has_gmac4) {
+ if (dwmac_is_xmac(priv->plat->core_type)) {
ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
priv->hw->hw_vlan_en = true;
}
@@ -7516,11 +7823,8 @@ int stmmac_dvr_probe(struct device *device,
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
}
- if (priv->dma_cap.vlins) {
+ if (priv->dma_cap.vlins)
ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
- if (priv->dma_cap.dvlan)
- ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
- }
#endif
priv->msg_enable = netif_msg_init(debug, default_msg_level);
@@ -7539,25 +7843,23 @@ int stmmac_dvr_probe(struct device *device,
/* MTU range: 46 - hw-specific max */
ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
- if (priv->plat->has_xgmac)
+
+ if (priv->plat->core_type == DWMAC_CORE_XGMAC)
ndev->max_mtu = XGMAC_JUMBO_LEN;
- else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
+ else if (priv->plat->enh_desc || priv->synopsys_id >= DWMAC_CORE_4_00)
ndev->max_mtu = JUMBO_LEN;
else
ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
- /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
- * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
+
+ /* Warn if the platform's maxmtu is smaller than the minimum MTU,
+ * otherwise clamp the maximum MTU above to the platform's maxmtu.
*/
- if ((priv->plat->maxmtu < ndev->max_mtu) &&
- (priv->plat->maxmtu >= ndev->min_mtu))
- ndev->max_mtu = priv->plat->maxmtu;
- else if (priv->plat->maxmtu < ndev->min_mtu)
+ if (priv->plat->maxmtu < ndev->min_mtu)
dev_warn(priv->device,
"%s: warning: maxmtu having invalid value (%d)\n",
__func__, priv->plat->maxmtu);
-
- if (flow_ctrl)
- priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
+ else if (priv->plat->maxmtu < ndev->max_mtu)
+ ndev->max_mtu = priv->plat->maxmtu;
ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
@@ -7568,17 +7870,6 @@ int stmmac_dvr_probe(struct device *device,
stmmac_fpe_init(priv);
- /* If a specific clk_csr value is passed from the platform
- * this means that the CSR Clock Range selection cannot be
- * changed at run-time and it is fixed. Viceversa the driver'll try to
- * set the MDC clock dynamically according to the csr actual
- * clock input.
- */
- if (priv->plat->clk_csr >= 0)
- priv->clk_csr = priv->plat->clk_csr;
- else
- stmmac_clk_csr_set(priv);
-
stmmac_check_pcs_mode(priv);
pm_runtime_get_noresume(device);
@@ -7594,19 +7885,20 @@ int stmmac_dvr_probe(struct device *device,
goto error_mdio_register;
}
- if (priv->plat->speed_mode_2500)
- priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
-
ret = stmmac_pcs_setup(ndev);
if (ret)
goto error_pcs_setup;
- ret = stmmac_phy_setup(priv);
+ ret = stmmac_phylink_setup(priv);
if (ret) {
netdev_err(ndev, "failed to setup phy (%d)\n", ret);
goto error_phy_setup;
}
+ ret = stmmac_register_devlink(priv);
+ if (ret)
+ goto error_devlink_setup;
+
ret = register_netdev(ndev);
if (ret) {
dev_err(priv->device, "%s: ERROR %i registering the device\n",
@@ -7629,6 +7921,8 @@ int stmmac_dvr_probe(struct device *device,
return ret;
error_netdev_register:
+ stmmac_unregister_devlink(priv);
+error_devlink_setup:
phylink_destroy(priv->phylink);
error_phy_setup:
stmmac_pcs_clean(ndev);
@@ -7643,6 +7937,34 @@ error_wq_init:
return ret;
}
+
+/**
+ * stmmac_dvr_probe
+ * @dev: device pointer
+ * @plat_dat: platform data pointer
+ * @res: stmmac resource pointer
+ * Description: this is the main probe function used to
+ * call the alloc_etherdev, allocate the priv structure.
+ * Return:
+ * returns 0 on success, otherwise errno.
+ */
+int stmmac_dvr_probe(struct device *dev, struct plat_stmmacenet_data *plat_dat,
+ struct stmmac_resources *res)
+{
+ int ret;
+
+ if (plat_dat->init) {
+ ret = plat_dat->init(dev, plat_dat->bsp_priv);
+ if (ret)
+ return ret;
+ }
+
+ ret = __stmmac_dvr_probe(dev, plat_dat, res);
+ if (ret && plat_dat->exit)
+ plat_dat->exit(dev, plat_dat->bsp_priv);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
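A hedged sketch of the ->init()/->exit() hooks the wrapper above calls; the function names are placeholders and the signatures are inferred from the call sites (the return value of ->exit() is never used, so it is assumed void):

static int foo_dwmac_init(struct device *dev, void *bsp_priv)
{
	/* Board-level setup (clocks, resets, regulators) before the core probe */
	return 0;
}

static void foo_dwmac_exit(struct device *dev, void *bsp_priv)
{
	/* Undo foo_dwmac_init(); also invoked from stmmac_dvr_remove() */
}

A glue driver would point plat_dat->init and plat_dat->exit at these before calling stmmac_dvr_probe().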
/**
@@ -7660,13 +7982,13 @@ void stmmac_dvr_remove(struct device *dev)
pm_runtime_get_sync(dev);
- stmmac_stop_all_dma(priv);
- stmmac_mac_set(priv, priv->ioaddr, false);
unregister_netdev(ndev);
#ifdef CONFIG_DEBUG_FS
stmmac_exit_fs(ndev);
#endif
+ stmmac_unregister_devlink(priv);
+
phylink_destroy(priv->phylink);
if (priv->plat->stmmac_rst)
reset_control_assert(priv->plat->stmmac_rst);
@@ -7681,6 +8003,9 @@ void stmmac_dvr_remove(struct device *dev)
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
+
+ if (priv->plat->exit)
+ priv->plat->exit(dev, priv->plat->bsp_priv);
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
@@ -7709,9 +8034,9 @@ int stmmac_suspend(struct device *dev)
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
- if (priv->eee_enabled) {
+ if (priv->eee_sw_timer_en) {
priv->tx_path_in_lpi_mode = false;
- del_timer_sync(&priv->eee_ctrl_timer);
+ timer_delete_sync(&priv->eee_ctrl_timer);
}
/* Stop TX/RX DMA */
@@ -7721,7 +8046,7 @@ int stmmac_suspend(struct device *dev)
priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
/* Enable Power down mode by programming the PMT regs */
- if (device_may_wakeup(priv->device) && priv->plat->pmt) {
+ if (priv->wolopts) {
stmmac_pmt(priv, priv->hw, priv->wolopts);
priv->irq_wake = 1;
} else {
@@ -7732,19 +8057,15 @@ int stmmac_suspend(struct device *dev)
mutex_unlock(&priv->lock);
rtnl_lock();
- if (device_may_wakeup(priv->device) && priv->plat->pmt) {
- phylink_suspend(priv->phylink, true);
- } else {
- if (device_may_wakeup(priv->device))
- phylink_speed_down(priv->phylink, false);
- phylink_suspend(priv->phylink, false);
- }
+ phylink_suspend(priv->phylink, !!priv->wolopts);
rtnl_unlock();
if (stmmac_fpe_supported(priv))
- timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
+ ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
+
+ if (priv->plat->suspend)
+ return priv->plat->suspend(dev, priv->plat->bsp_priv);
- priv->speed = SPEED_UNKNOWN;
return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);
@@ -7797,6 +8118,12 @@ int stmmac_resume(struct device *dev)
struct stmmac_priv *priv = netdev_priv(ndev);
int ret;
+ if (priv->plat->resume) {
+ ret = priv->plat->resume(dev, priv->plat->bsp_priv);
+ if (ret)
+ return ret;
+ }
+
if (!netif_running(ndev))
return 0;
@@ -7806,7 +8133,7 @@ int stmmac_resume(struct device *dev)
* this bit because it can generate problems while resuming
* from another devices (e.g. serial console).
*/
- if (device_may_wakeup(priv->device) && priv->plat->pmt) {
+ if (priv->wolopts) {
mutex_lock(&priv->lock);
stmmac_pmt(priv, priv->hw, 0);
mutex_unlock(&priv->lock);
@@ -7828,16 +8155,12 @@ int stmmac_resume(struct device *dev)
}
rtnl_lock();
- if (device_may_wakeup(priv->device) && priv->plat->pmt) {
- phylink_resume(priv->phylink);
- } else {
- phylink_resume(priv->phylink);
- if (device_may_wakeup(priv->device))
- phylink_speed_up(priv->phylink);
- }
- rtnl_unlock();
- rtnl_lock();
+ /* Prepare the PHY to resume, ensuring that its clocks, which are
+ * necessary for the MAC DMA reset to complete, are running.
+ */
+ phylink_prepare_resume(priv->phylink);
+
mutex_lock(&priv->lock);
stmmac_reset_queues_param(priv);
@@ -7845,16 +8168,33 @@ int stmmac_resume(struct device *dev)
stmmac_free_tx_skbufs(priv);
stmmac_clear_descriptors(priv, &priv->dma_conf);
- stmmac_hw_setup(ndev, false);
+ ret = stmmac_hw_setup(ndev);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
+ mutex_unlock(&priv->lock);
+ rtnl_unlock();
+ return ret;
+ }
+
+ stmmac_init_timestamping(priv);
+
stmmac_init_coalesce(priv);
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_rx_mode(ndev);
stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
+ phylink_rx_clk_stop_unblock(priv->phylink);
stmmac_enable_all_queues(priv);
stmmac_enable_all_dma_irq(priv);
mutex_unlock(&priv->lock);
+
+ /* phylink_resume() must be called after the hardware has been
+ * initialised because it may bring the link up immediately in a
+ * workqueue thread, which will race with initialisation.
+ */
+ phylink_resume(priv->phylink);
rtnl_unlock();
netif_device_attach(ndev);
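A recap of the resulting resume ordering, restating the hunks above rather than adding behaviour:

/* stmmac_resume() after this change:
 *   1. plat->resume()            - optional platform hook, runs first
 *   2. stmmac_pmt(hw, 0)         - leave PMT power-down when priv->wolopts is set
 *   3. phylink_prepare_resume()  - get the PHY-supplied clocks running
 *   4. stmmac_hw_setup(), stmmac_init_timestamping(), stmmac_init_coalesce()
 *   5. RX mode and VLAN filter restore under phylink_rx_clk_stop_block()
 *   6. enable all queues and DMA interrupts
 *   7. phylink_resume()          - last, so the link cannot come up before
 *                                  the MAC is fully initialised
 */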
@@ -7863,6 +8203,10 @@ int stmmac_resume(struct device *dev)
}
EXPORT_SYMBOL_GPL(stmmac_resume);
+/* This is not the same as EXPORT_GPL_SIMPLE_DEV_PM_OPS() when CONFIG_PM=n */
+DEFINE_SIMPLE_DEV_PM_OPS(stmmac_simple_pm_ops, stmmac_suspend, stmmac_resume);
+EXPORT_SYMBOL_GPL(stmmac_simple_pm_ops);
+
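Glue drivers can point their dev_pm_ops at this shared table instead of wrapping stmmac_suspend()/stmmac_resume() themselves, as the PCI driver below now does; a hypothetical platform glue (names illustrative) might look like:

static struct platform_driver foo_dwmac_driver = {
	.probe	= foo_dwmac_probe,
	.driver	= {
		.name	= "foo-dwmac",
		.pm	= &stmmac_simple_pm_ops,
	},
};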
#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
@@ -7877,9 +8221,6 @@ static int __init stmmac_cmdline_opt(char *str)
} else if (!strncmp(opt, "phyaddr:", 8)) {
if (kstrtoint(opt + 8, 0, &phyaddr))
goto err;
- } else if (!strncmp(opt, "buf_sz:", 7)) {
- if (kstrtoint(opt + 7, 0, &buf_sz))
- goto err;
} else if (!strncmp(opt, "tc:", 3)) {
if (kstrtoint(opt + 3, 0, &tc))
goto err;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 0c7d81ddd440..1e82850f2a25 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -23,9 +23,9 @@
#include "dwxgmac2.h"
#include "stmmac.h"
-#define MII_BUSY 0x00000001
-#define MII_WRITE 0x00000002
-#define MII_DATA_MASK GENMASK(15, 0)
+#define MII_ADDR_GBUSY BIT(0)
+#define MII_ADDR_GWRITE BIT(1)
+#define MII_DATA_GD_MASK GENMASK(15, 0)
/* GMAC4 defines */
#define MII_GMAC4_GOC_SHIFT 2
@@ -45,6 +45,16 @@
#define MII_XGMAC_PA_SHIFT 16
#define MII_XGMAC_DA_SHIFT 21
+static int stmmac_mdio_wait(void __iomem *reg, u32 mask)
+{
+ u32 v;
+
+ if (readl_poll_timeout(reg, v, !(v & mask), 100, 10000))
+ return -EBUSY;
+
+ return 0;
+}
+
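The new helper folds the repeated readl_poll_timeout() pattern used throughout this file (poll every 100 us, give up after 10 ms); a caller sketch using names from the functions below:

	ret = stmmac_mdio_wait(priv->ioaddr + priv->hw->mii.data, MII_XGMAC_BUSY);
	if (ret)	/* -EBUSY: the MDIO interface is still busy */
		goto err_disable_clks;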
static void stmmac_xgmac2_c45_format(struct stmmac_priv *priv, int phyaddr,
int devad, int phyreg, u32 *hw_addr)
{
@@ -83,7 +93,6 @@ static int stmmac_xgmac2_mdio_read(struct stmmac_priv *priv, u32 addr,
{
unsigned int mii_address = priv->hw->mii.addr;
unsigned int mii_data = priv->hw->mii.data;
- u32 tmp;
int ret;
ret = pm_runtime_resume_and_get(priv->device);
@@ -91,33 +100,25 @@ static int stmmac_xgmac2_mdio_read(struct stmmac_priv *priv, u32 addr,
return ret;
/* Wait until any existing MII operation is complete */
- if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
- ret = -EBUSY;
+ ret = stmmac_mdio_wait(priv->ioaddr + mii_data, MII_XGMAC_BUSY);
+ if (ret)
goto err_disable_clks;
- }
- value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
- & priv->hw->mii.clk_csr_mask;
- value |= MII_XGMAC_READ;
+ value |= priv->gmii_address_bus_config | MII_XGMAC_READ;
/* Wait until any existing MII operation is complete */
- if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
- ret = -EBUSY;
+ ret = stmmac_mdio_wait(priv->ioaddr + mii_data, MII_XGMAC_BUSY);
+ if (ret)
goto err_disable_clks;
- }
/* Set the MII address register to read */
writel(addr, priv->ioaddr + mii_address);
writel(value, priv->ioaddr + mii_data);
/* Wait until any existing MII operation is complete */
- if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
- ret = -EBUSY;
+ ret = stmmac_mdio_wait(priv->ioaddr + mii_data, MII_XGMAC_BUSY);
+ if (ret)
goto err_disable_clks;
- }
/* Read the data from the MII data register */
ret = (int)readl(priv->ioaddr + mii_data) & GENMASK(15, 0);
@@ -131,12 +132,9 @@ err_disable_clks:
static int stmmac_xgmac2_mdio_read_c22(struct mii_bus *bus, int phyaddr,
int phyreg)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv;
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
u32 addr;
- priv = netdev_priv(ndev);
-
/* Until ver 2.20 XGMAC does not support C22 addr >= 4 */
if (priv->synopsys_id < DWXGMAC_CORE_2_20 &&
phyaddr > MII_XGMAC_MAX_C22ADDR)
@@ -150,12 +148,9 @@ static int stmmac_xgmac2_mdio_read_c22(struct mii_bus *bus, int phyaddr,
static int stmmac_xgmac2_mdio_read_c45(struct mii_bus *bus, int phyaddr,
int devad, int phyreg)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv;
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
u32 addr;
- priv = netdev_priv(ndev);
-
stmmac_xgmac2_c45_format(priv, phyaddr, devad, phyreg, &addr);
return stmmac_xgmac2_mdio_read(priv, addr, MII_XGMAC_BUSY);
@@ -166,7 +161,6 @@ static int stmmac_xgmac2_mdio_write(struct stmmac_priv *priv, u32 addr,
{
unsigned int mii_address = priv->hw->mii.addr;
unsigned int mii_data = priv->hw->mii.data;
- u32 tmp;
int ret;
ret = pm_runtime_resume_and_get(priv->device);
@@ -174,31 +168,23 @@ static int stmmac_xgmac2_mdio_write(struct stmmac_priv *priv, u32 addr,
return ret;
/* Wait until any existing MII operation is complete */
- if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
- ret = -EBUSY;
+ ret = stmmac_mdio_wait(priv->ioaddr + mii_data, MII_XGMAC_BUSY);
+ if (ret)
goto err_disable_clks;
- }
- value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
- & priv->hw->mii.clk_csr_mask;
- value |= phydata;
- value |= MII_XGMAC_WRITE;
+ value |= priv->gmii_address_bus_config | phydata | MII_XGMAC_WRITE;
/* Wait until any existing MII operation is complete */
- if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
- ret = -EBUSY;
+ ret = stmmac_mdio_wait(priv->ioaddr + mii_data, MII_XGMAC_BUSY);
+ if (ret)
goto err_disable_clks;
- }
/* Set the MII address register to write */
writel(addr, priv->ioaddr + mii_address);
writel(value, priv->ioaddr + mii_data);
/* Wait until any existing MII operation is complete */
- ret = readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000);
+ ret = stmmac_mdio_wait(priv->ioaddr + mii_data, MII_XGMAC_BUSY);
err_disable_clks:
pm_runtime_put(priv->device);
@@ -209,12 +195,9 @@ err_disable_clks:
static int stmmac_xgmac2_mdio_write_c22(struct mii_bus *bus, int phyaddr,
int phyreg, u16 phydata)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv;
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
u32 addr;
- priv = netdev_priv(ndev);
-
/* Until ver 2.20 XGMAC does not support C22 addr >= 4 */
if (priv->synopsys_id < DWXGMAC_CORE_2_20 &&
phyaddr > MII_XGMAC_MAX_C22ADDR)
@@ -229,37 +212,78 @@ static int stmmac_xgmac2_mdio_write_c22(struct mii_bus *bus, int phyaddr,
static int stmmac_xgmac2_mdio_write_c45(struct mii_bus *bus, int phyaddr,
int devad, int phyreg, u16 phydata)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv;
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
u32 addr;
- priv = netdev_priv(ndev);
-
stmmac_xgmac2_c45_format(priv, phyaddr, devad, phyreg, &addr);
return stmmac_xgmac2_mdio_write(priv, addr, MII_XGMAC_BUSY,
phydata);
}
-static int stmmac_mdio_read(struct stmmac_priv *priv, int data, u32 value)
+/**
+ * stmmac_mdio_format_addr() - format the address register
+ * @priv: struct stmmac_priv pointer
+ * @pa: 5-bit MDIO port (PHY) address
+ * @gr: 5-bit MDIO register address (C22) or MDIO device address (C45)
+ *
+ * Return: formatted address register
+ */
+static u32 stmmac_mdio_format_addr(struct stmmac_priv *priv,
+ unsigned int pa, unsigned int gr)
{
- unsigned int mii_address = priv->hw->mii.addr;
- unsigned int mii_data = priv->hw->mii.data;
- u32 v;
+ const struct mii_regs *mii_regs = &priv->hw->mii;
- if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
- 100, 10000))
- return -EBUSY;
+ return ((pa << mii_regs->addr_shift) & mii_regs->addr_mask) |
+ ((gr << mii_regs->reg_shift) & mii_regs->reg_mask) |
+ priv->gmii_address_bus_config |
+ MII_ADDR_GBUSY;
+}
- writel(data, priv->ioaddr + mii_data);
- writel(value, priv->ioaddr + mii_address);
+static int stmmac_mdio_access(struct stmmac_priv *priv, unsigned int pa,
+ unsigned int gr, u32 cmd, u32 data, bool read)
+{
+ void __iomem *mii_address = priv->ioaddr + priv->hw->mii.addr;
+ void __iomem *mii_data = priv->ioaddr + priv->hw->mii.data;
+ u32 addr;
+ int ret;
- if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
- 100, 10000))
- return -EBUSY;
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
+ return ret;
- /* Read the data from the MII data register */
- return readl(priv->ioaddr + mii_data) & MII_DATA_MASK;
+ ret = stmmac_mdio_wait(mii_address, MII_ADDR_GBUSY);
+ if (ret)
+ goto out;
+
+ addr = stmmac_mdio_format_addr(priv, pa, gr) | cmd;
+
+ writel(data, mii_data);
+ writel(addr, mii_address);
+
+ ret = stmmac_mdio_wait(mii_address, MII_ADDR_GBUSY);
+ if (ret)
+ goto out;
+
+ /* Read the data from the MII data register if in read mode */
+ ret = read ? readl(mii_data) & MII_DATA_GD_MASK : 0;
+
+out:
+ pm_runtime_put(priv->device);
+
+ return ret;
+}
+
+static int stmmac_mdio_read(struct stmmac_priv *priv, unsigned int pa,
+ unsigned int gr, u32 cmd, int data)
+{
+ return stmmac_mdio_access(priv, pa, gr, cmd, data, true);
+}
+
+static int stmmac_mdio_write(struct stmmac_priv *priv, unsigned int pa,
+ unsigned int gr, u32 cmd, int data)
+{
+ return stmmac_mdio_access(priv, pa, gr, cmd, data, false);
}
/**
@@ -274,28 +298,15 @@ static int stmmac_mdio_read(struct stmmac_priv *priv, int data, u32 value)
*/
static int stmmac_mdio_read_c22(struct mii_bus *bus, int phyaddr, int phyreg)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv = netdev_priv(ndev);
- u32 value = MII_BUSY;
- int data = 0;
-
- data = pm_runtime_resume_and_get(priv->device);
- if (data < 0)
- return data;
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
+ u32 cmd;
- value |= (phyaddr << priv->hw->mii.addr_shift)
- & priv->hw->mii.addr_mask;
- value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
- value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
- & priv->hw->mii.clk_csr_mask;
- if (priv->plat->has_gmac4)
- value |= MII_GMAC4_READ;
-
- data = stmmac_mdio_read(priv, data, value);
-
- pm_runtime_put(priv->device);
+ if (priv->plat->core_type == DWMAC_CORE_GMAC4)
+ cmd = MII_GMAC4_READ;
+ else
+ cmd = 0;
- return data;
+ return stmmac_mdio_read(priv, phyaddr, phyreg, cmd, 0);
}
/**
@@ -312,54 +323,11 @@ static int stmmac_mdio_read_c22(struct mii_bus *bus, int phyaddr, int phyreg)
static int stmmac_mdio_read_c45(struct mii_bus *bus, int phyaddr, int devad,
int phyreg)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv = netdev_priv(ndev);
- u32 value = MII_BUSY;
- int data = 0;
-
- data = pm_runtime_get_sync(priv->device);
- if (data < 0) {
- pm_runtime_put_noidle(priv->device);
- return data;
- }
-
- value |= (phyaddr << priv->hw->mii.addr_shift)
- & priv->hw->mii.addr_mask;
- value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
- value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
- & priv->hw->mii.clk_csr_mask;
- value |= MII_GMAC4_READ;
- value |= MII_GMAC4_C45E;
- value &= ~priv->hw->mii.reg_mask;
- value |= (devad << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
-
- data |= phyreg << MII_GMAC4_REG_ADDR_SHIFT;
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
+ int data = phyreg << MII_GMAC4_REG_ADDR_SHIFT;
+ u32 cmd = MII_GMAC4_READ | MII_GMAC4_C45E;
- data = stmmac_mdio_read(priv, data, value);
-
- pm_runtime_put(priv->device);
-
- return data;
-}
-
-static int stmmac_mdio_write(struct stmmac_priv *priv, int data, u32 value)
-{
- unsigned int mii_address = priv->hw->mii.addr;
- unsigned int mii_data = priv->hw->mii.data;
- u32 v;
-
- /* Wait until any existing MII operation is complete */
- if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
- 100, 10000))
- return -EBUSY;
-
- /* Set the MII address register to write */
- writel(data, priv->ioaddr + mii_data);
- writel(value, priv->ioaddr + mii_address);
-
- /* Wait until any existing MII operation is complete */
- return readl_poll_timeout(priv->ioaddr + mii_address, v,
- !(v & MII_BUSY), 100, 10000);
+ return stmmac_mdio_read(priv, phyaddr, devad, cmd, data);
}
/**
@@ -373,31 +341,15 @@ static int stmmac_mdio_write(struct stmmac_priv *priv, int data, u32 value)
static int stmmac_mdio_write_c22(struct mii_bus *bus, int phyaddr, int phyreg,
u16 phydata)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv = netdev_priv(ndev);
- int ret, data = phydata;
- u32 value = MII_BUSY;
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
+ u32 cmd;
- ret = pm_runtime_resume_and_get(priv->device);
- if (ret < 0)
- return ret;
-
- value |= (phyaddr << priv->hw->mii.addr_shift)
- & priv->hw->mii.addr_mask;
- value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
-
- value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
- & priv->hw->mii.clk_csr_mask;
- if (priv->plat->has_gmac4)
- value |= MII_GMAC4_WRITE;
+ if (priv->plat->core_type == DWMAC_CORE_GMAC4)
+ cmd = MII_GMAC4_WRITE;
else
- value |= MII_WRITE;
+ cmd = MII_ADDR_GWRITE;
- ret = stmmac_mdio_write(priv, data, value);
-
- pm_runtime_put(priv->device);
-
- return ret;
+ return stmmac_mdio_write(priv, phyaddr, phyreg, cmd, phydata);
}
/**
@@ -412,36 +364,13 @@ static int stmmac_mdio_write_c22(struct mii_bus *bus, int phyaddr, int phyreg,
static int stmmac_mdio_write_c45(struct mii_bus *bus, int phyaddr,
int devad, int phyreg, u16 phydata)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv = netdev_priv(ndev);
- int ret, data = phydata;
- u32 value = MII_BUSY;
-
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
- return ret;
- }
-
- value |= (phyaddr << priv->hw->mii.addr_shift)
- & priv->hw->mii.addr_mask;
- value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
-
- value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
- & priv->hw->mii.clk_csr_mask;
-
- value |= MII_GMAC4_WRITE;
- value |= MII_GMAC4_C45E;
- value &= ~priv->hw->mii.reg_mask;
- value |= (devad << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
+ u32 cmd = MII_GMAC4_WRITE | MII_GMAC4_C45E;
+ int data = phydata;
data |= phyreg << MII_GMAC4_REG_ADDR_SHIFT;
- ret = stmmac_mdio_write(priv, data, value);
-
- pm_runtime_put(priv->device);
-
- return ret;
+ return stmmac_mdio_write(priv, phyaddr, devad, cmd, data);
}
/**
@@ -452,8 +381,7 @@ static int stmmac_mdio_write_c45(struct mii_bus *bus, int phyaddr,
int stmmac_mdio_reset(struct mii_bus *bus)
{
#if IS_ENABLED(CONFIG_STMMAC_PLATFORM)
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv = netdev_priv(ndev);
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
unsigned int mii_address = priv->hw->mii.addr;
#ifdef CONFIG_OF
@@ -489,7 +417,7 @@ int stmmac_mdio_reset(struct mii_bus *bus)
* on MDC, so perform a dummy mdio read. To be updated for GMAC4
* if needed.
*/
- if (!priv->plat->has_gmac4)
+ if (priv->plat->core_type != DWMAC_CORE_GMAC4)
writel(0, priv->ioaddr + mii_address);
#endif
return 0;
@@ -497,12 +425,11 @@ int stmmac_mdio_reset(struct mii_bus *bus)
int stmmac_pcs_setup(struct net_device *ndev)
{
+ struct stmmac_priv *priv = netdev_priv(ndev);
struct fwnode_handle *devnode, *pcsnode;
struct dw_xpcs *xpcs = NULL;
- struct stmmac_priv *priv;
int addr, ret;
- priv = netdev_priv(ndev);
devnode = priv->plat->port_node;
if (priv->plat->pcs_init) {
@@ -524,6 +451,9 @@ int stmmac_pcs_setup(struct net_device *ndev)
if (ret)
return dev_err_probe(priv->device, ret, "No xPCS found\n");
+ if (xpcs)
+ xpcs_config_eee_mult_fact(xpcs, priv->plat->mult_fact_100ns);
+
priv->hw->xpcs = xpcs;
return 0;
@@ -544,6 +474,102 @@ void stmmac_pcs_clean(struct net_device *ndev)
}
/**
+ * stmmac_clk_csr_set - dynamically set the MDC clock
+ * @priv: driver private structure
+ * Description: this is to dynamically set the MDC clock according to the csr
+ * clock input.
+ * Return: MII register CR field value
+ * Note:
+ * If a specific clk_csr value is passed from the platform
+ * this means that the CSR Clock Range selection cannot be
+ * changed at run-time and it is fixed (as reported in the driver
+ * documentation). Otherwise the driver will try to set the MDC
+ * clock dynamically according to the actual clock input.
+ */
+static u32 stmmac_clk_csr_set(struct stmmac_priv *priv)
+{
+ unsigned long clk_rate;
+ u32 value = ~0;
+
+ clk_rate = clk_get_rate(priv->plat->stmmac_clk);
+
+ /* The platform-provided default clk_csr is assumed valid in all
+ * other cases except those listed below. For rates higher than the
+ * IEEE 802.3 specified frequency we cannot estimate the proper
+ * divider, as the frequency of clk_csr_i is not known, so the
+ * default divider is left unchanged.
+ */
+ if (clk_rate < CSR_F_35M)
+ value = STMMAC_CSR_20_35M;
+ else if (clk_rate < CSR_F_60M)
+ value = STMMAC_CSR_35_60M;
+ else if (clk_rate < CSR_F_100M)
+ value = STMMAC_CSR_60_100M;
+ else if (clk_rate < CSR_F_150M)
+ value = STMMAC_CSR_100_150M;
+ else if (clk_rate < CSR_F_250M)
+ value = STMMAC_CSR_150_250M;
+ else if (clk_rate <= CSR_F_300M)
+ value = STMMAC_CSR_250_300M;
+ else if (clk_rate < CSR_F_500M)
+ value = STMMAC_CSR_300_500M;
+ else if (clk_rate < CSR_F_800M)
+ value = STMMAC_CSR_500_800M;
+
+ if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
+ if (clk_rate > 160000000)
+ value = 0x03;
+ else if (clk_rate > 80000000)
+ value = 0x02;
+ else if (clk_rate > 40000000)
+ value = 0x01;
+ else
+ value = 0;
+ }
+
+ if (priv->plat->core_type == DWMAC_CORE_XGMAC) {
+ if (clk_rate > 400000000)
+ value = 0x5;
+ else if (clk_rate > 350000000)
+ value = 0x4;
+ else if (clk_rate > 300000000)
+ value = 0x3;
+ else if (clk_rate > 250000000)
+ value = 0x2;
+ else if (clk_rate > 150000000)
+ value = 0x1;
+ else
+ value = 0x0;
+ }
+
+ return value;
+}
+
+static void stmmac_mdio_bus_config(struct stmmac_priv *priv)
+{
+ u32 value;
+
+ /* If a specific clk_csr value is passed from the platform, this means
+ * that the CSR Clock Range value should not be computed from the CSR
+ * clock.
+ */
+ if (priv->plat->clk_csr >= 0)
+ value = priv->plat->clk_csr;
+ else
+ value = stmmac_clk_csr_set(priv);
+
+ value <<= priv->hw->mii.clk_csr_shift;
+
+ if (value & ~priv->hw->mii.clk_csr_mask)
+ dev_warn(priv->device,
+ "clk_csr value out of range (0x%08x exceeds mask 0x%08x), truncating\n",
+ value, priv->hw->mii.clk_csr_mask);
+
+ priv->gmii_address_bus_config = value & priv->hw->mii.clk_csr_mask;
+}
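A worked example of the two functions above; the 125 MHz CSR clock rate is assumed, not taken from the patch:

/* With clk_get_rate() returning 125 MHz, stmmac_clk_csr_set() falls into the
 * "< CSR_F_150M" branch and returns STMMAC_CSR_100_150M.
 * stmmac_mdio_bus_config() then shifts that value by mii.clk_csr_shift, masks
 * it with mii.clk_csr_mask and caches the result in
 * priv->gmii_address_bus_config, which every MDIO access ORs into the GMII
 * Address register.
 */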
+
+/**
* stmmac_mdio_register
* @ndev: net device structure
* Description: it registers the MII bus
@@ -557,12 +583,15 @@ int stmmac_mdio_register(struct net_device *ndev)
struct device_node *mdio_node = priv->plat->mdio_node;
struct device *dev = ndev->dev.parent;
struct fwnode_handle *fixed_node;
+ int max_addr = PHY_MAX_ADDR - 1;
struct fwnode_handle *fwnode;
- int addr, found, max_addr;
+ struct phy_device *phydev;
if (!mdio_bus_data)
return 0;
+ stmmac_mdio_bus_config(priv);
+
new_bus = mdiobus_alloc();
if (!new_bus)
return -ENOMEM;
@@ -572,7 +601,7 @@ int stmmac_mdio_register(struct net_device *ndev)
new_bus->name = "stmmac";
- if (priv->plat->has_xgmac) {
+ if (priv->plat->core_type == DWMAC_CORE_XGMAC) {
new_bus->read = &stmmac_xgmac2_mdio_read_c22;
new_bus->write = &stmmac_xgmac2_mdio_write_c22;
new_bus->read_c45 = &stmmac_xgmac2_mdio_read_c45;
@@ -580,25 +609,20 @@ int stmmac_mdio_register(struct net_device *ndev)
if (priv->synopsys_id < DWXGMAC_CORE_2_20) {
/* Right now only C22 phys are supported */
- max_addr = MII_XGMAC_MAX_C22ADDR + 1;
+ max_addr = MII_XGMAC_MAX_C22ADDR;
/* Check if DT specified an unsupported phy addr */
if (priv->plat->phy_addr > MII_XGMAC_MAX_C22ADDR)
dev_err(dev, "Unsupported phy_addr (max=%d)\n",
MII_XGMAC_MAX_C22ADDR);
- } else {
- /* XGMAC version 2.20 onwards support 32 phy addr */
- max_addr = PHY_MAX_ADDR;
}
} else {
new_bus->read = &stmmac_mdio_read_c22;
new_bus->write = &stmmac_mdio_write_c22;
- if (priv->plat->has_gmac4) {
+ if (priv->plat->core_type == DWMAC_CORE_GMAC4) {
new_bus->read_c45 = &stmmac_mdio_read_c45;
new_bus->write_c45 = &stmmac_mdio_write_c45;
}
-
- max_addr = PHY_MAX_ADDR;
}
if (mdio_bus_data->needs_reset)
@@ -621,7 +645,7 @@ int stmmac_mdio_register(struct net_device *ndev)
}
/* Looks like we need a dummy read for XGMAC only and C45 PHYs */
- if (priv->plat->has_xgmac)
+ if (priv->plat->core_type == DWMAC_CORE_XGMAC)
stmmac_xgmac2_mdio_read_c45(new_bus, 0, 0, 0);
/* If fixed-link is set, skip PHY scanning */
@@ -640,41 +664,31 @@ int stmmac_mdio_register(struct net_device *ndev)
if (priv->plat->phy_node || mdio_node)
goto bus_register_done;
- found = 0;
- for (addr = 0; addr < max_addr; addr++) {
- struct phy_device *phydev = mdiobus_get_phy(new_bus, addr);
-
- if (!phydev)
- continue;
-
- /*
- * If an IRQ was provided to be assigned after
- * the bus probe, do it here.
- */
- if (!mdio_bus_data->irqs &&
- (mdio_bus_data->probed_phy_irq > 0)) {
- new_bus->irq[addr] = mdio_bus_data->probed_phy_irq;
- phydev->irq = mdio_bus_data->probed_phy_irq;
- }
-
- /*
- * If we're going to bind the MAC to this PHY bus,
- * and no PHY number was provided to the MAC,
- * use the one probed here.
- */
- if (priv->plat->phy_addr == -1)
- priv->plat->phy_addr = addr;
-
- phy_attached_info(phydev);
- found = 1;
- }
-
- if (!found && !mdio_node) {
+ phydev = phy_find_first(new_bus);
+ if (!phydev || phydev->mdio.addr > max_addr) {
dev_warn(dev, "No PHY found\n");
err = -ENODEV;
goto no_phy_found;
}
+ /*
+ * If an IRQ was provided to be assigned after
+ * the bus probe, do it here.
+ */
+ if (!mdio_bus_data->irqs && mdio_bus_data->probed_phy_irq > 0) {
+ new_bus->irq[phydev->mdio.addr] = mdio_bus_data->probed_phy_irq;
+ phydev->irq = mdio_bus_data->probed_phy_irq;
+ }
+
+ /*
+ * If we're going to bind the MAC to this PHY bus, and no PHY number
+ * was provided to the MAC, use the one probed here.
+ */
+ if (priv->plat->phy_addr == -1)
+ priv->plat->phy_addr = phydev->mdio.addr;
+
+ phy_attached_info(phydev);
+
bus_register_done:
priv->mii = new_bus;
@@ -706,3 +720,17 @@ int stmmac_mdio_unregister(struct net_device *ndev)
return 0;
}
+
+void stmmac_mdio_lock(struct stmmac_priv *priv)
+{
+ if (priv->mii)
+ mutex_lock(&priv->mii->mdio_lock);
+}
+EXPORT_SYMBOL_GPL(stmmac_mdio_lock);
+
+void stmmac_mdio_unlock(struct stmmac_priv *priv)
+{
+ if (priv->mii)
+ mutex_unlock(&priv->mii->mdio_lock);
+}
+EXPORT_SYMBOL_GPL(stmmac_mdio_unlock);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 352b01678c22..270ad066ced3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -14,6 +14,7 @@
#include <linux/dmi.h>
#include "stmmac.h"
+#include "stmmac_libpci.h"
struct stmmac_pci_info {
int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
@@ -21,31 +22,12 @@ struct stmmac_pci_info {
static void common_default_data(struct plat_stmmacenet_data *plat)
{
- plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
- plat->has_gmac = 1;
+ /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+ plat->clk_csr = STMMAC_CSR_20_35M;
+ plat->core_type = DWMAC_CORE_GMAC;
plat->force_sf_dma_mode = 1;
plat->mdio_bus_data->needs_reset = true;
-
- /* Set default value for multicast hash bins */
- plat->multicast_filter_bins = HASH_TABLE_SIZE;
-
- /* Set default value for unicast filter entries */
- plat->unicast_filter_entries = 1;
-
- /* Set the maxmtu to a default of JUMBO_LEN */
- plat->maxmtu = JUMBO_LEN;
-
- /* Set default number of RX and TX queues to use */
- plat->tx_queues_to_use = 1;
- plat->rx_queues_to_use = 1;
-
- /* Disable Priority config by default */
- plat->tx_queues_cfg[0].use_prio = false;
- plat->rx_queues_cfg[0].use_prio = false;
-
- /* Disable RX queues routing by default */
- plat->rx_queues_cfg[0].pkt_route = 0x0;
}
static int stmmac_default_data(struct pci_dev *pdev,
@@ -74,28 +56,18 @@ static int snps_gmac5_default_data(struct pci_dev *pdev,
{
int i;
- plat->clk_csr = 5;
- plat->has_gmac4 = 1;
+ plat->clk_csr = STMMAC_CSR_250_300M;
+ plat->core_type = DWMAC_CORE_GMAC4;
plat->force_sf_dma_mode = 1;
plat->flags |= STMMAC_FLAG_TSO_EN;
plat->pmt = 1;
- /* Set default value for multicast hash bins */
- plat->multicast_filter_bins = HASH_TABLE_SIZE;
-
- /* Set default value for unicast filter entries */
- plat->unicast_filter_entries = 1;
-
- /* Set the maxmtu to a default of JUMBO_LEN */
- plat->maxmtu = JUMBO_LEN;
-
/* Set default number of RX and TX queues to use */
plat->tx_queues_to_use = 4;
plat->rx_queues_to_use = 4;
plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
for (i = 0; i < plat->tx_queues_to_use; i++) {
- plat->tx_queues_cfg[i].use_prio = false;
plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
plat->tx_queues_cfg[i].weight = 25;
if (i > 0)
@@ -103,15 +75,10 @@ static int snps_gmac5_default_data(struct pci_dev *pdev,
}
plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
- for (i = 0; i < plat->rx_queues_to_use; i++) {
- plat->rx_queues_cfg[i].use_prio = false;
+ for (i = 0; i < plat->rx_queues_to_use; i++)
plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
- plat->rx_queues_cfg[i].pkt_route = 0x0;
- plat->rx_queues_cfg[i].chan = i;
- }
plat->bus_id = 1;
- plat->phy_addr = -1;
plat->phy_interface = PHY_INTERFACE_MODE_GMII;
plat->dma_cfg->pbl = 32;
@@ -126,10 +93,8 @@ static int snps_gmac5_default_data(struct pci_dev *pdev,
plat->axi->axi_rd_osr_lmt = 31;
plat->axi->axi_fb = false;
- plat->axi->axi_blen[0] = 4;
- plat->axi->axi_blen[1] = 8;
- plat->axi->axi_blen[2] = 16;
- plat->axi->axi_blen[3] = 32;
+ plat->axi->axi_blen_regval = DMA_AXI_BLEN4 | DMA_AXI_BLEN8 |
+ DMA_AXI_BLEN16 | DMA_AXI_BLEN32;
return 0;
}
@@ -155,11 +120,11 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
{
struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
struct plat_stmmacenet_data *plat;
- struct stmmac_resources res;
- int i;
+ struct stmmac_resources res = {};
int ret;
+ int i;
- plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+ plat = stmmac_plat_dat_alloc(&pdev->dev);
if (!plat)
return -ENOMEM;
@@ -192,9 +157,9 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
- ret = pcim_iomap_regions(pdev, BIT(i), pci_name(pdev));
- if (ret)
- return ret;
+ res.addr = pcim_iomap_region(pdev, i, STMMAC_RESOURCE_NAME);
+ if (IS_ERR(res.addr))
+ return PTR_ERR(res.addr);
break;
}
@@ -204,8 +169,6 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- memset(&res, 0, sizeof(res));
- res.addr = pcim_iomap_table(pdev)[i];
res.wol_irq = pdev->irq;
res.irq = pdev->irq;
@@ -219,6 +182,9 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
plat->safety_feat_cfg->prtyen = 1;
plat->safety_feat_cfg->tmouten = 1;
+ plat->suspend = stmmac_pci_plat_suspend;
+ plat->resume = stmmac_pci_plat_resume;
+
return stmmac_dvr_probe(&pdev->dev, plat, &res);
}
@@ -226,60 +192,13 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
* stmmac_pci_remove
*
* @pdev: platform device pointer
- * Description: this function calls the main to free the net resources
- * and releases the PCI resources.
+ * Description: this function calls the main to free the net resources.
*/
static void stmmac_pci_remove(struct pci_dev *pdev)
{
- int i;
-
stmmac_dvr_remove(&pdev->dev);
-
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- if (pci_resource_len(pdev, i) == 0)
- continue;
- pcim_iounmap_regions(pdev, BIT(i));
- break;
- }
-}
-
-static int __maybe_unused stmmac_pci_suspend(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- ret = stmmac_suspend(dev);
- if (ret)
- return ret;
-
- ret = pci_save_state(pdev);
- if (ret)
- return ret;
-
- pci_disable_device(pdev);
- pci_wake_from_d3(pdev, true);
- return 0;
}
-static int __maybe_unused stmmac_pci_resume(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- pci_restore_state(pdev);
- pci_set_power_state(pdev, PCI_D0);
-
- ret = pci_enable_device(pdev);
- if (ret)
- return ret;
-
- pci_set_master(pdev);
-
- return stmmac_resume(dev);
-}
-
-static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);
-
/* synthetic ID, no official vendor */
#define PCI_VENDOR_ID_STMMAC 0x0700
@@ -301,7 +220,7 @@ static struct pci_driver stmmac_pci_driver = {
.probe = stmmac_pci_probe,
.remove = stmmac_pci_remove,
.driver = {
- .pm = &stmmac_pm_ops,
+ .pm = &stmmac_simple_pm_ops,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.c
new file mode 100644
index 000000000000..e2f531c11986
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include "stmmac.h"
+#include "stmmac_pcs.h"
+
+static int dwmac_integrated_pcs_enable(struct phylink_pcs *pcs)
+{
+ struct stmmac_pcs *spcs = phylink_pcs_to_stmmac_pcs(pcs);
+
+ stmmac_mac_irq_modify(spcs->priv, 0, spcs->int_mask);
+
+ return 0;
+}
+
+static void dwmac_integrated_pcs_disable(struct phylink_pcs *pcs)
+{
+ struct stmmac_pcs *spcs = phylink_pcs_to_stmmac_pcs(pcs);
+
+ stmmac_mac_irq_modify(spcs->priv, spcs->int_mask, 0);
+}
+
+static void dwmac_integrated_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
+ struct phylink_link_state *state)
+{
+ state->link = false;
+}
+
+static int dwmac_integrated_pcs_config(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct stmmac_pcs *spcs = phylink_pcs_to_stmmac_pcs(pcs);
+
+ dwmac_ctrl_ane(spcs->base, 0, 1, spcs->priv->hw->reverse_sgmii_enable);
+
+ return 0;
+}
+
+static const struct phylink_pcs_ops dwmac_integrated_pcs_ops = {
+ .pcs_enable = dwmac_integrated_pcs_enable,
+ .pcs_disable = dwmac_integrated_pcs_disable,
+ .pcs_get_state = dwmac_integrated_pcs_get_state,
+ .pcs_config = dwmac_integrated_pcs_config,
+};
+
+int stmmac_integrated_pcs_init(struct stmmac_priv *priv, unsigned int offset,
+ u32 int_mask)
+{
+ struct stmmac_pcs *spcs;
+
+ spcs = devm_kzalloc(priv->device, sizeof(*spcs), GFP_KERNEL);
+ if (!spcs)
+ return -ENOMEM;
+
+ spcs->priv = priv;
+ spcs->base = priv->ioaddr + offset;
+ spcs->int_mask = int_mask;
+ spcs->pcs.ops = &dwmac_integrated_pcs_ops;
+
+ __set_bit(PHY_INTERFACE_MODE_SGMII, spcs->pcs.supported_interfaces);
+
+ priv->integrated_pcs = spcs;
+
+ return 0;
+}
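The per-core hookup lives outside this file; a hedged sketch of the pcs_init callback that stmmac_mac_pcs_init() would dispatch to, where the register offset and interrupt-mask macros are placeholder names:

static int dwmac1000_pcs_init(struct stmmac_priv *priv)
{
	/* GMAC_PCS_BASE and GMAC_INT_PCS_MASK are illustrative names only */
	return stmmac_integrated_pcs_init(priv, GMAC_PCS_BASE, GMAC_INT_PCS_MASK);
}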
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
index 1bdf87b237c4..cda93894168e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
@@ -9,6 +9,7 @@
#ifndef __STMMAC_PCS_H__
#define __STMMAC_PCS_H__
+#include <linux/phylink.h>
#include <linux/slab.h>
#include <linux/io.h>
#include "common.h"
@@ -16,6 +17,8 @@
/* PCS registers (AN/TBI/SGMII/RGMII) offsets */
#define GMAC_AN_CTRL(x) (x) /* AN control */
#define GMAC_AN_STATUS(x) (x + 0x4) /* AN status */
+
+/* ADV, LPA and EXP are only available for the TBI and RTBI interfaces */
#define GMAC_ANE_ADV(x) (x + 0x8) /* ANE Advertisement */
#define GMAC_ANE_LPA(x) (x + 0xc) /* ANE link partner ability */
#define GMAC_ANE_EXP(x) (x + 0x10) /* ANE expansion */
@@ -44,6 +47,24 @@
#define GMAC_ANE_RFE_SHIFT 12
#define GMAC_ANE_ACK BIT(14)
+struct stmmac_priv;
+
+struct stmmac_pcs {
+ struct stmmac_priv *priv;
+ void __iomem *base;
+ u32 int_mask;
+ struct phylink_pcs pcs;
+};
+
+static inline struct stmmac_pcs *
+phylink_pcs_to_stmmac_pcs(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct stmmac_pcs, pcs);
+}
+
+int stmmac_integrated_pcs_init(struct stmmac_priv *priv, unsigned int offset,
+ u32 int_mask);
+
/**
* dwmac_pcs_isr - TBI, RTBI, or SGMII PHY ISR
* @ioaddr: IO registers pointer
@@ -80,13 +101,12 @@ static inline void dwmac_pcs_isr(void __iomem *ioaddr, u32 reg,
* @reg: Base address of the AN Control Register.
* @ane: to enable the auto-negotiation
* @srgmi_ral: to manage MAC-2-MAC SGMII connections.
- * @loopback: to cause the PHY to loopback tx data into rx path.
 * Description: this is the main function to configure the AN control register,
 * init the ANE and, when requested, configure SGMII RAL.
*/
static inline void dwmac_ctrl_ane(void __iomem *ioaddr, u32 reg, bool ane,
- bool srgmi_ral, bool loopback)
+ bool srgmi_ral)
{
u32 value = readl(ioaddr + GMAC_AN_CTRL(reg));
@@ -102,39 +122,6 @@ static inline void dwmac_ctrl_ane(void __iomem *ioaddr, u32 reg, bool ane,
if (srgmi_ral)
value |= GMAC_AN_CTRL_SGMRAL;
- if (loopback)
- value |= GMAC_AN_CTRL_ELE;
-
writel(value, ioaddr + GMAC_AN_CTRL(reg));
}
-
-/**
- * dwmac_get_adv_lp - Get ADV and LP cap
- * @ioaddr: IO registers pointer
- * @reg: Base address of the AN Control Register.
- * @adv_lp: structure to store the adv,lp status
- * Description: this is to expose the ANE advertisement and Link partner ability
- * status to ethtool support.
- */
-static inline void dwmac_get_adv_lp(void __iomem *ioaddr, u32 reg,
- struct rgmii_adv *adv_lp)
-{
- u32 value = readl(ioaddr + GMAC_ANE_ADV(reg));
-
- if (value & GMAC_ANE_FD)
- adv_lp->duplex = DUPLEX_FULL;
- if (value & GMAC_ANE_HD)
- adv_lp->duplex |= DUPLEX_HALF;
-
- adv_lp->pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
-
- value = readl(ioaddr + GMAC_ANE_LPA(reg));
-
- if (value & GMAC_ANE_FD)
- adv_lp->lp_duplex = DUPLEX_FULL;
- if (value & GMAC_ANE_HD)
- adv_lp->lp_duplex = DUPLEX_HALF;
-
- adv_lp->lp_pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
-}
#endif /* __STMMAC_PCS_H__ */
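
With the loopback parameter gone, any remaining caller of dwmac_ctrl_ane() has to drop its final argument; the call added in stmmac_pcs.c above is the reference pattern:

	/* ANE enabled, SGMII RAL taken from the reverse-SGMII setting */
	dwmac_ctrl_ane(spcs->base, 0, 1, spcs->priv->hw->reverse_sgmii_enable);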
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index ad868e8d195d..8979a50b5507 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -60,7 +60,7 @@ static int dwmac1000_validate_mcast_bins(struct device *dev, int mcast_bins)
* Description:
* This function validates the number of Unicast address entries supported
* by a particular Synopsys 10/100/1000 controller. The Synopsys controller
- * supports 1..32, 64, or 128 Unicast filter entries for it's Unicast filter
+ * supports 1..32, 64, or 128 Unicast filter entries for its Unicast filter
* logic. This function validates a valid, supported configuration is
* selected, and defaults to 1 Unicast address if an unsupported
* configuration is selected.
@@ -95,6 +95,7 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
{
struct device_node *np;
struct stmmac_axi *axi;
+ u32 axi_blen[AXI_BLEN];
np = of_parse_phandle(pdev->dev.of_node, "snps,axi-config", 0);
if (!np)
@@ -117,7 +118,8 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
axi->axi_wr_osr_lmt = 1;
if (of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt))
axi->axi_rd_osr_lmt = 1;
- of_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN);
+ of_property_read_u32_array(np, "snps,blen", axi_blen, AXI_BLEN);
+ stmmac_axi_blen_to_mask(&axi->axi_blen_regval, axi_blen, AXI_BLEN);
of_node_put(np);
return axi;
@@ -137,13 +139,6 @@ static int stmmac_mtl_setup(struct platform_device *pdev,
u8 queue = 0;
int ret = 0;
- /* For backwards-compatibility with device trees that don't have any
- * snps,mtl-rx-config or snps,mtl-tx-config properties, we fall back
- * to one RX and TX queues each.
- */
- plat->rx_queues_to_use = 1;
- plat->tx_queues_to_use = 1;
-
/* First Queue must always be in DCB mode. As MTL_QUEUE_DCB = 1 we need
* to always set this, otherwise Queue will be classified as AVB
* (because MTL_QUEUE_AVB = 0).
@@ -162,9 +157,8 @@ static int stmmac_mtl_setup(struct platform_device *pdev,
}
/* Processing RX queues common config */
- if (of_property_read_u32(rx_node, "snps,rx-queues-to-use",
- &plat->rx_queues_to_use))
- plat->rx_queues_to_use = 1;
+ of_property_read_u32(rx_node, "snps,rx-queues-to-use",
+ &plat->rx_queues_to_use);
if (of_property_read_bool(rx_node, "snps,rx-sched-sp"))
plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
@@ -185,18 +179,13 @@ static int stmmac_mtl_setup(struct platform_device *pdev,
else
plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
- if (of_property_read_u32(q_node, "snps,map-to-dma-channel",
- &plat->rx_queues_cfg[queue].chan))
- plat->rx_queues_cfg[queue].chan = queue;
+ of_property_read_u32(q_node, "snps,map-to-dma-channel",
+ &plat->rx_queues_cfg[queue].chan);
/* TODO: Dynamic mapping to be included in the future */
- if (of_property_read_u32(q_node, "snps,priority",
- &plat->rx_queues_cfg[queue].prio)) {
- plat->rx_queues_cfg[queue].prio = 0;
- plat->rx_queues_cfg[queue].use_prio = false;
- } else {
+ if (!of_property_read_u32(q_node, "snps,priority",
+ &plat->rx_queues_cfg[queue].prio))
plat->rx_queues_cfg[queue].use_prio = true;
- }
/* RX queue specific packet type routing */
if (of_property_read_bool(q_node, "snps,route-avcp"))
@@ -209,8 +198,6 @@ static int stmmac_mtl_setup(struct platform_device *pdev,
plat->rx_queues_cfg[queue].pkt_route = PACKET_UPQ;
else if (of_property_read_bool(q_node, "snps,route-multi-broad"))
plat->rx_queues_cfg[queue].pkt_route = PACKET_MCBCQ;
- else
- plat->rx_queues_cfg[queue].pkt_route = 0x0;
queue++;
}
@@ -221,9 +208,8 @@ static int stmmac_mtl_setup(struct platform_device *pdev,
}
/* Processing TX queues common config */
- if (of_property_read_u32(tx_node, "snps,tx-queues-to-use",
- &plat->tx_queues_to_use))
- plat->tx_queues_to_use = 1;
+ of_property_read_u32(tx_node, "snps,tx-queues-to-use",
+ &plat->tx_queues_to_use);
if (of_property_read_bool(tx_node, "snps,tx-sched-wrr"))
plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
@@ -268,13 +254,9 @@ static int stmmac_mtl_setup(struct platform_device *pdev,
plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
}
- if (of_property_read_u32(q_node, "snps,priority",
- &plat->tx_queues_cfg[queue].prio)) {
- plat->tx_queues_cfg[queue].prio = 0;
- plat->tx_queues_cfg[queue].use_prio = false;
- } else {
+ if (!of_property_read_u32(q_node, "snps,priority",
+ &plat->tx_queues_cfg[queue].prio))
plat->tx_queues_cfg[queue].use_prio = true;
- }
plat->tx_queues_cfg[queue].coe_unsupported =
of_property_read_bool(q_node, "snps,coe-unsupported");
@@ -405,21 +387,17 @@ static int stmmac_of_get_mac_mode(struct device_node *np)
return -ENODEV;
}
-/**
- * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt()
- * @pdev: platform_device structure
- * @plat: driver data platform structure
- *
- * Release resources claimed by stmmac_probe_config_dt().
- */
-static void stmmac_remove_config_dt(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat)
-{
- clk_disable_unprepare(plat->stmmac_clk);
- clk_disable_unprepare(plat->pclk);
- of_node_put(plat->phy_node);
- of_node_put(plat->mdio_node);
-}
+/* Compatible string array for all gmac4 devices */
+static const char * const stmmac_gmac4_compats[] = {
+ "snps,dwmac-4.00",
+ "snps,dwmac-4.10a",
+ "snps,dwmac-4.20a",
+ "snps,dwmac-5.00a",
+ "snps,dwmac-5.10a",
+ "snps,dwmac-5.20",
+ "snps,dwmac-5.30a",
+ NULL
+};
/**
* stmmac_probe_config_dt - parse device-tree driver parameters
@@ -435,11 +413,12 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
struct device_node *np = pdev->dev.of_node;
struct plat_stmmacenet_data *plat;
struct stmmac_dma_cfg *dma_cfg;
+ static int bus_id = -ENODEV;
int phy_mode;
void *ret;
int rc;
- plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+ plat = stmmac_plat_dat_alloc(&pdev->dev);
if (!plat)
return ERR_PTR(-ENOMEM);
@@ -456,8 +435,12 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
return ERR_PTR(phy_mode);
plat->phy_interface = phy_mode;
+
rc = stmmac_of_get_mac_mode(np);
- plat->mac_interface = rc < 0 ? plat->phy_interface : rc;
+ if (rc >= 0 && rc != phy_mode)
+ dev_warn(&pdev->dev,
+ "\"mac-mode\" property used for %s but differs to \"phy-mode\" of %s, and will be ignored. Please report.\n",
+ phy_modes(rc), phy_modes(phy_mode));
/* Some wrapper drivers still rely on phy_node. Let's save it while
* they are not converted to phylink. */
@@ -470,16 +453,15 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
of_property_read_u32(np, "max-speed", &plat->max_speed);
plat->bus_id = of_alias_get_id(np, "ethernet");
- if (plat->bus_id < 0)
- plat->bus_id = 0;
-
- /* Default to phy auto-detection */
- plat->phy_addr = -1;
+ if (plat->bus_id < 0) {
+ if (bus_id < 0)
+ bus_id = of_alias_get_highest_id("ethernet");
+ /* No ethernet alias found, init at -1 so first bus_id is 0 */
+ if (bus_id < 0)
+ bus_id = -1;
+ plat->bus_id = ++bus_id;
+ }
- /* Default to get clk_csr from stmmac_clk_csr_set(),
- * or get clk_csr from device tree.
- */
- plat->clk_csr = -1;
if (of_property_read_u32(np, "snps,clk-csr", &plat->clk_csr))
of_property_read_u32(np, "clk_csr", &plat->clk_csr);
@@ -490,8 +472,10 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
rc = stmmac_mdio_setup(plat, np, &pdev->dev);
- if (rc)
- return ERR_PTR(rc);
+ if (rc) {
+ ret = ERR_PTR(rc);
+ goto error_put_phy;
+ }
of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);
@@ -500,19 +484,11 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
plat->force_sf_dma_mode =
of_property_read_bool(np, "snps,force_sf_dma_mode");
- if (of_property_read_bool(np, "snps,en-tx-lpi-clockgating"))
+ if (of_property_read_bool(np, "snps,en-tx-lpi-clockgating")) {
+ dev_warn(&pdev->dev,
+ "OF property snps,en-tx-lpi-clockgating is deprecated, please convert driver to use STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP\n");
plat->flags |= STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;
-
- /* Set the maxmtu to a default of JUMBO_LEN in case the
- * parameter is not present in the device tree.
- */
- plat->maxmtu = JUMBO_LEN;
-
- /* Set default value for multicast hash bins */
- plat->multicast_filter_bins = HASH_TABLE_SIZE;
-
- /* Set default value for unicast filter entries */
- plat->unicast_filter_entries = 1;
+ }
/*
* Currently only the properties needed on SPEAr600
@@ -522,6 +498,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
if (of_device_is_compatible(np, "st,spear600-gmac") ||
of_device_is_compatible(np, "snps,dwmac-3.50a") ||
of_device_is_compatible(np, "snps,dwmac-3.70a") ||
+ of_device_is_compatible(np, "snps,dwmac-3.72a") ||
of_device_is_compatible(np, "snps,dwmac")) {
/* Note that the max-frame-size parameter as defined in the
* ePAPR v1.1 spec is defined as max-frame-size, it's
@@ -539,25 +516,20 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
&pdev->dev, plat->unicast_filter_entries);
plat->multicast_filter_bins = dwmac1000_validate_mcast_bins(
&pdev->dev, plat->multicast_filter_bins);
- plat->has_gmac = 1;
+ plat->core_type = DWMAC_CORE_GMAC;
plat->pmt = 1;
}
if (of_device_is_compatible(np, "snps,dwmac-3.40a")) {
- plat->has_gmac = 1;
+ plat->core_type = DWMAC_CORE_GMAC;
plat->enh_desc = 1;
plat->tx_coe = 1;
plat->bugged_jumbo = 1;
plat->pmt = 1;
}
- if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
- of_device_is_compatible(np, "snps,dwmac-4.10a") ||
- of_device_is_compatible(np, "snps,dwmac-4.20a") ||
- of_device_is_compatible(np, "snps,dwmac-5.10a") ||
- of_device_is_compatible(np, "snps,dwmac-5.20")) {
- plat->has_gmac4 = 1;
- plat->has_gmac = 0;
+ if (of_device_compatible_match(np, stmmac_gmac4_compats)) {
+ plat->core_type = DWMAC_CORE_GMAC4;
plat->pmt = 1;
if (of_property_read_bool(np, "snps,tso"))
plat->flags |= STMMAC_FLAG_TSO_EN;
@@ -571,17 +543,19 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
}
if (of_device_is_compatible(np, "snps,dwxgmac")) {
- plat->has_xgmac = 1;
+ plat->core_type = DWMAC_CORE_XGMAC;
plat->pmt = 1;
if (of_property_read_bool(np, "snps,tso"))
plat->flags |= STMMAC_FLAG_TSO_EN;
+ of_property_read_u32(np, "snps,multicast-filter-bins",
+ &plat->multicast_filter_bins);
}
dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
GFP_KERNEL);
if (!dma_cfg) {
- stmmac_remove_config_dt(pdev, plat);
- return ERR_PTR(-ENOMEM);
+ ret = ERR_PTR(-ENOMEM);
+ goto error_put_mdio;
}
plat->dma_cfg = dma_cfg;
@@ -609,8 +583,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
rc = stmmac_mtl_setup(pdev, plat);
if (rc) {
- stmmac_remove_config_dt(pdev, plat);
- return ERR_PTR(rc);
+ ret = ERR_PTR(rc);
+ goto error_put_mdio;
}
/* clock setup */
@@ -639,7 +613,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
dev_info(&pdev->dev, "PTP uses main clock\n");
} else {
plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
- dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
+ dev_dbg(&pdev->dev, "PTP rate %lu\n", plat->clk_ptp_rate);
}
plat->stmmac_rst = devm_reset_control_get_optional(&pdev->dev,
@@ -662,6 +636,10 @@ error_hw_init:
clk_disable_unprepare(plat->pclk);
error_pclk_get:
clk_disable_unprepare(plat->stmmac_clk);
+error_put_mdio:
+ of_node_put(plat->mdio_node);
+error_put_phy:
+ of_node_put(plat->phy_node);
return ret;
}
@@ -670,16 +648,17 @@ static void devm_stmmac_remove_config_dt(void *data)
{
struct plat_stmmacenet_data *plat = data;
- /* Platform data argument is unused */
- stmmac_remove_config_dt(NULL, plat);
+ clk_disable_unprepare(plat->stmmac_clk);
+ clk_disable_unprepare(plat->pclk);
+ of_node_put(plat->mdio_node);
+ of_node_put(plat->phy_node);
}
/**
* devm_stmmac_probe_config_dt
* @pdev: platform_device structure
* @mac: MAC address to use
- * Description: Devres variant of stmmac_probe_config_dt(). Does not require
- * the user to call stmmac_remove_config_dt() at driver detach.
+ * Description: Devres variant of stmmac_probe_config_dt().
*/
struct plat_stmmacenet_data *
devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
@@ -707,6 +686,17 @@ devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
#endif /* CONFIG_OF */
EXPORT_SYMBOL_GPL(devm_stmmac_probe_config_dt);
+struct clk *stmmac_pltfr_find_clk(struct plat_stmmacenet_data *plat_dat,
+ const char *name)
+{
+ for (int i = 0; i < plat_dat->num_clks; i++)
+ if (strcmp(plat_dat->clks[i].id, name) == 0)
+ return plat_dat->clks[i].clk;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(stmmac_pltfr_find_clk);
+
int stmmac_get_platform_resources(struct platform_device *pdev,
struct stmmac_resources *stmmac_res)
{
@@ -759,33 +749,49 @@ EXPORT_SYMBOL_GPL(stmmac_get_platform_resources);
/**
* stmmac_pltfr_init
- * @pdev: pointer to the platform device
+ * @dev: pointer to the device structure
* @plat: driver data platform structure
* Description: Call the platform's init callback (if any) and propagate
* the return value.
*/
-static int stmmac_pltfr_init(struct platform_device *pdev,
+static int stmmac_pltfr_init(struct device *dev,
struct plat_stmmacenet_data *plat)
{
int ret = 0;
if (plat->init)
- ret = plat->init(pdev, plat->bsp_priv);
+ ret = plat->init(dev, plat->bsp_priv);
return ret;
}
/**
* stmmac_pltfr_exit
- * @pdev: pointer to the platform device
+ * @dev: pointer to the device structure
* @plat: driver data platform structure
* Description: Call the platform's exit callback (if any).
*/
-static void stmmac_pltfr_exit(struct platform_device *pdev,
+static void stmmac_pltfr_exit(struct device *dev,
struct plat_stmmacenet_data *plat)
{
if (plat->exit)
- plat->exit(pdev, plat->bsp_priv);
+ plat->exit(dev, plat->bsp_priv);
+}
+
+static int stmmac_plat_suspend(struct device *dev, void *bsp_priv)
+{
+ struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(dev));
+
+ stmmac_pltfr_exit(dev, priv->plat);
+
+ return 0;
+}
+
+static int stmmac_plat_resume(struct device *dev, void *bsp_priv)
+{
+ struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(dev));
+
+ return stmmac_pltfr_init(dev, priv->plat);
}
/**
@@ -800,19 +806,12 @@ int stmmac_pltfr_probe(struct platform_device *pdev,
struct plat_stmmacenet_data *plat,
struct stmmac_resources *res)
{
- int ret;
-
- ret = stmmac_pltfr_init(pdev, plat);
- if (ret)
- return ret;
-
- ret = stmmac_dvr_probe(&pdev->dev, plat, res);
- if (ret) {
- stmmac_pltfr_exit(pdev, plat);
- return ret;
- }
+ if (!plat->suspend && plat->exit)
+ plat->suspend = stmmac_plat_suspend;
+ if (!plat->resume && plat->init)
+ plat->resume = stmmac_plat_resume;
- return ret;
+ return stmmac_dvr_probe(&pdev->dev, plat, res);
}
EXPORT_SYMBOL_GPL(stmmac_pltfr_probe);
@@ -854,54 +853,40 @@ EXPORT_SYMBOL_GPL(devm_stmmac_pltfr_probe);
*/
void stmmac_pltfr_remove(struct platform_device *pdev)
{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- struct plat_stmmacenet_data *plat = priv->plat;
-
stmmac_dvr_remove(&pdev->dev);
- stmmac_pltfr_exit(pdev, plat);
}
EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);
-/**
- * stmmac_pltfr_suspend
- * @dev: device pointer
- * Description: this function is invoked when suspend the driver and it direcly
- * call the main suspend function and then, if required, on some platform, it
- * can call an exit helper.
- */
-static int __maybe_unused stmmac_pltfr_suspend(struct device *dev)
+static int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
{
+ struct plat_stmmacenet_data *plat_dat = priv->plat;
int ret;
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- struct platform_device *pdev = to_platform_device(dev);
-
- ret = stmmac_suspend(dev);
- stmmac_pltfr_exit(pdev, priv->plat);
-
- return ret;
-}
-/**
- * stmmac_pltfr_resume
- * @dev: device pointer
- * Description: this function is invoked when resume the driver before calling
- * the main resume function, on some platforms, it can call own init helper
- * if required.
- */
-static int __maybe_unused stmmac_pltfr_resume(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- struct platform_device *pdev = to_platform_device(dev);
- int ret;
-
- ret = stmmac_pltfr_init(pdev, priv->plat);
- if (ret)
- return ret;
+ if (enabled) {
+ ret = clk_prepare_enable(plat_dat->stmmac_clk);
+ if (ret)
+ return ret;
+ ret = clk_prepare_enable(plat_dat->pclk);
+ if (ret) {
+ clk_disable_unprepare(plat_dat->stmmac_clk);
+ return ret;
+ }
+ if (plat_dat->clks_config) {
+ ret = plat_dat->clks_config(plat_dat->bsp_priv, enabled);
+ if (ret) {
+ clk_disable_unprepare(plat_dat->stmmac_clk);
+ clk_disable_unprepare(plat_dat->pclk);
+ return ret;
+ }
+ }
+ } else {
+ clk_disable_unprepare(plat_dat->stmmac_clk);
+ clk_disable_unprepare(plat_dat->pclk);
+ if (plat_dat->clks_config)
+ plat_dat->clks_config(plat_dat->bsp_priv, enabled);
+ }
- return stmmac_resume(dev);
+ return 0;
}
static int __maybe_unused stmmac_runtime_suspend(struct device *dev)
@@ -931,7 +916,7 @@ static int __maybe_unused stmmac_pltfr_noirq_suspend(struct device *dev)
if (!netif_running(ndev))
return 0;
- if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+ if (!priv->wolopts) {
/* Disable clock in case of PWM is off */
clk_disable_unprepare(priv->plat->clk_ptp_ref);
@@ -952,7 +937,7 @@ static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev)
if (!netif_running(ndev))
return 0;
- if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+ if (!priv->wolopts) {
/* enable the clk previously disabled */
ret = pm_runtime_force_resume(dev);
if (ret)
@@ -971,7 +956,7 @@ static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev)
}
const struct dev_pm_ops stmmac_pltfr_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_suspend, stmmac_pltfr_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(stmmac_suspend, stmmac_resume)
SET_RUNTIME_PM_OPS(stmmac_runtime_suspend, stmmac_runtime_resume, NULL)
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_noirq_suspend, stmmac_pltfr_noirq_resume)
};
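
The new stmmac_pltfr_find_clk() helper simply searches the bulk-clock array carried in plat_dat and returns NULL when the name is absent. A glue driver that has already populated plat_dat->clks/num_clks could use it roughly as below; the clock name "ptp_ref" is only an illustration, not something this patch mandates.

	/* Sketch: pick the PTP reference clock out of the bulk clocks, if present */
	struct clk *ptp_clk = stmmac_pltfr_find_clk(plat_dat, "ptp_ref");

	if (ptp_clk)
		plat_dat->clk_ptp_ref = ptp_clk;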
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
index 72dc1a32e46d..6e6561e29d6e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -14,6 +14,9 @@
struct plat_stmmacenet_data *
devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac);
+struct clk *stmmac_pltfr_find_clk(struct plat_stmmacenet_data *plat_dat,
+ const char *name);
+
int stmmac_get_platform_resources(struct platform_device *pdev,
struct stmmac_resources *stmmac_res);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index a6b1de9a251d..3e30172fa129 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -9,7 +9,8 @@
*******************************************************************************/
#include "stmmac.h"
#include "stmmac_ptp.h"
-#include "dwmac4.h"
+
+#define PTP_SAFE_TIME_OFFSET_NS 500000
/**
* stmmac_adjust_freq
@@ -56,7 +57,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
bool xmac, est_rst = false;
int ret;
- xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+ xmac = dwmac_is_xmac(priv->plat->core_type);
if (delta < 0) {
neg_adj = 1;
@@ -172,7 +173,11 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
u32 acr_value;
switch (rq->type) {
- case PTP_CLK_REQ_PEROUT:
+ case PTP_CLK_REQ_PEROUT: {
+ struct timespec64 curr_time;
+ u64 target_ns = 0;
+ u64 ns = 0;
+
/* Reject requests with unsupported flags */
if (rq->perout.flags)
return -EOPNOTSUPP;
@@ -181,6 +186,31 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
cfg->start.tv_sec = rq->perout.start.sec;
cfg->start.tv_nsec = rq->perout.start.nsec;
+
+ /* A time set in the past won't trigger the start of the flexible PPS
+ * generation for the GMAC5. For some reason it does for the GMAC4, but
+ * setting a time in the past should be addressed anyway. Therefore, any
+ * value set in the past is treated as an offset from the current MAC
+ * system time.
+ * Be aware that too small an offset may not trigger flexible PPS
+ * generation if the time spent in this configuration makes the targeted
+ * time already outdated. To address this, add a safe time offset.
+ */
+ if (!cfg->start.tv_sec && cfg->start.tv_nsec < PTP_SAFE_TIME_OFFSET_NS)
+ cfg->start.tv_nsec += PTP_SAFE_TIME_OFFSET_NS;
+
+ target_ns = cfg->start.tv_nsec + ((u64)cfg->start.tv_sec * NSEC_PER_SEC);
+
+ stmmac_get_systime(priv, priv->ptpaddr, &ns);
+ if (ns > TIME64_MAX - PTP_SAFE_TIME_OFFSET_NS)
+ return -EINVAL;
+
+ curr_time = ns_to_timespec64(ns);
+ if (target_ns < ns + PTP_SAFE_TIME_OFFSET_NS) {
+ cfg->start = timespec64_add_safe(cfg->start, curr_time);
+ if (cfg->start.tv_sec == TIME64_MAX)
+ return -EINVAL;
+ }
+
cfg->period.tv_sec = rq->perout.period.sec;
cfg->period.tv_nsec = rq->perout.period.nsec;
@@ -191,6 +221,7 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
priv->systime_flags);
write_unlock_irqrestore(&priv->ptp_lock, flags);
break;
+ }
case PTP_CLK_REQ_EXTTS: {
u8 channel;
@@ -248,10 +279,7 @@ static int stmmac_get_syncdevicetime(ktime_t *device,
{
struct stmmac_priv *priv = (struct stmmac_priv *)ctx;
- if (priv->plat->crosststamp)
- return priv->plat->crosststamp(device, system, ctx);
- else
- return -EOPNOTSUPP;
+ return priv->plat->crosststamp(device, system, ctx);
}
static int stmmac_getcrosststamp(struct ptp_clock_info *ptp,
@@ -265,7 +293,7 @@ static int stmmac_getcrosststamp(struct ptp_clock_info *ptp,
}
/* structure describing a PTP hardware clock */
-static struct ptp_clock_info stmmac_ptp_clock_ops = {
+const struct ptp_clock_info stmmac_ptp_clock_ops = {
.owner = THIS_MODULE,
.name = "stmmac ptp",
.max_adj = 62500000,
@@ -279,7 +307,23 @@ static struct ptp_clock_info stmmac_ptp_clock_ops = {
.gettime64 = stmmac_get_time,
.settime64 = stmmac_set_time,
.enable = stmmac_enable,
- .getcrosststamp = stmmac_getcrosststamp,
+};
+
+/* structure describing a PTP hardware clock for dwmac1000 */
+const struct ptp_clock_info dwmac1000_ptp_clock_ops = {
+ .owner = THIS_MODULE,
+ .name = "stmmac ptp",
+ .max_adj = 62500000,
+ .n_alarm = 0,
+ .n_ext_ts = 1,
+ .n_per_out = 0,
+ .n_pins = 0,
+ .pps = 0,
+ .adjfine = stmmac_adjust_freq,
+ .adjtime = stmmac_adjust_time,
+ .gettime64 = stmmac_get_time,
+ .settime64 = stmmac_set_time,
+ .enable = dwmac1000_ptp_enable,
};
/**
@@ -298,28 +342,40 @@ void stmmac_ptp_register(struct stmmac_priv *priv)
priv->pps[i].available = true;
}
- if (priv->plat->ptp_max_adj)
- stmmac_ptp_clock_ops.max_adj = priv->plat->ptp_max_adj;
-
/* Calculate the clock domain crossing (CDC) error if necessary */
priv->plat->cdc_error_adj = 0;
- if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate)
+ if (priv->plat->core_type == DWMAC_CORE_GMAC4)
priv->plat->cdc_error_adj = (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate;
- stmmac_ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num;
- stmmac_ptp_clock_ops.n_ext_ts = priv->dma_cap.aux_snapshot_n;
+ /* Update the ptp clock parameters based on feature discovery, when
+ * available
+ */
+ if (priv->dma_cap.pps_out_num)
+ priv->ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num;
+
+ if (priv->dma_cap.aux_snapshot_n)
+ priv->ptp_clock_ops.n_ext_ts = priv->dma_cap.aux_snapshot_n;
+
+ if (priv->plat->ptp_max_adj)
+ priv->ptp_clock_ops.max_adj = priv->plat->ptp_max_adj;
+
+ if (priv->plat->crosststamp)
+ priv->ptp_clock_ops.getcrosststamp = stmmac_getcrosststamp;
rwlock_init(&priv->ptp_lock);
mutex_init(&priv->aux_ts_lock);
- priv->ptp_clock_ops = stmmac_ptp_clock_ops;
priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops,
priv->device);
if (IS_ERR(priv->ptp_clock)) {
netdev_err(priv->dev, "ptp_clock_register failed\n");
priv->ptp_clock = NULL;
- } else if (priv->ptp_clock)
+ }
+
+ if (priv->ptp_clock)
netdev_info(priv->dev, "registered PTP clock\n");
+ else
+ mutex_destroy(&priv->aux_ts_lock);
}
/**
@@ -335,7 +391,7 @@ void stmmac_ptp_unregister(struct stmmac_priv *priv)
priv->ptp_clock = NULL;
pr_debug("Removed PTP HW clock successfully on %s\n",
priv->dev->name);
- }
- mutex_destroy(&priv->aux_ts_lock);
+ mutex_destroy(&priv->aux_ts_lock);
+ }
}
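
To make the PTP_SAFE_TIME_OFFSET_NS handling above concrete: with the MAC system time at 100.000000000 s, a PEROUT request for a start of 0 s / 0 ns is first padded to 0 s / 500000 ns and, because that still lies behind the current time, rebased to roughly 100.000500000 s before being programmed; a request whose start is already at least 500 us beyond the current MAC time is programmed unchanged.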
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
index fce3fba2ffd2..3fe0e3a80e80 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -94,4 +94,17 @@ enum aux_snapshot {
AUX_SNAPSHOT3 = 0x80,
};
+struct ptp_clock_info;
+struct ptp_clock_request;
+struct stmmac_priv;
+
+int dwmac1000_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on);
+
+void dwmac1000_get_ptptime(void __iomem *ptpaddr, u64 *ptp_time);
+void dwmac1000_timestamp_interrupt(struct stmmac_priv *priv);
+
+extern const struct ptp_clock_info stmmac_ptp_clock_ops;
+extern const struct ptp_clock_info dwmac1000_ptp_clock_ops;
+
#endif /* __STMMAC_PTP_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index 3ca1c2a816ff..e90a2c469b9a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -382,14 +382,14 @@ static int stmmac_test_phy_loopback(struct stmmac_priv *priv)
if (!priv->dev->phydev)
return -EOPNOTSUPP;
- ret = phy_loopback(priv->dev->phydev, true);
+ ret = phy_loopback(priv->dev->phydev, true, 0);
if (ret)
return ret;
attr.dst = priv->dev->dev_addr;
ret = __stmmac_test_loopback(priv, &attr);
- phy_loopback(priv->dev->phydev, false);
+ phy_loopback(priv->dev->phydev, false, 0);
return ret;
}
@@ -1721,7 +1721,7 @@ static int stmmac_test_sph(struct stmmac_priv *priv)
struct stmmac_packet_attrs attr = { };
int ret;
- if (!priv->sph)
+ if (!priv->sph_active)
return -EOPNOTSUPP;
/* Check for UDP first */
@@ -1985,7 +1985,7 @@ void stmmac_selftest_run(struct net_device *dev,
case STMMAC_LOOPBACK_PHY:
ret = -EOPNOTSUPP;
if (dev->phydev)
- ret = phy_loopback(dev->phydev, true);
+ ret = phy_loopback(dev->phydev, true, 0);
if (!ret)
break;
fallthrough;
@@ -2018,7 +2018,7 @@ void stmmac_selftest_run(struct net_device *dev,
case STMMAC_LOOPBACK_PHY:
ret = -EOPNOTSUPP;
if (dev->phydev)
- ret = phy_loopback(dev->phydev, false);
+ ret = phy_loopback(dev->phydev, false, 0);
if (!ret)
break;
fallthrough;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 75ad2da1a37f..d78652718599 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -262,10 +262,10 @@ static int tc_init(struct stmmac_priv *priv)
unsigned int count;
int ret, i;
- if (dma_cap->l3l4fnum) {
- priv->flow_entries_max = dma_cap->l3l4fnum;
+ priv->flow_entries_max = dma_cap->l3l4fnum;
+ if (priv->flow_entries_max) {
priv->flow_entries = devm_kcalloc(priv->device,
- dma_cap->l3l4fnum,
+ priv->flow_entries_max,
sizeof(*priv->flow_entries),
GFP_KERNEL);
if (!priv->flow_entries)
@@ -981,7 +981,7 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
if (qopt->cmd == TAPRIO_CMD_DESTROY)
goto disable;
- if (qopt->num_entries >= dep)
+ if (qopt->num_entries > dep)
return -EINVAL;
if (!qopt->cycle_time)
return -ERANGE;
@@ -1012,7 +1012,7 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
s64 delta_ns = qopt->entries[i].interval;
u32 gates = qopt->entries[i].gate_mask;
- if (delta_ns > GENMASK(wid, 0))
+ if (delta_ns > GENMASK(wid - 1, 0))
return -ERANGE;
if (gates > GENMASK(31 - wid, 0))
return -ERANGE;
@@ -1080,6 +1080,7 @@ disable:
for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
priv->xstats.max_sdu_txq_drop[i] = 0;
priv->xstats.mtl_est_txq_hlbf[i] = 0;
+ priv->xstats.mtl_est_txq_hlbs[i] = 0;
}
mutex_unlock(&priv->est_lock);
}
@@ -1097,7 +1098,8 @@ static void tc_taprio_stats(struct stmmac_priv *priv,
for (i = 0; i < priv->plat->tx_queues_to_use; i++)
window_drops += priv->xstats.max_sdu_txq_drop[i] +
- priv->xstats.mtl_est_txq_hlbf[i];
+ priv->xstats.mtl_est_txq_hlbf[i] +
+ priv->xstats.mtl_est_txq_hlbs[i];
qopt->stats.window_drops = window_drops;
/* Transmission overrun doesn't happen for stmmac, hence always 0 */
@@ -1111,7 +1113,8 @@ static void tc_taprio_queue_stats(struct stmmac_priv *priv,
int queue = qopt->queue_stats.queue;
q_stats->stats.window_drops = priv->xstats.max_sdu_txq_drop[queue] +
- priv->xstats.mtl_est_txq_hlbf[queue];
+ priv->xstats.mtl_est_txq_hlbf[queue] +
+ priv->xstats.mtl_est_txq_hlbs[queue];
/* Transmission overrun doesn't happen for stmmac, hence always 0 */
q_stats->stats.tx_overruns = 0;
@@ -1284,14 +1287,3 @@ const struct stmmac_tc_ops dwmac510_tc_ops = {
.query_caps = tc_query_caps,
.setup_mqprio = tc_setup_dwmac510_mqprio,
};
-
-const struct stmmac_tc_ops dwxgmac_tc_ops = {
- .init = tc_init,
- .setup_cls_u32 = tc_setup_cls_u32,
- .setup_cbs = tc_setup_cbs,
- .setup_cls = tc_setup_cls,
- .setup_taprio = tc_setup_taprio_without_fpe,
- .setup_etf = tc_setup_etf,
- .query_caps = tc_query_caps,
- .setup_mqprio = tc_setup_mqprio_unimplemented,
-};
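
The interval check change above matters because the hardware gate-interval field is only wid bits wide. Taking wid = 24 purely for illustration, GENMASK(wid, 0) accepted any value up to 2^25 - 1 = 33554431 ns even though a 24-bit field can only represent up to 2^24 - 1 = 16777215 ns; GENMASK(wid - 1, 0) now rejects such an interval with -ERANGE instead of letting it through.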
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
new file mode 100644
index 000000000000..b18404dd5a8b
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025, Altera Corporation
+ * stmmac VLAN (802.1Q) handling
+ */
+
+#include "stmmac.h"
+#include "stmmac_vlan.h"
+
+static void vlan_write_single(struct net_device *dev, u16 vid)
+{
+ void __iomem *ioaddr = (void __iomem *)dev->base_addr;
+ u32 val;
+
+ val = readl(ioaddr + VLAN_TAG);
+ val &= ~VLAN_TAG_VID;
+ val |= VLAN_TAG_ETV | vid;
+
+ writel(val, ioaddr + VLAN_TAG);
+}
+
+static int vlan_write_filter(struct net_device *dev,
+ struct mac_device_info *hw,
+ u8 index, u32 data)
+{
+ void __iomem *ioaddr = (void __iomem *)dev->base_addr;
+ int ret;
+ u32 val;
+
+ if (index >= hw->num_vlan)
+ return -EINVAL;
+
+ writel(data, ioaddr + VLAN_TAG_DATA);
+
+ val = readl(ioaddr + VLAN_TAG);
+ val &= ~(VLAN_TAG_CTRL_OFS_MASK |
+ VLAN_TAG_CTRL_CT |
+ VLAN_TAG_CTRL_OB);
+ val |= (index << VLAN_TAG_CTRL_OFS_SHIFT) | VLAN_TAG_CTRL_OB;
+
+ writel(val, ioaddr + VLAN_TAG);
+
+ ret = readl_poll_timeout(ioaddr + VLAN_TAG, val,
+ !(val & VLAN_TAG_CTRL_OB),
+ 1000, 500000);
+ if (ret) {
+ netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int vlan_add_hw_rx_fltr(struct net_device *dev,
+ struct mac_device_info *hw,
+ __be16 proto, u16 vid)
+{
+ int index = -1;
+ u32 val = 0;
+ int i, ret;
+
+ if (vid > 4095)
+ return -EINVAL;
+
+ /* Single Rx VLAN Filter */
+ if (hw->num_vlan == 1) {
+ /* For single VLAN filter, VID 0 means VLAN promiscuous */
+ if (vid == 0) {
+ netdev_warn(dev, "Adding VLAN ID 0 is not supported\n");
+ return -EPERM;
+ }
+
+ if (hw->vlan_filter[0] & VLAN_TAG_VID) {
+ netdev_err(dev, "Only single VLAN ID supported\n");
+ return -EPERM;
+ }
+
+ hw->vlan_filter[0] = vid;
+ vlan_write_single(dev, vid);
+
+ return 0;
+ }
+
+ /* Extended Rx VLAN Filter Enable */
+ val |= VLAN_TAG_DATA_ETV | VLAN_TAG_DATA_VEN | vid;
+
+ for (i = 0; i < hw->num_vlan; i++) {
+ if (hw->vlan_filter[i] == val)
+ return 0;
+ else if (!(hw->vlan_filter[i] & VLAN_TAG_DATA_VEN))
+ index = i;
+ }
+
+ if (index == -1) {
+ netdev_err(dev, "MAC_VLAN_Tag_Filter full (size: %0u)\n",
+ hw->num_vlan);
+ return -EPERM;
+ }
+
+ ret = vlan_write_filter(dev, hw, index, val);
+
+ if (!ret)
+ hw->vlan_filter[index] = val;
+
+ return ret;
+}
+
+static int vlan_del_hw_rx_fltr(struct net_device *dev,
+ struct mac_device_info *hw,
+ __be16 proto, u16 vid)
+{
+ int i, ret = 0;
+
+ /* Single Rx VLAN Filter */
+ if (hw->num_vlan == 1) {
+ if ((hw->vlan_filter[0] & VLAN_TAG_VID) == vid) {
+ hw->vlan_filter[0] = 0;
+ vlan_write_single(dev, 0);
+ }
+ return 0;
+ }
+
+ /* Extended Rx VLAN Filter Enable */
+ for (i = 0; i < hw->num_vlan; i++) {
+ if ((hw->vlan_filter[i] & VLAN_TAG_DATA_VEN) &&
+ ((hw->vlan_filter[i] & VLAN_TAG_DATA_VID) == vid)) {
+ ret = vlan_write_filter(dev, hw, i, 0);
+
+ if (!ret)
+ hw->vlan_filter[i] = 0;
+ else
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static void vlan_restore_hw_rx_fltr(struct net_device *dev,
+ struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+ u32 hash;
+ u32 val;
+ int i;
+
+ /* Single Rx VLAN Filter */
+ if (hw->num_vlan == 1) {
+ vlan_write_single(dev, hw->vlan_filter[0]);
+ return;
+ }
+
+ /* Extended Rx VLAN Filter Enable */
+ for (i = 0; i < hw->num_vlan; i++) {
+ if (hw->vlan_filter[i] & VLAN_TAG_DATA_VEN) {
+ val = hw->vlan_filter[i];
+ vlan_write_filter(dev, hw, i, val);
+ }
+ }
+
+ hash = readl(ioaddr + VLAN_HASH_TABLE);
+ if (hash & VLAN_VLHT) {
+ value = readl(ioaddr + VLAN_TAG);
+ value |= VLAN_VTHM;
+ writel(value, ioaddr + VLAN_TAG);
+ }
+}
+
+static void vlan_update_hash(struct mac_device_info *hw, u32 hash,
+ u16 perfect_match, bool is_double)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ writel(hash, ioaddr + VLAN_HASH_TABLE);
+
+ value = readl(ioaddr + VLAN_TAG);
+
+ if (hash) {
+ value |= VLAN_VTHM | VLAN_ETV;
+ if (is_double) {
+ value |= VLAN_EDVLP;
+ value |= VLAN_ESVL;
+ value |= VLAN_DOVLTC;
+ }
+
+ writel(value, ioaddr + VLAN_TAG);
+ } else if (perfect_match) {
+ u32 value = VLAN_ETV;
+
+ if (is_double) {
+ value |= VLAN_EDVLP;
+ value |= VLAN_ESVL;
+ value |= VLAN_DOVLTC;
+ }
+
+ writel(value | perfect_match, ioaddr + VLAN_TAG);
+ } else {
+ value &= ~(VLAN_VTHM | VLAN_ETV);
+ value &= ~(VLAN_EDVLP | VLAN_ESVL);
+ value &= ~VLAN_DOVLTC;
+ value &= ~VLAN_VID;
+
+ writel(value, ioaddr + VLAN_TAG);
+ }
+}
+
+static void vlan_enable(struct mac_device_info *hw, u32 type)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + VLAN_INCL);
+ value |= VLAN_VLTI;
+ value &= ~VLAN_CSVL; /* Only use CVLAN */
+ value &= ~VLAN_VLC;
+ value |= (type << VLAN_VLC_SHIFT) & VLAN_VLC;
+ writel(value, ioaddr + VLAN_INCL);
+}
+
+static void vlan_rx_hw(struct mac_device_info *hw,
+ struct dma_desc *rx_desc, struct sk_buff *skb)
+{
+ if (hw->desc->get_rx_vlan_valid(rx_desc)) {
+ u16 vid = hw->desc->get_rx_vlan_tci(rx_desc);
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+ }
+}
+
+static void vlan_set_hw_mode(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + VLAN_TAG);
+
+ value &= ~VLAN_TAG_CTRL_EVLS_MASK;
+
+ if (hw->hw_vlan_en)
+ /* Always strip VLAN on Receive */
+ value |= VLAN_TAG_STRIP_ALL;
+ else
+ /* Do not strip VLAN on Receive */
+ value |= VLAN_TAG_STRIP_NONE;
+
+ /* Enable outer VLAN Tag in Rx DMA descriptor */
+ value |= VLAN_TAG_CTRL_EVLRXS;
+ writel(value, ioaddr + VLAN_TAG);
+}
+
+static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
+ u16 perfect_match, bool is_double)
+{
+ void __iomem *ioaddr = hw->pcsr;
+
+ writel(hash, ioaddr + VLAN_HASH_TABLE);
+
+ if (hash) {
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+
+ value |= XGMAC_FILTER_VTFE;
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ value = readl(ioaddr + VLAN_TAG);
+
+ value |= VLAN_VTHM | VLAN_ETV;
+ if (is_double) {
+ value |= VLAN_EDVLP;
+ value |= VLAN_ESVL;
+ value |= VLAN_DOVLTC;
+ } else {
+ value &= ~VLAN_EDVLP;
+ value &= ~VLAN_ESVL;
+ value &= ~VLAN_DOVLTC;
+ }
+
+ value &= ~VLAN_VID;
+ writel(value, ioaddr + VLAN_TAG);
+ } else if (perfect_match) {
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+
+ value |= XGMAC_FILTER_VTFE;
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ value = readl(ioaddr + VLAN_TAG);
+
+ value &= ~VLAN_VTHM;
+ value |= VLAN_ETV;
+ if (is_double) {
+ value |= VLAN_EDVLP;
+ value |= VLAN_ESVL;
+ value |= VLAN_DOVLTC;
+ } else {
+ value &= ~VLAN_EDVLP;
+ value &= ~VLAN_ESVL;
+ value &= ~VLAN_DOVLTC;
+ }
+
+ value &= ~VLAN_VID;
+ writel(value | perfect_match, ioaddr + VLAN_TAG);
+ } else {
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+
+ value &= ~XGMAC_FILTER_VTFE;
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ value = readl(ioaddr + VLAN_TAG);
+
+ value &= ~(VLAN_VTHM | VLAN_ETV);
+ value &= ~(VLAN_EDVLP | VLAN_ESVL);
+ value &= ~VLAN_DOVLTC;
+ value &= ~VLAN_VID;
+
+ writel(value, ioaddr + VLAN_TAG);
+ }
+}
+
+const struct stmmac_vlan_ops dwmac_vlan_ops = {
+ .update_vlan_hash = vlan_update_hash,
+ .enable_vlan = vlan_enable,
+ .add_hw_vlan_rx_fltr = vlan_add_hw_rx_fltr,
+ .del_hw_vlan_rx_fltr = vlan_del_hw_rx_fltr,
+ .restore_hw_vlan_rx_fltr = vlan_restore_hw_rx_fltr,
+ .rx_hw_vlan = vlan_rx_hw,
+ .set_hw_vlan_mode = vlan_set_hw_mode,
+};
+
+const struct stmmac_vlan_ops dwxlgmac2_vlan_ops = {
+ .update_vlan_hash = dwxgmac2_update_vlan_hash,
+ .enable_vlan = vlan_enable,
+};
+
+const struct stmmac_vlan_ops dwxgmac210_vlan_ops = {
+ .update_vlan_hash = dwxgmac2_update_vlan_hash,
+ .enable_vlan = vlan_enable,
+ .add_hw_vlan_rx_fltr = vlan_add_hw_rx_fltr,
+ .del_hw_vlan_rx_fltr = vlan_del_hw_rx_fltr,
+ .restore_hw_vlan_rx_fltr = vlan_restore_hw_rx_fltr,
+ .rx_hw_vlan = vlan_rx_hw,
+ .set_hw_vlan_mode = vlan_set_hw_mode,
+};
+
+u32 stmmac_get_num_vlan(void __iomem *ioaddr)
+{
+ u32 val, num_vlan;
+
+ val = readl(ioaddr + HW_FEATURE3);
+ switch (val & VLAN_HW_FEAT_NRVF) {
+ case 0:
+ num_vlan = 1;
+ break;
+ case 1:
+ num_vlan = 4;
+ break;
+ case 2:
+ num_vlan = 8;
+ break;
+ case 3:
+ num_vlan = 16;
+ break;
+ case 4:
+ num_vlan = 24;
+ break;
+ case 5:
+ num_vlan = 32;
+ break;
+ default:
+ num_vlan = 1;
+ }
+
+ return num_vlan;
+}
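
A core bring-up path is expected to discover the filter count with stmmac_get_num_vlan() and then point its mac_device_info at one of the ops tables above; the name of the ops-pointer field in the sketch below is assumed purely for illustration and is not defined by this file.

	/* Sketch: 'vlan' as the ops-pointer field name is an assumption */
	hw->num_vlan = stmmac_get_num_vlan(priv->ioaddr);
	hw->vlan = &dwmac_vlan_ops;	/* GMAC/GMAC4 flavour */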
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.h
new file mode 100644
index 000000000000..c24f89a9049b
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2025, Altera Corporation
+ * stmmac VLAN(802.1Q) handling
+ */
+
+#ifndef __STMMAC_VLAN_H__
+#define __STMMAC_VLAN_H__
+
+#include <linux/bitfield.h>
+#include "dwxgmac2.h"
+
+#define VLAN_TAG 0x00000050
+#define VLAN_TAG_DATA 0x00000054
+#define VLAN_HASH_TABLE 0x00000058
+#define VLAN_INCL 0x00000060
+
+/* MAC VLAN */
+#define VLAN_EDVLP BIT(26)
+#define VLAN_VTHM BIT(25)
+#define VLAN_DOVLTC BIT(20)
+#define VLAN_ESVL BIT(18)
+#define VLAN_ETV BIT(16)
+#define VLAN_VID GENMASK(15, 0)
+#define VLAN_VLTI BIT(20)
+#define VLAN_CSVL BIT(19)
+#define VLAN_VLC GENMASK(17, 16)
+#define VLAN_VLC_SHIFT 16
+#define VLAN_VLHT GENMASK(15, 0)
+
+/* MAC VLAN Tag */
+#define VLAN_TAG_VID GENMASK(15, 0)
+#define VLAN_TAG_ETV BIT(16)
+
+/* MAC VLAN Tag Control */
+#define VLAN_TAG_CTRL_OB BIT(0)
+#define VLAN_TAG_CTRL_CT BIT(1)
+#define VLAN_TAG_CTRL_OFS_MASK GENMASK(6, 2)
+#define VLAN_TAG_CTRL_OFS_SHIFT 2
+#define VLAN_TAG_CTRL_EVLS_MASK GENMASK(22, 21)
+#define VLAN_TAG_CTRL_EVLS_SHIFT 21
+#define VLAN_TAG_CTRL_EVLRXS BIT(24)
+
+#define VLAN_TAG_STRIP_NONE FIELD_PREP(VLAN_TAG_CTRL_EVLS_MASK, 0x0)
+#define VLAN_TAG_STRIP_PASS FIELD_PREP(VLAN_TAG_CTRL_EVLS_MASK, 0x1)
+#define VLAN_TAG_STRIP_FAIL FIELD_PREP(VLAN_TAG_CTRL_EVLS_MASK, 0x2)
+#define VLAN_TAG_STRIP_ALL FIELD_PREP(VLAN_TAG_CTRL_EVLS_MASK, 0x3)
+
+/* MAC VLAN Tag Data/Filter */
+#define VLAN_TAG_DATA_VID GENMASK(15, 0)
+#define VLAN_TAG_DATA_VEN BIT(16)
+#define VLAN_TAG_DATA_ETV BIT(17)
+
+/* MAC VLAN HW FEAT */
+#define HW_FEATURE3 0x00000128
+#define VLAN_HW_FEAT_NRVF GENMASK(2, 0)
+
+extern const struct stmmac_vlan_ops dwmac_vlan_ops;
+extern const struct stmmac_vlan_ops dwxgmac210_vlan_ops;
+extern const struct stmmac_vlan_ops dwxlgmac2_vlan_ops;
+
+u32 stmmac_get_num_vlan(void __iomem *ioaddr);
+
+#endif /* __STMMAC_VLAN_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
index aa6f16d3df64..d7e4db7224b0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
@@ -129,7 +129,7 @@ int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog,
bpf_prog_put(old_prog);
/* Disable RX SPH for XDP operation */
- priv->sph = priv->sph_cap && !stmmac_xdp_is_enabled(priv);
+ priv->sph_active = priv->sph_capable && !stmmac_xdp_is_enabled(priv);
if (if_running && need_update)
stmmac_xdp_open(dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h
index 896dc987d4ef..77ce8cfbe976 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h
@@ -4,7 +4,6 @@
#ifndef _STMMAC_XDP_H_
#define _STMMAC_XDP_H_
-#define STMMAC_MAX_RX_BUF_SIZE(num) (((num) * PAGE_SIZE) - XDP_PACKET_HEADROOM)
#define STMMAC_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
int stmmac_xdp_setup_pool(struct stmmac_priv *priv, struct xsk_buff_pool *pool,
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index b8948d5b779a..acfb523214b9 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -3779,7 +3779,7 @@ static void cas_shutdown(struct cas *cp)
/* Make us not-running to avoid timers respawning */
cp->hw_running = 0;
- del_timer_sync(&cp->link_timer);
+ timer_delete_sync(&cp->link_timer);
/* Stop the reset task */
#if 0
@@ -4021,7 +4021,7 @@ done:
static void cas_link_timer(struct timer_list *t)
{
- struct cas *cp = from_timer(cp, t, link_timer);
+ struct cas *cp = timer_container_of(cp, t, link_timer);
int mask, pending = 0, reset = 0;
unsigned long flags;
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index a9a6670b5ff1..6fc37ab27f7b 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -390,7 +390,7 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
return 0;
err_out_del_timer:
- del_timer_sync(&port->clean_timer);
+ timer_delete_sync(&port->clean_timer);
list_del_rcu(&port->list);
synchronize_rcu();
netif_napi_del(&port->napi);
@@ -408,8 +408,8 @@ static void vsw_port_remove(struct vio_dev *vdev)
unsigned long flags;
if (port) {
- del_timer_sync(&port->vio.timer);
- del_timer_sync(&port->clean_timer);
+ timer_delete_sync(&port->vio.timer);
+ timer_delete_sync(&port->clean_timer);
napi_disable(&port->napi);
unregister_netdev(port->dev);
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index df6d35d41b97..893216b0e08d 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -2225,7 +2225,7 @@ static int niu_link_status(struct niu *np, int *link_up_p)
static void niu_timer(struct timer_list *t)
{
- struct niu *np = from_timer(np, t, timer);
+ struct niu *np = timer_container_of(np, t, timer);
unsigned long off;
int err, link_up;
@@ -3303,7 +3303,7 @@ static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
addr &= PAGE_MASK;
pp = &rp->rxhash[h];
for (; (p = *pp) != NULL; pp = &niu_next_page(p)) {
- if (p->index == addr) {
+ if (p->private == addr) {
*link = pp;
goto found;
}
@@ -3318,7 +3318,7 @@ static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
{
unsigned int h = niu_hash_rxaddr(rp, base);
- page->index = base;
+ page->private = base;
niu_next_page(page) = rp->rxhash[h];
rp->rxhash[h] = page;
}
@@ -3336,7 +3336,7 @@ static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
addr = np->ops->map_page(np->device, page, 0,
PAGE_SIZE, DMA_FROM_DEVICE);
- if (!addr) {
+ if (np->ops->mapping_error(np->device, addr)) {
__free_page(page);
return -ENOMEM;
}
@@ -3400,11 +3400,11 @@ static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
RCR_ENTRY_PKTBUFSZ_SHIFT];
- if ((page->index + PAGE_SIZE) - rcr_size == addr) {
+ if ((page->private + PAGE_SIZE) - rcr_size == addr) {
*link = niu_next_page(page);
- np->ops->unmap_page(np->device, page->index,
+ np->ops->unmap_page(np->device, page->private,
PAGE_SIZE, DMA_FROM_DEVICE);
- page->index = 0;
+ page->private = 0;
niu_next_page(page) = NULL;
__free_page(page);
rp->rbr_refill_pending++;
@@ -3469,11 +3469,11 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
append_size = append_size - skb->len;
niu_rx_skb_append(skb, page, off, append_size, rcr_size);
- if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
+ if ((page->private + rp->rbr_block_size) - rcr_size == addr) {
*link = niu_next_page(page);
- np->ops->unmap_page(np->device, page->index,
+ np->ops->unmap_page(np->device, page->private,
PAGE_SIZE, DMA_FROM_DEVICE);
- page->index = 0;
+ page->private = 0;
niu_next_page(page) = NULL;
rp->rbr_refill_pending++;
} else
@@ -3538,11 +3538,11 @@ static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
page = rp->rxhash[i];
while (page) {
struct page *next = niu_next_page(page);
- u64 base = page->index;
+ u64 base = page->private;
np->ops->unmap_page(np->device, base, PAGE_SIZE,
DMA_FROM_DEVICE);
- page->index = 0;
+ page->private = 0;
niu_next_page(page) = NULL;
__free_page(page);
@@ -5825,7 +5825,7 @@ static int niu_init_mac(struct niu *np)
/* This looks hookey but the RX MAC reset we just did will
* undo some of the state we setup in niu_init_tx_mac() so we
* have to call it again. In particular, the RX MAC reset will
- * set the XMAC_MAX register back to it's default value.
+ * set the XMAC_MAX register back to its default value.
*/
niu_init_tx_mac(np);
niu_enable_tx_mac(np, 1);
@@ -6086,7 +6086,7 @@ static void niu_enable_napi(struct niu *np)
int i;
for (i = 0; i < np->num_ldg; i++)
- napi_enable(&np->ldg[i].napi);
+ napi_enable_locked(&np->ldg[i].napi);
}
static void niu_disable_napi(struct niu *np)
@@ -6116,7 +6116,9 @@ static int niu_open(struct net_device *dev)
if (err)
goto out_free_channels;
+ netdev_lock(dev);
niu_enable_napi(np);
+ netdev_unlock(dev);
spin_lock_irq(&np->lock);
@@ -6163,7 +6165,7 @@ static void niu_full_shutdown(struct niu *np, struct net_device *dev)
niu_disable_napi(np);
netif_tx_stop_all_queues(dev);
- del_timer_sync(&np->timer);
+ timer_delete_sync(&np->timer);
spin_lock_irq(&np->lock);
@@ -6460,7 +6462,7 @@ static void niu_reset_buffers(struct niu *np)
page = rp->rxhash[j];
while (page) {
struct page *next = niu_next_page(page);
- u64 base = page->index;
+ u64 base = page->private;
base = base >> RBR_DESCR_ADDR_SHIFT;
rp->rbr[k++] = cpu_to_le32(base);
page = next;
@@ -6509,7 +6511,7 @@ static void niu_reset_task(struct work_struct *work)
spin_unlock_irqrestore(&np->lock, flags);
- del_timer_sync(&np->timer);
+ timer_delete_sync(&np->timer);
niu_netif_stop(np);
@@ -6521,6 +6523,7 @@ static void niu_reset_task(struct work_struct *work)
niu_reset_buffers(np);
+ netdev_lock(np->dev);
spin_lock_irqsave(&np->lock, flags);
err = niu_init_hw(np);
@@ -6531,6 +6534,7 @@ static void niu_reset_task(struct work_struct *work)
}
spin_unlock_irqrestore(&np->lock, flags);
+ netdev_unlock(np->dev);
}
static void niu_tx_timeout(struct net_device *dev, unsigned int txqueue)
@@ -6672,6 +6676,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
len = skb_headlen(skb);
mapping = np->ops->map_single(np->device, skb->data,
len, DMA_TO_DEVICE);
+ if (np->ops->mapping_error(np->device, mapping))
+ goto out_drop;
prod = rp->prod;
@@ -6713,6 +6719,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
mapping = np->ops->map_page(np->device, skb_frag_page(frag),
skb_frag_off(frag), len,
DMA_TO_DEVICE);
+ if (np->ops->mapping_error(np->device, mapping))
+ goto out_unmap;
rp->tx_buffs[prod].skb = NULL;
rp->tx_buffs[prod].mapping = mapping;
@@ -6737,6 +6745,19 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
out:
return NETDEV_TX_OK;
+out_unmap:
+ while (i--) {
+ const skb_frag_t *frag;
+
+ prod = PREVIOUS_TX(rp, prod);
+ frag = &skb_shinfo(skb)->frags[i];
+ np->ops->unmap_page(np->device, rp->tx_buffs[prod].mapping,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ }
+
+ np->ops->unmap_single(np->device, rp->tx_buffs[rp->prod].mapping,
+ skb_headlen(skb), DMA_TO_DEVICE);
+
out_drop:
rp->tx_errors++;
kfree_skb(skb);
@@ -6761,7 +6782,9 @@ static int niu_change_mtu(struct net_device *dev, int new_mtu)
niu_free_channels(np);
+ netdev_lock(dev);
niu_enable_napi(np);
+ netdev_unlock(dev);
err = niu_alloc_channels(np);
if (err)
@@ -7071,8 +7094,10 @@ static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key)
}
-static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
+static int niu_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *nfc)
{
+ struct niu *np = netdev_priv(dev);
u64 class;
nfc->data = 0;
@@ -7284,9 +7309,6 @@ static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
int ret = 0;
switch (cmd->cmd) {
- case ETHTOOL_GRXFH:
- ret = niu_get_hash_opts(np, cmd);
- break;
case ETHTOOL_GRXRINGS:
cmd->data = np->num_rx_rings;
break;
@@ -7307,8 +7329,11 @@ static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return ret;
}
-static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
+static int niu_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct niu *np = netdev_priv(dev);
u64 class;
u64 flow_key = 0;
unsigned long flags;
@@ -7650,9 +7675,6 @@ static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
int ret = 0;
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = niu_set_hash_opts(np, cmd);
- break;
case ETHTOOL_SRXCLSRLINS:
ret = niu_add_ethtool_tcam_entry(np, cmd);
break;
@@ -7906,6 +7928,8 @@ static const struct ethtool_ops niu_ethtool_ops = {
.set_phys_id = niu_set_phys_id,
.get_rxnfc = niu_get_nfc,
.set_rxnfc = niu_set_nfc,
+ .get_rxfh_fields = niu_get_rxfh_fields,
+ .set_rxfh_fields = niu_set_rxfh_fields,
.get_link_ksettings = niu_get_link_ksettings,
.set_link_ksettings = niu_set_link_ksettings,
};
@@ -9058,6 +9082,8 @@ static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
msi_vec[i].entry = i;
}
+ pdev->dev_flags |= PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST;
+
num_irqs = pci_enable_msix_range(pdev, msi_vec, 1, num_irqs);
if (num_irqs < 0) {
np->flags &= ~NIU_FLAGS_MSIX;
@@ -9636,6 +9662,11 @@ static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
dma_unmap_single(dev, dma_address, size, direction);
}
+static int niu_pci_mapping_error(struct device *dev, u64 addr)
+{
+ return dma_mapping_error(dev, addr);
+}
+
static const struct niu_ops niu_pci_ops = {
.alloc_coherent = niu_pci_alloc_coherent,
.free_coherent = niu_pci_free_coherent,
@@ -9643,6 +9674,7 @@ static const struct niu_ops niu_pci_ops = {
.unmap_page = niu_pci_unmap_page,
.map_single = niu_pci_map_single,
.unmap_single = niu_pci_unmap_single,
+ .mapping_error = niu_pci_mapping_error,
};
static void niu_driver_version(void)
@@ -9908,7 +9940,7 @@ static int __maybe_unused niu_suspend(struct device *dev_d)
flush_work(&np->reset_task);
niu_netif_stop(np);
- del_timer_sync(&np->timer);
+ timer_delete_sync(&np->timer);
spin_lock_irqsave(&np->lock, flags);
niu_enable_interrupts(np, 0);
@@ -9937,6 +9969,7 @@ static int __maybe_unused niu_resume(struct device *dev_d)
spin_lock_irqsave(&np->lock, flags);
+ netdev_lock(dev);
err = niu_init_hw(np);
if (!err) {
np->timer.expires = jiffies + HZ;
@@ -9945,6 +9978,7 @@ static int __maybe_unused niu_resume(struct device *dev_d)
}
spin_unlock_irqrestore(&np->lock, flags);
+ netdev_unlock(dev);
return err;
}
@@ -10009,6 +10043,11 @@ static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
/* Nothing to do. */
}
+static int niu_phys_mapping_error(struct device *dev, u64 dma_address)
+{
+ return false;
+}
+
static const struct niu_ops niu_phys_ops = {
.alloc_coherent = niu_phys_alloc_coherent,
.free_coherent = niu_phys_free_coherent,
@@ -10016,6 +10055,7 @@ static const struct niu_ops niu_phys_ops = {
.unmap_page = niu_phys_unmap_page,
.map_single = niu_phys_map_single,
.unmap_single = niu_phys_unmap_single,
+ .mapping_error = niu_phys_mapping_error,
};
static int niu_of_probe(struct platform_device *op)
diff --git a/drivers/net/ethernet/sun/niu.h b/drivers/net/ethernet/sun/niu.h
index 04c215f91fc0..d8368043fc3b 100644
--- a/drivers/net/ethernet/sun/niu.h
+++ b/drivers/net/ethernet/sun/niu.h
@@ -2879,6 +2879,9 @@ struct tx_ring_info {
#define NEXT_TX(tp, index) \
(((index) + 1) < (tp)->pending ? ((index) + 1) : 0)
+#define PREVIOUS_TX(tp, index) \
+ (((index) - 1) >= 0 ? ((index) - 1) : (((tp)->pending) - 1))
+
static inline u32 niu_tx_avail(struct tx_ring_info *tp)
{
return (tp->pending -
@@ -3140,6 +3143,7 @@ struct niu_ops {
enum dma_data_direction direction);
void (*unmap_single)(struct device *dev, u64 dma_address,
size_t size, enum dma_data_direction direction);
+ int (*mapping_error)(struct device *dev, u64 dma_address);
};
struct niu_link_config {
@@ -3246,8 +3250,8 @@ struct niu {
struct niu_parent *parent;
u32 flags;
-#define NIU_FLAGS_HOTPLUG_PHY_PRESENT 0x02000000 /* Removeable PHY detected*/
-#define NIU_FLAGS_HOTPLUG_PHY 0x01000000 /* Removeable PHY */
+#define NIU_FLAGS_HOTPLUG_PHY_PRESENT 0x02000000 /* Removable PHY detected*/
+#define NIU_FLAGS_HOTPLUG_PHY 0x01000000 /* Removable PHY */
#define NIU_FLAGS_VPD_VALID 0x00800000 /* VPD has valid version */
#define NIU_FLAGS_MSIX 0x00400000 /* MSI-X in use */
#define NIU_FLAGS_MCAST 0x00200000 /* multicast filter enabled */
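
The PREVIOUS_TX() macro added above is the wrap-around counterpart of NEXT_TX(): with tp->pending = 256, PREVIOUS_TX(tp, 5) is 4 while PREVIOUS_TX(tp, 0) wraps to 255, which is what the new out_unmap path in niu_start_xmit() relies on when walking back over already-mapped fragments.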
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index bbb3a6ca19ed..edb2fd3a6551 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -526,7 +526,7 @@ static int try_next_permutation(struct bigmac *bp, void __iomem *tregs)
static void bigmac_timer(struct timer_list *t)
{
- struct bigmac *bp = from_timer(bp, t, bigmac_timer);
+ struct bigmac *bp = timer_container_of(bp, t, bigmac_timer);
void __iomem *tregs = bp->tregs;
int restart_timer = 0;
@@ -931,7 +931,7 @@ static int bigmac_close(struct net_device *dev)
{
struct bigmac *bp = netdev_priv(dev);
- del_timer(&bp->bigmac_timer);
+ timer_delete(&bp->bigmac_timer);
bp->timer_state = asleep;
bp->timer_ticks = 0;
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 3e5f9b17c777..8e69d917d827 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -1481,7 +1481,7 @@ static int gem_mdio_link_not_up(struct gem *gp)
static void gem_link_timer(struct timer_list *t)
{
- struct gem *gp = from_timer(gp, t, link_timer);
+ struct gem *gp = timer_container_of(gp, t, link_timer);
struct net_device *dev = gp->dev;
int restart_aneg = 0;
@@ -2180,7 +2180,7 @@ static void gem_do_stop(struct net_device *dev, int wol)
gem_disable_ints(gp);
/* Stop the link timer */
- del_timer_sync(&gp->link_timer);
+ timer_delete_sync(&gp->link_timer);
/* We cannot cancel the reset task while holding the
* rtnl lock, we'd get an A->B / B->A deadlock stituation
@@ -2230,7 +2230,7 @@ static void gem_reset_task(struct work_struct *work)
}
/* Stop the link timer */
- del_timer_sync(&gp->link_timer);
+ timer_delete_sync(&gp->link_timer);
/* Stop NAPI and tx */
gem_netif_stop(gp);
@@ -2610,7 +2610,7 @@ static int gem_set_link_ksettings(struct net_device *dev,
/* Apply settings and restart link process. */
if (netif_device_present(gp->dev)) {
- del_timer_sync(&gp->link_timer);
+ timer_delete_sync(&gp->link_timer);
gem_begin_auto_negotiation(gp, cmd);
}
@@ -2626,7 +2626,7 @@ static int gem_nway_reset(struct net_device *dev)
/* Restart link process */
if (netif_device_present(gp->dev)) {
- del_timer_sync(&gp->link_timer);
+ timer_delete_sync(&gp->link_timer);
gem_begin_auto_negotiation(gp, NULL);
}
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 50ace461a1af..48f0a96c0e9e 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -451,7 +451,7 @@ static void happy_meal_tcvr_write(struct happy_meal *hp,
/* Auto negotiation. The scheme is very simple. We have a timer routine
* that keeps watching the auto negotiation process as it progresses.
* The DP83840 is first told to start doing it's thing, we set up the time
- * and place the timer state machine in it's initial state.
+ * and place the timer state machine in its initial state.
*
* Here the timer peeks at the DP83840 status registers at each click to see
* if the auto negotiation has completed, we assume here that the DP83840 PHY
@@ -721,7 +721,7 @@ force_link:
static void happy_meal_timer(struct timer_list *t)
{
- struct happy_meal *hp = from_timer(hp, t, happy_timer);
+ struct happy_meal *hp = timer_container_of(hp, t, happy_timer);
void __iomem *tregs = hp->tcvregs;
int restart_timer = 0;
@@ -1265,7 +1265,7 @@ static int happy_meal_init(struct happy_meal *hp)
u32 regtmp, rxcfg;
/* If auto-negotiation timer is running, kill it. */
- del_timer(&hp->happy_timer);
+ timer_delete(&hp->happy_timer);
HMD("happy_flags[%08x]\n", hp->happy_flags);
if (!(hp->happy_flags & HFLAG_INIT)) {
@@ -1922,7 +1922,7 @@ static int happy_meal_close(struct net_device *dev)
happy_meal_clean_rings(hp);
/* If auto-negotiation timer is running, kill it. */
- del_timer(&hp->happy_timer);
+ timer_delete(&hp->happy_timer);
spin_unlock_irq(&hp->happy_lock);
@@ -2184,7 +2184,7 @@ static int hme_set_link_ksettings(struct net_device *dev,
/* Ok, do it to it. */
spin_lock_irq(&hp->happy_lock);
- del_timer(&hp->happy_timer);
+ timer_delete(&hp->happy_timer);
happy_meal_begin_auto_negotiation(hp, hp->tcvregs, cmd);
spin_unlock_irq(&hp->happy_lock);
diff --git a/drivers/net/ethernet/sun/sunqe.h b/drivers/net/ethernet/sun/sunqe.h
index 0daed05b7c83..300631e8ac0d 100644
--- a/drivers/net/ethernet/sun/sunqe.h
+++ b/drivers/net/ethernet/sun/sunqe.h
@@ -36,7 +36,7 @@
#define GLOB_PSIZE_6144 0x10 /* 6k packet size */
#define GLOB_PSIZE_8192 0x11 /* 8k packet size */
-/* In MACE mode, there are four qe channels. Each channel has it's own
+/* In MACE mode, there are four qe channels. Each channel has its own
* status bits in the QEC status register. This macro picks out the
* ones you want.
*/
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 1e887d951a04..a2a3e94da4b8 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -505,7 +505,7 @@ static void vnet_port_remove(struct vio_dev *vdev)
struct vnet_port *port = dev_get_drvdata(&vdev->dev);
if (port) {
- del_timer_sync(&port->vio.timer);
+ timer_delete_sync(&port->vio.timer);
napi_disable(&port->napi);
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
index 1cacb2a0ee03..0212853c9430 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -1044,7 +1044,7 @@ static inline void vnet_free_skbs(struct sk_buff *skb)
void sunvnet_clean_timer_expire_common(struct timer_list *t)
{
- struct vnet_port *port = from_timer(port, t, clean_timer);
+ struct vnet_port *port = timer_container_of(port, t, clean_timer);
struct sk_buff *freeskbs;
unsigned pending;
@@ -1058,7 +1058,7 @@ void sunvnet_clean_timer_expire_common(struct timer_list *t)
(void)mod_timer(&port->clean_timer,
jiffies + VNET_CLEAN_TIMEOUT);
else
- del_timer(&port->clean_timer);
+ timer_delete(&port->clean_timer);
}
EXPORT_SYMBOL_GPL(sunvnet_clean_timer_expire_common);
@@ -1513,7 +1513,7 @@ out_dropped:
(void)mod_timer(&port->clean_timer,
jiffies + VNET_CLEAN_TIMEOUT);
else if (port)
- del_timer(&port->clean_timer);
+ timer_delete(&port->clean_timer);
rcu_read_unlock();
dev_kfree_skb(skb);
vnet_free_skbs(freeskbs);
@@ -1707,7 +1707,7 @@ EXPORT_SYMBOL_GPL(sunvnet_port_free_tx_bufs_common);
void vnet_port_reset(struct vnet_port *port)
{
- del_timer(&port->clean_timer);
+ timer_delete(&port->clean_timer);
sunvnet_port_free_tx_bufs_common(port);
port->rmtu = 0;
port->tso = (port->vsw == 0); /* no tso in vsw, misbehaves in bridge */
diff --git a/drivers/net/ethernet/sunplus/spl2sw_driver.c b/drivers/net/ethernet/sunplus/spl2sw_driver.c
index 721d8ed3f302..5e0e4c9ecbb0 100644
--- a/drivers/net/ethernet/sunplus/spl2sw_driver.c
+++ b/drivers/net/ethernet/sunplus/spl2sw_driver.c
@@ -199,7 +199,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_start_xmit = spl2sw_ethernet_start_xmit,
.ndo_set_rx_mode = spl2sw_ethernet_set_rx_mode,
.ndo_set_mac_address = spl2sw_ethernet_set_mac_address,
- .ndo_do_ioctl = phy_do_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl,
.ndo_tx_timeout = spl2sw_ethernet_tx_timeout,
};
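Only the hook changes here: MII/PHY ioctls (SIOCGMIIPHY, SIOCGMIIREG,
SIOCSMIIREG) are dispatched through .ndo_eth_ioctl and no longer reach
.ndo_do_ioctl, so the generic phy_do_ioctl() helper has to be wired to the
new callback. A hedged sketch, with example_ops as a placeholder name:

	static const struct net_device_ops example_ops = {
		/* phy_do_ioctl() forwards MII ioctls to the attached PHY */
		.ndo_eth_ioctl = phy_do_ioctl,
	};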
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
index d1793b6154c7..37101dc50d04 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
@@ -360,7 +360,8 @@ static irqreturn_t xlgmac_dma_isr(int irq, void *data)
static void xlgmac_tx_timer(struct timer_list *t)
{
- struct xlgmac_channel *channel = from_timer(channel, t, tx_timer);
+ struct xlgmac_channel *channel = timer_container_of(channel, t,
+ tx_timer);
struct xlgmac_pdata *pdata = channel->pdata;
struct napi_struct *napi;
@@ -405,7 +406,7 @@ static void xlgmac_stop_timers(struct xlgmac_pdata *pdata)
if (!channel->tx_ring)
break;
- del_timer_sync(&channel->tx_timer);
+ timer_delete_sync(&channel->tx_timer);
}
}
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index fc77f424f90b..2cee1f05ac47 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -276,7 +276,7 @@ static irqreturn_t bdx_isr_napi(int irq, void *dev)
* currently intrs are disabled (since we read ISR),
* and we have failed to register next poll.
* so we read the regs to trigger chip
- * and allow further interupts. */
+ * and allow further interrupts. */
READ_REG(priv, regTXF_WPTR_0);
READ_REG(priv, regRXD_WPTR_0);
}
diff --git a/drivers/net/ethernet/tehuti/tn40.c b/drivers/net/ethernet/tehuti/tn40.c
index 259bdac24cf2..558b791a97ed 100644
--- a/drivers/net/ethernet/tehuti/tn40.c
+++ b/drivers/net/ethernet/tehuti/tn40.c
@@ -1778,7 +1778,7 @@ static int tn40_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = tn40_phy_register(priv);
if (ret) {
dev_err(&pdev->dev, "failed to set up PHY.\n");
- goto err_free_irq;
+ goto err_cleanup_swnodes;
}
ret = tn40_priv_init(priv);
@@ -1795,6 +1795,8 @@ static int tn40_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_unregister_phydev:
tn40_phy_unregister(priv);
+err_cleanup_swnodes:
+ tn40_swnodes_cleanup(priv);
err_free_irq:
pci_free_irq_vectors(pdev);
err_unset_drvdata:
@@ -1816,6 +1818,7 @@ static void tn40_remove(struct pci_dev *pdev)
unregister_netdev(ndev);
tn40_phy_unregister(priv);
+ tn40_swnodes_cleanup(priv);
pci_free_irq_vectors(priv->pdev);
pci_set_drvdata(pdev, NULL);
iounmap(priv->regs);
@@ -1832,6 +1835,10 @@ static const struct pci_device_id tn40_id_table[] = {
PCI_VENDOR_ID_ASUSTEK, 0x8709) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022,
PCI_VENDOR_ID_EDIMAX, 0x8103) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, PCI_DEVICE_ID_TEHUTI_TN9510,
+ PCI_VENDOR_ID_TEHUTI, 0x3015) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, PCI_DEVICE_ID_TEHUTI_TN9510,
+ PCI_VENDOR_ID_EDIMAX, 0x8102) },
{ }
};
diff --git a/drivers/net/ethernet/tehuti/tn40.h b/drivers/net/ethernet/tehuti/tn40.h
index 490781fe5120..25da8686d469 100644
--- a/drivers/net/ethernet/tehuti/tn40.h
+++ b/drivers/net/ethernet/tehuti/tn40.h
@@ -4,10 +4,13 @@
#ifndef _TN40_H_
#define _TN40_H_
+#include <linux/property.h>
#include "tn40_regs.h"
#define TN40_DRV_NAME "tn40xx"
+#define PCI_DEVICE_ID_TEHUTI_TN9510 0x4025
+
#define TN40_MDIO_SPEED_1MHZ (1)
#define TN40_MDIO_SPEED_6MHZ (6)
@@ -102,10 +105,39 @@ struct tn40_txdb {
int size; /* Number of elements in the db */
};
+#define NODE_PROP(_NAME, _PROP) ( \
+ (const struct software_node) { \
+ .name = _NAME, \
+ .properties = _PROP, \
+ })
+
+#define NODE_PAR_PROP(_NAME, _PAR, _PROP) ( \
+ (const struct software_node) { \
+ .name = _NAME, \
+ .parent = _PAR, \
+ .properties = _PROP, \
+ })
+
+enum tn40_swnodes {
+ SWNODE_MDIO,
+ SWNODE_PHY,
+ SWNODE_MAX
+};
+
+struct tn40_nodes {
+ char phy_name[32];
+ char mdio_name[32];
+ struct property_entry phy_props[3];
+ struct software_node swnodes[SWNODE_MAX];
+ const struct software_node *group[SWNODE_MAX + 1];
+};
+
struct tn40_priv {
struct net_device *ndev;
struct pci_dev *pdev;
+ struct tn40_nodes nodes;
+
struct napi_struct napi;
/* RX FIFOs: 1 for data (full) descs, and 2 for free descs */
struct tn40_rxd_fifo rxd_fifo0;
@@ -225,6 +257,7 @@ static inline void tn40_write_reg(struct tn40_priv *priv, u32 reg, u32 val)
int tn40_set_link_speed(struct tn40_priv *priv, u32 speed);
+void tn40_swnodes_cleanup(struct tn40_priv *priv);
int tn40_mdiobus_init(struct tn40_priv *priv);
int tn40_phy_register(struct tn40_priv *priv);
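NODE_PROP() and NODE_PAR_PROP() are plain compound literals; a hedged
illustration (not from the patch) of the software_node hierarchy the PHY
entry built from them is equivalent to, with a fixed instance name for
readability:

	static const struct property_entry phy_props[] = {
		PROPERTY_ENTRY_STRING("compatible", "ethernet-phy-id03a1.b4a3"),
		PROPERTY_ENTRY_U32("reg", 1),
		{ }
	};

	static const struct software_node mdio_node = {
		.name = "tn40_mdio-0",		/* placeholder instance name */
	};

	static const struct software_node phy_node = {
		.name = "ethernet-phy@1",
		.parent = &mdio_node,
		.properties = phy_props,
	};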
diff --git a/drivers/net/ethernet/tehuti/tn40_mdio.c b/drivers/net/ethernet/tehuti/tn40_mdio.c
index af18615d64a8..fb1a4a2e4dbc 100644
--- a/drivers/net/ethernet/tehuti/tn40_mdio.c
+++ b/drivers/net/ethernet/tehuti/tn40_mdio.c
@@ -14,6 +14,8 @@
(FIELD_PREP(TN40_MDIO_PRTAD_MASK, (port))))
#define TN40_MDIO_CMD_READ BIT(15)
+#define AQR105_FIRMWARE "tehuti/aqr105-tn40xx.cld"
+
static void tn40_mdio_set_speed(struct tn40_priv *priv, u32 speed)
{
void __iomem *regs = priv->regs;
@@ -111,6 +113,56 @@ static int tn40_mdio_write_c45(struct mii_bus *mii_bus, int addr, int devnum,
return tn40_mdio_write(mii_bus->priv, addr, devnum, regnum, val);
}
+/* registers an mdio node and an aqr105 PHY at address 1
+ * tn40_mdio-%id {
+ * ethernet-phy@1 {
+ * compatible = "ethernet-phy-id03a1.b4a3";
+ * reg = <1>;
+ * firmware-name = AQR105_FIRMWARE;
+ * };
+ * };
+ */
+static int tn40_swnodes_register(struct tn40_priv *priv)
+{
+ struct tn40_nodes *nodes = &priv->nodes;
+ struct pci_dev *pdev = priv->pdev;
+ struct software_node *swnodes;
+ u32 id;
+
+ id = pci_dev_id(pdev);
+
+ snprintf(nodes->phy_name, sizeof(nodes->phy_name), "ethernet-phy@1");
+ snprintf(nodes->mdio_name, sizeof(nodes->mdio_name), "tn40_mdio-%x",
+ id);
+
+ swnodes = nodes->swnodes;
+
+ swnodes[SWNODE_MDIO] = NODE_PROP(nodes->mdio_name, NULL);
+
+ nodes->phy_props[0] = PROPERTY_ENTRY_STRING("compatible",
+ "ethernet-phy-id03a1.b4a3");
+ nodes->phy_props[1] = PROPERTY_ENTRY_U32("reg", 1);
+ nodes->phy_props[2] = PROPERTY_ENTRY_STRING("firmware-name",
+ AQR105_FIRMWARE);
+ swnodes[SWNODE_PHY] = NODE_PAR_PROP(nodes->phy_name,
+ &swnodes[SWNODE_MDIO],
+ nodes->phy_props);
+
+ nodes->group[SWNODE_PHY] = &swnodes[SWNODE_PHY];
+ nodes->group[SWNODE_MDIO] = &swnodes[SWNODE_MDIO];
+ return software_node_register_node_group(nodes->group);
+}
+
+void tn40_swnodes_cleanup(struct tn40_priv *priv)
+{
+ /* cleanup of swnodes is only needed for AQR105-based cards */
+ if (priv->pdev->device == PCI_DEVICE_ID_TEHUTI_TN9510) {
+ fwnode_handle_put(dev_fwnode(&priv->mdio->dev));
+ device_remove_software_node(&priv->mdio->dev);
+ software_node_unregister_node_group(priv->nodes.group);
+ }
+}
+
int tn40_mdiobus_init(struct tn40_priv *priv)
{
struct pci_dev *pdev = priv->pdev;
@@ -129,14 +181,40 @@ int tn40_mdiobus_init(struct tn40_priv *priv)
bus->read_c45 = tn40_mdio_read_c45;
bus->write_c45 = tn40_mdio_write_c45;
+ priv->mdio = bus;
+ /* provide swnodes for AQR105-based cards only */
+ if (pdev->device == PCI_DEVICE_ID_TEHUTI_TN9510) {
+ ret = tn40_swnodes_register(priv);
+ if (ret) {
+ pr_err("swnodes failed\n");
+ return ret;
+ }
+
+ ret = device_add_software_node(&bus->dev,
+ priv->nodes.group[SWNODE_MDIO]);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "device_add_software_node failed: %d\n", ret);
+ goto err_swnodes_unregister;
+ }
+ }
+
+ tn40_mdio_set_speed(priv, TN40_MDIO_SPEED_6MHZ);
ret = devm_mdiobus_register(&pdev->dev, bus);
if (ret) {
dev_err(&pdev->dev, "failed to register mdiobus %d %u %u\n",
ret, bus->state, MDIOBUS_UNREGISTERED);
- return ret;
+ goto err_swnodes_cleanup;
}
- tn40_mdio_set_speed(priv, TN40_MDIO_SPEED_6MHZ);
- priv->mdio = bus;
return 0;
+
+err_swnodes_unregister:
+ software_node_unregister_node_group(priv->nodes.group);
+ return ret;
+err_swnodes_cleanup:
+ tn40_swnodes_cleanup(priv);
+ return ret;
}
+
+MODULE_FIRMWARE(AQR105_FIRMWARE);
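A condensed sketch of the setup/teardown ordering the hunks above introduce
(error handling trimmed; all names are taken from the patch): the node group
is registered before it is attached to the MDIO bus device, and teardown
walks the same steps in reverse.

	/* probe path, AQR105-based TN9510 boards only */
	ret = tn40_swnodes_register(priv);
	if (ret)
		return ret;
	ret = device_add_software_node(&bus->dev, priv->nodes.group[SWNODE_MDIO]);
	if (ret)
		goto err_swnodes_unregister;

	/* remove/error path */
	tn40_swnodes_cleanup(priv);	/* put fwnode, detach node, unregister group */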
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 0d5a862cd78a..a54d71155263 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -99,6 +99,7 @@ config TI_K3_AM65_CPSW_NUSS
select NET_DEVLINK
select TI_DAVINCI_MDIO
select PHYLINK
+ select PAGE_POOL
select TI_K3_CPPI_DESC_POOL
imply PHY_TI_GMII_SEL
depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS
@@ -204,6 +205,7 @@ config TI_ICSSG_PRUETH_SR1
select PHYLIB
select TI_ICSS_IEP
select TI_K3_CPPI_DESC_POOL
+ select PAGE_POOL
depends on PRU_REMOTEPROC
depends on NET_SWITCHDEV
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
@@ -227,4 +229,16 @@ config TI_ICSS_IEP
To compile this driver as a module, choose M here. The module
will be called icss_iep.
+config TI_PRUETH
+ tristate "TI PRU Ethernet EMAC driver"
+ depends on PRU_REMOTEPROC
+ depends on NET_SWITCHDEV
+ select TI_ICSS_IEP
+ imply PTP_1588_CLOCK
+ help
+ Some TI SoCs have Programmable Realtime Unit (PRU) cores which can
+ support single or dual Ethernet ports with the help of firmware code
+ running on the PRU cores. This driver supports remoteproc-based
+ communication with the PRU firmware to expose an Ethernet interface to Linux.
+
endif # NET_VENDOR_TI
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index cbcf44806924..93c0a4d0e33a 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -3,6 +3,9 @@
# Makefile for the TI network device drivers.
#
+obj-$(CONFIG_TI_PRUETH) += icssm-prueth.o
+icssm-prueth-y := icssm/icssm_prueth.o
+
obj-$(CONFIG_TI_CPSW) += cpsw-common.o
obj-$(CONFIG_TI_DAVINCI_EMAC) += cpsw-common.o
obj-$(CONFIG_TI_CPSW_SWITCHDEV) += cpsw-common.o
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index 9032444435e9..c57497074ae6 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -694,17 +694,20 @@ static int am65_cpsw_get_ethtool_ts_info(struct net_device *ndev,
struct kernel_ethtool_ts_info *info)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
- unsigned int ptp_v2_filter;
-
- ptp_v2_filter = BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
- BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
- BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
- BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+ unsigned int ptp_filter;
+
+ ptp_filter = BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ);
if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
return ethtool_op_get_ts_info(ndev, info);
@@ -716,7 +719,7 @@ static int am65_cpsw_get_ethtool_ts_info(struct net_device *ndev,
SOF_TIMESTAMPING_RAW_HARDWARE;
info->phc_index = am65_cpts_phc_index(common->cpts);
info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
- info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | ptp_v2_filter;
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | ptp_filter;
return 0;
}
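rx_filters in struct kernel_ethtool_ts_info is a bitmask over the
HWTSTAMP_FILTER_* enum, so the rename from ptp_v2_filter to ptp_filter simply
reflects that PTP v1 L4 modes are now advertised alongside v2. A trimmed,
hedged illustration of the resulting mask as reported by "ethtool -T <iface>":

	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT);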
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 6201a09fa5f0..5924db6be3fe 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -32,6 +32,7 @@
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>
#include <net/page_pool/helpers.h>
+#include <net/dsa.h>
#include <net/switchdev.h>
#include "cpsw_ale.h"
@@ -71,6 +72,8 @@
#define AM65_CPSW_PORT_REG_RX_PRI_MAP 0x020
#define AM65_CPSW_PORT_REG_RX_MAXLEN 0x024
+#define AM65_CPSW_PORTN_REG_CTL 0x004
+#define AM65_CPSW_PORTN_REG_DSCP_MAP 0x120
#define AM65_CPSW_PORTN_REG_SA_L 0x308
#define AM65_CPSW_PORTN_REG_SA_H 0x30c
#define AM65_CPSW_PORTN_REG_TS_CTL 0x310
@@ -94,6 +97,10 @@
/* AM65_CPSW_PORT_REG_PRI_CTL */
#define AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN BIT(8)
+/* AM65_CPSW_PN_REG_CTL */
+#define AM65_CPSW_PN_REG_CTL_DSCP_IPV4_EN BIT(1)
+#define AM65_CPSW_PN_REG_CTL_DSCP_IPV6_EN BIT(2)
+
/* AM65_CPSW_PN_TS_CTL register fields */
#define AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN BIT(4)
#define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN BIT(5)
@@ -157,6 +164,7 @@
#define AM65_CPSW_CPPI_TX_PKT_TYPE 0x7
/* XDP */
+#define AM65_CPSW_XDP_TX BIT(2)
#define AM65_CPSW_XDP_CONSUMED BIT(1)
#define AM65_CPSW_XDP_REDIRECT BIT(0)
#define AM65_CPSW_XDP_PASS 0
@@ -176,6 +184,99 @@ static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave,
writel(mac_lo, slave->port_base + AM65_CPSW_PORTN_REG_SA_L);
}
+#define AM65_CPSW_DSCP_MAX GENMASK(5, 0)
+#define AM65_CPSW_PRI_MAX GENMASK(2, 0)
+#define AM65_CPSW_DSCP_PRI_PER_REG 8
+#define AM65_CPSW_DSCP_PRI_SIZE 4 /* in bits */
+static int am65_cpsw_port_set_dscp_map(struct am65_cpsw_port *slave, u8 dscp, u8 pri)
+{
+ int reg_ofs;
+ int bit_ofs;
+ u32 val;
+
+ if (dscp > AM65_CPSW_DSCP_MAX)
+ return -EINVAL;
+
+ if (pri > AM65_CPSW_PRI_MAX)
+ return -EINVAL;
+
+ /* 32-bit register offset to this dscp */
+ reg_ofs = (dscp / AM65_CPSW_DSCP_PRI_PER_REG) * 4;
+ /* bit field offset to this dscp */
+ bit_ofs = AM65_CPSW_DSCP_PRI_SIZE * (dscp % AM65_CPSW_DSCP_PRI_PER_REG);
+
+ val = readl(slave->port_base + AM65_CPSW_PORTN_REG_DSCP_MAP + reg_ofs);
+ val &= ~(AM65_CPSW_PRI_MAX << bit_ofs); /* clear */
+ val |= pri << bit_ofs; /* set */
+ writel(val, slave->port_base + AM65_CPSW_PORTN_REG_DSCP_MAP + reg_ofs);
+
+ return 0;
+}
+
+static void am65_cpsw_port_enable_dscp_map(struct am65_cpsw_port *slave)
+{
+ int dscp, pri;
+ u32 val;
+
+ /* Default DSCP to User Priority mapping as per:
+ * https://datatracker.ietf.org/doc/html/rfc8325#section-4.3
+ * and
+ * https://datatracker.ietf.org/doc/html/rfc8622#section-11
+ */
+ for (dscp = 0; dscp <= AM65_CPSW_DSCP_MAX; dscp++) {
+ switch (dscp) {
+ case 56: /* CS7 */
+ case 48: /* CS6 */
+ pri = 7;
+ break;
+ case 46: /* EF */
+ case 44: /* VA */
+ pri = 6;
+ break;
+ case 40: /* CS5 */
+ pri = 5;
+ break;
+ case 34: /* AF41 */
+ case 36: /* AF42 */
+ case 38: /* AF43 */
+ case 32: /* CS4 */
+ case 26: /* AF31 */
+ case 28: /* AF32 */
+ case 30: /* AF33 */
+ case 24: /* CS3 */
+ pri = 4;
+ break;
+ case 18: /* AF21 */
+ case 20: /* AF22 */
+ case 22: /* AF23 */
+ pri = 3;
+ break;
+ case 16: /* CS2 */
+ case 10: /* AF11 */
+ case 12: /* AF12 */
+ case 14: /* AF13 */
+ case 0: /* DF */
+ pri = 0;
+ break;
+ case 8: /* CS1 */
+ case 1: /* LE */
+ pri = 1;
+ break;
+ default:
+ pri = 0;
+ break;
+ }
+
+ am65_cpsw_port_set_dscp_map(slave, dscp, pri);
+ }
+
+ /* enable port IPV4 and IPV6 DSCP for this port */
+ val = readl(slave->port_base + AM65_CPSW_PORTN_REG_CTL);
+ val |= AM65_CPSW_PN_REG_CTL_DSCP_IPV4_EN |
+ AM65_CPSW_PN_REG_CTL_DSCP_IPV6_EN;
+ writel(val, slave->port_base + AM65_CPSW_PORTN_REG_CTL);
+}
+
static void am65_cpsw_sl_ctl_reset(struct am65_cpsw_port *port)
{
cpsw_sl_reset(port->slave.mac_sl, 100);
@@ -326,7 +427,7 @@ static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
if (netif_tx_queue_stopped(netif_txq)) {
/* try recover if stopped by us */
- txq_trans_update(netif_txq);
+ txq_trans_update(ndev, netif_txq);
netif_tx_wake_queue(netif_txq);
}
}
@@ -337,9 +438,9 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
struct cppi5_host_desc_t *desc_rx;
struct device *dev = common->dev;
+ struct am65_cpsw_swdata *swdata;
dma_addr_t desc_dma;
dma_addr_t buf_dma;
- void *swdata;
desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
if (!desc_rx) {
@@ -363,7 +464,8 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
cppi5_hdesc_attach_buf(desc_rx, buf_dma, AM65_CPSW_MAX_PACKET_SIZE,
buf_dma, AM65_CPSW_MAX_PACKET_SIZE);
swdata = cppi5_hdesc_get_swdata(desc_rx);
- *((void **)swdata) = page_address(page);
+ swdata->page = page;
+ swdata->flow_id = flow_idx;
return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, flow_idx,
desc_rx, desc_dma);
@@ -397,35 +499,62 @@ static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common);
static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common);
static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port);
static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);
+static inline void am65_cpsw_put_page(struct am65_cpsw_rx_flow *flow,
+ struct page *page,
+ bool allow_direct);
+static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma);
+static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma);
-static void am65_cpsw_destroy_xdp_rxqs(struct am65_cpsw_common *common)
+static void am65_cpsw_destroy_rxq(struct am65_cpsw_common *common, int id)
{
struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
struct am65_cpsw_rx_flow *flow;
struct xdp_rxq_info *rxq;
- int id, port;
+ int port;
- for (id = 0; id < common->rx_ch_num_flows; id++) {
- flow = &rx_chn->flows[id];
+ flow = &rx_chn->flows[id];
+ napi_disable(&flow->napi_rx);
+ hrtimer_cancel(&flow->rx_hrtimer);
+ k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, id, rx_chn,
+ am65_cpsw_nuss_rx_cleanup);
- for (port = 0; port < common->port_num; port++) {
- if (!common->ports[port].ndev)
- continue;
+ for (port = 0; port < common->port_num; port++) {
+ if (!common->ports[port].ndev)
+ continue;
- rxq = &common->ports[port].xdp_rxq[id];
+ rxq = &common->ports[port].xdp_rxq[id];
- if (xdp_rxq_info_is_reg(rxq))
- xdp_rxq_info_unreg(rxq);
- }
+ if (xdp_rxq_info_is_reg(rxq))
+ xdp_rxq_info_unreg(rxq);
+ }
- if (flow->page_pool) {
- page_pool_destroy(flow->page_pool);
- flow->page_pool = NULL;
- }
+ if (flow->page_pool) {
+ page_pool_destroy(flow->page_pool);
+ flow->page_pool = NULL;
}
}
-static int am65_cpsw_create_xdp_rxqs(struct am65_cpsw_common *common)
+static void am65_cpsw_destroy_rxqs(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ int id;
+
+ reinit_completion(&common->tdown_complete);
+ k3_udma_glue_tdown_rx_chn(rx_chn->rx_chn, true);
+
+ if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
+ id = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000));
+ if (!id)
+ dev_err(common->dev, "rx teardown timeout\n");
+ }
+
+ for (id = common->rx_ch_num_flows - 1; id >= 0; id--)
+ am65_cpsw_destroy_rxq(common, id);
+
+ k3_udma_glue_disable_rx_chn(common->rx_chns.rx_chn);
+}
+
+static int am65_cpsw_create_rxq(struct am65_cpsw_common *common, int id)
{
struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
struct page_pool_params pp_params = {
@@ -440,45 +569,162 @@ static int am65_cpsw_create_xdp_rxqs(struct am65_cpsw_common *common)
struct am65_cpsw_rx_flow *flow;
struct xdp_rxq_info *rxq;
struct page_pool *pool;
- int id, port, ret;
+ struct page *page;
+ int port, ret, i;
+
+ flow = &rx_chn->flows[id];
+ pp_params.napi = &flow->napi_rx;
+ pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool)) {
+ ret = PTR_ERR(pool);
+ return ret;
+ }
+
+ flow->page_pool = pool;
+
+ /* using same page pool is allowed as no running rx handlers
+ * simultaneously for both ndevs
+ */
+ for (port = 0; port < common->port_num; port++) {
+ if (!common->ports[port].ndev)
+ /* FIXME should we BUG here? */
+ continue;
+
+ rxq = &common->ports[port].xdp_rxq[id];
+ ret = xdp_rxq_info_reg(rxq, common->ports[port].ndev,
+ id, flow->napi_rx.napi_id);
+ if (ret)
+ goto err;
+
+ ret = xdp_rxq_info_reg_mem_model(rxq,
+ MEM_TYPE_PAGE_POOL,
+ pool);
+ if (ret)
+ goto err;
+ }
+
+ for (i = 0; i < AM65_CPSW_MAX_RX_DESC; i++) {
+ page = page_pool_dev_alloc_pages(flow->page_pool);
+ if (!page) {
+ dev_err(common->dev, "cannot allocate page in flow %d\n",
+ id);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = am65_cpsw_nuss_rx_push(common, page, id);
+ if (ret < 0) {
+ dev_err(common->dev,
+ "cannot submit page to rx channel flow %d, error %d\n",
+ id, ret);
+ am65_cpsw_put_page(flow, page, false);
+ goto err;
+ }
+ }
+
+ napi_enable(&flow->napi_rx);
+ return 0;
+
+err:
+ am65_cpsw_destroy_rxq(common, id);
+ return ret;
+}
+
+static int am65_cpsw_create_rxqs(struct am65_cpsw_common *common)
+{
+ int id, ret;
for (id = 0; id < common->rx_ch_num_flows; id++) {
- flow = &rx_chn->flows[id];
- pp_params.napi = &flow->napi_rx;
- pool = page_pool_create(&pp_params);
- if (IS_ERR(pool)) {
- ret = PTR_ERR(pool);
+ ret = am65_cpsw_create_rxq(common, id);
+ if (ret) {
+ dev_err(common->dev, "couldn't create rxq %d: %d\n",
+ id, ret);
goto err;
}
+ }
- flow->page_pool = pool;
+ ret = k3_udma_glue_enable_rx_chn(common->rx_chns.rx_chn);
+ if (ret) {
+ dev_err(common->dev, "couldn't enable rx chn: %d\n", ret);
+ goto err;
+ }
- /* using same page pool is allowed as no running rx handlers
- * simultaneously for both ndevs
- */
- for (port = 0; port < common->port_num; port++) {
- if (!common->ports[port].ndev)
- continue;
+ return 0;
+
+err:
+ for (--id; id >= 0; id--)
+ am65_cpsw_destroy_rxq(common, id);
+
+ return ret;
+}
- rxq = &common->ports[port].xdp_rxq[id];
+static void am65_cpsw_destroy_txq(struct am65_cpsw_common *common, int id)
+{
+ struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[id];
- ret = xdp_rxq_info_reg(rxq, common->ports[port].ndev,
- id, flow->napi_rx.napi_id);
- if (ret)
- goto err;
+ napi_disable(&tx_chn->napi_tx);
+ hrtimer_cancel(&tx_chn->tx_hrtimer);
+ k3_udma_glue_reset_tx_chn(tx_chn->tx_chn, tx_chn,
+ am65_cpsw_nuss_tx_cleanup);
+ k3_udma_glue_disable_tx_chn(tx_chn->tx_chn);
+}
- ret = xdp_rxq_info_reg_mem_model(rxq,
- MEM_TYPE_PAGE_POOL,
- pool);
- if (ret)
- goto err;
+static void am65_cpsw_destroy_txqs(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
+ int id;
+
+ /* shutdown tx channels */
+ atomic_set(&common->tdown_cnt, common->tx_ch_num);
+ /* ensure new tdown_cnt value is visible */
+ smp_mb__after_atomic();
+ reinit_completion(&common->tdown_complete);
+
+ for (id = 0; id < common->tx_ch_num; id++)
+ k3_udma_glue_tdown_tx_chn(tx_chn[id].tx_chn, false);
+
+ id = wait_for_completion_timeout(&common->tdown_complete,
+ msecs_to_jiffies(1000));
+ if (!id)
+ dev_err(common->dev, "tx teardown timeout\n");
+
+ for (id = common->tx_ch_num - 1; id >= 0; id--)
+ am65_cpsw_destroy_txq(common, id);
+}
+
+static int am65_cpsw_create_txq(struct am65_cpsw_common *common, int id)
+{
+ struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[id];
+ int ret;
+
+ ret = k3_udma_glue_enable_tx_chn(tx_chn->tx_chn);
+ if (ret)
+ return ret;
+
+ napi_enable(&tx_chn->napi_tx);
+
+ return 0;
+}
+
+static int am65_cpsw_create_txqs(struct am65_cpsw_common *common)
+{
+ int id, ret;
+
+ for (id = 0; id < common->tx_ch_num; id++) {
+ ret = am65_cpsw_create_txq(common, id);
+ if (ret) {
+ dev_err(common->dev, "couldn't create txq %d: %d\n",
+ id, ret);
+ goto err;
}
}
return 0;
err:
- am65_cpsw_destroy_xdp_rxqs(common);
+ for (--id; id >= 0; id--)
+ am65_cpsw_destroy_txq(common, id);
+
return ret;
}
@@ -519,36 +765,30 @@ static enum am65_cpsw_tx_buf_type am65_cpsw_nuss_buf_type(struct am65_cpsw_tx_ch
static inline void am65_cpsw_put_page(struct am65_cpsw_rx_flow *flow,
struct page *page,
- bool allow_direct,
- int desc_idx)
+ bool allow_direct)
{
page_pool_put_full_page(flow->page_pool, page, allow_direct);
- flow->pages[desc_idx] = NULL;
}
static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
{
- struct am65_cpsw_rx_flow *flow = data;
+ struct am65_cpsw_rx_chn *rx_chn = data;
struct cppi5_host_desc_t *desc_rx;
- struct am65_cpsw_rx_chn *rx_chn;
+ struct am65_cpsw_swdata *swdata;
dma_addr_t buf_dma;
+ struct page *page;
u32 buf_dma_len;
- void *page_addr;
- void **swdata;
- int desc_idx;
+ u32 flow_id;
- rx_chn = &flow->common->rx_chns;
desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_rx);
- page_addr = *swdata;
+ page = swdata->page;
+ flow_id = swdata->flow_id;
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
-
- desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx,
- rx_chn->dsize_log2);
- am65_cpsw_put_page(flow, virt_to_page(page_addr), false, desc_idx);
+ am65_cpsw_put_page(&rx_chn->flows[flow_id], page, false);
}
static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
@@ -589,31 +829,38 @@ static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
{
struct am65_cpsw_tx_chn *tx_chn = data;
+ enum am65_cpsw_tx_buf_type buf_type;
+ struct am65_cpsw_tx_swdata *swdata;
struct cppi5_host_desc_t *desc_tx;
+ struct xdp_frame *xdpf;
struct sk_buff *skb;
- void **swdata;
desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_tx);
- skb = *(swdata);
- am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
+ buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
+ if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
+ skb = swdata->skb;
+ dev_kfree_skb_any(skb);
+ } else {
+ xdpf = swdata->xdpf;
+ xdp_return_frame(xdpf);
+ }
- dev_kfree_skb_any(skb);
+ am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
}
static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
struct net_device *ndev,
- unsigned int len)
+ unsigned int len,
+ unsigned int headroom)
{
struct sk_buff *skb;
- len += AM65_CPSW_HEADROOM;
-
skb = build_skb(page_addr, len);
if (unlikely(!skb))
return NULL;
- skb_reserve(skb, AM65_CPSW_HEADROOM);
+ skb_reserve(skb, headroom);
skb->dev = ndev;
return skb;
@@ -622,12 +869,8 @@ static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
{
struct am65_cpsw_host *host_p = am65_common_get_host(common);
- struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
- struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
- int port_idx, i, ret, tx, flow_idx;
- struct am65_cpsw_rx_flow *flow;
u32 val, port_mask;
- struct page *page;
+ int port_idx, ret;
if (common->usage_count)
return 0;
@@ -667,7 +910,7 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
ALE_DEFAULT_THREAD_ID, 0);
cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
ALE_DEFAULT_THREAD_ENABLE, 1);
- /* switch to vlan unaware mode */
+ /* switch to vlan aware mode */
cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, 1);
cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
@@ -687,152 +930,38 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
am65_cpsw_qos_tx_p0_rate_init(common);
- ret = am65_cpsw_create_xdp_rxqs(common);
- if (ret) {
- dev_err(common->dev, "Failed to create XDP rx queues\n");
+ ret = am65_cpsw_create_rxqs(common);
+ if (ret)
return ret;
- }
-
- for (flow_idx = 0; flow_idx < common->rx_ch_num_flows; flow_idx++) {
- flow = &rx_chn->flows[flow_idx];
- for (i = 0; i < AM65_CPSW_MAX_RX_DESC; i++) {
- page = page_pool_dev_alloc_pages(flow->page_pool);
- if (!page) {
- dev_err(common->dev, "cannot allocate page in flow %d\n",
- flow_idx);
- ret = -ENOMEM;
- goto fail_rx;
- }
- flow->pages[i] = page;
- ret = am65_cpsw_nuss_rx_push(common, page, flow_idx);
- if (ret < 0) {
- dev_err(common->dev,
- "cannot submit page to rx channel flow %d, error %d\n",
- flow_idx, ret);
- am65_cpsw_put_page(flow, page, false, i);
- goto fail_rx;
- }
- }
- }
-
- ret = k3_udma_glue_enable_rx_chn(rx_chn->rx_chn);
- if (ret) {
- dev_err(common->dev, "couldn't enable rx chn: %d\n", ret);
- goto fail_rx;
- }
-
- for (i = 0; i < common->rx_ch_num_flows ; i++) {
- napi_enable(&rx_chn->flows[i].napi_rx);
- if (rx_chn->flows[i].irq_disabled) {
- rx_chn->flows[i].irq_disabled = false;
- enable_irq(rx_chn->flows[i].irq);
- }
- }
-
- for (tx = 0; tx < common->tx_ch_num; tx++) {
- ret = k3_udma_glue_enable_tx_chn(tx_chn[tx].tx_chn);
- if (ret) {
- dev_err(common->dev, "couldn't enable tx chn %d: %d\n",
- tx, ret);
- tx--;
- goto fail_tx;
- }
- napi_enable(&tx_chn[tx].napi_tx);
- }
+ ret = am65_cpsw_create_txqs(common);
+ if (ret)
+ goto cleanup_rx;
dev_dbg(common->dev, "cpsw_nuss started\n");
return 0;
-fail_tx:
- while (tx >= 0) {
- napi_disable(&tx_chn[tx].napi_tx);
- k3_udma_glue_disable_tx_chn(tx_chn[tx].tx_chn);
- tx--;
- }
-
- for (flow_idx = 0; i < common->rx_ch_num_flows; flow_idx++) {
- flow = &rx_chn->flows[flow_idx];
- if (!flow->irq_disabled) {
- disable_irq(flow->irq);
- flow->irq_disabled = true;
- }
- napi_disable(&flow->napi_rx);
- }
-
- k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
-
-fail_rx:
- for (i = 0; i < common->rx_ch_num_flows; i++)
- k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, &rx_chn->flows[i],
- am65_cpsw_nuss_rx_cleanup, 0);
-
- am65_cpsw_destroy_xdp_rxqs(common);
+cleanup_rx:
+ am65_cpsw_destroy_rxqs(common);
return ret;
}
static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
{
- struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
- struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
- int i;
-
if (common->usage_count != 1)
return 0;
cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
- /* shutdown tx channels */
- atomic_set(&common->tdown_cnt, common->tx_ch_num);
- /* ensure new tdown_cnt value is visible */
- smp_mb__after_atomic();
- reinit_completion(&common->tdown_complete);
-
- for (i = 0; i < common->tx_ch_num; i++)
- k3_udma_glue_tdown_tx_chn(tx_chn[i].tx_chn, false);
-
- i = wait_for_completion_timeout(&common->tdown_complete,
- msecs_to_jiffies(1000));
- if (!i)
- dev_err(common->dev, "tx timeout\n");
- for (i = 0; i < common->tx_ch_num; i++) {
- napi_disable(&tx_chn[i].napi_tx);
- hrtimer_cancel(&tx_chn[i].tx_hrtimer);
- }
-
- for (i = 0; i < common->tx_ch_num; i++) {
- k3_udma_glue_reset_tx_chn(tx_chn[i].tx_chn, &tx_chn[i],
- am65_cpsw_nuss_tx_cleanup);
- k3_udma_glue_disable_tx_chn(tx_chn[i].tx_chn);
- }
-
- reinit_completion(&common->tdown_complete);
- k3_udma_glue_tdown_rx_chn(rx_chn->rx_chn, true);
-
- if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
- i = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000));
- if (!i)
- dev_err(common->dev, "rx teardown timeout\n");
- }
-
- for (i = 0; i < common->rx_ch_num_flows; i++) {
- napi_disable(&rx_chn->flows[i].napi_rx);
- hrtimer_cancel(&rx_chn->flows[i].rx_hrtimer);
- k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, &rx_chn->flows[i],
- am65_cpsw_nuss_rx_cleanup, 0);
- }
-
- k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
-
+ am65_cpsw_destroy_txqs(common);
+ am65_cpsw_destroy_rxqs(common);
cpsw_ale_stop(common->ale);
writel(0, common->cpsw_base + AM65_CPSW_REG_CTL);
writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);
- am65_cpsw_destroy_xdp_rxqs(common);
-
dev_dbg(common->dev, "cpsw_nuss stopped\n");
return 0;
}
@@ -920,7 +1049,17 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
common->usage_count++;
+ /* VLAN aware CPSW mode is incompatible with some DSA tagging schemes.
+ * Therefore disable VLAN_AWARE mode if any of the ports is a DSA Port.
+ */
+ if (netdev_uses_dsa(ndev)) {
+ reg = readl(common->cpsw_base + AM65_CPSW_REG_CTL);
+ reg &= ~AM65_CPSW_CTL_VLAN_AWARE;
+ writel(reg, common->cpsw_base + AM65_CPSW_REG_CTL);
+ }
+
am65_cpsw_port_set_sl_mac(port, ndev->dev_addr);
+ am65_cpsw_port_enable_dscp_map(port);
if (common->is_emac_mode)
am65_cpsw_init_port_emac_ale(port);
@@ -958,10 +1097,10 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
struct cppi5_host_desc_t *host_desc;
+ struct am65_cpsw_tx_swdata *swdata;
struct netdev_queue *netif_txq;
dma_addr_t dma_desc, dma_buf;
u32 pkt_len = xdpf->len;
- void **swdata;
int ret;
host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
@@ -991,7 +1130,8 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
cppi5_hdesc_attach_buf(host_desc, dma_buf, pkt_len, dma_buf, pkt_len);
swdata = cppi5_hdesc_get_swdata(host_desc);
- *(swdata) = xdpf;
+ swdata->ndev = ndev;
+ swdata->xdpf = xdpf;
/* Report BQL before sending the packet */
netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
@@ -1027,20 +1167,21 @@ pool_free:
static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
struct am65_cpsw_port *port,
- struct xdp_buff *xdp,
- int desc_idx, int cpu, int *len)
+ struct xdp_buff *xdp, int *len)
{
struct am65_cpsw_common *common = flow->common;
struct net_device *ndev = port->ndev;
int ret = AM65_CPSW_XDP_CONSUMED;
struct am65_cpsw_tx_chn *tx_chn;
struct netdev_queue *netif_txq;
+ int cpu = smp_processor_id();
struct xdp_frame *xdpf;
struct bpf_prog *prog;
- struct page *page;
+ int pkt_len;
u32 act;
int err;
+ pkt_len = *len;
prog = READ_ONCE(port->xdp_prog);
if (!prog)
return AM65_CPSW_XDP_PASS;
@@ -1051,15 +1192,16 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
switch (act) {
case XDP_PASS:
- ret = AM65_CPSW_XDP_PASS;
- goto out;
+ return AM65_CPSW_XDP_PASS;
case XDP_TX:
tx_chn = &common->tx_chns[cpu % AM65_CPSW_MAX_QUEUES];
netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
xdpf = xdp_convert_buff_to_frame(xdp);
- if (unlikely(!xdpf))
+ if (unlikely(!xdpf)) {
+ ndev->stats.tx_dropped++;
goto drop;
+ }
__netif_tx_lock(netif_txq, cpu);
err = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf,
@@ -1068,16 +1210,14 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
if (err)
goto drop;
- dev_sw_netstats_tx_add(ndev, 1, *len);
- ret = AM65_CPSW_XDP_CONSUMED;
- goto out;
+ dev_sw_netstats_rx_add(ndev, pkt_len);
+ return AM65_CPSW_XDP_TX;
case XDP_REDIRECT:
if (unlikely(xdp_do_redirect(ndev, xdp, prog)))
goto drop;
- dev_sw_netstats_rx_add(ndev, *len);
- ret = AM65_CPSW_XDP_REDIRECT;
- goto out;
+ dev_sw_netstats_rx_add(ndev, pkt_len);
+ return AM65_CPSW_XDP_REDIRECT;
default:
bpf_warn_invalid_xdp_action(ndev, prog, act);
fallthrough;
@@ -1089,10 +1229,6 @@ drop:
ndev->stats.rx_dropped++;
}
- page = virt_to_head_page(xdp->data);
- am65_cpsw_put_page(flow, page, true, desc_idx);
-
-out:
return ret;
}
@@ -1130,7 +1266,7 @@ static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
}
static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
- int cpu, int *xdp_state)
+ int *xdp_state)
{
struct am65_cpsw_rx_chn *rx_chn = &flow->common->rx_chns;
u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
@@ -1138,16 +1274,16 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
struct am65_cpsw_ndev_priv *ndev_priv;
struct cppi5_host_desc_t *desc_rx;
struct device *dev = common->dev;
+ struct am65_cpsw_swdata *swdata;
struct page *page, *new_page;
dma_addr_t desc_dma, buf_dma;
struct am65_cpsw_port *port;
- int headroom, desc_idx, ret;
struct net_device *ndev;
u32 flow_idx = flow->id;
struct sk_buff *skb;
struct xdp_buff xdp;
+ int headroom, ret;
void *page_addr;
- void **swdata;
u32 *psdata;
*xdp_state = AM65_CPSW_XDP_PASS;
@@ -1170,8 +1306,8 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
__func__, flow_idx, &desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_rx);
- page_addr = *swdata;
- page = virt_to_page(page_addr);
+ page = swdata->page;
+ page_addr = page_address(page);
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
@@ -1184,31 +1320,32 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
-
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx,
- rx_chn->dsize_log2);
-
- skb = am65_cpsw_build_skb(page_addr, ndev,
- AM65_CPSW_MAX_PACKET_SIZE);
- if (unlikely(!skb)) {
- new_page = page;
- goto requeue;
- }
-
if (port->xdp_prog) {
xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]);
xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
pkt_len, false);
- *xdp_state = am65_cpsw_run_xdp(flow, port, &xdp, desc_idx,
- cpu, &pkt_len);
+ *xdp_state = am65_cpsw_run_xdp(flow, port, &xdp, &pkt_len);
+ if (*xdp_state == AM65_CPSW_XDP_CONSUMED) {
+ page = virt_to_head_page(xdp.data);
+ am65_cpsw_put_page(flow, page, true);
+ goto allocate;
+ }
+
if (*xdp_state != AM65_CPSW_XDP_PASS)
goto allocate;
- /* Compute additional headroom to be reserved */
- headroom = (xdp.data - xdp.data_hard_start) - skb_headroom(skb);
- skb_reserve(skb, headroom);
+ headroom = xdp.data - xdp.data_hard_start;
+ } else {
+ headroom = AM65_CPSW_HEADROOM;
+ }
+
+ skb = am65_cpsw_build_skb(page_addr, ndev,
+ PAGE_SIZE, headroom);
+ if (unlikely(!skb)) {
+ new_page = page;
+ goto requeue;
}
ndev_priv = netdev_priv(ndev);
@@ -1230,10 +1367,8 @@ allocate:
return -ENOMEM;
}
- flow->pages[desc_idx] = new_page;
-
if (netif_dormant(ndev)) {
- am65_cpsw_put_page(flow, new_page, true, desc_idx);
+ am65_cpsw_put_page(flow, new_page, true);
ndev->stats.rx_dropped++;
return 0;
}
@@ -1241,7 +1376,7 @@ allocate:
requeue:
ret = am65_cpsw_nuss_rx_push(common, new_page, flow_idx);
if (WARN_ON(ret < 0)) {
- am65_cpsw_put_page(flow, new_page, true, desc_idx);
+ am65_cpsw_put_page(flow, new_page, true);
ndev->stats.rx_errors++;
ndev->stats.rx_dropped++;
}
@@ -1263,7 +1398,6 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
{
struct am65_cpsw_rx_flow *flow = am65_cpsw_napi_to_rx_flow(napi_rx);
struct am65_cpsw_common *common = flow->common;
- int cpu = smp_processor_id();
int xdp_state_or = 0;
int cur_budget, ret;
int xdp_state;
@@ -1272,7 +1406,7 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
/* process only this flow */
cur_budget = budget;
while (cur_budget--) {
- ret = am65_cpsw_nuss_rx_packets(flow, cpu, &xdp_state);
+ ret = am65_cpsw_nuss_rx_packets(flow, &xdp_state);
xdp_state_or |= xdp_state;
if (ret)
break;
@@ -1300,52 +1434,6 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
return num_rx;
}
-static struct sk_buff *
-am65_cpsw_nuss_tx_compl_packet_skb(struct am65_cpsw_tx_chn *tx_chn,
- dma_addr_t desc_dma)
-{
- struct cppi5_host_desc_t *desc_tx;
- struct sk_buff *skb;
- void **swdata;
-
- desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
- desc_dma);
- swdata = cppi5_hdesc_get_swdata(desc_tx);
- skb = *(swdata);
- am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
-
- am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);
-
- dev_sw_netstats_tx_add(skb->dev, 1, skb->len);
-
- return skb;
-}
-
-static struct xdp_frame *
-am65_cpsw_nuss_tx_compl_packet_xdp(struct am65_cpsw_common *common,
- struct am65_cpsw_tx_chn *tx_chn,
- dma_addr_t desc_dma,
- struct net_device **ndev)
-{
- struct cppi5_host_desc_t *desc_tx;
- struct am65_cpsw_port *port;
- struct xdp_frame *xdpf;
- u32 port_id = 0;
- void **swdata;
-
- desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
- cppi5_desc_get_tags_ids(&desc_tx->hdr, NULL, &port_id);
- swdata = cppi5_hdesc_get_swdata(desc_tx);
- xdpf = *(swdata);
- am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
-
- port = am65_common_get_port(common, port_id);
- dev_sw_netstats_tx_add(port->ndev, 1, xdpf->len);
- *ndev = port->ndev;
-
- return xdpf;
-}
-
static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_device *ndev,
struct netdev_queue *netif_txq)
{
@@ -1366,13 +1454,17 @@ static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_d
static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
int chn, unsigned int budget, bool *tdown)
{
+ bool single_port = AM65_CPSW_IS_CPSW2G(common);
enum am65_cpsw_tx_buf_type buf_type;
+ struct am65_cpsw_tx_swdata *swdata;
+ struct cppi5_host_desc_t *desc_tx;
struct device *dev = common->dev;
struct am65_cpsw_tx_chn *tx_chn;
struct netdev_queue *netif_txq;
unsigned int total_bytes = 0;
struct net_device *ndev;
struct xdp_frame *xdpf;
+ unsigned int pkt_len;
struct sk_buff *skb;
dma_addr_t desc_dma;
int res, num_tx = 0;
@@ -1380,9 +1472,12 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
tx_chn = &common->tx_chns[chn];
while (true) {
- spin_lock(&tx_chn->lock);
+ if (!single_port)
+ spin_lock(&tx_chn->lock);
res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
- spin_unlock(&tx_chn->lock);
+ if (!single_port)
+ spin_unlock(&tx_chn->lock);
+
if (res == -ENODATA)
break;
@@ -1393,27 +1488,43 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
break;
}
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
+ desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_tx);
+ ndev = swdata->ndev;
buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
- skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma);
- ndev = skb->dev;
- total_bytes = skb->len;
+ skb = swdata->skb;
+ am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);
+ pkt_len = skb->len;
napi_consume_skb(skb, budget);
} else {
- xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, tx_chn,
- desc_dma, &ndev);
- total_bytes = xdpf->len;
+ xdpf = swdata->xdpf;
+ pkt_len = xdpf->len;
if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
xdp_return_frame_rx_napi(xdpf);
else
xdp_return_frame(xdpf);
}
+
+ total_bytes += pkt_len;
num_tx++;
+ am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
+ dev_sw_netstats_tx_add(ndev, 1, pkt_len);
+ if (!single_port) {
+ /* as packets from multi ports can be interleaved
+ * on the same channel, we have to figure out the
+ * port/queue at every packet and report it/wake queue.
+ */
+ netif_txq = netdev_get_tx_queue(ndev, chn);
+ netdev_tx_completed_queue(netif_txq, 1, pkt_len);
+ am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
+ }
+ }
+ if (single_port && num_tx) {
netif_txq = netdev_get_tx_queue(ndev, chn);
-
netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
-
am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
}
@@ -1422,66 +1533,6 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
return num_tx;
}
-static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
- int chn, unsigned int budget, bool *tdown)
-{
- enum am65_cpsw_tx_buf_type buf_type;
- struct device *dev = common->dev;
- struct am65_cpsw_tx_chn *tx_chn;
- struct netdev_queue *netif_txq;
- unsigned int total_bytes = 0;
- struct net_device *ndev;
- struct xdp_frame *xdpf;
- struct sk_buff *skb;
- dma_addr_t desc_dma;
- int res, num_tx = 0;
-
- tx_chn = &common->tx_chns[chn];
-
- while (true) {
- res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
- if (res == -ENODATA)
- break;
-
- if (cppi5_desc_is_tdcm(desc_dma)) {
- if (atomic_dec_and_test(&common->tdown_cnt))
- complete(&common->tdown_complete);
- *tdown = true;
- break;
- }
-
- buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
- if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
- skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma);
- ndev = skb->dev;
- total_bytes += skb->len;
- napi_consume_skb(skb, budget);
- } else {
- xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, tx_chn,
- desc_dma, &ndev);
- total_bytes += xdpf->len;
- if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
- xdp_return_frame_rx_napi(xdpf);
- else
- xdp_return_frame(xdpf);
- }
- num_tx++;
- }
-
- if (!num_tx)
- return 0;
-
- netif_txq = netdev_get_tx_queue(ndev, chn);
-
- netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
-
- am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
-
- dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);
-
- return num_tx;
-}
-
static enum hrtimer_restart am65_cpsw_nuss_tx_timer_callback(struct hrtimer *timer)
{
struct am65_cpsw_tx_chn *tx_chns =
@@ -1497,13 +1548,8 @@ static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
bool tdown = false;
int num_tx;
- if (AM65_CPSW_IS_CPSW2G(tx_chn->common))
- num_tx = am65_cpsw_nuss_tx_compl_packets_2g(tx_chn->common, tx_chn->id,
- budget, &tdown);
- else
- num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common,
- tx_chn->id, budget, &tdown);
-
+ num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common,
+ tx_chn->id, budget, &tdown);
if (num_tx >= budget)
return budget;
@@ -1547,12 +1593,12 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_tx_swdata *swdata;
struct device *dev = common->dev;
struct am65_cpsw_tx_chn *tx_chn;
struct netdev_queue *netif_txq;
dma_addr_t desc_dma, buf_dma;
int ret, q_idx, i;
- void **swdata;
u32 *psdata;
u32 pkt_len;
@@ -1598,7 +1644,8 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
swdata = cppi5_hdesc_get_swdata(first_desc);
- *(swdata) = skb;
+ swdata->ndev = ndev;
+ swdata->skb = skb;
psdata = cppi5_hdesc_get_psdata(first_desc);
/* HW csum offload if enabled */
@@ -1741,31 +1788,34 @@ static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev,
}
static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
u32 ts_ctrl, seq_id, ts_ctrl_ltype2, ts_vlan_ltype;
- struct hwtstamp_config cfg;
- if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
+ if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS)) {
+ NL_SET_ERR_MSG(extack, "Time stamping is not supported");
return -EOPNOTSUPP;
-
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
+ }
/* TX HW timestamp */
- switch (cfg.tx_type) {
+ switch (cfg->tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
break;
default:
+ NL_SET_ERR_MSG(extack, "TX mode is not supported");
return -ERANGE;
}
- switch (cfg.rx_filter) {
+ switch (cfg->rx_filter) {
case HWTSTAMP_FILTER_NONE:
port->rx_ts_enabled = false;
break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
@@ -1776,17 +1826,19 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
port->rx_ts_enabled = true;
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT | HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
case HWTSTAMP_FILTER_NTP_ALL:
+ NL_SET_ERR_MSG(extack, "RX filter is not supported");
return -EOPNOTSUPP;
default:
+ NL_SET_ERR_MSG(extack, "RX filter is not supported");
return -ERANGE;
}
- port->tx_ts_enabled = (cfg.tx_type == HWTSTAMP_TX_ON);
+ port->tx_ts_enabled = (cfg->tx_type == HWTSTAMP_TX_ON);
/* cfg TX timestamp */
seq_id = (AM65_CPSW_TS_SEQ_ID_OFFSET <<
@@ -1822,25 +1874,24 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2);
writel(ts_ctrl, port->port_base + AM65_CPSW_PORTN_REG_TS_CTL);
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
static int am65_cpsw_nuss_hwtstamp_get(struct net_device *ndev,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *cfg)
{
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
- struct hwtstamp_config cfg;
if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
return -EOPNOTSUPP;
- cfg.flags = 0;
- cfg.tx_type = port->tx_ts_enabled ?
+ cfg->flags = 0;
+ cfg->tx_type = port->tx_ts_enabled ?
HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- cfg.rx_filter = port->rx_ts_enabled ?
- HWTSTAMP_FILTER_PTP_V2_EVENT : HWTSTAMP_FILTER_NONE;
+ cfg->rx_filter = port->rx_ts_enabled ? HWTSTAMP_FILTER_PTP_V2_EVENT |
+ HWTSTAMP_FILTER_PTP_V1_L4_EVENT : HWTSTAMP_FILTER_NONE;
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
@@ -1851,13 +1902,6 @@ static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
if (!netif_running(ndev))
return -EINVAL;
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return am65_cpsw_nuss_hwtstamp_set(ndev, req);
- case SIOCGHWTSTAMP:
- return am65_cpsw_nuss_hwtstamp_get(ndev, req);
- }
-
return phylink_mii_ioctl(port->slave.phylink, req, cmd);
}
@@ -1941,6 +1985,8 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
.ndo_set_tx_maxrate = am65_cpsw_qos_ndo_tx_p0_set_maxrate,
.ndo_bpf = am65_cpsw_ndo_bpf,
.ndo_xdp_xmit = am65_cpsw_ndo_xdp_xmit,
+ .ndo_hwtstamp_get = am65_cpsw_nuss_hwtstamp_get,
+ .ndo_hwtstamp_set = am65_cpsw_nuss_hwtstamp_set,
};
static void am65_cpsw_disable_phy(struct phy *phy)
@@ -2152,13 +2198,11 @@ static void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
struct device *dev = common->dev;
int i;
- devm_remove_action(dev, am65_cpsw_nuss_free_tx_chns, common);
-
common->tx_ch_rate_msk = 0;
for (i = 0; i < common->tx_ch_num; i++) {
struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
- if (tx_chn->irq)
+ if (tx_chn->irq > 0)
devm_free_irq(dev, tx_chn->irq, tx_chn);
netif_napi_del(&tx_chn->napi_tx);
@@ -2170,15 +2214,17 @@ static void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
{
struct device *dev = common->dev;
+ struct am65_cpsw_tx_chn *tx_chn;
int i, ret = 0;
for (i = 0; i < common->tx_ch_num; i++) {
- struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+ tx_chn = &common->tx_chns[i];
+
+ hrtimer_setup(&tx_chn->tx_hrtimer, &am65_cpsw_nuss_tx_timer_callback,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
am65_cpsw_nuss_tx_poll);
- hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
- tx_chn->tx_hrtimer.function = &am65_cpsw_nuss_tx_timer_callback;
ret = devm_request_irq(dev, tx_chn->irq,
am65_cpsw_nuss_tx_irq,
@@ -2191,7 +2237,16 @@ static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
}
}
+ return 0;
+
err:
+ netif_napi_del(&tx_chn->napi_tx);
+ for (--i; i >= 0; i--) {
+ tx_chn = &common->tx_chns[i];
+ devm_free_irq(dev, tx_chn->irq, tx_chn);
+ netif_napi_del(&tx_chn->napi_tx);
+ }
+
return ret;
}
@@ -2272,12 +2327,10 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
goto err;
}
+ return 0;
+
err:
- i = devm_add_action(dev, am65_cpsw_nuss_free_tx_chns, common);
- if (i) {
- dev_err(dev, "Failed to add free_tx_chns action %d\n", i);
- return i;
- }
+ am65_cpsw_nuss_free_tx_chns(common);
return ret;
}
@@ -2305,7 +2358,6 @@ static void am65_cpsw_nuss_remove_rx_chns(struct am65_cpsw_common *common)
rx_chn = &common->rx_chns;
flows = rx_chn->flows;
- devm_remove_action(dev, am65_cpsw_nuss_free_rx_chns, common);
for (i = 0; i < common->rx_ch_num_flows; i++) {
if (!(flows[i].irq < 0))
@@ -2343,10 +2395,6 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
for (i = 0; i < common->rx_ch_num_flows; i++) {
flow = &rx_chn->flows[i];
flow->page_pool = NULL;
- flow->pages = devm_kcalloc(dev, AM65_CPSW_MAX_RX_DESC,
- sizeof(*flow->pages), GFP_KERNEL);
- if (!flow->pages)
- return -ENOMEM;
}
rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
@@ -2396,17 +2444,19 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
flow = &rx_chn->flows[i];
flow->id = i;
flow->common = common;
+ flow->irq = -EINVAL;
rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
rx_flow_cfg.rx_cfg.size = max_desc_num;
- rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
+ /* share same FDQ for all flows */
+ rx_flow_cfg.rxfdq_cfg.size = max_desc_num * rx_cfg.flow_id_num;
rx_flow_cfg.rxfdq_cfg.mode = common->pdata.fdqring_mode;
ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
i, &rx_flow_cfg);
if (ret) {
dev_err(dev, "Failed to init rx flow%d %d\n", i, ret);
- goto err;
+ goto err_flow;
}
if (!i)
fdqring_id =
@@ -2418,17 +2468,17 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
dev_err(dev, "Failed to get rx dma irq %d\n",
flow->irq);
ret = flow->irq;
- goto err;
+ goto err_flow;
}
snprintf(flow->name,
sizeof(flow->name), "%s-rx%d",
dev_name(dev), i);
+ hrtimer_setup(&flow->rx_hrtimer, &am65_cpsw_nuss_rx_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+
netif_napi_add(common->dma_ndev, &flow->napi_rx,
am65_cpsw_nuss_rx_poll);
- hrtimer_init(&flow->rx_hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- flow->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback;
ret = devm_request_irq(dev, flow->irq,
am65_cpsw_nuss_rx_irq,
@@ -2437,20 +2487,29 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
if (ret) {
dev_err(dev, "failure requesting rx %d irq %u, %d\n",
i, flow->irq, ret);
- goto err;
+ flow->irq = -EINVAL;
+ goto err_request_irq;
}
}
/* setup classifier to route priorities to flows */
cpsw_ale_classifier_setup_default(common->ale, common->rx_ch_num_flows);
-err:
- i = devm_add_action(dev, am65_cpsw_nuss_free_rx_chns, common);
- if (i) {
- dev_err(dev, "Failed to add free_rx_chns action %d\n", i);
- return i;
+ return 0;
+
+err_request_irq:
+ netif_napi_del(&flow->napi_rx);
+
+err_flow:
+ for (--i; i >= 0; i--) {
+ flow = &rx_chn->flows[i];
+ devm_free_irq(dev, flow->irq, flow);
+ netif_napi_del(&flow->napi_rx);
}
+err:
+ am65_cpsw_nuss_free_rx_chns(common);
+
return ret;
}
@@ -2470,20 +2529,15 @@ static int am65_cpsw_am654_get_efuse_macid(struct device_node *of_node,
{
u32 mac_lo, mac_hi, offset;
struct regmap *syscon;
- int ret;
- syscon = syscon_regmap_lookup_by_phandle(of_node, "ti,syscon-efuse");
+ syscon = syscon_regmap_lookup_by_phandle_args(of_node, "ti,syscon-efuse",
+ 1, &offset);
if (IS_ERR(syscon)) {
if (PTR_ERR(syscon) == -ENODEV)
return 0;
return PTR_ERR(syscon);
}
- ret = of_property_read_u32_index(of_node, "ti,syscon-efuse", 1,
- &offset);
- if (ret)
- return ret;
-
regmap_read(syscon, offset, &mac_lo);
regmap_read(syscon, offset + 4, &mac_hi);
@@ -2545,6 +2599,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
return -ENOENT;
for_each_child_of_node(node, port_np) {
+ phy_interface_t phy_if;
struct am65_cpsw_port *port;
u32 port_id;
@@ -2609,26 +2664,50 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
of_property_read_bool(port_np, "ti,mac-only");
/* get phy/link info */
- port->slave.port_np = port_np;
- ret = of_get_phy_mode(port_np, &port->slave.phy_if);
+ port->slave.port_np = of_node_get(port_np);
+ ret = of_get_phy_mode(port_np, &phy_if);
if (ret) {
dev_err(dev, "%pOF read phy-mode err %d\n",
port_np, ret);
goto of_node_put;
}
- ret = phy_set_mode_ext(port->slave.ifphy, PHY_MODE_ETHERNET, port->slave.phy_if);
+ /* CPSW controllers supported by this driver have a fixed
+ * internal TX delay in RGMII mode. Fix up PHY mode to account
+ * for this and warn about Device Trees that claim to have a TX
+ * delay on the PCB.
+ */
+ switch (phy_if) {
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ phy_if = PHY_INTERFACE_MODE_RGMII_RXID;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ phy_if = PHY_INTERFACE_MODE_RGMII;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ dev_warn(dev,
+ "RGMII mode without internal TX delay unsupported; please fix your Device Tree\n");
+ break;
+ default:
+ break;
+ }
+
+ port->slave.phy_if = phy_if;
+ ret = phy_set_mode_ext(port->slave.ifphy, PHY_MODE_ETHERNET, phy_if);
if (ret)
goto of_node_put;
ret = of_get_mac_address(port_np, port->slave.mac_addr);
- if (ret) {
+ if (ret == -EPROBE_DEFER) {
+ goto of_node_put;
+ } else if (ret) {
am65_cpsw_am654_get_efuse_macid(port_np,
port->port_id,
port->slave.mac_addr);
if (!is_valid_ether_addr(port->slave.mac_addr)) {
eth_random_addr(port->slave.mac_addr);
- dev_err(dev, "Use random MAC address\n");
+ dev_info(dev, "Use random MAC address\n");
}
}
@@ -2663,6 +2742,17 @@ static void am65_cpsw_nuss_phylink_cleanup(struct am65_cpsw_common *common)
}
}
+static void am65_cpsw_remove_dt(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_port *port;
+ int i;
+
+ for (i = 0; i < common->port_num; i++) {
+ port = &common->ports[i];
+ of_node_put(port->slave.port_np);
+ }
+}
+
static int
am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
{
@@ -2692,7 +2782,7 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
mutex_init(&ndev_priv->mm_lock);
port->qos.link_speed = SPEED_UNKNOWN;
SET_NETDEV_DEV(port->ndev, dev);
- port->ndev->dev.of_node = port->slave.port_np;
+ device_set_node(&port->ndev->dev, of_fwnode_handle(port->slave.port_np));
eth_hw_addr_set(port->ndev, port->slave.mac_addr);
@@ -2978,7 +3068,8 @@ static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common)
}
static int am65_cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct am65_cpsw_devlink *dl_priv = devlink_priv(dl);
struct am65_cpsw_common *common = dl_priv->common;
@@ -3260,7 +3351,7 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
return ret;
ret = am65_cpsw_nuss_init_rx_chns(common);
if (ret)
- return ret;
+ goto err_remove_tx;
/* The DMA Channels are not guaranteed to be in a clean state.
* Reset and disable them to ensure that they are back to the
@@ -3274,14 +3365,14 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
for (i = 0; i < common->rx_ch_num_flows; i++)
k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i,
- &rx_chan->flows[i],
- am65_cpsw_nuss_rx_cleanup, 0);
+ rx_chan,
+ am65_cpsw_nuss_rx_cleanup);
k3_udma_glue_disable_rx_chn(rx_chan->rx_chn);
ret = am65_cpsw_nuss_register_devlink(common);
if (ret)
- return ret;
+ goto err_remove_rx;
for (i = 0; i < common->port_num; i++) {
port = &common->ports[i];
@@ -3312,6 +3403,10 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
err_cleanup_ndev:
am65_cpsw_nuss_cleanup_ndev(common);
am65_cpsw_unregister_devlink(common);
+err_remove_rx:
+ am65_cpsw_nuss_remove_rx_chns(common);
+err_remove_tx:
+ am65_cpsw_nuss_remove_tx_chns(common);
return ret;
}
@@ -3331,6 +3426,8 @@ int am65_cpsw_nuss_update_tx_rx_chns(struct am65_cpsw_common *common,
return ret;
ret = am65_cpsw_nuss_init_rx_chns(common);
+ if (ret)
+ am65_cpsw_nuss_remove_tx_chns(common);
return ret;
}
@@ -3429,6 +3526,10 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
__be64 id_temp;
int ret, i;
+ BUILD_BUG_ON_MSG(sizeof(struct am65_cpsw_tx_swdata) > AM65_CPSW_NAV_SW_DATA_SIZE,
+ "TX SW_DATA size exceeds AM65_CPSW_NAV_SW_DATA_SIZE");
+ BUILD_BUG_ON_MSG(sizeof(struct am65_cpsw_swdata) > AM65_CPSW_NAV_SW_DATA_SIZE,
+ "SW_DATA size exceeds AM65_CPSW_NAV_SW_DATA_SIZE");
common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL);
if (!common)
return -ENOMEM;
@@ -3462,7 +3563,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
init_completion(&common->tdown_complete);
common->tx_ch_num = AM65_CPSW_DEFAULT_TX_CHNS;
common->rx_ch_num_flows = AM65_CPSW_DEFAULT_RX_CHN_FLOWS;
- common->pf_p0_rx_ptype_rrobin = false;
+ common->pf_p0_rx_ptype_rrobin = true;
common->default_vlan = 1;
common->ports = devm_kcalloc(dev, common->port_num,
@@ -3483,6 +3584,16 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
return ret;
}
+ am65_cpsw_nuss_get_ver(common);
+
+ ret = am65_cpsw_nuss_init_host_p(common);
+ if (ret)
+ goto err_pm_clear;
+
+ ret = am65_cpsw_nuss_init_slave_ports(common);
+ if (ret)
+ goto err_pm_clear;
+
node = of_get_child_by_name(dev->of_node, "mdio");
if (!node) {
dev_warn(dev, "MDIO node not found\n");
@@ -3499,16 +3610,6 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
}
of_node_put(node);
- am65_cpsw_nuss_get_ver(common);
-
- ret = am65_cpsw_nuss_init_host_p(common);
- if (ret)
- goto err_of_clear;
-
- ret = am65_cpsw_nuss_init_slave_ports(common);
- if (ret)
- goto err_of_clear;
-
/* init common data */
ale_params.dev = dev;
ale_params.ale_ageout = AM65_CPSW_ALE_AGEOUT_DEFAULT;
@@ -3555,6 +3656,7 @@ err_ndevs_clear:
am65_cpsw_nuss_cleanup_ndev(common);
am65_cpsw_nuss_phylink_cleanup(common);
am65_cpts_release(common->cpts);
+ am65_cpsw_remove_dt(common);
err_of_clear:
if (common->mdio_dev)
of_platform_device_destroy(common->mdio_dev, NULL);
@@ -3589,9 +3691,12 @@ static void am65_cpsw_nuss_remove(struct platform_device *pdev)
*/
am65_cpsw_nuss_cleanup_ndev(common);
am65_cpsw_unregister_devlink(common);
+ am65_cpsw_nuss_remove_rx_chns(common);
+ am65_cpsw_nuss_remove_tx_chns(common);
am65_cpsw_nuss_phylink_cleanup(common);
am65_cpts_release(common->cpts);
am65_cpsw_disable_serdes_phy(common);
+ am65_cpsw_remove_dt(common);
if (common->mdio_dev)
of_platform_device_destroy(common->mdio_dev, NULL);
@@ -3650,8 +3755,10 @@ static int am65_cpsw_nuss_resume(struct device *dev)
if (ret)
return ret;
ret = am65_cpsw_nuss_init_rx_chns(common);
- if (ret)
+ if (ret) {
+ am65_cpsw_nuss_remove_tx_chns(common);
return ret;
+ }
/* If RX IRQ was disabled before suspend, keep it disabled */
for (i = 0; i < common->rx_ch_num_flows; i++) {
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index 3f3e353dfe88..917c37e4e89b 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -101,10 +101,22 @@ struct am65_cpsw_rx_flow {
struct hrtimer rx_hrtimer;
unsigned long rx_pace_timeout;
struct page_pool *page_pool;
- struct page **pages;
char name[32];
};
+struct am65_cpsw_tx_swdata {
+ struct net_device *ndev;
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ };
+};
+
+struct am65_cpsw_swdata {
+ u32 flow_id;
+ struct page *page;
+};
+
struct am65_cpsw_rx_chn {
struct device *dev;
struct device *dma_dev;
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c
index fa96db7c1a13..66e8b224827b 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-qos.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c
@@ -276,9 +276,31 @@ static int am65_cpsw_iet_set_verify_timeout_count(struct am65_cpsw_port *port)
/* The number of wireside clocks contained in the verify
* timeout counter. The default is 0x1312d0
* (10 ms at 125 MHz in 1G mode).
+ * The frequency of the clock depends on the link speed
+ * and the PHY interface.
*/
- val = 125 * HZ_PER_MHZ; /* assuming 125MHz wireside clock */
+ switch (port->slave.phy_if) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ if (port->qos.link_speed == SPEED_1000)
+ val = 125 * HZ_PER_MHZ; /* 125 MHz at 1000 Mbps */
+ else if (port->qos.link_speed == SPEED_100)
+ val = 25 * HZ_PER_MHZ; /* 25 MHz at 100 Mbps */
+ else
+ val = (25 * HZ_PER_MHZ) / 10; /* 2.5 MHz at 10 Mbps */
+ break;
+
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ val = 125 * HZ_PER_MHZ; /* 125 MHz */
+ break;
+ default:
+ netdev_err(port->ndev, "selected mode does not support IET\n");
+ return -EOPNOTSUPP;
+ }
val /= MILLIHZ_PER_HZ; /* count per ms timeout */
val *= verify_time_ms; /* count for timeout ms */
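As a quick check of the table above against the documented reset value: at 1000 Mbps the wireside clock is 125 MHz, so val starts at 125,000,000 counts per second; dividing by MILLIHZ_PER_HZ (1000) gives 125,000 counts per millisecond, and multiplying by the default verify time of 10 ms yields 1,250,000 = 0x1312d0, the default noted in the comment.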
@@ -295,20 +317,21 @@ static int am65_cpsw_iet_verify_wait(struct am65_cpsw_port *port)
u32 ctrl, status;
int try;
- try = 20;
- do {
- /* Reset the verify state machine by writing 1
- * to LINKFAIL
- */
- ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
- ctrl |= AM65_CPSW_PN_IET_MAC_LINKFAIL;
- writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ try = 3;
- /* Clear MAC_LINKFAIL bit to start Verify. */
- ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
- ctrl &= ~AM65_CPSW_PN_IET_MAC_LINKFAIL;
- writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ /* Reset the verify state machine by writing 1
+ * to LINKFAIL
+ */
+ ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ ctrl |= AM65_CPSW_PN_IET_MAC_LINKFAIL;
+ writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ /* Clear MAC_LINKFAIL bit to start Verify. */
+ ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ ctrl &= ~AM65_CPSW_PN_IET_MAC_LINKFAIL;
+ writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+
+ do {
msleep(port->qos.iet.verify_time_ms);
status = readl(port->port_base + AM65_CPSW_PN_REG_IET_STATUS);
@@ -330,7 +353,7 @@ static int am65_cpsw_iet_verify_wait(struct am65_cpsw_port *port)
netdev_dbg(port->ndev, "MAC Merge verify error\n");
return -ENODEV;
}
- } while (try-- > 0);
+ } while (--try > 0);
netdev_dbg(port->ndev, "MAC Merge verify timeout\n");
return -ETIMEDOUT;
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index 59d6ab989c55..8ffbfaa3ab18 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -163,7 +163,9 @@ struct am65_cpts {
struct device_node *clk_mux_np;
struct clk *refclk;
u32 refclk_freq;
- struct list_head events;
+ /* separate lists to handle TX and RX timestamp independently */
+ struct list_head events_tx;
+ struct list_head events_rx;
struct list_head pool;
struct am65_cpts_event pool_data[AM65_CPTS_MAX_EVENTS];
spinlock_t lock; /* protects events lists*/
@@ -227,6 +229,24 @@ static void am65_cpts_disable(struct am65_cpts *cpts)
am65_cpts_write32(cpts, 0, int_enable);
}
+static int am65_cpts_purge_event_list(struct am65_cpts *cpts,
+ struct list_head *events)
+{
+ struct list_head *this, *next;
+ struct am65_cpts_event *event;
+ int removed = 0;
+
+ list_for_each_safe(this, next, events) {
+ event = list_entry(this, struct am65_cpts_event, list);
+ if (time_after(jiffies, event->tmo)) {
+ list_del_init(&event->list);
+ list_add(&event->list, &cpts->pool);
+ ++removed;
+ }
+ }
+ return removed;
+}
+
static int am65_cpts_event_get_port(struct am65_cpts_event *event)
{
return (event->event1 & AM65_CPTS_EVENT_1_PORT_NUMBER_MASK) >>
@@ -239,20 +259,12 @@ static int am65_cpts_event_get_type(struct am65_cpts_event *event)
AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT;
}
-static int am65_cpts_cpts_purge_events(struct am65_cpts *cpts)
+static int am65_cpts_purge_events(struct am65_cpts *cpts)
{
- struct list_head *this, *next;
- struct am65_cpts_event *event;
int removed = 0;
- list_for_each_safe(this, next, &cpts->events) {
- event = list_entry(this, struct am65_cpts_event, list);
- if (time_after(jiffies, event->tmo)) {
- list_del_init(&event->list);
- list_add(&event->list, &cpts->pool);
- ++removed;
- }
- }
+ removed += am65_cpts_purge_event_list(cpts, &cpts->events_tx);
+ removed += am65_cpts_purge_event_list(cpts, &cpts->events_rx);
if (removed)
dev_dbg(cpts->dev, "event pool cleaned up %d\n", removed);
@@ -287,7 +299,7 @@ static int __am65_cpts_fifo_read(struct am65_cpts *cpts)
struct am65_cpts_event, list);
if (!event) {
- if (am65_cpts_cpts_purge_events(cpts)) {
+ if (am65_cpts_purge_events(cpts)) {
dev_err(cpts->dev, "cpts: event pool empty\n");
ret = -1;
goto out;
@@ -306,11 +318,21 @@ static int __am65_cpts_fifo_read(struct am65_cpts *cpts)
cpts->timestamp);
break;
case AM65_CPTS_EV_RX:
+ event->tmo = jiffies +
+ msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT);
+
+ list_move_tail(&event->list, &cpts->events_rx);
+
+ dev_dbg(cpts->dev,
+ "AM65_CPTS_EV_RX e1:%08x e2:%08x t:%lld\n",
+ event->event1, event->event2,
+ event->timestamp);
+ break;
case AM65_CPTS_EV_TX:
event->tmo = jiffies +
msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT);
- list_move_tail(&event->list, &cpts->events);
+ list_move_tail(&event->list, &cpts->events_tx);
dev_dbg(cpts->dev,
"AM65_CPTS_EV_TX e1:%08x e2:%08x t:%lld\n",
@@ -828,7 +850,7 @@ static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
return found;
}
-static void am65_cpts_find_ts(struct am65_cpts *cpts)
+static void am65_cpts_find_tx_ts(struct am65_cpts *cpts)
{
struct am65_cpts_event *event;
struct list_head *this, *next;
@@ -837,7 +859,7 @@ static void am65_cpts_find_ts(struct am65_cpts *cpts)
LIST_HEAD(events);
spin_lock_irqsave(&cpts->lock, flags);
- list_splice_init(&cpts->events, &events);
+ list_splice_init(&cpts->events_tx, &events);
spin_unlock_irqrestore(&cpts->lock, flags);
list_for_each_safe(this, next, &events) {
@@ -850,7 +872,7 @@ static void am65_cpts_find_ts(struct am65_cpts *cpts)
}
spin_lock_irqsave(&cpts->lock, flags);
- list_splice_tail(&events, &cpts->events);
+ list_splice_tail(&events, &cpts->events_tx);
list_splice_tail(&events_free, &cpts->pool);
spin_unlock_irqrestore(&cpts->lock, flags);
}
@@ -861,7 +883,7 @@ static long am65_cpts_ts_work(struct ptp_clock_info *ptp)
unsigned long flags;
long delay = -1;
- am65_cpts_find_ts(cpts);
+ am65_cpts_find_tx_ts(cpts);
spin_lock_irqsave(&cpts->txq.lock, flags);
if (!skb_queue_empty(&cpts->txq))
@@ -905,7 +927,7 @@ static u64 am65_cpts_find_rx_ts(struct am65_cpts *cpts, u32 skb_mtype_seqid)
spin_lock_irqsave(&cpts->lock, flags);
__am65_cpts_fifo_read(cpts);
- list_for_each_safe(this, next, &cpts->events) {
+ list_for_each_safe(this, next, &cpts->events_rx) {
event = list_entry(this, struct am65_cpts_event, list);
if (time_after(jiffies, event->tmo)) {
list_move(&event->list, &cpts->pool);
@@ -1155,7 +1177,8 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
return ERR_PTR(ret);
mutex_init(&cpts->ptp_clk_lock);
- INIT_LIST_HEAD(&cpts->events);
+ INIT_LIST_HEAD(&cpts->events_tx);
+ INIT_LIST_HEAD(&cpts->events_rx);
INIT_LIST_HEAD(&cpts->pool);
spin_lock_init(&cpts->lock);
skb_queue_head_init(&cpts->txq);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 4ef8cf6ea135..54c24cd3d3be 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -351,6 +351,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
int ret = 0, port, ch = xmeta->ch;
int headroom = CPSW_HEADROOM_NA;
struct net_device *ndev = xmeta->ndev;
+ u32 metasize = 0;
struct cpsw_priv *priv;
struct page_pool *pool;
struct sk_buff *skb;
@@ -400,7 +401,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
}
- xdp_prepare_buff(&xdp, pa, headroom, size, false);
+ xdp_prepare_buff(&xdp, pa, headroom, size, true);
port = priv->emac_port + cpsw->data.dual_emac;
ret = cpsw_run_xdp(priv, ch, &xdp, page, port, &len);
@@ -408,6 +409,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
goto requeue;
headroom = xdp.data - xdp.data_hard_start;
+ metasize = xdp.data - xdp.data_meta;
/* XDP prog can modify vlan tag, so can't use encap header */
status &= ~CPDMA_RX_VLAN_ENCAP;
@@ -423,6 +425,8 @@ static void cpsw_rx_handler(void *token, int len, int status)
skb_reserve(skb, headroom);
skb_put(skb, len);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb->dev = ndev;
if (status & CPDMA_RX_VLAN_ENCAP)
cpsw_rx_vlan_encap(skb);
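The new metasize handling only matters when an attached XDP program actually reserves a metadata area in front of the packet; skb_metadata_set() then exposes that area as skb->data_meta to later consumers (e.g. TC programs). A minimal sketch of such a program, assuming it is built with clang/libbpf; the names and the 4-byte payload are illustrative only:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_store_meta(struct xdp_md *ctx)
{
	__u32 *meta;
	void *data;

	/* grow the metadata area by 4 bytes (length must stay a multiple of 4) */
	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
		return XDP_PASS;

	meta = (void *)(long)ctx->data_meta;
	data = (void *)(long)ctx->data;
	if ((void *)(meta + 1) > data)		/* bounds check for the verifier */
		return XDP_PASS;

	*meta = 0xcafe;		/* value later readable via skb metadata */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";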
@@ -635,6 +639,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
slave->phy = phy;
+ phy_disable_eee(slave->phy);
+
phy_attached_info(slave->phy);
phy_start(slave->phy);
@@ -684,7 +690,7 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
soft_reset("cpsw", &cpsw->regs->soft_reset);
cpsw_ale_start(cpsw->ale);
- /* switch to vlan unaware mode */
+ /* switch to vlan aware mode */
cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
CPSW_ALE_VLAN_AWARE);
control_reg = readl(&cpsw->regs->control);
@@ -1150,6 +1156,27 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
}
#endif
+/* We need a custom implementation of phy_do_ioctl_running() because in switch
+ * mode, dev->phydev may be different than the phy of the active_slave. We need
+ * to operate on the locally saved phy instead.
+ */
+static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+ struct cpsw_priv *priv = netdev_priv(dev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+ struct phy_device *phy;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ phy = cpsw->slaves[slave_no].phy;
+ if (phy)
+ return phy_mii_ioctl(phy, req, cmd);
+
+ return -EOPNOTSUPP;
+}
+
static const struct net_device_ops cpsw_netdev_ops = {
.ndo_open = cpsw_ndo_open,
.ndo_stop = cpsw_ndo_stop,
@@ -1168,6 +1195,8 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_setup_tc = cpsw_ndo_setup_tc,
.ndo_bpf = cpsw_ndo_bpf,
.ndo_xdp_xmit = cpsw_ndo_xdp_xmit,
+ .ndo_hwtstamp_get = cpsw_hwtstamp_get,
+ .ndo_hwtstamp_set = cpsw_hwtstamp_set,
};
static void cpsw_get_drvinfo(struct net_device *ndev,
@@ -1225,7 +1254,6 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
.get_link_ksettings = cpsw_get_link_ksettings,
.set_link_ksettings = cpsw_set_link_ksettings,
.get_eee = cpsw_get_eee,
- .set_eee = cpsw_set_eee,
.nway_reset = cpsw_nway_reset,
.get_ringparam = cpsw_get_ringparam,
.set_ringparam = cpsw_set_ringparam,
@@ -1641,6 +1669,9 @@ static int cpsw_probe(struct platform_device *pdev)
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_NDO_XMIT;
+ /* Hijack PHY timestamping requests in order to block them */
+ if (!cpsw->data.dual_emac)
+ ndev->see_all_hwtstamp_requests = true;
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index d361caa80d05..fbe35af615a6 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -127,15 +127,15 @@ struct cpsw_ale_dev_id {
static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
{
- int idx, idx2;
+ int idx, idx2, index;
u32 hi_val = 0;
idx = start / 32;
idx2 = (start + bits - 1) / 32;
/* Check if bits to be fetched exceed a word */
if (idx != idx2) {
- idx2 = 2 - idx2; /* flip */
- hi_val = ale_entry[idx2] << ((idx2 * 32) - start);
+ index = 2 - idx2; /* flip */
+ hi_val = ale_entry[index] << ((idx2 * 32) - start);
}
start -= idx * 32;
idx = 2 - idx; /* flip */
@@ -145,16 +145,16 @@ static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
u32 value)
{
- int idx, idx2;
+ int idx, idx2, index;
value &= BITMASK(bits);
idx = start / 32;
idx2 = (start + bits - 1) / 32;
/* Check if bits to be set exceed a word */
if (idx != idx2) {
- idx2 = 2 - idx2; /* flip */
- ale_entry[idx2] &= ~(BITMASK(bits + start - (idx2 * 32)));
- ale_entry[idx2] |= (value >> ((idx2 * 32) - start));
+ index = 2 - idx2; /* flip */
+ ale_entry[index] &= ~(BITMASK(bits + start - (idx2 * 32)));
+ ale_entry[index] |= (value >> ((idx2 * 32) - start));
}
start -= idx * 32;
idx = 2 - idx; /* flip */
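The point of the separate index variable is that the shift count must keep using the original word number while the array subscript uses the flipped one. For a 4-bit field starting at bit 62 of the entry (idx = 1, idx2 = 2), the upper two bits live in ale_entry[2 - 2] and must be shifted left by (2 * 32) - 62 = 2 before being combined with the low bits taken from ale_entry[2 - 1] >> 30. The old code overwrote idx2 with the flipped value first, so the shift became (0 * 32) - 62, an out-of-range shift count.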
@@ -1231,7 +1231,7 @@ int cpsw_ale_rx_ratelimit_bc(struct cpsw_ale *ale, int port, unsigned int rateli
static void cpsw_ale_timer(struct timer_list *t)
{
- struct cpsw_ale *ale = from_timer(ale, t, timer);
+ struct cpsw_ale *ale = timer_container_of(ale, t, timer);
cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
@@ -1287,7 +1287,7 @@ static void cpsw_ale_aging_stop(struct cpsw_ale *ale)
return;
}
- del_timer_sync(&ale->timer);
+ timer_delete_sync(&ale->timer);
}
void cpsw_ale_start(struct cpsw_ale *ale)
@@ -1704,26 +1704,34 @@ static void cpsw_ale_policer_reset(struct cpsw_ale *ale)
void cpsw_ale_classifier_setup_default(struct cpsw_ale *ale, int num_rx_ch)
{
int pri, idx;
- /* IEEE802.1D-2004, Standard for Local and metropolitan area networks
- * Table G-2 - Traffic type acronyms
- * Table G-3 - Defining traffic types
- * User priority values 1 and 2 effectively communicate a lower
- * priority than 0. In the below table 0 is assigned to higher priority
- * thread than 1 and 2 wherever possible.
- * The below table maps which thread the user priority needs to be
+
+ /* Reference:
+ * IEEE802.1Q-2014, Standard for Local and metropolitan area networks
+ * Table I-2 - Traffic type acronyms
+ * Table I-3 - Defining traffic types
+ * Section I.4 Traffic types and priority values, states:
+ * "0 is thus used both for default priority and for Best Effort, and
+ * Background is associated with a priority value of 1. This means
+ * that the value 1 effectively communicates a lower priority than 0."
+ *
+ * In the table below, Priority Code Point (PCP) 0 is assigned
+ * to a higher priority thread than PCP 1 wherever possible.
+ * The table maps which thread the PCP traffic needs to be
* sent to for a given number of threads (RX channels). Upper threads
* have higher priority.
* e.g. if number of threads is 8 then user priority 0 will map to
- * pri_thread_map[8-1][0] i.e. thread 2
+ * pri_thread_map[8-1][0] i.e. thread 1
*/
- int pri_thread_map[8][8] = { { 0, 0, 0, 0, 0, 0, 0, 0, },
+
+ int pri_thread_map[8][8] = { /* BK,BE,EE,CA,VI,VO,IC,NC */
+ { 0, 0, 0, 0, 0, 0, 0, 0, },
{ 0, 0, 0, 0, 1, 1, 1, 1, },
{ 0, 0, 0, 0, 1, 1, 2, 2, },
- { 1, 0, 0, 1, 2, 2, 3, 3, },
- { 1, 0, 0, 1, 2, 3, 4, 4, },
- { 1, 0, 0, 2, 3, 4, 5, 5, },
- { 1, 0, 0, 2, 3, 4, 5, 6, },
- { 2, 0, 1, 3, 4, 5, 6, 7, } };
+ { 0, 0, 1, 1, 2, 2, 3, 3, },
+ { 0, 0, 1, 1, 2, 2, 3, 4, },
+ { 1, 0, 2, 2, 3, 3, 4, 5, },
+ { 1, 0, 2, 3, 4, 4, 5, 6, },
+ { 1, 0, 2, 3, 4, 5, 6, 7 } };
cpsw_ale_policer_reset(ale);
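The mapping is a plain two-dimensional lookup, thread = pri_thread_map[num_rx_ch - 1][priority]. With eight RX channels (bottom row), priority 0 maps to thread 1 and priority 1 to thread 0, so Best Effort is steered to a higher-priority thread than Background, as the quoted 802.1Q Section I.4 note requires; with four channels (row { 0, 0, 1, 1, 2, 2, 3, 3 }) priorities 6 and 7 share the highest thread while 0 and 1 share the lowest.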
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index 21d55a180ef6..bdc4db0d169c 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -434,18 +434,6 @@ int cpsw_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
return -EOPNOTSUPP;
}
-int cpsw_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int slave_no = cpsw_slave_index(cpsw, priv);
-
- if (cpsw->slaves[slave_no].phy)
- return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata);
- else
- return -EOPNOTSUPP;
-}
-
int cpsw_nway_reset(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index a98bcc5eb566..ab88d4c02cbd 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -293,6 +293,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
struct page_pool *pool;
struct sk_buff *skb;
struct xdp_buff xdp;
+ u32 metasize = 0;
int ret = 0;
dma_addr_t dma;
@@ -345,13 +346,14 @@ static void cpsw_rx_handler(void *token, int len, int status)
size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
}
- xdp_prepare_buff(&xdp, pa, headroom, size, false);
+ xdp_prepare_buff(&xdp, pa, headroom, size, true);
ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port, &len);
if (ret != CPSW_XDP_PASS)
goto requeue;
headroom = xdp.data - xdp.data_hard_start;
+ metasize = xdp.data - xdp.data_meta;
/* XDP prog can modify vlan tag, so can't use encap header */
status &= ~CPDMA_RX_VLAN_ENCAP;
@@ -368,6 +370,8 @@ static void cpsw_rx_handler(void *token, int len, int status)
skb->offload_fwd_mark = priv->offload_fwd_mark;
skb_reserve(skb, headroom);
skb_put(skb, len);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb->dev = ndev;
if (status & CPDMA_RX_VLAN_ENCAP)
cpsw_rx_vlan_encap(skb);
@@ -554,7 +558,7 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
soft_reset("cpsw", &cpsw->regs->soft_reset);
cpsw_ale_start(cpsw->ale);
- /* switch to vlan unaware mode */
+ /* switch to vlan aware mode */
cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
CPSW_ALE_VLAN_AWARE);
control_reg = readl(&cpsw->regs->control);
@@ -778,6 +782,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
slave->phy = phy;
+ phy_disable_eee(slave->phy);
+
phy_attached_info(slave->phy);
phy_start(slave->phy);
@@ -1126,7 +1132,7 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_stop = cpsw_ndo_stop,
.ndo_start_xmit = cpsw_ndo_start_xmit,
.ndo_set_mac_address = cpsw_ndo_set_mac_address,
- .ndo_eth_ioctl = cpsw_ndo_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = cpsw_ndo_tx_timeout,
.ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
@@ -1141,6 +1147,8 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_bpf = cpsw_ndo_bpf,
.ndo_xdp_xmit = cpsw_ndo_xdp_xmit,
.ndo_get_port_parent_id = cpsw_get_port_parent_id,
+ .ndo_hwtstamp_get = cpsw_hwtstamp_get,
+ .ndo_hwtstamp_set = cpsw_hwtstamp_set,
};
static void cpsw_get_drvinfo(struct net_device *ndev,
@@ -1209,7 +1217,6 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
.get_link_ksettings = cpsw_get_link_ksettings,
.set_link_ksettings = cpsw_set_link_ksettings,
.get_eee = cpsw_get_eee,
- .set_eee = cpsw_set_eee,
.nway_reset = cpsw_nway_reset,
.get_ringparam = cpsw_get_ringparam,
.set_ringparam = cpsw_set_ringparam,
@@ -1408,7 +1415,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_TC;
- ndev->netns_local = true;
+ ndev->netns_immutable = true;
ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
NETDEV_XDP_ACT_REDIRECT |
@@ -1417,6 +1424,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
SET_NETDEV_DEV(ndev, dev);
+ ndev->dev.of_node = slave_data->slave_node;
if (!napi_ndev) {
/* CPSW Host port CPDMA interface is shared between
@@ -1610,7 +1618,8 @@ static const struct devlink_ops cpsw_devlink_ops = {
};
static int cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct cpsw_devlink *dl_priv = devlink_priv(dl);
struct cpsw_common *cpsw = dl_priv->cpsw;
@@ -1745,7 +1754,8 @@ exit:
}
static int cpsw_dl_ale_ctrl_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct cpsw_devlink *dl_priv = devlink_priv(dl);
struct cpsw_common *cpsw = dl_priv->cpsw;
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 6fe4edabba44..bc4fdf17a99e 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -614,24 +614,29 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
}
-static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+int cpsw_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct cpsw_priv *priv = netdev_priv(dev);
struct cpsw_common *cpsw = priv->cpsw;
- struct hwtstamp_config cfg;
+
+ /* This will only execute if dev->see_all_hwtstamp_requests is set */
+ if (cfg->source != HWTSTAMP_SOURCE_NETDEV) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Switch mode only supports MAC timestamping");
+ return -EOPNOTSUPP;
+ }
if (cpsw->version != CPSW_VERSION_1 &&
cpsw->version != CPSW_VERSION_2 &&
cpsw->version != CPSW_VERSION_3)
return -EOPNOTSUPP;
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
-
- if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
+ if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
- switch (cfg.rx_filter) {
+ switch (cfg->rx_filter) {
case HWTSTAMP_FILTER_NONE:
priv->rx_ts_enabled = 0;
break;
@@ -651,13 +656,13 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
default:
return -ERANGE;
}
- priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;
+ priv->tx_ts_enabled = cfg->tx_type == HWTSTAMP_TX_ON;
switch (cpsw->version) {
case CPSW_VERSION_1:
@@ -671,65 +676,40 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
WARN_ON(1);
}
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
-static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+int cpsw_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg)
{
struct cpsw_common *cpsw = ndev_to_cpsw(dev);
struct cpsw_priv *priv = netdev_priv(dev);
- struct hwtstamp_config cfg;
if (cpsw->version != CPSW_VERSION_1 &&
cpsw->version != CPSW_VERSION_2 &&
cpsw->version != CPSW_VERSION_3)
return -EOPNOTSUPP;
- cfg.flags = 0;
- cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- cfg.rx_filter = priv->rx_ts_enabled;
+ cfg->tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg->rx_filter = priv->rx_ts_enabled;
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
#else
-static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+int cpsw_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg)
{
return -EOPNOTSUPP;
}
-static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+int cpsw_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
#endif /*CONFIG_TI_CPTS*/
-int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
-{
- struct cpsw_priv *priv = netdev_priv(dev);
- struct cpsw_common *cpsw = priv->cpsw;
- int slave_no = cpsw_slave_index(cpsw, priv);
- struct phy_device *phy;
-
- if (!netif_running(dev))
- return -EINVAL;
-
- phy = cpsw->slaves[slave_no].phy;
-
- if (!phy_has_hwtstamp(phy)) {
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return cpsw_hwtstamp_set(dev, req);
- case SIOCGHWTSTAMP:
- return cpsw_hwtstamp_get(dev, req);
- }
- }
-
- if (phy)
- return phy_mii_ioctl(phy, req, cmd);
-
- return -EOPNOTSUPP;
-}
-
int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
{
struct cpsw_priv *priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index 1f448290b9f4..91add8925e23 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -461,7 +461,6 @@ void soft_reset(const char *module, void __iomem *reg);
void cpsw_set_slave_mac(struct cpsw_slave *slave, struct cpsw_priv *priv);
void cpsw_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue);
int cpsw_need_resplit(struct cpsw_common *cpsw);
-int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd);
int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate);
int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data);
@@ -469,6 +468,11 @@ bool cpsw_shp_is_off(struct cpsw_priv *priv);
void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv);
void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv);
void cpsw_qos_clsflower_resume(struct cpsw_priv *priv);
+int cpsw_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg);
+int cpsw_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack);
/* ethtool */
u32 cpsw_get_msglevel(struct net_device *ndev);
@@ -497,7 +501,6 @@ int cpsw_get_link_ksettings(struct net_device *ndev,
int cpsw_set_link_ksettings(struct net_device *ndev,
const struct ethtool_link_ksettings *ecmd);
int cpsw_get_eee(struct net_device *ndev, struct ethtool_keee *edata);
-int cpsw_set_eee(struct net_device *ndev, struct ethtool_keee *edata);
int cpsw_nway_reset(struct net_device *ndev);
void cpsw_get_ringparam(struct net_device *ndev,
struct ethtool_ringparam *ering,
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index dbbea9146040..2ba4c8795d60 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -181,7 +181,7 @@ void cpts_misc_interrupt(struct cpts *cpts)
}
EXPORT_SYMBOL_GPL(cpts_misc_interrupt);
-static u64 cpts_systim_read(const struct cyclecounter *cc)
+static u64 cpts_systim_read(struct cyclecounter *cc)
{
struct cpts *cpts = container_of(cc, struct cpts, cc);
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 68507126be8e..48f85a3649b2 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -234,7 +234,6 @@ static int davinci_mdiobb_read_c22(struct mii_bus *bus, int phy, int reg)
ret = mdiobb_read_c22(bus, phy, reg);
- pm_runtime_mark_last_busy(bus->parent);
pm_runtime_put_autosuspend(bus->parent);
return ret;
@@ -251,7 +250,6 @@ static int davinci_mdiobb_write_c22(struct mii_bus *bus, int phy, int reg,
ret = mdiobb_write_c22(bus, phy, reg, val);
- pm_runtime_mark_last_busy(bus->parent);
pm_runtime_put_autosuspend(bus->parent);
return ret;
@@ -268,7 +266,6 @@ static int davinci_mdiobb_read_c45(struct mii_bus *bus, int phy, int devad,
ret = mdiobb_read_c45(bus, phy, devad, reg);
- pm_runtime_mark_last_busy(bus->parent);
pm_runtime_put_autosuspend(bus->parent);
return ret;
@@ -285,7 +282,6 @@ static int davinci_mdiobb_write_c45(struct mii_bus *bus, int phy, int devad,
ret = mdiobb_write_c45(bus, phy, devad, reg, val);
- pm_runtime_mark_last_busy(bus->parent);
pm_runtime_put_autosuspend(bus->parent);
return ret;
@@ -332,7 +328,6 @@ static int davinci_mdio_common_reset(struct davinci_mdio_data *data)
data->bus->phy_mask = phy_mask;
done:
- pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
return 0;
@@ -441,7 +436,6 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
break;
}
- pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
return ret;
}
@@ -478,7 +472,6 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
break;
}
- pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
return ret;
@@ -548,8 +541,8 @@ static int davinci_mdio_probe(struct platform_device *pdev)
struct davinci_mdio_data *data;
struct resource *res;
struct phy_device *phy;
- int ret, addr;
int autosuspend_delay_ms = -1;
+ int ret;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -652,14 +645,10 @@ static int davinci_mdio_probe(struct platform_device *pdev)
goto bail_out;
/* scan and dump the bus */
- for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
- phy = mdiobus_get_phy(data->bus, addr);
- if (phy) {
- dev_info(dev, "phy[%d]: device %s, driver %s\n",
- phy->mdio.addr, phydev_name(phy),
- phy->drv ? phy->drv->name : "unknown");
- }
- }
+ mdiobus_for_each_phy(data->bus, phy)
+ dev_info(dev, "phy[%d]: device %s, driver %s\n",
+ phy->mdio.addr, phydev_name(phy),
+ phy->drv ? phy->drv->name : "unknown");
return 0;
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
index 5d6d1cf78e93..ec085897edf0 100644
--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
@@ -215,6 +215,9 @@ static void icss_iep_enable_shadow_mode(struct icss_iep *iep)
for (cmp = IEP_MIN_CMP; cmp < IEP_MAX_CMP; cmp++) {
regmap_update_bits(iep->map, ICSS_IEP_CMP_STAT_REG,
IEP_CMP_STATUS(cmp), IEP_CMP_STATUS(cmp));
+
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(cmp), 0);
}
/* enable reset counter on CMP0 event */
@@ -403,66 +406,79 @@ static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns)
static int icss_iep_perout_enable_hw(struct icss_iep *iep,
struct ptp_perout_request *req, int on)
{
+ struct timespec64 ts;
+ u64 ns_start;
+ u64 ns_width;
int ret;
u64 cmp;
+ if (!on) {
+ /* Disable CMP 1 */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(1), 0);
+
+ /* clear CMP regs */
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
+
+ /* Disable sync */
+ regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0);
+
+ return 0;
+ }
+
+ /* Calculate width of the signal for PPS/PEROUT handling */
+ ts.tv_sec = req->on.sec;
+ ts.tv_nsec = req->on.nsec;
+ ns_width = timespec64_to_ns(&ts);
+
+ if (req->flags & PTP_PEROUT_PHASE) {
+ ts.tv_sec = req->phase.sec;
+ ts.tv_nsec = req->phase.nsec;
+ ns_start = timespec64_to_ns(&ts);
+ } else {
+ ns_start = 0;
+ }
+
if (iep->ops && iep->ops->perout_enable) {
ret = iep->ops->perout_enable(iep->clockops_data, req, on, &cmp);
if (ret)
return ret;
- if (on) {
- /* Configure CMP */
- regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp));
- if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
- regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp));
- /* Configure SYNC, 1ms pulse width */
- regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, 1000000);
- regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
- regmap_write(iep->map, ICSS_IEP_SYNC_START_REG, 0);
- regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */
- /* Enable CMP 1 */
- regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
- IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
- } else {
- /* Disable CMP 1 */
- regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
- IEP_CMP_CFG_CMP_EN(1), 0);
-
- /* clear regs */
- regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
- if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
- regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
- }
+ /* Configure CMP */
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp));
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp));
+ /* Configure SYNC, based on req on width */
+ regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG,
+ div_u64(ns_width, iep->def_inc));
+ regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
+ regmap_write(iep->map, ICSS_IEP_SYNC_START_REG,
+ div_u64(ns_start, iep->def_inc));
+ regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */
+ /* Enable CMP 1 */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
} else {
- if (on) {
- u64 start_ns;
-
- iep->period = ((u64)req->period.sec * NSEC_PER_SEC) +
- req->period.nsec;
- start_ns = ((u64)req->period.sec * NSEC_PER_SEC)
- + req->period.nsec;
- icss_iep_update_to_next_boundary(iep, start_ns);
-
- /* Enable Sync in single shot mode */
- regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG,
- IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN);
- /* Enable CMP 1 */
- regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
- IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
- } else {
- /* Disable CMP 1 */
- regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
- IEP_CMP_CFG_CMP_EN(1), 0);
-
- /* clear CMP regs */
- regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
- if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
- regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
-
- /* Disable sync */
- regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0);
- }
+ u64 start_ns;
+
+ iep->period = ((u64)req->period.sec * NSEC_PER_SEC) +
+ req->period.nsec;
+ start_ns = ((u64)req->period.sec * NSEC_PER_SEC)
+ + req->period.nsec;
+ icss_iep_update_to_next_boundary(iep, start_ns);
+
+ regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG,
+ div_u64(ns_width, iep->def_inc));
+ regmap_write(iep->map, ICSS_IEP_SYNC_START_REG,
+ div_u64(ns_start, iep->def_inc));
+ /* Enable Sync in single shot mode */
+ regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG,
+ IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN);
+ /* Enable CMP 1 */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
}
return 0;
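For scale: SYNC_PWIDTH and SYNC_START are programmed in IEP clock ticks, so the nanosecond values are divided by def_inc, the counter increment per tick. Assuming, purely for illustration, a def_inc of 5 ns (a 200 MHz IEP clock), the default 1 ms on-time used by the PPS path below becomes 1,000,000 / 5 = 200,000 ticks, and a PTP_PEROUT_PHASE offset of 100 us would start the pulse 20,000 ticks into the period.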
@@ -473,6 +489,21 @@ static int icss_iep_perout_enable(struct icss_iep *iep,
{
int ret = 0;
+ if (!on)
+ goto disable;
+
+ /* Reject requests with unsupported flags */
+ if (req->flags & ~(PTP_PEROUT_DUTY_CYCLE |
+ PTP_PEROUT_PHASE))
+ return -EOPNOTSUPP;
+
+ /* Set default "on" time (1ms) for the signal if not passed by the app */
+ if (!(req->flags & PTP_PEROUT_DUTY_CYCLE)) {
+ req->on.sec = 0;
+ req->on.nsec = NSEC_PER_MSEC;
+ }
+
+disable:
mutex_lock(&iep->ptp_clk_mutex);
if (iep->pps_enabled) {
@@ -565,10 +596,13 @@ static int icss_iep_pps_enable(struct icss_iep *iep, int on)
if (on) {
ns = icss_iep_gettime(iep, NULL);
ts = ns_to_timespec64(ns);
+ rq.perout.flags = 0;
rq.perout.period.sec = 1;
rq.perout.period.nsec = 0;
rq.perout.start.sec = ts.tv_sec + 2;
rq.perout.start.nsec = 0;
+ rq.perout.on.sec = 0;
+ rq.perout.on.nsec = NSEC_PER_MSEC;
ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
} else {
ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
@@ -587,7 +621,8 @@ exit:
static int icss_iep_extts_enable(struct icss_iep *iep, u32 index, int on)
{
- u32 val, cap, ret = 0;
+ u32 val, cap;
+ int ret = 0;
mutex_lock(&iep->ptp_clk_mutex);
@@ -651,11 +686,17 @@ struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx)
struct platform_device *pdev;
struct device_node *iep_np;
struct icss_iep *iep;
+ int ret;
iep_np = of_parse_phandle(np, "ti,iep", idx);
- if (!iep_np || !of_device_is_available(iep_np))
+ if (!iep_np)
return ERR_PTR(-ENODEV);
+ if (!of_device_is_available(iep_np)) {
+ of_node_put(iep_np);
+ return ERR_PTR(-ENODEV);
+ }
+
pdev = of_find_device_by_node(iep_np);
of_node_put(iep_np);
@@ -664,21 +705,28 @@ struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx)
return ERR_PTR(-EPROBE_DEFER);
iep = platform_get_drvdata(pdev);
- if (!iep)
- return ERR_PTR(-EPROBE_DEFER);
+ if (!iep) {
+ ret = -EPROBE_DEFER;
+ goto err_put_pdev;
+ }
device_lock(iep->dev);
if (iep->client_np) {
device_unlock(iep->dev);
dev_err(iep->dev, "IEP is already acquired by %s",
iep->client_np->name);
- return ERR_PTR(-EBUSY);
+ ret = -EBUSY;
+ goto err_put_pdev;
}
iep->client_np = np;
device_unlock(iep->dev);
- get_device(iep->dev);
return iep;
+
+err_put_pdev:
+ put_device(&pdev->dev);
+
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(icss_iep_get_idx);
@@ -780,6 +828,11 @@ int icss_iep_exit(struct icss_iep *iep)
}
icss_iep_disable(iep);
+ if (iep->pps_enabled)
+ icss_iep_pps_enable(iep, false);
+ else if (iep->perout_enabled)
+ icss_iep_perout_enable(iep, NULL, false);
+
return 0;
}
EXPORT_SYMBOL_GPL(icss_iep_exit);
@@ -929,11 +982,112 @@ static const struct icss_iep_plat_data am654_icss_iep_plat_data = {
.config = &am654_icss_iep_regmap_config,
};
+static const struct icss_iep_plat_data am57xx_icss_iep_plat_data = {
+ .flags = ICSS_IEP_64BIT_COUNTER_SUPPORT |
+ ICSS_IEP_SLOW_COMPEN_REG_SUPPORT,
+ .reg_offs = {
+ [ICSS_IEP_GLOBAL_CFG_REG] = 0x00,
+ [ICSS_IEP_COMPEN_REG] = 0x08,
+ [ICSS_IEP_SLOW_COMPEN_REG] = 0x0c,
+ [ICSS_IEP_COUNT_REG0] = 0x10,
+ [ICSS_IEP_COUNT_REG1] = 0x14,
+ [ICSS_IEP_CAPTURE_CFG_REG] = 0x18,
+ [ICSS_IEP_CAPTURE_STAT_REG] = 0x1c,
+
+ [ICSS_IEP_CAP6_RISE_REG0] = 0x50,
+ [ICSS_IEP_CAP6_RISE_REG1] = 0x54,
+
+ [ICSS_IEP_CAP7_RISE_REG0] = 0x60,
+ [ICSS_IEP_CAP7_RISE_REG1] = 0x64,
+
+ [ICSS_IEP_CMP_CFG_REG] = 0x70,
+ [ICSS_IEP_CMP_STAT_REG] = 0x74,
+ [ICSS_IEP_CMP0_REG0] = 0x78,
+ [ICSS_IEP_CMP0_REG1] = 0x7c,
+ [ICSS_IEP_CMP1_REG0] = 0x80,
+ [ICSS_IEP_CMP1_REG1] = 0x84,
+
+ [ICSS_IEP_CMP8_REG0] = 0xc0,
+ [ICSS_IEP_CMP8_REG1] = 0xc4,
+ [ICSS_IEP_SYNC_CTRL_REG] = 0x180,
+ [ICSS_IEP_SYNC0_STAT_REG] = 0x188,
+ [ICSS_IEP_SYNC1_STAT_REG] = 0x18c,
+ [ICSS_IEP_SYNC_PWIDTH_REG] = 0x190,
+ [ICSS_IEP_SYNC0_PERIOD_REG] = 0x194,
+ [ICSS_IEP_SYNC1_DELAY_REG] = 0x198,
+ [ICSS_IEP_SYNC_START_REG] = 0x19c,
+ },
+ .config = &am654_icss_iep_regmap_config,
+};
+
+static bool am335x_icss_iep_valid_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ICSS_IEP_GLOBAL_CFG_REG ... ICSS_IEP_CAPTURE_STAT_REG:
+ case ICSS_IEP_CAP6_RISE_REG0:
+ case ICSS_IEP_CMP_CFG_REG ... ICSS_IEP_CMP0_REG0:
+ case ICSS_IEP_CMP8_REG0 ... ICSS_IEP_SYNC_START_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config am335x_icss_iep_regmap_config = {
+ .name = "icss iep",
+ .reg_stride = 1,
+ .reg_write = icss_iep_regmap_write,
+ .reg_read = icss_iep_regmap_read,
+ .writeable_reg = am335x_icss_iep_valid_reg,
+ .readable_reg = am335x_icss_iep_valid_reg,
+};
+
+static const struct icss_iep_plat_data am335x_icss_iep_plat_data = {
+ .flags = 0,
+ .reg_offs = {
+ [ICSS_IEP_GLOBAL_CFG_REG] = 0x00,
+ [ICSS_IEP_COMPEN_REG] = 0x08,
+ [ICSS_IEP_COUNT_REG0] = 0x0c,
+ [ICSS_IEP_CAPTURE_CFG_REG] = 0x10,
+ [ICSS_IEP_CAPTURE_STAT_REG] = 0x14,
+
+ [ICSS_IEP_CAP6_RISE_REG0] = 0x30,
+
+ [ICSS_IEP_CAP7_RISE_REG0] = 0x38,
+
+ [ICSS_IEP_CMP_CFG_REG] = 0x40,
+ [ICSS_IEP_CMP_STAT_REG] = 0x44,
+ [ICSS_IEP_CMP0_REG0] = 0x48,
+
+ [ICSS_IEP_CMP8_REG0] = 0x88,
+ [ICSS_IEP_SYNC_CTRL_REG] = 0x100,
+ [ICSS_IEP_SYNC0_STAT_REG] = 0x108,
+ [ICSS_IEP_SYNC1_STAT_REG] = 0x10c,
+ [ICSS_IEP_SYNC_PWIDTH_REG] = 0x110,
+ [ICSS_IEP_SYNC0_PERIOD_REG] = 0x114,
+ [ICSS_IEP_SYNC1_DELAY_REG] = 0x118,
+ [ICSS_IEP_SYNC_START_REG] = 0x11c,
+ },
+ .config = &am335x_icss_iep_regmap_config,
+};
+
static const struct of_device_id icss_iep_of_match[] = {
{
.compatible = "ti,am654-icss-iep",
.data = &am654_icss_iep_plat_data,
},
+ {
+ .compatible = "ti,am5728-icss-iep",
+ .data = &am57xx_icss_iep_plat_data,
+ },
+ {
+ .compatible = "ti,am4376-icss-iep",
+ .data = &am335x_icss_iep_plat_data,
+ },
+ {
+ .compatible = "ti,am3356-icss-iep",
+ .data = &am335x_icss_iep_plat_data,
+ },
{},
};
MODULE_DEVICE_TABLE(of, icss_iep_of_match);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
index fdebeb2f84e0..090aa74d3ce7 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -45,6 +45,11 @@ void prueth_cleanup_rx_chns(struct prueth_emac *emac,
struct prueth_rx_chn *rx_chn,
int max_rflows)
{
+ if (rx_chn->pg_pool) {
+ page_pool_destroy(rx_chn->pg_pool);
+ rx_chn->pg_pool = NULL;
+ }
+
if (rx_chn->desc_pool)
k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
@@ -88,15 +93,91 @@ void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num)
}
EXPORT_SYMBOL_GPL(prueth_ndev_del_tx_napi);
+static int emac_xsk_xmit_zc(struct prueth_emac *emac,
+ unsigned int q_idx)
+{
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[q_idx];
+ struct xsk_buff_pool *pool = tx_chn->xsk_pool;
+ struct net_device *ndev = emac->ndev;
+ struct cppi5_host_desc_t *host_desc;
+ dma_addr_t dma_desc, dma_buf;
+ struct prueth_swdata *swdata;
+ struct xdp_desc xdp_desc;
+ int num_tx = 0, pkt_len;
+ int descs_avail, ret;
+ u32 *epib;
+ int i;
+
+ descs_avail = k3_cppi_desc_pool_avail(tx_chn->desc_pool);
+ /* Ensure the TX ring is not filled up by XDP: MAX_SKB_FRAGS descriptors
+ * always remain available for the normal TX path, which stops the queue
+ * there if necessary.
+ */
+ if (descs_avail <= MAX_SKB_FRAGS)
+ return 0;
+
+ descs_avail -= MAX_SKB_FRAGS;
+
+ for (i = 0; i < descs_avail; i++) {
+ if (!xsk_tx_peek_desc(pool, &xdp_desc))
+ break;
+
+ dma_buf = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
+ pkt_len = xdp_desc.len;
+ xsk_buff_raw_dma_sync_for_device(pool, dma_buf, pkt_len);
+
+ host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (unlikely(!host_desc))
+ break;
+
+ cppi5_hdesc_init(host_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ PRUETH_NAV_PS_DATA_SIZE);
+ cppi5_hdesc_set_pkttype(host_desc, 0);
+ epib = host_desc->epib;
+ epib[0] = 0;
+ epib[1] = 0;
+ cppi5_hdesc_set_pktlen(host_desc, pkt_len);
+ cppi5_desc_set_tags_ids(&host_desc->hdr, 0,
+ (emac->port_id | (q_idx << 8)));
+
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &dma_buf);
+ cppi5_hdesc_attach_buf(host_desc, dma_buf, pkt_len, dma_buf,
+ pkt_len);
+
+ swdata = cppi5_hdesc_get_swdata(host_desc);
+ swdata->type = PRUETH_SWDATA_XSK;
+
+ dma_desc = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
+ host_desc);
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn,
+ host_desc, dma_desc);
+
+ if (ret) {
+ ndev->stats.tx_errors++;
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, host_desc);
+ break;
+ }
+
+ num_tx++;
+ }
+
+ xsk_tx_release(tx_chn->xsk_pool);
+ return num_tx;
+}
+
void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
struct cppi5_host_desc_t *desc)
{
struct cppi5_host_desc_t *first_desc, *next_desc;
dma_addr_t buf_dma, next_desc_dma;
+ struct prueth_swdata *swdata;
u32 buf_dma_len;
first_desc = desc;
next_desc = first_desc;
+ swdata = cppi5_hdesc_get_swdata(first_desc);
+ if (swdata->type == PRUETH_SWDATA_XSK)
+ goto free_pool;
cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
@@ -121,6 +202,7 @@ void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
}
+free_pool:
k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
}
EXPORT_SYMBOL_GPL(prueth_xmit_free);
@@ -131,12 +213,15 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
struct net_device *ndev = emac->ndev;
struct cppi5_host_desc_t *desc_tx;
struct netdev_queue *netif_txq;
+ struct prueth_swdata *swdata;
struct prueth_tx_chn *tx_chn;
unsigned int total_bytes = 0;
+ int xsk_frames_done = 0;
+ struct xdp_frame *xdpf;
+ unsigned int pkt_len;
struct sk_buff *skb;
dma_addr_t desc_dma;
int res, num_tx = 0;
- void **swdata;
tx_chn = &emac->tx_chns[chn];
@@ -157,20 +242,31 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_tx);
- /* was this command's TX complete? */
- if (emac->is_sr1 && *(swdata) == emac->cmd_data) {
+ switch (swdata->type) {
+ case PRUETH_SWDATA_SKB:
+ skb = swdata->data.skb;
+ dev_sw_netstats_tx_add(skb->dev, 1, skb->len);
+ total_bytes += skb->len;
+ napi_consume_skb(skb, budget);
+ break;
+ case PRUETH_SWDATA_XDPF:
+ xdpf = swdata->data.xdpf;
+ dev_sw_netstats_tx_add(ndev, 1, xdpf->len);
+ total_bytes += xdpf->len;
+ xdp_return_frame(xdpf);
+ break;
+ case PRUETH_SWDATA_XSK:
+ pkt_len = cppi5_hdesc_get_pktlen(desc_tx);
+ dev_sw_netstats_tx_add(ndev, 1, pkt_len);
+ xsk_frames_done++;
+ break;
+ default:
prueth_xmit_free(tx_chn, desc_tx);
+ ndev->stats.tx_dropped++;
continue;
}
- skb = *(swdata);
prueth_xmit_free(tx_chn, desc_tx);
-
- ndev = skb->dev;
- ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += skb->len;
- total_bytes += skb->len;
- napi_consume_skb(skb, budget);
num_tx++;
}
@@ -192,6 +288,18 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
__netif_tx_unlock(netif_txq);
}
+ if (tx_chn->xsk_pool) {
+ if (xsk_frames_done)
+ xsk_tx_completed(tx_chn->xsk_pool, xsk_frames_done);
+
+ if (xsk_uses_need_wakeup(tx_chn->xsk_pool))
+ xsk_set_tx_need_wakeup(tx_chn->xsk_pool);
+
+ netif_txq = netdev_get_tx_queue(ndev, chn);
+ txq_trans_cond_update(netif_txq);
+ emac_xsk_xmit_zc(emac, chn);
+ }
+
return num_tx;
}
@@ -200,7 +308,10 @@ static enum hrtimer_restart emac_tx_timer_callback(struct hrtimer *timer)
struct prueth_tx_chn *tx_chns =
container_of(timer, struct prueth_tx_chn, tx_hrtimer);
- enable_irq(tx_chns->irq);
+ if (tx_chns->irq_disabled) {
+ tx_chns->irq_disabled = false;
+ enable_irq(tx_chns->irq);
+ }
return HRTIMER_NORESTART;
}
@@ -223,7 +334,10 @@ static int emac_napi_tx_poll(struct napi_struct *napi_tx, int budget)
ns_to_ktime(tx_chn->tx_pace_timeout_ns),
HRTIMER_MODE_REL_PINNED);
} else {
- enable_irq(tx_chn->irq);
+ if (tx_chn->irq_disabled) {
+ tx_chn->irq_disabled = false;
+ enable_irq(tx_chn->irq);
+ }
}
}
@@ -234,6 +348,7 @@ static irqreturn_t prueth_tx_irq(int irq, void *dev_id)
{
struct prueth_tx_chn *tx_chn = dev_id;
+ tx_chn->irq_disabled = true;
disable_irq_nosync(irq);
napi_schedule(&tx_chn->napi_tx);
@@ -249,9 +364,8 @@ int prueth_ndev_add_tx_napi(struct prueth_emac *emac)
struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
netif_napi_add_tx(emac->ndev, &tx_chn->napi_tx, emac_napi_tx_poll);
- hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- tx_chn->tx_hrtimer.function = &emac_tx_timer_callback;
+ hrtimer_setup(&tx_chn->tx_hrtimer, &emac_tx_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
ret = request_irq(tx_chn->irq, prueth_tx_irq,
IRQF_TRIGGER_HIGH, tx_chn->name,
tx_chn);
@@ -351,6 +465,29 @@ fail:
}
EXPORT_SYMBOL_GPL(prueth_init_tx_chns);
+static struct page_pool *prueth_create_page_pool(struct prueth_emac *emac,
+ struct device *dma_dev,
+ int size)
+{
+ struct page_pool_params pp_params = { 0 };
+ struct page_pool *pool;
+
+ pp_params.order = 0;
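+ /* Let the pool DMA-map its pages and sync them for the device, so the
+ * RX refill path only needs page_pool_get_dma_addr().
+ */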
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.pool_size = size;
+ pp_params.nid = dev_to_node(emac->prueth->dev);
+ pp_params.dma_dir = DMA_BIDIRECTIONAL;
+ pp_params.dev = dma_dev;
+ pp_params.napi = &emac->napi_rx;
+ pp_params.max_len = PAGE_SIZE;
+
+ pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool))
+ netdev_err(emac->ndev, "cannot create rx page pool\n");
+
+ return pool;
+}
+
int prueth_init_rx_chns(struct prueth_emac *emac,
struct prueth_rx_chn *rx_chn,
char *name, u32 max_rflows,
@@ -360,6 +497,7 @@ int prueth_init_rx_chns(struct prueth_emac *emac,
struct device *dev = emac->prueth->dev;
struct net_device *ndev = emac->ndev;
u32 fdqring_id, hdesc_size;
+ struct page_pool *pool;
int i, ret = 0, slice;
int flow_id_base;
@@ -402,6 +540,14 @@ int prueth_init_rx_chns(struct prueth_emac *emac,
goto fail;
}
+ pool = prueth_create_page_pool(emac, rx_chn->dma_dev, rx_chn->descs_num);
+ if (IS_ERR(pool)) {
+ ret = PTR_ERR(pool);
+ goto fail;
+ }
+
+ rx_chn->pg_pool = pool;
+
flow_id_base = k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
if (emac->is_sr1 && !strcmp(name, "rxmgm")) {
emac->rx_mgm_flow_id_base = flow_id_base;
@@ -461,17 +607,17 @@ fail:
}
EXPORT_SYMBOL_GPL(prueth_init_rx_chns);
-int prueth_dma_rx_push(struct prueth_emac *emac,
- struct sk_buff *skb,
- struct prueth_rx_chn *rx_chn)
+int prueth_dma_rx_push_mapped(struct prueth_emac *emac,
+ struct prueth_rx_chn *rx_chn,
+ struct page *page, u32 buf_len)
{
struct net_device *ndev = emac->ndev;
struct cppi5_host_desc_t *desc_rx;
- u32 pkt_len = skb_tailroom(skb);
+ struct prueth_swdata *swdata;
dma_addr_t desc_dma;
dma_addr_t buf_dma;
- void **swdata;
+ buf_dma = page_pool_get_dma_addr(page) + PRUETH_HEADROOM;
desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
if (!desc_rx) {
netdev_err(ndev, "rx push: failed to allocate descriptor\n");
@@ -479,25 +625,19 @@ int prueth_dma_rx_push(struct prueth_emac *emac,
}
desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
- buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
- k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- netdev_err(ndev, "rx push: failed to map rx pkt buffer\n");
- return -EINVAL;
- }
-
cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
PRUETH_NAV_PS_DATA_SIZE);
k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
- cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));
+ cppi5_hdesc_attach_buf(desc_rx, buf_dma, buf_len, buf_dma, buf_len);
swdata = cppi5_hdesc_get_swdata(desc_rx);
- *swdata = skb;
+ swdata->type = PRUETH_SWDATA_PAGE;
+ swdata->data.page = page;
- return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0,
+ return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, PRUETH_RX_FLOW_DATA,
desc_rx, desc_dma);
}
-EXPORT_SYMBOL_GPL(prueth_dma_rx_push);
+EXPORT_SYMBOL_GPL(prueth_dma_rx_push_mapped);
u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns)
{
@@ -535,18 +675,360 @@ void emac_rx_timestamp(struct prueth_emac *emac,
ssh->hwtstamp = ns_to_ktime(ns);
}
-static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
+/**
+ * emac_xmit_xdp_frame - transmits an XDP frame
+ * @emac: emac device
+ * @xdpf: data to transmit
+ * @q_idx: queue id
+ * @buff_type: Type of buffer to be transmitted
+ *
+ * Return: XDP state
+ */
+u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
+ struct xdp_frame *xdpf,
+ unsigned int q_idx,
+ enum prueth_tx_buff_type buff_type)
+{
+ struct cppi5_host_desc_t *first_desc;
+ struct net_device *ndev = emac->ndev;
+ struct netdev_queue *netif_txq;
+ struct prueth_tx_chn *tx_chn;
+ dma_addr_t desc_dma, buf_dma;
+ struct prueth_swdata *swdata;
+ struct page *page;
+ u32 *epib;
+ int ret;
+
+ if (q_idx >= PRUETH_MAX_TX_QUEUES) {
+ netdev_err(ndev, "xdp tx: invalid q_id %d\n", q_idx);
+ return ICSSG_XDP_CONSUMED; /* drop */
+ }
+
+ tx_chn = &emac->tx_chns[q_idx];
+
+ first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (!first_desc) {
+ netdev_dbg(ndev, "xdp tx: failed to allocate descriptor\n");
+ return ICSSG_XDP_CONSUMED; /* drop */
+ }
+
+ if (buff_type == PRUETH_TX_BUFF_TYPE_XDP_TX) { /* already DMA mapped by page_pool */
+ page = virt_to_head_page(xdpf->data);
+ if (unlikely(!page)) {
+ netdev_err(ndev, "xdp tx: failed to get page from xdpf\n");
+ goto drop_free_descs;
+ }
+ buf_dma = page_pool_get_dma_addr(page);
+ buf_dma += xdpf->headroom + sizeof(struct xdp_frame);
+ } else { /* Map the linear buffer */
+ buf_dma = dma_map_single(tx_chn->dma_dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
+ netdev_err(ndev, "xdp tx: failed to map data buffer\n");
+ goto drop_free_descs; /* drop */
+ }
+ }
+
+ cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ PRUETH_NAV_PS_DATA_SIZE);
+ cppi5_hdesc_set_pkttype(first_desc, 0);
+ epib = first_desc->epib;
+ epib[0] = 0;
+ epib[1] = 0;
+
+ /* set dst tag to indicate internal qid at the firmware which is at
+ * bit8..bit15. bit0..bit7 indicates port num for directed
+ * packets in case of switch mode operation
+ */
+ cppi5_desc_set_tags_ids(&first_desc->hdr, 0, (emac->port_id | (q_idx << 8)));
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
+ cppi5_hdesc_attach_buf(first_desc, buf_dma, xdpf->len, buf_dma, xdpf->len);
+ swdata = cppi5_hdesc_get_swdata(first_desc);
+ swdata->type = PRUETH_SWDATA_XDPF;
+ swdata->data.xdpf = xdpf;
+
+ /* Report BQL before sending the packet */
+ netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
+ netdev_tx_sent_queue(netif_txq, xdpf->len);
+
+ cppi5_hdesc_set_pktlen(first_desc, xdpf->len);
+ desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
+
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
+ if (ret) {
+ netdev_err(ndev, "xdp tx: push failed: %d\n", ret);
+ netdev_tx_completed_queue(netif_txq, 1, xdpf->len);
+ goto drop_free_descs;
+ }
+
+ return ICSSG_XDP_TX;
+
+drop_free_descs:
+ prueth_xmit_free(tx_chn, first_desc);
+ return ICSSG_XDP_CONSUMED;
+}
+EXPORT_SYMBOL_GPL(emac_xmit_xdp_frame);
+
+/**
+ * emac_run_xdp - run an XDP program
+ * @emac: emac device
+ * @xdp: XDP buffer containing the frame
+ * @len: Rx descriptor packet length
+ *
+ * Return: XDP state
+ */
+static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp, u32 *len)
+{
+ struct net_device *ndev = emac->ndev;
+ struct netdev_queue *netif_txq;
+ int cpu = smp_processor_id();
+ struct bpf_prog *xdp_prog;
+ struct xdp_frame *xdpf;
+ u32 pkt_len = *len;
+ u32 act, result;
+ int q_idx, err;
+
+ xdp_prog = READ_ONCE(emac->xdp_prog);
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ return ICSSG_XDP_PASS;
+ case XDP_TX:
+ /* Send packet to TX ring for immediate transmission */
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf)) {
+ ndev->stats.tx_dropped++;
+ goto drop;
+ }
+
+ q_idx = cpu % emac->tx_ch_num;
+ netif_txq = netdev_get_tx_queue(ndev, q_idx);
+ __netif_tx_lock(netif_txq, cpu);
+ result = emac_xmit_xdp_frame(emac, xdpf, q_idx,
+ PRUETH_TX_BUFF_TYPE_XDP_TX);
+ __netif_tx_unlock(netif_txq);
+ if (result == ICSSG_XDP_CONSUMED) {
+ ndev->stats.tx_dropped++;
+ goto drop;
+ }
+
+ dev_sw_netstats_rx_add(ndev, xdpf->len);
+ return result;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(emac->ndev, xdp, xdp_prog);
+ if (err)
+ goto drop;
+
+ dev_sw_netstats_rx_add(ndev, pkt_len);
+ return ICSSG_XDP_REDIR;
+ default:
+ bpf_warn_invalid_xdp_action(emac->ndev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+drop:
+ trace_xdp_exception(emac->ndev, xdp_prog, act);
+ fallthrough; /* handle aborts by dropping packet */
+ case XDP_DROP:
+ ndev->stats.rx_dropped++;
+ return ICSSG_XDP_CONSUMED;
+ }
+}
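+
+/* emac_run_xdp() returns one of the ICSSG_XDP_* codes defined in the driver
+ * header (not shown in this diff). The RX paths OR these codes together and
+ * test ICSSG_XDP_REDIR before calling xdp_do_flush(), so the codes are
+ * presumably distinct single-bit flags with ICSSG_XDP_PASS == 0.
+ */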
+
+static int prueth_dma_rx_push_mapped_zc(struct prueth_emac *emac,
+ struct prueth_rx_chn *rx_chn,
+ struct xdp_buff *xdp)
+{
+ struct net_device *ndev = emac->ndev;
+ struct cppi5_host_desc_t *desc_rx;
+ struct prueth_swdata *swdata;
+ dma_addr_t desc_dma;
+ dma_addr_t buf_dma;
+ int buf_len;
+
+ buf_dma = xsk_buff_xdp_get_dma(xdp);
+ desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
+ if (!desc_rx) {
+ netdev_err(ndev, "rx push: failed to allocate descriptor\n");
+ return -ENOMEM;
+ }
+ desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
+
+ cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ PRUETH_NAV_PS_DATA_SIZE);
+ k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
+ buf_len = xsk_pool_get_rx_frame_size(rx_chn->xsk_pool);
+ cppi5_hdesc_attach_buf(desc_rx, buf_dma, buf_len, buf_dma, buf_len);
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ swdata->type = PRUETH_SWDATA_XSK;
+ swdata->data.xdp = xdp;
+
+ return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, PRUETH_RX_FLOW_DATA,
+ desc_rx, desc_dma);
+}
+
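+/* Allocate up to @budget buffers from the XSK pool and queue them on the
+ * RX ring; returns the number of buffers actually queued.
+ */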
+static int prueth_rx_alloc_zc(struct prueth_emac *emac, int budget)
+{
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+ struct xdp_buff *xdp;
+ int i, ret;
+
+ for (i = 0; i < budget; i++) {
+ xdp = xsk_buff_alloc(rx_chn->xsk_pool);
+ if (!xdp)
+ break;
+
+ ret = prueth_dma_rx_push_mapped_zc(emac, rx_chn, xdp);
+ if (ret) {
+ netdev_err(emac->ndev, "rx alloc: failed to map descriptors to xdp buff\n");
+ xsk_buff_free(xdp);
+ break;
+ }
+ }
+
+ return i;
+}
+
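+/* Copy the received frame out of the AF_XDP zero-copy buffer into a freshly
+ * allocated skb and hand it up the stack; the caller releases the XSK buffer.
+ */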
+static void emac_dispatch_skb_zc(struct prueth_emac *emac, struct xdp_buff *xdp, u32 *psdata)
+{
+ unsigned int headroom = xdp->data - xdp->data_hard_start;
+ unsigned int pkt_len = xdp->data_end - xdp->data;
+ struct net_device *ndev = emac->ndev;
+ struct sk_buff *skb;
+
+ skb = napi_alloc_skb(&emac->napi_rx, xdp->data_end - xdp->data_hard_start);
+ if (unlikely(!skb)) {
+ ndev->stats.rx_dropped++;
+ return;
+ }
+
+ skb_reserve(skb, headroom);
+ skb_put_data(skb, xdp->data, pkt_len);
+ skb->dev = ndev;
+
+ /* RX HW timestamp */
+ if (emac->rx_ts_enabled)
+ emac_rx_timestamp(emac, skb, psdata);
+
+ if (emac->prueth->is_switch_mode)
+ skb->offload_fwd_mark = emac->offload_fwd_mark;
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ skb_mark_for_recycle(skb);
+ napi_gro_receive(&emac->napi_rx, skb);
+ ndev->stats.rx_bytes += pkt_len;
+ ndev->stats.rx_packets++;
+}
+
+static int emac_rx_packet_zc(struct prueth_emac *emac, u32 flow_id,
+ int budget)
{
struct prueth_rx_chn *rx_chn = &emac->rx_chns;
u32 buf_dma_len, pkt_len, port_id = 0;
struct net_device *ndev = emac->ndev;
struct cppi5_host_desc_t *desc_rx;
- struct sk_buff *skb, *new_skb;
+ struct prueth_swdata *swdata;
dma_addr_t desc_dma, buf_dma;
- void **swdata;
+ struct xdp_buff *xdp;
+ int xdp_status = 0;
+ int count = 0;
u32 *psdata;
int ret;
+ while (count < budget) {
+ ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
+ if (ret) {
+ if (ret != -ENODATA)
+ netdev_err(ndev, "rx pop: failed: %d\n", ret);
+ break;
+ }
+
+ if (cppi5_desc_is_tdcm(desc_dma)) {
+ complete(&emac->tdown_complete);
+ break;
+ }
+
+ desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ if (swdata->type != PRUETH_SWDATA_XSK) {
+ netdev_err(ndev, "rx_pkt: invalid swdata->type %d\n", swdata->type);
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+ break;
+ }
+
+ xdp = swdata->data.xdp;
+ cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+ k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
+ pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
+ /* firmware adds 4 CRC bytes, strip them */
+ pkt_len -= 4;
+ cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
+ psdata = cppi5_hdesc_get_psdata(desc_rx);
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+ count++;
+ xsk_buff_set_size(xdp, pkt_len);
+ xsk_buff_dma_sync_for_cpu(xdp);
+
+ if (prueth_xdp_is_enabled(emac)) {
+ ret = emac_run_xdp(emac, xdp, &pkt_len);
+ switch (ret) {
+ case ICSSG_XDP_PASS:
+ /* prepare skb and send to n/w stack */
+ emac_dispatch_skb_zc(emac, xdp, psdata);
+ xsk_buff_free(xdp);
+ break;
+ case ICSSG_XDP_CONSUMED:
+ xsk_buff_free(xdp);
+ break;
+ case ICSSG_XDP_TX:
+ case ICSSG_XDP_REDIR:
+ xdp_status |= ret;
+ break;
+ }
+ } else {
+ /* prepare skb and send to n/w stack */
+ emac_dispatch_skb_zc(emac, xdp, psdata);
+ xsk_buff_free(xdp);
+ }
+ }
+
+ if (xdp_status & ICSSG_XDP_REDIR)
+ xdp_do_flush();
+
+ /* Refill the RX ring with one XSK buffer for each of the "count"
+ * packets just processed so that reception can continue.
+ */
+ ret = prueth_rx_alloc_zc(emac, count);
+
+ if (xsk_uses_need_wakeup(rx_chn->xsk_pool)) {
+ /* If user space did not provide enough buffers, it must
+ * explicitly wake up the kernel once new buffers are available.
+ */
+ if (ret < count)
+ xsk_set_rx_need_wakeup(rx_chn->xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(rx_chn->xsk_pool);
+ }
+
+ return count;
+}
+
+static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
+{
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+ u32 buf_dma_len, pkt_len, port_id = 0;
+ struct net_device *ndev = emac->ndev;
+ struct cppi5_host_desc_t *desc_rx;
+ struct prueth_swdata *swdata;
+ dma_addr_t desc_dma, buf_dma;
+ struct page *page, *new_page;
+ struct page_pool *pool;
+ struct sk_buff *skb;
+ struct xdp_buff xdp;
+ int headroom, ret;
+ u32 *psdata;
+ void *pa;
+
+ *xdp_state = 0;
+ pool = rx_chn->pg_pool;
ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
if (ret) {
if (ret != -ENODATA)
@@ -554,52 +1036,85 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
return ret;
}
- if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown ? */
+ if (cppi5_desc_is_tdcm(desc_dma)) {
+ complete(&emac->tdown_complete);
return 0;
+ }
desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
-
swdata = cppi5_hdesc_get_swdata(desc_rx);
- skb = *swdata;
-
- psdata = cppi5_hdesc_get_psdata(desc_rx);
- /* RX HW timestamp */
- if (emac->rx_ts_enabled)
- emac_rx_timestamp(emac, skb, psdata);
+ if (swdata->type != PRUETH_SWDATA_PAGE) {
+ netdev_err(ndev, "rx_pkt: invalid swdata->type %d\n", swdata->type);
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+ return 0;
+ }
+ page = swdata->data.page;
+ page_pool_dma_sync_for_cpu(pool, page, 0, PAGE_SIZE);
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
/* firmware adds 4 CRC bytes, strip them */
pkt_len -= 4;
cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
-
- dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- skb->dev = ndev;
- new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE);
/* if allocation fails we drop the packet but push the
- * descriptor back to the ring with old skb to prevent a stall
+ * descriptor back to the ring with old page to prevent a stall
*/
- if (!new_skb) {
+ new_page = page_pool_dev_alloc_pages(pool);
+ if (unlikely(!new_page)) {
+ new_page = page;
ndev->stats.rx_dropped++;
- new_skb = skb;
+ goto requeue;
+ }
+
+ pa = page_address(page);
+ if (prueth_xdp_is_enabled(emac)) {
+ xdp_init_buff(&xdp, PAGE_SIZE, &rx_chn->xdp_rxq);
+ xdp_prepare_buff(&xdp, pa, PRUETH_HEADROOM, pkt_len, false);
+
+ *xdp_state = emac_run_xdp(emac, &xdp, &pkt_len);
+ if (*xdp_state != ICSSG_XDP_PASS)
+ goto requeue;
+ headroom = xdp.data - xdp.data_hard_start;
+ pkt_len = xdp.data_end - xdp.data;
} else {
- /* send the filled skb up the n/w stack */
- skb_put(skb, pkt_len);
- if (emac->prueth->is_switch_mode)
- skb->offload_fwd_mark = emac->offload_fwd_mark;
- skb->protocol = eth_type_trans(skb, ndev);
- napi_gro_receive(&emac->napi_rx, skb);
- ndev->stats.rx_bytes += pkt_len;
- ndev->stats.rx_packets++;
+ headroom = PRUETH_HEADROOM;
+ }
+
+ /* prepare skb and send to n/w stack */
+ skb = napi_build_skb(pa, PAGE_SIZE);
+ if (!skb) {
+ ndev->stats.rx_dropped++;
+ page_pool_recycle_direct(pool, page);
+ goto requeue;
}
+ skb_reserve(skb, headroom);
+ skb_put(skb, pkt_len);
+ skb->dev = ndev;
+
+ psdata = cppi5_hdesc_get_psdata(desc_rx);
+ /* RX HW timestamp */
+ if (emac->rx_ts_enabled)
+ emac_rx_timestamp(emac, skb, psdata);
+
+ if (emac->prueth->is_switch_mode)
+ skb->offload_fwd_mark = emac->offload_fwd_mark;
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ skb_mark_for_recycle(skb);
+ napi_gro_receive(&emac->napi_rx, skb);
+ ndev->stats.rx_bytes += pkt_len;
+ ndev->stats.rx_packets++;
+
+requeue:
/* queue another RX DMA */
- ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_chns);
+ ret = prueth_dma_rx_push_mapped(emac, &emac->rx_chns, new_page,
+ PRUETH_MAX_PKT_SIZE);
if (WARN_ON(ret < 0)) {
- dev_kfree_skb_any(new_skb);
+ page_pool_recycle_direct(pool, new_page);
ndev->stats.rx_errors++;
ndev->stats.rx_dropped++;
}
@@ -607,27 +1122,29 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
return ret;
}
-static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
+void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
{
struct prueth_rx_chn *rx_chn = data;
struct cppi5_host_desc_t *desc_rx;
- struct sk_buff *skb;
- dma_addr_t buf_dma;
- u32 buf_dma_len;
- void **swdata;
+ struct prueth_swdata *swdata;
+ struct page_pool *pool;
+ struct xdp_buff *xdp;
+ struct page *page;
+ pool = rx_chn->pg_pool;
desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_rx);
- skb = *swdata;
- cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
- k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
+ if (rx_chn->xsk_pool) {
+ xdp = swdata->data.xdp;
+ xsk_buff_free(xdp);
+ } else {
+ page = swdata->data.page;
+ page_pool_recycle_direct(pool, page);
+ }
- dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len,
- DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
-
- dev_kfree_skb_any(skb);
}
+EXPORT_SYMBOL_GPL(prueth_rx_cleanup);
static int prueth_tx_ts_cookie_get(struct prueth_emac *emac)
{
@@ -662,13 +1179,13 @@ enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev
struct prueth_emac *emac = netdev_priv(ndev);
struct prueth *prueth = emac->prueth;
struct netdev_queue *netif_txq;
+ struct prueth_swdata *swdata;
struct prueth_tx_chn *tx_chn;
dma_addr_t desc_dma, buf_dma;
u32 pkt_len, dst_tag_id;
int i, ret = 0, q_idx;
bool in_tx_ts = 0;
int tx_ts_cookie;
- void **swdata;
u32 *epib;
pkt_len = skb_headlen(skb);
@@ -730,7 +1247,8 @@ enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev
k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
swdata = cppi5_hdesc_get_swdata(first_desc);
- *swdata = skb;
+ swdata->type = PRUETH_SWDATA_SKB;
+ swdata->data.skb = skb;
/* Handle the case where skb is fragmented in pages */
cur_desc = first_desc;
@@ -780,6 +1298,7 @@ enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev
ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
if (ret) {
netdev_err(ndev, "tx: push failed: %d\n", ret);
+ netdev_tx_completed_queue(netif_txq, 1, pkt_len);
goto drop_free_descs;
}
@@ -829,25 +1348,44 @@ drop_stop_q_busy:
}
EXPORT_SYMBOL_GPL(icssg_ndo_start_xmit);
-static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
+void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
{
struct prueth_tx_chn *tx_chn = data;
struct cppi5_host_desc_t *desc_tx;
+ struct xsk_buff_pool *xsk_pool;
+ struct prueth_swdata *swdata;
+ struct xdp_frame *xdpf;
struct sk_buff *skb;
- void **swdata;
desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_tx);
- skb = *(swdata);
- prueth_xmit_free(tx_chn, desc_tx);
- dev_kfree_skb_any(skb);
+ switch (swdata->type) {
+ case PRUETH_SWDATA_SKB:
+ skb = swdata->data.skb;
+ dev_kfree_skb_any(skb);
+ break;
+ case PRUETH_SWDATA_XDPF:
+ xdpf = swdata->data.xdpf;
+ xdp_return_frame(xdpf);
+ break;
+ case PRUETH_SWDATA_XSK:
+ xsk_pool = tx_chn->xsk_pool;
+ xsk_tx_completed(xsk_pool, 1);
+ break;
+ default:
+ break;
+ }
+
+ prueth_xmit_free(tx_chn, desc_tx);
}
+EXPORT_SYMBOL_GPL(prueth_tx_cleanup);
irqreturn_t prueth_rx_irq(int irq, void *dev_id)
{
struct prueth_emac *emac = dev_id;
+ emac->rx_chns.irq_disabled = true;
disable_irq_nosync(irq);
napi_schedule(&emac->napi_rx);
@@ -855,31 +1393,6 @@ irqreturn_t prueth_rx_irq(int irq, void *dev_id)
}
EXPORT_SYMBOL_GPL(prueth_rx_irq);
-void prueth_emac_stop(struct prueth_emac *emac)
-{
- struct prueth *prueth = emac->prueth;
- int slice;
-
- switch (emac->port_id) {
- case PRUETH_PORT_MII0:
- slice = ICSS_SLICE0;
- break;
- case PRUETH_PORT_MII1:
- slice = ICSS_SLICE1;
- break;
- default:
- netdev_err(emac->ndev, "invalid port\n");
- return;
- }
-
- emac->fw_running = 0;
- if (!emac->is_sr1)
- rproc_shutdown(prueth->txpru[slice]);
- rproc_shutdown(prueth->rtu[slice]);
- rproc_shutdown(prueth->pru[slice]);
-}
-EXPORT_SYMBOL_GPL(prueth_emac_stop);
-
void prueth_cleanup_tx_ts(struct prueth_emac *emac)
{
int i;
@@ -900,31 +1413,46 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
PRUETH_RX_FLOW_DATA_SR1 : PRUETH_RX_FLOW_DATA;
int flow = emac->is_sr1 ?
PRUETH_MAX_RX_FLOWS_SR1 : PRUETH_MAX_RX_FLOWS;
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+ int xdp_state_or = 0;
int num_rx = 0;
int cur_budget;
+ u32 xdp_state;
int ret;
while (flow--) {
- cur_budget = budget - num_rx;
-
- while (cur_budget--) {
- ret = emac_rx_packet(emac, flow);
- if (ret)
- break;
- num_rx++;
+ if (rx_chn->xsk_pool) {
+ num_rx = emac_rx_packet_zc(emac, flow, budget);
+ } else {
+ cur_budget = budget - num_rx;
+
+ while (cur_budget--) {
+ ret = emac_rx_packet(emac, flow, &xdp_state);
+ xdp_state_or |= xdp_state;
+ if (ret)
+ break;
+ num_rx++;
+ }
}
if (num_rx >= budget)
break;
}
+ if (xdp_state_or & ICSSG_XDP_REDIR)
+ xdp_do_flush();
+
if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
if (unlikely(emac->rx_pace_timeout_ns)) {
hrtimer_start(&emac->rx_hrtimer,
ns_to_ktime(emac->rx_pace_timeout_ns),
HRTIMER_MODE_REL_PINNED);
} else {
- enable_irq(emac->rx_chns.irq[rx_flow]);
+ if (emac->rx_chns.irq_disabled) {
+ /* re-enable the RX IRQ */
+ emac->rx_chns.irq_disabled = false;
+ enable_irq(emac->rx_chns.irq[rx_flow]);
+ }
}
}
@@ -936,25 +1464,53 @@ int prueth_prepare_rx_chan(struct prueth_emac *emac,
struct prueth_rx_chn *chn,
int buf_size)
{
- struct sk_buff *skb;
+ struct page *page;
+ int desc_avail;
int i, ret;
- for (i = 0; i < chn->descs_num; i++) {
- skb = __netdev_alloc_skb_ip_align(NULL, buf_size, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
+ desc_avail = k3_cppi_desc_pool_avail(chn->desc_pool);
+ if (desc_avail < chn->descs_num)
+ netdev_warn(emac->ndev,
+ "not enough RX descriptors available %d < %d\n",
+ desc_avail, chn->descs_num);
- ret = prueth_dma_rx_push(emac, skb, chn);
- if (ret < 0) {
- netdev_err(emac->ndev,
- "cannot submit skb for rx chan %s ret %d\n",
- chn->name, ret);
- kfree_skb(skb);
- return ret;
+ if (chn->xsk_pool) {
+ /* allocate buffers from the XSK pool and push as many
+ * as possible to the RX ring
+ */
+ ret = prueth_rx_alloc_zc(emac, desc_avail);
+ if (!ret)
+ goto recycle_alloc_pg;
+ } else {
+ for (i = 0; i < desc_avail; i++) {
+ /* NOTE: we're not using memory efficiently here.
+ * 1 full page (4KB?) used here instead of
+ * PRUETH_MAX_PKT_SIZE (~1.5KB?)
+ */
+ page = page_pool_dev_alloc_pages(chn->pg_pool);
+ if (!page) {
+ netdev_err(emac->ndev, "couldn't allocate rx page\n");
+ ret = -ENOMEM;
+ goto recycle_alloc_pg;
+ }
+
+ ret = prueth_dma_rx_push_mapped(emac, chn, page, buf_size);
+ if (ret < 0) {
+ netdev_err(emac->ndev,
+ "cannot submit page for rx chan %s ret %d\n",
+ chn->name, ret);
+ page_pool_recycle_direct(chn->pg_pool, page);
+ goto recycle_alloc_pg;
+ }
}
}
return 0;
+
+recycle_alloc_pg:
+ prueth_reset_rx_chan(&emac->rx_chns, PRUETH_MAX_RX_FLOWS, false);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(prueth_prepare_rx_chan);
@@ -980,7 +1536,7 @@ void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
for (i = 0; i < num_flows; i++)
k3_udma_glue_reset_rx_chn(chn->rx_chn, i, chn,
- prueth_rx_cleanup, !!i);
+ prueth_rx_cleanup);
if (disable)
k3_udma_glue_disable_rx_chn(chn->rx_chn);
}
@@ -992,15 +1548,13 @@ void icssg_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
}
EXPORT_SYMBOL_GPL(icssg_ndo_tx_timeout);
-static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr)
+int icssg_ndo_set_ts_config(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct prueth_emac *emac = netdev_priv(ndev);
- struct hwtstamp_config config;
-
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
emac->tx_ts_enabled = 0;
break;
@@ -1011,7 +1565,7 @@ static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr)
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
emac->rx_ts_enabled = 0;
break;
@@ -1031,43 +1585,28 @@ static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
emac->rx_ts_enabled = 1;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
}
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
+EXPORT_SYMBOL_GPL(icssg_ndo_set_ts_config);
-static int emac_get_ts_config(struct net_device *ndev, struct ifreq *ifr)
+int icssg_ndo_get_ts_config(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config)
{
struct prueth_emac *emac = netdev_priv(ndev);
- struct hwtstamp_config config;
-
- config.flags = 0;
- config.tx_type = emac->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- config.rx_filter = emac->rx_ts_enabled ? HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
-}
+ config->flags = 0;
+ config->tx_type = emac->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config->rx_filter = emac->rx_ts_enabled ? HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
-int icssg_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
- case SIOCGHWTSTAMP:
- return emac_get_ts_config(ndev, ifr);
- case SIOCSHWTSTAMP:
- return emac_set_ts_config(ndev, ifr);
- default:
- break;
- }
-
- return phy_do_ioctl(ndev, ifr, cmd);
+ return 0;
}
-EXPORT_SYMBOL_GPL(icssg_ndo_ioctl);
+EXPORT_SYMBOL_GPL(icssg_ndo_get_ts_config);
void icssg_ndo_get_stats64(struct net_device *ndev,
struct rtnl_link_stats64 *stats)
@@ -1084,10 +1623,28 @@ void icssg_ndo_get_stats64(struct net_device *ndev,
stats->rx_over_errors = emac_get_stat_by_name(emac, "rx_over_errors");
stats->multicast = emac_get_stat_by_name(emac, "rx_multicast_frames");
- stats->rx_errors = ndev->stats.rx_errors;
- stats->rx_dropped = ndev->stats.rx_dropped;
+ stats->rx_errors = ndev->stats.rx_errors +
+ emac_get_stat_by_name(emac, "FW_RX_ERROR") +
+ emac_get_stat_by_name(emac, "FW_RX_EOF_SHORT_FRMERR") +
+ emac_get_stat_by_name(emac, "FW_RX_B0_DROP_EARLY_EOF") +
+ emac_get_stat_by_name(emac, "FW_RX_EXP_FRAG_Q_DROP") +
+ emac_get_stat_by_name(emac, "FW_RX_FIFO_OVERRUN");
+ stats->rx_dropped = ndev->stats.rx_dropped +
+ emac_get_stat_by_name(emac, "FW_DROPPED_PKT") +
+ emac_get_stat_by_name(emac, "FW_INF_PORT_DISABLED") +
+ emac_get_stat_by_name(emac, "FW_INF_SAV") +
+ emac_get_stat_by_name(emac, "FW_INF_SA_DL") +
+ emac_get_stat_by_name(emac, "FW_INF_PORT_BLOCKED") +
+ emac_get_stat_by_name(emac, "FW_INF_DROP_TAGGED") +
+ emac_get_stat_by_name(emac, "FW_INF_DROP_PRIOTAGGED") +
+ emac_get_stat_by_name(emac, "FW_INF_DROP_NOTAG") +
+ emac_get_stat_by_name(emac, "FW_INF_DROP_NOTMEMBER");
stats->tx_errors = ndev->stats.tx_errors;
- stats->tx_dropped = ndev->stats.tx_dropped;
+ stats->tx_dropped = ndev->stats.tx_dropped +
+ emac_get_stat_by_name(emac, "FW_RTU_PKT_DROP") +
+ emac_get_stat_by_name(emac, "FW_TX_DROPPED_PACKET") +
+ emac_get_stat_by_name(emac, "FW_TX_TS_DROPPED_PACKET") +
+ emac_get_stat_by_name(emac, "FW_TX_JUMBO_FRM_CUTOFF");
}
EXPORT_SYMBOL_GPL(icssg_ndo_get_stats64);
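
The hunks above replace the old "void **swdata" descriptor cookie with a typed
struct prueth_swdata. Its definition lives in the driver header and is not part
of this excerpt; reconstructed purely from the accesses in this diff
(swdata->type, swdata->data.skb/.page/.xdpf/.xdp), it is presumably a small
tagged union along these lines:

/* Sketch only -- inferred from usage above, not copied from the header. */
enum prueth_swdata_type {
	PRUETH_SWDATA_INVALID = 0,
	PRUETH_SWDATA_SKB,	/* skb queued by icssg_ndo_start_xmit() */
	PRUETH_SWDATA_PAGE,	/* page_pool page pushed to the RX ring */
	PRUETH_SWDATA_XDPF,	/* xdp_frame queued by emac_xmit_xdp_frame() */
	PRUETH_SWDATA_XSK,	/* AF_XDP zero-copy buffer or TX descriptor */
};

struct prueth_swdata {
	enum prueth_swdata_type type;
	union {
		struct sk_buff *skb;
		struct page *page;
		struct xdp_frame *xdpf;
		struct xdp_buff *xdp;
	} data;
};

The union is stored in the cppi5 host descriptor's software data area, so each
completion path (skb, XDP frame, page-pool page, XSK buffer) can be released
with the matching API, as emac_tx_complete_packets(), prueth_tx_cleanup() and
prueth_rx_cleanup() above do.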
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c
index 5d2491c2943a..3f8237c17d09 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.c
@@ -66,6 +66,9 @@
#define FDB_GEN_CFG1 0x60
#define SMEM_VLAN_OFFSET 8
#define SMEM_VLAN_OFFSET_MASK GENMASK(25, 8)
+#define FDB_HASH_SIZE_MASK GENMASK(6, 3)
+#define FDB_HASH_SIZE_SHIFT 3
+#define FDB_HASH_SIZE 3
#define FDB_GEN_CFG2 0x64
#define FDB_VLAN_EN BIT(6)
@@ -288,8 +291,12 @@ static int prueth_fw_offload_buffer_setup(struct prueth_emac *emac)
int i;
addr = lower_32_bits(prueth->msmcram.pa);
- if (slice)
- addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
+ if (slice) {
+ if (prueth->pdata.banked_ms_ram)
+ addr += MSMC_RAM_BANK_SIZE;
+ else
+ addr += PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE;
+ }
if (addr % SZ_64K) {
dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n");
@@ -297,43 +304,66 @@ static int prueth_fw_offload_buffer_setup(struct prueth_emac *emac)
}
bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
- /* workaround for f/w bug. bpool 0 needs to be initialized */
- for (i = 0; i < PRUETH_NUM_BUF_POOLS; i++) {
+
+ /* Configure buffer pools for forwarding buffers
+ * - used by firmware to store packets to be forwarded to other port
+ * - 8 total pools per slice
+ */
+ for (i = 0; i < PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE; i++) {
writel(addr, &bpool_cfg[i].addr);
- writel(PRUETH_EMAC_BUF_POOL_SIZE, &bpool_cfg[i].len);
- addr += PRUETH_EMAC_BUF_POOL_SIZE;
+ writel(PRUETH_SW_FWD_BUF_POOL_SIZE, &bpool_cfg[i].len);
+ addr += PRUETH_SW_FWD_BUF_POOL_SIZE;
}
- if (!slice)
- addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
- else
- addr += PRUETH_SW_NUM_BUF_POOLS_HOST * PRUETH_SW_BUF_POOL_SIZE_HOST;
-
- for (i = PRUETH_NUM_BUF_POOLS;
- i < 2 * PRUETH_SW_NUM_BUF_POOLS_HOST + PRUETH_NUM_BUF_POOLS;
- i++) {
- /* The driver only uses first 4 queues per PRU so only initialize them */
- if (i % PRUETH_SW_NUM_BUF_POOLS_HOST < PRUETH_SW_NUM_BUF_POOLS_PER_PRU) {
- writel(addr, &bpool_cfg[i].addr);
- writel(PRUETH_SW_BUF_POOL_SIZE_HOST, &bpool_cfg[i].len);
- addr += PRUETH_SW_BUF_POOL_SIZE_HOST;
+ /* Configure buffer pools for Local Injection buffers
+ * - used by firmware to store packets received from host core
+ * - 16 total pools per slice
+ */
+ for (i = 0; i < PRUETH_NUM_LI_BUF_POOLS_PER_SLICE; i++) {
+ int cfg_idx = i + PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE;
+
+ /* The driver only uses the first 4 queues per PRU,
+ * so only initialize buffers for them
+ */
+ if ((i % PRUETH_NUM_LI_BUF_POOLS_PER_PORT_PER_SLICE)
+ < PRUETH_SW_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE) {
+ writel(addr, &bpool_cfg[cfg_idx].addr);
+ writel(PRUETH_SW_LI_BUF_POOL_SIZE,
+ &bpool_cfg[cfg_idx].len);
+ addr += PRUETH_SW_LI_BUF_POOL_SIZE;
} else {
- writel(0, &bpool_cfg[i].addr);
- writel(0, &bpool_cfg[i].len);
+ writel(0, &bpool_cfg[cfg_idx].addr);
+ writel(0, &bpool_cfg[cfg_idx].len);
}
}
- if (!slice)
- addr += PRUETH_SW_NUM_BUF_POOLS_HOST * PRUETH_SW_BUF_POOL_SIZE_HOST;
- else
- addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
+ /* Express RX buffer queue
+ * - used by firmware to store express packets to be transmitted
+ * to the host core
+ */
+ rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET;
+ for (i = 0; i < 3; i++)
+ writel(addr, &rxq_ctx->start[i]);
+ addr += PRUETH_SW_HOST_EXP_BUF_POOL_SIZE;
+ writel(addr, &rxq_ctx->end);
+
+ /* Pre-emptible RX buffer queue
+ * - used by firmware to store preemptible packets to be transmitted
+ * to the host core
+ */
rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
for (i = 0; i < 3; i++)
writel(addr, &rxq_ctx->start[i]);
- addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
- writel(addr - SZ_2K, &rxq_ctx->end);
+ addr += PRUETH_SW_HOST_PRE_BUF_POOL_SIZE;
+ writel(addr, &rxq_ctx->end);
+
+ /* Set pointer for default dropped packet write
+ * - used by firmware to temporarily store packet to be dropped
+ */
+ rxq_ctx = emac->dram.va + DEFAULT_MSMC_Q_OFFSET;
+ writel(addr, &rxq_ctx->start[0]);
return 0;
}
@@ -347,13 +377,13 @@ static int prueth_emac_buffer_setup(struct prueth_emac *emac)
u32 addr;
int i;
- /* Layout to have 64KB aligned buffer pool
- * |BPOOL0|BPOOL1|RX_CTX0|RX_CTX1|
- */
-
addr = lower_32_bits(prueth->msmcram.pa);
- if (slice)
- addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
+ if (slice) {
+ if (prueth->pdata.banked_ms_ram)
+ addr += MSMC_RAM_BANK_SIZE;
+ else
+ addr += PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE;
+ }
if (addr % SZ_64K) {
dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n");
@@ -361,43 +391,70 @@ static int prueth_emac_buffer_setup(struct prueth_emac *emac)
}
bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
- /* workaround for f/w bug. bpool 0 needs to be initilalized */
- writel(addr, &bpool_cfg[0].addr);
- writel(0, &bpool_cfg[0].len);
- for (i = PRUETH_EMAC_BUF_POOL_START;
- i < PRUETH_EMAC_BUF_POOL_START + PRUETH_NUM_BUF_POOLS;
- i++) {
- writel(addr, &bpool_cfg[i].addr);
- writel(PRUETH_EMAC_BUF_POOL_SIZE, &bpool_cfg[i].len);
- addr += PRUETH_EMAC_BUF_POOL_SIZE;
+ /* Configure buffer pools for forwarding buffers
+ * - in mac mode - no forwarding so initialize all pools to 0
+ * - 8 total pools per slice
+ */
+ for (i = 0; i < PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE; i++) {
+ writel(0, &bpool_cfg[i].addr);
+ writel(0, &bpool_cfg[i].len);
}
- if (!slice)
- addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
- else
- addr += PRUETH_EMAC_RX_CTX_BUF_SIZE * 2;
+ /* Configure buffer pools for Local Injection buffers
+ * - used by firmware to store packets received from host core
+ * - 16 total pools per slice
+ */
+ bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
+ for (i = 0; i < PRUETH_NUM_LI_BUF_POOLS_PER_SLICE; i++) {
+ int cfg_idx = i + PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE;
+
+ /* In EMAC mode, only the first 4 buffer pools are used,
+ * as each slice handles only one port
+ */
+ if (i < PRUETH_EMAC_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE) {
+ writel(addr, &bpool_cfg[cfg_idx].addr);
+ writel(PRUETH_EMAC_LI_BUF_POOL_SIZE,
+ &bpool_cfg[cfg_idx].len);
+ addr += PRUETH_EMAC_LI_BUF_POOL_SIZE;
+ } else {
+ writel(0, &bpool_cfg[cfg_idx].addr);
+ writel(0, &bpool_cfg[cfg_idx].len);
+ }
+ }
- /* Pre-emptible RX buffer queue */
- rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
+ /* Express RX buffer queue
+ * - used by firmware to store express packets to be transmitted
+ * to host core
+ */
+ rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET;
for (i = 0; i < 3; i++)
writel(addr, &rxq_ctx->start[i]);
- addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
+ addr += PRUETH_EMAC_HOST_EXP_BUF_POOL_SIZE;
writel(addr, &rxq_ctx->end);
- /* Express RX buffer queue */
- rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET;
+ /* Pre-emptible RX buffer queue
+ * - used by firmware to store preemptible packets to be transmitted
+ * to host core
+ */
+ rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
for (i = 0; i < 3; i++)
writel(addr, &rxq_ctx->start[i]);
- addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
+ addr += PRUETH_EMAC_HOST_PRE_BUF_POOL_SIZE;
writel(addr, &rxq_ctx->end);
+ /* Set pointer for default dropped packet write
+ * - used by firmware to temporarily store packet to be dropped
+ */
+ rxq_ctx = emac->dram.va + DEFAULT_MSMC_Q_OFFSET;
+ writel(addr, &rxq_ctx->start[0]);
+
return 0;
}
-static void icssg_init_emac_mode(struct prueth *prueth)
+void icssg_init_emac_mode(struct prueth *prueth)
{
/* When the device is configured as a bridge and it is being brought
* back to the emac mode, the host mac address has to be set as 0.
@@ -406,12 +463,11 @@ static void icssg_init_emac_mode(struct prueth *prueth)
int i;
u8 mac[ETH_ALEN] = { 0 };
- if (prueth->emacs_initialized)
- return;
-
/* Set VLAN TABLE address base */
regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
addr << SMEM_VLAN_OFFSET);
+ regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, FDB_HASH_SIZE_MASK,
+ FDB_HASH_SIZE << FDB_HASH_SIZE_SHIFT);
/* Set enable VLAN aware mode, and FDBs for all PRUs */
regmap_write(prueth->miig_rt, FDB_GEN_CFG2, (FDB_PRU0_EN | FDB_PRU1_EN | FDB_HOST_EN));
prueth->vlan_tbl = (struct prueth_vlan_tbl __force *)(prueth->shram.va +
@@ -423,18 +479,18 @@ static void icssg_init_emac_mode(struct prueth *prueth)
/* Clear host MAC address */
icssg_class_set_host_mac_addr(prueth->miig_rt, mac);
}
+EXPORT_SYMBOL_GPL(icssg_init_emac_mode);
-static void icssg_init_fw_offload_mode(struct prueth *prueth)
+void icssg_init_fw_offload_mode(struct prueth *prueth)
{
u32 addr = prueth->shram.pa + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET;
int i;
- if (prueth->emacs_initialized)
- return;
-
/* Set VLAN TABLE address base */
regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
addr << SMEM_VLAN_OFFSET);
+ regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, FDB_HASH_SIZE_MASK,
+ FDB_HASH_SIZE << FDB_HASH_SIZE_SHIFT);
/* Set enable VLAN aware mode, and FDBs for all PRUs */
regmap_write(prueth->miig_rt, FDB_GEN_CFG2, FDB_EN_ALL);
prueth->vlan_tbl = (struct prueth_vlan_tbl __force *)(prueth->shram.va +
@@ -448,6 +504,7 @@ static void icssg_init_fw_offload_mode(struct prueth *prueth)
icssg_class_set_host_mac_addr(prueth->miig_rt, prueth->hw_bridge_dev->dev_addr);
icssg_set_pvid(prueth, prueth->default_vlan, PRUETH_PORT_HOST);
}
+EXPORT_SYMBOL_GPL(icssg_init_fw_offload_mode);
int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
{
@@ -455,11 +512,6 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
struct icssg_flow_cfg __iomem *flow_cfg;
int ret;
- if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
- icssg_init_fw_offload_mode(prueth);
- else
- icssg_init_emac_mode(prueth);
-
memset_io(config, 0, TAS_GATE_MASK_LIST0);
icssg_miig_queues_init(prueth, slice);
@@ -786,3 +838,27 @@ void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port)
writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT0_DEFAULT_VLAN_OFFSET);
}
EXPORT_SYMBOL_GPL(icssg_set_pvid);
+
+int emac_fdb_flow_id_updated(struct prueth_emac *emac)
+{
+ struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 };
+ int slice = prueth_emac_slice(emac);
+ struct mgmt_cmd fdb_cmd = { 0 };
+ int ret;
+
+ fdb_cmd.header = ICSSG_FW_MGMT_CMD_HEADER;
+ fdb_cmd.type = ICSSG_FW_MGMT_FDB_CMD_TYPE_RX_FLOW;
+ fdb_cmd.seqnum = ++(emac->prueth->icssg_hwcmdseq);
+ fdb_cmd.param = 0;
+
+ fdb_cmd.param |= (slice << 4);
+ fdb_cmd.cmd_args[0] = 0;
+
+ ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp);
+ if (ret)
+ return ret;
+
+ WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum);
+ return fdb_cmd_rsp.status == 1 ? 0 : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(emac_fdb_flow_id_updated);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.h b/drivers/net/ethernet/ti/icssg/icssg_config.h
index 92c2deaa3068..60d69744ffae 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.h
@@ -26,21 +26,71 @@ struct icssg_flow_cfg {
#define PRUETH_MAX_RX_FLOWS 1 /* excluding default flow */
#define PRUETH_RX_FLOW_DATA 0
-#define PRUETH_EMAC_BUF_POOL_SIZE SZ_8K
-#define PRUETH_EMAC_POOLS_PER_SLICE 24
-#define PRUETH_EMAC_BUF_POOL_START 8
-#define PRUETH_NUM_BUF_POOLS 8
-#define PRUETH_EMAC_RX_CTX_BUF_SIZE SZ_16K /* per slice */
-#define MSMC_RAM_SIZE \
- (2 * (PRUETH_EMAC_BUF_POOL_SIZE * PRUETH_NUM_BUF_POOLS + \
- PRUETH_EMAC_RX_CTX_BUF_SIZE * 2))
-
-#define PRUETH_SW_BUF_POOL_SIZE_HOST SZ_4K
-#define PRUETH_SW_NUM_BUF_POOLS_HOST 8
-#define PRUETH_SW_NUM_BUF_POOLS_PER_PRU 4
-#define MSMC_RAM_SIZE_SWITCH_MODE \
- (MSMC_RAM_SIZE + \
- (2 * PRUETH_SW_BUF_POOL_SIZE_HOST * PRUETH_SW_NUM_BUF_POOLS_HOST))
+/* Defines for forwarding path buffer pools:
+ * - used by firmware to store packets to be forwarded to other port
+ * - 8 total pools per slice
+ * - only used in switch mode (as no forwarding in mac mode)
+ */
+#define PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE 8
+#define PRUETH_SW_FWD_BUF_POOL_SIZE (SZ_8K)
+
+/* Defines for local injection path buffer pools:
+ * - used by firmware to store packets received from host core
+ * - 16 total pools per slice
+ * - 8 pools per port per slice and each slice handles both ports
+ * - only 4 out of 8 pools used per port (as only 4 real QoS levels in ICSSG)
+ * - switch mode: 8 total pools used
+ * - mac mode: 4 total pools used
+ */
+#define PRUETH_NUM_LI_BUF_POOLS_PER_SLICE 16
+#define PRUETH_NUM_LI_BUF_POOLS_PER_PORT_PER_SLICE 8
+#define PRUETH_SW_LI_BUF_POOL_SIZE SZ_4K
+#define PRUETH_SW_USED_LI_BUF_POOLS_PER_SLICE 8
+#define PRUETH_SW_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE 4
+#define PRUETH_EMAC_LI_BUF_POOL_SIZE SZ_8K
+#define PRUETH_EMAC_USED_LI_BUF_POOLS_PER_SLICE 4
+#define PRUETH_EMAC_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE 4
+
+/* Defines for host egress path - express and preemptible buffers
+ * - used by firmware to store express and preemptible packets
+ * to be transmitted to host core
+ * - used by both mac/switch modes
+ */
+#define PRUETH_SW_HOST_EXP_BUF_POOL_SIZE SZ_16K
+#define PRUETH_SW_HOST_PRE_BUF_POOL_SIZE (SZ_16K - SZ_2K)
+#define PRUETH_EMAC_HOST_EXP_BUF_POOL_SIZE PRUETH_SW_HOST_EXP_BUF_POOL_SIZE
+#define PRUETH_EMAC_HOST_PRE_BUF_POOL_SIZE PRUETH_SW_HOST_PRE_BUF_POOL_SIZE
+
+/* Buffer used by firmware to temporarily store packet to be dropped */
+#define PRUETH_SW_DROP_PKT_BUF_SIZE SZ_2K
+#define PRUETH_EMAC_DROP_PKT_BUF_SIZE PRUETH_SW_DROP_PKT_BUF_SIZE
+
+/* Total switch mode memory usage for buffers per slice */
+#define PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE \
+ (PRUETH_SW_FWD_BUF_POOL_SIZE * PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE + \
+ PRUETH_SW_LI_BUF_POOL_SIZE * PRUETH_SW_USED_LI_BUF_POOLS_PER_SLICE + \
+ PRUETH_SW_HOST_EXP_BUF_POOL_SIZE + \
+ PRUETH_SW_HOST_PRE_BUF_POOL_SIZE + \
+ PRUETH_SW_DROP_PKT_BUF_SIZE)
+
+/* Total switch mode memory usage for all buffers */
+#define PRUETH_SW_TOTAL_BUF_SIZE \
+ (2 * PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE)
+
+/* Total mac mode memory usage for buffers per slice */
+#define PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE \
+ (PRUETH_EMAC_LI_BUF_POOL_SIZE * \
+ PRUETH_EMAC_USED_LI_BUF_POOLS_PER_SLICE + \
+ PRUETH_EMAC_HOST_EXP_BUF_POOL_SIZE + \
+ PRUETH_EMAC_HOST_PRE_BUF_POOL_SIZE + \
+ PRUETH_EMAC_DROP_PKT_BUF_SIZE)
+
+/* Total mac mode memory usage for all buffers */
+#define PRUETH_EMAC_TOTAL_BUF_SIZE \
+ (2 * PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE)
+
+/* Size of 1 bank of MSMC/OC_SRAM memory */
+#define MSMC_RAM_BANK_SIZE SZ_256K
#define PRUETH_SWITCH_FDB_MASK ((SIZE_OF_FDB / NUMBER_OF_FDB_BUCKET_ENTRIES) - 1)
@@ -55,6 +105,7 @@ struct icssg_rxq_ctx {
#define ICSSG_FW_MGMT_FDB_CMD_TYPE 0x03
#define ICSSG_FW_MGMT_CMD_TYPE 0x04
#define ICSSG_FW_MGMT_PKT 0x80000000
+#define ICSSG_FW_MGMT_FDB_CMD_TYPE_RX_FLOW 0x05
struct icssg_r30_cmd {
u32 cmd[4];
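
A quick sanity check of the buffer budgets defined above: per slice, switch mode
uses 8 x 8K forwarding pools + 8 x 4K used local-injection pools + 16K express +
14K preemptible + 2K drop = 128K, and MAC mode uses 4 x 8K + 16K + 14K + 2K =
64K. Either total fits in one 256K MSMC bank and is a multiple of 64K, which is
what the alignment warning in the buffer-setup functions expects. The standalone
sketch below simply mirrors the macro values (SZ_8K = 8192 and so on) to verify
the arithmetic; it is an illustration, not driver code.

#include <assert.h>
#include <stdio.h>

int main(void)
{
	const unsigned int SZ_2K = 2048, SZ_4K = 4096, SZ_8K = 8192,
			   SZ_16K = 16384, SZ_64K = 65536, BANK = 256 * 1024;

	/* switch mode: 8 fwd pools, 8 used LI pools, express, preemptible, drop */
	unsigned int sw = 8 * SZ_8K + 8 * SZ_4K + SZ_16K + (SZ_16K - SZ_2K) + SZ_2K;
	/* MAC mode: 4 used LI pools, express, preemptible, drop */
	unsigned int emac = 4 * SZ_8K + SZ_16K + (SZ_16K - SZ_2K) + SZ_2K;

	printf("switch: %u KiB/slice, emac: %u KiB/slice\n", sw / 1024, emac / 1024);
	assert(sw == 128 * 1024 && emac == 64 * 1024);
	assert(sw <= BANK && emac <= BANK);		/* each slice fits one MSMC bank */
	assert(sw % SZ_64K == 0 && emac % SZ_64K == 0);	/* 64K-aligned slice offset */
	return 0;
}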
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index 0556910938fa..f65041662173 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -16,6 +16,7 @@
#include <linux/if_hsr.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
@@ -46,9 +47,14 @@
NETIF_F_HW_HSR_TAG_INS | \
NETIF_F_HW_HSR_TAG_RM)
+#define PRUETH_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC |\
+ DMA_ATTR_WEAK_ORDERING)
+
/* CTRLMMR_ICSSG_RGMII_CTRL register bits */
#define ICSSG_CTRL_RGMII_ID_MODE BIT(24)
+static void emac_adjust_link(struct net_device *ndev);
+
static int emac_get_tx_ts(struct prueth_emac *emac,
struct emac_tx_ts_response *rsp)
{
@@ -124,101 +130,198 @@ static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct icssg_firmwares icssg_hsr_firmwares[] = {
- {
- .pru = "ti-pruss/am65x-sr2-pru0-pruhsr-fw.elf",
- .rtu = "ti-pruss/am65x-sr2-rtu0-pruhsr-fw.elf",
- .txpru = "ti-pruss/am65x-sr2-txpru0-pruhsr-fw.elf",
- },
- {
- .pru = "ti-pruss/am65x-sr2-pru1-pruhsr-fw.elf",
- .rtu = "ti-pruss/am65x-sr2-rtu1-pruhsr-fw.elf",
- .txpru = "ti-pruss/am65x-sr2-txpru1-pruhsr-fw.elf",
- }
-};
+static int prueth_start(struct rproc *rproc, const char *fw_name)
+{
+ int ret;
-static struct icssg_firmwares icssg_switch_firmwares[] = {
- {
- .pru = "ti-pruss/am65x-sr2-pru0-prusw-fw.elf",
- .rtu = "ti-pruss/am65x-sr2-rtu0-prusw-fw.elf",
- .txpru = "ti-pruss/am65x-sr2-txpru0-prusw-fw.elf",
- },
- {
- .pru = "ti-pruss/am65x-sr2-pru1-prusw-fw.elf",
- .rtu = "ti-pruss/am65x-sr2-rtu1-prusw-fw.elf",
- .txpru = "ti-pruss/am65x-sr2-txpru1-prusw-fw.elf",
- }
-};
+ ret = rproc_set_firmware(rproc, fw_name);
+ if (ret)
+ return ret;
+ return rproc_boot(rproc);
+}
-static struct icssg_firmwares icssg_emac_firmwares[] = {
- {
- .pru = "ti-pruss/am65x-sr2-pru0-prueth-fw.elf",
- .rtu = "ti-pruss/am65x-sr2-rtu0-prueth-fw.elf",
- .txpru = "ti-pruss/am65x-sr2-txpru0-prueth-fw.elf",
- },
- {
- .pru = "ti-pruss/am65x-sr2-pru1-prueth-fw.elf",
- .rtu = "ti-pruss/am65x-sr2-rtu1-prueth-fw.elf",
- .txpru = "ti-pruss/am65x-sr2-txpru1-prueth-fw.elf",
- }
-};
+static void prueth_shutdown(struct rproc *rproc)
+{
+ rproc_shutdown(rproc);
+}
-static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
+static int prueth_emac_start(struct prueth *prueth)
{
struct icssg_firmwares *firmwares;
struct device *dev = prueth->dev;
- int slice, ret;
+ int ret, slice;
if (prueth->is_switch_mode)
- firmwares = icssg_switch_firmwares;
- else if (prueth->is_hsr_offload_mode)
- firmwares = icssg_hsr_firmwares;
+ firmwares = prueth->icssg_switch_firmwares;
+ else if (prueth->is_hsr_offload_mode && prueth->hsr_prp_version == HSR_V1)
+ firmwares = prueth->icssg_hsr_firmwares;
+ else if (prueth->is_hsr_offload_mode && prueth->hsr_prp_version == PRP_V1)
+ firmwares = prueth->icssg_prp_firmwares;
else
- firmwares = icssg_emac_firmwares;
+ firmwares = prueth->icssg_emac_firmwares;
- slice = prueth_emac_slice(emac);
- if (slice < 0) {
- netdev_err(emac->ndev, "invalid port\n");
- return -EINVAL;
+ for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
+ ret = prueth_start(prueth->pru[slice], firmwares[slice].pru);
+ if (ret) {
+ dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
+ goto unwind_slices;
+ }
+
+ ret = prueth_start(prueth->rtu[slice], firmwares[slice].rtu);
+ if (ret) {
+ dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
+ rproc_shutdown(prueth->pru[slice]);
+ goto unwind_slices;
+ }
+
+ ret = prueth_start(prueth->txpru[slice], firmwares[slice].txpru);
+ if (ret) {
+ dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
+ rproc_shutdown(prueth->rtu[slice]);
+ rproc_shutdown(prueth->pru[slice]);
+ goto unwind_slices;
+ }
}
- ret = icssg_config(prueth, emac, slice);
- if (ret)
- return ret;
+ return 0;
- ret = rproc_set_firmware(prueth->pru[slice], firmwares[slice].pru);
- ret = rproc_boot(prueth->pru[slice]);
- if (ret) {
- dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
- return -EINVAL;
+unwind_slices:
+ while (--slice >= 0) {
+ prueth_shutdown(prueth->txpru[slice]);
+ prueth_shutdown(prueth->rtu[slice]);
+ prueth_shutdown(prueth->pru[slice]);
}
- ret = rproc_set_firmware(prueth->rtu[slice], firmwares[slice].rtu);
- ret = rproc_boot(prueth->rtu[slice]);
- if (ret) {
- dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
- goto halt_pru;
+ return ret;
+}
+
+static void prueth_emac_stop(struct prueth *prueth)
+{
+ int slice;
+
+ for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
+ prueth_shutdown(prueth->txpru[slice]);
+ prueth_shutdown(prueth->rtu[slice]);
+ prueth_shutdown(prueth->pru[slice]);
}
+}
+
+static void icssg_enable_fw_offload(struct prueth *prueth)
+{
+ struct prueth_emac *emac;
+ int mac;
- ret = rproc_set_firmware(prueth->txpru[slice], firmwares[slice].txpru);
- ret = rproc_boot(prueth->txpru[slice]);
+ for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
+ emac = prueth->emac[mac];
+ if (prueth->is_hsr_offload_mode) {
+ if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM)
+ icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE);
+ else
+ icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE);
+ }
+
+ if (prueth->is_switch_mode || prueth->is_hsr_offload_mode) {
+ if (netif_running(emac->ndev)) {
+ icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,
+ ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_BLOCK,
+ true);
+ icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID,
+ BIT(emac->port_id) | DEFAULT_PORT_MASK,
+ BIT(emac->port_id) | DEFAULT_UNTAG_MASK,
+ true);
+ if (prueth->is_hsr_offload_mode)
+ icssg_vtbl_modify(emac, DEFAULT_VID,
+ DEFAULT_PORT_MASK,
+ DEFAULT_UNTAG_MASK, true);
+ icssg_set_pvid(prueth, emac->port_vlan, emac->port_id);
+ if (prueth->is_switch_mode)
+ icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
+ }
+ }
+ }
+}
+
+static int prueth_emac_common_start(struct prueth *prueth)
+{
+ struct prueth_emac *emac;
+ int ret = 0;
+ int slice;
+
+ if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
+ return -EINVAL;
+
+ /* clear SMEM and MSMC settings for all slices */
+ memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
+ memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
+
+ icssg_class_default(prueth->miig_rt, ICSS_SLICE0, 0, false);
+ icssg_class_default(prueth->miig_rt, ICSS_SLICE1, 0, false);
+
+ if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
+ icssg_init_fw_offload_mode(prueth);
+ else
+ icssg_init_emac_mode(prueth);
+
+ for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
+ emac = prueth->emac[slice];
+ if (!emac)
+ continue;
+ ret = icssg_config(prueth, emac, slice);
+ if (ret)
+ goto disable_class;
+
+ mutex_lock(&emac->ndev->phydev->lock);
+ emac_adjust_link(emac->ndev);
+ mutex_unlock(&emac->ndev->phydev->lock);
+ }
+
+ ret = prueth_emac_start(prueth);
+ if (ret)
+ goto disable_class;
+
+ emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
+ prueth->emac[ICSS_SLICE1];
+ ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
+ emac, IEP_DEFAULT_CYCLE_TIME_NS);
if (ret) {
- dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
- goto halt_rtu;
+ dev_err(prueth->dev, "Failed to initialize IEP module\n");
+ goto stop_pruss;
}
- emac->fw_running = 1;
return 0;
-halt_rtu:
- rproc_shutdown(prueth->rtu[slice]);
+stop_pruss:
+ prueth_emac_stop(prueth);
-halt_pru:
- rproc_shutdown(prueth->pru[slice]);
+disable_class:
+ icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
+ icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);
return ret;
}
+static int prueth_emac_common_stop(struct prueth *prueth)
+{
+ struct prueth_emac *emac;
+
+ if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
+ return -EINVAL;
+
+ icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
+ icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);
+
+ prueth_emac_stop(prueth);
+
+ emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
+ prueth->emac[ICSS_SLICE1];
+ icss_iep_exit(emac->iep);
+
+ return 0;
+}
+
/* called back by PHY layer if there is change in link state of hw port*/
static void emac_adjust_link(struct net_device *ndev)
{
@@ -292,7 +395,11 @@ static enum hrtimer_restart emac_rx_timer_callback(struct hrtimer *timer)
container_of(timer, struct prueth_emac, rx_hrtimer);
int rx_flow = PRUETH_RX_FLOW_DATA;
- enable_irq(emac->rx_chns.irq[rx_flow]);
+ if (emac->rx_chns.irq_disabled) {
+ /* re-enable the RX IRQ */
+ emac->rx_chns.irq_disabled = false;
+ enable_irq(emac->rx_chns.irq[rx_flow]);
+ }
return HRTIMER_NORESTART;
}
@@ -373,9 +480,6 @@ static void prueth_iep_settime(void *clockops_data, u64 ns)
u32 cycletime;
int timeout;
- if (!emac->fw_running)
- return;
-
sc_descp = emac->prueth->shram.va + TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET;
cycletime = IEP_DEFAULT_CYCLE_TIME_NS;
@@ -411,6 +515,8 @@ static int prueth_perout_enable(void *clockops_data,
struct prueth_emac *emac = clockops_data;
u32 reduction_factor = 0, offset = 0;
struct timespec64 ts;
+ u64 current_cycle;
+ u64 start_offset;
u64 ns_period;
if (!on)
@@ -449,8 +555,14 @@ static int prueth_perout_enable(void *clockops_data,
writel(reduction_factor, emac->prueth->shram.va +
TIMESYNC_FW_WC_SYNCOUT_REDUCTION_FACTOR_OFFSET);
- writel(0, emac->prueth->shram.va +
- TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET);
+ current_cycle = icssg_read_time(emac->prueth->shram.va +
+ TIMESYNC_FW_WC_CYCLECOUNT_OFFSET);
+
+ /* Rounding of current_cycle count to next second */
+ start_offset = roundup(current_cycle, MSEC_PER_SEC);
+
+ hi_lo_writeq(start_offset, emac->prueth->shram.va +
+ TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET);
return 0;
}
@@ -461,65 +573,305 @@ const struct icss_iep_clockops prueth_iep_clockops = {
.perout_enable = prueth_perout_enable,
};
+static void prueth_destroy_xdp_rxqs(struct prueth_emac *emac)
+{
+ struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
+
+ if (xdp_rxq_info_is_reg(rxq))
+ xdp_rxq_info_unreg(rxq);
+}
+
+static int prueth_create_xdp_rxqs(struct prueth_emac *emac)
+{
+ struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
+ struct page_pool *pool = emac->rx_chns.pg_pool;
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+ int ret;
+
+ ret = xdp_rxq_info_reg(rxq, emac->ndev, 0, emac->napi_rx.napi_id);
+ if (ret)
+ return ret;
+
+ if (rx_chn->xsk_pool) {
+ ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_XSK_BUFF_POOL, NULL);
+ if (ret)
+ goto xdp_unreg;
+ xsk_pool_set_rxq_info(rx_chn->xsk_pool, rxq);
+ } else {
+ ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
+ if (ret)
+ goto xdp_unreg;
+ }
+
+ return 0;
+
+xdp_unreg:
+ prueth_destroy_xdp_rxqs(emac);
+ return ret;
+}
+
static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr)
{
- struct prueth_emac *emac = netdev_priv(ndev);
- int port_mask = BIT(emac->port_id);
+ struct net_device *real_dev;
+ struct prueth_emac *emac;
+ int port_mask;
+ u8 vlan_id;
+
+ vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC;
+ real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
+ emac = netdev_priv(real_dev);
- port_mask |= icssg_fdb_lookup(emac, addr, 0);
- icssg_fdb_add_del(emac, addr, 0, port_mask, true);
- icssg_vtbl_modify(emac, 0, port_mask, port_mask, true);
+ port_mask = BIT(emac->port_id) | icssg_fdb_lookup(emac, addr, vlan_id);
+ icssg_fdb_add_del(emac, addr, vlan_id, port_mask, true);
+ icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, true);
return 0;
}
static int icssg_prueth_del_mcast(struct net_device *ndev, const u8 *addr)
{
- struct prueth_emac *emac = netdev_priv(ndev);
- int port_mask = BIT(emac->port_id);
+ struct net_device *real_dev;
+ struct prueth_emac *emac;
int other_port_mask;
+ int port_mask;
+ u8 vlan_id;
+
+ vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC;
+ real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
+ emac = netdev_priv(real_dev);
- other_port_mask = port_mask ^ icssg_fdb_lookup(emac, addr, 0);
+ port_mask = BIT(emac->port_id);
+ other_port_mask = port_mask ^ icssg_fdb_lookup(emac, addr, vlan_id);
- icssg_fdb_add_del(emac, addr, 0, port_mask, false);
- icssg_vtbl_modify(emac, 0, port_mask, port_mask, false);
+ icssg_fdb_add_del(emac, addr, vlan_id, port_mask, false);
+ icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, false);
if (other_port_mask) {
- icssg_fdb_add_del(emac, addr, 0, other_port_mask, true);
- icssg_vtbl_modify(emac, 0, other_port_mask, other_port_mask, true);
+ icssg_fdb_add_del(emac, addr, vlan_id, other_port_mask, true);
+ icssg_vtbl_modify(emac, vlan_id, other_port_mask,
+ other_port_mask, true);
}
return 0;
}
-static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
+static void icssg_prueth_hsr_fdb_add_del(struct prueth_emac *emac,
+ const u8 *addr, u8 vid, bool add)
{
- struct prueth_emac *emac = netdev_priv(ndev);
- struct prueth *prueth = emac->prueth;
-
- icssg_fdb_add_del(emac, addr, prueth->default_vlan,
+ icssg_fdb_add_del(emac, addr, vid,
ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
- ICSSG_FDB_ENTRY_BLOCK, true);
+ ICSSG_FDB_ENTRY_BLOCK, add);
+
+ if (add)
+ icssg_vtbl_modify(emac, vid, BIT(emac->port_id),
+ BIT(emac->port_id), add);
+}
+
+static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
+{
+ struct net_device *real_dev, *port_dev;
+ struct prueth_emac *emac;
+ u8 vlan_id, i;
+
+ vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_HSR;
+ real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
+
+ if (is_hsr_master(real_dev)) {
+ for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
+ port_dev = hsr_get_port_ndev(real_dev, i);
+ emac = netdev_priv(port_dev);
+ if (!emac) {
+ dev_put(port_dev);
+ return -EINVAL;
+ }
+ icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
+ true);
+ dev_put(port_dev);
+ }
+ } else {
+ emac = netdev_priv(real_dev);
+ icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, true);
+ }
- icssg_vtbl_modify(emac, emac->port_vlan, BIT(emac->port_id),
- BIT(emac->port_id), true);
return 0;
}
static int icssg_prueth_hsr_del_mcast(struct net_device *ndev, const u8 *addr)
{
- struct prueth_emac *emac = netdev_priv(ndev);
- struct prueth *prueth = emac->prueth;
+ struct net_device *real_dev, *port_dev;
+ struct prueth_emac *emac;
+ u8 vlan_id, i;
+
+ vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_HSR;
+ real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
+
+ if (is_hsr_master(real_dev)) {
+ for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
+ port_dev = hsr_get_port_ndev(real_dev, i);
+ emac = netdev_priv(port_dev);
+ if (!emac) {
+ dev_put(port_dev);
+ return -EINVAL;
+ }
+ icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
+ false);
+ dev_put(port_dev);
+ }
+ } else {
+ emac = netdev_priv(real_dev);
+ icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, false);
+ }
- icssg_fdb_add_del(emac, addr, prueth->default_vlan,
- ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
- ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
- ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
- ICSSG_FDB_ENTRY_BLOCK, false);
+ return 0;
+}
+
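+/* vlan_for_each() callback: mirror the VLAN device's multicast list into the
+ * per-VID list and program the matching FDB entries.
+ */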
+static int icssg_update_vlan_mcast(struct net_device *vdev, int vid,
+ void *args)
+{
+ struct prueth_emac *emac = args;
+
+ if (!vdev || !vid)
+ return 0;
+
+ netif_addr_lock_bh(vdev);
+ __hw_addr_sync_multiple(&emac->vlan_mcast_list[vid], &vdev->mc,
+ vdev->addr_len);
+ netif_addr_unlock_bh(vdev);
+
+ if (emac->prueth->is_hsr_offload_mode)
+ __hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev,
+ icssg_prueth_hsr_add_mcast,
+ icssg_prueth_hsr_del_mcast);
+ else
+ __hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev,
+ icssg_prueth_add_mcast,
+ icssg_prueth_del_mcast);
+
+ return 0;
+}
+
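+/* Bind the XSK buffer pool for @queue_id to both the Rx and Tx channels, or
+ * clear both pointers when @queue_id is not the registered zero-copy queue.
+ */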
+static void prueth_set_xsk_pool(struct prueth_emac *emac, u16 queue_id)
+{
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[queue_id];
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+
+ if (emac->xsk_qid != queue_id) {
+ rx_chn->xsk_pool = NULL;
+ tx_chn->xsk_pool = NULL;
+ } else {
+ rx_chn->xsk_pool = xsk_get_pool_from_qid(emac->ndev, queue_id);
+ tx_chn->xsk_pool = xsk_get_pool_from_qid(emac->ndev, queue_id);
+ }
+}
+
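+/* Tear down all Tx DMA channels: request teardown, wait for it to complete,
+ * then disable NAPI and reset/disable each channel.
+ */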
+static void prueth_destroy_txq(struct prueth_emac *emac)
+{
+ int ret, i;
+
+ atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
+ /* ensure new tdown_cnt value is visible */
+ smp_mb__after_atomic();
+ /* tear down and disable UDMA channels */
+ reinit_completion(&emac->tdown_complete);
+ for (i = 0; i < emac->tx_ch_num; i++)
+ k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);
+
+ ret = wait_for_completion_timeout(&emac->tdown_complete,
+ msecs_to_jiffies(1000));
+ if (!ret)
+ netdev_err(emac->ndev, "tx teardown timeout\n");
+
+ for (i = 0; i < emac->tx_ch_num; i++) {
+ napi_disable(&emac->tx_chns[i].napi_tx);
+ hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer);
+ k3_udma_glue_reset_tx_chn(emac->tx_chns[i].tx_chn,
+ &emac->tx_chns[i],
+ prueth_tx_cleanup);
+ k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn);
+ }
+}
+
+static void prueth_destroy_rxq(struct prueth_emac *emac)
+{
+ int i, ret;
+
+ /* tear down and disable UDMA channels */
+ reinit_completion(&emac->tdown_complete);
+ k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
+
+ /* When RX DMA Channel Teardown is initiated, it will result in an
+ * interrupt and a Teardown Completion Marker (TDCM) is queued into
+ * the RX Completion queue. Acknowledging the interrupt involves
+ * popping the TDCM descriptor from the RX Completion queue via the
+ * RX NAPI Handler. To avoid timing out when waiting for the TDCM to
+ * be popped, schedule the RX NAPI handler to run immediately.
+ */
+ if (!napi_if_scheduled_mark_missed(&emac->napi_rx)) {
+ if (napi_schedule_prep(&emac->napi_rx))
+ __napi_schedule(&emac->napi_rx);
+ }
+
+ ret = wait_for_completion_timeout(&emac->tdown_complete,
+ msecs_to_jiffies(1000));
+ if (!ret)
+ netdev_err(emac->ndev, "rx teardown timeout\n");
+
+ for (i = 0; i < PRUETH_MAX_RX_FLOWS; i++) {
+ napi_disable(&emac->napi_rx);
+ hrtimer_cancel(&emac->rx_hrtimer);
+ k3_udma_glue_reset_rx_chn(emac->rx_chns.rx_chn, i,
+ &emac->rx_chns,
+ prueth_rx_cleanup);
+ }
+
+ prueth_destroy_xdp_rxqs(emac);
+ k3_udma_glue_disable_rx_chn(emac->rx_chns.rx_chn);
+}
+
+static int prueth_create_txq(struct prueth_emac *emac)
+{
+ int ret, i;
+
+ for (i = 0; i < emac->tx_ch_num; i++) {
+ ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
+ if (ret)
+ goto reset_tx_chan;
+ napi_enable(&emac->tx_chns[i].napi_tx);
+ }
return 0;
+
+reset_tx_chan:
+ /* Since the interface is not yet up, there wouldn't be
+ * any SKB for completion. So set free_skb to false.
+ */
+ prueth_reset_tx_chan(emac, i, false);
+ return ret;
+}
+
+static int prueth_create_rxq(struct prueth_emac *emac)
+{
+ int ret;
+
+ ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
+ if (ret)
+ return ret;
+
+ ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
+ if (ret)
+ goto reset_rx_chn;
+
+ ret = prueth_create_xdp_rxqs(emac);
+ if (ret)
+ goto reset_rx_chn;
+
+ napi_enable(&emac->napi_rx);
+ return 0;
+
+reset_rx_chn:
+ prueth_reset_rx_chan(&emac->rx_chns, PRUETH_MAX_RX_FLOWS, false);
+ return ret;
}
/**
@@ -533,24 +885,18 @@ static int icssg_prueth_hsr_del_mcast(struct net_device *ndev, const u8 *addr)
static int emac_ndo_open(struct net_device *ndev)
{
struct prueth_emac *emac = netdev_priv(ndev);
- int ret, i, num_data_chn = emac->tx_ch_num;
+ int ret, num_data_chn = emac->tx_ch_num;
+ struct icssg_flow_cfg __iomem *flow_cfg;
struct prueth *prueth = emac->prueth;
int slice = prueth_emac_slice(emac);
struct device *dev = prueth->dev;
int max_rx_flows;
int rx_flow;
- /* clear SMEM and MSMC settings for all slices */
- if (!prueth->emacs_initialized) {
- memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
- memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
- }
-
/* set h/w MAC as user might have re-configured */
ether_addr_copy(emac->mac_addr, ndev->dev_addr);
icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
- icssg_class_default(prueth->miig_rt, slice, 0, false);
icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
/* Notify the stack of the actual queue counts. */
@@ -560,6 +906,7 @@ static int emac_ndo_open(struct net_device *ndev)
return ret;
}
+ emac->xsk_qid = -EINVAL;
init_completion(&emac->cmd_complete);
ret = prueth_init_tx_chns(emac);
if (ret) {
@@ -588,42 +935,37 @@ static int emac_ndo_open(struct net_device *ndev)
goto cleanup_napi;
}
- /* reset and start PRU firmware */
- ret = prueth_emac_start(prueth, emac);
- if (ret)
- goto free_rx_irq;
+ if (!prueth->emacs_initialized) {
+ ret = prueth_emac_common_start(prueth);
+ if (ret)
+ goto free_rx_irq;
+ icssg_enable_fw_offload(prueth);
+ }
- icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
+ flow_cfg = emac->dram.va + ICSSG_CONFIG_OFFSET + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;
+ writew(emac->rx_flow_id_base, &flow_cfg->rx_base_flow);
+ ret = emac_fdb_flow_id_updated(emac);
- if (!prueth->emacs_initialized) {
- ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
- emac, IEP_DEFAULT_CYCLE_TIME_NS);
+ if (ret) {
+ netdev_err(ndev, "Failed to update Rx Flow ID %d", ret);
+ goto stop;
}
+ icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
+
ret = request_threaded_irq(emac->tx_ts_irq, NULL, prueth_tx_ts_irq,
IRQF_ONESHOT, dev_name(dev), emac);
if (ret)
goto stop;
/* Prepare RX */
- ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
+ ret = prueth_create_rxq(emac);
if (ret)
goto free_tx_ts_irq;
- ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
+ ret = prueth_create_txq(emac);
if (ret)
- goto reset_rx_chn;
-
- for (i = 0; i < emac->tx_ch_num; i++) {
- ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
- if (ret)
- goto reset_tx_chan;
- }
-
- /* Enable NAPI in Tx and Rx direction */
- for (i = 0; i < emac->tx_ch_num; i++)
- napi_enable(&emac->tx_chns[i].napi_tx);
- napi_enable(&emac->napi_rx);
+ goto destroy_rxq;
/* start PHY */
phy_start(ndev->phydev);
@@ -634,17 +976,13 @@ static int emac_ndo_open(struct net_device *ndev)
return 0;
-reset_tx_chan:
- /* Since interface is not yet up, there is wouldn't be
- * any SKB for completion. So set false to free_skb
- */
- prueth_reset_tx_chan(emac, i, false);
-reset_rx_chn:
- prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
+destroy_rxq:
+ prueth_destroy_rxq(emac);
free_tx_ts_irq:
free_irq(emac->tx_ts_irq, emac);
stop:
- prueth_emac_stop(emac);
+ if (!prueth->emacs_initialized)
+ prueth_emac_common_stop(prueth);
free_rx_irq:
free_irq(emac->rx_chns.irq[rx_flow], emac);
cleanup_napi:
@@ -669,9 +1007,6 @@ static int emac_ndo_stop(struct net_device *ndev)
{
struct prueth_emac *emac = netdev_priv(ndev);
struct prueth *prueth = emac->prueth;
- int rx_flow = PRUETH_RX_FLOW_DATA;
- int max_rx_flows;
- int ret, i;
/* inform the upper layers. */
netif_tx_stop_all_queues(ndev);
@@ -680,57 +1015,29 @@ static int emac_ndo_stop(struct net_device *ndev)
if (ndev->phydev)
phy_stop(ndev->phydev);
- icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac));
-
if (emac->prueth->is_hsr_offload_mode)
__dev_mc_unsync(ndev, icssg_prueth_hsr_del_mcast);
else
__dev_mc_unsync(ndev, icssg_prueth_del_mcast);
- atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
- /* ensure new tdown_cnt value is visible */
- smp_mb__after_atomic();
- /* tear down and disable UDMA channels */
- reinit_completion(&emac->tdown_complete);
- for (i = 0; i < emac->tx_ch_num; i++)
- k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);
-
- ret = wait_for_completion_timeout(&emac->tdown_complete,
- msecs_to_jiffies(1000));
- if (!ret)
- netdev_err(ndev, "tx teardown timeout\n");
-
- prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
- for (i = 0; i < emac->tx_ch_num; i++) {
- napi_disable(&emac->tx_chns[i].napi_tx);
- hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer);
- }
-
- max_rx_flows = PRUETH_MAX_RX_FLOWS;
- k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
-
- prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
-
- napi_disable(&emac->napi_rx);
- hrtimer_cancel(&emac->rx_hrtimer);
+ prueth_destroy_txq(emac);
+ prueth_destroy_rxq(emac);
cancel_work_sync(&emac->rx_mode_work);
/* Destroying the queued work in ndo_stop() */
cancel_delayed_work_sync(&emac->stats_work);
- if (prueth->emacs_initialized == 1)
- icss_iep_exit(emac->iep);
-
/* stop PRUs */
- prueth_emac_stop(emac);
+ if (prueth->emacs_initialized == 1)
+ prueth_emac_common_stop(prueth);
free_irq(emac->tx_ts_irq, emac);
- free_irq(emac->rx_chns.irq[rx_flow], emac);
+ free_irq(emac->rx_chns.irq[PRUETH_RX_FLOW_DATA], emac);
prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
- prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
+ prueth_cleanup_rx_chns(emac, &emac->rx_chns, PRUETH_MAX_RX_FLOWS);
prueth_cleanup_tx_chns(emac);
prueth->emacs_initialized--;
@@ -763,12 +1070,22 @@ static void emac_ndo_set_rx_mode_work(struct work_struct *work)
return;
}
- if (emac->prueth->is_hsr_offload_mode)
+ if (emac->prueth->is_hsr_offload_mode) {
__dev_mc_sync(ndev, icssg_prueth_hsr_add_mcast,
icssg_prueth_hsr_del_mcast);
- else
+ if (rtnl_trylock()) {
+ vlan_for_each(emac->prueth->hsr_dev,
+ icssg_update_vlan_mcast, emac);
+ rtnl_unlock();
+ }
+ } else {
__dev_mc_sync(ndev, icssg_prueth_add_mcast,
icssg_prueth_del_mcast);
+ if (rtnl_trylock()) {
+ vlan_for_each(ndev, icssg_update_vlan_mcast, emac);
+ rtnl_unlock();
+ }
+ }
}
/**
@@ -808,6 +1125,283 @@ static netdev_features_t emac_ndo_fix_features(struct net_device *ndev,
return features;
}
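+/* .ndo_vlan_rx_add_vid: add this port (and the host port in HSR offload
+ * mode) to the VLAN table entry for @vid, initialise the per-VID multicast
+ * list and set @vid as the port PVID.
+ */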
+static int emac_ndo_vlan_rx_add_vid(struct net_device *ndev,
+ __be16 proto, u16 vid)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ int port_mask = BIT(emac->port_id);
+ int untag_mask = 0;
+
+ if (prueth->is_hsr_offload_mode)
+ port_mask |= BIT(PRUETH_PORT_HOST);
+
+ __hw_addr_init(&emac->vlan_mcast_list[vid]);
+ netdev_dbg(emac->ndev, "VID add vid:%u port_mask:%X untag_mask %X\n",
+ vid, port_mask, untag_mask);
+
+ icssg_vtbl_modify(emac, vid, port_mask, untag_mask, true);
+ icssg_set_pvid(emac->prueth, vid, emac->port_id);
+
+ return 0;
+}
+
+static int emac_ndo_vlan_rx_del_vid(struct net_device *ndev,
+ __be16 proto, u16 vid)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ int port_mask = BIT(emac->port_id);
+ int untag_mask = 0;
+
+ if (prueth->is_hsr_offload_mode)
+ port_mask = BIT(PRUETH_PORT_HOST);
+
+ netdev_dbg(emac->ndev, "VID del vid:%u port_mask:%X untag_mask %X\n",
+ vid, port_mask, untag_mask);
+ icssg_vtbl_modify(emac, vid, port_mask, untag_mask, false);
+
+ return 0;
+}
+
+/**
+ * emac_xdp_xmit - Implements ndo_xdp_xmit
+ * @dev: netdev
+ * @n: number of frames
+ * @frames: array of XDP buffer pointers
+ * @flags: XDP extra info
+ *
+ * Return: number of frames successfully sent. Failed frames
+ * will be freed by the XDP core.
+ *
+ * For error cases, a negative errno code is returned and no frames
+ * are transmitted (the caller must handle freeing the frames).
+ **/
+static int emac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
+{
+ struct prueth_emac *emac = netdev_priv(dev);
+ struct net_device *ndev = emac->ndev;
+ struct netdev_queue *netif_txq;
+ int cpu = smp_processor_id();
+ struct xdp_frame *xdpf;
+ unsigned int q_idx;
+ int nxmit = 0;
+ u32 err;
+ int i;
+
+ q_idx = cpu % emac->tx_ch_num;
+ netif_txq = netdev_get_tx_queue(ndev, q_idx);
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ __netif_tx_lock(netif_txq, cpu);
+ for (i = 0; i < n; i++) {
+ xdpf = frames[i];
+ err = emac_xmit_xdp_frame(emac, xdpf, q_idx,
+ PRUETH_TX_BUFF_TYPE_XDP_NDO);
+ if (err != ICSSG_XDP_TX) {
+ ndev->stats.tx_dropped++;
+ break;
+ }
+ nxmit++;
+ }
+ __netif_tx_unlock(netif_txq);
+
+ return nxmit;
+}
+
+/**
+ * emac_xdp_setup - add/remove an XDP program
+ * @emac: emac device
+ * @bpf: XDP program
+ *
+ * Return: Always 0 (Success)
+ **/
+static int emac_xdp_setup(struct prueth_emac *emac, struct netdev_bpf *bpf)
+{
+ struct bpf_prog *prog = bpf->prog;
+
+ if (!emac->xdpi.prog && !prog)
+ return 0;
+
+ WRITE_ONCE(emac->xdp_prog, prog);
+
+ xdp_attachment_setup(&emac->xdpi, bpf);
+
+ return 0;
+}
+
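+/* XDP_SETUP_XSK_POOL (enable): DMA-map the pool and, if the interface is
+ * running, rebuild the Rx queue around it and re-enable forwarding.
+ */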
+static int prueth_xsk_pool_enable(struct prueth_emac *emac,
+ struct xsk_buff_pool *pool, u16 queue_id)
+{
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+ u32 frame_size;
+ int ret;
+
+ if (queue_id >= PRUETH_MAX_RX_FLOWS ||
+ queue_id >= emac->tx_ch_num) {
+ netdev_err(emac->ndev, "Invalid XSK queue ID %d\n", queue_id);
+ return -EINVAL;
+ }
+
+ frame_size = xsk_pool_get_rx_frame_size(pool);
+ if (frame_size < PRUETH_MAX_PKT_SIZE)
+ return -EOPNOTSUPP;
+
+ ret = xsk_pool_dma_map(pool, rx_chn->dma_dev, PRUETH_RX_DMA_ATTR);
+ if (ret) {
+ netdev_err(emac->ndev, "Failed to map XSK pool: %d\n", ret);
+ return ret;
+ }
+
+ if (netif_running(emac->ndev)) {
+ /* stop packets from wire for graceful teardown */
+ ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
+ if (ret)
+ return ret;
+ prueth_destroy_rxq(emac);
+ }
+
+ emac->xsk_qid = queue_id;
+ prueth_set_xsk_pool(emac, queue_id);
+
+ if (netif_running(emac->ndev)) {
+ ret = prueth_create_rxq(emac);
+ if (ret) {
+ netdev_err(emac->ndev, "Failed to create RX queue: %d\n", ret);
+ return ret;
+ }
+ ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
+ if (ret) {
+ prueth_destroy_rxq(emac);
+ return ret;
+ }
+ ret = prueth_xsk_wakeup(emac->ndev, queue_id, XDP_WAKEUP_RX);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int prueth_xsk_pool_disable(struct prueth_emac *emac, u16 queue_id)
+{
+ struct xsk_buff_pool *pool;
+ int ret;
+
+ if (queue_id >= PRUETH_MAX_RX_FLOWS ||
+ queue_id >= emac->tx_ch_num) {
+ netdev_err(emac->ndev, "Invalid XSK queue ID %d\n", queue_id);
+ return -EINVAL;
+ }
+
+ if (emac->xsk_qid != queue_id) {
+ netdev_err(emac->ndev, "XSK queue ID %d not registered\n", queue_id);
+ return -EINVAL;
+ }
+
+ pool = xsk_get_pool_from_qid(emac->ndev, queue_id);
+ if (!pool) {
+ netdev_err(emac->ndev, "No XSK pool registered for queue %d\n", queue_id);
+ return -EINVAL;
+ }
+
+ if (netif_running(emac->ndev)) {
+ /* stop packets from wire for graceful teardown */
+ ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
+ if (ret)
+ return ret;
+ prueth_destroy_rxq(emac);
+ }
+
+ xsk_pool_dma_unmap(pool, PRUETH_RX_DMA_ATTR);
+ emac->xsk_qid = -EINVAL;
+ prueth_set_xsk_pool(emac, queue_id);
+
+ if (netif_running(emac->ndev)) {
+ ret = prueth_create_rxq(emac);
+ if (ret) {
+ netdev_err(emac->ndev, "Failed to create RX queue: %d\n", ret);
+ return ret;
+ }
+ ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
+ if (ret) {
+ prueth_destroy_rxq(emac);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * emac_ndo_bpf - implements ndo_bpf for icssg_prueth
+ * @ndev: network adapter device
+ * @bpf: XDP program
+ *
+ * Return: 0 on success, error code on failure.
+ **/
+static int emac_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return emac_xdp_setup(emac, bpf);
+ case XDP_SETUP_XSK_POOL:
+ return bpf->xsk.pool ?
+ prueth_xsk_pool_enable(emac, bpf->xsk.pool, bpf->xsk.queue_id) :
+ prueth_xsk_pool_disable(emac, bpf->xsk.queue_id);
+ default:
+ return -EINVAL;
+ }
+}
+
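+/* .ndo_xsk_wakeup: schedule the Tx and/or Rx NAPI context for @qid so that
+ * pending AF_XDP zero-copy work is processed.
+ */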
+int prueth_xsk_wakeup(struct net_device *ndev, u32 qid, u32 flags)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[qid];
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+
+ if (emac->xsk_qid != qid) {
+ netdev_err(ndev, "XSK queue %d not registered\n", qid);
+ return -EINVAL;
+ }
+
+ if (qid >= PRUETH_MAX_RX_FLOWS || qid >= emac->tx_ch_num) {
+ netdev_err(ndev, "Invalid XSK queue ID %d\n", qid);
+ return -EINVAL;
+ }
+
+ if (!tx_chn->xsk_pool) {
+ netdev_err(ndev, "XSK pool not registered for queue %d\n", qid);
+ return -EINVAL;
+ }
+
+ if (!rx_chn->xsk_pool) {
+ netdev_err(ndev, "XSK pool not registered for RX queue %d\n", qid);
+ return -EINVAL;
+ }
+
+ if (flags & XDP_WAKEUP_TX) {
+ if (!napi_if_scheduled_mark_missed(&tx_chn->napi_tx)) {
+ if (likely(napi_schedule_prep(&tx_chn->napi_tx)))
+ __napi_schedule(&tx_chn->napi_tx);
+ }
+ }
+
+ if (flags & XDP_WAKEUP_RX) {
+ if (!napi_if_scheduled_mark_missed(&emac->napi_rx)) {
+ if (likely(napi_schedule_prep(&emac->napi_rx)))
+ __napi_schedule(&emac->napi_rx);
+ }
+ }
+
+ return 0;
+}
+
static const struct net_device_ops emac_netdev_ops = {
.ndo_open = emac_ndo_open,
.ndo_stop = emac_ndo_stop,
@@ -816,10 +1410,17 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = icssg_ndo_tx_timeout,
.ndo_set_rx_mode = emac_ndo_set_rx_mode,
- .ndo_eth_ioctl = icssg_ndo_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl,
.ndo_get_stats64 = icssg_ndo_get_stats64,
.ndo_get_phys_port_name = icssg_ndo_get_phys_port_name,
.ndo_fix_features = emac_ndo_fix_features,
+ .ndo_vlan_rx_add_vid = emac_ndo_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = emac_ndo_vlan_rx_del_vid,
+ .ndo_bpf = emac_ndo_bpf,
+ .ndo_xdp_xmit = emac_xdp_xmit,
+ .ndo_hwtstamp_get = icssg_ndo_get_ts_config,
+ .ndo_hwtstamp_set = icssg_ndo_set_ts_config,
+ .ndo_xsk_wakeup = prueth_xsk_wakeup,
};
static int prueth_netdev_init(struct prueth *prueth,
@@ -848,6 +1449,8 @@ static int prueth_netdev_init(struct prueth *prueth,
emac->prueth = prueth;
emac->ndev = ndev;
emac->port_id = port;
+ emac->xdp_prog = NULL;
+ emac->ndev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
if (!emac->cmd_wq) {
ret = -ENOMEM;
@@ -890,8 +1493,7 @@ static int prueth_netdev_init(struct prueth *prueth,
} else if (of_phy_is_fixed_link(eth_node)) {
ret = of_phy_register_fixed_link(eth_node);
if (ret) {
- ret = dev_err_probe(prueth->dev, ret,
- "failed to register fixed-link phy\n");
+ dev_err_probe(prueth->dev, ret, "failed to register fixed-link phy\n");
goto free;
}
@@ -947,13 +1549,17 @@ static int prueth_netdev_init(struct prueth *prueth,
ndev->netdev_ops = &emac_netdev_ops;
ndev->ethtool_ops = &icssg_ethtool_ops;
ndev->hw_features = NETIF_F_SG;
- ndev->features = ndev->hw_features;
+ ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->hw_features |= NETIF_PRUETH_HSR_OFFLOAD_FEATURES;
+ xdp_set_features_flag(ndev,
+ NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY);
netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
- hrtimer_init(&emac->rx_hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- emac->rx_hrtimer.function = &emac_rx_timer_callback;
+ hrtimer_setup(&emac->rx_hrtimer, &emac_rx_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
prueth->emac[mac] = emac;
return 0;
@@ -1001,10 +1607,11 @@ static void prueth_offload_fwd_mark_update(struct prueth *prueth)
}
}
-static void prueth_emac_restart(struct prueth *prueth)
+static int prueth_emac_restart(struct prueth *prueth)
{
struct prueth_emac *emac0 = prueth->emac[PRUETH_MAC0];
struct prueth_emac *emac1 = prueth->emac[PRUETH_MAC1];
+ int ret;
/* Detach the net_device for both PRUeth ports*/
if (netif_running(emac0->ndev))
@@ -1013,66 +1620,47 @@ static void prueth_emac_restart(struct prueth *prueth)
netif_device_detach(emac1->ndev);
/* Disable both PRUeth ports */
- icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
- icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);
+ ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
+ ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);
+ if (ret)
+ return ret;
/* Stop both pru cores for both PRUeth ports*/
- prueth_emac_stop(emac0);
- prueth->emacs_initialized--;
- prueth_emac_stop(emac1);
- prueth->emacs_initialized--;
+ ret = prueth_emac_common_stop(prueth);
+ if (ret) {
+ dev_err(prueth->dev, "Failed to stop the firmwares");
+ return ret;
+ }
/* Start both pru cores for both PRUeth ports */
- prueth_emac_start(prueth, emac0);
- prueth->emacs_initialized++;
- prueth_emac_start(prueth, emac1);
- prueth->emacs_initialized++;
+ ret = prueth_emac_common_start(prueth);
+ if (ret) {
+ dev_err(prueth->dev, "Failed to start the firmwares");
+ return ret;
+ }
/* Enable forwarding for both PRUeth ports */
- icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
- icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);
+ ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
+ ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);
 /* Attach net_device for both PRUeth ports */
netif_device_attach(emac0->ndev);
netif_device_attach(emac1->ndev);
+
+ return ret;
}
static void icssg_change_mode(struct prueth *prueth)
{
- struct prueth_emac *emac;
- int mac;
-
- prueth_emac_restart(prueth);
-
- for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
- emac = prueth->emac[mac];
- if (prueth->is_hsr_offload_mode) {
- if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM)
- icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE);
- else
- icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE);
- }
+ int ret;
- if (netif_running(emac->ndev)) {
- icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,
- ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
- ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
- ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
- ICSSG_FDB_ENTRY_BLOCK,
- true);
- icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID,
- BIT(emac->port_id) | DEFAULT_PORT_MASK,
- BIT(emac->port_id) | DEFAULT_UNTAG_MASK,
- true);
- if (prueth->is_hsr_offload_mode)
- icssg_vtbl_modify(emac, DEFAULT_VID,
- DEFAULT_PORT_MASK,
- DEFAULT_UNTAG_MASK, true);
- icssg_set_pvid(prueth, emac->port_vlan, emac->port_id);
- if (prueth->is_switch_mode)
- icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
- }
+ ret = prueth_emac_restart(prueth);
+ if (ret) {
+ dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
+ return;
}
+
+ icssg_enable_fw_offload(prueth);
}
static int prueth_netdevice_port_link(struct net_device *ndev,
@@ -1106,7 +1694,7 @@ static int prueth_netdevice_port_link(struct net_device *ndev,
if (prueth->br_members & BIT(PRUETH_PORT_MII0) &&
prueth->br_members & BIT(PRUETH_PORT_MII1)) {
prueth->is_switch_mode = true;
- prueth->default_vlan = 1;
+ prueth->default_vlan = PRUETH_DFLT_VLAN_SW;
emac->port_vlan = prueth->default_vlan;
icssg_change_mode(prueth);
}
@@ -1121,13 +1709,18 @@ static void prueth_netdevice_port_unlink(struct net_device *ndev)
{
struct prueth_emac *emac = netdev_priv(ndev);
struct prueth *prueth = emac->prueth;
+ int ret;
prueth->br_members &= ~BIT(emac->port_id);
if (prueth->is_switch_mode) {
prueth->is_switch_mode = false;
emac->port_vlan = 0;
- prueth_emac_restart(prueth);
+ ret = prueth_emac_restart(prueth);
+ if (ret) {
+ dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
+ return;
+ }
}
prueth_offload_fwd_mark_update(prueth);
@@ -1159,7 +1752,7 @@ static int prueth_hsr_port_link(struct net_device *ndev)
NETIF_PRUETH_HSR_OFFLOAD_FEATURES))
return -EOPNOTSUPP;
prueth->is_hsr_offload_mode = true;
- prueth->default_vlan = 1;
+ prueth->default_vlan = PRUETH_DFLT_VLAN_HSR;
emac0->port_vlan = prueth->default_vlan;
emac1->port_vlan = prueth->default_vlan;
icssg_change_mode(prueth);
@@ -1176,6 +1769,7 @@ static void prueth_hsr_port_unlink(struct net_device *ndev)
struct prueth *prueth = emac->prueth;
struct prueth_emac *emac0;
struct prueth_emac *emac1;
+ int ret;
emac0 = prueth->emac[PRUETH_MAC0];
emac1 = prueth->emac[PRUETH_MAC1];
@@ -1186,7 +1780,11 @@ static void prueth_hsr_port_unlink(struct net_device *ndev)
emac0->port_vlan = 0;
emac1->port_vlan = 0;
prueth->hsr_dev = NULL;
- prueth_emac_restart(prueth);
+ ret = prueth_emac_restart(prueth);
+ if (ret) {
+ dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
+ return;
+ }
netdev_dbg(ndev, "Disabling HSR Offload mode\n");
}
}
@@ -1200,6 +1798,7 @@ static int prueth_netdevice_event(struct notifier_block *unused,
struct netdev_notifier_changeupper_info *info;
struct prueth_emac *emac = netdev_priv(ndev);
struct prueth *prueth = emac->prueth;
+ enum hsr_version hsr_ndev_version;
int ret = NOTIFY_DONE;
if (ndev->netdev_ops != &emac_netdev_ops)
@@ -1211,6 +1810,11 @@ static int prueth_netdevice_event(struct notifier_block *unused,
if ((ndev->features & NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
is_hsr_master(info->upper_dev)) {
+ hsr_get_version(info->upper_dev, &hsr_ndev_version);
+ if (hsr_ndev_version != HSR_V1 && hsr_ndev_version != PRP_V1)
+ return -EOPNOTSUPP;
+ prueth->hsr_prp_version = hsr_ndev_version;
+
if (info->linking) {
if (!prueth->hsr_dev) {
prueth->hsr_dev = info->upper_dev;
@@ -1266,6 +1870,87 @@ static void prueth_unregister_notifiers(struct prueth *prueth)
unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
}
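+/* Read the per-MAC EMAC firmware names (pru, rtu, txpru) from the
+ * "firmware-name" DT property, three entries per MAC.
+ */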
+static void icssg_read_firmware_names(struct device_node *np,
+ struct icssg_firmwares *fw)
+{
+ int i;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ of_property_read_string_index(np, "firmware-name", i * 3 + 0,
+ &fw[i].pru);
+ of_property_read_string_index(np, "firmware-name", i * 3 + 1,
+ &fw[i].rtu);
+ of_property_read_string_index(np, "firmware-name", i * 3 + 2,
+ &fw[i].txpru);
+ }
+}
+
+/* icssg_firmware_name_replace - Replace a substring in firmware name
+ * @dev: device pointer for memory allocation
+ * @src: source firmware name string
+ * @from: substring to replace
+ * @to: replacement substring
+ *
+ * Return: a newly allocated string with the replacement, or the original
+ * string if replacement is not possible.
+ */
+static const char *icssg_firmware_name_replace(struct device *dev,
+ const char *src,
+ const char *from,
+ const char *to)
+{
+ size_t prefix, from_len, to_len, total;
+ const char *p = strstr(src, from);
+ char *buf;
+
+ if (!p)
+ return src; /* fallback: no replacement, use original */
+
+ prefix = p - src;
+ from_len = strlen(from);
+ to_len = strlen(to);
+ total = strlen(src) - from_len + to_len + 1;
+
+ buf = devm_kzalloc(dev, total, GFP_KERNEL);
+ if (!buf)
+ return src; /* fallback: allocation failed, use original */
+
+ strscpy(buf, src, prefix + 1);
+ strscpy(buf + prefix, to, to_len + 1);
+ strscpy(buf + prefix + to_len, p + from_len, total - prefix - to_len);
+
+ return buf;
+}
+
+/**
+ * icssg_mode_firmware_names - Generate firmware names for a specific mode
+ * @dev: device pointer for logging and context
+ * @src: source array of firmware name structures
+ * @dst: destination array to store updated firmware name structures
+ * @from: substring in firmware names to be replaced
+ * @to: substring to replace @from in firmware names
+ *
+ * Iterates over all MACs and replaces occurrences of the @from substring
+ * with @to in the firmware names (pru, rtu, txpru) for each MAC. The
+ * updated firmware names are stored in the @dst array.
+ */
+static void icssg_mode_firmware_names(struct device *dev,
+ struct icssg_firmwares *src,
+ struct icssg_firmwares *dst,
+ const char *from, const char *to)
+{
+ int i;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ dst[i].pru = icssg_firmware_name_replace(dev, src[i].pru,
+ from, to);
+ dst[i].rtu = icssg_firmware_name_replace(dev, src[i].rtu,
+ from, to);
+ dst[i].txpru = icssg_firmware_name_replace(dev, src[i].txpru,
+ from, to);
+ }
+}
+
static int prueth_probe(struct platform_device *pdev)
{
struct device_node *eth_node, *eth_ports_node;
@@ -1283,6 +1968,9 @@ static int prueth_probe(struct platform_device *pdev)
np = dev->of_node;
+ BUILD_BUG_ON_MSG((sizeof(struct prueth_swdata) > PRUETH_NAV_SW_DATA_SIZE),
+ "insufficient SW_DATA size");
+
prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
if (!prueth)
return -ENOMEM;
@@ -1361,13 +2049,10 @@ static int prueth_probe(struct platform_device *pdev)
prueth->pa_stats = NULL;
}
- if (eth0_node) {
+ if (eth0_node || eth1_node) {
ret = prueth_get_cores(prueth, ICSS_SLICE0, false);
if (ret)
goto put_cores;
- }
-
- if (eth1_node) {
ret = prueth_get_cores(prueth, ICSS_SLICE1, false);
if (ret)
goto put_cores;
@@ -1398,10 +2083,15 @@ static int prueth_probe(struct platform_device *pdev)
goto put_mem;
}
- msmc_ram_size = MSMC_RAM_SIZE;
prueth->is_switchmode_supported = prueth->pdata.switch_mode;
- if (prueth->is_switchmode_supported)
- msmc_ram_size = MSMC_RAM_SIZE_SWITCH_MODE;
+ if (prueth->pdata.banked_ms_ram) {
+ /* Reserve 2 MSMC RAM banks for buffers to avoid arbitration */
+ msmc_ram_size = (2 * MSMC_RAM_BANK_SIZE);
+ } else {
+ msmc_ram_size = PRUETH_EMAC_TOTAL_BUF_SIZE;
+ if (prueth->is_switchmode_supported)
+ msmc_ram_size = PRUETH_SW_TOTAL_BUF_SIZE;
+ }
/* NOTE: FW bug needs buffer base to be 64KB aligned */
prueth->msmcram.va =
@@ -1442,7 +2132,19 @@ static int prueth_probe(struct platform_device *pdev)
icss_iep_init_fw(prueth->iep1);
}
+ /* Read EMAC firmware names from device tree */
+ icssg_read_firmware_names(np, prueth->icssg_emac_firmwares);
+
+ /* Generate other mode firmware names based on EMAC firmware names */
+ icssg_mode_firmware_names(dev, prueth->icssg_emac_firmwares,
+ prueth->icssg_switch_firmwares, "eth", "sw");
+ icssg_mode_firmware_names(dev, prueth->icssg_emac_firmwares,
+ prueth->icssg_hsr_firmwares, "eth", "hsr");
+ icssg_mode_firmware_names(dev, prueth->icssg_emac_firmwares,
+ prueth->icssg_prp_firmwares, "eth", "prp");
+
spin_lock_init(&prueth->vtbl_lock);
+ spin_lock_init(&prueth->stats_lock);
/* setup netdev interfaces */
if (eth0_node) {
ret = prueth_netdev_init(prueth, eth0_node);
@@ -1557,7 +2259,8 @@ put_iep0:
free_pool:
gen_pool_free(prueth->sram_pool,
- (unsigned long)prueth->msmcram.va, msmc_ram_size);
+ (unsigned long)prueth->msmcram.va,
+ prueth->msmcram.size);
put_mem:
pruss_release_mem_region(prueth->pruss, &prueth->shram);
@@ -1566,14 +2269,12 @@ put_pruss:
pruss_put(prueth->pruss);
put_cores:
- if (eth1_node) {
- prueth_put_cores(prueth, ICSS_SLICE1);
- of_node_put(eth1_node);
- }
-
- if (eth0_node) {
+ if (eth0_node || eth1_node) {
prueth_put_cores(prueth, ICSS_SLICE0);
of_node_put(eth0_node);
+
+ prueth_put_cores(prueth, ICSS_SLICE1);
+ of_node_put(eth1_node);
}
return ret;
@@ -1611,8 +2312,8 @@ static void prueth_remove(struct platform_device *pdev)
icss_iep_put(prueth->iep0);
gen_pool_free(prueth->sram_pool,
- (unsigned long)prueth->msmcram.va,
- MSMC_RAM_SIZE);
+ (unsigned long)prueth->msmcram.va,
+ prueth->msmcram.size);
pruss_release_mem_region(prueth->pruss, &prueth->shram);
@@ -1629,12 +2330,14 @@ static const struct prueth_pdata am654_icssg_pdata = {
.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
.quirk_10m_link_issue = 1,
.switch_mode = 1,
+ .banked_ms_ram = 0,
};
static const struct prueth_pdata am64x_icssg_pdata = {
.fdqring_mode = K3_RINGACC_RING_MODE_RING,
.quirk_10m_link_issue = 1,
.switch_mode = 1,
+ .banked_ms_ram = 1,
};
static const struct of_device_id prueth_dt_match[] = {
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index 8722bb4a268a..10eadd356650 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -8,9 +8,12 @@
#ifndef __NET_TI_ICSSG_PRUETH_H
#define __NET_TI_ICSSG_PRUETH_H
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include <linux/etherdevice.h>
#include <linux/genalloc.h>
#include <linux/if_vlan.h>
+#include <linux/if_hsr.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
@@ -33,6 +36,10 @@
#include <linux/dma/k3-udma-glue.h>
#include <net/devlink.h>
+#include <net/xdp.h>
+#include <net/page_pool/helpers.h>
+#include <net/xsk_buff_pool.h>
+#include <net/xdp_sock_drv.h>
#include "icssg_config.h"
#include "icss_iep.h"
@@ -50,7 +57,7 @@
#define ICSSG_MAX_RFLOWS 8 /* per slice */
-#define ICSSG_NUM_PA_STATS 4
+#define ICSSG_NUM_PA_STATS 32
#define ICSSG_NUM_MIIG_STATS 60
/* Number of ICSSG related stats */
#define ICSSG_NUM_STATS (ICSSG_NUM_MIIG_STATS + ICSSG_NUM_PA_STATS)
@@ -83,6 +90,12 @@
#define ICSS_CMD_ADD_FILTER 0x7
#define ICSS_CMD_ADD_MAC 0x8
+/* VLAN Filtering Related MACROs */
+#define PRUETH_DFLT_VLAN_HSR 1
+#define PRUETH_DFLT_VLAN_SW 1
+#define PRUETH_DFLT_VLAN_MAC 0
+#define MAX_VLAN_ID 256
+
/* In switch mode there are 3 real ports i.e. 3 mac addrs.
* however Linux sees only the host side port. The other 2 ports
* are the switch ports.
@@ -115,6 +128,8 @@ struct prueth_tx_chn {
char name[32];
struct hrtimer tx_hrtimer;
unsigned long tx_pace_timeout_ns;
+ struct xsk_buff_pool *xsk_pool;
+ bool irq_disabled;
};
struct prueth_rx_chn {
@@ -125,6 +140,35 @@ struct prueth_rx_chn {
u32 descs_num;
unsigned int irq[ICSSG_MAX_RFLOWS]; /* separate irq per flow */
char name[32];
+ struct page_pool *pg_pool;
+ struct xdp_rxq_info xdp_rxq;
+ struct xsk_buff_pool *xsk_pool;
+ bool irq_disabled;
+};
+
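+/* Type of buffer referenced by a descriptor's software data area */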
+enum prueth_swdata_type {
+ PRUETH_SWDATA_INVALID = 0,
+ PRUETH_SWDATA_SKB,
+ PRUETH_SWDATA_PAGE,
+ PRUETH_SWDATA_CMD,
+ PRUETH_SWDATA_XDPF,
+ PRUETH_SWDATA_XSK,
+};
+
+enum prueth_tx_buff_type {
+ PRUETH_TX_BUFF_TYPE_XDP_TX,
+ PRUETH_TX_BUFF_TYPE_XDP_NDO,
+};
+
+struct prueth_swdata {
+ enum prueth_swdata_type type;
+ union prueth_data {
+ struct sk_buff *skb;
+ struct page *page;
+ u32 cmd;
+ struct xdp_frame *xdpf;
+ struct xdp_buff *xdp;
+ } data;
};
/* There are 4 Tx DMA channels, but the highest priority is CH3 (thread 3)
@@ -134,13 +178,18 @@ struct prueth_rx_chn {
#define PRUETH_MAX_TX_TS_REQUESTS 50 /* Max simultaneous TX_TS requests */
+/* XDP BPF state */
+#define ICSSG_XDP_PASS 0
+#define ICSSG_XDP_CONSUMED BIT(0)
+#define ICSSG_XDP_TX BIT(1)
+#define ICSSG_XDP_REDIR BIT(2)
+
/* Minimum coalesce time in usecs for both Tx and Rx */
#define ICSSG_MIN_COALESCE_USECS 20
/* data for each emac port */
struct prueth_emac {
bool is_sr1;
- bool fw_running;
struct prueth *prueth;
struct net_device *ndev;
u8 mac_addr[6];
@@ -201,24 +250,35 @@ struct prueth_emac {
/* RX IRQ Coalescing Related */
struct hrtimer rx_hrtimer;
unsigned long rx_pace_timeout_ns;
+
+ struct netdev_hw_addr_list vlan_mcast_list[MAX_VLAN_ID];
+ struct bpf_prog *xdp_prog;
+ struct xdp_attachment_info xdpi;
+ int xsk_qid;
};
+/* The buf includes headroom compatible with both skb and xdpf */
+#define PRUETH_HEADROOM_NA (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + NET_IP_ALIGN)
+#define PRUETH_HEADROOM ALIGN(PRUETH_HEADROOM_NA, sizeof(long))
+
/**
* struct prueth_pdata - PRUeth platform data
* @fdqring_mode: Free desc queue mode
* @quirk_10m_link_issue: 10M link detect errata
* @switch_mode: switch firmware support
+ * @banked_ms_ram: banked memory support
*/
struct prueth_pdata {
enum k3_ring_mode fdqring_mode;
u32 quirk_10m_link_issue:1;
u32 switch_mode:1;
+ u32 banked_ms_ram:1;
};
struct icssg_firmwares {
- char *pru;
- char *rtu;
- char *txpru;
+ const char *pru;
+ const char *rtu;
+ const char *txpru;
};
/**
@@ -247,6 +307,7 @@ struct icssg_firmwares {
* @vlan_tbl: VLAN-FID table pointer
* @hw_bridge_dev: pointer to HW bridge net device
* @hsr_dev: pointer to the HSR net device
+ * @hsr_prp_version: enum to store the protocol version of hsr master
* @br_members: bitmask of bridge member ports
* @hsr_members: bitmask of hsr member ports
* @prueth_netdevice_nb: netdevice notifier block
@@ -257,6 +318,10 @@ struct icssg_firmwares {
* @is_switchmode_supported: indicates platform support for switch mode
* @switch_id: ID for mapping switch ports to bridge
* @default_vlan: Default VLAN for host
+ * @icssg_emac_firmwares: Firmware names for EMAC mode, indexed per MAC
+ * @icssg_switch_firmwares: Firmware names for SWITCH mode, indexed per MAC
+ * @icssg_hsr_firmwares: Firmware names for HSR mode, indexed per MAC
+ * @icssg_prp_firmwares: Firmware names for PRP mode, indexed per MAC
*/
struct prueth {
struct device *dev;
@@ -286,6 +351,7 @@ struct prueth {
struct net_device *hw_bridge_dev;
struct net_device *hsr_dev;
+ enum hsr_version hsr_prp_version;
u8 br_members;
u8 hsr_members;
struct notifier_block prueth_netdevice_nb;
@@ -298,6 +364,12 @@ struct prueth {
int default_vlan;
/** @vtbl_lock: Lock for vtbl in shared memory */
spinlock_t vtbl_lock;
+ /** @stats_lock: Lock for reading icssg stats */
+ spinlock_t stats_lock;
+ struct icssg_firmwares icssg_emac_firmwares[PRUETH_NUM_MACS];
+ struct icssg_firmwares icssg_switch_firmwares[PRUETH_NUM_MACS];
+ struct icssg_firmwares icssg_hsr_firmwares[PRUETH_NUM_MACS];
+ struct icssg_firmwares icssg_prp_firmwares[PRUETH_NUM_MACS];
};
struct emac_tx_ts_response {
@@ -330,6 +402,18 @@ static inline int prueth_emac_slice(struct prueth_emac *emac)
extern const struct ethtool_ops icssg_ethtool_ops;
extern const struct dev_pm_ops prueth_dev_pm_ops;
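+/* Read a 64-bit time value exposed as two 32-bit words; retry if the upper
+ * word changes during the read so a torn value is never returned.
+ */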
+static inline u64 icssg_read_time(const void __iomem *addr)
+{
+ u32 low, high;
+
+ do {
+ high = readl(addr + 4);
+ low = readl(addr);
+ } while (high != readl(addr + 4));
+
+ return low + ((u64)high << 32);
+}
+
/* Classifier helpers */
void icssg_class_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac);
void icssg_class_set_host_mac_addr(struct regmap *miig_rt, const u8 *mac);
@@ -349,6 +433,8 @@ int icssg_set_port_state(struct prueth_emac *emac,
enum icssg_port_state_cmd state);
void icssg_config_set_speed(struct prueth_emac *emac);
void icssg_config_half_duplex(struct prueth_emac *emac);
+void icssg_init_emac_mode(struct prueth *prueth);
+void icssg_init_fw_offload_mode(struct prueth *prueth);
/* Buffer queue helpers */
int icssg_queue_pop(struct prueth *prueth, u8 queue);
@@ -365,6 +451,7 @@ void icssg_vtbl_modify(struct prueth_emac *emac, u8 vid, u8 port_mask,
u8 untag_mask, bool add);
u16 icssg_get_pvid(struct prueth_emac *emac);
void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port);
+int emac_fdb_flow_id_updated(struct prueth_emac *emac);
#define prueth_napi_to_tx_chn(pnapi) \
container_of(pnapi, struct prueth_tx_chn, napi_tx)
@@ -388,14 +475,14 @@ int prueth_init_rx_chns(struct prueth_emac *emac,
struct prueth_rx_chn *rx_chn,
char *name, u32 max_rflows,
u32 max_desc_num);
-int prueth_dma_rx_push(struct prueth_emac *emac,
- struct sk_buff *skb,
- struct prueth_rx_chn *rx_chn);
+int prueth_dma_rx_push_mapped(struct prueth_emac *emac,
+ struct prueth_rx_chn *rx_chn,
+ struct page *page, u32 buf_len);
+unsigned int prueth_rxbuf_total_len(unsigned int len);
void emac_rx_timestamp(struct prueth_emac *emac,
struct sk_buff *skb, u32 *psdata);
enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev);
irqreturn_t prueth_rx_irq(int irq, void *dev_id);
-void prueth_emac_stop(struct prueth_emac *emac);
void prueth_cleanup_tx_ts(struct prueth_emac *emac);
int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget);
int prueth_prepare_rx_chan(struct prueth_emac *emac,
@@ -406,7 +493,11 @@ void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num,
void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
int num_flows, bool disable);
void icssg_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue);
-int icssg_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd);
+int icssg_ndo_get_ts_config(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config);
+int icssg_ndo_set_ts_config(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void icssg_ndo_get_stats64(struct net_device *ndev,
struct rtnl_link_stats64 *stats);
int icssg_ndo_get_phys_port_name(struct net_device *ndev, char *name,
@@ -420,5 +511,16 @@ void prueth_put_cores(struct prueth *prueth, int slice);
/* Revision specific helper */
u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns);
+u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
+ struct xdp_frame *xdpf,
+ unsigned int q_idx,
+ enum prueth_tx_buff_type buff_type);
+void prueth_rx_cleanup(void *data, dma_addr_t desc_dma);
+void prueth_tx_cleanup(void *data, dma_addr_t desc_dma);
+int prueth_xsk_wakeup(struct net_device *ndev, u32 qid, u32 flags);
+static inline bool prueth_xdp_is_enabled(struct prueth_emac *emac)
+{
+ return !!READ_ONCE(emac->xdp_prog);
+}
#endif /* __NET_TI_ICSSG_PRUETH_H */
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
index 5024f0647a0d..7bb4f0d850cc 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
@@ -84,7 +84,7 @@ static int emac_send_command_sr1(struct prueth_emac *emac, u32 cmd)
__le32 *data = emac->cmd_data;
dma_addr_t desc_dma, buf_dma;
struct prueth_tx_chn *tx_chn;
- void **swdata;
+ struct prueth_swdata *swdata;
int ret = 0;
u32 *epib;
@@ -122,7 +122,8 @@ static int emac_send_command_sr1(struct prueth_emac *emac, u32 cmd)
cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
swdata = cppi5_hdesc_get_swdata(first_desc);
- *swdata = data;
+ swdata->type = PRUETH_SWDATA_CMD;
+ swdata->data.cmd = le32_to_cpu(data[0]);
cppi5_hdesc_set_pktlen(first_desc, pkt_len);
desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
@@ -268,16 +269,16 @@ static int emac_phy_connect(struct prueth_emac *emac)
* Returns skb pointer if packet found else NULL
* Caller must free the returned skb.
*/
-static struct sk_buff *prueth_process_rx_mgm(struct prueth_emac *emac,
- u32 flow_id)
+static struct page *prueth_process_rx_mgm(struct prueth_emac *emac,
+ u32 flow_id)
{
struct prueth_rx_chn *rx_chn = &emac->rx_mgm_chn;
struct net_device *ndev = emac->ndev;
struct cppi5_host_desc_t *desc_rx;
- struct sk_buff *skb, *new_skb;
+ struct page *page, *new_page;
+ struct prueth_swdata *swdata;
dma_addr_t desc_dma, buf_dma;
- u32 buf_dma_len, pkt_len;
- void **swdata;
+ u32 buf_dma_len;
int ret;
ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
@@ -299,34 +300,31 @@ static struct sk_buff *prueth_process_rx_mgm(struct prueth_emac *emac,
}
swdata = cppi5_hdesc_get_swdata(desc_rx);
- skb = *swdata;
+ page = swdata->data.page;
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
- pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE);
+ new_page = page_pool_dev_alloc_pages(rx_chn->pg_pool);
/* if allocation fails we drop the packet but push the
* descriptor back to the ring with old skb to prevent a stall
*/
- if (!new_skb) {
+ if (!new_page) {
netdev_err(ndev,
- "skb alloc failed, dropped mgm pkt from flow %d\n",
+ "page alloc failed, dropped mgm pkt from flow %d\n",
flow_id);
- new_skb = skb;
- skb = NULL; /* return NULL */
- } else {
- /* return the filled skb */
- skb_put(skb, pkt_len);
+ new_page = page;
+ page = NULL; /* return NULL */
}
/* queue another DMA */
- ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_mgm_chn);
+ ret = prueth_dma_rx_push_mapped(emac, &emac->rx_chns, new_page,
+ PRUETH_MAX_PKT_SIZE);
if (WARN_ON(ret < 0))
- dev_kfree_skb_any(new_skb);
+ page_pool_recycle_direct(rx_chn->pg_pool, new_page);
- return skb;
+ return page;
}
static void prueth_tx_ts_sr1(struct prueth_emac *emac,
@@ -362,14 +360,14 @@ static void prueth_tx_ts_sr1(struct prueth_emac *emac,
static irqreturn_t prueth_rx_mgm_ts_thread_sr1(int irq, void *dev_id)
{
struct prueth_emac *emac = dev_id;
- struct sk_buff *skb;
+ struct page *page;
- skb = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1);
- if (!skb)
+ page = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1);
+ if (!page)
return IRQ_NONE;
- prueth_tx_ts_sr1(emac, (void *)skb->data);
- dev_kfree_skb_any(skb);
+ prueth_tx_ts_sr1(emac, (void *)page_address(page));
+ page_pool_recycle_direct(pp_page_to_nmdesc(page)->pp, page);
return IRQ_HANDLED;
}
@@ -377,15 +375,15 @@ static irqreturn_t prueth_rx_mgm_ts_thread_sr1(int irq, void *dev_id)
static irqreturn_t prueth_rx_mgm_rsp_thread(int irq, void *dev_id)
{
struct prueth_emac *emac = dev_id;
- struct sk_buff *skb;
+ struct page *page;
u32 rsp;
- skb = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_RESPONSE_SR1);
- if (!skb)
+ page = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_RESPONSE_SR1);
+ if (!page)
return IRQ_NONE;
/* Process command response */
- rsp = le32_to_cpu(*(__le32 *)skb->data) & 0xffff0000;
+ rsp = le32_to_cpu(*(__le32 *)page_address(page)) & 0xffff0000;
if (rsp == ICSSG_SHUTDOWN_CMD_SR1) {
netdev_dbg(emac->ndev, "f/w Shutdown cmd resp %x\n", rsp);
complete(&emac->cmd_complete);
@@ -394,7 +392,7 @@ static irqreturn_t prueth_rx_mgm_rsp_thread(int irq, void *dev_id)
complete(&emac->cmd_complete);
}
- dev_kfree_skb_any(skb);
+ page_pool_recycle_direct(pp_page_to_nmdesc(page)->pp, page);
return IRQ_HANDLED;
}
@@ -440,7 +438,6 @@ static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
goto halt_pru;
}
- emac->fw_running = 1;
return 0;
halt_pru:
@@ -449,6 +446,29 @@ halt_pru:
return ret;
}
+static void prueth_emac_stop(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ int slice;
+
+ switch (emac->port_id) {
+ case PRUETH_PORT_MII0:
+ slice = ICSS_SLICE0;
+ break;
+ case PRUETH_PORT_MII1:
+ slice = ICSS_SLICE1;
+ break;
+ default:
+ netdev_err(emac->ndev, "invalid port\n");
+ return;
+ }
+
+ if (!emac->is_sr1)
+ rproc_shutdown(prueth->txpru[slice]);
+ rproc_shutdown(prueth->rtu[slice]);
+ rproc_shutdown(prueth->pru[slice]);
+}
+
/**
* emac_ndo_open - EMAC device open
* @ndev: network adapter device
@@ -727,9 +747,11 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = icssg_ndo_tx_timeout,
.ndo_set_rx_mode = emac_ndo_set_rx_mode_sr1,
- .ndo_eth_ioctl = icssg_ndo_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl,
.ndo_get_stats64 = icssg_ndo_get_stats64,
.ndo_get_phys_port_name = icssg_ndo_get_phys_port_name,
+ .ndo_hwtstamp_get = icssg_ndo_get_ts_config,
+ .ndo_hwtstamp_set = icssg_ndo_set_ts_config,
};
static int prueth_netdev_init(struct prueth *prueth,
@@ -796,8 +818,7 @@ static int prueth_netdev_init(struct prueth *prueth,
} else if (of_phy_is_fixed_link(eth_node)) {
ret = of_phy_register_fixed_link(eth_node);
if (ret) {
- ret = dev_err_probe(prueth->dev, ret,
- "failed to register fixed-link phy\n");
+ dev_err_probe(prueth->dev, ret, "failed to register fixed-link phy\n");
goto free;
}
@@ -1009,8 +1030,6 @@ static int prueth_probe(struct platform_device *pdev)
(unsigned long)prueth->msmcram.va);
prueth->msmcram.size = msmc_ram_size;
memset_io(prueth->msmcram.va, 0, msmc_ram_size);
- dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa,
- prueth->msmcram.va, prueth->msmcram.size);
prueth->iep0 = icss_iep_get_idx(np, 0);
if (IS_ERR(prueth->iep0)) {
diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.c b/drivers/net/ethernet/ti/icssg/icssg_stats.c
index 8800bd3a8d07..7159baa0155c 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_stats.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_stats.c
@@ -11,7 +11,6 @@
#define ICSSG_TX_PACKET_OFFSET 0xA0
#define ICSSG_TX_BYTE_OFFSET 0xEC
-#define ICSSG_FW_STATS_BASE 0x0248
static u32 stats_base[] = { 0x54c, /* Slice 0 stats start */
0xb18, /* Slice 1 stats start */
@@ -26,7 +25,17 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
u32 val, reg;
int i;
+ spin_lock(&prueth->stats_lock);
+
for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++) {
+ /* In MII mode TX lines are swapped inside ICSSG, so read Tx stats
+ * from slice1 for port0 and slice0 for port1 to get accurate Tx
+ * stats for a given port
+ */
+ if (emac->phy_if == PHY_INTERFACE_MODE_MII &&
+ icssg_all_miig_stats[i].offset >= ICSSG_TX_PACKET_OFFSET &&
+ icssg_all_miig_stats[i].offset <= ICSSG_TX_BYTE_OFFSET)
+ base = stats_base[slice ^ 1];
regmap_read(prueth->miig_rt,
base + icssg_all_miig_stats[i].offset,
&val);
@@ -44,13 +53,14 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
if (prueth->pa_stats) {
for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++) {
- reg = ICSSG_FW_STATS_BASE +
- icssg_all_pa_stats[i].offset *
- PRUETH_NUM_MACS + slice * sizeof(u32);
+ reg = icssg_all_pa_stats[i].offset +
+ slice * sizeof(u32);
regmap_read(prueth->pa_stats, reg, &val);
emac->pa_stats[i] += val;
}
}
+
+ spin_unlock(&prueth->stats_lock);
}
void icssg_stats_work_handler(struct work_struct *work)
@@ -76,7 +86,7 @@ int emac_get_stat_by_name(struct prueth_emac *emac, char *stat_name)
if (emac->prueth->pa_stats) {
for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++) {
if (!strcmp(icssg_all_pa_stats[i].name, stat_name))
- return emac->pa_stats[icssg_all_pa_stats[i].offset / sizeof(u32)];
+ return emac->pa_stats[i];
}
}
diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.h b/drivers/net/ethernet/ti/icssg/icssg_stats.h
index e88b919f532c..5ec0b38e0c67 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_stats.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_stats.h
@@ -155,24 +155,10 @@ static const struct icssg_miig_stats icssg_all_miig_stats[] = {
ICSSG_MIIG_STATS(tx_bytes, true),
};
-/**
- * struct pa_stats_regs - ICSSG Firmware maintained PA Stats register
- * @fw_rx_cnt: Number of valid packets sent by Rx PRU to Host on PSI
- * @fw_tx_cnt: Number of valid packets copied by RTU0 to Tx queues
- * @fw_tx_pre_overflow: Host Egress Q (Pre-emptible) Overflow Counter
- * @fw_tx_exp_overflow: Host Egress Q (Express) Overflow Counter
- */
-struct pa_stats_regs {
- u32 fw_rx_cnt;
- u32 fw_tx_cnt;
- u32 fw_tx_pre_overflow;
- u32 fw_tx_exp_overflow;
-};
-
-#define ICSSG_PA_STATS(field) \
-{ \
- #field, \
- offsetof(struct pa_stats_regs, field), \
+#define ICSSG_PA_STATS(field) \
+{ \
+ #field, \
+ field, \
}
struct icssg_pa_stats {
@@ -181,10 +167,38 @@ struct icssg_pa_stats {
};
static const struct icssg_pa_stats icssg_all_pa_stats[] = {
- ICSSG_PA_STATS(fw_rx_cnt),
- ICSSG_PA_STATS(fw_tx_cnt),
- ICSSG_PA_STATS(fw_tx_pre_overflow),
- ICSSG_PA_STATS(fw_tx_exp_overflow),
+ ICSSG_PA_STATS(FW_RTU_PKT_DROP),
+ ICSSG_PA_STATS(FW_Q0_OVERFLOW),
+ ICSSG_PA_STATS(FW_Q1_OVERFLOW),
+ ICSSG_PA_STATS(FW_Q2_OVERFLOW),
+ ICSSG_PA_STATS(FW_Q3_OVERFLOW),
+ ICSSG_PA_STATS(FW_Q4_OVERFLOW),
+ ICSSG_PA_STATS(FW_Q5_OVERFLOW),
+ ICSSG_PA_STATS(FW_Q6_OVERFLOW),
+ ICSSG_PA_STATS(FW_Q7_OVERFLOW),
+ ICSSG_PA_STATS(FW_DROPPED_PKT),
+ ICSSG_PA_STATS(FW_RX_ERROR),
+ ICSSG_PA_STATS(FW_RX_DS_INVALID),
+ ICSSG_PA_STATS(FW_TX_DROPPED_PACKET),
+ ICSSG_PA_STATS(FW_TX_TS_DROPPED_PACKET),
+ ICSSG_PA_STATS(FW_INF_PORT_DISABLED),
+ ICSSG_PA_STATS(FW_INF_SAV),
+ ICSSG_PA_STATS(FW_INF_SA_DL),
+ ICSSG_PA_STATS(FW_INF_PORT_BLOCKED),
+ ICSSG_PA_STATS(FW_INF_DROP_TAGGED),
+ ICSSG_PA_STATS(FW_INF_DROP_PRIOTAGGED),
+ ICSSG_PA_STATS(FW_INF_DROP_NOTAG),
+ ICSSG_PA_STATS(FW_INF_DROP_NOTMEMBER),
+ ICSSG_PA_STATS(FW_RX_EOF_SHORT_FRMERR),
+ ICSSG_PA_STATS(FW_RX_B0_DROP_EARLY_EOF),
+ ICSSG_PA_STATS(FW_TX_JUMBO_FRM_CUTOFF),
+ ICSSG_PA_STATS(FW_RX_EXP_FRAG_Q_DROP),
+ ICSSG_PA_STATS(FW_RX_FIFO_OVERRUN),
+ ICSSG_PA_STATS(FW_CUT_THR_PKT),
+ ICSSG_PA_STATS(FW_HOST_RX_PKT_CNT),
+ ICSSG_PA_STATS(FW_HOST_TX_PKT_CNT),
+ ICSSG_PA_STATS(FW_HOST_EGRESS_Q_PRE_OVERFLOW),
+ ICSSG_PA_STATS(FW_HOST_EGRESS_Q_EXP_OVERFLOW),
};
#endif /* __NET_TI_ICSSG_STATS_H */
diff --git a/drivers/net/ethernet/ti/icssg/icssg_switch_map.h b/drivers/net/ethernet/ti/icssg/icssg_switch_map.h
index 424a7e945ea8..7e053b8af3ec 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_switch_map.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_switch_map.h
@@ -180,6 +180,9 @@
/* Used to notify the FW of the current link speed */
#define PORT_LINK_SPEED_OFFSET 0x00A8
+/* 2k memory pointer reserved for default writes by PRU0*/
+#define DEFAULT_MSMC_Q_OFFSET 0x00AC
+
/* TAS gate mask for windows list0 */
#define TAS_GATE_MASK_LIST0 0x0100
@@ -231,4 +234,37 @@
/* Start of 32 bits PA_STAT counters */
#define PA_STAT_32b_START_OFFSET 0x0080
+#define FW_RTU_PKT_DROP 0x0088
+#define FW_Q0_OVERFLOW 0x0090
+#define FW_Q1_OVERFLOW 0x0098
+#define FW_Q2_OVERFLOW 0x00A0
+#define FW_Q3_OVERFLOW 0x00A8
+#define FW_Q4_OVERFLOW 0x00B0
+#define FW_Q5_OVERFLOW 0x00B8
+#define FW_Q6_OVERFLOW 0x00C0
+#define FW_Q7_OVERFLOW 0x00C8
+#define FW_DROPPED_PKT 0x00F8
+#define FW_RX_ERROR 0x0100
+#define FW_RX_DS_INVALID 0x0108
+#define FW_TX_DROPPED_PACKET 0x0110
+#define FW_TX_TS_DROPPED_PACKET 0x0118
+#define FW_INF_PORT_DISABLED 0x0120
+#define FW_INF_SAV 0x0128
+#define FW_INF_SA_DL 0x0130
+#define FW_INF_PORT_BLOCKED 0x0138
+#define FW_INF_DROP_TAGGED 0x0140
+#define FW_INF_DROP_PRIOTAGGED 0x0148
+#define FW_INF_DROP_NOTAG 0x0150
+#define FW_INF_DROP_NOTMEMBER 0x0158
+#define FW_RX_EOF_SHORT_FRMERR 0x0188
+#define FW_RX_B0_DROP_EARLY_EOF 0x0190
+#define FW_TX_JUMBO_FRM_CUTOFF 0x0198
+#define FW_RX_EXP_FRAG_Q_DROP 0x01A0
+#define FW_RX_FIFO_OVERRUN 0x01A8
+#define FW_CUT_THR_PKT 0x01B0
+#define FW_HOST_RX_PKT_CNT 0x0248
+#define FW_HOST_TX_PKT_CNT 0x0250
+#define FW_HOST_EGRESS_Q_PRE_OVERFLOW 0x0258
+#define FW_HOST_EGRESS_Q_EXP_OVERFLOW 0x0260
+
#endif /* __NET_TI_ICSSG_SWITCH_MAP_H */
diff --git a/drivers/net/ethernet/ti/icssm/icssm_prueth.c b/drivers/net/ethernet/ti/icssm/icssm_prueth.c
new file mode 100644
index 000000000000..293b7af04263
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssm/icssm_prueth.c
@@ -0,0 +1,1746 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Texas Instruments ICSSM Ethernet Driver
+ *
+ * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/genalloc.h>
+#include <linux/if_bridge.h>
+#include <linux/if_hsr.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/remoteproc/pruss.h>
+#include <linux/ptp_classify.h>
+#include <linux/regmap.h>
+#include <linux/remoteproc.h>
+#include <net/pkt_cls.h>
+
+#include "icssm_prueth.h"
+#include "../icssg/icssg_mii_rt.h"
+#include "../icssg/icss_iep.h"
+
+#define OCMC_RAM_SIZE (SZ_64K)
+
+#define TX_START_DELAY 0x40
+#define TX_CLK_DELAY_100M 0x6
+#define HR_TIMER_TX_DELAY_US 100
+
+static void icssm_prueth_write_reg(struct prueth *prueth,
+ enum prueth_mem region,
+ unsigned int reg, u32 val)
+{
+ writel_relaxed(val, prueth->mem[region].va + reg);
+}
+
+/* The macro below is for 1528-byte frame support, to allow room for the
+ * redundancy tag as well
+ */
+#define PRUSS_MII_RT_RX_FRMS_MAX_SUPPORT_EMAC (VLAN_ETH_FRAME_LEN + \
+ ETH_FCS_LEN + \
+ ICSSM_LRE_TAG_SIZE)
+
+/* ensure that order of PRUSS mem regions is same as enum prueth_mem */
+static enum pruss_mem pruss_mem_ids[] = { PRUSS_MEM_DRAM0, PRUSS_MEM_DRAM1,
+ PRUSS_MEM_SHRD_RAM2 };
+
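+/* Static queue layout tables, in struct prueth_queue_info field order. Note
+ * that for the MII port queues the second field is initialised to the end of
+ * the queue's data buffer area, not to a queue descriptor offset.
+ */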
+static const struct prueth_queue_info queue_infos[][NUM_QUEUES] = {
+ [PRUETH_PORT_QUEUE_HOST] = {
+ [PRUETH_QUEUE1] = {
+ P0_Q1_BUFFER_OFFSET,
+ HOST_QUEUE_DESC_OFFSET,
+ P0_Q1_BD_OFFSET,
+ P0_Q1_BD_OFFSET + ((HOST_QUEUE_1_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE2] = {
+ P0_Q2_BUFFER_OFFSET,
+ HOST_QUEUE_DESC_OFFSET + 8,
+ P0_Q2_BD_OFFSET,
+ P0_Q2_BD_OFFSET + ((HOST_QUEUE_2_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE3] = {
+ P0_Q3_BUFFER_OFFSET,
+ HOST_QUEUE_DESC_OFFSET + 16,
+ P0_Q3_BD_OFFSET,
+ P0_Q3_BD_OFFSET + ((HOST_QUEUE_3_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE4] = {
+ P0_Q4_BUFFER_OFFSET,
+ HOST_QUEUE_DESC_OFFSET + 24,
+ P0_Q4_BD_OFFSET,
+ P0_Q4_BD_OFFSET + ((HOST_QUEUE_4_SIZE - 1) * BD_SIZE),
+ },
+ },
+ [PRUETH_PORT_QUEUE_MII0] = {
+ [PRUETH_QUEUE1] = {
+ P1_Q1_BUFFER_OFFSET,
+ P1_Q1_BUFFER_OFFSET + ((QUEUE_1_SIZE - 1) *
+ ICSS_BLOCK_SIZE),
+ P1_Q1_BD_OFFSET,
+ P1_Q1_BD_OFFSET + ((QUEUE_1_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE2] = {
+ P1_Q2_BUFFER_OFFSET,
+ P1_Q2_BUFFER_OFFSET + ((QUEUE_2_SIZE - 1) *
+ ICSS_BLOCK_SIZE),
+ P1_Q2_BD_OFFSET,
+ P1_Q2_BD_OFFSET + ((QUEUE_2_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE3] = {
+ P1_Q3_BUFFER_OFFSET,
+ P1_Q3_BUFFER_OFFSET + ((QUEUE_3_SIZE - 1) *
+ ICSS_BLOCK_SIZE),
+ P1_Q3_BD_OFFSET,
+ P1_Q3_BD_OFFSET + ((QUEUE_3_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE4] = {
+ P1_Q4_BUFFER_OFFSET,
+ P1_Q4_BUFFER_OFFSET + ((QUEUE_4_SIZE - 1) *
+ ICSS_BLOCK_SIZE),
+ P1_Q4_BD_OFFSET,
+ P1_Q4_BD_OFFSET + ((QUEUE_4_SIZE - 1) * BD_SIZE),
+ },
+ },
+ [PRUETH_PORT_QUEUE_MII1] = {
+ [PRUETH_QUEUE1] = {
+ P2_Q1_BUFFER_OFFSET,
+ P2_Q1_BUFFER_OFFSET + ((QUEUE_1_SIZE - 1) *
+ ICSS_BLOCK_SIZE),
+ P2_Q1_BD_OFFSET,
+ P2_Q1_BD_OFFSET + ((QUEUE_1_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE2] = {
+ P2_Q2_BUFFER_OFFSET,
+ P2_Q2_BUFFER_OFFSET + ((QUEUE_2_SIZE - 1) *
+ ICSS_BLOCK_SIZE),
+ P2_Q2_BD_OFFSET,
+ P2_Q2_BD_OFFSET + ((QUEUE_2_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE3] = {
+ P2_Q3_BUFFER_OFFSET,
+ P2_Q3_BUFFER_OFFSET + ((QUEUE_3_SIZE - 1) *
+ ICSS_BLOCK_SIZE),
+ P2_Q3_BD_OFFSET,
+ P2_Q3_BD_OFFSET + ((QUEUE_3_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE4] = {
+ P2_Q4_BUFFER_OFFSET,
+ P2_Q4_BUFFER_OFFSET + ((QUEUE_4_SIZE - 1) *
+ ICSS_BLOCK_SIZE),
+ P2_Q4_BD_OFFSET,
+ P2_Q4_BD_OFFSET + ((QUEUE_4_SIZE - 1) * BD_SIZE),
+ },
+ },
+};
+
+static const struct prueth_queue_desc queue_descs[][NUM_QUEUES] = {
+ [PRUETH_PORT_QUEUE_HOST] = {
+ { .rd_ptr = P0_Q1_BD_OFFSET, .wr_ptr = P0_Q1_BD_OFFSET, },
+ { .rd_ptr = P0_Q2_BD_OFFSET, .wr_ptr = P0_Q2_BD_OFFSET, },
+ { .rd_ptr = P0_Q3_BD_OFFSET, .wr_ptr = P0_Q3_BD_OFFSET, },
+ { .rd_ptr = P0_Q4_BD_OFFSET, .wr_ptr = P0_Q4_BD_OFFSET, },
+ },
+ [PRUETH_PORT_QUEUE_MII0] = {
+ { .rd_ptr = P1_Q1_BD_OFFSET, .wr_ptr = P1_Q1_BD_OFFSET, },
+ { .rd_ptr = P1_Q2_BD_OFFSET, .wr_ptr = P1_Q2_BD_OFFSET, },
+ { .rd_ptr = P1_Q3_BD_OFFSET, .wr_ptr = P1_Q3_BD_OFFSET, },
+ { .rd_ptr = P1_Q4_BD_OFFSET, .wr_ptr = P1_Q4_BD_OFFSET, },
+ },
+ [PRUETH_PORT_QUEUE_MII1] = {
+ { .rd_ptr = P2_Q1_BD_OFFSET, .wr_ptr = P2_Q1_BD_OFFSET, },
+ { .rd_ptr = P2_Q2_BD_OFFSET, .wr_ptr = P2_Q2_BD_OFFSET, },
+ { .rd_ptr = P2_Q3_BD_OFFSET, .wr_ptr = P2_Q3_BD_OFFSET, },
+ { .rd_ptr = P2_Q4_BD_OFFSET, .wr_ptr = P2_Q4_BD_OFFSET, },
+ }
+};
+
+static void icssm_prueth_hostconfig(struct prueth *prueth)
+{
+ void __iomem *sram_base = prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+ void __iomem *sram;
+
+ /* queue size lookup table */
+ sram = sram_base + HOST_QUEUE_SIZE_ADDR;
+ writew(HOST_QUEUE_1_SIZE, sram);
+ writew(HOST_QUEUE_2_SIZE, sram + 2);
+ writew(HOST_QUEUE_3_SIZE, sram + 4);
+ writew(HOST_QUEUE_4_SIZE, sram + 6);
+
+ /* queue information table */
+ sram = sram_base + HOST_Q1_RX_CONTEXT_OFFSET;
+ memcpy_toio(sram, queue_infos[PRUETH_PORT_QUEUE_HOST],
+ sizeof(queue_infos[PRUETH_PORT_QUEUE_HOST]));
+
+ /* buffer offset table */
+ sram = sram_base + HOST_QUEUE_OFFSET_ADDR;
+ writew(P0_Q1_BUFFER_OFFSET, sram);
+ writew(P0_Q2_BUFFER_OFFSET, sram + 2);
+ writew(P0_Q3_BUFFER_OFFSET, sram + 4);
+ writew(P0_Q4_BUFFER_OFFSET, sram + 6);
+
+	/* buffer descriptor offset table */
+ sram = sram_base + HOST_QUEUE_DESCRIPTOR_OFFSET_ADDR;
+ writew(P0_Q1_BD_OFFSET, sram);
+ writew(P0_Q2_BD_OFFSET, sram + 2);
+ writew(P0_Q3_BD_OFFSET, sram + 4);
+ writew(P0_Q4_BD_OFFSET, sram + 6);
+
+ /* queue table */
+ sram = sram_base + HOST_QUEUE_DESC_OFFSET;
+ memcpy_toio(sram, queue_descs[PRUETH_PORT_QUEUE_HOST],
+ sizeof(queue_descs[PRUETH_PORT_QUEUE_HOST]));
+}
+
+static void icssm_prueth_mii_init(struct prueth *prueth)
+{
+ struct regmap *mii_rt;
+ u32 rxcfg_reg, rxcfg;
+ u32 txcfg_reg, txcfg;
+
+ mii_rt = prueth->mii_rt;
+
+ rxcfg = PRUSS_MII_RT_RXCFG_RX_ENABLE |
+ PRUSS_MII_RT_RXCFG_RX_DATA_RDY_MODE_DIS |
+ PRUSS_MII_RT_RXCFG_RX_L2_EN |
+ PRUSS_MII_RT_RXCFG_RX_CUT_PREAMBLE |
+ PRUSS_MII_RT_RXCFG_RX_L2_EOF_SCLR_DIS;
+
+ /* Configuration of Port 0 Rx */
+ rxcfg_reg = PRUSS_MII_RT_RXCFG0;
+
+ regmap_write(mii_rt, rxcfg_reg, rxcfg);
+
+ /* Configuration of Port 1 Rx */
+ rxcfg_reg = PRUSS_MII_RT_RXCFG1;
+
+ rxcfg |= PRUSS_MII_RT_RXCFG_RX_MUX_SEL;
+
+ regmap_write(mii_rt, rxcfg_reg, rxcfg);
+
+ txcfg = PRUSS_MII_RT_TXCFG_TX_ENABLE |
+ PRUSS_MII_RT_TXCFG_TX_AUTO_PREAMBLE |
+ PRUSS_MII_RT_TXCFG_TX_32_MODE_EN |
+ (TX_START_DELAY << PRUSS_MII_RT_TXCFG_TX_START_DELAY_SHIFT) |
+ (TX_CLK_DELAY_100M << PRUSS_MII_RT_TXCFG_TX_CLK_DELAY_SHIFT);
+
+ /* Configuration of Port 0 Tx */
+ txcfg_reg = PRUSS_MII_RT_TXCFG0;
+
+ regmap_write(mii_rt, txcfg_reg, txcfg);
+
+ txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
+
+ /* Configuration of Port 1 Tx */
+ txcfg_reg = PRUSS_MII_RT_TXCFG1;
+
+ regmap_write(mii_rt, txcfg_reg, txcfg);
+
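+	/* The txcfg/txcfg_reg locals are reused below to program the RX
+	 * min/max frame-size registers (RX_FRMS0/1).
+	 */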
+ txcfg_reg = PRUSS_MII_RT_RX_FRMS0;
+
+	/* Min frame length should be set to 64 to allow reception of standard
+	 * Ethernet frames such as PTP, LLDP that will not carry the tag/rct.
+	 * The actual size written to the register is size - 1 per the TRM.
+	 * This also includes CRC/FCS.
+	 */
+ txcfg = FIELD_PREP(PRUSS_MII_RT_RX_FRMS_MIN_FRM_MASK,
+ (PRUSS_MII_RT_RX_FRMS_MIN_FRM - 1));
+
+	/* For EMAC, set the max frame size to 1528, i.e. the size including a
+	 * VLAN tag. The actual size written to the register is size - 1 as per
+	 * the TRM. Since the driver supports run-time change of protocol, it
+	 * must overwrite these values based on the Ethernet type.
+	 */
+ txcfg |= FIELD_PREP(PRUSS_MII_RT_RX_FRMS_MAX_FRM_MASK,
+ (PRUSS_MII_RT_RX_FRMS_MAX_SUPPORT_EMAC - 1));
+
+ regmap_write(mii_rt, txcfg_reg, txcfg);
+
+ txcfg_reg = PRUSS_MII_RT_RX_FRMS1;
+
+ regmap_write(mii_rt, txcfg_reg, txcfg);
+}
+
+static void icssm_prueth_clearmem(struct prueth *prueth, enum prueth_mem region)
+{
+ memset_io(prueth->mem[region].va, 0, prueth->mem[region].size);
+}
+
+static void icssm_prueth_hostinit(struct prueth *prueth)
+{
+ /* Clear shared RAM */
+ icssm_prueth_clearmem(prueth, PRUETH_MEM_SHARED_RAM);
+
+ /* Clear OCMC RAM */
+ icssm_prueth_clearmem(prueth, PRUETH_MEM_OCMC);
+
+ /* Clear data RAMs */
+ if (prueth->eth_node[PRUETH_MAC0])
+ icssm_prueth_clearmem(prueth, PRUETH_MEM_DRAM0);
+ if (prueth->eth_node[PRUETH_MAC1])
+ icssm_prueth_clearmem(prueth, PRUETH_MEM_DRAM1);
+
+ /* Initialize host queues in shared RAM */
+ icssm_prueth_hostconfig(prueth);
+
+ /* Configure MII_RT */
+ icssm_prueth_mii_init(prueth);
+}
+
+/* This function initializes the driver in EMAC mode,
+ * based on eth_type
+ */
+static void icssm_prueth_init_ethernet_mode(struct prueth *prueth)
+{
+ icssm_prueth_hostinit(prueth);
+}
+
+static void icssm_prueth_port_enable(struct prueth_emac *emac, bool enable)
+{
+ struct prueth *prueth = emac->prueth;
+ void __iomem *port_ctrl;
+ void __iomem *ram;
+
+ ram = prueth->mem[emac->dram].va;
+ port_ctrl = ram + PORT_CONTROL_ADDR;
+ writeb(!!enable, port_ctrl);
+}
+
+static int icssm_prueth_emac_config(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ u32 sharedramaddr, ocmcaddr;
+ void __iomem *dram_base;
+ void __iomem *mac_addr;
+ void __iomem *dram;
+ void __iomem *sram;
+
+ /* PRU needs local shared RAM address for C28 */
+ sharedramaddr = ICSS_LOCAL_SHARED_RAM;
+	/* PRU needs real global OCMC address for C30 */
+ ocmcaddr = (u32)prueth->mem[PRUETH_MEM_OCMC].pa;
+ sram = prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+
+ /* Clear data RAM */
+ icssm_prueth_clearmem(prueth, emac->dram);
+
+ dram_base = prueth->mem[emac->dram].va;
+
+ /* setup mac address */
+ mac_addr = dram_base + PORT_MAC_ADDR;
+ memcpy_toio(mac_addr, emac->mac_addr, 6);
+
+ /* queue information table */
+ dram = dram_base + TX_CONTEXT_Q1_OFFSET_ADDR;
+ memcpy_toio(dram, queue_infos[emac->port_id],
+ sizeof(queue_infos[emac->port_id]));
+
+ /* queue table */
+ dram = dram_base + PORT_QUEUE_DESC_OFFSET;
+ memcpy_toio(dram, queue_descs[emac->port_id],
+ sizeof(queue_descs[emac->port_id]));
+
+ emac->rx_queue_descs = sram + HOST_QUEUE_DESC_OFFSET;
+ emac->tx_queue_descs = dram;
+
+ /* Set in constant table C28 of PRU0 to ICSS Shared memory */
+ pru_rproc_set_ctable(emac->pru, PRU_C28, sharedramaddr);
+
+ /* Set in constant table C30 of PRU0 to OCMC memory */
+ pru_rproc_set_ctable(emac->pru, PRU_C30, ocmcaddr);
+
+ return 0;
+}
+
+/* Called back by the PHY layer on a change in link state of the hw port */
+static void icssm_emac_adjust_link(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct phy_device *phydev = emac->phydev;
+ struct prueth *prueth = emac->prueth;
+ bool new_state = false;
+ enum prueth_mem region;
+ unsigned long flags;
+ u32 port_status = 0;
+ u32 txcfg, mask;
+ u32 delay;
+
+ spin_lock_irqsave(&emac->lock, flags);
+
+ if (phydev->link) {
+ /* check the mode of operation */
+ if (phydev->duplex != emac->duplex) {
+ new_state = true;
+ emac->duplex = phydev->duplex;
+ }
+ if (phydev->speed != emac->speed) {
+ new_state = true;
+ emac->speed = phydev->speed;
+ }
+ if (!emac->link) {
+ new_state = true;
+ emac->link = 1;
+ }
+ } else if (emac->link) {
+ new_state = true;
+ emac->link = 0;
+ }
+
+ if (new_state) {
+ phy_print_status(phydev);
+ region = emac->dram;
+
+			/* update phy/port status information based on PHY values */
+ if (emac->link) {
+ port_status |= PORT_LINK_MASK;
+
+ icssm_prueth_write_reg(prueth, region, PHY_SPEED_OFFSET,
+ emac->speed);
+
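+			/* (re)program the MII_RT TX clock delay for this port */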
+ delay = TX_CLK_DELAY_100M;
+ delay = delay << PRUSS_MII_RT_TXCFG_TX_CLK_DELAY_SHIFT;
+ mask = PRUSS_MII_RT_TXCFG_TX_CLK_DELAY_MASK;
+
+ if (emac->port_id)
+ txcfg = PRUSS_MII_RT_TXCFG1;
+ else
+ txcfg = PRUSS_MII_RT_TXCFG0;
+
+ regmap_update_bits(prueth->mii_rt, txcfg, mask, delay);
+ }
+
+ writeb(port_status, prueth->mem[region].va +
+ PORT_STATUS_OFFSET);
+ }
+
+ if (emac->link) {
+ /* reactivate the transmit queue if it is stopped */
+ if (netif_running(ndev) && netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+ } else {
+ if (!netif_queue_stopped(ndev))
+ netif_stop_queue(ndev);
+ }
+
+ spin_unlock_irqrestore(&emac->lock, flags);
+}
+
+static unsigned int
+icssm_get_buff_desc_count(const struct prueth_queue_info *queue)
+{
+ unsigned int buffer_desc_count;
+
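+	/* buffer_desc_end points at the last BD of the queue, so the count
+	 * is (end - start) / BD_SIZE + 1
+	 */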
+ buffer_desc_count = queue->buffer_desc_end -
+ queue->buffer_desc_offset;
+ buffer_desc_count /= BD_SIZE;
+ buffer_desc_count++;
+
+ return buffer_desc_count;
+}
+
+static void icssm_get_block(struct prueth_queue_desc __iomem *queue_desc,
+ const struct prueth_queue_info *queue,
+ int *write_block, int *read_block)
+{
+ *write_block = (readw(&queue_desc->wr_ptr) -
+ queue->buffer_desc_offset) / BD_SIZE;
+ *read_block = (readw(&queue_desc->rd_ptr) -
+ queue->buffer_desc_offset) / BD_SIZE;
+}
+
+/**
+ * icssm_emac_rx_irq - EMAC Rx interrupt handler
+ * @irq: interrupt number
+ * @dev_id: pointer to net_device
+ *
+ * EMAC interrupt handler - we only schedule NAPI here and do not process
+ * any packets.
+ *
+ * Return: IRQ_HANDLED if the interrupt was handled
+ */
+static irqreturn_t icssm_emac_rx_irq(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ if (likely(netif_running(ndev))) {
+ /* disable Rx system event */
+ disable_irq_nosync(emac->rx_irq);
+ napi_schedule(&emac->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * icssm_prueth_tx_enqueue - queue a packet to firmware for transmission
+ *
+ * @emac: EMAC data structure
+ * @skb: packet data buffer
+ * @queue_id: priority queue id
+ *
+ * Return: 0 on success, -ENOMEM if padding fails or -ENOBUFS if the
+ * queue is full
+ */
+static int icssm_prueth_tx_enqueue(struct prueth_emac *emac,
+ struct sk_buff *skb,
+ enum prueth_queue_id queue_id)
+{
+ struct prueth_queue_desc __iomem *queue_desc;
+ const struct prueth_queue_info *txqueue;
+ struct net_device *ndev = emac->ndev;
+ unsigned int buffer_desc_count;
+ int free_blocks, update_block;
+ bool buffer_wrapped = false;
+ int write_block, read_block;
+ void *src_addr, *dst_addr;
+ int pkt_block_size;
+ void __iomem *dram;
+ int txport, pktlen;
+ u16 update_wr_ptr;
+ u32 wr_buf_desc;
+ void *ocmc_ram;
+
+ dram = emac->prueth->mem[emac->dram].va;
+ if (eth_skb_pad(skb)) {
+ if (netif_msg_tx_err(emac) && net_ratelimit())
+ netdev_err(ndev, "packet pad failed\n");
+ return -ENOMEM;
+ }
+
+ /* which port to tx: MII0 or MII1 */
+ txport = emac->tx_port_queue;
+ src_addr = skb->data;
+ pktlen = skb->len;
+ /* Get the tx queue */
+ queue_desc = emac->tx_queue_descs + queue_id;
+ txqueue = &queue_infos[txport][queue_id];
+
+ buffer_desc_count = icssm_get_buff_desc_count(txqueue);
+
+ /* the PRU firmware deals mostly in pointers already
+ * offset into ram, we would like to deal in indexes
+ * within the queue we are working with for code
+ * simplicity, calculate this here
+ */
+ icssm_get_block(queue_desc, txqueue, &write_block, &read_block);
+
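+	/* Free space in the ring: blocks from the write pointer to the end
+	 * plus blocks before the read pointer; equal pointers mean the ring
+	 * is empty.
+	 */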
+ if (write_block > read_block) {
+ free_blocks = buffer_desc_count - write_block;
+ free_blocks += read_block;
+ } else if (write_block < read_block) {
+ free_blocks = read_block - write_block;
+ } else { /* they are all free */
+ free_blocks = buffer_desc_count;
+ }
+
+ pkt_block_size = DIV_ROUND_UP(pktlen, ICSS_BLOCK_SIZE);
+ if (pkt_block_size > free_blocks) /* out of queue space */
+ return -ENOBUFS;
+
+ /* calculate end BD address post write */
+ update_block = write_block + pkt_block_size;
+
+ /* Check for wrap around */
+ if (update_block >= buffer_desc_count) {
+ update_block %= buffer_desc_count;
+ buffer_wrapped = true;
+ }
+
+ /* OCMC RAM is not cached and write order is not important */
+ ocmc_ram = (__force void *)emac->prueth->mem[PRUETH_MEM_OCMC].va;
+ dst_addr = ocmc_ram + txqueue->buffer_offset +
+ (write_block * ICSS_BLOCK_SIZE);
+
+ /* Copy the data from socket buffer(DRAM) to PRU buffers(OCMC) */
+ if (buffer_wrapped) { /* wrapped around buffer */
+ int bytes = (buffer_desc_count - write_block) * ICSS_BLOCK_SIZE;
+ int remaining;
+
+		/* bytes is an integral multiple of ICSS_BLOCK_SIZE, but the
+		 * entire packet may have fit within the last BD if pktlen
+		 * is not an integral multiple of ICSS_BLOCK_SIZE
+		 */
+ if (pktlen < bytes)
+ bytes = pktlen;
+
+ /* copy non-wrapped part */
+ memcpy(dst_addr, src_addr, bytes);
+
+ /* copy wrapped part */
+ src_addr += bytes;
+ remaining = pktlen - bytes;
+ dst_addr = ocmc_ram + txqueue->buffer_offset;
+ memcpy(dst_addr, src_addr, remaining);
+ } else {
+ memcpy(dst_addr, src_addr, pktlen);
+ }
+
+ /* update first buffer descriptor */
+ wr_buf_desc = (pktlen << PRUETH_BD_LENGTH_SHIFT) &
+ PRUETH_BD_LENGTH_MASK;
+ writel(wr_buf_desc, dram + readw(&queue_desc->wr_ptr));
+
+ /* update the write pointer in this queue descriptor, the firmware
+ * polls for this change so this will signal the start of transmission
+ */
+ update_wr_ptr = txqueue->buffer_desc_offset + (update_block * BD_SIZE);
+ writew(update_wr_ptr, &queue_desc->wr_ptr);
+
+ return 0;
+}
+
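+/**
+ * icssm_parse_packet_info - parse a buffer descriptor into packet info
+ * @prueth: prueth instance
+ * @buffer_descriptor: 32-bit buffer descriptor value read from shared RAM
+ * @pkt_info: filled in with the fields decoded from @buffer_descriptor
+ */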
+void icssm_parse_packet_info(struct prueth *prueth, u32 buffer_descriptor,
+ struct prueth_packet_info *pkt_info)
+{
+ pkt_info->shadow = !!(buffer_descriptor & PRUETH_BD_SHADOW_MASK);
+ pkt_info->port = (buffer_descriptor & PRUETH_BD_PORT_MASK) >>
+ PRUETH_BD_PORT_SHIFT;
+ pkt_info->length = (buffer_descriptor & PRUETH_BD_LENGTH_MASK) >>
+ PRUETH_BD_LENGTH_SHIFT;
+ pkt_info->broadcast = !!(buffer_descriptor & PRUETH_BD_BROADCAST_MASK);
+ pkt_info->error = !!(buffer_descriptor & PRUETH_BD_ERROR_MASK);
+ pkt_info->lookup_success = !!(buffer_descriptor &
+ PRUETH_BD_LOOKUP_SUCCESS_MASK);
+ pkt_info->flood = !!(buffer_descriptor & PRUETH_BD_SW_FLOOD_MASK);
+ pkt_info->timestamp = !!(buffer_descriptor & PRUETH_BD_TIMESTAMP_MASK);
+}
+
+/**
+ * icssm_emac_rx_packet - EMAC Receive function
+ *
+ * @emac: EMAC data structure
+ * @bd_rd_ptr: Buffer descriptor read pointer
+ * @pkt_info: packet information structure
+ * @rxqueue: Receive queue information structure
+ *
+ * Get a packet from the receive queue
+ *
+ * Return: 0 on success, -ENOMEM if an skb cannot be allocated
+ */
+int icssm_emac_rx_packet(struct prueth_emac *emac, u16 *bd_rd_ptr,
+ struct prueth_packet_info *pkt_info,
+ const struct prueth_queue_info *rxqueue)
+{
+ struct net_device *ndev = emac->ndev;
+ unsigned int buffer_desc_count;
+ int read_block, update_block;
+ unsigned int actual_pkt_len;
+ bool buffer_wrapped = false;
+ void *src_addr, *dst_addr;
+ struct sk_buff *skb;
+ int pkt_block_size;
+ void *ocmc_ram;
+
+ /* the PRU firmware deals mostly in pointers already
+ * offset into ram, we would like to deal in indexes
+ * within the queue we are working with for code
+ * simplicity, calculate this here
+ */
+ buffer_desc_count = icssm_get_buff_desc_count(rxqueue);
+ read_block = (*bd_rd_ptr - rxqueue->buffer_desc_offset) / BD_SIZE;
+ pkt_block_size = DIV_ROUND_UP(pkt_info->length, ICSS_BLOCK_SIZE);
+
+ /* calculate end BD address post read */
+ update_block = read_block + pkt_block_size;
+
+ /* Check for wrap around */
+ if (update_block >= buffer_desc_count) {
+ update_block %= buffer_desc_count;
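+		/* update_block == 0 means the packet ended exactly at the
+		 * end of the ring, so no data actually wrapped
+		 */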
+ if (update_block)
+ buffer_wrapped = true;
+ }
+
+ /* calculate new pointer in ram */
+ *bd_rd_ptr = rxqueue->buffer_desc_offset + (update_block * BD_SIZE);
+
+ actual_pkt_len = pkt_info->length;
+
+ /* Allocate a socket buffer for this packet */
+ skb = netdev_alloc_skb_ip_align(ndev, actual_pkt_len);
+ if (!skb) {
+ if (netif_msg_rx_err(emac) && net_ratelimit())
+ netdev_err(ndev, "failed rx buffer alloc\n");
+ return -ENOMEM;
+ }
+
+ dst_addr = skb->data;
+
+ /* OCMC RAM is not cached and read order is not important */
+ ocmc_ram = (__force void *)emac->prueth->mem[PRUETH_MEM_OCMC].va;
+
+ /* Get the start address of the first buffer from
+ * the read buffer description
+ */
+ src_addr = ocmc_ram + rxqueue->buffer_offset +
+ (read_block * ICSS_BLOCK_SIZE);
+
+ /* Copy the data from PRU buffers(OCMC) to socket buffer(DRAM) */
+ if (buffer_wrapped) { /* wrapped around buffer */
+ int bytes = (buffer_desc_count - read_block) * ICSS_BLOCK_SIZE;
+ int remaining;
+		/* bytes is an integral multiple of ICSS_BLOCK_SIZE, but the
+		 * entire packet may have fit within the last BD if
+		 * pkt_info->length is not an integral multiple of
+		 * ICSS_BLOCK_SIZE
+		 */
+ if (pkt_info->length < bytes)
+ bytes = pkt_info->length;
+
+ /* copy non-wrapped part */
+ memcpy(dst_addr, src_addr, bytes);
+
+ /* copy wrapped part */
+ dst_addr += bytes;
+ remaining = actual_pkt_len - bytes;
+
+ src_addr = ocmc_ram + rxqueue->buffer_offset;
+ memcpy(dst_addr, src_addr, remaining);
+ src_addr += remaining;
+ } else {
+ memcpy(dst_addr, src_addr, actual_pkt_len);
+ src_addr += actual_pkt_len;
+ }
+
+ skb_put(skb, actual_pkt_len);
+
+ /* send packet up the stack */
+ skb->protocol = eth_type_trans(skb, ndev);
+ netif_receive_skb(skb);
+
+ /* update stats */
+ emac->stats.rx_bytes += actual_pkt_len;
+ emac->stats.rx_packets++;
+
+ return 0;
+}
+
+static int icssm_emac_rx_packets(struct prueth_emac *emac, int budget)
+{
+ struct prueth_queue_desc __iomem *queue_desc;
+ const struct prueth_queue_info *rxqueue;
+ struct prueth *prueth = emac->prueth;
+ struct prueth_packet_info pkt_info;
+ int start_queue, end_queue;
+ void __iomem *shared_ram;
+ u16 bd_rd_ptr, bd_wr_ptr;
+ u16 update_rd_ptr;
+ u8 overflow_cnt;
+ u32 rd_buf_desc;
+ int used = 0;
+ int i, ret;
+
+ shared_ram = emac->prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+
+ start_queue = emac->rx_queue_start;
+ end_queue = emac->rx_queue_end;
+
+ /* skip Rx if budget is 0 */
+ if (!budget)
+ return 0;
+
+ /* search host queues for packets */
+ for (i = start_queue; i <= end_queue; i++) {
+ queue_desc = emac->rx_queue_descs + i;
+ rxqueue = &queue_infos[PRUETH_PORT_HOST][i];
+
+ overflow_cnt = readb(&queue_desc->overflow_cnt);
+ if (overflow_cnt > 0) {
+ emac->stats.rx_over_errors += overflow_cnt;
+ /* reset to zero */
+ writeb(0, &queue_desc->overflow_cnt);
+ }
+
+ bd_rd_ptr = readw(&queue_desc->rd_ptr);
+ bd_wr_ptr = readw(&queue_desc->wr_ptr);
+
+ /* while packets are available in this queue */
+ while (bd_rd_ptr != bd_wr_ptr) {
+ /* get packet info from the read buffer descriptor */
+ rd_buf_desc = readl(shared_ram + bd_rd_ptr);
+ icssm_parse_packet_info(prueth, rd_buf_desc, &pkt_info);
+
+ if (pkt_info.length <= 0) {
+ /* a packet length of zero will cause us to
+ * never move the read pointer ahead, locking
+ * the driver, so we manually have to move it
+ * to the write pointer, discarding all
+ * remaining packets in this queue. This should
+ * never happen.
+ */
+ update_rd_ptr = bd_wr_ptr;
+ emac->stats.rx_length_errors++;
+ } else if (pkt_info.length > EMAC_MAX_FRM_SUPPORT) {
+ /* if the packet is too large we skip it but we
+ * still need to move the read pointer ahead
+ * and assume something is wrong with the read
+ * pointer as the firmware should be filtering
+ * these packets
+ */
+ update_rd_ptr = bd_wr_ptr;
+ emac->stats.rx_length_errors++;
+ } else {
+ update_rd_ptr = bd_rd_ptr;
+ ret = icssm_emac_rx_packet(emac, &update_rd_ptr,
+ &pkt_info, rxqueue);
+ if (ret)
+ return used;
+ used++;
+ }
+
+ /* after reading the buffer descriptor we clear it
+ * to prevent improperly moved read pointer errors
+ * from simply looking like old packets.
+ */
+ writel(0, shared_ram + bd_rd_ptr);
+
+ /* update read pointer in queue descriptor */
+ writew(update_rd_ptr, &queue_desc->rd_ptr);
+ bd_rd_ptr = update_rd_ptr;
+
+ /* all we have room for? */
+ if (used >= budget)
+ return used;
+ }
+ }
+
+ return used;
+}
+
+static int icssm_emac_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct prueth_emac *emac = container_of(napi, struct prueth_emac, napi);
+ int num_rx;
+
+ num_rx = icssm_emac_rx_packets(emac, budget);
+
+ if (num_rx < budget && napi_complete_done(napi, num_rx))
+ enable_irq(emac->rx_irq);
+
+ return num_rx;
+}
+
+static int icssm_emac_set_boot_pru(struct prueth_emac *emac,
+ struct net_device *ndev)
+{
+ const struct prueth_firmware *pru_firmwares;
+ struct prueth *prueth = emac->prueth;
+ const char *fw_name;
+ int ret;
+
+ pru_firmwares = &prueth->fw_data->fw_pru[emac->port_id - 1];
+ fw_name = pru_firmwares->fw_name[prueth->eth_type];
+ if (!fw_name) {
+ netdev_err(ndev, "eth_type %d not supported\n",
+ prueth->eth_type);
+ return -ENODEV;
+ }
+
+ ret = rproc_set_firmware(emac->pru, fw_name);
+ if (ret) {
+ netdev_err(ndev, "failed to set %s firmware: %d\n",
+ fw_name, ret);
+ return ret;
+ }
+
+ ret = rproc_boot(emac->pru);
+ if (ret) {
+ netdev_err(ndev, "failed to boot %s firmware: %d\n",
+ fw_name, ret);
+ return ret;
+ }
+ return ret;
+}
+
+static int icssm_emac_request_irqs(struct prueth_emac *emac)
+{
+ struct net_device *ndev = emac->ndev;
+ int ret;
+
+ ret = request_irq(emac->rx_irq, icssm_emac_rx_irq,
+ IRQF_TRIGGER_HIGH,
+ ndev->name, ndev);
+ if (ret) {
+ netdev_err(ndev, "unable to request RX IRQ\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static void icssm_ptp_dram_init(struct prueth_emac *emac)
+{
+ void __iomem *sram = emac->prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+ u64 temp64;
+
+ writew(0, sram + MII_RX_CORRECTION_OFFSET);
+ writew(0, sram + MII_TX_CORRECTION_OFFSET);
+
+ /* Initialize RCF to 1 (Linux N/A) */
+ writel(1 * 1024, sram + TIMESYNC_TC_RCF_OFFSET);
+
+ /* This flag will be set and cleared by firmware */
+ /* Write Sync0 period for sync signal generation in PTP
+ * memory in shared RAM
+ */
+ writel(200000000 / 50, sram + TIMESYNC_SYNC0_WIDTH_OFFSET);
+
+ /* Write CMP1 period for sync signal generation in PTP
+ * memory in shared RAM
+ */
+ temp64 = 1000000;
+ memcpy_toio(sram + TIMESYNC_CMP1_CMP_OFFSET, &temp64, sizeof(temp64));
+
+ /* Write Sync0 period for sync signal generation in PTP
+ * memory in shared RAM
+ */
+ writel(1000000, sram + TIMESYNC_CMP1_PERIOD_OFFSET);
+
+ /* Configures domainNumber list. Firmware supports 2 domains */
+ writeb(0, sram + TIMESYNC_DOMAIN_NUMBER_LIST);
+ writeb(0, sram + TIMESYNC_DOMAIN_NUMBER_LIST + 1);
+
+ /* Configure 1-step/2-step */
+ writeb(1, sram + DISABLE_SWITCH_SYNC_RELAY_OFFSET);
+
+ /* Configures the setting to Link local frame without HSR tag */
+ writeb(0, sram + LINK_LOCAL_FRAME_HAS_HSR_TAG);
+
+ /* Enable E2E/UDP PTP message timestamping */
+ writeb(1, sram + PTP_IPV4_UDP_E2E_ENABLE);
+}
+
+/**
+ * icssm_emac_ndo_open - EMAC device open
+ * @ndev: network adapter device
+ *
+ * Called when system wants to start the interface.
+ *
+ * Return: 0 for a successful open, or appropriate error code
+ */
+static int icssm_emac_ndo_open(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ int ret;
+
+ /* set h/w MAC as user might have re-configured */
+ ether_addr_copy(emac->mac_addr, ndev->dev_addr);
+
+ if (!prueth->emac_configured)
+ icssm_prueth_init_ethernet_mode(prueth);
+
+ icssm_prueth_emac_config(emac);
+
+ if (!prueth->emac_configured) {
+ icssm_ptp_dram_init(emac);
+ ret = icss_iep_init(prueth->iep, NULL, NULL, 0);
+ if (ret) {
+ netdev_err(ndev, "Failed to initialize iep: %d\n", ret);
+ goto iep_exit;
+ }
+ }
+
+ ret = icssm_emac_set_boot_pru(emac, ndev);
+ if (ret)
+ goto iep_exit;
+
+ ret = icssm_emac_request_irqs(emac);
+ if (ret)
+ goto rproc_shutdown;
+
+ napi_enable(&emac->napi);
+
+ /* start PHY */
+ phy_start(emac->phydev);
+
+ /* enable the port and vlan */
+ icssm_prueth_port_enable(emac, true);
+
+ prueth->emac_configured |= BIT(emac->port_id);
+
+ if (netif_msg_drv(emac))
+ dev_notice(&ndev->dev, "started\n");
+
+ return 0;
+
+rproc_shutdown:
+ rproc_shutdown(emac->pru);
+
+iep_exit:
+ if (!prueth->emac_configured)
+ icss_iep_exit(prueth->iep);
+
+ return ret;
+}
+
+/**
+ * icssm_emac_ndo_stop - EMAC device stop
+ * @ndev: network adapter device
+ *
+ * Called when system wants to stop or down the interface.
+ *
+ * Return: Always 0 (Success)
+ */
+static int icssm_emac_ndo_stop(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+
+ prueth->emac_configured &= ~BIT(emac->port_id);
+
+ /* disable the mac port */
+ icssm_prueth_port_enable(emac, false);
+
+ /* stop PHY */
+ phy_stop(emac->phydev);
+
+ napi_disable(&emac->napi);
+ hrtimer_cancel(&emac->tx_hrtimer);
+
+ /* stop the PRU */
+ rproc_shutdown(emac->pru);
+
+ /* free rx interrupts */
+ free_irq(emac->rx_irq, ndev);
+
+ if (netif_msg_drv(emac))
+ dev_notice(&ndev->dev, "stopped\n");
+
+ return 0;
+}
+
+/* VLAN-tag PCP to priority queue map for EMAC/Switch/HSR/PRP used by driver
+ * Index is PCP val / 2.
+ * low - pcp 0..3 maps to Q4 for Host
+ * high - pcp 4..7 maps to Q3 for Host
+ * low - pcp 0..3 maps to Q2 (FWD Queue) for PRU-x
+ * where x = 1 for PRUETH_PORT_MII0
+ * 0 for PRUETH_PORT_MII1
+ * high - pcp 4..7 maps to Q1 (FWD Queue) for PRU-x
+ */
+static const unsigned short emac_pcp_tx_priority_queue_map[] = {
+ PRUETH_QUEUE4, PRUETH_QUEUE4,
+ PRUETH_QUEUE3, PRUETH_QUEUE3,
+ PRUETH_QUEUE2, PRUETH_QUEUE2,
+ PRUETH_QUEUE1, PRUETH_QUEUE1,
+};
+
+static u16 icssm_prueth_get_tx_queue_id(struct prueth *prueth,
+ struct sk_buff *skb)
+{
+ u16 vlan_tci, pcp;
+ int err;
+
+ err = vlan_get_tag(skb, &vlan_tci);
+ if (likely(err))
+ pcp = 0;
+ else
+ pcp = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+
+	/* The code below (pcp >>= 1) is common to all protocols
+	 * (i.e. EMAC, RSTP, HSR and PRP):
+	 * pcp values 0,1 become index 0, mapped to QUEUE4
+	 * pcp values 2,3 become index 1, mapped to QUEUE4
+	 * pcp values 4,5 become index 2, mapped to QUEUE3
+	 * pcp values 6,7 become index 3, mapped to QUEUE3
+	 */
+ pcp >>= 1;
+
+ return emac_pcp_tx_priority_queue_map[pcp];
+}
+
+/**
+ * icssm_emac_ndo_start_xmit - EMAC Transmit function
+ * @skb: SKB pointer
+ * @ndev: EMAC network adapter
+ *
+ * Called by the system to transmit a packet - we queue the packet in
+ * EMAC hardware transmit queue
+ *
+ * Return: enum netdev_tx
+ */
+static enum netdev_tx icssm_emac_ndo_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int ret;
+ u16 qid;
+
+ qid = icssm_prueth_get_tx_queue_id(emac->prueth, skb);
+ ret = icssm_prueth_tx_enqueue(emac, skb, qid);
+ if (ret) {
+ if (ret != -ENOBUFS && netif_msg_tx_err(emac) &&
+ net_ratelimit())
+ netdev_err(ndev, "packet queue failed: %d\n", ret);
+ goto fail_tx;
+ }
+
+ emac->stats.tx_packets++;
+ emac->stats.tx_bytes += skb->len;
+ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+
+fail_tx:
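+	/* Out of TX queue space: stop the queue and arm a timer to wake it;
+	 * returning NETDEV_TX_BUSY makes the stack requeue this skb.
+	 */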
+ if (ret == -ENOBUFS) {
+ netif_stop_queue(ndev);
+ hrtimer_start(&emac->tx_hrtimer,
+ us_to_ktime(HR_TIMER_TX_DELAY_US),
+ HRTIMER_MODE_REL_PINNED);
+ ret = NETDEV_TX_BUSY;
+ } else {
+ /* error */
+ emac->stats.tx_dropped++;
+ ret = NET_XMIT_DROP;
+ }
+
+ return ret;
+}
+
+/**
+ * icssm_emac_ndo_get_stats64 - EMAC get statistics function
+ * @ndev: The EMAC network adapter
+ * @stats: rtnl_link_stats64 structure
+ *
+ * Called when system wants to get statistics from the device.
+ *
+ */
+static void icssm_emac_ndo_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ stats->rx_packets = emac->stats.rx_packets;
+ stats->rx_bytes = emac->stats.rx_bytes;
+ stats->tx_packets = emac->stats.tx_packets;
+ stats->tx_bytes = emac->stats.tx_bytes;
+ stats->tx_dropped = emac->stats.tx_dropped;
+ stats->rx_over_errors = emac->stats.rx_over_errors;
+ stats->rx_length_errors = emac->stats.rx_length_errors;
+}
+
+static const struct net_device_ops emac_netdev_ops = {
+ .ndo_open = icssm_emac_ndo_open,
+ .ndo_stop = icssm_emac_ndo_stop,
+ .ndo_start_xmit = icssm_emac_ndo_start_xmit,
+ .ndo_get_stats64 = icssm_emac_ndo_get_stats64,
+};
+
+/* get emac_port corresponding to eth_node name */
+static int icssm_prueth_node_port(struct device_node *eth_node)
+{
+ u32 port_id;
+ int ret;
+
+ ret = of_property_read_u32(eth_node, "reg", &port_id);
+ if (ret)
+ return ret;
+
+ if (port_id == 0)
+ return PRUETH_PORT_MII0;
+ else if (port_id == 1)
+ return PRUETH_PORT_MII1;
+ else
+ return PRUETH_PORT_INVALID;
+}
+
+/* get MAC instance corresponding to eth_node name */
+static int icssm_prueth_node_mac(struct device_node *eth_node)
+{
+ u32 port_id;
+ int ret;
+
+ ret = of_property_read_u32(eth_node, "reg", &port_id);
+ if (ret)
+ return ret;
+
+ if (port_id == 0)
+ return PRUETH_MAC0;
+ else if (port_id == 1)
+ return PRUETH_MAC1;
+ else
+ return PRUETH_MAC_INVALID;
+}
+
+static enum hrtimer_restart icssm_emac_tx_timer_callback(struct hrtimer *timer)
+{
+ struct prueth_emac *emac =
+ container_of(timer, struct prueth_emac, tx_hrtimer);
+
+ if (netif_queue_stopped(emac->ndev))
+ netif_wake_queue(emac->ndev);
+
+ return HRTIMER_NORESTART;
+}
+
+static int icssm_prueth_netdev_init(struct prueth *prueth,
+ struct device_node *eth_node)
+{
+ struct prueth_emac *emac;
+ struct net_device *ndev;
+ enum prueth_port port;
+ enum prueth_mac mac;
+ int ret;
+
+ port = icssm_prueth_node_port(eth_node);
+ if (port == PRUETH_PORT_INVALID)
+ return -EINVAL;
+
+ mac = icssm_prueth_node_mac(eth_node);
+ if (mac == PRUETH_MAC_INVALID)
+ return -EINVAL;
+
+ ndev = devm_alloc_etherdev(prueth->dev, sizeof(*emac));
+ if (!ndev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(ndev, prueth->dev);
+ emac = netdev_priv(ndev);
+ prueth->emac[mac] = emac;
+ emac->prueth = prueth;
+ emac->ndev = ndev;
+ emac->port_id = port;
+
+ /* by default eth_type is EMAC */
+ switch (port) {
+ case PRUETH_PORT_MII0:
+ emac->tx_port_queue = PRUETH_PORT_QUEUE_MII0;
+
+ /* packets from MII0 are on queues 1 through 2 */
+ emac->rx_queue_start = PRUETH_QUEUE1;
+ emac->rx_queue_end = PRUETH_QUEUE2;
+
+ emac->dram = PRUETH_MEM_DRAM0;
+ emac->pru = prueth->pru0;
+ break;
+ case PRUETH_PORT_MII1:
+ emac->tx_port_queue = PRUETH_PORT_QUEUE_MII1;
+
+ /* packets from MII1 are on queues 3 through 4 */
+ emac->rx_queue_start = PRUETH_QUEUE3;
+ emac->rx_queue_end = PRUETH_QUEUE4;
+
+ emac->dram = PRUETH_MEM_DRAM1;
+ emac->pru = prueth->pru1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ emac->rx_irq = of_irq_get_byname(eth_node, "rx");
+ if (emac->rx_irq < 0) {
+ ret = emac->rx_irq;
+ if (ret != -EPROBE_DEFER)
+ dev_err(prueth->dev, "could not get rx irq\n");
+ goto free;
+ }
+
+ /* get mac address from DT and set private and netdev addr */
+ ret = of_get_ethdev_address(eth_node, ndev);
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ eth_hw_addr_random(ndev);
+ dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n",
+ port, ndev->dev_addr);
+ }
+ ether_addr_copy(emac->mac_addr, ndev->dev_addr);
+
+ /* connect PHY */
+ emac->phydev = of_phy_get_and_connect(ndev, eth_node,
+ icssm_emac_adjust_link);
+ if (!emac->phydev) {
+ dev_dbg(prueth->dev, "PHY connection failed\n");
+ ret = -ENODEV;
+ goto free;
+ }
+
+ /* remove unsupported modes */
+ phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+
+ phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+
+ phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
+ phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
+
+ ndev->dev.of_node = eth_node;
+ ndev->netdev_ops = &emac_netdev_ops;
+
+ netif_napi_add(ndev, &emac->napi, icssm_emac_napi_poll);
+
+ hrtimer_setup(&emac->tx_hrtimer, &icssm_emac_tx_timer_callback,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
+
+ return 0;
+free:
+ emac->ndev = NULL;
+ prueth->emac[mac] = NULL;
+
+ return ret;
+}
+
+static void icssm_prueth_netdev_exit(struct prueth *prueth,
+ struct device_node *eth_node)
+{
+ struct prueth_emac *emac;
+ enum prueth_mac mac;
+
+ mac = icssm_prueth_node_mac(eth_node);
+ if (mac == PRUETH_MAC_INVALID)
+ return;
+
+ emac = prueth->emac[mac];
+ if (!emac)
+ return;
+
+ phy_disconnect(emac->phydev);
+
+ netif_napi_del(&emac->napi);
+ prueth->emac[mac] = NULL;
+}
+
+static int icssm_prueth_probe(struct platform_device *pdev)
+{
+ struct device_node *eth0_node = NULL, *eth1_node = NULL;
+ struct device_node *eth_node, *eth_ports_node;
+ enum pruss_pru_id pruss_id0, pruss_id1;
+ struct device *dev = &pdev->dev;
+ struct device_node *np;
+ struct prueth *prueth;
+ struct pruss *pruss;
+ int i, ret;
+
+ np = dev->of_node;
+ if (!np)
+ return -ENODEV; /* we don't support non DT */
+
+ prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
+ if (!prueth)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, prueth);
+ prueth->dev = dev;
+ prueth->fw_data = device_get_match_data(dev);
+
+ eth_ports_node = of_get_child_by_name(np, "ethernet-ports");
+ if (!eth_ports_node)
+ return -ENOENT;
+
+ for_each_child_of_node(eth_ports_node, eth_node) {
+ u32 reg;
+
+ if (strcmp(eth_node->name, "ethernet-port"))
+ continue;
+ ret = of_property_read_u32(eth_node, "reg", &reg);
+ if (ret < 0) {
+ dev_err(dev, "%pOF error reading port_id %d\n",
+ eth_node, ret);
+ of_node_put(eth_node);
+ return ret;
+ }
+
+ of_node_get(eth_node);
+
+ if (reg == 0 && !eth0_node) {
+ eth0_node = eth_node;
+ if (!of_device_is_available(eth0_node)) {
+ of_node_put(eth0_node);
+ eth0_node = NULL;
+ }
+ } else if (reg == 1 && !eth1_node) {
+ eth1_node = eth_node;
+ if (!of_device_is_available(eth1_node)) {
+ of_node_put(eth1_node);
+ eth1_node = NULL;
+ }
+ } else {
+ if (reg == 0 || reg == 1)
+ dev_err(dev, "duplicate port reg value: %d\n",
+ reg);
+ else
+ dev_err(dev, "invalid port reg value: %d\n",
+ reg);
+
+ of_node_put(eth_node);
+ }
+ }
+
+ of_node_put(eth_ports_node);
+
+ /* At least one node must be present and available else we fail */
+ if (!eth0_node && !eth1_node) {
+ dev_err(dev, "neither port0 nor port1 node available\n");
+ return -ENODEV;
+ }
+
+ prueth->eth_node[PRUETH_MAC0] = eth0_node;
+ prueth->eth_node[PRUETH_MAC1] = eth1_node;
+
+ prueth->mii_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-rt");
+ if (IS_ERR(prueth->mii_rt)) {
+ dev_err(dev, "couldn't get mii-rt syscon regmap\n");
+ ret = PTR_ERR(prueth->mii_rt);
+ goto put_eth;
+ }
+
+ if (eth0_node) {
+ prueth->pru0 = pru_rproc_get(np, 0, &pruss_id0);
+ if (IS_ERR(prueth->pru0)) {
+ ret = PTR_ERR(prueth->pru0);
+ dev_err_probe(dev, ret, "unable to get PRU0");
+ goto put_eth;
+ }
+ }
+
+ if (eth1_node) {
+ prueth->pru1 = pru_rproc_get(np, 1, &pruss_id1);
+ if (IS_ERR(prueth->pru1)) {
+ ret = PTR_ERR(prueth->pru1);
+ dev_err_probe(dev, ret, "unable to get PRU1");
+ goto put_pru0;
+ }
+ }
+
+ pruss = pruss_get(prueth->pru0 ? prueth->pru0 : prueth->pru1);
+ if (IS_ERR(pruss)) {
+ ret = PTR_ERR(pruss);
+ dev_err(dev, "unable to get pruss handle\n");
+ goto put_pru1;
+ }
+ prueth->pruss = pruss;
+
+ /* Configure PRUSS */
+ if (eth0_node)
+ pruss_cfg_gpimode(pruss, pruss_id0, PRUSS_GPI_MODE_MII);
+ if (eth1_node)
+ pruss_cfg_gpimode(pruss, pruss_id1, PRUSS_GPI_MODE_MII);
+ pruss_cfg_miirt_enable(pruss, true);
+ pruss_cfg_xfr_enable(pruss, PRU_TYPE_PRU, true);
+
+ /* Get PRUSS mem resources */
+ /* OCMC is system resource which we get separately */
+ for (i = 0; i < ARRAY_SIZE(pruss_mem_ids); i++) {
+ /* skip appropriate DRAM if not required */
+ if (!eth0_node && i == PRUETH_MEM_DRAM0)
+ continue;
+
+ if (!eth1_node && i == PRUETH_MEM_DRAM1)
+ continue;
+
+ ret = pruss_request_mem_region(pruss, pruss_mem_ids[i],
+ &prueth->mem[i]);
+ if (ret) {
+ dev_err(dev, "unable to get PRUSS resource %d: %d\n",
+ i, ret);
+ goto put_mem;
+ }
+ }
+
+ prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
+ if (!prueth->sram_pool) {
+ dev_err(dev, "unable to get SRAM pool\n");
+ ret = -ENODEV;
+ goto put_mem;
+ }
+
+ prueth->ocmc_ram_size = OCMC_RAM_SIZE;
+ /* Decreased by 8KB to address the reserved region for AM33x */
+ if (prueth->fw_data->driver_data == PRUSS_AM33XX)
+ prueth->ocmc_ram_size = (SZ_64K - SZ_8K);
+
+ prueth->mem[PRUETH_MEM_OCMC].va =
+ (void __iomem *)gen_pool_alloc(prueth->sram_pool,
+ prueth->ocmc_ram_size);
+ if (!prueth->mem[PRUETH_MEM_OCMC].va) {
+ dev_err(dev, "unable to allocate OCMC resource\n");
+ ret = -ENOMEM;
+ goto put_mem;
+ }
+ prueth->mem[PRUETH_MEM_OCMC].pa = gen_pool_virt_to_phys
+ (prueth->sram_pool, (unsigned long)
+ prueth->mem[PRUETH_MEM_OCMC].va);
+ prueth->mem[PRUETH_MEM_OCMC].size = prueth->ocmc_ram_size;
+ dev_dbg(dev, "ocmc: pa %pa va %p size %#zx\n",
+ &prueth->mem[PRUETH_MEM_OCMC].pa,
+ prueth->mem[PRUETH_MEM_OCMC].va,
+ prueth->mem[PRUETH_MEM_OCMC].size);
+
+ /* setup netdev interfaces */
+ if (eth0_node) {
+ ret = icssm_prueth_netdev_init(prueth, eth0_node);
+ if (ret) {
+ if (ret != -EPROBE_DEFER) {
+ dev_err(dev, "netdev init %s failed: %d\n",
+ eth0_node->name, ret);
+ }
+ goto free_pool;
+ }
+ }
+
+ if (eth1_node) {
+ ret = icssm_prueth_netdev_init(prueth, eth1_node);
+ if (ret) {
+ if (ret != -EPROBE_DEFER) {
+ dev_err(dev, "netdev init %s failed: %d\n",
+ eth1_node->name, ret);
+ }
+ goto netdev_exit;
+ }
+ }
+
+ prueth->iep = icss_iep_get(np);
+ if (IS_ERR(prueth->iep)) {
+ ret = PTR_ERR(prueth->iep);
+ dev_err(dev, "unable to get IEP\n");
+ goto netdev_exit;
+ }
+
+ /* register the network devices */
+ if (eth0_node) {
+ ret = register_netdev(prueth->emac[PRUETH_MAC0]->ndev);
+ if (ret) {
+ dev_err(dev, "can't register netdev for port MII0");
+ goto iep_put;
+ }
+
+ prueth->registered_netdevs[PRUETH_MAC0] =
+ prueth->emac[PRUETH_MAC0]->ndev;
+ }
+
+ if (eth1_node) {
+ ret = register_netdev(prueth->emac[PRUETH_MAC1]->ndev);
+ if (ret) {
+ dev_err(dev, "can't register netdev for port MII1");
+ goto netdev_unregister;
+ }
+
+ prueth->registered_netdevs[PRUETH_MAC1] =
+ prueth->emac[PRUETH_MAC1]->ndev;
+ }
+
+ dev_info(dev, "TI PRU ethernet driver initialized: %s EMAC mode\n",
+ (!eth0_node || !eth1_node) ? "single" : "dual");
+
+ if (eth1_node)
+ of_node_put(eth1_node);
+ if (eth0_node)
+ of_node_put(eth0_node);
+ return 0;
+
+netdev_unregister:
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ if (!prueth->registered_netdevs[i])
+ continue;
+ unregister_netdev(prueth->registered_netdevs[i]);
+ }
+
+iep_put:
+ icss_iep_put(prueth->iep);
+ prueth->iep = NULL;
+
+netdev_exit:
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ eth_node = prueth->eth_node[i];
+ if (!eth_node)
+ continue;
+
+ icssm_prueth_netdev_exit(prueth, eth_node);
+ }
+
+free_pool:
+ gen_pool_free(prueth->sram_pool,
+ (unsigned long)prueth->mem[PRUETH_MEM_OCMC].va,
+ prueth->ocmc_ram_size);
+
+put_mem:
+ for (i = PRUETH_MEM_DRAM0; i < PRUETH_MEM_OCMC; i++) {
+ if (prueth->mem[i].va)
+ pruss_release_mem_region(pruss, &prueth->mem[i]);
+ }
+ pruss_put(prueth->pruss);
+
+put_pru1:
+ if (eth1_node)
+ pru_rproc_put(prueth->pru1);
+put_pru0:
+ if (eth0_node)
+ pru_rproc_put(prueth->pru0);
+put_eth:
+ of_node_put(eth1_node);
+ of_node_put(eth0_node);
+
+ return ret;
+}
+
+static void icssm_prueth_remove(struct platform_device *pdev)
+{
+ struct prueth *prueth = platform_get_drvdata(pdev);
+ struct device_node *eth_node;
+ int i;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ if (!prueth->registered_netdevs[i])
+ continue;
+ unregister_netdev(prueth->registered_netdevs[i]);
+ }
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ eth_node = prueth->eth_node[i];
+ if (!eth_node)
+ continue;
+
+ icssm_prueth_netdev_exit(prueth, eth_node);
+ of_node_put(eth_node);
+ }
+
+ gen_pool_free(prueth->sram_pool,
+ (unsigned long)prueth->mem[PRUETH_MEM_OCMC].va,
+ prueth->ocmc_ram_size);
+
+ for (i = PRUETH_MEM_DRAM0; i < PRUETH_MEM_OCMC; i++) {
+ if (prueth->mem[i].va)
+ pruss_release_mem_region(prueth->pruss,
+ &prueth->mem[i]);
+ }
+
+ icss_iep_put(prueth->iep);
+ prueth->iep = NULL;
+
+ pruss_put(prueth->pruss);
+
+ if (prueth->eth_node[PRUETH_MAC0])
+ pru_rproc_put(prueth->pru0);
+ if (prueth->eth_node[PRUETH_MAC1])
+ pru_rproc_put(prueth->pru1);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int icssm_prueth_suspend(struct device *dev)
+{
+ struct prueth *prueth = dev_get_drvdata(dev);
+ struct net_device *ndev;
+ int i, ret;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ ndev = prueth->registered_netdevs[i];
+
+ if (!ndev)
+ continue;
+
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ ret = icssm_emac_ndo_stop(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to stop: %d", ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int icssm_prueth_resume(struct device *dev)
+{
+ struct prueth *prueth = dev_get_drvdata(dev);
+ struct net_device *ndev;
+ int i, ret;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ ndev = prueth->registered_netdevs[i];
+
+ if (!ndev)
+ continue;
+
+ if (netif_running(ndev)) {
+ ret = icssm_emac_ndo_open(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to start: %d", ret);
+ return ret;
+ }
+ netif_device_attach(ndev);
+ }
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops prueth_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(icssm_prueth_suspend, icssm_prueth_resume)
+};
+
+/* AM335x SoC-specific firmware data */
+static struct prueth_private_data am335x_prueth_pdata = {
+ .driver_data = PRUSS_AM33XX,
+ .fw_pru[PRUSS_PRU0] = {
+ .fw_name[PRUSS_ETHTYPE_EMAC] =
+ "ti-pruss/am335x-pru0-prueth-fw.elf",
+ },
+ .fw_pru[PRUSS_PRU1] = {
+ .fw_name[PRUSS_ETHTYPE_EMAC] =
+ "ti-pruss/am335x-pru1-prueth-fw.elf",
+ },
+};
+
+/* AM437x SoC-specific firmware data */
+static struct prueth_private_data am437x_prueth_pdata = {
+ .driver_data = PRUSS_AM43XX,
+ .fw_pru[PRUSS_PRU0] = {
+ .fw_name[PRUSS_ETHTYPE_EMAC] =
+ "ti-pruss/am437x-pru0-prueth-fw.elf",
+ },
+ .fw_pru[PRUSS_PRU1] = {
+ .fw_name[PRUSS_ETHTYPE_EMAC] =
+ "ti-pruss/am437x-pru1-prueth-fw.elf",
+ },
+};
+
+/* AM57xx SoC-specific firmware data */
+static struct prueth_private_data am57xx_prueth_pdata = {
+ .driver_data = PRUSS_AM57XX,
+ .fw_pru[PRUSS_PRU0] = {
+ .fw_name[PRUSS_ETHTYPE_EMAC] =
+ "ti-pruss/am57xx-pru0-prueth-fw.elf",
+ },
+ .fw_pru[PRUSS_PRU1] = {
+ .fw_name[PRUSS_ETHTYPE_EMAC] =
+ "ti-pruss/am57xx-pru1-prueth-fw.elf",
+ },
+};
+
+static const struct of_device_id prueth_dt_match[] = {
+ { .compatible = "ti,am57-prueth", .data = &am57xx_prueth_pdata, },
+ { .compatible = "ti,am4376-prueth", .data = &am437x_prueth_pdata, },
+ { .compatible = "ti,am3359-prueth", .data = &am335x_prueth_pdata, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, prueth_dt_match);
+
+static struct platform_driver prueth_driver = {
+ .probe = icssm_prueth_probe,
+ .remove = icssm_prueth_remove,
+ .driver = {
+ .name = "prueth",
+ .of_match_table = prueth_dt_match,
+ .pm = &prueth_dev_pm_ops,
+ },
+};
+module_platform_driver(prueth_driver);
+
+MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("PRUSS ICSSM Ethernet Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/ti/icssm/icssm_prueth.h b/drivers/net/ethernet/ti/icssm/icssm_prueth.h
new file mode 100644
index 000000000000..8e7e0af08144
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssm/icssm_prueth.h
@@ -0,0 +1,262 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Texas Instruments ICSSM Ethernet driver
+ *
+ * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#ifndef __NET_TI_PRUETH_H
+#define __NET_TI_PRUETH_H
+
+#include <linux/phy.h>
+#include <linux/types.h>
+#include <linux/pruss_driver.h>
+#include <linux/remoteproc/pruss.h>
+
+#include "icssm_switch.h"
+#include "icssm_prueth_ptp.h"
+
+/* ICSSM size of redundancy tag */
+#define ICSSM_LRE_TAG_SIZE 6
+
+/* PRUSS local memory map */
+#define ICSS_LOCAL_SHARED_RAM 0x00010000
+#define EMAC_MAX_PKTLEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
+/* The macro below is for 1528-byte frame support, to allow room for the
+ * redundancy tag as well
+ */
+#define EMAC_MAX_FRM_SUPPORT (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN + \
+ ICSSM_LRE_TAG_SIZE)
+
+/* PRU Ethernet Type - Ethernet functionality (protocol
+ * implemented) provided by the PRU firmware being loaded.
+ */
+enum pruss_ethtype {
+ PRUSS_ETHTYPE_EMAC = 0,
+ PRUSS_ETHTYPE_HSR,
+ PRUSS_ETHTYPE_PRP,
+ PRUSS_ETHTYPE_SWITCH,
+ PRUSS_ETHTYPE_MAX,
+};
+
+#define PRUETH_IS_EMAC(p) ((p)->eth_type == PRUSS_ETHTYPE_EMAC)
+#define PRUETH_IS_SWITCH(p) ((p)->eth_type == PRUSS_ETHTYPE_SWITCH)
+
+/**
+ * struct prueth_queue_desc - Queue descriptor
+ * @rd_ptr: Read pointer, points to a buffer descriptor in Shared PRU RAM.
+ * @wr_ptr: Write pointer, points to a buffer descriptor in Shared PRU RAM.
+ * @busy_s: Slave queue busy flag, set by the slave (us) to request access
+ *	from the master (PRU).
+ * @status: Bit field status register, Bits:
+ * 0: Master queue busy flag.
+ * 1: Packet has been placed in collision queue.
+ * 2: Packet has been discarded due to overflow.
+ * @max_fill_level: Maximum queue usage seen.
+ * @overflow_cnt: Count of queue overflows.
+ *
+ * Each port has up to 4 queues of variable length. Each queue is processed
+ * as a ring buffer with read and write pointers. Both pointers are address
+ * pointers and increment by 4 for each buffer descriptor position. A queue
+ * has a length defined in constants and a status.
+ */
+struct prueth_queue_desc {
+ u16 rd_ptr;
+ u16 wr_ptr;
+ u8 busy_s;
+ u8 status;
+ u8 max_fill_level;
+ u8 overflow_cnt;
+};
+
+/**
+ * struct prueth_queue_info - Information about a queue in memory
+ * @buffer_offset: buffer offset in OCMC RAM
+ * @queue_desc_offset: queue descriptor offset in Shared RAM
+ * @buffer_desc_offset: buffer descriptors offset in Shared RAM
+ * @buffer_desc_end: end address of buffer descriptors in Shared RAM
+ */
+struct prueth_queue_info {
+ u16 buffer_offset;
+ u16 queue_desc_offset;
+ u16 buffer_desc_offset;
+ u16 buffer_desc_end;
+};
+
+/**
+ * struct prueth_packet_info - Info about a packet in buffer
+ * @shadow: this packet is stored in the collision queue
+ * @port: port packet is on
+ * @length: length of packet
+ * @broadcast: this packet is a broadcast packet
+ * @error: this packet has an error
+ * @lookup_success: src mac found in FDB
+ * @flood: packet is to be flooded
+ * @timestamp: Specifies if timestamp is appended to the packet
+ */
+struct prueth_packet_info {
+ bool shadow;
+ unsigned int port;
+ unsigned int length;
+ bool broadcast;
+ bool error;
+ bool lookup_success;
+ bool flood;
+ bool timestamp;
+};
+
+/* In switch mode there are 3 real ports, i.e. 3 MAC addresses;
+ * however, Linux sees only the host-side port. The other 2 ports
+ * are the switch ports.
+ * In EMAC mode there are 2 real ports, i.e. 2 MAC addresses, and
+ * Linux sees both ports.
+ */
+enum prueth_port {
+ PRUETH_PORT_HOST = 0, /* host side port */
+ PRUETH_PORT_MII0, /* physical port MII 0 */
+ PRUETH_PORT_MII1, /* physical port MII 1 */
+ PRUETH_PORT_INVALID, /* Invalid prueth port */
+};
+
+enum prueth_mac {
+ PRUETH_MAC0 = 0,
+ PRUETH_MAC1,
+ PRUETH_NUM_MACS,
+ PRUETH_MAC_INVALID,
+};
+
+/* In both switch & emac modes there are 3 port queues
+ * EMAC mode:
+ * RX packets for both MII0 & MII1 ports come on
+ * QUEUE_HOST.
+ * TX packets for MII0 go on QUEUE_MII0, TX packets
+ * for MII1 go on QUEUE_MII1.
+ * Switch mode:
+ * Host port RX packets come on QUEUE_HOST
+ * TX packets might have to go on MII0 or MII1 or both.
+ * MII0 TX queue is QUEUE_MII0 and MII1 TX queue is
+ * QUEUE_MII1.
+ */
+enum prueth_port_queue_id {
+ PRUETH_PORT_QUEUE_HOST = 0,
+ PRUETH_PORT_QUEUE_MII0,
+ PRUETH_PORT_QUEUE_MII1,
+ PRUETH_PORT_QUEUE_MAX,
+};
+
+/* Each port queue has 4 queues and 1 collision queue */
+enum prueth_queue_id {
+ PRUETH_QUEUE1 = 0,
+ PRUETH_QUEUE2,
+ PRUETH_QUEUE3,
+ PRUETH_QUEUE4,
+ PRUETH_COLQUEUE, /* collision queue */
+};
+
+/**
+ * struct prueth_firmware - PRU Ethernet FW data
+ * @fw_name: firmware names of firmware to run on PRU
+ */
+struct prueth_firmware {
+ const char *fw_name[PRUSS_ETHTYPE_MAX];
+};
+
+/* PRUeth memory range identifiers */
+enum prueth_mem {
+ PRUETH_MEM_DRAM0 = 0,
+ PRUETH_MEM_DRAM1,
+ PRUETH_MEM_SHARED_RAM,
+ PRUETH_MEM_OCMC,
+ PRUETH_MEM_MAX,
+};
+
+enum pruss_device {
+ PRUSS_AM57XX = 0,
+ PRUSS_AM43XX,
+ PRUSS_AM33XX,
+ PRUSS_K2G
+};
+
+/**
+ * struct prueth_private_data - PRU Ethernet private data
+ * @driver_data: PRUSS device (SoC) identifier, see enum pruss_device
+ * @fw_pru: firmware names to be used for PRUSS ethernet usecases
+ */
+struct prueth_private_data {
+ enum pruss_device driver_data;
+ const struct prueth_firmware fw_pru[PRUSS_NUM_PRUS];
+};
+
+struct prueth_emac_stats {
+ u64 tx_packets;
+ u64 tx_dropped;
+ u64 tx_bytes;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_length_errors;
+ u64 rx_over_errors;
+};
+
+/* data for each emac port */
+struct prueth_emac {
+ struct prueth *prueth;
+ struct net_device *ndev;
+ struct napi_struct napi;
+
+ struct rproc *pru;
+ struct phy_device *phydev;
+ struct prueth_queue_desc __iomem *rx_queue_descs;
+ struct prueth_queue_desc __iomem *tx_queue_descs;
+
+ int link;
+ int speed;
+ int duplex;
+ int rx_irq;
+
+ enum prueth_port_queue_id tx_port_queue;
+ enum prueth_queue_id rx_queue_start;
+ enum prueth_queue_id rx_queue_end;
+ enum prueth_port port_id;
+ enum prueth_mem dram;
+ const char *phy_id;
+ u32 msg_enable;
+ u8 mac_addr[6];
+ phy_interface_t phy_if;
+
+ /* spin lock used to protect
+ * during link configuration
+ */
+ spinlock_t lock;
+
+ struct hrtimer tx_hrtimer;
+ struct prueth_emac_stats stats;
+};
+
+struct prueth {
+ struct device *dev;
+ struct pruss *pruss;
+ struct rproc *pru0, *pru1;
+ struct pruss_mem_region mem[PRUETH_MEM_MAX];
+ struct gen_pool *sram_pool;
+ struct regmap *mii_rt;
+ struct icss_iep *iep;
+
+ const struct prueth_private_data *fw_data;
+ struct prueth_fw_offsets *fw_offsets;
+
+ struct device_node *eth_node[PRUETH_NUM_MACS];
+ struct prueth_emac *emac[PRUETH_NUM_MACS];
+ struct net_device *registered_netdevs[PRUETH_NUM_MACS];
+
+ unsigned int eth_type;
+ size_t ocmc_ram_size;
+ u8 emac_configured;
+};
+
+void icssm_parse_packet_info(struct prueth *prueth, u32 buffer_descriptor,
+ struct prueth_packet_info *pkt_info);
+int icssm_emac_rx_packet(struct prueth_emac *emac, u16 *bd_rd_ptr,
+ struct prueth_packet_info *pkt_info,
+ const struct prueth_queue_info *rxqueue);
+
+#endif /* __NET_TI_PRUETH_H */
diff --git a/drivers/net/ethernet/ti/icssm/icssm_prueth_ptp.h b/drivers/net/ethernet/ti/icssm/icssm_prueth_ptp.h
new file mode 100644
index 000000000000..e0bf692beda1
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssm/icssm_prueth_ptp.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2021 Texas Instruments Incorporated - https://www.ti.com
+ */
+#ifndef PRUETH_PTP_H
+#define PRUETH_PTP_H
+
+#define RX_SYNC_TIMESTAMP_OFFSET_P1 0x8 /* 8 bytes */
+#define RX_PDELAY_REQ_TIMESTAMP_OFFSET_P1 0x14 /* 12 bytes */
+
+#define DISABLE_PTP_FRAME_FORWARDING_CTRL_OFFSET 0x14 /* 1 byte */
+
+#define RX_PDELAY_RESP_TIMESTAMP_OFFSET_P1 0x20 /* 12 bytes */
+#define RX_SYNC_TIMESTAMP_OFFSET_P2 0x2c /* 12 bytes */
+#define RX_PDELAY_REQ_TIMESTAMP_OFFSET_P2 0x38 /* 12 bytes */
+#define RX_PDELAY_RESP_TIMESTAMP_OFFSET_P2 0x44 /* 12 bytes */
+#define TIMESYNC_DOMAIN_NUMBER_LIST 0x50 /* 2 bytes */
+#define P1_SMA_LINE_DELAY_OFFSET 0x52 /* 4 bytes */
+#define P2_SMA_LINE_DELAY_OFFSET 0x56 /* 4 bytes */
+#define TIMESYNC_SECONDS_COUNT_OFFSET 0x5a /* 6 bytes */
+#define TIMESYNC_TC_RCF_OFFSET 0x60 /* 4 bytes */
+#define DUT_IS_MASTER_OFFSET 0x64 /* 1 byte */
+#define MASTER_PORT_NUM_OFFSET 0x65 /* 1 byte */
+#define SYNC_MASTER_MAC_OFFSET 0x66 /* 6 bytes */
+#define TX_TS_NOTIFICATION_OFFSET_SYNC_P1 0x6c /* 1 byte */
+#define TX_TS_NOTIFICATION_OFFSET_PDEL_REQ_P1 0x6d /* 1 byte */
+#define TX_TS_NOTIFICATION_OFFSET_PDEL_RES_P1 0x6e /* 1 byte */
+#define TX_TS_NOTIFICATION_OFFSET_SYNC_P2 0x6f /* 1 byte */
+#define TX_TS_NOTIFICATION_OFFSET_PDEL_REQ_P2 0x70 /* 1 byte */
+#define TX_TS_NOTIFICATION_OFFSET_PDEL_RES_P2 0x71 /* 1 byte */
+#define TX_SYNC_TIMESTAMP_OFFSET_P1 0x72 /* 12 bytes */
+#define TX_PDELAY_REQ_TIMESTAMP_OFFSET_P1 0x7e /* 12 bytes */
+#define TX_PDELAY_RESP_TIMESTAMP_OFFSET_P1 0x8a /* 12 bytes */
+#define TX_SYNC_TIMESTAMP_OFFSET_P2 0x96 /* 12 bytes */
+#define TX_PDELAY_REQ_TIMESTAMP_OFFSET_P2 0xa2 /* 12 bytes */
+#define TX_PDELAY_RESP_TIMESTAMP_OFFSET_P2 0xae /* 12 bytes */
+#define TIMESYNC_CTRL_VAR_OFFSET 0xba /* 1 byte */
+#define DISABLE_SWITCH_SYNC_RELAY_OFFSET 0xbb /* 1 byte */
+#define MII_RX_CORRECTION_OFFSET 0xbc /* 2 bytes */
+#define MII_TX_CORRECTION_OFFSET 0xbe /* 2 bytes */
+#define TIMESYNC_CMP1_CMP_OFFSET 0xc0 /* 8 bytes */
+#define TIMESYNC_SYNC0_CMP_OFFSET 0xc8 /* 8 bytes */
+#define TIMESYNC_CMP1_PERIOD_OFFSET 0xd0 /* 4 bytes */
+#define TIMESYNC_SYNC0_WIDTH_OFFSET 0xd4 /* 4 bytes */
+#define SINGLE_STEP_IEP_OFFSET_P1 0xd8 /* 8 bytes */
+#define SINGLE_STEP_SECONDS_OFFSET_P1 0xe0 /* 8 bytes */
+#define SINGLE_STEP_IEP_OFFSET_P2 0xe8 /* 8 bytes */
+#define SINGLE_STEP_SECONDS_OFFSET_P2 0xf0 /* 8 bytes */
+#define LINK_LOCAL_FRAME_HAS_HSR_TAG		0xf8 /* 1 byte */
+#define PTP_PREV_TX_TIMESTAMP_P1 0xf9 /* 8 bytes */
+#define PTP_PREV_TX_TIMESTAMP_P2 0x101 /* 8 bytes */
+#define PTP_CLK_IDENTITY_OFFSET 0x109 /* 8 bytes */
+#define PTP_SCRATCH_MEM				0x111 /* 16 bytes */
+#define PTP_IPV4_UDP_E2E_ENABLE 0x121 /* 1 byte */
+
+enum {
+ PRUETH_PTP_SYNC,
+ PRUETH_PTP_DLY_REQ,
+ PRUETH_PTP_DLY_RESP,
+ PRUETH_PTP_TS_EVENTS,
+};
+
+#define PRUETH_PTP_TS_SIZE 12
+#define PRUETH_PTP_TS_NOTIFY_SIZE 1
+#define PRUETH_PTP_TS_NOTIFY_MASK 0xff
+
+/* Bit definitions for TIMESYNC_CTRL */
+#define TIMESYNC_CTRL_BG_ENABLE BIT(0)
+#define TIMESYNC_CTRL_FORCED_2STEP BIT(1)
+
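+/* Each port has PRUETH_PTP_TS_EVENTS consecutive timestamp slots of
+ * PRUETH_PTP_TS_SIZE bytes starting at TX_SYNC_TIMESTAMP_OFFSET_P1, and a
+ * matching block of 1-byte notify flags starting at
+ * TX_TS_NOTIFICATION_OFFSET_SYNC_P1; the helpers below index into that
+ * layout by port and event.
+ */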
+static inline u32 icssm_prueth_tx_ts_offs_get(u8 port, u8 event)
+{
+ return TX_SYNC_TIMESTAMP_OFFSET_P1 + port *
+ PRUETH_PTP_TS_EVENTS * PRUETH_PTP_TS_SIZE +
+ event * PRUETH_PTP_TS_SIZE;
+}
+
+static inline u32 icssm_prueth_tx_ts_notify_offs_get(u8 port, u8 event)
+{
+ return TX_TS_NOTIFICATION_OFFSET_SYNC_P1 +
+ PRUETH_PTP_TS_EVENTS * PRUETH_PTP_TS_NOTIFY_SIZE * port +
+ event * PRUETH_PTP_TS_NOTIFY_SIZE;
+}
+
+#endif /* PRUETH_PTP_H */
diff --git a/drivers/net/ethernet/ti/icssm/icssm_switch.h b/drivers/net/ethernet/ti/icssm/icssm_switch.h
new file mode 100644
index 000000000000..8b494ffdcde7
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssm/icssm_switch.h
@@ -0,0 +1,257 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (C) 2015-2021 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#ifndef __ICSS_SWITCH_H
+#define __ICSS_SWITCH_H
+
+/* Basic Switch Parameters
+ * Used to auto compute offset addresses on L3 OCMC RAM. Do not modify these
+ * without changing firmware accordingly
+ */
+#define SWITCH_BUFFER_SIZE (64 * 1024) /* L3 buffer */
+#define ICSS_BLOCK_SIZE 32 /* data bytes per BD */
+#define BD_SIZE 4 /* byte buffer descriptor */
+#define NUM_QUEUES 4 /* Queues on Port 0/1/2 */
+
+#define PORT_LINK_MASK 0x1
+#define PORT_IS_HD_MASK 0x2
+
+/* Physical Port queue size (number of BDs). Same for both ports */
+#define QUEUE_1_SIZE 97 /* Network Management high */
+#define QUEUE_2_SIZE 97 /* Network Management low */
+#define QUEUE_3_SIZE 97 /* Protocol specific */
+#define QUEUE_4_SIZE 97 /* NRT (IP, ARP, ICMP) */
+
+/* Host queue size (number of BDs). Each BD points to data buffer of 32 bytes.
+ * HOST PORT QUEUES can buffer up to 4 full sized frames per queue
+ */
+#define HOST_QUEUE_1_SIZE 194 /* Protocol and VLAN priority 7 & 6 */
+#define HOST_QUEUE_2_SIZE 194 /* Protocol mid */
+#define HOST_QUEUE_3_SIZE 194 /* Protocol low */
+#define HOST_QUEUE_4_SIZE 194 /* NRT (IP, ARP, ICMP) */
+
+#define COL_QUEUE_SIZE 0
+
+/* NRT Buffer descriptor definition
+ * Each buffer descriptor points to a block of at most 32 bytes and is 32 bits
+ * wide so that it can be updated atomically.
+ * The PRU can address memory bytewise.
+ * The 32-bit descriptor is laid out as follows:
+ *
+ * Bits Name Meaning
+ * =============================================================================
+ * 0..7 Index points to index in buffer queue, max 256 x 32
+ * byte blocks can be addressed
+ * 6 LookupSuccess For switch, FDB lookup was successful (source
+ * MAC address found in FDB).
+ * For RED, NodeTable lookup was successful.
+ * 7 Flood Packet should be flooded (destination MAC
+ * address found in FDB). For switch only.
+ * 8..12 Block_length number of valid bytes in this specific block.
+ * Will be <=32 bytes on last block of packet
+ * 13 More "More" bit indicating that there are more blocks
+ * 14 Shadow indicates that "index" is pointing into shadow
+ * buffer
+ * 15 TimeStamp indicates that this packet has time stamp in
+ * separate buffer - only needed if PTP runs on
+ * host
+ * 16..17 Port different meaning for ingress and egress,
+ * Ingress: Port = 0 indicates phy port 1 and
+ * Port = 1 indicates phy port 2.
+ * Egress: 0 sends on phy port 1 and 1 sends on
+ * phy port 2. Port = 2 goes over MAC table
+ * look-up
+ * 18..28 Length 11 bit of total packet length which is put into
+ * first BD only so that host access only one BD
+ * 29 VlanTag indicates that the packet has a Length/Type
+ * field of 0x8100 with the VLAN tag in the following bytes
+ * 30 Broadcast indicates that the packet goes out on both physical
+ * ports; there will be two BDs but only one buffer
+ * 31 Error indicates there was an error in the packet
+ */
+#define PRUETH_BD_START_FLAG_MASK BIT(0)
+#define PRUETH_BD_START_FLAG_SHIFT 0
+
+#define PRUETH_BD_HSR_FRAME_MASK BIT(4)
+#define PRUETH_BD_HSR_FRAME_SHIFT 4
+
+#define PRUETH_BD_SUP_HSR_FRAME_MASK BIT(5)
+#define PRUETH_BD_SUP_HSR_FRAME_SHIFT 5
+
+#define PRUETH_BD_LOOKUP_SUCCESS_MASK BIT(6)
+#define PRUETH_BD_LOOKUP_SUCCESS_SHIFT 6
+
+#define PRUETH_BD_SW_FLOOD_MASK BIT(7)
+#define PRUETH_BD_SW_FLOOD_SHIFT 7
+
+#define PRUETH_BD_SHADOW_MASK BIT(14)
+#define PRUETH_BD_SHADOW_SHIFT 14
+
+#define PRUETH_BD_TIMESTAMP_MASK BIT(15)
+#define PRUETH_BD_TIMESTAMP_SHIFT 15
+
+#define PRUETH_BD_PORT_MASK GENMASK(17, 16)
+#define PRUETH_BD_PORT_SHIFT 16
+
+#define PRUETH_BD_LENGTH_MASK GENMASK(28, 18)
+#define PRUETH_BD_LENGTH_SHIFT 18
+
+#define PRUETH_BD_BROADCAST_MASK BIT(30)
+#define PRUETH_BD_BROADCAST_SHIFT 30
+
+#define PRUETH_BD_ERROR_MASK BIT(31)
+#define PRUETH_BD_ERROR_SHIFT 31
+
+/* The following offsets indicate which sections of the memory are used
+ * for EMAC internal tasks
+ */
+#define DRAM_START_OFFSET 0x1E98
+#define SRAM_START_OFFSET 0x400
+
+/* General Purpose Statistics
+ * These are present on both PRU0 and PRU1 DRAM
+ */
+/* base statistics offset */
+#define STATISTICS_OFFSET 0x1F00
+#define STAT_SIZE 0x98
+
+/* Offset for storing
+ * 1. Storm Prevention Params
+ * 2. PHY Speed Offset
+ * 3. Port Status Offset
+ * These are present on both PRU0 and PRU1
+ */
+/* 4 bytes */
+#define STORM_PREVENTION_OFFSET_BC (STATISTICS_OFFSET + STAT_SIZE)
+/* 4 bytes */
+#define PHY_SPEED_OFFSET (STATISTICS_OFFSET + STAT_SIZE + 4)
+/* 1 byte */
+#define PORT_STATUS_OFFSET (STATISTICS_OFFSET + STAT_SIZE + 8)
+/* 1 byte */
+#define COLLISION_COUNTER (STATISTICS_OFFSET + STAT_SIZE + 9)
+/* 4 bytes */
+#define RX_PKT_SIZE_OFFSET (STATISTICS_OFFSET + STAT_SIZE + 10)
+/* 4 bytes */
+#define PORT_CONTROL_ADDR (STATISTICS_OFFSET + STAT_SIZE + 14)
+/* 6 bytes */
+#define PORT_MAC_ADDR (STATISTICS_OFFSET + STAT_SIZE + 18)
+/* 1 byte */
+#define RX_INT_STATUS_OFFSET (STATISTICS_OFFSET + STAT_SIZE + 24)
+/* 4 bytes */
+#define STORM_PREVENTION_OFFSET_MC (STATISTICS_OFFSET + STAT_SIZE + 25)
+/* 4 bytes */
+#define STORM_PREVENTION_OFFSET_UC (STATISTICS_OFFSET + STAT_SIZE + 29)
+/* 4 bytes ? */
+#define STP_INVALID_STATE_OFFSET (STATISTICS_OFFSET + STAT_SIZE + 33)
+
+/* DRAM Offsets for EMAC
+ * Present on Both DRAM0 and DRAM1
+ */
+
+/* 4 queue descriptors for port tx = 32 bytes */
+#define TX_CONTEXT_Q1_OFFSET_ADDR (PORT_QUEUE_DESC_OFFSET + 32)
+#define PORT_QUEUE_DESC_OFFSET (ICSS_EMAC_TTS_CYC_TX_SOF + 8)
+
+/* EMAC Time Triggered Send Offsets */
+#define ICSS_EMAC_TTS_CYC_TX_SOF (ICSS_EMAC_TTS_PREV_TX_SOF + 8)
+#define ICSS_EMAC_TTS_PREV_TX_SOF \
+ (ICSS_EMAC_TTS_MISSED_CYCLE_CNT_OFFSET + 4)
+#define ICSS_EMAC_TTS_MISSED_CYCLE_CNT_OFFSET (ICSS_EMAC_TTS_STATUS_OFFSET \
+ + 4)
+#define ICSS_EMAC_TTS_STATUS_OFFSET (ICSS_EMAC_TTS_CFG_TIME_OFFSET + 4)
+#define ICSS_EMAC_TTS_CFG_TIME_OFFSET (ICSS_EMAC_TTS_CYCLE_PERIOD_OFFSET + 4)
+#define ICSS_EMAC_TTS_CYCLE_PERIOD_OFFSET \
+ (ICSS_EMAC_TTS_CYCLE_START_OFFSET + 8)
+#define ICSS_EMAC_TTS_CYCLE_START_OFFSET ICSS_EMAC_TTS_BASE_OFFSET
+#define ICSS_EMAC_TTS_BASE_OFFSET DRAM_START_OFFSET
+
+/* Shared RAM offsets for EMAC */
+
+/* Queue Descriptors */
+
+/* 4 queue descriptors for port 0 (host receive). 32 bytes */
+#define HOST_QUEUE_DESC_OFFSET (HOST_QUEUE_SIZE_ADDR + 16)
+
+/* table offset for queue size:
+ * 3 ports * 4 Queues * 1 byte offset = 12 bytes
+ */
+#define HOST_QUEUE_SIZE_ADDR (HOST_QUEUE_OFFSET_ADDR + 8)
+/* table offset for queue:
+ * 4 Queues * 2 byte offset = 8 bytes
+ */
+#define HOST_QUEUE_OFFSET_ADDR (HOST_QUEUE_DESCRIPTOR_OFFSET_ADDR + 8)
+/* table offset for Host queue descriptors:
+ * 1 ports * 4 Queues * 2 byte offset = 8 bytes
+ */
+#define HOST_QUEUE_DESCRIPTOR_OFFSET_ADDR (HOST_Q4_RX_CONTEXT_OFFSET + 8)
+
+/* Host Port Rx Context */
+#define HOST_Q4_RX_CONTEXT_OFFSET (HOST_Q3_RX_CONTEXT_OFFSET + 8)
+#define HOST_Q3_RX_CONTEXT_OFFSET (HOST_Q2_RX_CONTEXT_OFFSET + 8)
+#define HOST_Q2_RX_CONTEXT_OFFSET (HOST_Q1_RX_CONTEXT_OFFSET + 8)
+#define HOST_Q1_RX_CONTEXT_OFFSET (EMAC_PROMISCUOUS_MODE_OFFSET + 4)
+
+/* Promiscuous mode control */
+#define EMAC_P1_PROMISCUOUS_BIT BIT(0)
+#define EMAC_P2_PROMISCUOUS_BIT BIT(1)
+#define EMAC_PROMISCUOUS_MODE_OFFSET (EMAC_RESERVED + 4)
+#define EMAC_RESERVED EOF_48K_BUFFER_BD
+
+/* allow for a max 48k buffer; its descriptors span up to 0x1800 (6 kB) */
+#define EOF_48K_BUFFER_BD (P0_BUFFER_DESC_OFFSET + HOST_BD_SIZE + \
+ PORT_BD_SIZE)
+
+#define HOST_BD_SIZE ((HOST_QUEUE_1_SIZE + \
+ HOST_QUEUE_2_SIZE + HOST_QUEUE_3_SIZE + \
+ HOST_QUEUE_4_SIZE) * BD_SIZE)
+#define PORT_BD_SIZE ((QUEUE_1_SIZE + QUEUE_2_SIZE + \
+ QUEUE_3_SIZE + QUEUE_4_SIZE) * 2 * BD_SIZE)
+
+#define END_OF_BD_POOL (P2_Q4_BD_OFFSET + QUEUE_4_SIZE * BD_SIZE)
+#define P2_Q4_BD_OFFSET (P2_Q3_BD_OFFSET + QUEUE_3_SIZE * BD_SIZE)
+#define P2_Q3_BD_OFFSET (P2_Q2_BD_OFFSET + QUEUE_2_SIZE * BD_SIZE)
+#define P2_Q2_BD_OFFSET (P2_Q1_BD_OFFSET + QUEUE_1_SIZE * BD_SIZE)
+#define P2_Q1_BD_OFFSET (P1_Q4_BD_OFFSET + QUEUE_4_SIZE * BD_SIZE)
+#define P1_Q4_BD_OFFSET (P1_Q3_BD_OFFSET + QUEUE_3_SIZE * BD_SIZE)
+#define P1_Q3_BD_OFFSET (P1_Q2_BD_OFFSET + QUEUE_2_SIZE * BD_SIZE)
+#define P1_Q2_BD_OFFSET (P1_Q1_BD_OFFSET + QUEUE_1_SIZE * BD_SIZE)
+#define P1_Q1_BD_OFFSET (P0_Q4_BD_OFFSET + HOST_QUEUE_4_SIZE * BD_SIZE)
+#define P0_Q4_BD_OFFSET (P0_Q3_BD_OFFSET + HOST_QUEUE_3_SIZE * BD_SIZE)
+#define P0_Q3_BD_OFFSET (P0_Q2_BD_OFFSET + HOST_QUEUE_2_SIZE * BD_SIZE)
+#define P0_Q2_BD_OFFSET (P0_Q1_BD_OFFSET + HOST_QUEUE_1_SIZE * BD_SIZE)
+#define P0_Q1_BD_OFFSET P0_BUFFER_DESC_OFFSET
+#define P0_BUFFER_DESC_OFFSET SRAM_START_OFFSET
+
+/* Memory Usage of L3 OCMC RAM */
+
+/* L3 64KB Memory - mainly buffer Pool */
+#define END_OF_BUFFER_POOL (P2_Q4_BUFFER_OFFSET + QUEUE_4_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P2_Q4_BUFFER_OFFSET (P2_Q3_BUFFER_OFFSET + QUEUE_3_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P2_Q3_BUFFER_OFFSET (P2_Q2_BUFFER_OFFSET + QUEUE_2_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P2_Q2_BUFFER_OFFSET (P2_Q1_BUFFER_OFFSET + QUEUE_1_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P2_Q1_BUFFER_OFFSET (P1_Q4_BUFFER_OFFSET + QUEUE_4_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P1_Q4_BUFFER_OFFSET (P1_Q3_BUFFER_OFFSET + QUEUE_3_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P1_Q3_BUFFER_OFFSET (P1_Q2_BUFFER_OFFSET + QUEUE_2_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P1_Q2_BUFFER_OFFSET (P1_Q1_BUFFER_OFFSET + QUEUE_1_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P1_Q1_BUFFER_OFFSET (P0_Q4_BUFFER_OFFSET + HOST_QUEUE_4_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P0_Q4_BUFFER_OFFSET (P0_Q3_BUFFER_OFFSET + HOST_QUEUE_3_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P0_Q3_BUFFER_OFFSET (P0_Q2_BUFFER_OFFSET + HOST_QUEUE_2_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P0_Q2_BUFFER_OFFSET (P0_Q1_BUFFER_OFFSET + HOST_QUEUE_1_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P0_COL_BUFFER_OFFSET 0xEE00
+#define P0_Q1_BUFFER_OFFSET 0x0000
+
+#endif /* __ICSS_SWITCH_H */
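Since the PRUETH_BD_* masks and shifts above are the host's view of the firmware's 32-bit buffer descriptor word, a small hedged sketch of decoding one such word may help; the helper names below are invented for illustration and do not appear in this patch.

/* Illustrative sketch only: extract a few fields from one 32-bit buffer
 * descriptor word using the masks and shifts defined above.
 */
static inline u32 example_bd_port(u32 bd)
{
	return (bd & PRUETH_BD_PORT_MASK) >> PRUETH_BD_PORT_SHIFT;
}

static inline u32 example_bd_frame_length(u32 bd)
{
	return (bd & PRUETH_BD_LENGTH_MASK) >> PRUETH_BD_LENGTH_SHIFT;
}

static inline bool example_bd_is_broadcast(u32 bd)
{
	return !!(bd & PRUETH_BD_BROADCAST_MASK);
}

For instance, a descriptor word of 0x41010005 decodes as index 5, Port field 1, Length 64 and the Broadcast bit set.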
diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h
index 7007eb8bed36..b9cbd3b4a8a2 100644
--- a/drivers/net/ethernet/ti/netcp.h
+++ b/drivers/net/ethernet/ti/netcp.h
@@ -207,6 +207,11 @@ struct netcp_module {
int (*del_vid)(void *intf_priv, int vid);
int (*ioctl)(void *intf_priv, struct ifreq *req, int cmd);
int (*set_rx_mode)(void *intf_priv, bool promisc);
+ int (*hwtstamp_get)(void *intf_priv,
+ struct kernel_hwtstamp_config *cfg);
+ int (*hwtstamp_set)(void *intf_priv,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack);
/* used internally */
struct list_head module_list;
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 857820657bac..5ed1c46bbcb1 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1338,10 +1338,10 @@ int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
tx_pipe->dma_channel = knav_dma_open_channel(dev,
tx_pipe->dma_chan_name, &config);
- if (IS_ERR(tx_pipe->dma_channel)) {
+ if (!tx_pipe->dma_channel) {
dev_err(dev, "failed opening tx chan(%s)\n",
tx_pipe->dma_chan_name);
- ret = PTR_ERR(tx_pipe->dma_channel);
+ ret = -EINVAL;
goto err;
}
@@ -1359,7 +1359,7 @@ int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
return 0;
err:
- if (!IS_ERR_OR_NULL(tx_pipe->dma_channel))
+ if (tx_pipe->dma_channel)
knav_dma_close_channel(tx_pipe->dma_channel);
tx_pipe->dma_channel = NULL;
return ret;
@@ -1678,10 +1678,10 @@ static int netcp_setup_navigator_resources(struct net_device *ndev)
netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device,
netcp->dma_chan_name, &config);
- if (IS_ERR(netcp->rx_channel)) {
+ if (!netcp->rx_channel) {
dev_err(netcp->ndev_dev, "failed opening rx chan(%s\n",
netcp->dma_chan_name);
- ret = PTR_ERR(netcp->rx_channel);
+ ret = -EINVAL;
goto fail;
}
@@ -1781,6 +1781,62 @@ static int netcp_ndo_stop(struct net_device *ndev)
return 0;
}
+static int netcp_ndo_hwtstamp_get(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct netcp_intf_modpriv *intf_modpriv;
+ struct netcp_module *module;
+ int err = -EOPNOTSUPP;
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ for_each_module(netcp, intf_modpriv) {
+ module = intf_modpriv->netcp_module;
+ if (!module->hwtstamp_get)
+ continue;
+
+ err = module->hwtstamp_get(intf_modpriv->module_priv, config);
+ break;
+ }
+
+ return err;
+}
+
+static int netcp_ndo_hwtstamp_set(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct netcp_intf_modpriv *intf_modpriv;
+ struct netcp_module *module;
+ int ret = -1, err = -EOPNOTSUPP;
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ for_each_module(netcp, intf_modpriv) {
+ module = intf_modpriv->netcp_module;
+ if (!module->hwtstamp_set)
+ continue;
+
+ err = module->hwtstamp_set(intf_modpriv->module_priv, config,
+ extack);
+ if ((err < 0) && (err != -EOPNOTSUPP)) {
+ NL_SET_ERR_MSG_WEAK_MOD(extack,
+ "At least one module failed to setup HW timestamps");
+ ret = err;
+ goto out;
+ }
+ if (err == 0)
+ ret = err;
+ }
+
+out:
+ return (ret == 0) ? 0 : err;
+}
+
static int netcp_ndo_ioctl(struct net_device *ndev,
struct ifreq *req, int cmd)
{
@@ -1952,6 +2008,8 @@ static const struct net_device_ops netcp_netdev_ops = {
.ndo_tx_timeout = netcp_ndo_tx_timeout,
.ndo_select_queue = dev_pick_tx_zero,
.ndo_setup_tc = netcp_setup_tc,
+ .ndo_hwtstamp_get = netcp_ndo_hwtstamp_get,
+ .ndo_hwtstamp_set = netcp_ndo_hwtstamp_set,
};
static int netcp_create_interface(struct netcp_device *netcp_device,
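The two dispatch loops above walk the interface's modules and hand the kernel_hwtstamp_config to the first module that implements the new hooks. As a hedged sketch of the module-side contract (the struct and field names here are invented for illustration, not taken from the patch):

/* Illustrative sketch only: a minimal module-side hwtstamp_get. The core
 * expects the callback to fill the kernel_hwtstamp_config and return 0, or
 * a negative error such as -EOPNOTSUPP when timestamping is unsupported.
 */
struct example_priv {
	bool tx_ts_enabled;
	int rx_ts_filter;	/* one of the HWTSTAMP_FILTER_* values */
};

static int example_hwtstamp_get(void *intf_priv,
				struct kernel_hwtstamp_config *cfg)
{
	struct example_priv *priv = intf_priv;

	cfg->flags = 0;
	cfg->tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	cfg->rx_filter = priv->rx_ts_filter;

	return 0;
}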
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 63e686f0b119..8f46e9be76b1 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -2591,20 +2591,26 @@ static int gbe_rxtstamp(struct gbe_intf *gbe_intf, struct netcp_packet *p_info)
return 0;
}
-static int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *ifr)
+static int gbe_hwtstamp_get(void *intf_priv, struct kernel_hwtstamp_config *cfg)
{
- struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
- struct cpts *cpts = gbe_dev->cpts;
- struct hwtstamp_config cfg;
+ struct gbe_intf *gbe_intf = intf_priv;
+ struct gbe_priv *gbe_dev;
+ struct phy_device *phy;
+
+ gbe_dev = gbe_intf->gbe_dev;
- if (!cpts)
+ if (!gbe_dev->cpts)
+ return -EOPNOTSUPP;
+
+ phy = gbe_intf->slave->phy;
+ if (phy_has_hwtstamp(phy))
return -EOPNOTSUPP;
- cfg.flags = 0;
- cfg.tx_type = gbe_dev->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- cfg.rx_filter = gbe_dev->rx_ts_enabled;
+ cfg->flags = 0;
+ cfg->tx_type = gbe_dev->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg->rx_filter = gbe_dev->rx_ts_enabled;
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
static void gbe_hwtstamp(struct gbe_intf *gbe_intf)
@@ -2637,19 +2643,23 @@ static void gbe_hwtstamp(struct gbe_intf *gbe_intf)
writel(ctl, GBE_REG_ADDR(slave, port_regs, ts_ctl_ltype2));
}
-static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
+static int gbe_hwtstamp_set(void *intf_priv, struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
- struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
- struct cpts *cpts = gbe_dev->cpts;
- struct hwtstamp_config cfg;
+ struct gbe_intf *gbe_intf = intf_priv;
+ struct gbe_priv *gbe_dev;
+ struct phy_device *phy;
- if (!cpts)
+ gbe_dev = gbe_intf->gbe_dev;
+
+ if (!gbe_dev->cpts)
return -EOPNOTSUPP;
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
+ phy = gbe_intf->slave->phy;
+ if (phy_has_hwtstamp(phy))
+ return phy->mii_ts->hwtstamp_set(phy->mii_ts, cfg, extack);
- switch (cfg.tx_type) {
+ switch (cfg->tx_type) {
case HWTSTAMP_TX_OFF:
gbe_dev->tx_ts_enabled = 0;
break;
@@ -2660,7 +2670,7 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
return -ERANGE;
}
- switch (cfg.rx_filter) {
+ switch (cfg->rx_filter) {
case HWTSTAMP_FILTER_NONE:
gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_NONE;
break;
@@ -2668,7 +2678,7 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ cfg->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
@@ -2680,7 +2690,7 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
default:
return -ERANGE;
@@ -2688,7 +2698,7 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
gbe_hwtstamp(gbe_intf);
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
static void gbe_register_cpts(struct gbe_priv *gbe_dev)
@@ -2745,12 +2755,15 @@ static inline void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
{
}
-static inline int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *req)
+static inline int gbe_hwtstamp_get(void *intf_priv,
+ struct kernel_hwtstamp_config *cfg)
{
return -EOPNOTSUPP;
}
-static inline int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *req)
+static inline int gbe_hwtstamp_set(void *intf_priv,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
@@ -2816,15 +2829,6 @@ static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
struct gbe_intf *gbe_intf = intf_priv;
struct phy_device *phy = gbe_intf->slave->phy;
- if (!phy_has_hwtstamp(phy)) {
- switch (cmd) {
- case SIOCGHWTSTAMP:
- return gbe_hwtstamp_get(gbe_intf, req);
- case SIOCSHWTSTAMP:
- return gbe_hwtstamp_set(gbe_intf, req);
- }
- }
-
if (phy)
return phy_mii_ioctl(phy, req, cmd);
@@ -2833,7 +2837,7 @@ static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
static void netcp_ethss_timer(struct timer_list *t)
{
- struct gbe_priv *gbe_dev = from_timer(gbe_dev, t, timer);
+ struct gbe_priv *gbe_dev = timer_container_of(gbe_dev, t, timer);
struct gbe_intf *gbe_intf;
struct gbe_slave *slave;
@@ -3796,7 +3800,7 @@ static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
{
struct gbe_priv *gbe_dev = inst_priv;
- del_timer_sync(&gbe_dev->timer);
+ timer_delete_sync(&gbe_dev->timer);
cpts_release(gbe_dev->cpts);
cpsw_ale_stop(gbe_dev->ale);
netcp_txpipe_close(&gbe_dev->tx_pipe);
@@ -3824,6 +3828,8 @@ static struct netcp_module gbe_module = {
.add_vid = gbe_add_vid,
.del_vid = gbe_del_vid,
.ioctl = gbe_ioctl,
+ .hwtstamp_get = gbe_hwtstamp_get,
+ .hwtstamp_set = gbe_hwtstamp_set,
};
static struct netcp_module xgbe_module = {
@@ -3841,6 +3847,8 @@ static struct netcp_module xgbe_module = {
.add_vid = gbe_add_vid,
.del_vid = gbe_del_vid,
.ioctl = gbe_ioctl,
+ .hwtstamp_get = gbe_hwtstamp_get,
+ .hwtstamp_set = gbe_hwtstamp_set,
};
static int __init keystone_gbe_init(void)
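With the conversion above, SIOCGHWTSTAMP/SIOCSHWTSTAMP handling moves out of gbe_ioctl() into the generic ndo_hwtstamp_get/set path, and the core now performs the copy_from_user/copy_to_user that the driver used to do by hand. Userspace is unchanged; for reference, a minimal and purely illustrative request still looks like the standard hwtstamp_config ioctl (the interface name and function below are placeholders):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Illustrative only: enable TX timestamps and PTPv2 RX filtering on "eth0". */
static int example_enable_hwtstamp(int sock_fd)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	return ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
}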
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index b3da76efa8f5..a55b0f951181 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -332,13 +332,13 @@ static void tlan_stop(struct net_device *dev)
{
struct tlan_priv *priv = netdev_priv(dev);
- del_timer_sync(&priv->media_timer);
+ timer_delete_sync(&priv->media_timer);
tlan_read_and_clear_stats(dev, TLAN_RECORD);
outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
/* Reset and power down phy */
tlan_reset_adapter(dev);
if (priv->timer.function != NULL) {
- del_timer_sync(&priv->timer);
+ timer_delete_sync(&priv->timer);
priv->timer.function = NULL;
}
}
@@ -1815,7 +1815,7 @@ ThunderLAN driver timer function
static void tlan_timer(struct timer_list *t)
{
- struct tlan_priv *priv = from_timer(priv, t, timer);
+ struct tlan_priv *priv = timer_container_of(priv, t, timer);
struct net_device *dev = priv->dev;
u32 elapsed;
unsigned long flags = 0;
@@ -2746,7 +2746,7 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev)
static void tlan_phy_monitor(struct timer_list *t)
{
- struct tlan_priv *priv = from_timer(priv, t, media_timer);
+ struct tlan_priv *priv = timer_container_of(priv, t, media_timer);
struct net_device *dev = priv->dev;
u16 phy;
u16 phy_status;
diff --git a/drivers/net/ethernet/toshiba/Kconfig b/drivers/net/ethernet/toshiba/Kconfig
index 701e9b7c1c3b..b1e27e3b99eb 100644
--- a/drivers/net/ethernet/toshiba/Kconfig
+++ b/drivers/net/ethernet/toshiba/Kconfig
@@ -6,7 +6,7 @@
config NET_VENDOR_TOSHIBA
bool "Toshiba devices"
default y
- depends on PCI && (PPC_IBM_CELL_BLADE || MIPS) || PPC_PS3
+ depends on PCI && MIPS || PPC_PS3
help
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -39,15 +39,6 @@ config GELIC_WIRELESS
the driver automatically distinguishes the models, you can
safely enable this option even if you have a wireless-less model.
-config SPIDER_NET
- tristate "Spider Gigabit Ethernet driver"
- depends on PCI && PPC_IBM_CELL_BLADE
- select FW_LOADER
- select SUNGEM_PHY
- help
- This driver supports the Gigabit Ethernet chips present on the
- Cell Processor-Based Blades from IBM.
-
config TC35815
tristate "TOSHIBA TC35815 Ethernet support"
depends on PCI && MIPS
diff --git a/drivers/net/ethernet/toshiba/Makefile b/drivers/net/ethernet/toshiba/Makefile
index f434fd0f429e..27e2164cf7e9 100644
--- a/drivers/net/ethernet/toshiba/Makefile
+++ b/drivers/net/ethernet/toshiba/Makefile
@@ -6,6 +6,4 @@
obj-$(CONFIG_GELIC_NET) += ps3_gelic.o
gelic_wireless-$(CONFIG_GELIC_WIRELESS) += ps3_gelic_wireless.o
ps3_gelic-objs += ps3_gelic_net.o $(gelic_wireless-y)
-spidernet-y += spider_net.o spider_net_ethtool.o
-obj-$(CONFIG_SPIDER_NET) += spidernet.o
obj-$(CONFIG_TC35815) += tc35815.o
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 5ee8e8980393..d35d1f3c10a1 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -260,6 +260,7 @@ void gelic_card_down(struct gelic_card *card)
if (atomic_dec_if_positive(&card->users) == 0) {
pr_debug("%s: real do\n", __func__);
napi_disable(&card->napi);
+ timer_delete_sync(&card->rx_oom_timer);
/*
* Disable irq. Wireless interrupts will
* be disabled later if any
@@ -363,6 +364,7 @@ static int gelic_card_init_chain(struct gelic_card *card,
* gelic_descr_prepare_rx - reinitializes a rx descriptor
* @card: card structure
* @descr: descriptor to re-init
+ * @napi_mode: true when called from the NAPI poll path
*
* return 0 on success, <0 on failure
*
@@ -373,7 +375,8 @@ static int gelic_card_init_chain(struct gelic_card *card,
* must be a multiple of GELIC_NET_RXBUF_ALIGN.
*/
static int gelic_descr_prepare_rx(struct gelic_card *card,
- struct gelic_descr *descr)
+ struct gelic_descr *descr,
+ bool napi_mode)
{
static const unsigned int rx_skb_size =
ALIGN(GELIC_NET_MAX_FRAME, GELIC_NET_RXBUF_ALIGN) +
@@ -391,7 +394,10 @@ static int gelic_descr_prepare_rx(struct gelic_card *card,
descr->hw_regs.payload.dev_addr = 0;
descr->hw_regs.payload.size = 0;
- descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size);
+ if (napi_mode)
+ descr->skb = napi_alloc_skb(&card->napi, rx_skb_size);
+ else
+ descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size);
if (!descr->skb) {
descr->hw_regs.payload.dev_addr = 0; /* tell DMAC don't touch memory */
return -ENOMEM;
@@ -463,7 +469,7 @@ static int gelic_card_fill_rx_chain(struct gelic_card *card)
do {
if (!descr->skb) {
- ret = gelic_descr_prepare_rx(card, descr);
+ ret = gelic_descr_prepare_rx(card, descr, false);
if (ret)
goto rewind;
}
@@ -963,14 +969,15 @@ static void gelic_net_pass_skb_up(struct gelic_descr *descr,
netdev->stats.rx_bytes += skb->len;
/* pass skb up to stack */
- netif_receive_skb(skb);
+ napi_gro_receive(&card->napi, skb);
}
/**
* gelic_card_decode_one_descr - processes an rx descriptor
* @card: card structure
*
- * returns 1 if a packet has been sent to the stack, otherwise 0
+ * returns 1 if a packet has been sent to the stack, -ENOMEM on skb alloc
+ * failure, otherwise 0
*
* processes an rx descriptor by iommu-unmapping the data buffer and passing
* the packet up to the stack
@@ -981,16 +988,18 @@ static int gelic_card_decode_one_descr(struct gelic_card *card)
struct gelic_descr_chain *chain = &card->rx_chain;
struct gelic_descr *descr = chain->head;
struct net_device *netdev = NULL;
- int dmac_chain_ended;
+ int dmac_chain_ended = 0;
+ int prepare_rx_ret;
status = gelic_descr_get_status(descr);
if (status == GELIC_DESCR_DMA_CARDOWNED)
return 0;
- if (status == GELIC_DESCR_DMA_NOT_IN_USE) {
+ if (status == GELIC_DESCR_DMA_NOT_IN_USE || !descr->skb) {
dev_dbg(ctodev(card), "dormant descr? %p\n", descr);
- return 0;
+ dmac_chain_ended = 1;
+ goto refill;
}
/* netdevice select */
@@ -1048,9 +1057,10 @@ static int gelic_card_decode_one_descr(struct gelic_card *card)
refill:
/* is the current descriptor terminated with next_descr == NULL? */
- dmac_chain_ended =
- be32_to_cpu(descr->hw_regs.dmac_cmd_status) &
- GELIC_DESCR_RX_DMA_CHAIN_END;
+ if (!dmac_chain_ended)
+ dmac_chain_ended =
+ be32_to_cpu(descr->hw_regs.dmac_cmd_status) &
+ GELIC_DESCR_RX_DMA_CHAIN_END;
/*
* So that always DMAC can see the end
* of the descriptor chain to avoid
@@ -1062,10 +1072,11 @@ refill:
gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
/*
- * this call can fail, but for now, just leave this
- * descriptor without skb
+ * this call can fail, propagate the error
*/
- gelic_descr_prepare_rx(card, descr);
+ prepare_rx_ret = gelic_descr_prepare_rx(card, descr, true);
+ if (prepare_rx_ret)
+ return prepare_rx_ret;
chain->tail = descr;
chain->head = descr->next;
@@ -1087,6 +1098,13 @@ refill:
return 1;
}
+static void gelic_rx_oom_timer(struct timer_list *t)
+{
+ struct gelic_card *card = timer_container_of(card, t, rx_oom_timer);
+
+ napi_schedule(&card->napi);
+}
+
/**
* gelic_net_poll - NAPI poll function called by the stack to return packets
* @napi: napi structure
@@ -1099,14 +1117,22 @@ static int gelic_net_poll(struct napi_struct *napi, int budget)
{
struct gelic_card *card = container_of(napi, struct gelic_card, napi);
int packets_done = 0;
+ int work_result = 0;
while (packets_done < budget) {
- if (!gelic_card_decode_one_descr(card))
+ work_result = gelic_card_decode_one_descr(card);
+ if (work_result != 1)
break;
packets_done++;
}
+ if (work_result == -ENOMEM) {
+ napi_complete_done(napi, packets_done);
+ mod_timer(&card->rx_oom_timer, jiffies + 1);
+ return packets_done;
+ }
+
if (packets_done < budget) {
napi_complete_done(napi, packets_done);
gelic_card_rx_irq_on(card);
@@ -1576,6 +1602,8 @@ static struct gelic_card *gelic_alloc_card_net(struct net_device **netdev)
mutex_init(&card->updown_lock);
atomic_set(&card->users, 0);
+ timer_setup(&card->rx_oom_timer, gelic_rx_oom_timer, 0);
+
return card;
}
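The rx_oom_timer added above implements a common NAPI pattern: if refilling an RX descriptor fails with -ENOMEM, the poll routine completes NAPI without re-enabling the RX interrupt and arms a one-jiffy timer whose callback simply reschedules NAPI, so reception retries once memory pressure eases instead of spinning. A hedged, generic sketch of the same shape (all names below are placeholders, not gelic symbols):

/* Illustrative sketch only of the OOM-retry pattern used above. */
struct example_card {
	struct napi_struct napi;
	struct timer_list rx_oom_timer;
};

static int example_rx_one(struct example_card *card);	/* 1 = got packet, 0 = empty, -ENOMEM = no skb */
static void example_rx_irq_on(struct example_card *card);

static void example_rx_oom_timer(struct timer_list *t)
{
	struct example_card *card = timer_container_of(card, t, rx_oom_timer);

	napi_schedule(&card->napi);	/* retry the poll loop */
}

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_card *card = container_of(napi, struct example_card, napi);
	int done = 0, ret = 1;

	while (done < budget && (ret = example_rx_one(card)) == 1)
		done++;

	if (ret == -ENOMEM) {
		/* out of buffers: stop polling now, retry on the next jiffy */
		napi_complete_done(napi, done);
		mod_timer(&card->rx_oom_timer, jiffies + 1);
	} else if (done < budget) {
		napi_complete_done(napi, done);
		example_rx_irq_on(card);
	}
	return done;
}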
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
index f7d7931e51b7..c10f1984a5a1 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
@@ -268,6 +268,7 @@ struct gelic_vlan_id {
struct gelic_card {
struct napi_struct napi;
struct net_device *netdev[GELIC_PORT_MAX];
+ struct timer_list rx_oom_timer;
/*
* hypervisor requires irq_status should be
* 8 bytes aligned, but u64 member is
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
deleted file mode 100644
index a4937c18d7cb..000000000000
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ /dev/null
@@ -1,2556 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Network device driver for Cell Processor-Based Blade and Celleb platform
- *
- * (C) Copyright IBM Corp. 2005
- * (C) Copyright 2006 TOSHIBA CORPORATION
- *
- * Authors : Utz Bacher <utz.bacher@de.ibm.com>
- * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
- */
-
-#include <linux/compiler.h>
-#include <linux/crc32.h>
-#include <linux/delay.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/firmware.h>
-#include <linux/if_vlan.h>
-#include <linux/in.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/gfp.h>
-#include <linux/ioport.h>
-#include <linux/ip.h>
-#include <linux/kernel.h>
-#include <linux/mii.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/device.h>
-#include <linux/pci.h>
-#include <linux/skbuff.h>
-#include <linux/tcp.h>
-#include <linux/types.h>
-#include <linux/vmalloc.h>
-#include <linux/wait.h>
-#include <linux/workqueue.h>
-#include <linux/bitops.h>
-#include <linux/of.h>
-#include <net/checksum.h>
-
-#include "spider_net.h"
-
-MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com> and Jens Osterkamp " \
- "<Jens.Osterkamp@de.ibm.com>");
-MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(VERSION);
-MODULE_FIRMWARE(SPIDER_NET_FIRMWARE_NAME);
-
-static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT;
-static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT;
-
-module_param(rx_descriptors, int, 0444);
-module_param(tx_descriptors, int, 0444);
-
-MODULE_PARM_DESC(rx_descriptors, "number of descriptors used " \
- "in rx chains");
-MODULE_PARM_DESC(tx_descriptors, "number of descriptors used " \
- "in tx chain");
-
-char spider_net_driver_name[] = "spidernet";
-
-static const struct pci_device_id spider_net_pci_tbl[] = {
- { PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl);
-
-/**
- * spider_net_read_reg - reads an SMMIO register of a card
- * @card: device structure
- * @reg: register to read from
- *
- * returns the content of the specified SMMIO register.
- */
-static inline u32
-spider_net_read_reg(struct spider_net_card *card, u32 reg)
-{
- /* We use the powerpc specific variants instead of readl_be() because
- * we know spidernet is not a real PCI device and we can thus avoid the
- * performance hit caused by the PCI workarounds.
- */
- return in_be32(card->regs + reg);
-}
-
-/**
- * spider_net_write_reg - writes to an SMMIO register of a card
- * @card: device structure
- * @reg: register to write to
- * @value: value to write into the specified SMMIO register
- */
-static inline void
-spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
-{
- /* We use the powerpc specific variants instead of writel_be() because
- * we know spidernet is not a real PCI device and we can thus avoid the
- * performance hit caused by the PCI workarounds.
- */
- out_be32(card->regs + reg, value);
-}
-
-/**
- * spider_net_write_phy - write to phy register
- * @netdev: adapter to be written to
- * @mii_id: id of MII
- * @reg: PHY register
- * @val: value to be written to phy register
- *
- * spider_net_write_phy_register writes to an arbitrary PHY
- * register via the spider GPCWOPCMD register. We assume the queue does
- * not run full (not more than 15 commands outstanding).
- **/
-static void
-spider_net_write_phy(struct net_device *netdev, int mii_id,
- int reg, int val)
-{
- struct spider_net_card *card = netdev_priv(netdev);
- u32 writevalue;
-
- writevalue = ((u32)mii_id << 21) |
- ((u32)reg << 16) | ((u32)val);
-
- spider_net_write_reg(card, SPIDER_NET_GPCWOPCMD, writevalue);
-}
-
-/**
- * spider_net_read_phy - read from phy register
- * @netdev: network device to be read from
- * @mii_id: id of MII
- * @reg: PHY register
- *
- * Returns value read from PHY register
- *
- * spider_net_write_phy reads from an arbitrary PHY
- * register via the spider GPCROPCMD register
- **/
-static int
-spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
-{
- struct spider_net_card *card = netdev_priv(netdev);
- u32 readvalue;
-
- readvalue = ((u32)mii_id << 21) | ((u32)reg << 16);
- spider_net_write_reg(card, SPIDER_NET_GPCROPCMD, readvalue);
-
- /* we don't use semaphores to wait for an SPIDER_NET_GPROPCMPINT
- * interrupt, as we poll for the completion of the read operation
- * in spider_net_read_phy. Should take about 50 us
- */
- do {
- readvalue = spider_net_read_reg(card, SPIDER_NET_GPCROPCMD);
- } while (readvalue & SPIDER_NET_GPREXEC);
-
- readvalue &= SPIDER_NET_GPRDAT_MASK;
-
- return readvalue;
-}
-
-/**
- * spider_net_setup_aneg - initial auto-negotiation setup
- * @card: device structure
- **/
-static void
-spider_net_setup_aneg(struct spider_net_card *card)
-{
- struct mii_phy *phy = &card->phy;
- u32 advertise = 0;
- u16 bmsr, estat;
-
- bmsr = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
- estat = spider_net_read_phy(card->netdev, phy->mii_id, MII_ESTATUS);
-
- if (bmsr & BMSR_10HALF)
- advertise |= ADVERTISED_10baseT_Half;
- if (bmsr & BMSR_10FULL)
- advertise |= ADVERTISED_10baseT_Full;
- if (bmsr & BMSR_100HALF)
- advertise |= ADVERTISED_100baseT_Half;
- if (bmsr & BMSR_100FULL)
- advertise |= ADVERTISED_100baseT_Full;
-
- if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_TFULL))
- advertise |= SUPPORTED_1000baseT_Full;
- if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_THALF))
- advertise |= SUPPORTED_1000baseT_Half;
-
- sungem_phy_probe(phy, phy->mii_id);
- phy->def->ops->setup_aneg(phy, advertise);
-
-}
-
-/**
- * spider_net_rx_irq_off - switch off rx irq on this spider card
- * @card: device structure
- *
- * switches off rx irq by masking them out in the GHIINTnMSK register
- */
-static void
-spider_net_rx_irq_off(struct spider_net_card *card)
-{
- u32 regvalue;
-
- regvalue = SPIDER_NET_INT0_MASK_VALUE & (~SPIDER_NET_RXINT);
- spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
-}
-
-/**
- * spider_net_rx_irq_on - switch on rx irq on this spider card
- * @card: device structure
- *
- * switches on rx irq by enabling them in the GHIINTnMSK register
- */
-static void
-spider_net_rx_irq_on(struct spider_net_card *card)
-{
- u32 regvalue;
-
- regvalue = SPIDER_NET_INT0_MASK_VALUE | SPIDER_NET_RXINT;
- spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
-}
-
-/**
- * spider_net_set_promisc - sets the unicast address or the promiscuous mode
- * @card: card structure
- *
- * spider_net_set_promisc sets the unicast destination address filter and
- * thus either allows for non-promisc mode or promisc mode
- */
-static void
-spider_net_set_promisc(struct spider_net_card *card)
-{
- u32 macu, macl;
- struct net_device *netdev = card->netdev;
-
- if (netdev->flags & IFF_PROMISC) {
- /* clear destination entry 0 */
- spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, 0);
- spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, 0);
- spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
- SPIDER_NET_PROMISC_VALUE);
- } else {
- macu = netdev->dev_addr[0];
- macu <<= 8;
- macu |= netdev->dev_addr[1];
- memcpy(&macl, &netdev->dev_addr[2], sizeof(macl));
-
- macu |= SPIDER_NET_UA_DESCR_VALUE;
- spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, macu);
- spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, macl);
- spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
- SPIDER_NET_NONPROMISC_VALUE);
- }
-}
-
-/**
- * spider_net_get_descr_status -- returns the status of a descriptor
- * @hwdescr: descriptor to look at
- *
- * returns the status as in the dmac_cmd_status field of the descriptor
- */
-static inline int
-spider_net_get_descr_status(struct spider_net_hw_descr *hwdescr)
-{
- return hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
-}
-
-/**
- * spider_net_free_chain - free descriptor chain
- * @card: card structure
- * @chain: address of chain
- *
- */
-static void
-spider_net_free_chain(struct spider_net_card *card,
- struct spider_net_descr_chain *chain)
-{
- struct spider_net_descr *descr;
-
- descr = chain->ring;
- do {
- descr->bus_addr = 0;
- descr->hwdescr->next_descr_addr = 0;
- descr = descr->next;
- } while (descr != chain->ring);
-
- dma_free_coherent(&card->pdev->dev, chain->num_desc * sizeof(struct spider_net_hw_descr),
- chain->hwring, chain->dma_addr);
-}
-
-/**
- * spider_net_init_chain - alloc and link descriptor chain
- * @card: card structure
- * @chain: address of chain
- *
- * We manage a circular list that mirrors the hardware structure,
- * except that the hardware uses bus addresses.
- *
- * Returns 0 on success, <0 on failure
- */
-static int
-spider_net_init_chain(struct spider_net_card *card,
- struct spider_net_descr_chain *chain)
-{
- int i;
- struct spider_net_descr *descr;
- struct spider_net_hw_descr *hwdescr;
- dma_addr_t buf;
- size_t alloc_size;
-
- alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr);
-
- chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
- &chain->dma_addr, GFP_KERNEL);
- if (!chain->hwring)
- return -ENOMEM;
-
- /* Set up the hardware pointers in each descriptor */
- descr = chain->ring;
- hwdescr = chain->hwring;
- buf = chain->dma_addr;
- for (i=0; i < chain->num_desc; i++, descr++, hwdescr++) {
- hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
- hwdescr->next_descr_addr = 0;
-
- descr->hwdescr = hwdescr;
- descr->bus_addr = buf;
- descr->next = descr + 1;
- descr->prev = descr - 1;
-
- buf += sizeof(struct spider_net_hw_descr);
- }
- /* do actual circular list */
- (descr-1)->next = chain->ring;
- chain->ring->prev = descr-1;
-
- spin_lock_init(&chain->lock);
- chain->head = chain->ring;
- chain->tail = chain->ring;
- return 0;
-}
-
-/**
- * spider_net_free_rx_chain_contents - frees descr contents in rx chain
- * @card: card structure
- *
- * returns 0 on success, <0 on failure
- */
-static void
-spider_net_free_rx_chain_contents(struct spider_net_card *card)
-{
- struct spider_net_descr *descr;
-
- descr = card->rx_chain.head;
- do {
- if (descr->skb) {
- dma_unmap_single(&card->pdev->dev,
- descr->hwdescr->buf_addr,
- SPIDER_NET_MAX_FRAME,
- DMA_BIDIRECTIONAL);
- dev_kfree_skb(descr->skb);
- descr->skb = NULL;
- }
- descr = descr->next;
- } while (descr != card->rx_chain.head);
-}
-
-/**
- * spider_net_prepare_rx_descr - Reinitialize RX descriptor
- * @card: card structure
- * @descr: descriptor to re-init
- *
- * Return 0 on success, <0 on failure.
- *
- * Allocates a new rx skb, iommu-maps it and attaches it to the
- * descriptor. Mark the descriptor as activated, ready-to-use.
- */
-static int
-spider_net_prepare_rx_descr(struct spider_net_card *card,
- struct spider_net_descr *descr)
-{
- struct spider_net_hw_descr *hwdescr = descr->hwdescr;
- dma_addr_t buf;
- int offset;
- int bufsize;
-
- /* we need to round up the buffer size to a multiple of 128 */
- bufsize = (SPIDER_NET_MAX_FRAME + SPIDER_NET_RXBUF_ALIGN - 1) &
- (~(SPIDER_NET_RXBUF_ALIGN - 1));
-
- /* and we need to have it 128 byte aligned, therefore we allocate a
- * bit more
- */
- /* allocate an skb */
- descr->skb = netdev_alloc_skb(card->netdev,
- bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
- if (!descr->skb) {
- if (netif_msg_rx_err(card) && net_ratelimit())
- dev_err(&card->netdev->dev,
- "Not enough memory to allocate rx buffer\n");
- card->spider_stats.alloc_rx_skb_error++;
- return -ENOMEM;
- }
- hwdescr->buf_size = bufsize;
- hwdescr->result_size = 0;
- hwdescr->valid_size = 0;
- hwdescr->data_status = 0;
- hwdescr->data_error = 0;
-
- offset = ((unsigned long)descr->skb->data) &
- (SPIDER_NET_RXBUF_ALIGN - 1);
- if (offset)
- skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
- /* iommu-map the skb */
- buf = dma_map_single(&card->pdev->dev, descr->skb->data,
- SPIDER_NET_MAX_FRAME, DMA_FROM_DEVICE);
- if (dma_mapping_error(&card->pdev->dev, buf)) {
- dev_kfree_skb_any(descr->skb);
- descr->skb = NULL;
- if (netif_msg_rx_err(card) && net_ratelimit())
- dev_err(&card->netdev->dev, "Could not iommu-map rx buffer\n");
- card->spider_stats.rx_iommu_map_error++;
- hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
- } else {
- hwdescr->buf_addr = buf;
- wmb();
- hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
- SPIDER_NET_DMAC_NOINTR_COMPLETE;
- }
-
- return 0;
-}
-
-/**
- * spider_net_enable_rxchtails - sets RX dmac chain tail addresses
- * @card: card structure
- *
- * spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the
- * chip by writing to the appropriate register. DMA is enabled in
- * spider_net_enable_rxdmac.
- */
-static inline void
-spider_net_enable_rxchtails(struct spider_net_card *card)
-{
- /* assume chain is aligned correctly */
- spider_net_write_reg(card, SPIDER_NET_GDADCHA ,
- card->rx_chain.tail->bus_addr);
-}
-
-/**
- * spider_net_enable_rxdmac - enables a receive DMA controller
- * @card: card structure
- *
- * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
- * in the GDADMACCNTR register
- */
-static inline void
-spider_net_enable_rxdmac(struct spider_net_card *card)
-{
- wmb();
- spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
- SPIDER_NET_DMA_RX_VALUE);
-}
-
-/**
- * spider_net_disable_rxdmac - disables the receive DMA controller
- * @card: card structure
- *
- * spider_net_disable_rxdmac terminates processing on the DMA controller
- * by turing off the DMA controller, with the force-end flag set.
- */
-static inline void
-spider_net_disable_rxdmac(struct spider_net_card *card)
-{
- spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
- SPIDER_NET_DMA_RX_FEND_VALUE);
-}
-
-/**
- * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains
- * @card: card structure
- *
- * refills descriptors in the rx chain: allocates skbs and iommu-maps them.
- */
-static void
-spider_net_refill_rx_chain(struct spider_net_card *card)
-{
- struct spider_net_descr_chain *chain = &card->rx_chain;
- unsigned long flags;
-
- /* one context doing the refill (and a second context seeing that
- * and omitting it) is ok. If called by NAPI, we'll be called again
- * as spider_net_decode_one_descr is called several times. If some
- * interrupt calls us, the NAPI is about to clean up anyway.
- */
- if (!spin_trylock_irqsave(&chain->lock, flags))
- return;
-
- while (spider_net_get_descr_status(chain->head->hwdescr) ==
- SPIDER_NET_DESCR_NOT_IN_USE) {
- if (spider_net_prepare_rx_descr(card, chain->head))
- break;
- chain->head = chain->head->next;
- }
-
- spin_unlock_irqrestore(&chain->lock, flags);
-}
-
-/**
- * spider_net_alloc_rx_skbs - Allocates rx skbs in rx descriptor chains
- * @card: card structure
- *
- * Returns 0 on success, <0 on failure.
- */
-static int
-spider_net_alloc_rx_skbs(struct spider_net_card *card)
-{
- struct spider_net_descr_chain *chain = &card->rx_chain;
- struct spider_net_descr *start = chain->tail;
- struct spider_net_descr *descr = start;
-
- /* Link up the hardware chain pointers */
- do {
- descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
- descr = descr->next;
- } while (descr != start);
-
- /* Put at least one buffer into the chain. if this fails,
- * we've got a problem. If not, spider_net_refill_rx_chain
- * will do the rest at the end of this function.
- */
- if (spider_net_prepare_rx_descr(card, chain->head))
- goto error;
- else
- chain->head = chain->head->next;
-
- /* This will allocate the rest of the rx buffers;
- * if not, it's business as usual later on.
- */
- spider_net_refill_rx_chain(card);
- spider_net_enable_rxdmac(card);
- return 0;
-
-error:
- spider_net_free_rx_chain_contents(card);
- return -ENOMEM;
-}
-
-/**
- * spider_net_get_multicast_hash - generates hash for multicast filter table
- * @netdev: interface device structure
- * @addr: multicast address
- *
- * returns the hash value.
- *
- * spider_net_get_multicast_hash calculates a hash value for a given multicast
- * address, that is used to set the multicast filter tables
- */
-static u8
-spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
-{
- u32 crc;
- u8 hash;
- char addr_for_crc[ETH_ALEN] = { 0, };
- int i, bit;
-
- for (i = 0; i < ETH_ALEN * 8; i++) {
- bit = (addr[i / 8] >> (i % 8)) & 1;
- addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8));
- }
-
- crc = crc32_be(~0, addr_for_crc, netdev->addr_len);
-
- hash = (crc >> 27);
- hash <<= 3;
- hash |= crc & 7;
- hash &= 0xff;
-
- return hash;
-}
-
-/**
- * spider_net_set_multi - sets multicast addresses and promisc flags
- * @netdev: interface device structure
- *
- * spider_net_set_multi configures multicast addresses as needed for the
- * netdev interface. It also sets up multicast, allmulti and promisc
- * flags appropriately
- */
-static void
-spider_net_set_multi(struct net_device *netdev)
-{
- struct netdev_hw_addr *ha;
- u8 hash;
- int i;
- u32 reg;
- struct spider_net_card *card = netdev_priv(netdev);
- DECLARE_BITMAP(bitmask, SPIDER_NET_MULTICAST_HASHES);
-
- spider_net_set_promisc(card);
-
- if (netdev->flags & IFF_ALLMULTI) {
- bitmap_fill(bitmask, SPIDER_NET_MULTICAST_HASHES);
- goto write_hash;
- }
-
- bitmap_zero(bitmask, SPIDER_NET_MULTICAST_HASHES);
-
- /* well, we know, what the broadcast hash value is: it's xfd
- hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
- __set_bit(0xfd, bitmask);
-
- netdev_for_each_mc_addr(ha, netdev) {
- hash = spider_net_get_multicast_hash(netdev, ha->addr);
- __set_bit(hash, bitmask);
- }
-
-write_hash:
- for (i = 0; i < SPIDER_NET_MULTICAST_HASHES / 4; i++) {
- reg = 0;
- if (test_bit(i * 4, bitmask))
- reg += 0x08;
- reg <<= 8;
- if (test_bit(i * 4 + 1, bitmask))
- reg += 0x08;
- reg <<= 8;
- if (test_bit(i * 4 + 2, bitmask))
- reg += 0x08;
- reg <<= 8;
- if (test_bit(i * 4 + 3, bitmask))
- reg += 0x08;
-
- spider_net_write_reg(card, SPIDER_NET_GMRMHFILnR + i * 4, reg);
- }
-}
-
-/**
- * spider_net_prepare_tx_descr - fill tx descriptor with skb data
- * @card: card structure
- * @skb: packet to use
- *
- * returns 0 on success, <0 on failure.
- *
- * fills out the descriptor structure with skb data and len. Copies data,
- * if needed (32bit DMA!)
- */
-static int
-spider_net_prepare_tx_descr(struct spider_net_card *card,
- struct sk_buff *skb)
-{
- struct spider_net_descr_chain *chain = &card->tx_chain;
- struct spider_net_descr *descr;
- struct spider_net_hw_descr *hwdescr;
- dma_addr_t buf;
- unsigned long flags;
-
- buf = dma_map_single(&card->pdev->dev, skb->data, skb->len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&card->pdev->dev, buf)) {
- if (netif_msg_tx_err(card) && net_ratelimit())
- dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). "
- "Dropping packet\n", skb->data, skb->len);
- card->spider_stats.tx_iommu_map_error++;
- return -ENOMEM;
- }
-
- spin_lock_irqsave(&chain->lock, flags);
- descr = card->tx_chain.head;
- if (descr->next == chain->tail->prev) {
- spin_unlock_irqrestore(&chain->lock, flags);
- dma_unmap_single(&card->pdev->dev, buf, skb->len,
- DMA_TO_DEVICE);
- return -ENOMEM;
- }
- hwdescr = descr->hwdescr;
- chain->head = descr->next;
-
- descr->skb = skb;
- hwdescr->buf_addr = buf;
- hwdescr->buf_size = skb->len;
- hwdescr->next_descr_addr = 0;
- hwdescr->data_status = 0;
-
- hwdescr->dmac_cmd_status =
- SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_TXFRMTL;
- spin_unlock_irqrestore(&chain->lock, flags);
-
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- switch (ip_hdr(skb)->protocol) {
- case IPPROTO_TCP:
- hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
- break;
- case IPPROTO_UDP:
- hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP;
- break;
- }
-
- /* Chain the bus address, so that the DMA engine finds this descr. */
- wmb();
- descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
-
- netif_trans_update(card->netdev); /* set netdev watchdog timer */
- return 0;
-}
-
-static int
-spider_net_set_low_watermark(struct spider_net_card *card)
-{
- struct spider_net_descr *descr = card->tx_chain.tail;
- struct spider_net_hw_descr *hwdescr;
- unsigned long flags;
- int status;
- int cnt=0;
- int i;
-
- /* Measure the length of the queue. Measurement does not
- * need to be precise -- does not need a lock.
- */
- while (descr != card->tx_chain.head) {
- status = descr->hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE;
- if (status == SPIDER_NET_DESCR_NOT_IN_USE)
- break;
- descr = descr->next;
- cnt++;
- }
-
- /* If TX queue is short, don't even bother with interrupts */
- if (cnt < card->tx_chain.num_desc/4)
- return cnt;
-
- /* Set low-watermark 3/4th's of the way into the queue. */
- descr = card->tx_chain.tail;
- cnt = (cnt*3)/4;
- for (i=0;i<cnt; i++)
- descr = descr->next;
-
- /* Set the new watermark, clear the old watermark */
- spin_lock_irqsave(&card->tx_chain.lock, flags);
- descr->hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG;
- if (card->low_watermark && card->low_watermark != descr) {
- hwdescr = card->low_watermark->hwdescr;
- hwdescr->dmac_cmd_status =
- hwdescr->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG;
- }
- card->low_watermark = descr;
- spin_unlock_irqrestore(&card->tx_chain.lock, flags);
- return cnt;
-}
-
-/**
- * spider_net_release_tx_chain - processes sent tx descriptors
- * @card: adapter structure
- * @brutal: if set, don't care about whether descriptor seems to be in use
- *
- * returns 0 if the tx ring is empty, otherwise 1.
- *
- * spider_net_release_tx_chain releases the tx descriptors that spider has
- * finished with (if non-brutal) or simply release tx descriptors (if brutal).
- * If some other context is calling this function, we return 1 so that we're
- * scheduled again (if we were scheduled) and will not lose initiative.
- */
-static int
-spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
-{
- struct net_device *dev = card->netdev;
- struct spider_net_descr_chain *chain = &card->tx_chain;
- struct spider_net_descr *descr;
- struct spider_net_hw_descr *hwdescr;
- struct sk_buff *skb;
- u32 buf_addr;
- unsigned long flags;
- int status;
-
- while (1) {
- spin_lock_irqsave(&chain->lock, flags);
- if (chain->tail == chain->head) {
- spin_unlock_irqrestore(&chain->lock, flags);
- return 0;
- }
- descr = chain->tail;
- hwdescr = descr->hwdescr;
-
- status = spider_net_get_descr_status(hwdescr);
- switch (status) {
- case SPIDER_NET_DESCR_COMPLETE:
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += descr->skb->len;
- break;
-
- case SPIDER_NET_DESCR_CARDOWNED:
- if (!brutal) {
- spin_unlock_irqrestore(&chain->lock, flags);
- return 1;
- }
-
- /* fallthrough, if we release the descriptors
- * brutally (then we don't care about
- * SPIDER_NET_DESCR_CARDOWNED)
- */
- fallthrough;
-
- case SPIDER_NET_DESCR_RESPONSE_ERROR:
- case SPIDER_NET_DESCR_PROTECTION_ERROR:
- case SPIDER_NET_DESCR_FORCE_END:
- if (netif_msg_tx_err(card))
- dev_err(&card->netdev->dev, "forcing end of tx descriptor "
- "with status x%02x\n", status);
- dev->stats.tx_errors++;
- break;
-
- default:
- dev->stats.tx_dropped++;
- if (!brutal) {
- spin_unlock_irqrestore(&chain->lock, flags);
- return 1;
- }
- }
-
- chain->tail = descr->next;
- hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
- skb = descr->skb;
- descr->skb = NULL;
- buf_addr = hwdescr->buf_addr;
- spin_unlock_irqrestore(&chain->lock, flags);
-
- /* unmap the skb */
- if (skb) {
- dma_unmap_single(&card->pdev->dev, buf_addr, skb->len,
- DMA_TO_DEVICE);
- dev_consume_skb_any(skb);
- }
- }
- return 0;
-}
-
-/**
- * spider_net_kick_tx_dma - enables TX DMA processing
- * @card: card structure
- *
- * This routine will start the transmit DMA running if
- * it is not already running. This routine ned only be
- * called when queueing a new packet to an empty tx queue.
- * Writes the current tx chain head as start address
- * of the tx descriptor chain and enables the transmission
- * DMA engine.
- */
-static inline void
-spider_net_kick_tx_dma(struct spider_net_card *card)
-{
- struct spider_net_descr *descr;
-
- if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) &
- SPIDER_NET_TX_DMA_EN)
- goto out;
-
- descr = card->tx_chain.tail;
- for (;;) {
- if (spider_net_get_descr_status(descr->hwdescr) ==
- SPIDER_NET_DESCR_CARDOWNED) {
- spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
- descr->bus_addr);
- spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
- SPIDER_NET_DMA_TX_VALUE);
- break;
- }
- if (descr == card->tx_chain.head)
- break;
- descr = descr->next;
- }
-
-out:
- mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
-}
-
-/**
- * spider_net_xmit - transmits a frame over the device
- * @skb: packet to send out
- * @netdev: interface device structure
- *
- * returns NETDEV_TX_OK on success, NETDEV_TX_BUSY on failure
- */
-static netdev_tx_t
-spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
-{
- int cnt;
- struct spider_net_card *card = netdev_priv(netdev);
-
- spider_net_release_tx_chain(card, 0);
-
- if (spider_net_prepare_tx_descr(card, skb) != 0) {
- netdev->stats.tx_dropped++;
- netif_stop_queue(netdev);
- return NETDEV_TX_BUSY;
- }
-
- cnt = spider_net_set_low_watermark(card);
- if (cnt < 5)
- spider_net_kick_tx_dma(card);
- return NETDEV_TX_OK;
-}
-
-/**
- * spider_net_cleanup_tx_ring - cleans up the TX ring
- * @t: timer context used to obtain the pointer to net card data structure
- *
- * spider_net_cleanup_tx_ring is called by either the tx_timer
- * or from the NAPI polling routine.
- * This routine releases resources associted with transmitted
- * packets, including updating the queue tail pointer.
- */
-static void
-spider_net_cleanup_tx_ring(struct timer_list *t)
-{
- struct spider_net_card *card = from_timer(card, t, tx_timer);
- if ((spider_net_release_tx_chain(card, 0) != 0) &&
- (card->netdev->flags & IFF_UP)) {
- spider_net_kick_tx_dma(card);
- netif_wake_queue(card->netdev);
- }
-}
-
-/**
- * spider_net_do_ioctl - called for device ioctls
- * @netdev: interface device structure
- * @ifr: request parameter structure for ioctl
- * @cmd: command code for ioctl
- *
- * returns 0 on success, <0 on failure. Currently, we have no special ioctls.
- * -EOPNOTSUPP is returned, if an unknown ioctl was requested
- */
-static int
-spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
- default:
- return -EOPNOTSUPP;
- }
-}
-
-/**
- * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
- * @descr: descriptor to process
- * @card: card structure
- *
- * Fills out skb structure and passes the data to the stack.
- * The descriptor state is not changed.
- */
-static void
-spider_net_pass_skb_up(struct spider_net_descr *descr,
- struct spider_net_card *card)
-{
- struct spider_net_hw_descr *hwdescr = descr->hwdescr;
- struct sk_buff *skb = descr->skb;
- struct net_device *netdev = card->netdev;
- u32 data_status = hwdescr->data_status;
- u32 data_error = hwdescr->data_error;
-
- skb_put(skb, hwdescr->valid_size);
-
- /* the card seems to add 2 bytes of junk in front
- * of the ethernet frame
- */
-#define SPIDER_MISALIGN 2
- skb_pull(skb, SPIDER_MISALIGN);
- skb->protocol = eth_type_trans(skb, netdev);
-
- /* checksum offload */
- skb_checksum_none_assert(skb);
- if (netdev->features & NETIF_F_RXCSUM) {
- if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) ==
- SPIDER_NET_DATA_STATUS_CKSUM_MASK) &&
- !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- }
-
- if (data_status & SPIDER_NET_VLAN_PACKET) {
- /* further enhancements: HW-accel VLAN */
- }
-
- /* update netdevice statistics */
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += skb->len;
-
- /* pass skb up to stack */
- netif_receive_skb(skb);
-}
-
-static void show_rx_chain(struct spider_net_card *card)
-{
- struct spider_net_descr_chain *chain = &card->rx_chain;
- struct spider_net_descr *start= chain->tail;
- struct spider_net_descr *descr= start;
- struct spider_net_hw_descr *hwd = start->hwdescr;
- struct device *dev = &card->netdev->dev;
- u32 curr_desc, next_desc;
- int status;
-
- int tot = 0;
- int cnt = 0;
- int off = start - chain->ring;
- int cstat = hwd->dmac_cmd_status;
-
- dev_info(dev, "Total number of descrs=%d\n",
- chain->num_desc);
- dev_info(dev, "Chain tail located at descr=%d, status=0x%x\n",
- off, cstat);
-
- curr_desc = spider_net_read_reg(card, SPIDER_NET_GDACTDPA);
- next_desc = spider_net_read_reg(card, SPIDER_NET_GDACNEXTDA);
-
- status = cstat;
- do
- {
- hwd = descr->hwdescr;
- off = descr - chain->ring;
- status = hwd->dmac_cmd_status;
-
- if (descr == chain->head)
- dev_info(dev, "Chain head is at %d, head status=0x%x\n",
- off, status);
-
- if (curr_desc == descr->bus_addr)
- dev_info(dev, "HW curr desc (GDACTDPA) is at %d, status=0x%x\n",
- off, status);
-
- if (next_desc == descr->bus_addr)
- dev_info(dev, "HW next desc (GDACNEXTDA) is at %d, status=0x%x\n",
- off, status);
-
- if (hwd->next_descr_addr == 0)
- dev_info(dev, "chain is cut at %d\n", off);
-
- if (cstat != status) {
- int from = (chain->num_desc + off - cnt) % chain->num_desc;
- int to = (chain->num_desc + off - 1) % chain->num_desc;
- dev_info(dev, "Have %d (from %d to %d) descrs "
- "with stat=0x%08x\n", cnt, from, to, cstat);
- cstat = status;
- cnt = 0;
- }
-
- cnt++;
- tot++;
- descr = descr->next;
- } while (descr != start);
-
- dev_info(dev, "Last %d descrs with stat=0x%08x "
- "for a total of %d descrs\n", cnt, cstat, tot);
-
-#ifdef DEBUG
- /* Now dump the whole ring */
- descr = start;
- do
- {
- struct spider_net_hw_descr *hwd = descr->hwdescr;
- status = spider_net_get_descr_status(hwd);
- cnt = descr - chain->ring;
- dev_info(dev, "Descr %d stat=0x%08x skb=%p\n",
- cnt, status, descr->skb);
- dev_info(dev, "bus addr=%08x buf addr=%08x sz=%d\n",
- descr->bus_addr, hwd->buf_addr, hwd->buf_size);
- dev_info(dev, "next=%08x result sz=%d valid sz=%d\n",
- hwd->next_descr_addr, hwd->result_size,
- hwd->valid_size);
- dev_info(dev, "dmac=%08x data stat=%08x data err=%08x\n",
- hwd->dmac_cmd_status, hwd->data_status,
- hwd->data_error);
- dev_info(dev, "\n");
-
- descr = descr->next;
- } while (descr != start);
-#endif
-
-}
-
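The from/to report in show_rx_chain() wraps backwards around the descriptor ring with modulo arithmetic; adding num_desc first keeps the operand non-negative before the modulo. A standalone illustration of the same expressions (the concrete numbers are invented):

#include <stdio.h>

int main(void)
{
	int num_desc = 256;  /* ring size */
	int off = 3;         /* descriptor where a status run ended */
	int cnt = 10;        /* length of the run */

	int from = (num_desc + off - cnt) % num_desc;
	int to   = (num_desc + off - 1) % num_desc;

	printf("%d descrs from %d to %d\n", cnt, from, to);  /* 10 descrs from 249 to 2 */
	return 0;
}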
-/**
- * spider_net_resync_head_ptr - Advance head ptr past empty descrs
- * @card: card structure
- *
- * If the driver fails to keep up and empty the queue, then the
- * hardware will run out of room to put incoming packets. This
- * will cause the hardware to skip descrs that are full (instead
- * of halting/retrying). Thus, once the driver runs again, it will
- * need to "catch up" to where the hardware chain pointer is.
- */
-static void spider_net_resync_head_ptr(struct spider_net_card *card)
-{
- unsigned long flags;
- struct spider_net_descr_chain *chain = &card->rx_chain;
- struct spider_net_descr *descr;
- int i, status;
-
- /* Advance head pointer past any empty descrs */
- descr = chain->head;
- status = spider_net_get_descr_status(descr->hwdescr);
-
- if (status == SPIDER_NET_DESCR_NOT_IN_USE)
- return;
-
- spin_lock_irqsave(&chain->lock, flags);
-
- descr = chain->head;
- status = spider_net_get_descr_status(descr->hwdescr);
- for (i = 0; i < chain->num_desc; i++) {
- if (status != SPIDER_NET_DESCR_CARDOWNED) break;
- descr = descr->next;
- status = spider_net_get_descr_status(descr->hwdescr);
- }
- chain->head = descr;
-
- spin_unlock_irqrestore(&chain->lock, flags);
-}
-
-static int spider_net_resync_tail_ptr(struct spider_net_card *card)
-{
- struct spider_net_descr_chain *chain = &card->rx_chain;
- struct spider_net_descr *descr;
- int i, status;
-
- /* Advance tail pointer past any empty and reaped descrs */
- descr = chain->tail;
- status = spider_net_get_descr_status(descr->hwdescr);
-
- for (i = 0; i < chain->num_desc; i++) {
- if ((status != SPIDER_NET_DESCR_CARDOWNED) &&
- (status != SPIDER_NET_DESCR_NOT_IN_USE)) break;
- descr = descr->next;
- status = spider_net_get_descr_status(descr->hwdescr);
- }
- chain->tail = descr;
-
- if ((i == chain->num_desc) || (i == 0))
- return 1;
- return 0;
-}
-
-/**
- * spider_net_decode_one_descr - processes an RX descriptor
- * @card: card structure
- *
- * Returns 1 if a packet has been sent to the stack, otherwise 0.
- *
- * Processes an RX descriptor by iommu-unmapping the data buffer
- * and passing the packet up to the stack. This function is called
- * in softirq context, e.g. either bottom half from interrupt or
- * NAPI polling context.
- */
-static int
-spider_net_decode_one_descr(struct spider_net_card *card)
-{
- struct net_device *dev = card->netdev;
- struct spider_net_descr_chain *chain = &card->rx_chain;
- struct spider_net_descr *descr = chain->tail;
- struct spider_net_hw_descr *hwdescr = descr->hwdescr;
- u32 hw_buf_addr;
- int status;
-
- status = spider_net_get_descr_status(hwdescr);
-
- /* Nothing in the descriptor, or ring must be empty */
- if ((status == SPIDER_NET_DESCR_CARDOWNED) ||
- (status == SPIDER_NET_DESCR_NOT_IN_USE))
- return 0;
-
- /* descriptor definitively used -- move on tail */
- chain->tail = descr->next;
-
- /* unmap descriptor */
- hw_buf_addr = hwdescr->buf_addr;
- hwdescr->buf_addr = 0xffffffff;
- dma_unmap_single(&card->pdev->dev, hw_buf_addr, SPIDER_NET_MAX_FRAME,
- DMA_FROM_DEVICE);
-
- if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) ||
- (status == SPIDER_NET_DESCR_PROTECTION_ERROR) ||
- (status == SPIDER_NET_DESCR_FORCE_END) ) {
- if (netif_msg_rx_err(card))
- dev_err(&dev->dev,
- "dropping RX descriptor with state %d\n", status);
- dev->stats.rx_dropped++;
- goto bad_desc;
- }
-
- if ( (status != SPIDER_NET_DESCR_COMPLETE) &&
- (status != SPIDER_NET_DESCR_FRAME_END) ) {
- if (netif_msg_rx_err(card))
- dev_err(&card->netdev->dev,
- "RX descriptor with unknown state %d\n", status);
- card->spider_stats.rx_desc_unk_state++;
- goto bad_desc;
- }
-
- /* The cases we'll throw away the packet immediately */
- if (hwdescr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
- if (netif_msg_rx_err(card))
- dev_err(&card->netdev->dev,
- "error in received descriptor found, "
- "data_status=x%08x, data_error=x%08x\n",
- hwdescr->data_status, hwdescr->data_error);
- goto bad_desc;
- }
-
- if (hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_BAD_STATUS) {
- dev_err(&card->netdev->dev, "bad status, cmd_status=x%08x\n",
- hwdescr->dmac_cmd_status);
- pr_err("buf_addr=x%08x\n", hw_buf_addr);
- pr_err("buf_size=x%08x\n", hwdescr->buf_size);
- pr_err("next_descr_addr=x%08x\n", hwdescr->next_descr_addr);
- pr_err("result_size=x%08x\n", hwdescr->result_size);
- pr_err("valid_size=x%08x\n", hwdescr->valid_size);
- pr_err("data_status=x%08x\n", hwdescr->data_status);
- pr_err("data_error=x%08x\n", hwdescr->data_error);
- pr_err("which=%ld\n", descr - card->rx_chain.ring);
-
- card->spider_stats.rx_desc_error++;
- goto bad_desc;
- }
-
- /* Ok, we've got a packet in descr */
- spider_net_pass_skb_up(descr, card);
- descr->skb = NULL;
- hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
- return 1;
-
-bad_desc:
- if (netif_msg_rx_err(card))
- show_rx_chain(card);
- dev_kfree_skb_irq(descr->skb);
- descr->skb = NULL;
- hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
- return 0;
-}
-
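spider_net_get_descr_status() is not part of this hunk; judging from SPIDER_NET_DESCR_IND_PROC_MASK in spider_net.h it presumably masks out the top nibble of dmac_cmd_status, which is then compared against the SPIDER_NET_DESCR_* ownership/state codes. A sketch under that assumption (the sample register value is invented):

#include <stdint.h>
#include <stdio.h>

#define DESCR_IND_PROC_MASK  0xF0000000u
#define DESCR_COMPLETE       0x00000000u
#define DESCR_FRAME_END      0x40000000u
#define DESCR_CARDOWNED      0xA0000000u
#define DESCR_NOT_IN_USE     0xF0000000u

/* assumed behaviour of spider_net_get_descr_status() */
static uint32_t get_descr_status(uint32_t dmac_cmd_status)
{
	return dmac_cmd_status & DESCR_IND_PROC_MASK;
}

int main(void)
{
	uint32_t cmd = 0xA0800123u;  /* invented dmac_cmd_status value */

	if (get_descr_status(cmd) == DESCR_CARDOWNED)
		puts("descriptor still owned by the card, nothing to reap");
	return 0;
}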
-/**
- * spider_net_poll - NAPI poll function called by the stack to return packets
- * @napi: napi device structure
- * @budget: number of packets we can pass to the stack at most
- *
- * Returns the number of packets processed, which is at most @budget.
- *
- * spider_net_poll passes received packets from the rx descriptors to the
- * stack (using netif_receive_skb). If fewer than @budget packets were
- * processed, the driver completes the NAPI poll and re-enables the RX
- * interrupt; otherwise it stays in polling mode.
- */
-static int spider_net_poll(struct napi_struct *napi, int budget)
-{
- struct spider_net_card *card = container_of(napi, struct spider_net_card, napi);
- int packets_done = 0;
-
- while (packets_done < budget) {
- if (!spider_net_decode_one_descr(card))
- break;
-
- packets_done++;
- }
-
- if ((packets_done == 0) && (card->num_rx_ints != 0)) {
- if (!spider_net_resync_tail_ptr(card))
- packets_done = budget;
- spider_net_resync_head_ptr(card);
- }
- card->num_rx_ints = 0;
-
- spider_net_refill_rx_chain(card);
- spider_net_enable_rxdmac(card);
-
- spider_net_cleanup_tx_ring(&card->tx_timer);
-
- /* if all packets are in the stack, enable interrupts and return 0 */
- /* if not, return 1 */
- if (packets_done < budget) {
- napi_complete_done(napi, packets_done);
- spider_net_rx_irq_on(card);
- card->ignore_rx_ramfull = 0;
- }
-
- return packets_done;
-}
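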
-
-/**
- * spider_net_set_mac - sets the MAC of an interface
- * @netdev: interface device structure
- * @p: pointer to new MAC address
- *
- * Returns 0 on success, <0 on failure (-EADDRNOTAVAIL for an invalid
- * address). Programs the new address into the unicast MAC registers.
- */
-static int
-spider_net_set_mac(struct net_device *netdev, void *p)
-{
- struct spider_net_card *card = netdev_priv(netdev);
- u32 macl, macu, regvalue;
- struct sockaddr *addr = p;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- eth_hw_addr_set(netdev, addr->sa_data);
-
- /* switch off GMACTPE and GMACRPE */
- regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
- regvalue &= ~((1 << 5) | (1 << 6));
- spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
-
- /* write mac */
- macu = (netdev->dev_addr[0]<<24) + (netdev->dev_addr[1]<<16) +
- (netdev->dev_addr[2]<<8) + (netdev->dev_addr[3]);
- macl = (netdev->dev_addr[4]<<8) + (netdev->dev_addr[5]);
- spider_net_write_reg(card, SPIDER_NET_GMACUNIMACU, macu);
- spider_net_write_reg(card, SPIDER_NET_GMACUNIMACL, macl);
-
- /* switch GMACTPE and GMACRPE back on */
- regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
- regvalue |= ((1 << 5) | (1 << 6));
- spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
-
- spider_net_set_promisc(card);
-
- return 0;
-}
-
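The unicast address is split across two registers: the first four bytes go into GMACUNIMACU and the last two into GMACUNIMACL. The same packing as a standalone sketch (the MAC bytes are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e };

	uint32_t macu = ((uint32_t)mac[0] << 24) | ((uint32_t)mac[1] << 16) |
			((uint32_t)mac[2] << 8)  |  (uint32_t)mac[3];
	uint32_t macl = ((uint32_t)mac[4] << 8)  |  (uint32_t)mac[5];

	printf("GMACUNIMACU=0x%08x GMACUNIMACL=0x%08x\n", macu, macl);
	return 0;
}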
-/**
- * spider_net_link_reset
- * @netdev: net device structure
- *
- * This is called when the PHY_LINK signal is asserted. For the blade this is
- * not connected so we should never get here.
- *
- */
-static void
-spider_net_link_reset(struct net_device *netdev)
-{
-
- struct spider_net_card *card = netdev_priv(netdev);
-
- del_timer_sync(&card->aneg_timer);
-
- /* clear interrupt, block further interrupts */
- spider_net_write_reg(card, SPIDER_NET_GMACST,
- spider_net_read_reg(card, SPIDER_NET_GMACST));
- spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
-
- /* reset phy and setup aneg */
- card->aneg_count = 0;
- card->medium = BCM54XX_COPPER;
- spider_net_setup_aneg(card);
- mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
-
-}
-
-/**
- * spider_net_handle_error_irq - handles errors raised by an interrupt
- * @card: card structure
- * @status_reg: interrupt status register 0 (GHIINT0STS)
- * @error_reg1: interrupt status register 1 (GHIINT1STS)
- * @error_reg2: interrupt status register 2 (GHIINT2STS)
- *
- * spider_net_handle_error_irq treats or ignores all error conditions
- * found when an interrupt is presented
- */
-static void
-spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
- u32 error_reg1, u32 error_reg2)
-{
- u32 i;
- int show_error = 1;
-
- /* check GHIINT0STS ************************************/
- if (status_reg)
- for (i = 0; i < 32; i++)
- if (status_reg & (1<<i))
- switch (i)
- {
- /* let error_reg1 and error_reg2 evaluation decide, what to do
- case SPIDER_NET_PHYINT:
- case SPIDER_NET_GMAC2INT:
- case SPIDER_NET_GMAC1INT:
- case SPIDER_NET_GFIFOINT:
- case SPIDER_NET_DMACINT:
- case SPIDER_NET_GSYSINT:
- break; */
-
- case SPIDER_NET_GIPSINT:
- show_error = 0;
- break;
-
- case SPIDER_NET_GPWOPCMPINT:
- /* PHY write operation completed */
- show_error = 0;
- break;
- case SPIDER_NET_GPROPCMPINT:
- /* PHY read operation completed */
- /* we don't use semaphores, as we poll for the completion
- * of the read operation in spider_net_read_phy. Should take
- * about 50 us
- */
- show_error = 0;
- break;
- case SPIDER_NET_GPWFFINT:
- /* PHY command queue full */
- if (netif_msg_intr(card))
- dev_err(&card->netdev->dev, "PHY write queue full\n");
- show_error = 0;
- break;
-
- /* case SPIDER_NET_GRMDADRINT: not used. print a message */
- /* case SPIDER_NET_GRMARPINT: not used. print a message */
- /* case SPIDER_NET_GRMMPINT: not used. print a message */
-
- case SPIDER_NET_GDTDEN0INT:
- /* someone has set TX_DMA_EN to 0 */
- show_error = 0;
- break;
-
- case SPIDER_NET_GDDDEN0INT:
- case SPIDER_NET_GDCDEN0INT:
- case SPIDER_NET_GDBDEN0INT:
- case SPIDER_NET_GDADEN0INT:
- /* someone has set RX_DMA_EN to 0 */
- show_error = 0;
- break;
-
- /* RX interrupts */
- case SPIDER_NET_GDDFDCINT:
- case SPIDER_NET_GDCFDCINT:
- case SPIDER_NET_GDBFDCINT:
- case SPIDER_NET_GDAFDCINT:
- /* case SPIDER_NET_GDNMINT: not used. print a message */
- /* case SPIDER_NET_GCNMINT: not used. print a message */
- /* case SPIDER_NET_GBNMINT: not used. print a message */
- /* case SPIDER_NET_GANMINT: not used. print a message */
- /* case SPIDER_NET_GRFNMINT: not used. print a message */
- show_error = 0;
- break;
-
- /* TX interrupts */
- case SPIDER_NET_GDTFDCINT:
- show_error = 0;
- break;
- case SPIDER_NET_GTTEDINT:
- show_error = 0;
- break;
- case SPIDER_NET_GDTDCEINT:
- /* chain end. If a descriptor should be sent, kick off
- * tx dma
- if (card->tx_chain.tail != card->tx_chain.head)
- spider_net_kick_tx_dma(card);
- */
- show_error = 0;
- break;
-
- /* case SPIDER_NET_G1TMCNTINT: not used. print a message */
- /* case SPIDER_NET_GFREECNTINT: not used. print a message */
- }
-
- /* check GHIINT1STS ************************************/
- if (error_reg1)
- for (i = 0; i < 32; i++)
- if (error_reg1 & (1<<i))
- switch (i)
- {
- case SPIDER_NET_GTMFLLINT:
- /* TX RAM full can happen during normal operation.
- * Logging is not needed.
- */
- show_error = 0;
- break;
- case SPIDER_NET_GRFDFLLINT:
- case SPIDER_NET_GRFCFLLINT:
- case SPIDER_NET_GRFBFLLINT:
- case SPIDER_NET_GRFAFLLINT:
- case SPIDER_NET_GRMFLLINT:
- /* Could happen when rx chain is full */
- if (card->ignore_rx_ramfull == 0) {
- card->ignore_rx_ramfull = 1;
- spider_net_resync_head_ptr(card);
- spider_net_refill_rx_chain(card);
- spider_net_enable_rxdmac(card);
- card->num_rx_ints ++;
- napi_schedule(&card->napi);
- }
- show_error = 0;
- break;
-
- /* case SPIDER_NET_GTMSHTINT: problem, print a message */
- case SPIDER_NET_GDTINVDINT:
- /* all right, tx from previous descr was ok */
- show_error = 0;
- break;
-
- /* chain end */
- case SPIDER_NET_GDDDCEINT:
- case SPIDER_NET_GDCDCEINT:
- case SPIDER_NET_GDBDCEINT:
- case SPIDER_NET_GDADCEINT:
- spider_net_resync_head_ptr(card);
- spider_net_refill_rx_chain(card);
- spider_net_enable_rxdmac(card);
- card->num_rx_ints ++;
- napi_schedule(&card->napi);
- show_error = 0;
- break;
-
- /* invalid descriptor */
- case SPIDER_NET_GDDINVDINT:
- case SPIDER_NET_GDCINVDINT:
- case SPIDER_NET_GDBINVDINT:
- case SPIDER_NET_GDAINVDINT:
- /* Could happen when rx chain is full */
- spider_net_resync_head_ptr(card);
- spider_net_refill_rx_chain(card);
- spider_net_enable_rxdmac(card);
- card->num_rx_ints ++;
- napi_schedule(&card->napi);
- show_error = 0;
- break;
-
- /* case SPIDER_NET_GDTRSERINT: problem, print a message */
- /* case SPIDER_NET_GDDRSERINT: problem, print a message */
- /* case SPIDER_NET_GDCRSERINT: problem, print a message */
- /* case SPIDER_NET_GDBRSERINT: problem, print a message */
- /* case SPIDER_NET_GDARSERINT: problem, print a message */
- /* case SPIDER_NET_GDSERINT: problem, print a message */
- /* case SPIDER_NET_GDTPTERINT: problem, print a message */
- /* case SPIDER_NET_GDDPTERINT: problem, print a message */
- /* case SPIDER_NET_GDCPTERINT: problem, print a message */
- /* case SPIDER_NET_GDBPTERINT: problem, print a message */
- /* case SPIDER_NET_GDAPTERINT: problem, print a message */
- default:
- show_error = 1;
- break;
- }
-
- /* check GHIINT2STS ************************************/
- if (error_reg2)
- for (i = 0; i < 32; i++)
- if (error_reg2 & (1<<i))
- switch (i)
- {
- /* there is nothing we can (or want to) do at this time. Log a
- * message; we can switch the specific values on and off later on
- case SPIDER_NET_GPROPERINT:
- case SPIDER_NET_GMCTCRSNGINT:
- case SPIDER_NET_GMCTLCOLINT:
- case SPIDER_NET_GMCTTMOTINT:
- case SPIDER_NET_GMCRCAERINT:
- case SPIDER_NET_GMCRCALERINT:
- case SPIDER_NET_GMCRALNERINT:
- case SPIDER_NET_GMCROVRINT:
- case SPIDER_NET_GMCRRNTINT:
- case SPIDER_NET_GMCRRXERINT:
- case SPIDER_NET_GTITCSERINT:
- case SPIDER_NET_GTIFMTERINT:
- case SPIDER_NET_GTIPKTRVKINT:
- case SPIDER_NET_GTISPINGINT:
- case SPIDER_NET_GTISADNGINT:
- case SPIDER_NET_GTISPDNGINT:
- case SPIDER_NET_GRIFMTERINT:
- case SPIDER_NET_GRIPKTRVKINT:
- case SPIDER_NET_GRISPINGINT:
- case SPIDER_NET_GRISADNGINT:
- case SPIDER_NET_GRISPDNGINT:
- break;
- */
- default:
- break;
- }
-
- if ((show_error) && (netif_msg_intr(card)) && net_ratelimit())
- dev_err(&card->netdev->dev, "Error interrupt, GHIINT0STS = 0x%08x, "
- "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n",
- status_reg, error_reg1, error_reg2);
-
- /* clear interrupt sources */
- spider_net_write_reg(card, SPIDER_NET_GHIINT1STS, error_reg1);
- spider_net_write_reg(card, SPIDER_NET_GHIINT2STS, error_reg2);
-}
-
-/**
- * spider_net_interrupt - interrupt handler for spider_net
- * @irq: interrupt number
- * @ptr: pointer to net_device
- *
- * Returns IRQ_HANDLED if the interrupt was raised by the card, or IRQ_NONE
- * if no interrupt from the card was found.
- *
- * This is the interrupt handler, that turns off
- * interrupts for this device and makes the stack poll the driver
- */
-static irqreturn_t
-spider_net_interrupt(int irq, void *ptr)
-{
- struct net_device *netdev = ptr;
- struct spider_net_card *card = netdev_priv(netdev);
- u32 status_reg, error_reg1, error_reg2;
-
- status_reg = spider_net_read_reg(card, SPIDER_NET_GHIINT0STS);
- error_reg1 = spider_net_read_reg(card, SPIDER_NET_GHIINT1STS);
- error_reg2 = spider_net_read_reg(card, SPIDER_NET_GHIINT2STS);
-
- if (!(status_reg & SPIDER_NET_INT0_MASK_VALUE) &&
- !(error_reg1 & SPIDER_NET_INT1_MASK_VALUE) &&
- !(error_reg2 & SPIDER_NET_INT2_MASK_VALUE))
- return IRQ_NONE;
-
- if (status_reg & SPIDER_NET_RXINT ) {
- spider_net_rx_irq_off(card);
- napi_schedule(&card->napi);
- card->num_rx_ints ++;
- }
- if (status_reg & SPIDER_NET_TXINT)
- napi_schedule(&card->napi);
-
- if (status_reg & SPIDER_NET_LINKINT)
- spider_net_link_reset(netdev);
-
- if (status_reg & SPIDER_NET_ERRINT )
- spider_net_handle_error_irq(card, status_reg,
- error_reg1, error_reg2);
-
- /* clear interrupt sources */
- spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg);
-
- return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * spider_net_poll_controller - artificial interrupt for netconsole etc.
- * @netdev: interface device structure
- *
- * see Documentation/networking/netconsole.rst
- */
-static void
-spider_net_poll_controller(struct net_device *netdev)
-{
- disable_irq(netdev->irq);
- spider_net_interrupt(netdev->irq, netdev);
- enable_irq(netdev->irq);
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
-/**
- * spider_net_enable_interrupts - enable interrupts
- * @card: card structure
- *
- * spider_net_enable_interrupts enables several interrupts
- */
-static void
-spider_net_enable_interrupts(struct spider_net_card *card)
-{
- spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK,
- SPIDER_NET_INT0_MASK_VALUE);
- spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK,
- SPIDER_NET_INT1_MASK_VALUE);
- spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK,
- SPIDER_NET_INT2_MASK_VALUE);
-}
-
-/**
- * spider_net_disable_interrupts - disable interrupts
- * @card: card structure
- *
- * spider_net_disable_interrupts disables all the interrupts
- */
-static void
-spider_net_disable_interrupts(struct spider_net_card *card)
-{
- spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
- spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
- spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
- spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
-}
-
-/**
- * spider_net_init_card - initializes the card
- * @card: card structure
- *
- * spider_net_init_card initializes the card so that other registers can
- * be used
- */
-static void
-spider_net_init_card(struct spider_net_card *card)
-{
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_STOP_VALUE);
-
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_RUN_VALUE);
-
- /* trigger ETOMOD signal */
- spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
- spider_net_read_reg(card, SPIDER_NET_GMACOPEMD) | 0x4);
-
- spider_net_disable_interrupts(card);
-}
-
-/**
- * spider_net_enable_card - enables the card by setting all kinds of regs
- * @card: card structure
- *
- * spider_net_enable_card sets a lot of SMMIO registers to enable the device
- */
-static void
-spider_net_enable_card(struct spider_net_card *card)
-{
- int i;
- /* the following array consists of (register),(value) pairs
- * that are set in this function. A register of 0 ends the list
- */
- u32 regs[][2] = {
- { SPIDER_NET_GRESUMINTNUM, 0 },
- { SPIDER_NET_GREINTNUM, 0 },
-
- /* set interrupt frame number registers */
- /* clear the single DMA engine registers first */
- { SPIDER_NET_GFAFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
- { SPIDER_NET_GFBFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
- { SPIDER_NET_GFCFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
- { SPIDER_NET_GFDFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
- /* then set, what we really need */
- { SPIDER_NET_GFFRMNUM, SPIDER_NET_FRAMENUM_VALUE },
-
- /* timer counter registers and stuff */
- { SPIDER_NET_GFREECNNUM, 0 },
- { SPIDER_NET_GONETIMENUM, 0 },
- { SPIDER_NET_GTOUTFRMNUM, 0 },
-
- /* RX mode setting */
- { SPIDER_NET_GRXMDSET, SPIDER_NET_RXMODE_VALUE },
- /* TX mode setting */
- { SPIDER_NET_GTXMDSET, SPIDER_NET_TXMODE_VALUE },
- /* IPSEC mode setting */
- { SPIDER_NET_GIPSECINIT, SPIDER_NET_IPSECINIT_VALUE },
-
- { SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE },
-
- { SPIDER_NET_GMRWOLCTRL, 0 },
- { SPIDER_NET_GTESTMD, 0x10000000 },
- { SPIDER_NET_GTTQMSK, 0x00400040 },
-
- { SPIDER_NET_GMACINTEN, 0 },
-
- /* flow control stuff */
- { SPIDER_NET_GMACAPAUSE, SPIDER_NET_MACAPAUSE_VALUE },
- { SPIDER_NET_GMACTXPAUSE, SPIDER_NET_TXPAUSE_VALUE },
-
- { SPIDER_NET_GMACBSTLMT, SPIDER_NET_BURSTLMT_VALUE },
- { 0, 0}
- };
-
- i = 0;
- while (regs[i][0]) {
- spider_net_write_reg(card, regs[i][0], regs[i][1]);
- i++;
- }
-
- /* clear unicast filter table entries 1 to 14 */
- for (i = 1; i <= 14; i++) {
- spider_net_write_reg(card,
- SPIDER_NET_GMRUAFILnR + i * 8,
- 0x00080000);
- spider_net_write_reg(card,
- SPIDER_NET_GMRUAFILnR + i * 8 + 4,
- 0x00000000);
- }
-
- spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R, 0x08080000);
-
- spider_net_write_reg(card, SPIDER_NET_ECMODE, SPIDER_NET_ECMODE_VALUE);
-
- /* set chain tail address for RX chains and
- * enable DMA
- */
- spider_net_enable_rxchtails(card);
- spider_net_enable_rxdmac(card);
-
- spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE);
-
- spider_net_write_reg(card, SPIDER_NET_GMACLENLMT,
- SPIDER_NET_LENLMT_VALUE);
- spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
- SPIDER_NET_OPMODE_VALUE);
-
- spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
- SPIDER_NET_GDTBSTA);
-}
-
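The regs[][2] array above is a table-driven init: walk (register, value) pairs until a zero register terminates the list. A standalone sketch of the same loop shape; register_write() is a stand-in for spider_net_write_reg(), and the two sample entries reuse the GTXMDSET/GRXMDSET offsets and mode values from spider_net.h:

#include <stdint.h>
#include <stdio.h>

/* stand-in for spider_net_write_reg(); just logs the access */
static void register_write(uint32_t reg, uint32_t value)
{
	printf("write 0x%08x -> reg 0x%08x\n", value, reg);
}

int main(void)
{
	/* (register, value) pairs; a register of 0 ends the list */
	static const uint32_t regs[][2] = {
		{ 0x00000050, 0x00010000 },  /* GTXMDSET <- TXMODE_VALUE */
		{ 0x00000058, 0x00000011 },  /* GRXMDSET <- RXMODE_VALUE */
		{ 0, 0 }
	};
	int i = 0;

	while (regs[i][0]) {
		register_write(regs[i][0], regs[i][1]);
		i++;
	}
	return 0;
}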
-/**
- * spider_net_download_firmware - loads firmware into the adapter
- * @card: card structure
- * @firmware_ptr: pointer to firmware data
- *
- * spider_net_download_firmware loads the firmware data into the
- * adapter. It assumes the length etc. to be correct.
- */
-static int
-spider_net_download_firmware(struct spider_net_card *card,
- const void *firmware_ptr)
-{
- int sequencer, i;
- const u32 *fw_ptr = firmware_ptr;
-
- /* stop sequencers */
- spider_net_write_reg(card, SPIDER_NET_GSINIT,
- SPIDER_NET_STOP_SEQ_VALUE);
-
- for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
- sequencer++) {
- spider_net_write_reg(card,
- SPIDER_NET_GSnPRGADR + sequencer * 8, 0);
- for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
- spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
- sequencer * 8, *fw_ptr);
- fw_ptr++;
- }
- }
-
- if (spider_net_read_reg(card, SPIDER_NET_GSINIT))
- return -EIO;
-
- spider_net_write_reg(card, SPIDER_NET_GSINIT,
- SPIDER_NET_RUN_SEQ_VALUE);
-
- return 0;
-}
-
-/**
- * spider_net_init_firmware - reads in firmware parts
- * @card: card structure
- *
- * Returns 0 on success, <0 on failure
- *
- * spider_net_init_firmware opens the sequencer firmware and does some basic
- * checks. This function opens and releases the firmware structure. A call
- * to download the firmware is performed before the release.
- *
- * Firmware format
- * ===============
- * spider_fw.bin is expected to be a file containing 6*1024*4 bytes, 4k being
- * the program for each sequencer. Use the command
- * tail -q -n +2 Seq_code1_0x088.txt Seq_code2_0x090.txt \
- * Seq_code3_0x098.txt Seq_code4_0x0A0.txt Seq_code5_0x0A8.txt \
- * Seq_code6_0x0B0.txt | xxd -r -p -c4 > spider_fw.bin
- *
- * to generate spider_fw.bin, if you have sequencer programs with something
- * like the following contents for each sequencer:
- * <ONE LINE COMMENT>
- * <FIRST 4-BYTES-WORD FOR SEQUENCER>
- * <SECOND 4-BYTES-WORD FOR SEQUENCER>
- * ...
- * <1024th 4-BYTES-WORD FOR SEQUENCER>
- */
-static int
-spider_net_init_firmware(struct spider_net_card *card)
-{
- struct firmware *firmware = NULL;
- struct device_node *dn;
- const u8 *fw_prop = NULL;
- int err = -ENOENT;
- int fw_size;
-
- if (request_firmware((const struct firmware **)&firmware,
- SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) {
- if ( (firmware->size != SPIDER_NET_FIRMWARE_LEN) &&
- netif_msg_probe(card) ) {
- dev_err(&card->netdev->dev,
- "Incorrect size of spidernet firmware in " \
- "filesystem. Looking in host firmware...\n");
- goto try_host_fw;
- }
- err = spider_net_download_firmware(card, firmware->data);
-
- release_firmware(firmware);
- if (err)
- goto try_host_fw;
-
- goto done;
- }
-
-try_host_fw:
- dn = pci_device_to_OF_node(card->pdev);
- if (!dn)
- goto out_err;
-
- fw_prop = of_get_property(dn, "firmware", &fw_size);
- if (!fw_prop)
- goto out_err;
-
- if ( (fw_size != SPIDER_NET_FIRMWARE_LEN) &&
- netif_msg_probe(card) ) {
- dev_err(&card->netdev->dev,
- "Incorrect size of spidernet firmware in host firmware\n");
- goto done;
- }
-
- err = spider_net_download_firmware(card, fw_prop);
-
-done:
- return err;
-out_err:
- if (netif_msg_probe(card))
- dev_err(&card->netdev->dev,
- "Couldn't find spidernet firmware in filesystem " \
- "or host firmware\n");
- return err;
-}
-
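The size check against SPIDER_NET_FIRMWARE_LEN works out to 6 sequencers x 1024 words x 4 bytes = 24576 bytes, which is also the size the tail/xxd recipe above produces. A trivial standalone check of that arithmetic:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define FIRMWARE_SEQS      6
#define FIRMWARE_SEQWORDS  1024

int main(void)
{
	size_t expected = FIRMWARE_SEQS * FIRMWARE_SEQWORDS * sizeof(uint32_t);

	printf("expected spider_fw.bin size: %zu bytes\n", expected);  /* 24576 */
	return 0;
}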
-/**
- * spider_net_open - called upon ifconfig up
- * @netdev: interface device structure
- *
- * returns 0 on success, <0 on failure
- *
- * spider_net_open allocates all the descriptors and memory needed for
- * operation, sets up multicast list and enables interrupts
- */
-int
-spider_net_open(struct net_device *netdev)
-{
- struct spider_net_card *card = netdev_priv(netdev);
- int result;
-
- result = spider_net_init_firmware(card);
- if (result)
- goto init_firmware_failed;
-
- /* start probing with copper */
- card->aneg_count = 0;
- card->medium = BCM54XX_COPPER;
- spider_net_setup_aneg(card);
- if (card->phy.def->phy_id)
- mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
-
- result = spider_net_init_chain(card, &card->tx_chain);
- if (result)
- goto alloc_tx_failed;
- card->low_watermark = NULL;
-
- result = spider_net_init_chain(card, &card->rx_chain);
- if (result)
- goto alloc_rx_failed;
-
- /* Allocate rx skbs */
- result = spider_net_alloc_rx_skbs(card);
- if (result)
- goto alloc_skbs_failed;
-
- spider_net_set_multi(netdev);
-
- /* further enhancement: setup hw vlan, if needed */
-
- result = -EBUSY;
- if (request_irq(netdev->irq, spider_net_interrupt,
- IRQF_SHARED, netdev->name, netdev))
- goto register_int_failed;
-
- spider_net_enable_card(card);
-
- netif_start_queue(netdev);
- netif_carrier_on(netdev);
- napi_enable(&card->napi);
-
- spider_net_enable_interrupts(card);
-
- return 0;
-
-register_int_failed:
- spider_net_free_rx_chain_contents(card);
-alloc_skbs_failed:
- spider_net_free_chain(card, &card->rx_chain);
-alloc_rx_failed:
- spider_net_free_chain(card, &card->tx_chain);
-alloc_tx_failed:
- del_timer_sync(&card->aneg_timer);
-init_firmware_failed:
- return result;
-}
-
-/**
- * spider_net_link_phy
- * @t: timer context used to obtain the pointer to net card data structure
- */
-static void spider_net_link_phy(struct timer_list *t)
-{
- struct spider_net_card *card = from_timer(card, t, aneg_timer);
- struct mii_phy *phy = &card->phy;
-
- /* if link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, setup phy again */
- if (card->aneg_count > SPIDER_NET_ANEG_TIMEOUT) {
-
- pr_debug("%s: link is down trying to bring it up\n",
- card->netdev->name);
-
- switch (card->medium) {
- case BCM54XX_COPPER:
- /* enable fiber with autonegotiation first */
- if (phy->def->ops->enable_fiber)
- phy->def->ops->enable_fiber(phy, 1);
- card->medium = BCM54XX_FIBER;
- break;
-
- case BCM54XX_FIBER:
- /* fiber didn't come up, try to disable fiber autoneg */
- if (phy->def->ops->enable_fiber)
- phy->def->ops->enable_fiber(phy, 0);
- card->medium = BCM54XX_UNKNOWN;
- break;
-
- case BCM54XX_UNKNOWN:
- /* copper, fiber with and without failed,
- * retry from beginning
- */
- spider_net_setup_aneg(card);
- card->medium = BCM54XX_COPPER;
- break;
- }
-
- card->aneg_count = 0;
- mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
- return;
- }
-
- /* link still not up, try again later */
- if (!(phy->def->ops->poll_link(phy))) {
- card->aneg_count++;
- mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
- return;
- }
-
- /* link came up, get abilities */
- phy->def->ops->read_link(phy);
-
- spider_net_write_reg(card, SPIDER_NET_GMACST,
- spider_net_read_reg(card, SPIDER_NET_GMACST));
- spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0x4);
-
- if (phy->speed == 1000)
- spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0x00000001);
- else
- spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0);
-
- card->aneg_count = 0;
-
- pr_info("%s: link up, %i Mbps, %s-duplex %sautoneg.\n",
- card->netdev->name, phy->speed,
- phy->duplex == 1 ? "Full" : "Half",
- phy->autoneg == 1 ? "" : "no ");
-}
-
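When the link stays down for more than SPIDER_NET_ANEG_TIMEOUT attempts, the timer above steps the probed medium copper -> fiber (autoneg on) -> fiber (autoneg off) -> back to copper. A compact standalone model of that retry order; the enum is a simplified stand-in for the BCM54XX_* constants:

#include <stdio.h>

enum medium { COPPER, FIBER, UNKNOWN };

/* one step of the retry sequence used while the link stays down */
static enum medium next_medium(enum medium m)
{
	switch (m) {
	case COPPER:  return FIBER;    /* retry with fiber autoneg enabled */
	case FIBER:   return UNKNOWN;  /* retry with fiber autoneg disabled */
	default:      return COPPER;   /* start over with copper autoneg */
	}
}

int main(void)
{
	enum medium m = COPPER;

	for (int i = 0; i < 4; i++) {
		printf("attempt %d: medium=%d\n", i, m);
		m = next_medium(m);
	}
	return 0;
}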
-/**
- * spider_net_setup_phy - setup PHY
- * @card: card structure
- *
- * returns 0 on success, <0 on failure
- *
- * spider_net_setup_phy is used as part of spider_net_probe.
- **/
-static int
-spider_net_setup_phy(struct spider_net_card *card)
-{
- struct mii_phy *phy = &card->phy;
-
- spider_net_write_reg(card, SPIDER_NET_GDTDMASEL,
- SPIDER_NET_DMASEL_VALUE);
- spider_net_write_reg(card, SPIDER_NET_GPCCTRL,
- SPIDER_NET_PHY_CTRL_VALUE);
-
- phy->dev = card->netdev;
- phy->mdio_read = spider_net_read_phy;
- phy->mdio_write = spider_net_write_phy;
-
- for (phy->mii_id = 1; phy->mii_id <= 31; phy->mii_id++) {
- unsigned short id;
- id = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
- if (id != 0x0000 && id != 0xffff) {
- if (!sungem_phy_probe(phy, phy->mii_id)) {
- pr_info("Found %s.\n", phy->def->name);
- break;
- }
- }
- }
-
- return 0;
-}
-
-/**
- * spider_net_workaround_rxramfull - work around firmware bug
- * @card: card structure
- *
- * no return value
- **/
-static void
-spider_net_workaround_rxramfull(struct spider_net_card *card)
-{
- int i, sequencer = 0;
-
- /* cancel reset */
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_RUN_VALUE);
-
- /* empty sequencer data */
- for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
- sequencer++) {
- spider_net_write_reg(card, SPIDER_NET_GSnPRGADR +
- sequencer * 8, 0x0);
- for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
- spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
- sequencer * 8, 0x0);
- }
- }
-
- /* set sequencer operation */
- spider_net_write_reg(card, SPIDER_NET_GSINIT, 0x000000fe);
-
- /* reset */
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_STOP_VALUE);
-}
-
-/**
- * spider_net_stop - called upon ifconfig down
- * @netdev: interface device structure
- *
- * always returns 0
- */
-int
-spider_net_stop(struct net_device *netdev)
-{
- struct spider_net_card *card = netdev_priv(netdev);
-
- napi_disable(&card->napi);
- netif_carrier_off(netdev);
- netif_stop_queue(netdev);
- del_timer_sync(&card->tx_timer);
- del_timer_sync(&card->aneg_timer);
-
- spider_net_disable_interrupts(card);
-
- free_irq(netdev->irq, netdev);
-
- spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
- SPIDER_NET_DMA_TX_FEND_VALUE);
-
- /* turn off DMA, force end */
- spider_net_disable_rxdmac(card);
-
- /* release chains */
- spider_net_release_tx_chain(card, 1);
- spider_net_free_rx_chain_contents(card);
-
- spider_net_free_chain(card, &card->tx_chain);
- spider_net_free_chain(card, &card->rx_chain);
-
- return 0;
-}
-
-/**
- * spider_net_tx_timeout_task - task scheduled by the watchdog timeout
- * function (must not be called in interrupt context)
- * @work: work context used to obtain the pointer to net card data structure
- *
- * called as task when tx hangs, resets interface (if interface is up)
- */
-static void
-spider_net_tx_timeout_task(struct work_struct *work)
-{
- struct spider_net_card *card =
- container_of(work, struct spider_net_card, tx_timeout_task);
- struct net_device *netdev = card->netdev;
-
- if (!(netdev->flags & IFF_UP))
- goto out;
-
- netif_device_detach(netdev);
- spider_net_stop(netdev);
-
- spider_net_workaround_rxramfull(card);
- spider_net_init_card(card);
-
- if (spider_net_setup_phy(card))
- goto out;
-
- spider_net_open(netdev);
- spider_net_kick_tx_dma(card);
- netif_device_attach(netdev);
-
-out:
- atomic_dec(&card->tx_timeout_task_counter);
-}
-
-/**
- * spider_net_tx_timeout - called when the tx timeout watchdog kicks in.
- * @netdev: interface device structure
- * @txqueue: unused
- *
- * called if tx hangs. Schedules a task that resets the interface
- */
-static void
-spider_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
-{
- struct spider_net_card *card;
-
- card = netdev_priv(netdev);
- atomic_inc(&card->tx_timeout_task_counter);
- if (netdev->flags & IFF_UP)
- schedule_work(&card->tx_timeout_task);
- else
- atomic_dec(&card->tx_timeout_task_counter);
- card->spider_stats.tx_timeouts++;
-}
-
-static const struct net_device_ops spider_net_ops = {
- .ndo_open = spider_net_open,
- .ndo_stop = spider_net_stop,
- .ndo_start_xmit = spider_net_xmit,
- .ndo_set_rx_mode = spider_net_set_multi,
- .ndo_set_mac_address = spider_net_set_mac,
- .ndo_eth_ioctl = spider_net_do_ioctl,
- .ndo_tx_timeout = spider_net_tx_timeout,
- .ndo_validate_addr = eth_validate_addr,
- /* HW VLAN */
-#ifdef CONFIG_NET_POLL_CONTROLLER
- /* poll controller */
- .ndo_poll_controller = spider_net_poll_controller,
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-};
-
-/**
- * spider_net_setup_netdev_ops - initialization of net_device operations
- * @netdev: net_device structure
- *
- * fills out function pointers in the net_device structure
- */
-static void
-spider_net_setup_netdev_ops(struct net_device *netdev)
-{
- netdev->netdev_ops = &spider_net_ops;
- netdev->watchdog_timeo = SPIDER_NET_WATCHDOG_TIMEOUT;
- /* ethtool ops */
- netdev->ethtool_ops = &spider_net_ethtool_ops;
-}
-
-/**
- * spider_net_setup_netdev - initialization of net_device
- * @card: card structure
- *
- * Returns 0 on success or <0 on failure
- *
- * spider_net_setup_netdev initializes the net_device structure
- **/
-static int
-spider_net_setup_netdev(struct spider_net_card *card)
-{
- int result;
- struct net_device *netdev = card->netdev;
- struct device_node *dn;
- struct sockaddr addr;
- const u8 *mac;
-
- SET_NETDEV_DEV(netdev, &card->pdev->dev);
-
- pci_set_drvdata(card->pdev, netdev);
-
- timer_setup(&card->tx_timer, spider_net_cleanup_tx_ring, 0);
- netdev->irq = card->pdev->irq;
-
- card->aneg_count = 0;
- timer_setup(&card->aneg_timer, spider_net_link_phy, 0);
-
- netif_napi_add(netdev, &card->napi, spider_net_poll);
-
- spider_net_setup_netdev_ops(netdev);
-
- netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
- if (SPIDER_NET_RX_CSUM_DEFAULT)
- netdev->features |= NETIF_F_RXCSUM;
- netdev->features |= NETIF_F_IP_CSUM;
- /* some time: NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
- * NETIF_F_HW_VLAN_CTAG_FILTER
- */
- netdev->lltx = true;
-
- /* MTU range: 64 - 2294 */
- netdev->min_mtu = SPIDER_NET_MIN_MTU;
- netdev->max_mtu = SPIDER_NET_MAX_MTU;
-
- netdev->irq = card->pdev->irq;
- card->num_rx_ints = 0;
- card->ignore_rx_ramfull = 0;
-
- dn = pci_device_to_OF_node(card->pdev);
- if (!dn)
- return -EIO;
-
- mac = of_get_property(dn, "local-mac-address", NULL);
- if (!mac)
- return -EIO;
- memcpy(addr.sa_data, mac, ETH_ALEN);
-
- result = spider_net_set_mac(netdev, &addr);
- if ((result) && (netif_msg_probe(card)))
- dev_err(&card->netdev->dev,
- "Failed to set MAC address: %i\n", result);
-
- result = register_netdev(netdev);
- if (result) {
- if (netif_msg_probe(card))
- dev_err(&card->netdev->dev,
- "Couldn't register net_device: %i\n", result);
- return result;
- }
-
- if (netif_msg_probe(card))
- pr_info("Initialized device %s.\n", netdev->name);
-
- return 0;
-}
-
-/**
- * spider_net_alloc_card - allocates net_device and card structure
- *
- * returns the card structure or NULL in case of errors
- *
- * the card and net_device structures are linked to each other
- */
-static struct spider_net_card *
-spider_net_alloc_card(void)
-{
- struct net_device *netdev;
- struct spider_net_card *card;
-
- netdev = alloc_etherdev(struct_size(card, darray,
- size_add(tx_descriptors, rx_descriptors)));
- if (!netdev)
- return NULL;
-
- card = netdev_priv(netdev);
- card->netdev = netdev;
- card->msg_enable = SPIDER_NET_DEFAULT_MSG;
- INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task);
- init_waitqueue_head(&card->waitq);
- atomic_set(&card->tx_timeout_task_counter, 0);
-
- card->rx_chain.num_desc = rx_descriptors;
- card->rx_chain.ring = card->darray;
- card->tx_chain.num_desc = tx_descriptors;
- card->tx_chain.ring = card->darray + rx_descriptors;
-
- return card;
-}
-
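spider_net_alloc_card() puts both descriptor rings into a single flexible array member at the end of the card structure: the first rx_descriptors entries form the RX ring, the following tx_descriptors entries the TX ring. A standalone sketch of the same layout trick, with plain malloc() standing in for alloc_etherdev() and a trimmed-down descriptor type:

#include <stdio.h>
#include <stdlib.h>

struct descr {
	int dummy;              /* stands in for the real descriptor fields */
};

struct card {
	struct descr *rx_ring;
	struct descr *tx_ring;
	struct descr darray[];  /* must be the last member */
};

int main(void)
{
	size_t rx = 256, tx = 256;
	struct card *card = malloc(sizeof(*card) + (rx + tx) * sizeof(struct descr));

	if (!card)
		return 1;

	card->rx_ring = card->darray;       /* RX descriptors come first */
	card->tx_ring = card->darray + rx;  /* TX descriptors follow */

	printf("rx ring at %p, tx ring at %p\n",
	       (void *)card->rx_ring, (void *)card->tx_ring);
	free(card);
	return 0;
}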
-/**
- * spider_net_undo_pci_setup - releases PCI resources
- * @card: card structure
- *
- * spider_net_undo_pci_setup releases the mapped regions
- */
-static void
-spider_net_undo_pci_setup(struct spider_net_card *card)
-{
- iounmap(card->regs);
- pci_release_regions(card->pdev);
-}
-
-/**
- * spider_net_setup_pci_dev - sets up the device in terms of PCI operations
- * @pdev: PCI device
- *
- * Returns the card structure or NULL if any errors occur
- *
- * spider_net_setup_pci_dev initializes pdev and, together with the
- * functions called in spider_net_open, configures the device so that
- * data can be transferred over it.
- * The net_device structure is attached to the card structure if the
- * function returns without error.
- **/
-static struct spider_net_card *
-spider_net_setup_pci_dev(struct pci_dev *pdev)
-{
- struct spider_net_card *card;
- unsigned long mmio_start, mmio_len;
-
- if (pci_enable_device(pdev)) {
- dev_err(&pdev->dev, "Couldn't enable PCI device\n");
- return NULL;
- }
-
- if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- dev_err(&pdev->dev,
- "Couldn't find proper PCI device base address.\n");
- goto out_disable_dev;
- }
-
- if (pci_request_regions(pdev, spider_net_driver_name)) {
- dev_err(&pdev->dev,
- "Couldn't obtain PCI resources, aborting.\n");
- goto out_disable_dev;
- }
-
- pci_set_master(pdev);
-
- card = spider_net_alloc_card();
- if (!card) {
- dev_err(&pdev->dev,
- "Couldn't allocate net_device structure, aborting.\n");
- goto out_release_regions;
- }
- card->pdev = pdev;
-
- /* fetch base address and length of first resource */
- mmio_start = pci_resource_start(pdev, 0);
- mmio_len = pci_resource_len(pdev, 0);
-
- card->netdev->mem_start = mmio_start;
- card->netdev->mem_end = mmio_start + mmio_len;
- card->regs = ioremap(mmio_start, mmio_len);
-
- if (!card->regs) {
- dev_err(&pdev->dev,
- "Couldn't obtain PCI resources, aborting.\n");
- goto out_release_regions;
- }
-
- return card;
-
-out_release_regions:
- pci_release_regions(pdev);
-out_disable_dev:
- pci_disable_device(pdev);
- return NULL;
-}
-
-/**
- * spider_net_probe - initialization of a device
- * @pdev: PCI device
- * @ent: entry in the device id list
- *
- * Returns 0 on success, <0 on failure
- *
- * spider_net_probe initializes pdev and registers a net_device
- * structure for it. After that, the device can be ifconfig'ed up
- **/
-static int
-spider_net_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int err = -EIO;
- struct spider_net_card *card;
-
- card = spider_net_setup_pci_dev(pdev);
- if (!card)
- goto out;
-
- spider_net_workaround_rxramfull(card);
- spider_net_init_card(card);
-
- err = spider_net_setup_phy(card);
- if (err)
- goto out_undo_pci;
-
- err = spider_net_setup_netdev(card);
- if (err)
- goto out_undo_pci;
-
- return 0;
-
-out_undo_pci:
- spider_net_undo_pci_setup(card);
- free_netdev(card->netdev);
-out:
- return err;
-}
-
-/**
- * spider_net_remove - removal of a device
- * @pdev: PCI device
- *
- * spider_net_remove is called to remove the device; it unregisters the
- * net_device, resets the card and releases the PCI resources.
- **/
-static void
-spider_net_remove(struct pci_dev *pdev)
-{
- struct net_device *netdev;
- struct spider_net_card *card;
-
- netdev = pci_get_drvdata(pdev);
- card = netdev_priv(netdev);
-
- wait_event(card->waitq,
- atomic_read(&card->tx_timeout_task_counter) == 0);
-
- unregister_netdev(netdev);
-
- /* switch off card */
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_STOP_VALUE);
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_RUN_VALUE);
-
- spider_net_undo_pci_setup(card);
- free_netdev(netdev);
-}
-
-static struct pci_driver spider_net_driver = {
- .name = spider_net_driver_name,
- .id_table = spider_net_pci_tbl,
- .probe = spider_net_probe,
- .remove = spider_net_remove
-};
-
-/**
- * spider_net_init - init function when the driver is loaded
- *
- * spider_net_init registers the device driver
- */
-static int __init spider_net_init(void)
-{
- printk(KERN_INFO "Spidernet version %s.\n", VERSION);
-
- if (rx_descriptors < SPIDER_NET_RX_DESCRIPTORS_MIN) {
- rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MIN;
- pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
- }
- if (rx_descriptors > SPIDER_NET_RX_DESCRIPTORS_MAX) {
- rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MAX;
- pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
- }
- if (tx_descriptors < SPIDER_NET_TX_DESCRIPTORS_MIN) {
- tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MIN;
- pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
- }
- if (tx_descriptors > SPIDER_NET_TX_DESCRIPTORS_MAX) {
- tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MAX;
- pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
- }
-
- return pci_register_driver(&spider_net_driver);
-}
-
-/**
- * spider_net_cleanup - exit function when driver is unloaded
- *
- * spider_net_cleanup unregisters the device driver
- */
-static void __exit spider_net_cleanup(void)
-{
- pci_unregister_driver(&spider_net_driver);
-}
-
-module_init(spider_net_init);
-module_exit(spider_net_cleanup);
diff --git a/drivers/net/ethernet/toshiba/spider_net.h b/drivers/net/ethernet/toshiba/spider_net.h
deleted file mode 100644
index 51948e2b3a34..000000000000
--- a/drivers/net/ethernet/toshiba/spider_net.h
+++ /dev/null
@@ -1,475 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Network device driver for Cell Processor-Based Blade and Celleb platform
- *
- * (C) Copyright IBM Corp. 2005
- * (C) Copyright 2006 TOSHIBA CORPORATION
- *
- * Authors : Utz Bacher <utz.bacher@de.ibm.com>
- * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
- */
-
-#ifndef _SPIDER_NET_H
-#define _SPIDER_NET_H
-
-#define VERSION "2.0 B"
-
-#include <linux/sungem_phy.h>
-
-int spider_net_stop(struct net_device *netdev);
-int spider_net_open(struct net_device *netdev);
-
-extern const struct ethtool_ops spider_net_ethtool_ops;
-
-extern char spider_net_driver_name[];
-
-#define SPIDER_NET_MAX_FRAME 2312
-#define SPIDER_NET_MAX_MTU 2294
-#define SPIDER_NET_MIN_MTU 64
-
-#define SPIDER_NET_RXBUF_ALIGN 128
-
-#define SPIDER_NET_RX_DESCRIPTORS_DEFAULT 256
-#define SPIDER_NET_RX_DESCRIPTORS_MIN 16
-#define SPIDER_NET_RX_DESCRIPTORS_MAX 512
-
-#define SPIDER_NET_TX_DESCRIPTORS_DEFAULT 256
-#define SPIDER_NET_TX_DESCRIPTORS_MIN 16
-#define SPIDER_NET_TX_DESCRIPTORS_MAX 512
-
-#define SPIDER_NET_TX_TIMER (HZ/5)
-#define SPIDER_NET_ANEG_TIMER (HZ)
-#define SPIDER_NET_ANEG_TIMEOUT 5
-
-#define SPIDER_NET_RX_CSUM_DEFAULT 1
-
-#define SPIDER_NET_WATCHDOG_TIMEOUT (50*HZ)
-
-#define SPIDER_NET_FIRMWARE_SEQS 6
-#define SPIDER_NET_FIRMWARE_SEQWORDS 1024
-#define SPIDER_NET_FIRMWARE_LEN (SPIDER_NET_FIRMWARE_SEQS * \
- SPIDER_NET_FIRMWARE_SEQWORDS * \
- sizeof(u32))
-#define SPIDER_NET_FIRMWARE_NAME "spider_fw.bin"
-
-/** spider_net SMMIO registers */
-#define SPIDER_NET_GHIINT0STS 0x00000000
-#define SPIDER_NET_GHIINT1STS 0x00000004
-#define SPIDER_NET_GHIINT2STS 0x00000008
-#define SPIDER_NET_GHIINT0MSK 0x00000010
-#define SPIDER_NET_GHIINT1MSK 0x00000014
-#define SPIDER_NET_GHIINT2MSK 0x00000018
-
-#define SPIDER_NET_GRESUMINTNUM 0x00000020
-#define SPIDER_NET_GREINTNUM 0x00000024
-
-#define SPIDER_NET_GFFRMNUM 0x00000028
-#define SPIDER_NET_GFAFRMNUM 0x0000002c
-#define SPIDER_NET_GFBFRMNUM 0x00000030
-#define SPIDER_NET_GFCFRMNUM 0x00000034
-#define SPIDER_NET_GFDFRMNUM 0x00000038
-
-/* clear them (don't use them) */
-#define SPIDER_NET_GFREECNNUM 0x0000003c
-#define SPIDER_NET_GONETIMENUM 0x00000040
-
-#define SPIDER_NET_GTOUTFRMNUM 0x00000044
-
-#define SPIDER_NET_GTXMDSET 0x00000050
-#define SPIDER_NET_GPCCTRL 0x00000054
-#define SPIDER_NET_GRXMDSET 0x00000058
-#define SPIDER_NET_GIPSECINIT 0x0000005c
-#define SPIDER_NET_GFTRESTRT 0x00000060
-#define SPIDER_NET_GRXDMAEN 0x00000064
-#define SPIDER_NET_GMRWOLCTRL 0x00000068
-#define SPIDER_NET_GPCWOPCMD 0x0000006c
-#define SPIDER_NET_GPCROPCMD 0x00000070
-#define SPIDER_NET_GTTFRMCNT 0x00000078
-#define SPIDER_NET_GTESTMD 0x0000007c
-
-#define SPIDER_NET_GSINIT 0x00000080
-#define SPIDER_NET_GSnPRGADR 0x00000084
-#define SPIDER_NET_GSnPRGDAT 0x00000088
-
-#define SPIDER_NET_GMACOPEMD 0x00000100
-#define SPIDER_NET_GMACLENLMT 0x00000108
-#define SPIDER_NET_GMACST 0x00000110
-#define SPIDER_NET_GMACINTEN 0x00000118
-#define SPIDER_NET_GMACPHYCTRL 0x00000120
-
-#define SPIDER_NET_GMACAPAUSE 0x00000154
-#define SPIDER_NET_GMACTXPAUSE 0x00000164
-
-#define SPIDER_NET_GMACMODE 0x000001b0
-#define SPIDER_NET_GMACBSTLMT 0x000001b4
-
-#define SPIDER_NET_GMACUNIMACU 0x000001c0
-#define SPIDER_NET_GMACUNIMACL 0x000001c8
-
-#define SPIDER_NET_GMRMHFILnR 0x00000400
-#define SPIDER_NET_MULTICAST_HASHES 256
-
-#define SPIDER_NET_GMRUAFILnR 0x00000500
-#define SPIDER_NET_GMRUA0FIL15R 0x00000578
-
-#define SPIDER_NET_GTTQMSK 0x00000934
-
-/* RX DMA controller registers, all 0x00000a.. are for DMA controller A,
- * 0x00000b.. for DMA controller B, etc. */
-#define SPIDER_NET_GDADCHA 0x00000a00
-#define SPIDER_NET_GDADMACCNTR 0x00000a04
-#define SPIDER_NET_GDACTDPA 0x00000a08
-#define SPIDER_NET_GDACTDCNT 0x00000a0c
-#define SPIDER_NET_GDACDBADDR 0x00000a20
-#define SPIDER_NET_GDACDBSIZE 0x00000a24
-#define SPIDER_NET_GDACNEXTDA 0x00000a28
-#define SPIDER_NET_GDACCOMST 0x00000a2c
-#define SPIDER_NET_GDAWBCOMST 0x00000a30
-#define SPIDER_NET_GDAWBRSIZE 0x00000a34
-#define SPIDER_NET_GDAWBVSIZE 0x00000a38
-#define SPIDER_NET_GDAWBTRST 0x00000a3c
-#define SPIDER_NET_GDAWBTRERR 0x00000a40
-
-/* TX DMA controller registers */
-#define SPIDER_NET_GDTDCHA 0x00000e00
-#define SPIDER_NET_GDTDMACCNTR 0x00000e04
-#define SPIDER_NET_GDTCDPA 0x00000e08
-#define SPIDER_NET_GDTDMASEL 0x00000e14
-
-#define SPIDER_NET_ECMODE 0x00000f00
-/* clock and reset control register */
-#define SPIDER_NET_CKRCTRL 0x00000ff0
-
-/** SCONFIG registers */
-#define SPIDER_NET_SCONFIG_IOACTE 0x00002810
-
-/** interrupt mask registers */
-#define SPIDER_NET_INT0_MASK_VALUE 0x3f7fe2c7
-#define SPIDER_NET_INT1_MASK_VALUE 0x0000fff2
-#define SPIDER_NET_INT2_MASK_VALUE 0x000003f1
-
-/* we rely on flagged descriptor interrupts */
-#define SPIDER_NET_FRAMENUM_VALUE 0x00000000
-/* set this first, then the FRAMENUM_VALUE */
-#define SPIDER_NET_GFXFRAMES_VALUE 0x00000000
-
-#define SPIDER_NET_STOP_SEQ_VALUE 0x00000000
-#define SPIDER_NET_RUN_SEQ_VALUE 0x0000007e
-
-#define SPIDER_NET_PHY_CTRL_VALUE 0x00040040
-/* #define SPIDER_NET_PHY_CTRL_VALUE 0x01070080*/
-#define SPIDER_NET_RXMODE_VALUE 0x00000011
-/* auto retransmission in case of MAC aborts */
-#define SPIDER_NET_TXMODE_VALUE 0x00010000
-#define SPIDER_NET_RESTART_VALUE 0x00000000
-#define SPIDER_NET_WOL_VALUE 0x00001111
-#if 0
-#define SPIDER_NET_WOL_VALUE 0x00000000
-#endif
-#define SPIDER_NET_IPSECINIT_VALUE 0x6f716f71
-
-/* pause frames: automatic, no upper retransmission count */
-/* outside loopback mode: ETOMOD signal doesn't matter, not connected */
-/* ETOMOD signal is brought to PHY reset. bit 2 must be 1 in Celleb */
-#define SPIDER_NET_OPMODE_VALUE 0x00000067
-/*#define SPIDER_NET_OPMODE_VALUE 0x001b0062*/
-#define SPIDER_NET_LENLMT_VALUE 0x00000908
-
-#define SPIDER_NET_MACAPAUSE_VALUE 0x00000800 /* about 1 ms */
-#define SPIDER_NET_TXPAUSE_VALUE 0x00000000
-
-#define SPIDER_NET_MACMODE_VALUE 0x00000001
-#define SPIDER_NET_BURSTLMT_VALUE 0x00000200 /* about 16 us */
-
-/* DMAC control register GDMACCNTR
- *
- * 1(0) enable r/tx dma
- * 0000000 fixed to 0
- *
- * 000000 fixed to 0
- * 0(1) en/disable descr writeback on force end
- * 0(1) force end
- *
- * 000000 fixed to 0
- * 00 burst alignment: 128 bytes
- * 11 burst alignment: 1024 bytes
- *
- * 00000 fixed to 0
- * 0 descr writeback size 32 bytes
- * 0(1) descr chain end interrupt enable
- * 0(1) descr status writeback enable */
-
-/* to set RX_DMA_EN */
-#define SPIDER_NET_DMA_RX_VALUE 0x80000000
-#define SPIDER_NET_DMA_RX_FEND_VALUE 0x00030003
-/* to set TX_DMA_EN */
-#define SPIDER_NET_TX_DMA_EN 0x80000000
-#define SPIDER_NET_GDTBSTA 0x00000300
-#define SPIDER_NET_GDTDCEIDIS 0x00000002
-#define SPIDER_NET_DMA_TX_VALUE SPIDER_NET_TX_DMA_EN | \
- SPIDER_NET_GDTDCEIDIS | \
- SPIDER_NET_GDTBSTA
-
-#define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003
-
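SPIDER_NET_DMA_TX_VALUE is just the OR of the three bits defined above (note that the macro body is not parenthesised, so it only expands safely where operator precedence cannot bite). A quick standalone check of the resulting GDTDMACCNTR start value:

#include <stdint.h>
#include <stdio.h>

#define TX_DMA_EN   0x80000000u
#define GDTBSTA     0x00000300u
#define GDTDCEIDIS  0x00000002u

int main(void)
{
	uint32_t dma_tx_value = TX_DMA_EN | GDTDCEIDIS | GDTBSTA;

	printf("DMA_TX_VALUE = 0x%08x\n", dma_tx_value);  /* 0x80000302 */
	return 0;
}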
-/* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */
-#define SPIDER_NET_UA_DESCR_VALUE 0x00080000
-#define SPIDER_NET_PROMISC_VALUE 0x00080000
-#define SPIDER_NET_NONPROMISC_VALUE 0x00000000
-
-#define SPIDER_NET_DMASEL_VALUE 0x00000001
-
-#define SPIDER_NET_ECMODE_VALUE 0x00000000
-
-#define SPIDER_NET_CKRCTRL_RUN_VALUE 0x1fff010f
-#define SPIDER_NET_CKRCTRL_STOP_VALUE 0x0000010f
-
-#define SPIDER_NET_SBIMSTATE_VALUE 0x00000000
-#define SPIDER_NET_SBTMSTATE_VALUE 0x00000000
-
-/* SPIDER_NET_GHIINT0STS bits, in reverse order so that they can be used
- * with 1 << SPIDER_NET_... */
-enum spider_net_int0_status {
- SPIDER_NET_GPHYINT = 0,
- SPIDER_NET_GMAC2INT,
- SPIDER_NET_GMAC1INT,
- SPIDER_NET_GIPSINT,
- SPIDER_NET_GFIFOINT,
- SPIDER_NET_GDMACINT,
- SPIDER_NET_GSYSINT,
- SPIDER_NET_GPWOPCMPINT,
- SPIDER_NET_GPROPCMPINT,
- SPIDER_NET_GPWFFINT,
- SPIDER_NET_GRMDADRINT,
- SPIDER_NET_GRMARPINT,
- SPIDER_NET_GRMMPINT,
- SPIDER_NET_GDTDEN0INT,
- SPIDER_NET_GDDDEN0INT,
- SPIDER_NET_GDCDEN0INT,
- SPIDER_NET_GDBDEN0INT,
- SPIDER_NET_GDADEN0INT,
- SPIDER_NET_GDTFDCINT,
- SPIDER_NET_GDDFDCINT,
- SPIDER_NET_GDCFDCINT,
- SPIDER_NET_GDBFDCINT,
- SPIDER_NET_GDAFDCINT,
- SPIDER_NET_GTTEDINT,
- SPIDER_NET_GDTDCEINT,
- SPIDER_NET_GRFDNMINT,
- SPIDER_NET_GRFCNMINT,
- SPIDER_NET_GRFBNMINT,
- SPIDER_NET_GRFANMINT,
- SPIDER_NET_GRFNMINT,
- SPIDER_NET_G1TMCNTINT,
- SPIDER_NET_GFREECNTINT
-};
-/* GHIINT1STS bits */
-enum spider_net_int1_status {
- SPIDER_NET_GTMFLLINT = 0,
- SPIDER_NET_GRMFLLINT,
- SPIDER_NET_GTMSHTINT,
- SPIDER_NET_GDTINVDINT,
- SPIDER_NET_GRFDFLLINT,
- SPIDER_NET_GDDDCEINT,
- SPIDER_NET_GDDINVDINT,
- SPIDER_NET_GRFCFLLINT,
- SPIDER_NET_GDCDCEINT,
- SPIDER_NET_GDCINVDINT,
- SPIDER_NET_GRFBFLLINT,
- SPIDER_NET_GDBDCEINT,
- SPIDER_NET_GDBINVDINT,
- SPIDER_NET_GRFAFLLINT,
- SPIDER_NET_GDADCEINT,
- SPIDER_NET_GDAINVDINT,
- SPIDER_NET_GDTRSERINT,
- SPIDER_NET_GDDRSERINT,
- SPIDER_NET_GDCRSERINT,
- SPIDER_NET_GDBRSERINT,
- SPIDER_NET_GDARSERINT,
- SPIDER_NET_GDSERINT,
- SPIDER_NET_GDTPTERINT,
- SPIDER_NET_GDDPTERINT,
- SPIDER_NET_GDCPTERINT,
- SPIDER_NET_GDBPTERINT,
- SPIDER_NET_GDAPTERINT
-};
-/* GHIINT2STS bits */
-enum spider_net_int2_status {
- SPIDER_NET_GPROPERINT = 0,
- SPIDER_NET_GMCTCRSNGINT,
- SPIDER_NET_GMCTLCOLINT,
- SPIDER_NET_GMCTTMOTINT,
- SPIDER_NET_GMCRCAERINT,
- SPIDER_NET_GMCRCALERINT,
- SPIDER_NET_GMCRALNERINT,
- SPIDER_NET_GMCROVRINT,
- SPIDER_NET_GMCRRNTINT,
- SPIDER_NET_GMCRRXERINT,
- SPIDER_NET_GTITCSERINT,
- SPIDER_NET_GTIFMTERINT,
- SPIDER_NET_GTIPKTRVKINT,
- SPIDER_NET_GTISPINGINT,
- SPIDER_NET_GTISADNGINT,
- SPIDER_NET_GTISPDNGINT,
- SPIDER_NET_GRIFMTERINT,
- SPIDER_NET_GRIPKTRVKINT,
- SPIDER_NET_GRISPINGINT,
- SPIDER_NET_GRISADNGINT,
- SPIDER_NET_GRISPDNGINT
-};
-
-#define SPIDER_NET_TXINT (1 << SPIDER_NET_GDTFDCINT)
-
-/* We rely on flagged descriptor interrupts */
-#define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) )
-
-#define SPIDER_NET_LINKINT ( 1 << SPIDER_NET_GMAC2INT )
-
-#define SPIDER_NET_ERRINT ( 0xffffffff & \
- (~SPIDER_NET_TXINT) & \
- (~SPIDER_NET_RXINT) & \
- (~SPIDER_NET_LINKINT) )
-
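ERRINT above is defined by subtraction: every int0 source except the TX, RX and link bits. With GDTFDCINT = 18, GDAFDCINT = 22 and GMAC2INT = 1 from the enum further up, the masks evaluate as in this standalone check:

#include <stdint.h>
#include <stdio.h>

#define GMAC2INT    1   /* link interrupt bit */
#define GDTFDCINT  18   /* TX flagged-descriptor interrupt bit */
#define GDAFDCINT  22   /* RX (DMA engine A) flagged-descriptor interrupt bit */

int main(void)
{
	uint32_t txint   = 1u << GDTFDCINT;
	uint32_t rxint   = 1u << GDAFDCINT;
	uint32_t linkint = 1u << GMAC2INT;
	uint32_t errint  = 0xffffffffu & ~txint & ~rxint & ~linkint;

	printf("TXINT=0x%08x RXINT=0x%08x LINKINT=0x%08x ERRINT=0x%08x\n",
	       txint, rxint, linkint, errint);   /* ERRINT = 0xffbbfffd */
	return 0;
}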
-#define SPIDER_NET_GPREXEC 0x80000000
-#define SPIDER_NET_GPRDAT_MASK 0x0000ffff
-
-#define SPIDER_NET_DMAC_NOINTR_COMPLETE 0x00800000
-#define SPIDER_NET_DMAC_TXFRMTL 0x00040000
-#define SPIDER_NET_DMAC_TCP 0x00020000
-#define SPIDER_NET_DMAC_UDP 0x00030000
-#define SPIDER_NET_TXDCEST 0x08000000
-
-#define SPIDER_NET_DESCR_RXFDIS 0x00000001
-#define SPIDER_NET_DESCR_RXDCEIS 0x00000002
-#define SPIDER_NET_DESCR_RXDEN0IS 0x00000004
-#define SPIDER_NET_DESCR_RXINVDIS 0x00000008
-#define SPIDER_NET_DESCR_RXRERRIS 0x00000010
-#define SPIDER_NET_DESCR_RXFDCIMS 0x00000100
-#define SPIDER_NET_DESCR_RXDCEIMS 0x00000200
-#define SPIDER_NET_DESCR_RXDEN0IMS 0x00000400
-#define SPIDER_NET_DESCR_RXINVDIMS 0x00000800
-#define SPIDER_NET_DESCR_RXRERRMIS 0x00001000
-#define SPIDER_NET_DESCR_UNUSED 0x077fe0e0
-
-#define SPIDER_NET_DESCR_IND_PROC_MASK 0xF0000000
-#define SPIDER_NET_DESCR_COMPLETE 0x00000000 /* used in rx and tx */
-#define SPIDER_NET_DESCR_RESPONSE_ERROR 0x10000000 /* used in rx and tx */
-#define SPIDER_NET_DESCR_PROTECTION_ERROR 0x20000000 /* used in rx and tx */
-#define SPIDER_NET_DESCR_FRAME_END 0x40000000 /* used in rx */
-#define SPIDER_NET_DESCR_FORCE_END 0x50000000 /* used in rx and tx */
-#define SPIDER_NET_DESCR_CARDOWNED 0xA0000000 /* used in rx and tx */
-#define SPIDER_NET_DESCR_NOT_IN_USE 0xF0000000
-#define SPIDER_NET_DESCR_TXDESFLG 0x00800000
-
-#define SPIDER_NET_DESCR_BAD_STATUS (SPIDER_NET_DESCR_RXDEN0IS | \
- SPIDER_NET_DESCR_RXRERRIS | \
- SPIDER_NET_DESCR_RXDEN0IMS | \
- SPIDER_NET_DESCR_RXINVDIMS | \
- SPIDER_NET_DESCR_RXRERRMIS | \
- SPIDER_NET_DESCR_UNUSED)
-
-/* Descriptor, as defined by the hardware */
-struct spider_net_hw_descr {
- u32 buf_addr;
- u32 buf_size;
- u32 next_descr_addr;
- u32 dmac_cmd_status;
- u32 result_size;
- u32 valid_size; /* all zeroes for tx */
- u32 data_status;
- u32 data_error; /* all zeroes for tx */
-} __attribute__((aligned(32)));
-
-struct spider_net_descr {
- struct spider_net_hw_descr *hwdescr;
- struct sk_buff *skb;
- u32 bus_addr;
- struct spider_net_descr *next;
- struct spider_net_descr *prev;
-};
-
-struct spider_net_descr_chain {
- spinlock_t lock;
- struct spider_net_descr *head;
- struct spider_net_descr *tail;
- struct spider_net_descr *ring;
- int num_desc;
- struct spider_net_hw_descr *hwring;
- dma_addr_t dma_addr;
-};
-
-/* descriptor data_status bits */
-#define SPIDER_NET_RX_IPCHK 29
-#define SPIDER_NET_RX_TCPCHK 28
-#define SPIDER_NET_VLAN_PACKET 21
-#define SPIDER_NET_DATA_STATUS_CKSUM_MASK ( (1 << SPIDER_NET_RX_IPCHK) | \
- (1 << SPIDER_NET_RX_TCPCHK) )
-
-/* descriptor data_error bits */
-#define SPIDER_NET_RX_IPCHKERR 27
-#define SPIDER_NET_RX_RXTCPCHKERR 28
-
-#define SPIDER_NET_DATA_ERR_CKSUM_MASK (1 << SPIDER_NET_RX_IPCHKERR)
-
-/* the cases where we don't pass the packet to the stack.
- * 701b8000 would be correct, but every packet gets that flag */
-#define SPIDER_NET_DESTROY_RX_FLAGS 0x700b8000
-
-#define SPIDER_NET_DEFAULT_MSG ( NETIF_MSG_DRV | \
- NETIF_MSG_PROBE | \
- NETIF_MSG_LINK | \
- NETIF_MSG_TIMER | \
- NETIF_MSG_IFDOWN | \
- NETIF_MSG_IFUP | \
- NETIF_MSG_RX_ERR | \
- NETIF_MSG_TX_ERR | \
- NETIF_MSG_TX_QUEUED | \
- NETIF_MSG_INTR | \
- NETIF_MSG_TX_DONE | \
- NETIF_MSG_RX_STATUS | \
- NETIF_MSG_PKTDATA | \
- NETIF_MSG_HW | \
- NETIF_MSG_WOL )
-
-struct spider_net_extra_stats {
- unsigned long rx_desc_error;
- unsigned long tx_timeouts;
- unsigned long alloc_rx_skb_error;
- unsigned long rx_iommu_map_error;
- unsigned long tx_iommu_map_error;
- unsigned long rx_desc_unk_state;
-};
-
-struct spider_net_card {
- struct net_device *netdev;
- struct pci_dev *pdev;
- struct mii_phy phy;
-
- struct napi_struct napi;
-
- int medium;
-
- void __iomem *regs;
-
- struct spider_net_descr_chain tx_chain;
- struct spider_net_descr_chain rx_chain;
- struct spider_net_descr *low_watermark;
-
- int aneg_count;
- struct timer_list aneg_timer;
- struct timer_list tx_timer;
- struct work_struct tx_timeout_task;
- atomic_t tx_timeout_task_counter;
- wait_queue_head_t waitq;
- int num_rx_ints;
- int ignore_rx_ramfull;
-
- /* for ethtool */
- int msg_enable;
- struct spider_net_extra_stats spider_stats;
-
- /* Must be last item in struct */
- struct spider_net_descr darray[];
-};
-
-#endif
diff --git a/drivers/net/ethernet/toshiba/spider_net_ethtool.c b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
deleted file mode 100644
index fef9fd127b5e..000000000000
--- a/drivers/net/ethernet/toshiba/spider_net_ethtool.c
+++ /dev/null
@@ -1,174 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Network device driver for Cell Processor-Based Blade
- *
- * (C) Copyright IBM Corp. 2005
- *
- * Authors : Utz Bacher <utz.bacher@de.ibm.com>
- * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
- */
-
-#include <linux/netdevice.h>
-#include <linux/ethtool.h>
-#include <linux/pci.h>
-
-#include "spider_net.h"
-
-
-static struct {
- const char str[ETH_GSTRING_LEN];
-} ethtool_stats_keys[] = {
- { "tx_packets" },
- { "tx_bytes" },
- { "rx_packets" },
- { "rx_bytes" },
- { "tx_errors" },
- { "tx_dropped" },
- { "rx_dropped" },
- { "rx_descriptor_error" },
- { "tx_timeouts" },
- { "alloc_rx_skb_error" },
- { "rx_iommu_map_error" },
- { "tx_iommu_map_error" },
- { "rx_desc_unk_state" },
-};
-
-static int
-spider_net_ethtool_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *cmd)
-{
- struct spider_net_card *card;
- card = netdev_priv(netdev);
-
- ethtool_link_ksettings_zero_link_mode(cmd, supported);
- ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
-
- ethtool_link_ksettings_zero_link_mode(cmd, advertising);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
-
- cmd->base.port = PORT_FIBRE;
- cmd->base.speed = card->phy.speed;
- cmd->base.duplex = DUPLEX_FULL;
-
- return 0;
-}
-
-static void
-spider_net_ethtool_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct spider_net_card *card;
- card = netdev_priv(netdev);
-
- /* clear and fill out info */
- strscpy(drvinfo->driver, spider_net_driver_name,
- sizeof(drvinfo->driver));
- strscpy(drvinfo->version, VERSION, sizeof(drvinfo->version));
- strscpy(drvinfo->fw_version, "no information",
- sizeof(drvinfo->fw_version));
- strscpy(drvinfo->bus_info, pci_name(card->pdev),
- sizeof(drvinfo->bus_info));
-}
-
-static void
-spider_net_ethtool_get_wol(struct net_device *netdev,
- struct ethtool_wolinfo *wolinfo)
-{
- /* no support for wol */
- wolinfo->supported = 0;
- wolinfo->wolopts = 0;
-}
-
-static u32
-spider_net_ethtool_get_msglevel(struct net_device *netdev)
-{
- struct spider_net_card *card;
- card = netdev_priv(netdev);
- return card->msg_enable;
-}
-
-static void
-spider_net_ethtool_set_msglevel(struct net_device *netdev,
- u32 level)
-{
- struct spider_net_card *card;
- card = netdev_priv(netdev);
- card->msg_enable = level;
-}
-
-static int
-spider_net_ethtool_nway_reset(struct net_device *netdev)
-{
- if (netif_running(netdev)) {
- spider_net_stop(netdev);
- spider_net_open(netdev);
- }
- return 0;
-}
-
-static void
-spider_net_ethtool_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering,
- struct kernel_ethtool_ringparam *kernel_ering,
- struct netlink_ext_ack *extack)
-{
- struct spider_net_card *card = netdev_priv(netdev);
-
- ering->tx_max_pending = SPIDER_NET_TX_DESCRIPTORS_MAX;
- ering->tx_pending = card->tx_chain.num_desc;
- ering->rx_max_pending = SPIDER_NET_RX_DESCRIPTORS_MAX;
- ering->rx_pending = card->rx_chain.num_desc;
-}
-
-static int spider_net_get_sset_count(struct net_device *netdev, int sset)
-{
- switch (sset) {
- case ETH_SS_STATS:
- return ARRAY_SIZE(ethtool_stats_keys);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void spider_net_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, u64 *data)
-{
- struct spider_net_card *card = netdev_priv(netdev);
-
- data[0] = netdev->stats.tx_packets;
- data[1] = netdev->stats.tx_bytes;
- data[2] = netdev->stats.rx_packets;
- data[3] = netdev->stats.rx_bytes;
- data[4] = netdev->stats.tx_errors;
- data[5] = netdev->stats.tx_dropped;
- data[6] = netdev->stats.rx_dropped;
- data[7] = card->spider_stats.rx_desc_error;
- data[8] = card->spider_stats.tx_timeouts;
- data[9] = card->spider_stats.alloc_rx_skb_error;
- data[10] = card->spider_stats.rx_iommu_map_error;
- data[11] = card->spider_stats.tx_iommu_map_error;
- data[12] = card->spider_stats.rx_desc_unk_state;
-}
-
-static void spider_net_get_strings(struct net_device *netdev, u32 stringset,
- u8 *data)
-{
- memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys));
-}
-
-const struct ethtool_ops spider_net_ethtool_ops = {
- .get_drvinfo = spider_net_ethtool_get_drvinfo,
- .get_wol = spider_net_ethtool_get_wol,
- .get_msglevel = spider_net_ethtool_get_msglevel,
- .set_msglevel = spider_net_ethtool_set_msglevel,
- .get_link = ethtool_op_get_link,
- .nway_reset = spider_net_ethtool_nway_reset,
- .get_ringparam = spider_net_ethtool_get_ringparam,
- .get_strings = spider_net_get_strings,
- .get_sset_count = spider_net_get_sset_count,
- .get_ethtool_stats = spider_net_get_ethtool_stats,
- .get_link_ksettings = spider_net_ethtool_get_link_ksettings,
-};
-
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index c6957e3b7f0f..7e0b3d694ac0 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1379,7 +1379,7 @@ static int tsi108_close(struct net_device *dev)
netif_stop_queue(dev);
napi_disable(&data->napi);
- del_timer_sync(&data->timer);
+ timer_delete_sync(&data->timer);
tsi108_stop_ethernet(dev);
tsi108_kill_phy(dev);
@@ -1652,7 +1652,7 @@ regs_fail:
static void tsi108_timed_checker(struct timer_list *t)
{
- struct tsi108_prv_data *data = from_timer(data, t, timer);
+ struct tsi108_prv_data *data = timer_container_of(data, t, timer);
struct net_device *dev = data->dev;
tsi108_check_phy(dev);
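
(The tsi108 hunk above moves from the old del_timer_sync()/from_timer() helpers to the renamed timer_delete_sync()/timer_container_of() API. The following minimal sketch of that pattern is illustrative only and not part of the patch; the foo_* names are hypothetical.)

#include <linux/timer.h>
#include <linux/jiffies.h>

struct foo_priv {
	struct timer_list poll_timer;
};

static void foo_poll(struct timer_list *t)
{
	/* recover the enclosing structure from the timer pointer */
	struct foo_priv *priv = timer_container_of(priv, t, poll_timer);

	/* ... periodic work ..., then re-arm */
	mod_timer(&priv->poll_timer, jiffies + HZ);
}

static void foo_start(struct foo_priv *priv)
{
	timer_setup(&priv->poll_timer, foo_poll, 0);
	mod_timer(&priv->poll_timer, jiffies + HZ);
}

static void foo_stop(struct foo_priv *priv)
{
	/* wait for a running callback to finish before tearing down */
	timer_delete_sync(&priv->poll_timer);
}
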
diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c
index a04d4073def9..a75bca1243e3 100644
--- a/drivers/net/ethernet/vertexcom/mse102x.c
+++ b/drivers/net/ethernet/vertexcom/mse102x.c
@@ -6,7 +6,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/if_vlan.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
@@ -15,6 +17,7 @@
#include <linux/cache.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/string_choices.h>
#include <linux/spi/spi.h>
#include <linux/of_net.h>
@@ -33,7 +36,7 @@
#define CMD_CTR (0x2 << CMD_SHIFT)
#define CMD_MASK GENMASK(15, CMD_SHIFT)
-#define LEN_MASK GENMASK(CMD_SHIFT - 1, 0)
+#define LEN_MASK GENMASK(CMD_SHIFT - 2, 0)
#define DET_CMD_LEN 4
#define DET_SOF_LEN 2
@@ -44,7 +47,6 @@
struct mse102x_stats {
u64 xfer_err;
- u64 invalid_cmd;
u64 invalid_ctr;
u64 invalid_dft;
u64 invalid_len;
@@ -55,7 +57,6 @@ struct mse102x_stats {
static const char mse102x_gstrings_stats[][ETH_GSTRING_LEN] = {
"SPI transfer errors",
- "Invalid command",
"Invalid CTR",
"Invalid DFT",
"Invalid frame length",
@@ -84,6 +85,8 @@ struct mse102x_net_spi {
struct spi_message spi_msg;
struct spi_transfer spi_xfer;
+ bool valid_cmd_received;
+
#ifdef CONFIG_DEBUG_FS
struct dentry *device_root;
#endif
@@ -97,16 +100,18 @@ static int mse102x_info_show(struct seq_file *s, void *what)
{
struct mse102x_net_spi *mses = s->private;
- seq_printf(s, "TX ring size : %u\n",
+ seq_printf(s, "TX ring size : %u\n",
skb_queue_len(&mses->mse102x.txq));
- seq_printf(s, "IRQ : %d\n",
+ seq_printf(s, "IRQ : %d\n",
mses->spidev->irq);
- seq_printf(s, "SPI effective speed : %lu\n",
+ seq_printf(s, "SPI effective speed : %lu\n",
(unsigned long)mses->spi_xfer.effective_speed_hz);
- seq_printf(s, "SPI mode : %x\n",
+ seq_printf(s, "SPI mode : %x\n",
mses->spidev->mode);
+ seq_printf(s, "Received valid CMD once : %s\n",
+ str_yes_no(mses->valid_cmd_received));
return 0;
}
@@ -193,10 +198,10 @@ static int mse102x_rx_cmd_spi(struct mse102x_net *mse, u8 *rxb)
} else if (*cmd != cpu_to_be16(DET_CMD)) {
net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
__func__, *cmd);
- mse->stats.invalid_cmd++;
ret = -EIO;
} else {
memcpy(rxb, trx + 2, 2);
+ mses->valid_cmd_received = true;
}
return ret;
@@ -222,7 +227,7 @@ static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp,
struct mse102x_net_spi *mses = to_mse102x_spi(mse);
struct spi_transfer *xfer = &mses->spi_xfer;
struct spi_message *msg = &mses->spi_msg;
- struct sk_buff *tskb;
+ struct sk_buff *tskb = NULL;
int ret;
netif_dbg(mse, tx_queued, mse->ndev, "%s: skb %p, %d@%p\n",
@@ -235,7 +240,6 @@ static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp,
if (!tskb)
return -ENOMEM;
- dev_kfree_skb(txp);
txp = tskb;
}
@@ -257,11 +261,13 @@ static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp,
mse->stats.xfer_err++;
}
+ dev_kfree_skb(tskb);
+
return ret;
}
static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff,
- unsigned int frame_len)
+ unsigned int frame_len, bool drop)
{
struct mse102x_net_spi *mses = to_mse102x_spi(mse);
struct spi_transfer *xfer = &mses->spi_xfer;
@@ -279,6 +285,9 @@ static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff,
netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n",
__func__, ret);
mse->stats.xfer_err++;
+ } else if (drop) {
+ netdev_dbg(mse->ndev, "%s: Drop frame\n", __func__);
+ ret = -EINVAL;
} else if (*sof != cpu_to_be16(DET_SOF)) {
netdev_dbg(mse->ndev, "%s: SPI start of frame is invalid (0x%04x)\n",
__func__, *sof);
@@ -301,61 +310,60 @@ static void mse102x_dump_packet(const char *msg, int len, const char *data)
data, len, true);
}
-static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
+static irqreturn_t mse102x_rx_pkt_spi(struct mse102x_net *mse)
{
struct sk_buff *skb;
unsigned int rxalign;
unsigned int rxlen;
+ bool drop = false;
__be16 rx = 0;
u16 cmd_resp;
u8 *rxpkt;
- int ret;
mse102x_tx_cmd_spi(mse, CMD_CTR);
- ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx);
- cmd_resp = be16_to_cpu(rx);
-
- if (ret || ((cmd_resp & CMD_MASK) != CMD_RTS)) {
+ if (mse102x_rx_cmd_spi(mse, (u8 *)&rx)) {
usleep_range(50, 100);
+ return IRQ_NONE;
+ }
- mse102x_tx_cmd_spi(mse, CMD_CTR);
- ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx);
- if (ret)
- return;
-
- cmd_resp = be16_to_cpu(rx);
- if ((cmd_resp & CMD_MASK) != CMD_RTS) {
- net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
- __func__, cmd_resp);
- mse->stats.invalid_rts++;
- return;
- }
-
- net_dbg_ratelimited("%s: Unexpected response to first CMD\n",
- __func__);
+ cmd_resp = be16_to_cpu(rx);
+ if ((cmd_resp & CMD_MASK) != CMD_RTS) {
+ net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
+ __func__, cmd_resp);
+ mse->stats.invalid_rts++;
+ drop = true;
+ goto drop;
}
rxlen = cmd_resp & LEN_MASK;
- if (!rxlen) {
- net_dbg_ratelimited("%s: No frame length defined\n", __func__);
+ if (rxlen < ETH_ZLEN || rxlen > VLAN_ETH_FRAME_LEN) {
+ net_dbg_ratelimited("%s: Invalid frame length: %d\n", __func__,
+ rxlen);
mse->stats.invalid_len++;
- return;
+ drop = true;
}
+ /* In case of an invalid CMD_RTS, the frame must be consumed anyway.
+ * So assume the maximum possible frame length.
+ */
+drop:
+ if (drop)
+ rxlen = VLAN_ETH_FRAME_LEN;
+
rxalign = ALIGN(rxlen + DET_SOF_LEN + DET_DFT_LEN, 4);
skb = netdev_alloc_skb_ip_align(mse->ndev, rxalign);
if (!skb)
- return;
+ return IRQ_NONE;
/* 2 bytes Start of frame (before ethernet header)
* 2 bytes Data frame tail (after ethernet frame)
* They are copied, but ignored.
*/
rxpkt = skb_put(skb, rxlen) - DET_SOF_LEN;
- if (mse102x_rx_frame_spi(mse, rxpkt, rxlen)) {
+ if (mse102x_rx_frame_spi(mse, rxpkt, rxlen, drop)) {
mse->ndev->stats.rx_errors++;
dev_kfree_skb(skb);
- return;
+ return IRQ_HANDLED;
}
if (netif_msg_pktdata(mse))
@@ -366,6 +374,8 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
mse->ndev->stats.rx_packets++;
mse->ndev->stats.rx_bytes += rxlen;
+
+ return IRQ_HANDLED;
}
static int mse102x_tx_pkt_spi(struct mse102x_net *mse, struct sk_buff *txb,
@@ -436,13 +446,15 @@ static void mse102x_tx_work(struct work_struct *work)
mse = &mses->mse102x;
while ((txb = skb_dequeue(&mse->txq))) {
+ unsigned int len = max_t(unsigned int, txb->len, ETH_ZLEN);
+
mutex_lock(&mses->lock);
ret = mse102x_tx_pkt_spi(mse, txb, work_timeout);
mutex_unlock(&mses->lock);
if (ret) {
mse->ndev->stats.tx_dropped++;
} else {
- mse->ndev->stats.tx_bytes += txb->len;
+ mse->ndev->stats.tx_bytes += len;
mse->ndev->stats.tx_packets++;
}
@@ -495,19 +507,36 @@ static irqreturn_t mse102x_irq(int irq, void *_mse)
{
struct mse102x_net *mse = _mse;
struct mse102x_net_spi *mses = to_mse102x_spi(mse);
+ irqreturn_t ret;
mutex_lock(&mses->lock);
- mse102x_rx_pkt_spi(mse);
+ ret = mse102x_rx_pkt_spi(mse);
mutex_unlock(&mses->lock);
- return IRQ_HANDLED;
+ return ret;
}
static int mse102x_net_open(struct net_device *ndev)
{
+ struct irq_data *irq_data = irq_get_irq_data(ndev->irq);
struct mse102x_net *mse = netdev_priv(ndev);
+ struct mse102x_net_spi *mses = to_mse102x_spi(mse);
int ret;
+ if (!irq_data) {
+ netdev_err(ndev, "Invalid IRQ: %d\n", ndev->irq);
+ return -EINVAL;
+ }
+
+ switch (irqd_get_trigger_type(irq_data)) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ case IRQ_TYPE_LEVEL_LOW:
+ break;
+ default:
+ netdev_warn_once(ndev, "Only IRQ type level recommended, please update your device tree firmware.\n");
+ break;
+ }
+
ret = request_threaded_irq(ndev->irq, NULL, mse102x_irq, IRQF_ONESHOT,
ndev->name, mse);
if (ret < 0) {
@@ -521,6 +550,14 @@ static int mse102x_net_open(struct net_device *ndev)
netif_carrier_on(ndev);
+ /* The SPI interrupt can get stuck in case of pending packet(s).
+ * So poll for possible packet(s) to re-arm the interrupt.
+ */
+ mutex_lock(&mses->lock);
+ if (mse102x_rx_pkt_spi(mse) == IRQ_NONE)
+ mse102x_rx_pkt_spi(mse);
+ mutex_unlock(&mses->lock);
+
netif_dbg(mse, ifup, ndev, "network device up\n");
return 0;
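
(The mse102x changes above make the RX path return an irqreturn_t so the threaded handler can report IRQ_NONE when no valid command was pending, and poll once at open time to re-arm a possibly stuck level-triggered interrupt. A sketch of the underlying request_threaded_irq()/IRQF_ONESHOT pattern, with hypothetical bar_* names, follows for reference only.)

#include <linux/interrupt.h>

struct bar_priv {
	bool has_work;			/* stand-in for real device state */
};

static void bar_process_rx(struct bar_priv *priv)
{
	priv->has_work = false;		/* stand-in for the real RX path */
}

static irqreturn_t bar_thread_fn(int irq, void *dev_id)
{
	struct bar_priv *priv = dev_id;

	if (!priv->has_work)
		return IRQ_NONE;	/* nothing pending: spurious wake-up */

	bar_process_rx(priv);
	return IRQ_HANDLED;
}

static int bar_request_irq(struct bar_priv *priv, int irq, const char *name)
{
	/* no hard handler; IRQF_ONESHOT keeps the line masked until the
	 * threaded handler returns, which suits slow SPI transfers
	 */
	return request_threaded_irq(irq, NULL, bar_thread_fn,
				    IRQF_ONESHOT, name, priv);
}
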
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 894911f3d560..e56ebbdd428d 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1568,7 +1568,7 @@ static void init_registers(struct net_device *dev)
if (rp->quirks & rqMgmt)
rhine_init_cam_filter(dev);
- napi_enable(&rp->napi);
+ napi_enable_locked(&rp->napi);
iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
@@ -1696,7 +1696,10 @@ static int rhine_open(struct net_device *dev)
rhine_power_init(dev);
rhine_chip_reset(dev);
rhine_task_enable(rp);
+
+ netdev_lock(dev);
init_registers(dev);
+ netdev_unlock(dev);
netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
__func__, ioread16(ioaddr + ChipCmd),
@@ -1727,6 +1730,8 @@ static void rhine_reset_task(struct work_struct *work)
napi_disable(&rp->napi);
netif_tx_disable(dev);
+
+ netdev_lock(dev);
spin_lock_bh(&rp->lock);
/* clear all descriptors */
@@ -1740,6 +1745,7 @@ static void rhine_reset_task(struct work_struct *work)
init_registers(dev);
spin_unlock_bh(&rp->lock);
+ netdev_unlock(dev);
netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
@@ -2541,9 +2547,12 @@ static int rhine_resume(struct device *device)
alloc_tbufs(dev);
rhine_reset_rbufs(rp);
rhine_task_enable(rp);
+
+ netdev_lock(dev);
spin_lock_bh(&rp->lock);
init_registers(dev);
spin_unlock_bh(&rp->lock);
+ netdev_unlock(dev);
netif_device_attach(dev);
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index dd4a07c97eee..5aa93144a4f5 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2320,7 +2320,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
if (ret < 0)
goto out_free_tmp_vptr_1;
- napi_disable(&vptr->napi);
+ netdev_lock(dev);
+ napi_disable_locked(&vptr->napi);
spin_lock_irqsave(&vptr->lock, flags);
@@ -2342,12 +2343,13 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
velocity_give_many_rx_descs(vptr);
- napi_enable(&vptr->napi);
+ napi_enable_locked(&vptr->napi);
mac_enable_int(vptr->mac_regs);
netif_start_queue(dev);
spin_unlock_irqrestore(&vptr->lock, flags);
+ netdev_unlock(dev);
velocity_free_rings(tmp_vptr);
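
(The via-rhine and via-velocity hunks above wrap the NAPI enable/disable calls in the netdev instance lock and switch to the *_locked helpers. A minimal sketch of that locking pattern, with a hypothetical baz_* driver, is shown for illustration only.)

#include <linux/netdevice.h>

static void baz_restart_rings(struct net_device *dev, struct napi_struct *napi)
{
	netdev_lock(dev);
	napi_disable_locked(napi);

	/* ... reprogram descriptor rings and registers while quiesced ... */

	napi_enable_locked(napi);
	netdev_unlock(dev);
}
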
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
index e46ccebcfd22..d138dea7d208 100644
--- a/drivers/net/ethernet/wangxun/Kconfig
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -18,13 +18,16 @@ if NET_VENDOR_WANGXUN
config LIBWX
tristate
+ depends on PTP_1588_CLOCK_OPTIONAL
select PAGE_POOL
+ select DIMLIB
help
Common library for Wangxun(R) Ethernet drivers.
config NGBE
tristate "Wangxun(R) GbE PCI Express adapters support"
depends on PCI
+ depends on PTP_1588_CLOCK_OPTIONAL
select LIBWX
select PHYLINK
help
@@ -38,10 +41,11 @@ config NGBE
will be called ngbe.
config TXGBE
- tristate "Wangxun(R) 10GbE PCI Express adapters support"
+ tristate "Wangxun(R) 10/25/40GbE PCI Express adapters support"
depends on PCI
depends on COMMON_CLK
depends on I2C_DESIGNWARE_PLATFORM
+ depends on PTP_1588_CLOCK_OPTIONAL
select MARVELL_10G_PHY
select REGMAP
select PHYLINK
@@ -52,7 +56,7 @@ config TXGBE
select PCS_XPCS
select LIBWX
help
- This driver supports Wangxun(R) 10GbE PCI Express family of
+ This driver supports Wangxun(R) 10/25/40GbE PCI Express family of
adapters.
More specific information on configuring the driver is in
@@ -61,4 +65,39 @@ config TXGBE
To compile this driver as a module, choose M here. The module
will be called txgbe.
+config TXGBEVF
+ tristate "Wangxun(R) 10/25/40G Virtual Function Ethernet support"
+ depends on PCI
+ depends on PCI_MSI
+ depends on PTP_1588_CLOCK_OPTIONAL
+ select LIBWX
+ select PHYLINK
+ help
+ This driver supports virtual functions for SP1000A, WX1820AL,
+ WX5XXX, WX5XXXAL.
+
+ This driver was formerly named txgbevf.
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/device_drivers/ethernet/wangxun/txgbevf.rst>.
+
+ To compile this driver as a module, choose M here. MSI-X interrupt
+ support is required for this driver to work correctly.
+
+config NGBEVF
+ tristate "Wangxun(R) GbE Virtual Function Ethernet support"
+ depends on PCI_MSI
+ depends on PTP_1588_CLOCK_OPTIONAL
+ select LIBWX
+ help
+ This driver supports virtual functions for WX1860, WX1860AL.
+
+ This driver was formerly named ngbevf.
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/device_drivers/ethernet/wangxun/ngbevf.rst>.
+
+ To compile this driver as a module, choose M here. MSI-X interrupt
+ support is required for this driver to work correctly.
+
endif # NET_VENDOR_WANGXUN
diff --git a/drivers/net/ethernet/wangxun/Makefile b/drivers/net/ethernet/wangxun/Makefile
index ca19311dbe38..0a71a710b717 100644
--- a/drivers/net/ethernet/wangxun/Makefile
+++ b/drivers/net/ethernet/wangxun/Makefile
@@ -5,4 +5,6 @@
obj-$(CONFIG_LIBWX) += libwx/
obj-$(CONFIG_TXGBE) += txgbe/
+obj-$(CONFIG_TXGBEVF) += txgbevf/
obj-$(CONFIG_NGBE) += ngbe/
+obj-$(CONFIG_NGBEVF) += ngbevf/
diff --git a/drivers/net/ethernet/wangxun/libwx/Makefile b/drivers/net/ethernet/wangxun/libwx/Makefile
index 42ccd6e4052e..a71b0ad77de3 100644
--- a/drivers/net/ethernet/wangxun/libwx/Makefile
+++ b/drivers/net/ethernet/wangxun/libwx/Makefile
@@ -4,4 +4,5 @@
obj-$(CONFIG_LIBWX) += libwx.o
-libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o
+libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o wx_ptp.o wx_mbx.o wx_sriov.o
+libwx-objs += wx_vf.o wx_vf_lib.o wx_vf_common.o
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
index abe5921dde02..f362e51c73ee 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
@@ -41,6 +41,9 @@ static const struct wx_stats wx_gstrings_stats[] = {
WX_STAT("rx_csum_offload_good_count", hw_csum_rx_good),
WX_STAT("rx_csum_offload_errors", hw_csum_rx_error),
WX_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
+ WX_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
+ WX_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
+ WX_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
};
static const struct wx_stats wx_gstrings_fdir_stats[] = {
@@ -48,6 +51,11 @@ static const struct wx_stats wx_gstrings_fdir_stats[] = {
WX_STAT("fdir_miss", stats.fdirmiss),
};
+static const struct wx_stats wx_gstrings_rsc_stats[] = {
+ WX_STAT("rsc_aggregated", rsc_count),
+ WX_STAT("rsc_flushed", rsc_flush),
+};
+
/* drivers allocates num_tx_queues and num_rx_queues symmetrically so
* we set the num_rx_queues to evaluate to num_tx_queues. This is
* used because we do not have a good way to get the max number of
@@ -61,16 +69,21 @@ static const struct wx_stats wx_gstrings_fdir_stats[] = {
(sizeof(struct wx_queue_stats) / sizeof(u64)))
#define WX_GLOBAL_STATS_LEN ARRAY_SIZE(wx_gstrings_stats)
#define WX_FDIR_STATS_LEN ARRAY_SIZE(wx_gstrings_fdir_stats)
+#define WX_RSC_STATS_LEN ARRAY_SIZE(wx_gstrings_rsc_stats)
#define WX_STATS_LEN (WX_GLOBAL_STATS_LEN + WX_QUEUE_STATS_LEN)
int wx_get_sset_count(struct net_device *netdev, int sset)
{
struct wx *wx = netdev_priv(netdev);
+ int len = WX_STATS_LEN;
switch (sset) {
case ETH_SS_STATS:
- return (wx->mac.type == wx_mac_sp) ?
- WX_STATS_LEN + WX_FDIR_STATS_LEN : WX_STATS_LEN;
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
+ len += WX_FDIR_STATS_LEN;
+ if (test_bit(WX_FLAG_RSC_CAPABLE, wx->flags))
+ len += WX_RSC_STATS_LEN;
+ return len;
default:
return -EOPNOTSUPP;
}
@@ -87,10 +100,14 @@ void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
case ETH_SS_STATS:
for (i = 0; i < WX_GLOBAL_STATS_LEN; i++)
ethtool_puts(&p, wx_gstrings_stats[i].stat_string);
- if (wx->mac.type == wx_mac_sp) {
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
for (i = 0; i < WX_FDIR_STATS_LEN; i++)
ethtool_puts(&p, wx_gstrings_fdir_stats[i].stat_string);
}
+ if (test_bit(WX_FLAG_RSC_CAPABLE, wx->flags)) {
+ for (i = 0; i < WX_RSC_STATS_LEN; i++)
+ ethtool_puts(&p, wx_gstrings_rsc_stats[i].stat_string);
+ }
for (i = 0; i < netdev->num_tx_queues; i++) {
ethtool_sprintf(&p, "tx_queue_%u_packets", i);
ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
@@ -121,13 +138,20 @@ void wx_get_ethtool_stats(struct net_device *netdev,
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- if (wx->mac.type == wx_mac_sp) {
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
for (k = 0; k < WX_FDIR_STATS_LEN; k++) {
p = (char *)wx + wx_gstrings_fdir_stats[k].stat_offset;
data[i++] = *(u64 *)p;
}
}
+ if (test_bit(WX_FLAG_RSC_CAPABLE, wx->flags)) {
+ for (k = 0; k < WX_RSC_STATS_LEN; k++) {
+ p = (char *)wx + wx_gstrings_rsc_stats[k].stat_offset;
+ data[i++] = *(u64 *)p;
+ }
+ }
+
for (j = 0; j < netdev->num_tx_queues; j++) {
ring = wx->tx_ring[j];
if (!ring) {
@@ -196,7 +220,7 @@ void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
unsigned int stats_len = WX_STATS_LEN;
struct wx *wx = netdev_priv(netdev);
- if (wx->mac.type == wx_mac_sp)
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
stats_len += WX_FDIR_STATS_LEN;
strscpy(info->driver, wx->driver_name, sizeof(info->driver));
@@ -288,6 +312,11 @@ int wx_get_coalesce(struct net_device *netdev,
else
ec->rx_coalesce_usecs = wx->rx_itr_setting >> 2;
+ if (wx->adaptive_itr) {
+ ec->use_adaptive_rx_coalesce = 1;
+ ec->use_adaptive_tx_coalesce = 1;
+ }
+
/* if in mixed tx/rx queues per vector mode, report only rx settings */
if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)
return 0;
@@ -302,6 +331,40 @@ int wx_get_coalesce(struct net_device *netdev,
}
EXPORT_SYMBOL(wx_get_coalesce);
+static void wx_update_rsc(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ bool need_reset = false;
+
+ /* nothing to do if LRO or RSC are not enabled */
+ if (!test_bit(WX_FLAG_RSC_CAPABLE, wx->flags) ||
+ !(netdev->features & NETIF_F_LRO))
+ return;
+
+ /* check the feature flag value and enable RSC if necessary */
+ if (wx->rx_itr_setting == 1 ||
+ wx->rx_itr_setting > WX_MIN_RSC_ITR) {
+ if (!test_bit(WX_FLAG_RSC_ENABLED, wx->flags)) {
+ set_bit(WX_FLAG_RSC_ENABLED, wx->flags);
+ dev_info(&wx->pdev->dev,
+ "rx-usecs value high enough to re-enable RSC\n");
+
+ need_reset = true;
+ }
+ /* if interrupt rate is too high then disable RSC */
+ } else if (test_bit(WX_FLAG_RSC_ENABLED, wx->flags)) {
+ clear_bit(WX_FLAG_RSC_ENABLED, wx->flags);
+ dev_info(&wx->pdev->dev,
+ "rx-usecs set too low, disabling RSC\n");
+
+ need_reset = true;
+ }
+
+ /* reset the device to apply the new RSC setting */
+ if (need_reset && wx->do_reset)
+ wx->do_reset(netdev);
+}
+
int wx_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
@@ -319,42 +382,66 @@ int wx_set_coalesce(struct net_device *netdev,
return -EOPNOTSUPP;
}
- if (ec->tx_max_coalesced_frames_irq)
- wx->tx_work_limit = ec->tx_max_coalesced_frames_irq;
+ if (ec->tx_max_coalesced_frames_irq > U16_MAX ||
+ !ec->tx_max_coalesced_frames_irq)
+ return -EINVAL;
+
+ wx->tx_work_limit = ec->tx_max_coalesced_frames_irq;
- if (wx->mac.type == wx_mac_sp)
+ switch (wx->mac.type) {
+ case wx_mac_sp:
max_eitr = WX_SP_MAX_EITR;
- else
+ rx_itr_param = WX_20K_ITR;
+ tx_itr_param = WX_12K_ITR;
+ break;
+ case wx_mac_aml:
+ case wx_mac_aml40:
+ max_eitr = WX_AML_MAX_EITR;
+ rx_itr_param = WX_20K_ITR;
+ tx_itr_param = WX_12K_ITR;
+ break;
+ default:
max_eitr = WX_EM_MAX_EITR;
+ rx_itr_param = WX_7K_ITR;
+ tx_itr_param = WX_7K_ITR;
+ break;
+ }
if ((ec->rx_coalesce_usecs > (max_eitr >> 2)) ||
(ec->tx_coalesce_usecs > (max_eitr >> 2)))
return -EINVAL;
+ if (ec->use_adaptive_rx_coalesce) {
+ wx->adaptive_itr = true;
+ wx->rx_itr_setting = 1;
+ wx->tx_itr_setting = 1;
+ return 0;
+ }
+
if (ec->rx_coalesce_usecs > 1)
wx->rx_itr_setting = ec->rx_coalesce_usecs << 2;
else
wx->rx_itr_setting = ec->rx_coalesce_usecs;
- if (wx->rx_itr_setting == 1)
- rx_itr_param = WX_20K_ITR;
- else
- rx_itr_param = wx->rx_itr_setting;
-
if (ec->tx_coalesce_usecs > 1)
wx->tx_itr_setting = ec->tx_coalesce_usecs << 2;
else
wx->tx_itr_setting = ec->tx_coalesce_usecs;
- if (wx->tx_itr_setting == 1) {
- if (wx->mac.type == wx_mac_sp)
- tx_itr_param = WX_12K_ITR;
- else
- tx_itr_param = WX_20K_ITR;
- } else {
- tx_itr_param = wx->tx_itr_setting;
+ if (wx->adaptive_itr) {
+ wx->adaptive_itr = false;
+ wx->rx_itr_setting = rx_itr_param;
+ wx->tx_itr_setting = tx_itr_param;
+ } else if (wx->rx_itr_setting == 1 || wx->tx_itr_setting == 1) {
+ wx->adaptive_itr = true;
}
+ if (wx->rx_itr_setting != 1)
+ rx_itr_param = wx->rx_itr_setting;
+
+ if (wx->tx_itr_setting != 1)
+ tx_itr_param = wx->tx_itr_setting;
+
/* mixed Rx/Tx */
if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)
wx->tx_itr_setting = wx->rx_itr_setting;
@@ -370,6 +457,8 @@ int wx_set_coalesce(struct net_device *netdev,
wx_write_eitr(q_vector);
}
+ wx_update_rsc(wx);
+
return 0;
}
EXPORT_SYMBOL(wx_set_coalesce);
@@ -383,7 +472,7 @@ static unsigned int wx_max_channels(struct wx *wx)
max_combined = 1;
} else {
/* support up to max allowed queues with RSS */
- if (wx->mac.type == wx_mac_sp)
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
max_combined = 63;
else
max_combined = 8;
@@ -437,6 +526,142 @@ int wx_set_channels(struct net_device *dev,
}
EXPORT_SYMBOL(wx_set_channels);
+u32 wx_rss_indir_size(struct net_device *netdev)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ return wx_rss_indir_tbl_entries(wx);
+}
+EXPORT_SYMBOL(wx_rss_indir_size);
+
+u32 wx_get_rxfh_key_size(struct net_device *netdev)
+{
+ return WX_RSS_KEY_SIZE;
+}
+EXPORT_SYMBOL(wx_get_rxfh_key_size);
+
+static void wx_get_reta(struct wx *wx, u32 *indir)
+{
+ u32 reta_size = wx_rss_indir_tbl_entries(wx);
+ u16 rss_m = wx->ring_feature[RING_F_RSS].mask;
+
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
+ rss_m = wx->ring_feature[RING_F_RSS].indices - 1;
+
+ for (u32 i = 0; i < reta_size; i++)
+ indir[i] = wx->rss_indir_tbl[i] & rss_m;
+}
+
+int wx_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+
+ if (rxfh->indir)
+ wx_get_reta(wx, rxfh->indir);
+
+ if (rxfh->key)
+ memcpy(rxfh->key, wx->rss_key, WX_RSS_KEY_SIZE);
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_get_rxfh);
+
+int wx_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct wx *wx = netdev_priv(netdev);
+ u32 reta_entries, i;
+
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ reta_entries = wx_rss_indir_tbl_entries(wx);
+ /* Fill out the redirection table */
+ if (rxfh->indir) {
+ for (i = 0; i < reta_entries; i++)
+ wx->rss_indir_tbl[i] = rxfh->indir[i];
+
+ wx_store_reta(wx);
+ }
+
+ /* Fill out the rss hash key */
+ if (rxfh->key) {
+ memcpy(wx->rss_key, rxfh->key, WX_RSS_KEY_SIZE);
+ wx_store_rsskey(wx);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_set_rxfh);
+
+static const struct wx_rss_flow_map rss_flow_table[] = {
+ { TCP_V4_FLOW, RXH_L4_B_0_1 | RXH_L4_B_2_3, WX_RSS_FIELD_IPV4_TCP },
+ { TCP_V6_FLOW, RXH_L4_B_0_1 | RXH_L4_B_2_3, WX_RSS_FIELD_IPV6_TCP },
+ { UDP_V4_FLOW, RXH_L4_B_0_1 | RXH_L4_B_2_3, WX_RSS_FIELD_IPV4_UDP },
+ { UDP_V6_FLOW, RXH_L4_B_0_1 | RXH_L4_B_2_3, WX_RSS_FIELD_IPV6_UDP },
+ { SCTP_V4_FLOW, RXH_L4_B_0_1 | RXH_L4_B_2_3, WX_RSS_FIELD_IPV4_SCTP },
+ { SCTP_V6_FLOW, RXH_L4_B_0_1 | RXH_L4_B_2_3, WX_RSS_FIELD_IPV6_SCTP },
+};
+
+int wx_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *nfc)
+{
+ struct wx *wx = netdev_priv(dev);
+
+ nfc->data = RXH_IP_SRC | RXH_IP_DST;
+
+ for (u32 i = 0; i < ARRAY_SIZE(rss_flow_table); i++) {
+ const struct wx_rss_flow_map *entry = &rss_flow_table[i];
+
+ if (entry->flow_type == nfc->flow_type) {
+ if (wx->rss_flags & entry->flag)
+ nfc->data |= entry->data;
+ break;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_get_rxfh_fields);
+
+int wx_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
+{
+ struct wx *wx = netdev_priv(dev);
+ u8 flags = wx->rss_flags;
+
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+
+ for (u32 i = 0; i < ARRAY_SIZE(rss_flow_table); i++) {
+ const struct wx_rss_flow_map *entry = &rss_flow_table[i];
+
+ if (entry->flow_type == nfc->flow_type) {
+ if (nfc->data & entry->data)
+ flags |= entry->flag;
+ else
+ flags &= ~entry->flag;
+
+ if (flags != wx->rss_flags) {
+ wx->rss_flags = flags;
+ wx_config_rss_field(wx);
+ }
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(wx_set_rxfh_fields);
+
u32 wx_get_msglevel(struct net_device *netdev)
{
struct wx *wx = netdev_priv(netdev);
@@ -452,3 +677,86 @@ void wx_set_msglevel(struct net_device *netdev, u32 data)
wx->msg_enable = data;
}
EXPORT_SYMBOL(wx_set_msglevel);
+
+int wx_get_ts_info(struct net_device *dev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct wx *wx = netdev_priv(dev);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (wx->ptp_clock)
+ info->phc_index = ptp_clock_index(wx->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_get_ts_info);
+
+void wx_get_ptp_stats(struct net_device *dev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct wx *wx = netdev_priv(dev);
+
+ if (wx->ptp_clock) {
+ ts_stats->pkts = wx->tx_hwtstamp_pkts;
+ ts_stats->lost = wx->tx_hwtstamp_timeouts +
+ wx->tx_hwtstamp_skipped +
+ wx->rx_hwtstamp_cleared;
+ ts_stats->err = wx->tx_hwtstamp_errors;
+ }
+}
+EXPORT_SYMBOL(wx_get_ptp_stats);
+
+static int wx_get_link_ksettings_vf(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.port = PORT_NONE;
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.speed = wx->speed;
+
+ return 0;
+}
+
+static const struct ethtool_ops wx_ethtool_ops_vf = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
+ .get_drvinfo = wx_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = wx_get_ringparam,
+ .get_msglevel = wx_get_msglevel,
+ .get_coalesce = wx_get_coalesce,
+ .get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = wx_get_link_ksettings_vf,
+};
+
+void wx_set_ethtool_ops_vf(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &wx_ethtool_ops_vf;
+}
+EXPORT_SYMBOL(wx_set_ethtool_ops_vf);
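
(The wx_ethtool.c additions above include RSS redirection-table and hash-key handling through the ethtool_rxfh_param interface. The sketch below illustrates the general shape of such get_rxfh/set_rxfh callbacks; the qux_* driver, its sizes and fields are hypothetical and this is not the wangxun implementation.)

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

#define QUX_RETA_SIZE	128
#define QUX_KEY_SIZE	40

struct qux_priv {
	u8 reta[QUX_RETA_SIZE];
	u8 rss_key[QUX_KEY_SIZE];
};

static int qux_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh)
{
	struct qux_priv *priv = netdev_priv(dev);

	rxfh->hfunc = ETH_RSS_HASH_TOP;
	if (rxfh->indir)
		for (u32 i = 0; i < QUX_RETA_SIZE; i++)
			rxfh->indir[i] = priv->reta[i];
	if (rxfh->key)
		memcpy(rxfh->key, priv->rss_key, QUX_KEY_SIZE);

	return 0;
}

static int qux_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
			struct netlink_ext_ack *extack)
{
	struct qux_priv *priv = netdev_priv(dev);

	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (rxfh->indir)
		for (u32 i = 0; i < QUX_RETA_SIZE; i++)
			priv->reta[i] = rxfh->indir[i];
	if (rxfh->key)
		memcpy(priv->rss_key, rxfh->key, QUX_KEY_SIZE);

	/* ... program the table and key into hardware here ... */

	return 0;
}
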
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
index 600c3b597d1a..727093970462 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
@@ -38,6 +38,23 @@ void wx_get_channels(struct net_device *dev,
struct ethtool_channels *ch);
int wx_set_channels(struct net_device *dev,
struct ethtool_channels *ch);
+u32 wx_rss_indir_size(struct net_device *netdev);
+u32 wx_get_rxfh_key_size(struct net_device *netdev);
+int wx_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh);
+int wx_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack);
+int wx_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd);
+int wx_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack);
u32 wx_get_msglevel(struct net_device *netdev);
void wx_set_msglevel(struct net_device *netdev, u32 data);
+int wx_get_ts_info(struct net_device *dev,
+ struct kernel_ethtool_ts_info *info);
+void wx_get_ptp_stats(struct net_device *dev,
+ struct ethtool_ts_stats *ts_stats);
+void wx_set_ethtool_ops_vf(struct net_device *netdev);
#endif /* _WX_ETHTOOL_H_ */
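
(The wx_hw.c changes that follow add a second, ring-based path for firmware mailbox commands and poll for the firmware reply with read_poll_timeout(). A small sketch of that polling helper, with hypothetical quux_* names and register layout, is included for orientation only.)

#include <linux/iopoll.h>

static bool quux_fw_replied(void __iomem *mbox, u32 expected)
{
	return readl(mbox) == expected;
}

static int quux_wait_for_fw(void __iomem *mbox, u32 expected)
{
	bool done;

	/* poll every 2 ms, give up after one second */
	return read_poll_timeout(quux_fw_replied, done, done, 2000,
				 1000000, false, mbox, expected);
}
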
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index 1bf9c38e4125..58b8300e3d2c 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -10,6 +10,8 @@
#include "wx_type.h"
#include "wx_lib.h"
+#include "wx_sriov.h"
+#include "wx_vf.h"
#include "wx_hw.h"
static int wx_phy_read_reg_mdi(struct mii_bus *bus, int phy_addr, int devnum, int regnum)
@@ -112,7 +114,7 @@ static void wx_intr_disable(struct wx *wx, u64 qmask)
if (mask)
wr32(wx, WX_PX_IMS(0), mask);
- if (wx->mac.type == wx_mac_sp) {
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
mask = (qmask >> 32);
if (mask)
wr32(wx, WX_PX_IMS(1), mask);
@@ -123,10 +125,16 @@ void wx_intr_enable(struct wx *wx, u64 qmask)
{
u32 mask;
+ if (wx->pdev->is_virtfn) {
+ wr32(wx, WX_VXIMC, qmask);
+ return;
+ }
+
mask = (qmask & U32_MAX);
if (mask)
wr32(wx, WX_PX_IMC(0), mask);
- if (wx->mac.type == wx_mac_sp) {
+
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
mask = (qmask >> 32);
if (mask)
wr32(wx, WX_PX_IMC(1), mask);
@@ -278,22 +286,8 @@ static int wx_acquire_sw_sync(struct wx *wx, u32 mask)
return ret;
}
-/**
- * wx_host_interface_command - Issue command to manageability block
- * @wx: pointer to the HW structure
- * @buffer: contains the command to write and where the return status will
- * be placed
- * @length: length of buffer, must be multiple of 4 bytes
- * @timeout: time in ms to wait for command completion
- * @return_data: read and return data from the buffer (true) or not (false)
- * Needed because FW structures are big endian and decoding of
- * these fields can be 8 bit or 16 bit based on command. Decoding
- * is not easily understood without making a table of commands.
- * So we will leave this up to the caller to read back the data
- * in these cases.
- **/
-int wx_host_interface_command(struct wx *wx, u32 *buffer,
- u32 length, u32 timeout, bool return_data)
+static int wx_host_interface_command_s(struct wx *wx, u32 *buffer,
+ u32 length, u32 timeout, bool return_data)
{
u32 hdr_size = sizeof(struct wx_hic_hdr);
u32 hicr, i, bi, buf[64] = {};
@@ -301,22 +295,10 @@ int wx_host_interface_command(struct wx *wx, u32 *buffer,
u32 dword_len;
u16 buf_len;
- if (length == 0 || length > WX_HI_MAX_BLOCK_BYTE_LENGTH) {
- wx_err(wx, "Buffer length failure buffersize=%d.\n", length);
- return -EINVAL;
- }
-
status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB);
if (status != 0)
return status;
- /* Calculate length in DWORDs. We must be DWORD aligned */
- if ((length % (sizeof(u32))) != 0) {
- wx_err(wx, "Buffer length failure, not aligned to dword");
- status = -EINVAL;
- goto rel_out;
- }
-
dword_len = length >> 2;
/* The device driver writes the relevant command block
@@ -334,27 +316,25 @@ int wx_host_interface_command(struct wx *wx, u32 *buffer,
status = read_poll_timeout(rd32, hicr, hicr & WX_MNG_MBOX_CTL_FWRDY, 1000,
timeout * 1000, false, wx, WX_MNG_MBOX_CTL);
+ buf[0] = rd32(wx, WX_MNG_MBOX);
+ if ((buf[0] & 0xff0000) >> 16 == 0x80) {
+ wx_err(wx, "Unknown FW command: 0x%x\n", buffer[0] & 0xff);
+ status = -EINVAL;
+ goto rel_out;
+ }
+
/* Check command completion */
if (status) {
- wx_dbg(wx, "Command has failed with no status valid.\n");
-
- buf[0] = rd32(wx, WX_MNG_MBOX);
- if ((buffer[0] & 0xff) != (~buf[0] >> 24)) {
- status = -EINVAL;
- goto rel_out;
- }
- if ((buf[0] & 0xff0000) >> 16 == 0x80) {
- wx_dbg(wx, "It's unknown cmd.\n");
- status = -EINVAL;
- goto rel_out;
- }
-
+ wx_err(wx, "Command has failed with no status valid.\n");
wx_dbg(wx, "write value:\n");
for (i = 0; i < dword_len; i++)
wx_dbg(wx, "%x ", buffer[i]);
wx_dbg(wx, "read value:\n");
for (i = 0; i < dword_len; i++)
wx_dbg(wx, "%x ", buf[i]);
+ wx_dbg(wx, "\ncheck: %x %x\n", buffer[0] & 0xff, ~buf[0] >> 24);
+
+ goto rel_out;
}
if (!return_data)
@@ -393,8 +373,166 @@ rel_out:
wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB);
return status;
}
+
+static bool wx_poll_fw_reply(struct wx *wx, u32 *buffer, u8 send_cmd)
+{
+ u32 dword_len = sizeof(struct wx_hic_hdr) >> 2;
+ struct wx_hic_hdr *recv_hdr;
+ u32 i;
+
+ /* read hdr */
+ for (i = 0; i < dword_len; i++) {
+ buffer[i] = rd32a(wx, WX_FW2SW_MBOX, i);
+ le32_to_cpus(&buffer[i]);
+ }
+
+ /* check hdr */
+ recv_hdr = (struct wx_hic_hdr *)buffer;
+ if (recv_hdr->cmd == send_cmd &&
+ recv_hdr->index == wx->swfw_index)
+ return true;
+
+ return false;
+}
+
+static int wx_host_interface_command_r(struct wx *wx, u32 *buffer,
+ u32 length, u32 timeout, bool return_data)
+{
+ struct wx_hic_hdr *hdr = (struct wx_hic_hdr *)buffer;
+ u32 hdr_size = sizeof(struct wx_hic_hdr);
+ bool busy, reply;
+ u32 dword_len;
+ u16 buf_len;
+ int err = 0;
+ u8 send_cmd;
+ u32 i;
+
+ /* wait to get lock */
+ might_sleep();
+ err = read_poll_timeout(test_and_set_bit, busy, !busy, 1000, timeout * 1000,
+ false, WX_STATE_SWFW_BUSY, wx->state);
+ if (err)
+ return err;
+
+ /* index to unique seq id for each mbox message */
+ hdr->index = wx->swfw_index;
+ send_cmd = hdr->cmd;
+
+ dword_len = length >> 2;
+ /* write data to SW-FW mbox array */
+ for (i = 0; i < dword_len; i++) {
+ wr32a(wx, WX_SW2FW_MBOX, i, (__force u32)cpu_to_le32(buffer[i]));
+ /* write flush */
+ rd32a(wx, WX_SW2FW_MBOX, i);
+ }
+
+ /* generate interrupt to notify FW */
+ wr32m(wx, WX_SW2FW_MBOX_CMD, WX_SW2FW_MBOX_CMD_VLD, 0);
+ wr32m(wx, WX_SW2FW_MBOX_CMD, WX_SW2FW_MBOX_CMD_VLD, WX_SW2FW_MBOX_CMD_VLD);
+
+ /* polling reply from FW */
+ err = read_poll_timeout(wx_poll_fw_reply, reply, reply, 2000,
+ timeout * 1000, true, wx, buffer, send_cmd);
+ if (err) {
+ wx_err(wx, "Polling from FW messages timeout, cmd: 0x%x, index: %d\n",
+ send_cmd, wx->swfw_index);
+ goto rel_out;
+ }
+
+ if (hdr->cmd_or_resp.ret_status == 0x80) {
+ wx_err(wx, "Unknown FW command: 0x%x\n", send_cmd);
+ err = -EINVAL;
+ goto rel_out;
+ }
+
+ /* expect no reply from FW then return */
+ if (!return_data)
+ goto rel_out;
+
+ /* If there is anything in the data position, pull it in */
+ buf_len = hdr->buf_len;
+ if (buf_len == 0)
+ goto rel_out;
+
+ if (length < buf_len + hdr_size) {
+ wx_err(wx, "Buffer not large enough for reply message.\n");
+ err = -EFAULT;
+ goto rel_out;
+ }
+
+ /* Calculate length in DWORDs, add 3 for odd lengths */
+ dword_len = (buf_len + 3) >> 2;
+ for (i = hdr_size >> 2; i <= dword_len; i++) {
+ buffer[i] = rd32a(wx, WX_FW2SW_MBOX, i);
+ le32_to_cpus(&buffer[i]);
+ }
+
+rel_out:
+ /* index++, index replace wx_hic_hdr.checksum */
+ if (wx->swfw_index == WX_HIC_HDR_INDEX_MAX)
+ wx->swfw_index = 0;
+ else
+ wx->swfw_index++;
+
+ clear_bit(WX_STATE_SWFW_BUSY, wx->state);
+ return err;
+}
+
+/**
+ * wx_host_interface_command - Issue command to manageability block
+ * @wx: pointer to the HW structure
+ * @buffer: contains the command to write and where the return status will
+ * be placed
+ * @length: length of buffer, must be multiple of 4 bytes
+ * @timeout: time in ms to wait for command completion
+ * @return_data: read and return data from the buffer (true) or not (false)
+ * Needed because FW structures are big endian and decoding of
+ * these fields can be 8 bit or 16 bit based on command. Decoding
+ * is not easily understood without making a table of commands.
+ * So we will leave this up to the caller to read back the data
+ * in these cases.
+ **/
+int wx_host_interface_command(struct wx *wx, u32 *buffer,
+ u32 length, u32 timeout, bool return_data)
+{
+ if (length == 0 || length > WX_HI_MAX_BLOCK_BYTE_LENGTH) {
+ wx_err(wx, "Buffer length failure buffersize=%d.\n", length);
+ return -EINVAL;
+ }
+
+ /* Calculate length in DWORDs. We must be DWORD aligned */
+ if ((length % (sizeof(u32))) != 0) {
+ wx_err(wx, "Buffer length failure, not aligned to dword");
+ return -EINVAL;
+ }
+
+ if (test_bit(WX_FLAG_SWFW_RING, wx->flags))
+ return wx_host_interface_command_r(wx, buffer, length,
+ timeout, return_data);
+
+ return wx_host_interface_command_s(wx, buffer, length, timeout, return_data);
+}
EXPORT_SYMBOL(wx_host_interface_command);
+int wx_set_pps(struct wx *wx, bool enable, u64 nsec, u64 cycles)
+{
+ struct wx_hic_set_pps pps_cmd;
+
+ pps_cmd.hdr.cmd = FW_PPS_SET_CMD;
+ pps_cmd.hdr.buf_len = FW_PPS_SET_LEN;
+ pps_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+ pps_cmd.lan_id = wx->bus.func;
+ pps_cmd.enable = (u8)enable;
+ pps_cmd.nsec = nsec;
+ pps_cmd.cycles = cycles;
+ pps_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+
+ return wx_host_interface_command(wx, (u32 *)&pps_cmd,
+ sizeof(pps_cmd),
+ WX_HI_COMMAND_TIMEOUT,
+ false);
+}
+
/**
* wx_read_ee_hostif_data - Read EEPROM word using a host interface cmd
* assuming that the semaphore is already obtained.
@@ -425,7 +563,10 @@ static int wx_read_ee_hostif_data(struct wx *wx, u16 offset, u16 *data)
if (status != 0)
return status;
- *data = (u16)rd32a(wx, WX_MNG_MBOX, FW_NVM_DATA_OFFSET);
+ if (!test_bit(WX_FLAG_SWFW_RING, wx->flags))
+ *data = (u16)rd32a(wx, WX_MNG_MBOX, FW_NVM_DATA_OFFSET);
+ else
+ *data = (u16)rd32a(wx, WX_FW2SW_MBOX, FW_NVM_DATA_OFFSET);
return status;
}
@@ -469,6 +610,7 @@ int wx_read_ee_hostif_buffer(struct wx *wx,
u16 words_to_read;
u32 value = 0;
int status;
+ u32 mbox;
u32 i;
/* Take semaphore for the entire operation. */
@@ -501,8 +643,12 @@ int wx_read_ee_hostif_buffer(struct wx *wx,
goto out;
}
+ if (!test_bit(WX_FLAG_SWFW_RING, wx->flags))
+ mbox = WX_MNG_MBOX;
+ else
+ mbox = WX_FW2SW_MBOX;
for (i = 0; i < words_to_read; i++) {
- u32 reg = WX_MNG_MBOX + (FW_NVM_DATA_OFFSET << 2) + 2 * i;
+ u32 reg = mbox + (FW_NVM_DATA_OFFSET << 2) + 2 * i;
value = rd32(wx, reg);
data[current_word] = (u16)(value & 0xffff);
@@ -552,12 +698,18 @@ void wx_init_eeprom_params(struct wx *wx)
}
}
- if (wx->mac.type == wx_mac_sp) {
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ case wx_mac_aml:
+ case wx_mac_aml40:
if (wx_read_ee_hostif(wx, WX_SW_REGION_PTR, &data)) {
wx_err(wx, "NVM Read Error\n");
return;
}
data = data >> 1;
+ break;
+ default:
+ break;
}
eeprom->sw_region_offset = data;
@@ -618,7 +770,8 @@ static int wx_set_rar(struct wx *wx, u32 index, u8 *addr, u64 pools,
/* setup VMDq pool mapping */
wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF);
- if (wx->mac.type == wx_mac_sp)
+
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32);
/* HW expects these in little endian so we reverse the byte
@@ -757,7 +910,7 @@ void wx_init_rx_addrs(struct wx *wx)
wx_set_rar(wx, 0, wx->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV);
- if (wx->mac.type == wx_mac_sp) {
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
/* clear VMDq pool/queue selection for RAR 0 */
wx_clear_vmdq(wx, 0, WX_CLEAR_VMDQ_ALL);
}
@@ -804,11 +957,28 @@ static void wx_sync_mac_table(struct wx *wx)
}
}
+static void wx_full_sync_mac_table(struct wx *wx)
+{
+ int i;
+
+ for (i = 0; i < wx->mac.num_rar_entries; i++) {
+ if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) {
+ wx_set_rar(wx, i,
+ wx->mac_table[i].addr,
+ wx->mac_table[i].pools,
+ WX_PSR_MAC_SWC_AD_H_AV);
+ } else {
+ wx_clear_rar(wx, i);
+ }
+ wx->mac_table[i].state &= ~(WX_MAC_STATE_MODIFIED);
+ }
+}
+
/* this function destroys the first RAR entry */
void wx_mac_set_default_filter(struct wx *wx, u8 *addr)
{
memcpy(&wx->mac_table[0].addr, addr, ETH_ALEN);
- wx->mac_table[0].pools = 1ULL;
+ wx->mac_table[0].pools = BIT(VMDQ_P(0));
wx->mac_table[0].state = (WX_MAC_STATE_DEFAULT | WX_MAC_STATE_IN_USE);
wx_set_rar(wx, 0, wx->mac_table[0].addr,
wx->mac_table[0].pools,
@@ -833,7 +1003,7 @@ void wx_flush_sw_mac_table(struct wx *wx)
}
EXPORT_SYMBOL(wx_flush_sw_mac_table);
-static int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool)
+int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool)
{
u32 i;
@@ -864,7 +1034,7 @@ static int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool)
return -ENOMEM;
}
-static int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool)
+int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool)
{
u32 i;
@@ -943,7 +1113,7 @@ static int wx_write_uc_addr_list(struct net_device *netdev, int pool)
* by the MO field of the MCSTCTRL. The MO field is set during initialization
* to mc_filter_type.
**/
-static u32 wx_mta_vector(struct wx *wx, u8 *mc_addr)
+u32 wx_mta_vector(struct wx *wx, u8 *mc_addr)
{
u32 vector = 0;
@@ -1046,6 +1216,35 @@ static void wx_update_mc_addr_list(struct wx *wx, struct net_device *netdev)
wx_dbg(wx, "Update mc addr list Complete\n");
}
+static void wx_restore_vf_multicasts(struct wx *wx)
+{
+ u32 i, j, vector_bit, vector_reg;
+ struct vf_data_storage *vfinfo;
+
+ for (i = 0; i < wx->num_vfs; i++) {
+ u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(i));
+
+ vfinfo = &wx->vfinfo[i];
+ for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
+ wx->addr_ctrl.mta_in_use++;
+ vector_reg = WX_PSR_MC_TBL_REG(vfinfo->vf_mc_hashes[j]);
+ vector_bit = WX_PSR_MC_TBL_BIT(vfinfo->vf_mc_hashes[j]);
+ wr32m(wx, WX_PSR_MC_TBL(vector_reg),
+ BIT(vector_bit), BIT(vector_bit));
+ /* errata 5: maintain a copy of the reg table conf */
+ wx->mac.mta_shadow[vector_reg] |= BIT(vector_bit);
+ }
+ if (vfinfo->num_vf_mc_hashes)
+ vmolr |= WX_PSR_VM_L2CTL_ROMPE;
+ else
+ vmolr &= ~WX_PSR_VM_L2CTL_ROMPE;
+ wr32(wx, WX_PSR_VM_L2CTL(i), vmolr);
+ }
+
+ /* Restore any VF macvlans */
+ wx_full_sync_mac_table(wx);
+}
+
/**
* wx_write_mc_addr_list - write multicast addresses to MTA
* @netdev: network interface device structure
@@ -1063,6 +1262,9 @@ static int wx_write_mc_addr_list(struct net_device *netdev)
wx_update_mc_addr_list(wx, netdev);
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
+ wx_restore_vf_multicasts(wx);
+
return netdev_mc_count(netdev);
}
@@ -1083,7 +1285,7 @@ int wx_set_mac(struct net_device *netdev, void *p)
if (retval)
return retval;
- wx_del_mac_filter(wx, wx->mac.addr, 0);
+ wx_del_mac_filter(wx, wx->mac.addr, VMDQ_P(0));
eth_hw_addr_set(netdev, addr->sa_data);
memcpy(wx->mac.addr, addr->sa_data, netdev->addr_len);
@@ -1185,6 +1387,10 @@ static int wx_hpbthresh(struct wx *wx)
/* Calculate delay value for device */
dv_id = WX_DV(link, tc);
+ /* Loopback switch introduces additional latency */
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
+ dv_id += WX_B2BT(tc);
+
/* Delay value is calculated in bit times convert to KB */
kb = WX_BT2KB(dv_id);
rx_pba = rd32(wx, WX_RDB_PB_SZ(0)) >> WX_RDB_PB_SZ_SHIFT;
@@ -1240,12 +1446,107 @@ static void wx_pbthresh_setup(struct wx *wx)
wx->fc.low_water = 0;
}
+static void wx_set_ethertype_anti_spoofing(struct wx *wx, bool enable, int vf)
+{
+ u32 pfvfspoof, reg_offset, vf_shift;
+
+ vf_shift = WX_VF_IND_SHIFT(vf);
+ reg_offset = WX_VF_REG_OFFSET(vf);
+
+ pfvfspoof = rd32(wx, WX_TDM_ETYPE_AS(reg_offset));
+ if (enable)
+ pfvfspoof |= BIT(vf_shift);
+ else
+ pfvfspoof &= ~BIT(vf_shift);
+ wr32(wx, WX_TDM_ETYPE_AS(reg_offset), pfvfspoof);
+}
+
+int wx_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
+{
+ u32 index = WX_VF_REG_OFFSET(vf), vf_bit = WX_VF_IND_SHIFT(vf);
+ struct wx *wx = netdev_priv(netdev);
+ u32 regval;
+
+ if (vf >= wx->num_vfs)
+ return -EINVAL;
+
+ wx->vfinfo[vf].spoofchk_enabled = setting;
+
+ regval = (setting << vf_bit);
+ wr32m(wx, WX_TDM_MAC_AS(index), regval | BIT(vf_bit), regval);
+
+ if (wx->vfinfo[vf].vlan_count)
+ wr32m(wx, WX_TDM_VLAN_AS(index), regval | BIT(vf_bit), regval);
+
+ return 0;
+}
+
+static void wx_configure_virtualization(struct wx *wx)
+{
+ u16 pool = wx->num_rx_pools;
+ u32 reg_offset, vf_shift;
+ u32 i;
+
+ if (!test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
+ return;
+
+ wr32m(wx, WX_PSR_VM_CTL,
+ WX_PSR_VM_CTL_POOL_MASK | WX_PSR_VM_CTL_REPLEN,
+ FIELD_PREP(WX_PSR_VM_CTL_POOL_MASK, VMDQ_P(0)) |
+ WX_PSR_VM_CTL_REPLEN);
+ while (pool--)
+ wr32m(wx, WX_PSR_VM_L2CTL(pool),
+ WX_PSR_VM_L2CTL_AUPE, WX_PSR_VM_L2CTL_AUPE);
+
+ if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ vf_shift = BIT(VMDQ_P(0));
+ /* Enable only the PF pools for Tx/Rx */
+ wr32(wx, WX_RDM_VF_RE(0), vf_shift);
+ wr32(wx, WX_TDM_VF_TE(0), vf_shift);
+ } else {
+ vf_shift = WX_VF_IND_SHIFT(VMDQ_P(0));
+ reg_offset = WX_VF_REG_OFFSET(VMDQ_P(0));
+
+ /* Enable only the PF pools for Tx/Rx */
+ wr32(wx, WX_RDM_VF_RE(reg_offset), GENMASK(31, vf_shift));
+ wr32(wx, WX_RDM_VF_RE(reg_offset ^ 1), reg_offset - 1);
+ wr32(wx, WX_TDM_VF_TE(reg_offset), GENMASK(31, vf_shift));
+ wr32(wx, WX_TDM_VF_TE(reg_offset ^ 1), reg_offset - 1);
+ }
+
+ /* clear VLAN promisc flag so VFTA will be updated if necessary */
+ clear_bit(WX_FLAG_VLAN_PROMISC, wx->flags);
+
+ for (i = 0; i < wx->num_vfs; i++) {
+ if (!wx->vfinfo[i].spoofchk_enabled)
+ wx_set_vf_spoofchk(wx->netdev, i, false);
+ /* enable ethertype anti spoofing if hw supports it */
+ wx_set_ethertype_anti_spoofing(wx, true, i);
+ }
+}
+
static void wx_configure_port(struct wx *wx)
{
u32 value, i;
- value = WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ;
+ if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ value = (wx->num_vfs == 0) ?
+ WX_CFG_PORT_CTL_NUM_VT_NONE :
+ WX_CFG_PORT_CTL_NUM_VT_8;
+ } else {
+ if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags)) {
+ if (wx->ring_feature[RING_F_RSS].indices == 4)
+ value = WX_CFG_PORT_CTL_NUM_VT_32;
+ else
+ value = WX_CFG_PORT_CTL_NUM_VT_64;
+ } else {
+ value = 0;
+ }
+ }
+
+ value |= WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ;
wr32m(wx, WX_CFG_PORT_CTL,
+ WX_CFG_PORT_CTL_NUM_VT_MASK |
WX_CFG_PORT_CTL_D_VLAN |
WX_CFG_PORT_CTL_QINQ,
value);
@@ -1306,6 +1607,83 @@ static void wx_vlan_strip_control(struct wx *wx, bool enable)
}
}
+static void wx_vlan_promisc_enable(struct wx *wx)
+{
+ u32 vlnctrl, i, vind, bits, reg_idx;
+
+ vlnctrl = rd32(wx, WX_PSR_VLAN_CTL);
+ if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags)) {
+ /* we need to keep the VLAN filter on in SRIOV */
+ vlnctrl |= WX_PSR_VLAN_CTL_VFE;
+ wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
+ } else {
+ vlnctrl &= ~WX_PSR_VLAN_CTL_VFE;
+ wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
+ return;
+ }
+ /* We are already in VLAN promisc, nothing to do */
+ if (test_bit(WX_FLAG_VLAN_PROMISC, wx->flags))
+ return;
+ /* Set flag so we don't redo unnecessary work */
+ set_bit(WX_FLAG_VLAN_PROMISC, wx->flags);
+ /* Add PF to all active pools */
+ for (i = WX_PSR_VLAN_SWC_ENTRIES; --i;) {
+ wr32(wx, WX_PSR_VLAN_SWC_IDX, i);
+ vind = WX_VF_IND_SHIFT(VMDQ_P(0));
+ reg_idx = WX_VF_REG_OFFSET(VMDQ_P(0));
+ bits = rd32(wx, WX_PSR_VLAN_SWC_VM(reg_idx));
+ bits |= BIT(vind);
+ wr32(wx, WX_PSR_VLAN_SWC_VM(reg_idx), bits);
+ }
+ /* Set all bits in the VLAN filter table array */
+ for (i = 0; i < wx->mac.vft_size; i++)
+ wr32(wx, WX_PSR_VLAN_TBL(i), U32_MAX);
+}
+
+static void wx_scrub_vfta(struct wx *wx)
+{
+ u32 i, vid, bits, vfta, vind, vlvf, reg_idx;
+
+ for (i = WX_PSR_VLAN_SWC_ENTRIES; --i;) {
+ wr32(wx, WX_PSR_VLAN_SWC_IDX, i);
+ vlvf = rd32(wx, WX_PSR_VLAN_SWC_IDX);
+ /* pull VLAN ID from VLVF */
+ vid = vlvf & ~WX_PSR_VLAN_SWC_VIEN;
+ if (vlvf & WX_PSR_VLAN_SWC_VIEN) {
+ /* if PF is part of this then continue */
+ if (test_bit(vid, wx->active_vlans))
+ continue;
+ }
+ /* remove PF from the pool */
+ vind = WX_VF_IND_SHIFT(VMDQ_P(0));
+ reg_idx = WX_VF_REG_OFFSET(VMDQ_P(0));
+ bits = rd32(wx, WX_PSR_VLAN_SWC_VM(reg_idx));
+ bits &= ~BIT(vind);
+ wr32(wx, WX_PSR_VLAN_SWC_VM(reg_idx), bits);
+ }
+ /* extract values from vft_shadow and write back to VFTA */
+ for (i = 0; i < wx->mac.vft_size; i++) {
+ vfta = wx->mac.vft_shadow[i];
+ wr32(wx, WX_PSR_VLAN_TBL(i), vfta);
+ }
+}
+
+static void wx_vlan_promisc_disable(struct wx *wx)
+{
+ u32 vlnctrl;
+
+ /* configure vlan filtering */
+ vlnctrl = rd32(wx, WX_PSR_VLAN_CTL);
+ vlnctrl |= WX_PSR_VLAN_CTL_VFE;
+ wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
+ /* We are not in VLAN promisc, nothing to do */
+ if (!test_bit(WX_FLAG_VLAN_PROMISC, wx->flags))
+ return;
+ /* Set flag so we don't redo unnecessary work */
+ clear_bit(WX_FLAG_VLAN_PROMISC, wx->flags);
+ wx_scrub_vfta(wx);
+}
+
void wx_set_rx_mode(struct net_device *netdev)
{
struct wx *wx = netdev_priv(netdev);
@@ -1318,7 +1696,7 @@ void wx_set_rx_mode(struct net_device *netdev)
/* Check for Promiscuous and All Multicast modes */
fctrl = rd32(wx, WX_PSR_CTL);
fctrl &= ~(WX_PSR_CTL_UPE | WX_PSR_CTL_MPE);
- vmolr = rd32(wx, WX_PSR_VM_L2CTL(0));
+ vmolr = rd32(wx, WX_PSR_VM_L2CTL(VMDQ_P(0)));
vmolr &= ~(WX_PSR_VM_L2CTL_UPE |
WX_PSR_VM_L2CTL_MPE |
WX_PSR_VM_L2CTL_ROPE |
@@ -1339,7 +1717,10 @@ void wx_set_rx_mode(struct net_device *netdev)
fctrl |= WX_PSR_CTL_UPE | WX_PSR_CTL_MPE;
/* pf don't want packets routing to vf, so clear UPE */
vmolr |= WX_PSR_VM_L2CTL_MPE;
- vlnctrl &= ~WX_PSR_VLAN_CTL_VFE;
+ if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags) &&
+ test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
+ vlnctrl |= WX_PSR_VLAN_CTL_VFE;
+ features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
}
if (netdev->flags & IFF_ALLMULTI) {
@@ -1362,7 +1743,7 @@ void wx_set_rx_mode(struct net_device *netdev)
* sufficient space to store all the addresses then enable
* unicast promiscuous mode
*/
- count = wx_write_uc_addr_list(netdev, 0);
+ count = wx_write_uc_addr_list(netdev, VMDQ_P(0));
if (count < 0) {
vmolr &= ~WX_PSR_VM_L2CTL_ROPE;
vmolr |= WX_PSR_VM_L2CTL_UPE;
@@ -1380,7 +1761,7 @@ void wx_set_rx_mode(struct net_device *netdev)
wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
wr32(wx, WX_PSR_CTL, fctrl);
- wr32(wx, WX_PSR_VM_L2CTL(0), vmolr);
+ wr32(wx, WX_PSR_VM_L2CTL(VMDQ_P(0)), vmolr);
if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
(features & NETIF_F_HW_VLAN_STAG_RX))
@@ -1388,13 +1769,19 @@ void wx_set_rx_mode(struct net_device *netdev)
else
wx_vlan_strip_control(wx, false);
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ wx_vlan_promisc_disable(wx);
+ else
+ wx_vlan_promisc_enable(wx);
}
EXPORT_SYMBOL(wx_set_rx_mode);
static void wx_set_rx_buffer_len(struct wx *wx)
{
struct net_device *netdev = wx->netdev;
+ struct wx_ring *rx_ring;
u32 mhadd, max_frame;
+ int i;
max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
/* adjust max frame to be at least the size of a standard frame */
@@ -1404,6 +1791,19 @@ static void wx_set_rx_buffer_len(struct wx *wx)
mhadd = rd32(wx, WX_PSR_MAX_SZ);
if (max_frame != mhadd)
wr32(wx, WX_PSR_MAX_SZ, max_frame);
+
+ /*
+ * Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring
+ */
+ for (i = 0; i < wx->num_rx_queues; i++) {
+ rx_ring = wx->rx_ring[i];
+ rx_ring->rx_buf_len = WX_RXBUFFER_2K;
+#if (PAGE_SIZE < 8192)
+ if (test_bit(WX_FLAG_RSC_ENABLED, wx->flags))
+ rx_ring->rx_buf_len = WX_RXBUFFER_3K;
+#endif
+ }
}
/**
@@ -1448,7 +1848,7 @@ void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring)
}
EXPORT_SYMBOL(wx_disable_rx_queue);
-static void wx_enable_rx_queue(struct wx *wx, struct wx_ring *ring)
+void wx_enable_rx_queue(struct wx *wx, struct wx_ring *ring)
{
u8 reg_idx = ring->reg_idx;
u32 rxdctl;
@@ -1464,6 +1864,7 @@ static void wx_enable_rx_queue(struct wx *wx, struct wx_ring *ring)
reg_idx);
}
}
+EXPORT_SYMBOL(wx_enable_rx_queue);
static void wx_configure_srrctl(struct wx *wx,
struct wx_ring *rx_ring)
@@ -1479,11 +1880,27 @@ static void wx_configure_srrctl(struct wx *wx,
srrctl |= WX_RXBUFFER_256 << WX_PX_RR_CFG_BHDRSIZE_SHIFT;
/* configure the packet buffer length */
- srrctl |= WX_RX_BUFSZ >> WX_PX_RR_CFG_BSIZEPKT_SHIFT;
+ srrctl |= rx_ring->rx_buf_len >> WX_PX_RR_CFG_BSIZEPKT_SHIFT;
wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl);
}
+static void wx_configure_rscctl(struct wx *wx,
+ struct wx_ring *ring)
+{
+ u8 reg_idx = ring->reg_idx;
+ u32 rscctrl;
+
+ if (!test_bit(WX_FLAG_RSC_ENABLED, wx->flags))
+ return;
+
+ rscctrl = rd32(wx, WX_PX_RR_CFG(reg_idx));
+ rscctrl |= WX_PX_RR_CFG_RSC;
+ rscctrl |= WX_PX_RR_CFG_MAX_RSCBUF_16;
+
+ wr32(wx, WX_PX_RR_CFG(reg_idx), rscctrl);
+}
+
static void wx_configure_tx_ring(struct wx *wx,
struct wx_ring *ring)
{
@@ -1519,6 +1936,15 @@ static void wx_configure_tx_ring(struct wx *wx,
memset(ring->tx_buffer_info, 0,
sizeof(struct wx_tx_buffer) * ring->count);
+ if (ring->headwb_mem) {
+ wr32(wx, WX_PX_TR_HEAD_ADDRL(reg_idx),
+ ring->headwb_dma & DMA_BIT_MASK(32));
+ wr32(wx, WX_PX_TR_HEAD_ADDRH(reg_idx),
+ upper_32_bits(ring->headwb_dma));
+
+ txdctl |= WX_PX_TR_CFG_HEAD_WB;
+ }
+
/* enable queue */
wr32(wx, WX_PX_TR_CFG(reg_idx), txdctl);
@@ -1533,7 +1959,6 @@ static void wx_configure_rx_ring(struct wx *wx,
struct wx_ring *ring)
{
u16 reg_idx = ring->reg_idx;
- union wx_rx_desc *rx_desc;
u64 rdba = ring->dma;
u32 rxdctl;
@@ -1550,6 +1975,10 @@ static void wx_configure_rx_ring(struct wx *wx,
rxdctl |= (ring->count / 128) << WX_PX_RR_CFG_RR_SIZE_SHIFT;
rxdctl |= 0x1 << WX_PX_RR_CFG_RR_THER_SHIFT;
+
+ if (test_bit(WX_FLAG_RX_MERGE_ENABLED, wx->flags))
+ rxdctl |= WX_PX_RR_CFG_DESC_MERGE;
+
wr32(wx, WX_PX_RR_CFG(reg_idx), rxdctl);
/* reset head and tail pointers */
@@ -1558,14 +1987,15 @@ static void wx_configure_rx_ring(struct wx *wx,
ring->tail = wx->hw_addr + WX_PX_RR_WP(reg_idx);
wx_configure_srrctl(wx, ring);
+ wx_configure_rscctl(wx, ring);
/* initialize rx_buffer_info */
memset(ring->rx_buffer_info, 0,
sizeof(struct wx_rx_buffer) * ring->count);
- /* initialize Rx descriptor 0 */
- rx_desc = WX_RX_DESC(ring, 0);
- rx_desc->wb.upper.length = 0;
+ /* reset ntu and ntc to place SW in sync with hardware */
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
/* enable receive descriptor ring */
wr32m(wx, WX_PX_RR_CFG(reg_idx),
@@ -1613,8 +2043,17 @@ static void wx_restore_vlan(struct wx *wx)
wx_vlan_rx_add_vid(wx->netdev, htons(ETH_P_8021Q), vid);
}
-static void wx_store_reta(struct wx *wx)
+u32 wx_rss_indir_tbl_entries(struct wx *wx)
{
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
+ return 64;
+ else
+ return 128;
+}
+
+void wx_store_reta(struct wx *wx)
+{
+ u32 reta_entries = wx_rss_indir_tbl_entries(wx);
u8 *indir_tbl = wx->rss_indir_tbl;
u32 reta = 0;
u32 i;
@@ -1622,61 +2061,146 @@ static void wx_store_reta(struct wx *wx)
/* Fill out the redirection table as follows:
* - 8 bit wide entries containing 4 bit RSS index
*/
- for (i = 0; i < WX_MAX_RETA_ENTRIES; i++) {
+ for (i = 0; i < reta_entries; i++) {
reta |= indir_tbl[i] << (i & 0x3) * 8;
if ((i & 3) == 3) {
- wr32(wx, WX_RDB_RSSTBL(i >> 2), reta);
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags) &&
+ test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
+ wr32(wx, WX_RDB_VMRSSTBL(i >> 2, wx->num_vfs), reta);
+ else
+ wr32(wx, WX_RDB_RSSTBL(i >> 2), reta);
reta = 0;
}
}
}
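
The loop above packs four 8-bit redirection-table entries into each 32-bit RSSTBL register, cycling the queue index through 0..rss_i-1. A minimal standalone sketch of the same packing, with illustrative sizes (128 entries and 4 RSS queues are assumptions, not values taken from this patch):

/* Hypothetical standalone example, not driver code: mirrors the
 * wx_store_reta() packing of four 8-bit entries per 32-bit register.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	enum { RETA_ENTRIES = 128, RSS_I = 4 };    /* assumed sizes */
	uint8_t indir_tbl[RETA_ENTRIES];
	uint32_t reta = 0;

	for (int i = 0; i < RETA_ENTRIES; i++)
		indir_tbl[i] = i % RSS_I;          /* queue index for each hash bucket */

	for (int i = 0; i < RETA_ENTRIES; i++) {
		reta |= (uint32_t)indir_tbl[i] << ((i & 0x3) * 8);
		if ((i & 3) == 3) {                /* one register holds four entries */
			printf("RSSTBL[%d] = 0x%08x\n", i >> 2, reta);
			reta = 0;
		}
	}
	return 0;
}
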
-static void wx_setup_reta(struct wx *wx)
+void wx_store_rsskey(struct wx *wx)
{
- u16 rss_i = wx->ring_feature[RING_F_RSS].indices;
- u32 random_key_size = WX_RSS_KEY_SIZE / 4;
- u32 i, j;
+ u32 key_size = WX_RSS_KEY_SIZE / 4;
+ u32 i;
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags) &&
+ test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ for (i = 0; i < key_size; i++)
+ wr32(wx, WX_RDB_VMRSSRK(i, wx->num_vfs),
+ wx->rss_key[i]);
+ } else {
+ for (i = 0; i < key_size; i++)
+ wr32(wx, WX_RDB_RSSRK(i), wx->rss_key[i]);
+ }
+}
+
+static void wx_setup_reta(struct wx *wx)
+{
/* Fill out hash function seeds */
- for (i = 0; i < random_key_size; i++)
- wr32(wx, WX_RDB_RSSRK(i), wx->rss_key[i]);
+ wx_store_rsskey(wx);
/* Fill out redirection table */
- memset(wx->rss_indir_tbl, 0, sizeof(wx->rss_indir_tbl));
+ if (!netif_is_rxfh_configured(wx->netdev)) {
+ u16 rss_i = wx->ring_feature[RING_F_RSS].indices;
+ u32 reta_entries = wx_rss_indir_tbl_entries(wx);
+ u32 i, j;
+
+ memset(wx->rss_indir_tbl, 0, sizeof(wx->rss_indir_tbl));
+
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags)) {
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
+ rss_i = rss_i < 2 ? 2 : rss_i;
+ else
+ rss_i = 1;
+ }
- for (i = 0, j = 0; i < WX_MAX_RETA_ENTRIES; i++, j++) {
- if (j == rss_i)
- j = 0;
+ for (i = 0, j = 0; i < reta_entries; i++, j++) {
+ if (j == rss_i)
+ j = 0;
- wx->rss_indir_tbl[i] = j;
+ wx->rss_indir_tbl[i] = j;
+ }
}
wx_store_reta(wx);
}
-static void wx_setup_mrqc(struct wx *wx)
+void wx_config_rss_field(struct wx *wx)
{
- u32 rss_field = 0;
+ u32 rss_field;
- /* Disable indicating checksum in descriptor, enables RSS hash */
- wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_PCSD, WX_PSR_CTL_PCSD);
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags) &&
+ test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ rss_field = rd32(wx, WX_RDB_PL_CFG(wx->num_vfs));
+ rss_field &= ~WX_RDB_PL_CFG_RSS_MASK;
+ rss_field |= FIELD_PREP(WX_RDB_PL_CFG_RSS_MASK, wx->rss_flags);
+ wr32(wx, WX_RDB_PL_CFG(wx->num_vfs), rss_field);
- /* Perform hash on these packet types */
- rss_field = WX_RDB_RA_CTL_RSS_IPV4 |
- WX_RDB_RA_CTL_RSS_IPV4_TCP |
- WX_RDB_RA_CTL_RSS_IPV4_UDP |
- WX_RDB_RA_CTL_RSS_IPV6 |
- WX_RDB_RA_CTL_RSS_IPV6_TCP |
- WX_RDB_RA_CTL_RSS_IPV6_UDP;
+ /* Enable global RSS and multiple RSS to make the RSS
+ * field of each pool take effect.
+ */
+ wr32m(wx, WX_RDB_RA_CTL,
+ WX_RDB_RA_CTL_MULTI_RSS | WX_RDB_RA_CTL_RSS_EN,
+ WX_RDB_RA_CTL_MULTI_RSS | WX_RDB_RA_CTL_RSS_EN);
+ } else {
+ rss_field = rd32(wx, WX_RDB_RA_CTL);
+ rss_field &= ~WX_RDB_RA_CTL_RSS_MASK;
+ rss_field |= FIELD_PREP(WX_RDB_RA_CTL_RSS_MASK, wx->rss_flags);
+ wr32(wx, WX_RDB_RA_CTL, rss_field);
+ }
+}
- netdev_rss_key_fill(wx->rss_key, sizeof(wx->rss_key));
+void wx_enable_rss(struct wx *wx, bool enable)
+{
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags) &&
+ test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ if (enable)
+ wr32m(wx, WX_RDB_PL_CFG(wx->num_vfs),
+ WX_RDB_PL_CFG_RSS_EN, WX_RDB_PL_CFG_RSS_EN);
+ else
+ wr32m(wx, WX_RDB_PL_CFG(wx->num_vfs),
+ WX_RDB_PL_CFG_RSS_EN, 0);
+ } else {
+ if (enable)
+ wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN,
+ WX_RDB_RA_CTL_RSS_EN);
+ else
+ wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN, 0);
+ }
+}
- wx_setup_reta(wx);
+#define WX_RDB_RSS_PL_2 FIELD_PREP(GENMASK(31, 29), 1)
+#define WX_RDB_RSS_PL_4 FIELD_PREP(GENMASK(31, 29), 2)
+static void wx_setup_psrtype(struct wx *wx)
+{
+ int rss_i = wx->ring_feature[RING_F_RSS].indices;
+ u32 psrtype;
+ int pool;
- if (wx->rss_enabled)
- rss_field |= WX_RDB_RA_CTL_RSS_EN;
+ psrtype = WX_RDB_PL_CFG_L4HDR |
+ WX_RDB_PL_CFG_L3HDR |
+ WX_RDB_PL_CFG_L2HDR |
+ WX_RDB_PL_CFG_TUN_OUTL2HDR |
+ WX_RDB_PL_CFG_TUN_TUNHDR;
+
+ if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ for_each_set_bit(pool, &wx->fwd_bitmask, 8)
+ wr32(wx, WX_RDB_PL_CFG(VMDQ_P(pool)), psrtype);
+ } else {
+ if (rss_i > 3)
+ psrtype |= WX_RDB_RSS_PL_4;
+ else if (rss_i > 1)
+ psrtype |= WX_RDB_RSS_PL_2;
+
+ for_each_set_bit(pool, &wx->fwd_bitmask, 32)
+ wr32(wx, WX_RDB_PL_CFG(VMDQ_P(pool)), psrtype);
+ }
+}
+
+static void wx_setup_mrqc(struct wx *wx)
+{
+ /* Disable indicating checksum in descriptor, enables RSS hash */
+ wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_PCSD, WX_PSR_CTL_PCSD);
- wr32(wx, WX_RDB_RA_CTL, rss_field);
+ wx_config_rss_field(wx);
+ wx_enable_rss(wx, wx->rss_enabled);
+ wx_setup_reta(wx);
}
/**
@@ -1687,27 +2211,24 @@ static void wx_setup_mrqc(struct wx *wx)
**/
void wx_configure_rx(struct wx *wx)
{
- u32 psrtype, i;
int ret;
+ u32 i;
wx_disable_rx(wx);
-
- psrtype = WX_RDB_PL_CFG_L4HDR |
- WX_RDB_PL_CFG_L3HDR |
- WX_RDB_PL_CFG_L2HDR |
- WX_RDB_PL_CFG_TUN_TUNHDR;
- wr32(wx, WX_RDB_PL_CFG(0), psrtype);
+ wx_setup_psrtype(wx);
/* enable hw crc stripping */
wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_CRC_STRIP, WX_RSC_CTL_CRC_STRIP);
- if (wx->mac.type == wx_mac_sp) {
+ if (test_bit(WX_FLAG_RSC_CAPABLE, wx->flags)) {
u32 psrctl;
/* RSC Setup */
psrctl = rd32(wx, WX_PSR_CTL);
psrctl |= WX_PSR_CTL_RSC_ACK; /* Disable RSC for ACK packets */
- psrctl |= WX_PSR_CTL_RSC_DIS;
+ psrctl &= ~WX_PSR_CTL_RSC_DIS;
+ if (!test_bit(WX_FLAG_RSC_ENABLED, wx->flags))
+ psrctl |= WX_PSR_CTL_RSC_DIS;
wr32(wx, WX_PSR_CTL, psrctl);
}
@@ -1716,6 +2237,12 @@ void wx_configure_rx(struct wx *wx)
/* set_rx_buffer_len must be called before ring initialization */
wx_set_rx_buffer_len(wx);
+ if (test_bit(WX_FLAG_RX_MERGE_ENABLED, wx->flags)) {
+ wr32(wx, WX_RDM_DCACHE_CTL, WX_RDM_DCACHE_CTL_EN);
+ wr32m(wx, WX_RDM_RSC_CTL,
+ WX_RDM_RSC_CTL_FREE_CTL | WX_RDM_RSC_CTL_FREE_CNT_DIS,
+ WX_RDM_RSC_CTL_FREE_CTL);
+ }
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring
*/
@@ -1744,6 +2271,7 @@ void wx_configure(struct wx *wx)
{
wx_set_rxpba(wx);
wx_pbthresh_setup(wx);
+ wx_configure_virtualization(wx);
wx_configure_port(wx);
wx_set_rx_mode(wx->netdev);
@@ -1838,10 +2366,8 @@ int wx_stop_adapter(struct wx *wx)
}
EXPORT_SYMBOL(wx_stop_adapter);
-void wx_reset_misc(struct wx *wx)
+void wx_reset_mac(struct wx *wx)
{
- int i;
-
/* receive packets that size > 2048 */
wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_JE, WX_MAC_RX_CFG_JE);
@@ -1853,6 +2379,14 @@ void wx_reset_misc(struct wx *wx)
WX_MAC_RX_FLOW_CTRL_RFE, WX_MAC_RX_FLOW_CTRL_RFE);
wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
+}
+EXPORT_SYMBOL(wx_reset_mac);
+
+void wx_reset_misc(struct wx *wx)
+{
+ int i;
+
+ wx_reset_mac(wx);
wr32m(wx, WX_MIS_RST_ST,
WX_MIS_RST_ST_RST_INIT, 0x1E00);
@@ -1946,9 +2480,11 @@ int wx_sw_init(struct wx *wx)
wx->oem_svid = pdev->subsystem_vendor;
wx->oem_ssid = pdev->subsystem_device;
wx->bus.device = PCI_SLOT(pdev->devfn);
- wx->bus.func = PCI_FUNC(pdev->devfn);
+ wx->bus.func = FIELD_GET(WX_CFG_PORT_ST_LANID,
+ rd32(wx, WX_CFG_PORT_ST));
- if (wx->oem_svid == PCI_VENDOR_ID_WANGXUN) {
+ if (wx->oem_svid == PCI_VENDOR_ID_WANGXUN ||
+ pdev->is_virtfn) {
wx->subsystem_vendor_id = pdev->subsystem_vendor;
wx->subsystem_device_id = pdev->subsystem_device;
} else {
@@ -1966,6 +2502,8 @@ int wx_sw_init(struct wx *wx)
wx_err(wx, "rss key allocation failed\n");
return err;
}
+ wx->rss_flags = WX_RSS_FIELD_IPV4 | WX_RSS_FIELD_IPV4_TCP |
+ WX_RSS_FIELD_IPV6 | WX_RSS_FIELD_IPV6_TCP;
wx->mac_table = kcalloc(wx->mac.num_rar_entries,
sizeof(struct wx_mac_addr),
@@ -2102,7 +2640,7 @@ static int wx_set_vlvf(struct wx *wx, u32 vlan, u32 vind, bool vlan_on,
*
* Turn on/off specified VLAN in the VLAN filter table.
**/
-static int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on)
+int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on)
{
u32 bitindex, vfta, targetbit;
bool vfta_changed = false;
@@ -2321,6 +2859,18 @@ void wx_update_stats(struct wx *wx)
wx->hw_csum_rx_error = hw_csum_rx_error;
wx->hw_csum_rx_good = hw_csum_rx_good;
+ if (test_bit(WX_FLAG_RSC_ENABLED, wx->flags)) {
+ u64 rsc_count = 0;
+ u64 rsc_flush = 0;
+
+ for (i = 0; i < wx->num_rx_queues; i++) {
+ rsc_count += wx->rx_ring[i]->rx_stats.rsc_count;
+ rsc_flush += wx->rx_ring[i]->rx_stats.rsc_flush;
+ }
+ wx->rsc_count = rsc_count;
+ wx->rsc_flush = rsc_flush;
+ }
+
for (i = 0; i < wx->num_tx_queues; i++) {
struct wx_ring *tx_ring = wx->tx_ring[i];
@@ -2353,12 +2903,15 @@ void wx_update_stats(struct wx *wx)
hwstats->b2ogprc += rd32(wx, WX_RDM_BMC2OS_CNT);
hwstats->rdmdrop += rd32(wx, WX_RDM_DRP_PKT);
- if (wx->mac.type == wx_mac_sp) {
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
hwstats->fdirmatch += rd32(wx, WX_RDB_FDIR_MATCH);
hwstats->fdirmiss += rd32(wx, WX_RDB_FDIR_MISS);
}
- for (i = 0; i < wx->mac.max_rx_queues; i++)
+	/* qmprc is not cleared on read, manually reset it */
+ hwstats->qmprc = 0;
+ for (i = wx->num_vfs * wx->num_rx_queues_per_pool;
+ i < wx->mac.max_rx_queues; i++)
hwstats->qmprc += rd32(wx, WX_PX_MPRC(i));
}
EXPORT_SYMBOL(wx_update_stats);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
index 11fb33349482..13857376bbad 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
@@ -18,6 +18,7 @@ void wx_control_hw(struct wx *wx, bool drv);
int wx_mng_present(struct wx *wx);
int wx_host_interface_command(struct wx *wx, u32 *buffer,
u32 length, u32 timeout, bool return_data);
+int wx_set_pps(struct wx *wx, bool enable, u64 nsec, u64 cycles);
int wx_read_ee_hostif(struct wx *wx, u16 offset, u16 *data);
int wx_read_ee_hostif_buffer(struct wx *wx,
u16 offset, u16 words, u16 *data);
@@ -25,22 +26,34 @@ void wx_init_eeprom_params(struct wx *wx);
void wx_get_mac_addr(struct wx *wx, u8 *mac_addr);
void wx_init_rx_addrs(struct wx *wx);
void wx_mac_set_default_filter(struct wx *wx, u8 *addr);
+int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool);
+int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool);
void wx_flush_sw_mac_table(struct wx *wx);
+u32 wx_mta_vector(struct wx *wx, u8 *mc_addr);
int wx_set_mac(struct net_device *netdev, void *p);
void wx_disable_rx(struct wx *wx);
+int wx_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
int wx_disable_sec_rx_path(struct wx *wx);
void wx_enable_sec_rx_path(struct wx *wx);
void wx_set_rx_mode(struct net_device *netdev);
int wx_change_mtu(struct net_device *netdev, int new_mtu);
void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring);
+void wx_enable_rx_queue(struct wx *wx, struct wx_ring *ring);
+u32 wx_rss_indir_tbl_entries(struct wx *wx);
+void wx_store_reta(struct wx *wx);
+void wx_store_rsskey(struct wx *wx);
+void wx_config_rss_field(struct wx *wx);
+void wx_enable_rss(struct wx *wx, bool enable);
void wx_configure_rx(struct wx *wx);
void wx_configure(struct wx *wx);
void wx_start_hw(struct wx *wx);
int wx_disable_pcie_master(struct wx *wx);
int wx_stop_adapter(struct wx *wx);
+void wx_reset_mac(struct wx *wx);
void wx_reset_misc(struct wx *wx);
int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count);
int wx_sw_init(struct wx *wx);
+int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on);
int wx_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
int wx_fc_enable(struct wx *wx, bool tx_pause, bool rx_pause);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 2b3d6586f44a..32cadafa4b3b 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -5,6 +5,7 @@
#include <net/ip6_checksum.h>
#include <net/page_pool/helpers.h>
#include <net/inet_ecn.h>
+#include <linux/workqueue.h>
#include <linux/iopoll.h>
#include <linux/sctp.h>
#include <linux/pci.h>
@@ -13,7 +14,9 @@
#include "wx_type.h"
#include "wx_lib.h"
+#include "wx_ptp.h"
#include "wx_hw.h"
+#include "wx_vf_lib.h"
/* Lookup table mapping the HW PTYPE to the bit field for decoding */
static struct wx_dec_ptype wx_ptype_lookup[256] = {
@@ -172,10 +175,6 @@ static void wx_dma_sync_frag(struct wx_ring *rx_ring,
skb_frag_off(frag),
skb_frag_size(frag),
DMA_FROM_DEVICE);
-
- /* If the page was released, just unmap it. */
- if (unlikely(WX_CB(skb)->page_released))
- page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
}
static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring,
@@ -225,10 +224,6 @@ static void wx_put_rx_buffer(struct wx_ring *rx_ring,
struct sk_buff *skb,
int rx_buffer_pgcnt)
{
- if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma)
- /* the page has been released from the ring */
- WX_CB(skb)->page_released = true;
-
/* clear contents of rx_buffer */
rx_buffer->page = NULL;
rx_buffer->skb = NULL;
@@ -240,7 +235,7 @@ static struct sk_buff *wx_build_skb(struct wx_ring *rx_ring,
{
unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
- unsigned int truesize = WX_RX_BUFSZ;
+ unsigned int truesize = wx_rx_pg_size(rx_ring) / 2;
#else
unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
#endif
@@ -309,10 +304,11 @@ static bool wx_alloc_mapped_page(struct wx_ring *rx_ring,
return true;
page = page_pool_dev_alloc_pages(rx_ring->page_pool);
- WARN_ON(!page);
+ if (unlikely(!page))
+ return false;
dma = page_pool_get_dma_addr(page);
- bi->page_dma = dma;
+ bi->dma = dma;
bi->page = page;
bi->page_offset = 0;
@@ -345,11 +341,11 @@ void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count)
/* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
bi->page_offset,
- WX_RX_BUFSZ,
+ rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
rx_desc->read.pkt_addr =
- cpu_to_le64(bi->page_dma + bi->page_offset);
+ cpu_to_le64(bi->dma + bi->page_offset);
rx_desc++;
bi++;
@@ -362,6 +358,8 @@ void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count)
/* clear the status bits for the next_to_use descriptor */
rx_desc->wb.upper.status_error = 0;
+ /* clear the length for the next_to_use descriptor */
+ rx_desc->wb.upper.length = 0;
cleaned_count--;
} while (cleaned_count);
@@ -406,6 +404,7 @@ static bool wx_is_non_eop(struct wx_ring *rx_ring,
union wx_rx_desc *rx_desc,
struct sk_buff *skb)
{
+ struct wx *wx = rx_ring->q_vector->wx;
u32 ntc = rx_ring->next_to_clean + 1;
/* fetch, update, and store next to clean */
@@ -414,6 +413,24 @@ static bool wx_is_non_eop(struct wx_ring *rx_ring,
prefetch(WX_RX_DESC(rx_ring, ntc));
+ /* update RSC append count if present */
+ if (test_bit(WX_FLAG_RSC_ENABLED, wx->flags)) {
+ __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
+ cpu_to_le32(WX_RXD_RSCCNT_MASK);
+
+ if (unlikely(rsc_enabled)) {
+ u32 rsc_cnt = le32_to_cpu(rsc_enabled);
+
+ rsc_cnt >>= WX_RXD_RSCCNT_SHIFT;
+ WX_CB(skb)->append_cnt += rsc_cnt - 1;
+
+ /* update ntc based on RSC value */
+ ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
+ ntc &= WX_RXD_NEXTP_MASK;
+ ntc >>= WX_RXD_NEXTP_SHIFT;
+ }
+ }
+
/* if we are the last buffer then there is nothing else to do */
if (likely(wx_test_staterr(rx_desc, WX_RXD_STAT_EOP)))
return false;
@@ -545,7 +562,8 @@ static void wx_rx_checksum(struct wx_ring *ring,
return;
/* Hardware can't guarantee csum if IPv6 Dest Header found */
- if (dptype.prot != WX_DEC_PTYPE_PROT_SCTP && WX_RXD_IPV6EX(rx_desc))
+ if (dptype.prot != WX_DEC_PTYPE_PROT_SCTP &&
+ wx_test_staterr(rx_desc, WX_RXD_STAT_IPV6EX))
return;
/* if L4 checksum error */
@@ -583,6 +601,33 @@ static void wx_rx_vlan(struct wx_ring *ring, union wx_rx_desc *rx_desc,
}
}
+static void wx_set_rsc_gso_size(struct wx_ring *ring,
+ struct sk_buff *skb)
+{
+ u16 hdr_len = skb_headlen(skb);
+
+ /* set gso_size to avoid messing up TCP MSS */
+ skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
+ WX_CB(skb)->append_cnt);
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+}
+
+static void wx_update_rsc_stats(struct wx_ring *rx_ring,
+ struct sk_buff *skb)
+{
+ /* if append_cnt is 0 then frame is not RSC */
+ if (!WX_CB(skb)->append_cnt)
+ return;
+
+ rx_ring->rx_stats.rsc_count += WX_CB(skb)->append_cnt;
+ rx_ring->rx_stats.rsc_flush++;
+
+ wx_set_rsc_gso_size(rx_ring, skb);
+
+ /* gso_size is computed using append_cnt so always clear it last */
+ WX_CB(skb)->append_cnt = 0;
+}
+
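
wx_set_rsc_gso_size() above reconstructs an MSS for a hardware-coalesced frame by spreading the non-header payload across the number of appended buffers. A worked example with assumed lengths (the numbers are purely illustrative):

/* Standalone sketch, not driver code: DIV_ROUND_UP(payload, append_cnt)
 * as used by wx_set_rsc_gso_size(), with made-up frame sizes.
 */
#include <stdio.h>

int main(void)
{
	unsigned int skb_len = 23360;    /* total coalesced bytes (assumed) */
	unsigned int hdr_len = 66;       /* linear header bytes (assumed)   */
	unsigned int append_cnt = 16;    /* buffers merged into this skb    */
	unsigned int gso_size = (skb_len - hdr_len + append_cnt - 1) / append_cnt;

	printf("gso_size = %u\n", gso_size);   /* prints 1456 for these values */
	return 0;
}
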
/**
* wx_process_skb_fields - Populate skb header fields from Rx descriptor
* @rx_ring: rx descriptor ring packet is being transacted on
@@ -597,8 +642,20 @@ static void wx_process_skb_fields(struct wx_ring *rx_ring,
union wx_rx_desc *rx_desc,
struct sk_buff *skb)
{
+ struct wx *wx = netdev_priv(rx_ring->netdev);
+
+ if (test_bit(WX_FLAG_RSC_CAPABLE, wx->flags))
+ wx_update_rsc_stats(rx_ring, skb);
+
wx_rx_hash(rx_ring, rx_desc, skb);
wx_rx_checksum(rx_ring, rx_desc, skb);
+
+ if (unlikely(test_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, wx->flags)) &&
+ unlikely(wx_test_staterr(rx_desc, WX_RXD_STAT_TS))) {
+ wx_ptp_rx_hwtstamp(rx_ring->q_vector->wx, skb);
+ rx_ring->last_rx_timestamp = jiffies;
+ }
+
wx_rx_vlan(rx_ring, rx_desc, skb);
skb_record_rx_queue(skb, rx_ring->queue_index);
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
@@ -705,6 +762,7 @@ static bool wx_clean_tx_irq(struct wx_q_vector *q_vector,
{
unsigned int budget = q_vector->wx->tx_work_limit;
unsigned int total_bytes = 0, total_packets = 0;
+ struct wx *wx = netdev_priv(tx_ring->netdev);
unsigned int i = tx_ring->next_to_clean;
struct wx_tx_buffer *tx_buffer;
union wx_tx_desc *tx_desc;
@@ -726,9 +784,22 @@ static bool wx_clean_tx_irq(struct wx_q_vector *q_vector,
/* prevent any other reads prior to eop_desc */
smp_rmb();
- /* if DD is not set pending work has not been completed */
- if (!(eop_desc->wb.status & cpu_to_le32(WX_TXD_STAT_DD)))
+ if (tx_ring->headwb_mem) {
+ u32 head = *tx_ring->headwb_mem;
+
+ if (head == tx_ring->next_to_clean)
+ break;
+ else if (head > tx_ring->next_to_clean &&
+ !(tx_buffer->next_eop >= tx_ring->next_to_clean &&
+ tx_buffer->next_eop < head))
+ break;
+ else if (!(tx_buffer->next_eop >= tx_ring->next_to_clean ||
+ tx_buffer->next_eop < head))
+ break;
+ } else if (!(eop_desc->wb.status & cpu_to_le32(WX_TXD_STAT_DD))) {
+ /* if DD is not set pending work has not been completed */
break;
+ }
/* clear next_to_watch to prevent false hangs */
tx_buffer->next_to_watch = NULL;
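
The head write-back branch added above treats a descriptor as completed once its EOP index falls inside the half-open window [next_to_clean, head), with wrap-around at the end of the ring. A standalone sketch of that window test (indices are arbitrary; this is not driver code):

#include <stdio.h>
#include <stdbool.h>

/* Returns true when 'eop' lies in [next_to_clean, head), allowing for
 * the window wrapping past the end of the ring.
 */
static bool tx_desc_done(unsigned int next_to_clean, unsigned int head,
			 unsigned int eop)
{
	if (head == next_to_clean)                  /* nothing completed yet */
		return false;
	if (head > next_to_clean)                   /* no wrap */
		return eop >= next_to_clean && eop < head;
	return eop >= next_to_clean || eop < head;  /* window wraps past 0 */
}

int main(void)
{
	printf("%d\n", tx_desc_done(10, 20, 15));   /* 1: inside the window    */
	printf("%d\n", tx_desc_done(10, 20, 25));   /* 0: not written back yet */
	printf("%d\n", tx_desc_done(120, 5, 2));    /* 1: window wraps around  */
	return 0;
}
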
@@ -737,6 +808,11 @@ static bool wx_clean_tx_irq(struct wx_q_vector *q_vector,
total_bytes += tx_buffer->bytecount;
total_packets += tx_buffer->gso_segs;
+ /* schedule check for Tx timestamp */
+ if (unlikely(test_bit(WX_STATE_PTP_TX_IN_PROGRESS, wx->state)) &&
+ skb_shinfo(tx_buffer->skb)->tx_flags & SKBTX_IN_PROGRESS)
+ ptp_schedule_worker(wx->ptp_clock, 0);
+
/* free the skb */
napi_consume_skb(tx_buffer->skb, napi_budget);
@@ -819,6 +895,36 @@ static bool wx_clean_tx_irq(struct wx_q_vector *q_vector,
return !!budget;
}
+static void wx_update_rx_dim_sample(struct wx_q_vector *q_vector)
+{
+ struct dim_sample sample = {};
+
+ dim_update_sample(q_vector->total_events,
+ q_vector->rx.total_packets,
+ q_vector->rx.total_bytes,
+ &sample);
+
+ net_dim(&q_vector->rx.dim, &sample);
+}
+
+static void wx_update_tx_dim_sample(struct wx_q_vector *q_vector)
+{
+ struct dim_sample sample = {};
+
+ dim_update_sample(q_vector->total_events,
+ q_vector->tx.total_packets,
+ q_vector->tx.total_bytes,
+ &sample);
+
+ net_dim(&q_vector->tx.dim, &sample);
+}
+
+static void wx_update_dim_sample(struct wx_q_vector *q_vector)
+{
+ wx_update_rx_dim_sample(q_vector);
+ wx_update_tx_dim_sample(q_vector);
+}
+
/**
* wx_poll - NAPI polling RX/TX cleanup routine
* @napi: napi struct with our devices info in it
@@ -865,6 +971,8 @@ static int wx_poll(struct napi_struct *napi, int budget)
/* all work done, exit the polling mode */
if (likely(napi_complete_done(napi, work_done))) {
+ if (wx->adaptive_itr)
+ wx_update_dim_sample(q_vector);
if (netif_running(wx->netdev))
wx_intr_enable(wx, WX_INTR_Q(q_vector->v_idx));
}
@@ -932,9 +1040,9 @@ static void wx_tx_olinfo_status(union wx_tx_desc *tx_desc,
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
-static void wx_tx_map(struct wx_ring *tx_ring,
- struct wx_tx_buffer *first,
- const u8 hdr_len)
+static int wx_tx_map(struct wx_ring *tx_ring,
+ struct wx_tx_buffer *first,
+ const u8 hdr_len)
{
struct sk_buff *skb = first->skb;
struct wx_tx_buffer *tx_buffer;
@@ -1013,6 +1121,8 @@ static void wx_tx_map(struct wx_ring *tx_ring,
netdev_tx_sent_queue(wx_txring_txq(tx_ring), first->bytecount);
+ /* set the timestamp */
+ first->time_stamp = jiffies;
skb_tx_timestamp(skb);
/* Force memory writes to complete before letting h/w know there
@@ -1027,6 +1137,10 @@ static void wx_tx_map(struct wx_ring *tx_ring,
/* set next_to_watch value indicating a packet is present */
first->next_to_watch = tx_desc;
+ /* set next_eop for amlite tx head wb */
+ if (tx_ring->headwb_mem)
+ first->next_eop = i;
+
i++;
if (i == tx_ring->count)
i = 0;
@@ -1038,7 +1152,7 @@ static void wx_tx_map(struct wx_ring *tx_ring,
if (netif_xmit_stopped(wx_txring_txq(tx_ring)) || !netdev_xmit_more())
writel(i, tx_ring->tail);
- return;
+ return 0;
dma_error:
dev_err(tx_ring->dev, "TX DMA map failed\n");
@@ -1062,6 +1176,8 @@ dma_error:
first->skb = NULL;
tx_ring->next_to_use = i;
+
+ return -ENOMEM;
}
static void wx_tx_ctxtdesc(struct wx_ring *tx_ring, u32 vlan_macip_lens,
@@ -1082,26 +1198,6 @@ static void wx_tx_ctxtdesc(struct wx_ring *tx_ring, u32 vlan_macip_lens,
context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}
-static void wx_get_ipv6_proto(struct sk_buff *skb, int offset, u8 *nexthdr)
-{
- struct ipv6hdr *hdr = (struct ipv6hdr *)(skb->data + offset);
-
- *nexthdr = hdr->nexthdr;
- offset += sizeof(struct ipv6hdr);
- while (ipv6_ext_hdr(*nexthdr)) {
- struct ipv6_opt_hdr _hdr, *hp;
-
- if (*nexthdr == NEXTHDR_NONE)
- return;
- hp = skb_header_pointer(skb, offset, sizeof(_hdr), &_hdr);
- if (!hp)
- return;
- if (*nexthdr == NEXTHDR_FRAGMENT)
- break;
- *nexthdr = hp->nexthdr;
- }
-}
-
union network_header {
struct iphdr *ipv4;
struct ipv6hdr *ipv6;
@@ -1112,6 +1208,8 @@ static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first)
{
u8 tun_prot = 0, l4_prot = 0, ptype = 0;
struct sk_buff *skb = first->skb;
+ unsigned char *exthdr, *l4_hdr;
+ __be16 frag_off;
if (skb->encapsulation) {
union network_header hdr;
@@ -1122,14 +1220,18 @@ static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first)
ptype = WX_PTYPE_TUN_IPV4;
break;
case htons(ETH_P_IPV6):
- wx_get_ipv6_proto(skb, skb_network_offset(skb), &tun_prot);
+ l4_hdr = skb_transport_header(skb);
+ exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr);
+ tun_prot = ipv6_hdr(skb)->nexthdr;
+ if (l4_hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data, &tun_prot, &frag_off);
ptype = WX_PTYPE_TUN_IPV6;
break;
default:
return ptype;
}
- if (tun_prot == IPPROTO_IPIP) {
+ if (tun_prot == IPPROTO_IPIP || tun_prot == IPPROTO_IPV6) {
hdr.raw = (void *)inner_ip_hdr(skb);
ptype |= WX_PTYPE_PKT_IPIP;
} else if (tun_prot == IPPROTO_UDP) {
@@ -1166,7 +1268,11 @@ static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first)
l4_prot = hdr.ipv4->protocol;
break;
case 6:
- wx_get_ipv6_proto(skb, skb_inner_network_offset(skb), &l4_prot);
+ l4_hdr = skb_inner_transport_header(skb);
+ exthdr = skb_inner_network_header(skb) + sizeof(struct ipv6hdr);
+ l4_prot = inner_ipv6_hdr(skb)->nexthdr;
+ if (l4_hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_prot, &frag_off);
ptype |= WX_PTYPE_PKT_IPV6;
break;
default:
@@ -1179,7 +1285,11 @@ static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first)
ptype = WX_PTYPE_PKT_IP;
break;
case htons(ETH_P_IPV6):
- wx_get_ipv6_proto(skb, skb_network_offset(skb), &l4_prot);
+ l4_hdr = skb_transport_header(skb);
+ exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr);
+ l4_prot = ipv6_hdr(skb)->nexthdr;
+ if (l4_hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_prot, &frag_off);
ptype = WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6;
break;
default:
@@ -1269,13 +1379,20 @@ static int wx_tso(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
if (enc) {
+ unsigned char *exthdr, *l4_hdr;
+ __be16 frag_off;
+
switch (first->protocol) {
case htons(ETH_P_IP):
tun_prot = ip_hdr(skb)->protocol;
first->tx_flags |= WX_TX_FLAGS_OUTER_IPV4;
break;
case htons(ETH_P_IPV6):
+ l4_hdr = skb_transport_header(skb);
+ exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr);
tun_prot = ipv6_hdr(skb)->nexthdr;
+ if (l4_hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data, &tun_prot, &frag_off);
break;
default:
break;
@@ -1298,6 +1415,7 @@ static int wx_tso(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
WX_TXD_TUNNEL_LEN_SHIFT);
break;
case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) -
(char *)ip_hdr(skb)) >> 2) <<
WX_TXD_OUTER_IPLEN_SHIFT;
@@ -1335,12 +1453,15 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
u8 tun_prot = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) {
+csum_failed:
if (!(first->tx_flags & WX_TX_FLAGS_HW_VLAN) &&
!(first->tx_flags & WX_TX_FLAGS_CC))
return;
vlan_macip_lens = skb_network_offset(skb) <<
WX_TXD_MACLEN_SHIFT;
} else {
+ unsigned char *exthdr, *l4_hdr;
+ __be16 frag_off;
u8 l4_prot = 0;
union {
struct iphdr *ipv4;
@@ -1362,7 +1483,12 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
tun_prot = ip_hdr(skb)->protocol;
break;
case htons(ETH_P_IPV6):
+ l4_hdr = skb_transport_header(skb);
+ exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr);
tun_prot = ipv6_hdr(skb)->nexthdr;
+ if (l4_hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &tun_prot, &frag_off);
break;
default:
return;
@@ -1386,6 +1512,7 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
WX_TXD_TUNNEL_LEN_SHIFT);
break;
case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) -
(char *)ip_hdr(skb)) >> 2) <<
WX_TXD_OUTER_IPLEN_SHIFT;
@@ -1408,7 +1535,10 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
break;
case 6:
vlan_macip_lens |= (transport_hdr.raw - network_hdr.raw) >> 1;
+ exthdr = network_hdr.raw + sizeof(struct ipv6hdr);
l4_prot = network_hdr.ipv6->nexthdr;
+ if (transport_hdr.raw != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_prot, &frag_off);
break;
default:
break;
@@ -1428,7 +1558,8 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
WX_TXD_L4LEN_SHIFT;
break;
default:
- break;
+ skb_checksum_help(skb);
+ goto csum_failed;
}
/* update TX checksum flag */
@@ -1486,6 +1617,20 @@ static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb,
tx_flags |= WX_TX_FLAGS_HW_VLAN;
}
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ wx->ptp_clock) {
+ if (wx->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
+ !test_and_set_bit_lock(WX_STATE_PTP_TX_IN_PROGRESS,
+ wx->state)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ tx_flags |= WX_TX_FLAGS_TSTAMP;
+ wx->ptp_tx_skb = skb_get(skb);
+ wx->ptp_tx_start = jiffies;
+ } else {
+ wx->tx_hwtstamp_skipped++;
+ }
+ }
+
/* record initial flags and protocol */
first->tx_flags = tx_flags;
first->protocol = vlan_get_protocol(skb);
@@ -1501,12 +1646,20 @@ static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb,
if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags) && tx_ring->atr_sample_rate)
wx->atr(tx_ring, first, ptype);
- wx_tx_map(tx_ring, first, hdr_len);
+ if (wx_tx_map(tx_ring, first, hdr_len))
+ goto cleanup_tx_tstamp;
return NETDEV_TX_OK;
out_drop:
dev_kfree_skb_any(first->skb);
first->skb = NULL;
+cleanup_tx_tstamp:
+ if (unlikely(tx_flags & WX_TX_FLAGS_TSTAMP)) {
+ dev_kfree_skb_any(wx->ptp_tx_skb);
+ wx->ptp_tx_skb = NULL;
+ wx->tx_hwtstamp_errors++;
+ clear_bit_unlock(WX_STATE_PTP_TX_IN_PROGRESS, wx->state);
+ }
return NETDEV_TX_OK;
}
@@ -1537,6 +1690,65 @@ netdev_tx_t wx_xmit_frame(struct sk_buff *skb,
}
EXPORT_SYMBOL(wx_xmit_frame);
+static void wx_set_itr(struct wx_q_vector *q_vector)
+{
+ struct wx *wx = q_vector->wx;
+ u32 new_itr;
+
+ if (!wx->adaptive_itr)
+ return;
+
+ /* use the smallest value of new ITR delay calculations */
+ new_itr = min(q_vector->rx.itr, q_vector->tx.itr);
+ new_itr <<= 2;
+
+ if (new_itr != q_vector->itr) {
+ /* save the algorithm value here */
+ q_vector->itr = new_itr;
+
+ if (wx->pdev->is_virtfn)
+ wx_write_eitr_vf(q_vector);
+ else
+ wx_write_eitr(q_vector);
+ }
+}
+
+static void wx_rx_dim_work(struct work_struct *work)
+{
+ struct dim *dim = container_of(work, struct dim, work);
+ struct dim_cq_moder rx_moder;
+ struct wx_ring_container *rx;
+ struct wx_q_vector *q_vector;
+
+ rx = container_of(dim, struct wx_ring_container, dim);
+
+ rx_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ rx->itr = rx_moder.usec;
+
+ q_vector = container_of(rx, struct wx_q_vector, rx);
+ wx_set_itr(q_vector);
+
+ dim->state = DIM_START_MEASURE;
+}
+
+static void wx_tx_dim_work(struct work_struct *work)
+{
+ struct dim *dim = container_of(work, struct dim, work);
+ struct dim_cq_moder tx_moder;
+ struct wx_ring_container *tx;
+ struct wx_q_vector *q_vector;
+
+ tx = container_of(dim, struct wx_ring_container, dim);
+
+ tx_moder = net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
+ tx->itr = tx_moder.usec;
+
+ q_vector = container_of(tx, struct wx_q_vector, tx);
+ wx_set_itr(q_vector);
+
+ dim->state = DIM_START_MEASURE;
+}
+
void wx_napi_enable_all(struct wx *wx)
{
struct wx_q_vector *q_vector;
@@ -1544,6 +1756,11 @@ void wx_napi_enable_all(struct wx *wx)
for (q_idx = 0; q_idx < wx->num_q_vectors; q_idx++) {
q_vector = wx->q_vector[q_idx];
+
+ INIT_WORK(&q_vector->rx.dim.work, wx_rx_dim_work);
+ INIT_WORK(&q_vector->tx.dim.work, wx_tx_dim_work);
+ q_vector->rx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
+ q_vector->tx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
napi_enable(&q_vector->napi);
}
}
@@ -1557,10 +1774,71 @@ void wx_napi_disable_all(struct wx *wx)
for (q_idx = 0; q_idx < wx->num_q_vectors; q_idx++) {
q_vector = wx->q_vector[q_idx];
napi_disable(&q_vector->napi);
+ disable_work_sync(&q_vector->rx.dim.work);
+ disable_work_sync(&q_vector->tx.dim.work);
}
}
EXPORT_SYMBOL(wx_napi_disable_all);
+static bool wx_set_vmdq_queues(struct wx *wx)
+{
+ u16 vmdq_i = wx->ring_feature[RING_F_VMDQ].limit;
+ u16 rss_i = wx->ring_feature[RING_F_RSS].limit;
+ u16 rss_m = WX_RSS_DISABLED_MASK;
+ u16 vmdq_m = 0;
+
+ /* only proceed if VMDq is enabled */
+ if (!test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags))
+ return false;
+ /* Add starting offset to total pool count */
+ vmdq_i += wx->ring_feature[RING_F_VMDQ].offset;
+
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ /* double check we are limited to maximum pools */
+ vmdq_i = min_t(u16, 64, vmdq_i);
+
+ /* 64 pool mode with 2 queues per pool, or
+ * 16/32/64 pool mode with 1 queue per pool
+ */
+ if (vmdq_i > 32 || rss_i < 4) {
+ vmdq_m = WX_VMDQ_2Q_MASK;
+ rss_m = WX_RSS_2Q_MASK;
+ rss_i = min_t(u16, rss_i, 2);
+ /* 32 pool mode with 4 queues per pool */
+ } else {
+ vmdq_m = WX_VMDQ_4Q_MASK;
+ rss_m = WX_RSS_4Q_MASK;
+ rss_i = 4;
+ }
+ } else {
+ /* double check we are limited to maximum pools */
+ vmdq_i = min_t(u16, 8, vmdq_i);
+
+ /* when VMDQ on, disable RSS */
+ rss_i = 1;
+ }
+
+ /* remove the starting offset from the pool count */
+ vmdq_i -= wx->ring_feature[RING_F_VMDQ].offset;
+
+ /* save features for later use */
+ wx->ring_feature[RING_F_VMDQ].indices = vmdq_i;
+ wx->ring_feature[RING_F_VMDQ].mask = vmdq_m;
+
+ /* limit RSS based on user input and save for later use */
+ wx->ring_feature[RING_F_RSS].indices = rss_i;
+ wx->ring_feature[RING_F_RSS].mask = rss_m;
+
+	wx->queues_per_pool = rss_i; /* may be the same as num_rx_queues_per_pool */
+ wx->num_rx_pools = vmdq_i;
+ wx->num_rx_queues_per_pool = rss_i;
+
+ wx->num_rx_queues = vmdq_i * rss_i;
+ wx->num_tx_queues = vmdq_i * rss_i;
+
+ return true;
+}
+
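
wx_set_vmdq_queues() above trades pools against queues per pool on the 64-pool parts: up to 32 pools keep up to 4 RSS queues each, while more than 32 pools (or an RSS limit below 4) caps every pool at 2 queues. A rough standalone sketch of that split, using assumed pool counts:

#include <stdio.h>

int main(void)
{
	unsigned int pools[] = { 8, 32, 48, 64 };  /* requested VMDq pools (assumed) */
	unsigned int rss_limit = 4;                /* RSS queue limit (assumed)      */

	for (int i = 0; i < 4; i++) {
		unsigned int vmdq_i = pools[i] > 64 ? 64 : pools[i];
		unsigned int rss_i = rss_limit;

		if (vmdq_i > 32 || rss_i < 4)
			rss_i = rss_i < 2 ? rss_i : 2;  /* 2-queue pools */
		else
			rss_i = 4;                      /* 4-queue pools */

		printf("%2u pools -> %u queues/pool, %u rx queues\n",
		       vmdq_i, rss_i, vmdq_i * rss_i);
	}
	return 0;
}
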
/**
* wx_set_rss_queues: Allocate queues for RSS
* @wx: board private structure to initialize
@@ -1575,6 +1853,10 @@ static void wx_set_rss_queues(struct wx *wx)
/* set mask for 16 queue limit of RSS */
f = &wx->ring_feature[RING_F_RSS];
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
+ f->mask = WX_RSS_64Q_MASK;
+ else
+ f->mask = WX_RSS_8Q_MASK;
f->indices = f->limit;
if (!(test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)))
@@ -1582,6 +1864,7 @@ static void wx_set_rss_queues(struct wx *wx)
clear_bit(WX_FLAG_FDIR_HASH, wx->flags);
+ wx->ring_feature[RING_F_FDIR].indices = 1;
/* Use Flow Director in addition to RSS to ensure the best
* distribution of flows across cores, even when an FDIR flow
* isn't matched.
@@ -1607,6 +1890,9 @@ static void wx_set_num_queues(struct wx *wx)
wx->num_tx_queues = 1;
wx->queues_per_pool = 1;
+ if (wx_set_vmdq_queues(wx))
+ return;
+
wx_set_rss_queues(wx);
}
@@ -1620,7 +1906,7 @@ static void wx_set_num_queues(struct wx *wx)
*/
static int wx_acquire_msix_vectors(struct wx *wx)
{
- struct irq_affinity affd = { .pre_vectors = 1 };
+ struct irq_affinity affd = { .post_vectors = 1 };
int nvecs, i;
/* We start by asking for one vector per queue pair */
@@ -1657,16 +1943,24 @@ static int wx_acquire_msix_vectors(struct wx *wx)
return nvecs;
}
- wx->msix_entry->entry = 0;
- wx->msix_entry->vector = pci_irq_vector(wx->pdev, 0);
nvecs -= 1;
for (i = 0; i < nvecs; i++) {
wx->msix_q_entries[i].entry = i;
- wx->msix_q_entries[i].vector = pci_irq_vector(wx->pdev, i + 1);
+ wx->msix_q_entries[i].vector = pci_irq_vector(wx->pdev, i);
}
wx->num_q_vectors = nvecs;
+ wx->msix_entry->entry = nvecs;
+ wx->msix_entry->vector = pci_irq_vector(wx->pdev, nvecs);
+
+ if (test_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags)) {
+ wx->msix_entry->entry = 0;
+ wx->msix_entry->vector = pci_irq_vector(wx->pdev, 0);
+ wx->msix_q_entries[0].entry = 0;
+ wx->msix_q_entries[0].vector = pci_irq_vector(wx->pdev, 1);
+ }
+
return 0;
}
@@ -1684,9 +1978,13 @@ static int wx_set_interrupt_capability(struct wx *wx)
/* We will try to get MSI-X interrupts first */
ret = wx_acquire_msix_vectors(wx);
- if (ret == 0 || (ret == -ENOMEM))
+ if (ret == 0 || (ret == -ENOMEM) || pdev->is_virtfn)
return ret;
+ /* Disable VMDq support */
+	dev_warn(&wx->pdev->dev, "Disabling VMDq support\n");
+ clear_bit(WX_FLAG_VMDQ_ENABLED, wx->flags);
+
/* Disable RSS */
dev_warn(&wx->pdev->dev, "Disabling RSS support\n");
wx->ring_feature[RING_F_RSS].limit = 1;
@@ -1713,6 +2011,49 @@ static int wx_set_interrupt_capability(struct wx *wx)
return 0;
}
+static bool wx_cache_ring_vmdq(struct wx *wx)
+{
+ struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];
+ struct wx_ring_feature *rss = &wx->ring_feature[RING_F_RSS];
+ u16 reg_idx;
+ int i;
+
+ /* only proceed if VMDq is enabled */
+ if (!test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags))
+ return false;
+
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ /* start at VMDq register offset for SR-IOV enabled setups */
+ reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
+ for (i = 0; i < wx->num_rx_queues; i++, reg_idx++) {
+ /* If we are greater than indices move to next pool */
+ if ((reg_idx & ~vmdq->mask) >= rss->indices)
+ reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+ wx->rx_ring[i]->reg_idx = reg_idx;
+ }
+ reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
+ for (i = 0; i < wx->num_tx_queues; i++, reg_idx++) {
+ /* If we are greater than indices move to next pool */
+ if ((reg_idx & rss->mask) >= rss->indices)
+ reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+ wx->tx_ring[i]->reg_idx = reg_idx;
+ }
+ } else {
+ /* start at VMDq register offset for SR-IOV enabled setups */
+ reg_idx = vmdq->offset;
+ for (i = 0; i < wx->num_rx_queues; i++)
+ /* If we are greater than indices move to next pool */
+ wx->rx_ring[i]->reg_idx = reg_idx + i;
+
+ reg_idx = vmdq->offset;
+ for (i = 0; i < wx->num_tx_queues; i++)
+ /* If we are greater than indices move to next pool */
+ wx->tx_ring[i]->reg_idx = reg_idx + i;
+ }
+
+ return true;
+}
+
/**
* wx_cache_ring_rss - Descriptor ring to register mapping for RSS
* @wx: board private structure to initialize
@@ -1724,6 +2065,9 @@ static void wx_cache_ring_rss(struct wx *wx)
{
u16 i;
+ if (wx_cache_ring_vmdq(wx))
+ return;
+
for (i = 0; i < wx->num_rx_queues; i++)
wx->rx_ring[i]->reg_idx = i;
@@ -1781,10 +2125,17 @@ static int wx_alloc_q_vector(struct wx *wx,
/* initialize pointer to rings */
ring = q_vector->ring;
- if (wx->mac.type == wx_mac_sp)
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ case wx_mac_aml:
+ case wx_mac_aml40:
default_itr = WX_12K_ITR;
- else
+ break;
+ default:
default_itr = WX_7K_ITR;
+ break;
+ }
+
/* initialize ITR */
if (txr_count && !rxr_count)
/* tx only vector */
@@ -1978,7 +2329,12 @@ int wx_init_interrupt_scheme(struct wx *wx)
int ret;
/* Number of supported queues */
- wx_set_num_queues(wx);
+ if (wx->pdev->is_virtfn) {
+ if (wx->set_num_queues)
+ wx->set_num_queues(wx);
+ } else {
+ wx_set_num_queues(wx);
+ }
/* Set interrupt mode */
ret = wx_set_interrupt_capability(wx);
@@ -2006,8 +2362,10 @@ irqreturn_t wx_msix_clean_rings(int __always_unused irq, void *data)
struct wx_q_vector *q_vector = data;
/* EIAM disabled interrupts (on this vector) for us */
- if (q_vector->rx.ring || q_vector->tx.ring)
+ if (q_vector->rx.ring || q_vector->tx.ring) {
napi_schedule_irqoff(&q_vector->napi);
+ q_vector->total_events++;
+ }
return IRQ_HANDLED;
}
@@ -2108,6 +2466,8 @@ static void wx_set_ivar(struct wx *wx, s8 direction,
if (direction == -1) {
/* other causes */
+ if (test_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags))
+ msix_vector = 0;
msix_vector |= WX_PX_IVAR_ALLOC_VAL;
index = 0;
ivar = rd32(wx, WX_PX_MISC_IVAR);
@@ -2116,7 +2476,6 @@ static void wx_set_ivar(struct wx *wx, s8 direction,
wr32(wx, WX_PX_MISC_IVAR, ivar);
} else {
/* tx or rx causes */
- msix_vector += 1; /* offset for queue vectors */
msix_vector |= WX_PX_IVAR_ALLOC_VAL;
index = ((16 * (queue & 1)) + (8 * direction));
ivar = rd32(wx, WX_PX_IVAR(queue >> 1));
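
With the misc vector moved to the last slot, queue vectors are now programmed into the IVAR table without the old +1 offset. Each 32-bit IVAR register carries four 8-bit entries, Rx and Tx for an even-numbered queue in the low half and for the following odd queue in the high half, which is what the index arithmetic above computes. A standalone illustration (treating direction 0 as Rx and 1 as Tx is an assumption here, not something this hunk states):

#include <stdio.h>

int main(void)
{
	for (unsigned int queue = 0; queue < 4; queue++) {
		for (unsigned int dir = 0; dir < 2; dir++) {   /* assumed: 0 = Rx, 1 = Tx */
			unsigned int reg = queue >> 1;
			unsigned int shift = (16 * (queue & 1)) + (8 * dir);

			printf("queue %u %s -> IVAR[%u] bits %u..%u\n",
			       queue, dir ? "tx" : "rx", reg, shift, shift + 7);
		}
	}
	return 0;
}
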
@@ -2140,14 +2499,22 @@ void wx_write_eitr(struct wx_q_vector *q_vector)
int v_idx = q_vector->v_idx;
u32 itr_reg;
- if (wx->mac.type == wx_mac_sp)
+ switch (wx->mac.type) {
+ case wx_mac_sp:
itr_reg = q_vector->itr & WX_SP_MAX_EITR;
- else
+ break;
+ case wx_mac_aml:
+ case wx_mac_aml40:
+ itr_reg = (q_vector->itr >> 3) & WX_AML_MAX_EITR;
+ break;
+ default:
itr_reg = q_vector->itr & WX_EM_MAX_EITR;
+ break;
+ }
itr_reg |= WX_PX_ITR_CNT_WDIS;
- wr32(wx, WX_PX_ITR(v_idx + 1), itr_reg);
+ wr32(wx, WX_PX_ITR(v_idx), itr_reg);
}
/**
@@ -2161,10 +2528,17 @@ void wx_configure_vectors(struct wx *wx)
{
struct pci_dev *pdev = wx->pdev;
u32 eitrsel = 0;
- u16 v_idx;
+ u16 v_idx, i;
if (pdev->msix_enabled) {
/* Populate MSIX to EITR Select */
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ if (wx->num_vfs >= 32)
+ eitrsel = BIT(wx->num_vfs % 32) - 1;
+ } else {
+ for (i = 0; i < wx->num_vfs; i++)
+ eitrsel |= BIT(i);
+ }
wr32(wx, WX_PX_ITRSEL, eitrsel);
/* use EIAM to auto-mask when MSI-X interrupt is asserted
* this saves a register write for every interrupt
@@ -2193,9 +2567,9 @@ void wx_configure_vectors(struct wx *wx)
wx_write_eitr(q_vector);
}
- wx_set_ivar(wx, -1, 0, 0);
+ wx_set_ivar(wx, -1, 0, v_idx);
if (pdev->msix_enabled)
- wr32(wx, WX_PX_ITR(0), 1950);
+ wr32(wx, WX_PX_ITR(v_idx), 1950);
}
EXPORT_SYMBOL(wx_configure_vectors);
@@ -2215,9 +2589,6 @@ static void wx_clean_rx_ring(struct wx_ring *rx_ring)
if (rx_buffer->skb) {
struct sk_buff *skb = rx_buffer->skb;
- if (WX_CB(skb)->page_released)
- page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
-
dev_kfree_skb(skb);
}
@@ -2227,7 +2598,7 @@ static void wx_clean_rx_ring(struct wx_ring *rx_ring)
dma_sync_single_range_for_cpu(rx_ring->dev,
rx_buffer->dma,
rx_buffer->page_offset,
- WX_RX_BUFSZ,
+ rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
/* free resources associated with mapping */
@@ -2241,6 +2612,9 @@ static void wx_clean_rx_ring(struct wx_ring *rx_ring)
}
}
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -2375,6 +2749,16 @@ void wx_clean_all_tx_rings(struct wx *wx)
}
EXPORT_SYMBOL(wx_clean_all_tx_rings);
+static void wx_free_headwb_resources(struct wx_ring *tx_ring)
+{
+ if (!tx_ring->headwb_mem)
+ return;
+
+ dma_free_coherent(tx_ring->dev, sizeof(u32),
+ tx_ring->headwb_mem, tx_ring->headwb_dma);
+ tx_ring->headwb_mem = NULL;
+}
+
/**
* wx_free_tx_resources - Free Tx Resources per Queue
* @tx_ring: Tx descriptor ring for a specific queue
@@ -2394,6 +2778,8 @@ static void wx_free_tx_resources(struct wx_ring *tx_ring)
dma_free_coherent(tx_ring->dev, tx_ring->size,
tx_ring->desc, tx_ring->dma);
tx_ring->desc = NULL;
+
+ wx_free_headwb_resources(tx_ring);
}
/**
@@ -2423,13 +2809,14 @@ static int wx_alloc_page_pool(struct wx_ring *rx_ring)
struct page_pool_params pp_params = {
.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
- .order = 0,
- .pool_size = rx_ring->size,
+ .order = wx_rx_pg_order(rx_ring),
+ .pool_size = rx_ring->count * rx_ring->rx_buf_len /
+ wx_rx_pg_size(rx_ring),
.nid = dev_to_node(rx_ring->dev),
.dev = rx_ring->dev,
.dma_dir = DMA_FROM_DEVICE,
.offset = 0,
- .max_len = PAGE_SIZE,
+ .max_len = wx_rx_pg_size(rx_ring),
};
rx_ring->page_pool = page_pool_create(&pp_params);
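
With the ring able to choose its buffer length, the page pool is sized from the descriptor count and the per-buffer length rather than one page per descriptor. A quick arithmetic sketch with assumed values:

#include <stdio.h>

int main(void)
{
	unsigned int count = 512;        /* ring descriptors (assumed) */
	unsigned int rx_buf_len = 2048;  /* 2 KiB buffers (assumed)    */
	unsigned int pg_size = 4096;     /* order-0 pages (assumed)    */
	unsigned int pool_size = count * rx_buf_len / pg_size;

	/* two 2 KiB buffers share one 4 KiB page in this configuration */
	printf("pool_size = %u pages\n", pool_size);   /* prints 256 */
	return 0;
}
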
@@ -2532,6 +2919,24 @@ err_setup_rx:
return err;
}
+static void wx_setup_headwb_resources(struct wx_ring *tx_ring)
+{
+ struct wx *wx = netdev_priv(tx_ring->netdev);
+
+ if (!test_bit(WX_FLAG_TXHEAD_WB_ENABLED, wx->flags))
+ return;
+
+ if (!tx_ring->q_vector)
+ return;
+
+ tx_ring->headwb_mem = dma_alloc_coherent(tx_ring->dev,
+ sizeof(u32),
+ &tx_ring->headwb_dma,
+ GFP_KERNEL);
+ if (!tx_ring->headwb_mem)
+		dev_info(tx_ring->dev, "Failed to allocate headwb memory, disabling head write-back\n");
+}
+
/**
* wx_setup_tx_resources - allocate Tx resources (Descriptors)
* @tx_ring: tx descriptor ring (for a specific queue) to setup
@@ -2572,6 +2977,8 @@ static int wx_setup_tx_resources(struct wx_ring *tx_ring)
if (!tx_ring->desc)
goto err;
+ wx_setup_headwb_resources(tx_ring);
+
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
@@ -2708,24 +3115,35 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
struct wx *wx = netdev_priv(netdev);
bool need_reset = false;
- if (features & NETIF_F_RXHASH) {
- wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN,
- WX_RDB_RA_CTL_RSS_EN);
- wx->rss_enabled = true;
- } else {
- wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN, 0);
- wx->rss_enabled = false;
- }
+ wx->rss_enabled = !!(features & NETIF_F_RXHASH);
+ wx_enable_rss(wx, wx->rss_enabled);
netdev->features = features;
- if (wx->mac.type == wx_mac_sp && changed & NETIF_F_HW_VLAN_CTAG_RX)
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX && wx->do_reset)
wx->do_reset(netdev);
else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER))
wx_set_rx_mode(netdev);
+ if (test_bit(WX_FLAG_RSC_CAPABLE, wx->flags)) {
+ if (!(features & NETIF_F_LRO)) {
+ if (test_bit(WX_FLAG_RSC_ENABLED, wx->flags))
+ need_reset = true;
+ clear_bit(WX_FLAG_RSC_ENABLED, wx->flags);
+ } else if (!(test_bit(WX_FLAG_RSC_ENABLED, wx->flags))) {
+ if (wx->rx_itr_setting == 1 ||
+ wx->rx_itr_setting > WX_MIN_RSC_ITR) {
+ set_bit(WX_FLAG_RSC_ENABLED, wx->flags);
+ need_reset = true;
+ } else if (changed & NETIF_F_LRO) {
+ dev_info(&wx->pdev->dev,
+					 "rx-usecs set too low, not enabling RSC\n");
+ }
+ }
+ }
+
if (!(test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)))
- return 0;
+ goto out;
/* Check if Flow Director n-tuple support was enabled or disabled. If
* the state changed, we need to reset.
@@ -2751,7 +3169,8 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
break;
}
- if (need_reset)
+out:
+ if (need_reset && wx->do_reset)
wx->do_reset(netdev);
return 0;
@@ -2800,10 +3219,45 @@ netdev_features_t wx_fix_features(struct net_device *netdev,
}
}
+ /* If Rx checksum is disabled, then RSC/LRO should also be disabled */
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_LRO;
+
+ /* Turn off LRO if not RSC capable */
+ if (!test_bit(WX_FLAG_RSC_CAPABLE, wx->flags))
+ features &= ~NETIF_F_LRO;
+
return features;
}
EXPORT_SYMBOL(wx_fix_features);
+#define WX_MAX_TUNNEL_HDR_LEN 80
+netdev_features_t wx_features_check(struct sk_buff *skb,
+ struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ if (!skb->encapsulation)
+ return features;
+
+ if (wx->mac.type == wx_mac_em)
+ return features & ~NETIF_F_CSUM_MASK;
+
+ if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) >
+ WX_MAX_TUNNEL_HDR_LEN))
+ return features & ~NETIF_F_CSUM_MASK;
+
+ if (skb->inner_protocol_type == ENCAP_TYPE_ETHER &&
+ skb->inner_protocol != htons(ETH_P_IP) &&
+ skb->inner_protocol != htons(ETH_P_IPV6) &&
+ skb->inner_protocol != htons(ETH_P_TEB))
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+
+ return features;
+}
+EXPORT_SYMBOL(wx_features_check);
+
void wx_set_ring(struct wx *wx, u32 new_tx_count,
u32 new_rx_count, struct wx_ring *temp_ring)
{
@@ -2870,5 +3324,35 @@ void wx_set_ring(struct wx *wx, u32 new_tx_count,
}
EXPORT_SYMBOL(wx_set_ring);
+void wx_service_event_schedule(struct wx *wx)
+{
+ if (!test_and_set_bit(WX_STATE_SERVICE_SCHED, wx->state))
+ queue_work(system_power_efficient_wq, &wx->service_task);
+}
+EXPORT_SYMBOL(wx_service_event_schedule);
+
+void wx_service_event_complete(struct wx *wx)
+{
+ if (WARN_ON(!test_bit(WX_STATE_SERVICE_SCHED, wx->state)))
+ return;
+
+ /* flush memory to make sure state is correct before next watchdog */
+ smp_mb__before_atomic();
+ clear_bit(WX_STATE_SERVICE_SCHED, wx->state);
+}
+EXPORT_SYMBOL(wx_service_event_complete);
+
+void wx_service_timer(struct timer_list *t)
+{
+ struct wx *wx = timer_container_of(wx, t, service_timer);
+ unsigned long next_event_offset = HZ * 2;
+
+ /* Reset the timer */
+ mod_timer(&wx->service_timer, next_event_offset + jiffies);
+
+ wx_service_event_schedule(wx);
+}
+EXPORT_SYMBOL(wx_service_timer);
+
MODULE_DESCRIPTION("Common library for Wangxun(R) Ethernet drivers.");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_lib.h
index fdeb0c315b75..aed6ea8cf0d6 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.h
@@ -33,7 +33,13 @@ void wx_get_stats64(struct net_device *netdev,
int wx_set_features(struct net_device *netdev, netdev_features_t features);
netdev_features_t wx_fix_features(struct net_device *netdev,
netdev_features_t features);
+netdev_features_t wx_features_check(struct sk_buff *skb,
+ struct net_device *netdev,
+ netdev_features_t features);
void wx_set_ring(struct wx *wx, u32 new_tx_count,
u32 new_rx_count, struct wx_ring *temp_ring);
+void wx_service_event_schedule(struct wx *wx);
+void wx_service_event_complete(struct wx *wx);
+void wx_service_timer(struct timer_list *t);
-#endif /* _NGBE_LIB_H_ */
+#endif /* _WX_LIB_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_mbx.c b/drivers/net/ethernet/wangxun/libwx/wx_mbx.c
new file mode 100644
index 000000000000..2aa03eadf064
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_mbx.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/pci.h>
+#include "wx_type.h"
+#include "wx_mbx.h"
+
+/**
+ * wx_obtain_mbx_lock_pf - obtain mailbox lock
+ * @wx: pointer to the HW structure
+ * @vf: the VF index
+ *
+ * Return: return 0 on success and -EBUSY on failure
+ **/
+static int wx_obtain_mbx_lock_pf(struct wx *wx, u16 vf)
+{
+ int count = 5;
+ u32 mailbox;
+
+ while (count--) {
+ /* Take ownership of the buffer */
+ wr32(wx, WX_PXMAILBOX(vf), WX_PXMAILBOX_PFU);
+
+ /* reserve mailbox for vf use */
+ mailbox = rd32(wx, WX_PXMAILBOX(vf));
+ if (mailbox & WX_PXMAILBOX_PFU)
+ return 0;
+ else if (count)
+ udelay(10);
+ }
+ wx_err(wx, "Failed to obtain mailbox lock for PF%d", vf);
+	wx_err(wx, "Failed to obtain mailbox lock for VF%d", vf);
+ return -EBUSY;
+}
+
+static int wx_check_for_bit_pf(struct wx *wx, u32 mask, int index)
+{
+ u32 mbvficr = rd32(wx, WX_MBVFICR(index));
+
+ if (!(mbvficr & mask))
+ return -EBUSY;
+ wr32(wx, WX_MBVFICR(index), mask);
+
+ return 0;
+}
+
+/**
+ * wx_check_for_ack_pf - checks to see if the VF has acked
+ * @wx: pointer to the HW structure
+ * @vf: the VF index
+ *
+ * Return: 0 if the VF has set the ack bit, otherwise -EBUSY
+ **/
+int wx_check_for_ack_pf(struct wx *wx, u16 vf)
+{
+ u32 index = vf / 16, vf_bit = vf % 16;
+
+ return wx_check_for_bit_pf(wx,
+ FIELD_PREP(WX_MBVFICR_VFACK_MASK,
+ BIT(vf_bit)),
+ index);
+}
+
+/**
+ * wx_check_for_msg_pf - checks to see if the VF has sent mail
+ * @wx: pointer to the HW structure
+ * @vf: the VF index
+ *
+ * Return: 0 if the VF has set the request bit, otherwise -EBUSY
+ **/
+int wx_check_for_msg_pf(struct wx *wx, u16 vf)
+{
+ u32 index = vf / 16, vf_bit = vf % 16;
+
+ return wx_check_for_bit_pf(wx,
+ FIELD_PREP(WX_MBVFICR_VFREQ_MASK,
+ BIT(vf_bit)),
+ index);
+}
+
+/**
+ * wx_write_mbx_pf - Places a message in the mailbox
+ * @wx: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf: the VF index
+ *
+ * Return: return 0 on success and -EINVAL/-EBUSY on failure
+ **/
+int wx_write_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf)
+{
+ struct wx_mbx_info *mbx = &wx->mbx;
+ int ret, i;
+
+ /* mbx->size is up to 15 */
+ if (size > mbx->size) {
+ wx_err(wx, "Invalid mailbox message size %d", size);
+ return -EINVAL;
+ }
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret = wx_obtain_mbx_lock_pf(wx, vf);
+ if (ret)
+ return ret;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ wx_check_for_msg_pf(wx, vf);
+ wx_check_for_ack_pf(wx, vf);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ wr32a(wx, WX_PXMBMEM(vf), i, msg[i]);
+
+ /* Interrupt VF to tell it a message has been sent and release buffer */
+ /* set mirrored mailbox flags */
+ wr32a(wx, WX_PXMBMEM(vf), WX_VXMAILBOX_SIZE, WX_PXMAILBOX_STS);
+ wr32(wx, WX_PXMAILBOX(vf), WX_PXMAILBOX_STS);
+
+ return 0;
+}
+
+/**
+ * wx_read_mbx_pf - Read a message from the mailbox
+ * @wx: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf: the VF index
+ *
+ * Return: return 0 on success and -EBUSY on failure
+ **/
+int wx_read_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf)
+{
+ struct wx_mbx_info *mbx = &wx->mbx;
+ int ret;
+ u16 i;
+
+ /* limit read to size of mailbox and mbx->size is up to 15 */
+ if (size > mbx->size)
+ size = mbx->size;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret = wx_obtain_mbx_lock_pf(wx, vf);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < size; i++)
+ msg[i] = rd32a(wx, WX_PXMBMEM(vf), i);
+
+ /* Acknowledge the message and release buffer */
+ /* set mirrored mailbox flags */
+ wr32a(wx, WX_PXMBMEM(vf), WX_VXMAILBOX_SIZE, WX_PXMAILBOX_ACK);
+ wr32(wx, WX_PXMAILBOX(vf), WX_PXMAILBOX_ACK);
+
+ return 0;
+}
+
+/**
+ * wx_check_for_rst_pf - checks to see if the VF has reset
+ * @wx: pointer to the HW structure
+ * @vf: the VF index
+ *
+ * Return: return 0 on success and -EBUSY on failure
+ **/
+int wx_check_for_rst_pf(struct wx *wx, u16 vf)
+{
+ u32 reg_offset = WX_VF_REG_OFFSET(vf);
+ u32 vf_shift = WX_VF_IND_SHIFT(vf);
+ u32 vflre = 0;
+
+ vflre = rd32(wx, WX_VFLRE(reg_offset));
+ if (!(vflre & BIT(vf_shift)))
+ return -EBUSY;
+ wr32(wx, WX_VFLREC(reg_offset), BIT(vf_shift));
+
+ return 0;
+}
+
+static u32 wx_read_v2p_mailbox(struct wx *wx)
+{
+ u32 mailbox = rd32(wx, WX_VXMAILBOX);
+
+ mailbox |= wx->mbx.mailbox;
+ wx->mbx.mailbox |= mailbox & WX_VXMAILBOX_R2C_BITS;
+
+ return mailbox;
+}
+
+static u32 wx_mailbox_get_lock_vf(struct wx *wx)
+{
+ wr32(wx, WX_VXMAILBOX, WX_VXMAILBOX_VFU);
+ return wx_read_v2p_mailbox(wx);
+}
+
+/**
+ * wx_obtain_mbx_lock_vf - obtain mailbox lock
+ * @wx: pointer to the HW structure
+ *
+ * Return: return 0 on success and -EBUSY on failure
+ **/
+static int wx_obtain_mbx_lock_vf(struct wx *wx)
+{
+ int count = 5, ret;
+ u32 mailbox;
+
+ ret = readx_poll_timeout_atomic(wx_mailbox_get_lock_vf, wx, mailbox,
+ (mailbox & WX_VXMAILBOX_VFU),
+ 1, count);
+ if (ret)
+ wx_err(wx, "Failed to obtain mailbox lock for VF.\n");
+
+ return ret;
+}
+
+static int wx_check_for_bit_vf(struct wx *wx, u32 mask)
+{
+ u32 mailbox = wx_read_v2p_mailbox(wx);
+
+ wx->mbx.mailbox &= ~mask;
+
+ return (mailbox & mask ? 0 : -EBUSY);
+}
+
+/**
+ * wx_check_for_ack_vf - checks to see if the PF has ACK'd
+ * @wx: pointer to the HW structure
+ *
+ * Return: 0 if the PF has set the ack bit, otherwise -EBUSY
+ **/
+static int wx_check_for_ack_vf(struct wx *wx)
+{
+ /* read clear the pf ack bit */
+ return wx_check_for_bit_vf(wx, WX_VXMAILBOX_PFACK);
+}
+
+/**
+ * wx_check_for_msg_vf - checks to see if the PF has sent mail
+ * @wx: pointer to the HW structure
+ *
+ * Return: 0 if the PF has sent a message, otherwise -EBUSY
+ **/
+int wx_check_for_msg_vf(struct wx *wx)
+{
+ /* read clear the pf sts bit */
+ return wx_check_for_bit_vf(wx, WX_VXMAILBOX_PFSTS);
+}
+
+/**
+ * wx_check_for_rst_vf - checks to see if the PF has reset
+ * @wx: pointer to the HW structure
+ *
+ * Return: 0 if the PF has signalled a reset, otherwise -EBUSY
+ **/
+int wx_check_for_rst_vf(struct wx *wx)
+{
+ /* read clear the pf reset done bit */
+ return wx_check_for_bit_vf(wx,
+ WX_VXMAILBOX_RSTD |
+ WX_VXMAILBOX_RSTI);
+}
+
+/**
+ * wx_poll_for_msg - Wait for message notification
+ * @wx: pointer to the HW structure
+ *
+ * Return: return 0 if the VF has successfully received a message notification
+ **/
+static int wx_poll_for_msg(struct wx *wx)
+{
+ struct wx_mbx_info *mbx = &wx->mbx;
+ u32 val;
+
+ return readx_poll_timeout_atomic(wx_check_for_msg_vf, wx, val,
+ (val == 0), mbx->udelay, mbx->timeout);
+}
+
+/**
+ * wx_poll_for_ack - Wait for message acknowledgment
+ * @wx: pointer to the HW structure
+ *
+ * Return: return 0 if the VF has successfully received a message ack
+ **/
+static int wx_poll_for_ack(struct wx *wx)
+{
+ struct wx_mbx_info *mbx = &wx->mbx;
+ u32 val;
+
+ return readx_poll_timeout_atomic(wx_check_for_ack_vf, wx, val,
+ (val == 0), mbx->udelay, mbx->timeout);
+}
+
+/**
+ * wx_read_posted_mbx - Wait for message notification and receive message
+ * @wx: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * Return: returns 0 if it successfully received a message notification and
+ * copied it into the receive buffer.
+ **/
+int wx_read_posted_mbx(struct wx *wx, u32 *msg, u16 size)
+{
+ int ret;
+
+ ret = wx_poll_for_msg(wx);
+	/* if a message notification was received, read it; otherwise we timed out */
+ if (ret)
+ return ret;
+
+ return wx_read_mbx_vf(wx, msg, size);
+}
+
+/**
+ * wx_write_posted_mbx - Write a message to the mailbox, wait for ack
+ * @wx: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * Return: returns 0 if it successfully copied message into the buffer and
+ * received an ack to that message within delay * timeout period
+ **/
+int wx_write_posted_mbx(struct wx *wx, u32 *msg, u16 size)
+{
+ int ret;
+
+ /* send msg */
+ ret = wx_write_mbx_vf(wx, msg, size);
+ /* if msg sent wait until we receive an ack */
+ if (ret)
+ return ret;
+
+ return wx_poll_for_ack(wx);
+}
+
+/**
+ * wx_write_mbx_vf - Write a message to the mailbox
+ * @wx: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * Return: returns 0 if it successfully copied message into the buffer
+ **/
+int wx_write_mbx_vf(struct wx *wx, u32 *msg, u16 size)
+{
+ struct wx_mbx_info *mbx = &wx->mbx;
+ int ret, i;
+
+ /* mbx->size is up to 15 */
+ if (size > mbx->size) {
+ wx_err(wx, "Invalid mailbox message size %d", size);
+ return -EINVAL;
+ }
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret = wx_obtain_mbx_lock_vf(wx);
+ if (ret)
+ return ret;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ wx_check_for_msg_vf(wx);
+ wx_check_for_ack_vf(wx);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ wr32a(wx, WX_VXMBMEM, i, msg[i]);
+
+ /* Drop VFU and interrupt the PF to tell it a message has been sent */
+ wr32(wx, WX_VXMAILBOX, WX_VXMAILBOX_REQ);
+
+ return 0;
+}
+
+/**
+ * wx_read_mbx_vf - Reads a message from the inbox intended for vf
+ * @wx: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * Return: returns 0 if it successfully copied message into the buffer
+ **/
+int wx_read_mbx_vf(struct wx *wx, u32 *msg, u16 size)
+{
+ struct wx_mbx_info *mbx = &wx->mbx;
+ int ret, i;
+
+	/* limit the read to the mailbox size; mbx->size is at most 15 */
+ if (size > mbx->size)
+ size = mbx->size;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret = wx_obtain_mbx_lock_vf(wx);
+ if (ret)
+ return ret;
+
+ /* copy the message from the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = rd32a(wx, WX_VXMBMEM, i);
+
+ /* Acknowledge receipt and release mailbox, then we're done */
+ wr32(wx, WX_VXMAILBOX, WX_VXMAILBOX_ACK);
+
+ return 0;
+}
+
+int wx_init_mbx_params_vf(struct wx *wx)
+{
+ wx->vfinfo = kzalloc(sizeof(struct vf_data_storage),
+ GFP_KERNEL);
+ if (!wx->vfinfo)
+ return -ENOMEM;
+
+ /* Initialize mailbox parameters */
+ wx->mbx.size = WX_VXMAILBOX_SIZE;
+ wx->mbx.mailbox = WX_VXMAILBOX;
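+	/* poll the mailbox every 10 us, for at most 1000 us */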
+ wx->mbx.udelay = 10;
+ wx->mbx.timeout = 1000;
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_init_mbx_params_vf);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_mbx.h b/drivers/net/ethernet/wangxun/libwx/wx_mbx.h
new file mode 100644
index 000000000000..82df9218490a
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_mbx.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+#ifndef _WX_MBX_H_
+#define _WX_MBX_H_
+
+#define WX_VXMAILBOX_SIZE 15
+
+/* PF Registers */
+#define WX_PXMAILBOX(i) (0x600 + (4 * (i))) /* i=[0,63] */
+#define WX_PXMAILBOX_STS BIT(0) /* Initiate message send to VF */
+#define WX_PXMAILBOX_ACK BIT(1) /* Ack message recv'd from VF */
+#define WX_PXMAILBOX_PFU BIT(3) /* PF owns the mailbox buffer */
+
+/* VF Registers */
+#define WX_VXMAILBOX 0x600
+#define WX_VXMAILBOX_REQ BIT(0) /* Request for PF Ready bit */
+#define WX_VXMAILBOX_ACK BIT(1) /* Ack PF message received */
+#define WX_VXMAILBOX_VFU BIT(2) /* VF owns the mailbox buffer */
+#define WX_VXMAILBOX_PFU BIT(3) /* PF owns the mailbox buffer */
+#define WX_VXMAILBOX_PFSTS BIT(4) /* PF wrote a message in the MB */
+#define WX_VXMAILBOX_PFACK BIT(5) /* PF ack the previous VF msg */
+#define WX_VXMAILBOX_RSTI BIT(6) /* PF has reset indication */
+#define WX_VXMAILBOX_RSTD BIT(7) /* PF has indicated reset done */
+#define WX_VXMAILBOX_R2C_BITS (WX_VXMAILBOX_RSTD | \
+ WX_VXMAILBOX_PFSTS | WX_VXMAILBOX_PFACK)
+
+#define WX_VXMBMEM 0x00C00 /* 16*4B */
+#define WX_PXMBMEM(i) (0x5000 + (64 * (i))) /* i=[0,63] */
+
+#define WX_VFLRE(i) (0x4A0 + (4 * (i))) /* i=[0,1] */
+#define WX_VFLREC(i) (0x4A8 + (4 * (i))) /* i=[0,1] */
+
+/* SR-IOV specific macros */
+#define WX_MBVFICR(i) (0x480 + (4 * (i))) /* i=[0,3] */
+#define WX_MBVFICR_VFREQ_MASK GENMASK(15, 0)
+#define WX_MBVFICR_VFACK_MASK GENMASK(31, 16)
+
+#define WX_VT_MSGTYPE_ACK BIT(31)
+#define WX_VT_MSGTYPE_NACK BIT(30)
+#define WX_VT_MSGTYPE_CTS BIT(29)
+#define WX_VT_MSGINFO_SHIFT 16
+#define WX_VT_MSGINFO_MASK GENMASK(23, 16)
+
+enum wx_pfvf_api_rev {
+ wx_mbox_api_null,
+ wx_mbox_api_13 = 4, /* API version 1.3 */
+ wx_mbox_api_unknown, /* indicates that API version is not known */
+};
+
+/* mailbox API */
+#define WX_VF_RESET 0x01 /* VF requests reset */
+#define WX_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define WX_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define WX_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+#define WX_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+#define WX_VF_SET_MACVLAN 0x06 /* VF requests PF unicast filter */
+#define WX_VF_API_NEGOTIATE 0x08 /* negotiate API version */
+#define WX_VF_GET_QUEUES 0x09 /* get queue configuration */
+#define WX_VF_GET_RETA 0x0a /* VF request for RETA */
+#define WX_VF_GET_RSS_KEY 0x0b /* get RSS key */
+#define WX_VF_UPDATE_XCAST_MODE 0x0c
+#define WX_VF_GET_LINK_STATE 0x10 /* get vf link state */
+#define WX_VF_GET_FW_VERSION 0x11 /* get fw version */
+
+#define WX_VF_BACKUP 0x8001 /* VF requests backup */
+
+#define WX_PF_CONTROL_MSG BIT(8) /* PF control message */
+#define WX_PF_NOFITY_VF_LINK_STATUS 0x1
+#define WX_PF_NOFITY_VF_NET_NOT_RUNNING BIT(31)
+
+#define WX_VF_TX_QUEUES 1 /* number of Tx queues supported */
+#define WX_VF_RX_QUEUES 2 /* number of Rx queues supported */
+#define WX_VF_TRANS_VLAN 3 /* Indication of port vlan */
+#define WX_VF_DEF_QUEUE 4 /* Default queue offset */
+
+#define WX_VF_PERMADDR_MSG_LEN 4
+
+enum wxvf_xcast_modes {
+ WXVF_XCAST_MODE_NONE = 0,
+ WXVF_XCAST_MODE_MULTI,
+ WXVF_XCAST_MODE_ALLMULTI,
+ WXVF_XCAST_MODE_PROMISC,
+};
+
+int wx_write_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf);
+int wx_read_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf);
+int wx_check_for_rst_pf(struct wx *wx, u16 mbx_id);
+int wx_check_for_msg_pf(struct wx *wx, u16 mbx_id);
+int wx_check_for_ack_pf(struct wx *wx, u16 mbx_id);
+
+int wx_read_posted_mbx(struct wx *wx, u32 *msg, u16 size);
+int wx_write_posted_mbx(struct wx *wx, u32 *msg, u16 size);
+int wx_check_for_rst_vf(struct wx *wx);
+int wx_check_for_msg_vf(struct wx *wx);
+int wx_read_mbx_vf(struct wx *wx, u32 *msg, u16 size);
+int wx_write_mbx_vf(struct wx *wx, u32 *msg, u16 size);
+int wx_init_mbx_params_vf(struct wx *wx);
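+
+/* Illustrative VF-side usage (sketch only, not part of this patch):
+ * a VF driver would typically negotiate the mailbox API first, e.g.
+ *
+ *	u32 msg[2] = { WX_VF_API_NEGOTIATE, wx_mbox_api_13 };
+ *
+ *	if (!wx_write_posted_mbx(wx, msg, 2) &&
+ *	    !wx_read_posted_mbx(wx, msg, 2) &&
+ *	    (msg[0] & WX_VT_MSGTYPE_ACK))
+ *		the PF accepted wx_mbox_api_13 and further requests may follow.
+ */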
+
+#endif /* _WX_MBX_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ptp.c b/drivers/net/ethernet/wangxun/libwx/wx_ptp.c
new file mode 100644
index 000000000000..44f3e6505246
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ptp.c
@@ -0,0 +1,905 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+/* Copyright (c) 1999 - 2025 Intel Corporation. */
+
+#include <linux/ptp_classify.h>
+#include <linux/clocksource.h>
+#include <linux/pci.h>
+
+#include "wx_type.h"
+#include "wx_ptp.h"
+#include "wx_hw.h"
+
+#define WX_INCVAL_10GB 0xCCCCCC
+#define WX_INCVAL_1GB 0x800000
+#define WX_INCVAL_100 0xA00000
+#define WX_INCVAL_10 0xC7F380
+#define WX_INCVAL_EM 0x2000000
+#define WX_INCVAL_AML 0xA00000
+
+#define WX_INCVAL_SHIFT_10GB 20
+#define WX_INCVAL_SHIFT_1GB 18
+#define WX_INCVAL_SHIFT_100 15
+#define WX_INCVAL_SHIFT_10 12
+#define WX_INCVAL_SHIFT_EM 22
+#define WX_INCVAL_SHIFT_AML 21
+
+#define WX_OVERFLOW_PERIOD (HZ * 30)
+#define WX_PTP_TX_TIMEOUT (HZ)
+
+#define WX_1588_PPS_WIDTH_EM 120
+
+#define WX_NS_PER_SEC 1000000000ULL
+
+static u64 wx_ptp_timecounter_cyc2time(struct wx *wx, u64 timestamp)
+{
+ unsigned int seq;
+ u64 ns;
+
+ do {
+ seq = read_seqbegin(&wx->hw_tc_lock);
+ ns = timecounter_cyc2time(&wx->hw_tc, timestamp);
+ } while (read_seqretry(&wx->hw_tc_lock, seq));
+
+ return ns;
+}
+
+static u64 wx_ptp_readtime(struct wx *wx, struct ptp_system_timestamp *sts)
+{
+ u32 timeh1, timeh2, timel;
+
+ timeh1 = rd32ptp(wx, WX_TSC_1588_SYSTIMH);
+ ptp_read_system_prets(sts);
+ timel = rd32ptp(wx, WX_TSC_1588_SYSTIML);
+ ptp_read_system_postts(sts);
+ timeh2 = rd32ptp(wx, WX_TSC_1588_SYSTIMH);
+
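+	/* SYSTIMH changed while SYSTIML was read: the low word rolled over,
+	 * so re-read it against the new high word
+	 */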
+ if (timeh1 != timeh2) {
+ ptp_read_system_prets(sts);
+ timel = rd32ptp(wx, WX_TSC_1588_SYSTIML);
+		ptp_read_system_postts(sts);
+ }
+ return (u64)timel | (u64)timeh2 << 32;
+}
+
+static int wx_ptp_adjfine(struct ptp_clock_info *ptp, long ppb)
+{
+ struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+ u64 incval, mask;
+
+ smp_mb(); /* Force any pending update before accessing. */
+ incval = READ_ONCE(wx->base_incval);
+ incval = adjust_by_scaled_ppm(incval, ppb);
+
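+	/* keep the increment within this MAC's field width; non-EM parts also
+	 * need the field above bit 24 set to 2, matching wx_ptp_reset_cyclecounter()
+	 */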
+ mask = (wx->mac.type == wx_mac_em) ? 0x7FFFFFF : 0xFFFFFF;
+ incval &= mask;
+ if (wx->mac.type != wx_mac_em)
+ incval |= 2 << 24;
+
+ wr32ptp(wx, WX_TSC_1588_INC, incval);
+
+ return 0;
+}
+
+static int wx_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+ unsigned long flags;
+
+ write_seqlock_irqsave(&wx->hw_tc_lock, flags);
+ timecounter_adjtime(&wx->hw_tc, delta);
+ write_sequnlock_irqrestore(&wx->hw_tc_lock, flags);
+
+ if (wx->ptp_setup_sdp)
+ wx->ptp_setup_sdp(wx);
+
+ return 0;
+}
+
+static int wx_ptp_gettimex64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+ u64 ns, stamp;
+
+ stamp = wx_ptp_readtime(wx, sts);
+ ns = wx_ptp_timecounter_cyc2time(wx, stamp);
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static int wx_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+ unsigned long flags;
+ u64 ns;
+
+ ns = timespec64_to_ns(ts);
+ /* reset the timecounter */
+ write_seqlock_irqsave(&wx->hw_tc_lock, flags);
+ timecounter_init(&wx->hw_tc, &wx->hw_cc, ns);
+ write_sequnlock_irqrestore(&wx->hw_tc_lock, flags);
+
+ if (wx->ptp_setup_sdp)
+ wx->ptp_setup_sdp(wx);
+
+ return 0;
+}
+
+/**
+ * wx_ptp_clear_tx_timestamp - utility function to clear Tx timestamp state
+ * @wx: the private board structure
+ *
+ * This function should be called whenever the state related to a Tx timestamp
+ * needs to be cleared. This helps ensure that all related bits are reset for
+ * the next Tx timestamp event.
+ */
+static void wx_ptp_clear_tx_timestamp(struct wx *wx)
+{
+ rd32ptp(wx, WX_TSC_1588_STMPH);
+ if (wx->ptp_tx_skb) {
+ dev_kfree_skb_any(wx->ptp_tx_skb);
+ wx->ptp_tx_skb = NULL;
+ }
+ clear_bit_unlock(WX_STATE_PTP_TX_IN_PROGRESS, wx->state);
+}
+
+/**
+ * wx_ptp_convert_to_hwtstamp - convert register value to hw timestamp
+ * @wx: private board structure
+ * @hwtstamp: stack timestamp structure
+ * @timestamp: unsigned 64bit system time value
+ *
+ * We need to convert the adapter's RX/TXSTMP registers into a hwtstamp value
+ * which can be used by the stack's ptp functions.
+ *
+ * The lock is used to protect consistency of the cyclecounter and the SYSTIME
+ * registers. However, it does not need to protect against the Rx or Tx
+ * timestamp registers, as there can't be a new timestamp until the old one is
+ * unlatched by reading.
+ *
+ * In addition to the timestamp in hardware, some controllers need a software
+ * overflow cyclecounter, and this function takes this into account as well.
+ **/
+static void wx_ptp_convert_to_hwtstamp(struct wx *wx,
+ struct skb_shared_hwtstamps *hwtstamp,
+ u64 timestamp)
+{
+ u64 ns;
+
+ ns = wx_ptp_timecounter_cyc2time(wx, timestamp);
+ hwtstamp->hwtstamp = ns_to_ktime(ns);
+}
+
+/**
+ * wx_ptp_tx_hwtstamp - utility function which checks for TX time stamp
+ * @wx: the private board struct
+ *
+ * if the timestamp is valid, we convert it into the timecounter ns
+ * value, then store that result into the shhwtstamps structure which
+ * is passed up the network stack
+ */
+static void wx_ptp_tx_hwtstamp(struct wx *wx)
+{
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct sk_buff *skb = wx->ptp_tx_skb;
+ u64 regval = 0;
+
+ regval |= (u64)rd32ptp(wx, WX_TSC_1588_STMPL);
+ regval |= (u64)rd32ptp(wx, WX_TSC_1588_STMPH) << 32;
+
+ wx_ptp_convert_to_hwtstamp(wx, &shhwtstamps, regval);
+
+ wx->ptp_tx_skb = NULL;
+ clear_bit_unlock(WX_STATE_PTP_TX_IN_PROGRESS, wx->state);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
+ wx->tx_hwtstamp_pkts++;
+}
+
+static int wx_ptp_tx_hwtstamp_work(struct wx *wx)
+{
+ u32 tsynctxctl;
+
+ /* we have to have a valid skb to poll for a timestamp */
+ if (!wx->ptp_tx_skb) {
+ wx_ptp_clear_tx_timestamp(wx);
+ return 0;
+ }
+
+ /* stop polling once we have a valid timestamp */
+ tsynctxctl = rd32ptp(wx, WX_TSC_1588_CTL);
+ if (tsynctxctl & WX_TSC_1588_CTL_VALID) {
+ wx_ptp_tx_hwtstamp(wx);
+ return 0;
+ }
+
+ return -1;
+}
+
+/**
+ * wx_ptp_overflow_check - watchdog task to detect SYSTIME overflow
+ * @wx: pointer to wx struct
+ *
+ * this watchdog task periodically reads the timecounter so that a wrap of
+ * the system time registers is not missed. This needs to be run
+ * approximately twice a minute for the fastest
+ * overflowing hardware. We run it for all hardware since it shouldn't have a
+ * large impact.
+ */
+static void wx_ptp_overflow_check(struct wx *wx)
+{
+ bool timeout = time_is_before_jiffies(wx->last_overflow_check +
+ WX_OVERFLOW_PERIOD);
+ unsigned long flags;
+
+ if (timeout) {
+ /* Update the timecounter */
+ write_seqlock_irqsave(&wx->hw_tc_lock, flags);
+ timecounter_read(&wx->hw_tc);
+ write_sequnlock_irqrestore(&wx->hw_tc_lock, flags);
+
+ wx->last_overflow_check = jiffies;
+ }
+}
+
+/**
+ * wx_ptp_rx_hang - detect error case when Rx timestamp registers latched
+ * @wx: pointer to wx struct
+ *
+ * this watchdog task is scheduled to detect error case where hardware has
+ * dropped an Rx packet that was timestamped when the ring is full. The
+ * particular error is rare but leaves the device in a state unable to
+ * timestamp any future packets.
+ */
+static void wx_ptp_rx_hang(struct wx *wx)
+{
+ struct wx_ring *rx_ring;
+ unsigned long rx_event;
+ u32 tsyncrxctl;
+ int n;
+
+ tsyncrxctl = rd32(wx, WX_PSR_1588_CTL);
+
+ /* if we don't have a valid timestamp in the registers, just update the
+ * timeout counter and exit
+ */
+ if (!(tsyncrxctl & WX_PSR_1588_CTL_VALID)) {
+ wx->last_rx_ptp_check = jiffies;
+ return;
+ }
+
+ /* determine the most recent watchdog or rx_timestamp event */
+ rx_event = wx->last_rx_ptp_check;
+ for (n = 0; n < wx->num_rx_queues; n++) {
+ rx_ring = wx->rx_ring[n];
+ if (time_after(rx_ring->last_rx_timestamp, rx_event))
+ rx_event = rx_ring->last_rx_timestamp;
+ }
+
+ /* only need to read the high RXSTMP register to clear the lock */
+ if (time_is_before_jiffies(rx_event + 5 * HZ)) {
+ rd32(wx, WX_PSR_1588_STMPH);
+ wx->last_rx_ptp_check = jiffies;
+
+ wx->rx_hwtstamp_cleared++;
+		dev_warn(&wx->pdev->dev, "clearing RX Timestamp hang\n");
+ }
+}
+
+/**
+ * wx_ptp_tx_hang - detect error case where Tx timestamp never finishes
+ * @wx: private network wx structure
+ */
+static void wx_ptp_tx_hang(struct wx *wx)
+{
+ bool timeout = time_is_before_jiffies(wx->ptp_tx_start +
+ WX_PTP_TX_TIMEOUT);
+
+ if (!wx->ptp_tx_skb)
+ return;
+
+ if (!test_bit(WX_STATE_PTP_TX_IN_PROGRESS, wx->state))
+ return;
+
+ /* If we haven't received a timestamp within the timeout, it is
+ * reasonable to assume that it will never occur, so we can unlock the
+ * timestamp bit when this occurs.
+ */
+ if (timeout) {
+ wx_ptp_clear_tx_timestamp(wx);
+ wx->tx_hwtstamp_timeouts++;
+ dev_warn(&wx->pdev->dev, "clearing Tx timestamp hang\n");
+ }
+}
+
+static long wx_ptp_do_aux_work(struct ptp_clock_info *ptp)
+{
+ struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+ int ts_done;
+
+ ts_done = wx_ptp_tx_hwtstamp_work(wx);
+
+ wx_ptp_overflow_check(wx);
+ if (unlikely(test_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER,
+ wx->flags)))
+ wx_ptp_rx_hang(wx);
+ wx_ptp_tx_hang(wx);
+
+ return ts_done ? 1 : HZ;
+}
+
+static u64 wx_ptp_trigger_calc(struct wx *wx)
+{
+ struct cyclecounter *cc = &wx->hw_cc;
+ unsigned long flags;
+ u64 ns = 0;
+ u32 rem;
+
+ /* Read the current clock time, and save the cycle counter value */
+ write_seqlock_irqsave(&wx->hw_tc_lock, flags);
+ ns = timecounter_read(&wx->hw_tc);
+ wx->pps_edge_start = wx->hw_tc.cycle_last;
+ write_sequnlock_irqrestore(&wx->hw_tc_lock, flags);
+ wx->pps_edge_end = wx->pps_edge_start;
+
+ /* Figure out how far past the next second we are */
+ div_u64_rem(ns, WX_NS_PER_SEC, &rem);
+
+ /* Figure out how many nanoseconds to add to round the clock edge up
+ * to the next full second
+ */
+ rem = (WX_NS_PER_SEC - rem);
+
+ /* Adjust the clock edge to align with the next full second. */
+ wx->pps_edge_start += div_u64(((u64)rem << cc->shift), cc->mult);
+ wx->pps_edge_end += div_u64(((u64)(rem + wx->pps_width) <<
+ cc->shift), cc->mult);
+
+ return (ns + rem);
+}
+
+static int wx_ptp_setup_sdp(struct wx *wx)
+{
+ struct cyclecounter *cc = &wx->hw_cc;
+ u32 tsauxc;
+ u64 nsec;
+
+ if (wx->pps_width >= WX_NS_PER_SEC) {
+ wx_err(wx, "PTP pps width cannot be longer than 1s!\n");
+ return -EINVAL;
+ }
+
+ /* disable the pin first */
+ wr32ptp(wx, WX_TSC_1588_AUX_CTL, 0);
+ WX_WRITE_FLUSH(wx);
+
+ if (!test_bit(WX_FLAG_PTP_PPS_ENABLED, wx->flags)) {
+ if (wx->pps_enabled) {
+ wx->pps_enabled = false;
+ wx_set_pps(wx, false, 0, 0);
+ }
+ return 0;
+ }
+
+ wx->pps_enabled = true;
+ nsec = wx_ptp_trigger_calc(wx);
+ wx_set_pps(wx, wx->pps_enabled, nsec, wx->pps_edge_start);
+
+ tsauxc = WX_TSC_1588_AUX_CTL_PLSG | WX_TSC_1588_AUX_CTL_EN_TT0 |
+ WX_TSC_1588_AUX_CTL_EN_TT1 | WX_TSC_1588_AUX_CTL_EN_TS0;
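+	/* target time 0 marks the rising edge and target time 1 the falling
+	 * edge of the PPS pulse
+	 */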
+ wr32ptp(wx, WX_TSC_1588_TRGT_L(0), (u32)wx->pps_edge_start);
+ wr32ptp(wx, WX_TSC_1588_TRGT_H(0), (u32)(wx->pps_edge_start >> 32));
+ wr32ptp(wx, WX_TSC_1588_TRGT_L(1), (u32)wx->pps_edge_end);
+ wr32ptp(wx, WX_TSC_1588_TRGT_H(1), (u32)(wx->pps_edge_end >> 32));
+ wr32ptp(wx, WX_TSC_1588_SDP(0),
+ WX_TSC_1588_SDP_FUN_SEL_TT0 | WX_TSC_1588_SDP_OUT_LEVEL_H);
+ wr32ptp(wx, WX_TSC_1588_SDP(1), WX_TSC_1588_SDP_FUN_SEL_TS0);
+ wr32ptp(wx, WX_TSC_1588_AUX_CTL, tsauxc);
+ wr32ptp(wx, WX_TSC_1588_INT_EN, WX_TSC_1588_INT_EN_TT1);
+ WX_WRITE_FLUSH(wx);
+
+ /* Adjust the clock edge to align with the next full second. */
+ wx->sec_to_cc = div_u64(((u64)WX_NS_PER_SEC << cc->shift), cc->mult);
+
+ return 0;
+}
+
+static int wx_ptp_feature_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+
+	/* When PPS is enabled, unmask the interrupt for the ClockOut
+	 * feature, so that the interrupt handler can send the PPS
+	 * event when the clock SDP triggers. Clear the mask when PPS is
+	 * disabled.
+	 */
+ if (rq->type != PTP_CLK_REQ_PEROUT || !wx->ptp_setup_sdp)
+ return -EOPNOTSUPP;
+
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE |
+ PTP_PEROUT_PHASE))
+ return -EOPNOTSUPP;
+
+ if (rq->perout.phase.sec || rq->perout.phase.nsec) {
+ wx_err(wx, "Absolute start time not supported.\n");
+ return -EINVAL;
+ }
+
+ if (rq->perout.period.sec != 1 || rq->perout.period.nsec) {
+ wx_err(wx, "Only 1pps is supported.\n");
+ return -EINVAL;
+ }
+
+ if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) {
+ struct timespec64 ts_on;
+
+ ts_on.tv_sec = rq->perout.on.sec;
+ ts_on.tv_nsec = rq->perout.on.nsec;
+ wx->pps_width = timespec64_to_ns(&ts_on);
+ } else {
+ wx->pps_width = 120000000;
+ }
+
+ if (on)
+ set_bit(WX_FLAG_PTP_PPS_ENABLED, wx->flags);
+ else
+ clear_bit(WX_FLAG_PTP_PPS_ENABLED, wx->flags);
+
+ return wx->ptp_setup_sdp(wx);
+}
+
+void wx_ptp_check_pps_event(struct wx *wx)
+{
+ u32 tsauxc, int_status;
+
+ /* this check is necessary in case the interrupt was enabled via some
+ * alternative means (ex. debug_fs). Better to check here than
+ * everywhere that calls this function.
+ */
+ if (!wx->ptp_clock)
+ return;
+
+ int_status = rd32ptp(wx, WX_TSC_1588_INT_ST);
+ if (int_status & WX_TSC_1588_INT_ST_TT1) {
+ /* disable the pin first */
+ wr32ptp(wx, WX_TSC_1588_AUX_CTL, 0);
+ WX_WRITE_FLUSH(wx);
+
+ wx_ptp_trigger_calc(wx);
+
+ tsauxc = WX_TSC_1588_AUX_CTL_PLSG | WX_TSC_1588_AUX_CTL_EN_TT0 |
+ WX_TSC_1588_AUX_CTL_EN_TT1 | WX_TSC_1588_AUX_CTL_EN_TS0;
+ wr32ptp(wx, WX_TSC_1588_TRGT_L(0), (u32)wx->pps_edge_start);
+ wr32ptp(wx, WX_TSC_1588_TRGT_H(0), (u32)(wx->pps_edge_start >> 32));
+ wr32ptp(wx, WX_TSC_1588_TRGT_L(1), (u32)wx->pps_edge_end);
+ wr32ptp(wx, WX_TSC_1588_TRGT_H(1), (u32)(wx->pps_edge_end >> 32));
+ wr32ptp(wx, WX_TSC_1588_AUX_CTL, tsauxc);
+ WX_WRITE_FLUSH(wx);
+ }
+}
+EXPORT_SYMBOL(wx_ptp_check_pps_event);
+
+static long wx_ptp_create_clock(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ long err;
+
+ /* do nothing if we already have a clock device */
+ if (!IS_ERR_OR_NULL(wx->ptp_clock))
+ return 0;
+
+ snprintf(wx->ptp_caps.name, sizeof(wx->ptp_caps.name),
+ "%s", netdev->name);
+ wx->ptp_caps.owner = THIS_MODULE;
+ wx->ptp_caps.n_alarm = 0;
+ wx->ptp_caps.n_ext_ts = 0;
+ wx->ptp_caps.pps = 0;
+ wx->ptp_caps.adjfine = wx_ptp_adjfine;
+ wx->ptp_caps.adjtime = wx_ptp_adjtime;
+ wx->ptp_caps.gettimex64 = wx_ptp_gettimex64;
+ wx->ptp_caps.settime64 = wx_ptp_settime64;
+ wx->ptp_caps.do_aux_work = wx_ptp_do_aux_work;
+ switch (wx->mac.type) {
+ case wx_mac_aml:
+ case wx_mac_aml40:
+ wx->ptp_caps.max_adj = 250000000;
+ wx->ptp_caps.n_per_out = 1;
+ wx->ptp_setup_sdp = wx_ptp_setup_sdp;
+ wx->ptp_caps.enable = wx_ptp_feature_enable;
+ break;
+ case wx_mac_sp:
+ wx->ptp_caps.max_adj = 250000000;
+ wx->ptp_caps.n_per_out = 0;
+ wx->ptp_setup_sdp = NULL;
+ break;
+ case wx_mac_em:
+ wx->ptp_caps.max_adj = 500000000;
+ wx->ptp_caps.n_per_out = 1;
+ wx->ptp_setup_sdp = wx_ptp_setup_sdp;
+ wx->ptp_caps.enable = wx_ptp_feature_enable;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ wx->ptp_clock = ptp_clock_register(&wx->ptp_caps, &wx->pdev->dev);
+ if (IS_ERR(wx->ptp_clock)) {
+ err = PTR_ERR(wx->ptp_clock);
+ wx->ptp_clock = NULL;
+ wx_err(wx, "ptp clock register failed\n");
+ return err;
+ } else if (wx->ptp_clock) {
+ dev_info(&wx->pdev->dev, "registered PHC device on %s\n",
+ netdev->name);
+ }
+
+ /* Set the default timestamp mode to disabled here. We do this in
+ * create_clock instead of initialization, because we don't want to
+ * override the previous settings during a suspend/resume cycle.
+ */
+ wx->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ wx->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
+ return 0;
+}
+
+static int wx_ptp_set_timestamp_mode(struct wx *wx,
+ struct kernel_hwtstamp_config *config)
+{
+ u32 tsync_tx_ctl = WX_TSC_1588_CTL_ENABLED;
+ u32 tsync_rx_ctl = WX_PSR_1588_CTL_ENABLED;
+ DECLARE_BITMAP(flags, WX_PF_FLAGS_NBITS);
+ u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
+ bool is_l2 = false;
+ u32 regval;
+
+ memcpy(flags, wx->flags, sizeof(wx->flags));
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ tsync_tx_ctl = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ tsync_rx_ctl = 0;
+ tsync_rx_mtrl = 0;
+ clear_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, flags);
+ clear_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER, flags);
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ tsync_rx_ctl |= WX_PSR_1588_CTL_TYPE_L4_V1;
+ tsync_rx_mtrl |= WX_PSR_1588_MSG_V1_SYNC;
+ set_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, flags);
+ set_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER, flags);
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ tsync_rx_ctl |= WX_PSR_1588_CTL_TYPE_L4_V1;
+ tsync_rx_mtrl |= WX_PSR_1588_MSG_V1_DELAY_REQ;
+ set_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, flags);
+ set_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER, flags);
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ tsync_rx_ctl |= WX_PSR_1588_CTL_TYPE_EVENT_V2;
+ is_l2 = true;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ set_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, flags);
+ set_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER, flags);
+ break;
+ default:
+ /* register PSR_1588_MSG must be set in order to do V1 packets,
+ * therefore it is not possible to time stamp both V1 Sync and
+ * Delay_Req messages unless hardware supports timestamping all
+ * packets => return error
+ */
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
+ return -ERANGE;
+ }
+
+ /* define ethertype filter for timestamping L2 packets */
+ if (is_l2)
+ wr32(wx, WX_PSR_ETYPE_SWC(WX_PSR_ETYPE_SWC_FILTER_1588),
+ (WX_PSR_ETYPE_SWC_FILTER_EN | /* enable filter */
+ WX_PSR_ETYPE_SWC_1588 | /* enable timestamping */
+ ETH_P_1588)); /* 1588 eth protocol type */
+ else
+ wr32(wx, WX_PSR_ETYPE_SWC(WX_PSR_ETYPE_SWC_FILTER_1588), 0);
+
+ /* enable/disable TX */
+ regval = rd32ptp(wx, WX_TSC_1588_CTL);
+ regval &= ~WX_TSC_1588_CTL_ENABLED;
+ regval |= tsync_tx_ctl;
+ wr32ptp(wx, WX_TSC_1588_CTL, regval);
+
+ /* enable/disable RX */
+ regval = rd32(wx, WX_PSR_1588_CTL);
+ regval &= ~(WX_PSR_1588_CTL_ENABLED | WX_PSR_1588_CTL_TYPE_MASK);
+ regval |= tsync_rx_ctl;
+ wr32(wx, WX_PSR_1588_CTL, regval);
+
+ /* define which PTP packets are time stamped */
+ wr32(wx, WX_PSR_1588_MSG, tsync_rx_mtrl);
+
+ WX_WRITE_FLUSH(wx);
+
+ /* configure adapter flags only when HW is actually configured */
+ memcpy(wx->flags, flags, sizeof(wx->flags));
+
+ /* clear TX/RX timestamp state, just to be sure */
+ wx_ptp_clear_tx_timestamp(wx);
+ rd32(wx, WX_PSR_1588_STMPH);
+
+ return 0;
+}
+
+static u64 wx_ptp_read(struct cyclecounter *hw_cc)
+{
+ struct wx *wx = container_of(hw_cc, struct wx, hw_cc);
+
+ return wx_ptp_readtime(wx, NULL);
+}
+
+static void wx_ptp_link_speed_adjust(struct wx *wx, u32 *shift, u32 *incval)
+{
+ switch (wx->mac.type) {
+ case wx_mac_aml:
+ case wx_mac_aml40:
+ *shift = WX_INCVAL_SHIFT_AML;
+ *incval = WX_INCVAL_AML;
+ return;
+ case wx_mac_em:
+ *shift = WX_INCVAL_SHIFT_EM;
+ *incval = WX_INCVAL_EM;
+ return;
+ default:
+ break;
+ }
+
+ switch (wx->speed) {
+ case SPEED_10:
+ *shift = WX_INCVAL_SHIFT_10;
+ *incval = WX_INCVAL_10;
+ break;
+ case SPEED_100:
+ *shift = WX_INCVAL_SHIFT_100;
+ *incval = WX_INCVAL_100;
+ break;
+ case SPEED_1000:
+ *shift = WX_INCVAL_SHIFT_1GB;
+ *incval = WX_INCVAL_1GB;
+ break;
+ case SPEED_10000:
+ default:
+ *shift = WX_INCVAL_SHIFT_10GB;
+ *incval = WX_INCVAL_10GB;
+ break;
+ }
+}
+
+/**
+ * wx_ptp_reset_cyclecounter - create the cycle counter from hw
+ * @wx: pointer to the wx structure
+ *
+ * This function should be called to set the proper values for the TSC_1588_INC
+ * register and tell the cyclecounter structure what the tick rate of SYSTIME
+ * is. It does not directly modify SYSTIME registers or the timecounter
+ * structure. It should be called whenever a new TSC_1588_INC value is
+ * necessary, such as during initialization or when the link speed changes.
+ */
+void wx_ptp_reset_cyclecounter(struct wx *wx)
+{
+ u32 incval = 0, mask = 0;
+ struct cyclecounter cc;
+ unsigned long flags;
+
+ /* For some of the boards below this mask is technically incorrect.
+ * The timestamp mask overflows at approximately 61bits. However the
+ * particular hardware does not overflow on an even bitmask value.
+ * Instead, it overflows due to conversion of upper 32bits billions of
+ * cycles. Timecounters are not really intended for this purpose so
+ * they do not properly function if the overflow point isn't 2^N-1.
+ * However, the actual SYSTIME values in question take ~138 years to
+ * overflow. In practice this means they won't actually overflow. A
+ * proper fix to this problem would require modification of the
+ * timecounter delta calculations.
+ */
+ cc.mask = CLOCKSOURCE_MASK(64);
+ cc.mult = 1;
+ cc.shift = 0;
+
+ cc.read = wx_ptp_read;
+ wx_ptp_link_speed_adjust(wx, &cc.shift, &incval);
+
+ /* update the base incval used to calculate frequency adjustment */
+ WRITE_ONCE(wx->base_incval, incval);
+
+ mask = (wx->mac.type == wx_mac_em) ? 0x7FFFFFF : 0xFFFFFF;
+ incval &= mask;
+ if (wx->mac.type != wx_mac_em)
+ incval |= 2 << 24;
+ wr32ptp(wx, WX_TSC_1588_INC, incval);
+
+ smp_mb(); /* Force the above update. */
+
+ /* need lock to prevent incorrect read while modifying cyclecounter */
+ write_seqlock_irqsave(&wx->hw_tc_lock, flags);
+ memcpy(&wx->hw_cc, &cc, sizeof(wx->hw_cc));
+ write_sequnlock_irqrestore(&wx->hw_tc_lock, flags);
+}
+EXPORT_SYMBOL(wx_ptp_reset_cyclecounter);
+
+void wx_ptp_reset(struct wx *wx)
+{
+ unsigned long flags;
+
+ /* reset the hardware timestamping mode */
+ wx_ptp_set_timestamp_mode(wx, &wx->tstamp_config);
+ wx_ptp_reset_cyclecounter(wx);
+
+ wr32ptp(wx, WX_TSC_1588_SYSTIML, 0);
+ wr32ptp(wx, WX_TSC_1588_SYSTIMH, 0);
+ WX_WRITE_FLUSH(wx);
+
+ write_seqlock_irqsave(&wx->hw_tc_lock, flags);
+ timecounter_init(&wx->hw_tc, &wx->hw_cc,
+ ktime_to_ns(ktime_get_real()));
+ write_sequnlock_irqrestore(&wx->hw_tc_lock, flags);
+
+ wx->last_overflow_check = jiffies;
+ ptp_schedule_worker(wx->ptp_clock, HZ);
+
+ /* Now that the shift has been calculated and the systime
+ * registers reset, (re-)enable the Clock out feature
+ */
+ if (wx->ptp_setup_sdp)
+ wx->ptp_setup_sdp(wx);
+}
+EXPORT_SYMBOL(wx_ptp_reset);
+
+void wx_ptp_init(struct wx *wx)
+{
+ /* Initialize the seqlock_t first, since the user might call the clock
+ * functions any time after we've initialized the ptp clock device.
+ */
+ seqlock_init(&wx->hw_tc_lock);
+
+ /* obtain a ptp clock device, or re-use an existing device */
+ if (wx_ptp_create_clock(wx))
+ return;
+
+ wx->tx_hwtstamp_pkts = 0;
+ wx->tx_hwtstamp_timeouts = 0;
+ wx->tx_hwtstamp_skipped = 0;
+ wx->tx_hwtstamp_errors = 0;
+ wx->rx_hwtstamp_cleared = 0;
+ /* reset the ptp related hardware bits */
+ wx_ptp_reset(wx);
+
+ /* enter the WX_STATE_PTP_RUNNING state */
+ set_bit(WX_STATE_PTP_RUNNING, wx->state);
+}
+EXPORT_SYMBOL(wx_ptp_init);
+
+/**
+ * wx_ptp_suspend - stop ptp work items
+ * @wx: pointer to wx struct
+ *
+ * This function suspends ptp activity, and prevents more work from being
+ * generated, but does not destroy the clock device.
+ */
+void wx_ptp_suspend(struct wx *wx)
+{
+ /* leave the WX_STATE_PTP_RUNNING STATE */
+ if (!test_and_clear_bit(WX_STATE_PTP_RUNNING, wx->state))
+ return;
+
+ clear_bit(WX_FLAG_PTP_PPS_ENABLED, wx->flags);
+ if (wx->ptp_setup_sdp)
+ wx->ptp_setup_sdp(wx);
+
+ wx_ptp_clear_tx_timestamp(wx);
+}
+EXPORT_SYMBOL(wx_ptp_suspend);
+
+/**
+ * wx_ptp_stop - destroy the ptp_clock device
+ * @wx: pointer to wx struct
+ *
+ * Completely destroy the ptp_clock device, and disable all PTP related
+ * features. Intended to be run when the device is being closed.
+ */
+void wx_ptp_stop(struct wx *wx)
+{
+ /* first, suspend ptp activity */
+ wx_ptp_suspend(wx);
+
+ /* now destroy the ptp clock device */
+ if (wx->ptp_clock) {
+ ptp_clock_unregister(wx->ptp_clock);
+ wx->ptp_clock = NULL;
+ dev_info(&wx->pdev->dev, "removed PHC on %s\n", wx->netdev->name);
+ }
+}
+EXPORT_SYMBOL(wx_ptp_stop);
+
+/**
+ * wx_ptp_rx_hwtstamp - utility function which checks for RX time stamp
+ * @wx: pointer to wx struct
+ * @skb: particular skb to send timestamp with
+ *
+ * if the timestamp is valid, we convert it into the timecounter ns
+ * value, then store that result into the shhwtstamps structure which
+ * is passed up the network stack
+ */
+void wx_ptp_rx_hwtstamp(struct wx *wx, struct sk_buff *skb)
+{
+ u64 regval = 0;
+ u32 tsyncrxctl;
+
+ /* Read the tsyncrxctl register afterwards in order to prevent taking an
+ * I/O hit on every packet.
+ */
+ tsyncrxctl = rd32(wx, WX_PSR_1588_CTL);
+ if (!(tsyncrxctl & WX_PSR_1588_CTL_VALID))
+ return;
+
+ regval |= (u64)rd32(wx, WX_PSR_1588_STMPL);
+ regval |= (u64)rd32(wx, WX_PSR_1588_STMPH) << 32;
+
+ wx_ptp_convert_to_hwtstamp(wx, skb_hwtstamps(skb), regval);
+}
+
+int wx_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg)
+{
+ struct wx *wx = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ *cfg = wx->tstamp_config;
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_hwtstamp_get);
+
+int wx_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct wx *wx = netdev_priv(dev);
+ int err;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ err = wx_ptp_set_timestamp_mode(wx, cfg);
+ if (err)
+ return err;
+
+ /* save these settings for future reference */
+ memcpy(&wx->tstamp_config, cfg, sizeof(wx->tstamp_config));
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_hwtstamp_set);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ptp.h b/drivers/net/ethernet/wangxun/libwx/wx_ptp.h
new file mode 100644
index 000000000000..50db90a6e3ee
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ptp.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _WX_PTP_H_
+#define _WX_PTP_H_
+
+void wx_ptp_check_pps_event(struct wx *wx);
+void wx_ptp_reset_cyclecounter(struct wx *wx);
+void wx_ptp_reset(struct wx *wx);
+void wx_ptp_init(struct wx *wx);
+void wx_ptp_suspend(struct wx *wx);
+void wx_ptp_stop(struct wx *wx);
+void wx_ptp_rx_hwtstamp(struct wx *wx, struct sk_buff *skb);
+int wx_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg);
+int wx_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack);
+
+#endif /* _WX_PTP_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
new file mode 100644
index 000000000000..493da5fffdb6
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
@@ -0,0 +1,929 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+
+#include "wx_type.h"
+#include "wx_hw.h"
+#include "wx_mbx.h"
+#include "wx_sriov.h"
+
+static void wx_vf_configuration(struct pci_dev *pdev, int event_mask)
+{
+ bool enable = !!WX_VF_ENABLE_CHECK(event_mask);
+ struct wx *wx = pci_get_drvdata(pdev);
+ u32 vfn = WX_VF_NUM_GET(event_mask);
+
+ if (enable)
+ eth_zero_addr(wx->vfinfo[vfn].vf_mac_addr);
+}
+
+static int wx_alloc_vf_macvlans(struct wx *wx, u8 num_vfs)
+{
+ struct vf_macvlans *mv_list;
+ int num_vf_macvlans, i;
+
+ /* Initialize list of VF macvlans */
+ INIT_LIST_HEAD(&wx->vf_mvs.mvlist);
+
+ num_vf_macvlans = wx->mac.num_rar_entries -
+ (WX_MAX_PF_MACVLANS + 1 + num_vfs);
+ if (!num_vf_macvlans)
+ return -EINVAL;
+
+ mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans),
+ GFP_KERNEL);
+ if (!mv_list)
+ return -ENOMEM;
+
+ for (i = 0; i < num_vf_macvlans; i++) {
+ mv_list[i].vf = -1;
+ mv_list[i].free = true;
+ list_add(&mv_list[i].mvlist, &wx->vf_mvs.mvlist);
+ }
+ wx->mv_list = mv_list;
+
+ return 0;
+}
+
+static void wx_sriov_clear_data(struct wx *wx)
+{
+ /* set num VFs to 0 to prevent access to vfinfo */
+ wx->num_vfs = 0;
+
+ /* free VF control structures */
+ kfree(wx->vfinfo);
+ wx->vfinfo = NULL;
+
+ /* free macvlan list */
+ kfree(wx->mv_list);
+ wx->mv_list = NULL;
+
+ /* set default pool back to 0 */
+ wr32m(wx, WX_PSR_VM_CTL, WX_PSR_VM_CTL_POOL_MASK, 0);
+ wx->ring_feature[RING_F_VMDQ].offset = 0;
+
+ clear_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags);
+ clear_bit(WX_FLAG_SRIOV_ENABLED, wx->flags);
+ /* Disable VMDq flag so device will be set in NM mode */
+ if (wx->ring_feature[RING_F_VMDQ].limit == 1)
+ clear_bit(WX_FLAG_VMDQ_ENABLED, wx->flags);
+}
+
+static int __wx_enable_sriov(struct wx *wx, u8 num_vfs)
+{
+ int i, ret = 0;
+ u32 value = 0;
+
+ set_bit(WX_FLAG_SRIOV_ENABLED, wx->flags);
+ dev_info(&wx->pdev->dev, "SR-IOV enabled with %d VFs\n", num_vfs);
+
+ if (num_vfs == 7 && wx->mac.type == wx_mac_em)
+ set_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags);
+
+ /* Enable VMDq flag so device will be set in VM mode */
+ set_bit(WX_FLAG_VMDQ_ENABLED, wx->flags);
+ if (!wx->ring_feature[RING_F_VMDQ].limit)
+ wx->ring_feature[RING_F_VMDQ].limit = 1;
+ wx->ring_feature[RING_F_VMDQ].offset = num_vfs;
+
+ wx->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
+ GFP_KERNEL);
+ if (!wx->vfinfo)
+ return -ENOMEM;
+
+ ret = wx_alloc_vf_macvlans(wx, num_vfs);
+ if (ret)
+ return ret;
+
+ /* Initialize default switching mode VEB */
+ wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_SW_EN, WX_PSR_CTL_SW_EN);
+
+ for (i = 0; i < num_vfs; i++) {
+ /* enable spoof checking for all VFs */
+ wx->vfinfo[i].spoofchk_enabled = true;
+ wx->vfinfo[i].link_enable = true;
+ /* untrust all VFs */
+ wx->vfinfo[i].trusted = false;
+ /* set the default xcast mode */
+ wx->vfinfo[i].xcast_mode = WXVF_XCAST_MODE_NONE;
+ }
+
+ if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ value = WX_CFG_PORT_CTL_NUM_VT_8;
+ } else {
+ if (num_vfs < 32)
+ value = WX_CFG_PORT_CTL_NUM_VT_32;
+ else
+ value = WX_CFG_PORT_CTL_NUM_VT_64;
+ }
+ wr32m(wx, WX_CFG_PORT_CTL,
+ WX_CFG_PORT_CTL_NUM_VT_MASK,
+ value);
+
+ /* Disable RSC when in SR-IOV mode */
+ clear_bit(WX_FLAG_RSC_CAPABLE, wx->flags);
+ clear_bit(WX_FLAG_RSC_ENABLED, wx->flags);
+
+ return ret;
+}
+
+static void wx_sriov_reinit(struct wx *wx)
+{
+ rtnl_lock();
+ wx->setup_tc(wx->netdev, netdev_get_num_tc(wx->netdev));
+ rtnl_unlock();
+}
+
+void wx_disable_sriov(struct wx *wx)
+{
+ if (!pci_vfs_assigned(wx->pdev))
+ pci_disable_sriov(wx->pdev);
+ else
+ wx_err(wx, "Unloading driver while VFs are assigned.\n");
+
+	/* clear flags and free allocated data */
+ wx_sriov_clear_data(wx);
+}
+EXPORT_SYMBOL(wx_disable_sriov);
+
+static int wx_pci_sriov_enable(struct pci_dev *dev,
+ int num_vfs)
+{
+ struct wx *wx = pci_get_drvdata(dev);
+ int err = 0, i;
+
+ if (netif_is_rxfh_configured(wx->netdev)) {
+ wx_err(wx, "Cannot enable SR-IOV while RXFH is configured\n");
+ wx_err(wx, "Run 'ethtool -X <if> default' to reset RSS table\n");
+ return -EBUSY;
+ }
+
+ err = __wx_enable_sriov(wx, num_vfs);
+ if (err)
+ return err;
+
+ wx->num_vfs = num_vfs;
+ for (i = 0; i < wx->num_vfs; i++)
+ wx_vf_configuration(dev, (i | WX_VF_ENABLE));
+
+ /* reset before enabling SRIOV to avoid mailbox issues */
+ wx_sriov_reinit(wx);
+
+ err = pci_enable_sriov(dev, num_vfs);
+ if (err) {
+ wx_err(wx, "Failed to enable PCI sriov: %d\n", err);
+ goto err_out;
+ }
+
+ return num_vfs;
+err_out:
+ wx_sriov_clear_data(wx);
+ return err;
+}
+
+static int wx_pci_sriov_disable(struct pci_dev *dev)
+{
+ struct wx *wx = pci_get_drvdata(dev);
+
+ if (netif_is_rxfh_configured(wx->netdev)) {
+ wx_err(wx, "Cannot disable SR-IOV while RXFH is configured\n");
+ wx_err(wx, "Run 'ethtool -X <if> default' to reset RSS table\n");
+ return -EBUSY;
+ }
+
+ wx_disable_sriov(wx);
+ wx_sriov_reinit(wx);
+
+ return 0;
+}
+
+int wx_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct wx *wx = pci_get_drvdata(pdev);
+ int err;
+
+ if (!num_vfs) {
+ if (!pci_vfs_assigned(pdev))
+ return wx_pci_sriov_disable(pdev);
+
+ wx_err(wx, "can't free VFs because some are assigned to VMs.\n");
+ return -EBUSY;
+ }
+
+ err = wx_pci_sriov_enable(pdev, num_vfs);
+ if (err)
+ return err;
+
+ return num_vfs;
+}
+EXPORT_SYMBOL(wx_pci_sriov_configure);
+
+static int wx_set_vf_mac(struct wx *wx, u16 vf, const u8 *mac_addr)
+{
+ u8 hw_addr[ETH_ALEN];
+ int ret = 0;
+
+ ether_addr_copy(hw_addr, mac_addr);
+ wx_del_mac_filter(wx, wx->vfinfo[vf].vf_mac_addr, vf);
+ ret = wx_add_mac_filter(wx, hw_addr, vf);
+ if (ret >= 0)
+ ether_addr_copy(wx->vfinfo[vf].vf_mac_addr, mac_addr);
+ else
+ eth_zero_addr(wx->vfinfo[vf].vf_mac_addr);
+
+ return ret;
+}
+
+static void wx_set_vmolr(struct wx *wx, u16 vf, bool aupe)
+{
+ u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf));
+
+ vmolr |= WX_PSR_VM_L2CTL_BAM;
+ if (aupe)
+ vmolr |= WX_PSR_VM_L2CTL_AUPE;
+ else
+ vmolr &= ~WX_PSR_VM_L2CTL_AUPE;
+ wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr);
+}
+
+static void wx_set_vmvir(struct wx *wx, u16 vid, u16 qos, u16 vf)
+{
+ u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) |
+ WX_TDM_VLAN_INS_VLANA_DEFAULT;
+
+ wr32(wx, WX_TDM_VLAN_INS(vf), vmvir);
+}
+
+static int wx_set_vf_vlan(struct wx *wx, int add, int vid, u16 vf)
+{
+ if (!vid && !add)
+ return 0;
+
+ return wx_set_vfta(wx, vid, vf, (bool)add);
+}
+
+static void wx_set_vlan_anti_spoofing(struct wx *wx, bool enable, int vf)
+{
+ u32 index = WX_VF_REG_OFFSET(vf), vf_bit = WX_VF_IND_SHIFT(vf);
+ u32 pfvfspoof;
+
+ pfvfspoof = rd32(wx, WX_TDM_VLAN_AS(index));
+ if (enable)
+ pfvfspoof |= BIT(vf_bit);
+ else
+ pfvfspoof &= ~BIT(vf_bit);
+ wr32(wx, WX_TDM_VLAN_AS(index), pfvfspoof);
+}
+
+static void wx_write_qde(struct wx *wx, u32 vf, u32 qde)
+{
+ struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];
+ u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
+ u32 reg = 0, n = vf * q_per_pool / 32;
+ u32 i = vf * q_per_pool;
+
+ reg = rd32(wx, WX_RDM_PF_QDE(n));
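+	/* walk only the queue drop-enable bits owned by this VF within the
+	 * 32-bit QDE register selected by n
+	 */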
+ for (i = (vf * q_per_pool - n * 32);
+ i < ((vf + 1) * q_per_pool - n * 32);
+ i++) {
+ if (qde == 1)
+ reg |= qde << i;
+ else
+ reg &= qde << i;
+ }
+
+ wr32(wx, WX_RDM_PF_QDE(n), reg);
+}
+
+static void wx_clear_vmvir(struct wx *wx, u32 vf)
+{
+ wr32(wx, WX_TDM_VLAN_INS(vf), 0);
+}
+
+static void wx_ping_vf(struct wx *wx, int vf)
+{
+ u32 ping = WX_PF_CONTROL_MSG;
+
+ if (wx->vfinfo[vf].clear_to_send)
+ ping |= WX_VT_MSGTYPE_CTS;
+ wx_write_mbx_pf(wx, &ping, 1, vf);
+}
+
+static void wx_set_vf_rx_tx(struct wx *wx, int vf)
+{
+ u32 index = WX_VF_REG_OFFSET(vf), vf_bit = WX_VF_IND_SHIFT(vf);
+ u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx;
+
+ reg_cur_tx = rd32(wx, WX_TDM_VF_TE(index));
+ reg_cur_rx = rd32(wx, WX_RDM_VF_RE(index));
+
+ if (wx->vfinfo[vf].link_enable) {
+ reg_req_tx = reg_cur_tx | BIT(vf_bit);
+ reg_req_rx = reg_cur_rx | BIT(vf_bit);
+ /* Enable particular VF */
+ if (reg_cur_tx != reg_req_tx)
+ wr32(wx, WX_TDM_VF_TE(index), reg_req_tx);
+ if (reg_cur_rx != reg_req_rx)
+ wr32(wx, WX_RDM_VF_RE(index), reg_req_rx);
+ } else {
+ reg_req_tx = BIT(vf_bit);
+ reg_req_rx = BIT(vf_bit);
+ /* Disable particular VF */
+ if (reg_cur_tx & reg_req_tx)
+ wr32(wx, WX_TDM_VFTE_CLR(index), reg_req_tx);
+ if (reg_cur_rx & reg_req_rx)
+ wr32(wx, WX_RDM_VFRE_CLR(index), reg_req_rx);
+ }
+}
+
+static int wx_get_vf_queues(struct wx *wx, u32 *msgbuf, u32 vf)
+{
+ struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];
+ unsigned int default_tc = 0;
+
+ msgbuf[WX_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
+ msgbuf[WX_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
+
+ if (wx->vfinfo[vf].pf_vlan || wx->vfinfo[vf].pf_qos)
+ msgbuf[WX_VF_TRANS_VLAN] = 1;
+ else
+ msgbuf[WX_VF_TRANS_VLAN] = 0;
+
+ /* notify VF of default queue */
+ msgbuf[WX_VF_DEF_QUEUE] = default_tc;
+
+ return 0;
+}
+
+static void wx_vf_reset_event(struct wx *wx, u16 vf)
+{
+ struct vf_data_storage *vfinfo = &wx->vfinfo[vf];
+ u8 num_tcs = netdev_get_num_tc(wx->netdev);
+
+ /* add PF assigned VLAN */
+ wx_set_vf_vlan(wx, true, vfinfo->pf_vlan, vf);
+
+ /* reset offloads to defaults */
+ wx_set_vmolr(wx, vf, !vfinfo->pf_vlan);
+
+ /* set outgoing tags for VFs */
+ if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
+ wx_clear_vmvir(wx, vf);
+ } else {
+ if (vfinfo->pf_qos || !num_tcs)
+ wx_set_vmvir(wx, vfinfo->pf_vlan,
+ vfinfo->pf_qos, vf);
+ else
+ wx_set_vmvir(wx, vfinfo->pf_vlan,
+ wx->default_up, vf);
+ }
+
+ /* reset multicast table array for vf */
+ wx->vfinfo[vf].num_vf_mc_hashes = 0;
+
+ /* Flush and reset the mta with the new values */
+ wx_set_rx_mode(wx->netdev);
+
+ wx_del_mac_filter(wx, wx->vfinfo[vf].vf_mac_addr, vf);
+ /* reset VF api back to unknown */
+ wx->vfinfo[vf].vf_api = wx_mbox_api_null;
+}
+
+static void wx_vf_reset_msg(struct wx *wx, u16 vf)
+{
+ const u8 *vf_mac = wx->vfinfo[vf].vf_mac_addr;
+ struct net_device *dev = wx->netdev;
+ u32 msgbuf[5] = {0, 0, 0, 0, 0};
+ u8 *addr = (u8 *)(&msgbuf[1]);
+ u32 reg = 0, index, vf_bit;
+ int pf_max_frame;
+
+ /* reset the filters for the device */
+ wx_vf_reset_event(wx, vf);
+
+ /* set vf mac address */
+ if (!is_zero_ether_addr(vf_mac))
+ wx_set_vf_mac(wx, vf, vf_mac);
+
+ index = WX_VF_REG_OFFSET(vf);
+ vf_bit = WX_VF_IND_SHIFT(vf);
+
+ /* force drop enable for all VF Rx queues */
+ wx_write_qde(wx, vf, 1);
+
+ /* set transmit and receive for vf */
+ wx_set_vf_rx_tx(wx, vf);
+
+ pf_max_frame = dev->mtu + ETH_HLEN;
+
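+	/* if the PF uses frames larger than a standard Ethernet frame, keep
+	 * this VF's Rx disabled until it requests a matching LPE
+	 */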
+ if (pf_max_frame > ETH_FRAME_LEN)
+ reg = BIT(vf_bit);
+ wr32(wx, WX_RDM_VFRE_CLR(index), reg);
+
+ /* enable VF mailbox for further messages */
+ wx->vfinfo[vf].clear_to_send = true;
+
+ /* reply to reset with ack and vf mac address */
+ msgbuf[0] = WX_VF_RESET;
+ if (!is_zero_ether_addr(vf_mac)) {
+ msgbuf[0] |= WX_VT_MSGTYPE_ACK;
+ memcpy(addr, vf_mac, ETH_ALEN);
+ } else {
+ msgbuf[0] |= WX_VT_MSGTYPE_NACK;
+ wx_err(wx, "VF %d has no MAC address assigned", vf);
+ }
+
+ msgbuf[3] = wx->mac.mc_filter_type;
+ wx_write_mbx_pf(wx, msgbuf, WX_VF_PERMADDR_MSG_LEN, vf);
+}
+
+static int wx_set_vf_mac_addr(struct wx *wx, u32 *msgbuf, u16 vf)
+{
+ const u8 *new_mac = ((u8 *)(&msgbuf[1]));
+ int ret;
+
+ if (!is_valid_ether_addr(new_mac)) {
+ wx_err(wx, "VF %d attempted to set invalid mac\n", vf);
+ return -EINVAL;
+ }
+
+ if (wx->vfinfo[vf].pf_set_mac &&
+ memcmp(wx->vfinfo[vf].vf_mac_addr, new_mac, ETH_ALEN)) {
+ wx_err(wx,
+		       "VF %d attempted to set a MAC, but one is already assigned\n",
+ vf);
+ return -EBUSY;
+ }
+
+ ret = wx_set_vf_mac(wx, vf, new_mac);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void wx_set_vf_multicasts(struct wx *wx, u32 *msgbuf, u32 vf)
+{
+ struct vf_data_storage *vfinfo = &wx->vfinfo[vf];
+ u16 entries = (msgbuf[0] & WX_VT_MSGINFO_MASK)
+ >> WX_VT_MSGINFO_SHIFT;
+ u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf));
+ u32 vector_bit, vector_reg, mta_reg, i;
+ u16 *hash_list = (u16 *)&msgbuf[1];
+
+ /* only so many hash values supported */
+ entries = min_t(u16, entries, WX_MAX_VF_MC_ENTRIES);
+ vfinfo->num_vf_mc_hashes = entries;
+
+ for (i = 0; i < entries; i++)
+ vfinfo->vf_mc_hashes[i] = hash_list[i];
+
+ for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
+ vector_reg = WX_PSR_MC_TBL_REG(vfinfo->vf_mc_hashes[i]);
+ vector_bit = WX_PSR_MC_TBL_BIT(vfinfo->vf_mc_hashes[i]);
+ mta_reg = wx->mac.mta_shadow[vector_reg];
+ mta_reg |= BIT(vector_bit);
+ wx->mac.mta_shadow[vector_reg] = mta_reg;
+ wr32(wx, WX_PSR_MC_TBL(vector_reg), mta_reg);
+ }
+ vmolr |= WX_PSR_VM_L2CTL_ROMPE;
+ wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr);
+}
+
+static void wx_set_vf_lpe(struct wx *wx, u32 max_frame, u32 vf)
+{
+ u32 index, vf_bit, vfre;
+ u32 max_frs, reg_val;
+
+ /* determine VF receive enable location */
+ index = WX_VF_REG_OFFSET(vf);
+ vf_bit = WX_VF_IND_SHIFT(vf);
+
+ vfre = rd32(wx, WX_RDM_VF_RE(index));
+ vfre |= BIT(vf_bit);
+ wr32(wx, WX_RDM_VF_RE(index), vfre);
+
+ /* pull current max frame size from hardware */
+ max_frs = DIV_ROUND_UP(max_frame, 1024);
+ reg_val = rd32(wx, WX_MAC_WDG_TIMEOUT) & WX_MAC_WDG_TIMEOUT_WTO_MASK;
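+	/* only enlarge the configured frame size; never shrink it on a VF request */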
+ if (max_frs > (reg_val + WX_MAC_WDG_TIMEOUT_WTO_DELTA))
+ wr32(wx, WX_MAC_WDG_TIMEOUT,
+ max_frs - WX_MAC_WDG_TIMEOUT_WTO_DELTA);
+}
+
+static int wx_find_vlvf_entry(struct wx *wx, u32 vlan)
+{
+ int regindex;
+ u32 vlvf;
+
+ /* short cut the special case */
+ if (vlan == 0)
+ return 0;
+
+ /* Search for the vlan id in the VLVF entries */
+ for (regindex = 1; regindex < WX_PSR_VLAN_SWC_ENTRIES; regindex++) {
+ wr32(wx, WX_PSR_VLAN_SWC_IDX, regindex);
+ vlvf = rd32(wx, WX_PSR_VLAN_SWC);
+ if ((vlvf & VLAN_VID_MASK) == vlan)
+ break;
+ }
+
+ /* Return a negative value if not found */
+ if (regindex >= WX_PSR_VLAN_SWC_ENTRIES)
+ regindex = -EINVAL;
+
+ return regindex;
+}
+
+static int wx_set_vf_macvlan(struct wx *wx,
+ u16 vf, int index, unsigned char *mac_addr)
+{
+ struct vf_macvlans *entry;
+ struct list_head *pos;
+ int retval = 0;
+
+ if (index <= 1) {
+ list_for_each(pos, &wx->vf_mvs.mvlist) {
+ entry = list_entry(pos, struct vf_macvlans, mvlist);
+ if (entry->vf == vf) {
+ entry->vf = -1;
+ entry->free = true;
+ entry->is_macvlan = false;
+ wx_del_mac_filter(wx, entry->vf_macvlan, vf);
+ }
+ }
+ }
+
+ if (!index)
+ return 0;
+
+ entry = NULL;
+ list_for_each(pos, &wx->vf_mvs.mvlist) {
+ entry = list_entry(pos, struct vf_macvlans, mvlist);
+ if (entry->free)
+ break;
+ }
+
+ if (!entry || !entry->free)
+ return -ENOSPC;
+
+ retval = wx_add_mac_filter(wx, mac_addr, vf);
+ if (retval >= 0) {
+ entry->free = false;
+ entry->is_macvlan = true;
+ entry->vf = vf;
+ memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
+ }
+
+ return retval;
+}
+
+static int wx_set_vf_vlan_msg(struct wx *wx, u32 *msgbuf, u16 vf)
+{
+ int add = (msgbuf[0] & WX_VT_MSGINFO_MASK) >> WX_VT_MSGINFO_SHIFT;
+ int vid = (msgbuf[1] & WX_PSR_VLAN_SWC_VLANID_MASK);
+ int ret;
+
+ if (add)
+ wx->vfinfo[vf].vlan_count++;
+ else if (wx->vfinfo[vf].vlan_count)
+ wx->vfinfo[vf].vlan_count--;
+
+ /* in case of promiscuous mode any VLAN filter set for a VF must
+ * also have the PF pool added to it.
+ */
+ if (add && wx->netdev->flags & IFF_PROMISC)
+ wx_set_vf_vlan(wx, add, vid, VMDQ_P(0));
+
+ ret = wx_set_vf_vlan(wx, add, vid, vf);
+ if (!ret && wx->vfinfo[vf].spoofchk_enabled)
+ wx_set_vlan_anti_spoofing(wx, true, vf);
+
+ /* Go through all the checks to see if the VLAN filter should
+ * be wiped completely.
+ */
+ if (!add && wx->netdev->flags & IFF_PROMISC) {
+ u32 bits = 0, vlvf;
+ int reg_ndx;
+
+ reg_ndx = wx_find_vlvf_entry(wx, vid);
+ if (reg_ndx < 0)
+ return -ENOSPC;
+ wr32(wx, WX_PSR_VLAN_SWC_IDX, reg_ndx);
+ vlvf = rd32(wx, WX_PSR_VLAN_SWC);
+ /* See if any other pools are set for this VLAN filter
+ * entry other than the PF.
+ */
+ if (VMDQ_P(0) < 32) {
+ bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L);
+ bits &= ~BIT(VMDQ_P(0));
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
+ bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_H);
+ } else {
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
+ bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H);
+ bits &= ~BIT(VMDQ_P(0) % 32);
+ bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_L);
+ }
+ /* If the filter was removed then ensure PF pool bit
+ * is cleared if the PF only added itself to the pool
+ * because the PF is in promiscuous mode.
+ */
+ if ((vlvf & VLAN_VID_MASK) == vid && !bits)
+ wx_set_vf_vlan(wx, add, vid, VMDQ_P(0));
+ }
+
+ return 0;
+}
+
+static int wx_set_vf_macvlan_msg(struct wx *wx, u32 *msgbuf, u16 vf)
+{
+ int index = (msgbuf[0] & WX_VT_MSGINFO_MASK) >>
+ WX_VT_MSGINFO_SHIFT;
+ u8 *new_mac = ((u8 *)(&msgbuf[1]));
+ int err;
+
+ if (wx->vfinfo[vf].pf_set_mac && index > 0) {
+		wx_err(wx, "VF %d requested a MACVLAN filter but was denied\n", vf);
+ return -EINVAL;
+ }
+
+	/* A non-zero index indicates the VF is setting a filter */
+ if (index) {
+ if (!is_valid_ether_addr(new_mac)) {
+ wx_err(wx, "VF %d attempted to set invalid mac\n", vf);
+ return -EINVAL;
+ }
+ /* If the VF is allowed to set MAC filters then turn off
+ * anti-spoofing to avoid false positives.
+ */
+ if (wx->vfinfo[vf].spoofchk_enabled)
+ wx_set_vf_spoofchk(wx->netdev, vf, false);
+ }
+
+ err = wx_set_vf_macvlan(wx, vf, index, new_mac);
+ if (err == -ENOSPC)
+ wx_err(wx,
+		       "VF %d requested a MACVLAN filter but there is no space\n",
+ vf);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int wx_negotiate_vf_api(struct wx *wx, u32 *msgbuf, u32 vf)
+{
+ int api = msgbuf[1];
+
+ switch (api) {
+ case wx_mbox_api_13:
+ wx->vfinfo[vf].vf_api = api;
+ return 0;
+ default:
+ wx_err(wx, "VF %d requested invalid api version %u\n", vf, api);
+ return -EINVAL;
+ }
+}
+
+static int wx_get_vf_link_state(struct wx *wx, u32 *msgbuf, u32 vf)
+{
+ msgbuf[1] = wx->vfinfo[vf].link_enable;
+
+ return 0;
+}
+
+static int wx_get_fw_version(struct wx *wx, u32 *msgbuf, u32 vf)
+{
+ unsigned long fw_version = 0ULL;
+ int ret = 0;
+
+ ret = kstrtoul(wx->eeprom_id, 16, &fw_version);
+ if (ret)
+ return -EOPNOTSUPP;
+ msgbuf[1] = fw_version;
+
+ return 0;
+}
+
+static int wx_update_vf_xcast_mode(struct wx *wx, u32 *msgbuf, u32 vf)
+{
+ int xcast_mode = msgbuf[1];
+ u32 vmolr, disable, enable;
+
+ if (wx->vfinfo[vf].xcast_mode == xcast_mode)
+ return 0;
+
+ switch (xcast_mode) {
+ case WXVF_XCAST_MODE_NONE:
+ disable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
+ WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE |
+ WX_PSR_VM_L2CTL_VPE;
+ enable = 0;
+ break;
+ case WXVF_XCAST_MODE_MULTI:
+ disable = WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE |
+ WX_PSR_VM_L2CTL_VPE;
+ enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE;
+ break;
+ case WXVF_XCAST_MODE_ALLMULTI:
+ disable = WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_VPE;
+ enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
+ WX_PSR_VM_L2CTL_MPE;
+ break;
+ case WXVF_XCAST_MODE_PROMISC:
+ disable = 0;
+ enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
+ WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE |
+ WX_PSR_VM_L2CTL_VPE;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf));
+ vmolr &= ~disable;
+ vmolr |= enable;
+ wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr);
+
+ wx->vfinfo[vf].xcast_mode = xcast_mode;
+ msgbuf[1] = xcast_mode;
+
+ return 0;
+}
+
+static void wx_rcv_msg_from_vf(struct wx *wx, u16 vf)
+{
+ u16 mbx_size = WX_VXMAILBOX_SIZE;
+ u32 msgbuf[WX_VXMAILBOX_SIZE];
+ int retval;
+
+ retval = wx_read_mbx_pf(wx, msgbuf, mbx_size, vf);
+ if (retval) {
+ wx_err(wx, "Error receiving message from VF\n");
+ return;
+ }
+
+ /* this is a message we already processed, do nothing */
+ if (msgbuf[0] & (WX_VT_MSGTYPE_ACK | WX_VT_MSGTYPE_NACK))
+ return;
+
+ if (msgbuf[0] == WX_VF_RESET) {
+ wx_vf_reset_msg(wx, vf);
+ return;
+ }
+
+ /* until the vf completes a virtual function reset it should not be
+ * allowed to start any configuration.
+ */
+ if (!wx->vfinfo[vf].clear_to_send) {
+ msgbuf[0] |= WX_VT_MSGTYPE_NACK;
+ wx_write_mbx_pf(wx, msgbuf, 1, vf);
+ return;
+ }
+
+ switch ((msgbuf[0] & U16_MAX)) {
+ case WX_VF_SET_MAC_ADDR:
+ retval = wx_set_vf_mac_addr(wx, msgbuf, vf);
+ break;
+ case WX_VF_SET_MULTICAST:
+ wx_set_vf_multicasts(wx, msgbuf, vf);
+ retval = 0;
+ break;
+ case WX_VF_SET_VLAN:
+ retval = wx_set_vf_vlan_msg(wx, msgbuf, vf);
+ break;
+ case WX_VF_SET_LPE:
+ wx_set_vf_lpe(wx, msgbuf[1], vf);
+ retval = 0;
+ break;
+ case WX_VF_SET_MACVLAN:
+ retval = wx_set_vf_macvlan_msg(wx, msgbuf, vf);
+ break;
+ case WX_VF_API_NEGOTIATE:
+ retval = wx_negotiate_vf_api(wx, msgbuf, vf);
+ break;
+ case WX_VF_GET_QUEUES:
+ retval = wx_get_vf_queues(wx, msgbuf, vf);
+ break;
+ case WX_VF_GET_LINK_STATE:
+ retval = wx_get_vf_link_state(wx, msgbuf, vf);
+ break;
+ case WX_VF_GET_FW_VERSION:
+ retval = wx_get_fw_version(wx, msgbuf, vf);
+ break;
+ case WX_VF_UPDATE_XCAST_MODE:
+ retval = wx_update_vf_xcast_mode(wx, msgbuf, vf);
+ break;
+ case WX_VF_BACKUP:
+ break;
+ default:
+ wx_err(wx, "Unhandled Msg %8.8x\n", msgbuf[0]);
+ break;
+ }
+
+ /* notify the VF of the results of what it sent us */
+ if (retval)
+ msgbuf[0] |= WX_VT_MSGTYPE_NACK;
+ else
+ msgbuf[0] |= WX_VT_MSGTYPE_ACK;
+
+ msgbuf[0] |= WX_VT_MSGTYPE_CTS;
+
+ wx_write_mbx_pf(wx, msgbuf, mbx_size, vf);
+}
+
+static void wx_rcv_ack_from_vf(struct wx *wx, u16 vf)
+{
+ u32 msg = WX_VT_MSGTYPE_NACK;
+
+ /* if device isn't clear to send it shouldn't be reading either */
+ if (!wx->vfinfo[vf].clear_to_send)
+ wx_write_mbx_pf(wx, &msg, 1, vf);
+}
+
+void wx_msg_task(struct wx *wx)
+{
+ u16 vf;
+
+ for (vf = 0; vf < wx->num_vfs; vf++) {
+ /* process any reset requests */
+ if (!wx_check_for_rst_pf(wx, vf))
+ wx_vf_reset_event(wx, vf);
+
+ /* process any messages pending */
+ if (!wx_check_for_msg_pf(wx, vf))
+ wx_rcv_msg_from_vf(wx, vf);
+
+ /* process any acks */
+ if (!wx_check_for_ack_pf(wx, vf))
+ wx_rcv_ack_from_vf(wx, vf);
+ }
+}
+EXPORT_SYMBOL(wx_msg_task);
+
+void wx_disable_vf_rx_tx(struct wx *wx)
+{
+ wr32(wx, WX_TDM_VFTE_CLR(0), U32_MAX);
+ wr32(wx, WX_RDM_VFRE_CLR(0), U32_MAX);
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ wr32(wx, WX_TDM_VFTE_CLR(1), U32_MAX);
+ wr32(wx, WX_RDM_VFRE_CLR(1), U32_MAX);
+ }
+}
+EXPORT_SYMBOL(wx_disable_vf_rx_tx);
+
+void wx_ping_all_vfs_with_link_status(struct wx *wx, bool link_up)
+{
+ u32 msgbuf[2] = {0, 0};
+ u16 i;
+
+ if (!wx->num_vfs)
+ return;
+ msgbuf[0] = WX_PF_NOFITY_VF_LINK_STATUS | WX_PF_CONTROL_MSG;
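+	/* word 1 carries the link speed in bits 31:1 and the link state in
+	 * bit 0, matching WX_PFLINK_SPEED/WX_PFLINK_STATUS on the VF side
+	 */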
+ if (link_up)
+ msgbuf[1] = FIELD_PREP(GENMASK(31, 1), wx->speed) | link_up;
+ if (wx->notify_down)
+ msgbuf[1] |= WX_PF_NOFITY_VF_NET_NOT_RUNNING;
+ for (i = 0; i < wx->num_vfs; i++) {
+ if (wx->vfinfo[i].clear_to_send)
+ msgbuf[0] |= WX_VT_MSGTYPE_CTS;
+ wx_write_mbx_pf(wx, msgbuf, 2, i);
+ }
+}
+EXPORT_SYMBOL(wx_ping_all_vfs_with_link_status);
+
+static void wx_set_vf_link_state(struct wx *wx, int vf, int state)
+{
+ wx->vfinfo[vf].link_state = state;
+ switch (state) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ if (netif_running(wx->netdev))
+ wx->vfinfo[vf].link_enable = true;
+ else
+ wx->vfinfo[vf].link_enable = false;
+ break;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ wx->vfinfo[vf].link_enable = true;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ wx->vfinfo[vf].link_enable = false;
+ break;
+ }
+ /* restart the VF */
+ wx->vfinfo[vf].clear_to_send = false;
+ wx_ping_vf(wx, vf);
+
+ wx_set_vf_rx_tx(wx, vf);
+}
+
+void wx_set_all_vfs(struct wx *wx)
+{
+ int i;
+
+ for (i = 0; i < wx->num_vfs; i++)
+ wx_set_vf_link_state(wx, i, wx->vfinfo[i].link_state);
+}
+EXPORT_SYMBOL(wx_set_all_vfs);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.h b/drivers/net/ethernet/wangxun/libwx/wx_sriov.h
new file mode 100644
index 000000000000..8a3a47bb5815
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _WX_SRIOV_H_
+#define _WX_SRIOV_H_
+
+#define WX_VF_ENABLE_CHECK(_m) FIELD_GET(BIT(31), (_m))
+#define WX_VF_NUM_GET(_m) FIELD_GET(GENMASK(5, 0), (_m))
+#define WX_VF_ENABLE BIT(31)
+
+void wx_disable_sriov(struct wx *wx);
+int wx_pci_sriov_configure(struct pci_dev *pdev, int num_vfs);
+void wx_msg_task(struct wx *wx);
+void wx_disable_vf_rx_tx(struct wx *wx);
+void wx_ping_all_vfs_with_link_status(struct wx *wx, bool link_up);
+void wx_set_all_vfs(struct wx *wx);
+
+#endif /* _WX_SRIOV_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index b54bffda027b..29e5c5470c94 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -4,10 +4,13 @@
#ifndef _WX_TYPE_H_
#define _WX_TYPE_H_
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
#include <linux/bitfield.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/phylink.h>
+#include <linux/dim.h>
#include <net/ip.h>
#define WX_NCSI_SUP 0x8000
@@ -18,8 +21,13 @@
/* MSI-X capability fields masks */
#define WX_PCIE_MSIX_TBL_SZ_MASK 0x7FF
#define WX_PCI_LINK_STATUS 0xB2
+#define WX_MAX_PF_MACVLANS 15
+#define WX_MAX_VF_MC_ENTRIES 30
/**************** Global Registers ****************************/
+#define WX_VF_REG_OFFSET(_v) FIELD_GET(GENMASK(15, 5), (_v))
+#define WX_VF_IND_SHIFT(_v) FIELD_GET(GENMASK(4, 0), (_v))
+
/* chip control Registers */
#define WX_MIS_PWR 0x10000
#define WX_MIS_RST 0x1000C
@@ -74,6 +82,14 @@
#define WX_MAC_LXONOFFRXC 0x11E0C
/*********************** Receive DMA registers **************************/
+#define WX_RDM_VF_RE(_i) (0x12004 + ((_i) * 4))
+#define WX_RDM_RSC_CTL 0x1200C
+#define WX_RDM_RSC_CTL_FREE_CNT_DIS BIT(8)
+#define WX_RDM_RSC_CTL_FREE_CTL BIT(7)
+#define WX_RDM_PF_QDE(_i) (0x12080 + ((_i) * 4))
+#define WX_RDM_VFRE_CLR(_i) (0x120A0 + ((_i) * 4))
+#define WX_RDM_DCACHE_CTL 0x120A8
+#define WX_RDM_DCACHE_CTL_EN BIT(0)
#define WX_RDM_DRP_PKT 0x12500
#define WX_RDM_PKT_CNT 0x12504
#define WX_RDM_BYTE_CNT_LSB 0x12508
@@ -82,12 +98,19 @@
/************************* Port Registers ************************************/
/* port cfg Registers */
#define WX_CFG_PORT_CTL 0x14400
+#define WX_CFG_PORT_CTL_PFRSTD BIT(14)
#define WX_CFG_PORT_CTL_DRV_LOAD BIT(3)
#define WX_CFG_PORT_CTL_QINQ BIT(2)
#define WX_CFG_PORT_CTL_D_VLAN BIT(0) /* double vlan*/
+#define WX_CFG_PORT_ST 0x14404
+#define WX_CFG_PORT_ST_LANID GENMASK(9, 8)
#define WX_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4))
#define WX_CFG_PORT_CTL_NUM_VT_MASK GENMASK(13, 12) /* number of TVs */
+#define WX_CFG_PORT_CTL_NUM_VT_NONE 0
+#define WX_CFG_PORT_CTL_NUM_VT_8 FIELD_PREP(GENMASK(13, 12), 1)
+#define WX_CFG_PORT_CTL_NUM_VT_32 FIELD_PREP(GENMASK(13, 12), 2)
+#define WX_CFG_PORT_CTL_NUM_VT_64 FIELD_PREP(GENMASK(13, 12), 3)
/* GPIO Registers */
#define WX_GPIO_DR 0x14800
@@ -110,6 +133,11 @@
/*********************** Transmit DMA registers **************************/
/* transmit global control */
#define WX_TDM_CTL 0x18000
+#define WX_TDM_VF_TE(_i) (0x18004 + ((_i) * 4))
+#define WX_TDM_MAC_AS(_i) (0x18060 + ((_i) * 4))
+#define WX_TDM_VLAN_AS(_i) (0x18070 + ((_i) * 4))
+#define WX_TDM_VFTE_CLR(_i) (0x180A0 + ((_i) * 4))
+
/* TDM CTL BIT */
#define WX_TDM_CTL_TE BIT(0) /* Transmit Enable */
#define WX_TDM_PB_THRE(_i) (0x18020 + ((_i) * 4))
@@ -147,9 +175,12 @@
#define WX_RDB_PL_CFG_L2HDR BIT(3)
#define WX_RDB_PL_CFG_TUN_TUNHDR BIT(4)
#define WX_RDB_PL_CFG_TUN_OUTL2HDR BIT(5)
+#define WX_RDB_PL_CFG_RSS_EN BIT(24)
+#define WX_RDB_PL_CFG_RSS_MASK GENMASK(23, 16)
#define WX_RDB_RSSTBL(_i) (0x19400 + ((_i) * 4))
#define WX_RDB_RSSRK(_i) (0x19480 + ((_i) * 4))
#define WX_RDB_RA_CTL 0x194F4
+#define WX_RDB_RA_CTL_MULTI_RSS BIT(0)
#define WX_RDB_RA_CTL_RSS_EN BIT(2) /* RSS Enable */
#define WX_RDB_RA_CTL_RSS_IPV4_TCP BIT(16)
#define WX_RDB_RA_CTL_RSS_IPV4 BIT(17)
@@ -157,12 +188,17 @@
#define WX_RDB_RA_CTL_RSS_IPV6_TCP BIT(21)
#define WX_RDB_RA_CTL_RSS_IPV4_UDP BIT(22)
#define WX_RDB_RA_CTL_RSS_IPV6_UDP BIT(23)
+#define WX_RDB_RA_CTL_RSS_MASK GENMASK(23, 16)
#define WX_RDB_FDIR_MATCH 0x19558
#define WX_RDB_FDIR_MISS 0x1955C
+/* VM RSS */
+#define WX_RDB_VMRSSRK(_i, _p) (0x1A000 + ((_i) * 4) + ((_p) * 0x40))
+#define WX_RDB_VMRSSTBL(_i, _p) (0x1B000 + ((_i) * 4) + ((_p) * 0x40))
/******************************* PSR Registers *******************************/
/* psr control */
#define WX_PSR_CTL 0x15000
+#define WX_PSR_VM_CTL 0x151B0
/* Header split receive */
#define WX_PSR_CTL_SW_EN BIT(18)
#define WX_PSR_CTL_RSC_ACK BIT(17)
@@ -180,14 +216,36 @@
#define WX_PSR_VLAN_CTL 0x15088
#define WX_PSR_VLAN_CTL_CFIEN BIT(29) /* bit 29 */
#define WX_PSR_VLAN_CTL_VFE BIT(30) /* bit 30 */
+/* EType Queue Filter */
+#define WX_PSR_ETYPE_SWC(_i) (0x15128 + ((_i) * 4))
+#define WX_PSR_ETYPE_SWC_FILTER_1588 3
+#define WX_PSR_ETYPE_SWC_FILTER_EN BIT(31)
+#define WX_PSR_ETYPE_SWC_1588 BIT(30)
+/* 1588 */
+#define WX_PSR_1588_MSG 0x15120
+#define WX_PSR_1588_MSG_V1_SYNC FIELD_PREP(GENMASK(7, 0), 0)
+#define WX_PSR_1588_MSG_V1_DELAY_REQ FIELD_PREP(GENMASK(7, 0), 1)
+#define WX_PSR_1588_STMPL 0x151E8
+#define WX_PSR_1588_STMPH 0x151A4
+#define WX_PSR_1588_CTL 0x15188
+#define WX_PSR_1588_CTL_ENABLED BIT(4)
+#define WX_PSR_1588_CTL_TYPE_MASK GENMASK(3, 1)
+#define WX_PSR_1588_CTL_TYPE_L4_V1 FIELD_PREP(GENMASK(3, 1), 1)
+#define WX_PSR_1588_CTL_TYPE_EVENT_V2 FIELD_PREP(GENMASK(3, 1), 5)
+#define WX_PSR_1588_CTL_VALID BIT(0)
/* mcasst/ucast overflow tbl */
#define WX_PSR_MC_TBL(_i) (0x15200 + ((_i) * 4))
+#define WX_PSR_MC_TBL_REG(_i) FIELD_GET(GENMASK(11, 5), (_i))
+#define WX_PSR_MC_TBL_BIT(_i) FIELD_GET(GENMASK(4, 0), (_i))
#define WX_PSR_UC_TBL(_i) (0x15400 + ((_i) * 4))
+#define WX_PSR_VM_CTL_REPLEN BIT(30) /* replication enabled */
+#define WX_PSR_VM_CTL_POOL_MASK GENMASK(12, 7)
/* VM L2 contorl */
#define WX_PSR_VM_L2CTL(_i) (0x15600 + ((_i) * 4))
#define WX_PSR_VM_L2CTL_UPE BIT(4) /* unicast promiscuous */
#define WX_PSR_VM_L2CTL_VACC BIT(6) /* accept nomatched vlan */
+#define WX_PSR_VM_L2CTL_VPE BIT(7) /* vlan promiscuous mode */
#define WX_PSR_VM_L2CTL_AUPE BIT(8) /* accept untagged packets */
#define WX_PSR_VM_L2CTL_ROMPE BIT(9) /* accept packets in MTA tbl */
#define WX_PSR_VM_L2CTL_ROPE BIT(10) /* accept packets in UC tbl */
@@ -226,10 +284,12 @@
#define WX_PSR_VLAN_SWC 0x16220
#define WX_PSR_VLAN_SWC_VM_L 0x16224
#define WX_PSR_VLAN_SWC_VM_H 0x16228
+#define WX_PSR_VLAN_SWC_VM(_i) (0x16224 + ((_i) * 4))
#define WX_PSR_VLAN_SWC_IDX 0x16230 /* 64 vlan entries */
/* VLAN pool filtering masks */
#define WX_PSR_VLAN_SWC_VIEN BIT(31) /* filter is valid */
#define WX_PSR_VLAN_SWC_ENTRIES 64
+#define WX_PSR_VLAN_SWC_VLANID_MASK GENMASK(11, 0)
/********************************* RSEC **************************************/
/* general rsec */
@@ -240,6 +300,13 @@
#define WX_RSC_ST 0x17004
#define WX_RSC_ST_RSEC_RDY BIT(0)
+/*********************** Transmit DMA registers **************************/
+/* transmit global control */
+#define WX_TDM_ETYPE_AS(_i) (0x18058 + ((_i) * 4))
+#define WX_TDM_VLAN_INS(_i) (0x18100 + ((_i) * 4))
+/* Per VF Port VLAN insertion rules */
+#define WX_TDM_VLAN_INS_VLANA_DEFAULT  BIT(30) /* Always use default VLAN */

+
/****************************** TDB ******************************************/
#define WX_TDB_PB_SZ(_i) (0x1CC00 + ((_i) * 4))
#define WX_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */
@@ -253,6 +320,32 @@
#define WX_TSC_ST_SECTX_RDY BIT(0)
#define WX_TSC_BUF_AE 0x1D00C
#define WX_TSC_BUF_AE_THR GENMASK(9, 0)
+/* 1588 */
+#define WX_TSC_1588_CTL 0x11F00
+#define WX_TSC_1588_CTL_ENABLED BIT(4)
+#define WX_TSC_1588_CTL_VALID BIT(0)
+#define WX_TSC_1588_STMPL 0x11F04
+#define WX_TSC_1588_STMPH 0x11F08
+#define WX_TSC_1588_SYSTIML 0x11F0C
+#define WX_TSC_1588_SYSTIMH 0x11F10
+#define WX_TSC_1588_INC 0x11F14
+#define WX_TSC_1588_INT_ST 0x11F20
+#define WX_TSC_1588_INT_ST_TT1 BIT(5)
+#define WX_TSC_1588_INT_EN 0x11F24
+#define WX_TSC_1588_INT_EN_TT1 BIT(5)
+#define WX_TSC_1588_AUX_CTL 0x11F28
+#define WX_TSC_1588_AUX_CTL_EN_TS0 BIT(8)
+#define WX_TSC_1588_AUX_CTL_EN_TT1 BIT(2)
+#define WX_TSC_1588_AUX_CTL_PLSG BIT(1)
+#define WX_TSC_1588_AUX_CTL_EN_TT0 BIT(0)
+#define WX_TSC_1588_TRGT_L(i) (0x11F2C + ((i) * 8)) /* [0,1] */
+#define WX_TSC_1588_TRGT_H(i) (0x11F30 + ((i) * 8)) /* [0,1] */
+#define WX_TSC_1588_SDP(i) (0x11F5C + ((i) * 4)) /* [0,3] */
+#define WX_TSC_1588_SDP_OUT_LEVEL_H FIELD_PREP(BIT(4), 0)
+#define WX_TSC_1588_SDP_OUT_LEVEL_L FIELD_PREP(BIT(4), 1)
+#define WX_TSC_1588_SDP_FUN_SEL_MASK GENMASK(2, 0)
+#define WX_TSC_1588_SDP_FUN_SEL_TT0 FIELD_PREP(WX_TSC_1588_SDP_FUN_SEL_MASK, 1)
+#define WX_TSC_1588_SDP_FUN_SEL_TS0 FIELD_PREP(WX_TSC_1588_SDP_FUN_SEL_MASK, 5)
/************************************** MNG ********************************/
#define WX_MNG_SWFW_SYNC 0x1E008
@@ -264,6 +357,10 @@
#define WX_MNG_MBOX_CTL_FWRDY BIT(2)
#define WX_MNG_BMC2OS_CNT 0x1E090
#define WX_MNG_OS2BMC_CNT 0x1E094
+#define WX_SW2FW_MBOX_CMD 0x1E0A0
+#define WX_SW2FW_MBOX_CMD_VLD BIT(31)
+#define WX_SW2FW_MBOX 0x1E200
+#define WX_FW2SW_MBOX 0x1E300
/************************************* ETH MAC *****************************/
#define WX_MAC_TX_CFG 0x11000
@@ -279,6 +376,9 @@
#define WX_MAC_WDG_TIMEOUT 0x1100C
#define WX_MAC_RX_FLOW_CTRL 0x11090
#define WX_MAC_RX_FLOW_CTRL_RFE BIT(0) /* receive fc enable */
+
+#define WX_MAC_WDG_TIMEOUT_WTO_MASK GENMASK(3, 0)
+#define WX_MAC_WDG_TIMEOUT_WTO_DELTA 2
/* MDIO Registers */
#define WX_MSCA 0x11200
#define WX_MSCA_RA(v) FIELD_PREP(U16_MAX, v)
@@ -326,7 +426,9 @@ enum WX_MSCA_CMD_value {
#define WX_7K_ITR 595
#define WX_12K_ITR 336
#define WX_20K_ITR 200
+#define WX_MIN_RSC_ITR 24
#define WX_SP_MAX_EITR 0x00000FF8U
+#define WX_AML_MAX_EITR 0x00000FFFU
#define WX_EM_MAX_EITR 0x00007FFCU
/* transmit DMA Registers */
@@ -335,12 +437,15 @@ enum WX_MSCA_CMD_value {
#define WX_PX_TR_WP(_i) (0x03008 + ((_i) * 0x40))
#define WX_PX_TR_RP(_i) (0x0300C + ((_i) * 0x40))
#define WX_PX_TR_CFG(_i) (0x03010 + ((_i) * 0x40))
+#define WX_PX_TR_HEAD_ADDRL(_i) (0x03028 + ((_i) * 0x40))
+#define WX_PX_TR_HEAD_ADDRH(_i) (0x0302C + ((_i) * 0x40))
/* Transmit Config masks */
#define WX_PX_TR_CFG_ENABLE BIT(0) /* Ena specific Tx Queue */
#define WX_PX_TR_CFG_TR_SIZE_SHIFT 1 /* tx desc number per ring */
#define WX_PX_TR_CFG_SWFLSH BIT(26) /* Tx Desc. wr-bk flushing */
#define WX_PX_TR_CFG_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */
#define WX_PX_TR_CFG_THRE_SHIFT 8
+#define WX_PX_TR_CFG_HEAD_WB BIT(27)
/* Receive DMA Registers */
#define WX_PX_RR_BAL(_i) (0x01000 + ((_i) * 0x40))
@@ -352,7 +457,10 @@ enum WX_MSCA_CMD_value {
/* PX_RR_CFG bit definitions */
#define WX_PX_RR_CFG_VLAN BIT(31)
#define WX_PX_RR_CFG_DROP_EN BIT(30)
+#define WX_PX_RR_CFG_RSC BIT(29)
#define WX_PX_RR_CFG_SPLIT_MODE BIT(26)
+#define WX_PX_RR_CFG_MAX_RSCBUF_16 FIELD_PREP(GENMASK(24, 23), 3)
+#define WX_PX_RR_CFG_DESC_MERGE BIT(19)
#define WX_PX_RR_CFG_RR_THER_SHIFT 16
#define WX_PX_RR_CFG_RR_HDR_SZ GENMASK(15, 12)
#define WX_PX_RR_CFG_RR_BUF_SZ GENMASK(11, 8)
@@ -367,9 +475,19 @@ enum WX_MSCA_CMD_value {
/* Number of 80 microseconds we wait for PCI Express master disable */
#define WX_PCI_MASTER_DISABLE_TIMEOUT 80000
+#define WX_RSS_64Q_MASK 0x3F
+#define WX_RSS_8Q_MASK 0x7
+#define WX_RSS_4Q_MASK 0x3
+#define WX_RSS_2Q_MASK 0x1
+#define WX_RSS_DISABLED_MASK 0x0
+
+#define WX_VMDQ_4Q_MASK 0x7C
+#define WX_VMDQ_2Q_MASK 0x7E
+
/****************** Manageablility Host Interface defines ********************/
#define WX_HI_MAX_BLOCK_BYTE_LENGTH 256 /* Num of bytes in range */
#define WX_HI_COMMAND_TIMEOUT 1000 /* Process HI command limit */
+#define WX_HIC_HDR_INDEX_MAX 255
#define FW_READ_SHADOW_RAM_CMD 0x31
#define FW_READ_SHADOW_RAM_LEN 0x6
@@ -382,6 +500,8 @@ enum WX_MSCA_CMD_value {
#define FW_CEM_CMD_RESERVED 0X0
#define FW_CEM_MAX_RETRIES 3
#define FW_CEM_RESP_STATUS_SUCCESS 0x1
+#define FW_PPS_SET_CMD 0xF6
+#define FW_PPS_SET_LEN 0x14
#define WX_SW_REGION_PTR 0x1C
@@ -431,19 +551,14 @@ enum WX_MSCA_CMD_value {
#define WX_REQ_TX_DESCRIPTOR_MULTIPLE 128
#define WX_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */
-#define VMDQ_P(p) p
+#define VMDQ_P(p) ((p) + wx->ring_feature[RING_F_VMDQ].offset)
/* Supported Rx Buffer Sizes */
#define WX_RXBUFFER_256 256 /* Used for skb receive header */
#define WX_RXBUFFER_2K 2048
+#define WX_RXBUFFER_3K 3072
#define WX_MAX_RXBUFFER 16384 /* largest size for single descriptor */
-#if MAX_SKB_FRAGS < 8
-#define WX_RX_BUFSZ ALIGN(WX_MAX_RXBUFFER / MAX_SKB_FRAGS, 1024)
-#else
-#define WX_RX_BUFSZ WX_RXBUFFER_2K
-#endif
-
#define WX_RX_BUFFER_WRITE 16 /* Must be power of 2 */
#define WX_MAX_DATA_PER_TXD BIT(14)
@@ -451,8 +566,6 @@ enum WX_MSCA_CMD_value {
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), WX_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
-#define WX_CFG_PORT_ST 0x14404
-
/******************* Receive Descriptor bit definitions **********************/
#define WX_RXD_STAT_DD BIT(0) /* Done */
#define WX_RXD_STAT_EOP BIT(1) /* End of Packet */
@@ -460,6 +573,8 @@ enum WX_MSCA_CMD_value {
#define WX_RXD_STAT_L4CS BIT(7) /* L4 xsum calculated */
#define WX_RXD_STAT_IPCS BIT(8) /* IP xsum calculated */
#define WX_RXD_STAT_OUTERIPCS BIT(10) /* Cloud IP xsum calculated*/
+#define WX_RXD_STAT_IPV6EX BIT(12) /* IPv6 Dest Header */
+#define WX_RXD_STAT_TS BIT(14) /* IEEE1588 Time Stamp */
#define WX_RXD_ERR_OUTERIPER BIT(26) /* CRC IP Header error */
#define WX_RXD_ERR_RXE BIT(29) /* Any MAC Error */
@@ -535,8 +650,12 @@ enum wx_l2_ptypes {
#define WX_RXD_PKTTYPE(_rxd) \
((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 9) & 0xFF)
-#define WX_RXD_IPV6EX(_rxd) \
- ((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 6) & 0x1)
+
+#define WX_RXD_RSCCNT_MASK GENMASK(20, 17)
+#define WX_RXD_RSCCNT_SHIFT 17
+#define WX_RXD_NEXTP_MASK GENMASK(19, 4)
+#define WX_RXD_NEXTP_SHIFT 4
+
/*********************** Transmit Descriptor Config Masks ****************/
#define WX_TXD_STAT_DD BIT(0) /* Descriptor Done */
#define WX_TXD_DTYP_DATA 0 /* Adv Data Descriptor */
@@ -663,21 +782,30 @@ struct wx_hic_hdr {
u8 cmd_resv;
u8 ret_status;
} cmd_or_resp;
- u8 checksum;
+ union {
+ u8 checksum;
+ u8 index;
+ };
};
struct wx_hic_hdr2_req {
u8 cmd;
u8 buf_lenh;
u8 buf_lenl;
- u8 checksum;
+ union {
+ u8 checksum;
+ u8 index;
+ };
};
struct wx_hic_hdr2_rsp {
u8 cmd;
u8 buf_lenl;
u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */
- u8 checksum;
+ union {
+ u8 checksum;
+ u8 index;
+ };
};
union wx_hic_hdr2 {
@@ -701,12 +829,30 @@ struct wx_hic_reset {
u16 reset_type;
};
+struct wx_hic_set_pps {
+ struct wx_hic_hdr hdr;
+ u8 lan_id;
+ u8 enable;
+ u16 pad2;
+ u64 nsec;
+ u64 cycles;
+};
+
/* Bus parameters */
struct wx_bus_info {
u8 func;
u16 device;
};
+struct wx_mbx_info {
+ u16 size;
+ u32 mailbox;
+ u32 udelay;
+ u32 timeout;
+ /* lock mbx access */
+ spinlock_t mbx_lock;
+};
+
struct wx_thermal_sensor_data {
s16 temp;
s16 alarm_thresh;
@@ -716,14 +862,16 @@ struct wx_thermal_sensor_data {
enum wx_mac_type {
wx_mac_unknown = 0,
wx_mac_sp,
- wx_mac_em
+ wx_mac_em,
+ wx_mac_aml,
+ wx_mac_aml40,
};
-enum sp_media_type {
- sp_media_unknown = 0,
- sp_media_fiber,
- sp_media_copper,
- sp_media_backplane
+enum wx_media_type {
+ wx_media_unknown = 0,
+ wx_media_fiber,
+ wx_media_copper,
+ wx_media_backplane
};
enum em_mac_type {
@@ -787,7 +935,6 @@ enum wx_reset_type {
struct wx_cb {
dma_addr_t dma;
u16 append_cnt; /* number of skb's appended */
- bool page_released;
bool dma_released;
};
@@ -863,6 +1010,7 @@ struct wx_tx_context_desc {
*/
struct wx_tx_buffer {
union wx_tx_desc *next_to_watch;
+ unsigned long time_stamp;
struct sk_buff *skb;
unsigned int bytecount;
unsigned short gso_segs;
@@ -870,12 +1018,12 @@ struct wx_tx_buffer {
DEFINE_DMA_UNMAP_LEN(len);
__be16 protocol;
u32 tx_flags;
+ u32 next_eop;
};
struct wx_rx_buffer {
struct sk_buff *skb;
dma_addr_t dma;
- dma_addr_t page_dma;
struct page *page;
unsigned int page_offset;
};
@@ -895,6 +1043,8 @@ struct wx_rx_queue_stats {
u64 csum_good_cnt;
u64 csum_err;
u64 alloc_rx_buff_failed;
+ u64 rsc_count;
+ u64 rsc_flush;
};
/* iterator for handling rings in ring container */
@@ -907,6 +1057,7 @@ struct wx_ring_container {
unsigned int total_packets; /* total packets processed this int */
u8 count; /* total number of rings in vector */
u8 itr; /* current ITR setting for ring */
+ struct dim dim; /* data for net_dim algorithm */
};
struct wx_ring {
struct wx_ring *next; /* pointer to next ring in q_vector */
@@ -921,9 +1072,12 @@ struct wx_ring {
};
u8 __iomem *tail;
dma_addr_t dma; /* phys. address of descriptor ring */
+ dma_addr_t headwb_dma;
+ u32 *headwb_mem;
unsigned int size; /* length in bytes */
u16 count; /* amount of descriptors */
+ unsigned long last_rx_timestamp;
u8 queue_index; /* needed for multiqueue queue management */
u8 reg_idx; /* holds the special value that gets
@@ -933,6 +1087,7 @@ struct wx_ring {
*/
u16 next_to_use;
u16 next_to_clean;
+ u16 rx_buf_len;
union {
u16 next_to_alloc;
struct {
@@ -962,6 +1117,8 @@ struct wx_q_vector {
struct napi_struct napi;
struct rcu_head rcu; /* to avoid race with update stats on free */
+ u16 total_events; /* number of interrupts processed */
+
char name[IFNAMSIZ + 17];
/* for dynamic allocation of rings associated with this q_vector */
@@ -977,6 +1134,7 @@ struct wx_ring_feature {
enum wx_ring_f_enum {
RING_F_NONE = 0,
+ RING_F_VMDQ,
RING_F_RSS,
RING_F_FDIR,
RING_F_ARRAY_SIZE /* must be last in enum set */
@@ -1026,13 +1184,76 @@ struct wx_hw_stats {
enum wx_state {
WX_STATE_RESETTING,
- WX_STATE_NBITS, /* must be last */
+ WX_STATE_SWFW_BUSY,
+ WX_STATE_PTP_RUNNING,
+ WX_STATE_PTP_TX_IN_PROGRESS,
+ WX_STATE_SERVICE_SCHED,
+ WX_STATE_NBITS /* must be last */
+};
+
+struct vf_data_storage {
+ struct pci_dev *vfdev;
+ unsigned char vf_mac_addr[ETH_ALEN];
+ bool spoofchk_enabled;
+ bool link_enable;
+ bool trusted;
+ int xcast_mode;
+ unsigned int vf_api;
+ bool clear_to_send;
+ u16 pf_vlan; /* When set, guest VLAN config not allowed. */
+ u16 pf_qos;
+ bool pf_set_mac;
+
+ u16 vf_mc_hashes[WX_MAX_VF_MC_ENTRIES];
+ u16 num_vf_mc_hashes;
+ u16 vlan_count;
+ int link_state;
+};
+
+struct vf_macvlans {
+ struct list_head mvlist;
+ int vf;
+ bool free;
+ bool is_macvlan;
+ u8 vf_macvlan[ETH_ALEN];
+};
+
+#define WX_RSS_FIELD_IPV4_TCP BIT(0)
+#define WX_RSS_FIELD_IPV4 BIT(1)
+#define WX_RSS_FIELD_IPV4_SCTP BIT(2)
+#define WX_RSS_FIELD_IPV6_SCTP BIT(3)
+#define WX_RSS_FIELD_IPV6_TCP BIT(4)
+#define WX_RSS_FIELD_IPV6 BIT(5)
+#define WX_RSS_FIELD_IPV4_UDP BIT(6)
+#define WX_RSS_FIELD_IPV6_UDP BIT(7)
+
+struct wx_rss_flow_map {
+ u8 flow_type;
+ u32 data;
+ u8 flag;
};
enum wx_pf_flags {
+ WX_FLAG_MULTI_64_FUNC,
+ WX_FLAG_SWFW_RING,
+ WX_FLAG_VMDQ_ENABLED,
+ WX_FLAG_VLAN_PROMISC,
+ WX_FLAG_SRIOV_ENABLED,
+ WX_FLAG_IRQ_VECTOR_SHARED,
WX_FLAG_FDIR_CAPABLE,
WX_FLAG_FDIR_HASH,
WX_FLAG_FDIR_PERFECT,
+ WX_FLAG_RSC_CAPABLE,
+ WX_FLAG_RSC_ENABLED,
+ WX_FLAG_RX_HWTSTAMP_ENABLED,
+ WX_FLAG_RX_HWTSTAMP_IN_REGISTER,
+ WX_FLAG_PTP_PPS_ENABLED,
+ WX_FLAG_NEED_LINK_CONFIG,
+ WX_FLAG_NEED_MODULE_RESET,
+ WX_FLAG_NEED_UPDATE_LINK,
+ WX_FLAG_NEED_DO_RESET,
+ WX_FLAG_RX_MERGE_ENABLED,
+ WX_FLAG_TXHEAD_WB_ENABLED,
WX_PF_FLAGS_NBITS /* must be last */
};
@@ -1043,12 +1264,14 @@ struct wx {
void *priv;
u8 __iomem *hw_addr;
+ u8 __iomem *b4_addr; /* vf only */
struct pci_dev *pdev;
struct net_device *netdev;
struct wx_bus_info bus;
+ struct wx_mbx_info mbx;
struct wx_mac_info mac;
enum em_mac_type mac_type;
- enum sp_media_type media_type;
+ enum wx_media_type media_type;
struct wx_eeprom_info eeprom;
struct wx_addr_filter_info addr_ctrl;
struct wx_fc_info fc;
@@ -1066,8 +1289,10 @@ struct wx {
char eeprom_id[32];
char *driver_name;
enum wx_reset_type reset_type;
+ u8 swfw_index;
/* PHY stuff */
+ bool notify_down;
unsigned int link;
int speed;
int duplex;
@@ -1089,6 +1314,7 @@ struct wx {
int num_rx_queues;
u16 rx_itr_setting;
u16 rx_work_limit;
+ bool adaptive_itr;
int num_q_vectors; /* current number of q_vectors for device */
int max_q_vectors; /* upper limit of q_vectors for device */
@@ -1099,6 +1325,8 @@ struct wx {
struct wx_ring *tx_ring[64] ____cacheline_aligned_in_smp;
struct wx_ring *rx_ring[64];
struct wx_q_vector *q_vector[64];
+ int num_rx_pools;
+ int num_rx_queues_per_pool;
unsigned int queues_per_pool;
struct msix_entry *msix_q_entries;
@@ -1110,16 +1338,20 @@ struct wx {
u32 *isb_mem;
u32 isb_tag[WX_ISB_MAX];
bool misc_irq_domain;
+ u32 eims_other;
+ u32 eims_enable_mask;
#define WX_MAX_RETA_ENTRIES 128
#define WX_RSS_INDIR_TBL_MAX 64
u8 rss_indir_tbl[WX_MAX_RETA_ENTRIES];
+ u8 rss_flags;
bool rss_enabled;
#define WX_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
u32 *rss_key;
u32 wol;
u16 bd_number;
+ bool default_up;
struct wx_hw_stats stats;
u64 tx_busy;
@@ -1128,15 +1360,50 @@ struct wx {
u64 hw_csum_rx_good;
u64 hw_csum_rx_error;
u64 alloc_rx_buff_failed;
+ u64 rsc_count;
+ u64 rsc_flush;
+ unsigned int num_vfs;
+ struct vf_data_storage *vfinfo;
+ struct vf_macvlans vf_mvs;
+ struct vf_macvlans *mv_list;
+ unsigned long fwd_bitmask;
u32 atr_sample_rate;
void (*atr)(struct wx_ring *ring, struct wx_tx_buffer *first, u8 ptype);
void (*configure_fdir)(struct wx *wx);
+ int (*setup_tc)(struct net_device *netdev, u8 tc);
void (*do_reset)(struct net_device *netdev);
+ int (*ptp_setup_sdp)(struct wx *wx);
+ void (*set_num_queues)(struct wx *wx);
+
+ bool pps_enabled;
+ u64 pps_width;
+ u64 pps_edge_start;
+ u64 pps_edge_end;
+ u64 sec_to_cc;
+ u32 base_incval;
+ u32 tx_hwtstamp_pkts;
+ u32 tx_hwtstamp_timeouts;
+ u32 tx_hwtstamp_skipped;
+ u32 tx_hwtstamp_errors;
+ u32 rx_hwtstamp_cleared;
+ unsigned long last_overflow_check;
+ unsigned long last_rx_ptp_check;
+ unsigned long ptp_tx_start;
+ seqlock_t hw_tc_lock; /* seqlock for ptp */
+ struct cyclecounter hw_cc;
+ struct timecounter hw_tc;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_caps;
+ struct kernel_hwtstamp_config tstamp_config;
+ struct sk_buff *ptp_tx_skb;
+
+ struct timer_list service_timer;
+ struct work_struct service_task;
};
#define WX_INTR_ALL (~0ULL)
-#define WX_INTR_Q(i) BIT((i) + 1)
+#define WX_INTR_Q(i) BIT((i))
/* register operations */
#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
@@ -1177,6 +1444,24 @@ rd64(struct wx *wx, u32 reg)
return (lsb | msb << 32);
}
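+/* On wx_mac_em the 1588/PTP registers are accessed at their base offsets;
+ * on the newer MACs the same register block is relocated by 0xB500.
+ */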
+static inline u32
+rd32ptp(struct wx *wx, u32 reg)
+{
+ if (wx->mac.type == wx_mac_em)
+ return rd32(wx, reg);
+
+ return rd32(wx, reg + 0xB500);
+}
+
+static inline void
+wr32ptp(struct wx *wx, u32 reg, u32 value)
+{
+ if (wx->mac.type == wx_mac_em)
+ return wr32(wx, reg, value);
+
+ return wr32(wx, reg + 0xB500, value);
+}
+
/* On some domestic CPU platforms, sometimes IO is not synchronized with
* flushing memory, here use readl() to flush PCI read and write.
*/
@@ -1208,4 +1493,15 @@ static inline int wx_set_state_reset(struct wx *wx)
return 0;
}
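+/* Two 3K Rx buffers do not fit in a single 4K page, so use an order-1
+ * (two-page) allocation in that case; larger PAGE_SIZE always uses order 0.
+ */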
+static inline unsigned int wx_rx_pg_order(struct wx_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+ if (ring->rx_buf_len == WX_RXBUFFER_3K)
+ return 1;
+#endif
+ return 0;
+}
+
+#define wx_rx_pg_size(_ring) (PAGE_SIZE << wx_rx_pg_order(_ring))
+
#endif /* _WX_TYPE_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_vf.c b/drivers/net/ethernet/wangxun/libwx/wx_vf.c
new file mode 100644
index 000000000000..7567216a005f
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_vf.c
@@ -0,0 +1,599 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+
+#include "wx_type.h"
+#include "wx_hw.h"
+#include "wx_mbx.h"
+#include "wx_vf.h"
+
+static void wx_virt_clr_reg(struct wx *wx)
+{
+ u32 vfsrrctl, i;
+
+ /* VRSRRCTL default values (BSIZEPACKET = 2048, BSIZEHEADER = 256) */
+ vfsrrctl = WX_VXRXDCTL_HDRSZ(wx_hdr_sz(WX_RX_HDR_SIZE));
+ vfsrrctl |= WX_VXRXDCTL_BUFSZ(wx_buf_sz(WX_RX_BUF_SIZE));
+
+ /* clear all rxd ctl */
+ for (i = 0; i < WX_VF_MAX_RING_NUMS; i++)
+ wr32m(wx, WX_VXRXDCTL(i),
+ WX_VXRXDCTL_HDRSZ_MASK | WX_VXRXDCTL_BUFSZ_MASK,
+ vfsrrctl);
+
+ rd32(wx, WX_VXSTATUS);
+}
+
+/**
+ * wx_init_hw_vf - virtual function hardware initialization
+ * @wx: pointer to hardware structure
+ *
+ * Initialize the mac address
+ **/
+void wx_init_hw_vf(struct wx *wx)
+{
+ wx_get_mac_addr_vf(wx, wx->mac.addr);
+}
+EXPORT_SYMBOL(wx_init_hw_vf);
+
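+/* Post a request to the PF mailbox and block until the PF's reply has been
+ * read back into @resp_buf.
+ */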
+static int wx_mbx_write_and_read_reply(struct wx *wx, u32 *req_buf,
+ u32 *resp_buf, u16 size)
+{
+ int ret;
+
+ ret = wx_write_posted_mbx(wx, req_buf, size);
+ if (ret)
+ return ret;
+
+ return wx_read_posted_mbx(wx, resp_buf, size);
+}
+
+/**
+ * wx_reset_hw_vf - Performs hardware reset
+ * @wx: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks and
+ * clears all interrupts.
+ *
+ * Return: returns 0 on success, negative error code on failure
+ **/
+int wx_reset_hw_vf(struct wx *wx)
+{
+ struct wx_mbx_info *mbx = &wx->mbx;
+ u32 msgbuf[4] = {WX_VF_RESET};
+ u8 *addr = (u8 *)(&msgbuf[1]);
+ u32 b4_buf[16] = {0};
+ u32 timeout = 200;
+ int ret;
+ u32 i;
+
+ /* Call wx stop to disable tx/rx and clear interrupts */
+ wx_stop_adapter_vf(wx);
+
+ /* reset the api version */
+ wx->vfinfo->vf_api = wx_mbox_api_null;
+
+ /* backup msix vectors */
+ if (wx->b4_addr) {
+ for (i = 0; i < 16; i++)
+ b4_buf[i] = readl(wx->b4_addr + i * 4);
+ }
+
+ wr32m(wx, WX_VXCTRL, WX_VXCTRL_RST, WX_VXCTRL_RST);
+ rd32(wx, WX_VXSTATUS);
+
+ /* we cannot reset while the RSTI / RSTD bits are asserted */
+ while (!wx_check_for_rst_vf(wx) && timeout) {
+ timeout--;
+ udelay(5);
+ }
+
+ /* restore msix vectors */
+ if (wx->b4_addr) {
+ for (i = 0; i < 16; i++)
+ writel(b4_buf[i], wx->b4_addr + i * 4);
+ }
+
+ /* amlite: bme */
+ if (wx->mac.type == wx_mac_aml || wx->mac.type == wx_mac_aml40)
+ wr32(wx, WX_VX_PF_BME, WX_VF_BME_ENABLE);
+
+ if (!timeout)
+ return -EBUSY;
+
+ /* Reset VF registers to initial values */
+ wx_virt_clr_reg(wx);
+
+ /* mailbox timeout can now become active */
+ mbx->timeout = 2000;
+
+ ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (ret)
+ return ret;
+
+ if (msgbuf[0] != (WX_VF_RESET | WX_VT_MSGTYPE_ACK) &&
+ msgbuf[0] != (WX_VF_RESET | WX_VT_MSGTYPE_NACK))
+ return -EINVAL;
+
+ if (msgbuf[0] == (WX_VF_RESET | WX_VT_MSGTYPE_ACK))
+ ether_addr_copy(wx->mac.perm_addr, addr);
+
+ wx->mac.mc_filter_type = msgbuf[3];
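+	/* the low 16 bits of the first mailbox word select the VF request */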
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_reset_hw_vf);
+
+/**
+ * wx_stop_adapter_vf - Generic stop Tx/Rx units
+ * @wx: pointer to hardware structure
+ *
+ * Clears interrupts, disables transmit and receive units.
+ **/
+void wx_stop_adapter_vf(struct wx *wx)
+{
+ u32 reg_val;
+ u16 i;
+
+ /* Clear interrupt mask to stop from interrupts being generated */
+ wr32(wx, WX_VXIMS, WX_VF_IRQ_CLEAR_MASK);
+
+ /* Clear any pending interrupts, flush previous writes */
+ wr32(wx, WX_VXICR, U32_MAX);
+
+ /* Disable the transmit unit. Each queue must be disabled. */
+ for (i = 0; i < wx->mac.max_tx_queues; i++)
+ wr32(wx, WX_VXTXDCTL(i), WX_VXTXDCTL_FLUSH);
+
+ /* Disable the receive unit by stopping each queue */
+ for (i = 0; i < wx->mac.max_rx_queues; i++) {
+ reg_val = rd32(wx, WX_VXRXDCTL(i));
+ reg_val &= ~WX_VXRXDCTL_ENABLE;
+ wr32(wx, WX_VXRXDCTL(i), reg_val);
+ }
+ /* Clear packet split and pool config */
+ wr32(wx, WX_VXMRQC, 0);
+
+	/* flush all queue disables */
+ rd32(wx, WX_VXSTATUS);
+}
+EXPORT_SYMBOL(wx_stop_adapter_vf);
+
+/**
+ * wx_set_rar_vf - set device MAC address
+ * @wx: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @enable_addr: set flag that address is active
+ *
+ * Return: returns 0 on success, negative error code on failure
+ **/
+int wx_set_rar_vf(struct wx *wx, u32 index, u8 *addr, u32 enable_addr)
+{
+ u32 msgbuf[3] = {WX_VF_SET_MAC_ADDR};
+ u8 *msg_addr = (u8 *)(&msgbuf[1]);
+ int ret;
+
+ memcpy(msg_addr, addr, ETH_ALEN);
+
+ ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (ret)
+ return ret;
+ msgbuf[0] &= ~WX_VT_MSGTYPE_CTS;
+
+ /* if nacked the address was rejected, use "perm_addr" */
+ if (msgbuf[0] == (WX_VF_SET_MAC_ADDR | WX_VT_MSGTYPE_NACK)) {
+ wx_get_mac_addr_vf(wx, wx->mac.addr);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_set_rar_vf);
+
+/**
+ * wx_update_mc_addr_list_vf - Update Multicast addresses
+ * @wx: pointer to the HW structure
+ * @netdev: pointer to the net device structure
+ *
+ * Updates the Multicast Table Array.
+ *
+ * Return: returns 0 on success, negative error code on failure
+ **/
+int wx_update_mc_addr_list_vf(struct wx *wx, struct net_device *netdev)
+{
+ u32 msgbuf[WX_VXMAILBOX_SIZE] = {WX_VF_SET_MULTICAST};
+ u16 *vector_l = (u16 *)&msgbuf[1];
+ struct netdev_hw_addr *ha;
+ u32 cnt, i;
+
+ cnt = netdev_mc_count(netdev);
+ if (cnt > 28)
+ cnt = 28;
+ msgbuf[0] |= cnt << WX_VT_MSGINFO_SHIFT;
+
+ i = 0;
+ netdev_for_each_mc_addr(ha, netdev) {
+ if (i == cnt)
+ break;
+ if (is_link_local_ether_addr(ha->addr))
+ continue;
+
+ vector_l[i++] = wx_mta_vector(wx, ha->addr);
+ }
+
+ return wx_write_posted_mbx(wx, msgbuf, ARRAY_SIZE(msgbuf));
+}
+EXPORT_SYMBOL(wx_update_mc_addr_list_vf);
+
+/**
+ * wx_update_xcast_mode_vf - Update Multicast mode
+ * @wx: pointer to the HW structure
+ * @xcast_mode: new multicast mode
+ *
+ * Updates the Multicast Mode of VF.
+ *
+ * Return: returns 0 on success, negative error code on failure
+ **/
+int wx_update_xcast_mode_vf(struct wx *wx, int xcast_mode)
+{
+ u32 msgbuf[2] = {WX_VF_UPDATE_XCAST_MODE, xcast_mode};
+ int ret = 0;
+
+ if (wx->vfinfo->vf_api < wx_mbox_api_13)
+ return -EINVAL;
+
+ ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (ret)
+ return ret;
+
+ msgbuf[0] &= ~WX_VT_MSGTYPE_CTS;
+ if (msgbuf[0] == (WX_VF_UPDATE_XCAST_MODE | WX_VT_MSGTYPE_NACK))
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_update_xcast_mode_vf);
+
+/**
+ * wx_get_link_state_vf - Get VF link state from PF
+ * @wx: pointer to the HW structure
+ * @link_state: link state storage
+ *
+ * Return: returns 0 on success, negative error code on failure
+ **/
+int wx_get_link_state_vf(struct wx *wx, u16 *link_state)
+{
+ u32 msgbuf[2] = {WX_VF_GET_LINK_STATE};
+ int ret;
+
+ ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (ret)
+ return ret;
+
+ if (msgbuf[0] & WX_VT_MSGTYPE_NACK)
+ return -EINVAL;
+
+ *link_state = msgbuf[1];
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_get_link_state_vf);
+
+/**
+ * wx_set_vfta_vf - Set/Unset vlan filter table address
+ * @wx: pointer to the HW structure
+ * @vlan: 12 bit VLAN ID
+ * @vind: unused by VF drivers
+ * @vlan_on: if true then set bit, else clear bit
+ * @vlvf_bypass: boolean flag indicating updating default pool is okay
+ *
+ * Turn on/off specified VLAN in the VLAN filter table.
+ *
+ * Return: returns 0 on success, negative error code on failure
+ **/
+int wx_set_vfta_vf(struct wx *wx, u32 vlan, u32 vind, bool vlan_on,
+ bool vlvf_bypass)
+{
+ u32 msgbuf[2] = {WX_VF_SET_VLAN, vlan};
+ bool vlan_offload = false;
+ int ret;
+
+ /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
+ msgbuf[0] |= vlan_on << WX_VT_MSGINFO_SHIFT;
+	/* if VF VLAN offload is disabled, allow creating VLANs under the PF port VLAN */
+ msgbuf[0] |= BIT(vlan_offload);
+
+ ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (ret)
+ return ret;
+
+ if (msgbuf[0] & WX_VT_MSGTYPE_ACK)
+ return 0;
+
+ return msgbuf[0] & WX_VT_MSGTYPE_NACK;
+}
+EXPORT_SYMBOL(wx_set_vfta_vf);
+
+void wx_get_mac_addr_vf(struct wx *wx, u8 *mac_addr)
+{
+ ether_addr_copy(mac_addr, wx->mac.perm_addr);
+}
+EXPORT_SYMBOL(wx_get_mac_addr_vf);
+
+int wx_get_fw_version_vf(struct wx *wx)
+{
+ u32 msgbuf[2] = {WX_VF_GET_FW_VERSION};
+ int ret;
+
+ ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (ret)
+ return ret;
+
+ if (msgbuf[0] & WX_VT_MSGTYPE_NACK)
+ return -EINVAL;
+ snprintf(wx->eeprom_id, 32, "0x%08x", msgbuf[1]);
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_get_fw_version_vf);
+
+int wx_set_uc_addr_vf(struct wx *wx, u32 index, u8 *addr)
+{
+ u32 msgbuf[3] = {WX_VF_SET_MACVLAN};
+ u8 *msg_addr = (u8 *)(&msgbuf[1]);
+ int ret;
+
+ /* If index is one then this is the start of a new list and needs
+	 * indication to the PF so it can do its own list management.
+ * If it is zero then that tells the PF to just clear all of
+ * this VF's macvlans and there is no new list.
+ */
+ msgbuf[0] |= index << WX_VT_MSGINFO_SHIFT;
+ if (addr)
+		memcpy(msg_addr, addr, ETH_ALEN);
+ ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (ret)
+ return ret;
+
+ msgbuf[0] &= ~WX_VT_MSGTYPE_CTS;
+
+ if (msgbuf[0] == (WX_VF_SET_MACVLAN | WX_VT_MSGTYPE_NACK))
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_set_uc_addr_vf);
+
+/**
+ * wx_rlpml_set_vf - Set the maximum receive packet length
+ * @wx: pointer to the HW structure
+ * @max_size: value to assign to max frame size
+ *
+ * Return: returns 0 on success, negative error code on failure
+ **/
+int wx_rlpml_set_vf(struct wx *wx, u16 max_size)
+{
+ u32 msgbuf[2] = {WX_VF_SET_LPE, max_size};
+ int ret;
+
+ ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (ret)
+ return ret;
+ if ((msgbuf[0] & WX_VF_SET_LPE) &&
+ (msgbuf[0] & WX_VT_MSGTYPE_NACK))
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_rlpml_set_vf);
+
+/**
+ * wx_negotiate_api_version - Negotiate supported API version
+ * @wx: pointer to the HW structure
+ * @api: integer containing requested API version
+ *
+ * Return: returns 0 on success, negative error code on failure
+ **/
+int wx_negotiate_api_version(struct wx *wx, int api)
+{
+ u32 msgbuf[2] = {WX_VF_API_NEGOTIATE, api};
+ int ret;
+
+ ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (ret)
+ return ret;
+
+ msgbuf[0] &= ~WX_VT_MSGTYPE_CTS;
+
+ /* Store value and return 0 on success */
+ if (msgbuf[0] == (WX_VF_API_NEGOTIATE | WX_VT_MSGTYPE_NACK))
+ return -EINVAL;
+ wx->vfinfo->vf_api = api;
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_negotiate_api_version);
+
+int wx_get_queues_vf(struct wx *wx, u32 *num_tcs, u32 *default_tc)
+{
+ u32 msgbuf[5] = {WX_VF_GET_QUEUES};
+ int ret;
+
+ /* do nothing if API doesn't support wx_get_queues */
+ if (wx->vfinfo->vf_api < wx_mbox_api_13)
+ return -EINVAL;
+
+ /* Fetch queue configuration from the PF */
+ ret = wx_mbx_write_and_read_reply(wx, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (ret)
+ return ret;
+ msgbuf[0] &= ~WX_VT_MSGTYPE_CTS;
+
+ /* if we didn't get an ACK there must have been
+ * some sort of mailbox error so we should treat it
+ * as such
+ */
+ if (msgbuf[0] != (WX_VF_GET_QUEUES | WX_VT_MSGTYPE_ACK))
+ return -EINVAL;
+ /* record and validate values from message */
+ wx->mac.max_tx_queues = msgbuf[WX_VF_TX_QUEUES];
+ if (wx->mac.max_tx_queues == 0 ||
+ wx->mac.max_tx_queues > WX_VF_MAX_TX_QUEUES)
+ wx->mac.max_tx_queues = WX_VF_MAX_TX_QUEUES;
+
+ wx->mac.max_rx_queues = msgbuf[WX_VF_RX_QUEUES];
+ if (wx->mac.max_rx_queues == 0 ||
+ wx->mac.max_rx_queues > WX_VF_MAX_RX_QUEUES)
+ wx->mac.max_rx_queues = WX_VF_MAX_RX_QUEUES;
+
+ *num_tcs = msgbuf[WX_VF_TRANS_VLAN];
+ /* in case of unknown state assume we cannot tag frames */
+ if (*num_tcs > wx->mac.max_rx_queues)
+ *num_tcs = 1;
+ *default_tc = msgbuf[WX_VF_DEF_QUEUE];
+ /* default to queue 0 on out-of-bounds queue number */
+ if (*default_tc >= wx->mac.max_tx_queues)
+ *default_tc = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_get_queues_vf);
+
+static int wx_get_link_status_from_pf(struct wx *wx, u32 *msgbuf)
+{
+ u32 links_reg = msgbuf[1];
+
+ if (msgbuf[1] & WX_PF_NOFITY_VF_NET_NOT_RUNNING)
+ wx->notify_down = true;
+ else
+ wx->notify_down = false;
+
+ if (wx->notify_down) {
+ wx->link = false;
+ wx->speed = SPEED_UNKNOWN;
+ return 0;
+ }
+
+ wx->link = WX_PFLINK_STATUS(links_reg);
+ wx->speed = WX_PFLINK_SPEED(links_reg);
+
+ return 0;
+}
+
+static int wx_pf_ping_vf(struct wx *wx, u32 *msgbuf)
+{
+ if (!(msgbuf[0] & WX_VT_MSGTYPE_CTS))
+ /* msg is not CTS, we need to do reset */
+ return -EINVAL;
+
+ return 0;
+}
+
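+/* VXSTATUS reports the negotiated speed in individual bits; map each bit to
+ * a speed per MAC generation, indexed by wx->mac.type.
+ */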
+static struct wx_link_reg_fields wx_speed_lookup_vf[] = {
+ {wx_mac_unknown},
+ {wx_mac_sp, SPEED_10000, SPEED_1000, SPEED_100, SPEED_UNKNOWN, SPEED_UNKNOWN},
+ {wx_mac_em, SPEED_1000, SPEED_100, SPEED_10, SPEED_UNKNOWN, SPEED_UNKNOWN},
+ {wx_mac_aml, SPEED_40000, SPEED_25000, SPEED_10000, SPEED_1000, SPEED_UNKNOWN},
+ {wx_mac_aml40, SPEED_40000, SPEED_25000, SPEED_10000, SPEED_1000, SPEED_UNKNOWN},
+};
+
+static void wx_check_physical_link(struct wx *wx)
+{
+ u32 val, link_val;
+ int ret;
+
+ /* get link status from hw status reg
+ * for SFP+ modules and DA cables, it can take up to 500usecs
+ * before the link status is correct
+ */
+ if (wx->mac.type == wx_mac_em)
+ ret = read_poll_timeout_atomic(rd32, val, val & GENMASK(4, 1),
+ 100, 500, false, wx, WX_VXSTATUS);
+ else
+ ret = read_poll_timeout_atomic(rd32, val, val & BIT(0), 100,
+ 500, false, wx, WX_VXSTATUS);
+ if (ret) {
+ wx->speed = SPEED_UNKNOWN;
+ wx->link = false;
+ return;
+ }
+
+ wx->link = true;
+ link_val = WX_VXSTATUS_SPEED(val);
+
+ if (link_val & BIT(0))
+ wx->speed = wx_speed_lookup_vf[wx->mac.type].bit0_f;
+ else if (link_val & BIT(1))
+ wx->speed = wx_speed_lookup_vf[wx->mac.type].bit1_f;
+ else if (link_val & BIT(2))
+ wx->speed = wx_speed_lookup_vf[wx->mac.type].bit2_f;
+ else if (link_val & BIT(3))
+ wx->speed = wx_speed_lookup_vf[wx->mac.type].bit3_f;
+ else
+ wx->speed = SPEED_UNKNOWN;
+}
+
+int wx_check_mac_link_vf(struct wx *wx)
+{
+ struct wx_mbx_info *mbx = &wx->mbx;
+ u32 msgbuf[2] = {0};
+ int ret = 0;
+
+ if (!mbx->timeout)
+ goto out;
+
+ wx_check_for_rst_vf(wx);
+ if (!wx_check_for_msg_vf(wx))
+ ret = wx_read_mbx_vf(wx, msgbuf, 2);
+ if (ret)
+ goto out;
+
+ switch (msgbuf[0] & GENMASK(8, 0)) {
+ case WX_PF_NOFITY_VF_LINK_STATUS | WX_PF_CONTROL_MSG:
+ ret = wx_get_link_status_from_pf(wx, msgbuf);
+ goto out;
+ case WX_PF_CONTROL_MSG:
+ ret = wx_pf_ping_vf(wx, msgbuf);
+ goto out;
+ case 0:
+ if (msgbuf[0] & WX_VT_MSGTYPE_NACK) {
+ /* msg is NACK, we must have lost CTS status */
+ ret = -EBUSY;
+ goto out;
+ }
+ /* no message, check link status */
+ wx_check_physical_link(wx);
+ goto out;
+ default:
+ break;
+ }
+
+ if (!(msgbuf[0] & WX_VT_MSGTYPE_CTS)) {
+		/* msg is not CTS; if it is also a NACK we must have lost CTS status */
+ if (msgbuf[0] & WX_VT_MSGTYPE_NACK)
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* the pf is talking, if we timed out in the past we reinit */
+ if (!mbx->timeout) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+out:
+ return ret;
+}
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_vf.h b/drivers/net/ethernet/wangxun/libwx/wx_vf.h
new file mode 100644
index 000000000000..eb6ca3fe4e97
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_vf.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _WX_VF_H_
+#define _WX_VF_H_
+
+/* Control registers */
+#define WX_VF_MAX_RING_NUMS 8
+#define WX_VX_PF_BME 0x4B8
+#define WX_VF_BME_ENABLE BIT(0)
+#define WX_VXSTATUS 0x4
+#define WX_VXCTRL 0x8
+#define WX_VXCTRL_RST BIT(0)
+
+#define WX_VXMRQC 0x78
+#define WX_VXMRQC_PSR_L4HDR BIT(0)
+#define WX_VXMRQC_PSR_L3HDR BIT(1)
+#define WX_VXMRQC_PSR_L2HDR BIT(2)
+#define WX_VXMRQC_PSR_TUNHDR BIT(3)
+#define WX_VXMRQC_PSR_TUNMAC BIT(4)
+#define WX_VXMRQC_PSR_MASK GENMASK(5, 1)
+#define WX_VXMRQC_PSR(f) FIELD_PREP(GENMASK(5, 1), f)
+#define WX_VXMRQC_RSS_HASH(f) FIELD_PREP(GENMASK(15, 13), f)
+#define WX_VXMRQC_RSS_MASK GENMASK(31, 16)
+#define WX_VXMRQC_RSS(f) FIELD_PREP(GENMASK(31, 16), f)
+#define WX_VXMRQC_RSS_ALG_IPV4_TCP BIT(0)
+#define WX_VXMRQC_RSS_ALG_IPV4 BIT(1)
+#define WX_VXMRQC_RSS_ALG_IPV6 BIT(4)
+#define WX_VXMRQC_RSS_ALG_IPV6_TCP BIT(5)
+#define WX_VXMRQC_RSS_EN BIT(8)
+
+#define WX_VXRSSRK(i) (0x80 + ((i) * 4)) /* i=[0,9] */
+#define WX_VXRETA(i) (0xC0 + ((i) * 4)) /* i=[0,15] */
+
+/* Interrupt registers */
+#define WX_VXICR 0x100
+#define WX_VXIMS 0x108
+#define WX_VXIMC 0x10C
+#define WX_VF_IRQ_CLEAR_MASK 7
+#define WX_VF_MAX_TX_QUEUES 4
+#define WX_VF_MAX_RX_QUEUES 4
+
+#define WX_VXITR(i) (0x200 + (4 * (i))) /* i=[0,1] */
+#define WX_VXITR_MASK GENMASK(8, 0)
+#define WX_VXITR_CNT_WDIS BIT(31)
+#define WX_VXIVAR_MISC 0x260
+#define WX_VXIVAR(i) (0x240 + (4 * (i))) /* i=[0,3] */
+
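+/* Encode a size into its descriptor-control field: the field's maximum
+ * value (2 << mwidth) encodes as 0, anything smaller is expressed in
+ * 2^uwidth-byte units.
+ */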
+#define wx_conf_size(v, mwidth, uwidth) ({ \
+ typeof(v) _v = (v); \
+ (_v == 2 << (mwidth) ? 0 : _v >> (uwidth)); \
+})
+#define wx_buf_len(v) wx_conf_size(v, 13, 7)
+#define wx_hdr_sz(v) wx_conf_size(v, 10, 6)
+#define wx_buf_sz(v) wx_conf_size(v, 14, 10)
+#define wx_pkt_thresh(v) wx_conf_size(v, 4, 0)
+
+#define WX_RX_HDR_SIZE 256
+#define WX_RX_BUF_SIZE 2048
+
+#define WX_RXBUFFER_2048      2048
+#define WX_RXBUFFER_3072 3072
+
+/* Receive Path */
+#define WX_VXRDBAL(r) (0x1000 + (0x40 * (r)))
+#define WX_VXRDBAH(r) (0x1004 + (0x40 * (r)))
+#define WX_VXRDT(r) (0x1008 + (0x40 * (r)))
+#define WX_VXRDH(r) (0x100C + (0x40 * (r)))
+#define WX_VXRXDCTL(r) (0x1010 + (0x40 * (r)))
+#define WX_VXRXDCTL_ENABLE BIT(0)
+#define WX_VXRXDCTL_BUFLEN_MASK GENMASK(6, 1)
+#define WX_VXRXDCTL_BUFLEN(f) FIELD_PREP(GENMASK(6, 1), f)
+#define WX_VXRXDCTL_BUFSZ_MASK GENMASK(11, 8)
+#define WX_VXRXDCTL_BUFSZ(f) FIELD_PREP(GENMASK(11, 8), f)
+#define WX_VXRXDCTL_HDRSZ_MASK GENMASK(15, 12)
+#define WX_VXRXDCTL_HDRSZ(f) FIELD_PREP(GENMASK(15, 12), f)
+#define WX_VXRXDCTL_DESC_MERGE BIT(19)
+#define WX_VXRXDCTL_RSCMAX_MASK GENMASK(24, 23)
+#define WX_VXRXDCTL_RSCMAX(f) FIELD_PREP(GENMASK(24, 23), f)
+#define WX_VXRXDCTL_RSCEN BIT(29)
+#define WX_VXRXDCTL_DROP BIT(30)
+#define WX_VXRXDCTL_VLAN BIT(31)
+
+/* Transmit Path */
+#define WX_VXTDBAL(r) (0x3000 + (0x40 * (r)))
+#define WX_VXTDBAH(r) (0x3004 + (0x40 * (r)))
+#define WX_VXTDT(r) (0x3008 + (0x40 * (r)))
+#define WX_VXTDH(r) (0x300C + (0x40 * (r)))
+#define WX_VXTXDCTL(r) (0x3010 + (0x40 * (r)))
+#define WX_VXTXDCTL_ENABLE BIT(0)
+#define WX_VXTXDCTL_BUFLEN(f) FIELD_PREP(GENMASK(6, 1), f)
+#define WX_VXTXDCTL_PTHRESH(f) FIELD_PREP(GENMASK(11, 8), f)
+#define WX_VXTXDCTL_WTHRESH(f) FIELD_PREP(GENMASK(22, 16), f)
+#define WX_VXTXDCTL_FLUSH BIT(26)
+#define WX_VXTXDCTL_HEAD_WB BIT(27)
+#define WX_VXTXD_HEAD_ADDRL(r) (0x3028 + (0x40 * (r)))
+#define WX_VXTXD_HEAD_ADDRH(r) (0x302C + (0x40 * (r)))
+
+#define WX_PFLINK_STATUS(g) FIELD_GET(BIT(0), g)
+#define WX_PFLINK_SPEED(g) FIELD_GET(GENMASK(31, 1), g)
+#define WX_VXSTATUS_SPEED(g) FIELD_GET(GENMASK(4, 1), g)
+
+struct wx_link_reg_fields {
+ u32 mac_type;
+ u32 bit0_f;
+ u32 bit1_f;
+ u32 bit2_f;
+ u32 bit3_f;
+ u32 bit4_f;
+};
+
+void wx_init_hw_vf(struct wx *wx);
+int wx_reset_hw_vf(struct wx *wx);
+void wx_get_mac_addr_vf(struct wx *wx, u8 *mac_addr);
+void wx_stop_adapter_vf(struct wx *wx);
+int wx_get_fw_version_vf(struct wx *wx);
+int wx_set_rar_vf(struct wx *wx, u32 index, u8 *addr, u32 enable_addr);
+int wx_update_mc_addr_list_vf(struct wx *wx, struct net_device *netdev);
+int wx_set_uc_addr_vf(struct wx *wx, u32 index, u8 *addr);
+int wx_rlpml_set_vf(struct wx *wx, u16 max_size);
+int wx_negotiate_api_version(struct wx *wx, int api);
+int wx_get_queues_vf(struct wx *wx, u32 *num_tcs, u32 *default_tc);
+int wx_update_xcast_mode_vf(struct wx *wx, int xcast_mode);
+int wx_get_link_state_vf(struct wx *wx, u16 *link_state);
+int wx_set_vfta_vf(struct wx *wx, u32 vlan, u32 vind, bool vlan_on,
+ bool vlvf_bypass);
+int wx_check_mac_link_vf(struct wx *wx);
+
+#endif /* _WX_VF_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_vf_common.c b/drivers/net/ethernet/wangxun/libwx/wx_vf_common.c
new file mode 100644
index 000000000000..ade2bfe563aa
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_vf_common.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+
+#include "wx_type.h"
+#include "wx_mbx.h"
+#include "wx_lib.h"
+#include "wx_vf.h"
+#include "wx_vf_lib.h"
+#include "wx_vf_common.h"
+
+int wxvf_suspend(struct device *dev_d)
+{
+ struct pci_dev *pdev = to_pci_dev(dev_d);
+ struct wx *wx = pci_get_drvdata(pdev);
+
+ netif_device_detach(wx->netdev);
+ wx_clear_interrupt_scheme(wx);
+ pci_disable_device(pdev);
+
+ return 0;
+}
+EXPORT_SYMBOL(wxvf_suspend);
+
+void wxvf_shutdown(struct pci_dev *pdev)
+{
+ wxvf_suspend(&pdev->dev);
+}
+EXPORT_SYMBOL(wxvf_shutdown);
+
+int wxvf_resume(struct device *dev_d)
+{
+ struct pci_dev *pdev = to_pci_dev(dev_d);
+ struct wx *wx = pci_get_drvdata(pdev);
+
+ pci_set_master(pdev);
+ wx_init_interrupt_scheme(wx);
+ netif_device_attach(wx->netdev);
+
+ return 0;
+}
+EXPORT_SYMBOL(wxvf_resume);
+
+void wxvf_remove(struct pci_dev *pdev)
+{
+ struct wx *wx = pci_get_drvdata(pdev);
+ struct net_device *netdev;
+
+ cancel_work_sync(&wx->service_task);
+ netdev = wx->netdev;
+ unregister_netdev(netdev);
+ kfree(wx->vfinfo);
+ kfree(wx->rss_key);
+ kfree(wx->mac_table);
+ wx_clear_interrupt_scheme(wx);
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_disable_device(pdev);
+}
+EXPORT_SYMBOL(wxvf_remove);
+
+static irqreturn_t wx_msix_misc_vf(int __always_unused irq, void *data)
+{
+ struct wx *wx = data;
+
+ set_bit(WX_FLAG_NEED_UPDATE_LINK, wx->flags);
+ /* Clear the interrupt */
+ if (netif_running(wx->netdev))
+ wr32(wx, WX_VXIMC, wx->eims_other);
+
+ return IRQ_HANDLED;
+}
+
+int wx_request_msix_irqs_vf(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ int vector, err;
+
+ for (vector = 0; vector < wx->num_q_vectors; vector++) {
+ struct wx_q_vector *q_vector = wx->q_vector[vector];
+ struct msix_entry *entry = &wx->msix_q_entries[vector];
+
+ if (q_vector->tx.ring && q_vector->rx.ring)
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-TxRx-%d", netdev->name, entry->entry);
+ else
+ /* skip this unused q_vector */
+ continue;
+
+ err = request_irq(entry->vector, wx_msix_clean_rings, 0,
+ q_vector->name, q_vector);
+ if (err) {
+ wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n",
+ q_vector->name, err);
+ goto free_queue_irqs;
+ }
+ }
+
+ err = request_threaded_irq(wx->msix_entry->vector, wx_msix_misc_vf,
+ NULL, IRQF_ONESHOT, netdev->name, wx);
+ if (err) {
+ wx_err(wx, "request_irq for msix_other failed: %d\n", err);
+ goto free_queue_irqs;
+ }
+
+ return 0;
+
+free_queue_irqs:
+ while (vector) {
+ vector--;
+ free_irq(wx->msix_q_entries[vector].vector,
+ wx->q_vector[vector]);
+ }
+ wx_reset_interrupt_capability(wx);
+ return err;
+}
+EXPORT_SYMBOL(wx_request_msix_irqs_vf);
+
+void wx_negotiate_api_vf(struct wx *wx)
+{
+ int api[] = {
+ wx_mbox_api_13,
+ wx_mbox_api_null};
+ int err = 0, idx = 0;
+
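+	/* try each supported API version, newest first, until the PF ACKs one */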
+ spin_lock_bh(&wx->mbx.mbx_lock);
+ while (api[idx] != wx_mbox_api_null) {
+ err = wx_negotiate_api_version(wx, api[idx]);
+ if (!err)
+ break;
+ idx++;
+ }
+ spin_unlock_bh(&wx->mbx.mbx_lock);
+}
+EXPORT_SYMBOL(wx_negotiate_api_vf);
+
+void wx_reset_vf(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ int ret = 0;
+
+ ret = wx_reset_hw_vf(wx);
+ if (!ret)
+ wx_init_hw_vf(wx);
+ wx_negotiate_api_vf(wx);
+ if (is_valid_ether_addr(wx->mac.addr)) {
+ eth_hw_addr_set(netdev, wx->mac.addr);
+ ether_addr_copy(netdev->perm_addr, wx->mac.addr);
+ }
+}
+EXPORT_SYMBOL(wx_reset_vf);
+
+void wx_set_rx_mode_vf(struct net_device *netdev)
+{
+ struct wx *wx = netdev_priv(netdev);
+ unsigned int flags = netdev->flags;
+ int xcast_mode;
+
+ /* request the most inclusive mode we need */
+ if (flags & IFF_PROMISC)
+ xcast_mode = WXVF_XCAST_MODE_PROMISC;
+ else if (flags & IFF_ALLMULTI)
+ xcast_mode = WXVF_XCAST_MODE_ALLMULTI;
+ else if (flags & (IFF_BROADCAST | IFF_MULTICAST))
+ xcast_mode = WXVF_XCAST_MODE_MULTI;
+ else
+ xcast_mode = WXVF_XCAST_MODE_NONE;
+
+ spin_lock_bh(&wx->mbx.mbx_lock);
+ wx_update_xcast_mode_vf(wx, xcast_mode);
+ wx_update_mc_addr_list_vf(wx, netdev);
+ wx_write_uc_addr_list_vf(netdev);
+ spin_unlock_bh(&wx->mbx.mbx_lock);
+}
+EXPORT_SYMBOL(wx_set_rx_mode_vf);
+
+/**
+ * wx_configure_rx_vf - Configure Receive Unit after Reset
+ * @wx: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void wx_configure_rx_vf(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ int i, ret;
+
+ wx_setup_psrtype_vf(wx);
+ wx_setup_vfmrqc_vf(wx);
+
+ spin_lock_bh(&wx->mbx.mbx_lock);
+ ret = wx_rlpml_set_vf(wx,
+ netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
+ spin_unlock_bh(&wx->mbx.mbx_lock);
+ if (ret)
+ wx_dbg(wx, "Failed to set MTU at %d\n", netdev->mtu);
+
+ /* Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring
+ */
+ for (i = 0; i < wx->num_rx_queues; i++) {
+ struct wx_ring *rx_ring = wx->rx_ring[i];
+#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC
+ wx_set_rx_buffer_len_vf(wx, rx_ring);
+#endif
+ wx_configure_rx_ring_vf(wx, rx_ring);
+ }
+}
+
+void wx_configure_vf(struct wx *wx)
+{
+ wx_set_rx_mode_vf(wx->netdev);
+ wx_configure_tx_vf(wx);
+ wx_configure_rx_vf(wx);
+}
+EXPORT_SYMBOL(wx_configure_vf);
+
+int wx_set_mac_vf(struct net_device *netdev, void *p)
+{
+ struct wx *wx = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+ int ret;
+
+ ret = eth_prepare_mac_addr_change(netdev, addr);
+ if (ret)
+ return ret;
+
+ spin_lock_bh(&wx->mbx.mbx_lock);
+ ret = wx_set_rar_vf(wx, 1, (u8 *)addr->sa_data, 1);
+ spin_unlock_bh(&wx->mbx.mbx_lock);
+
+ if (ret)
+ return -EPERM;
+
+ memcpy(wx->mac.addr, addr->sa_data, netdev->addr_len);
+ memcpy(wx->mac.perm_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_set_mac_vf);
+
+void wxvf_watchdog_update_link(struct wx *wx)
+{
+ int err;
+
+ if (!test_bit(WX_FLAG_NEED_UPDATE_LINK, wx->flags))
+ return;
+
+ spin_lock_bh(&wx->mbx.mbx_lock);
+ err = wx_check_mac_link_vf(wx);
+ spin_unlock_bh(&wx->mbx.mbx_lock);
+ if (err) {
+ wx->link = false;
+ set_bit(WX_FLAG_NEED_DO_RESET, wx->flags);
+ }
+ clear_bit(WX_FLAG_NEED_UPDATE_LINK, wx->flags);
+}
+EXPORT_SYMBOL(wxvf_watchdog_update_link);
+
+static void wxvf_irq_enable(struct wx *wx)
+{
+ wr32(wx, WX_VXIMC, wx->eims_enable_mask);
+}
+
+static void wxvf_up_complete(struct wx *wx)
+{
+ /* Always set the carrier off */
+ netif_carrier_off(wx->netdev);
+ mod_timer(&wx->service_timer, jiffies + HZ);
+ set_bit(WX_FLAG_NEED_UPDATE_LINK, wx->flags);
+
+ wx_configure_msix_vf(wx);
+ smp_mb__before_atomic();
+ wx_napi_enable_all(wx);
+
+ /* clear any pending interrupts, may auto mask */
+ wr32(wx, WX_VXICR, U32_MAX);
+ wxvf_irq_enable(wx);
+ /* enable transmits */
+ netif_tx_start_all_queues(wx->netdev);
+}
+
+int wxvf_open(struct net_device *netdev)
+{
+ struct wx *wx = netdev_priv(netdev);
+ int err;
+
+ err = wx_setup_resources(wx);
+ if (err)
+ goto err_reset;
+ wx_configure_vf(wx);
+
+ err = wx_request_msix_irqs_vf(wx);
+ if (err)
+ goto err_free_resources;
+
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues);
+ if (err)
+ goto err_free_irq;
+
+ err = netif_set_real_num_rx_queues(netdev, wx->num_rx_queues);
+ if (err)
+ goto err_free_irq;
+
+ wxvf_up_complete(wx);
+
+ return 0;
+err_free_irq:
+ wx_free_irq(wx);
+err_free_resources:
+ wx_free_resources(wx);
+err_reset:
+ wx_reset_vf(wx);
+ return err;
+}
+EXPORT_SYMBOL(wxvf_open);
+
+static void wxvf_down(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+
+ timer_delete_sync(&wx->service_timer);
+ netif_tx_stop_all_queues(netdev);
+ netif_tx_disable(netdev);
+ netif_carrier_off(netdev);
+ wx_napi_disable_all(wx);
+ wx_reset_vf(wx);
+
+ wx_clean_all_tx_rings(wx);
+ wx_clean_all_rx_rings(wx);
+}
+
+static void wxvf_reinit_locked(struct wx *wx)
+{
+ while (test_and_set_bit(WX_STATE_RESETTING, wx->state))
+ usleep_range(1000, 2000);
+ wxvf_down(wx);
+ wx_free_irq(wx);
+ wx_configure_vf(wx);
+ wx_request_msix_irqs_vf(wx);
+ wxvf_up_complete(wx);
+ clear_bit(WX_STATE_RESETTING, wx->state);
+}
+
+static void wxvf_reset_subtask(struct wx *wx)
+{
+ if (!test_bit(WX_FLAG_NEED_DO_RESET, wx->flags))
+ return;
+ clear_bit(WX_FLAG_NEED_DO_RESET, wx->flags);
+
+ rtnl_lock();
+ if (test_bit(WX_STATE_RESETTING, wx->state) ||
+ !(netif_running(wx->netdev))) {
+ rtnl_unlock();
+ return;
+ }
+ wxvf_reinit_locked(wx);
+ rtnl_unlock();
+}
+
+int wxvf_close(struct net_device *netdev)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ wxvf_down(wx);
+ wx_free_irq(wx);
+ wx_free_resources(wx);
+
+ return 0;
+}
+EXPORT_SYMBOL(wxvf_close);
+
+static void wxvf_link_config_subtask(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+
+ wxvf_watchdog_update_link(wx);
+ if (wx->link) {
+ if (netif_carrier_ok(netdev))
+ return;
+ netif_carrier_on(netdev);
+ netdev_info(netdev, "Link is Up - %s\n",
+ phy_speed_to_str(wx->speed));
+ } else {
+ if (!netif_carrier_ok(netdev))
+ return;
+ netif_carrier_off(netdev);
+ netdev_info(netdev, "Link is Down\n");
+ }
+}
+
+static void wxvf_service_task(struct work_struct *work)
+{
+ struct wx *wx = container_of(work, struct wx, service_task);
+
+ wxvf_link_config_subtask(wx);
+ wxvf_reset_subtask(wx);
+ wx_service_event_complete(wx);
+}
+
+void wxvf_init_service(struct wx *wx)
+{
+ timer_setup(&wx->service_timer, wx_service_timer, 0);
+ INIT_WORK(&wx->service_task, wxvf_service_task);
+ clear_bit(WX_STATE_SERVICE_SCHED, wx->state);
+}
+EXPORT_SYMBOL(wxvf_init_service);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_vf_common.h b/drivers/net/ethernet/wangxun/libwx/wx_vf_common.h
new file mode 100644
index 000000000000..cbbb1b178cb2
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_vf_common.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _WX_VF_COMMON_H_
+#define _WX_VF_COMMON_H_
+
+int wxvf_suspend(struct device *dev_d);
+void wxvf_shutdown(struct pci_dev *pdev);
+int wxvf_resume(struct device *dev_d);
+void wxvf_remove(struct pci_dev *pdev);
+int wx_request_msix_irqs_vf(struct wx *wx);
+void wx_negotiate_api_vf(struct wx *wx);
+void wx_reset_vf(struct wx *wx);
+void wx_set_rx_mode_vf(struct net_device *netdev);
+void wx_configure_vf(struct wx *wx);
+int wx_set_mac_vf(struct net_device *netdev, void *p);
+void wxvf_watchdog_update_link(struct wx *wx);
+int wxvf_open(struct net_device *netdev);
+int wxvf_close(struct net_device *netdev);
+void wxvf_init_service(struct wx *wx);
+
+#endif /* _WX_VF_COMMON_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c
new file mode 100644
index 000000000000..aa8be036956c
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+
+#include "wx_type.h"
+#include "wx_hw.h"
+#include "wx_lib.h"
+#include "wx_vf.h"
+#include "wx_vf_lib.h"
+
+void wx_write_eitr_vf(struct wx_q_vector *q_vector)
+{
+ struct wx *wx = q_vector->wx;
+ int v_idx = q_vector->v_idx;
+ u32 itr_reg;
+
+ itr_reg = q_vector->itr & WX_VXITR_MASK;
+
+ /* set the WDIS bit so this write does not clear the timer bits
+ * and cause an immediate assertion of the interrupt
+ */
+ itr_reg |= WX_VXITR_CNT_WDIS;
+
+ wr32(wx, WX_VXITR(v_idx), itr_reg);
+}
+
+static void wx_set_ivar_vf(struct wx *wx, s8 direction, u8 queue,
+ u8 msix_vector)
+{
+ u32 ivar, index;
+
+ if (direction == -1) {
+ /* other causes */
+ msix_vector |= WX_PX_IVAR_ALLOC_VAL;
+ ivar = rd32(wx, WX_VXIVAR_MISC);
+ ivar &= ~0xFF;
+ ivar |= msix_vector;
+ wr32(wx, WX_VXIVAR_MISC, ivar);
+ } else {
+ /* tx or rx causes */
+ msix_vector |= WX_PX_IVAR_ALLOC_VAL;
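+ /* each VXIVAR register holds the 8-bit rx/tx vector entries for two
+ * queues; queue parity and direction select which byte to update
+ */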
+ index = ((16 * (queue & 1)) + (8 * direction));
+ ivar = rd32(wx, WX_VXIVAR(queue >> 1));
+ ivar &= ~(0xFF << index);
+ ivar |= (msix_vector << index);
+ wr32(wx, WX_VXIVAR(queue >> 1), ivar);
+ }
+}
+
+void wx_configure_msix_vf(struct wx *wx)
+{
+ int v_idx;
+
+ wx->eims_enable_mask = 0;
+ for (v_idx = 0; v_idx < wx->num_q_vectors; v_idx++) {
+ struct wx_q_vector *q_vector = wx->q_vector[v_idx];
+ struct wx_ring *ring;
+
+ wx_for_each_ring(ring, q_vector->rx)
+ wx_set_ivar_vf(wx, 0, ring->reg_idx, v_idx);
+
+ wx_for_each_ring(ring, q_vector->tx)
+ wx_set_ivar_vf(wx, 1, ring->reg_idx, v_idx);
+
+ /* add q_vector eims value to global eims_enable_mask */
+ wx->eims_enable_mask |= BIT(v_idx);
+ wx_write_eitr_vf(q_vector);
+ }
+
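+ /* map other (misc) causes to the vector after the last queue vector */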
+ wx_set_ivar_vf(wx, -1, 1, v_idx);
+
+ /* setup eims_other and add value to global eims_enable_mask */
+ wx->eims_other = BIT(v_idx);
+ wx->eims_enable_mask |= wx->eims_other;
+}
+
+int wx_write_uc_addr_list_vf(struct net_device *netdev)
+{
+ struct wx *wx = netdev_priv(netdev);
+ int count = 0;
+
+ if (!netdev_uc_empty(netdev)) {
+ struct netdev_hw_addr *ha;
+
+ netdev_for_each_uc_addr(ha, netdev)
+ wx_set_uc_addr_vf(wx, ++count, ha->addr);
+ } else {
+ /* If the list is empty then send a message to the PF driver to
+ * clear all macvlans on this VF.
+ */
+ wx_set_uc_addr_vf(wx, 0, NULL);
+ }
+
+ return count;
+}
+
+/**
+ * wx_configure_tx_ring_vf - Configure Tx ring after Reset
+ * @wx: board private structure
+ * @ring: structure containing ring specific data
+ *
+ * Configure the Tx descriptor ring after a reset.
+ **/
+static void wx_configure_tx_ring_vf(struct wx *wx, struct wx_ring *ring)
+{
+ u8 reg_idx = ring->reg_idx;
+ u64 tdba = ring->dma;
+ u32 txdctl = 0;
+ int ret;
+
+ /* disable queue to avoid issues while updating state */
+ wr32(wx, WX_VXTXDCTL(reg_idx), WX_VXTXDCTL_FLUSH);
+ wr32(wx, WX_VXTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
+ wr32(wx, WX_VXTDBAH(reg_idx), tdba >> 32);
+
+ /* enable relaxed ordering */
+ pcie_capability_clear_and_set_word(wx->pdev, PCI_EXP_DEVCTL,
+ 0, PCI_EXP_DEVCTL_RELAX_EN);
+
+ /* reset head and tail pointers */
+ wr32(wx, WX_VXTDH(reg_idx), 0);
+ wr32(wx, WX_VXTDT(reg_idx), 0);
+ ring->tail = wx->hw_addr + WX_VXTDT(reg_idx);
+
+ /* reset ntu and ntc to place SW in sync with hardware */
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+
+ txdctl |= WX_VXTXDCTL_BUFLEN(wx_buf_len(ring->count));
+ txdctl |= WX_VXTXDCTL_ENABLE;
+
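+ /* enable descriptor head write-back when a buffer has been allocated for it */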
+ if (ring->headwb_mem) {
+ wr32(wx, WX_VXTXD_HEAD_ADDRL(reg_idx),
+ ring->headwb_dma & DMA_BIT_MASK(32));
+ wr32(wx, WX_VXTXD_HEAD_ADDRH(reg_idx),
+ upper_32_bits(ring->headwb_dma));
+
+ txdctl |= WX_VXTXDCTL_HEAD_WB;
+ }
+
+ /* reinitialize tx_buffer_info */
+ memset(ring->tx_buffer_info, 0,
+ sizeof(struct wx_tx_buffer) * ring->count);
+
+ wr32(wx, WX_VXTXDCTL(reg_idx), txdctl);
+ /* poll to verify queue is enabled */
+ ret = read_poll_timeout(rd32, txdctl, txdctl & WX_VXTXDCTL_ENABLE,
+ 1000, 10000, true, wx, WX_VXTXDCTL(reg_idx));
+ if (ret == -ETIMEDOUT)
+ wx_err(wx, "Could not enable Tx Queue %d\n", reg_idx);
+}
+
+/**
+ * wx_configure_tx_vf - Configure Transmit Unit after Reset
+ * @wx: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+void wx_configure_tx_vf(struct wx *wx)
+{
+ u32 i;
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+ for (i = 0; i < wx->num_tx_queues; i++)
+ wx_configure_tx_ring_vf(wx, wx->tx_ring[i]);
+}
+
+static void wx_configure_srrctl_vf(struct wx *wx, struct wx_ring *ring,
+ int index)
+{
+ u32 srrctl;
+
+ srrctl = rd32m(wx, WX_VXRXDCTL(index),
+ (u32)~(WX_VXRXDCTL_HDRSZ_MASK | WX_VXRXDCTL_BUFSZ_MASK));
+ srrctl |= WX_VXRXDCTL_DROP;
+ srrctl |= WX_VXRXDCTL_HDRSZ(wx_hdr_sz(WX_RX_HDR_SIZE));
+ srrctl |= WX_VXRXDCTL_BUFSZ(wx_buf_sz(WX_RX_BUF_SIZE));
+
+ wr32(wx, WX_VXRXDCTL(index), srrctl);
+}
+
+void wx_setup_psrtype_vf(struct wx *wx)
+{
+ /* PSRTYPE must be initialized */
+ u32 psrtype = WX_VXMRQC_PSR_L2HDR |
+ WX_VXMRQC_PSR_L3HDR |
+ WX_VXMRQC_PSR_L4HDR |
+ WX_VXMRQC_PSR_TUNHDR |
+ WX_VXMRQC_PSR_TUNMAC;
+
+ wr32m(wx, WX_VXMRQC, WX_VXMRQC_PSR_MASK, WX_VXMRQC_PSR(psrtype));
+}
+
+void wx_setup_vfmrqc_vf(struct wx *wx)
+{
+ u16 rss_i = wx->num_rx_queues;
+ u32 vfmrqc = 0, vfreta = 0;
+ u8 i, j;
+
+ /* Fill out hash function seeds */
+ netdev_rss_key_fill(wx->rss_key, WX_RSS_KEY_SIZE);
+ for (i = 0; i < WX_RSS_KEY_SIZE / 4; i++)
+ wr32(wx, WX_VXRSSRK(i), wx->rss_key[i]);
+
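+ /* build the redirection table: entries cycle through the rx queues,
+ * packed four 8-bit entries per 32-bit VXRETA register
+ */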
+ for (i = 0, j = 0; i < WX_MAX_RETA_ENTRIES; i++, j++) {
+ if (j == rss_i)
+ j = 0;
+
+ wx->rss_indir_tbl[i] = j;
+
+ vfreta |= j << (i & 0x3) * 8;
+ if ((i & 3) == 3) {
+ wr32(wx, WX_VXRETA(i >> 2), vfreta);
+ vfreta = 0;
+ }
+ }
+
+ /* Perform hash on these packet types */
+ vfmrqc |= WX_VXMRQC_RSS_ALG_IPV4 |
+ WX_VXMRQC_RSS_ALG_IPV4_TCP |
+ WX_VXMRQC_RSS_ALG_IPV6 |
+ WX_VXMRQC_RSS_ALG_IPV6_TCP;
+
+ vfmrqc |= WX_VXMRQC_RSS_EN;
+
+ if (wx->num_rx_queues > 3)
+ vfmrqc |= WX_VXMRQC_RSS_HASH(2);
+ else if (wx->num_rx_queues > 1)
+ vfmrqc |= WX_VXMRQC_RSS_HASH(1);
+ wr32m(wx, WX_VXMRQC, WX_VXMRQC_RSS_MASK, WX_VXMRQC_RSS(vfmrqc));
+}
+
+void wx_configure_rx_ring_vf(struct wx *wx, struct wx_ring *ring)
+{
+ u8 reg_idx = ring->reg_idx;
+ union wx_rx_desc *rx_desc;
+ u64 rdba = ring->dma;
+ u32 rxdctl;
+
+ /* disable queue to avoid issues while updating state */
+ rxdctl = rd32(wx, WX_VXRXDCTL(reg_idx));
+ wx_disable_rx_queue(wx, ring);
+
+ wr32(wx, WX_VXRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
+ wr32(wx, WX_VXRDBAH(reg_idx), rdba >> 32);
+
+ /* enable relaxed ordering */
+ pcie_capability_clear_and_set_word(wx->pdev, PCI_EXP_DEVCTL,
+ 0, PCI_EXP_DEVCTL_RELAX_EN);
+
+ /* reset head and tail pointers */
+ wr32(wx, WX_VXRDH(reg_idx), 0);
+ wr32(wx, WX_VXRDT(reg_idx), 0);
+ ring->tail = wx->hw_addr + WX_VXRDT(reg_idx);
+
+ /* initialize rx_buffer_info */
+ memset(ring->rx_buffer_info, 0,
+ sizeof(struct wx_rx_buffer) * ring->count);
+
+ /* initialize Rx descriptor 0 */
+ rx_desc = WX_RX_DESC(ring, 0);
+ rx_desc->wb.upper.length = 0;
+
+ /* reset ntu and ntc to place SW in sync with hardware */
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+ ring->next_to_alloc = 0;
+
+ wx_configure_srrctl_vf(wx, ring, reg_idx);
+
+ /* allow any size packet since we can handle overflow */
+ rxdctl &= ~WX_VXRXDCTL_BUFLEN_MASK;
+ rxdctl |= WX_VXRXDCTL_BUFLEN(wx_buf_len(ring->count));
+ rxdctl |= WX_VXRXDCTL_ENABLE | WX_VXRXDCTL_VLAN;
+
+ /* enable RSC */
+ rxdctl &= ~WX_VXRXDCTL_RSCMAX_MASK;
+ rxdctl |= WX_VXRXDCTL_RSCMAX(0);
+ rxdctl |= WX_VXRXDCTL_RSCEN;
+
+ if (test_bit(WX_FLAG_RX_MERGE_ENABLED, wx->flags))
+ rxdctl |= WX_VXRXDCTL_DESC_MERGE;
+
+ wr32(wx, WX_VXRXDCTL(reg_idx), rxdctl);
+
+ /* pf/vf reuse */
+ wx_enable_rx_queue(wx, ring);
+ wx_alloc_rx_buffers(ring, wx_desc_unused(ring));
+}
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.h
new file mode 100644
index 000000000000..a4bd23c92800
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _WX_VF_LIB_H_
+#define _WX_VF_LIB_H_
+
+void wx_write_eitr_vf(struct wx_q_vector *q_vector);
+void wx_configure_msix_vf(struct wx *wx);
+int wx_write_uc_addr_list_vf(struct net_device *netdev);
+void wx_setup_psrtype_vf(struct wx *wx);
+void wx_setup_vfmrqc_vf(struct wx *wx);
+void wx_configure_tx_vf(struct wx *wx);
+void wx_configure_rx_ring_vf(struct wx *wx, struct wx_ring *ring);
+
+#endif /* _WX_VF_LIB_H_ */
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
index e868f7ef4920..662f28bdde8a 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
@@ -115,7 +115,8 @@ static int ngbe_set_channels(struct net_device *dev,
static const struct ethtool_ops ngbe_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
- ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = wx_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ksettings = wx_get_link_ksettings,
@@ -136,8 +137,16 @@ static const struct ethtool_ops ngbe_ethtool_ops = {
.set_coalesce = wx_set_coalesce,
.get_channels = wx_get_channels,
.set_channels = ngbe_set_channels,
+ .get_rxfh_fields = wx_get_rxfh_fields,
+ .set_rxfh_fields = wx_set_rxfh_fields,
+ .get_rxfh_indir_size = wx_rss_indir_size,
+ .get_rxfh_key_size = wx_get_rxfh_key_size,
+ .get_rxfh = wx_get_rxfh,
+ .set_rxfh = wx_set_rxfh,
.get_msglevel = wx_get_msglevel,
.set_msglevel = wx_set_msglevel,
+ .get_ts_info = wx_get_ts_info,
+ .get_ts_stats = wx_get_ptp_stats,
};
void ngbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
index 53aeae2f884b..58488e138beb 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -14,6 +14,9 @@
#include "../libwx/wx_type.h"
#include "../libwx/wx_hw.h"
#include "../libwx/wx_lib.h"
+#include "../libwx/wx_ptp.h"
+#include "../libwx/wx_mbx.h"
+#include "../libwx/wx_sriov.h"
#include "ngbe_type.h"
#include "ngbe_mdio.h"
#include "ngbe_hw.h"
@@ -116,9 +119,9 @@ static int ngbe_sw_init(struct wx *wx)
num_online_cpus());
wx->rss_enabled = true;
- /* enable itr by default in dynamic mode */
- wx->rx_itr_setting = 1;
- wx->tx_itr_setting = 1;
+ wx->adaptive_itr = false;
+ wx->rx_itr_setting = WX_7K_ITR;
+ wx->tx_itr_setting = WX_7K_ITR;
/* set default ring sizes */
wx->tx_ring_count = NGBE_DEFAULT_TXD;
@@ -128,6 +131,10 @@ static int ngbe_sw_init(struct wx *wx)
wx->tx_work_limit = NGBE_DEFAULT_TX_WORK;
wx->rx_work_limit = NGBE_DEFAULT_RX_WORK;
+ wx->mbx.size = WX_VXMAILBOX_SIZE;
+ wx->setup_tc = ngbe_setup_tc;
+ set_bit(0, &wx->fwd_bitmask);
+
return 0;
}
@@ -154,7 +161,7 @@ static void ngbe_irq_enable(struct wx *wx, bool queues)
if (queues)
wx_intr_enable(wx, NGBE_INTR_ALL);
else
- wx_intr_enable(wx, NGBE_INTR_MISC);
+ wx_intr_enable(wx, NGBE_INTR_MISC(wx));
}
/**
@@ -167,7 +174,7 @@ static irqreturn_t ngbe_intr(int __always_unused irq, void *data)
struct wx_q_vector *q_vector;
struct wx *wx = data;
struct pci_dev *pdev;
- u32 eicr;
+ u32 eicr, eicr_misc;
q_vector = wx->q_vector[0];
pdev = wx->pdev;
@@ -185,6 +192,10 @@ static irqreturn_t ngbe_intr(int __always_unused irq, void *data)
if (!(pdev->msi_enabled))
wr32(wx, WX_PX_INTA, 1);
+ eicr_misc = wx_misc_isb(wx, WX_ISB_MISC);
+ if (unlikely(eicr_misc & NGBE_PX_MISC_IC_TIMESYNC))
+ wx_ptp_check_pps_event(wx);
+
wx->isb_mem[WX_ISB_MISC] = 0;
/* would disable interrupts here but it is auto disabled */
napi_schedule_irqoff(&q_vector->napi);
@@ -195,9 +206,13 @@ static irqreturn_t ngbe_intr(int __always_unused irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t ngbe_msix_other(int __always_unused irq, void *data)
+static irqreturn_t __ngbe_msix_misc(struct wx *wx, u32 eicr)
{
- struct wx *wx = data;
+ if (eicr & NGBE_PX_MISC_IC_VF_MBOX)
+ wx_msg_task(wx);
+
+ if (unlikely(eicr & NGBE_PX_MISC_IC_TIMESYNC))
+ wx_ptp_check_pps_event(wx);
/* re-enable the original interrupt state, no lsc, no queues */
if (netif_running(wx->netdev))
@@ -206,6 +221,35 @@ static irqreturn_t ngbe_msix_other(int __always_unused irq, void *data)
return IRQ_HANDLED;
}
+static irqreturn_t ngbe_msix_misc(int __always_unused irq, void *data)
+{
+ struct wx *wx = data;
+ u32 eicr;
+
+ eicr = wx_misc_isb(wx, WX_ISB_MISC);
+
+ return __ngbe_msix_misc(wx, eicr);
+}
+
+static irqreturn_t ngbe_misc_and_queue(int __always_unused irq, void *data)
+{
+ struct wx_q_vector *q_vector;
+ struct wx *wx = data;
+ u32 eicr;
+
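+ /* on the shared vector, no misc cause set means the interrupt
+ * was raised by the queue
+ */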
+ eicr = wx_misc_isb(wx, WX_ISB_MISC);
+ if (!eicr) {
+ /* queue */
+ q_vector = wx->q_vector[0];
+ napi_schedule_irqoff(&q_vector->napi);
+ if (netif_running(wx->netdev))
+ ngbe_irq_enable(wx, true);
+ return IRQ_HANDLED;
+ }
+
+ return __ngbe_msix_misc(wx, eicr);
+}
+
/**
* ngbe_request_msix_irqs - Initialize MSI-X interrupts
* @wx: board private structure
@@ -238,8 +282,16 @@ static int ngbe_request_msix_irqs(struct wx *wx)
}
}
- err = request_irq(wx->msix_entry->vector,
- ngbe_msix_other, 0, netdev->name, wx);
+ /* Due to the hardware design, when num_vfs < 7 the PF can use vector 0
+ * for misc and vector 1 for the queue. But when num_vfs == 7, vector[1]
+ * is assigned to VF6, so misc and queue interrupts must share vector[0].
+ */
+ if (test_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags))
+ err = request_irq(wx->msix_entry->vector,
+ ngbe_misc_and_queue, 0, netdev->name, wx);
+ else
+ err = request_irq(wx->msix_entry->vector,
+ ngbe_msix_misc, 0, netdev->name, wx);
if (err) {
wx_err(wx, "request_irq for msix_other failed: %d\n", err);
@@ -291,6 +343,22 @@ static void ngbe_disable_device(struct wx *wx)
struct net_device *netdev = wx->netdev;
u32 i;
+ if (wx->num_vfs) {
+ /* Clear EITR Select mapping */
+ wr32(wx, WX_PX_ITRSEL, 0);
+
+ /* Mark all the VFs as inactive */
+ for (i = 0; i < wx->num_vfs; i++)
+ wx->vfinfo[i].clear_to_send = 0;
+ wx->notify_down = true;
+ /* ping all the active vfs to let them know we are going down */
+ wx_ping_all_vfs_with_link_status(wx, false);
+ wx->notify_down = false;
+
+ /* Disable all VFTE/VFRE TX/RX */
+ wx_disable_vf_rx_tx(wx);
+ }
+
/* disable all enabled rx queues */
for (i = 0; i < wx->num_rx_queues; i++)
/* this call also flushes the previous write */
@@ -313,10 +381,19 @@ static void ngbe_disable_device(struct wx *wx)
wx_update_stats(wx);
}
+static void ngbe_reset(struct wx *wx)
+{
+ wx_flush_sw_mac_table(wx);
+ wx_mac_set_default_filter(wx, wx->mac.addr);
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset(wx);
+}
+
void ngbe_down(struct wx *wx)
{
phylink_stop(wx->phylink);
ngbe_disable_device(wx);
+ ngbe_reset(wx);
wx_clean_all_tx_rings(wx);
wx_clean_all_rx_rings(wx);
}
@@ -339,6 +416,11 @@ void ngbe_up(struct wx *wx)
ngbe_sfp_modules_txrx_powerctl(wx, true);
phylink_start(wx->phylink);
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ wr32m(wx, WX_CFG_PORT_CTL,
+ WX_CFG_PORT_CTL_PFRSTD, WX_CFG_PORT_CTL_PFRSTD);
+ if (wx->num_vfs)
+ wx_ping_all_vfs_with_link_status(wx, false);
}
/**
@@ -379,6 +461,8 @@ static int ngbe_open(struct net_device *netdev)
if (err)
goto err_dis_phy;
+ wx_ptp_init(wx);
+
ngbe_up(wx);
return 0;
@@ -407,6 +491,7 @@ static int ngbe_close(struct net_device *netdev)
{
struct wx *wx = netdev_priv(netdev);
+ wx_ptp_stop(wx);
ngbe_down(wx);
wx_free_irq(wx);
wx_free_isb_resources(wx);
@@ -502,11 +587,14 @@ static const struct net_device_ops ngbe_netdev_ops = {
.ndo_set_rx_mode = wx_set_rx_mode,
.ndo_set_features = wx_set_features,
.ndo_fix_features = wx_fix_features,
+ .ndo_features_check = wx_features_check,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = wx_set_mac,
.ndo_get_stats64 = wx_get_stats64,
.ndo_vlan_rx_add_vid = wx_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = wx_vlan_rx_kill_vid,
+ .ndo_hwtstamp_set = wx_hwtstamp_set,
+ .ndo_hwtstamp_get = wx_hwtstamp_get,
};
/**
@@ -578,6 +666,10 @@ static int ngbe_probe(struct pci_dev *pdev,
goto err_pci_release_regions;
}
+ /* The Emerald supports up to 8 VFs per PF, but the physical
+ * function also needs one pool for basic networking.
+ */
+ pci_sriov_set_totalvfs(pdev, NGBE_MAX_VFS_DRV_LIMIT);
wx->driver_name = ngbe_driver_name;
ngbe_set_ethtool_ops(netdev);
netdev->netdev_ops = &ngbe_netdev_ops;
@@ -607,7 +699,7 @@ static int ngbe_probe(struct pci_dev *pdev,
/* setup the private structure */
err = ngbe_sw_init(wx);
if (err)
- goto err_free_mac_table;
+ goto err_pci_release_regions;
/* check if flash load is done after hw power up */
err = wx_check_flash_load(wx, NGBE_SPI_ILDR_STATUS_PERST);
@@ -701,6 +793,7 @@ err_register:
err_clear_interrupt_scheme:
wx_clear_interrupt_scheme(wx);
err_free_mac_table:
+ kfree(wx->rss_key);
kfree(wx->mac_table);
err_pci_release_regions:
pci_release_selected_regions(pdev,
@@ -725,6 +818,7 @@ static void ngbe_remove(struct pci_dev *pdev)
struct net_device *netdev;
netdev = wx->netdev;
+ wx_disable_sriov(wx);
unregister_netdev(netdev);
phylink_destroy(wx->phylink);
pci_release_selected_regions(pdev,
@@ -784,6 +878,7 @@ static struct pci_driver ngbe_driver = {
.suspend = ngbe_suspend,
.resume = ngbe_resume,
.shutdown = ngbe_shutdown,
+ .sriov_configure = wx_pci_sriov_configure,
};
module_pci_driver(ngbe_driver);
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
index a5e9b779c44d..c63bb6e6f405 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
@@ -7,7 +7,9 @@
#include <linux/phy.h>
#include "../libwx/wx_type.h"
+#include "../libwx/wx_ptp.h"
#include "../libwx/wx_hw.h"
+#include "../libwx/wx_sriov.h"
#include "ngbe_type.h"
#include "ngbe_mdio.h"
@@ -64,6 +66,13 @@ static void ngbe_mac_config(struct phylink_config *config, unsigned int mode,
static void ngbe_mac_link_down(struct phylink_config *config,
unsigned int mode, phy_interface_t interface)
{
+ struct wx *wx = phylink_to_wx(config);
+
+ wx->speed = SPEED_UNKNOWN;
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset_cyclecounter(wx);
+ /* ping all the active vfs to let them know we are going down */
+ wx_ping_all_vfs_with_link_status(wx, false);
}
static void ngbe_mac_link_up(struct phylink_config *config,
@@ -103,6 +112,13 @@ static void ngbe_mac_link_up(struct phylink_config *config,
wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
reg = rd32(wx, WX_MAC_WDG_TIMEOUT);
wr32(wx, WX_MAC_WDG_TIMEOUT, reg);
+
+ wx->speed = speed;
+ wx->last_rx_ptp_check = jiffies;
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset_cyclecounter(wx);
+ /* ping all the active vfs to let them know we are going up */
+ wx_ping_all_vfs_with_link_status(wx, true);
}
static const struct phylink_mac_ops ngbe_mac_ops = {
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
index f48ed7fc1805..3b2ca7f47e33 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
@@ -70,17 +70,24 @@
/* Extended Interrupt Enable Set */
#define NGBE_PX_MISC_IEN_DEV_RST BIT(10)
+#define NGBE_PX_MISC_IEN_TIMESYNC BIT(11)
#define NGBE_PX_MISC_IEN_ETH_LK BIT(18)
#define NGBE_PX_MISC_IEN_INT_ERR BIT(20)
+#define NGBE_PX_MISC_IC_VF_MBOX BIT(23)
#define NGBE_PX_MISC_IEN_GPIO BIT(26)
#define NGBE_PX_MISC_IEN_MASK ( \
NGBE_PX_MISC_IEN_DEV_RST | \
+ NGBE_PX_MISC_IEN_TIMESYNC | \
NGBE_PX_MISC_IEN_ETH_LK | \
NGBE_PX_MISC_IEN_INT_ERR | \
+ NGBE_PX_MISC_IC_VF_MBOX | \
NGBE_PX_MISC_IEN_GPIO)
+/* Extended Interrupt Cause Read */
+#define NGBE_PX_MISC_IC_TIMESYNC BIT(11) /* time sync */
+
#define NGBE_INTR_ALL 0x1FF
-#define NGBE_INTR_MISC BIT(0)
+#define NGBE_INTR_MISC(A) BIT((A)->msix_entry->entry)
#define NGBE_PHY_CONFIG(reg_offset) (0x14000 + ((reg_offset) * 4))
#define NGBE_CFG_LAN_SPEED 0x14440
@@ -129,6 +136,7 @@
#define NGBE_MAX_RXD 8192
#define NGBE_MIN_RXD 128
+#define NGBE_MAX_VFS_DRV_LIMIT 7
extern char ngbe_driver_name[];
void ngbe_down(struct wx *wx);
diff --git a/drivers/net/ethernet/wangxun/ngbevf/Makefile b/drivers/net/ethernet/wangxun/ngbevf/Makefile
new file mode 100644
index 000000000000..11a4f15e2ce3
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/ngbevf/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd.
+#
+# Makefile for the Wangxun(R) 1GbE virtual functions driver
+#
+
+obj-$(CONFIG_NGBE) += ngbevf.o
+
+ngbevf-objs := ngbevf_main.o
diff --git a/drivers/net/ethernet/wangxun/ngbevf/ngbevf_main.c b/drivers/net/ethernet/wangxun/ngbevf/ngbevf_main.c
new file mode 100644
index 000000000000..6ef43adcc425
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/ngbevf/ngbevf_main.c
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/string.h>
+#include <linux/etherdevice.h>
+
+#include "../libwx/wx_type.h"
+#include "../libwx/wx_hw.h"
+#include "../libwx/wx_lib.h"
+#include "../libwx/wx_mbx.h"
+#include "../libwx/wx_vf.h"
+#include "../libwx/wx_vf_common.h"
+#include "../libwx/wx_ethtool.h"
+#include "ngbevf_type.h"
+
+/* ngbevf_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static const struct pci_device_id ngbevf_pci_tbl[] = {
+ { PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860AL_W), 0},
+ { PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860A2), 0},
+ { PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860A2S), 0},
+ { PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860A4), 0},
+ { PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860A4S), 0},
+ { PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860AL2), 0},
+ { PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860AL2S), 0},
+ { PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860AL4), 0},
+ { PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860AL4S), 0},
+ { PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860NCSI), 0},
+ { PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860A1), 0},
+ { PCI_VDEVICE(WANGXUN, NGBEVF_DEV_ID_EM_WX1860AL1), 0},
+ /* required last entry */
+ { .device = 0 }
+};
+
+static const struct net_device_ops ngbevf_netdev_ops = {
+ .ndo_open = wxvf_open,
+ .ndo_stop = wxvf_close,
+ .ndo_start_xmit = wx_xmit_frame,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = wx_set_mac_vf,
+};
+
+static void ngbevf_set_num_queues(struct wx *wx)
+{
+ /* Start with base case */
+ wx->num_rx_queues = 1;
+ wx->num_tx_queues = 1;
+}
+
+static int ngbevf_sw_init(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ struct pci_dev *pdev = wx->pdev;
+ int err;
+
+ /* Initialize pcie info and common capability flags */
+ err = wx_sw_init(wx);
+ if (err < 0)
+ goto err_wx_sw_init;
+
+ /* Initialize the mailbox */
+ err = wx_init_mbx_params_vf(wx);
+ if (err)
+ goto err_init_mbx_params;
+
+ /* Initialize the device type */
+ wx->mac.type = wx_mac_em;
+ wx->mac.max_msix_vectors = NGBEVF_MAX_MSIX_VECTORS;
+ /* lock to protect mailbox accesses */
+ spin_lock_init(&wx->mbx.mbx_lock);
+
+ err = wx_reset_hw_vf(wx);
+ if (err) {
+ wx_err(wx, "PF still in reset state. Is the PF interface up?\n");
+ goto err_reset_hw;
+ }
+ wx_init_hw_vf(wx);
+ wx_negotiate_api_vf(wx);
+ if (is_zero_ether_addr(wx->mac.addr))
+ dev_info(&pdev->dev,
+ "MAC address not assigned by administrator.\n");
+ eth_hw_addr_set(netdev, wx->mac.addr);
+
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ dev_info(&pdev->dev, "Assigning random MAC address\n");
+ eth_hw_addr_random(netdev);
+ ether_addr_copy(wx->mac.addr, netdev->dev_addr);
+ ether_addr_copy(wx->mac.perm_addr, netdev->dev_addr);
+ }
+
+ wx->mac.max_tx_queues = NGBEVF_MAX_TX_QUEUES;
+ wx->mac.max_rx_queues = NGBEVF_MAX_RX_QUEUES;
+ /* Enable dynamic interrupt throttling rates */
+ wx->adaptive_itr = true;
+ wx->rx_itr_setting = 1;
+ wx->tx_itr_setting = 1;
+ /* set default ring sizes */
+ wx->tx_ring_count = NGBEVF_DEFAULT_TXD;
+ wx->rx_ring_count = NGBEVF_DEFAULT_RXD;
+ /* set default work limits */
+ wx->tx_work_limit = NGBEVF_DEFAULT_TX_WORK;
+ wx->rx_work_limit = NGBEVF_DEFAULT_RX_WORK;
+ wx->set_num_queues = ngbevf_set_num_queues;
+
+ return 0;
+err_reset_hw:
+ kfree(wx->vfinfo);
+err_init_mbx_params:
+ kfree(wx->rss_key);
+ kfree(wx->mac_table);
+err_wx_sw_init:
+ return err;
+}
+
+/**
+ * ngbevf_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in ngbevf_pci_tbl
+ *
+ * Return: 0 on success, negative on failure
+ *
+ * ngbevf_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int ngbevf_probe(struct pci_dev *pdev,
+ const struct pci_device_id __always_unused *ent)
+{
+ struct net_device *netdev;
+ struct wx *wx = NULL;
+ int err;
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_pci_disable_dev;
+ }
+
+ err = pci_request_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM),
+ dev_driver_string(&pdev->dev));
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_request_selected_regions failed 0x%x\n", err);
+ goto err_pci_disable_dev;
+ }
+
+ pci_set_master(pdev);
+
+ netdev = devm_alloc_etherdev_mqs(&pdev->dev,
+ sizeof(struct wx),
+ NGBEVF_MAX_TX_QUEUES,
+ NGBEVF_MAX_RX_QUEUES);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_pci_release_regions;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ wx = netdev_priv(netdev);
+ wx->netdev = netdev;
+ wx->pdev = pdev;
+
+ wx->msg_enable = netif_msg_init(-1, NETIF_MSG_DRV |
+ NETIF_MSG_PROBE | NETIF_MSG_LINK);
+ wx->hw_addr = devm_ioremap(&pdev->dev,
+ pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!wx->hw_addr) {
+ err = -EIO;
+ goto err_pci_release_regions;
+ }
+
+ wx->driver_name = KBUILD_MODNAME;
+ wx_set_ethtool_ops_vf(netdev);
+ netdev->netdev_ops = &ngbevf_netdev_ops;
+
+ /* setup the private structure */
+ err = ngbevf_sw_init(wx);
+ if (err)
+ goto err_pci_release_regions;
+
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ eth_hw_addr_set(netdev, wx->mac.perm_addr);
+ ether_addr_copy(netdev->perm_addr, wx->mac.addr);
+
+ wxvf_init_service(wx);
+ err = wx_init_interrupt_scheme(wx);
+ if (err)
+ goto err_free_sw_init;
+
+ wx_get_fw_version_vf(wx);
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
+ pci_set_drvdata(pdev, wx);
+ netif_tx_stop_all_queues(netdev);
+
+ return 0;
+
+err_register:
+ wx_clear_interrupt_scheme(wx);
+err_free_sw_init:
+ timer_delete_sync(&wx->service_timer);
+ cancel_work_sync(&wx->service_task);
+ kfree(wx->vfinfo);
+ kfree(wx->rss_key);
+ kfree(wx->mac_table);
+err_pci_release_regions:
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_disable_dev:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * ngbevf_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * ngbevf_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void ngbevf_remove(struct pci_dev *pdev)
+{
+ wxvf_remove(pdev);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(ngbevf_pm_ops, wxvf_suspend, wxvf_resume);
+
+static struct pci_driver ngbevf_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = ngbevf_pci_tbl,
+ .probe = ngbevf_probe,
+ .remove = ngbevf_remove,
+ .shutdown = wxvf_shutdown,
+ /* Power Management Hooks */
+ .driver.pm = pm_sleep_ptr(&ngbevf_pm_ops)
+};
+
+module_pci_driver(ngbevf_driver);
+
+MODULE_DEVICE_TABLE(pci, ngbevf_pci_tbl);
+MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, <software@trustnetic.com>");
+MODULE_DESCRIPTION("WangXun(R) Gigabit PCI Express Network Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/wangxun/ngbevf/ngbevf_type.h b/drivers/net/ethernet/wangxun/ngbevf/ngbevf_type.h
new file mode 100644
index 000000000000..67e761089e99
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/ngbevf/ngbevf_type.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _NGBEVF_TYPE_H_
+#define _NGBEVF_TYPE_H_
+
+/* Device IDs */
+#define NGBEVF_DEV_ID_EM_WX1860AL_W 0x0110
+#define NGBEVF_DEV_ID_EM_WX1860A2 0x0111
+#define NGBEVF_DEV_ID_EM_WX1860A2S 0x0112
+#define NGBEVF_DEV_ID_EM_WX1860A4 0x0113
+#define NGBEVF_DEV_ID_EM_WX1860A4S 0x0114
+#define NGBEVF_DEV_ID_EM_WX1860AL2 0x0115
+#define NGBEVF_DEV_ID_EM_WX1860AL2S 0x0116
+#define NGBEVF_DEV_ID_EM_WX1860AL4 0x0117
+#define NGBEVF_DEV_ID_EM_WX1860AL4S 0x0118
+#define NGBEVF_DEV_ID_EM_WX1860NCSI 0x0119
+#define NGBEVF_DEV_ID_EM_WX1860A1 0x011a
+#define NGBEVF_DEV_ID_EM_WX1860AL1 0x011b
+
+#define NGBEVF_MAX_MSIX_VECTORS 1
+#define NGBEVF_MAX_RX_QUEUES 1
+#define NGBEVF_MAX_TX_QUEUES 1
+#define NGBEVF_DEFAULT_TXD 128
+#define NGBEVF_DEFAULT_RXD 128
+#define NGBEVF_DEFAULT_TX_WORK 256
+#define NGBEVF_DEFAULT_RX_WORK 256
+
+#endif /* _NGBEVF_TYPE_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile
index f74576fe7062..c757fa95e58e 100644
--- a/drivers/net/ethernet/wangxun/txgbe/Makefile
+++ b/drivers/net/ethernet/wangxun/txgbe/Makefile
@@ -11,4 +11,5 @@ txgbe-objs := txgbe_main.o \
txgbe_phy.o \
txgbe_irq.o \
txgbe_fdir.o \
- txgbe_ethtool.o
+ txgbe_ethtool.o \
+ txgbe_aml.o
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c
new file mode 100644
index 000000000000..62d7f47d4f8d
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c
@@ -0,0 +1,530 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/phylink.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+#include <linux/phy.h>
+
+#include "../libwx/wx_type.h"
+#include "../libwx/wx_lib.h"
+#include "../libwx/wx_ptp.h"
+#include "../libwx/wx_hw.h"
+#include "../libwx/wx_sriov.h"
+#include "txgbe_type.h"
+#include "txgbe_aml.h"
+#include "txgbe_hw.h"
+
+void txgbe_gpio_init_aml(struct wx *wx)
+{
+ u32 status, mod_rst;
+
+ if (wx->mac.type == wx_mac_aml40)
+ mod_rst = TXGBE_GPIOBIT_4;
+ else
+ mod_rst = TXGBE_GPIOBIT_2;
+
+ wr32(wx, WX_GPIO_INTTYPE_LEVEL, mod_rst);
+ wr32(wx, WX_GPIO_INTEN, mod_rst);
+
+ status = rd32(wx, WX_GPIO_INTSTATUS);
+ for (int i = 0; i < 6; i++) {
+ if (status & BIT(i))
+ wr32(wx, WX_GPIO_EOI, BIT(i));
+ }
+}
+
+irqreturn_t txgbe_gpio_irq_handler_aml(int irq, void *data)
+{
+ struct txgbe *txgbe = data;
+ struct wx *wx = txgbe->wx;
+ u32 status, mod_rst;
+
+ if (wx->mac.type == wx_mac_aml40)
+ mod_rst = TXGBE_GPIOBIT_4;
+ else
+ mod_rst = TXGBE_GPIOBIT_2;
+
+ wr32(wx, WX_GPIO_INTMASK, 0xFF);
+ status = rd32(wx, WX_GPIO_INTSTATUS);
+ if (status & mod_rst) {
+ set_bit(WX_FLAG_NEED_MODULE_RESET, wx->flags);
+ wr32(wx, WX_GPIO_EOI, mod_rst);
+ wx_service_event_schedule(wx);
+ }
+
+ wr32(wx, WX_GPIO_INTMASK, 0);
+ return IRQ_HANDLED;
+}
+
+int txgbe_test_hostif(struct wx *wx)
+{
+ struct txgbe_hic_ephy_getlink buffer;
+
+ if (wx->mac.type == wx_mac_sp)
+ return 0;
+
+ buffer.hdr.cmd = FW_PHY_GET_LINK_CMD;
+ buffer.hdr.buf_len = sizeof(struct txgbe_hic_ephy_getlink) -
+ sizeof(struct wx_hic_hdr);
+ buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+
+ return wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer),
+ WX_HI_COMMAND_TIMEOUT, true);
+}
+
+int txgbe_read_eeprom_hostif(struct wx *wx,
+ struct txgbe_hic_i2c_read *buffer,
+ u32 length, u8 *data)
+{
+ u32 dword_len, offset, value, i;
+ int err;
+
+ buffer->hdr.cmd = FW_READ_EEPROM_CMD;
+ buffer->hdr.buf_len = sizeof(struct txgbe_hic_i2c_read) -
+ sizeof(struct wx_hic_hdr);
+ buffer->hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+
+ err = wx_host_interface_command(wx, (u32 *)buffer,
+ sizeof(struct txgbe_hic_i2c_read),
+ WX_HI_COMMAND_TIMEOUT, false);
+ if (err != 0)
+ return err;
+
+ /* dword offset past the command buffer where the returned data begins */
+ offset = sizeof(struct txgbe_hic_i2c_read) >> 2;
+ dword_len = round_up(length, 4) >> 2;
+
+ for (i = 0; i < dword_len; i++) {
+ value = rd32a(wx, WX_FW2SW_MBOX, i + offset);
+ le32_to_cpus(&value);
+
+ memcpy(data, &value, 4);
+ data += 4;
+ }
+
+ return 0;
+}
+
+static int txgbe_identify_module_hostif(struct wx *wx,
+ struct txgbe_hic_get_module_info *buffer)
+{
+ buffer->hdr.cmd = FW_GET_MODULE_INFO_CMD;
+ buffer->hdr.buf_len = sizeof(struct txgbe_hic_get_module_info) -
+ sizeof(struct wx_hic_hdr);
+ buffer->hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+
+ return wx_host_interface_command(wx, (u32 *)buffer,
+ sizeof(struct txgbe_hic_get_module_info),
+ WX_HI_COMMAND_TIMEOUT, true);
+}
+
+static int txgbe_set_phy_link_hostif(struct wx *wx, int speed, int autoneg, int duplex)
+{
+ struct txgbe_hic_ephy_setlink buffer;
+
+ buffer.hdr.cmd = FW_PHY_SET_LINK_CMD;
+ buffer.hdr.buf_len = sizeof(struct txgbe_hic_ephy_setlink) -
+ sizeof(struct wx_hic_hdr);
+ buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+
+ switch (speed) {
+ case SPEED_40000:
+ buffer.speed = TXGBE_LINK_SPEED_40GB_FULL;
+ break;
+ case SPEED_25000:
+ buffer.speed = TXGBE_LINK_SPEED_25GB_FULL;
+ break;
+ case SPEED_10000:
+ buffer.speed = TXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ default:
+ buffer.speed = TXGBE_LINK_SPEED_UNKNOWN;
+ break;
+ }
+
+ buffer.fec_mode = TXGBE_PHY_FEC_AUTO;
+ buffer.autoneg = autoneg;
+ buffer.duplex = duplex;
+
+ return wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer),
+ WX_HI_COMMAND_TIMEOUT, true);
+}
+
+static void txgbe_get_link_capabilities(struct wx *wx, int *speed,
+ int *autoneg, int *duplex)
+{
+ struct txgbe *txgbe = wx->priv;
+
+ if (test_bit(PHY_INTERFACE_MODE_XLGMII, txgbe->link_interfaces))
+ *speed = SPEED_40000;
+ else if (test_bit(PHY_INTERFACE_MODE_25GBASER, txgbe->link_interfaces))
+ *speed = SPEED_25000;
+ else if (test_bit(PHY_INTERFACE_MODE_10GBASER, txgbe->link_interfaces))
+ *speed = SPEED_10000;
+ else
+ *speed = SPEED_UNKNOWN;
+
+ *autoneg = phylink_test(txgbe->advertising, Autoneg);
+ *duplex = *speed == SPEED_UNKNOWN ? DUPLEX_HALF : DUPLEX_FULL;
+}
+
+static void txgbe_get_mac_link(struct wx *wx, int *speed)
+{
+ u32 status;
+
+ status = rd32(wx, TXGBE_CFG_PORT_ST);
+ if (!(status & TXGBE_CFG_PORT_ST_LINK_UP))
+ *speed = SPEED_UNKNOWN;
+ else if (status & TXGBE_CFG_PORT_ST_LINK_AML_40G)
+ *speed = SPEED_40000;
+ else if (status & TXGBE_CFG_PORT_ST_LINK_AML_25G)
+ *speed = SPEED_25000;
+ else if (status & TXGBE_CFG_PORT_ST_LINK_AML_10G)
+ *speed = SPEED_10000;
+ else
+ *speed = SPEED_UNKNOWN;
+}
+
+int txgbe_set_phy_link(struct wx *wx)
+{
+ int speed, autoneg, duplex, err;
+
+ txgbe_get_link_capabilities(wx, &speed, &autoneg, &duplex);
+
+ err = txgbe_set_phy_link_hostif(wx, speed, autoneg, duplex);
+ if (err) {
+ wx_err(wx, "Failed to setup link\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int txgbe_sfp_to_linkmodes(struct wx *wx, struct txgbe_sff_id *id)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = { 0, };
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
+ struct txgbe *txgbe = wx->priv;
+
+ if (id->cable_tech & TXGBE_SFF_DA_PASSIVE_CABLE) {
+ txgbe->link_port = PORT_DA;
+ phylink_set(modes, Autoneg);
+ if (id->com_25g_code == TXGBE_SFF_25GBASECR_91FEC ||
+ id->com_25g_code == TXGBE_SFF_25GBASECR_74FEC ||
+ id->com_25g_code == TXGBE_SFF_25GBASECR_NOFEC) {
+ phylink_set(modes, 25000baseCR_Full);
+ phylink_set(modes, 10000baseCR_Full);
+ __set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ } else {
+ phylink_set(modes, 10000baseCR_Full);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
+ } else if (id->cable_tech & TXGBE_SFF_DA_ACTIVE_CABLE) {
+ txgbe->link_port = PORT_DA;
+ phylink_set(modes, Autoneg);
+ phylink_set(modes, 25000baseCR_Full);
+ __set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces);
+ } else {
+ if (id->com_25g_code == TXGBE_SFF_25GBASESR_CAPABLE ||
+ id->com_25g_code == TXGBE_SFF_25GBASEER_CAPABLE ||
+ id->com_25g_code == TXGBE_SFF_25GBASELR_CAPABLE) {
+ txgbe->link_port = PORT_FIBRE;
+ phylink_set(modes, 25000baseSR_Full);
+ __set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces);
+ }
+ if (id->com_10g_code & TXGBE_SFF_10GBASESR_CAPABLE) {
+ txgbe->link_port = PORT_FIBRE;
+ phylink_set(modes, 10000baseSR_Full);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
+ if (id->com_10g_code & TXGBE_SFF_10GBASELR_CAPABLE) {
+ txgbe->link_port = PORT_FIBRE;
+ phylink_set(modes, 10000baseLR_Full);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
+ }
+
+ if (phy_interface_empty(interfaces)) {
+ wx_err(wx, "unsupported SFP module\n");
+ return -EINVAL;
+ }
+
+ phylink_set(modes, Pause);
+ phylink_set(modes, Asym_Pause);
+ phylink_set(modes, FIBRE);
+
+ if (!linkmode_equal(txgbe->link_support, modes)) {
+ linkmode_copy(txgbe->link_support, modes);
+ phy_interface_and(txgbe->link_interfaces,
+ wx->phylink_config.supported_interfaces,
+ interfaces);
+ linkmode_copy(txgbe->advertising, modes);
+
+ set_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags);
+ }
+
+ return 0;
+}
+
+static int txgbe_qsfp_to_linkmodes(struct wx *wx, struct txgbe_sff_id *id)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = { 0, };
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
+ struct txgbe *txgbe = wx->priv;
+
+ if (id->transceiver_type & TXGBE_SFF_ETHERNET_40G_CR4) {
+ txgbe->link_port = PORT_DA;
+ phylink_set(modes, Autoneg);
+ phylink_set(modes, 40000baseCR4_Full);
+ phylink_set(modes, 10000baseCR_Full);
+ __set_bit(PHY_INTERFACE_MODE_XLGMII, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
+ if (id->transceiver_type & TXGBE_SFF_ETHERNET_40G_SR4) {
+ txgbe->link_port = PORT_FIBRE;
+ phylink_set(modes, 40000baseSR4_Full);
+ __set_bit(PHY_INTERFACE_MODE_XLGMII, interfaces);
+ }
+ if (id->transceiver_type & TXGBE_SFF_ETHERNET_40G_LR4) {
+ txgbe->link_port = PORT_FIBRE;
+ phylink_set(modes, 40000baseLR4_Full);
+ __set_bit(PHY_INTERFACE_MODE_XLGMII, interfaces);
+ }
+ if (id->transceiver_type & TXGBE_SFF_ETHERNET_40G_ACTIVE) {
+ txgbe->link_port = PORT_DA;
+ phylink_set(modes, Autoneg);
+ phylink_set(modes, 40000baseCR4_Full);
+ __set_bit(PHY_INTERFACE_MODE_XLGMII, interfaces);
+ }
+ if (id->transceiver_type & TXGBE_SFF_ETHERNET_RSRVD) {
+ if (id->sff_opt1 & TXGBE_SFF_ETHERNET_100G_CR4) {
+ txgbe->link_port = PORT_DA;
+ phylink_set(modes, Autoneg);
+ phylink_set(modes, 40000baseCR4_Full);
+ phylink_set(modes, 25000baseCR_Full);
+ phylink_set(modes, 10000baseCR_Full);
+ __set_bit(PHY_INTERFACE_MODE_XLGMII, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
+ }
+
+ if (phy_interface_empty(interfaces)) {
+ wx_err(wx, "unsupported QSFP module\n");
+ return -EINVAL;
+ }
+
+ phylink_set(modes, Pause);
+ phylink_set(modes, Asym_Pause);
+ phylink_set(modes, FIBRE);
+
+ if (!linkmode_equal(txgbe->link_support, modes)) {
+ linkmode_copy(txgbe->link_support, modes);
+ phy_interface_and(txgbe->link_interfaces,
+ wx->phylink_config.supported_interfaces,
+ interfaces);
+ linkmode_copy(txgbe->advertising, modes);
+
+ set_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags);
+ }
+
+ return 0;
+}
+
+int txgbe_identify_module(struct wx *wx)
+{
+ struct txgbe_hic_get_module_info buffer;
+ struct txgbe_sff_id *id;
+ int err = 0;
+ u32 mod_abs;
+ u32 gpio;
+
+ if (wx->mac.type == wx_mac_aml40)
+ mod_abs = TXGBE_GPIOBIT_4;
+ else
+ mod_abs = TXGBE_GPIOBIT_2;
+
+ gpio = rd32(wx, WX_GPIO_EXT);
+ if (gpio & mod_abs)
+ return -ENODEV;
+
+ err = txgbe_identify_module_hostif(wx, &buffer);
+ if (err) {
+ wx_err(wx, "Failed to identify module\n");
+ return err;
+ }
+
+ id = &buffer.id;
+ if (id->identifier != TXGBE_SFF_IDENTIFIER_SFP &&
+ id->identifier != TXGBE_SFF_IDENTIFIER_QSFP &&
+ id->identifier != TXGBE_SFF_IDENTIFIER_QSFP_PLUS &&
+ id->identifier != TXGBE_SFF_IDENTIFIER_QSFP28) {
+ wx_err(wx, "Invalid module\n");
+ return -ENODEV;
+ }
+
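+ /* a transceiver_type of 0xFF indicates an SFP module; anything else
+ * is parsed as a QSFP module
+ */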
+ if (id->transceiver_type == 0xFF)
+ return txgbe_sfp_to_linkmodes(wx, id);
+
+ return txgbe_qsfp_to_linkmodes(wx, id);
+}
+
+void txgbe_setup_link(struct wx *wx)
+{
+ struct txgbe *txgbe = wx->priv;
+
+ phy_interface_zero(txgbe->link_interfaces);
+ linkmode_zero(txgbe->link_support);
+
+ set_bit(WX_FLAG_NEED_MODULE_RESET, wx->flags);
+ wx_service_event_schedule(wx);
+}
+
+static void txgbe_get_link_state(struct phylink_config *config,
+ struct phylink_link_state *state)
+{
+ struct wx *wx = phylink_to_wx(config);
+ int speed;
+
+ txgbe_get_mac_link(wx, &speed);
+ state->link = speed != SPEED_UNKNOWN;
+ state->speed = speed;
+ state->duplex = state->link ? DUPLEX_FULL : DUPLEX_UNKNOWN;
+}
+
+static void txgbe_reconfig_mac(struct wx *wx)
+{
+ u32 wdg, fc;
+
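+ /* the watchdog timeout and Rx flow control settings are lost across
+ * the MAC reset, so save them now and restore them afterwards
+ */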
+ wdg = rd32(wx, WX_MAC_WDG_TIMEOUT);
+ fc = rd32(wx, WX_MAC_RX_FLOW_CTRL);
+
+ wr32(wx, WX_MIS_RST, TXGBE_MIS_RST_MAC_RST(wx->bus.func));
+ /* wait for the MAC reset to complete */
+ usleep_range(1000, 1500);
+
+ wr32m(wx, TXGBE_MAC_MISC_CTL, TXGBE_MAC_MISC_CTL_LINK_STS_MOD,
+ TXGBE_MAC_MISC_CTL_LINK_BOTH);
+ wx_reset_mac(wx);
+
+ wr32(wx, WX_MAC_WDG_TIMEOUT, wdg);
+ wr32(wx, WX_MAC_RX_FLOW_CTRL, fc);
+}
+
+static void txgbe_mac_link_up_aml(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct wx *wx = phylink_to_wx(config);
+ u32 txcfg;
+
+ wx_fc_enable(wx, tx_pause, rx_pause);
+
+ txgbe_reconfig_mac(wx);
+ txgbe_enable_sec_tx_path(wx);
+
+ txcfg = rd32(wx, TXGBE_AML_MAC_TX_CFG);
+ txcfg &= ~TXGBE_AML_MAC_TX_CFG_SPEED_MASK;
+
+ switch (speed) {
+ case SPEED_40000:
+ txcfg |= TXGBE_AML_MAC_TX_CFG_SPEED_40G;
+ break;
+ case SPEED_25000:
+ txcfg |= TXGBE_AML_MAC_TX_CFG_SPEED_25G;
+ break;
+ case SPEED_10000:
+ txcfg |= TXGBE_AML_MAC_TX_CFG_SPEED_10G;
+ break;
+ default:
+ break;
+ }
+
+ wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE);
+ wr32(wx, TXGBE_AML_MAC_TX_CFG, txcfg | TXGBE_AML_MAC_TX_CFG_TE);
+
+ wx->speed = speed;
+ wx->last_rx_ptp_check = jiffies;
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset_cyclecounter(wx);
+ /* ping all the active vfs to let them know we are going up */
+ wx_ping_all_vfs_with_link_status(wx, true);
+}
+
+static void txgbe_mac_link_down_aml(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct wx *wx = phylink_to_wx(config);
+
+ wr32m(wx, TXGBE_AML_MAC_TX_CFG, TXGBE_AML_MAC_TX_CFG_TE, 0);
+ wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, 0);
+
+ wx->speed = SPEED_UNKNOWN;
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset_cyclecounter(wx);
+ /* ping all the active vfs to let them know we are going down */
+ wx_ping_all_vfs_with_link_status(wx, false);
+}
+
+static void txgbe_mac_config_aml(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static const struct phylink_mac_ops txgbe_mac_ops_aml = {
+ .mac_config = txgbe_mac_config_aml,
+ .mac_link_down = txgbe_mac_link_down_aml,
+ .mac_link_up = txgbe_mac_link_up_aml,
+};
+
+int txgbe_phylink_init_aml(struct txgbe *txgbe)
+{
+ struct phylink_link_state state;
+ struct phylink_config *config;
+ struct wx *wx = txgbe->wx;
+ phy_interface_t phy_mode;
+ struct phylink *phylink;
+ int err;
+
+ config = &wx->phylink_config;
+ config->dev = &wx->netdev->dev;
+ config->type = PHYLINK_NETDEV;
+ config->mac_capabilities = MAC_25000FD | MAC_10000FD |
+ MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
+ config->get_fixed_state = txgbe_get_link_state;
+
+ if (wx->mac.type == wx_mac_aml40) {
+ config->mac_capabilities |= MAC_40000FD;
+ phy_mode = PHY_INTERFACE_MODE_XLGMII;
+ __set_bit(PHY_INTERFACE_MODE_XLGMII, config->supported_interfaces);
+ state.speed = SPEED_40000;
+ state.duplex = DUPLEX_FULL;
+ } else {
+ phy_mode = PHY_INTERFACE_MODE_25GBASER;
+ state.speed = SPEED_25000;
+ state.duplex = DUPLEX_FULL;
+ }
+
+ __set_bit(PHY_INTERFACE_MODE_25GBASER, config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, config->supported_interfaces);
+
+ phylink = phylink_create(config, NULL, phy_mode, &txgbe_mac_ops_aml);
+ if (IS_ERR(phylink))
+ return PTR_ERR(phylink);
+
+ err = phylink_set_fixed_link(phylink, &state);
+ if (err) {
+ wx_err(wx, "Failed to set fixed link\n");
+ return err;
+ }
+
+ wx->phylink = phylink;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h
new file mode 100644
index 000000000000..4f6df0ee860b
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _TXGBE_AML_H_
+#define _TXGBE_AML_H_
+
+void txgbe_gpio_init_aml(struct wx *wx);
+irqreturn_t txgbe_gpio_irq_handler_aml(int irq, void *data);
+int txgbe_test_hostif(struct wx *wx);
+int txgbe_read_eeprom_hostif(struct wx *wx,
+ struct txgbe_hic_i2c_read *buffer,
+ u32 length, u8 *data);
+int txgbe_set_phy_link(struct wx *wx);
+int txgbe_identify_module(struct wx *wx);
+void txgbe_setup_link(struct wx *wx);
+int txgbe_phylink_init_aml(struct txgbe *txgbe);
+
+#endif /* _TXGBE_AML_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
index d98314b26c19..f3cb00109529 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
@@ -10,8 +10,32 @@
#include "../libwx/wx_lib.h"
#include "txgbe_type.h"
#include "txgbe_fdir.h"
+#include "txgbe_aml.h"
#include "txgbe_ethtool.h"
+int txgbe_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct wx *wx = netdev_priv(netdev);
+ struct txgbe *txgbe = wx->priv;
+ int err;
+
+ err = wx_get_link_ksettings(netdev, cmd);
+ if (err)
+ return err;
+
+ if (wx->mac.type == wx_mac_sp)
+ return 0;
+
+ cmd->base.port = txgbe->link_port;
+ cmd->base.autoneg = phylink_test(txgbe->advertising, Autoneg) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE;
+ linkmode_copy(cmd->link_modes.supported, txgbe->link_support);
+ linkmode_copy(cmd->link_modes.advertising, txgbe->advertising);
+
+ return 0;
+}
+
static int txgbe_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
@@ -342,12 +366,19 @@ static int txgbe_add_ethtool_fdir_entry(struct txgbe *txgbe,
queue = TXGBE_RDB_FDIR_DROP_QUEUE;
} else {
u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+ u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
- if (ring >= wx->num_rx_queues)
+ if (!vf && ring >= wx->num_rx_queues)
+ return -EINVAL;
+ else if (vf && (vf > wx->num_vfs ||
+ ring >= wx->num_rx_queues_per_pool))
return -EINVAL;
/* Map the ring onto the absolute queue index */
- queue = wx->rx_ring[ring]->reg_idx;
+ if (!vf)
+ queue = wx->rx_ring[ring]->reg_idx;
+ else
+ queue = ((vf - 1) * wx->num_rx_queues_per_pool) + ring;
}
/* Don't allow indexes to exist outside of available space */
@@ -504,13 +535,42 @@ static int txgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return ret;
}
+static int
+txgbe_get_module_eeprom_by_page(struct net_device *netdev,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
+{
+ struct wx *wx = netdev_priv(netdev);
+ struct txgbe_hic_i2c_read buffer;
+ int err;
+
+ if (!test_bit(WX_FLAG_SWFW_RING, wx->flags))
+ return -EOPNOTSUPP;
+
+ buffer.length = cpu_to_be32(page_data->length);
+ buffer.offset = cpu_to_be32(page_data->offset);
+ buffer.page = page_data->page;
+ buffer.bank = page_data->bank;
+ buffer.i2c_address = page_data->i2c_address;
+
+ err = txgbe_read_eeprom_hostif(wx, &buffer, page_data->length,
+ page_data->data);
+ if (err) {
+ wx_err(wx, "Failed to read module EEPROM\n");
+ return err;
+ }
+
+ return page_data->length;
+}
+
static const struct ethtool_ops txgbe_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
- ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = wx_get_drvinfo,
.nway_reset = wx_nway_reset,
.get_link = ethtool_op_get_link,
- .get_link_ksettings = wx_get_link_ksettings,
+ .get_link_ksettings = txgbe_get_link_ksettings,
.set_link_ksettings = wx_set_link_ksettings,
.get_sset_count = wx_get_sset_count,
.get_strings = wx_get_strings,
@@ -527,8 +587,17 @@ static const struct ethtool_ops txgbe_ethtool_ops = {
.set_channels = txgbe_set_channels,
.get_rxnfc = txgbe_get_rxnfc,
.set_rxnfc = txgbe_set_rxnfc,
+ .get_rxfh_fields = wx_get_rxfh_fields,
+ .set_rxfh_fields = wx_set_rxfh_fields,
+ .get_rxfh_indir_size = wx_rss_indir_size,
+ .get_rxfh_key_size = wx_get_rxfh_key_size,
+ .get_rxfh = wx_get_rxfh,
+ .set_rxfh = wx_set_rxfh,
.get_msglevel = wx_get_msglevel,
.set_msglevel = wx_set_msglevel,
+ .get_ts_info = wx_get_ts_info,
+ .get_ts_stats = wx_get_ptp_stats,
+ .get_module_eeprom_by_page = txgbe_get_module_eeprom_by_page,
};
void txgbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h
index ace1b3571012..66dbc8ec1bb6 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h
@@ -4,6 +4,8 @@
#ifndef _TXGBE_ETHTOOL_H_
#define _TXGBE_ETHTOOL_H_
+int txgbe_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd);
void txgbe_set_ethtool_ops(struct net_device *netdev);
#endif /* _TXGBE_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c
index ef50efbaec0f..a84010828551 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c
@@ -307,6 +307,7 @@ void txgbe_atr(struct wx_ring *ring, struct wx_tx_buffer *first, u8 ptype)
int txgbe_fdir_set_input_mask(struct wx *wx, union txgbe_atr_input *input_mask)
{
u32 fdirm = 0, fdirtcpm = 0, flex = 0;
+ int index, offset;
/* Program the relevant mask registers. If src/dst_port or src/dst_addr
* are zero, then assume a full mask for that field. Also assume that
@@ -352,15 +353,17 @@ int txgbe_fdir_set_input_mask(struct wx *wx, union txgbe_atr_input *input_mask)
/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
wr32(wx, TXGBE_RDB_FDIR_OTHER_MSK, fdirm);
- flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0));
- flex &= ~TXGBE_RDB_FDIR_FLEX_CFG_FIELD0;
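+ /* each FLEX_CFG register packs the flex-byte configuration for four
+ * pools, 8 bits per pool, so index it by the pool number
+ */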
+ index = VMDQ_P(0) / 4;
+ offset = VMDQ_P(0) % 4;
+ flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(index));
+ flex &= ~(TXGBE_RDB_FDIR_FLEX_CFG_FIELD0 << (offset * 8));
flex |= (TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC |
- TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6));
+ TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6)) << (offset * 8);
switch ((__force u16)input_mask->formatted.flex_bytes & 0xFFFF) {
case 0x0000:
/* Mask Flex Bytes */
- flex |= TXGBE_RDB_FDIR_FLEX_CFG_MSK;
+ flex |= TXGBE_RDB_FDIR_FLEX_CFG_MSK << (offset * 8);
break;
case 0xFFFF:
break;
@@ -368,7 +371,7 @@ int txgbe_fdir_set_input_mask(struct wx *wx, union txgbe_atr_input *input_mask)
wx_err(wx, "Error on flexible byte mask\n");
return -EINVAL;
}
- wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0), flex);
+ wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(index), flex);
/* store the TCP/UDP port masks, bit reversed from port layout */
fdirtcpm = ntohs(input_mask->formatted.dst_port);
@@ -516,14 +519,16 @@ static void txgbe_fdir_enable(struct wx *wx, u32 fdirctrl)
static void txgbe_init_fdir_signature(struct wx *wx)
{
u32 fdirctrl = TXGBE_FDIR_PBALLOC_64K;
+ int index = VMDQ_P(0) / 4;
+ int offset = VMDQ_P(0) % 4;
u32 flex = 0;
- flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0));
- flex &= ~TXGBE_RDB_FDIR_FLEX_CFG_FIELD0;
+ flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(index));
+ flex &= ~(TXGBE_RDB_FDIR_FLEX_CFG_FIELD0 << (offset * 8));
flex |= (TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC |
- TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6));
- wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0), flex);
+ TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6)) << (offset * 8);
+ wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(index), flex);
/* Continue setup of fdirctrl register bits:
* Move the flexible bytes to use the ethertype - shift 6 words
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
index cd1372da92a9..e551ae0e2069 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
@@ -99,9 +99,15 @@ static int txgbe_calc_eeprom_checksum(struct wx *wx, u16 *checksum)
}
local_buffer = eeprom_ptrs;
- for (i = 0; i < TXGBE_EEPROM_LAST_WORD; i++)
+ for (i = 0; i < TXGBE_EEPROM_LAST_WORD; i++) {
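+ /* on aml devices, force the words in the I2C region to all ones
+ * before they are added to the checksum
+ */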
+ if (wx->mac.type == wx_mac_aml) {
+ if (i >= TXGBE_EEPROM_I2C_SRART_PTR &&
+ i < TXGBE_EEPROM_I2C_END_PTR)
+ local_buffer[i] = 0xffff;
+ }
if (i != wx->eeprom.sw_region_offset + TXGBE_EEPROM_CHECKSUM)
*checksum += local_buffer[i];
+ }
kvfree(eeprom_ptrs);
@@ -182,7 +188,7 @@ int txgbe_reset_hw(struct wx *wx)
if (status != 0)
return status;
- if (wx->media_type != sp_media_copper) {
+ if (wx->media_type != wx_media_copper) {
u32 val;
val = WX_MIS_RST_LAN_RST(wx->bus.func);
@@ -197,6 +203,12 @@ int txgbe_reset_hw(struct wx *wx)
txgbe_reset_misc(wx);
+ if (wx->mac.type != wx_mac_sp) {
+ wr32(wx, TXGBE_PX_PF_BME, 0x1);
+ wr32m(wx, TXGBE_RDM_RSC_CTL, TXGBE_RDM_RSC_CTL_FREE_CTL,
+ TXGBE_RDM_RSC_CTL_FREE_CTL);
+ }
+
wx_clear_hw_cntrs(wx);
/* Store the permanent mac address */
@@ -206,7 +218,7 @@ int txgbe_reset_hw(struct wx *wx)
* clear the multicast table. Also reset num_rar_entries to 128,
* since we modify this value when programming the SAN MAC address.
*/
- wx->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES;
+ wx->mac.num_rar_entries = TXGBE_RAR_ENTRIES;
wx_init_rx_addrs(wx);
pci_set_master(wx->pdev);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
index a4cf682dca65..aa14958d439a 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
@@ -6,10 +6,13 @@
#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
+#include "../libwx/wx_ptp.h"
#include "../libwx/wx_hw.h"
+#include "../libwx/wx_sriov.h"
#include "txgbe_type.h"
#include "txgbe_phy.h"
#include "txgbe_irq.h"
+#include "txgbe_aml.h"
/**
* txgbe_irq_enable - Enable default interrupt generation settings
@@ -18,10 +21,17 @@
**/
void txgbe_irq_enable(struct wx *wx, bool queues)
{
- wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK);
+ u32 misc_ien = TXGBE_PX_MISC_IEN_MASK;
+
+ if (wx->mac.type != wx_mac_sp) {
+ misc_ien |= TXGBE_PX_MISC_GPIO;
+ txgbe_gpio_init_aml(wx);
+ }
+
+ wr32(wx, WX_PX_MISC_IEN, misc_ien);
/* unmask interrupt */
- wx_intr_enable(wx, TXGBE_INTR_MISC);
+ wx_intr_enable(wx, TXGBE_INTR_MISC(wx));
if (queues)
wx_intr_enable(wx, TXGBE_INTR_QALL(wx));
}
@@ -68,18 +78,9 @@ free_queue_irqs:
free_irq(wx->msix_q_entries[vector].vector,
wx->q_vector[vector]);
}
- wx_reset_interrupt_capability(wx);
return err;
}
-static int txgbe_request_gpio_irq(struct txgbe *txgbe)
-{
- txgbe->gpio_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
- return request_threaded_irq(txgbe->gpio_irq, NULL,
- txgbe_gpio_irq_handler,
- IRQF_ONESHOT, "txgbe-gpio-irq", txgbe);
-}
-
static int txgbe_request_link_irq(struct txgbe *txgbe)
{
txgbe->link_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_LINK);
@@ -88,6 +89,14 @@ static int txgbe_request_link_irq(struct txgbe *txgbe)
IRQF_ONESHOT, "txgbe-link-irq", txgbe);
}
+static int txgbe_request_gpio_irq(struct txgbe *txgbe)
+{
+ txgbe->gpio_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
+ return request_threaded_irq(txgbe->gpio_irq, NULL,
+ txgbe_gpio_irq_handler_aml,
+ IRQF_ONESHOT, "txgbe-gpio-irq", txgbe);
+}
+
static const struct irq_chip txgbe_irq_chip = {
.name = "txgbe-misc-irq",
};
@@ -117,8 +126,15 @@ static irqreturn_t txgbe_misc_irq_handle(int irq, void *data)
struct wx *wx = txgbe->wx;
u32 eicr;
- if (wx->pdev->msix_enabled)
+ if (wx->pdev->msix_enabled) {
+ eicr = wx_misc_isb(wx, WX_ISB_MISC);
+ txgbe->eicr = eicr;
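+ /* handle VF mailbox requests here and re-arm the misc vector; the
+ * remaining causes cached in txgbe->eicr are processed by the
+ * threaded handler
+ */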
+ if (eicr & TXGBE_PX_MISC_IC_VF_MBOX) {
+ wx_msg_task(txgbe->wx);
+ wx_intr_enable(wx, TXGBE_INTR_MISC(wx));
+ }
return IRQ_WAKE_THREAD;
+ }
eicr = wx_misc_isb(wx, WX_ISB_VEC0);
if (!eicr) {
@@ -137,6 +153,8 @@ static irqreturn_t txgbe_misc_irq_handle(int irq, void *data)
q_vector = wx->q_vector[0];
napi_schedule_irqoff(&q_vector->napi);
+ txgbe->eicr = wx_misc_isb(wx, WX_ISB_MISC);
+
return IRQ_WAKE_THREAD;
}
@@ -148,20 +166,24 @@ static irqreturn_t txgbe_misc_irq_thread_fn(int irq, void *data)
unsigned int sub_irq;
u32 eicr;
- eicr = wx_misc_isb(wx, WX_ISB_MISC);
+ eicr = txgbe->eicr;
+ if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN |
+ TXGBE_PX_MISC_ETH_AN)) {
+ sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_LINK);
+ handle_nested_irq(sub_irq);
+ nhandled++;
+ }
if (eicr & TXGBE_PX_MISC_GPIO) {
sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
handle_nested_irq(sub_irq);
nhandled++;
}
- if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN |
- TXGBE_PX_MISC_ETH_AN)) {
- sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_LINK);
- handle_nested_irq(sub_irq);
+ if (unlikely(eicr & TXGBE_PX_MISC_IC_TIMESYNC)) {
+ wx_ptp_check_pps_event(wx);
nhandled++;
}
- wx_intr_enable(wx, TXGBE_INTR_MISC);
+ wx_intr_enable(wx, TXGBE_INTR_MISC(wx));
return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
}
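
The two hunks above split the misc interrupt into a hard handler that latches the cause bits into txgbe->eicr and a threaded handler that demultiplexes them into nested sub-interrupts. A minimal sketch of that request_threaded_irq() pattern follows; struct my_dev, the my_* functions and the 0x1 cause bit are illustrative stand-ins, not driver symbols.

#include <linux/interrupt.h>
#include <linux/irq.h>

struct my_dev {
	u32 cached_cause;        /* latched in hard-IRQ context */
	unsigned int virq_link;  /* nested sub-interrupt to hand off to */
};

static irqreturn_t my_hard_handler(int irq, void *data)
{
	struct my_dev *dev = data;

	dev->cached_cause = 0x1; /* a real driver reads this from hardware */
	return IRQ_WAKE_THREAD;  /* defer the slow part to the IRQ thread */
}

static irqreturn_t my_thread_fn(int irq, void *data)
{
	struct my_dev *dev = data;

	if (dev->cached_cause & 0x1)
		handle_nested_irq(dev->virq_link); /* run the nested handler */
	return IRQ_HANDLED;
}

static int my_request(struct my_dev *dev, unsigned int irq)
{
	return request_threaded_irq(irq, my_hard_handler, my_thread_fn,
				    IRQF_ONESHOT, "my-misc-irq", dev);
}
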
@@ -179,10 +201,13 @@ static void txgbe_del_irq_domain(struct txgbe *txgbe)
void txgbe_free_misc_irq(struct txgbe *txgbe)
{
- free_irq(txgbe->gpio_irq, txgbe);
+ if (txgbe->wx->mac.type != wx_mac_sp)
+ free_irq(txgbe->gpio_irq, txgbe);
+
free_irq(txgbe->link_irq, txgbe);
free_irq(txgbe->misc.irq, txgbe);
txgbe_del_irq_domain(txgbe);
+ txgbe->wx->misc_irq_domain = false;
}
int txgbe_setup_misc_irq(struct txgbe *txgbe)
@@ -191,9 +216,9 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
struct wx *wx = txgbe->wx;
int hwirq, err;
- txgbe->misc.nirqs = 2;
- txgbe->misc.domain = irq_domain_add_simple(NULL, txgbe->misc.nirqs, 0,
- &txgbe_misc_irq_domain_ops, txgbe);
+ txgbe->misc.nirqs = TXGBE_IRQ_MAX;
+ txgbe->misc.domain = irq_domain_create_simple(NULL, txgbe->misc.nirqs, 0,
+ &txgbe_misc_irq_domain_ops, txgbe);
if (!txgbe->misc.domain)
return -ENOMEM;
@@ -216,20 +241,24 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
if (err)
goto del_misc_irq;
- err = txgbe_request_gpio_irq(txgbe);
+ err = txgbe_request_link_irq(txgbe);
if (err)
goto free_msic_irq;
- err = txgbe_request_link_irq(txgbe);
+ if (wx->mac.type == wx_mac_sp)
+ goto skip_sp_irq;
+
+ err = txgbe_request_gpio_irq(txgbe);
if (err)
- goto free_gpio_irq;
+ goto free_link_irq;
+skip_sp_irq:
wx->misc_irq_domain = true;
return 0;
-free_gpio_irq:
- free_irq(txgbe->gpio_irq, txgbe);
+free_link_irq:
+ free_irq(txgbe->link_irq, txgbe);
free_msic_irq:
free_irq(txgbe->misc.irq, txgbe);
del_misc_irq:
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index 93180225a6f1..0de051450a82 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -8,15 +8,20 @@
#include <linux/string.h>
#include <linux/etherdevice.h>
#include <linux/phylink.h>
+#include <net/udp_tunnel.h>
#include <net/ip.h>
#include <linux/if_vlan.h>
#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
+#include "../libwx/wx_ptp.h"
#include "../libwx/wx_hw.h"
+#include "../libwx/wx_mbx.h"
+#include "../libwx/wx_sriov.h"
#include "txgbe_type.h"
#include "txgbe_hw.h"
#include "txgbe_phy.h"
+#include "txgbe_aml.h"
#include "txgbe_irq.h"
#include "txgbe_fdir.h"
#include "txgbe_ethtool.h"
@@ -34,6 +39,12 @@ char txgbe_driver_name[] = "txgbe";
static const struct pci_device_id txgbe_pci_tbl[] = {
{ PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_SP1000), 0},
{ PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_WX1820), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5010), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5110), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5025), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5125), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5040), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5140), 0},
/* required last entry */
{ .device = 0 }
};
@@ -78,11 +89,62 @@ static int txgbe_enumerate_functions(struct wx *wx)
return physfns;
}
+static void txgbe_module_detection_subtask(struct wx *wx)
+{
+ int err;
+
+ if (!test_bit(WX_FLAG_NEED_MODULE_RESET, wx->flags))
+ return;
+
+ /* wait for SFF module ready */
+ msleep(200);
+
+ err = txgbe_identify_module(wx);
+ if (err)
+ return;
+
+ clear_bit(WX_FLAG_NEED_MODULE_RESET, wx->flags);
+}
+
+static void txgbe_link_config_subtask(struct wx *wx)
+{
+ int err;
+
+ if (!test_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags))
+ return;
+
+ err = txgbe_set_phy_link(wx);
+ if (err)
+ return;
+
+ clear_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags);
+}
+
+/**
+ * txgbe_service_task - manages and runs subtasks
+ * @work: pointer to work_struct containing our data
+ **/
+static void txgbe_service_task(struct work_struct *work)
+{
+ struct wx *wx = container_of(work, struct wx, service_task);
+
+ txgbe_module_detection_subtask(wx);
+ txgbe_link_config_subtask(wx);
+
+ wx_service_event_complete(wx);
+}
+
+static void txgbe_init_service(struct wx *wx)
+{
+ timer_setup(&wx->service_timer, wx_service_timer, 0);
+ INIT_WORK(&wx->service_task, txgbe_service_task);
+ clear_bit(WX_STATE_SERVICE_SCHED, wx->state);
+}
+
static void txgbe_up_complete(struct wx *wx)
{
struct net_device *netdev = wx->netdev;
- txgbe_reinit_gpio_intr(wx);
wx_control_hw(wx, true);
wx_configure_vectors(wx);
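
For the service mechanism introduced in the hunk above (txgbe_init_service() arming wx->service_timer and wx->service_task), here is a minimal sketch of the generic timer-plus-workqueue pattern it relies on; struct my_dev and the my_* names are assumed placeholders, and the one-second re-arm interval is only an example.

#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_dev {
	struct timer_list service_timer;
	struct work_struct service_task;
};

static void my_timer_cb(struct timer_list *t)
{
	struct my_dev *dev = container_of(t, struct my_dev, service_timer);

	schedule_work(&dev->service_task);            /* defer to process context */
	mod_timer(&dev->service_timer, jiffies + HZ); /* re-arm, e.g. once a second */
}

static void my_service_task(struct work_struct *work)
{
	/* sleepable housekeeping (module detection, link config, ...) */
}

static void my_init_service(struct my_dev *dev)
{
	timer_setup(&dev->service_timer, my_timer_cb, 0);
	INIT_WORK(&dev->service_task, my_service_task);
	mod_timer(&dev->service_timer, jiffies);      /* kick the first run */
}
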
@@ -90,7 +152,23 @@ static void txgbe_up_complete(struct wx *wx)
smp_mb__before_atomic();
wx_napi_enable_all(wx);
- phylink_start(wx->phylink);
+ switch (wx->mac.type) {
+ case wx_mac_aml40:
+ txgbe_setup_link(wx);
+ phylink_start(wx->phylink);
+ break;
+ case wx_mac_aml:
+ /* Enable TX laser */
+ wr32m(wx, WX_GPIO_DR, TXGBE_GPIOBIT_1, 0);
+ txgbe_setup_link(wx);
+ phylink_start(wx->phylink);
+ break;
+ case wx_mac_sp:
+ phylink_start(wx->phylink);
+ break;
+ default:
+ break;
+ }
/* clear any pending interrupts, may auto mask */
rd32(wx, WX_PX_IC(0));
@@ -100,6 +178,13 @@ static void txgbe_up_complete(struct wx *wx)
/* enable transmits */
netif_tx_start_all_queues(netdev);
+ mod_timer(&wx->service_timer, jiffies);
+
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ wr32m(wx, WX_CFG_PORT_CTL, WX_CFG_PORT_CTL_PFRSTD,
+ WX_CFG_PORT_CTL_PFRSTD);
+ /* update setting rx tx for all active vfs */
+ wx_set_all_vfs(wx);
}
static void txgbe_reset(struct wx *wx)
@@ -117,6 +202,9 @@ static void txgbe_reset(struct wx *wx)
memcpy(old_addr, &wx->mac_table[0].addr, netdev->addr_len);
wx_flush_sw_mac_table(wx);
wx_mac_set_default_filter(wx, old_addr);
+
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset(wx);
}
static void txgbe_disable_device(struct wx *wx)
@@ -139,12 +227,24 @@ static void txgbe_disable_device(struct wx *wx)
wx_irq_disable(wx);
wx_napi_disable_all(wx);
+ timer_delete_sync(&wx->service_timer);
+
if (wx->bus.func < 2)
wr32m(wx, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN_UP(wx->bus.func), 0);
else
wx_err(wx, "%s: invalid bus lan id %d\n",
__func__, wx->bus.func);
+ if (wx->num_vfs) {
+ /* Clear EITR Select mapping */
+ wr32(wx, WX_PX_ITRSEL, 0);
+ /* Mark all the VFs as inactive */
+ for (i = 0; i < wx->num_vfs; i++)
+ wx->vfinfo[i].clear_to_send = 0;
+ /* update setting rx tx for all active vfs */
+ wx_set_all_vfs(wx);
+ }
+
if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) ||
((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) {
/* disable mac transmiter */
@@ -168,7 +268,22 @@ void txgbe_down(struct wx *wx)
{
txgbe_disable_device(wx);
txgbe_reset(wx);
- phylink_stop(wx->phylink);
+
+ switch (wx->mac.type) {
+ case wx_mac_aml40:
+ phylink_stop(wx->phylink);
+ break;
+ case wx_mac_aml:
+ phylink_stop(wx->phylink);
+ /* Disable TX laser */
+ wr32m(wx, WX_GPIO_DR, TXGBE_GPIOBIT_1, TXGBE_GPIOBIT_1);
+ break;
+ case wx_mac_sp:
+ phylink_stop(wx->phylink);
+ break;
+ default:
+ break;
+ }
wx_clean_all_tx_rings(wx);
wx_clean_all_rx_rings(wx);
@@ -177,6 +292,7 @@ void txgbe_down(struct wx *wx)
void txgbe_up(struct wx *wx)
{
wx_configure(wx);
+ wx_ptp_init(wx);
txgbe_up_complete(wx);
}
@@ -193,6 +309,16 @@ static void txgbe_init_type_code(struct wx *wx)
case TXGBE_DEV_ID_WX1820:
wx->mac.type = wx_mac_sp;
break;
+ case TXGBE_DEV_ID_AML5010:
+ case TXGBE_DEV_ID_AML5110:
+ case TXGBE_DEV_ID_AML5025:
+ case TXGBE_DEV_ID_AML5125:
+ wx->mac.type = wx_mac_aml;
+ break;
+ case TXGBE_DEV_ID_AML5040:
+ case TXGBE_DEV_ID_AML5140:
+ wx->mac.type = wx_mac_aml40;
+ break;
default:
wx->mac.type = wx_mac_unknown;
break;
@@ -200,25 +326,25 @@ static void txgbe_init_type_code(struct wx *wx)
switch (device_type) {
case TXGBE_ID_SFP:
- wx->media_type = sp_media_fiber;
+ wx->media_type = wx_media_fiber;
break;
case TXGBE_ID_XAUI:
case TXGBE_ID_SGMII:
- wx->media_type = sp_media_copper;
+ wx->media_type = wx_media_copper;
break;
case TXGBE_ID_KR_KX_KX4:
case TXGBE_ID_MAC_XAUI:
case TXGBE_ID_MAC_SGMII:
- wx->media_type = sp_media_backplane;
+ wx->media_type = wx_media_backplane;
break;
case TXGBE_ID_SFI_XAUI:
if (wx->bus.func == 0)
- wx->media_type = sp_media_fiber;
+ wx->media_type = wx_media_fiber;
else
- wx->media_type = sp_media_copper;
+ wx->media_type = wx_media_copper;
break;
default:
- wx->media_type = sp_media_unknown;
+ wx->media_type = wx_media_unknown;
break;
}
}
@@ -232,13 +358,13 @@ static int txgbe_sw_init(struct wx *wx)
u16 msix_count = 0;
int err;
- wx->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES;
- wx->mac.max_tx_queues = TXGBE_SP_MAX_TX_QUEUES;
- wx->mac.max_rx_queues = TXGBE_SP_MAX_RX_QUEUES;
- wx->mac.mcft_size = TXGBE_SP_MC_TBL_SIZE;
- wx->mac.vft_size = TXGBE_SP_VFT_TBL_SIZE;
- wx->mac.rx_pb_size = TXGBE_SP_RX_PB_SIZE;
- wx->mac.tx_pb_size = TXGBE_SP_TDB_PB_SZ;
+ wx->mac.num_rar_entries = TXGBE_RAR_ENTRIES;
+ wx->mac.max_tx_queues = TXGBE_MAX_TXQ;
+ wx->mac.max_rx_queues = TXGBE_MAX_RXQ;
+ wx->mac.mcft_size = TXGBE_MC_TBL_SIZE;
+ wx->mac.vft_size = TXGBE_VFT_TBL_SIZE;
+ wx->mac.rx_pb_size = TXGBE_RX_PB_SIZE;
+ wx->mac.tx_pb_size = TXGBE_TDB_PB_SZ;
/* PCI config space info */
err = wx_sw_init(wx);
@@ -266,19 +392,41 @@ static int txgbe_sw_init(struct wx *wx)
wx->atr = txgbe_atr;
wx->configure_fdir = txgbe_configure_fdir;
+ set_bit(WX_FLAG_RSC_CAPABLE, wx->flags);
+ set_bit(WX_FLAG_RSC_ENABLED, wx->flags);
+ set_bit(WX_FLAG_MULTI_64_FUNC, wx->flags);
+
/* enable itr by default in dynamic mode */
+ wx->adaptive_itr = true;
wx->rx_itr_setting = 1;
wx->tx_itr_setting = 1;
/* set default ring sizes */
wx->tx_ring_count = TXGBE_DEFAULT_TXD;
wx->rx_ring_count = TXGBE_DEFAULT_RXD;
+ wx->mbx.size = WX_VXMAILBOX_SIZE;
/* set default work limits */
wx->tx_work_limit = TXGBE_DEFAULT_TX_WORK;
wx->rx_work_limit = TXGBE_DEFAULT_RX_WORK;
+ wx->setup_tc = txgbe_setup_tc;
wx->do_reset = txgbe_do_reset;
+ set_bit(0, &wx->fwd_bitmask);
+
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ break;
+ case wx_mac_aml:
+ case wx_mac_aml40:
+ set_bit(WX_FLAG_RX_MERGE_ENABLED, wx->flags);
+ set_bit(WX_FLAG_TXHEAD_WB_ENABLED, wx->flags);
+ set_bit(WX_FLAG_SWFW_RING, wx->flags);
+ wx->swfw_index = 0;
+ break;
+ default:
+ break;
+ }
return 0;
}
@@ -309,10 +457,14 @@ static int txgbe_open(struct net_device *netdev)
wx_configure(wx);
- err = txgbe_request_queue_irqs(wx);
+ err = txgbe_setup_misc_irq(wx->priv);
if (err)
goto err_free_resources;
+ err = txgbe_request_queue_irqs(wx);
+ if (err)
+ goto err_free_misc_irq;
+
/* Notify the stack of the actual queue counts. */
err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues);
if (err)
@@ -322,12 +474,17 @@ static int txgbe_open(struct net_device *netdev)
if (err)
goto err_free_irq;
+ wx_ptp_init(wx);
+
txgbe_up_complete(wx);
return 0;
err_free_irq:
wx_free_irq(wx);
+err_free_misc_irq:
+ txgbe_free_misc_irq(wx->priv);
+ wx_reset_interrupt_capability(wx);
err_free_resources:
wx_free_resources(wx);
err_reset:
@@ -345,6 +502,7 @@ err_reset:
*/
static void txgbe_close_suspend(struct wx *wx)
{
+ wx_ptp_suspend(wx);
txgbe_disable_device(wx);
wx_free_resources(wx);
}
@@ -364,8 +522,10 @@ static int txgbe_close(struct net_device *netdev)
{
struct wx *wx = netdev_priv(netdev);
+ wx_ptp_stop(wx);
txgbe_down(wx);
wx_free_irq(wx);
+ txgbe_free_misc_irq(wx->priv);
wx_free_resources(wx);
txgbe_fdir_filter_exit(wx);
wx_control_hw(wx, false);
@@ -411,7 +571,6 @@ static void txgbe_shutdown(struct pci_dev *pdev)
int txgbe_setup_tc(struct net_device *dev, u8 tc)
{
struct wx *wx = netdev_priv(dev);
- struct txgbe *txgbe = wx->priv;
/* Hardware has to reinitialize queues and interrupts to
* match packet buffer alignment. Unfortunately, the
@@ -422,7 +581,6 @@ int txgbe_setup_tc(struct net_device *dev, u8 tc)
else
txgbe_reset(wx);
- txgbe_free_misc_irq(txgbe);
wx_clear_interrupt_scheme(wx);
if (tc)
@@ -431,7 +589,6 @@ int txgbe_setup_tc(struct net_device *dev, u8 tc)
netdev_reset_tc(dev);
wx_init_interrupt_scheme(wx);
- txgbe_setup_misc_irq(txgbe);
if (netif_running(dev))
txgbe_open(dev);
@@ -467,6 +624,39 @@ void txgbe_do_reset(struct net_device *netdev)
txgbe_reset(wx);
}
+static int txgbe_udp_tunnel_sync(struct net_device *dev, unsigned int table)
+{
+ struct wx *wx = netdev_priv(dev);
+ struct udp_tunnel_info ti;
+
+ udp_tunnel_nic_get_port(dev, table, 0, &ti);
+ switch (ti.type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ wr32(wx, TXGBE_CFG_VXLAN, ntohs(ti.port));
+ break;
+ case UDP_TUNNEL_TYPE_VXLAN_GPE:
+ wr32(wx, TXGBE_CFG_VXLAN_GPE, ntohs(ti.port));
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ wr32(wx, TXGBE_CFG_GENEVE, ntohs(ti.port));
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static const struct udp_tunnel_nic_info txgbe_udp_tunnels = {
+ .sync_table = txgbe_udp_tunnel_sync,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .tables = {
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, },
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
+ },
+};
+
static const struct net_device_ops txgbe_netdev_ops = {
.ndo_open = txgbe_open,
.ndo_stop = txgbe_close,
@@ -475,11 +665,14 @@ static const struct net_device_ops txgbe_netdev_ops = {
.ndo_set_rx_mode = wx_set_rx_mode,
.ndo_set_features = wx_set_features,
.ndo_fix_features = wx_fix_features,
+ .ndo_features_check = wx_features_check,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = wx_set_mac,
.ndo_get_stats64 = wx_get_stats64,
.ndo_vlan_rx_add_vid = wx_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = wx_vlan_rx_kill_vid,
+ .ndo_hwtstamp_set = wx_hwtstamp_set,
+ .ndo_hwtstamp_get = wx_hwtstamp_get,
};
/**
@@ -553,14 +746,19 @@ static int txgbe_probe(struct pci_dev *pdev,
goto err_pci_release_regions;
}
+ /* The sapphire supports up to 63 VFs per PF, but the physical
+ * function also needs one pool for basic networking.
+ */
+ pci_sriov_set_totalvfs(pdev, TXGBE_MAX_VFS_DRV_LIMIT);
wx->driver_name = txgbe_driver_name;
txgbe_set_ethtool_ops(netdev);
netdev->netdev_ops = &txgbe_netdev_ops;
+ netdev->udp_tunnel_nic_info = &txgbe_udp_tunnels;
/* setup the private structure */
err = txgbe_sw_init(wx);
if (err)
- goto err_free_mac_table;
+ goto err_pci_release_regions;
/* check if flash load is done after hw power up */
err = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_PERST);
@@ -601,6 +799,9 @@ static int txgbe_probe(struct pci_dev *pdev,
netdev->features |= NETIF_F_HIGHDMA;
netdev->hw_features |= NETIF_F_GRO;
netdev->features |= NETIF_F_GRO;
+ netdev->hw_features |= NETIF_F_LRO;
+ netdev->features |= NETIF_F_LRO;
+ netdev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
@@ -622,9 +823,11 @@ static int txgbe_probe(struct pci_dev *pdev,
eth_hw_addr_set(netdev, wx->mac.perm_addr);
wx_mac_set_default_filter(wx, wx->mac.perm_addr);
+ txgbe_init_service(wx);
+
err = wx_init_interrupt_scheme(wx);
if (err)
- goto err_free_mac_table;
+ goto err_cancel_service;
/* Save off EEPROM version number and Option Rom version which
* together make a unique identify for the eeprom
@@ -667,6 +870,13 @@ static int txgbe_probe(struct pci_dev *pdev,
if (etrack_id < 0x20010)
dev_warn(&pdev->dev, "Please upgrade the firmware to 0x20010 or above.\n");
+ err = txgbe_test_hostif(wx);
+ if (err != 0) {
+ dev_err(&pdev->dev, "Mismatched Firmware version\n");
+ err = -EIO;
+ goto err_release_hw;
+ }
+
txgbe = devm_kzalloc(&pdev->dev, sizeof(*txgbe), GFP_KERNEL);
if (!txgbe) {
err = -ENOMEM;
@@ -678,13 +888,9 @@ static int txgbe_probe(struct pci_dev *pdev,
txgbe_init_fdir(txgbe);
- err = txgbe_setup_misc_irq(txgbe);
- if (err)
- goto err_release_hw;
-
err = txgbe_init_phy(txgbe);
if (err)
- goto err_free_misc_irq;
+ goto err_release_hw;
err = register_netdev(netdev);
if (err)
@@ -712,12 +918,14 @@ static int txgbe_probe(struct pci_dev *pdev,
err_remove_phy:
txgbe_remove_phy(txgbe);
-err_free_misc_irq:
- txgbe_free_misc_irq(txgbe);
err_release_hw:
wx_clear_interrupt_scheme(wx);
wx_control_hw(wx, false);
+err_cancel_service:
+ timer_delete_sync(&wx->service_timer);
+ cancel_work_sync(&wx->service_task);
err_free_mac_table:
+ kfree(wx->rss_key);
kfree(wx->mac_table);
err_pci_release_regions:
pci_release_selected_regions(pdev,
@@ -742,11 +950,13 @@ static void txgbe_remove(struct pci_dev *pdev)
struct txgbe *txgbe = wx->priv;
struct net_device *netdev;
+ cancel_work_sync(&wx->service_task);
+
netdev = wx->netdev;
+ wx_disable_sriov(wx);
unregister_netdev(netdev);
txgbe_remove_phy(txgbe);
- txgbe_free_misc_irq(txgbe);
wx_free_isb_resources(wx);
pci_release_selected_regions(pdev,
@@ -765,11 +975,12 @@ static struct pci_driver txgbe_driver = {
.probe = txgbe_probe,
.remove = txgbe_remove,
.shutdown = txgbe_shutdown,
+ .sriov_configure = wx_pci_sriov_configure,
};
module_pci_driver(txgbe_driver);
MODULE_DEVICE_TABLE(pci, txgbe_pci_tbl);
MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, <software@trustnetic.com>");
-MODULE_DESCRIPTION("WangXun(R) 10 Gigabit PCI Express Network Driver");
+MODULE_DESCRIPTION("WangXun(R) 10/25/40 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
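
The .sriov_configure hook added above is what lets userspace create VFs through sysfs (writing a count to sriov_numvfs). A rough sketch of what such a callback typically does, using hypothetical my_* names rather than the wx_pci_sriov_configure() implementation:

#include <linux/pci.h>

static int my_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	if (num_vfs == 0) {
		pci_disable_sriov(pdev);           /* tear down all VFs */
		return 0;
	}

	/* pci_enable_sriov() returns 0 on success; the hook must then
	 * report back how many VFs were actually enabled.
	 */
	return pci_enable_sriov(pdev, num_vfs) ? : num_vfs;
}
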
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index a0e4920b4761..8ea7aa07ae4e 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -15,8 +15,12 @@
#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
+#include "../libwx/wx_ptp.h"
+#include "../libwx/wx_sriov.h"
+#include "../libwx/wx_mbx.h"
#include "../libwx/wx_hw.h"
#include "txgbe_type.h"
+#include "txgbe_aml.h"
#include "txgbe_phy.h"
#include "txgbe_hw.h"
@@ -162,7 +166,7 @@ static struct phylink_pcs *txgbe_phylink_mac_select(struct phylink_config *confi
struct wx *wx = phylink_to_wx(config);
struct txgbe *txgbe = wx->priv;
- if (interface == PHY_INTERFACE_MODE_10GBASER)
+ if (wx->media_type != wx_media_copper)
return txgbe->pcs;
return NULL;
@@ -179,6 +183,12 @@ static void txgbe_mac_link_down(struct phylink_config *config,
struct wx *wx = phylink_to_wx(config);
wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0);
+
+ wx->speed = SPEED_UNKNOWN;
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset_cyclecounter(wx);
+ /* ping all the active vfs to let them know we are going down */
+ wx_ping_all_vfs_with_link_status(wx, false);
}
static void txgbe_mac_link_up(struct phylink_config *config,
@@ -215,6 +225,13 @@ static void txgbe_mac_link_up(struct phylink_config *config,
wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
wdg = rd32(wx, WX_MAC_WDG_TIMEOUT);
wr32(wx, WX_MAC_WDG_TIMEOUT, wdg);
+
+ wx->speed = speed;
+ wx->last_rx_ptp_check = jiffies;
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset_cyclecounter(wx);
+ /* ping all the active vfs to let them know we are going up */
+ wx_ping_all_vfs_with_link_status(wx, true);
}
static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode,
@@ -262,7 +279,7 @@ static int txgbe_phylink_init(struct txgbe *txgbe)
config->mac_capabilities = MAC_10000FD | MAC_1000FD | MAC_100FD |
MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
- if (wx->media_type == sp_media_copper) {
+ if (wx->media_type == wx_media_copper) {
phy_mode = PHY_INTERFACE_MODE_XAUI;
__set_bit(PHY_INTERFACE_MODE_XAUI, config->supported_interfaces);
} else {
@@ -302,7 +319,10 @@ irqreturn_t txgbe_link_irq_handler(int irq, void *data)
status = rd32(wx, TXGBE_CFG_PORT_ST);
up = !!(status & TXGBE_CFG_PORT_ST_LINK_UP);
- phylink_pcs_change(txgbe->pcs, up);
+ if (txgbe->pcs)
+ phylink_pcs_change(txgbe->pcs, up);
+ else
+ phylink_mac_change(wx->phylink, up);
return IRQ_HANDLED;
}
@@ -358,169 +378,8 @@ static int txgbe_gpio_direction_out(struct gpio_chip *chip, unsigned int offset,
return 0;
}
-static void txgbe_gpio_irq_ack(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- irq_hw_number_t hwirq = irqd_to_hwirq(d);
- struct wx *wx = gpiochip_get_data(gc);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&wx->gpio_lock, flags);
- wr32(wx, WX_GPIO_EOI, BIT(hwirq));
- raw_spin_unlock_irqrestore(&wx->gpio_lock, flags);
-}
-
-static void txgbe_gpio_irq_mask(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- irq_hw_number_t hwirq = irqd_to_hwirq(d);
- struct wx *wx = gpiochip_get_data(gc);
- unsigned long flags;
-
- gpiochip_disable_irq(gc, hwirq);
-
- raw_spin_lock_irqsave(&wx->gpio_lock, flags);
- wr32m(wx, WX_GPIO_INTMASK, BIT(hwirq), BIT(hwirq));
- raw_spin_unlock_irqrestore(&wx->gpio_lock, flags);
-}
-
-static void txgbe_gpio_irq_unmask(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- irq_hw_number_t hwirq = irqd_to_hwirq(d);
- struct wx *wx = gpiochip_get_data(gc);
- unsigned long flags;
-
- gpiochip_enable_irq(gc, hwirq);
-
- raw_spin_lock_irqsave(&wx->gpio_lock, flags);
- wr32m(wx, WX_GPIO_INTMASK, BIT(hwirq), 0);
- raw_spin_unlock_irqrestore(&wx->gpio_lock, flags);
-}
-
-static void txgbe_toggle_trigger(struct gpio_chip *gc, unsigned int offset)
-{
- struct wx *wx = gpiochip_get_data(gc);
- u32 pol, val;
-
- pol = rd32(wx, WX_GPIO_POLARITY);
- val = rd32(wx, WX_GPIO_EXT);
-
- if (val & BIT(offset))
- pol &= ~BIT(offset);
- else
- pol |= BIT(offset);
-
- wr32(wx, WX_GPIO_POLARITY, pol);
-}
-
-static int txgbe_gpio_set_type(struct irq_data *d, unsigned int type)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- irq_hw_number_t hwirq = irqd_to_hwirq(d);
- struct wx *wx = gpiochip_get_data(gc);
- u32 level, polarity, mask;
- unsigned long flags;
-
- mask = BIT(hwirq);
-
- if (type & IRQ_TYPE_LEVEL_MASK) {
- level = 0;
- irq_set_handler_locked(d, handle_level_irq);
- } else {
- level = mask;
- irq_set_handler_locked(d, handle_edge_irq);
- }
-
- if (type == IRQ_TYPE_EDGE_RISING || type == IRQ_TYPE_LEVEL_HIGH)
- polarity = mask;
- else
- polarity = 0;
-
- raw_spin_lock_irqsave(&wx->gpio_lock, flags);
-
- wr32m(wx, WX_GPIO_INTEN, mask, mask);
- wr32m(wx, WX_GPIO_INTTYPE_LEVEL, mask, level);
- if (type == IRQ_TYPE_EDGE_BOTH)
- txgbe_toggle_trigger(gc, hwirq);
- else
- wr32m(wx, WX_GPIO_POLARITY, mask, polarity);
-
- raw_spin_unlock_irqrestore(&wx->gpio_lock, flags);
-
- return 0;
-}
-
-static const struct irq_chip txgbe_gpio_irq_chip = {
- .name = "txgbe-gpio-irq",
- .irq_ack = txgbe_gpio_irq_ack,
- .irq_mask = txgbe_gpio_irq_mask,
- .irq_unmask = txgbe_gpio_irq_unmask,
- .irq_set_type = txgbe_gpio_set_type,
- .flags = IRQCHIP_IMMUTABLE,
- GPIOCHIP_IRQ_RESOURCE_HELPERS,
-};
-
-irqreturn_t txgbe_gpio_irq_handler(int irq, void *data)
-{
- struct txgbe *txgbe = data;
- struct wx *wx = txgbe->wx;
- irq_hw_number_t hwirq;
- unsigned long gpioirq;
- struct gpio_chip *gc;
- unsigned long flags;
-
- gpioirq = rd32(wx, WX_GPIO_INTSTATUS);
-
- gc = txgbe->gpio;
- for_each_set_bit(hwirq, &gpioirq, gc->ngpio) {
- int gpio = irq_find_mapping(gc->irq.domain, hwirq);
- struct irq_data *d = irq_get_irq_data(gpio);
- u32 irq_type = irq_get_trigger_type(gpio);
-
- txgbe_gpio_irq_ack(d);
- handle_nested_irq(gpio);
-
- if ((irq_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
- raw_spin_lock_irqsave(&wx->gpio_lock, flags);
- txgbe_toggle_trigger(gc, hwirq);
- raw_spin_unlock_irqrestore(&wx->gpio_lock, flags);
- }
- }
-
- return IRQ_HANDLED;
-}
-
-void txgbe_reinit_gpio_intr(struct wx *wx)
-{
- struct txgbe *txgbe = wx->priv;
- irq_hw_number_t hwirq;
- unsigned long gpioirq;
- struct gpio_chip *gc;
- unsigned long flags;
-
- /* for gpio interrupt pending before irq enable */
- gpioirq = rd32(wx, WX_GPIO_INTSTATUS);
-
- gc = txgbe->gpio;
- for_each_set_bit(hwirq, &gpioirq, gc->ngpio) {
- int gpio = irq_find_mapping(gc->irq.domain, hwirq);
- struct irq_data *d = irq_get_irq_data(gpio);
- u32 irq_type = irq_get_trigger_type(gpio);
-
- txgbe_gpio_irq_ack(d);
-
- if ((irq_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
- raw_spin_lock_irqsave(&wx->gpio_lock, flags);
- txgbe_toggle_trigger(gc, hwirq);
- raw_spin_unlock_irqrestore(&wx->gpio_lock, flags);
- }
- }
-}
-
static int txgbe_gpio_init(struct txgbe *txgbe)
{
- struct gpio_irq_chip *girq;
struct gpio_chip *gc;
struct device *dev;
struct wx *wx;
@@ -550,11 +409,6 @@ static int txgbe_gpio_init(struct txgbe *txgbe)
gc->direction_input = txgbe_gpio_direction_in;
gc->direction_output = txgbe_gpio_direction_out;
- girq = &gc->irq;
- gpio_irq_chip_set_chip(girq, &txgbe_gpio_irq_chip);
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_bad_irq;
-
ret = devm_gpiochip_add_data(dev, gc, wx);
if (ret)
return ret;
@@ -723,8 +577,17 @@ int txgbe_init_phy(struct txgbe *txgbe)
struct wx *wx = txgbe->wx;
int ret;
- if (txgbe->wx->media_type == sp_media_copper)
- return txgbe_ext_phy_init(txgbe);
+ switch (wx->mac.type) {
+ case wx_mac_aml40:
+ case wx_mac_aml:
+ return txgbe_phylink_init_aml(txgbe);
+ case wx_mac_sp:
+ if (wx->media_type == wx_media_copper)
+ return txgbe_ext_phy_init(txgbe);
+ break;
+ default:
+ break;
+ }
ret = txgbe_swnodes_register(txgbe);
if (ret) {
@@ -787,10 +650,20 @@ err_unregister_swnode:
void txgbe_remove_phy(struct txgbe *txgbe)
{
- if (txgbe->wx->media_type == sp_media_copper) {
- phylink_disconnect_phy(txgbe->wx->phylink);
+ switch (txgbe->wx->mac.type) {
+ case wx_mac_aml40:
+ case wx_mac_aml:
phylink_destroy(txgbe->wx->phylink);
return;
+ case wx_mac_sp:
+ if (txgbe->wx->media_type == wx_media_copper) {
+ phylink_disconnect_phy(txgbe->wx->phylink);
+ phylink_destroy(txgbe->wx->phylink);
+ return;
+ }
+ break;
+ default:
+ break;
}
platform_device_unregister(txgbe->sfp_dev);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
index 8a026d804fe2..a32b19d71ea2 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
@@ -4,10 +4,8 @@
#ifndef _TXGBE_PHY_H_
#define _TXGBE_PHY_H_
-irqreturn_t txgbe_gpio_irq_handler(int irq, void *data);
-void txgbe_reinit_gpio_intr(struct wx *wx);
irqreturn_t txgbe_link_irq_handler(int irq, void *data);
int txgbe_init_phy(struct txgbe *txgbe);
void txgbe_remove_phy(struct txgbe *txgbe);
-#endif /* _TXGBE_NODE_H_ */
+#endif /* _TXGBE_PHY_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index cc3a7b62fe9e..82433e9cb0e3 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -6,10 +6,18 @@
#include <linux/property.h>
#include <linux/irq.h>
+#include <linux/phy.h>
+#include "../libwx/wx_type.h"
/* Device IDs */
#define TXGBE_DEV_ID_SP1000 0x1001
#define TXGBE_DEV_ID_WX1820 0x2001
+#define TXGBE_DEV_ID_AML5010 0x5010
+#define TXGBE_DEV_ID_AML5110 0x5110
+#define TXGBE_DEV_ID_AML5025 0x5025
+#define TXGBE_DEV_ID_AML5125 0x5125
+#define TXGBE_DEV_ID_AML5040 0x5040
+#define TXGBE_DEV_ID_AML5140 0x5140
/* Subsystem IDs */
/* SFP */
@@ -44,6 +52,8 @@
/**************** SP Registers ****************************/
/* chip control Registers */
+#define TXGBE_MIS_RST 0x1000C
+#define TXGBE_MIS_RST_MAC_RST(_i) BIT(20 - (_i) * 3)
#define TXGBE_MIS_PRB_CTL 0x10010
#define TXGBE_MIS_PRB_CTL_LAN_UP(_i) BIT(1 - (_i))
/* FMGR Registers */
@@ -56,6 +66,11 @@
#define TXGBE_TS_CTL 0x10300
#define TXGBE_TS_CTL_EVAL_MD BIT(31)
+/* MAC Misc Registers */
+#define TXGBE_MAC_MISC_CTL 0x11F00
+#define TXGBE_MAC_MISC_CTL_LINK_STS_MOD BIT(0)
+#define TXGBE_MAC_MISC_CTL_LINK_PCS FIELD_PREP(BIT(0), 0)
+#define TXGBE_MAC_MISC_CTL_LINK_BOTH FIELD_PREP(BIT(0), 1)
/* GPIO register bit */
#define TXGBE_GPIOBIT_0 BIT(0) /* I:tx fault */
#define TXGBE_GPIOBIT_1 BIT(1) /* O:tx disabled */
@@ -67,20 +82,28 @@
/* Extended Interrupt Enable Set */
#define TXGBE_PX_MISC_ETH_LKDN BIT(8)
#define TXGBE_PX_MISC_DEV_RST BIT(10)
+#define TXGBE_PX_MISC_IC_TIMESYNC BIT(11)
#define TXGBE_PX_MISC_ETH_EVENT BIT(17)
#define TXGBE_PX_MISC_ETH_LK BIT(18)
#define TXGBE_PX_MISC_ETH_AN BIT(19)
#define TXGBE_PX_MISC_INT_ERR BIT(20)
+#define TXGBE_PX_MISC_IC_VF_MBOX BIT(23)
#define TXGBE_PX_MISC_GPIO BIT(26)
#define TXGBE_PX_MISC_IEN_MASK \
(TXGBE_PX_MISC_ETH_LKDN | TXGBE_PX_MISC_DEV_RST | \
TXGBE_PX_MISC_ETH_EVENT | TXGBE_PX_MISC_ETH_LK | \
- TXGBE_PX_MISC_ETH_AN | TXGBE_PX_MISC_INT_ERR | \
- TXGBE_PX_MISC_GPIO)
+ TXGBE_PX_MISC_ETH_AN | TXGBE_PX_MISC_INT_ERR | \
+ TXGBE_PX_MISC_IC_VF_MBOX | TXGBE_PX_MISC_IC_TIMESYNC)
/* Port cfg registers */
#define TXGBE_CFG_PORT_ST 0x14404
#define TXGBE_CFG_PORT_ST_LINK_UP BIT(0)
+#define TXGBE_CFG_PORT_ST_LINK_AML_40G BIT(2)
+#define TXGBE_CFG_PORT_ST_LINK_AML_25G BIT(3)
+#define TXGBE_CFG_PORT_ST_LINK_AML_10G BIT(4)
+#define TXGBE_CFG_VXLAN 0x14410
+#define TXGBE_CFG_VXLAN_GPE 0x14414
+#define TXGBE_CFG_GENEVE 0x14418
/* I2C registers */
#define TXGBE_I2C_BASE 0x14900
@@ -138,6 +161,17 @@
#define TXGBE_RDB_FDIR_FLEX_CFG_MSK BIT(2)
#define TXGBE_RDB_FDIR_FLEX_CFG_OFST(v) FIELD_PREP(GENMASK(7, 3), v)
+/*************************** Amber Lite Registers ****************************/
+#define TXGBE_PX_PF_BME 0x4B8
+#define TXGBE_AML_MAC_TX_CFG 0x11000
+#define TXGBE_AML_MAC_TX_CFG_TE BIT(0)
+#define TXGBE_AML_MAC_TX_CFG_SPEED_MASK GENMASK(30, 27)
+#define TXGBE_AML_MAC_TX_CFG_SPEED_40G FIELD_PREP(GENMASK(30, 27), 0)
+#define TXGBE_AML_MAC_TX_CFG_SPEED_25G FIELD_PREP(GENMASK(30, 27), 2)
+#define TXGBE_AML_MAC_TX_CFG_SPEED_10G FIELD_PREP(GENMASK(30, 27), 8)
+#define TXGBE_RDM_RSC_CTL 0x1200C
+#define TXGBE_RDM_RSC_CTL_FREE_CTL BIT(7)
+
/* Checksum and EEPROM pointers */
#define TXGBE_EEPROM_LAST_WORD 0x800
#define TXGBE_EEPROM_CHECKSUM 0x2F
@@ -145,6 +179,8 @@
#define TXGBE_EEPROM_VERSION_L 0x1D
#define TXGBE_EEPROM_VERSION_H 0x1E
#define TXGBE_ISCSI_BOOT_CONFIG 0x07
+#define TXGBE_EEPROM_I2C_SRART_PTR 0x580
+#define TXGBE_EEPROM_I2C_END_PTR 0x800
#define TXGBE_MAX_MSIX_VECTORS 64
#define TXGBE_MAX_FDIR_INDICES 63
@@ -153,13 +189,15 @@
#define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
#define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
-#define TXGBE_SP_MAX_TX_QUEUES 128
-#define TXGBE_SP_MAX_RX_QUEUES 128
-#define TXGBE_SP_RAR_ENTRIES 128
-#define TXGBE_SP_MC_TBL_SIZE 128
-#define TXGBE_SP_VFT_TBL_SIZE 128
-#define TXGBE_SP_RX_PB_SIZE 512
-#define TXGBE_SP_TDB_PB_SZ (160 * 1024) /* 160KB Packet Buffer */
+#define TXGBE_MAX_TXQ 128
+#define TXGBE_MAX_RXQ 128
+#define TXGBE_RAR_ENTRIES 128
+#define TXGBE_MC_TBL_SIZE 128
+#define TXGBE_VFT_TBL_SIZE 128
+#define TXGBE_RX_PB_SIZE 512
+#define TXGBE_TDB_PB_SZ (160 * 1024) /* 160KB Packet Buffer */
+
+#define TXGBE_MAX_VFS_DRV_LIMIT 63
#define TXGBE_DEFAULT_ATR_SAMPLE_RATE 20
@@ -250,7 +288,7 @@ struct txgbe_fdir_filter {
struct hlist_node fdir_node;
union txgbe_atr_input filter;
u16 sw_idx;
- u16 action;
+ u64 action;
};
/* TX/RX descriptor defines */
@@ -265,8 +303,8 @@ struct txgbe_fdir_filter {
#define TXGBE_DEFAULT_RX_WORK 128
#endif
-#define TXGBE_INTR_MISC BIT(0)
-#define TXGBE_INTR_QALL(A) GENMASK((A)->num_q_vectors, 1)
+#define TXGBE_INTR_MISC(A) BIT((A)->num_q_vectors)
+#define TXGBE_INTR_QALL(A) (TXGBE_INTR_MISC(A) - 1)
#define TXGBE_MAX_EITR GENMASK(11, 3)
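
To make the remapped interrupt macros above concrete: with the misc vector now placed after the queue vectors, the enable masks fall out as in this small userspace sketch (the value 4 for num_q_vectors is just an assumed example).

#include <stdio.h>

#define BIT(n)            (1U << (n))
#define INTR_MISC(nqv)    BIT(nqv)             /* misc vector sits after the queue vectors */
#define INTR_QALL(nqv)    (INTR_MISC(nqv) - 1) /* all queue vector bits below it */

int main(void)
{
	unsigned int num_q_vectors = 4; /* assumed example */

	printf("misc enable mask:  0x%x\n", INTR_MISC(num_q_vectors));  /* 0x10 */
	printf("queue enable mask: 0x%x\n", INTR_QALL(num_q_vectors));  /* 0x0f */
	return 0;
}
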
@@ -277,6 +315,96 @@ void txgbe_up(struct wx *wx);
int txgbe_setup_tc(struct net_device *dev, u8 tc);
void txgbe_do_reset(struct net_device *netdev);
+#define TXGBE_LINK_SPEED_UNKNOWN 0
+#define TXGBE_LINK_SPEED_10GB_FULL 4
+#define TXGBE_LINK_SPEED_25GB_FULL 0x10
+#define TXGBE_LINK_SPEED_40GB_FULL 0x20
+
+#define TXGBE_SFF_IDENTIFIER_SFP 0x3
+#define TXGBE_SFF_IDENTIFIER_QSFP 0xC
+#define TXGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD
+#define TXGBE_SFF_IDENTIFIER_QSFP28 0x11
+#define TXGBE_SFF_DA_PASSIVE_CABLE 0x4
+#define TXGBE_SFF_DA_ACTIVE_CABLE 0x8
+#define TXGBE_SFF_DA_SPEC_ACTIVE_LIMIT 0x4
+#define TXGBE_SFF_FCPI4_LIMITING 0x3
+#define TXGBE_SFF_10GBASESR_CAPABLE 0x10
+#define TXGBE_SFF_10GBASELR_CAPABLE 0x20
+#define TXGBE_SFF_25GBASESR_CAPABLE 0x2
+#define TXGBE_SFF_25GBASELR_CAPABLE 0x3
+#define TXGBE_SFF_25GBASEER_CAPABLE 0x4
+#define TXGBE_SFF_25GBASECR_91FEC 0xB
+#define TXGBE_SFF_25GBASECR_74FEC 0xC
+#define TXGBE_SFF_25GBASECR_NOFEC 0xD
+#define TXGBE_SFF_ETHERNET_RSRVD BIT(7)
+#define TXGBE_SFF_ETHERNET_40G_CR4 BIT(3)
+#define TXGBE_SFF_ETHERNET_40G_SR4 BIT(2)
+#define TXGBE_SFF_ETHERNET_40G_LR4 BIT(1)
+#define TXGBE_SFF_ETHERNET_40G_ACTIVE BIT(0)
+#define TXGBE_SFF_ETHERNET_100G_CR4 0xB
+
+#define TXGBE_PHY_FEC_RS BIT(0)
+#define TXGBE_PHY_FEC_BASER BIT(1)
+#define TXGBE_PHY_FEC_OFF BIT(2)
+#define TXGBE_PHY_FEC_AUTO (TXGBE_PHY_FEC_OFF | \
+ TXGBE_PHY_FEC_BASER |\
+ TXGBE_PHY_FEC_RS)
+
+#define FW_PHY_GET_LINK_CMD 0xC0
+#define FW_PHY_SET_LINK_CMD 0xC1
+#define FW_GET_MODULE_INFO_CMD 0xC5
+#define FW_READ_EEPROM_CMD 0xC6
+
+struct txgbe_sff_id {
+ u8 identifier; /* A0H 0x00 */
+ u8 com_1g_code; /* A0H 0x06 */
+ u8 com_10g_code; /* A0H 0x03 */
+ u8 com_25g_code; /* A0H 0x24 */
+ u8 cable_spec; /* A0H 0x3C */
+ u8 cable_tech; /* A0H 0x08 */
+ u8 vendor_oui0; /* A0H 0x25 */
+ u8 vendor_oui1; /* A0H 0x26 */
+ u8 vendor_oui2; /* A0H 0x27 */
+ u8 transceiver_type; /* A0H 0x83 */
+ u8 sff_opt1; /* A0H 0xC0 */
+ u8 reserved[5];
+};
+
+struct txgbe_hic_get_module_info {
+ struct wx_hic_hdr hdr;
+ struct txgbe_sff_id id;
+};
+
+struct txgbe_hic_ephy_setlink {
+ struct wx_hic_hdr hdr;
+ u8 speed;
+ u8 duplex;
+ u8 autoneg;
+ u8 fec_mode;
+ u8 resv[4];
+};
+
+struct txgbe_hic_ephy_getlink {
+ struct wx_hic_hdr hdr;
+ u8 speed;
+ u8 duplex;
+ u8 autoneg;
+ u8 flow_ctl;
+ u8 power;
+ u8 fec_mode;
+ u8 resv[6];
+};
+
+struct txgbe_hic_i2c_read {
+ struct wx_hic_hdr hdr;
+ __be32 offset;
+ __be32 length;
+ u8 page;
+ u8 bank;
+ u8 i2c_address;
+ u8 resv;
+};
+
#define NODE_PROP(_NAME, _PROP) \
(const struct software_node) { \
.name = _NAME, \
@@ -313,8 +441,8 @@ struct txgbe_nodes {
};
enum txgbe_misc_irqs {
- TXGBE_IRQ_GPIO = 0,
- TXGBE_IRQ_LINK,
+ TXGBE_IRQ_LINK = 0,
+ TXGBE_IRQ_GPIO,
TXGBE_IRQ_MAX
};
@@ -335,14 +463,20 @@ struct txgbe {
struct clk_lookup *clock;
struct clk *clk;
struct gpio_chip *gpio;
- unsigned int gpio_irq;
unsigned int link_irq;
+ unsigned int gpio_irq;
+ u32 eicr;
/* flow director */
struct hlist_head fdir_filter_list;
union txgbe_atr_input fdir_mask;
int fdir_filter_count;
spinlock_t fdir_perfect_lock; /* spinlock for FDIR */
+
+ DECLARE_PHY_INTERFACE_MASK(link_interfaces);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(link_support);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+ u8 link_port;
};
#endif /* _TXGBE_TYPE_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbevf/Makefile b/drivers/net/ethernet/wangxun/txgbevf/Makefile
new file mode 100644
index 000000000000..4c7e6de04424
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbevf/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd.
+#
+# Makefile for the Wangxun(R) 10/25/40GbE virtual functions driver
+#
+
+obj-$(CONFIG_TXGBEVF) += txgbevf.o
+
+txgbevf-objs := txgbevf_main.o
diff --git a/drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c b/drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c
new file mode 100644
index 000000000000..37e4ec487afd
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/string.h>
+#include <linux/etherdevice.h>
+
+#include "../libwx/wx_type.h"
+#include "../libwx/wx_hw.h"
+#include "../libwx/wx_lib.h"
+#include "../libwx/wx_mbx.h"
+#include "../libwx/wx_vf.h"
+#include "../libwx/wx_vf_common.h"
+#include "../libwx/wx_ethtool.h"
+#include "txgbevf_type.h"
+
+/* txgbevf_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static const struct pci_device_id txgbevf_pci_tbl[] = {
+ { PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_SP1000), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_WX1820), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML500F), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML510F), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML5024), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML5124), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML503F), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBEVF_DEV_ID_AML513F), 0},
+ /* required last entry */
+ { .device = 0 }
+};
+
+static const struct net_device_ops txgbevf_netdev_ops = {
+ .ndo_open = wxvf_open,
+ .ndo_stop = wxvf_close,
+ .ndo_start_xmit = wx_xmit_frame,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = wx_set_mac_vf,
+};
+
+static void txgbevf_set_num_queues(struct wx *wx)
+{
+ u32 def_q = 0, num_tcs = 0;
+ u16 rss, queue;
+ int ret = 0;
+
+ /* Start with base case */
+ wx->num_rx_queues = 1;
+ wx->num_tx_queues = 1;
+
+ spin_lock_bh(&wx->mbx.mbx_lock);
+ /* fetch queue configuration from the PF */
+ ret = wx_get_queues_vf(wx, &num_tcs, &def_q);
+ spin_unlock_bh(&wx->mbx.mbx_lock);
+
+ if (ret)
+ return;
+
+ /* we need as many queues as traffic classes */
+ if (num_tcs > 1) {
+ wx->num_rx_queues = num_tcs;
+ } else {
+ rss = min_t(u16, num_online_cpus(), TXGBEVF_MAX_RSS_NUM);
+ queue = min_t(u16, wx->mac.max_rx_queues, wx->mac.max_tx_queues);
+ rss = min_t(u16, queue, rss);
+
+ if (wx->vfinfo->vf_api >= wx_mbox_api_13) {
+ wx->num_rx_queues = rss;
+ wx->num_tx_queues = rss;
+ }
+ }
+}
+
+static void txgbevf_init_type_code(struct wx *wx)
+{
+ switch (wx->device_id) {
+ case TXGBEVF_DEV_ID_SP1000:
+ case TXGBEVF_DEV_ID_WX1820:
+ wx->mac.type = wx_mac_sp;
+ break;
+ case TXGBEVF_DEV_ID_AML500F:
+ case TXGBEVF_DEV_ID_AML510F:
+ case TXGBEVF_DEV_ID_AML5024:
+ case TXGBEVF_DEV_ID_AML5124:
+ case TXGBEVF_DEV_ID_AML503F:
+ case TXGBEVF_DEV_ID_AML513F:
+ wx->mac.type = wx_mac_aml;
+ break;
+ default:
+ wx->mac.type = wx_mac_unknown;
+ break;
+ }
+}
+
+static int txgbevf_sw_init(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ struct pci_dev *pdev = wx->pdev;
+ int err;
+
+ /* Initialize pcie info and common capability flags */
+ err = wx_sw_init(wx);
+ if (err < 0)
+ goto err_wx_sw_init;
+
+ /* Initialize the mailbox */
+ err = wx_init_mbx_params_vf(wx);
+ if (err)
+ goto err_init_mbx_params;
+
+ /* max q_vectors */
+ wx->mac.max_msix_vectors = TXGBEVF_MAX_MSIX_VECTORS;
+ /* Initialize the device type */
+ txgbevf_init_type_code(wx);
+ /* lock to protect mailbox accesses */
+ spin_lock_init(&wx->mbx.mbx_lock);
+
+ err = wx_reset_hw_vf(wx);
+ if (err) {
+ wx_err(wx, "PF still in reset state. Is the PF interface up?\n");
+ goto err_reset_hw;
+ }
+ wx_init_hw_vf(wx);
+ wx_negotiate_api_vf(wx);
+ if (is_zero_ether_addr(wx->mac.addr))
+ dev_info(&pdev->dev,
+ "MAC address not assigned by administrator.\n");
+ eth_hw_addr_set(netdev, wx->mac.addr);
+
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ dev_info(&pdev->dev, "Assigning random MAC address\n");
+ eth_hw_addr_random(netdev);
+ ether_addr_copy(wx->mac.addr, netdev->dev_addr);
+ ether_addr_copy(wx->mac.perm_addr, netdev->dev_addr);
+ }
+
+ wx->mac.max_tx_queues = TXGBEVF_MAX_TX_QUEUES;
+ wx->mac.max_rx_queues = TXGBEVF_MAX_RX_QUEUES;
+ /* Enable dynamic interrupt throttling rates */
+ wx->adaptive_itr = true;
+ wx->rx_itr_setting = 1;
+ wx->tx_itr_setting = 1;
+ /* set default ring sizes */
+ wx->tx_ring_count = TXGBEVF_DEFAULT_TXD;
+ wx->rx_ring_count = TXGBEVF_DEFAULT_RXD;
+ /* set default work limits */
+ wx->tx_work_limit = TXGBEVF_DEFAULT_TX_WORK;
+ wx->rx_work_limit = TXGBEVF_DEFAULT_RX_WORK;
+
+ wx->set_num_queues = txgbevf_set_num_queues;
+
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ break;
+ case wx_mac_aml:
+ case wx_mac_aml40:
+ set_bit(WX_FLAG_RX_MERGE_ENABLED, wx->flags);
+ set_bit(WX_FLAG_TXHEAD_WB_ENABLED, wx->flags);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+err_reset_hw:
+ kfree(wx->vfinfo);
+err_init_mbx_params:
+ kfree(wx->rss_key);
+ kfree(wx->mac_table);
+err_wx_sw_init:
+ return err;
+}
+
+/**
+ * txgbevf_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in txgbevf_pci_tbl
+ *
+ * Return: return 0 on success, negative on failure
+ *
+ * txgbevf_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int txgbevf_probe(struct pci_dev *pdev,
+ const struct pci_device_id __always_unused *ent)
+{
+ struct net_device *netdev;
+ struct wx *wx = NULL;
+ int err;
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_pci_disable_dev;
+ }
+
+ err = pci_request_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM),
+ dev_driver_string(&pdev->dev));
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_request_selected_regions failed 0x%x\n", err);
+ goto err_pci_disable_dev;
+ }
+
+ pci_set_master(pdev);
+
+ netdev = devm_alloc_etherdev_mqs(&pdev->dev,
+ sizeof(struct wx),
+ TXGBEVF_MAX_TX_QUEUES,
+ TXGBEVF_MAX_RX_QUEUES);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_pci_release_regions;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ wx = netdev_priv(netdev);
+ wx->netdev = netdev;
+ wx->pdev = pdev;
+
+ wx->msg_enable = netif_msg_init(-1, NETIF_MSG_DRV |
+ NETIF_MSG_PROBE | NETIF_MSG_LINK);
+ wx->hw_addr = devm_ioremap(&pdev->dev,
+ pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!wx->hw_addr) {
+ err = -EIO;
+ goto err_pci_release_regions;
+ }
+
+ wx->b4_addr = devm_ioremap(&pdev->dev,
+ pci_resource_start(pdev, 4),
+ pci_resource_len(pdev, 4));
+ if (!wx->b4_addr) {
+ err = -EIO;
+ goto err_pci_release_regions;
+ }
+
+ wx->driver_name = KBUILD_MODNAME;
+ wx_set_ethtool_ops_vf(netdev);
+ netdev->netdev_ops = &txgbevf_netdev_ops;
+
+ /* setup the private structure */
+ err = txgbevf_sw_init(wx);
+ if (err)
+ goto err_pci_release_regions;
+
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ eth_hw_addr_set(netdev, wx->mac.perm_addr);
+ ether_addr_copy(netdev->perm_addr, wx->mac.addr);
+
+ wxvf_init_service(wx);
+ err = wx_init_interrupt_scheme(wx);
+ if (err)
+ goto err_free_sw_init;
+
+ wx_get_fw_version_vf(wx);
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
+ pci_set_drvdata(pdev, wx);
+ netif_tx_stop_all_queues(netdev);
+
+ return 0;
+
+err_register:
+ wx_clear_interrupt_scheme(wx);
+err_free_sw_init:
+ timer_delete_sync(&wx->service_timer);
+ cancel_work_sync(&wx->service_task);
+ kfree(wx->vfinfo);
+ kfree(wx->rss_key);
+ kfree(wx->mac_table);
+err_pci_release_regions:
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_disable_dev:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * txgbevf_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * txgbevf_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void txgbevf_remove(struct pci_dev *pdev)
+{
+ wxvf_remove(pdev);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(txgbevf_pm_ops, wxvf_suspend, wxvf_resume);
+
+static struct pci_driver txgbevf_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = txgbevf_pci_tbl,
+ .probe = txgbevf_probe,
+ .remove = txgbevf_remove,
+ .shutdown = wxvf_shutdown,
+ /* Power Management Hooks */
+ .driver.pm = pm_sleep_ptr(&txgbevf_pm_ops)
+};
+
+module_pci_driver(txgbevf_driver);
+
+MODULE_DEVICE_TABLE(pci, txgbevf_pci_tbl);
+MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, <software@trustnetic.com>");
+MODULE_DESCRIPTION("WangXun(R) 10/25/40 Gigabit Virtual Function Network Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/wangxun/txgbevf/txgbevf_type.h b/drivers/net/ethernet/wangxun/txgbevf/txgbevf_type.h
new file mode 100644
index 000000000000..1364d2b58bb0
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbevf/txgbevf_type.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _TXGBEVF_TYPE_H_
+#define _TXGBEVF_TYPE_H_
+
+/* Device IDs */
+#define TXGBEVF_DEV_ID_SP1000 0x1000
+#define TXGBEVF_DEV_ID_WX1820 0x2000
+#define TXGBEVF_DEV_ID_AML500F 0x500F
+#define TXGBEVF_DEV_ID_AML510F 0x510F
+#define TXGBEVF_DEV_ID_AML5024 0x5024
+#define TXGBEVF_DEV_ID_AML5124 0x5124
+#define TXGBEVF_DEV_ID_AML503F 0x503f
+#define TXGBEVF_DEV_ID_AML513F 0x513f
+
+#define TXGBEVF_MAX_MSIX_VECTORS 2
+#define TXGBEVF_MAX_RSS_NUM 4
+#define TXGBEVF_MAX_RX_QUEUES 4
+#define TXGBEVF_MAX_TX_QUEUES 4
+#define TXGBEVF_DEFAULT_TXD 128
+#define TXGBEVF_DEFAULT_RXD 128
+#define TXGBEVF_DEFAULT_TX_WORK 256
+#define TXGBEVF_DEFAULT_RX_WORK 256
+
+#endif /* _TXGBEVF_TYPE_H_ */
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index b77f096eaf99..c5424d882135 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -1142,7 +1142,7 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops,
if (err < 0)
goto err_register;
- priv->xfer_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
+ priv->xfer_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_PERCPU, 0,
netdev_name(ndev));
if (!priv->xfer_wq) {
err = -ENOMEM;
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 35d96c633a33..7502214cc7d5 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -28,6 +28,7 @@ config XILINX_AXI_EMAC
depends on HAS_IOMEM
depends on XILINX_DMA
select PHYLINK
+ select DIMLIB
help
This driver supports the 10/100/1000 Ethernet from Xilinx for the
AXI bus interface used in Xilinx Virtex FPGAs and Soc's.
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index edb36ff07a0c..711ed9c2631b 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1309,7 +1309,7 @@ ll_temac_ethtools_set_ringparam(struct net_device *ndev,
if (ering->rx_pending > RX_BD_NUM_MAX ||
ering->rx_mini_pending ||
ering->rx_jumbo_pending ||
- ering->rx_pending > TX_BD_NUM_MAX)
+ ering->tx_pending > TX_BD_NUM_MAX)
return -EINVAL;
if (netif_running(ndev))
@@ -1595,7 +1595,7 @@ static int temac_probe(struct platform_device *pdev)
if (temac_np) {
lp->phy_node = of_parse_phandle(temac_np, "phy-handle", 0);
if (lp->phy_node)
- dev_dbg(lp->dev, "using PHY node %pOF\n", temac_np);
+ dev_dbg(lp->dev, "using PHY node %pOF\n", lp->phy_node);
} else if (pdata) {
snprintf(lp->phy_name, sizeof(lp->phy_name),
PHY_ID_FMT, lp->mii_bus->id, pdata->phy_addr);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index d64b8abcf018..5ff742103beb 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -9,6 +9,7 @@
#ifndef XILINX_AXIENET_H
#define XILINX_AXIENET_H
+#include <linux/dim.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
@@ -112,19 +113,18 @@
#define XAXIDMA_DELAY_MASK 0xFF000000 /* Delay timeout counter */
#define XAXIDMA_COALESCE_MASK 0x00FF0000 /* Coalesce counter */
-#define XAXIDMA_DELAY_SHIFT 24
-#define XAXIDMA_COALESCE_SHIFT 16
-
#define XAXIDMA_IRQ_IOC_MASK 0x00001000 /* Completion intr */
#define XAXIDMA_IRQ_DELAY_MASK 0x00002000 /* Delay interrupt */
#define XAXIDMA_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */
#define XAXIDMA_IRQ_ALL_MASK 0x00007000 /* All interrupts */
+/* Constant to convert delay counts to microseconds */
+#define XAXIDMA_DELAY_SCALE (125ULL * USEC_PER_SEC)
+
/* Default TX/RX Threshold and delay timer values for SGDMA mode */
#define XAXIDMA_DFT_TX_THRESHOLD 24
#define XAXIDMA_DFT_TX_USEC 50
-#define XAXIDMA_DFT_RX_THRESHOLD 1
-#define XAXIDMA_DFT_RX_USEC 50
+#define XAXIDMA_DFT_RX_USEC 16
#define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */
#define XAXIDMA_BD_CTRL_TXEOF_MASK 0x04000000 /* Last tx packet */
@@ -484,7 +484,12 @@ struct skbuf_dma_descriptor {
* @regs: Base address for the axienet_local device address space
* @dma_regs: Base address for the axidma device address space
* @napi_rx: NAPI RX control structure
+ * @rx_dim: DIM state for the receive queue
+ * @rx_dim_enabled: Whether DIM is enabled or not
+ * @rx_irqs: Number of interrupts
+ * @rx_cr_lock: Lock protecting @rx_dma_cr, its register, and @rx_dma_started
* @rx_dma_cr: Nominal content of RX DMA control register
+ * @rx_dma_started: Set when RX DMA is started
* @rx_bd_v: Virtual address of the RX buffer descriptor ring
* @rx_bd_p: Physical address(start address) of the RX buffer descr. ring
* @rx_bd_num: Size of RX buffer descriptor ring
@@ -494,7 +499,9 @@ struct skbuf_dma_descriptor {
* @rx_bytes: RX byte count for statistics
* @rx_stat_sync: Synchronization object for RX stats
* @napi_tx: NAPI TX control structure
+ * @tx_cr_lock: Lock protecting @tx_dma_cr, its register, and @tx_dma_started
* @tx_dma_cr: Nominal content of TX DMA control register
+ * @tx_dma_started: Set when TX DMA is started
* @tx_bd_v: Virtual address of the TX buffer descriptor ring
* @tx_bd_p: Physical address(start address) of the TX buffer descr. ring
* @tx_bd_num: Size of TX buffer descriptor ring
@@ -529,10 +536,6 @@ struct skbuf_dma_descriptor {
* supported, the maximum frame size would be 9k. Else it is
* 1522 bytes (assuming support for basic VLAN)
* @rxmem: Stores rx memory size for jumbo frame handling.
- * @coalesce_count_rx: Store the irq coalesce on RX side.
- * @coalesce_usec_rx: IRQ coalesce delay for RX
- * @coalesce_count_tx: Store the irq coalesce on TX side.
- * @coalesce_usec_tx: IRQ coalesce delay for TX
* @use_dmaengine: flag to check dmaengine framework usage.
* @tx_chan: TX DMA channel.
* @rx_chan: RX DMA channel.
@@ -566,7 +569,12 @@ struct axienet_local {
void __iomem *dma_regs;
struct napi_struct napi_rx;
+ struct dim rx_dim;
+ bool rx_dim_enabled;
+ u16 rx_irqs;
+ spinlock_t rx_cr_lock;
u32 rx_dma_cr;
+ bool rx_dma_started;
struct axidma_bd *rx_bd_v;
dma_addr_t rx_bd_p;
u32 rx_bd_num;
@@ -576,7 +584,9 @@ struct axienet_local {
struct u64_stats_sync rx_stat_sync;
struct napi_struct napi_tx;
+ spinlock_t tx_cr_lock;
u32 tx_dma_cr;
+ bool tx_dma_started;
struct axidma_bd *tx_bd_v;
dma_addr_t tx_bd_p;
u32 tx_bd_num;
@@ -607,10 +617,6 @@ struct axienet_local {
u32 max_frm_size;
u32 rxmem;
- u32 coalesce_count_rx;
- u32 coalesce_usec_rx;
- u32 coalesce_count_tx;
- u32 coalesce_usec_tx;
u8 use_dmaengine;
struct dma_chan *tx_chan;
struct dma_chan *rx_chan;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 273ec5f70005..284031fb2e2c 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -223,26 +223,64 @@ static void axienet_dma_bd_release(struct net_device *ndev)
lp->rx_bd_p);
}
+static u64 axienet_dma_rate(struct axienet_local *lp)
+{
+ if (lp->axi_clk)
+ return clk_get_rate(lp->axi_clk);
+ return 125000000; /* arbitrary guess if no clock rate set */
+}
+
/**
- * axienet_usec_to_timer - Calculate IRQ delay timer value
- * @lp: Pointer to the axienet_local structure
- * @coalesce_usec: Microseconds to convert into timer value
+ * axienet_calc_cr() - Calculate control register value
+ * @lp: Device private data
+ * @count: Number of completions before an interrupt
+ * @usec: Microseconds after the last completion before an interrupt
+ *
+ * Calculate a control register value based on the coalescing settings. The
+ * run/stop bit is not set.
+ *
+ * Return: Control register value with coalescing settings configured.
*/
-static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
+static u32 axienet_calc_cr(struct axienet_local *lp, u32 count, u32 usec)
{
- u32 result;
- u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */
+ u32 cr;
- if (lp->axi_clk)
- clk_rate = clk_get_rate(lp->axi_clk);
+ cr = FIELD_PREP(XAXIDMA_COALESCE_MASK, count) | XAXIDMA_IRQ_IOC_MASK |
+ XAXIDMA_IRQ_ERROR_MASK;
+ /* Only set interrupt delay timer if not generating an interrupt on
+ * the first packet. Otherwise leave at 0 to disable delay interrupt.
+ */
+ if (count > 1) {
+ u64 clk_rate = axienet_dma_rate(lp);
+ u32 timer;
+
+ /* 1 Timeout Interval = 125 * (clock period of SG clock) */
+ timer = DIV64_U64_ROUND_CLOSEST((u64)usec * clk_rate,
+ XAXIDMA_DELAY_SCALE);
+
+ timer = min(timer, FIELD_MAX(XAXIDMA_DELAY_MASK));
+ cr |= FIELD_PREP(XAXIDMA_DELAY_MASK, timer) |
+ XAXIDMA_IRQ_DELAY_MASK;
+ }
+
+ return cr;
+}
- /* 1 Timeout Interval = 125 * (clock period of SG clock) */
- result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
- (u64)125000000);
- if (result > 255)
- result = 255;
+/**
+ * axienet_coalesce_params() - Extract coalesce parameters from the CR
+ * @lp: Device private data
+ * @cr: The control register to parse
+ * @count: Number of packets before an interrupt
+ * @usec: Idle time (in usec) before an interrupt
+ */
+static void axienet_coalesce_params(struct axienet_local *lp, u32 cr,
+ u32 *count, u32 *usec)
+{
+ u64 clk_rate = axienet_dma_rate(lp);
+ u64 timer = FIELD_GET(XAXIDMA_DELAY_MASK, cr);
- return result;
+ *count = FIELD_GET(XAXIDMA_COALESCE_MASK, cr);
+ *usec = DIV64_U64_ROUND_CLOSEST(timer * XAXIDMA_DELAY_SCALE, clk_rate);
}
/**
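
The conversion in axienet_calc_cr() above maps a microsecond value to the 8-bit delay-timer field using the "1 timeout interval = 125 SG-clock periods" rule. A standalone sketch of that arithmetic, assuming a 100 MHz clock purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define DELAY_SCALE (125ULL * 1000000ULL) /* 125 clock periods per interval, usec per second */

static uint32_t usec_to_timer(uint64_t clk_rate, uint32_t usec)
{
	/* round-to-nearest division, clamped to the 8-bit hardware field */
	uint64_t timer = ((uint64_t)usec * clk_rate + DELAY_SCALE / 2) / DELAY_SCALE;

	return timer > 255 ? 255 : (uint32_t)timer;
}

int main(void)
{
	uint64_t clk_rate = 100000000ULL; /* assumed 100 MHz SG clock */

	/* 16 us * 100 MHz = 1600 clocks -> ~13 timeout intervals of 125 clocks */
	printf("timer field = %u\n", usec_to_timer(clk_rate, 16));
	return 0;
}
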
@@ -251,30 +289,12 @@ static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
*/
static void axienet_dma_start(struct axienet_local *lp)
{
+ spin_lock_irq(&lp->rx_cr_lock);
+
/* Start updating the Rx channel control register */
- lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
- XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
- /* Only set interrupt delay timer if not generating an interrupt on
- * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
- */
- if (lp->coalesce_count_rx > 1)
- lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
- << XAXIDMA_DELAY_SHIFT) |
- XAXIDMA_IRQ_DELAY_MASK;
+ lp->rx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
- /* Start updating the Tx channel control register */
- lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
- XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
- /* Only set interrupt delay timer if not generating an interrupt on
- * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
- */
- if (lp->coalesce_count_tx > 1)
- lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
- << XAXIDMA_DELAY_SHIFT) |
- XAXIDMA_IRQ_DELAY_MASK;
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
-
/* Populate the tail pointer and bring the Rx Axi DMA engine out of
* halted state. This will make the Rx side ready for reception.
*/
@@ -283,6 +303,14 @@ static void axienet_dma_start(struct axienet_local *lp)
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
(sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
+ lp->rx_dma_started = true;
+
+ spin_unlock_irq(&lp->rx_cr_lock);
+ spin_lock_irq(&lp->tx_cr_lock);
+
+ /* Start updating the Tx channel control register */
+ lp->tx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
@@ -291,6 +319,9 @@ static void axienet_dma_start(struct axienet_local *lp)
axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
+ lp->tx_dma_started = true;
+
+ spin_unlock_irq(&lp->tx_cr_lock);
}
/**
@@ -626,14 +657,22 @@ static void axienet_dma_stop(struct axienet_local *lp)
int count;
u32 cr, sr;
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+ spin_lock_irq(&lp->rx_cr_lock);
+
+ cr = lp->rx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ lp->rx_dma_started = false;
+
+ spin_unlock_irq(&lp->rx_cr_lock);
synchronize_irq(lp->rx_irq);
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+ spin_lock_irq(&lp->tx_cr_lock);
+
+ cr = lp->tx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ lp->tx_dma_started = false;
+
+ spin_unlock_irq(&lp->tx_cr_lock);
synchronize_irq(lp->tx_irq);
/* Give DMAs a chance to halt gracefully */
@@ -665,7 +704,8 @@ static void axienet_dma_stop(struct axienet_local *lp)
* are connected to Axi Ethernet reset lines, this in turn resets the Axi
* Ethernet core. No separate hardware reset is done for the Axi Ethernet
* core.
- * Returns 0 on success or a negative error number otherwise.
+ *
+ * Return: 0 on success or a negative error number otherwise.
*/
static int axienet_device_reset(struct net_device *ndev)
{
@@ -736,7 +776,8 @@ static int axienet_device_reset(struct net_device *ndev)
*
* Would either be called after a successful transmit operation, or after
* there was an error when setting up the chain.
- * Returns the number of packets handled.
+ *
+ * Return: The number of packets handled.
*/
static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
int nr_bds, bool force, u32 *sizep, int budget)
@@ -843,7 +884,7 @@ static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
dev_consume_skb_any(skbuf_dma->skb);
netif_txq_completed_wake(txq, 1, len,
CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
- 2 * MAX_SKB_FRAGS);
+ 2);
}
/**
@@ -877,7 +918,7 @@ axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
dma_dev = lp->tx_chan->device;
sg_len = skb_shinfo(skb)->nr_frags + 1;
- if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= sg_len) {
+ if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= 1) {
netif_stop_queue(ndev);
if (net_ratelimit())
netdev_warn(ndev, "TX ring unexpectedly full\n");
@@ -924,13 +965,13 @@ axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
skbuf_dma->sg_len = sg_len;
dma_tx_desc->callback_param = lp;
dma_tx_desc->callback_result = axienet_dma_tx_cb;
- dmaengine_submit(dma_tx_desc);
- dma_async_issue_pending(lp->tx_chan);
txq = skb_get_tx_queue(lp->ndev, skb);
netdev_tx_sent_queue(txq, skb->len);
netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
- MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS);
+ 1, 2);
+ dmaengine_submit(dma_tx_desc);
+ dma_async_issue_pending(lp->tx_chan);
return NETDEV_TX_OK;
xmit_error_unmap_sg:
@@ -965,6 +1006,7 @@ static int axienet_tx_poll(struct napi_struct *napi, int budget)
&size, budget);
if (packets) {
+ netdev_completed_queue(ndev, packets, size);
u64_stats_update_begin(&lp->tx_stat_sync);
u64_stats_add(&lp->tx_packets, packets);
u64_stats_add(&lp->tx_bytes, size);
@@ -982,7 +1024,9 @@ static int axienet_tx_poll(struct napi_struct *napi, int budget)
* cause an immediate interrupt if any TX packets are
* already pending.
*/
+ spin_lock_irq(&lp->tx_cr_lock);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
+ spin_unlock_irq(&lp->tx_cr_lock);
}
return packets;
}
@@ -1086,6 +1130,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (++new_tail_ptr >= lp->tx_bd_num)
new_tail_ptr = 0;
WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
+ netdev_sent_queue(ndev, skb->len);
/* Start the transfer */
axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
@@ -1119,6 +1164,7 @@ static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
struct axienet_local *lp = data;
struct sk_buff *skb;
u32 *app_metadata;
+ int i;
skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
skb = skbuf_dma->skb;
@@ -1126,6 +1172,15 @@ static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
&meta_max_len);
dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size,
DMA_FROM_DEVICE);
+
+ if (IS_ERR(app_metadata)) {
+ if (net_ratelimit())
+ netdev_err(lp->ndev, "Failed to get RX metadata pointer\n");
+ dev_kfree_skb_any(skb);
+ lp->ndev->stats.rx_dropped++;
+ goto rx_submit;
+ }
+
/* TODO: Derive app word index programmatically */
rx_len = (app_metadata[LEN_APP] & 0xFFFF);
skb_put(skb, rx_len);
@@ -1137,7 +1192,11 @@ static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
u64_stats_add(&lp->rx_packets, 1);
u64_stats_add(&lp->rx_bytes, rx_len);
u64_stats_update_end(&lp->rx_stat_sync);
- axienet_rx_submit_desc(lp->ndev);
+
+rx_submit:
+ for (i = 0; i < CIRC_SPACE(lp->rx_ring_head, lp->rx_ring_tail,
+ RX_BUF_NUM_DEFAULT); i++)
+ axienet_rx_submit_desc(lp->ndev);
dma_async_issue_pending(lp->rx_chan);
}
@@ -1244,11 +1303,25 @@ static int axienet_rx_poll(struct napi_struct *napi, int budget)
axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
if (packets < budget && napi_complete_done(napi, packets)) {
+ if (READ_ONCE(lp->rx_dim_enabled)) {
+ struct dim_sample sample = {
+ .time = ktime_get(),
+ /* Safe because we are the only writer */
+ .pkt_ctr = u64_stats_read(&lp->rx_packets),
+ .byte_ctr = u64_stats_read(&lp->rx_bytes),
+ .event_ctr = READ_ONCE(lp->rx_irqs),
+ };
+
+ net_dim(&lp->rx_dim, &sample);
+ }
+
/* Re-enable RX completion interrupts. This should
* cause an immediate interrupt if any RX packets are
* already pending.
*/
+ spin_lock_irq(&lp->rx_cr_lock);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
+ spin_unlock_irq(&lp->rx_cr_lock);
}
return packets;
}
@@ -1286,11 +1359,14 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
/* Disable further TX completion interrupts and schedule
* NAPI to handle the completions.
*/
- u32 cr = lp->tx_dma_cr;
-
- cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
if (napi_schedule_prep(&lp->napi_tx)) {
+ u32 cr;
+
+ spin_lock(&lp->tx_cr_lock);
+ cr = lp->tx_dma_cr;
+ cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ spin_unlock(&lp->tx_cr_lock);
__napi_schedule(&lp->napi_tx);
}
}
@@ -1331,11 +1407,16 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
/* Disable further RX completion interrupts and schedule
* NAPI receive.
*/
- u32 cr = lp->rx_dma_cr;
-
- cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
+ WRITE_ONCE(lp->rx_irqs, READ_ONCE(lp->rx_irqs) + 1);
if (napi_schedule_prep(&lp->napi_rx)) {
+ u32 cr;
+
+ spin_lock(&lp->rx_cr_lock);
+ cr = lp->rx_dma_cr;
+ cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ spin_unlock(&lp->rx_cr_lock);
+
__napi_schedule(&lp->napi_rx);
}
}
@@ -1394,7 +1475,6 @@ static void axienet_rx_submit_desc(struct net_device *ndev)
if (!skbuf_dma)
return;
- lp->rx_ring_head++;
skb = netdev_alloc_skb(ndev, lp->max_frm_size);
if (!skb)
return;
@@ -1419,6 +1499,7 @@ static void axienet_rx_submit_desc(struct net_device *ndev)
skbuf_dma->desc = dma_rx_desc;
dma_rx_desc->callback_param = lp;
dma_rx_desc->callback_result = axienet_dma_rx_cb;
+ lp->rx_ring_head++;
dmaengine_submit(dma_rx_desc);
return;
@@ -1628,6 +1709,7 @@ err_free_eth_irq:
if (lp->eth_irq > 0)
free_irq(lp->eth_irq, ndev);
err_phy:
+ cancel_work_sync(&lp->rx_dim.work);
cancel_delayed_work_sync(&lp->stats_work);
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
@@ -1657,6 +1739,7 @@ static int axienet_stop(struct net_device *ndev)
napi_disable(&lp->napi_rx);
}
+ cancel_work_sync(&lp->rx_dim.work);
cancel_delayed_work_sync(&lp->stats_work);
phylink_stop(lp->phylink);
@@ -1688,6 +1771,7 @@ static int axienet_stop(struct net_device *ndev)
dma_release_channel(lp->tx_chan);
}
+ netdev_reset_queue(ndev);
axienet_iow(lp, XAE_IE_OFFSET, 0);
if (lp->eth_irq > 0)
@@ -2002,6 +2086,89 @@ axienet_ethtools_set_pauseparam(struct net_device *ndev,
}
/**
+ * axienet_update_coalesce_rx() - Set RX CR
+ * @lp: Device private data
+ * @cr: Value to write to the RX CR
+ * @mask: Bits to set from @cr
+ */
+static void axienet_update_coalesce_rx(struct axienet_local *lp, u32 cr,
+ u32 mask)
+{
+ spin_lock_irq(&lp->rx_cr_lock);
+ lp->rx_dma_cr &= ~mask;
+ lp->rx_dma_cr |= cr;
+ /* If DMA isn't started, then the settings will be applied the next
+ * time dma_start() is called.
+ */
+ if (lp->rx_dma_started) {
+ u32 reg = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+
+ /* Don't enable IRQs if they are disabled by NAPI */
+ if (reg & XAXIDMA_IRQ_ALL_MASK)
+ cr = lp->rx_dma_cr;
+ else
+ cr = lp->rx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK;
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ }
+ spin_unlock_irq(&lp->rx_cr_lock);
+}
+
+/**
+ * axienet_dim_coalesce_count_rx() - RX coalesce count for DIM
+ * @lp: Device private data
+ *
+ * Return: RX coalescing frame count value for DIM.
+ */
+static u32 axienet_dim_coalesce_count_rx(struct axienet_local *lp)
+{
+ return min(1 << (lp->rx_dim.profile_ix << 1), 255);
+}
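
For reference, axienet_dim_coalesce_count_rx() above works out to 4^profile_ix capped at 255. A minimal stand-alone sketch of the resulting mapping (user-space C, illustrative only, assuming the usual 0..4 range of DIM profile indices):

	#include <stdio.h>

	int main(void)
	{
		for (unsigned int ix = 0; ix <= 4; ix++) {
			unsigned int frames = 1u << (ix << 1);	/* 4^ix */

			if (frames > 255)
				frames = 255;
			printf("profile_ix=%u -> %u frames\n", ix, frames);
		}
		return 0;	/* prints 1, 4, 16, 64, 255 */
	}
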
+
+/**
+ * axienet_rx_dim_work() - Adjust RX DIM settings
+ * @work: The work struct
+ */
+static void axienet_rx_dim_work(struct work_struct *work)
+{
+ struct axienet_local *lp =
+ container_of(work, struct axienet_local, rx_dim.work);
+ u32 cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp), 0);
+ u32 mask = XAXIDMA_COALESCE_MASK | XAXIDMA_IRQ_IOC_MASK |
+ XAXIDMA_IRQ_ERROR_MASK;
+
+ axienet_update_coalesce_rx(lp, cr, mask);
+ lp->rx_dim.state = DIM_START_MEASURE;
+}
+
+/**
+ * axienet_update_coalesce_tx() - Set TX CR
+ * @lp: Device private data
+ * @cr: Value to write to the TX CR
+ * @mask: Bits to set from @cr
+ */
+static void axienet_update_coalesce_tx(struct axienet_local *lp, u32 cr,
+ u32 mask)
+{
+ spin_lock_irq(&lp->tx_cr_lock);
+ lp->tx_dma_cr &= ~mask;
+ lp->tx_dma_cr |= cr;
+ /* If DMA isn't started, then the settings will be applied the next
+ * time dma_start() is called.
+ */
+ if (lp->tx_dma_started) {
+ u32 reg = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+
+ /* Don't enable IRQs if they are disabled by NAPI */
+ if (reg & XAXIDMA_IRQ_ALL_MASK)
+ cr = lp->tx_dma_cr;
+ else
+ cr = lp->tx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK;
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ }
+ spin_unlock_irq(&lp->tx_cr_lock);
+}
+
+/**
* axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
* @ndev: Pointer to net_device structure
* @ecoalesce: Pointer to ethtool_coalesce structure
@@ -2021,11 +2188,23 @@ axienet_ethtools_get_coalesce(struct net_device *ndev,
struct netlink_ext_ack *extack)
{
struct axienet_local *lp = netdev_priv(ndev);
-
- ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
- ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
- ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
- ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
+ u32 cr;
+
+ ecoalesce->use_adaptive_rx_coalesce = lp->rx_dim_enabled;
+
+ spin_lock_irq(&lp->rx_cr_lock);
+ cr = lp->rx_dma_cr;
+ spin_unlock_irq(&lp->rx_cr_lock);
+ axienet_coalesce_params(lp, cr,
+ &ecoalesce->rx_max_coalesced_frames,
+ &ecoalesce->rx_coalesce_usecs);
+
+ spin_lock_irq(&lp->tx_cr_lock);
+ cr = lp->tx_dma_cr;
+ spin_unlock_irq(&lp->tx_cr_lock);
+ axienet_coalesce_params(lp, cr,
+ &ecoalesce->tx_max_coalesced_frames,
+ &ecoalesce->tx_coalesce_usecs);
return 0;
}
@@ -2049,22 +2228,56 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
struct netlink_ext_ack *extack)
{
struct axienet_local *lp = netdev_priv(ndev);
+ bool new_dim = ecoalesce->use_adaptive_rx_coalesce;
+ bool old_dim = lp->rx_dim_enabled;
+ u32 cr, mask = ~XAXIDMA_CR_RUNSTOP_MASK;
- if (netif_running(ndev)) {
+ if (ecoalesce->rx_max_coalesced_frames > 255 ||
+ ecoalesce->tx_max_coalesced_frames > 255) {
+ NL_SET_ERR_MSG(extack, "frames must be less than 256");
+ return -EINVAL;
+ }
+
+ if (!ecoalesce->rx_max_coalesced_frames ||
+ !ecoalesce->tx_max_coalesced_frames) {
+ NL_SET_ERR_MSG(extack, "frames must be non-zero");
+ return -EINVAL;
+ }
+
+ if (((ecoalesce->rx_max_coalesced_frames > 1 || new_dim) &&
+ !ecoalesce->rx_coalesce_usecs) ||
+ (ecoalesce->tx_max_coalesced_frames > 1 &&
+ !ecoalesce->tx_coalesce_usecs)) {
NL_SET_ERR_MSG(extack,
- "Please stop netif before applying configuration");
- return -EBUSY;
+ "usecs must be non-zero when frames is greater than one");
+ return -EINVAL;
+ }
+
+ if (new_dim && !old_dim) {
+ cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
+ ecoalesce->rx_coalesce_usecs);
+ } else if (!new_dim) {
+ if (old_dim) {
+ WRITE_ONCE(lp->rx_dim_enabled, false);
+ napi_synchronize(&lp->napi_rx);
+ flush_work(&lp->rx_dim.work);
+ }
+
+ cr = axienet_calc_cr(lp, ecoalesce->rx_max_coalesced_frames,
+ ecoalesce->rx_coalesce_usecs);
+ } else {
+ /* Dummy value for count just to calculate timer */
+ cr = axienet_calc_cr(lp, 2, ecoalesce->rx_coalesce_usecs);
+ mask = XAXIDMA_DELAY_MASK | XAXIDMA_IRQ_DELAY_MASK;
}
- if (ecoalesce->rx_max_coalesced_frames)
- lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
- if (ecoalesce->rx_coalesce_usecs)
- lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
- if (ecoalesce->tx_max_coalesced_frames)
- lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
- if (ecoalesce->tx_coalesce_usecs)
- lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
+ axienet_update_coalesce_rx(lp, cr, mask);
+ if (new_dim && !old_dim)
+ WRITE_ONCE(lp->rx_dim_enabled, true);
+ cr = axienet_calc_cr(lp, ecoalesce->tx_max_coalesced_frames,
+ ecoalesce->tx_coalesce_usecs);
+ axienet_update_coalesce_tx(lp, cr, ~XAXIDMA_CR_RUNSTOP_MASK);
return 0;
}
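
With ETHTOOL_COALESCE_USE_ADAPTIVE_RX now advertised (see the ethtool_ops change below), adaptive RX coalescing can be toggled from user space; for example, with a stock ethtool binary, "ethtool -C ethX adaptive-rx on rx-usecs 20" enables DIM and "ethtool -C ethX adaptive-rx off rx-frames 16 rx-usecs 100" pins a fixed profile, while requests with frame counts above 255, zero frame counts, or zero usecs alongside multi-frame coalescing are rejected with -EINVAL per the checks above.
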
@@ -2302,7 +2515,8 @@ axienet_ethtool_get_rmon_stats(struct net_device *dev,
static const struct ethtool_ops axienet_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
- ETHTOOL_COALESCE_USECS,
+ ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_drvinfo = axienet_ethtools_get_drvinfo,
.get_regs_len = axienet_ethtools_get_regs_len,
.get_regs = axienet_ethtools_get_regs,
@@ -2331,11 +2545,12 @@ static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
}
static void axienet_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
- phylink_mii_c22_pcs_get_state(pcs_phy, state);
+ phylink_mii_c22_pcs_get_state(pcs_phy, neg_mode, state);
}
static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
@@ -2484,6 +2699,7 @@ static void axienet_dma_err_handler(struct work_struct *work)
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
axienet_dma_stop(lp);
+ netdev_reset_queue(ndev);
for (i = 0; i < lp->tx_bd_num; i++) {
cur_p = &lp->tx_bd_v[i];
@@ -2784,7 +3000,7 @@ static int axienet_probe(struct platform_device *pdev)
}
}
if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
- dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit archecture\n");
+ dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
ret = -EINVAL;
goto cleanup_clk;
}
@@ -2843,10 +3059,15 @@ static int axienet_probe(struct platform_device *pdev)
axienet_set_mac_address(ndev, NULL);
}
- lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
- lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
- lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
- lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
+ spin_lock_init(&lp->rx_cr_lock);
+ spin_lock_init(&lp->tx_cr_lock);
+ INIT_WORK(&lp->rx_dim.work, axienet_rx_dim_work);
+ lp->rx_dim_enabled = true;
+ lp->rx_dim.profile_ix = 1;
+ lp->rx_dma_cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
+ XAXIDMA_DFT_RX_USEC);
+ lp->tx_dma_cr = axienet_calc_cr(lp, XAXIDMA_DFT_TX_THRESHOLD,
+ XAXIDMA_DFT_TX_USEC);
ret = axienet_mdio_setup(lp);
if (ret)
@@ -2876,12 +3097,12 @@ static int axienet_probe(struct platform_device *pdev)
}
of_node_put(np);
lp->pcs.ops = &axienet_pcs_ops;
- lp->pcs.neg_mode = true;
lp->pcs.poll = true;
}
lp->phylink_config.dev = &ndev->dev;
lp->phylink_config.type = PHYLINK_NETDEV;
+ lp->phylink_config.mac_managed_pm = true;
lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
MAC_10FD | MAC_100FD | MAC_1000FD;
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index ecf47107146d..4719d40a63ba 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -286,7 +286,7 @@ static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr,
/* Read the remaining data */
for (; length > 0; length--)
- *to_u8_ptr = *from_u8_ptr;
+ *to_u8_ptr++ = *from_u8_ptr++;
}
}
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index a31d5d5e6593..97e88886253f 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1576,7 +1576,7 @@ do_reset(struct net_device *dev, int full)
msleep(40); /* wait 40 msec to let it complete */
}
if (full_duplex)
- PutByte(XIRCREG1_ECR, GetByte(XIRCREG1_ECR | FullDuplex));
+ PutByte(XIRCREG1_ECR, GetByte(XIRCREG1_ECR) | FullDuplex);
} else { /* No MII */
SelectPage(0);
value = GetByte(XIRCREG_ESR); /* read the ESR */
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index a2ab1c150822..e1e7f65553e7 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -394,16 +394,20 @@ static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb)
__raw_writel(TX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
}
-static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
+static int ixp4xx_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config cfg;
struct ixp46x_ts_regs *regs;
struct port *port = netdev_priv(netdev);
int ret;
int ch;
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
+ if (!cpu_is_ixp46x())
+ return -EOPNOTSUPP;
+
+ if (!netif_running(netdev))
+ return -EINVAL;
ret = ixp46x_ptp_find(&port->timesync_regs, &port->phc_index);
if (ret)
@@ -412,10 +416,10 @@ static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
ch = PORT2CHANNEL(port);
regs = port->timesync_regs;
- if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
+ if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
- switch (cfg.rx_filter) {
+ switch (cfg->rx_filter) {
case HWTSTAMP_FILTER_NONE:
port->hwts_rx_en = 0;
break;
@@ -431,39 +435,45 @@ static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
return -ERANGE;
}
- port->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON;
+ port->hwts_tx_en = cfg->tx_type == HWTSTAMP_TX_ON;
/* Clear out any old time stamps. */
__raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED,
&regs->channel[ch].ch_event);
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
-static int hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+static int ixp4xx_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *cfg)
{
- struct hwtstamp_config cfg;
struct port *port = netdev_priv(netdev);
- cfg.flags = 0;
- cfg.tx_type = port->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ if (!cpu_is_ixp46x())
+ return -EOPNOTSUPP;
+
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ cfg->flags = 0;
+ cfg->tx_type = port->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
switch (port->hwts_rx_en) {
case 0:
- cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+ cfg->rx_filter = HWTSTAMP_FILTER_NONE;
break;
case PTP_SLAVE_MODE:
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+ cfg->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
break;
case PTP_MASTER_MODE:
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+ cfg->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
break;
default:
WARN_ON_ONCE(1);
return -ERANGE;
}
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
@@ -985,21 +995,6 @@ static void eth_set_mcast_list(struct net_device *dev)
}
-static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
-{
- if (!netif_running(dev))
- return -EINVAL;
-
- if (cpu_is_ixp46x()) {
- if (cmd == SIOCSHWTSTAMP)
- return hwtstamp_set(dev, req);
- if (cmd == SIOCGHWTSTAMP)
- return hwtstamp_get(dev, req);
- }
-
- return phy_mii_ioctl(dev->phydev, req, cmd);
-}
-
/* ethtool support */
static void ixp4xx_get_drvinfo(struct net_device *dev,
@@ -1433,9 +1428,11 @@ static const struct net_device_ops ixp4xx_netdev_ops = {
.ndo_change_mtu = ixp4xx_eth_change_mtu,
.ndo_start_xmit = eth_xmit,
.ndo_set_rx_mode = eth_set_mcast_list,
- .ndo_eth_ioctl = eth_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_hwtstamp_get = ixp4xx_hwtstamp_get,
+ .ndo_hwtstamp_set = ixp4xx_hwtstamp_set,
};
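
With the conversion to ndo_hwtstamp_get/ndo_hwtstamp_set, the core ioctl layer copies the configuration to and from user space and hands the driver a ready-made kernel_hwtstamp_config, which is why the copy_from_user()/copy_to_user() calls and the SIOC[SG]HWTSTAMP dispatch in eth_ioctl() could be dropped; the remaining MII ioctls are served by the generic phy_do_ioctl_running() helper.
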
static struct eth_plat_info *ixp4xx_of_get_platdata(struct device *dev)
diff --git a/drivers/net/fddi/defza.c b/drivers/net/fddi/defza.c
index f5c25acaa577..2098c3068d34 100644
--- a/drivers/net/fddi/defza.c
+++ b/drivers/net/fddi/defza.c
@@ -983,7 +983,7 @@ static irqreturn_t fza_interrupt(int irq, void *dev_id)
case FZA_STATE_UNINITIALIZED:
netif_carrier_off(dev);
- del_timer_sync(&fp->reset_timer);
+ timer_delete_sync(&fp->reset_timer);
fp->ring_cmd_index = 0;
fp->ring_uns_index = 0;
fp->ring_rmc_tx_index = 0;
@@ -1017,7 +1017,7 @@ static irqreturn_t fza_interrupt(int irq, void *dev_id)
fp->queue_active = 0;
netif_stop_queue(dev);
pr_debug("%s: queue stopped\n", fp->name);
- del_timer_sync(&fp->reset_timer);
+ timer_delete_sync(&fp->reset_timer);
pr_warn("%s: halted, reason: %x\n", fp->name,
FZA_STATUS_GET_HALT(status));
fza_regs_dump(fp);
@@ -1044,7 +1044,7 @@ static irqreturn_t fza_interrupt(int irq, void *dev_id)
static void fza_reset_timer(struct timer_list *t)
{
- struct fza_private *fp = from_timer(fp, t, reset_timer);
+ struct fza_private *fp = timer_container_of(fp, t, reset_timer);
if (!fp->timer_state) {
pr_err("%s: RESET timed out!\n", fp->name);
@@ -1227,7 +1227,7 @@ static int fza_close(struct net_device *dev)
netif_stop_queue(dev);
pr_debug("%s: queue stopped\n", fp->name);
- del_timer_sync(&fp->reset_timer);
+ timer_delete_sync(&fp->reset_timer);
spin_lock_irqsave(&fp->lock, flags);
fp->state = FZA_STATE_UNINITIALIZED;
fp->state_chg_flag = 0;
@@ -1493,7 +1493,7 @@ static int fza_probe(struct device *bdev)
return 0;
err_out_irq:
- del_timer_sync(&fp->reset_timer);
+ timer_delete_sync(&fp->reset_timer);
fza_do_shutdown(fp);
free_irq(dev->irq, dev);
@@ -1520,7 +1520,7 @@ static int fza_remove(struct device *bdev)
unregister_netdev(dev);
- del_timer_sync(&fp->reset_timer);
+ timer_delete_sync(&fp->reset_timer);
fza_do_shutdown(fp);
free_irq(dev->irq, dev);
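
The del_timer_sync()/from_timer() conversions here and in the hamradio drivers below follow the tree-wide rename to the newer timer API names; the shape of the code is unchanged. A small sketch of the resulting pattern (illustrative only, the foo names are hypothetical):

	#include <linux/timer.h>

	struct foo {
		struct timer_list reset_timer;
	};

	static void foo_timeout(struct timer_list *t)
	{
		struct foo *fp = timer_container_of(fp, t, reset_timer);

		/* ... handle the expiry ... */
	}

	static void foo_init(struct foo *fp)
	{
		timer_setup(&fp->reset_timer, foo_timeout, 0);
	}

	static void foo_teardown(struct foo *fp)
	{
		/* was del_timer_sync() */
		timer_delete_sync(&fp->reset_timer);
	}
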
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index 4a4ed2ccf72f..b63965d9a1ba 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -1364,14 +1364,15 @@ static int fjes_probe(struct platform_device *plat_dev)
adapter->force_reset = false;
adapter->open_guard = false;
- adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
+ adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (unlikely(!adapter->txrx_wq)) {
err = -ENOMEM;
goto err_free_netdev;
}
adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
- WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (unlikely(!adapter->control_wq)) {
err = -ENOMEM;
goto err_free_txrx_wq;
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 2f29b1386b1c..77b0c3d52041 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -18,6 +18,7 @@
#include <net/rtnetlink.h>
#include <net/geneve.h>
#include <net/gro.h>
+#include <net/netdev_lock.h>
#include <net/protocol.h>
#define GENEVE_NETDEV_VER "0.6"
@@ -40,6 +41,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
/* per-network namespace private data for this module */
struct geneve_net {
struct list_head geneve_list;
+ /* sock_list is protected by rtnl lock */
struct list_head sock_list;
};
@@ -51,12 +53,16 @@ struct geneve_dev_node {
};
struct geneve_config {
- struct ip_tunnel_info info;
bool collect_md;
bool use_udp6_rx_checksums;
bool ttl_inherit;
enum ifla_geneve_df df;
bool inner_proto_inherit;
+ u16 port_min;
+ u16 port_max;
+
+ /* Must be last --ends in a flexible-array member. */
+ struct ip_tunnel_info info;
};
/* Pseudo network device */
@@ -235,7 +241,7 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
vni_to_tunnel_id(gnvh->vni),
gnvh->opt_len * 4);
if (!tun_dst) {
- DEV_STATS_INC(geneve->dev, rx_dropped);
+ dev_dstats_rx_dropped(geneve->dev);
goto drop;
}
/* Update tunnel dst according to Geneve options. */
@@ -322,7 +328,7 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
len = skb->len;
err = gro_cells_receive(&geneve->gro_cells, skb);
if (likely(err == NET_RX_SUCCESS))
- dev_sw_netstats_rx_add(geneve->dev, len);
+ dev_dstats_rx_add(geneve->dev, len);
return;
drop:
@@ -387,14 +393,14 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (unlikely((!geneve->cfg.inner_proto_inherit &&
inner_proto != htons(ETH_P_TEB)))) {
- DEV_STATS_INC(geneve->dev, rx_dropped);
+ dev_dstats_rx_dropped(geneve->dev);
goto drop;
}
opts_len = geneveh->opt_len * 4;
if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len, inner_proto,
!net_eq(geneve->net, dev_net(geneve->dev)))) {
- DEV_STATS_INC(geneve->dev, rx_dropped);
+ dev_dstats_rx_dropped(geneve->dev);
goto drop;
}
@@ -835,7 +841,9 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
use_cache = ip_tunnel_dst_cache_usable(skb, info);
tos = geneve_get_dsfield(skb, dev, info, &use_cache);
- sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+ sport = udp_flow_src_port(geneve->net, skb,
+ geneve->cfg.port_min,
+ geneve->cfg.port_max, true);
rt = udp_tunnel_dst_lookup(skb, dev, geneve->net, 0, &saddr,
&info->key,
@@ -895,7 +903,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (geneve->cfg.df == GENEVE_DF_SET) {
df = htons(IP_DF);
} else if (geneve->cfg.df == GENEVE_DF_INHERIT) {
- struct ethhdr *eth = eth_hdr(skb);
+ struct ethhdr *eth = skb_eth_hdr(skb);
if (ntohs(eth->h_proto) == ETH_P_IPV6) {
df = htons(IP_DF);
@@ -916,8 +924,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, saddr, info->key.u.ipv4.dst,
tos, ttl, df, sport, geneve->cfg.info.key.tp_dst,
!net_eq(geneve->net, dev_net(geneve->dev)),
- !test_bit(IP_TUNNEL_CSUM_BIT,
- info->key.tun_flags));
+ !test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags),
+ 0);
return 0;
}
@@ -945,7 +953,9 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
use_cache = ip_tunnel_dst_cache_usable(skb, info);
prio = geneve_get_dsfield(skb, dev, info, &use_cache);
- sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+ sport = udp_flow_src_port(geneve->net, skb,
+ geneve->cfg.port_min,
+ geneve->cfg.port_max, true);
dst = udp_tunnel6_dst_lookup(skb, dev, geneve->net, gs6->sock, 0,
&saddr, key, sport,
@@ -1007,7 +1017,8 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
&saddr, &key->u.ipv6.dst, prio, ttl,
info->key.label, sport, geneve->cfg.info.key.tp_dst,
!test_bit(IP_TUNNEL_CSUM_BIT,
- info->key.tun_flags));
+ info->key.tun_flags),
+ 0);
return 0;
}
#endif
@@ -1023,7 +1034,7 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
netdev_dbg(dev, "no tunnel metadata\n");
dev_kfree_skb(skb);
- DEV_STATS_INC(dev, tx_dropped);
+ dev_dstats_tx_dropped(dev);
return NETDEV_TX_OK;
}
} else {
@@ -1084,7 +1095,8 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
use_cache = ip_tunnel_dst_cache_usable(skb, info);
tos = geneve_get_dsfield(skb, dev, info, &use_cache);
sport = udp_flow_src_port(geneve->net, skb,
- 1, USHRT_MAX, true);
+ geneve->cfg.port_min,
+ geneve->cfg.port_max, true);
rt = udp_tunnel_dst_lookup(skb, dev, geneve->net, 0, &saddr,
&info->key,
@@ -1110,7 +1122,8 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
use_cache = ip_tunnel_dst_cache_usable(skb, info);
prio = geneve_get_dsfield(skb, dev, info, &use_cache);
sport = udp_flow_src_port(geneve->net, skb,
- 1, USHRT_MAX, true);
+ geneve->cfg.port_min,
+ geneve->cfg.port_max, true);
dst = udp_tunnel6_dst_lookup(skb, dev, geneve->net, gs6->sock, 0,
&saddr, &info->key, sport,
@@ -1170,8 +1183,9 @@ static void geneve_offload_rx_ports(struct net_device *dev, bool push)
struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_sock *gs;
- rcu_read_lock();
- list_for_each_entry_rcu(gs, &gn->sock_list, list) {
+ ASSERT_RTNL();
+
+ list_for_each_entry(gs, &gn->sock_list, list) {
if (push) {
udp_tunnel_push_rx_port(dev, gs->sock,
UDP_TUNNEL_TYPE_GENEVE);
@@ -1180,7 +1194,6 @@ static void geneve_offload_rx_ports(struct net_device *dev, bool push)
UDP_TUNNEL_TYPE_GENEVE);
}
}
- rcu_read_unlock();
}
/* Initialize the device structure. */
@@ -1202,7 +1215,7 @@ static void geneve_setup(struct net_device *dev)
dev->hw_features |= NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
- dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
/* MTU range: 68 - (something less than 65535) */
dev->min_mtu = ETH_MIN_MTU;
/* The max_mtu calculation does not take account of GENEVE
@@ -1234,6 +1247,7 @@ static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
[IFLA_GENEVE_TTL_INHERIT] = { .type = NLA_U8 },
[IFLA_GENEVE_DF] = { .type = NLA_U8 },
[IFLA_GENEVE_INNER_PROTO_INHERIT] = { .type = NLA_FLAG },
+ [IFLA_GENEVE_PORT_RANGE] = NLA_POLICY_EXACT_LEN(sizeof(struct ifla_geneve_port_range)),
};
static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -1279,6 +1293,17 @@ static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
}
}
+ if (data[IFLA_GENEVE_PORT_RANGE]) {
+ const struct ifla_geneve_port_range *p;
+
+ p = nla_data(data[IFLA_GENEVE_PORT_RANGE]);
+ if (ntohs(p->high) < ntohs(p->low)) {
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_PORT_RANGE],
+ "Invalid source port range");
+ return -EINVAL;
+ }
+ }
+
return 0;
}
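
The new IFLA_GENEVE_PORT_RANGE attribute carries a struct ifla_geneve_port_range with big-endian low/high bounds; the range feeds udp_flow_src_port() so UDP source ports for encapsulated traffic are hashed into [low, high] rather than the former fixed 1..USHRT_MAX, and as the changelink handling below shows, the range can only be set at device creation time.
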
@@ -1506,6 +1531,18 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
info->key.tp_dst = nla_get_be16(data[IFLA_GENEVE_PORT]);
}
+ if (data[IFLA_GENEVE_PORT_RANGE]) {
+ const struct ifla_geneve_port_range *p;
+
+ if (changelink) {
+ attrtype = IFLA_GENEVE_PORT_RANGE;
+ goto change_notsup;
+ }
+ p = nla_data(data[IFLA_GENEVE_PORT_RANGE]);
+ cfg->port_min = ntohs(p->low);
+ cfg->port_max = ntohs(p->high);
+ }
+
if (data[IFLA_GENEVE_COLLECT_METADATA]) {
if (changelink) {
attrtype = IFLA_GENEVE_COLLECT_METADATA;
@@ -1614,15 +1651,20 @@ static void geneve_link_config(struct net_device *dev,
geneve_change_mtu(dev, ldev_mtu - info->options_len);
}
-static int geneve_newlink(struct net *net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int geneve_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct geneve_config cfg = {
.df = GENEVE_DF_UNSET,
.use_udp6_rx_checksums = false,
.ttl_inherit = false,
.collect_md = false,
+ .port_min = 1,
+ .port_max = USHRT_MAX,
};
int err;
@@ -1631,7 +1673,7 @@ static int geneve_newlink(struct net *net, struct net_device *dev,
if (err)
return err;
- err = geneve_configure(net, dev, extack, &cfg);
+ err = geneve_configure(link_net, dev, extack, &cfg);
if (err)
return err;
@@ -1741,6 +1783,7 @@ static size_t geneve_get_size(const struct net_device *dev)
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_RX */
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL_INHERIT */
nla_total_size(0) + /* IFLA_GENEVE_INNER_PROTO_INHERIT */
+ nla_total_size(sizeof(struct ifla_geneve_port_range)) + /* IFLA_GENEVE_PORT_RANGE */
0;
}
@@ -1750,6 +1793,10 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
struct ip_tunnel_info *info = &geneve->cfg.info;
bool ttl_inherit = geneve->cfg.ttl_inherit;
bool metadata = geneve->cfg.collect_md;
+ struct ifla_geneve_port_range ports = {
+ .low = htons(geneve->cfg.port_min),
+ .high = htons(geneve->cfg.port_max),
+ };
__u8 tmp_vni[3];
__u32 vni;
@@ -1806,6 +1853,9 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_flag(skb, IFLA_GENEVE_INNER_PROTO_INHERIT))
goto nla_put_failure;
+ if (nla_put(skb, IFLA_GENEVE_PORT_RANGE, sizeof(ports), &ports))
+ goto nla_put_failure;
+
return 0;
nla_put_failure:
@@ -1838,6 +1888,8 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
.use_udp6_rx_checksums = true,
.ttl_inherit = false,
.collect_md = true,
+ .port_min = 1,
+ .port_max = USHRT_MAX,
};
memset(tb, 0, sizeof(tb));
@@ -1898,34 +1950,14 @@ static __net_init int geneve_init_net(struct net *net)
return 0;
}
-static void geneve_destroy_tunnels(struct net *net, struct list_head *head)
+static void __net_exit geneve_exit_rtnl_net(struct net *net,
+ struct list_head *dev_to_kill)
{
struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_dev *geneve, *next;
- struct net_device *dev, *aux;
-
- /* gather any geneve devices that were moved into this ns */
- for_each_netdev_safe(net, dev, aux)
- if (dev->rtnl_link_ops == &geneve_link_ops)
- unregister_netdevice_queue(dev, head);
-
- /* now gather any other geneve devices that were created in this ns */
- list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) {
- /* If geneve->dev is in the same netns, it was already added
- * to the list by the previous loop.
- */
- if (!net_eq(dev_net(geneve->dev), net))
- unregister_netdevice_queue(geneve->dev, head);
- }
-}
-
-static void __net_exit geneve_exit_batch_rtnl(struct list_head *net_list,
- struct list_head *dev_to_kill)
-{
- struct net *net;
- list_for_each_entry(net, net_list, exit_list)
- geneve_destroy_tunnels(net, dev_to_kill);
+ list_for_each_entry_safe(geneve, next, &gn->geneve_list, next)
+ geneve_dellink(geneve->dev, dev_to_kill);
}
static void __net_exit geneve_exit_net(struct net *net)
@@ -1937,7 +1969,7 @@ static void __net_exit geneve_exit_net(struct net *net)
static struct pernet_operations geneve_net_ops = {
.init = geneve_init_net,
- .exit_batch_rtnl = geneve_exit_batch_rtnl,
+ .exit_rtnl = geneve_exit_rtnl_net,
.exit = geneve_exit_net,
.id = &geneve_net_id,
.size = sizeof(struct geneve_net),
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 70f981887518..4213c3b2d532 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -21,8 +21,11 @@
#include <linux/file.h>
#include <linux/gtp.h>
+#include <net/flow.h>
+#include <net/inet_dscp.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
+#include <net/inet_sock.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/udp.h>
@@ -350,7 +353,7 @@ static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
fl4->flowi4_oif = sk->sk_bound_dev_if;
fl4->daddr = daddr;
fl4->saddr = saddr;
- fl4->flowi4_tos = ip_sock_rt_tos(sk);
+ fl4->flowi4_dscp = inet_sk_dscp(inet_sk(sk));
fl4->flowi4_scope = ip_sock_rt_scope(sk);
fl4->flowi4_proto = sk->sk_protocol;
@@ -444,7 +447,8 @@ static int gtp0_send_echo_resp_ip(struct gtp_dev *gtp, struct sk_buff *skb)
htons(GTP0_PORT), htons(GTP0_PORT),
!net_eq(sock_net(gtp->sk1u),
dev_net(gtp->dev)),
- false);
+ false,
+ 0);
return 0;
}
@@ -629,7 +633,7 @@ static void gtp1u_build_echo_msg(struct gtp1_header_long *hdr, __u8 msg_type)
hdr->tid = 0;
/* seq, npdu and next should be counted to the length of the GTP packet
- * that's why szie of gtp1_header should be subtracted,
+ * that's why size of gtp1_header should be subtracted,
* not size of gtp1_header_long.
*/
@@ -702,7 +706,8 @@ static int gtp1u_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
htons(GTP1U_PORT), htons(GTP1U_PORT),
!net_eq(sock_net(gtp->sk1u),
dev_net(gtp->dev)),
- false);
+ false,
+ 0);
return 0;
}
@@ -1302,7 +1307,7 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
pktinfo.gtph_port, pktinfo.gtph_port,
!net_eq(sock_net(pktinfo.pctx->sk),
dev_net(dev)),
- false);
+ false, 0);
break;
case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
@@ -1312,7 +1317,7 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
ip6_dst_hoplimit(&pktinfo.rt->dst),
0,
pktinfo.gtph_port, pktinfo.gtph_port,
- false);
+ false, 0);
#else
goto tx_err;
#endif
@@ -1460,10 +1465,12 @@ static int gtp_create_sockets(struct gtp_dev *gtp, const struct nlattr *nla,
#define GTP_TH_MAXLEN (sizeof(struct udphdr) + sizeof(struct gtp0_header))
#define GTP_IPV6_MAXLEN (sizeof(struct ipv6hdr) + GTP_TH_MAXLEN)
-static int gtp_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int gtp_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
+ struct nlattr **data = params->data;
unsigned int role = GTP_ROLE_GGSN;
struct gtp_dev *gtp;
struct gtp_net *gn;
@@ -1491,12 +1498,10 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
}
gtp->role = role;
- if (!data[IFLA_GTP_RESTART_COUNT])
- gtp->restart_count = 0;
- else
- gtp->restart_count = nla_get_u8(data[IFLA_GTP_RESTART_COUNT]);
+ gtp->restart_count = nla_get_u8_default(data[IFLA_GTP_RESTART_COUNT],
+ 0);
- gtp->net = src_net;
+ gtp->net = link_net;
err = gtp_hashtable_new(gtp, hashsize);
if (err < 0)
@@ -1526,8 +1531,8 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
goto out_encap;
}
- gn = net_generic(dev_net(dev), gtp_net_id);
- list_add_rcu(&gtp->list, &gn->gtp_dev_list);
+ gn = net_generic(link_net, gtp_net_id);
+ list_add(&gtp->list, &gn->gtp_dev_list);
dev->priv_destructor = gtp_destructor;
netdev_dbg(dev, "registered new GTP interface\n");
@@ -1553,7 +1558,7 @@ static void gtp_dellink(struct net_device *dev, struct list_head *head)
hlist_for_each_entry_safe(pctx, next, &gtp->tid_hash[i], hlist_tid)
pdp_context_delete(pctx);
- list_del_rcu(&gtp->list);
+ list_del(&gtp->list);
unregister_netdevice_queue(dev, head);
}
@@ -1829,10 +1834,7 @@ static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
version = nla_get_u32(info->attrs[GTPA_VERSION]);
- if (info->attrs[GTPA_FAMILY])
- family = nla_get_u8(info->attrs[GTPA_FAMILY]);
- else
- family = AF_INET;
+ family = nla_get_u8_default(info->attrs[GTPA_FAMILY], AF_INET);
#if !IS_ENABLED(CONFIG_IPV6)
if (family == AF_INET6)
@@ -2069,10 +2071,7 @@ static struct pdp_ctx *gtp_find_pdp_by_link(struct net *net,
struct gtp_dev *gtp;
int family;
- if (nla[GTPA_FAMILY])
- family = nla_get_u8(nla[GTPA_FAMILY]);
- else
- family = AF_INET;
+ family = nla_get_u8_default(nla[GTPA_FAMILY], AF_INET);
gtp = gtp_find_dev(net, nla);
if (!gtp)
@@ -2279,16 +2278,19 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb,
struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
int i, j, bucket = cb->args[0], skip = cb->args[1];
struct net *net = sock_net(skb->sk);
+ struct net_device *dev;
struct pdp_ctx *pctx;
- struct gtp_net *gn;
-
- gn = net_generic(net, gtp_net_id);
if (cb->args[4])
return 0;
rcu_read_lock();
- list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
+ for_each_netdev_rcu(net, dev) {
+ if (dev->rtnl_link_ops != &gtp_link_ops)
+ continue;
+
+ gtp = netdev_priv(dev);
+
if (last_gtp && last_gtp != gtp)
continue;
else
@@ -2400,13 +2402,13 @@ static int gtp_genl_send_echo_req(struct sk_buff *skb, struct genl_info *info)
udp_tunnel_xmit_skb(rt, sk, skb_to_send,
fl4.saddr, fl4.daddr,
- fl4.flowi4_tos,
+ inet_dscp_to_dsfield(fl4.flowi4_dscp),
ip4_dst_hoplimit(&rt->dst),
0,
port, port,
!net_eq(sock_net(sk),
dev_net(gtp->dev)),
- false);
+ false, 0);
return 0;
}
@@ -2476,23 +2478,19 @@ static int __net_init gtp_net_init(struct net *net)
return 0;
}
-static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list,
- struct list_head *dev_to_kill)
+static void __net_exit gtp_net_exit_rtnl(struct net *net,
+ struct list_head *dev_to_kill)
{
- struct net *net;
-
- list_for_each_entry(net, net_list, exit_list) {
- struct gtp_net *gn = net_generic(net, gtp_net_id);
- struct gtp_dev *gtp;
+ struct gtp_net *gn = net_generic(net, gtp_net_id);
+ struct gtp_dev *gtp, *gtp_next;
- list_for_each_entry(gtp, &gn->gtp_dev_list, list)
- gtp_dellink(gtp->dev, dev_to_kill);
- }
+ list_for_each_entry_safe(gtp, gtp_next, &gn->gtp_dev_list, list)
+ gtp_dellink(gtp->dev, dev_to_kill);
}
static struct pernet_operations gtp_net_ops = {
.init = gtp_net_init,
- .exit_batch_rtnl = gtp_net_exit_batch_rtnl,
+ .exit_rtnl = gtp_net_exit_rtnl,
.id = &gtp_net_id,
.size = sizeof(struct gtp_net),
};
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 3bf6785f9057..885992951e8a 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -115,8 +115,6 @@ struct sixpack {
struct timer_list tx_t;
struct timer_list resync_t;
- refcount_t refcnt;
- struct completion dead;
spinlock_t lock;
};
@@ -133,7 +131,7 @@ static int encode_sixpack(unsigned char *, unsigned char *, int, unsigned char);
static void sp_xmit_on_air(struct timer_list *t)
{
- struct sixpack *sp = from_timer(sp, t, tx_t);
+ struct sixpack *sp = timer_container_of(sp, t, tx_t);
int actual, when = sp->slottime;
static unsigned char random;
@@ -354,41 +352,12 @@ out_mem:
/* ----------------------------------------------------------------------- */
/*
- * We have a potential race on dereferencing tty->disc_data, because the tty
- * layer provides no locking at all - thus one cpu could be running
- * sixpack_receive_buf while another calls sixpack_close, which zeroes
- * tty->disc_data and frees the memory that sixpack_receive_buf is using. The
- * best way to fix this is to use a rwlock in the tty struct, but for now we
- * use a single global rwlock for all ttys in ppp line discipline.
- */
-static DEFINE_RWLOCK(disc_data_lock);
-
-static struct sixpack *sp_get(struct tty_struct *tty)
-{
- struct sixpack *sp;
-
- read_lock(&disc_data_lock);
- sp = tty->disc_data;
- if (sp)
- refcount_inc(&sp->refcnt);
- read_unlock(&disc_data_lock);
-
- return sp;
-}
-
-static void sp_put(struct sixpack *sp)
-{
- if (refcount_dec_and_test(&sp->refcnt))
- complete(&sp->dead);
-}
-
-/*
* Called by the TTY driver when there's room for more data. If we have
* more packets to send, we send them here.
*/
static void sixpack_write_wakeup(struct tty_struct *tty)
{
- struct sixpack *sp = sp_get(tty);
+ struct sixpack *sp = tty->disc_data;
int actual;
if (!sp)
@@ -400,7 +369,7 @@ static void sixpack_write_wakeup(struct tty_struct *tty)
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
sp->tx_enable = 0;
netif_wake_queue(sp->dev);
- goto out;
+ return;
}
if (sp->tx_enable) {
@@ -408,9 +377,6 @@ static void sixpack_write_wakeup(struct tty_struct *tty)
sp->xleft -= actual;
sp->xhead += actual;
}
-
-out:
- sp_put(sp);
}
/* ----------------------------------------------------------------------- */
@@ -430,7 +396,7 @@ static void sixpack_receive_buf(struct tty_struct *tty, const u8 *cp,
if (!count)
return;
- sp = sp_get(tty);
+ sp = tty->disc_data;
if (!sp)
return;
@@ -446,7 +412,6 @@ static void sixpack_receive_buf(struct tty_struct *tty, const u8 *cp,
}
sixpack_decode(sp, cp, count1);
- sp_put(sp);
tty_unthrottle(tty);
}
@@ -491,7 +456,7 @@ static inline void tnc_set_sync_state(struct sixpack *sp, int new_tnc_state)
static void resync_tnc(struct timer_list *t)
{
- struct sixpack *sp = from_timer(sp, t, resync_t);
+ struct sixpack *sp = timer_container_of(sp, t, resync_t);
static char resync_cmd = 0xe8;
/* clear any data that might have been received */
@@ -561,8 +526,6 @@ static int sixpack_open(struct tty_struct *tty)
spin_lock_init(&sp->lock);
spin_lock_init(&sp->rxlock);
- refcount_set(&sp->refcnt, 1);
- init_completion(&sp->dead);
/* !!! length of the buffers. MTU is IP MTU, not PACLEN! */
@@ -638,19 +601,11 @@ static void sixpack_close(struct tty_struct *tty)
{
struct sixpack *sp;
- write_lock_irq(&disc_data_lock);
sp = tty->disc_data;
- tty->disc_data = NULL;
- write_unlock_irq(&disc_data_lock);
if (!sp)
return;
- /*
- * We have now ensured that nobody can start using ap from now on, but
- * we have to wait for all existing users to finish.
- */
- if (!refcount_dec_and_test(&sp->refcnt))
- wait_for_completion(&sp->dead);
+ tty->disc_data = NULL;
/* We must stop the queue to avoid potentially scribbling
* on the free buffers. The sp->dead completion is not sufficient
@@ -660,8 +615,8 @@ static void sixpack_close(struct tty_struct *tty)
unregister_netdev(sp->dev);
- del_timer_sync(&sp->tx_t);
- del_timer_sync(&sp->resync_t);
+ timer_delete_sync(&sp->tx_t);
+ timer_delete_sync(&sp->resync_t);
/* Free all 6pack frame buffers after unreg. */
kfree(sp->xbuff);
@@ -673,7 +628,7 @@ static void sixpack_close(struct tty_struct *tty)
static int sixpack_ioctl(struct tty_struct *tty, unsigned int cmd,
unsigned long arg)
{
- struct sixpack *sp = sp_get(tty);
+ struct sixpack *sp = tty->disc_data;
struct net_device *dev;
unsigned int tmp, err;
@@ -725,8 +680,6 @@ static int sixpack_ioctl(struct tty_struct *tty, unsigned int cmd,
err = tty_mode_ioctl(tty, cmd, arg);
}
- sp_put(sp);
-
return err;
}
@@ -937,7 +890,7 @@ sixpack_decode(struct sixpack *sp, const u8 *pre_rbuff, size_t count)
inbyte = pre_rbuff[count1];
if (inbyte == SIXP_FOUND_TNC) {
tnc_set_sync_state(sp, TNC_IN_SYNC);
- del_timer(&sp->resync_t);
+ timer_delete(&sp->resync_t);
}
if ((inbyte & SIXP_PRIO_CMD_MASK) != 0)
decode_prio_command(sp, inbyte);
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 9e366f275406..5fda7a0fcce0 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -1074,7 +1074,7 @@ static int baycom_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
return 0;
case HDLCDRVCTL_DRIVERNAME:
- strscpy_pad(hi.data.drivername, "baycom_epp", sizeof(hi.data.drivername));
+ strscpy_pad(hi.data.drivername, "baycom_epp");
break;
case HDLCDRVCTL_GETMODE:
@@ -1091,8 +1091,7 @@ static int baycom_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
return baycom_setmode(bc, hi.data.modename);
case HDLCDRVCTL_MODELIST:
- strscpy_pad(hi.data.modename, "intclk,extclk,intmodem,extmodem,divider=x",
- sizeof(hi.data.modename));
+ strscpy_pad(hi.data.modename, "intclk,extclk,intmodem,extmodem,divider=x");
break;
case HDLCDRVCTL_MODEMPARMASK:
diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c
index 00ebc25d0b22..f03797103c6a 100644
--- a/drivers/net/hamradio/baycom_par.c
+++ b/drivers/net/hamradio/baycom_par.c
@@ -427,7 +427,7 @@ static int baycom_ioctl(struct net_device *dev, void __user *data,
break;
case HDLCDRVCTL_GETMODE:
- strcpy(hi->data.modename, bc->options ? "par96" : "picpar");
+ strscpy(hi->data.modename, bc->options ? "par96" : "picpar");
if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
return -EFAULT;
return 0;
@@ -439,7 +439,7 @@ static int baycom_ioctl(struct net_device *dev, void __user *data,
return baycom_setmode(bc, hi->data.modename);
case HDLCDRVCTL_MODELIST:
- strcpy(hi->data.modename, "par96,picpar");
+ strscpy(hi->data.modename, "par96,picpar");
if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
return -EFAULT;
return 0;
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index 646f605e358f..ee5bd3c12040 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -373,6 +373,7 @@ static enum uart ser12_check_uart(unsigned int iobase)
static int ser12_open(struct net_device *dev)
{
+ const unsigned int nr_irqs = irq_get_nr_irqs();
struct baycom_state *bc = netdev_priv(dev);
enum uart u;
@@ -530,7 +531,7 @@ static int baycom_ioctl(struct net_device *dev, void __user *data,
return baycom_setmode(bc, hi->data.modename);
case HDLCDRVCTL_MODELIST:
- strcpy(hi->data.modename, "ser12,ser3,ser24");
+ strscpy(hi->data.modename, "ser12,ser3,ser24");
if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
return -EFAULT;
return 0;
diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c
index 5d1ab4840753..05bdad214799 100644
--- a/drivers/net/hamradio/baycom_ser_hdx.c
+++ b/drivers/net/hamradio/baycom_ser_hdx.c
@@ -570,7 +570,7 @@ static int baycom_ioctl(struct net_device *dev, void __user *data,
break;
case HDLCDRVCTL_GETMODE:
- strcpy(hi->data.modename, "ser12");
+ strscpy(hi->data.modename, "ser12");
if (bc->opt_dcd <= 0)
strcat(hi->data.modename, (!bc->opt_dcd) ? "*" : (bc->opt_dcd == -2) ? "@" : "+");
if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
@@ -584,7 +584,7 @@ static int baycom_ioctl(struct net_device *dev, void __user *data,
return baycom_setmode(bc, hi->data.modename);
case HDLCDRVCTL_MODELIST:
- strcpy(hi->data.modename, "ser12");
+ strscpy(hi->data.modename, "ser12");
if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
return -EFAULT;
return 0;
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index bac1bb69d63a..045c5177262e 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -77,6 +77,7 @@
#include <net/ip.h>
#include <net/arp.h>
+#include <net/netdev_lock.h>
#include <net/net_namespace.h>
#include <linux/bpqether.h>
@@ -107,27 +108,6 @@ struct bpqdev {
static LIST_HEAD(bpq_devices);
-/*
- * bpqether network devices are paired with ethernet devices below them, so
- * form a special "super class" of normal ethernet devices; split their locks
- * off into a separate class since they always nest.
- */
-static struct lock_class_key bpq_netdev_xmit_lock_key;
-static struct lock_class_key bpq_netdev_addr_lock_key;
-
-static void bpq_set_lockdep_class_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *_unused)
-{
- lockdep_set_class(&txq->_xmit_lock, &bpq_netdev_xmit_lock_key);
-}
-
-static void bpq_set_lockdep_class(struct net_device *dev)
-{
- lockdep_set_class(&dev->addr_list_lock, &bpq_netdev_addr_lock_key);
- netdev_for_each_tx_queue(dev, bpq_set_lockdep_class_one, NULL);
-}
-
/* ------------------------------------------------------------------------ */
@@ -158,7 +138,7 @@ static inline struct net_device *bpq_get_ax25_dev(struct net_device *dev)
static inline int dev_is_ethdev(struct net_device *dev)
{
- return dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5);
+ return dev->type == ARPHRD_ETHER && !netdev_need_ops_lock(dev);
}
/* ------------------------------------------------------------------------ */
@@ -454,6 +434,8 @@ static const struct net_device_ops bpq_netdev_ops = {
static void bpq_setup(struct net_device *dev)
{
+ netdev_lockdep_set_classes(dev);
+
dev->netdev_ops = &bpq_netdev_ops;
dev->needs_free_netdev = true;
@@ -499,7 +481,6 @@ static int bpq_new_device(struct net_device *edev)
err = register_netdevice(ndev);
if (err)
goto error;
- bpq_set_lockdep_class(ndev);
/* List protected by RTNL */
list_add_rcu(&bpq->bpq_list, &bpq_devices);
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index a9184a78650b..ae5048efde68 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -794,8 +794,8 @@ static inline void init_brg(struct scc_channel *scc)
static void init_channel(struct scc_channel *scc)
{
- del_timer(&scc->tx_t);
- del_timer(&scc->tx_wdog);
+ timer_delete(&scc->tx_t);
+ timer_delete(&scc->tx_wdog);
disable_irq(scc->irq);
@@ -999,7 +999,7 @@ static void __scc_start_tx_timer(struct scc_channel *scc,
void (*handler)(struct timer_list *t),
unsigned long when)
{
- del_timer(&scc->tx_t);
+ timer_delete(&scc->tx_t);
if (when == 0)
{
@@ -1029,7 +1029,7 @@ static void scc_start_defer(struct scc_channel *scc)
unsigned long flags;
spin_lock_irqsave(&scc->lock, flags);
- del_timer(&scc->tx_wdog);
+ timer_delete(&scc->tx_wdog);
if (scc->kiss.maxdefer != 0 && scc->kiss.maxdefer != TIMER_OFF)
{
@@ -1045,7 +1045,7 @@ static void scc_start_maxkeyup(struct scc_channel *scc)
unsigned long flags;
spin_lock_irqsave(&scc->lock, flags);
- del_timer(&scc->tx_wdog);
+ timer_delete(&scc->tx_wdog);
if (scc->kiss.maxkeyup != 0 && scc->kiss.maxkeyup != TIMER_OFF)
{
@@ -1127,7 +1127,7 @@ static inline int is_grouped(struct scc_channel *scc)
static void t_dwait(struct timer_list *t)
{
- struct scc_channel *scc = from_timer(scc, t, tx_t);
+ struct scc_channel *scc = timer_container_of(scc, t, tx_t);
if (scc->stat.tx_state == TXS_WAIT) /* maxkeyup or idle timeout */
{
@@ -1169,7 +1169,7 @@ static void t_dwait(struct timer_list *t)
static void t_txdelay(struct timer_list *t)
{
- struct scc_channel *scc = from_timer(scc, t, tx_t);
+ struct scc_channel *scc = timer_container_of(scc, t, tx_t);
scc_start_maxkeyup(scc);
@@ -1190,11 +1190,11 @@ static void t_txdelay(struct timer_list *t)
static void t_tail(struct timer_list *t)
{
- struct scc_channel *scc = from_timer(scc, t, tx_t);
+ struct scc_channel *scc = timer_container_of(scc, t, tx_t);
unsigned long flags;
spin_lock_irqsave(&scc->lock, flags);
- del_timer(&scc->tx_wdog);
+ timer_delete(&scc->tx_wdog);
scc_key_trx(scc, TX_OFF);
spin_unlock_irqrestore(&scc->lock, flags);
@@ -1217,9 +1217,9 @@ static void t_tail(struct timer_list *t)
static void t_busy(struct timer_list *t)
{
- struct scc_channel *scc = from_timer(scc, t, tx_wdog);
+ struct scc_channel *scc = timer_container_of(scc, t, tx_wdog);
- del_timer(&scc->tx_t);
+ timer_delete(&scc->tx_t);
netif_stop_queue(scc->dev); /* don't pile on the wabbit! */
scc_discard_buffers(scc);
@@ -1236,7 +1236,7 @@ static void t_busy(struct timer_list *t)
static void t_maxkeyup(struct timer_list *t)
{
- struct scc_channel *scc = from_timer(scc, t, tx_wdog);
+ struct scc_channel *scc = timer_container_of(scc, t, tx_wdog);
unsigned long flags;
spin_lock_irqsave(&scc->lock, flags);
@@ -1248,7 +1248,7 @@ static void t_maxkeyup(struct timer_list *t)
netif_stop_queue(scc->dev);
scc_discard_buffers(scc);
- del_timer(&scc->tx_t);
+ timer_delete(&scc->tx_t);
cl(scc, R1, TxINT_ENAB); /* force an ABORT, but don't */
cl(scc, R15, TxUIE); /* count it. */
@@ -1270,9 +1270,9 @@ static void t_maxkeyup(struct timer_list *t)
static void t_idle(struct timer_list *t)
{
- struct scc_channel *scc = from_timer(scc, t, tx_t);
+ struct scc_channel *scc = timer_container_of(scc, t, tx_t);
- del_timer(&scc->tx_wdog);
+ timer_delete(&scc->tx_wdog);
scc_key_trx(scc, TX_OFF);
if(scc->kiss.mintime)
@@ -1403,11 +1403,11 @@ static unsigned long scc_get_param(struct scc_channel *scc, unsigned int cmd)
static void scc_stop_calibrate(struct timer_list *t)
{
- struct scc_channel *scc = from_timer(scc, t, tx_wdog);
+ struct scc_channel *scc = timer_container_of(scc, t, tx_wdog);
unsigned long flags;
spin_lock_irqsave(&scc->lock, flags);
- del_timer(&scc->tx_wdog);
+ timer_delete(&scc->tx_wdog);
scc_key_trx(scc, TX_OFF);
wr(scc, R6, 0);
wr(scc, R7, FLAG);
@@ -1428,7 +1428,7 @@ scc_start_calibrate(struct scc_channel *scc, int duration, unsigned char pattern
netif_stop_queue(scc->dev);
scc_discard_buffers(scc);
- del_timer(&scc->tx_wdog);
+ timer_delete(&scc->tx_wdog);
scc->tx_wdog.function = scc_stop_calibrate;
scc->tx_wdog.expires = jiffies + HZ*duration;
@@ -1460,6 +1460,7 @@ scc_start_calibrate(struct scc_channel *scc, int duration, unsigned char pattern
static void z8530_init(void)
{
+ const unsigned int nr_irqs = irq_get_nr_irqs();
struct scc_channel *scc;
int chip, k;
unsigned long flags;
@@ -1608,8 +1609,8 @@ static int scc_net_close(struct net_device *dev)
wr(scc,R3,0);
spin_unlock_irqrestore(&scc->lock, flags);
- del_timer_sync(&scc->tx_t);
- del_timer_sync(&scc->tx_wdog);
+ timer_delete_sync(&scc->tx_t);
+ timer_delete_sync(&scc->tx_wdog);
scc_discard_buffers(scc);
@@ -1735,7 +1736,7 @@ static int scc_net_siocdevprivate(struct net_device *dev,
if (hwcfg.irq == 2) hwcfg.irq = 9;
- if (hwcfg.irq < 0 || hwcfg.irq >= nr_irqs)
+ if (hwcfg.irq < 0 || hwcfg.irq >= irq_get_nr_irqs())
return -EINVAL;
if (!Ivec[hwcfg.irq].used && hwcfg.irq)
@@ -2117,6 +2118,7 @@ static int __init scc_init_driver (void)
static void __exit scc_cleanup_driver(void)
{
+ const unsigned int nr_irqs = irq_get_nr_irqs();
io_port ctrl;
int k;
struct scc_channel *scc;
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 2ed2f836f09a..f29997b20fd7 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1158,7 +1158,7 @@ static void __exit yam_cleanup_driver(void)
struct yam_mcs *p;
int i;
- del_timer_sync(&yam_timer);
+ timer_delete_sync(&yam_timer);
for (i = 0; i < NR_PORTS; i++) {
struct net_device *dev = yam_devs[i];
if (dev) {
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index aa8f828a0ae7..7b7e7a47a75e 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -1154,7 +1154,7 @@ static inline void rr_raz_rx(struct rr_private *rrpriv,
static void rr_timer(struct timer_list *t)
{
- struct rr_private *rrpriv = from_timer(rrpriv, t, timer);
+ struct rr_private *rrpriv = timer_container_of(rrpriv, t, timer);
struct net_device *dev = pci_get_drvdata(rrpriv->pci_dev);
struct rr_regs __iomem *regs = rrpriv->regs;
unsigned long flags;
@@ -1357,7 +1357,7 @@ static int rr_close(struct net_device *dev)
rrpriv->fw_running = 0;
spin_unlock_irqrestore(&rrpriv->lock, flags);
- del_timer_sync(&rrpriv->timer);
+ timer_delete_sync(&rrpriv->timer);
spin_lock_irqsave(&rrpriv->lock, flags);
writel(0, &regs->TxPi);
diff --git a/drivers/net/hyperv/Kconfig b/drivers/net/hyperv/Kconfig
index c8cbd85adcf9..982964c1a9fb 100644
--- a/drivers/net/hyperv/Kconfig
+++ b/drivers/net/hyperv/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config HYPERV_NET
tristate "Microsoft Hyper-V virtual network driver"
- depends on HYPERV
+ depends on HYPERV_VMBUS
select UCS2_STRING
select NLS
help
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index e690b95b1bbb..7397c693f984 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -158,7 +158,6 @@ struct hv_netvsc_packet {
u8 cp_partial; /* partial copy into send buffer */
u8 rmsg_size; /* RNDIS header and PPI size */
- u8 rmsg_pgcnt; /* page count of RNDIS header and PPI */
u8 page_buf_cnt;
u16 q_idx;
@@ -464,7 +463,7 @@ struct nvsp_1_message_send_receive_buffer_complete {
* LargeOffset SmallOffset
*/
- struct nvsp_1_receive_buffer_section sections[1];
+ struct nvsp_1_receive_buffer_section sections[];
} __packed;
/*
@@ -893,6 +892,18 @@ struct nvsp_message {
sizeof(struct nvsp_message))
#define NETVSC_MIN_IN_MSG_SIZE sizeof(struct vmpacket_descriptor)
+/* Maximum # of contiguous data ranges that can make up a transmitted packet.
+ * Typically it's the max SKB fragments plus 2 for the rndis packet and the
+ * linear portion of the SKB. But if MAX_SKB_FRAGS is large, the value may
+ * need to be limited to MAX_PAGE_BUFFER_COUNT, which is the max # of entries
+ * in a GPA direct packet sent to netvsp over VMBus.
+ */
+#if MAX_SKB_FRAGS + 2 < MAX_PAGE_BUFFER_COUNT
+#define MAX_DATA_RANGES (MAX_SKB_FRAGS + 2)
+#else
+#define MAX_DATA_RANGES MAX_PAGE_BUFFER_COUNT
+#endif
+
/* Estimated requestor size:
* out_ring_size/min_out_msg_size + in_ring_size/min_in_msg_size
*/
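
As a worked example of the new bound (the numbers are assumptions for illustration, not taken from the patch): with the common default MAX_SKB_FRAGS of 17 and MAX_PAGE_BUFFER_COUNT of 32, MAX_SKB_FRAGS + 2 = 19 < 32, so MAX_DATA_RANGES resolves to 19; with a large-frags configuration such as MAX_SKB_FRAGS = 45, 45 + 2 = 47 is not below 32, so MAX_DATA_RANGES is capped at MAX_PAGE_BUFFER_COUNT = 32.
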
@@ -1050,6 +1061,7 @@ struct net_device_context {
struct net_device __rcu *vf_netdev;
struct netvsc_vf_pcpu_stats __percpu *vf_stats;
struct delayed_work vf_takeover;
+ struct delayed_work vfns_work;
/* 1: allocated, serial number is valid. 0: not allocated */
u32 vf_alloc;
@@ -1064,6 +1076,8 @@ struct net_device_context {
struct netvsc_device_info *saved_netvsc_dev_info;
};
+void netvsc_vfns_work(struct work_struct *w);
+
/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
* packets. We can use ethtool to change UDP hash level when necessary.
*/
@@ -1166,6 +1180,8 @@ struct netvsc_device {
u32 max_chn;
u32 num_chn;
+ u32 netvsc_gso_max_size;
+
atomic_t open_chn;
struct work_struct subchan_work;
wait_queue_head_t subchan_open;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 9afb08dbc350..60a4629fe6ba 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -866,7 +866,8 @@ static void netvsc_send_completion(struct net_device *ndev,
case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
if (msglen < sizeof(struct nvsp_message_header) +
- sizeof(struct nvsp_1_message_send_receive_buffer_complete)) {
+ struct_size_t(struct nvsp_1_message_send_receive_buffer_complete,
+ sections, 1)) {
netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
msglen);
return;
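
For reference, the new minimum-length check computes the same bound the old sizeof()-based check did before sections[] became a flexible array member; assuming the struct_size_t() semantics from <linux/overflow.h>:

	/* illustrative identity, not code from the patch */
	struct_size_t(struct nvsp_1_message_send_receive_buffer_complete, sections, 1)
		== sizeof(struct nvsp_1_message_send_receive_buffer_complete)
		   + sizeof(struct nvsp_1_receive_buffer_section);
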
@@ -952,8 +953,7 @@ static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
+ pend_size;
int i;
u32 padding = 0;
- u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
- packet->page_buf_cnt;
+ u32 page_count = packet->cp_partial ? 1 : packet->page_buf_cnt;
u32 remain;
/* Add padding */
@@ -1054,6 +1054,42 @@ static int netvsc_dma_map(struct hv_device *hv_dev,
return 0;
}
+/* Build an "array" of mpb entries describing the data to be transferred
+ * over VMBus. After the desc header fields, each "array" entry is variable
+ * size, and each entry starts after the end of the previous entry. The
+ * "offset" and "len" fields for each entry imply the size of the entry.
+ *
+ * The pfns are in HV_HYP_PAGE_SIZE units, because all communication with Hyper-V
+ * uses that granularity, even if the system page size of the guest is larger.
+ * Each entry in the input "pb" array must describe a contiguous range of
+ * guest physical memory so that the pfns are sequential if the range crosses
+ * a page boundary. The offset field must be < HV_HYP_PAGE_SIZE.
+ */
+static inline void netvsc_build_mpb_array(struct hv_page_buffer *pb,
+ u32 page_buffer_count,
+ struct vmbus_packet_mpb_array *desc,
+ u32 *desc_size)
+{
+ struct hv_mpb_array *mpb_entry = &desc->range;
+ int i, j;
+
+ for (i = 0; i < page_buffer_count; i++) {
+ u32 offset = pb[i].offset;
+ u32 len = pb[i].len;
+
+ mpb_entry->offset = offset;
+ mpb_entry->len = len;
+
+ for (j = 0; j < HVPFN_UP(offset + len); j++)
+ mpb_entry->pfn_array[j] = pb[i].pfn + j;
+
+ mpb_entry = (struct hv_mpb_array *)&mpb_entry->pfn_array[j];
+ }
+
+ desc->rangecount = page_buffer_count;
+ *desc_size = (char *)mpb_entry - (char *)desc;
+}
+
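
A short worked example of what one iteration of netvsc_build_mpb_array() emits, assuming HV_HYP_PAGE_SIZE is 4096: a pb entry with offset = 3000 and len = 2000 covers bytes 3000..4999 of its range, so HVPFN_UP(3000 + 2000) = 2 and the mpb entry carries offset 3000, len 2000 and the two consecutive pfns pfn and pfn + 1; the next entry then begins immediately after pfn_array[1], which is what keeps the overall descriptor variable sized.
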
static inline int netvsc_send_pkt(
struct hv_device *device,
struct hv_netvsc_packet *packet,
@@ -1096,8 +1132,11 @@ static inline int netvsc_send_pkt(
packet->dma_range = NULL;
if (packet->page_buf_cnt) {
+ struct vmbus_channel_packet_page_buffer desc;
+ u32 desc_size;
+
if (packet->cp_partial)
- pb += packet->rmsg_pgcnt;
+ pb++;
ret = netvsc_dma_map(ndev_ctx->device_ctx, packet, pb);
if (ret) {
@@ -1105,11 +1144,12 @@ static inline int netvsc_send_pkt(
goto exit;
}
- ret = vmbus_sendpacket_pagebuffer(out_channel,
- pb, packet->page_buf_cnt,
- &nvmsg, sizeof(nvmsg),
- req_id);
-
+ netvsc_build_mpb_array(pb, packet->page_buf_cnt,
+ (struct vmbus_packet_mpb_array *)&desc,
+ &desc_size);
+ ret = vmbus_sendpacket_mpb_desc(out_channel,
+ (struct vmbus_packet_mpb_array *)&desc,
+ desc_size, &nvmsg, sizeof(nvmsg), req_id);
if (ret)
netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
} else {
@@ -1258,7 +1298,7 @@ int netvsc_send(struct net_device *ndev,
packet->send_buf_index = section_index;
if (packet->cp_partial) {
- packet->page_buf_cnt -= packet->rmsg_pgcnt;
+ packet->page_buf_cnt--;
packet->total_data_buflen = msd_len + packet->rmsg_size;
} else {
packet->page_buf_cnt = 0;
@@ -1772,6 +1812,11 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
/* Enable NAPI handler before init callbacks */
netif_napi_add(ndev, &net_device->chan_table[0].napi, netvsc_poll);
+ napi_enable(&net_device->chan_table[0].napi);
+ netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX,
+ &net_device->chan_table[0].napi);
+ netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX,
+ &net_device->chan_table[0].napi);
/* Open the channel */
device->channel->next_request_id_callback = vmbus_next_request_id;
@@ -1791,12 +1836,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
/* Channel is opened */
netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");
- napi_enable(&net_device->chan_table[0].napi);
- netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX,
- &net_device->chan_table[0].napi);
- netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX,
- &net_device->chan_table[0].napi);
-
/* Connect with the NetVsp */
ret = netvsc_connect_vsp(device, net_device, device_info);
if (ret != 0) {
@@ -1814,14 +1853,14 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
close:
RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
- netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX, NULL);
- netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX, NULL);
- napi_disable(&net_device->chan_table[0].napi);
/* Now, we can close the channel safely */
vmbus_close(device->channel);
cleanup:
+ netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX, NULL);
+ netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX, NULL);
+ napi_disable(&net_device->chan_table[0].napi);
netif_napi_del(&net_device->chan_table[0].napi);
cleanup2:
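
To make the reordering in netvsc_device_add() easier to follow, the resulting sequence is (a summary of the hunks above, not new code):

	/* setup:   netif_napi_add() -> napi_enable() ->
	 *          netif_queue_set_napi(RX) -> netif_queue_set_napi(TX) ->
	 *          vmbus_open() -> netvsc_connect_vsp()
	 * unwind (cleanup:): netif_queue_set_napi(TX, NULL) ->
	 *          netif_queue_set_napi(RX, NULL) -> napi_disable() ->
	 *          netif_napi_del()
	 */
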
diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c
index e01c5997a551..1dd3755d9e6d 100644
--- a/drivers/net/hyperv/netvsc_bpf.c
+++ b/drivers/net/hyperv/netvsc_bpf.c
@@ -183,7 +183,7 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
xdp.command = XDP_SETUP_PROG;
xdp.prog = prog;
- ret = dev_xdp_propagate(vf_netdev, &xdp);
+ ret = netif_xdp_propagate(vf_netdev, &xdp);
if (ret && prog)
bpf_prog_put(prog);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index d6c4abfc3a28..3d47d749ef9f 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -29,6 +29,7 @@
#include <linux/bpf.h>
#include <net/arp.h>
+#include <net/netdev_lock.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
@@ -325,43 +326,10 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
return txq;
}
-static u32 fill_pg_buf(unsigned long hvpfn, u32 offset, u32 len,
- struct hv_page_buffer *pb)
-{
- int j = 0;
-
- hvpfn += offset >> HV_HYP_PAGE_SHIFT;
- offset = offset & ~HV_HYP_PAGE_MASK;
-
- while (len > 0) {
- unsigned long bytes;
-
- bytes = HV_HYP_PAGE_SIZE - offset;
- if (bytes > len)
- bytes = len;
- pb[j].pfn = hvpfn;
- pb[j].offset = offset;
- pb[j].len = bytes;
-
- offset += bytes;
- len -= bytes;
-
- if (offset == HV_HYP_PAGE_SIZE && len) {
- hvpfn++;
- offset = 0;
- j++;
- }
- }
-
- return j + 1;
-}
-
static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
struct hv_netvsc_packet *packet,
struct hv_page_buffer *pb)
{
- u32 slots_used = 0;
- char *data = skb->data;
int frags = skb_shinfo(skb)->nr_frags;
int i;
@@ -370,28 +338,27 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
* 2. skb linear data
* 3. skb fragment data
*/
- slots_used += fill_pg_buf(virt_to_hvpfn(hdr),
- offset_in_hvpage(hdr),
- len,
- &pb[slots_used]);
+ pb[0].offset = offset_in_hvpage(hdr);
+ pb[0].len = len;
+ pb[0].pfn = virt_to_hvpfn(hdr);
packet->rmsg_size = len;
- packet->rmsg_pgcnt = slots_used;
- slots_used += fill_pg_buf(virt_to_hvpfn(data),
- offset_in_hvpage(data),
- skb_headlen(skb),
- &pb[slots_used]);
+ pb[1].offset = offset_in_hvpage(skb->data);
+ pb[1].len = skb_headlen(skb);
+ pb[1].pfn = virt_to_hvpfn(skb->data);
for (i = 0; i < frags; i++) {
skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+ struct hv_page_buffer *cur_pb = &pb[i + 2];
+ u64 pfn = page_to_hvpfn(skb_frag_page(frag));
+ u32 offset = skb_frag_off(frag);
- slots_used += fill_pg_buf(page_to_hvpfn(skb_frag_page(frag)),
- skb_frag_off(frag),
- skb_frag_size(frag),
- &pb[slots_used]);
+ cur_pb->offset = offset_in_hvpage(offset);
+ cur_pb->len = skb_frag_size(frag);
+ cur_pb->pfn = pfn + (offset >> HV_HYP_PAGE_SHIFT);
}
- return slots_used;
+ return frags + 2;
}
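
As a worked example of the simplified layout (the fragment count is illustrative): for an skb with 3 fragments, init_page_array() now fills exactly five hv_page_buffer entries, pb[0] for the RNDIS header, pb[1] for the skb linear data and pb[2]..pb[4] for the fragments, and returns frags + 2 = 5; any entry whose range crosses a Hyper-V page boundary is expanded into multiple pfns later, by netvsc_build_mpb_array().
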
static int count_skb_frag_slots(struct sk_buff *skb)
@@ -482,7 +449,7 @@ static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx)
struct net_device *vf_netdev;
u32 rndis_msg_size;
u32 hash;
- struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];
+ struct hv_page_buffer pb[MAX_DATA_RANGES];
/* If VF is present and up then redirect packets to it.
* Skip the VF if it is marked down or has no carrier.
@@ -1404,7 +1371,7 @@ static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
struct net_device_context *ndc = netdev_priv(ndev);
struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
- struct sockaddr *addr = p;
+ struct sockaddr_storage *addr = p;
int err;
err = eth_prepare_mac_addr_change(ndev, p);
@@ -1420,12 +1387,12 @@ static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
return err;
}
- err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
+ err = rndis_filter_set_device_mac(nvdev, addr->__data);
if (!err) {
eth_commit_mac_addr_change(ndev, p);
} else if (vf_netdev) {
/* rollback change on VF */
- memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
+ memcpy(addr->__data, ndev->dev_addr, ETH_ALEN);
dev_set_mac_address(vf_netdev, addr, NULL);
}
@@ -1613,9 +1580,10 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
}
static int
-netvsc_get_rss_hash_opts(struct net_device_context *ndc,
- struct ethtool_rxnfc *info)
+netvsc_get_rxfh_fields(struct net_device *ndev,
+ struct ethtool_rxfh_fields *info)
{
+ struct net_device_context *ndc = netdev_priv(ndev);
const u32 l4_flag = RXH_L4_B_0_1 | RXH_L4_B_2_3;
info->data = RXH_IP_SRC | RXH_IP_DST;
@@ -1656,30 +1624,24 @@ netvsc_get_rss_hash_opts(struct net_device_context *ndc,
return 0;
}
-static int
-netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
- u32 *rules)
+static u32 netvsc_get_rx_ring_count(struct net_device *dev)
{
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
if (!nvdev)
- return -ENODEV;
-
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = nvdev->num_chn;
return 0;
- case ETHTOOL_GRXFH:
- return netvsc_get_rss_hash_opts(ndc, info);
- }
- return -EOPNOTSUPP;
+ return nvdev->num_chn;
}
-static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
- struct ethtool_rxnfc *info)
+static int
+netvsc_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *info,
+ struct netlink_ext_ack *extack)
{
+ struct net_device_context *ndc = netdev_priv(dev);
+
if (info->data == (RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
switch (info->flow_type) {
@@ -1734,17 +1696,6 @@ static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
return -EOPNOTSUPP;
}
-static int
-netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info)
-{
- struct net_device_context *ndc = netdev_priv(ndev);
-
- if (info->cmd == ETHTOOL_SRXFH)
- return netvsc_set_rss_hash_opts(ndc, info);
-
- return -EOPNOTSUPP;
-}
-
static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
{
return NETVSC_HASH_KEYLEN;
@@ -2011,12 +1962,13 @@ static const struct ethtool_ops ethtool_ops = {
.get_channels = netvsc_get_channels,
.set_channels = netvsc_set_channels,
.get_ts_info = ethtool_op_get_ts_info,
- .get_rxnfc = netvsc_get_rxnfc,
- .set_rxnfc = netvsc_set_rxnfc,
+ .get_rx_ring_count = netvsc_get_rx_ring_count,
.get_rxfh_key_size = netvsc_get_rxfh_key_size,
.get_rxfh_indir_size = netvsc_rss_indir_size,
.get_rxfh = netvsc_get_rxfh,
.set_rxfh = netvsc_set_rxfh,
+ .get_rxfh_fields = netvsc_get_rxfh_fields,
+ .set_rxfh_fields = netvsc_set_rxfh_fields,
.get_link_ksettings = netvsc_get_link_ksettings,
.set_link_ksettings = netvsc_set_link_ksettings,
.get_ringparam = netvsc_get_ringparam,
@@ -2350,8 +2302,11 @@ static int netvsc_prepare_bonding(struct net_device *vf_netdev)
if (!ndev)
return NOTIFY_DONE;
- /* set slave flag before open to prevent IPv6 addrconf */
+ /* Set slave flag and no addrconf flag before open
+ * to prevent IPv6 addrconf.
+ */
vf_netdev->flags |= IFF_SLAVE;
+ vf_netdev->priv_flags |= IFF_NO_ADDRCONF;
return NOTIFY_DONE;
}
@@ -2461,6 +2416,21 @@ static int netvsc_vf_changed(struct net_device *vf_netdev, unsigned long event)
} else {
netdev_info(ndev, "Data path switched %s VF: %s\n",
vf_is_up ? "to" : "from", vf_netdev->name);
+
+ /* In Azure, when accelerated networking is enabled, other NICs
+ * like MANA, MLX, are configured as a bonded NIC with
+ * Netvsc(failover) NIC. For bonded NICs, the min of the max
+ * pkt aggregate size of the members is propagated in the stack.
+ * In order to allow these NICs (MANA/MLX) to use up to
+ * GSO_MAX_SIZE gso packet size, we need to allow Netvsc NIC to
+ * also support this in the guest.
+ * This value is only increased for netvsc NIC when datapath is
+ * switched over to the VF
+ */
+ if (vf_is_up)
+ netif_set_tso_max_size(ndev, vf_netdev->tso_max_size);
+ else
+ netif_set_tso_max_size(ndev, netvsc_dev->netvsc_gso_max_size);
}
return NOTIFY_OK;
@@ -2480,8 +2450,6 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
- netvsc_vf_setxdp(vf_netdev, NULL);
-
reinit_completion(&net_device_ctx->vf_add);
netdev_rx_handler_unregister(vf_netdev);
netdev_upper_dev_unlink(vf_netdev, ndev);
@@ -2547,6 +2515,7 @@ static int netvsc_probe(struct hv_device *dev,
spin_lock_init(&net_device_ctx->lock);
INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);
+ INIT_DELAYED_WORK(&net_device_ctx->vfns_work, netvsc_vfns_work);
net_device_ctx->vf_stats
= netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
@@ -2649,7 +2618,9 @@ static int netvsc_probe(struct hv_device *dev,
continue;
netvsc_prepare_bonding(vf_netdev);
+ netdev_lock_ops(vf_netdev);
netvsc_register_vf(vf_netdev, VF_REG_IN_PROBE);
+ netdev_unlock_ops(vf_netdev);
__netvsc_vf_setup(net, vf_netdev);
break;
}
@@ -2689,6 +2660,8 @@ static void netvsc_remove(struct hv_device *dev)
cancel_delayed_work_sync(&ndev_ctx->dwork);
rtnl_lock();
+ cancel_delayed_work_sync(&ndev_ctx->vfns_work);
+
nvdev = rtnl_dereference(ndev_ctx->nvdev);
if (nvdev) {
cancel_work_sync(&nvdev->subchan_work);
@@ -2730,6 +2703,7 @@ static int netvsc_suspend(struct hv_device *dev)
cancel_delayed_work_sync(&ndev_ctx->dwork);
rtnl_lock();
+ cancel_delayed_work_sync(&ndev_ctx->vfns_work);
nvdev = rtnl_dereference(ndev_ctx->nvdev);
if (nvdev == NULL) {
@@ -2823,6 +2797,27 @@ static void netvsc_event_set_vf_ns(struct net_device *ndev)
}
}
+void netvsc_vfns_work(struct work_struct *w)
+{
+ struct net_device_context *ndev_ctx =
+ container_of(w, struct net_device_context, vfns_work.work);
+ struct net_device *ndev;
+
+ if (!rtnl_trylock()) {
+ schedule_delayed_work(&ndev_ctx->vfns_work, 1);
+ return;
+ }
+
+ ndev = hv_get_drvdata(ndev_ctx->device_ctx);
+ if (!ndev)
+ goto out;
+
+ netvsc_event_set_vf_ns(ndev);
+
+out:
+ rtnl_unlock();
+}
+
/*
* On Hyper-V, every VF interface is matched with a corresponding
* synthetic interface. The synthetic interface is presented first
@@ -2833,10 +2828,12 @@ static int netvsc_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
+ struct net_device_context *ndev_ctx;
int ret = 0;
if (event_dev->netdev_ops == &device_ops && event == NETDEV_REGISTER) {
- netvsc_event_set_vf_ns(event_dev);
+ ndev_ctx = netdev_priv(event_dev);
+ schedule_delayed_work(&ndev_ctx->vfns_work, 0);
return NOTIFY_DONE;
}
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index c0ceeef4fcd8..c35f9685b6bf 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -225,8 +225,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
struct rndis_request *req)
{
struct hv_netvsc_packet *packet;
- struct hv_page_buffer page_buf[2];
- struct hv_page_buffer *pb = page_buf;
+ struct hv_page_buffer pb;
int ret;
/* Setup the packet to send it */
@@ -235,27 +234,14 @@ static int rndis_filter_send_request(struct rndis_device *dev,
packet->total_data_buflen = req->request_msg.msg_len;
packet->page_buf_cnt = 1;
- pb[0].pfn = virt_to_phys(&req->request_msg) >>
- HV_HYP_PAGE_SHIFT;
- pb[0].len = req->request_msg.msg_len;
- pb[0].offset = offset_in_hvpage(&req->request_msg);
-
- /* Add one page_buf when request_msg crossing page boundary */
- if (pb[0].offset + pb[0].len > HV_HYP_PAGE_SIZE) {
- packet->page_buf_cnt++;
- pb[0].len = HV_HYP_PAGE_SIZE -
- pb[0].offset;
- pb[1].pfn = virt_to_phys((void *)&req->request_msg
- + pb[0].len) >> HV_HYP_PAGE_SHIFT;
- pb[1].offset = 0;
- pb[1].len = req->request_msg.msg_len -
- pb[0].len;
- }
+ pb.pfn = virt_to_phys(&req->request_msg) >> HV_HYP_PAGE_SHIFT;
+ pb.len = req->request_msg.msg_len;
+ pb.offset = offset_in_hvpage(&req->request_msg);
trace_rndis_send(dev->ndev, 0, &req->request_msg);
rcu_read_lock_bh();
- ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL, false);
+ ret = netvsc_send(dev->ndev, packet, NULL, &pb, NULL, false);
rcu_read_unlock_bh();
return ret;
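
The removed two-entry special case is no longer needed because each hv_page_buffer now describes one contiguous guest range and netvsc_build_mpb_array() expands a range that crosses a Hyper-V page boundary into the required number of pfns. As an illustrative example (assumed numbers): a request_msg at offset 3500 with msg_len 1200 spans two 4 KiB Hyper-V pages, yet the single pb entry still carries offset 3500 and len 1200, and the resulting mpb entry simply lists the pfns pfn and pfn + 1.
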
@@ -1266,17 +1252,26 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
new_sc->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
new_sc->max_pkt_size = NETVSC_MAX_PKT_SIZE;
+ /* Enable napi before opening the vmbus channel to avoid races
+ * as the host placing data on the host->guest ring may be left
+ * out if napi was not enabled.
+ */
+ napi_enable(&nvchan->napi);
+ netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_RX,
+ &nvchan->napi);
+ netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_TX,
+ &nvchan->napi);
+
ret = vmbus_open(new_sc, netvsc_ring_bytes,
netvsc_ring_bytes, NULL, 0,
netvsc_channel_cb, nvchan);
- if (ret == 0) {
- napi_enable(&nvchan->napi);
- netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_RX,
- &nvchan->napi);
- netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_TX,
- &nvchan->napi);
- } else {
+ if (ret != 0) {
netdev_notice(ndev, "sub channel open failed: %d\n", ret);
+ netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_TX,
+ NULL);
+ netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_RX,
+ NULL);
+ napi_disable(&nvchan->napi);
}
if (atomic_inc_return(&nvscdev->open_chn) == nvscdev->num_chn)
@@ -1356,9 +1351,10 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
struct net_device_context *net_device_ctx = netdev_priv(net);
struct ndis_offload hwcaps;
struct ndis_offload_params offloads;
- unsigned int gso_max_size = GSO_LEGACY_MAX_SIZE;
int ret;
+ nvdev->netvsc_gso_max_size = GSO_LEGACY_MAX_SIZE;
+
/* Find HW offload capabilities */
ret = rndis_query_hwcaps(rndis_device, nvdev, &hwcaps);
if (ret != 0)
@@ -1390,8 +1386,8 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
net->hw_features |= NETIF_F_TSO;
- if (hwcaps.lsov2.ip4_maxsz < gso_max_size)
- gso_max_size = hwcaps.lsov2.ip4_maxsz;
+ if (hwcaps.lsov2.ip4_maxsz < nvdev->netvsc_gso_max_size)
+ nvdev->netvsc_gso_max_size = hwcaps.lsov2.ip4_maxsz;
}
if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) {
@@ -1411,8 +1407,8 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
net->hw_features |= NETIF_F_TSO6;
- if (hwcaps.lsov2.ip6_maxsz < gso_max_size)
- gso_max_size = hwcaps.lsov2.ip6_maxsz;
+ if (hwcaps.lsov2.ip6_maxsz < nvdev->netvsc_gso_max_size)
+ nvdev->netvsc_gso_max_size = hwcaps.lsov2.ip6_maxsz;
}
if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) {
@@ -1438,7 +1434,7 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
*/
net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features;
- netif_set_tso_max_size(net, gso_max_size);
+ netif_set_tso_max_size(net, nvdev->netvsc_gso_max_size);
ret = rndis_filter_set_offload_params(net, nvdev, &offloads);
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index f632b0cfd5ae..fd91f8a45bce 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -776,8 +776,8 @@ at86rf230_setup_spi_messages(struct at86rf230_local *lp,
state->trx.tx_buf = state->buf;
state->trx.rx_buf = state->buf;
spi_message_add_tail(&state->trx, &state->msg);
- hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- state->timer.function = at86rf230_async_state_timer;
+ hrtimer_setup(&state->timer, at86rf230_async_state_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
static irqreturn_t at86rf230_isr(int irq, void *data)
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index e685a7f946f0..ebc4f1b18e7b 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -52,12 +52,10 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
-#include <linux/gpio.h>
#include <linux/ieee802154.h>
#include <linux/io.h>
#include <linux/kfifo.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
@@ -350,8 +348,8 @@ struct work_priv_container {
* @extclockenable: true if the external clock is to be enabled
* @extclockfreq: frequency of the external clock
* @extclockgpio: ca8210 output gpio of the external clock
- * @gpio_reset: gpio number of ca8210 reset line
- * @gpio_irq: gpio number of ca8210 interrupt line
+ * @reset_gpio: ca8210 reset GPIO descriptor
+ * @irq_gpio: ca8210 interrupt GPIO descriptor
* @irq_id: identifier for the ca8210 irq
*
*/
@@ -359,8 +357,8 @@ struct ca8210_platform_data {
bool extclockenable;
unsigned int extclockfreq;
unsigned int extclockgpio;
- int gpio_reset;
- int gpio_irq;
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *irq_gpio;
int irq_id;
};
@@ -627,14 +625,15 @@ static int ca8210_spi_transfer(
*/
static void ca8210_reset_send(struct spi_device *spi, unsigned int ms)
{
- struct ca8210_platform_data *pdata = spi->dev.platform_data;
+ struct device *dev = &spi->dev;
+ struct ca8210_platform_data *pdata = dev_get_platdata(dev);
struct ca8210_priv *priv = spi_get_drvdata(spi);
long status;
- gpio_set_value(pdata->gpio_reset, 0);
+ gpiod_set_value(pdata->reset_gpio, 1);
reinit_completion(&priv->ca8210_is_awake);
msleep(ms);
- gpio_set_value(pdata->gpio_reset, 1);
+ gpiod_set_value(pdata->reset_gpio, 0);
priv->promiscuous = false;
/* Wait until wakeup indication seen */
@@ -1446,8 +1445,7 @@ static u8 mcps_data_request(
command.pdata.data_req.src_addr_mode = src_addr_mode;
command.pdata.data_req.dst.mode = dst_address_mode;
if (dst_address_mode != MAC_MODE_NO_ADDR) {
- command.pdata.data_req.dst.pan_id[0] = LS_BYTE(dst_pan_id);
- command.pdata.data_req.dst.pan_id[1] = MS_BYTE(dst_pan_id);
+ put_unaligned_le16(dst_pan_id, command.pdata.data_req.dst.pan_id);
if (dst_address_mode == MAC_MODE_SHORT_ADDR) {
command.pdata.data_req.dst.address[0] = LS_BYTE(
dst_addr->short_address
@@ -1795,12 +1793,12 @@ static int ca8210_skb_rx(
}
hdr.source.mode = data_ind[0];
dev_dbg(&priv->spi->dev, "srcAddrMode: %#03x\n", hdr.source.mode);
- hdr.source.pan_id = *(u16 *)&data_ind[1];
+ hdr.source.pan_id = cpu_to_le16(get_unaligned_le16(&data_ind[1]));
dev_dbg(&priv->spi->dev, "srcPanId: %#06x\n", hdr.source.pan_id);
memcpy(&hdr.source.extended_addr, &data_ind[3], 8);
hdr.dest.mode = data_ind[11];
dev_dbg(&priv->spi->dev, "dstAddrMode: %#03x\n", hdr.dest.mode);
- hdr.dest.pan_id = *(u16 *)&data_ind[12];
+ hdr.dest.pan_id = cpu_to_le16(get_unaligned_le16(&data_ind[12]));
dev_dbg(&priv->spi->dev, "dstPanId: %#06x\n", hdr.dest.pan_id);
memcpy(&hdr.dest.extended_addr, &data_ind[14], 8);
@@ -1927,7 +1925,7 @@ static int ca8210_skb_tx(
status = mcps_data_request(
header.source.mode,
header.dest.mode,
- header.dest.pan_id,
+ le16_to_cpu(header.dest.pan_id),
(union macaddr *)&header.dest.extended_addr,
skb->len - mac_len,
&skb->data[mac_len],
@@ -2737,9 +2735,10 @@ static int ca8210_config_extern_clk(
*/
static int ca8210_register_ext_clock(struct spi_device *spi)
{
+ struct device *dev = &spi->dev;
+ struct ca8210_platform_data *pdata = dev_get_platdata(dev);
struct device_node *np = spi->dev.of_node;
struct ca8210_priv *priv = spi_get_drvdata(spi);
- struct ca8210_platform_data *pdata = spi->dev.platform_data;
if (!np)
return -EFAULT;
@@ -2785,25 +2784,16 @@ static void ca8210_unregister_ext_clock(struct spi_device *spi)
*/
static int ca8210_reset_init(struct spi_device *spi)
{
- int ret;
- struct ca8210_platform_data *pdata = spi->dev.platform_data;
-
- pdata->gpio_reset = of_get_named_gpio(
- spi->dev.of_node,
- "reset-gpio",
- 0
- );
+ struct device *dev = &spi->dev;
+ struct ca8210_platform_data *pdata = dev_get_platdata(dev);
- ret = gpio_direction_output(pdata->gpio_reset, 1);
- if (ret < 0) {
- dev_crit(
- &spi->dev,
- "Reset GPIO %d did not set to output mode\n",
- pdata->gpio_reset
- );
+ pdata->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(pdata->reset_gpio)) {
+ dev_crit(dev, "Reset GPIO did not set to output mode\n");
+ return PTR_ERR(pdata->reset_gpio);
}
- return ret;
+ return 0;
}
/**
@@ -2814,23 +2804,19 @@ static int ca8210_reset_init(struct spi_device *spi)
*/
static int ca8210_interrupt_init(struct spi_device *spi)
{
+ struct device *dev = &spi->dev;
+ struct ca8210_platform_data *pdata = dev_get_platdata(dev);
int ret;
- struct ca8210_platform_data *pdata = spi->dev.platform_data;
- pdata->gpio_irq = of_get_named_gpio(
- spi->dev.of_node,
- "irq-gpio",
- 0
- );
+ pdata->irq_gpio = devm_gpiod_get(dev, "irq", GPIOD_IN);
+ if (IS_ERR(pdata->irq_gpio)) {
+ dev_crit(dev, "Could not retrieve IRQ GPIO\n");
+ return PTR_ERR(pdata->irq_gpio);
+ }
- pdata->irq_id = gpio_to_irq(pdata->gpio_irq);
+ pdata->irq_id = gpiod_to_irq(pdata->irq_gpio);
if (pdata->irq_id < 0) {
- dev_crit(
- &spi->dev,
- "Could not get irq for gpio pin %d\n",
- pdata->gpio_irq
- );
- gpio_free(pdata->gpio_irq);
+ dev_crit(dev, "Could not get irq for IRQ GPIO\n");
return pdata->irq_id;
}
@@ -2841,10 +2827,8 @@ static int ca8210_interrupt_init(struct spi_device *spi)
"ca8210-irq",
spi_get_drvdata(spi)
);
- if (ret) {
+ if (ret)
dev_crit(&spi->dev, "request_irq %d failed\n", pdata->irq_id);
- gpio_free(pdata->gpio_irq);
- }
return ret;
}
@@ -3072,7 +3056,11 @@ static int ca8210_probe(struct spi_device *spi_device)
spi_set_drvdata(priv->spi, priv);
if (IS_ENABLED(CONFIG_IEEE802154_CA8210_DEBUGFS)) {
cascoda_api_upstream = ca8210_test_int_driver_write;
- ca8210_test_interface_init(priv);
+ ret = ca8210_test_interface_init(priv);
+ if (ret) {
+ dev_crit(&spi_device->dev, "ca8210_test_interface_init failed\n");
+ goto error;
+ }
} else {
cascoda_api_upstream = NULL;
}
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 2c1b5def4a0b..d3dc0914450a 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -333,6 +333,7 @@ static void ifb_setup(struct net_device *dev)
dev->min_mtu = 0;
dev->max_mtu = 0;
+ netif_set_tso_max_size(dev, GSO_MAX_SIZE);
}
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -426,22 +427,21 @@ static int __init ifb_init_module(void)
{
int i, err;
- down_write(&pernet_ops_rwsem);
- rtnl_lock();
- err = __rtnl_link_register(&ifb_link_ops);
+ err = rtnl_link_register(&ifb_link_ops);
if (err < 0)
- goto out;
+ return err;
+
+ rtnl_net_lock(&init_net);
for (i = 0; i < numifbs && !err; i++) {
err = ifb_init_one(i);
cond_resched();
}
- if (err)
- __rtnl_link_unregister(&ifb_link_ops);
-out:
- rtnl_unlock();
- up_write(&pernet_ops_rwsem);
+ rtnl_net_unlock(&init_net);
+
+ if (err)
+ rtnl_link_unregister(&ifb_link_ops);
return err;
}
diff --git a/drivers/net/ipa/Kconfig b/drivers/net/ipa/Kconfig
index 6782c2cbf542..01d219d3760c 100644
--- a/drivers/net/ipa/Kconfig
+++ b/drivers/net/ipa/Kconfig
@@ -5,7 +5,7 @@ config QCOM_IPA
depends on INTERCONNECT
depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST)
depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
- select QCOM_MDT_LOADER if ARCH_QCOM
+ select QCOM_MDT_LOADER
select QCOM_SCM
select QCOM_QMI_HELPERS
help
diff --git a/drivers/net/ipa/data/ipa_data-v3.1.c b/drivers/net/ipa/data/ipa_data-v3.1.c
index e902d731776d..65dba4729155 100644
--- a/drivers/net/ipa/data/ipa_data-v3.1.c
+++ b/drivers/net/ipa/data/ipa_data-v3.1.c
@@ -493,7 +493,6 @@ static const struct ipa_mem_data ipa_mem_data = {
.local = ipa_mem_local_data,
.imem_addr = 0x146bd000,
.imem_size = 0x00002000,
- .smem_id = 497,
.smem_size = 0x00002000,
};
diff --git a/drivers/net/ipa/data/ipa_data-v3.5.1.c b/drivers/net/ipa/data/ipa_data-v3.5.1.c
index f632aab56f4c..315e617a8eeb 100644
--- a/drivers/net/ipa/data/ipa_data-v3.5.1.c
+++ b/drivers/net/ipa/data/ipa_data-v3.5.1.c
@@ -374,7 +374,6 @@ static const struct ipa_mem_data ipa_mem_data = {
.local = ipa_mem_local_data,
.imem_addr = 0x146bd000,
.imem_size = 0x00002000,
- .smem_id = 497,
.smem_size = 0x00002000,
};
diff --git a/drivers/net/ipa/data/ipa_data-v4.11.c b/drivers/net/ipa/data/ipa_data-v4.11.c
index c1428483ca34..f5d66779c2fb 100644
--- a/drivers/net/ipa/data/ipa_data-v4.11.c
+++ b/drivers/net/ipa/data/ipa_data-v4.11.c
@@ -367,7 +367,6 @@ static const struct ipa_mem_data ipa_mem_data = {
.local = ipa_mem_local_data,
.imem_addr = 0x146a8000,
.imem_size = 0x00002000,
- .smem_id = 497,
.smem_size = 0x00009000,
};
diff --git a/drivers/net/ipa/data/ipa_data-v4.2.c b/drivers/net/ipa/data/ipa_data-v4.2.c
index 2c7e8cb429b9..f5ed5d745aeb 100644
--- a/drivers/net/ipa/data/ipa_data-v4.2.c
+++ b/drivers/net/ipa/data/ipa_data-v4.2.c
@@ -340,7 +340,6 @@ static const struct ipa_mem_data ipa_mem_data = {
.local = ipa_mem_local_data,
.imem_addr = 0x146a8000,
.imem_size = 0x00002000,
- .smem_id = 497,
.smem_size = 0x00002000,
};
diff --git a/drivers/net/ipa/data/ipa_data-v4.5.c b/drivers/net/ipa/data/ipa_data-v4.5.c
index 57dc78c526b0..730d8c43a45c 100644
--- a/drivers/net/ipa/data/ipa_data-v4.5.c
+++ b/drivers/net/ipa/data/ipa_data-v4.5.c
@@ -418,7 +418,6 @@ static const struct ipa_mem_data ipa_mem_data = {
.local = ipa_mem_local_data,
.imem_addr = 0x14688000,
.imem_size = 0x00003000,
- .smem_id = 497,
.smem_size = 0x00009000,
};
diff --git a/drivers/net/ipa/data/ipa_data-v4.7.c b/drivers/net/ipa/data/ipa_data-v4.7.c
index c8c23d9be961..5e1d9049c62b 100644
--- a/drivers/net/ipa/data/ipa_data-v4.7.c
+++ b/drivers/net/ipa/data/ipa_data-v4.7.c
@@ -28,20 +28,18 @@ enum ipa_resource_type {
enum ipa_rsrc_group_id {
/* Source resource group identifiers */
IPA_RSRC_GROUP_SRC_UL_DL = 0,
- IPA_RSRC_GROUP_SRC_UC_RX_Q,
IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
/* Destination resource group identifiers */
- IPA_RSRC_GROUP_DST_UL_DL_DPL = 0,
- IPA_RSRC_GROUP_DST_UNUSED_1,
+ IPA_RSRC_GROUP_DST_UL_DL = 0,
IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
};
/* QSB configuration data for an SoC having IPA v4.7 */
static const struct ipa_qsb_data ipa_qsb_data[] = {
[IPA_QSB_MASTER_DDR] = {
- .max_writes = 8,
- .max_reads = 0, /* no limit (hardware max) */
+ .max_writes = 12,
+ .max_reads = 13,
.max_reads_beats = 120,
},
};
@@ -81,7 +79,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
},
.endpoint = {
.config = {
- .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL,
.aggregation = true,
.status_enable = true,
.rx = {
@@ -106,6 +104,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.filter_support = true,
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
+ .checksum = true,
.qmap = true,
.status_enable = true,
.tx = {
@@ -128,7 +127,8 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
},
.endpoint = {
.config = {
- .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL,
+ .checksum = true,
.qmap = true,
.aggregation = true,
.rx = {
@@ -197,12 +197,12 @@ static const struct ipa_resource ipa_resource_src[] = {
/* Destination resource configuration data for an SoC having IPA v4.7 */
static const struct ipa_resource ipa_resource_dst[] = {
[IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
- .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .limits[IPA_RSRC_GROUP_DST_UL_DL] = {
.min = 7, .max = 7,
},
},
[IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
- .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .limits[IPA_RSRC_GROUP_DST_UL_DL] = {
.min = 2, .max = 2,
},
},
@@ -360,7 +360,6 @@ static const struct ipa_mem_data ipa_mem_data = {
.local = ipa_mem_local_data,
.imem_addr = 0x146a8000,
.imem_size = 0x00002000,
- .smem_id = 497,
.smem_size = 0x00009000,
};
diff --git a/drivers/net/ipa/data/ipa_data-v4.9.c b/drivers/net/ipa/data/ipa_data-v4.9.c
index 4eb9c909d5b3..da472a2a2e29 100644
--- a/drivers/net/ipa/data/ipa_data-v4.9.c
+++ b/drivers/net/ipa/data/ipa_data-v4.9.c
@@ -416,7 +416,6 @@ static const struct ipa_mem_data ipa_mem_data = {
.local = ipa_mem_local_data,
.imem_addr = 0x146bd000,
.imem_size = 0x00002000,
- .smem_id = 497,
.smem_size = 0x00009000,
};
diff --git a/drivers/net/ipa/data/ipa_data-v5.0.c b/drivers/net/ipa/data/ipa_data-v5.0.c
index 050580c99b65..bc5722e4b053 100644
--- a/drivers/net/ipa/data/ipa_data-v5.0.c
+++ b/drivers/net/ipa/data/ipa_data-v5.0.c
@@ -442,7 +442,6 @@ static const struct ipa_mem_data ipa_mem_data = {
.local = ipa_mem_local_data,
.imem_addr = 0x14688000,
.imem_size = 0x00003000,
- .smem_id = 497,
.smem_size = 0x00009000,
};
diff --git a/drivers/net/ipa/data/ipa_data-v5.5.c b/drivers/net/ipa/data/ipa_data-v5.5.c
index 0e6663e22533..741ae21d9d78 100644
--- a/drivers/net/ipa/data/ipa_data-v5.5.c
+++ b/drivers/net/ipa/data/ipa_data-v5.5.c
@@ -448,7 +448,6 @@ static const struct ipa_mem_data ipa_mem_data = {
.local = ipa_mem_local_data,
.imem_addr = 0x14688000,
.imem_size = 0x00002000,
- .smem_id = 497,
.smem_size = 0x0000b000,
};
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
index d88cbbbf18b7..2fd03f0799b2 100644
--- a/drivers/net/ipa/ipa_data.h
+++ b/drivers/net/ipa/ipa_data.h
@@ -180,7 +180,6 @@ struct ipa_resource_data {
* @local: array of IPA-local memory region descriptors
* @imem_addr: physical address of IPA region within IMEM
* @imem_size: size in bytes of IPA IMEM region
- * @smem_id: item identifier for IPA region within SMEM memory
* @smem_size: size in bytes of the IPA SMEM region
*/
struct ipa_mem_data {
@@ -188,7 +187,6 @@ struct ipa_mem_data {
const struct ipa_mem *local;
u32 imem_addr;
u32 imem_size;
- u32 smem_id;
u32 smem_size;
};
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
index 245a06997055..8336596b1247 100644
--- a/drivers/net/ipa/ipa_interrupt.c
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -149,7 +149,6 @@ static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
iowrite32(pending, ipa->reg_virt + reg_offset(reg));
}
out_power_put:
- pm_runtime_mark_last_busy(dev);
(void)pm_runtime_put_autosuspend(dev);
return IRQ_HANDLED;
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index f25f6e2cf58c..95a61bae3124 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -9,7 +9,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
@@ -586,7 +586,6 @@ static void ipa_deconfig(struct ipa *ipa)
static int ipa_firmware_load(struct device *dev)
{
const struct firmware *fw;
- struct device_node *node;
struct resource res;
phys_addr_t phys;
const char *path;
@@ -594,14 +593,7 @@ static int ipa_firmware_load(struct device *dev)
void *virt;
int ret;
- node = of_parse_phandle(dev->of_node, "memory-region", 0);
- if (!node) {
- dev_err(dev, "DT error getting \"memory-region\" property\n");
- return -EINVAL;
- }
-
- ret = of_address_to_resource(node, 0, &res);
- of_node_put(node);
+ ret = of_reserved_mem_region_to_resource(dev->of_node, 0, &res);
if (ret) {
dev_err(dev, "error %d getting \"memory-region\" resource\n",
ret);
@@ -911,7 +903,6 @@ static int ipa_probe(struct platform_device *pdev)
if (ret)
goto err_deconfig;
done:
- pm_runtime_mark_last_busy(dev);
(void)pm_runtime_put_autosuspend(dev);
return 0;
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index dee985eb08cb..835a3c9c1fd4 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -26,6 +26,8 @@
/* SMEM host id representing the modem. */
#define QCOM_SMEM_HOST_MODEM 1
+#define SMEM_IPA_FILTER_TABLE 497
+
const struct ipa_mem *ipa_mem_find(struct ipa *ipa, enum ipa_mem_id mem_id)
{
u32 i;
@@ -509,7 +511,6 @@ static void ipa_imem_exit(struct ipa *ipa)
/**
* ipa_smem_init() - Initialize SMEM memory used by the IPA
* @ipa: IPA pointer
- * @item: Item ID of SMEM memory
* @size: Size (bytes) of SMEM memory region
*
* SMEM is a managed block of shared DRAM, from which numbered "items"
@@ -523,7 +524,7 @@ static void ipa_imem_exit(struct ipa *ipa)
*
* Note: @size and the item address are not guaranteed to be page-aligned.
*/
-static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
+static int ipa_smem_init(struct ipa *ipa, size_t size)
{
struct device *dev = ipa->dev;
struct iommu_domain *domain;
@@ -545,25 +546,25 @@ static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
* The item might have already been allocated, in which case we
* use it unless the size isn't what we expect.
*/
- ret = qcom_smem_alloc(QCOM_SMEM_HOST_MODEM, item, size);
+ ret = qcom_smem_alloc(QCOM_SMEM_HOST_MODEM, SMEM_IPA_FILTER_TABLE, size);
if (ret && ret != -EEXIST) {
- dev_err(dev, "error %d allocating size %zu SMEM item %u\n",
- ret, size, item);
+ dev_err(dev, "error %d allocating size %zu SMEM item\n",
+ ret, size);
return ret;
}
/* Now get the address of the SMEM memory region */
- virt = qcom_smem_get(QCOM_SMEM_HOST_MODEM, item, &actual);
+ virt = qcom_smem_get(QCOM_SMEM_HOST_MODEM, SMEM_IPA_FILTER_TABLE, &actual);
if (IS_ERR(virt)) {
ret = PTR_ERR(virt);
- dev_err(dev, "error %d getting SMEM item %u\n", ret, item);
+ dev_err(dev, "error %d getting SMEM item\n", ret);
return ret;
}
/* In case the region was already allocated, verify the size */
if (ret && actual != size) {
- dev_err(dev, "SMEM item %u has size %zu, expected %zu\n",
- item, actual, size);
+ dev_err(dev, "SMEM item has size %zu, expected %zu\n",
+ actual, size);
return -EINVAL;
}
@@ -659,7 +660,7 @@ int ipa_mem_init(struct ipa *ipa, struct platform_device *pdev,
if (ret)
goto err_unmap;
- ret = ipa_smem_init(ipa, mem_data->smem_id, mem_data->smem_size);
+ ret = ipa_smem_init(ipa, mem_data->smem_size);
if (ret)
goto err_imem_exit;
diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c
index 8fe0d0e1a00f..9b136f6b8b4a 100644
--- a/drivers/net/ipa/ipa_modem.c
+++ b/drivers/net/ipa/ipa_modem.c
@@ -71,7 +71,6 @@ static int ipa_open(struct net_device *netdev)
netif_start_queue(netdev);
- pm_runtime_mark_last_busy(dev);
(void)pm_runtime_put_autosuspend(dev);
return 0;
@@ -102,7 +101,6 @@ static int ipa_stop(struct net_device *netdev)
ipa_endpoint_disable_one(priv->rx);
ipa_endpoint_disable_one(priv->tx);
out_power_put:
- pm_runtime_mark_last_busy(dev);
(void)pm_runtime_put_autosuspend(dev);
return 0;
@@ -175,7 +173,6 @@ ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev)
ret = ipa_endpoint_skb_tx(endpoint, skb);
- pm_runtime_mark_last_busy(dev);
(void)pm_runtime_put_autosuspend(dev);
if (ret) {
@@ -432,7 +429,6 @@ static void ipa_modem_crashed(struct ipa *ipa)
dev_err(dev, "error %d zeroing modem memory regions\n", ret);
out_power_put:
- pm_runtime_mark_last_busy(dev);
(void)pm_runtime_put_autosuspend(dev);
}
diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c
index fcaadd111a8a..420098796eec 100644
--- a/drivers/net/ipa/ipa_smp2p.c
+++ b/drivers/net/ipa/ipa_smp2p.c
@@ -171,7 +171,6 @@ static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id)
WARN(ret != 0, "error %d from ipa_setup()\n", ret);
out_power_put:
- pm_runtime_mark_last_busy(dev);
(void)pm_runtime_put_autosuspend(dev);
return IRQ_HANDLED;
@@ -213,7 +212,6 @@ static void ipa_smp2p_power_release(struct ipa *ipa)
if (!ipa->smp2p->power_on)
return;
- pm_runtime_mark_last_busy(dev);
(void)pm_runtime_put_autosuspend(dev);
ipa->smp2p->power_on = false;
}
diff --git a/drivers/net/ipa/ipa_sysfs.c b/drivers/net/ipa/ipa_sysfs.c
index a59bd215494c..a53e9e6f6cdf 100644
--- a/drivers/net/ipa/ipa_sysfs.c
+++ b/drivers/net/ipa/ipa_sysfs.c
@@ -37,8 +37,12 @@ static const char *ipa_version_string(struct ipa *ipa)
return "4.11";
case IPA_VERSION_5_0:
return "5.0";
+ case IPA_VERSION_5_1:
+ return "5.1";
+ case IPA_VERSION_5_5:
+ return "5.5";
default:
- return "0.0"; /* Won't happen (checked at probe time) */
+ return "0.0"; /* Should not happen */
}
}
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
index 2963db83ab6b..dc7e92f2a4fb 100644
--- a/drivers/net/ipa/ipa_uc.c
+++ b/drivers/net/ipa/ipa_uc.c
@@ -158,7 +158,6 @@ static void ipa_uc_response_hdlr(struct ipa *ipa)
if (ipa->uc_powered) {
ipa->uc_loaded = true;
ipa_power_retention(ipa, true);
- pm_runtime_mark_last_busy(dev);
(void)pm_runtime_put_autosuspend(dev);
ipa->uc_powered = false;
} else {
@@ -203,7 +202,6 @@ void ipa_uc_deconfig(struct ipa *ipa)
if (!ipa->uc_powered)
return;
- pm_runtime_mark_last_busy(dev);
(void)pm_runtime_put_autosuspend(dev);
}
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 025e0c19ec25..50de3ee204db 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -166,8 +166,7 @@ struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type);
void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
unsigned int len, bool success, bool mcast);
-int ipvlan_link_new(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+int ipvlan_link_new(struct net_device *dev, struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack);
void ipvlan_link_delete(struct net_device *dev, struct list_head *head);
void ipvlan_link_setup(struct net_device *dev);
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index fd591ddb3884..dea411e132db 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -2,7 +2,7 @@
/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
*/
-#include <net/inet_dscp.h>
+#include <net/flow.h>
#include <net/ip.h>
#include "ipvlan.h"
@@ -52,8 +52,8 @@ static u8 ipvlan_get_v4_hash(const void *iaddr)
{
const struct in_addr *ip4_addr = iaddr;
- return jhash_1word(ip4_addr->s_addr, ipvlan_jhash_secret) &
- IPVLAN_HASH_MASK;
+ return jhash_1word((__force u32)ip4_addr->s_addr, ipvlan_jhash_secret) &
+ IPVLAN_HASH_MASK;
}
static bool addr_equal(bool is_v6, struct ipvl_addr *addr, const void *iaddr)
@@ -219,7 +219,7 @@ void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type)
unsigned int ipvlan_mac_hash(const unsigned char *addr)
{
- u32 hash = jhash_1word(__get_unaligned_cpu32(addr+2),
+ u32 hash = jhash_1word(get_unaligned((u32 *)(addr + 2)),
ipvlan_jhash_secret);
return hash & IPVLAN_MAC_FILTER_MASK;
@@ -416,20 +416,25 @@ struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
{
- const struct iphdr *ip4h = ip_hdr(skb);
struct net_device *dev = skb->dev;
struct net *net = dev_net(dev);
- struct rtable *rt;
int err, ret = NET_XMIT_DROP;
+ const struct iphdr *ip4h;
+ struct rtable *rt;
struct flowi4 fl4 = {
.flowi4_oif = dev->ifindex,
- .flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h)),
.flowi4_flags = FLOWI_FLAG_ANYSRC,
.flowi4_mark = skb->mark,
- .daddr = ip4h->daddr,
- .saddr = ip4h->saddr,
};
+ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
+ goto err;
+
+ ip4h = ip_hdr(skb);
+ fl4.daddr = ip4h->daddr;
+ fl4.saddr = ip4h->saddr;
+ fl4.flowi4_dscp = ip4h_dscp(ip4h);
+
rt = ip_route_output_flow(net, &fl4, NULL);
if (IS_ERR(rt))
goto err;
@@ -488,6 +493,12 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
struct net_device *dev = skb->dev;
int err, ret = NET_XMIT_DROP;
+ if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr))) {
+ DEV_STATS_INC(dev, tx_errors);
+ kfree_skb(skb);
+ return ret;
+ }
+
err = ipvlan_route_v6_outbound(dev, skb);
if (unlikely(err)) {
DEV_STATS_INC(dev, tx_errors);
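
Both hunks above apply the usual pull-before-read rule: the network header must be available in the skb linear area before ip_hdr()/ipv6_hdr() is dereferenced. A minimal sketch of the pattern (illustrative, not the driver's exact code):

	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
		goto err;            /* header not available: bail out instead of reading past the valid data */
	ip4h = ip_hdr(skb);          /* safe to dereference only after the pull succeeded */
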
diff --git a/drivers/net/ipvlan/ipvlan_l3s.c b/drivers/net/ipvlan/ipvlan_l3s.c
index b4ef386bdb1b..7c017fe35522 100644
--- a/drivers/net/ipvlan/ipvlan_l3s.c
+++ b/drivers/net/ipvlan/ipvlan_l3s.c
@@ -226,5 +226,4 @@ void ipvlan_l3s_unregister(struct ipvl_port *port)
dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
- dev->l3mdev_ops = NULL;
}
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index ee2c3cf4df36..660f3db11766 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -3,6 +3,7 @@
*/
#include <linux/ethtool.h>
+#include <net/netdev_lock.h>
#include "ipvlan.h"
@@ -532,11 +533,13 @@ err:
return ret;
}
-int ipvlan_link_new(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+int ipvlan_link_new(struct net_device *dev, struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct ipvl_port *port;
struct net_device *phy_dev;
int err;
@@ -545,7 +548,7 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
if (!tb[IFLA_LINK])
return -EINVAL;
- phy_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+ phy_dev = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK]));
if (!phy_dev)
return -ENODEV;
@@ -781,9 +784,9 @@ static int ipvlan_device_event(struct notifier_block *unused,
case NETDEV_PRE_CHANGEADDR:
prechaddr_info = ptr;
list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
- err = dev_pre_changeaddr_notify(ipvlan->dev,
- prechaddr_info->dev_addr,
- extack);
+ err = netif_pre_changeaddr_notify(ipvlan->dev,
+ prechaddr_info->dev_addr,
+ extack);
if (err)
return notifier_from_errno(err);
}
@@ -799,6 +802,12 @@ static int ipvlan_device_event(struct notifier_block *unused,
case NETDEV_PRE_TYPE_CHANGE:
/* Forbid underlying device to change its type. */
return NOTIFY_BAD;
+
+ case NETDEV_NOTIFY_PEERS:
+ case NETDEV_BONDING_FAILOVER:
+ case NETDEV_RESEND_IGMP:
+ list_for_each_entry(ipvlan, &port->ipvlans, pnode)
+ call_netdevice_notifiers(event, ipvlan->dev);
}
return NOTIFY_DONE;
}
@@ -1085,3 +1094,4 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mahesh Bandewar <maheshb@google.com>");
MODULE_DESCRIPTION("Driver for L3 (IPv6/IPv4) based VLANs");
MODULE_ALIAS_RTNL_LINK("ipvlan");
+MODULE_IMPORT_NS("NETDEV_INTERNAL");
diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
index 1afc4c47be73..edd13916831a 100644
--- a/drivers/net/ipvlan/ipvtap.c
+++ b/drivers/net/ipvlan/ipvtap.c
@@ -73,8 +73,8 @@ static void ipvtap_update_features(struct tap_dev *tap,
netdev_update_features(vlan->dev);
}
-static int ipvtap_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int ipvtap_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
struct ipvtap_dev *vlantap = netdev_priv(dev);
@@ -97,7 +97,7 @@ static int ipvtap_newlink(struct net *src_net, struct net_device *dev,
/* Don't put anything that may fail after macvlan_common_newlink
* because we can't undo what it does.
*/
- err = ipvlan_link_new(src_net, dev, tb, data, extack);
+ err = ipvlan_link_new(dev, params, extack);
if (err) {
netdev_rx_handler_unregister(dev);
return err;
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 1993b90b1a5f..1fb6ce6843ad 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -54,6 +54,7 @@
#include <linux/percpu.h>
#include <linux/net_tstamp.h>
#include <net/net_namespace.h>
+#include <net/netdev_lock.h>
#include <linux/u64_stats_sync.h>
/* blackhole_netdev - a device used for dsts that are marked expired!
@@ -172,7 +173,7 @@ static void gen_lo_setup(struct net_device *dev,
dev->flags = IFF_LOOPBACK;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
dev->lltx = true;
- dev->netns_local = true;
+ dev->netns_immutable = true;
netif_keep_dst(dev);
dev->hw_features = NETIF_F_GSO_SOFTWARE;
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
@@ -244,8 +245,22 @@ static netdev_tx_t blackhole_netdev_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+static int blackhole_neigh_output(struct neighbour *n, struct sk_buff *skb)
+{
+ kfree_skb(skb);
+ return 0;
+}
+
+static int blackhole_neigh_construct(struct net_device *dev,
+ struct neighbour *n)
+{
+ n->output = blackhole_neigh_output;
+ return 0;
+}
+
static const struct net_device_ops blackhole_netdev_ops = {
.ndo_start_xmit = blackhole_netdev_xmit,
+ .ndo_neigh_construct = blackhole_neigh_construct,
};
/* This is a dst-dummy device used specifically for invalidated
@@ -264,13 +279,12 @@ static int __init blackhole_netdev_init(void)
if (!blackhole_netdev)
return -ENOMEM;
- rtnl_lock();
+ rtnl_net_lock(&init_net);
dev_init_scheduler(blackhole_netdev);
dev_activate(blackhole_netdev);
- rtnl_unlock();
+ rtnl_net_unlock(&init_net);
blackhole_netdev->flags |= IFF_UP | IFF_RUNNING;
- dev_net_set(blackhole_netdev, &init_net);
return 0;
}
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index ee2159282573..5200fd5a10e5 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -19,6 +19,7 @@
#include <net/gro_cells.h>
#include <net/macsec.h>
#include <net/dst_metadata.h>
+#include <net/netdev_lock.h>
#include <linux/phy.h>
#include <linux/byteorder/generic.h>
#include <linux/if_arp.h>
@@ -246,15 +247,39 @@ static sci_t make_sci(const u8 *addr, __be16 port)
return sci;
}
-static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
+static sci_t macsec_active_sci(struct macsec_secy *secy)
{
- sci_t sci;
+ struct macsec_rx_sc *rx_sc = rcu_dereference_bh(secy->rx_sc);
+
+ /* Case single RX SC */
+ if (rx_sc && !rcu_dereference_bh(rx_sc->next))
+ return (rx_sc->active) ? rx_sc->sci : 0;
+ /* Case no RX SC or multiple */
+ else
+ return 0;
+}
+
+static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present,
+ struct macsec_rxh_data *rxd)
+{
+ struct macsec_dev *macsec;
+ sci_t sci = 0;
- if (sci_present)
+ /* SC = 1 */
+ if (sci_present) {
memcpy(&sci, hdr->secure_channel_id,
sizeof(hdr->secure_channel_id));
- else
+ /* SC = 0; ES = 0 */
+ } else if ((!(hdr->tci_an & (MACSEC_TCI_ES | MACSEC_TCI_SC))) &&
+ (list_is_singular(&rxd->secys))) {
+ /* Only one SECY should exist in this scenario */
+ macsec = list_first_or_null_rcu(&rxd->secys, struct macsec_dev,
+ secys);
+ if (macsec)
+ return macsec_active_sci(&macsec->secy);
+ } else {
sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);
+ }
return sci;
}
@@ -1108,7 +1133,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
struct macsec_rxh_data *rxd;
struct macsec_dev *macsec;
unsigned int len;
- sci_t sci;
+ sci_t sci = 0;
u32 hdr_pn;
bool cbit;
struct pcpu_rx_sc_stats *rxsc_stats;
@@ -1155,11 +1180,14 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
- sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);
rcu_read_lock();
rxd = macsec_data_rcu(skb->dev);
+ sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci, rxd);
+ if (!sci)
+ goto drop_nosc;
+
list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);
@@ -1282,6 +1310,7 @@ drop:
macsec_rxsa_put(rx_sa);
drop_nosa:
macsec_rxsc_put(rx_sc);
+drop_nosc:
rcu_read_unlock();
drop_direct:
kfree_skb(skb);
@@ -1554,9 +1583,6 @@ static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
if (IS_ERR(dev))
return ERR_CAST(dev);
- if (*assoc_num >= MACSEC_NUM_AN)
- return ERR_PTR(-EINVAL);
-
secy = &macsec_priv(dev)->secy;
tx_sc = &secy->tx_sc;
@@ -1617,8 +1643,6 @@ static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
return ERR_PTR(-EINVAL);
*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
- if (*assoc_num >= MACSEC_NUM_AN)
- return ERR_PTR(-EINVAL);
rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
if (IS_ERR(rx_sc))
@@ -1641,24 +1665,21 @@ static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
- [MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
+ [MACSEC_RXSC_ATTR_ACTIVE] = NLA_POLICY_MAX(NLA_U8, 1),
};
static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
- [MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
- [MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
- [MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN_LEN(4),
- [MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
- .len = MACSEC_KEYID_LEN, },
- [MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
- .len = MACSEC_MAX_KEY_LEN, },
+ [MACSEC_SA_ATTR_AN] = NLA_POLICY_MAX(NLA_U8, MACSEC_NUM_AN - 1),
+ [MACSEC_SA_ATTR_ACTIVE] = NLA_POLICY_MAX(NLA_U8, 1),
+ [MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN(NLA_UINT, 1),
+ [MACSEC_SA_ATTR_KEYID] = NLA_POLICY_EXACT_LEN(MACSEC_KEYID_LEN),
+ [MACSEC_SA_ATTR_KEY] = NLA_POLICY_MAX_LEN(MACSEC_MAX_KEY_LEN),
[MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 },
- [MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY,
- .len = MACSEC_SALT_LEN, },
+ [MACSEC_SA_ATTR_SALT] = NLA_POLICY_EXACT_LEN(MACSEC_SALT_LEN),
};
static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
- [MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
+ [MACSEC_OFFLOAD_ATTR_TYPE] = NLA_POLICY_MAX(NLA_U8, MACSEC_OFFLOAD_MAX),
};
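
Assuming the standard netlink policy semantics, the new entries enforce in the genetlink core what the deleted open-coded checks used to do in the handlers:

	NLA_POLICY_MAX(NLA_U8, 1)                    /* replaces the "nla_get_u8(attr) > 1" checks */
	NLA_POLICY_MAX(NLA_U8, MACSEC_NUM_AN - 1)    /* replaces the "an >= MACSEC_NUM_AN" checks */
	NLA_POLICY_MIN(NLA_UINT, 1)                  /* replaces the "pn == 0" checks */
	NLA_POLICY_EXACT_LEN(MACSEC_KEYID_LEN)       /* replaces the nla_len() != MACSEC_KEYID_LEN / MACSEC_SALT_LEN checks */

which is why validate_add_rxsa(), validate_add_txsa() and validate_upd_sa() shrink, and validate_add_rxsc() disappears, further down in this diff.
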
/* Offloads an operation to a device driver */
@@ -1710,21 +1731,6 @@ static bool validate_add_rxsa(struct nlattr **attrs)
!attrs[MACSEC_SA_ATTR_KEYID])
return false;
- if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
- return false;
-
- if (attrs[MACSEC_SA_ATTR_PN] &&
- nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
- return false;
-
- if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
- if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
- return false;
- }
-
- if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
- return false;
-
return true;
}
@@ -1783,14 +1789,6 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
rtnl_unlock();
return -EINVAL;
}
-
- if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
- pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
- nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
- MACSEC_SALT_LEN);
- rtnl_unlock();
- return -EINVAL;
- }
}
rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
@@ -1815,7 +1813,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
if (tb_sa[MACSEC_SA_ATTR_PN]) {
spin_lock_bh(&rx_sa->lock);
- rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
+ rx_sa->next_pn = nla_get_uint(tb_sa[MACSEC_SA_ATTR_PN]);
spin_unlock_bh(&rx_sa->lock);
}
@@ -1866,19 +1864,6 @@ cleanup:
return err;
}
-static bool validate_add_rxsc(struct nlattr **attrs)
-{
- if (!attrs[MACSEC_RXSC_ATTR_SCI])
- return false;
-
- if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
- if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
- return false;
- }
-
- return true;
-}
-
static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
struct net_device *dev;
@@ -1896,7 +1881,7 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
if (parse_rxsc_config(attrs, tb_rxsc))
return -EINVAL;
- if (!validate_add_rxsc(tb_rxsc))
+ if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
return -EINVAL;
rtnl_lock();
@@ -1955,20 +1940,6 @@ static bool validate_add_txsa(struct nlattr **attrs)
!attrs[MACSEC_SA_ATTR_KEYID])
return false;
- if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
- return false;
-
- if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
- return false;
-
- if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
- if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
- return false;
- }
-
- if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
- return false;
-
return true;
}
@@ -2026,14 +1997,6 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
rtnl_unlock();
return -EINVAL;
}
-
- if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
- pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
- nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
- MACSEC_SALT_LEN);
- rtnl_unlock();
- return -EINVAL;
- }
}
tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
@@ -2057,7 +2020,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
}
spin_lock_bh(&tx_sa->lock);
- tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
+ tx_sa->next_pn = nla_get_uint(tb_sa[MACSEC_SA_ATTR_PN]);
spin_unlock_bh(&tx_sa->lock);
if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
@@ -2310,17 +2273,6 @@ static bool validate_upd_sa(struct nlattr **attrs)
attrs[MACSEC_SA_ATTR_SALT])
return false;
- if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
- return false;
-
- if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
- return false;
-
- if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
- if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
- return false;
- }
-
return true;
}
@@ -2369,7 +2321,7 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
spin_lock_bh(&tx_sa->lock);
prev_pn = tx_sa->next_pn_halves;
- tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
+ tx_sa->next_pn = nla_get_uint(tb_sa[MACSEC_SA_ATTR_PN]);
spin_unlock_bh(&tx_sa->lock);
}
@@ -2467,7 +2419,7 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
spin_lock_bh(&rx_sa->lock);
prev_pn = rx_sa->next_pn_halves;
- rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
+ rx_sa->next_pn = nla_get_uint(tb_sa[MACSEC_SA_ATTR_PN]);
spin_unlock_bh(&rx_sa->lock);
}
@@ -2527,7 +2479,7 @@ static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
if (parse_rxsc_config(attrs, tb_rxsc))
return -EINVAL;
- if (!validate_add_rxsc(tb_rxsc))
+ if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
return -EINVAL;
rtnl_lock();
@@ -2621,6 +2573,17 @@ static void macsec_set_head_tail_room(struct net_device *dev)
dev->needed_tailroom = real_dev->needed_tailroom + needed_tailroom;
}
+static void macsec_inherit_tso_max(struct net_device *dev)
+{
+ struct macsec_dev *macsec = macsec_priv(dev);
+
+ /* if macsec is offloaded, we need to follow the lower
+ * device's capabilities. otherwise, we can ignore them.
+ */
+ if (macsec_is_offloaded(macsec))
+ netif_inherit_tso_max(dev, macsec->real_dev);
+}
+
static int macsec_update_offload(struct net_device *dev, enum macsec_offload offload)
{
enum macsec_offload prev_offload;
@@ -2666,6 +2629,10 @@ static int macsec_update_offload(struct net_device *dev, enum macsec_offload off
macsec_set_head_tail_room(dev);
macsec->insert_tx_tag = macsec_needs_tx_tag(macsec, ops);
+ macsec_inherit_tso_max(dev);
+
+ netdev_update_features(dev);
+
return ret;
}
@@ -3521,6 +3488,10 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
#define MACSEC_FEATURES \
(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
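+/* Additional features a MACsec device may inherit from the lower device when
+ * the MACsec processing is offloaded to it (checksumming, GSO, LRO, RX hashing).
+ */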
+#define MACSEC_OFFLOAD_FEATURES \
+ (MACSEC_FEATURES | NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES | \
+ NETIF_F_LRO | NETIF_F_RXHASH | NETIF_F_CSUM_MASK | NETIF_F_RXCSUM)
+
static int macsec_dev_init(struct net_device *dev)
{
struct macsec_dev *macsec = macsec_priv(dev);
@@ -3531,7 +3502,12 @@ static int macsec_dev_init(struct net_device *dev)
if (err)
return err;
- dev->features = real_dev->features & MACSEC_FEATURES;
+ macsec_inherit_tso_max(dev);
+
+ dev->hw_features = real_dev->hw_features & MACSEC_OFFLOAD_FEATURES;
+ dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+
+ dev->features = real_dev->features & MACSEC_OFFLOAD_FEATURES;
dev->features |= NETIF_F_GSO_SOFTWARE;
dev->lltx = true;
dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
@@ -3561,8 +3537,12 @@ static netdev_features_t macsec_fix_features(struct net_device *dev,
{
struct macsec_dev *macsec = macsec_priv(dev);
struct net_device *real_dev = macsec->real_dev;
+ netdev_features_t mask;
+
+ mask = macsec_is_offloaded(macsec) ? MACSEC_OFFLOAD_FEATURES
+ : MACSEC_FEATURES;
- features &= (real_dev->features & MACSEC_FEATURES) |
+ features &= (real_dev->features & mask) |
NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
return features;
@@ -3777,21 +3757,23 @@ static const struct device_type macsec_type = {
.name = "macsec",
};
+static int validate_cipher_suite(const struct nlattr *attr,
+ struct netlink_ext_ack *extack);
static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
[IFLA_MACSEC_PORT] = { .type = NLA_U16 },
- [IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
- [IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
+ [IFLA_MACSEC_ICV_LEN] = NLA_POLICY_RANGE(NLA_U8, MACSEC_MIN_ICV_LEN, MACSEC_STD_ICV_LEN),
+ [IFLA_MACSEC_CIPHER_SUITE] = NLA_POLICY_VALIDATE_FN(NLA_U64, validate_cipher_suite),
[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
- [IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
- [IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
- [IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
- [IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
- [IFLA_MACSEC_ES] = { .type = NLA_U8 },
- [IFLA_MACSEC_SCB] = { .type = NLA_U8 },
- [IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
- [IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
- [IFLA_MACSEC_OFFLOAD] = { .type = NLA_U8 },
+ [IFLA_MACSEC_ENCODING_SA] = NLA_POLICY_MAX(NLA_U8, MACSEC_NUM_AN - 1),
+ [IFLA_MACSEC_ENCRYPT] = NLA_POLICY_MAX(NLA_U8, 1),
+ [IFLA_MACSEC_PROTECT] = NLA_POLICY_MAX(NLA_U8, 1),
+ [IFLA_MACSEC_INC_SCI] = NLA_POLICY_MAX(NLA_U8, 1),
+ [IFLA_MACSEC_ES] = NLA_POLICY_MAX(NLA_U8, 1),
+ [IFLA_MACSEC_SCB] = NLA_POLICY_MAX(NLA_U8, 1),
+ [IFLA_MACSEC_REPLAY_PROTECT] = NLA_POLICY_MAX(NLA_U8, 1),
+ [IFLA_MACSEC_VALIDATION] = NLA_POLICY_MAX(NLA_U8, MACSEC_VALIDATE_MAX),
+ [IFLA_MACSEC_OFFLOAD] = NLA_POLICY_MAX(NLA_U8, MACSEC_OFFLOAD_MAX),
};
static void macsec_free_netdev(struct net_device *dev)
@@ -3811,7 +3793,7 @@ static void macsec_setup(struct net_device *dev)
ether_setup(dev);
dev->min_mtu = 0;
dev->max_mtu = ETH_MAX_MTU;
- dev->priv_flags |= IFF_NO_QUEUE;
+ dev->priv_flags |= IFF_NO_QUEUE | IFF_UNICAST_FLT;
dev->netdev_ops = &macsec_netdev_ops;
dev->needs_free_netdev = true;
dev->priv_destructor = macsec_free_netdev;
@@ -4113,11 +4095,14 @@ static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
static struct lock_class_key macsec_netdev_addr_lock_key;
-static int macsec_newlink(struct net *net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int macsec_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
struct macsec_dev *macsec = macsec_priv(dev);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
rx_handler_func_t *rx_handler;
u8 icv_len = MACSEC_DEFAULT_ICV_LEN;
struct net_device *real_dev;
@@ -4126,7 +4111,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
if (!tb[IFLA_LINK])
return -EINVAL;
- real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
+ real_dev = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK]));
if (!real_dev)
return -ENODEV;
if (real_dev->type != ARPHRD_ETHER)
@@ -4226,6 +4211,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
if (err < 0)
goto del_dev;
+ netdev_update_features(dev);
netif_stacked_transfer_operstate(real_dev, dev);
linkwatch_fire_event(dev);
@@ -4242,20 +4228,30 @@ unregister:
return err;
}
+static int validate_cipher_suite(const struct nlattr *attr,
+ struct netlink_ext_ack *extack)
+{
+ switch (nla_get_u64(attr)) {
+ case MACSEC_CIPHER_ID_GCM_AES_128:
+ case MACSEC_CIPHER_ID_GCM_AES_256:
+ case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
+ case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
+ case MACSEC_DEFAULT_CIPHER_ID:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
- u64 csid = MACSEC_DEFAULT_CIPHER_ID;
u8 icv_len = MACSEC_DEFAULT_ICV_LEN;
- int flag;
bool es, scb, sci;
if (!data)
return 0;
- if (data[IFLA_MACSEC_CIPHER_SUITE])
- csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);
-
if (data[IFLA_MACSEC_ICV_LEN]) {
icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
if (icv_len != MACSEC_DEFAULT_ICV_LEN) {
@@ -4271,45 +4267,13 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
}
}
- switch (csid) {
- case MACSEC_CIPHER_ID_GCM_AES_128:
- case MACSEC_CIPHER_ID_GCM_AES_256:
- case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
- case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
- case MACSEC_DEFAULT_CIPHER_ID:
- if (icv_len < MACSEC_MIN_ICV_LEN ||
- icv_len > MACSEC_STD_ICV_LEN)
- return -EINVAL;
- break;
- default:
- return -EINVAL;
- }
-
- if (data[IFLA_MACSEC_ENCODING_SA]) {
- if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
- return -EINVAL;
- }
-
- for (flag = IFLA_MACSEC_ENCODING_SA + 1;
- flag < IFLA_MACSEC_VALIDATION;
- flag++) {
- if (data[flag]) {
- if (nla_get_u8(data[flag]) > 1)
- return -EINVAL;
- }
- }
-
- es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
- sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
- scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;
+ es = nla_get_u8_default(data[IFLA_MACSEC_ES], false);
+ sci = nla_get_u8_default(data[IFLA_MACSEC_INC_SCI], false);
+ scb = nla_get_u8_default(data[IFLA_MACSEC_SCB], false);
if ((sci && (scb || es)) || (scb && es))
return -EINVAL;
- if (data[IFLA_MACSEC_VALIDATION] &&
- nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
- return -EINVAL;
-
if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
!data[IFLA_MACSEC_WINDOW])
@@ -4428,31 +4392,26 @@ static int macsec_notify(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
+ struct macsec_rxh_data *rxd;
+ struct macsec_dev *m, *n;
LIST_HEAD(head);
if (!is_macsec_master(real_dev))
return NOTIFY_DONE;
+ rxd = macsec_data_rtnl(real_dev);
+
switch (event) {
case NETDEV_DOWN:
case NETDEV_UP:
- case NETDEV_CHANGE: {
- struct macsec_dev *m, *n;
- struct macsec_rxh_data *rxd;
-
- rxd = macsec_data_rtnl(real_dev);
+ case NETDEV_CHANGE:
list_for_each_entry_safe(m, n, &rxd->secys, secys) {
struct net_device *dev = m->secy.netdev;
netif_stacked_transfer_operstate(real_dev, dev);
}
break;
- }
- case NETDEV_UNREGISTER: {
- struct macsec_dev *m, *n;
- struct macsec_rxh_data *rxd;
-
- rxd = macsec_data_rtnl(real_dev);
+ case NETDEV_UNREGISTER:
list_for_each_entry_safe(m, n, &rxd->secys, secys) {
macsec_common_dellink(m->secy.netdev, &head);
}
@@ -4462,12 +4421,7 @@ static int macsec_notify(struct notifier_block *this, unsigned long event,
unregister_netdevice_many(&head);
break;
- }
- case NETDEV_CHANGEMTU: {
- struct macsec_dev *m;
- struct macsec_rxh_data *rxd;
-
- rxd = macsec_data_rtnl(real_dev);
+ case NETDEV_CHANGEMTU:
list_for_each_entry(m, &rxd->secys, secys) {
struct net_device *dev = m->secy.netdev;
unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
@@ -4476,7 +4430,13 @@ static int macsec_notify(struct notifier_block *this, unsigned long event,
if (dev->mtu > mtu)
dev_set_mtu(dev, mtu);
}
- }
+ break;
+ case NETDEV_FEAT_CHANGE:
+ list_for_each_entry(m, &rxd->secys, secys) {
+ macsec_inherit_tso_max(m->secy.netdev);
+ netdev_update_features(m->secy.netdev);
+ }
+ break;
}
return NOTIFY_OK;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index edbd5afcec41..7966545512cf 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -28,6 +28,7 @@
#include <linux/if_macvlan.h>
#include <linux/hash.h>
#include <linux/workqueue.h>
+#include <net/netdev_lock.h>
#include <net/rtnetlink.h>
#include <net/xfrm.h>
#include <linux/netpoll.h>
@@ -253,7 +254,7 @@ static u32 macvlan_hash_mix(const struct macvlan_dev *vlan)
static unsigned int mc_hash(const struct macvlan_dev *vlan,
const unsigned char *addr)
{
- u32 val = __get_unaligned_cpu32(addr + 2);
+ u32 val = get_unaligned((u32 *)(addr + 2));
val ^= macvlan_hash_mix(vlan);
return hash_32(val, MACVLAN_MC_FILTER_BITS);
@@ -368,7 +369,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
}
spin_unlock(&port->bc_queue.lock);
- queue_work(system_unbound_wq, &port->bc_work);
+ queue_work(system_dfl_wq, &port->bc_work);
if (err)
goto free_nskb;
@@ -753,13 +754,13 @@ static int macvlan_sync_address(struct net_device *dev,
static int macvlan_set_mac_address(struct net_device *dev, void *p)
{
struct macvlan_dev *vlan = netdev_priv(dev);
- struct sockaddr *addr = p;
+ struct sockaddr_storage *addr = p;
- if (!is_valid_ether_addr(addr->sa_data))
+ if (!is_valid_ether_addr(addr->__data))
return -EADDRNOTAVAIL;
/* If the addresses are the same, this is a no-op */
- if (ether_addr_equal(dev->dev_addr, addr->sa_data))
+ if (ether_addr_equal(dev->dev_addr, addr->__data))
return 0;
if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
@@ -767,10 +768,10 @@ static int macvlan_set_mac_address(struct net_device *dev, void *p)
return dev_set_mac_address(vlan->lowerdev, addr, NULL);
}
- if (macvlan_addr_busy(vlan->port, addr->sa_data))
+ if (macvlan_addr_busy(vlan->port, addr->__data))
return -EADDRINUSE;
- return macvlan_sync_address(dev, addr->sa_data);
+ return macvlan_sync_address(dev, addr->__data);
}
static void macvlan_change_rx_flags(struct net_device *dev, int change)
@@ -1024,7 +1025,7 @@ static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
- u16 flags,
+ u16 flags, bool *notified,
struct netlink_ext_ack *extack)
{
struct macvlan_dev *vlan = netdev_priv(dev);
@@ -1049,7 +1050,7 @@ static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
- const unsigned char *addr, u16 vid,
+ const unsigned char *addr, u16 vid, bool *notified,
struct netlink_ext_ack *extack)
{
struct macvlan_dev *vlan = netdev_priv(dev);
@@ -1294,11 +1295,11 @@ static void macvlan_port_destroy(struct net_device *dev)
*/
if (macvlan_passthru(port) &&
!ether_addr_equal(port->dev->dev_addr, port->perm_addr)) {
- struct sockaddr sa;
+ struct sockaddr_storage ss;
- sa.sa_family = port->dev->type;
- memcpy(&sa.sa_data, port->perm_addr, port->dev->addr_len);
- dev_set_mac_address(port->dev, &sa, NULL);
+ ss.ss_family = port->dev->type;
+ memcpy(&ss.__data, port->perm_addr, port->dev->addr_len);
+ dev_set_mac_address(port->dev, &ss, NULL);
}
kfree(port);
@@ -1440,21 +1441,24 @@ static int macvlan_changelink_sources(struct macvlan_dev *vlan, u32 mode,
return 0;
}
-int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+int macvlan_common_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
struct macvlan_dev *vlan = netdev_priv(dev);
- struct macvlan_port *port;
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct net_device *lowerdev;
- int err;
- int macmode;
+ struct macvlan_port *port;
bool create = false;
+ int macmode;
+ int err;
if (!tb[IFLA_LINK])
return -EINVAL;
- lowerdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+ lowerdev = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK]));
if (lowerdev == NULL)
return -ENODEV;
@@ -1565,11 +1569,11 @@ destroy_macvlan_port:
}
EXPORT_SYMBOL_GPL(macvlan_common_newlink);
-static int macvlan_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int macvlan_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
- return macvlan_common_newlink(src_net, dev, tb, data, extack);
+ return macvlan_common_newlink(dev, params, extack);
}
void macvlan_dellink(struct net_device *dev, struct list_head *head)
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 29a5929d48e5..b391a0f740a3 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -77,8 +77,8 @@ static void macvtap_update_features(struct tap_dev *tap,
netdev_update_features(vlan->dev);
}
-static int macvtap_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int macvtap_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
struct macvtap_dev *vlantap = netdev_priv(dev);
@@ -105,7 +105,7 @@ static int macvtap_newlink(struct net *src_net, struct net_device *dev,
/* Don't put anything that may fail after macvlan_common_newlink
* because we can't undo what it does.
*/
- err = macvlan_common_newlink(src_net, dev, tb, data, extack);
+ err = macvlan_common_newlink(dev, params, extack);
if (err) {
netdev_rx_handler_unregister(dev);
return err;
diff --git a/drivers/net/mctp/Kconfig b/drivers/net/mctp/Kconfig
index 15860d6ac39f..cf325ab0b1ef 100644
--- a/drivers/net/mctp/Kconfig
+++ b/drivers/net/mctp/Kconfig
@@ -47,6 +47,16 @@ config MCTP_TRANSPORT_I3C
A MCTP protocol network device is created for each I3C bus
having a "mctp-controller" devicetree property.
+config MCTP_TRANSPORT_USB
+ tristate "MCTP USB transport"
+ depends on USB
+ help
+ Provides a driver to access MCTP devices over USB transport,
+ defined by DMTF specification DSP0283.
+
+ MCTP-over-USB interfaces are peer-to-peer, so each interface
+ represents a physical connection to one remote MCTP endpoint.
+
endmenu
endif
diff --git a/drivers/net/mctp/Makefile b/drivers/net/mctp/Makefile
index e1cb99ced54a..c36006849a1e 100644
--- a/drivers/net/mctp/Makefile
+++ b/drivers/net/mctp/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_MCTP_SERIAL) += mctp-serial.o
obj-$(CONFIG_MCTP_TRANSPORT_I2C) += mctp-i2c.o
obj-$(CONFIG_MCTP_TRANSPORT_I3C) += mctp-i3c.o
+obj-$(CONFIG_MCTP_TRANSPORT_USB) += mctp-usb.o
diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c
index e70fb6687994..f782d93f826e 100644
--- a/drivers/net/mctp/mctp-i2c.c
+++ b/drivers/net/mctp/mctp-i2c.c
@@ -177,8 +177,7 @@ static struct mctp_i2c_client *mctp_i2c_new_client(struct i2c_client *client)
return mcli;
err:
if (mcli) {
- if (mcli->client)
- i2c_unregister_device(mcli->client);
+ i2c_unregister_device(mcli->client);
kfree(mcli);
}
return ERR_PTR(rc);
@@ -538,7 +537,7 @@ static void mctp_i2c_xmit(struct mctp_i2c_dev *midev, struct sk_buff *skb)
rc = __i2c_transfer(midev->adapter, &msg, 1);
/* on tx errors, the flow can no longer be considered valid */
- if (rc)
+ if (rc < 0)
mctp_i2c_invalidate_tx_flow(midev, skb);
break;
@@ -584,6 +583,7 @@ static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev,
struct mctp_i2c_hdr *hdr;
struct mctp_hdr *mhdr;
u8 lldst, llsrc;
+ int rc;
if (len > MCTP_I2C_MAXMTU)
return -EMSGSIZE;
@@ -594,6 +594,10 @@ static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev,
lldst = *((u8 *)daddr);
llsrc = *((u8 *)saddr);
+ rc = skb_cow_head(skb, sizeof(struct mctp_i2c_hdr));
+ if (rc)
+ return rc;
+
skb_push(skb, sizeof(struct mctp_i2c_hdr));
skb_reset_mac_header(skb);
hdr = (void *)skb_mac_header(skb);
@@ -880,7 +884,8 @@ static int mctp_i2c_add_netdev(struct mctp_i2c_client *mcli,
goto err;
}
- rc = mctp_register_netdev(ndev, &mctp_i2c_mctp_ops);
+ rc = mctp_register_netdev(ndev, &mctp_i2c_mctp_ops,
+ MCTP_PHYS_BINDING_SMBUS);
if (rc < 0) {
dev_err(&mcli->client->dev,
"register netdev \"%s\" failed %d\n",
diff --git a/drivers/net/mctp/mctp-i3c.c b/drivers/net/mctp/mctp-i3c.c
index 1bc87a062686..36c2405677c2 100644
--- a/drivers/net/mctp/mctp-i3c.c
+++ b/drivers/net/mctp/mctp-i3c.c
@@ -99,7 +99,7 @@ struct mctp_i3c_internal_hdr {
static int mctp_i3c_read(struct mctp_i3c_device *mi)
{
- struct i3c_priv_xfer xfer = { .rnw = 1, .len = mi->mrl };
+ struct i3c_xfer xfer = { .rnw = 1, .len = mi->mrl };
struct net_device_stats *stats = &mi->mbus->ndev->stats;
struct mctp_i3c_internal_hdr *ihdr = NULL;
struct sk_buff *skb = NULL;
@@ -125,7 +125,9 @@ static int mctp_i3c_read(struct mctp_i3c_device *mi)
xfer.data.in = skb_put(skb, mi->mrl);
- rc = i3c_device_do_priv_xfers(mi->i3c, &xfer, 1);
+ /* Make sure netif_rx() is called in the same order as the i3c reads. */
+ mutex_lock(&mi->lock);
+ rc = i3c_device_do_xfers(mi->i3c, &xfer, 1, I3C_SDR);
if (rc < 0)
goto err;
@@ -166,8 +168,10 @@ static int mctp_i3c_read(struct mctp_i3c_device *mi)
stats->rx_dropped++;
}
+ mutex_unlock(&mi->lock);
return 0;
err:
+ mutex_unlock(&mi->lock);
kfree_skb(skb);
return rc;
}
@@ -356,7 +360,7 @@ mctp_i3c_lookup(struct mctp_i3c_bus *mbus, u64 pid)
static void mctp_i3c_xmit(struct mctp_i3c_bus *mbus, struct sk_buff *skb)
{
struct net_device_stats *stats = &mbus->ndev->stats;
- struct i3c_priv_xfer xfer = { .rnw = false };
+ struct i3c_xfer xfer = { .rnw = false };
struct mctp_i3c_internal_hdr *ihdr = NULL;
struct mctp_i3c_device *mi = NULL;
unsigned int data_len;
@@ -405,7 +409,7 @@ static void mctp_i3c_xmit(struct mctp_i3c_bus *mbus, struct sk_buff *skb)
data[data_len] = pec;
xfer.data.out = data;
- rc = i3c_device_do_priv_xfers(mi->i3c, &xfer, 1);
+ rc = i3c_device_do_xfers(mi->i3c, &xfer, 1, I3C_SDR);
if (rc == 0) {
stats->tx_bytes += data_len;
stats->tx_packets++;
@@ -502,6 +506,14 @@ static int mctp_i3c_header_create(struct sk_buff *skb, struct net_device *dev,
const void *saddr, unsigned int len)
{
struct mctp_i3c_internal_hdr *ihdr;
+ int rc;
+
+ if (!daddr || !saddr)
+ return -EINVAL;
+
+ rc = skb_cow_head(skb, sizeof(struct mctp_i3c_internal_hdr));
+ if (rc)
+ return rc;
skb_push(skb, sizeof(struct mctp_i3c_internal_hdr));
skb_reset_mac_header(skb);
@@ -607,7 +619,7 @@ __must_hold(&busdevs_lock)
goto err_free_uninit;
}
- rc = mctp_register_netdev(ndev, NULL);
+ rc = mctp_register_netdev(ndev, NULL, MCTP_PHYS_BINDING_I3C);
if (rc < 0) {
dev_warn(&ndev->dev, "netdev register failed: %d\n", rc);
goto err_free_netdev;
diff --git a/drivers/net/mctp/mctp-serial.c b/drivers/net/mctp/mctp-serial.c
index e63720ec3238..26c9a33fd636 100644
--- a/drivers/net/mctp/mctp-serial.c
+++ b/drivers/net/mctp/mctp-serial.c
@@ -23,6 +23,7 @@
#include <linux/mctp.h>
#include <net/mctp.h>
+#include <net/mctpdevice.h>
#include <net/pkt_sched.h>
#define MCTP_SERIAL_MTU 68 /* base mtu (64) + mctp header */
@@ -470,7 +471,7 @@ static int mctp_serial_open(struct tty_struct *tty)
spin_lock_init(&dev->lock);
INIT_WORK(&dev->tx_work, mctp_serial_tx_work);
- rc = register_netdev(ndev);
+ rc = mctp_register_netdev(ndev, NULL, MCTP_PHYS_BINDING_SERIAL);
if (rc)
goto free_netdev;
@@ -492,7 +493,7 @@ static void mctp_serial_close(struct tty_struct *tty)
struct mctp_serial *dev = tty->disc_data;
int idx = dev->idx;
- unregister_netdev(dev->netdev);
+ mctp_unregister_netdev(dev->netdev);
ida_free(&mctp_serial_ida, idx);
}
diff --git a/drivers/net/mctp/mctp-usb.c b/drivers/net/mctp/mctp-usb.c
new file mode 100644
index 000000000000..ef860cfc629f
--- /dev/null
+++ b/drivers/net/mctp/mctp-usb.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * mctp-usb.c - MCTP-over-USB (DMTF DSP0283) transport binding driver.
+ *
+ * DSP0283 is available at:
+ * https://www.dmtf.org/sites/default/files/standards/documents/DSP0283_1.0.1.pdf
+ *
+ * Copyright (C) 2024-2025 Code Construct Pty Ltd
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/usb.h>
+#include <linux/usb/mctp-usb.h>
+
+#include <net/mctp.h>
+#include <net/mctpdevice.h>
+#include <net/pkt_sched.h>
+
+#include <uapi/linux/if_arp.h>
+
+struct mctp_usb {
+ struct usb_device *usbdev;
+ struct usb_interface *intf;
+ bool stopped;
+
+ struct net_device *netdev;
+
+ u8 ep_in;
+ u8 ep_out;
+
+ struct urb *tx_urb;
+ struct urb *rx_urb;
+
+ struct delayed_work rx_retry_work;
+};
+
+static void mctp_usb_out_complete(struct urb *urb)
+{
+ struct sk_buff *skb = urb->context;
+ struct net_device *netdev = skb->dev;
+ int status;
+
+ status = urb->status;
+
+ switch (status) {
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ case -EPROTO:
+ dev_dstats_tx_dropped(netdev);
+ break;
+ case 0:
+ dev_dstats_tx_add(netdev, skb->len);
+ netif_wake_queue(netdev);
+ consume_skb(skb);
+ return;
+ default:
+ netdev_dbg(netdev, "unexpected tx urb status: %d\n", status);
+ dev_dstats_tx_dropped(netdev);
+ }
+
+ kfree_skb(skb);
+}
+
+static netdev_tx_t mctp_usb_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct mctp_usb *mctp_usb = netdev_priv(dev);
+ struct mctp_usb_hdr *hdr;
+ unsigned int plen;
+ struct urb *urb;
+ int rc;
+
+ plen = skb->len;
+
+ if (plen + sizeof(*hdr) > MCTP_USB_XFER_SIZE)
+ goto err_drop;
+
+ rc = skb_cow_head(skb, sizeof(*hdr));
+ if (rc)
+ goto err_drop;
+
+ hdr = skb_push(skb, sizeof(*hdr));
+ if (!hdr)
+ goto err_drop;
+
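+ /* Fill the MCTP-over-USB transfer header: DMTF vendor ID, a reserved
+ * byte, and the total transfer length including this header.
+ */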
+ hdr->id = cpu_to_be16(MCTP_USB_DMTF_ID);
+ hdr->rsvd = 0;
+ hdr->len = plen + sizeof(*hdr);
+
+ urb = mctp_usb->tx_urb;
+
+ usb_fill_bulk_urb(urb, mctp_usb->usbdev,
+ usb_sndbulkpipe(mctp_usb->usbdev, mctp_usb->ep_out),
+ skb->data, skb->len,
+ mctp_usb_out_complete, skb);
+
+ /* Stop the TX queue first to avoid racing with the URB completion handler */
+ netif_stop_queue(dev);
+ rc = usb_submit_urb(urb, GFP_ATOMIC);
+ if (rc) {
+ netif_wake_queue(dev);
+ goto err_drop;
+ }
+
+ return NETDEV_TX_OK;
+
+err_drop:
+ dev_dstats_tx_dropped(dev);
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static void mctp_usb_in_complete(struct urb *urb);
+
+/* If we fail to queue an IN URB atomically (either due to skb allocation or
+ * URB submission), we schedule an RX queue attempt from non-atomic context
+ * after a delay, specified in jiffies.
+ */
+static const unsigned long RX_RETRY_DELAY = HZ / 4;
+
+static int mctp_usb_rx_queue(struct mctp_usb *mctp_usb, gfp_t gfp)
+{
+ struct sk_buff *skb;
+ int rc;
+
+ skb = __netdev_alloc_skb(mctp_usb->netdev, MCTP_USB_XFER_SIZE, gfp);
+ if (!skb) {
+ rc = -ENOMEM;
+ goto err_retry;
+ }
+
+ usb_fill_bulk_urb(mctp_usb->rx_urb, mctp_usb->usbdev,
+ usb_rcvbulkpipe(mctp_usb->usbdev, mctp_usb->ep_in),
+ skb->data, MCTP_USB_XFER_SIZE,
+ mctp_usb_in_complete, skb);
+
+ rc = usb_submit_urb(mctp_usb->rx_urb, gfp);
+ if (rc) {
+ netdev_dbg(mctp_usb->netdev, "rx urb submit failure: %d\n", rc);
+ kfree_skb(skb);
+ if (rc == -ENOMEM)
+ goto err_retry;
+ }
+
+ return rc;
+
+err_retry:
+ schedule_delayed_work(&mctp_usb->rx_retry_work, RX_RETRY_DELAY);
+ return rc;
+}
+
+static void mctp_usb_in_complete(struct urb *urb)
+{
+ struct sk_buff *skb = urb->context;
+ struct net_device *netdev = skb->dev;
+ struct mctp_usb *mctp_usb = netdev_priv(netdev);
+ struct mctp_skb_cb *cb;
+ unsigned int len;
+ int status;
+
+ status = urb->status;
+
+ switch (status) {
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ case -EPROTO:
+ kfree_skb(skb);
+ return;
+ case 0:
+ break;
+ default:
+ netdev_dbg(netdev, "unexpected rx urb status: %d\n", status);
+ kfree_skb(skb);
+ return;
+ }
+
+ len = urb->actual_length;
+ __skb_put(skb, len);
+
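+ /* A single bulk IN transfer may carry several MCTP-over-USB packets
+ * back-to-back; walk them one at a time, handing each up the stack.
+ */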
+ while (skb) {
+ struct sk_buff *skb2 = NULL;
+ struct mctp_usb_hdr *hdr;
+ u8 pkt_len; /* length of MCTP packet, no USB header */
+
+ skb_reset_mac_header(skb);
+ hdr = skb_pull_data(skb, sizeof(*hdr));
+ if (!hdr)
+ break;
+
+ if (be16_to_cpu(hdr->id) != MCTP_USB_DMTF_ID) {
+ netdev_dbg(netdev, "rx: invalid id %04x\n",
+ be16_to_cpu(hdr->id));
+ break;
+ }
+
+ if (hdr->len <
+ sizeof(struct mctp_hdr) + sizeof(struct mctp_usb_hdr)) {
+ netdev_dbg(netdev, "rx: short packet (hdr) %d\n",
+ hdr->len);
+ break;
+ }
+
+ /* we know we have at least sizeof(struct mctp_usb_hdr) here */
+ pkt_len = hdr->len - sizeof(struct mctp_usb_hdr);
+ if (pkt_len > skb->len) {
+ netdev_dbg(netdev,
+ "rx: short packet (xfer) %d, actual %d\n",
+ hdr->len, skb->len);
+ break;
+ }
+
+ if (pkt_len < skb->len) {
+ /* more packets may follow - clone to a new
+ * skb to use on the next iteration
+ */
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (skb2) {
+ if (!skb_pull(skb2, pkt_len)) {
+ kfree_skb(skb2);
+ skb2 = NULL;
+ }
+ }
+ skb_trim(skb, pkt_len);
+ }
+
+ dev_dstats_rx_add(netdev, skb->len);
+
+ skb->protocol = htons(ETH_P_MCTP);
+ skb_reset_network_header(skb);
+ cb = __mctp_cb(skb);
+ cb->halen = 0;
+ netif_rx(skb);
+
+ skb = skb2;
+ }
+
+ if (skb)
+ kfree_skb(skb);
+
+ mctp_usb_rx_queue(mctp_usb, GFP_ATOMIC);
+}
+
+static void mctp_usb_rx_retry_work(struct work_struct *work)
+{
+ struct mctp_usb *mctp_usb = container_of(work, struct mctp_usb,
+ rx_retry_work.work);
+
+ if (READ_ONCE(mctp_usb->stopped))
+ return;
+
+ mctp_usb_rx_queue(mctp_usb, GFP_KERNEL);
+}
+
+static int mctp_usb_open(struct net_device *dev)
+{
+ struct mctp_usb *mctp_usb = netdev_priv(dev);
+
+ WRITE_ONCE(mctp_usb->stopped, false);
+
+ netif_start_queue(dev);
+
+ return mctp_usb_rx_queue(mctp_usb, GFP_KERNEL);
+}
+
+static int mctp_usb_stop(struct net_device *dev)
+{
+ struct mctp_usb *mctp_usb = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ /* prevent RX submission retry */
+ WRITE_ONCE(mctp_usb->stopped, true);
+
+ usb_kill_urb(mctp_usb->rx_urb);
+ usb_kill_urb(mctp_usb->tx_urb);
+
+ cancel_delayed_work_sync(&mctp_usb->rx_retry_work);
+
+ return 0;
+}
+
+static const struct net_device_ops mctp_usb_netdev_ops = {
+ .ndo_start_xmit = mctp_usb_start_xmit,
+ .ndo_open = mctp_usb_open,
+ .ndo_stop = mctp_usb_stop,
+};
+
+static void mctp_usb_netdev_setup(struct net_device *dev)
+{
+ dev->type = ARPHRD_MCTP;
+
+ dev->mtu = MCTP_USB_MTU_MIN;
+ dev->min_mtu = MCTP_USB_MTU_MIN;
+ dev->max_mtu = MCTP_USB_MTU_MAX;
+
+ dev->hard_header_len = sizeof(struct mctp_usb_hdr);
+ dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
+ dev->flags = IFF_NOARP;
+ dev->netdev_ops = &mctp_usb_netdev_ops;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
+}
+
+static int mctp_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_endpoint_descriptor *ep_in, *ep_out;
+ struct usb_host_interface *iface_desc;
+ struct net_device *netdev;
+ struct mctp_usb *dev;
+ int rc;
+
+ /* only one alternate */
+ iface_desc = intf->cur_altsetting;
+
+ rc = usb_find_common_endpoints(iface_desc, &ep_in, &ep_out, NULL, NULL);
+ if (rc) {
+ dev_err(&intf->dev, "invalid endpoints on device?\n");
+ return rc;
+ }
+
+ netdev = alloc_netdev(sizeof(*dev), "mctpusb%d", NET_NAME_ENUM,
+ mctp_usb_netdev_setup);
+ if (!netdev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(netdev, &intf->dev);
+ dev = netdev_priv(netdev);
+ dev->netdev = netdev;
+ dev->usbdev = usb_get_dev(interface_to_usbdev(intf));
+ dev->intf = intf;
+ usb_set_intfdata(intf, dev);
+
+ dev->ep_in = ep_in->bEndpointAddress;
+ dev->ep_out = ep_out->bEndpointAddress;
+
+ dev->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ dev->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!dev->tx_urb || !dev->rx_urb) {
+ rc = -ENOMEM;
+ goto err_free_urbs;
+ }
+
+ INIT_DELAYED_WORK(&dev->rx_retry_work, mctp_usb_rx_retry_work);
+
+ rc = mctp_register_netdev(netdev, NULL, MCTP_PHYS_BINDING_USB);
+ if (rc)
+ goto err_free_urbs;
+
+ return 0;
+
+err_free_urbs:
+ usb_free_urb(dev->tx_urb);
+ usb_free_urb(dev->rx_urb);
+ free_netdev(netdev);
+ return rc;
+}
+
+static void mctp_usb_disconnect(struct usb_interface *intf)
+{
+ struct mctp_usb *dev = usb_get_intfdata(intf);
+
+ mctp_unregister_netdev(dev->netdev);
+ usb_free_urb(dev->tx_urb);
+ usb_free_urb(dev->rx_urb);
+ usb_put_dev(dev->usbdev);
+ free_netdev(dev->netdev);
+}
+
+static const struct usb_device_id mctp_usb_devices[] = {
+ { USB_INTERFACE_INFO(USB_CLASS_MCTP, 0x0, 0x1) },
+ { 0 },
+};
+
+MODULE_DEVICE_TABLE(usb, mctp_usb_devices);
+
+static struct usb_driver mctp_usb_driver = {
+ .name = "mctp-usb",
+ .id_table = mctp_usb_devices,
+ .probe = mctp_usb_probe,
+ .disconnect = mctp_usb_disconnect,
+};
+
+module_usb_driver(mctp_usb_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jeremy Kerr <jk@codeconstruct.com.au>");
+MODULE_DESCRIPTION("MCTP USB transport");
diff --git a/drivers/net/mdio.c b/drivers/net/mdio.c
index e08c90ac0c6e..f67a4d4005e7 100644
--- a/drivers/net/mdio.c
+++ b/drivers/net/mdio.c
@@ -167,178 +167,6 @@ static u32 mdio45_get_an(const struct mdio_if_info *mdio, u16 addr)
}
/**
- * mdio45_ethtool_gset_npage - get settings for ETHTOOL_GSET
- * @mdio: MDIO interface
- * @ecmd: Ethtool request structure
- * @npage_adv: Modes currently advertised on next pages
- * @npage_lpa: Modes advertised by link partner on next pages
- *
- * The @ecmd parameter is expected to have been cleared before calling
- * mdio45_ethtool_gset_npage().
- *
- * Since the CSRs for auto-negotiation using next pages are not fully
- * standardised, this function does not attempt to decode them. The
- * caller must pass them in.
- */
-void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio,
- struct ethtool_cmd *ecmd,
- u32 npage_adv, u32 npage_lpa)
-{
- int reg;
- u32 speed;
-
- BUILD_BUG_ON(MDIO_SUPPORTS_C22 != ETH_MDIO_SUPPORTS_C22);
- BUILD_BUG_ON(MDIO_SUPPORTS_C45 != ETH_MDIO_SUPPORTS_C45);
-
- ecmd->transceiver = XCVR_INTERNAL;
- ecmd->phy_address = mdio->prtad;
- ecmd->mdio_support =
- mdio->mode_support & (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22);
-
- reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD,
- MDIO_CTRL2);
- switch (reg & MDIO_PMA_CTRL2_TYPE) {
- case MDIO_PMA_CTRL2_10GBT:
- case MDIO_PMA_CTRL2_1000BT:
- case MDIO_PMA_CTRL2_100BTX:
- case MDIO_PMA_CTRL2_10BT:
- ecmd->port = PORT_TP;
- ecmd->supported = SUPPORTED_TP;
- reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD,
- MDIO_SPEED);
- if (reg & MDIO_SPEED_10G)
- ecmd->supported |= SUPPORTED_10000baseT_Full;
- if (reg & MDIO_PMA_SPEED_1000)
- ecmd->supported |= (SUPPORTED_1000baseT_Full |
- SUPPORTED_1000baseT_Half);
- if (reg & MDIO_PMA_SPEED_100)
- ecmd->supported |= (SUPPORTED_100baseT_Full |
- SUPPORTED_100baseT_Half);
- if (reg & MDIO_PMA_SPEED_10)
- ecmd->supported |= (SUPPORTED_10baseT_Full |
- SUPPORTED_10baseT_Half);
- ecmd->advertising = ADVERTISED_TP;
- break;
-
- case MDIO_PMA_CTRL2_10GBCX4:
- ecmd->port = PORT_OTHER;
- ecmd->supported = 0;
- ecmd->advertising = 0;
- break;
-
- case MDIO_PMA_CTRL2_10GBKX4:
- case MDIO_PMA_CTRL2_10GBKR:
- case MDIO_PMA_CTRL2_1000BKX:
- ecmd->port = PORT_OTHER;
- ecmd->supported = SUPPORTED_Backplane;
- reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD,
- MDIO_PMA_EXTABLE);
- if (reg & MDIO_PMA_EXTABLE_10GBKX4)
- ecmd->supported |= SUPPORTED_10000baseKX4_Full;
- if (reg & MDIO_PMA_EXTABLE_10GBKR)
- ecmd->supported |= SUPPORTED_10000baseKR_Full;
- if (reg & MDIO_PMA_EXTABLE_1000BKX)
- ecmd->supported |= SUPPORTED_1000baseKX_Full;
- reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD,
- MDIO_PMA_10GBR_FECABLE);
- if (reg & MDIO_PMA_10GBR_FECABLE_ABLE)
- ecmd->supported |= SUPPORTED_10000baseR_FEC;
- ecmd->advertising = ADVERTISED_Backplane;
- break;
-
- /* All the other defined modes are flavours of optical */
- default:
- ecmd->port = PORT_FIBRE;
- ecmd->supported = SUPPORTED_FIBRE;
- ecmd->advertising = ADVERTISED_FIBRE;
- break;
- }
-
- if (mdio->mmds & MDIO_DEVS_AN) {
- ecmd->supported |= SUPPORTED_Autoneg;
- reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_AN,
- MDIO_CTRL1);
- if (reg & MDIO_AN_CTRL1_ENABLE) {
- ecmd->autoneg = AUTONEG_ENABLE;
- ecmd->advertising |=
- ADVERTISED_Autoneg |
- mdio45_get_an(mdio, MDIO_AN_ADVERTISE) |
- npage_adv;
- } else {
- ecmd->autoneg = AUTONEG_DISABLE;
- }
- } else {
- ecmd->autoneg = AUTONEG_DISABLE;
- }
-
- if (ecmd->autoneg) {
- u32 modes = 0;
- int an_stat = mdio->mdio_read(mdio->dev, mdio->prtad,
- MDIO_MMD_AN, MDIO_STAT1);
-
- /* If AN is complete and successful, report best common
- * mode, otherwise report best advertised mode. */
- if (an_stat & MDIO_AN_STAT1_COMPLETE) {
- ecmd->lp_advertising =
- mdio45_get_an(mdio, MDIO_AN_LPA) | npage_lpa;
- if (an_stat & MDIO_AN_STAT1_LPABLE)
- ecmd->lp_advertising |= ADVERTISED_Autoneg;
- modes = ecmd->advertising & ecmd->lp_advertising;
- }
- if ((modes & ~ADVERTISED_Autoneg) == 0)
- modes = ecmd->advertising;
-
- if (modes & (ADVERTISED_10000baseT_Full |
- ADVERTISED_10000baseKX4_Full |
- ADVERTISED_10000baseKR_Full)) {
- speed = SPEED_10000;
- ecmd->duplex = DUPLEX_FULL;
- } else if (modes & (ADVERTISED_1000baseT_Full |
- ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseKX_Full)) {
- speed = SPEED_1000;
- ecmd->duplex = !(modes & ADVERTISED_1000baseT_Half);
- } else if (modes & (ADVERTISED_100baseT_Full |
- ADVERTISED_100baseT_Half)) {
- speed = SPEED_100;
- ecmd->duplex = !!(modes & ADVERTISED_100baseT_Full);
- } else {
- speed = SPEED_10;
- ecmd->duplex = !!(modes & ADVERTISED_10baseT_Full);
- }
- } else {
- /* Report forced settings */
- reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD,
- MDIO_CTRL1);
- speed = (((reg & MDIO_PMA_CTRL1_SPEED1000) ? 100 : 1)
- * ((reg & MDIO_PMA_CTRL1_SPEED100) ? 100 : 10));
- ecmd->duplex = (reg & MDIO_CTRL1_FULLDPLX ||
- speed == SPEED_10000);
- }
-
- ethtool_cmd_speed_set(ecmd, speed);
-
- /* 10GBASE-T MDI/MDI-X */
- if (ecmd->port == PORT_TP
- && (ethtool_cmd_speed(ecmd) == SPEED_10000)) {
- switch (mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD,
- MDIO_PMA_10GBT_SWAPPOL)) {
- case MDIO_PMA_10GBT_SWAPPOL_ABNX | MDIO_PMA_10GBT_SWAPPOL_CDNX:
- ecmd->eth_tp_mdix = ETH_TP_MDI;
- break;
- case 0:
- ecmd->eth_tp_mdix = ETH_TP_MDI_X;
- break;
- default:
- /* It's complicated... */
- ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
- break;
- }
- }
-}
-EXPORT_SYMBOL(mdio45_ethtool_gset_npage);
-
-/**
* mdio45_ethtool_ksettings_get_npage - get settings for ETHTOOL_GLINKSETTINGS
* @mdio: MDIO interface
* @cmd: Ethtool request structure
diff --git a/drivers/net/mdio/Kconfig b/drivers/net/mdio/Kconfig
index 4a7a303be2f7..44380378911b 100644
--- a/drivers/net/mdio/Kconfig
+++ b/drivers/net/mdio/Kconfig
@@ -3,48 +3,31 @@
# MDIO Layer Configuration
#
-menuconfig MDIO_DEVICE
- tristate "MDIO bus device drivers"
- help
- MDIO devices and driver infrastructure code.
-
-if MDIO_DEVICE
-
-config MDIO_BUS
- tristate
- default m if PHYLIB=m
- default MDIO_DEVICE
- help
- This internal symbol is used for link time dependencies and it
- reflects whether the mdio_bus/mdio_device code is built as a
- loadable module or built-in.
+if PHYLIB
config FWNODE_MDIO
- def_tristate PHYLIB
- depends on (ACPI || OF) || COMPILE_TEST
+ def_tristate (ACPI || OF) || COMPILE_TEST
select FIXED_PHY
help
FWNODE MDIO bus (Ethernet PHY) accessors
config OF_MDIO
- def_tristate PHYLIB
- depends on OF
- depends on PHYLIB
+ def_tristate OF
select FIXED_PHY
help
OpenFirmware MDIO bus (Ethernet PHY) accessors
config ACPI_MDIO
- def_tristate PHYLIB
- depends on ACPI
- depends on PHYLIB
+ def_tristate ACPI
help
ACPI MDIO bus (Ethernet PHY) accessors
-if MDIO_BUS
-
-config MDIO_DEVRES
- tristate
+config MDIO_AIROHA
+ tristate "Airoha AN7583 MDIO bus controller"
+ depends on ARCH_AIROHA || COMPILE_TEST
+ help
+ This module provides a driver for the MDIO busses found in the
+ Airoha AN7583 SoC.
config MDIO_SUN4I
tristate "Allwinner sun4i MDIO interface support"
@@ -65,7 +48,6 @@ config MDIO_ASPEED
tristate "ASPEED MDIO bus controller"
depends on ARCH_ASPEED || COMPILE_TEST
depends on OF_MDIO && HAS_IOMEM
- depends on MDIO_DEVRES
help
This module provides a driver for the independent MDIO bus
controllers found in the ASPEED AST2600 SoC. This is a driver for the
@@ -135,7 +117,6 @@ config MDIO_I2C
config MDIO_MVUSB
tristate "Marvell USB to MDIO Adapter"
depends on USB
- select MDIO_DEVRES
help
A USB to MDIO converter present on development boards for
Marvell's Link Street family of Ethernet switches.
@@ -143,7 +124,6 @@ config MDIO_MVUSB
config MDIO_MSCC_MIIM
tristate "Microsemi MIIM interface support"
depends on HAS_IOMEM && REGMAP_MMIO
- select MDIO_DEVRES
help
This driver supports the MIIM (MDIO) interface found in the network
switches of the Microsemi SoCs; it is recommended to switch on
@@ -161,7 +141,6 @@ config MDIO_OCTEON
depends on (64BIT && OF_MDIO) || COMPILE_TEST
depends on HAS_IOMEM
select MDIO_CAVIUM
- select MDIO_DEVRES
help
This module provides a driver for the Octeon and ThunderX MDIO
buses. It is required by the Octeon and ThunderX ethernet device
@@ -171,7 +150,6 @@ config MDIO_IPQ4019
tristate "Qualcomm IPQ4019 MDIO interface support"
depends on HAS_IOMEM && OF_MDIO
depends on COMMON_CLK
- depends on MDIO_DEVRES
help
This driver supports the MDIO interface found in Qualcomm
IPQ40xx, IPQ60xx, IPQ807x and IPQ50xx series Soc-s.
@@ -180,11 +158,17 @@ config MDIO_IPQ8064
tristate "Qualcomm IPQ8064 MDIO interface support"
depends on HAS_IOMEM && OF_MDIO
depends on MFD_SYSCON
- depends on MDIO_DEVRES
help
This driver supports the MDIO interface found in the network
interface units of the IPQ8064 SoC
+config MDIO_REALTEK_RTL9300
+ tristate "Realtek RTL9300 MDIO interface support"
+ depends on MACH_REALTEK_RTL || COMPILE_TEST
+ help
+ This driver supports the MDIO interface found in the Realtek
+ RTL9300 family of Ethernet switches with integrated SoC.
+
config MDIO_REGMAP
tristate
help
@@ -201,7 +185,6 @@ config MDIO_THUNDER
depends on 64BIT
depends on PCI
select MDIO_CAVIUM
- select MDIO_DEVRES
help
This driver supports the MDIO interfaces found on Cavium
ThunderX SoCs when the MDIO bus device appears as a PCI
@@ -299,4 +282,3 @@ config MDIO_BUS_MUX_MMIOREG
endif
-endif
diff --git a/drivers/net/mdio/Makefile b/drivers/net/mdio/Makefile
index 1015f0db4531..fbec636700e7 100644
--- a/drivers/net/mdio/Makefile
+++ b/drivers/net/mdio/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_ACPI_MDIO) += acpi_mdio.o
obj-$(CONFIG_FWNODE_MDIO) += fwnode_mdio.o
obj-$(CONFIG_OF_MDIO) += of_mdio.o
+obj-$(CONFIG_MDIO_AIROHA) += mdio-airoha.o
obj-$(CONFIG_MDIO_ASPEED) += mdio-aspeed.o
obj-$(CONFIG_MDIO_BCM_IPROC) += mdio-bcm-iproc.o
obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o
@@ -19,6 +20,7 @@ obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
obj-$(CONFIG_MDIO_MSCC_MIIM) += mdio-mscc-miim.o
obj-$(CONFIG_MDIO_MVUSB) += mdio-mvusb.o
obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
+obj-$(CONFIG_MDIO_REALTEK_RTL9300) += mdio-realtek-rtl9300.o
obj-$(CONFIG_MDIO_REGMAP) += mdio-regmap.o
obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o
diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c
index b156493d7084..ba7091518265 100644
--- a/drivers/net/mdio/fwnode_mdio.c
+++ b/drivers/net/mdio/fwnode_mdio.c
@@ -18,7 +18,8 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FWNODE MDIO bus (Ethernet PHY) accessors");
static struct pse_control *
-fwnode_find_pse_control(struct fwnode_handle *fwnode)
+fwnode_find_pse_control(struct fwnode_handle *fwnode,
+ struct phy_device *phydev)
{
struct pse_control *psec;
struct device_node *np;
@@ -30,7 +31,7 @@ fwnode_find_pse_control(struct fwnode_handle *fwnode)
if (!np)
return NULL;
- psec = of_pse_control_get(np);
+ psec = of_pse_control_get(np, phydev);
if (PTR_ERR(psec) == -ENOENT)
return NULL;
@@ -40,6 +41,7 @@ fwnode_find_pse_control(struct fwnode_handle *fwnode)
static struct mii_timestamper *
fwnode_find_mii_timestamper(struct fwnode_handle *fwnode)
{
+ struct mii_timestamper *mii_ts;
struct of_phandle_args arg;
int err;
@@ -53,10 +55,16 @@ fwnode_find_mii_timestamper(struct fwnode_handle *fwnode)
else if (err)
return ERR_PTR(err);
- if (arg.args_count != 1)
- return ERR_PTR(-EINVAL);
+ if (arg.args_count != 1) {
+ mii_ts = ERR_PTR(-EINVAL);
+ goto put_node;
+ }
+
+ mii_ts = register_mii_timestamper(arg.np, arg.args[0]);
- return register_mii_timestamper(arg.np, arg.args[0]);
+put_node:
+ of_node_put(arg.np);
+ return mii_ts;
}
int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
@@ -84,11 +92,6 @@ int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
if (fwnode_property_read_bool(child, "broken-turn-around"))
mdio->phy_ignore_ta_mask |= 1 << addr;
- fwnode_property_read_u32(child, "reset-assert-us",
- &phy->mdio.reset_assert_delay);
- fwnode_property_read_u32(child, "reset-deassert-us",
- &phy->mdio.reset_deassert_delay);
-
/* Associate the fwnode with the device structure so it
* can be looked up later
*/
@@ -121,15 +124,9 @@ int fwnode_mdiobus_register_phy(struct mii_bus *bus,
u32 phy_id;
int rc;
- psec = fwnode_find_pse_control(child);
- if (IS_ERR(psec))
- return PTR_ERR(psec);
-
mii_ts = fwnode_find_mii_timestamper(child);
- if (IS_ERR(mii_ts)) {
- rc = PTR_ERR(mii_ts);
- goto clean_pse;
- }
+ if (IS_ERR(mii_ts))
+ return PTR_ERR(mii_ts);
is_c45 = fwnode_device_is_compatible(child, "ethernet-phy-ieee802.3-c45");
if (is_c45 || fwnode_get_phy_id(child, &phy_id))
@@ -162,6 +159,12 @@ int fwnode_mdiobus_register_phy(struct mii_bus *bus,
goto clean_phy;
}
+ psec = fwnode_find_pse_control(child, phy);
+ if (IS_ERR(psec)) {
+ rc = PTR_ERR(psec);
+ goto unregister_phy;
+ }
+
phy->psec = psec;
/* phy->mii_ts may already be defined by the PHY driver. A
@@ -173,12 +176,13 @@ int fwnode_mdiobus_register_phy(struct mii_bus *bus,
return 0;
+unregister_phy:
+ if (is_acpi_node(child) || is_of_node(child))
+ phy_device_remove(phy);
clean_phy:
phy_device_free(phy);
clean_mii_ts:
unregister_mii_timestamper(mii_ts);
-clean_pse:
- pse_control_put(psec);
return rc;
}
diff --git a/drivers/net/mdio/mdio-airoha.c b/drivers/net/mdio/mdio-airoha.c
new file mode 100644
index 000000000000..52e7475121ea
--- /dev/null
+++ b/drivers/net/mdio/mdio-airoha.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Airoha AN7583 MDIO interface driver
+ *
+ * Copyright (C) 2025 Christian Marangi <ansuelsmth@gmail.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+/* MII address register definitions */
+#define AN7583_MII_BUSY BIT(31)
+#define AN7583_MII_RDY BIT(30) /* RO signal BUS is ready */
+#define AN7583_MII_CL22_REG_ADDR GENMASK(29, 25)
+#define AN7583_MII_CL45_DEV_ADDR AN7583_MII_CL22_REG_ADDR
+#define AN7583_MII_PHY_ADDR GENMASK(24, 20)
+#define AN7583_MII_CMD GENMASK(19, 18)
+#define AN7583_MII_CMD_CL22_WRITE FIELD_PREP_CONST(AN7583_MII_CMD, 0x1)
+#define AN7583_MII_CMD_CL22_READ FIELD_PREP_CONST(AN7583_MII_CMD, 0x2)
+#define AN7583_MII_CMD_CL45_ADDR FIELD_PREP_CONST(AN7583_MII_CMD, 0x0)
+#define AN7583_MII_CMD_CL45_WRITE FIELD_PREP_CONST(AN7583_MII_CMD, 0x1)
+#define AN7583_MII_CMD_CL45_POSTREAD_INCADDR FIELD_PREP_CONST(AN7583_MII_CMD, 0x2)
+#define AN7583_MII_CMD_CL45_READ FIELD_PREP_CONST(AN7583_MII_CMD, 0x3)
+#define AN7583_MII_ST GENMASK(17, 16)
+#define AN7583_MII_ST_CL45 FIELD_PREP_CONST(AN7583_MII_ST, 0x0)
+#define AN7583_MII_ST_CL22 FIELD_PREP_CONST(AN7583_MII_ST, 0x1)
+#define AN7583_MII_RWDATA GENMASK(15, 0)
+#define AN7583_MII_CL45_REG_ADDR AN7583_MII_RWDATA
+
+#define AN7583_MII_MDIO_DELAY_USEC 100
+#define AN7583_MII_MDIO_RETRY_MSEC 100
+
+struct airoha_mdio_data {
+ u32 base_addr;
+ struct regmap *regmap;
+ struct clk *clk;
+ struct reset_control *reset;
+};
+
+static int airoha_mdio_wait_busy(struct airoha_mdio_data *priv)
+{
+ u32 busy;
+
+ return regmap_read_poll_timeout(priv->regmap, priv->base_addr, busy,
+ !(busy & AN7583_MII_BUSY),
+ AN7583_MII_MDIO_DELAY_USEC,
+ AN7583_MII_MDIO_RETRY_MSEC * USEC_PER_MSEC);
+}
+
+static void airoha_mdio_reset(struct airoha_mdio_data *priv)
+{
+ /* There seems to be a hardware bug where AN7583_MII_RWDATA
+ * is not cleared when no PHY is connected, and the previous
+ * read value is returned instead.
+ *
+ * Example (only one PHY on the bus, at 0x1f):
+ * - a read at 0x1f reports 0x7500 at register 0x2
+ * - a read at 0x0 then reports 0x7500 at every address
+ *
+ * To work around this, reset the MDIO bus before every read
+ * so that read operations return consistent values.
+ */
+ reset_control_assert(priv->reset);
+ reset_control_deassert(priv->reset);
+}
+
+static int airoha_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct airoha_mdio_data *priv = bus->priv;
+ u32 val;
+ int ret;
+
+ airoha_mdio_reset(priv);
+
+ val = AN7583_MII_BUSY | AN7583_MII_ST_CL22 |
+ AN7583_MII_CMD_CL22_READ;
+ val |= FIELD_PREP(AN7583_MII_PHY_ADDR, addr);
+ val |= FIELD_PREP(AN7583_MII_CL22_REG_ADDR, regnum);
+
+ ret = regmap_write(priv->regmap, priv->base_addr, val);
+ if (ret)
+ return ret;
+
+ ret = airoha_mdio_wait_busy(priv);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(priv->regmap, priv->base_addr, &val);
+ if (ret)
+ return ret;
+
+ return FIELD_GET(AN7583_MII_RWDATA, val);
+}
+
+static int airoha_mdio_write(struct mii_bus *bus, int addr, int regnum,
+ u16 value)
+{
+ struct airoha_mdio_data *priv = bus->priv;
+ u32 val;
+ int ret;
+
+ val = AN7583_MII_BUSY | AN7583_MII_ST_CL22 |
+ AN7583_MII_CMD_CL22_WRITE;
+ val |= FIELD_PREP(AN7583_MII_PHY_ADDR, addr);
+ val |= FIELD_PREP(AN7583_MII_CL22_REG_ADDR, regnum);
+ val |= FIELD_PREP(AN7583_MII_RWDATA, value);
+
+ ret = regmap_write(priv->regmap, priv->base_addr, val);
+ if (ret)
+ return ret;
+
+ ret = airoha_mdio_wait_busy(priv);
+
+ return ret;
+}
+
+static int airoha_mdio_cl45_read(struct mii_bus *bus, int addr, int devnum,
+ int regnum)
+{
+ struct airoha_mdio_data *priv = bus->priv;
+ u32 val;
+ int ret;
+
+ airoha_mdio_reset(priv);
+
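+ /* Clause 45 access is two-phase: first latch the register address,
+ * then issue the read command and fetch the data.
+ */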
+ val = AN7583_MII_BUSY | AN7583_MII_ST_CL45 |
+ AN7583_MII_CMD_CL45_ADDR;
+ val |= FIELD_PREP(AN7583_MII_PHY_ADDR, addr);
+ val |= FIELD_PREP(AN7583_MII_CL45_DEV_ADDR, devnum);
+ val |= FIELD_PREP(AN7583_MII_CL45_REG_ADDR, regnum);
+
+ ret = regmap_write(priv->regmap, priv->base_addr, val);
+ if (ret)
+ return ret;
+
+ ret = airoha_mdio_wait_busy(priv);
+ if (ret)
+ return ret;
+
+ val = AN7583_MII_BUSY | AN7583_MII_ST_CL45 |
+ AN7583_MII_CMD_CL45_READ;
+ val |= FIELD_PREP(AN7583_MII_PHY_ADDR, addr);
+ val |= FIELD_PREP(AN7583_MII_CL45_DEV_ADDR, devnum);
+
+ ret = regmap_write(priv->regmap, priv->base_addr, val);
+ if (ret)
+ return ret;
+
+ ret = airoha_mdio_wait_busy(priv);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(priv->regmap, priv->base_addr, &val);
+ if (ret)
+ return ret;
+
+ return FIELD_GET(AN7583_MII_RWDATA, val);
+}
+
+static int airoha_mdio_cl45_write(struct mii_bus *bus, int addr, int devnum,
+ int regnum, u16 value)
+{
+ struct airoha_mdio_data *priv = bus->priv;
+ u32 val;
+ int ret;
+
+ val = AN7583_MII_BUSY | AN7583_MII_ST_CL45 |
+ AN7583_MII_CMD_CL45_ADDR;
+ val |= FIELD_PREP(AN7583_MII_PHY_ADDR, addr);
+ val |= FIELD_PREP(AN7583_MII_CL45_DEV_ADDR, devnum);
+ val |= FIELD_PREP(AN7583_MII_CL45_REG_ADDR, regnum);
+
+ ret = regmap_write(priv->regmap, priv->base_addr, val);
+ if (ret)
+ return ret;
+
+ ret = airoha_mdio_wait_busy(priv);
+ if (ret)
+ return ret;
+
+ val = AN7583_MII_BUSY | AN7583_MII_ST_CL45 |
+ AN7583_MII_CMD_CL45_WRITE;
+ val |= FIELD_PREP(AN7583_MII_PHY_ADDR, addr);
+ val |= FIELD_PREP(AN7583_MII_CL45_DEV_ADDR, devnum);
+ val |= FIELD_PREP(AN7583_MII_RWDATA, value);
+
+ ret = regmap_write(priv->regmap, priv->base_addr, val);
+ if (ret)
+ return ret;
+
+ ret = airoha_mdio_wait_busy(priv);
+
+ return ret;
+}
+
+static int airoha_mdio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct airoha_mdio_data *priv;
+ struct mii_bus *bus;
+ u32 addr, freq;
+ int ret;
+
+ ret = of_property_read_u32(dev->of_node, "reg", &addr);
+ if (ret)
+ return ret;
+
+ bus = devm_mdiobus_alloc_size(dev, sizeof(*priv));
+ if (!bus)
+ return -ENOMEM;
+
+ priv = bus->priv;
+ priv->base_addr = addr;
+ priv->regmap = device_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ priv->clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ priv->reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(priv->reset))
+ return PTR_ERR(priv->reset);
+
+ reset_control_deassert(priv->reset);
+
+ bus->name = "airoha_mdio_bus";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(dev));
+ bus->parent = dev;
+ bus->read = airoha_mdio_read;
+ bus->write = airoha_mdio_write;
+ bus->read_c45 = airoha_mdio_cl45_read;
+ bus->write_c45 = airoha_mdio_cl45_write;
+
+ /* Check if a custom frequency is defined in DT or default to 2.5 MHz */
+ if (of_property_read_u32(dev->of_node, "clock-frequency", &freq))
+ freq = 2500000;
+
+ ret = clk_set_rate(priv->clk, freq);
+ if (ret)
+ return ret;
+
+ ret = devm_of_mdiobus_register(dev, bus, dev->of_node);
+ if (ret) {
+ reset_control_assert(priv->reset);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id airoha_mdio_dt_ids[] = {
+ { .compatible = "airoha,an7583-mdio" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, airoha_mdio_dt_ids);
+
+static struct platform_driver airoha_mdio_driver = {
+ .probe = airoha_mdio_probe,
+ .driver = {
+ .name = "airoha-mdio",
+ .of_match_table = airoha_mdio_dt_ids,
+ },
+};
+
+module_platform_driver(airoha_mdio_driver);
+
+MODULE_DESCRIPTION("Airoha AN7583 MDIO interface driver");
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/mdio/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c
index 074d96328f41..37e35f282d9a 100644
--- a/drivers/net/mdio/mdio-bcm-unimac.c
+++ b/drivers/net/mdio/mdio-bcm-unimac.c
@@ -209,14 +209,15 @@ static int unimac_mdio_clk_set(struct unimac_mdio_priv *priv)
if (ret)
return ret;
- if (!priv->clk)
+ rate = clk_get_rate(priv->clk);
+ if (!rate)
rate = 250000000;
- else
- rate = clk_get_rate(priv->clk);
div = (rate / (2 * priv->clk_freq)) - 1;
if (div & ~MDIO_CLK_DIV_MASK) {
- pr_warn("Incorrect MDIO clock frequency, ignoring\n");
+ dev_warn(priv->mii_bus->parent,
+ "Ignoring MDIO clock frequency request: %d vs. rate: %ld\n",
+ priv->clk_freq, rate);
ret = 0;
goto out;
}
@@ -334,9 +335,9 @@ static SIMPLE_DEV_PM_OPS(unimac_mdio_pm_ops,
NULL, unimac_mdio_resume);
static const struct of_device_id unimac_mdio_ids[] = {
+ { .compatible = "brcm,asp-v3.0-mdio", },
{ .compatible = "brcm,asp-v2.2-mdio", },
{ .compatible = "brcm,asp-v2.1-mdio", },
- { .compatible = "brcm,asp-v2.0-mdio", },
{ .compatible = "brcm,bcm6846-mdio", },
{ .compatible = "brcm,genet-mdio-v5", },
{ .compatible = "brcm,genet-mdio-v4", },
diff --git a/drivers/net/mdio/mdio-i2c.c b/drivers/net/mdio/mdio-i2c.c
index da2001ea1f99..ed20352a589a 100644
--- a/drivers/net/mdio/mdio-i2c.c
+++ b/drivers/net/mdio/mdio-i2c.c
@@ -106,6 +106,73 @@ static int i2c_mii_write_default_c22(struct mii_bus *bus, int phy_id, int reg,
return i2c_mii_write_default_c45(bus, phy_id, -1, reg, val);
}
+static int smbus_byte_mii_read_default_c22(struct mii_bus *bus, int phy_id,
+ int reg)
+{
+ struct i2c_adapter *i2c = bus->priv;
+ union i2c_smbus_data smbus_data;
+ int val = 0, ret;
+
+ if (!i2c_mii_valid_phy_id(phy_id))
+ return 0;
+
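+ /* The 16-bit MII register is transferred as two consecutive SMBus
+ * byte-data reads of the same register: the first access returns the
+ * high byte, the second the low byte.
+ */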
+ i2c_lock_bus(i2c, I2C_LOCK_SEGMENT);
+
+ ret = __i2c_smbus_xfer(i2c, i2c_mii_phy_addr(phy_id), 0,
+ I2C_SMBUS_READ, reg,
+ I2C_SMBUS_BYTE_DATA, &smbus_data);
+ if (ret < 0)
+ goto unlock;
+
+ val = (smbus_data.byte & 0xff) << 8;
+
+ ret = __i2c_smbus_xfer(i2c, i2c_mii_phy_addr(phy_id), 0,
+ I2C_SMBUS_READ, reg,
+ I2C_SMBUS_BYTE_DATA, &smbus_data);
+
+unlock:
+ i2c_unlock_bus(i2c, I2C_LOCK_SEGMENT);
+
+ if (ret < 0)
+ return ret;
+
+ val |= smbus_data.byte & 0xff;
+
+ return val;
+}
+
+static int smbus_byte_mii_write_default_c22(struct mii_bus *bus, int phy_id,
+ int reg, u16 val)
+{
+ struct i2c_adapter *i2c = bus->priv;
+ union i2c_smbus_data smbus_data;
+ int ret;
+
+ if (!i2c_mii_valid_phy_id(phy_id))
+ return 0;
+
+ smbus_data.byte = (val & 0xff00) >> 8;
+
+ i2c_lock_bus(i2c, I2C_LOCK_SEGMENT);
+
+ ret = __i2c_smbus_xfer(i2c, i2c_mii_phy_addr(phy_id), 0,
+ I2C_SMBUS_WRITE, reg,
+ I2C_SMBUS_BYTE_DATA, &smbus_data);
+ if (ret < 0)
+ goto unlock;
+
+ smbus_data.byte = val & 0xff;
+
+ ret = __i2c_smbus_xfer(i2c, i2c_mii_phy_addr(phy_id), 0,
+ I2C_SMBUS_WRITE, reg,
+ I2C_SMBUS_BYTE_DATA, &smbus_data);
+
+unlock:
+ i2c_unlock_bus(i2c, I2C_LOCK_SEGMENT);
+
+ return ret < 0 ? ret : 0;
+}
+
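A short sketch of the byte ordering the two SMBus helpers above assume; this mirrors the code and is not part of the patch.

/* Illustration only: a 16-bit MII register is transferred as two SMBus
 * byte-data transactions, high byte first.
 *   read:  val = (first_byte << 8) | second_byte
 *   write: first transfer carries (val & 0xff00) >> 8, second carries val & 0xff
 */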
/* RollBall SFPs do not access internal PHY via I2C address 0x56, but
* instead via address 0x51, when SFP page is set to 0x03 and password to
* 0xffffffff.
@@ -378,13 +445,26 @@ static int i2c_mii_init_rollball(struct i2c_adapter *i2c)
return 0;
}
+static bool mdio_i2c_check_functionality(struct i2c_adapter *i2c,
+ enum mdio_i2c_proto protocol)
+{
+ if (i2c_check_functionality(i2c, I2C_FUNC_I2C))
+ return true;
+
+ if (i2c_check_functionality(i2c, I2C_FUNC_SMBUS_BYTE_DATA) &&
+ protocol == MDIO_I2C_MARVELL_C22)
+ return true;
+
+ return false;
+}
+
struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c,
enum mdio_i2c_proto protocol)
{
struct mii_bus *mii;
int ret;
- if (!i2c_check_functionality(i2c, I2C_FUNC_I2C))
+ if (!mdio_i2c_check_functionality(i2c, protocol))
return ERR_PTR(-EINVAL);
mii = mdiobus_alloc();
@@ -395,6 +475,14 @@ struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c,
mii->parent = parent;
mii->priv = i2c;
+ /* Only use SMBus if we have no other choice */
+ if (i2c_check_functionality(i2c, I2C_FUNC_SMBUS_BYTE_DATA) &&
+ !i2c_check_functionality(i2c, I2C_FUNC_I2C)) {
+ mii->read = smbus_byte_mii_read_default_c22;
+ mii->write = smbus_byte_mii_write_default_c22;
+ return mii;
+ }
+
switch (protocol) {
case MDIO_I2C_ROLLBALL:
ret = i2c_mii_init_rollball(i2c);
diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c
index dd3ed2d6430b..d9a94df482d9 100644
--- a/drivers/net/mdio/mdio-ipq4019.c
+++ b/drivers/net/mdio/mdio-ipq4019.c
@@ -352,8 +352,11 @@ static int ipq4019_mdio_probe(struct platform_device *pdev)
/* The platform resource is provided on the chipset IPQ5018 */
/* This resource is optional */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (res)
+ if (res) {
priv->eth_ldo_rdy = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->eth_ldo_rdy))
+ return PTR_ERR(priv->eth_ldo_rdy);
+ }
bus->name = "ipq4019_mdio";
bus->read = ipq4019_mdio_read_c22;
diff --git a/drivers/net/mdio/mdio-mux-gpio.c b/drivers/net/mdio/mdio-mux-gpio.c
index ef77bd1abae9..fefa40ea5227 100644
--- a/drivers/net/mdio/mdio-mux-gpio.c
+++ b/drivers/net/mdio/mdio-mux-gpio.c
@@ -30,8 +30,7 @@ static int mdio_mux_gpio_switch_fn(int current_child, int desired_child,
values[0] = desired_child;
- gpiod_set_array_value_cansleep(s->gpios->ndescs, s->gpios->desc,
- s->gpios->info, values);
+ gpiod_multi_set_value_cansleep(s->gpios, values);
return 0;
}
diff --git a/drivers/net/mdio/mdio-mux-meson-gxl.c b/drivers/net/mdio/mdio-mux-meson-gxl.c
index 00c66240136b..3dd12a8c8b03 100644
--- a/drivers/net/mdio/mdio-mux-meson-gxl.c
+++ b/drivers/net/mdio/mdio-mux-meson-gxl.c
@@ -17,6 +17,7 @@
#define REG2_LEDACT GENMASK(23, 22)
#define REG2_LEDLINK GENMASK(25, 24)
#define REG2_DIV4SEL BIT(27)
+#define REG2_REVERSED BIT(28)
#define REG2_ADCBYPASS BIT(30)
#define REG2_CLKINSEL BIT(31)
#define ETH_REG3 0x4
@@ -65,7 +66,7 @@ static void gxl_enable_internal_mdio(struct gxl_mdio_mux *priv)
* The only constraint is that it must match the one in
* drivers/net/phy/meson-gxl.c to properly match the PHY.
*/
- writel(FIELD_PREP(REG2_PHYID, EPHY_GXL_ID),
+ writel(REG2_REVERSED | FIELD_PREP(REG2_PHYID, EPHY_GXL_ID),
priv->regs + ETH_REG2);
/* Enable the internal phy */
diff --git a/drivers/net/mdio/mdio-octeon.c b/drivers/net/mdio/mdio-octeon.c
index 2beb83154d39..cb53dccbde1a 100644
--- a/drivers/net/mdio/mdio-octeon.c
+++ b/drivers/net/mdio/mdio-octeon.c
@@ -17,37 +17,20 @@ static int octeon_mdiobus_probe(struct platform_device *pdev)
{
struct cavium_mdiobus *bus;
struct mii_bus *mii_bus;
- struct resource *res_mem;
- resource_size_t mdio_phys;
- resource_size_t regsize;
union cvmx_smix_en smi_en;
- int err = -ENOENT;
+ int err;
mii_bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*bus));
if (!mii_bus)
return -ENOMEM;
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res_mem == NULL) {
- dev_err(&pdev->dev, "found no memory resource\n");
- return -ENXIO;
- }
-
bus = mii_bus->priv;
bus->mii_bus = mii_bus;
- mdio_phys = res_mem->start;
- regsize = resource_size(res_mem);
- if (!devm_request_mem_region(&pdev->dev, mdio_phys, regsize,
- res_mem->name)) {
- dev_err(&pdev->dev, "request_mem_region failed\n");
- return -ENXIO;
- }
-
- bus->register_base = devm_ioremap(&pdev->dev, mdio_phys, regsize);
- if (!bus->register_base) {
+ bus->register_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(bus->register_base)) {
dev_err(&pdev->dev, "dev_ioremap failed\n");
- return -ENOMEM;
+ return PTR_ERR(bus->register_base);
}
smi_en.u64 = 0;
diff --git a/drivers/net/mdio/mdio-realtek-rtl9300.c b/drivers/net/mdio/mdio-realtek-rtl9300.c
new file mode 100644
index 000000000000..33694c3ff9a7
--- /dev/null
+++ b/drivers/net/mdio/mdio-realtek-rtl9300.c
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MDIO controller for RTL9300 switches with integrated SoC.
+ *
+ * The MDIO communication is abstracted by the switch. At the software level
+ * communication uses the switch port to address the PHY. We work out the
+ * mapping based on the MDIO bus described in device tree and phandles on the
+ * ethernet-ports property.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/bits.h>
+#include <linux/find.h>
+#include <linux/mdio.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+#define SMI_GLB_CTRL 0xca00
+#define GLB_CTRL_INTF_SEL(intf) BIT(16 + (intf))
+#define SMI_PORT0_15_POLLING_SEL 0xca08
+#define SMI_ACCESS_PHY_CTRL_0 0xcb70
+#define SMI_ACCESS_PHY_CTRL_1 0xcb74
+#define PHY_CTRL_REG_ADDR GENMASK(24, 20)
+#define PHY_CTRL_PARK_PAGE GENMASK(19, 15)
+#define PHY_CTRL_MAIN_PAGE GENMASK(14, 3)
+#define PHY_CTRL_WRITE BIT(2)
+#define PHY_CTRL_READ 0
+#define PHY_CTRL_TYPE_C45 BIT(1)
+#define PHY_CTRL_TYPE_C22 0
+#define PHY_CTRL_CMD BIT(0)
+#define PHY_CTRL_FAIL BIT(25)
+#define SMI_ACCESS_PHY_CTRL_2 0xcb78
+#define PHY_CTRL_INDATA GENMASK(31, 16)
+#define PHY_CTRL_DATA GENMASK(15, 0)
+#define SMI_ACCESS_PHY_CTRL_3 0xcb7c
+#define PHY_CTRL_MMD_DEVAD GENMASK(20, 16)
+#define PHY_CTRL_MMD_REG GENMASK(15, 0)
+#define SMI_PORT0_5_ADDR_CTRL 0xcb80
+
+#define MAX_PORTS 28
+#define MAX_SMI_BUSSES 4
+#define MAX_SMI_ADDR 0x1f
+
+struct rtl9300_mdio_priv {
+ struct regmap *regmap;
+ struct mutex lock; /* protect HW access */
+ DECLARE_BITMAP(valid_ports, MAX_PORTS);
+ u8 smi_bus[MAX_PORTS];
+ u8 smi_addr[MAX_PORTS];
+ bool smi_bus_is_c45[MAX_SMI_BUSSES];
+ struct mii_bus *bus[MAX_SMI_BUSSES];
+};
+
+struct rtl9300_mdio_chan {
+ struct rtl9300_mdio_priv *priv;
+ u8 mdio_bus;
+};
+
+static int rtl9300_mdio_phy_to_port(struct mii_bus *bus, int phy_id)
+{
+ struct rtl9300_mdio_chan *chan = bus->priv;
+ struct rtl9300_mdio_priv *priv;
+ int i;
+
+ priv = chan->priv;
+
+ for_each_set_bit(i, priv->valid_ports, MAX_PORTS)
+ if (priv->smi_bus[i] == chan->mdio_bus &&
+ priv->smi_addr[i] == phy_id)
+ return i;
+
+ return -ENOENT;
+}
+
+static int rtl9300_mdio_wait_ready(struct rtl9300_mdio_priv *priv)
+{
+ struct regmap *regmap = priv->regmap;
+ u32 val;
+
+ lockdep_assert_held(&priv->lock);
+
+ return regmap_read_poll_timeout(regmap, SMI_ACCESS_PHY_CTRL_1,
+ val, !(val & PHY_CTRL_CMD), 10, 1000);
+}
+
+static int rtl9300_mdio_read_c22(struct mii_bus *bus, int phy_id, int regnum)
+{
+ struct rtl9300_mdio_chan *chan = bus->priv;
+ struct rtl9300_mdio_priv *priv;
+ struct regmap *regmap;
+ int port;
+ u32 val;
+ int err;
+
+ priv = chan->priv;
+ regmap = priv->regmap;
+
+ port = rtl9300_mdio_phy_to_port(bus, phy_id);
+ if (port < 0)
+ return port;
+
+ mutex_lock(&priv->lock);
+ err = rtl9300_mdio_wait_ready(priv);
+ if (err)
+ goto out_err;
+
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_2, FIELD_PREP(PHY_CTRL_INDATA, port));
+ if (err)
+ goto out_err;
+
+ val = FIELD_PREP(PHY_CTRL_REG_ADDR, regnum) |
+ FIELD_PREP(PHY_CTRL_PARK_PAGE, 0x1f) |
+ FIELD_PREP(PHY_CTRL_MAIN_PAGE, 0xfff) |
+ PHY_CTRL_READ | PHY_CTRL_TYPE_C22 | PHY_CTRL_CMD;
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_1, val);
+ if (err)
+ goto out_err;
+
+ err = rtl9300_mdio_wait_ready(priv);
+ if (err)
+ goto out_err;
+
+ err = regmap_read(regmap, SMI_ACCESS_PHY_CTRL_2, &val);
+ if (err)
+ goto out_err;
+
+ mutex_unlock(&priv->lock);
+ return FIELD_GET(PHY_CTRL_DATA, val);
+
+out_err:
+ mutex_unlock(&priv->lock);
+ return err;
+}
+
+static int rtl9300_mdio_write_c22(struct mii_bus *bus, int phy_id, int regnum, u16 value)
+{
+ struct rtl9300_mdio_chan *chan = bus->priv;
+ struct rtl9300_mdio_priv *priv;
+ struct regmap *regmap;
+ int port;
+ u32 val;
+ int err;
+
+ priv = chan->priv;
+ regmap = priv->regmap;
+
+ port = rtl9300_mdio_phy_to_port(bus, phy_id);
+ if (port < 0)
+ return port;
+
+ mutex_lock(&priv->lock);
+ err = rtl9300_mdio_wait_ready(priv);
+ if (err)
+ goto out_err;
+
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_0, BIT(port));
+ if (err)
+ goto out_err;
+
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_2, FIELD_PREP(PHY_CTRL_INDATA, value));
+ if (err)
+ goto out_err;
+
+ val = FIELD_PREP(PHY_CTRL_REG_ADDR, regnum) |
+ FIELD_PREP(PHY_CTRL_PARK_PAGE, 0x1f) |
+ FIELD_PREP(PHY_CTRL_MAIN_PAGE, 0xfff) |
+ PHY_CTRL_WRITE | PHY_CTRL_TYPE_C22 | PHY_CTRL_CMD;
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_1, val);
+ if (err)
+ goto out_err;
+
+ err = regmap_read_poll_timeout(regmap, SMI_ACCESS_PHY_CTRL_1,
+ val, !(val & PHY_CTRL_CMD), 10, 100);
+ if (err)
+ goto out_err;
+
+ if (val & PHY_CTRL_FAIL) {
+ err = -ENXIO;
+ goto out_err;
+ }
+
+ mutex_unlock(&priv->lock);
+ return 0;
+
+out_err:
+ mutex_unlock(&priv->lock);
+ return err;
+}
+
+static int rtl9300_mdio_read_c45(struct mii_bus *bus, int phy_id, int dev_addr, int regnum)
+{
+ struct rtl9300_mdio_chan *chan = bus->priv;
+ struct rtl9300_mdio_priv *priv;
+ struct regmap *regmap;
+ int port;
+ u32 val;
+ int err;
+
+ priv = chan->priv;
+ regmap = priv->regmap;
+
+ port = rtl9300_mdio_phy_to_port(bus, phy_id);
+ if (port < 0)
+ return port;
+
+ mutex_lock(&priv->lock);
+ err = rtl9300_mdio_wait_ready(priv);
+ if (err)
+ goto out_err;
+
+ val = FIELD_PREP(PHY_CTRL_INDATA, port);
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_2, val);
+ if (err)
+ goto out_err;
+
+ val = FIELD_PREP(PHY_CTRL_MMD_DEVAD, dev_addr) |
+ FIELD_PREP(PHY_CTRL_MMD_REG, regnum);
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_3, val);
+ if (err)
+ goto out_err;
+
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_1,
+ PHY_CTRL_READ | PHY_CTRL_TYPE_C45 | PHY_CTRL_CMD);
+ if (err)
+ goto out_err;
+
+ err = rtl9300_mdio_wait_ready(priv);
+ if (err)
+ goto out_err;
+
+ err = regmap_read(regmap, SMI_ACCESS_PHY_CTRL_2, &val);
+ if (err)
+ goto out_err;
+
+ mutex_unlock(&priv->lock);
+ return FIELD_GET(PHY_CTRL_DATA, val);
+
+out_err:
+ mutex_unlock(&priv->lock);
+ return err;
+}
+
+static int rtl9300_mdio_write_c45(struct mii_bus *bus, int phy_id, int dev_addr,
+ int regnum, u16 value)
+{
+ struct rtl9300_mdio_chan *chan = bus->priv;
+ struct rtl9300_mdio_priv *priv;
+ struct regmap *regmap;
+ int port;
+ u32 val;
+ int err;
+
+ priv = chan->priv;
+ regmap = priv->regmap;
+
+ port = rtl9300_mdio_phy_to_port(bus, phy_id);
+ if (port < 0)
+ return port;
+
+ mutex_lock(&priv->lock);
+ err = rtl9300_mdio_wait_ready(priv);
+ if (err)
+ goto out_err;
+
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_0, BIT(port));
+ if (err)
+ goto out_err;
+
+ val = FIELD_PREP(PHY_CTRL_INDATA, value);
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_2, val);
+ if (err)
+ goto out_err;
+
+ val = FIELD_PREP(PHY_CTRL_MMD_DEVAD, dev_addr) |
+ FIELD_PREP(PHY_CTRL_MMD_REG, regnum);
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_3, val);
+ if (err)
+ goto out_err;
+
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_1,
+ PHY_CTRL_TYPE_C45 | PHY_CTRL_WRITE | PHY_CTRL_CMD);
+ if (err)
+ goto out_err;
+
+ err = regmap_read_poll_timeout(regmap, SMI_ACCESS_PHY_CTRL_1,
+ val, !(val & PHY_CTRL_CMD), 10, 100);
+ if (err)
+ goto out_err;
+
+ if (val & PHY_CTRL_FAIL) {
+ err = -ENXIO;
+ goto out_err;
+ }
+
+ mutex_unlock(&priv->lock);
+ return 0;
+
+out_err:
+ mutex_unlock(&priv->lock);
+ return err;
+}
+
+static int rtl9300_mdiobus_init(struct rtl9300_mdio_priv *priv)
+{
+ u32 glb_ctrl_mask = 0, glb_ctrl_val = 0;
+ struct regmap *regmap = priv->regmap;
+ u32 port_addr[5] = { 0 };
+ u32 poll_sel[2] = { 0 };
+ int i, err;
+
+ /* Associate the port with the SMI interface and PHY */
+ for_each_set_bit(i, priv->valid_ports, MAX_PORTS) {
+ int pos;
+
+ pos = (i % 6) * 5;
+ port_addr[i / 6] |= (priv->smi_addr[i] & 0x1f) << pos;
+
+ pos = (i % 16) * 2;
+ poll_sel[i / 16] |= (priv->smi_bus[i] & 0x3) << pos;
+ }
+
+ /* Put the interfaces into C45 mode if required */
+ glb_ctrl_mask = GENMASK(19, 16);
+ for (i = 0; i < MAX_SMI_BUSSES; i++)
+ if (priv->smi_bus_is_c45[i])
+ glb_ctrl_val |= GLB_CTRL_INTF_SEL(i);
+
+ err = regmap_bulk_write(regmap, SMI_PORT0_5_ADDR_CTRL,
+ port_addr, 5);
+ if (err)
+ return err;
+
+ err = regmap_bulk_write(regmap, SMI_PORT0_15_POLLING_SEL,
+ poll_sel, 2);
+ if (err)
+ return err;
+
+ err = regmap_update_bits(regmap, SMI_GLB_CTRL,
+ glb_ctrl_mask, glb_ctrl_val);
+ if (err)
+ return err;
+
+ return 0;
+}
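A worked example of the register packing done in the loop above; the port, bus, and address values are illustrative assumptions.

/* Illustration only, not part of the patch: for switch port 7 with
 * smi_addr[7] = 3 and smi_bus[7] = 1:
 *   pos = (7 % 6) * 5 = 5   ->  port_addr[7 / 6] |= 3 << 5   (bits 9:5 of word 1)
 *   pos = (7 % 16) * 2 = 14 ->  poll_sel[7 / 16] |= 1 << 14  (bits 15:14 of word 0)
 */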
+
+static int rtl9300_mdiobus_probe_one(struct device *dev, struct rtl9300_mdio_priv *priv,
+ struct fwnode_handle *node)
+{
+ struct rtl9300_mdio_chan *chan;
+ struct fwnode_handle *child;
+ struct mii_bus *bus;
+ u32 mdio_bus;
+ int err;
+
+ err = fwnode_property_read_u32(node, "reg", &mdio_bus);
+ if (err)
+ return err;
+
+ /* The MDIO accesses from the kernel work with the PHY polling unit in
+ * the switch. We need to tell the PPU to operate either in GPHY (i.e.
+ * clause 22) or 10GPHY mode (i.e. clause 45).
+ *
+ * We select 10GPHY mode if there is at least one PHY that declares
+ * compatible = "ethernet-phy-ieee802.3-c45". This does mean we can't
+ * support both c45 and c22 on the same MDIO bus.
+ */
+ fwnode_for_each_child_node(node, child)
+ if (fwnode_device_is_compatible(child, "ethernet-phy-ieee802.3-c45"))
+ priv->smi_bus_is_c45[mdio_bus] = true;
+
+ bus = devm_mdiobus_alloc_size(dev, sizeof(*chan));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "Realtek Switch MDIO Bus";
+ if (priv->smi_bus_is_c45[mdio_bus]) {
+ bus->read_c45 = rtl9300_mdio_read_c45;
+ bus->write_c45 = rtl9300_mdio_write_c45;
+ } else {
+ bus->read = rtl9300_mdio_read_c22;
+ bus->write = rtl9300_mdio_write_c22;
+ }
+ bus->parent = dev;
+ chan = bus->priv;
+ chan->mdio_bus = mdio_bus;
+ chan->priv = priv;
+
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", dev_name(dev), mdio_bus);
+
+ err = devm_of_mdiobus_register(dev, bus, to_of_node(node));
+ if (err)
+ return dev_err_probe(dev, err, "cannot register MDIO bus\n");
+
+ return 0;
+}
+
+/* The mdio-controller is part of a switch block so we parse the sibling
+ * ethernet-ports node and build a mapping of the switch port to MDIO bus/addr
+ * based on the phy-handle.
+ */
+static int rtl9300_mdiobus_map_ports(struct device *dev)
+{
+ struct rtl9300_mdio_priv *priv = dev_get_drvdata(dev);
+ struct device *parent = dev->parent;
+ struct fwnode_handle *port;
+ int err;
+
+ struct fwnode_handle *ports __free(fwnode_handle) =
+ device_get_named_child_node(parent, "ethernet-ports");
+ if (!ports)
+ return dev_err_probe(dev, -EINVAL, "%pfwP missing ethernet-ports\n",
+ dev_fwnode(parent));
+
+ fwnode_for_each_child_node(ports, port) {
+ struct device_node *mdio_dn;
+ u32 addr;
+ u32 bus;
+ u32 pn;
+
+ struct device_node *phy_dn __free(device_node) =
+ of_parse_phandle(to_of_node(port), "phy-handle", 0);
+ /* skip ports without phys */
+ if (!phy_dn)
+ continue;
+
+ mdio_dn = phy_dn->parent;
+ /* only map ports that are connected to this mdio-controller */
+ if (mdio_dn->parent != dev->of_node)
+ continue;
+
+ err = fwnode_property_read_u32(port, "reg", &pn);
+ if (err)
+ return err;
+
+ if (pn >= MAX_PORTS)
+ return dev_err_probe(dev, -EINVAL, "illegal port number %d\n", pn);
+
+ if (test_bit(pn, priv->valid_ports))
+ return dev_err_probe(dev, -EINVAL, "duplicated port number %d\n", pn);
+
+ err = of_property_read_u32(mdio_dn, "reg", &bus);
+ if (err)
+ return err;
+
+ if (bus >= MAX_SMI_BUSSES)
+ return dev_err_probe(dev, -EINVAL, "illegal smi bus number %d\n", bus);
+
+ err = of_property_read_u32(phy_dn, "reg", &addr);
+ if (err)
+ return err;
+
+ __set_bit(pn, priv->valid_ports);
+ priv->smi_bus[pn] = bus;
+ priv->smi_addr[pn] = addr;
+ }
+
+ return 0;
+}
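To make the mapping concrete, a sketch of the device tree shape this parser expects; node names and numbers are assumptions for illustration only.

/* Illustration only, not part of the patch: given a sibling node such as
 *   ethernet-ports { port@7 { reg = <7>; phy-handle = <&phy7>; }; };
 * where &phy7 has reg = <3> on a child mdio bus with reg = <1> under this
 * mdio-controller, the loop above records
 *   priv->smi_bus[7]  = 1;
 *   priv->smi_addr[7] = 3;
 */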
+
+static int rtl9300_mdiobus_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rtl9300_mdio_priv *priv;
+ struct fwnode_handle *child;
+ int err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ err = devm_mutex_init(dev, &priv->lock);
+ if (err)
+ return err;
+
+ priv->regmap = syscon_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ platform_set_drvdata(pdev, priv);
+
+ err = rtl9300_mdiobus_map_ports(dev);
+ if (err)
+ return err;
+
+ device_for_each_child_node(dev, child) {
+ err = rtl9300_mdiobus_probe_one(dev, priv, child);
+ if (err)
+ return err;
+ }
+
+ err = rtl9300_mdiobus_init(priv);
+ if (err)
+ return dev_err_probe(dev, err, "failed to initialise MDIO bus controller\n");
+
+ return 0;
+}
+
+static const struct of_device_id rtl9300_mdio_ids[] = {
+ { .compatible = "realtek,rtl9301-mdio" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rtl9300_mdio_ids);
+
+static struct platform_driver rtl9300_mdio_driver = {
+ .probe = rtl9300_mdiobus_probe,
+ .driver = {
+ .name = "mdio-rtl9300",
+ .of_match_table = rtl9300_mdio_ids,
+ },
+};
+
+module_platform_driver(rtl9300_mdio_driver);
+
+MODULE_DESCRIPTION("RTL9300 MDIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/mdio/mdio-thunder.c b/drivers/net/mdio/mdio-thunder.c
index 1e1aa72b1eff..a3047f7258a7 100644
--- a/drivers/net/mdio/mdio-thunder.c
+++ b/drivers/net/mdio/mdio-thunder.c
@@ -40,16 +40,16 @@ static int thunder_mdiobus_pci_probe(struct pci_dev *pdev,
return err;
}
- err = pci_request_regions(pdev, KBUILD_MODNAME);
+ err = pcim_request_all_regions(pdev, KBUILD_MODNAME);
if (err) {
- dev_err(&pdev->dev, "pci_request_regions failed\n");
+ dev_err(&pdev->dev, "pcim_request_all_regions failed\n");
goto err_disable_device;
}
nexus->bar0 = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
if (!nexus->bar0) {
err = -ENOMEM;
- goto err_release_regions;
+ goto err_disable_device;
}
i = 0;
@@ -107,9 +107,6 @@ static int thunder_mdiobus_pci_probe(struct pci_dev *pdev,
}
return 0;
-err_release_regions:
- pci_release_regions(pdev);
-
err_disable_device:
pci_set_drvdata(pdev, NULL);
return err;
@@ -129,7 +126,6 @@ static void thunder_mdiobus_pci_remove(struct pci_dev *pdev)
mdiobus_unregister(bus->mii_bus);
oct_mdio_writeq(0, bus->register_base + SMI_EN);
}
- pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
}
diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
index 2f4fc664d2e1..b8d298c04d3f 100644
--- a/drivers/net/mdio/of_mdio.c
+++ b/drivers/net/mdio/of_mdio.c
@@ -63,14 +63,11 @@ static int of_mdiobus_register_device(struct mii_bus *mdio,
/* Associate the OF node with the device structure so it
* can be looked up later.
*/
- fwnode_handle_get(fwnode);
- device_set_node(&mdiodev->dev, fwnode);
+ device_set_node(&mdiodev->dev, fwnode_handle_get(fwnode));
/* All data is now stored in the mdiodev struct; register it. */
rc = mdio_device_register(mdiodev);
if (rc) {
- device_set_node(&mdiodev->dev, NULL);
- fwnode_handle_put(fwnode);
mdio_device_free(mdiodev);
return rc;
}
@@ -447,6 +444,8 @@ int of_phy_register_fixed_link(struct device_node *np)
/* Old binding */
if (of_property_read_u32_array(np, "fixed-link", fixed_link_prop,
ARRAY_SIZE(fixed_link_prop)) == 0) {
+ pr_warn_once("%pOF uses deprecated array-style fixed-link binding!\n",
+ np);
status.link = 1;
status.duplex = fixed_link_prop[1];
status.speed = fixed_link_prop[2];
@@ -458,7 +457,7 @@ int of_phy_register_fixed_link(struct device_node *np)
return -ENODEV;
register_phy:
- return PTR_ERR_OR_ZERO(fixed_phy_register(PHY_POLL, &status, np));
+ return PTR_ERR_OR_ZERO(fixed_phy_register(&status, np));
}
EXPORT_SYMBOL(of_phy_register_fixed_link);
@@ -473,6 +472,5 @@ void of_phy_deregister_fixed_link(struct device_node *np)
fixed_phy_unregister(phydev);
put_device(&phydev->mdio.dev); /* of_phy_find_device() */
- phy_device_free(phydev); /* fixed_phy_register() */
}
EXPORT_SYMBOL(of_phy_deregister_fixed_link);
diff --git a/drivers/net/mii.c b/drivers/net/mii.c
index 22680f47385d..37bc3131d31a 100644
--- a/drivers/net/mii.c
+++ b/drivers/net/mii.c
@@ -213,6 +213,9 @@ void mii_ethtool_get_link_ksettings(struct mii_if_info *mii,
lp_advertising = 0;
}
+ if (!(bmsr & BMSR_LSTATUS))
+ cmd->base.speed = SPEED_UNKNOWN;
+
mii->full_duplex = cmd->base.duplex;
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index 54c8b9d5b5fc..5b50d9186f12 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -734,7 +734,7 @@ struct failover *net_failover_create(struct net_device *standby_dev)
failover_dev->lltx = true;
/* Don't allow failover devices to change network namespaces. */
- failover_dev->netns_local = true;
+ failover_dev->netns_immutable = true;
failover_dev->hw_features = FAILOVER_VLAN_FEATURES |
NETIF_F_HW_VLAN_CTAG_TX |
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 4ea44a2f48f7..9cb4dfc242f5 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -36,6 +36,7 @@
#include <linux/inet.h>
#include <linux/configfs.h>
#include <linux/etherdevice.h>
+#include <linux/u64_stats_sync.h>
#include <linux/utsname.h>
#include <linux/rtnetlink.h>
@@ -44,12 +45,12 @@ MODULE_DESCRIPTION("Console driver for network interfaces");
MODULE_LICENSE("GPL");
#define MAX_PARAM_LENGTH 256
-#define MAX_USERDATA_ENTRY_LENGTH 256
-#define MAX_USERDATA_VALUE_LENGTH 200
+#define MAX_EXTRADATA_ENTRY_LEN 256
+#define MAX_EXTRADATA_VALUE_LEN 200
/* The number 3 comes from userdata entry format characters (' ', '=', '\n') */
-#define MAX_USERDATA_NAME_LENGTH (MAX_USERDATA_ENTRY_LENGTH - \
- MAX_USERDATA_VALUE_LENGTH - 3)
-#define MAX_USERDATA_ITEMS 16
+#define MAX_EXTRADATA_NAME_LEN (MAX_EXTRADATA_ENTRY_LEN - \
+ MAX_EXTRADATA_VALUE_LEN - 3)
+#define MAX_USERDATA_ITEMS 256
#define MAX_PRINT_CHUNK 1000
static char config[MAX_PARAM_LENGTH];
@@ -85,18 +86,50 @@ static DEFINE_SPINLOCK(target_list_lock);
static DEFINE_MUTEX(target_cleanup_list_lock);
/*
- * Console driver for extended netconsoles. Registered on the first use to
- * avoid unnecessarily enabling ext message formatting.
+ * Console driver for netconsoles. Register only consoles that have
+ * an associated target of the same type.
*/
-static struct console netconsole_ext;
+static struct console netconsole_ext, netconsole;
+
+struct netconsole_target_stats {
+ u64_stats_t xmit_drop_count;
+ u64_stats_t enomem_count;
+ struct u64_stats_sync syncp;
+};
+
+enum console_type {
+ CONS_BASIC = BIT(0),
+ CONS_EXTENDED = BIT(1),
+};
+
+/* Features enabled in sysdata. Unlike userdata, this data is populated by
+ * the kernel. The fields are designed as bitwise flags, allowing multiple
+ * features to be set in sysdata_fields.
+ */
+enum sysdata_feature {
+ /* Populate the CPU that sends the message */
+ SYSDATA_CPU_NR = BIT(0),
+ /* Populate the task name (as in current->comm) in sysdata */
+ SYSDATA_TASKNAME = BIT(1),
+ /* Kernel release/version as part of sysdata */
+ SYSDATA_RELEASE = BIT(2),
+ /* Include a per-target message ID as part of sysdata */
+ SYSDATA_MSGID = BIT(3),
+	/* Number of sysdata features defined above */
+ MAX_SYSDATA_ITEMS = 4,
+};
/**
* struct netconsole_target - Represents a configured netconsole target.
* @list: Links this target into the target_list.
* @group: Links us into the configfs subsystem hierarchy.
* @userdata_group: Links to the userdata configfs hierarchy
- * @userdata_complete: Cached, formatted string of append
- * @userdata_length: String length of userdata_complete
+ * @userdata: Cached, formatted userdata string to append to messages
+ * @userdata_length: String length of userdata.
+ * @sysdata: Cached, formatted sysdata string to append to messages
+ * @sysdata_fields: Sysdata features enabled.
+ * @msgcounter: Message sent counter.
+ * @stats: Packet send stats for the target. Used for debugging.
* @enabled: On / off knob to enable / disable target.
* Visible from userspace (read-write).
* We maintain a strict 1:1 correspondence between this and
@@ -115,19 +148,29 @@ static struct console netconsole_ext;
* remote_ip (read-write)
* local_mac (read-only)
* remote_mac (read-write)
+ * @buf: The buffer used to send the full msg to the network stack
*/
struct netconsole_target {
struct list_head list;
#ifdef CONFIG_NETCONSOLE_DYNAMIC
struct config_group group;
struct config_group userdata_group;
- char userdata_complete[MAX_USERDATA_ENTRY_LENGTH * MAX_USERDATA_ITEMS];
+ char *userdata;
size_t userdata_length;
+ char sysdata[MAX_EXTRADATA_ENTRY_LEN * MAX_SYSDATA_ITEMS];
+
+ /* bit-wise with sysdata_feature bits */
+ u32 sysdata_fields;
+ /* protected by target_list_lock */
+ u32 msgcounter;
#endif
+ struct netconsole_target_stats stats;
bool enabled;
bool extended;
bool release;
struct netpoll np;
+ /* protected by target_list_lock */
+ char buf[MAX_PRINT_CHUNK];
};
#ifdef CONFIG_NETCONSOLE_DYNAMIC
@@ -245,6 +288,50 @@ static void netconsole_process_cleanups_core(void)
mutex_unlock(&target_cleanup_list_lock);
}
+static void netconsole_print_banner(struct netpoll *np)
+{
+ np_info(np, "local port %d\n", np->local_port);
+ if (np->ipv6)
+ np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
+ else
+ np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
+ np_info(np, "interface name '%s'\n", np->dev_name);
+ np_info(np, "local ethernet address '%pM'\n", np->dev_mac);
+ np_info(np, "remote port %d\n", np->remote_port);
+ if (np->ipv6)
+ np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
+ else
+ np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
+ np_info(np, "remote ethernet address %pM\n", np->remote_mac);
+}
+
+/* Parse the string and populate the `inet_addr` union. Return 0 if IPv4 is
+ * populated, 1 if IPv6 is populated, and -1 upon failure.
+ */
+static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
+{
+ const char *end = NULL;
+ int len;
+
+ len = strlen(str);
+ if (!len)
+ return -1;
+
+ if (str[len - 1] == '\n')
+ len -= 1;
+
+ if (in4_pton(str, len, (void *)addr, -1, &end) > 0 &&
+ (!end || *end == 0 || *end == '\n'))
+ return 0;
+
+ if (IS_ENABLED(CONFIG_IPV6) &&
+ in6_pton(str, len, (void *)addr, -1, &end) > 0 &&
+ (!end || *end == 0 || *end == '\n'))
+ return 1;
+
+ return -1;
+}
+
#ifdef CONFIG_NETCONSOLE_DYNAMIC
/*
@@ -262,6 +349,7 @@ static void netconsole_process_cleanups_core(void)
* | remote_ip
* | local_mac
* | remote_mac
+ * | transmit_errors
* | userdata/
* | <key>/
* | value
@@ -371,6 +459,101 @@ static ssize_t remote_mac_show(struct config_item *item, char *buf)
return sysfs_emit(buf, "%pM\n", to_target(item)->np.remote_mac);
}
+static ssize_t transmit_errors_show(struct config_item *item, char *buf)
+{
+ struct netconsole_target *nt = to_target(item);
+ u64 xmit_drop_count, enomem_count;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&nt->stats.syncp);
+ xmit_drop_count = u64_stats_read(&nt->stats.xmit_drop_count);
+ enomem_count = u64_stats_read(&nt->stats.enomem_count);
+ } while (u64_stats_fetch_retry(&nt->stats.syncp, start));
+
+ return sysfs_emit(buf, "%llu\n", xmit_drop_count + enomem_count);
+}
+
+/* configfs helper to display if cpu_nr sysdata feature is enabled */
+static ssize_t sysdata_cpu_nr_enabled_show(struct config_item *item, char *buf)
+{
+ struct netconsole_target *nt = to_target(item->ci_parent);
+ bool cpu_nr_enabled;
+
+ mutex_lock(&dynamic_netconsole_mutex);
+ cpu_nr_enabled = !!(nt->sysdata_fields & SYSDATA_CPU_NR);
+ mutex_unlock(&dynamic_netconsole_mutex);
+
+ return sysfs_emit(buf, "%d\n", cpu_nr_enabled);
+}
+
+/* configfs helper to display if taskname sysdata feature is enabled */
+static ssize_t sysdata_taskname_enabled_show(struct config_item *item,
+ char *buf)
+{
+ struct netconsole_target *nt = to_target(item->ci_parent);
+ bool taskname_enabled;
+
+ mutex_lock(&dynamic_netconsole_mutex);
+ taskname_enabled = !!(nt->sysdata_fields & SYSDATA_TASKNAME);
+ mutex_unlock(&dynamic_netconsole_mutex);
+
+ return sysfs_emit(buf, "%d\n", taskname_enabled);
+}
+
+static ssize_t sysdata_release_enabled_show(struct config_item *item,
+ char *buf)
+{
+ struct netconsole_target *nt = to_target(item->ci_parent);
+ bool release_enabled;
+
+ mutex_lock(&dynamic_netconsole_mutex);
+	release_enabled = !!(nt->sysdata_fields & SYSDATA_RELEASE);
+ mutex_unlock(&dynamic_netconsole_mutex);
+
+ return sysfs_emit(buf, "%d\n", release_enabled);
+}
+
+/* Iterate over the list of targets and make sure we don't keep any console
+ * registered without a target of the same type
+ */
+static void unregister_netcons_consoles(void)
+{
+ struct netconsole_target *nt;
+ u32 console_type_needed = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&target_list_lock, flags);
+ list_for_each_entry(nt, &target_list, list) {
+ if (nt->extended)
+ console_type_needed |= CONS_EXTENDED;
+ else
+ console_type_needed |= CONS_BASIC;
+ }
+ spin_unlock_irqrestore(&target_list_lock, flags);
+
+ if (!(console_type_needed & CONS_EXTENDED) &&
+ console_is_registered(&netconsole_ext))
+ unregister_console(&netconsole_ext);
+
+ if (!(console_type_needed & CONS_BASIC) &&
+ console_is_registered(&netconsole))
+ unregister_console(&netconsole);
+}
+
+static ssize_t sysdata_msgid_enabled_show(struct config_item *item,
+ char *buf)
+{
+ struct netconsole_target *nt = to_target(item->ci_parent);
+ bool msgid_enabled;
+
+ mutex_lock(&dynamic_netconsole_mutex);
+ msgid_enabled = !!(nt->sysdata_fields & SYSDATA_MSGID);
+ mutex_unlock(&dynamic_netconsole_mutex);
+
+ return sysfs_emit(buf, "%d\n", msgid_enabled);
+}
+
/*
* This one is special -- targets created through the configfs interface
* are not enabled (and the corresponding netpoll activated) by default.
@@ -404,14 +587,24 @@ static ssize_t enabled_store(struct config_item *item,
goto out_unlock;
}
- if (nt->extended && !console_is_registered(&netconsole_ext))
+ if (nt->extended && !console_is_registered(&netconsole_ext)) {
+ netconsole_ext.flags |= CON_ENABLED;
register_console(&netconsole_ext);
+ }
+
+ /* User might be enabling the basic format target for the very
+ * first time, make sure the console is registered.
+ */
+ if (!nt->extended && !console_is_registered(&netconsole)) {
+ netconsole.flags |= CON_ENABLED;
+ register_console(&netconsole);
+ }
/*
- * Skip netpoll_parse_options() -- all the attributes are
+ * Skip netconsole_parser_cmdline() -- all the attributes are
* already configured via configfs. Just print them out.
*/
- netpoll_print_options(&nt->np);
+ netconsole_print_banner(&nt->np);
ret = netpoll_setup(&nt->np);
if (ret)
@@ -433,6 +626,10 @@ static ssize_t enabled_store(struct config_item *item,
list_move(&nt->list, &target_cleanup_list);
spin_unlock_irqrestore(&target_list_lock, flags);
mutex_unlock(&target_cleanup_list_lock);
+		/* Unregister consoles whose last target of that type got
+ * disabled.
+ */
+ unregister_netcons_consoles();
}
ret = strnlen(buf, count);
@@ -565,6 +762,7 @@ static ssize_t local_ip_store(struct config_item *item, const char *buf,
{
struct netconsole_target *nt = to_target(item);
ssize_t ret = -EINVAL;
+ int ipv6;
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
@@ -573,23 +771,10 @@ static ssize_t local_ip_store(struct config_item *item, const char *buf,
goto out_unlock;
}
- if (strnchr(buf, count, ':')) {
- const char *end;
-
- if (in6_pton(buf, count, nt->np.local_ip.in6.s6_addr, -1, &end) > 0) {
- if (*end && *end != '\n') {
- pr_err("invalid IPv6 address at: <%c>\n", *end);
- goto out_unlock;
- }
- nt->np.ipv6 = true;
- } else
- goto out_unlock;
- } else {
- if (!nt->np.ipv6)
- nt->np.local_ip.ip = in_aton(buf);
- else
- goto out_unlock;
- }
+ ipv6 = netpoll_parse_ip_addr(buf, &nt->np.local_ip);
+ if (ipv6 == -1)
+ goto out_unlock;
+ nt->np.ipv6 = !!ipv6;
ret = strnlen(buf, count);
out_unlock:
@@ -602,6 +787,7 @@ static ssize_t remote_ip_store(struct config_item *item, const char *buf,
{
struct netconsole_target *nt = to_target(item);
ssize_t ret = -EINVAL;
+ int ipv6;
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
@@ -610,23 +796,10 @@ static ssize_t remote_ip_store(struct config_item *item, const char *buf,
goto out_unlock;
}
- if (strnchr(buf, count, ':')) {
- const char *end;
-
- if (in6_pton(buf, count, nt->np.remote_ip.in6.s6_addr, -1, &end) > 0) {
- if (*end && *end != '\n') {
- pr_err("invalid IPv6 address at: <%c>\n", *end);
- goto out_unlock;
- }
- nt->np.ipv6 = true;
- } else
- goto out_unlock;
- } else {
- if (!nt->np.ipv6)
- nt->np.remote_ip.ip = in_aton(buf);
- else
- goto out_unlock;
- }
+ ipv6 = netpoll_parse_ip_addr(buf, &nt->np.remote_ip);
+ if (ipv6 == -1)
+ goto out_unlock;
+ nt->np.ipv6 = !!ipv6;
ret = strnlen(buf, count);
out_unlock:
@@ -634,6 +807,16 @@ out_unlock:
return ret;
}
+/* Count number of entries we have in userdata.
+ * This is important because userdata only supports MAX_USERDATA_ITEMS
+ * entries. Before enabling any new userdata feature, the number of entries
+ * needs to be checked for available space.
+ */
+static size_t count_userdata_entries(struct netconsole_target *nt)
+{
+ return list_count_nodes(&nt->userdata_group.cg_children);
+}
+
static ssize_t remote_mac_store(struct config_item *item, const char *buf,
size_t count)
{
@@ -650,7 +833,7 @@ static ssize_t remote_mac_store(struct config_item *item, const char *buf,
if (!mac_pton(buf, remote_mac))
goto out_unlock;
- if (buf[3 * ETH_ALEN - 1] && buf[3 * ETH_ALEN - 1] != '\n')
+ if (buf[MAC_ADDR_STR_LEN] && buf[MAC_ADDR_STR_LEN] != '\n')
goto out_unlock;
memcpy(nt->np.remote_mac, remote_mac, ETH_ALEN);
@@ -662,7 +845,7 @@ out_unlock:
struct userdatum {
struct config_item item;
- char value[MAX_USERDATA_VALUE_LENGTH];
+ char value[MAX_EXTRADATA_VALUE_LEN];
};
static struct userdatum *to_userdatum(struct config_item *item)
@@ -692,40 +875,77 @@ static ssize_t userdatum_value_show(struct config_item *item, char *buf)
return sysfs_emit(buf, "%s\n", &(to_userdatum(item)->value[0]));
}
-static void update_userdata(struct netconsole_target *nt)
+/* Navigate configfs and calculate the length of the formatted string
+ * representing userdata.
+ * Must be called holding netconsole_subsys.su_mutex
+ */
+static int calc_userdata_len(struct netconsole_target *nt)
{
- int complete_idx = 0, child_count = 0;
+ struct userdatum *udm_item;
+ struct config_item *item;
struct list_head *entry;
-
- /* Clear the current string in case the last userdatum was deleted */
- nt->userdata_length = 0;
- nt->userdata_complete[0] = 0;
+ int len = 0;
list_for_each(entry, &nt->userdata_group.cg_children) {
- struct userdatum *udm_item;
- struct config_item *item;
+ item = container_of(entry, struct config_item, ci_entry);
+ udm_item = to_userdatum(item);
+ /* Skip userdata with no value set */
+ if (udm_item->value[0]) {
+ len += snprintf(NULL, 0, " %s=%s\n", item->ci_name,
+ udm_item->value);
+ }
+ }
+ return len;
+}
- if (child_count >= MAX_USERDATA_ITEMS)
- break;
- child_count++;
+static int update_userdata(struct netconsole_target *nt)
+{
+ struct userdatum *udm_item;
+ struct config_item *item;
+ struct list_head *entry;
+ char *old_buf = NULL;
+ char *new_buf = NULL;
+ unsigned long flags;
+ int offset = 0;
+ int len;
+
+ /* Calculate required buffer size */
+ len = calc_userdata_len(nt);
+
+ if (WARN_ON_ONCE(len > MAX_EXTRADATA_ENTRY_LEN * MAX_USERDATA_ITEMS))
+ return -ENOSPC;
+ /* Allocate new buffer */
+ if (len) {
+ new_buf = kmalloc(len + 1, GFP_KERNEL);
+ if (!new_buf)
+ return -ENOMEM;
+ }
+
+ /* Write userdata to new buffer */
+ list_for_each(entry, &nt->userdata_group.cg_children) {
item = container_of(entry, struct config_item, ci_entry);
udm_item = to_userdatum(item);
-
/* Skip userdata with no value set */
- if (strnlen(udm_item->value, MAX_USERDATA_VALUE_LENGTH) == 0)
- continue;
-
- /* This doesn't overflow userdata_complete since it will write
- * one entry length (1/MAX_USERDATA_ITEMS long), entry count is
- * checked to not exceed MAX items with child_count above
- */
- complete_idx += scnprintf(&nt->userdata_complete[complete_idx],
- MAX_USERDATA_ENTRY_LENGTH, " %s=%s\n",
- item->ci_name, udm_item->value);
+ if (udm_item->value[0]) {
+ offset += scnprintf(&new_buf[offset], len + 1 - offset,
+ " %s=%s\n", item->ci_name,
+ udm_item->value);
+ }
}
- nt->userdata_length = strnlen(nt->userdata_complete,
- sizeof(nt->userdata_complete));
+
+ WARN_ON_ONCE(offset != len);
+
+ /* Switch to new buffer and free old buffer */
+ spin_lock_irqsave(&target_list_lock, flags);
+ old_buf = nt->userdata;
+ nt->userdata = new_buf;
+ nt->userdata_length = offset;
+ spin_unlock_irqrestore(&target_list_lock, flags);
+
+ kfree(old_buf);
+
+ return 0;
}
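An example of the buffer this function rebuilds; the key/value pairs are made up for illustration.

/* Illustration only, not part of the patch: with two populated entries named
 * "rack" and "build", nt->userdata ends up holding
 *   " rack=13\n build=5.4-rc1\n"
 * Each entry costs strlen(name) + strlen(value) + 3 bytes (' ', '=', '\n'),
 * which is where MAX_EXTRADATA_NAME_LEN = 256 - 200 - 3 comes from.
 */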
static ssize_t userdatum_value_store(struct config_item *item, const char *buf,
@@ -736,9 +956,10 @@ static ssize_t userdatum_value_store(struct config_item *item, const char *buf,
struct userdata *ud;
ssize_t ret;
- if (count > MAX_USERDATA_VALUE_LENGTH)
+ if (count > MAX_EXTRADATA_VALUE_LEN)
return -EMSGSIZE;
+ mutex_lock(&netconsole_subsys.su_mutex);
mutex_lock(&dynamic_netconsole_mutex);
ret = strscpy(udm->value, buf, sizeof(udm->value));
@@ -748,14 +969,153 @@ static ssize_t userdatum_value_store(struct config_item *item, const char *buf,
ud = to_userdata(item->ci_parent);
nt = userdata_to_target(ud);
- update_userdata(nt);
+ ret = update_userdata(nt);
+ if (ret < 0)
+ goto out_unlock;
ret = count;
out_unlock:
mutex_unlock(&dynamic_netconsole_mutex);
+ mutex_unlock(&netconsole_subsys.su_mutex);
+ return ret;
+}
+
+/* disable_sysdata_feature - Disable sysdata feature and clean sysdata
+ * @nt: target that is disabling the feature
+ * @feature: feature being disabled
+ */
+static void disable_sysdata_feature(struct netconsole_target *nt,
+ enum sysdata_feature feature)
+{
+ nt->sysdata_fields &= ~feature;
+ nt->sysdata[0] = 0;
+}
+
+static ssize_t sysdata_msgid_enabled_store(struct config_item *item,
+ const char *buf, size_t count)
+{
+ struct netconsole_target *nt = to_target(item->ci_parent);
+ bool msgid_enabled, curr;
+ ssize_t ret;
+
+ ret = kstrtobool(buf, &msgid_enabled);
+ if (ret)
+ return ret;
+
+ mutex_lock(&netconsole_subsys.su_mutex);
+ mutex_lock(&dynamic_netconsole_mutex);
+ curr = !!(nt->sysdata_fields & SYSDATA_MSGID);
+ if (msgid_enabled == curr)
+ goto unlock_ok;
+
+ if (msgid_enabled)
+ nt->sysdata_fields |= SYSDATA_MSGID;
+ else
+ disable_sysdata_feature(nt, SYSDATA_MSGID);
+
+unlock_ok:
+ ret = strnlen(buf, count);
+ mutex_unlock(&dynamic_netconsole_mutex);
+ mutex_unlock(&netconsole_subsys.su_mutex);
+ return ret;
+}
+
+static ssize_t sysdata_release_enabled_store(struct config_item *item,
+ const char *buf, size_t count)
+{
+ struct netconsole_target *nt = to_target(item->ci_parent);
+ bool release_enabled, curr;
+ ssize_t ret;
+
+ ret = kstrtobool(buf, &release_enabled);
+ if (ret)
+ return ret;
+
+ mutex_lock(&netconsole_subsys.su_mutex);
+ mutex_lock(&dynamic_netconsole_mutex);
+ curr = !!(nt->sysdata_fields & SYSDATA_RELEASE);
+ if (release_enabled == curr)
+ goto unlock_ok;
+
+ if (release_enabled)
+ nt->sysdata_fields |= SYSDATA_RELEASE;
+ else
+ disable_sysdata_feature(nt, SYSDATA_RELEASE);
+
+unlock_ok:
+ ret = strnlen(buf, count);
+ mutex_unlock(&dynamic_netconsole_mutex);
+ mutex_unlock(&netconsole_subsys.su_mutex);
+ return ret;
+}
+
+static ssize_t sysdata_taskname_enabled_store(struct config_item *item,
+ const char *buf, size_t count)
+{
+ struct netconsole_target *nt = to_target(item->ci_parent);
+ bool taskname_enabled, curr;
+ ssize_t ret;
+
+ ret = kstrtobool(buf, &taskname_enabled);
+ if (ret)
+ return ret;
+
+ mutex_lock(&netconsole_subsys.su_mutex);
+ mutex_lock(&dynamic_netconsole_mutex);
+ curr = !!(nt->sysdata_fields & SYSDATA_TASKNAME);
+ if (taskname_enabled == curr)
+ goto unlock_ok;
+
+ if (taskname_enabled)
+ nt->sysdata_fields |= SYSDATA_TASKNAME;
+ else
+ disable_sysdata_feature(nt, SYSDATA_TASKNAME);
+
+unlock_ok:
+ ret = strnlen(buf, count);
+ mutex_unlock(&dynamic_netconsole_mutex);
+ mutex_unlock(&netconsole_subsys.su_mutex);
+ return ret;
+}
+
+/* configfs helper to set the sysdata cpu_nr feature */
+static ssize_t sysdata_cpu_nr_enabled_store(struct config_item *item,
+ const char *buf, size_t count)
+{
+ struct netconsole_target *nt = to_target(item->ci_parent);
+ bool cpu_nr_enabled, curr;
+ ssize_t ret;
+
+ ret = kstrtobool(buf, &cpu_nr_enabled);
+ if (ret)
+ return ret;
+
+ mutex_lock(&netconsole_subsys.su_mutex);
+ mutex_lock(&dynamic_netconsole_mutex);
+ curr = !!(nt->sysdata_fields & SYSDATA_CPU_NR);
+ if (cpu_nr_enabled == curr)
+ /* no change requested */
+ goto unlock_ok;
+
+ if (cpu_nr_enabled)
+ nt->sysdata_fields |= SYSDATA_CPU_NR;
+ else
+		/* This is special because sysdata might still hold data from a
+		 * previous configuration, and it needs to be cleared.
+ */
+ disable_sysdata_feature(nt, SYSDATA_CPU_NR);
+
+unlock_ok:
+ ret = strnlen(buf, count);
+ mutex_unlock(&dynamic_netconsole_mutex);
+ mutex_unlock(&netconsole_subsys.su_mutex);
return ret;
}
CONFIGFS_ATTR(userdatum_, value);
+CONFIGFS_ATTR(sysdata_, cpu_nr_enabled);
+CONFIGFS_ATTR(sysdata_, taskname_enabled);
+CONFIGFS_ATTR(sysdata_, release_enabled);
+CONFIGFS_ATTR(sysdata_, msgid_enabled);
static struct configfs_attribute *userdatum_attrs[] = {
&userdatum_attr_value,
@@ -783,15 +1143,13 @@ static struct config_item *userdatum_make_item(struct config_group *group,
struct netconsole_target *nt;
struct userdatum *udm;
struct userdata *ud;
- size_t child_count;
- if (strlen(name) > MAX_USERDATA_NAME_LENGTH)
+ if (strlen(name) > MAX_EXTRADATA_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
ud = to_userdata(&group->cg_item);
nt = userdata_to_target(ud);
- child_count = list_count_nodes(&nt->userdata_group.cg_children);
- if (child_count >= MAX_USERDATA_ITEMS)
+ if (count_userdata_entries(nt) >= MAX_USERDATA_ITEMS)
return ERR_PTR(-ENOSPC);
udm = kzalloc(sizeof(*udm), GFP_KERNEL);
@@ -817,6 +1175,10 @@ static void userdatum_drop(struct config_group *group, struct config_item *item)
}
static struct configfs_attribute *userdata_attrs[] = {
+ &sysdata_attr_cpu_nr_enabled,
+ &sysdata_attr_taskname_enabled,
+ &sysdata_attr_release_enabled,
+ &sysdata_attr_msgid_enabled,
NULL,
};
@@ -842,6 +1204,7 @@ CONFIGFS_ATTR(, remote_ip);
CONFIGFS_ATTR_RO(, local_mac);
CONFIGFS_ATTR(, remote_mac);
CONFIGFS_ATTR(, release);
+CONFIGFS_ATTR_RO(, transmit_errors);
static struct configfs_attribute *netconsole_target_attrs[] = {
&attr_enabled,
@@ -854,6 +1217,7 @@ static struct configfs_attribute *netconsole_target_attrs[] = {
&attr_remote_ip,
&attr_local_mac,
&attr_remote_mac,
+ &attr_transmit_errors,
NULL,
};
@@ -863,7 +1227,10 @@ static struct configfs_attribute *netconsole_target_attrs[] = {
static void netconsole_target_release(struct config_item *item)
{
- kfree(to_target(item));
+ struct netconsole_target *nt = to_target(item);
+
+ kfree(nt->userdata);
+ kfree(nt);
}
static struct configfs_item_operations netconsole_target_item_ops = {
@@ -990,6 +1357,61 @@ static void populate_configfs_item(struct netconsole_target *nt,
init_target_config_group(nt, target_name);
}
+static int sysdata_append_cpu_nr(struct netconsole_target *nt, int offset)
+{
+ return scnprintf(&nt->sysdata[offset],
+ MAX_EXTRADATA_ENTRY_LEN, " cpu=%u\n",
+ raw_smp_processor_id());
+}
+
+static int sysdata_append_taskname(struct netconsole_target *nt, int offset)
+{
+ return scnprintf(&nt->sysdata[offset],
+ MAX_EXTRADATA_ENTRY_LEN, " taskname=%s\n",
+ current->comm);
+}
+
+static int sysdata_append_release(struct netconsole_target *nt, int offset)
+{
+ return scnprintf(&nt->sysdata[offset],
+ MAX_EXTRADATA_ENTRY_LEN, " release=%s\n",
+ init_utsname()->release);
+}
+
+static int sysdata_append_msgid(struct netconsole_target *nt, int offset)
+{
+ wrapping_assign_add(nt->msgcounter, 1);
+ return scnprintf(&nt->sysdata[offset],
+ MAX_EXTRADATA_ENTRY_LEN, " msgid=%u\n",
+ nt->msgcounter);
+}
+
+/*
+ * prepare_sysdata - append sysdata at runtime
+ * @nt: target to send message to
+ */
+static int prepare_sysdata(struct netconsole_target *nt)
+{
+ int sysdata_len = 0;
+
+ if (!nt->sysdata_fields)
+ goto out;
+
+ if (nt->sysdata_fields & SYSDATA_CPU_NR)
+ sysdata_len += sysdata_append_cpu_nr(nt, sysdata_len);
+ if (nt->sysdata_fields & SYSDATA_TASKNAME)
+ sysdata_len += sysdata_append_taskname(nt, sysdata_len);
+ if (nt->sysdata_fields & SYSDATA_RELEASE)
+ sysdata_len += sysdata_append_release(nt, sysdata_len);
+ if (nt->sysdata_fields & SYSDATA_MSGID)
+ sysdata_len += sysdata_append_msgid(nt, sysdata_len);
+
+ WARN_ON_ONCE(sysdata_len >
+ MAX_EXTRADATA_ENTRY_LEN * MAX_SYSDATA_ITEMS);
+
+out:
+ return sysdata_len;
+}
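For clarity, an example of the string this helper can produce; the concrete values are assumptions.

/* Illustration only, not part of the patch: with all four features enabled,
 * nt->sysdata might read
 *   " cpu=2\n taskname=kworker/u8:1\n release=6.x.0\n msgid=42\n"
 * and is appended after userdata in the outgoing message.
 */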
#endif /* CONFIG_NETCONSOLE_DYNAMIC */
/* Handle network interface device notifications */
@@ -1058,34 +1480,67 @@ static struct notifier_block netconsole_netdev_notifier = {
.notifier_call = netconsole_netdev_event,
};
+/**
+ * send_udp - Wrapper for netpoll_send_udp that counts errors
+ * @nt: target to send message to
+ * @msg: message to send
+ * @len: length of message
+ *
+ * Calls netpoll_send_udp and classifies the return value. If an error
+ * occurred it increments statistics in nt->stats accordingly.
+ * Errors are only counted when CONFIG_NETCONSOLE_DYNAMIC is enabled.
+ */
+static void send_udp(struct netconsole_target *nt, const char *msg, int len)
+{
+ int result = netpoll_send_udp(&nt->np, msg, len);
+
+ if (IS_ENABLED(CONFIG_NETCONSOLE_DYNAMIC)) {
+ if (result == NET_XMIT_DROP) {
+ u64_stats_update_begin(&nt->stats.syncp);
+ u64_stats_inc(&nt->stats.xmit_drop_count);
+ u64_stats_update_end(&nt->stats.syncp);
+ } else if (result == -ENOMEM) {
+ u64_stats_update_begin(&nt->stats.syncp);
+ u64_stats_inc(&nt->stats.enomem_count);
+ u64_stats_update_end(&nt->stats.syncp);
+ }
+ }
+}
+
static void send_msg_no_fragmentation(struct netconsole_target *nt,
const char *msg,
int msg_len,
int release_len)
{
- static char buf[MAX_PRINT_CHUNK]; /* protected by target_list_lock */
const char *userdata = NULL;
+ const char *sysdata = NULL;
const char *release;
#ifdef CONFIG_NETCONSOLE_DYNAMIC
- userdata = nt->userdata_complete;
+ userdata = nt->userdata;
+ sysdata = nt->sysdata;
#endif
if (release_len) {
release = init_utsname()->release;
- scnprintf(buf, MAX_PRINT_CHUNK, "%s,%s", release, msg);
+ scnprintf(nt->buf, MAX_PRINT_CHUNK, "%s,%s", release, msg);
msg_len += release_len;
} else {
- memcpy(buf, msg, msg_len);
+ memcpy(nt->buf, msg, msg_len);
}
if (userdata)
- msg_len += scnprintf(&buf[msg_len],
- MAX_PRINT_CHUNK - msg_len,
- "%s", userdata);
+ msg_len += scnprintf(&nt->buf[msg_len],
+ MAX_PRINT_CHUNK - msg_len, "%s",
+ userdata);
+
+ if (sysdata)
+ msg_len += scnprintf(&nt->buf[msg_len],
+ MAX_PRINT_CHUNK - msg_len, "%s",
+ sysdata);
- netpoll_send_udp(&nt->np, buf, msg_len);
+ send_udp(nt, nt->buf, msg_len);
}
static void append_release(char *buf)
@@ -1096,99 +1551,103 @@ static void append_release(char *buf)
scnprintf(buf, MAX_PRINT_CHUNK, "%s,", release);
}
-static void send_fragmented_body(struct netconsole_target *nt, char *buf,
- const char *msgbody, int header_len,
- int msgbody_len)
+static void send_fragmented_body(struct netconsole_target *nt,
+ const char *msgbody_ptr, int header_len,
+ int msgbody_len, int sysdata_len)
{
- const char *userdata = NULL;
- int body_len, offset = 0;
+ const char *userdata_ptr = NULL;
+ const char *sysdata_ptr = NULL;
+ int data_len, data_sent = 0;
+ int userdata_offset = 0;
+ int sysdata_offset = 0;
+ int msgbody_offset = 0;
int userdata_len = 0;
#ifdef CONFIG_NETCONSOLE_DYNAMIC
- userdata = nt->userdata_complete;
+ userdata_ptr = nt->userdata;
+ sysdata_ptr = nt->sysdata;
userdata_len = nt->userdata_length;
#endif
+ if (WARN_ON_ONCE(!userdata_ptr && userdata_len != 0))
+ return;
+
+ if (WARN_ON_ONCE(!sysdata_ptr && sysdata_len != 0))
+ return;
- /* body_len represents the number of bytes that will be sent. This is
+ /* data_len represents the number of bytes that will be sent. This is
* bigger than MAX_PRINT_CHUNK, thus, it will be split in multiple
* packets
*/
- body_len = msgbody_len + userdata_len;
+ data_len = msgbody_len + userdata_len + sysdata_len;
/* In each iteration of the while loop below, we send a packet
- * containing the header and a portion of the body. The body is
- * composed of two parts: msgbody and userdata. We keep track of how
- * many bytes have been sent so far using the offset variable, which
- * ranges from 0 to the total length of the body.
+ * containing the header and a portion of the data. The data is
+ * composed of three parts: msgbody, userdata, and sysdata.
+ * We keep track of how many bytes have been sent from each part using
+ * the *_offset variables.
+ * We keep track of how many bytes have been sent overall using the
+ * data_sent variable, which ranges from 0 to the total bytes to be
+ * sent.
*/
- while (offset < body_len) {
- int this_header = header_len;
- bool msgbody_written = false;
- int this_offset = 0;
+ while (data_sent < data_len) {
+ int userdata_left = userdata_len - userdata_offset;
+ int sysdata_left = sysdata_len - sysdata_offset;
+ int msgbody_left = msgbody_len - msgbody_offset;
+ int buf_offset = 0;
int this_chunk = 0;
- this_header += scnprintf(buf + this_header,
- MAX_PRINT_CHUNK - this_header,
- ",ncfrag=%d/%d;", offset,
- body_len);
-
- /* Not all msgbody data has been written yet */
- if (offset < msgbody_len) {
- this_chunk = min(msgbody_len - offset,
- MAX_PRINT_CHUNK - this_header);
- if (WARN_ON_ONCE(this_chunk <= 0))
- return;
- memcpy(buf + this_header, msgbody + offset, this_chunk);
- this_offset += this_chunk;
+ /* header is already populated in nt->buf, just append to it */
+ buf_offset = header_len;
+
+ buf_offset += scnprintf(nt->buf + buf_offset,
+ MAX_PRINT_CHUNK - buf_offset,
+ ",ncfrag=%d/%d;", data_sent,
+ data_len);
+
+ /* append msgbody first */
+ this_chunk = min(msgbody_left, MAX_PRINT_CHUNK - buf_offset);
+ memcpy(nt->buf + buf_offset, msgbody_ptr + msgbody_offset,
+ this_chunk);
+ msgbody_offset += this_chunk;
+ buf_offset += this_chunk;
+ data_sent += this_chunk;
+
+ /* after msgbody, append userdata */
+ if (userdata_ptr && userdata_left) {
+ this_chunk = min(userdata_left,
+ MAX_PRINT_CHUNK - buf_offset);
+ memcpy(nt->buf + buf_offset,
+ userdata_ptr + userdata_offset, this_chunk);
+ userdata_offset += this_chunk;
+ buf_offset += this_chunk;
+ data_sent += this_chunk;
}
- /* msgbody was finally written, either in the previous
- * messages and/or in the current buf. Time to write
- * the userdata.
- */
- msgbody_written |= offset + this_offset >= msgbody_len;
-
- /* Msg body is fully written and there is pending userdata to
- * write, append userdata in this chunk
- */
- if (msgbody_written && offset + this_offset < body_len) {
- /* Track how much user data was already sent. First
- * time here, sent_userdata is zero
- */
- int sent_userdata = (offset + this_offset) - msgbody_len;
- /* offset of bytes used in current buf */
- int preceding_bytes = this_chunk + this_header;
-
- if (WARN_ON_ONCE(sent_userdata < 0))
- return;
-
- this_chunk = min(userdata_len - sent_userdata,
- MAX_PRINT_CHUNK - preceding_bytes);
- if (WARN_ON_ONCE(this_chunk < 0))
- /* this_chunk could be zero if all the previous
- * message used all the buffer. This is not a
- * problem, userdata will be sent in the next
- * iteration
- */
- return;
-
- memcpy(buf + this_header + this_offset,
- userdata + sent_userdata,
- this_chunk);
- this_offset += this_chunk;
+ /* after userdata, append sysdata */
+ if (sysdata_ptr && sysdata_left) {
+ this_chunk = min(sysdata_left,
+ MAX_PRINT_CHUNK - buf_offset);
+ memcpy(nt->buf + buf_offset,
+ sysdata_ptr + sysdata_offset, this_chunk);
+ sysdata_offset += this_chunk;
+ buf_offset += this_chunk;
+ data_sent += this_chunk;
}
- netpoll_send_udp(&nt->np, buf, this_header + this_offset);
- offset += this_offset;
+ /* if all is good, send the packet out */
+ if (WARN_ON_ONCE(data_sent > data_len))
+ return;
+
+ send_udp(nt, nt->buf, buf_offset);
}
}
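A rough numeric illustration of how the loop above splits a large message; the header sizes are assumptions, not values from the patch.

/* Illustration only: with MAX_PRINT_CHUNK = 1000, a ~50-byte extended-format
 * header and a ",ncfrag=X/Y;" tag of ~15 bytes, each packet carries roughly
 * 935 bytes of payload, so a data_len of 2500 (msgbody + userdata + sysdata)
 * is sent as three packets, each repeating the header and advancing data_sent.
 */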
static void send_msg_fragmented(struct netconsole_target *nt,
const char *msg,
int msg_len,
- int release_len)
+ int release_len,
+ int sysdata_len)
{
- static char buf[MAX_PRINT_CHUNK]; /* protected by target_list_lock */
int header_len, msgbody_len;
const char *msgbody;
@@ -1206,16 +1665,17 @@ static void send_msg_fragmented(struct netconsole_target *nt,
* "ncfrag=<byte-offset>/<total-bytes>"
*/
if (release_len)
- append_release(buf);
+ append_release(nt->buf);
/* Copy the header into the buffer */
- memcpy(buf + release_len, msg, header_len);
+ memcpy(nt->buf + release_len, msg, header_len);
header_len += release_len;
	/* from now on, the header will be persisted, and the msgbody
* will be replaced
*/
- send_fragmented_body(nt, buf, msgbody, header_len, msgbody_len);
+ send_fragmented_body(nt, msgbody, header_len, msgbody_len,
+ sysdata_len);
}
/**
@@ -1233,18 +1693,20 @@ static void send_ext_msg_udp(struct netconsole_target *nt, const char *msg,
{
int userdata_len = 0;
int release_len = 0;
+ int sysdata_len = 0;
#ifdef CONFIG_NETCONSOLE_DYNAMIC
+ sysdata_len = prepare_sysdata(nt);
userdata_len = nt->userdata_length;
#endif
-
if (nt->release)
release_len = strlen(init_utsname()->release) + 1;
- if (msg_len + release_len + userdata_len <= MAX_PRINT_CHUNK)
+ if (msg_len + release_len + sysdata_len + userdata_len <= MAX_PRINT_CHUNK)
return send_msg_no_fragmentation(nt, msg, msg_len, release_len);
- return send_msg_fragmented(nt, msg, msg_len, release_len);
+ return send_msg_fragmented(nt, msg, msg_len, release_len,
+ sysdata_len);
}
static void write_ext_msg(struct console *con, const char *msg,
@@ -1288,7 +1750,7 @@ static void write_msg(struct console *con, const char *msg, unsigned int len)
tmp = msg;
for (left = len; left;) {
frag = min(left, MAX_PRINT_CHUNK);
- netpoll_send_udp(&nt->np, tmp, frag);
+ send_udp(nt, tmp, frag);
tmp += frag;
left -= frag;
}
@@ -1297,6 +1759,100 @@ static void write_msg(struct console *con, const char *msg, unsigned int len)
spin_unlock_irqrestore(&target_list_lock, flags);
}
+static int netconsole_parser_cmdline(struct netpoll *np, char *opt)
+{
+ bool ipversion_set = false;
+ char *cur = opt;
+ char *delim;
+ int ipv6;
+
+ if (*cur != '@') {
+ delim = strchr(cur, '@');
+ if (!delim)
+ goto parse_failed;
+ *delim = 0;
+ if (kstrtou16(cur, 10, &np->local_port))
+ goto parse_failed;
+ cur = delim;
+ }
+ cur++;
+
+ if (*cur != '/') {
+ ipversion_set = true;
+ delim = strchr(cur, '/');
+ if (!delim)
+ goto parse_failed;
+ *delim = 0;
+ ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
+ if (ipv6 < 0)
+ goto parse_failed;
+ else
+ np->ipv6 = (bool)ipv6;
+ cur = delim;
+ }
+ cur++;
+
+ if (*cur != ',') {
+ /* parse out dev_name or dev_mac */
+ delim = strchr(cur, ',');
+ if (!delim)
+ goto parse_failed;
+ *delim = 0;
+
+ np->dev_name[0] = '\0';
+ eth_broadcast_addr(np->dev_mac);
+ if (!strchr(cur, ':'))
+ strscpy(np->dev_name, cur, sizeof(np->dev_name));
+ else if (!mac_pton(cur, np->dev_mac))
+ goto parse_failed;
+
+ cur = delim;
+ }
+ cur++;
+
+ if (*cur != '@') {
+ /* dst port */
+ delim = strchr(cur, '@');
+ if (!delim)
+ goto parse_failed;
+ *delim = 0;
+ if (*cur == ' ' || *cur == '\t')
+ np_info(np, "warning: whitespace is not allowed\n");
+ if (kstrtou16(cur, 10, &np->remote_port))
+ goto parse_failed;
+ cur = delim;
+ }
+ cur++;
+
+ /* dst ip */
+ delim = strchr(cur, '/');
+ if (!delim)
+ goto parse_failed;
+ *delim = 0;
+ ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
+ if (ipv6 < 0)
+ goto parse_failed;
+ else if (ipversion_set && np->ipv6 != (bool)ipv6)
+ goto parse_failed;
+ else
+ np->ipv6 = (bool)ipv6;
+ cur = delim + 1;
+
+ if (*cur != 0) {
+ /* MAC address */
+ if (!mac_pton(cur, np->remote_mac))
+ goto parse_failed;
+ }
+
+ netconsole_print_banner(np);
+
+ return 0;
+
+ parse_failed:
+ np_info(np, "couldn't parse config at '%s'!\n", cur);
+ return -1;
+}
+
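For reference, the target string this parser walks is the long-standing netconsole format: source port '@' source IP '/' device name (or device MAC), ',' destination port '@' destination IP '/' destination MAC, where an empty field is skipped when its delimiter appears immediately. An illustrative value (not part of this patch) would be netconsole=6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55, which exercises the kstrtou16(), netpoll_parse_ip_addr() and mac_pton() calls above.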
/* Allocate new target (from boot/module param) and setup netpoll for it */
static struct netconsole_target *alloc_param_target(char *target_config,
int cmdline_count)
@@ -1326,7 +1882,7 @@ static struct netconsole_target *alloc_param_target(char *target_config,
}
/* Parse parameters and setup netpoll */
- err = netpoll_parse_options(&nt->np, target_config);
+ err = netconsole_parser_cmdline(&nt->np, target_config);
if (err)
goto fail;
@@ -1355,6 +1911,9 @@ fail:
static void free_param_target(struct netconsole_target *nt)
{
netpoll_cleanup(&nt->np);
+#ifdef CONFIG_NETCONSOLE_DYNAMIC
+ kfree(nt->userdata);
+#endif
kfree(nt);
}
@@ -1374,8 +1933,8 @@ static int __init init_netconsole(void)
{
int err;
struct netconsole_target *nt, *tmp;
+ u32 console_type_needed = 0;
unsigned int count = 0;
- bool extended = false;
unsigned long flags;
char *target_config;
char *input = config;
@@ -1391,9 +1950,10 @@ static int __init init_netconsole(void)
}
/* Dump existing printks when we register */
if (nt->extended) {
- extended = true;
+ console_type_needed |= CONS_EXTENDED;
netconsole_ext.flags |= CON_PRINTBUFFER;
} else {
+ console_type_needed |= CONS_BASIC;
netconsole.flags |= CON_PRINTBUFFER;
}
@@ -1412,9 +1972,10 @@ static int __init init_netconsole(void)
if (err)
goto undonotifier;
- if (extended)
+ if (console_type_needed & CONS_EXTENDED)
register_console(&netconsole_ext);
- register_console(&netconsole);
+ if (console_type_needed & CONS_BASIC)
+ register_console(&netconsole);
pr_info("network logging started\n");
return err;
@@ -1444,7 +2005,8 @@ static void __exit cleanup_netconsole(void)
if (console_is_registered(&netconsole_ext))
unregister_console(&netconsole_ext);
- unregister_console(&netconsole);
+ if (console_is_registered(&netconsole))
+ unregister_console(&netconsole);
dynamic_netconsole_exit();
unregister_netdevice_notifier(&netconsole_netdev_notifier);
diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile
index f8de93bc5f5b..14a553e000ec 100644
--- a/drivers/net/netdevsim/Makefile
+++ b/drivers/net/netdevsim/Makefile
@@ -18,6 +18,10 @@ ifneq ($(CONFIG_PSAMPLE),)
netdevsim-objs += psample.o
endif
+ifneq ($(CONFIG_INET_PSP),)
+netdevsim-objs += psp.o
+endif
+
ifneq ($(CONFIG_MACSEC),)
netdevsim-objs += macsec.o
endif
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
index 608953d4f98d..49537d3c4120 100644
--- a/drivers/net/netdevsim/bpf.c
+++ b/drivers/net/netdevsim/bpf.c
@@ -296,7 +296,8 @@ static int nsim_setup_prog_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
NSIM_EA(bpf->extack, "attempt to load offloaded prog to drv");
return -EINVAL;
}
- if (ns->netdev->mtu > NSIM_XDP_MAX_MTU) {
+ if (bpf->prog && !bpf->prog->aux->xdp_has_frags &&
+ ns->netdev->mtu > NSIM_XDP_MAX_MTU) {
NSIM_EA(bpf->extack, "MTU too large w/ XDP enabled");
return -EINVAL;
}
diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c
index 64c0cdd31bf8..70e8c38ddad6 100644
--- a/drivers/net/netdevsim/bus.c
+++ b/drivers/net/netdevsim/bus.c
@@ -66,17 +66,35 @@ new_port_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
+ u8 eth_addr[ETH_ALEN] = {};
unsigned int port_index;
+ bool addr_set = false;
int ret;
/* Prevent to use nsim_bus_dev before initialization. */
if (!smp_load_acquire(&nsim_bus_dev->init))
return -EBUSY;
- ret = kstrtouint(buf, 0, &port_index);
- if (ret)
- return ret;
- ret = nsim_drv_port_add(nsim_bus_dev, NSIM_DEV_PORT_TYPE_PF, port_index);
+ ret = sscanf(buf, "%u %hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &port_index,
+ &eth_addr[0], &eth_addr[1], &eth_addr[2], &eth_addr[3],
+ &eth_addr[4], &eth_addr[5]);
+ switch (ret) {
+ case 7:
+ if (!is_valid_ether_addr(eth_addr)) {
+ pr_err("The supplied perm_addr is not a valid MAC address\n");
+ return -EINVAL;
+ }
+ addr_set = true;
+ fallthrough;
+ case 1:
+ break;
+ default:
+ pr_err("Format for adding new port is \"id [perm_addr]\" (uint MAC).\n");
+ return -EINVAL;
+ }
+
+ ret = nsim_drv_port_add(nsim_bus_dev, NSIM_DEV_PORT_TYPE_PF, port_index,
+ addr_set ? eth_addr : NULL);
return ret ? ret : count;
}
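As a usage sketch (assuming the usual netdevsim sysfs layout, which this hunk does not show), the extended format lets a test create a port with a fixed permanent MAC by writing, e.g., "1 be:ef:00:00:00:01" to the bus device's new_port attribute, while a bare "1" keeps the previous behaviour with no permanent address supplied; anything else trips the pr_err() above and returns -EINVAL.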
@@ -366,6 +384,9 @@ static ssize_t unlink_device_store(const struct bus_type *bus, const char *buf,
err = 0;
RCU_INIT_POINTER(nsim->peer, NULL);
RCU_INIT_POINTER(peer->peer, NULL);
+ synchronize_net();
+ netif_tx_wake_all_queues(dev);
+ netif_tx_wake_all_queues(peer->netdev);
out_put_netns:
put_net(ns);
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 3e0b61202f0c..2683a989873e 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -314,10 +314,14 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
&nsim_dev->fw_update_status);
debugfs_create_u32("fw_update_overwrite_mask", 0600, nsim_dev->ddir,
&nsim_dev->fw_update_overwrite_mask);
+ debugfs_create_u32("fw_update_flash_chunk_time_ms", 0600, nsim_dev->ddir,
+ &nsim_dev->fw_update_flash_chunk_time_ms);
debugfs_create_u32("max_macs", 0600, nsim_dev->ddir,
&nsim_dev->max_macs);
debugfs_create_bool("test1", 0600, nsim_dev->ddir,
&nsim_dev->test1);
+ debugfs_create_u32("test2", 0600, nsim_dev->ddir,
+ &nsim_dev->test2);
nsim_dev->take_snapshot = debugfs_create_file("take_snapshot",
0200,
nsim_dev->ddir,
@@ -388,6 +392,17 @@ static const struct file_operations nsim_dev_rate_parent_fops = {
.owner = THIS_MODULE,
};
+static void nsim_dev_tc_bw_debugfs_init(struct dentry *ddir, u32 *tc_bw)
+{
+ int i;
+
+ for (i = 0; i < DEVLINK_RATE_TCS_MAX; i++) {
+ char name[16];
+
+ snprintf(name, sizeof(name), "tc%d_bw", i);
+ debugfs_create_u32(name, 0400, ddir, &tc_bw[i]);
+ }
+}
static int nsim_dev_port_debugfs_init(struct nsim_dev *nsim_dev,
struct nsim_dev_port *nsim_dev_port)
{
@@ -415,6 +430,8 @@ static int nsim_dev_port_debugfs_init(struct nsim_dev *nsim_dev,
nsim_dev_port->ddir,
&nsim_dev_port->parent_name,
&nsim_dev_rate_parent_fops);
+ nsim_dev_tc_bw_debugfs_init(nsim_dev_port->ddir,
+ nsim_dev_port->tc_bw);
}
debugfs_create_symlink("dev", nsim_dev_port->ddir, dev_link_name);
@@ -506,8 +523,53 @@ err_out:
enum nsim_devlink_param_id {
NSIM_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
NSIM_DEVLINK_PARAM_ID_TEST1,
+ NSIM_DEVLINK_PARAM_ID_TEST2,
};
+static int
+nsim_devlink_param_test2_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+
+ ctx->val.vu32 = nsim_dev->test2;
+ return 0;
+}
+
+static int
+nsim_devlink_param_test2_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+
+ nsim_dev->test2 = ctx->val.vu32;
+ return 0;
+}
+
+#define NSIM_DEV_TEST2_DEFAULT 1234
+
+static int
+nsim_devlink_param_test2_get_default(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ ctx->val.vu32 = NSIM_DEV_TEST2_DEFAULT;
+ return 0;
+}
+
+static int
+nsim_devlink_param_test2_reset_default(struct devlink *devlink, u32 id,
+ enum devlink_param_cmode cmode,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+
+ nsim_dev->test2 = NSIM_DEV_TEST2_DEFAULT;
+ return 0;
+}
+
static const struct devlink_param nsim_devlink_params[] = {
DEVLINK_PARAM_GENERIC(MAX_MACS,
BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
@@ -516,6 +578,14 @@ static const struct devlink_param nsim_devlink_params[] = {
"test1", DEVLINK_PARAM_TYPE_BOOL,
BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
NULL, NULL, NULL),
+ DEVLINK_PARAM_DRIVER_WITH_DEFAULTS(NSIM_DEVLINK_PARAM_ID_TEST2,
+ "test2", DEVLINK_PARAM_TYPE_U32,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ nsim_devlink_param_test2_get,
+ nsim_devlink_param_test2_set,
+ NULL,
+ nsim_devlink_param_test2_get_default,
+ nsim_devlink_param_test2_reset_default),
};
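Because "test2" is registered with DEVLINK_PARAM_CMODE_RUNTIME and the _WITH_DEFAULTS variant, it can be exercised from userspace with the stock devlink tool, e.g. (illustrative only, assuming the usual netdevsim devlink handle) devlink dev param set netdevsim/netdevsim0 name test2 value 1000 cmode runtime, and read back with devlink dev param show; the get_default/reset_default callbacks report and restore NSIM_DEV_TEST2_DEFAULT (1234).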
static void nsim_devlink_set_params_init_values(struct nsim_dev *nsim_dev,
@@ -576,7 +646,7 @@ static void nsim_dev_dummy_region_exit(struct nsim_dev *nsim_dev)
static int
__nsim_dev_port_add(struct nsim_dev *nsim_dev, enum nsim_dev_port_type type,
- unsigned int port_index);
+ unsigned int port_index, u8 perm_addr[ETH_ALEN]);
static void __nsim_dev_port_del(struct nsim_dev_port *nsim_dev_port);
static int nsim_esw_legacy_enable(struct nsim_dev *nsim_dev,
@@ -600,7 +670,7 @@ static int nsim_esw_switchdev_enable(struct nsim_dev *nsim_dev,
int i, err;
for (i = 0; i < nsim_dev_get_vfs(nsim_dev); i++) {
- err = __nsim_dev_port_add(nsim_dev, NSIM_DEV_PORT_TYPE_VF, i);
+ err = __nsim_dev_port_add(nsim_dev, NSIM_DEV_PORT_TYPE_VF, i, NULL);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to initialize VFs' netdevsim ports");
pr_err("Failed to initialize VF id=%d. %d.\n", i, err);
@@ -836,7 +906,7 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
nsim_dev = nsim_trap_data->nsim_dev;
if (!devl_trylock(priv_to_devlink(nsim_dev))) {
- queue_delayed_work(system_unbound_wq,
+ queue_delayed_work(system_dfl_wq,
&nsim_dev->trap_data->trap_report_dw, 1);
return;
}
@@ -852,7 +922,7 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
cond_resched();
}
devl_unlock(priv_to_devlink(nsim_dev));
- queue_delayed_work(system_unbound_wq,
+ queue_delayed_work(system_dfl_wq,
&nsim_dev->trap_data->trap_report_dw,
msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
}
@@ -909,7 +979,7 @@ static int nsim_dev_traps_init(struct devlink *devlink)
INIT_DELAYED_WORK(&nsim_dev->trap_data->trap_report_dw,
nsim_dev_trap_report_work);
- queue_delayed_work(system_unbound_wq,
+ queue_delayed_work(system_dfl_wq,
&nsim_dev->trap_data->trap_report_dw,
msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
@@ -1002,9 +1072,9 @@ static int nsim_dev_info_get(struct devlink *devlink,
DEVLINK_INFO_VERSION_TYPE_COMPONENT);
}
-#define NSIM_DEV_FLASH_SIZE 500000
+#define NSIM_DEV_FLASH_SIZE 50000
#define NSIM_DEV_FLASH_CHUNK_SIZE 1000
-#define NSIM_DEV_FLASH_CHUNK_TIME_MS 10
+#define NSIM_DEV_FLASH_CHUNK_TIME_MS_DEFAULT 100
static int nsim_dev_flash_update(struct devlink *devlink,
struct devlink_flash_update_params *params,
@@ -1028,7 +1098,7 @@ static int nsim_dev_flash_update(struct devlink *devlink,
params->component,
i * NSIM_DEV_FLASH_CHUNK_SIZE,
NSIM_DEV_FLASH_SIZE);
- msleep(NSIM_DEV_FLASH_CHUNK_TIME_MS);
+ msleep(nsim_dev->fw_update_flash_chunk_time_ms ?: 1);
}
if (nsim_dev->fw_update_status) {
@@ -1172,6 +1242,19 @@ static int nsim_rate_bytes_to_units(char *name, u64 *rate, struct netlink_ext_ac
return 0;
}
+static int nsim_leaf_tc_bw_set(struct devlink_rate *devlink_rate,
+ void *priv, u32 *tc_bw,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev_port *nsim_dev_port = priv;
+ int i;
+
+ for (i = 0; i < DEVLINK_RATE_TCS_MAX; i++)
+ nsim_dev_port->tc_bw[i] = tc_bw[i];
+
+ return 0;
+}
+
static int nsim_leaf_tx_share_set(struct devlink_rate *devlink_rate, void *priv,
u64 tx_share, struct netlink_ext_ack *extack)
{
@@ -1210,8 +1293,21 @@ struct nsim_rate_node {
char *parent_name;
u16 tx_share;
u16 tx_max;
+ u32 tc_bw[DEVLINK_RATE_TCS_MAX];
};
+static int nsim_node_tc_bw_set(struct devlink_rate *devlink_rate, void *priv,
+ u32 *tc_bw, struct netlink_ext_ack *extack)
+{
+ struct nsim_rate_node *nsim_node = priv;
+ int i;
+
+ for (i = 0; i < DEVLINK_RATE_TCS_MAX; i++)
+ nsim_node->tc_bw[i] = tc_bw[i];
+
+ return 0;
+}
+
static int nsim_node_tx_share_set(struct devlink_rate *devlink_rate, void *priv,
u64 tx_share, struct netlink_ext_ack *extack)
{
@@ -1264,6 +1360,8 @@ static int nsim_rate_node_new(struct devlink_rate *node, void **priv,
&nsim_node->parent_name,
&nsim_dev_rate_parent_fops);
+ nsim_dev_tc_bw_debugfs_init(nsim_node->ddir, nsim_node->tc_bw);
+
*priv = nsim_node;
return 0;
}
@@ -1340,8 +1438,10 @@ static const struct devlink_ops nsim_dev_devlink_ops = {
.trap_policer_counter_get = nsim_dev_devlink_trap_policer_counter_get,
.rate_leaf_tx_share_set = nsim_leaf_tx_share_set,
.rate_leaf_tx_max_set = nsim_leaf_tx_max_set,
+ .rate_leaf_tc_bw_set = nsim_leaf_tc_bw_set,
.rate_node_tx_share_set = nsim_node_tx_share_set,
.rate_node_tx_max_set = nsim_node_tx_max_set,
+ .rate_node_tc_bw_set = nsim_node_tc_bw_set,
.rate_node_new = nsim_rate_node_new,
.rate_node_del = nsim_rate_node_del,
.rate_leaf_parent_set = nsim_rate_leaf_parent_set,
@@ -1353,7 +1453,7 @@ static const struct devlink_ops nsim_dev_devlink_ops = {
#define NSIM_DEV_TEST1_DEFAULT true
static int __nsim_dev_port_add(struct nsim_dev *nsim_dev, enum nsim_dev_port_type type,
- unsigned int port_index)
+ unsigned int port_index, u8 perm_addr[ETH_ALEN])
{
struct devlink_port_attrs attrs = {};
struct nsim_dev_port *nsim_dev_port;
@@ -1390,7 +1490,7 @@ static int __nsim_dev_port_add(struct nsim_dev *nsim_dev, enum nsim_dev_port_typ
if (err)
goto err_dl_port_unregister;
- nsim_dev_port->ns = nsim_create(nsim_dev, nsim_dev_port);
+ nsim_dev_port->ns = nsim_create(nsim_dev, nsim_dev_port, perm_addr);
if (IS_ERR(nsim_dev_port->ns)) {
err = PTR_ERR(nsim_dev_port->ns);
goto err_port_debugfs_exit;
@@ -1446,7 +1546,7 @@ static int nsim_dev_port_add_all(struct nsim_dev *nsim_dev,
int i, err;
for (i = 0; i < port_count; i++) {
- err = __nsim_dev_port_add(nsim_dev, NSIM_DEV_PORT_TYPE_PF, i);
+ err = __nsim_dev_port_add(nsim_dev, NSIM_DEV_PORT_TYPE_PF, i, NULL);
if (err)
goto err_port_del_all;
}
@@ -1542,8 +1642,10 @@ int nsim_drv_probe(struct nsim_bus_dev *nsim_bus_dev)
INIT_LIST_HEAD(&nsim_dev->port_list);
nsim_dev->fw_update_status = true;
nsim_dev->fw_update_overwrite_mask = 0;
+ nsim_dev->fw_update_flash_chunk_time_ms = NSIM_DEV_FLASH_CHUNK_TIME_MS_DEFAULT;
nsim_dev->max_macs = NSIM_DEV_MAX_MACS_DEFAULT;
nsim_dev->test1 = NSIM_DEV_TEST1_DEFAULT;
+ nsim_dev->test2 = NSIM_DEV_TEST2_DEFAULT;
spin_lock_init(&nsim_dev->fa_cookie_lock);
dev_set_drvdata(&nsim_bus_dev->dev, nsim_dev);
@@ -1702,7 +1804,7 @@ __nsim_dev_port_lookup(struct nsim_dev *nsim_dev, enum nsim_dev_port_type type,
}
int nsim_drv_port_add(struct nsim_bus_dev *nsim_bus_dev, enum nsim_dev_port_type type,
- unsigned int port_index)
+ unsigned int port_index, u8 perm_addr[ETH_ALEN])
{
struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
int err;
@@ -1711,7 +1813,7 @@ int nsim_drv_port_add(struct nsim_bus_dev *nsim_bus_dev, enum nsim_dev_port_type
if (__nsim_dev_port_lookup(nsim_dev, type, port_index))
err = -EEXIST;
else
- err = __nsim_dev_port_add(nsim_dev, type, port_index);
+ err = __nsim_dev_port_add(nsim_dev, type, port_index, perm_addr);
devl_unlock(priv_to_devlink(nsim_dev));
return err;
}
diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c
index 5fe1eaef99b5..36a201533aae 100644
--- a/drivers/net/netdevsim/ethtool.c
+++ b/drivers/net/netdevsim/ethtool.c
@@ -2,8 +2,8 @@
// Copyright (c) 2020 Facebook
#include <linux/debugfs.h>
-#include <linux/ethtool.h>
#include <linux/random.h>
+#include <net/netdev_queues.h>
#include "netdevsim.h"
@@ -72,6 +72,10 @@ static void nsim_get_ringparam(struct net_device *dev,
struct netdevsim *ns = netdev_priv(dev);
memcpy(ring, &ns->ethtool.ring, sizeof(ns->ethtool.ring));
+ kernel_ring->hds_thresh_max = NSIM_HDS_THRESHOLD_MAX;
+
+ if (dev->cfg->hds_config == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN)
+ kernel_ring->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
}
static int nsim_set_ringparam(struct net_device *dev,
@@ -97,20 +101,39 @@ nsim_get_channels(struct net_device *dev, struct ethtool_channels *ch)
ch->combined_count = ns->ethtool.channels;
}
+static void
+nsim_wake_queues(struct net_device *dev)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+ struct netdevsim *peer;
+
+ synchronize_net();
+ netif_tx_wake_all_queues(dev);
+
+ rcu_read_lock();
+ peer = rcu_dereference(ns->peer);
+ if (peer)
+ netif_tx_wake_all_queues(peer->netdev);
+ rcu_read_unlock();
+}
+
static int
nsim_set_channels(struct net_device *dev, struct ethtool_channels *ch)
{
struct netdevsim *ns = netdev_priv(dev);
int err;
- mutex_lock(&dev->lock);
err = netif_set_real_num_queues(dev, ch->combined_count,
ch->combined_count);
- mutex_unlock(&dev->lock);
if (err)
return err;
ns->ethtool.channels = ch->combined_count;
+
+ /* Only wake up queues if devices are linked */
+ if (rcu_access_pointer(ns->peer))
+ nsim_wake_queues(dev);
+
return 0;
}
@@ -142,11 +165,34 @@ nsim_set_fecparam(struct net_device *dev, struct ethtool_fecparam *fecparam)
return 0;
}
+static const struct ethtool_fec_hist_range netdevsim_fec_ranges[] = {
+ { 0, 0},
+ { 1, 3},
+ { 4, 7},
+ { 0, 0}
+};
+
static void
-nsim_get_fec_stats(struct net_device *dev, struct ethtool_fec_stats *fec_stats)
+nsim_get_fec_stats(struct net_device *dev, struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
+ struct ethtool_fec_hist_value *values = hist->values;
+
+ hist->ranges = netdevsim_fec_ranges;
+
fec_stats->corrected_blocks.total = 123;
fec_stats->uncorrectable_blocks.total = 4;
+
+ values[0].per_lane[0] = 125;
+ values[0].per_lane[1] = 120;
+ values[0].per_lane[2] = 100;
+ values[0].per_lane[3] = 100;
+ values[1].sum = 12;
+ values[2].sum = 2;
+ values[2].per_lane[0] = 2;
+ values[2].per_lane[1] = 0;
+ values[2].per_lane[2] = 0;
+ values[2].per_lane[3] = 0;
}
static int nsim_get_ts_info(struct net_device *dev,
@@ -161,6 +207,8 @@ static int nsim_get_ts_info(struct net_device *dev,
static const struct ethtool_ops nsim_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_ALL_PARAMS,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT |
+ ETHTOOL_RING_USE_HDS_THRS,
.get_pause_stats = nsim_get_pause_stats,
.get_pauseparam = nsim_get_pauseparam,
.set_pauseparam = nsim_set_pauseparam,
@@ -178,9 +226,11 @@ static const struct ethtool_ops nsim_ethtool_ops = {
static void nsim_ethtool_ring_init(struct netdevsim *ns)
{
+ ns->ethtool.ring.rx_pending = 512;
ns->ethtool.ring.rx_max_pending = 4096;
ns->ethtool.ring.rx_jumbo_max_pending = 4096;
ns->ethtool.ring.rx_mini_max_pending = 4096;
+ ns->ethtool.ring.tx_pending = 512;
ns->ethtool.ring.tx_max_pending = 4096;
}
diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c
index 70e8bdf34be9..3bd0e7a489c3 100644
--- a/drivers/net/netdevsim/health.c
+++ b/drivers/net/netdevsim/health.c
@@ -149,6 +149,8 @@ static ssize_t nsim_dev_health_break_write(struct file *file,
char *break_msg;
int err;
+ if (count == 0 || count > PAGE_SIZE)
+ return -EINVAL;
break_msg = memdup_user_nul(data, count);
if (IS_ERR(break_msg))
return PTR_ERR(break_msg);
@@ -181,14 +183,14 @@ int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink)
health->empty_reporter =
devl_health_reporter_create(devlink,
&nsim_dev_empty_reporter_ops,
- 0, health);
+ health);
if (IS_ERR(health->empty_reporter))
return PTR_ERR(health->empty_reporter);
health->dummy_reporter =
devl_health_reporter_create(devlink,
&nsim_dev_dummy_reporter_ops,
- 0, health);
+ health);
if (IS_ERR(health->dummy_reporter)) {
err = PTR_ERR(health->dummy_reporter);
goto err_empty_reporter_destroy;
diff --git a/drivers/net/netdevsim/hwstats.c b/drivers/net/netdevsim/hwstats.c
index 0e58aa7f0374..1abe48e35ca3 100644
--- a/drivers/net/netdevsim/hwstats.c
+++ b/drivers/net/netdevsim/hwstats.c
@@ -220,7 +220,6 @@ nsim_dev_hwstats_enable_ifindex(struct nsim_dev_hwstats *hwstats,
struct nsim_dev_hwstats_netdev *hwsdev;
struct nsim_dev *nsim_dev;
struct net_device *netdev;
- bool notify = false;
struct net *net;
int err = 0;
@@ -251,11 +250,9 @@ nsim_dev_hwstats_enable_ifindex(struct nsim_dev_hwstats *hwstats,
if (netdev_offload_xstats_enabled(netdev, type)) {
nsim_dev_hwsdev_enable(hwsdev, NULL);
- notify = true;
+ rtnl_offload_xstats_notify(netdev);
}
- if (notify)
- rtnl_offload_xstats_notify(netdev);
rtnl_unlock();
return err;
@@ -331,7 +328,6 @@ enum nsim_dev_hwstats_do {
};
struct nsim_dev_hwstats_fops {
- const struct file_operations fops;
enum nsim_dev_hwstats_do action;
enum netdev_offload_xstats_type type;
};
@@ -342,13 +338,12 @@ nsim_dev_hwstats_do_write(struct file *file,
size_t count, loff_t *ppos)
{
struct nsim_dev_hwstats *hwstats = file->private_data;
- struct nsim_dev_hwstats_fops *hwsfops;
+ const struct nsim_dev_hwstats_fops *hwsfops;
struct list_head *hwsdev_list;
int ifindex;
int err;
- hwsfops = container_of(debugfs_real_fops(file),
- struct nsim_dev_hwstats_fops, fops);
+ hwsfops = debugfs_get_aux(file);
err = kstrtoint_from_user(data, count, 0, &ifindex);
if (err)
@@ -381,14 +376,13 @@ nsim_dev_hwstats_do_write(struct file *file,
return count;
}
+static struct debugfs_short_fops debugfs_ops = {
+ .write = nsim_dev_hwstats_do_write,
+ .llseek = generic_file_llseek,
+};
+
#define NSIM_DEV_HWSTATS_FOPS(ACTION, TYPE) \
{ \
- .fops = { \
- .open = simple_open, \
- .write = nsim_dev_hwstats_do_write, \
- .llseek = generic_file_llseek, \
- .owner = THIS_MODULE, \
- }, \
.action = ACTION, \
.type = TYPE, \
}
@@ -433,12 +427,12 @@ int nsim_dev_hwstats_init(struct nsim_dev *nsim_dev)
goto err_remove_hwstats_recursive;
}
- debugfs_create_file("enable_ifindex", 0200, hwstats->l3_ddir, hwstats,
- &nsim_dev_hwstats_l3_enable_fops.fops);
- debugfs_create_file("disable_ifindex", 0200, hwstats->l3_ddir, hwstats,
- &nsim_dev_hwstats_l3_disable_fops.fops);
- debugfs_create_file("fail_next_enable", 0200, hwstats->l3_ddir, hwstats,
- &nsim_dev_hwstats_l3_fail_fops.fops);
+ debugfs_create_file_aux("enable_ifindex", 0200, hwstats->l3_ddir, hwstats,
+ &nsim_dev_hwstats_l3_enable_fops, &debugfs_ops);
+ debugfs_create_file_aux("disable_ifindex", 0200, hwstats->l3_ddir, hwstats,
+ &nsim_dev_hwstats_l3_disable_fops, &debugfs_ops);
+ debugfs_create_file_aux("fail_next_enable", 0200, hwstats->l3_ddir, hwstats,
+ &nsim_dev_hwstats_l3_fail_fops, &debugfs_ops);
INIT_DELAYED_WORK(&hwstats->traffic_dw,
&nsim_dev_hwstats_traffic_work);
diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c
index 88187dd4eb2d..36a1be4923d6 100644
--- a/drivers/net/netdevsim/ipsec.c
+++ b/drivers/net/netdevsim/ipsec.c
@@ -85,11 +85,11 @@ static int nsim_ipsec_find_empty_idx(struct nsim_ipsec *ipsec)
return -ENOSPC;
}
-static int nsim_ipsec_parse_proto_keys(struct xfrm_state *xs,
+static int nsim_ipsec_parse_proto_keys(struct net_device *dev,
+ struct xfrm_state *xs,
u32 *mykey, u32 *mysalt)
{
const char aes_gcm_name[] = "rfc4106(gcm(aes))";
- struct net_device *dev = xs->xso.real_dev;
unsigned char *key_data;
char *alg_name = NULL;
int key_len;
@@ -129,17 +129,16 @@ static int nsim_ipsec_parse_proto_keys(struct xfrm_state *xs,
return 0;
}
-static int nsim_ipsec_add_sa(struct xfrm_state *xs,
+static int nsim_ipsec_add_sa(struct net_device *dev,
+ struct xfrm_state *xs,
struct netlink_ext_ack *extack)
{
struct nsim_ipsec *ipsec;
- struct net_device *dev;
struct netdevsim *ns;
struct nsim_sa sa;
u16 sa_idx;
int ret;
- dev = xs->xso.real_dev;
ns = netdev_priv(dev);
ipsec = &ns->ipsec;
@@ -174,7 +173,7 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs,
sa.crypt = xs->ealg || xs->aead;
/* get the key and salt */
- ret = nsim_ipsec_parse_proto_keys(xs, sa.key, &sa.salt);
+ ret = nsim_ipsec_parse_proto_keys(dev, xs, sa.key, &sa.salt);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for SA table");
return ret;
@@ -200,9 +199,9 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs,
return 0;
}
-static void nsim_ipsec_del_sa(struct xfrm_state *xs)
+static void nsim_ipsec_del_sa(struct net_device *dev, struct xfrm_state *xs)
{
- struct netdevsim *ns = netdev_priv(xs->xso.real_dev);
+ struct netdevsim *ns = netdev_priv(dev);
struct nsim_ipsec *ipsec = &ns->ipsec;
u16 sa_idx;
@@ -217,20 +216,9 @@ static void nsim_ipsec_del_sa(struct xfrm_state *xs)
ipsec->count--;
}
-static bool nsim_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
-{
- struct netdevsim *ns = netdev_priv(xs->xso.real_dev);
- struct nsim_ipsec *ipsec = &ns->ipsec;
-
- ipsec->ok++;
-
- return true;
-}
-
static const struct xfrmdev_ops nsim_xfrmdev_ops = {
.xdo_dev_state_add = nsim_ipsec_add_sa,
.xdo_dev_state_delete = nsim_ipsec_del_sa,
- .xdo_dev_offload_ok = nsim_ipsec_offload_ok,
};
bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb)
@@ -289,6 +277,7 @@ void nsim_ipsec_init(struct netdevsim *ns)
NETIF_F_GSO_ESP)
ns->netdev->features |= NSIM_ESP_FEATURES;
+ ns->netdev->hw_features |= NSIM_ESP_FEATURES;
ns->netdev->hw_enc_features |= NSIM_ESP_FEATURES;
ns->ipsec.pfile = debugfs_create_file("ipsec", 0400,
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index cad85bb0cf54..6927c1962277 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -15,23 +15,75 @@
#include <linux/debugfs.h>
#include <linux/etherdevice.h>
+#include <linux/ethtool_netlink.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/netdev_queues.h>
+#include <net/netdev_rx_queue.h>
#include <net/page_pool/helpers.h>
#include <net/netlink.h>
#include <net/net_shaper.h>
+#include <net/netdev_lock.h>
#include <net/pkt_cls.h>
#include <net/rtnetlink.h>
#include <net/udp_tunnel.h>
+#include <net/busy_poll.h>
#include "netdevsim.h"
+MODULE_IMPORT_NS("NETDEV_INTERNAL");
+
#define NSIM_RING_SIZE 256
-static int nsim_napi_rx(struct nsim_rq *rq, struct sk_buff *skb)
+static void nsim_start_peer_tx_queue(struct net_device *dev, struct nsim_rq *rq)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+ struct net_device *peer_dev;
+ struct netdevsim *peer_ns;
+ struct netdev_queue *txq;
+ u16 idx;
+
+ idx = rq->napi.index;
+ rcu_read_lock();
+ peer_ns = rcu_dereference(ns->peer);
+ if (!peer_ns)
+ goto out;
+
+ /* TX device */
+ peer_dev = peer_ns->netdev;
+ if (dev->real_num_tx_queues != peer_dev->num_rx_queues)
+ goto out;
+
+ txq = netdev_get_tx_queue(peer_dev, idx);
+ if (!netif_tx_queue_stopped(txq))
+ goto out;
+
+ netif_tx_wake_queue(txq);
+out:
+ rcu_read_unlock();
+}
+
+static void nsim_stop_tx_queue(struct net_device *tx_dev,
+ struct net_device *rx_dev,
+ struct nsim_rq *rq,
+ u16 idx)
+{
+	/* If the queue counts differ, do not stop, since it is not
+ * easy to find which TX queue is mapped here
+ */
+ if (rx_dev->real_num_tx_queues != tx_dev->num_rx_queues)
+ return;
+
+ /* rq is the queue on the receive side */
+ netif_subqueue_try_stop(tx_dev, idx,
+ NSIM_RING_SIZE - skb_queue_len(&rq->skb_queue),
+ NSIM_RING_SIZE / 2);
+}
+
+static int nsim_napi_rx(struct net_device *tx_dev, struct net_device *rx_dev,
+ struct nsim_rq *rq, struct sk_buff *skb)
{
if (skb_queue_len(&rq->skb_queue) > NSIM_RING_SIZE) {
dev_kfree_skb_any(skb);
@@ -39,58 +91,94 @@ static int nsim_napi_rx(struct nsim_rq *rq, struct sk_buff *skb)
}
skb_queue_tail(&rq->skb_queue, skb);
+
+	/* Stop the peer TX queue to avoid dropping packets later */
+ if (skb_queue_len(&rq->skb_queue) >= NSIM_RING_SIZE)
+ nsim_stop_tx_queue(tx_dev, rx_dev, rq,
+ skb_get_queue_mapping(skb));
+
return NET_RX_SUCCESS;
}
-static int nsim_forward_skb(struct net_device *dev, struct sk_buff *skb,
- struct nsim_rq *rq)
+static int nsim_forward_skb(struct net_device *tx_dev,
+ struct net_device *rx_dev,
+ struct sk_buff *skb,
+ struct nsim_rq *rq,
+ struct skb_ext *psp_ext)
{
- return __dev_forward_skb(dev, skb) ?: nsim_napi_rx(rq, skb);
+ int ret;
+
+ ret = __dev_forward_skb(rx_dev, skb);
+ if (ret)
+ return ret;
+
+ nsim_psp_handle_ext(skb, psp_ext);
+
+ return nsim_napi_rx(tx_dev, rx_dev, rq, skb);
}
static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct netdevsim *ns = netdev_priv(dev);
+ struct skb_ext *psp_ext = NULL;
struct net_device *peer_dev;
unsigned int len = skb->len;
struct netdevsim *peer_ns;
+ struct netdev_config *cfg;
struct nsim_rq *rq;
int rxq;
+ int dr;
rcu_read_lock();
if (!nsim_ipsec_tx(ns, skb))
- goto out_drop_free;
+ goto out_drop_any;
- peer_ns = rcu_dereference(ns->peer);
- if (!peer_ns)
+ /* Check if loopback mode is enabled */
+ if (dev->features & NETIF_F_LOOPBACK) {
+ peer_ns = ns;
+ peer_dev = dev;
+ } else {
+ peer_ns = rcu_dereference(ns->peer);
+ if (!peer_ns)
+ goto out_drop_any;
+ peer_dev = peer_ns->netdev;
+ }
+
+ dr = nsim_do_psp(skb, ns, peer_ns, &psp_ext);
+ if (dr)
goto out_drop_free;
- peer_dev = peer_ns->netdev;
rxq = skb_get_queue_mapping(skb);
if (rxq >= peer_dev->num_rx_queues)
rxq = rxq % peer_dev->num_rx_queues;
- rq = &peer_ns->rq[rxq];
+ rq = peer_ns->rq[rxq];
+
+ cfg = peer_dev->cfg;
+ if (skb_is_nonlinear(skb) &&
+ (cfg->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED ||
+ (cfg->hds_config == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
+ cfg->hds_thresh > len)))
+ skb_linearize(skb);
skb_tx_timestamp(skb);
- if (unlikely(nsim_forward_skb(peer_dev, skb, rq) == NET_RX_DROP))
+ if (unlikely(nsim_forward_skb(dev, peer_dev,
+ skb, rq, psp_ext) == NET_RX_DROP))
goto out_drop_cnt;
- napi_schedule(&rq->napi);
+ if (!hrtimer_active(&rq->napi_timer))
+ hrtimer_start(&rq->napi_timer, us_to_ktime(5), HRTIMER_MODE_REL);
rcu_read_unlock();
- u64_stats_update_begin(&ns->syncp);
- ns->tx_packets++;
- ns->tx_bytes += len;
- u64_stats_update_end(&ns->syncp);
+ dev_dstats_tx_add(dev, len);
return NETDEV_TX_OK;
+out_drop_any:
+ dr = SKB_DROP_REASON_NOT_SPECIFIED;
out_drop_free:
- dev_kfree_skb(skb);
+ kfree_skb_reason(skb, dr);
out_drop_cnt:
rcu_read_unlock();
- u64_stats_update_begin(&ns->syncp);
- ns->tx_dropped++;
- u64_stats_update_end(&ns->syncp);
+ dev_dstats_tx_dropped(dev);
return NETDEV_TX_OK;
}
@@ -102,7 +190,8 @@ static int nsim_change_mtu(struct net_device *dev, int new_mtu)
{
struct netdevsim *ns = netdev_priv(dev);
- if (ns->xdp.prog && new_mtu > NSIM_XDP_MAX_MTU)
+ if (ns->xdp.prog && !ns->xdp.prog->aux->xdp_has_frags &&
+ new_mtu > NSIM_XDP_MAX_MTU)
return -EBUSY;
WRITE_ONCE(dev->mtu, new_mtu);
@@ -110,20 +199,6 @@ static int nsim_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-static void
-nsim_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
-{
- struct netdevsim *ns = netdev_priv(dev);
- unsigned int start;
-
- do {
- start = u64_stats_fetch_begin(&ns->syncp);
- stats->tx_bytes = ns->tx_bytes;
- stats->tx_packets = ns->tx_packets;
- stats->tx_dropped = ns->tx_dropped;
- } while (u64_stats_fetch_retry(&ns->syncp, start));
-}
-
static int
nsim_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
@@ -334,17 +409,41 @@ static int nsim_get_iflink(const struct net_device *dev)
static int nsim_rcv(struct nsim_rq *rq, int budget)
{
+ struct net_device *dev = rq->napi.dev;
+ struct bpf_prog *xdp_prog;
+ struct netdevsim *ns;
struct sk_buff *skb;
- int i;
+ unsigned int skblen;
+ int i, ret;
+
+ ns = netdev_priv(dev);
+ xdp_prog = READ_ONCE(ns->xdp.prog);
for (i = 0; i < budget; i++) {
if (skb_queue_empty(&rq->skb_queue))
break;
skb = skb_dequeue(&rq->skb_queue);
- netif_receive_skb(skb);
+
+ if (xdp_prog) {
+ /* skb might be freed directly by XDP, save the len */
+ skblen = skb->len;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ skb_checksum_help(skb);
+ ret = do_xdp_generic(xdp_prog, &skb);
+ if (ret != XDP_PASS) {
+ dev_dstats_rx_add(dev, skblen);
+ continue;
+ }
+ }
+
+		/* skb might be discarded in netif_receive_skb, save the len */
+ dev_dstats_rx_add(dev, skb->len);
+ napi_gro_receive(&rq->napi, skb);
}
+ nsim_start_peer_tx_queue(dev, rq);
return i;
}
@@ -354,30 +453,30 @@ static int nsim_poll(struct napi_struct *napi, int budget)
int done;
done = nsim_rcv(rq, budget);
- napi_complete(napi);
+ if (done < budget)
+ napi_complete_done(napi, done);
return done;
}
-static int nsim_create_page_pool(struct nsim_rq *rq)
+static int nsim_create_page_pool(struct page_pool **p, struct napi_struct *napi)
{
- struct page_pool_params p = {
+ struct page_pool_params params = {
.order = 0,
.pool_size = NSIM_RING_SIZE,
.nid = NUMA_NO_NODE,
- .dev = &rq->napi.dev->dev,
- .napi = &rq->napi,
+ .dev = &napi->dev->dev,
+ .napi = napi,
.dma_dir = DMA_BIDIRECTIONAL,
- .netdev = rq->napi.dev,
+ .netdev = napi->dev,
};
+ struct page_pool *pool;
- rq->page_pool = page_pool_create(&p);
- if (IS_ERR(rq->page_pool)) {
- int err = PTR_ERR(rq->page_pool);
+ pool = page_pool_create(&params);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
- rq->page_pool = NULL;
- return err;
- }
+ *p = pool;
return 0;
}
@@ -388,15 +487,15 @@ static int nsim_init_napi(struct netdevsim *ns)
int err, i;
for (i = 0; i < dev->num_rx_queues; i++) {
- rq = &ns->rq[i];
+ rq = ns->rq[i];
- netif_napi_add(dev, &rq->napi, nsim_poll);
+ netif_napi_add_config_locked(dev, &rq->napi, nsim_poll, i);
}
for (i = 0; i < dev->num_rx_queues; i++) {
- rq = &ns->rq[i];
+ rq = ns->rq[i];
- err = nsim_create_page_pool(rq);
+ err = nsim_create_page_pool(&rq->page_pool, &rq->napi);
if (err)
goto err_pp_destroy;
}
@@ -405,40 +504,65 @@ static int nsim_init_napi(struct netdevsim *ns)
err_pp_destroy:
while (i--) {
- page_pool_destroy(ns->rq[i].page_pool);
- ns->rq[i].page_pool = NULL;
+ page_pool_destroy(ns->rq[i]->page_pool);
+ ns->rq[i]->page_pool = NULL;
}
for (i = 0; i < dev->num_rx_queues; i++)
- __netif_napi_del(&ns->rq[i].napi);
+ __netif_napi_del_locked(&ns->rq[i]->napi);
return err;
}
+static enum hrtimer_restart nsim_napi_schedule(struct hrtimer *timer)
+{
+ struct nsim_rq *rq;
+
+ rq = container_of(timer, struct nsim_rq, napi_timer);
+ napi_schedule(&rq->napi);
+
+ return HRTIMER_NORESTART;
+}
+
+static void nsim_rq_timer_init(struct nsim_rq *rq)
+{
+ hrtimer_setup(&rq->napi_timer, nsim_napi_schedule, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+}
+
static void nsim_enable_napi(struct netdevsim *ns)
{
struct net_device *dev = ns->netdev;
int i;
for (i = 0; i < dev->num_rx_queues; i++) {
- struct nsim_rq *rq = &ns->rq[i];
+ struct nsim_rq *rq = ns->rq[i];
netif_queue_set_napi(dev, i, NETDEV_QUEUE_TYPE_RX, &rq->napi);
- napi_enable(&rq->napi);
+ napi_enable_locked(&rq->napi);
}
}
static int nsim_open(struct net_device *dev)
{
struct netdevsim *ns = netdev_priv(dev);
+ struct netdevsim *peer;
int err;
+ netdev_assert_locked(dev);
+
err = nsim_init_napi(ns);
if (err)
return err;
nsim_enable_napi(ns);
+ peer = rtnl_dereference(ns->peer);
+ if (peer && netif_running(peer->netdev)) {
+ netif_carrier_on(dev);
+ netif_carrier_on(peer->netdev);
+ }
+
return 0;
}
@@ -448,16 +572,16 @@ static void nsim_del_napi(struct netdevsim *ns)
int i;
for (i = 0; i < dev->num_rx_queues; i++) {
- struct nsim_rq *rq = &ns->rq[i];
+ struct nsim_rq *rq = ns->rq[i];
- napi_disable(&rq->napi);
- __netif_napi_del(&rq->napi);
+ napi_disable_locked(&rq->napi);
+ __netif_napi_del_locked(&rq->napi);
}
synchronize_net();
for (i = 0; i < dev->num_rx_queues; i++) {
- page_pool_destroy(ns->rq[i].page_pool);
- ns->rq[i].page_pool = NULL;
+ page_pool_destroy(ns->rq[i]->page_pool);
+ ns->rq[i]->page_pool = NULL;
}
}
@@ -466,6 +590,8 @@ static int nsim_stop(struct net_device *dev)
struct netdevsim *ns = netdev_priv(dev);
struct netdevsim *peer;
+ netdev_assert_locked(dev);
+
netif_carrier_off(dev);
peer = rtnl_dereference(ns->peer);
if (peer)
@@ -519,7 +645,6 @@ static const struct net_device_ops nsim_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = nsim_change_mtu,
- .ndo_get_stats64 = nsim_get_stats64,
.ndo_set_vf_mac = nsim_set_vf_mac,
.ndo_set_vf_vlan = nsim_set_vf_vlan,
.ndo_set_vf_rate = nsim_set_vf_rate,
@@ -543,7 +668,6 @@ static const struct net_device_ops nsim_vf_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = nsim_change_mtu,
- .ndo_get_stats64 = nsim_get_stats64,
.ndo_setup_tc = nsim_setup_tc,
.ndo_set_features = nsim_set_features,
};
@@ -557,7 +681,7 @@ static void nsim_get_queue_stats_rx(struct net_device *dev, int idx,
struct rtnl_link_stats64 rtstats = {};
if (!idx)
- nsim_get_stats64(dev, &rtstats);
+ dev_get_stats(dev, &rtstats);
stats->packets = rtstats.rx_packets - !!rtstats.rx_packets;
stats->bytes = rtstats.rx_bytes;
@@ -569,7 +693,7 @@ static void nsim_get_queue_stats_tx(struct net_device *dev, int idx,
struct rtnl_link_stats64 rtstats = {};
if (!idx)
- nsim_get_stats64(dev, &rtstats);
+ dev_get_stats(dev, &rtstats);
stats->packets = rtstats.tx_packets - !!rtstats.tx_packets;
stats->bytes = rtstats.tx_bytes;
@@ -581,7 +705,7 @@ static void nsim_get_base_stats(struct net_device *dev,
{
struct rtnl_link_stats64 rtstats = {};
- nsim_get_stats64(dev, &rtstats);
+ dev_get_stats(dev, &rtstats);
rx->packets = !!rtstats.rx_packets;
rx->bytes = 0;
@@ -595,6 +719,196 @@ static const struct netdev_stat_ops nsim_stat_ops = {
.get_base_stats = nsim_get_base_stats,
};
+static struct nsim_rq *nsim_queue_alloc(void)
+{
+ struct nsim_rq *rq;
+
+ rq = kzalloc(sizeof(*rq), GFP_KERNEL_ACCOUNT);
+ if (!rq)
+ return NULL;
+
+ skb_queue_head_init(&rq->skb_queue);
+ nsim_rq_timer_init(rq);
+ return rq;
+}
+
+static void nsim_queue_free(struct net_device *dev, struct nsim_rq *rq)
+{
+ hrtimer_cancel(&rq->napi_timer);
+
+ if (rq->skb_queue.qlen) {
+ local_bh_disable();
+ dev_dstats_rx_dropped_add(dev, rq->skb_queue.qlen);
+ local_bh_enable();
+ }
+
+ skb_queue_purge_reason(&rq->skb_queue, SKB_DROP_REASON_QUEUE_PURGE);
+ kfree(rq);
+}
+
+/* Queue reset mode is controlled by ns->rq_reset_mode.
+ * - normal - new NAPI new pool (old NAPI enabled when new added)
+ * - mode 1 - allocate new pool (NAPI is only disabled / enabled)
+ * - mode 2 - new NAPI new pool (old NAPI removed before new added)
+ * - mode 3 - new NAPI new pool (old NAPI disabled when new added)
+ */
+struct nsim_queue_mem {
+ struct nsim_rq *rq;
+ struct page_pool *pp;
+};
+
+static int
+nsim_queue_mem_alloc(struct net_device *dev, void *per_queue_mem, int idx)
+{
+ struct nsim_queue_mem *qmem = per_queue_mem;
+ struct netdevsim *ns = netdev_priv(dev);
+ int err;
+
+ if (ns->rq_reset_mode > 3)
+ return -EINVAL;
+
+ if (ns->rq_reset_mode == 1) {
+ if (!netif_running(ns->netdev))
+ return -ENETDOWN;
+ return nsim_create_page_pool(&qmem->pp, &ns->rq[idx]->napi);
+ }
+
+ qmem->rq = nsim_queue_alloc();
+ if (!qmem->rq)
+ return -ENOMEM;
+
+ err = nsim_create_page_pool(&qmem->rq->page_pool, &qmem->rq->napi);
+ if (err)
+ goto err_free;
+
+ if (!ns->rq_reset_mode)
+ netif_napi_add_config_locked(dev, &qmem->rq->napi, nsim_poll,
+ idx);
+
+ return 0;
+
+err_free:
+ nsim_queue_free(dev, qmem->rq);
+ return err;
+}
+
+static void nsim_queue_mem_free(struct net_device *dev, void *per_queue_mem)
+{
+ struct nsim_queue_mem *qmem = per_queue_mem;
+ struct netdevsim *ns = netdev_priv(dev);
+
+ page_pool_destroy(qmem->pp);
+ if (qmem->rq) {
+ if (!ns->rq_reset_mode)
+ netif_napi_del_locked(&qmem->rq->napi);
+ page_pool_destroy(qmem->rq->page_pool);
+ nsim_queue_free(dev, qmem->rq);
+ }
+}
+
+static int
+nsim_queue_start(struct net_device *dev, void *per_queue_mem, int idx)
+{
+ struct nsim_queue_mem *qmem = per_queue_mem;
+ struct netdevsim *ns = netdev_priv(dev);
+
+ netdev_assert_locked(dev);
+
+ if (ns->rq_reset_mode == 1) {
+ ns->rq[idx]->page_pool = qmem->pp;
+ napi_enable_locked(&ns->rq[idx]->napi);
+ return 0;
+ }
+
+ /* netif_napi_add()/_del() should normally be called from alloc/free,
+ * here we want to test various call orders.
+ */
+ if (ns->rq_reset_mode == 2) {
+ netif_napi_del_locked(&ns->rq[idx]->napi);
+ netif_napi_add_config_locked(dev, &qmem->rq->napi, nsim_poll,
+ idx);
+ } else if (ns->rq_reset_mode == 3) {
+ netif_napi_add_config_locked(dev, &qmem->rq->napi, nsim_poll,
+ idx);
+ netif_napi_del_locked(&ns->rq[idx]->napi);
+ }
+
+ ns->rq[idx] = qmem->rq;
+ napi_enable_locked(&ns->rq[idx]->napi);
+
+ return 0;
+}
+
+static int nsim_queue_stop(struct net_device *dev, void *per_queue_mem, int idx)
+{
+ struct nsim_queue_mem *qmem = per_queue_mem;
+ struct netdevsim *ns = netdev_priv(dev);
+
+ netdev_assert_locked(dev);
+
+ napi_disable_locked(&ns->rq[idx]->napi);
+
+ if (ns->rq_reset_mode == 1) {
+ qmem->pp = ns->rq[idx]->page_pool;
+ page_pool_disable_direct_recycling(qmem->pp);
+ } else {
+ qmem->rq = ns->rq[idx];
+ }
+
+ return 0;
+}
+
+static const struct netdev_queue_mgmt_ops nsim_queue_mgmt_ops = {
+ .ndo_queue_mem_size = sizeof(struct nsim_queue_mem),
+ .ndo_queue_mem_alloc = nsim_queue_mem_alloc,
+ .ndo_queue_mem_free = nsim_queue_mem_free,
+ .ndo_queue_start = nsim_queue_start,
+ .ndo_queue_stop = nsim_queue_stop,
+};
+
+static ssize_t
+nsim_qreset_write(struct file *file, const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct netdevsim *ns = file->private_data;
+ unsigned int queue, mode;
+ char buf[32];
+ ssize_t ret;
+
+ if (count >= sizeof(buf))
+ return -EINVAL;
+ if (copy_from_user(buf, data, count))
+ return -EFAULT;
+ buf[count] = '\0';
+
+ ret = sscanf(buf, "%u %u", &queue, &mode);
+ if (ret != 2)
+ return -EINVAL;
+
+ netdev_lock(ns->netdev);
+ if (queue >= ns->netdev->real_num_rx_queues) {
+ ret = -EINVAL;
+ goto exit_unlock;
+ }
+
+ ns->rq_reset_mode = mode;
+ ret = netdev_rx_queue_restart(ns->netdev, queue);
+ ns->rq_reset_mode = 0;
+ if (ret)
+ goto exit_unlock;
+
+ ret = count;
+exit_unlock:
+ netdev_unlock(ns->netdev);
+ return ret;
+}
+
+static const struct file_operations nsim_qreset_fops = {
+ .open = simple_open,
+ .write = nsim_qreset_write,
+ .owner = THIS_MODULE,
+};
+
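The queue_reset debugfs file (created per port later in nsim_create()) takes two integers, "<queue> <mode>", where mode selects one of the rq_reset_mode behaviours documented above struct nsim_queue_mem; for example (illustrative only), writing "0 2" restarts RX queue 0 while deleting the old NAPI before the new one is added, and "0 0" exercises the normal alloc/start/stop/free path. Modes above 3 are rejected with -EINVAL.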
static ssize_t
nsim_pp_hold_read(struct file *file, char __user *data,
size_t count, loff_t *ppos)
@@ -628,17 +942,18 @@ nsim_pp_hold_write(struct file *file, const char __user *data,
if (!netif_running(ns->netdev) && val) {
ret = -ENETDOWN;
} else if (val) {
- ns->page = page_pool_dev_alloc_pages(ns->rq[0].page_pool);
+ ns->page = page_pool_dev_alloc_pages(ns->rq[0]->page_pool);
if (!ns->page)
ret = -ENOMEM;
} else {
- page_pool_put_full_page(ns->page->pp, ns->page, false);
+ page_pool_put_full_page(pp_page_to_nmdesc(ns->page)->pp,
+ ns->page, false);
ns->page = NULL;
}
- rtnl_unlock();
exit:
- return count;
+ rtnl_unlock();
+ return ret;
}
static const struct file_operations nsim_pp_hold_fops = {
@@ -654,18 +969,24 @@ static void nsim_setup(struct net_device *dev)
ether_setup(dev);
eth_hw_addr_random(dev);
- dev->tx_queue_len = 0;
dev->flags &= ~IFF_MULTICAST;
- dev->priv_flags |= IFF_LIVE_ADDR_CHANGE |
- IFF_NO_QUEUE;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
dev->features |= NETIF_F_HIGHDMA |
NETIF_F_SG |
NETIF_F_FRAGLIST |
NETIF_F_HW_CSUM |
+ NETIF_F_LRO |
NETIF_F_TSO;
- dev->hw_features |= NETIF_F_HW_TC;
+ dev->hw_features |= NETIF_F_HW_TC |
+ NETIF_F_SG |
+ NETIF_F_FRAGLIST |
+ NETIF_F_HW_CSUM |
+ NETIF_F_LRO |
+ NETIF_F_TSO |
+ NETIF_F_LOOPBACK;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
dev->max_mtu = ETH_MAX_MTU;
- dev->xdp_features = NETDEV_XDP_ACT_HW_OFFLOAD;
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_HW_OFFLOAD;
}
static int nsim_queue_init(struct netdevsim *ns)
@@ -673,32 +994,41 @@ static int nsim_queue_init(struct netdevsim *ns)
struct net_device *dev = ns->netdev;
int i;
- ns->rq = kvcalloc(dev->num_rx_queues, sizeof(*ns->rq),
- GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
+ ns->rq = kcalloc(dev->num_rx_queues, sizeof(*ns->rq),
+ GFP_KERNEL_ACCOUNT);
if (!ns->rq)
return -ENOMEM;
- for (i = 0; i < dev->num_rx_queues; i++)
- skb_queue_head_init(&ns->rq[i].skb_queue);
+ for (i = 0; i < dev->num_rx_queues; i++) {
+ ns->rq[i] = nsim_queue_alloc();
+ if (!ns->rq[i])
+ goto err_free_prev;
+ }
return 0;
+
+err_free_prev:
+ while (i--)
+ kfree(ns->rq[i]);
+ kfree(ns->rq);
+ return -ENOMEM;
}
-static void nsim_queue_free(struct netdevsim *ns)
+static void nsim_queue_uninit(struct netdevsim *ns)
{
struct net_device *dev = ns->netdev;
int i;
for (i = 0; i < dev->num_rx_queues; i++)
- skb_queue_purge_reason(&ns->rq[i].skb_queue,
- SKB_DROP_REASON_QUEUE_PURGE);
+ nsim_queue_free(dev, ns->rq[i]);
- kvfree(ns->rq);
+ kfree(ns->rq);
ns->rq = NULL;
}
static int nsim_init_netdevsim(struct netdevsim *ns)
{
+ struct netdevsim *peer;
struct mock_phc *phc;
int err;
@@ -709,6 +1039,8 @@ static int nsim_init_netdevsim(struct netdevsim *ns)
ns->phc = phc;
ns->netdev->netdev_ops = &nsim_netdev_ops;
ns->netdev->stat_ops = &nsim_stat_ops;
+ ns->netdev->queue_mgmt_ops = &nsim_queue_mgmt_ops;
+ netdev_lockdep_set_classes(ns->netdev);
err = nsim_udp_tunnels_info_create(ns->nsim_dev, ns->netdev);
if (err)
@@ -730,14 +1062,33 @@ static int nsim_init_netdevsim(struct netdevsim *ns)
if (err)
goto err_ipsec_teardown;
rtnl_unlock();
+
+ err = nsim_psp_init(ns);
+ if (err)
+ goto err_unregister_netdev;
+
+ if (IS_ENABLED(CONFIG_DEBUG_NET)) {
+ ns->nb.notifier_call = netdev_debug_event;
+ if (register_netdevice_notifier_dev_net(ns->netdev, &ns->nb,
+ &ns->nn))
+ ns->nb.notifier_call = NULL;
+ }
+
return 0;
+err_unregister_netdev:
+ rtnl_lock();
+ peer = rtnl_dereference(ns->peer);
+ if (peer)
+ RCU_INIT_POINTER(peer->peer, NULL);
+ RCU_INIT_POINTER(ns->peer, NULL);
+ unregister_netdevice(ns->netdev);
err_ipsec_teardown:
nsim_ipsec_teardown(ns);
nsim_macsec_teardown(ns);
nsim_bpf_uninit(ns);
err_rq_destroy:
- nsim_queue_free(ns);
+ nsim_queue_uninit(ns);
err_utn_destroy:
rtnl_unlock();
nsim_udp_tunnels_info_destroy(ns->netdev);
@@ -763,8 +1114,9 @@ static void nsim_exit_netdevsim(struct netdevsim *ns)
mock_phc_destroy(ns->phc);
}
-struct netdevsim *
-nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
+struct netdevsim *nsim_create(struct nsim_dev *nsim_dev,
+ struct nsim_dev_port *nsim_dev_port,
+ u8 perm_addr[ETH_ALEN])
{
struct net_device *dev;
struct netdevsim *ns;
@@ -775,10 +1127,12 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
if (!dev)
return ERR_PTR(-ENOMEM);
+ if (perm_addr)
+ memcpy(dev->perm_addr, perm_addr, ETH_ALEN);
+
dev_net_set(dev, nsim_dev_net(nsim_dev));
ns = netdev_priv(dev);
ns->netdev = dev;
- u64_stats_init(&ns->syncp);
ns->nsim_dev = nsim_dev;
ns->nsim_dev_port = nsim_dev_port;
ns->nsim_bus_dev = nsim_dev->nsim_bus_dev;
@@ -794,7 +1148,9 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
ns->pp_dfs = debugfs_create_file("pp_hold", 0600, nsim_dev_port->ddir,
ns, &nsim_pp_hold_fops);
-
+ ns->qr_dfs = debugfs_create_file("queue_reset", 0200,
+ nsim_dev_port->ddir, ns,
+ &nsim_qreset_fops);
return ns;
err_free_netdev:
@@ -807,8 +1163,15 @@ void nsim_destroy(struct netdevsim *ns)
struct net_device *dev = ns->netdev;
struct netdevsim *peer;
+ debugfs_remove(ns->qr_dfs);
debugfs_remove(ns->pp_dfs);
+ if (ns->nb.notifier_call)
+ unregister_netdevice_notifier_dev_net(ns->netdev, &ns->nb,
+ &ns->nn);
+
+ nsim_psp_uninit(ns);
+
rtnl_lock();
peer = rtnl_dereference(ns->peer);
if (peer)
@@ -819,7 +1182,7 @@ void nsim_destroy(struct netdevsim *ns)
nsim_macsec_teardown(ns);
nsim_ipsec_teardown(ns);
nsim_bpf_uninit(ns);
- nsim_queue_free(ns);
+ nsim_queue_uninit(ns);
}
rtnl_unlock();
if (nsim_dev_port_is_pf(ns->nsim_dev_port))
@@ -827,7 +1190,8 @@ void nsim_destroy(struct netdevsim *ns)
/* Put this intentionally late to exercise the orphaning path */
if (ns->page) {
- page_pool_put_full_page(ns->page->pp, ns->page, false);
+ page_pool_put_full_page(pp_page_to_nmdesc(ns->page)->pp,
+ ns->page, false);
ns->page = NULL;
}
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index bf02efa10956..d1a941e2b18f 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -16,6 +16,7 @@
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/ethtool.h>
+#include <linux/ethtool_netlink.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
@@ -36,6 +37,8 @@
#define NSIM_IPSEC_VALID BIT(31)
#define NSIM_UDP_TUNNEL_N_PORTS 4
+#define NSIM_HDS_THRESHOLD_MAX 1024
+
struct nsim_sa {
struct xfrm_state *xs;
__be32 ipaddr[4];
@@ -51,7 +54,6 @@ struct nsim_ipsec {
struct dentry *pfile;
u32 count;
u32 tx;
- u32 ok;
};
#define NSIM_MACSEC_MAX_SECY_COUNT 3
@@ -94,6 +96,7 @@ struct nsim_rq {
struct napi_struct napi;
struct sk_buff_head skb_queue;
struct page_pool *page_pool;
+ struct hrtimer napi_timer;
};
struct netdevsim {
@@ -101,12 +104,20 @@ struct netdevsim {
struct nsim_dev *nsim_dev;
struct nsim_dev_port *nsim_dev_port;
struct mock_phc *phc;
- struct nsim_rq *rq;
+ struct nsim_rq **rq;
+
+ int rq_reset_mode;
- u64 tx_packets;
- u64 tx_bytes;
- u64 tx_dropped;
- struct u64_stats_sync syncp;
+ struct {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+ struct psp_dev *dev;
+ u32 spi;
+ u32 assoc_cnt;
+ } psp;
struct nsim_bus_dev *nsim_bus_dev;
@@ -126,21 +137,26 @@ struct netdevsim {
struct nsim_macsec macsec;
struct {
u32 inject_error;
- u32 sleep;
u32 __ports[2][NSIM_UDP_TUNNEL_N_PORTS];
u32 (*ports)[NSIM_UDP_TUNNEL_N_PORTS];
+ struct dentry *ddir;
struct debugfs_u32_array dfs_ports[2];
} udp_ports;
struct page *page;
struct dentry *pp_dfs;
+ struct dentry *qr_dfs;
struct nsim_ethtool ethtool;
struct netdevsim __rcu *peer;
+
+ struct notifier_block nb;
+ struct netdev_net_notifier nn;
};
-struct netdevsim *
-nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port);
+struct netdevsim *nsim_create(struct nsim_dev *nsim_dev,
+ struct nsim_dev_port *nsim_dev_port,
+ u8 perm_addr[ETH_ALEN]);
void nsim_destroy(struct netdevsim *ns);
bool netdev_is_nsim(struct net_device *dev);
@@ -272,6 +288,7 @@ struct nsim_dev_port {
struct dentry *ddir;
struct dentry *rate_parent;
char *parent_name;
+ u32 tc_bw[DEVLINK_RATE_TCS_MAX];
struct netdevsim *ns;
};
@@ -311,8 +328,10 @@ struct nsim_dev {
struct list_head port_list;
bool fw_update_status;
u32 fw_update_overwrite_mask;
+ u32 fw_update_flash_chunk_time_ms;
u32 max_macs;
bool test1;
+ u32 test2;
bool dont_allow_reload;
bool fail_reload;
struct devlink_region *dummy_region;
@@ -332,7 +351,6 @@ struct nsim_dev {
bool ipv4_only;
bool shared;
bool static_iana_vxlan;
- u32 sleep;
} udp_ports;
struct nsim_dev_psample *psample;
u16 esw_mode;
@@ -358,8 +376,8 @@ void nsim_dev_exit(void);
int nsim_drv_probe(struct nsim_bus_dev *nsim_bus_dev);
void nsim_drv_remove(struct nsim_bus_dev *nsim_bus_dev);
int nsim_drv_port_add(struct nsim_bus_dev *nsim_bus_dev,
- enum nsim_dev_port_type type,
- unsigned int port_index);
+ enum nsim_dev_port_type type, unsigned int port_index,
+ u8 perm_addr[ETH_ALEN]);
int nsim_drv_port_del(struct nsim_bus_dev *nsim_bus_dev,
enum nsim_dev_port_type type,
unsigned int port_index);
@@ -415,6 +433,27 @@ static inline void nsim_macsec_teardown(struct netdevsim *ns)
}
#endif
+#if IS_ENABLED(CONFIG_INET_PSP)
+int nsim_psp_init(struct netdevsim *ns);
+void nsim_psp_uninit(struct netdevsim *ns);
+void nsim_psp_handle_ext(struct sk_buff *skb, struct skb_ext *psp_ext);
+enum skb_drop_reason
+nsim_do_psp(struct sk_buff *skb, struct netdevsim *ns,
+ struct netdevsim *peer_ns, struct skb_ext **psp_ext);
+#else
+static inline int nsim_psp_init(struct netdevsim *ns) { return 0; }
+static inline void nsim_psp_uninit(struct netdevsim *ns) {}
+static inline enum skb_drop_reason
+nsim_do_psp(struct sk_buff *skb, struct netdevsim *ns,
+ struct netdevsim *peer_ns, struct skb_ext **psp_ext)
+{
+ return 0;
+}
+
+static inline void
+nsim_psp_handle_ext(struct sk_buff *skb, struct skb_ext *psp_ext) {}
+#endif
+
struct nsim_bus_dev {
struct device dev;
struct list_head list;
diff --git a/drivers/net/netdevsim/psp.c b/drivers/net/netdevsim/psp.c
new file mode 100644
index 000000000000..727da06101ca
--- /dev/null
+++ b/drivers/net/netdevsim/psp.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <net/ip6_checksum.h>
+#include <net/psp.h>
+#include <net/sock.h>
+
+#include "netdevsim.h"
+
+void nsim_psp_handle_ext(struct sk_buff *skb, struct skb_ext *psp_ext)
+{
+ if (psp_ext)
+ __skb_ext_set(skb, SKB_EXT_PSP, psp_ext);
+}
+
+enum skb_drop_reason
+nsim_do_psp(struct sk_buff *skb, struct netdevsim *ns,
+ struct netdevsim *peer_ns, struct skb_ext **psp_ext)
+{
+ enum skb_drop_reason rc = 0;
+ struct psp_assoc *pas;
+ struct net *net;
+ void **ptr;
+
+ rcu_read_lock();
+ pas = psp_skb_get_assoc_rcu(skb);
+ if (!pas) {
+ rc = SKB_NOT_DROPPED_YET;
+ goto out_unlock;
+ }
+
+ if (!skb_transport_header_was_set(skb)) {
+ rc = SKB_DROP_REASON_PSP_OUTPUT;
+ goto out_unlock;
+ }
+
+ ptr = psp_assoc_drv_data(pas);
+ if (*ptr != ns) {
+ rc = SKB_DROP_REASON_PSP_OUTPUT;
+ goto out_unlock;
+ }
+
+ net = sock_net(skb->sk);
+ if (!psp_dev_encapsulate(net, skb, pas->tx.spi, pas->version, 0)) {
+ rc = SKB_DROP_REASON_PSP_OUTPUT;
+ goto out_unlock;
+ }
+
+ /* Now pretend we just received this frame */
+ if (peer_ns->psp.dev->config.versions & (1 << pas->version)) {
+ bool strip_icv = false;
+ u8 generation;
+
+ /* We cheat a bit and put the generation in the key.
+		 * In real life, if the generation were too old, decryption would
+ * fail. Here, we just make it so a bad key causes a bad
+ * generation too, and psp_sk_rx_policy_check() will fail.
+ */
+ generation = pas->tx.key[0];
+
+ skb_ext_reset(skb);
+ skb->mac_len = ETH_HLEN;
+ if (psp_dev_rcv(skb, peer_ns->psp.dev->id, generation,
+ strip_icv)) {
+ rc = SKB_DROP_REASON_PSP_OUTPUT;
+ goto out_unlock;
+ }
+
+ *psp_ext = skb->extensions;
+ refcount_inc(&(*psp_ext)->refcnt);
+ skb->decrypted = 1;
+
+ u64_stats_update_begin(&ns->psp.syncp);
+ ns->psp.tx_packets++;
+ ns->psp.rx_packets++;
+ ns->psp.tx_bytes += skb->len - skb_inner_transport_offset(skb);
+ ns->psp.rx_bytes += skb->len - skb_inner_transport_offset(skb);
+ u64_stats_update_end(&ns->psp.syncp);
+ } else {
+ struct ipv6hdr *ip6h __maybe_unused;
+ struct iphdr *iph;
+ struct udphdr *uh;
+ __wsum csum;
+
+ /* Do not decapsulate. Receive the skb with the udp and psp
+ * headers still there as if this is a normal udp packet.
+ * psp_dev_encapsulate() sets udp checksum to 0, so we need to
+ * provide a valid checksum here, so the skb isn't dropped.
+		 * provide a valid checksum here to keep the skb from being dropped.
+ uh = udp_hdr(skb);
+ csum = skb_checksum(skb, skb_transport_offset(skb),
+ ntohs(uh->len), 0);
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ iph = ip_hdr(skb);
+ uh->check = udp_v4_check(ntohs(uh->len), iph->saddr,
+ iph->daddr, csum);
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case htons(ETH_P_IPV6):
+ ip6h = ipv6_hdr(skb);
+ uh->check = udp_v6_check(ntohs(uh->len), &ip6h->saddr,
+ &ip6h->daddr, csum);
+ break;
+#endif
+ }
+
+ uh->check = uh->check ?: CSUM_MANGLED_0;
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+
+out_unlock:
+ rcu_read_unlock();
+ return rc;
+}
+
+static int
+nsim_psp_set_config(struct psp_dev *psd, struct psp_dev_config *conf,
+ struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+static int
+nsim_rx_spi_alloc(struct psp_dev *psd, u32 version,
+ struct psp_key_parsed *assoc,
+ struct netlink_ext_ack *extack)
+{
+ struct netdevsim *ns = psd->drv_priv;
+ unsigned int new;
+ int i;
+
+ new = ++ns->psp.spi & PSP_SPI_KEY_ID;
+ if (psd->generation & 1)
+ new |= PSP_SPI_KEY_PHASE;
+
+ assoc->spi = cpu_to_be32(new);
+ assoc->key[0] = psd->generation;
+ for (i = 1; i < PSP_MAX_KEY; i++)
+ assoc->key[i] = ns->psp.spi + i;
+
+ return 0;
+}
+
+static int nsim_assoc_add(struct psp_dev *psd, struct psp_assoc *pas,
+ struct netlink_ext_ack *extack)
+{
+ struct netdevsim *ns = psd->drv_priv;
+ void **ptr = psp_assoc_drv_data(pas);
+
+ /* Copy drv_priv from psd to assoc */
+ *ptr = psd->drv_priv;
+ ns->psp.assoc_cnt++;
+
+ return 0;
+}
+
+static int nsim_key_rotate(struct psp_dev *psd, struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+static void nsim_assoc_del(struct psp_dev *psd, struct psp_assoc *pas)
+{
+ struct netdevsim *ns = psd->drv_priv;
+ void **ptr = psp_assoc_drv_data(pas);
+
+ *ptr = NULL;
+ ns->psp.assoc_cnt--;
+}
+
+static void nsim_get_stats(struct psp_dev *psd, struct psp_dev_stats *stats)
+{
+ struct netdevsim *ns = psd->drv_priv;
+ unsigned int start;
+
+ /* WARNING: do *not* blindly zero stats in real drivers!
+ * All required stats must be reported by the device!
+ */
+ memset(stats, 0, sizeof(struct psp_dev_stats));
+
+ do {
+ start = u64_stats_fetch_begin(&ns->psp.syncp);
+ stats->rx_bytes = ns->psp.rx_bytes;
+ stats->rx_packets = ns->psp.rx_packets;
+ stats->tx_bytes = ns->psp.tx_bytes;
+ stats->tx_packets = ns->psp.tx_packets;
+ } while (u64_stats_fetch_retry(&ns->psp.syncp, start));
+}
+
+static struct psp_dev_ops nsim_psp_ops = {
+ .set_config = nsim_psp_set_config,
+ .rx_spi_alloc = nsim_rx_spi_alloc,
+ .tx_key_add = nsim_assoc_add,
+ .tx_key_del = nsim_assoc_del,
+ .key_rotate = nsim_key_rotate,
+ .get_stats = nsim_get_stats,
+};
+
+static struct psp_dev_caps nsim_psp_caps = {
+ .versions = 1 << PSP_VERSION_HDR0_AES_GCM_128 |
+ 1 << PSP_VERSION_HDR0_AES_GMAC_128 |
+ 1 << PSP_VERSION_HDR0_AES_GCM_256 |
+ 1 << PSP_VERSION_HDR0_AES_GMAC_256,
+ .assoc_drv_spc = sizeof(void *),
+};
+
+void nsim_psp_uninit(struct netdevsim *ns)
+{
+ if (!IS_ERR(ns->psp.dev))
+ psp_dev_unregister(ns->psp.dev);
+ WARN_ON(ns->psp.assoc_cnt);
+}
+
+static ssize_t
+nsim_psp_rereg_write(struct file *file, const char __user *data, size_t count,
+ loff_t *ppos)
+{
+ struct netdevsim *ns = file->private_data;
+ int err;
+
+ nsim_psp_uninit(ns);
+
+ ns->psp.dev = psp_dev_create(ns->netdev, &nsim_psp_ops,
+ &nsim_psp_caps, ns);
+ err = PTR_ERR_OR_ZERO(ns->psp.dev);
+ return err ?: count;
+}
+
+static const struct file_operations nsim_psp_rereg_fops = {
+ .open = simple_open,
+ .write = nsim_psp_rereg_write,
+ .llseek = generic_file_llseek,
+ .owner = THIS_MODULE,
+};
+
+int nsim_psp_init(struct netdevsim *ns)
+{
+ struct dentry *ddir = ns->nsim_dev_port->ddir;
+ int err;
+
+ ns->psp.dev = psp_dev_create(ns->netdev, &nsim_psp_ops,
+ &nsim_psp_caps, ns);
+ err = PTR_ERR_OR_ZERO(ns->psp.dev);
+ if (err)
+ return err;
+
+ debugfs_create_file("psp_rereg", 0200, ddir, ns, &nsim_psp_rereg_fops);
+ return 0;
+}
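
A brief illustrative aside (not part of the patch): the synthetic SPI produced by nsim_rx_spi_alloc() packs a rotating counter into the PSP_SPI_KEY_ID bits and mirrors the key-generation parity in PSP_SPI_KEY_PHASE. A minimal sketch of how such an SPI could be decomposed again, assuming only the two mask macros already used above:

static void example_decode_spi(__be32 spi_be)
{
	u32 spi = be32_to_cpu(spi_be);

	/* low bits carry the rotating key id; the phase bit tracks which
	 * key generation the association belongs to
	 */
	pr_debug("psp spi: key_id=%u phase=%d\n",
		 spi & PSP_SPI_KEY_ID, !!(spi & PSP_SPI_KEY_PHASE));
}
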
diff --git a/drivers/net/netdevsim/udp_tunnels.c b/drivers/net/netdevsim/udp_tunnels.c
index 02dc3123eb6c..89fff76e51cf 100644
--- a/drivers/net/netdevsim/udp_tunnels.c
+++ b/drivers/net/netdevsim/udp_tunnels.c
@@ -18,9 +18,6 @@ nsim_udp_tunnel_set_port(struct net_device *dev, unsigned int table,
ret = -ns->udp_ports.inject_error;
ns->udp_ports.inject_error = 0;
- if (ns->udp_ports.sleep)
- msleep(ns->udp_ports.sleep);
-
if (!ret) {
if (ns->udp_ports.ports[table][entry]) {
WARN(1, "entry already in use\n");
@@ -47,8 +44,6 @@ nsim_udp_tunnel_unset_port(struct net_device *dev, unsigned int table,
ret = -ns->udp_ports.inject_error;
ns->udp_ports.inject_error = 0;
- if (ns->udp_ports.sleep)
- msleep(ns->udp_ports.sleep);
if (!ret) {
u32 val = be16_to_cpu(ti->port) << 16 | ti->type;
@@ -112,10 +107,10 @@ nsim_udp_tunnels_info_reset_write(struct file *file, const char __user *data,
struct net_device *dev = file->private_data;
struct netdevsim *ns = netdev_priv(dev);
- memset(ns->udp_ports.ports, 0, sizeof(ns->udp_ports.__ports));
- rtnl_lock();
- udp_tunnel_nic_reset_ntf(dev);
- rtnl_unlock();
+ if (dev->reg_state == NETREG_REGISTERED) {
+ memset(ns->udp_ports.ports, 0, sizeof(ns->udp_ports.__ports));
+ udp_tunnel_nic_reset_ntf(dev);
+ }
return count;
}
@@ -144,23 +139,23 @@ int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev,
else
ns->udp_ports.ports = nsim_dev->udp_ports.__ports;
- debugfs_create_u32("udp_ports_inject_error", 0600,
- ns->nsim_dev_port->ddir,
+ ns->udp_ports.ddir = debugfs_create_dir("udp_ports",
+ ns->nsim_dev_port->ddir);
+
+ debugfs_create_u32("inject_error", 0600, ns->udp_ports.ddir,
&ns->udp_ports.inject_error);
ns->udp_ports.dfs_ports[0].array = ns->udp_ports.ports[0];
ns->udp_ports.dfs_ports[0].n_elements = NSIM_UDP_TUNNEL_N_PORTS;
- debugfs_create_u32_array("udp_ports_table0", 0400,
- ns->nsim_dev_port->ddir,
+ debugfs_create_u32_array("table0", 0400, ns->udp_ports.ddir,
&ns->udp_ports.dfs_ports[0]);
ns->udp_ports.dfs_ports[1].array = ns->udp_ports.ports[1];
ns->udp_ports.dfs_ports[1].n_elements = NSIM_UDP_TUNNEL_N_PORTS;
- debugfs_create_u32_array("udp_ports_table1", 0400,
- ns->nsim_dev_port->ddir,
+ debugfs_create_u32_array("table1", 0400, ns->udp_ports.ddir,
&ns->udp_ports.dfs_ports[1]);
- debugfs_create_file("udp_ports_reset", 0200, ns->nsim_dev_port->ddir,
+ debugfs_create_file("reset", 0200, ns->udp_ports.ddir,
dev, &nsim_udp_tunnels_info_reset_fops);
/* Note: it's not normal to allocate the info struct like this!
@@ -170,7 +165,6 @@ int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev,
GFP_KERNEL);
if (!info)
return -ENOMEM;
- ns->udp_ports.sleep = nsim_dev->udp_ports.sleep;
if (nsim_dev->udp_ports.sync_all) {
info->set_port = NULL;
@@ -179,8 +173,6 @@ int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev,
info->sync_table = NULL;
}
- if (ns->udp_ports.sleep)
- info->flags |= UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
if (nsim_dev->udp_ports.open_only)
info->flags |= UDP_TUNNEL_NIC_INFO_OPEN_ONLY;
if (nsim_dev->udp_ports.ipv4_only)
@@ -196,6 +188,9 @@ int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev,
void nsim_udp_tunnels_info_destroy(struct net_device *dev)
{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ debugfs_remove_recursive(ns->udp_ports.ddir);
kfree(dev->udp_tunnel_nic_info);
dev->udp_tunnel_nic_info = NULL;
}
@@ -212,6 +207,4 @@ void nsim_udp_tunnels_debugfs_create(struct nsim_dev *nsim_dev)
&nsim_dev->udp_ports.shared);
debugfs_create_bool("udp_ports_static_iana_vxlan", 0600, nsim_dev->ddir,
&nsim_dev->udp_ports.static_iana_vxlan);
- debugfs_create_u32("udp_ports_sleep", 0600, nsim_dev->ddir,
- &nsim_dev->udp_ports.sleep);
}
diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c
index cd8360b9bbde..0a2fef7caccb 100644
--- a/drivers/net/netkit.c
+++ b/drivers/net/netkit.c
@@ -16,23 +16,24 @@
#define DRV_NAME "netkit"
struct netkit {
- /* Needed in fast-path */
+ __cacheline_group_begin(netkit_fastpath);
struct net_device __rcu *peer;
struct bpf_mprog_entry __rcu *active;
enum netkit_action policy;
enum netkit_scrub scrub;
struct bpf_mprog_bundle bundle;
+ __cacheline_group_end(netkit_fastpath);
- /* Needed in slow-path */
+ __cacheline_group_begin(netkit_slowpath);
enum netkit_mode mode;
bool primary;
u32 headroom;
+ __cacheline_group_end(netkit_slowpath);
};
struct netkit_link {
struct bpf_link link;
struct net_device *dev;
- u32 location;
};
static __always_inline int
@@ -65,7 +66,6 @@ static void netkit_prep_forward(struct sk_buff *skb,
skb_reset_mac_header(skb);
if (!xnet)
return;
- ipvs_reset(skb);
skb_clear_tstamp(skb);
if (xnet_scrub)
netkit_xnet(skb);
@@ -327,36 +327,35 @@ static int netkit_validate(struct nlattr *tb[], struct nlattr *data[],
static struct rtnl_link_ops netkit_link_ops;
-static int netkit_new_link(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int netkit_new_link(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
- struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb, *attr;
- enum netkit_action policy_prim = NETKIT_PASS;
- enum netkit_action policy_peer = NETKIT_PASS;
+ struct net *peer_net = rtnl_newlink_peer_net(params);
enum netkit_scrub scrub_prim = NETKIT_SCRUB_DEFAULT;
enum netkit_scrub scrub_peer = NETKIT_SCRUB_DEFAULT;
+ struct nlattr *peer_tb[IFLA_MAX + 1], **tbp, *attr;
+ enum netkit_action policy_prim = NETKIT_PASS;
+ enum netkit_action policy_peer = NETKIT_PASS;
+ struct nlattr **data = params->data;
enum netkit_mode mode = NETKIT_L3;
unsigned char ifname_assign_type;
+ struct nlattr **tb = params->tb;
+ u16 headroom = 0, tailroom = 0;
struct ifinfomsg *ifmp = NULL;
struct net_device *peer;
char ifname[IFNAMSIZ];
struct netkit *nk;
- struct net *net;
int err;
+ tbp = tb;
if (data) {
if (data[IFLA_NETKIT_MODE])
mode = nla_get_u32(data[IFLA_NETKIT_MODE]);
if (data[IFLA_NETKIT_PEER_INFO]) {
attr = data[IFLA_NETKIT_PEER_INFO];
ifmp = nla_data(attr);
- err = rtnl_nla_parse_ifinfomsg(peer_tb, attr, extack);
- if (err < 0)
- return err;
- err = netkit_validate(peer_tb, NULL, extack);
- if (err < 0)
- return err;
+ rtnl_nla_parse_ifinfomsg(peer_tb, attr, extack);
tbp = peer_tb;
}
if (data[IFLA_NETKIT_SCRUB])
@@ -377,6 +376,10 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev,
if (err < 0)
return err;
}
+ if (data[IFLA_NETKIT_HEADROOM])
+ headroom = nla_get_u16(data[IFLA_NETKIT_HEADROOM]);
+ if (data[IFLA_NETKIT_TAILROOM])
+ tailroom = nla_get_u16(data[IFLA_NETKIT_TAILROOM]);
}
if (ifmp && tbp[IFLA_IFNAME]) {
@@ -390,18 +393,20 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev,
(tb[IFLA_ADDRESS] || tbp[IFLA_ADDRESS]))
return -EOPNOTSUPP;
- net = rtnl_link_get_net(src_net, tbp);
- if (IS_ERR(net))
- return PTR_ERR(net);
-
- peer = rtnl_create_link(net, ifname, ifname_assign_type,
+ peer = rtnl_create_link(peer_net, ifname, ifname_assign_type,
&netkit_link_ops, tbp, extack);
- if (IS_ERR(peer)) {
- put_net(net);
+ if (IS_ERR(peer))
return PTR_ERR(peer);
- }
netif_inherit_tso_max(peer, dev);
+ if (headroom) {
+ peer->needed_headroom = headroom;
+ dev->needed_headroom = headroom;
+ }
+ if (tailroom) {
+ peer->needed_tailroom = tailroom;
+ dev->needed_tailroom = tailroom;
+ }
if (mode == NETKIT_L2 && !(ifmp && tbp[IFLA_ADDRESS]))
eth_hw_addr_random(peer);
@@ -413,10 +418,10 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev,
nk->policy = policy_peer;
nk->scrub = scrub_peer;
nk->mode = mode;
+ nk->headroom = headroom;
bpf_mprog_bundle_init(&nk->bundle);
err = register_netdevice(peer);
- put_net(net);
if (err < 0)
goto err_register_peer;
netif_carrier_off(peer);
@@ -439,6 +444,7 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev,
nk->policy = policy_prim;
nk->scrub = scrub_prim;
nk->mode = mode;
+ nk->headroom = headroom;
bpf_mprog_bundle_init(&nk->bundle);
err = register_netdevice(dev);
@@ -728,8 +734,8 @@ static void netkit_link_fdinfo(const struct bpf_link *link, struct seq_file *seq
seq_printf(seq, "ifindex:\t%u\n", ifindex);
seq_printf(seq, "attach_type:\t%u (%s)\n",
- nkl->location,
- nkl->location == BPF_NETKIT_PRIMARY ? "primary" : "peer");
+ link->attach_type,
+ link->attach_type == BPF_NETKIT_PRIMARY ? "primary" : "peer");
}
static int netkit_link_fill_info(const struct bpf_link *link,
@@ -744,7 +750,7 @@ static int netkit_link_fill_info(const struct bpf_link *link,
rtnl_unlock();
info->netkit.ifindex = ifindex;
- info->netkit.attach_type = nkl->location;
+ info->netkit.attach_type = link->attach_type;
return 0;
}
@@ -770,8 +776,7 @@ static int netkit_link_init(struct netkit_link *nkl,
struct bpf_prog *prog)
{
bpf_link_init(&nkl->link, BPF_LINK_TYPE_NETKIT,
- &netkit_link_lops, prog);
- nkl->location = attr->link_create.attach_type;
+ &netkit_link_lops, prog, attr->link_create.attach_type);
nkl->dev = dev;
return bpf_link_prime(&nkl->link, link_primer);
}
@@ -863,7 +868,18 @@ static int netkit_change_link(struct net_device *dev, struct nlattr *tb[],
struct net_device *peer = rtnl_dereference(nk->peer);
enum netkit_action policy;
struct nlattr *attr;
- int err;
+ int err, i;
+ static const struct {
+ u32 attr;
+ char *name;
+ } fixed_params[] = {
+ { IFLA_NETKIT_MODE, "operating mode" },
+ { IFLA_NETKIT_SCRUB, "scrubbing" },
+ { IFLA_NETKIT_PEER_SCRUB, "peer scrubbing" },
+ { IFLA_NETKIT_PEER_INFO, "peer info" },
+ { IFLA_NETKIT_HEADROOM, "headroom" },
+ { IFLA_NETKIT_TAILROOM, "tailroom" },
+ };
if (!nk->primary) {
NL_SET_ERR_MSG(extack,
@@ -871,28 +887,14 @@ static int netkit_change_link(struct net_device *dev, struct nlattr *tb[],
return -EACCES;
}
- if (data[IFLA_NETKIT_MODE]) {
- NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_MODE],
- "netkit link operating mode cannot be changed after device creation");
- return -EACCES;
- }
-
- if (data[IFLA_NETKIT_SCRUB]) {
- NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_SCRUB],
- "netkit scrubbing cannot be changed after device creation");
- return -EACCES;
- }
-
- if (data[IFLA_NETKIT_PEER_SCRUB]) {
- NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_PEER_SCRUB],
- "netkit scrubbing cannot be changed after device creation");
- return -EACCES;
- }
-
- if (data[IFLA_NETKIT_PEER_INFO]) {
- NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_PEER_INFO],
- "netkit peer info cannot be changed after device creation");
- return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(fixed_params); i++) {
+ attr = data[fixed_params[i].attr];
+ if (attr) {
+ NL_SET_ERR_MSG_ATTR_FMT(extack, attr,
+ "netkit link %s cannot be changed after device creation",
+ fixed_params[i].name);
+ return -EACCES;
+ }
}
if (data[IFLA_NETKIT_POLICY]) {
@@ -927,6 +929,8 @@ static size_t netkit_get_size(const struct net_device *dev)
nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_PEER_SCRUB */
nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_MODE */
nla_total_size(sizeof(u8)) + /* IFLA_NETKIT_PRIMARY */
+ nla_total_size(sizeof(u16)) + /* IFLA_NETKIT_HEADROOM */
+ nla_total_size(sizeof(u16)) + /* IFLA_NETKIT_TAILROOM */
0;
}
@@ -943,6 +947,10 @@ static int netkit_fill_info(struct sk_buff *skb, const struct net_device *dev)
return -EMSGSIZE;
if (nla_put_u32(skb, IFLA_NETKIT_SCRUB, nk->scrub))
return -EMSGSIZE;
+ if (nla_put_u16(skb, IFLA_NETKIT_HEADROOM, dev->needed_headroom))
+ return -EMSGSIZE;
+ if (nla_put_u16(skb, IFLA_NETKIT_TAILROOM, dev->needed_tailroom))
+ return -EMSGSIZE;
if (peer) {
nk = netkit_priv(peer);
@@ -960,6 +968,8 @@ static const struct nla_policy netkit_policy[IFLA_NETKIT_MAX + 1] = {
[IFLA_NETKIT_MODE] = NLA_POLICY_MAX(NLA_U32, NETKIT_L3),
[IFLA_NETKIT_POLICY] = { .type = NLA_U32 },
[IFLA_NETKIT_PEER_POLICY] = { .type = NLA_U32 },
+ [IFLA_NETKIT_HEADROOM] = { .type = NLA_U16 },
+ [IFLA_NETKIT_TAILROOM] = { .type = NLA_U16 },
[IFLA_NETKIT_SCRUB] = NLA_POLICY_MAX(NLA_U32, NETKIT_SCRUB_DEFAULT),
[IFLA_NETKIT_PEER_SCRUB] = NLA_POLICY_MAX(NLA_U32, NETKIT_SCRUB_DEFAULT),
[IFLA_NETKIT_PRIMARY] = { .type = NLA_REJECT,
@@ -978,6 +988,7 @@ static struct rtnl_link_ops netkit_link_ops = {
.fill_info = netkit_fill_info,
.policy = netkit_policy,
.validate = netkit_validate,
+ .peer_type = IFLA_NETKIT_PEER_INFO,
.maxtype = IFLA_NETKIT_MAX,
};
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index ef6df0e37bea..fbeae05817e9 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -229,7 +229,7 @@ err:
static void ntb_netdev_tx_timer(struct timer_list *t)
{
- struct ntb_netdev *dev = from_timer(dev, t, tx_timer);
+ struct ntb_netdev *dev = timer_container_of(dev, t, tx_timer);
struct net_device *ndev = dev->ndev;
if (ntb_transport_tx_free_entry(dev->qp) < tx_stop) {
@@ -291,7 +291,7 @@ static int ntb_netdev_close(struct net_device *ndev)
while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
dev_kfree_skb(skb);
- del_timer_sync(&dev->tx_timer);
+ timer_delete_sync(&dev->tx_timer);
return 0;
}
diff --git a/drivers/net/ovpn/Makefile b/drivers/net/ovpn/Makefile
new file mode 100644
index 000000000000..229be66167e1
--- /dev/null
+++ b/drivers/net/ovpn/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# ovpn -- OpenVPN data channel offload in kernel space
+#
+# Copyright (C) 2020-2025 OpenVPN, Inc.
+#
+# Author: Antonio Quartulli <antonio@openvpn.net>
+
+obj-$(CONFIG_OVPN) := ovpn.o
+ovpn-y += bind.o
+ovpn-y += crypto.o
+ovpn-y += crypto_aead.o
+ovpn-y += main.o
+ovpn-y += io.o
+ovpn-y += netlink.o
+ovpn-y += netlink-gen.o
+ovpn-y += peer.o
+ovpn-y += pktid.o
+ovpn-y += socket.o
+ovpn-y += stats.o
+ovpn-y += tcp.o
+ovpn-y += udp.o
diff --git a/drivers/net/ovpn/bind.c b/drivers/net/ovpn/bind.c
new file mode 100644
index 000000000000..24d2788a277e
--- /dev/null
+++ b/drivers/net/ovpn/bind.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2012-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <linux/netdevice.h>
+#include <linux/socket.h>
+
+#include "ovpnpriv.h"
+#include "bind.h"
+#include "peer.h"
+
+/**
+ * ovpn_bind_from_sockaddr - retrieve binding matching sockaddr
+ * @ss: the sockaddr to match
+ *
+ * Return: the bind matching the passed sockaddr if found, NULL otherwise
+ */
+struct ovpn_bind *ovpn_bind_from_sockaddr(const struct sockaddr_storage *ss)
+{
+ struct ovpn_bind *bind;
+ size_t sa_len;
+
+ if (ss->ss_family == AF_INET)
+ sa_len = sizeof(struct sockaddr_in);
+ else if (ss->ss_family == AF_INET6)
+ sa_len = sizeof(struct sockaddr_in6);
+ else
+ return ERR_PTR(-EAFNOSUPPORT);
+
+ bind = kzalloc(sizeof(*bind), GFP_ATOMIC);
+ if (unlikely(!bind))
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(&bind->remote, ss, sa_len);
+
+ return bind;
+}
+
+/**
+ * ovpn_bind_reset - assign new binding to peer
+ * @peer: the peer whose binding has to be replaced
+ * @new: the new bind to assign
+ */
+void ovpn_bind_reset(struct ovpn_peer *peer, struct ovpn_bind *new)
+{
+ lockdep_assert_held(&peer->lock);
+
+ kfree_rcu(rcu_replace_pointer(peer->bind, new,
+ lockdep_is_held(&peer->lock)), rcu);
+}
diff --git a/drivers/net/ovpn/bind.h b/drivers/net/ovpn/bind.h
new file mode 100644
index 000000000000..4e0b8398bfd9
--- /dev/null
+++ b/drivers/net/ovpn/bind.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2012-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPNBIND_H_
+#define _NET_OVPN_OVPNBIND_H_
+
+#include <net/ip.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/rcupdate.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+
+struct ovpn_peer;
+
+/**
+ * union ovpn_sockaddr - basic transport layer address
+ * @in4: IPv4 address
+ * @in6: IPv6 address
+ */
+union ovpn_sockaddr {
+ struct sockaddr_in in4;
+ struct sockaddr_in6 in6;
+};
+
+/**
+ * struct ovpn_bind - remote peer binding
+ * @remote: the remote peer sockaddr
+ * @local: local endpoint used to talk to the peer
+ * @local.ipv4: local IPv4 used to talk to the peer
+ * @local.ipv6: local IPv6 used to talk to the peer
+ * @rcu: used to schedule RCU cleanup job
+ */
+struct ovpn_bind {
+ union ovpn_sockaddr remote; /* remote sockaddr */
+
+ union {
+ struct in_addr ipv4;
+ struct in6_addr ipv6;
+ } local;
+
+ struct rcu_head rcu;
+};
+
+/**
+ * ovpn_bind_skb_src_match - match packet source with binding
+ * @bind: the binding to match
+ * @skb: the packet to match
+ *
+ * Return: true if the packet source matches the remote peer sockaddr
+ * in the binding
+ */
+static inline bool ovpn_bind_skb_src_match(const struct ovpn_bind *bind,
+ const struct sk_buff *skb)
+{
+ const union ovpn_sockaddr *remote;
+
+ if (unlikely(!bind))
+ return false;
+
+ remote = &bind->remote;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ if (unlikely(remote->in4.sin_family != AF_INET))
+ return false;
+
+ if (unlikely(remote->in4.sin_addr.s_addr != ip_hdr(skb)->saddr))
+ return false;
+
+ if (unlikely(remote->in4.sin_port != udp_hdr(skb)->source))
+ return false;
+ break;
+ case htons(ETH_P_IPV6):
+ if (unlikely(remote->in6.sin6_family != AF_INET6))
+ return false;
+
+ if (unlikely(!ipv6_addr_equal(&remote->in6.sin6_addr,
+ &ipv6_hdr(skb)->saddr)))
+ return false;
+
+ if (unlikely(remote->in6.sin6_port != udp_hdr(skb)->source))
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+struct ovpn_bind *ovpn_bind_from_sockaddr(const struct sockaddr_storage *sa);
+void ovpn_bind_reset(struct ovpn_peer *peer, struct ovpn_bind *bind);
+
+#endif /* _NET_OVPN_OVPNBIND_H_ */
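
Taken together, bind.c and bind.h form a small allocate-and-replace API. A minimal sketch of how a caller might swap in a new remote endpoint, assuming peer->lock (the lock that ovpn_bind_reset() asserts is held) is a spinlock taken with spin_lock_bh(); the real callers live in the peer and socket code later in the series:

static int example_rebind(struct ovpn_peer *peer,
			  const struct sockaddr_storage *ss)
{
	struct ovpn_bind *bind = ovpn_bind_from_sockaddr(ss);

	if (IS_ERR(bind))
		return PTR_ERR(bind);

	spin_lock_bh(&peer->lock);
	/* the previous bind, if any, is released via kfree_rcu() */
	ovpn_bind_reset(peer, bind);
	spin_unlock_bh(&peer->lock);

	return 0;
}
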
diff --git a/drivers/net/ovpn/crypto.c b/drivers/net/ovpn/crypto.c
new file mode 100644
index 000000000000..90580e32052f
--- /dev/null
+++ b/drivers/net/ovpn/crypto.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <linux/types.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <uapi/linux/ovpn.h>
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "pktid.h"
+#include "crypto_aead.h"
+#include "crypto.h"
+
+static void ovpn_ks_destroy_rcu(struct rcu_head *head)
+{
+ struct ovpn_crypto_key_slot *ks;
+
+ ks = container_of(head, struct ovpn_crypto_key_slot, rcu);
+ ovpn_aead_crypto_key_slot_destroy(ks);
+}
+
+void ovpn_crypto_key_slot_release(struct kref *kref)
+{
+ struct ovpn_crypto_key_slot *ks;
+
+ ks = container_of(kref, struct ovpn_crypto_key_slot, refcount);
+ call_rcu(&ks->rcu, ovpn_ks_destroy_rcu);
+}
+
+/* can only be invoked when all peer references have been dropped (i.e. RCU
+ * release routine)
+ */
+void ovpn_crypto_state_release(struct ovpn_crypto_state *cs)
+{
+ struct ovpn_crypto_key_slot *ks;
+
+ ks = rcu_access_pointer(cs->slots[0]);
+ if (ks) {
+ RCU_INIT_POINTER(cs->slots[0], NULL);
+ ovpn_crypto_key_slot_put(ks);
+ }
+
+ ks = rcu_access_pointer(cs->slots[1]);
+ if (ks) {
+ RCU_INIT_POINTER(cs->slots[1], NULL);
+ ovpn_crypto_key_slot_put(ks);
+ }
+}
+
+/* removes the key matching the specified id from the crypto context */
+bool ovpn_crypto_kill_key(struct ovpn_crypto_state *cs, u8 key_id)
+{
+ struct ovpn_crypto_key_slot *ks = NULL;
+
+ spin_lock_bh(&cs->lock);
+ if (rcu_access_pointer(cs->slots[0])->key_id == key_id) {
+ ks = rcu_replace_pointer(cs->slots[0], NULL,
+ lockdep_is_held(&cs->lock));
+ } else if (rcu_access_pointer(cs->slots[1])->key_id == key_id) {
+ ks = rcu_replace_pointer(cs->slots[1], NULL,
+ lockdep_is_held(&cs->lock));
+ }
+ spin_unlock_bh(&cs->lock);
+
+ if (ks)
+ ovpn_crypto_key_slot_put(ks);
+
+ /* let the caller know if a key was actually killed */
+ return ks;
+}
+
+/* Reset the ovpn_crypto_state object in a way that is atomic
+ * to RCU readers.
+ */
+int ovpn_crypto_state_reset(struct ovpn_crypto_state *cs,
+ const struct ovpn_peer_key_reset *pkr)
+{
+ struct ovpn_crypto_key_slot *old = NULL, *new;
+ u8 idx;
+
+ if (pkr->slot != OVPN_KEY_SLOT_PRIMARY &&
+ pkr->slot != OVPN_KEY_SLOT_SECONDARY)
+ return -EINVAL;
+
+ new = ovpn_aead_crypto_key_slot_new(&pkr->key);
+ if (IS_ERR(new))
+ return PTR_ERR(new);
+
+ spin_lock_bh(&cs->lock);
+ idx = cs->primary_idx;
+ switch (pkr->slot) {
+ case OVPN_KEY_SLOT_PRIMARY:
+ old = rcu_replace_pointer(cs->slots[idx], new,
+ lockdep_is_held(&cs->lock));
+ break;
+ case OVPN_KEY_SLOT_SECONDARY:
+ old = rcu_replace_pointer(cs->slots[!idx], new,
+ lockdep_is_held(&cs->lock));
+ break;
+ }
+ spin_unlock_bh(&cs->lock);
+
+ if (old)
+ ovpn_crypto_key_slot_put(old);
+
+ return 0;
+}
+
+void ovpn_crypto_key_slot_delete(struct ovpn_crypto_state *cs,
+ enum ovpn_key_slot slot)
+{
+ struct ovpn_crypto_key_slot *ks = NULL;
+ u8 idx;
+
+ if (slot != OVPN_KEY_SLOT_PRIMARY &&
+ slot != OVPN_KEY_SLOT_SECONDARY) {
+ pr_warn("Invalid slot to release: %u\n", slot);
+ return;
+ }
+
+ spin_lock_bh(&cs->lock);
+ idx = cs->primary_idx;
+ switch (slot) {
+ case OVPN_KEY_SLOT_PRIMARY:
+ ks = rcu_replace_pointer(cs->slots[idx], NULL,
+ lockdep_is_held(&cs->lock));
+ break;
+ case OVPN_KEY_SLOT_SECONDARY:
+ ks = rcu_replace_pointer(cs->slots[!idx], NULL,
+ lockdep_is_held(&cs->lock));
+ break;
+ }
+ spin_unlock_bh(&cs->lock);
+
+ if (!ks) {
+ pr_debug("Key slot already released: %u\n", slot);
+ return;
+ }
+
+ pr_debug("deleting key slot %u, key_id=%u\n", slot, ks->key_id);
+ ovpn_crypto_key_slot_put(ks);
+}
+
+void ovpn_crypto_key_slots_swap(struct ovpn_crypto_state *cs)
+{
+ const struct ovpn_crypto_key_slot *old_primary, *old_secondary;
+ u8 idx;
+
+ spin_lock_bh(&cs->lock);
+ idx = cs->primary_idx;
+ old_primary = rcu_dereference_protected(cs->slots[idx],
+ lockdep_is_held(&cs->lock));
+ old_secondary = rcu_dereference_protected(cs->slots[!idx],
+ lockdep_is_held(&cs->lock));
+ /* perform real swap by switching the index of the primary key */
+ WRITE_ONCE(cs->primary_idx, !cs->primary_idx);
+
+ pr_debug("key swapped: (old primary) %d <-> (new primary) %d\n",
+ old_primary ? old_primary->key_id : -1,
+ old_secondary ? old_secondary->key_id : -1);
+
+ spin_unlock_bh(&cs->lock);
+}
+
+/**
+ * ovpn_crypto_config_get - populate keyconf object with non-sensitive key data
+ * @cs: the crypto state to extract the key data from
+ * @slot: the specific slot to inspect
+ * @keyconf: the output object to populate
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int ovpn_crypto_config_get(struct ovpn_crypto_state *cs,
+ enum ovpn_key_slot slot,
+ struct ovpn_key_config *keyconf)
+{
+ struct ovpn_crypto_key_slot *ks;
+ int idx;
+
+ switch (slot) {
+ case OVPN_KEY_SLOT_PRIMARY:
+ idx = cs->primary_idx;
+ break;
+ case OVPN_KEY_SLOT_SECONDARY:
+ idx = !cs->primary_idx;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rcu_read_lock();
+ ks = rcu_dereference(cs->slots[idx]);
+ if (!ks) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ keyconf->cipher_alg = ovpn_aead_crypto_alg(ks);
+ keyconf->key_id = ks->key_id;
+ rcu_read_unlock();
+
+ return 0;
+}
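
The two-slot scheme above allows rekeying without disrupting traffic: a new key is installed in the secondary slot, promoted by flipping primary_idx, and the demoted key is dropped once the peer has switched over. A hedged sketch of that sequence built only from the helpers defined here (the actual netlink-driven handlers appear later in the series):

static int example_rekey(struct ovpn_crypto_state *cs,
			 const struct ovpn_peer_key_reset *pkr)
{
	int ret;

	/* pkr->slot is expected to be OVPN_KEY_SLOT_SECONDARY here */
	ret = ovpn_crypto_state_reset(cs, pkr);
	if (ret < 0)
		return ret;

	/* promote the new key, then retire the old one */
	ovpn_crypto_key_slots_swap(cs);
	ovpn_crypto_key_slot_delete(cs, OVPN_KEY_SLOT_SECONDARY);

	return 0;
}
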
diff --git a/drivers/net/ovpn/crypto.h b/drivers/net/ovpn/crypto.h
new file mode 100644
index 000000000000..0e284fec3a75
--- /dev/null
+++ b/drivers/net/ovpn/crypto.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPNCRYPTO_H_
+#define _NET_OVPN_OVPNCRYPTO_H_
+
+#include "pktid.h"
+#include "proto.h"
+
+/* info needed for both encrypt and decrypt directions */
+struct ovpn_key_direction {
+ const u8 *cipher_key;
+ size_t cipher_key_size;
+ const u8 *nonce_tail; /* only needed for GCM modes */
+ size_t nonce_tail_size; /* only needed for GCM modes */
+};
+
+/* all info for a particular symmetric key (primary or secondary) */
+struct ovpn_key_config {
+ enum ovpn_cipher_alg cipher_alg;
+ u8 key_id;
+ struct ovpn_key_direction encrypt;
+ struct ovpn_key_direction decrypt;
+};
+
+/* used to pass settings from netlink to the crypto engine */
+struct ovpn_peer_key_reset {
+ enum ovpn_key_slot slot;
+ struct ovpn_key_config key;
+};
+
+struct ovpn_crypto_key_slot {
+ u8 key_id;
+
+ struct crypto_aead *encrypt;
+ struct crypto_aead *decrypt;
+ u8 nonce_tail_xmit[OVPN_NONCE_TAIL_SIZE];
+ u8 nonce_tail_recv[OVPN_NONCE_TAIL_SIZE];
+
+ struct ovpn_pktid_recv pid_recv ____cacheline_aligned_in_smp;
+ struct ovpn_pktid_xmit pid_xmit ____cacheline_aligned_in_smp;
+ struct kref refcount;
+ struct rcu_head rcu;
+};
+
+struct ovpn_crypto_state {
+ struct ovpn_crypto_key_slot __rcu *slots[2];
+ u8 primary_idx;
+
+ /* protects primary and secondary slots */
+ spinlock_t lock;
+};
+
+static inline bool ovpn_crypto_key_slot_hold(struct ovpn_crypto_key_slot *ks)
+{
+ return kref_get_unless_zero(&ks->refcount);
+}
+
+static inline void ovpn_crypto_state_init(struct ovpn_crypto_state *cs)
+{
+ RCU_INIT_POINTER(cs->slots[0], NULL);
+ RCU_INIT_POINTER(cs->slots[1], NULL);
+ cs->primary_idx = 0;
+ spin_lock_init(&cs->lock);
+}
+
+static inline struct ovpn_crypto_key_slot *
+ovpn_crypto_key_id_to_slot(const struct ovpn_crypto_state *cs, u8 key_id)
+{
+ struct ovpn_crypto_key_slot *ks;
+ u8 idx;
+
+ if (unlikely(!cs))
+ return NULL;
+
+ rcu_read_lock();
+ idx = READ_ONCE(cs->primary_idx);
+ ks = rcu_dereference(cs->slots[idx]);
+ if (ks && ks->key_id == key_id) {
+ if (unlikely(!ovpn_crypto_key_slot_hold(ks)))
+ ks = NULL;
+ goto out;
+ }
+
+ ks = rcu_dereference(cs->slots[!idx]);
+ if (ks && ks->key_id == key_id) {
+ if (unlikely(!ovpn_crypto_key_slot_hold(ks)))
+ ks = NULL;
+ goto out;
+ }
+
+ /* when both key slots are occupied but no matching key ID is found, ks
+ * has to be reset to NULL to avoid carrying a stale pointer
+ */
+ ks = NULL;
+out:
+ rcu_read_unlock();
+
+ return ks;
+}
+
+static inline struct ovpn_crypto_key_slot *
+ovpn_crypto_key_slot_primary(const struct ovpn_crypto_state *cs)
+{
+ struct ovpn_crypto_key_slot *ks;
+
+ rcu_read_lock();
+ ks = rcu_dereference(cs->slots[cs->primary_idx]);
+ if (unlikely(ks && !ovpn_crypto_key_slot_hold(ks)))
+ ks = NULL;
+ rcu_read_unlock();
+
+ return ks;
+}
+
+void ovpn_crypto_key_slot_release(struct kref *kref);
+
+static inline void ovpn_crypto_key_slot_put(struct ovpn_crypto_key_slot *ks)
+{
+ kref_put(&ks->refcount, ovpn_crypto_key_slot_release);
+}
+
+int ovpn_crypto_state_reset(struct ovpn_crypto_state *cs,
+ const struct ovpn_peer_key_reset *pkr);
+
+void ovpn_crypto_key_slot_delete(struct ovpn_crypto_state *cs,
+ enum ovpn_key_slot slot);
+
+void ovpn_crypto_state_release(struct ovpn_crypto_state *cs);
+
+void ovpn_crypto_key_slots_swap(struct ovpn_crypto_state *cs);
+
+int ovpn_crypto_config_get(struct ovpn_crypto_state *cs,
+ enum ovpn_key_slot slot,
+ struct ovpn_key_config *keyconf);
+
+bool ovpn_crypto_kill_key(struct ovpn_crypto_state *cs, u8 key_id);
+
+#endif /* _NET_OVPN_OVPNCRYPTO_H_ */
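
The inline lookup helpers return a slot with an elevated refcount (taken via kref_get_unless_zero() under RCU), so every successful lookup must be paired with ovpn_crypto_key_slot_put(). A minimal usage sketch, assuming the caller only needs the slot briefly:

static int example_use_key(struct ovpn_crypto_state *cs, u8 key_id)
{
	struct ovpn_crypto_key_slot *ks;

	ks = ovpn_crypto_key_id_to_slot(cs, key_id);
	if (!ks)
		return -ENOENT;

	/* ... hand ks to the encrypt/decrypt path ... */

	ovpn_crypto_key_slot_put(ks);
	return 0;
}
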
diff --git a/drivers/net/ovpn/crypto_aead.c b/drivers/net/ovpn/crypto_aead.c
new file mode 100644
index 000000000000..2cca759feffa
--- /dev/null
+++ b/drivers/net/ovpn/crypto_aead.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <crypto/aead.h>
+#include <linux/skbuff.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/udp.h>
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "io.h"
+#include "pktid.h"
+#include "crypto_aead.h"
+#include "crypto.h"
+#include "peer.h"
+#include "proto.h"
+#include "skb.h"
+
+#define OVPN_AUTH_TAG_SIZE 16
+#define OVPN_AAD_SIZE (OVPN_OPCODE_SIZE + OVPN_NONCE_WIRE_SIZE)
+
+#define ALG_NAME_AES "gcm(aes)"
+#define ALG_NAME_CHACHAPOLY "rfc7539(chacha20,poly1305)"
+
+static int ovpn_aead_encap_overhead(const struct ovpn_crypto_key_slot *ks)
+{
+ return OVPN_OPCODE_SIZE + /* OP header size */
+ sizeof(u32) + /* Packet ID */
+ crypto_aead_authsize(ks->encrypt); /* Auth Tag */
+}
+
+int ovpn_aead_encrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks,
+ struct sk_buff *skb)
+{
+ const unsigned int tag_size = crypto_aead_authsize(ks->encrypt);
+ struct aead_request *req;
+ struct sk_buff *trailer;
+ struct scatterlist *sg;
+ int nfrags, ret;
+ u32 pktid, op;
+ u8 *iv;
+
+ ovpn_skb_cb(skb)->peer = peer;
+ ovpn_skb_cb(skb)->ks = ks;
+
+ /* Sample AEAD header format:
+ * 48000001 00000005 7e7046bd 444a7e28 cc6387b1 64a4d6c1 380275a...
+ * [ OP32 ] [seq # ] [ auth tag ] [ payload ... ]
+ * [4-byte
+ * IV head]
+ */
+
+ /* check that there's enough headroom in the skb for packet
+ * encapsulation
+ */
+ if (unlikely(skb_cow_head(skb, OVPN_HEAD_ROOM)))
+ return -ENOBUFS;
+
+ /* get number of skb frags and ensure that packet data is writable */
+ nfrags = skb_cow_data(skb, 0, &trailer);
+ if (unlikely(nfrags < 0))
+ return nfrags;
+
+ if (unlikely(nfrags + 2 > (MAX_SKB_FRAGS + 2)))
+ return -ENOSPC;
+
+ /* sg may be required by async crypto */
+ ovpn_skb_cb(skb)->sg = kmalloc(sizeof(*ovpn_skb_cb(skb)->sg) *
+ (nfrags + 2), GFP_ATOMIC);
+ if (unlikely(!ovpn_skb_cb(skb)->sg))
+ return -ENOMEM;
+
+ sg = ovpn_skb_cb(skb)->sg;
+
+ /* sg table:
+ * 0: op, wire nonce (AD, len=OVPN_OP_SIZE_V2+OVPN_NONCE_WIRE_SIZE),
+ * 1, 2, 3, ..., n: payload,
+ * n+1: auth_tag (len=tag_size)
+ */
+ sg_init_table(sg, nfrags + 2);
+
+ /* build scatterlist to encrypt packet payload */
+ ret = skb_to_sgvec_nomark(skb, sg + 1, 0, skb->len);
+ if (unlikely(ret < 0)) {
+ netdev_err(peer->ovpn->dev,
+ "encrypt: cannot map skb to sg: %d\n", ret);
+ return ret;
+ }
+
+ /* append auth_tag onto scatterlist */
+ __skb_push(skb, tag_size);
+ sg_set_buf(sg + ret + 1, skb->data, tag_size);
+
+	/* obtain packet ID, which is used both as the first
+	 * 4 bytes of the nonce and the last 4 bytes of the associated data.
+ */
+ ret = ovpn_pktid_xmit_next(&ks->pid_xmit, &pktid);
+ if (unlikely(ret < 0))
+ return ret;
+
+ /* iv may be required by async crypto */
+ ovpn_skb_cb(skb)->iv = kmalloc(OVPN_NONCE_SIZE, GFP_ATOMIC);
+ if (unlikely(!ovpn_skb_cb(skb)->iv))
+ return -ENOMEM;
+
+ iv = ovpn_skb_cb(skb)->iv;
+
+ /* concat 4 bytes packet id and 8 bytes nonce tail into 12 bytes
+ * nonce
+ */
+ ovpn_pktid_aead_write(pktid, ks->nonce_tail_xmit, iv);
+
+ /* make space for packet id and push it to the front */
+ __skb_push(skb, OVPN_NONCE_WIRE_SIZE);
+ memcpy(skb->data, iv, OVPN_NONCE_WIRE_SIZE);
+
+ /* add packet op as head of additional data */
+ op = ovpn_opcode_compose(OVPN_DATA_V2, ks->key_id, peer->id);
+ __skb_push(skb, OVPN_OPCODE_SIZE);
+ BUILD_BUG_ON(sizeof(op) != OVPN_OPCODE_SIZE);
+ *((__force __be32 *)skb->data) = htonl(op);
+
+ /* AEAD Additional data */
+ sg_set_buf(sg, skb->data, OVPN_AAD_SIZE);
+
+ req = aead_request_alloc(ks->encrypt, GFP_ATOMIC);
+ if (unlikely(!req))
+ return -ENOMEM;
+
+ ovpn_skb_cb(skb)->req = req;
+
+ /* setup async crypto operation */
+ aead_request_set_tfm(req, ks->encrypt);
+ aead_request_set_callback(req, 0, ovpn_encrypt_post, skb);
+ aead_request_set_crypt(req, sg, sg,
+ skb->len - ovpn_aead_encap_overhead(ks), iv);
+ aead_request_set_ad(req, OVPN_AAD_SIZE);
+
+ /* encrypt it */
+ return crypto_aead_encrypt(req);
+}
+
+int ovpn_aead_decrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks,
+ struct sk_buff *skb)
+{
+ const unsigned int tag_size = crypto_aead_authsize(ks->decrypt);
+ int ret, payload_len, nfrags;
+ unsigned int payload_offset;
+ struct aead_request *req;
+ struct sk_buff *trailer;
+ struct scatterlist *sg;
+ u8 *iv;
+
+ payload_offset = OVPN_AAD_SIZE + tag_size;
+ payload_len = skb->len - payload_offset;
+
+ ovpn_skb_cb(skb)->payload_offset = payload_offset;
+ ovpn_skb_cb(skb)->peer = peer;
+ ovpn_skb_cb(skb)->ks = ks;
+
+ /* sanity check on packet size, payload size must be >= 0 */
+ if (unlikely(payload_len < 0))
+ return -EINVAL;
+
+ /* Prepare the skb data buffer to be accessed up until the auth tag.
+ * This is required because this area is directly mapped into the sg
+ * list.
+ */
+ if (unlikely(!pskb_may_pull(skb, payload_offset)))
+ return -ENODATA;
+
+ /* get number of skb frags and ensure that packet data is writable */
+ nfrags = skb_cow_data(skb, 0, &trailer);
+ if (unlikely(nfrags < 0))
+ return nfrags;
+
+ if (unlikely(nfrags + 2 > (MAX_SKB_FRAGS + 2)))
+ return -ENOSPC;
+
+ /* sg may be required by async crypto */
+ ovpn_skb_cb(skb)->sg = kmalloc(sizeof(*ovpn_skb_cb(skb)->sg) *
+ (nfrags + 2), GFP_ATOMIC);
+ if (unlikely(!ovpn_skb_cb(skb)->sg))
+ return -ENOMEM;
+
+ sg = ovpn_skb_cb(skb)->sg;
+
+ /* sg table:
+ * 0: op, wire nonce (AD, len=OVPN_OPCODE_SIZE+OVPN_NONCE_WIRE_SIZE),
+ * 1, 2, 3, ..., n: payload,
+ * n+1: auth_tag (len=tag_size)
+ */
+ sg_init_table(sg, nfrags + 2);
+
+ /* packet op is head of additional data */
+ sg_set_buf(sg, skb->data, OVPN_AAD_SIZE);
+
+ /* build scatterlist to decrypt packet payload */
+ ret = skb_to_sgvec_nomark(skb, sg + 1, payload_offset, payload_len);
+ if (unlikely(ret < 0)) {
+ netdev_err(peer->ovpn->dev,
+ "decrypt: cannot map skb to sg: %d\n", ret);
+ return ret;
+ }
+
+ /* append auth_tag onto scatterlist */
+ sg_set_buf(sg + ret + 1, skb->data + OVPN_AAD_SIZE, tag_size);
+
+ /* iv may be required by async crypto */
+ ovpn_skb_cb(skb)->iv = kmalloc(OVPN_NONCE_SIZE, GFP_ATOMIC);
+ if (unlikely(!ovpn_skb_cb(skb)->iv))
+ return -ENOMEM;
+
+ iv = ovpn_skb_cb(skb)->iv;
+
+ /* copy nonce into IV buffer */
+ memcpy(iv, skb->data + OVPN_OPCODE_SIZE, OVPN_NONCE_WIRE_SIZE);
+ memcpy(iv + OVPN_NONCE_WIRE_SIZE, ks->nonce_tail_recv,
+ OVPN_NONCE_TAIL_SIZE);
+
+ req = aead_request_alloc(ks->decrypt, GFP_ATOMIC);
+ if (unlikely(!req))
+ return -ENOMEM;
+
+ ovpn_skb_cb(skb)->req = req;
+
+ /* setup async crypto operation */
+ aead_request_set_tfm(req, ks->decrypt);
+ aead_request_set_callback(req, 0, ovpn_decrypt_post, skb);
+ aead_request_set_crypt(req, sg, sg, payload_len + tag_size, iv);
+
+ aead_request_set_ad(req, OVPN_AAD_SIZE);
+
+ /* decrypt it */
+ return crypto_aead_decrypt(req);
+}
+
+/* Initialize a struct crypto_aead object */
+static struct crypto_aead *ovpn_aead_init(const char *title,
+ const char *alg_name,
+ const unsigned char *key,
+ unsigned int keylen)
+{
+ struct crypto_aead *aead;
+ int ret;
+
+ aead = crypto_alloc_aead(alg_name, 0, 0);
+ if (IS_ERR(aead)) {
+ ret = PTR_ERR(aead);
+ pr_err("%s crypto_alloc_aead failed, err=%d\n", title, ret);
+ aead = NULL;
+ goto error;
+ }
+
+ ret = crypto_aead_setkey(aead, key, keylen);
+ if (ret) {
+ pr_err("%s crypto_aead_setkey size=%u failed, err=%d\n", title,
+ keylen, ret);
+ goto error;
+ }
+
+ ret = crypto_aead_setauthsize(aead, OVPN_AUTH_TAG_SIZE);
+ if (ret) {
+ pr_err("%s crypto_aead_setauthsize failed, err=%d\n", title,
+ ret);
+ goto error;
+ }
+
+ /* basic AEAD assumption */
+ if (crypto_aead_ivsize(aead) != OVPN_NONCE_SIZE) {
+ pr_err("%s IV size must be %d\n", title, OVPN_NONCE_SIZE);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ pr_debug("********* Cipher %s (%s)\n", alg_name, title);
+ pr_debug("*** IV size=%u\n", crypto_aead_ivsize(aead));
+ pr_debug("*** req size=%u\n", crypto_aead_reqsize(aead));
+ pr_debug("*** block size=%u\n", crypto_aead_blocksize(aead));
+ pr_debug("*** auth size=%u\n", crypto_aead_authsize(aead));
+ pr_debug("*** alignmask=0x%x\n", crypto_aead_alignmask(aead));
+
+ return aead;
+
+error:
+ crypto_free_aead(aead);
+ return ERR_PTR(ret);
+}
+
+void ovpn_aead_crypto_key_slot_destroy(struct ovpn_crypto_key_slot *ks)
+{
+ if (!ks)
+ return;
+
+ crypto_free_aead(ks->encrypt);
+ crypto_free_aead(ks->decrypt);
+ kfree(ks);
+}
+
+struct ovpn_crypto_key_slot *
+ovpn_aead_crypto_key_slot_new(const struct ovpn_key_config *kc)
+{
+ struct ovpn_crypto_key_slot *ks = NULL;
+ const char *alg_name;
+ int ret;
+
+ /* validate crypto alg */
+ switch (kc->cipher_alg) {
+ case OVPN_CIPHER_ALG_AES_GCM:
+ alg_name = ALG_NAME_AES;
+ break;
+ case OVPN_CIPHER_ALG_CHACHA20_POLY1305:
+ alg_name = ALG_NAME_CHACHAPOLY;
+ break;
+ default:
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ if (kc->encrypt.nonce_tail_size != OVPN_NONCE_TAIL_SIZE ||
+ kc->decrypt.nonce_tail_size != OVPN_NONCE_TAIL_SIZE)
+ return ERR_PTR(-EINVAL);
+
+ /* build the key slot */
+ ks = kmalloc(sizeof(*ks), GFP_KERNEL);
+ if (!ks)
+ return ERR_PTR(-ENOMEM);
+
+ ks->encrypt = NULL;
+ ks->decrypt = NULL;
+ kref_init(&ks->refcount);
+ ks->key_id = kc->key_id;
+
+ ks->encrypt = ovpn_aead_init("encrypt", alg_name,
+ kc->encrypt.cipher_key,
+ kc->encrypt.cipher_key_size);
+ if (IS_ERR(ks->encrypt)) {
+ ret = PTR_ERR(ks->encrypt);
+ ks->encrypt = NULL;
+ goto destroy_ks;
+ }
+
+ ks->decrypt = ovpn_aead_init("decrypt", alg_name,
+ kc->decrypt.cipher_key,
+ kc->decrypt.cipher_key_size);
+ if (IS_ERR(ks->decrypt)) {
+ ret = PTR_ERR(ks->decrypt);
+ ks->decrypt = NULL;
+ goto destroy_ks;
+ }
+
+ memcpy(ks->nonce_tail_xmit, kc->encrypt.nonce_tail,
+ OVPN_NONCE_TAIL_SIZE);
+ memcpy(ks->nonce_tail_recv, kc->decrypt.nonce_tail,
+ OVPN_NONCE_TAIL_SIZE);
+
+ /* init packet ID generation/validation */
+ ovpn_pktid_xmit_init(&ks->pid_xmit);
+ ovpn_pktid_recv_init(&ks->pid_recv);
+
+ return ks;
+
+destroy_ks:
+ ovpn_aead_crypto_key_slot_destroy(ks);
+ return ERR_PTR(ret);
+}
+
+enum ovpn_cipher_alg ovpn_aead_crypto_alg(struct ovpn_crypto_key_slot *ks)
+{
+ const char *alg_name;
+
+ if (!ks->encrypt)
+ return OVPN_CIPHER_ALG_NONE;
+
+ alg_name = crypto_tfm_alg_name(crypto_aead_tfm(ks->encrypt));
+
+ if (!strcmp(alg_name, ALG_NAME_AES))
+ return OVPN_CIPHER_ALG_AES_GCM;
+ else if (!strcmp(alg_name, ALG_NAME_CHACHAPOLY))
+ return OVPN_CIPHER_ALG_CHACHA20_POLY1305;
+ else
+ return OVPN_CIPHER_ALG_NONE;
+}
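
Both directions build the same 12-byte AEAD nonce: the 4-byte packet ID (the only part that travels on the wire, OVPN_NONCE_WIRE_SIZE) followed by the 8-byte nonce tail from the negotiated key material (OVPN_NONCE_TAIL_SIZE), which is never transmitted. A hedged illustration of that layout, equivalent to what ovpn_pktid_aead_write() does on the transmit side:

static void example_build_nonce(u8 nonce[OVPN_NONCE_SIZE], u32 pktid,
				const u8 tail[OVPN_NONCE_TAIL_SIZE])
{
	__be32 wire = htonl(pktid);

	memcpy(nonce, &wire, OVPN_NONCE_WIRE_SIZE);	/* bytes 0-3: pktid */
	memcpy(nonce + OVPN_NONCE_WIRE_SIZE, tail,
	       OVPN_NONCE_TAIL_SIZE);			/* bytes 4-11: key tail */
}
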
diff --git a/drivers/net/ovpn/crypto_aead.h b/drivers/net/ovpn/crypto_aead.h
new file mode 100644
index 000000000000..65a2ff307898
--- /dev/null
+++ b/drivers/net/ovpn/crypto_aead.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPNAEAD_H_
+#define _NET_OVPN_OVPNAEAD_H_
+
+#include "crypto.h"
+
+#include <asm/types.h>
+#include <linux/skbuff.h>
+
+int ovpn_aead_encrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks,
+ struct sk_buff *skb);
+int ovpn_aead_decrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks,
+ struct sk_buff *skb);
+
+struct ovpn_crypto_key_slot *
+ovpn_aead_crypto_key_slot_new(const struct ovpn_key_config *kc);
+void ovpn_aead_crypto_key_slot_destroy(struct ovpn_crypto_key_slot *ks);
+
+enum ovpn_cipher_alg ovpn_aead_crypto_alg(struct ovpn_crypto_key_slot *ks);
+
+#endif /* _NET_OVPN_OVPNAEAD_H_ */
diff --git a/drivers/net/ovpn/io.c b/drivers/net/ovpn/io.c
new file mode 100644
index 000000000000..3e9e7f8444b3
--- /dev/null
+++ b/drivers/net/ovpn/io.c
@@ -0,0 +1,465 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2019-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <crypto/aead.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <net/gro_cells.h>
+#include <net/gso.h>
+#include <net/ip.h>
+
+#include "ovpnpriv.h"
+#include "peer.h"
+#include "io.h"
+#include "bind.h"
+#include "crypto.h"
+#include "crypto_aead.h"
+#include "netlink.h"
+#include "proto.h"
+#include "tcp.h"
+#include "udp.h"
+#include "skb.h"
+#include "socket.h"
+
+const unsigned char ovpn_keepalive_message[OVPN_KEEPALIVE_SIZE] = {
+ 0x2a, 0x18, 0x7b, 0xf3, 0x64, 0x1e, 0xb4, 0xcb,
+ 0x07, 0xed, 0x2d, 0x0a, 0x98, 0x1f, 0xc7, 0x48
+};
+
+/**
+ * ovpn_is_keepalive - check if skb contains a keepalive message
+ * @skb: packet to check
+ *
+ * Assumes that the first byte of skb->data is defined.
+ *
+ * Return: true if skb contains a keepalive or false otherwise
+ */
+static bool ovpn_is_keepalive(struct sk_buff *skb)
+{
+ if (*skb->data != ovpn_keepalive_message[0])
+ return false;
+
+ if (skb->len != OVPN_KEEPALIVE_SIZE)
+ return false;
+
+ if (!pskb_may_pull(skb, OVPN_KEEPALIVE_SIZE))
+ return false;
+
+ return !memcmp(skb->data, ovpn_keepalive_message, OVPN_KEEPALIVE_SIZE);
+}
+
+/* Called after decrypt to write the IP packet to the device.
+ * This method is expected to manage/free the skb.
+ */
+static void ovpn_netdev_write(struct ovpn_peer *peer, struct sk_buff *skb)
+{
+ unsigned int pkt_len;
+ int ret;
+
+	/* GSO state from the transport layer is not valid for the tunnel/data
+ * path. Reset all GSO fields to prevent any further GSO processing
+ * from entering an inconsistent state.
+ */
+ skb_gso_reset(skb);
+
+ /* we can't guarantee the packet wasn't corrupted before entering the
+ * VPN, therefore we give other layers a chance to check that
+ */
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* skb hash for transport packet no longer valid after decapsulation */
+ skb_clear_hash(skb);
+
+ /* post-decrypt scrub -- prepare to inject encapsulated packet onto the
+ * interface, based on __skb_tunnel_rx() in dst.h
+ */
+ skb->dev = peer->ovpn->dev;
+ skb_set_queue_mapping(skb, 0);
+ skb_scrub_packet(skb, true);
+
+ /* network header reset in ovpn_decrypt_post() */
+ skb_reset_transport_header(skb);
+ skb_reset_inner_headers(skb);
+
+ /* cause packet to be "received" by the interface */
+ pkt_len = skb->len;
+ ret = gro_cells_receive(&peer->ovpn->gro_cells, skb);
+ if (likely(ret == NET_RX_SUCCESS)) {
+ /* update RX stats with the size of decrypted packet */
+ ovpn_peer_stats_increment_rx(&peer->vpn_stats, pkt_len);
+ dev_dstats_rx_add(peer->ovpn->dev, pkt_len);
+ }
+}
+
+void ovpn_decrypt_post(void *data, int ret)
+{
+ struct ovpn_crypto_key_slot *ks;
+ unsigned int payload_offset = 0;
+ struct sk_buff *skb = data;
+ struct ovpn_socket *sock;
+ struct ovpn_peer *peer;
+ __be16 proto;
+ __be32 *pid;
+
+ /* crypto is happening asynchronously. this function will be called
+ * again later by the crypto callback with a proper return code
+ */
+ if (unlikely(ret == -EINPROGRESS))
+ return;
+
+ payload_offset = ovpn_skb_cb(skb)->payload_offset;
+ ks = ovpn_skb_cb(skb)->ks;
+ peer = ovpn_skb_cb(skb)->peer;
+
+ /* crypto is done, cleanup skb CB and its members */
+ kfree(ovpn_skb_cb(skb)->iv);
+ kfree(ovpn_skb_cb(skb)->sg);
+ aead_request_free(ovpn_skb_cb(skb)->req);
+
+ if (unlikely(ret < 0))
+ goto drop;
+
+ /* PID sits after the op */
+ pid = (__force __be32 *)(skb->data + OVPN_OPCODE_SIZE);
+ ret = ovpn_pktid_recv(&ks->pid_recv, ntohl(*pid), 0);
+ if (unlikely(ret < 0)) {
+ net_err_ratelimited("%s: PKT ID RX error for peer %u: %d\n",
+ netdev_name(peer->ovpn->dev), peer->id,
+ ret);
+ goto drop;
+ }
+
+ /* keep track of last received authenticated packet for keepalive */
+ WRITE_ONCE(peer->last_recv, ktime_get_real_seconds());
+
+ rcu_read_lock();
+ sock = rcu_dereference(peer->sock);
+ if (sock && sock->sk->sk_protocol == IPPROTO_UDP)
+ /* check if this peer changed local or remote endpoint */
+ ovpn_peer_endpoints_update(peer, skb);
+ rcu_read_unlock();
+
+ /* point to encapsulated IP packet */
+ __skb_pull(skb, payload_offset);
+
+ /* check if this is a valid datapacket that has to be delivered to the
+ * ovpn interface
+ */
+ skb_reset_network_header(skb);
+ proto = ovpn_ip_check_protocol(skb);
+ if (unlikely(!proto)) {
+ /* check if null packet */
+ if (unlikely(!pskb_may_pull(skb, 1))) {
+ net_info_ratelimited("%s: NULL packet received from peer %u\n",
+ netdev_name(peer->ovpn->dev),
+ peer->id);
+ goto drop;
+ }
+
+ if (ovpn_is_keepalive(skb)) {
+ net_dbg_ratelimited("%s: ping received from peer %u\n",
+ netdev_name(peer->ovpn->dev),
+ peer->id);
+ /* we drop the packet, but this is not a failure */
+ consume_skb(skb);
+ goto drop_nocount;
+ }
+
+ net_info_ratelimited("%s: unsupported protocol received from peer %u\n",
+ netdev_name(peer->ovpn->dev), peer->id);
+ goto drop;
+ }
+ skb->protocol = proto;
+
+ /* perform Reverse Path Filtering (RPF) */
+ if (unlikely(!ovpn_peer_check_by_src(peer->ovpn, skb, peer))) {
+ if (skb->protocol == htons(ETH_P_IPV6))
+ net_dbg_ratelimited("%s: RPF dropped packet from peer %u, src: %pI6c\n",
+ netdev_name(peer->ovpn->dev),
+ peer->id, &ipv6_hdr(skb)->saddr);
+ else
+ net_dbg_ratelimited("%s: RPF dropped packet from peer %u, src: %pI4\n",
+ netdev_name(peer->ovpn->dev),
+ peer->id, &ip_hdr(skb)->saddr);
+ goto drop;
+ }
+
+ ovpn_netdev_write(peer, skb);
+ /* skb is passed to upper layer - don't free it */
+ skb = NULL;
+drop:
+ if (unlikely(skb))
+ dev_dstats_rx_dropped(peer->ovpn->dev);
+ kfree_skb(skb);
+drop_nocount:
+ if (likely(peer))
+ ovpn_peer_put(peer);
+ if (likely(ks))
+ ovpn_crypto_key_slot_put(ks);
+}
+
+/* RX path entry point: decrypt packet and forward it to the device */
+void ovpn_recv(struct ovpn_peer *peer, struct sk_buff *skb)
+{
+ struct ovpn_crypto_key_slot *ks;
+ u8 key_id;
+
+ ovpn_peer_stats_increment_rx(&peer->link_stats, skb->len);
+
+ /* get the key slot matching the key ID in the received packet */
+ key_id = ovpn_key_id_from_skb(skb);
+ ks = ovpn_crypto_key_id_to_slot(&peer->crypto, key_id);
+ if (unlikely(!ks)) {
+ net_info_ratelimited("%s: no available key for peer %u, key-id: %u\n",
+ netdev_name(peer->ovpn->dev), peer->id,
+ key_id);
+ dev_dstats_rx_dropped(peer->ovpn->dev);
+ kfree_skb(skb);
+ ovpn_peer_put(peer);
+ return;
+ }
+
+ memset(ovpn_skb_cb(skb), 0, sizeof(struct ovpn_cb));
+ ovpn_decrypt_post(skb, ovpn_aead_decrypt(peer, ks, skb));
+}
+
+void ovpn_encrypt_post(void *data, int ret)
+{
+ struct ovpn_crypto_key_slot *ks;
+ struct sk_buff *skb = data;
+ struct ovpn_socket *sock;
+ struct ovpn_peer *peer;
+ unsigned int orig_len;
+
+ /* encryption is happening asynchronously. This function will be
+ * called later by the crypto callback with a proper return value
+ */
+ if (unlikely(ret == -EINPROGRESS))
+ return;
+
+ ks = ovpn_skb_cb(skb)->ks;
+ peer = ovpn_skb_cb(skb)->peer;
+
+ /* crypto is done, cleanup skb CB and its members */
+ kfree(ovpn_skb_cb(skb)->iv);
+ kfree(ovpn_skb_cb(skb)->sg);
+ aead_request_free(ovpn_skb_cb(skb)->req);
+
+ if (unlikely(ret == -ERANGE)) {
+ /* we ran out of IVs and we must kill the key as it can't be
+		 * used anymore
+ */
+ netdev_warn(peer->ovpn->dev,
+ "killing key %u for peer %u\n", ks->key_id,
+ peer->id);
+ if (ovpn_crypto_kill_key(&peer->crypto, ks->key_id))
+			/* let userspace know that a new key must be negotiated */
+ ovpn_nl_key_swap_notify(peer, ks->key_id);
+
+ goto err;
+ }
+
+ if (unlikely(ret < 0))
+ goto err;
+
+ skb_mark_not_on_list(skb);
+ orig_len = skb->len;
+
+ rcu_read_lock();
+ sock = rcu_dereference(peer->sock);
+ if (unlikely(!sock))
+ goto err_unlock;
+
+ switch (sock->sk->sk_protocol) {
+ case IPPROTO_UDP:
+ ovpn_udp_send_skb(peer, sock->sk, skb);
+ break;
+ case IPPROTO_TCP:
+ ovpn_tcp_send_skb(peer, sock->sk, skb);
+ break;
+ default:
+ /* no transport configured yet */
+ goto err_unlock;
+ }
+
+ ovpn_peer_stats_increment_tx(&peer->link_stats, orig_len);
+ /* keep track of last sent packet for keepalive */
+ WRITE_ONCE(peer->last_sent, ktime_get_real_seconds());
+ /* skb passed down the stack - don't free it */
+ skb = NULL;
+err_unlock:
+ rcu_read_unlock();
+err:
+ if (unlikely(skb))
+ dev_dstats_tx_dropped(peer->ovpn->dev);
+ if (likely(peer))
+ ovpn_peer_put(peer);
+ if (likely(ks))
+ ovpn_crypto_key_slot_put(ks);
+ kfree_skb(skb);
+}
+
+static bool ovpn_encrypt_one(struct ovpn_peer *peer, struct sk_buff *skb)
+{
+ struct ovpn_crypto_key_slot *ks;
+
+ /* get primary key to be used for encrypting data */
+ ks = ovpn_crypto_key_slot_primary(&peer->crypto);
+ if (unlikely(!ks))
+ return false;
+
+ /* take a reference to the peer because the crypto code may run async.
+ * ovpn_encrypt_post() will release it upon completion
+ */
+ if (unlikely(!ovpn_peer_hold(peer))) {
+ DEBUG_NET_WARN_ON_ONCE(1);
+ ovpn_crypto_key_slot_put(ks);
+ return false;
+ }
+
+ memset(ovpn_skb_cb(skb), 0, sizeof(struct ovpn_cb));
+ ovpn_encrypt_post(skb, ovpn_aead_encrypt(peer, ks, skb));
+ return true;
+}
+
+/* send skb to connected peer, if any */
+static void ovpn_send(struct ovpn_priv *ovpn, struct sk_buff *skb,
+ struct ovpn_peer *peer)
+{
+ struct sk_buff *curr, *next;
+
+ /* this might be a GSO-segmented skb list: process each skb
+ * independently
+ */
+ skb_list_walk_safe(skb, curr, next) {
+ if (unlikely(!ovpn_encrypt_one(peer, curr))) {
+ dev_dstats_tx_dropped(ovpn->dev);
+ kfree_skb(curr);
+ }
+ }
+
+ ovpn_peer_put(peer);
+}
+
+/* Send user data to the network
+ */
+netdev_tx_t ovpn_net_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ovpn_priv *ovpn = netdev_priv(dev);
+ struct sk_buff *segments, *curr, *next;
+ struct sk_buff_head skb_list;
+ struct ovpn_peer *peer;
+ __be16 proto;
+ int ret;
+
+ /* reset netfilter state */
+ nf_reset_ct(skb);
+
+ /* verify IP header size in network packet */
+ proto = ovpn_ip_check_protocol(skb);
+ if (unlikely(!proto || skb->protocol != proto))
+ goto drop;
+
+ if (skb_is_gso(skb)) {
+ segments = skb_gso_segment(skb, 0);
+ if (IS_ERR(segments)) {
+ ret = PTR_ERR(segments);
+ net_err_ratelimited("%s: cannot segment payload packet: %d\n",
+ netdev_name(dev), ret);
+ goto drop;
+ }
+
+ consume_skb(skb);
+ skb = segments;
+ }
+
+ /* from this moment on, "skb" might be a list */
+
+ __skb_queue_head_init(&skb_list);
+ skb_list_walk_safe(skb, curr, next) {
+ skb_mark_not_on_list(curr);
+
+ curr = skb_share_check(curr, GFP_ATOMIC);
+ if (unlikely(!curr)) {
+ net_err_ratelimited("%s: skb_share_check failed for payload packet\n",
+ netdev_name(dev));
+ dev_dstats_tx_dropped(ovpn->dev);
+ continue;
+ }
+
+ __skb_queue_tail(&skb_list, curr);
+ }
+ skb_list.prev->next = NULL;
+
+ /* retrieve peer serving the destination IP of this packet */
+ peer = ovpn_peer_get_by_dst(ovpn, skb);
+ if (unlikely(!peer)) {
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ net_dbg_ratelimited("%s: no peer to send data to dst=%pI4\n",
+ netdev_name(ovpn->dev),
+ &ip_hdr(skb)->daddr);
+ break;
+ case htons(ETH_P_IPV6):
+ net_dbg_ratelimited("%s: no peer to send data to dst=%pI6c\n",
+ netdev_name(ovpn->dev),
+ &ipv6_hdr(skb)->daddr);
+ break;
+ }
+ goto drop;
+ }
+ /* dst was needed for peer selection - it can now be dropped */
+ skb_dst_drop(skb);
+
+ ovpn_peer_stats_increment_tx(&peer->vpn_stats, skb->len);
+ ovpn_send(ovpn, skb_list.next, peer);
+
+ return NETDEV_TX_OK;
+
+drop:
+ dev_dstats_tx_dropped(ovpn->dev);
+ skb_tx_error(skb);
+ kfree_skb_list(skb);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * ovpn_xmit_special - encrypt and transmit an out-of-band message to peer
+ * @peer: peer to send the message to
+ * @data: message content
+ * @len: message length
+ *
+ * Assumes that caller holds a reference to peer, which will be
+ * passed to ovpn_send()
+ */
+void ovpn_xmit_special(struct ovpn_peer *peer, const void *data,
+ const unsigned int len)
+{
+ struct ovpn_priv *ovpn;
+ struct sk_buff *skb;
+
+ ovpn = peer->ovpn;
+ if (unlikely(!ovpn)) {
+ ovpn_peer_put(peer);
+ return;
+ }
+
+ skb = alloc_skb(256 + len, GFP_ATOMIC);
+ if (unlikely(!skb)) {
+ ovpn_peer_put(peer);
+ return;
+ }
+
+ skb_reserve(skb, 128);
+ skb->priority = TC_PRIO_BESTEFFORT;
+ __skb_put_data(skb, data, len);
+
+ ovpn_send(ovpn, skb, peer);
+}
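
ovpn_xmit_special() consumes the peer reference it is handed (ovpn_send() drops it), so callers take an extra hold first. A minimal sketch of how the keepalive machinery elsewhere in the series could push the magic keepalive blob through this path:

static void example_send_keepalive(struct ovpn_peer *peer)
{
	if (!ovpn_peer_hold(peer))
		return;

	/* the reference taken above is released inside ovpn_send() */
	ovpn_xmit_special(peer, ovpn_keepalive_message,
			  sizeof(ovpn_keepalive_message));
}
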
diff --git a/drivers/net/ovpn/io.h b/drivers/net/ovpn/io.h
new file mode 100644
index 000000000000..db9e10f9077c
--- /dev/null
+++ b/drivers/net/ovpn/io.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2019-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPN_H_
+#define _NET_OVPN_OVPN_H_
+
+/* DATA_V2 header size with AEAD encryption */
+#define OVPN_HEAD_ROOM (OVPN_OPCODE_SIZE + OVPN_NONCE_WIRE_SIZE + \
+ 16 /* AEAD TAG length */ + \
+ max(sizeof(struct udphdr), sizeof(struct tcphdr)) +\
+ max(sizeof(struct ipv6hdr), sizeof(struct iphdr)))
+
+/* max padding required by encryption */
+#define OVPN_MAX_PADDING 16
+
+#define OVPN_KEEPALIVE_SIZE 16
+extern const unsigned char ovpn_keepalive_message[OVPN_KEEPALIVE_SIZE];
+
+netdev_tx_t ovpn_net_xmit(struct sk_buff *skb, struct net_device *dev);
+
+void ovpn_recv(struct ovpn_peer *peer, struct sk_buff *skb);
+void ovpn_xmit_special(struct ovpn_peer *peer, const void *data,
+ const unsigned int len);
+
+void ovpn_encrypt_post(void *data, int ret);
+void ovpn_decrypt_post(void *data, int ret);
+
+#endif /* _NET_OVPN_OVPN_H_ */
diff --git a/drivers/net/ovpn/main.c b/drivers/net/ovpn/main.c
new file mode 100644
index 000000000000..1bb1afe766a4
--- /dev/null
+++ b/drivers/net/ovpn/main.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ * James Yonan <james@openvpn.net>
+ */
+
+#include <linux/ethtool.h>
+#include <linux/genetlink.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <net/gro_cells.h>
+#include <net/ip.h>
+#include <net/rtnetlink.h>
+#include <uapi/linux/if_arp.h>
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "netlink.h"
+#include "io.h"
+#include "peer.h"
+#include "proto.h"
+#include "tcp.h"
+#include "udp.h"
+
+static void ovpn_priv_free(struct net_device *net)
+{
+ struct ovpn_priv *ovpn = netdev_priv(net);
+
+ kfree(ovpn->peers);
+}
+
+static int ovpn_mp_alloc(struct ovpn_priv *ovpn)
+{
+ struct in_device *dev_v4;
+ int i;
+
+ if (ovpn->mode != OVPN_MODE_MP)
+ return 0;
+
+ dev_v4 = __in_dev_get_rtnl(ovpn->dev);
+ if (dev_v4) {
+ /* disable redirects as Linux gets confused by ovpn
+ * handling same-LAN routing.
+ * This happens because a multipeer interface is used as
+ * relay point between hosts in the same subnet, while
+ * in a classic LAN this would not be needed because the
+ * two hosts would be able to talk directly.
+ */
+ IN_DEV_CONF_SET(dev_v4, SEND_REDIRECTS, false);
+ IPV4_DEVCONF_ALL(dev_net(ovpn->dev), SEND_REDIRECTS) = false;
+ }
+
+ /* the peer container is fairly large, therefore we allocate it only in
+ * MP mode
+ */
+ ovpn->peers = kzalloc(sizeof(*ovpn->peers), GFP_KERNEL);
+ if (!ovpn->peers)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(ovpn->peers->by_id); i++) {
+ INIT_HLIST_HEAD(&ovpn->peers->by_id[i]);
+ INIT_HLIST_NULLS_HEAD(&ovpn->peers->by_vpn_addr4[i], i);
+ INIT_HLIST_NULLS_HEAD(&ovpn->peers->by_vpn_addr6[i], i);
+ INIT_HLIST_NULLS_HEAD(&ovpn->peers->by_transp_addr[i], i);
+ }
+
+ return 0;
+}
+
+static int ovpn_net_init(struct net_device *dev)
+{
+ struct ovpn_priv *ovpn = netdev_priv(dev);
+ int err = gro_cells_init(&ovpn->gro_cells, dev);
+
+ if (err < 0)
+ return err;
+
+ err = ovpn_mp_alloc(ovpn);
+ if (err < 0) {
+ gro_cells_destroy(&ovpn->gro_cells);
+ return err;
+ }
+
+ return 0;
+}
+
+static void ovpn_net_uninit(struct net_device *dev)
+{
+ struct ovpn_priv *ovpn = netdev_priv(dev);
+
+ gro_cells_destroy(&ovpn->gro_cells);
+}
+
+static const struct net_device_ops ovpn_netdev_ops = {
+ .ndo_init = ovpn_net_init,
+ .ndo_uninit = ovpn_net_uninit,
+ .ndo_start_xmit = ovpn_net_xmit,
+};
+
+static const struct device_type ovpn_type = {
+ .name = OVPN_FAMILY_NAME,
+};
+
+static const struct nla_policy ovpn_policy[IFLA_OVPN_MAX + 1] = {
+ [IFLA_OVPN_MODE] = NLA_POLICY_RANGE(NLA_U8, OVPN_MODE_P2P,
+ OVPN_MODE_MP),
+};
+
+/**
+ * ovpn_dev_is_valid - check if the netdevice is of type 'ovpn'
+ * @dev: the interface to check
+ *
+ * Return: whether the netdevice is of type 'ovpn'
+ */
+bool ovpn_dev_is_valid(const struct net_device *dev)
+{
+ return dev->netdev_ops == &ovpn_netdev_ops;
+}
+
+static void ovpn_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strscpy(info->driver, "ovpn", sizeof(info->driver));
+ strscpy(info->bus_info, "ovpn", sizeof(info->bus_info));
+}
+
+static const struct ethtool_ops ovpn_ethtool_ops = {
+ .get_drvinfo = ovpn_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+static void ovpn_setup(struct net_device *dev)
+{
+ netdev_features_t feat = NETIF_F_SG | NETIF_F_GSO |
+ NETIF_F_GSO_SOFTWARE | NETIF_F_HIGHDMA;
+
+ dev->needs_free_netdev = true;
+
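+ /* let the core allocate per-CPU dstats for this device */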
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
+
+ dev->ethtool_ops = &ovpn_ethtool_ops;
+ dev->netdev_ops = &ovpn_netdev_ops;
+
+ dev->priv_destructor = ovpn_priv_free;
+
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
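+ /* account for the ovpn encapsulation overhead (OVPN_HEAD_ROOM)
+ * when sizing the default and maximum MTU
+ */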
+ dev->mtu = ETH_DATA_LEN - OVPN_HEAD_ROOM;
+ dev->min_mtu = IPV4_MIN_MTU;
+ dev->max_mtu = IP_MAX_MTU - OVPN_HEAD_ROOM;
+
+ dev->type = ARPHRD_NONE;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ dev->priv_flags |= IFF_NO_QUEUE;
+ /* when routing packets to a LAN behind a client, we rely on the
+ * route entry that originally brought the packet into ovpn, so
+ * don't release it
+ */
+ netif_keep_dst(dev);
+
+ dev->lltx = true;
+ dev->features |= feat;
+ dev->hw_features |= feat;
+ dev->hw_enc_features |= feat;
+
+ dev->needed_headroom = ALIGN(OVPN_HEAD_ROOM, 4);
+ dev->needed_tailroom = OVPN_MAX_PADDING;
+
+ SET_NETDEV_DEVTYPE(dev, &ovpn_type);
+}
+
+static int ovpn_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct ovpn_priv *ovpn = netdev_priv(dev);
+ struct nlattr **data = params->data;
+ enum ovpn_mode mode = OVPN_MODE_P2P;
+
+ if (data && data[IFLA_OVPN_MODE]) {
+ mode = nla_get_u8(data[IFLA_OVPN_MODE]);
+ netdev_dbg(dev, "setting device mode: %u\n", mode);
+ }
+
+ ovpn->dev = dev;
+ ovpn->mode = mode;
+ spin_lock_init(&ovpn->lock);
+ INIT_DELAYED_WORK(&ovpn->keepalive_work, ovpn_peer_keepalive_work);
+
+ /* Set the carrier explicitly after registration, so that its state
+ * is clearly defined.
+ *
+ * In case of MP interfaces we keep the carrier always on.
+ *
+ * Carrier for P2P interfaces is initially off and it is then
+ * switched on and off when the remote peer is added or deleted.
+ */
+ if (ovpn->mode == OVPN_MODE_MP)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+
+ return register_netdevice(dev);
+}
+
+static void ovpn_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct ovpn_priv *ovpn = netdev_priv(dev);
+
+ cancel_delayed_work_sync(&ovpn->keepalive_work);
+ ovpn_peers_free(ovpn, NULL, OVPN_DEL_PEER_REASON_TEARDOWN);
+ unregister_netdevice_queue(dev, head);
+}
+
+static int ovpn_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct ovpn_priv *ovpn = netdev_priv(dev);
+
+ if (nla_put_u8(skb, IFLA_OVPN_MODE, ovpn->mode))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static struct rtnl_link_ops ovpn_link_ops = {
+ .kind = "ovpn",
+ .netns_refund = false,
+ .priv_size = sizeof(struct ovpn_priv),
+ .setup = ovpn_setup,
+ .policy = ovpn_policy,
+ .maxtype = IFLA_OVPN_MAX,
+ .newlink = ovpn_newlink,
+ .dellink = ovpn_dellink,
+ .fill_info = ovpn_fill_info,
+};
+
+static int __init ovpn_init(void)
+{
+ int err = rtnl_link_register(&ovpn_link_ops);
+
+ if (err) {
+ pr_err("ovpn: can't register rtnl link ops: %d\n", err);
+ return err;
+ }
+
+ err = ovpn_nl_register();
+ if (err) {
+ pr_err("ovpn: can't register netlink family: %d\n", err);
+ goto unreg_rtnl;
+ }
+
+ ovpn_tcp_init();
+
+ return 0;
+
+unreg_rtnl:
+ rtnl_link_unregister(&ovpn_link_ops);
+ return err;
+}
+
+static __exit void ovpn_cleanup(void)
+{
+ ovpn_nl_unregister();
+ rtnl_link_unregister(&ovpn_link_ops);
+
+ rcu_barrier();
+}
+
+module_init(ovpn_init);
+module_exit(ovpn_cleanup);
+
+MODULE_DESCRIPTION("OpenVPN data channel offload (ovpn)");
+MODULE_AUTHOR("Antonio Quartulli <antonio@openvpn.net>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ovpn/main.h b/drivers/net/ovpn/main.h
new file mode 100644
index 000000000000..017cd0100765
--- /dev/null
+++ b/drivers/net/ovpn/main.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_MAIN_H_
+#define _NET_OVPN_MAIN_H_
+
+bool ovpn_dev_is_valid(const struct net_device *dev);
+
+#endif /* _NET_OVPN_MAIN_H_ */
diff --git a/drivers/net/ovpn/netlink-gen.c b/drivers/net/ovpn/netlink-gen.c
new file mode 100644
index 000000000000..ecbe9dcf4f7d
--- /dev/null
+++ b/drivers/net/ovpn/netlink-gen.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/ovpn.yaml */
+/* YNL-GEN kernel source */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include "netlink-gen.h"
+
+#include <uapi/linux/ovpn.h>
+
+/* Integer value ranges */
+static const struct netlink_range_validation ovpn_a_peer_id_range = {
+ .max = 16777215ULL,
+};
+
+static const struct netlink_range_validation ovpn_a_keyconf_peer_id_range = {
+ .max = 16777215ULL,
+};
+
+/* Common nested types */
+const struct nla_policy ovpn_keyconf_nl_policy[OVPN_A_KEYCONF_DECRYPT_DIR + 1] = {
+ [OVPN_A_KEYCONF_PEER_ID] = NLA_POLICY_FULL_RANGE(NLA_U32, &ovpn_a_keyconf_peer_id_range),
+ [OVPN_A_KEYCONF_SLOT] = NLA_POLICY_MAX(NLA_U32, 1),
+ [OVPN_A_KEYCONF_KEY_ID] = NLA_POLICY_MAX(NLA_U32, 7),
+ [OVPN_A_KEYCONF_CIPHER_ALG] = NLA_POLICY_MAX(NLA_U32, 2),
+ [OVPN_A_KEYCONF_ENCRYPT_DIR] = NLA_POLICY_NESTED(ovpn_keydir_nl_policy),
+ [OVPN_A_KEYCONF_DECRYPT_DIR] = NLA_POLICY_NESTED(ovpn_keydir_nl_policy),
+};
+
+const struct nla_policy ovpn_keyconf_del_input_nl_policy[OVPN_A_KEYCONF_SLOT + 1] = {
+ [OVPN_A_KEYCONF_PEER_ID] = NLA_POLICY_FULL_RANGE(NLA_U32, &ovpn_a_keyconf_peer_id_range),
+ [OVPN_A_KEYCONF_SLOT] = NLA_POLICY_MAX(NLA_U32, 1),
+};
+
+const struct nla_policy ovpn_keyconf_get_nl_policy[OVPN_A_KEYCONF_CIPHER_ALG + 1] = {
+ [OVPN_A_KEYCONF_PEER_ID] = NLA_POLICY_FULL_RANGE(NLA_U32, &ovpn_a_keyconf_peer_id_range),
+ [OVPN_A_KEYCONF_SLOT] = NLA_POLICY_MAX(NLA_U32, 1),
+ [OVPN_A_KEYCONF_KEY_ID] = NLA_POLICY_MAX(NLA_U32, 7),
+ [OVPN_A_KEYCONF_CIPHER_ALG] = NLA_POLICY_MAX(NLA_U32, 2),
+};
+
+const struct nla_policy ovpn_keyconf_swap_input_nl_policy[OVPN_A_KEYCONF_PEER_ID + 1] = {
+ [OVPN_A_KEYCONF_PEER_ID] = NLA_POLICY_FULL_RANGE(NLA_U32, &ovpn_a_keyconf_peer_id_range),
+};
+
+const struct nla_policy ovpn_keydir_nl_policy[OVPN_A_KEYDIR_NONCE_TAIL + 1] = {
+ [OVPN_A_KEYDIR_CIPHER_KEY] = NLA_POLICY_MAX_LEN(256),
+ [OVPN_A_KEYDIR_NONCE_TAIL] = NLA_POLICY_EXACT_LEN(OVPN_NONCE_TAIL_SIZE),
+};
+
+const struct nla_policy ovpn_peer_nl_policy[OVPN_A_PEER_LINK_TX_PACKETS + 1] = {
+ [OVPN_A_PEER_ID] = NLA_POLICY_FULL_RANGE(NLA_U32, &ovpn_a_peer_id_range),
+ [OVPN_A_PEER_REMOTE_IPV4] = { .type = NLA_BE32, },
+ [OVPN_A_PEER_REMOTE_IPV6] = NLA_POLICY_EXACT_LEN(16),
+ [OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID] = { .type = NLA_U32, },
+ [OVPN_A_PEER_REMOTE_PORT] = NLA_POLICY_MIN(NLA_BE16, 1),
+ [OVPN_A_PEER_SOCKET] = { .type = NLA_U32, },
+ [OVPN_A_PEER_SOCKET_NETNSID] = { .type = NLA_S32, },
+ [OVPN_A_PEER_VPN_IPV4] = { .type = NLA_BE32, },
+ [OVPN_A_PEER_VPN_IPV6] = NLA_POLICY_EXACT_LEN(16),
+ [OVPN_A_PEER_LOCAL_IPV4] = { .type = NLA_BE32, },
+ [OVPN_A_PEER_LOCAL_IPV6] = NLA_POLICY_EXACT_LEN(16),
+ [OVPN_A_PEER_LOCAL_PORT] = NLA_POLICY_MIN(NLA_BE16, 1),
+ [OVPN_A_PEER_KEEPALIVE_INTERVAL] = { .type = NLA_U32, },
+ [OVPN_A_PEER_KEEPALIVE_TIMEOUT] = { .type = NLA_U32, },
+ [OVPN_A_PEER_DEL_REASON] = NLA_POLICY_MAX(NLA_U32, 4),
+ [OVPN_A_PEER_VPN_RX_BYTES] = { .type = NLA_UINT, },
+ [OVPN_A_PEER_VPN_TX_BYTES] = { .type = NLA_UINT, },
+ [OVPN_A_PEER_VPN_RX_PACKETS] = { .type = NLA_UINT, },
+ [OVPN_A_PEER_VPN_TX_PACKETS] = { .type = NLA_UINT, },
+ [OVPN_A_PEER_LINK_RX_BYTES] = { .type = NLA_UINT, },
+ [OVPN_A_PEER_LINK_TX_BYTES] = { .type = NLA_UINT, },
+ [OVPN_A_PEER_LINK_RX_PACKETS] = { .type = NLA_UINT, },
+ [OVPN_A_PEER_LINK_TX_PACKETS] = { .type = NLA_UINT, },
+};
+
+const struct nla_policy ovpn_peer_del_input_nl_policy[OVPN_A_PEER_ID + 1] = {
+ [OVPN_A_PEER_ID] = NLA_POLICY_FULL_RANGE(NLA_U32, &ovpn_a_peer_id_range),
+};
+
+const struct nla_policy ovpn_peer_new_input_nl_policy[OVPN_A_PEER_KEEPALIVE_TIMEOUT + 1] = {
+ [OVPN_A_PEER_ID] = NLA_POLICY_FULL_RANGE(NLA_U32, &ovpn_a_peer_id_range),
+ [OVPN_A_PEER_REMOTE_IPV4] = { .type = NLA_BE32, },
+ [OVPN_A_PEER_REMOTE_IPV6] = NLA_POLICY_EXACT_LEN(16),
+ [OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID] = { .type = NLA_U32, },
+ [OVPN_A_PEER_REMOTE_PORT] = NLA_POLICY_MIN(NLA_BE16, 1),
+ [OVPN_A_PEER_SOCKET] = { .type = NLA_U32, },
+ [OVPN_A_PEER_VPN_IPV4] = { .type = NLA_BE32, },
+ [OVPN_A_PEER_VPN_IPV6] = NLA_POLICY_EXACT_LEN(16),
+ [OVPN_A_PEER_LOCAL_IPV4] = { .type = NLA_BE32, },
+ [OVPN_A_PEER_LOCAL_IPV6] = NLA_POLICY_EXACT_LEN(16),
+ [OVPN_A_PEER_KEEPALIVE_INTERVAL] = { .type = NLA_U32, },
+ [OVPN_A_PEER_KEEPALIVE_TIMEOUT] = { .type = NLA_U32, },
+};
+
+const struct nla_policy ovpn_peer_set_input_nl_policy[OVPN_A_PEER_KEEPALIVE_TIMEOUT + 1] = {
+ [OVPN_A_PEER_ID] = NLA_POLICY_FULL_RANGE(NLA_U32, &ovpn_a_peer_id_range),
+ [OVPN_A_PEER_REMOTE_IPV4] = { .type = NLA_BE32, },
+ [OVPN_A_PEER_REMOTE_IPV6] = NLA_POLICY_EXACT_LEN(16),
+ [OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID] = { .type = NLA_U32, },
+ [OVPN_A_PEER_REMOTE_PORT] = NLA_POLICY_MIN(NLA_BE16, 1),
+ [OVPN_A_PEER_VPN_IPV4] = { .type = NLA_BE32, },
+ [OVPN_A_PEER_VPN_IPV6] = NLA_POLICY_EXACT_LEN(16),
+ [OVPN_A_PEER_LOCAL_IPV4] = { .type = NLA_BE32, },
+ [OVPN_A_PEER_LOCAL_IPV6] = NLA_POLICY_EXACT_LEN(16),
+ [OVPN_A_PEER_KEEPALIVE_INTERVAL] = { .type = NLA_U32, },
+ [OVPN_A_PEER_KEEPALIVE_TIMEOUT] = { .type = NLA_U32, },
+};
+
+/* OVPN_CMD_PEER_NEW - do */
+static const struct nla_policy ovpn_peer_new_nl_policy[OVPN_A_PEER + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+ [OVPN_A_PEER] = NLA_POLICY_NESTED(ovpn_peer_new_input_nl_policy),
+};
+
+/* OVPN_CMD_PEER_SET - do */
+static const struct nla_policy ovpn_peer_set_nl_policy[OVPN_A_PEER + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+ [OVPN_A_PEER] = NLA_POLICY_NESTED(ovpn_peer_set_input_nl_policy),
+};
+
+/* OVPN_CMD_PEER_GET - do */
+static const struct nla_policy ovpn_peer_get_do_nl_policy[OVPN_A_PEER + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+ [OVPN_A_PEER] = NLA_POLICY_NESTED(ovpn_peer_nl_policy),
+};
+
+/* OVPN_CMD_PEER_GET - dump */
+static const struct nla_policy ovpn_peer_get_dump_nl_policy[OVPN_A_IFINDEX + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+};
+
+/* OVPN_CMD_PEER_DEL - do */
+static const struct nla_policy ovpn_peer_del_nl_policy[OVPN_A_PEER + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+ [OVPN_A_PEER] = NLA_POLICY_NESTED(ovpn_peer_del_input_nl_policy),
+};
+
+/* OVPN_CMD_KEY_NEW - do */
+static const struct nla_policy ovpn_key_new_nl_policy[OVPN_A_KEYCONF + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+ [OVPN_A_KEYCONF] = NLA_POLICY_NESTED(ovpn_keyconf_nl_policy),
+};
+
+/* OVPN_CMD_KEY_GET - do */
+static const struct nla_policy ovpn_key_get_nl_policy[OVPN_A_KEYCONF + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+ [OVPN_A_KEYCONF] = NLA_POLICY_NESTED(ovpn_keyconf_get_nl_policy),
+};
+
+/* OVPN_CMD_KEY_SWAP - do */
+static const struct nla_policy ovpn_key_swap_nl_policy[OVPN_A_KEYCONF + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+ [OVPN_A_KEYCONF] = NLA_POLICY_NESTED(ovpn_keyconf_swap_input_nl_policy),
+};
+
+/* OVPN_CMD_KEY_DEL - do */
+static const struct nla_policy ovpn_key_del_nl_policy[OVPN_A_KEYCONF + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+ [OVPN_A_KEYCONF] = NLA_POLICY_NESTED(ovpn_keyconf_del_input_nl_policy),
+};
+
+/* Ops table for ovpn */
+static const struct genl_split_ops ovpn_nl_ops[] = {
+ {
+ .cmd = OVPN_CMD_PEER_NEW,
+ .pre_doit = ovpn_nl_pre_doit,
+ .doit = ovpn_nl_peer_new_doit,
+ .post_doit = ovpn_nl_post_doit,
+ .policy = ovpn_peer_new_nl_policy,
+ .maxattr = OVPN_A_PEER,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = OVPN_CMD_PEER_SET,
+ .pre_doit = ovpn_nl_pre_doit,
+ .doit = ovpn_nl_peer_set_doit,
+ .post_doit = ovpn_nl_post_doit,
+ .policy = ovpn_peer_set_nl_policy,
+ .maxattr = OVPN_A_PEER,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = OVPN_CMD_PEER_GET,
+ .pre_doit = ovpn_nl_pre_doit,
+ .doit = ovpn_nl_peer_get_doit,
+ .post_doit = ovpn_nl_post_doit,
+ .policy = ovpn_peer_get_do_nl_policy,
+ .maxattr = OVPN_A_PEER,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = OVPN_CMD_PEER_GET,
+ .dumpit = ovpn_nl_peer_get_dumpit,
+ .policy = ovpn_peer_get_dump_nl_policy,
+ .maxattr = OVPN_A_IFINDEX,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DUMP,
+ },
+ {
+ .cmd = OVPN_CMD_PEER_DEL,
+ .pre_doit = ovpn_nl_pre_doit,
+ .doit = ovpn_nl_peer_del_doit,
+ .post_doit = ovpn_nl_post_doit,
+ .policy = ovpn_peer_del_nl_policy,
+ .maxattr = OVPN_A_PEER,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = OVPN_CMD_KEY_NEW,
+ .pre_doit = ovpn_nl_pre_doit,
+ .doit = ovpn_nl_key_new_doit,
+ .post_doit = ovpn_nl_post_doit,
+ .policy = ovpn_key_new_nl_policy,
+ .maxattr = OVPN_A_KEYCONF,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = OVPN_CMD_KEY_GET,
+ .pre_doit = ovpn_nl_pre_doit,
+ .doit = ovpn_nl_key_get_doit,
+ .post_doit = ovpn_nl_post_doit,
+ .policy = ovpn_key_get_nl_policy,
+ .maxattr = OVPN_A_KEYCONF,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = OVPN_CMD_KEY_SWAP,
+ .pre_doit = ovpn_nl_pre_doit,
+ .doit = ovpn_nl_key_swap_doit,
+ .post_doit = ovpn_nl_post_doit,
+ .policy = ovpn_key_swap_nl_policy,
+ .maxattr = OVPN_A_KEYCONF,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = OVPN_CMD_KEY_DEL,
+ .pre_doit = ovpn_nl_pre_doit,
+ .doit = ovpn_nl_key_del_doit,
+ .post_doit = ovpn_nl_post_doit,
+ .policy = ovpn_key_del_nl_policy,
+ .maxattr = OVPN_A_KEYCONF,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+};
+
+static const struct genl_multicast_group ovpn_nl_mcgrps[] = {
+ [OVPN_NLGRP_PEERS] = { "peers", },
+};
+
+struct genl_family ovpn_nl_family __ro_after_init = {
+ .name = OVPN_FAMILY_NAME,
+ .version = OVPN_FAMILY_VERSION,
+ .netnsok = true,
+ .parallel_ops = true,
+ .module = THIS_MODULE,
+ .split_ops = ovpn_nl_ops,
+ .n_split_ops = ARRAY_SIZE(ovpn_nl_ops),
+ .mcgrps = ovpn_nl_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(ovpn_nl_mcgrps),
+};
diff --git a/drivers/net/ovpn/netlink-gen.h b/drivers/net/ovpn/netlink-gen.h
new file mode 100644
index 000000000000..b2301580770f
--- /dev/null
+++ b/drivers/net/ovpn/netlink-gen.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/ovpn.yaml */
+/* YNL-GEN kernel header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
+
+#ifndef _LINUX_OVPN_GEN_H
+#define _LINUX_OVPN_GEN_H
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include <uapi/linux/ovpn.h>
+
+/* Common nested types */
+extern const struct nla_policy ovpn_keyconf_nl_policy[OVPN_A_KEYCONF_DECRYPT_DIR + 1];
+extern const struct nla_policy ovpn_keyconf_del_input_nl_policy[OVPN_A_KEYCONF_SLOT + 1];
+extern const struct nla_policy ovpn_keyconf_get_nl_policy[OVPN_A_KEYCONF_CIPHER_ALG + 1];
+extern const struct nla_policy ovpn_keyconf_swap_input_nl_policy[OVPN_A_KEYCONF_PEER_ID + 1];
+extern const struct nla_policy ovpn_keydir_nl_policy[OVPN_A_KEYDIR_NONCE_TAIL + 1];
+extern const struct nla_policy ovpn_peer_nl_policy[OVPN_A_PEER_LINK_TX_PACKETS + 1];
+extern const struct nla_policy ovpn_peer_del_input_nl_policy[OVPN_A_PEER_ID + 1];
+extern const struct nla_policy ovpn_peer_new_input_nl_policy[OVPN_A_PEER_KEEPALIVE_TIMEOUT + 1];
+extern const struct nla_policy ovpn_peer_set_input_nl_policy[OVPN_A_PEER_KEEPALIVE_TIMEOUT + 1];
+
+int ovpn_nl_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
+ struct genl_info *info);
+void
+ovpn_nl_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
+ struct genl_info *info);
+
+int ovpn_nl_peer_new_doit(struct sk_buff *skb, struct genl_info *info);
+int ovpn_nl_peer_set_doit(struct sk_buff *skb, struct genl_info *info);
+int ovpn_nl_peer_get_doit(struct sk_buff *skb, struct genl_info *info);
+int ovpn_nl_peer_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
+int ovpn_nl_peer_del_doit(struct sk_buff *skb, struct genl_info *info);
+int ovpn_nl_key_new_doit(struct sk_buff *skb, struct genl_info *info);
+int ovpn_nl_key_get_doit(struct sk_buff *skb, struct genl_info *info);
+int ovpn_nl_key_swap_doit(struct sk_buff *skb, struct genl_info *info);
+int ovpn_nl_key_del_doit(struct sk_buff *skb, struct genl_info *info);
+
+enum {
+ OVPN_NLGRP_PEERS,
+};
+
+extern struct genl_family ovpn_nl_family;
+
+#endif /* _LINUX_OVPN_GEN_H */
diff --git a/drivers/net/ovpn/netlink.c b/drivers/net/ovpn/netlink.c
new file mode 100644
index 000000000000..c7f382437630
--- /dev/null
+++ b/drivers/net/ovpn/netlink.c
@@ -0,0 +1,1293 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <net/genetlink.h>
+
+#include <uapi/linux/ovpn.h>
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "netlink.h"
+#include "netlink-gen.h"
+#include "bind.h"
+#include "crypto.h"
+#include "peer.h"
+#include "socket.h"
+
+MODULE_ALIAS_GENL_FAMILY(OVPN_FAMILY_NAME);
+
+/**
+ * ovpn_get_dev_from_attrs - retrieve the ovpn private data from the netdevice
+ * a netlink message is targeting
+ * @net: network namespace where to look for the interface
+ * @info: generic netlink info from the user request
+ * @tracker: tracker object to be used for the netdev reference acquisition
+ *
+ * Return: the ovpn private data, if found, or an error otherwise
+ */
+static struct ovpn_priv *
+ovpn_get_dev_from_attrs(struct net *net, const struct genl_info *info,
+ netdevice_tracker *tracker)
+{
+ struct ovpn_priv *ovpn;
+ struct net_device *dev;
+ int ifindex;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_IFINDEX))
+ return ERR_PTR(-EINVAL);
+
+ ifindex = nla_get_u32(info->attrs[OVPN_A_IFINDEX]);
+
+ rcu_read_lock();
+ dev = dev_get_by_index_rcu(net, ifindex);
+ if (!dev) {
+ rcu_read_unlock();
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "ifindex does not match any interface");
+ return ERR_PTR(-ENODEV);
+ }
+
+ if (!ovpn_dev_is_valid(dev)) {
+ rcu_read_unlock();
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "specified interface is not ovpn");
+ NL_SET_BAD_ATTR(info->extack, info->attrs[OVPN_A_IFINDEX]);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ovpn = netdev_priv(dev);
+ netdev_hold(dev, tracker, GFP_ATOMIC);
+ rcu_read_unlock();
+
+ return ovpn;
+}
+
+int ovpn_nl_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
+ struct genl_info *info)
+{
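+ /* user_ptr[0] carries the ovpn instance, while user_ptr[1] is
+ * used as storage for the netdevice tracker shared with post_doit
+ */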
+ netdevice_tracker *tracker = (netdevice_tracker *)&info->user_ptr[1];
+ struct ovpn_priv *ovpn = ovpn_get_dev_from_attrs(genl_info_net(info),
+ info, tracker);
+
+ if (IS_ERR(ovpn))
+ return PTR_ERR(ovpn);
+
+ info->user_ptr[0] = ovpn;
+
+ return 0;
+}
+
+void ovpn_nl_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
+ struct genl_info *info)
+{
+ netdevice_tracker *tracker = (netdevice_tracker *)&info->user_ptr[1];
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+
+ if (ovpn)
+ netdev_put(ovpn->dev, tracker);
+}
+
+static bool ovpn_nl_attr_sockaddr_remote(struct nlattr **attrs,
+ struct sockaddr_storage *ss)
+{
+ struct sockaddr_in6 *sin6;
+ struct sockaddr_in *sin;
+ struct in6_addr *in6;
+ __be16 port = 0;
+ __be32 *in;
+
+ ss->ss_family = AF_UNSPEC;
+
+ if (attrs[OVPN_A_PEER_REMOTE_PORT])
+ port = nla_get_be16(attrs[OVPN_A_PEER_REMOTE_PORT]);
+
+ if (attrs[OVPN_A_PEER_REMOTE_IPV4]) {
+ ss->ss_family = AF_INET;
+ in = nla_data(attrs[OVPN_A_PEER_REMOTE_IPV4]);
+ } else if (attrs[OVPN_A_PEER_REMOTE_IPV6]) {
+ ss->ss_family = AF_INET6;
+ in6 = nla_data(attrs[OVPN_A_PEER_REMOTE_IPV6]);
+ } else {
+ return false;
+ }
+
+ switch (ss->ss_family) {
+ case AF_INET6:
+ /* If this is a regular IPv6 address, just break and move on;
+ * otherwise switch to AF_INET and extract the IPv4 address accordingly
+ */
+ if (!ipv6_addr_v4mapped(in6)) {
+ sin6 = (struct sockaddr_in6 *)ss;
+ sin6->sin6_port = port;
+ memcpy(&sin6->sin6_addr, in6, sizeof(*in6));
+ break;
+ }
+
+ /* v4-mapped-v6 address */
+ ss->ss_family = AF_INET;
+ in = &in6->s6_addr32[3];
+ fallthrough;
+ case AF_INET:
+ sin = (struct sockaddr_in *)ss;
+ sin->sin_port = port;
+ sin->sin_addr.s_addr = *in;
+ break;
+ }
+
+ return true;
+}
+
+static u8 *ovpn_nl_attr_local_ip(struct nlattr **attrs)
+{
+ u8 *addr6;
+
+ if (!attrs[OVPN_A_PEER_LOCAL_IPV4] && !attrs[OVPN_A_PEER_LOCAL_IPV6])
+ return NULL;
+
+ if (attrs[OVPN_A_PEER_LOCAL_IPV4])
+ return nla_data(attrs[OVPN_A_PEER_LOCAL_IPV4]);
+
+ addr6 = nla_data(attrs[OVPN_A_PEER_LOCAL_IPV6]);
+ /* if this is an IPv4-mapped IPv6 address, extract the actual
+ * IPv4 address from the last 4 bytes
+ */
+ if (ipv6_addr_v4mapped((struct in6_addr *)addr6))
+ return addr6 + 12;
+
+ return addr6;
+}
+
+static sa_family_t ovpn_nl_family_get(struct nlattr *addr4,
+ struct nlattr *addr6)
+{
+ if (addr4)
+ return AF_INET;
+
+ if (addr6) {
+ if (ipv6_addr_v4mapped((struct in6_addr *)nla_data(addr6)))
+ return AF_INET;
+ return AF_INET6;
+ }
+
+ return AF_UNSPEC;
+}
+
+static int ovpn_nl_peer_precheck(struct ovpn_priv *ovpn,
+ struct genl_info *info,
+ struct nlattr **attrs)
+{
+ sa_family_t local_fam, remote_fam;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_PEER], attrs,
+ OVPN_A_PEER_ID))
+ return -EINVAL;
+
+ if (attrs[OVPN_A_PEER_REMOTE_IPV4] && attrs[OVPN_A_PEER_REMOTE_IPV6]) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "cannot specify both remote IPv4 and IPv6 addresses");
+ return -EINVAL;
+ }
+
+ if (!attrs[OVPN_A_PEER_REMOTE_IPV4] &&
+ !attrs[OVPN_A_PEER_REMOTE_IPV6] && attrs[OVPN_A_PEER_REMOTE_PORT]) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "cannot specify remote port without IP address");
+ return -EINVAL;
+ }
+
+ if ((attrs[OVPN_A_PEER_REMOTE_IPV4] ||
+ attrs[OVPN_A_PEER_REMOTE_IPV6]) &&
+ !attrs[OVPN_A_PEER_REMOTE_PORT]) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "cannot specify remote IP address without port");
+ return -EINVAL;
+ }
+
+ if (!attrs[OVPN_A_PEER_REMOTE_IPV4] &&
+ attrs[OVPN_A_PEER_LOCAL_IPV4]) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "cannot specify local IPv4 address without remote");
+ return -EINVAL;
+ }
+
+ if (!attrs[OVPN_A_PEER_REMOTE_IPV6] &&
+ attrs[OVPN_A_PEER_LOCAL_IPV6]) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "cannot specify local IPv6 address without remote");
+ return -EINVAL;
+ }
+
+ /* check that local and remote address families are the same even
+ * after parsing v4mapped IPv6 addresses.
+ * (if addresses are not provided, family will be AF_UNSPEC and
+ * the check is skipped)
+ */
+ local_fam = ovpn_nl_family_get(attrs[OVPN_A_PEER_LOCAL_IPV4],
+ attrs[OVPN_A_PEER_LOCAL_IPV6]);
+ remote_fam = ovpn_nl_family_get(attrs[OVPN_A_PEER_REMOTE_IPV4],
+ attrs[OVPN_A_PEER_REMOTE_IPV6]);
+ if (local_fam != AF_UNSPEC && remote_fam != AF_UNSPEC &&
+ local_fam != remote_fam) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "mismatching local and remote address families");
+ return -EINVAL;
+ }
+
+ if (remote_fam != AF_INET6 && attrs[OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID]) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "cannot specify scope id without remote IPv6 address");
+ return -EINVAL;
+ }
+
+ /* VPN IPs are needed only in MP mode for selecting the right peer */
+ if (ovpn->mode == OVPN_MODE_P2P && (attrs[OVPN_A_PEER_VPN_IPV4] ||
+ attrs[OVPN_A_PEER_VPN_IPV6])) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "unexpected VPN IP in P2P mode");
+ return -EINVAL;
+ }
+
+ if ((attrs[OVPN_A_PEER_KEEPALIVE_INTERVAL] &&
+ !attrs[OVPN_A_PEER_KEEPALIVE_TIMEOUT]) ||
+ (!attrs[OVPN_A_PEER_KEEPALIVE_INTERVAL] &&
+ attrs[OVPN_A_PEER_KEEPALIVE_TIMEOUT])) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "keepalive interval and timeout are required together");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ovpn_nl_peer_modify - modify the peer attributes according to the incoming msg
+ * @peer: the peer to modify
+ * @info: generic netlink info from the user request
+ * @attrs: the attributes from the user request
+ *
+ * Return: a negative error code in case of failure, 0 on success or 1 on
+ * success and the VPN IPs have been modified (requires rehashing in MP
+ * mode)
+ */
+static int ovpn_nl_peer_modify(struct ovpn_peer *peer, struct genl_info *info,
+ struct nlattr **attrs)
+{
+ struct sockaddr_storage ss = {};
+ void *local_ip = NULL;
+ u32 interv, timeout;
+ bool rehash = false;
+ int ret;
+
+ spin_lock_bh(&peer->lock);
+
+ if (ovpn_nl_attr_sockaddr_remote(attrs, &ss)) {
+ /* we carry the local IP in a generic container.
+ * ovpn_peer_reset_sockaddr() will properly interpret it
+ * based on ss.ss_family
+ */
+ local_ip = ovpn_nl_attr_local_ip(attrs);
+
+ /* set peer sockaddr */
+ ret = ovpn_peer_reset_sockaddr(peer, &ss, local_ip);
+ if (ret < 0) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot set peer sockaddr: %d",
+ ret);
+ goto err_unlock;
+ }
+ dst_cache_reset(&peer->dst_cache);
+ }
+
+ if (attrs[OVPN_A_PEER_VPN_IPV4]) {
+ rehash = true;
+ peer->vpn_addrs.ipv4.s_addr =
+ nla_get_in_addr(attrs[OVPN_A_PEER_VPN_IPV4]);
+ }
+
+ if (attrs[OVPN_A_PEER_VPN_IPV6]) {
+ rehash = true;
+ peer->vpn_addrs.ipv6 =
+ nla_get_in6_addr(attrs[OVPN_A_PEER_VPN_IPV6]);
+ }
+
+ /* when setting the keepalive, both parameters have to be configured */
+ if (attrs[OVPN_A_PEER_KEEPALIVE_INTERVAL] &&
+ attrs[OVPN_A_PEER_KEEPALIVE_TIMEOUT]) {
+ interv = nla_get_u32(attrs[OVPN_A_PEER_KEEPALIVE_INTERVAL]);
+ timeout = nla_get_u32(attrs[OVPN_A_PEER_KEEPALIVE_TIMEOUT]);
+ ovpn_peer_keepalive_set(peer, interv, timeout);
+ }
+
+ netdev_dbg(peer->ovpn->dev,
+ "modify peer id=%u endpoint=%pIScp VPN-IPv4=%pI4 VPN-IPv6=%pI6c\n",
+ peer->id, &ss,
+ &peer->vpn_addrs.ipv4.s_addr, &peer->vpn_addrs.ipv6);
+
+ spin_unlock_bh(&peer->lock);
+
+ return rehash ? 1 : 0;
+err_unlock:
+ spin_unlock_bh(&peer->lock);
+ return ret;
+}
+
+int ovpn_nl_peer_new_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[OVPN_A_PEER_MAX + 1];
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+ struct ovpn_socket *ovpn_sock;
+ struct socket *sock = NULL;
+ struct ovpn_peer *peer;
+ u32 sockfd, peer_id;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_PEER))
+ return -EINVAL;
+
+ ret = nla_parse_nested(attrs, OVPN_A_PEER_MAX, info->attrs[OVPN_A_PEER],
+ ovpn_peer_new_input_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ ret = ovpn_nl_peer_precheck(ovpn, info, attrs);
+ if (ret < 0)
+ return ret;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_PEER], attrs,
+ OVPN_A_PEER_SOCKET))
+ return -EINVAL;
+
+ /* in MP mode VPN IPs are required for selecting the right peer */
+ if (ovpn->mode == OVPN_MODE_MP && !attrs[OVPN_A_PEER_VPN_IPV4] &&
+ !attrs[OVPN_A_PEER_VPN_IPV6]) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "VPN IP must be provided in MP mode");
+ return -EINVAL;
+ }
+
+ peer_id = nla_get_u32(attrs[OVPN_A_PEER_ID]);
+ peer = ovpn_peer_new(ovpn, peer_id);
+ if (IS_ERR(peer)) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot create new peer object for peer %u: %ld",
+ peer_id, PTR_ERR(peer));
+ return PTR_ERR(peer);
+ }
+
+ /* lookup the fd in the kernel table and extract the socket object */
+ sockfd = nla_get_u32(attrs[OVPN_A_PEER_SOCKET]);
+ /* sockfd_lookup() increases sock's refcounter */
+ sock = sockfd_lookup(sockfd, &ret);
+ if (!sock) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot lookup peer socket (fd=%u): %d",
+ sockfd, ret);
+ ret = -ENOTSOCK;
+ goto peer_release;
+ }
+
+ /* Only when using UDP as transport protocol can the remote endpoint
+ * be configured, so that ovpn knows where to send packets.
+ */
+ if (sock->sk->sk_protocol == IPPROTO_UDP &&
+ !attrs[OVPN_A_PEER_REMOTE_IPV4] &&
+ !attrs[OVPN_A_PEER_REMOTE_IPV6]) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "missing remote IP address for UDP socket");
+ sockfd_put(sock);
+ ret = -EINVAL;
+ goto peer_release;
+ }
+
+ /* In case of TCP, the socket is connected to the peer and ovpn
+ * will just send bytes over it, without the need to specify a
+ * destination.
+ */
+ if (sock->sk->sk_protocol == IPPROTO_TCP &&
+ (attrs[OVPN_A_PEER_REMOTE_IPV4] ||
+ attrs[OVPN_A_PEER_REMOTE_IPV6])) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "unexpected remote IP address with TCP socket");
+ sockfd_put(sock);
+ ret = -EINVAL;
+ goto peer_release;
+ }
+
+ ovpn_sock = ovpn_socket_new(sock, peer);
+ /* at this point we unconditionally drop the reference to the socket:
+ * - in case of error, the socket has to be dropped
+ * - in case of success, the socket is configured and we let
+ * userspace own the reference, so that the latter can
+ * trigger the final close()
+ */
+ sockfd_put(sock);
+ if (IS_ERR(ovpn_sock)) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot encapsulate socket: %ld",
+ PTR_ERR(ovpn_sock));
+ ret = -ENOTSOCK;
+ goto peer_release;
+ }
+
+ rcu_assign_pointer(peer->sock, ovpn_sock);
+
+ ret = ovpn_nl_peer_modify(peer, info, attrs);
+ if (ret < 0)
+ goto sock_release;
+
+ ret = ovpn_peer_add(ovpn, peer);
+ if (ret < 0) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot add new peer (id=%u) to hashtable: %d",
+ peer->id, ret);
+ goto sock_release;
+ }
+
+ return 0;
+
+sock_release:
+ ovpn_socket_release(peer);
+peer_release:
+ /* release right away because peer was not yet hashed, thus it is not
+ * used in any context
+ */
+ ovpn_peer_release(peer);
+
+ return ret;
+}
+
+int ovpn_nl_peer_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[OVPN_A_PEER_MAX + 1];
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+ struct ovpn_socket *sock;
+ struct ovpn_peer *peer;
+ u32 peer_id;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_PEER))
+ return -EINVAL;
+
+ ret = nla_parse_nested(attrs, OVPN_A_PEER_MAX, info->attrs[OVPN_A_PEER],
+ ovpn_peer_set_input_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ ret = ovpn_nl_peer_precheck(ovpn, info, attrs);
+ if (ret < 0)
+ return ret;
+
+ if (attrs[OVPN_A_PEER_SOCKET]) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "socket cannot be modified");
+ return -EINVAL;
+ }
+
+ peer_id = nla_get_u32(attrs[OVPN_A_PEER_ID]);
+ peer = ovpn_peer_get_by_id(ovpn, peer_id);
+ if (!peer) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot find peer with id %u", peer_id);
+ return -ENOENT;
+ }
+
+ /* when using a TCP socket the remote IP is not expected */
+ rcu_read_lock();
+ sock = rcu_dereference(peer->sock);
+ if (sock && sock->sk->sk_protocol == IPPROTO_TCP &&
+ (attrs[OVPN_A_PEER_REMOTE_IPV4] ||
+ attrs[OVPN_A_PEER_REMOTE_IPV6])) {
+ rcu_read_unlock();
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "unexpected remote IP address with TCP socket");
+ ovpn_peer_put(peer);
+ return -EINVAL;
+ }
+ rcu_read_unlock();
+
+ spin_lock_bh(&ovpn->lock);
+ ret = ovpn_nl_peer_modify(peer, info, attrs);
+ if (ret < 0) {
+ spin_unlock_bh(&ovpn->lock);
+ ovpn_peer_put(peer);
+ return ret;
+ }
+
+ /* ret == 1 means that VPN IPv4/6 has been modified and rehashing
+ * is required
+ */
+ if (ret > 0)
+ ovpn_peer_hash_vpn_ip(peer);
+ spin_unlock_bh(&ovpn->lock);
+ ovpn_peer_put(peer);
+
+ return 0;
+}
+
+static int ovpn_nl_send_peer(struct sk_buff *skb, const struct genl_info *info,
+ const struct ovpn_peer *peer, u32 portid, u32 seq,
+ int flags)
+{
+ const struct ovpn_bind *bind;
+ struct ovpn_socket *sock;
+ int ret = -EMSGSIZE;
+ struct nlattr *attr;
+ __be16 local_port;
+ void *hdr;
+ int id;
+
+ hdr = genlmsg_put(skb, portid, seq, &ovpn_nl_family, flags,
+ OVPN_CMD_PEER_GET);
+ if (!hdr)
+ return -ENOBUFS;
+
+ attr = nla_nest_start(skb, OVPN_A_PEER);
+ if (!attr)
+ goto err;
+
+ rcu_read_lock();
+ sock = rcu_dereference(peer->sock);
+ if (!sock) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ if (!net_eq(genl_info_net(info), sock_net(sock->sk))) {
+ id = peernet2id_alloc(genl_info_net(info),
+ sock_net(sock->sk),
+ GFP_ATOMIC);
+ if (nla_put_s32(skb, OVPN_A_PEER_SOCKET_NETNSID, id))
+ goto err_unlock;
+ }
+ local_port = inet_sk(sock->sk)->inet_sport;
+ rcu_read_unlock();
+
+ if (nla_put_u32(skb, OVPN_A_PEER_ID, peer->id))
+ goto err;
+
+ if (peer->vpn_addrs.ipv4.s_addr != htonl(INADDR_ANY))
+ if (nla_put_in_addr(skb, OVPN_A_PEER_VPN_IPV4,
+ peer->vpn_addrs.ipv4.s_addr))
+ goto err;
+
+ if (!ipv6_addr_equal(&peer->vpn_addrs.ipv6, &in6addr_any))
+ if (nla_put_in6_addr(skb, OVPN_A_PEER_VPN_IPV6,
+ &peer->vpn_addrs.ipv6))
+ goto err;
+
+ if (nla_put_u32(skb, OVPN_A_PEER_KEEPALIVE_INTERVAL,
+ peer->keepalive_interval) ||
+ nla_put_u32(skb, OVPN_A_PEER_KEEPALIVE_TIMEOUT,
+ peer->keepalive_timeout))
+ goto err;
+
+ rcu_read_lock();
+ bind = rcu_dereference(peer->bind);
+ if (bind) {
+ if (bind->remote.in4.sin_family == AF_INET) {
+ if (nla_put_in_addr(skb, OVPN_A_PEER_REMOTE_IPV4,
+ bind->remote.in4.sin_addr.s_addr) ||
+ nla_put_net16(skb, OVPN_A_PEER_REMOTE_PORT,
+ bind->remote.in4.sin_port) ||
+ nla_put_in_addr(skb, OVPN_A_PEER_LOCAL_IPV4,
+ bind->local.ipv4.s_addr))
+ goto err_unlock;
+ } else if (bind->remote.in4.sin_family == AF_INET6) {
+ if (nla_put_in6_addr(skb, OVPN_A_PEER_REMOTE_IPV6,
+ &bind->remote.in6.sin6_addr) ||
+ nla_put_u32(skb, OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID,
+ bind->remote.in6.sin6_scope_id) ||
+ nla_put_net16(skb, OVPN_A_PEER_REMOTE_PORT,
+ bind->remote.in6.sin6_port) ||
+ nla_put_in6_addr(skb, OVPN_A_PEER_LOCAL_IPV6,
+ &bind->local.ipv6))
+ goto err_unlock;
+ }
+ }
+ rcu_read_unlock();
+
+ if (nla_put_net16(skb, OVPN_A_PEER_LOCAL_PORT, local_port) ||
+ /* VPN RX stats */
+ nla_put_uint(skb, OVPN_A_PEER_VPN_RX_BYTES,
+ atomic64_read(&peer->vpn_stats.rx.bytes)) ||
+ nla_put_uint(skb, OVPN_A_PEER_VPN_RX_PACKETS,
+ atomic64_read(&peer->vpn_stats.rx.packets)) ||
+ /* VPN TX stats */
+ nla_put_uint(skb, OVPN_A_PEER_VPN_TX_BYTES,
+ atomic64_read(&peer->vpn_stats.tx.bytes)) ||
+ nla_put_uint(skb, OVPN_A_PEER_VPN_TX_PACKETS,
+ atomic64_read(&peer->vpn_stats.tx.packets)) ||
+ /* link RX stats */
+ nla_put_uint(skb, OVPN_A_PEER_LINK_RX_BYTES,
+ atomic64_read(&peer->link_stats.rx.bytes)) ||
+ nla_put_uint(skb, OVPN_A_PEER_LINK_RX_PACKETS,
+ atomic64_read(&peer->link_stats.rx.packets)) ||
+ /* link TX stats */
+ nla_put_uint(skb, OVPN_A_PEER_LINK_TX_BYTES,
+ atomic64_read(&peer->link_stats.tx.bytes)) ||
+ nla_put_uint(skb, OVPN_A_PEER_LINK_TX_PACKETS,
+ atomic64_read(&peer->link_stats.tx.packets)))
+ goto err;
+
+ nla_nest_end(skb, attr);
+ genlmsg_end(skb, hdr);
+
+ return 0;
+err_unlock:
+ rcu_read_unlock();
+err:
+ genlmsg_cancel(skb, hdr);
+ return ret;
+}
+
+int ovpn_nl_peer_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[OVPN_A_PEER_MAX + 1];
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+ struct ovpn_peer *peer;
+ struct sk_buff *msg;
+ u32 peer_id;
+ int ret, i;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_PEER))
+ return -EINVAL;
+
+ ret = nla_parse_nested(attrs, OVPN_A_PEER_MAX, info->attrs[OVPN_A_PEER],
+ ovpn_peer_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_PEER], attrs,
+ OVPN_A_PEER_ID))
+ return -EINVAL;
+
+ /* OVPN_CMD_PEER_GET expects only the PEER_ID, therefore
+ * ensure that the user hasn't specified any other attribute.
+ *
+ * Unfortunately this check cannot be performed via netlink
+ * spec/policy and must be open-coded.
+ */
+ for (i = 0; i < OVPN_A_PEER_MAX + 1; i++) {
+ if (i == OVPN_A_PEER_ID)
+ continue;
+
+ if (attrs[i]) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "unexpected attribute %u", i);
+ return -EINVAL;
+ }
+ }
+
+ peer_id = nla_get_u32(attrs[OVPN_A_PEER_ID]);
+ peer = ovpn_peer_get_by_id(ovpn, peer_id);
+ if (!peer) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot find peer with id %u", peer_id);
+ return -ENOENT;
+ }
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = ovpn_nl_send_peer(msg, info, peer, info->snd_portid,
+ info->snd_seq, 0);
+ if (ret < 0) {
+ nlmsg_free(msg);
+ goto err;
+ }
+
+ ret = genlmsg_reply(msg, info);
+err:
+ ovpn_peer_put(peer);
+ return ret;
+}
+
+int ovpn_nl_peer_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct genl_info *info = genl_info_dump(cb);
+ int bkt, last_idx = cb->args[1], dumped = 0;
+ netdevice_tracker tracker;
+ struct ovpn_priv *ovpn;
+ struct ovpn_peer *peer;
+
+ ovpn = ovpn_get_dev_from_attrs(sock_net(cb->skb->sk), info, &tracker);
+ if (IS_ERR(ovpn))
+ return PTR_ERR(ovpn);
+
+ if (ovpn->mode == OVPN_MODE_P2P) {
+ /* if we already dumped a peer it means we are done */
+ if (last_idx)
+ goto out;
+
+ rcu_read_lock();
+ peer = rcu_dereference(ovpn->peer);
+ if (peer) {
+ if (ovpn_nl_send_peer(skb, info, peer,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI) == 0)
+ dumped++;
+ }
+ rcu_read_unlock();
+ } else {
+ rcu_read_lock();
+ hash_for_each_rcu(ovpn->peers->by_id, bkt, peer,
+ hash_entry_id) {
+ /* skip peers that were already dumped by
+ * previous invocations
+ */
+ if (last_idx > 0) {
+ last_idx--;
+ continue;
+ }
+
+ if (ovpn_nl_send_peer(skb, info, peer,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI) < 0)
+ break;
+
+ /* count peers being dumped during this invocation */
+ dumped++;
+ }
+ rcu_read_unlock();
+ }
+
+out:
+ netdev_put(ovpn->dev, &tracker);
+
+ /* sum up peers dumped in this message, so that at the next invocation
+ * we can continue from where we left
+ */
+ cb->args[1] += dumped;
+ return skb->len;
+}
+
+int ovpn_nl_peer_del_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[OVPN_A_PEER_MAX + 1];
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+ struct ovpn_peer *peer;
+ u32 peer_id;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_PEER))
+ return -EINVAL;
+
+ ret = nla_parse_nested(attrs, OVPN_A_PEER_MAX, info->attrs[OVPN_A_PEER],
+ ovpn_peer_del_input_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_PEER], attrs,
+ OVPN_A_PEER_ID))
+ return -EINVAL;
+
+ peer_id = nla_get_u32(attrs[OVPN_A_PEER_ID]);
+ peer = ovpn_peer_get_by_id(ovpn, peer_id);
+ if (!peer) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot find peer with id %u", peer_id);
+ return -ENOENT;
+ }
+
+ netdev_dbg(ovpn->dev, "del peer %u\n", peer->id);
+ ret = ovpn_peer_del(peer, OVPN_DEL_PEER_REASON_USERSPACE);
+ ovpn_peer_put(peer);
+
+ return ret;
+}
+
+static int ovpn_nl_get_key_dir(struct genl_info *info, struct nlattr *key,
+ enum ovpn_cipher_alg cipher,
+ struct ovpn_key_direction *dir)
+{
+ struct nlattr *attrs[OVPN_A_KEYDIR_MAX + 1];
+ int ret;
+
+ ret = nla_parse_nested(attrs, OVPN_A_KEYDIR_MAX, key,
+ ovpn_keydir_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ switch (cipher) {
+ case OVPN_CIPHER_ALG_AES_GCM:
+ case OVPN_CIPHER_ALG_CHACHA20_POLY1305:
+ if (NL_REQ_ATTR_CHECK(info->extack, key, attrs,
+ OVPN_A_KEYDIR_CIPHER_KEY) ||
+ NL_REQ_ATTR_CHECK(info->extack, key, attrs,
+ OVPN_A_KEYDIR_NONCE_TAIL))
+ return -EINVAL;
+
+ dir->cipher_key = nla_data(attrs[OVPN_A_KEYDIR_CIPHER_KEY]);
+ dir->cipher_key_size = nla_len(attrs[OVPN_A_KEYDIR_CIPHER_KEY]);
+
+ /* These algorithms require a 96-bit nonce,
+ * constructed by combining the 4-byte packet ID and
+ * the 8-byte nonce tail provided by userspace
+ */
+ dir->nonce_tail = nla_data(attrs[OVPN_A_KEYDIR_NONCE_TAIL]);
+ dir->nonce_tail_size = nla_len(attrs[OVPN_A_KEYDIR_NONCE_TAIL]);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(info->extack, "unsupported cipher");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ovpn_nl_key_new_doit - configure a new key for the specified peer
+ * @skb: incoming netlink message
+ * @info: genetlink metadata
+ *
+ * This function allows the user to install a new key in the peer crypto
+ * state.
+ * Each peer has two 'slots', namely 'primary' and 'secondary', where
+ * keys can be installed. The key in the 'primary' slot is used for
+ * encryption, while both keys can be used for decryption by matching the
+ * key ID carried in the incoming packet.
+ *
+ * The user is responsible for rotating keys when necessary. The user
+ * may fetch peer traffic statistics via netlink in order to better
+ * identify the right time to rotate keys.
+ * The renegotiation follows these steps:
+ * 1. a new key is computed by the user and is installed in the 'secondary'
+ * slot
+ * 2. at user discretion (usually after a predetermined time) 'primary' and
+ * 'secondary' contents are swapped and the new key starts being used for
+ * encryption, while the old key is kept around for decryption of late
+ * packets.
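+ *
+ * As an illustrative (hypothetical) userspace sequence:
+ *   1. OVPN_CMD_KEY_NEW  (slot=SECONDARY, new key ID)  stage the new key
+ *   2. OVPN_CMD_KEY_SWAP                               promote it to primary
+ *   3. OVPN_CMD_KEY_DEL  (slot=SECONDARY)              optionally drop the old key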
+ *
+ * Return: 0 on success or a negative error code otherwise.
+ */
+int ovpn_nl_key_new_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[OVPN_A_KEYCONF_MAX + 1];
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+ struct ovpn_peer_key_reset pkr;
+ struct ovpn_peer *peer;
+ u32 peer_id;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_KEYCONF))
+ return -EINVAL;
+
+ ret = nla_parse_nested(attrs, OVPN_A_KEYCONF_MAX,
+ info->attrs[OVPN_A_KEYCONF],
+ ovpn_keyconf_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_PEER_ID))
+ return -EINVAL;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_SLOT) ||
+ NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_KEY_ID) ||
+ NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_CIPHER_ALG) ||
+ NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_ENCRYPT_DIR) ||
+ NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_DECRYPT_DIR))
+ return -EINVAL;
+
+ pkr.slot = nla_get_u32(attrs[OVPN_A_KEYCONF_SLOT]);
+ pkr.key.key_id = nla_get_u32(attrs[OVPN_A_KEYCONF_KEY_ID]);
+ pkr.key.cipher_alg = nla_get_u32(attrs[OVPN_A_KEYCONF_CIPHER_ALG]);
+
+ ret = ovpn_nl_get_key_dir(info, attrs[OVPN_A_KEYCONF_ENCRYPT_DIR],
+ pkr.key.cipher_alg, &pkr.key.encrypt);
+ if (ret < 0)
+ return ret;
+
+ ret = ovpn_nl_get_key_dir(info, attrs[OVPN_A_KEYCONF_DECRYPT_DIR],
+ pkr.key.cipher_alg, &pkr.key.decrypt);
+ if (ret < 0)
+ return ret;
+
+ peer_id = nla_get_u32(attrs[OVPN_A_KEYCONF_PEER_ID]);
+ peer = ovpn_peer_get_by_id(ovpn, peer_id);
+ if (!peer) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "no peer with id %u to set key for",
+ peer_id);
+ return -ENOENT;
+ }
+
+ ret = ovpn_crypto_state_reset(&peer->crypto, &pkr);
+ if (ret < 0) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot install new key for peer %u",
+ peer_id);
+ goto out;
+ }
+
+ netdev_dbg(ovpn->dev, "new key installed (id=%u) for peer %u\n",
+ pkr.key.key_id, peer_id);
+out:
+ ovpn_peer_put(peer);
+ return ret;
+}
+
+static int ovpn_nl_send_key(struct sk_buff *skb, const struct genl_info *info,
+ u32 peer_id, enum ovpn_key_slot slot,
+ const struct ovpn_key_config *keyconf)
+{
+ struct nlattr *attr;
+ void *hdr;
+
+ hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq, &ovpn_nl_family,
+ 0, OVPN_CMD_KEY_GET);
+ if (!hdr)
+ return -ENOBUFS;
+
+ attr = nla_nest_start(skb, OVPN_A_KEYCONF);
+ if (!attr)
+ goto err;
+
+ if (nla_put_u32(skb, OVPN_A_KEYCONF_PEER_ID, peer_id))
+ goto err;
+
+ if (nla_put_u32(skb, OVPN_A_KEYCONF_SLOT, slot) ||
+ nla_put_u32(skb, OVPN_A_KEYCONF_KEY_ID, keyconf->key_id) ||
+ nla_put_u32(skb, OVPN_A_KEYCONF_CIPHER_ALG, keyconf->cipher_alg))
+ goto err;
+
+ nla_nest_end(skb, attr);
+ genlmsg_end(skb, hdr);
+
+ return 0;
+err:
+ genlmsg_cancel(skb, hdr);
+ return -EMSGSIZE;
+}
+
+int ovpn_nl_key_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[OVPN_A_KEYCONF_MAX + 1];
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+ struct ovpn_key_config keyconf = { 0 };
+ enum ovpn_key_slot slot;
+ struct ovpn_peer *peer;
+ struct sk_buff *msg;
+ u32 peer_id;
+ int ret, i;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_KEYCONF))
+ return -EINVAL;
+
+ ret = nla_parse_nested(attrs, OVPN_A_KEYCONF_MAX,
+ info->attrs[OVPN_A_KEYCONF],
+ ovpn_keyconf_get_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_PEER_ID))
+ return -EINVAL;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_SLOT))
+ return -EINVAL;
+
+ /* OVPN_CMD_KEY_GET expects only the PEER_ID and the SLOT, therefore
+ * ensure that the user hasn't specified any other attribute.
+ *
+ * Unfortunately this check cannot be performed via netlink
+ * spec/policy and must be open-coded.
+ */
+ for (i = 0; i < OVPN_A_KEYCONF_MAX + 1; i++) {
+ if (i == OVPN_A_KEYCONF_PEER_ID ||
+ i == OVPN_A_KEYCONF_SLOT)
+ continue;
+
+ if (attrs[i]) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "unexpected attribute %u", i);
+ return -EINVAL;
+ }
+ }
+
+ peer_id = nla_get_u32(attrs[OVPN_A_KEYCONF_PEER_ID]);
+ peer = ovpn_peer_get_by_id(ovpn, peer_id);
+ if (!peer) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot find peer with id %u", peer_id);
+ return -ENOENT;
+ }
+
+ slot = nla_get_u32(attrs[OVPN_A_KEYCONF_SLOT]);
+
+ ret = ovpn_crypto_config_get(&peer->crypto, slot, &keyconf);
+ if (ret < 0) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot extract key from slot %u for peer %u",
+ slot, peer_id);
+ goto err;
+ }
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = ovpn_nl_send_key(msg, info, peer->id, slot, &keyconf);
+ if (ret < 0) {
+ nlmsg_free(msg);
+ goto err;
+ }
+
+ ret = genlmsg_reply(msg, info);
+err:
+ ovpn_peer_put(peer);
+ return ret;
+}
+
+int ovpn_nl_key_swap_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+ struct nlattr *attrs[OVPN_A_KEYCONF_MAX + 1];
+ struct ovpn_peer *peer;
+ u32 peer_id;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_KEYCONF))
+ return -EINVAL;
+
+ ret = nla_parse_nested(attrs, OVPN_A_KEYCONF_MAX,
+ info->attrs[OVPN_A_KEYCONF],
+ ovpn_keyconf_swap_input_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_PEER_ID))
+ return -EINVAL;
+
+ peer_id = nla_get_u32(attrs[OVPN_A_KEYCONF_PEER_ID]);
+ peer = ovpn_peer_get_by_id(ovpn, peer_id);
+ if (!peer) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "no peer with id %u to swap keys for",
+ peer_id);
+ return -ENOENT;
+ }
+
+ ovpn_crypto_key_slots_swap(&peer->crypto);
+ ovpn_peer_put(peer);
+
+ return 0;
+}
+
+int ovpn_nl_key_del_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[OVPN_A_KEYCONF_MAX + 1];
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+ enum ovpn_key_slot slot;
+ struct ovpn_peer *peer;
+ u32 peer_id;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_KEYCONF))
+ return -EINVAL;
+
+ ret = nla_parse_nested(attrs, OVPN_A_KEYCONF_MAX,
+ info->attrs[OVPN_A_KEYCONF],
+ ovpn_keyconf_del_input_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_PEER_ID))
+ return -EINVAL;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_SLOT))
+ return -EINVAL;
+
+ peer_id = nla_get_u32(attrs[OVPN_A_KEYCONF_PEER_ID]);
+ slot = nla_get_u32(attrs[OVPN_A_KEYCONF_SLOT]);
+
+ peer = ovpn_peer_get_by_id(ovpn, peer_id);
+ if (!peer) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "no peer with id %u to delete key for",
+ peer_id);
+ return -ENOENT;
+ }
+
+ ovpn_crypto_key_slot_delete(&peer->crypto, slot);
+ ovpn_peer_put(peer);
+
+ return 0;
+}
+
+/**
+ * ovpn_nl_peer_del_notify - notify userspace about peer being deleted
+ * @peer: the peer being deleted
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int ovpn_nl_peer_del_notify(struct ovpn_peer *peer)
+{
+ struct ovpn_socket *sock;
+ struct sk_buff *msg;
+ struct nlattr *attr;
+ int ret = -EMSGSIZE;
+ void *hdr;
+
+ netdev_info(peer->ovpn->dev, "deleting peer with id %u, reason %d\n",
+ peer->id, peer->delete_reason);
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(msg, 0, 0, &ovpn_nl_family, 0, OVPN_CMD_PEER_DEL_NTF);
+ if (!hdr) {
+ ret = -ENOBUFS;
+ goto err_free_msg;
+ }
+
+ if (nla_put_u32(msg, OVPN_A_IFINDEX, peer->ovpn->dev->ifindex))
+ goto err_cancel_msg;
+
+ attr = nla_nest_start(msg, OVPN_A_PEER);
+ if (!attr)
+ goto err_cancel_msg;
+
+ if (nla_put_u32(msg, OVPN_A_PEER_DEL_REASON, peer->delete_reason))
+ goto err_cancel_msg;
+
+ if (nla_put_u32(msg, OVPN_A_PEER_ID, peer->id))
+ goto err_cancel_msg;
+
+ nla_nest_end(msg, attr);
+
+ genlmsg_end(msg, hdr);
+
+ rcu_read_lock();
+ sock = rcu_dereference(peer->sock);
+ if (!sock) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+ genlmsg_multicast_netns(&ovpn_nl_family, sock_net(sock->sk), msg, 0,
+ OVPN_NLGRP_PEERS, GFP_ATOMIC);
+ rcu_read_unlock();
+
+ return 0;
+
+err_unlock:
+ rcu_read_unlock();
+err_cancel_msg:
+ genlmsg_cancel(msg, hdr);
+err_free_msg:
+ nlmsg_free(msg);
+ return ret;
+}
+
+/**
+ * ovpn_nl_key_swap_notify - notify userspace peer's key must be renewed
+ * @peer: the peer whose key needs to be renewed
+ * @key_id: the ID of the key that needs to be renewed
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int ovpn_nl_key_swap_notify(struct ovpn_peer *peer, u8 key_id)
+{
+ struct ovpn_socket *sock;
+ struct nlattr *k_attr;
+ struct sk_buff *msg;
+ int ret = -EMSGSIZE;
+ void *hdr;
+
+ netdev_info(peer->ovpn->dev, "peer with id %u must rekey - primary key unusable.\n",
+ peer->id);
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(msg, 0, 0, &ovpn_nl_family, 0, OVPN_CMD_KEY_SWAP_NTF);
+ if (!hdr) {
+ ret = -ENOBUFS;
+ goto err_free_msg;
+ }
+
+ if (nla_put_u32(msg, OVPN_A_IFINDEX, peer->ovpn->dev->ifindex))
+ goto err_cancel_msg;
+
+ k_attr = nla_nest_start(msg, OVPN_A_KEYCONF);
+ if (!k_attr)
+ goto err_cancel_msg;
+
+ if (nla_put_u32(msg, OVPN_A_KEYCONF_PEER_ID, peer->id))
+ goto err_cancel_msg;
+
+ if (nla_put_u32(msg, OVPN_A_KEYCONF_KEY_ID, key_id))
+ goto err_cancel_msg;
+
+ nla_nest_end(msg, k_attr);
+ genlmsg_end(msg, hdr);
+
+ rcu_read_lock();
+ sock = rcu_dereference(peer->sock);
+ if (!sock) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+ genlmsg_multicast_netns(&ovpn_nl_family, sock_net(sock->sk), msg, 0,
+ OVPN_NLGRP_PEERS, GFP_ATOMIC);
+ rcu_read_unlock();
+
+ return 0;
+err_unlock:
+ rcu_read_unlock();
+err_cancel_msg:
+ genlmsg_cancel(msg, hdr);
+err_free_msg:
+ nlmsg_free(msg);
+ return ret;
+}
+
+/**
+ * ovpn_nl_register - perform any needed registration in the NL subsystem
+ *
+ * Return: 0 on success, a negative error code otherwise
+ */
+int __init ovpn_nl_register(void)
+{
+ int ret = genl_register_family(&ovpn_nl_family);
+
+ if (ret) {
+ pr_err("ovpn: genl_register_family failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ovpn_nl_unregister - undo any module wide netlink registration
+ */
+void ovpn_nl_unregister(void)
+{
+ genl_unregister_family(&ovpn_nl_family);
+}
diff --git a/drivers/net/ovpn/netlink.h b/drivers/net/ovpn/netlink.h
new file mode 100644
index 000000000000..8615dfc3c472
--- /dev/null
+++ b/drivers/net/ovpn/netlink.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_NETLINK_H_
+#define _NET_OVPN_NETLINK_H_
+
+int ovpn_nl_register(void);
+void ovpn_nl_unregister(void);
+
+int ovpn_nl_peer_del_notify(struct ovpn_peer *peer);
+int ovpn_nl_key_swap_notify(struct ovpn_peer *peer, u8 key_id);
+
+#endif /* _NET_OVPN_NETLINK_H_ */
diff --git a/drivers/net/ovpn/ovpnpriv.h b/drivers/net/ovpn/ovpnpriv.h
new file mode 100644
index 000000000000..5898f6adada7
--- /dev/null
+++ b/drivers/net/ovpn/ovpnpriv.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2019-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPNSTRUCT_H_
+#define _NET_OVPN_OVPNSTRUCT_H_
+
+#include <linux/workqueue.h>
+#include <net/gro_cells.h>
+#include <uapi/linux/if_link.h>
+#include <uapi/linux/ovpn.h>
+
+/**
+ * struct ovpn_peer_collection - container of peers for MultiPeer mode
+ * @by_id: table of peers indexed by ID
+ * @by_vpn_addr4: table of peers indexed by VPN IPv4 address (items can be
+ * rehashed on the fly due to peer IP change)
+ * @by_vpn_addr6: table of peers indexed by VPN IPv6 address (items can be
+ * rehashed on the fly due to peer IP change)
+ * @by_transp_addr: table of peers indexed by transport address (items can be
+ * rehashed on the fly due to peer IP change)
+ */
+struct ovpn_peer_collection {
+ DECLARE_HASHTABLE(by_id, 12);
+ struct hlist_nulls_head by_vpn_addr4[1 << 12];
+ struct hlist_nulls_head by_vpn_addr6[1 << 12];
+ struct hlist_nulls_head by_transp_addr[1 << 12];
+};
+
+/**
+ * struct ovpn_priv - per ovpn interface state
+ * @dev: the actual netdev representing the tunnel
+ * @mode: device operation mode (i.e. P2P or MP)
+ * @lock: protect this object
+ * @peers: data structures holding multi-peer references
+ * @peer: in P2P mode, this is the only remote peer
+ * @gro_cells: per-CPU Generic Receive Offload cells used on the RX path
+ * @keepalive_work: struct used to schedule keepalive periodic job
+ */
+struct ovpn_priv {
+ struct net_device *dev;
+ enum ovpn_mode mode;
+ spinlock_t lock; /* protect writing to the ovpn_priv object */
+ struct ovpn_peer_collection *peers;
+ struct ovpn_peer __rcu *peer;
+ struct gro_cells gro_cells;
+ struct delayed_work keepalive_work;
+};
+
+#endif /* _NET_OVPN_OVPNSTRUCT_H_ */
diff --git a/drivers/net/ovpn/peer.c b/drivers/net/ovpn/peer.c
new file mode 100644
index 000000000000..4bfcab0c8652
--- /dev/null
+++ b/drivers/net/ovpn/peer.c
@@ -0,0 +1,1364 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <linux/skbuff.h>
+#include <linux/list.h>
+#include <linux/hashtable.h>
+#include <net/ip6_route.h>
+
+#include "ovpnpriv.h"
+#include "bind.h"
+#include "pktid.h"
+#include "crypto.h"
+#include "io.h"
+#include "main.h"
+#include "netlink.h"
+#include "peer.h"
+#include "socket.h"
+
+static void unlock_ovpn(struct ovpn_priv *ovpn,
+ struct llist_head *release_list)
+ __releases(&ovpn->lock)
+{
+ struct ovpn_peer *peer;
+
+ spin_unlock_bh(&ovpn->lock);
+
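+ /* with the lock dropped, release socket and reference of every
+ * peer queued on release_list while the lock was still held
+ */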
+ llist_for_each_entry(peer, release_list->first, release_entry) {
+ ovpn_socket_release(peer);
+ ovpn_peer_put(peer);
+ }
+}
+
+/**
+ * ovpn_peer_keepalive_set - configure keepalive values for peer
+ * @peer: the peer to configure
+ * @interval: outgoing keepalive interval
+ * @timeout: incoming keepalive timeout
+ */
+void ovpn_peer_keepalive_set(struct ovpn_peer *peer, u32 interval, u32 timeout)
+{
+ time64_t now = ktime_get_real_seconds();
+
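+ /* keepalive_xmit_exp is the next time a keepalive has to be sent,
+ * keepalive_recv_exp the deadline by which traffic from the peer
+ * must be seen before it is considered expired
+ */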
+ netdev_dbg(peer->ovpn->dev,
+ "scheduling keepalive for peer %u: interval=%u timeout=%u\n",
+ peer->id, interval, timeout);
+
+ peer->keepalive_interval = interval;
+ WRITE_ONCE(peer->last_sent, now);
+ peer->keepalive_xmit_exp = now + interval;
+
+ peer->keepalive_timeout = timeout;
+ WRITE_ONCE(peer->last_recv, now);
+ peer->keepalive_recv_exp = now + timeout;
+
+ /* now that interval and timeout have been changed, kick
+ * off the worker so that the next delay can be recomputed
+ */
+ mod_delayed_work(system_wq, &peer->ovpn->keepalive_work, 0);
+}
+
+/**
+ * ovpn_peer_keepalive_send - periodic worker sending keepalive packets
+ * @work: pointer to the work member of the related peer object
+ *
+ * NOTE: the reference to peer is not dropped because it gets inherited
+ * by ovpn_xmit_special()
+ */
+static void ovpn_peer_keepalive_send(struct work_struct *work)
+{
+ struct ovpn_peer *peer = container_of(work, struct ovpn_peer,
+ keepalive_work);
+
+ local_bh_disable();
+ ovpn_xmit_special(peer, ovpn_keepalive_message,
+ sizeof(ovpn_keepalive_message));
+ local_bh_enable();
+}
+
+/**
+ * ovpn_peer_new - allocate and initialize a new peer object
+ * @ovpn: the openvpn instance inside which the peer should be created
+ * @id: the ID assigned to this peer
+ *
+ * Return: a pointer to the new peer on success or an error code otherwise
+ */
+struct ovpn_peer *ovpn_peer_new(struct ovpn_priv *ovpn, u32 id)
+{
+ struct ovpn_peer *peer;
+ int ret;
+
+ /* alloc and init peer object */
+ peer = kzalloc(sizeof(*peer), GFP_KERNEL);
+ if (!peer)
+ return ERR_PTR(-ENOMEM);
+
+ peer->id = id;
+ peer->ovpn = ovpn;
+
+ peer->vpn_addrs.ipv4.s_addr = htonl(INADDR_ANY);
+ peer->vpn_addrs.ipv6 = in6addr_any;
+
+ RCU_INIT_POINTER(peer->bind, NULL);
+ ovpn_crypto_state_init(&peer->crypto);
+ spin_lock_init(&peer->lock);
+ kref_init(&peer->refcount);
+ ovpn_peer_stats_init(&peer->vpn_stats);
+ ovpn_peer_stats_init(&peer->link_stats);
+ INIT_WORK(&peer->keepalive_work, ovpn_peer_keepalive_send);
+
+ ret = dst_cache_init(&peer->dst_cache, GFP_KERNEL);
+ if (ret < 0) {
+ netdev_err(ovpn->dev,
+ "cannot initialize dst cache for peer %u\n",
+ peer->id);
+ kfree(peer);
+ return ERR_PTR(ret);
+ }
+
+ netdev_hold(ovpn->dev, &peer->dev_tracker, GFP_KERNEL);
+
+ return peer;
+}
+
+/**
+ * ovpn_peer_reset_sockaddr - recreate binding for peer
+ * @peer: peer to recreate the binding for
+ * @ss: sockaddr to use as remote endpoint for the binding
+ * @local_ip: local IP for the binding
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int ovpn_peer_reset_sockaddr(struct ovpn_peer *peer,
+ const struct sockaddr_storage *ss,
+ const void *local_ip)
+{
+ struct ovpn_bind *bind;
+ size_t ip_len;
+
+ lockdep_assert_held(&peer->lock);
+
+ /* create new ovpn_bind object */
+ bind = ovpn_bind_from_sockaddr(ss);
+ if (IS_ERR(bind))
+ return PTR_ERR(bind);
+
+ if (local_ip) {
+ if (ss->ss_family == AF_INET) {
+ ip_len = sizeof(struct in_addr);
+ } else if (ss->ss_family == AF_INET6) {
+ ip_len = sizeof(struct in6_addr);
+ } else {
+ net_dbg_ratelimited("%s: invalid family %u for remote endpoint for peer %u\n",
+ netdev_name(peer->ovpn->dev),
+ ss->ss_family, peer->id);
+ kfree(bind);
+ return -EINVAL;
+ }
+
+ memcpy(&bind->local, local_ip, ip_len);
+ }
+
+ /* set binding */
+ ovpn_bind_reset(peer, bind);
+
+ return 0;
+}
+
+/* variable name __tbl2 needs to be different from __tbl1
+ * in the macro below to avoid confusing clang
+ */
+#define ovpn_get_hash_slot(_tbl, _key, _key_len) ({ \
+ typeof(_tbl) *__tbl2 = &(_tbl); \
+ jhash(_key, _key_len, 0) % HASH_SIZE(*__tbl2); \
+})
+
+#define ovpn_get_hash_head(_tbl, _key, _key_len) ({ \
+ typeof(_tbl) *__tbl1 = &(_tbl); \
+ &(*__tbl1)[ovpn_get_hash_slot(*__tbl1, _key, _key_len)];\
+})
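+
+/* Usage example (see ovpn_peer_get_by_id() below):
+ *   ovpn_get_hash_head(ovpn->peers->by_id, &peer_id, sizeof(peer_id))
+ * jhash() of the key selects one of the HASH_SIZE() buckets of the given
+ * table and the macro evaluates to a pointer to that bucket's list head.
+ */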
+
+/**
+ * ovpn_peer_endpoints_update - update remote or local endpoint for peer
+ * @peer: the peer whose endpoints should be updated
+ * @skb: incoming packet to retrieve the source/destination address from
+ */
+void ovpn_peer_endpoints_update(struct ovpn_peer *peer, struct sk_buff *skb)
+{
+ struct hlist_nulls_head *nhead;
+ struct sockaddr_storage ss;
+ struct sockaddr_in6 *sa6;
+ bool reset_cache = false;
+ struct sockaddr_in *sa;
+ struct ovpn_bind *bind;
+ const void *local_ip;
+ size_t salen = 0;
+
+ spin_lock_bh(&peer->lock);
+ bind = rcu_dereference_protected(peer->bind,
+ lockdep_is_held(&peer->lock));
+ if (unlikely(!bind))
+ goto unlock;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ /* float check */
+ if (unlikely(!ovpn_bind_skb_src_match(bind, skb))) {
+ /* unconditionally save local endpoint in case
+ * of float, as it may have changed as well
+ */
+ local_ip = &ip_hdr(skb)->daddr;
+ sa = (struct sockaddr_in *)&ss;
+ sa->sin_family = AF_INET;
+ sa->sin_addr.s_addr = ip_hdr(skb)->saddr;
+ sa->sin_port = udp_hdr(skb)->source;
+ salen = sizeof(*sa);
+ reset_cache = true;
+ break;
+ }
+
+ /* if no float happened, let's double check if the local endpoint
+ * has changed
+ */
+ if (unlikely(bind->local.ipv4.s_addr != ip_hdr(skb)->daddr)) {
+ net_dbg_ratelimited("%s: learning local IPv4 for peer %d (%pI4 -> %pI4)\n",
+ netdev_name(peer->ovpn->dev),
+ peer->id, &bind->local.ipv4.s_addr,
+ &ip_hdr(skb)->daddr);
+ bind->local.ipv4.s_addr = ip_hdr(skb)->daddr;
+ reset_cache = true;
+ }
+ break;
+ case htons(ETH_P_IPV6):
+ /* float check */
+ if (unlikely(!ovpn_bind_skb_src_match(bind, skb))) {
+ /* unconditionally save local endpoint in case
+ * of float, as it may have changed as well
+ */
+ local_ip = &ipv6_hdr(skb)->daddr;
+ sa6 = (struct sockaddr_in6 *)&ss;
+ sa6->sin6_family = AF_INET6;
+ sa6->sin6_addr = ipv6_hdr(skb)->saddr;
+ sa6->sin6_port = udp_hdr(skb)->source;
+ sa6->sin6_scope_id = ipv6_iface_scope_id(&ipv6_hdr(skb)->saddr,
+ skb->skb_iif);
+ salen = sizeof(*sa6);
+ reset_cache = true;
+ break;
+ }
+
+ /* if no float happened, let's double check if the local endpoint
+ * has changed
+ */
+ if (unlikely(!ipv6_addr_equal(&bind->local.ipv6,
+ &ipv6_hdr(skb)->daddr))) {
+ net_dbg_ratelimited("%s: learning local IPv6 for peer %d (%pI6c -> %pI6c)\n",
+ netdev_name(peer->ovpn->dev),
+ peer->id, &bind->local.ipv6,
+ &ipv6_hdr(skb)->daddr);
+ bind->local.ipv6 = ipv6_hdr(skb)->daddr;
+ reset_cache = true;
+ }
+ break;
+ default:
+ goto unlock;
+ }
+
+ if (unlikely(reset_cache))
+ dst_cache_reset(&peer->dst_cache);
+
+ /* if the peer did not float, we can bail out now */
+ if (likely(!salen))
+ goto unlock;
+
+ if (unlikely(ovpn_peer_reset_sockaddr(peer,
+ (struct sockaddr_storage *)&ss,
+ local_ip) < 0))
+ goto unlock;
+
+ net_dbg_ratelimited("%s: peer %d floated to %pIScp",
+ netdev_name(peer->ovpn->dev), peer->id, &ss);
+
+ spin_unlock_bh(&peer->lock);
+
+ /* rehashing is required only in MP mode as P2P has one peer
+ * only and thus there is no hashtable
+ */
+ if (peer->ovpn->mode == OVPN_MODE_MP) {
+ spin_lock_bh(&peer->ovpn->lock);
+ spin_lock_bh(&peer->lock);
+ bind = rcu_dereference_protected(peer->bind,
+ lockdep_is_held(&peer->lock));
+ if (unlikely(!bind)) {
+ spin_unlock_bh(&peer->lock);
+ spin_unlock_bh(&peer->ovpn->lock);
+ return;
+ }
+
+ /* This function may be invoked concurrently, therefore another
+ * float may have happened in parallel: perform rehashing
+ * using the peer->bind->remote directly as key
+ */
+
+ switch (bind->remote.in4.sin_family) {
+ case AF_INET:
+ salen = sizeof(*sa);
+ break;
+ case AF_INET6:
+ salen = sizeof(*sa6);
+ break;
+ }
+
+ /* remove old hashing */
+ hlist_nulls_del_init_rcu(&peer->hash_entry_transp_addr);
+ /* re-add with new transport address */
+ nhead = ovpn_get_hash_head(peer->ovpn->peers->by_transp_addr,
+ &bind->remote, salen);
+ hlist_nulls_add_head_rcu(&peer->hash_entry_transp_addr, nhead);
+ spin_unlock_bh(&peer->lock);
+ spin_unlock_bh(&peer->ovpn->lock);
+ }
+ return;
+unlock:
+ spin_unlock_bh(&peer->lock);
+}
+
+/**
+ * ovpn_peer_release_rcu - RCU callback performing last peer release steps
+ * @head: RCU member of the ovpn_peer
+ */
+static void ovpn_peer_release_rcu(struct rcu_head *head)
+{
+ struct ovpn_peer *peer = container_of(head, struct ovpn_peer, rcu);
+
+ /* this call will immediately free the dst_cache, therefore we
+ * perform it in the RCU callback, when all contexts are done
+ */
+ dst_cache_destroy(&peer->dst_cache);
+ kfree(peer);
+}
+
+/**
+ * ovpn_peer_release - release peer private members
+ * @peer: the peer to release
+ */
+void ovpn_peer_release(struct ovpn_peer *peer)
+{
+ ovpn_crypto_state_release(&peer->crypto);
+ spin_lock_bh(&peer->lock);
+ ovpn_bind_reset(peer, NULL);
+ spin_unlock_bh(&peer->lock);
+ call_rcu(&peer->rcu, ovpn_peer_release_rcu);
+ netdev_put(peer->ovpn->dev, &peer->dev_tracker);
+}
+
+/**
+ * ovpn_peer_release_kref - callback for kref_put
+ * @kref: the kref object belonging to the peer
+ */
+void ovpn_peer_release_kref(struct kref *kref)
+{
+ struct ovpn_peer *peer = container_of(kref, struct ovpn_peer, refcount);
+
+ ovpn_peer_release(peer);
+}
+
+/**
+ * ovpn_peer_skb_to_sockaddr - fill sockaddr with skb source address
+ * @skb: the packet to extract data from
+ * @ss: the sockaddr to fill
+ *
+ * Return: sockaddr length on success or -1 otherwise
+ */
+static int ovpn_peer_skb_to_sockaddr(struct sk_buff *skb,
+ struct sockaddr_storage *ss)
+{
+ struct sockaddr_in6 *sa6;
+ struct sockaddr_in *sa4;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ sa4 = (struct sockaddr_in *)ss;
+ sa4->sin_family = AF_INET;
+ sa4->sin_addr.s_addr = ip_hdr(skb)->saddr;
+ sa4->sin_port = udp_hdr(skb)->source;
+ return sizeof(*sa4);
+ case htons(ETH_P_IPV6):
+ sa6 = (struct sockaddr_in6 *)ss;
+ sa6->sin6_family = AF_INET6;
+ sa6->sin6_addr = ipv6_hdr(skb)->saddr;
+ sa6->sin6_port = udp_hdr(skb)->source;
+ return sizeof(*sa6);
+ }
+
+ return -1;
+}
+
+/**
+ * ovpn_nexthop_from_skb4 - retrieve IPv4 nexthop for outgoing skb
+ * @skb: the outgoing packet
+ *
+ * Return: the IPv4 of the nexthop
+ */
+static __be32 ovpn_nexthop_from_skb4(struct sk_buff *skb)
+{
+ const struct rtable *rt = skb_rtable(skb);
+
+ if (rt && rt->rt_uses_gateway)
+ return rt->rt_gw4;
+
+ return ip_hdr(skb)->daddr;
+}
+
+/**
+ * ovpn_nexthop_from_skb6 - retrieve IPv6 nexthop for outgoing skb
+ * @skb: the outgoing packet
+ *
+ * Return: the IPv6 of the nexthop
+ */
+static struct in6_addr ovpn_nexthop_from_skb6(struct sk_buff *skb)
+{
+ const struct rt6_info *rt = skb_rt6_info(skb);
+
+ if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
+ return ipv6_hdr(skb)->daddr;
+
+ return rt->rt6i_gateway;
+}
+
+/**
+ * ovpn_peer_get_by_vpn_addr4 - retrieve peer by its VPN IPv4 address
+ * @ovpn: the openvpn instance to search
+ * @addr: VPN IPv4 to use as search key
+ *
+ * Refcounter is not increased for the returned peer.
+ *
+ * Return: the peer if found or NULL otherwise
+ */
+static struct ovpn_peer *ovpn_peer_get_by_vpn_addr4(struct ovpn_priv *ovpn,
+ __be32 addr)
+{
+ struct hlist_nulls_head *nhead;
+ struct hlist_nulls_node *ntmp;
+ struct ovpn_peer *tmp;
+ unsigned int slot;
+
+begin:
+ slot = ovpn_get_hash_slot(ovpn->peers->by_vpn_addr4, &addr,
+ sizeof(addr));
+ nhead = &ovpn->peers->by_vpn_addr4[slot];
+
+ hlist_nulls_for_each_entry_rcu(tmp, ntmp, nhead, hash_entry_addr4)
+ if (addr == tmp->vpn_addrs.ipv4.s_addr)
+ return tmp;
+
+ /* item may have moved during lookup - check nulls and restart
+ * if that's the case
+ */
+ if (get_nulls_value(ntmp) != slot)
+ goto begin;
+
+ return NULL;
+}
+
+/**
+ * ovpn_peer_get_by_vpn_addr6 - retrieve peer by its VPN IPv6 address
+ * @ovpn: the openvpn instance to search
+ * @addr: VPN IPv6 to use as search key
+ *
+ * Refcounter is not increased for the returned peer.
+ *
+ * Return: the peer if found or NULL otherwise
+ */
+static struct ovpn_peer *ovpn_peer_get_by_vpn_addr6(struct ovpn_priv *ovpn,
+ struct in6_addr *addr)
+{
+ struct hlist_nulls_head *nhead;
+ struct hlist_nulls_node *ntmp;
+ struct ovpn_peer *tmp;
+ unsigned int slot;
+
+begin:
+ slot = ovpn_get_hash_slot(ovpn->peers->by_vpn_addr6, addr,
+ sizeof(*addr));
+ nhead = &ovpn->peers->by_vpn_addr6[slot];
+
+ hlist_nulls_for_each_entry_rcu(tmp, ntmp, nhead, hash_entry_addr6)
+ if (ipv6_addr_equal(addr, &tmp->vpn_addrs.ipv6))
+ return tmp;
+
+ /* item may have moved during lookup - check nulls and restart
+ * if that's the case
+ */
+ if (get_nulls_value(ntmp) != slot)
+ goto begin;
+
+ return NULL;
+}
+
+/**
+ * ovpn_peer_transp_match - check if sockaddr and peer binding match
+ * @peer: the peer to get the binding from
+ * @ss: the sockaddr to match
+ *
+ * Return: true if sockaddr and binding match or false otherwise
+ */
+static bool ovpn_peer_transp_match(const struct ovpn_peer *peer,
+ const struct sockaddr_storage *ss)
+{
+ struct ovpn_bind *bind = rcu_dereference(peer->bind);
+ struct sockaddr_in6 *sa6;
+ struct sockaddr_in *sa4;
+
+ if (unlikely(!bind))
+ return false;
+
+ if (ss->ss_family != bind->remote.in4.sin_family)
+ return false;
+
+ switch (ss->ss_family) {
+ case AF_INET:
+ sa4 = (struct sockaddr_in *)ss;
+ if (sa4->sin_addr.s_addr != bind->remote.in4.sin_addr.s_addr)
+ return false;
+ if (sa4->sin_port != bind->remote.in4.sin_port)
+ return false;
+ break;
+ case AF_INET6:
+ sa6 = (struct sockaddr_in6 *)ss;
+ if (!ipv6_addr_equal(&sa6->sin6_addr,
+ &bind->remote.in6.sin6_addr))
+ return false;
+ if (sa6->sin6_port != bind->remote.in6.sin6_port)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ovpn_peer_get_by_transp_addr_p2p - get peer by transport address in a P2P
+ * instance
+ * @ovpn: the openvpn instance to search
+ * @ss: the transport socket address
+ *
+ * Return: the peer if found or NULL otherwise
+ */
+static struct ovpn_peer *
+ovpn_peer_get_by_transp_addr_p2p(struct ovpn_priv *ovpn,
+ struct sockaddr_storage *ss)
+{
+ struct ovpn_peer *tmp, *peer = NULL;
+
+ rcu_read_lock();
+ tmp = rcu_dereference(ovpn->peer);
+ if (likely(tmp && ovpn_peer_transp_match(tmp, ss) &&
+ ovpn_peer_hold(tmp)))
+ peer = tmp;
+ rcu_read_unlock();
+
+ return peer;
+}
+
+/**
+ * ovpn_peer_get_by_transp_addr - retrieve peer by transport address
+ * @ovpn: the openvpn instance to search
+ * @skb: the skb to retrieve the source transport address from
+ *
+ * Return: a pointer to the peer if found or NULL otherwise
+ */
+struct ovpn_peer *ovpn_peer_get_by_transp_addr(struct ovpn_priv *ovpn,
+ struct sk_buff *skb)
+{
+ struct ovpn_peer *tmp, *peer = NULL;
+ struct sockaddr_storage ss = { 0 };
+ struct hlist_nulls_head *nhead;
+ struct hlist_nulls_node *ntmp;
+ unsigned int slot;
+ ssize_t sa_len;
+
+ sa_len = ovpn_peer_skb_to_sockaddr(skb, &ss);
+ if (unlikely(sa_len < 0))
+ return NULL;
+
+ if (ovpn->mode == OVPN_MODE_P2P)
+ return ovpn_peer_get_by_transp_addr_p2p(ovpn, &ss);
+
+ rcu_read_lock();
+begin:
+ slot = ovpn_get_hash_slot(ovpn->peers->by_transp_addr, &ss, sa_len);
+ nhead = &ovpn->peers->by_transp_addr[slot];
+
+ hlist_nulls_for_each_entry_rcu(tmp, ntmp, nhead,
+ hash_entry_transp_addr) {
+ if (!ovpn_peer_transp_match(tmp, &ss))
+ continue;
+
+ if (!ovpn_peer_hold(tmp))
+ continue;
+
+ peer = tmp;
+ break;
+ }
+
+ /* item may have moved during lookup - check nulls and restart
+ * if that's the case
+ */
+ if (!peer && get_nulls_value(ntmp) != slot)
+ goto begin;
+ rcu_read_unlock();
+
+ return peer;
+}
+
+/**
+ * ovpn_peer_get_by_id_p2p - get peer by ID in a P2P instance
+ * @ovpn: the openvpn instance to search
+ * @peer_id: the ID of the peer to find
+ *
+ * Return: the peer if found or NULL otherwise
+ */
+static struct ovpn_peer *ovpn_peer_get_by_id_p2p(struct ovpn_priv *ovpn,
+ u32 peer_id)
+{
+ struct ovpn_peer *tmp, *peer = NULL;
+
+ rcu_read_lock();
+ tmp = rcu_dereference(ovpn->peer);
+ if (likely(tmp && tmp->id == peer_id && ovpn_peer_hold(tmp)))
+ peer = tmp;
+ rcu_read_unlock();
+
+ return peer;
+}
+
+/**
+ * ovpn_peer_get_by_id - retrieve peer by ID
+ * @ovpn: the openvpn instance to search
+ * @peer_id: the unique peer identifier to match
+ *
+ * Return: a pointer to the peer if found or NULL otherwise
+ */
+struct ovpn_peer *ovpn_peer_get_by_id(struct ovpn_priv *ovpn, u32 peer_id)
+{
+ struct ovpn_peer *tmp, *peer = NULL;
+ struct hlist_head *head;
+
+ if (ovpn->mode == OVPN_MODE_P2P)
+ return ovpn_peer_get_by_id_p2p(ovpn, peer_id);
+
+ head = ovpn_get_hash_head(ovpn->peers->by_id, &peer_id,
+ sizeof(peer_id));
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(tmp, head, hash_entry_id) {
+ if (tmp->id != peer_id)
+ continue;
+
+ if (!ovpn_peer_hold(tmp))
+ continue;
+
+ peer = tmp;
+ break;
+ }
+ rcu_read_unlock();
+
+ return peer;
+}
+
+static void ovpn_peer_remove(struct ovpn_peer *peer,
+ enum ovpn_del_peer_reason reason,
+ struct llist_head *release_list)
+{
+ lockdep_assert_held(&peer->ovpn->lock);
+
+ switch (peer->ovpn->mode) {
+ case OVPN_MODE_MP:
+ /* prevent double remove */
+ if (hlist_unhashed(&peer->hash_entry_id))
+ return;
+
+ hlist_del_init_rcu(&peer->hash_entry_id);
+ hlist_nulls_del_init_rcu(&peer->hash_entry_addr4);
+ hlist_nulls_del_init_rcu(&peer->hash_entry_addr6);
+ hlist_nulls_del_init_rcu(&peer->hash_entry_transp_addr);
+ break;
+ case OVPN_MODE_P2P:
+ /* prevent double remove */
+ if (peer != rcu_access_pointer(peer->ovpn->peer))
+ return;
+
+ RCU_INIT_POINTER(peer->ovpn->peer, NULL);
+ /* in P2P mode the carrier is switched off when the peer is
+ * deleted so that third party protocols can react accordingly
+ */
+ netif_carrier_off(peer->ovpn->dev);
+ break;
+ }
+
+ peer->delete_reason = reason;
+ ovpn_nl_peer_del_notify(peer);
+
+ /* append to provided list for later socket release and ref drop */
+ llist_add(&peer->release_entry, release_list);
+}
+
+/**
+ * ovpn_peer_get_by_dst - Lookup peer to send skb to
+ * @ovpn: the private data representing the current VPN session
+ * @skb: the skb to extract the destination address from
+ *
+ * This function takes a tunnel packet and looks up the peer to send it to
+ * after encapsulation. The skb is expected to be the in-tunnel packet, without
+ * any OpenVPN related header.
+ *
+ * Assume that the IP header is accessible in the skb data.
+ *
+ * Return: the peer if found or NULL otherwise.
+ */
+struct ovpn_peer *ovpn_peer_get_by_dst(struct ovpn_priv *ovpn,
+ struct sk_buff *skb)
+{
+ struct ovpn_peer *peer = NULL;
+ struct in6_addr addr6;
+ __be32 addr4;
+
+ /* in P2P mode, no matter the destination, packets are always sent to
+ * the single peer listening on the other side
+ */
+ if (ovpn->mode == OVPN_MODE_P2P) {
+ rcu_read_lock();
+ peer = rcu_dereference(ovpn->peer);
+ if (unlikely(peer && !ovpn_peer_hold(peer)))
+ peer = NULL;
+ rcu_read_unlock();
+ return peer;
+ }
+
+ rcu_read_lock();
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ addr4 = ovpn_nexthop_from_skb4(skb);
+ peer = ovpn_peer_get_by_vpn_addr4(ovpn, addr4);
+ break;
+ case htons(ETH_P_IPV6):
+ addr6 = ovpn_nexthop_from_skb6(skb);
+ peer = ovpn_peer_get_by_vpn_addr6(ovpn, &addr6);
+ break;
+ }
+
+ if (unlikely(peer && !ovpn_peer_hold(peer)))
+ peer = NULL;
+ rcu_read_unlock();
+
+ return peer;
+}
+
+/**
+ * ovpn_nexthop_from_rt4 - look up the IPv4 nexthop for the given destination
+ * @ovpn: the private data representing the current VPN session
+ * @dest: the destination to be looked up
+ *
+ * Looks up in the IPv4 system routing table the IP of the nexthop to be used
+ * to reach the destination passed as argument. If no nexthop can be found, the
+ * destination itself is returned as it probably has to be used as nexthop.
+ *
+ * Return: the IP of the next hop if found or dest itself otherwise
+ */
+static __be32 ovpn_nexthop_from_rt4(struct ovpn_priv *ovpn, __be32 dest)
+{
+ struct rtable *rt;
+ struct flowi4 fl = {
+ .daddr = dest
+ };
+
+ rt = ip_route_output_flow(dev_net(ovpn->dev), &fl, NULL);
+ if (IS_ERR(rt)) {
+ net_dbg_ratelimited("%s: no route to host %pI4\n",
+ netdev_name(ovpn->dev), &dest);
+ /* if we end up here this packet is probably going to be
+ * thrown away later
+ */
+ return dest;
+ }
+
+ if (!rt->rt_uses_gateway)
+ goto out;
+
+ dest = rt->rt_gw4;
+out:
+ ip_rt_put(rt);
+ return dest;
+}
+
+/**
+ * ovpn_nexthop_from_rt6 - look up the IPv6 nexthop for the given destination
+ * @ovpn: the private data representing the current VPN session
+ * @dest: the destination to be looked up
+ *
+ * Looks up in the IPv6 system routing table the IP of the nexthop to be used
+ * to reach the destination passed as argument. If no nexthop can be found, the
+ * destination itself is returned as it probably has to be used as nexthop.
+ *
+ * Return: the IP of the next hop if found or dest itself otherwise
+ */
+static struct in6_addr ovpn_nexthop_from_rt6(struct ovpn_priv *ovpn,
+ struct in6_addr dest)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ struct dst_entry *entry;
+ struct rt6_info *rt;
+ struct flowi6 fl = {
+ .daddr = dest,
+ };
+
+ entry = ipv6_stub->ipv6_dst_lookup_flow(dev_net(ovpn->dev), NULL, &fl,
+ NULL);
+ if (IS_ERR(entry)) {
+ net_dbg_ratelimited("%s: no route to host %pI6c\n",
+ netdev_name(ovpn->dev), &dest);
+ /* if we end up here this packet is probably going to be
+ * thrown away later
+ */
+ return dest;
+ }
+
+ rt = dst_rt6_info(entry);
+
+ if (!(rt->rt6i_flags & RTF_GATEWAY))
+ goto out;
+
+ dest = rt->rt6i_gateway;
+out:
+ dst_release((struct dst_entry *)rt);
+#endif
+ return dest;
+}
+
+/**
+ * ovpn_peer_check_by_src - check that skb source is routed via peer
+ * @ovpn: the openvpn instance to search
+ * @skb: the packet to extract source address from
+ * @peer: the peer to check against the source address
+ *
+ * Return: true if the peer is matching or false otherwise
+ */
+bool ovpn_peer_check_by_src(struct ovpn_priv *ovpn, struct sk_buff *skb,
+ struct ovpn_peer *peer)
+{
+ bool match = false;
+ struct in6_addr addr6;
+ __be32 addr4;
+
+ if (ovpn->mode == OVPN_MODE_P2P) {
+ /* in P2P mode, no matter the destination, packets are always
+ * sent to the single peer listening on the other side
+ */
+ return peer == rcu_access_pointer(ovpn->peer);
+ }
+
+ /* This function performs a reverse path check, therefore we now
+ * lookup the nexthop we would use if we wanted to route a packet
+ * to the source IP. If the nexthop matches the sender we know the
+ * latter is valid and we allow the packet to come in
+ */
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ addr4 = ovpn_nexthop_from_rt4(ovpn, ip_hdr(skb)->saddr);
+ rcu_read_lock();
+ match = (peer == ovpn_peer_get_by_vpn_addr4(ovpn, addr4));
+ rcu_read_unlock();
+ break;
+ case htons(ETH_P_IPV6):
+ addr6 = ovpn_nexthop_from_rt6(ovpn, ipv6_hdr(skb)->saddr);
+ rcu_read_lock();
+ match = (peer == ovpn_peer_get_by_vpn_addr6(ovpn, &addr6));
+ rcu_read_unlock();
+ break;
+ }
+
+ return match;
+}
+
+void ovpn_peer_hash_vpn_ip(struct ovpn_peer *peer)
+{
+ struct hlist_nulls_head *nhead;
+
+ lockdep_assert_held(&peer->ovpn->lock);
+
+ /* rehashing makes sense only in multipeer mode */
+ if (peer->ovpn->mode != OVPN_MODE_MP)
+ return;
+
+ if (peer->vpn_addrs.ipv4.s_addr != htonl(INADDR_ANY)) {
+ /* remove potential old hashing */
+ hlist_nulls_del_init_rcu(&peer->hash_entry_addr4);
+
+ nhead = ovpn_get_hash_head(peer->ovpn->peers->by_vpn_addr4,
+ &peer->vpn_addrs.ipv4,
+ sizeof(peer->vpn_addrs.ipv4));
+ hlist_nulls_add_head_rcu(&peer->hash_entry_addr4, nhead);
+ }
+
+ if (!ipv6_addr_any(&peer->vpn_addrs.ipv6)) {
+ /* remove potential old hashing */
+ hlist_nulls_del_init_rcu(&peer->hash_entry_addr6);
+
+ nhead = ovpn_get_hash_head(peer->ovpn->peers->by_vpn_addr6,
+ &peer->vpn_addrs.ipv6,
+ sizeof(peer->vpn_addrs.ipv6));
+ hlist_nulls_add_head_rcu(&peer->hash_entry_addr6, nhead);
+ }
+}
+
+/**
+ * ovpn_peer_add_mp - add peer to related tables in a MP instance
+ * @ovpn: the instance to add the peer to
+ * @peer: the peer to add
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+static int ovpn_peer_add_mp(struct ovpn_priv *ovpn, struct ovpn_peer *peer)
+{
+ struct sockaddr_storage sa = { 0 };
+ struct hlist_nulls_head *nhead;
+ struct sockaddr_in6 *sa6;
+ struct sockaddr_in *sa4;
+ struct ovpn_bind *bind;
+ struct ovpn_peer *tmp;
+ size_t salen;
+ int ret = 0;
+
+ spin_lock_bh(&ovpn->lock);
+ /* do not add duplicates */
+ tmp = ovpn_peer_get_by_id(ovpn, peer->id);
+ if (tmp) {
+ ovpn_peer_put(tmp);
+ ret = -EEXIST;
+ goto out;
+ }
+
+ bind = rcu_dereference_protected(peer->bind, true);
+ /* peers connected via TCP have bind == NULL */
+ if (bind) {
+ switch (bind->remote.in4.sin_family) {
+ case AF_INET:
+ sa4 = (struct sockaddr_in *)&sa;
+
+ sa4->sin_family = AF_INET;
+ sa4->sin_addr.s_addr = bind->remote.in4.sin_addr.s_addr;
+ sa4->sin_port = bind->remote.in4.sin_port;
+ salen = sizeof(*sa4);
+ break;
+ case AF_INET6:
+ sa6 = (struct sockaddr_in6 *)&sa;
+
+ sa6->sin6_family = AF_INET6;
+ sa6->sin6_addr = bind->remote.in6.sin6_addr;
+ sa6->sin6_port = bind->remote.in6.sin6_port;
+ salen = sizeof(*sa6);
+ break;
+ default:
+ ret = -EPROTONOSUPPORT;
+ goto out;
+ }
+
+ nhead = ovpn_get_hash_head(ovpn->peers->by_transp_addr, &sa,
+ salen);
+ hlist_nulls_add_head_rcu(&peer->hash_entry_transp_addr, nhead);
+ }
+
+ hlist_add_head_rcu(&peer->hash_entry_id,
+ ovpn_get_hash_head(ovpn->peers->by_id, &peer->id,
+ sizeof(peer->id)));
+
+ ovpn_peer_hash_vpn_ip(peer);
+out:
+ spin_unlock_bh(&ovpn->lock);
+ return ret;
+}
+
+/**
+ * ovpn_peer_add_p2p - add peer to related tables in a P2P instance
+ * @ovpn: the instance to add the peer to
+ * @peer: the peer to add
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+static int ovpn_peer_add_p2p(struct ovpn_priv *ovpn, struct ovpn_peer *peer)
+{
+ LLIST_HEAD(release_list);
+ struct ovpn_peer *tmp;
+
+ spin_lock_bh(&ovpn->lock);
+ /* in p2p mode it is possible to have a single peer only, therefore the
+ * old one is released and substituted by the new one
+ */
+ tmp = rcu_dereference_protected(ovpn->peer,
+ lockdep_is_held(&ovpn->lock));
+ if (tmp)
+ ovpn_peer_remove(tmp, OVPN_DEL_PEER_REASON_TEARDOWN,
+ &release_list);
+
+ rcu_assign_pointer(ovpn->peer, peer);
+ /* in P2P mode the carrier is switched on when the peer is added */
+ netif_carrier_on(ovpn->dev);
+ unlock_ovpn(ovpn, &release_list);
+
+ return 0;
+}
+
+/**
+ * ovpn_peer_add - add peer to the related tables
+ * @ovpn: the openvpn instance the peer belongs to
+ * @peer: the peer object to add
+ *
+ * Assume refcounter was increased by caller
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int ovpn_peer_add(struct ovpn_priv *ovpn, struct ovpn_peer *peer)
+{
+ switch (ovpn->mode) {
+ case OVPN_MODE_MP:
+ return ovpn_peer_add_mp(ovpn, peer);
+ case OVPN_MODE_P2P:
+ return ovpn_peer_add_p2p(ovpn, peer);
+ }
+
+ return -EOPNOTSUPP;
+}
+
+/**
+ * ovpn_peer_del_mp - delete peer from related tables in a MP instance
+ * @peer: the peer to delete
+ * @reason: reason why the peer was deleted (sent to userspace)
+ * @release_list: list where the deleted peer should be appended
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+static int ovpn_peer_del_mp(struct ovpn_peer *peer,
+ enum ovpn_del_peer_reason reason,
+ struct llist_head *release_list)
+{
+ struct ovpn_peer *tmp;
+ int ret = -ENOENT;
+
+ lockdep_assert_held(&peer->ovpn->lock);
+
+ tmp = ovpn_peer_get_by_id(peer->ovpn, peer->id);
+ if (tmp == peer) {
+ ovpn_peer_remove(peer, reason, release_list);
+ ret = 0;
+ }
+
+ if (tmp)
+ ovpn_peer_put(tmp);
+
+ return ret;
+}
+
+/**
+ * ovpn_peer_del_p2p - delete peer from related tables in a P2P instance
+ * @peer: the peer to delete
+ * @reason: reason why the peer was deleted (sent to userspace)
+ * @release_list: list where the deleted peer should be appended
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+static int ovpn_peer_del_p2p(struct ovpn_peer *peer,
+ enum ovpn_del_peer_reason reason,
+ struct llist_head *release_list)
+{
+ struct ovpn_peer *tmp;
+
+ lockdep_assert_held(&peer->ovpn->lock);
+
+ tmp = rcu_dereference_protected(peer->ovpn->peer,
+ lockdep_is_held(&peer->ovpn->lock));
+ if (tmp != peer)
+ return -ENOENT;
+
+ ovpn_peer_remove(peer, reason, release_list);
+
+ return 0;
+}
+
+/**
+ * ovpn_peer_del - delete peer from related tables
+ * @peer: the peer object to delete
+ * @reason: reason for deleting peer (will be sent to userspace)
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int ovpn_peer_del(struct ovpn_peer *peer, enum ovpn_del_peer_reason reason)
+{
+ LLIST_HEAD(release_list);
+ int ret = -EOPNOTSUPP;
+
+ spin_lock_bh(&peer->ovpn->lock);
+ switch (peer->ovpn->mode) {
+ case OVPN_MODE_MP:
+ ret = ovpn_peer_del_mp(peer, reason, &release_list);
+ break;
+ case OVPN_MODE_P2P:
+ ret = ovpn_peer_del_p2p(peer, reason, &release_list);
+ break;
+ default:
+ break;
+ }
+ unlock_ovpn(peer->ovpn, &release_list);
+
+ return ret;
+}
+
+/**
+ * ovpn_peer_release_p2p - release peer upon P2P device teardown
+ * @ovpn: the instance being torn down
+ * @sk: if not NULL, release peer only if it's using this specific socket
+ * @reason: the reason for releasing the peer
+ */
+static void ovpn_peer_release_p2p(struct ovpn_priv *ovpn, struct sock *sk,
+ enum ovpn_del_peer_reason reason)
+{
+ struct ovpn_socket *ovpn_sock;
+ LLIST_HEAD(release_list);
+ struct ovpn_peer *peer;
+
+ spin_lock_bh(&ovpn->lock);
+ peer = rcu_dereference_protected(ovpn->peer,
+ lockdep_is_held(&ovpn->lock));
+ if (!peer) {
+ spin_unlock_bh(&ovpn->lock);
+ return;
+ }
+
+ if (sk) {
+ ovpn_sock = rcu_access_pointer(peer->sock);
+ if (!ovpn_sock || ovpn_sock->sk != sk) {
+ spin_unlock_bh(&ovpn->lock);
+ ovpn_peer_put(peer);
+ return;
+ }
+ }
+
+ ovpn_peer_remove(peer, reason, &release_list);
+ unlock_ovpn(ovpn, &release_list);
+}
+
+static void ovpn_peers_release_mp(struct ovpn_priv *ovpn, struct sock *sk,
+ enum ovpn_del_peer_reason reason)
+{
+ struct ovpn_socket *ovpn_sock;
+ LLIST_HEAD(release_list);
+ struct ovpn_peer *peer;
+ struct hlist_node *tmp;
+ int bkt;
+
+ spin_lock_bh(&ovpn->lock);
+ hash_for_each_safe(ovpn->peers->by_id, bkt, tmp, peer, hash_entry_id) {
+ bool remove = true;
+
+ /* if a socket was passed as argument, skip all peers except
+ * those using it
+ */
+ if (sk) {
+ rcu_read_lock();
+ ovpn_sock = rcu_dereference(peer->sock);
+ remove = ovpn_sock && ovpn_sock->sk == sk;
+ rcu_read_unlock();
+ }
+
+ if (remove)
+ ovpn_peer_remove(peer, reason, &release_list);
+ }
+ unlock_ovpn(ovpn, &release_list);
+}
+
+/**
+ * ovpn_peers_free - free all peers in the instance
+ * @ovpn: the instance whose peers should be released
+ * @sk: if not NULL, only peers using this socket are removed and the socket
+ * is released immediately
+ * @reason: the reason for releasing all peers
+ */
+void ovpn_peers_free(struct ovpn_priv *ovpn, struct sock *sk,
+ enum ovpn_del_peer_reason reason)
+{
+ switch (ovpn->mode) {
+ case OVPN_MODE_P2P:
+ ovpn_peer_release_p2p(ovpn, sk, reason);
+ break;
+ case OVPN_MODE_MP:
+ ovpn_peers_release_mp(ovpn, sk, reason);
+ break;
+ }
+}
+
+static time64_t ovpn_peer_keepalive_work_single(struct ovpn_peer *peer,
+ time64_t now,
+ struct llist_head *release_list)
+{
+ time64_t last_recv, last_sent, next_run1, next_run2;
+ unsigned long timeout, interval;
+ bool expired;
+
+ spin_lock_bh(&peer->lock);
+ /* we expect both timers to be configured at the same time,
+ * therefore bail out if either is not set
+ */
+ if (!peer->keepalive_timeout || !peer->keepalive_interval) {
+ spin_unlock_bh(&peer->lock);
+ return 0;
+ }
+
+ /* check for peer timeout */
+ expired = false;
+ timeout = peer->keepalive_timeout;
+ last_recv = READ_ONCE(peer->last_recv);
+ if (now < last_recv + timeout) {
+ peer->keepalive_recv_exp = last_recv + timeout;
+ next_run1 = peer->keepalive_recv_exp;
+ } else if (peer->keepalive_recv_exp > now) {
+ next_run1 = peer->keepalive_recv_exp;
+ } else {
+ expired = true;
+ }
+
+ if (expired) {
+ /* peer is dead -> kill it and move on */
+ spin_unlock_bh(&peer->lock);
+ netdev_dbg(peer->ovpn->dev, "peer %u expired\n",
+ peer->id);
+ ovpn_peer_remove(peer, OVPN_DEL_PEER_REASON_EXPIRED,
+ release_list);
+ return 0;
+ }
+
+ /* check for peer keepalive */
+ expired = false;
+ interval = peer->keepalive_interval;
+ last_sent = READ_ONCE(peer->last_sent);
+ if (now < last_sent + interval) {
+ peer->keepalive_xmit_exp = last_sent + interval;
+ next_run2 = peer->keepalive_xmit_exp;
+ } else if (peer->keepalive_xmit_exp > now) {
+ next_run2 = peer->keepalive_xmit_exp;
+ } else {
+ expired = true;
+ next_run2 = now + interval;
+ }
+ spin_unlock_bh(&peer->lock);
+
+ if (expired) {
+ /* a keepalive packet is required */
+ netdev_dbg(peer->ovpn->dev,
+ "sending keepalive to peer %u\n",
+ peer->id);
+ if (schedule_work(&peer->keepalive_work))
+ ovpn_peer_hold(peer);
+ }
+
+ if (next_run1 < next_run2)
+ return next_run1;
+
+ return next_run2;
+}
+
+static time64_t ovpn_peer_keepalive_work_mp(struct ovpn_priv *ovpn,
+ time64_t now,
+ struct llist_head *release_list)
+{
+ time64_t tmp_next_run, next_run = 0;
+ struct hlist_node *tmp;
+ struct ovpn_peer *peer;
+ int bkt;
+
+ lockdep_assert_held(&ovpn->lock);
+
+ hash_for_each_safe(ovpn->peers->by_id, bkt, tmp, peer, hash_entry_id) {
+ tmp_next_run = ovpn_peer_keepalive_work_single(peer, now,
+ release_list);
+ if (!tmp_next_run)
+ continue;
+
+ /* the next worker run will be scheduled based on the shortest
+ * required interval across all peers
+ */
+ if (!next_run || tmp_next_run < next_run)
+ next_run = tmp_next_run;
+ }
+
+ return next_run;
+}
+
+static time64_t ovpn_peer_keepalive_work_p2p(struct ovpn_priv *ovpn,
+ time64_t now,
+ struct llist_head *release_list)
+{
+ struct ovpn_peer *peer;
+ time64_t next_run = 0;
+
+ lockdep_assert_held(&ovpn->lock);
+
+ peer = rcu_dereference_protected(ovpn->peer,
+ lockdep_is_held(&ovpn->lock));
+ if (peer)
+ next_run = ovpn_peer_keepalive_work_single(peer, now,
+ release_list);
+
+ return next_run;
+}
+
+/**
+ * ovpn_peer_keepalive_work - run keepalive logic on each known peer
+ * @work: pointer to the work member of the related ovpn object
+ *
+ * Each peer has two timers (if configured):
+ * 1. peer timeout: when no data is received for a certain interval,
+ * the peer is considered dead and it gets killed.
+ * 2. peer keepalive: when no data is sent to a certain peer for a
+ * certain interval, a special 'keepalive' packet is explicitly sent.
+ *
+ * This function iterates across the whole peer collection while
+ * checking the timers described above.
+ */
+void ovpn_peer_keepalive_work(struct work_struct *work)
+{
+ struct ovpn_priv *ovpn = container_of(work, struct ovpn_priv,
+ keepalive_work.work);
+ time64_t next_run = 0, now = ktime_get_real_seconds();
+ LLIST_HEAD(release_list);
+
+ spin_lock_bh(&ovpn->lock);
+ switch (ovpn->mode) {
+ case OVPN_MODE_MP:
+ next_run = ovpn_peer_keepalive_work_mp(ovpn, now,
+ &release_list);
+ break;
+ case OVPN_MODE_P2P:
+ next_run = ovpn_peer_keepalive_work_p2p(ovpn, now,
+ &release_list);
+ break;
+ }
+
+ /* prevent rearming if the interface is being destroyed */
+ if (next_run > 0) {
+ netdev_dbg(ovpn->dev,
+ "scheduling keepalive work: now=%llu next_run=%llu delta=%llu\n",
+ now, next_run, next_run - now);
+ schedule_delayed_work(&ovpn->keepalive_work,
+ (next_run - now) * HZ);
+ }
+ unlock_ovpn(ovpn, &release_list);
+}
diff --git a/drivers/net/ovpn/peer.h b/drivers/net/ovpn/peer.h
new file mode 100644
index 000000000000..a1423f2b09e0
--- /dev/null
+++ b/drivers/net/ovpn/peer.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPNPEER_H_
+#define _NET_OVPN_OVPNPEER_H_
+
+#include <net/dst_cache.h>
+#include <net/strparser.h>
+
+#include "crypto.h"
+#include "socket.h"
+#include "stats.h"
+
+/**
+ * struct ovpn_peer - the main remote peer object
+ * @ovpn: main openvpn instance this peer belongs to
+ * @dev_tracker: reference tracker for associated dev
+ * @id: unique identifier
+ * @vpn_addrs: IP addresses assigned over the tunnel
+ * @vpn_addrs.ipv4: IPv4 assigned to peer on the tunnel
+ * @vpn_addrs.ipv6: IPv6 assigned to peer on the tunnel
+ * @hash_entry_id: entry in the peer ID hashtable
+ * @hash_entry_addr4: entry in the peer IPv4 hashtable
+ * @hash_entry_addr6: entry in the peer IPv6 hashtable
+ * @hash_entry_transp_addr: entry in the peer transport address hashtable
+ * @sock: the socket being used to talk to this peer
+ * @tcp: keeps track of TCP specific state
+ * @tcp.strp: stream parser context (TCP only)
+ * @tcp.user_queue: received packets that have to go to userspace (TCP only)
+ * @tcp.out_queue: packets on hold while socket is taken by user (TCP only)
+ * @tcp.tx_in_progress: true if TX is already ongoing (TCP only)
+ * @tcp.out_msg.skb: packet scheduled for sending (TCP only)
+ * @tcp.out_msg.offset: offset where next send should start (TCP only)
+ * @tcp.out_msg.len: remaining data to send within packet (TCP only)
+ * @tcp.sk_cb.sk_data_ready: pointer to original cb (TCP only)
+ * @tcp.sk_cb.sk_write_space: pointer to original cb (TCP only)
+ * @tcp.sk_cb.prot: pointer to original prot object (TCP only)
+ * @tcp.sk_cb.ops: pointer to the original prot_ops object (TCP only)
+ * @crypto: the crypto configuration (ciphers, keys, etc..)
+ * @dst_cache: cache for dst_entry used to send to peer
+ * @bind: remote peer binding
+ * @keepalive_interval: seconds after which a new keepalive should be sent
+ * @keepalive_xmit_exp: future timestamp when next keepalive should be sent
+ * @last_sent: timestamp of the last successfully sent packet
+ * @keepalive_timeout: seconds after which an inactive peer is considered dead
+ * @keepalive_recv_exp: future timestamp when the peer should expire
+ * @last_recv: timestamp of the last authenticated received packet
+ * @vpn_stats: per-peer in-VPN TX/RX stats
+ * @link_stats: per-peer link/transport TX/RX stats
+ * @delete_reason: why peer was deleted (e.g. timeout, transport error, ..)
+ * @lock: protects binding to peer (bind) and keepalive* fields
+ * @refcount: reference counter
+ * @rcu: used to free peer in an RCU safe way
+ * @release_entry: entry for the socket release list
+ * @keepalive_work: used to schedule keepalive sending
+ */
+struct ovpn_peer {
+ struct ovpn_priv *ovpn;
+ netdevice_tracker dev_tracker;
+ u32 id;
+ struct {
+ struct in_addr ipv4;
+ struct in6_addr ipv6;
+ } vpn_addrs;
+ struct hlist_node hash_entry_id;
+ struct hlist_nulls_node hash_entry_addr4;
+ struct hlist_nulls_node hash_entry_addr6;
+ struct hlist_nulls_node hash_entry_transp_addr;
+ struct ovpn_socket __rcu *sock;
+
+ struct {
+ struct strparser strp;
+ struct sk_buff_head user_queue;
+ struct sk_buff_head out_queue;
+ bool tx_in_progress;
+
+ struct {
+ struct sk_buff *skb;
+ int offset;
+ int len;
+ } out_msg;
+
+ struct {
+ void (*sk_data_ready)(struct sock *sk);
+ void (*sk_write_space)(struct sock *sk);
+ struct proto *prot;
+ const struct proto_ops *ops;
+ } sk_cb;
+
+ struct work_struct defer_del_work;
+ } tcp;
+ struct ovpn_crypto_state crypto;
+ struct dst_cache dst_cache;
+ struct ovpn_bind __rcu *bind;
+ unsigned long keepalive_interval;
+ unsigned long keepalive_xmit_exp;
+ time64_t last_sent;
+ unsigned long keepalive_timeout;
+ unsigned long keepalive_recv_exp;
+ time64_t last_recv;
+ struct ovpn_peer_stats vpn_stats;
+ struct ovpn_peer_stats link_stats;
+ enum ovpn_del_peer_reason delete_reason;
+ spinlock_t lock; /* protects bind and keepalive* */
+ struct kref refcount;
+ struct rcu_head rcu;
+ struct llist_node release_entry;
+ struct work_struct keepalive_work;
+};
+
+/**
+ * ovpn_peer_hold - increase reference counter
+ * @peer: the peer whose counter should be increased
+ *
+ * Return: true if the counter was increased or false if it was zero already
+ */
+static inline bool ovpn_peer_hold(struct ovpn_peer *peer)
+{
+ return kref_get_unless_zero(&peer->refcount);
+}
+
+void ovpn_peer_release(struct ovpn_peer *peer);
+void ovpn_peer_release_kref(struct kref *kref);
+
+/**
+ * ovpn_peer_put - decrease reference counter
+ * @peer: the peer whose counter should be decreased
+ */
+static inline void ovpn_peer_put(struct ovpn_peer *peer)
+{
+ kref_put(&peer->refcount, ovpn_peer_release_kref);
+}
+
+struct ovpn_peer *ovpn_peer_new(struct ovpn_priv *ovpn, u32 id);
+int ovpn_peer_add(struct ovpn_priv *ovpn, struct ovpn_peer *peer);
+int ovpn_peer_del(struct ovpn_peer *peer, enum ovpn_del_peer_reason reason);
+void ovpn_peers_free(struct ovpn_priv *ovpn, struct sock *sock,
+ enum ovpn_del_peer_reason reason);
+
+struct ovpn_peer *ovpn_peer_get_by_transp_addr(struct ovpn_priv *ovpn,
+ struct sk_buff *skb);
+struct ovpn_peer *ovpn_peer_get_by_id(struct ovpn_priv *ovpn, u32 peer_id);
+struct ovpn_peer *ovpn_peer_get_by_dst(struct ovpn_priv *ovpn,
+ struct sk_buff *skb);
+void ovpn_peer_hash_vpn_ip(struct ovpn_peer *peer);
+bool ovpn_peer_check_by_src(struct ovpn_priv *ovpn, struct sk_buff *skb,
+ struct ovpn_peer *peer);
+
+void ovpn_peer_keepalive_set(struct ovpn_peer *peer, u32 interval, u32 timeout);
+void ovpn_peer_keepalive_work(struct work_struct *work);
+
+void ovpn_peer_endpoints_update(struct ovpn_peer *peer, struct sk_buff *skb);
+int ovpn_peer_reset_sockaddr(struct ovpn_peer *peer,
+ const struct sockaddr_storage *ss,
+ const void *local_ip);
+
+#endif /* _NET_OVPN_OVPNPEER_H_ */
diff --git a/drivers/net/ovpn/pktid.c b/drivers/net/ovpn/pktid.c
new file mode 100644
index 000000000000..2f29049897e3
--- /dev/null
+++ b/drivers/net/ovpn/pktid.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ * James Yonan <james@openvpn.net>
+ */
+
+#include <linux/atomic.h>
+#include <linux/jiffies.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "pktid.h"
+
+void ovpn_pktid_xmit_init(struct ovpn_pktid_xmit *pid)
+{
+ atomic_set(&pid->seq_num, 1);
+}
+
+void ovpn_pktid_recv_init(struct ovpn_pktid_recv *pr)
+{
+ memset(pr, 0, sizeof(*pr));
+ spin_lock_init(&pr->lock);
+}
+
+/* Packet replay detection.
+ * Allows ID backtrack of up to REPLAY_WINDOW_SIZE - 1.
+ */
+int ovpn_pktid_recv(struct ovpn_pktid_recv *pr, u32 pkt_id, u32 pkt_time)
+{
+ const unsigned long now = jiffies;
+ int ret;
+
+ /* ID must not be zero */
+ if (unlikely(pkt_id == 0))
+ return -EINVAL;
+
+ spin_lock_bh(&pr->lock);
+
+ /* expire backtracks at or below pr->id after PKTID_RECV_EXPIRE time */
+ if (unlikely(time_after_eq(now, pr->expire)))
+ pr->id_floor = pr->id;
+
+ /* time changed? */
+ if (unlikely(pkt_time != pr->time)) {
+ if (pkt_time > pr->time) {
+ /* time moved forward, accept */
+ pr->base = 0;
+ pr->extent = 0;
+ pr->id = 0;
+ pr->time = pkt_time;
+ pr->id_floor = 0;
+ } else {
+ /* time moved backward, reject */
+ ret = -ETIME;
+ goto out;
+ }
+ }
+
+ if (likely(pkt_id == pr->id + 1)) {
+ /* well-formed ID sequence (incremented by 1) */
+ pr->base = REPLAY_INDEX(pr->base, -1);
+ pr->history[pr->base / 8] |= (1 << (pr->base % 8));
+ if (pr->extent < REPLAY_WINDOW_SIZE)
+ ++pr->extent;
+ pr->id = pkt_id;
+ } else if (pkt_id > pr->id) {
+ /* ID jumped forward by more than one */
+ const unsigned int delta = pkt_id - pr->id;
+
+ if (delta < REPLAY_WINDOW_SIZE) {
+ unsigned int i;
+
+ pr->base = REPLAY_INDEX(pr->base, -delta);
+ pr->history[pr->base / 8] |= (1 << (pr->base % 8));
+ pr->extent += delta;
+ if (pr->extent > REPLAY_WINDOW_SIZE)
+ pr->extent = REPLAY_WINDOW_SIZE;
+ for (i = 1; i < delta; ++i) {
+ unsigned int newb = REPLAY_INDEX(pr->base, i);
+
+ pr->history[newb / 8] &= ~BIT(newb % 8);
+ }
+ } else {
+ pr->base = 0;
+ pr->extent = REPLAY_WINDOW_SIZE;
+ memset(pr->history, 0, sizeof(pr->history));
+ pr->history[0] = 1;
+ }
+ pr->id = pkt_id;
+ } else {
+ /* ID backtrack */
+ const unsigned int delta = pr->id - pkt_id;
+
+ if (delta > pr->max_backtrack)
+ pr->max_backtrack = delta;
+ if (delta < pr->extent) {
+ if (pkt_id > pr->id_floor) {
+ const unsigned int ri = REPLAY_INDEX(pr->base,
+ delta);
+ u8 *p = &pr->history[ri / 8];
+ const u8 mask = (1 << (ri % 8));
+
+ if (*p & mask) {
+ ret = -EINVAL;
+ goto out;
+ }
+ *p |= mask;
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ pr->expire = now + PKTID_RECV_EXPIRE;
+ ret = 0;
+out:
+ spin_unlock_bh(&pr->lock);
+ return ret;
+}
diff --git a/drivers/net/ovpn/pktid.h b/drivers/net/ovpn/pktid.h
new file mode 100644
index 000000000000..0262d026d15e
--- /dev/null
+++ b/drivers/net/ovpn/pktid.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ * James Yonan <james@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPNPKTID_H_
+#define _NET_OVPN_OVPNPKTID_H_
+
+#include "proto.h"
+
+/* If no packets received for this length of time, set a backtrack floor
+ * at highest received packet ID thus far.
+ */
+#define PKTID_RECV_EXPIRE (30 * HZ)
+
+/* Packet-ID state for transmitter */
+struct ovpn_pktid_xmit {
+ atomic_t seq_num;
+};
+
+/* replay window sizing in bytes = 2^REPLAY_WINDOW_ORDER */
+#define REPLAY_WINDOW_ORDER 8
+
+#define REPLAY_WINDOW_BYTES BIT(REPLAY_WINDOW_ORDER)
+#define REPLAY_WINDOW_SIZE (REPLAY_WINDOW_BYTES * 8)
+#define REPLAY_INDEX(base, i) (((base) + (i)) & (REPLAY_WINDOW_SIZE - 1))
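+
+/* With REPLAY_WINDOW_ORDER 8 the bitmask spans 256 bytes, i.e. 2048 packet
+ * IDs: an ID may lag behind the highest ID received so far by at most
+ * REPLAY_WINDOW_SIZE - 1 positions and still be accepted, provided its bit
+ * in the history window is not already set.
+ */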
+
+/* Packet-ID state for receiver.
+ * Other than lock member, can be zeroed to initialize.
+ */
+struct ovpn_pktid_recv {
+ /* "sliding window" bitmask of recent packet IDs received */
+ u8 history[REPLAY_WINDOW_BYTES];
+ /* bit position of deque base in history */
+ unsigned int base;
+ /* extent (in bits) of deque in history */
+ unsigned int extent;
+ /* expiration of history in jiffies */
+ unsigned long expire;
+ /* highest sequence number received */
+ u32 id;
+ /* highest time stamp received */
+ u32 time;
+ /* we will only accept backtrack IDs > id_floor */
+ u32 id_floor;
+ unsigned int max_backtrack;
+ /* protects the entire packet ID state */
+ spinlock_t lock;
+};
+
+/* Get the next packet ID for xmit */
+static inline int ovpn_pktid_xmit_next(struct ovpn_pktid_xmit *pid, u32 *pktid)
+{
+ const u32 seq_num = atomic_fetch_add_unless(&pid->seq_num, 1, 0);
+ /* when the 32bit space is exhausted, we return an error because the packet
+ * ID is used to create the cipher IV and we do not want to reuse the
+ * same value more than once
+ */
+ if (unlikely(!seq_num))
+ return -ERANGE;
+
+ *pktid = seq_num;
+
+ return 0;
+}
+
+/* Write 12-byte AEAD IV to dest */
+static inline void ovpn_pktid_aead_write(const u32 pktid,
+ const u8 nt[],
+ unsigned char *dest)
+{
+ *(__force __be32 *)(dest) = htonl(pktid);
+ BUILD_BUG_ON(4 + OVPN_NONCE_TAIL_SIZE != OVPN_NONCE_SIZE);
+ memcpy(dest + 4, nt, OVPN_NONCE_TAIL_SIZE);
+}
+
+void ovpn_pktid_xmit_init(struct ovpn_pktid_xmit *pid);
+void ovpn_pktid_recv_init(struct ovpn_pktid_recv *pr);
+
+int ovpn_pktid_recv(struct ovpn_pktid_recv *pr, u32 pkt_id, u32 pkt_time);
+
+#endif /* _NET_OVPN_OVPNPKTID_H_ */
diff --git a/drivers/net/ovpn/proto.h b/drivers/net/ovpn/proto.h
new file mode 100644
index 000000000000..b7d285b4d9c1
--- /dev/null
+++ b/drivers/net/ovpn/proto.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ * James Yonan <james@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_PROTO_H_
+#define _NET_OVPN_PROTO_H_
+
+#include "main.h"
+
+#include <linux/bitfield.h>
+#include <linux/skbuff.h>
+
+/* When the OpenVPN protocol is run in AEAD mode, use
+ * the OpenVPN packet ID as the AEAD nonce:
+ *
+ * 00000005 521c3b01 4308c041
+ * [seq # ] [ nonce_tail ]
+ * [ 12-byte full IV ] -> OVPN_NONCE_SIZE
+ * [4-bytes -> OVPN_NONCE_WIRE_SIZE
+ * on wire]
+ */
+
+/* nonce size (96bits) as required by AEAD ciphers */
+#define OVPN_NONCE_SIZE 12
+/* last 8 bytes of AEAD nonce: provided by userspace and usually derived
+ * from key material generated during TLS handshake
+ */
+#define OVPN_NONCE_TAIL_SIZE 8
+
+/* OpenVPN nonce size reduced by 8-byte nonce tail -- this is the
+ * size of the AEAD Associated Data (AD) sent over the wire
+ * and is normally the head of the IV
+ */
+#define OVPN_NONCE_WIRE_SIZE (OVPN_NONCE_SIZE - OVPN_NONCE_TAIL_SIZE)
+
+#define OVPN_OPCODE_SIZE 4 /* DATA_V2 opcode size */
+#define OVPN_OPCODE_KEYID_MASK 0x07000000
+#define OVPN_OPCODE_PKTTYPE_MASK 0xF8000000
+#define OVPN_OPCODE_PEERID_MASK 0x00FFFFFF
+
+/* packet opcodes of interest to us */
+#define OVPN_DATA_V1 6 /* data channel v1 packet */
+#define OVPN_DATA_V2 9 /* data channel v2 packet */
+
+#define OVPN_PEER_ID_UNDEF 0x00FFFFFF
+
+/**
+ * ovpn_opcode_from_skb - extract OP code from skb at specified offset
+ * @skb: the packet to extract the OP code from
+ * @offset: the offset in the data buffer where the OP code is located
+ *
+ * Note: this function assumes that the skb head was pulled enough
+ * to access the first 4 bytes.
+ *
+ * Return: the OP code
+ */
+static inline u8 ovpn_opcode_from_skb(const struct sk_buff *skb, u16 offset)
+{
+ u32 opcode = be32_to_cpu(*(__be32 *)(skb->data + offset));
+
+ return FIELD_GET(OVPN_OPCODE_PKTTYPE_MASK, opcode);
+}
+
+/**
+ * ovpn_peer_id_from_skb - extract peer ID from skb at specified offset
+ * @skb: the packet to extract the peer ID from
+ * @offset: the offset in the data buffer where the peer ID is located
+ *
+ * Note: this function assumes that the skb head was pulled enough
+ * to access the first 4 bytes.
+ *
+ * Return: the peer ID
+ */
+static inline u32 ovpn_peer_id_from_skb(const struct sk_buff *skb, u16 offset)
+{
+ u32 opcode = be32_to_cpu(*(__be32 *)(skb->data + offset));
+
+ return FIELD_GET(OVPN_OPCODE_PEERID_MASK, opcode);
+}
+
+/**
+ * ovpn_key_id_from_skb - extract key ID from the skb head
+ * @skb: the packet to extract the key ID from
+ *
+ * Note: this function assumes that the skb head was pulled enough
+ * to access the first 4 bytes.
+ *
+ * Return: the key ID
+ */
+static inline u8 ovpn_key_id_from_skb(const struct sk_buff *skb)
+{
+ u32 opcode = be32_to_cpu(*(__be32 *)skb->data);
+
+ return FIELD_GET(OVPN_OPCODE_KEYID_MASK, opcode);
+}
+
+/**
+ * ovpn_opcode_compose - combine OP code, key ID and peer ID to wire format
+ * @opcode: the OP code
+ * @key_id: the key ID
+ * @peer_id: the peer ID
+ *
+ * Return: a 4 bytes integer obtained combining all input values following the
+ * OpenVPN wire format. This integer can then be written to the packet header.
+ */
+static inline u32 ovpn_opcode_compose(u8 opcode, u8 key_id, u32 peer_id)
+{
+ return FIELD_PREP(OVPN_OPCODE_PKTTYPE_MASK, opcode) |
+ FIELD_PREP(OVPN_OPCODE_KEYID_MASK, key_id) |
+ FIELD_PREP(OVPN_OPCODE_PEERID_MASK, peer_id);
+}
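+
+/* Example, derived from the masks above:
+ *   ovpn_opcode_compose(OVPN_DATA_V2, 2, 5)
+ * packs opcode 9 into the top 5 bits, key ID 2 into the next 3 bits and
+ * peer ID 5 into the lowest 24 bits, yielding 0x4a000005. The word travels
+ * on the wire in network byte order, as ovpn_opcode_from_skb() converts it
+ * back with be32_to_cpu().
+ */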
+
+#endif /* _NET_OVPN_OVPNPROTO_H_ */
diff --git a/drivers/net/ovpn/skb.h b/drivers/net/ovpn/skb.h
new file mode 100644
index 000000000000..64430880f1da
--- /dev/null
+++ b/drivers/net/ovpn/skb.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ * James Yonan <james@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_SKB_H_
+#define _NET_OVPN_SKB_H_
+
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/types.h>
+
+struct ovpn_cb {
+ struct ovpn_peer *peer;
+ struct ovpn_crypto_key_slot *ks;
+ struct aead_request *req;
+ struct scatterlist *sg;
+ u8 *iv;
+ unsigned int payload_offset;
+ bool nosignal;
+};
+
+static inline struct ovpn_cb *ovpn_skb_cb(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct ovpn_cb) > sizeof(skb->cb));
+ return (struct ovpn_cb *)skb->cb;
+}
+
+/* Return the Ethernet protocol (ETH_P_IP or ETH_P_IPV6) matching the IP
+ * version found in the skb header.
+ * Return 0 if the protocol is not IPv4/IPv6 or cannot be read.
+ */
+static inline __be16 ovpn_ip_check_protocol(struct sk_buff *skb)
+{
+ __be16 proto = 0;
+
+ /* skb could be non-linear,
+ * make sure IP header is in non-fragmented part
+ */
+ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
+ return 0;
+
+ if (ip_hdr(skb)->version == 4) {
+ proto = htons(ETH_P_IP);
+ } else if (ip_hdr(skb)->version == 6) {
+ if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
+ return 0;
+ proto = htons(ETH_P_IPV6);
+ }
+
+ return proto;
+}
+
+#endif /* _NET_OVPN_SKB_H_ */
diff --git a/drivers/net/ovpn/socket.c b/drivers/net/ovpn/socket.c
new file mode 100644
index 000000000000..9750871ab65c
--- /dev/null
+++ b/drivers/net/ovpn/socket.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/udp.h>
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "io.h"
+#include "peer.h"
+#include "socket.h"
+#include "tcp.h"
+#include "udp.h"
+
+static void ovpn_socket_release_kref(struct kref *kref)
+{
+ struct ovpn_socket *sock = container_of(kref, struct ovpn_socket,
+ refcount);
+
+ if (sock->sk->sk_protocol == IPPROTO_UDP)
+ ovpn_udp_socket_detach(sock);
+ else if (sock->sk->sk_protocol == IPPROTO_TCP)
+ ovpn_tcp_socket_detach(sock);
+}
+
+/**
+ * ovpn_socket_put - decrease reference counter
+ * @peer: peer whose socket reference counter should be decreased
+ * @sock: the RCU protected peer socket
+ *
+ * This function is only used internally. Users wanting to release
+ * references to the ovpn_socket should use ovpn_socket_release()
+ *
+ * Return: true if the socket was released, false otherwise
+ */
+static bool ovpn_socket_put(struct ovpn_peer *peer, struct ovpn_socket *sock)
+{
+ return kref_put(&sock->refcount, ovpn_socket_release_kref);
+}
+
+/**
+ * ovpn_socket_release - release resources owned by socket user
+ * @peer: peer whose socket should be released
+ *
+ * This function should be invoked when the peer is being removed
+ * and wants to drop its link to the socket.
+ *
+ * In case of UDP, the detach routine will drop a reference to the
+ * ovpn netdev, pointed to by the ovpn_socket.
+ *
+ * In case of TCP, releasing the socket will drop the refcounter of
+ * the peer it is linked to, thus allowing the peer to disappear
+ * as well.
+ *
+ * This function is expected to be invoked exactly once per peer
+ *
+ * NOTE: this function may sleep
+ */
+void ovpn_socket_release(struct ovpn_peer *peer)
+{
+ struct ovpn_socket *sock;
+ bool released;
+
+ might_sleep();
+
+ sock = rcu_replace_pointer(peer->sock, NULL, true);
+ /* release may be invoked after socket was detached */
+ if (!sock)
+ return;
+
+ /* Drop the reference while holding the sock lock to avoid
+ * concurrent ovpn_socket_new call to mess up with a partially
+ * detached socket.
+ *
+ * Holding the lock ensures that a socket with refcnt 0 is fully
+ * detached before it can be picked by a concurrent reader.
+ */
+ lock_sock(sock->sk);
+ released = ovpn_socket_put(peer, sock);
+ release_sock(sock->sk);
+
+ /* align all readers with sk_user_data being NULL */
+ synchronize_rcu();
+
+ /* following cleanup should happen with lock released */
+ if (released) {
+ if (sock->sk->sk_protocol == IPPROTO_UDP) {
+ netdev_put(sock->ovpn->dev, &sock->dev_tracker);
+ } else if (sock->sk->sk_protocol == IPPROTO_TCP) {
+ /* wait for TCP jobs to terminate */
+ ovpn_tcp_socket_wait_finish(sock);
+ ovpn_peer_put(sock->peer);
+ }
+ /* drop reference acquired in ovpn_socket_new() */
+ sock_put(sock->sk);
+ /* we can call plain kfree() because we already waited one RCU
+ * period due to synchronize_rcu()
+ */
+ kfree(sock);
+ }
+}
+
+static bool ovpn_socket_hold(struct ovpn_socket *sock)
+{
+ return kref_get_unless_zero(&sock->refcount);
+}
+
+static int ovpn_socket_attach(struct ovpn_socket *ovpn_sock,
+ struct socket *sock,
+ struct ovpn_peer *peer)
+{
+ if (sock->sk->sk_protocol == IPPROTO_UDP)
+ return ovpn_udp_socket_attach(ovpn_sock, sock, peer->ovpn);
+ else if (sock->sk->sk_protocol == IPPROTO_TCP)
+ return ovpn_tcp_socket_attach(ovpn_sock, peer);
+
+ return -EOPNOTSUPP;
+}
+
+/**
+ * ovpn_socket_new - create a new socket and initialize it
+ * @sock: the kernel socket to embed
+ * @peer: the peer reachable via this socket
+ *
+ * Return: an openvpn socket on success or a negative error code otherwise
+ */
+struct ovpn_socket *ovpn_socket_new(struct socket *sock, struct ovpn_peer *peer)
+{
+ struct ovpn_socket *ovpn_sock;
+ struct sock *sk = sock->sk;
+ int ret;
+
+ lock_sock(sk);
+
+ /* a TCP socket can only be owned by a single peer, therefore there
+ * can't be any other user
+ */
+ if (sk->sk_protocol == IPPROTO_TCP && sk->sk_user_data) {
+ ovpn_sock = ERR_PTR(-EBUSY);
+ goto sock_release;
+ }
+
+ /* a UDP socket can be shared across multiple peers, but we must make
+ * sure it is not owned by something else
+ */
+ if (sk->sk_protocol == IPPROTO_UDP) {
+ u8 type = READ_ONCE(udp_sk(sk)->encap_type);
+
+ /* socket owned by other encapsulation module */
+ if (type && type != UDP_ENCAP_OVPNINUDP) {
+ ovpn_sock = ERR_PTR(-EBUSY);
+ goto sock_release;
+ }
+
+ rcu_read_lock();
+ ovpn_sock = rcu_dereference_sk_user_data(sk);
+ if (ovpn_sock) {
+ /* socket owned by another ovpn instance, we can't use it */
+ if (ovpn_sock->ovpn != peer->ovpn) {
+ ovpn_sock = ERR_PTR(-EBUSY);
+ rcu_read_unlock();
+ goto sock_release;
+ }
+
+ /* this socket is already owned by this instance,
+ * therefore we can increase the refcounter and
+ * use it as expected
+ */
+ if (WARN_ON(!ovpn_socket_hold(ovpn_sock))) {
+ /* this should never happen because setting
+ * the refcnt to 0 and detaching the socket
+ * is expected to be atomic
+ */
+ ovpn_sock = ERR_PTR(-EAGAIN);
+ rcu_read_unlock();
+ goto sock_release;
+ }
+
+ rcu_read_unlock();
+ goto sock_release;
+ }
+ rcu_read_unlock();
+ }
+
+ /* socket is not owned: attach to this ovpn instance */
+
+ ovpn_sock = kzalloc(sizeof(*ovpn_sock), GFP_KERNEL);
+ if (!ovpn_sock) {
+ ovpn_sock = ERR_PTR(-ENOMEM);
+ goto sock_release;
+ }
+
+ ovpn_sock->sk = sk;
+ kref_init(&ovpn_sock->refcount);
+
+ /* the newly created ovpn_socket is holding reference to sk,
+ * therefore we increase its refcounter.
+ *
+ * This ovpn_socket instance is referenced by all peers
+ * using the same socket.
+ *
+ * ovpn_socket_release() will take care of dropping the reference.
+ */
+ sock_hold(sk);
+
+ ret = ovpn_socket_attach(ovpn_sock, sock, peer);
+ if (ret < 0) {
+ sock_put(sk);
+ kfree(ovpn_sock);
+ ovpn_sock = ERR_PTR(ret);
+ goto sock_release;
+ }
+
+ /* TCP sockets are per-peer, therefore they are linked to their unique
+ * peer
+ */
+ if (sk->sk_protocol == IPPROTO_TCP) {
+ INIT_WORK(&ovpn_sock->tcp_tx_work, ovpn_tcp_tx_work);
+ ovpn_sock->peer = peer;
+ ovpn_peer_hold(peer);
+ } else if (sk->sk_protocol == IPPROTO_UDP) {
+ /* in UDP we only link the ovpn instance since the socket is
+ * shared among multiple peers
+ */
+ ovpn_sock->ovpn = peer->ovpn;
+ netdev_hold(peer->ovpn->dev, &ovpn_sock->dev_tracker,
+ GFP_KERNEL);
+ }
+
+ rcu_assign_sk_user_data(sk, ovpn_sock);
+sock_release:
+ release_sock(sk);
+ return ovpn_sock;
+}
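
A minimal user-space sketch (not kernel code) of the "reuse or allocate" pattern that ovpn_socket_new() applies above to shared UDP sockets: an existing object may only be reused if its refcount can be raised from a non-zero value (the kref_get_unless_zero() idea), otherwise a fresh object is allocated with an initial count of one. The demo_* names are purely illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_socket {
	atomic_int refcount;	/* plays the role of the kref */
	int owner_id;		/* plays the role of ->ovpn */
};

/* like kref_get_unless_zero(): only succeed while the object is still alive */
static bool demo_hold(struct demo_socket *s)
{
	int old = atomic_load(&s->refcount);

	while (old > 0) {
		if (atomic_compare_exchange_weak(&s->refcount, &old, old + 1))
			return true;
	}
	return false;
}

static struct demo_socket *demo_get_or_create(struct demo_socket *existing,
					      int owner_id)
{
	struct demo_socket *s;

	/* reuse only if owned by the same instance and still referenced */
	if (existing && existing->owner_id == owner_id && demo_hold(existing))
		return existing;

	s = calloc(1, sizeof(*s));
	if (!s)
		return NULL;
	atomic_store(&s->refcount, 1);	/* like kref_init() */
	s->owner_id = owner_id;
	return s;
}

int main(void)
{
	struct demo_socket *a = demo_get_or_create(NULL, 1);
	struct demo_socket *b = demo_get_or_create(a, 1);	/* reused */
	struct demo_socket *c = demo_get_or_create(a, 2);	/* new object */

	printf("a==b: %d, refcount=%d, c new: %d\n",
	       a == b, atomic_load(&a->refcount), c != a);
	/* refcount release path omitted for brevity */
	free(c);
	free(a);
	return 0;
}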
diff --git a/drivers/net/ovpn/socket.h b/drivers/net/ovpn/socket.h
new file mode 100644
index 000000000000..4afcec71040d
--- /dev/null
+++ b/drivers/net/ovpn/socket.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_SOCK_H_
+#define _NET_OVPN_SOCK_H_
+
+#include <linux/net.h>
+#include <linux/kref.h>
+#include <net/sock.h>
+
+struct ovpn_priv;
+struct ovpn_peer;
+
+/**
+ * struct ovpn_socket - a kernel socket referenced in the ovpn code
+ * @ovpn: ovpn instance owning this socket (UDP only)
+ * @dev_tracker: reference tracker for associated dev (UDP only)
+ * @peer: unique peer transmitting over this socket (TCP only)
+ * @sk: the low level sock object
+ * @refcount: amount of contexts currently referencing this object
+ * @work: member used to schedule release routine (it may block)
+ * @tcp_tx_work: work for deferring outgoing packet processing (TCP only)
+ */
+struct ovpn_socket {
+ union {
+ struct {
+ struct ovpn_priv *ovpn;
+ netdevice_tracker dev_tracker;
+ };
+ struct ovpn_peer *peer;
+ };
+
+ struct sock *sk;
+ struct kref refcount;
+ struct work_struct work;
+ struct work_struct tcp_tx_work;
+};
+
+struct ovpn_socket *ovpn_socket_new(struct socket *sock,
+ struct ovpn_peer *peer);
+void ovpn_socket_release(struct ovpn_peer *peer);
+
+#endif /* _NET_OVPN_SOCK_H_ */
diff --git a/drivers/net/ovpn/stats.c b/drivers/net/ovpn/stats.c
new file mode 100644
index 000000000000..d637143473bb
--- /dev/null
+++ b/drivers/net/ovpn/stats.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <linux/atomic.h>
+
+#include "stats.h"
+
+void ovpn_peer_stats_init(struct ovpn_peer_stats *ps)
+{
+ atomic64_set(&ps->rx.bytes, 0);
+ atomic64_set(&ps->rx.packets, 0);
+
+ atomic64_set(&ps->tx.bytes, 0);
+ atomic64_set(&ps->tx.packets, 0);
+}
diff --git a/drivers/net/ovpn/stats.h b/drivers/net/ovpn/stats.h
new file mode 100644
index 000000000000..53433d8b6c33
--- /dev/null
+++ b/drivers/net/ovpn/stats.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ * Lev Stipakov <lev@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPNSTATS_H_
+#define _NET_OVPN_OVPNSTATS_H_
+
+/* one stat */
+struct ovpn_peer_stat {
+ atomic64_t bytes;
+ atomic64_t packets;
+};
+
+/* rx and tx stats combined */
+struct ovpn_peer_stats {
+ struct ovpn_peer_stat rx;
+ struct ovpn_peer_stat tx;
+};
+
+void ovpn_peer_stats_init(struct ovpn_peer_stats *ps);
+
+static inline void ovpn_peer_stats_increment(struct ovpn_peer_stat *stat,
+ const unsigned int n)
+{
+ atomic64_add(n, &stat->bytes);
+ atomic64_inc(&stat->packets);
+}
+
+static inline void ovpn_peer_stats_increment_rx(struct ovpn_peer_stats *stats,
+ const unsigned int n)
+{
+ ovpn_peer_stats_increment(&stats->rx, n);
+}
+
+static inline void ovpn_peer_stats_increment_tx(struct ovpn_peer_stats *stats,
+ const unsigned int n)
+{
+ ovpn_peer_stats_increment(&stats->tx, n);
+}
+
+#endif /* _NET_OVPN_OVPNSTATS_H_ */
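
The stats helpers above simply bump byte and packet counters with lock-free 64-bit atomics so the RX/TX hot paths never serialize on a lock. A rough user-space equivalent of the same idea, using C11 atomics and hypothetical demo_* names:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct demo_stat {
	atomic_uint_least64_t bytes;
	atomic_uint_least64_t packets;
};

/* add the payload size and count one packet, with no ordering requirements */
static void demo_stats_increment(struct demo_stat *stat, unsigned int n)
{
	atomic_fetch_add_explicit(&stat->bytes, n, memory_order_relaxed);
	atomic_fetch_add_explicit(&stat->packets, 1, memory_order_relaxed);
}

int main(void)
{
	struct demo_stat rx = { 0 };

	demo_stats_increment(&rx, 1400);
	demo_stats_increment(&rx, 60);
	printf("bytes=%llu packets=%llu\n",
	       (unsigned long long)atomic_load(&rx.bytes),
	       (unsigned long long)atomic_load(&rx.packets));
	return 0;
}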
diff --git a/drivers/net/ovpn/tcp.c b/drivers/net/ovpn/tcp.c
new file mode 100644
index 000000000000..0d7f30360d87
--- /dev/null
+++ b/drivers/net/ovpn/tcp.c
@@ -0,0 +1,619 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2019-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <linux/skbuff.h>
+#include <net/hotdata.h>
+#include <net/inet_common.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <net/transp_v6.h>
+#include <net/route.h>
+#include <trace/events/sock.h>
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "io.h"
+#include "peer.h"
+#include "proto.h"
+#include "skb.h"
+#include "tcp.h"
+
+#define OVPN_TCP_DEPTH_NESTING 2
+#if OVPN_TCP_DEPTH_NESTING == SINGLE_DEPTH_NESTING
+#error "OVPN TCP requires its own lockdep subclass"
+#endif
+
+static struct proto ovpn_tcp_prot __ro_after_init;
+static struct proto_ops ovpn_tcp_ops __ro_after_init;
+static struct proto ovpn_tcp6_prot __ro_after_init;
+static struct proto_ops ovpn_tcp6_ops __ro_after_init;
+
+static int ovpn_tcp_parse(struct strparser *strp, struct sk_buff *skb)
+{
+ struct strp_msg *rxm = strp_msg(skb);
+ __be16 blen;
+ u16 len;
+ int err;
+
+ /* when packets are written to the TCP stream, they are prepended with
+ * two bytes indicating the actual packet size.
+ * Parse accordingly and return the actual size (including the size
+ * header)
+ */
+
+ if (skb->len < rxm->offset + 2)
+ return 0;
+
+ err = skb_copy_bits(skb, rxm->offset, &blen, sizeof(blen));
+ if (err < 0)
+ return err;
+
+ len = be16_to_cpu(blen);
+ if (len < 2)
+ return -EINVAL;
+
+ return len + 2;
+}
+
+/* queue skb for sending to userspace via recvmsg on the socket */
+static void ovpn_tcp_to_userspace(struct ovpn_peer *peer, struct sock *sk,
+ struct sk_buff *skb)
+{
+ skb_set_owner_r(skb, sk);
+ memset(skb->cb, 0, sizeof(skb->cb));
+ skb_queue_tail(&peer->tcp.user_queue, skb);
+ peer->tcp.sk_cb.sk_data_ready(sk);
+}
+
+static void ovpn_tcp_rcv(struct strparser *strp, struct sk_buff *skb)
+{
+ struct ovpn_peer *peer = container_of(strp, struct ovpn_peer, tcp.strp);
+ struct strp_msg *msg = strp_msg(skb);
+ size_t pkt_len = msg->full_len - 2;
+ size_t off = msg->offset + 2;
+ u8 opcode;
+
+ /* ensure skb->data points to the beginning of the openvpn packet */
+ if (!pskb_pull(skb, off)) {
+ net_warn_ratelimited("%s: packet too small for peer %u\n",
+ netdev_name(peer->ovpn->dev), peer->id);
+ goto err;
+ }
+
+ /* strparser does not trim the skb for us, therefore we do it now */
+ if (pskb_trim(skb, pkt_len) != 0) {
+ net_warn_ratelimited("%s: trimming skb failed for peer %u\n",
+ netdev_name(peer->ovpn->dev), peer->id);
+ goto err;
+ }
+
+ /* we need the first 4 bytes of data to be accessible
+ * to extract the opcode and the key ID later on
+ */
+ if (!pskb_may_pull(skb, OVPN_OPCODE_SIZE)) {
+ net_warn_ratelimited("%s: packet too small to fetch opcode for peer %u\n",
+ netdev_name(peer->ovpn->dev), peer->id);
+ goto err;
+ }
+
+ /* DATA_V2 packets are handled in kernel, the rest goes to user space */
+ opcode = ovpn_opcode_from_skb(skb, 0);
+ if (unlikely(opcode != OVPN_DATA_V2)) {
+ if (opcode == OVPN_DATA_V1) {
+ net_warn_ratelimited("%s: DATA_V1 detected on the TCP stream\n",
+ netdev_name(peer->ovpn->dev));
+ goto err;
+ }
+
+ /* The packet size header must be there when sending the packet
+ * to userspace, therefore we put it back
+ */
+ skb_push(skb, 2);
+ ovpn_tcp_to_userspace(peer, strp->sk, skb);
+ return;
+ }
+
+ /* hold reference to peer as required by ovpn_recv().
+ *
+ * NOTE: in this context we should already be holding a reference to
+ * this peer, therefore ovpn_peer_hold() is not expected to fail
+ */
+ if (WARN_ON(!ovpn_peer_hold(peer)))
+ goto err_nopeer;
+
+ ovpn_recv(peer, skb);
+ return;
+err:
+ /* take reference for deferred peer deletion. should never fail */
+ if (WARN_ON(!ovpn_peer_hold(peer)))
+ goto err_nopeer;
+ schedule_work(&peer->tcp.defer_del_work);
+ dev_dstats_rx_dropped(peer->ovpn->dev);
+err_nopeer:
+ kfree_skb(skb);
+}
+
+static int ovpn_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ int flags, int *addr_len)
+{
+ int err = 0, off, copied = 0, ret;
+ struct ovpn_socket *sock;
+ struct ovpn_peer *peer;
+ struct sk_buff *skb;
+
+ rcu_read_lock();
+ sock = rcu_dereference_sk_user_data(sk);
+ if (unlikely(!sock || !sock->peer || !ovpn_peer_hold(sock->peer))) {
+ rcu_read_unlock();
+ return -EBADF;
+ }
+ peer = sock->peer;
+ rcu_read_unlock();
+
+ skb = __skb_recv_datagram(sk, &peer->tcp.user_queue, flags, &off, &err);
+ if (!skb) {
+ if (err == -EAGAIN && sk->sk_shutdown & RCV_SHUTDOWN) {
+ ret = 0;
+ goto out;
+ }
+ ret = err;
+ goto out;
+ }
+
+ copied = len;
+ if (copied > skb->len)
+ copied = skb->len;
+ else if (copied < skb->len)
+ msg->msg_flags |= MSG_TRUNC;
+
+ err = skb_copy_datagram_msg(skb, 0, msg, copied);
+ if (unlikely(err)) {
+ kfree_skb(skb);
+ ret = err;
+ goto out;
+ }
+
+ if (flags & MSG_TRUNC)
+ copied = skb->len;
+ kfree_skb(skb);
+ ret = copied;
+out:
+ ovpn_peer_put(peer);
+ return ret;
+}
+
+void ovpn_tcp_socket_detach(struct ovpn_socket *ovpn_sock)
+{
+ struct ovpn_peer *peer = ovpn_sock->peer;
+ struct sock *sk = ovpn_sock->sk;
+
+ strp_stop(&peer->tcp.strp);
+ skb_queue_purge(&peer->tcp.user_queue);
+
+ /* restore CBs that were saved in ovpn_sock_set_tcp_cb() */
+ sk->sk_data_ready = peer->tcp.sk_cb.sk_data_ready;
+ sk->sk_write_space = peer->tcp.sk_cb.sk_write_space;
+ sk->sk_prot = peer->tcp.sk_cb.prot;
+ sk->sk_socket->ops = peer->tcp.sk_cb.ops;
+
+ rcu_assign_sk_user_data(sk, NULL);
+}
+
+void ovpn_tcp_socket_wait_finish(struct ovpn_socket *sock)
+{
+ struct ovpn_peer *peer = sock->peer;
+
+ /* NOTE: we don't wait for peer->tcp.defer_del_work to finish:
+ * either the worker is not running or this function
+ * was invoked by that worker.
+ */
+
+ cancel_work_sync(&sock->tcp_tx_work);
+ strp_done(&peer->tcp.strp);
+
+ skb_queue_purge(&peer->tcp.out_queue);
+ kfree_skb(peer->tcp.out_msg.skb);
+ peer->tcp.out_msg.skb = NULL;
+}
+
+static void ovpn_tcp_send_sock(struct ovpn_peer *peer, struct sock *sk)
+{
+ struct sk_buff *skb = peer->tcp.out_msg.skb;
+ int ret, flags;
+
+ if (!skb)
+ return;
+
+ if (peer->tcp.tx_in_progress)
+ return;
+
+ peer->tcp.tx_in_progress = true;
+
+ do {
+ flags = ovpn_skb_cb(skb)->nosignal ? MSG_NOSIGNAL : 0;
+ ret = skb_send_sock_locked_with_flags(sk, skb,
+ peer->tcp.out_msg.offset,
+ peer->tcp.out_msg.len,
+ flags);
+ if (unlikely(ret < 0)) {
+ if (ret == -EAGAIN)
+ goto out;
+
+ net_warn_ratelimited("%s: TCP error to peer %u: %d\n",
+ netdev_name(peer->ovpn->dev),
+ peer->id, ret);
+
+ /* in case of TCP error we can't recover the VPN
+ * stream therefore we abort the connection
+ */
+ ovpn_peer_hold(peer);
+ schedule_work(&peer->tcp.defer_del_work);
+
+ /* we bail out immediately and keep tx_in_progress set
+ * to true. This way we prevent more TX attempts
+ * which would lead to more invocations of
+ * schedule_work()
+ */
+ return;
+ }
+
+ peer->tcp.out_msg.len -= ret;
+ peer->tcp.out_msg.offset += ret;
+ } while (peer->tcp.out_msg.len > 0);
+
+ if (!peer->tcp.out_msg.len) {
+ preempt_disable();
+ dev_dstats_tx_add(peer->ovpn->dev, skb->len);
+ preempt_enable();
+ }
+
+ kfree_skb(peer->tcp.out_msg.skb);
+ peer->tcp.out_msg.skb = NULL;
+ peer->tcp.out_msg.len = 0;
+ peer->tcp.out_msg.offset = 0;
+
+out:
+ peer->tcp.tx_in_progress = false;
+}
+
+void ovpn_tcp_tx_work(struct work_struct *work)
+{
+ struct ovpn_socket *sock;
+
+ sock = container_of(work, struct ovpn_socket, tcp_tx_work);
+
+ lock_sock(sock->sk);
+ if (sock->peer)
+ ovpn_tcp_send_sock(sock->peer, sock->sk);
+ release_sock(sock->sk);
+}
+
+static void ovpn_tcp_send_sock_skb(struct ovpn_peer *peer, struct sock *sk,
+ struct sk_buff *skb)
+{
+ if (peer->tcp.out_msg.skb)
+ ovpn_tcp_send_sock(peer, sk);
+
+ if (peer->tcp.out_msg.skb) {
+ dev_dstats_tx_dropped(peer->ovpn->dev);
+ kfree_skb(skb);
+ return;
+ }
+
+ peer->tcp.out_msg.skb = skb;
+ peer->tcp.out_msg.len = skb->len;
+ peer->tcp.out_msg.offset = 0;
+ ovpn_tcp_send_sock(peer, sk);
+}
+
+void ovpn_tcp_send_skb(struct ovpn_peer *peer, struct sock *sk,
+ struct sk_buff *skb)
+{
+ u16 len = skb->len;
+
+ *(__be16 *)__skb_push(skb, sizeof(u16)) = htons(len);
+
+ spin_lock_nested(&sk->sk_lock.slock, OVPN_TCP_DEPTH_NESTING);
+ if (sock_owned_by_user(sk)) {
+ if (skb_queue_len(&peer->tcp.out_queue) >=
+ READ_ONCE(net_hotdata.max_backlog)) {
+ dev_dstats_tx_dropped(peer->ovpn->dev);
+ kfree_skb(skb);
+ goto unlock;
+ }
+ __skb_queue_tail(&peer->tcp.out_queue, skb);
+ } else {
+ ovpn_tcp_send_sock_skb(peer, sk, skb);
+ }
+unlock:
+ spin_unlock(&sk->sk_lock.slock);
+}
+
+static void ovpn_tcp_release(struct sock *sk)
+{
+ struct sk_buff_head queue;
+ struct ovpn_socket *sock;
+ struct ovpn_peer *peer;
+ struct sk_buff *skb;
+
+ rcu_read_lock();
+ sock = rcu_dereference_sk_user_data(sk);
+ if (!sock) {
+ rcu_read_unlock();
+ return;
+ }
+
+ peer = sock->peer;
+
+ /* during initialization this function is called before
+ * assigning sock->peer
+ */
+ if (unlikely(!peer || !ovpn_peer_hold(peer))) {
+ rcu_read_unlock();
+ return;
+ }
+ rcu_read_unlock();
+
+ __skb_queue_head_init(&queue);
+ skb_queue_splice_init(&peer->tcp.out_queue, &queue);
+
+ while ((skb = __skb_dequeue(&queue)))
+ ovpn_tcp_send_sock_skb(peer, sk, skb);
+
+ peer->tcp.sk_cb.prot->release_cb(sk);
+ ovpn_peer_put(peer);
+}
+
+static int ovpn_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+{
+ struct ovpn_socket *sock;
+ int ret, linear = PAGE_SIZE;
+ struct ovpn_peer *peer;
+ struct sk_buff *skb;
+
+ lock_sock(sk);
+ rcu_read_lock();
+ sock = rcu_dereference_sk_user_data(sk);
+ if (unlikely(!sock || !sock->peer || !ovpn_peer_hold(sock->peer))) {
+ rcu_read_unlock();
+ release_sock(sk);
+ return -EIO;
+ }
+ rcu_read_unlock();
+ peer = sock->peer;
+
+ if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL)) {
+ ret = -EOPNOTSUPP;
+ goto peer_free;
+ }
+
+ if (peer->tcp.out_msg.skb) {
+ ret = -EAGAIN;
+ goto peer_free;
+ }
+
+ if (size < linear)
+ linear = size;
+
+ skb = sock_alloc_send_pskb(sk, linear, size - linear,
+ msg->msg_flags & MSG_DONTWAIT, &ret, 0);
+ if (!skb) {
+ net_err_ratelimited("%s: skb alloc failed: %d\n",
+ netdev_name(peer->ovpn->dev), ret);
+ goto peer_free;
+ }
+
+ skb_put(skb, linear);
+ skb->len = size;
+ skb->data_len = size - linear;
+
+ ret = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
+ if (ret) {
+ kfree_skb(skb);
+ net_err_ratelimited("%s: skb copy from iter failed: %d\n",
+ netdev_name(peer->ovpn->dev), ret);
+ goto peer_free;
+ }
+
+ ovpn_skb_cb(skb)->nosignal = msg->msg_flags & MSG_NOSIGNAL;
+ ovpn_tcp_send_sock_skb(peer, sk, skb);
+ ret = size;
+peer_free:
+ release_sock(sk);
+ ovpn_peer_put(peer);
+ return ret;
+}
+
+static int ovpn_tcp_disconnect(struct sock *sk, int flags)
+{
+ return -EBUSY;
+}
+
+static void ovpn_tcp_data_ready(struct sock *sk)
+{
+ struct ovpn_socket *sock;
+
+ trace_sk_data_ready(sk);
+
+ rcu_read_lock();
+ sock = rcu_dereference_sk_user_data(sk);
+ if (likely(sock && sock->peer))
+ strp_data_ready(&sock->peer->tcp.strp);
+ rcu_read_unlock();
+}
+
+static void ovpn_tcp_write_space(struct sock *sk)
+{
+ struct ovpn_socket *sock;
+
+ rcu_read_lock();
+ sock = rcu_dereference_sk_user_data(sk);
+ if (likely(sock && sock->peer)) {
+ schedule_work(&sock->tcp_tx_work);
+ sock->peer->tcp.sk_cb.sk_write_space(sk);
+ }
+ rcu_read_unlock();
+}
+
+static void ovpn_tcp_build_protos(struct proto *new_prot,
+ struct proto_ops *new_ops,
+ const struct proto *orig_prot,
+ const struct proto_ops *orig_ops);
+
+static void ovpn_tcp_peer_del_work(struct work_struct *work)
+{
+ struct ovpn_peer *peer = container_of(work, struct ovpn_peer,
+ tcp.defer_del_work);
+
+ ovpn_peer_del(peer, OVPN_DEL_PEER_REASON_TRANSPORT_ERROR);
+ ovpn_peer_put(peer);
+}
+
+/* Set TCP encapsulation callbacks */
+int ovpn_tcp_socket_attach(struct ovpn_socket *ovpn_sock,
+ struct ovpn_peer *peer)
+{
+ struct strp_callbacks cb = {
+ .rcv_msg = ovpn_tcp_rcv,
+ .parse_msg = ovpn_tcp_parse,
+ };
+ int ret;
+
+ /* make sure no pre-existing encapsulation handler exists */
+ if (ovpn_sock->sk->sk_user_data)
+ return -EBUSY;
+
+ /* only a fully connected socket is expected. Connection should be
+ * handled in userspace
+ */
+ if (ovpn_sock->sk->sk_state != TCP_ESTABLISHED) {
+ net_err_ratelimited("%s: provided TCP socket is not in ESTABLISHED state: %d\n",
+ netdev_name(peer->ovpn->dev),
+ ovpn_sock->sk->sk_state);
+ return -EINVAL;
+ }
+
+ ret = strp_init(&peer->tcp.strp, ovpn_sock->sk, &cb);
+ if (ret < 0) {
+ DEBUG_NET_WARN_ON_ONCE(1);
+ return ret;
+ }
+
+ INIT_WORK(&peer->tcp.defer_del_work, ovpn_tcp_peer_del_work);
+
+ __sk_dst_reset(ovpn_sock->sk);
+ skb_queue_head_init(&peer->tcp.user_queue);
+ skb_queue_head_init(&peer->tcp.out_queue);
+
+ /* save current CBs so that they can be restored upon socket release */
+ peer->tcp.sk_cb.sk_data_ready = ovpn_sock->sk->sk_data_ready;
+ peer->tcp.sk_cb.sk_write_space = ovpn_sock->sk->sk_write_space;
+ peer->tcp.sk_cb.prot = ovpn_sock->sk->sk_prot;
+ peer->tcp.sk_cb.ops = ovpn_sock->sk->sk_socket->ops;
+
+ /* assign our static CBs and prot/ops */
+ ovpn_sock->sk->sk_data_ready = ovpn_tcp_data_ready;
+ ovpn_sock->sk->sk_write_space = ovpn_tcp_write_space;
+
+ if (ovpn_sock->sk->sk_family == AF_INET) {
+ ovpn_sock->sk->sk_prot = &ovpn_tcp_prot;
+ ovpn_sock->sk->sk_socket->ops = &ovpn_tcp_ops;
+ } else {
+ ovpn_sock->sk->sk_prot = &ovpn_tcp6_prot;
+ ovpn_sock->sk->sk_socket->ops = &ovpn_tcp6_ops;
+ }
+
+ /* avoid using task_frag */
+ ovpn_sock->sk->sk_allocation = GFP_ATOMIC;
+ ovpn_sock->sk->sk_use_task_frag = false;
+
+ /* enqueue the RX worker */
+ strp_check_rcv(&peer->tcp.strp);
+
+ return 0;
+}
+
+static void ovpn_tcp_close(struct sock *sk, long timeout)
+{
+ struct ovpn_socket *sock;
+ struct ovpn_peer *peer;
+
+ rcu_read_lock();
+ sock = rcu_dereference_sk_user_data(sk);
+ if (!sock || !sock->peer || !ovpn_peer_hold(sock->peer)) {
+ rcu_read_unlock();
+ return;
+ }
+ peer = sock->peer;
+ rcu_read_unlock();
+
+ ovpn_peer_del(sock->peer, OVPN_DEL_PEER_REASON_TRANSPORT_DISCONNECT);
+ peer->tcp.sk_cb.prot->close(sk, timeout);
+ ovpn_peer_put(peer);
+}
+
+static __poll_t ovpn_tcp_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
+{
+ struct sk_buff_head *queue = &sock->sk->sk_receive_queue;
+ struct ovpn_socket *ovpn_sock;
+ struct ovpn_peer *peer = NULL;
+ __poll_t mask;
+
+ rcu_read_lock();
+ ovpn_sock = rcu_dereference_sk_user_data(sock->sk);
+ /* if we landed in this callback, we expect to have a
+ * meaningful state. The ovpn_socket lifecycle would
+ * prevent it otherwise.
+ */
+ if (WARN(!ovpn_sock || !ovpn_sock->peer,
+ "ovpn: null state in ovpn_tcp_poll!")) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ if (ovpn_peer_hold(ovpn_sock->peer)) {
+ peer = ovpn_sock->peer;
+ queue = &peer->tcp.user_queue;
+ }
+ rcu_read_unlock();
+
+ mask = datagram_poll_queue(file, sock, wait, queue);
+
+ if (peer)
+ ovpn_peer_put(peer);
+
+ return mask;
+}
+
+static void ovpn_tcp_build_protos(struct proto *new_prot,
+ struct proto_ops *new_ops,
+ const struct proto *orig_prot,
+ const struct proto_ops *orig_ops)
+{
+ memcpy(new_prot, orig_prot, sizeof(*new_prot));
+ memcpy(new_ops, orig_ops, sizeof(*new_ops));
+ new_prot->recvmsg = ovpn_tcp_recvmsg;
+ new_prot->sendmsg = ovpn_tcp_sendmsg;
+ new_prot->disconnect = ovpn_tcp_disconnect;
+ new_prot->close = ovpn_tcp_close;
+ new_prot->release_cb = ovpn_tcp_release;
+ new_ops->poll = ovpn_tcp_poll;
+}
+
+/* Initialize TCP static objects */
+void __init ovpn_tcp_init(void)
+{
+ ovpn_tcp_build_protos(&ovpn_tcp_prot, &ovpn_tcp_ops, &tcp_prot,
+ &inet_stream_ops);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ ovpn_tcp_build_protos(&ovpn_tcp6_prot, &ovpn_tcp6_ops, &tcpv6_prot,
+ &inet6_stream_ops);
+#endif
+}
diff --git a/drivers/net/ovpn/tcp.h b/drivers/net/ovpn/tcp.h
new file mode 100644
index 000000000000..a3aa3570ae5e
--- /dev/null
+++ b/drivers/net/ovpn/tcp.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2019-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_TCP_H_
+#define _NET_OVPN_TCP_H_
+
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
+#include "peer.h"
+#include "skb.h"
+#include "socket.h"
+
+void __init ovpn_tcp_init(void);
+
+int ovpn_tcp_socket_attach(struct ovpn_socket *ovpn_sock,
+ struct ovpn_peer *peer);
+void ovpn_tcp_socket_detach(struct ovpn_socket *ovpn_sock);
+void ovpn_tcp_socket_wait_finish(struct ovpn_socket *sock);
+
+/* Prepare skb and enqueue it for sending to peer.
+ *
+ * Preparation consists of prepending the skb payload with its size.
+ * Required by the OpenVPN protocol in order to extract packets from
+ * the TCP stream on the receiver side.
+ */
+void ovpn_tcp_send_skb(struct ovpn_peer *peer, struct sock *sk,
+ struct sk_buff *skb);
+void ovpn_tcp_tx_work(struct work_struct *work);
+
+#endif /* _NET_OVPN_TCP_H_ */
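
As noted above, the OpenVPN TCP transport is a plain length-prefixed stream: every packet is preceded by its size as a 16-bit big-endian value, which ovpn_tcp_send_skb() prepends and ovpn_tcp_parse() reads back. A small user-space sketch of that framing, with hypothetical helper names and a fixed-size buffer for brevity:

#include <arpa/inet.h>	/* htons/ntohs */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* prepend the 2-byte size header, as ovpn_tcp_send_skb() does via __skb_push() */
static size_t frame_packet(const uint8_t *pkt, uint16_t len, uint8_t *out)
{
	uint16_t be_len = htons(len);

	memcpy(out, &be_len, sizeof(be_len));
	memcpy(out + sizeof(be_len), pkt, len);
	return sizeof(be_len) + len;
}

/* mirror ovpn_tcp_parse(): return the full frame length, or 0 if more data is needed */
static size_t parse_frame(const uint8_t *buf, size_t avail, uint16_t *pkt_len)
{
	uint16_t be_len;

	if (avail < sizeof(be_len))
		return 0;	/* size header not complete yet */
	memcpy(&be_len, buf, sizeof(be_len));
	*pkt_len = ntohs(be_len);
	return sizeof(be_len) + *pkt_len;
}

int main(void)
{
	uint8_t payload[] = { 0x48, 0x00, 0x00, 0x00, 0x01 };	/* arbitrary bytes */
	uint8_t stream[64];
	uint16_t plen;
	size_t framed, total;

	framed = frame_packet(payload, sizeof(payload), stream);
	total = parse_frame(stream, framed, &plen);
	printf("framed=%zu parsed=%zu payload=%u\n", framed, total, (unsigned)plen);
	return 0;
}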
diff --git a/drivers/net/ovpn/udp.c b/drivers/net/ovpn/udp.c
new file mode 100644
index 000000000000..d6a0f7a0b75d
--- /dev/null
+++ b/drivers/net/ovpn/udp.c
@@ -0,0 +1,448 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2019-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/udp.h>
+#include <net/addrconf.h>
+#include <net/dst_cache.h>
+#include <net/route.h>
+#include <net/ipv6_stubs.h>
+#include <net/transp_v6.h>
+#include <net/udp.h>
+#include <net/udp_tunnel.h>
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "bind.h"
+#include "io.h"
+#include "peer.h"
+#include "proto.h"
+#include "socket.h"
+#include "udp.h"
+
+/* Retrieve the corresponding ovpn object from a UDP socket.
+ * rcu_read_lock must be held on entry.
+ */
+static struct ovpn_socket *ovpn_socket_from_udp_sock(struct sock *sk)
+{
+ struct ovpn_socket *ovpn_sock;
+
+ if (unlikely(READ_ONCE(udp_sk(sk)->encap_type) != UDP_ENCAP_OVPNINUDP))
+ return NULL;
+
+ ovpn_sock = rcu_dereference_sk_user_data(sk);
+ if (unlikely(!ovpn_sock))
+ return NULL;
+
+ /* make sure that sk matches our stored transport socket */
+ if (unlikely(!ovpn_sock->sk || sk != ovpn_sock->sk))
+ return NULL;
+
+ return ovpn_sock;
+}
+
+/**
+ * ovpn_udp_encap_recv - Start processing a received UDP packet.
+ * @sk: socket over which the packet was received
+ * @skb: the received packet
+ *
+ * If the first byte of the payload is:
+ * - DATA_V2 the packet is accepted for further processing,
+ * - DATA_V1 the packet is dropped as not supported,
+ * - anything else the packet is forwarded to the UDP stack for
+ * delivery to user space.
+ *
+ * Return:
+ * 0 if skb was consumed or dropped
+ * >0 if skb should be passed up to userspace as UDP (packet not consumed)
+ * <0 if skb should be resubmitted as proto -N (packet not consumed)
+ */
+static int ovpn_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+{
+ struct ovpn_socket *ovpn_sock;
+ struct ovpn_priv *ovpn;
+ struct ovpn_peer *peer;
+ u32 peer_id;
+ u8 opcode;
+
+ ovpn_sock = ovpn_socket_from_udp_sock(sk);
+ if (unlikely(!ovpn_sock)) {
+ net_err_ratelimited("ovpn: %s invoked on non ovpn socket\n",
+ __func__);
+ goto drop_noovpn;
+ }
+
+ ovpn = ovpn_sock->ovpn;
+ if (unlikely(!ovpn)) {
+ net_err_ratelimited("ovpn: cannot obtain ovpn object from UDP socket\n");
+ goto drop_noovpn;
+ }
+
+ /* Make sure the first 4 bytes of the skb data buffer after the UDP
+ * header are accessible.
+ * They are required to fetch the OP code, the key ID and the peer ID.
+ */
+ if (unlikely(!pskb_may_pull(skb, sizeof(struct udphdr) +
+ OVPN_OPCODE_SIZE))) {
+ net_dbg_ratelimited("%s: packet too small from UDP socket\n",
+ netdev_name(ovpn->dev));
+ goto drop;
+ }
+
+ opcode = ovpn_opcode_from_skb(skb, sizeof(struct udphdr));
+ if (unlikely(opcode != OVPN_DATA_V2)) {
+ /* DATA_V1 is not supported */
+ if (opcode == OVPN_DATA_V1)
+ goto drop;
+
+ /* unknown or control packet: let it bubble up to userspace */
+ return 1;
+ }
+
+ peer_id = ovpn_peer_id_from_skb(skb, sizeof(struct udphdr));
+ /* some OpenVPN server implementations send data packets with the
+ * peer-id set to UNDEF. In this case we skip the peer lookup by peer-id
+ * and we try with the transport address
+ */
+ if (peer_id == OVPN_PEER_ID_UNDEF)
+ peer = ovpn_peer_get_by_transp_addr(ovpn, skb);
+ else
+ peer = ovpn_peer_get_by_id(ovpn, peer_id);
+
+ if (unlikely(!peer))
+ goto drop;
+
+ /* pop off outer UDP header */
+ __skb_pull(skb, sizeof(struct udphdr));
+ ovpn_recv(peer, skb);
+ return 0;
+
+drop:
+ dev_dstats_rx_dropped(ovpn->dev);
+drop_noovpn:
+ kfree_skb(skb);
+ return 0;
+}
+
+/**
+ * ovpn_udp4_output - send IPv4 packet over udp socket
+ * @peer: the destination peer
+ * @bind: the binding related to the destination peer
+ * @cache: dst cache
+ * @sk: the socket to send the packet over
+ * @skb: the packet to send
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+static int ovpn_udp4_output(struct ovpn_peer *peer, struct ovpn_bind *bind,
+ struct dst_cache *cache, struct sock *sk,
+ struct sk_buff *skb)
+{
+ struct rtable *rt;
+ struct flowi4 fl = {
+ .saddr = bind->local.ipv4.s_addr,
+ .daddr = bind->remote.in4.sin_addr.s_addr,
+ .fl4_sport = inet_sk(sk)->inet_sport,
+ .fl4_dport = bind->remote.in4.sin_port,
+ .flowi4_proto = sk->sk_protocol,
+ .flowi4_mark = sk->sk_mark,
+ };
+ int ret;
+
+ local_bh_disable();
+ rt = dst_cache_get_ip4(cache, &fl.saddr);
+ if (rt)
+ goto transmit;
+
+ if (unlikely(!inet_confirm_addr(sock_net(sk), NULL, 0, fl.saddr,
+ RT_SCOPE_HOST))) {
+ /* we may end up here when the cached address is not usable
+ * anymore. In this case we reset address/cache and perform a
+ * new look up
+ */
+ fl.saddr = 0;
+ spin_lock_bh(&peer->lock);
+ bind->local.ipv4.s_addr = 0;
+ spin_unlock_bh(&peer->lock);
+ dst_cache_reset(cache);
+ }
+
+ rt = ip_route_output_flow(sock_net(sk), &fl, sk);
+ if (IS_ERR(rt) && PTR_ERR(rt) == -EINVAL) {
+ fl.saddr = 0;
+ spin_lock_bh(&peer->lock);
+ bind->local.ipv4.s_addr = 0;
+ spin_unlock_bh(&peer->lock);
+ dst_cache_reset(cache);
+
+ rt = ip_route_output_flow(sock_net(sk), &fl, sk);
+ }
+
+ if (IS_ERR(rt)) {
+ ret = PTR_ERR(rt);
+ net_dbg_ratelimited("%s: no route to host %pISpc: %d\n",
+ netdev_name(peer->ovpn->dev),
+ &bind->remote.in4,
+ ret);
+ goto err;
+ }
+ dst_cache_set_ip4(cache, &rt->dst, fl.saddr);
+
+transmit:
+ udp_tunnel_xmit_skb(rt, sk, skb, fl.saddr, fl.daddr, 0,
+ ip4_dst_hoplimit(&rt->dst), 0, fl.fl4_sport,
+ fl.fl4_dport, false, sk->sk_no_check_tx, 0);
+ ret = 0;
+err:
+ local_bh_enable();
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+/**
+ * ovpn_udp6_output - send IPv6 packet over udp socket
+ * @peer: the destination peer
+ * @bind: the binding related to the destination peer
+ * @cache: dst cache
+ * @sk: the socket to send the packet over
+ * @skb: the packet to send
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+static int ovpn_udp6_output(struct ovpn_peer *peer, struct ovpn_bind *bind,
+ struct dst_cache *cache, struct sock *sk,
+ struct sk_buff *skb)
+{
+ struct dst_entry *dst;
+ int ret;
+
+ struct flowi6 fl = {
+ .saddr = bind->local.ipv6,
+ .daddr = bind->remote.in6.sin6_addr,
+ .fl6_sport = inet_sk(sk)->inet_sport,
+ .fl6_dport = bind->remote.in6.sin6_port,
+ .flowi6_proto = sk->sk_protocol,
+ .flowi6_mark = sk->sk_mark,
+ .flowi6_oif = bind->remote.in6.sin6_scope_id,
+ };
+
+ local_bh_disable();
+ dst = dst_cache_get_ip6(cache, &fl.saddr);
+ if (dst)
+ goto transmit;
+
+ if (unlikely(!ipv6_chk_addr(sock_net(sk), &fl.saddr, NULL, 0))) {
+ /* we may end up here when the cached address is not usable
+ * anymore. In this case we reset address/cache and perform a
+ * new look up
+ */
+ fl.saddr = in6addr_any;
+ spin_lock_bh(&peer->lock);
+ bind->local.ipv6 = in6addr_any;
+ spin_unlock_bh(&peer->lock);
+ dst_cache_reset(cache);
+ }
+
+ dst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(sk), sk, &fl, NULL);
+ if (IS_ERR(dst)) {
+ ret = PTR_ERR(dst);
+ net_dbg_ratelimited("%s: no route to host %pISpc: %d\n",
+ netdev_name(peer->ovpn->dev),
+ &bind->remote.in6, ret);
+ goto err;
+ }
+ dst_cache_set_ip6(cache, dst, &fl.saddr);
+
+transmit:
+ /* user IPv6 packets may be larger than the transport interface
+	 * MTU (after encapsulation); however, since they are locally
+	 * generated, we should ensure they get fragmented.
+ * Setting the ignore_df flag to 1 will instruct ip6_fragment() to
+ * fragment packets if needed.
+ *
+ * NOTE: this is not needed for IPv4 because we pass df=0 to
+ * udp_tunnel_xmit_skb()
+ */
+ skb->ignore_df = 1;
+ udp_tunnel6_xmit_skb(dst, sk, skb, skb->dev, &fl.saddr, &fl.daddr, 0,
+ ip6_dst_hoplimit(dst), 0, fl.fl6_sport,
+ fl.fl6_dport, udp_get_no_check6_tx(sk), 0);
+ ret = 0;
+err:
+ local_bh_enable();
+ return ret;
+}
+#endif
+
+/**
+ * ovpn_udp_output - transmit skb using udp-tunnel
+ * @peer: the destination peer
+ * @cache: dst cache
+ * @sk: the socket to send the packet over
+ * @skb: the packet to send
+ *
+ * rcu_read_lock should be held on entry.
+ * On return, the skb is consumed.
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+static int ovpn_udp_output(struct ovpn_peer *peer, struct dst_cache *cache,
+ struct sock *sk, struct sk_buff *skb)
+{
+ struct ovpn_bind *bind;
+ int ret;
+
+ /* set sk to null if skb is already orphaned */
+ if (!skb->destructor)
+ skb->sk = NULL;
+
+ rcu_read_lock();
+ bind = rcu_dereference(peer->bind);
+ if (unlikely(!bind)) {
+ net_warn_ratelimited("%s: no bind for remote peer %u\n",
+ netdev_name(peer->ovpn->dev), peer->id);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ switch (bind->remote.in4.sin_family) {
+ case AF_INET:
+ ret = ovpn_udp4_output(peer, bind, cache, sk, skb);
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
+ ret = ovpn_udp6_output(peer, bind, cache, sk, skb);
+ break;
+#endif
+ default:
+ ret = -EAFNOSUPPORT;
+ break;
+ }
+
+out:
+ rcu_read_unlock();
+ return ret;
+}
+
+/**
+ * ovpn_udp_send_skb - prepare skb and send it over via UDP
+ * @peer: the destination peer
+ * @sk: peer socket
+ * @skb: the packet to send
+ */
+void ovpn_udp_send_skb(struct ovpn_peer *peer, struct sock *sk,
+ struct sk_buff *skb)
+{
+ int ret;
+
+ skb->dev = peer->ovpn->dev;
+ skb->mark = READ_ONCE(sk->sk_mark);
+ /* no checksum performed at this layer */
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* crypto layer -> transport (UDP) */
+ ret = ovpn_udp_output(peer, &peer->dst_cache, sk, skb);
+ if (unlikely(ret < 0))
+ kfree_skb(skb);
+}
+
+static void ovpn_udp_encap_destroy(struct sock *sk)
+{
+ struct ovpn_socket *sock;
+ struct ovpn_priv *ovpn;
+
+ rcu_read_lock();
+ sock = rcu_dereference_sk_user_data(sk);
+ if (!sock || !sock->ovpn) {
+ rcu_read_unlock();
+ return;
+ }
+ ovpn = sock->ovpn;
+ rcu_read_unlock();
+
+ ovpn_peers_free(ovpn, sk, OVPN_DEL_PEER_REASON_TRANSPORT_DISCONNECT);
+}
+
+/**
+ * ovpn_udp_socket_attach - set udp-tunnel CBs on socket and link it to ovpn
+ * @ovpn_sock: socket to configure
+ * @sock: the socket container to be passed to setup_udp_tunnel_sock()
+ * @ovpn: the openvpn instance to link
+ *
+ * After invoking this function, the sock will be controlled by ovpn so that
+ * any incoming packet may be processed by ovpn first.
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int ovpn_udp_socket_attach(struct ovpn_socket *ovpn_sock, struct socket *sock,
+ struct ovpn_priv *ovpn)
+{
+ struct udp_tunnel_sock_cfg cfg = {
+ .encap_type = UDP_ENCAP_OVPNINUDP,
+ .encap_rcv = ovpn_udp_encap_recv,
+ .encap_destroy = ovpn_udp_encap_destroy,
+ };
+ struct ovpn_socket *old_data;
+ int ret;
+
+ /* make sure no pre-existing encapsulation handler exists */
+ rcu_read_lock();
+ old_data = rcu_dereference_sk_user_data(ovpn_sock->sk);
+ if (!old_data) {
+ /* socket is currently unused - we can take it */
+ rcu_read_unlock();
+ setup_udp_tunnel_sock(sock_net(ovpn_sock->sk), sock, &cfg);
+ return 0;
+ }
+
+ /* socket is in use. We need to understand if it's owned by this ovpn
+ * instance or by something else.
+ * In the former case, we can increase the refcounter and happily
+ * use it, because the same UDP socket is expected to be shared among
+ * different peers.
+ *
+	 * Unlike TCP, a single UDP socket can be used to talk to many remote
+ * hosts and therefore openvpn instantiates one only for all its peers
+ */
+ if ((READ_ONCE(udp_sk(ovpn_sock->sk)->encap_type) == UDP_ENCAP_OVPNINUDP) &&
+ old_data->ovpn == ovpn) {
+ netdev_dbg(ovpn->dev,
+ "provided socket already owned by this interface\n");
+ ret = -EALREADY;
+ } else {
+ netdev_dbg(ovpn->dev,
+ "provided socket already taken by other user\n");
+ ret = -EBUSY;
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
+/**
+ * ovpn_udp_socket_detach - clean udp-tunnel status for this socket
+ * @ovpn_sock: the socket to clean
+ */
+void ovpn_udp_socket_detach(struct ovpn_socket *ovpn_sock)
+{
+ struct sock *sk = ovpn_sock->sk;
+
+ /* Re-enable multicast loopback */
+ inet_set_bit(MC_LOOP, sk);
+ /* Disable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */
+ inet_dec_convert_csum(sk);
+
+ WRITE_ONCE(udp_sk(sk)->encap_type, 0);
+ WRITE_ONCE(udp_sk(sk)->encap_rcv, NULL);
+ WRITE_ONCE(udp_sk(sk)->encap_destroy, NULL);
+
+ rcu_assign_sk_user_data(sk, NULL);
+}
diff --git a/drivers/net/ovpn/udp.h b/drivers/net/ovpn/udp.h
new file mode 100644
index 000000000000..fe26fbe25c5a
--- /dev/null
+++ b/drivers/net/ovpn/udp.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2019-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_UDP_H_
+#define _NET_OVPN_UDP_H_
+
+#include <net/sock.h>
+
+struct ovpn_peer;
+struct ovpn_priv;
+struct socket;
+
+int ovpn_udp_socket_attach(struct ovpn_socket *ovpn_sock, struct socket *sock,
+ struct ovpn_priv *ovpn);
+void ovpn_udp_socket_detach(struct ovpn_socket *ovpn_sock);
+
+void ovpn_udp_send_skb(struct ovpn_peer *peer, struct sock *sk,
+ struct sk_buff *skb);
+
+#endif /* _NET_OVPN_UDP_H_ */
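
The return values documented for ovpn_udp_encap_recv() follow the generic UDP encap_rcv contract: 0 means the skb was consumed (accepted by the data channel or dropped), while a positive value hands the packet back to the UDP stack for normal socket delivery. A compact user-space sketch of that dispatch; the opcode values are illustrative and not the real OVPN_DATA_* constants:

#include <stdbool.h>
#include <stdio.h>

enum demo_opcode { DEMO_DATA_V1 = 1, DEMO_DATA_V2 = 2, DEMO_CONTROL = 3 };

/* 0: consumed by the data channel (or dropped), 1: pass up to user space */
static int demo_encap_recv(enum demo_opcode opcode, bool *dropped)
{
	*dropped = false;

	if (opcode == DEMO_DATA_V2)
		return 0;		/* decrypted and forwarded in kernel */
	if (opcode == DEMO_DATA_V1) {
		*dropped = true;	/* unsupported on the fast path */
		return 0;
	}
	return 1;			/* control channel: deliver via recvmsg() */
}

int main(void)
{
	bool dropped;

	printf("DATA_V2 -> %d\n", demo_encap_recv(DEMO_DATA_V2, &dropped));
	printf("DATA_V1 -> %d (dropped=%d)\n",
	       demo_encap_recv(DEMO_DATA_V1, &dropped), dropped);
	printf("CONTROL -> %d\n", demo_encap_recv(DEMO_CONTROL, &dropped));
	return 0;
}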
diff --git a/drivers/net/pcs/Kconfig b/drivers/net/pcs/Kconfig
index f6aa437473de..ecbc3530e780 100644
--- a/drivers/net/pcs/Kconfig
+++ b/drivers/net/pcs/Kconfig
@@ -26,11 +26,12 @@ config PCS_MTK_LYNXI
which is part of MediaTek's SoC and Ethernet switch ICs.
config PCS_RZN1_MIIC
- tristate "Renesas RZ/N1 MII converter"
- depends on OF && (ARCH_RZN1 || COMPILE_TEST)
+ tristate "Renesas RZ/N1, RZ/N2H, RZ/T2H MII converter"
+ depends on OF
+ depends on ARCH_RENESAS || COMPILE_TEST
help
- This module provides a driver for the MII converter that is available
- on RZ/N1 SoCs. This PCS converts MII to RMII/RGMII or can be set in
- pass-through mode for MII.
+ This module provides a driver for the MII converter available on
+ Renesas RZ/N1, RZ/N2H, and RZ/T2H SoCs. This PCS converts MII to
+ RMII/RGMII, or can be set in pass-through mode for MII.
endmenu
diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c
index b79aedad855b..73e1364ad1ed 100644
--- a/drivers/net/pcs/pcs-lynx.c
+++ b/drivers/net/pcs/pcs-lynx.c
@@ -35,6 +35,28 @@ enum sgmii_speed {
#define phylink_pcs_to_lynx(pl_pcs) container_of((pl_pcs), struct lynx_pcs, pcs)
#define lynx_to_phylink_pcs(lynx) (&(lynx)->pcs)
+static unsigned int lynx_pcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+
+ case PHY_INTERFACE_MODE_10GBASER:
+ return LINK_INBAND_DISABLE;
+
+ case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_10G_QXGMII:
+ return LINK_INBAND_ENABLE;
+
+ default:
+ return 0;
+ }
+}
+
static void lynx_pcs_get_state_usxgmii(struct mdio_device *pcs,
struct phylink_link_state *state)
{
@@ -58,42 +80,20 @@ static void lynx_pcs_get_state_usxgmii(struct mdio_device *pcs,
phylink_decode_usxgmii_word(state, lpa);
}
-static void lynx_pcs_get_state_2500basex(struct mdio_device *pcs,
- struct phylink_link_state *state)
-{
- int bmsr;
-
- bmsr = mdiodev_read(pcs, MII_BMSR);
- if (bmsr < 0) {
- state->link = false;
- return;
- }
-
- state->link = !!(bmsr & BMSR_LSTATUS);
- state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
- if (!state->link)
- return;
-
- state->speed = SPEED_2500;
- state->pause |= MLO_PAUSE_TX | MLO_PAUSE_RX;
- state->duplex = DUPLEX_FULL;
-}
-
-static void lynx_pcs_get_state(struct phylink_pcs *pcs,
+static void lynx_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct lynx_pcs *lynx = phylink_pcs_to_lynx(pcs);
switch (state->interface) {
case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_QSGMII:
- phylink_mii_c22_pcs_get_state(lynx->mdio, state);
- break;
- case PHY_INTERFACE_MODE_2500BASEX:
- lynx_pcs_get_state_2500basex(lynx->mdio, state);
+ phylink_mii_c22_pcs_get_state(lynx->mdio, neg_mode, state);
break;
case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_10G_QXGMII:
lynx_pcs_get_state_usxgmii(lynx->mdio, state);
break;
case PHY_INTERFACE_MODE_10GBASER:
@@ -129,7 +129,8 @@ static int lynx_pcs_config_giga(struct mdio_device *pcs,
mdiodev_write(pcs, LINK_TIMER_HI, link_timer >> 16);
}
- if (interface == PHY_INTERFACE_MODE_1000BASEX) {
+ if (interface == PHY_INTERFACE_MODE_1000BASEX ||
+ interface == PHY_INTERFACE_MODE_2500BASEX) {
if_mode = 0;
} else {
/* SGMII and QSGMII */
@@ -149,6 +150,7 @@ static int lynx_pcs_config_giga(struct mdio_device *pcs,
}
static int lynx_pcs_config_usxgmii(struct mdio_device *pcs,
+ phy_interface_t interface,
const unsigned long *advertising,
unsigned int neg_mode)
{
@@ -156,7 +158,8 @@ static int lynx_pcs_config_usxgmii(struct mdio_device *pcs,
int addr = pcs->addr;
if (neg_mode != PHYLINK_PCS_NEG_INBAND_ENABLED) {
- dev_err(&pcs->dev, "USXGMII only supports in-band AN for now\n");
+ dev_err(&pcs->dev, "%s only supports in-band AN for now\n",
+ phy_modes(interface));
return -EOPNOTSUPP;
}
@@ -177,17 +180,12 @@ static int lynx_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_2500BASEX:
return lynx_pcs_config_giga(lynx->mdio, ifmode, advertising,
neg_mode);
- case PHY_INTERFACE_MODE_2500BASEX:
- if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) {
- dev_err(&lynx->mdio->dev,
- "AN not supported on 3.125GHz SerDes lane\n");
- return -EOPNOTSUPP;
- }
- break;
case PHY_INTERFACE_MODE_USXGMII:
- return lynx_pcs_config_usxgmii(lynx->mdio, advertising,
+ case PHY_INTERFACE_MODE_10G_QXGMII:
+ return lynx_pcs_config_usxgmii(lynx->mdio, ifmode, advertising,
neg_mode);
case PHY_INTERFACE_MODE_10GBASER:
/* Nothing to do here for 10GBASER */
@@ -245,42 +243,6 @@ static void lynx_pcs_link_up_sgmii(struct mdio_device *pcs,
if_mode);
}
-/* 2500Base-X is SerDes protocol 7 on Felix and 6 on ENETC. It is a SerDes lane
- * clocked at 3.125 GHz which encodes symbols with 8b/10b and does not have
- * auto-negotiation of any link parameters. Electrically it is compatible with
- * a single lane of XAUI.
- * The hardware reference manual wants to call this mode SGMII, but it isn't
- * really, since the fundamental features of SGMII:
- * - Downgrading the link speed by duplicating symbols
- * - Auto-negotiation
- * are not there.
- * The speed is configured at 1000 in the IF_MODE because the clock frequency
- * is actually given by a PLL configured in the Reset Configuration Word (RCW).
- * Since there is no difference between fixed speed SGMII w/o AN and 802.3z w/o
- * AN, we call this PHY interface type 2500Base-X. In case a PHY negotiates a
- * lower link speed on line side, the system-side interface remains fixed at
- * 2500 Mbps and we do rate adaptation through pause frames.
- */
-static void lynx_pcs_link_up_2500basex(struct mdio_device *pcs,
- unsigned int neg_mode,
- int speed, int duplex)
-{
- u16 if_mode = 0;
-
- if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) {
- dev_err(&pcs->dev, "AN not supported for 2500BaseX\n");
- return;
- }
-
- if (duplex == DUPLEX_HALF)
- if_mode |= IF_MODE_HALF_DUPLEX;
- if_mode |= IF_MODE_SPEED(SGMII_SPEED_2500);
-
- mdiodev_modify(pcs, IF_MODE,
- IF_MODE_HALF_DUPLEX | IF_MODE_SPEED_MSK,
- if_mode);
-}
-
static void lynx_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface,
int speed, int duplex)
@@ -292,10 +254,8 @@ static void lynx_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
case PHY_INTERFACE_MODE_QSGMII:
lynx_pcs_link_up_sgmii(lynx->mdio, neg_mode, speed, duplex);
break;
- case PHY_INTERFACE_MODE_2500BASEX:
- lynx_pcs_link_up_2500basex(lynx->mdio, neg_mode, speed, duplex);
- break;
case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_10G_QXGMII:
/* At the moment, only in-band AN is supported for USXGMII
* so nothing to do in link_up
*/
@@ -306,15 +266,27 @@ static void lynx_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
}
static const struct phylink_pcs_ops lynx_pcs_phylink_ops = {
+ .pcs_inband_caps = lynx_pcs_inband_caps,
.pcs_get_state = lynx_pcs_get_state,
.pcs_config = lynx_pcs_config,
.pcs_an_restart = lynx_pcs_an_restart,
.pcs_link_up = lynx_pcs_link_up,
};
+static const phy_interface_t lynx_interfaces[] = {
+ PHY_INTERFACE_MODE_SGMII,
+ PHY_INTERFACE_MODE_QSGMII,
+ PHY_INTERFACE_MODE_1000BASEX,
+ PHY_INTERFACE_MODE_2500BASEX,
+ PHY_INTERFACE_MODE_10GBASER,
+ PHY_INTERFACE_MODE_USXGMII,
+ PHY_INTERFACE_MODE_10G_QXGMII,
+};
+
static struct phylink_pcs *lynx_pcs_create(struct mdio_device *mdio)
{
struct lynx_pcs *lynx;
+ int i;
lynx = kzalloc(sizeof(*lynx), GFP_KERNEL);
if (!lynx)
@@ -323,9 +295,11 @@ static struct phylink_pcs *lynx_pcs_create(struct mdio_device *mdio)
mdio_device_get(mdio);
lynx->mdio = mdio;
lynx->pcs.ops = &lynx_pcs_phylink_ops;
- lynx->pcs.neg_mode = true;
lynx->pcs.poll = true;
+ for (i = 0; i < ARRAY_SIZE(lynx_interfaces); i++)
+ __set_bit(lynx_interfaces[i], lynx->pcs.supported_interfaces);
+
return lynx_to_phylink_pcs(lynx);
}
diff --git a/drivers/net/pcs/pcs-mtk-lynxi.c b/drivers/net/pcs/pcs-mtk-lynxi.c
index 4f63abe638c4..149ddf51d785 100644
--- a/drivers/net/pcs/pcs-mtk-lynxi.c
+++ b/drivers/net/pcs/pcs-mtk-lynxi.c
@@ -88,7 +88,24 @@ static struct mtk_pcs_lynxi *pcs_to_mtk_pcs_lynxi(struct phylink_pcs *pcs)
return container_of(pcs, struct mtk_pcs_lynxi, pcs);
}
+static unsigned int mtk_pcs_lynxi_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return LINK_INBAND_DISABLE;
+
+ default:
+ return 0;
+ }
+}
+
static void mtk_pcs_lynxi_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mtk_pcs_lynxi *mpcs = pcs_to_mtk_pcs_lynxi(pcs);
@@ -98,7 +115,8 @@ static void mtk_pcs_lynxi_get_state(struct phylink_pcs *pcs,
regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &bm);
regmap_read(mpcs->regmap, SGMSYS_PCS_ADVERTISE, &adv);
- phylink_mii_c22_pcs_decode_state(state, FIELD_GET(SGMII_BMSR, bm),
+ phylink_mii_c22_pcs_decode_state(state, neg_mode,
+ FIELD_GET(SGMII_BMSR, bm),
FIELD_GET(SGMII_LPA, adv));
}
@@ -241,6 +259,7 @@ static void mtk_pcs_lynxi_disable(struct phylink_pcs *pcs)
}
static const struct phylink_pcs_ops mtk_pcs_lynxi_ops = {
+ .pcs_inband_caps = mtk_pcs_lynxi_inband_caps,
.pcs_get_state = mtk_pcs_lynxi_get_state,
.pcs_config = mtk_pcs_lynxi_config,
.pcs_an_restart = mtk_pcs_lynxi_restart_an,
@@ -286,10 +305,13 @@ struct phylink_pcs *mtk_pcs_lynxi_create(struct device *dev,
mpcs->regmap = regmap;
mpcs->flags = flags;
mpcs->pcs.ops = &mtk_pcs_lynxi_ops;
- mpcs->pcs.neg_mode = true;
mpcs->pcs.poll = true;
mpcs->interface = PHY_INTERFACE_MODE_NA;
+ __set_bit(PHY_INTERFACE_MODE_SGMII, mpcs->pcs.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, mpcs->pcs.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, mpcs->pcs.supported_interfaces);
+
return &mpcs->pcs;
}
EXPORT_SYMBOL(mtk_pcs_lynxi_create);
diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c
index 61944574d087..885f17c32643 100644
--- a/drivers/net/pcs/pcs-rzn1-miic.c
+++ b/drivers/net/pcs/pcs-rzn1-miic.c
@@ -5,8 +5,12 @@
* Clément Léger <clement.leger@bootlin.com>
*/
+#include <linux/array_size.h>
+#include <linux/bits.h>
+#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/device.h>
+#include <linux/device/devres.h>
#include <linux/mdio.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -14,13 +18,15 @@
#include <linux/phylink.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
#include <dt-bindings/net/pcs-rzn1-miic.h>
+#include <dt-bindings/net/renesas,r9a09g077-pcs-miic.h>
#define MIIC_PRCMD 0x0
#define MIIC_ESID_CODE 0x4
-#define MIIC_MODCTRL 0x20
-#define MIIC_MODCTRL_SW_MODE GENMASK(4, 0)
+#define MIIC_MODCTRL 0x8
#define MIIC_CONVCTRL(port) (0x100 + (port) * 4)
@@ -46,21 +52,23 @@
#define MIIC_SWCTRL 0x304
#define MIIC_SWDUPC 0x308
-#define MIIC_MAX_NR_PORTS 5
-
-#define MIIC_MODCTRL_CONF_CONV_NUM 6
+#define MIIC_MODCTRL_CONF_CONV_MAX 6
#define MIIC_MODCTRL_CONF_NONE -1
+#define MIIC_MAX_NUM_RSTS 2
+
/**
* struct modctrl_match - Matching table entry for convctrl configuration
* See section 8.2.1 of manual.
* @mode_cfg: Configuration value for convctrl
* @conv: Configuration of ethernet port muxes. First index is SWITCH_PORTIN,
- * then index 1 - 5 are CONV1 - CONV5.
+ * then index 1 - 5 are CONV1 - CONV5 for RZ/N1 SoCs. In case
+ * of RZ/T2H and RZ/N2H SoCs, the first index is SWITCH_PORTIN then
+ * index 0 - 3 are CONV0 - CONV3.
*/
struct modctrl_match {
u32 mode_cfg;
- u8 conv[MIIC_MODCTRL_CONF_CONV_NUM];
+ u8 conv[MIIC_MODCTRL_CONF_CONV_MAX];
};
static struct modctrl_match modctrl_match_table[] = {
@@ -109,7 +117,7 @@ static const char * const conf_to_string[] = {
[MIIC_HSR_PORTB] = "HSR_PORTB",
};
-static const char *index_to_string[MIIC_MODCTRL_CONF_CONV_NUM] = {
+static const char * const index_to_string[] = {
"SWITCH_PORTIN",
"CONV1",
"CONV2",
@@ -118,16 +126,106 @@ static const char *index_to_string[MIIC_MODCTRL_CONF_CONV_NUM] = {
"CONV5",
};
+static struct modctrl_match rzt2h_modctrl_match_table[] = {
+ {0x0, {ETHSS_GMAC0_PORT, ETHSS_ETHSW_PORT0, ETHSS_ETHSW_PORT1,
+ ETHSS_ETHSW_PORT2, ETHSS_GMAC1_PORT}},
+
+ {0x1, {MIIC_MODCTRL_CONF_NONE, ETHSS_ESC_PORT0, ETHSS_ESC_PORT1,
+ ETHSS_GMAC2_PORT, ETHSS_GMAC1_PORT}},
+
+ {0x2, {ETHSS_GMAC0_PORT, ETHSS_ESC_PORT0, ETHSS_ESC_PORT1,
+ ETHSS_ETHSW_PORT2, ETHSS_GMAC1_PORT}},
+
+ {0x3, {MIIC_MODCTRL_CONF_NONE, ETHSS_ESC_PORT0, ETHSS_ESC_PORT1,
+ ETHSS_ESC_PORT2, ETHSS_GMAC1_PORT}},
+
+ {0x4, {ETHSS_GMAC0_PORT, ETHSS_ETHSW_PORT0, ETHSS_ESC_PORT1,
+ ETHSS_ESC_PORT2, ETHSS_GMAC1_PORT}},
+
+ {0x5, {ETHSS_GMAC0_PORT, ETHSS_ETHSW_PORT0, ETHSS_ESC_PORT1,
+ ETHSS_ETHSW_PORT2, ETHSS_GMAC1_PORT}},
+
+ {0x6, {ETHSS_GMAC0_PORT, ETHSS_ETHSW_PORT0, ETHSS_ETHSW_PORT1,
+ ETHSS_GMAC2_PORT, ETHSS_GMAC1_PORT}},
+
+ {0x7, {MIIC_MODCTRL_CONF_NONE, ETHSS_GMAC0_PORT, ETHSS_GMAC1_PORT,
+ ETHSS_GMAC2_PORT, MIIC_MODCTRL_CONF_NONE}}
+};
+
+static const char * const rzt2h_conf_to_string[] = {
+ [ETHSS_GMAC0_PORT] = "GMAC0_PORT",
+ [ETHSS_GMAC1_PORT] = "GMAC1_PORT",
+ [ETHSS_GMAC2_PORT] = "GMAC2_PORT",
+ [ETHSS_ESC_PORT0] = "ETHERCAT_PORT0",
+ [ETHSS_ESC_PORT1] = "ETHERCAT_PORT1",
+ [ETHSS_ESC_PORT2] = "ETHERCAT_PORT2",
+ [ETHSS_ETHSW_PORT0] = "SWITCH_PORT0",
+ [ETHSS_ETHSW_PORT1] = "SWITCH_PORT1",
+ [ETHSS_ETHSW_PORT2] = "SWITCH_PORT2",
+};
+
+static const char * const rzt2h_index_to_string[] = {
+ "SWITCH_PORTIN",
+ "CONV0",
+ "CONV1",
+ "CONV2",
+ "CONV3",
+};
+
+static const char * const rzt2h_reset_ids[] = {
+ "rst",
+ "crst",
+};
+
/**
* struct miic - MII converter structure
* @base: base address of the MII converter
* @dev: Device associated to the MII converter
* @lock: Lock used for read-modify-write access
+ * @rsts: Reset controls for the MII converter
+ * @of_data: Pointer to OF data
*/
struct miic {
void __iomem *base;
struct device *dev;
spinlock_t lock;
+ struct reset_control_bulk_data rsts[MIIC_MAX_NUM_RSTS];
+ const struct miic_of_data *of_data;
+};
+
+/**
+ * struct miic_of_data - OF data for MII converter
+ * @match_table: Matching table for convctrl configuration
+ * @match_table_count: Number of entries in the matching table
+ * @conf_conv_count: Number of entries in the conf_conv array
+ * @conf_to_string: String representations of the configuration values
+ * @conf_to_string_count: Number of entries in the conf_to_string array
+ * @index_to_string: String representations of the index values
+ * @index_to_string_count: Number of entries in the index_to_string array
+ * @miic_port_start: MIIC port start number
+ * @miic_port_max: Maximum number of MIIC ports supported
+ * @sw_mode_mask: Switch mode mask
+ * @reset_ids: Reset names array
+ * @reset_count: Number of entries in the reset_ids array
+ * @init_unlock_lock_regs: Flag to indicate if registers need to be unlocked
+ * before access.
+ * @miic_write: Function pointer to write a value to a MIIC register
+ */
+struct miic_of_data {
+ struct modctrl_match *match_table;
+ u8 match_table_count;
+ u8 conf_conv_count;
+ const char * const *conf_to_string;
+ u8 conf_to_string_count;
+ const char * const *index_to_string;
+ u8 index_to_string_count;
+ u8 miic_port_start;
+ u8 miic_port_max;
+ u8 sw_mode_mask;
+ const char * const *reset_ids;
+ u8 reset_count;
+ bool init_unlock_lock_regs;
+ void (*miic_write)(struct miic *miic, int offset, u32 value);
};
/**
@@ -149,9 +247,36 @@ static struct miic_port *phylink_pcs_to_miic_port(struct phylink_pcs *pcs)
return container_of(pcs, struct miic_port, pcs);
}
-static void miic_reg_writel(struct miic *miic, int offset, u32 value)
+static void miic_unlock_regs(struct miic *miic)
+{
+ /* Unprotect register writes */
+ writel(0x00A5, miic->base + MIIC_PRCMD);
+ writel(0x0001, miic->base + MIIC_PRCMD);
+ writel(0xFFFE, miic->base + MIIC_PRCMD);
+ writel(0x0001, miic->base + MIIC_PRCMD);
+}
+
+static void miic_lock_regs(struct miic *miic)
+{
+ /* Protect register writes */
+ writel(0x0000, miic->base + MIIC_PRCMD);
+}
+
+static void miic_reg_writel_unlocked(struct miic *miic, int offset, u32 value)
+{
+ writel(value, miic->base + offset);
+}
+
+static void miic_reg_writel_locked(struct miic *miic, int offset, u32 value)
{
+ miic_unlock_regs(miic);
writel(value, miic->base + offset);
+ miic_lock_regs(miic);
+}
+
+static void miic_reg_writel(struct miic *miic, int offset, u32 value)
+{
+ miic->of_data->miic_write(miic, offset, value);
}
static u32 miic_reg_readl(struct miic *miic, int offset)
@@ -268,17 +393,6 @@ static void miic_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
(MIIC_CONVCTRL_CONV_SPEED | MIIC_CONVCTRL_FULLD), val);
}
-static int miic_validate(struct phylink_pcs *pcs, unsigned long *supported,
- const struct phylink_link_state *state)
-{
- if (phy_interface_mode_is_rgmii(state->interface) ||
- state->interface == PHY_INTERFACE_MODE_RMII ||
- state->interface == PHY_INTERFACE_MODE_MII)
- return 1;
-
- return -EINVAL;
-}
-
static int miic_pre_init(struct phylink_pcs *pcs)
{
struct miic_port *miic_port = phylink_pcs_to_miic_port(pcs);
@@ -307,7 +421,6 @@ static int miic_pre_init(struct phylink_pcs *pcs)
}
static const struct phylink_pcs_ops miic_phylink_ops = {
- .pcs_validate = miic_validate,
.pcs_config = miic_config,
.pcs_link_up = miic_link_up,
.pcs_pre_init = miic_pre_init,
@@ -315,6 +428,7 @@ static const struct phylink_pcs_ops miic_phylink_ops = {
struct phylink_pcs *miic_create(struct device *dev, struct device_node *np)
{
+ const struct miic_of_data *of_data;
struct platform_device *pdev;
struct miic_port *miic_port;
struct device_node *pcs_np;
@@ -327,9 +441,6 @@ struct phylink_pcs *miic_create(struct device *dev, struct device_node *np)
if (of_property_read_u32(np, "reg", &port))
return ERR_PTR(-EINVAL);
- if (port > MIIC_MAX_NR_PORTS || port < 1)
- return ERR_PTR(-EINVAL);
-
/* The PCS pdev is attached to the parent node */
pcs_np = of_get_parent(np);
if (!pcs_np)
@@ -348,20 +459,29 @@ struct phylink_pcs *miic_create(struct device *dev, struct device_node *np)
return ERR_PTR(-EPROBE_DEFER);
}
+ miic = platform_get_drvdata(pdev);
+ of_data = miic->of_data;
+ if (port > of_data->miic_port_max || port < of_data->miic_port_start) {
+ put_device(&pdev->dev);
+ return ERR_PTR(-EINVAL);
+ }
+
miic_port = kzalloc(sizeof(*miic_port), GFP_KERNEL);
if (!miic_port) {
put_device(&pdev->dev);
return ERR_PTR(-ENOMEM);
}
- miic = platform_get_drvdata(pdev);
device_link_add(dev, miic->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
put_device(&pdev->dev);
miic_port->miic = miic;
- miic_port->port = port - 1;
+ miic_port->port = port - of_data->miic_port_start;
miic_port->pcs.ops = &miic_phylink_ops;
- miic_port->pcs.neg_mode = true;
+
+ phy_interface_set_rgmii(miic_port->pcs.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII, miic_port->pcs.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII, miic_port->pcs.supported_interfaces);
return &miic_port->pcs;
}
@@ -378,21 +498,23 @@ EXPORT_SYMBOL(miic_destroy);
static int miic_init_hw(struct miic *miic, u32 cfg_mode)
{
+ u8 sw_mode_mask = miic->of_data->sw_mode_mask;
int port;
/* Unlock write access to accessory registers (cf datasheet). If this
* is going to be used in conjunction with the Cortex-M3, this sequence
* will have to be moved in register write
*/
- miic_reg_writel(miic, MIIC_PRCMD, 0x00A5);
- miic_reg_writel(miic, MIIC_PRCMD, 0x0001);
- miic_reg_writel(miic, MIIC_PRCMD, 0xFFFE);
- miic_reg_writel(miic, MIIC_PRCMD, 0x0001);
+ if (miic->of_data->init_unlock_lock_regs)
+ miic_unlock_regs(miic);
+ /* TODO: Replace with FIELD_PREP() when compile-time constant
+ * restriction is lifted. Currently __ffs() returns 0 for sw_mode_mask.
+ */
miic_reg_writel(miic, MIIC_MODCTRL,
- FIELD_PREP(MIIC_MODCTRL_SW_MODE, cfg_mode));
+ ((cfg_mode << __ffs(sw_mode_mask)) & sw_mode_mask));
- for (port = 0; port < MIIC_MAX_NR_PORTS; port++) {
+ for (port = 0; port < miic->of_data->miic_port_max; port++) {
miic_converter_enable(miic, port, 0);
/* Disable speed/duplex control from these registers, datasheet
* says switch registers should be used to setup switch port
@@ -405,12 +527,11 @@ static int miic_init_hw(struct miic *miic, u32 cfg_mode)
return 0;
}
-static bool miic_modctrl_match(s8 table_val[MIIC_MODCTRL_CONF_CONV_NUM],
- s8 dt_val[MIIC_MODCTRL_CONF_CONV_NUM])
+static bool miic_modctrl_match(s8 *table_val, s8 *dt_val, u8 count)
{
int i;
- for (i = 0; i < MIIC_MODCTRL_CONF_CONV_NUM; i++) {
+ for (i = 0; i < count; i++) {
if (dt_val[i] == MIIC_MODCTRL_CONF_NONE)
continue;
@@ -421,69 +542,119 @@ static bool miic_modctrl_match(s8 table_val[MIIC_MODCTRL_CONF_CONV_NUM],
return true;
}
-static void miic_dump_conf(struct device *dev,
- s8 conf[MIIC_MODCTRL_CONF_CONV_NUM])
+static void miic_dump_conf(struct miic *miic, s8 *conf)
{
+ const struct miic_of_data *of_data = miic->of_data;
const char *conf_name;
int i;
- for (i = 0; i < MIIC_MODCTRL_CONF_CONV_NUM; i++) {
+ for (i = 0; i < of_data->conf_conv_count; i++) {
if (conf[i] != MIIC_MODCTRL_CONF_NONE)
- conf_name = conf_to_string[conf[i]];
+ conf_name = of_data->conf_to_string[conf[i]];
else
conf_name = "NONE";
- dev_err(dev, "%s: %s\n", index_to_string[i], conf_name);
+ dev_err(miic->dev, "%s: %s\n",
+ of_data->index_to_string[i], conf_name);
}
}
-static int miic_match_dt_conf(struct device *dev,
- s8 dt_val[MIIC_MODCTRL_CONF_CONV_NUM],
- u32 *mode_cfg)
+static int miic_match_dt_conf(struct miic *miic, s8 *dt_val, u32 *mode_cfg)
{
+ const struct miic_of_data *of_data = miic->of_data;
struct modctrl_match *table_entry;
int i;
- for (i = 0; i < ARRAY_SIZE(modctrl_match_table); i++) {
- table_entry = &modctrl_match_table[i];
+ for (i = 0; i < of_data->match_table_count; i++) {
+ table_entry = &of_data->match_table[i];
- if (miic_modctrl_match(table_entry->conv, dt_val)) {
+ if (miic_modctrl_match(table_entry->conv, dt_val,
+ miic->of_data->conf_conv_count)) {
*mode_cfg = table_entry->mode_cfg;
return 0;
}
}
- dev_err(dev, "Failed to apply requested configuration\n");
- miic_dump_conf(dev, dt_val);
+ dev_err(miic->dev, "Failed to apply requested configuration\n");
+ miic_dump_conf(miic, dt_val);
return -EINVAL;
}
-static int miic_parse_dt(struct device *dev, u32 *mode_cfg)
+static int miic_parse_dt(struct miic *miic, u32 *mode_cfg)
{
- s8 dt_val[MIIC_MODCTRL_CONF_CONV_NUM];
- struct device_node *np = dev->of_node;
+ struct device_node *np = miic->dev->of_node;
struct device_node *conv;
+ int port, ret;
+ s8 *dt_val;
u32 conf;
- int port;
- memset(dt_val, MIIC_MODCTRL_CONF_NONE, sizeof(dt_val));
+ dt_val = kmalloc_array(miic->of_data->conf_conv_count,
+ sizeof(*dt_val), GFP_KERNEL);
+ if (!dt_val)
+ return -ENOMEM;
+
+ memset(dt_val, MIIC_MODCTRL_CONF_NONE,
+ miic->of_data->conf_conv_count * sizeof(*dt_val));
if (of_property_read_u32(np, "renesas,miic-switch-portin", &conf) == 0)
dt_val[0] = conf;
- for_each_child_of_node(np, conv) {
+ for_each_available_child_of_node(np, conv) {
if (of_property_read_u32(conv, "reg", &port))
continue;
- if (!of_device_is_available(conv))
- continue;
-
+ /* Adjust 0-based port numbering so converter entries start at dt_val[1] */
+ port += !miic->of_data->miic_port_start;
if (of_property_read_u32(conv, "renesas,miic-input", &conf) == 0)
dt_val[port] = conf;
}
- return miic_match_dt_conf(dev, dt_val, mode_cfg);
+ ret = miic_match_dt_conf(miic, dt_val, mode_cfg);
+ kfree(dt_val);
+
+ return ret;
+}
+
+static void miic_reset_control_bulk_assert(void *data)
+{
+ struct miic *miic = data;
+ int ret;
+
+ ret = reset_control_bulk_assert(miic->of_data->reset_count, miic->rsts);
+ if (ret)
+ dev_err(miic->dev, "failed to assert reset lines\n");
+}
+
+static int miic_reset_control_init(struct miic *miic)
+{
+ const struct miic_of_data *of_data = miic->of_data;
+ struct device *dev = miic->dev;
+ int ret;
+ u8 i;
+
+ if (!of_data->reset_count)
+ return 0;
+
+ for (i = 0; i < of_data->reset_count; i++)
+ miic->rsts[i].id = of_data->reset_ids[i];
+
+ ret = devm_reset_control_bulk_get_exclusive(dev, of_data->reset_count,
+ miic->rsts);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get bulk reset lines\n");
+
+ ret = reset_control_bulk_deassert(of_data->reset_count, miic->rsts);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to deassert reset lines\n");
+
+ ret = devm_add_action_or_reset(dev, miic_reset_control_bulk_assert,
+ miic);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add reset action\n");
+
+ return 0;
}
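/*
 * Editor's note: devm_add_action_or_reset() above ties re-asserting the
 * bulk resets to the device's lifetime, so no explicit cleanup is needed
 * in the remove path; a driver without devres would otherwise have to
 * call reset_control_bulk_assert() from its .remove() callback by hand.
 */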
static int miic_probe(struct platform_device *pdev)
@@ -493,20 +664,26 @@ static int miic_probe(struct platform_device *pdev)
u32 mode_cfg;
int ret;
- ret = miic_parse_dt(dev, &mode_cfg);
- if (ret < 0)
- return ret;
-
miic = devm_kzalloc(dev, sizeof(*miic), GFP_KERNEL);
if (!miic)
return -ENOMEM;
- spin_lock_init(&miic->lock);
+ miic->of_data = of_device_get_match_data(dev);
miic->dev = dev;
+
+ ret = miic_parse_dt(miic, &mode_cfg);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_init(&miic->lock);
miic->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(miic->base))
return PTR_ERR(miic->base);
+ ret = miic_reset_control_init(miic);
+ if (ret)
+ return ret;
+
ret = devm_pm_runtime_enable(dev);
if (ret < 0)
return ret;
@@ -539,9 +716,41 @@ static void miic_remove(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
}
+static struct miic_of_data rzn1_miic_of_data = {
+ .match_table = modctrl_match_table,
+ .match_table_count = ARRAY_SIZE(modctrl_match_table),
+ .conf_conv_count = MIIC_MODCTRL_CONF_CONV_MAX,
+ .conf_to_string = conf_to_string,
+ .conf_to_string_count = ARRAY_SIZE(conf_to_string),
+ .index_to_string = index_to_string,
+ .index_to_string_count = ARRAY_SIZE(index_to_string),
+ .miic_port_start = 1,
+ .miic_port_max = 5,
+ .sw_mode_mask = GENMASK(4, 0),
+ .init_unlock_lock_regs = true,
+ .miic_write = miic_reg_writel_unlocked,
+};
+
+static struct miic_of_data rzt2h_miic_of_data = {
+ .match_table = rzt2h_modctrl_match_table,
+ .match_table_count = ARRAY_SIZE(rzt2h_modctrl_match_table),
+ .conf_conv_count = 5,
+ .conf_to_string = rzt2h_conf_to_string,
+ .conf_to_string_count = ARRAY_SIZE(rzt2h_conf_to_string),
+ .index_to_string = rzt2h_index_to_string,
+ .index_to_string_count = ARRAY_SIZE(rzt2h_index_to_string),
+ .miic_port_start = 0,
+ .miic_port_max = 4,
+ .sw_mode_mask = GENMASK(2, 0),
+ .reset_ids = rzt2h_reset_ids,
+ .reset_count = ARRAY_SIZE(rzt2h_reset_ids),
+ .miic_write = miic_reg_writel_locked,
+};
+
static const struct of_device_id miic_of_mtable[] = {
- { .compatible = "renesas,rzn1-miic" },
- { /* sentinel */ },
+ { .compatible = "renesas,r9a09g077-miic", .data = &rzt2h_miic_of_data },
+ { .compatible = "renesas,rzn1-miic", .data = &rzn1_miic_of_data },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, miic_of_mtable);
diff --git a/drivers/net/pcs/pcs-xpcs-plat.c b/drivers/net/pcs/pcs-xpcs-plat.c
index 629315f1e57c..b8c48f9effbf 100644
--- a/drivers/net/pcs/pcs-xpcs-plat.c
+++ b/drivers/net/pcs/pcs-xpcs-plat.c
@@ -66,7 +66,7 @@ static int xpcs_mmio_read_reg_indirect(struct dw_xpcs_plat *pxpcs,
switch (pxpcs->reg_width) {
case 4:
writel(page, pxpcs->reg_base + (DW_VR_CSR_VIEWPORT << 2));
- ret = readl(pxpcs->reg_base + (ofs << 2));
+ ret = readl(pxpcs->reg_base + (ofs << 2)) & 0xffff;
break;
default:
writew(page, pxpcs->reg_base + (DW_VR_CSR_VIEWPORT << 1));
@@ -124,7 +124,7 @@ static int xpcs_mmio_read_reg_direct(struct dw_xpcs_plat *pxpcs,
switch (pxpcs->reg_width) {
case 4:
- ret = readl(pxpcs->reg_base + (csr << 2));
+ ret = readl(pxpcs->reg_base + (csr << 2)) & 0xffff;
break;
default:
ret = readw(pxpcs->reg_base + (csr << 1));
@@ -280,7 +280,7 @@ static int xpcs_plat_init_clk(struct dw_xpcs_plat *pxpcs)
struct device *dev = &pxpcs->pdev->dev;
int ret;
- pxpcs->cclk = devm_clk_get(dev, "csr");
+ pxpcs->cclk = devm_clk_get_optional(dev, "csr");
if (IS_ERR(pxpcs->cclk))
return dev_err_probe(dev, PTR_ERR(pxpcs->cclk),
"Failed to get CSR clock\n");
@@ -365,9 +365,6 @@ static int xpcs_plat_init_dev(struct dw_xpcs_plat *pxpcs)
err_clean_data:
mdiodev->dev.platform_data = NULL;
- fwnode_handle_put(dev_fwnode(&mdiodev->dev));
- device_set_node(&mdiodev->dev, NULL);
-
mdio_device_free(mdiodev);
return ret;
@@ -456,5 +453,5 @@ static struct platform_driver xpcs_plat_driver = {
module_platform_driver(xpcs_plat_driver);
MODULE_DESCRIPTION("Synopsys DesignWare XPCS platform device driver");
-MODULE_AUTHOR("Signed-off-by: Serge Semin <fancer.lancer@gmail.com>");
+MODULE_AUTHOR("Serge Semin <fancer.lancer@gmail.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 7246a910728d..9679f2b35a44 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -37,6 +37,16 @@ static const int xpcs_10gkr_features[] = {
__ETHTOOL_LINK_MODE_MASK_NBITS,
};
+static const int xpcs_25gbaser_features[] = {
+ ETHTOOL_LINK_MODE_MII_BIT,
+ ETHTOOL_LINK_MODE_Pause_BIT,
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+};
+
static const int xpcs_xlgmii_features[] = {
ETHTOOL_LINK_MODE_Pause_BIT,
ETHTOOL_LINK_MODE_Asym_Pause_BIT,
@@ -67,6 +77,40 @@ static const int xpcs_xlgmii_features[] = {
__ETHTOOL_LINK_MODE_MASK_NBITS,
};
+static const int xpcs_50gbaser_features[] = {
+ ETHTOOL_LINK_MODE_MII_BIT,
+ ETHTOOL_LINK_MODE_Pause_BIT,
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+};
+
+static const int xpcs_50gbaser2_features[] = {
+ ETHTOOL_LINK_MODE_MII_BIT,
+ ETHTOOL_LINK_MODE_Pause_BIT,
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+};
+
+static const int xpcs_100gbasep_features[] = {
+ ETHTOOL_LINK_MODE_MII_BIT,
+ ETHTOOL_LINK_MODE_Pause_BIT,
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+};
+
static const int xpcs_10gbaser_features[] = {
ETHTOOL_LINK_MODE_Pause_BIT,
ETHTOOL_LINK_MODE_Asym_Pause_BIT,
@@ -523,9 +567,57 @@ static int xpcs_get_max_xlgmii_speed(struct dw_xpcs *xpcs,
return speed;
}
-static void xpcs_resolve_pma(struct dw_xpcs *xpcs,
- struct phylink_link_state *state)
+static int xpcs_c45_read_pcs_speed(struct dw_xpcs *xpcs,
+ struct phylink_link_state *state)
{
+ int pcs_ctrl1;
+
+ pcs_ctrl1 = xpcs_read(xpcs, MDIO_MMD_PCS, MDIO_CTRL1);
+ if (pcs_ctrl1 < 0)
+ return pcs_ctrl1;
+
+ switch (pcs_ctrl1 & MDIO_CTRL1_SPEEDSEL) {
+ case MDIO_PCS_CTRL1_SPEED25G:
+ state->speed = SPEED_25000;
+ break;
+ case MDIO_PCS_CTRL1_SPEED50G:
+ state->speed = SPEED_50000;
+ break;
+ case MDIO_PCS_CTRL1_SPEED100G:
+ state->speed = SPEED_100000;
+ break;
+ default:
+ state->speed = SPEED_UNKNOWN;
+ break;
+ }
+
+ return 0;
+}
+
+static int xpcs_resolve_pma(struct dw_xpcs *xpcs,
+ struct phylink_link_state *state)
+{
+ int pmd_rxdet, err = 0;
+
+ /* The Meta Platforms FBNIC PMD will go into a training state for
+ * about 4 seconds when the link first comes up. During this time the
+ * PCS link will bounce. To avoid reporting link up too soon we include
+ * the PMD state provided by the driver.
+ */
+ if (xpcs->info.pma == MP_FBNIC_XPCS_PMA_100G_ID) {
+ pmd_rxdet = xpcs_read(xpcs, MDIO_MMD_PMAPMD, MDIO_PMA_RXDET);
+ if (pmd_rxdet < 0) {
+ state->link = false;
+ return pmd_rxdet;
+ }
+
+ /* Verify Rx lanes are trained before reporting link up */
+ if (!(pmd_rxdet & MDIO_PMD_RXDET_GLOBAL)) {
+ state->link = false;
+ return 0;
+ }
+ }
+
state->pause = MLO_PAUSE_TX | MLO_PAUSE_RX;
state->duplex = DUPLEX_FULL;
@@ -536,10 +628,18 @@ static void xpcs_resolve_pma(struct dw_xpcs *xpcs,
case PHY_INTERFACE_MODE_XLGMII:
state->speed = xpcs_get_max_xlgmii_speed(xpcs, state);
break;
+ case PHY_INTERFACE_MODE_100GBASEP:
+ case PHY_INTERFACE_MODE_LAUI:
+ case PHY_INTERFACE_MODE_50GBASER:
+ case PHY_INTERFACE_MODE_25GBASER:
+ err = xpcs_c45_read_pcs_speed(xpcs, state);
+ break;
default:
state->speed = SPEED_UNKNOWN;
break;
}
+
+ return err;
}
static int xpcs_validate(struct phylink_pcs *pcs, unsigned long *supported,
@@ -567,44 +667,56 @@ static int xpcs_validate(struct phylink_pcs *pcs, unsigned long *supported,
return 0;
}
-void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces)
+static unsigned int xpcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
{
+ struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs);
const struct dw_xpcs_compat *compat;
- for (compat = xpcs->desc->compat; compat->supported; compat++)
- __set_bit(compat->interface, interfaces);
+ compat = xpcs_find_compat(xpcs, interface);
+ if (!compat)
+ return 0;
+
+ switch (compat->an_mode) {
+ case DW_AN_C73:
+ return LINK_INBAND_ENABLE;
+
+ case DW_AN_C37_SGMII:
+ case DW_AN_C37_1000BASEX:
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+
+ case DW_10GBASER:
+ case DW_2500BASEX:
+ return LINK_INBAND_DISABLE;
+
+ default:
+ return 0;
+ }
}
-EXPORT_SYMBOL_GPL(xpcs_get_interfaces);
-int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable)
+static void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces)
{
- u16 mask, val;
- int ret;
+ const struct dw_xpcs_compat *compat;
- mask = DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN |
- DW_VR_MII_EEE_TX_QUIET_EN | DW_VR_MII_EEE_RX_QUIET_EN |
- DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL |
- DW_VR_MII_EEE_MULT_FACT_100NS;
+ for (compat = xpcs->desc->compat; compat->supported; compat++)
+ __set_bit(compat->interface, interfaces);
+}
- if (enable)
- val = DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN |
- DW_VR_MII_EEE_TX_QUIET_EN | DW_VR_MII_EEE_RX_QUIET_EN |
- DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL |
- FIELD_PREP(DW_VR_MII_EEE_MULT_FACT_100NS,
- mult_fact_100ns);
- else
- val = 0;
+static int xpcs_switch_interface_mode(struct dw_xpcs *xpcs,
+ phy_interface_t interface)
+{
+ int ret = 0;
- ret = xpcs_modify(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL0, mask,
- val);
- if (ret < 0)
- return ret;
+ if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID) {
+ ret = txgbe_xpcs_switch_mode(xpcs, interface);
+ } else if (xpcs->interface != interface) {
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ xpcs->need_reset = true;
+ xpcs->interface = interface;
+ }
- return xpcs_modify(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL1,
- DW_VR_MII_EEE_TRN_LPI,
- enable ? DW_VR_MII_EEE_TRN_LPI : 0);
+ return ret;
}
-EXPORT_SYMBOL_GPL(xpcs_config_eee);
static void xpcs_pre_config(struct phylink_pcs *pcs, phy_interface_t interface)
{
@@ -612,6 +724,11 @@ static void xpcs_pre_config(struct phylink_pcs *pcs, phy_interface_t interface)
const struct dw_xpcs_compat *compat;
int ret;
+ ret = xpcs_switch_interface_mode(xpcs, interface);
+ if (ret)
+ dev_err(&xpcs->mdiodev->dev, "switch interface failed: %pe\n",
+ ERR_PTR(ret));
+
if (!xpcs->need_reset)
return;
@@ -684,7 +801,9 @@ static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs,
if (ret < 0)
return ret;
- mask = DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
+ val = 0;
+ mask = DW_VR_MII_DIG_CTRL1_2G5_EN | DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
+
if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
val = DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
@@ -801,10 +920,6 @@ static int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface,
return -ENODEV;
if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID) {
- ret = txgbe_xpcs_switch_mode(xpcs, interface);
- if (ret)
- return ret;
-
/* Wangxun devices need backplane CL37 AN enabled for
* SGMII and 1000base-X
*/
@@ -930,10 +1045,10 @@ static int xpcs_get_state_c73(struct dw_xpcs *xpcs,
phylink_resolve_c73(state);
} else {
- xpcs_resolve_pma(xpcs, state);
+ ret = xpcs_resolve_pma(xpcs, state);
}
- return 0;
+ return ret;
}
static int xpcs_get_state_c37_sgmii(struct dw_xpcs *xpcs,
@@ -1004,6 +1119,7 @@ static int xpcs_get_state_c37_sgmii(struct dw_xpcs *xpcs,
}
static int xpcs_get_state_c37_1000basex(struct dw_xpcs *xpcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
int lpa, bmsr;
@@ -1032,7 +1148,7 @@ static int xpcs_get_state_c37_1000basex(struct dw_xpcs *xpcs,
}
}
- phylink_mii_c22_pcs_decode_state(state, bmsr, lpa);
+ phylink_mii_c22_pcs_decode_state(state, neg_mode, bmsr, lpa);
}
return 0;
@@ -1060,7 +1176,7 @@ static int xpcs_get_state_2500basex(struct dw_xpcs *xpcs,
return 0;
}
-static void xpcs_get_state(struct phylink_pcs *pcs,
+static void xpcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs);
@@ -1088,7 +1204,7 @@ static void xpcs_get_state(struct phylink_pcs *pcs,
"xpcs_get_state_c37_sgmii", ERR_PTR(ret));
break;
case DW_AN_C37_1000BASEX:
- ret = xpcs_get_state_c37_1000basex(xpcs, state);
+ ret = xpcs_get_state_c37_1000basex(xpcs, neg_mode, state);
if (ret)
dev_err(&xpcs->mdiodev->dev, "%s returned %pe\n",
"xpcs_get_state_c37_1000basex", ERR_PTR(ret));
@@ -1164,6 +1280,63 @@ static void xpcs_an_restart(struct phylink_pcs *pcs)
BMCR_ANRESTART);
}
+static int xpcs_config_eee(struct dw_xpcs *xpcs, bool enable)
+{
+ u16 mask, val;
+ int ret;
+
+ mask = DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN |
+ DW_VR_MII_EEE_TX_QUIET_EN | DW_VR_MII_EEE_RX_QUIET_EN |
+ DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL |
+ DW_VR_MII_EEE_MULT_FACT_100NS;
+
+ if (enable)
+ val = DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN |
+ DW_VR_MII_EEE_TX_QUIET_EN | DW_VR_MII_EEE_RX_QUIET_EN |
+ DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL |
+ FIELD_PREP(DW_VR_MII_EEE_MULT_FACT_100NS,
+ xpcs->eee_mult_fact);
+ else
+ val = 0;
+
+ ret = xpcs_modify(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL0, mask,
+ val);
+ if (ret < 0)
+ return ret;
+
+ return xpcs_modify(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL1,
+ DW_VR_MII_EEE_TRN_LPI,
+ enable ? DW_VR_MII_EEE_TRN_LPI : 0);
+}
+
+static void xpcs_disable_eee(struct phylink_pcs *pcs)
+{
+ struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs);
+
+ xpcs_config_eee(xpcs, false);
+}
+
+static void xpcs_enable_eee(struct phylink_pcs *pcs)
+{
+ struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs);
+
+ xpcs_config_eee(xpcs, true);
+}
+
+/**
+ * xpcs_config_eee_mult_fact() - set the EEE clock multiplying factor
+ * @xpcs: pointer to a &struct dw_xpcs instance
+ * @mult_fact: the multiplying factor
+ *
+ * Configure the EEE clock multiplying factor. This value should be such that
+ * clk_eee_time_period * (mult_fact + 1) is within the range 80 to 120ns.
+ */
+void xpcs_config_eee_mult_fact(struct dw_xpcs *xpcs, u8 mult_fact)
+{
+ xpcs->eee_mult_fact = mult_fact;
+}
+EXPORT_SYMBOL_GPL(xpcs_config_eee_mult_fact);
+
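/*
 * Editor's note: hypothetical caller-side sketch (names and clock rate are
 * assumptions, not taken from an in-tree user). Per the kerneldoc above, a
 * MAC driver picks mult_fact so that the EEE clock period times
 * (mult_fact + 1) lands in the 80..120 ns window:
 */
static u8 demo_pick_eee_mult_fact(unsigned long clk_eee_rate_hz)
{
	/* assumes a clk_eee in the tens-of-MHz range, so period_ns != 0 */
	unsigned long period_ns = DIV_ROUND_CLOSEST(NSEC_PER_SEC,
						    clk_eee_rate_hz);

	/* aim for roughly the middle of the 80..120 ns window */
	return DIV_ROUND_CLOSEST(100, period_ns) - 1;
}
/* e.g. a 50 MHz clk_eee (20 ns period) gives mult_fact = 4: 20 * 5 = 100 ns */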
static int xpcs_read_ids(struct dw_xpcs *xpcs)
{
int ret;
@@ -1211,17 +1384,16 @@ static int xpcs_read_ids(struct dw_xpcs *xpcs)
if (ret < 0)
return ret;
- id = ret;
+ id = ret << 16;
ret = xpcs_read(xpcs, MDIO_MMD_PMAPMD, MDIO_DEVID2);
if (ret < 0)
return ret;
- /* Note the inverted dword order and masked out Model/Revision numbers
- * with respect to what is done with the PCS ID...
+ /* For now we only record the OUI for the PMAPMD; we may want to
+ * add the model number at some point in the future.
*/
- ret = (ret >> 10) & 0x3F;
- id |= ret << 16;
+ id |= ret & MDIO_DEVID2_OUI;
/* Set the PMA ID if it hasn't been pre-initialized */
if (xpcs->info.pma == DW_XPCS_PMA_ID_NATIVE)
@@ -1240,10 +1412,26 @@ static const struct dw_xpcs_compat synopsys_xpcs_compat[] = {
.supported = xpcs_10gkr_features,
.an_mode = DW_AN_C73,
}, {
+ .interface = PHY_INTERFACE_MODE_25GBASER,
+ .supported = xpcs_25gbaser_features,
+ .an_mode = DW_AN_C73,
+ }, {
.interface = PHY_INTERFACE_MODE_XLGMII,
.supported = xpcs_xlgmii_features,
.an_mode = DW_AN_C73,
}, {
+ .interface = PHY_INTERFACE_MODE_50GBASER,
+ .supported = xpcs_50gbaser_features,
+ .an_mode = DW_AN_C73,
+ }, {
+ .interface = PHY_INTERFACE_MODE_LAUI,
+ .supported = xpcs_50gbaser2_features,
+ .an_mode = DW_AN_C73,
+ }, {
+ .interface = PHY_INTERFACE_MODE_100GBASEP,
+ .supported = xpcs_100gbasep_features,
+ .an_mode = DW_AN_C73,
+ }, {
.interface = PHY_INTERFACE_MODE_10GBASER,
.supported = xpcs_10gbaser_features,
.an_mode = DW_10GBASER,
@@ -1306,11 +1494,14 @@ static const struct dw_xpcs_desc xpcs_desc_list[] = {
static const struct phylink_pcs_ops xpcs_phylink_ops = {
.pcs_validate = xpcs_validate,
+ .pcs_inband_caps = xpcs_inband_caps,
.pcs_pre_config = xpcs_pre_config,
.pcs_config = xpcs_config,
.pcs_get_state = xpcs_get_state,
.pcs_an_restart = xpcs_an_restart,
.pcs_link_up = xpcs_link_up,
+ .pcs_disable_eee = xpcs_disable_eee,
+ .pcs_enable_eee = xpcs_enable_eee,
};
static int xpcs_identify(struct dw_xpcs *xpcs)
@@ -1344,7 +1535,6 @@ static struct dw_xpcs *xpcs_create_data(struct mdio_device *mdiodev)
mdio_device_get(mdiodev);
xpcs->mdiodev = mdiodev;
xpcs->pcs.ops = &xpcs_phylink_ops;
- xpcs->pcs.neg_mode = true;
xpcs->pcs.poll = true;
return xpcs;
@@ -1418,7 +1608,10 @@ static struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev)
if (ret)
goto out_clear_clks;
- if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID)
+ xpcs_get_interfaces(xpcs, xpcs->pcs.supported_interfaces);
+
+ if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID ||
+ xpcs->info.pma == MP_FBNIC_XPCS_PMA_100G_ID)
xpcs->pcs.poll = false;
else
xpcs->need_reset = true;
diff --git a/drivers/net/pcs/pcs-xpcs.h b/drivers/net/pcs/pcs-xpcs.h
index adc5a0b3c883..929fa238445e 100644
--- a/drivers/net/pcs/pcs-xpcs.h
+++ b/drivers/net/pcs/pcs-xpcs.h
@@ -55,23 +55,11 @@
/* Clause 37 Defines */
/* VR MII MMD registers offsets */
#define DW_VR_MII_DIG_CTRL1 0x8000
-#define DW_VR_MII_AN_CTRL 0x8001
-#define DW_VR_MII_AN_INTR_STS 0x8002
-/* EEE Mode Control Register */
-#define DW_VR_MII_EEE_MCTRL0 0x8006
-#define DW_VR_MII_EEE_MCTRL1 0x800b
-#define DW_VR_MII_DIG_CTRL2 0x80e1
-
-/* VR_MII_DIG_CTRL1 */
#define DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW BIT(9)
#define DW_VR_MII_DIG_CTRL1_2G5_EN BIT(2)
#define DW_VR_MII_DIG_CTRL1_PHY_MODE_CTRL BIT(0)
-/* VR_MII_DIG_CTRL2 */
-#define DW_VR_MII_DIG_CTRL2_TX_POL_INV BIT(4)
-#define DW_VR_MII_DIG_CTRL2_RX_POL_INV BIT(0)
-
-/* VR_MII_AN_CTRL */
+#define DW_VR_MII_AN_CTRL 0x8001
#define DW_VR_MII_AN_CTRL_8BIT BIT(8)
#define DW_VR_MII_TX_CONFIG_MASK BIT(3)
#define DW_VR_MII_TX_CONFIG_PHY_SIDE_SGMII 0x1
@@ -81,7 +69,7 @@
#define DW_VR_MII_PCS_MODE_C37_SGMII 0x2
#define DW_VR_MII_AN_INTR_EN BIT(0)
-/* VR_MII_AN_INTR_STS */
+#define DW_VR_MII_AN_INTR_STS 0x8002
#define DW_VR_MII_AN_STS_C37_ANCMPLT_INTR BIT(0)
#define DW_VR_MII_AN_STS_C37_ANSGM_FD BIT(1)
#define DW_VR_MII_AN_STS_C37_ANSGM_SP GENMASK(3, 2)
@@ -90,19 +78,22 @@
#define DW_VR_MII_C37_ANSGM_SP_1000 0x2
#define DW_VR_MII_C37_ANSGM_SP_LNKSTS BIT(4)
-/* VR MII EEE Control 0 defines */
+#define DW_VR_MII_EEE_MCTRL0 0x8006
#define DW_VR_MII_EEE_LTX_EN BIT(0) /* LPI Tx Enable */
#define DW_VR_MII_EEE_LRX_EN BIT(1) /* LPI Rx Enable */
#define DW_VR_MII_EEE_TX_QUIET_EN BIT(2) /* Tx Quiet Enable */
#define DW_VR_MII_EEE_RX_QUIET_EN BIT(3) /* Rx Quiet Enable */
#define DW_VR_MII_EEE_TX_EN_CTRL BIT(4) /* Tx Control Enable */
#define DW_VR_MII_EEE_RX_EN_CTRL BIT(7) /* Rx Control Enable */
-
#define DW_VR_MII_EEE_MULT_FACT_100NS GENMASK(11, 8)
-/* VR MII EEE Control 1 defines */
+#define DW_VR_MII_EEE_MCTRL1 0x800b
#define DW_VR_MII_EEE_TRN_LPI BIT(0) /* Transparent Mode Enable */
+#define DW_VR_MII_DIG_CTRL2 0x80e1
+#define DW_VR_MII_DIG_CTRL2_TX_POL_INV BIT(4)
+#define DW_VR_MII_DIG_CTRL2_RX_POL_INV BIT(0)
+
#define DW_XPCS_INFO_DECLARE(_name, _pcs, _pma) \
static const struct dw_xpcs_info _name = { .pcs = _pcs, .pma = _pma }
@@ -122,6 +113,7 @@ struct dw_xpcs {
struct phylink_pcs pcs;
phy_interface_t interface;
bool need_reset;
+ u8 eee_mult_fact;
};
int xpcs_read(struct dw_xpcs *xpcs, int dev, u32 reg);
diff --git a/drivers/net/pfcp.c b/drivers/net/pfcp.c
index 69434fd13f96..28e6bc4a1f14 100644
--- a/drivers/net/pfcp.c
+++ b/drivers/net/pfcp.c
@@ -184,15 +184,16 @@ static int pfcp_add_sock(struct pfcp_dev *pfcp)
return PTR_ERR_OR_ZERO(pfcp->sock);
}
-static int pfcp_newlink(struct net *net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int pfcp_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
struct pfcp_dev *pfcp = netdev_priv(dev);
struct pfcp_net *pn;
int err;
- pfcp->net = net;
+ pfcp->net = link_net;
err = pfcp_add_sock(pfcp);
if (err) {
@@ -206,8 +207,8 @@ static int pfcp_newlink(struct net *net, struct net_device *dev,
goto exit_del_pfcp_sock;
}
- pn = net_generic(dev_net(dev), pfcp_net_id);
- list_add_rcu(&pfcp->list, &pn->pfcp_dev_list);
+ pn = net_generic(link_net, pfcp_net_id);
+ list_add(&pfcp->list, &pn->pfcp_dev_list);
netdev_dbg(dev, "registered new PFCP interface\n");
@@ -224,7 +225,7 @@ static void pfcp_dellink(struct net_device *dev, struct list_head *head)
{
struct pfcp_dev *pfcp = netdev_priv(dev);
- list_del_rcu(&pfcp->list);
+ list_del(&pfcp->list);
unregister_netdevice_queue(dev, head);
}
@@ -244,25 +245,21 @@ static int __net_init pfcp_net_init(struct net *net)
return 0;
}
-static void __net_exit pfcp_net_exit(struct net *net)
+static void __net_exit pfcp_net_exit_rtnl(struct net *net,
+ struct list_head *dev_to_kill)
{
struct pfcp_net *pn = net_generic(net, pfcp_net_id);
- struct pfcp_dev *pfcp;
- LIST_HEAD(list);
-
- rtnl_lock();
- list_for_each_entry(pfcp, &pn->pfcp_dev_list, list)
- pfcp_dellink(pfcp->dev, &list);
+ struct pfcp_dev *pfcp, *pfcp_next;
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ list_for_each_entry_safe(pfcp, pfcp_next, &pn->pfcp_dev_list, list)
+ pfcp_dellink(pfcp->dev, dev_to_kill);
}
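/*
 * Editor's note: with the .exit_rtnl pernet hook used above, the core
 * grabs the RTNL lock once for a batch of dying namespaces and calls
 * unregister_netdevice_many() on dev_to_kill afterwards, which is why the
 * driver-local rtnl_lock()/unregister_netdevice_many() sequence and the
 * RCU list variants could be dropped from this file.
 */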
static struct pernet_operations pfcp_net_ops = {
- .init = pfcp_net_init,
- .exit = pfcp_net_exit,
- .id = &pfcp_net_id,
- .size = sizeof(struct pfcp_net),
+ .init = pfcp_net_init,
+ .exit_rtnl = pfcp_net_exit_rtnl,
+ .id = &pfcp_net_id,
+ .size = sizeof(struct pfcp_net),
};
static int __init pfcp_init(void)
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index ee3ea0b56d48..a7ade7b95a2e 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -3,9 +3,13 @@
# PHY Layer Configuration
#
+config MDIO_BUS
+ tristate "MDIO bus consumer layer"
+ help
+ MDIO bus consumer layer
+
config PHYLINK
tristate
- depends on NETDEVICES
select PHYLIB
select SWPHY
help
@@ -15,9 +19,7 @@ config PHYLINK
menuconfig PHYLIB
tristate "PHY Device support and infrastructure"
- depends on NETDEVICES
- select MDIO_DEVICE
- select MDIO_DEVRES
+ select MDIO_BUS
help
Ethernet controllers are usually attached to PHY
devices. This option provides infrastructure for
@@ -28,6 +30,9 @@ if PHYLIB
config SWPHY
bool
+config PHY_PACKAGE
+ tristate
+
config LED_TRIGGER_PHY
bool "Support LED triggers for tracking link state"
depends on LEDS_TRIGGERS
@@ -79,6 +84,18 @@ config SFP
comment "MII PHY device drivers"
+config AS21XXX_PHY
+ tristate "Aeonsemi AS21xxx PHYs"
+ help
+ Currently supports the Aeonsemi AS21xxx PHYs.
+
+ These are 10G C45 PHYs that all require a generic firmware to be loaded.
+
+ Supported PHYs: AS21011JB1, AS21011PB1, AS21010JB1, AS21010PB1,
+ AS21511JB1, AS21511PB1, AS21510JB1, AS21510PB1, AS21210JB1 and
+ AS21210PB1, all of which register with the PHY ID 0x7500 0x7500
+ before the firmware is loaded.
+
config AIR_EN8811H_PHY
tristate "Airoha EN8811H 2.5 Gigabit PHY"
help
@@ -148,6 +165,7 @@ config BCM54140_PHY
tristate "Broadcom BCM54140 PHY"
depends on HWMON || HWMON=n
select BCM_NET_PHYLIB
+ select PHY_PACKAGE
help
Support the Broadcom BCM54140 Quad SGMII/QSGMII PHY.
@@ -266,33 +284,31 @@ config MAXLINEAR_GPHY
Support for the Maxlinear GPY115, GPY211, GPY212, GPY215,
GPY241, GPY245 PHYs.
-config MEDIATEK_GE_PHY
- tristate "MediaTek Gigabit Ethernet PHYs"
+config MAXLINEAR_86110_PHY
+ tristate "MaxLinear MXL86110 PHY support"
help
- Supports the MediaTek Gigabit Ethernet PHYs.
+ Support for the MaxLinear MXL86110 Gigabit Ethernet
+ Physical Layer transceiver.
+ The MXL86110 is commonly used in networking equipment such as
+ routers, switches, and embedded systems, providing the
+ physical interface for 10/100/1000 Mbps Ethernet connections
+ over copper media.
+ If you are using a board with the MXL86110 PHY connected to your
+ Ethernet MAC, you should enable this option.
-config MEDIATEK_GE_SOC_PHY
- tristate "MediaTek SoC Ethernet PHYs"
- depends on (ARM64 && ARCH_MEDIATEK) || COMPILE_TEST
- depends on NVMEM_MTK_EFUSE
- help
- Supports MediaTek SoC built-in Gigabit Ethernet PHYs.
-
- Include support for built-in Ethernet PHYs which are present in
- the MT7981 and MT7988 SoCs. These PHYs need calibration data
- present in the SoCs efuse and will dynamically calibrate VCM
- (common-mode voltage) during startup.
+source "drivers/net/phy/mediatek/Kconfig"
config MICREL_PHY
tristate "Micrel PHYs"
depends on PTP_1588_CLOCK_OPTIONAL
+ select PHY_PACKAGE
help
- Supports the KSZ9021, VSC8201, KS8001 PHYs.
+ Supports the KSZ8xxx, KSZ9xxx, and LAN88xx families of Micrel/Microchip PHYs.
config MICROCHIP_T1S_PHY
tristate "Microchip 10BASE-T1S Ethernet PHYs"
help
- Currently supports the LAN8670/1/2 Rev.B1/C1/C2 and
+ Currently supports the LAN8670/1/2 Rev.B1/C1/C2/D0 and
LAN8650/1 Rev.B0/B1 Internal PHYs.
config MICROCHIP_PHY
@@ -302,14 +318,22 @@ config MICROCHIP_PHY
config MICROCHIP_T1_PHY
tristate "Microchip T1 PHYs"
+ select MICROCHIP_PHY_RDS_PTP if NETWORK_PHY_TIMESTAMPING
+ depends on PTP_1588_CLOCK_OPTIONAL
help
- Supports the LAN87XX PHYs.
+ Supports the LAN8XXX PHYs.
+
+config MICROCHIP_PHY_RDS_PTP
+ tristate
+ help
+ Currently supports the LAN887X T1 PHY.
config MICROSEMI_PHY
tristate "Microsemi PHYs"
depends on MACSEC || MACSEC=n
depends on PTP_1588_CLOCK_OPTIONAL || !NETWORK_PHY_TIMESTAMPING
select CRYPTO_LIB_AES if MACSEC
+ select PHY_PACKAGE
help
Currently supports VSC8514, VSC8530, VSC8531, VSC8540 and VSC8541 PHYs
@@ -336,13 +360,13 @@ config NXP_C45_TJA11XX_PHY
depends on MACSEC || !MACSEC
help
Enable support for NXP C45 TJA11XX PHYs.
- Currently supports the TJA1103, TJA1104 and TJA1120 PHYs.
+ Currently supports the TJA1103, TJA1104, TJA1120 and TJA1121 PHYs.
config NXP_TJA11XX_PHY
tristate "NXP TJA11xx PHYs support"
depends on HWMON
help
- Currently supports the NXP TJA1100 and TJA1101 PHY.
+ Currently supports the NXP TJA1100, TJA1101 and TJA1102 PHYs.
config NCN26000_PHY
tristate "Onsemi 10BASE-T1S Ethernet PHY"
@@ -358,10 +382,7 @@ config QSEMI_PHY
help
Currently supports the qs6612
-config REALTEK_PHY
- tristate "Realtek PHYs"
- help
- Supports the Realtek 821x PHY.
+source "drivers/net/phy/realtek/Kconfig"
config RENESAS_PHY
tristate "Renesas PHYs"
@@ -449,7 +470,3 @@ config XILINX_GMII2RGMII
Ethernet physical media devices and the Gigabit Ethernet controller.
endif # PHYLIB
-
-config MICREL_KS8995MA
- tristate "Micrel KS8995MA 5-ports 10/100 managed Ethernet switch"
- depends on SPI
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 90f886844381..76e0db40f879 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -2,30 +2,24 @@
# Makefile for Linux PHY drivers
libphy-y := phy.o phy-c45.o phy-core.o phy_device.o \
- linkmode.o phy_link_topology.o
+ linkmode.o phy_link_topology.o \
+ phy_caps.o mdio_bus_provider.o
mdio-bus-y += mdio_bus.o mdio_device.o
-ifdef CONFIG_MDIO_DEVICE
-obj-y += mdio-boardinfo.o
-endif
-
-# PHYLIB implies MDIO_DEVICE, in that case, we have a bunch of circular
-# dependencies that does not make it possible to split mdio-bus objects into a
-# dedicated loadable module, so we bundle them all together into libphy.ko
ifdef CONFIG_PHYLIB
-libphy-y += $(mdio-bus-y)
-# the stubs are built-in whenever PHYLIB is built-in or module
+# built-in whenever PHYLIB is built-in or module
obj-y += stubs.o
-else
-obj-$(CONFIG_MDIO_DEVICE) += mdio-bus.o
endif
-obj-$(CONFIG_MDIO_DEVRES) += mdio_devres.o
+
libphy-$(CONFIG_SWPHY) += swphy.o
libphy-$(CONFIG_LED_TRIGGER_PHY) += phy_led_triggers.o
libphy-$(CONFIG_OPEN_ALLIANCE_HELPERS) += open_alliance_helpers.o
+obj-$(CONFIG_MDIO_BUS) += mdio-bus.o
obj-$(CONFIG_PHYLINK) += phylink.o
obj-$(CONFIG_PHYLIB) += libphy.o
+obj-$(CONFIG_PHYLIB) += mdio_devres.o
+obj-$(CONFIG_PHY_PACKAGE) += phy_package.o
obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += mii_timestamper.o
@@ -39,6 +33,7 @@ obj-$(CONFIG_AIR_EN8811H_PHY) += air_en8811h.o
obj-$(CONFIG_AMD_PHY) += amd.o
obj-$(CONFIG_AMCC_QT2025_PHY) += qt2025.o
obj-$(CONFIG_AQUANTIA_PHY) += aquantia/
+obj-$(CONFIG_AS21XXX_PHY) += as21xxx.o
ifdef CONFIG_AX88796B_RUST_PHY
obj-$(CONFIG_AX88796B_PHY) += ax88796b_rust.o
else
@@ -74,12 +69,12 @@ obj-$(CONFIG_MARVELL_PHY) += marvell.o
obj-$(CONFIG_MARVELL_88Q2XXX_PHY) += marvell-88q2xxx.o
obj-$(CONFIG_MARVELL_88X2222_PHY) += marvell-88x2222.o
obj-$(CONFIG_MAXLINEAR_GPHY) += mxl-gpy.o
-obj-$(CONFIG_MEDIATEK_GE_PHY) += mediatek-ge.o
-obj-$(CONFIG_MEDIATEK_GE_SOC_PHY) += mediatek-ge-soc.o
+obj-$(CONFIG_MAXLINEAR_86110_PHY) += mxl-86110.o
+obj-y += mediatek/
obj-$(CONFIG_MESON_GXL_PHY) += meson-gxl.o
-obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
obj-$(CONFIG_MICREL_PHY) += micrel.o
obj-$(CONFIG_MICROCHIP_PHY) += microchip.o
+obj-$(CONFIG_MICROCHIP_PHY_RDS_PTP) += microchip_rds_ptp.o
obj-$(CONFIG_MICROCHIP_T1_PHY) += microchip_t1.o
obj-$(CONFIG_MICROCHIP_T1S_PHY) += microchip_t1s.o
obj-$(CONFIG_MICROSEMI_PHY) += mscc/
@@ -95,7 +90,7 @@ obj-$(CONFIG_NXP_CBTX_PHY) += nxp-cbtx.o
obj-$(CONFIG_NXP_TJA11XX_PHY) += nxp-tja11xx.o
obj-y += qcom/
obj-$(CONFIG_QSEMI_PHY) += qsemi.o
-obj-$(CONFIG_REALTEK_PHY) += realtek.o
+obj-$(CONFIG_REALTEK_PHY) += realtek/
obj-$(CONFIG_RENESAS_PHY) += uPD60620.o
obj-$(CONFIG_ROCKCHIP_PHY) += rockchip.o
obj-$(CONFIG_SMSC_PHY) += smsc.o
diff --git a/drivers/net/phy/adin.c b/drivers/net/phy/adin.c
index a2a862bae2ed..7fa713ca8d45 100644
--- a/drivers/net/phy/adin.c
+++ b/drivers/net/phy/adin.c
@@ -1038,7 +1038,7 @@ static struct phy_driver adin_driver[] = {
module_phy_driver(adin_driver);
-static struct mdio_device_id __maybe_unused adin_tbl[] = {
+static const struct mdio_device_id __maybe_unused adin_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_ADIN1200) },
{ PHY_ID_MATCH_MODEL(PHY_ID_ADIN1300) },
{ }
diff --git a/drivers/net/phy/adin1100.c b/drivers/net/phy/adin1100.c
index 85f910e2d4fb..8f9753d4318c 100644
--- a/drivers/net/phy/adin1100.c
+++ b/drivers/net/phy/adin1100.c
@@ -192,16 +192,15 @@ static irqreturn_t adin_phy_handle_interrupt(struct phy_device *phydev)
static int adin_set_powerdown_mode(struct phy_device *phydev, bool en)
{
int ret;
- int val;
- val = en ? ADIN_CRSM_SFT_PD_CNTRL_EN : 0;
ret = phy_write_mmd(phydev, MDIO_MMD_VEND1,
- ADIN_CRSM_SFT_PD_CNTRL, val);
+ ADIN_CRSM_SFT_PD_CNTRL,
+ en ? ADIN_CRSM_SFT_PD_CNTRL_EN : 0);
if (ret < 0)
return ret;
return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, ADIN_CRSM_STAT, ret,
- (ret & ADIN_CRSM_SFT_PD_RDY) == val,
+ !!(ret & ADIN_CRSM_SFT_PD_RDY) == en,
1000, 30000, true);
}
@@ -215,8 +214,11 @@ static int adin_resume(struct phy_device *phydev)
return adin_set_powerdown_mode(phydev, false);
}
-static int adin_set_loopback(struct phy_device *phydev, bool enable)
+static int adin_set_loopback(struct phy_device *phydev, bool enable, int speed)
{
+ if (enable && speed)
+ return -EOPNOTSUPP;
+
if (enable)
return phy_set_bits_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_10T1L_CTRL,
BMCR_LOOPBACK);
@@ -340,7 +342,7 @@ static struct phy_driver adin_driver[] = {
module_phy_driver(adin_driver);
-static struct mdio_device_id __maybe_unused adin_tbl[] = {
+static const struct mdio_device_id __maybe_unused adin_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_ADIN1100) },
{ PHY_ID_MATCH_MODEL(PHY_ID_ADIN1110) },
{ PHY_ID_MATCH_MODEL(PHY_ID_ADIN2111) },
diff --git a/drivers/net/phy/air_en8811h.c b/drivers/net/phy/air_en8811h.c
index 8d076b9609fd..badd65f0ccee 100644
--- a/drivers/net/phy/air_en8811h.c
+++ b/drivers/net/phy/air_en8811h.c
@@ -11,6 +11,8 @@
* Copyright (C) 2023 Airoha Technology Corp.
*/
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/phy.h>
#include <linux/firmware.h>
#include <linux/property.h>
@@ -115,6 +117,11 @@
#define EN8811H_GPIO_OUTPUT 0xcf8b8
#define EN8811H_GPIO_OUTPUT_345 (BIT(3) | BIT(4) | BIT(5))
+#define EN8811H_HWTRAP1 0xcf914
+#define EN8811H_HWTRAP1_CKO BIT(12)
+#define EN8811H_CLK_CGM 0xcf958
+#define EN8811H_CLK_CGM_CKO BIT(26)
+
#define EN8811H_FW_CTRL_1 0x0f0018
#define EN8811H_FW_CTRL_1_START 0x0
#define EN8811H_FW_CTRL_1_FINISH 0x1
@@ -142,10 +149,16 @@ struct led {
unsigned long state;
};
+#define clk_hw_to_en8811h_priv(_hw) \
+ container_of(_hw, struct en8811h_priv, hw)
+
struct en8811h_priv {
- u32 firmware_version;
- bool mcu_needs_restart;
- struct led led[EN8811H_LED_COUNT];
+ u32 firmware_version;
+ bool mcu_needs_restart;
+ struct led led[EN8811H_LED_COUNT];
+ struct clk_hw hw;
+ struct phy_device *phydev;
+ unsigned int cko_is_enabled;
};
enum {
@@ -806,6 +819,105 @@ static int en8811h_led_hw_is_supported(struct phy_device *phydev, u8 index,
return 0;
};
+static unsigned long en8811h_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent)
+{
+ struct en8811h_priv *priv = clk_hw_to_en8811h_priv(hw);
+ struct phy_device *phydev = priv->phydev;
+ u32 pbus_value;
+ int ret;
+
+ ret = air_buckpbus_reg_read(phydev, EN8811H_HWTRAP1, &pbus_value);
+ if (ret < 0)
+ return ret;
+
+ return (pbus_value & EN8811H_HWTRAP1_CKO) ? 50000000 : 25000000;
+}
+
+static int en8811h_clk_enable(struct clk_hw *hw)
+{
+ struct en8811h_priv *priv = clk_hw_to_en8811h_priv(hw);
+ struct phy_device *phydev = priv->phydev;
+
+ return air_buckpbus_reg_modify(phydev, EN8811H_CLK_CGM,
+ EN8811H_CLK_CGM_CKO,
+ EN8811H_CLK_CGM_CKO);
+}
+
+static void en8811h_clk_disable(struct clk_hw *hw)
+{
+ struct en8811h_priv *priv = clk_hw_to_en8811h_priv(hw);
+ struct phy_device *phydev = priv->phydev;
+
+ air_buckpbus_reg_modify(phydev, EN8811H_CLK_CGM,
+ EN8811H_CLK_CGM_CKO, 0);
+}
+
+static int en8811h_clk_is_enabled(struct clk_hw *hw)
+{
+ struct en8811h_priv *priv = clk_hw_to_en8811h_priv(hw);
+ struct phy_device *phydev = priv->phydev;
+ u32 pbus_value;
+ int ret;
+
+ ret = air_buckpbus_reg_read(phydev, EN8811H_CLK_CGM, &pbus_value);
+ if (ret < 0)
+ return ret;
+
+ return (pbus_value & EN8811H_CLK_CGM_CKO);
+}
+
+static int en8811h_clk_save_context(struct clk_hw *hw)
+{
+ struct en8811h_priv *priv = clk_hw_to_en8811h_priv(hw);
+
+ priv->cko_is_enabled = en8811h_clk_is_enabled(hw);
+
+ return 0;
+}
+
+static void en8811h_clk_restore_context(struct clk_hw *hw)
+{
+ struct en8811h_priv *priv = clk_hw_to_en8811h_priv(hw);
+
+ if (!priv->cko_is_enabled)
+ en8811h_clk_disable(hw);
+}
+
+static const struct clk_ops en8811h_clk_ops = {
+ .recalc_rate = en8811h_clk_recalc_rate,
+ .enable = en8811h_clk_enable,
+ .disable = en8811h_clk_disable,
+ .is_enabled = en8811h_clk_is_enabled,
+ .save_context = en8811h_clk_save_context,
+ .restore_context = en8811h_clk_restore_context,
+};
+
+static int en8811h_clk_provider_setup(struct device *dev, struct clk_hw *hw)
+{
+ struct clk_init_data init;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_COMMON_CLK))
+ return 0;
+
+ init.name = devm_kasprintf(dev, GFP_KERNEL, "%s-cko",
+ fwnode_get_name(dev_fwnode(dev)));
+ if (!init.name)
+ return -ENOMEM;
+
+ init.ops = &en8811h_clk_ops;
+ init.flags = 0;
+ init.num_parents = 0;
+ hw->init = &init;
+
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret)
+ return ret;
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, hw);
+}
+
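/*
 * Editor's note: hypothetical consumer-side sketch. Once the CKO clock is
 * registered through devm_of_clk_add_hw_provider() above (which also
 * assumes the PHY node carries #clock-cells = <0>), a peripheral whose DT
 * node references the PHY with "clocks = <&phy>;" could obtain it with:
 */
static int demo_get_phy_cko(struct device *dev)
{
	struct clk *cko = devm_clk_get_enabled(dev, NULL);

	if (IS_ERR(cko))
		return PTR_ERR(cko);

	/* runs at 25 or 50 MHz depending on the PHY's HWTRAP1 CKO strap */
	return 0;
}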
static int en8811h_probe(struct phy_device *phydev)
{
struct en8811h_priv *priv;
@@ -838,6 +950,12 @@ static int en8811h_probe(struct phy_device *phydev)
return ret;
}
+ priv->phydev = phydev;
+ /* Co-Clock Output */
+ ret = en8811h_clk_provider_setup(&phydev->mdio.dev, &priv->hw);
+ if (ret)
+ return ret;
+
/* Configure led gpio pins as output */
ret = air_buckpbus_reg_modify(phydev, EN8811H_GPIO_OUTPUT,
EN8811H_GPIO_OUTPUT_345,
@@ -1052,6 +1170,20 @@ static irqreturn_t en8811h_handle_interrupt(struct phy_device *phydev)
return IRQ_HANDLED;
}
+static int en8811h_resume(struct phy_device *phydev)
+{
+ clk_restore_context();
+
+ return genphy_resume(phydev);
+}
+
+static int en8811h_suspend(struct phy_device *phydev)
+{
+ clk_save_context();
+
+ return genphy_suspend(phydev);
+}
+
static struct phy_driver en8811h_driver[] = {
{
PHY_ID_MATCH_MODEL(EN8811H_PHY_ID),
@@ -1062,6 +1194,8 @@ static struct phy_driver en8811h_driver[] = {
.get_rate_matching = en8811h_get_rate_matching,
.config_aneg = en8811h_config_aneg,
.read_status = en8811h_read_status,
+ .resume = en8811h_resume,
+ .suspend = en8811h_suspend,
.config_intr = en8811h_clear_intr,
.handle_interrupt = en8811h_handle_interrupt,
.led_hw_is_supported = en8811h_led_hw_is_supported,
@@ -1075,7 +1209,7 @@ static struct phy_driver en8811h_driver[] = {
module_phy_driver(en8811h_driver);
-static struct mdio_device_id __maybe_unused en8811h_tbl[] = {
+static const struct mdio_device_id __maybe_unused en8811h_tbl[] = {
{ PHY_ID_MATCH_MODEL(EN8811H_PHY_ID) },
{ }
};
diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c
index 930b15fa6ce9..75b5fe65500a 100644
--- a/drivers/net/phy/amd.c
+++ b/drivers/net/phy/amd.c
@@ -111,7 +111,7 @@ static struct phy_driver am79c_drivers[] = {
module_phy_driver(am79c_drivers);
-static struct mdio_device_id __maybe_unused amd_tbl[] = {
+static const struct mdio_device_id __maybe_unused amd_tbl[] = {
{ PHY_ID_AC101L, 0xfffffff0 },
{ PHY_ID_AM79C874, 0xfffffff0 },
{ }
diff --git a/drivers/net/phy/aquantia/aquantia.h b/drivers/net/phy/aquantia/aquantia.h
index 0c78bfabace5..31427ee343e3 100644
--- a/drivers/net/phy/aquantia/aquantia.h
+++ b/drivers/net/phy/aquantia/aquantia.h
@@ -55,6 +55,7 @@
#define VEND1_GLOBAL_CFG_SERDES_MODE_SGMII 3
#define VEND1_GLOBAL_CFG_SERDES_MODE_OCSGMII 4
#define VEND1_GLOBAL_CFG_SERDES_MODE_XFI5G 6
+#define VEND1_GLOBAL_CFG_AUTONEG_ENA BIT(3)
#define VEND1_GLOBAL_CFG_RATE_ADAPT GENMASK(8, 7)
#define VEND1_GLOBAL_CFG_RATE_ADAPT_NONE 0
#define VEND1_GLOBAL_CFG_RATE_ADAPT_USX 1
@@ -152,6 +153,28 @@
#define AQR_MAX_LEDS 3
+/* Custom driver definitions for constructing a single variable out of
+ * aggregate firmware build information. These do not represent hardware
+ * fields.
+ */
+#define AQR_FW_FINGERPRINT_MAJOR GENMASK_ULL(63, 56)
+#define AQR_FW_FINGERPRINT_MINOR GENMASK_ULL(55, 48)
+#define AQR_FW_FINGERPRINT_BUILD_ID GENMASK_ULL(47, 40)
+#define AQR_FW_FINGERPRINT_PROV_ID GENMASK_ULL(39, 32)
+#define AQR_FW_FINGERPRINT_MISC_ID GENMASK_ULL(31, 16)
+#define AQR_FW_FINGERPRINT_MISC_VER GENMASK_ULL(15, 0)
+#define AQR_FW_FINGERPRINT(major, minor, build_id, prov_id, misc_id, misc_ver) \
+ (FIELD_PREP(AQR_FW_FINGERPRINT_MAJOR, major) | \
+ FIELD_PREP(AQR_FW_FINGERPRINT_MINOR, minor) | \
+ FIELD_PREP(AQR_FW_FINGERPRINT_BUILD_ID, build_id) | \
+ FIELD_PREP(AQR_FW_FINGERPRINT_PROV_ID, prov_id) | \
+ FIELD_PREP(AQR_FW_FINGERPRINT_MISC_ID, misc_id) | \
+ FIELD_PREP(AQR_FW_FINGERPRINT_MISC_VER, misc_ver))
+
+/* 10G-QXGMII firmware for NXP SPF-30841 riser board (AQR412C) */
+#define AQR_G3_V4_3_C_AQR_NXP_SPF_30841_MUSX_ID40019_VER1198 \
+ AQR_FW_FINGERPRINT(4, 3, 0xc, 1, 40019, 1198)
+
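/*
 * Editor's note: illustration only. The fingerprint packed by
 * AQR_FW_FINGERPRINT() can be taken apart again with FIELD_GET(), which
 * makes the bit layout above concrete; the helper name is hypothetical.
 */
static inline u8 demo_fw_fingerprint_major(u64 fp)
{
	return FIELD_GET(AQR_FW_FINGERPRINT_MAJOR, fp);
}
/*
 * e.g. demo_fw_fingerprint_major(AQR_G3_V4_3_C_AQR_NXP_SPF_30841_MUSX_ID40019_VER1198)
 * evaluates to 4, the firmware's major version.
 */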
struct aqr107_hw_stat {
const char *name;
int reg;
@@ -174,10 +197,39 @@ static const struct aqr107_hw_stat aqr107_hw_stats[] = {
#define AQR107_SGMII_STAT_SZ ARRAY_SIZE(aqr107_hw_stats)
+static const struct {
+ int speed;
+ u16 reg;
+} aqr_global_cfg_regs[] = {
+ { SPEED_10, VEND1_GLOBAL_CFG_10M, },
+ { SPEED_100, VEND1_GLOBAL_CFG_100M, },
+ { SPEED_1000, VEND1_GLOBAL_CFG_1G, },
+ { SPEED_2500, VEND1_GLOBAL_CFG_2_5G, },
+ { SPEED_5000, VEND1_GLOBAL_CFG_5G, },
+ { SPEED_10000, VEND1_GLOBAL_CFG_10G, },
+};
+
+#define AQR_NUM_GLOBAL_CFG ARRAY_SIZE(aqr_global_cfg_regs)
+
+enum aqr_rate_adaptation {
+ AQR_RATE_ADAPT_NONE,
+ AQR_RATE_ADAPT_USX,
+ AQR_RATE_ADAPT_PAUSE,
+};
+
+struct aqr_global_syscfg {
+ int speed;
+ phy_interface_t interface;
+ enum aqr_rate_adaptation rate_adapt;
+};
+
struct aqr107_priv {
u64 sgmii_stats[AQR107_SGMII_STAT_SZ];
+ u64 fingerprint;
unsigned long leds_active_low;
unsigned long leds_active_high;
+ bool wait_on_global_cfg;
+ struct aqr_global_syscfg global_cfg[AQR_NUM_GLOBAL_CFG];
};
#if IS_REACHABLE(CONFIG_HWMON)
diff --git a/drivers/net/phy/aquantia/aquantia_firmware.c b/drivers/net/phy/aquantia/aquantia_firmware.c
index dab3af80593f..569256152689 100644
--- a/drivers/net/phy/aquantia/aquantia_firmware.c
+++ b/drivers/net/phy/aquantia/aquantia_firmware.c
@@ -328,10 +328,11 @@ static int aqr_firmware_load_fs(struct phy_device *phydev)
const char *fw_name;
int ret;
- ret = of_property_read_string(dev->of_node, "firmware-name",
- &fw_name);
- if (ret)
+ ret = device_property_read_string(dev, "firmware-name", &fw_name);
+ if (ret) {
+ phydev_err(phydev, "failed to read firmware-name: %d\n", ret);
return ret;
+ }
ret = request_firmware(&fw, fw_name, dev);
if (ret) {
@@ -368,7 +369,7 @@ int aqr_firmware_load(struct phy_device *phydev)
* assume that, and load a new image.
*/
ret = aqr_firmware_load_nvmem(phydev);
- if (!ret)
+ if (ret == -EPROBE_DEFER || !ret)
return ret;
ret = aqr_firmware_load_fs(phydev);
diff --git a/drivers/net/phy/aquantia/aquantia_hwmon.c b/drivers/net/phy/aquantia/aquantia_hwmon.c
index 7b3c49c3bf49..1a714b56b765 100644
--- a/drivers/net/phy/aquantia/aquantia_hwmon.c
+++ b/drivers/net/phy/aquantia/aquantia_hwmon.c
@@ -172,33 +172,13 @@ static const struct hwmon_ops aqr_hwmon_ops = {
.write = aqr_hwmon_write,
};
-static u32 aqr_hwmon_chip_config[] = {
- HWMON_C_REGISTER_TZ,
- 0,
-};
-
-static const struct hwmon_channel_info aqr_hwmon_chip = {
- .type = hwmon_chip,
- .config = aqr_hwmon_chip_config,
-};
-
-static u32 aqr_hwmon_temp_config[] = {
- HWMON_T_INPUT |
- HWMON_T_MAX | HWMON_T_MIN |
- HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM |
- HWMON_T_CRIT | HWMON_T_LCRIT |
- HWMON_T_CRIT_ALARM | HWMON_T_LCRIT_ALARM,
- 0,
-};
-
-static const struct hwmon_channel_info aqr_hwmon_temp = {
- .type = hwmon_temp,
- .config = aqr_hwmon_temp_config,
-};
-
static const struct hwmon_channel_info * const aqr_hwmon_info[] = {
- &aqr_hwmon_chip,
- &aqr_hwmon_temp,
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT |
+ HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM |
+ HWMON_T_CRIT | HWMON_T_LCRIT |
+ HWMON_T_CRIT_ALARM | HWMON_T_LCRIT_ALARM),
NULL,
};
diff --git a/drivers/net/phy/aquantia/aquantia_leds.c b/drivers/net/phy/aquantia/aquantia_leds.c
index 00ad2313fed3..951f46104eff 100644
--- a/drivers/net/phy/aquantia/aquantia_leds.c
+++ b/drivers/net/phy/aquantia/aquantia_leds.c
@@ -156,5 +156,5 @@ int aqr_phy_led_polarity_set(struct phy_device *phydev, int index, unsigned long
if (force_active_high || force_active_low)
return aqr_phy_led_active_low_set(phydev, index, force_active_low);
- unreachable();
+ return -EINVAL;
}
diff --git a/drivers/net/phy/aquantia/aquantia_main.c b/drivers/net/phy/aquantia/aquantia_main.c
index 38d0dd5c80a4..41f3676c7f1e 100644
--- a/drivers/net/phy/aquantia/aquantia_main.c
+++ b/drivers/net/phy/aquantia/aquantia_main.c
@@ -26,13 +26,18 @@
#define PHY_ID_AQR111 0x03a1b610
#define PHY_ID_AQR111B0 0x03a1b612
#define PHY_ID_AQR112 0x03a1b662
-#define PHY_ID_AQR412 0x03a1b712
+#define PHY_ID_AQR412 0x03a1b6f2
+#define PHY_ID_AQR412C 0x03a1b712
#define PHY_ID_AQR113 0x31c31c40
#define PHY_ID_AQR113C 0x31c31c12
#define PHY_ID_AQR114C 0x31c31c22
+#define PHY_ID_AQR115 0x31c31c63
#define PHY_ID_AQR115C 0x31c31c33
#define PHY_ID_AQR813 0x31c31cb2
+#define MDIO_PHYXS_VEND_PROV2 0xc441
+#define MDIO_PHYXS_VEND_PROV2_USX_AN BIT(3)
+
#define MDIO_PHYXS_VEND_IF_STATUS 0xe812
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK GENMASK(7, 3)
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR 0
@@ -50,10 +55,17 @@
#define MDIO_AN_VEND_PROV_1000BASET_HALF BIT(14)
#define MDIO_AN_VEND_PROV_5000BASET_FULL BIT(11)
#define MDIO_AN_VEND_PROV_2500BASET_FULL BIT(10)
+#define MDIO_AN_VEND_PROV_EXC_PHYID_INFO BIT(6)
#define MDIO_AN_VEND_PROV_DOWNSHIFT_EN BIT(4)
#define MDIO_AN_VEND_PROV_DOWNSHIFT_MASK GENMASK(3, 0)
#define MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT 4
+#define MDIO_AN_RESVD_VEND_PROV 0xc410
+#define MDIO_AN_RESVD_VEND_PROV_MDIX_AUTO 0
+#define MDIO_AN_RESVD_VEND_PROV_MDIX_MDI 1
+#define MDIO_AN_RESVD_VEND_PROV_MDIX_MDIX 2
+#define MDIO_AN_RESVD_VEND_PROV_MDIX_MASK GENMASK(1, 0)
+
#define MDIO_AN_TX_VEND_STATUS1 0xc800
#define MDIO_AN_TX_VEND_STATUS1_RATE_MASK GENMASK(3, 1)
#define MDIO_AN_TX_VEND_STATUS1_10BASET 0
@@ -64,6 +76,9 @@
#define MDIO_AN_TX_VEND_STATUS1_5000BASET 5
#define MDIO_AN_TX_VEND_STATUS1_FULL_DUPLEX BIT(0)
+#define MDIO_AN_RESVD_VEND_STATUS1 0xc810
+#define MDIO_AN_RESVD_VEND_STATUS1_MDIX BIT(8)
+
#define MDIO_AN_TX_VEND_INT_STATUS1 0xcc00
#define MDIO_AN_TX_VEND_INT_STATUS1_DOWNSHIFT BIT(1)
@@ -73,6 +88,9 @@
#define MDIO_AN_TX_VEND_INT_MASK2 0xd401
#define MDIO_AN_TX_VEND_INT_MASK2_LINK BIT(0)
+#define PMAPMD_FW_MISC_ID 0xc41d
+#define PMAPMD_FW_MISC_VER 0xc41e
+
#define PMAPMD_RSVD_VEND_PROV 0xe400
#define PMAPMD_RSVD_VEND_PROV_MDI_CONF GENMASK(1, 0)
#define PMAPMD_RSVD_VEND_PROV_MDI_REVERSE BIT(0)
@@ -155,12 +173,40 @@ static void aqr107_get_stats(struct phy_device *phydev,
}
}
+static int aqr_set_mdix(struct phy_device *phydev, int mdix)
+{
+ u16 val = 0;
+
+ switch (mdix) {
+ case ETH_TP_MDI:
+ val = MDIO_AN_RESVD_VEND_PROV_MDIX_MDI;
+ break;
+ case ETH_TP_MDI_X:
+ val = MDIO_AN_RESVD_VEND_PROV_MDIX_MDIX;
+ break;
+ case ETH_TP_MDI_AUTO:
+ case ETH_TP_MDI_INVALID:
+ default:
+ val = MDIO_AN_RESVD_VEND_PROV_MDIX_AUTO;
+ break;
+ }
+
+ return phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_RESVD_VEND_PROV,
+ MDIO_AN_RESVD_VEND_PROV_MDIX_MASK, val);
+}
+
static int aqr_config_aneg(struct phy_device *phydev)
{
bool changed = false;
u16 reg;
int ret;
+ ret = aqr_set_mdix(phydev, phydev->mdix_ctrl);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
if (phydev->autoneg == AUTONEG_DISABLE)
return genphy_c45_pma_setup_forced(phydev);
@@ -278,12 +324,157 @@ static int aqr_read_status(struct phy_device *phydev)
val & MDIO_AN_RX_LP_STAT1_1000BASET_HALF);
}
+ val = genphy_c45_aneg_done(phydev);
+ if (val < 0)
+ return val;
+ if (val) {
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RESVD_VEND_STATUS1);
+ if (val < 0)
+ return val;
+ if (val & MDIO_AN_RESVD_VEND_STATUS1_MDIX)
+ phydev->mdix = ETH_TP_MDI_X;
+ else
+ phydev->mdix = ETH_TP_MDI;
+ } else {
+ phydev->mdix = ETH_TP_MDI_INVALID;
+ }
+
return genphy_c45_read_status(phydev);
}
-static int aqr107_read_rate(struct phy_device *phydev)
+static int aqr105_get_features(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Normal feature discovery */
+ ret = genphy_c45_pma_read_abilities(phydev);
+ if (ret)
+ return ret;
+
+ /* The AQR105 PHY does not report the 2.5G and 5G modes, so add them
+ * here
+ */
+ linkmode_set_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ phydev->supported);
+
+ /* The AQR105 PHY supports both RJ45 and SFP+ interfaces */
+ linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported);
+
+ return 0;
+}
+
+static int aqr105_setup_forced(struct phy_device *phydev)
+{
+ int vend = MDIO_AN_VEND_PROV_EXC_PHYID_INFO;
+ int ctrl10 = 0;
+ int adv = ADVERTISE_CSMA;
+ int ret;
+
+ switch (phydev->speed) {
+ case SPEED_100:
+ adv |= ADVERTISE_100FULL;
+ break;
+ case SPEED_1000:
+ adv |= ADVERTISE_NPAGE;
+ if (phydev->duplex == DUPLEX_FULL)
+ vend |= MDIO_AN_VEND_PROV_1000BASET_FULL;
+ else
+ vend |= MDIO_AN_VEND_PROV_1000BASET_HALF;
+ break;
+ case SPEED_2500:
+ adv |= (ADVERTISE_NPAGE | ADVERTISE_RESV);
+ vend |= MDIO_AN_VEND_PROV_2500BASET_FULL;
+ break;
+ case SPEED_5000:
+ adv |= (ADVERTISE_NPAGE | ADVERTISE_RESV);
+ vend |= MDIO_AN_VEND_PROV_5000BASET_FULL;
+ break;
+ case SPEED_10000:
+ adv |= (ADVERTISE_NPAGE | ADVERTISE_RESV);
+ ctrl10 |= MDIO_AN_10GBT_CTRL_ADV10G;
+ break;
+ default:
+ return -EINVAL;
+ }
+ ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, adv);
+ if (ret < 0)
+ return ret;
+ ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV, vend);
+ if (ret < 0)
+ return ret;
+ ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL, ctrl10);
+ if (ret < 0)
+ return ret;
+
+ /* set by vendor driver, but should be on by default */
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1,
+ MDIO_AN_CTRL1_XNP);
+ if (ret < 0)
+ return ret;
+
+ return genphy_c45_an_disable_aneg(phydev);
+}
+
+static int aqr105_config_aneg(struct phy_device *phydev)
+{
+ bool changed = false;
+ u16 reg;
+ int ret;
+
+ ret = aqr_set_mdix(phydev, phydev->mdix_ctrl);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ if (phydev->autoneg == AUTONEG_DISABLE)
+ return aqr105_setup_forced(phydev);
+
+ ret = genphy_c45_an_config_aneg(phydev);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ /* Clause 45 has no standardized support for 1000BaseT, therefore
+ * use vendor registers for this mode.
+ */
+ reg = 0;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phydev->advertising))
+ reg |= MDIO_AN_VEND_PROV_1000BASET_FULL;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ phydev->advertising))
+ reg |= MDIO_AN_VEND_PROV_1000BASET_HALF;
+
+ /* Handle the case when the 2.5G and 5G speeds are not advertised */
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ phydev->advertising))
+ reg |= MDIO_AN_VEND_PROV_2500BASET_FULL;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ phydev->advertising))
+ reg |= MDIO_AN_VEND_PROV_5000BASET_FULL;
+
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV,
+ MDIO_AN_VEND_PROV_1000BASET_HALF |
+ MDIO_AN_VEND_PROV_1000BASET_FULL |
+ MDIO_AN_VEND_PROV_2500BASET_FULL |
+ MDIO_AN_VEND_PROV_5000BASET_FULL, reg);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ return genphy_c45_check_and_restart_aneg(phydev, changed);
+}
+
+static int aqr_gen1_read_rate(struct phy_device *phydev)
{
- u32 config_reg;
int val;
val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_STATUS1);
@@ -298,49 +489,56 @@ static int aqr107_read_rate(struct phy_device *phydev)
switch (FIELD_GET(MDIO_AN_TX_VEND_STATUS1_RATE_MASK, val)) {
case MDIO_AN_TX_VEND_STATUS1_10BASET:
phydev->speed = SPEED_10;
- config_reg = VEND1_GLOBAL_CFG_10M;
break;
case MDIO_AN_TX_VEND_STATUS1_100BASETX:
phydev->speed = SPEED_100;
- config_reg = VEND1_GLOBAL_CFG_100M;
break;
case MDIO_AN_TX_VEND_STATUS1_1000BASET:
phydev->speed = SPEED_1000;
- config_reg = VEND1_GLOBAL_CFG_1G;
break;
case MDIO_AN_TX_VEND_STATUS1_2500BASET:
phydev->speed = SPEED_2500;
- config_reg = VEND1_GLOBAL_CFG_2_5G;
break;
case MDIO_AN_TX_VEND_STATUS1_5000BASET:
phydev->speed = SPEED_5000;
- config_reg = VEND1_GLOBAL_CFG_5G;
break;
case MDIO_AN_TX_VEND_STATUS1_10GBASET:
phydev->speed = SPEED_10000;
- config_reg = VEND1_GLOBAL_CFG_10G;
break;
default:
phydev->speed = SPEED_UNKNOWN;
- return 0;
}
- val = phy_read_mmd(phydev, MDIO_MMD_VEND1, config_reg);
- if (val < 0)
- return val;
+ return 0;
+}
- if (FIELD_GET(VEND1_GLOBAL_CFG_RATE_ADAPT, val) ==
- VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE)
- phydev->rate_matching = RATE_MATCH_PAUSE;
- else
- phydev->rate_matching = RATE_MATCH_NONE;
+/* Quad port PHYs like AQR412(C) have 4 system interfaces, but they can also be
+ * used with a single system interface over which all 4 ports are multiplexed
+ * (10G-QXGMII). To the MDIO registers, this mode is indistinguishable from
+ * USXGMII (which implies a single 10G port).
+ *
+ * To not rely solely on the device tree, we allow the regular system interface
+ * detection to work as usual, but we replace USXGMII with 10G-QXGMII based on
+ * the specific fingerprint of firmware images that are known to be for MUSX.
+ */
+static phy_interface_t aqr_translate_interface(struct phy_device *phydev,
+ phy_interface_t interface)
+{
+ struct aqr107_priv *priv = phydev->priv;
- return 0;
+ if (phy_id_compare(phydev->drv->phy_id, PHY_ID_AQR412C, phydev->drv->phy_id_mask) &&
+ priv->fingerprint == AQR_G3_V4_3_C_AQR_NXP_SPF_30841_MUSX_ID40019_VER1198 &&
+ interface == PHY_INTERFACE_MODE_USXGMII)
+ return PHY_INTERFACE_MODE_10G_QXGMII;
+
+ return interface;
}
-static int aqr107_read_status(struct phy_device *phydev)
+static int aqr_gen1_read_status(struct phy_device *phydev)
{
- int val, ret;
+ phy_interface_t interface;
+ int ret;
+ int val;
ret = aqr_read_status(phydev);
if (ret)
@@ -349,8 +547,7 @@ static int aqr107_read_status(struct phy_device *phydev)
if (!phydev->link || phydev->autoneg == AUTONEG_DISABLE)
return 0;
- /**
- * The status register is not immediately correct on line side link up.
+ /* The status register is not immediately correct on line side link up.
* Poll periodically until it reflects the correct ON state.
* Only return fail for read error, timeout defaults to OFF state.
*/
@@ -365,38 +562,65 @@ static int aqr107_read_status(struct phy_device *phydev)
switch (FIELD_GET(MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK, val)) {
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR:
- phydev->interface = PHY_INTERFACE_MODE_10GKR;
+ interface = PHY_INTERFACE_MODE_10GKR;
break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KX:
- phydev->interface = PHY_INTERFACE_MODE_1000BASEKX;
+ interface = PHY_INTERFACE_MODE_1000BASEKX;
break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI:
- phydev->interface = PHY_INTERFACE_MODE_10GBASER;
+ interface = PHY_INTERFACE_MODE_10GBASER;
break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_USXGMII:
- phydev->interface = PHY_INTERFACE_MODE_USXGMII;
+ interface = PHY_INTERFACE_MODE_USXGMII;
break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XAUI:
- phydev->interface = PHY_INTERFACE_MODE_XAUI;
+ interface = PHY_INTERFACE_MODE_XAUI;
break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII:
- phydev->interface = PHY_INTERFACE_MODE_SGMII;
+ interface = PHY_INTERFACE_MODE_SGMII;
break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_RXAUI:
- phydev->interface = PHY_INTERFACE_MODE_RXAUI;
+ interface = PHY_INTERFACE_MODE_RXAUI;
break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII:
- phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+ interface = PHY_INTERFACE_MODE_2500BASEX;
break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_OFF:
default:
phydev->link = false;
- phydev->interface = PHY_INTERFACE_MODE_NA;
+ interface = PHY_INTERFACE_MODE_NA;
break;
}
- /* Read possibly downshifted rate from vendor register */
- return aqr107_read_rate(phydev);
+ phydev->interface = aqr_translate_interface(phydev, interface);
+
+ /* Read rate from vendor register */
+ return aqr_gen1_read_rate(phydev);
+}
+
+static int aqr_gen2_read_status(struct phy_device *phydev)
+{
+ struct aqr107_priv *priv = phydev->priv;
+ int i, ret;
+
+ ret = aqr_gen1_read_status(phydev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < AQR_NUM_GLOBAL_CFG; i++) {
+ struct aqr_global_syscfg *syscfg = &priv->global_cfg[i];
+
+ if (syscfg->speed != phydev->speed)
+ continue;
+
+ if (syscfg->rate_adapt == AQR_RATE_ADAPT_PAUSE)
+ phydev->rate_matching = RATE_MATCH_PAUSE;
+ else
+ phydev->rate_matching = RATE_MATCH_NONE;
+ break;
+ }
+
+ return 0;
}
static int aqr107_get_downshift(struct phy_device *phydev, u8 *data)
@@ -481,27 +705,46 @@ int aqr_wait_reset_complete(struct phy_device *phydev)
return ret;
}
-static void aqr107_chip_info(struct phy_device *phydev)
+static int aqr_build_fingerprint(struct phy_device *phydev)
{
u8 fw_major, fw_minor, build_id, prov_id;
+ struct aqr107_priv *priv = phydev->priv;
+ u16 misc_id, misc_ver;
int val;
val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_FW_ID);
if (val < 0)
- return;
+ return val;
fw_major = FIELD_GET(VEND1_GLOBAL_FW_ID_MAJOR, val);
fw_minor = FIELD_GET(VEND1_GLOBAL_FW_ID_MINOR, val);
val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_RSVD_STAT1);
if (val < 0)
- return;
+ return val;
build_id = FIELD_GET(VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID, val);
prov_id = FIELD_GET(VEND1_GLOBAL_RSVD_STAT1_PROV_ID, val);
- phydev_dbg(phydev, "FW %u.%u, Build %u, Provisioning %u\n",
- fw_major, fw_minor, build_id, prov_id);
+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_FW_MISC_ID);
+ if (val < 0)
+ return val;
+
+ misc_id = val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_FW_MISC_VER);
+ if (val < 0)
+ return val;
+
+ misc_ver = val;
+
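+ /* Combine the firmware version, build, provisioning and misc
+ * ID/version fields into a single fingerprint identifying the
+ * exact firmware image in use.
+ */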
+ priv->fingerprint = AQR_FW_FINGERPRINT(fw_major, fw_minor, build_id,
+ prov_id, misc_id, misc_ver);
+
+ phydev_dbg(phydev, "FW %u.%u, Build %u, Provisioning %u, Misc ID %u, Version %u\n",
+ fw_major, fw_minor, build_id, prov_id, misc_id, misc_ver);
+
+ return 0;
}
static int aqr107_config_mdi(struct phy_device *phydev)
@@ -527,7 +770,7 @@ static int aqr107_config_mdi(struct phy_device *phydev)
mdi_conf | PMAPMD_RSVD_VEND_PROV_MDI_FORCE);
}
-static int aqr107_config_init(struct phy_device *phydev)
+static int aqr_gen1_config_init(struct phy_device *phydev)
{
struct aqr107_priv *priv = phydev->priv;
u32 led_idx;
@@ -539,6 +782,7 @@ static int aqr107_config_init(struct phy_device *phydev)
phydev->interface != PHY_INTERFACE_MODE_2500BASEX &&
phydev->interface != PHY_INTERFACE_MODE_XGMII &&
phydev->interface != PHY_INTERFACE_MODE_USXGMII &&
+ phydev->interface != PHY_INTERFACE_MODE_10G_QXGMII &&
phydev->interface != PHY_INTERFACE_MODE_10GKR &&
phydev->interface != PHY_INTERFACE_MODE_10GBASER &&
phydev->interface != PHY_INTERFACE_MODE_XAUI &&
@@ -549,8 +793,14 @@ static int aqr107_config_init(struct phy_device *phydev)
"Your devicetree is out of date, please update it. The AQR107 family doesn't support XGMII, maybe you mean USXGMII.\n");
ret = aqr_wait_reset_complete(phydev);
- if (!ret)
- aqr107_chip_info(phydev);
+ if (!ret) {
+ /* The PHY might work without a firmware image, so only build a
+ * fingerprint if the firmware was initialized.
+ */
+ ret = aqr_build_fingerprint(phydev);
+ if (ret)
+ return ret;
+ }
ret = aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
if (ret)
@@ -576,20 +826,145 @@ static int aqr107_config_init(struct phy_device *phydev)
return 0;
}
-static int aqcs109_config_init(struct phy_device *phydev)
+/* Walk the media-speed configuration registers to determine which
+ * host-side serdes modes may be used by the PHY depending on the
+ * negotiated media speed.
+ */
+static int aqr_gen2_read_global_syscfg(struct phy_device *phydev)
+{
+ struct aqr107_priv *priv = phydev->priv;
+ unsigned int serdes_mode, rate_adapt;
+ phy_interface_t interface;
+ int i, val;
+
+ for (i = 0; i < AQR_NUM_GLOBAL_CFG; i++) {
+ struct aqr_global_syscfg *syscfg = &priv->global_cfg[i];
+
+ syscfg->speed = aqr_global_cfg_regs[i].speed;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ aqr_global_cfg_regs[i].reg);
+ if (val < 0)
+ return val;
+
+ serdes_mode = FIELD_GET(VEND1_GLOBAL_CFG_SERDES_MODE, val);
+ rate_adapt = FIELD_GET(VEND1_GLOBAL_CFG_RATE_ADAPT, val);
+
+ switch (serdes_mode) {
+ case VEND1_GLOBAL_CFG_SERDES_MODE_XFI:
+ if (rate_adapt == VEND1_GLOBAL_CFG_RATE_ADAPT_USX)
+ interface = PHY_INTERFACE_MODE_USXGMII;
+ else
+ interface = PHY_INTERFACE_MODE_10GBASER;
+ break;
+
+ case VEND1_GLOBAL_CFG_SERDES_MODE_XFI5G:
+ interface = PHY_INTERFACE_MODE_5GBASER;
+ break;
+
+ case VEND1_GLOBAL_CFG_SERDES_MODE_OCSGMII:
+ interface = PHY_INTERFACE_MODE_2500BASEX;
+ break;
+
+ case VEND1_GLOBAL_CFG_SERDES_MODE_SGMII:
+ interface = PHY_INTERFACE_MODE_SGMII;
+ break;
+
+ default:
+ phydev_warn(phydev, "unrecognised serdes mode %u\n",
+ serdes_mode);
+ interface = PHY_INTERFACE_MODE_NA;
+ break;
+ }
+
+ syscfg->interface = aqr_translate_interface(phydev, interface);
+
+ switch (rate_adapt) {
+ case VEND1_GLOBAL_CFG_RATE_ADAPT_NONE:
+ syscfg->rate_adapt = AQR_RATE_ADAPT_NONE;
+ break;
+ case VEND1_GLOBAL_CFG_RATE_ADAPT_USX:
+ syscfg->rate_adapt = AQR_RATE_ADAPT_USX;
+ break;
+ case VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE:
+ syscfg->rate_adapt = AQR_RATE_ADAPT_PAUSE;
+ break;
+ default:
+ phydev_warn(phydev, "unrecognized rate adapt mode %u\n",
+ rate_adapt);
+ break;
+ }
+
+ phydev_dbg(phydev,
+ "Media speed %d uses host interface %s with %s\n",
+ syscfg->speed, phy_modes(syscfg->interface),
+ syscfg->rate_adapt == AQR_RATE_ADAPT_NONE ? "no rate adaptation" :
+ syscfg->rate_adapt == AQR_RATE_ADAPT_PAUSE ? "rate adaptation through flow control" :
+ syscfg->rate_adapt == AQR_RATE_ADAPT_USX ? "rate adaptation through symbol replication" :
+ "unrecognized rate adaptation type");
+ }
+
+ return 0;
+}
+
+static int aqr_gen2_fill_interface_modes(struct phy_device *phydev)
+{
+ unsigned long *possible = phydev->possible_interfaces;
+ struct aqr107_priv *priv = phydev->priv;
+ phy_interface_t interface;
+ int i, val, ret;
+
+ /* It's been observed on some models that - when coming out of suspend
+ * - the FW signals that the PHY is ready but the GLOBAL_CFG registers
+ * continue to return zeroes for some time. Let's poll the 100M
+ * register until it returns a real value as both 113c and 115c support
+ * this mode.
+ */
+ if (priv->wait_on_global_cfg) {
+ ret = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
+ VEND1_GLOBAL_CFG_100M, val,
+ val != 0, 1000, 100000, false);
+ if (ret)
+ return ret;
+ }
+
+ ret = aqr_gen2_read_global_syscfg(phydev);
+ if (ret)
+ return ret;
+
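+ /* Record every host interface the firmware may switch to in the
+ * possible_interfaces bitmap.
+ */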
+ for (i = 0; i < AQR_NUM_GLOBAL_CFG; i++) {
+ interface = priv->global_cfg[i].interface;
+ if (interface != PHY_INTERFACE_MODE_NA)
+ __set_bit(interface, possible);
+ }
+
+ return 0;
+}
+
+static int aqr_gen2_config_init(struct phy_device *phydev)
{
int ret;
+ ret = aqr_gen1_config_init(phydev);
+ if (ret)
+ return ret;
+
+ return aqr_gen2_fill_interface_modes(phydev);
+}
+
+static int aqr_gen3_config_init(struct phy_device *phydev)
+{
+ return aqr_gen2_config_init(phydev);
+}
+
+static int aqcs109_config_init(struct phy_device *phydev)
+{
/* Check that the PHY interface type is compatible */
if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
phydev->interface != PHY_INTERFACE_MODE_2500BASEX)
return -ENODEV;
- ret = aqr_wait_reset_complete(phydev);
- if (!ret)
- aqr107_chip_info(phydev);
-
- return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
+ return aqr_gen2_config_init(phydev);
}
static void aqr107_link_change_notify(struct phy_device *phydev)
@@ -637,7 +1012,7 @@ static void aqr107_link_change_notify(struct phy_device *phydev)
phydev_info(phydev, "Aquantia 1000Base-T2 mode active\n");
}
-static int aqr107_wait_processor_intensive_op(struct phy_device *phydev)
+static int aqr_gen1_wait_processor_intensive_op(struct phy_device *phydev)
{
int val, err;
@@ -661,17 +1036,16 @@ static int aqr107_wait_processor_intensive_op(struct phy_device *phydev)
return 0;
}
-static int aqr107_get_rate_matching(struct phy_device *phydev,
- phy_interface_t iface)
+static int aqr_gen2_get_rate_matching(struct phy_device *phydev,
+ phy_interface_t iface)
{
if (iface == PHY_INTERFACE_MODE_10GBASER ||
- iface == PHY_INTERFACE_MODE_2500BASEX ||
- iface == PHY_INTERFACE_MODE_NA)
+ iface == PHY_INTERFACE_MODE_2500BASEX)
return RATE_MATCH_PAUSE;
return RATE_MATCH_NONE;
}
-static int aqr107_suspend(struct phy_device *phydev)
+static int aqr_gen1_suspend(struct phy_device *phydev)
{
int err;
@@ -680,10 +1054,10 @@ static int aqr107_suspend(struct phy_device *phydev)
if (err)
return err;
- return aqr107_wait_processor_intensive_op(phydev);
+ return aqr_gen1_wait_processor_intensive_op(phydev);
}
-static int aqr107_resume(struct phy_device *phydev)
+static int aqr_gen1_resume(struct phy_device *phydev)
{
int err;
@@ -692,89 +1066,7 @@ static int aqr107_resume(struct phy_device *phydev)
if (err)
return err;
- return aqr107_wait_processor_intensive_op(phydev);
-}
-
-static const u16 aqr_global_cfg_regs[] = {
- VEND1_GLOBAL_CFG_10M,
- VEND1_GLOBAL_CFG_100M,
- VEND1_GLOBAL_CFG_1G,
- VEND1_GLOBAL_CFG_2_5G,
- VEND1_GLOBAL_CFG_5G,
- VEND1_GLOBAL_CFG_10G
-};
-
-static int aqr107_fill_interface_modes(struct phy_device *phydev)
-{
- unsigned long *possible = phydev->possible_interfaces;
- unsigned int serdes_mode, rate_adapt;
- phy_interface_t interface;
- int i, val;
-
- /* Walk the media-speed configuration registers to determine which
- * host-side serdes modes may be used by the PHY depending on the
- * negotiated media speed.
- */
- for (i = 0; i < ARRAY_SIZE(aqr_global_cfg_regs); i++) {
- val = phy_read_mmd(phydev, MDIO_MMD_VEND1,
- aqr_global_cfg_regs[i]);
- if (val < 0)
- return val;
-
- serdes_mode = FIELD_GET(VEND1_GLOBAL_CFG_SERDES_MODE, val);
- rate_adapt = FIELD_GET(VEND1_GLOBAL_CFG_RATE_ADAPT, val);
-
- switch (serdes_mode) {
- case VEND1_GLOBAL_CFG_SERDES_MODE_XFI:
- if (rate_adapt == VEND1_GLOBAL_CFG_RATE_ADAPT_USX)
- interface = PHY_INTERFACE_MODE_USXGMII;
- else
- interface = PHY_INTERFACE_MODE_10GBASER;
- break;
-
- case VEND1_GLOBAL_CFG_SERDES_MODE_XFI5G:
- interface = PHY_INTERFACE_MODE_5GBASER;
- break;
-
- case VEND1_GLOBAL_CFG_SERDES_MODE_OCSGMII:
- interface = PHY_INTERFACE_MODE_2500BASEX;
- break;
-
- case VEND1_GLOBAL_CFG_SERDES_MODE_SGMII:
- interface = PHY_INTERFACE_MODE_SGMII;
- break;
-
- default:
- phydev_warn(phydev, "unrecognised serdes mode %u\n",
- serdes_mode);
- interface = PHY_INTERFACE_MODE_NA;
- break;
- }
-
- if (interface != PHY_INTERFACE_MODE_NA)
- __set_bit(interface, possible);
- }
-
- return 0;
-}
-
-static int aqr113c_fill_interface_modes(struct phy_device *phydev)
-{
- int val, ret;
-
- /* It's been observed on some models that - when coming out of suspend
- * - the FW signals that the PHY is ready but the GLOBAL_CFG registers
- * continue on returning zeroes for some time. Let's poll the 100M
- * register until it returns a real value as both 113c and 115c support
- * this mode.
- */
- ret = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
- VEND1_GLOBAL_CFG_100M, val, val != 0,
- 1000, 100000, false);
- if (ret)
- return ret;
-
- return aqr107_fill_interface_modes(phydev);
+ return aqr_gen1_wait_processor_intensive_op(phydev);
}
static int aqr115c_get_features(struct phy_device *phydev)
@@ -802,11 +1094,14 @@ static int aqr111_get_features(struct phy_device *phydev)
return 0;
}
-static int aqr113c_config_init(struct phy_device *phydev)
+static int aqr_gen4_config_init(struct phy_device *phydev)
{
+ struct aqr107_priv *priv = phydev->priv;
int ret;
- ret = aqr107_config_init(phydev);
+ priv->wait_on_global_cfg = true;
+
+ ret = aqr_gen3_config_init(phydev);
if (ret < 0)
return ret;
@@ -815,11 +1110,55 @@ static int aqr113c_config_init(struct phy_device *phydev)
if (ret)
return ret;
- ret = aqr107_wait_processor_intensive_op(phydev);
- if (ret)
- return ret;
+ return aqr_gen1_wait_processor_intensive_op(phydev);
+}
+
+static unsigned int aqr_gen2_inband_caps(struct phy_device *phydev,
+ phy_interface_t interface)
+{
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ interface == PHY_INTERFACE_MODE_USXGMII ||
+ interface == PHY_INTERFACE_MODE_10G_QXGMII)
+ return LINK_INBAND_ENABLE | LINK_INBAND_DISABLE;
+
+ return 0;
+}
+
+static int aqr_gen2_config_inband(struct phy_device *phydev, unsigned int modes)
+{
+ struct aqr107_priv *priv = phydev->priv;
+
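+ /* USXGMII and 10G-QXGMII in-band signalling is controlled through
+ * the PHY XS vendor provisioning register.
+ */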
+ if (phydev->interface == PHY_INTERFACE_MODE_USXGMII ||
+ phydev->interface == PHY_INTERFACE_MODE_10G_QXGMII) {
+ u16 set = 0;
+
+ if (modes == LINK_INBAND_ENABLE)
+ set = MDIO_PHYXS_VEND_PROV2_USX_AN;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_PHYXS,
+ MDIO_PHYXS_VEND_PROV2,
+ MDIO_PHYXS_VEND_PROV2_USX_AN, set);
+ }
+
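+ /* For the remaining host interfaces, toggle autoneg in the
+ * per-speed global configuration register matching the current
+ * interface.
+ */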
+ for (int i = 0; i < AQR_NUM_GLOBAL_CFG; i++) {
+ struct aqr_global_syscfg *syscfg = &priv->global_cfg[i];
+ u16 set = 0;
+ int err;
+
+ if (syscfg->interface != phydev->interface)
+ continue;
+
+ if (modes == LINK_INBAND_ENABLE)
+ set = VEND1_GLOBAL_CFG_AUTONEG_ENA;
- return aqr113c_fill_interface_modes(phydev);
+ err = phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+ aqr_global_cfg_regs[i].reg,
+ VEND1_GLOBAL_CFG_AUTONEG_ENA, set);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
static int aqr107_probe(struct phy_device *phydev)
@@ -859,12 +1198,15 @@ static struct phy_driver aqr_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR105),
.name = "Aquantia AQR105",
- .config_aneg = aqr_config_aneg,
+ .get_features = aqr105_get_features,
+ .probe = aqr107_probe,
+ .config_init = aqr_gen1_config_init,
+ .config_aneg = aqr105_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
- .read_status = aqr_read_status,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
+ .read_status = aqr_gen1_read_status,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR106),
@@ -878,16 +1220,16 @@ static struct phy_driver aqr_driver[] = {
PHY_ID_MATCH_MODEL(PHY_ID_AQR107),
.name = "Aquantia AQR107",
.probe = aqr107_probe,
- .get_rate_matching = aqr107_get_rate_matching,
- .config_init = aqr107_config_init,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
+ .config_init = aqr_gen2_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
- .read_status = aqr107_read_status,
+ .read_status = aqr_gen2_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
@@ -897,21 +1239,23 @@ static struct phy_driver aqr_driver[] = {
.led_hw_control_set = aqr_phy_led_hw_control_set,
.led_hw_control_get = aqr_phy_led_hw_control_get,
.led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQCS109),
.name = "Aquantia AQCS109",
.probe = aqr107_probe,
- .get_rate_matching = aqr107_get_rate_matching,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
.config_init = aqcs109_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
- .read_status = aqr107_read_status,
+ .read_status = aqr_gen2_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
@@ -922,21 +1266,23 @@ static struct phy_driver aqr_driver[] = {
.led_hw_control_set = aqr_phy_led_hw_control_set,
.led_hw_control_get = aqr_phy_led_hw_control_get,
.led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR111),
.name = "Aquantia AQR111",
.probe = aqr107_probe,
- .get_rate_matching = aqr107_get_rate_matching,
- .config_init = aqr107_config_init,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
+ .config_init = aqr_gen3_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
- .read_status = aqr107_read_status,
+ .read_status = aqr_gen2_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
@@ -947,21 +1293,23 @@ static struct phy_driver aqr_driver[] = {
.led_hw_control_set = aqr_phy_led_hw_control_set,
.led_hw_control_get = aqr_phy_led_hw_control_get,
.led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR111B0),
.name = "Aquantia AQR111B0",
.probe = aqr107_probe,
- .get_rate_matching = aqr107_get_rate_matching,
- .config_init = aqr107_config_init,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
+ .config_init = aqr_gen3_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
- .read_status = aqr107_read_status,
+ .read_status = aqr_gen2_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
@@ -972,6 +1320,8 @@ static struct phy_driver aqr_driver[] = {
.led_hw_control_set = aqr_phy_led_hw_control_set,
.led_hw_control_get = aqr_phy_led_hw_control_get,
.led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR405),
@@ -980,20 +1330,23 @@ static struct phy_driver aqr_driver[] = {
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
.read_status = aqr_read_status,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR112),
.name = "Aquantia AQR112",
.probe = aqr107_probe,
+ .config_init = aqr_gen3_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
- .read_status = aqr107_read_status,
- .get_rate_matching = aqr107_get_rate_matching,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
+ .read_status = aqr_gen2_read_status,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
@@ -1003,39 +1356,65 @@ static struct phy_driver aqr_driver[] = {
.led_hw_control_set = aqr_phy_led_hw_control_set,
.led_hw_control_get = aqr_phy_led_hw_control_get,
.led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR412),
.name = "Aquantia AQR412",
.probe = aqr107_probe,
+ .config_init = aqr_gen3_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
- .read_status = aqr107_read_status,
- .get_rate_matching = aqr107_get_rate_matching,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
+ .read_status = aqr_gen2_read_status,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
.link_change_notify = aqr107_link_change_notify,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
+},
+{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR412C),
+ .name = "Aquantia AQR412C",
+ .probe = aqr107_probe,
+ .config_init = aqr_gen3_config_init,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .handle_interrupt = aqr_handle_interrupt,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
+ .read_status = aqr_gen2_read_status,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR113),
.name = "Aquantia AQR113",
.probe = aqr107_probe,
- .get_rate_matching = aqr107_get_rate_matching,
- .config_init = aqr113c_config_init,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
+ .config_init = aqr_gen4_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
- .read_status = aqr107_read_status,
+ .read_status = aqr_gen2_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
@@ -1045,21 +1424,23 @@ static struct phy_driver aqr_driver[] = {
.led_hw_control_set = aqr_phy_led_hw_control_set,
.led_hw_control_get = aqr_phy_led_hw_control_get,
.led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR113C),
.name = "Aquantia AQR113C",
.probe = aqr107_probe,
- .get_rate_matching = aqr107_get_rate_matching,
- .config_init = aqr113c_config_init,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
+ .config_init = aqr_gen4_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
- .read_status = aqr107_read_status,
+ .read_status = aqr_gen2_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
@@ -1069,21 +1450,23 @@ static struct phy_driver aqr_driver[] = {
.led_hw_control_set = aqr_phy_led_hw_control_set,
.led_hw_control_get = aqr_phy_led_hw_control_get,
.led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR114C),
.name = "Aquantia AQR114C",
.probe = aqr107_probe,
- .get_rate_matching = aqr107_get_rate_matching,
- .config_init = aqr107_config_init,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
+ .config_init = aqr_gen4_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
- .read_status = aqr107_read_status,
+ .read_status = aqr_gen2_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
@@ -1094,21 +1477,50 @@ static struct phy_driver aqr_driver[] = {
.led_hw_control_set = aqr_phy_led_hw_control_set,
.led_hw_control_get = aqr_phy_led_hw_control_get,
.led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
+},
+{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR115),
+ .name = "Aquantia AQR115",
+ .probe = aqr107_probe,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
+ .config_init = aqr_gen4_config_init,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .handle_interrupt = aqr_handle_interrupt,
+ .read_status = aqr_gen2_read_status,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .get_features = aqr115c_get_features,
+ .link_change_notify = aqr107_link_change_notify,
+ .led_brightness_set = aqr_phy_led_brightness_set,
+ .led_hw_is_supported = aqr_phy_led_hw_is_supported,
+ .led_hw_control_set = aqr_phy_led_hw_control_set,
+ .led_hw_control_get = aqr_phy_led_hw_control_get,
+ .led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR115C),
.name = "Aquantia AQR115C",
.probe = aqr107_probe,
- .get_rate_matching = aqr107_get_rate_matching,
- .config_init = aqr113c_config_init,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
+ .config_init = aqr_gen4_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
- .read_status = aqr107_read_status,
+ .read_status = aqr_gen2_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
@@ -1119,21 +1531,23 @@ static struct phy_driver aqr_driver[] = {
.led_hw_control_set = aqr_phy_led_hw_control_set,
.led_hw_control_get = aqr_phy_led_hw_control_get,
.led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR813),
.name = "Aquantia AQR813",
.probe = aqr107_probe,
- .get_rate_matching = aqr107_get_rate_matching,
- .config_init = aqr107_config_init,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
+ .config_init = aqr_gen4_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
- .read_status = aqr107_read_status,
+ .read_status = aqr_gen2_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
@@ -1143,12 +1557,14 @@ static struct phy_driver aqr_driver[] = {
.led_hw_control_set = aqr_phy_led_hw_control_set,
.led_hw_control_get = aqr_phy_led_hw_control_get,
.led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
};
module_phy_driver(aqr_driver);
-static struct mdio_device_id __maybe_unused aqr_tbl[] = {
+static const struct mdio_device_id __maybe_unused aqr_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_AQ1202) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQ2104) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR105) },
@@ -1160,9 +1576,11 @@ static struct mdio_device_id __maybe_unused aqr_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR111B0) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR112) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR412) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR412C) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR113) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR113C) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR114C) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR115) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR115C) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR813) },
{ }
diff --git a/drivers/net/phy/as21xxx.c b/drivers/net/phy/as21xxx.c
new file mode 100644
index 000000000000..005277360656
--- /dev/null
+++ b/drivers/net/phy/as21xxx.c
@@ -0,0 +1,1088 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Aeonsemi AS21xxx PHY Driver
+ *
+ * Author: Christian Marangi <ansuelsmth@gmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+
+#define VEND1_GLB_REG_CPU_RESET_ADDR_LO_BASEADDR 0x3
+#define VEND1_GLB_REG_CPU_RESET_ADDR_HI_BASEADDR 0x4
+
+#define VEND1_GLB_REG_CPU_CTRL 0xe
+#define VEND1_GLB_CPU_CTRL_MASK GENMASK(4, 0)
+#define VEND1_GLB_CPU_CTRL_LED_POLARITY_MASK GENMASK(12, 8)
+#define VEND1_GLB_CPU_CTRL_LED_POLARITY(_n) FIELD_PREP(VEND1_GLB_CPU_CTRL_LED_POLARITY_MASK, \
+ BIT(_n))
+
+#define VEND1_FW_START_ADDR 0x100
+
+#define VEND1_GLB_REG_MDIO_INDIRECT_ADDRCMD 0x101
+#define VEND1_GLB_REG_MDIO_INDIRECT_LOAD 0x102
+
+#define VEND1_GLB_REG_MDIO_INDIRECT_STATUS 0x103
+
+#define VEND1_PTP_CLK 0x142
+#define VEND1_PTP_CLK_EN BIT(6)
+
+/* 5 LEDs at steps of 0x20
+ * FE: Fast-Ethernet (10/100)
+ * GE: Gigabit-Ethernet (1000)
+ * NG: New-Generation (2500/5000/10000)
+ */
+#define VEND1_LED_REG(_n) (0x1800 + ((_n) * 0x10))
+#define VEND1_LED_REG_A_EVENT GENMASK(15, 11)
+#define VEND1_LED_CONF 0x1881
+#define VEND1_LED_CONFG_BLINK GENMASK(7, 0)
+
+#define VEND1_SPEED_STATUS 0x4002
+#define VEND1_SPEED_MASK GENMASK(7, 0)
+#define VEND1_SPEED_10000 FIELD_PREP_CONST(VEND1_SPEED_MASK, 0x3)
+#define VEND1_SPEED_5000 FIELD_PREP_CONST(VEND1_SPEED_MASK, 0x5)
+#define VEND1_SPEED_2500 FIELD_PREP_CONST(VEND1_SPEED_MASK, 0x9)
+#define VEND1_SPEED_1000 FIELD_PREP_CONST(VEND1_SPEED_MASK, 0x10)
+#define VEND1_SPEED_100 FIELD_PREP_CONST(VEND1_SPEED_MASK, 0x20)
+#define VEND1_SPEED_10 FIELD_PREP_CONST(VEND1_SPEED_MASK, 0x0)
+
+#define VEND1_IPC_CMD 0x5801
+#define AEON_IPC_CMD_PARITY BIT(15)
+#define AEON_IPC_CMD_SIZE GENMASK(10, 6)
+#define AEON_IPC_CMD_OPCODE GENMASK(5, 0)
+
+#define IPC_CMD_NOOP 0x0 /* Do nothing */
+#define IPC_CMD_INFO 0x1 /* Get Firmware Version */
+#define IPC_CMD_SYS_CPU 0x2 /* SYS_CPU */
+#define IPC_CMD_BULK_DATA 0xa /* Pass bulk data in ipc registers. */
+#define IPC_CMD_BULK_WRITE 0xc /* Write bulk data to memory */
+#define IPC_CMD_CFG_PARAM 0x1a /* Write config parameters to memory */
+#define IPC_CMD_NG_TESTMODE 0x1b /* Set NG test mode and tone */
+#define IPC_CMD_TEMP_MON 0x15 /* Temperature monitoring function */
+#define IPC_CMD_SET_LED 0x23 /* Set led */
+
+#define VEND1_IPC_STS 0x5802
+#define AEON_IPC_STS_PARITY BIT(15)
+#define AEON_IPC_STS_SIZE GENMASK(14, 10)
+#define AEON_IPC_STS_OPCODE GENMASK(9, 4)
+#define AEON_IPC_STS_STATUS GENMASK(3, 0)
+#define AEON_IPC_STS_STATUS_RCVD FIELD_PREP_CONST(AEON_IPC_STS_STATUS, 0x1)
+#define AEON_IPC_STS_STATUS_PROCESS FIELD_PREP_CONST(AEON_IPC_STS_STATUS, 0x2)
+#define AEON_IPC_STS_STATUS_SUCCESS FIELD_PREP_CONST(AEON_IPC_STS_STATUS, 0x4)
+#define AEON_IPC_STS_STATUS_ERROR FIELD_PREP_CONST(AEON_IPC_STS_STATUS, 0x8)
+#define AEON_IPC_STS_STATUS_BUSY FIELD_PREP_CONST(AEON_IPC_STS_STATUS, 0xe)
+#define AEON_IPC_STS_STATUS_READY FIELD_PREP_CONST(AEON_IPC_STS_STATUS, 0xf)
+
+#define VEND1_IPC_DATA0 0x5808
+#define VEND1_IPC_DATA1 0x5809
+#define VEND1_IPC_DATA2 0x580a
+#define VEND1_IPC_DATA3 0x580b
+#define VEND1_IPC_DATA4 0x580c
+#define VEND1_IPC_DATA5 0x580d
+#define VEND1_IPC_DATA6 0x580e
+#define VEND1_IPC_DATA7 0x580f
+#define VEND1_IPC_DATA(_n) (VEND1_IPC_DATA0 + (_n))
+
+/* Sub command of CMD_INFO */
+#define IPC_INFO_VERSION 0x1
+
+/* Sub command of CMD_SYS_CPU */
+#define IPC_SYS_CPU_REBOOT 0x3
+#define IPC_SYS_CPU_IMAGE_OFST 0x4
+#define IPC_SYS_CPU_IMAGE_CHECK 0x5
+#define IPC_SYS_CPU_PHY_ENABLE 0x6
+
+/* Sub command of CMD_CFG_PARAM */
+#define IPC_CFG_PARAM_DIRECT 0x4
+
+/* CFG DIRECT sub command */
+#define IPC_CFG_PARAM_DIRECT_NG_PHYCTRL 0x1
+#define IPC_CFG_PARAM_DIRECT_CU_AN 0x2
+#define IPC_CFG_PARAM_DIRECT_SDS_PCS 0x3
+#define IPC_CFG_PARAM_DIRECT_AUTO_EEE 0x4
+#define IPC_CFG_PARAM_DIRECT_SDS_PMA 0x5
+#define IPC_CFG_PARAM_DIRECT_DPC_RA 0x6
+#define IPC_CFG_PARAM_DIRECT_DPC_PKT_CHK 0x7
+#define IPC_CFG_PARAM_DIRECT_DPC_SDS_WAIT_ETH 0x8
+#define IPC_CFG_PARAM_DIRECT_WDT 0x9
+#define IPC_CFG_PARAM_DIRECT_SDS_RESTART_AN 0x10
+#define IPC_CFG_PARAM_DIRECT_TEMP_MON 0x11
+#define IPC_CFG_PARAM_DIRECT_WOL 0x12
+
+/* Sub command of CMD_TEMP_MON */
+#define IPC_CMD_TEMP_MON_GET 0x4
+
+#define AS21XXX_MDIO_AN_C22 0xffe0
+
+#define PHY_ID_AS21XXX 0x75009410
+/* AS21xxx ID Legend
+ * AS21x1xxB1
+ * ^ ^^
+ * | |J: Supports SyncE/PTP
+ * | |P: No SyncE/PTP support
+ * | 1: Supports 2nd Serdes
+ * | 2: No 2nd Serdes support
+ * 0: 10G, 5G, 2.5G
+ * 5: 5G, 2.5G
+ * 2: 2.5G
+ */
+#define PHY_ID_AS21011JB1 0x75009402
+#define PHY_ID_AS21011PB1 0x75009412
+#define PHY_ID_AS21010JB1 0x75009422
+#define PHY_ID_AS21010PB1 0x75009432
+#define PHY_ID_AS21511JB1 0x75009442
+#define PHY_ID_AS21511PB1 0x75009452
+#define PHY_ID_AS21510JB1 0x75009462
+#define PHY_ID_AS21510PB1 0x75009472
+#define PHY_ID_AS21210JB1 0x75009482
+#define PHY_ID_AS21210PB1 0x75009492
+#define PHY_VENDOR_AEONSEMI 0x75009400
+
+#define AEON_MAX_LEDS 5
+#define AEON_IPC_DELAY 10000
+#define AEON_IPC_TIMEOUT (AEON_IPC_DELAY * 100)
+#define AEON_IPC_DATA_NUM_REGISTERS 8
+#define AEON_IPC_DATA_MAX (AEON_IPC_DATA_NUM_REGISTERS * sizeof(u16))
+
+#define AEON_BOOT_ADDR 0x1000
+#define AEON_CPU_BOOT_ADDR 0x2000
+#define AEON_CPU_CTRL_FW_LOAD (BIT(4) | BIT(2) | BIT(1) | BIT(0))
+#define AEON_CPU_CTRL_FW_START BIT(0)
+
+enum as21xxx_led_event {
+ VEND1_LED_REG_A_EVENT_ON_10 = 0x0,
+ VEND1_LED_REG_A_EVENT_ON_100,
+ VEND1_LED_REG_A_EVENT_ON_1000,
+ VEND1_LED_REG_A_EVENT_ON_2500,
+ VEND1_LED_REG_A_EVENT_ON_5000,
+ VEND1_LED_REG_A_EVENT_ON_10000,
+ VEND1_LED_REG_A_EVENT_ON_FE_GE,
+ VEND1_LED_REG_A_EVENT_ON_NG,
+ VEND1_LED_REG_A_EVENT_ON_FULL_DUPLEX,
+ VEND1_LED_REG_A_EVENT_ON_COLLISION,
+ VEND1_LED_REG_A_EVENT_BLINK_TX,
+ VEND1_LED_REG_A_EVENT_BLINK_RX,
+ VEND1_LED_REG_A_EVENT_BLINK_ACT,
+ VEND1_LED_REG_A_EVENT_ON_LINK,
+ VEND1_LED_REG_A_EVENT_ON_LINK_BLINK_ACT,
+ VEND1_LED_REG_A_EVENT_ON_LINK_BLINK_RX,
+ VEND1_LED_REG_A_EVENT_ON_FE_GE_BLINK_ACT,
+ VEND1_LED_REG_A_EVENT_ON_NG_BLINK_ACT,
+ VEND1_LED_REG_A_EVENT_ON_NG_BLINK_FE_GE,
+ VEND1_LED_REG_A_EVENT_ON_FD_BLINK_COLLISION,
+ VEND1_LED_REG_A_EVENT_ON,
+ VEND1_LED_REG_A_EVENT_OFF,
+};
+
+struct as21xxx_led_pattern_info {
+ unsigned int pattern;
+ u16 val;
+};
+
+struct as21xxx_priv {
+ bool parity_status;
+ /* Protect concurrent IPC access */
+ struct mutex ipc_lock;
+};
+
+static struct as21xxx_led_pattern_info as21xxx_led_supported_pattern[] = {
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_10),
+ .val = VEND1_LED_REG_A_EVENT_ON_10
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_100),
+ .val = VEND1_LED_REG_A_EVENT_ON_100
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_1000),
+ .val = VEND1_LED_REG_A_EVENT_ON_1000
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_2500),
+ .val = VEND1_LED_REG_A_EVENT_ON_2500
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_5000),
+ .val = VEND1_LED_REG_A_EVENT_ON_5000
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_10000),
+ .val = VEND1_LED_REG_A_EVENT_ON_10000
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK),
+ .val = VEND1_LED_REG_A_EVENT_ON_LINK
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_1000),
+ .val = VEND1_LED_REG_A_EVENT_ON_FE_GE
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_2500) |
+ BIT(TRIGGER_NETDEV_LINK_5000) |
+ BIT(TRIGGER_NETDEV_LINK_10000),
+ .val = VEND1_LED_REG_A_EVENT_ON_NG
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_FULL_DUPLEX),
+ .val = VEND1_LED_REG_A_EVENT_ON_FULL_DUPLEX
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_TX),
+ .val = VEND1_LED_REG_A_EVENT_BLINK_TX
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_RX),
+ .val = VEND1_LED_REG_A_EVENT_BLINK_RX
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_TX) |
+ BIT(TRIGGER_NETDEV_RX),
+ .val = VEND1_LED_REG_A_EVENT_BLINK_ACT
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_LINK_2500) |
+ BIT(TRIGGER_NETDEV_LINK_5000) |
+ BIT(TRIGGER_NETDEV_LINK_10000),
+ .val = VEND1_LED_REG_A_EVENT_ON_LINK
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_LINK_2500) |
+ BIT(TRIGGER_NETDEV_LINK_5000) |
+ BIT(TRIGGER_NETDEV_LINK_10000) |
+ BIT(TRIGGER_NETDEV_TX) |
+ BIT(TRIGGER_NETDEV_RX),
+ .val = VEND1_LED_REG_A_EVENT_ON_LINK_BLINK_ACT
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_LINK_2500) |
+ BIT(TRIGGER_NETDEV_LINK_5000) |
+ BIT(TRIGGER_NETDEV_LINK_10000) |
+ BIT(TRIGGER_NETDEV_RX),
+ .val = VEND1_LED_REG_A_EVENT_ON_LINK_BLINK_RX
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_TX) |
+ BIT(TRIGGER_NETDEV_RX),
+ .val = VEND1_LED_REG_A_EVENT_ON_FE_GE_BLINK_ACT
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_2500) |
+ BIT(TRIGGER_NETDEV_LINK_5000) |
+ BIT(TRIGGER_NETDEV_LINK_10000) |
+ BIT(TRIGGER_NETDEV_TX) |
+ BIT(TRIGGER_NETDEV_RX),
+ .val = VEND1_LED_REG_A_EVENT_ON_NG_BLINK_ACT
+ }
+};
+
+static int aeon_firmware_boot(struct phy_device *phydev, const u8 *data,
+ size_t size)
+{
+ int i, ret;
+ u16 val;
+
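+ /* Put the on-chip CPU into firmware-load mode, stream the image
+ * through the indirect MDIO load register, then start the CPU
+ * from the boot address.
+ */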
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLB_REG_CPU_CTRL,
+ VEND1_GLB_CPU_CTRL_MASK, AEON_CPU_CTRL_FW_LOAD);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_FW_START_ADDR,
+ AEON_BOOT_ADDR);
+ if (ret)
+ return ret;
+
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_GLB_REG_MDIO_INDIRECT_ADDRCMD,
+ 0x3ffc, 0xc000);
+ if (ret)
+ return ret;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_GLB_REG_MDIO_INDIRECT_STATUS);
+ if (val > 1) {
+ phydev_err(phydev, "wrong origin mdio_indirect_status: %x\n", val);
+ return -EINVAL;
+ }
+
+ /* Firmware is always aligned to u16 */
+ for (i = 0; i < size; i += 2) {
+ val = data[i + 1] << 8 | data[i];
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_GLB_REG_MDIO_INDIRECT_LOAD, val);
+ if (ret)
+ return ret;
+ }
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_GLB_REG_CPU_RESET_ADDR_LO_BASEADDR,
+ lower_16_bits(AEON_CPU_BOOT_ADDR));
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_GLB_REG_CPU_RESET_ADDR_HI_BASEADDR,
+ upper_16_bits(AEON_CPU_BOOT_ADDR));
+ if (ret)
+ return ret;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLB_REG_CPU_CTRL,
+ VEND1_GLB_CPU_CTRL_MASK, AEON_CPU_CTRL_FW_START);
+}
+
+static int aeon_firmware_load(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ const struct firmware *fw;
+ const char *fw_name;
+ int ret;
+
+ ret = of_property_read_string(dev->of_node, "firmware-name",
+ &fw_name);
+ if (ret)
+ return ret;
+
+ ret = request_firmware(&fw, fw_name, dev);
+ if (ret) {
+ phydev_err(phydev, "failed to find FW file %s (%d)\n",
+ fw_name, ret);
+ return ret;
+ }
+
+ ret = aeon_firmware_boot(phydev, fw->data, fw->size);
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+static bool aeon_ipc_ready(u16 val, bool parity_status)
+{
+ u16 status;
+
+ if (FIELD_GET(AEON_IPC_STS_PARITY, val) != parity_status)
+ return false;
+
+ status = val & AEON_IPC_STS_STATUS;
+
+ return status != AEON_IPC_STS_STATUS_RCVD &&
+ status != AEON_IPC_STS_STATUS_PROCESS &&
+ status != AEON_IPC_STS_STATUS_BUSY;
+}
+
+static int aeon_ipc_wait_cmd(struct phy_device *phydev, bool parity_status)
+{
+ u16 val;
+
+ /* Exit condition logic:
+ * - Wait for parity bit equal
+ * - Wait for status success, error OR ready
+ */
+ return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, VEND1_IPC_STS, val,
+ aeon_ipc_ready(val, parity_status),
+ AEON_IPC_DELAY, AEON_IPC_TIMEOUT, false);
+}
+
+static int aeon_ipc_send_cmd(struct phy_device *phydev,
+ struct as21xxx_priv *priv,
+ u16 cmd, u16 *ret_sts)
+{
+ bool curr_parity;
+ int ret;
+
+ /* The IPC syncs by using a single parity bit.
+ * Each CMD alternately has this bit set or clear
+ * so that the correct flow and packet order can be tracked.
+ */
+ curr_parity = priv->parity_status;
+ if (priv->parity_status)
+ cmd |= AEON_IPC_CMD_PARITY;
+
+ /* Always update parity for next packet */
+ priv->parity_status = !priv->parity_status;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_IPC_CMD, cmd);
+ if (ret)
+ return ret;
+
+ /* Wait for packet to be processed */
+ usleep_range(AEON_IPC_DELAY, AEON_IPC_DELAY + 5000);
+
+ /* Without ret_sts, skip waiting for packet completion
+ * (ipc parity bit sync)
+ */
+ if (!ret_sts)
+ return 0;
+
+ ret = aeon_ipc_wait_cmd(phydev, curr_parity);
+ if (ret)
+ return ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_IPC_STS);
+ if (ret < 0)
+ return ret;
+
+ *ret_sts = ret;
+ if ((*ret_sts & AEON_IPC_STS_STATUS) != AEON_IPC_STS_STATUS_SUCCESS)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* If data is NULL, return 0 or a negative error.
+ * If data is not NULL, return the number of bytes received from the IPC or
+ * a negative error.
+ */
+static int aeon_ipc_send_msg(struct phy_device *phydev,
+ u16 opcode, u16 *data, unsigned int data_len,
+ u16 *ret_data)
+{
+ struct as21xxx_priv *priv = phydev->priv;
+ unsigned int ret_size;
+ u16 cmd, ret_sts;
+ int ret;
+ int i;
+
+ /* The IPC has a maximum of 8 registers to transfer data;
+ * make sure we never exceed this.
+ */
+ if (data_len > AEON_IPC_DATA_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < data_len / sizeof(u16); i++)
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_IPC_DATA(i),
+ data[i]);
+
+ cmd = FIELD_PREP(AEON_IPC_CMD_SIZE, data_len) |
+ FIELD_PREP(AEON_IPC_CMD_OPCODE, opcode);
+
+ mutex_lock(&priv->ipc_lock);
+
+ ret = aeon_ipc_send_cmd(phydev, priv, cmd, &ret_sts);
+ if (ret) {
+ phydev_err(phydev, "failed to send ipc msg for %x: %d\n",
+ opcode, ret);
+ goto out;
+ }
+
+ if (!data)
+ goto out;
+
+ if ((ret_sts & AEON_IPC_STS_STATUS) == AEON_IPC_STS_STATUS_ERROR) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Prevent the IPC from smashing the kernel stack.
+ * We can't trust the IPC to return a good value and we always
+ * preallocate space for 16 bytes.
+ */
+ ret_size = FIELD_GET(AEON_IPC_STS_SIZE, ret_sts);
+ if (ret_size > AEON_IPC_DATA_MAX) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Read data from IPC data register for ret_size value from IPC */
+ for (i = 0; i < DIV_ROUND_UP(ret_size, sizeof(u16)); i++) {
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_IPC_DATA(i));
+ if (ret < 0)
+ goto out;
+
+ ret_data[i] = ret;
+ }
+
+ ret = ret_size;
+
+out:
+ mutex_unlock(&priv->ipc_lock);
+
+ return ret;
+}
+
+static int aeon_ipc_noop(struct phy_device *phydev,
+ struct as21xxx_priv *priv, u16 *ret_sts)
+{
+ u16 cmd;
+
+ cmd = FIELD_PREP(AEON_IPC_CMD_SIZE, 0) |
+ FIELD_PREP(AEON_IPC_CMD_OPCODE, IPC_CMD_NOOP);
+
+ return aeon_ipc_send_cmd(phydev, priv, cmd, ret_sts);
+}
+
+/* Logic to sync the parity bit with the IPC.
+ * We send 2 NOP cmds with the same parity and wait for the IPC
+ * to handle the packet only for the second one. This way
+ * we make sure we are in sync for every following cmd.
+ */
+static int aeon_ipc_sync_parity(struct phy_device *phydev,
+ struct as21xxx_priv *priv)
+{
+ u16 ret_sts;
+ int ret;
+
+ mutex_lock(&priv->ipc_lock);
+
+ /* Send NOP with no parity */
+ aeon_ipc_noop(phydev, priv, NULL);
+
+ /* Reset packet parity */
+ priv->parity_status = false;
+
+ /* Send second NOP with no parity */
+ ret = aeon_ipc_noop(phydev, priv, &ret_sts);
+
+ mutex_unlock(&priv->ipc_lock);
+
+ /* We expect to return -EINVAL */
+ if (ret != -EINVAL)
+ return ret;
+
+ if ((ret_sts & AEON_IPC_STS_STATUS) != AEON_IPC_STS_STATUS_READY) {
+ phydev_err(phydev, "Invalid IPC status on sync parity: %x\n",
+ ret_sts);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int aeon_ipc_get_fw_version(struct phy_device *phydev)
+{
+ u16 ret_data[AEON_IPC_DATA_NUM_REGISTERS], data[1];
+ char fw_version[AEON_IPC_DATA_MAX + 1];
+ int ret;
+
+ data[0] = IPC_INFO_VERSION;
+
+ ret = aeon_ipc_send_msg(phydev, IPC_CMD_INFO, data,
+ sizeof(data), ret_data);
+ if (ret < 0)
+ return ret;
+
+ /* Make sure FW version is NULL terminated */
+ memcpy(fw_version, ret_data, ret);
+ fw_version[ret] = '\0';
+
+ phydev_info(phydev, "Firmware Version: %s\n", fw_version);
+
+ return 0;
+}
+
+static int aeon_dpc_ra_enable(struct phy_device *phydev)
+{
+ u16 data[2];
+
+ data[0] = IPC_CFG_PARAM_DIRECT;
+ data[1] = IPC_CFG_PARAM_DIRECT_DPC_RA;
+
+ return aeon_ipc_send_msg(phydev, IPC_CMD_CFG_PARAM, data,
+ sizeof(data), NULL);
+}
+
+static int as21xxx_probe(struct phy_device *phydev)
+{
+ struct as21xxx_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&phydev->mdio.dev,
+ sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ phydev->priv = priv;
+
+ ret = devm_mutex_init(&phydev->mdio.dev,
+ &priv->ipc_lock);
+ if (ret)
+ return ret;
+
+ ret = aeon_ipc_sync_parity(phydev, priv);
+ if (ret)
+ return ret;
+
+ ret = aeon_ipc_get_fw_version(phydev);
+ if (ret)
+ return ret;
+
+ /* Enable the PTP clock if not already enabled */
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CLK,
+ VEND1_PTP_CLK_EN);
+ if (ret)
+ return ret;
+
+ return aeon_dpc_ra_enable(phydev);
+}
+
+static int as21xxx_read_link(struct phy_device *phydev, int *bmcr)
+{
+ int status;
+
+ /* The normal C22 BMCR reports inconsistent data; use
+ * the C22 registers mapped into C45 for more consistent link info.
+ */
+ *bmcr = phy_read_mmd(phydev, MDIO_MMD_AN,
+ AS21XXX_MDIO_AN_C22 + MII_BMCR);
+ if (*bmcr < 0)
+ return *bmcr;
+
+ /* Autoneg is being started, therefore disregard current
+ * link status and report link as down.
+ */
+ if (*bmcr & BMCR_ANRESTART) {
+ phydev->link = 0;
+ return 0;
+ }
+
+ status = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+ if (status < 0)
+ return status;
+
+ phydev->link = !!(status & MDIO_STAT1_LSTATUS);
+
+ return 0;
+}
+
+static int as21xxx_read_c22_lpa(struct phy_device *phydev)
+{
+ int lpagb;
+
+ /* MII_STAT1000 is only filled in the C22 registers mapped
+ * into C45; use that to fill the lpagb values and check them.
+ */
+ lpagb = phy_read_mmd(phydev, MDIO_MMD_AN,
+ AS21XXX_MDIO_AN_C22 + MII_STAT1000);
+ if (lpagb < 0)
+ return lpagb;
+
+ if (lpagb & LPA_1000MSFAIL) {
+ int adv = phy_read_mmd(phydev, MDIO_MMD_AN,
+ AS21XXX_MDIO_AN_C22 + MII_CTRL1000);
+
+ if (adv < 0)
+ return adv;
+
+ if (adv & CTL1000_ENABLE_MASTER)
+ phydev_err(phydev, "Master/Slave resolution failed, maybe conflicting manual settings?\n");
+ else
+ phydev_err(phydev, "Master/Slave resolution failed\n");
+ return -ENOLINK;
+ }
+
+ mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising,
+ lpagb);
+
+ return 0;
+}
+
+static int as21xxx_read_status(struct phy_device *phydev)
+{
+ int bmcr, old_link = phydev->link;
+ int ret;
+
+ ret = as21xxx_read_link(phydev, &bmcr);
+ if (ret)
+ return ret;
+
+ /* why bother the PHY if nothing can have changed */
+ if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
+ return 0;
+
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ ret = genphy_c45_read_lpa(phydev);
+ if (ret)
+ return ret;
+
+ ret = as21xxx_read_c22_lpa(phydev);
+ if (ret)
+ return ret;
+
+ phy_resolve_aneg_linkmode(phydev);
+ } else {
+ int speed;
+
+ linkmode_zero(phydev->lp_advertising);
+
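+ /* Autoneg is disabled: read the forced speed directly from
+ * the vendor speed status register.
+ */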
+ speed = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_SPEED_STATUS);
+ if (speed < 0)
+ return speed;
+
+ switch (speed & VEND1_SPEED_STATUS) {
+ case VEND1_SPEED_10000:
+ phydev->speed = SPEED_10000;
+ phydev->duplex = DUPLEX_FULL;
+ break;
+ case VEND1_SPEED_5000:
+ phydev->speed = SPEED_5000;
+ phydev->duplex = DUPLEX_FULL;
+ break;
+ case VEND1_SPEED_2500:
+ phydev->speed = SPEED_2500;
+ phydev->duplex = DUPLEX_FULL;
+ break;
+ case VEND1_SPEED_1000:
+ phydev->speed = SPEED_1000;
+ if (bmcr & BMCR_FULLDPLX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+ break;
+ case VEND1_SPEED_100:
+ phydev->speed = SPEED_100;
+ phydev->duplex = DUPLEX_FULL;
+ break;
+ case VEND1_SPEED_10:
+ phydev->speed = SPEED_10;
+ phydev->duplex = DUPLEX_FULL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int as21xxx_led_brightness_set(struct phy_device *phydev,
+ u8 index, enum led_brightness value)
+{
+ u16 val = VEND1_LED_REG_A_EVENT_OFF;
+
+ if (index > AEON_MAX_LEDS)
+ return -EINVAL;
+
+ if (value)
+ val = VEND1_LED_REG_A_EVENT_ON;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_LED_REG(index),
+ VEND1_LED_REG_A_EVENT,
+ FIELD_PREP(VEND1_LED_REG_A_EVENT, val));
+}
+
+static int as21xxx_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ int i;
+
+ if (index > AEON_MAX_LEDS)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(as21xxx_led_supported_pattern); i++)
+ if (rules == as21xxx_led_supported_pattern[i].pattern)
+ return 0;
+
+ return -EOPNOTSUPP;
+}
+
+static int as21xxx_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ int i, val;
+
+ if (index > AEON_MAX_LEDS)
+ return -EINVAL;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_LED_REG(index));
+ if (val < 0)
+ return val;
+
+ val = FIELD_GET(VEND1_LED_REG_A_EVENT, val);
+ for (i = 0; i < ARRAY_SIZE(as21xxx_led_supported_pattern); i++)
+ if (val == as21xxx_led_supported_pattern[i].val) {
+ *rules = as21xxx_led_supported_pattern[i].pattern;
+ return 0;
+ }
+
+ /* Should be impossible */
+ return -EINVAL;
+}
+
+static int as21xxx_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ u16 val = 0;
+ int i;
+
+ if (index > AEON_MAX_LEDS)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(as21xxx_led_supported_pattern); i++)
+ if (rules == as21xxx_led_supported_pattern[i].pattern) {
+ val = as21xxx_led_supported_pattern[i].val;
+ break;
+ }
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_LED_REG(index),
+ VEND1_LED_REG_A_EVENT,
+ FIELD_PREP(VEND1_LED_REG_A_EVENT, val));
+}
+
+static int as21xxx_led_polarity_set(struct phy_device *phydev, int index,
+ unsigned long modes)
+{
+ bool led_active_low = false;
+ u16 mask, val = 0;
+ u32 mode;
+
+ if (index > AEON_MAX_LEDS)
+ return -EINVAL;
+
+ for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) {
+ switch (mode) {
+ case PHY_LED_ACTIVE_LOW:
+ led_active_low = true;
+ break;
+ case PHY_LED_ACTIVE_HIGH: /* default mode */
+ led_active_low = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ mask = VEND1_GLB_CPU_CTRL_LED_POLARITY(index);
+ if (led_active_low)
+ val = VEND1_GLB_CPU_CTRL_LED_POLARITY(index);
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_GLB_REG_CPU_CTRL,
+ mask, val);
+}
+
+static int as21xxx_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
+{
+ struct as21xxx_priv *priv;
+ u16 ret_sts;
+ u32 phy_id;
+ int ret;
+
+ /* Skip PHYs that are not AS21xxx */
+ if (!phy_id_compare_vendor(phydev->c45_ids.device_ids[MDIO_MMD_PCS],
+ PHY_VENDOR_AEONSEMI))
+ return genphy_match_phy_device(phydev, phydrv);
+
+ /* Read PHY ID to handle firmware loaded or HW reset */
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MII_PHYSID1);
+ if (ret < 0)
+ return ret;
+ phy_id = ret << 16;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MII_PHYSID2);
+ if (ret < 0)
+ return ret;
+ phy_id |= ret;
+
+ /* If the PHY ID is not the generic AS21xxx one, assume
+ * the firmware has already been loaded
+ */
+ if (phy_id != PHY_ID_AS21XXX)
+ return phy_id == phydrv->phy_id;
+
+ /* Allocate temp priv and load the firmware */
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_init(&priv->ipc_lock);
+
+ ret = aeon_firmware_load(phydev);
+ if (ret)
+ goto out;
+
+ /* Sync parity... */
+ ret = aeon_ipc_sync_parity(phydev, priv);
+ if (ret)
+ goto out;
+
+ /* ...and send a third NOOP cmd to wait for firmware finish loading */
+ ret = aeon_ipc_noop(phydev, priv, &ret_sts);
+ if (ret)
+ goto out;
+
+out:
+ mutex_destroy(&priv->ipc_lock);
+ kfree(priv);
+
+ /* Return can either be 0 or a negative error code.
+ * Returning 0 here means THIS is NOT a suitable PHY.
+ *
+ * For the specific case of the generic Aeonsemi PHY ID that
+ * needs the firmware to be loaded first to have a correct PHY ID,
+ * this is OK as a matching PHY ID will be found right after.
+ * This relies on the driver probe order where the first PHY driver
+ * probed is the generic one.
+ */
+ return ret;
+}
+
+static struct phy_driver as21xxx_drivers[] = {
+ {
+ /* The PHY is exposed in C45 as 0x7500 0x9410
+ * before the firmware is loaded.
+ * This driver entry must be attempted first to load
+ * the firmware and thus update the ID registers.
+ */
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21XXX),
+ .name = "Aeonsemi AS21xxx",
+ .match_phy_device = as21xxx_match_phy_device,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21011JB1),
+ .name = "Aeonsemi AS21011JB1",
+ .probe = as21xxx_probe,
+ .match_phy_device = as21xxx_match_phy_device,
+ .read_status = as21xxx_read_status,
+ .led_brightness_set = as21xxx_led_brightness_set,
+ .led_hw_is_supported = as21xxx_led_hw_is_supported,
+ .led_hw_control_set = as21xxx_led_hw_control_set,
+ .led_hw_control_get = as21xxx_led_hw_control_get,
+ .led_polarity_set = as21xxx_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21011PB1),
+ .name = "Aeonsemi AS21011PB1",
+ .probe = as21xxx_probe,
+ .match_phy_device = as21xxx_match_phy_device,
+ .read_status = as21xxx_read_status,
+ .led_brightness_set = as21xxx_led_brightness_set,
+ .led_hw_is_supported = as21xxx_led_hw_is_supported,
+ .led_hw_control_set = as21xxx_led_hw_control_set,
+ .led_hw_control_get = as21xxx_led_hw_control_get,
+ .led_polarity_set = as21xxx_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21010PB1),
+ .name = "Aeonsemi AS21010PB1",
+ .probe = as21xxx_probe,
+ .match_phy_device = as21xxx_match_phy_device,
+ .read_status = as21xxx_read_status,
+ .led_brightness_set = as21xxx_led_brightness_set,
+ .led_hw_is_supported = as21xxx_led_hw_is_supported,
+ .led_hw_control_set = as21xxx_led_hw_control_set,
+ .led_hw_control_get = as21xxx_led_hw_control_get,
+ .led_polarity_set = as21xxx_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21010JB1),
+ .name = "Aeonsemi AS21010JB1",
+ .probe = as21xxx_probe,
+ .match_phy_device = as21xxx_match_phy_device,
+ .read_status = as21xxx_read_status,
+ .led_brightness_set = as21xxx_led_brightness_set,
+ .led_hw_is_supported = as21xxx_led_hw_is_supported,
+ .led_hw_control_set = as21xxx_led_hw_control_set,
+ .led_hw_control_get = as21xxx_led_hw_control_get,
+ .led_polarity_set = as21xxx_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21210PB1),
+ .name = "Aeonsemi AS21210PB1",
+ .probe = as21xxx_probe,
+ .match_phy_device = as21xxx_match_phy_device,
+ .read_status = as21xxx_read_status,
+ .led_brightness_set = as21xxx_led_brightness_set,
+ .led_hw_is_supported = as21xxx_led_hw_is_supported,
+ .led_hw_control_set = as21xxx_led_hw_control_set,
+ .led_hw_control_get = as21xxx_led_hw_control_get,
+ .led_polarity_set = as21xxx_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21510JB1),
+ .name = "Aeonsemi AS21510JB1",
+ .probe = as21xxx_probe,
+ .match_phy_device = as21xxx_match_phy_device,
+ .read_status = as21xxx_read_status,
+ .led_brightness_set = as21xxx_led_brightness_set,
+ .led_hw_is_supported = as21xxx_led_hw_is_supported,
+ .led_hw_control_set = as21xxx_led_hw_control_set,
+ .led_hw_control_get = as21xxx_led_hw_control_get,
+ .led_polarity_set = as21xxx_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21510PB1),
+ .name = "Aeonsemi AS21510PB1",
+ .probe = as21xxx_probe,
+ .match_phy_device = as21xxx_match_phy_device,
+ .read_status = as21xxx_read_status,
+ .led_brightness_set = as21xxx_led_brightness_set,
+ .led_hw_is_supported = as21xxx_led_hw_is_supported,
+ .led_hw_control_set = as21xxx_led_hw_control_set,
+ .led_hw_control_get = as21xxx_led_hw_control_get,
+ .led_polarity_set = as21xxx_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21511JB1),
+ .name = "Aeonsemi AS21511JB1",
+ .probe = as21xxx_probe,
+ .match_phy_device = as21xxx_match_phy_device,
+ .read_status = as21xxx_read_status,
+ .led_brightness_set = as21xxx_led_brightness_set,
+ .led_hw_is_supported = as21xxx_led_hw_is_supported,
+ .led_hw_control_set = as21xxx_led_hw_control_set,
+ .led_hw_control_get = as21xxx_led_hw_control_get,
+ .led_polarity_set = as21xxx_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21210JB1),
+ .name = "Aeonsemi AS21210JB1",
+ .probe = as21xxx_probe,
+ .match_phy_device = as21xxx_match_phy_device,
+ .read_status = as21xxx_read_status,
+ .led_brightness_set = as21xxx_led_brightness_set,
+ .led_hw_is_supported = as21xxx_led_hw_is_supported,
+ .led_hw_control_set = as21xxx_led_hw_control_set,
+ .led_hw_control_get = as21xxx_led_hw_control_get,
+ .led_polarity_set = as21xxx_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21511PB1),
+ .name = "Aeonsemi AS21511PB1",
+ .probe = as21xxx_probe,
+ .match_phy_device = as21xxx_match_phy_device,
+ .read_status = as21xxx_read_status,
+ .led_brightness_set = as21xxx_led_brightness_set,
+ .led_hw_is_supported = as21xxx_led_hw_is_supported,
+ .led_hw_control_set = as21xxx_led_hw_control_set,
+ .led_hw_control_get = as21xxx_led_hw_control_get,
+ .led_polarity_set = as21xxx_led_polarity_set,
+ },
+};
+module_phy_driver(as21xxx_drivers);
+
+static struct mdio_device_id __maybe_unused as21xxx_tbl[] = {
+ { PHY_ID_MATCH_VENDOR(PHY_VENDOR_AEONSEMI) },
+ { }
+};
+MODULE_DEVICE_TABLE(mdio, as21xxx_tbl);
+
+MODULE_DESCRIPTION("Aeonsemi AS21xxx PHY driver");
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/ax88796b.c b/drivers/net/phy/ax88796b.c
index eb74a8cf8df1..f20ddf649149 100644
--- a/drivers/net/phy/ax88796b.c
+++ b/drivers/net/phy/ax88796b.c
@@ -112,19 +112,18 @@ static struct phy_driver asix_driver[] = {
.resume = genphy_resume,
.soft_reset = asix_soft_reset,
}, {
- .phy_id = PHY_ID_ASIX_AX88796B,
+ PHY_ID_MATCH_MODEL(PHY_ID_ASIX_AX88796B),
.name = "Asix Electronics AX88796B",
- .phy_id_mask = 0xfffffff0,
/* PHY_BASIC_FEATURES */
.soft_reset = asix_soft_reset,
} };
module_phy_driver(asix_driver);
-static struct mdio_device_id __maybe_unused asix_tbl[] = {
+static const struct mdio_device_id __maybe_unused asix_tbl[] = {
{ PHY_ID_MATCH_EXACT(PHY_ID_ASIX_AX88772A) },
{ PHY_ID_MATCH_EXACT(PHY_ID_ASIX_AX88772C) },
- { PHY_ID_ASIX_AX88796B, 0xfffffff0 },
+ { PHY_ID_MATCH_MODEL(PHY_ID_ASIX_AX88796B) },
{ }
};
diff --git a/drivers/net/phy/ax88796b_rust.rs b/drivers/net/phy/ax88796b_rust.rs
index 8c7eb009d9fc..bc73ebccc2aa 100644
--- a/drivers/net/phy/ax88796b_rust.rs
+++ b/drivers/net/phy/ax88796b_rust.rs
@@ -19,7 +19,7 @@ kernel::module_phy_driver! {
DeviceId::new_with_driver::<PhyAX88796B>()
],
name: "rust_asix_phy",
- author: "FUJITA Tomonori <fujita.tomonori@gmail.com>",
+ authors: ["FUJITA Tomonori <fujita.tomonori@gmail.com>"],
description: "Rust Asix PHYs driver",
license: "GPL",
}
diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c
index da8f7cb41b44..15cbef8202bc 100644
--- a/drivers/net/phy/bcm-cygnus.c
+++ b/drivers/net/phy/bcm-cygnus.c
@@ -278,7 +278,7 @@ static struct phy_driver bcm_cygnus_phy_driver[] = {
}
};
-static struct mdio_device_id __maybe_unused bcm_cygnus_phy_tbl[] = {
+static const struct mdio_device_id __maybe_unused bcm_cygnus_phy_tbl[] = {
{ PHY_ID_BCM_CYGNUS, 0xfffffff0, },
{ PHY_ID_BCM_OMEGA, 0xfffffff0, },
{ }
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index 6c52f7dda514..5198d66dbbc0 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -523,8 +523,7 @@ void bcm_phy_get_strings(struct phy_device *phydev, u8 *data)
unsigned int i;
for (i = 0; i < ARRAY_SIZE(bcm_phy_hw_stats); i++)
- strscpy(data + i * ETH_GSTRING_LEN,
- bcm_phy_hw_stats[i].string, ETH_GSTRING_LEN);
+ ethtool_puts(&data, bcm_phy_hw_stats[i].string);
}
EXPORT_SYMBOL_GPL(bcm_phy_get_strings);
@@ -1137,7 +1136,7 @@ int bcm_config_lre_aneg(struct phy_device *phydev, bool changed)
{
int err;
- if (genphy_config_eee_advert(phydev))
+ if (genphy_c45_an_config_eee_aneg(phydev) > 0)
changed = true;
err = bcm_setup_lre_master_slave(phydev);
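The bcm_phy_get_strings() hunk above replaces manual "data + i * ETH_GSTRING_LEN" indexing with ethtool_puts(), which copies one string into the next fixed-width slot and advances the caller's cursor. A minimal userspace sketch of that pattern, assuming the kernel's 32-byte ETH_GSTRING_LEN and with the helper body written out by hand:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_GSTRING_LEN 32

static void puts_gstring(uint8_t **data, const char *str)
{
	snprintf((char *)*data, ETH_GSTRING_LEN, "%s", str);	/* fill one slot      */
	*data += ETH_GSTRING_LEN;				/* advance the cursor */
}

int main(void)
{
	static const char *names[] = { "phy_receive_errors", "phy_idle_errors" };
	uint8_t buf[2 * ETH_GSTRING_LEN] = { 0 };
	uint8_t *cursor = buf;

	for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		puts_gstring(&cursor, names[i]);

	printf("%s / %s\n", (char *)buf, (char *)buf + ETH_GSTRING_LEN);
	return 0;
}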
diff --git a/drivers/net/phy/bcm-phy-ptp.c b/drivers/net/phy/bcm-phy-ptp.c
index 208e8f561e06..65d609ed69fb 100644
--- a/drivers/net/phy/bcm-phy-ptp.c
+++ b/drivers/net/phy/bcm-phy-ptp.c
@@ -597,9 +597,6 @@ static int bcm_ptp_perout_locked(struct bcm_ptp_private *priv,
period = BCM_MAX_PERIOD_8NS; /* write nonzero value */
- if (req->flags & PTP_PEROUT_PHASE)
- return -EOPNOTSUPP;
-
if (req->flags & PTP_PEROUT_DUTY_CYCLE)
pulse = ktime_to_ns(ktime_set(req->on.sec, req->on.nsec));
else
@@ -740,6 +737,8 @@ static const struct ptp_clock_info bcm_ptp_clock_info = {
.n_pins = 1,
.n_per_out = 1,
.n_ext_ts = 1,
+ .supported_perout_flags = PTP_PEROUT_DUTY_CYCLE,
+ .supported_extts_flags = PTP_STRICT_FLAGS | PTP_RISING_EDGE,
};
static void bcm_ptp_txtstamp(struct mii_timestamper *mii_ts,
@@ -781,9 +780,21 @@ out:
kfree_skb(skb);
}
-static int bcm_ptp_hwtstamp(struct mii_timestamper *mii_ts,
- struct kernel_hwtstamp_config *cfg,
- struct netlink_ext_ack *extack)
+static int bcm_ptp_hwtstamp_get(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *cfg)
+{
+ struct bcm_ptp_private *priv = mii2priv(mii_ts);
+
+ cfg->rx_filter = priv->hwts_rx ? HWTSTAMP_FILTER_PTP_V2_EVENT
+ : HWTSTAMP_FILTER_NONE;
+ cfg->tx_type = priv->tx_type;
+
+ return 0;
+}
+
+static int bcm_ptp_hwtstamp_set(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct bcm_ptp_private *priv = mii2priv(mii_ts);
u16 mode, ctrl;
@@ -899,7 +910,8 @@ static void bcm_ptp_init(struct bcm_ptp_private *priv)
priv->mii_ts.rxtstamp = bcm_ptp_rxtstamp;
priv->mii_ts.txtstamp = bcm_ptp_txtstamp;
- priv->mii_ts.hwtstamp = bcm_ptp_hwtstamp;
+ priv->mii_ts.hwtstamp_set = bcm_ptp_hwtstamp_set;
+ priv->mii_ts.hwtstamp_get = bcm_ptp_hwtstamp_get;
priv->mii_ts.ts_info = bcm_ptp_ts_info;
priv->phydev->mii_ts = &priv->mii_ts;
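The bcm-phy-ptp changes above split the single hwtstamp callback into hwtstamp_set()/hwtstamp_get() on struct mii_timestamper, with the get path simply reporting the last configuration the driver applied. A simplified standalone sketch of that split, using stand-in types rather than the kernel structures:

#include <stdio.h>

struct hwtstamp_config { int tx_type; int rx_filter; };

struct ts_priv { int tx_type; int rx_filter; };

static int hwtstamp_set(struct ts_priv *priv, const struct hwtstamp_config *cfg)
{
	/* A real driver programs the PHY here; we only record the config. */
	priv->tx_type = cfg->tx_type;
	priv->rx_filter = cfg->rx_filter;
	return 0;
}

static int hwtstamp_get(const struct ts_priv *priv, struct hwtstamp_config *cfg)
{
	/* Hand back exactly what was last applied, as bcm_ptp_hwtstamp_get() does. */
	cfg->tx_type = priv->tx_type;
	cfg->rx_filter = priv->rx_filter;
	return 0;
}

int main(void)
{
	struct ts_priv priv = { 0 };
	struct hwtstamp_config set = { .tx_type = 1, .rx_filter = 12 }, get = { 0 };

	hwtstamp_set(&priv, &set);
	hwtstamp_get(&priv, &get);
	printf("tx_type=%d rx_filter=%d\n", get.tx_type, get.rx_filter);
	return 0;
}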
diff --git a/drivers/net/phy/bcm54140.c b/drivers/net/phy/bcm54140.c
index 2eea3d09b1e6..a8edf45fd733 100644
--- a/drivers/net/phy/bcm54140.c
+++ b/drivers/net/phy/bcm54140.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/phy.h>
+#include "phylib.h"
#include "bcm-phy-lib.h"
/* RDB per-port registers
@@ -883,7 +884,7 @@ static struct phy_driver bcm54140_drivers[] = {
};
module_phy_driver(bcm54140_drivers);
-static struct mdio_device_id __maybe_unused bcm54140_tbl[] = {
+static const struct mdio_device_id __maybe_unused bcm54140_tbl[] = {
{ PHY_ID_BCM54140, BCM54140_PHY_ID_MASK },
{ }
};
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index 0eb33be824f1..b46a736a3130 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -93,7 +93,7 @@ static struct phy_driver bcm63xx_driver[] = {
module_phy_driver(bcm63xx_driver);
-static struct mdio_device_id __maybe_unused bcm63xx_tbl[] = {
+static const struct mdio_device_id __maybe_unused bcm63xx_tbl[] = {
{ 0x00406000, 0xfffffc00 },
{ 0x002bdc00, 0xfffffc00 },
{ }
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 97638ba7ae85..00e8fa14aa77 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -929,7 +929,7 @@ static struct phy_driver bcm7xxx_driver[] = {
BCM7XXX_16NM_EPHY(PHY_ID_BCM7712, "Broadcom BCM7712"),
};
-static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
+static const struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
{ PHY_ID_BCM72113, 0xfffffff0 },
{ PHY_ID_BCM72116, 0xfffffff0, },
{ PHY_ID_BCM72165, 0xfffffff0, },
diff --git a/drivers/net/phy/bcm84881.c b/drivers/net/phy/bcm84881.c
index 97da3aee4942..d7f7cc44c532 100644
--- a/drivers/net/phy/bcm84881.c
+++ b/drivers/net/phy/bcm84881.c
@@ -235,11 +235,21 @@ static int bcm84881_read_status(struct phy_device *phydev)
return genphy_c45_read_mdix(phydev);
}
+/* The Broadcom BCM84881 in the Methode DM7052 is unable to provide a SGMII
+ * or 802.3z control word, so inband will not work.
+ */
+static unsigned int bcm84881_inband_caps(struct phy_device *phydev,
+ phy_interface_t interface)
+{
+ return LINK_INBAND_DISABLE;
+}
+
static struct phy_driver bcm84881_drivers[] = {
{
.phy_id = 0xae025150,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM84881",
+ .inband_caps = bcm84881_inband_caps,
.config_init = bcm84881_config_init,
.probe = bcm84881_probe,
.get_features = bcm84881_get_features,
@@ -252,7 +262,7 @@ static struct phy_driver bcm84881_drivers[] = {
module_phy_driver(bcm84881_drivers);
/* FIXME: module auto-loading for Clause 45 PHYs seems non-functional */
-static struct mdio_device_id __maybe_unused bcm84881_tbl[] = {
+static const struct mdio_device_id __maybe_unused bcm84881_tbl[] = {
{ 0xae025150, 0xfffffff0 },
{ },
};
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
index e81404bf8994..299f9a8f30f4 100644
--- a/drivers/net/phy/bcm87xx.c
+++ b/drivers/net/phy/bcm87xx.c
@@ -185,14 +185,10 @@ static irqreturn_t bcm87xx_handle_interrupt(struct phy_device *phydev)
return IRQ_HANDLED;
}
-static int bcm8706_match_phy_device(struct phy_device *phydev)
+static int bcm87xx_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
- return phydev->c45_ids.device_ids[4] == PHY_ID_BCM8706;
-}
-
-static int bcm8727_match_phy_device(struct phy_device *phydev)
-{
- return phydev->c45_ids.device_ids[4] == PHY_ID_BCM8727;
+ return phydev->c45_ids.device_ids[4] == phydrv->phy_id;
}
static struct phy_driver bcm87xx_driver[] = {
@@ -206,7 +202,7 @@ static struct phy_driver bcm87xx_driver[] = {
.read_status = bcm87xx_read_status,
.config_intr = bcm87xx_config_intr,
.handle_interrupt = bcm87xx_handle_interrupt,
- .match_phy_device = bcm8706_match_phy_device,
+ .match_phy_device = bcm87xx_match_phy_device,
}, {
.phy_id = PHY_ID_BCM8727,
.phy_id_mask = 0xffffffff,
@@ -217,7 +213,7 @@ static struct phy_driver bcm87xx_driver[] = {
.read_status = bcm87xx_read_status,
.config_intr = bcm87xx_config_intr,
.handle_interrupt = bcm87xx_handle_interrupt,
- .match_phy_device = bcm8727_match_phy_device,
+ .match_phy_device = bcm87xx_match_phy_device,
} };
module_phy_driver(bcm87xx_driver);
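Both bcm87xx entries now point at the same bcm87xx_match_phy_device(), which works because the match callback is handed the struct phy_driver entry under consideration and can take the expected ID from the table instead of hard-coding it per function. A simplified sketch of that callback shape, with stand-in types and placeholder IDs:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct phy_drv { uint32_t phy_id; const char *name; };
struct phy_dev { uint32_t pma_id; };	/* stand-in for c45_ids.device_ids[4] */

static bool match_phy_device(const struct phy_dev *dev, const struct phy_drv *drv)
{
	return dev->pma_id == drv->phy_id;	/* one helper serves every entry */
}

int main(void)
{
	static const struct phy_drv table[] = {
		{ 0x00000001, "8706-like entry" },	/* placeholder IDs */
		{ 0x00000002, "8727-like entry" },
	};
	struct phy_dev dev = { .pma_id = 0x00000002 };

	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (match_phy_device(&dev, &table[i]))
			printf("matched %s\n", table[i].name);
	return 0;
}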
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index ddded162c44c..cb306f9e80cc 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -16,16 +16,13 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/phy.h>
-#include <linux/pm_wakeup.h>
+#include <linux/device.h>
#include <linux/brcmphy.h>
#include <linux/of.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/gpio/consumer.h>
-#define BRCM_PHY_MODEL(phydev) \
- ((phydev)->drv->phy_id & (phydev)->drv->phy_id_mask)
-
#define BRCM_PHY_REV(phydev) \
((phydev)->drv->phy_id & ~((phydev)->drv->phy_id_mask))
@@ -249,8 +246,8 @@ static int bcm54xx_phydsp_config(struct phy_device *phydev)
if (err < 0)
return err;
- if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610 ||
- BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610M) {
+ if (phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM50610) ||
+ phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM50610M)) {
/* Clear bit 9 to fix a phy interop issue. */
err = bcm_phy_write_exp(phydev, MII_BCM54XX_EXP_EXP08,
MII_BCM54XX_EXP_EXP08_RJCT_2MHZ);
@@ -264,7 +261,7 @@ static int bcm54xx_phydsp_config(struct phy_device *phydev)
}
}
- if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM57780) {
+ if (phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM57780)) {
int val;
val = bcm_phy_read_exp(phydev, MII_BCM54XX_EXP_EXP75);
@@ -292,12 +289,12 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
bool clk125en = true;
/* Abort if we are using an untested phy. */
- if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM57780 &&
- BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610 &&
- BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610M &&
- BRCM_PHY_MODEL(phydev) != PHY_ID_BCM54210E &&
- BRCM_PHY_MODEL(phydev) != PHY_ID_BCM54810 &&
- BRCM_PHY_MODEL(phydev) != PHY_ID_BCM54811)
+ if (!(phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM57780) ||
+ phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM50610) ||
+ phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM50610M) ||
+ phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM54210E) ||
+ phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM54810) ||
+ phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM54811)))
return;
val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_SCR3);
@@ -306,8 +303,8 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
orig = val;
- if ((BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610 ||
- BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610M) &&
+ if ((phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM50610) ||
+ phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM50610M)) &&
BRCM_PHY_REV(phydev) >= 0x3) {
/*
* Here, bit 0 _disables_ CLK125 when set.
@@ -316,7 +313,8 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
clk125en = false;
} else {
if (phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED) {
- if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM54811) {
+ if (!phy_id_compare_model(phydev->drv->phy_id,
+ PHY_ID_BCM54811)) {
/* Here, bit 0 _enables_ CLK125 when set */
val &= ~BCM54XX_SHD_SCR3_DEF_CLK125;
}
@@ -330,9 +328,9 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
val |= BCM54XX_SHD_SCR3_DLLAPD_DIS;
if (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY) {
- if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54210E ||
- BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810 ||
- BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54811)
+ if (phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM54210E) ||
+ phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM54810) ||
+ phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM54811))
val |= BCM54XX_SHD_SCR3_RXCTXC_DIS;
else
val |= BCM54XX_SHD_SCR3_TRDDAPD;
@@ -407,7 +405,7 @@ static int bcm5481x_set_brrmode(struct phy_device *phydev, bool on)
static int bcm54811_config_init(struct phy_device *phydev)
{
struct bcm54xx_phy_priv *priv = phydev->priv;
- int err, reg;
+ int err, reg, exp_sync_ethernet, aux_rgmii_en;
/* Enable CLK125 MUX on LED4 if ref clock is enabled. */
if (!(phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED)) {
@@ -424,6 +422,36 @@ static int bcm54811_config_init(struct phy_device *phydev)
if (priv->brr_mode)
phydev->autoneg = 0;
+ /* Enable MII Lite (No TXER, RXER, CRS, COL) if configured */
+ if (phydev->interface == PHY_INTERFACE_MODE_MIILITE)
+ exp_sync_ethernet = BCM_EXP_SYNC_ETHERNET_MII_LITE;
+ else
+ exp_sync_ethernet = 0;
+
+ err = bcm_phy_modify_exp(phydev, BCM_EXP_SYNC_ETHERNET,
+ BCM_EXP_SYNC_ETHERNET_MII_LITE,
+ exp_sync_ethernet);
+ if (err < 0)
+ return err;
+
+ /* Enable RGMII if configured */
+ if (phy_interface_is_rgmii(phydev))
+ aux_rgmii_en = MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_EN |
+ MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN;
+ else
+ aux_rgmii_en = 0;
+
+ /* Also writing Reserved bits 6:5 because the documentation requires
+ * them to be written to 0b11
+ */
+ err = bcm54xx_auxctl_write(phydev,
+ MII_BCM54XX_AUXCTL_SHDWSEL_MISC,
+ MII_BCM54XX_AUXCTL_MISC_WREN |
+ aux_rgmii_en |
+ MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RSVD);
+ if (err < 0)
+ return err;
+
return bcm5481x_set_brrmode(phydev, priv->brr_mode);
}
@@ -449,14 +477,14 @@ static int bcm54xx_config_init(struct phy_device *phydev)
if (err < 0)
return err;
- if ((BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610 ||
- BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610M) &&
+ if ((phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM50610) ||
+ phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM50610M)) &&
(phydev->dev_flags & PHY_BRCM_CLEAR_RGMII_MODE))
bcm_phy_write_shadow(phydev, BCM54XX_SHD_RGMII_MODE, 0);
bcm54xx_adjust_rxrefclk(phydev);
- switch (BRCM_PHY_MODEL(phydev)) {
+ switch (phydev->drv->phy_id & PHY_ID_MATCH_MODEL_MASK) {
case PHY_ID_BCM50610:
case PHY_ID_BCM50610M:
err = bcm54xx_config_clock_delay(phydev);
@@ -655,7 +683,7 @@ static int bcm5481x_read_abilities(struct phy_device *phydev)
{
struct device_node *np = phydev->mdio.dev.of_node;
struct bcm54xx_phy_priv *priv = phydev->priv;
- int i, val, err;
+ int i, val, err, aneg;
for (i = 0; i < ARRAY_SIZE(bcm54811_linkmodes); i++)
linkmode_clear_bit(bcm54811_linkmodes[i], phydev->supported);
@@ -676,9 +704,19 @@ static int bcm5481x_read_abilities(struct phy_device *phydev)
if (val < 0)
return val;
+ /* BCM54811 is not capable of LDS but the corresponding bit
+ * in LRESR is set to 1 and marked "Ignore" in the datasheet.
+ * So we must read the bcm54811 as unable to auto-negotiate
+ * in BroadR-Reach mode.
+ */
+ if (phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM54811))
+ aneg = 0;
+ else
+ aneg = val & LRESR_LDSABILITY;
+
linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
phydev->supported,
- val & LRESR_LDSABILITY);
+ aneg);
linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT1_Full_BIT,
phydev->supported,
val & LRESR_100_1PAIR);
@@ -735,8 +773,15 @@ static int bcm54811_config_aneg(struct phy_device *phydev)
/* Aneg firstly. */
if (priv->brr_mode) {
- /* BCM54811 is only capable of autonegotiation in IEEE mode */
- phydev->autoneg = 0;
+ /* BCM54811 is only capable of autonegotiation in IEEE mode.
+ * In BroadR-Reach mode, disable Long Distance Signaling (LDS),
+ * the BRR-mode autonegotiation supported by other Broadcom PHYs.
+ * This bit is marked as "Reserved" and "Default 1, must be
+ * written to 0 after every device reset" in the datasheet.
+ */
+ ret = phy_modify(phydev, MII_BCM54XX_LRECR, LRECR_LDSEN, 0);
+ if (ret < 0)
+ return ret;
ret = bcm_config_lre_aneg(phydev, false);
} else {
ret = genphy_config_aneg(phydev);
@@ -859,7 +904,7 @@ static int brcm_fet_config_init(struct phy_device *phydev)
return reg;
/* Unmask events we are interested in and mask interrupts globally. */
- if (phydev->phy_id == PHY_ID_BCM5221)
+ if (phydev->drv->phy_id == PHY_ID_BCM5221)
reg = MII_BRCM_FET_IR_ENABLE |
MII_BRCM_FET_IR_MASK;
else
@@ -888,7 +933,7 @@ static int brcm_fet_config_init(struct phy_device *phydev)
return err;
}
- if (phydev->phy_id != PHY_ID_BCM5221) {
+ if (phydev->drv->phy_id != PHY_ID_BCM5221) {
/* Set the LED mode */
reg = __phy_read(phydev, MII_BRCM_FET_SHDW_AUXMODE4);
if (reg < 0) {
@@ -1009,7 +1054,7 @@ static int brcm_fet_suspend(struct phy_device *phydev)
return err;
}
- if (phydev->phy_id == PHY_ID_BCM5221)
+ if (phydev->drv->phy_id == PHY_ID_BCM5221)
/* Force Low Power Mode with clock enabled */
reg = BCM5221_SHDW_AM4_EN_CLK_LPM | BCM5221_SHDW_AM4_FORCE_LPM;
else
@@ -1409,8 +1454,7 @@ static int bcm54811_read_status(struct phy_device *phydev)
static struct phy_driver broadcom_drivers[] = {
{
- .phy_id = PHY_ID_BCM5411,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM5411),
.name = "Broadcom BCM5411",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1422,8 +1466,7 @@ static struct phy_driver broadcom_drivers[] = {
.handle_interrupt = bcm_phy_handle_interrupt,
.link_change_notify = bcm54xx_link_change_notify,
}, {
- .phy_id = PHY_ID_BCM5421,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM5421),
.name = "Broadcom BCM5421",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1435,8 +1478,7 @@ static struct phy_driver broadcom_drivers[] = {
.handle_interrupt = bcm_phy_handle_interrupt,
.link_change_notify = bcm54xx_link_change_notify,
}, {
- .phy_id = PHY_ID_BCM54210E,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM54210E),
.name = "Broadcom BCM54210E",
/* PHY_GBIT_FEATURES */
.flags = PHY_ALWAYS_CALL_SUSPEND,
@@ -1454,8 +1496,7 @@ static struct phy_driver broadcom_drivers[] = {
.set_wol = bcm54xx_phy_set_wol,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM5461,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM5461),
.name = "Broadcom BCM5461",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1468,8 +1509,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM54612E,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM54612E),
.name = "Broadcom BCM54612E",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1484,8 +1524,7 @@ static struct phy_driver broadcom_drivers[] = {
.suspend = bcm54xx_suspend,
.resume = bcm54xx_resume,
}, {
- .phy_id = PHY_ID_BCM54616S,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM54616S),
.name = "Broadcom BCM54616S",
/* PHY_GBIT_FEATURES */
.soft_reset = genphy_soft_reset,
@@ -1498,8 +1537,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM5464,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM5464),
.name = "Broadcom BCM5464",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1514,8 +1552,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM5481,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM5481),
.name = "Broadcom BCM5481",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1529,8 +1566,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM54810,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM54810),
.name = "Broadcom BCM54810",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1548,8 +1584,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM54811,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM54811),
.name = "Broadcom BCM54811",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1567,8 +1602,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM5482,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM5482),
.name = "Broadcom BCM5482",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1581,8 +1615,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM50610,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM50610),
.name = "Broadcom BCM50610",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1597,8 +1630,7 @@ static struct phy_driver broadcom_drivers[] = {
.resume = bcm54xx_resume,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM50610M,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM50610M),
.name = "Broadcom BCM50610M",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1613,8 +1645,7 @@ static struct phy_driver broadcom_drivers[] = {
.resume = bcm54xx_resume,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM57780,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM57780),
.name = "Broadcom BCM57780",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1627,8 +1658,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCMAC131,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCMAC131),
.name = "Broadcom BCMAC131",
/* PHY_BASIC_FEATURES */
.config_init = brcm_fet_config_init,
@@ -1637,8 +1667,7 @@ static struct phy_driver broadcom_drivers[] = {
.suspend = brcm_fet_suspend,
.resume = brcm_fet_config_init,
}, {
- .phy_id = PHY_ID_BCM5241,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM5241),
.name = "Broadcom BCM5241",
/* PHY_BASIC_FEATURES */
.config_init = brcm_fet_config_init,
@@ -1647,8 +1676,7 @@ static struct phy_driver broadcom_drivers[] = {
.suspend = brcm_fet_suspend,
.resume = brcm_fet_config_init,
}, {
- .phy_id = PHY_ID_BCM5221,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM5221),
.name = "Broadcom BCM5221",
/* PHY_BASIC_FEATURES */
.config_init = brcm_fet_config_init,
@@ -1659,8 +1687,7 @@ static struct phy_driver broadcom_drivers[] = {
.config_aneg = bcm5221_config_aneg,
.read_status = bcm5221_read_status,
}, {
- .phy_id = PHY_ID_BCM5395,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM5395),
.name = "Broadcom BCM5395",
.flags = PHY_IS_INTERNAL,
/* PHY_GBIT_FEATURES */
@@ -1671,8 +1698,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM53125,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM53125),
.name = "Broadcom BCM53125",
.flags = PHY_IS_INTERNAL,
/* PHY_GBIT_FEATURES */
@@ -1686,8 +1712,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM53128,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM53128),
.name = "Broadcom BCM53128",
.flags = PHY_IS_INTERNAL,
/* PHY_GBIT_FEATURES */
@@ -1701,8 +1726,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM89610,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM89610),
.name = "Broadcom BCM89610",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1717,28 +1741,28 @@ static struct phy_driver broadcom_drivers[] = {
module_phy_driver(broadcom_drivers);
-static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
- { PHY_ID_BCM5411, 0xfffffff0 },
- { PHY_ID_BCM5421, 0xfffffff0 },
- { PHY_ID_BCM54210E, 0xfffffff0 },
- { PHY_ID_BCM5461, 0xfffffff0 },
- { PHY_ID_BCM54612E, 0xfffffff0 },
- { PHY_ID_BCM54616S, 0xfffffff0 },
- { PHY_ID_BCM5464, 0xfffffff0 },
- { PHY_ID_BCM5481, 0xfffffff0 },
- { PHY_ID_BCM54810, 0xfffffff0 },
- { PHY_ID_BCM54811, 0xfffffff0 },
- { PHY_ID_BCM5482, 0xfffffff0 },
- { PHY_ID_BCM50610, 0xfffffff0 },
- { PHY_ID_BCM50610M, 0xfffffff0 },
- { PHY_ID_BCM57780, 0xfffffff0 },
- { PHY_ID_BCMAC131, 0xfffffff0 },
- { PHY_ID_BCM5221, 0xfffffff0 },
- { PHY_ID_BCM5241, 0xfffffff0 },
- { PHY_ID_BCM5395, 0xfffffff0 },
- { PHY_ID_BCM53125, 0xfffffff0 },
- { PHY_ID_BCM53128, 0xfffffff0 },
- { PHY_ID_BCM89610, 0xfffffff0 },
+static const struct mdio_device_id __maybe_unused broadcom_tbl[] = {
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM5411) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM5421) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM54210E) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM5461) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM54612E) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM54616S) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM5464) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM5481) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM54810) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM54811) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM5482) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM50610) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM50610M) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM57780) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCMAC131) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM5221) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM5241) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM5395) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM53125) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM53128) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM89610) },
{ }
};
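One plausible motivation for the brcm_fet changes above that compare against phydev->drv->phy_id instead of phydev->phy_id: the raw ID read from the chip keeps the silicon revision in its low bits, whereas the matched driver entry carries the table constant, so the latter compares cleanly against PHY_ID_BCM5221 regardless of revision. A toy illustration with placeholder values:

#include <stdint.h>
#include <stdio.h>

#define PHY_ID_BCM5221	0x004061e0u	/* placeholder table constant */

struct phy_dev {
	uint32_t phy_id;		/* raw ID read over MDIO          */
	uint32_t drv_phy_id;		/* ID of the matched driver entry */
};

int main(void)
{
	/* A hypothetical revision-3 part: raw ID differs in the low nibble. */
	struct phy_dev dev = { .phy_id = 0x004061e3, .drv_phy_id = PHY_ID_BCM5221 };

	printf("raw compare:    %d\n", dev.phy_id == PHY_ID_BCM5221);		/* 0 */
	printf("driver compare: %d\n", dev.drv_phy_id == PHY_ID_BCM5221);	/* 1 */
	return 0;
}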
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index ef5f412e101f..d87cf8b94cf8 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -145,7 +145,7 @@ static struct phy_driver cis820x_driver[] = {
module_phy_driver(cis820x_driver);
-static struct mdio_device_id __maybe_unused cicada_tbl[] = {
+static const struct mdio_device_id __maybe_unused cicada_tbl[] = {
{ 0x000fc410, 0x000ffff0 },
{ 0x000fc440, 0x000fffc0 },
{ }
diff --git a/drivers/net/phy/cortina.c b/drivers/net/phy/cortina.c
index 40514a94e6ff..3b65f37f1c57 100644
--- a/drivers/net/phy/cortina.c
+++ b/drivers/net/phy/cortina.c
@@ -87,7 +87,7 @@ static struct phy_driver cortina_driver[] = {
module_phy_driver(cortina_driver);
-static struct mdio_device_id __maybe_unused cortina_tbl[] = {
+static const struct mdio_device_id __maybe_unused cortina_tbl[] = {
{ PHY_ID_CS4340, 0xffffffff},
{},
};
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 4ac4bce1bf32..fa3692508f16 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -209,7 +209,7 @@ static struct phy_driver dm91xx_driver[] = {
module_phy_driver(dm91xx_driver);
-static struct mdio_device_id __maybe_unused davicom_tbl[] = {
+static const struct mdio_device_id __maybe_unused davicom_tbl[] = {
{ 0x0181b880, 0x0ffffff0 },
{ 0x0181b8b0, 0x0ffffff0 },
{ 0x0181b8a0, 0x0ffffff0 },
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 075d2beea716..b950acc9c49b 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -478,13 +478,6 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* Reject requests to enable time stamping on both edges. */
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
@@ -513,9 +506,6 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
return 0;
case PTP_CLK_REQ_PEROUT:
- /* Reject requests with unsupported flags */
- if (rq->perout.flags)
- return -EOPNOTSUPP;
if (rq->perout.index >= N_PER_OUT)
return -EINVAL;
return periodic_output(clock, rq, on, rq->perout.index);
@@ -963,30 +953,6 @@ static void decode_status_frame(struct dp83640_private *dp83640,
}
}
-static void dp83640_free_clocks(void)
-{
- struct dp83640_clock *clock;
- struct list_head *this, *next;
-
- mutex_lock(&phyter_clocks_lock);
-
- list_for_each_safe(this, next, &phyter_clocks) {
- clock = list_entry(this, struct dp83640_clock, list);
- if (!list_empty(&clock->phylist)) {
- pr_warn("phy list non-empty while unloading\n");
- BUG();
- }
- list_del(&clock->list);
- mutex_destroy(&clock->extreg_lock);
- mutex_destroy(&clock->clock_lock);
- put_device(&clock->bus->dev);
- kfree(clock->caps.pin_config);
- kfree(clock);
- }
-
- mutex_unlock(&phyter_clocks_lock);
-}
-
static void dp83640_clock_init(struct dp83640_clock *clock, struct mii_bus *bus)
{
INIT_LIST_HEAD(&clock->list);
@@ -1002,6 +968,9 @@ static void dp83640_clock_init(struct dp83640_clock *clock, struct mii_bus *bus)
clock->caps.n_per_out = N_PER_OUT;
clock->caps.n_pins = DP83640_N_PINS;
clock->caps.pps = 0;
+ clock->caps.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
clock->caps.adjfine = ptp_dp83640_adjfine;
clock->caps.adjtime = ptp_dp83640_adjtime;
clock->caps.gettime64 = ptp_dp83640_gettime;
@@ -1207,9 +1176,21 @@ static irqreturn_t dp83640_handle_interrupt(struct phy_device *phydev)
return IRQ_HANDLED;
}
-static int dp83640_hwtstamp(struct mii_timestamper *mii_ts,
- struct kernel_hwtstamp_config *cfg,
- struct netlink_ext_ack *extack)
+static int dp83640_hwtstamp_get(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *cfg)
+{
+ struct dp83640_private *dp83640 =
+ container_of(mii_ts, struct dp83640_private, mii_ts);
+
+ cfg->rx_filter = dp83640->hwts_rx_en;
+ cfg->tx_type = dp83640->hwts_tx_en;
+
+ return 0;
+}
+
+static int dp83640_hwtstamp_set(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct dp83640_private *dp83640 =
container_of(mii_ts, struct dp83640_private, mii_ts);
@@ -1229,7 +1210,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts,
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- dp83640->hwts_rx_en = 1;
+ dp83640->hwts_rx_en = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
dp83640->layer = PTP_CLASS_L4;
dp83640->version = PTP_CLASS_V1;
cfg->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
@@ -1237,7 +1218,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts,
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- dp83640->hwts_rx_en = 1;
+ dp83640->hwts_rx_en = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
dp83640->layer = PTP_CLASS_L4;
dp83640->version = PTP_CLASS_V2;
cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
@@ -1245,7 +1226,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts,
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- dp83640->hwts_rx_en = 1;
+ dp83640->hwts_rx_en = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
dp83640->layer = PTP_CLASS_L2;
dp83640->version = PTP_CLASS_V2;
cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
@@ -1253,7 +1234,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts,
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- dp83640->hwts_rx_en = 1;
+ dp83640->hwts_rx_en = HWTSTAMP_FILTER_PTP_V2_EVENT;
dp83640->layer = PTP_CLASS_L4 | PTP_CLASS_L2;
dp83640->version = PTP_CLASS_V2;
cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
@@ -1438,7 +1419,8 @@ static int dp83640_probe(struct phy_device *phydev)
dp83640->phydev = phydev;
dp83640->mii_ts.rxtstamp = dp83640_rxtstamp;
dp83640->mii_ts.txtstamp = dp83640_txtstamp;
- dp83640->mii_ts.hwtstamp = dp83640_hwtstamp;
+ dp83640->mii_ts.hwtstamp_set = dp83640_hwtstamp_set;
+ dp83640->mii_ts.hwtstamp_get = dp83640_hwtstamp_get;
dp83640->mii_ts.ts_info = dp83640_ts_info;
INIT_DELAYED_WORK(&dp83640->ts_work, rx_timestamp_work);
@@ -1486,6 +1468,7 @@ static void dp83640_remove(struct phy_device *phydev)
struct dp83640_clock *clock;
struct list_head *this, *next;
struct dp83640_private *tmp, *dp83640 = phydev->priv;
+ bool remove_clock = false;
if (phydev->mdio.addr == BROADCAST_ADDR)
return;
@@ -1513,11 +1496,27 @@ static void dp83640_remove(struct phy_device *phydev)
}
}
+ if (!clock->chosen && list_empty(&clock->phylist))
+ remove_clock = true;
+
dp83640_clock_put(clock);
kfree(dp83640);
+
+ if (remove_clock) {
+ mutex_lock(&phyter_clocks_lock);
+ list_del(&clock->list);
+ mutex_unlock(&phyter_clocks_lock);
+
+ mutex_destroy(&clock->extreg_lock);
+ mutex_destroy(&clock->clock_lock);
+ put_device(&clock->bus->dev);
+ kfree(clock->caps.pin_config);
+ kfree(clock);
+ }
}
-static struct phy_driver dp83640_driver = {
+static struct phy_driver dp83640_driver[] = {
+{
.phy_id = DP83640_PHY_ID,
.phy_id_mask = 0xfffffff0,
.name = "NatSemi DP83640",
@@ -1528,27 +1527,16 @@ static struct phy_driver dp83640_driver = {
.config_init = dp83640_config_init,
.config_intr = dp83640_config_intr,
.handle_interrupt = dp83640_handle_interrupt,
+},
};
-static int __init dp83640_init(void)
-{
- return phy_driver_register(&dp83640_driver, THIS_MODULE);
-}
-
-static void __exit dp83640_exit(void)
-{
- dp83640_free_clocks();
- phy_driver_unregister(&dp83640_driver);
-}
+module_phy_driver(dp83640_driver);
MODULE_DESCRIPTION("National Semiconductor DP83640 PHY driver");
MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
MODULE_LICENSE("GPL");
-module_init(dp83640_init);
-module_exit(dp83640_exit);
-
-static struct mdio_device_id __maybe_unused dp83640_tbl[] = {
+static const struct mdio_device_id __maybe_unused dp83640_tbl[] = {
{ DP83640_PHY_ID, 0xfffffff0 },
{ }
};
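The dp83640 hunks above drop the per-request EXTTS/PEROUT flag rejection and instead advertise supported_extts_flags in ptp_clock_info, presumably leaving the equivalent check to the PTP core. A minimal sketch of that capability-mask style of validation (the flag values mirror the uapi PTP_PEROUT_* bits, but treat them as assumptions here):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define PTP_PEROUT_DUTY_CYCLE	(1u << 1)
#define PTP_PEROUT_PHASE	(1u << 2)

struct caps { uint32_t supported_perout_flags; };

static int perout_request(const struct caps *caps, uint32_t req_flags)
{
	/* Reject any flag the driver did not declare support for. */
	if (req_flags & ~caps->supported_perout_flags)
		return -EOPNOTSUPP;
	return 0;
}

int main(void)
{
	struct caps drv = { .supported_perout_flags = PTP_PEROUT_DUTY_CYCLE };

	printf("duty-cycle request: %d\n", perout_request(&drv, PTP_PEROUT_DUTY_CYCLE));
	printf("phase request:      %d\n", perout_request(&drv, PTP_PEROUT_PHASE));
	return 0;
}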
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index cf8b6d0bfaa9..33db21251f2e 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -22,8 +22,6 @@
#define DP83826C_PHY_ID 0x2000a130
#define DP83826NC_PHY_ID 0x2000a110
-#define DP83822_DEVADDR 0x1f
-
#define MII_DP83822_CTRL_2 0x0a
#define MII_DP83822_PHYSTS 0x10
#define MII_DP83822_PHYSCR 0x11
@@ -32,6 +30,12 @@
#define MII_DP83822_FCSCR 0x14
#define MII_DP83822_RCSR 0x17
#define MII_DP83822_RESET_CTRL 0x1f
+#define MII_DP83822_MLEDCR 0x25
+#define MII_DP83822_LDCTRL 0x403
+#define MII_DP83822_LEDCFG1 0x460
+#define MII_DP83822_IOCTRL 0x461
+#define MII_DP83822_IOCTRL1 0x462
+#define MII_DP83822_IOCTRL2 0x463
#define MII_DP83822_GENCFG 0x465
#define MII_DP83822_SOR1 0x467
@@ -106,6 +110,56 @@
#define DP83822_RX_CLK_SHIFT BIT(12)
#define DP83822_TX_CLK_SHIFT BIT(11)
+/* MLEDCR bits */
+#define DP83822_MLEDCR_CFG GENMASK(6, 3)
+#define DP83822_MLEDCR_ROUTE GENMASK(1, 0)
+#define DP83822_MLEDCR_ROUTE_LED_0 DP83822_MLEDCR_ROUTE
+
+/* LEDCFG1 bits */
+#define DP83822_LEDCFG1_LED1_CTRL GENMASK(11, 8)
+#define DP83822_LEDCFG1_LED3_CTRL GENMASK(7, 4)
+
+/* IOCTRL bits */
+#define DP83822_IOCTRL_MAC_IMPEDANCE_CTRL GENMASK(4, 1)
+
+/* IOCTRL1 bits */
+#define DP83822_IOCTRL1_GPIO3_CTRL GENMASK(10, 8)
+#define DP83822_IOCTRL1_GPIO3_CTRL_LED3 BIT(0)
+#define DP83822_IOCTRL1_GPIO1_CTRL GENMASK(2, 0)
+#define DP83822_IOCTRL1_GPIO1_CTRL_LED_1 BIT(0)
+
+/* LDCTRL bits */
+#define DP83822_100BASE_TX_LINE_DRIVER_SWING GENMASK(7, 4)
+
+/* IOCTRL2 bits */
+#define DP83822_IOCTRL2_GPIO2_CLK_SRC GENMASK(6, 4)
+#define DP83822_IOCTRL2_GPIO2_CTRL GENMASK(2, 0)
+#define DP83822_IOCTRL2_GPIO2_CTRL_CLK_REF GENMASK(1, 0)
+#define DP83822_IOCTRL2_GPIO2_CTRL_MLED BIT(0)
+
+#define DP83822_CLK_SRC_MAC_IF 0x0
+#define DP83822_CLK_SRC_XI 0x1
+#define DP83822_CLK_SRC_INT_REF 0x2
+#define DP83822_CLK_SRC_RMII_MASTER_MODE_REF 0x4
+#define DP83822_CLK_SRC_FREE_RUNNING 0x6
+#define DP83822_CLK_SRC_RECOVERED 0x7
+
+#define DP83822_LED_FN_LINK 0x0 /* Link established */
+#define DP83822_LED_FN_RX_TX 0x1 /* Receive or Transmit activity */
+#define DP83822_LED_FN_TX 0x2 /* Transmit activity */
+#define DP83822_LED_FN_RX 0x3 /* Receive activity */
+#define DP83822_LED_FN_COLLISION 0x4 /* Collision detected */
+#define DP83822_LED_FN_LINK_100_BTX 0x5 /* 100 BTX link established */
+#define DP83822_LED_FN_LINK_10_BT 0x6 /* 10BT link established */
+#define DP83822_LED_FN_FULL_DUPLEX 0x7 /* Full duplex */
+#define DP83822_LED_FN_LINK_RX_TX 0x8 /* Link established, blink for rx or tx activity */
+#define DP83822_LED_FN_ACTIVE_STRETCH 0x9 /* Active Stretch Signal */
+#define DP83822_LED_FN_MII_LINK 0xa /* MII LINK (100BT+FD) */
+#define DP83822_LED_FN_LPI_MODE 0xb /* LPI Mode (EEE) */
+#define DP83822_LED_FN_RX_TX_ERR 0xc /* TX/RX MII Error */
+#define DP83822_LED_FN_LINK_LOST 0xd /* Link Lost */
+#define DP83822_LED_FN_PRBS_ERR 0xe /* Blink for PRBS error */
+
/* SOR1 mode */
#define DP83822_STRAP_MODE1 0
#define DP83822_STRAP_MODE2 BIT(0)
@@ -134,6 +188,13 @@
ADVERTISED_FIBRE | \
ADVERTISED_Pause | ADVERTISED_Asym_Pause)
+#define DP83822_MAX_LED_PINS 4
+
+#define DP83822_LED_INDEX_LED_0 0
+#define DP83822_LED_INDEX_LED_1_GPIO1 1
+#define DP83822_LED_INDEX_COL_GPIO2 2
+#define DP83822_LED_INDEX_RX_D3_GPIO3 3
+
struct dp83822_private {
bool fx_signal_det_low;
int fx_enabled;
@@ -141,6 +202,11 @@ struct dp83822_private {
u8 cfg_dac_minus;
u8 cfg_dac_plus;
struct ethtool_wolinfo wol;
+ bool set_gpio2_clk_out;
+ u32 gpio2_clk_out;
+ bool led_pin_enable[DP83822_MAX_LED_PINS];
+ int tx_amplitude_100base_tx_index;
+ int mac_termination_index;
};
static int dp83822_config_wol(struct phy_device *phydev,
@@ -159,14 +225,14 @@ static int dp83822_config_wol(struct phy_device *phydev,
/* MAC addresses start with byte 5, but stored in mac[0].
* 822 PHYs store bytes 4|5, 2|3, 0|1
*/
- phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_DA1,
+ phy_write_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_DA1,
(mac[1] << 8) | mac[0]);
- phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_DA2,
+ phy_write_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_DA2,
(mac[3] << 8) | mac[2]);
- phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_DA3,
+ phy_write_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_DA3,
(mac[5] << 8) | mac[4]);
- value = phy_read_mmd(phydev, DP83822_DEVADDR,
+ value = phy_read_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_WOL_CFG);
if (wol->wolopts & WAKE_MAGIC)
value |= DP83822_WOL_MAGIC_EN;
@@ -174,13 +240,13 @@ static int dp83822_config_wol(struct phy_device *phydev,
value &= ~DP83822_WOL_MAGIC_EN;
if (wol->wolopts & WAKE_MAGICSECURE) {
- phy_write_mmd(phydev, DP83822_DEVADDR,
+ phy_write_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_RXSOP1,
(wol->sopass[1] << 8) | wol->sopass[0]);
- phy_write_mmd(phydev, DP83822_DEVADDR,
+ phy_write_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_RXSOP2,
(wol->sopass[3] << 8) | wol->sopass[2]);
- phy_write_mmd(phydev, DP83822_DEVADDR,
+ phy_write_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_RXSOP3,
(wol->sopass[5] << 8) | wol->sopass[4]);
value |= DP83822_WOL_SECURE_ON;
@@ -194,10 +260,10 @@ static int dp83822_config_wol(struct phy_device *phydev,
value |= DP83822_WOL_EN | DP83822_WOL_INDICATION_SEL |
DP83822_WOL_CLR_INDICATION;
- return phy_write_mmd(phydev, DP83822_DEVADDR,
+ return phy_write_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_WOL_CFG, value);
} else {
- return phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_WOL_CFG,
DP83822_WOL_EN |
DP83822_WOL_MAGIC_EN |
@@ -226,23 +292,23 @@ static void dp83822_get_wol(struct phy_device *phydev,
wol->supported = (WAKE_MAGIC | WAKE_MAGICSECURE);
wol->wolopts = 0;
- value = phy_read_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG);
+ value = phy_read_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_CFG);
if (value & DP83822_WOL_MAGIC_EN)
wol->wolopts |= WAKE_MAGIC;
if (value & DP83822_WOL_SECURE_ON) {
- sopass_val = phy_read_mmd(phydev, DP83822_DEVADDR,
+ sopass_val = phy_read_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_RXSOP1);
wol->sopass[0] = (sopass_val & 0xff);
wol->sopass[1] = (sopass_val >> 8);
- sopass_val = phy_read_mmd(phydev, DP83822_DEVADDR,
+ sopass_val = phy_read_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_RXSOP2);
wol->sopass[2] = (sopass_val & 0xff);
wol->sopass[3] = (sopass_val >> 8);
- sopass_val = phy_read_mmd(phydev, DP83822_DEVADDR,
+ sopass_val = phy_read_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_RXSOP3);
wol->sopass[4] = (sopass_val & 0xff);
wol->sopass[5] = (sopass_val >> 8);
@@ -405,43 +471,107 @@ static int dp83822_read_status(struct phy_device *phydev)
return 0;
}
+static int dp83822_config_init_leds(struct phy_device *phydev)
+{
+ struct dp83822_private *dp83822 = phydev->priv;
+ int ret;
+
+ if (dp83822->led_pin_enable[DP83822_LED_INDEX_LED_0]) {
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_MLEDCR,
+ DP83822_MLEDCR_ROUTE,
+ FIELD_PREP(DP83822_MLEDCR_ROUTE,
+ DP83822_MLEDCR_ROUTE_LED_0));
+ if (ret)
+ return ret;
+ } else if (dp83822->led_pin_enable[DP83822_LED_INDEX_COL_GPIO2]) {
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_IOCTRL2,
+ DP83822_IOCTRL2_GPIO2_CTRL,
+ FIELD_PREP(DP83822_IOCTRL2_GPIO2_CTRL,
+ DP83822_IOCTRL2_GPIO2_CTRL_MLED));
+ if (ret)
+ return ret;
+ }
+
+ if (dp83822->led_pin_enable[DP83822_LED_INDEX_LED_1_GPIO1]) {
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_IOCTRL1,
+ DP83822_IOCTRL1_GPIO1_CTRL,
+ FIELD_PREP(DP83822_IOCTRL1_GPIO1_CTRL,
+ DP83822_IOCTRL1_GPIO1_CTRL_LED_1));
+ if (ret)
+ return ret;
+ }
+
+ if (dp83822->led_pin_enable[DP83822_LED_INDEX_RX_D3_GPIO3]) {
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_IOCTRL1,
+ DP83822_IOCTRL1_GPIO3_CTRL,
+ FIELD_PREP(DP83822_IOCTRL1_GPIO3_CTRL,
+ DP83822_IOCTRL1_GPIO3_CTRL_LED3));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int dp83822_config_init(struct phy_device *phydev)
{
struct dp83822_private *dp83822 = phydev->priv;
- struct device *dev = &phydev->mdio.dev;
int rgmii_delay = 0;
s32 rx_int_delay;
s32 tx_int_delay;
int err = 0;
int bmcr;
+ if (dp83822->set_gpio2_clk_out)
+ phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_IOCTRL2,
+ DP83822_IOCTRL2_GPIO2_CTRL |
+ DP83822_IOCTRL2_GPIO2_CLK_SRC,
+ FIELD_PREP(DP83822_IOCTRL2_GPIO2_CTRL,
+ DP83822_IOCTRL2_GPIO2_CTRL_CLK_REF) |
+ FIELD_PREP(DP83822_IOCTRL2_GPIO2_CLK_SRC,
+ dp83822->gpio2_clk_out));
+
+ if (dp83822->tx_amplitude_100base_tx_index >= 0)
+ phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_LDCTRL,
+ DP83822_100BASE_TX_LINE_DRIVER_SWING,
+ FIELD_PREP(DP83822_100BASE_TX_LINE_DRIVER_SWING,
+ dp83822->tx_amplitude_100base_tx_index));
+
+ if (dp83822->mac_termination_index >= 0)
+ phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_IOCTRL,
+ DP83822_IOCTRL_MAC_IMPEDANCE_CTRL,
+ FIELD_PREP(DP83822_IOCTRL_MAC_IMPEDANCE_CTRL,
+ dp83822->mac_termination_index));
+
+ err = dp83822_config_init_leds(phydev);
+ if (err)
+ return err;
+
if (phy_interface_is_rgmii(phydev)) {
- rx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0,
- true);
+ rx_int_delay = phy_get_internal_delay(phydev, NULL, 0, true);
/* Set DP83822_RX_CLK_SHIFT to enable rx clk internal delay */
if (rx_int_delay > 0)
rgmii_delay |= DP83822_RX_CLK_SHIFT;
- tx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0,
- false);
+ tx_int_delay = phy_get_internal_delay(phydev, NULL, 0, false);
/* Set DP83822_TX_CLK_SHIFT to disable tx clk internal delay */
if (tx_int_delay <= 0)
rgmii_delay |= DP83822_TX_CLK_SHIFT;
- err = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ err = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RCSR,
DP83822_RX_CLK_SHIFT | DP83822_TX_CLK_SHIFT, rgmii_delay);
if (err)
return err;
- err = phy_set_bits_mmd(phydev, DP83822_DEVADDR,
+ err = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
if (err)
return err;
} else {
- err = phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
+ err = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
if (err)
@@ -496,7 +626,7 @@ static int dp83822_config_init(struct phy_device *phydev)
return err;
if (dp83822->fx_signal_det_low) {
- err = phy_set_bits_mmd(phydev, DP83822_DEVADDR,
+ err = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_GENCFG,
DP83822_SIG_DET_LOW);
if (err)
@@ -514,10 +644,10 @@ static int dp8382x_config_rmii_mode(struct phy_device *phydev)
if (!device_property_read_string(dev, "ti,rmii-mode", &of_val)) {
if (strcmp(of_val, "master") == 0) {
- ret = phy_clear_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RCSR,
DP83822_RMII_MODE_SEL);
} else if (strcmp(of_val, "slave") == 0) {
- ret = phy_set_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RCSR,
DP83822_RMII_MODE_SEL);
} else {
phydev_err(phydev, "Invalid value for ti,rmii-mode property (%s)\n",
@@ -539,7 +669,7 @@ static int dp83826_config_init(struct phy_device *phydev)
int ret;
if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
- ret = phy_set_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RCSR,
DP83822_RMII_MODE_EN);
if (ret)
return ret;
@@ -548,7 +678,7 @@ static int dp83826_config_init(struct phy_device *phydev)
if (ret)
return ret;
} else {
- ret = phy_clear_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RCSR,
DP83822_RMII_MODE_EN);
if (ret)
return ret;
@@ -560,7 +690,7 @@ static int dp83826_config_init(struct phy_device *phydev)
FIELD_GET(DP83826_CFG_DAC_MINUS_MDIX_5_TO_4,
dp83822->cfg_dac_minus));
mask = DP83826_VOD_CFG1_MINUS_MDIX_MASK | DP83826_VOD_CFG1_MINUS_MDI_MASK;
- ret = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83826_VOD_CFG1, mask, val);
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83826_VOD_CFG1, mask, val);
if (ret)
return ret;
@@ -568,7 +698,7 @@ static int dp83826_config_init(struct phy_device *phydev)
FIELD_GET(DP83826_CFG_DAC_MINUS_MDIX_3_TO_0,
dp83822->cfg_dac_minus));
mask = DP83826_VOD_CFG2_MINUS_MDIX_MASK;
- ret = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83826_VOD_CFG2, mask, val);
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83826_VOD_CFG2, mask, val);
if (ret)
return ret;
}
@@ -577,7 +707,7 @@ static int dp83826_config_init(struct phy_device *phydev)
val = FIELD_PREP(DP83826_VOD_CFG2_PLUS_MDIX_MASK, dp83822->cfg_dac_plus) |
FIELD_PREP(DP83826_VOD_CFG2_PLUS_MDI_MASK, dp83822->cfg_dac_plus);
mask = DP83826_VOD_CFG2_PLUS_MDIX_MASK | DP83826_VOD_CFG2_PLUS_MDI_MASK;
- ret = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83826_VOD_CFG2, mask, val);
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83826_VOD_CFG2, mask, val);
if (ret)
return ret;
}
@@ -608,11 +738,78 @@ static int dp83822_phy_reset(struct phy_device *phydev)
return phydev->drv->config_init(phydev);
}
-#ifdef CONFIG_OF_MDIO
+#if IS_ENABLED(CONFIG_OF_MDIO)
+static const u32 tx_amplitude_100base_tx_gain[] = {
+ 80, 82, 83, 85, 87, 88, 90, 92,
+ 93, 95, 97, 98, 100, 102, 103, 105,
+};
+
+static const u32 mac_termination[] = {
+ 99, 91, 84, 78, 73, 69, 65, 61, 58, 55, 53, 50, 48, 46, 44, 43,
+};
+
+static int dp83822_of_init_leds(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct dp83822_private *dp83822 = phydev->priv;
+ struct device_node *leds;
+ u32 index;
+ int err;
+
+ if (!node)
+ return 0;
+
+ leds = of_get_child_by_name(node, "leds");
+ if (!leds)
+ return 0;
+
+ for_each_available_child_of_node_scoped(leds, led) {
+ err = of_property_read_u32(led, "reg", &index);
+ if (err) {
+ of_node_put(leds);
+ return err;
+ }
+
+ if (index <= DP83822_LED_INDEX_RX_D3_GPIO3) {
+ dp83822->led_pin_enable[index] = true;
+ } else {
+ of_node_put(leds);
+ return -EINVAL;
+ }
+ }
+
+ of_node_put(leds);
+ /* LED_0 and COL(GPIO2) use the MLED function. MLED can be routed to
+ * only one of these two pins at a time.
+ */
+ if (dp83822->led_pin_enable[DP83822_LED_INDEX_LED_0] &&
+ dp83822->led_pin_enable[DP83822_LED_INDEX_COL_GPIO2]) {
+ phydev_err(phydev, "LED_0 and COL(GPIO2) cannot be used as LED output at the same time\n");
+ return -EINVAL;
+ }
+
+ if (dp83822->led_pin_enable[DP83822_LED_INDEX_COL_GPIO2] &&
+ dp83822->set_gpio2_clk_out) {
+ phydev_err(phydev, "COL(GPIO2) cannot be used as LED output, already used as clock output\n");
+ return -EINVAL;
+ }
+
+ if (dp83822->led_pin_enable[DP83822_LED_INDEX_RX_D3_GPIO3] &&
+ phydev->interface != PHY_INTERFACE_MODE_RMII) {
+ phydev_err(phydev, "RX_D3 can only be used as LED output when in RMII mode\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int dp83822_of_init(struct phy_device *phydev)
{
struct dp83822_private *dp83822 = phydev->priv;
struct device *dev = &phydev->mdio.dev;
+ const char *of_val;
+ int i, ret;
+ u32 val;
/* Signal detection for the PHY is only enabled if the FX_EN and the
* SD_EN pins are strapped. Signal detection can only enabled if FX_EN
@@ -625,7 +822,66 @@ static int dp83822_of_init(struct phy_device *phydev)
dp83822->fx_enabled = device_property_present(dev,
"ti,fiber-mode");
- return 0;
+ if (!device_property_read_string(dev, "ti,gpio2-clk-out", &of_val)) {
+ if (strcmp(of_val, "mac-if") == 0) {
+ dp83822->gpio2_clk_out = DP83822_CLK_SRC_MAC_IF;
+ } else if (strcmp(of_val, "xi") == 0) {
+ dp83822->gpio2_clk_out = DP83822_CLK_SRC_XI;
+ } else if (strcmp(of_val, "int-ref") == 0) {
+ dp83822->gpio2_clk_out = DP83822_CLK_SRC_INT_REF;
+ } else if (strcmp(of_val, "rmii-master-mode-ref") == 0) {
+ dp83822->gpio2_clk_out = DP83822_CLK_SRC_RMII_MASTER_MODE_REF;
+ } else if (strcmp(of_val, "free-running") == 0) {
+ dp83822->gpio2_clk_out = DP83822_CLK_SRC_FREE_RUNNING;
+ } else if (strcmp(of_val, "recovered") == 0) {
+ dp83822->gpio2_clk_out = DP83822_CLK_SRC_RECOVERED;
+ } else {
+ phydev_err(phydev,
+ "Invalid value for ti,gpio2-clk-out property (%s)\n",
+ of_val);
+ return -EINVAL;
+ }
+
+ dp83822->set_gpio2_clk_out = true;
+ }
+
+ ret = phy_get_tx_amplitude_gain(phydev, dev,
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ &val);
+ if (!ret) {
+ for (i = 0; i < ARRAY_SIZE(tx_amplitude_100base_tx_gain); i++) {
+ if (tx_amplitude_100base_tx_gain[i] == val) {
+ dp83822->tx_amplitude_100base_tx_index = i;
+ break;
+ }
+ }
+
+ if (dp83822->tx_amplitude_100base_tx_index < 0) {
+ phydev_err(phydev,
+ "Invalid value for tx-amplitude-100base-tx-percent property (%u)\n",
+ val);
+ return -EINVAL;
+ }
+ }
+
+ ret = phy_get_mac_termination(phydev, dev, &val);
+ if (!ret) {
+ for (i = 0; i < ARRAY_SIZE(mac_termination); i++) {
+ if (mac_termination[i] == val) {
+ dp83822->mac_termination_index = i;
+ break;
+ }
+ }
+
+ if (dp83822->mac_termination_index < 0) {
+ phydev_err(phydev,
+ "Invalid value for mac-termination-ohms property (%u)\n",
+ val);
+ return -EINVAL;
+ }
+ }
+
+ return dp83822_of_init_leds(phydev);
}
static int dp83826_to_dac_minus_one_regval(int percent)
@@ -673,7 +929,7 @@ static int dp83822_read_straps(struct phy_device *phydev)
int fx_enabled, fx_sd_enable;
int val;
- val = phy_read_mmd(phydev, DP83822_DEVADDR, MII_DP83822_SOR1);
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_SOR1);
if (val < 0)
return val;
@@ -703,6 +959,8 @@ static int dp8382x_probe(struct phy_device *phydev)
if (!dp83822)
return -ENOMEM;
+ dp83822->tx_amplitude_100base_tx_index = -1;
+ dp83822->mac_termination_index = -1;
phydev->priv = dp83822;
return 0;
@@ -723,7 +981,9 @@ static int dp83822_probe(struct phy_device *phydev)
if (ret)
return ret;
- dp83822_of_init(phydev);
+ ret = dp83822_of_init(phydev);
+ if (ret)
+ return ret;
if (dp83822->fx_enabled)
phydev->port = PORT_FIBRE;
@@ -748,7 +1008,7 @@ static int dp83822_suspend(struct phy_device *phydev)
{
int value;
- value = phy_read_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG);
+ value = phy_read_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_CFG);
if (!(value & DP83822_WOL_EN))
genphy_suspend(phydev);
@@ -762,14 +1022,138 @@ static int dp83822_resume(struct phy_device *phydev)
genphy_resume(phydev);
- value = phy_read_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG);
+ value = phy_read_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_CFG);
- phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG, value |
+ phy_write_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_CFG, value |
DP83822_WOL_CLR_INDICATION);
return 0;
}
+static int dp83822_led_mode(u8 index, unsigned long rules)
+{
+ switch (rules) {
+ case BIT(TRIGGER_NETDEV_LINK):
+ return DP83822_LED_FN_LINK;
+ case BIT(TRIGGER_NETDEV_LINK_10):
+ return DP83822_LED_FN_LINK_10_BT;
+ case BIT(TRIGGER_NETDEV_LINK_100):
+ return DP83822_LED_FN_LINK_100_BTX;
+ case BIT(TRIGGER_NETDEV_FULL_DUPLEX):
+ return DP83822_LED_FN_FULL_DUPLEX;
+ case BIT(TRIGGER_NETDEV_TX):
+ return DP83822_LED_FN_TX;
+ case BIT(TRIGGER_NETDEV_RX):
+ return DP83822_LED_FN_RX;
+ case BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX):
+ return DP83822_LED_FN_RX_TX;
+ case BIT(TRIGGER_NETDEV_TX_ERR) | BIT(TRIGGER_NETDEV_RX_ERR):
+ return DP83822_LED_FN_RX_TX_ERR;
+ case BIT(TRIGGER_NETDEV_LINK) | BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX):
+ return DP83822_LED_FN_LINK_RX_TX;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int dp83822_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ int mode;
+
+ mode = dp83822_led_mode(index, rules);
+ if (mode < 0)
+ return mode;
+
+ return 0;
+}
+
+static int dp83822_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ int mode;
+
+ mode = dp83822_led_mode(index, rules);
+ if (mode < 0)
+ return mode;
+
+ if (index == DP83822_LED_INDEX_LED_0 || index == DP83822_LED_INDEX_COL_GPIO2)
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2,
+ MII_DP83822_MLEDCR, DP83822_MLEDCR_CFG,
+ FIELD_PREP(DP83822_MLEDCR_CFG, mode));
+ else if (index == DP83822_LED_INDEX_LED_1_GPIO1)
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2,
+ MII_DP83822_LEDCFG1,
+ DP83822_LEDCFG1_LED1_CTRL,
+ FIELD_PREP(DP83822_LEDCFG1_LED1_CTRL,
+ mode));
+ else
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2,
+ MII_DP83822_LEDCFG1,
+ DP83822_LEDCFG1_LED3_CTRL,
+ FIELD_PREP(DP83822_LEDCFG1_LED3_CTRL,
+ mode));
+}
+
+static int dp83822_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ int val;
+
+ if (index == DP83822_LED_INDEX_LED_0 || index == DP83822_LED_INDEX_COL_GPIO2) {
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_MLEDCR);
+ if (val < 0)
+ return val;
+
+ val = FIELD_GET(DP83822_MLEDCR_CFG, val);
+ } else {
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_LEDCFG1);
+ if (val < 0)
+ return val;
+
+ if (index == DP83822_LED_INDEX_LED_1_GPIO1)
+ val = FIELD_GET(DP83822_LEDCFG1_LED1_CTRL, val);
+ else
+ val = FIELD_GET(DP83822_LEDCFG1_LED3_CTRL, val);
+ }
+
+ switch (val) {
+ case DP83822_LED_FN_LINK:
+ *rules = BIT(TRIGGER_NETDEV_LINK);
+ break;
+ case DP83822_LED_FN_LINK_10_BT:
+ *rules = BIT(TRIGGER_NETDEV_LINK_10);
+ break;
+ case DP83822_LED_FN_LINK_100_BTX:
+ *rules = BIT(TRIGGER_NETDEV_LINK_100);
+ break;
+ case DP83822_LED_FN_FULL_DUPLEX:
+ *rules = BIT(TRIGGER_NETDEV_FULL_DUPLEX);
+ break;
+ case DP83822_LED_FN_TX:
+ *rules = BIT(TRIGGER_NETDEV_TX);
+ break;
+ case DP83822_LED_FN_RX:
+ *rules = BIT(TRIGGER_NETDEV_RX);
+ break;
+ case DP83822_LED_FN_RX_TX:
+ *rules = BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX);
+ break;
+ case DP83822_LED_FN_RX_TX_ERR:
+ *rules = BIT(TRIGGER_NETDEV_TX_ERR) | BIT(TRIGGER_NETDEV_RX_ERR);
+ break;
+ case DP83822_LED_FN_LINK_RX_TX:
+ *rules = BIT(TRIGGER_NETDEV_LINK) | BIT(TRIGGER_NETDEV_TX) |
+ BIT(TRIGGER_NETDEV_RX);
+ break;
+ default:
+ *rules = 0;
+ break;
+ }
+
+ return 0;
+}
+
#define DP83822_PHY_DRIVER(_id, _name) \
{ \
PHY_ID_MATCH_MODEL(_id), \
@@ -785,6 +1169,9 @@ static int dp83822_resume(struct phy_device *phydev)
.handle_interrupt = dp83822_handle_interrupt, \
.suspend = dp83822_suspend, \
.resume = dp83822_resume, \
+ .led_hw_is_supported = dp83822_led_hw_is_supported, \
+ .led_hw_control_set = dp83822_led_hw_control_set, \
+ .led_hw_control_get = dp83822_led_hw_control_get, \
}
#define DP83825_PHY_DRIVER(_id, _name) \
@@ -830,7 +1217,7 @@ static struct phy_driver dp83822_driver[] = {
};
module_phy_driver(dp83822_driver);
-static struct mdio_device_id __maybe_unused dp83822_tbl[] = {
+static const struct mdio_device_id __maybe_unused dp83822_tbl[] = {
{ DP83822_PHY_ID, 0xfffffff0 },
{ DP83825I_PHY_ID, 0xfffffff0 },
{ DP83826C_PHY_ID, 0xfffffff0 },
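The dp83822 LED hooks added above map each supported netdev trigger combination onto one hardware LED function code and back, so led_hw_control_get() can reconstruct the rules from the register contents. A small sketch of that one-to-one round trip, using only a few of the combinations from dp83822_led_mode() and local stand-in trigger bits:

#include <stddef.h>
#include <stdio.h>

enum { TRG_LINK = 1 << 0, TRG_TX = 1 << 1, TRG_RX = 1 << 2 };

struct led_map { unsigned long rules; int fn; };

static const struct led_map map[] = {
	{ TRG_LINK,			0x0 },	/* DP83822_LED_FN_LINK       */
	{ TRG_TX | TRG_RX,		0x1 },	/* DP83822_LED_FN_RX_TX      */
	{ TRG_LINK | TRG_TX | TRG_RX,	0x8 },	/* DP83822_LED_FN_LINK_RX_TX */
};

static int rules_to_fn(unsigned long rules)
{
	for (size_t i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (map[i].rules == rules)
			return map[i].fn;
	return -1;				/* unsupported combination */
}

static long fn_to_rules(int fn)
{
	for (size_t i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (map[i].fn == fn)
			return map[i].rules;
	return 0;				/* unknown: report no rules */
}

int main(void)
{
	unsigned long rules = TRG_LINK | TRG_TX | TRG_RX;
	int fn = rules_to_fn(rules);

	printf("fn=0x%x round-trip ok=%d\n", fn, fn_to_rules(fn) == (long)rules);
	return 0;
}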
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index 937061acfc61..d88b1999d596 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -123,7 +123,7 @@ static int dp83848_config_init(struct phy_device *phydev)
return 0;
}
-static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
+static const struct mdio_device_id __maybe_unused dp83848_tbl[] = {
{ TI_DP83848C_PHY_ID, 0xfffffff0 },
{ NS_DP83848C_PHY_ID, 0xfffffff0 },
{ TI_DP83620_PHY_ID, 0xfffffff0 },
@@ -147,6 +147,8 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
/* IRQ related */ \
.config_intr = dp83848_config_intr, \
.handle_interrupt = dp83848_handle_interrupt, \
+ \
+ .flags = PHY_RST_AFTER_CLK_EN, \
}
static struct phy_driver dp83848_driver[] = {
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 4120385c5a79..5f5de01c41e1 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -92,11 +92,6 @@
#define DP83867_STRAP_STS1_RESERVED BIT(11)
/* STRAP_STS2 bits */
-#define DP83867_STRAP_STS2_CLK_SKEW_TX_MASK GENMASK(6, 4)
-#define DP83867_STRAP_STS2_CLK_SKEW_TX_SHIFT 4
-#define DP83867_STRAP_STS2_CLK_SKEW_RX_MASK GENMASK(2, 0)
-#define DP83867_STRAP_STS2_CLK_SKEW_RX_SHIFT 0
-#define DP83867_STRAP_STS2_CLK_SKEW_NONE BIT(2)
#define DP83867_STRAP_STS2_STRAP_FLD BIT(10)
/* PHY CTRL bits */
@@ -111,10 +106,8 @@
/* RGMIIDCTL bits */
#define DP83867_RGMII_TX_CLK_DELAY_MAX 0xf
#define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4
-#define DP83867_RGMII_TX_CLK_DELAY_INV (DP83867_RGMII_TX_CLK_DELAY_MAX + 1)
#define DP83867_RGMII_RX_CLK_DELAY_MAX 0xf
#define DP83867_RGMII_RX_CLK_DELAY_SHIFT 0
-#define DP83867_RGMII_RX_CLK_DELAY_INV (DP83867_RGMII_RX_CLK_DELAY_MAX + 1)
/* IO_MUX_CFG bits */
#define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MASK 0x1f
@@ -506,48 +499,6 @@ static int dp83867_config_port_mirroring(struct phy_device *phydev)
return 0;
}
-static int dp83867_verify_rgmii_cfg(struct phy_device *phydev)
-{
- struct dp83867_private *dp83867 = phydev->priv;
-
- /* Existing behavior was to use default pin strapping delay in rgmii
- * mode, but rgmii should have meant no delay. Warn existing users.
- */
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII) {
- const u16 val = phy_read_mmd(phydev, DP83867_DEVADDR,
- DP83867_STRAP_STS2);
- const u16 txskew = (val & DP83867_STRAP_STS2_CLK_SKEW_TX_MASK) >>
- DP83867_STRAP_STS2_CLK_SKEW_TX_SHIFT;
- const u16 rxskew = (val & DP83867_STRAP_STS2_CLK_SKEW_RX_MASK) >>
- DP83867_STRAP_STS2_CLK_SKEW_RX_SHIFT;
-
- if (txskew != DP83867_STRAP_STS2_CLK_SKEW_NONE ||
- rxskew != DP83867_STRAP_STS2_CLK_SKEW_NONE)
- phydev_warn(phydev,
- "PHY has delays via pin strapping, but phy-mode = 'rgmii'\n"
- "Should be 'rgmii-id' to use internal delays txskew:%x rxskew:%x\n",
- txskew, rxskew);
- }
-
- /* RX delay *must* be specified if internal delay of RX is used. */
- if ((phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) &&
- dp83867->rx_id_delay == DP83867_RGMII_RX_CLK_DELAY_INV) {
- phydev_err(phydev, "ti,rx-internal-delay must be specified\n");
- return -EINVAL;
- }
-
- /* TX delay *must* be specified if internal delay of TX is used. */
- if ((phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) &&
- dp83867->tx_id_delay == DP83867_RGMII_TX_CLK_DELAY_INV) {
- phydev_err(phydev, "ti,tx-internal-delay must be specified\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
#if IS_ENABLED(CONFIG_OF_MDIO)
static int dp83867_of_init_io_impedance(struct phy_device *phydev)
{
@@ -631,7 +582,7 @@ static int dp83867_of_init(struct phy_device *phydev)
dp83867->sgmii_ref_clk_en = of_property_read_bool(of_node,
"ti,sgmii-ref-clock-output-enable");
- dp83867->rx_id_delay = DP83867_RGMII_RX_CLK_DELAY_INV;
+ dp83867->rx_id_delay = DP83867_RGMIIDCTL_2_00_NS;
ret = of_property_read_u32(of_node, "ti,rx-internal-delay",
&dp83867->rx_id_delay);
if (!ret && dp83867->rx_id_delay > DP83867_RGMII_RX_CLK_DELAY_MAX) {
@@ -641,7 +592,7 @@ static int dp83867_of_init(struct phy_device *phydev)
return -EINVAL;
}
- dp83867->tx_id_delay = DP83867_RGMII_TX_CLK_DELAY_INV;
+ dp83867->tx_id_delay = DP83867_RGMIIDCTL_2_00_NS;
ret = of_property_read_u32(of_node, "ti,tx-internal-delay",
&dp83867->tx_id_delay);
if (!ret && dp83867->tx_id_delay > DP83867_RGMII_TX_CLK_DELAY_MAX) {
@@ -761,7 +712,6 @@ static int dp83867_config_init(struct phy_device *phydev)
{
struct dp83867_private *dp83867 = phydev->priv;
int ret, val, bs;
- u16 delay;
/* Force speed optimization for the PHY even if it strapped */
ret = phy_modify(phydev, DP83867_CFG2, DP83867_DOWNSHIFT_EN,
@@ -769,10 +719,6 @@ static int dp83867_config_init(struct phy_device *phydev)
if (ret)
return ret;
- ret = dp83867_verify_rgmii_cfg(phydev);
- if (ret)
- return ret;
-
/* RX_DV/RX_CTRL strapped in mode 1 or mode 2 workaround */
if (dp83867->rxctrl_strap_quirk)
phy_clear_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
@@ -792,6 +738,12 @@ static int dp83867_config_init(struct phy_device *phydev)
return ret;
}
+ /* Although the DP83867 reports EEE capability through the
+ * MDIO_PCS_EEE_ABLE and MDIO_AN_EEE_ADV registers, the feature
+ * is not actually implemented in hardware.
+ */
+ phy_disable_eee(phydev);
+
if (phy_interface_is_rgmii(phydev) ||
phydev->interface == PHY_INTERFACE_MODE_SGMII) {
val = phy_read(phydev, MII_DP83867_PHYCTRL);
@@ -836,13 +788,7 @@ static int dp83867_config_init(struct phy_device *phydev)
if (ret)
return ret;
- /* If rgmii mode with no internal delay is selected, we do NOT use
- * aligned mode as one might expect. Instead we use the PHY's default
- * based on pin strapping. And the "mode 0" default is to *use*
- * internal delay with a value of 7 (2.00 ns).
- *
- * Set up RGMII delays
- */
+ /* Set up RGMII delays */
val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL);
val &= ~(DP83867_RGMII_TX_CLK_DELAY_EN | DP83867_RGMII_RX_CLK_DELAY_EN);
@@ -857,15 +803,9 @@ static int dp83867_config_init(struct phy_device *phydev)
phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL, val);
- delay = 0;
- if (dp83867->rx_id_delay != DP83867_RGMII_RX_CLK_DELAY_INV)
- delay |= dp83867->rx_id_delay;
- if (dp83867->tx_id_delay != DP83867_RGMII_TX_CLK_DELAY_INV)
- delay |= dp83867->tx_id_delay <<
- DP83867_RGMII_TX_CLK_DELAY_SHIFT;
-
phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIIDCTL,
- delay);
+ dp83867->rx_id_delay |
+ (dp83867->tx_id_delay << DP83867_RGMII_TX_CLK_DELAY_SHIFT));
}
/* If specified, set io impedance */
@@ -997,20 +937,23 @@ static void dp83867_link_change_notify(struct phy_device *phydev)
* whenever there is a link change.
*/
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
- int val = 0;
+ int val;
- val = phy_clear_bits(phydev, DP83867_CFG2,
- DP83867_SGMII_AUTONEG_EN);
- if (val < 0)
- return;
+ val = phy_modify_changed(phydev, DP83867_CFG2,
+ DP83867_SGMII_AUTONEG_EN, 0);
- phy_set_bits(phydev, DP83867_CFG2,
- DP83867_SGMII_AUTONEG_EN);
+ /* Keep the in-band setting made by dp83867_config_inband() */
+ if (val != 0)
+ phy_set_bits(phydev, DP83867_CFG2,
+ DP83867_SGMII_AUTONEG_EN);
}
}
-static int dp83867_loopback(struct phy_device *phydev, bool enable)
+static int dp83867_loopback(struct phy_device *phydev, bool enable, int speed)
{
+ if (enable && speed)
+ return -EOPNOTSUPP;
+
return phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK,
enable ? BMCR_LOOPBACK : 0);
}
@@ -1173,6 +1116,25 @@ static int dp83867_led_polarity_set(struct phy_device *phydev, int index,
DP83867_LED_POLARITY(index), polarity);
}
+static unsigned int dp83867_inband_caps(struct phy_device *phydev,
+ phy_interface_t interface)
+{
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ return LINK_INBAND_ENABLE | LINK_INBAND_DISABLE;
+
+ return 0;
+}
+
+static int dp83867_config_inband(struct phy_device *phydev, unsigned int modes)
+{
+ int val = 0;
+
+ if (modes == LINK_INBAND_ENABLE)
+ val = DP83867_SGMII_AUTONEG_EN;
+
+ return phy_modify(phydev, DP83867_CFG2, DP83867_SGMII_AUTONEG_EN, val);
+}
+
static struct phy_driver dp83867_driver[] = {
{
.phy_id = DP83867_PHY_ID,
@@ -1206,11 +1168,14 @@ static struct phy_driver dp83867_driver[] = {
.led_hw_control_set = dp83867_led_hw_control_set,
.led_hw_control_get = dp83867_led_hw_control_get,
.led_polarity_set = dp83867_led_polarity_set,
+
+ .inband_caps = dp83867_inband_caps,
+ .config_inband = dp83867_config_inband,
},
};
module_phy_driver(dp83867_driver);
-static struct mdio_device_id __maybe_unused dp83867_tbl[] = {
+static const struct mdio_device_id __maybe_unused dp83867_tbl[] = {
{ DP83867_PHY_ID, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
index 5f056d7db83e..1f381d7b13ff 100644
--- a/drivers/net/phy/dp83869.c
+++ b/drivers/net/phy/dp83869.c
@@ -84,7 +84,7 @@
#define DP83869_CLK_DELAY_DEF 7
/* STRAP_STS1 bits */
-#define DP83869_STRAP_OP_MODE_MASK GENMASK(2, 0)
+#define DP83869_STRAP_OP_MODE_MASK GENMASK(11, 9)
#define DP83869_STRAP_STS1_RESERVED BIT(11)
#define DP83869_STRAP_MIRROR_ENABLED BIT(12)
@@ -153,19 +153,32 @@ struct dp83869_private {
int mode;
};
+static int dp83869_config_aneg(struct phy_device *phydev)
+{
+ struct dp83869_private *dp83869 = phydev->priv;
+
+ if (dp83869->mode != DP83869_RGMII_1000_BASE)
+ return genphy_config_aneg(phydev);
+
+ return genphy_c37_config_aneg(phydev);
+}
+
static int dp83869_read_status(struct phy_device *phydev)
{
struct dp83869_private *dp83869 = phydev->priv;
+ bool changed;
int ret;
+ if (dp83869->mode == DP83869_RGMII_1000_BASE)
+ return genphy_c37_read_status(phydev, &changed);
+
ret = genphy_read_status(phydev);
if (ret)
return ret;
- if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported)) {
+ if (dp83869->mode == DP83869_RGMII_100_BASE) {
if (phydev->link) {
- if (dp83869->mode == DP83869_RGMII_100_BASE)
- phydev->speed = SPEED_100;
+ phydev->speed = SPEED_100;
} else {
phydev->speed = SPEED_UNKNOWN;
phydev->duplex = DUPLEX_UNKNOWN;
@@ -515,7 +528,7 @@ static int dp83869_set_strapped_mode(struct phy_device *phydev)
if (val < 0)
return val;
- dp83869->mode = val & DP83869_STRAP_OP_MODE_MASK;
+ dp83869->mode = FIELD_GET(DP83869_STRAP_OP_MODE_MASK, val);
return 0;
}
@@ -527,9 +540,8 @@ static const int dp83869_internal_delay[] = {250, 500, 750, 1000, 1250, 1500,
static int dp83869_of_init(struct phy_device *phydev)
{
+ struct device_node *of_node = phydev->mdio.dev.of_node;
struct dp83869_private *dp83869 = phydev->priv;
- struct device *dev = &phydev->mdio.dev;
- struct device_node *of_node = dev->of_node;
int delay_size = ARRAY_SIZE(dp83869_internal_delay);
int ret;
@@ -584,13 +596,13 @@ static int dp83869_of_init(struct phy_device *phydev)
&dp83869->tx_fifo_depth))
dp83869->tx_fifo_depth = DP83869_PHYCR_FIFO_DEPTH_4_B_NIB;
- dp83869->rx_int_delay = phy_get_internal_delay(phydev, dev,
+ dp83869->rx_int_delay = phy_get_internal_delay(phydev,
&dp83869_internal_delay[0],
delay_size, true);
if (dp83869->rx_int_delay < 0)
dp83869->rx_int_delay = DP83869_CLK_DELAY_DEF;
- dp83869->tx_int_delay = phy_get_internal_delay(phydev, dev,
+ dp83869->tx_int_delay = phy_get_internal_delay(phydev,
&dp83869_internal_delay[0],
delay_size, false);
if (dp83869->tx_int_delay < 0)
@@ -898,6 +910,7 @@ static int dp83869_phy_reset(struct phy_device *phydev)
.soft_reset = dp83869_phy_reset, \
.config_intr = dp83869_config_intr, \
.handle_interrupt = dp83869_handle_interrupt, \
+ .config_aneg = dp83869_config_aneg, \
.read_status = dp83869_read_status, \
.get_tunable = dp83869_get_tunable, \
.set_tunable = dp83869_set_tunable, \
@@ -914,7 +927,7 @@ static struct phy_driver dp83869_driver[] = {
};
module_phy_driver(dp83869_driver);
-static struct mdio_device_id __maybe_unused dp83869_tbl[] = {
+static const struct mdio_device_id __maybe_unused dp83869_tbl[] = {
{ PHY_ID_MATCH_MODEL(DP83869_PHY_ID) },
{ PHY_ID_MATCH_MODEL(DP83561_PHY_ID) },
{ }
diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c
index 7ea32fb77190..e480c2a07450 100644
--- a/drivers/net/phy/dp83tc811.c
+++ b/drivers/net/phy/dp83tc811.c
@@ -403,7 +403,7 @@ static struct phy_driver dp83811_driver[] = {
};
module_phy_driver(dp83811_driver);
-static struct mdio_device_id __maybe_unused dp83811_tbl[] = {
+static const struct mdio_device_id __maybe_unused dp83811_tbl[] = {
{ DP83TC811_PHY_ID, 0xfffffff0 },
{ },
};
diff --git a/drivers/net/phy/dp83td510.c b/drivers/net/phy/dp83td510.c
index 92aa3a2b9744..d75dae6071ad 100644
--- a/drivers/net/phy/dp83td510.c
+++ b/drivers/net/phy/dp83td510.c
@@ -34,10 +34,34 @@
#define DP83TD510E_CTRL_HW_RESET BIT(15)
#define DP83TD510E_CTRL_SW_RESET BIT(14)
+/*
+ * DP83TD510E_PKT_STAT_x registers correspond to similarly named registers
+ * in the datasheet (PKT_STAT_1 through PKT_STAT_6). These registers store
+ * 32-bit or 16-bit counters for TX and RX statistics and must be read in
+ * sequence to ensure the counters are cleared correctly.
+ *
+ * - DP83TD510E_PKT_STAT_1: Contains TX packet count bits [15:0].
+ * - DP83TD510E_PKT_STAT_2: Contains TX packet count bits [31:16].
+ * - DP83TD510E_PKT_STAT_3: Contains TX error packet count.
+ * - DP83TD510E_PKT_STAT_4: Contains RX packet count bits [15:0].
+ * - DP83TD510E_PKT_STAT_5: Contains RX packet count bits [31:16].
+ * - DP83TD510E_PKT_STAT_6: Contains RX error packet count.
+ *
+ * Keeping the register names as defined in the datasheet helps maintain
+ * clarity and alignment with the documentation.
+ */
+#define DP83TD510E_PKT_STAT_1 0x12b
+#define DP83TD510E_PKT_STAT_2 0x12c
+#define DP83TD510E_PKT_STAT_3 0x12d
+#define DP83TD510E_PKT_STAT_4 0x12e
+#define DP83TD510E_PKT_STAT_5 0x12f
+#define DP83TD510E_PKT_STAT_6 0x130
+
#define DP83TD510E_AN_STAT_1 0x60c
#define DP83TD510E_MASTER_SLAVE_RESOL_FAIL BIT(15)
#define DP83TD510E_MSE_DETECT 0xa85
+#define DP83TD510E_MSE_MAX U16_MAX
#define DP83TD510_SQI_MAX 7
@@ -58,8 +82,16 @@ static const u16 dp83td510_mse_sqi_map[] = {
0x0000 /* 24dB =< SNR */
};
+struct dp83td510_stats {
+ u64 tx_pkt_cnt;
+ u64 tx_err_pkt_cnt;
+ u64 rx_pkt_cnt;
+ u64 rx_err_pkt_cnt;
+};
+
struct dp83td510_priv {
bool alcd_test_active;
+ struct dp83td510_stats stats;
};
/* Time Domain Reflectometry (TDR) Functionality of DP83TD510 PHY
@@ -173,10 +205,328 @@ struct dp83td510_priv {
#define DP83TD510E_UNKN_030E 0x30e
#define DP83TD510E_030E_VAL 0x2520
+#define DP83TD510E_LEDS_CFG_1 0x460
+#define DP83TD510E_LED_FN(idx, val) (((val) & 0xf) << ((idx) * 4))
+#define DP83TD510E_LED_FN_MASK(idx) (0xf << ((idx) * 4))
+/* link OK */
+#define DP83TD510E_LED_MODE_LINK_OK 0x0
+/* TX/RX activity */
+#define DP83TD510E_LED_MODE_TX_RX_ACTIVITY 0x1
+/* TX activity */
+#define DP83TD510E_LED_MODE_TX_ACTIVITY 0x2
+/* RX activity */
+#define DP83TD510E_LED_MODE_RX_ACTIVITY 0x3
+/* LR */
+#define DP83TD510E_LED_MODE_LR 0x4
+/* SR */
+#define DP83TD510E_LED_MODE_SR 0x5
+/* LED SPEED: High for 10Base-T */
+#define DP83TD510E_LED_MODE_LED_SPEED 0x6
+/* Duplex mode */
+#define DP83TD510E_LED_MODE_DUPLEX 0x7
+/* link + blink on activity with stretch option */
+#define DP83TD510E_LED_MODE_LINK_BLINK 0x8
+/* blink on activity with stretch option */
+#define DP83TD510E_LED_MODE_BLINK_ACTIVITY 0x9
+/* blink on tx activity with stretch option */
+#define DP83TD510E_LED_MODE_BLINK_TX 0xa
+/* blink on rx activity with stretch option */
+#define DP83TD510E_LED_MODE_BLINK_RX 0xb
+/* link_lost */
+#define DP83TD510E_LED_MODE_LINK_LOST 0xc
+/* PRBS error: toggles on error */
+#define DP83TD510E_LED_MODE_PRBS_ERROR 0xd
+/* XMII TX/RX Error with stretch option */
+#define DP83TD510E_LED_MODE_XMII_ERR 0xe
+
+#define DP83TD510E_LED_COUNT 4
+
+#define DP83TD510E_LEDS_CFG_2 0x469
+#define DP83TD510E_LED_POLARITY(idx) BIT((idx) * 4 + 2)
+#define DP83TD510E_LED_DRV_VAL(idx) BIT((idx) * 4 + 1)
+#define DP83TD510E_LED_DRV_EN(idx) BIT((idx) * 4)
+
#define DP83TD510E_ALCD_STAT 0xa9f
#define DP83TD510E_ALCD_COMPLETE BIT(15)
#define DP83TD510E_ALCD_CABLE_LENGTH GENMASK(10, 0)
+static int dp83td510_get_mse_capability(struct phy_device *phydev,
+ struct phy_mse_capability *cap)
+{
+ /* DP83TD510E documents only a single (average) MSE register
+ * (used to derive SQI); no peak or worst-peak counters are
+ * described. Advertise only PHY_MSE_CAP_AVG.
+ */
+ cap->supported_caps = PHY_MSE_CAP_AVG;
+ /* 10BASE-T1L is a single-pair medium, so there are no B/C/D channels.
+ * We still advertise PHY_MSE_CAP_CHANNEL_A to indicate that the PHY
+ * can attribute the measurement to a specific pair (the only one),
+ * rather than exposing it only as a link-aggregate.
+ *
+ * Rationale:
+ * - Keeps the ethtool MSE_GET selection logic consistent: per-channel
+ * (A/B/C/D) is preferred over WORST/LINK, so userspace receives a
+ * CHANNEL_A nest instead of LINK.
+ * - Signals to tools that "per-pair" data is available (even if there's
+ * just one pair), avoiding the impression that only aggregate values
+ * are supported.
+ * - Remains compatible with multi-pair PHYs and uniform UI handling.
+ *
+ * Note: WORST and other channels are not advertised on 10BASE-T1L.
+ */
+ cap->supported_caps |= PHY_MSE_CHANNEL_A | PHY_MSE_CAP_LINK;
+ cap->max_average_mse = DP83TD510E_MSE_MAX;
+
+ /* The datasheet does not specify the refresh rate or symbol count,
+ * but based on similar PHYs and standards, we can assume a common
+ * value. For 10BASE-T1L, the symbol rate is 7.5 MBd. A common
+ * diagnostic interval is around 1ms.
+ * 7.5e6 symbols/sec * 0.001 sec = 7500 symbols.
+ */
+ cap->refresh_rate_ps = 1000000000; /* 1 ms */
+ cap->num_symbols = 7500;
+
+ return 0;
+}
+
+static int dp83td510_get_mse_snapshot(struct phy_device *phydev,
+ enum phy_mse_channel channel,
+ struct phy_mse_snapshot *snapshot)
+{
+ int ret;
+
+ if (channel != PHY_MSE_CHANNEL_LINK &&
+ channel != PHY_MSE_CHANNEL_A)
+ return -EOPNOTSUPP;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_MSE_DETECT);
+ if (ret < 0)
+ return ret;
+
+ snapshot->average_mse = ret;
+
+ return 0;
+}
+
+static int dp83td510_led_brightness_set(struct phy_device *phydev, u8 index,
+ enum led_brightness brightness)
+{
+ u32 val;
+
+ if (index >= DP83TD510E_LED_COUNT)
+ return -EINVAL;
+
+ val = DP83TD510E_LED_DRV_EN(index);
+
+ if (brightness)
+ val |= DP83TD510E_LED_DRV_VAL(index);
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_LEDS_CFG_2,
+ DP83TD510E_LED_DRV_VAL(index) |
+ DP83TD510E_LED_DRV_EN(index), val);
+}
+
+static int dp83td510_led_mode(u8 index, unsigned long rules)
+{
+ if (index >= DP83TD510E_LED_COUNT)
+ return -EINVAL;
+
+ switch (rules) {
+ case BIT(TRIGGER_NETDEV_LINK):
+ return DP83TD510E_LED_MODE_LINK_OK;
+ case BIT(TRIGGER_NETDEV_LINK_10):
+ return DP83TD510E_LED_MODE_LED_SPEED;
+ case BIT(TRIGGER_NETDEV_FULL_DUPLEX):
+ return DP83TD510E_LED_MODE_DUPLEX;
+ case BIT(TRIGGER_NETDEV_TX):
+ return DP83TD510E_LED_MODE_TX_ACTIVITY;
+ case BIT(TRIGGER_NETDEV_RX):
+ return DP83TD510E_LED_MODE_RX_ACTIVITY;
+ case BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX):
+ return DP83TD510E_LED_MODE_TX_RX_ACTIVITY;
+ case BIT(TRIGGER_NETDEV_LINK) | BIT(TRIGGER_NETDEV_TX) |
+ BIT(TRIGGER_NETDEV_RX):
+ return DP83TD510E_LED_MODE_LINK_BLINK;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int dp83td510_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ int ret;
+
+ ret = dp83td510_led_mode(index, rules);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int dp83td510_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ int mode, ret;
+
+ mode = dp83td510_led_mode(index, rules);
+ if (mode < 0)
+ return mode;
+
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_LEDS_CFG_1,
+ DP83TD510E_LED_FN_MASK(index),
+ DP83TD510E_LED_FN(index, mode));
+ if (ret)
+ return ret;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_LEDS_CFG_2,
+ DP83TD510E_LED_DRV_EN(index), 0);
+}
+
+static int dp83td510_led_hw_control_get(struct phy_device *phydev,
+ u8 index, unsigned long *rules)
+{
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_LEDS_CFG_1);
+ if (val < 0)
+ return val;
+
+ val &= DP83TD510E_LED_FN_MASK(index);
+ val >>= index * 4;
+
+ switch (val) {
+ case DP83TD510E_LED_MODE_LINK_OK:
+ *rules = BIT(TRIGGER_NETDEV_LINK);
+ break;
+ /* LED mode: LED SPEED (10BaseT1L indicator) */
+ case DP83TD510E_LED_MODE_LED_SPEED:
+ *rules = BIT(TRIGGER_NETDEV_LINK_10);
+ break;
+ case DP83TD510E_LED_MODE_DUPLEX:
+ *rules = BIT(TRIGGER_NETDEV_FULL_DUPLEX);
+ break;
+ case DP83TD510E_LED_MODE_TX_ACTIVITY:
+ *rules = BIT(TRIGGER_NETDEV_TX);
+ break;
+ case DP83TD510E_LED_MODE_RX_ACTIVITY:
+ *rules = BIT(TRIGGER_NETDEV_RX);
+ break;
+ case DP83TD510E_LED_MODE_TX_RX_ACTIVITY:
+ *rules = BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX);
+ break;
+ case DP83TD510E_LED_MODE_LINK_BLINK:
+ *rules = BIT(TRIGGER_NETDEV_LINK) |
+ BIT(TRIGGER_NETDEV_TX) |
+ BIT(TRIGGER_NETDEV_RX);
+ break;
+ default:
+ *rules = 0;
+ break;
+ }
+
+ return 0;
+}
+
+static int dp83td510_led_polarity_set(struct phy_device *phydev, int index,
+ unsigned long modes)
+{
+ u16 polarity = DP83TD510E_LED_POLARITY(index);
+ u32 mode;
+
+ for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) {
+ switch (mode) {
+ case PHY_LED_ACTIVE_LOW:
+ polarity = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_LEDS_CFG_2,
+ DP83TD510E_LED_POLARITY(index), polarity);
+}
+
+/**
+ * dp83td510_update_stats - Update the PHY statistics for the DP83TD510 PHY.
+ * @phydev: Pointer to the phy_device structure.
+ *
+ * The function reads the PHY statistics registers and updates the statistics
+ * structure.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static int dp83td510_update_stats(struct phy_device *phydev)
+{
+ struct dp83td510_priv *priv = phydev->priv;
+ u32 count;
+ int ret;
+
+ /* The DP83TD510E_PKT_STAT registers are divided into two groups:
+ * - Group 1 (TX stats): DP83TD510E_PKT_STAT_1 to DP83TD510E_PKT_STAT_3
+ * - Group 2 (RX stats): DP83TD510E_PKT_STAT_4 to DP83TD510E_PKT_STAT_6
+ *
+ * Registers in each group are cleared only after reading them in a
+ * plain sequence (e.g., 1, 2, 3 for Group 1 or 4, 5, 6 for Group 2).
+ * Any deviation from the sequence, such as reading 1, 2, 1, 2, 3, will
+ * prevent the group from being cleared. Additionally, the counters
+ * for a group are frozen as soon as the first register in that group
+ * is accessed.
+ */
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PKT_STAT_1);
+ if (ret < 0)
+ return ret;
+ /* tx_pkt_cnt_15_0 */
+ count = ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PKT_STAT_2);
+ if (ret < 0)
+ return ret;
+ /* tx_pkt_cnt_31_16 */
+ count |= ret << 16;
+ priv->stats.tx_pkt_cnt += count;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PKT_STAT_3);
+ if (ret < 0)
+ return ret;
+ /* tx_err_pkt_cnt */
+ priv->stats.tx_err_pkt_cnt += ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PKT_STAT_4);
+ if (ret < 0)
+ return ret;
+ /* rx_pkt_cnt_15_0 */
+ count = ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PKT_STAT_5);
+ if (ret < 0)
+ return ret;
+ /* rx_pkt_cnt_31_16 */
+ count |= ret << 16;
+ priv->stats.rx_pkt_cnt += count;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PKT_STAT_6);
+ if (ret < 0)
+ return ret;
+ /* rx_err_pkt_cnt */
+ priv->stats.rx_err_pkt_cnt += ret;
+
+ return 0;
+}
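
The TX/RX packet counters are exposed as 16-bit low/high register pairs that clear only when read in order, so the driver stitches each pair into a 32-bit delta and accumulates it into a 64-bit software counter. A self-contained sketch of that arithmetic, with made-up register snapshots:

#include <stdint.h>
#include <stdio.h>

/* Accumulate one harvest of a split 16-bit lo/hi counter pair into a
 * 64-bit running total, as dp83td510_update_stats() does after reading
 * PKT_STAT_1 and PKT_STAT_2 in sequence.
 */
static void accumulate(uint64_t *total, uint16_t lo, uint16_t hi)
{
	uint32_t delta = (uint32_t)lo | ((uint32_t)hi << 16);

	*total += delta;
}

int main(void)
{
	uint64_t tx_pkt_cnt = 0;

	/* Two polling rounds with made-up register values. */
	accumulate(&tx_pkt_cnt, 0xfff0, 0x0001);	/* 0x1fff0 packets */
	accumulate(&tx_pkt_cnt, 0x0200, 0x0000);	/* 0x00200 packets */

	printf("tx_packets = %llu\n", (unsigned long long)tx_pkt_cnt);
	return 0;
}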
+
+static void dp83td510_get_phy_stats(struct phy_device *phydev,
+ struct ethtool_eth_phy_stats *eth_stats,
+ struct ethtool_phy_stats *stats)
+{
+ struct dp83td510_priv *priv = phydev->priv;
+
+ stats->tx_packets = priv->stats.tx_pkt_cnt;
+ stats->tx_errors = priv->stats.tx_err_pkt_cnt;
+ stats->rx_packets = priv->stats.rx_pkt_cnt;
+ stats->rx_errors = priv->stats.rx_err_pkt_cnt;
+}
+
static int dp83td510_config_intr(struct phy_device *phydev)
{
int ret;
@@ -599,13 +949,24 @@ static struct phy_driver dp83td510_driver[] = {
.get_sqi_max = dp83td510_get_sqi_max,
.cable_test_start = dp83td510_cable_test_start,
.cable_test_get_status = dp83td510_cable_test_get_status,
+ .get_phy_stats = dp83td510_get_phy_stats,
+ .update_stats = dp83td510_update_stats,
+
+ .get_mse_capability = dp83td510_get_mse_capability,
+ .get_mse_snapshot = dp83td510_get_mse_snapshot,
+
+ .led_brightness_set = dp83td510_led_brightness_set,
+ .led_hw_is_supported = dp83td510_led_hw_is_supported,
+ .led_hw_control_set = dp83td510_led_hw_control_set,
+ .led_hw_control_get = dp83td510_led_hw_control_get,
+ .led_polarity_set = dp83td510_led_polarity_set,
.suspend = genphy_suspend,
.resume = genphy_resume,
} };
module_phy_driver(dp83td510_driver);
-static struct mdio_device_id __maybe_unused dp83td510_tbl[] = {
+static const struct mdio_device_id __maybe_unused dp83td510_tbl[] = {
{ PHY_ID_MATCH_MODEL(DP83TD510E_PHY_ID) },
{ }
};
diff --git a/drivers/net/phy/dp83tg720.c b/drivers/net/phy/dp83tg720.c
index 0ef4d7dba065..391c1d868808 100644
--- a/drivers/net/phy/dp83tg720.c
+++ b/drivers/net/phy/dp83tg720.c
@@ -4,12 +4,102 @@
*/
#include <linux/bitfield.h>
#include <linux/ethtool_netlink.h>
+#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/phy.h>
+#include <linux/random.h>
#include "open_alliance_helpers.h"
+/*
+ * DP83TG720 PHY Limitations and Workarounds
+ *
+ * The DP83TG720 1000BASE-T1 PHY has several limitations that require
+ * software-side mitigations. These workarounds are implemented throughout
+ * this driver. This section documents the known issues and their corresponding
+ * mitigation strategies.
+ *
+ * 1. Unreliable Link Detection and Synchronized Reset Deadlock
+ * ------------------------------------------------------------
+ * After a link loss or during link establishment, the DP83TG720 PHY may fail
+ * to detect or report link status correctly. As of June 2025, no public
+ * errata sheet for the DP83TG720 PHY documents this behavior.
+ * The "DP83TC81x, DP83TG72x Software Implementation Guide" application note
+ * (SNLA404, available at https://www.ti.com/lit/an/snla404/snla404.pdf)
+ * recommends performing a soft restart if polling for a link fails to establish
+ * a connection after 100ms. This procedure is adopted as the workaround for the
+ * observed link detection issue.
+ *
+ * However, in point-to-point setups where both link partners use the same
+ * driver (e.g. Linux on both sides), a synchronized reset pattern may emerge.
+ * This leads to a deadlock, where both PHYs reset at the same time and
+ * continuously miss each other during auto-negotiation.
+ *
+ * To address this, the reset procedure includes two components:
+ *
+ * - A **fixed minimum delay of 1ms** after a hardware reset. The datasheet
+ * "DP83TG720S-Q1 1000BASE-T1 Automotive Ethernet PHY with SGMII and RGMII"
+ * specifies this as the "Post reset stabilization-time prior to MDC preamble
+ * for register access" (T6.2), ensuring the PHY is ready for MDIO
+ * operations.
+ *
+ * - An **additional asymmetric delay**, empirically chosen based on
+ * master/slave role. This reduces the risk of synchronized resets on both
+ * link partners. Values are selected to avoid periodic overlap and ensure
+ * the link is re-established within a few cycles.
+ *
+ * The functions that implement this logic are:
+ * - dp83tg720_soft_reset()
+ * - dp83tg720_get_next_update_time()
+ *
+ * 2. Polling-Based Link Detection and IRQ Support
+ * -----------------------------------------------
+ * Due to the PHY-specific limitation described in section 1, link-up events
+ * cannot be reliably detected via interrupts on the DP83TG720. Therefore,
+ * polling is required to detect transitions from link-down to link-up.
+ *
+ * While link-down events *can* be detected via IRQs on this PHY, this driver
+ * currently does **not** implement interrupt support. As a result, all link
+ * state changes must be detected using polling.
+ *
+ * Polling behavior:
+ * - When the link is up: slow polling (e.g. 1s).
+ * - When the link just went down: fast polling for a short time.
+ * - When the link stays down: fallback to slow polling.
+ *
+ * This design balances responsiveness and CPU usage. It sacrifices fast link-up
+ * times in cases where the link is expected to remain down for extended periods,
+ * assuming that such systems do not require immediate reactivity.
+ */
+
+/*
+ * DP83TG720S_POLL_ACTIVE_LINK - Polling interval in milliseconds when the link
+ * is active.
+ * DP83TG720S_POLL_NO_LINK - Polling interval in milliseconds when the
+ * link is down.
+ * DP83TG720S_FAST_POLL_DURATION_MS - Timeout in milliseconds for no-link
+ * polling after which polling interval is
+ * increased.
+ * DP83TG720S_POLL_SLOW - Slow polling interval when there is no
+ *					  link for a prolonged period.
+ * DP83TG720S_RESET_DELAY_MS_MASTER - Delay after a reset before attempting
+ * to establish a link again for master phy.
+ * DP83TG720S_RESET_DELAY_MS_SLAVE - Delay after a reset before attempting
+ * to establish a link again for slave phy.
+ *
+ * These values are not documented or officially recommended by the vendor but
+ * were determined through empirical testing. They achieve a good balance in
+ * minimizing the number of reset retries while ensuring reliable link recovery
+ * within a reasonable timeframe.
+ */
+#define DP83TG720S_POLL_ACTIVE_LINK 421
+#define DP83TG720S_POLL_NO_LINK 149
+#define DP83TG720S_FAST_POLL_DURATION_MS 6000
+#define DP83TG720S_POLL_SLOW 1117
+#define DP83TG720S_RESET_DELAY_MS_MASTER 97
+#define DP83TG720S_RESET_DELAY_MS_SLAVE 149
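
The polling constants above feed the adaptive interval selection implemented later in dp83tg720_get_next_update_time(). The decision logic, reduced to plain C with millisecond timestamps instead of jiffies, is shown below as a simplification for illustration only:

#include <stdbool.h>
#include <stdio.h>

#define POLL_ACTIVE_LINK_MS	421
#define POLL_NO_LINK_MS		149
#define FAST_POLL_DURATION_MS	6000
#define POLL_SLOW_MS		1117

/* Return the next poll interval in ms given the link state, the current
 * time and the time the link was last seen going down (0 means the link
 * has not gone down since it last came up).
 */
static unsigned int next_poll_ms(bool link, unsigned long now_ms,
				 unsigned long *link_down_ms)
{
	if (link) {
		*link_down_ms = 0;
		return POLL_ACTIVE_LINK_MS;
	}

	if (!*link_down_ms)
		*link_down_ms = now_ms;

	if (now_ms - *link_down_ms < FAST_POLL_DURATION_MS)
		return POLL_NO_LINK_MS;	/* link recently lost: poll fast */

	return POLL_SLOW_MS;		/* link down for a while: back off */
}

int main(void)
{
	unsigned long link_down_ms = 0;

	printf("%u\n", next_poll_ms(true, 1000, &link_down_ms));	/* 421 */
	printf("%u\n", next_poll_ms(false, 2000, &link_down_ms));	/* 149 */
	printf("%u\n", next_poll_ms(false, 9000, &link_down_ms));	/* 1117 */
	return 0;
}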
+
#define DP83TG720S_PHY_ID 0x2000a284
/* MDIO_MMD_VEND2 registers */
@@ -51,6 +141,9 @@
/* Register 0x0405: Unknown Register */
#define DP83TG720S_UNKNOWN_0405 0x405
+#define DP83TG720S_LINK_QUAL_3 0x547
+#define DP83TG720S_LINK_LOSS_CNT_MASK GENMASK(15, 10)
+
/* Register 0x0576: TDR Master Link Down Control */
#define DP83TG720S_TDR_MASTER_LINK_DOWN 0x576
@@ -60,6 +153,29 @@
/* In RGMII mode, Enable or disable the internal delay for TXD */
#define DP83TG720S_RGMII_TX_CLK_SEL BIT(0)
+/*
+ * DP83TG720S_PKT_STAT_x registers correspond to similarly named registers
+ * in the datasheet (PKT_STAT_1 through PKT_STAT_6). These registers store
+ * 32-bit or 16-bit counters for TX and RX statistics and must be read in
+ * sequence to ensure the counters are cleared correctly.
+ *
+ * - DP83TG720S_PKT_STAT_1: Contains TX packet count bits [15:0].
+ * - DP83TG720S_PKT_STAT_2: Contains TX packet count bits [31:16].
+ * - DP83TG720S_PKT_STAT_3: Contains TX error packet count.
+ * - DP83TG720S_PKT_STAT_4: Contains RX packet count bits [15:0].
+ * - DP83TG720S_PKT_STAT_5: Contains RX packet count bits [31:16].
+ * - DP83TG720S_PKT_STAT_6: Contains RX error packet count.
+ *
+ * Keeping the register names as defined in the datasheet helps maintain
+ * clarity and alignment with the documentation.
+ */
+#define DP83TG720S_PKT_STAT_1 0x639
+#define DP83TG720S_PKT_STAT_2 0x63a
+#define DP83TG720S_PKT_STAT_3 0x63b
+#define DP83TG720S_PKT_STAT_4 0x63c
+#define DP83TG720S_PKT_STAT_5 0x63d
+#define DP83TG720S_PKT_STAT_6 0x63e
+
/* Register 0x083F: Unknown Register */
#define DP83TG720S_UNKNOWN_083F 0x83f
@@ -69,6 +185,134 @@
#define DP83TG720_SQI_MAX 7
+struct dp83tg720_stats {
+ u64 link_loss_cnt;
+ u64 tx_pkt_cnt;
+ u64 tx_err_pkt_cnt;
+ u64 rx_pkt_cnt;
+ u64 rx_err_pkt_cnt;
+};
+
+struct dp83tg720_priv {
+ struct dp83tg720_stats stats;
+ unsigned long last_link_down_jiffies;
+};
+
+/**
+ * dp83tg720_update_stats - Update the PHY statistics for the DP83TG720 PHY.
+ * @phydev: Pointer to the phy_device structure.
+ *
+ * The function reads the PHY statistics registers and updates the statistics
+ * structure.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static int dp83tg720_update_stats(struct phy_device *phydev)
+{
+ struct dp83tg720_priv *priv = phydev->priv;
+ u32 count;
+ int ret;
+
+ /* Read the link loss count */
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_LINK_QUAL_3);
+ if (ret < 0)
+ return ret;
+ /* link_loss_cnt */
+ count = FIELD_GET(DP83TG720S_LINK_LOSS_CNT_MASK, ret);
+ priv->stats.link_loss_cnt += count;
+
+ /* The DP83TG720S_PKT_STAT registers are divided into two groups:
+ * - Group 1 (TX stats): DP83TG720S_PKT_STAT_1 to DP83TG720S_PKT_STAT_3
+ * - Group 2 (RX stats): DP83TG720S_PKT_STAT_4 to DP83TG720S_PKT_STAT_6
+ *
+ * Registers in each group are cleared only after reading them in a
+ * plain sequence (e.g., 1, 2, 3 for Group 1 or 4, 5, 6 for Group 2).
+ * Any deviation from the sequence, such as reading 1, 2, 1, 2, 3, will
+ * prevent the group from being cleared. Additionally, the counters
+ * for a group are frozen as soon as the first register in that group
+ * is accessed.
+ */
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_PKT_STAT_1);
+ if (ret < 0)
+ return ret;
+ /* tx_pkt_cnt_15_0 */
+ count = ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_PKT_STAT_2);
+ if (ret < 0)
+ return ret;
+ /* tx_pkt_cnt_31_16 */
+ count |= ret << 16;
+ priv->stats.tx_pkt_cnt += count;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_PKT_STAT_3);
+ if (ret < 0)
+ return ret;
+ /* tx_err_pkt_cnt */
+ priv->stats.tx_err_pkt_cnt += ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_PKT_STAT_4);
+ if (ret < 0)
+ return ret;
+ /* rx_pkt_cnt_15_0 */
+ count = ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_PKT_STAT_5);
+ if (ret < 0)
+ return ret;
+ /* rx_pkt_cnt_31_16 */
+ count |= ret << 16;
+ priv->stats.rx_pkt_cnt += count;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_PKT_STAT_6);
+ if (ret < 0)
+ return ret;
+ /* rx_err_pkt_cnt */
+ priv->stats.rx_err_pkt_cnt += ret;
+
+ return 0;
+}
+
+static int dp83tg720_soft_reset(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_write(phydev, DP83TG720S_PHY_RESET, DP83TG720S_HW_RESET);
+ if (ret)
+ return ret;
+
+ /* Include mandatory MDC-access delay (1ms) + extra asymmetric delay to
+ * avoid synchronized reset deadlock. See section 1 in the top-of-file
+ * comment block.
+ */
+ if (phydev->master_slave_state == MASTER_SLAVE_STATE_SLAVE)
+ msleep(DP83TG720S_RESET_DELAY_MS_SLAVE);
+ else
+ msleep(DP83TG720S_RESET_DELAY_MS_MASTER);
+
+ return ret;
+}
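
The choice of 97 ms versus 149 ms is easiest to see as arithmetic: if both link partners happen to reset at the same instant, equal delays keep their retry attempts phase-locked, while the asymmetric values drift apart by 52 ms per cycle. A throwaway sketch that prints the first few retry offsets; the fixed per-cycle overhead is a made-up placeholder, not a documented figure:

#include <stdio.h>

#define RESET_DELAY_MASTER_MS	97
#define RESET_DELAY_SLAVE_MS	149
#define LINK_ATTEMPT_MS		100	/* placeholder per-cycle overhead */

int main(void)
{
	unsigned int master = 0, slave = 0;
	int i;

	for (i = 1; i <= 5; i++) {
		master += RESET_DELAY_MASTER_MS + LINK_ATTEMPT_MS;
		slave += RESET_DELAY_SLAVE_MS + LINK_ATTEMPT_MS;
		printf("cycle %d: master retries at %u ms, slave at %u ms (skew %d ms)\n",
		       i, master, slave, (int)(slave - master));
	}
	return 0;
}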
+
+static void dp83tg720_get_link_stats(struct phy_device *phydev,
+ struct ethtool_link_ext_stats *link_stats)
+{
+ struct dp83tg720_priv *priv = phydev->priv;
+
+ link_stats->link_down_events = priv->stats.link_loss_cnt;
+}
+
+static void dp83tg720_get_phy_stats(struct phy_device *phydev,
+ struct ethtool_eth_phy_stats *eth_stats,
+ struct ethtool_phy_stats *stats)
+{
+ struct dp83tg720_priv *priv = phydev->priv;
+
+ stats->tx_packets = priv->stats.tx_pkt_cnt;
+ stats->tx_errors = priv->stats.tx_err_pkt_cnt;
+ stats->rx_packets = priv->stats.rx_pkt_cnt;
+ stats->rx_errors = priv->stats.rx_err_pkt_cnt;
+}
+
/**
* dp83tg720_cable_test_start - Start the cable test for the DP83TG720 PHY.
* @phydev: Pointer to the phy_device structure.
@@ -182,6 +426,11 @@ static int dp83tg720_cable_test_get_status(struct phy_device *phydev,
ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A, stat);
+ /* save the current stats before resetting the PHY */
+ ret = dp83tg720_update_stats(phydev);
+ if (ret)
+ return ret;
+
return phy_init_hw(phydev);
}
@@ -217,12 +466,14 @@ static int dp83tg720_read_status(struct phy_device *phydev)
phy_sts = phy_read(phydev, DP83TG720S_MII_REG_10);
phydev->link = !!(phy_sts & DP83TG720S_LINK_STATUS);
if (!phydev->link) {
+ /* save the current stats before resetting the PHY */
+ ret = dp83tg720_update_stats(phydev);
+ if (ret)
+ return ret;
+
/* According to the "DP83TC81x, DP83TG72x Software
* Implementation Guide", the PHY needs to be reset after a
* link loss or if no link is created after at least 100ms.
- *
- * Currently we are polling with the PHY_STATE_TIME (1000ms)
- * interval, which is still enough for not automotive use cases.
*/
ret = phy_init_hw(phydev);
if (ret)
@@ -308,19 +559,11 @@ static int dp83tg720_config_init(struct phy_device *phydev)
{
int ret;
- /* Software Restart is not enough to recover from a link failure.
- * Using Hardware Reset instead.
- */
- ret = phy_write(phydev, DP83TG720S_PHY_RESET, DP83TG720S_HW_RESET);
+ /* Reset the PHY to recover from a link failure */
+ ret = dp83tg720_soft_reset(phydev);
if (ret)
return ret;
- /* Wait until MDC can be used again.
- * The wait value of one 1ms is documented in "DP83TG720S-Q1 1000BASE-T1
- * Automotive Ethernet PHY with SGMII and RGMII" datasheet.
- */
- usleep_range(1000, 2000);
-
if (phy_interface_is_rgmii(phydev)) {
ret = dp83tg720_config_rgmii_delay(phydev);
if (ret)
@@ -341,12 +584,71 @@ static int dp83tg720_config_init(struct phy_device *phydev)
return genphy_c45_pma_baset1_read_master_slave(phydev);
}
+static int dp83tg720_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct dp83tg720_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
+/**
+ * dp83tg720_get_next_update_time - Return next polling interval for PHY state
+ * @phydev: Pointer to the phy_device structure
+ *
+ * Implements adaptive polling interval logic depending on link state and
+ * downtime duration. See the "2. Polling-Based Link Detection and IRQ Support"
+ * section at the top of this file for details.
+ *
+ * Return: Time (in jiffies) until the next poll
+ */
+static unsigned int dp83tg720_get_next_update_time(struct phy_device *phydev)
+{
+ struct dp83tg720_priv *priv = phydev->priv;
+ unsigned int next_time_jiffies;
+
+ if (phydev->link) {
+ priv->last_link_down_jiffies = 0;
+
+ /* When the link is up, use a slower interval (in jiffies) */
+ next_time_jiffies =
+ msecs_to_jiffies(DP83TG720S_POLL_ACTIVE_LINK);
+ } else {
+ unsigned long now = jiffies;
+
+ if (!priv->last_link_down_jiffies)
+ priv->last_link_down_jiffies = now;
+
+ if (time_before(now, priv->last_link_down_jiffies +
+ msecs_to_jiffies(DP83TG720S_FAST_POLL_DURATION_MS))) {
+ /* Link recently went down: fast polling */
+ next_time_jiffies =
+ msecs_to_jiffies(DP83TG720S_POLL_NO_LINK);
+ } else {
+ /* Link has been down for a while: slow polling */
+ next_time_jiffies =
+ msecs_to_jiffies(DP83TG720S_POLL_SLOW);
+ }
+ }
+
+ /* Ensure the polling time is at least one jiffy */
+ return max(next_time_jiffies, 1U);
+}
+
static struct phy_driver dp83tg720_driver[] = {
{
PHY_ID_MATCH_MODEL(DP83TG720S_PHY_ID),
.name = "TI DP83TG720S",
.flags = PHY_POLL_CABLE_TEST,
+ .probe = dp83tg720_probe,
+ .soft_reset = dp83tg720_soft_reset,
.config_aneg = dp83tg720_config_aneg,
.read_status = dp83tg720_read_status,
.get_features = genphy_c45_pma_read_ext_abilities,
@@ -355,13 +657,17 @@ static struct phy_driver dp83tg720_driver[] = {
.get_sqi_max = dp83tg720_get_sqi_max,
.cable_test_start = dp83tg720_cable_test_start,
.cable_test_get_status = dp83tg720_cable_test_get_status,
+ .get_link_stats = dp83tg720_get_link_stats,
+ .get_phy_stats = dp83tg720_get_phy_stats,
+ .update_stats = dp83tg720_update_stats,
+ .get_next_update_time = dp83tg720_get_next_update_time,
.suspend = genphy_suspend,
.resume = genphy_resume,
} };
module_phy_driver(dp83tg720_driver);
-static struct mdio_device_id __maybe_unused dp83tg720_tbl[] = {
+static const struct mdio_device_id __maybe_unused dp83tg720_tbl[] = {
{ PHY_ID_MATCH_MODEL(DP83TG720S_PHY_ID) },
{ }
};
diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c
index be1b71d7cab7..6cd8d77586fd 100644
--- a/drivers/net/phy/et1011c.c
+++ b/drivers/net/phy/et1011c.c
@@ -94,7 +94,7 @@ static struct phy_driver et1011c_driver[] = { {
module_phy_driver(et1011c_driver);
-static struct mdio_device_id __maybe_unused et1011c_tbl[] = {
+static const struct mdio_device_id __maybe_unused et1011c_tbl[] = {
{ 0x0282f014, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index aef739c20ac4..50684271f81a 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/mii.h>
#include <linux/phy.h>
@@ -18,83 +17,64 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/gpio/consumer.h>
#include <linux/idr.h>
#include <linux/netdevice.h>
-#include <linux/linkmode.h>
#include "swphy.h"
-struct fixed_mdio_bus {
- struct mii_bus *mii_bus;
- struct list_head phys;
-};
-
struct fixed_phy {
int addr;
struct phy_device *phydev;
struct fixed_phy_status status;
- bool no_carrier;
int (*link_update)(struct net_device *, struct fixed_phy_status *);
struct list_head node;
- struct gpio_desc *link_gpiod;
};
-static struct platform_device *pdev;
-static struct fixed_mdio_bus platform_fmb = {
- .phys = LIST_HEAD_INIT(platform_fmb.phys),
-};
+static struct mii_bus *fmb_mii_bus;
+static LIST_HEAD(fmb_phys);
+
+static struct fixed_phy *fixed_phy_find(int addr)
+{
+ struct fixed_phy *fp;
+
+ list_for_each_entry(fp, &fmb_phys, node) {
+ if (fp->addr == addr)
+ return fp;
+ }
+
+ return NULL;
+}
int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier)
{
- struct fixed_mdio_bus *fmb = &platform_fmb;
struct phy_device *phydev = dev->phydev;
struct fixed_phy *fp;
if (!phydev || !phydev->mdio.bus)
return -EINVAL;
- list_for_each_entry(fp, &fmb->phys, node) {
- if (fp->addr == phydev->mdio.addr) {
- fp->no_carrier = !new_carrier;
- return 0;
- }
- }
- return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(fixed_phy_change_carrier);
+ fp = fixed_phy_find(phydev->mdio.addr);
+ if (!fp)
+ return -EINVAL;
-static void fixed_phy_update(struct fixed_phy *fp)
-{
- if (!fp->no_carrier && fp->link_gpiod)
- fp->status.link = !!gpiod_get_value_cansleep(fp->link_gpiod);
+ fp->status.link = new_carrier;
+
+ return 0;
}
+EXPORT_SYMBOL_GPL(fixed_phy_change_carrier);
static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num)
{
- struct fixed_mdio_bus *fmb = bus->priv;
struct fixed_phy *fp;
- list_for_each_entry(fp, &fmb->phys, node) {
- if (fp->addr == phy_addr) {
- struct fixed_phy_status state;
-
- fp->status.link = !fp->no_carrier;
-
- /* Issue callback if user registered it. */
- if (fp->link_update)
- fp->link_update(fp->phydev->attached_dev,
- &fp->status);
-
- /* Check the GPIO for change in status */
- fixed_phy_update(fp);
- state = fp->status;
+ fp = fixed_phy_find(phy_addr);
+ if (!fp)
+ return 0xffff;
- return swphy_read_reg(reg_num, &state);
- }
- }
+ if (fp->link_update)
+ fp->link_update(fp->phydev->attached_dev, &fp->status);
- return 0xFFFF;
+ return swphy_read_reg(reg_num, &fp->status);
}
static int fixed_mdio_write(struct mii_bus *bus, int phy_addr, int reg_num,
@@ -112,31 +92,27 @@ int fixed_phy_set_link_update(struct phy_device *phydev,
int (*link_update)(struct net_device *,
struct fixed_phy_status *))
{
- struct fixed_mdio_bus *fmb = &platform_fmb;
struct fixed_phy *fp;
if (!phydev || !phydev->mdio.bus)
return -EINVAL;
- list_for_each_entry(fp, &fmb->phys, node) {
- if (fp->addr == phydev->mdio.addr) {
- fp->link_update = link_update;
- fp->phydev = phydev;
- return 0;
- }
- }
+ fp = fixed_phy_find(phydev->mdio.addr);
+ if (!fp)
+ return -ENOENT;
+
+ fp->link_update = link_update;
+ fp->phydev = phydev;
- return -ENOENT;
+ return 0;
}
EXPORT_SYMBOL_GPL(fixed_phy_set_link_update);
-static int fixed_phy_add_gpiod(unsigned int irq, int phy_addr,
- struct fixed_phy_status *status,
- struct gpio_desc *gpiod)
+static int __fixed_phy_add(int phy_addr,
+ const struct fixed_phy_status *status)
{
- int ret;
- struct fixed_mdio_bus *fmb = &platform_fmb;
struct fixed_phy *fp;
+ int ret;
ret = swphy_validate_state(status);
if (ret < 0)
@@ -146,156 +122,61 @@ static int fixed_phy_add_gpiod(unsigned int irq, int phy_addr,
if (!fp)
return -ENOMEM;
- if (irq != PHY_POLL)
- fmb->mii_bus->irq[phy_addr] = irq;
-
fp->addr = phy_addr;
fp->status = *status;
- fp->link_gpiod = gpiod;
+ fp->status.link = true;
- fixed_phy_update(fp);
-
- list_add_tail(&fp->node, &fmb->phys);
+ list_add_tail(&fp->node, &fmb_phys);
return 0;
}
-int fixed_phy_add(unsigned int irq, int phy_addr,
- struct fixed_phy_status *status)
-{
- return fixed_phy_add_gpiod(irq, phy_addr, status, NULL);
-}
-EXPORT_SYMBOL_GPL(fixed_phy_add);
-
static DEFINE_IDA(phy_fixed_ida);
static void fixed_phy_del(int phy_addr)
{
- struct fixed_mdio_bus *fmb = &platform_fmb;
- struct fixed_phy *fp, *tmp;
-
- list_for_each_entry_safe(fp, tmp, &fmb->phys, node) {
- if (fp->addr == phy_addr) {
- list_del(&fp->node);
- if (fp->link_gpiod)
- gpiod_put(fp->link_gpiod);
- kfree(fp);
- ida_free(&phy_fixed_ida, phy_addr);
- return;
- }
- }
-}
+ struct fixed_phy *fp;
-#ifdef CONFIG_OF_GPIO
-static struct gpio_desc *fixed_phy_get_gpiod(struct device_node *np)
-{
- struct device_node *fixed_link_node;
- struct gpio_desc *gpiod;
-
- if (!np)
- return NULL;
-
- fixed_link_node = of_get_child_by_name(np, "fixed-link");
- if (!fixed_link_node)
- return NULL;
-
- /*
- * As the fixed link is just a device tree node without any
- * Linux device associated with it, we simply have obtain
- * the GPIO descriptor from the device tree like this.
- */
- gpiod = fwnode_gpiod_get_index(of_fwnode_handle(fixed_link_node),
- "link", 0, GPIOD_IN, "mdio");
- if (IS_ERR(gpiod) && PTR_ERR(gpiod) != -EPROBE_DEFER) {
- if (PTR_ERR(gpiod) != -ENOENT)
- pr_err("error getting GPIO for fixed link %pOF, proceed without\n",
- fixed_link_node);
- gpiod = NULL;
- }
- of_node_put(fixed_link_node);
+ fp = fixed_phy_find(phy_addr);
+ if (!fp)
+ return;
- return gpiod;
+ list_del(&fp->node);
+ kfree(fp);
+ ida_free(&phy_fixed_ida, phy_addr);
}
-#else
-static struct gpio_desc *fixed_phy_get_gpiod(struct device_node *np)
-{
- return NULL;
-}
-#endif
-static struct phy_device *__fixed_phy_register(unsigned int irq,
- struct fixed_phy_status *status,
- struct device_node *np,
- struct gpio_desc *gpiod)
+struct phy_device *fixed_phy_register(const struct fixed_phy_status *status,
+ struct device_node *np)
{
- struct fixed_mdio_bus *fmb = &platform_fmb;
struct phy_device *phy;
int phy_addr;
int ret;
- if (!fmb->mii_bus || fmb->mii_bus->state != MDIOBUS_REGISTERED)
+ if (!fmb_mii_bus || fmb_mii_bus->state != MDIOBUS_REGISTERED)
return ERR_PTR(-EPROBE_DEFER);
- /* Check if we have a GPIO associated with this fixed phy */
- if (!gpiod) {
- gpiod = fixed_phy_get_gpiod(np);
- if (IS_ERR(gpiod))
- return ERR_CAST(gpiod);
- }
-
/* Get the next available PHY address, up to PHY_MAX_ADDR */
phy_addr = ida_alloc_max(&phy_fixed_ida, PHY_MAX_ADDR - 1, GFP_KERNEL);
if (phy_addr < 0)
return ERR_PTR(phy_addr);
- ret = fixed_phy_add_gpiod(irq, phy_addr, status, gpiod);
+ ret = __fixed_phy_add(phy_addr, status);
if (ret < 0) {
ida_free(&phy_fixed_ida, phy_addr);
return ERR_PTR(ret);
}
- phy = get_phy_device(fmb->mii_bus, phy_addr, false);
+ phy = get_phy_device(fmb_mii_bus, phy_addr, false);
if (IS_ERR(phy)) {
fixed_phy_del(phy_addr);
return ERR_PTR(-EINVAL);
}
- /* propagate the fixed link values to struct phy_device */
- phy->link = status->link;
- if (status->link) {
- phy->speed = status->speed;
- phy->duplex = status->duplex;
- phy->pause = status->pause;
- phy->asym_pause = status->asym_pause;
- }
-
of_node_get(np);
phy->mdio.dev.of_node = np;
phy->is_pseudo_fixed_link = true;
- switch (status->speed) {
- case SPEED_1000:
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
- phy->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
- phy->supported);
- fallthrough;
- case SPEED_100:
- linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
- phy->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
- phy->supported);
- fallthrough;
- case SPEED_10:
- default:
- linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
- phy->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
- phy->supported);
- }
-
- phy_advertise_supported(phy);
-
ret = phy_device_register(phy);
if (ret) {
phy_device_free(phy);
@@ -306,79 +187,62 @@ static struct phy_device *__fixed_phy_register(unsigned int irq,
return phy;
}
-
-struct phy_device *fixed_phy_register(unsigned int irq,
- struct fixed_phy_status *status,
- struct device_node *np)
-{
- return __fixed_phy_register(irq, status, np, NULL);
-}
EXPORT_SYMBOL_GPL(fixed_phy_register);
-struct phy_device *
-fixed_phy_register_with_gpiod(unsigned int irq,
- struct fixed_phy_status *status,
- struct gpio_desc *gpiod)
+struct phy_device *fixed_phy_register_100fd(void)
{
- return __fixed_phy_register(irq, status, NULL, gpiod);
+ static const struct fixed_phy_status status = {
+ .speed = SPEED_100,
+ .duplex = DUPLEX_FULL,
+ };
+
+ return fixed_phy_register(&status, NULL);
}
-EXPORT_SYMBOL_GPL(fixed_phy_register_with_gpiod);
+EXPORT_SYMBOL_GPL(fixed_phy_register_100fd);
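
With the gpiod and irq parameters gone, registering a software PHY reduces to filling in a fixed_phy_status and calling fixed_phy_register(); fixed_phy_register_100fd() is simply the 100 Mbit/s full-duplex shorthand. A hedged sketch of a caller, assuming an in-kernel context and a hypothetical MAC driver with no real MDIO-attached PHY:

#include <linux/phy.h>
#include <linux/phy_fixed.h>

/* Illustrative only: register a fixed 1G/full link. Error handling is
 * left to the caller; the link state is forced up by the fixed-PHY core.
 */
static struct phy_device *example_attach_fixed_phy(void)
{
	static const struct fixed_phy_status status = {
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	};

	/* No device tree node: pass NULL, as fixed_phy_register_100fd() does. */
	return fixed_phy_register(&status, NULL);
}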
void fixed_phy_unregister(struct phy_device *phy)
{
phy_device_remove(phy);
of_node_put(phy->mdio.dev.of_node);
fixed_phy_del(phy->mdio.addr);
+ phy_device_free(phy);
}
EXPORT_SYMBOL_GPL(fixed_phy_unregister);
static int __init fixed_mdio_bus_init(void)
{
- struct fixed_mdio_bus *fmb = &platform_fmb;
int ret;
- pdev = platform_device_register_simple("Fixed MDIO bus", 0, NULL, 0);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
-
- fmb->mii_bus = mdiobus_alloc();
- if (fmb->mii_bus == NULL) {
- ret = -ENOMEM;
- goto err_mdiobus_reg;
- }
+ fmb_mii_bus = mdiobus_alloc();
+ if (!fmb_mii_bus)
+ return -ENOMEM;
- snprintf(fmb->mii_bus->id, MII_BUS_ID_SIZE, "fixed-0");
- fmb->mii_bus->name = "Fixed MDIO Bus";
- fmb->mii_bus->priv = fmb;
- fmb->mii_bus->parent = &pdev->dev;
- fmb->mii_bus->read = &fixed_mdio_read;
- fmb->mii_bus->write = &fixed_mdio_write;
- fmb->mii_bus->phy_mask = ~0;
+ snprintf(fmb_mii_bus->id, MII_BUS_ID_SIZE, "fixed-0");
+ fmb_mii_bus->name = "Fixed MDIO Bus";
+ fmb_mii_bus->read = &fixed_mdio_read;
+ fmb_mii_bus->write = &fixed_mdio_write;
+ fmb_mii_bus->phy_mask = ~0;
- ret = mdiobus_register(fmb->mii_bus);
+ ret = mdiobus_register(fmb_mii_bus);
if (ret)
goto err_mdiobus_alloc;
return 0;
err_mdiobus_alloc:
- mdiobus_free(fmb->mii_bus);
-err_mdiobus_reg:
- platform_device_unregister(pdev);
+ mdiobus_free(fmb_mii_bus);
return ret;
}
module_init(fixed_mdio_bus_init);
static void __exit fixed_mdio_bus_exit(void)
{
- struct fixed_mdio_bus *fmb = &platform_fmb;
struct fixed_phy *fp, *tmp;
- mdiobus_unregister(fmb->mii_bus);
- mdiobus_free(fmb->mii_bus);
- platform_device_unregister(pdev);
+ mdiobus_unregister(fmb_mii_bus);
+ mdiobus_free(fmb_mii_bus);
- list_for_each_entry_safe(fp, tmp, &fmb->phys, node) {
+ list_for_each_entry_safe(fp, tmp, &fmb_phys, node) {
list_del(&fp->node);
kfree(fp);
}
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index ee438b71a0b4..c0c4f19cfb6a 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -520,12 +520,14 @@ static int ip101a_g_match_phy_device(struct phy_device *phydev, bool ip101a)
return ip101a == !ret;
}
-static int ip101a_match_phy_device(struct phy_device *phydev)
+static int ip101a_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return ip101a_g_match_phy_device(phydev, true);
}
-static int ip101g_match_phy_device(struct phy_device *phydev)
+static int ip101g_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return ip101a_g_match_phy_device(phydev, false);
}
@@ -623,7 +625,7 @@ static struct phy_driver icplus_driver[] = {
module_phy_driver(icplus_driver);
-static struct mdio_device_id __maybe_unused icplus_tbl[] = {
+static const struct mdio_device_id __maybe_unused icplus_tbl[] = {
{ PHY_ID_MATCH_MODEL(IP175C_PHY_ID) },
{ PHY_ID_MATCH_MODEL(IP1001_PHY_ID) },
{ PHY_ID_MATCH_EXACT(IP101A_PHY_ID) },
diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
index b672c55a7a4e..9766dd99afaa 100644
--- a/drivers/net/phy/intel-xway.c
+++ b/drivers/net/phy/intel-xway.c
@@ -174,7 +174,6 @@ static const int xway_internal_delay[] = {0, 500, 1000, 1500, 2000, 2500,
static int xway_gphy_rgmii_init(struct phy_device *phydev)
{
- struct device *dev = &phydev->mdio.dev;
unsigned int delay_size = ARRAY_SIZE(xway_internal_delay);
s32 int_delay;
int val = 0;
@@ -207,8 +206,7 @@ static int xway_gphy_rgmii_init(struct phy_device *phydev)
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
- int_delay = phy_get_internal_delay(phydev, dev,
- xway_internal_delay,
+ int_delay = phy_get_internal_delay(phydev, xway_internal_delay,
delay_size, true);
/* if rx-internal-delay-ps is missing, use default of 2.0 ns */
@@ -220,8 +218,7 @@ static int xway_gphy_rgmii_init(struct phy_device *phydev)
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
- int_delay = phy_get_internal_delay(phydev, dev,
- xway_internal_delay,
+ int_delay = phy_get_internal_delay(phydev, xway_internal_delay,
delay_size, false);
/* if tx-internal-delay-ps is missing, use default of 2.0 ns */
@@ -529,7 +526,7 @@ static int xway_gphy_led_polarity_set(struct phy_device *phydev, int index,
if (force_active_high)
return phy_clear_bits(phydev, XWAY_MDIO_LED, XWAY_GPHY_LED_INV(index));
- unreachable();
+ return -EINVAL;
}
static struct phy_driver xway_gphy[] = {
@@ -691,7 +688,7 @@ static struct phy_driver xway_gphy[] = {
};
module_phy_driver(xway_gphy);
-static struct mdio_device_id __maybe_unused xway_gphy_tbl[] = {
+static const struct mdio_device_id __maybe_unused xway_gphy_tbl[] = {
{ PHY_ID_PHY11G_1_3, 0xffffffff },
{ PHY_ID_PHY22F_1_3, 0xffffffff },
{ PHY_ID_PHY11G_1_4, 0xffffffff },
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index e3bf827b7959..5251a61c8b0f 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -348,7 +348,7 @@ static struct phy_driver lxt97x_driver[] = {
module_phy_driver(lxt97x_driver);
-static struct mdio_device_id __maybe_unused lxt_tbl[] = {
+static const struct mdio_device_id __maybe_unused lxt_tbl[] = {
{ 0x78100000, 0xfffffff0 },
{ 0x001378e0, 0xfffffff0 },
{ 0x00137a10, 0xfffffff0 },
diff --git a/drivers/net/phy/marvell-88q2xxx.c b/drivers/net/phy/marvell-88q2xxx.c
index 5107f58338af..f3d83b04c953 100644
--- a/drivers/net/phy/marvell-88q2xxx.c
+++ b/drivers/net/phy/marvell-88q2xxx.c
@@ -7,30 +7,34 @@
* Copyright (C) 2024 Liebherr-Electronics and Drives GmbH
*/
#include <linux/ethtool_netlink.h>
+#include <linux/hwmon.h>
#include <linux/marvell_phy.h>
+#include <linux/of.h>
#include <linux/phy.h>
-#include <linux/hwmon.h>
-#define PHY_ID_88Q2220_REVB0 (MARVELL_PHY_ID_88Q2220 | 0x1)
-#define PHY_ID_88Q2220_REVB1 (MARVELL_PHY_ID_88Q2220 | 0x2)
-#define PHY_ID_88Q2220_REVB2 (MARVELL_PHY_ID_88Q2220 | 0x3)
+#define PHY_ID_88Q2220_REVB0 (MARVELL_PHY_ID_88Q2220 | 0x1)
+#define PHY_ID_88Q2220_REVB1 (MARVELL_PHY_ID_88Q2220 | 0x2)
+#define PHY_ID_88Q2220_REVB2 (MARVELL_PHY_ID_88Q2220 | 0x3)
-#define MDIO_MMD_AN_MV_STAT 32769
-#define MDIO_MMD_AN_MV_STAT_ANEG 0x0100
-#define MDIO_MMD_AN_MV_STAT_LOCAL_RX 0x1000
-#define MDIO_MMD_AN_MV_STAT_REMOTE_RX 0x2000
-#define MDIO_MMD_AN_MV_STAT_LOCAL_MASTER 0x4000
-#define MDIO_MMD_AN_MV_STAT_MS_CONF_FAULT 0x8000
+#define MDIO_MMD_AN_MV_STAT 32769
+#define MDIO_MMD_AN_MV_STAT_ANEG 0x0100
+#define MDIO_MMD_AN_MV_STAT_LOCAL_RX 0x1000
+#define MDIO_MMD_AN_MV_STAT_REMOTE_RX 0x2000
+#define MDIO_MMD_AN_MV_STAT_LOCAL_MASTER 0x4000
+#define MDIO_MMD_AN_MV_STAT_MS_CONF_FAULT 0x8000
-#define MDIO_MMD_AN_MV_STAT2 32794
-#define MDIO_MMD_AN_MV_STAT2_AN_RESOLVED 0x0800
-#define MDIO_MMD_AN_MV_STAT2_100BT1 0x2000
-#define MDIO_MMD_AN_MV_STAT2_1000BT1 0x4000
+#define MDIO_MMD_AN_MV_STAT2 32794
+#define MDIO_MMD_AN_MV_STAT2_AN_RESOLVED 0x0800
+#define MDIO_MMD_AN_MV_STAT2_100BT1 0x2000
+#define MDIO_MMD_AN_MV_STAT2_1000BT1 0x4000
-#define MDIO_MMD_PCS_MV_INT_EN 32784
-#define MDIO_MMD_PCS_MV_INT_EN_LINK_UP 0x0040
-#define MDIO_MMD_PCS_MV_INT_EN_LINK_DOWN 0x0080
-#define MDIO_MMD_PCS_MV_INT_EN_100BT1 0x1000
+#define MDIO_MMD_PCS_MV_RESET_CTRL 32768
+#define MDIO_MMD_PCS_MV_RESET_CTRL_TX_DISABLE 0x8
+
+#define MDIO_MMD_PCS_MV_INT_EN 32784
+#define MDIO_MMD_PCS_MV_INT_EN_LINK_UP 0x0040
+#define MDIO_MMD_PCS_MV_INT_EN_LINK_DOWN 0x0080
+#define MDIO_MMD_PCS_MV_INT_EN_100BT1 0x1000
#define MDIO_MMD_PCS_MV_GPIO_INT_STAT 32785
#define MDIO_MMD_PCS_MV_GPIO_INT_STAT_LINK_UP 0x0040
@@ -40,6 +44,22 @@
#define MDIO_MMD_PCS_MV_GPIO_INT_CTRL 32787
#define MDIO_MMD_PCS_MV_GPIO_INT_CTRL_TRI_DIS 0x0800
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL 32790
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LED_1_MASK GENMASK(7, 4)
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LED_0_MASK GENMASK(3, 0)
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK 0x0 /* Link established */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK_RX_TX 0x1 /* Link established, blink for rx or tx activity */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK_1000BT1 0x2 /* Blink 3x for 1000BT1 link established */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_RX_TX_ON 0x3 /* Receive or transmit activity */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_RX_TX 0x4 /* Blink on receive or transmit activity */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_TX 0x5 /* Transmit activity */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK_COPPER 0x6 /* Copper Link established */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK_1000BT1_ON 0x7 /* 1000BT1 link established */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_FORCE_OFF 0x8 /* Force off */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_FORCE_ON 0x9 /* Force on */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_FORCE_HIGHZ 0xa /* Force Hi-Z */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_FORCE_BLINK 0xb /* Force blink */
+
#define MDIO_MMD_PCS_MV_TEMP_SENSOR1 32833
#define MDIO_MMD_PCS_MV_TEMP_SENSOR1_RAW_INT 0x0001
#define MDIO_MMD_PCS_MV_TEMP_SENSOR1_INT 0x0040
@@ -60,11 +80,11 @@
#define MDIO_MMD_PCS_MV_100BT1_STAT1_REMOTE_RX 0x2000
#define MDIO_MMD_PCS_MV_100BT1_STAT1_LOCAL_MASTER 0x4000
-#define MDIO_MMD_PCS_MV_100BT1_STAT2 33033
-#define MDIO_MMD_PCS_MV_100BT1_STAT2_JABBER 0x0001
-#define MDIO_MMD_PCS_MV_100BT1_STAT2_POL 0x0002
-#define MDIO_MMD_PCS_MV_100BT1_STAT2_LINK 0x0004
-#define MDIO_MMD_PCS_MV_100BT1_STAT2_ANGE 0x0008
+#define MDIO_MMD_PCS_MV_100BT1_STAT2 33033
+#define MDIO_MMD_PCS_MV_100BT1_STAT2_JABBER 0x0001
+#define MDIO_MMD_PCS_MV_100BT1_STAT2_POL 0x0002
+#define MDIO_MMD_PCS_MV_100BT1_STAT2_LINK 0x0004
+#define MDIO_MMD_PCS_MV_100BT1_STAT2_ANGE 0x0008
#define MDIO_MMD_PCS_MV_100BT1_INT_EN 33042
#define MDIO_MMD_PCS_MV_100BT1_INT_EN_LINKEVENT 0x0400
@@ -72,7 +92,7 @@
#define MDIO_MMD_PCS_MV_COPPER_INT_STAT 33043
#define MDIO_MMD_PCS_MV_COPPER_INT_STAT_LINKEVENT 0x0400
-#define MDIO_MMD_PCS_MV_RX_STAT 33328
+#define MDIO_MMD_PCS_MV_RX_STAT 33328
#define MDIO_MMD_PCS_MV_TDR_RESET 65226
#define MDIO_MMD_PCS_MV_TDR_RESET_TDR_RST 0x1000
@@ -95,6 +115,13 @@
#define MDIO_MMD_PCS_MV_TDR_OFF_CUTOFF 65246
+#define MV88Q2XXX_LED_INDEX_TX_ENABLE 0
+#define MV88Q2XXX_LED_INDEX_GPIO 1
+
+struct mv88q2xxx_priv {
+ bool enable_led0;
+};
+
struct mmd_val {
int devad;
u32 regnum;
@@ -454,25 +481,6 @@ static int mv88q2xxx_config_aneg(struct phy_device *phydev)
return phydev->drv->soft_reset(phydev);
}
-static int mv88q2xxx_config_init(struct phy_device *phydev)
-{
- /* The 88Q2XXX PHYs do have the extended ability register available, but
- * register MDIO_PMA_EXTABLE where they should signalize it does not
- * work according to specification. Therefore, we force it here.
- */
- phydev->pma_extable = MDIO_PMA_EXTABLE_BT1;
-
- /* Configure interrupt with default settings, output is driven low for
- * active interrupt and high for inactive.
- */
- if (phy_interrupt_is_valid(phydev))
- return phy_set_bits_mmd(phydev, MDIO_MMD_PCS,
- MDIO_MMD_PCS_MV_GPIO_INT_CTRL,
- MDIO_MMD_PCS_MV_GPIO_INT_CTRL_TRI_DIS);
-
- return 0;
-}
-
static int mv88q2xxx_get_sqi(struct phy_device *phydev)
{
int ret;
@@ -615,6 +623,12 @@ static int mv88q2xxx_resume(struct phy_device *phydev)
}
#if IS_ENABLED(CONFIG_HWMON)
+static int mv88q2xxx_enable_temp_sense(struct phy_device *phydev)
+{
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TEMP_SENSOR2,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR2_DIS_MASK, 0);
+}
+
static const struct hwmon_channel_info * const mv88q2xxx_hwmon_info[] = {
HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_ALARM),
NULL
@@ -712,22 +726,13 @@ static int mv88q2xxx_hwmon_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
struct device *hwmon;
- char *hwmon_name;
int ret;
- /* Enable temperature sense */
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TEMP_SENSOR2,
- MDIO_MMD_PCS_MV_TEMP_SENSOR2_DIS_MASK, 0);
+ ret = mv88q2xxx_enable_temp_sense(phydev);
if (ret < 0)
return ret;
- hwmon_name = devm_hwmon_sanitize_name(dev, dev_name(dev));
- if (IS_ERR(hwmon_name))
- return PTR_ERR(hwmon_name);
-
- hwmon = devm_hwmon_device_register_with_info(dev,
- hwmon_name,
- phydev,
+ hwmon = devm_hwmon_device_register_with_info(dev, NULL, phydev,
&mv88q2xxx_hwmon_chip_info,
NULL);
@@ -735,17 +740,120 @@ static int mv88q2xxx_hwmon_probe(struct phy_device *phydev)
}
#else
+static int mv88q2xxx_enable_temp_sense(struct phy_device *phydev)
+{
+ return 0;
+}
+
static int mv88q2xxx_hwmon_probe(struct phy_device *phydev)
{
return 0;
}
#endif
+#if IS_ENABLED(CONFIG_OF_MDIO)
+static int mv88q2xxx_leds_probe(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct mv88q2xxx_priv *priv = phydev->priv;
+ struct device_node *leds;
+ int ret = 0;
+ u32 index;
+
+ if (!node)
+ return 0;
+
+ leds = of_get_child_by_name(node, "leds");
+ if (!leds)
+ return 0;
+
+ for_each_available_child_of_node_scoped(leds, led) {
+ ret = of_property_read_u32(led, "reg", &index);
+ if (ret)
+ goto exit;
+
+ if (index > MV88Q2XXX_LED_INDEX_GPIO) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (index == MV88Q2XXX_LED_INDEX_TX_ENABLE)
+ priv->enable_led0 = true;
+ }
+
+exit:
+ of_node_put(leds);
+
+ return ret;
+}
+
+#else
+static int mv88q2xxx_leds_probe(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif
+
static int mv88q2xxx_probe(struct phy_device *phydev)
{
+ struct mv88q2xxx_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ ret = mv88q2xxx_leds_probe(phydev);
+ if (ret)
+ return ret;
+
return mv88q2xxx_hwmon_probe(phydev);
}
+static int mv88q2xxx_config_init(struct phy_device *phydev)
+{
+ struct mv88q2xxx_priv *priv = phydev->priv;
+ int ret;
+
+ /* The 88Q2XXX PHYs do have the extended ability register available, but
+ * register MDIO_PMA_EXTABLE, where they should signal it, does not
+ * work according to the specification. Therefore, we force it here.
+ */
+ phydev->pma_extable = MDIO_PMA_EXTABLE_BT1;
+
+ /* Configure the interrupt with default settings; the output is driven
+ * low for an active interrupt and high for inactive.
+ */
+ if (phy_interrupt_is_valid(phydev)) {
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_GPIO_INT_CTRL,
+ MDIO_MMD_PCS_MV_GPIO_INT_CTRL_TRI_DIS);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Enable LED function and disable TX disable feature on LED/TX_ENABLE */
+ if (priv->enable_led0) {
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_RESET_CTRL,
+ MDIO_MMD_PCS_MV_RESET_CTRL_TX_DISABLE);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Enable temperature sense again. There might have been a hard reset
+ * of the PHY, in which case the register content is restored to its
+ * defaults and we need to enable it again.
+ */
+ ret = mv88q2xxx_enable_temp_sense(phydev);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static int mv88q2110_config_init(struct phy_device *phydev)
{
int ret;
@@ -899,11 +1007,104 @@ static int mv88q222x_cable_test_get_status(struct phy_device *phydev,
return 0;
}
+static int mv88q2xxx_led_mode(u8 index, unsigned long rules)
+{
+ switch (rules) {
+ case BIT(TRIGGER_NETDEV_LINK):
+ return MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK;
+ case BIT(TRIGGER_NETDEV_LINK_1000):
+ return MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK_1000BT1_ON;
+ case BIT(TRIGGER_NETDEV_TX):
+ return MDIO_MMD_PCS_MV_LED_FUNC_CTRL_TX;
+ case BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX):
+ return MDIO_MMD_PCS_MV_LED_FUNC_CTRL_RX_TX;
+ case BIT(TRIGGER_NETDEV_LINK) | BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX):
+ return MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK_RX_TX;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mv88q2xxx_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ int mode;
+
+ mode = mv88q2xxx_led_mode(index, rules);
+ if (mode < 0)
+ return mode;
+
+ return 0;
+}
+
+static int mv88q2xxx_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ int mode;
+
+ mode = mv88q2xxx_led_mode(index, rules);
+ if (mode < 0)
+ return mode;
+
+ if (index == MV88Q2XXX_LED_INDEX_TX_ENABLE)
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_LED_FUNC_CTRL,
+ MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LED_0_MASK,
+ FIELD_PREP(MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LED_0_MASK,
+ mode));
+ else
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_LED_FUNC_CTRL,
+ MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LED_1_MASK,
+ FIELD_PREP(MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LED_1_MASK,
+ mode));
+}
+
+static int mv88q2xxx_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_LED_FUNC_CTRL);
+ if (val < 0)
+ return val;
+
+ if (index == MV88Q2XXX_LED_INDEX_TX_ENABLE)
+ val = FIELD_GET(MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LED_0_MASK, val);
+ else
+ val = FIELD_GET(MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LED_1_MASK, val);
+
+ switch (val) {
+ case MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK:
+ *rules = BIT(TRIGGER_NETDEV_LINK);
+ break;
+ case MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK_1000BT1_ON:
+ *rules = BIT(TRIGGER_NETDEV_LINK_1000);
+ break;
+ case MDIO_MMD_PCS_MV_LED_FUNC_CTRL_TX:
+ *rules = BIT(TRIGGER_NETDEV_TX);
+ break;
+ case MDIO_MMD_PCS_MV_LED_FUNC_CTRL_RX_TX:
+ *rules = BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX);
+ break;
+ case MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK_RX_TX:
+ *rules = BIT(TRIGGER_NETDEV_LINK) | BIT(TRIGGER_NETDEV_TX) |
+ BIT(TRIGGER_NETDEV_RX);
+ break;
+ default:
+ *rules = 0;
+ break;
+ }
+
+ return 0;
+}
+
static struct phy_driver mv88q2xxx_driver[] = {
{
.phy_id = MARVELL_PHY_ID_88Q2110,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "mv88q2110",
+ .probe = mv88q2xxx_probe,
.get_features = mv88q2xxx_get_features,
.config_aneg = mv88q2xxx_config_aneg,
.config_init = mv88q2110_config_init,
@@ -934,12 +1135,15 @@ static struct phy_driver mv88q2xxx_driver[] = {
.get_sqi_max = mv88q2xxx_get_sqi_max,
.suspend = mv88q2xxx_suspend,
.resume = mv88q2xxx_resume,
+ .led_hw_is_supported = mv88q2xxx_led_hw_is_supported,
+ .led_hw_control_set = mv88q2xxx_led_hw_control_set,
+ .led_hw_control_get = mv88q2xxx_led_hw_control_get,
},
};
module_phy_driver(mv88q2xxx_driver);
-static struct mdio_device_id __maybe_unused mv88q2xxx_tbl[] = {
+static const struct mdio_device_id __maybe_unused mv88q2xxx_tbl[] = {
{ MARVELL_PHY_ID_88Q2110, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88Q2220, MARVELL_PHY_ID_MASK },
{ /*sentinel*/ }
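
The LED hw-control callbacks added above pack one 4-bit mode per LED into MDIO_MMD_PCS_MV_LED_FUNC_CTRL using FIELD_PREP()/FIELD_GET() with the LED_0/LED_1 masks. A minimal standalone sketch of that packing (plain userspace C, mask values copied from the defines above; not part of the patch):

/* Standalone sketch: manual equivalent of FIELD_PREP() for the two
 * 4-bit LED mode fields of MDIO_MMD_PCS_MV_LED_FUNC_CTRL.
 */
#include <stdint.h>
#include <stdio.h>

#define LED_1_MASK 0x00f0	/* GENMASK(7, 4) */
#define LED_0_MASK 0x000f	/* GENMASK(3, 0) */

static uint16_t led_func_ctrl_pack(uint16_t reg, int index, uint16_t mode)
{
	uint16_t mask = index ? LED_1_MASK : LED_0_MASK;
	unsigned int shift = index ? 4 : 0;

	return (reg & ~mask) | ((mode << shift) & mask);
}

int main(void)
{
	uint16_t reg = 0;

	reg = led_func_ctrl_pack(reg, 0, 0x0);	/* LED0: link established */
	reg = led_func_ctrl_pack(reg, 1, 0x4);	/* LED1: blink on rx/tx */
	printf("LED_FUNC_CTRL = 0x%04x\n", (unsigned int)reg);	/* 0x0040 */
	return 0;
}
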
diff --git a/drivers/net/phy/marvell-88x2222.c b/drivers/net/phy/marvell-88x2222.c
index 0b777cdd7078..894bcee61e65 100644
--- a/drivers/net/phy/marvell-88x2222.c
+++ b/drivers/net/phy/marvell-88x2222.c
@@ -475,21 +475,20 @@ static int mv2222_config_init(struct phy_device *phydev)
static int mv2222_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
{
- DECLARE_PHY_INTERFACE_MASK(interfaces);
struct phy_device *phydev = upstream;
+ const struct sfp_module_caps *caps;
phy_interface_t sfp_interface;
struct mv2222_data *priv;
struct device *dev;
int ret;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_supported) = { 0, };
-
priv = phydev->priv;
dev = &phydev->mdio.dev;
- sfp_parse_support(phydev->sfp_bus, id, sfp_supported, interfaces);
- phydev->port = sfp_parse_port(phydev->sfp_bus, id, sfp_supported);
- sfp_interface = sfp_select_interface(phydev->sfp_bus, sfp_supported);
+ caps = sfp_get_module_caps(phydev->sfp_bus);
+
+ phydev->port = caps->port;
+ sfp_interface = sfp_select_interface(phydev->sfp_bus, caps->link_modes);
dev_info(dev, "%s SFP module inserted\n", phy_modes(sfp_interface));
@@ -502,7 +501,7 @@ static int mv2222_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
}
priv->line_interface = sfp_interface;
- linkmode_and(priv->supported, phydev->supported, sfp_supported);
+ linkmode_and(priv->supported, phydev->supported, caps->link_modes);
ret = mv2222_config_line(phydev);
if (ret < 0)
@@ -613,7 +612,7 @@ static struct phy_driver mv2222_drivers[] = {
};
module_phy_driver(mv2222_drivers);
-static struct mdio_device_id __maybe_unused mv2222_tbl[] = {
+static const struct mdio_device_id __maybe_unused mv2222_tbl[] = {
{ MARVELL_PHY_ID_88X2222, MARVELL_PHY_ID_MASK },
{ }
};
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index cd50cd6a7f75..c248c90510ae 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -717,6 +717,48 @@ static int marvell_config_aneg_fiber(struct phy_device *phydev)
return genphy_check_and_restart_aneg(phydev, changed);
}
+static unsigned int m88e1111_inband_caps(struct phy_device *phydev,
+ phy_interface_t interface)
+{
+ /* In 1000base-X and SGMII modes, the inband mode can be changed
+ * through the Fibre page BMCR ANENABLE bit.
+ */
+ if (interface == PHY_INTERFACE_MODE_1000BASEX ||
+ interface == PHY_INTERFACE_MODE_SGMII)
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE |
+ LINK_INBAND_BYPASS;
+
+ return 0;
+}
+
+static int m88e1111_config_inband(struct phy_device *phydev, unsigned int modes)
+{
+ u16 extsr, bmcr;
+ int err;
+
+ if (phydev->interface != PHY_INTERFACE_MODE_1000BASEX &&
+ phydev->interface != PHY_INTERFACE_MODE_SGMII)
+ return -EINVAL;
+
+ if (modes == LINK_INBAND_BYPASS)
+ extsr = MII_M1111_HWCFG_SERIAL_AN_BYPASS;
+ else
+ extsr = 0;
+
+ if (modes == LINK_INBAND_DISABLE)
+ bmcr = 0;
+ else
+ bmcr = BMCR_ANENABLE;
+
+ err = phy_modify(phydev, MII_M1111_PHY_EXT_SR,
+ MII_M1111_HWCFG_SERIAL_AN_BYPASS, extsr);
+ if (err < 0)
+ return err;
+
+ return phy_modify_paged(phydev, MII_MARVELL_FIBER_PAGE, MII_BMCR,
+ BMCR_ANENABLE, bmcr);
+}
+
static int m88e1111_config_aneg(struct phy_device *phydev)
{
int extsr = phy_read(phydev, MII_M1111_PHY_EXT_SR);
@@ -1508,7 +1550,6 @@ static int m88e1540_get_fld(struct phy_device *phydev, u8 *msecs)
static int m88e1540_set_fld(struct phy_device *phydev, const u8 *msecs)
{
- struct ethtool_keee eee;
int val, ret;
if (*msecs == ETHTOOL_PHY_FAST_LINK_DOWN_OFF)
@@ -1518,8 +1559,7 @@ static int m88e1540_set_fld(struct phy_device *phydev, const u8 *msecs)
/* According to the Marvell data sheet EEE must be disabled for
* Fast Link Down detection to work properly
*/
- ret = genphy_c45_ethtool_get_eee(phydev, &eee);
- if (!ret && eee.eee_enabled) {
+ if (phydev->eee_cfg.eee_enabled) {
phydev_warn(phydev, "Fast Link Down detection requires EEE to be disabled!\n");
return -EBUSY;
}
@@ -1862,6 +1902,43 @@ error:
return err;
}
+/* m88e1510_resume
+ *
+ * The 88e1510 PHY has an erratum where the PHY downshift counter is not cleared
+ * after the PHY is suspended (BMCR_PDOWN set) and later resumed (BMCR_PDOWN
+ * cleared). This can cause the link to intermittently downshift to a lower speed.
+ *
+ * Disabling and re-enabling the downshift feature clears the counter, allowing
+ * the PHY to retry gigabit link negotiation up to the programmed retry count
+ * before downshifting. This behavior has been observed on copper links.
+ */
+static int m88e1510_resume(struct phy_device *phydev)
+{
+ int err;
+ u8 cnt = 0;
+
+ err = marvell_resume(phydev);
+ if (err < 0)
+ return err;
+
+ /* read downshift counter value */
+ err = m88e1011_get_downshift(phydev, &cnt);
+ if (err < 0)
+ return err;
+
+ if (cnt) {
+ /* disable downshift to clear the counter */
+ err = m88e1011_set_downshift(phydev, 0);
+ if (err < 0)
+ return err;
+
+ /* re-enable downshift with the previous counter value */
+ err = m88e1011_set_downshift(phydev, cnt);
+ }
+
+ return err;
+}
+
static int marvell_aneg_done(struct phy_device *phydev)
{
int retval = phy_read(phydev, MII_M1011_PHY_STATUS);
@@ -2091,52 +2168,52 @@ static void marvell_get_stats_simple(struct phy_device *phydev,
data[i] = marvell_get_stat_simple(phydev, i);
}
-static int m88e1510_loopback(struct phy_device *phydev, bool enable)
+static int m88e1510_loopback(struct phy_device *phydev, bool enable, int speed)
{
+ u16 bmcr_ctl, mscr2_ctl = 0;
int err;
- if (enable) {
- u16 bmcr_ctl, mscr2_ctl = 0;
-
- bmcr_ctl = mii_bmcr_encode_fixed(phydev->speed, phydev->duplex);
+ if (!enable)
+ return genphy_loopback(phydev, enable, 0);
- err = phy_write(phydev, MII_BMCR, bmcr_ctl);
- if (err < 0)
- return err;
+ if (speed == SPEED_10 || speed == SPEED_100 || speed == SPEED_1000)
+ phydev->speed = speed;
+ else if (speed)
+ return -EINVAL;
- if (phydev->speed == SPEED_1000)
- mscr2_ctl = BMCR_SPEED1000;
- else if (phydev->speed == SPEED_100)
- mscr2_ctl = BMCR_SPEED100;
+ bmcr_ctl = mii_bmcr_encode_fixed(phydev->speed, phydev->duplex);
- err = phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE,
- MII_88E1510_MSCR_2, BMCR_SPEED1000 |
- BMCR_SPEED100, mscr2_ctl);
- if (err < 0)
- return err;
+ err = phy_write(phydev, MII_BMCR, bmcr_ctl);
+ if (err < 0)
+ return err;
- /* Need soft reset to have speed configuration takes effect */
- err = genphy_soft_reset(phydev);
- if (err < 0)
- return err;
+ if (phydev->speed == SPEED_1000)
+ mscr2_ctl = BMCR_SPEED1000;
+ else if (phydev->speed == SPEED_100)
+ mscr2_ctl = BMCR_SPEED100;
- err = phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK,
- BMCR_LOOPBACK);
+ err = phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE,
+ MII_88E1510_MSCR_2, BMCR_SPEED1000 |
+ BMCR_SPEED100, mscr2_ctl);
+ if (err < 0)
+ return err;
- if (!err) {
- /* It takes some time for PHY device to switch
- * into/out-of loopback mode.
- */
- msleep(1000);
- }
+ /* Need a soft reset for the speed configuration to take effect */
+ err = genphy_soft_reset(phydev);
+ if (err < 0)
return err;
- } else {
- err = phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK, 0);
- if (err < 0)
- return err;
- return phy_config_aneg(phydev);
+ err = phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK,
+ BMCR_LOOPBACK);
+
+ if (!err) {
+ /*
+ * It takes some time for the PHY device to switch into loopback
+ * mode.
+ */
+ msleep(1000);
}
+ return err;
}
static int marvell_vct5_wait_complete(struct phy_device *phydev)
@@ -3084,33 +3161,13 @@ static umode_t marvell_hwmon_is_visible(const void *data,
}
}
-static u32 marvell_hwmon_chip_config[] = {
- HWMON_C_REGISTER_TZ,
- 0
-};
-
-static const struct hwmon_channel_info marvell_hwmon_chip = {
- .type = hwmon_chip,
- .config = marvell_hwmon_chip_config,
-};
-
/* we can define HWMON_T_CRIT and HWMON_T_MAX_ALARM even though these are not
* defined for all PHYs, because the hwmon code checks whether the attributes
* exists via the .is_visible method
*/
-static u32 marvell_hwmon_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_MAX_ALARM,
- 0
-};
-
-static const struct hwmon_channel_info marvell_hwmon_temp = {
- .type = hwmon_temp,
- .config = marvell_hwmon_temp_config,
-};
-
static const struct hwmon_channel_info * const marvell_hwmon_info[] = {
- &marvell_hwmon_chip,
- &marvell_hwmon_temp,
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_MAX_ALARM),
NULL
};
@@ -3543,20 +3600,18 @@ static int marvell_probe(struct phy_device *phydev)
static int m88e1510_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
{
- DECLARE_PHY_INTERFACE_MASK(interfaces);
struct phy_device *phydev = upstream;
+ const struct sfp_module_caps *caps;
phy_interface_t interface;
struct device *dev;
int oldpage;
int ret = 0;
u16 mode;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
-
dev = &phydev->mdio.dev;
- sfp_parse_support(phydev->sfp_bus, id, supported, interfaces);
- interface = sfp_select_interface(phydev->sfp_bus, supported);
+ caps = sfp_get_module_caps(phydev->sfp_bus);
+ interface = sfp_select_interface(phydev->sfp_bus, caps->link_modes);
dev_info(dev, "%s SFP module inserted\n", phy_modes(interface));
@@ -3677,6 +3732,8 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E1112",
/* PHY_GBIT_FEATURES */
.probe = marvell_probe,
+ .inband_caps = m88e1111_inband_caps,
+ .config_inband = m88e1111_config_inband,
.config_init = m88e1112_config_init,
.config_aneg = marvell_config_aneg,
.config_intr = marvell_config_intr,
@@ -3698,6 +3755,8 @@ static struct phy_driver marvell_drivers[] = {
/* PHY_GBIT_FEATURES */
.flags = PHY_POLL_CABLE_TEST,
.probe = marvell_probe,
+ .inband_caps = m88e1111_inband_caps,
+ .config_inband = m88e1111_config_inband,
.config_init = m88e1111gbe_config_init,
.config_aneg = m88e1111_config_aneg,
.read_status = marvell_read_status,
@@ -3721,6 +3780,8 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E1111 (Finisar)",
/* PHY_GBIT_FEATURES */
.probe = marvell_probe,
+ .inband_caps = m88e1111_inband_caps,
+ .config_inband = m88e1111_config_inband,
.config_init = m88e1111gbe_config_init,
.config_aneg = m88e1111_config_aneg,
.read_status = marvell_read_status,
@@ -3897,7 +3958,7 @@ static struct phy_driver marvell_drivers[] = {
.handle_interrupt = marvell_handle_interrupt,
.get_wol = m88e1318_get_wol,
.set_wol = m88e1318_set_wol,
- .resume = marvell_resume,
+ .resume = m88e1510_resume,
.suspend = marvell_suspend,
.read_page = marvell_read_page,
.write_page = marvell_write_page,
@@ -4143,7 +4204,7 @@ static struct phy_driver marvell_drivers[] = {
module_phy_driver(marvell_drivers);
-static struct mdio_device_id __maybe_unused marvell_tbl[] = {
+static const struct mdio_device_id __maybe_unused marvell_tbl[] = {
{ MARVELL_PHY_ID_88E1101, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E3082, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1112, MARVELL_PHY_ID_MASK },
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 6642eb642d4b..8fd42131cdbf 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -230,29 +230,9 @@ static const struct hwmon_ops mv3310_hwmon_ops = {
.read = mv3310_hwmon_read,
};
-static u32 mv3310_hwmon_chip_config[] = {
- HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL,
- 0,
-};
-
-static const struct hwmon_channel_info mv3310_hwmon_chip = {
- .type = hwmon_chip,
- .config = mv3310_hwmon_chip_config,
-};
-
-static u32 mv3310_hwmon_temp_config[] = {
- HWMON_T_INPUT,
- 0,
-};
-
-static const struct hwmon_channel_info mv3310_hwmon_temp = {
- .type = hwmon_temp,
- .config = mv3310_hwmon_temp_config,
-};
-
static const struct hwmon_channel_info * const mv3310_hwmon_info[] = {
- &mv3310_hwmon_chip,
- &mv3310_hwmon_temp,
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
NULL,
};
@@ -486,12 +466,11 @@ static int mv3310_set_edpd(struct phy_device *phydev, u16 edpd)
static int mv3310_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
{
struct phy_device *phydev = upstream;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
- DECLARE_PHY_INTERFACE_MASK(interfaces);
+ const struct sfp_module_caps *caps;
phy_interface_t iface;
- sfp_parse_support(phydev->sfp_bus, id, support, interfaces);
- iface = sfp_select_interface(phydev->sfp_bus, support);
+ caps = sfp_get_module_caps(phydev->sfp_bus);
+ iface = sfp_select_interface(phydev->sfp_bus, caps->link_modes);
if (iface != PHY_INTERFACE_MODE_10GBASER) {
dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
@@ -1284,7 +1263,8 @@ static int mv3310_get_number_of_ports(struct phy_device *phydev)
return ret + 1;
}
-static int mv3310_match_phy_device(struct phy_device *phydev)
+static int mv3310_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
if ((phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] &
MARVELL_PHY_ID_MASK) != MARVELL_PHY_ID_88X3310)
@@ -1293,7 +1273,8 @@ static int mv3310_match_phy_device(struct phy_device *phydev)
return mv3310_get_number_of_ports(phydev) == 1;
}
-static int mv3340_match_phy_device(struct phy_device *phydev)
+static int mv3340_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
if ((phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] &
MARVELL_PHY_ID_MASK) != MARVELL_PHY_ID_88X3310)
@@ -1317,12 +1298,14 @@ static int mv211x_match_phy_device(struct phy_device *phydev, bool has_5g)
return !!(val & MDIO_PCS_SPEED_5G) == has_5g;
}
-static int mv2110_match_phy_device(struct phy_device *phydev)
+static int mv2110_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return mv211x_match_phy_device(phydev, true);
}
-static int mv2111_match_phy_device(struct phy_device *phydev)
+static int mv2111_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return mv211x_match_phy_device(phydev, false);
}
@@ -1484,7 +1467,7 @@ static struct phy_driver mv3310_drivers[] = {
module_phy_driver(mv3310_drivers);
-static struct mdio_device_id __maybe_unused mv3310_tbl[] = {
+static const struct mdio_device_id __maybe_unused mv3310_tbl[] = {
{ MARVELL_PHY_ID_88X3310, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E2110, MARVELL_PHY_ID_MASK },
{ },
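
The .match_phy_device prototype change above threads the matching phy_driver through to the callback; the Marvell implementations ignore it, but a driver can use it to avoid hard-coding IDs. A hypothetical example (not from this patch):

/* Hypothetical use of the new parameter: match on the driver entry's own
 * phy_id/phy_id_mask pair instead of a hard-coded constant.
 */
static int example_match_phy_device(struct phy_device *phydev,
				    const struct phy_driver *phydrv)
{
	return phy_id_compare(phydev->phy_id, phydrv->phy_id,
			      phydrv->phy_id_mask);
}
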
diff --git a/drivers/net/phy/mdio-boardinfo.c b/drivers/net/phy/mdio-boardinfo.c
deleted file mode 100644
index 2de679a68115..000000000000
--- a/drivers/net/phy/mdio-boardinfo.c
+++ /dev/null
@@ -1,80 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * mdio-boardinfo - Collect pre-declarations for MDIO devices
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <linux/mutex.h>
-#include <linux/list.h>
-
-#include "mdio-boardinfo.h"
-
-static LIST_HEAD(mdio_board_list);
-static DEFINE_MUTEX(mdio_board_lock);
-
-/**
- * mdiobus_setup_mdiodev_from_board_info - create and setup MDIO devices
- * from pre-collected board specific MDIO information
- * @bus: Bus the board_info belongs to
- * @cb: Callback to create device on bus
- * Context: can sleep
- */
-void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus,
- int (*cb)
- (struct mii_bus *bus,
- struct mdio_board_info *bi))
-{
- struct mdio_board_entry *be;
- struct mdio_board_entry *tmp;
- struct mdio_board_info *bi;
- int ret;
-
- mutex_lock(&mdio_board_lock);
- list_for_each_entry_safe(be, tmp, &mdio_board_list, list) {
- bi = &be->board_info;
-
- if (strcmp(bus->id, bi->bus_id))
- continue;
-
- mutex_unlock(&mdio_board_lock);
- ret = cb(bus, bi);
- mutex_lock(&mdio_board_lock);
- if (ret)
- continue;
-
- }
- mutex_unlock(&mdio_board_lock);
-}
-EXPORT_SYMBOL(mdiobus_setup_mdiodev_from_board_info);
-
-/**
- * mdiobus_register_board_info - register MDIO devices for a given board
- * @info: array of devices descriptors
- * @n: number of descriptors provided
- * Context: can sleep
- *
- * The board info passed can be marked with __initdata but be pointers
- * such as platform_data etc. are copied as-is
- */
-int mdiobus_register_board_info(const struct mdio_board_info *info,
- unsigned int n)
-{
- struct mdio_board_entry *be;
- unsigned int i;
-
- be = kcalloc(n, sizeof(*be), GFP_KERNEL);
- if (!be)
- return -ENOMEM;
-
- for (i = 0; i < n; i++, be++, info++) {
- memcpy(&be->board_info, info, sizeof(*info));
- mutex_lock(&mdio_board_lock);
- list_add_tail(&be->list, &mdio_board_list);
- mutex_unlock(&mdio_board_lock);
- }
-
- return 0;
-}
-EXPORT_SYMBOL(mdiobus_register_board_info);
diff --git a/drivers/net/phy/mdio-boardinfo.h b/drivers/net/phy/mdio-boardinfo.h
deleted file mode 100644
index 773bb51399be..000000000000
--- a/drivers/net/phy/mdio-boardinfo.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * mdio-boardinfo.h - board info interface internal to the mdio_bus
- * component
- */
-
-#ifndef __MDIO_BOARD_INFO_H
-#define __MDIO_BOARD_INFO_H
-
-#include <linux/phy.h>
-#include <linux/mutex.h>
-
-struct mdio_board_entry {
- struct list_head list;
- struct mdio_board_info board_info;
-};
-
-void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus,
- int (*cb)
- (struct mii_bus *bus,
- struct mdio_board_info *bi));
-
-#endif /* __MDIO_BOARD_INFO_H */
diff --git a/drivers/net/phy/mdio-open-alliance.h b/drivers/net/phy/mdio-open-alliance.h
index 931e14660d75..449d0fb67093 100644
--- a/drivers/net/phy/mdio-open-alliance.h
+++ b/drivers/net/phy/mdio-open-alliance.h
@@ -43,4 +43,53 @@
/* Version Identifiers */
#define OATC14_IDM 0x0a00
+/*
+ * Open Alliance TC14 (10BASE-T1S) - Advanced Diagnostic Features Registers
+ *
+ * Refer to the OPEN Alliance documentation:
+ * https://opensig.org/automotive-ethernet-specifications/
+ *
+ * Specification:
+ * "10BASE-T1S Advanced Diagnostic PHY Features"
+ * https://opensig.org/wp-content/uploads/2025/06/OPEN_Alliance_10BASE-T1S_Advanced_PHY_features_for-automotive_Ethernet_V2.1b.pdf
+ */
+/* Advanced Diagnostic Features Capability Register */
+#define MDIO_OATC14_ADFCAP 0xcc00
+#define OATC14_ADFCAP_HDD_CAPABILITY GENMASK(10, 8)
+#define OATC14_ADFCAP_SQIPLUS_CAPABILITY GENMASK(4, 1)
+#define OATC14_ADFCAP_SQI_CAPABILITY BIT(0)
+
+/* Harness Defect Detection Register */
+#define MDIO_OATC14_HDD 0xcc01
+#define OATC14_HDD_CONTROL BIT(15)
+#define OATC14_HDD_READY BIT(14)
+#define OATC14_HDD_START_CONTROL BIT(13)
+#define OATC14_HDD_VALID BIT(2)
+#define OATC14_HDD_SHORT_OPEN_STATUS GENMASK(1, 0)
+
+/* Dynamic Channel Quality SQI Register */
+#define MDIO_OATC14_DCQ_SQI 0xcc03
+#define OATC14_DCQ_SQI_VALUE GENMASK(2, 0)
+
+/* Dynamic Channel Quality SQI Plus Register */
+#define MDIO_OATC14_DCQ_SQIPLUS 0xcc04
+#define OATC14_DCQ_SQIPLUS_VALUE GENMASK(7, 0)
+
+/* SQI is reported using 3 bits, i.e. 8 levels (0-7) */
+#define OATC14_SQI_MAX_LEVEL 7
+
+/* Bus Short/Open Status:
+ * 0 0 - no fault; everything is ok. (Default)
+ * 0 1 - detected as an open or missing termination(s)
+ * 1 0 - detected as a short or extra termination(s)
+ * 1 1 - fault, but the fault type is not detectable. More details may be
+ * available in a vendor-specific register, if supported.
+ */
+enum oatc14_hdd_status {
+ OATC14_HDD_STATUS_CABLE_OK = 0,
+ OATC14_HDD_STATUS_OPEN,
+ OATC14_HDD_STATUS_SHORT,
+ OATC14_HDD_STATUS_NOT_DETECTABLE,
+};
+
#endif /* __MDIO_OPEN_ALLIANCE__ */
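
A consumer of these new OA TC14 definitions would typically poll MDIO_OATC14_HDD until OATC14_HDD_VALID is set and then decode the two status bits into enum oatc14_hdd_status. A rough sketch, assuming the TC14 registers sit in the vendor-2 MMD as with the existing PLCA registers (the helper name is hypothetical):

/* Hypothetical helper, not part of this patch: read the harness defect
 * detection result and map it onto enum oatc14_hdd_status.
 */
static int oatc14_hdd_read_status(struct phy_device *phydev)
{
	int val;

	val = phy_read_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_HDD);
	if (val < 0)
		return val;

	if (!(val & OATC14_HDD_VALID))
		return -EAGAIN;	/* measurement not complete yet */

	/* 0 = cable ok, 1 = open, 2 = short, 3 = fault type not detectable */
	return FIELD_GET(OATC14_HDD_SHORT_OPEN_STATUS, val);
}
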
diff --git a/drivers/net/phy/mdio-private.h b/drivers/net/phy/mdio-private.h
new file mode 100644
index 000000000000..8bc6d9088af1
--- /dev/null
+++ b/drivers/net/phy/mdio-private.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef __MDIO_PRIVATE_H
+#define __MDIO_PRIVATE_H
+
+/* MDIO internal helpers
+ */
+
+int mdio_device_register_reset(struct mdio_device *mdiodev);
+void mdio_device_unregister_reset(struct mdio_device *mdiodev);
+
+#endif /* __MDIO_PRIVATE_H */
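
The two declarations above replace the reset-GPIO and reset-controller setup that the following mdio_bus.c hunk removes; one plausible implementation, reconstructed from that removed code (the real body presumably lives in mdio_device.c and may differ):

/* Sketch only: folds together the former mdiobus_register_gpiod() and
 * mdiobus_register_reset() bodies removed from mdio_bus.c.
 */
int mdio_device_register_reset(struct mdio_device *mdiodev)
{
	struct reset_control *reset;

	/* Deassert the optional reset signal */
	mdiodev->reset_gpio = gpiod_get_optional(&mdiodev->dev, "reset",
						 GPIOD_OUT_LOW);
	if (IS_ERR(mdiodev->reset_gpio))
		return PTR_ERR(mdiodev->reset_gpio);

	if (mdiodev->reset_gpio)
		gpiod_set_consumer_name(mdiodev->reset_gpio, "PHY reset");

	reset = reset_control_get_optional_exclusive(&mdiodev->dev, "phy");
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	mdiodev->reset_ctrl = reset;

	return 0;
}
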
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 7e2f10182c0c..afdf1ad6c0e6 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -8,17 +8,14 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
-#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/micrel_phy.h>
#include <linux/mii.h>
#include <linux/mm.h>
#include <linux/module.h>
@@ -27,45 +24,16 @@
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/reset.h>
-#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/unistd.h>
+#include "mdio-private.h"
#define CREATE_TRACE_POINTS
#include <trace/events/mdio.h>
-#include "mdio-boardinfo.h"
-
-static int mdiobus_register_gpiod(struct mdio_device *mdiodev)
-{
- /* Deassert the optional reset signal */
- mdiodev->reset_gpio = gpiod_get_optional(&mdiodev->dev,
- "reset", GPIOD_OUT_LOW);
- if (IS_ERR(mdiodev->reset_gpio))
- return PTR_ERR(mdiodev->reset_gpio);
-
- if (mdiodev->reset_gpio)
- gpiod_set_consumer_name(mdiodev->reset_gpio, "PHY reset");
-
- return 0;
-}
-
-static int mdiobus_register_reset(struct mdio_device *mdiodev)
-{
- struct reset_control *reset;
-
- reset = reset_control_get_optional_exclusive(&mdiodev->dev, "phy");
- if (IS_ERR(reset))
- return PTR_ERR(reset);
-
- mdiodev->reset_ctrl = reset;
-
- return 0;
-}
-
int mdiobus_register_device(struct mdio_device *mdiodev)
{
int err;
@@ -74,11 +42,7 @@ int mdiobus_register_device(struct mdio_device *mdiodev)
return -EBUSY;
if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY) {
- err = mdiobus_register_gpiod(mdiodev);
- if (err)
- return err;
-
- err = mdiobus_register_reset(mdiodev);
+ err = mdio_device_register_reset(mdiodev);
if (err)
return err;
@@ -97,7 +61,7 @@ int mdiobus_unregister_device(struct mdio_device *mdiodev)
if (mdiodev->bus->mdio_map[mdiodev->addr] != mdiodev)
return -EINVAL;
- reset_control_put(mdiodev->reset_ctrl);
+ mdio_device_unregister_reset(mdiodev);
mdiodev->bus->mdio_map[mdiodev->addr] = NULL;
@@ -137,45 +101,6 @@ bool mdiobus_is_registered_device(struct mii_bus *bus, int addr)
EXPORT_SYMBOL(mdiobus_is_registered_device);
/**
- * mdiobus_alloc_size - allocate a mii_bus structure
- * @size: extra amount of memory to allocate for private storage.
- * If non-zero, then bus->priv is points to that memory.
- *
- * Description: called by a bus driver to allocate an mii_bus
- * structure to fill in.
- */
-struct mii_bus *mdiobus_alloc_size(size_t size)
-{
- struct mii_bus *bus;
- size_t aligned_size = ALIGN(sizeof(*bus), NETDEV_ALIGN);
- size_t alloc_size;
- int i;
-
- /* If we alloc extra space, it should be aligned */
- if (size)
- alloc_size = aligned_size + size;
- else
- alloc_size = sizeof(*bus);
-
- bus = kzalloc(alloc_size, GFP_KERNEL);
- if (!bus)
- return NULL;
-
- bus->state = MDIOBUS_ALLOCATED;
- if (size)
- bus->priv = (void *)bus + aligned_size;
-
- /* Initialise the interrupts to polling and 64-bit seqcounts */
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- bus->irq[i] = PHY_POLL;
- u64_stats_init(&bus->stats[i].syncp);
- }
-
- return bus;
-}
-EXPORT_SYMBOL(mdiobus_alloc_size);
-
-/**
* mdiobus_release - mii_bus device release callback
* @d: the target struct device that contains the mii_bus
*
@@ -403,17 +328,18 @@ static const struct attribute_group *mdio_bus_groups[] = {
NULL,
};
-static struct class mdio_bus_class = {
+const struct class mdio_bus_class = {
.name = "mdio_bus",
.dev_release = mdiobus_release,
.dev_groups = mdio_bus_groups,
};
+EXPORT_SYMBOL_GPL(mdio_bus_class);
/**
* mdio_find_bus - Given the name of a mdiobus, find the mii_bus.
* @mdio_name: The name of a mdiobus.
*
- * Returns a reference to the mii_bus, or NULL if none found. The
+ * Return: a reference to the mii_bus, or NULL if none found. The
* embedded struct device will have its reference count incremented,
* and this must be put_deviced'ed once the bus is finished with.
*/
@@ -431,7 +357,7 @@ EXPORT_SYMBOL(mdio_find_bus);
* of_mdio_find_bus - Given an mii_bus node, find the mii_bus.
* @mdio_bus_np: Pointer to the mii_bus.
*
- * Returns a reference to the mii_bus, or NULL if none found. The
+ * Return: a reference to the mii_bus, or NULL if none found. The
* embedded struct device will have its reference count incremented,
* and this must be put once the bus is finished with.
*
@@ -451,408 +377,8 @@ struct mii_bus *of_mdio_find_bus(struct device_node *mdio_bus_np)
return d ? to_mii_bus(d) : NULL;
}
EXPORT_SYMBOL(of_mdio_find_bus);
-
-/* Walk the list of subnodes of a mdio bus and look for a node that
- * matches the mdio device's address with its 'reg' property. If
- * found, set the of_node pointer for the mdio device. This allows
- * auto-probed phy devices to be supplied with information passed in
- * via DT.
- * If a PHY package is found, PHY is searched also there.
- */
-static int of_mdiobus_find_phy(struct device *dev, struct mdio_device *mdiodev,
- struct device_node *np)
-{
- struct device_node *child;
-
- for_each_available_child_of_node(np, child) {
- int addr;
-
- if (of_node_name_eq(child, "ethernet-phy-package")) {
- /* Validate PHY package reg presence */
- if (!of_property_present(child, "reg")) {
- of_node_put(child);
- return -EINVAL;
- }
-
- if (!of_mdiobus_find_phy(dev, mdiodev, child)) {
- /* The refcount for the PHY package will be
- * incremented later when PHY join the Package.
- */
- of_node_put(child);
- return 0;
- }
-
- continue;
- }
-
- addr = of_mdio_parse_addr(dev, child);
- if (addr < 0)
- continue;
-
- if (addr == mdiodev->addr) {
- device_set_node(dev, of_fwnode_handle(child));
- /* The refcount on "child" is passed to the mdio
- * device. Do _not_ use of_node_put(child) here.
- */
- return 0;
- }
- }
-
- return -ENODEV;
-}
-
-static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
- struct mdio_device *mdiodev)
-{
- struct device *dev = &mdiodev->dev;
-
- if (dev->of_node || !bus->dev.of_node)
- return;
-
- of_mdiobus_find_phy(dev, mdiodev, bus->dev.of_node);
-}
-#else /* !IS_ENABLED(CONFIG_OF_MDIO) */
-static inline void of_mdiobus_link_mdiodev(struct mii_bus *mdio,
- struct mdio_device *mdiodev)
-{
-}
#endif
-/**
- * mdiobus_create_device - create a full MDIO device given
- * a mdio_board_info structure
- * @bus: MDIO bus to create the devices on
- * @bi: mdio_board_info structure describing the devices
- *
- * Returns 0 on success or < 0 on error.
- */
-static int mdiobus_create_device(struct mii_bus *bus,
- struct mdio_board_info *bi)
-{
- struct mdio_device *mdiodev;
- int ret = 0;
-
- mdiodev = mdio_device_create(bus, bi->mdio_addr);
- if (IS_ERR(mdiodev))
- return -ENODEV;
-
- strscpy(mdiodev->modalias, bi->modalias,
- sizeof(mdiodev->modalias));
- mdiodev->bus_match = mdio_device_bus_match;
- mdiodev->dev.platform_data = (void *)bi->platform_data;
-
- ret = mdio_device_register(mdiodev);
- if (ret)
- mdio_device_free(mdiodev);
-
- return ret;
-}
-
-static struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr, bool c45)
-{
- struct phy_device *phydev = ERR_PTR(-ENODEV);
- int err;
-
- phydev = get_phy_device(bus, addr, c45);
- if (IS_ERR(phydev))
- return phydev;
-
- /* For DT, see if the auto-probed phy has a corresponding child
- * in the bus node, and set the of_node pointer in this case.
- */
- of_mdiobus_link_mdiodev(bus, &phydev->mdio);
-
- err = phy_device_register(phydev);
- if (err) {
- phy_device_free(phydev);
- return ERR_PTR(-ENODEV);
- }
-
- return phydev;
-}
-
-/**
- * mdiobus_scan_c22 - scan one address on a bus for C22 MDIO devices.
- * @bus: mii_bus to scan
- * @addr: address on bus to scan
- *
- * This function scans one address on the MDIO bus, looking for
- * devices which can be identified using a vendor/product ID in
- * registers 2 and 3. Not all MDIO devices have such registers, but
- * PHY devices typically do. Hence this function assumes anything
- * found is a PHY, or can be treated as a PHY. Other MDIO devices,
- * such as switches, will probably not be found during the scan.
- */
-struct phy_device *mdiobus_scan_c22(struct mii_bus *bus, int addr)
-{
- return mdiobus_scan(bus, addr, false);
-}
-EXPORT_SYMBOL(mdiobus_scan_c22);
-
-/**
- * mdiobus_scan_c45 - scan one address on a bus for C45 MDIO devices.
- * @bus: mii_bus to scan
- * @addr: address on bus to scan
- *
- * This function scans one address on the MDIO bus, looking for
- * devices which can be identified using a vendor/product ID in
- * registers 2 and 3. Not all MDIO devices have such registers, but
- * PHY devices typically do. Hence this function assumes anything
- * found is a PHY, or can be treated as a PHY. Other MDIO devices,
- * such as switches, will probably not be found during the scan.
- */
-static struct phy_device *mdiobus_scan_c45(struct mii_bus *bus, int addr)
-{
- return mdiobus_scan(bus, addr, true);
-}
-
-static int mdiobus_scan_bus_c22(struct mii_bus *bus)
-{
- int i;
-
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- if ((bus->phy_mask & BIT(i)) == 0) {
- struct phy_device *phydev;
-
- phydev = mdiobus_scan_c22(bus, i);
- if (IS_ERR(phydev) && (PTR_ERR(phydev) != -ENODEV))
- return PTR_ERR(phydev);
- }
- }
- return 0;
-}
-
-static int mdiobus_scan_bus_c45(struct mii_bus *bus)
-{
- int i;
-
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- if ((bus->phy_mask & BIT(i)) == 0) {
- struct phy_device *phydev;
-
- /* Don't scan C45 if we already have a C22 device */
- if (bus->mdio_map[i])
- continue;
-
- phydev = mdiobus_scan_c45(bus, i);
- if (IS_ERR(phydev) && (PTR_ERR(phydev) != -ENODEV))
- return PTR_ERR(phydev);
- }
- }
- return 0;
-}
-
-/* There are some C22 PHYs which do bad things when where is a C45
- * transaction on the bus, like accepting a read themselves, and
- * stomping over the true devices reply, to performing a write to
- * themselves which was intended for another device. Now that C22
- * devices have been found, see if any of them are bad for C45, and if we
- * should skip the C45 scan.
- */
-static bool mdiobus_prevent_c45_scan(struct mii_bus *bus)
-{
- int i;
-
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- struct phy_device *phydev;
- u32 oui;
-
- phydev = mdiobus_get_phy(bus, i);
- if (!phydev)
- continue;
- oui = phydev->phy_id >> 10;
-
- if (oui == MICREL_OUI)
- return true;
- }
- return false;
-}
-
-/**
- * __mdiobus_register - bring up all the PHYs on a given bus and attach them to bus
- * @bus: target mii_bus
- * @owner: module containing bus accessor functions
- *
- * Description: Called by a bus driver to bring up all the PHYs
- * on a given bus, and attach them to the bus. Drivers should use
- * mdiobus_register() rather than __mdiobus_register() unless they
- * need to pass a specific owner module. MDIO devices which are not
- * PHYs will not be brought up by this function. They are expected
- * to be explicitly listed in DT and instantiated by of_mdiobus_register().
- *
- * Returns 0 on success or < 0 on error.
- */
-int __mdiobus_register(struct mii_bus *bus, struct module *owner)
-{
- struct mdio_device *mdiodev;
- struct gpio_desc *gpiod;
- bool prevent_c45_scan;
- int i, err;
-
- if (!bus || !bus->name)
- return -EINVAL;
-
- /* An access method always needs both read and write operations */
- if (!!bus->read != !!bus->write || !!bus->read_c45 != !!bus->write_c45)
- return -EINVAL;
-
- /* At least one method is mandatory */
- if (!bus->read && !bus->read_c45)
- return -EINVAL;
-
- if (bus->parent && bus->parent->of_node)
- bus->parent->of_node->fwnode.flags |=
- FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD;
-
- WARN(bus->state != MDIOBUS_ALLOCATED &&
- bus->state != MDIOBUS_UNREGISTERED,
- "%s: not in ALLOCATED or UNREGISTERED state\n", bus->id);
-
- bus->owner = owner;
- bus->dev.parent = bus->parent;
- bus->dev.class = &mdio_bus_class;
- bus->dev.groups = NULL;
- dev_set_name(&bus->dev, "%s", bus->id);
-
- /* If the bus state is allocated, we're registering a fresh bus
- * that may have a fwnode associated with it. Grab a reference
- * to the fwnode. This will be dropped when the bus is released.
- * If the bus was set to unregistered, it means that the bus was
- * previously registered, and we've already grabbed a reference.
- */
- if (bus->state == MDIOBUS_ALLOCATED)
- fwnode_handle_get(dev_fwnode(&bus->dev));
-
- /* We need to set state to MDIOBUS_UNREGISTERED to correctly release
- * the device in mdiobus_free()
- *
- * State will be updated later in this function in case of success
- */
- bus->state = MDIOBUS_UNREGISTERED;
-
- err = device_register(&bus->dev);
- if (err) {
- pr_err("mii_bus %s failed to register\n", bus->id);
- return -EINVAL;
- }
-
- mutex_init(&bus->mdio_lock);
- mutex_init(&bus->shared_lock);
-
- /* assert bus level PHY GPIO reset */
- gpiod = devm_gpiod_get_optional(&bus->dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(gpiod)) {
- err = dev_err_probe(&bus->dev, PTR_ERR(gpiod),
- "mii_bus %s couldn't get reset GPIO\n",
- bus->id);
- device_del(&bus->dev);
- return err;
- } else if (gpiod) {
- bus->reset_gpiod = gpiod;
- fsleep(bus->reset_delay_us);
- gpiod_set_value_cansleep(gpiod, 0);
- if (bus->reset_post_delay_us > 0)
- fsleep(bus->reset_post_delay_us);
- }
-
- if (bus->reset) {
- err = bus->reset(bus);
- if (err)
- goto error_reset_gpiod;
- }
-
- if (bus->read) {
- err = mdiobus_scan_bus_c22(bus);
- if (err)
- goto error;
- }
-
- prevent_c45_scan = mdiobus_prevent_c45_scan(bus);
-
- if (!prevent_c45_scan && bus->read_c45) {
- err = mdiobus_scan_bus_c45(bus);
- if (err)
- goto error;
- }
-
- mdiobus_setup_mdiodev_from_board_info(bus, mdiobus_create_device);
-
- bus->state = MDIOBUS_REGISTERED;
- dev_dbg(&bus->dev, "probed\n");
- return 0;
-
-error:
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- mdiodev = bus->mdio_map[i];
- if (!mdiodev)
- continue;
-
- mdiodev->device_remove(mdiodev);
- mdiodev->device_free(mdiodev);
- }
-error_reset_gpiod:
- /* Put PHYs in RESET to save power */
- if (bus->reset_gpiod)
- gpiod_set_value_cansleep(bus->reset_gpiod, 1);
-
- device_del(&bus->dev);
- return err;
-}
-EXPORT_SYMBOL(__mdiobus_register);
-
-void mdiobus_unregister(struct mii_bus *bus)
-{
- struct mdio_device *mdiodev;
- int i;
-
- if (WARN_ON_ONCE(bus->state != MDIOBUS_REGISTERED))
- return;
- bus->state = MDIOBUS_UNREGISTERED;
-
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- mdiodev = bus->mdio_map[i];
- if (!mdiodev)
- continue;
-
- if (mdiodev->reset_gpio)
- gpiod_put(mdiodev->reset_gpio);
-
- mdiodev->device_remove(mdiodev);
- mdiodev->device_free(mdiodev);
- }
-
- /* Put PHYs in RESET to save power */
- if (bus->reset_gpiod)
- gpiod_set_value_cansleep(bus->reset_gpiod, 1);
-
- device_del(&bus->dev);
-}
-EXPORT_SYMBOL(mdiobus_unregister);
-
-/**
- * mdiobus_free - free a struct mii_bus
- * @bus: mii_bus to free
- *
- * This function releases the reference to the underlying device
- * object in the mii_bus. If this is the last reference, the mii_bus
- * will be freed.
- */
-void mdiobus_free(struct mii_bus *bus)
-{
- /* For compatibility with error handling in drivers. */
- if (bus->state == MDIOBUS_ALLOCATED) {
- kfree(bus);
- return;
- }
-
- WARN(bus->state != MDIOBUS_UNREGISTERED,
- "%s: not in UNREGISTERED state\n", bus->id);
- bus->state = MDIOBUS_RELEASED;
-
- put_device(&bus->dev);
-}
-EXPORT_SYMBOL(mdiobus_free);
-
static void mdiobus_stats_acct(struct mdio_bus_stats *stats, bool op, int ret)
{
preempt_disable();
@@ -879,6 +405,8 @@ out:
* @addr: the phy address
* @regnum: register number to read
*
+ * Return: The register value if successful, negative error code on failure
+ *
* Read a MDIO bus register. Caller must hold the mdio bus lock.
*
* NOTE: MUST NOT be called from interrupt context.
@@ -889,6 +417,9 @@ int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
lockdep_assert_held_once(&bus->mdio_lock);
+ if (addr >= PHY_MAX_ADDR)
+ return -ENXIO;
+
if (bus->read)
retval = bus->read(bus, addr, regnum);
else
@@ -908,6 +439,8 @@ EXPORT_SYMBOL(__mdiobus_read);
* @regnum: register number to write
* @val: value to write to @regnum
*
+ * Return: Zero if successful, negative error code on failure
+ *
* Write a MDIO bus register. Caller must hold the mdio bus lock.
*
* NOTE: MUST NOT be called from interrupt context.
@@ -918,6 +451,9 @@ int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
lockdep_assert_held_once(&bus->mdio_lock);
+ if (addr >= PHY_MAX_ADDR)
+ return -ENXIO;
+
if (bus->write)
err = bus->write(bus, addr, regnum, val);
else
@@ -938,8 +474,11 @@ EXPORT_SYMBOL(__mdiobus_write);
* @mask: bit mask of bits to clear
* @set: bit mask of bits to set
*
+ * Return: 1 if the register was modified, 0 if no change was needed,
+ * negative on any error condition
+ *
* Read, modify, and if any change, write the register value back to the
- * device. Any error returns a negative number.
+ * device.
*
* NOTE: MUST NOT be called from interrupt context.
*/
@@ -969,6 +508,8 @@ EXPORT_SYMBOL_GPL(__mdiobus_modify_changed);
* @devad: device address to read
* @regnum: register number to read
*
+ * Return: The register value if successful, negative error code on failure
+ *
* Read a MDIO bus register. Caller must hold the mdio bus lock.
*
* NOTE: MUST NOT be called from interrupt context.
@@ -979,6 +520,9 @@ int __mdiobus_c45_read(struct mii_bus *bus, int addr, int devad, u32 regnum)
lockdep_assert_held_once(&bus->mdio_lock);
+ if (addr >= PHY_MAX_ADDR)
+ return -ENXIO;
+
if (bus->read_c45)
retval = bus->read_c45(bus, addr, devad, regnum);
else
@@ -999,6 +543,8 @@ EXPORT_SYMBOL(__mdiobus_c45_read);
* @regnum: register number to write
* @val: value to write to @regnum
*
+ * Return: Zero if successful, negative error code on failure
+ *
* Write a MDIO bus register. Caller must hold the mdio bus lock.
*
* NOTE: MUST NOT be called from interrupt context.
@@ -1010,6 +556,9 @@ int __mdiobus_c45_write(struct mii_bus *bus, int addr, int devad, u32 regnum,
lockdep_assert_held_once(&bus->mdio_lock);
+ if (addr >= PHY_MAX_ADDR)
+ return -ENXIO;
+
if (bus->write_c45)
err = bus->write_c45(bus, addr, devad, regnum, val);
else
@@ -1031,6 +580,9 @@ EXPORT_SYMBOL(__mdiobus_c45_write);
* @mask: bit mask of bits to clear
* @set: bit mask of bits to set
*
+ * Return: 1 if the register was modified, 0 if no change was needed,
+ * negative on any error condition
+ *
* Read, modify, and if any change, write the register value back to the
* device. Any error returns a negative number.
*
@@ -1061,6 +613,8 @@ static int __mdiobus_c45_modify_changed(struct mii_bus *bus, int addr,
* @addr: the phy address
* @regnum: register number to read
*
+ * Return: The register value if successful, negative error code on failure
+ *
* In case of nested MDIO bus access avoid lockdep false positives by
* using mutex_lock_nested().
*
@@ -1086,6 +640,8 @@ EXPORT_SYMBOL(mdiobus_read_nested);
* @addr: the phy address
* @regnum: register number to read
*
+ * Return: The register value if successful, negative error code on failure
+ *
* NOTE: MUST NOT be called from interrupt context,
* because the bus read/write functions may wait for an interrupt
* to conclude the operation.
@@ -1109,6 +665,8 @@ EXPORT_SYMBOL(mdiobus_read);
* @devad: device address to read
* @regnum: register number to read
*
+ * Return: The register value if successful, negative error code on failure
+ *
* NOTE: MUST NOT be called from interrupt context,
* because the bus read/write functions may wait for an interrupt
* to conclude the operation.
@@ -1132,6 +690,8 @@ EXPORT_SYMBOL(mdiobus_c45_read);
* @devad: device address to read
* @regnum: register number to read
*
+ * Return: The register value if successful, negative error code on failure
+ *
* In case of nested MDIO bus access avoid lockdep false positives by
* using mutex_lock_nested().
*
@@ -1159,6 +719,8 @@ EXPORT_SYMBOL(mdiobus_c45_read_nested);
* @regnum: register number to write
* @val: value to write to @regnum
*
+ * Return: Zero if successful, negative error code on failure
+ *
* In case of nested MDIO bus access avoid lockdep false positives by
* using mutex_lock_nested().
*
@@ -1185,6 +747,8 @@ EXPORT_SYMBOL(mdiobus_write_nested);
* @regnum: register number to write
* @val: value to write to @regnum
*
+ * Return: Zero if successful, negative error code on failure
+ *
* NOTE: MUST NOT be called from interrupt context,
* because the bus read/write functions may wait for an interrupt
* to conclude the operation.
@@ -1209,6 +773,8 @@ EXPORT_SYMBOL(mdiobus_write);
* @regnum: register number to write
* @val: value to write to @regnum
*
+ * Return: Zero if successful, negative error code on failure
+ *
* NOTE: MUST NOT be called from interrupt context,
* because the bus read/write functions may wait for an interrupt
* to conclude the operation.
@@ -1234,6 +800,8 @@ EXPORT_SYMBOL(mdiobus_c45_write);
* @regnum: register number to write
* @val: value to write to @regnum
*
+ * Return: Zero if successful, negative error code on failure
+ *
* In case of nested MDIO bus access avoid lockdep false positives by
* using mutex_lock_nested().
*
@@ -1262,6 +830,8 @@ EXPORT_SYMBOL(mdiobus_c45_write_nested);
* @regnum: register number to write
* @mask: bit mask of bits to clear
* @set: bit mask of bits to set
+ *
+ * Return: 0 on success, negative on any error condition
*/
int __mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask,
u16 set)
@@ -1282,6 +852,8 @@ EXPORT_SYMBOL_GPL(__mdiobus_modify);
* @regnum: register number to write
* @mask: bit mask of bits to clear
* @set: bit mask of bits to set
+ *
+ * Return: 0 on success, negative on any error condition
*/
int mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask, u16 set)
{
@@ -1304,6 +876,8 @@ EXPORT_SYMBOL_GPL(mdiobus_modify);
* @regnum: register number to write
* @mask: bit mask of bits to clear
* @set: bit mask of bits to set
+ *
+ * Return: 0 on success, negative on any error condition
*/
int mdiobus_c45_modify(struct mii_bus *bus, int addr, int devad, u32 regnum,
u16 mask, u16 set)
@@ -1327,6 +901,9 @@ EXPORT_SYMBOL_GPL(mdiobus_c45_modify);
* @regnum: register number to write
* @mask: bit mask of bits to clear
* @set: bit mask of bits to set
+ *
+ * Return: 1 if the register was modified, 0 if no change was needed,
+ * negative on any error condition
*/
int mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum,
u16 mask, u16 set)
@@ -1350,6 +927,9 @@ EXPORT_SYMBOL_GPL(mdiobus_modify_changed);
* @regnum: register number to write
* @mask: bit mask of bits to clear
* @set: bit mask of bits to set
+ *
+ * Return: 1 if the register was modified, 0 if no change was needed,
+ * negative on any error condition
*/
int mdiobus_c45_modify_changed(struct mii_bus *bus, int addr, int devad,
u32 regnum, u16 mask, u16 set)
@@ -1370,10 +950,10 @@ EXPORT_SYMBOL_GPL(mdiobus_c45_modify_changed);
* @dev: target MDIO device
* @drv: given MDIO driver
*
- * Description: Given a MDIO device, and a MDIO driver, return 1 if
- * the driver supports the device. Otherwise, return 0. This may
- * require calling the devices own match function, since different classes
- * of MDIO devices have different match criteria.
+ * Return: 1 if the driver supports the device, 0 otherwise
+ *
+ * Description: This may require calling the devices own match function,
+ * since different classes of MDIO devices have different match criteria.
*/
static int mdio_bus_match(struct device *dev, const struct device_driver *drv)
{
@@ -1432,7 +1012,7 @@ const struct bus_type mdio_bus_type = {
};
EXPORT_SYMBOL(mdio_bus_type);
-int __init mdio_bus_init(void)
+static int __init mdio_bus_init(void)
{
int ret;
@@ -1446,16 +1026,14 @@ int __init mdio_bus_init(void)
return ret;
}
-#if IS_ENABLED(CONFIG_PHYLIB)
-void mdio_bus_exit(void)
+static void __exit mdio_bus_exit(void)
{
class_unregister(&mdio_bus_class);
bus_unregister(&mdio_bus_type);
}
-EXPORT_SYMBOL_GPL(mdio_bus_exit);
-#else
-module_init(mdio_bus_init);
-/* no module_exit, intentional */
+
+subsys_initcall(mdio_bus_init);
+module_exit(mdio_bus_exit);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MDIO bus/device layer");
-#endif
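
The Return: annotations added throughout this file make the modify_changed() convention explicit: 1 means the register was changed, 0 means no change was needed, and a negative value is an error. A short usage sketch under that convention (function and register choice are illustrative only):

/* Illustrative only: set BMCR_PDOWN and report whether this call
 * actually changed the register (1), left it alone (0), or failed (<0).
 */
static int example_power_down(struct mii_bus *bus, int addr)
{
	int ret;

	ret = mdiobus_modify_changed(bus, addr, MII_BMCR, 0, BMCR_PDOWN);
	if (ret < 0)
		return ret;

	if (ret == 1)
		pr_debug("PHY %d: power-down bit newly set\n", addr);

	return 0;
}
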
diff --git a/drivers/net/phy/mdio_bus_provider.c b/drivers/net/phy/mdio_bus_provider.c
new file mode 100644
index 000000000000..4b0637405740
--- /dev/null
+++ b/drivers/net/phy/mdio_bus_provider.c
@@ -0,0 +1,442 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* MDIO Bus provider interface
+ *
+ * Author: Andy Fleming
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/micrel_phy.h>
+#include <linux/mii.h>
+#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/unistd.h>
+
+/**
+ * mdiobus_alloc_size - allocate a mii_bus structure
+ * @size: extra amount of memory to allocate for private storage.
+ * If non-zero, then bus->priv points to that memory.
+ *
+ * Description: called by a bus driver to allocate an mii_bus
+ * structure to fill in.
+ */
+struct mii_bus *mdiobus_alloc_size(size_t size)
+{
+ struct mii_bus *bus;
+ size_t aligned_size = ALIGN(sizeof(*bus), NETDEV_ALIGN);
+ size_t alloc_size;
+ int i;
+
+ /* If we alloc extra space, it should be aligned */
+ if (size)
+ alloc_size = aligned_size + size;
+ else
+ alloc_size = sizeof(*bus);
+
+ bus = kzalloc(alloc_size, GFP_KERNEL);
+ if (!bus)
+ return NULL;
+
+ bus->state = MDIOBUS_ALLOCATED;
+ if (size)
+ bus->priv = (void *)bus + aligned_size;
+
+ /* Initialise the interrupts to polling and 64-bit seqcounts */
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ bus->irq[i] = PHY_POLL;
+ u64_stats_init(&bus->stats[i].syncp);
+ }
+
+ return bus;
+}
+EXPORT_SYMBOL(mdiobus_alloc_size);
+
+#if IS_ENABLED(CONFIG_OF_MDIO)
+/* Walk the list of subnodes of a mdio bus and look for a node that
+ * matches the mdio device's address with its 'reg' property. If
+ * found, set the of_node pointer for the mdio device. This allows
+ * auto-probed phy devices to be supplied with information passed in
+ * via DT.
+ * If a PHY package is found, the PHY is also searched for inside it.
+ */
+static int of_mdiobus_find_phy(struct device *dev, struct mdio_device *mdiodev,
+ struct device_node *np)
+{
+ struct device_node *child;
+
+ for_each_available_child_of_node(np, child) {
+ int addr;
+
+ if (of_node_name_eq(child, "ethernet-phy-package")) {
+ /* Validate PHY package reg presence */
+ if (!of_property_present(child, "reg")) {
+ of_node_put(child);
+ return -EINVAL;
+ }
+
+ if (!of_mdiobus_find_phy(dev, mdiodev, child)) {
+ /* The refcount for the PHY package will be
+ * incremented later when the PHY joins the package.
+ */
+ of_node_put(child);
+ return 0;
+ }
+
+ continue;
+ }
+
+ addr = of_mdio_parse_addr(dev, child);
+ if (addr < 0)
+ continue;
+
+ if (addr == mdiodev->addr) {
+ device_set_node(dev, of_fwnode_handle(child));
+ /* The refcount on "child" is passed to the mdio
+ * device. Do _not_ use of_node_put(child) here.
+ */
+ return 0;
+ }
+ }
+
+ return -ENODEV;
+}
+
+static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
+ struct mdio_device *mdiodev)
+{
+ struct device *dev = &mdiodev->dev;
+
+ if (dev->of_node || !bus->dev.of_node)
+ return;
+
+ of_mdiobus_find_phy(dev, mdiodev, bus->dev.of_node);
+}
+#endif
+
+static struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr, bool c45)
+{
+ struct phy_device *phydev = ERR_PTR(-ENODEV);
+ struct fwnode_handle *fwnode;
+ char node_name[16];
+ int err;
+
+ phydev = get_phy_device(bus, addr, c45);
+ if (IS_ERR(phydev))
+ return phydev;
+
+#if IS_ENABLED(CONFIG_OF_MDIO)
+ /* For DT, see if the auto-probed phy has a corresponding child
+ * in the bus node, and set the of_node pointer in this case.
+ */
+ of_mdiobus_link_mdiodev(bus, &phydev->mdio);
+#endif
+
+ /* Search for a swnode for the phy in the swnode hierarchy of the bus.
+ * If there is no swnode for the phy provided, just ignore it.
+ */
+ if (dev_fwnode(&bus->dev) && !dev_fwnode(&phydev->mdio.dev)) {
+ snprintf(node_name, sizeof(node_name), "ethernet-phy@%d",
+ addr);
+ fwnode = fwnode_get_named_child_node(dev_fwnode(&bus->dev),
+ node_name);
+ if (fwnode)
+ device_set_node(&phydev->mdio.dev, fwnode);
+ }
+
+ err = phy_device_register(phydev);
+ if (err) {
+ phy_device_free(phydev);
+ return ERR_PTR(-ENODEV);
+ }
+
+ return phydev;
+}
+
+/**
+ * mdiobus_scan_c22 - scan one address on a bus for C22 MDIO devices.
+ * @bus: mii_bus to scan
+ * @addr: address on bus to scan
+ *
+ * This function scans one address on the MDIO bus, looking for
+ * devices which can be identified using a vendor/product ID in
+ * registers 2 and 3. Not all MDIO devices have such registers, but
+ * PHY devices typically do. Hence this function assumes anything
+ * found is a PHY, or can be treated as a PHY. Other MDIO devices,
+ * such as switches, will probably not be found during the scan.
+ */
+struct phy_device *mdiobus_scan_c22(struct mii_bus *bus, int addr)
+{
+ return mdiobus_scan(bus, addr, false);
+}
+EXPORT_SYMBOL(mdiobus_scan_c22);
+
+/**
+ * mdiobus_scan_c45 - scan one address on a bus for C45 MDIO devices.
+ * @bus: mii_bus to scan
+ * @addr: address on bus to scan
+ *
+ * This function scans one address on the MDIO bus, looking for
+ * devices which can be identified using a vendor/product ID in
+ * registers 2 and 3. Not all MDIO devices have such registers, but
+ * PHY devices typically do. Hence this function assumes anything
+ * found is a PHY, or can be treated as a PHY. Other MDIO devices,
+ * such as switches, will probably not be found during the scan.
+ */
+static struct phy_device *mdiobus_scan_c45(struct mii_bus *bus, int addr)
+{
+ return mdiobus_scan(bus, addr, true);
+}
+
+static int mdiobus_scan_bus_c22(struct mii_bus *bus)
+{
+ int i;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ if ((bus->phy_mask & BIT(i)) == 0) {
+ struct phy_device *phydev;
+
+ phydev = mdiobus_scan_c22(bus, i);
+ if (IS_ERR(phydev) && (PTR_ERR(phydev) != -ENODEV))
+ return PTR_ERR(phydev);
+ }
+ }
+ return 0;
+}
+
+static int mdiobus_scan_bus_c45(struct mii_bus *bus)
+{
+ int i;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ if ((bus->phy_mask & BIT(i)) == 0) {
+ struct phy_device *phydev;
+
+ /* Don't scan C45 if we already have a C22 device */
+ if (bus->mdio_map[i])
+ continue;
+
+ phydev = mdiobus_scan_c45(bus, i);
+ if (IS_ERR(phydev) && (PTR_ERR(phydev) != -ENODEV))
+ return PTR_ERR(phydev);
+ }
+ }
+ return 0;
+}
+
+/* There are some C22 PHYs which do bad things when there is a C45
+ * transaction on the bus, such as accepting a read themselves and
+ * stomping over the true device's reply, or performing a write to
+ * themselves which was intended for another device. Now that C22
+ * devices have been found, see if any of them are bad for C45, and if we
+ * should skip the C45 scan.
+ */
+static bool mdiobus_prevent_c45_scan(struct mii_bus *bus)
+{
+ struct phy_device *phydev;
+
+ mdiobus_for_each_phy(bus, phydev) {
+ u32 oui = phydev->phy_id >> 10;
+
+ if (oui == MICREL_OUI)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * __mdiobus_register - bring up all the PHYs on a given bus and attach them to the bus
+ * @bus: target mii_bus
+ * @owner: module containing bus accessor functions
+ *
+ * Description: Called by a bus driver to bring up all the PHYs
+ * on a given bus, and attach them to the bus. Drivers should use
+ * mdiobus_register() rather than __mdiobus_register() unless they
+ * need to pass a specific owner module. MDIO devices which are not
+ * PHYs will not be brought up by this function. They are expected
+ * to be explicitly listed in DT and instantiated by of_mdiobus_register().
+ *
+ * Returns 0 on success or < 0 on error.
+ */
+int __mdiobus_register(struct mii_bus *bus, struct module *owner)
+{
+ struct mdio_device *mdiodev;
+ struct gpio_desc *gpiod;
+ bool prevent_c45_scan;
+ int i, err;
+
+ if (!bus || !bus->name)
+ return -EINVAL;
+
+ /* An access method always needs both read and write operations */
+ if (!!bus->read != !!bus->write || !!bus->read_c45 != !!bus->write_c45)
+ return -EINVAL;
+
+ /* At least one method is mandatory */
+ if (!bus->read && !bus->read_c45)
+ return -EINVAL;
+
+ if (bus->parent && bus->parent->of_node)
+ bus->parent->of_node->fwnode.flags |=
+ FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD;
+
+ WARN(bus->state != MDIOBUS_ALLOCATED &&
+ bus->state != MDIOBUS_UNREGISTERED,
+ "%s: not in ALLOCATED or UNREGISTERED state\n", bus->id);
+
+ bus->owner = owner;
+ bus->dev.parent = bus->parent;
+ bus->dev.class = &mdio_bus_class;
+ bus->dev.groups = NULL;
+ dev_set_name(&bus->dev, "%s", bus->id);
+
+ /* If the bus state is allocated, we're registering a fresh bus
+ * that may have a fwnode associated with it. Grab a reference
+ * to the fwnode. This will be dropped when the bus is released.
+ * If the bus was set to unregistered, it means that the bus was
+ * previously registered, and we've already grabbed a reference.
+ */
+ if (bus->state == MDIOBUS_ALLOCATED)
+ fwnode_handle_get(dev_fwnode(&bus->dev));
+
+ /* We need to set state to MDIOBUS_UNREGISTERED to correctly release
+ * the device in mdiobus_free()
+ *
+ * State will be updated later in this function in case of success
+ */
+ bus->state = MDIOBUS_UNREGISTERED;
+
+ err = device_register(&bus->dev);
+ if (err) {
+ pr_err("mii_bus %s failed to register\n", bus->id);
+ return -EINVAL;
+ }
+
+ mutex_init(&bus->mdio_lock);
+ mutex_init(&bus->shared_lock);
+
+ /* assert bus level PHY GPIO reset */
+ gpiod = devm_gpiod_get_optional(&bus->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpiod)) {
+ err = dev_err_probe(&bus->dev, PTR_ERR(gpiod),
+ "mii_bus %s couldn't get reset GPIO\n",
+ bus->id);
+ device_del(&bus->dev);
+ return err;
+ } else if (gpiod) {
+ bus->reset_gpiod = gpiod;
+ fsleep(bus->reset_delay_us);
+ gpiod_set_value_cansleep(gpiod, 0);
+ if (bus->reset_post_delay_us > 0)
+ fsleep(bus->reset_post_delay_us);
+ }
+
+ if (bus->reset) {
+ err = bus->reset(bus);
+ if (err)
+ goto error_reset_gpiod;
+ }
+
+ if (bus->read) {
+ err = mdiobus_scan_bus_c22(bus);
+ if (err)
+ goto error;
+ }
+
+ prevent_c45_scan = mdiobus_prevent_c45_scan(bus);
+
+ if (!prevent_c45_scan && bus->read_c45) {
+ err = mdiobus_scan_bus_c45(bus);
+ if (err)
+ goto error;
+ }
+
+ bus->state = MDIOBUS_REGISTERED;
+ dev_dbg(&bus->dev, "probed\n");
+ return 0;
+
+error:
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ mdiodev = bus->mdio_map[i];
+ if (!mdiodev)
+ continue;
+
+ mdiodev->device_remove(mdiodev);
+ mdiodev->device_free(mdiodev);
+ }
+error_reset_gpiod:
+ /* Put PHYs in RESET to save power */
+ if (bus->reset_gpiod)
+ gpiod_set_value_cansleep(bus->reset_gpiod, 1);
+
+ device_del(&bus->dev);
+ return err;
+}
+EXPORT_SYMBOL(__mdiobus_register);
+
+void mdiobus_unregister(struct mii_bus *bus)
+{
+ struct mdio_device *mdiodev;
+ int i;
+
+ if (WARN_ON_ONCE(bus->state != MDIOBUS_REGISTERED))
+ return;
+ bus->state = MDIOBUS_UNREGISTERED;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ mdiodev = bus->mdio_map[i];
+ if (!mdiodev)
+ continue;
+
+ mdiodev->device_remove(mdiodev);
+ mdiodev->device_free(mdiodev);
+ }
+
+ /* Put PHYs in RESET to save power */
+ if (bus->reset_gpiod)
+ gpiod_set_value_cansleep(bus->reset_gpiod, 1);
+
+ device_del(&bus->dev);
+}
+EXPORT_SYMBOL(mdiobus_unregister);
+
+/**
+ * mdiobus_free - free a struct mii_bus
+ * @bus: mii_bus to free
+ *
+ * This function releases the reference to the underlying device
+ * object in the mii_bus. If this is the last reference, the mii_bus
+ * will be freed.
+ */
+void mdiobus_free(struct mii_bus *bus)
+{
+ /* For compatibility with error handling in drivers. */
+ if (bus->state == MDIOBUS_ALLOCATED) {
+ kfree(bus);
+ return;
+ }
+
+ WARN(bus->state != MDIOBUS_UNREGISTERED,
+ "%s: not in UNREGISTERED state\n", bus->id);
+ bus->state = MDIOBUS_RELEASED;
+
+ put_device(&bus->dev);
+}
+EXPORT_SYMBOL(mdiobus_free);
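
For context, a bus driver typically consumes the provider interface above along the following lines. This is only a sketch: the my_mdio_* names, the platform glue and the register access are made up, the mapping of priv->regs is omitted, and mdiobus_register() is assumed to be the usual wrapper that passes THIS_MODULE to __mdiobus_register().

#include <linux/io.h>
#include <linux/phy.h>
#include <linux/platform_device.h>

struct my_mdio_priv {
	void __iomem *regs;		/* hypothetical controller registers */
};

static int my_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	struct my_mdio_priv *priv = bus->priv;

	/* Issue a C22 read on the controller behind priv->regs */
	return readl(priv->regs) & 0xffff;
}

static int my_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
{
	struct my_mdio_priv *priv = bus->priv;

	/* Issue a C22 write on the controller behind priv->regs */
	writel(val, priv->regs);
	return 0;
}

static int my_mdio_probe(struct platform_device *pdev)
{
	struct mii_bus *bus;
	int ret;

	/* The extra space requested here ends up in bus->priv */
	bus = mdiobus_alloc_size(sizeof(struct my_mdio_priv));
	if (!bus)
		return -ENOMEM;

	bus->name = "example-mdio";
	snprintf(bus->id, MII_BUS_ID_SIZE, "example-%d", pdev->id);
	bus->parent = &pdev->dev;
	bus->read = my_mdio_read;	/* C22 accessors must come as a pair */
	bus->write = my_mdio_write;

	ret = mdiobus_register(bus);	/* scans C22 (and C45 if provided) */
	if (ret) {
		mdiobus_free(bus);
		return ret;
	}

	platform_set_drvdata(pdev, bus);
	return 0;
}

static void my_mdio_remove(struct platform_device *pdev)
{
	struct mii_bus *bus = platform_get_drvdata(pdev);

	mdiobus_unregister(bus);
	mdiobus_free(bus);
}
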
diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c
index e747ee63c665..6e90ed42cd98 100644
--- a/drivers/net/phy/mdio_device.c
+++ b/drivers/net/phy/mdio_device.c
@@ -22,6 +22,7 @@
#include <linux/string.h>
#include <linux/unistd.h>
#include <linux/property.h>
+#include "mdio-private.h"
void mdio_device_free(struct mdio_device *mdiodev)
{
@@ -35,7 +36,8 @@ static void mdio_device_release(struct device *dev)
kfree(to_mdio_device(dev));
}
-int mdio_device_bus_match(struct device *dev, const struct device_driver *drv)
+static int mdio_device_bus_match(struct device *dev,
+ const struct device_driver *drv)
{
struct mdio_device *mdiodev = to_mdio_device(dev);
const struct mdio_driver *mdiodrv = to_mdio_driver(drv);
@@ -58,6 +60,7 @@ struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr)
mdiodev->dev.release = mdio_device_release;
mdiodev->dev.parent = &bus->dev;
mdiodev->dev.bus = &mdio_bus_type;
+ mdiodev->bus_match = mdio_device_bus_match;
mdiodev->device_free = mdio_device_free;
mdiodev->device_remove = mdio_device_remove;
mdiodev->bus = bus;
@@ -75,6 +78,8 @@ EXPORT_SYMBOL(mdio_device_create);
/**
* mdio_device_register - Register the mdio device on the MDIO bus
* @mdiodev: mdio_device structure to be added to the MDIO bus
+ *
+ * Return: Zero if successful, negative error code on failure
*/
int mdio_device_register(struct mdio_device *mdiodev)
{
@@ -116,6 +121,59 @@ void mdio_device_remove(struct mdio_device *mdiodev)
}
EXPORT_SYMBOL(mdio_device_remove);
+/**
+ * mdio_device_register_reset - Read and initialize the reset properties of
+ * an mdio device
+ * @mdiodev: mdio_device structure
+ *
+ * Return: Zero if successful, negative error code on failure
+ */
+int mdio_device_register_reset(struct mdio_device *mdiodev)
+{
+ struct reset_control *reset;
+
+ /* Deassert the optional reset signal */
+ mdiodev->reset_gpio = gpiod_get_optional(&mdiodev->dev,
+ "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(mdiodev->reset_gpio))
+ return PTR_ERR(mdiodev->reset_gpio);
+
+ if (mdiodev->reset_gpio)
+ gpiod_set_consumer_name(mdiodev->reset_gpio, "PHY reset");
+
+ reset = reset_control_get_optional_exclusive(&mdiodev->dev, "phy");
+ if (IS_ERR(reset)) {
+ gpiod_put(mdiodev->reset_gpio);
+ mdiodev->reset_gpio = NULL;
+ return PTR_ERR(reset);
+ }
+
+ mdiodev->reset_ctrl = reset;
+
+ /* Read optional firmware properties */
+ device_property_read_u32(&mdiodev->dev, "reset-assert-us",
+ &mdiodev->reset_assert_delay);
+ device_property_read_u32(&mdiodev->dev, "reset-deassert-us",
+ &mdiodev->reset_deassert_delay);
+
+ return 0;
+}
+
+/**
+ * mdio_device_unregister_reset - uninitialize the reset properties of
+ * an mdio device
+ * @mdiodev: mdio_device structure
+ */
+void mdio_device_unregister_reset(struct mdio_device *mdiodev)
+{
+ gpiod_put(mdiodev->reset_gpio);
+ mdiodev->reset_gpio = NULL;
+ reset_control_put(mdiodev->reset_ctrl);
+ mdiodev->reset_ctrl = NULL;
+ mdiodev->reset_assert_delay = 0;
+ mdiodev->reset_deassert_delay = 0;
+}
+
void mdio_device_reset(struct mdio_device *mdiodev, int value)
{
unsigned int d;
@@ -150,6 +208,8 @@ EXPORT_SYMBOL(mdio_device_reset);
*
* Description: Take care of setting up the mdio_device structure
* and calling the driver to probe the device.
+ *
+ * Return: Zero if successful, negative error code on failure
*/
static int mdio_probe(struct device *dev)
{
@@ -200,6 +260,8 @@ static void mdio_shutdown(struct device *dev)
/**
* mdio_driver_register - register an mdio_driver with the MDIO layer
* @drv: new mdio_driver to register
+ *
+ * Return: Zero if successful, negative error code on failure
*/
int mdio_driver_register(struct mdio_driver *drv)
{
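
For orientation, the reset plumbing added to mdio_device.c above is expected to be used roughly as follows when an MDIO/PHY device is set up. This is only a hedged sketch of the intended call order: the helpers are internal to phylib via mdio-private.h, and mdio_device_reset() is assumed to assert/deassert the GPIO or reset control while honouring reset_assert_delay / reset_deassert_delay.

/* Sketch only: illustrates the intended pairing of the helpers above. */
static int example_mdio_reset_flow(struct mdio_device *mdiodev)
{
	int ret;

	/* Reads the optional "reset" GPIO, the optional "phy" reset
	 * control and the reset-assert-us / reset-deassert-us properties.
	 */
	ret = mdio_device_register_reset(mdiodev);
	if (ret)
		return ret;

	mdio_device_reset(mdiodev, 1);	/* assert reset, wait assert delay */
	mdio_device_reset(mdiodev, 0);	/* deassert, wait deassert delay */

	/* ... probe and use the device ... */

	mdio_device_unregister_reset(mdiodev);	/* drop GPIO/reset references */
	return 0;
}
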
diff --git a/drivers/net/phy/mediatek-ge.c b/drivers/net/phy/mediatek-ge.c
deleted file mode 100644
index 54ea64a37ab3..000000000000
--- a/drivers/net/phy/mediatek-ge.c
+++ /dev/null
@@ -1,111 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-#include <linux/bitfield.h>
-#include <linux/module.h>
-#include <linux/phy.h>
-
-#define MTK_EXT_PAGE_ACCESS 0x1f
-#define MTK_PHY_PAGE_STANDARD 0x0000
-#define MTK_PHY_PAGE_EXTENDED 0x0001
-#define MTK_PHY_PAGE_EXTENDED_2 0x0002
-#define MTK_PHY_PAGE_EXTENDED_3 0x0003
-#define MTK_PHY_PAGE_EXTENDED_2A30 0x2a30
-#define MTK_PHY_PAGE_EXTENDED_52B5 0x52b5
-
-static int mtk_gephy_read_page(struct phy_device *phydev)
-{
- return __phy_read(phydev, MTK_EXT_PAGE_ACCESS);
-}
-
-static int mtk_gephy_write_page(struct phy_device *phydev, int page)
-{
- return __phy_write(phydev, MTK_EXT_PAGE_ACCESS, page);
-}
-
-static void mtk_gephy_config_init(struct phy_device *phydev)
-{
- /* Enable HW auto downshift */
- phy_modify_paged(phydev, MTK_PHY_PAGE_EXTENDED, 0x14, 0, BIT(4));
-
- /* Increase SlvDPSready time */
- phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
- __phy_write(phydev, 0x10, 0xafae);
- __phy_write(phydev, 0x12, 0x2f);
- __phy_write(phydev, 0x10, 0x8fae);
- phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
-
- /* Adjust 100_mse_threshold */
- phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x123, 0xffff);
-
- /* Disable mcc */
- phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xa6, 0x300);
-}
-
-static int mt7530_phy_config_init(struct phy_device *phydev)
-{
- mtk_gephy_config_init(phydev);
-
- /* Increase post_update_timer */
- phy_write_paged(phydev, MTK_PHY_PAGE_EXTENDED_3, 0x11, 0x4b);
-
- return 0;
-}
-
-static int mt7531_phy_config_init(struct phy_device *phydev)
-{
- mtk_gephy_config_init(phydev);
-
- /* PHY link down power saving enable */
- phy_set_bits(phydev, 0x17, BIT(4));
- phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, 0xc6, 0x300);
-
- /* Set TX Pair delay selection */
- phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x13, 0x404);
- phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x14, 0x404);
-
- return 0;
-}
-
-static struct phy_driver mtk_gephy_driver[] = {
- {
- PHY_ID_MATCH_EXACT(0x03a29412),
- .name = "MediaTek MT7530 PHY",
- .config_init = mt7530_phy_config_init,
- /* Interrupts are handled by the switch, not the PHY
- * itself.
- */
- .config_intr = genphy_no_config_intr,
- .handle_interrupt = genphy_handle_interrupt_no_ack,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
- .read_page = mtk_gephy_read_page,
- .write_page = mtk_gephy_write_page,
- },
- {
- PHY_ID_MATCH_EXACT(0x03a29441),
- .name = "MediaTek MT7531 PHY",
- .config_init = mt7531_phy_config_init,
- /* Interrupts are handled by the switch, not the PHY
- * itself.
- */
- .config_intr = genphy_no_config_intr,
- .handle_interrupt = genphy_handle_interrupt_no_ack,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
- .read_page = mtk_gephy_read_page,
- .write_page = mtk_gephy_write_page,
- },
-};
-
-module_phy_driver(mtk_gephy_driver);
-
-static struct mdio_device_id __maybe_unused mtk_gephy_tbl[] = {
- { PHY_ID_MATCH_EXACT(0x03a29441) },
- { PHY_ID_MATCH_EXACT(0x03a29412) },
- { }
-};
-
-MODULE_DESCRIPTION("MediaTek Gigabit Ethernet PHY driver");
-MODULE_AUTHOR("DENG, Qingfang <dqfext@gmail.com>");
-MODULE_LICENSE("GPL");
-
-MODULE_DEVICE_TABLE(mdio, mtk_gephy_tbl);
diff --git a/drivers/net/phy/mediatek/Kconfig b/drivers/net/phy/mediatek/Kconfig
new file mode 100644
index 000000000000..bb7dc876271e
--- /dev/null
+++ b/drivers/net/phy/mediatek/Kconfig
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config MEDIATEK_2P5GE_PHY
+ tristate "MediaTek 2.5Gb Ethernet PHYs"
+ depends on (ARM64 && ARCH_MEDIATEK) || COMPILE_TEST
+ select MTK_NET_PHYLIB
+ help
+ Supports MediaTek SoC built-in 2.5Gb Ethernet PHYs.
+
+	  This will load the necessary firmware and add an appropriate time
+	  delay. The procedure is accelerated through the internal pbus
+	  instead of the MDIO bus. Certain link-up issues are also fixed here.
+
+config MEDIATEK_GE_PHY
+ tristate "MediaTek Gigabit Ethernet PHYs"
+ select MTK_NET_PHYLIB
+ help
+ Supports the MediaTek non-built-in Gigabit Ethernet PHYs.
+
+ Non-built-in Gigabit Ethernet PHYs include mt7530/mt7531.
+ You may find mt7530 inside mt7621. This driver shares some
+ common operations with MediaTek SoC built-in Gigabit
+ Ethernet PHYs.
+
+config MEDIATEK_GE_SOC_PHY
+ tristate "MediaTek SoC Ethernet PHYs"
+ depends on ARM64 || COMPILE_TEST
+ depends on ARCH_AIROHA || (ARCH_MEDIATEK && NVMEM_MTK_EFUSE) || \
+ COMPILE_TEST
+ select MTK_NET_PHYLIB
+ select PHY_PACKAGE
+ help
+ Supports MediaTek SoC built-in Gigabit Ethernet PHYs.
+
+ Include support for built-in Ethernet PHYs which are present in
+ the MT7981 and MT7988 SoCs. These PHYs need calibration data
+	  present in the SoC's efuse and will dynamically calibrate VCM
+ (common-mode voltage) during startup.
+
+config MTK_NET_PHYLIB
+ tristate
diff --git a/drivers/net/phy/mediatek/Makefile b/drivers/net/phy/mediatek/Makefile
new file mode 100644
index 000000000000..ac57ecc799fc
--- /dev/null
+++ b/drivers/net/phy/mediatek/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_MEDIATEK_2P5GE_PHY) += mtk-2p5ge.o
+obj-$(CONFIG_MEDIATEK_GE_PHY) += mtk-ge.o
+obj-$(CONFIG_MEDIATEK_GE_SOC_PHY) += mtk-ge-soc.o
+obj-$(CONFIG_MTK_NET_PHYLIB) += mtk-phy-lib.o
diff --git a/drivers/net/phy/mediatek/mtk-2p5ge.c b/drivers/net/phy/mediatek/mtk-2p5ge.c
new file mode 100644
index 000000000000..de8a41a1841d
--- /dev/null
+++ b/drivers/net/phy/mediatek/mtk-2p5ge.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <linux/bitfield.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/phy.h>
+
+#include "mtk.h"
+
+#define MTK_2P5GPHY_ID_MT7988 0x00339c11
+
+#define MT7988_2P5GE_PMB_FW "mediatek/mt7988/i2p5ge-phy-pmb.bin"
+#define MT7988_2P5GE_PMB_FW_SIZE 0x20000
+#define MT7988_2P5GE_PMB_FW_BASE 0x0f100000
+#define MT7988_2P5GE_PMB_FW_LEN 0x20000
+#define MTK_2P5GPHY_MCU_CSR_BASE 0x0f0f0000
+#define MTK_2P5GPHY_MCU_CSR_LEN 0x20
+#define MD32_EN_CFG 0x18
+#define MD32_EN BIT(0)
+
+#define BASE100T_STATUS_EXTEND 0x10
+#define BASE1000T_STATUS_EXTEND 0x11
+#define EXTEND_CTRL_AND_STATUS 0x16
+
+#define PHY_AUX_CTRL_STATUS 0x1d
+#define PHY_AUX_DPX_MASK GENMASK(5, 5)
+#define PHY_AUX_SPEED_MASK GENMASK(4, 2)
+
+/* Registers on MDIO_MMD_VEND1 */
+#define MTK_PHY_LPI_PCS_DSP_CTRL 0x121
+#define MTK_PHY_LPI_SIG_EN_LO_THRESH100_MASK GENMASK(12, 8)
+
+#define MTK_PHY_HOST_CMD1 0x800e
+#define MTK_PHY_HOST_CMD2 0x800f
+/* Registers on Token Ring debug nodes */
+/* ch_addr = 0x0, node_addr = 0xf, data_addr = 0x3c */
+#define AUTO_NP_10XEN BIT(6)
+
+enum {
+ PHY_AUX_SPD_10 = 0,
+ PHY_AUX_SPD_100,
+ PHY_AUX_SPD_1000,
+ PHY_AUX_SPD_2500,
+};
+
+static int mt798x_2p5ge_phy_load_fw(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ void __iomem *mcu_csr_base, *pmb_addr;
+ const struct firmware *fw;
+ int ret, i;
+ u32 reg;
+
+ pmb_addr = ioremap(MT7988_2P5GE_PMB_FW_BASE, MT7988_2P5GE_PMB_FW_LEN);
+ if (!pmb_addr)
+ return -ENOMEM;
+ mcu_csr_base = ioremap(MTK_2P5GPHY_MCU_CSR_BASE,
+ MTK_2P5GPHY_MCU_CSR_LEN);
+ if (!mcu_csr_base) {
+ ret = -ENOMEM;
+ goto free_pmb;
+ }
+
+ ret = request_firmware_direct(&fw, MT7988_2P5GE_PMB_FW, dev);
+ if (ret) {
+ dev_err(dev, "failed to load firmware: %s, ret: %d\n",
+ MT7988_2P5GE_PMB_FW, ret);
+ goto free;
+ }
+
+ if (fw->size != MT7988_2P5GE_PMB_FW_SIZE) {
+ dev_err(dev, "Firmware size 0x%zx != 0x%x\n",
+ fw->size, MT7988_2P5GE_PMB_FW_SIZE);
+ ret = -EINVAL;
+ goto release_fw;
+ }
+
+ reg = readw(mcu_csr_base + MD32_EN_CFG);
+ if (reg & MD32_EN) {
+ phy_set_bits(phydev, MII_BMCR, BMCR_RESET);
+ usleep_range(10000, 11000);
+ }
+ phy_set_bits(phydev, MII_BMCR, BMCR_PDOWN);
+
+ /* Write magic number to safely stall MCU */
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_HOST_CMD1, 0x1100);
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_HOST_CMD2, 0x00df);
+
+ for (i = 0; i < MT7988_2P5GE_PMB_FW_SIZE - 1; i += 4)
+ writel(*((uint32_t *)(fw->data + i)), pmb_addr + i);
+
+ writew(reg & ~MD32_EN, mcu_csr_base + MD32_EN_CFG);
+ writew(reg | MD32_EN, mcu_csr_base + MD32_EN_CFG);
+ phy_set_bits(phydev, MII_BMCR, BMCR_RESET);
+	/* We need a delay here to stabilize initialization of the MCU */
+ usleep_range(7000, 8000);
+
+ dev_info(dev, "Firmware date code: %x/%x/%x, version: %x.%x\n",
+ be16_to_cpu(*((__be16 *)(fw->data +
+ MT7988_2P5GE_PMB_FW_SIZE - 8))),
+ *(fw->data + MT7988_2P5GE_PMB_FW_SIZE - 6),
+ *(fw->data + MT7988_2P5GE_PMB_FW_SIZE - 5),
+ *(fw->data + MT7988_2P5GE_PMB_FW_SIZE - 2),
+ *(fw->data + MT7988_2P5GE_PMB_FW_SIZE - 1));
+
+release_fw:
+ release_firmware(fw);
+free:
+ iounmap(mcu_csr_base);
+free_pmb:
+ iounmap(pmb_addr);
+
+ return ret;
+}
+
+static int mt798x_2p5ge_phy_config_init(struct phy_device *phydev)
+{
+ /* Check if PHY interface type is compatible */
+ if (phydev->interface != PHY_INTERFACE_MODE_INTERNAL)
+ return -ENODEV;
+
+ phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_LPI_PCS_DSP_CTRL,
+ MTK_PHY_LPI_SIG_EN_LO_THRESH100_MASK, 0);
+
+	/* Enable 16-bit next page exchange bit if 1000-BT isn't advertised */
+ mtk_tr_modify(phydev, 0x0, 0xf, 0x3c, AUTO_NP_10XEN,
+ FIELD_PREP(AUTO_NP_10XEN, 0x1));
+
+ /* Enable HW auto downshift */
+ phy_modify_paged(phydev, MTK_PHY_PAGE_EXTENDED_1,
+ MTK_PHY_AUX_CTRL_AND_STATUS,
+ 0, MTK_PHY_ENABLE_DOWNSHIFT);
+
+ return 0;
+}
+
+static int mt798x_2p5ge_phy_config_aneg(struct phy_device *phydev)
+{
+ bool changed = false;
+ u32 adv;
+ int ret;
+
+ ret = genphy_c45_an_config_aneg(phydev);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ /* Clause 45 doesn't define 1000BaseT support. Use Clause 22 instead in
+ * our design.
+ */
+ adv = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising);
+ ret = phy_modify_changed(phydev, MII_CTRL1000, ADVERTISE_1000FULL, adv);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ return genphy_c45_check_and_restart_aneg(phydev, changed);
+}
+
+static int mt798x_2p5ge_phy_get_features(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_c45_pma_read_abilities(phydev);
+ if (ret)
+ return ret;
+
+	/* This PHY can't handle collisions, and neither can the (XFI)MAC
+	 * it's connected to. Although it can do the HDX handshake, it
+	 * doesn't support the CSMA/CD that HDX requires.
+	 */
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ phydev->supported);
+
+ return 0;
+}
+
+static int mt798x_2p5ge_phy_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+	/* When MDIO_STAT1_LSTATUS is raised by genphy_c45_read_link(), this
+	 * PHY actually hasn't finished AN. So use the CL22 link update
+	 * function instead.
+	 */
+ ret = genphy_update_link(phydev);
+ if (ret)
+ return ret;
+
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+	/* We'll read the link speed through vendor-specific registers below,
+	 * so phy_resolve_aneg_linkmode (AN on) and genphy_c45_read_pma
+	 * (AN off) are not called here.
+	 */
+ if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
+ ret = genphy_c45_read_lpa(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Clause 45 doesn't define 1000BaseT support. Read the link
+ * partner's 1G advertisement via Clause 22.
+ */
+ ret = phy_read(phydev, MII_STAT1000);
+ if (ret < 0)
+ return ret;
+ mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, ret);
+ } else if (phydev->autoneg == AUTONEG_DISABLE) {
+ linkmode_zero(phydev->lp_advertising);
+ }
+
+ if (phydev->link) {
+ ret = phy_read(phydev, PHY_AUX_CTRL_STATUS);
+ if (ret < 0)
+ return ret;
+
+ switch (FIELD_GET(PHY_AUX_SPEED_MASK, ret)) {
+ case PHY_AUX_SPD_10:
+ phydev->speed = SPEED_10;
+ break;
+ case PHY_AUX_SPD_100:
+ phydev->speed = SPEED_100;
+ break;
+ case PHY_AUX_SPD_1000:
+ phydev->speed = SPEED_1000;
+ break;
+ case PHY_AUX_SPD_2500:
+ phydev->speed = SPEED_2500;
+ break;
+ }
+
+ phydev->duplex = DUPLEX_FULL;
+ phydev->rate_matching = RATE_MATCH_PAUSE;
+ }
+
+ return 0;
+}
+
+static int mt798x_2p5ge_phy_get_rate_matching(struct phy_device *phydev,
+ phy_interface_t iface)
+{
+ return RATE_MATCH_PAUSE;
+}
+
+static const unsigned long supported_triggers =
+ BIT(TRIGGER_NETDEV_FULL_DUPLEX) |
+ BIT(TRIGGER_NETDEV_LINK) |
+ BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_LINK_2500) |
+ BIT(TRIGGER_NETDEV_RX) |
+ BIT(TRIGGER_NETDEV_TX);
+
+static int mt798x_2p5ge_phy_led_blink_set(struct phy_device *phydev, u8 index,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ bool blinking = false;
+ int err = 0;
+
+ err = mtk_phy_led_num_dly_cfg(index, delay_on, delay_off, &blinking);
+ if (err < 0)
+ return err;
+
+ err = mtk_phy_hw_led_blink_set(phydev, index, blinking);
+ if (err)
+ return err;
+
+ if (blinking)
+ mtk_phy_hw_led_on_set(phydev, index, MTK_2P5GPHY_LED_ON_MASK,
+ false);
+
+ return 0;
+}
+
+static int mt798x_2p5ge_phy_led_brightness_set(struct phy_device *phydev,
+ u8 index,
+ enum led_brightness value)
+{
+ int err;
+
+ err = mtk_phy_hw_led_blink_set(phydev, index, false);
+ if (err)
+ return err;
+
+ return mtk_phy_hw_led_on_set(phydev, index, MTK_2P5GPHY_LED_ON_MASK,
+ (value != LED_OFF));
+}
+
+static int mt798x_2p5ge_phy_led_hw_is_supported(struct phy_device *phydev,
+ u8 index, unsigned long rules)
+{
+ return mtk_phy_led_hw_is_supported(phydev, index, rules,
+ supported_triggers);
+}
+
+static int mt798x_2p5ge_phy_led_hw_control_get(struct phy_device *phydev,
+ u8 index, unsigned long *rules)
+{
+ return mtk_phy_led_hw_ctrl_get(phydev, index, rules,
+ MTK_2P5GPHY_LED_ON_SET,
+ MTK_2P5GPHY_LED_RX_BLINK_SET,
+ MTK_2P5GPHY_LED_TX_BLINK_SET);
+};
+
+static int mt798x_2p5ge_phy_led_hw_control_set(struct phy_device *phydev,
+ u8 index, unsigned long rules)
+{
+ return mtk_phy_led_hw_ctrl_set(phydev, index, rules,
+ MTK_2P5GPHY_LED_ON_SET,
+ MTK_2P5GPHY_LED_RX_BLINK_SET,
+ MTK_2P5GPHY_LED_TX_BLINK_SET);
+};
+
+static int mt798x_2p5ge_phy_probe(struct phy_device *phydev)
+{
+ struct mtk_socphy_priv *priv;
+ struct pinctrl *pinctrl;
+ int ret;
+
+ switch (phydev->drv->phy_id) {
+ case MTK_2P5GPHY_ID_MT7988:
+		/* This built-in 2.5GbE hardware only sets MDIO_DEVS_PMAPMD.
+		 * Set the rest in this driver, since the PCS/AN/VEND1/VEND2
+		 * MDIO manageable devices actually exist.
+		 */
+ phydev->c45_ids.mmds_present |= MDIO_DEVS_PCS |
+ MDIO_DEVS_AN |
+ MDIO_DEVS_VEND1 |
+ MDIO_DEVS_VEND2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = mt798x_2p5ge_phy_load_fw(phydev);
+ if (ret < 0)
+ return ret;
+
+	/* Set up the LEDs. By default, LED0 is on/off when the link is
+	 * up/down, while LED1 blinks as TX/RX traffic takes place.
+	 */
+ phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MTK_PHY_LED0_ON_CTRL,
+ MTK_PHY_LED_ON_POLARITY | MTK_2P5GPHY_LED_ON_SET);
+ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MTK_PHY_LED0_BLINK_CTRL,
+ MTK_2P5GPHY_LED_TX_BLINK_SET |
+ MTK_2P5GPHY_LED_RX_BLINK_SET);
+ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MTK_PHY_LED1_ON_CTRL,
+ MTK_PHY_LED_ON_FDX | MTK_PHY_LED_ON_HDX |
+ MTK_2P5GPHY_LED_ON_SET);
+ phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MTK_PHY_LED1_BLINK_CTRL,
+ MTK_2P5GPHY_LED_TX_BLINK_SET |
+ MTK_2P5GPHY_LED_RX_BLINK_SET);
+
+ /* Switch pinctrl after setting polarity to avoid bogus blinking */
+ pinctrl = devm_pinctrl_get_select(&phydev->mdio.dev, "i2p5gbe-led");
+ if (IS_ERR(pinctrl))
+		dev_err(&phydev->mdio.dev, "Failed to set LED pins!\n");
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(struct mtk_socphy_priv),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ phydev->priv = priv;
+
+ mtk_phy_leds_state_init(phydev);
+
+ return 0;
+}
+
+static struct phy_driver mtk_2p5gephy_driver[] = {
+ {
+ PHY_ID_MATCH_MODEL(MTK_2P5GPHY_ID_MT7988),
+ .name = "MediaTek MT7988 2.5GbE PHY",
+ .probe = mt798x_2p5ge_phy_probe,
+ .config_init = mt798x_2p5ge_phy_config_init,
+ .config_aneg = mt798x_2p5ge_phy_config_aneg,
+ .get_features = mt798x_2p5ge_phy_get_features,
+ .read_status = mt798x_2p5ge_phy_read_status,
+ .get_rate_matching = mt798x_2p5ge_phy_get_rate_matching,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .read_page = mtk_phy_read_page,
+ .write_page = mtk_phy_write_page,
+ .led_blink_set = mt798x_2p5ge_phy_led_blink_set,
+ .led_brightness_set = mt798x_2p5ge_phy_led_brightness_set,
+ .led_hw_is_supported = mt798x_2p5ge_phy_led_hw_is_supported,
+ .led_hw_control_get = mt798x_2p5ge_phy_led_hw_control_get,
+ .led_hw_control_set = mt798x_2p5ge_phy_led_hw_control_set,
+ },
+};
+
+module_phy_driver(mtk_2p5gephy_driver);
+
+static struct mdio_device_id __maybe_unused mtk_2p5ge_phy_tbl[] = {
+ { PHY_ID_MATCH_VENDOR(0x00339c00) },
+ { }
+};
+
+MODULE_DESCRIPTION("MediaTek 2.5Gb Ethernet PHY driver");
+MODULE_AUTHOR("SkyLake Huang <SkyLake.Huang@mediatek.com>");
+MODULE_LICENSE("GPL");
+
+MODULE_DEVICE_TABLE(mdio, mtk_2p5ge_phy_tbl);
+MODULE_FIRMWARE(MT7988_2P5GE_PMB_FW);
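
A note on the firmware trailer read by the dev_info() in mt798x_2p5ge_phy_load_fw() above: the date code is a big-endian 16-bit value at offset size-8 followed by two single-byte fields, and the version is the last two bytes of the image. Expressed as a struct purely for illustration — the driver itself does not define this, and the year/month/day interpretation is an assumption.

/* Hypothetical view of the last 8 bytes of i2p5ge-phy-pmb.bin */
struct mt7988_pmb_fw_trailer {
	__be16 date_year;	/* printed first by the dev_info() */
	u8 date_month;
	u8 date_day;
	u8 reserved[2];		/* offsets size-4 and size-3 are not printed */
	u8 version_major;
	u8 version_minor;
} __packed;
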
diff --git a/drivers/net/phy/mediatek-ge-soc.c b/drivers/net/phy/mediatek/mtk-ge-soc.c
index a931832b1418..cd09fbf92ef2 100644
--- a/drivers/net/phy/mediatek-ge-soc.c
+++ b/drivers/net/phy/mediatek/mtk-ge-soc.c
@@ -7,9 +7,17 @@
#include <linux/pinctrl/consumer.h>
#include <linux/phy.h>
#include <linux/regmap.h>
+#include <linux/of.h>
+
+#include "../phylib.h"
+#include "mtk.h"
+
+#define MTK_PHY_MAX_LEDS 2
#define MTK_GPHY_ID_MT7981 0x03a29461
#define MTK_GPHY_ID_MT7988 0x03a29481
+#define MTK_GPHY_ID_AN7581 0x03a294c1
+#define MTK_GPHY_ID_AN7583 0xc0ff0420
#define MTK_EXT_PAGE_ACCESS 0x1f
#define MTK_PHY_PAGE_STANDARD 0x0000
@@ -22,7 +30,107 @@
#define MTK_PHY_SMI_DET_ON_THRESH_MASK GENMASK(13, 8)
#define MTK_PHY_PAGE_EXTENDED_2A30 0x2a30
-#define MTK_PHY_PAGE_EXTENDED_52B5 0x52b5
+
+/* Registers on Token Ring debug nodes */
+/* ch_addr = 0x0, node_addr = 0x7, data_addr = 0x15 */
+/* NormMseLoThresh */
+#define NORMAL_MSE_LO_THRESH_MASK GENMASK(15, 8)
+
+/* ch_addr = 0x0, node_addr = 0xf, data_addr = 0x3c */
+/* RemAckCntLimitCtrl */
+#define REMOTE_ACK_COUNT_LIMIT_CTRL_MASK GENMASK(2, 1)
+
+/* ch_addr = 0x1, node_addr = 0xd, data_addr = 0x20 */
+/* VcoSlicerThreshBitsHigh */
+#define VCO_SLICER_THRESH_HIGH_MASK GENMASK(23, 0)
+
+/* ch_addr = 0x1, node_addr = 0xf, data_addr = 0x0 */
+/* DfeTailEnableVgaThresh1000 */
+#define DFE_TAIL_EANBLE_VGA_TRHESH_1000 GENMASK(5, 1)
+
+/* ch_addr = 0x1, node_addr = 0xf, data_addr = 0x1 */
+/* MrvlTrFix100Kp */
+#define MRVL_TR_FIX_100KP_MASK GENMASK(22, 20)
+/* MrvlTrFix100Kf */
+#define MRVL_TR_FIX_100KF_MASK GENMASK(19, 17)
+/* MrvlTrFix1000Kp */
+#define MRVL_TR_FIX_1000KP_MASK GENMASK(16, 14)
+/* MrvlTrFix1000Kf */
+#define MRVL_TR_FIX_1000KF_MASK GENMASK(13, 11)
+
+/* ch_addr = 0x1, node_addr = 0xf, data_addr = 0x12 */
+/* VgaDecRate */
+#define VGA_DECIMATION_RATE_MASK GENMASK(8, 5)
+
+/* ch_addr = 0x1, node_addr = 0xf, data_addr = 0x17 */
+/* SlvDSPreadyTime */
+#define SLAVE_DSP_READY_TIME_MASK GENMASK(22, 15)
+/* MasDSPreadyTime */
+#define MASTER_DSP_READY_TIME_MASK GENMASK(14, 7)
+
+/* ch_addr = 0x1, node_addr = 0xf, data_addr = 0x18 */
+/* EnabRandUpdTrig */
+#define ENABLE_RANDOM_UPDOWN_COUNTER_TRIGGER BIT(8)
+
+/* ch_addr = 0x1, node_addr = 0xf, data_addr = 0x20 */
+/* ResetSyncOffset */
+#define RESET_SYNC_OFFSET_MASK GENMASK(11, 8)
+
+/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0x0 */
+/* FfeUpdGainForceVal */
+#define FFE_UPDATE_GAIN_FORCE_VAL_MASK GENMASK(9, 7)
+/* FfeUpdGainForce */
+#define FFE_UPDATE_GAIN_FORCE BIT(6)
+
+/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0x3 */
+/* TrFreeze */
+#define TR_FREEZE_MASK GENMASK(11, 0)
+
+/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0x6 */
+/* SS: Steady-state, KP: Proportional Gain */
+/* SSTrKp100 */
+#define SS_TR_KP100_MASK GENMASK(21, 19)
+/* SSTrKf100 */
+#define SS_TR_KF100_MASK GENMASK(18, 16)
+/* SSTrKp1000Mas */
+#define SS_TR_KP1000_MASTER_MASK GENMASK(15, 13)
+/* SSTrKf1000Mas */
+#define SS_TR_KF1000_MASTER_MASK GENMASK(12, 10)
+/* SSTrKp1000Slv */
+#define SS_TR_KP1000_SLAVE_MASK GENMASK(9, 7)
+/* SSTrKf1000Slv */
+#define SS_TR_KF1000_SLAVE_MASK GENMASK(6, 4)
+
+/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0x8 */
+/* clear this bit if you want to select from AFE */
+/* Regsigdet_sel_1000 */
+#define EEE1000_SELECT_SIGNAL_DETECTION_FROM_DFE BIT(4)
+
+/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0xd */
+/* RegEEE_st2TrKf1000 */
+#define EEE1000_STAGE2_TR_KF_MASK GENMASK(13, 11)
+
+/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0xf */
+/* RegEEE_slv_waketr_timer_tar */
+#define SLAVE_WAKETR_TIMER_MASK GENMASK(20, 11)
+/* RegEEE_slv_remtx_timer_tar */
+#define SLAVE_REMTX_TIMER_MASK GENMASK(10, 1)
+
+/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0x10 */
+/* RegEEE_slv_wake_int_timer_tar */
+#define SLAVE_WAKEINT_TIMER_MASK GENMASK(10, 1)
+
+/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0x14 */
+/* RegEEE_trfreeze_timer2 */
+#define TR_FREEZE_TIMER2_MASK GENMASK(9, 0)
+
+/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0x1c */
+/* RegEEE100Stg1_tar */
+#define EEE100_LPSYNC_STAGE1_UPDATE_TIMER_MASK GENMASK(8, 0)
+
+/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0x25 */
+/* REGEEE_wake_slv_tr_wait_dfesigdet_en */
+#define WAKE_SLAVE_TR_WAIT_DFE_DETECTION_EN BIT(11)
#define ANALOG_INTERNAL_OPERATION_MAX_US 20
#define TXRESERVE_MIN 0
@@ -210,41 +318,6 @@
#define MTK_PHY_DA_TX_R50_PAIR_D 0x540
/* Registers on MDIO_MMD_VEND2 */
-#define MTK_PHY_LED0_ON_CTRL 0x24
-#define MTK_PHY_LED1_ON_CTRL 0x26
-#define MTK_PHY_LED_ON_MASK GENMASK(6, 0)
-#define MTK_PHY_LED_ON_LINK1000 BIT(0)
-#define MTK_PHY_LED_ON_LINK100 BIT(1)
-#define MTK_PHY_LED_ON_LINK10 BIT(2)
-#define MTK_PHY_LED_ON_LINK (MTK_PHY_LED_ON_LINK10 |\
- MTK_PHY_LED_ON_LINK100 |\
- MTK_PHY_LED_ON_LINK1000)
-#define MTK_PHY_LED_ON_LINKDOWN BIT(3)
-#define MTK_PHY_LED_ON_FDX BIT(4) /* Full duplex */
-#define MTK_PHY_LED_ON_HDX BIT(5) /* Half duplex */
-#define MTK_PHY_LED_ON_FORCE_ON BIT(6)
-#define MTK_PHY_LED_ON_POLARITY BIT(14)
-#define MTK_PHY_LED_ON_ENABLE BIT(15)
-
-#define MTK_PHY_LED0_BLINK_CTRL 0x25
-#define MTK_PHY_LED1_BLINK_CTRL 0x27
-#define MTK_PHY_LED_BLINK_1000TX BIT(0)
-#define MTK_PHY_LED_BLINK_1000RX BIT(1)
-#define MTK_PHY_LED_BLINK_100TX BIT(2)
-#define MTK_PHY_LED_BLINK_100RX BIT(3)
-#define MTK_PHY_LED_BLINK_10TX BIT(4)
-#define MTK_PHY_LED_BLINK_10RX BIT(5)
-#define MTK_PHY_LED_BLINK_RX (MTK_PHY_LED_BLINK_10RX |\
- MTK_PHY_LED_BLINK_100RX |\
- MTK_PHY_LED_BLINK_1000RX)
-#define MTK_PHY_LED_BLINK_TX (MTK_PHY_LED_BLINK_10TX |\
- MTK_PHY_LED_BLINK_100TX |\
- MTK_PHY_LED_BLINK_1000TX)
-#define MTK_PHY_LED_BLINK_COLLISION BIT(6)
-#define MTK_PHY_LED_BLINK_RX_CRC_ERR BIT(7)
-#define MTK_PHY_LED_BLINK_RX_IDLE_ERR BIT(8)
-#define MTK_PHY_LED_BLINK_FORCE_BLINK BIT(9)
-
#define MTK_PHY_LED1_DEFAULT_POLARITIES BIT(1)
#define MTK_PHY_RG_BG_RASEL 0x115
@@ -299,29 +372,11 @@ enum CAL_MODE {
SW_M
};
-#define MTK_PHY_LED_STATE_FORCE_ON 0
-#define MTK_PHY_LED_STATE_FORCE_BLINK 1
-#define MTK_PHY_LED_STATE_NETDEV 2
-
-struct mtk_socphy_priv {
- unsigned long led_state;
-};
-
struct mtk_socphy_shared {
u32 boottrap;
struct mtk_socphy_priv priv[4];
};
-static int mtk_socphy_read_page(struct phy_device *phydev)
-{
- return __phy_read(phydev, MTK_EXT_PAGE_ACCESS);
-}
-
-static int mtk_socphy_write_page(struct phy_device *phydev, int page)
-{
- return __phy_write(phydev, MTK_EXT_PAGE_ACCESS, page);
-}
-
/* One calibration cycle consists of:
* 1.Set DA_CALIN_FLAG high to start calibration. Keep it high
* until AD_CAL_COMP is ready to output calibration result.
@@ -752,40 +807,36 @@ restore:
static void mt798x_phy_common_finetune(struct phy_device *phydev)
{
phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
- /* SlvDSPreadyTime = 24, MasDSPreadyTime = 24 */
- __phy_write(phydev, 0x11, 0xc71);
- __phy_write(phydev, 0x12, 0xc);
- __phy_write(phydev, 0x10, 0x8fae);
-
- /* EnabRandUpdTrig = 1 */
- __phy_write(phydev, 0x11, 0x2f00);
- __phy_write(phydev, 0x12, 0xe);
- __phy_write(phydev, 0x10, 0x8fb0);
-
- /* NormMseLoThresh = 85 */
- __phy_write(phydev, 0x11, 0x55a0);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x83aa);
-
- /* FfeUpdGainForce = 1(Enable), FfeUpdGainForceVal = 4 */
- __phy_write(phydev, 0x11, 0x240);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x9680);
-
- /* TrFreeze = 0 (mt7988 default) */
- __phy_write(phydev, 0x11, 0x0);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x9686);
-
- /* SSTrKp100 = 5 */
- /* SSTrKf100 = 6 */
- /* SSTrKp1000Mas = 5 */
- /* SSTrKf1000Mas = 6 */
- /* SSTrKp1000Slv = 5 */
- /* SSTrKf1000Slv = 6 */
- __phy_write(phydev, 0x11, 0xbaef);
- __phy_write(phydev, 0x12, 0x2e);
- __phy_write(phydev, 0x10, 0x968c);
+ __mtk_tr_modify(phydev, 0x1, 0xf, 0x17,
+ SLAVE_DSP_READY_TIME_MASK | MASTER_DSP_READY_TIME_MASK,
+ FIELD_PREP(SLAVE_DSP_READY_TIME_MASK, 0x18) |
+ FIELD_PREP(MASTER_DSP_READY_TIME_MASK, 0x18));
+
+ __mtk_tr_set_bits(phydev, 0x1, 0xf, 0x18,
+ ENABLE_RANDOM_UPDOWN_COUNTER_TRIGGER);
+
+ __mtk_tr_modify(phydev, 0x0, 0x7, 0x15,
+ NORMAL_MSE_LO_THRESH_MASK,
+ FIELD_PREP(NORMAL_MSE_LO_THRESH_MASK, 0x55));
+
+ __mtk_tr_modify(phydev, 0x2, 0xd, 0x0,
+ FFE_UPDATE_GAIN_FORCE_VAL_MASK,
+ FIELD_PREP(FFE_UPDATE_GAIN_FORCE_VAL_MASK, 0x4) |
+ FFE_UPDATE_GAIN_FORCE);
+
+ __mtk_tr_clr_bits(phydev, 0x2, 0xd, 0x3, TR_FREEZE_MASK);
+
+ __mtk_tr_modify(phydev, 0x2, 0xd, 0x6,
+ SS_TR_KP100_MASK | SS_TR_KF100_MASK |
+ SS_TR_KP1000_MASTER_MASK | SS_TR_KF1000_MASTER_MASK |
+ SS_TR_KP1000_SLAVE_MASK | SS_TR_KF1000_SLAVE_MASK,
+ FIELD_PREP(SS_TR_KP100_MASK, 0x5) |
+ FIELD_PREP(SS_TR_KF100_MASK, 0x6) |
+ FIELD_PREP(SS_TR_KP1000_MASTER_MASK, 0x5) |
+ FIELD_PREP(SS_TR_KF1000_MASTER_MASK, 0x6) |
+ FIELD_PREP(SS_TR_KP1000_SLAVE_MASK, 0x5) |
+ FIELD_PREP(SS_TR_KF1000_SLAVE_MASK, 0x6));
+
phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
}
@@ -808,27 +859,29 @@ static void mt7981_phy_finetune(struct phy_device *phydev)
}
phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
- /* ResetSyncOffset = 6 */
- __phy_write(phydev, 0x11, 0x600);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x8fc0);
+ __mtk_tr_modify(phydev, 0x1, 0xf, 0x20,
+ RESET_SYNC_OFFSET_MASK,
+ FIELD_PREP(RESET_SYNC_OFFSET_MASK, 0x6));
- /* VgaDecRate = 1 */
- __phy_write(phydev, 0x11, 0x4c2a);
- __phy_write(phydev, 0x12, 0x3e);
- __phy_write(phydev, 0x10, 0x8fa4);
+ __mtk_tr_modify(phydev, 0x1, 0xf, 0x12,
+ VGA_DECIMATION_RATE_MASK,
+ FIELD_PREP(VGA_DECIMATION_RATE_MASK, 0x1));
/* MrvlTrFix100Kp = 3, MrvlTrFix100Kf = 2,
* MrvlTrFix1000Kp = 3, MrvlTrFix1000Kf = 2
*/
- __phy_write(phydev, 0x11, 0xd10a);
- __phy_write(phydev, 0x12, 0x34);
- __phy_write(phydev, 0x10, 0x8f82);
+ __mtk_tr_modify(phydev, 0x1, 0xf, 0x1,
+ MRVL_TR_FIX_100KP_MASK | MRVL_TR_FIX_100KF_MASK |
+ MRVL_TR_FIX_1000KP_MASK | MRVL_TR_FIX_1000KF_MASK,
+ FIELD_PREP(MRVL_TR_FIX_100KP_MASK, 0x3) |
+ FIELD_PREP(MRVL_TR_FIX_100KF_MASK, 0x2) |
+ FIELD_PREP(MRVL_TR_FIX_1000KP_MASK, 0x3) |
+ FIELD_PREP(MRVL_TR_FIX_1000KF_MASK, 0x2));
/* VcoSlicerThreshBitsHigh */
- __phy_write(phydev, 0x11, 0x5555);
- __phy_write(phydev, 0x12, 0x55);
- __phy_write(phydev, 0x10, 0x8ec0);
+ __mtk_tr_modify(phydev, 0x1, 0xd, 0x20,
+ VCO_SLICER_THRESH_HIGH_MASK,
+ FIELD_PREP(VCO_SLICER_THRESH_HIGH_MASK, 0x555555));
phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
/* TR_OPEN_LOOP_EN = 1, lpf_x_average = 9 */
@@ -880,25 +933,23 @@ static void mt7988_phy_finetune(struct phy_device *phydev)
phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_TX_FILTER, 0x5);
phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
- /* ResetSyncOffset = 5 */
- __phy_write(phydev, 0x11, 0x500);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x8fc0);
+ __mtk_tr_modify(phydev, 0x1, 0xf, 0x20,
+ RESET_SYNC_OFFSET_MASK,
+ FIELD_PREP(RESET_SYNC_OFFSET_MASK, 0x5));
/* VgaDecRate is 1 at default on mt7988 */
- /* MrvlTrFix100Kp = 6, MrvlTrFix100Kf = 7,
- * MrvlTrFix1000Kp = 6, MrvlTrFix1000Kf = 7
- */
- __phy_write(phydev, 0x11, 0xb90a);
- __phy_write(phydev, 0x12, 0x6f);
- __phy_write(phydev, 0x10, 0x8f82);
-
- /* RemAckCntLimitCtrl = 1 */
- __phy_write(phydev, 0x11, 0xfbba);
- __phy_write(phydev, 0x12, 0xc3);
- __phy_write(phydev, 0x10, 0x87f8);
-
+ __mtk_tr_modify(phydev, 0x1, 0xf, 0x1,
+ MRVL_TR_FIX_100KP_MASK | MRVL_TR_FIX_100KF_MASK |
+ MRVL_TR_FIX_1000KP_MASK | MRVL_TR_FIX_1000KF_MASK,
+ FIELD_PREP(MRVL_TR_FIX_100KP_MASK, 0x6) |
+ FIELD_PREP(MRVL_TR_FIX_100KF_MASK, 0x7) |
+ FIELD_PREP(MRVL_TR_FIX_1000KP_MASK, 0x6) |
+ FIELD_PREP(MRVL_TR_FIX_1000KF_MASK, 0x7));
+
+ __mtk_tr_modify(phydev, 0x0, 0xf, 0x3c,
+ REMOTE_ACK_COUNT_LIMIT_CTRL_MASK,
+ FIELD_PREP(REMOTE_ACK_COUNT_LIMIT_CTRL_MASK, 0x1));
phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
/* TR_OPEN_LOOP_EN = 1, lpf_x_average = 10 */
@@ -974,45 +1025,37 @@ static void mt798x_phy_eee(struct phy_device *phydev)
MTK_PHY_TR_READY_SKIP_AFE_WAKEUP);
phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
- /* Regsigdet_sel_1000 = 0 */
- __phy_write(phydev, 0x11, 0xb);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x9690);
-
- /* REG_EEE_st2TrKf1000 = 2 */
- __phy_write(phydev, 0x11, 0x114f);
- __phy_write(phydev, 0x12, 0x2);
- __phy_write(phydev, 0x10, 0x969a);
-
- /* RegEEE_slv_wake_tr_timer_tar = 6, RegEEE_slv_remtx_timer_tar = 20 */
- __phy_write(phydev, 0x11, 0x3028);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x969e);
-
- /* RegEEE_slv_wake_int_timer_tar = 8 */
- __phy_write(phydev, 0x11, 0x5010);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x96a0);
-
- /* RegEEE_trfreeze_timer2 = 586 */
- __phy_write(phydev, 0x11, 0x24a);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x96a8);
-
- /* RegEEE100Stg1_tar = 16 */
- __phy_write(phydev, 0x11, 0x3210);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x96b8);
-
- /* REGEEE_wake_slv_tr_wait_dfesigdet_en = 0 */
- __phy_write(phydev, 0x11, 0x1463);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x96ca);
-
- /* DfeTailEnableVgaThresh1000 = 27 */
- __phy_write(phydev, 0x11, 0x36);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x8f80);
+ __mtk_tr_clr_bits(phydev, 0x2, 0xd, 0x8,
+ EEE1000_SELECT_SIGNAL_DETECTION_FROM_DFE);
+
+ __mtk_tr_modify(phydev, 0x2, 0xd, 0xd,
+ EEE1000_STAGE2_TR_KF_MASK,
+ FIELD_PREP(EEE1000_STAGE2_TR_KF_MASK, 0x2));
+
+ __mtk_tr_modify(phydev, 0x2, 0xd, 0xf,
+ SLAVE_WAKETR_TIMER_MASK | SLAVE_REMTX_TIMER_MASK,
+ FIELD_PREP(SLAVE_WAKETR_TIMER_MASK, 0x6) |
+ FIELD_PREP(SLAVE_REMTX_TIMER_MASK, 0x14));
+
+ __mtk_tr_modify(phydev, 0x2, 0xd, 0x10,
+ SLAVE_WAKEINT_TIMER_MASK,
+ FIELD_PREP(SLAVE_WAKEINT_TIMER_MASK, 0x8));
+
+ __mtk_tr_modify(phydev, 0x2, 0xd, 0x14,
+ TR_FREEZE_TIMER2_MASK,
+ FIELD_PREP(TR_FREEZE_TIMER2_MASK, 0x24a));
+
+ __mtk_tr_modify(phydev, 0x2, 0xd, 0x1c,
+ EEE100_LPSYNC_STAGE1_UPDATE_TIMER_MASK,
+ FIELD_PREP(EEE100_LPSYNC_STAGE1_UPDATE_TIMER_MASK,
+ 0x10));
+
+ __mtk_tr_clr_bits(phydev, 0x2, 0xd, 0x25,
+ WAKE_SLAVE_TR_WAIT_DFE_DETECTION_EN);
+
+ __mtk_tr_modify(phydev, 0x1, 0xf, 0x0,
+ DFE_TAIL_EANBLE_VGA_TRHESH_1000,
+ FIELD_PREP(DFE_TAIL_EANBLE_VGA_TRHESH_1000, 0x1b));
phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_3);
@@ -1172,76 +1215,23 @@ static int mt798x_phy_config_init(struct phy_device *phydev)
return mt798x_phy_calibration(phydev);
}
-static int mt798x_phy_hw_led_on_set(struct phy_device *phydev, u8 index,
- bool on)
-{
- unsigned int bit_on = MTK_PHY_LED_STATE_FORCE_ON + (index ? 16 : 0);
- struct mtk_socphy_priv *priv = phydev->priv;
- bool changed;
-
- if (on)
- changed = !test_and_set_bit(bit_on, &priv->led_state);
- else
- changed = !!test_and_clear_bit(bit_on, &priv->led_state);
-
- changed |= !!test_and_clear_bit(MTK_PHY_LED_STATE_NETDEV +
- (index ? 16 : 0), &priv->led_state);
- if (changed)
- return phy_modify_mmd(phydev, MDIO_MMD_VEND2, index ?
- MTK_PHY_LED1_ON_CTRL :
- MTK_PHY_LED0_ON_CTRL,
- MTK_PHY_LED_ON_MASK,
- on ? MTK_PHY_LED_ON_FORCE_ON : 0);
- else
- return 0;
-}
-
-static int mt798x_phy_hw_led_blink_set(struct phy_device *phydev, u8 index,
- bool blinking)
-{
- unsigned int bit_blink = MTK_PHY_LED_STATE_FORCE_BLINK +
- (index ? 16 : 0);
- struct mtk_socphy_priv *priv = phydev->priv;
- bool changed;
-
- if (blinking)
- changed = !test_and_set_bit(bit_blink, &priv->led_state);
- else
- changed = !!test_and_clear_bit(bit_blink, &priv->led_state);
-
- changed |= !!test_bit(MTK_PHY_LED_STATE_NETDEV +
- (index ? 16 : 0), &priv->led_state);
- if (changed)
- return phy_write_mmd(phydev, MDIO_MMD_VEND2, index ?
- MTK_PHY_LED1_BLINK_CTRL :
- MTK_PHY_LED0_BLINK_CTRL,
- blinking ?
- MTK_PHY_LED_BLINK_FORCE_BLINK : 0);
- else
- return 0;
-}
-
static int mt798x_phy_led_blink_set(struct phy_device *phydev, u8 index,
unsigned long *delay_on,
unsigned long *delay_off)
{
bool blinking = false;
- int err = 0;
-
- if (index > 1)
- return -EINVAL;
+ int err;
- if (delay_on && delay_off && (*delay_on > 0) && (*delay_off > 0)) {
- blinking = true;
- *delay_on = 50;
- *delay_off = 50;
- }
+ err = mtk_phy_led_num_dly_cfg(index, delay_on, delay_off, &blinking);
+ if (err < 0)
+ return err;
- err = mt798x_phy_hw_led_blink_set(phydev, index, blinking);
+ err = mtk_phy_hw_led_blink_set(phydev, index, blinking);
if (err)
return err;
- return mt798x_phy_hw_led_on_set(phydev, index, false);
+ return mtk_phy_hw_led_on_set(phydev, index, MTK_GPHY_LED_ON_MASK,
+ false);
}
static int mt798x_phy_led_brightness_set(struct phy_device *phydev,
@@ -1249,11 +1239,12 @@ static int mt798x_phy_led_brightness_set(struct phy_device *phydev,
{
int err;
- err = mt798x_phy_hw_led_blink_set(phydev, index, false);
+ err = mtk_phy_hw_led_blink_set(phydev, index, false);
if (err)
return err;
- return mt798x_phy_hw_led_on_set(phydev, index, (value != LED_OFF));
+ return mtk_phy_hw_led_on_set(phydev, index, MTK_GPHY_LED_ON_MASK,
+ (value != LED_OFF));
}
static const unsigned long supported_triggers =
@@ -1269,160 +1260,31 @@ static const unsigned long supported_triggers =
static int mt798x_phy_led_hw_is_supported(struct phy_device *phydev, u8 index,
unsigned long rules)
{
- if (index > 1)
- return -EINVAL;
-
- /* All combinations of the supported triggers are allowed */
- if (rules & ~supported_triggers)
- return -EOPNOTSUPP;
-
- return 0;
-};
+ return mtk_phy_led_hw_is_supported(phydev, index, rules,
+ supported_triggers);
+}
static int mt798x_phy_led_hw_control_get(struct phy_device *phydev, u8 index,
unsigned long *rules)
{
- unsigned int bit_blink = MTK_PHY_LED_STATE_FORCE_BLINK +
- (index ? 16 : 0);
- unsigned int bit_netdev = MTK_PHY_LED_STATE_NETDEV + (index ? 16 : 0);
- unsigned int bit_on = MTK_PHY_LED_STATE_FORCE_ON + (index ? 16 : 0);
- struct mtk_socphy_priv *priv = phydev->priv;
- int on, blink;
-
- if (index > 1)
- return -EINVAL;
-
- on = phy_read_mmd(phydev, MDIO_MMD_VEND2,
- index ? MTK_PHY_LED1_ON_CTRL : MTK_PHY_LED0_ON_CTRL);
-
- if (on < 0)
- return -EIO;
-
- blink = phy_read_mmd(phydev, MDIO_MMD_VEND2,
- index ? MTK_PHY_LED1_BLINK_CTRL :
- MTK_PHY_LED0_BLINK_CTRL);
- if (blink < 0)
- return -EIO;
-
- if ((on & (MTK_PHY_LED_ON_LINK | MTK_PHY_LED_ON_FDX |
- MTK_PHY_LED_ON_HDX | MTK_PHY_LED_ON_LINKDOWN)) ||
- (blink & (MTK_PHY_LED_BLINK_RX | MTK_PHY_LED_BLINK_TX)))
- set_bit(bit_netdev, &priv->led_state);
- else
- clear_bit(bit_netdev, &priv->led_state);
-
- if (on & MTK_PHY_LED_ON_FORCE_ON)
- set_bit(bit_on, &priv->led_state);
- else
- clear_bit(bit_on, &priv->led_state);
-
- if (blink & MTK_PHY_LED_BLINK_FORCE_BLINK)
- set_bit(bit_blink, &priv->led_state);
- else
- clear_bit(bit_blink, &priv->led_state);
-
- if (!rules)
- return 0;
-
- if (on & MTK_PHY_LED_ON_LINK)
- *rules |= BIT(TRIGGER_NETDEV_LINK);
-
- if (on & MTK_PHY_LED_ON_LINK10)
- *rules |= BIT(TRIGGER_NETDEV_LINK_10);
-
- if (on & MTK_PHY_LED_ON_LINK100)
- *rules |= BIT(TRIGGER_NETDEV_LINK_100);
-
- if (on & MTK_PHY_LED_ON_LINK1000)
- *rules |= BIT(TRIGGER_NETDEV_LINK_1000);
-
- if (on & MTK_PHY_LED_ON_FDX)
- *rules |= BIT(TRIGGER_NETDEV_FULL_DUPLEX);
-
- if (on & MTK_PHY_LED_ON_HDX)
- *rules |= BIT(TRIGGER_NETDEV_HALF_DUPLEX);
-
- if (blink & MTK_PHY_LED_BLINK_RX)
- *rules |= BIT(TRIGGER_NETDEV_RX);
-
- if (blink & MTK_PHY_LED_BLINK_TX)
- *rules |= BIT(TRIGGER_NETDEV_TX);
-
- return 0;
+ return mtk_phy_led_hw_ctrl_get(phydev, index, rules,
+ MTK_GPHY_LED_ON_SET,
+ MTK_GPHY_LED_RX_BLINK_SET,
+ MTK_GPHY_LED_TX_BLINK_SET);
};
static int mt798x_phy_led_hw_control_set(struct phy_device *phydev, u8 index,
unsigned long rules)
{
- unsigned int bit_netdev = MTK_PHY_LED_STATE_NETDEV + (index ? 16 : 0);
- struct mtk_socphy_priv *priv = phydev->priv;
- u16 on = 0, blink = 0;
- int ret;
-
- if (index > 1)
- return -EINVAL;
-
- if (rules & BIT(TRIGGER_NETDEV_FULL_DUPLEX))
- on |= MTK_PHY_LED_ON_FDX;
-
- if (rules & BIT(TRIGGER_NETDEV_HALF_DUPLEX))
- on |= MTK_PHY_LED_ON_HDX;
-
- if (rules & (BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_LINK)))
- on |= MTK_PHY_LED_ON_LINK10;
-
- if (rules & (BIT(TRIGGER_NETDEV_LINK_100) | BIT(TRIGGER_NETDEV_LINK)))
- on |= MTK_PHY_LED_ON_LINK100;
-
- if (rules & (BIT(TRIGGER_NETDEV_LINK_1000) | BIT(TRIGGER_NETDEV_LINK)))
- on |= MTK_PHY_LED_ON_LINK1000;
-
- if (rules & BIT(TRIGGER_NETDEV_RX)) {
- blink |= (on & MTK_PHY_LED_ON_LINK) ?
- (((on & MTK_PHY_LED_ON_LINK10) ?
- MTK_PHY_LED_BLINK_10RX : 0) |
- ((on & MTK_PHY_LED_ON_LINK100) ?
- MTK_PHY_LED_BLINK_100RX : 0) |
- ((on & MTK_PHY_LED_ON_LINK1000) ?
- MTK_PHY_LED_BLINK_1000RX : 0)) :
- MTK_PHY_LED_BLINK_RX;
- }
-
- if (rules & BIT(TRIGGER_NETDEV_TX)) {
- blink |= (on & MTK_PHY_LED_ON_LINK) ?
- (((on & MTK_PHY_LED_ON_LINK10) ?
- MTK_PHY_LED_BLINK_10TX : 0) |
- ((on & MTK_PHY_LED_ON_LINK100) ?
- MTK_PHY_LED_BLINK_100TX : 0) |
- ((on & MTK_PHY_LED_ON_LINK1000) ?
- MTK_PHY_LED_BLINK_1000TX : 0)) :
- MTK_PHY_LED_BLINK_TX;
- }
-
- if (blink || on)
- set_bit(bit_netdev, &priv->led_state);
- else
- clear_bit(bit_netdev, &priv->led_state);
-
- ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, index ?
- MTK_PHY_LED1_ON_CTRL :
- MTK_PHY_LED0_ON_CTRL,
- MTK_PHY_LED_ON_FDX |
- MTK_PHY_LED_ON_HDX |
- MTK_PHY_LED_ON_LINK,
- on);
-
- if (ret)
- return ret;
-
- return phy_write_mmd(phydev, MDIO_MMD_VEND2, index ?
- MTK_PHY_LED1_BLINK_CTRL :
- MTK_PHY_LED0_BLINK_CTRL, blink);
+ return mtk_phy_led_hw_ctrl_set(phydev, index, rules,
+ MTK_GPHY_LED_ON_SET,
+ MTK_GPHY_LED_RX_BLINK_SET,
+ MTK_GPHY_LED_TX_BLINK_SET);
};
static bool mt7988_phy_led_get_polarity(struct phy_device *phydev, int led_num)
{
- struct mtk_socphy_shared *priv = phydev->shared->priv;
+ struct mtk_socphy_shared *priv = phy_package_get_priv(phydev);
u32 polarities;
if (led_num == 0)
@@ -1461,7 +1323,8 @@ static int mt7988_phy_fix_leds_polarities(struct phy_device *phydev)
static int mt7988_phy_probe_shared(struct phy_device *phydev)
{
struct device_node *np = dev_of_node(&phydev->mdio.bus->dev);
- struct mtk_socphy_shared *shared = phydev->shared->priv;
+ struct mtk_socphy_shared *shared = phy_package_get_priv(phydev);
+ struct device_node *pio_np;
struct regmap *regmap;
u32 reg;
int ret;
@@ -1479,7 +1342,13 @@ static int mt7988_phy_probe_shared(struct phy_device *phydev)
* The 4 bits in TPBANK0 are kept as package shared data and are used to
* set LED polarity for each of the LED0.
*/
- regmap = syscon_regmap_lookup_by_phandle(np, "mediatek,pio");
+ pio_np = of_parse_phandle(np, "mediatek,pio", 0);
+ if (!pio_np)
+ return -ENODEV;
+
+ regmap = device_node_to_regmap(pio_np);
+ of_node_put(pio_np);
+
if (IS_ERR(regmap))
return PTR_ERR(regmap);
@@ -1492,14 +1361,6 @@ static int mt7988_phy_probe_shared(struct phy_device *phydev)
return 0;
}
-static void mt798x_phy_leds_state_init(struct phy_device *phydev)
-{
- int i;
-
- for (i = 0; i < 2; ++i)
- mt798x_phy_led_hw_control_get(phydev, i, NULL);
-}
-
static int mt7988_phy_probe(struct phy_device *phydev)
{
struct mtk_socphy_shared *shared;
@@ -1520,12 +1381,12 @@ static int mt7988_phy_probe(struct phy_device *phydev)
return err;
}
- shared = phydev->shared->priv;
+ shared = phy_package_get_priv(phydev);
priv = &shared->priv[phydev->mdio.addr];
phydev->priv = priv;
- mt798x_phy_leds_state_init(phydev);
+ mtk_phy_leds_state_init(phydev);
err = mt7988_phy_fix_leds_polarities(phydev);
if (err)
@@ -1552,11 +1413,63 @@ static int mt7981_phy_probe(struct phy_device *phydev)
phydev->priv = priv;
- mt798x_phy_leds_state_init(phydev);
+ mtk_phy_leds_state_init(phydev);
return mt798x_phy_calibration(phydev);
}
+static int an7581_phy_probe(struct phy_device *phydev)
+{
+ struct mtk_socphy_priv *priv;
+ struct pinctrl *pinctrl;
+
+ /* Toggle pinctrl to enable PHY LED */
+ pinctrl = devm_pinctrl_get_select(&phydev->mdio.dev, "gbe-led");
+ if (IS_ERR(pinctrl))
+ dev_err(&phydev->mdio.bus->dev,
+ "Failed to setup PHY LED pinctrl\n");
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
+static int an7581_phy_led_polarity_set(struct phy_device *phydev, int index,
+ unsigned long modes)
+{
+ u16 val = 0;
+ u32 mode;
+
+ if (index >= MTK_PHY_MAX_LEDS)
+ return -EINVAL;
+
+ for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) {
+ switch (mode) {
+ case PHY_LED_ACTIVE_LOW:
+ val = MTK_PHY_LED_ON_POLARITY;
+ break;
+ case PHY_LED_ACTIVE_HIGH:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2, index ?
+ MTK_PHY_LED1_ON_CTRL : MTK_PHY_LED0_ON_CTRL,
+ MTK_PHY_LED_ON_POLARITY, val);
+}
+
+static int an7583_phy_config_init(struct phy_device *phydev)
+{
+ /* BMCR_PDOWN is enabled by default */
+ return phy_clear_bits(phydev, MII_BMCR, BMCR_PDOWN);
+}
+
static struct phy_driver mtk_socphy_driver[] = {
{
PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7981),
@@ -1567,8 +1480,8 @@ static struct phy_driver mtk_socphy_driver[] = {
.probe = mt7981_phy_probe,
.suspend = genphy_suspend,
.resume = genphy_resume,
- .read_page = mtk_socphy_read_page,
- .write_page = mtk_socphy_write_page,
+ .read_page = mtk_phy_read_page,
+ .write_page = mtk_phy_write_page,
.led_blink_set = mt798x_phy_led_blink_set,
.led_brightness_set = mt798x_phy_led_brightness_set,
.led_hw_is_supported = mt798x_phy_led_hw_is_supported,
@@ -1584,21 +1497,46 @@ static struct phy_driver mtk_socphy_driver[] = {
.probe = mt7988_phy_probe,
.suspend = genphy_suspend,
.resume = genphy_resume,
- .read_page = mtk_socphy_read_page,
- .write_page = mtk_socphy_write_page,
+ .read_page = mtk_phy_read_page,
+ .write_page = mtk_phy_write_page,
+ .led_blink_set = mt798x_phy_led_blink_set,
+ .led_brightness_set = mt798x_phy_led_brightness_set,
+ .led_hw_is_supported = mt798x_phy_led_hw_is_supported,
+ .led_hw_control_set = mt798x_phy_led_hw_control_set,
+ .led_hw_control_get = mt798x_phy_led_hw_control_get,
+ },
+ {
+ PHY_ID_MATCH_EXACT(MTK_GPHY_ID_AN7581),
+ .name = "Airoha AN7581 PHY",
+ .probe = an7581_phy_probe,
+ .led_blink_set = mt798x_phy_led_blink_set,
+ .led_brightness_set = mt798x_phy_led_brightness_set,
+ .led_hw_is_supported = mt798x_phy_led_hw_is_supported,
+ .led_hw_control_set = mt798x_phy_led_hw_control_set,
+ .led_hw_control_get = mt798x_phy_led_hw_control_get,
+ .led_polarity_set = an7581_phy_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(MTK_GPHY_ID_AN7583),
+ .name = "Airoha AN7583 PHY",
+ .config_init = an7583_phy_config_init,
+ .probe = an7581_phy_probe,
.led_blink_set = mt798x_phy_led_blink_set,
.led_brightness_set = mt798x_phy_led_brightness_set,
.led_hw_is_supported = mt798x_phy_led_hw_is_supported,
.led_hw_control_set = mt798x_phy_led_hw_control_set,
.led_hw_control_get = mt798x_phy_led_hw_control_get,
+ .led_polarity_set = an7581_phy_led_polarity_set,
},
};
module_phy_driver(mtk_socphy_driver);
-static struct mdio_device_id __maybe_unused mtk_socphy_tbl[] = {
+static const struct mdio_device_id __maybe_unused mtk_socphy_tbl[] = {
{ PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7981) },
{ PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7988) },
+ { PHY_ID_MATCH_EXACT(MTK_GPHY_ID_AN7581) },
+ { PHY_ID_MATCH_EXACT(MTK_GPHY_ID_AN7583) },
{ }
};
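
To make the conversions in the mtk-ge-soc.c hunks above easier to follow, here is one of them side by side. Both halves are taken from the diff itself; __mtk_tr_modify() is provided by the shared mtk-phy-lib code selected via MTK_NET_PHYLIB, which is not part of this excerpt.

	/* Before: raw Token Ring access, data low word in 0x11, high word
	 * in 0x12, then the command/address word written to 0x10.
	 */
	__phy_write(phydev, 0x11, 0x55a0);
	__phy_write(phydev, 0x12, 0x0);
	__phy_write(phydev, 0x10, 0x83aa);

	/* After: the same NormMseLoThresh update as a masked modify, with
	 * the channel, node and data addresses (0x0, 0x7, 0x15) spelled out.
	 */
	__mtk_tr_modify(phydev, 0x0, 0x7, 0x15,
			NORMAL_MSE_LO_THRESH_MASK,
			FIELD_PREP(NORMAL_MSE_LO_THRESH_MASK, 0x55));
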
diff --git a/drivers/net/phy/mediatek/mtk-ge.c b/drivers/net/phy/mediatek/mtk-ge.c
new file mode 100644
index 000000000000..73d9b72f9d9e
--- /dev/null
+++ b/drivers/net/phy/mediatek/mtk-ge.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+
+#include "mtk.h"
+
+#define MTK_GPHY_ID_MT7530 0x03a29412
+#define MTK_GPHY_ID_MT7531 0x03a29441
+
+#define MTK_PHY_PAGE_EXTENDED_2 0x0002
+#define MTK_PHY_PAGE_EXTENDED_3 0x0003
+#define MTK_PHY_RG_LPI_PCS_DSP_CTRL_REG11 0x11
+
+#define MTK_PHY_PAGE_EXTENDED_2A30 0x2a30
+
+/* Registers on Token Ring debug nodes */
+/* ch_addr = 0x1, node_addr = 0xf, data_addr = 0x17 */
+#define SLAVE_DSP_READY_TIME_MASK GENMASK(22, 15)
+
+/* Registers on MDIO_MMD_VEND1 */
+#define MTK_PHY_GBE_MODE_TX_DELAY_SEL 0x13
+#define MTK_PHY_TEST_MODE_TX_DELAY_SEL 0x14
+#define MTK_TX_DELAY_PAIR_B_MASK GENMASK(10, 8)
+#define MTK_TX_DELAY_PAIR_D_MASK GENMASK(2, 0)
+
+#define MTK_PHY_MCC_CTRL_AND_TX_POWER_CTRL 0xa6
+#define MTK_MCC_NEARECHO_OFFSET_MASK GENMASK(15, 8)
+
+#define MTK_PHY_RXADC_CTRL_RG7 0xc6
+#define MTK_PHY_DA_AD_BUF_BIAS_LP_MASK GENMASK(9, 8)
+
+#define MTK_PHY_RG_LPI_PCS_DSP_CTRL_REG123 0x123
+#define MTK_PHY_LPI_NORM_MSE_LO_THRESH100_MASK GENMASK(15, 8)
+#define MTK_PHY_LPI_NORM_MSE_HI_THRESH100_MASK GENMASK(7, 0)
+
+static void mtk_gephy_config_init(struct phy_device *phydev)
+{
+ /* Enable HW auto downshift */
+ phy_modify_paged(phydev, MTK_PHY_PAGE_EXTENDED_1,
+ MTK_PHY_AUX_CTRL_AND_STATUS,
+ 0, MTK_PHY_ENABLE_DOWNSHIFT);
+
+ /* Increase SlvDPSready time */
+ mtk_tr_modify(phydev, 0x1, 0xf, 0x17, SLAVE_DSP_READY_TIME_MASK,
+ FIELD_PREP(SLAVE_DSP_READY_TIME_MASK, 0x5e));
+
+ /* Adjust 100_mse_threshold */
+ phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+ MTK_PHY_RG_LPI_PCS_DSP_CTRL_REG123,
+ MTK_PHY_LPI_NORM_MSE_LO_THRESH100_MASK |
+ MTK_PHY_LPI_NORM_MSE_HI_THRESH100_MASK,
+ FIELD_PREP(MTK_PHY_LPI_NORM_MSE_LO_THRESH100_MASK,
+ 0xff) |
+ FIELD_PREP(MTK_PHY_LPI_NORM_MSE_HI_THRESH100_MASK,
+ 0xff));
+
+ /* If echo time is narrower than 0x3, it will be regarded as noise */
+ phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+ MTK_PHY_MCC_CTRL_AND_TX_POWER_CTRL,
+ MTK_MCC_NEARECHO_OFFSET_MASK,
+ FIELD_PREP(MTK_MCC_NEARECHO_OFFSET_MASK, 0x3));
+}
+
+static int mt7530_phy_config_init(struct phy_device *phydev)
+{
+ mtk_gephy_config_init(phydev);
+
+ /* Increase post_update_timer */
+ phy_write_paged(phydev, MTK_PHY_PAGE_EXTENDED_3,
+ MTK_PHY_RG_LPI_PCS_DSP_CTRL_REG11, 0x4b);
+
+ return 0;
+}
+
+static int mt7531_phy_config_init(struct phy_device *phydev)
+{
+ mtk_gephy_config_init(phydev);
+
+ /* PHY link down power saving enable */
+ phy_set_bits(phydev, 0x17, BIT(4));
+ phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RXADC_CTRL_RG7,
+ MTK_PHY_DA_AD_BUF_BIAS_LP_MASK,
+ FIELD_PREP(MTK_PHY_DA_AD_BUF_BIAS_LP_MASK, 0x3));
+
+ /* Set TX Pair delay selection */
+ phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_GBE_MODE_TX_DELAY_SEL,
+ MTK_TX_DELAY_PAIR_B_MASK | MTK_TX_DELAY_PAIR_D_MASK,
+ FIELD_PREP(MTK_TX_DELAY_PAIR_B_MASK, 0x4) |
+ FIELD_PREP(MTK_TX_DELAY_PAIR_D_MASK, 0x4));
+ phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_TEST_MODE_TX_DELAY_SEL,
+ MTK_TX_DELAY_PAIR_B_MASK | MTK_TX_DELAY_PAIR_D_MASK,
+ FIELD_PREP(MTK_TX_DELAY_PAIR_B_MASK, 0x4) |
+ FIELD_PREP(MTK_TX_DELAY_PAIR_D_MASK, 0x4));
+
+ return 0;
+}
+
+static struct phy_driver mtk_gephy_driver[] = {
+ {
+ PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7530),
+ .name = "MediaTek MT7530 PHY",
+ .config_init = mt7530_phy_config_init,
+ /* Interrupts are handled by the switch, not the PHY
+ * itself.
+ */
+ .config_intr = genphy_no_config_intr,
+ .handle_interrupt = genphy_handle_interrupt_no_ack,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .read_page = mtk_phy_read_page,
+ .write_page = mtk_phy_write_page,
+ },
+ {
+ PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7531),
+ .name = "MediaTek MT7531 PHY",
+ .config_init = mt7531_phy_config_init,
+ /* Interrupts are handled by the switch, not the PHY
+ * itself.
+ */
+ .config_intr = genphy_no_config_intr,
+ .handle_interrupt = genphy_handle_interrupt_no_ack,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .read_page = mtk_phy_read_page,
+ .write_page = mtk_phy_write_page,
+ },
+};
+
+module_phy_driver(mtk_gephy_driver);
+
+static const struct mdio_device_id __maybe_unused mtk_gephy_tbl[] = {
+ { PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7530) },
+ { PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7531) },
+ { }
+};
+
+MODULE_DESCRIPTION("MediaTek Gigabit Ethernet PHY driver");
+MODULE_AUTHOR("DENG, Qingfang <dqfext@gmail.com>");
+MODULE_LICENSE("GPL");
+
+MODULE_DEVICE_TABLE(mdio, mtk_gephy_tbl);
diff --git a/drivers/net/phy/mediatek/mtk-phy-lib.c b/drivers/net/phy/mediatek/mtk-phy-lib.c
new file mode 100644
index 000000000000..dfd0f4e439a2
--- /dev/null
+++ b/drivers/net/phy/mediatek/mtk-phy-lib.c
@@ -0,0 +1,347 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/phy.h>
+#include <linux/module.h>
+
+#include <linux/netdevice.h>
+
+#include "mtk.h"
+
+/* Difference between functions with mtk_tr* and __mtk_tr* prefixes is
+ * mtk_tr* functions: wrapped by page switching operations
+ * __mtk_tr* functions: no page switching operations
+ */
+
+static void __mtk_tr_access(struct phy_device *phydev, bool read, u8 ch_addr,
+ u8 node_addr, u8 data_addr)
+{
+ u16 tr_cmd = BIT(15); /* bit 14 & 0 are reserved */
+
+ if (read)
+ tr_cmd |= BIT(13);
+
+ tr_cmd |= (((ch_addr & 0x3) << 11) |
+ ((node_addr & 0xf) << 7) |
+ ((data_addr & 0x3f) << 1));
+ dev_dbg(&phydev->mdio.dev, "tr_cmd: 0x%x\n", tr_cmd);
+ __phy_write(phydev, 0x10, tr_cmd);
+}
+
+static void __mtk_tr_read(struct phy_device *phydev, u8 ch_addr, u8 node_addr,
+ u8 data_addr, u16 *tr_high, u16 *tr_low)
+{
+ __mtk_tr_access(phydev, true, ch_addr, node_addr, data_addr);
+ *tr_low = __phy_read(phydev, 0x11);
+ *tr_high = __phy_read(phydev, 0x12);
+ dev_dbg(&phydev->mdio.dev, "tr_high read: 0x%x, tr_low read: 0x%x\n",
+ *tr_high, *tr_low);
+}
+
+static void __mtk_tr_write(struct phy_device *phydev, u8 ch_addr, u8 node_addr,
+ u8 data_addr, u32 tr_data)
+{
+ __phy_write(phydev, 0x11, tr_data & 0xffff);
+ __phy_write(phydev, 0x12, tr_data >> 16);
+ dev_dbg(&phydev->mdio.dev, "tr_high write: 0x%x, tr_low write: 0x%x\n",
+ tr_data >> 16, tr_data & 0xffff);
+ __mtk_tr_access(phydev, false, ch_addr, node_addr, data_addr);
+}
+
+void __mtk_tr_modify(struct phy_device *phydev, u8 ch_addr, u8 node_addr,
+ u8 data_addr, u32 mask, u32 set)
+{
+ u32 tr_data;
+ u16 tr_high;
+ u16 tr_low;
+
+ __mtk_tr_read(phydev, ch_addr, node_addr, data_addr, &tr_high, &tr_low);
+ tr_data = (tr_high << 16) | tr_low;
+ tr_data = (tr_data & ~mask) | set;
+ __mtk_tr_write(phydev, ch_addr, node_addr, data_addr, tr_data);
+}
+EXPORT_SYMBOL_GPL(__mtk_tr_modify);
+
+void mtk_tr_modify(struct phy_device *phydev, u8 ch_addr, u8 node_addr,
+ u8 data_addr, u32 mask, u32 set)
+{
+ phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
+ __mtk_tr_modify(phydev, ch_addr, node_addr, data_addr, mask, set);
+ phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
+}
+EXPORT_SYMBOL_GPL(mtk_tr_modify);
+
+void __mtk_tr_set_bits(struct phy_device *phydev, u8 ch_addr, u8 node_addr,
+ u8 data_addr, u32 set)
+{
+ __mtk_tr_modify(phydev, ch_addr, node_addr, data_addr, 0, set);
+}
+EXPORT_SYMBOL_GPL(__mtk_tr_set_bits);
+
+void __mtk_tr_clr_bits(struct phy_device *phydev, u8 ch_addr, u8 node_addr,
+ u8 data_addr, u32 clr)
+{
+ __mtk_tr_modify(phydev, ch_addr, node_addr, data_addr, clr, 0);
+}
+EXPORT_SYMBOL_GPL(__mtk_tr_clr_bits);
+
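The mtk_tr*/__mtk_tr* split described above lets a caller that needs several Token Ring accesses switch to the 0x52b5 page once instead of once per access. A minimal sketch of that pattern follows; example_tr_batch is a hypothetical caller, the first access mirrors the SlvDPSready tuning done elsewhere in this series, and the second triplet/value is purely illustrative:

/* Hypothetical sketch: batch two Token Ring updates under one page switch. */
static void example_tr_batch(struct phy_device *phydev)
{
	phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);

	/* Same SlvDPSready tuning as mtk_gephy_config_init(), unwrapped */
	__mtk_tr_modify(phydev, 0x1, 0xf, 0x17, SLAVE_DSP_READY_TIME_MASK,
			FIELD_PREP(SLAVE_DSP_READY_TIME_MASK, 0x5e));
	/* ch_addr/node_addr/data_addr and the bit below are illustrative only */
	__mtk_tr_set_bits(phydev, 0x0, 0x7, 0x15, BIT(0));

	phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
}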
+int mtk_phy_read_page(struct phy_device *phydev)
+{
+ return __phy_read(phydev, MTK_EXT_PAGE_ACCESS);
+}
+EXPORT_SYMBOL_GPL(mtk_phy_read_page);
+
+int mtk_phy_write_page(struct phy_device *phydev, int page)
+{
+ return __phy_write(phydev, MTK_EXT_PAGE_ACCESS, page);
+}
+EXPORT_SYMBOL_GPL(mtk_phy_write_page);
+
+int mtk_phy_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules,
+ unsigned long supported_triggers)
+{
+ if (index > 1)
+ return -EINVAL;
+
+ /* All combinations of the supported triggers are allowed */
+ if (rules & ~supported_triggers)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_phy_led_hw_is_supported);
+
+int mtk_phy_led_hw_ctrl_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules, u16 on_set,
+ u16 rx_blink_set, u16 tx_blink_set)
+{
+ unsigned int bit_blink = MTK_PHY_LED_STATE_FORCE_BLINK +
+ (index ? 16 : 0);
+ unsigned int bit_netdev = MTK_PHY_LED_STATE_NETDEV + (index ? 16 : 0);
+ unsigned int bit_on = MTK_PHY_LED_STATE_FORCE_ON + (index ? 16 : 0);
+ struct mtk_socphy_priv *priv = phydev->priv;
+ int on, blink;
+
+ if (index > 1)
+ return -EINVAL;
+
+ on = phy_read_mmd(phydev, MDIO_MMD_VEND2,
+ index ? MTK_PHY_LED1_ON_CTRL : MTK_PHY_LED0_ON_CTRL);
+
+ if (on < 0)
+ return -EIO;
+
+ blink = phy_read_mmd(phydev, MDIO_MMD_VEND2,
+ index ? MTK_PHY_LED1_BLINK_CTRL :
+ MTK_PHY_LED0_BLINK_CTRL);
+ if (blink < 0)
+ return -EIO;
+
+ if ((on & (on_set | MTK_PHY_LED_ON_FDX |
+ MTK_PHY_LED_ON_HDX | MTK_PHY_LED_ON_LINKDOWN)) ||
+ (blink & (rx_blink_set | tx_blink_set)))
+ set_bit(bit_netdev, &priv->led_state);
+ else
+ clear_bit(bit_netdev, &priv->led_state);
+
+ if (on & MTK_PHY_LED_ON_FORCE_ON)
+ set_bit(bit_on, &priv->led_state);
+ else
+ clear_bit(bit_on, &priv->led_state);
+
+ if (blink & MTK_PHY_LED_BLINK_FORCE_BLINK)
+ set_bit(bit_blink, &priv->led_state);
+ else
+ clear_bit(bit_blink, &priv->led_state);
+
+ if (!rules)
+ return 0;
+
+ if (on & on_set)
+ *rules |= BIT(TRIGGER_NETDEV_LINK);
+
+ if (on & MTK_PHY_LED_ON_LINK10)
+ *rules |= BIT(TRIGGER_NETDEV_LINK_10);
+
+ if (on & MTK_PHY_LED_ON_LINK100)
+ *rules |= BIT(TRIGGER_NETDEV_LINK_100);
+
+ if (on & MTK_PHY_LED_ON_LINK1000)
+ *rules |= BIT(TRIGGER_NETDEV_LINK_1000);
+
+ if (on & MTK_PHY_LED_ON_LINK2500)
+ *rules |= BIT(TRIGGER_NETDEV_LINK_2500);
+
+ if (on & MTK_PHY_LED_ON_FDX)
+ *rules |= BIT(TRIGGER_NETDEV_FULL_DUPLEX);
+
+ if (on & MTK_PHY_LED_ON_HDX)
+ *rules |= BIT(TRIGGER_NETDEV_HALF_DUPLEX);
+
+ if (blink & rx_blink_set)
+ *rules |= BIT(TRIGGER_NETDEV_RX);
+
+ if (blink & tx_blink_set)
+ *rules |= BIT(TRIGGER_NETDEV_TX);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_phy_led_hw_ctrl_get);
+
+int mtk_phy_led_hw_ctrl_set(struct phy_device *phydev, u8 index,
+ unsigned long rules, u16 on_set,
+ u16 rx_blink_set, u16 tx_blink_set)
+{
+ unsigned int bit_netdev = MTK_PHY_LED_STATE_NETDEV + (index ? 16 : 0);
+ struct mtk_socphy_priv *priv = phydev->priv;
+ u16 on = 0, blink = 0;
+ int ret;
+
+ if (index > 1)
+ return -EINVAL;
+
+ if (rules & BIT(TRIGGER_NETDEV_FULL_DUPLEX))
+ on |= MTK_PHY_LED_ON_FDX;
+
+ if (rules & BIT(TRIGGER_NETDEV_HALF_DUPLEX))
+ on |= MTK_PHY_LED_ON_HDX;
+
+ if (rules & (BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_LINK)))
+ on |= MTK_PHY_LED_ON_LINK10;
+
+ if (rules & (BIT(TRIGGER_NETDEV_LINK_100) | BIT(TRIGGER_NETDEV_LINK)))
+ on |= MTK_PHY_LED_ON_LINK100;
+
+ if (rules & (BIT(TRIGGER_NETDEV_LINK_1000) | BIT(TRIGGER_NETDEV_LINK)))
+ on |= MTK_PHY_LED_ON_LINK1000;
+
+ if (rules & (BIT(TRIGGER_NETDEV_LINK_2500) | BIT(TRIGGER_NETDEV_LINK)))
+ on |= MTK_PHY_LED_ON_LINK2500;
+
+ if (rules & BIT(TRIGGER_NETDEV_RX)) {
+ if (on & on_set) {
+ if (on & MTK_PHY_LED_ON_LINK10)
+ blink |= MTK_PHY_LED_BLINK_10RX;
+ if (on & MTK_PHY_LED_ON_LINK100)
+ blink |= MTK_PHY_LED_BLINK_100RX;
+ if (on & MTK_PHY_LED_ON_LINK1000)
+ blink |= MTK_PHY_LED_BLINK_1000RX;
+ if (on & MTK_PHY_LED_ON_LINK2500)
+ blink |= MTK_PHY_LED_BLINK_2500RX;
+ } else {
+ blink |= rx_blink_set;
+ }
+ }
+
+ if (rules & BIT(TRIGGER_NETDEV_TX)) {
+ if (on & on_set) {
+ if (on & MTK_PHY_LED_ON_LINK10)
+ blink |= MTK_PHY_LED_BLINK_10TX;
+ if (on & MTK_PHY_LED_ON_LINK100)
+ blink |= MTK_PHY_LED_BLINK_100TX;
+ if (on & MTK_PHY_LED_ON_LINK1000)
+ blink |= MTK_PHY_LED_BLINK_1000TX;
+ if (on & MTK_PHY_LED_ON_LINK2500)
+ blink |= MTK_PHY_LED_BLINK_2500TX;
+ } else {
+ blink |= tx_blink_set;
+ }
+ }
+
+ if (blink || on)
+ set_bit(bit_netdev, &priv->led_state);
+ else
+ clear_bit(bit_netdev, &priv->led_state);
+
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, index ?
+ MTK_PHY_LED1_ON_CTRL : MTK_PHY_LED0_ON_CTRL,
+ MTK_PHY_LED_ON_FDX | MTK_PHY_LED_ON_HDX | on_set,
+ on);
+
+ if (ret)
+ return ret;
+
+ return phy_write_mmd(phydev, MDIO_MMD_VEND2, index ?
+ MTK_PHY_LED1_BLINK_CTRL :
+ MTK_PHY_LED0_BLINK_CTRL, blink);
+}
+EXPORT_SYMBOL_GPL(mtk_phy_led_hw_ctrl_set);
+
+int mtk_phy_led_num_dly_cfg(u8 index, unsigned long *delay_on,
+ unsigned long *delay_off, bool *blinking)
+{
+ if (index > 1)
+ return -EINVAL;
+
+ if (delay_on && delay_off && (*delay_on > 0) && (*delay_off > 0)) {
+ *blinking = true;
+ *delay_on = 50;
+ *delay_off = 50;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_phy_led_num_dly_cfg);
+
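A hedged sketch of how a driver's led_blink_set callback might wire this helper together with mtk_phy_hw_led_blink_set() below; example_led_blink_set is hypothetical and omits any force-on bookkeeping a real implementation may also need:

/* Hypothetical led_blink_set callback built from the two helpers */
static int example_led_blink_set(struct phy_device *phydev, u8 index,
				 unsigned long *delay_on,
				 unsigned long *delay_off)
{
	bool blinking = false;
	int err;

	/* Clamp any non-zero on/off request to the fixed 50 ms/50 ms blink */
	err = mtk_phy_led_num_dly_cfg(index, delay_on, delay_off, &blinking);
	if (err < 0)
		return err;

	return mtk_phy_hw_led_blink_set(phydev, index, blinking);
}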
+int mtk_phy_hw_led_on_set(struct phy_device *phydev, u8 index,
+ u16 led_on_mask, bool on)
+{
+ unsigned int bit_on = MTK_PHY_LED_STATE_FORCE_ON + (index ? 16 : 0);
+ struct mtk_socphy_priv *priv = phydev->priv;
+ bool changed;
+
+ if (on)
+ changed = !test_and_set_bit(bit_on, &priv->led_state);
+ else
+ changed = !!test_and_clear_bit(bit_on, &priv->led_state);
+
+ changed |= !!test_and_clear_bit(MTK_PHY_LED_STATE_NETDEV +
+ (index ? 16 : 0), &priv->led_state);
+ if (changed)
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2, index ?
+ MTK_PHY_LED1_ON_CTRL :
+ MTK_PHY_LED0_ON_CTRL,
+ led_on_mask,
+ on ? MTK_PHY_LED_ON_FORCE_ON : 0);
+ else
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_phy_hw_led_on_set);
+
+int mtk_phy_hw_led_blink_set(struct phy_device *phydev, u8 index, bool blinking)
+{
+ unsigned int bit_blink = MTK_PHY_LED_STATE_FORCE_BLINK +
+ (index ? 16 : 0);
+ struct mtk_socphy_priv *priv = phydev->priv;
+ bool changed;
+
+ if (blinking)
+ changed = !test_and_set_bit(bit_blink, &priv->led_state);
+ else
+ changed = !!test_and_clear_bit(bit_blink, &priv->led_state);
+
+ changed |= !!test_bit(MTK_PHY_LED_STATE_NETDEV +
+ (index ? 16 : 0), &priv->led_state);
+ if (changed)
+ return phy_write_mmd(phydev, MDIO_MMD_VEND2, index ?
+ MTK_PHY_LED1_BLINK_CTRL :
+ MTK_PHY_LED0_BLINK_CTRL,
+ blinking ?
+ MTK_PHY_LED_BLINK_FORCE_BLINK : 0);
+ else
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_phy_hw_led_blink_set);
+
+void mtk_phy_leds_state_init(struct phy_device *phydev)
+{
+ int i;
+
+ for (i = 0; i < 2; ++i)
+ phydev->drv->led_hw_control_get(phydev, i, NULL);
+}
+EXPORT_SYMBOL_GPL(mtk_phy_leds_state_init);
+
+MODULE_DESCRIPTION("MediaTek Ethernet PHY driver common");
+MODULE_AUTHOR("Sky Huang <SkyLake.Huang@mediatek.com>");
+MODULE_AUTHOR("Daniel Golle <daniel@makrotopia.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/mediatek/mtk.h b/drivers/net/phy/mediatek/mtk.h
new file mode 100644
index 000000000000..320f76ffa81f
--- /dev/null
+++ b/drivers/net/phy/mediatek/mtk.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Common definition for Mediatek Ethernet PHYs
+ * Author: SkyLake Huang <SkyLake.Huang@mediatek.com>
+ * Copyright (c) 2024 MediaTek Inc.
+ */
+
+#ifndef _MTK_EPHY_H_
+#define _MTK_EPHY_H_
+
+#define MTK_PHY_AUX_CTRL_AND_STATUS 0x14
+#define MTK_PHY_ENABLE_DOWNSHIFT BIT(4)
+
+#define MTK_EXT_PAGE_ACCESS 0x1f
+#define MTK_PHY_PAGE_EXTENDED_1 0x0001
+#define MTK_PHY_PAGE_STANDARD 0x0000
+#define MTK_PHY_PAGE_EXTENDED_52B5 0x52b5
+
+/* Registers on MDIO_MMD_VEND2 */
+#define MTK_PHY_LED0_ON_CTRL 0x24
+#define MTK_PHY_LED1_ON_CTRL 0x26
+#define MTK_GPHY_LED_ON_MASK GENMASK(6, 0)
+#define MTK_2P5GPHY_LED_ON_MASK GENMASK(7, 0)
+#define MTK_PHY_LED_ON_LINK1000 BIT(0)
+#define MTK_PHY_LED_ON_LINK100 BIT(1)
+#define MTK_PHY_LED_ON_LINK10 BIT(2)
+#define MTK_PHY_LED_ON_LINKDOWN BIT(3)
+#define MTK_PHY_LED_ON_FDX BIT(4) /* Full duplex */
+#define MTK_PHY_LED_ON_HDX BIT(5) /* Half duplex */
+#define MTK_PHY_LED_ON_FORCE_ON BIT(6)
+#define MTK_PHY_LED_ON_LINK2500 BIT(7)
+#define MTK_PHY_LED_ON_POLARITY BIT(14)
+#define MTK_PHY_LED_ON_ENABLE BIT(15)
+
+#define MTK_PHY_LED0_BLINK_CTRL 0x25
+#define MTK_PHY_LED1_BLINK_CTRL 0x27
+#define MTK_PHY_LED_BLINK_1000TX BIT(0)
+#define MTK_PHY_LED_BLINK_1000RX BIT(1)
+#define MTK_PHY_LED_BLINK_100TX BIT(2)
+#define MTK_PHY_LED_BLINK_100RX BIT(3)
+#define MTK_PHY_LED_BLINK_10TX BIT(4)
+#define MTK_PHY_LED_BLINK_10RX BIT(5)
+#define MTK_PHY_LED_BLINK_COLLISION BIT(6)
+#define MTK_PHY_LED_BLINK_RX_CRC_ERR BIT(7)
+#define MTK_PHY_LED_BLINK_RX_IDLE_ERR BIT(8)
+#define MTK_PHY_LED_BLINK_FORCE_BLINK BIT(9)
+#define MTK_PHY_LED_BLINK_2500TX BIT(10)
+#define MTK_PHY_LED_BLINK_2500RX BIT(11)
+
+#define MTK_GPHY_LED_ON_SET (MTK_PHY_LED_ON_LINK1000 | \
+ MTK_PHY_LED_ON_LINK100 | \
+ MTK_PHY_LED_ON_LINK10)
+#define MTK_GPHY_LED_RX_BLINK_SET (MTK_PHY_LED_BLINK_1000RX | \
+ MTK_PHY_LED_BLINK_100RX | \
+ MTK_PHY_LED_BLINK_10RX)
+#define MTK_GPHY_LED_TX_BLINK_SET (MTK_PHY_LED_BLINK_1000TX | \
+ MTK_PHY_LED_BLINK_100TX | \
+ MTK_PHY_LED_BLINK_10TX)
+
+#define MTK_2P5GPHY_LED_ON_SET (MTK_PHY_LED_ON_LINK2500 | \
+ MTK_GPHY_LED_ON_SET)
+#define MTK_2P5GPHY_LED_RX_BLINK_SET (MTK_PHY_LED_BLINK_2500RX | \
+ MTK_GPHY_LED_RX_BLINK_SET)
+#define MTK_2P5GPHY_LED_TX_BLINK_SET (MTK_PHY_LED_BLINK_2500TX | \
+ MTK_GPHY_LED_TX_BLINK_SET)
+
+#define MTK_PHY_LED_STATE_FORCE_ON 0
+#define MTK_PHY_LED_STATE_FORCE_BLINK 1
+#define MTK_PHY_LED_STATE_NETDEV 2
+
+struct mtk_socphy_priv {
+ unsigned long led_state;
+};
+
+void __mtk_tr_modify(struct phy_device *phydev, u8 ch_addr, u8 node_addr,
+ u8 data_addr, u32 mask, u32 set);
+void mtk_tr_modify(struct phy_device *phydev, u8 ch_addr, u8 node_addr,
+ u8 data_addr, u32 mask, u32 set);
+void __mtk_tr_set_bits(struct phy_device *phydev, u8 ch_addr, u8 node_addr,
+ u8 data_addr, u32 set);
+void __mtk_tr_clr_bits(struct phy_device *phydev, u8 ch_addr, u8 node_addr,
+ u8 data_addr, u32 clr);
+
+int mtk_phy_read_page(struct phy_device *phydev);
+int mtk_phy_write_page(struct phy_device *phydev, int page);
+
+int mtk_phy_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules,
+ unsigned long supported_triggers);
+int mtk_phy_led_hw_ctrl_set(struct phy_device *phydev, u8 index,
+ unsigned long rules, u16 on_set,
+ u16 rx_blink_set, u16 tx_blink_set);
+int mtk_phy_led_hw_ctrl_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules, u16 on_set,
+ u16 rx_blink_set, u16 tx_blink_set);
+int mtk_phy_led_num_dly_cfg(u8 index, unsigned long *delay_on,
+ unsigned long *delay_off, bool *blinking);
+int mtk_phy_hw_led_on_set(struct phy_device *phydev, u8 index,
+ u16 led_on_mask, bool on);
+int mtk_phy_hw_led_blink_set(struct phy_device *phydev, u8 index,
+ bool blinking);
+void mtk_phy_leds_state_init(struct phy_device *phydev);
+
+#endif /* _MTK_EPHY_H_ */
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index bb9b33b6bce2..962ebbbc1348 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -221,7 +221,7 @@ static struct phy_driver meson_gxl_phy[] = {
},
};
-static struct mdio_device_id __maybe_unused meson_gxl_tbl[] = {
+static const struct mdio_device_id __maybe_unused meson_gxl_tbl[] = {
{ PHY_ID_MATCH_VENDOR(0x01814400) },
{ PHY_ID_MATCH_VENDOR(0x01803301) },
{ }
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 43c82a87bc3a..05de68b9f719 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -34,6 +34,8 @@
#include <linux/net_tstamp.h>
#include <linux/gpio/consumer.h>
+#include "phylib.h"
+
/* Operation Mode Strap Override */
#define MII_KSZPHY_OMSO 0x16
#define KSZPHY_OMSO_FACTORY_TEST BIT(15)
@@ -99,12 +101,15 @@
#define LAN8814_CABLE_DIAG_VCT_DATA_MASK GENMASK(7, 0)
#define LAN8814_PAIR_BIT_SHIFT 12
+#define LAN8814_SKUS 0xB
+
#define LAN8814_WIRE_PAIR_MASK 0xF
/* Lan8814 general Interrupt control/status reg in GPHY specific block. */
#define LAN8814_INTC 0x18
#define LAN8814_INTS 0x1B
+#define LAN8814_INT_FLF BIT(15)
#define LAN8814_INT_LINK_DOWN BIT(2)
#define LAN8814_INT_LINK_UP BIT(0)
#define LAN8814_INT_LINK (LAN8814_INT_LINK_UP |\
@@ -264,6 +269,8 @@
#define LAN8814_LED_CTRL_1 0x0
#define LAN8814_LED_CTRL_1_KSZ9031_LED_MODE_ BIT(6)
+#define LAN8814_LED_CTRL_2 0x1
+#define LAN8814_LED_CTRL_2_LED1_COM_DIS BIT(8)
/* PHY Control 1 */
#define MII_KSZPHY_CTRL_1 0x1e
@@ -360,6 +367,11 @@
/* Delay used to get the second part from the LTC */
#define LAN8841_GET_SEC_LTC_DELAY (500 * NSEC_PER_MSEC)
+#define LAN8842_REV_8832 0x8832
+
+#define LAN8814_REV_LAN8814 0x8814
+#define LAN8814_REV_LAN8818 0x8818
+
struct kszphy_hw_stat {
const char *string;
u8 reg;
@@ -429,14 +441,41 @@ struct kszphy_ptp_priv {
spinlock_t seconds_lock;
};
+struct kszphy_phy_stats {
+ u64 rx_err_pkt_cnt;
+};
+
struct kszphy_priv {
struct kszphy_ptp_priv ptp_priv;
const struct kszphy_type *type;
+ struct clk *clk;
int led_mode;
u16 vct_ctrl1000;
bool rmii_ref_clk_sel;
bool rmii_ref_clk_sel_val;
+ bool clk_enable;
+ bool is_ptp_available;
u64 stats[ARRAY_SIZE(kszphy_hw_stats)];
+ struct kszphy_phy_stats phy_stats;
+};
+
+struct lan8842_phy_stats {
+ u64 rx_packets;
+ u64 rx_errors;
+ u64 tx_packets;
+ u64 tx_errors;
+};
+
+struct lan8842_priv {
+ struct lan8842_phy_stats phy_stats;
+ struct kszphy_ptp_priv ptp_priv;
+ u16 rev;
+};
+
+struct lanphy_reg_data {
+ int page;
+ u16 addr;
+ u16 val;
};
static const struct kszphy_type lan8814_type = {
@@ -468,6 +507,8 @@ static const struct kszphy_type ksz8051_type = {
static const struct kszphy_type ksz8081_type = {
.led_mode_reg = MII_KSZPHY_CTRL_2,
+ .cable_diag_reg = KSZ8081_LMD,
+ .pair_mask = KSZPHY_WIRE_PAIR_MASK,
.has_broadcast_disable = true,
.has_nand_tree_disable = true,
.has_rmii_ref_clk_sel = true,
@@ -764,7 +805,8 @@ static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev,
return !ret;
}
-static int ksz8051_match_phy_device(struct phy_device *phydev)
+static int ksz8051_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return ksz8051_ksz8795_match_phy_device(phydev, true);
}
@@ -884,7 +926,8 @@ static int ksz8061_config_init(struct phy_device *phydev)
return kszphy_config_init(phydev);
}
-static int ksz8795_match_phy_device(struct phy_device *phydev)
+static int ksz8795_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return ksz8051_ksz8795_match_phy_device(phydev, false);
}
@@ -1019,7 +1062,7 @@ static int ksz9021_config_init(struct phy_device *phydev)
#define TX_CLK_ID 0x1f
/* set tx and tx_clk to "No delay adjustment" to keep 0ns
- * dealy
+ * delay
*/
#define TX_ND 0x7
#define TX_CLK_ND 0xf
@@ -1028,6 +1071,29 @@ static int ksz9021_config_init(struct phy_device *phydev)
#define MII_KSZ9031RN_EDPD 0x23
#define MII_KSZ9031RN_EDPD_ENABLE BIT(0)
+static int ksz9031_set_loopback(struct phy_device *phydev, bool enable,
+ int speed)
+{
+ u16 ctl = BMCR_LOOPBACK;
+ int val;
+
+ if (!enable)
+ return genphy_loopback(phydev, enable, 0);
+
+ if (speed == SPEED_10 || speed == SPEED_100 || speed == SPEED_1000)
+ phydev->speed = speed;
+ else if (speed)
+ return -EINVAL;
+ phydev->duplex = DUPLEX_FULL;
+
+ ctl |= mii_bmcr_encode_fixed(phydev->speed, phydev->duplex);
+
+ phy_write(phydev, MII_BMCR, ctl);
+
+ return phy_read_poll_timeout(phydev, MII_BMSR, val, val & BMSR_LSTATUS,
+ 5000, 500000, true);
+}
+
static int ksz9031_of_load_skew_values(struct phy_device *phydev,
const struct device_node *of_node,
u16 reg, size_t field_sz,
@@ -1689,7 +1755,8 @@ static int ksz9x31_cable_test_fault_length(struct phy_device *phydev, u16 stat)
*
* distance to fault = (VCT_DATA - 22) * 4 / cable propagation velocity
*/
- if (phydev_id_compare(phydev, PHY_ID_KSZ9131))
+ if (phydev_id_compare(phydev, PHY_ID_KSZ9131) ||
+ phydev_id_compare(phydev, PHY_ID_KSZ9477))
dt = clamp(dt - 22, 0, 255);
return (dt * 400) / 10;
@@ -1763,12 +1830,20 @@ static int ksz9x31_cable_test_get_status(struct phy_device *phydev,
bool *finished)
{
struct kszphy_priv *priv = phydev->priv;
- unsigned long pair_mask = 0xf;
+ unsigned long pair_mask;
int retries = 20;
int pair, ret, rv;
*finished = false;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phydev->supported) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ phydev->supported))
+ pair_mask = 0xf; /* All pairs */
+ else
+ pair_mask = 0x3; /* Pairs A and B only */
+
/* Try harder if link partner is active */
while (pair_mask && retries--) {
for_each_set_bit(pair, &pair_mask, 4) {
@@ -1850,7 +1925,7 @@ static int ksz886x_config_aneg(struct phy_device *phydev)
return ret;
if (phydev->autoneg != AUTONEG_ENABLE) {
- /* When autonegotation is disabled, we need to manually force
+ /* When autonegotiation is disabled, we need to manually force
* the link state. If we don't do this, the PHY will keep
* sending Fast Link Pulses (FLPs) which are part of the
* autonegotiation process. This is not desired when
@@ -1919,6 +1994,56 @@ static int ksz886x_read_status(struct phy_device *phydev)
return genphy_read_status(phydev);
}
+static int ksz9477_mdix_update(struct phy_device *phydev)
+{
+ if (phydev->mdix_ctrl != ETH_TP_MDI_AUTO)
+ phydev->mdix = phydev->mdix_ctrl;
+ else
+ phydev->mdix = ETH_TP_MDI_INVALID;
+
+ return 0;
+}
+
+static int ksz9477_read_mdix_ctrl(struct phy_device *phydev)
+{
+ int val;
+
+ val = phy_read(phydev, MII_KSZ9131_AUTO_MDIX);
+ if (val < 0)
+ return val;
+
+ if (!(val & MII_KSZ9131_AUTO_MDIX_SWAP_OFF))
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+ else if (val & MII_KSZ9131_AUTO_MDI_SET)
+ phydev->mdix_ctrl = ETH_TP_MDI;
+ else
+ phydev->mdix_ctrl = ETH_TP_MDI_X;
+
+ return 0;
+}
+
+static int ksz9477_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = ksz9477_mdix_update(phydev);
+ if (ret)
+ return ret;
+
+ return genphy_read_status(phydev);
+}
+
+static int ksz9477_config_aneg(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = ksz9131_config_mdix(phydev, phydev->mdix_ctrl);
+ if (ret)
+ return ret;
+
+ return genphy_config_aneg(phydev);
+}
+
struct ksz9477_errata_write {
u8 dev_addr;
u8 reg_addr;
@@ -1982,11 +2107,7 @@ static int ksz9477_phy_errata(struct phy_device *phydev)
return err;
}
- err = genphy_restart_aneg(phydev);
- if (err)
- return err;
-
- return err;
+ return genphy_restart_aneg(phydev);
}
static int ksz9477_config_init(struct phy_device *phydev)
@@ -2000,11 +2121,12 @@ static int ksz9477_config_init(struct phy_device *phydev)
return err;
}
- /* According to KSZ9477 Errata DS80000754C (Module 4) all EEE modes
- * in this switch shall be regarded as broken.
+ /* Read initial MDI-X config state. So, we do not need to poll it
+ * later on.
*/
- if (phydev->dev_flags & MICREL_NO_EEE)
- phydev->eee_broken_modes = -1;
+ err = ksz9477_read_mdix_ctrl(phydev);
+ if (err)
+ return err;
return kszphy_config_init(phydev);
}
@@ -2050,6 +2172,305 @@ static void kszphy_get_stats(struct phy_device *phydev,
data[i] = kszphy_get_stat(phydev, i);
}
+/* KSZ9477 PHY RXER Counter. Probably supported by other PHYs like KSZ9313,
+ * etc. The counter is incremented when the PHY receives a frame with one or
+ * more symbol errors. The counter is cleared when the register is read.
+ */
+#define MII_KSZ9477_PHY_RXER_COUNTER 0x15
+
+static int kszphy_update_stats(struct phy_device *phydev)
+{
+ struct kszphy_priv *priv = phydev->priv;
+ int ret;
+
+ ret = phy_read(phydev, MII_KSZ9477_PHY_RXER_COUNTER);
+ if (ret < 0)
+ return ret;
+
+ priv->phy_stats.rx_err_pkt_cnt += ret;
+
+ return 0;
+}
+
+static void kszphy_get_phy_stats(struct phy_device *phydev,
+ struct ethtool_eth_phy_stats *eth_stats,
+ struct ethtool_phy_stats *stats)
+{
+ struct kszphy_priv *priv = phydev->priv;
+
+ stats->rx_errors = priv->phy_stats.rx_err_pkt_cnt;
+}
+
+/* Base register for Signal Quality Indicator (SQI) - Channel A
+ *
+ * MMD Address: MDIO_MMD_PMAPMD (0x01)
+ * Register: 0xAC (Channel A)
+ * Each channel (pair) has its own register:
+ * Channel A: 0xAC
+ * Channel B: 0xAD
+ * Channel C: 0xAE
+ * Channel D: 0xAF
+ */
+#define KSZ9477_MMD_SIGNAL_QUALITY_CHAN_A 0xac
+
+/* SQI field mask for bits [14:8]
+ *
+ * SQI indicates relative quality of the signal.
+ * A lower value indicates better signal quality.
+ */
+#define KSZ9477_MMD_SQI_MASK GENMASK(14, 8)
+
+#define KSZ9477_MAX_CHANNELS 4
+#define KSZ9477_SQI_MAX 7
+
+/* Number of SQI samples to average for a stable result.
+ *
+ * Reference: KSZ9477S Datasheet DS00002392C, Section 4.1.11 (page 26)
+ * For noisy environments, a minimum of 30–50 readings is recommended.
+ */
+#define KSZ9477_SQI_SAMPLE_COUNT 40
+
+/* The hardware SQI register provides a raw value from 0-127, where a lower
+ * value indicates better signal quality. However, empirical testing has
+ * shown that only the 0-7 range is relevant for a functional link. A raw
+ * value of 8 or higher was measured directly before link drop. This aligns
+ * with the OPEN Alliance recommendation that SQI=0 should represent the
+ * pre-failure state.
+ *
+ * This table provides a non-linear mapping from the useful raw hardware
+ * values (0-7) to the standard 0-7 SQI scale, where higher is better.
+ */
+static const u8 ksz_sqi_mapping[] = {
+ 7, /* raw 0 -> SQI 7 */
+ 7, /* raw 1 -> SQI 7 */
+ 6, /* raw 2 -> SQI 6 */
+ 5, /* raw 3 -> SQI 5 */
+ 4, /* raw 4 -> SQI 4 */
+ 3, /* raw 5 -> SQI 3 */
+ 2, /* raw 6 -> SQI 2 */
+ 1, /* raw 7 -> SQI 1 */
+};
+
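As a worked illustration with made-up numbers: 40 samples summing to 92 give an average raw value of 92 / 40 = 2, which the table maps to SQI 6; a sum of 380 averages to 9, which falls outside the table (>= 8) and is reported as SQI 0, the pre-failure state.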
+/**
+ * kszphy_get_sqi - Read, average, and map Signal Quality Index (SQI)
+ * @phydev: the PHY device
+ *
+ * This function reads and processes the raw Signal Quality Index from the
+ * PHY. Based on empirical testing, a raw value of 8 or higher indicates a
+ * pre-failure state and is mapped to SQI 0. Raw values from 0-7 are
+ * mapped to the standard 0-7 SQI scale via a lookup table.
+ *
+ * Return: SQI value (0–7), or a negative errno on failure.
+ */
+static int kszphy_get_sqi(struct phy_device *phydev)
+{
+ int sum[KSZ9477_MAX_CHANNELS] = { 0 };
+ int worst_sqi = KSZ9477_SQI_MAX;
+ int i, val, raw_sqi, ch;
+ u8 channels;
+
+ /* Determine applicable channels based on link speed */
+ if (phydev->speed == SPEED_1000)
+ channels = 4;
+ else if (phydev->speed == SPEED_100)
+ channels = 1;
+ else
+ return -EOPNOTSUPP;
+
+ /* Sample and accumulate SQI readings for each applicable pair.
+ *
+ * Reference: KSZ9477S Datasheet DS00002392C, Section 4.1.11 (page 26)
+ * - The SQI register is updated every 2 µs.
+ * - Values may fluctuate significantly, even in low-noise environments.
+ * - For reliable estimation, average a minimum of 30–50 samples
+ * (recommended for noisy environments)
+ * - In noisy environments, individual readings are highly unreliable.
+ *
+ * We use 40 samples per pair, spaced at least 3 µs apart so that
+ * each read captures a fresh value (2 µs update interval).
+ */
+ for (i = 0; i < KSZ9477_SQI_SAMPLE_COUNT; i++) {
+ for (ch = 0; ch < channels; ch++) {
+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
+ KSZ9477_MMD_SIGNAL_QUALITY_CHAN_A + ch);
+ if (val < 0)
+ return val;
+
+ raw_sqi = FIELD_GET(KSZ9477_MMD_SQI_MASK, val);
+ sum[ch] += raw_sqi;
+
+ /* We talk to the PHY over MDIO, typically bridged via SPI or
+ * I2C, which is slower than the SQI register's 2 µs update
+ * interval, so no explicit delay between reads is needed.
+ */
+ }
+ }
+
+ /* Calculate average for each channel and find the worst SQI */
+ for (ch = 0; ch < channels; ch++) {
+ int avg_raw_sqi = sum[ch] / KSZ9477_SQI_SAMPLE_COUNT;
+ int mapped_sqi;
+
+ /* Handle the pre-fail/failed state first. */
+ if (avg_raw_sqi >= ARRAY_SIZE(ksz_sqi_mapping))
+ mapped_sqi = 0;
+ else
+ /* Use the lookup table for the good signal range. */
+ mapped_sqi = ksz_sqi_mapping[avg_raw_sqi];
+
+ if (mapped_sqi < worst_sqi)
+ worst_sqi = mapped_sqi;
+ }
+
+ return worst_sqi;
+}
+
+static int kszphy_get_sqi_max(struct phy_device *phydev)
+{
+ return KSZ9477_SQI_MAX;
+}
+
+static int kszphy_get_mse_capability(struct phy_device *phydev,
+ struct phy_mse_capability *cap)
+{
+ /* Capabilities depend on link mode:
+ * - 1000BASE-T: per-pair SQI registers exist => expose A..D
+ * and a WORST selector.
+ * - 100BASE-TX: HW provides a single MSE/SQI reading in the "channel A"
+ * register, but with auto MDI-X there is no MDI-X resolution bit,
+ * so we cannot map that register to a specific wire pair reliably.
+ * To avoid misleading per-channel data, advertise only LINK.
+ * Other speeds: no MSE exposure via this driver.
+ *
+ * Note: WORST is *not* a hardware selector on this family.
+ * We expose it because the driver computes it in software
+ * by scanning per-channel readouts (A..D) and picking the
+ * maximum average MSE.
+ */
+ if (phydev->speed == SPEED_1000)
+ cap->supported_caps = PHY_MSE_CAP_CHANNEL_A |
+ PHY_MSE_CAP_CHANNEL_B |
+ PHY_MSE_CAP_CHANNEL_C |
+ PHY_MSE_CAP_CHANNEL_D |
+ PHY_MSE_CAP_WORST_CHANNEL;
+ else if (phydev->speed == SPEED_100)
+ cap->supported_caps = PHY_MSE_CAP_LINK;
+ else
+ return -EOPNOTSUPP;
+
+ cap->max_average_mse = FIELD_MAX(KSZ9477_MMD_SQI_MASK);
+ cap->refresh_rate_ps = 2000000; /* 2 us */
+ /* Estimated from link modulation (125 MBd per channel) and documented
+ * refresh rate of 2 us
+ */
+ cap->num_symbols = 250;
+
+ cap->supported_caps |= PHY_MSE_CAP_AVG;
+
+ return 0;
+}
+
+static int kszphy_get_mse_snapshot(struct phy_device *phydev,
+ enum phy_mse_channel channel,
+ struct phy_mse_snapshot *snapshot)
+{
+ u8 num_channels;
+ int ret;
+
+ if (phydev->speed == SPEED_1000)
+ num_channels = 4;
+ else if (phydev->speed == SPEED_100)
+ num_channels = 1;
+ else
+ return -EOPNOTSUPP;
+
+ if (channel == PHY_MSE_CHANNEL_WORST) {
+ u32 worst_val = 0;
+ int i;
+
+ /* WORST is implemented in software: select the maximum
+ * average MSE across the available per-channel registers.
+ * Only defined when multiple channels exist (1000BASE-T).
+ */
+ if (num_channels < 2)
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < num_channels; i++) {
+ ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
+ KSZ9477_MMD_SIGNAL_QUALITY_CHAN_A + i);
+ if (ret < 0)
+ return ret;
+
+ ret = FIELD_GET(KSZ9477_MMD_SQI_MASK, ret);
+ if (ret > worst_val)
+ worst_val = ret;
+ }
+ snapshot->average_mse = worst_val;
+ } else if (channel == PHY_MSE_CHANNEL_LINK && num_channels == 1) {
+ ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
+ KSZ9477_MMD_SIGNAL_QUALITY_CHAN_A);
+ if (ret < 0)
+ return ret;
+ snapshot->average_mse = FIELD_GET(KSZ9477_MMD_SQI_MASK, ret);
+ } else if (channel >= PHY_MSE_CHANNEL_A &&
+ channel <= PHY_MSE_CHANNEL_D) {
+ /* Per-channel readouts are valid only for 1000BASE-T. */
+ if (phydev->speed != SPEED_1000)
+ return -EOPNOTSUPP;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
+ KSZ9477_MMD_SIGNAL_QUALITY_CHAN_A + channel);
+ if (ret < 0)
+ return ret;
+ snapshot->average_mse = FIELD_GET(KSZ9477_MMD_SQI_MASK, ret);
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void kszphy_enable_clk(struct phy_device *phydev)
+{
+ struct kszphy_priv *priv = phydev->priv;
+
+ if (!priv->clk_enable && priv->clk) {
+ clk_prepare_enable(priv->clk);
+ priv->clk_enable = true;
+ }
+}
+
+static void kszphy_disable_clk(struct phy_device *phydev)
+{
+ struct kszphy_priv *priv = phydev->priv;
+
+ if (priv->clk_enable && priv->clk) {
+ clk_disable_unprepare(priv->clk);
+ priv->clk_enable = false;
+ }
+}
+
+static int kszphy_generic_resume(struct phy_device *phydev)
+{
+ kszphy_enable_clk(phydev);
+
+ return genphy_resume(phydev);
+}
+
+static int kszphy_generic_suspend(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_suspend(phydev);
+ if (ret)
+ return ret;
+
+ kszphy_disable_clk(phydev);
+
+ return 0;
+}
+
static int kszphy_suspend(struct phy_device *phydev)
{
/* Disable PHY Interrupts */
@@ -2059,7 +2480,7 @@ static int kszphy_suspend(struct phy_device *phydev)
phydev->drv->config_intr(phydev);
}
- return genphy_suspend(phydev);
+ return kszphy_generic_suspend(phydev);
}
static void kszphy_parse_led_mode(struct phy_device *phydev)
@@ -2090,7 +2511,9 @@ static int kszphy_resume(struct phy_device *phydev)
{
int ret;
- genphy_resume(phydev);
+ ret = kszphy_generic_resume(phydev);
+ if (ret)
+ return ret;
/* After switching from power-down to normal mode, an internal global
* reset is automatically generated. Wait a minimum of 1 ms before
@@ -2112,6 +2535,24 @@ static int kszphy_resume(struct phy_device *phydev)
return 0;
}
+/* Due to errata DS80000700A (receiver errors can follow a software
+ * power down), the suspend and resume callbacks only disable and
+ * enable the external RMII reference clock.
+ */
+static int ksz8041_resume(struct phy_device *phydev)
+{
+ kszphy_enable_clk(phydev);
+
+ return 0;
+}
+
+static int ksz8041_suspend(struct phy_device *phydev)
+{
+ kszphy_disable_clk(phydev);
+
+ return 0;
+}
+
static int ksz9477_resume(struct phy_device *phydev)
{
int ret;
@@ -2159,7 +2600,10 @@ static int ksz8061_resume(struct phy_device *phydev)
if (!(ret & BMCR_PDOWN))
return 0;
- genphy_resume(phydev);
+ ret = kszphy_generic_resume(phydev);
+ if (ret)
+ return ret;
+
usleep_range(1000, 2000);
/* Re-program the value after chip is reset. */
@@ -2177,6 +2621,11 @@ static int ksz8061_resume(struct phy_device *phydev)
return 0;
}
+static int ksz8061_suspend(struct phy_device *phydev)
+{
+ return kszphy_suspend(phydev);
+}
+
static int kszphy_probe(struct phy_device *phydev)
{
const struct kszphy_type *type = phydev->drv->driver_data;
@@ -2217,10 +2666,14 @@ static int kszphy_probe(struct phy_device *phydev)
} else if (!clk) {
/* unnamed clock from the generic ethernet-phy binding */
clk = devm_clk_get_optional_enabled(&phydev->mdio.dev, NULL);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
}
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ clk_disable_unprepare(clk);
+ priv->clk = clk;
+
if (ksz8041_fiber_mode(phydev))
phydev->port = PORT_FIBRE;
@@ -2463,10 +2916,80 @@ static int ksz886x_cable_test_get_status(struct phy_device *phydev,
return ret;
}
+/**
+ * LAN8814_PAGE_PCS - Selects Extended Page 0.
+ *
+ * This page contains timers used for auto-negotiation, debug registers and
+ * register to configure fast link failure.
+ */
+#define LAN8814_PAGE_PCS 0
+
+/**
+ * LAN8814_PAGE_AFE_PMA - Selects Extended Page 1.
+ *
+ * This page appears to control the Analog Front-End (AFE) and Physical
+ * Medium Attachment (PMA) layers. It is used to access registers like
+ * LAN8814_PD_CONTROLS and LAN8814_LINK_QUALITY.
+ */
+#define LAN8814_PAGE_AFE_PMA 1
+
+/**
+ * LAN8814_PAGE_PCS_DIGITAL - Selects Extended Page 2.
+ *
+ * This page seems dedicated to the Physical Coding Sublayer (PCS) and other
+ * digital logic. It is used for MDI-X alignment (LAN8814_ALIGN_SWAP) and EEE
+ * state (LAN8814_EEE_STATE) in the LAN8814, and is repurposed for statistics
+ * and self-test counters in the LAN8842.
+ */
+#define LAN8814_PAGE_PCS_DIGITAL 2
+
+/**
+ * LAN8814_PAGE_EEE - Selects Extended Page 3.
+ *
+ * This page contains EEE registers
+ */
+#define LAN8814_PAGE_EEE 3
+
+/**
+ * LAN8814_PAGE_COMMON_REGS - Selects Extended Page 4.
+ *
+ * This page contains device-common registers that affect the entire chip.
+ * It includes controls for chip-level resets, strap status, GPIO,
+ * QSGMII, the shared 1588 PTP block, and the PVT monitor.
+ */
+#define LAN8814_PAGE_COMMON_REGS 4
+
+/**
+ * LAN8814_PAGE_PORT_REGS - Selects Extended Page 5.
+ *
+ * This page contains port-specific registers that must be accessed
+ * on a per-port basis. It includes controls for port LEDs, QSGMII PCS,
+ * rate adaptation FIFOs, and the per-port 1588 TSU block.
+ */
+#define LAN8814_PAGE_PORT_REGS 5
+
+/**
+ * LAN8814_PAGE_POWER_REGS - Selects Extended Page 28.
+ *
+ * This page contains analog control registers and power mode registers.
+ */
+#define LAN8814_PAGE_POWER_REGS 28
+
+/**
+ * LAN8814_PAGE_SYSTEM_CTRL - Selects Extended Page 31.
+ *
+ * This page appears to hold fundamental system or global controls. In the
+ * driver, it is used by the related LAN8804 to access the
+ * LAN8814_CLOCK_MANAGEMENT register.
+ */
+#define LAN8814_PAGE_SYSTEM_CTRL 31
+
#define LAN_EXT_PAGE_ACCESS_CONTROL 0x16
#define LAN_EXT_PAGE_ACCESS_ADDRESS_DATA 0x17
#define LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC 0x4000
+#define LAN8814_QSGMII_TX_CONFIG 0x35
+#define LAN8814_QSGMII_TX_CONFIG_QSGMII BIT(3)
#define LAN8814_QSGMII_SOFT_RESET 0x43
#define LAN8814_QSGMII_SOFT_RESET_BIT BIT(0)
#define LAN8814_QSGMII_PCS1G_ANEG_CONFIG 0x13
@@ -2513,6 +3036,27 @@ static int lanphy_write_page_reg(struct phy_device *phydev, int page, u16 addr,
return val;
}
+static int lanphy_modify_page_reg(struct phy_device *phydev, int page, u16 addr,
+ u16 mask, u16 set)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page);
+ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr);
+ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL,
+ (page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC));
+ ret = __phy_modify_changed(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA,
+ mask, set);
+ phy_unlock_mdio_bus(phydev);
+
+ if (ret < 0)
+ phydev_err(phydev, "__phy_modify_changed() failed: %pe\n",
+ ERR_PTR(ret));
+
+ return ret;
+}
+
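This helper collapses the open-coded read/modify/write sequences on extended-page registers into a single access held under the MDIO bus lock; later hunks of this patch convert the existing callers to it. A minimal before/after sketch (the example_* function is hypothetical; the register and bit names are taken from the one-step sync handling converted below):

/* Hypothetical sketch: enable one-step sync timestamp insertion */
static void example_enable_onestep_ts(struct phy_device *phydev)
{
	/* Old pattern: separate read and write, two page-switch sequences */
	int tx_mod = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
					  PTP_TX_MOD);
	lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS, PTP_TX_MOD,
			      tx_mod | PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);

	/* New pattern: one call, bus lock held across the whole sequence */
	lanphy_modify_page_reg(phydev, LAN8814_PAGE_PORT_REGS, PTP_TX_MOD,
			       PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_,
			       PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
}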
static int lan8814_config_ts_intr(struct phy_device *phydev, bool enable)
{
u16 val = 0;
@@ -2523,42 +3067,52 @@ static int lan8814_config_ts_intr(struct phy_device *phydev, bool enable)
PTP_TSU_INT_EN_PTP_RX_TS_EN_ |
PTP_TSU_INT_EN_PTP_RX_TS_OVRFL_EN_;
- return lanphy_write_page_reg(phydev, 5, PTP_TSU_INT_EN, val);
+ return lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TSU_INT_EN, val);
}
static void lan8814_ptp_rx_ts_get(struct phy_device *phydev,
u32 *seconds, u32 *nano_seconds, u16 *seq_id)
{
- *seconds = lanphy_read_page_reg(phydev, 5, PTP_RX_INGRESS_SEC_HI);
+ *seconds = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_RX_INGRESS_SEC_HI);
*seconds = (*seconds << 16) |
- lanphy_read_page_reg(phydev, 5, PTP_RX_INGRESS_SEC_LO);
+ lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_RX_INGRESS_SEC_LO);
- *nano_seconds = lanphy_read_page_reg(phydev, 5, PTP_RX_INGRESS_NS_HI);
+ *nano_seconds = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_RX_INGRESS_NS_HI);
*nano_seconds = ((*nano_seconds & 0x3fff) << 16) |
- lanphy_read_page_reg(phydev, 5, PTP_RX_INGRESS_NS_LO);
+ lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_RX_INGRESS_NS_LO);
- *seq_id = lanphy_read_page_reg(phydev, 5, PTP_RX_MSG_HEADER2);
+ *seq_id = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_RX_MSG_HEADER2);
}
static void lan8814_ptp_tx_ts_get(struct phy_device *phydev,
u32 *seconds, u32 *nano_seconds, u16 *seq_id)
{
- *seconds = lanphy_read_page_reg(phydev, 5, PTP_TX_EGRESS_SEC_HI);
+ *seconds = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_EGRESS_SEC_HI);
*seconds = *seconds << 16 |
- lanphy_read_page_reg(phydev, 5, PTP_TX_EGRESS_SEC_LO);
+ lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_EGRESS_SEC_LO);
- *nano_seconds = lanphy_read_page_reg(phydev, 5, PTP_TX_EGRESS_NS_HI);
+ *nano_seconds = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_EGRESS_NS_HI);
*nano_seconds = ((*nano_seconds & 0x3fff) << 16) |
- lanphy_read_page_reg(phydev, 5, PTP_TX_EGRESS_NS_LO);
+ lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_EGRESS_NS_LO);
- *seq_id = lanphy_read_page_reg(phydev, 5, PTP_TX_MSG_HEADER2);
+ *seq_id = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_MSG_HEADER2);
}
static int lan8814_ts_info(struct mii_timestamper *mii_ts, struct kernel_ethtool_ts_info *info)
{
struct kszphy_ptp_priv *ptp_priv = container_of(mii_ts, struct kszphy_ptp_priv, mii_ts);
- struct phy_device *phydev = ptp_priv->phydev;
- struct lan8814_shared_priv *shared = phydev->shared->priv;
+ struct lan8814_shared_priv *shared = phy_package_get_priv(ptp_priv->phydev);
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
@@ -2586,23 +3140,22 @@ static void lan8814_flush_fifo(struct phy_device *phydev, bool egress)
int i;
for (i = 0; i < FIFO_SIZE; ++i)
- lanphy_read_page_reg(phydev, 5,
+ lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
egress ? PTP_TX_MSG_HEADER2 : PTP_RX_MSG_HEADER2);
/* Read to clear overflow status bit */
- lanphy_read_page_reg(phydev, 5, PTP_TSU_INT_STS);
+ lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS, PTP_TSU_INT_STS);
}
-static int lan8814_hwtstamp(struct mii_timestamper *mii_ts,
- struct kernel_hwtstamp_config *config,
- struct netlink_ext_ack *extack)
+static int lan8814_hwtstamp_set(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct kszphy_ptp_priv *ptp_priv =
container_of(mii_ts, struct kszphy_ptp_priv, mii_ts);
struct lan8814_ptp_rx_ts *rx_ts, *tmp;
int txcfg = 0, rxcfg = 0;
int pkt_ts_enable;
- int tx_mod;
ptp_priv->hwts_tx_type = config->tx_type;
ptp_priv->rx_filter = config->rx_filter;
@@ -2641,21 +3194,28 @@ static int lan8814_hwtstamp(struct mii_timestamper *mii_ts,
rxcfg |= PTP_RX_PARSE_CONFIG_IPV4_EN_ | PTP_RX_PARSE_CONFIG_IPV6_EN_;
txcfg |= PTP_TX_PARSE_CONFIG_IPV4_EN_ | PTP_TX_PARSE_CONFIG_IPV6_EN_;
}
- lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_RX_PARSE_CONFIG, rxcfg);
- lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_PARSE_CONFIG, txcfg);
+ lanphy_write_page_reg(ptp_priv->phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_RX_PARSE_CONFIG, rxcfg);
+ lanphy_write_page_reg(ptp_priv->phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_PARSE_CONFIG, txcfg);
pkt_ts_enable = PTP_TIMESTAMP_EN_SYNC_ | PTP_TIMESTAMP_EN_DREQ_ |
PTP_TIMESTAMP_EN_PDREQ_ | PTP_TIMESTAMP_EN_PDRES_;
- lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_RX_TIMESTAMP_EN, pkt_ts_enable);
- lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_TIMESTAMP_EN, pkt_ts_enable);
+ lanphy_write_page_reg(ptp_priv->phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_RX_TIMESTAMP_EN, pkt_ts_enable);
+ lanphy_write_page_reg(ptp_priv->phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_TIMESTAMP_EN, pkt_ts_enable);
- tx_mod = lanphy_read_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD);
if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC) {
- lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD,
- tx_mod | PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
+ lanphy_modify_page_reg(ptp_priv->phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_MOD,
+ PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_,
+ PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
} else if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ON) {
- lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD,
- tx_mod & ~PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
+ lanphy_modify_page_reg(ptp_priv->phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_MOD,
+ PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_,
+ 0);
}
if (config->rx_filter != HWTSTAMP_FILTER_NONE)
@@ -2777,29 +3337,41 @@ static bool lan8814_rxtstamp(struct mii_timestamper *mii_ts, struct sk_buff *skb
static void lan8814_ptp_clock_set(struct phy_device *phydev,
time64_t sec, u32 nsec)
{
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_LO, lower_16_bits(sec));
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_MID, upper_16_bits(sec));
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_HI, upper_32_bits(sec));
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_NS_LO, lower_16_bits(nsec));
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_NS_HI, upper_16_bits(nsec));
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CLOCK_SET_SEC_LO, lower_16_bits(sec));
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CLOCK_SET_SEC_MID, upper_16_bits(sec));
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CLOCK_SET_SEC_HI, upper_32_bits(sec));
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CLOCK_SET_NS_LO, lower_16_bits(nsec));
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CLOCK_SET_NS_HI, upper_16_bits(nsec));
- lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_LOAD_);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_CMD_CTL,
+ PTP_CMD_CTL_PTP_CLOCK_LOAD_);
}
static void lan8814_ptp_clock_get(struct phy_device *phydev,
time64_t *sec, u32 *nsec)
{
- lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_READ_);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_CMD_CTL,
+ PTP_CMD_CTL_PTP_CLOCK_READ_);
- *sec = lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_HI);
+ *sec = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CLOCK_READ_SEC_HI);
*sec <<= 16;
- *sec |= lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_MID);
+ *sec |= lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CLOCK_READ_SEC_MID);
*sec <<= 16;
- *sec |= lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_LO);
+ *sec |= lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CLOCK_READ_SEC_LO);
- *nsec = lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_NS_HI);
+ *nsec = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CLOCK_READ_NS_HI);
*nsec <<= 16;
- *nsec |= lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_NS_LO);
+ *nsec |= lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CLOCK_READ_NS_LO);
}
static int lan8814_ptpci_gettime64(struct ptp_clock_info *ptpci,
@@ -2838,14 +3410,18 @@ static void lan8814_ptp_set_target(struct phy_device *phydev, int event,
s64 start_sec, u32 start_nsec)
{
/* Set the start time */
- lanphy_write_page_reg(phydev, 4, LAN8814_PTP_CLOCK_TARGET_SEC_LO(event),
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_PTP_CLOCK_TARGET_SEC_LO(event),
lower_16_bits(start_sec));
- lanphy_write_page_reg(phydev, 4, LAN8814_PTP_CLOCK_TARGET_SEC_HI(event),
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_PTP_CLOCK_TARGET_SEC_HI(event),
upper_16_bits(start_sec));
- lanphy_write_page_reg(phydev, 4, LAN8814_PTP_CLOCK_TARGET_NS_LO(event),
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_PTP_CLOCK_TARGET_NS_LO(event),
lower_16_bits(start_nsec));
- lanphy_write_page_reg(phydev, 4, LAN8814_PTP_CLOCK_TARGET_NS_HI(event),
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_PTP_CLOCK_TARGET_NS_HI(event),
upper_16_bits(start_nsec) & 0x3fff);
}
@@ -2943,9 +3519,11 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev,
adjustment_value_lo = adjustment_value & 0xffff;
adjustment_value_hi = (adjustment_value >> 16) & 0x3fff;
- lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_LO,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_LTC_STEP_ADJ_LO,
adjustment_value_lo);
- lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_HI,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_LTC_STEP_ADJ_HI,
PTP_LTC_STEP_ADJ_DIR_ |
adjustment_value_hi);
seconds -= ((s32)adjustment_value);
@@ -2963,9 +3541,11 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev,
adjustment_value_lo = adjustment_value & 0xffff;
adjustment_value_hi = (adjustment_value >> 16) & 0x3fff;
- lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_LO,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_LTC_STEP_ADJ_LO,
adjustment_value_lo);
- lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_HI,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_LTC_STEP_ADJ_HI,
adjustment_value_hi);
seconds += ((s32)adjustment_value);
@@ -2973,8 +3553,8 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev,
set_seconds += adjustment_value;
lan8814_ptp_update_target(phydev, set_seconds);
}
- lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL,
- PTP_CMD_CTL_PTP_LTC_STEP_SEC_);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CMD_CTL, PTP_CMD_CTL_PTP_LTC_STEP_SEC_);
}
if (nano_seconds) {
u16 nano_seconds_lo;
@@ -2983,12 +3563,14 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev,
nano_seconds_lo = nano_seconds & 0xffff;
nano_seconds_hi = (nano_seconds >> 16) & 0x3fff;
- lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_LO,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_LTC_STEP_ADJ_LO,
nano_seconds_lo);
- lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_HI,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_LTC_STEP_ADJ_HI,
PTP_LTC_STEP_ADJ_DIR_ |
nano_seconds_hi);
- lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_CMD_CTL,
PTP_CMD_CTL_PTP_LTC_STEP_NSEC_);
}
}
@@ -3030,8 +3612,10 @@ static int lan8814_ptpci_adjfine(struct ptp_clock_info *ptpci, long scaled_ppm)
kszphy_rate_adj_hi |= PTP_CLOCK_RATE_ADJ_DIR_;
mutex_lock(&shared->shared_lock);
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_RATE_ADJ_HI, kszphy_rate_adj_hi);
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_RATE_ADJ_LO, kszphy_rate_adj_lo);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_CLOCK_RATE_ADJ_HI,
+ kszphy_rate_adj_hi);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_CLOCK_RATE_ADJ_LO,
+ kszphy_rate_adj_lo);
mutex_unlock(&shared->shared_lock);
return 0;
@@ -3040,17 +3624,17 @@ static int lan8814_ptpci_adjfine(struct ptp_clock_info *ptpci, long scaled_ppm)
static void lan8814_ptp_set_reload(struct phy_device *phydev, int event,
s64 period_sec, u32 period_nsec)
{
- lanphy_write_page_reg(phydev, 4,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
LAN8814_PTP_CLOCK_TARGET_RELOAD_SEC_LO(event),
lower_16_bits(period_sec));
- lanphy_write_page_reg(phydev, 4,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
LAN8814_PTP_CLOCK_TARGET_RELOAD_SEC_HI(event),
upper_16_bits(period_sec));
- lanphy_write_page_reg(phydev, 4,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
LAN8814_PTP_CLOCK_TARGET_RELOAD_NS_LO(event),
lower_16_bits(period_nsec));
- lanphy_write_page_reg(phydev, 4,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
LAN8814_PTP_CLOCK_TARGET_RELOAD_NS_HI(event),
upper_16_bits(period_nsec) & 0x3fff);
}
@@ -3058,73 +3642,72 @@ static void lan8814_ptp_set_reload(struct phy_device *phydev, int event,
static void lan8814_ptp_enable_event(struct phy_device *phydev, int event,
int pulse_width)
{
- u16 val;
-
- val = lanphy_read_page_reg(phydev, 4, LAN8814_PTP_GENERAL_CONFIG);
- /* Set the pulse width of the event */
- val &= ~(LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_MASK(event));
- /* Make sure that the target clock will be incremented each time when
+ /* Set the pulse width of the event,
+ * Make sure that the target clock will be incremented each time when
* local time reaches or pass it
+ * Set the polarity high
*/
- val |= LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_SET(event, pulse_width);
- val &= ~(LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event));
- /* Set the polarity high */
- val |= LAN8814_PTP_GENERAL_CONFIG_POLARITY_X(event);
- lanphy_write_page_reg(phydev, 4, LAN8814_PTP_GENERAL_CONFIG, val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, LAN8814_PTP_GENERAL_CONFIG,
+ LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_MASK(event) |
+ LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_SET(event, pulse_width) |
+ LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event) |
+ LAN8814_PTP_GENERAL_CONFIG_POLARITY_X(event),
+ LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_SET(event, pulse_width) |
+ LAN8814_PTP_GENERAL_CONFIG_POLARITY_X(event));
}
static void lan8814_ptp_disable_event(struct phy_device *phydev, int event)
{
- u16 val;
-
/* Set target to too far in the future, effectively disabling it */
lan8814_ptp_set_target(phydev, event, 0xFFFFFFFF, 0);
- /* And then reload once it recheas the target */
- val = lanphy_read_page_reg(phydev, 4, LAN8814_PTP_GENERAL_CONFIG);
- val |= LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event);
- lanphy_write_page_reg(phydev, 4, LAN8814_PTP_GENERAL_CONFIG, val);
+ /* And then reload once it reaches the target */
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, LAN8814_PTP_GENERAL_CONFIG,
+ LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event),
+ LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event));
}
static void lan8814_ptp_perout_off(struct phy_device *phydev, int pin)
{
- u16 val;
-
/* Disable gpio alternate function,
* 1: select as gpio,
* 0: select alt func
*/
- val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin));
- val |= LAN8814_GPIO_EN_BIT(pin);
- lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin), val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_GPIO_EN_ADDR(pin),
+ LAN8814_GPIO_EN_BIT(pin),
+ LAN8814_GPIO_EN_BIT(pin));
- val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin));
- val &= ~LAN8814_GPIO_DIR_BIT(pin);
- lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin), val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_GPIO_DIR_ADDR(pin),
+ LAN8814_GPIO_DIR_BIT(pin),
+ 0);
- val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_BUF_ADDR(pin));
- val &= ~LAN8814_GPIO_BUF_BIT(pin);
- lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_BUF_ADDR(pin), val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_GPIO_BUF_ADDR(pin),
+ LAN8814_GPIO_BUF_BIT(pin),
+ 0);
}
static void lan8814_ptp_perout_on(struct phy_device *phydev, int pin)
{
- int val;
-
/* Set as gpio output */
- val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin));
- val |= LAN8814_GPIO_DIR_BIT(pin);
- lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin), val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_GPIO_DIR_ADDR(pin),
+ LAN8814_GPIO_DIR_BIT(pin),
+ LAN8814_GPIO_DIR_BIT(pin));
/* Enable gpio 0:for alternate function, 1:gpio */
- val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin));
- val &= ~LAN8814_GPIO_EN_BIT(pin);
- lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin), val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_GPIO_EN_ADDR(pin),
+ LAN8814_GPIO_EN_BIT(pin),
+ 0);
/* Set buffer type to push pull */
- val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_BUF_ADDR(pin));
- val |= LAN8814_GPIO_BUF_BIT(pin);
- lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_BUF_ADDR(pin), val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_GPIO_BUF_ADDR(pin),
+ LAN8814_GPIO_BUF_BIT(pin),
+ LAN8814_GPIO_BUF_BIT(pin));
}
static int lan8814_ptp_perout(struct ptp_clock_info *ptpci,
@@ -3138,10 +3721,6 @@ static int lan8814_ptp_perout(struct ptp_clock_info *ptpci,
int pulse_width;
int pin, event;
- /* Reject requests with unsupported flags */
- if (rq->perout.flags & ~PTP_PEROUT_DUTY_CYCLE)
- return -EOPNOTSUPP;
-
mutex_lock(&shared->shared_lock);
event = rq->perout.index;
pin = ptp_find_pin(shared->ptp_clock, PTP_PF_PEROUT, event);
@@ -3243,61 +3822,64 @@ static int lan8814_ptp_perout(struct ptp_clock_info *ptpci,
static void lan8814_ptp_extts_on(struct phy_device *phydev, int pin, u32 flags)
{
- u16 tmp;
-
/* Set as gpio input */
- tmp = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin));
- tmp &= ~LAN8814_GPIO_DIR_BIT(pin);
- lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin), tmp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_GPIO_DIR_ADDR(pin),
+ LAN8814_GPIO_DIR_BIT(pin),
+ 0);
/* Map the pin to ltc pin 0 of the capture map registers */
- tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_MAP_LO);
- tmp |= pin;
- lanphy_write_page_reg(phydev, 4, PTP_GPIO_CAP_MAP_LO, tmp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_CAP_MAP_LO, pin, pin);
/* Enable capture on the edges of the ltc pin */
- tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_EN);
if (flags & PTP_RISING_EDGE)
- tmp |= PTP_GPIO_CAP_EN_GPIO_RE_CAPTURE_ENABLE(0);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_CAP_EN,
+ PTP_GPIO_CAP_EN_GPIO_RE_CAPTURE_ENABLE(0),
+ PTP_GPIO_CAP_EN_GPIO_RE_CAPTURE_ENABLE(0));
if (flags & PTP_FALLING_EDGE)
- tmp |= PTP_GPIO_CAP_EN_GPIO_FE_CAPTURE_ENABLE(0);
- lanphy_write_page_reg(phydev, 4, PTP_GPIO_CAP_EN, tmp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_CAP_EN,
+ PTP_GPIO_CAP_EN_GPIO_FE_CAPTURE_ENABLE(0),
+ PTP_GPIO_CAP_EN_GPIO_FE_CAPTURE_ENABLE(0));
/* Enable interrupt top interrupt */
- tmp = lanphy_read_page_reg(phydev, 4, PTP_COMMON_INT_ENA);
- tmp |= PTP_COMMON_INT_ENA_GPIO_CAP_EN;
- lanphy_write_page_reg(phydev, 4, PTP_COMMON_INT_ENA, tmp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_COMMON_INT_ENA,
+ PTP_COMMON_INT_ENA_GPIO_CAP_EN,
+ PTP_COMMON_INT_ENA_GPIO_CAP_EN);
}
static void lan8814_ptp_extts_off(struct phy_device *phydev, int pin)
{
- u16 tmp;
-
/* Set as gpio out */
- tmp = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin));
- tmp |= LAN8814_GPIO_DIR_BIT(pin);
- lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin), tmp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_GPIO_DIR_ADDR(pin),
+ LAN8814_GPIO_DIR_BIT(pin),
+ LAN8814_GPIO_DIR_BIT(pin));
/* Enable alternate, 0:for alternate function, 1:gpio */
- tmp = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin));
- tmp &= ~LAN8814_GPIO_EN_BIT(pin);
- lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin), tmp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_GPIO_EN_ADDR(pin),
+ LAN8814_GPIO_EN_BIT(pin),
+ 0);
/* Clear the mapping of pin to registers 0 of the capture registers */
- tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_MAP_LO);
- tmp &= ~GENMASK(3, 0);
- lanphy_write_page_reg(phydev, 4, PTP_GPIO_CAP_MAP_LO, tmp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_CAP_MAP_LO,
+ GENMASK(3, 0),
+ 0);
/* Disable capture on both of the edges */
- tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_EN);
- tmp &= ~PTP_GPIO_CAP_EN_GPIO_RE_CAPTURE_ENABLE(pin);
- tmp &= ~PTP_GPIO_CAP_EN_GPIO_FE_CAPTURE_ENABLE(pin);
- lanphy_write_page_reg(phydev, 4, PTP_GPIO_CAP_EN, tmp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_GPIO_CAP_EN,
+ PTP_GPIO_CAP_EN_GPIO_RE_CAPTURE_ENABLE(pin) |
+ PTP_GPIO_CAP_EN_GPIO_FE_CAPTURE_ENABLE(pin),
+ 0);
/* Disable interrupt top interrupt */
- tmp = lanphy_read_page_reg(phydev, 4, PTP_COMMON_INT_ENA);
- tmp &= ~PTP_COMMON_INT_ENA_GPIO_CAP_EN;
- lanphy_write_page_reg(phydev, 4, PTP_COMMON_INT_ENA, tmp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_COMMON_INT_ENA,
+ PTP_COMMON_INT_ENA_GPIO_CAP_EN,
+ 0);
}
static int lan8814_ptp_extts(struct ptp_clock_info *ptpci,
@@ -3308,11 +3890,6 @@ static int lan8814_ptp_extts(struct ptp_clock_info *ptpci,
struct phy_device *phydev = shared->phydev;
int pin;
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_EXTTS_EDGES |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
pin = ptp_find_pin(shared->ptp_clock, PTP_PF_EXTTS,
rq->extts.index);
if (pin == -1 || pin != LAN8814_PTP_EXTTS_NUM)
@@ -3432,7 +4009,8 @@ static void lan8814_get_tx_ts(struct kszphy_ptp_priv *ptp_priv)
/* If other timestamps are available in the FIFO,
* process them.
*/
- reg = lanphy_read_page_reg(phydev, 5, PTP_CAP_INFO);
+ reg = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_CAP_INFO);
} while (PTP_CAP_INFO_TX_TS_CNT_GET_(reg) > 0);
}
@@ -3505,7 +4083,8 @@ static void lan8814_get_rx_ts(struct kszphy_ptp_priv *ptp_priv)
/* If other timestamps are available in the FIFO,
* process them.
*/
- reg = lanphy_read_page_reg(phydev, 5, PTP_CAP_INFO);
+ reg = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_CAP_INFO);
} while (PTP_CAP_INFO_RX_TS_CNT_GET_(reg) > 0);
}
@@ -3542,31 +4121,40 @@ static int lan8814_gpio_process_cap(struct lan8814_shared_priv *shared)
/* This is 0 because whatever was the input pin it was mapped it to
* ltc gpio pin 0
*/
- tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_SEL);
- tmp |= PTP_GPIO_SEL_GPIO_SEL(0);
- lanphy_write_page_reg(phydev, 4, PTP_GPIO_SEL, tmp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_GPIO_SEL,
+ PTP_GPIO_SEL_GPIO_SEL(0),
+ PTP_GPIO_SEL_GPIO_SEL(0));
- tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_STS);
+ tmp = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_CAP_STS);
if (!(tmp & PTP_GPIO_CAP_STS_PTP_GPIO_RE_STS(0)) &&
!(tmp & PTP_GPIO_CAP_STS_PTP_GPIO_FE_STS(0)))
return -1;
if (tmp & BIT(0)) {
- sec = lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_SEC_HI_CAP);
+ sec = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_RE_LTC_SEC_HI_CAP);
sec <<= 16;
- sec |= lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_SEC_LO_CAP);
+ sec |= lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_RE_LTC_SEC_LO_CAP);
- nsec = lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_NS_HI_CAP) & 0x3fff;
+ nsec = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_RE_LTC_NS_HI_CAP) & 0x3fff;
nsec <<= 16;
- nsec |= lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_NS_LO_CAP);
+ nsec |= lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_RE_LTC_NS_LO_CAP);
} else {
- sec = lanphy_read_page_reg(phydev, 4, PTP_GPIO_FE_LTC_SEC_HI_CAP);
+ sec = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_FE_LTC_SEC_HI_CAP);
sec <<= 16;
- sec |= lanphy_read_page_reg(phydev, 4, PTP_GPIO_FE_LTC_SEC_LO_CAP);
+ sec |= lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_FE_LTC_SEC_LO_CAP);
- nsec = lanphy_read_page_reg(phydev, 4, PTP_GPIO_FE_LTC_NS_HI_CAP) & 0x3fff;
+ nsec = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_FE_LTC_NS_HI_CAP) & 0x3fff;
nsec <<= 16;
- nsec |= lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_NS_LO_CAP);
+ nsec |= lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_RE_LTC_NS_LO_CAP);
}
ptp_event.index = 0;
@@ -3579,7 +4167,7 @@ static int lan8814_gpio_process_cap(struct lan8814_shared_priv *shared)
static int lan8814_handle_gpio_interrupt(struct phy_device *phydev, u16 status)
{
- struct lan8814_shared_priv *shared = phydev->shared->priv;
+ struct lan8814_shared_priv *shared = phy_package_get_priv(phydev);
int ret;
mutex_lock(&shared->shared_lock);
@@ -3591,19 +4179,17 @@ static int lan8814_handle_gpio_interrupt(struct phy_device *phydev, u16 status)
static int lan8804_config_init(struct phy_device *phydev)
{
- int val;
-
/* MDI-X setting for swap A,B transmit */
- val = lanphy_read_page_reg(phydev, 2, LAN8804_ALIGN_SWAP);
- val &= ~LAN8804_ALIGN_TX_A_B_SWAP_MASK;
- val |= LAN8804_ALIGN_TX_A_B_SWAP;
- lanphy_write_page_reg(phydev, 2, LAN8804_ALIGN_SWAP, val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_PCS_DIGITAL, LAN8804_ALIGN_SWAP,
+ LAN8804_ALIGN_TX_A_B_SWAP_MASK,
+ LAN8804_ALIGN_TX_A_B_SWAP);
/* Make sure that the PHY will not stop generating the clock when the
* link partner goes down
*/
- lanphy_write_page_reg(phydev, 31, LAN8814_CLOCK_MANAGEMENT, 0x27e);
- lanphy_read_page_reg(phydev, 1, LAN8814_LINK_QUALITY);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_SYSTEM_CTRL,
+ LAN8814_CLOCK_MANAGEMENT, 0x27e);
+ lanphy_read_page_reg(phydev, LAN8814_PAGE_AFE_PMA, LAN8814_LINK_QUALITY);
return 0;
}
@@ -3668,6 +4254,17 @@ static int lan8804_config_intr(struct phy_device *phydev)
return 0;
}
+/* Check if the PHY has 1588 support. There are multiple SKUs of the PHY and
+ * some of them support PTP while others don't. This function returns true
+ * if the SKU supports it, otherwise false.
+ */
+static bool lan8814_has_ptp(struct phy_device *phydev)
+{
+ struct kszphy_priv *priv = phydev->priv;
+
+ return priv->is_ptp_available;
+}
+
static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
{
int ret = IRQ_NONE;
@@ -3684,8 +4281,12 @@ static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
ret = IRQ_HANDLED;
}
+ if (!lan8814_has_ptp(phydev))
+ return ret;
+
while (true) {
- irq_status = lanphy_read_page_reg(phydev, 5, PTP_TSU_INT_STS);
+ irq_status = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TSU_INT_STS);
if (!irq_status)
break;
@@ -3713,7 +4314,7 @@ static int lan8814_config_intr(struct phy_device *phydev)
{
int err;
- lanphy_write_page_reg(phydev, 4, LAN8814_INTR_CTRL_REG,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, LAN8814_INTR_CTRL_REG,
LAN8814_INTR_CTRL_REG_POLARITY |
LAN8814_INTR_CTRL_REG_INTR_ENABLE);
@@ -3739,35 +4340,44 @@ static void lan8814_ptp_init(struct phy_device *phydev)
{
struct kszphy_priv *priv = phydev->priv;
struct kszphy_ptp_priv *ptp_priv = &priv->ptp_priv;
- u32 temp;
if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK) ||
!IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING))
return;
- lanphy_write_page_reg(phydev, 5, TSU_HARD_RESET, TSU_HARD_RESET_);
+ if (!lan8814_has_ptp(phydev))
+ return;
+
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ TSU_HARD_RESET, TSU_HARD_RESET_);
- temp = lanphy_read_page_reg(phydev, 5, PTP_TX_MOD);
- temp |= PTP_TX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_;
- lanphy_write_page_reg(phydev, 5, PTP_TX_MOD, temp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_PORT_REGS, PTP_TX_MOD,
+ PTP_TX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_,
+ PTP_TX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_);
- temp = lanphy_read_page_reg(phydev, 5, PTP_RX_MOD);
- temp |= PTP_RX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_;
- lanphy_write_page_reg(phydev, 5, PTP_RX_MOD, temp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_PORT_REGS, PTP_RX_MOD,
+ PTP_RX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_,
+ PTP_RX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_);
- lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_CONFIG, 0);
- lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_CONFIG, 0);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_RX_PARSE_CONFIG, 0);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_PARSE_CONFIG, 0);
/* Removing default registers configs related to L2 and IP */
- lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_L2_ADDR_EN, 0);
- lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_L2_ADDR_EN, 0);
- lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_IP_ADDR_EN, 0);
- lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_IP_ADDR_EN, 0);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_PARSE_L2_ADDR_EN, 0);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_RX_PARSE_L2_ADDR_EN, 0);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_PARSE_IP_ADDR_EN, 0);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_RX_PARSE_IP_ADDR_EN, 0);
/* Disable checking for minorVersionPTP field */
- lanphy_write_page_reg(phydev, 5, PTP_RX_VERSION,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS, PTP_RX_VERSION,
PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0));
- lanphy_write_page_reg(phydev, 5, PTP_TX_VERSION,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS, PTP_TX_VERSION,
PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0));
skb_queue_head_init(&ptp_priv->tx_queue);
@@ -3779,7 +4389,7 @@ static void lan8814_ptp_init(struct phy_device *phydev)
ptp_priv->mii_ts.rxtstamp = lan8814_rxtstamp;
ptp_priv->mii_ts.txtstamp = lan8814_txtstamp;
- ptp_priv->mii_ts.hwtstamp = lan8814_hwtstamp;
+ ptp_priv->mii_ts.hwtstamp_set = lan8814_hwtstamp_set;
ptp_priv->mii_ts.ts_info = lan8814_ts_info;
phydev->mii_ts = &ptp_priv->mii_ts;
@@ -3788,26 +4398,29 @@ static void lan8814_ptp_init(struct phy_device *phydev)
phydev->default_timestamp = true;
}
-static int lan8814_ptp_probe_once(struct phy_device *phydev)
+static int __lan8814_ptp_probe_once(struct phy_device *phydev, char *pin_name,
+ int gpios)
{
- struct lan8814_shared_priv *shared = phydev->shared->priv;
+ struct lan8814_shared_priv *shared = phy_package_get_priv(phydev);
+
+ shared->phydev = phydev;
/* Initialise shared lock for clock*/
mutex_init(&shared->shared_lock);
shared->pin_config = devm_kmalloc_array(&phydev->mdio.dev,
- LAN8814_PTP_GPIO_NUM,
+ gpios,
sizeof(*shared->pin_config),
GFP_KERNEL);
if (!shared->pin_config)
return -ENOMEM;
- for (int i = 0; i < LAN8814_PTP_GPIO_NUM; i++) {
+ for (int i = 0; i < gpios; i++) {
struct ptp_pin_desc *ptp_pin = &shared->pin_config[i];
memset(ptp_pin, 0, sizeof(*ptp_pin));
snprintf(ptp_pin->name,
- sizeof(ptp_pin->name), "lan8814_ptp_pin_%02d", i);
+ sizeof(ptp_pin->name), "%s_%02d", pin_name, i);
ptp_pin->index = i;
ptp_pin->func = PTP_PF_NONE;
}
@@ -3817,8 +4430,12 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev)
shared->ptp_clock_info.max_adj = 31249999;
shared->ptp_clock_info.n_alarm = 0;
shared->ptp_clock_info.n_ext_ts = LAN8814_PTP_EXTTS_NUM;
- shared->ptp_clock_info.n_pins = LAN8814_PTP_GPIO_NUM;
+ shared->ptp_clock_info.n_pins = gpios;
shared->ptp_clock_info.pps = 0;
+ shared->ptp_clock_info.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
+ shared->ptp_clock_info.supported_perout_flags = PTP_PEROUT_DUTY_CYCLE;
shared->ptp_clock_info.pin_config = shared->pin_config;
shared->ptp_clock_info.n_per_out = LAN8814_PTP_PEROUT_NUM;
shared->ptp_clock_info.adjfine = lan8814_ptpci_adjfine;
@@ -3832,8 +4449,8 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev)
shared->ptp_clock = ptp_clock_register(&shared->ptp_clock_info,
&phydev->mdio.dev);
if (IS_ERR(shared->ptp_clock)) {
- phydev_err(phydev, "ptp_clock_register failed %lu\n",
- PTR_ERR(shared->ptp_clock));
+ phydev_err(phydev, "ptp_clock_register failed %pe\n",
+ shared->ptp_clock);
return -EINVAL;
}
@@ -3843,55 +4460,72 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev)
phydev_dbg(phydev, "successfully registered ptp clock\n");
- shared->phydev = phydev;
-
/* The EP.4 is shared between all the PHYs in the package and also it
* can be accessed by any of the PHYs
*/
- lanphy_write_page_reg(phydev, 4, LTC_HARD_RESET, LTC_HARD_RESET_);
- lanphy_write_page_reg(phydev, 4, PTP_OPERATING_MODE,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LTC_HARD_RESET, LTC_HARD_RESET_);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_OPERATING_MODE,
PTP_OPERATING_MODE_STANDALONE_);
/* Enable ptp to run LTC clock for ptp and gpio 1PPS operation */
- lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_ENABLE_);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_CMD_CTL,
+ PTP_CMD_CTL_PTP_ENABLE_);
return 0;
}
+static int lan8814_ptp_probe_once(struct phy_device *phydev)
+{
+ if (!lan8814_has_ptp(phydev))
+ return 0;
+
+ return __lan8814_ptp_probe_once(phydev, "lan8814_ptp_pin",
+ LAN8814_PTP_GPIO_NUM);
+}
+
static void lan8814_setup_led(struct phy_device *phydev, int val)
{
int temp;
- temp = lanphy_read_page_reg(phydev, 5, LAN8814_LED_CTRL_1);
+ temp = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ LAN8814_LED_CTRL_1);
if (val)
temp |= LAN8814_LED_CTRL_1_KSZ9031_LED_MODE_;
else
temp &= ~LAN8814_LED_CTRL_1_KSZ9031_LED_MODE_;
- lanphy_write_page_reg(phydev, 5, LAN8814_LED_CTRL_1, temp);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ LAN8814_LED_CTRL_1, temp);
}
static int lan8814_config_init(struct phy_device *phydev)
{
struct kszphy_priv *lan8814 = phydev->priv;
- int val;
+ int ret;
- /* Reset the PHY */
- val = lanphy_read_page_reg(phydev, 4, LAN8814_QSGMII_SOFT_RESET);
- val |= LAN8814_QSGMII_SOFT_RESET_BIT;
- lanphy_write_page_reg(phydev, 4, LAN8814_QSGMII_SOFT_RESET, val);
+ /* Based on the interface type select how the advertise ability is
+ * encoded, to set as SGMII or as USGMII.
+ */
+ if (phydev->interface == PHY_INTERFACE_MODE_QSGMII)
+ ret = lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_QSGMII_TX_CONFIG,
+ LAN8814_QSGMII_TX_CONFIG_QSGMII,
+ LAN8814_QSGMII_TX_CONFIG_QSGMII);
+ else
+ ret = lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_QSGMII_TX_CONFIG,
+ LAN8814_QSGMII_TX_CONFIG_QSGMII,
+ 0);
- /* Disable ANEG with QSGMII PCS Host side */
- val = lanphy_read_page_reg(phydev, 5, LAN8814_QSGMII_PCS1G_ANEG_CONFIG);
- val &= ~LAN8814_QSGMII_PCS1G_ANEG_CONFIG_ANEG_ENA;
- lanphy_write_page_reg(phydev, 5, LAN8814_QSGMII_PCS1G_ANEG_CONFIG, val);
+ if (ret < 0)
+ return ret;
/* MDI-X setting for swap A,B transmit */
- val = lanphy_read_page_reg(phydev, 2, LAN8814_ALIGN_SWAP);
- val &= ~LAN8814_ALIGN_TX_A_B_SWAP_MASK;
- val |= LAN8814_ALIGN_TX_A_B_SWAP;
- lanphy_write_page_reg(phydev, 2, LAN8814_ALIGN_SWAP, val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_PCS_DIGITAL, LAN8814_ALIGN_SWAP,
+ LAN8814_ALIGN_TX_A_B_SWAP_MASK,
+ LAN8814_ALIGN_TX_A_B_SWAP);
if (lan8814->led_mode >= 0)
lan8814_setup_led(phydev, lan8814->led_mode);
@@ -3922,29 +4556,24 @@ static int lan8814_release_coma_mode(struct phy_device *phydev)
static void lan8814_clear_2psp_bit(struct phy_device *phydev)
{
- u16 val;
-
/* It was noticed that when traffic is passing through the PHY and the
- * cable is removed then the LED was still one even though there is no
+ * cable is removed then the LED was still on even though there is no
* link
*/
- val = lanphy_read_page_reg(phydev, 2, LAN8814_EEE_STATE);
- val &= ~LAN8814_EEE_STATE_MASK2P5P;
- lanphy_write_page_reg(phydev, 2, LAN8814_EEE_STATE, val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_PCS_DIGITAL, LAN8814_EEE_STATE,
+ LAN8814_EEE_STATE_MASK2P5P,
+ 0);
}
static void lan8814_update_meas_time(struct phy_device *phydev)
{
- u16 val;
-
/* By setting the measure time to a value of 0xb this will allow cables
* longer than 100m to be used. This configuration can be used
* regardless of the mode of operation of the PHY
*/
- val = lanphy_read_page_reg(phydev, 1, LAN8814_PD_CONTROLS);
- val &= ~LAN8814_PD_CONTROLS_PD_MEAS_TIME_MASK;
- val |= LAN8814_PD_CONTROLS_PD_MEAS_TIME_VAL;
- lanphy_write_page_reg(phydev, 1, LAN8814_PD_CONTROLS, val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_AFE_PMA, LAN8814_PD_CONTROLS,
+ LAN8814_PD_CONTROLS_PD_MEAS_TIME_MASK,
+ LAN8814_PD_CONTROLS_PD_MEAS_TIME_VAL);
}
static int lan8814_probe(struct phy_device *phydev)
@@ -3967,11 +4596,29 @@ static int lan8814_probe(struct phy_device *phydev)
/* Strap-in value for PHY address, below register read gives starting
* phy address value
*/
- addr = lanphy_read_page_reg(phydev, 4, 0) & 0x1F;
+ addr = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, 0) & 0x1F;
devm_phy_package_join(&phydev->mdio.dev, phydev,
addr, sizeof(struct lan8814_shared_priv));
+ /* There are lan8814 SKUs that don't support PTP. Make sure that for
+ * those SKUs no PTP device is created. Here we check if the SKU
+ * supports PTP.
+ */
+ err = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_SKUS);
+ if (err < 0)
+ return err;
+
+ priv->is_ptp_available = err == LAN8814_REV_LAN8814 ||
+ err == LAN8814_REV_LAN8818;
+
if (phy_package_init_once(phydev)) {
+ /* Reset the PHY */
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_QSGMII_SOFT_RESET,
+ LAN8814_QSGMII_SOFT_RESET_BIT,
+ LAN8814_QSGMII_SOFT_RESET_BIT);
+
err = lan8814_release_coma_mode(phydev);
if (err)
return err;
@@ -4068,7 +4715,7 @@ static int lan8841_config_init(struct phy_device *phydev)
phy_write_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
LAN8841_PTP_TX_VERSION, 0xff00);
- /* 100BT Clause 40 improvenent errata */
+ /* 100BT Clause 40 improvement errata */
phy_write_mmd(phydev, LAN8841_MMD_ANALOG_REG,
LAN8841_ANALOG_CONTROL_1,
LAN8841_ANALOG_CONTROL_1_PLL_TRIM(0x2));
@@ -4395,9 +5042,9 @@ static void lan8841_ptp_enable_processing(struct kszphy_ptp_priv *ptp_priv,
#define LAN8841_PTP_TX_TIMESTAMP_EN 443
#define LAN8841_PTP_TX_MOD 445
-static int lan8841_hwtstamp(struct mii_timestamper *mii_ts,
- struct kernel_hwtstamp_config *config,
- struct netlink_ext_ack *extack)
+static int lan8841_hwtstamp_set(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct kszphy_ptp_priv *ptp_priv = container_of(mii_ts, struct kszphy_ptp_priv, mii_ts);
struct phy_device *phydev = ptp_priv->phydev;
@@ -4970,9 +5617,6 @@ static int lan8841_ptp_perout(struct ptp_clock_info *ptp,
int pin;
int ret;
- if (rq->perout.flags & ~PTP_PEROUT_DUTY_CYCLE)
- return -EOPNOTSUPP;
-
pin = ptp_find_pin(ptp_priv->ptp_clock, PTP_PF_PEROUT, rq->perout.index);
if (pin == -1 || pin >= LAN8841_PTP_GPIO_NUM)
return -EINVAL;
@@ -5091,7 +5735,7 @@ static int lan8841_ptp_extts_on(struct kszphy_ptp_priv *ptp_priv, int pin,
u16 tmp = 0;
int ret;
- /* Set GPIO to be intput */
+ /* Set GPIO to be input */
ret = phy_set_bits_mmd(phydev, 2, LAN8841_GPIO_EN, BIT(pin));
if (ret)
return ret;
@@ -5216,6 +5860,7 @@ static struct ptp_clock_info lan8841_ptp_clock_info = {
.n_per_out = LAN8841_PTP_GPIO_NUM,
.n_ext_ts = LAN8841_PTP_GPIO_NUM,
.n_pins = LAN8841_PTP_GPIO_NUM,
+ .supported_perout_flags = PTP_PEROUT_DUTY_CYCLE,
};
#define LAN8841_OPERATION_MODE_STRAP_LOW_REGISTER 3
@@ -5263,8 +5908,8 @@ static int lan8841_probe(struct phy_device *phydev)
ptp_priv->ptp_clock = ptp_clock_register(&ptp_priv->ptp_clock_info,
&phydev->mdio.dev);
if (IS_ERR(ptp_priv->ptp_clock)) {
- phydev_err(phydev, "ptp_clock_register failed: %lu\n",
- PTR_ERR(ptp_priv->ptp_clock));
+ phydev_err(phydev, "ptp_clock_register failed: %pe\n",
+ ptp_priv->ptp_clock);
return -EINVAL;
}
@@ -5279,7 +5924,7 @@ static int lan8841_probe(struct phy_device *phydev)
ptp_priv->mii_ts.rxtstamp = lan8841_rxtstamp;
ptp_priv->mii_ts.txtstamp = lan8814_txtstamp;
- ptp_priv->mii_ts.hwtstamp = lan8841_hwtstamp;
+ ptp_priv->mii_ts.hwtstamp_set = lan8841_hwtstamp_set;
ptp_priv->mii_ts.ts_info = lan8841_ts_info;
phydev->mii_ts = &ptp_priv->mii_ts;
@@ -5290,6 +5935,21 @@ static int lan8841_probe(struct phy_device *phydev)
return 0;
}
+static int lan8804_resume(struct phy_device *phydev)
+{
+ return kszphy_resume(phydev);
+}
+
+static int lan8804_suspend(struct phy_device *phydev)
+{
+ return kszphy_generic_suspend(phydev);
+}
+
+static int lan8841_resume(struct phy_device *phydev)
+{
+ return kszphy_generic_resume(phydev);
+}
+
static int lan8841_suspend(struct phy_device *phydev)
{
struct kszphy_priv *priv = phydev->priv;
@@ -5298,13 +5958,521 @@ static int lan8841_suspend(struct phy_device *phydev)
if (ptp_priv->ptp_clock)
ptp_cancel_worker_sync(ptp_priv->ptp_clock);
- return genphy_suspend(phydev);
+ return kszphy_generic_suspend(phydev);
+}
+
+static int ksz9131_resume(struct phy_device *phydev)
+{
+ if (phydev->suspended && phy_interface_is_rgmii(phydev))
+ ksz9131_config_rgmii_delay(phydev);
+
+ return kszphy_resume(phydev);
+}
+
+#define LAN8842_PTP_GPIO_NUM 16
+
+static int lan8842_ptp_probe_once(struct phy_device *phydev)
+{
+ return __lan8814_ptp_probe_once(phydev, "lan8842_ptp_pin",
+ LAN8842_PTP_GPIO_NUM);
+}
+
+#define LAN8842_STRAP_REG 0 /* 0x0 */
+#define LAN8842_STRAP_REG_PHYADDR_MASK GENMASK(4, 0)
+#define LAN8842_SKU_REG 11 /* 0x0b */
+#define LAN8842_SELF_TEST 14 /* 0x0e */
+#define LAN8842_SELF_TEST_RX_CNT_ENA BIT(8)
+#define LAN8842_SELF_TEST_TX_CNT_ENA BIT(4)
+
+static int lan8842_probe(struct phy_device *phydev)
+{
+ struct lan8842_priv *priv;
+ int addr;
+ int ret;
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ /* Similar to lan8814, this PHY has a pin which needs to be pulled down
+ * to allow any traffic to pass through it. Therefore use the same
+ * function as lan8814.
+ */
+ ret = lan8814_release_coma_mode(phydev);
+ if (ret)
+ return ret;
+
+ /* Enable to count the RX and TX packets */
+ ret = lanphy_write_page_reg(phydev, LAN8814_PAGE_PCS_DIGITAL,
+ LAN8842_SELF_TEST,
+ LAN8842_SELF_TEST_RX_CNT_ENA |
+ LAN8842_SELF_TEST_TX_CNT_ENA);
+ if (ret < 0)
+ return ret;
+
+ /* Revision lan8832 doesn't support PTP, therefore don't add
+ * any PTP clocks
+ */
+ ret = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8842_SKU_REG);
+ if (ret < 0)
+ return ret;
+
+ priv->rev = ret;
+ if (priv->rev == LAN8842_REV_8832)
+ return 0;
+
+ /* The lan8814 and lan8842 share the same IP for the PTP block, the
+ * only difference being the number of GPIOs. Therefore make sure that
+ * the lan8842 also initializes the shared data pointer, as this is used
+ * in all the lan8814 PTP functions. The lan8842 doesn't have multiple
+ * PHYs in the same package.
+ */
+ addr = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8842_STRAP_REG);
+ if (addr < 0)
+ return addr;
+ addr &= LAN8842_STRAP_REG_PHYADDR_MASK;
+
+ ret = devm_phy_package_join(&phydev->mdio.dev, phydev, addr,
+ sizeof(struct lan8814_shared_priv));
+ if (ret)
+ return ret;
+
+ if (phy_package_init_once(phydev)) {
+ ret = lan8842_ptp_probe_once(phydev);
+ if (ret)
+ return ret;
+ }
+
+ lan8814_ptp_init(phydev);
+
+ return 0;
+}
+
+#define LAN8814_POWER_MGMT_MODE_3_ANEG_MDI 0x13
+#define LAN8814_POWER_MGMT_MODE_4_ANEG_MDIX 0x14
+#define LAN8814_POWER_MGMT_MODE_5_10BT_MDI 0x15
+#define LAN8814_POWER_MGMT_MODE_6_10BT_MDIX 0x16
+#define LAN8814_POWER_MGMT_MODE_7_100BT_TRAIN 0x17
+#define LAN8814_POWER_MGMT_MODE_8_100BT_MDI 0x18
+#define LAN8814_POWER_MGMT_MODE_9_100BT_EEE_MDI_TX 0x19
+#define LAN8814_POWER_MGMT_MODE_10_100BT_EEE_MDI_RX 0x1a
+#define LAN8814_POWER_MGMT_MODE_11_100BT_MDIX 0x1b
+#define LAN8814_POWER_MGMT_MODE_12_100BT_EEE_MDIX_TX 0x1c
+#define LAN8814_POWER_MGMT_MODE_13_100BT_EEE_MDIX_RX 0x1d
+#define LAN8814_POWER_MGMT_MODE_14_100BTX_EEE_TX_RX 0x1e
+
+#define LAN8814_POWER_MGMT_DLLPD_D BIT(0)
+#define LAN8814_POWER_MGMT_ADCPD_D BIT(1)
+#define LAN8814_POWER_MGMT_PGAPD_D BIT(2)
+#define LAN8814_POWER_MGMT_TXPD_D BIT(3)
+#define LAN8814_POWER_MGMT_DLLPD_C BIT(4)
+#define LAN8814_POWER_MGMT_ADCPD_C BIT(5)
+#define LAN8814_POWER_MGMT_PGAPD_C BIT(6)
+#define LAN8814_POWER_MGMT_TXPD_C BIT(7)
+#define LAN8814_POWER_MGMT_DLLPD_B BIT(8)
+#define LAN8814_POWER_MGMT_ADCPD_B BIT(9)
+#define LAN8814_POWER_MGMT_PGAPD_B BIT(10)
+#define LAN8814_POWER_MGMT_TXPD_B BIT(11)
+#define LAN8814_POWER_MGMT_DLLPD_A BIT(12)
+#define LAN8814_POWER_MGMT_ADCPD_A BIT(13)
+#define LAN8814_POWER_MGMT_PGAPD_A BIT(14)
+#define LAN8814_POWER_MGMT_TXPD_A BIT(15)
+
+#define LAN8814_POWER_MGMT_C_D (LAN8814_POWER_MGMT_DLLPD_D | \
+ LAN8814_POWER_MGMT_ADCPD_D | \
+ LAN8814_POWER_MGMT_PGAPD_D | \
+ LAN8814_POWER_MGMT_DLLPD_C | \
+ LAN8814_POWER_MGMT_ADCPD_C | \
+ LAN8814_POWER_MGMT_PGAPD_C)
+
+#define LAN8814_POWER_MGMT_B_C_D (LAN8814_POWER_MGMT_C_D | \
+ LAN8814_POWER_MGMT_DLLPD_B | \
+ LAN8814_POWER_MGMT_ADCPD_B | \
+ LAN8814_POWER_MGMT_PGAPD_B)
+
+#define LAN8814_POWER_MGMT_VAL1 (LAN8814_POWER_MGMT_C_D | \
+ LAN8814_POWER_MGMT_ADCPD_B | \
+ LAN8814_POWER_MGMT_PGAPD_B | \
+ LAN8814_POWER_MGMT_ADCPD_A | \
+ LAN8814_POWER_MGMT_PGAPD_A)
+
+#define LAN8814_POWER_MGMT_VAL2 LAN8814_POWER_MGMT_C_D
+
+#define LAN8814_POWER_MGMT_VAL3 (LAN8814_POWER_MGMT_C_D | \
+ LAN8814_POWER_MGMT_DLLPD_B | \
+ LAN8814_POWER_MGMT_ADCPD_B | \
+ LAN8814_POWER_MGMT_PGAPD_A)
+
+#define LAN8814_POWER_MGMT_VAL4 (LAN8814_POWER_MGMT_B_C_D | \
+ LAN8814_POWER_MGMT_ADCPD_A | \
+ LAN8814_POWER_MGMT_PGAPD_A)
+
+#define LAN8814_POWER_MGMT_VAL5 LAN8814_POWER_MGMT_B_C_D
+
+#define LAN8814_EEE_WAKE_TX_TIMER 0x0e
+#define LAN8814_EEE_WAKE_TX_TIMER_MAX_VAL 0x1f
+
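+/* Power-management register values written by lan8842_erratas(); one entry
+ * per analog power-management mode defined above.
+ */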
+static const struct lanphy_reg_data short_center_tap_errata[] = {
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_3_ANEG_MDI,
+ LAN8814_POWER_MGMT_VAL1 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_4_ANEG_MDIX,
+ LAN8814_POWER_MGMT_VAL1 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_5_10BT_MDI,
+ LAN8814_POWER_MGMT_VAL1 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_6_10BT_MDIX,
+ LAN8814_POWER_MGMT_VAL1 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_7_100BT_TRAIN,
+ LAN8814_POWER_MGMT_VAL2 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_8_100BT_MDI,
+ LAN8814_POWER_MGMT_VAL3 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_9_100BT_EEE_MDI_TX,
+ LAN8814_POWER_MGMT_VAL3 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_10_100BT_EEE_MDI_RX,
+ LAN8814_POWER_MGMT_VAL4 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_11_100BT_MDIX,
+ LAN8814_POWER_MGMT_VAL5 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_12_100BT_EEE_MDIX_TX,
+ LAN8814_POWER_MGMT_VAL5 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_13_100BT_EEE_MDIX_RX,
+ LAN8814_POWER_MGMT_VAL4 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_14_100BTX_EEE_TX_RX,
+ LAN8814_POWER_MGMT_VAL4 },
+};
+
+static const struct lanphy_reg_data waketx_timer_errata[] = {
+ { LAN8814_PAGE_EEE,
+ LAN8814_EEE_WAKE_TX_TIMER,
+ LAN8814_EEE_WAKE_TX_TIMER_MAX_VAL },
+};
+
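+/* Write a table of page/register/value entries, stopping at the first
+ * failed write.
+ */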
+static int lanphy_write_reg_data(struct phy_device *phydev,
+ const struct lanphy_reg_data *data,
+ size_t num)
+{
+ int ret = 0;
+
+ while (num--) {
+ ret = lanphy_write_page_reg(phydev, data->page, data->addr,
+ data->val);
+ if (ret)
+ break;
+
+ data++;
+ }
+
+ return ret;
+}
+
+static int lan8842_erratas(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = lanphy_write_reg_data(phydev, short_center_tap_errata,
+ ARRAY_SIZE(short_center_tap_errata));
+ if (ret)
+ return ret;
+
+ return lanphy_write_reg_data(phydev, waketx_timer_errata,
+ ARRAY_SIZE(waketx_timer_errata));
+}
+
+static int lan8842_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Reset the PHY */
+ ret = lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_QSGMII_SOFT_RESET,
+ LAN8814_QSGMII_SOFT_RESET_BIT,
+ LAN8814_QSGMII_SOFT_RESET_BIT);
+ if (ret < 0)
+ return ret;
+
+ /* Apply the erratas for this device */
+ ret = lan8842_erratas(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Even if the GPIOs are set to control the LEDs, the behaviour of the
+ * LEDs is wrong: they do not blink when there is traffic.
+ * To fix this, the extended LED mode must be selected.
+ */
+ ret = lanphy_modify_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ LAN8814_LED_CTRL_1,
+ LAN8814_LED_CTRL_1_KSZ9031_LED_MODE_, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = lanphy_modify_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ LAN8814_LED_CTRL_2,
+ LAN8814_LED_CTRL_2_LED1_COM_DIS,
+ LAN8814_LED_CTRL_2_LED1_COM_DIS);
+ if (ret < 0)
+ return ret;
+
+ /* To allow the PHY to control the LEDs, the GPIOs of the PHY should be
+ * set to function mode and not GPIO mode. Apparently, by default the
+ * value is GPIO and not function, even though the datasheet says it is
+ * function. Therefore set this value.
+ */
+ return lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_GPIO_EN2, 0);
+}
+
+#define LAN8842_INTR_CTRL_REG 52 /* 0x34 */
+
+static int lan8842_config_intr(struct phy_device *phydev)
+{
+ int err;
+
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8842_INTR_CTRL_REG,
+ LAN8814_INTR_CTRL_REG_INTR_ENABLE);
+
+ /* enable / disable interrupts */
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = lan8814_ack_interrupt(phydev);
+ if (err)
+ return err;
+
+ err = phy_write(phydev, LAN8814_INTC,
+ LAN8814_INT_LINK | LAN8814_INT_FLF);
+ } else {
+ err = phy_write(phydev, LAN8814_INTC, 0);
+ if (err)
+ return err;
+
+ err = lan8814_ack_interrupt(phydev);
+ }
+
+ return err;
+}
+
+static unsigned int lan8842_inband_caps(struct phy_device *phydev,
+ phy_interface_t interface)
+{
+ /* Inband configuration can be enabled or disabled using the
+ * PCS1G_ANEG_CONFIG register.
+ */
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+}
+
+static int lan8842_config_inband(struct phy_device *phydev, unsigned int modes)
+{
+ bool enable;
+
+ if (modes == LINK_INBAND_DISABLE)
+ enable = false;
+ else
+ enable = true;
+
+ /* Disable or enable in-band autoneg with the PCS host side.
+ * The register has the same address as on the lan8814
+ */
+ return lanphy_modify_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ LAN8814_QSGMII_PCS1G_ANEG_CONFIG,
+ LAN8814_QSGMII_PCS1G_ANEG_CONFIG_ANEG_ENA,
+ enable ? LAN8814_QSGMII_PCS1G_ANEG_CONFIG_ANEG_ENA : 0);
+}
+
+static void lan8842_handle_ptp_interrupt(struct phy_device *phydev, u16 status)
+{
+ struct kszphy_ptp_priv *ptp_priv;
+ struct lan8842_priv *priv;
+
+ priv = phydev->priv;
+ ptp_priv = &priv->ptp_priv;
+
+ if (status & PTP_TSU_INT_STS_PTP_TX_TS_EN_)
+ lan8814_get_tx_ts(ptp_priv);
+
+ if (status & PTP_TSU_INT_STS_PTP_RX_TS_EN_)
+ lan8814_get_rx_ts(ptp_priv);
+
+ if (status & PTP_TSU_INT_STS_PTP_TX_TS_OVRFL_INT_) {
+ lan8814_flush_fifo(phydev, true);
+ skb_queue_purge(&ptp_priv->tx_queue);
+ }
+
+ if (status & PTP_TSU_INT_STS_PTP_RX_TS_OVRFL_INT_) {
+ lan8814_flush_fifo(phydev, false);
+ skb_queue_purge(&ptp_priv->rx_queue);
+ }
+}
+
+static irqreturn_t lan8842_handle_interrupt(struct phy_device *phydev)
+{
+ struct lan8842_priv *priv = phydev->priv;
+ int ret = IRQ_NONE;
+ int irq_status;
+
+ irq_status = phy_read(phydev, LAN8814_INTS);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (irq_status & (LAN8814_INT_LINK | LAN8814_INT_FLF)) {
+ phy_trigger_machine(phydev);
+ ret = IRQ_HANDLED;
+ }
+
+ /* PHY revision lan8832 doesn't support PTP, therefore there is no need
+ * to check the PTP and GPIO interrupts
+ */
+ if (priv->rev == LAN8842_REV_8832)
+ goto out;
+
+ while (true) {
+ irq_status = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TSU_INT_STS);
+ if (!irq_status)
+ break;
+
+ lan8842_handle_ptp_interrupt(phydev, irq_status);
+ ret = IRQ_HANDLED;
+ }
+
+ if (!lan8814_handle_gpio_interrupt(phydev, irq_status))
+ ret = IRQ_HANDLED;
+
+out:
+ return ret;
+}
+
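+/* Read a counter that is split across several 16-bit registers on the PCS
+ * digital page. The register list is ordered most-significant word first;
+ * each word is shifted into the 64-bit result. A failed read returns
+ * U64_MAX so the caller can detect an invalid snapshot.
+ */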
+static u64 lan8842_get_stat(struct phy_device *phydev, int count, int *regs)
+{
+ u64 ret = 0;
+ int val;
+
+ for (int j = 0; j < count; ++j) {
+ val = lanphy_read_page_reg(phydev, LAN8814_PAGE_PCS_DIGITAL,
+ regs[j]);
+ if (val < 0)
+ return U64_MAX;
+
+ ret <<= 16;
+ ret += val;
+ }
+ return ret;
+}
+
+static int lan8842_update_stats(struct phy_device *phydev)
+{
+ struct lan8842_priv *priv = phydev->priv;
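+ /* Counter register offsets on the PCS digital page, listed
+ * most-significant word first as expected by lan8842_get_stat()
+ */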
+ int rx_packets_regs[] = {88, 61, 60};
+ int rx_errors_regs[] = {63, 62};
+ int tx_packets_regs[] = {89, 85, 84};
+ int tx_errors_regs[] = {87, 86};
+
+ priv->phy_stats.rx_packets = lan8842_get_stat(phydev,
+ ARRAY_SIZE(rx_packets_regs),
+ rx_packets_regs);
+ priv->phy_stats.rx_errors = lan8842_get_stat(phydev,
+ ARRAY_SIZE(rx_errors_regs),
+ rx_errors_regs);
+ priv->phy_stats.tx_packets = lan8842_get_stat(phydev,
+ ARRAY_SIZE(tx_packets_regs),
+ tx_packets_regs);
+ priv->phy_stats.tx_errors = lan8842_get_stat(phydev,
+ ARRAY_SIZE(tx_errors_regs),
+ tx_errors_regs);
+
+ return 0;
+}
+
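+/* Fast link failure (FLF) control register, used to implement the
+ * ETHTOOL_PHY_FAST_LINK_DOWN tunable below.
+ */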
+#define LAN8842_FLF 15 /* 0x0f */
+#define LAN8842_FLF_ENA BIT(1)
+#define LAN8842_FLF_ENA_LINK_DOWN BIT(0)
+
+static int lan8842_get_fast_down(struct phy_device *phydev, u8 *msecs)
+{
+ int ret;
+
+ ret = lanphy_read_page_reg(phydev, LAN8814_PAGE_PCS, LAN8842_FLF);
+ if (ret < 0)
+ return ret;
+
+ if (ret & LAN8842_FLF_ENA)
+ *msecs = ETHTOOL_PHY_FAST_LINK_DOWN_ON;
+ else
+ *msecs = ETHTOOL_PHY_FAST_LINK_DOWN_OFF;
+
+ return 0;
+}
+
+static int lan8842_set_fast_down(struct phy_device *phydev, const u8 *msecs)
+{
+ u16 flf;
+
+ switch (*msecs) {
+ case ETHTOOL_PHY_FAST_LINK_DOWN_OFF:
+ flf = 0;
+ break;
+ case ETHTOOL_PHY_FAST_LINK_DOWN_ON:
+ flf = LAN8842_FLF_ENA | LAN8842_FLF_ENA_LINK_DOWN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return lanphy_modify_page_reg(phydev, LAN8814_PAGE_PCS,
+ LAN8842_FLF,
+ LAN8842_FLF_ENA |
+ LAN8842_FLF_ENA_LINK_DOWN, flf);
+}
+
+static int lan8842_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_FAST_LINK_DOWN:
+ return lan8842_get_fast_down(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int lan8842_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_FAST_LINK_DOWN:
+ return lan8842_set_fast_down(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void lan8842_get_phy_stats(struct phy_device *phydev,
+ struct ethtool_eth_phy_stats *eth_stats,
+ struct ethtool_phy_stats *stats)
+{
+ struct lan8842_priv *priv = phydev->priv;
+
+ stats->rx_packets = priv->phy_stats.rx_packets;
+ stats->rx_errors = priv->phy_stats.rx_errors;
+ stats->tx_packets = priv->phy_stats.tx_packets;
+ stats->tx_errors = priv->phy_stats.tx_errors;
}
static struct phy_driver ksphy_driver[] = {
{
- .phy_id = PHY_ID_KS8737,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_KS8737),
.name = "Micrel KS8737",
/* PHY_BASIC_FEATURES */
.driver_data = &ks8737_type,
@@ -5345,8 +6513,7 @@ static struct phy_driver ksphy_driver[] = {
.suspend = kszphy_suspend,
.resume = kszphy_resume,
}, {
- .phy_id = PHY_ID_KSZ8041,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_KSZ8041),
.name = "Micrel KSZ8041",
/* PHY_BASIC_FEATURES */
.driver_data = &ksz8041_type,
@@ -5358,12 +6525,10 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
- /* No suspend/resume callbacks because of errata DS80000700A,
- * receiver error following software power down.
- */
+ .suspend = ksz8041_suspend,
+ .resume = ksz8041_resume,
}, {
- .phy_id = PHY_ID_KSZ8041RNLI,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_KSZ8041RNLI),
.name = "Micrel KSZ8041RNLI",
/* PHY_BASIC_FEATURES */
.driver_data = &ksz8041_type,
@@ -5406,9 +6571,8 @@ static struct phy_driver ksphy_driver[] = {
.suspend = kszphy_suspend,
.resume = kszphy_resume,
}, {
- .phy_id = PHY_ID_KSZ8081,
+ PHY_ID_MATCH_MODEL(PHY_ID_KSZ8081),
.name = "Micrel KSZ8081 or KSZ8091",
- .phy_id_mask = MICREL_PHY_ID_MASK,
.flags = PHY_POLL_CABLE_TEST,
/* PHY_BASIC_FEATURES */
.driver_data = &ksz8081_type,
@@ -5427,16 +6591,15 @@ static struct phy_driver ksphy_driver[] = {
.cable_test_start = ksz886x_cable_test_start,
.cable_test_get_status = ksz886x_cable_test_get_status,
}, {
- .phy_id = PHY_ID_KSZ8061,
+ PHY_ID_MATCH_MODEL(PHY_ID_KSZ8061),
.name = "Micrel KSZ8061",
- .phy_id_mask = MICREL_PHY_ID_MASK,
/* PHY_BASIC_FEATURES */
.probe = kszphy_probe,
.config_init = ksz8061_config_init,
.soft_reset = genphy_soft_reset,
.config_intr = kszphy_config_intr,
.handle_interrupt = kszphy_handle_interrupt,
- .suspend = kszphy_suspend,
+ .suspend = ksz8061_suspend,
.resume = ksz8061_resume,
}, {
.phy_id = PHY_ID_KSZ9021,
@@ -5457,8 +6620,7 @@ static struct phy_driver ksphy_driver[] = {
.read_mmd = genphy_read_mmd_unsupported,
.write_mmd = genphy_write_mmd_unsupported,
}, {
- .phy_id = PHY_ID_KSZ9031,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_KSZ9031),
.name = "Micrel KSZ9031 Gigabit PHY",
.flags = PHY_POLL_CABLE_TEST,
.driver_data = &ksz9021_type,
@@ -5476,9 +6638,9 @@ static struct phy_driver ksphy_driver[] = {
.resume = kszphy_resume,
.cable_test_start = ksz9x31_cable_test_start,
.cable_test_get_status = ksz9x31_cable_test_get_status,
+ .set_loopback = ksz9031_set_loopback,
}, {
- .phy_id = PHY_ID_LAN8814,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_LAN8814),
.name = "Microchip INDY Gigabit Quad PHY",
.flags = PHY_POLL_CABLE_TEST,
.config_init = lan8814_config_init,
@@ -5492,12 +6654,13 @@ static struct phy_driver ksphy_driver[] = {
.suspend = genphy_suspend,
.resume = kszphy_resume,
.config_intr = lan8814_config_intr,
+ .inband_caps = lan8842_inband_caps,
+ .config_inband = lan8842_config_inband,
.handle_interrupt = lan8814_handle_interrupt,
.cable_test_start = lan8814_cable_test_start,
.cable_test_get_status = ksz886x_cable_test_get_status,
}, {
- .phy_id = PHY_ID_LAN8804,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_LAN8804),
.name = "Microchip LAN966X Gigabit PHY",
.config_init = lan8804_config_init,
.driver_data = &ksz9021_type,
@@ -5507,13 +6670,12 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
- .suspend = genphy_suspend,
- .resume = kszphy_resume,
+ .suspend = lan8804_suspend,
+ .resume = lan8804_resume,
.config_intr = lan8804_config_intr,
.handle_interrupt = lan8804_handle_interrupt,
}, {
- .phy_id = PHY_ID_LAN8841,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_LAN8841),
.name = "Microchip LAN8841 Gigabit PHY",
.flags = PHY_POLL_CABLE_TEST,
.driver_data = &lan8841_type,
@@ -5526,12 +6688,28 @@ static struct phy_driver ksphy_driver[] = {
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
.suspend = lan8841_suspend,
- .resume = genphy_resume,
+ .resume = lan8841_resume,
.cable_test_start = lan8814_cable_test_start,
.cable_test_get_status = ksz886x_cable_test_get_status,
}, {
- .phy_id = PHY_ID_KSZ9131,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_LAN8842),
+ .name = "Microchip LAN8842 Gigabit PHY",
+ .flags = PHY_POLL_CABLE_TEST,
+ .driver_data = &lan8814_type,
+ .probe = lan8842_probe,
+ .config_init = lan8842_config_init,
+ .config_intr = lan8842_config_intr,
+ .inband_caps = lan8842_inband_caps,
+ .config_inband = lan8842_config_inband,
+ .handle_interrupt = lan8842_handle_interrupt,
+ .get_phy_stats = lan8842_get_phy_stats,
+ .update_stats = lan8842_update_stats,
+ .get_tunable = lan8842_get_tunable,
+ .set_tunable = lan8842_set_tunable,
+ .cable_test_start = lan8814_cable_test_start,
+ .cable_test_get_status = ksz886x_cable_test_get_status,
+}, {
+ PHY_ID_MATCH_MODEL(PHY_ID_KSZ9131),
.name = "Microchip KSZ9131 Gigabit PHY",
/* PHY_GBIT_FEATURES */
.flags = PHY_POLL_CABLE_TEST,
@@ -5547,13 +6725,12 @@ static struct phy_driver ksphy_driver[] = {
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
.suspend = kszphy_suspend,
- .resume = kszphy_resume,
+ .resume = ksz9131_resume,
.cable_test_start = ksz9x31_cable_test_start,
.cable_test_get_status = ksz9x31_cable_test_get_status,
.get_features = ksz9477_get_features,
}, {
- .phy_id = PHY_ID_KSZ8873MLL,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_KSZ8873MLL),
.name = "Micrel KSZ8873MLL Switch",
/* PHY_BASIC_FEATURES */
.config_init = kszphy_config_init,
@@ -5562,8 +6739,7 @@ static struct phy_driver ksphy_driver[] = {
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
- .phy_id = PHY_ID_KSZ886X,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_KSZ886X),
.name = "Micrel KSZ8851 Ethernet MAC or KSZ886X Switch",
.driver_data = &ksz886x_type,
/* PHY_BASIC_FEATURES */
@@ -5583,16 +6759,25 @@ static struct phy_driver ksphy_driver[] = {
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
- .phy_id = PHY_ID_KSZ9477,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_KSZ9477),
.name = "Microchip KSZ9477",
+ .probe = kszphy_probe,
/* PHY_GBIT_FEATURES */
.config_init = ksz9477_config_init,
.config_intr = kszphy_config_intr,
+ .config_aneg = ksz9477_config_aneg,
+ .read_status = ksz9477_read_status,
.handle_interrupt = kszphy_handle_interrupt,
.suspend = genphy_suspend,
.resume = ksz9477_resume,
- .get_features = ksz9477_get_features,
+ .get_phy_stats = kszphy_get_phy_stats,
+ .update_stats = kszphy_update_stats,
+ .cable_test_start = ksz9x31_cable_test_start,
+ .cable_test_get_status = ksz9x31_cable_test_get_status,
+ .get_sqi = kszphy_get_sqi,
+ .get_sqi_max = kszphy_get_sqi_max,
+ .get_mse_capability = kszphy_get_mse_capability,
+ .get_mse_snapshot = kszphy_get_mse_snapshot,
} };
module_phy_driver(ksphy_driver);
@@ -5601,24 +6786,26 @@ MODULE_DESCRIPTION("Micrel PHY driver");
MODULE_AUTHOR("David J. Choi");
MODULE_LICENSE("GPL");
-static struct mdio_device_id __maybe_unused micrel_tbl[] = {
+static const struct mdio_device_id __maybe_unused micrel_tbl[] = {
{ PHY_ID_KSZ9021, 0x000ffffe },
- { PHY_ID_KSZ9031, MICREL_PHY_ID_MASK },
- { PHY_ID_KSZ9131, MICREL_PHY_ID_MASK },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KSZ9031) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KSZ9131) },
{ PHY_ID_KSZ8001, 0x00fffffc },
- { PHY_ID_KS8737, MICREL_PHY_ID_MASK },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KS8737) },
{ PHY_ID_KSZ8021, 0x00ffffff },
{ PHY_ID_KSZ8031, 0x00ffffff },
- { PHY_ID_KSZ8041, MICREL_PHY_ID_MASK },
- { PHY_ID_KSZ8051, MICREL_PHY_ID_MASK },
- { PHY_ID_KSZ8061, MICREL_PHY_ID_MASK },
- { PHY_ID_KSZ8081, MICREL_PHY_ID_MASK },
- { PHY_ID_KSZ8873MLL, MICREL_PHY_ID_MASK },
- { PHY_ID_KSZ886X, MICREL_PHY_ID_MASK },
- { PHY_ID_KSZ9477, MICREL_PHY_ID_MASK },
- { PHY_ID_LAN8814, MICREL_PHY_ID_MASK },
- { PHY_ID_LAN8804, MICREL_PHY_ID_MASK },
- { PHY_ID_LAN8841, MICREL_PHY_ID_MASK },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KSZ8041) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KSZ8041RNLI) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KSZ8051) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KSZ8061) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KSZ8081) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KSZ8873MLL) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KSZ886X) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KSZ9477) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_LAN8814) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_LAN8804) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_LAN8841) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_LAN8842) },
{ }
};
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index d3273bc0da4a..dc8634e7bcbe 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -37,47 +37,6 @@ static int lan88xx_write_page(struct phy_device *phydev, int page)
return __phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, page);
}
-static int lan88xx_phy_config_intr(struct phy_device *phydev)
-{
- int rc;
-
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
- /* unmask all source and clear them before enable */
- rc = phy_write(phydev, LAN88XX_INT_MASK, 0x7FFF);
- rc = phy_read(phydev, LAN88XX_INT_STS);
- rc = phy_write(phydev, LAN88XX_INT_MASK,
- LAN88XX_INT_MASK_MDINTPIN_EN_ |
- LAN88XX_INT_MASK_LINK_CHANGE_);
- } else {
- rc = phy_write(phydev, LAN88XX_INT_MASK, 0);
- if (rc)
- return rc;
-
- /* Ack interrupts after they have been disabled */
- rc = phy_read(phydev, LAN88XX_INT_STS);
- }
-
- return rc < 0 ? rc : 0;
-}
-
-static irqreturn_t lan88xx_handle_interrupt(struct phy_device *phydev)
-{
- int irq_status;
-
- irq_status = phy_read(phydev, LAN88XX_INT_STS);
- if (irq_status < 0) {
- phy_error(phydev);
- return IRQ_NONE;
- }
-
- if (!(irq_status & LAN88XX_INT_STS_LINK_CHANGE_))
- return IRQ_NONE;
-
- phy_trigger_machine(phydev);
-
- return IRQ_HANDLED;
-}
-
static int lan88xx_suspend(struct phy_device *phydev)
{
struct lan88xx_priv *priv = phydev->priv;
@@ -351,13 +310,29 @@ static int lan88xx_config_aneg(struct phy_device *phydev)
static void lan88xx_link_change_notify(struct phy_device *phydev)
{
int temp;
+ int ret;
+
+ /* Reset PHY to ensure MII_LPA provides up-to-date information. This
+ * issue is reproducible only after parallel detection, as described
+ * in IEEE 802.3-2022, Section 28.2.3.1 ("Parallel detection function"),
+ * where the link partner does not support auto-negotiation.
+ */
+ if (phydev->state == PHY_NOLINK) {
+ ret = phy_init_hw(phydev);
+ if (ret < 0)
+ goto link_change_notify_failed;
+
+ ret = _phy_start_aneg(phydev);
+ if (ret < 0)
+ goto link_change_notify_failed;
+ }
/* At forced 100 F/H mode, chip may fail to set mode correctly
* when cable is switched between long(~50+m) and short one.
* As workaround, set to 10 before setting to 100
* at forced 100 F/H mode.
*/
- if (!phydev->autoneg && phydev->speed == 100) {
+ if (phydev->state == PHY_NOLINK && !phydev->autoneg && phydev->speed == 100) {
/* disable phy interrupt */
temp = phy_read(phydev, LAN88XX_INT_MASK);
temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
@@ -377,6 +352,11 @@ static void lan88xx_link_change_notify(struct phy_device *phydev)
temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
phy_write(phydev, LAN88XX_INT_MASK, temp);
}
+
+ return;
+
+link_change_notify_failed:
+ phydev_err(phydev, "Link change process failed %pe\n", ERR_PTR(ret));
}
/**
@@ -494,6 +474,8 @@ static struct phy_driver microchip_phy_driver[] = {
/* This mask (0xfffffff2) is to differentiate from
* LAN8742 (phy_id 0x0007c130 and 0x0007c131)
* and allows future phy_id revisions.
+ * These PHYs are integrated in LAN7800 and LAN7850 USB/Ethernet
+ * controllers.
*/
.phy_id_mask = 0xfffffff2,
.name = "Microchip LAN88xx",
@@ -506,9 +488,11 @@ static struct phy_driver microchip_phy_driver[] = {
.config_init = lan88xx_config_init,
.config_aneg = lan88xx_config_aneg,
.link_change_notify = lan88xx_link_change_notify,
+ .soft_reset = genphy_soft_reset,
- .config_intr = lan88xx_phy_config_intr,
- .handle_interrupt = lan88xx_handle_interrupt,
+ /* Interrupt handling is broken, do not define related
+ * functions to force polling.
+ */
.suspend = lan88xx_suspend,
.resume = genphy_resume,
@@ -527,7 +511,7 @@ static struct phy_driver microchip_phy_driver[] = {
module_phy_driver(microchip_phy_driver);
-static struct mdio_device_id __maybe_unused microchip_tbl[] = {
+static const struct mdio_device_id __maybe_unused microchip_tbl[] = {
{ 0x0007c132, 0xfffffff2 },
{ PHY_ID_MATCH_MODEL(PHY_ID_LAN937X_TX) },
{ }
diff --git a/drivers/net/phy/microchip_rds_ptp.c b/drivers/net/phy/microchip_rds_ptp.c
new file mode 100644
index 000000000000..4c6326b0ceaf
--- /dev/null
+++ b/drivers/net/phy/microchip_rds_ptp.c
@@ -0,0 +1,1306 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2024 Microchip Technology
+
+#include "microchip_rds_ptp.h"
+
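+/* Helpers for accessing the PTP registers over MMD. Register offsets are
+ * relative to either the per-port base or the clock (LTC) base, selected by
+ * the 'base' argument.
+ */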
+static int mchp_rds_phy_read_mmd(struct mchp_rds_ptp_clock *clock,
+ u32 offset, enum mchp_rds_ptp_base base)
+{
+ struct phy_device *phydev = clock->phydev;
+ u32 addr;
+
+ addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) :
+ BASE_CLK(clock)));
+
+ return phy_read_mmd(phydev, PTP_MMD(clock), addr);
+}
+
+static int mchp_rds_phy_write_mmd(struct mchp_rds_ptp_clock *clock,
+ u32 offset, enum mchp_rds_ptp_base base,
+ u16 val)
+{
+ struct phy_device *phydev = clock->phydev;
+ u32 addr;
+
+ addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) :
+ BASE_CLK(clock)));
+
+ return phy_write_mmd(phydev, PTP_MMD(clock), addr, val);
+}
+
+static int mchp_rds_phy_modify_mmd(struct mchp_rds_ptp_clock *clock,
+ u32 offset, enum mchp_rds_ptp_base base,
+ u16 mask, u16 val)
+{
+ struct phy_device *phydev = clock->phydev;
+ u32 addr;
+
+ addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) :
+ BASE_CLK(clock)));
+
+ return phy_modify_mmd(phydev, PTP_MMD(clock), addr, mask, val);
+}
+
+static int mchp_rds_phy_set_bits_mmd(struct mchp_rds_ptp_clock *clock,
+ u32 offset, enum mchp_rds_ptp_base base,
+ u16 val)
+{
+ struct phy_device *phydev = clock->phydev;
+ u32 addr;
+
+ addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) :
+ BASE_CLK(clock)));
+
+ return phy_set_bits_mmd(phydev, PTP_MMD(clock), addr, val);
+}
+
+static int mchp_get_pulsewidth(struct phy_device *phydev,
+ struct ptp_perout_request *perout_request,
+ int *pulse_width)
+{
+ struct timespec64 ts_period;
+ s64 ts_on_nsec, period_nsec;
+ struct timespec64 ts_on;
+ static const s64 sup_on_necs[] = {
+ 100, /* 100ns */
+ 500, /* 500ns */
+ 1000, /* 1us */
+ 5000, /* 5us */
+ 10000, /* 10us */
+ 50000, /* 50us */
+ 100000, /* 100us */
+ 500000, /* 500us */
+ 1000000, /* 1ms */
+ 5000000, /* 5ms */
+ 10000000, /* 10ms */
+ 50000000, /* 50ms */
+ 100000000, /* 100ms */
+ 200000000, /* 200ms */
+ };
+
+ ts_period.tv_sec = perout_request->period.sec;
+ ts_period.tv_nsec = perout_request->period.nsec;
+
+ ts_on.tv_sec = perout_request->on.sec;
+ ts_on.tv_nsec = perout_request->on.nsec;
+ ts_on_nsec = timespec64_to_ns(&ts_on);
+ period_nsec = timespec64_to_ns(&ts_period);
+
+ if (period_nsec < 200) {
+ phydev_warn(phydev, "perout period small, minimum is 200ns\n");
+ return -EOPNOTSUPP;
+ }
+
+ for (int i = 0; i < ARRAY_SIZE(sup_on_necs); i++) {
+ if (ts_on_nsec <= sup_on_necs[i]) {
+ *pulse_width = i;
+ break;
+ }
+ }
+
+ phydev_info(phydev, "pulse width is %d\n", *pulse_width);
+ return 0;
+}
+
+static int mchp_general_event_config(struct mchp_rds_ptp_clock *clock,
+ int pulse_width)
+{
+ int general_config;
+
+ general_config = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_GEN_CFG,
+ MCHP_RDS_PTP_CLOCK);
+ if (general_config < 0)
+ return general_config;
+
+ general_config &= ~MCHP_RDS_PTP_GEN_CFG_LTC_EVT_MASK;
+ general_config |= MCHP_RDS_PTP_GEN_CFG_LTC_EVT_SET(pulse_width);
+ general_config &= ~MCHP_RDS_PTP_GEN_CFG_RELOAD_ADD;
+ general_config |= MCHP_RDS_PTP_GEN_CFG_POLARITY;
+
+ return mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_GEN_CFG,
+ MCHP_RDS_PTP_CLOCK, general_config);
+}
+
+static int mchp_set_clock_reload(struct mchp_rds_ptp_clock *clock,
+ s64 period_sec, u32 period_nsec)
+{
+ int rc;
+
+ rc = mchp_rds_phy_write_mmd(clock,
+ MCHP_RDS_PTP_CLK_TRGT_RELOAD_SEC_LO,
+ MCHP_RDS_PTP_CLOCK,
+ lower_16_bits(period_sec));
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock,
+ MCHP_RDS_PTP_CLK_TRGT_RELOAD_SEC_HI,
+ MCHP_RDS_PTP_CLOCK,
+ upper_16_bits(period_sec));
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock,
+ MCHP_RDS_PTP_CLK_TRGT_RELOAD_NS_LO,
+ MCHP_RDS_PTP_CLOCK,
+ lower_16_bits(period_nsec));
+ if (rc < 0)
+ return rc;
+
+ return mchp_rds_phy_write_mmd(clock,
+ MCHP_RDS_PTP_CLK_TRGT_RELOAD_NS_HI,
+ MCHP_RDS_PTP_CLOCK,
+ upper_16_bits(period_nsec) & 0x3fff);
+}
+
+static int mchp_set_clock_target(struct mchp_rds_ptp_clock *clock,
+ s64 start_sec, u32 start_nsec)
+{
+ int rc;
+
+ /* Set the start time */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CLK_TRGT_SEC_LO,
+ MCHP_RDS_PTP_CLOCK,
+ lower_16_bits(start_sec));
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CLK_TRGT_SEC_HI,
+ MCHP_RDS_PTP_CLOCK,
+ upper_16_bits(start_sec));
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CLK_TRGT_NS_LO,
+ MCHP_RDS_PTP_CLOCK,
+ lower_16_bits(start_nsec));
+ if (rc < 0)
+ return rc;
+
+ return mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CLK_TRGT_NS_HI,
+ MCHP_RDS_PTP_CLOCK,
+ upper_16_bits(start_nsec) & 0x3fff);
+}
+
+static int mchp_rds_ptp_perout_off(struct mchp_rds_ptp_clock *clock)
+{
+ u16 general_config;
+ int rc;
+
+ /* Set target to too far in the future, effectively disabling it */
+ rc = mchp_set_clock_target(clock, 0xFFFFFFFF, 0);
+ if (rc < 0)
+ return rc;
+
+ general_config = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_GEN_CFG,
+ MCHP_RDS_PTP_CLOCK);
+ general_config |= MCHP_RDS_PTP_GEN_CFG_RELOAD_ADD;
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_GEN_CFG,
+ MCHP_RDS_PTP_CLOCK, general_config);
+ if (rc < 0)
+ return rc;
+
+ clock->mchp_rds_ptp_event = -1;
+
+ return 0;
+}
+
+static bool mchp_get_event(struct mchp_rds_ptp_clock *clock, int pin)
+{
+ if (clock->mchp_rds_ptp_event < 0 && pin == clock->event_pin) {
+ clock->mchp_rds_ptp_event = pin;
+ return true;
+ }
+
+ return false;
+}
+
+static int mchp_rds_ptp_perout(struct ptp_clock_info *ptpci,
+ struct ptp_perout_request *perout, int on)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(ptpci,
+ struct mchp_rds_ptp_clock,
+ caps);
+ struct phy_device *phydev = clock->phydev;
+ int ret, event_pin, pulsewidth;
+
+ event_pin = ptp_find_pin(clock->ptp_clock, PTP_PF_PEROUT,
+ perout->index);
+ if (event_pin != clock->event_pin)
+ return -EINVAL;
+
+ if (!on) {
+ ret = mchp_rds_ptp_perout_off(clock);
+ return ret;
+ }
+
+ if (!mchp_get_event(clock, event_pin))
+ return -EINVAL;
+
+ ret = mchp_get_pulsewidth(phydev, perout, &pulsewidth);
+ if (ret < 0)
+ return ret;
+
+ /* Configure to pulse every period */
+ ret = mchp_general_event_config(clock, pulsewidth);
+ if (ret < 0)
+ return ret;
+
+ ret = mchp_set_clock_target(clock, perout->start.sec,
+ perout->start.nsec);
+ if (ret < 0)
+ return ret;
+
+ return mchp_set_clock_reload(clock, perout->period.sec,
+ perout->period.nsec);
+}
+
+static int mchp_rds_ptpci_enable(struct ptp_clock_info *ptpci,
+ struct ptp_clock_request *request, int on)
+{
+ switch (request->type) {
+ case PTP_CLK_REQ_PEROUT:
+ return mchp_rds_ptp_perout(ptpci, &request->perout, on);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mchp_rds_ptpci_verify(struct ptp_clock_info *ptpci, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(ptpci,
+ struct mchp_rds_ptp_clock,
+ caps);
+
+ if (!(pin == clock->event_pin && chan == 0))
+ return -1;
+
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_PEROUT:
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+static int mchp_rds_ptp_flush_fifo(struct mchp_rds_ptp_clock *clock,
+ enum mchp_rds_ptp_fifo_dir dir)
+{
+ int rc;
+
+ if (dir == MCHP_RDS_PTP_EGRESS_FIFO)
+ skb_queue_purge(&clock->tx_queue);
+ else
+ skb_queue_purge(&clock->rx_queue);
+
+ for (int i = 0; i < MCHP_RDS_PTP_FIFO_SIZE; ++i) {
+ rc = mchp_rds_phy_read_mmd(clock,
+ dir == MCHP_RDS_PTP_EGRESS_FIFO ?
+ MCHP_RDS_PTP_TX_MSG_HDR2 :
+ MCHP_RDS_PTP_RX_MSG_HDR2,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ return rc;
+ }
+ return mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_INT_STS,
+ MCHP_RDS_PTP_PORT);
+}
+
+static int mchp_rds_ptp_config_intr(struct mchp_rds_ptp_clock *clock,
+ bool enable)
+{
+ /* Enable or disable ptp interrupts */
+ return mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_INT_EN,
+ MCHP_RDS_PTP_PORT,
+ enable ? MCHP_RDS_PTP_INT_ALL_MSK : 0);
+}
+
+static void mchp_rds_ptp_txtstamp(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(mii_ts,
+ struct mchp_rds_ptp_clock,
+ mii_ts);
+
+ switch (clock->hwts_tx_type) {
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ if (ptp_msg_is_sync(skb, type)) {
+ kfree_skb(skb);
+ return;
+ }
+ fallthrough;
+ case HWTSTAMP_TX_ON:
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ skb_queue_tail(&clock->tx_queue, skb);
+ break;
+ case HWTSTAMP_TX_OFF:
+ default:
+ kfree_skb(skb);
+ break;
+ }
+}
+
+static bool mchp_rds_ptp_get_sig_rx(struct sk_buff *skb, u16 *sig)
+{
+ struct ptp_header *ptp_header;
+ int type;
+
+ skb_push(skb, ETH_HLEN);
+ type = ptp_classify_raw(skb);
+ if (type == PTP_CLASS_NONE)
+ return false;
+
+ ptp_header = ptp_parse_header(skb, type);
+ if (!ptp_header)
+ return false;
+
+ skb_pull_inline(skb, ETH_HLEN);
+
+ *sig = (__force u16)(ntohs(ptp_header->sequence_id));
+
+ return true;
+}
+
+static bool mchp_rds_ptp_match_skb(struct mchp_rds_ptp_clock *clock,
+ struct mchp_rds_ptp_rx_ts *rx_ts)
+{
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct sk_buff *skb, *skb_tmp;
+ unsigned long flags;
+ bool rc = false;
+ u16 skb_sig;
+
+ spin_lock_irqsave(&clock->rx_queue.lock, flags);
+ skb_queue_walk_safe(&clock->rx_queue, skb, skb_tmp) {
+ if (!mchp_rds_ptp_get_sig_rx(skb, &skb_sig))
+ continue;
+
+ if (skb_sig != rx_ts->seq_id)
+ continue;
+
+ __skb_unlink(skb, &clock->rx_queue);
+
+ rc = true;
+ break;
+ }
+ spin_unlock_irqrestore(&clock->rx_queue.lock, flags);
+
+ if (rc) {
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp = ktime_set(rx_ts->seconds, rx_ts->nsec);
+ netif_rx(skb);
+ }
+
+ return rc;
+}
+
+static void mchp_rds_ptp_match_rx_ts(struct mchp_rds_ptp_clock *clock,
+ struct mchp_rds_ptp_rx_ts *rx_ts)
+{
+ unsigned long flags;
+
+	/* If we failed to match the timestamp to an already received skb,
+	 * queue the timestamp until the corresponding frame arrives
+	 */
+ if (!mchp_rds_ptp_match_skb(clock, rx_ts)) {
+ spin_lock_irqsave(&clock->rx_ts_lock, flags);
+ list_add(&rx_ts->list, &clock->rx_ts_list);
+ spin_unlock_irqrestore(&clock->rx_ts_lock, flags);
+ } else {
+ kfree(rx_ts);
+ }
+}
+
+static void mchp_rds_ptp_match_rx_skb(struct mchp_rds_ptp_clock *clock,
+ struct sk_buff *skb)
+{
+ struct mchp_rds_ptp_rx_ts *rx_ts, *tmp, *rx_ts_var = NULL;
+ struct skb_shared_hwtstamps *shhwtstamps;
+ unsigned long flags;
+ u16 skb_sig;
+
+ if (!mchp_rds_ptp_get_sig_rx(skb, &skb_sig))
+ return;
+
+ /* Iterate over all RX timestamps and match it with the received skbs */
+ spin_lock_irqsave(&clock->rx_ts_lock, flags);
+ list_for_each_entry_safe(rx_ts, tmp, &clock->rx_ts_list, list) {
+ /* Check if we found the signature we were looking for. */
+ if (skb_sig != rx_ts->seq_id)
+ continue;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp = ktime_set(rx_ts->seconds, rx_ts->nsec);
+ netif_rx(skb);
+
+ rx_ts_var = rx_ts;
+
+ break;
+ }
+ spin_unlock_irqrestore(&clock->rx_ts_lock, flags);
+
+ if (rx_ts_var) {
+ list_del(&rx_ts_var->list);
+ kfree(rx_ts_var);
+ } else {
+ skb_queue_tail(&clock->rx_queue, skb);
+ }
+}
+
+static bool mchp_rds_ptp_rxtstamp(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(mii_ts,
+ struct mchp_rds_ptp_clock,
+ mii_ts);
+
+ if (clock->rx_filter == HWTSTAMP_FILTER_NONE ||
+ type == PTP_CLASS_NONE)
+ return false;
+
+ if ((type & clock->version) == 0 || (type & clock->layer) == 0)
+ return false;
+
+	/* If a matching timestamp is already available, the skb is delivered
+	 * to the application right away. Otherwise the skb is queued and is
+	 * delivered from the interrupt handler once its timestamp arrives.
+	 * Either way the skb reaches the application, so do not return false
+	 * here when the skb has not been matched yet.
+	 */
+ mchp_rds_ptp_match_rx_skb(clock, skb);
+
+ return true;
+}
+
+static int mchp_rds_ptp_hwtstamp_set(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct mchp_rds_ptp_clock *clock =
+ container_of(mii_ts, struct mchp_rds_ptp_clock,
+ mii_ts);
+ struct mchp_rds_ptp_rx_ts *rx_ts, *tmp;
+ int txcfg = 0, rxcfg = 0;
+ unsigned long flags;
+ int rc;
+
+ clock->hwts_tx_type = config->tx_type;
+ clock->rx_filter = config->rx_filter;
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ clock->layer = 0;
+ clock->version = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ clock->layer = PTP_CLASS_L4;
+ clock->version = PTP_CLASS_V2;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ clock->layer = PTP_CLASS_L2;
+ clock->version = PTP_CLASS_V2;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ clock->layer = PTP_CLASS_L4 | PTP_CLASS_L2;
+ clock->version = PTP_CLASS_V2;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ /* Setup parsing of the frames and enable the timestamping for ptp
+ * frames
+ */
+ if (clock->layer & PTP_CLASS_L2) {
+ rxcfg = MCHP_RDS_PTP_PARSE_CONFIG_LAYER2_EN;
+ txcfg = MCHP_RDS_PTP_PARSE_CONFIG_LAYER2_EN;
+ }
+ if (clock->layer & PTP_CLASS_L4) {
+ rxcfg |= MCHP_RDS_PTP_PARSE_CONFIG_IPV4_EN |
+ MCHP_RDS_PTP_PARSE_CONFIG_IPV6_EN;
+ txcfg |= MCHP_RDS_PTP_PARSE_CONFIG_IPV4_EN |
+ MCHP_RDS_PTP_PARSE_CONFIG_IPV6_EN;
+ }
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_CONFIG,
+ MCHP_RDS_PTP_PORT, rxcfg);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_CONFIG,
+ MCHP_RDS_PTP_PORT, txcfg);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_TIMESTAMP_EN,
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_PTP_TIMESTAMP_EN_ALL);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_TIMESTAMP_EN,
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_PTP_TIMESTAMP_EN_ALL);
+ if (rc < 0)
+ return rc;
+
+ if (clock->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC)
+ /* Enable / disable of the TX timestamp in the SYNC frames */
+ rc = mchp_rds_phy_modify_mmd(clock, MCHP_RDS_PTP_TX_MOD,
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT,
+ MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT);
+ else
+ rc = mchp_rds_phy_modify_mmd(clock, MCHP_RDS_PTP_TX_MOD,
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT,
+ (u16)~MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT);
+
+ if (rc < 0)
+ return rc;
+
+	/* In case of multiple starts and stops, these need to be cleared */
+ spin_lock_irqsave(&clock->rx_ts_lock, flags);
+ list_for_each_entry_safe(rx_ts, tmp, &clock->rx_ts_list, list) {
+ list_del(&rx_ts->list);
+ kfree(rx_ts);
+ }
+ spin_unlock_irqrestore(&clock->rx_ts_lock, flags);
+
+ rc = mchp_rds_ptp_flush_fifo(clock, MCHP_RDS_PTP_INGRESS_FIFO);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_ptp_flush_fifo(clock, MCHP_RDS_PTP_EGRESS_FIFO);
+ if (rc < 0)
+ return rc;
+
+ /* Now enable the timestamping interrupts */
+ rc = mchp_rds_ptp_config_intr(clock,
+ config->rx_filter != HWTSTAMP_FILTER_NONE);
+
+ return rc < 0 ? rc : 0;
+}
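
Editor's note: the hwtstamp_set() callback above is reached through the standard SIOCSHWTSTAMP ioctl. Below is a minimal user-space sketch of that path (not part of this patch); the helper name and the "eth0" interface are illustrative assumptions.

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Enable TX timestamping and PTPv2 RX filtering on one interface; the
 * kernel converts the request to a kernel_hwtstamp_config and, since
 * phydev->default_timestamp is set, hands it to mii_ts->hwtstamp_set(). */
static int enable_phy_timestamping(const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	ret = ioctl(fd, SIOCSHWTSTAMP, &ifr);
	close(fd);
	return ret;	/* e.g. enable_phy_timestamping("eth0") */
}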
+
+static int mchp_rds_ptp_ts_info(struct mii_timestamper *mii_ts,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(mii_ts,
+ struct mchp_rds_ptp_clock,
+ mii_ts);
+
+ info->phc_index = ptp_clock_index(clock->ptp_clock);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
+ BIT(HWTSTAMP_TX_ONESTEP_SYNC);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+}
+
+static int mchp_rds_ptp_ltc_adjtime(struct ptp_clock_info *info, s64 delta)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(info,
+ struct mchp_rds_ptp_clock,
+ caps);
+ struct timespec64 ts;
+ bool add = true;
+ int rc = 0;
+ u32 nsec;
+ s32 sec;
+
+	/* The HW allows adjustments of up to 15 sec, but limit it to 10 sec
+	 * here. The reason is that an adjustment of e.g. 14 sec and
+	 * 999999999 nsec gets 8 ns added to compensate for the clock's own
+	 * increment, which would push the value beyond 15 sec. Limiting the
+	 * range avoids these corner cases.
+	 */
+ if (delta > 10000000000LL || delta < -10000000000LL) {
+ /* The timeadjustment is too big, so fall back using set time */
+ u64 now;
+
+ info->gettime64(info, &ts);
+
+ now = ktime_to_ns(timespec64_to_ktime(ts));
+ ts = ns_to_timespec64(now + delta);
+
+ info->settime64(info, &ts);
+ return 0;
+ }
+ sec = div_u64_rem(abs(delta), NSEC_PER_SEC, &nsec);
+ if (delta < 0 && nsec != 0) {
+		/* The nsec part cannot be adjusted downwards; instead take
+		 * one more second from the seconds part and add the
+		 * complementary nanoseconds so the value rolls over.
+		 */
+ sec--;
+ nsec = NSEC_PER_SEC - nsec;
+ }
+
+ /* Calculate the adjustments and the direction */
+ if (delta < 0)
+ add = false;
+
+ if (nsec > 0) {
+ /* add 8 ns to cover the likely normal increment */
+ nsec += 8;
+
+ if (nsec >= NSEC_PER_SEC) {
+ /* carry into seconds */
+ sec++;
+ nsec -= NSEC_PER_SEC;
+ }
+ }
+
+ mutex_lock(&clock->ptp_lock);
+ if (sec) {
+ sec = abs(sec);
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_LO,
+ MCHP_RDS_PTP_CLOCK, sec);
+ if (rc < 0)
+ goto out_unlock;
+
+ rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_HI,
+ MCHP_RDS_PTP_CLOCK,
+ ((add ?
+ MCHP_RDS_PTP_STEP_ADJ_HI_DIR :
+ 0) | ((sec >> 16) &
+ GENMASK(13, 0))));
+ if (rc < 0)
+ goto out_unlock;
+
+ rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_CMD_CTL_LTC_STEP_SEC);
+ if (rc < 0)
+ goto out_unlock;
+ }
+
+ if (nsec) {
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_LO,
+ MCHP_RDS_PTP_CLOCK,
+ nsec & GENMASK(15, 0));
+ if (rc < 0)
+ goto out_unlock;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_HI,
+ MCHP_RDS_PTP_CLOCK,
+ (nsec >> 16) & GENMASK(13, 0));
+ if (rc < 0)
+ goto out_unlock;
+
+ rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_CMD_CTL_LTC_STEP_NSEC);
+ }
+
+ mutex_unlock(&clock->ptp_lock);
+ info->gettime64(info, &ts);
+ mutex_lock(&clock->ptp_lock);
+
+ /* Target update is required for pulse generation on events that
+ * are enabled
+ */
+ if (clock->mchp_rds_ptp_event >= 0)
+ mchp_set_clock_target(clock,
+ ts.tv_sec + MCHP_RDS_PTP_BUFFER_TIME, 0);
+out_unlock:
+ mutex_unlock(&clock->ptp_lock);
+
+ return rc;
+}
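
Editor's note: a standalone sketch (not part of the patch) of the positive-delta split performed by mchp_rds_ptp_ltc_adjtime() above, including the 8 ns compensation and the carry into seconds. For delta = 2300000000 it yields sec = 2 and nsec = 300000008 (0x11e1a308), i.e. the nanosecond step writes STEP_ADJ_LO = 0xa308 and STEP_ADJ_HI = 0x11e1. The helper name is illustrative.

#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* Split a positive nanosecond delta into the seconds/nanoseconds steps
 * programmed into the LTC step registers. */
static void split_positive_step(uint64_t delta, uint64_t *sec, uint64_t *nsec)
{
	*sec = delta / NSEC_PER_SEC;
	*nsec = delta % NSEC_PER_SEC;

	if (*nsec) {
		*nsec += 8;			/* compensate the running LTC */
		if (*nsec >= NSEC_PER_SEC) {	/* carry into seconds */
			(*sec)++;
			*nsec -= NSEC_PER_SEC;
		}
	}
}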
+
+static int mchp_rds_ptp_ltc_adjfine(struct ptp_clock_info *info,
+ long scaled_ppm)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(info,
+ struct mchp_rds_ptp_clock,
+ caps);
+ u16 rate_lo, rate_hi;
+ bool faster = true;
+ u32 rate;
+ int rc;
+
+ if (!scaled_ppm)
+ return 0;
+
+ if (scaled_ppm < 0) {
+ scaled_ppm = -scaled_ppm;
+ faster = false;
+ }
+
+ rate = MCHP_RDS_PTP_1PPM_FORMAT * (upper_16_bits(scaled_ppm));
+ rate += (MCHP_RDS_PTP_1PPM_FORMAT * (lower_16_bits(scaled_ppm))) >> 16;
+
+ rate_lo = rate & GENMASK(15, 0);
+ rate_hi = (rate >> 16) & GENMASK(13, 0);
+
+ if (faster)
+ rate_hi |= MCHP_RDS_PTP_LTC_RATE_ADJ_HI_DIR;
+
+ mutex_lock(&clock->ptp_lock);
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_RATE_ADJ_HI,
+ MCHP_RDS_PTP_CLOCK, rate_hi);
+ if (rc < 0)
+ goto error;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_RATE_ADJ_LO,
+ MCHP_RDS_PTP_CLOCK, rate_lo);
+ if (rc > 0)
+ rc = 0;
+error:
+ mutex_unlock(&clock->ptp_lock);
+
+ return rc;
+}
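
Editor's note: a standalone sketch (not part of the patch) of the scaled_ppm to rate-register conversion used by mchp_rds_ptp_ltc_adjfine() above. scaled_ppm is ppm in 16.16 fixed point, so 65536 corresponds to exactly 1 ppm and maps to MCHP_RDS_PTP_1PPM_FORMAT (17179); 98304 (1.5 ppm) maps to 25768. The helper name is illustrative.

#include <stdint.h>

#define MCHP_RDS_PTP_1PPM_FORMAT 17179

static uint32_t scaled_ppm_to_rate(long scaled_ppm)
{
	uint32_t rate;

	if (scaled_ppm < 0)
		scaled_ppm = -scaled_ppm;	/* direction is a separate bit */

	rate = MCHP_RDS_PTP_1PPM_FORMAT * (uint32_t)(scaled_ppm >> 16);
	rate += (MCHP_RDS_PTP_1PPM_FORMAT * (uint32_t)(scaled_ppm & 0xffff)) >> 16;

	return rate;	/* 65536 -> 17179, 98304 -> 25768 */
}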
+
+static int mchp_rds_ptp_ltc_gettime64(struct ptp_clock_info *info,
+ struct timespec64 *ts)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(info,
+ struct mchp_rds_ptp_clock,
+ caps);
+ time64_t secs;
+ int rc = 0;
+ s64 nsecs;
+
+ mutex_lock(&clock->ptp_lock);
+ /* Set read bit to 1 to save current values of 1588 local time counter
+ * into PTP LTC seconds and nanoseconds registers.
+ */
+ rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_CMD_CTL_CLOCK_READ);
+ if (rc < 0)
+ goto out_unlock;
+
+ /* Get LTC clock values */
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_SEC_HI,
+ MCHP_RDS_PTP_CLOCK);
+ if (rc < 0)
+ goto out_unlock;
+ secs = rc << 16;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_SEC_MID,
+ MCHP_RDS_PTP_CLOCK);
+ if (rc < 0)
+ goto out_unlock;
+ secs |= rc;
+ secs <<= 16;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_SEC_LO,
+ MCHP_RDS_PTP_CLOCK);
+ if (rc < 0)
+ goto out_unlock;
+ secs |= rc;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_NS_HI,
+ MCHP_RDS_PTP_CLOCK);
+ if (rc < 0)
+ goto out_unlock;
+ nsecs = (rc & GENMASK(13, 0));
+ nsecs <<= 16;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_NS_LO,
+ MCHP_RDS_PTP_CLOCK);
+ if (rc < 0)
+ goto out_unlock;
+ nsecs |= rc;
+
+ set_normalized_timespec64(ts, secs, nsecs);
+
+ if (rc > 0)
+ rc = 0;
+out_unlock:
+ mutex_unlock(&clock->ptp_lock);
+
+ return rc;
+}
+
+static int mchp_rds_ptp_ltc_settime64(struct ptp_clock_info *info,
+ const struct timespec64 *ts)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(info,
+ struct mchp_rds_ptp_clock,
+ caps);
+ int rc;
+
+ mutex_lock(&clock->ptp_lock);
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_SEC_LO,
+ MCHP_RDS_PTP_CLOCK,
+ lower_16_bits(ts->tv_sec));
+ if (rc < 0)
+ goto out_unlock;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_SEC_MID,
+ MCHP_RDS_PTP_CLOCK,
+ upper_16_bits(ts->tv_sec));
+ if (rc < 0)
+ goto out_unlock;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_SEC_HI,
+ MCHP_RDS_PTP_CLOCK,
+ upper_32_bits(ts->tv_sec) & GENMASK(15, 0));
+ if (rc < 0)
+ goto out_unlock;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_NS_LO,
+ MCHP_RDS_PTP_CLOCK,
+ lower_16_bits(ts->tv_nsec));
+ if (rc < 0)
+ goto out_unlock;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_NS_HI,
+ MCHP_RDS_PTP_CLOCK,
+ upper_16_bits(ts->tv_nsec) & GENMASK(13, 0));
+ if (rc < 0)
+ goto out_unlock;
+
+ /* Set load bit to 1 to write PTP LTC seconds and nanoseconds
+ * registers to 1588 local time counter.
+ */
+ rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_CMD_CTL_CLOCK_LOAD);
+ if (rc > 0)
+ rc = 0;
+out_unlock:
+ mutex_unlock(&clock->ptp_lock);
+
+ return rc;
+}
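
Editor's note: an illustration (not part of the patch) of how the 48-bit seconds value is split across the three 16-bit LTC registers written by mchp_rds_ptp_ltc_settime64() above; the same layout is reassembled by the gettime64() read-back. The shifts mirror the kernel's lower_16_bits()/upper_16_bits()/upper_32_bits() helpers; the struct and function names are illustrative.

#include <stdint.h>

struct ltc_sec_regs {
	uint16_t lo;	/* MCHP_RDS_PTP_LTC_SEC_LO:  bits 15..0  */
	uint16_t mid;	/* MCHP_RDS_PTP_LTC_SEC_MID: bits 31..16 */
	uint16_t hi;	/* MCHP_RDS_PTP_LTC_SEC_HI:  bits 47..32 */
};

static struct ltc_sec_regs split_seconds(int64_t tv_sec)
{
	struct ltc_sec_regs r = {
		.lo  = (uint16_t)tv_sec,
		.mid = (uint16_t)(tv_sec >> 16),
		.hi  = (uint16_t)(tv_sec >> 32),
	};

	/* e.g. tv_sec = 0x123456789abc -> lo = 0x9abc, mid = 0x5678, hi = 0x1234 */
	return r;
}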
+
+static bool mchp_rds_ptp_get_sig_tx(struct sk_buff *skb, u16 *sig)
+{
+ struct ptp_header *ptp_header;
+ int type;
+
+ type = ptp_classify_raw(skb);
+ if (type == PTP_CLASS_NONE)
+ return false;
+
+ ptp_header = ptp_parse_header(skb, type);
+ if (!ptp_header)
+ return false;
+
+ *sig = (__force u16)(ntohs(ptp_header->sequence_id));
+
+ return true;
+}
+
+static void mchp_rds_ptp_match_tx_skb(struct mchp_rds_ptp_clock *clock,
+ u32 seconds, u32 nsec, u16 seq_id)
+{
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct sk_buff *skb, *skb_tmp;
+ unsigned long flags;
+ bool rc = false;
+ u16 skb_sig;
+
+ spin_lock_irqsave(&clock->tx_queue.lock, flags);
+ skb_queue_walk_safe(&clock->tx_queue, skb, skb_tmp) {
+ if (!mchp_rds_ptp_get_sig_tx(skb, &skb_sig))
+ continue;
+
+ if (skb_sig != seq_id)
+ continue;
+
+ __skb_unlink(skb, &clock->tx_queue);
+ rc = true;
+ break;
+ }
+ spin_unlock_irqrestore(&clock->tx_queue.lock, flags);
+
+ if (rc) {
+ shhwtstamps.hwtstamp = ktime_set(seconds, nsec);
+ skb_complete_tx_timestamp(skb, &shhwtstamps);
+ }
+}
+
+static struct mchp_rds_ptp_rx_ts
+ *mchp_rds_ptp_get_rx_ts(struct mchp_rds_ptp_clock *clock)
+{
+ struct phy_device *phydev = clock->phydev;
+ struct mchp_rds_ptp_rx_ts *rx_ts = NULL;
+ u32 sec, nsec;
+ int rc;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_NS_HI,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ goto error;
+ if (!(rc & MCHP_RDS_PTP_RX_INGRESS_NS_HI_TS_VALID)) {
+ phydev_err(phydev, "RX Timestamp is not valid!\n");
+ goto error;
+ }
+ nsec = (rc & GENMASK(13, 0)) << 16;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_NS_LO,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ goto error;
+ nsec |= rc;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_SEC_HI,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ goto error;
+ sec = rc << 16;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_SEC_LO,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ goto error;
+ sec |= rc;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_MSG_HDR2,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ goto error;
+
+ rx_ts = kmalloc(sizeof(*rx_ts), GFP_KERNEL);
+ if (!rx_ts)
+ return NULL;
+
+ rx_ts->seconds = sec;
+ rx_ts->nsec = nsec;
+ rx_ts->seq_id = rc;
+
+error:
+ return rx_ts;
+}
+
+static void mchp_rds_ptp_process_rx_ts(struct mchp_rds_ptp_clock *clock)
+{
+ int caps;
+
+ do {
+ struct mchp_rds_ptp_rx_ts *rx_ts;
+
+ rx_ts = mchp_rds_ptp_get_rx_ts(clock);
+ if (rx_ts)
+ mchp_rds_ptp_match_rx_ts(clock, rx_ts);
+
+ caps = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_CAP_INFO,
+ MCHP_RDS_PTP_PORT);
+ if (caps < 0)
+ return;
+ } while (MCHP_RDS_PTP_RX_TS_CNT(caps) > 0);
+}
+
+static bool mchp_rds_ptp_get_tx_ts(struct mchp_rds_ptp_clock *clock,
+ u32 *sec, u32 *nsec, u16 *seq)
+{
+ int rc;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_NS_HI,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ return false;
+ if (!(rc & MCHP_RDS_PTP_TX_EGRESS_NS_HI_TS_VALID))
+ return false;
+ *nsec = (rc & GENMASK(13, 0)) << 16;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_NS_LO,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ return false;
+ *nsec = *nsec | rc;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_SEC_HI,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ return false;
+ *sec = rc << 16;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_SEC_LO,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ return false;
+ *sec = *sec | rc;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_MSG_HDR2,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ return false;
+
+ *seq = rc;
+
+ return true;
+}
+
+static void mchp_rds_ptp_process_tx_ts(struct mchp_rds_ptp_clock *clock)
+{
+ int caps;
+
+ do {
+ u32 sec, nsec;
+ u16 seq;
+
+ if (mchp_rds_ptp_get_tx_ts(clock, &sec, &nsec, &seq))
+ mchp_rds_ptp_match_tx_skb(clock, sec, nsec, seq);
+
+ caps = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_CAP_INFO,
+ MCHP_RDS_PTP_PORT);
+ if (caps < 0)
+ return;
+ } while (MCHP_RDS_PTP_TX_TS_CNT(caps) > 0);
+}
+
+int mchp_rds_ptp_top_config_intr(struct mchp_rds_ptp_clock *clock,
+ u16 reg, u16 val, bool clear)
+{
+ if (clear)
+ return phy_clear_bits_mmd(clock->phydev, PTP_MMD(clock), reg,
+ val);
+ else
+ return phy_set_bits_mmd(clock->phydev, PTP_MMD(clock), reg,
+ val);
+}
+EXPORT_SYMBOL_GPL(mchp_rds_ptp_top_config_intr);
+
+irqreturn_t mchp_rds_ptp_handle_interrupt(struct mchp_rds_ptp_clock *clock)
+{
+ int irq_sts;
+
+ /* To handle rogue interrupt scenarios */
+ if (!clock)
+ return IRQ_NONE;
+
+ do {
+ irq_sts = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_INT_STS,
+ MCHP_RDS_PTP_PORT);
+ if (irq_sts < 0)
+ return IRQ_NONE;
+
+ if (irq_sts & MCHP_RDS_PTP_INT_RX_TS_EN)
+ mchp_rds_ptp_process_rx_ts(clock);
+
+ if (irq_sts & MCHP_RDS_PTP_INT_TX_TS_EN)
+ mchp_rds_ptp_process_tx_ts(clock);
+
+ if (irq_sts & MCHP_RDS_PTP_INT_TX_TS_OVRFL_EN)
+ mchp_rds_ptp_flush_fifo(clock,
+ MCHP_RDS_PTP_EGRESS_FIFO);
+
+ if (irq_sts & MCHP_RDS_PTP_INT_RX_TS_OVRFL_EN)
+ mchp_rds_ptp_flush_fifo(clock,
+ MCHP_RDS_PTP_INGRESS_FIFO);
+ } while (irq_sts & (MCHP_RDS_PTP_INT_RX_TS_EN |
+ MCHP_RDS_PTP_INT_TX_TS_EN |
+ MCHP_RDS_PTP_INT_TX_TS_OVRFL_EN |
+ MCHP_RDS_PTP_INT_RX_TS_OVRFL_EN));
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(mchp_rds_ptp_handle_interrupt);
+
+static int mchp_rds_ptp_init(struct mchp_rds_ptp_clock *clock)
+{
+ int rc;
+
+ /* Disable PTP */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_CMD_CTL_DIS);
+ if (rc < 0)
+ return rc;
+
+ /* Disable TSU */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TSU_GEN_CONFIG,
+ MCHP_RDS_PTP_PORT, 0);
+ if (rc < 0)
+ return rc;
+
+ /* Clear PTP interrupt status registers */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TSU_HARD_RESET,
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_PTP_TSU_HARDRESET);
+ if (rc < 0)
+ return rc;
+
+ /* Predictor enable */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LATENCY_CORRECTION_CTL,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_LATENCY_SETTING);
+ if (rc < 0)
+ return rc;
+
+ /* Configure PTP operational mode */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_OP_MODE,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_OP_MODE_STANDALONE);
+ if (rc < 0)
+ return rc;
+
+ /* Reference clock configuration */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_REF_CLK_CFG,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_REF_CLK_CFG_SET);
+ if (rc < 0)
+ return rc;
+
+ /* Classifier configurations */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_CONFIG,
+ MCHP_RDS_PTP_PORT, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_CONFIG,
+ MCHP_RDS_PTP_PORT, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_L2_ADDR_EN,
+ MCHP_RDS_PTP_PORT, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_L2_ADDR_EN,
+ MCHP_RDS_PTP_PORT, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_IPV4_ADDR_EN,
+ MCHP_RDS_PTP_PORT, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_IPV4_ADDR_EN,
+ MCHP_RDS_PTP_PORT, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_VERSION,
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_PTP_MAX_VERSION(0xff) |
+ MCHP_RDS_PTP_MIN_VERSION(0x0));
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_VERSION,
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_PTP_MAX_VERSION(0xff) |
+ MCHP_RDS_PTP_MIN_VERSION(0x0));
+ if (rc < 0)
+ return rc;
+
+ /* Enable TSU */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TSU_GEN_CONFIG,
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_PTP_TSU_GEN_CFG_TSU_EN);
+ if (rc < 0)
+ return rc;
+
+ /* Enable PTP */
+ return mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_CMD_CTL_EN);
+}
+
+struct mchp_rds_ptp_clock *mchp_rds_ptp_probe(struct phy_device *phydev, u8 mmd,
+ u16 clk_base_addr,
+ u16 port_base_addr)
+{
+ struct mchp_rds_ptp_clock *clock;
+ int rc;
+
+ clock = devm_kzalloc(&phydev->mdio.dev, sizeof(*clock), GFP_KERNEL);
+ if (!clock)
+ return ERR_PTR(-ENOMEM);
+
+ clock->port_base_addr = port_base_addr;
+ clock->clk_base_addr = clk_base_addr;
+ clock->mmd = mmd;
+
+ mutex_init(&clock->ptp_lock);
+ clock->pin_config = devm_kmalloc_array(&phydev->mdio.dev,
+ MCHP_RDS_PTP_N_PIN,
+ sizeof(*clock->pin_config),
+ GFP_KERNEL);
+ if (!clock->pin_config)
+ return ERR_PTR(-ENOMEM);
+
+ for (int i = 0; i < MCHP_RDS_PTP_N_PIN; ++i) {
+ struct ptp_pin_desc *p = &clock->pin_config[i];
+
+ memset(p, 0, sizeof(*p));
+ snprintf(p->name, sizeof(p->name), "pin%d", i);
+ p->index = i;
+ p->func = PTP_PF_NONE;
+ }
+ /* Register PTP clock */
+ clock->caps.owner = THIS_MODULE;
+ snprintf(clock->caps.name, 30, "%s", phydev->drv->name);
+ clock->caps.max_adj = MCHP_RDS_PTP_MAX_ADJ;
+ clock->caps.n_ext_ts = 0;
+ clock->caps.pps = 0;
+ clock->caps.n_pins = MCHP_RDS_PTP_N_PIN;
+ clock->caps.n_per_out = MCHP_RDS_PTP_N_PEROUT;
+ clock->caps.supported_perout_flags = PTP_PEROUT_DUTY_CYCLE;
+ clock->caps.pin_config = clock->pin_config;
+ clock->caps.adjfine = mchp_rds_ptp_ltc_adjfine;
+ clock->caps.adjtime = mchp_rds_ptp_ltc_adjtime;
+ clock->caps.gettime64 = mchp_rds_ptp_ltc_gettime64;
+ clock->caps.settime64 = mchp_rds_ptp_ltc_settime64;
+ clock->caps.enable = mchp_rds_ptpci_enable;
+ clock->caps.verify = mchp_rds_ptpci_verify;
+ clock->caps.getcrosststamp = NULL;
+ clock->ptp_clock = ptp_clock_register(&clock->caps,
+ &phydev->mdio.dev);
+ if (IS_ERR(clock->ptp_clock))
+ return ERR_PTR(-EINVAL);
+
+ /* Check if PHC support is missing at the configuration level */
+ if (!clock->ptp_clock)
+ return NULL;
+
+ /* Initialize the SW */
+ skb_queue_head_init(&clock->tx_queue);
+ skb_queue_head_init(&clock->rx_queue);
+ INIT_LIST_HEAD(&clock->rx_ts_list);
+ spin_lock_init(&clock->rx_ts_lock);
+
+ clock->mii_ts.rxtstamp = mchp_rds_ptp_rxtstamp;
+ clock->mii_ts.txtstamp = mchp_rds_ptp_txtstamp;
+ clock->mii_ts.hwtstamp_set = mchp_rds_ptp_hwtstamp_set;
+ clock->mii_ts.ts_info = mchp_rds_ptp_ts_info;
+
+ phydev->mii_ts = &clock->mii_ts;
+
+ clock->mchp_rds_ptp_event = -1;
+
+ /* Timestamp selected by default to keep legacy API */
+ phydev->default_timestamp = true;
+
+ clock->phydev = phydev;
+
+ rc = mchp_rds_ptp_init(clock);
+ if (rc < 0)
+ return ERR_PTR(rc);
+
+ return clock;
+}
+EXPORT_SYMBOL_GPL(mchp_rds_ptp_probe);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MICROCHIP PHY RDS PTP driver");
+MODULE_AUTHOR("Divya Koppera");
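
Editor's note: the periodic-output path implemented above can be exercised from user space through the PTP character device, in the spirit of the kernel's testptp tool. A sketch follows (not part of this patch); the /dev/ptp1 node index and the function name are assumptions, while pin 3 matches the PEROUT event pin selected by the LAN887x changes later in this patch.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

static int start_perout(void)
{
	struct ptp_pin_desc pin = { .index = 3, .func = PTP_PF_PEROUT, .chan = 0 };
	struct ptp_perout_request req;
	int fd, ret;

	fd = open("/dev/ptp1", O_RDWR);
	if (fd < 0)
		return -1;

	/* Route channel 0 of the periodic output to pin 3; the driver's
	 * verify() callback only accepts this combination. */
	ret = ioctl(fd, PTP_PIN_SETFUNC2, &pin);
	if (ret)
		goto out;

	memset(&req, 0, sizeof(req));
	req.index = 0;
	req.start.sec = 0;		/* start as soon as possible */
	req.period.sec = 1;		/* one pulse per second */
	req.period.nsec = 0;
	ret = ioctl(fd, PTP_PEROUT_REQUEST2, &req);
out:
	close(fd);
	return ret;
}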
diff --git a/drivers/net/phy/microchip_rds_ptp.h b/drivers/net/phy/microchip_rds_ptp.h
new file mode 100644
index 000000000000..25af68337b94
--- /dev/null
+++ b/drivers/net/phy/microchip_rds_ptp.h
@@ -0,0 +1,247 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (C) 2024 Microchip Technology
+ */
+
+#ifndef _MICROCHIP_RDS_PTP_H
+#define _MICROCHIP_RDS_PTP_H
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/ptp_clock.h>
+#include <linux/ptp_classify.h>
+#include <linux/net_tstamp.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+
+#define MCHP_RDS_PTP_CMD_CTL 0x0
+#define MCHP_RDS_PTP_CMD_CTL_LTC_STEP_NSEC BIT(6)
+#define MCHP_RDS_PTP_CMD_CTL_LTC_STEP_SEC BIT(5)
+#define MCHP_RDS_PTP_CMD_CTL_CLOCK_LOAD BIT(4)
+#define MCHP_RDS_PTP_CMD_CTL_CLOCK_READ BIT(3)
+#define MCHP_RDS_PTP_CMD_CTL_EN BIT(1)
+#define MCHP_RDS_PTP_CMD_CTL_DIS BIT(0)
+
+#define MCHP_RDS_PTP_REF_CLK_CFG 0x2
+#define MCHP_RDS_PTP_REF_CLK_SRC_250MHZ 0x0
+#define MCHP_RDS_PTP_REF_CLK_PERIOD_OVERRIDE BIT(9)
+#define MCHP_RDS_PTP_REF_CLK_PERIOD 4
+#define MCHP_RDS_PTP_REF_CLK_CFG_SET (MCHP_RDS_PTP_REF_CLK_SRC_250MHZ |\
+ MCHP_RDS_PTP_REF_CLK_PERIOD_OVERRIDE |\
+ MCHP_RDS_PTP_REF_CLK_PERIOD)
+
+#define MCHP_RDS_PTP_LTC_SEC_HI 0x5
+#define MCHP_RDS_PTP_LTC_SEC_MID 0x6
+#define MCHP_RDS_PTP_LTC_SEC_LO 0x7
+#define MCHP_RDS_PTP_LTC_NS_HI 0x8
+#define MCHP_RDS_PTP_LTC_NS_LO 0x9
+#define MCHP_RDS_PTP_LTC_RATE_ADJ_HI 0xc
+#define MCHP_RDS_PTP_LTC_RATE_ADJ_HI_DIR BIT(15)
+#define MCHP_RDS_PTP_LTC_RATE_ADJ_LO 0xd
+#define MCHP_RDS_PTP_STEP_ADJ_HI 0x12
+#define MCHP_RDS_PTP_STEP_ADJ_HI_DIR BIT(15)
+#define MCHP_RDS_PTP_STEP_ADJ_LO 0x13
+#define MCHP_RDS_PTP_LTC_READ_SEC_HI 0x29
+#define MCHP_RDS_PTP_LTC_READ_SEC_MID 0x2a
+#define MCHP_RDS_PTP_LTC_READ_SEC_LO 0x2b
+#define MCHP_RDS_PTP_LTC_READ_NS_HI 0x2c
+#define MCHP_RDS_PTP_LTC_READ_NS_LO 0x2d
+#define MCHP_RDS_PTP_OP_MODE 0x41
+#define MCHP_RDS_PTP_OP_MODE_DIS 0
+#define MCHP_RDS_PTP_OP_MODE_STANDALONE 1
+#define MCHP_RDS_PTP_LATENCY_CORRECTION_CTL 0x44
+#define MCHP_RDS_PTP_PREDICTOR_EN BIT(6)
+#define MCHP_RDS_PTP_TX_PRED_DIS BIT(1)
+#define MCHP_RDS_PTP_RX_PRED_DIS BIT(0)
+#define MCHP_RDS_PTP_LATENCY_SETTING (MCHP_RDS_PTP_PREDICTOR_EN | \
+ MCHP_RDS_PTP_TX_PRED_DIS | \
+ MCHP_RDS_PTP_RX_PRED_DIS)
+
+#define MCHP_RDS_PTP_INT_EN 0x0
+#define MCHP_RDS_PTP_INT_STS 0x01
+#define MCHP_RDS_PTP_INT_TX_TS_OVRFL_EN BIT(3)
+#define MCHP_RDS_PTP_INT_TX_TS_EN BIT(2)
+#define MCHP_RDS_PTP_INT_RX_TS_OVRFL_EN BIT(1)
+#define MCHP_RDS_PTP_INT_RX_TS_EN BIT(0)
+#define MCHP_RDS_PTP_INT_ALL_MSK (MCHP_RDS_PTP_INT_TX_TS_OVRFL_EN | \
+ MCHP_RDS_PTP_INT_TX_TS_EN | \
+ MCHP_RDS_PTP_INT_RX_TS_OVRFL_EN |\
+ MCHP_RDS_PTP_INT_RX_TS_EN)
+
+#define MCHP_RDS_PTP_CAP_INFO 0x2e
+#define MCHP_RDS_PTP_TX_TS_CNT(v) (((v) & GENMASK(11, 8)) >> 8)
+#define MCHP_RDS_PTP_RX_TS_CNT(v) ((v) & GENMASK(3, 0))
+
+#define MCHP_RDS_PTP_RX_PARSE_CONFIG 0x42
+#define MCHP_RDS_PTP_RX_PARSE_L2_ADDR_EN 0x44
+#define MCHP_RDS_PTP_RX_PARSE_IPV4_ADDR_EN 0x45
+
+#define MCHP_RDS_PTP_RX_TIMESTAMP_CONFIG 0x4e
+#define MCHP_RDS_PTP_RX_TIMESTAMP_CONFIG_PTP_FCS_DIS BIT(0)
+
+#define MCHP_RDS_PTP_RX_VERSION 0x48
+#define MCHP_RDS_PTP_RX_TIMESTAMP_EN 0x4d
+
+#define MCHP_RDS_PTP_RX_INGRESS_NS_HI 0x54
+#define MCHP_RDS_PTP_RX_INGRESS_NS_HI_TS_VALID BIT(15)
+
+#define MCHP_RDS_PTP_RX_INGRESS_NS_LO 0x55
+#define MCHP_RDS_PTP_RX_INGRESS_SEC_HI 0x56
+#define MCHP_RDS_PTP_RX_INGRESS_SEC_LO 0x57
+#define MCHP_RDS_PTP_RX_MSG_HDR2 0x59
+
+#define MCHP_RDS_PTP_TX_PARSE_CONFIG 0x82
+#define MCHP_RDS_PTP_PARSE_CONFIG_LAYER2_EN BIT(0)
+#define MCHP_RDS_PTP_PARSE_CONFIG_IPV4_EN BIT(1)
+#define MCHP_RDS_PTP_PARSE_CONFIG_IPV6_EN BIT(2)
+
+#define MCHP_RDS_PTP_TX_PARSE_L2_ADDR_EN 0x84
+#define MCHP_RDS_PTP_TX_PARSE_IPV4_ADDR_EN 0x85
+
+#define MCHP_RDS_PTP_TX_VERSION 0x88
+#define MCHP_RDS_PTP_MAX_VERSION(x) (((x) & GENMASK(7, 0)) << 8)
+#define MCHP_RDS_PTP_MIN_VERSION(x) ((x) & GENMASK(7, 0))
+
+#define MCHP_RDS_PTP_TX_TIMESTAMP_EN 0x8d
+#define MCHP_RDS_PTP_TIMESTAMP_EN_SYNC BIT(0)
+#define MCHP_RDS_PTP_TIMESTAMP_EN_DREQ BIT(1)
+#define MCHP_RDS_PTP_TIMESTAMP_EN_PDREQ BIT(2)
+#define MCHP_RDS_PTP_TIMESTAMP_EN_PDRES BIT(3)
+#define MCHP_RDS_PTP_TIMESTAMP_EN_ALL (MCHP_RDS_PTP_TIMESTAMP_EN_SYNC |\
+ MCHP_RDS_PTP_TIMESTAMP_EN_DREQ |\
+ MCHP_RDS_PTP_TIMESTAMP_EN_PDREQ |\
+ MCHP_RDS_PTP_TIMESTAMP_EN_PDRES)
+
+#define MCHP_RDS_PTP_TX_TIMESTAMP_CONFIG 0x8e
+#define MCHP_RDS_PTP_TX_TIMESTAMP_CONFIG_PTP_FCS_DIS BIT(0)
+
+#define MCHP_RDS_PTP_TX_MOD 0x8f
+#define MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT BIT(12)
+
+#define MCHP_RDS_PTP_TX_EGRESS_NS_HI 0x94
+#define MCHP_RDS_PTP_TX_EGRESS_NS_HI_TS_VALID BIT(15)
+
+#define MCHP_RDS_PTP_TX_EGRESS_NS_LO 0x95
+#define MCHP_RDS_PTP_TX_EGRESS_SEC_HI 0x96
+#define MCHP_RDS_PTP_TX_EGRESS_SEC_LO 0x97
+#define MCHP_RDS_PTP_TX_MSG_HDR2 0x99
+
+#define MCHP_RDS_PTP_TSU_GEN_CONFIG 0xc0
+#define MCHP_RDS_PTP_TSU_GEN_CFG_TSU_EN BIT(0)
+
+#define MCHP_RDS_PTP_TSU_HARD_RESET 0xc1
+#define MCHP_RDS_PTP_TSU_HARDRESET BIT(0)
+
+#define MCHP_RDS_PTP_CLK_TRGT_SEC_HI 0x15
+#define MCHP_RDS_PTP_CLK_TRGT_SEC_LO 0x16
+#define MCHP_RDS_PTP_CLK_TRGT_NS_HI 0x17
+#define MCHP_RDS_PTP_CLK_TRGT_NS_LO 0x18
+
+#define MCHP_RDS_PTP_CLK_TRGT_RELOAD_SEC_HI 0x19
+#define MCHP_RDS_PTP_CLK_TRGT_RELOAD_SEC_LO 0x1a
+#define MCHP_RDS_PTP_CLK_TRGT_RELOAD_NS_HI 0x1b
+#define MCHP_RDS_PTP_CLK_TRGT_RELOAD_NS_LO 0x1c
+
+#define MCHP_RDS_PTP_GEN_CFG 0x01
+#define MCHP_RDS_PTP_GEN_CFG_LTC_EVT_MASK GENMASK(11, 8)
+
+#define MCHP_RDS_PTP_GEN_CFG_LTC_EVT_SET(value) (((value) & 0xF) << 4)
+#define MCHP_RDS_PTP_GEN_CFG_RELOAD_ADD BIT(0)
+#define MCHP_RDS_PTP_GEN_CFG_POLARITY BIT(1)
+
+/* Represents a 1 ppm adjustment in 2^32 format, where each nsec holds
+ * 4 clock cycles of the 250 MHz reference clock.
+ * The value is calculated as (1/1000000)/((2^-32)/4) = 4 * 2^32 / 10^6,
+ * which truncates to 17179.
+ */
+#define MCHP_RDS_PTP_1PPM_FORMAT 17179
+#define MCHP_RDS_PTP_FIFO_SIZE 8
+#define MCHP_RDS_PTP_MAX_ADJ 31249999
+
+#define MCHP_RDS_PTP_BUFFER_TIME 2
+#define MCHP_RDS_PTP_N_PIN 4
+#define MCHP_RDS_PTP_N_PEROUT 1
+
+#define BASE_CLK(p) ((p)->clk_base_addr)
+#define BASE_PORT(p) ((p)->port_base_addr)
+#define PTP_MMD(p) ((p)->mmd)
+
+enum mchp_rds_ptp_base {
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_PTP_CLOCK
+};
+
+enum mchp_rds_ptp_fifo_dir {
+ MCHP_RDS_PTP_INGRESS_FIFO,
+ MCHP_RDS_PTP_EGRESS_FIFO
+};
+
+struct mchp_rds_ptp_clock {
+ struct mii_timestamper mii_ts;
+ struct phy_device *phydev;
+ struct ptp_clock *ptp_clock;
+
+ struct sk_buff_head tx_queue;
+ struct sk_buff_head rx_queue;
+ struct list_head rx_ts_list;
+
+ struct ptp_clock_info caps;
+
+ /* Lock for Rx ts fifo */
+ spinlock_t rx_ts_lock;
+ int hwts_tx_type;
+
+ enum hwtstamp_rx_filters rx_filter;
+ int layer;
+ int version;
+ u16 port_base_addr;
+ u16 clk_base_addr;
+
+ /* Lock for phc */
+ struct mutex ptp_lock;
+ u8 mmd;
+ int mchp_rds_ptp_event;
+ int event_pin;
+ struct ptp_pin_desc *pin_config;
+};
+
+struct mchp_rds_ptp_rx_ts {
+ struct list_head list;
+ u32 seconds;
+ u32 nsec;
+ u16 seq_id;
+};
+
+#if IS_ENABLED(CONFIG_MICROCHIP_PHY_RDS_PTP)
+
+struct mchp_rds_ptp_clock *mchp_rds_ptp_probe(struct phy_device *phydev, u8 mmd,
+ u16 clk_base, u16 port_base);
+
+int mchp_rds_ptp_top_config_intr(struct mchp_rds_ptp_clock *clock,
+ u16 reg, u16 val, bool enable);
+
+irqreturn_t mchp_rds_ptp_handle_interrupt(struct mchp_rds_ptp_clock *clock);
+
+#else
+
+static inline struct mchp_rds_ptp_clock *
+mchp_rds_ptp_probe(struct phy_device *phydev, u8 mmd, u16 clk_base,
+		   u16 port_base)
+{
+ return NULL;
+}
+
+static inline int mchp_rds_ptp_top_config_intr(struct mchp_rds_ptp_clock *clock,
+ u16 reg, u16 val, bool enable)
+{
+ return 0;
+}
+
+static inline irqreturn_t
+mchp_rds_ptp_handle_interrupt(struct mchp_rds_ptp_clock *clock)
+{
+ return IRQ_NONE;
+}
+
+#endif /* CONFIG_MICROCHIP_PHY_RDS_PTP */
+
+#endif /* _MICROCHIP_RDS_PTP_H */
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index 71d6050b2833..62b36a318100 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -10,11 +10,15 @@
#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>
#include <linux/bitfield.h>
+#include "microchip_rds_ptp.h"
#define PHY_ID_LAN87XX 0x0007c150
#define PHY_ID_LAN937X 0x0007c180
#define PHY_ID_LAN887X 0x0007c1f0
+#define MCHP_RDS_PTP_LTC_BASE_ADDR 0xe000
+#define MCHP_RDS_PTP_PORT_BASE_ADDR (MCHP_RDS_PTP_LTC_BASE_ADDR + 0x800)
+
/* External Register Control Register */
#define LAN87XX_EXT_REG_CTL (0x14)
#define LAN87XX_EXT_REG_CTL_RD_CTL (0x1000)
@@ -229,10 +233,14 @@
#define LAN887X_INT_STS 0xf000
#define LAN887X_INT_MSK 0xf001
+#define LAN887X_INT_MSK_P1588_MOD_INT_MSK BIT(3)
#define LAN887X_INT_MSK_T1_PHY_INT_MSK BIT(2)
#define LAN887X_INT_MSK_LINK_UP_MSK BIT(1)
#define LAN887X_INT_MSK_LINK_DOWN_MSK BIT(0)
+#define LAN887X_MX_CHIP_TOP_REG_CONTROL1 0xF002
+#define LAN887X_MX_CHIP_TOP_REG_CONTROL1_EVT_EN BIT(8)
+
#define LAN887X_MX_CHIP_TOP_LINK_MSK (LAN887X_INT_MSK_LINK_UP_MSK |\
LAN887X_INT_MSK_LINK_DOWN_MSK)
@@ -319,6 +327,8 @@ struct lan887x_regwr_map {
struct lan887x_priv {
u64 stats[ARRAY_SIZE(lan887x_hw_stats)];
+ struct mchp_rds_ptp_clock *clock;
+ bool init_done;
};
static int lan937x_dsp_workaround(struct phy_device *phydev, u16 ereg, u8 bank)
@@ -1269,8 +1279,28 @@ static int lan887x_get_features(struct phy_device *phydev)
static int lan887x_phy_init(struct phy_device *phydev)
{
+ struct lan887x_priv *priv = phydev->priv;
int ret;
+ if (!priv->init_done && phy_interrupt_is_valid(phydev)) {
+ priv->clock = mchp_rds_ptp_probe(phydev, MDIO_MMD_VEND1,
+ MCHP_RDS_PTP_LTC_BASE_ADDR,
+ MCHP_RDS_PTP_PORT_BASE_ADDR);
+ if (IS_ERR(priv->clock))
+ return PTR_ERR(priv->clock);
+
+ /* Enable pin mux for EVT */
+ phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_MX_CHIP_TOP_REG_CONTROL1,
+ LAN887X_MX_CHIP_TOP_REG_CONTROL1_EVT_EN,
+ LAN887X_MX_CHIP_TOP_REG_CONTROL1_EVT_EN);
+
+ /* Initialize pin numbers specific to PEROUT */
+ priv->clock->event_pin = 3;
+
+ priv->init_done = true;
+ }
+
/* Clear loopback */
ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
LAN887X_MIS_CFG_REG2,
@@ -1470,6 +1500,7 @@ static int lan887x_probe(struct phy_device *phydev)
if (!priv)
return -ENOMEM;
+ priv->init_done = false;
phydev->priv = priv;
return lan887x_phy_setup(phydev);
@@ -1518,6 +1549,7 @@ static void lan887x_get_strings(struct phy_device *phydev, u8 *data)
static int lan887x_config_intr(struct phy_device *phydev)
{
+ struct lan887x_priv *priv = phydev->priv;
int rc;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
@@ -1537,12 +1569,24 @@ static int lan887x_config_intr(struct phy_device *phydev)
rc = phy_read_mmd(phydev, MDIO_MMD_VEND1, LAN887X_INT_STS);
}
+ if (rc < 0)
+ return rc;
- return rc < 0 ? rc : 0;
+ if (phy_is_default_hwtstamp(phydev)) {
+ return mchp_rds_ptp_top_config_intr(priv->clock,
+ LAN887X_INT_MSK,
+ LAN887X_INT_MSK_P1588_MOD_INT_MSK,
+ (phydev->interrupts ==
+ PHY_INTERRUPT_ENABLED));
+ }
+
+ return 0;
}
static irqreturn_t lan887x_handle_interrupt(struct phy_device *phydev)
{
+ struct lan887x_priv *priv = phydev->priv;
+ int rc = IRQ_NONE;
int irq_status;
irq_status = phy_read_mmd(phydev, MDIO_MMD_VEND1, LAN887X_INT_STS);
@@ -1553,10 +1597,13 @@ static irqreturn_t lan887x_handle_interrupt(struct phy_device *phydev)
if (irq_status & LAN887X_MX_CHIP_TOP_LINK_MSK) {
phy_trigger_machine(phydev);
- return IRQ_HANDLED;
+ rc = IRQ_HANDLED;
}
- return IRQ_NONE;
+ if (irq_status & LAN887X_INT_MSK_P1588_MOD_INT_MSK)
+ rc = mchp_rds_ptp_handle_interrupt(priv->clock);
+
+ return rc;
}
static int lan887x_cd_reset(struct phy_device *phydev,
@@ -2113,6 +2160,7 @@ static struct phy_driver microchip_t1_phy_driver[] = {
.handle_interrupt = lan887x_handle_interrupt,
.get_sqi = lan887x_get_sqi,
.get_sqi_max = lan87xx_get_sqi_max,
+ .set_loopback = genphy_c45_loopback,
}
};
diff --git a/drivers/net/phy/microchip_t1s.c b/drivers/net/phy/microchip_t1s.c
index 75d291154b4c..e601d56b2507 100644
--- a/drivers/net/phy/microchip_t1s.c
+++ b/drivers/net/phy/microchip_t1s.c
@@ -3,7 +3,7 @@
* Driver for Microchip 10BASE-T1S PHYs
*
* Support: Microchip Phys:
- * lan8670/1/2 Rev.B1/C1/C2
+ * lan8670/1/2 Rev.B1/C1/C2/D0
* lan8650/1 Rev.B0/B1 Internal PHYs
*/
@@ -14,6 +14,7 @@
#define PHY_ID_LAN867X_REVB1 0x0007C162
#define PHY_ID_LAN867X_REVC1 0x0007C164
#define PHY_ID_LAN867X_REVC2 0x0007C165
+#define PHY_ID_LAN867X_REVD0 0x0007C166
/* Both Rev.B0 and B1 clause 22 PHYID's are same due to B1 chip limitation */
#define PHY_ID_LAN865X_REVB 0x0007C1B3
@@ -32,6 +33,17 @@
#define COL_DET_ENABLE BIT(15)
#define COL_DET_DISABLE 0x0000
+/* LAN8670/1/2 Rev.D0 Link Status Selection Register */
+#define LAN867X_REG_LINK_STATUS_CTRL 0x0012
+#define LINK_STATUS_CONFIGURATION GENMASK(12, 11)
+#define LINK_STATUS_SEMAPHORE BIT(0)
+
+/* Link Status Configuration */
+#define LINK_STATUS_CONFIG_PLCA_STATUS 0x1
+#define LINK_STATUS_CONFIG_SEMAPHORE 0x2
+
+#define LINK_STATUS_SEMAPHORE_SET 0x1
+
#define LAN865X_CFGPARAM_READ_ENABLE BIT(1)
/* The arrays below are pulled from the following table from AN1699
@@ -109,6 +121,21 @@ static const u16 lan865x_revb_sqi_fixup_cfg_regs[3] = {
0x00AD, 0x00AE, 0x00AF,
};
+/* LAN867x Rev.D0 configuration parameters from AN1699
+ * As per the Configuration Application Note AN1699 published at the link
+ * below, Revision G (DS60001699G - October 2025):
+ * https://www.microchip.com/en-us/application-notes/an1699
+ */
+static const u16 lan867x_revd0_fixup_regs[8] = {
+ 0x0037, 0x008A, 0x0118, 0x00D6,
+ 0x0082, 0x00FD, 0x00FD, 0x0091,
+};
+
+static const u16 lan867x_revd0_fixup_values[8] = {
+ 0x0800, 0xBFC0, 0x029C, 0x1001,
+ 0x001C, 0x0C0B, 0x8C07, 0x9660,
+};
+
/* Pulled from AN1760 describing 'indirect read'
*
* write_register(0x4, 0x00D8, addr)
@@ -377,6 +404,32 @@ static int lan867x_revb1_config_init(struct phy_device *phydev)
return 0;
}
+static int lan867x_revd0_link_active_selection(struct phy_device *phydev,
+ bool plca_enabled)
+{
+ u16 value;
+
+ if (plca_enabled) {
+ /* 0x1 - When PLCA is enabled: link status reflects plca_status.
+ */
+ value = FIELD_PREP(LINK_STATUS_CONFIGURATION,
+ LINK_STATUS_CONFIG_PLCA_STATUS);
+ } else {
+		/* 0x2 - Link status is controlled by the value written to the
+		 * LINK_STATUS_SEMAPHORE bit. The semaphore bit is set to 0x1
+		 * here to keep the link always active in CSMA/CD mode, which
+		 * does not support autoneg.
+		 */
+ value = FIELD_PREP(LINK_STATUS_CONFIGURATION,
+ LINK_STATUS_CONFIG_SEMAPHORE) |
+ FIELD_PREP(LINK_STATUS_SEMAPHORE,
+ LINK_STATUS_SEMAPHORE_SET);
+ }
+
+ return phy_write_mmd(phydev, MDIO_MMD_VEND2,
+ LAN867X_REG_LINK_STATUS_CTRL, value);
+}
+
/* As per LAN8650/1 Rev.B0/B1 AN1760 (Revision F (DS60001760G - June 2024)) and
* LAN8670/1/2 Rev.C1/C2 AN1699 (Revision E (DS60001699F - June 2024)), under
* normal operation, the device should be operated in PLCA mode. Disabling
@@ -393,6 +446,14 @@ static int lan86xx_plca_set_cfg(struct phy_device *phydev,
{
int ret;
+ /* Link status selection must be configured for LAN8670/1/2 Rev.D0 */
+ if (phydev->phy_id == PHY_ID_LAN867X_REVD0) {
+ ret = lan867x_revd0_link_active_selection(phydev,
+ plca_cfg->enabled);
+ if (ret)
+ return ret;
+ }
+
ret = genphy_c45_plca_set_cfg(phydev, plca_cfg);
if (ret)
return ret;
@@ -407,6 +468,29 @@ static int lan86xx_plca_set_cfg(struct phy_device *phydev,
COL_DET_CTRL0_ENABLE_BIT_MASK, COL_DET_ENABLE);
}
+static int lan867x_revd0_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = lan867x_check_reset_complete(phydev);
+ if (ret)
+ return ret;
+
+ for (int i = 0; i < ARRAY_SIZE(lan867x_revd0_fixup_regs); i++) {
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2,
+ lan867x_revd0_fixup_regs[i],
+ lan867x_revd0_fixup_values[i]);
+ if (ret)
+ return ret;
+ }
+
+	/* The PHY comes up in CSMA/CD mode by default, so the link must be
+	 * forced active because CSMA/CD mode does not support autoneg.
+	 */
+ return lan867x_revd0_link_active_selection(phydev, false);
+}
+
static int lan86xx_read_status(struct phy_device *phydev)
{
/* The phy has some limitations, namely:
@@ -482,6 +566,19 @@ static struct phy_driver microchip_t1s_driver[] = {
.get_plca_status = genphy_c45_plca_get_status,
},
{
+ PHY_ID_MATCH_EXACT(PHY_ID_LAN867X_REVD0),
+ .name = "LAN867X Rev.D0",
+ .features = PHY_BASIC_T1S_P2MP_FEATURES,
+ .config_init = lan867x_revd0_config_init,
+ .get_plca_cfg = genphy_c45_plca_get_cfg,
+ .set_plca_cfg = lan86xx_plca_set_cfg,
+ .get_plca_status = genphy_c45_plca_get_status,
+ .cable_test_start = genphy_c45_oatc14_cable_test_start,
+ .cable_test_get_status = genphy_c45_oatc14_cable_test_get_status,
+ .get_sqi = genphy_c45_oatc14_get_sqi,
+ .get_sqi_max = genphy_c45_oatc14_get_sqi_max,
+ },
+ {
PHY_ID_MATCH_EXACT(PHY_ID_LAN865X_REVB),
.name = "LAN865X Rev.B0/B1 Internal Phy",
.features = PHY_BASIC_T1S_P2MP_FEATURES,
@@ -497,10 +594,11 @@ static struct phy_driver microchip_t1s_driver[] = {
module_phy_driver(microchip_t1s_driver);
-static struct mdio_device_id __maybe_unused tbl[] = {
+static const struct mdio_device_id __maybe_unused tbl[] = {
{ PHY_ID_MATCH_EXACT(PHY_ID_LAN867X_REVB1) },
{ PHY_ID_MATCH_EXACT(PHY_ID_LAN867X_REVC1) },
{ PHY_ID_MATCH_EXACT(PHY_ID_LAN867X_REVC2) },
+ { PHY_ID_MATCH_EXACT(PHY_ID_LAN867X_REVD0) },
{ PHY_ID_MATCH_EXACT(PHY_ID_LAN865X_REVB) },
{ }
};
diff --git a/drivers/net/phy/motorcomm.c b/drivers/net/phy/motorcomm.c
index 0e91f5d1a4fd..89b5b19a9bd2 100644
--- a/drivers/net/phy/motorcomm.c
+++ b/drivers/net/phy/motorcomm.c
@@ -213,6 +213,20 @@
#define YT8521_RC1R_RGMII_2_100_NS 14
#define YT8521_RC1R_RGMII_2_250_NS 15
+/* LED CONFIG */
+#define YT8521_MAX_LEDS 3
+#define YT8521_LED0_CFG_REG 0xA00C
+#define YT8521_LED1_CFG_REG 0xA00D
+#define YT8521_LED2_CFG_REG 0xA00E
+#define YT8521_LED_ACT_BLK_IND BIT(13)
+#define YT8521_LED_FDX_ON_EN BIT(12)
+#define YT8521_LED_HDX_ON_EN BIT(11)
+#define YT8521_LED_TXACT_BLK_EN BIT(10)
+#define YT8521_LED_RXACT_BLK_EN BIT(9)
+#define YT8521_LED_1000_ON_EN BIT(6)
+#define YT8521_LED_100_ON_EN BIT(5)
+#define YT8521_LED_10_ON_EN BIT(4)
+
#define YTPHY_MISC_CONFIG_REG 0xA006
#define YTPHY_MCR_FIBER_SPEED_MASK BIT(0)
#define YTPHY_MCR_FIBER_1000BX (0x1 << 0)
@@ -1681,6 +1695,106 @@ err_restore_page:
return phy_restore_page(phydev, old_page, ret);
}
+static const unsigned long supported_trgs = (BIT(TRIGGER_NETDEV_FULL_DUPLEX) |
+ BIT(TRIGGER_NETDEV_HALF_DUPLEX) |
+ BIT(TRIGGER_NETDEV_LINK) |
+ BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_RX) |
+ BIT(TRIGGER_NETDEV_TX));
+
+static int yt8521_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ if (index >= YT8521_MAX_LEDS)
+ return -EINVAL;
+
+ /* All combinations of the supported triggers are allowed */
+ if (rules & ~supported_trgs)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int yt8521_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ u16 val = 0;
+
+ if (index >= YT8521_MAX_LEDS)
+ return -EINVAL;
+
+ if (test_bit(TRIGGER_NETDEV_LINK, &rules)) {
+ val |= YT8521_LED_10_ON_EN;
+ val |= YT8521_LED_100_ON_EN;
+ val |= YT8521_LED_1000_ON_EN;
+ }
+
+ if (test_bit(TRIGGER_NETDEV_LINK_10, &rules))
+ val |= YT8521_LED_10_ON_EN;
+
+ if (test_bit(TRIGGER_NETDEV_LINK_100, &rules))
+ val |= YT8521_LED_100_ON_EN;
+
+ if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules))
+ val |= YT8521_LED_1000_ON_EN;
+
+	if (test_bit(TRIGGER_NETDEV_FULL_DUPLEX, &rules))
+		val |= YT8521_LED_FDX_ON_EN;
+
+	if (test_bit(TRIGGER_NETDEV_HALF_DUPLEX, &rules))
+		val |= YT8521_LED_HDX_ON_EN;
+
+ if (test_bit(TRIGGER_NETDEV_TX, &rules) ||
+ test_bit(TRIGGER_NETDEV_RX, &rules))
+ val |= YT8521_LED_ACT_BLK_IND;
+
+ if (test_bit(TRIGGER_NETDEV_TX, &rules))
+ val |= YT8521_LED_TXACT_BLK_EN;
+
+ if (test_bit(TRIGGER_NETDEV_RX, &rules))
+ val |= YT8521_LED_RXACT_BLK_EN;
+
+ return ytphy_write_ext(phydev, YT8521_LED0_CFG_REG + index, val);
+}
+
+static int yt8521_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ int val;
+
+ if (index >= YT8521_MAX_LEDS)
+ return -EINVAL;
+
+ val = ytphy_read_ext(phydev, YT8521_LED0_CFG_REG + index);
+ if (val < 0)
+ return val;
+
+ if (val & YT8521_LED_TXACT_BLK_EN || val & YT8521_LED_ACT_BLK_IND)
+ __set_bit(TRIGGER_NETDEV_TX, rules);
+
+ if (val & YT8521_LED_RXACT_BLK_EN || val & YT8521_LED_ACT_BLK_IND)
+ __set_bit(TRIGGER_NETDEV_RX, rules);
+
+ if (val & YT8521_LED_FDX_ON_EN)
+ __set_bit(TRIGGER_NETDEV_FULL_DUPLEX, rules);
+
+ if (val & YT8521_LED_HDX_ON_EN)
+ __set_bit(TRIGGER_NETDEV_HALF_DUPLEX, rules);
+
+ if (val & YT8521_LED_1000_ON_EN)
+ __set_bit(TRIGGER_NETDEV_LINK_1000, rules);
+
+ if (val & YT8521_LED_100_ON_EN)
+ __set_bit(TRIGGER_NETDEV_LINK_100, rules);
+
+ if (val & YT8521_LED_10_ON_EN)
+ __set_bit(TRIGGER_NETDEV_LINK_10, rules);
+
+ return 0;
+}
+
static int yt8531_config_init(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
@@ -2920,6 +3034,9 @@ static struct phy_driver motorcomm_phy_drvs[] = {
.soft_reset = yt8521_soft_reset,
.suspend = yt8521_suspend,
.resume = yt8521_resume,
+ .led_hw_is_supported = yt8521_led_hw_is_supported,
+ .led_hw_control_set = yt8521_led_hw_control_set,
+ .led_hw_control_get = yt8521_led_hw_control_get,
},
{
PHY_ID_MATCH_EXACT(PHY_ID_YT8531),
@@ -2931,6 +3048,9 @@ static struct phy_driver motorcomm_phy_drvs[] = {
.get_wol = ytphy_get_wol,
.set_wol = yt8531_set_wol,
.link_change_notify = yt8531_link_change_notify,
+ .led_hw_is_supported = yt8521_led_hw_is_supported,
+ .led_hw_control_set = yt8521_led_hw_control_set,
+ .led_hw_control_get = yt8521_led_hw_control_get,
},
{
PHY_ID_MATCH_EXACT(PHY_ID_YT8531S),
diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h
index 6a3d8a754eb8..65c9d7bd9315 100644
--- a/drivers/net/phy/mscc/mscc.h
+++ b/drivers/net/phy/mscc/mscc.h
@@ -85,6 +85,10 @@ enum rgmii_clock_delay {
#define LED_MODE_SEL_MASK(x) (GENMASK(3, 0) << LED_MODE_SEL_POS(x))
#define LED_MODE_SEL(x, mode) (((mode) << LED_MODE_SEL_POS(x)) & LED_MODE_SEL_MASK(x))
+#define MSCC_PHY_LED_BEHAVIOR 30
+#define LED_COMBINE_DIS_MASK(x) BIT(x)
+#define LED_COMBINE_DIS(x, dis) (((dis) ? 1 : 0) << (x))
+
#define MSCC_EXT_PAGE_CSR_CNTL_17 17
#define MSCC_EXT_PAGE_CSR_CNTL_18 18
@@ -196,6 +200,9 @@ enum rgmii_clock_delay {
#define MSCC_PHY_EXTENDED_INT_MS_EGR BIT(9)
/* Extended Page 3 Registers */
+#define MSCC_PHY_SERDES_PCS_CTRL 16
+#define MSCC_PHY_SERDES_ANEG BIT(7)
+
#define MSCC_PHY_SERDES_TX_VALID_CNT 21
#define MSCC_PHY_SERDES_TX_CRC_ERR_CNT 22
#define MSCC_PHY_SERDES_RX_VALID_CNT 28
@@ -286,12 +293,12 @@ enum rgmii_clock_delay {
#define PHY_ID_VSC8540 0x00070760
#define PHY_ID_VSC8541 0x00070770
#define PHY_ID_VSC8552 0x000704e0
-#define PHY_ID_VSC856X 0x000707e0
+#define PHY_ID_VSC856X 0x000707e1
#define PHY_ID_VSC8572 0x000704d0
#define PHY_ID_VSC8574 0x000704a0
-#define PHY_ID_VSC8575 0x000707d0
-#define PHY_ID_VSC8582 0x000707b0
-#define PHY_ID_VSC8584 0x000707c0
+#define PHY_ID_VSC8575 0x000707d1
+#define PHY_ID_VSC8582 0x000707b1
+#define PHY_ID_VSC8584 0x000707c1
#define PHY_VENDOR_MSCC 0x00070400
#define MSCC_VDDMAC_1500 1500
@@ -362,6 +369,13 @@ struct vsc85xx_hw_stat {
u16 mask;
};
+struct vsc8531_skb_cb {
+ u32 ns;
+};
+
+#define VSC8531_SKB_CB(skb) \
+ ((struct vsc8531_skb_cb *)((skb)->cb))
+
struct vsc8531_private {
int rate_magic;
u16 supp_led_modes;
@@ -410,6 +424,11 @@ struct vsc8531_private {
*/
struct mutex ts_lock;
struct mutex phc_lock;
+
+	/* list of received skbs that are still waiting for their timestamp
+	 * information
+	 */
+ struct sk_buff_head rx_skbs_list;
};
/* Shared structure between the PHYs of the same package.
@@ -469,6 +488,7 @@ static inline void vsc8584_config_macsec_intr(struct phy_device *phydev)
void vsc85xx_link_change_notify(struct phy_device *phydev);
void vsc8584_config_ts_intr(struct phy_device *phydev);
int vsc8584_ptp_init(struct phy_device *phydev);
+void vsc8584_ptp_deinit(struct phy_device *phydev);
int vsc8584_ptp_probe_once(struct phy_device *phydev);
int vsc8584_ptp_probe(struct phy_device *phydev);
irqreturn_t vsc8584_handle_ts_interrupt(struct phy_device *phydev);
@@ -483,6 +503,9 @@ static inline int vsc8584_ptp_init(struct phy_device *phydev)
{
return 0;
}
+static inline void vsc8584_ptp_deinit(struct phy_device *phydev)
+{
+}
static inline int vsc8584_ptp_probe_once(struct phy_device *phydev)
{
return 0;
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index bee381200ab8..2b9fb8a675a6 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -17,9 +17,29 @@
#include <linux/of.h>
#include <linux/netdevice.h>
#include <dt-bindings/net/mscc-phy-vsc8531.h>
+
+#include "../phylib.h"
#include "mscc_serdes.h"
#include "mscc.h"
+struct vsc85xx_probe_config {
+ const struct vsc85xx_hw_stat *hw_stats;
+ size_t shared_size;
+ size_t nstats;
+ u16 supp_led_modes;
+ u8 nleds;
+ bool check_rate_magic;
+ bool use_package;
+ bool has_ptp;
+};
+
+static const u32 vsc85xx_default_led_modes_4[] = {
+ VSC8531_LINK_1000_ACTIVITY,
+ VSC8531_LINK_100_ACTIVITY,
+ VSC8531_LINK_ACTIVITY,
+ VSC8531_DUPLEX_COLLISION
+};
+
static const struct vsc85xx_hw_stat vsc85xx_hw_stats[] = {
{
.string = "phy_receive_errors",
@@ -175,17 +195,19 @@ static int vsc85xx_led_cntl_set(struct phy_device *phydev,
u8 led_num,
u8 mode)
{
- int rc;
- u16 reg_val;
+ u16 mask = LED_MODE_SEL_MASK(led_num);
+ u16 val = LED_MODE_SEL(led_num, mode);
- mutex_lock(&phydev->lock);
- reg_val = phy_read(phydev, MSCC_PHY_LED_MODE_SEL);
- reg_val &= ~LED_MODE_SEL_MASK(led_num);
- reg_val |= LED_MODE_SEL(led_num, (u16)mode);
- rc = phy_write(phydev, MSCC_PHY_LED_MODE_SEL, reg_val);
- mutex_unlock(&phydev->lock);
+ return phy_modify(phydev, MSCC_PHY_LED_MODE_SEL, mask, val);
+}
- return rc;
+static int vsc85xx_led_combine_disable_set(struct phy_device *phydev,
+ u8 led_num, bool combine_disable)
+{
+ u16 val = LED_COMBINE_DIS(led_num, combine_disable);
+ u16 mask = LED_COMBINE_DIS_MASK(led_num);
+
+ return phy_modify(phydev, MSCC_PHY_LED_BEHAVIOR, mask, val);
}
static int vsc85xx_mdix_get(struct phy_device *phydev, u8 *mdix)
@@ -441,7 +463,7 @@ static int vsc85xx_dt_led_mode_get(struct phy_device *phydev,
#endif /* CONFIG_OF_MDIO */
static int vsc85xx_dt_led_modes_get(struct phy_device *phydev,
- u32 *default_mode)
+ const u32 *default_mode)
{
struct vsc8531_private *priv = phydev->priv;
char led_dt_prop[28];
@@ -528,7 +550,6 @@ static int vsc85xx_update_rgmii_cntl(struct phy_device *phydev, u32 rgmii_cntl,
u16 rgmii_rx_delay_pos = ffs(rgmii_rx_delay_mask) - 1;
u16 rgmii_tx_delay_pos = ffs(rgmii_tx_delay_mask) - 1;
int delay_size = ARRAY_SIZE(vsc85xx_internal_delay);
- struct device *dev = &phydev->mdio.dev;
u16 reg_val = 0;
u16 mask = 0;
s32 rx_delay;
@@ -547,7 +568,7 @@ static int vsc85xx_update_rgmii_cntl(struct phy_device *phydev, u32 rgmii_cntl,
if (phy_interface_is_rgmii(phydev))
mask |= rgmii_rx_delay_mask | rgmii_tx_delay_mask;
- rx_delay = phy_get_internal_delay(phydev, dev, vsc85xx_internal_delay,
+ rx_delay = phy_get_internal_delay(phydev, vsc85xx_internal_delay,
delay_size, true);
if (rx_delay < 0) {
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID ||
@@ -557,7 +578,7 @@ static int vsc85xx_update_rgmii_cntl(struct phy_device *phydev, u32 rgmii_cntl,
rx_delay = RGMII_CLK_DELAY_0_2_NS;
}
- tx_delay = phy_get_internal_delay(phydev, dev, vsc85xx_internal_delay,
+ tx_delay = phy_get_internal_delay(phydev, vsc85xx_internal_delay,
delay_size, false);
if (tx_delay < 0) {
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID ||
@@ -1723,12 +1744,6 @@ static int vsc8584_config_init(struct phy_device *phydev)
* in this pre-init function.
*/
if (phy_package_init_once(phydev)) {
- /* The following switch statement assumes that the lowest
- * nibble of the phy_id_mask is always 0. This works because
- * the lowest nibble of the PHY_ID's below are also 0.
- */
- WARN_ON(phydev->drv->phy_id_mask & 0xf);
-
switch (phydev->phy_id & phydev->drv->phy_id_mask) {
case PHY_ID_VSC8504:
case PHY_ID_VSC8552:
@@ -2201,12 +2216,35 @@ static int vsc85xx_read_status(struct phy_device *phydev)
return genphy_read_status(phydev);
}
-static int vsc8514_probe(struct phy_device *phydev)
+static unsigned int vsc85xx_inband_caps(struct phy_device *phydev,
+ phy_interface_t interface)
+{
+ if (interface != PHY_INTERFACE_MODE_SGMII &&
+ interface != PHY_INTERFACE_MODE_QSGMII)
+ return 0;
+
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+}
+
+static int vsc85xx_config_inband(struct phy_device *phydev, unsigned int modes)
+{
+ u16 reg_val = 0;
+
+ if (modes == LINK_INBAND_ENABLE)
+ reg_val = MSCC_PHY_SERDES_ANEG;
+
+ return phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_3,
+ MSCC_PHY_SERDES_PCS_CTRL, MSCC_PHY_SERDES_ANEG,
+ reg_val);
+}
+
+static int vsc85xx_probe_common(struct phy_device *phydev,
+ const struct vsc85xx_probe_config *cfg,
+ const u32 *default_led_mode)
{
struct vsc8531_private *vsc8531;
- u32 default_mode[4] = {VSC8531_LINK_1000_ACTIVITY,
- VSC8531_LINK_100_ACTIVITY, VSC8531_LINK_ACTIVITY,
- VSC8531_DUPLEX_COLLISION};
+ struct device_node *np;
+ int ret;
vsc8531 = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531), GFP_KERNEL);
if (!vsc8531)
@@ -2214,124 +2252,296 @@ static int vsc8514_probe(struct phy_device *phydev)
phydev->priv = vsc8531;
- vsc8584_get_base_addr(phydev);
- devm_phy_package_join(&phydev->mdio.dev, phydev,
- vsc8531->base_addr, 0);
+ /* Check rate magic if needed (only for non-package PHYs) */
+ if (cfg->check_rate_magic) {
+ ret = vsc85xx_edge_rate_magic_get(phydev);
+ if (ret < 0)
+ return ret;
+
+ vsc8531->rate_magic = ret;
+ }
+
+ /* Set up package if needed */
+ if (cfg->use_package) {
+ vsc8584_get_base_addr(phydev);
+ ret = devm_phy_package_join(&phydev->mdio.dev, phydev,
+ vsc8531->base_addr,
+ cfg->shared_size);
+ if (ret)
+ return ret;
+ }
+
+ /* Configure LED settings */
+ vsc8531->nleds = cfg->nleds;
+ vsc8531->supp_led_modes = cfg->supp_led_modes;
- vsc8531->nleds = 4;
- vsc8531->supp_led_modes = VSC85XX_SUPP_LED_MODES;
- vsc8531->hw_stats = vsc85xx_hw_stats;
- vsc8531->nstats = ARRAY_SIZE(vsc85xx_hw_stats);
+ /* Configure hardware stats */
+ vsc8531->hw_stats = cfg->hw_stats;
+ vsc8531->nstats = cfg->nstats;
vsc8531->stats = devm_kcalloc(&phydev->mdio.dev, vsc8531->nstats,
sizeof(u64), GFP_KERNEL);
if (!vsc8531->stats)
return -ENOMEM;
- return vsc85xx_dt_led_modes_get(phydev, default_mode);
+ /* PTP setup for VSC8584 */
+ if (cfg->has_ptp) {
+ if (phy_package_probe_once(phydev)) {
+ ret = vsc8584_ptp_probe_once(phydev);
+ if (ret)
+ return ret;
+ }
+
+ ret = vsc8584_ptp_probe(phydev);
+ if (ret)
+ return ret;
+ }
+
+	/* When a "leds" node is present in the device tree, force the legacy
+	 * per-LED modes to their defaults; otherwise parse the legacy
+	 * `vsc8531,led-x-mode` properties.
+	 */
+ np = of_get_child_by_name(phydev->mdio.dev.of_node, "leds");
+ if (np) {
+ of_node_put(np);
+
+ /* Force to defaults */
+ for (unsigned int i = 0; i < vsc8531->nleds; i++)
+ vsc8531->leds_mode[i] = default_led_mode[i];
+
+ return 0;
+ }
+
+ /* Parse LED modes from device tree */
+ return vsc85xx_dt_led_modes_get(phydev, default_led_mode);
}
-static int vsc8574_probe(struct phy_device *phydev)
+static int vsc85xx_led_brightness_set(struct phy_device *phydev,
+ u8 index, enum led_brightness value)
{
- struct vsc8531_private *vsc8531;
- u32 default_mode[4] = {VSC8531_LINK_1000_ACTIVITY,
- VSC8531_LINK_100_ACTIVITY, VSC8531_LINK_ACTIVITY,
- VSC8531_DUPLEX_COLLISION};
+ struct vsc8531_private *vsc8531 = phydev->priv;
- vsc8531 = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531), GFP_KERNEL);
- if (!vsc8531)
- return -ENOMEM;
+ if (index >= vsc8531->nleds)
+ return -EINVAL;
- phydev->priv = vsc8531;
+ return vsc85xx_led_cntl_set(phydev, index, value == LED_OFF ?
+ VSC8531_FORCE_LED_OFF : VSC8531_FORCE_LED_ON);
+}
- vsc8584_get_base_addr(phydev);
- devm_phy_package_join(&phydev->mdio.dev, phydev,
- vsc8531->base_addr, 0);
+static int vsc85xx_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ static const unsigned long supported = BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK) |
+ BIT(TRIGGER_NETDEV_RX) |
+ BIT(TRIGGER_NETDEV_TX);
+ struct vsc8531_private *vsc8531 = phydev->priv;
- vsc8531->nleds = 4;
- vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES;
- vsc8531->hw_stats = vsc8584_hw_stats;
- vsc8531->nstats = ARRAY_SIZE(vsc8584_hw_stats);
- vsc8531->stats = devm_kcalloc(&phydev->mdio.dev, vsc8531->nstats,
- sizeof(u64), GFP_KERNEL);
- if (!vsc8531->stats)
- return -ENOMEM;
+ if (index >= vsc8531->nleds)
+ return -EINVAL;
- return vsc85xx_dt_led_modes_get(phydev, default_mode);
+ if (rules & ~supported)
+ return -EOPNOTSUPP;
+
+ return 0;
}
-static int vsc8584_probe(struct phy_device *phydev)
+static int vsc85xx_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
{
- struct vsc8531_private *vsc8531;
- u32 default_mode[4] = {VSC8531_LINK_1000_ACTIVITY,
- VSC8531_LINK_100_ACTIVITY, VSC8531_LINK_ACTIVITY,
- VSC8531_DUPLEX_COLLISION};
- int ret;
+ struct vsc8531_private *vsc8531 = phydev->priv;
+ u8 mode, behavior;
+ int rc;
- if ((phydev->phy_id & MSCC_DEV_REV_MASK) != VSC8584_REVB) {
- dev_err(&phydev->mdio.dev, "Only VSC8584 revB is supported.\n");
- return -ENOTSUPP;
- }
+ if (index >= vsc8531->nleds)
+ return -EINVAL;
- vsc8531 = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531), GFP_KERNEL);
- if (!vsc8531)
- return -ENOMEM;
+ rc = phy_read(phydev, MSCC_PHY_LED_MODE_SEL);
+ if (rc < 0)
+ return rc;
+ mode = (rc & LED_MODE_SEL_MASK(index)) >> LED_MODE_SEL_POS(index);
- phydev->priv = vsc8531;
+ rc = phy_read(phydev, MSCC_PHY_LED_BEHAVIOR);
+ if (rc < 0)
+ return rc;
+ behavior = (rc & LED_COMBINE_DIS_MASK(index)) >> index;
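+ /* behavior carries the per-LED combine-disable bit: when it is clear,
+ * the combine function is active and the LED also blinks on activity.
+ */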
- vsc8584_get_base_addr(phydev);
- devm_phy_package_join(&phydev->mdio.dev, phydev, vsc8531->base_addr,
- sizeof(struct vsc85xx_shared_private));
+ switch (mode) {
+ case VSC8531_LINK_ACTIVITY:
+ case VSC8531_ACTIVITY:
+ *rules = BIT(TRIGGER_NETDEV_LINK);
+ break;
- vsc8531->nleds = 4;
- vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES;
- vsc8531->hw_stats = vsc8584_hw_stats;
- vsc8531->nstats = ARRAY_SIZE(vsc8584_hw_stats);
- vsc8531->stats = devm_kcalloc(&phydev->mdio.dev, vsc8531->nstats,
- sizeof(u64), GFP_KERNEL);
- if (!vsc8531->stats)
- return -ENOMEM;
+ case VSC8531_LINK_1000_ACTIVITY:
+ *rules = BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_LINK);
+ break;
- if (phy_package_probe_once(phydev)) {
- ret = vsc8584_ptp_probe_once(phydev);
- if (ret)
- return ret;
+ case VSC8531_LINK_100_ACTIVITY:
+ *rules = BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK);
+ break;
+
+ case VSC8531_LINK_10_ACTIVITY:
+ *rules = BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK);
+ break;
+
+ case VSC8531_LINK_100_1000_ACTIVITY:
+ *rules = BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK);
+ break;
+
+ case VSC8531_LINK_10_1000_ACTIVITY:
+ *rules = BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK);
+ break;
+
+ case VSC8531_LINK_10_100_ACTIVITY:
+ *rules = BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK);
+ break;
+
+ default:
+ *rules = 0;
+ break;
}
- ret = vsc8584_ptp_probe(phydev);
- if (ret)
+ if (!behavior && *rules)
+ *rules |= BIT(TRIGGER_NETDEV_RX) | BIT(TRIGGER_NETDEV_TX);
+
+ return 0;
+}
+
+static int vsc85xx_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ struct vsc8531_private *vsc8531 = phydev->priv;
+ u8 mode = VSC8531_FORCE_LED_ON;
+ bool combine_disable = false;
+ bool has_rx, has_tx;
+ int ret;
+
+ if (index >= vsc8531->nleds)
+ return -EINVAL;
+
+ if (rules & BIT(TRIGGER_NETDEV_LINK))
+ mode = VSC8531_LINK_ACTIVITY;
+
+ if (rules & BIT(TRIGGER_NETDEV_LINK_10))
+ mode = VSC8531_LINK_10_ACTIVITY;
+
+ if (rules & BIT(TRIGGER_NETDEV_LINK_100))
+ mode = VSC8531_LINK_100_ACTIVITY;
+
+ if (rules & BIT(TRIGGER_NETDEV_LINK_1000))
+ mode = VSC8531_LINK_1000_ACTIVITY;
+
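+ /* Dual-speed modes are checked last so they take precedence over the
+ * single-speed selections above when two speed triggers are combined.
+ */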
+ if (rules & BIT(TRIGGER_NETDEV_LINK_100) &&
+ rules & BIT(TRIGGER_NETDEV_LINK_1000))
+ mode = VSC8531_LINK_100_1000_ACTIVITY;
+
+ if (rules & BIT(TRIGGER_NETDEV_LINK_10) &&
+ rules & BIT(TRIGGER_NETDEV_LINK_1000))
+ mode = VSC8531_LINK_10_1000_ACTIVITY;
+
+ if (rules & BIT(TRIGGER_NETDEV_LINK_10) &&
+ rules & BIT(TRIGGER_NETDEV_LINK_100))
+ mode = VSC8531_LINK_10_100_ACTIVITY;
+
+ /*
+ * The VSC85xx PHYs provide an option to control LED behavior. By
+ * default, the LEDx combine function is enabled, meaning the LED
+ * will be on when there is link/activity or duplex/collision. If
+ * the combine function is disabled, the LED will be on only for
+ * link or duplex.
+ *
+ * To control this behavior, check the selected rules: if neither RX
+ * nor TX activity is selected, disable the LED combine function;
+ * otherwise leave it enabled.
+ */
+ has_rx = !!(rules & BIT(TRIGGER_NETDEV_RX));
+ has_tx = !!(rules & BIT(TRIGGER_NETDEV_TX));
+ if (!has_rx && !has_tx)
+ combine_disable = true;
+
+ ret = vsc85xx_led_combine_disable_set(phydev, index, combine_disable);
+ if (ret < 0)
return ret;
- return vsc85xx_dt_led_modes_get(phydev, default_mode);
+ return vsc85xx_led_cntl_set(phydev, index, mode);
}
-static int vsc85xx_probe(struct phy_device *phydev)
+static int vsc8514_probe(struct phy_device *phydev)
{
- struct vsc8531_private *vsc8531;
- int rate_magic;
- u32 default_mode[2] = {VSC8531_LINK_1000_ACTIVITY,
- VSC8531_LINK_100_ACTIVITY};
+ static const struct vsc85xx_probe_config vsc8514_cfg = {
+ .nleds = 4,
+ .supp_led_modes = VSC85XX_SUPP_LED_MODES,
+ .hw_stats = vsc85xx_hw_stats,
+ .nstats = ARRAY_SIZE(vsc85xx_hw_stats),
+ .use_package = true,
+ .shared_size = 0,
+ .has_ptp = false,
+ .check_rate_magic = false,
+ };
- rate_magic = vsc85xx_edge_rate_magic_get(phydev);
- if (rate_magic < 0)
- return rate_magic;
+ return vsc85xx_probe_common(phydev, &vsc8514_cfg, vsc85xx_default_led_modes_4);
+}
- vsc8531 = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531), GFP_KERNEL);
- if (!vsc8531)
- return -ENOMEM;
+static int vsc8574_probe(struct phy_device *phydev)
+{
+ static const struct vsc85xx_probe_config vsc8574_cfg = {
+ .nleds = 4,
+ .supp_led_modes = VSC8584_SUPP_LED_MODES,
+ .hw_stats = vsc8584_hw_stats,
+ .nstats = ARRAY_SIZE(vsc8584_hw_stats),
+ .use_package = true,
+ .shared_size = 0,
+ .has_ptp = false,
+ .check_rate_magic = false,
+ };
- phydev->priv = vsc8531;
+ return vsc85xx_probe_common(phydev, &vsc8574_cfg, vsc85xx_default_led_modes_4);
+}
- vsc8531->rate_magic = rate_magic;
- vsc8531->nleds = 2;
- vsc8531->supp_led_modes = VSC85XX_SUPP_LED_MODES;
- vsc8531->hw_stats = vsc85xx_hw_stats;
- vsc8531->nstats = ARRAY_SIZE(vsc85xx_hw_stats);
- vsc8531->stats = devm_kcalloc(&phydev->mdio.dev, vsc8531->nstats,
- sizeof(u64), GFP_KERNEL);
- if (!vsc8531->stats)
- return -ENOMEM;
+static int vsc8584_probe(struct phy_device *phydev)
+{
+ static const struct vsc85xx_probe_config vsc8584_cfg = {
+ .nleds = 4,
+ .supp_led_modes = VSC8584_SUPP_LED_MODES,
+ .hw_stats = vsc8584_hw_stats,
+ .nstats = ARRAY_SIZE(vsc8584_hw_stats),
+ .use_package = true,
+ .shared_size = sizeof(struct vsc85xx_shared_private),
+ .has_ptp = true,
+ .check_rate_magic = false,
+ };
+
+ return vsc85xx_probe_common(phydev, &vsc8584_cfg, vsc85xx_default_led_modes_4);
+}
+
+static int vsc85xx_probe(struct phy_device *phydev)
+{
+ static const struct vsc85xx_probe_config vsc85xx_cfg = {
+ .nleds = 2,
+ .supp_led_modes = VSC85XX_SUPP_LED_MODES,
+ .hw_stats = vsc85xx_hw_stats,
+ .nstats = ARRAY_SIZE(vsc85xx_hw_stats),
+ .use_package = false,
+ .has_ptp = false,
+ .check_rate_magic = true,
+ };
+
+ return vsc85xx_probe_common(phydev, &vsc85xx_cfg, vsc85xx_default_led_modes_4);
+}
- return vsc85xx_dt_led_modes_get(phydev, default_mode);
+static void vsc85xx_remove(struct phy_device *phydev)
+{
+ vsc8584_ptp_deinit(phydev);
}
/* Microsemi VSC85xx PHYs */
@@ -2359,6 +2569,10 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .led_brightness_set = vsc85xx_led_brightness_set,
+ .led_hw_is_supported = vsc85xx_led_hw_is_supported,
+ .led_hw_control_get = vsc85xx_led_hw_control_get,
+ .led_hw_control_set = vsc85xx_led_hw_control_set,
},
{
.phy_id = PHY_ID_VSC8502,
@@ -2383,6 +2597,10 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .led_brightness_set = vsc85xx_led_brightness_set,
+ .led_hw_is_supported = vsc85xx_led_hw_is_supported,
+ .led_hw_control_get = vsc85xx_led_hw_control_get,
+ .led_hw_control_set = vsc85xx_led_hw_control_set,
},
{
.phy_id = PHY_ID_VSC8504,
@@ -2408,6 +2626,12 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .inband_caps = vsc85xx_inband_caps,
+ .config_inband = vsc85xx_config_inband,
+ .led_brightness_set = vsc85xx_led_brightness_set,
+ .led_hw_is_supported = vsc85xx_led_hw_is_supported,
+ .led_hw_control_get = vsc85xx_led_hw_control_get,
+ .led_hw_control_set = vsc85xx_led_hw_control_set,
},
{
.phy_id = PHY_ID_VSC8514,
@@ -2431,6 +2655,12 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .inband_caps = vsc85xx_inband_caps,
+ .config_inband = vsc85xx_config_inband,
+ .led_brightness_set = vsc85xx_led_brightness_set,
+ .led_hw_is_supported = vsc85xx_led_hw_is_supported,
+ .led_hw_control_get = vsc85xx_led_hw_control_get,
+ .led_hw_control_set = vsc85xx_led_hw_control_set,
},
{
.phy_id = PHY_ID_VSC8530,
@@ -2455,6 +2685,10 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .led_brightness_set = vsc85xx_led_brightness_set,
+ .led_hw_is_supported = vsc85xx_led_hw_is_supported,
+ .led_hw_control_get = vsc85xx_led_hw_control_get,
+ .led_hw_control_set = vsc85xx_led_hw_control_set,
},
{
.phy_id = PHY_ID_VSC8531,
@@ -2479,6 +2713,10 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .led_brightness_set = vsc85xx_led_brightness_set,
+ .led_hw_is_supported = vsc85xx_led_hw_is_supported,
+ .led_hw_control_get = vsc85xx_led_hw_control_get,
+ .led_hw_control_set = vsc85xx_led_hw_control_set,
},
{
.phy_id = PHY_ID_VSC8540,
@@ -2503,6 +2741,10 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .led_brightness_set = vsc85xx_led_brightness_set,
+ .led_hw_is_supported = vsc85xx_led_hw_is_supported,
+ .led_hw_control_get = vsc85xx_led_hw_control_get,
+ .led_hw_control_set = vsc85xx_led_hw_control_set,
},
{
.phy_id = PHY_ID_VSC8541,
@@ -2527,6 +2769,10 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .led_brightness_set = vsc85xx_led_brightness_set,
+ .led_hw_is_supported = vsc85xx_led_hw_is_supported,
+ .led_hw_control_get = vsc85xx_led_hw_control_get,
+ .led_hw_control_set = vsc85xx_led_hw_control_set,
},
{
.phy_id = PHY_ID_VSC8552,
@@ -2551,11 +2797,16 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .inband_caps = vsc85xx_inband_caps,
+ .config_inband = vsc85xx_config_inband,
+ .led_brightness_set = vsc85xx_led_brightness_set,
+ .led_hw_is_supported = vsc85xx_led_hw_is_supported,
+ .led_hw_control_get = vsc85xx_led_hw_control_get,
+ .led_hw_control_set = vsc85xx_led_hw_control_set,
},
{
- .phy_id = PHY_ID_VSC856X,
+ PHY_ID_MATCH_EXACT(PHY_ID_VSC856X),
.name = "Microsemi GE VSC856X SyncE",
- .phy_id_mask = 0xfffffff0,
/* PHY_GBIT_FEATURES */
.soft_reset = &genphy_soft_reset,
.config_init = &vsc8584_config_init,
@@ -2573,6 +2824,12 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .inband_caps = vsc85xx_inband_caps,
+ .config_inband = vsc85xx_config_inband,
+ .led_brightness_set = vsc85xx_led_brightness_set,
+ .led_hw_is_supported = vsc85xx_led_hw_is_supported,
+ .led_hw_control_get = vsc85xx_led_hw_control_get,
+ .led_hw_control_set = vsc85xx_led_hw_control_set,
},
{
.phy_id = PHY_ID_VSC8572,
@@ -2588,7 +2845,8 @@ static struct phy_driver vsc85xx_driver[] = {
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
- .probe = &vsc8574_probe,
+ .remove = &vsc85xx_remove,
+ .probe = &vsc8584_probe,
.set_wol = &vsc85xx_wol_set,
.get_wol = &vsc85xx_wol_get,
.get_tunable = &vsc85xx_get_tunable,
@@ -2598,6 +2856,12 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .inband_caps = vsc85xx_inband_caps,
+ .config_inband = vsc85xx_config_inband,
+ .led_brightness_set = vsc85xx_led_brightness_set,
+ .led_hw_is_supported = vsc85xx_led_hw_is_supported,
+ .led_hw_control_get = vsc85xx_led_hw_control_get,
+ .led_hw_control_set = vsc85xx_led_hw_control_set,
},
{
.phy_id = PHY_ID_VSC8574,
@@ -2609,11 +2873,12 @@ static struct phy_driver vsc85xx_driver[] = {
.config_aneg = &vsc85xx_config_aneg,
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
- .handle_interrupt = vsc85xx_handle_interrupt,
+ .handle_interrupt = vsc8584_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
- .probe = &vsc8574_probe,
+ .remove = &vsc85xx_remove,
+ .probe = &vsc8584_probe,
.set_wol = &vsc85xx_wol_set,
.get_wol = &vsc85xx_wol_get,
.get_tunable = &vsc85xx_get_tunable,
@@ -2623,11 +2888,16 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .inband_caps = vsc85xx_inband_caps,
+ .config_inband = vsc85xx_config_inband,
+ .led_brightness_set = vsc85xx_led_brightness_set,
+ .led_hw_is_supported = vsc85xx_led_hw_is_supported,
+ .led_hw_control_get = vsc85xx_led_hw_control_get,
+ .led_hw_control_set = vsc85xx_led_hw_control_set,
},
{
- .phy_id = PHY_ID_VSC8575,
+ PHY_ID_MATCH_EXACT(PHY_ID_VSC8575),
.name = "Microsemi GE VSC8575 SyncE",
- .phy_id_mask = 0xfffffff0,
/* PHY_GBIT_FEATURES */
.soft_reset = &genphy_soft_reset,
.config_init = &vsc8584_config_init,
@@ -2638,6 +2908,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
+ .remove = &vsc85xx_remove,
.probe = &vsc8584_probe,
.get_tunable = &vsc85xx_get_tunable,
.set_tunable = &vsc85xx_set_tunable,
@@ -2646,11 +2917,16 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .inband_caps = vsc85xx_inband_caps,
+ .config_inband = vsc85xx_config_inband,
+ .led_brightness_set = vsc85xx_led_brightness_set,
+ .led_hw_is_supported = vsc85xx_led_hw_is_supported,
+ .led_hw_control_get = vsc85xx_led_hw_control_get,
+ .led_hw_control_set = vsc85xx_led_hw_control_set,
},
{
- .phy_id = PHY_ID_VSC8582,
+ PHY_ID_MATCH_EXACT(PHY_ID_VSC8582),
.name = "Microsemi GE VSC8582 SyncE",
- .phy_id_mask = 0xfffffff0,
/* PHY_GBIT_FEATURES */
.soft_reset = &genphy_soft_reset,
.config_init = &vsc8584_config_init,
@@ -2661,6 +2937,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
+ .remove = &vsc85xx_remove,
.probe = &vsc8584_probe,
.get_tunable = &vsc85xx_get_tunable,
.set_tunable = &vsc85xx_set_tunable,
@@ -2669,11 +2946,16 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .inband_caps = vsc85xx_inband_caps,
+ .config_inband = vsc85xx_config_inband,
+ .led_brightness_set = vsc85xx_led_brightness_set,
+ .led_hw_is_supported = vsc85xx_led_hw_is_supported,
+ .led_hw_control_get = vsc85xx_led_hw_control_get,
+ .led_hw_control_set = vsc85xx_led_hw_control_set,
},
{
- .phy_id = PHY_ID_VSC8584,
+ PHY_ID_MATCH_EXACT(PHY_ID_VSC8584),
.name = "Microsemi GE VSC8584 SyncE",
- .phy_id_mask = 0xfffffff0,
/* PHY_GBIT_FEATURES */
.soft_reset = &genphy_soft_reset,
.config_init = &vsc8584_config_init,
@@ -2684,6 +2966,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
+ .remove = &vsc85xx_remove,
.probe = &vsc8584_probe,
.get_tunable = &vsc85xx_get_tunable,
.set_tunable = &vsc85xx_set_tunable,
@@ -2693,13 +2976,19 @@ static struct phy_driver vsc85xx_driver[] = {
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
.link_change_notify = &vsc85xx_link_change_notify,
+ .inband_caps = vsc85xx_inband_caps,
+ .config_inband = vsc85xx_config_inband,
+ .led_brightness_set = vsc85xx_led_brightness_set,
+ .led_hw_is_supported = vsc85xx_led_hw_is_supported,
+ .led_hw_control_get = vsc85xx_led_hw_control_get,
+ .led_hw_control_set = vsc85xx_led_hw_control_set,
}
};
module_phy_driver(vsc85xx_driver);
-static struct mdio_device_id __maybe_unused vsc85xx_tbl[] = {
+static const struct mdio_device_id __maybe_unused vsc85xx_tbl[] = {
{ PHY_ID_MATCH_VENDOR(PHY_VENDOR_MSCC) },
{ }
};
diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
index 738a8822fcf0..4865eac74b0e 100644
--- a/drivers/net/phy/mscc/mscc_ptp.c
+++ b/drivers/net/phy/mscc/mscc_ptp.c
@@ -17,6 +17,7 @@
#include <linux/udp.h>
#include <linux/unaligned.h>
+#include "../phylib.h"
#include "mscc.h"
#include "mscc_ptp.h"
@@ -455,12 +456,12 @@ static void vsc85xx_dequeue_skb(struct vsc85xx_ptp *ptp)
*p++ = (reg >> 24) & 0xff;
}
- len = skb_queue_len(&ptp->tx_queue);
+ len = skb_queue_len_lockless(&ptp->tx_queue);
if (len < 1)
return;
while (len--) {
- skb = __skb_dequeue(&ptp->tx_queue);
+ skb = skb_dequeue(&ptp->tx_queue);
if (!skb)
return;
@@ -485,7 +486,7 @@ static void vsc85xx_dequeue_skb(struct vsc85xx_ptp *ptp)
* packet in the FIFO right now, reschedule it for later
* packets.
*/
- __skb_queue_tail(&ptp->tx_queue, skb);
+ skb_queue_tail(&ptp->tx_queue, skb);
}
}
@@ -645,11 +646,12 @@ static int __vsc85xx_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
struct phy_device *phydev = ptp->phydev;
- struct vsc85xx_shared_private *shared =
- (struct vsc85xx_shared_private *)phydev->shared->priv;
struct vsc8531_private *priv = phydev->priv;
+ struct vsc85xx_shared_private *shared;
u32 val;
+ shared = phy_package_get_priv(phydev);
+
val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
val |= PTP_LTC_CTRL_SAVE_ENA;
vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);
@@ -696,11 +698,12 @@ static int __vsc85xx_settime(struct ptp_clock_info *info,
{
struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
struct phy_device *phydev = ptp->phydev;
- struct vsc85xx_shared_private *shared =
- (struct vsc85xx_shared_private *)phydev->shared->priv;
struct vsc8531_private *priv = phydev->priv;
+ struct vsc85xx_shared_private *shared;
u32 val;
+ shared = phy_package_get_priv(phydev);
+
vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_SEC_MSB,
PTP_LTC_LOAD_SEC_MSB(ts->tv_sec));
vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_SEC_LSB,
@@ -897,6 +900,7 @@ static int vsc85xx_eth1_conf(struct phy_device *phydev, enum ts_blk blk,
get_unaligned_be32(ptp_multicast));
} else {
val |= ANA_ETH1_FLOW_ADDR_MATCH2_ANY_MULTICAST;
+ val |= ANA_ETH1_FLOW_ADDR_MATCH2_ANY_UNICAST;
vsc85xx_ts_write_csr(phydev, blk,
MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), val);
vsc85xx_ts_write_csr(phydev, blk,
@@ -943,7 +947,9 @@ static int vsc85xx_ip1_conf(struct phy_device *phydev, enum ts_blk blk,
/* UDP checksum offset in IPv4 packet
* according to: https://tools.ietf.org/html/rfc768
*/
- val |= IP1_NXT_PROT_UDP_CHKSUM_OFF(26) | IP1_NXT_PROT_UDP_CHKSUM_CLEAR;
+ val |= IP1_NXT_PROT_UDP_CHKSUM_OFF(26);
+ if (enable)
+ val |= IP1_NXT_PROT_UDP_CHKSUM_CLEAR;
vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM,
val);
@@ -1045,9 +1051,21 @@ static void vsc85xx_ts_reset_fifo(struct phy_device *phydev)
val);
}
-static int vsc85xx_hwtstamp(struct mii_timestamper *mii_ts,
- struct kernel_hwtstamp_config *cfg,
- struct netlink_ext_ack *extack)
+static int vsc85xx_hwtstamp_get(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *cfg)
+{
+ struct vsc8531_private *vsc8531 =
+ container_of(mii_ts, struct vsc8531_private, mii_ts);
+
+ cfg->tx_type = vsc8531->ptp->tx_type;
+ cfg->rx_filter = vsc8531->ptp->rx_filter;
+
+ return 0;
+}
+
+static int vsc85xx_hwtstamp_set(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct vsc8531_private *vsc8531 =
container_of(mii_ts, struct vsc8531_private, mii_ts);
@@ -1062,6 +1080,7 @@ static int vsc85xx_hwtstamp(struct mii_timestamper *mii_ts,
case HWTSTAMP_TX_ON:
break;
case HWTSTAMP_TX_OFF:
+ skb_queue_purge(&vsc8531->ptp->tx_queue);
break;
default:
return -ERANGE;
@@ -1086,9 +1105,6 @@ static int vsc85xx_hwtstamp(struct mii_timestamper *mii_ts,
mutex_lock(&vsc8531->ts_lock);
- __skb_queue_purge(&vsc8531->ptp->tx_queue);
- __skb_queue_head_init(&vsc8531->ptp->tx_queue);
-
/* Disable predictor while configuring the 1588 block */
val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
MSCC_PHY_PTP_INGR_PREDICTOR);
@@ -1163,18 +1179,22 @@ static void vsc85xx_txtstamp(struct mii_timestamper *mii_ts,
container_of(mii_ts, struct vsc8531_private, mii_ts);
if (!vsc8531->ptp->configured)
- return;
+ goto out;
- if (vsc8531->ptp->tx_type == HWTSTAMP_TX_OFF) {
- kfree_skb(skb);
- return;
- }
+ if (vsc8531->ptp->tx_type == HWTSTAMP_TX_OFF)
+ goto out;
+
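+ /* In one-step sync mode the timestamp is inserted into Sync frames by
+ * the hardware, so they are not kept for a follow-up TX timestamp;
+ * free them here.
+ */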
+ if (vsc8531->ptp->tx_type == HWTSTAMP_TX_ONESTEP_SYNC)
+ if (ptp_msg_is_sync(skb, type))
+ goto out;
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- mutex_lock(&vsc8531->ts_lock);
- __skb_queue_tail(&vsc8531->ptp->tx_queue, skb);
- mutex_unlock(&vsc8531->ts_lock);
+ skb_queue_tail(&vsc8531->ptp->tx_queue, skb);
+ return;
+
+out:
+ kfree_skb(skb);
}
static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
@@ -1182,9 +1202,7 @@ static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
{
struct vsc8531_private *vsc8531 =
container_of(mii_ts, struct vsc8531_private, mii_ts);
- struct skb_shared_hwtstamps *shhwtstamps = NULL;
struct vsc85xx_ptphdr *ptphdr;
- struct timespec64 ts;
unsigned long ns;
if (!vsc8531->ptp->configured)
@@ -1194,27 +1212,52 @@ static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
type == PTP_CLASS_NONE)
return false;
- vsc85xx_gettime(&vsc8531->ptp->caps, &ts);
-
ptphdr = get_ptp_header_rx(skb, vsc8531->ptp->rx_filter);
if (!ptphdr)
return false;
- shhwtstamps = skb_hwtstamps(skb);
- memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
-
ns = ntohl(ptphdr->rsrvd2);
- /* nsec is in reserved field */
- if (ts.tv_nsec < ns)
- ts.tv_sec--;
+ VSC8531_SKB_CB(skb)->ns = ns;
+ skb_queue_tail(&vsc8531->rx_skbs_list, skb);
- shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ns);
- netif_rx(skb);
+ ptp_schedule_worker(vsc8531->ptp->ptp_clock, 0);
return true;
}
+static long vsc85xx_do_aux_work(struct ptp_clock_info *info)
+{
+ struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
+ struct skb_shared_hwtstamps *shhwtstamps = NULL;
+ struct phy_device *phydev = ptp->phydev;
+ struct vsc8531_private *priv = phydev->priv;
+ struct sk_buff_head received;
+ struct sk_buff *rx_skb;
+ struct timespec64 ts;
+ unsigned long flags;
+
+ __skb_queue_head_init(&received);
+ spin_lock_irqsave(&priv->rx_skbs_list.lock, flags);
+ skb_queue_splice_tail_init(&priv->rx_skbs_list, &received);
+ spin_unlock_irqrestore(&priv->rx_skbs_list.lock, flags);
+
+ vsc85xx_gettime(info, &ts);
+ while ((rx_skb = __skb_dequeue(&received)) != NULL) {
+ shhwtstamps = skb_hwtstamps(rx_skb);
+ memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+
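+ /* Only the nanoseconds of the RX timestamp are carried in the PTP
+ * header reserved field; take the seconds from the current LTC time
+ * and correct for a seconds rollover since the frame was stamped.
+ */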
+ if (ts.tv_nsec < VSC8531_SKB_CB(rx_skb)->ns)
+ ts.tv_sec--;
+
+ shhwtstamps->hwtstamp = ktime_set(ts.tv_sec,
+ VSC8531_SKB_CB(rx_skb)->ns);
+ netif_rx(rx_skb);
+ }
+
+ return -1;
+}
+
static const struct ptp_clock_info vsc85xx_clk_caps = {
.owner = THIS_MODULE,
.name = "VSC85xx timer",
@@ -1228,6 +1271,7 @@ static const struct ptp_clock_info vsc85xx_clk_caps = {
.adjfine = &vsc85xx_adjfine,
.gettime64 = &vsc85xx_gettime,
.settime64 = &vsc85xx_settime,
+ .do_aux_work = &vsc85xx_do_aux_work,
};
static struct vsc8531_private *vsc8584_base_priv(struct phy_device *phydev)
@@ -1262,7 +1306,6 @@ static void vsc8584_set_input_clk_configured(struct phy_device *phydev)
static int __vsc8584_init_ptp(struct phy_device *phydev)
{
- struct vsc8531_private *vsc8531 = phydev->priv;
static const u32 ltc_seq_e[] = { 0, 400000, 0, 0, 0 };
static const u8 ltc_seq_a[] = { 8, 6, 5, 4, 2 };
u32 val;
@@ -1479,17 +1522,7 @@ static int __vsc8584_init_ptp(struct phy_device *phydev)
vsc85xx_ts_eth_cmp1_sig(phydev);
- vsc8531->mii_ts.rxtstamp = vsc85xx_rxtstamp;
- vsc8531->mii_ts.txtstamp = vsc85xx_txtstamp;
- vsc8531->mii_ts.hwtstamp = vsc85xx_hwtstamp;
- vsc8531->mii_ts.ts_info = vsc85xx_ts_info;
- phydev->mii_ts = &vsc8531->mii_ts;
-
- memcpy(&vsc8531->ptp->caps, &vsc85xx_clk_caps, sizeof(vsc85xx_clk_caps));
-
- vsc8531->ptp->ptp_clock = ptp_clock_register(&vsc8531->ptp->caps,
- &phydev->mdio.dev);
- return PTR_ERR_OR_ZERO(vsc8531->ptp->ptp_clock);
+ return 0;
}
void vsc8584_config_ts_intr(struct phy_device *phydev)
@@ -1516,6 +1549,17 @@ int vsc8584_ptp_init(struct phy_device *phydev)
return 0;
}
+void vsc8584_ptp_deinit(struct phy_device *phydev)
+{
+ struct vsc8531_private *vsc8531 = phydev->priv;
+
+ if (vsc8531->ptp->ptp_clock) {
+ ptp_clock_unregister(vsc8531->ptp->ptp_clock);
+ skb_queue_purge(&vsc8531->rx_skbs_list);
+ skb_queue_purge(&vsc8531->ptp->tx_queue);
+ }
+}
+
irqreturn_t vsc8584_handle_ts_interrupt(struct phy_device *phydev)
{
struct vsc8531_private *priv = phydev->priv;
@@ -1536,7 +1580,7 @@ irqreturn_t vsc8584_handle_ts_interrupt(struct phy_device *phydev)
if (rc & VSC85XX_1588_INT_FIFO_ADD) {
vsc85xx_get_tx_ts(priv->ptp);
} else if (rc & VSC85XX_1588_INT_FIFO_OVERFLOW) {
- __skb_queue_purge(&priv->ptp->tx_queue);
+ skb_queue_purge(&priv->ptp->tx_queue);
vsc85xx_ts_reset_fifo(phydev);
}
@@ -1555,6 +1599,8 @@ int vsc8584_ptp_probe(struct phy_device *phydev)
mutex_init(&vsc8531->phc_lock);
mutex_init(&vsc8531->ts_lock);
+ skb_queue_head_init(&vsc8531->rx_skbs_list);
+ skb_queue_head_init(&vsc8531->ptp->tx_queue);
/* Retrieve the shared load/save GPIO. Request it as non exclusive as
* the same GPIO can be requested by all the PHYs of the same package.
@@ -1575,13 +1621,22 @@ int vsc8584_ptp_probe(struct phy_device *phydev)
vsc8531->ptp->phydev = phydev;
- return 0;
+ vsc8531->mii_ts.rxtstamp = vsc85xx_rxtstamp;
+ vsc8531->mii_ts.txtstamp = vsc85xx_txtstamp;
+ vsc8531->mii_ts.hwtstamp_set = vsc85xx_hwtstamp_set;
+ vsc8531->mii_ts.hwtstamp_get = vsc85xx_hwtstamp_get;
+ vsc8531->mii_ts.ts_info = vsc85xx_ts_info;
+ phydev->mii_ts = &vsc8531->mii_ts;
+
+ memcpy(&vsc8531->ptp->caps, &vsc85xx_clk_caps, sizeof(vsc85xx_clk_caps));
+ vsc8531->ptp->ptp_clock = ptp_clock_register(&vsc8531->ptp->caps,
+ &phydev->mdio.dev);
+ return PTR_ERR_OR_ZERO(vsc8531->ptp->ptp_clock);
}
int vsc8584_ptp_probe_once(struct phy_device *phydev)
{
- struct vsc85xx_shared_private *shared =
- (struct vsc85xx_shared_private *)phydev->shared->priv;
+ struct vsc85xx_shared_private *shared = phy_package_get_priv(phydev);
/* Initialize shared GPIO lock */
mutex_init(&shared->gpio_lock);
diff --git a/drivers/net/phy/mscc/mscc_ptp.h b/drivers/net/phy/mscc/mscc_ptp.h
index da3465360e90..ae9ad925bfa8 100644
--- a/drivers/net/phy/mscc/mscc_ptp.h
+++ b/drivers/net/phy/mscc/mscc_ptp.h
@@ -98,6 +98,7 @@
#define MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(x) (MSCC_ANA_ETH1_FLOW_ENA(x) + 3)
#define ANA_ETH1_FLOW_ADDR_MATCH2_MASK_MASK GENMASK(22, 20)
#define ANA_ETH1_FLOW_ADDR_MATCH2_ANY_MULTICAST 0x400000
+#define ANA_ETH1_FLOW_ADDR_MATCH2_ANY_UNICAST 0x200000
#define ANA_ETH1_FLOW_ADDR_MATCH2_FULL_ADDR 0x100000
#define ANA_ETH1_FLOW_ADDR_MATCH2_SRC_DEST_MASK GENMASK(17, 16)
#define ANA_ETH1_FLOW_ADDR_MATCH2_SRC_DEST 0x020000
diff --git a/drivers/net/phy/mxl-86110.c b/drivers/net/phy/mxl-86110.c
new file mode 100644
index 000000000000..e5d137a37a1d
--- /dev/null
+++ b/drivers/net/phy/mxl-86110.c
@@ -0,0 +1,978 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PHY driver for MaxLinear MXL86110 and MXL86111
+ *
+ * Copyright 2023 MaxLinear Inc.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+
+/* PHY ID */
+#define PHY_ID_MXL86110 0xc1335580
+#define PHY_ID_MXL86111 0xc1335588
+
+/* required to access extended registers */
+#define MXL86110_EXTD_REG_ADDR_OFFSET 0x1E
+#define MXL86110_EXTD_REG_ADDR_DATA 0x1F
+#define PHY_IRQ_ENABLE_REG 0x12
+#define PHY_IRQ_ENABLE_REG_WOL BIT(6)
+
+/* different pages for EXTD access for MXL86111 */
+/* SerDes/PHY Control Access Register - COM_EXT_SMI_SDS_PHY */
+#define MXL86111_EXT_SMI_SDS_PHY_REG 0xA000
+#define MXL86111_EXT_SMI_SDS_PHYSPACE_MASK BIT(1)
+#define MXL86111_EXT_SMI_SDS_PHYFIBER_SPACE (0x1 << 1)
+#define MXL86111_EXT_SMI_SDS_PHYUTP_SPACE (0x0 << 1)
+#define MXL86111_EXT_SMI_SDS_PHY_AUTO 0xff
+
+/* SyncE Configuration Register - COM_EXT_SYNCE_CFG */
+#define MXL86110_EXT_SYNCE_CFG_REG 0xA012
+#define MXL86110_EXT_SYNCE_CFG_CLK_FRE_SEL BIT(4)
+#define MXL86110_EXT_SYNCE_CFG_EN_SYNC_E_DURING_LNKDN BIT(5)
+#define MXL86110_EXT_SYNCE_CFG_EN_SYNC_E BIT(6)
+#define MXL86110_EXT_SYNCE_CFG_CLK_SRC_SEL_MASK GENMASK(3, 1)
+#define MXL86110_EXT_SYNCE_CFG_CLK_SRC_SEL_125M_PLL 0
+#define MXL86110_EXT_SYNCE_CFG_CLK_SRC_SEL_25M 4
+
+/* MAC Address registers */
+#define MXL86110_EXT_MAC_ADDR_CFG1 0xA007
+#define MXL86110_EXT_MAC_ADDR_CFG2 0xA008
+#define MXL86110_EXT_MAC_ADDR_CFG3 0xA009
+
+#define MXL86110_EXT_WOL_CFG_REG 0xA00A
+#define MXL86110_WOL_CFG_WOL_MASK BIT(3)
+
+/* RGMII register */
+#define MXL86110_EXT_RGMII_CFG1_REG 0xA003
+/* delay can be adjusted in steps of about 150ps */
+#define MXL86110_EXT_RGMII_CFG1_RX_NO_DELAY (0x0 << 10)
+/* 0xD = 13 steps x ~150 ps = 1950 ps, the closest value to 2000 ps */
+#define MXL86110_EXT_RGMII_CFG1_RX_DELAY_1950PS (0xD << 10)
+#define MXL86110_EXT_RGMII_CFG1_RX_DELAY_MASK GENMASK(13, 10)
+
+#define MXL86110_EXT_RGMII_CFG1_TX_1G_DELAY_1950PS (0xD << 0)
+#define MXL86110_EXT_RGMII_CFG1_TX_1G_DELAY_MASK GENMASK(3, 0)
+
+#define MXL86110_EXT_RGMII_CFG1_TX_10MB_100MB_DELAY_1950PS (0xD << 4)
+#define MXL86110_EXT_RGMII_CFG1_TX_10MB_100MB_DELAY_MASK GENMASK(7, 4)
+
+#define MXL86110_EXT_RGMII_CFG1_FULL_MASK \
+ ((MXL86110_EXT_RGMII_CFG1_RX_DELAY_MASK) | \
+ (MXL86110_EXT_RGMII_CFG1_TX_1G_DELAY_MASK) | \
+ (MXL86110_EXT_RGMII_CFG1_TX_10MB_100MB_DELAY_MASK))
+
+/* EXT Sleep Control register */
+#define MXL86110_UTP_EXT_SLEEP_CTRL_REG 0x27
+#define MXL86110_UTP_EXT_SLEEP_CTRL_EN_SLEEP_SW_OFF 0
+#define MXL86110_UTP_EXT_SLEEP_CTRL_EN_SLEEP_SW_MASK BIT(15)
+
+/* RGMII In-Band Status and MDIO Configuration Register */
+#define MXL86110_EXT_RGMII_MDIO_CFG 0xA005
+#define MXL86110_RGMII_MDIO_CFG_EPA0_MASK GENMASK(6, 6)
+#define MXL86110_EXT_RGMII_MDIO_CFG_EBA_MASK GENMASK(5, 5)
+#define MXL86110_EXT_RGMII_MDIO_CFG_BA_MASK GENMASK(4, 0)
+
+#define MXL86110_MAX_LEDS 3
+/* LED registers and defines */
+#define MXL86110_COM_EXT_LED_GEN_CFG 0xA00B
+# define MXL86110_COM_EXT_LED_GEN_CFG_LFM(x) ((BIT(0) | BIT(1)) << (3 * (x)))
+# define MXL86110_COM_EXT_LED_GEN_CFG_LFME(x) (BIT(0) << (3 * (x)))
+# define MXL86110_COM_EXT_LED_GEN_CFG_LFE(x) (BIT(2) << (3 * (x)))
+
+#define MXL86110_LED0_CFG_REG 0xA00C
+#define MXL86110_LED1_CFG_REG 0xA00D
+#define MXL86110_LED2_CFG_REG 0xA00E
+
+#define MXL86110_LEDX_CFG_BLINK BIT(13)
+#define MXL86110_LEDX_CFG_LINK_UP_FULL_DUPLEX_ON BIT(12)
+#define MXL86110_LEDX_CFG_LINK_UP_HALF_DUPLEX_ON BIT(11)
+#define MXL86110_LEDX_CFG_LINK_UP_TX_ACT_ON BIT(10)
+#define MXL86110_LEDX_CFG_LINK_UP_RX_ACT_ON BIT(9)
+#define MXL86110_LEDX_CFG_LINK_UP_TX_ON BIT(8)
+#define MXL86110_LEDX_CFG_LINK_UP_RX_ON BIT(7)
+#define MXL86110_LEDX_CFG_LINK_UP_1GB_ON BIT(6)
+#define MXL86110_LEDX_CFG_LINK_UP_100MB_ON BIT(5)
+#define MXL86110_LEDX_CFG_LINK_UP_10MB_ON BIT(4)
+#define MXL86110_LEDX_CFG_LINK_UP_COLLISION BIT(3)
+#define MXL86110_LEDX_CFG_LINK_UP_1GB_BLINK BIT(2)
+#define MXL86110_LEDX_CFG_LINK_UP_100MB_BLINK BIT(1)
+#define MXL86110_LEDX_CFG_LINK_UP_10MB_BLINK BIT(0)
+
+#define MXL86110_LED_BLINK_CFG_REG 0xA00F
+#define MXL86110_LED_BLINK_CFG_FREQ_MODE1_2HZ 0
+#define MXL86110_LED_BLINK_CFG_FREQ_MODE1_4HZ BIT(0)
+#define MXL86110_LED_BLINK_CFG_FREQ_MODE1_8HZ BIT(1)
+#define MXL86110_LED_BLINK_CFG_FREQ_MODE1_16HZ (BIT(1) | BIT(0))
+#define MXL86110_LED_BLINK_CFG_FREQ_MODE2_2HZ 0
+#define MXL86110_LED_BLINK_CFG_FREQ_MODE2_4HZ BIT(2)
+#define MXL86110_LED_BLINK_CFG_FREQ_MODE2_8HZ BIT(3)
+#define MXL86110_LED_BLINK_CFG_FREQ_MODE2_16HZ (BIT(3) | BIT(2))
+#define MXL86110_LED_BLINK_CFG_DUTY_CYCLE_50_ON 0
+#define MXL86110_LED_BLINK_CFG_DUTY_CYCLE_67_ON (BIT(4))
+#define MXL86110_LED_BLINK_CFG_DUTY_CYCLE_75_ON (BIT(5))
+#define MXL86110_LED_BLINK_CFG_DUTY_CYCLE_83_ON (BIT(5) | BIT(4))
+#define MXL86110_LED_BLINK_CFG_DUTY_CYCLE_50_OFF (BIT(6))
+#define MXL86110_LED_BLINK_CFG_DUTY_CYCLE_33_ON (BIT(6) | BIT(4))
+#define MXL86110_LED_BLINK_CFG_DUTY_CYCLE_25_ON (BIT(6) | BIT(5))
+#define MXL86110_LED_BLINK_CFG_DUTY_CYCLE_17_ON (BIT(6) | BIT(5) | BIT(4))
+
+/* Chip Configuration Register - COM_EXT_CHIP_CFG */
+#define MXL86110_EXT_CHIP_CFG_REG 0xA001
+#define MXL86111_EXT_CHIP_CFG_MODE_SEL_MASK GENMASK(2, 0)
+#define MXL86111_EXT_CHIP_CFG_MODE_UTP_TO_RGMII 0
+#define MXL86111_EXT_CHIP_CFG_MODE_FIBER_TO_RGMII 1
+#define MXL86111_EXT_CHIP_CFG_MODE_UTP_FIBER_TO_RGMII 2
+#define MXL86111_EXT_CHIP_CFG_MODE_UTP_TO_SGMII 3
+#define MXL86111_EXT_CHIP_CFG_MODE_SGPHY_TO_RGMAC 4
+#define MXL86111_EXT_CHIP_CFG_MODE_SGMAC_TO_RGPHY 5
+#define MXL86111_EXT_CHIP_CFG_MODE_UTP_TO_FIBER_AUTO 6
+#define MXL86111_EXT_CHIP_CFG_MODE_UTP_TO_FIBER_FORCE 7
+
+#define MXL86111_EXT_CHIP_CFG_CLDO_MASK GENMASK(5, 4)
+#define MXL86111_EXT_CHIP_CFG_CLDO_3V3 0
+#define MXL86111_EXT_CHIP_CFG_CLDO_2V5 1
+#define MXL86111_EXT_CHIP_CFG_CLDO_1V8_2 2
+#define MXL86111_EXT_CHIP_CFG_CLDO_1V8_3 3
+#define MXL86111_EXT_CHIP_CFG_CLDO_SHIFT 4
+#define MXL86111_EXT_CHIP_CFG_ELDO BIT(6)
+#define MXL86110_EXT_CHIP_CFG_RXDLY_ENABLE BIT(8)
+#define MXL86110_EXT_CHIP_CFG_SW_RST_N_MODE BIT(15)
+
+/* Specific Status Register - PHY_STAT */
+#define MXL86111_PHY_STAT_REG 0x11
+#define MXL86111_PHY_STAT_SPEED_MASK GENMASK(15, 14)
+#define MXL86111_PHY_STAT_SPEED_OFFSET 14
+#define MXL86111_PHY_STAT_SPEED_10M 0x0
+#define MXL86111_PHY_STAT_SPEED_100M 0x1
+#define MXL86111_PHY_STAT_SPEED_1000M 0x2
+#define MXL86111_PHY_STAT_DPX_OFFSET 13
+#define MXL86111_PHY_STAT_DPX BIT(13)
+#define MXL86111_PHY_STAT_LSRT BIT(10)
+
+/* 3 PHY reg page modes; auto mode combines UTP and fiber modes */
+#define MXL86111_MODE_FIBER 0x1
+#define MXL86111_MODE_UTP 0x2
+#define MXL86111_MODE_AUTO 0x3
+
+/* FIBER Auto-Negotiation link partner ability - SDS_AN_LPA */
+#define MXL86111_SDS_AN_LPA_PAUSE (0x3 << 7)
+#define MXL86111_SDS_AN_LPA_ASYM_PAUSE (0x2 << 7)
+
+/* Miscellaneous Control Register - COM_EXT_MISC_CFG */
+#define MXL86111_EXT_MISC_CONFIG_REG 0xa006
+#define MXL86111_EXT_MISC_CONFIG_FIB_SPEED_SEL BIT(0)
+#define MXL86111_EXT_MISC_CONFIG_FIB_SPEED_SEL_1000BX (0x1 << 0)
+#define MXL86111_EXT_MISC_CONFIG_FIB_SPEED_SEL_100BX (0x0 << 0)
+
+/* Phy fiber Link timer cfg2 Register - EXT_SDS_LINK_TIMER_CFG2 */
+#define MXL86111_EXT_SDS_LINK_TIMER_CFG2_REG 0xA5
+#define MXL86111_EXT_SDS_LINK_TIMER_CFG2_EN_AUTOSEN BIT(15)
+
+/* default values of PHY registers, required for Dual Media mode */
+#define MII_BMSR_DEFAULT_VAL 0x7949
+#define MII_ESTATUS_DEFAULT_VAL 0x2000
+
+/* Timeout in ms for PHY SW reset check in STD_CTRL/SDS_CTRL */
+#define BMCR_RESET_TIMEOUT 500
+
+/* PL P1 requires optimized RGMII timing for 1.8V RGMII voltage */
+#define MXL86111_PL_P1 0x500
+
+/**
+ * __mxl86110_write_extended_reg() - write to a PHY's extended register
+ * @phydev: pointer to the PHY device structure
+ * @regnum: register number to write
+ * @val: value to write to @regnum
+ *
+ * Unlocked version of mxl86110_write_extended_reg
+ *
+ * Note: This function assumes the caller already holds the MDIO bus lock
+ * or otherwise has exclusive access to the PHY.
+ *
+ * Return: 0 or negative error code
+ */
+static int __mxl86110_write_extended_reg(struct phy_device *phydev,
+ u16 regnum, u16 val)
+{
+ int ret;
+
+ ret = __phy_write(phydev, MXL86110_EXTD_REG_ADDR_OFFSET, regnum);
+ if (ret < 0)
+ return ret;
+
+ return __phy_write(phydev, MXL86110_EXTD_REG_ADDR_DATA, val);
+}
+
+/**
+ * __mxl86110_read_extended_reg - Read a PHY's extended register
+ * @phydev: pointer to the PHY device structure
+ * @regnum: extended register number to read (address written to reg 30)
+ *
+ * Unlocked version of mxl86110_read_extended_reg
+ *
+ * Reads the content of a PHY extended register using the MaxLinear
+ * 2-step access mechanism: write the register address to reg 30 (0x1E),
+ * then read the value from reg 31 (0x1F).
+ *
+ * Note: This function assumes the caller already holds the MDIO bus lock
+ * or otherwise has exclusive access to the PHY.
+ *
+ * Return: 16-bit register value on success, or negative errno code on failure.
+ */
+static int __mxl86110_read_extended_reg(struct phy_device *phydev, u16 regnum)
+{
+ int ret;
+
+ ret = __phy_write(phydev, MXL86110_EXTD_REG_ADDR_OFFSET, regnum);
+ if (ret < 0)
+ return ret;
+ return __phy_read(phydev, MXL86110_EXTD_REG_ADDR_DATA);
+}
+
+/**
+ * __mxl86110_modify_extended_reg() - modify bits of a PHY's extended register
+ * @phydev: pointer to the PHY device structure
+ * @regnum: register number to write
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * Note: register value = (old register value & ~mask) | set.
+ * This function assumes the caller already holds the MDIO bus lock
+ * or otherwise has exclusive access to the PHY.
+ *
+ * Return: 0 or negative error code
+ */
+static int __mxl86110_modify_extended_reg(struct phy_device *phydev,
+ u16 regnum, u16 mask, u16 set)
+{
+ int ret;
+
+ ret = __phy_write(phydev, MXL86110_EXTD_REG_ADDR_OFFSET, regnum);
+ if (ret < 0)
+ return ret;
+
+ return __phy_modify(phydev, MXL86110_EXTD_REG_ADDR_DATA, mask, set);
+}
+
+/**
+ * mxl86110_write_extended_reg() - Write to a PHY's extended register
+ * @phydev: pointer to the PHY device structure
+ * @regnum: register number to write
+ * @val: value to write to @regnum
+ *
+ * This function writes to an extended register of the PHY using the
+ * MaxLinear two-step access method (reg 0x1E/0x1F). It handles acquiring
+ * and releasing the MDIO bus lock internally.
+ *
+ * Return: 0 or negative error code
+ */
+static int mxl86110_write_extended_reg(struct phy_device *phydev,
+ u16 regnum, u16 val)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+ ret = __mxl86110_write_extended_reg(phydev, regnum, val);
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+
+/**
+ * mxl86110_read_extended_reg() - Read a PHY's extended register
+ * @phydev: pointer to the PHY device structure
+ * @regnum: extended register number to read
+ *
+ * This function reads from an extended register of the PHY using the
+ * MaxLinear two-step access method (reg 0x1E/0x1F). It handles acquiring
+ * and releasing the MDIO bus lock internally.
+ *
+ * Return: 16-bit register value on success, or negative errno code on failure
+ */
+static int mxl86110_read_extended_reg(struct phy_device *phydev, u16 regnum)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+ ret = __mxl86110_read_extended_reg(phydev, regnum);
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+
+/**
+ * mxl86110_modify_extended_reg() - modify bits of a PHY's extended register
+ * @phydev: pointer to the PHY device structure
+ * @regnum: register number to write
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * Note: register value = (old register value & ~mask) | set.
+ *
+ * Return: 0 or negative error code
+ */
+static int mxl86110_modify_extended_reg(struct phy_device *phydev,
+ u16 regnum, u16 mask, u16 set)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+ ret = __mxl86110_modify_extended_reg(phydev, regnum, mask, set);
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+
+/**
+ * mxl86110_get_wol() - report if wake-on-lan is enabled
+ * @phydev: pointer to the phy_device
+ * @wol: a pointer to a &struct ethtool_wolinfo
+ */
+static void mxl86110_get_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ int val;
+
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = 0;
+ val = mxl86110_read_extended_reg(phydev, MXL86110_EXT_WOL_CFG_REG);
+ if (val >= 0 && (val & MXL86110_WOL_CFG_WOL_MASK))
+ wol->wolopts |= WAKE_MAGIC;
+}
+
+/**
+ * mxl86110_set_wol() - enable/disable wake-on-lan
+ * @phydev: pointer to the phy_device
+ * @wol: a pointer to a &struct ethtool_wolinfo
+ *
+ * Configures the MAC address used for Wake-on-LAN magic-packet matching
+ * and enables or disables WOL.
+ *
+ * Return: 0 or negative errno code
+ */
+static int mxl86110_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ struct net_device *netdev;
+ const unsigned char *mac;
+ int ret = 0;
+
+ phy_lock_mdio_bus(phydev);
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ netdev = phydev->attached_dev;
+ if (!netdev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* Configure the MAC address of the WOL magic packet */
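+ /* CFG1..CFG3 each hold two bytes of the address, with the
+ * lower-numbered byte in the upper half of the register.
+ */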
+ mac = netdev->dev_addr;
+ ret = __mxl86110_write_extended_reg(phydev,
+ MXL86110_EXT_MAC_ADDR_CFG1,
+ ((mac[0] << 8) | mac[1]));
+ if (ret < 0)
+ goto out;
+
+ ret = __mxl86110_write_extended_reg(phydev,
+ MXL86110_EXT_MAC_ADDR_CFG2,
+ ((mac[2] << 8) | mac[3]));
+ if (ret < 0)
+ goto out;
+
+ ret = __mxl86110_write_extended_reg(phydev,
+ MXL86110_EXT_MAC_ADDR_CFG3,
+ ((mac[4] << 8) | mac[5]));
+ if (ret < 0)
+ goto out;
+
+ ret = __mxl86110_modify_extended_reg(phydev,
+ MXL86110_EXT_WOL_CFG_REG,
+ MXL86110_WOL_CFG_WOL_MASK,
+ MXL86110_WOL_CFG_WOL_MASK);
+ if (ret < 0)
+ goto out;
+
+ /* Enables Wake-on-LAN interrupt in the PHY. */
+ ret = __phy_modify(phydev, PHY_IRQ_ENABLE_REG, 0,
+ PHY_IRQ_ENABLE_REG_WOL);
+ if (ret < 0)
+ goto out;
+
+ phydev_dbg(phydev,
+ "%s, MAC Addr: %02X:%02X:%02X:%02X:%02X:%02X\n",
+ __func__,
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+ } else {
+ ret = __mxl86110_modify_extended_reg(phydev,
+ MXL86110_EXT_WOL_CFG_REG,
+ MXL86110_WOL_CFG_WOL_MASK,
+ 0);
+ if (ret < 0)
+ goto out;
+
+ /* Disables Wake-on-LAN interrupt in the PHY. */
+ ret = __phy_modify(phydev, PHY_IRQ_ENABLE_REG,
+ PHY_IRQ_ENABLE_REG_WOL, 0);
+ }
+
+out:
+ phy_unlock_mdio_bus(phydev);
+ return ret;
+}
+
+static const unsigned long supported_trgs = (BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_HALF_DUPLEX) |
+ BIT(TRIGGER_NETDEV_FULL_DUPLEX) |
+ BIT(TRIGGER_NETDEV_TX) |
+ BIT(TRIGGER_NETDEV_RX));
+
+static int mxl86110_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ if (index >= MXL86110_MAX_LEDS)
+ return -EINVAL;
+
+ /* All combinations of the supported triggers are allowed */
+ if (rules & ~supported_trgs)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int mxl86110_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ int val;
+
+ if (index >= MXL86110_MAX_LEDS)
+ return -EINVAL;
+
+ val = mxl86110_read_extended_reg(phydev,
+ MXL86110_LED0_CFG_REG + index);
+ if (val < 0)
+ return val;
+
+ if (val & MXL86110_LEDX_CFG_LINK_UP_TX_ACT_ON)
+ *rules |= BIT(TRIGGER_NETDEV_TX);
+
+ if (val & MXL86110_LEDX_CFG_LINK_UP_RX_ACT_ON)
+ *rules |= BIT(TRIGGER_NETDEV_RX);
+
+ if (val & MXL86110_LEDX_CFG_LINK_UP_HALF_DUPLEX_ON)
+ *rules |= BIT(TRIGGER_NETDEV_HALF_DUPLEX);
+
+ if (val & MXL86110_LEDX_CFG_LINK_UP_FULL_DUPLEX_ON)
+ *rules |= BIT(TRIGGER_NETDEV_FULL_DUPLEX);
+
+ if (val & MXL86110_LEDX_CFG_LINK_UP_10MB_ON)
+ *rules |= BIT(TRIGGER_NETDEV_LINK_10);
+
+ if (val & MXL86110_LEDX_CFG_LINK_UP_100MB_ON)
+ *rules |= BIT(TRIGGER_NETDEV_LINK_100);
+
+ if (val & MXL86110_LEDX_CFG_LINK_UP_1GB_ON)
+ *rules |= BIT(TRIGGER_NETDEV_LINK_1000);
+
+ return 0;
+}
+
+static int mxl86110_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ u16 val = 0;
+ int ret;
+
+ if (index >= MXL86110_MAX_LEDS)
+ return -EINVAL;
+
+ if (rules & BIT(TRIGGER_NETDEV_LINK_10))
+ val |= MXL86110_LEDX_CFG_LINK_UP_10MB_ON;
+
+ if (rules & BIT(TRIGGER_NETDEV_LINK_100))
+ val |= MXL86110_LEDX_CFG_LINK_UP_100MB_ON;
+
+ if (rules & BIT(TRIGGER_NETDEV_LINK_1000))
+ val |= MXL86110_LEDX_CFG_LINK_UP_1GB_ON;
+
+ if (rules & BIT(TRIGGER_NETDEV_TX))
+ val |= MXL86110_LEDX_CFG_LINK_UP_TX_ACT_ON;
+
+ if (rules & BIT(TRIGGER_NETDEV_RX))
+ val |= MXL86110_LEDX_CFG_LINK_UP_RX_ACT_ON;
+
+ if (rules & BIT(TRIGGER_NETDEV_HALF_DUPLEX))
+ val |= MXL86110_LEDX_CFG_LINK_UP_HALF_DUPLEX_ON;
+
+ if (rules & BIT(TRIGGER_NETDEV_FULL_DUPLEX))
+ val |= MXL86110_LEDX_CFG_LINK_UP_FULL_DUPLEX_ON;
+
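+ /* RX/TX activity triggers are reported by blinking the LED */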
+ if (rules & BIT(TRIGGER_NETDEV_TX) ||
+ rules & BIT(TRIGGER_NETDEV_RX))
+ val |= MXL86110_LEDX_CFG_BLINK;
+
+ ret = mxl86110_write_extended_reg(phydev,
+ MXL86110_LED0_CFG_REG + index, val);
+ if (ret)
+ return ret;
+
+ /* clear manual control bit */
+ ret = mxl86110_modify_extended_reg(phydev,
+ MXL86110_COM_EXT_LED_GEN_CFG,
+ MXL86110_COM_EXT_LED_GEN_CFG_LFE(index),
+ 0);
+
+ return ret;
+}
+
+static int mxl86110_led_brightness_set(struct phy_device *phydev,
+ u8 index, enum led_brightness value)
+{
+ u16 mask, set;
+ int ret;
+
+ if (index >= MXL86110_MAX_LEDS)
+ return -EINVAL;
+
+ /* force manual control */
+ set = MXL86110_COM_EXT_LED_GEN_CFG_LFE(index);
+ /* clear previous force mode */
+ mask = MXL86110_COM_EXT_LED_GEN_CFG_LFM(index);
+
+ /* force LED to be permanently on */
+ if (value != LED_OFF)
+ set |= MXL86110_COM_EXT_LED_GEN_CFG_LFME(index);
+
+ ret = mxl86110_modify_extended_reg(phydev,
+ MXL86110_COM_EXT_LED_GEN_CFG,
+ mask, set);
+
+ return ret;
+}
+
+/**
+ * mxl86110_synce_clk_cfg() - applies syncE/clk output configuration
+ * @phydev: pointer to the phy_device
+ *
+ * Note: This function assumes the caller already holds the MDIO bus lock
+ * or otherwise has exclusive access to the PHY.
+ *
+ * Return: 0 or negative errno code
+ */
+static int mxl86110_synce_clk_cfg(struct phy_device *phydev)
+{
+ u16 mask = 0, val = 0;
+
+ /*
+ * Configures the clock output to its default
+ * setting as per the datasheet.
+ * This results in a 25MHz clock output being selected in the
+ * COM_EXT_SYNCE_CFG register for SyncE configuration.
+ */
+ val = MXL86110_EXT_SYNCE_CFG_EN_SYNC_E |
+ FIELD_PREP(MXL86110_EXT_SYNCE_CFG_CLK_SRC_SEL_MASK,
+ MXL86110_EXT_SYNCE_CFG_CLK_SRC_SEL_25M);
+ mask = MXL86110_EXT_SYNCE_CFG_EN_SYNC_E |
+ MXL86110_EXT_SYNCE_CFG_CLK_SRC_SEL_MASK |
+ MXL86110_EXT_SYNCE_CFG_CLK_FRE_SEL;
+
+ /* Write clock output configuration */
+ return __mxl86110_modify_extended_reg(phydev,
+ MXL86110_EXT_SYNCE_CFG_REG,
+ mask, val);
+}
+
+/**
+ * mxl86110_broadcast_cfg - Configure MDIO broadcast setting for PHY
+ * @phydev: Pointer to the PHY device structure
+ *
+ * This function configures the MDIO broadcast behavior of the MxL86110 PHY.
+ * Currently, broadcast mode is explicitly disabled by clearing the EPA0 bit
+ * in the RGMII_MDIO_CFG extended register.
+ *
+ * Note: This function assumes the caller already holds the MDIO bus lock
+ * or otherwise has exclusive access to the PHY.
+ *
+ * Return: 0 on success or a negative errno code on failure.
+ */
+static int mxl86110_broadcast_cfg(struct phy_device *phydev)
+{
+ return __mxl86110_modify_extended_reg(phydev,
+ MXL86110_EXT_RGMII_MDIO_CFG,
+ MXL86110_RGMII_MDIO_CFG_EPA0_MASK,
+ 0);
+}
+
+/**
+ * mxl86110_enable_led_activity_blink() - enable LED activity blinking on the PHY
+ * @phydev: Pointer to the PHY device structure
+ *
+ * Configure all PHY LEDs to blink on traffic activity regardless of whether
+ * they are ON or OFF. This behavior allows each LED to serve as a pure activity
+ * indicator, independently of its use as a link status indicator.
+ *
+ * By default, each LED blinks only when it is also in the ON state.
+ * This function modifies the appropriate registers (LABx fields)
+ * to enable blinking even when the LEDs are OFF, to allow the LED to be used
+ * as a traffic indicator without requiring it to also serve
+ * as a link status LED.
+ *
+ * Note: Any further LED customization can be performed via the
+ * /sys/class/leds interface; the functions led_hw_is_supported,
+ * led_hw_control_get, and led_hw_control_set are used
+ * to support this mechanism.
+ *
+ * This function assumes the caller already holds the MDIO bus lock
+ * or otherwise has exclusive access to the PHY.
+ *
+ * Return: 0 on success or a negative errno code on failure.
+ */
+static int mxl86110_enable_led_activity_blink(struct phy_device *phydev)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < MXL86110_MAX_LEDS; i++) {
+ ret = __mxl86110_modify_extended_reg(phydev,
+ MXL86110_LED0_CFG_REG + i,
+ 0,
+ MXL86110_LEDX_CFG_BLINK);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * mxl86110_config_rgmii_delay() - configure RGMII delays
+ * @phydev: pointer to the phy_device
+ *
+ * Return: 0 or negative errno code
+ */
+static int mxl86110_config_rgmii_delay(struct phy_device *phydev)
+{
+ int ret;
+ u16 val;
+
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val = 0;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ val = MXL86110_EXT_RGMII_CFG1_RX_DELAY_1950PS;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ val = MXL86110_EXT_RGMII_CFG1_TX_1G_DELAY_1950PS |
+ MXL86110_EXT_RGMII_CFG1_TX_10MB_100MB_DELAY_1950PS;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ val = MXL86110_EXT_RGMII_CFG1_TX_1G_DELAY_1950PS |
+ MXL86110_EXT_RGMII_CFG1_TX_10MB_100MB_DELAY_1950PS |
+ MXL86110_EXT_RGMII_CFG1_RX_DELAY_1950PS;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = __mxl86110_modify_extended_reg(phydev,
+ MXL86110_EXT_RGMII_CFG1_REG,
+ MXL86110_EXT_RGMII_CFG1_FULL_MASK,
+ val);
+ if (ret < 0)
+ goto out;
+
+ /* Configure RXDLY (RGMII Rx Clock Delay) to disable
+ * the default additional delay value on RX_CLK
+ * (2 ns for 125 MHz, 8 ns for 25 MHz/2.5 MHz)
+ * and use just the digital one selected before
+ */
+ ret = __mxl86110_modify_extended_reg(phydev,
+ MXL86110_EXT_CHIP_CFG_REG,
+ MXL86110_EXT_CHIP_CFG_RXDLY_ENABLE,
+ 0);
+ if (ret < 0)
+ goto out;
+
+out:
+ return ret;
+}
+
+/**
+ * mxl86110_config_init() - initialize the MXL86110 PHY
+ * @phydev: pointer to the phy_device
+ *
+ * Return: 0 or negative errno code
+ */
+static int mxl86110_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+
+ /* configure syncE / clk output */
+ ret = mxl86110_synce_clk_cfg(phydev);
+ if (ret < 0)
+ goto out;
+
+ ret = mxl86110_config_rgmii_delay(phydev);
+ if (ret < 0)
+ goto out;
+
+ ret = mxl86110_enable_led_activity_blink(phydev);
+ if (ret < 0)
+ goto out;
+
+ ret = mxl86110_broadcast_cfg(phydev);
+
+out:
+ phy_unlock_mdio_bus(phydev);
+ return ret;
+}
+
+/**
+ * mxl86111_probe() - validate bootstrap chip config and set UTP page
+ * @phydev: pointer to the phy_device
+ *
+ * Return: 0 or negative errno code
+ */
+static int mxl86111_probe(struct phy_device *phydev)
+{
+ int chip_config;
+ u16 reg_page;
+ int ret;
+
+ chip_config = mxl86110_read_extended_reg(phydev, MXL86110_EXT_CHIP_CFG_REG);
+ if (chip_config < 0)
+ return chip_config;
+
+ switch (chip_config & MXL86111_EXT_CHIP_CFG_MODE_SEL_MASK) {
+ case MXL86111_EXT_CHIP_CFG_MODE_UTP_TO_SGMII:
+ case MXL86111_EXT_CHIP_CFG_MODE_UTP_TO_RGMII:
+ phydev->port = PORT_TP;
+ reg_page = MXL86111_EXT_SMI_SDS_PHYUTP_SPACE;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret = mxl86110_write_extended_reg(phydev,
+ MXL86111_EXT_SMI_SDS_PHY_REG,
+ reg_page);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * mxl86111_config_init() - initialize the MXL86111 PHY
+ * @phydev: pointer to the phy_device
+ *
+ * Return: 0 or negative errno code
+ */
+static int mxl86111_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+
+ /* configure syncE / clk output */
+ ret = mxl86110_synce_clk_cfg(phydev);
+ if (ret < 0)
+ goto out;
+
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_100BASEX:
+ ret = __mxl86110_modify_extended_reg(phydev,
+ MXL86111_EXT_MISC_CONFIG_REG,
+ MXL86111_EXT_MISC_CONFIG_FIB_SPEED_SEL,
+ MXL86111_EXT_MISC_CONFIG_FIB_SPEED_SEL_100BX);
+ if (ret < 0)
+ goto out;
+ break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ ret = __mxl86110_modify_extended_reg(phydev,
+ MXL86111_EXT_MISC_CONFIG_REG,
+ MXL86111_EXT_MISC_CONFIG_FIB_SPEED_SEL,
+ MXL86111_EXT_MISC_CONFIG_FIB_SPEED_SEL_1000BX);
+ if (ret < 0)
+ goto out;
+ break;
+ default:
+ /* RGMII modes */
+ ret = mxl86110_config_rgmii_delay(phydev);
+ if (ret < 0)
+ goto out;
+
+ /* PL P1 requires optimized RGMII timing for 1.8V RGMII voltage */
+ ret = __mxl86110_read_extended_reg(phydev, 0xf);
+ if (ret < 0)
+ goto out;
+
+ if (ret == MXL86111_PL_P1) {
+ ret = __mxl86110_read_extended_reg(phydev, MXL86110_EXT_CHIP_CFG_REG);
+ if (ret < 0)
+ goto out;
+
+ /* check if LDO is in 1.8V mode */
+ switch (FIELD_GET(MXL86111_EXT_CHIP_CFG_CLDO_MASK, ret)) {
+ case MXL86111_EXT_CHIP_CFG_CLDO_1V8_3:
+ case MXL86111_EXT_CHIP_CFG_CLDO_1V8_2:
+ ret = __mxl86110_write_extended_reg(phydev, 0xa010, 0xabff);
+ if (ret < 0)
+ goto out;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ }
+
+ ret = mxl86110_enable_led_activity_blink(phydev);
+ if (ret < 0)
+ goto out;
+
+ ret = mxl86110_broadcast_cfg(phydev);
+out:
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+
+/**
+ * mxl86111_read_page() - read reg page
+ * @phydev: pointer to the phy_device
+ *
+ * Return: current reg space of mxl86111 or negative errno code
+ */
+static int mxl86111_read_page(struct phy_device *phydev)
+{
+ int page;
+
+ page = __mxl86110_read_extended_reg(phydev, MXL86111_EXT_SMI_SDS_PHY_REG);
+ if (page < 0)
+ return page;
+
+ return page & MXL86111_EXT_SMI_SDS_PHYSPACE_MASK;
+};
+
+/**
+ * mxl86111_write_page() - Set reg page
+ * @phydev: pointer to the phy_device
+ * @page: The reg page to set
+ *
+ * Return: 0 or negative errno code
+ */
+static int mxl86111_write_page(struct phy_device *phydev, int page)
+{
+ return __mxl86110_modify_extended_reg(phydev, MXL86111_EXT_SMI_SDS_PHY_REG,
+ MXL86111_EXT_SMI_SDS_PHYSPACE_MASK, page);
+};
+
+static int mxl86111_config_inband(struct phy_device *phydev, unsigned int modes)
+{
+ int ret;
+
+ ret = phy_modify_paged(phydev, MXL86111_EXT_SMI_SDS_PHYFIBER_SPACE,
+ MII_BMCR, BMCR_ANENABLE,
+ (modes == LINK_INBAND_DISABLE) ? 0 : BMCR_ANENABLE);
+ if (ret < 0)
+ goto out;
+
+ phy_lock_mdio_bus(phydev);
+
+ ret = __mxl86110_modify_extended_reg(phydev, MXL86111_EXT_SDS_LINK_TIMER_CFG2_REG,
+ MXL86111_EXT_SDS_LINK_TIMER_CFG2_EN_AUTOSEN,
+ (modes == LINK_INBAND_DISABLE) ? 0 :
+ MXL86111_EXT_SDS_LINK_TIMER_CFG2_EN_AUTOSEN);
+ if (ret < 0)
+ goto out;
+
+ ret = __mxl86110_modify_extended_reg(phydev, MXL86110_EXT_CHIP_CFG_REG,
+ MXL86110_EXT_CHIP_CFG_SW_RST_N_MODE, 0);
+ if (ret < 0)
+ goto out;
+
+ /* For fiber forced mode, power down/up to re-aneg */
+ if (modes != LINK_INBAND_DISABLE) {
+ __phy_modify(phydev, MII_BMCR, 0, BMCR_PDOWN);
+ usleep_range(1000, 1050);
+ __phy_modify(phydev, MII_BMCR, BMCR_PDOWN, 0);
+ }
+
+out:
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+
+static unsigned int mxl86111_inband_caps(struct phy_device *phydev,
+ phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_100BASEX:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+ default:
+ return 0;
+ }
+}
+
+static struct phy_driver mxl_phy_drvs[] = {
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_MXL86110),
+ .name = "MXL86110 Gigabit Ethernet",
+ .config_init = mxl86110_config_init,
+ .get_wol = mxl86110_get_wol,
+ .set_wol = mxl86110_set_wol,
+ .led_brightness_set = mxl86110_led_brightness_set,
+ .led_hw_is_supported = mxl86110_led_hw_is_supported,
+ .led_hw_control_get = mxl86110_led_hw_control_get,
+ .led_hw_control_set = mxl86110_led_hw_control_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_MXL86111),
+ .name = "MXL86111 Gigabit Ethernet",
+ .probe = mxl86111_probe,
+ .config_init = mxl86111_config_init,
+ .get_wol = mxl86110_get_wol,
+ .set_wol = mxl86110_set_wol,
+ .inband_caps = mxl86111_inband_caps,
+ .config_inband = mxl86111_config_inband,
+ .read_page = mxl86111_read_page,
+ .write_page = mxl86111_write_page,
+ .led_brightness_set = mxl86110_led_brightness_set,
+ .led_hw_is_supported = mxl86110_led_hw_is_supported,
+ .led_hw_control_get = mxl86110_led_hw_control_get,
+ .led_hw_control_set = mxl86110_led_hw_control_set,
+ },
+};
+
+module_phy_driver(mxl_phy_drvs);
+
+static const struct mdio_device_id __maybe_unused mxl_tbl[] = {
+ { PHY_ID_MATCH_EXACT(PHY_ID_MXL86110) },
+ { PHY_ID_MATCH_EXACT(PHY_ID_MXL86111) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, mxl_tbl);
+
+MODULE_DESCRIPTION("MaxLinear MXL86110/MXL86111 PHY driver");
+MODULE_AUTHOR("Stefano Radaelli");
+MODULE_LICENSE("GPL");
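
The .read_page/.write_page callbacks above let phylib's generic paged helpers switch between the MXL86111 copper and fiber register spaces, the same mechanism mxl86111_config_inband() uses through phy_modify_paged(). Below is a minimal sketch of a paged read under those assumptions; the helper name and the BMSR read are purely illustrative, only the register-space constant comes from this patch.

static int mxl86111_read_fiber_bmsr(struct phy_device *phydev)
{
	/* phy_read_paged() saves the current page, selects the requested
	 * register space through the driver's .write_page callback, performs
	 * the read and restores the original page, all under the MDIO bus lock.
	 */
	return phy_read_paged(phydev, MXL86111_EXT_SMI_SDS_PHYFIBER_SPACE,
			      MII_BMSR);
}
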
diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
index db3c1f72b407..8e2fd6b942b6 100644
--- a/drivers/net/phy/mxl-gpy.c
+++ b/drivers/net/phy/mxl-gpy.c
@@ -30,6 +30,9 @@
#define PHY_ID_GPY241B 0x67C9DE40
#define PHY_ID_GPY241BM 0x67C9DE80
#define PHY_ID_GPY245B 0x67C9DEC0
+#define PHY_ID_MXL86211C 0xC1335400
+#define PHY_ID_MXL86252 0xC1335520
+#define PHY_ID_MXL86282 0xC1335500
#define PHY_CTL1 0x13
#define PHY_CTL1_MDICD BIT(3)
@@ -199,6 +202,29 @@ static int gpy_hwmon_read(struct device *dev,
return 0;
}
+static int mxl862x2_hwmon_read(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long *value)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ long tmp;
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VSPEC1_TEMP_STA);
+ if (ret < 0)
+ return ret;
+ if (!ret)
+ return -ENODATA;
+
+ tmp = (s16)ret;
+ tmp *= 78125;
+ tmp /= 10000;
+
+ *value = tmp;
+
+ return 0;
+}
+
static umode_t gpy_hwmon_is_visible(const void *data,
enum hwmon_sensor_types type,
u32 attr, int channel)
@@ -216,25 +242,35 @@ static const struct hwmon_ops gpy_hwmon_hwmon_ops = {
.read = gpy_hwmon_read,
};
+static const struct hwmon_ops mxl862x2_hwmon_hwmon_ops = {
+ .is_visible = gpy_hwmon_is_visible,
+ .read = mxl862x2_hwmon_read,
+};
+
static const struct hwmon_chip_info gpy_hwmon_chip_info = {
.ops = &gpy_hwmon_hwmon_ops,
.info = gpy_hwmon_info,
};
+static const struct hwmon_chip_info mxl862x2_hwmon_chip_info = {
+ .ops = &mxl862x2_hwmon_hwmon_ops,
+ .info = gpy_hwmon_info,
+};
+
static int gpy_hwmon_register(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
+ const struct hwmon_chip_info *info;
struct device *hwmon_dev;
- char *hwmon_name;
- hwmon_name = devm_hwmon_sanitize_name(dev, dev_name(dev));
- if (IS_ERR(hwmon_name))
- return PTR_ERR(hwmon_name);
+ if (phy_id_compare_model(phydev->phy_id, PHY_ID_MXL86252) ||
+ phy_id_compare_model(phydev->phy_id, PHY_ID_MXL86282))
+ info = &mxl862x2_hwmon_chip_info;
+ else
+ info = &gpy_hwmon_chip_info;
- hwmon_dev = devm_hwmon_device_register_with_info(dev, hwmon_name,
- phydev,
- &gpy_hwmon_chip_info,
- NULL);
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, NULL, phydev,
+ info, NULL);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
@@ -546,7 +582,7 @@ static int gpy_update_interface(struct phy_device *phydev)
/* Interface mode is fixed for USXGMII and integrated PHY */
if (phydev->interface == PHY_INTERFACE_MODE_USXGMII ||
phydev->interface == PHY_INTERFACE_MODE_INTERNAL)
- return -EINVAL;
+ return 0;
/* Automatically switch SERDES interface between SGMII and 2500-BaseX
* according to speed. Disable ANEG in 2500-BaseX mode.
@@ -584,13 +620,7 @@ static int gpy_update_interface(struct phy_device *phydev)
break;
}
- if (phydev->speed == SPEED_2500 || phydev->speed == SPEED_1000) {
- ret = genphy_read_master_slave(phydev);
- if (ret < 0)
- return ret;
- }
-
- return gpy_update_mdix(phydev);
+ return 0;
}
static int gpy_read_status(struct phy_device *phydev)
@@ -645,6 +675,16 @@ static int gpy_read_status(struct phy_device *phydev)
ret = gpy_update_interface(phydev);
if (ret < 0)
return ret;
+
+ if (phydev->speed == SPEED_2500 || phydev->speed == SPEED_1000) {
+ ret = genphy_read_master_slave(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = gpy_update_mdix(phydev);
+ if (ret < 0)
+ return ret;
}
return 0;
@@ -813,7 +853,7 @@ static void gpy_get_wol(struct phy_device *phydev,
wol->wolopts = priv->wolopts;
}
-static int gpy_loopback(struct phy_device *phydev, bool enable)
+static int gpy_loopback(struct phy_device *phydev, bool enable, int speed)
{
struct gpy_priv *priv = phydev->priv;
u16 set = 0;
@@ -822,6 +862,9 @@ static int gpy_loopback(struct phy_device *phydev, bool enable)
if (enable) {
u64 now = get_jiffies_64();
+ if (speed)
+ return -EOPNOTSUPP;
+
/* wait until 3 seconds from last disable */
if (time_before64(now, priv->lb_dis_to))
msleep(jiffies64_to_msecs(priv->lb_dis_to - now));
@@ -845,15 +888,15 @@ static int gpy_loopback(struct phy_device *phydev, bool enable)
return 0;
}
-static int gpy115_loopback(struct phy_device *phydev, bool enable)
+static int gpy115_loopback(struct phy_device *phydev, bool enable, int speed)
{
struct gpy_priv *priv = phydev->priv;
if (enable)
- return gpy_loopback(phydev, enable);
+ return gpy_loopback(phydev, enable, speed);
if (priv->fw_minor > 0x76)
- return gpy_loopback(phydev, 0);
+ return gpy_loopback(phydev, 0, 0);
return genphy_soft_reset(phydev);
}
@@ -1014,7 +1057,7 @@ static int gpy_led_polarity_set(struct phy_device *phydev, int index,
if (force_active_high)
return phy_clear_bits(phydev, PHY_LED, PHY_LED_POLARITY(index));
- unreachable();
+ return -EINVAL;
}
static struct phy_driver gpy_drivers[] = {
@@ -1271,10 +1314,76 @@ static struct phy_driver gpy_drivers[] = {
.get_wol = gpy_get_wol,
.set_loopback = gpy_loopback,
},
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_MXL86211C),
+ .name = "Maxlinear Ethernet MxL86211C",
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_init = gpy_config_init,
+ .probe = gpy_probe,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = gpy_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .read_status = gpy_read_status,
+ .config_intr = gpy_config_intr,
+ .handle_interrupt = gpy_handle_interrupt,
+ .set_wol = gpy_set_wol,
+ .get_wol = gpy_get_wol,
+ .set_loopback = gpy_loopback,
+ .led_brightness_set = gpy_led_brightness_set,
+ .led_hw_is_supported = gpy_led_hw_is_supported,
+ .led_hw_control_get = gpy_led_hw_control_get,
+ .led_hw_control_set = gpy_led_hw_control_set,
+ .led_polarity_set = gpy_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_MXL86252),
+ .name = "MaxLinear Ethernet MxL86252",
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_init = gpy_config_init,
+ .probe = gpy_probe,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = gpy_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .read_status = gpy_read_status,
+ .config_intr = gpy_config_intr,
+ .handle_interrupt = gpy_handle_interrupt,
+ .set_wol = gpy_set_wol,
+ .get_wol = gpy_get_wol,
+ .set_loopback = gpy_loopback,
+ .led_brightness_set = gpy_led_brightness_set,
+ .led_hw_is_supported = gpy_led_hw_is_supported,
+ .led_hw_control_get = gpy_led_hw_control_get,
+ .led_hw_control_set = gpy_led_hw_control_set,
+ .led_polarity_set = gpy_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_MXL86282),
+ .name = "MaxLinear Ethernet MxL86282",
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_init = gpy_config_init,
+ .probe = gpy_probe,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = gpy_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .read_status = gpy_read_status,
+ .config_intr = gpy_config_intr,
+ .handle_interrupt = gpy_handle_interrupt,
+ .set_wol = gpy_set_wol,
+ .get_wol = gpy_get_wol,
+ .set_loopback = gpy_loopback,
+ .led_brightness_set = gpy_led_brightness_set,
+ .led_hw_is_supported = gpy_led_hw_is_supported,
+ .led_hw_control_get = gpy_led_hw_control_get,
+ .led_hw_control_set = gpy_led_hw_control_set,
+ .led_polarity_set = gpy_led_polarity_set,
+ },
};
module_phy_driver(gpy_drivers);
-static struct mdio_device_id __maybe_unused gpy_tbl[] = {
+static const struct mdio_device_id __maybe_unused gpy_tbl[] = {
{PHY_ID_MATCH_MODEL(PHY_ID_GPY2xx)},
{PHY_ID_GPY115B, PHY_ID_GPYx15B_MASK},
{PHY_ID_MATCH_MODEL(PHY_ID_GPY115C)},
@@ -1287,6 +1396,9 @@ static struct mdio_device_id __maybe_unused gpy_tbl[] = {
{PHY_ID_MATCH_MODEL(PHY_ID_GPY241B)},
{PHY_ID_MATCH_MODEL(PHY_ID_GPY241BM)},
{PHY_ID_MATCH_MODEL(PHY_ID_GPY245B)},
+ {PHY_ID_MATCH_MODEL(PHY_ID_MXL86211C)},
+ {PHY_ID_MATCH_MODEL(PHY_ID_MXL86252)},
+ {PHY_ID_MATCH_MODEL(PHY_ID_MXL86282)},
{ }
};
MODULE_DEVICE_TABLE(mdio, gpy_tbl);
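
mxl862x2_hwmon_read() above converts the raw VSPEC1_TEMP_STA sample by multiplying the sign-extended value by 78125 and dividing by 10000, i.e. one LSB corresponds to 7.8125 millidegrees Celsius (1/128 of a degree), with the result reported in millidegrees as hwmon expects. A hedged sketch of the same conversion in isolation; the helper name, the assumed 1/128 degC resolution and the sample value are illustrative, only the scaling factor is taken from the patch.

static long mxl862x2_raw_to_millicelsius(u16 raw)
{
	long tmp = (s16)raw;	/* sign-extend the 16-bit sample */

	tmp *= 78125;		/* 7.8125 mdegC per LSB, scaled by 10^4 */
	return tmp / 10000;	/* e.g. raw 0x0C80 (3200) -> 25000 mdegC */
}
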
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 9ae9cc6b23c2..7f3ff322892e 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -173,7 +173,7 @@ MODULE_DESCRIPTION("NatSemi PHY driver");
MODULE_AUTHOR("Stuart Menefy");
MODULE_LICENSE("GPL");
-static struct mdio_device_id __maybe_unused ns_tbl[] = {
+static const struct mdio_device_id __maybe_unused ns_tbl[] = {
{ DP83865_PHY_ID, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/ncn26000.c b/drivers/net/phy/ncn26000.c
index 5680584f659e..cabdd83c614f 100644
--- a/drivers/net/phy/ncn26000.c
+++ b/drivers/net/phy/ncn26000.c
@@ -159,7 +159,7 @@ static struct phy_driver ncn26000_driver[] = {
module_phy_driver(ncn26000_driver);
-static struct mdio_device_id __maybe_unused ncn26000_tbl[] = {
+static const struct mdio_device_id __maybe_unused ncn26000_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_NCN26000) },
{ }
};
diff --git a/drivers/net/phy/nxp-c45-tja11xx-macsec.c b/drivers/net/phy/nxp-c45-tja11xx-macsec.c
index 550ef08970f4..fc897ba79b03 100644
--- a/drivers/net/phy/nxp-c45-tja11xx-macsec.c
+++ b/drivers/net/phy/nxp-c45-tja11xx-macsec.c
@@ -926,7 +926,6 @@ static int nxp_c45_mdo_dev_open(struct macsec_context *ctx)
struct phy_device *phydev = ctx->phydev;
struct nxp_c45_phy *priv = phydev->priv;
struct nxp_c45_secy *phy_secy;
- int any_bit_set;
phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
if (IS_ERR(phy_secy))
@@ -939,8 +938,7 @@ static int nxp_c45_mdo_dev_open(struct macsec_context *ctx)
if (phy_secy->rx_sc)
nxp_c45_rx_sc_en(phydev, phy_secy->rx_sc, true);
- any_bit_set = find_first_bit(priv->macsec->secy_bitmap, TX_SC_MAX);
- if (any_bit_set == TX_SC_MAX)
+ if (bitmap_empty(priv->macsec->secy_bitmap, TX_SC_MAX))
nxp_c45_macsec_en(phydev, true);
set_bit(phy_secy->secy_id, priv->macsec->secy_bitmap);
@@ -953,7 +951,6 @@ static int nxp_c45_mdo_dev_stop(struct macsec_context *ctx)
struct phy_device *phydev = ctx->phydev;
struct nxp_c45_phy *priv = phydev->priv;
struct nxp_c45_secy *phy_secy;
- int any_bit_set;
phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
if (IS_ERR(phy_secy))
@@ -967,8 +964,7 @@ static int nxp_c45_mdo_dev_stop(struct macsec_context *ctx)
nxp_c45_set_rx_sc0_impl(phydev, false);
clear_bit(phy_secy->secy_id, priv->macsec->secy_bitmap);
- any_bit_set = find_first_bit(priv->macsec->secy_bitmap, TX_SC_MAX);
- if (any_bit_set == TX_SC_MAX)
+ if (bitmap_empty(priv->macsec->secy_bitmap, TX_SC_MAX))
nxp_c45_macsec_en(phydev, false);
return 0;
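
The two hunks above replace the "find_first_bit() == TX_SC_MAX" idiom with bitmap_empty(). The replacement is behaviour-preserving because find_first_bit() returns the bitmap size when no bit is set, so comparing against TX_SC_MAX is exactly an emptiness test. A small sketch of that equivalence; the helper name is illustrative only.

static bool no_secy_enabled(const unsigned long *secy_bitmap)
{
	/* Equivalent to: find_first_bit(secy_bitmap, TX_SC_MAX) == TX_SC_MAX */
	return bitmap_empty(secy_bitmap, TX_SC_MAX);
}
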
diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
index ade544bc007d..f526528d2e32 100644
--- a/drivers/net/phy/nxp-c45-tja11xx.c
+++ b/drivers/net/phy/nxp-c45-tja11xx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* NXP C45 PHY driver
- * Copyright 2021-2023 NXP
+ * Copyright 2021-2025 NXP
* Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
*/
@@ -19,9 +19,16 @@
#include "nxp-c45-tja11xx.h"
+/* Same id: TJA1103, TJA1104 */
#define PHY_ID_TJA_1103 0x001BB010
+/* Same id: TJA1120, TJA1121 */
#define PHY_ID_TJA_1120 0x001BB031
+#define VEND1_DEVICE_ID3 0x0004
+#define TJA1120_DEV_ID3_SILICON_VERSION GENMASK(15, 12)
+#define TJA1120_DEV_ID3_SAMPLE_TYPE GENMASK(11, 8)
+#define DEVICE_ID3_SAMPLE_TYPE_R 0x9
+
#define VEND1_DEVICE_CONTROL 0x0040
#define DEVICE_CONTROL_RESET BIT(15)
#define DEVICE_CONTROL_CONFIG_GLOBAL_EN BIT(14)
@@ -109,6 +116,9 @@
#define MII_BASIC_CONFIG_RMII 0x5
#define MII_BASIC_CONFIG_MII 0x4
+#define VEND1_SGMII_BASIC_CONTROL 0xB000
+#define SGMII_LPM BIT(11)
+
#define VEND1_SYMBOL_ERROR_CNT_XTD 0x8351
#define EXTENDED_CNT_EN BIT(15)
#define VEND1_MONITOR_STATUS 0xAC80
@@ -752,9 +762,6 @@ static int nxp_c45_perout_enable(struct nxp_c45_phy *priv,
struct phy_device *phydev = priv->phydev;
int pin;
- if (perout->flags & ~PTP_PEROUT_PHASE)
- return -EOPNOTSUPP;
-
pin = ptp_find_pin(priv->ptp_clock, PTP_PF_PEROUT, perout->index);
if (pin < 0)
return pin;
@@ -850,12 +857,6 @@ static int nxp_c45_extts_enable(struct nxp_c45_phy *priv,
const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
int pin;
- if (extts->flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* Sampling on both edges is not supported */
if ((extts->flags & PTP_RISING_EDGE) &&
(extts->flags & PTP_FALLING_EDGE) &&
@@ -951,6 +952,10 @@ static int nxp_c45_init_ptp_clock(struct nxp_c45_phy *priv)
.n_pins = ARRAY_SIZE(nxp_c45_ptp_pins),
.n_ext_ts = 1,
.n_per_out = 1,
+ .supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS,
+ .supported_perout_flags = PTP_PEROUT_PHASE,
};
priv->ptp_clock = ptp_clock_register(&priv->caps,
@@ -1007,9 +1012,22 @@ static bool nxp_c45_rxtstamp(struct mii_timestamper *mii_ts,
return true;
}
-static int nxp_c45_hwtstamp(struct mii_timestamper *mii_ts,
- struct kernel_hwtstamp_config *cfg,
- struct netlink_ext_ack *extack)
+static int nxp_c45_hwtstamp_get(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *cfg)
+{
+ struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
+ mii_ts);
+
+ cfg->tx_type = priv->hwts_tx;
+ cfg->rx_filter = priv->hwts_rx ? HWTSTAMP_FILTER_PTP_V2_L2_EVENT
+ : HWTSTAMP_FILTER_NONE;
+
+ return 0;
+}
+
+static int nxp_c45_hwtstamp_set(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
mii_ts);
@@ -1297,6 +1315,8 @@ static int nxp_c45_soft_reset(struct phy_device *phydev)
if (ret)
return ret;
+ usleep_range(2000, 2050);
+
return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
VEND1_DEVICE_CONTROL, ret,
!(ret & DEVICE_CONTROL_RESET), 20000,
@@ -1591,6 +1611,63 @@ static int nxp_c45_set_phy_mode(struct phy_device *phydev)
return 0;
}
+/* Errata: ES_TJA1120 and ES_TJA1121 Rev. 1.0 — 28 November 2024 Section 3.1 & 3.2 */
+static void nxp_c45_tja1120_errata(struct phy_device *phydev)
+{
+ bool macsec_ability, sgmii_ability;
+ int silicon_version, sample_type;
+ int phy_abilities;
+ int ret = 0;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_ID3);
+ if (ret < 0)
+ return;
+
+ sample_type = FIELD_GET(TJA1120_DEV_ID3_SAMPLE_TYPE, ret);
+ if (sample_type != DEVICE_ID3_SAMPLE_TYPE_R)
+ return;
+
+ silicon_version = FIELD_GET(TJA1120_DEV_ID3_SILICON_VERSION, ret);
+
+ phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_PORT_ABILITIES);
+ macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
+ sgmii_ability = !!(phy_abilities & SGMII_ABILITY);
+ if ((!macsec_ability && silicon_version == 2) ||
+ (macsec_ability && silicon_version == 1)) {
+ /* TJA1120/TJA1121 PHY configuration errata workaround.
+ * Apply the PHY write sequence before link up.
+ */
+ if (!macsec_ability) {
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x4b95);
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0xf3cd);
+ } else {
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x89c7);
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0x0893);
+ }
+
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x0476, 0x58a0);
+
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x8921, 0xa3a);
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x89F1, 0x16c1);
+
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x0);
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0x0);
+
+ if (sgmii_ability) {
+ /* TJA1120B/TJA1121B SGMII PCS restart errata workaround.
+ * Put SGMII PCS into power down mode and back up.
+ */
+ phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_SGMII_BASIC_CONTROL,
+ SGMII_LPM);
+ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_SGMII_BASIC_CONTROL,
+ SGMII_LPM);
+ }
+ }
+}
+
static int nxp_c45_config_init(struct phy_device *phydev)
{
int ret;
@@ -1607,6 +1684,9 @@ static int nxp_c45_config_init(struct phy_device *phydev)
phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 1);
phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 2);
+ if (phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, GENMASK(31, 4)))
+ nxp_c45_tja1120_errata(phydev);
+
phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG,
PHY_CONFIG_AUTO);
@@ -1682,7 +1762,8 @@ static int nxp_c45_probe(struct phy_device *phydev)
IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) {
priv->mii_ts.rxtstamp = nxp_c45_rxtstamp;
priv->mii_ts.txtstamp = nxp_c45_txtstamp;
- priv->mii_ts.hwtstamp = nxp_c45_hwtstamp;
+ priv->mii_ts.hwtstamp_set = nxp_c45_hwtstamp_set;
+ priv->mii_ts.hwtstamp_get = nxp_c45_hwtstamp_get;
priv->mii_ts.ts_info = nxp_c45_ts_info;
phydev->mii_ts = &priv->mii_ts;
ret = nxp_c45_init_ptp_clock(priv);
@@ -1886,6 +1967,41 @@ static void tja1120_nmi_handler(struct phy_device *phydev,
}
}
+static int nxp_c45_macsec_ability(struct phy_device *phydev)
+{
+ bool macsec_ability;
+ int phy_abilities;
+
+ phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_PORT_ABILITIES);
+ macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
+
+ return macsec_ability;
+}
+
+static bool tja11xx_phy_id_compare(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
+{
+ u32 id = phydev->is_c45 ? phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] :
+ phydev->phy_id;
+
+ return phy_id_compare(id, phydrv->phy_id, phydrv->phy_id_mask);
+}
+
+static int tja11xx_no_macsec_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
+{
+ return tja11xx_phy_id_compare(phydev, phydrv) &&
+ !nxp_c45_macsec_ability(phydev);
+}
+
+static int tja11xx_macsec_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
+{
+ return tja11xx_phy_id_compare(phydev, phydrv) &&
+ nxp_c45_macsec_ability(phydev);
+}
+
static const struct nxp_c45_regmap tja1120_regmap = {
.vend1_ptp_clk_period = 0x1020,
.vend1_event_msg_filt = 0x9010,
@@ -1978,6 +2094,32 @@ static struct phy_driver nxp_c45_driver[] = {
.get_sqi = nxp_c45_get_sqi,
.get_sqi_max = nxp_c45_get_sqi_max,
.remove = nxp_c45_remove,
+ .match_phy_device = tja11xx_no_macsec_match_phy_device,
+ },
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
+ .name = "NXP C45 TJA1104",
+ .get_features = nxp_c45_get_features,
+ .driver_data = &tja1103_phy_data,
+ .probe = nxp_c45_probe,
+ .soft_reset = nxp_c45_soft_reset,
+ .config_aneg = genphy_c45_config_aneg,
+ .config_init = nxp_c45_config_init,
+ .config_intr = tja1103_config_intr,
+ .handle_interrupt = nxp_c45_handle_interrupt,
+ .read_status = genphy_c45_read_status,
+ .suspend = genphy_c45_pma_suspend,
+ .resume = genphy_c45_pma_resume,
+ .get_sset_count = nxp_c45_get_sset_count,
+ .get_strings = nxp_c45_get_strings,
+ .get_stats = nxp_c45_get_stats,
+ .cable_test_start = nxp_c45_cable_test_start,
+ .cable_test_get_status = nxp_c45_cable_test_get_status,
+ .set_loopback = genphy_c45_loopback,
+ .get_sqi = nxp_c45_get_sqi,
+ .get_sqi_max = nxp_c45_get_sqi_max,
+ .remove = nxp_c45_remove,
+ .match_phy_device = tja11xx_macsec_match_phy_device,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120),
@@ -2003,12 +2145,39 @@ static struct phy_driver nxp_c45_driver[] = {
.get_sqi = nxp_c45_get_sqi,
.get_sqi_max = nxp_c45_get_sqi_max,
.remove = nxp_c45_remove,
+ .match_phy_device = tja11xx_no_macsec_match_phy_device,
+ },
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120),
+ .name = "NXP C45 TJA1121",
+ .get_features = nxp_c45_get_features,
+ .driver_data = &tja1120_phy_data,
+ .probe = nxp_c45_probe,
+ .soft_reset = nxp_c45_soft_reset,
+ .config_aneg = genphy_c45_config_aneg,
+ .config_init = nxp_c45_config_init,
+ .config_intr = tja1120_config_intr,
+ .handle_interrupt = nxp_c45_handle_interrupt,
+ .read_status = genphy_c45_read_status,
+ .link_change_notify = tja1120_link_change_notify,
+ .suspend = genphy_c45_pma_suspend,
+ .resume = genphy_c45_pma_resume,
+ .get_sset_count = nxp_c45_get_sset_count,
+ .get_strings = nxp_c45_get_strings,
+ .get_stats = nxp_c45_get_stats,
+ .cable_test_start = nxp_c45_cable_test_start,
+ .cable_test_get_status = nxp_c45_cable_test_get_status,
+ .set_loopback = genphy_c45_loopback,
+ .get_sqi = nxp_c45_get_sqi,
+ .get_sqi_max = nxp_c45_get_sqi_max,
+ .remove = nxp_c45_remove,
+ .match_phy_device = tja11xx_macsec_match_phy_device,
},
};
module_phy_driver(nxp_c45_driver);
-static struct mdio_device_id __maybe_unused nxp_c45_tbl[] = {
+static const struct mdio_device_id __maybe_unused nxp_c45_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103) },
{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120) },
{ /*sentinel*/ },
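
nxp_c45_tja1120_errata() above gates the workaround on DEVICE_ID3: bits 15:12 carry the silicon version and bits 11:8 the sample type, and only R samples (0x9) are affected. A hedged sketch of the decode; the helper name and the example register value are invented for illustration.

static bool tja1120_is_r_sample(int id3)
{
	/* e.g. id3 = 0x2900: FIELD_GET(TJA1120_DEV_ID3_SILICON_VERSION, id3)
	 * yields 2 and FIELD_GET(TJA1120_DEV_ID3_SAMPLE_TYPE, id3) yields 0x9,
	 * so the errata path above may apply, depending on the MACsec ability.
	 */
	return FIELD_GET(TJA1120_DEV_ID3_SAMPLE_TYPE, id3) ==
	       DEVICE_ID3_SAMPLE_TYPE_R;
}
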
diff --git a/drivers/net/phy/nxp-cbtx.c b/drivers/net/phy/nxp-cbtx.c
index 3d25491043a3..3286fcb4f47e 100644
--- a/drivers/net/phy/nxp-cbtx.c
+++ b/drivers/net/phy/nxp-cbtx.c
@@ -215,7 +215,7 @@ static struct phy_driver cbtx_driver[] = {
module_phy_driver(cbtx_driver);
-static struct mdio_device_id __maybe_unused cbtx_tbl[] = {
+static const struct mdio_device_id __maybe_unused cbtx_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_CBTX_SJA1110) },
{ },
};
diff --git a/drivers/net/phy/nxp-tja11xx.c b/drivers/net/phy/nxp-tja11xx.c
index 2c263ae44b4f..3c38a8ddae2f 100644
--- a/drivers/net/phy/nxp-tja11xx.c
+++ b/drivers/net/phy/nxp-tja11xx.c
@@ -21,12 +21,14 @@
#define PHY_ID_TJA1100 0x0180dc40
#define PHY_ID_TJA1101 0x0180dd00
#define PHY_ID_TJA1102 0x0180dc80
+#define PHY_ID_TJA1102S 0x0180dc90
#define MII_ECTRL 17
#define MII_ECTRL_LINK_CONTROL BIT(15)
#define MII_ECTRL_POWER_MODE_MASK GENMASK(14, 11)
#define MII_ECTRL_POWER_MODE_NO_CHANGE (0x0 << 11)
#define MII_ECTRL_POWER_MODE_NORMAL (0x3 << 11)
+#define MII_ECTRL_POWER_MODE_SLEEP (0xa << 11)
#define MII_ECTRL_POWER_MODE_STANDBY (0xc << 11)
#define MII_ECTRL_CABLE_TEST BIT(5)
#define MII_ECTRL_CONFIG_EN BIT(2)
@@ -78,12 +80,13 @@
#define MII_COMMCFG 27
#define MII_COMMCFG_AUTO_OP BIT(15)
+#define MII_CFG3 28
+#define MII_CFG3_PHY_EN BIT(0)
+
/* Configure REF_CLK as input in RMII mode */
#define TJA110X_RMII_MODE_REFCLK_IN BIT(0)
struct tja11xx_priv {
- char *hwmon_name;
- struct device *hwmon_dev;
struct phy_device *phydev;
struct work_struct phy_register_work;
u32 flags;
@@ -179,6 +182,14 @@ static int tja11xx_wakeup(struct phy_device *phydev)
return ret;
return tja11xx_enable_link_control(phydev);
+ case MII_ECTRL_POWER_MODE_SLEEP:
+ switch (phydev->phy_id & PHY_ID_MASK) {
+ case PHY_ID_TJA1102S:
+ /* Enable the PHY; it may be disabled due to pin strapping */
+ return phy_set_bits(phydev, MII_CFG3, MII_CFG3_PHY_EN);
+ default:
+ return 0;
+ }
default:
break;
}
@@ -316,6 +327,7 @@ static int tja11xx_config_init(struct phy_device *phydev)
if (ret)
return ret;
break;
+ case PHY_ID_TJA1102S:
case PHY_ID_TJA1101:
reg_mask = MII_CFG1_INTERFACE_MODE_MASK;
ret = tja11xx_get_interface_mode(phydev);
@@ -494,19 +506,12 @@ static const struct hwmon_chip_info tja11xx_hwmon_chip_info = {
static int tja11xx_hwmon_register(struct phy_device *phydev,
struct tja11xx_priv *priv)
{
- struct device *dev = &phydev->mdio.dev;
-
- priv->hwmon_name = devm_hwmon_sanitize_name(dev, dev_name(dev));
- if (IS_ERR(priv->hwmon_name))
- return PTR_ERR(priv->hwmon_name);
+ struct device *hdev, *dev = &phydev->mdio.dev;
- priv->hwmon_dev =
- devm_hwmon_device_register_with_info(dev, priv->hwmon_name,
- phydev,
- &tja11xx_hwmon_chip_info,
- NULL);
-
- return PTR_ERR_OR_ZERO(priv->hwmon_dev);
+ hdev = devm_hwmon_device_register_with_info(dev, NULL, phydev,
+ &tja11xx_hwmon_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hdev);
}
static int tja11xx_parse_dt(struct phy_device *phydev)
@@ -646,12 +651,14 @@ static int tja1102_match_phy_device(struct phy_device *phydev, bool port0)
return !ret;
}
-static int tja1102_p0_match_phy_device(struct phy_device *phydev)
+static int tja1102_p0_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return tja1102_match_phy_device(phydev, true);
}
-static int tja1102_p1_match_phy_device(struct phy_device *phydev)
+static int tja1102_p1_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return tja1102_match_phy_device(phydev, false);
}
@@ -883,15 +890,39 @@ static struct phy_driver tja11xx_driver[] = {
.handle_interrupt = tja11xx_handle_interrupt,
.cable_test_start = tja11xx_cable_test_start,
.cable_test_get_status = tja11xx_cable_test_get_status,
+ }, {
+ PHY_ID_MATCH_MODEL(PHY_ID_TJA1102S),
+ .name = "NXP TJA1102S",
+ .features = PHY_BASIC_T1_FEATURES,
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = tja11xx_probe,
+ .soft_reset = tja11xx_soft_reset,
+ .config_aneg = tja11xx_config_aneg,
+ .config_init = tja11xx_config_init,
+ .read_status = tja11xx_read_status,
+ .get_sqi = tja11xx_get_sqi,
+ .get_sqi_max = tja11xx_get_sqi_max,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .set_loopback = genphy_loopback,
+ /* Statistics */
+ .get_sset_count = tja11xx_get_sset_count,
+ .get_strings = tja11xx_get_strings,
+ .get_stats = tja11xx_get_stats,
+ .config_intr = tja11xx_config_intr,
+ .handle_interrupt = tja11xx_handle_interrupt,
+ .cable_test_start = tja11xx_cable_test_start,
+ .cable_test_get_status = tja11xx_cable_test_get_status,
}
};
module_phy_driver(tja11xx_driver);
-static struct mdio_device_id __maybe_unused tja11xx_tbl[] = {
+static const struct mdio_device_id __maybe_unused tja11xx_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_TJA1100) },
{ PHY_ID_MATCH_MODEL(PHY_ID_TJA1101) },
{ PHY_ID_MATCH_MODEL(PHY_ID_TJA1102) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_TJA1102S) },
{ }
};
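
The TJA1102S wakeup handling added above only touches MII_CFG3_PHY_EN when the MII_ECTRL power-mode field (bits 14:11) reports SLEEP. A minimal sketch of that field check; the helper name is illustrative only.

static bool tja11xx_in_sleep_mode(int ectrl)
{
	/* MII_ECTRL_POWER_MODE_SLEEP corresponds to 0xa in bits 14:11 */
	return (ectrl & MII_ECTRL_POWER_MODE_MASK) == MII_ECTRL_POWER_MODE_SLEEP;
}
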
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index 5695935fdce9..d48aa7231b37 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -7,8 +7,10 @@
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
+#include <linux/ethtool_netlink.h>
#include "mdio-open-alliance.h"
+#include "phylib-internal.h"
/**
* genphy_c45_baset1_able - checks if the PMA has BASE-T1 extended abilities
@@ -146,12 +148,12 @@ int genphy_c45_pma_setup_forced(struct phy_device *phydev)
ctrl2 |= MDIO_PMA_CTRL2_1000BT;
break;
case SPEED_2500:
- ctrl1 |= MDIO_CTRL1_SPEED2_5G;
+ ctrl1 |= MDIO_PMA_CTRL1_SPEED2_5G;
/* Assume 2.5Gbase-T */
ctrl2 |= MDIO_PMA_CTRL2_2_5GBT;
break;
case SPEED_5000:
- ctrl1 |= MDIO_CTRL1_SPEED5G;
+ ctrl1 |= MDIO_PMA_CTRL1_SPEED5G;
/* Assume 5Gbase-T */
ctrl2 |= MDIO_PMA_CTRL2_5GBT;
break;
@@ -484,8 +486,8 @@ static int genphy_c45_baset1_read_lpa(struct phy_device *phydev)
mii_t1_adv_l_mod_linkmode_t(phydev->lp_advertising, 0);
mii_t1_adv_m_mod_linkmode_t(phydev->lp_advertising, 0);
- phydev->pause = 0;
- phydev->asym_pause = 0;
+ phydev->pause = false;
+ phydev->asym_pause = false;
return 0;
}
@@ -497,8 +499,8 @@ static int genphy_c45_baset1_read_lpa(struct phy_device *phydev)
return val;
mii_t1_adv_l_mod_linkmode_t(phydev->lp_advertising, val);
- phydev->pause = val & MDIO_AN_T1_ADV_L_PAUSE_CAP ? 1 : 0;
- phydev->asym_pause = val & MDIO_AN_T1_ADV_L_PAUSE_ASYM ? 1 : 0;
+ phydev->pause = val & MDIO_AN_T1_ADV_L_PAUSE_CAP;
+ phydev->asym_pause = val & MDIO_AN_T1_ADV_L_PAUSE_ASYM;
val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_LP_M);
if (val < 0)
@@ -535,8 +537,8 @@ int genphy_c45_read_lpa(struct phy_device *phydev)
phydev->lp_advertising);
mii_10gbt_stat_mod_linkmode_lpa_t(phydev->lp_advertising, 0);
mii_adv_mod_linkmode_adv_t(phydev->lp_advertising, 0);
- phydev->pause = 0;
- phydev->asym_pause = 0;
+ phydev->pause = false;
+ phydev->asym_pause = false;
return 0;
}
@@ -550,8 +552,8 @@ int genphy_c45_read_lpa(struct phy_device *phydev)
return val;
mii_adv_mod_linkmode_adv_t(phydev->lp_advertising, val);
- phydev->pause = val & LPA_PAUSE_CAP ? 1 : 0;
- phydev->asym_pause = val & LPA_PAUSE_ASYM ? 1 : 0;
+ phydev->pause = val & LPA_PAUSE_CAP;
+ phydev->asym_pause = val & LPA_PAUSE_ASYM;
/* Read the link partner's 10G advertisement */
val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
@@ -616,10 +618,10 @@ int genphy_c45_read_pma(struct phy_device *phydev)
case MDIO_PMA_CTRL1_SPEED1000:
phydev->speed = SPEED_1000;
break;
- case MDIO_CTRL1_SPEED2_5G:
+ case MDIO_PMA_CTRL1_SPEED2_5G:
phydev->speed = SPEED_2500;
break;
- case MDIO_CTRL1_SPEED5G:
+ case MDIO_PMA_CTRL1_SPEED5G:
phydev->speed = SPEED_5000;
break;
case MDIO_CTRL1_SPEED10G:
@@ -680,18 +682,14 @@ EXPORT_SYMBOL_GPL(genphy_c45_read_mdix);
* @phydev: target phy_device struct
* @adv: the linkmode advertisement settings
*/
-int genphy_c45_write_eee_adv(struct phy_device *phydev, unsigned long *adv)
+static int genphy_c45_write_eee_adv(struct phy_device *phydev,
+ unsigned long *adv)
{
int val, changed = 0;
if (linkmode_intersects(phydev->supported_eee, PHY_EEE_CAP1_FEATURES)) {
val = linkmode_to_mii_eee_cap1_t(adv);
- /* In eee_broken_modes are stored MDIO_AN_EEE_ADV specific raw
- * register values.
- */
- val &= ~phydev->eee_broken_modes;
-
/* IEEE 802.3-2018 45.2.7.13 EEE advertisement 1
* (Register 7.60)
*/
@@ -942,7 +940,7 @@ EXPORT_SYMBOL_GPL(genphy_c45_read_eee_abilities);
*/
int genphy_c45_an_config_eee_aneg(struct phy_device *phydev)
{
- if (!phydev->eee_enabled) {
+ if (!phydev->eee_cfg.eee_enabled) {
__ETHTOOL_DECLARE_LINK_MODE_MASK(adv) = {};
return genphy_c45_write_eee_adv(phydev, adv);
@@ -950,6 +948,7 @@ int genphy_c45_an_config_eee_aneg(struct phy_device *phydev)
return genphy_c45_write_eee_adv(phydev, phydev->advertising_eee);
}
+EXPORT_SYMBOL_GPL(genphy_c45_an_config_eee_aneg);
/**
* genphy_c45_pma_baset1_read_abilities - read supported baset1 link modes from PMA
@@ -1173,8 +1172,8 @@ int genphy_c45_read_status(struct phy_device *phydev)
phydev->speed = SPEED_UNKNOWN;
phydev->duplex = DUPLEX_UNKNOWN;
- phydev->pause = 0;
- phydev->asym_pause = 0;
+ phydev->pause = false;
+ phydev->asym_pause = false;
if (phydev->autoneg == AUTONEG_ENABLE) {
ret = genphy_c45_read_lpa(phydev);
@@ -1230,8 +1229,11 @@ int gen10g_config_aneg(struct phy_device *phydev)
}
EXPORT_SYMBOL_GPL(gen10g_config_aneg);
-int genphy_c45_loopback(struct phy_device *phydev, bool enable)
+int genphy_c45_loopback(struct phy_device *phydev, bool enable, int speed)
{
+ if (enable && speed)
+ return -EOPNOTSUPP;
+
return phy_modify_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1,
MDIO_PCS_CTRL1_LOOPBACK,
enable ? MDIO_PCS_CTRL1_LOOPBACK : 0);
@@ -1467,46 +1469,32 @@ EXPORT_SYMBOL_GPL(genphy_c45_plca_get_status);
/**
* genphy_c45_eee_is_active - get EEE status
* @phydev: target phy_device struct
- * @adv: variable to store advertised linkmodes
* @lp: variable to store LP advertised linkmodes
- * @is_enabled: variable to store EEE enabled/disabled configuration value
*
- * Description: this function will read local and link partner PHY
- * advertisements. Compare them return current EEE state.
+ * Description: this function reads the link partner's EEE advertisement and
+ * compares it with the local advertisement to determine the current EEE state.
*/
-int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *adv,
- unsigned long *lp, bool *is_enabled)
+int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *lp)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp_adv) = {};
__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp_lp) = {};
__ETHTOOL_DECLARE_LINK_MODE_MASK(common);
- bool eee_enabled, eee_active;
int ret;
- ret = genphy_c45_read_eee_adv(phydev, tmp_adv);
- if (ret)
- return ret;
+ if (!phydev->eee_cfg.eee_enabled)
+ return 0;
ret = genphy_c45_read_eee_lpa(phydev, tmp_lp);
if (ret)
return ret;
- eee_enabled = !linkmode_empty(tmp_adv);
- linkmode_and(common, tmp_adv, tmp_lp);
- if (eee_enabled && !linkmode_empty(common))
- eee_active = phy_check_valid(phydev->speed, phydev->duplex,
- common);
- else
- eee_active = false;
-
- if (adv)
- linkmode_copy(adv, tmp_adv);
if (lp)
linkmode_copy(lp, tmp_lp);
- if (is_enabled)
- *is_enabled = eee_enabled;
- return eee_active;
+ linkmode_and(common, phydev->advertising_eee, tmp_lp);
+ if (linkmode_empty(common))
+ return 0;
+
+ return phy_check_valid(phydev->speed, phydev->duplex, common);
}
EXPORT_SYMBOL(genphy_c45_eee_is_active);
@@ -1521,21 +1509,16 @@ EXPORT_SYMBOL(genphy_c45_eee_is_active);
int genphy_c45_ethtool_get_eee(struct phy_device *phydev,
struct ethtool_keee *data)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(adv) = {};
- __ETHTOOL_DECLARE_LINK_MODE_MASK(lp) = {};
- bool is_enabled;
int ret;
- ret = genphy_c45_eee_is_active(phydev, adv, lp, &is_enabled);
+ ret = genphy_c45_eee_is_active(phydev, data->lp_advertised);
if (ret < 0)
return ret;
- data->eee_enabled = is_enabled;
- data->eee_active = ret;
- linkmode_copy(data->supported, phydev->supported_eee);
- linkmode_copy(data->advertised, adv);
- linkmode_copy(data->lp_advertised, lp);
-
+ data->eee_active = phydev->eee_active;
+ linkmode_andnot(data->supported, phydev->supported_eee,
+ phydev->eee_disabled_modes);
+ linkmode_copy(data->advertised, phydev->advertising_eee);
return 0;
}
EXPORT_SYMBOL(genphy_c45_ethtool_get_eee);
@@ -1568,15 +1551,14 @@ int genphy_c45_ethtool_set_eee(struct phy_device *phydev,
phydev_warn(phydev, "At least some EEE link modes are not supported.\n");
return -EINVAL;
}
- } else {
- adv = phydev->supported_eee;
- }
- linkmode_copy(phydev->advertising_eee, adv);
+ linkmode_andnot(phydev->advertising_eee, adv,
+ phydev->eee_disabled_modes);
+ } else if (linkmode_empty(phydev->advertising_eee)) {
+ phy_advertise_eee_all(phydev);
+ }
}
- phydev->eee_enabled = data->eee_enabled;
-
ret = genphy_c45_an_config_eee_aneg(phydev);
if (ret > 0) {
ret = phy_restart_aneg(phydev);
@@ -1593,9 +1575,260 @@ int genphy_c45_ethtool_set_eee(struct phy_device *phydev,
}
EXPORT_SYMBOL(genphy_c45_ethtool_set_eee);
-struct phy_driver genphy_c45_driver = {
- .phy_id = 0xffffffff,
- .phy_id_mask = 0xffffffff,
- .name = "Generic Clause 45 PHY",
- .read_status = genphy_c45_read_status,
-};
+/**
+ * oatc14_cable_test_get_result_code - Convert hardware cable test status to
+ * ethtool result code.
+ * @status: The hardware-reported cable test status
+ *
+ * This helper function maps the OATC14 HDD cable test status to the
+ * corresponding ethtool cable test result code. It provides a translation
+ * between the device-specific status values and the standardized ethtool
+ * result codes.
+ *
+ * Return:
+ * * ETHTOOL_A_CABLE_RESULT_CODE_OK - Cable is OK
+ * * ETHTOOL_A_CABLE_RESULT_CODE_OPEN - Open circuit detected
+ * * ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT - Short circuit detected
+ * * ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC - Status not detectable or invalid
+ */
+static int oatc14_cable_test_get_result_code(enum oatc14_hdd_status status)
+{
+ switch (status) {
+ case OATC14_HDD_STATUS_CABLE_OK:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ case OATC14_HDD_STATUS_OPEN:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ case OATC14_HDD_STATUS_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+ case OATC14_HDD_STATUS_NOT_DETECTABLE:
+ default:
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+}
+
+/**
+ * genphy_c45_oatc14_cable_test_get_status - Get status of OATC14 10Base-T1S
+ * PHY cable test.
+ * @phydev: pointer to the PHY device structure
+ * @finished: pointer to a boolean set true if the test is complete
+ *
+ * Retrieves the current status of the OATC14 10Base-T1S PHY cable test.
+ * This function reads the OATC14 HDD register to determine whether the test
+ * results are valid and whether the test has finished.
+ *
+ * If the test is complete, the function reports the cable test result via
+ * the ethtool cable test interface using ethnl_cable_test_result(), and then
+ * clears the test control bit in the PHY register to reset the test state.
+ *
+ * Return: 0 on success, or a negative error code on failure (e.g. register
+ * read/write error).
+ */
+int genphy_c45_oatc14_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ int ret;
+ u8 sts;
+
+ *finished = false;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_HDD);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & OATC14_HDD_VALID))
+ return 0;
+
+ *finished = true;
+
+ sts = FIELD_GET(OATC14_HDD_SHORT_OPEN_STATUS, ret);
+
+ ret = ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ oatc14_cable_test_get_result_code(sts));
+ if (ret)
+ return ret;
+
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2,
+ MDIO_OATC14_HDD, OATC14_HDD_CONTROL);
+}
+EXPORT_SYMBOL(genphy_c45_oatc14_cable_test_get_status);
+
+/**
+ * genphy_c45_oatc14_cable_test_start - Start a cable test on an OATC14
+ * 10Base-T1S PHY.
+ * @phydev: Pointer to the PHY device structure
+ *
+ * This function initiates a cable diagnostic test on a Clause 45 OATC14
+ * 10Base-T1S capable PHY device. It first reads the PHY’s advanced diagnostic
+ * capability register to check if High Definition Diagnostics (HDD) mode is
+ * supported. If the PHY does not report HDD capability, cable testing is not
+ * supported and the function returns -EOPNOTSUPP.
+ *
+ * For PHYs that support HDD, the function sets the appropriate control bits in
+ * the OATC14_HDD register to enable and start the cable diagnostic test.
+ *
+ * Return:
+ * * 0 on success
+ * * -EOPNOTSUPP if the PHY does not support HDD capability
+ * * A negative error code on I/O or register access failures
+ */
+int genphy_c45_oatc14_cable_test_start(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_ADFCAP);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & OATC14_ADFCAP_HDD_CAPABILITY))
+ return -EOPNOTSUPP;
+
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_HDD,
+ OATC14_HDD_CONTROL);
+ if (ret)
+ return ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_HDD);
+ if (ret < 0)
+ return ret;
+
+ return phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_HDD,
+ OATC14_HDD_START_CONTROL);
+}
+EXPORT_SYMBOL(genphy_c45_oatc14_cable_test_start);
+
+/**
+ * oatc14_update_sqi_capability - Read and update OATC14 10Base-T1S PHY SQI/SQI+
+ * capability
+ * @phydev: Pointer to the PHY device structure
+ *
+ * This helper reads the OATC14 ADFCAP capability register to determine whether
+ * the PHY supports SQI or SQI+ reporting.
+ *
+ * SQI+ capability is detected first. The SQI+ field indicates the number of
+ * valid MSBs (3-8), corresponding to 8-256 SQI+ levels. When present, the
+ * function stores the number of SQI+ bits and computes the maximum SQI+ value
+ * as (2^bits - 1).
+ *
+ * If SQI+ is not supported, the function checks for basic SQI capability,
+ * which provides 0–7 SQI levels.
+ *
+ * On success, the capability information is stored in
+ * @phydev->oatc14_sqi_capability and marked as updated.
+ *
+ * Return:
+ * * 0 - capability successfully read and stored
+ * * -EOPNOTSUPP - SQI/SQI+ not supported by this PHY
+ * * Negative errno on read failure
+ */
+static int oatc14_update_sqi_capability(struct phy_device *phydev)
+{
+ u8 bits;
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_ADFCAP);
+ if (ret < 0)
+ return ret;
+
+ /* Check for SQI+ capability
+ * 0 - SQI+ is not supported
+ * (3-8) bits for (8-256) SQI+ levels supported
+ */
+ bits = FIELD_GET(OATC14_ADFCAP_SQIPLUS_CAPABILITY, ret);
+ if (bits) {
+ phydev->oatc14_sqi_capability.sqiplus_bits = bits;
+ /* Max sqi+ level supported: (2 ^ bits) - 1 */
+ phydev->oatc14_sqi_capability.sqi_max = BIT(bits) - 1;
+ goto update_done;
+ }
+
+ /* Check for SQI capability
+ * 0 - SQI is not supported
+ * 1 - SQI is supported (0-7 levels)
+ */
+ if (ret & OATC14_ADFCAP_SQI_CAPABILITY) {
+ phydev->oatc14_sqi_capability.sqi_max = OATC14_SQI_MAX_LEVEL;
+ goto update_done;
+ }
+
+ return -EOPNOTSUPP;
+
+update_done:
+ phydev->oatc14_sqi_capability.updated = true;
+ return 0;
+}
+
+/**
+ * genphy_c45_oatc14_get_sqi_max - Get maximum supported SQI or SQI+ level of
+ * OATC14 10Base-T1S PHY
+ * @phydev: pointer to the PHY device structure
+ *
+ * This function returns the maximum supported Signal Quality Indicator (SQI) or
+ * SQI+ level. The SQI capability is updated on first invocation if it has not
+ * already been updated.
+ *
+ * Return:
+ * * Maximum SQI/SQI+ level supported
+ * * Negative errno on capability read failure
+ */
+int genphy_c45_oatc14_get_sqi_max(struct phy_device *phydev)
+{
+ int ret;
+
+ if (!phydev->oatc14_sqi_capability.updated) {
+ ret = oatc14_update_sqi_capability(phydev);
+ if (ret)
+ return ret;
+ }
+
+ return phydev->oatc14_sqi_capability.sqi_max;
+}
+EXPORT_SYMBOL(genphy_c45_oatc14_get_sqi_max);
+
+/**
+ * genphy_c45_oatc14_get_sqi - Get Signal Quality Indicator (SQI) from an OATC14
+ * 10Base-T1S PHY
+ * @phydev: pointer to the PHY device structure
+ *
+ * This function reads the SQI+ or SQI value from an OATC14-compatible
+ * 10Base-T1S PHY. If SQI+ capability is supported, the function returns the
+ * extended SQI+ value; otherwise, it returns the basic SQI value. The SQI
+ * capability is updated on first invocation if it has not already been updated.
+ *
+ * Return:
+ * * SQI/SQI+ value on success
+ * * Negative errno on read failure
+ */
+int genphy_c45_oatc14_get_sqi(struct phy_device *phydev)
+{
+ u8 shift;
+ int ret;
+
+ if (!phydev->oatc14_sqi_capability.updated) {
+ ret = oatc14_update_sqi_capability(phydev);
+ if (ret)
+ return ret;
+ }
+
+ /* Calculate and return SQI+ value if supported */
+ if (phydev->oatc14_sqi_capability.sqiplus_bits) {
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2,
+ MDIO_OATC14_DCQ_SQIPLUS);
+ if (ret < 0)
+ return ret;
+
+ /* SQI+ uses N MSBs out of 8 bits, left-aligned with padding 1's
+ * Calculate the right-shift needed to isolate the N bits.
+ */
+ shift = 8 - phydev->oatc14_sqi_capability.sqiplus_bits;
+
+ return (ret & OATC14_DCQ_SQIPLUS_VALUE) >> shift;
+ }
+
+ /* Read and return SQI value if SQI+ capability is not supported */
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_DCQ_SQI);
+ if (ret < 0)
+ return ret;
+
+ return ret & OATC14_DCQ_SQI_VALUE;
+}
+EXPORT_SYMBOL(genphy_c45_oatc14_get_sqi);
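
oatc14_update_sqi_capability() and genphy_c45_oatc14_get_sqi() above derive the SQI+ range from the advertised bit width and then extract the value from the most significant bits of the DCQ register. A worked example of that arithmetic, assuming a PHY that advertises 4 SQI+ bits and assuming OATC14_DCQ_SQIPLUS_VALUE covers the full 8-bit field; the register values are invented for illustration.

/*
 * sqiplus_bits = 4
 * sqi_max      = BIT(4) - 1 = 15     (16 levels, 0..15)
 * shift        = 8 - 4 = 4
 *
 * A DCQ_SQIPLUS read of 0xB7 (0b10110111) then yields
 * (0xB7 & OATC14_DCQ_SQIPLUS_VALUE) >> 4 = 0xB = 11.
 */
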
diff --git a/drivers/net/phy/phy-caps.h b/drivers/net/phy/phy-caps.h
new file mode 100644
index 000000000000..4951a39f3828
--- /dev/null
+++ b/drivers/net/phy/phy-caps.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * link caps internal header, for link modes <-> capabilities <-> interfaces
+ * conversions.
+ */
+
+#ifndef __PHY_CAPS_H
+#define __PHY_CAPS_H
+
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+
+enum {
+ LINK_CAPA_10HD = 0,
+ LINK_CAPA_10FD,
+ LINK_CAPA_100HD,
+ LINK_CAPA_100FD,
+ LINK_CAPA_1000HD,
+ LINK_CAPA_1000FD,
+ LINK_CAPA_2500FD,
+ LINK_CAPA_5000FD,
+ LINK_CAPA_10000FD,
+ LINK_CAPA_20000FD,
+ LINK_CAPA_25000FD,
+ LINK_CAPA_40000FD,
+ LINK_CAPA_50000FD,
+ LINK_CAPA_56000FD,
+ LINK_CAPA_100000FD,
+ LINK_CAPA_200000FD,
+ LINK_CAPA_400000FD,
+ LINK_CAPA_800000FD,
+ LINK_CAPA_1600000FD,
+
+ __LINK_CAPA_MAX,
+};
+
+#define LINK_CAPA_ALL GENMASK((__LINK_CAPA_MAX - 1), 0)
+
+struct link_capabilities {
+ int speed;
+ unsigned int duplex;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(linkmodes);
+};
+
+int __init phy_caps_init(void);
+
+size_t phy_caps_speeds(unsigned int *speeds, size_t size,
+ unsigned long *linkmodes);
+void phy_caps_linkmode_max_speed(u32 max_speed, unsigned long *linkmodes);
+bool phy_caps_valid(int speed, int duplex, const unsigned long *linkmodes);
+void phy_caps_linkmodes(unsigned long caps, unsigned long *linkmodes);
+unsigned long phy_caps_from_interface(phy_interface_t interface);
+
+const struct link_capabilities *
+phy_caps_lookup_by_linkmode(const unsigned long *linkmodes);
+
+const struct link_capabilities *
+phy_caps_lookup_by_linkmode_rev(const unsigned long *linkmodes, bool fdx_only);
+
+const struct link_capabilities *
+phy_caps_lookup(int speed, unsigned int duplex, const unsigned long *supported,
+ bool exact);
+
+#endif /* __PHY_CAPS_H */
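
The new phy-caps.h header replaces the flat settings[] table with capability-based lookups. A minimal sketch of how a phylib-internal caller could combine these helpers, mirroring the phy_resolve_aneg_linkmode() and phy_speed_down_core() changes in the phy-core.c hunks that follow; the function name is illustrative only and the helpers are internal to phylib, not exported.

static int example_limit_and_resolve(struct phy_device *phydev, u32 max_speed)
{
	const struct link_capabilities *c;

	/* Drop every advertised linkmode faster than max_speed */
	phy_caps_linkmode_max_speed(max_speed, phydev->advertising);

	/* Pick the best remaining speed/duplex capability */
	c = phy_caps_lookup_by_linkmode(phydev->advertising);
	if (!c)
		return -EINVAL;

	phydev->speed = c->speed;
	phydev->duplex = c->duplex;
	return 0;
}
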
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 4e8db12d6092..277c034bc32f 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -6,6 +6,10 @@
#include <linux/phy.h>
#include <linux/of.h>
+#include "phylib.h"
+#include "phylib-internal.h"
+#include "phy-caps.h"
+
/**
* phy_speed_to_str - Return a string representing the PHY link speed
*
@@ -13,7 +17,7 @@
*/
const char *phy_speed_to_str(int speed)
{
- BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 103,
+ BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 125,
"Enum ethtool_link_mode_bit_indices and phylib are out of sync. "
"If a speed or mode has been added please update phy_speed_to_str "
"and the PHY settings array.\n");
@@ -51,6 +55,8 @@ const char *phy_speed_to_str(int speed)
return "400Gbps";
case SPEED_800000:
return "800Gbps";
+ case SPEED_1600000:
+ return "1600Gbps";
case SPEED_UNKNOWN:
return "Unknown";
default:
@@ -98,6 +104,49 @@ const char *phy_rate_matching_to_str(int rate_matching)
EXPORT_SYMBOL_GPL(phy_rate_matching_to_str);
/**
+ * phy_fix_phy_mode_for_mac_delays - Convenience function for fixing the PHY
+ * mode based on whether the MAC adds internal delay
+ *
+ * @interface: The current interface mode of the port
+ * @mac_txid: True if the MAC adds internal tx delay
+ * @mac_rxid: True if the MAC adds internal rx delay
+ *
+ * Return: fixed PHY mode, or PHY_INTERFACE_MODE_NA if the internal delay
+ * cannot be applied to the given interface
+ */
+phy_interface_t phy_fix_phy_mode_for_mac_delays(phy_interface_t interface,
+ bool mac_txid, bool mac_rxid)
+{
+ if (!phy_interface_mode_is_rgmii(interface))
+ return interface;
+
+ if (mac_txid && mac_rxid) {
+ if (interface == PHY_INTERFACE_MODE_RGMII_ID)
+ return PHY_INTERFACE_MODE_RGMII;
+ return PHY_INTERFACE_MODE_NA;
+ }
+
+ if (mac_txid) {
+ if (interface == PHY_INTERFACE_MODE_RGMII_ID)
+ return PHY_INTERFACE_MODE_RGMII_RXID;
+ if (interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ return PHY_INTERFACE_MODE_RGMII;
+ return PHY_INTERFACE_MODE_NA;
+ }
+
+ if (mac_rxid) {
+ if (interface == PHY_INTERFACE_MODE_RGMII_ID)
+ return PHY_INTERFACE_MODE_RGMII_TXID;
+ if (interface == PHY_INTERFACE_MODE_RGMII_RXID)
+ return PHY_INTERFACE_MODE_RGMII;
+ return PHY_INTERFACE_MODE_NA;
+ }
+
+ return interface;
+}
+EXPORT_SYMBOL_GPL(phy_fix_phy_mode_for_mac_delays);
+
+/**
* phy_interface_num_ports - Return the number of links that can be carried by
* a given MAC-PHY physical link. Returns 0 if this is
* unknown, the number of links else.
@@ -111,6 +160,7 @@ int phy_interface_num_ports(phy_interface_t interface)
return 0;
case PHY_INTERFACE_MODE_INTERNAL:
case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_MIILITE:
case PHY_INTERFACE_MODE_GMII:
case PHY_INTERFACE_MODE_TBI:
case PHY_INTERFACE_MODE_REVMII:
@@ -138,6 +188,9 @@ int phy_interface_num_ports(phy_interface_t interface)
case PHY_INTERFACE_MODE_RXAUI:
case PHY_INTERFACE_MODE_XAUI:
case PHY_INTERFACE_MODE_1000BASEKX:
+ case PHY_INTERFACE_MODE_50GBASER:
+ case PHY_INTERFACE_MODE_LAUI:
+ case PHY_INTERFACE_MODE_100GBASEP:
return 1;
case PHY_INTERFACE_MODE_QSGMII:
case PHY_INTERFACE_MODE_QUSGMII:
@@ -153,203 +206,9 @@ int phy_interface_num_ports(phy_interface_t interface)
}
EXPORT_SYMBOL_GPL(phy_interface_num_ports);
-/* A mapping of all SUPPORTED settings to speed/duplex. This table
- * must be grouped by speed and sorted in descending match priority
- * - iow, descending speed.
- */
-
-#define PHY_SETTING(s, d, b) { .speed = SPEED_ ## s, .duplex = DUPLEX_ ## d, \
- .bit = ETHTOOL_LINK_MODE_ ## b ## _BIT}
-
-static const struct phy_setting settings[] = {
- /* 800G */
- PHY_SETTING( 800000, FULL, 800000baseCR8_Full ),
- PHY_SETTING( 800000, FULL, 800000baseKR8_Full ),
- PHY_SETTING( 800000, FULL, 800000baseDR8_Full ),
- PHY_SETTING( 800000, FULL, 800000baseDR8_2_Full ),
- PHY_SETTING( 800000, FULL, 800000baseSR8_Full ),
- PHY_SETTING( 800000, FULL, 800000baseVR8_Full ),
- /* 400G */
- PHY_SETTING( 400000, FULL, 400000baseCR8_Full ),
- PHY_SETTING( 400000, FULL, 400000baseKR8_Full ),
- PHY_SETTING( 400000, FULL, 400000baseLR8_ER8_FR8_Full ),
- PHY_SETTING( 400000, FULL, 400000baseDR8_Full ),
- PHY_SETTING( 400000, FULL, 400000baseSR8_Full ),
- PHY_SETTING( 400000, FULL, 400000baseCR4_Full ),
- PHY_SETTING( 400000, FULL, 400000baseKR4_Full ),
- PHY_SETTING( 400000, FULL, 400000baseLR4_ER4_FR4_Full ),
- PHY_SETTING( 400000, FULL, 400000baseDR4_Full ),
- PHY_SETTING( 400000, FULL, 400000baseSR4_Full ),
- /* 200G */
- PHY_SETTING( 200000, FULL, 200000baseCR4_Full ),
- PHY_SETTING( 200000, FULL, 200000baseKR4_Full ),
- PHY_SETTING( 200000, FULL, 200000baseLR4_ER4_FR4_Full ),
- PHY_SETTING( 200000, FULL, 200000baseDR4_Full ),
- PHY_SETTING( 200000, FULL, 200000baseSR4_Full ),
- PHY_SETTING( 200000, FULL, 200000baseCR2_Full ),
- PHY_SETTING( 200000, FULL, 200000baseKR2_Full ),
- PHY_SETTING( 200000, FULL, 200000baseLR2_ER2_FR2_Full ),
- PHY_SETTING( 200000, FULL, 200000baseDR2_Full ),
- PHY_SETTING( 200000, FULL, 200000baseSR2_Full ),
- /* 100G */
- PHY_SETTING( 100000, FULL, 100000baseCR4_Full ),
- PHY_SETTING( 100000, FULL, 100000baseKR4_Full ),
- PHY_SETTING( 100000, FULL, 100000baseLR4_ER4_Full ),
- PHY_SETTING( 100000, FULL, 100000baseSR4_Full ),
- PHY_SETTING( 100000, FULL, 100000baseCR2_Full ),
- PHY_SETTING( 100000, FULL, 100000baseKR2_Full ),
- PHY_SETTING( 100000, FULL, 100000baseLR2_ER2_FR2_Full ),
- PHY_SETTING( 100000, FULL, 100000baseDR2_Full ),
- PHY_SETTING( 100000, FULL, 100000baseSR2_Full ),
- PHY_SETTING( 100000, FULL, 100000baseCR_Full ),
- PHY_SETTING( 100000, FULL, 100000baseKR_Full ),
- PHY_SETTING( 100000, FULL, 100000baseLR_ER_FR_Full ),
- PHY_SETTING( 100000, FULL, 100000baseDR_Full ),
- PHY_SETTING( 100000, FULL, 100000baseSR_Full ),
- /* 56G */
- PHY_SETTING( 56000, FULL, 56000baseCR4_Full ),
- PHY_SETTING( 56000, FULL, 56000baseKR4_Full ),
- PHY_SETTING( 56000, FULL, 56000baseLR4_Full ),
- PHY_SETTING( 56000, FULL, 56000baseSR4_Full ),
- /* 50G */
- PHY_SETTING( 50000, FULL, 50000baseCR2_Full ),
- PHY_SETTING( 50000, FULL, 50000baseKR2_Full ),
- PHY_SETTING( 50000, FULL, 50000baseSR2_Full ),
- PHY_SETTING( 50000, FULL, 50000baseCR_Full ),
- PHY_SETTING( 50000, FULL, 50000baseKR_Full ),
- PHY_SETTING( 50000, FULL, 50000baseLR_ER_FR_Full ),
- PHY_SETTING( 50000, FULL, 50000baseDR_Full ),
- PHY_SETTING( 50000, FULL, 50000baseSR_Full ),
- /* 40G */
- PHY_SETTING( 40000, FULL, 40000baseCR4_Full ),
- PHY_SETTING( 40000, FULL, 40000baseKR4_Full ),
- PHY_SETTING( 40000, FULL, 40000baseLR4_Full ),
- PHY_SETTING( 40000, FULL, 40000baseSR4_Full ),
- /* 25G */
- PHY_SETTING( 25000, FULL, 25000baseCR_Full ),
- PHY_SETTING( 25000, FULL, 25000baseKR_Full ),
- PHY_SETTING( 25000, FULL, 25000baseSR_Full ),
- /* 20G */
- PHY_SETTING( 20000, FULL, 20000baseKR2_Full ),
- PHY_SETTING( 20000, FULL, 20000baseMLD2_Full ),
- /* 10G */
- PHY_SETTING( 10000, FULL, 10000baseCR_Full ),
- PHY_SETTING( 10000, FULL, 10000baseER_Full ),
- PHY_SETTING( 10000, FULL, 10000baseKR_Full ),
- PHY_SETTING( 10000, FULL, 10000baseKX4_Full ),
- PHY_SETTING( 10000, FULL, 10000baseLR_Full ),
- PHY_SETTING( 10000, FULL, 10000baseLRM_Full ),
- PHY_SETTING( 10000, FULL, 10000baseR_FEC ),
- PHY_SETTING( 10000, FULL, 10000baseSR_Full ),
- PHY_SETTING( 10000, FULL, 10000baseT_Full ),
- /* 5G */
- PHY_SETTING( 5000, FULL, 5000baseT_Full ),
- /* 2.5G */
- PHY_SETTING( 2500, FULL, 2500baseT_Full ),
- PHY_SETTING( 2500, FULL, 2500baseX_Full ),
- /* 1G */
- PHY_SETTING( 1000, FULL, 1000baseT_Full ),
- PHY_SETTING( 1000, HALF, 1000baseT_Half ),
- PHY_SETTING( 1000, FULL, 1000baseT1_Full ),
- PHY_SETTING( 1000, FULL, 1000baseX_Full ),
- PHY_SETTING( 1000, FULL, 1000baseKX_Full ),
- /* 100M */
- PHY_SETTING( 100, FULL, 100baseT_Full ),
- PHY_SETTING( 100, FULL, 100baseT1_Full ),
- PHY_SETTING( 100, HALF, 100baseT_Half ),
- PHY_SETTING( 100, HALF, 100baseFX_Half ),
- PHY_SETTING( 100, FULL, 100baseFX_Full ),
- /* 10M */
- PHY_SETTING( 10, FULL, 10baseT_Full ),
- PHY_SETTING( 10, HALF, 10baseT_Half ),
- PHY_SETTING( 10, FULL, 10baseT1L_Full ),
- PHY_SETTING( 10, FULL, 10baseT1S_Full ),
- PHY_SETTING( 10, HALF, 10baseT1S_Half ),
- PHY_SETTING( 10, HALF, 10baseT1S_P2MP_Half ),
- PHY_SETTING( 10, FULL, 10baseT1BRR_Full ),
-};
-#undef PHY_SETTING
-
-/**
- * phy_lookup_setting - lookup a PHY setting
- * @speed: speed to match
- * @duplex: duplex to match
- * @mask: allowed link modes
- * @exact: an exact match is required
- *
- * Search the settings array for a setting that matches the speed and
- * duplex, and which is supported.
- *
- * If @exact is unset, either an exact match or %NULL for no match will
- * be returned.
- *
- * If @exact is set, an exact match, the fastest supported setting at
- * or below the specified speed, the slowest supported setting, or if
- * they all fail, %NULL will be returned.
- */
-const struct phy_setting *
-phy_lookup_setting(int speed, int duplex, const unsigned long *mask, bool exact)
-{
- const struct phy_setting *p, *match = NULL, *last = NULL;
- int i;
-
- for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) {
- if (p->bit < __ETHTOOL_LINK_MODE_MASK_NBITS &&
- test_bit(p->bit, mask)) {
- last = p;
- if (p->speed == speed && p->duplex == duplex) {
- /* Exact match for speed and duplex */
- match = p;
- break;
- } else if (!exact) {
- if (!match && p->speed <= speed)
- /* Candidate */
- match = p;
-
- if (p->speed < speed)
- break;
- }
- }
- }
-
- if (!match && !exact)
- match = last;
-
- return match;
-}
-EXPORT_SYMBOL_GPL(phy_lookup_setting);
-
-size_t phy_speeds(unsigned int *speeds, size_t size,
- unsigned long *mask)
-{
- size_t count;
- int i;
-
- for (i = 0, count = 0; i < ARRAY_SIZE(settings) && count < size; i++)
- if (settings[i].bit < __ETHTOOL_LINK_MODE_MASK_NBITS &&
- test_bit(settings[i].bit, mask) &&
- (count == 0 || speeds[count - 1] != settings[i].speed))
- speeds[count++] = settings[i].speed;
-
- return count;
-}
-
-static void __set_linkmode_max_speed(u32 max_speed, unsigned long *addr)
-{
- const struct phy_setting *p;
- int i;
-
- for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) {
- if (p->speed > max_speed)
- linkmode_clear_bit(p->bit, addr);
- else
- break;
- }
-}
-
static void __set_phy_supported(struct phy_device *phydev, u32 max_speed)
{
- __set_linkmode_max_speed(max_speed, phydev->supported);
+ phy_caps_linkmode_max_speed(max_speed, phydev->supported);
}
/**
@@ -388,28 +247,25 @@ void of_set_phy_supported(struct phy_device *phydev)
void of_set_phy_eee_broken(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
- u32 broken = 0;
+ unsigned long *modes = phydev->eee_disabled_modes;
- if (!IS_ENABLED(CONFIG_OF_MDIO))
+ if (!IS_ENABLED(CONFIG_OF_MDIO) || !node)
return;
- if (!node)
- return;
+ linkmode_zero(modes);
if (of_property_read_bool(node, "eee-broken-100tx"))
- broken |= MDIO_EEE_100TX;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, modes);
if (of_property_read_bool(node, "eee-broken-1000t"))
- broken |= MDIO_EEE_1000T;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, modes);
if (of_property_read_bool(node, "eee-broken-10gt"))
- broken |= MDIO_EEE_10GT;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, modes);
if (of_property_read_bool(node, "eee-broken-1000kx"))
- broken |= MDIO_EEE_1000KX;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, modes);
if (of_property_read_bool(node, "eee-broken-10gkx4"))
- broken |= MDIO_EEE_10GKX4;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, modes);
if (of_property_read_bool(node, "eee-broken-10gkr"))
- broken |= MDIO_EEE_10GKR;
-
- phydev->eee_broken_modes = broken;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, modes);
}
/**
@@ -478,16 +334,15 @@ EXPORT_SYMBOL_GPL(phy_resolve_aneg_pause);
void phy_resolve_aneg_linkmode(struct phy_device *phydev)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(common);
- int i;
+ const struct link_capabilities *c;
linkmode_and(common, phydev->lp_advertising, phydev->advertising);
- for (i = 0; i < ARRAY_SIZE(settings); i++)
- if (test_bit(settings[i].bit, common)) {
- phydev->speed = settings[i].speed;
- phydev->duplex = settings[i].duplex;
- break;
- }
+ c = phy_caps_lookup_by_linkmode(common);
+ if (c) {
+ phydev->speed = c->speed;
+ phydev->duplex = c->duplex;
+ }
phy_resolve_aneg_pause(phydev);
}
@@ -505,7 +360,8 @@ EXPORT_SYMBOL_GPL(phy_resolve_aneg_linkmode);
void phy_check_downshift(struct phy_device *phydev)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(common);
- int i, speed = SPEED_UNKNOWN;
+ const struct link_capabilities *c;
+ int speed = SPEED_UNKNOWN;
phydev->downshifted_rate = 0;
@@ -515,11 +371,9 @@ void phy_check_downshift(struct phy_device *phydev)
linkmode_and(common, phydev->lp_advertising, phydev->advertising);
- for (i = 0; i < ARRAY_SIZE(settings); i++)
- if (test_bit(settings[i].bit, common)) {
- speed = settings[i].speed;
- break;
- }
+ c = phy_caps_lookup_by_linkmode(common);
+ if (c)
+ speed = c->speed;
if (speed == SPEED_UNKNOWN || phydev->speed >= speed)
return;
@@ -529,22 +383,17 @@ void phy_check_downshift(struct phy_device *phydev)
phydev->downshifted_rate = 1;
}
-EXPORT_SYMBOL_GPL(phy_check_downshift);
static int phy_resolve_min_speed(struct phy_device *phydev, bool fdx_only)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(common);
- int i = ARRAY_SIZE(settings);
+ const struct link_capabilities *c;
linkmode_and(common, phydev->lp_advertising, phydev->advertising);
- while (--i >= 0) {
- if (test_bit(settings[i].bit, common)) {
- if (fdx_only && settings[i].duplex != DUPLEX_FULL)
- continue;
- return settings[i].speed;
- }
- }
+ c = phy_caps_lookup_by_linkmode_rev(common, fdx_only);
+ if (c)
+ return c->speed;
return SPEED_UNKNOWN;
}
@@ -556,7 +405,7 @@ int phy_speed_down_core(struct phy_device *phydev)
if (min_common_speed == SPEED_UNKNOWN)
return -EINVAL;
- __set_linkmode_max_speed(min_common_speed, phydev->advertising);
+ phy_caps_linkmode_max_speed(min_common_speed, phydev->advertising);
return 0;
}
@@ -575,8 +424,8 @@ static void mmd_phy_indirect(struct mii_bus *bus, int phy_addr, int devad,
devad | MII_MMD_CTRL_NOINCR);
}
-static int mmd_phy_read(struct mii_bus *bus, int phy_addr, bool is_c45,
- int devad, u32 regnum)
+int mmd_phy_read(struct mii_bus *bus, int phy_addr, bool is_c45,
+ int devad, u32 regnum)
{
if (is_c45)
return __mdiobus_c45_read(bus, phy_addr, devad, regnum);
@@ -585,9 +434,10 @@ static int mmd_phy_read(struct mii_bus *bus, int phy_addr, bool is_c45,
/* Read the content of the MMD's selected register */
return __mdiobus_read(bus, phy_addr, MII_MMD_DATA);
}
+EXPORT_SYMBOL_GPL(mmd_phy_read);
-static int mmd_phy_write(struct mii_bus *bus, int phy_addr, bool is_c45,
- int devad, u32 regnum, u16 val)
+int mmd_phy_write(struct mii_bus *bus, int phy_addr, bool is_c45,
+ int devad, u32 regnum, u16 val)
{
if (is_c45)
return __mdiobus_c45_write(bus, phy_addr, devad, regnum, val);
@@ -596,6 +446,7 @@ static int mmd_phy_write(struct mii_bus *bus, int phy_addr, bool is_c45,
/* Write the data into MMD's selected register */
return __mdiobus_write(bus, phy_addr, MII_MMD_DATA, val);
}
+EXPORT_SYMBOL_GPL(mmd_phy_write);
/**
* __phy_read_mmd - Convenience function for reading a register
@@ -686,146 +537,6 @@ int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
EXPORT_SYMBOL(phy_write_mmd);
/**
- * __phy_package_read_mmd - read MMD reg relative to PHY package base addr
- * @phydev: The phy_device struct
- * @addr_offset: The offset to be added to PHY package base_addr
- * @devad: The MMD to read from
- * @regnum: The register on the MMD to read
- *
- * Convenience helper for reading a register of an MMD on a given PHY
- * using the PHY package base address. The base address is added to
- * the addr_offset value.
- *
- * Same calling rules as for __phy_read();
- *
- * NOTE: It's assumed that the entire PHY package is either C22 or C45.
- */
-int __phy_package_read_mmd(struct phy_device *phydev,
- unsigned int addr_offset, int devad,
- u32 regnum)
-{
- int addr = phy_package_address(phydev, addr_offset);
-
- if (addr < 0)
- return addr;
-
- if (regnum > (u16)~0 || devad > 32)
- return -EINVAL;
-
- return mmd_phy_read(phydev->mdio.bus, addr, phydev->is_c45, devad,
- regnum);
-}
-EXPORT_SYMBOL(__phy_package_read_mmd);
-
-/**
- * phy_package_read_mmd - read MMD reg relative to PHY package base addr
- * @phydev: The phy_device struct
- * @addr_offset: The offset to be added to PHY package base_addr
- * @devad: The MMD to read from
- * @regnum: The register on the MMD to read
- *
- * Convenience helper for reading a register of an MMD on a given PHY
- * using the PHY package base address. The base address is added to
- * the addr_offset value.
- *
- * Same calling rules as for phy_read();
- *
- * NOTE: It's assumed that the entire PHY package is either C22 or C45.
- */
-int phy_package_read_mmd(struct phy_device *phydev,
- unsigned int addr_offset, int devad,
- u32 regnum)
-{
- int addr = phy_package_address(phydev, addr_offset);
- int val;
-
- if (addr < 0)
- return addr;
-
- if (regnum > (u16)~0 || devad > 32)
- return -EINVAL;
-
- phy_lock_mdio_bus(phydev);
- val = mmd_phy_read(phydev->mdio.bus, addr, phydev->is_c45, devad,
- regnum);
- phy_unlock_mdio_bus(phydev);
-
- return val;
-}
-EXPORT_SYMBOL(phy_package_read_mmd);
-
-/**
- * __phy_package_write_mmd - write MMD reg relative to PHY package base addr
- * @phydev: The phy_device struct
- * @addr_offset: The offset to be added to PHY package base_addr
- * @devad: The MMD to write to
- * @regnum: The register on the MMD to write
- * @val: value to write to @regnum
- *
- * Convenience helper for writing a register of an MMD on a given PHY
- * using the PHY package base address. The base address is added to
- * the addr_offset value.
- *
- * Same calling rules as for __phy_write();
- *
- * NOTE: It's assumed that the entire PHY package is either C22 or C45.
- */
-int __phy_package_write_mmd(struct phy_device *phydev,
- unsigned int addr_offset, int devad,
- u32 regnum, u16 val)
-{
- int addr = phy_package_address(phydev, addr_offset);
-
- if (addr < 0)
- return addr;
-
- if (regnum > (u16)~0 || devad > 32)
- return -EINVAL;
-
- return mmd_phy_write(phydev->mdio.bus, addr, phydev->is_c45, devad,
- regnum, val);
-}
-EXPORT_SYMBOL(__phy_package_write_mmd);
-
-/**
- * phy_package_write_mmd - write MMD reg relative to PHY package base addr
- * @phydev: The phy_device struct
- * @addr_offset: The offset to be added to PHY package base_addr
- * @devad: The MMD to write to
- * @regnum: The register on the MMD to write
- * @val: value to write to @regnum
- *
- * Convenience helper for writing a register of an MMD on a given PHY
- * using the PHY package base address. The base address is added to
- * the addr_offset value.
- *
- * Same calling rules as for phy_write();
- *
- * NOTE: It's assumed that the entire PHY package is either C22 or C45.
- */
-int phy_package_write_mmd(struct phy_device *phydev,
- unsigned int addr_offset, int devad,
- u32 regnum, u16 val)
-{
- int addr = phy_package_address(phydev, addr_offset);
- int ret;
-
- if (addr < 0)
- return addr;
-
- if (regnum > (u16)~0 || devad > 32)
- return -EINVAL;
-
- phy_lock_mdio_bus(phydev);
- ret = mmd_phy_write(phydev->mdio.bus, addr, phydev->is_c45, devad,
- regnum, val);
- phy_unlock_mdio_bus(phydev);
-
- return ret;
-}
-EXPORT_SYMBOL(phy_package_write_mmd);
-
-/**
* phy_modify_changed - Function for modifying a PHY register
* @phydev: the phy_device struct
* @regnum: register number to modify
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 4f3e742907cb..13dd1691886d 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -36,6 +36,9 @@
#include <net/genetlink.h>
#include <net/sock.h>
+#include "phylib-internal.h"
+#include "phy-caps.h"
+
#define PHY_STATE_TIME HZ
#define PHY_STATE_STR(_state) \
@@ -211,25 +214,6 @@ int phy_aneg_done(struct phy_device *phydev)
EXPORT_SYMBOL(phy_aneg_done);
/**
- * phy_find_valid - find a PHY setting that matches the requested parameters
- * @speed: desired speed
- * @duplex: desired duplex
- * @supported: mask of supported link modes
- *
- * Locate a supported phy setting that is, in priority order:
- * - an exact match for the specified speed and duplex mode
- * - a match for the specified speed, or slower speed
- * - the slowest supported speed
- * Returns the matched phy_setting entry, or %NULL if no supported phy
- * settings were found.
- */
-static const struct phy_setting *
-phy_find_valid(int speed, int duplex, unsigned long *supported)
-{
- return phy_lookup_setting(speed, duplex, supported, false);
-}
-
-/**
* phy_supported_speeds - return all speeds currently supported by a phy device
* @phy: The phy device to return supported speeds of.
* @speeds: buffer to store supported speeds in.
@@ -243,7 +227,7 @@ unsigned int phy_supported_speeds(struct phy_device *phy,
unsigned int *speeds,
unsigned int size)
{
- return phy_speeds(speeds, size, phy->supported);
+ return phy_caps_speeds(speeds, size, phy->supported);
}
/**
@@ -257,7 +241,7 @@ unsigned int phy_supported_speeds(struct phy_device *phy,
*/
bool phy_check_valid(int speed, int duplex, unsigned long *features)
{
- return !!phy_lookup_setting(speed, duplex, features, true);
+ return phy_caps_valid(speed, duplex, features);
}
EXPORT_SYMBOL(phy_check_valid);
@@ -271,13 +255,14 @@ EXPORT_SYMBOL(phy_check_valid);
*/
static void phy_sanitize_settings(struct phy_device *phydev)
{
- const struct phy_setting *setting;
+ const struct link_capabilities *c;
+
+ c = phy_caps_lookup(phydev->speed, phydev->duplex, phydev->supported,
+ false);
- setting = phy_find_valid(phydev->speed, phydev->duplex,
- phydev->supported);
- if (setting) {
- phydev->speed = setting->speed;
- phydev->duplex = setting->duplex;
+ if (c) {
+ phydev->speed = c->speed;
+ phydev->duplex = c->duplex;
} else {
/* We failed to find anything (no supported speeds?) */
phydev->speed = SPEED_UNKNOWN;
@@ -302,7 +287,7 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev,
cmd->base.port = PORT_BNC;
else
cmd->base.port = phydev->port;
- cmd->base.transceiver = phy_is_internal(phydev) ?
+ cmd->base.transceiver = phydev->is_internal ?
XCVR_INTERNAL : XCVR_EXTERNAL;
cmd->base.phy_address = phydev->mdio.addr;
cmd->base.autoneg = phydev->autoneg;
@@ -420,12 +405,14 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
return 0;
case SIOCSHWTSTAMP:
- if (phydev->mii_ts && phydev->mii_ts->hwtstamp) {
+ if (phydev->mii_ts && phydev->mii_ts->hwtstamp_set) {
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
hwtstamp_config_to_kernel(&kernel_cfg, &cfg);
- ret = phydev->mii_ts->hwtstamp(phydev->mii_ts, &kernel_cfg, &extack);
+ ret = phydev->mii_ts->hwtstamp_set(phydev->mii_ts,
+ &kernel_cfg,
+ &extack);
if (ret)
return ret;
@@ -491,6 +478,9 @@ int __phy_hwtstamp_get(struct phy_device *phydev,
if (!phydev)
return -ENODEV;
+ if (phydev->mii_ts && phydev->mii_ts->hwtstamp_get)
+ return phydev->mii_ts->hwtstamp_get(phydev->mii_ts, config);
+
return -EOPNOTSUPP;
}
@@ -508,8 +498,9 @@ int __phy_hwtstamp_set(struct phy_device *phydev,
if (!phydev)
return -ENODEV;
- if (phydev->mii_ts && phydev->mii_ts->hwtstamp)
- return phydev->mii_ts->hwtstamp(phydev->mii_ts, config, extack);
+ if (phydev->mii_ts && phydev->mii_ts->hwtstamp_set)
+ return phydev->mii_ts->hwtstamp_set(phydev->mii_ts, config,
+ extack);
return -EOPNOTSUPP;
}
@@ -520,12 +511,12 @@ int __phy_hwtstamp_set(struct phy_device *phydev,
* @phydev: the phy_device struct
* @jiffies: Run the state machine after these jiffies
*/
-void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies)
+static void phy_queue_state_machine(struct phy_device *phydev,
+ unsigned long jiffies)
{
mod_delayed_work(system_power_efficient_wq, &phydev->state_queue,
jiffies);
}
-EXPORT_SYMBOL(phy_queue_state_machine);
/**
* phy_trigger_machine - Trigger the state machine to run now
@@ -616,6 +607,49 @@ int phy_ethtool_get_stats(struct phy_device *phydev,
EXPORT_SYMBOL(phy_ethtool_get_stats);
/**
+ * __phy_ethtool_get_phy_stats - Retrieve standardized PHY statistics
+ * @phydev: Pointer to the PHY device
+ * @phy_stats: Pointer to ethtool_eth_phy_stats structure
+ * @phydev_stats: Pointer to ethtool_phy_stats structure
+ *
+ * Fetches PHY statistics using a kernel-defined interface for consistent
+ * diagnostics. Unlike phy_ethtool_get_stats(), which allows custom stats,
+ * this function enforces a standardized format for better interoperability.
+ */
+void __phy_ethtool_get_phy_stats(struct phy_device *phydev,
+ struct ethtool_eth_phy_stats *phy_stats,
+ struct ethtool_phy_stats *phydev_stats)
+{
+ if (!phydev->drv || !phydev->drv->get_phy_stats)
+ return;
+
+ mutex_lock(&phydev->lock);
+ phydev->drv->get_phy_stats(phydev, phy_stats, phydev_stats);
+ mutex_unlock(&phydev->lock);
+}
+
+/**
+ * __phy_ethtool_get_link_ext_stats - Retrieve extended link statistics for a PHY
+ * @phydev: Pointer to the PHY device
+ * @link_stats: Pointer to the structure to store extended link statistics
+ *
+ * Populates the ethtool_link_ext_stats structure with link down event counts
+ * and additional driver-specific link statistics, if available.
+ */
+void __phy_ethtool_get_link_ext_stats(struct phy_device *phydev,
+ struct ethtool_link_ext_stats *link_stats)
+{
+ link_stats->link_down_events = READ_ONCE(phydev->link_down_events);
+
+ if (!phydev->drv || !phydev->drv->get_link_stats)
+ return;
+
+ mutex_lock(&phydev->lock);
+ phydev->drv->get_link_stats(phydev, link_stats);
+ mutex_unlock(&phydev->lock);
+}
+
+/**
* phy_ethtool_get_plca_cfg - Get PLCA RS configuration
* @phydev: the phy_device struct
* @plca_cfg: where to store the retrieved configuration
@@ -988,16 +1022,15 @@ static int phy_check_link_status(struct phy_device *phydev)
if (phydev->link && phydev->state != PHY_RUNNING) {
phy_check_downshift(phydev);
phydev->state = PHY_RUNNING;
- err = genphy_c45_eee_is_active(phydev,
- NULL, NULL, NULL);
- if (err <= 0)
- phydev->enable_tx_lpi = false;
- else
- phydev->enable_tx_lpi = phydev->eee_cfg.tx_lpi_enabled;
+ err = genphy_c45_eee_is_active(phydev, NULL);
+ phydev->eee_active = err > 0;
+ phydev->enable_tx_lpi = phydev->eee_cfg.tx_lpi_enabled &&
+ phydev->eee_active;
phy_link_up(phydev);
} else if (!phydev->link && phydev->state != PHY_NOLINK) {
phydev->state = PHY_NOLINK;
+ phydev->eee_active = false;
phydev->enable_tx_lpi = false;
phy_link_down(phydev);
}
@@ -1006,6 +1039,55 @@ static int phy_check_link_status(struct phy_device *phydev)
}
/**
+ * phy_inband_caps - query which in-band signalling modes are supported
+ * @phydev: a pointer to a &struct phy_device
+ * @interface: the interface mode for the PHY
+ *
+ * Returns zero if it is unknown what in-band signalling is supported by the
+ * PHY (e.g. because the PHY driver doesn't implement the method). Otherwise,
+ * returns a bit mask of the LINK_INBAND_* values from
+ * &enum link_inband_signalling to describe which inband modes are supported
+ * by the PHY for this interface mode.
+ */
+unsigned int phy_inband_caps(struct phy_device *phydev,
+ phy_interface_t interface)
+{
+ if (phydev->drv && phydev->drv->inband_caps)
+ return phydev->drv->inband_caps(phydev, interface);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(phy_inband_caps);
+
+/**
+ * phy_config_inband - configure the desired PHY in-band mode
+ * @phydev: the phy_device struct
+ * @modes: in-band modes to configure
+ *
+ * Description: disables, enables or enables-with-bypass in-band signalling
+ * between the PHY and host system.
+ *
+ * Returns: zero on success, or negative errno value.
+ */
+int phy_config_inband(struct phy_device *phydev, unsigned int modes)
+{
+ lockdep_assert_held(&phydev->lock);
+
+ if (!!(modes & LINK_INBAND_DISABLE) +
+ !!(modes & LINK_INBAND_ENABLE) +
+ !!(modes & LINK_INBAND_BYPASS) != 1)
+ return -EINVAL;
+
+ if (!phydev->drv)
+ return -EIO;
+ else if (!phydev->drv->config_inband)
+ return -EOPNOTSUPP;
+
+ return phydev->drv->config_inband(phydev, modes);
+}
+EXPORT_SYMBOL(phy_config_inband);
+
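A minimal usage sketch, not part of this patch: a MAC or phylink driver could query the supported in-band modes for its interface and then select exactly one of them. The surrounding locking mirrors the lockdep assertion in phy_config_inband(); the preference for LINK_INBAND_ENABLE here is an assumption made purely for illustration.

	unsigned int caps = phy_inband_caps(phydev, PHY_INTERFACE_MODE_SGMII);
	int err;

	mutex_lock(&phydev->lock);
	/* caps == 0 means the PHY driver did not report anything */
	if (caps & LINK_INBAND_ENABLE)
		err = phy_config_inband(phydev, LINK_INBAND_ENABLE);
	else
		err = phy_config_inband(phydev, LINK_INBAND_DISABLE);
	mutex_unlock(&phydev->lock);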
+/**
* _phy_start_aneg - start auto-negotiation for this PHY device
* @phydev: the phy_device struct
*
@@ -1347,6 +1429,23 @@ static int phy_enable_interrupts(struct phy_device *phydev)
}
/**
+ * phy_update_stats - Update PHY device statistics if supported.
+ * @phydev: Pointer to the PHY device structure.
+ *
+ * If the PHY driver provides an update_stats callback, this function
+ * invokes it to update the PHY statistics. If not, it returns 0.
+ *
+ * Return: 0 on success, or a negative error code if the callback fails.
+ */
+static int phy_update_stats(struct phy_device *phydev)
+{
+ if (!phydev->drv->update_stats)
+ return 0;
+
+ return phydev->drv->update_stats(phydev);
+}
+
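As a sketch of the driver side, not taken from this patch, an .update_stats implementation might fold a hardware counter into driver-private state; the foo names and the 0x1d register are hypothetical.

struct foo_priv {			/* hypothetical driver-private state */
	u64 rx_err_cnt;
};

static int foo_update_stats(struct phy_device *phydev)
{
	struct foo_priv *priv = phydev->priv;
	int val;

	/* 0x1d stands in for a vendor error-counter register */
	val = phy_read(phydev, 0x1d);
	if (val < 0)
		return val;

	priv->rx_err_cnt += val;
	return 0;
}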
+/**
* phy_request_interrupt - request and enable interrupt for a PHY device
* @phydev: target phy_device struct
*
@@ -1389,6 +1488,24 @@ void phy_free_interrupt(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_free_interrupt);
+/**
+ * phy_get_next_update_time - Determine the next PHY update time
+ * @phydev: Pointer to the phy_device structure
+ *
+ * This function queries the PHY driver to get the time for the next polling
+ * event. If the driver does not implement the callback, a default value is
+ * used.
+ *
+ * Return: The time for the next polling event in jiffies
+ */
+static unsigned int phy_get_next_update_time(struct phy_device *phydev)
+{
+ if (phydev->drv && phydev->drv->get_next_update_time)
+ return phydev->drv->get_next_update_time(phydev);
+
+ return PHY_STATE_TIME;
+}
+
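A hypothetical .get_next_update_time implementation, not from this patch, could poll faster while the link is down and keep the usual one-second cadence otherwise; HZ / 10 is an arbitrary choice for the sketch.

static unsigned int foo_get_next_update_time(struct phy_device *phydev)
{
	/* 'foo' is a made-up driver */
	return phydev->link ? HZ : HZ / 10;
}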
enum phy_state_work {
PHY_STATE_WORK_NONE,
PHY_STATE_WORK_ANEG,
@@ -1415,6 +1532,9 @@ static enum phy_state_work _phy_state_machine(struct phy_device *phydev)
case PHY_RUNNING:
err = phy_check_link_status(phydev);
func = &phy_check_link_status;
+
+ if (!err)
+ err = phy_update_stats(phydev);
break;
case PHY_CABLETEST:
err = phydev->drv->cable_test_get_status(phydev, &finished);
@@ -1434,9 +1554,24 @@ static enum phy_state_work _phy_state_machine(struct phy_device *phydev)
}
break;
case PHY_HALTED:
+ if (phydev->link) {
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ }
+ if (phydev->master_slave_state !=
+ MASTER_SLAVE_STATE_UNSUPPORTED)
+ phydev->master_slave_state =
+ MASTER_SLAVE_STATE_UNKNOWN;
+ phydev->mdix = ETH_TP_MDI_INVALID;
+ linkmode_zero(phydev->lp_advertising);
+ }
+ fallthrough;
case PHY_ERROR:
if (phydev->link) {
phydev->link = 0;
+ phydev->eee_active = false;
+ phydev->enable_tx_lpi = false;
phy_link_down(phydev);
}
state_work = PHY_STATE_WORK_SUSPEND;
@@ -1465,7 +1600,8 @@ static enum phy_state_work _phy_state_machine(struct phy_device *phydev)
* called from phy_disconnect() synchronously.
*/
if (phy_polling_mode(phydev) && phy_is_started(phydev))
- phy_queue_state_machine(phydev, PHY_STATE_TIME);
+ phy_queue_state_machine(phydev,
+ phy_get_next_update_time(phydev));
return state_work;
}
@@ -1589,6 +1725,134 @@ void phy_mac_interrupt(struct phy_device *phydev)
EXPORT_SYMBOL(phy_mac_interrupt);
/**
+ * phy_loopback - Configure loopback mode of PHY
+ * @phydev: target phy_device struct
+ * @enable: enable or disable loopback mode
+ * @speed: enable loopback mode with speed
+ *
+ * Configure the loopback mode of the PHY and signal link down and link up if
+ * the speed is changing.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int phy_loopback(struct phy_device *phydev, bool enable, int speed)
+{
+ bool link_up = false;
+ int ret = 0;
+
+ if (!phydev->drv)
+ return -EIO;
+
+ mutex_lock(&phydev->lock);
+
+ if (enable && phydev->loopback_enabled) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (!enable && !phydev->loopback_enabled) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (enable) {
+ /*
+		 * Link up is signaled with a defined speed. If the speed
+		 * changes, link down must be signaled first, followed by
+		 * link up at the new speed.
+ */
+ if (phydev->link && phydev->state == PHY_RUNNING) {
+ /* link is up and signaled */
+ if (speed && phydev->speed != speed) {
+ /* signal link down and up for new speed */
+ phydev->link = false;
+ phydev->state = PHY_NOLINK;
+ phy_link_down(phydev);
+
+ link_up = true;
+ }
+ } else {
+ /* link is not signaled */
+ if (speed) {
+ /* signal link up for new speed */
+ link_up = true;
+ }
+ }
+ }
+
+ if (phydev->drv->set_loopback)
+ ret = phydev->drv->set_loopback(phydev, enable, speed);
+ else
+ ret = genphy_loopback(phydev, enable, speed);
+
+ if (ret) {
+ if (enable) {
+ /* try to restore link if enabling loopback fails */
+ if (phydev->drv->set_loopback)
+ phydev->drv->set_loopback(phydev, false, 0);
+ else
+ genphy_loopback(phydev, false, 0);
+ }
+
+ goto out;
+ }
+
+ if (link_up) {
+ phydev->link = true;
+ phydev->state = PHY_RUNNING;
+ phy_link_up(phydev);
+ }
+
+ phydev->loopback_enabled = enable;
+
+out:
+ mutex_unlock(&phydev->lock);
+ return ret;
+}
+EXPORT_SYMBOL(phy_loopback);
+
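A usage sketch, not part of this patch: a self-test path could force loopback at a fixed speed, run its test, then restore normal operation. The choice of SPEED_1000 is an assumption for illustration; the patch itself passes 0 for the speed when disabling.

	int err;

	err = phy_loopback(phydev, true, SPEED_1000);
	if (err)
		return err;

	/* ... exercise the datapath while frames are looped back ... */

	err = phy_loopback(phydev, false, 0);	/* restore normal operation */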
+/**
+ * phy_eee_tx_clock_stop_capable() - indicate whether the MAC can stop tx clock
+ * @phydev: target phy_device struct
+ *
+ * Indicate whether the MAC can disable the transmit xMII clock while in LPI
+ * state. Returns 1 if the MAC may stop the transmit clock, 0 if the MAC must
+ * not stop the transmit clock, or negative error.
+ */
+int phy_eee_tx_clock_stop_capable(struct phy_device *phydev)
+{
+ int stat1;
+
+ stat1 = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
+ if (stat1 < 0)
+ return stat1;
+
+ return !!(stat1 & MDIO_PCS_STAT1_CLKSTOP_CAP);
+}
+EXPORT_SYMBOL_GPL(phy_eee_tx_clock_stop_capable);
+
+/**
+ * phy_eee_rx_clock_stop() - configure PHY receive clock in LPI
+ * @phydev: target phy_device struct
+ * @clk_stop_enable: flag to indicate whether the clock can be stopped
+ *
+ * Configure whether the PHY can disable its receive clock during LPI mode.
+ * See IEEE 802.3 sections 22.2.2.2, 35.2.2.10, and 45.2.3.1.4.
+ *
+ * Returns: 0 or negative error.
+ */
+int phy_eee_rx_clock_stop(struct phy_device *phydev, bool clk_stop_enable)
+{
+ /* Configure the PHY to stop receiving xMII
+ * clock while it is signaling LPI.
+ */
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1,
+ MDIO_PCS_CTRL1_CLKSTOP_EN,
+ clk_stop_enable ? MDIO_PCS_CTRL1_CLKSTOP_EN : 0);
+}
+EXPORT_SYMBOL_GPL(phy_eee_rx_clock_stop);
+
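A usage sketch, not part of this patch: a MAC driver bringing up EEE could first ask whether it may stop its transmit clock in LPI, then allow the PHY to stop its receive clock. Where the answer is stored is MAC-specific; the local variable below is only a placeholder.

	bool mac_may_stop_tx_clk;
	int ret;

	ret = phy_eee_tx_clock_stop_capable(phydev);
	if (ret < 0)
		return ret;
	mac_may_stop_tx_clk = ret;	/* remember this in MAC driver state */

	/* Let the PHY gate its receive clock while it signals LPI. */
	return phy_eee_rx_clock_stop(phydev, true);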
+/**
* phy_init_eee - init and check the EEE feature
* @phydev: target phy_device struct
* @clk_stop_enable: PHY may stop the clock during LPI
@@ -1605,18 +1869,14 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
if (!phydev->drv)
return -EIO;
- ret = genphy_c45_eee_is_active(phydev, NULL, NULL, NULL);
+ ret = genphy_c45_eee_is_active(phydev, NULL);
if (ret < 0)
return ret;
if (!ret)
return -EPROTONOSUPPORT;
if (clk_stop_enable)
- /* Configure the PHY to stop receiving xMII
- * clock while it is signaling LPI.
- */
- ret = phy_set_bits_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1,
- MDIO_PCS_CTRL1_CLKSTOP_EN);
+ ret = phy_eee_rx_clock_stop(phydev, true);
return ret < 0 ? ret : 0;
}
@@ -1649,8 +1909,8 @@ EXPORT_SYMBOL(phy_get_eee_err);
* @phydev: target phy_device struct
* @data: ethtool_keee data
*
- * Description: reports the Supported/Advertisement/LP Advertisement
- * capabilities, etc.
+ * Description: get the current EEE settings, filling in all members of
+ * @data.
*/
int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_keee *data)
{
@@ -1672,7 +1932,7 @@ EXPORT_SYMBOL(phy_ethtool_get_eee);
* phy_ethtool_set_eee_noneg - Adjusts MAC LPI configuration without PHY
* renegotiation
* @phydev: pointer to the target PHY device structure
- * @data: pointer to the ethtool_keee structure containing the new EEE settings
+ * @old_cfg: pointer to the eee_config structure containing the old EEE settings
*
* This function updates the Energy Efficient Ethernet (EEE) configuration
* for cases where only the MAC's Low Power Idle (LPI) configuration changes,
@@ -1683,18 +1943,23 @@ EXPORT_SYMBOL(phy_ethtool_get_eee);
* configuration.
*/
static void phy_ethtool_set_eee_noneg(struct phy_device *phydev,
- struct ethtool_keee *data)
+ const struct eee_config *old_cfg)
{
- if (phydev->eee_cfg.tx_lpi_enabled != data->tx_lpi_enabled ||
- phydev->eee_cfg.tx_lpi_timer != data->tx_lpi_timer) {
- eee_to_eeecfg(&phydev->eee_cfg, data);
- phydev->enable_tx_lpi = eeecfg_mac_can_tx_lpi(&phydev->eee_cfg);
- if (phydev->link) {
- phydev->link = false;
- phy_link_down(phydev);
- phydev->link = true;
- phy_link_up(phydev);
- }
+ bool enable_tx_lpi;
+
+ if (!phydev->link)
+ return;
+
+ enable_tx_lpi = phydev->eee_cfg.tx_lpi_enabled && phydev->eee_active;
+
+ if (phydev->enable_tx_lpi != enable_tx_lpi ||
+ phydev->eee_cfg.tx_lpi_timer != old_cfg->tx_lpi_timer) {
+ phydev->enable_tx_lpi = false;
+ phydev->link = false;
+ phy_link_down(phydev);
+ phydev->enable_tx_lpi = enable_tx_lpi;
+ phydev->link = true;
+ phy_link_up(phydev);
}
}
@@ -1707,18 +1972,23 @@ static void phy_ethtool_set_eee_noneg(struct phy_device *phydev,
*/
int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_keee *data)
{
+ struct eee_config old_cfg;
int ret;
if (!phydev->drv)
return -EIO;
mutex_lock(&phydev->lock);
+
+ old_cfg = phydev->eee_cfg;
+ eee_to_eeecfg(&phydev->eee_cfg, data);
+
ret = genphy_c45_ethtool_set_eee(phydev, data);
- if (ret >= 0) {
- if (ret == 0)
- phy_ethtool_set_eee_noneg(phydev, data);
- eee_to_eeecfg(&phydev->eee_cfg, data);
- }
+ if (ret == 0)
+ phy_ethtool_set_eee_noneg(phydev, &old_cfg);
+ else if (ret < 0)
+ phydev->eee_cfg = old_cfg;
+
mutex_unlock(&phydev->lock);
return ret < 0 ? ret : 0;
diff --git a/drivers/net/phy/phy_caps.c b/drivers/net/phy/phy_caps.c
new file mode 100644
index 000000000000..3a05982b39bf
--- /dev/null
+++ b/drivers/net/phy/phy_caps.c
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/ethtool.h>
+#include <linux/linkmode.h>
+#include <linux/phy.h>
+
+#include "phy-caps.h"
+
+static struct link_capabilities link_caps[__LINK_CAPA_MAX] __ro_after_init = {
+ { SPEED_10, DUPLEX_HALF, {0} }, /* LINK_CAPA_10HD */
+ { SPEED_10, DUPLEX_FULL, {0} }, /* LINK_CAPA_10FD */
+ { SPEED_100, DUPLEX_HALF, {0} }, /* LINK_CAPA_100HD */
+ { SPEED_100, DUPLEX_FULL, {0} }, /* LINK_CAPA_100FD */
+ { SPEED_1000, DUPLEX_HALF, {0} }, /* LINK_CAPA_1000HD */
+ { SPEED_1000, DUPLEX_FULL, {0} }, /* LINK_CAPA_1000FD */
+ { SPEED_2500, DUPLEX_FULL, {0} }, /* LINK_CAPA_2500FD */
+ { SPEED_5000, DUPLEX_FULL, {0} }, /* LINK_CAPA_5000FD */
+ { SPEED_10000, DUPLEX_FULL, {0} }, /* LINK_CAPA_10000FD */
+ { SPEED_20000, DUPLEX_FULL, {0} }, /* LINK_CAPA_20000FD */
+ { SPEED_25000, DUPLEX_FULL, {0} }, /* LINK_CAPA_25000FD */
+ { SPEED_40000, DUPLEX_FULL, {0} }, /* LINK_CAPA_40000FD */
+ { SPEED_50000, DUPLEX_FULL, {0} }, /* LINK_CAPA_50000FD */
+ { SPEED_56000, DUPLEX_FULL, {0} }, /* LINK_CAPA_56000FD */
+ { SPEED_100000, DUPLEX_FULL, {0} }, /* LINK_CAPA_100000FD */
+ { SPEED_200000, DUPLEX_FULL, {0} }, /* LINK_CAPA_200000FD */
+ { SPEED_400000, DUPLEX_FULL, {0} }, /* LINK_CAPA_400000FD */
+ { SPEED_800000, DUPLEX_FULL, {0} }, /* LINK_CAPA_800000FD */
+ { SPEED_1600000, DUPLEX_FULL, {0} }, /* LINK_CAPA_1600000FD */
+};
+
+static int speed_duplex_to_capa(int speed, unsigned int duplex)
+{
+ if (duplex == DUPLEX_UNKNOWN ||
+ (speed > SPEED_1000 && duplex != DUPLEX_FULL))
+ return -EINVAL;
+
+ switch (speed) {
+ case SPEED_10: return duplex == DUPLEX_FULL ?
+ LINK_CAPA_10FD : LINK_CAPA_10HD;
+ case SPEED_100: return duplex == DUPLEX_FULL ?
+ LINK_CAPA_100FD : LINK_CAPA_100HD;
+ case SPEED_1000: return duplex == DUPLEX_FULL ?
+ LINK_CAPA_1000FD : LINK_CAPA_1000HD;
+ case SPEED_2500: return LINK_CAPA_2500FD;
+ case SPEED_5000: return LINK_CAPA_5000FD;
+ case SPEED_10000: return LINK_CAPA_10000FD;
+ case SPEED_20000: return LINK_CAPA_20000FD;
+ case SPEED_25000: return LINK_CAPA_25000FD;
+ case SPEED_40000: return LINK_CAPA_40000FD;
+ case SPEED_50000: return LINK_CAPA_50000FD;
+ case SPEED_56000: return LINK_CAPA_56000FD;
+ case SPEED_100000: return LINK_CAPA_100000FD;
+ case SPEED_200000: return LINK_CAPA_200000FD;
+ case SPEED_400000: return LINK_CAPA_400000FD;
+ case SPEED_800000: return LINK_CAPA_800000FD;
+ case SPEED_1600000: return LINK_CAPA_1600000FD;
+ }
+
+ return -EINVAL;
+}
+
+#define for_each_link_caps_asc_speed(cap) \
+ for (cap = link_caps; cap < &link_caps[__LINK_CAPA_MAX]; cap++)
+
+#define for_each_link_caps_desc_speed(cap) \
+ for (cap = &link_caps[__LINK_CAPA_MAX - 1]; cap >= link_caps; cap--)
+
+/**
+ * phy_caps_init() - Initializes the link_caps array from the link_mode_params.
+ *
+ * Returns: 0 if phy caps init was successful, -EINVAL if we found an
+ * unexpected linkmode setting that requires LINK_CAPS update.
+ */
+int __init phy_caps_init(void)
+{
+ const struct link_mode_info *linkmode;
+ int i, capa;
+
+ /* Fill the caps array from net/ethtool/common.c */
+ for (i = 0; i < __ETHTOOL_LINK_MODE_MASK_NBITS; i++) {
+ linkmode = &link_mode_params[i];
+ capa = speed_duplex_to_capa(linkmode->speed, linkmode->duplex);
+
+ if (capa < 0) {
+ if (linkmode->speed != SPEED_UNKNOWN) {
+ pr_err("Unknown speed %d, please update LINK_CAPS\n",
+ linkmode->speed);
+ return -EINVAL;
+ }
+ continue;
+ }
+
+ __set_bit(i, link_caps[capa].linkmodes);
+ }
+
+ return 0;
+}
+
+/**
+ * phy_caps_speeds() - Fill an array of supported SPEED_* values for given modes
+ * @speeds: Output array to store the speeds list into
+ * @size: Size of the output array
+ * @linkmodes: Linkmodes to get the speeds from
+ *
+ * Fills the speeds array with all possible speeds that can be achieved with
+ * the specified linkmodes.
+ *
+ * Returns: The number of speeds filled into the array. If the input array is
+ * too small to store all speeds, it is filled as much as possible.
+ */
+size_t phy_caps_speeds(unsigned int *speeds, size_t size,
+ unsigned long *linkmodes)
+{
+ struct link_capabilities *lcap;
+ size_t count = 0;
+
+ for_each_link_caps_asc_speed(lcap) {
+ if (linkmode_intersects(lcap->linkmodes, linkmodes) &&
+ (count == 0 || speeds[count - 1] != lcap->speed)) {
+ speeds[count++] = lcap->speed;
+ if (count >= size)
+ break;
+ }
+ }
+
+ return count;
+}
+
+/**
+ * phy_caps_lookup_by_linkmode() - Lookup the fastest matching link_capabilities
+ * @linkmodes: Linkmodes to match against
+ *
+ * Returns: The highest-speed link_capabilities that intersects the given
+ * linkmodes. In case several DUPLEX_ options exist at that speed,
+ * DUPLEX_FULL is matched first. NULL is returned if there is no match.
+ */
+const struct link_capabilities *
+phy_caps_lookup_by_linkmode(const unsigned long *linkmodes)
+{
+ struct link_capabilities *lcap;
+
+ for_each_link_caps_desc_speed(lcap)
+ if (linkmode_intersects(lcap->linkmodes, linkmodes))
+ return lcap;
+
+ return NULL;
+}
+
+/**
+ * phy_caps_lookup_by_linkmode_rev() - Lookup the slowest matching link_capabilities
+ * @linkmodes: Linkmodes to match against
+ * @fdx_only: Full duplex match only when set
+ *
+ * Returns: The lowest-speed link_capabilities that intersects the given
+ * linkmodes. When set, fdx_only will ignore half-duplex matches.
+ * NULL is returned if there is no match.
+ */
+const struct link_capabilities *
+phy_caps_lookup_by_linkmode_rev(const unsigned long *linkmodes, bool fdx_only)
+{
+ struct link_capabilities *lcap;
+
+ for_each_link_caps_asc_speed(lcap) {
+ if (fdx_only && lcap->duplex != DUPLEX_FULL)
+ continue;
+
+ if (linkmode_intersects(lcap->linkmodes, linkmodes))
+ return lcap;
+ }
+
+ return NULL;
+}
+
+/**
+ * phy_caps_lookup() - Lookup capabilities by speed/duplex that matches a mask
+ * @speed: Speed to match
+ * @duplex: Duplex to match
+ * @supported: Mask of linkmodes to match
+ * @exact: Perform an exact match or not.
+ *
+ * Lookup a link_capabilities entry that intersects the supported linkmodes mask,
+ * and that matches the passed speed and duplex.
+ *
+ * When @exact is set, an exact match is performed on speed and duplex, meaning
+ * that if the linkmodes for the given speed and duplex intersect the supported
+ * mask, this capability is returned, otherwise we don't have a match and return
+ * NULL.
+ *
+ * When @exact is not set, we return either an exact match, matching
+ * capabilities at a lower speed, or the lowest matching speed, or NULL.
+ *
+ * Non-exact matches will try to return an exact speed and duplex match, but
+ * may return matching capabilities with the same speed but a different duplex.
+ *
+ * Returns: a matched link_capabilities according to the above process, NULL
+ * otherwise.
+ */
+const struct link_capabilities *
+phy_caps_lookup(int speed, unsigned int duplex, const unsigned long *supported,
+ bool exact)
+{
+ const struct link_capabilities *lcap, *match = NULL, *last = NULL;
+
+ for_each_link_caps_desc_speed(lcap) {
+ if (linkmode_intersects(lcap->linkmodes, supported)) {
+ last = lcap;
+			/* Exact match on speed and duplex */
+ if (lcap->speed == speed && lcap->duplex == duplex) {
+ return lcap;
+ } else if (!exact) {
+ if (!match && lcap->speed <= speed)
+ match = lcap;
+
+ if (lcap->speed < speed)
+ break;
+ }
+ }
+ }
+
+ if (!match && !exact)
+ match = last;
+
+ return match;
+}
+EXPORT_SYMBOL_GPL(phy_caps_lookup);
+
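As a usage sketch, mirroring the phy_sanitize_settings() change earlier in this patch, a non-exact lookup clamps a requested speed/duplex to something the supported mask can actually do:

	const struct link_capabilities *c;

	c = phy_caps_lookup(phydev->speed, phydev->duplex,
			    phydev->supported, false);
	if (c) {
		phydev->speed = c->speed;
		phydev->duplex = c->duplex;
	} else {
		phydev->speed = SPEED_UNKNOWN;
		phydev->duplex = DUPLEX_UNKNOWN;
	}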
+/**
+ * phy_caps_linkmode_max_speed() - Clamp a linkmodes set to a max speed
+ * @max_speed: Speed limit for the linkmode set
+ * @linkmodes: Linkmodes to limit
+ */
+void phy_caps_linkmode_max_speed(u32 max_speed, unsigned long *linkmodes)
+{
+ struct link_capabilities *lcap;
+
+ for_each_link_caps_desc_speed(lcap)
+ if (lcap->speed > max_speed)
+ linkmode_andnot(linkmodes, linkmodes, lcap->linkmodes);
+ else
+ break;
+}
+
+/**
+ * phy_caps_valid() - Validate a linkmodes set against a given speed and duplex
+ * @speed: input speed to validate
+ * @duplex: input duplex to validate. Passing DUPLEX_UNKNOWN is never valid
+ * @linkmodes: The linkmodes to validate
+ *
+ * Returns: True if at least one of the linkmodes in @linkmodes can function at
+ * the given speed and duplex, false otherwise.
+ */
+bool phy_caps_valid(int speed, int duplex, const unsigned long *linkmodes)
+{
+ int capa = speed_duplex_to_capa(speed, duplex);
+
+ if (capa < 0)
+ return false;
+
+ return linkmode_intersects(link_caps[capa].linkmodes, linkmodes);
+}
+
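phy_caps_valid() is phylib-internal; phy_check_valid(), updated earlier in this patch, is the exported wrapper around it. A sketch of how a caller inside phylib might reject an unsupported forced mode; the ethtool ksettings variable 'cmd' is assumed for illustration.

	if (!phy_caps_valid(cmd->base.speed, cmd->base.duplex,
			    phydev->supported))
		return -EINVAL;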
+/**
+ * phy_caps_linkmodes() - Convert a bitfield of capabilities into linkmodes
+ * @caps: The list of caps, each bit corresponding to a LINK_CAPA value
+ * @linkmodes: The set of linkmodes to fill. Must be previously initialized.
+ */
+void phy_caps_linkmodes(unsigned long caps, unsigned long *linkmodes)
+{
+ unsigned long capa;
+
+ for_each_set_bit(capa, &caps, __LINK_CAPA_MAX)
+ linkmode_or(linkmodes, linkmodes, link_caps[capa].linkmodes);
+}
+EXPORT_SYMBOL_GPL(phy_caps_linkmodes);
+
+/**
+ * phy_caps_from_interface() - Get the link capa from a given PHY interface
+ * @interface: The PHY interface we want to get the possible Speed/Duplex from
+ *
+ * Returns: A bitmask of LINK_CAPA_xxx values that can be achieved with the
+ * provided interface.
+ */
+unsigned long phy_caps_from_interface(phy_interface_t interface)
+{
+ unsigned long link_caps = 0;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_USXGMII:
+ link_caps |= BIT(LINK_CAPA_10000FD) | BIT(LINK_CAPA_5000FD);
+ fallthrough;
+
+ case PHY_INTERFACE_MODE_10G_QXGMII:
+ link_caps |= BIT(LINK_CAPA_2500FD);
+ fallthrough;
+
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_PSGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_QUSGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_GMII:
+ link_caps |= BIT(LINK_CAPA_1000HD) | BIT(LINK_CAPA_1000FD);
+ fallthrough;
+
+ case PHY_INTERFACE_MODE_REVRMII:
+ case PHY_INTERFACE_MODE_RMII:
+ case PHY_INTERFACE_MODE_SMII:
+ case PHY_INTERFACE_MODE_REVMII:
+ case PHY_INTERFACE_MODE_MII:
+ link_caps |= BIT(LINK_CAPA_10HD) | BIT(LINK_CAPA_10FD);
+ fallthrough;
+
+ case PHY_INTERFACE_MODE_100BASEX:
+ link_caps |= BIT(LINK_CAPA_100HD) | BIT(LINK_CAPA_100FD);
+ break;
+
+ case PHY_INTERFACE_MODE_MIILITE:
+ link_caps |= BIT(LINK_CAPA_10FD) | BIT(LINK_CAPA_100FD);
+ break;
+
+ case PHY_INTERFACE_MODE_TBI:
+ case PHY_INTERFACE_MODE_MOCA:
+ case PHY_INTERFACE_MODE_RTBI:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ link_caps |= BIT(LINK_CAPA_1000HD);
+ fallthrough;
+ case PHY_INTERFACE_MODE_1000BASEKX:
+ case PHY_INTERFACE_MODE_TRGMII:
+ link_caps |= BIT(LINK_CAPA_1000FD);
+ break;
+
+ case PHY_INTERFACE_MODE_2500BASEX:
+ link_caps |= BIT(LINK_CAPA_2500FD);
+ break;
+
+ case PHY_INTERFACE_MODE_5GBASER:
+ link_caps |= BIT(LINK_CAPA_5000FD);
+ break;
+
+ case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_10GKR:
+ link_caps |= BIT(LINK_CAPA_10000FD);
+ break;
+
+ case PHY_INTERFACE_MODE_25GBASER:
+ link_caps |= BIT(LINK_CAPA_25000FD);
+ break;
+
+ case PHY_INTERFACE_MODE_XLGMII:
+ link_caps |= BIT(LINK_CAPA_40000FD);
+ break;
+
+ case PHY_INTERFACE_MODE_50GBASER:
+ case PHY_INTERFACE_MODE_LAUI:
+ link_caps |= BIT(LINK_CAPA_50000FD);
+ break;
+
+ case PHY_INTERFACE_MODE_100GBASEP:
+ link_caps |= BIT(LINK_CAPA_100000FD);
+ break;
+
+ case PHY_INTERFACE_MODE_INTERNAL:
+ link_caps |= LINK_CAPA_ALL;
+ break;
+
+ case PHY_INTERFACE_MODE_NA:
+ case PHY_INTERFACE_MODE_MAX:
+ break;
+ }
+
+ return link_caps;
+}
+EXPORT_SYMBOL_GPL(phy_caps_from_interface);
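A sketch, not part of this patch, of how the two helpers above compose: convert an interface mode into a capability mask, then expand that mask into linkmodes.

	__ETHTOOL_DECLARE_LINK_MODE_MASK(modes);
	unsigned long caps;

	linkmode_zero(modes);
	caps = phy_caps_from_interface(PHY_INTERFACE_MODE_2500BASEX);
	phy_caps_linkmodes(caps, modes);
	/* 'modes' now holds every linkmode that can run 2500Mbit/s full duplex */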
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 563497a3274c..81984d4ebb7c 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -32,6 +32,7 @@
#include <linux/phy_link_topology.h>
#include <linux/pse-pd/pse.h>
#include <linux/property.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/rtnetlink.h>
#include <linux/sfp.h>
#include <linux/skbuff.h>
@@ -40,10 +41,31 @@
#include <linux/uaccess.h>
#include <linux/unistd.h>
+#include "phylib-internal.h"
+#include "phy-caps.h"
+
MODULE_DESCRIPTION("PHY library");
MODULE_AUTHOR("Andy Fleming");
MODULE_LICENSE("GPL");
+#define PHY_ANY_ID "MATCH ANY PHY"
+#define PHY_ANY_UID 0xffffffff
+
+struct phy_fixup {
+ struct list_head list;
+ char bus_id[MII_BUS_ID_SIZE + 3];
+ u32 phy_uid;
+ u32 phy_uid_mask;
+ int (*run)(struct phy_device *phydev);
+};
+
+static struct phy_driver genphy_c45_driver = {
+ .phy_id = 0xffffffff,
+ .phy_id_mask = 0xffffffff,
+ .name = "Generic Clause 45 PHY",
+ .read_status = genphy_c45_read_status,
+};
+
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_basic_features);
@@ -59,15 +81,9 @@ EXPORT_SYMBOL_GPL(phy_gbit_features);
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_gbit_fibre_features);
-__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init;
-EXPORT_SYMBOL_GPL(phy_gbit_all_ports_features);
-
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_10gbit_features);
-__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init;
-EXPORT_SYMBOL_GPL(phy_10gbit_fec_features);
-
const int phy_basic_ports_array[3] = {
ETHTOOL_LINK_MODE_Autoneg_BIT,
ETHTOOL_LINK_MODE_TP_BIT,
@@ -75,12 +91,7 @@ const int phy_basic_ports_array[3] = {
};
EXPORT_SYMBOL_GPL(phy_basic_ports_array);
-const int phy_fibre_port_array[1] = {
- ETHTOOL_LINK_MODE_FIBRE_BIT,
-};
-EXPORT_SYMBOL_GPL(phy_fibre_port_array);
-
-const int phy_all_ports_features_array[7] = {
+static const int phy_all_ports_features_array[7] __initconst = {
ETHTOOL_LINK_MODE_Autoneg_BIT,
ETHTOOL_LINK_MODE_TP_BIT,
ETHTOOL_LINK_MODE_MII_BIT,
@@ -89,55 +100,31 @@ const int phy_all_ports_features_array[7] = {
ETHTOOL_LINK_MODE_BNC_BIT,
ETHTOOL_LINK_MODE_Backplane_BIT,
};
-EXPORT_SYMBOL_GPL(phy_all_ports_features_array);
-const int phy_10_100_features_array[4] = {
+static const int phy_10_100_features_array[4] __initconst = {
ETHTOOL_LINK_MODE_10baseT_Half_BIT,
ETHTOOL_LINK_MODE_10baseT_Full_BIT,
ETHTOOL_LINK_MODE_100baseT_Half_BIT,
ETHTOOL_LINK_MODE_100baseT_Full_BIT,
};
-EXPORT_SYMBOL_GPL(phy_10_100_features_array);
-const int phy_basic_t1_features_array[3] = {
+static const int phy_basic_t1_features_array[3] __initconst = {
ETHTOOL_LINK_MODE_TP_BIT,
ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
ETHTOOL_LINK_MODE_100baseT1_Full_BIT,
};
-EXPORT_SYMBOL_GPL(phy_basic_t1_features_array);
-const int phy_basic_t1s_p2mp_features_array[2] = {
+static const int phy_basic_t1s_p2mp_features_array[2] __initconst = {
ETHTOOL_LINK_MODE_TP_BIT,
ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT,
};
-EXPORT_SYMBOL_GPL(phy_basic_t1s_p2mp_features_array);
-const int phy_gbit_features_array[2] = {
+static const int phy_gbit_features_array[2] __initconst = {
ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
};
-EXPORT_SYMBOL_GPL(phy_gbit_features_array);
-
-const int phy_10gbit_features_array[1] = {
- ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
-};
-EXPORT_SYMBOL_GPL(phy_10gbit_features_array);
-
-static const int phy_10gbit_fec_features_array[1] = {
- ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
-};
-
-__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
-EXPORT_SYMBOL_GPL(phy_10gbit_full_features);
-
-static const int phy_10gbit_full_features_array[] = {
- ETHTOOL_LINK_MODE_10baseT_Full_BIT,
- ETHTOOL_LINK_MODE_100baseT_Full_BIT,
- ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
- ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
-};
-static const int phy_eee_cap1_features_array[] = {
+static const int phy_eee_cap1_features_array[] __initconst = {
ETHTOOL_LINK_MODE_100baseT_Full_BIT,
ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
@@ -149,7 +136,7 @@ static const int phy_eee_cap1_features_array[] = {
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap1_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_eee_cap1_features);
-static const int phy_eee_cap2_features_array[] = {
+static const int phy_eee_cap2_features_array[] __initconst = {
ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
};
@@ -157,7 +144,7 @@ static const int phy_eee_cap2_features_array[] = {
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap2_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_eee_cap2_features);
-static void features_init(void)
+static void __init features_init(void)
{
/* 10/100 half/full*/
linkmode_set_bit_array(phy_basic_ports_array,
@@ -198,20 +185,7 @@ static void features_init(void)
linkmode_set_bit_array(phy_gbit_features_array,
ARRAY_SIZE(phy_gbit_features_array),
phy_gbit_fibre_features);
- linkmode_set_bit_array(phy_fibre_port_array,
- ARRAY_SIZE(phy_fibre_port_array),
- phy_gbit_fibre_features);
-
- /* 10/100 half/full + 1000 half/full + TP/MII/FIBRE/AUI/BNC/Backplane*/
- linkmode_set_bit_array(phy_all_ports_features_array,
- ARRAY_SIZE(phy_all_ports_features_array),
- phy_gbit_all_ports_features);
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- phy_gbit_all_ports_features);
- linkmode_set_bit_array(phy_gbit_features_array,
- ARRAY_SIZE(phy_gbit_features_array),
- phy_gbit_all_ports_features);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phy_gbit_fibre_features);
/* 10/100 half/full + 1000 half/full + 10G full*/
linkmode_set_bit_array(phy_all_ports_features_array,
@@ -223,21 +197,9 @@ static void features_init(void)
linkmode_set_bit_array(phy_gbit_features_array,
ARRAY_SIZE(phy_gbit_features_array),
phy_10gbit_features);
- linkmode_set_bit_array(phy_10gbit_features_array,
- ARRAY_SIZE(phy_10gbit_features_array),
- phy_10gbit_features);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ phy_10gbit_features);
- /* 10/100/1000/10G full */
- linkmode_set_bit_array(phy_all_ports_features_array,
- ARRAY_SIZE(phy_all_ports_features_array),
- phy_10gbit_full_features);
- linkmode_set_bit_array(phy_10gbit_full_features_array,
- ARRAY_SIZE(phy_10gbit_full_features_array),
- phy_10gbit_full_features);
- /* 10G FEC only */
- linkmode_set_bit_array(phy_10gbit_fec_features_array,
- ARRAY_SIZE(phy_10gbit_fec_features_array),
- phy_10gbit_fec_features);
linkmode_set_bit_array(phy_eee_cap1_features_array,
ARRAY_SIZE(phy_eee_cap1_features_array),
phy_eee_cap1_features);
@@ -289,6 +251,55 @@ static bool phy_drv_wol_enabled(struct phy_device *phydev)
return wol.wolopts != 0;
}
+bool phy_may_wakeup(struct phy_device *phydev)
+{
+ /* If the PHY is using driver-model based wakeup, use that state. */
+ if (phy_can_wakeup(phydev))
+ return device_may_wakeup(&phydev->mdio.dev);
+
+ return phy_drv_wol_enabled(phydev);
+}
+EXPORT_SYMBOL_GPL(phy_may_wakeup);
+
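A usage sketch, not part of this patch: a MAC driver's suspend path might only power the PHY down when it is not armed as a wakeup source, whether that state comes from the driver model or from driver-specific WoL.

	if (!phy_may_wakeup(phydev))
		phy_suspend(phydev);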
+static void phy_link_change(struct phy_device *phydev, bool up)
+{
+ struct net_device *netdev = phydev->attached_dev;
+
+ if (up)
+ netif_carrier_on(netdev);
+ else
+ netif_carrier_off(netdev);
+ phydev->adjust_link(netdev);
+ if (phydev->mii_ts && phydev->mii_ts->link_state)
+ phydev->mii_ts->link_state(phydev->mii_ts, phydev);
+}
+
+/**
+ * phy_uses_state_machine - test whether consumer driver uses PAL state machine
+ * @phydev: the target PHY device structure
+ *
+ * Ultimately, this aims to indirectly determine whether the PHY is attached
+ * to a consumer which uses the state machine by calling phy_start() and
+ * phy_stop().
+ *
+ * When the PHY driver consumer uses phylib, it must have previously called
+ * phy_connect_direct() or one of its derivatives, so that phy_prepare_link()
+ * has set up a hook for monitoring state changes.
+ *
+ * When the PHY driver is used by the MAC driver consumer through phylink (the
+ * only other provider of a phy_link_change() method), using the PHY state
+ * machine is not optional.
+ *
+ * Return: true if consumer calls phy_start() and phy_stop(), false otherwise.
+ */
+static bool phy_uses_state_machine(struct phy_device *phydev)
+{
+ if (phydev->phy_link_change == phy_link_change)
+ return phydev->attached_dev && phydev->adjust_link;
+
+ return !!phydev->phy_link_change;
+}
+
static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
{
struct device_driver *drv = phydev->mdio.dev.driver;
@@ -301,7 +312,7 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
	/* If the PHY on the mdio bus is not attached but has WOL enabled
* we cannot suspend the PHY.
*/
- if (!netdev && phy_drv_wol_enabled(phydev))
+ if (!netdev && phy_may_wakeup(phydev))
return false;
/* PHY not attached? May suspend if the PHY has not already been
@@ -355,7 +366,7 @@ static __maybe_unused int mdio_bus_phy_suspend(struct device *dev)
* may call phy routines that try to grab the same lock, and that may
* lead to a deadlock.
*/
- if (phydev->attached_dev && phydev->adjust_link)
+ if (phy_uses_state_machine(phydev))
phy_stop_machine(phydev);
if (!mdio_bus_phy_may_suspend(phydev))
@@ -409,7 +420,7 @@ no_resume:
}
}
- if (phydev->attached_dev && phydev->adjust_link)
+ if (phy_uses_state_machine(phydev))
phy_start_machine(phydev);
return 0;
@@ -427,8 +438,8 @@ static SIMPLE_DEV_PM_OPS(mdio_bus_phy_pm_ops, mdio_bus_phy_suspend,
* comparison
* @run: The actual code to be run when a matching PHY is found
*/
-int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
- int (*run)(struct phy_device *))
+static int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
+ int (*run)(struct phy_device *))
{
struct phy_fixup *fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
@@ -446,7 +457,6 @@ int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
return 0;
}
-EXPORT_SYMBOL(phy_register_fixup);
/* Registers a fixup to be run on any PHY with the UID in phy_uid */
int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask,
@@ -549,20 +559,26 @@ static int phy_scan_fixups(struct phy_device *phydev)
return 0;
}
-static int phy_bus_match(struct device *dev, const struct device_driver *drv)
+/**
+ * genphy_match_phy_device - match a PHY device with a PHY driver
+ * @phydev: target phy_device struct
+ * @phydrv: target phy_driver struct
+ *
+ * Description: Checks whether the given PHY device matches the specified
+ * PHY driver. For Clause 45 PHYs, iterates over the available device
+ * identifiers and compares them against the driver's expected PHY ID,
+ * applying the provided mask. For Clause 22 PHYs, a direct ID comparison
+ * is performed.
+ *
+ * Return: 1 if the PHY device matches the driver, 0 otherwise.
+ */
+int genphy_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
- struct phy_device *phydev = to_phy_device(dev);
- const struct phy_driver *phydrv = to_phy_driver(drv);
- const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids);
- int i;
-
- if (!(phydrv->mdiodrv.flags & MDIO_DEVICE_IS_PHY))
- return 0;
-
- if (phydrv->match_phy_device)
- return phydrv->match_phy_device(phydev);
-
if (phydev->is_c45) {
+ const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids);
+ int i;
+
for (i = 1; i < num_ids; i++) {
if (phydev->c45_ids.device_ids[i] == 0xffffffff)
continue;
@@ -571,11 +587,27 @@ static int phy_bus_match(struct device *dev, const struct device_driver *drv)
phydrv->phy_id, phydrv->phy_id_mask))
return 1;
}
+
return 0;
- } else {
- return phy_id_compare(phydev->phy_id, phydrv->phy_id,
- phydrv->phy_id_mask);
}
+
+ return phy_id_compare(phydev->phy_id, phydrv->phy_id,
+ phydrv->phy_id_mask);
+}
+EXPORT_SYMBOL_GPL(genphy_match_phy_device);
+
+static int phy_bus_match(struct device *dev, const struct device_driver *drv)
+{
+ struct phy_device *phydev = to_phy_device(dev);
+ const struct phy_driver *phydrv = to_phy_driver(drv);
+
+ if (!(phydrv->mdiodrv.flags & MDIO_DEVICE_IS_PHY))
+ return 0;
+
+ if (phydrv->match_phy_device)
+ return phydrv->match_phy_device(phydev, phydrv);
+
+ return genphy_match_phy_device(phydev, phydrv);
}
static ssize_t
@@ -593,7 +625,7 @@ phy_interface_show(struct device *dev, struct device_attribute *attr, char *buf)
struct phy_device *phydev = to_phy_device(dev);
const char *mode = NULL;
- if (phy_is_internal(phydev))
+ if (phydev->is_internal)
mode = "internal";
else
mode = phy_modes(phydev->interface);
@@ -629,11 +661,119 @@ static struct attribute *phy_dev_attrs[] = {
&dev_attr_phy_dev_flags.attr,
NULL,
};
-ATTRIBUTE_GROUPS(phy_dev);
+
+static const struct attribute_group phy_dev_group = {
+ .attrs = phy_dev_attrs,
+};
+
+#define MMD_DEVICE_ID_ATTR(n) \
+static ssize_t mmd##n##_device_id_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct phy_device *phydev = to_phy_device(dev); \
+ return sysfs_emit(buf, "0x%.8lx\n", \
+ (unsigned long)phydev->c45_ids.device_ids[n]); \
+} \
+static DEVICE_ATTR_RO(mmd##n##_device_id)
+
+MMD_DEVICE_ID_ATTR(1);
+MMD_DEVICE_ID_ATTR(2);
+MMD_DEVICE_ID_ATTR(3);
+MMD_DEVICE_ID_ATTR(4);
+MMD_DEVICE_ID_ATTR(5);
+MMD_DEVICE_ID_ATTR(6);
+MMD_DEVICE_ID_ATTR(7);
+MMD_DEVICE_ID_ATTR(8);
+MMD_DEVICE_ID_ATTR(9);
+MMD_DEVICE_ID_ATTR(10);
+MMD_DEVICE_ID_ATTR(11);
+MMD_DEVICE_ID_ATTR(12);
+MMD_DEVICE_ID_ATTR(13);
+MMD_DEVICE_ID_ATTR(14);
+MMD_DEVICE_ID_ATTR(15);
+MMD_DEVICE_ID_ATTR(16);
+MMD_DEVICE_ID_ATTR(17);
+MMD_DEVICE_ID_ATTR(18);
+MMD_DEVICE_ID_ATTR(19);
+MMD_DEVICE_ID_ATTR(20);
+MMD_DEVICE_ID_ATTR(21);
+MMD_DEVICE_ID_ATTR(22);
+MMD_DEVICE_ID_ATTR(23);
+MMD_DEVICE_ID_ATTR(24);
+MMD_DEVICE_ID_ATTR(25);
+MMD_DEVICE_ID_ATTR(26);
+MMD_DEVICE_ID_ATTR(27);
+MMD_DEVICE_ID_ATTR(28);
+MMD_DEVICE_ID_ATTR(29);
+MMD_DEVICE_ID_ATTR(30);
+MMD_DEVICE_ID_ATTR(31);
+
+static struct attribute *phy_mmd_attrs[] = {
+ &dev_attr_mmd1_device_id.attr,
+ &dev_attr_mmd2_device_id.attr,
+ &dev_attr_mmd3_device_id.attr,
+ &dev_attr_mmd4_device_id.attr,
+ &dev_attr_mmd5_device_id.attr,
+ &dev_attr_mmd6_device_id.attr,
+ &dev_attr_mmd7_device_id.attr,
+ &dev_attr_mmd8_device_id.attr,
+ &dev_attr_mmd9_device_id.attr,
+ &dev_attr_mmd10_device_id.attr,
+ &dev_attr_mmd11_device_id.attr,
+ &dev_attr_mmd12_device_id.attr,
+ &dev_attr_mmd13_device_id.attr,
+ &dev_attr_mmd14_device_id.attr,
+ &dev_attr_mmd15_device_id.attr,
+ &dev_attr_mmd16_device_id.attr,
+ &dev_attr_mmd17_device_id.attr,
+ &dev_attr_mmd18_device_id.attr,
+ &dev_attr_mmd19_device_id.attr,
+ &dev_attr_mmd20_device_id.attr,
+ &dev_attr_mmd21_device_id.attr,
+ &dev_attr_mmd22_device_id.attr,
+ &dev_attr_mmd23_device_id.attr,
+ &dev_attr_mmd24_device_id.attr,
+ &dev_attr_mmd25_device_id.attr,
+ &dev_attr_mmd26_device_id.attr,
+ &dev_attr_mmd27_device_id.attr,
+ &dev_attr_mmd28_device_id.attr,
+ &dev_attr_mmd29_device_id.attr,
+ &dev_attr_mmd30_device_id.attr,
+ &dev_attr_mmd31_device_id.attr,
+ NULL
+};
+
+static umode_t phy_mmd_is_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct phy_device *phydev = to_phy_device(dev);
+ const int i = index + 1;
+
+ if (!phydev->is_c45)
+ return 0;
+ if (i >= ARRAY_SIZE(phydev->c45_ids.device_ids) ||
+ phydev->c45_ids.device_ids[i] == 0xffffffff)
+ return 0;
+
+ return attr->mode;
+}
+
+static const struct attribute_group phy_mmd_group = {
+ .name = "c45_phy_ids",
+ .attrs = phy_mmd_attrs,
+ .is_visible = phy_mmd_is_visible,
+};
+
+static const struct attribute_group *phy_device_groups[] = {
+ &phy_dev_group,
+ &phy_mmd_group,
+ NULL,
+};
static const struct device_type mdio_bus_phy_type = {
.name = "PHY",
- .groups = phy_dev_groups,
+ .groups = phy_device_groups,
.release = phy_device_release,
.pm = pm_ptr(&mdio_bus_phy_pm_ops),
};
@@ -685,8 +825,8 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
dev->speed = SPEED_UNKNOWN;
dev->duplex = DUPLEX_UNKNOWN;
- dev->pause = 0;
- dev->asym_pause = 0;
+ dev->pause = false;
+ dev->asym_pause = false;
dev->link = 0;
dev->port = PORT_TP;
dev->interface = PHY_INTERFACE_MODE_GMII;
@@ -1084,35 +1224,24 @@ int phy_get_c45_ids(struct phy_device *phydev)
EXPORT_SYMBOL(phy_get_c45_ids);
/**
- * phy_find_first - finds the first PHY device on the bus
+ * phy_find_next - finds the next PHY device on the bus
* @bus: the target MII bus
+ * @pos: cursor
+ *
+ * Return: next phy_device on the bus, or NULL
*/
-struct phy_device *phy_find_first(struct mii_bus *bus)
+struct phy_device *phy_find_next(struct mii_bus *bus, struct phy_device *pos)
{
- struct phy_device *phydev;
- int addr;
+ for (int addr = pos ? pos->mdio.addr + 1 : 0;
+ addr < PHY_MAX_ADDR; addr++) {
+ struct phy_device *phydev = mdiobus_get_phy(bus, addr);
- for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
- phydev = mdiobus_get_phy(bus, addr);
if (phydev)
return phydev;
}
return NULL;
}
-EXPORT_SYMBOL(phy_find_first);
-
-static void phy_link_change(struct phy_device *phydev, bool up)
-{
- struct net_device *netdev = phydev->attached_dev;
-
- if (up)
- netif_carrier_on(netdev);
- else
- netif_carrier_off(netdev);
- phydev->adjust_link(netdev);
- if (phydev->mii_ts && phydev->mii_ts->link_state)
- phydev->mii_ts->link_state(phydev->mii_ts, phydev);
-}
+EXPORT_SYMBOL_GPL(phy_find_next);
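A usage sketch, not part of this patch: walking every PHY currently registered on a bus with the new cursor-style helper.

	struct phy_device *phydev = NULL;

	while ((phydev = phy_find_next(bus, phydev)))
		dev_info(&phydev->mdio.dev, "PHY at address %d\n",
			 phydev->mdio.addr);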
/**
* phy_prepare_link - prepares the PHY layer to monitor link status
@@ -1512,7 +1641,6 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
struct mii_bus *bus = phydev->mdio.bus;
struct device *d = &phydev->mdio.dev;
struct module *ndev_owner = NULL;
- bool using_genphy = false;
int err;
/* For Ethernet device drivers that register their own MDIO bus, we
@@ -1538,7 +1666,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
else
d->driver = &genphy_driver.mdiodrv.driver;
- using_genphy = true;
+ phydev->is_genphy_driven = 1;
}
if (!try_module_get(d->driver->owner)) {
@@ -1547,7 +1675,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
goto error_put_device;
}
- if (using_genphy) {
+ if (phydev->is_genphy_driven) {
err = d->driver->probe(d);
if (err >= 0)
err = device_bind_driver(d);
@@ -1617,7 +1745,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
* the generic PHY driver we can't figure it out, thus set the old
* legacy PORT_MII value.
*/
- if (using_genphy)
+ if (phydev->is_genphy_driven)
phydev->port = PORT_MII;
/* Initial carrier state is off as the phy is about to be
@@ -1656,6 +1784,7 @@ error:
error_module_put:
module_put(d->driver->owner);
+ phydev->is_genphy_driven = 0;
d->driver = NULL;
error_put_device:
put_device(d);
@@ -1703,273 +1832,6 @@ struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
}
EXPORT_SYMBOL(phy_attach);
-static bool phy_driver_is_genphy_kind(struct phy_device *phydev,
- struct device_driver *driver)
-{
- struct device *d = &phydev->mdio.dev;
- bool ret = false;
-
- if (!phydev->drv)
- return ret;
-
- get_device(d);
- ret = d->driver == driver;
- put_device(d);
-
- return ret;
-}
-
-bool phy_driver_is_genphy(struct phy_device *phydev)
-{
- return phy_driver_is_genphy_kind(phydev,
- &genphy_driver.mdiodrv.driver);
-}
-EXPORT_SYMBOL_GPL(phy_driver_is_genphy);
-
-bool phy_driver_is_genphy_10g(struct phy_device *phydev)
-{
- return phy_driver_is_genphy_kind(phydev,
- &genphy_c45_driver.mdiodrv.driver);
-}
-EXPORT_SYMBOL_GPL(phy_driver_is_genphy_10g);
-
-/**
- * phy_package_join - join a common PHY group
- * @phydev: target phy_device struct
- * @base_addr: cookie and base PHY address of PHY package for offset
- * calculation of global register access
- * @priv_size: if non-zero allocate this amount of bytes for private data
- *
- * This joins a PHY group and provides a shared storage for all phydevs in
- * this group. This is intended to be used for packages which contain
- * more than one PHY, for example a quad PHY transceiver.
- *
- * The base_addr parameter serves as cookie which has to have the same values
- * for all members of one group and as the base PHY address of the PHY package
- * for offset calculation to access generic registers of a PHY package.
- * Usually, one of the PHY addresses of the different PHYs in the package
- * provides access to these global registers.
- * The address which is given here, will be used in the phy_package_read()
- * and phy_package_write() convenience functions as base and added to the
- * passed offset in those functions.
- *
- * This will set the shared pointer of the phydev to the shared storage.
- * If this is the first call for a this cookie the shared storage will be
- * allocated. If priv_size is non-zero, the given amount of bytes are
- * allocated for the priv member.
- *
- * Returns < 1 on error, 0 on success. Esp. calling phy_package_join()
- * with the same cookie but a different priv_size is an error.
- */
-int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size)
-{
- struct mii_bus *bus = phydev->mdio.bus;
- struct phy_package_shared *shared;
- int ret;
-
- if (base_addr < 0 || base_addr >= PHY_MAX_ADDR)
- return -EINVAL;
-
- mutex_lock(&bus->shared_lock);
- shared = bus->shared[base_addr];
- if (!shared) {
- ret = -ENOMEM;
- shared = kzalloc(sizeof(*shared), GFP_KERNEL);
- if (!shared)
- goto err_unlock;
- if (priv_size) {
- shared->priv = kzalloc(priv_size, GFP_KERNEL);
- if (!shared->priv)
- goto err_free;
- shared->priv_size = priv_size;
- }
- shared->base_addr = base_addr;
- shared->np = NULL;
- refcount_set(&shared->refcnt, 1);
- bus->shared[base_addr] = shared;
- } else {
- ret = -EINVAL;
- if (priv_size && priv_size != shared->priv_size)
- goto err_unlock;
- refcount_inc(&shared->refcnt);
- }
- mutex_unlock(&bus->shared_lock);
-
- phydev->shared = shared;
-
- return 0;
-
-err_free:
- kfree(shared);
-err_unlock:
- mutex_unlock(&bus->shared_lock);
- return ret;
-}
-EXPORT_SYMBOL_GPL(phy_package_join);
-
-/**
- * of_phy_package_join - join a common PHY group in PHY package
- * @phydev: target phy_device struct
- * @priv_size: if non-zero allocate this amount of bytes for private data
- *
- * This is a variant of phy_package_join for PHY package defined in DT.
- *
- * The parent node of the @phydev is checked as a valid PHY package node
- * structure (by matching the node name "ethernet-phy-package") and the
- * base_addr for the PHY package is passed to phy_package_join.
- *
- * With this configuration the shared struct will also have the np value
- * filled to use additional DT defined properties in PHY specific
- * probe_once and config_init_once PHY package OPs.
- *
- * Returns < 0 on error, 0 on success. Esp. calling phy_package_join()
- * with the same cookie but a different priv_size is an error. Or a parent
- * node is not detected or is not valid or doesn't match the expected node
- * name for PHY package.
- */
-int of_phy_package_join(struct phy_device *phydev, size_t priv_size)
-{
- struct device_node *node = phydev->mdio.dev.of_node;
- struct device_node *package_node;
- u32 base_addr;
- int ret;
-
- if (!node)
- return -EINVAL;
-
- package_node = of_get_parent(node);
- if (!package_node)
- return -EINVAL;
-
- if (!of_node_name_eq(package_node, "ethernet-phy-package")) {
- ret = -EINVAL;
- goto exit;
- }
-
- if (of_property_read_u32(package_node, "reg", &base_addr)) {
- ret = -EINVAL;
- goto exit;
- }
-
- ret = phy_package_join(phydev, base_addr, priv_size);
- if (ret)
- goto exit;
-
- phydev->shared->np = package_node;
-
- return 0;
-exit:
- of_node_put(package_node);
- return ret;
-}
-EXPORT_SYMBOL_GPL(of_phy_package_join);
-
-/**
- * phy_package_leave - leave a common PHY group
- * @phydev: target phy_device struct
- *
- * This leaves a PHY group created by phy_package_join(). If this phydev
- * was the last user of the shared data between the group, this data is
- * freed. Resets the phydev->shared pointer to NULL.
- */
-void phy_package_leave(struct phy_device *phydev)
-{
- struct phy_package_shared *shared = phydev->shared;
- struct mii_bus *bus = phydev->mdio.bus;
-
- if (!shared)
- return;
-
- /* Decrease the node refcount on leave if present */
- if (shared->np)
- of_node_put(shared->np);
-
- if (refcount_dec_and_mutex_lock(&shared->refcnt, &bus->shared_lock)) {
- bus->shared[shared->base_addr] = NULL;
- mutex_unlock(&bus->shared_lock);
- kfree(shared->priv);
- kfree(shared);
- }
-
- phydev->shared = NULL;
-}
-EXPORT_SYMBOL_GPL(phy_package_leave);
-
-static void devm_phy_package_leave(struct device *dev, void *res)
-{
- phy_package_leave(*(struct phy_device **)res);
-}
-
-/**
- * devm_phy_package_join - resource managed phy_package_join()
- * @dev: device that is registering this PHY package
- * @phydev: target phy_device struct
- * @base_addr: cookie and base PHY address of PHY package for offset
- * calculation of global register access
- * @priv_size: if non-zero allocate this amount of bytes for private data
- *
- * Managed phy_package_join(). Shared storage fetched by this function,
- * phy_package_leave() is automatically called on driver detach. See
- * phy_package_join() for more information.
- */
-int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
- int base_addr, size_t priv_size)
-{
- struct phy_device **ptr;
- int ret;
-
- ptr = devres_alloc(devm_phy_package_leave, sizeof(*ptr),
- GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
-
- ret = phy_package_join(phydev, base_addr, priv_size);
-
- if (!ret) {
- *ptr = phydev;
- devres_add(dev, ptr);
- } else {
- devres_free(ptr);
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(devm_phy_package_join);
-
-/**
- * devm_of_phy_package_join - resource managed of_phy_package_join()
- * @dev: device that is registering this PHY package
- * @phydev: target phy_device struct
- * @priv_size: if non-zero allocate this amount of bytes for private data
- *
- * Managed of_phy_package_join(). Shared storage fetched by this function,
- * phy_package_leave() is automatically called on driver detach. See
- * of_phy_package_join() for more information.
- */
-int devm_of_phy_package_join(struct device *dev, struct phy_device *phydev,
- size_t priv_size)
-{
- struct phy_device **ptr;
- int ret;
-
- ptr = devres_alloc(devm_phy_package_leave, sizeof(*ptr),
- GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
-
- ret = of_phy_package_join(phydev, priv_size);
-
- if (!ret) {
- *ptr = phydev;
- devres_add(dev, ptr);
- } else {
- devres_free(ptr);
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(devm_of_phy_package_join);
-
/**
* phy_detach - detach a PHY device from its network device
* @phydev: target phy_device struct
@@ -1983,8 +1845,10 @@ void phy_detach(struct phy_device *phydev)
struct module *ndev_owner = NULL;
struct mii_bus *bus;
- if (phydev->devlink)
+ if (phydev->devlink) {
device_link_del(phydev->devlink);
+ phydev->devlink = NULL;
+ }
if (phydev->sysfs_links) {
if (dev)
@@ -1998,10 +1862,21 @@ void phy_detach(struct phy_device *phydev)
phy_suspend(phydev);
if (dev) {
+ struct hwtstamp_provider *hwprov;
+
+ hwprov = rtnl_dereference(dev->hwprov);
+ /* Disable timestamp if it is the one selected */
+ if (hwprov && hwprov->phydev == phydev) {
+ rcu_assign_pointer(dev->hwprov, NULL);
+ kfree_rcu(hwprov, rcu_head);
+ }
+
phydev->attached_dev->phydev = NULL;
phydev->attached_dev = NULL;
phy_link_topo_del_phy(dev, phydev);
}
+
+ phydev->phy_link_change = NULL;
phydev->phylink = NULL;
if (!phydev->is_on_sfp_module)
@@ -2015,9 +1890,10 @@ void phy_detach(struct phy_device *phydev)
* from the generic driver so that there's a chance a
* real driver could be loaded
*/
- if (phy_driver_is_genphy(phydev) ||
- phy_driver_is_genphy_10g(phydev))
+ if (phydev->is_genphy_driven) {
device_release_driver(&phydev->mdio.dev);
+ phydev->is_genphy_driven = 0;
+ }
/* Assert the reset signal */
phy_device_reset(phydev, 1);
@@ -2045,7 +1921,7 @@ int phy_suspend(struct phy_device *phydev)
if (phydev->suspended || !phydrv)
return 0;
- phydev->wol_enabled = phy_drv_wol_enabled(phydev) ||
+ phydev->wol_enabled = phy_may_wakeup(phydev) ||
(netdev && netdev->ethtool->wol_enabled);
/* If the device has WOL enabled, we cannot suspend the PHY */
if (phydev->wol_enabled && !(phydrv->flags & PHY_ALWAYS_CALL_SUSPEND))
@@ -2092,41 +1968,6 @@ int phy_resume(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_resume);
-int phy_loopback(struct phy_device *phydev, bool enable)
-{
- int ret = 0;
-
- if (!phydev->drv)
- return -EIO;
-
- mutex_lock(&phydev->lock);
-
- if (enable && phydev->loopback_enabled) {
- ret = -EBUSY;
- goto out;
- }
-
- if (!enable && !phydev->loopback_enabled) {
- ret = -EINVAL;
- goto out;
- }
-
- if (phydev->drv->set_loopback)
- ret = phydev->drv->set_loopback(phydev, enable);
- else
- ret = genphy_loopback(phydev, enable);
-
- if (ret)
- goto out;
-
- phydev->loopback_enabled = enable;
-
-out:
- mutex_unlock(&phydev->lock);
- return ret;
-}
-EXPORT_SYMBOL(phy_loopback);
-
/**
* phy_reset_after_clk_enable - perform a PHY reset if needed
* @phydev: target phy_device struct
@@ -2240,29 +2081,6 @@ static int genphy_c37_config_advert(struct phy_device *phydev)
}
/**
- * genphy_config_eee_advert - disable unwanted eee mode advertisement
- * @phydev: target phy_device struct
- *
- * Description: Writes MDIO_AN_EEE_ADV after disabling unsupported energy
- * efficent ethernet modes. Returns 0 if the PHY's advertisement hasn't
- * changed, and 1 if it has changed.
- */
-int genphy_config_eee_advert(struct phy_device *phydev)
-{
- int err;
-
- /* Nothing to disable */
- if (!phydev->eee_broken_modes)
- return 0;
-
- err = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV,
- phydev->eee_broken_modes, 0);
- /* If the call failed, we assume that EEE is not supported */
- return err < 0 ? 0 : err;
-}
-EXPORT_SYMBOL(genphy_config_eee_advert);
-
-/**
* genphy_setup_forced - configures/forces speed/duplex from @phydev
* @phydev: target phy_device struct
*
@@ -2274,8 +2092,8 @@ int genphy_setup_forced(struct phy_device *phydev)
{
u16 ctl;
- phydev->pause = 0;
- phydev->asym_pause = 0;
+ phydev->pause = false;
+ phydev->asym_pause = false;
ctl = mii_bmcr_encode_fixed(phydev->speed, phydev->duplex);
@@ -2417,7 +2235,7 @@ EXPORT_SYMBOL(genphy_check_and_restart_aneg);
int __genphy_config_aneg(struct phy_device *phydev, bool changed)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(fixed_advert);
- const struct phy_setting *set;
+ const struct link_capabilities *c;
unsigned long *advert;
int err;
@@ -2443,10 +2261,11 @@ int __genphy_config_aneg(struct phy_device *phydev, bool changed)
} else {
linkmode_zero(fixed_advert);
- set = phy_lookup_setting(phydev->speed, phydev->duplex,
- phydev->supported, true);
- if (set)
- linkmode_set_bit(set->bit, fixed_advert);
+ c = phy_caps_lookup(phydev->speed, phydev->duplex,
+ phydev->supported, true);
+ if (c)
+ linkmode_and(fixed_advert, phydev->supported,
+ c->linkmodes);
advert = fixed_advert;
}
@@ -2681,8 +2500,8 @@ int genphy_read_status(struct phy_device *phydev)
phydev->master_slave_state = MASTER_SLAVE_STATE_UNSUPPORTED;
phydev->speed = SPEED_UNKNOWN;
phydev->duplex = DUPLEX_UNKNOWN;
- phydev->pause = 0;
- phydev->asym_pause = 0;
+ phydev->pause = false;
+ phydev->asym_pause = false;
if (phydev->is_gigabit_capable) {
err = genphy_read_master_slave(phydev);
@@ -2735,8 +2554,8 @@ int genphy_c37_read_status(struct phy_device *phydev, bool *changed)
/* Signal link has changed */
*changed = true;
phydev->duplex = DUPLEX_UNKNOWN;
- phydev->pause = 0;
- phydev->asym_pause = 0;
+ phydev->pause = false;
+ phydev->asym_pause = false;
if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
lpa = phy_read(phydev, MII_LPA);
@@ -2906,12 +2725,18 @@ int genphy_resume(struct phy_device *phydev)
}
EXPORT_SYMBOL(genphy_resume);
-int genphy_loopback(struct phy_device *phydev, bool enable)
+int genphy_loopback(struct phy_device *phydev, bool enable, int speed)
{
if (enable) {
u16 ctl = BMCR_LOOPBACK;
int ret, val;
+ if (speed == SPEED_10 || speed == SPEED_100 ||
+ speed == SPEED_1000)
+ phydev->speed = speed;
+ else if (speed)
+ return -EINVAL;
+
ctl |= mii_bmcr_encode_fixed(phydev->speed, phydev->duplex);
phy_modify(phydev, MII_BMCR, ~0, ctl);
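
A hedged sketch of how a PHY driver might use the new @speed argument, assuming its .set_loopback callback has gained the same third parameter (dummy_phy_set_loopback is illustrative only):

static int dummy_phy_set_loopback(struct phy_device *phydev, bool enable,
				  int speed)
{
	/* speed == 0 keeps the currently configured speed; otherwise
	 * genphy_loopback() accepts only 10, 100 or 1000.
	 */
	return genphy_loopback(phydev, enable, speed);
}
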
@@ -3017,6 +2842,23 @@ void phy_support_eee(struct phy_device *phydev)
EXPORT_SYMBOL(phy_support_eee);
/**
+ * phy_disable_eee - Disable EEE for the PHY
+ * @phydev: Target phy_device struct
+ *
+ * This function is used by MAC drivers for MAC's which don't support EEE.
+ * It disables EEE on the PHY layer.
+ */
+void phy_disable_eee(struct phy_device *phydev)
+{
+ linkmode_zero(phydev->advertising_eee);
+ phydev->eee_cfg.tx_lpi_enabled = false;
+ phydev->eee_cfg.eee_enabled = false;
+ /* don't let userspace re-enable EEE advertisement */
+ linkmode_fill(phydev->eee_disabled_modes);
+}
+EXPORT_SYMBOL_GPL(phy_disable_eee);
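
A minimal sketch of a caller, assuming a MAC driver whose hardware has no LPI support (dummy_mac_disable_phy_eee is a placeholder, not part of this patch):

static void dummy_mac_disable_phy_eee(struct net_device *ndev)
{
	/* The MAC cannot enter LPI: stop advertising EEE and keep
	 * ethtool from re-enabling it later.
	 */
	if (ndev->phydev)
		phy_disable_eee(ndev->phydev);
}
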
+
+/**
* phy_support_sym_pause - Enable support of symmetrical pause
* @phydev: target phy_device struct
*
@@ -3142,19 +2984,12 @@ void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause)
EXPORT_SYMBOL(phy_get_pause);
#if IS_ENABLED(CONFIG_OF_MDIO)
-static int phy_get_int_delay_property(struct device *dev, const char *name)
+static int phy_get_u32_property(struct device *dev, const char *name, u32 *val)
{
- s32 int_delay;
- int ret;
-
- ret = device_property_read_u32(dev, name, &int_delay);
- if (ret)
- return ret;
-
- return int_delay;
+ return device_property_read_u32(dev, name, val);
}
#else
-static int phy_get_int_delay_property(struct device *dev, const char *name)
+static int phy_get_u32_property(struct device *dev, const char *name, u32 *val)
{
return -EINVAL;
}
@@ -3163,7 +2998,6 @@ static int phy_get_int_delay_property(struct device *dev, const char *name)
/**
* phy_get_internal_delay - returns the index of the internal delay
* @phydev: phy_device struct
- * @dev: pointer to the devices device struct
* @delay_values: array of delays the PHY supports
* @size: the size of the delay array
* @is_rx: boolean to indicate to get the rx internal delay
@@ -3176,15 +3010,16 @@ static int phy_get_int_delay_property(struct device *dev, const char *name)
* array then size = 0 and the value of the delay property is returned.
* Return -EINVAL if the delay is invalid or cannot be found.
*/
-s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
- const int *delay_values, int size, bool is_rx)
+s32 phy_get_internal_delay(struct phy_device *phydev, const int *delay_values,
+ int size, bool is_rx)
{
- s32 delay;
- int i;
+ struct device *dev = &phydev->mdio.dev;
+ int i, ret;
+ u32 delay;
if (is_rx) {
- delay = phy_get_int_delay_property(dev, "rx-internal-delay-ps");
- if (delay < 0 && size == 0) {
+ ret = phy_get_u32_property(dev, "rx-internal-delay-ps", &delay);
+ if (ret < 0 && size == 0) {
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
return 1;
@@ -3193,8 +3028,8 @@ s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
}
} else {
- delay = phy_get_int_delay_property(dev, "tx-internal-delay-ps");
- if (delay < 0 && size == 0) {
+ ret = phy_get_u32_property(dev, "tx-internal-delay-ps", &delay);
+ if (ret < 0 && size == 0) {
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
return 1;
@@ -3203,8 +3038,8 @@ s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
}
}
- if (delay < 0)
- return delay;
+ if (ret < 0)
+ return ret;
if (size == 0)
return delay;
@@ -3239,6 +3074,45 @@ s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
}
EXPORT_SYMBOL(phy_get_internal_delay);
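
With the @dev argument gone, a PHY driver's config path might look up its RGMII RX delay index roughly as below; the delay table and function names are invented for illustration:

static const int dummy_rgmii_delays_ps[] = { 0, 1500, 2000, 2500 };

static int dummy_phy_config_rx_delay(struct phy_device *phydev)
{
	s32 idx;

	idx = phy_get_internal_delay(phydev, dummy_rgmii_delays_ps,
				     ARRAY_SIZE(dummy_rgmii_delays_ps), true);
	if (idx < 0)
		return idx;

	/* idx indexes dummy_rgmii_delays_ps[]; program the selected
	 * delay into a vendor-specific register here.
	 */
	return 0;
}
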
+/**
+ * phy_get_tx_amplitude_gain - stores tx amplitude gain in @val
+ * @phydev: phy_device struct
+ * @dev: pointer to the devices device struct
+ * @linkmode: linkmode for which the tx amplitude gain should be retrieved
+ * @val: tx amplitude gain
+ *
+ * Returns: 0 on success, < 0 on failure
+ */
+int phy_get_tx_amplitude_gain(struct phy_device *phydev, struct device *dev,
+ enum ethtool_link_mode_bit_indices linkmode,
+ u32 *val)
+{
+ switch (linkmode) {
+ case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
+ return phy_get_u32_property(dev,
+ "tx-amplitude-100base-tx-percent",
+ val);
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(phy_get_tx_amplitude_gain);
+
+/**
+ * phy_get_mac_termination - stores MAC termination in @val
+ * @phydev: phy_device struct
+ * @dev: pointer to the devices device struct
+ * @val: MAC termination
+ *
+ * Returns: 0 on success, < 0 on failure
+ */
+int phy_get_mac_termination(struct phy_device *phydev, struct device *dev,
+ u32 *val)
+{
+ return phy_get_u32_property(dev, "mac-termination-ohms", val);
+}
+EXPORT_SYMBOL_GPL(phy_get_mac_termination);
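
Both helpers read optional firmware properties; a hypothetical PHY driver probe could consume them like this (register programming is left as a stub):

static int dummy_phy_probe(struct phy_device *phydev)
{
	struct device *dev = &phydev->mdio.dev;
	u32 gain, ohms;

	/* Both properties are optional; on error keep hardware defaults. */
	if (!phy_get_tx_amplitude_gain(phydev, dev,
				       ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				       &gain))
		phydev_dbg(phydev, "100BASE-TX amplitude gain: %u%%\n", gain);

	if (!phy_get_mac_termination(phydev, dev, &ohms))
		phydev_dbg(phydev, "MAC termination: %u ohm\n", ohms);

	return 0;
}
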
+
static int phy_led_set_brightness(struct led_classdev *led_cdev,
enum led_brightness value)
{
@@ -3500,18 +3374,6 @@ struct phy_device *fwnode_phy_find_device(struct fwnode_handle *phy_fwnode)
EXPORT_SYMBOL(fwnode_phy_find_device);
/**
- * device_phy_find_device - For the given device, get the phy_device
- * @dev: Pointer to the given device
- *
- * Refer return conditions of fwnode_phy_find_device().
- */
-struct phy_device *device_phy_find_device(struct device *dev)
-{
- return fwnode_phy_find_device(dev_fwnode(dev));
-}
-EXPORT_SYMBOL_GPL(device_phy_find_device);
-
-/**
* fwnode_get_phy_node - Get the phy_node using the named reference.
* @fwnode: Pointer to fwnode from which phy_node has to be obtained.
*
@@ -3526,12 +3388,12 @@ struct fwnode_handle *fwnode_get_phy_node(const struct fwnode_handle *fwnode)
/* Only phy-handle is used for ACPI */
phy_node = fwnode_find_reference(fwnode, "phy-handle", 0);
- if (is_acpi_node(fwnode) || !IS_ERR(phy_node))
+ if (!IS_ERR(phy_node) || is_acpi_node(fwnode))
return phy_node;
phy_node = fwnode_find_reference(fwnode, "phy", 0);
- if (IS_ERR(phy_node))
- phy_node = fwnode_find_reference(fwnode, "phy-device", 0);
- return phy_node;
+ if (!IS_ERR(phy_node))
+ return phy_node;
+ return fwnode_find_reference(fwnode, "phy-device", 0);
}
EXPORT_SYMBOL_GPL(fwnode_get_phy_node);
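
A MAC driver resolving its PHY from firmware would typically pair this with fwnode_phy_find_device(); a minimal sketch (dummy_get_phy is a placeholder):

static struct phy_device *dummy_get_phy(struct fwnode_handle *fwnode)
{
	struct fwnode_handle *phy_node;
	struct phy_device *phydev;

	phy_node = fwnode_get_phy_node(fwnode);
	if (IS_ERR(phy_node))
		return NULL;

	/* Returns the phy_device with an elevated refcount, or NULL. */
	phydev = fwnode_phy_find_device(phy_node);
	fwnode_handle_put(phy_node);
	return phydev;
}
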
@@ -3609,22 +3471,21 @@ static int phy_probe(struct device *dev)
if (err)
goto out;
- /* There is no "enabled" flag. If PHY is advertising, assume it is
- * kind of enabled.
- */
- phydev->eee_enabled = !linkmode_empty(phydev->advertising_eee);
+ /* Get the EEE modes we want to prohibit. */
+ of_set_phy_eee_broken(phydev);
/* Some PHYs may advertise, by default, not support EEE modes. So,
- * we need to clean them.
+ * we need to clean them. In addition remove all disabled EEE modes.
*/
- if (phydev->eee_enabled)
- linkmode_and(phydev->advertising_eee, phydev->supported_eee,
- phydev->advertising_eee);
+ linkmode_and(phydev->advertising_eee, phydev->supported_eee,
+ phydev->advertising_eee);
+ linkmode_andnot(phydev->advertising_eee, phydev->advertising_eee,
+ phydev->eee_disabled_modes);
- /* Get the EEE modes we want to prohibit. We will ask
- * the PHY stop advertising these mode later on
+ /* There is no "enabled" flag. If PHY is advertising, assume it is
+ * kind of enabled.
*/
- of_set_phy_eee_broken(phydev);
+ phydev->eee_cfg.eee_enabled = !linkmode_empty(phydev->advertising_eee);
/* Get master/slave strap overrides */
of_set_phy_timing_role(phydev);
@@ -3654,7 +3515,7 @@ static int phy_probe(struct device *dev)
/* Get the LEDs from the device tree, and instantiate standard
* LEDs for them.
*/
- if (IS_ENABLED(CONFIG_PHYLIB_LEDS))
+ if (IS_ENABLED(CONFIG_PHYLIB_LEDS) && !phy_driver_is_genphy(phydev))
err = of_phy_leds(phydev);
out:
@@ -3671,7 +3532,7 @@ static int phy_remove(struct device *dev)
cancel_delayed_work_sync(&phydev->state_queue);
- if (IS_ENABLED(CONFIG_PHYLIB_LEDS))
+ if (IS_ENABLED(CONFIG_PHYLIB_LEDS) && !phy_driver_is_genphy(phydev))
phy_leds_unregister(phydev);
phydev->state = PHY_DOWN;
@@ -3695,7 +3556,8 @@ static int phy_remove(struct device *dev)
* @new_driver: new phy_driver to register
* @owner: module owning this PHY
*/
-int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
+static int phy_driver_register(struct phy_driver *new_driver,
+ struct module *owner)
{
int retval;
@@ -3738,7 +3600,11 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
return 0;
}
-EXPORT_SYMBOL(phy_driver_register);
+
+static void phy_driver_unregister(struct phy_driver *drv)
+{
+ driver_unregister(&drv->mdiodrv.driver);
+}
int phy_drivers_register(struct phy_driver *new_driver, int n,
struct module *owner)
@@ -3757,12 +3623,6 @@ int phy_drivers_register(struct phy_driver *new_driver, int n,
}
EXPORT_SYMBOL(phy_drivers_register);
-void phy_driver_unregister(struct phy_driver *drv)
-{
- driver_unregister(&drv->mdiodrv.driver);
-}
-EXPORT_SYMBOL(phy_driver_unregister);
-
void phy_drivers_unregister(struct phy_driver *drv, int n)
{
int i;
@@ -3796,6 +3656,8 @@ static const struct ethtool_phy_ops phy_ethtool_phy_ops = {
static const struct phylib_stubs __phylib_stubs = {
.hwtstamp_get = __phy_hwtstamp_get,
.hwtstamp_set = __phy_hwtstamp_set,
+ .get_phy_stats = __phy_ethtool_get_phy_stats,
+ .get_link_ext_stats = __phy_ethtool_get_link_ext_stats,
};
static void phylib_register_stubs(void)
@@ -3817,7 +3679,7 @@ static int __init phy_init(void)
phylib_register_stubs();
rtnl_unlock();
- rc = mdio_bus_init();
+ rc = phy_caps_init();
if (rc)
goto err_ethtool_phy_ops;
@@ -3825,7 +3687,7 @@ static int __init phy_init(void)
rc = phy_driver_register(&genphy_c45_driver, THIS_MODULE);
if (rc)
- goto err_mdio_bus;
+ goto err_ethtool_phy_ops;
rc = phy_driver_register(&genphy_driver, THIS_MODULE);
if (rc)
@@ -3835,8 +3697,6 @@ static int __init phy_init(void)
err_c45:
phy_driver_unregister(&genphy_c45_driver);
-err_mdio_bus:
- mdio_bus_exit();
err_ethtool_phy_ops:
rtnl_lock();
phylib_unregister_stubs();
@@ -3850,7 +3710,6 @@ static void __exit phy_exit(void)
{
phy_driver_unregister(&genphy_c45_driver);
phy_driver_unregister(&genphy_driver);
- mdio_bus_exit();
rtnl_lock();
phylib_unregister_stubs();
ethtool_set_ethtool_phy_ops(NULL);
diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
index f550576eb9da..60893691d4c3 100644
--- a/drivers/net/phy/phy_led_triggers.c
+++ b/drivers/net/phy/phy_led_triggers.c
@@ -5,6 +5,8 @@
#include <linux/phy_led_triggers.h>
#include <linux/netdevice.h>
+#include "phylib-internal.h"
+
static struct phy_led_trigger *phy_speed_to_led_trigger(struct phy_device *phy,
unsigned int speed)
{
@@ -91,9 +93,8 @@ int phy_led_triggers_register(struct phy_device *phy)
if (!phy->phy_num_led_triggers)
return 0;
- phy->led_link_trigger = devm_kzalloc(&phy->mdio.dev,
- sizeof(*phy->led_link_trigger),
- GFP_KERNEL);
+ phy->led_link_trigger = kzalloc(sizeof(*phy->led_link_trigger),
+ GFP_KERNEL);
if (!phy->led_link_trigger) {
err = -ENOMEM;
goto out_clear;
@@ -103,10 +104,9 @@ int phy_led_triggers_register(struct phy_device *phy)
if (err)
goto out_free_link;
- phy->phy_led_triggers = devm_kcalloc(&phy->mdio.dev,
- phy->phy_num_led_triggers,
- sizeof(struct phy_led_trigger),
- GFP_KERNEL);
+ phy->phy_led_triggers = kcalloc(phy->phy_num_led_triggers,
+ sizeof(struct phy_led_trigger),
+ GFP_KERNEL);
if (!phy->phy_led_triggers) {
err = -ENOMEM;
goto out_unreg_link;
@@ -127,11 +127,11 @@ int phy_led_triggers_register(struct phy_device *phy)
out_unreg:
while (i--)
phy_led_trigger_unregister(&phy->phy_led_triggers[i]);
- devm_kfree(&phy->mdio.dev, phy->phy_led_triggers);
+ kfree(phy->phy_led_triggers);
out_unreg_link:
phy_led_trigger_unregister(phy->led_link_trigger);
out_free_link:
- devm_kfree(&phy->mdio.dev, phy->led_link_trigger);
+ kfree(phy->led_link_trigger);
phy->led_link_trigger = NULL;
out_clear:
phy->phy_num_led_triggers = 0;
@@ -145,8 +145,13 @@ void phy_led_triggers_unregister(struct phy_device *phy)
for (i = 0; i < phy->phy_num_led_triggers; i++)
phy_led_trigger_unregister(&phy->phy_led_triggers[i]);
+ kfree(phy->phy_led_triggers);
+ phy->phy_led_triggers = NULL;
- if (phy->led_link_trigger)
+ if (phy->led_link_trigger) {
phy_led_trigger_unregister(phy->led_link_trigger);
+ kfree(phy->led_link_trigger);
+ phy->led_link_trigger = NULL;
+ }
}
EXPORT_SYMBOL_GPL(phy_led_triggers_unregister);
diff --git a/drivers/net/phy/phy_link_topology.c b/drivers/net/phy/phy_link_topology.c
index 4a5d73002a1a..0e9e987f37dd 100644
--- a/drivers/net/phy/phy_link_topology.c
+++ b/drivers/net/phy/phy_link_topology.c
@@ -73,7 +73,7 @@ int phy_link_topo_add_phy(struct net_device *dev,
xa_limit_32b, &topo->next_phy_index,
GFP_KERNEL);
- if (ret)
+ if (ret < 0)
goto err;
return 0;
diff --git a/drivers/net/phy/phy_package.c b/drivers/net/phy/phy_package.c
new file mode 100644
index 000000000000..3024da0bbf7b
--- /dev/null
+++ b/drivers/net/phy/phy_package.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PHY package support
+ */
+
+#include <linux/of.h>
+#include <linux/phy.h>
+
+#include "phylib.h"
+#include "phylib-internal.h"
+
+/**
+ * struct phy_package_shared - Shared information in PHY packages
+ * @base_addr: Base PHY address of PHY package used to combine PHYs
+ * in one package and for offset calculation of phy_package_read/write
+ * @np: Pointer to the Device Node if PHY package defined in DT
+ * @refcnt: Number of PHYs connected to this shared data
+ * @flags: Initialization of PHY package
+ * @priv_size: Size of the shared private data @priv
+ * @priv: Driver private data shared across a PHY package
+ *
+ * Represents a shared structure between different phydev's in the same
+ * package, for example a quad PHY. See phy_package_join() and
+ * phy_package_leave().
+ */
+struct phy_package_shared {
+ u8 base_addr;
+ /* With PHY package defined in DT this points to the PHY package node */
+ struct device_node *np;
+ refcount_t refcnt;
+ unsigned long flags;
+ size_t priv_size;
+
+ /* private data pointer */
+ /* note that this pointer is shared between different phydevs and
+ * the user has to take care of appropriate locking. It is allocated
+ * and freed automatically by phy_package_join() and
+ * phy_package_leave().
+ */
+ void *priv;
+};
+
+struct device_node *phy_package_get_node(struct phy_device *phydev)
+{
+ return phydev->shared->np;
+}
+EXPORT_SYMBOL_GPL(phy_package_get_node);
+
+void *phy_package_get_priv(struct phy_device *phydev)
+{
+ return phydev->shared->priv;
+}
+EXPORT_SYMBOL_GPL(phy_package_get_priv);
+
+static int phy_package_address(struct phy_device *phydev,
+ unsigned int addr_offset)
+{
+ struct phy_package_shared *shared = phydev->shared;
+ u8 base_addr = shared->base_addr;
+
+ if (addr_offset >= PHY_MAX_ADDR - base_addr)
+ return -EIO;
+
+ /* we know that addr will be in the range 0..31 and thus the
+ * implicit cast to a signed int is not a problem.
+ */
+ return base_addr + addr_offset;
+}
+
+int __phy_package_read(struct phy_device *phydev, unsigned int addr_offset,
+ u32 regnum)
+{
+ int addr = phy_package_address(phydev, addr_offset);
+
+ if (addr < 0)
+ return addr;
+
+ return __mdiobus_read(phydev->mdio.bus, addr, regnum);
+}
+EXPORT_SYMBOL_GPL(__phy_package_read);
+
+int __phy_package_write(struct phy_device *phydev, unsigned int addr_offset,
+ u32 regnum, u16 val)
+{
+ int addr = phy_package_address(phydev, addr_offset);
+
+ if (addr < 0)
+ return addr;
+
+ return __mdiobus_write(phydev->mdio.bus, addr, regnum, val);
+}
+EXPORT_SYMBOL_GPL(__phy_package_write);
+
+/**
+ * __phy_package_read_mmd - read MMD reg relative to PHY package base addr
+ * @phydev: The phy_device struct
+ * @addr_offset: The offset to be added to PHY package base_addr
+ * @devad: The MMD to read from
+ * @regnum: The register on the MMD to read
+ *
+ * Convenience helper for reading a register of an MMD on a given PHY
+ * using the PHY package base address. The base address is added to
+ * the addr_offset value.
+ *
+ * Same calling rules as for __phy_read();
+ *
+ * NOTE: It's assumed that the entire PHY package is either C22 or C45.
+ */
+int __phy_package_read_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum)
+{
+ int addr = phy_package_address(phydev, addr_offset);
+
+ if (addr < 0)
+ return addr;
+
+ if (regnum > (u16)~0 || devad > 32)
+ return -EINVAL;
+
+ return mmd_phy_read(phydev->mdio.bus, addr, phydev->is_c45, devad,
+ regnum);
+}
+EXPORT_SYMBOL(__phy_package_read_mmd);
+
+/**
+ * __phy_package_write_mmd - write MMD reg relative to PHY package base addr
+ * @phydev: The phy_device struct
+ * @addr_offset: The offset to be added to PHY package base_addr
+ * @devad: The MMD to write to
+ * @regnum: The register on the MMD to write
+ * @val: value to write to @regnum
+ *
+ * Convenience helper for writing a register of an MMD on a given PHY
+ * using the PHY package base address. The base address is added to
+ * the addr_offset value.
+ *
+ * Same calling rules as for __phy_write();
+ *
+ * NOTE: It's assumed that the entire PHY package is either C22 or C45.
+ */
+int __phy_package_write_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum, u16 val)
+{
+ int addr = phy_package_address(phydev, addr_offset);
+
+ if (addr < 0)
+ return addr;
+
+ if (regnum > (u16)~0 || devad > 32)
+ return -EINVAL;
+
+ return mmd_phy_write(phydev->mdio.bus, addr, phydev->is_c45, devad,
+ regnum, val);
+}
+EXPORT_SYMBOL(__phy_package_write_mmd);
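
A hedged sketch of a package-aware driver touching a global register behind the package base address; the MMD register number is invented, and the unlocked accessors require the MDIO bus lock:

static int dummy_pkg_set_global_bit(struct phy_device *phydev)
{
	int val, ret;

	phy_lock_mdio_bus(phydev);
	val = __phy_package_read_mmd(phydev, 0, MDIO_MMD_VEND1, 0x8000);
	if (val < 0) {
		ret = val;
		goto out;
	}
	ret = __phy_package_write_mmd(phydev, 0, MDIO_MMD_VEND1, 0x8000,
				      val | BIT(0));
out:
	phy_unlock_mdio_bus(phydev);
	return ret;
}
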
+
+static bool __phy_package_set_once(struct phy_device *phydev, unsigned int b)
+{
+ struct phy_package_shared *shared = phydev->shared;
+
+ if (!shared)
+ return false;
+
+ return !test_and_set_bit(b, &shared->flags);
+}
+
+bool phy_package_init_once(struct phy_device *phydev)
+{
+ return __phy_package_set_once(phydev, 0);
+}
+EXPORT_SYMBOL_GPL(phy_package_init_once);
+
+bool phy_package_probe_once(struct phy_device *phydev)
+{
+ return __phy_package_set_once(phydev, 1);
+}
+EXPORT_SYMBOL_GPL(phy_package_probe_once);
+
+/**
+ * phy_package_join - join a common PHY group
+ * @phydev: target phy_device struct
+ * @base_addr: cookie and base PHY address of PHY package for offset
+ * calculation of global register access
+ * @priv_size: if non-zero allocate this amount of bytes for private data
+ *
+ * This joins a PHY group and provides a shared storage for all phydevs in
+ * this group. This is intended to be used for packages which contain
+ * more than one PHY, for example a quad PHY transceiver.
+ *
+ * The base_addr parameter serves as a cookie which has to have the same value
+ * for all members of one group and as the base PHY address of the PHY package
+ * for offset calculation to access generic registers of a PHY package.
+ * Usually, one of the PHY addresses of the different PHYs in the package
+ * provides access to these global registers.
+ * The address which is given here, will be used in the phy_package_read()
+ * and phy_package_write() convenience functions as base and added to the
+ * passed offset in those functions.
+ *
+ * This will set the shared pointer of the phydev to the shared storage.
+ * If this is the first call for this cookie the shared storage will be
+ * allocated. If priv_size is non-zero, the given amount of bytes are
+ * allocated for the priv member.
+ *
+ * Returns < 0 on error, 0 on success. Esp. calling phy_package_join()
+ * with the same cookie but a different priv_size is an error.
+ */
+int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size)
+{
+ struct mii_bus *bus = phydev->mdio.bus;
+ struct phy_package_shared *shared;
+ int ret;
+
+ if (base_addr < 0 || base_addr >= PHY_MAX_ADDR)
+ return -EINVAL;
+
+ mutex_lock(&bus->shared_lock);
+ shared = bus->shared[base_addr];
+ if (!shared) {
+ ret = -ENOMEM;
+ shared = kzalloc(sizeof(*shared), GFP_KERNEL);
+ if (!shared)
+ goto err_unlock;
+ if (priv_size) {
+ shared->priv = kzalloc(priv_size, GFP_KERNEL);
+ if (!shared->priv)
+ goto err_free;
+ shared->priv_size = priv_size;
+ }
+ shared->base_addr = base_addr;
+ shared->np = NULL;
+ refcount_set(&shared->refcnt, 1);
+ bus->shared[base_addr] = shared;
+ } else {
+ ret = -EINVAL;
+ if (priv_size && priv_size != shared->priv_size)
+ goto err_unlock;
+ refcount_inc(&shared->refcnt);
+ }
+ mutex_unlock(&bus->shared_lock);
+
+ phydev->shared = shared;
+
+ return 0;
+
+err_free:
+ kfree(shared);
+err_unlock:
+ mutex_unlock(&bus->shared_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_package_join);
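
Putting the pieces together, a driver for one PHY of a multi-PHY package might join the group in probe and run one-time global setup guarded by phy_package_init_once(); all names below are hypothetical, and the package globals are assumed to sit behind bus address 0:

struct dummy_pkg_priv {
	bool global_setup_done;
};

static int dummy_quad_phy_probe(struct phy_device *phydev)
{
	/* Every PHY of the package joins with the same cookie. */
	return phy_package_join(phydev, 0, sizeof(struct dummy_pkg_priv));
}

static int dummy_quad_phy_config_init(struct phy_device *phydev)
{
	struct dummy_pkg_priv *priv = phy_package_get_priv(phydev);

	/* Only the first PHY to get here performs the global setup. */
	if (phy_package_init_once(phydev))
		priv->global_setup_done = true;

	return 0;
}

static void dummy_quad_phy_remove(struct phy_device *phydev)
{
	phy_package_leave(phydev);
}
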
+
+/**
+ * of_phy_package_join - join a common PHY group in PHY package
+ * @phydev: target phy_device struct
+ * @priv_size: if non-zero allocate this amount of bytes for private data
+ *
+ * This is a variant of phy_package_join for PHY package defined in DT.
+ *
+ * The parent node of the @phydev is checked as a valid PHY package node
+ * structure (by matching the node name "ethernet-phy-package") and the
+ * base_addr for the PHY package is passed to phy_package_join.
+ *
+ * With this configuration the shared struct will also have the np value
+ * filled to use additional DT defined properties in PHY specific
+ * probe_once and config_init_once PHY package OPs.
+ *
+ * Returns < 0 on error, 0 on success. Esp. calling phy_package_join()
+ * with the same cookie but a different priv_size is an error, as is a
+ * parent node that is missing, invalid, or does not match the expected
+ * node name for a PHY package.
+ */
+int of_phy_package_join(struct phy_device *phydev, size_t priv_size)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct device_node *package_node;
+ u32 base_addr;
+ int ret;
+
+ if (!node)
+ return -EINVAL;
+
+ package_node = of_get_parent(node);
+ if (!package_node)
+ return -EINVAL;
+
+ if (!of_node_name_eq(package_node, "ethernet-phy-package")) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (of_property_read_u32(package_node, "reg", &base_addr)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = phy_package_join(phydev, base_addr, priv_size);
+ if (ret)
+ goto exit;
+
+ phydev->shared->np = package_node;
+
+ return 0;
+exit:
+ of_node_put(package_node);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(of_phy_package_join);
+
+/**
+ * phy_package_leave - leave a common PHY group
+ * @phydev: target phy_device struct
+ *
+ * This leaves a PHY group created by phy_package_join(). If this phydev
+ * was the last user of the shared data between the group, this data is
+ * freed. Resets the phydev->shared pointer to NULL.
+ */
+void phy_package_leave(struct phy_device *phydev)
+{
+ struct phy_package_shared *shared = phydev->shared;
+ struct mii_bus *bus = phydev->mdio.bus;
+
+ if (!shared)
+ return;
+
+ /* Decrease the node refcount on leave if present */
+ if (shared->np)
+ of_node_put(shared->np);
+
+ if (refcount_dec_and_mutex_lock(&shared->refcnt, &bus->shared_lock)) {
+ bus->shared[shared->base_addr] = NULL;
+ mutex_unlock(&bus->shared_lock);
+ kfree(shared->priv);
+ kfree(shared);
+ }
+
+ phydev->shared = NULL;
+}
+EXPORT_SYMBOL_GPL(phy_package_leave);
+
+static void devm_phy_package_leave(struct device *dev, void *res)
+{
+ phy_package_leave(*(struct phy_device **)res);
+}
+
+/**
+ * devm_phy_package_join - resource managed phy_package_join()
+ * @dev: device that is registering this PHY package
+ * @phydev: target phy_device struct
+ * @base_addr: cookie and base PHY address of PHY package for offset
+ * calculation of global register access
+ * @priv_size: if non-zero allocate this amount of bytes for private data
+ *
+ * Managed phy_package_join(): shared storage is fetched by this function,
+ * and phy_package_leave() is automatically called on driver detach. See
+ * phy_package_join() for more information.
+ */
+int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
+ int base_addr, size_t priv_size)
+{
+ struct phy_device **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_phy_package_leave, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = phy_package_join(phydev, base_addr, priv_size);
+
+ if (!ret) {
+ *ptr = phydev;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_phy_package_join);
+
+/**
+ * devm_of_phy_package_join - resource managed of_phy_package_join()
+ * @dev: device that is registering this PHY package
+ * @phydev: target phy_device struct
+ * @priv_size: if non-zero allocate this amount of bytes for private data
+ *
+ * Managed of_phy_package_join(): shared storage is fetched by this function,
+ * and phy_package_leave() is automatically called on driver detach. See
+ * of_phy_package_join() for more information.
+ */
+int devm_of_phy_package_join(struct device *dev, struct phy_device *phydev,
+ size_t priv_size)
+{
+ struct phy_device **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_phy_package_leave, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = of_phy_package_join(phydev, priv_size);
+
+ if (!ret) {
+ *ptr = phydev;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_of_phy_package_join);
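
For the common DT case the managed variant removes the explicit leave entirely; a probe may reduce to this sketch (priv_size of 0 means no shared private data):

static int dummy_dt_pkg_phy_probe(struct phy_device *phydev)
{
	/* The parent "ethernet-phy-package" node supplies the base
	 * address; devres calls phy_package_leave() automatically.
	 */
	return devm_of_phy_package_join(&phydev->mdio.dev, phydev, 0);
}
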
+
+MODULE_DESCRIPTION("PHY package support");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/phylib-internal.h b/drivers/net/phy/phylib-internal.h
new file mode 100644
index 000000000000..ebda74eb60a5
--- /dev/null
+++ b/drivers/net/phy/phylib-internal.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * phylib-internal header
+ */
+
+#ifndef __PHYLIB_INTERNAL_H
+#define __PHYLIB_INTERNAL_H
+
+struct phy_device;
+struct mii_bus;
+
+/*
+ * phy_supported_speeds - return all speeds currently supported by a PHY device
+ */
+unsigned int phy_supported_speeds(struct phy_device *phy,
+ unsigned int *speeds,
+ unsigned int size);
+void of_set_phy_supported(struct phy_device *phydev);
+void of_set_phy_eee_broken(struct phy_device *phydev);
+void of_set_phy_timing_role(struct phy_device *phydev);
+int phy_speed_down_core(struct phy_device *phydev);
+void phy_check_downshift(struct phy_device *phydev);
+
+int mmd_phy_read(struct mii_bus *bus, int phy_addr, bool is_c45,
+ int devad, u32 regnum);
+int mmd_phy_write(struct mii_bus *bus, int phy_addr, bool is_c45,
+ int devad, u32 regnum, u16 val);
+
+int genphy_c45_read_eee_adv(struct phy_device *phydev, unsigned long *adv);
+
+#endif /* __PHYLIB_INTERNAL_H */
diff --git a/drivers/net/phy/phylib.h b/drivers/net/phy/phylib.h
new file mode 100644
index 000000000000..c15484a805b3
--- /dev/null
+++ b/drivers/net/phy/phylib.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * phylib header
+ */
+
+#ifndef __PHYLIB_H
+#define __PHYLIB_H
+
+struct device_node;
+struct phy_device;
+
+struct device_node *phy_package_get_node(struct phy_device *phydev);
+void *phy_package_get_priv(struct phy_device *phydev);
+int __phy_package_read(struct phy_device *phydev, unsigned int addr_offset,
+ u32 regnum);
+int __phy_package_write(struct phy_device *phydev, unsigned int addr_offset,
+ u32 regnum, u16 val);
+int __phy_package_read_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum);
+int __phy_package_write_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum, u16 val);
+bool phy_package_init_once(struct phy_device *phydev);
+bool phy_package_probe_once(struct phy_device *phydev);
+int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size);
+int of_phy_package_join(struct phy_device *phydev, size_t priv_size);
+void phy_package_leave(struct phy_device *phydev);
+int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
+ int base_addr, size_t priv_size);
+int devm_of_phy_package_join(struct device *dev, struct phy_device *phydev,
+ size_t priv_size);
+
+#endif /* __PHYLIB_H */
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 6ca7ea970f51..43d8380aaefb 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -20,16 +20,10 @@
#include <linux/timer.h>
#include <linux/workqueue.h>
+#include "phy-caps.h"
#include "sfp.h"
#include "swphy.h"
-#define SUPPORTED_INTERFACES \
- (SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_FIBRE | \
- SUPPORTED_BNC | SUPPORTED_AUI | SUPPORTED_Backplane)
-#define ADVERTISED_INTERFACES \
- (ADVERTISED_TP | ADVERTISED_MII | ADVERTISED_FIBRE | \
- ADVERTISED_BNC | ADVERTISED_AUI | ADVERTISED_Backplane)
-
enum {
PHYLINK_DISABLE_STOPPED,
PHYLINK_DISABLE_LINK,
@@ -56,9 +50,11 @@ struct phylink {
struct phy_device *phydev;
phy_interface_t link_interface; /* PHY_INTERFACE_xxx */
u8 cfg_link_an_mode; /* MLO_AN_xxx */
- u8 cur_link_an_mode;
+ u8 req_link_an_mode; /* Requested MLO_AN_xxx mode */
+ u8 act_link_an_mode; /* Active MLO_AN_xxx mode */
u8 link_port; /* The current non-phy ethtool port */
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported_lpi);
/* The link configuration settings */
struct phylink_link_state link_config;
@@ -69,22 +65,37 @@ struct phylink {
struct gpio_desc *link_gpio;
unsigned int link_irq;
struct timer_list link_poll;
- void (*get_fixed_state)(struct net_device *dev,
- struct phylink_link_state *s);
struct mutex state_mutex;
+ /* Serialize updates to pl->phydev with phylink_resolve() */
+ struct mutex phydev_mutex;
struct phylink_link_state phy_state;
+ unsigned int phy_ib_mode;
struct work_struct resolve;
unsigned int pcs_neg_mode;
unsigned int pcs_state;
- bool mac_link_dropped;
+ bool link_failed;
+ bool suspend_link_up;
+ bool major_config_failed;
+ bool mac_supports_eee_ops;
+ bool mac_supports_eee;
+ bool phy_enable_tx_lpi;
+ bool mac_enable_tx_lpi;
+ bool mac_tx_clk_stop;
+ u32 mac_tx_lpi_timer;
+ u8 mac_rx_clk_stop_blocked;
struct sfp_bus *sfp_bus;
bool sfp_may_have_phy;
DECLARE_PHY_INTERFACE_MASK(sfp_interfaces);
__ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
u8 sfp_port;
+
+ struct eee_config eee_cfg;
+
+ u32 wolopts_mac;
+ u8 wol_sopass[SOPASS_MAX];
};
#define phylink_printk(level, pl, fmt, ...) \
@@ -121,6 +132,9 @@ do { \
#endif
static const phy_interface_t phylink_sfp_interface_preference[] = {
+ PHY_INTERFACE_MODE_100GBASEP,
+ PHY_INTERFACE_MODE_50GBASER,
+ PHY_INTERFACE_MODE_LAUI,
PHY_INTERFACE_MODE_25GBASER,
PHY_INTERFACE_MODE_USXGMII,
PHY_INTERFACE_MODE_10GBASER,
@@ -174,6 +188,24 @@ static const char *phylink_an_mode_str(unsigned int mode)
return mode < ARRAY_SIZE(modestr) ? modestr[mode] : "unknown";
}
+static const char *phylink_pcs_mode_str(unsigned int mode)
+{
+ if (!mode)
+ return "none";
+
+ if (mode & PHYLINK_PCS_NEG_OUTBAND)
+ return "outband";
+
+ if (mode & PHYLINK_PCS_NEG_INBAND) {
+ if (mode & PHYLINK_PCS_NEG_ENABLED)
+ return "inband,an-enabled";
+ else
+ return "inband,an-disabled";
+ }
+
+ return "unknown";
+}
+
static unsigned int phylink_interface_signal_rate(phy_interface_t interface)
{
switch (interface) {
@@ -210,6 +242,7 @@ static int phylink_interface_max_speed(phy_interface_t interface)
case PHY_INTERFACE_MODE_SMII:
case PHY_INTERFACE_MODE_REVMII:
case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_MIILITE:
return SPEED_100;
case PHY_INTERFACE_MODE_TBI:
@@ -250,6 +283,13 @@ static int phylink_interface_max_speed(phy_interface_t interface)
case PHY_INTERFACE_MODE_XLGMII:
return SPEED_40000;
+ case PHY_INTERFACE_MODE_50GBASER:
+ case PHY_INTERFACE_MODE_LAUI:
+ return SPEED_50000;
+
+ case PHY_INTERFACE_MODE_100GBASEP:
+ return SPEED_100000;
+
case PHY_INTERFACE_MODE_INTERNAL:
case PHY_INTERFACE_MODE_NA:
case PHY_INTERFACE_MODE_MAX:
@@ -262,6 +302,61 @@ static int phylink_interface_max_speed(phy_interface_t interface)
return SPEED_UNKNOWN;
}
+static struct {
+ unsigned long mask;
+ int speed;
+ unsigned int duplex;
+ unsigned int caps_bit;
+} phylink_caps_params[] = {
+ { MAC_400000FD, SPEED_400000, DUPLEX_FULL, BIT(LINK_CAPA_400000FD) },
+ { MAC_200000FD, SPEED_200000, DUPLEX_FULL, BIT(LINK_CAPA_200000FD) },
+ { MAC_100000FD, SPEED_100000, DUPLEX_FULL, BIT(LINK_CAPA_100000FD) },
+ { MAC_56000FD, SPEED_56000, DUPLEX_FULL, BIT(LINK_CAPA_56000FD) },
+ { MAC_50000FD, SPEED_50000, DUPLEX_FULL, BIT(LINK_CAPA_50000FD) },
+ { MAC_40000FD, SPEED_40000, DUPLEX_FULL, BIT(LINK_CAPA_40000FD) },
+ { MAC_25000FD, SPEED_25000, DUPLEX_FULL, BIT(LINK_CAPA_25000FD) },
+ { MAC_20000FD, SPEED_20000, DUPLEX_FULL, BIT(LINK_CAPA_20000FD) },
+ { MAC_10000FD, SPEED_10000, DUPLEX_FULL, BIT(LINK_CAPA_10000FD) },
+ { MAC_5000FD, SPEED_5000, DUPLEX_FULL, BIT(LINK_CAPA_5000FD) },
+ { MAC_2500FD, SPEED_2500, DUPLEX_FULL, BIT(LINK_CAPA_2500FD) },
+ { MAC_1000FD, SPEED_1000, DUPLEX_FULL, BIT(LINK_CAPA_1000FD) },
+ { MAC_1000HD, SPEED_1000, DUPLEX_HALF, BIT(LINK_CAPA_1000HD) },
+ { MAC_100FD, SPEED_100, DUPLEX_FULL, BIT(LINK_CAPA_100FD) },
+ { MAC_100HD, SPEED_100, DUPLEX_HALF, BIT(LINK_CAPA_100HD) },
+ { MAC_10FD, SPEED_10, DUPLEX_FULL, BIT(LINK_CAPA_10FD) },
+ { MAC_10HD, SPEED_10, DUPLEX_HALF, BIT(LINK_CAPA_10HD) },
+};
+
+/**
+ * phylink_caps_to_link_caps() - Convert a set of MAC capabilities to LINK caps
+ * @caps: A set of MAC capabilities
+ *
+ * Returns: The corresponding set of LINK_CAPA as defined in phy-caps.h
+ */
+static unsigned long phylink_caps_to_link_caps(unsigned long caps)
+{
+ unsigned long link_caps = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(phylink_caps_params); i++)
+ if (caps & phylink_caps_params[i].mask)
+ link_caps |= phylink_caps_params[i].caps_bit;
+
+ return link_caps;
+}
+
+static unsigned long phylink_link_caps_to_mac_caps(unsigned long link_caps)
+{
+ unsigned long caps = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(phylink_caps_params); i++)
+ if (link_caps & phylink_caps_params[i].caps_bit)
+ caps |= phylink_caps_params[i].mask;
+
+ return caps;
+}
+
/**
* phylink_caps_to_linkmodes() - Convert capabilities to ethtool link modes
* @linkmodes: ethtool linkmode mask (must be already initialised)
@@ -273,172 +368,17 @@ static int phylink_interface_max_speed(phy_interface_t interface)
static void phylink_caps_to_linkmodes(unsigned long *linkmodes,
unsigned long caps)
{
+ unsigned long link_caps = phylink_caps_to_link_caps(caps);
+
if (caps & MAC_SYM_PAUSE)
__set_bit(ETHTOOL_LINK_MODE_Pause_BIT, linkmodes);
if (caps & MAC_ASYM_PAUSE)
__set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, linkmodes);
- if (caps & MAC_10HD) {
- __set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10baseT1S_Half_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT, linkmodes);
- }
-
- if (caps & MAC_10FD) {
- __set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10baseT1S_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_100HD) {
- __set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100baseFX_Half_BIT, linkmodes);
- }
-
- if (caps & MAC_100FD) {
- __set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100baseT1_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100baseFX_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_1000HD)
- __set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, linkmodes);
-
- if (caps & MAC_1000FD) {
- __set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_1000baseT1_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_2500FD) {
- __set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_5000FD)
- __set_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, linkmodes);
-
- if (caps & MAC_10000FD) {
- __set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10000baseER_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_25000FD) {
- __set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_40000FD) {
- __set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_50000FD) {
- __set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
- linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_50000baseDR_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_56000FD) {
- __set_bit(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_100000FD) {
- __set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
- linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
- linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseKR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseSR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT,
- linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseCR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseDR_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_200000FD) {
- __set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
- linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT,
- linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_400000FD) {
- __set_bit(ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
- linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT,
- linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT, linkmodes);
- }
+ phy_caps_linkmodes(link_caps, linkmodes);
}
-static struct {
- unsigned long mask;
- int speed;
- unsigned int duplex;
-} phylink_caps_params[] = {
- { MAC_400000FD, SPEED_400000, DUPLEX_FULL },
- { MAC_200000FD, SPEED_200000, DUPLEX_FULL },
- { MAC_100000FD, SPEED_100000, DUPLEX_FULL },
- { MAC_56000FD, SPEED_56000, DUPLEX_FULL },
- { MAC_50000FD, SPEED_50000, DUPLEX_FULL },
- { MAC_40000FD, SPEED_40000, DUPLEX_FULL },
- { MAC_25000FD, SPEED_25000, DUPLEX_FULL },
- { MAC_20000FD, SPEED_20000, DUPLEX_FULL },
- { MAC_10000FD, SPEED_10000, DUPLEX_FULL },
- { MAC_5000FD, SPEED_5000, DUPLEX_FULL },
- { MAC_2500FD, SPEED_2500, DUPLEX_FULL },
- { MAC_1000FD, SPEED_1000, DUPLEX_FULL },
- { MAC_1000HD, SPEED_1000, DUPLEX_HALF },
- { MAC_100FD, SPEED_100, DUPLEX_FULL },
- { MAC_100HD, SPEED_100, DUPLEX_HALF },
- { MAC_10FD, SPEED_10, DUPLEX_FULL },
- { MAC_10HD, SPEED_10, DUPLEX_HALF },
-};
-
/**
* phylink_limit_mac_speed - limit the phylink_config to a maximum speed
* @config: pointer to a &struct phylink_config
@@ -494,86 +434,12 @@ static unsigned long phylink_get_capabilities(phy_interface_t interface,
unsigned long mac_capabilities,
int rate_matching)
{
+ unsigned long link_caps = phy_caps_from_interface(interface);
int max_speed = phylink_interface_max_speed(interface);
unsigned long caps = MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
unsigned long matched_caps = 0;
- switch (interface) {
- case PHY_INTERFACE_MODE_USXGMII:
- caps |= MAC_10000FD | MAC_5000FD;
- fallthrough;
-
- case PHY_INTERFACE_MODE_10G_QXGMII:
- caps |= MAC_2500FD;
- fallthrough;
-
- case PHY_INTERFACE_MODE_RGMII_TXID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_PSGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- case PHY_INTERFACE_MODE_QUSGMII:
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_GMII:
- caps |= MAC_1000HD | MAC_1000FD;
- fallthrough;
-
- case PHY_INTERFACE_MODE_REVRMII:
- case PHY_INTERFACE_MODE_RMII:
- case PHY_INTERFACE_MODE_SMII:
- case PHY_INTERFACE_MODE_REVMII:
- case PHY_INTERFACE_MODE_MII:
- caps |= MAC_10HD | MAC_10FD;
- fallthrough;
-
- case PHY_INTERFACE_MODE_100BASEX:
- caps |= MAC_100HD | MAC_100FD;
- break;
-
- case PHY_INTERFACE_MODE_TBI:
- case PHY_INTERFACE_MODE_MOCA:
- case PHY_INTERFACE_MODE_RTBI:
- case PHY_INTERFACE_MODE_1000BASEX:
- caps |= MAC_1000HD;
- fallthrough;
- case PHY_INTERFACE_MODE_1000BASEKX:
- case PHY_INTERFACE_MODE_TRGMII:
- caps |= MAC_1000FD;
- break;
-
- case PHY_INTERFACE_MODE_2500BASEX:
- caps |= MAC_2500FD;
- break;
-
- case PHY_INTERFACE_MODE_5GBASER:
- caps |= MAC_5000FD;
- break;
-
- case PHY_INTERFACE_MODE_XGMII:
- case PHY_INTERFACE_MODE_RXAUI:
- case PHY_INTERFACE_MODE_XAUI:
- case PHY_INTERFACE_MODE_10GBASER:
- case PHY_INTERFACE_MODE_10GKR:
- caps |= MAC_10000FD;
- break;
-
- case PHY_INTERFACE_MODE_25GBASER:
- caps |= MAC_25000FD;
- break;
-
- case PHY_INTERFACE_MODE_XLGMII:
- caps |= MAC_40000FD;
- break;
-
- case PHY_INTERFACE_MODE_INTERNAL:
- caps |= ~0;
- break;
-
- case PHY_INTERFACE_MODE_NA:
- case PHY_INTERFACE_MODE_MAX:
- break;
- }
+ caps |= phylink_link_caps_to_mac_caps(link_caps);
switch (rate_matching) {
case RATE_MATCH_OPEN_LOOP:
@@ -671,6 +537,17 @@ static int phylink_validate_mac_and_pcs(struct phylink *pl,
return -EINVAL;
}
+ /* Ensure that this PCS supports the interface which the MAC
+ * returned it for. It is an error for the MAC to return a PCS
+ * that does not support the interface mode.
+ */
+ if (!phy_interface_empty(pcs->supported_interfaces) &&
+ !test_bit(state->interface, pcs->supported_interfaces)) {
+ phylink_err(pl, "MAC returned PCS which does not support %s\n",
+ phy_modes(state->interface));
+ return -EINVAL;
+ }
+
/* Validate the link parameters with the PCS */
if (pcs->ops->pcs_validate) {
ret = pcs->ops->pcs_validate(pcs, supported, state);
@@ -761,12 +638,29 @@ static int phylink_validate(struct phylink *pl, unsigned long *supported,
return phylink_validate_mac_and_pcs(pl, supported, state);
}
+static void phylink_fill_fixedlink_supported(unsigned long *supported)
+{
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, supported);
+}
+
static int phylink_parse_fixedlink(struct phylink *pl,
const struct fwnode_handle *fwnode)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(match) = { 0, };
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ const struct link_capabilities *c;
struct fwnode_handle *fixed_node;
- const struct phy_setting *s;
struct gpio_desc *desc;
u32 speed;
int ret;
@@ -814,6 +708,9 @@ static int phylink_parse_fixedlink(struct phylink *pl,
return -EINVAL;
}
+ phylink_warn(pl, "%pfw uses deprecated array-style fixed-link binding!\n",
+ fwnode);
+
ret = fwnode_property_read_u32_array(fwnode, "fixed-link",
prop, ARRAY_SIZE(prop));
if (!ret) {
@@ -834,12 +731,16 @@ static int phylink_parse_fixedlink(struct phylink *pl,
phylink_warn(pl, "fixed link specifies half duplex for %dMbps link?\n",
pl->link_config.speed);
- linkmode_fill(pl->supported);
+ linkmode_zero(pl->supported);
+ phylink_fill_fixedlink_supported(pl->supported);
+
linkmode_copy(pl->link_config.advertising, pl->supported);
phylink_validate(pl, pl->supported, &pl->link_config);
- s = phy_lookup_setting(pl->link_config.speed, pl->link_config.duplex,
- pl->supported, true);
+ c = phy_caps_lookup(pl->link_config.speed, pl->link_config.duplex,
+ pl->supported, true);
+ if (c)
+ linkmode_and(match, pl->supported, c->linkmodes);
linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mask);
linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, mask);
@@ -848,9 +749,10 @@ static int phylink_parse_fixedlink(struct phylink *pl,
phylink_set(pl->supported, MII);
- if (s) {
- __set_bit(s->bit, pl->supported);
- __set_bit(s->bit, pl->link_config.lp_advertising);
+ if (c) {
+ linkmode_or(pl->supported, pl->supported, match);
+ linkmode_or(pl->link_config.lp_advertising,
+ pl->link_config.lp_advertising, match);
} else {
phylink_warn(pl, "fixed link %s duplex %dMbps not recognised\n",
pl->link_config.duplex == DUPLEX_FULL ? "full" : "half",
@@ -918,6 +820,9 @@ static int phylink_parse_mode(struct phylink *pl,
case PHY_INTERFACE_MODE_10GKR:
case PHY_INTERFACE_MODE_10GBASER:
case PHY_INTERFACE_MODE_XLGMII:
+ case PHY_INTERFACE_MODE_50GBASER:
+ case PHY_INTERFACE_MODE_LAUI:
+ case PHY_INTERFACE_MODE_100GBASEP:
caps = ~(MAC_SYM_PAUSE | MAC_ASYM_PAUSE);
caps = phylink_get_capabilities(pl->link_config.interface, caps,
RATE_MATCH_NONE);
@@ -971,6 +876,15 @@ static void phylink_resolve_an_pause(struct phylink_link_state *state)
}
}
+static unsigned int phylink_pcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ if (pcs && pcs->ops->pcs_inband_caps)
+ return pcs->ops->pcs_inband_caps(pcs, interface);
+
+ return 0;
+}
+
static void phylink_pcs_pre_config(struct phylink_pcs *pcs,
phy_interface_t interface)
{
@@ -1024,10 +938,40 @@ static void phylink_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
pcs->ops->pcs_link_up(pcs, neg_mode, interface, speed, duplex);
}
+static void phylink_pcs_disable_eee(struct phylink_pcs *pcs)
+{
+ if (pcs && pcs->ops->pcs_disable_eee)
+ pcs->ops->pcs_disable_eee(pcs);
+}
+
+static void phylink_pcs_enable_eee(struct phylink_pcs *pcs)
+{
+ if (pcs && pcs->ops->pcs_enable_eee)
+ pcs->ops->pcs_enable_eee(pcs);
+}
+
+/* Query the in-band capabilities for a specific interface mode, asking
+ * the MAC which PCS will be used to handle that interface mode.
+ */
+static unsigned int phylink_inband_caps(struct phylink *pl,
+ phy_interface_t interface)
+{
+ struct phylink_pcs *pcs;
+
+ if (!pl->mac_ops->mac_select_pcs)
+ return 0;
+
+ pcs = pl->mac_ops->mac_select_pcs(pl->config, interface);
+ if (!pcs)
+ return 0;
+
+ return phylink_pcs_inband_caps(pcs, interface);
+}
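/*
 * A minimal sketch (not part of this change) of how a PCS driver might
 * implement the pcs_inband_caps() method queried above. The method
 * prototype and the LINK_INBAND_* flags are assumed from their use in
 * this patch; "foo_pcs" is a hypothetical driver.
 */
static unsigned int foo_pcs_inband_caps(struct phylink_pcs *pcs,
					phy_interface_t interface)
{
	switch (interface) {
	case PHY_INTERFACE_MODE_SGMII:
		/* In-band signalling may be enabled or disabled */
		return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;

	case PHY_INTERFACE_MODE_1000BASEX:
		/* This PCS can only operate with in-band negotiation */
		return LINK_INBAND_ENABLE;

	default:
		/* No information - phylink falls back to its defaults */
		return 0;
	}
}

static const struct phylink_pcs_ops foo_pcs_ops = {
	/* ... other methods elided ... */
	.pcs_inband_caps = foo_pcs_inband_caps,
};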
+
static void phylink_pcs_poll_stop(struct phylink *pl)
{
if (pl->cfg_link_an_mode == MLO_AN_INBAND)
- del_timer(&pl->link_poll);
+ timer_delete(&pl->link_poll);
}
static void phylink_pcs_poll_start(struct phylink *pl)
@@ -1065,13 +1009,13 @@ static void phylink_mac_config(struct phylink *pl,
phylink_dbg(pl,
"%s: mode=%s/%s/%s adv=%*pb pause=%02x\n",
- __func__, phylink_an_mode_str(pl->cur_link_an_mode),
+ __func__, phylink_an_mode_str(pl->act_link_an_mode),
phy_modes(st.interface),
phy_rate_matching_to_str(st.rate_matching),
__ETHTOOL_LINK_MODE_MASK_NBITS, st.advertising,
st.pause);
- pl->mac_ops->mac_config(pl->config, pl->cur_link_an_mode, &st);
+ pl->mac_ops->mac_config(pl->config, pl->act_link_an_mode, &st);
}
static void phylink_pcs_an_restart(struct phylink *pl)
@@ -1079,13 +1023,50 @@ static void phylink_pcs_an_restart(struct phylink *pl)
if (pl->pcs && linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
pl->link_config.advertising) &&
phy_interface_mode_is_8023z(pl->link_config.interface) &&
- phylink_autoneg_inband(pl->cur_link_an_mode))
+ phylink_autoneg_inband(pl->act_link_an_mode))
pl->pcs->ops->pcs_an_restart(pl->pcs);
}
+enum inband_type {
+ INBAND_NONE,
+ INBAND_CISCO_SGMII,
+ INBAND_BASEX,
+};
+
+static enum inband_type phylink_get_inband_type(phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_QUSGMII:
+ case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_10G_QXGMII:
+ /* These protocols are designed for use with a PHY which
+ * communicates its negotiation result back to the MAC via
+ * inband communication. Note: there exist PHYs that run
+ * with SGMII but do not send the inband data.
+ */
+ return INBAND_CISCO_SGMII;
+
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+		/* 1000base-X is designed for use on the media side of
+		 * fibre connections, and thus the Autoneg bit needs to
+		 * be taken into account. We do the same for 2500base-X,
+		 * but drivers may not support this, so may need to
+		 * override it.
+ */
+ return INBAND_BASEX;
+
+ default:
+ return INBAND_NONE;
+ }
+}
+
/**
* phylink_pcs_neg_mode() - helper to determine PCS inband mode
- * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @pcs: a pointer to &struct phylink_pcs
* @interface: interface mode to be used
 * @advertising: advertisement ethtool link mode mask
*
@@ -1102,52 +1083,154 @@ static void phylink_pcs_an_restart(struct phylink *pl)
* Note: this is for cases where the PCS itself is involved in negotiation
* (e.g. Clause 37, SGMII and similar) not Clause 73.
*/
-static unsigned int phylink_pcs_neg_mode(unsigned int mode,
- phy_interface_t interface,
- const unsigned long *advertising)
+static void phylink_pcs_neg_mode(struct phylink *pl, struct phylink_pcs *pcs,
+ phy_interface_t interface,
+ const unsigned long *advertising)
{
- unsigned int neg_mode;
+ unsigned int pcs_ib_caps = 0;
+ unsigned int phy_ib_caps = 0;
+ unsigned int neg_mode, mode;
+ enum inband_type type;
+
+ type = phylink_get_inband_type(interface);
+ if (type == INBAND_NONE) {
+ pl->pcs_neg_mode = PHYLINK_PCS_NEG_NONE;
+ pl->act_link_an_mode = pl->req_link_an_mode;
+ return;
+ }
- switch (interface) {
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- case PHY_INTERFACE_MODE_QUSGMII:
- case PHY_INTERFACE_MODE_USXGMII:
- case PHY_INTERFACE_MODE_10G_QXGMII:
- /* These protocols are designed for use with a PHY which
- * communicates its negotiation result back to the MAC via
- * inband communication. Note: there exist PHYs that run
- * with SGMII but do not send the inband data.
+ mode = pl->req_link_an_mode;
+
+ pl->phy_ib_mode = 0;
+
+ if (pcs)
+ pcs_ib_caps = phylink_pcs_inband_caps(pcs, interface);
+
+ if (pl->phydev)
+ phy_ib_caps = phy_inband_caps(pl->phydev, interface);
+
+ phylink_dbg(pl, "interface %s inband modes: pcs=%02x phy=%02x\n",
+ phy_modes(interface), pcs_ib_caps, phy_ib_caps);
+
+ if (!phylink_autoneg_inband(mode)) {
+ bool pcs_ib_only = false;
+ bool phy_ib_only = false;
+
+ if (pcs_ib_caps && pcs_ib_caps != LINK_INBAND_DISABLE) {
+ /* PCS supports reporting in-band capabilities, and
+ * supports more than disable mode.
+ */
+ if (pcs_ib_caps & LINK_INBAND_DISABLE)
+ neg_mode = PHYLINK_PCS_NEG_OUTBAND;
+ else if (pcs_ib_caps & LINK_INBAND_ENABLE)
+ pcs_ib_only = true;
+ }
+
+ if (phy_ib_caps && phy_ib_caps != LINK_INBAND_DISABLE) {
+ /* PHY supports in-band capabilities, and supports
+ * more than disable mode.
+ */
+ if (phy_ib_caps & LINK_INBAND_DISABLE)
+ pl->phy_ib_mode = LINK_INBAND_DISABLE;
+ else if (phy_ib_caps & LINK_INBAND_BYPASS)
+ pl->phy_ib_mode = LINK_INBAND_BYPASS;
+ else if (phy_ib_caps & LINK_INBAND_ENABLE)
+ phy_ib_only = true;
+ }
+
+ /* If either the PCS or PHY requires inband to be enabled,
+ * this is an invalid configuration. Provide a diagnostic
+ * message for this case, but don't try to force the issue.
*/
- if (!phylink_autoneg_inband(mode))
- neg_mode = PHYLINK_PCS_NEG_OUTBAND;
- else
+ if (pcs_ib_only || phy_ib_only)
+ phylink_warn(pl,
+ "firmware wants %s mode, but %s%s%s requires inband\n",
+ phylink_an_mode_str(mode),
+ pcs_ib_only ? "PCS" : "",
+ pcs_ib_only && phy_ib_only ? " and " : "",
+ phy_ib_only ? "PHY" : "");
+
+ neg_mode = PHYLINK_PCS_NEG_OUTBAND;
+ } else if (type == INBAND_CISCO_SGMII || pl->phydev) {
+ /* For SGMII modes which are designed to be used with PHYs, or
+		 * Base-X with a PHY, we try to use in-band mode wherever
+		 * possible. However, there are some PHYs, e.g. the BCM84881,
+		 * which do not support in-band.
+ */
+ const unsigned int inband_ok = LINK_INBAND_ENABLE |
+ LINK_INBAND_BYPASS;
+ const unsigned int outband_ok = LINK_INBAND_DISABLE |
+ LINK_INBAND_BYPASS;
+ /* PCS PHY
+ * D E D E
+ * 0 0 0 0 no information inband enabled
+ * 1 0 0 0 pcs doesn't support outband
+ * 0 1 0 0 pcs required inband enabled
+ * 1 1 0 0 pcs optional inband enabled
+ * 0 0 1 0 phy doesn't support outband
+ * 1 0 1 0 pcs+phy doesn't support outband
+ * 0 1 1 0 pcs required, phy doesn't support, invalid
+ * 1 1 1 0 pcs optional, phy doesn't support, outband
+ * 0 0 0 1 phy required inband enabled
+ * 1 0 0 1 pcs doesn't support, phy required, invalid
+ * 0 1 0 1 pcs+phy required inband enabled
+ * 1 1 0 1 pcs optional, phy required inband enabled
+ * 0 0 1 1 phy optional inband enabled
+ * 1 0 1 1 pcs doesn't support, phy optional, outband
+ * 0 1 1 1 pcs required, phy optional inband enabled
+ * 1 1 1 1 pcs+phy optional inband enabled
+ */
+ if ((!pcs_ib_caps || pcs_ib_caps & inband_ok) &&
+ (!phy_ib_caps || phy_ib_caps & inband_ok)) {
+ /* In-band supported or unknown at both ends. Enable
+ * in-band mode with or without bypass at the PHY.
+ */
+ if (phy_ib_caps & LINK_INBAND_ENABLE)
+ pl->phy_ib_mode = LINK_INBAND_ENABLE;
+ else if (phy_ib_caps & LINK_INBAND_BYPASS)
+ pl->phy_ib_mode = LINK_INBAND_BYPASS;
+
neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
- break;
+ } else if ((!pcs_ib_caps || pcs_ib_caps & outband_ok) &&
+ (!phy_ib_caps || phy_ib_caps & outband_ok)) {
+			/* In-band is not supported at at least one end;
+			 * in-band bypass at the other end is possible.
+ */
+ if (phy_ib_caps & LINK_INBAND_DISABLE)
+ pl->phy_ib_mode = LINK_INBAND_DISABLE;
+ else if (phy_ib_caps & LINK_INBAND_BYPASS)
+ pl->phy_ib_mode = LINK_INBAND_BYPASS;
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_2500BASEX:
- /* 1000base-X is designed for use media-side for Fibre
- * connections, and thus the Autoneg bit needs to be
- * taken into account. We also do this for 2500base-X
- * as well, but drivers may not support this, so may
- * need to override this.
- */
- if (!phylink_autoneg_inband(mode))
neg_mode = PHYLINK_PCS_NEG_OUTBAND;
+ if (pl->phydev)
+ mode = MLO_AN_PHY;
+ } else {
+ /* invalid */
+ phylink_warn(pl, "%s: incompatible in-band capabilities, trying in-band",
+ phy_modes(interface));
+ neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
+ }
+ } else {
+ /* For Base-X without a PHY */
+ if (pcs_ib_caps == LINK_INBAND_DISABLE)
+ /* If the PCS doesn't support inband, then inband must
+ * be disabled.
+ */
+ neg_mode = PHYLINK_PCS_NEG_INBAND_DISABLED;
+ else if (pcs_ib_caps == LINK_INBAND_ENABLE)
+ /* If the PCS requires inband, then inband must always
+ * be enabled.
+ */
+ neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
else if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
advertising))
neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
else
neg_mode = PHYLINK_PCS_NEG_INBAND_DISABLED;
- break;
-
- default:
- neg_mode = PHYLINK_PCS_NEG_NONE;
- break;
}
- return neg_mode;
+ pl->pcs_neg_mode = neg_mode;
+ pl->act_link_an_mode = mode;
}
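/*
 * Two worked examples of the decision above (a sketch, using the
 * LINK_INBAND_* capability bits as defined for this patch):
 *
 * 1. MLO_AN_INBAND requested, SGMII, PCS reports DISABLE|ENABLE, PHY
 *    reports nothing (0): in-band is possible at both ends, so
 *    pcs_neg_mode becomes PHYLINK_PCS_NEG_INBAND_ENABLED, phy_ib_mode
 *    stays 0 and act_link_an_mode remains MLO_AN_INBAND.
 *
 * 2. MLO_AN_INBAND requested, SGMII, PCS reports DISABLE|ENABLE, PHY
 *    reports DISABLE only (a PHY that never sends the control word):
 *    in-band cannot work at the PHY, so phy_ib_mode is set to
 *    LINK_INBAND_DISABLE, pcs_neg_mode becomes PHYLINK_PCS_NEG_OUTBAND
 *    and act_link_an_mode is downgraded to MLO_AN_PHY.
 */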
static void phylink_major_config(struct phylink *pl, bool restart,
@@ -1156,14 +1239,13 @@ static void phylink_major_config(struct phylink *pl, bool restart,
struct phylink_pcs *pcs = NULL;
bool pcs_changed = false;
unsigned int rate_kbd;
- unsigned int neg_mode;
int err;
- phylink_dbg(pl, "major config %s\n", phy_modes(state->interface));
+ phylink_dbg(pl, "major config, requested %s/%s\n",
+ phylink_an_mode_str(pl->req_link_an_mode),
+ phy_modes(state->interface));
- pl->pcs_neg_mode = phylink_pcs_neg_mode(pl->cur_link_an_mode,
- state->interface,
- state->advertising);
+ pl->major_config_failed = false;
if (pl->mac_ops->mac_select_pcs) {
pcs = pl->mac_ops->mac_select_pcs(pl->config, state->interface);
@@ -1171,20 +1253,30 @@ static void phylink_major_config(struct phylink *pl, bool restart,
phylink_err(pl,
"mac_select_pcs unexpectedly failed: %pe\n",
pcs);
+
+ pl->major_config_failed = true;
return;
}
pcs_changed = pl->pcs != pcs;
}
+ phylink_pcs_neg_mode(pl, pcs, state->interface, state->advertising);
+
+ phylink_dbg(pl, "major config, active %s/%s/%s\n",
+ phylink_an_mode_str(pl->act_link_an_mode),
+ phylink_pcs_mode_str(pl->pcs_neg_mode),
+ phy_modes(state->interface));
+
phylink_pcs_poll_stop(pl);
if (pl->mac_ops->mac_prepare) {
- err = pl->mac_ops->mac_prepare(pl->config, pl->cur_link_an_mode,
+ err = pl->mac_ops->mac_prepare(pl->config, pl->act_link_an_mode,
state->interface);
if (err < 0) {
phylink_err(pl, "mac_prepare failed: %pe\n",
ERR_PTR(err));
+ pl->major_config_failed = true;
return;
}
}
@@ -1208,33 +1300,50 @@ static void phylink_major_config(struct phylink *pl, bool restart,
phylink_mac_config(pl, state);
- if (pl->pcs)
- phylink_pcs_post_config(pl->pcs, state->interface);
+ if (pl->pcs) {
+ err = phylink_pcs_post_config(pl->pcs, state->interface);
+ if (err < 0) {
+ phylink_err(pl, "pcs_post_config failed: %pe\n",
+ ERR_PTR(err));
+
+ pl->major_config_failed = true;
+ }
+ }
if (pl->pcs_state == PCS_STATE_STARTING || pcs_changed)
phylink_pcs_enable(pl->pcs);
- neg_mode = pl->cur_link_an_mode;
- if (pl->pcs && pl->pcs->neg_mode)
- neg_mode = pl->pcs_neg_mode;
-
- err = phylink_pcs_config(pl->pcs, neg_mode, state,
+ err = phylink_pcs_config(pl->pcs, pl->pcs_neg_mode, state,
!!(pl->link_config.pause & MLO_PAUSE_AN));
- if (err < 0)
- phylink_err(pl, "pcs_config failed: %pe\n",
- ERR_PTR(err));
- else if (err > 0)
+ if (err < 0) {
+ phylink_err(pl, "pcs_config failed: %pe\n", ERR_PTR(err));
+ pl->major_config_failed = true;
+ } else if (err > 0) {
restart = true;
+ }
if (restart)
phylink_pcs_an_restart(pl);
if (pl->mac_ops->mac_finish) {
- err = pl->mac_ops->mac_finish(pl->config, pl->cur_link_an_mode,
+ err = pl->mac_ops->mac_finish(pl->config, pl->act_link_an_mode,
state->interface);
- if (err < 0)
+ if (err < 0) {
phylink_err(pl, "mac_finish failed: %pe\n",
ERR_PTR(err));
+
+ pl->major_config_failed = true;
+ }
+ }
+
+ if (pl->phydev && pl->phy_ib_mode) {
+ err = phy_config_inband(pl->phydev, pl->phy_ib_mode);
+ if (err < 0) {
+ phylink_err(pl, "phy_config_inband: %pe\n",
+ ERR_PTR(err));
+
+ pl->major_config_failed = true;
+ }
}
if (pl->sfp_bus) {
@@ -1254,32 +1363,26 @@ static void phylink_major_config(struct phylink *pl, bool restart,
*/
static int phylink_change_inband_advert(struct phylink *pl)
{
- unsigned int neg_mode;
int ret;
if (test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state))
return 0;
phylink_dbg(pl, "%s: mode=%s/%s adv=%*pb pause=%02x\n", __func__,
- phylink_an_mode_str(pl->cur_link_an_mode),
+ phylink_an_mode_str(pl->req_link_an_mode),
phy_modes(pl->link_config.interface),
__ETHTOOL_LINK_MODE_MASK_NBITS, pl->link_config.advertising,
pl->link_config.pause);
/* Recompute the PCS neg mode */
- pl->pcs_neg_mode = phylink_pcs_neg_mode(pl->cur_link_an_mode,
- pl->link_config.interface,
- pl->link_config.advertising);
-
- neg_mode = pl->cur_link_an_mode;
- if (pl->pcs->neg_mode)
- neg_mode = pl->pcs_neg_mode;
+ phylink_pcs_neg_mode(pl, pl->pcs, pl->link_config.interface,
+ pl->link_config.advertising);
/* Modern PCS-based method; update the advert at the PCS, and
* restart negotiation if the pcs_config() helper indicates that
* the programmed advertisement has changed.
*/
- ret = phylink_pcs_config(pl->pcs, neg_mode, &pl->link_config,
+ ret = phylink_pcs_config(pl->pcs, pl->pcs_neg_mode, &pl->link_config,
!!(pl->link_config.pause & MLO_PAUSE_AN));
if (ret < 0)
return ret;
@@ -1293,12 +1396,18 @@ static int phylink_change_inband_advert(struct phylink *pl)
static void phylink_mac_pcs_get_state(struct phylink *pl,
struct phylink_link_state *state)
{
+ struct phylink_pcs *pcs;
+ bool autoneg;
+
linkmode_copy(state->advertising, pl->link_config.advertising);
linkmode_zero(state->lp_advertising);
state->interface = pl->link_config.interface;
state->rate_matching = pl->link_config.rate_matching;
- if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
- state->advertising)) {
+ state->an_complete = 0;
+ state->link = 1;
+
+ autoneg = pl->pcs_neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED;
+ if (autoneg) {
state->speed = SPEED_UNKNOWN;
state->duplex = DUPLEX_UNKNOWN;
state->pause = MLO_PAUSE_NONE;
@@ -1307,11 +1416,10 @@ static void phylink_mac_pcs_get_state(struct phylink *pl,
state->duplex = pl->link_config.duplex;
state->pause = pl->link_config.pause;
}
- state->an_complete = 0;
- state->link = 1;
- if (pl->pcs)
- pl->pcs->ops->pcs_get_state(pl->pcs, state);
+ pcs = pl->pcs;
+ if (pcs)
+ pcs->ops->pcs_get_state(pcs, pl->pcs_neg_mode, state);
else
state->link = 0;
}
@@ -1335,8 +1443,9 @@ static void phylink_get_fixed_state(struct phylink *pl,
static void phylink_mac_initial_config(struct phylink *pl, bool force_restart)
{
struct phylink_link_state link_state;
+ struct phy_device *phy = pl->phydev;
- switch (pl->cur_link_an_mode) {
+ switch (pl->req_link_an_mode) {
case MLO_AN_PHY:
link_state = pl->phy_state;
break;
@@ -1358,7 +1467,11 @@ static void phylink_mac_initial_config(struct phylink *pl, bool force_restart)
link_state.link = false;
phylink_apply_manual_flow(pl, &link_state);
+ if (phy)
+ mutex_lock(&phy->lock);
phylink_major_config(pl, force_restart, &link_state);
+ if (phy)
+ mutex_unlock(&phy->lock);
}
static const char *phylink_pause_to_str(int pause)
@@ -1375,11 +1488,50 @@ static const char *phylink_pause_to_str(int pause)
}
}
+static void phylink_deactivate_lpi(struct phylink *pl)
+{
+ if (pl->mac_enable_tx_lpi) {
+ pl->mac_enable_tx_lpi = false;
+
+ phylink_dbg(pl, "disabling LPI\n");
+
+ pl->mac_ops->mac_disable_tx_lpi(pl->config);
+
+ phylink_pcs_disable_eee(pl->pcs);
+ }
+}
+
+static void phylink_activate_lpi(struct phylink *pl)
+{
+ int err;
+
+ if (!test_bit(pl->cur_interface, pl->config->lpi_interfaces)) {
+ phylink_dbg(pl, "MAC does not support LPI with %s\n",
+ phy_modes(pl->cur_interface));
+ return;
+ }
+
+ phylink_dbg(pl, "LPI timer %uus, tx clock stop %u\n",
+ pl->mac_tx_lpi_timer, pl->mac_tx_clk_stop);
+
+ phylink_pcs_enable_eee(pl->pcs);
+
+ err = pl->mac_ops->mac_enable_tx_lpi(pl->config, pl->mac_tx_lpi_timer,
+ pl->mac_tx_clk_stop);
+ if (err) {
+ phylink_pcs_disable_eee(pl->pcs);
+ phylink_err(pl, "%ps() failed: %pe\n",
+ pl->mac_ops->mac_enable_tx_lpi, ERR_PTR(err));
+ return;
+ }
+
+ pl->mac_enable_tx_lpi = true;
+}
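/*
 * A rough sketch of the MAC-side counterparts invoked above. The exact
 * prototypes are assumed from the call sites in phylink_activate_lpi()
 * and phylink_deactivate_lpi(); the foo_priv structure and the FOO_*
 * register names are hypothetical.
 */
static int foo_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
				 bool tx_clk_stop)
{
	struct foo_priv *priv = netdev_priv(to_net_dev(config->dev));
	u32 val = FOO_LPI_EN;

	/* Program the LPI entry timer (microseconds) */
	writel(timer, priv->base + FOO_LPI_TIMER);

	/* Optionally allow the transmit clock to be stopped in LPI */
	if (tx_clk_stop)
		val |= FOO_LPI_TX_CLK_STOP;

	writel(val, priv->base + FOO_LPI_CTRL);

	return 0;
}

static void foo_mac_disable_tx_lpi(struct phylink_config *config)
{
	struct foo_priv *priv = netdev_priv(to_net_dev(config->dev));

	writel(0, priv->base + FOO_LPI_CTRL);
}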
+
static void phylink_link_up(struct phylink *pl,
struct phylink_link_state link_state)
{
struct net_device *ndev = pl->netdev;
- unsigned int neg_mode;
int speed, duplex;
bool rx_pause;
@@ -1410,17 +1562,16 @@ static void phylink_link_up(struct phylink *pl,
pl->cur_interface = link_state.interface;
- neg_mode = pl->cur_link_an_mode;
- if (pl->pcs && pl->pcs->neg_mode)
- neg_mode = pl->pcs_neg_mode;
-
- phylink_pcs_link_up(pl->pcs, neg_mode, pl->cur_interface, speed,
+ phylink_pcs_link_up(pl->pcs, pl->pcs_neg_mode, pl->cur_interface, speed,
duplex);
- pl->mac_ops->mac_link_up(pl->config, pl->phydev, pl->cur_link_an_mode,
+ pl->mac_ops->mac_link_up(pl->config, pl->phydev, pl->act_link_an_mode,
pl->cur_interface, speed, duplex,
!!(link_state.pause & MLO_PAUSE_TX), rx_pause);
+ if (pl->mac_supports_eee && pl->phy_enable_tx_lpi)
+ phylink_activate_lpi(pl);
+
if (ndev)
netif_carrier_on(ndev);
@@ -1437,102 +1588,101 @@ static void phylink_link_down(struct phylink *pl)
if (ndev)
netif_carrier_off(ndev);
- pl->mac_ops->mac_link_down(pl->config, pl->cur_link_an_mode,
+
+ phylink_deactivate_lpi(pl);
+
+ pl->mac_ops->mac_link_down(pl->config, pl->act_link_an_mode,
pl->cur_interface);
phylink_info(pl, "Link is Down\n");
}
+static bool phylink_link_is_up(struct phylink *pl)
+{
+ return pl->netdev ? netif_carrier_ok(pl->netdev) : pl->old_link_state;
+}
+
static void phylink_resolve(struct work_struct *w)
{
struct phylink *pl = container_of(w, struct phylink, resolve);
struct phylink_link_state link_state;
- struct net_device *ndev = pl->netdev;
bool mac_config = false;
bool retrigger = false;
+ struct phy_device *phy;
bool cur_link_state;
+ mutex_lock(&pl->phydev_mutex);
+ phy = pl->phydev;
+ if (phy)
+ mutex_lock(&phy->lock);
mutex_lock(&pl->state_mutex);
- if (pl->netdev)
- cur_link_state = netif_carrier_ok(ndev);
- else
- cur_link_state = pl->old_link_state;
+ cur_link_state = phylink_link_is_up(pl);
if (pl->phylink_disable_state) {
- pl->mac_link_dropped = false;
+ pl->link_failed = false;
link_state.link = false;
- } else if (pl->mac_link_dropped) {
+ } else if (pl->link_failed) {
link_state.link = false;
retrigger = true;
+ } else if (pl->act_link_an_mode == MLO_AN_FIXED) {
+ phylink_get_fixed_state(pl, &link_state);
+ mac_config = link_state.link;
+ } else if (pl->act_link_an_mode == MLO_AN_PHY) {
+ link_state = pl->phy_state;
+ mac_config = link_state.link;
} else {
- switch (pl->cur_link_an_mode) {
- case MLO_AN_PHY:
- link_state = pl->phy_state;
- phylink_apply_manual_flow(pl, &link_state);
- mac_config = link_state.link;
- break;
+ phylink_mac_pcs_get_state(pl, &link_state);
- case MLO_AN_FIXED:
- phylink_get_fixed_state(pl, &link_state);
- mac_config = link_state.link;
- break;
+ /* The PCS may have a latching link-fail indicator. If the link
+ * was up, bring the link down and re-trigger the resolve.
+ * Otherwise, re-read the PCS state to get the current status
+ * of the link.
+ */
+ if (!link_state.link) {
+ if (cur_link_state)
+ retrigger = true;
+ else
+ phylink_mac_pcs_get_state(pl, &link_state);
+ }
- case MLO_AN_INBAND:
- phylink_mac_pcs_get_state(pl, &link_state);
+		/* If we have a phy, the "up" state is the logical AND of
+		 * both the PHY and MAC link states
+ */
+ if (phy)
+ link_state.link &= pl->phy_state.link;
- /* The PCS may have a latching link-fail indicator.
- * If the link was up, bring the link down and
- * re-trigger the resolve. Otherwise, re-read the
- * PCS state to get the current status of the link.
+ /* Only update if the PHY link is up */
+ if (phy && pl->phy_state.link) {
+ /* If the interface has changed, force a link down
+ * event if the link isn't already down, and re-resolve.
*/
- if (!link_state.link) {
- if (cur_link_state)
- retrigger = true;
- else
- phylink_mac_pcs_get_state(pl,
- &link_state);
+ if (link_state.interface != pl->phy_state.interface) {
+ retrigger = true;
+ link_state.link = false;
}
- /* If we have a phy, the "up" state is the union of
- * both the PHY and the MAC
+ link_state.interface = pl->phy_state.interface;
+
+ /* If we are doing rate matching, then the link
+ * speed/duplex comes from the PHY
*/
- if (pl->phydev)
- link_state.link &= pl->phy_state.link;
-
- /* Only update if the PHY link is up */
- if (pl->phydev && pl->phy_state.link) {
- /* If the interface has changed, force a
- * link down event if the link isn't already
- * down, and re-resolve.
- */
- if (link_state.interface !=
- pl->phy_state.interface) {
- retrigger = true;
- link_state.link = false;
- }
- link_state.interface = pl->phy_state.interface;
-
- /* If we are doing rate matching, then the
- * link speed/duplex comes from the PHY
- */
- if (pl->phy_state.rate_matching) {
- link_state.rate_matching =
- pl->phy_state.rate_matching;
- link_state.speed = pl->phy_state.speed;
- link_state.duplex =
- pl->phy_state.duplex;
- }
-
- /* If we have a PHY, we need to update with
- * the PHY flow control bits.
- */
- link_state.pause = pl->phy_state.pause;
- mac_config = true;
+ if (pl->phy_state.rate_matching) {
+ link_state.rate_matching =
+ pl->phy_state.rate_matching;
+ link_state.speed = pl->phy_state.speed;
+ link_state.duplex = pl->phy_state.duplex;
}
- phylink_apply_manual_flow(pl, &link_state);
- break;
+
+ /* If we have a PHY, we need to update with the PHY
+ * flow control bits.
+ */
+ link_state.pause = pl->phy_state.pause;
+ mac_config = true;
}
}
+ if (pl->act_link_an_mode != MLO_AN_FIXED)
+ phylink_apply_manual_flow(pl, &link_state);
+
if (mac_config) {
if (link_state.interface != pl->link_config.interface) {
/* The interface has changed, force the link down and
@@ -1547,6 +1697,12 @@ static void phylink_resolve(struct work_struct *w)
}
}
+ /* If configuration of the interface failed, force the link down
+ * until we get a successful configuration.
+ */
+ if (pl->major_config_failed)
+ link_state.link = false;
+
if (link_state.link != cur_link_state) {
pl->old_link_state = link_state.link;
if (!link_state.link)
@@ -1555,10 +1711,13 @@ static void phylink_resolve(struct work_struct *w)
phylink_link_up(pl, link_state);
}
if (!link_state.link && retrigger) {
- pl->mac_link_dropped = false;
+ pl->link_failed = false;
queue_work(system_power_efficient_wq, &pl->resolve);
}
mutex_unlock(&pl->state_mutex);
+ if (phy)
+ mutex_unlock(&phy->lock);
+ mutex_unlock(&pl->phydev_mutex);
}
static void phylink_run_resolve(struct phylink *pl)
@@ -1631,21 +1790,20 @@ static int phylink_register_sfp(struct phylink *pl,
int phylink_set_fixed_link(struct phylink *pl,
const struct phylink_link_state *state)
{
- const struct phy_setting *s;
+ const struct link_capabilities *c;
unsigned long *adv;
if (pl->cfg_link_an_mode != MLO_AN_PHY || !state ||
!test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state))
return -EINVAL;
- s = phy_lookup_setting(state->speed, state->duplex,
- pl->supported, true);
- if (!s)
+ c = phy_caps_lookup(state->speed, state->duplex,
+ pl->supported, true);
+ if (!c)
return -EINVAL;
adv = pl->link_config.advertising;
- linkmode_zero(adv);
- linkmode_set_bit(s->bit, adv);
+ linkmode_and(adv, pl->supported, c->linkmodes);
linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, adv);
pl->link_config.speed = state->speed;
@@ -1654,7 +1812,7 @@ int phylink_set_fixed_link(struct phylink *pl,
pl->link_config.an_complete = 1;
pl->cfg_link_an_mode = MLO_AN_FIXED;
- pl->cur_link_an_mode = pl->cfg_link_an_mode;
+ pl->req_link_an_mode = pl->cfg_link_an_mode;
return 0;
}
@@ -1695,6 +1853,7 @@ struct phylink *phylink_create(struct phylink_config *config,
if (!pl)
return ERR_PTR(-ENOMEM);
+ mutex_init(&pl->phydev_mutex);
mutex_init(&pl->state_mutex);
INIT_WORK(&pl->resolve, phylink_resolve);
@@ -1709,6 +1868,16 @@ struct phylink *phylink_create(struct phylink_config *config,
return ERR_PTR(-EINVAL);
}
+ pl->mac_supports_eee_ops = phylink_mac_implements_lpi(mac_ops);
+ pl->mac_supports_eee = pl->mac_supports_eee_ops &&
+ pl->config->lpi_capabilities &&
+ !phy_interface_empty(pl->config->lpi_interfaces);
+
+ /* Set the default EEE configuration */
+ pl->eee_cfg.eee_enabled = pl->config->eee_enabled_default;
+ pl->eee_cfg.tx_lpi_enabled = pl->eee_cfg.eee_enabled;
+ pl->eee_cfg.tx_lpi_timer = pl->config->lpi_timer_default;
+
pl->phy_state.interface = iface;
pl->link_interface = iface;
if (iface == PHY_INTERFACE_MODE_MOCA)
@@ -1742,7 +1911,7 @@ struct phylink *phylink_create(struct phylink_config *config,
}
}
- pl->cur_link_an_mode = pl->cfg_link_an_mode;
+ pl->req_link_an_mode = pl->cfg_link_an_mode;
ret = phylink_register_sfp(pl, fwnode);
if (ret < 0) {
@@ -1787,7 +1956,7 @@ bool phylink_expects_phy(struct phylink *pl)
{
if (pl->cfg_link_an_mode == MLO_AN_FIXED ||
(pl->cfg_link_an_mode == MLO_AN_INBAND &&
- phy_interface_mode_is_8023z(pl->link_config.interface)))
+ phy_interface_mode_is_8023z(pl->link_interface)))
return false;
return true;
}
@@ -1811,16 +1980,24 @@ static void phylink_phy_change(struct phy_device *phydev, bool up)
pl->phy_state.pause |= MLO_PAUSE_RX;
pl->phy_state.interface = phydev->interface;
pl->phy_state.link = up;
+ if (!up)
+ pl->link_failed = true;
+
+ /* Get the LPI state from phylib */
+ pl->phy_enable_tx_lpi = phydev->enable_tx_lpi;
+ pl->mac_tx_lpi_timer = phydev->eee_cfg.tx_lpi_timer;
mutex_unlock(&pl->state_mutex);
phylink_run_resolve(pl);
- phylink_dbg(pl, "phy link %s %s/%s/%s/%s/%s\n", up ? "up" : "down",
+ phylink_dbg(pl, "phy link %s %s/%s/%s/%s/%s/%slpi\n",
+ up ? "up" : "down",
phy_modes(phydev->interface),
phy_speed_to_str(phydev->speed),
phy_duplex_to_str(phydev->duplex),
phy_rate_matching_to_str(phydev->rate_matching),
- phylink_pause_to_str(pl->phy_state.pause));
+ phylink_pause_to_str(pl->phy_state.pause),
+ phydev->enable_tx_lpi ? "" : "no");
}
static int phylink_validate_phy(struct phylink *pl, struct phy_device *phy,
@@ -1937,6 +2114,7 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
dev_name(&phy->mdio.dev), phy->drv->name, irq_str);
kfree(irq_str);
+ mutex_lock(&pl->phydev_mutex);
mutex_lock(&phy->lock);
mutex_lock(&pl->state_mutex);
pl->phydev = phy;
@@ -1950,8 +2128,39 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
/* Restrict the phy advertisement according to the MAC support. */
linkmode_copy(phy->advertising, config.advertising);
+
+ /* If the MAC supports phylink managed EEE, restrict the EEE
+ * advertisement according to the MAC's LPI capabilities.
+ */
+ if (pl->mac_supports_eee) {
+ /* If EEE is enabled, then we need to call phy_support_eee()
+ * to ensure that the advertising mask is appropriately set.
+ * This also enables EEE at the PHY.
+ */
+ if (pl->eee_cfg.eee_enabled)
+ phy_support_eee(phy);
+
+ phy->eee_cfg.tx_lpi_enabled = pl->eee_cfg.tx_lpi_enabled;
+ phy->eee_cfg.tx_lpi_timer = pl->eee_cfg.tx_lpi_timer;
+
+ /* Convert the MAC's LPI capabilities to linkmodes */
+ linkmode_zero(pl->supported_lpi);
+ phylink_caps_to_linkmodes(pl->supported_lpi,
+ pl->config->lpi_capabilities);
+
+ /* Restrict the PHYs EEE support/advertisement to the modes
+ * that the MAC supports.
+ */
+ linkmode_and(phy->advertising_eee, phy->advertising_eee,
+ pl->supported_lpi);
+ } else if (pl->mac_supports_eee_ops) {
+ /* MAC supports phylink EEE, but wants EEE always disabled. */
+ phy_disable_eee(phy);
+ }
+
mutex_unlock(&pl->state_mutex);
mutex_unlock(&phy->lock);
+ mutex_unlock(&pl->phydev_mutex);
phylink_dbg(pl,
"phy: %s setting supported %*pb advertising %*pb\n",
@@ -1959,13 +2168,26 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
__ETHTOOL_LINK_MODE_MASK_NBITS, pl->supported,
__ETHTOOL_LINK_MODE_MASK_NBITS, phy->advertising);
- if (phy_interrupt_is_valid(phy))
- phy_request_interrupt(phy);
-
if (pl->config->mac_managed_pm)
phy->mac_managed_pm = true;
- return 0;
+ /* Allow the MAC to stop its clock if the PHY has the capability */
+ pl->mac_tx_clk_stop = phy_eee_tx_clock_stop_capable(phy) > 0;
+
+ if (pl->mac_supports_eee_ops) {
+		/* Explicitly configure whether the PHY is allowed to stop its
+ * receive clock.
+ */
+ ret = phy_eee_rx_clock_stop(phy,
+ pl->config->eee_rx_clk_stop_enable);
+ if (ret == -EOPNOTSUPP)
+ ret = 0;
+ }
+
+ if (ret == 0 && phy_interrupt_is_valid(phy))
+ phy_request_interrupt(phy);
+
+ return ret;
}
static int phylink_attach_phy(struct phylink *pl, struct phy_device *phy,
@@ -2117,15 +2339,21 @@ void phylink_disconnect_phy(struct phylink *pl)
ASSERT_RTNL();
+ mutex_lock(&pl->phydev_mutex);
phy = pl->phydev;
if (phy) {
mutex_lock(&phy->lock);
mutex_lock(&pl->state_mutex);
pl->phydev = NULL;
+ pl->phy_enable_tx_lpi = false;
+ pl->mac_tx_clk_stop = false;
mutex_unlock(&pl->state_mutex);
mutex_unlock(&phy->lock);
- flush_work(&pl->resolve);
+ }
+ mutex_unlock(&pl->phydev_mutex);
+ if (phy) {
+ flush_work(&pl->resolve);
phy_disconnect(phy);
}
}
@@ -2134,7 +2362,7 @@ EXPORT_SYMBOL_GPL(phylink_disconnect_phy);
static void phylink_link_changed(struct phylink *pl, bool up, const char *what)
{
if (!up)
- pl->mac_link_dropped = true;
+ pl->link_failed = true;
phylink_run_resolve(pl);
phylink_dbg(pl, "%s link %s\n", what, up ? "up" : "down");
}
@@ -2197,7 +2425,7 @@ void phylink_start(struct phylink *pl)
ASSERT_RTNL();
phylink_info(pl, "configuring for %s/%s link mode\n",
- phylink_an_mode_str(pl->cur_link_an_mode),
+ phylink_an_mode_str(pl->req_link_an_mode),
phy_modes(pl->link_config.interface));
/* Always set the carrier off */
@@ -2268,7 +2496,7 @@ void phylink_stop(struct phylink *pl)
sfp_upstream_stop(pl->sfp_bus);
if (pl->phydev)
phy_stop(pl->phydev);
- del_timer_sync(&pl->link_poll);
+ timer_delete_sync(&pl->link_poll);
if (pl->link_irq) {
free_irq(pl->link_irq, pl);
pl->link_irq = 0;
@@ -2283,6 +2511,81 @@ void phylink_stop(struct phylink *pl)
EXPORT_SYMBOL_GPL(phylink_stop);
/**
+ * phylink_rx_clk_stop_block() - block PHY ability to stop receive clock in LPI
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Disable the PHY's ability to stop the receive clock while the receive path
+ * is in the EEE LPI state, until the number of calls to
+ * phylink_rx_clk_stop_block() is balanced by calls to
+ * phylink_rx_clk_stop_unblock().
+ */
+void phylink_rx_clk_stop_block(struct phylink *pl)
+{
+ ASSERT_RTNL();
+
+ if (pl->mac_rx_clk_stop_blocked == U8_MAX) {
+ phylink_warn(pl, "%s called too many times - ignoring\n",
+ __func__);
+ dump_stack();
+ return;
+ }
+
+ /* Disable PHY receive clock stop if this is the first time this
+ * function has been called and clock-stop was previously enabled.
+ */
+ if (pl->mac_rx_clk_stop_blocked++ == 0 &&
+ pl->mac_supports_eee_ops && pl->phydev &&
+ pl->config->eee_rx_clk_stop_enable)
+ phy_eee_rx_clock_stop(pl->phydev, false);
+}
+EXPORT_SYMBOL_GPL(phylink_rx_clk_stop_block);
+
+/**
+ * phylink_rx_clk_stop_unblock() - unblock PHY ability to stop receive clock
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * All calls to phylink_rx_clk_stop_block() must be balanced with a
+ * corresponding call to phylink_rx_clk_stop_unblock() to restore the PHY's
+ * ability to stop the receive clock when the receive path is in EEE LPI mode.
+ */
+void phylink_rx_clk_stop_unblock(struct phylink *pl)
+{
+ ASSERT_RTNL();
+
+ if (pl->mac_rx_clk_stop_blocked == 0) {
+ phylink_warn(pl, "%s called too many times - ignoring\n",
+ __func__);
+ dump_stack();
+ return;
+ }
+
+ /* Re-enable PHY receive clock stop if the number of unblocks matches
+ * the number of calls to the block function above.
+ */
+ if (--pl->mac_rx_clk_stop_blocked == 0 &&
+ pl->mac_supports_eee_ops && pl->phydev &&
+ pl->config->eee_rx_clk_stop_enable)
+ phy_eee_rx_clock_stop(pl->phydev, true);
+}
+EXPORT_SYMBOL_GPL(phylink_rx_clk_stop_unblock);
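/*
 * A minimal usage sketch: a MAC driver whose hardware needs the PHY's
 * receive clock running while it reconfigures a block can bracket that
 * window with the block/unblock pair added above (calls must be
 * balanced and made under RTNL). foo_priv, priv->phylink and the
 * register write are hypothetical.
 */
static void foo_update_vlan_filter(struct foo_priv *priv, u16 vid)
{
	/* Caller holds RTNL; keep RXC running while touching the filter */
	phylink_rx_clk_stop_block(priv->phylink);

	writel(vid, priv->base + FOO_VLAN_FILTER);

	phylink_rx_clk_stop_unblock(priv->phylink);
}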
+
+static bool phylink_mac_supports_wol(struct phylink *pl)
+{
+ return !!pl->mac_ops->mac_wol_set;
+}
+
+static bool phylink_phy_supports_wol(struct phylink *pl,
+ struct phy_device *phydev)
+{
+ return phydev && (pl->config->wol_phy_legacy || phy_can_wakeup(phydev));
+}
+
+static bool phylink_phy_pm_speed_ctrl(struct phylink *pl)
+{
+ return pl->config->wol_phy_speed_ctrl && !pl->wolopts_mac &&
+ pl->phydev && phy_may_wakeup(pl->phydev);
+}
+
+/**
* phylink_suspend() - handle a network device suspend event
* @pl: a pointer to a &struct phylink returned from phylink_create()
* @mac_wol: true if the MAC needs to receive packets for Wake-on-Lan
@@ -2295,11 +2598,17 @@ EXPORT_SYMBOL_GPL(phylink_stop);
* can also bring down the link between the MAC and PHY.
* - If Wake-on-Lan is active, but being handled by the MAC, the MAC
* still needs to receive packets, so we can not bring the link down.
+ *
+ * Note: when phylink-managed Wake-on-Lan is in use (i.e. the
+ * struct phylink_mac_ops.mac_wol_set method is populated), @mac_wol is
+ * ignored.
*/
void phylink_suspend(struct phylink *pl, bool mac_wol)
{
ASSERT_RTNL();
+ if (phylink_mac_supports_wol(pl))
+ mac_wol = !!pl->wolopts_mac;
+
if (mac_wol && (!pl->netdev || pl->netdev->ethtool->wol_enabled)) {
/* Wake-on-Lan enabled, MAC handling */
mutex_lock(&pl->state_mutex);
@@ -2307,14 +2616,16 @@ void phylink_suspend(struct phylink *pl, bool mac_wol)
/* Stop the resolver bringing the link up */
__set_bit(PHYLINK_DISABLE_MAC_WOL, &pl->phylink_disable_state);
- /* Disable the carrier, to prevent transmit timeouts,
- * but one would hope all packets have been sent. This
- * also means phylink_resolve() will do nothing.
- */
- if (pl->netdev)
- netif_carrier_off(pl->netdev);
- else
+ pl->suspend_link_up = phylink_link_is_up(pl);
+ if (pl->suspend_link_up) {
+ /* Disable the carrier, to prevent transmit timeouts,
+ * but one would hope all packets have been sent. This
+ * also means phylink_resolve() will do nothing.
+ */
+ if (pl->netdev)
+ netif_carrier_off(pl->netdev);
pl->old_link_state = false;
+ }
/* We do not call mac_link_down() here as we want the
* link to remain up to receive the WoL packets.
@@ -2323,10 +2634,38 @@ void phylink_suspend(struct phylink *pl, bool mac_wol)
} else {
phylink_stop(pl);
}
+
+ if (phylink_phy_pm_speed_ctrl(pl))
+ phylink_speed_down(pl, false);
}
EXPORT_SYMBOL_GPL(phylink_suspend);
/**
+ * phylink_prepare_resume() - prepare to resume a network device
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Optional, but if called must be called prior to phylink_resume().
+ *
+ * Prepare to resume a network device, resuming the PHY if necessary.
+ */
+void phylink_prepare_resume(struct phylink *pl)
+{
+ struct phy_device *phydev = pl->phydev;
+
+ ASSERT_RTNL();
+
+ /* IEEE 802.3 22.2.4.1.5 allows PHYs to stop their receive clock
+ * when PDOWN is set. However, some MACs require RXC to be running
+ * in order to resume. If the MAC requires RXC, and we have a PHY,
+ * then resume the PHY. Note that 802.3 allows PHYs 500ms before
+ * the clock meets requirements. We do not implement this delay.
+ */
+ if (pl->config->mac_requires_rxc && phydev && phydev->suspended)
+ phy_resume(phydev);
+}
+EXPORT_SYMBOL_GPL(phylink_prepare_resume);
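/*
 * A sketch of a driver resume path using the new helper. The ordering
 * is the point: phylink_prepare_resume() runs before the MAC is
 * re-initialised so that RXC is running again, and phylink_resume()
 * follows once the MAC is ready. foo_priv and foo_hw_init() are
 * hypothetical.
 */
static int foo_resume(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	rtnl_lock();
	phylink_prepare_resume(priv->phylink);

	foo_hw_init(priv);		/* MAC needs RXC running here */

	phylink_resume(priv->phylink);
	rtnl_unlock();

	return 0;
}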
+
+/**
* phylink_resume() - handle a network device resume event
* @pl: a pointer to a &struct phylink returned from phylink_create()
*
@@ -2337,18 +2676,24 @@ void phylink_resume(struct phylink *pl)
{
ASSERT_RTNL();
+ if (phylink_phy_pm_speed_ctrl(pl))
+ phylink_speed_up(pl);
+
if (test_bit(PHYLINK_DISABLE_MAC_WOL, &pl->phylink_disable_state)) {
/* Wake-on-Lan enabled, MAC handling */
- /* Call mac_link_down() so we keep the overall state balanced.
- * Do this under the state_mutex lock for consistency. This
- * will cause a "Link Down" message to be printed during
- * resume, which is harmless - the true link state will be
- * printed when we run a resolve.
- */
- mutex_lock(&pl->state_mutex);
- phylink_link_down(pl);
- mutex_unlock(&pl->state_mutex);
+ if (pl->suspend_link_up) {
+ /* Call mac_link_down() so we keep the overall state
+ * balanced. Do this under the state_mutex lock for
+ * consistency. This will cause a "Link Down" message
+ * to be printed during resume, which is harmless -
+ * the true link state will be printed when we run a
+ * resolve.
+ */
+ mutex_lock(&pl->state_mutex);
+ phylink_link_down(pl);
+ mutex_unlock(&pl->state_mutex);
+ }
/* Re-apply the link parameters so that all the settings get
* restored to the MAC.
@@ -2379,8 +2724,24 @@ void phylink_ethtool_get_wol(struct phylink *pl, struct ethtool_wolinfo *wol)
wol->supported = 0;
wol->wolopts = 0;
- if (pl->phydev)
- phy_ethtool_get_wol(pl->phydev, wol);
+ if (phylink_mac_supports_wol(pl)) {
+ if (phylink_phy_supports_wol(pl, pl->phydev))
+ phy_ethtool_get_wol(pl->phydev, wol);
+
+ /* Where the MAC augments the WoL support, merge its support and
+ * current configuration.
+ */
+ if (~wol->wolopts & pl->wolopts_mac & WAKE_MAGICSECURE)
+ memcpy(wol->sopass, pl->wol_sopass,
+ sizeof(wol->sopass));
+
+ wol->supported |= pl->config->wol_mac_support;
+ wol->wolopts |= pl->wolopts_mac;
+ } else {
+ /* Legacy */
+ if (pl->phydev)
+ phy_ethtool_get_wol(pl->phydev, wol);
+ }
}
EXPORT_SYMBOL_GPL(phylink_ethtool_get_wol);
@@ -2397,12 +2758,48 @@ EXPORT_SYMBOL_GPL(phylink_ethtool_get_wol);
*/
int phylink_ethtool_set_wol(struct phylink *pl, struct ethtool_wolinfo *wol)
{
+ struct ethtool_wolinfo w = { .cmd = ETHTOOL_GWOL };
int ret = -EOPNOTSUPP;
+ bool changed;
+ u32 wolopts;
ASSERT_RTNL();
- if (pl->phydev)
- ret = phy_ethtool_set_wol(pl->phydev, wol);
+ if (phylink_mac_supports_wol(pl)) {
+ wolopts = wol->wolopts;
+
+ if (phylink_phy_supports_wol(pl, pl->phydev)) {
+ ret = phy_ethtool_set_wol(pl->phydev, wol);
+ if (ret != 0 && ret != -EOPNOTSUPP)
+ return ret;
+
+ phy_ethtool_get_wol(pl->phydev, &w);
+
+ /* Any Wake-on-Lan modes which the PHY is handling
+ * should not be passed on to the MAC.
+ */
+ wolopts &= ~w.wolopts;
+ }
+
+ wolopts &= pl->config->wol_mac_support;
+ changed = pl->wolopts_mac != wolopts;
+ if (wolopts & WAKE_MAGICSECURE)
+ changed |= !!memcmp(wol->sopass, pl->wol_sopass,
+ sizeof(wol->sopass));
+ memcpy(pl->wol_sopass, wol->sopass, sizeof(pl->wol_sopass));
+
+ if (changed) {
+ ret = pl->mac_ops->mac_wol_set(pl->config, wolopts,
+ wol->sopass);
+ if (!ret)
+ pl->wolopts_mac = wolopts;
+ } else {
+ ret = 0;
+ }
+ } else {
+ if (pl->phydev)
+ ret = phy_ethtool_set_wol(pl->phydev, wol);
+ }
return ret;
}
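/*
 * A rough sketch of the mac_wol_set() method that the code above calls
 * for Wake-on-Lan modes the PHY does not handle. The prototype is
 * assumed from the call site; foo_priv and the FOO_* register layout
 * are hypothetical.
 */
static int foo_mac_wol_set(struct phylink_config *config, u32 wolopts,
			   const u8 *sopass)
{
	struct foo_priv *priv = netdev_priv(to_net_dev(config->dev));
	u32 ctrl = 0;

	if (wolopts & WAKE_MAGIC)
		ctrl |= FOO_WOL_MAGIC;

	if (wolopts & WAKE_MAGICSECURE) {
		/* sopass is only meaningful with WAKE_MAGICSECURE */
		memcpy_toio(priv->base + FOO_WOL_SOPASS, sopass, 6);
		ctrl |= FOO_WOL_MAGICSECURE;
	}

	writel(ctrl, priv->base + FOO_WOL_CTRL);

	return 0;
}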
@@ -2434,6 +2831,39 @@ static phy_interface_t phylink_sfp_select_interface(struct phylink *pl,
return interface;
}
+static phy_interface_t phylink_sfp_select_interface_speed(struct phylink *pl,
+ u32 speed)
+{
+ phy_interface_t best_interface = PHY_INTERFACE_MODE_NA;
+ phy_interface_t interface;
+ u32 max_speed;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(phylink_sfp_interface_preference); i++) {
+ interface = phylink_sfp_interface_preference[i];
+ if (!test_bit(interface, pl->sfp_interfaces))
+ continue;
+
+ max_speed = phylink_interface_max_speed(interface);
+
+		/* If max_speed equals the requested speed, this is the best
+		 * interface. Otherwise, remember the slowest interface that
+		 * can still support the requested speed.
+ */
+ if (max_speed >= speed)
+ best_interface = interface;
+
+ if (max_speed <= speed)
+ break;
+ }
+
+ if (best_interface == PHY_INTERFACE_MODE_NA)
+ phylink_err(pl, "selection of interface failed, speed %u\n",
+ speed);
+
+ return best_interface;
+}
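/*
 * A worked example of the selection above (a sketch, assuming the
 * preference array is ordered fastest first and the SFP's common
 * interfaces are 10GBASE-R, 2500BASE-X and SGMII):
 *
 *	interface = phylink_sfp_select_interface_speed(pl, SPEED_2500);
 *
 * 10GBASE-R is visited first (10000 >= 2500, remembered), then
 * 2500BASE-X (2500 >= 2500, remembered; 2500 <= 2500, stop), so the
 * call returns PHY_INTERFACE_MODE_2500BASEX - the slowest interface
 * that still carries the requested speed.
 */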
+
static void phylink_merge_link_mode(unsigned long *dst, const unsigned long *b)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask);
@@ -2482,7 +2912,7 @@ int phylink_ethtool_ksettings_get(struct phylink *pl,
linkmode_copy(kset->link_modes.supported, pl->supported);
- switch (pl->cur_link_an_mode) {
+ switch (pl->act_link_an_mode) {
case MLO_AN_FIXED:
/* We are using fixed settings. Report these as the
* current link settings - and note that these also
@@ -2513,6 +2943,26 @@ int phylink_ethtool_ksettings_get(struct phylink *pl,
}
EXPORT_SYMBOL_GPL(phylink_ethtool_ksettings_get);
+static bool phylink_validate_pcs_inband_autoneg(struct phylink *pl,
+ phy_interface_t interface,
+ unsigned long *adv)
+{
+ unsigned int inband = phylink_inband_caps(pl, interface);
+ unsigned int mask;
+
+ /* If the PCS doesn't implement inband support, be permissive. */
+ if (!inband)
+ return true;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, adv))
+ mask = LINK_INBAND_ENABLE;
+ else
+ mask = LINK_INBAND_DISABLE;
+
+ /* Check whether the PCS implements the required mode */
+ return !!(inband & mask);
+}
+
/**
* phylink_ethtool_ksettings_set() - set the link settings
* @pl: a pointer to a &struct phylink returned from phylink_create()
@@ -2522,8 +2972,8 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
const struct ethtool_link_ksettings *kset)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(support);
+ const struct link_capabilities *c;
struct phylink_link_state config;
- const struct phy_setting *s;
ASSERT_RTNL();
@@ -2566,23 +3016,23 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
/* Autonegotiation disabled, select a suitable speed and
* duplex.
*/
- s = phy_lookup_setting(kset->base.speed, kset->base.duplex,
- pl->supported, false);
- if (!s)
+ c = phy_caps_lookup(kset->base.speed, kset->base.duplex,
+ pl->supported, false);
+ if (!c)
return -EINVAL;
/* If we have a fixed link, refuse to change link parameters.
* If the link parameters match, accept them but do nothing.
*/
- if (pl->cur_link_an_mode == MLO_AN_FIXED) {
- if (s->speed != pl->link_config.speed ||
- s->duplex != pl->link_config.duplex)
+ if (pl->req_link_an_mode == MLO_AN_FIXED) {
+ if (c->speed != pl->link_config.speed ||
+ c->duplex != pl->link_config.duplex)
return -EINVAL;
return 0;
}
- config.speed = s->speed;
- config.duplex = s->duplex;
+ config.speed = c->speed;
+ config.duplex = c->duplex;
break;
case AUTONEG_ENABLE:
@@ -2590,7 +3040,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
* is our default case) but do not allow the advertisement to
* be changed. If the advertisement matches, simply return.
*/
- if (pl->cur_link_an_mode == MLO_AN_FIXED) {
+ if (pl->req_link_an_mode == MLO_AN_FIXED) {
if (!linkmode_equal(config.advertising,
pl->link_config.advertising))
return -EINVAL;
@@ -2616,8 +3066,14 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
* link can be configured correctly.
*/
if (pl->sfp_bus) {
- config.interface = phylink_sfp_select_interface(pl,
+ if (kset->base.autoneg == AUTONEG_ENABLE)
+ config.interface =
+ phylink_sfp_select_interface(pl,
config.advertising);
+ else
+ config.interface =
+ phylink_sfp_select_interface_speed(pl,
+ config.speed);
if (config.interface == PHY_INTERFACE_MODE_NA)
return -EINVAL;
@@ -2625,7 +3081,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
linkmode_copy(support, pl->supported);
if (phylink_validate(pl, support, &config)) {
phylink_err(pl, "validation of %s/%s with support %*pb failed\n",
- phylink_an_mode_str(pl->cur_link_an_mode),
+ phylink_an_mode_str(pl->req_link_an_mode),
phy_modes(config.interface),
__ETHTOOL_LINK_MODE_MASK_NBITS, support);
return -EINVAL;
@@ -2643,6 +3099,13 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
phylink_is_empty_linkmode(config.advertising))
return -EINVAL;
+ /* Validate the autonegotiation state. We don't have a PHY in this
+ * situation, so the PCS is the media-facing entity.
+ */
+ if (!phylink_validate_pcs_inband_autoneg(pl, config.interface,
+ config.advertising))
+ return -EINVAL;
+
mutex_lock(&pl->state_mutex);
pl->link_config.speed = config.speed;
pl->link_config.duplex = config.duplex;
@@ -2725,7 +3188,7 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
ASSERT_RTNL();
- if (pl->cur_link_an_mode == MLO_AN_FIXED)
+ if (pl->req_link_an_mode == MLO_AN_FIXED)
return -EOPNOTSUPP;
if (!phylink_test(pl->supported, Pause) &&
@@ -2789,7 +3252,7 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
* link will cycle.
*/
if (manual_changed) {
- pl->mac_link_dropped = true;
+ pl->link_failed = true;
phylink_run_resolve(pl);
}
@@ -2821,24 +3284,6 @@ int phylink_get_eee_err(struct phylink *pl)
EXPORT_SYMBOL_GPL(phylink_get_eee_err);
/**
- * phylink_init_eee() - init and check the EEE features
- * @pl: a pointer to a &struct phylink returned from phylink_create()
- * @clk_stop_enable: allow PHY to stop receive clock
- *
- * Must be called either with RTNL held or within mac_link_up()
- */
-int phylink_init_eee(struct phylink *pl, bool clk_stop_enable)
-{
- int ret = -EOPNOTSUPP;
-
- if (pl->phydev)
- ret = phy_init_eee(pl->phydev, clk_stop_enable);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(phylink_init_eee);
-
-/**
* phylink_ethtool_get_eee() - read the energy efficient ethernet parameters
* @pl: a pointer to a &struct phylink returned from phylink_create()
* @eee: a pointer to a &struct ethtool_keee for the read parameters
@@ -2849,8 +3294,16 @@ int phylink_ethtool_get_eee(struct phylink *pl, struct ethtool_keee *eee)
ASSERT_RTNL();
- if (pl->phydev)
+ if (pl->mac_supports_eee_ops && !pl->mac_supports_eee)
+ return ret;
+
+ if (pl->phydev) {
ret = phy_ethtool_get_eee(pl->phydev, eee);
+ /* Restrict supported linkmode mask */
+ if (ret == 0 && pl->mac_supports_eee_ops)
+ linkmode_and(eee->supported, eee->supported,
+ pl->supported_lpi);
+ }
return ret;
}
@@ -2863,12 +3316,29 @@ EXPORT_SYMBOL_GPL(phylink_ethtool_get_eee);
*/
int phylink_ethtool_set_eee(struct phylink *pl, struct ethtool_keee *eee)
{
+ bool mac_eee = pl->mac_supports_eee;
int ret = -EOPNOTSUPP;
ASSERT_RTNL();
- if (pl->phydev)
+ phylink_dbg(pl, "mac %s phylink EEE%s, adv %*pbl, LPI%s timer %uus\n",
+ mac_eee ? "supports" : "does not support",
+ eee->eee_enabled ? ", enabled" : "",
+ __ETHTOOL_LINK_MODE_MASK_NBITS, eee->advertised,
+ eee->tx_lpi_enabled ? " enabled" : "", eee->tx_lpi_timer);
+
+ if (pl->mac_supports_eee_ops && !mac_eee)
+ return ret;
+
+ if (pl->phydev) {
+ /* Restrict advertisement mask */
+ if (pl->mac_supports_eee_ops)
+ linkmode_and(eee->advertised, eee->advertised,
+ pl->supported_lpi);
ret = phy_ethtool_set_eee(pl->phydev, eee);
+ if (ret == 0)
+ eee_to_eeecfg(&pl->eee_cfg, eee);
+ }
return ret;
}
@@ -2989,7 +3459,7 @@ static int phylink_mii_read(struct phylink *pl, unsigned int phy_id,
struct phylink_link_state state;
int val = 0xffff;
- switch (pl->cur_link_an_mode) {
+ switch (pl->act_link_an_mode) {
case MLO_AN_FIXED:
if (phy_id == 0) {
phylink_get_fixed_state(pl, &state);
@@ -3014,7 +3484,7 @@ static int phylink_mii_read(struct phylink *pl, unsigned int phy_id,
static int phylink_mii_write(struct phylink *pl, unsigned int phy_id,
unsigned int reg, unsigned int val)
{
- switch (pl->cur_link_an_mode) {
+ switch (pl->act_link_an_mode) {
case MLO_AN_FIXED:
break;
@@ -3184,11 +3654,11 @@ static phy_interface_t phylink_choose_sfp_interface(struct phylink *pl,
return interface;
}
-static void phylink_sfp_set_config(struct phylink *pl, u8 mode,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void phylink_sfp_set_config(struct phylink *pl, unsigned long *supported,
+ struct phylink_link_state *state,
+ bool changed)
{
- bool changed = false;
+ u8 mode = MLO_AN_INBAND;
phylink_dbg(pl, "requesting link mode %s/%s with support %*pb\n",
phylink_an_mode_str(mode), phy_modes(state->interface),
@@ -3204,9 +3674,9 @@ static void phylink_sfp_set_config(struct phylink *pl, u8 mode,
changed = true;
}
- if (pl->cur_link_an_mode != mode ||
+ if (pl->req_link_an_mode != mode ||
pl->link_config.interface != state->interface) {
- pl->cur_link_an_mode = mode;
+ pl->req_link_an_mode = mode;
pl->link_config.interface = state->interface;
changed = true;
@@ -3221,13 +3691,14 @@ static void phylink_sfp_set_config(struct phylink *pl, u8 mode,
phylink_mac_initial_config(pl, false);
}
-static int phylink_sfp_config_phy(struct phylink *pl, u8 mode,
- struct phy_device *phy)
+static int phylink_sfp_config_phy(struct phylink *pl, struct phy_device *phy)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(support);
struct phylink_link_state config;
int ret;
+ /* We're not using pl->sfp_interfaces, so clear it. */
+ phy_interface_zero(pl->sfp_interfaces);
linkmode_copy(support, phy->supported);
memset(&config, 0, sizeof(config));
@@ -3266,7 +3737,7 @@ static int phylink_sfp_config_phy(struct phylink *pl, u8 mode,
pl->link_port = pl->sfp_port;
- phylink_sfp_set_config(pl, mode, support, &config);
+ phylink_sfp_set_config(pl, support, &config, true);
return 0;
}
@@ -3274,8 +3745,8 @@ static int phylink_sfp_config_phy(struct phylink *pl, u8 mode,
static int phylink_sfp_config_optical(struct phylink *pl)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(support);
- DECLARE_PHY_INTERFACE_MASK(interfaces);
struct phylink_link_state config;
+ enum inband_type inband_type;
phy_interface_t interface;
int ret;
@@ -3288,9 +3759,9 @@ static int phylink_sfp_config_optical(struct phylink *pl)
/* Find the union of the supported interfaces by the PCS/MAC and
* the SFP module.
*/
- phy_interface_and(interfaces, pl->config->supported_interfaces,
+ phy_interface_and(pl->sfp_interfaces, pl->config->supported_interfaces,
pl->sfp_interfaces);
- if (phy_interface_empty(interfaces)) {
+ if (phy_interface_empty(pl->sfp_interfaces)) {
phylink_err(pl, "unsupported SFP module: no common interface modes\n");
return -EINVAL;
}
@@ -3306,14 +3777,14 @@ static int phylink_sfp_config_optical(struct phylink *pl)
* mask to only those link modes that can be supported.
*/
ret = phylink_validate_mask(pl, NULL, pl->sfp_support, &config,
- interfaces);
+ pl->sfp_interfaces);
if (ret) {
phylink_err(pl, "unsupported SFP module: validation with support %*pb failed\n",
__ETHTOOL_LINK_MODE_MASK_NBITS, support);
return ret;
}
- interface = phylink_choose_sfp_interface(pl, interfaces);
+ interface = phylink_choose_sfp_interface(pl, pl->sfp_interfaces);
if (interface == PHY_INTERFACE_MODE_NA) {
phylink_err(pl, "failed to select SFP interface\n");
return -EINVAL;
@@ -3322,6 +3793,29 @@ static int phylink_sfp_config_optical(struct phylink *pl)
phylink_dbg(pl, "optical SFP: chosen %s interface\n",
phy_modes(interface));
+ inband_type = phylink_get_inband_type(interface);
+ if (inband_type == INBAND_NONE) {
+ /* If this is the sole interface, and there is no inband
+ * support, clear the advertising mask and Autoneg bit in
+ * the support mask. Otherwise, just clear the Autoneg bit
+ * in the advertising mask.
+ */
+ if (phy_interface_weight(pl->sfp_interfaces) == 1) {
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ pl->sfp_support);
+ linkmode_zero(config.advertising);
+ } else {
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ config.advertising);
+ }
+ }
+
+ if (!phylink_validate_pcs_inband_autoneg(pl, interface,
+ config.advertising)) {
+ phylink_err(pl, "autoneg setting not compatible with PCS");
+ return -EINVAL;
+ }
+
config.interface = interface;
/* Ignore errors if we're expecting a PHY to attach later */
@@ -3335,7 +3829,7 @@ static int phylink_sfp_config_optical(struct phylink *pl)
pl->link_port = pl->sfp_port;
- phylink_sfp_set_config(pl, MLO_AN_INBAND, pl->sfp_support, &config);
+ phylink_sfp_set_config(pl, pl->sfp_support, &config, false);
return 0;
}
@@ -3343,23 +3837,31 @@ static int phylink_sfp_config_optical(struct phylink *pl)
static int phylink_sfp_module_insert(void *upstream,
const struct sfp_eeprom_id *id)
{
+ const struct sfp_module_caps *caps;
struct phylink *pl = upstream;
ASSERT_RTNL();
- linkmode_zero(pl->sfp_support);
- phy_interface_zero(pl->sfp_interfaces);
- sfp_parse_support(pl->sfp_bus, id, pl->sfp_support, pl->sfp_interfaces);
- pl->sfp_port = sfp_parse_port(pl->sfp_bus, id, pl->sfp_support);
+ caps = sfp_get_module_caps(pl->sfp_bus);
+ phy_interface_copy(pl->sfp_interfaces, caps->interfaces);
+ linkmode_copy(pl->sfp_support, caps->link_modes);
+ pl->sfp_may_have_phy = caps->may_have_phy;
+ pl->sfp_port = caps->port;
/* If this module may have a PHY connecting later, defer until later */
- pl->sfp_may_have_phy = sfp_may_have_phy(pl->sfp_bus, id);
if (pl->sfp_may_have_phy)
return 0;
return phylink_sfp_config_optical(pl);
}
+static void phylink_sfp_module_remove(void *upstream)
+{
+ struct phylink *pl = upstream;
+
+ phy_interface_zero(pl->sfp_interfaces);
+}
+
static int phylink_sfp_module_start(void *upstream)
{
struct phylink *pl = upstream;
@@ -3406,19 +3908,16 @@ static void phylink_sfp_link_up(void *upstream)
phylink_enable_and_run_resolve(pl, PHYLINK_DISABLE_LINK);
}
-/* The Broadcom BCM84881 in the Methode DM7052 is unable to provide a SGMII
- * or 802.3z control word, so inband will not work.
- */
-static bool phylink_phy_no_inband(struct phy_device *phy)
-{
- return phy->is_c45 && phy_id_compare(phy->c45_ids.device_ids[1],
- 0xae025150, 0xfffffff0);
-}
-
static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy)
{
struct phylink *pl = upstream;
- u8 mode;
+
+ if (!phy->drv) {
+ phylink_err(pl, "PHY %s (id 0x%.8lx) has no driver loaded\n",
+ phydev_name(phy), (unsigned long)phy->phy_id);
+ phylink_err(pl, "Drivers which handle known common cases: CONFIG_BCM84881_PHY, CONFIG_MARVELL_PHY\n");
+ return -EINVAL;
+ }
/*
* This is the new way of dealing with flow control for PHYs,
@@ -3429,17 +3928,12 @@ static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy)
*/
phy_support_asym_pause(phy);
- if (phylink_phy_no_inband(phy))
- mode = MLO_AN_PHY;
- else
- mode = MLO_AN_INBAND;
-
/* Set the PHY's host supported interfaces */
phy_interface_and(phy->host_interfaces, phylink_sfp_interfaces,
pl->config->supported_interfaces);
/* Do the initial configuration */
- return phylink_sfp_config_phy(pl, mode, phy);
+ return phylink_sfp_config_phy(pl, phy);
}
static void phylink_sfp_disconnect_phy(void *upstream,
@@ -3452,6 +3946,7 @@ static const struct sfp_upstream_ops sfp_phylink_ops = {
.attach = phylink_sfp_attach,
.detach = phylink_sfp_detach,
.module_insert = phylink_sfp_module_insert,
+ .module_remove = phylink_sfp_module_remove,
.module_start = phylink_sfp_module_start,
.module_stop = phylink_sfp_module_stop,
.link_up = phylink_sfp_link_up,
@@ -3634,6 +4129,7 @@ static void phylink_decode_usgmii_word(struct phylink_link_state *state,
/**
* phylink_mii_c22_pcs_decode_state() - Decode MAC PCS state from MII registers
* @state: a pointer to a &struct phylink_link_state.
+ * @neg_mode: link negotiation mode (PHYLINK_PCS_NEG_xxx)
* @bmsr: The value of the %MII_BMSR register
* @lpa: The value of the %MII_LPA register
*
@@ -3646,32 +4142,45 @@ static void phylink_decode_usgmii_word(struct phylink_link_state *state,
* accessing @bmsr and @lpa cannot be done with MDIO directly.
*/
void phylink_mii_c22_pcs_decode_state(struct phylink_link_state *state,
- u16 bmsr, u16 lpa)
+ unsigned int neg_mode, u16 bmsr, u16 lpa)
{
state->link = !!(bmsr & BMSR_LSTATUS);
state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
- /* If there is no link or autonegotiation is disabled, the LP advertisement
- * data is not meaningful, so don't go any further.
- */
- if (!state->link || !linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
- state->advertising))
+
+ /* If the link is down, the advertisement data is undefined. */
+ if (!state->link)
return;
switch (state->interface) {
case PHY_INTERFACE_MODE_1000BASEX:
- phylink_decode_c37_word(state, lpa, SPEED_1000);
+ if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) {
+ phylink_decode_c37_word(state, lpa, SPEED_1000);
+ } else {
+ state->speed = SPEED_1000;
+ state->duplex = DUPLEX_FULL;
+ state->pause |= MLO_PAUSE_TX | MLO_PAUSE_RX;
+ }
break;
case PHY_INTERFACE_MODE_2500BASEX:
- phylink_decode_c37_word(state, lpa, SPEED_2500);
+ if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) {
+ phylink_decode_c37_word(state, lpa, SPEED_2500);
+ } else {
+ state->speed = SPEED_2500;
+ state->duplex = DUPLEX_FULL;
+ state->pause |= MLO_PAUSE_TX | MLO_PAUSE_RX;
+ }
break;
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_QSGMII:
- phylink_decode_sgmii_word(state, lpa);
+ if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
+ phylink_decode_sgmii_word(state, lpa);
break;
+
case PHY_INTERFACE_MODE_QUSGMII:
- phylink_decode_usgmii_word(state, lpa);
+ if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
+ phylink_decode_usgmii_word(state, lpa);
break;
default:
@@ -3684,6 +4193,7 @@ EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_decode_state);
/**
* phylink_mii_c22_pcs_get_state() - read the MAC PCS state
* @pcs: a pointer to a &struct mdio_device.
+ * @neg_mode: link negotiation mode (PHYLINK_PCS_NEG_xxx)
* @state: a pointer to a &struct phylink_link_state.
*
* Helper for MAC PCS supporting the 802.3 clause 22 register set for
@@ -3696,6 +4206,7 @@ EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_decode_state);
* structure.
*/
void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
int bmsr, lpa;
@@ -3707,7 +4218,7 @@ void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
return;
}
- phylink_mii_c22_pcs_decode_state(state, bmsr, lpa);
+ phylink_mii_c22_pcs_decode_state(state, neg_mode, bmsr, lpa);
}
EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_get_state);
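Illustrative sketch, not part of the patch: a clause-22 PCS driver forwarding the negotiation mode to the updated helper. The struct my_pcs type, the my_pcs_get_state() name, and the assumption that the driver's .pcs_get_state callback now receives neg_mode are mine, inferred from the new helper prototype above rather than taken from this diff.

#include <linux/mdio.h>
#include <linux/phylink.h>

struct my_pcs {
	struct phylink_pcs pcs;
	struct mdio_device *mdio;
};

static void my_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
			     struct phylink_link_state *state)
{
	struct my_pcs *mp = container_of(pcs, struct my_pcs, pcs);

	/* Forward neg_mode so the helper only decodes the LPA word when
	 * in-band autoneg is enabled (PHYLINK_PCS_NEG_INBAND_ENABLED); for
	 * 1000BASE-X/2500BASE-X with in-band disabled it reports a fixed
	 * speed and duplex instead.
	 */
	phylink_mii_c22_pcs_get_state(mp->mdio, neg_mode, state);
}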
diff --git a/drivers/net/phy/qcom/Kconfig b/drivers/net/phy/qcom/Kconfig
index 570626cc8e14..06e8430c13b1 100644
--- a/drivers/net/phy/qcom/Kconfig
+++ b/drivers/net/phy/qcom/Kconfig
@@ -7,7 +7,7 @@ config AT803X_PHY
select QCOM_NET_PHYLIB
depends on REGULATOR
help
- Currently supports the AR8030, AR8031, AR8033, AR8035 model
+ Currently supports the AR8030, AR8031, AR8033, AR8035, IPQ5018 model
config QCA83XX_PHY
tristate "Qualcomm Atheros QCA833x PHYs"
@@ -24,6 +24,7 @@ config QCA808X_PHY
config QCA807X_PHY
tristate "Qualcomm QCA807x PHYs"
select QCOM_NET_PHYLIB
+ select PHY_PACKAGE
depends on OF_MDIO
help
Currently supports the Qualcomm QCA8072, QCA8075 and the PSGMII
diff --git a/drivers/net/phy/qcom/at803x.c b/drivers/net/phy/qcom/at803x.c
index 105602581a03..338acd11a9b6 100644
--- a/drivers/net/phy/qcom/at803x.c
+++ b/drivers/net/phy/qcom/at803x.c
@@ -19,6 +19,7 @@
#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <linux/phylink.h>
+#include <linux/reset.h>
#include <linux/sfp.h>
#include <dt-bindings/net/qca-ar803x.h>
@@ -26,9 +27,6 @@
#define AT803X_LED_CONTROL 0x18
-#define AT803X_PHY_MMD3_WOL_CTRL 0x8012
-#define AT803X_WOL_EN BIT(5)
-
#define AT803X_REG_CHIP_CONFIG 0x1f
#define AT803X_BT_BX_REG_SEL 0x8000
@@ -96,6 +94,8 @@
#define ATH8035_PHY_ID 0x004dd072
#define AT8030_PHY_ID_MASK 0xffffffef
+#define IPQ5018_PHY_ID 0x004dd0c0
+
#define QCA9561_PHY_ID 0x004dd042
#define AT803X_PAGE_FIBER 0
@@ -108,6 +108,48 @@
/* disable hibernation mode */
#define AT803X_DISABLE_HIBERNATION_MODE BIT(2)
+#define IPQ5018_PHY_FIFO_CONTROL 0x19
+#define IPQ5018_PHY_FIFO_RESET GENMASK(1, 0)
+
+#define IPQ5018_PHY_DEBUG_EDAC 0x4380
+#define IPQ5018_PHY_MMD1_MDAC 0x8100
+#define IPQ5018_PHY_DAC_MASK GENMASK(15, 8)
+
+/* MDAC and EDAC values for short cable length */
+#define IPQ5018_PHY_DEBUG_EDAC_VAL 0x10
+#define IPQ5018_PHY_MMD1_MDAC_VAL 0x10
+
+#define IPQ5018_PHY_MMD1_MSE_THRESH1 0x1000
+#define IPQ5018_PHY_MMD1_MSE_THRESH2 0x1001
+#define IPQ5018_PHY_PCS_EEE_TX_TIMER 0x8008
+#define IPQ5018_PHY_PCS_EEE_RX_TIMER 0x8009
+#define IPQ5018_PHY_PCS_CDT_THRESH_CTRL3 0x8074
+#define IPQ5018_PHY_PCS_CDT_THRESH_CTRL4 0x8075
+#define IPQ5018_PHY_PCS_CDT_THRESH_CTRL5 0x8076
+#define IPQ5018_PHY_PCS_CDT_THRESH_CTRL6 0x8077
+#define IPQ5018_PHY_PCS_CDT_THRESH_CTRL7 0x8078
+#define IPQ5018_PHY_PCS_CDT_THRESH_CTRL9 0x807a
+#define IPQ5018_PHY_PCS_CDT_THRESH_CTRL13 0x807e
+#define IPQ5018_PHY_PCS_CDT_THRESH_CTRL14 0x807f
+
+#define IPQ5018_PHY_MMD1_MSE_THRESH1_VAL 0xf1
+#define IPQ5018_PHY_MMD1_MSE_THRESH2_VAL 0x1f6
+#define IPQ5018_PHY_PCS_EEE_TX_TIMER_VAL 0x7880
+#define IPQ5018_PHY_PCS_EEE_RX_TIMER_VAL 0xc8
+#define IPQ5018_PHY_PCS_CDT_THRESH_CTRL3_VAL 0xc040
+#define IPQ5018_PHY_PCS_CDT_THRESH_CTRL4_VAL 0xa060
+#define IPQ5018_PHY_PCS_CDT_THRESH_CTRL5_VAL 0xc040
+#define IPQ5018_PHY_PCS_CDT_THRESH_CTRL6_VAL 0xa060
+#define IPQ5018_PHY_PCS_CDT_THRESH_CTRL7_VAL 0xc24c
+#define IPQ5018_PHY_PCS_CDT_THRESH_CTRL9_VAL 0xc060
+#define IPQ5018_PHY_PCS_CDT_THRESH_CTRL13_VAL 0xb060
+#define IPQ5018_PHY_PCS_NEAR_ECHO_THRESH_VAL 0x90b0
+
+#define IPQ5018_PHY_DEBUG_ANA_LDO_EFUSE 0x1
+#define IPQ5018_PHY_DEBUG_ANA_LDO_EFUSE_MASK GENMASK(7, 4)
+#define IPQ5018_PHY_DEBUG_ANA_LDO_EFUSE_DEFAULT 0x50
+#define IPQ5018_PHY_DEBUG_ANA_DAC_FILTER 0xa080
+
MODULE_DESCRIPTION("Qualcomm Atheros AR803x PHY driver");
MODULE_AUTHOR("Matus Ujhelyi");
MODULE_LICENSE("GPL");
@@ -133,6 +175,11 @@ struct at803x_context {
u16 led_control;
};
+struct ipq5018_priv {
+ struct reset_control *rst;
+ bool set_short_cable_dac;
+};
+
static int at803x_write_page(struct phy_device *phydev, int page)
{
int mask;
@@ -724,10 +771,10 @@ static int at8031_register_regulators(struct phy_device *phydev)
static int at8031_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
{
- struct phy_device *phydev = upstream;
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_support);
__ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
- DECLARE_PHY_INTERFACE_MASK(interfaces);
+ struct phy_device *phydev = upstream;
+ const struct sfp_module_caps *caps;
phy_interface_t iface;
linkmode_zero(phy_support);
@@ -737,12 +784,11 @@ static int at8031_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
phylink_set(phy_support, Pause);
phylink_set(phy_support, Asym_Pause);
- linkmode_zero(sfp_support);
- sfp_parse_support(phydev->sfp_bus, id, sfp_support, interfaces);
+ caps = sfp_get_module_caps(phydev->sfp_bus);
/* Some modules support 10G modes as well as others we support.
* Mask out non-supported modes so the correct interface is picked.
*/
- linkmode_and(sfp_support, phy_support, sfp_support);
+ linkmode_and(sfp_support, phy_support, caps->link_modes);
if (linkmode_empty(sfp_support)) {
dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
@@ -866,30 +912,6 @@ static int at8031_config_init(struct phy_device *phydev)
return at803x_config_init(phydev);
}
-static int at8031_set_wol(struct phy_device *phydev,
- struct ethtool_wolinfo *wol)
-{
- int ret;
-
- /* First setup MAC address and enable WOL interrupt */
- ret = at803x_set_wol(phydev, wol);
- if (ret)
- return ret;
-
- if (wol->wolopts & WAKE_MAGIC)
- /* Enable WOL function for 1588 */
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
- AT803X_PHY_MMD3_WOL_CTRL,
- 0, AT803X_WOL_EN);
- else
- /* Disable WoL function for 1588 */
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
- AT803X_PHY_MMD3_WOL_CTRL,
- AT803X_WOL_EN, 0);
-
- return ret;
-}
-
static int at8031_config_intr(struct phy_device *phydev)
{
struct at803x_priv *priv = phydev->priv;
@@ -987,6 +1009,109 @@ static int at8035_probe(struct phy_device *phydev)
return at8035_parse_dt(phydev);
}
+static int ipq5018_cable_test_start(struct phy_device *phydev)
+{
+ phy_write_mmd(phydev, MDIO_MMD_PCS, IPQ5018_PHY_PCS_CDT_THRESH_CTRL3,
+ IPQ5018_PHY_PCS_CDT_THRESH_CTRL3_VAL);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, IPQ5018_PHY_PCS_CDT_THRESH_CTRL4,
+ IPQ5018_PHY_PCS_CDT_THRESH_CTRL4_VAL);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, IPQ5018_PHY_PCS_CDT_THRESH_CTRL5,
+ IPQ5018_PHY_PCS_CDT_THRESH_CTRL5_VAL);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, IPQ5018_PHY_PCS_CDT_THRESH_CTRL6,
+ IPQ5018_PHY_PCS_CDT_THRESH_CTRL6_VAL);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, IPQ5018_PHY_PCS_CDT_THRESH_CTRL7,
+ IPQ5018_PHY_PCS_CDT_THRESH_CTRL7_VAL);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, IPQ5018_PHY_PCS_CDT_THRESH_CTRL9,
+ IPQ5018_PHY_PCS_CDT_THRESH_CTRL9_VAL);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, IPQ5018_PHY_PCS_CDT_THRESH_CTRL13,
+ IPQ5018_PHY_PCS_CDT_THRESH_CTRL13_VAL);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, IPQ5018_PHY_PCS_CDT_THRESH_CTRL3,
+ IPQ5018_PHY_PCS_NEAR_ECHO_THRESH_VAL);
+
+ /* we do all the (time consuming) work later */
+ return 0;
+}
+
+static int ipq5018_config_init(struct phy_device *phydev)
+{
+ struct ipq5018_priv *priv = phydev->priv;
+ u16 val;
+
+ /*
+	 * set the LDO efuse: first temporarily save the ANA_DAC_FILTER value
+	 * from the debug register, as it is reset once the ANA_LDO_EFUSE
+	 * register is written
+ */
+ val = at803x_debug_reg_read(phydev, IPQ5018_PHY_DEBUG_ANA_DAC_FILTER);
+ at803x_debug_reg_mask(phydev, IPQ5018_PHY_DEBUG_ANA_LDO_EFUSE,
+ IPQ5018_PHY_DEBUG_ANA_LDO_EFUSE_MASK,
+ IPQ5018_PHY_DEBUG_ANA_LDO_EFUSE_DEFAULT);
+ at803x_debug_reg_write(phydev, IPQ5018_PHY_DEBUG_ANA_DAC_FILTER, val);
+
+ /* set 8023AZ EEE TX and RX timer values */
+ phy_write_mmd(phydev, MDIO_MMD_PCS, IPQ5018_PHY_PCS_EEE_TX_TIMER,
+ IPQ5018_PHY_PCS_EEE_TX_TIMER_VAL);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, IPQ5018_PHY_PCS_EEE_RX_TIMER,
+ IPQ5018_PHY_PCS_EEE_RX_TIMER_VAL);
+
+ /* set MSE threshold values */
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, IPQ5018_PHY_MMD1_MSE_THRESH1,
+ IPQ5018_PHY_MMD1_MSE_THRESH1_VAL);
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, IPQ5018_PHY_MMD1_MSE_THRESH2,
+ IPQ5018_PHY_MMD1_MSE_THRESH2_VAL);
+
+	/* PHY DAC values are optional and only set in a PHY-to-PHY link architecture */
+ if (priv->set_short_cable_dac) {
+ /* setting MDAC (Multi-level Digital-to-Analog Converter) in MMD1 */
+ phy_modify_mmd(phydev, MDIO_MMD_PMAPMD, IPQ5018_PHY_MMD1_MDAC,
+ IPQ5018_PHY_DAC_MASK, IPQ5018_PHY_MMD1_MDAC_VAL);
+
+ /* setting EDAC (Error-detection and Correction) in debug register */
+ at803x_debug_reg_mask(phydev, IPQ5018_PHY_DEBUG_EDAC,
+ IPQ5018_PHY_DAC_MASK, IPQ5018_PHY_DEBUG_EDAC_VAL);
+ }
+
+ return 0;
+}
+
+static void ipq5018_link_change_notify(struct phy_device *phydev)
+{
+ /*
+	 * Reset the FIFO buffer when the link goes down to clear any residual
+	 * data that could otherwise leave the FIFO in an unrecoverable state.
+ */
+ mdiobus_modify_changed(phydev->mdio.bus, phydev->mdio.addr,
+ IPQ5018_PHY_FIFO_CONTROL, IPQ5018_PHY_FIFO_RESET,
+ phydev->link ? IPQ5018_PHY_FIFO_RESET : 0);
+}
+
+static int ipq5018_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct ipq5018_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->set_short_cable_dac = of_property_read_bool(dev->of_node,
+ "qcom,dac-preset-short-cable");
+
+ priv->rst = devm_reset_control_array_get_exclusive(dev);
+ if (IS_ERR(priv->rst))
+ return dev_err_probe(dev, PTR_ERR(priv->rst),
+ "failed to acquire reset\n");
+
+ ret = reset_control_reset(priv->rst);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to reset\n");
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
static struct phy_driver at803x_driver[] = {
{
/* Qualcomm Atheros AR8035 */
@@ -1079,6 +1204,19 @@ static struct phy_driver at803x_driver[] = {
.soft_reset = genphy_soft_reset,
.config_aneg = at803x_config_aneg,
}, {
+ PHY_ID_MATCH_EXACT(IPQ5018_PHY_ID),
+ .name = "Qualcomm Atheros IPQ5018 internal PHY",
+ .flags = PHY_IS_INTERNAL | PHY_POLL_CABLE_TEST,
+ .probe = ipq5018_probe,
+ .config_init = ipq5018_config_init,
+ .link_change_notify = ipq5018_link_change_notify,
+ .read_status = at803x_read_status,
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .cable_test_start = ipq5018_cable_test_start,
+ .cable_test_get_status = qca808x_cable_test_get_status,
+ .soft_reset = genphy_soft_reset,
+}, {
/* Qualcomm Atheros QCA9561 */
PHY_ID_MATCH_EXACT(QCA9561_PHY_ID),
.name = "Qualcomm Atheros QCA9561 built-in PHY",
@@ -1098,12 +1236,13 @@ static struct phy_driver at803x_driver[] = {
module_phy_driver(at803x_driver);
-static struct mdio_device_id __maybe_unused atheros_tbl[] = {
+static const struct mdio_device_id __maybe_unused atheros_tbl[] = {
{ ATH8030_PHY_ID, AT8030_PHY_ID_MASK },
{ PHY_ID_MATCH_EXACT(ATH8031_PHY_ID) },
{ PHY_ID_MATCH_EXACT(ATH8032_PHY_ID) },
{ PHY_ID_MATCH_EXACT(ATH8035_PHY_ID) },
{ PHY_ID_MATCH_EXACT(ATH9331_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(IPQ5018_PHY_ID) },
{ PHY_ID_MATCH_EXACT(QCA9561_PHY_ID) },
{ }
};
diff --git a/drivers/net/phy/qcom/qca807x.c b/drivers/net/phy/qcom/qca807x.c
index bd8a51ec0ecd..1be8295a95cb 100644
--- a/drivers/net/phy/qcom/qca807x.c
+++ b/drivers/net/phy/qcom/qca807x.c
@@ -15,6 +15,7 @@
#include <linux/gpio/driver.h>
#include <linux/sfp.h>
+#include "../phylib.h"
#include "qcom.h"
#define QCA807X_CHIP_CONFIGURATION 0x1f
@@ -123,6 +124,7 @@ struct qca807x_priv {
bool dac_full_amplitude;
bool dac_full_bias_current;
bool dac_disable_bias_current_tweak;
+ struct qcom_phy_hw_stats hw_stats;
};
static int qca807x_cable_test_start(struct phy_device *phydev)
@@ -376,7 +378,7 @@ static int qca807x_gpio_get(struct gpio_chip *gc, unsigned int offset)
return FIELD_GET(QCA807X_GPIO_FORCE_MODE_MASK, val);
}
-static void qca807x_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+static int qca807x_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct qca807x_gpio_priv *priv = gpiochip_get_data(gc);
u16 reg;
@@ -385,18 +387,19 @@ static void qca807x_gpio_set(struct gpio_chip *gc, unsigned int offset, int valu
reg = QCA807X_MMD7_LED_FORCE_CTRL(offset);
val = phy_read_mmd(priv->phy, MDIO_MMD_AN, reg);
+ if (val < 0)
+ return val;
+
val &= ~QCA807X_GPIO_FORCE_MODE_MASK;
val |= QCA807X_GPIO_FORCE_EN;
val |= FIELD_PREP(QCA807X_GPIO_FORCE_MODE_MASK, value);
- phy_write_mmd(priv->phy, MDIO_MMD_AN, reg, val);
+ return phy_write_mmd(priv->phy, MDIO_MMD_AN, reg, val);
}
static int qca807x_gpio_dir_out(struct gpio_chip *gc, unsigned int offset, int value)
{
- qca807x_gpio_set(gc, offset, value);
-
- return 0;
+ return qca807x_gpio_set(gc, offset, value);
}
static int qca807x_gpio(struct phy_device *phydev)
@@ -486,13 +489,13 @@ static int qca807x_read_status(struct phy_device *phydev)
static int qca807x_phy_package_probe_once(struct phy_device *phydev)
{
- struct phy_package_shared *shared = phydev->shared;
- struct qca807x_shared_priv *priv = shared->priv;
+ struct qca807x_shared_priv *priv = phy_package_get_priv(phydev);
+ struct device_node *np = phy_package_get_node(phydev);
unsigned int tx_drive_strength;
const char *package_mode_name;
/* Default to 600mw if not defined */
- if (of_property_read_u32(shared->np, "qcom,tx-drive-strength-milliwatt",
+ if (of_property_read_u32(np, "qcom,tx-drive-strength-milliwatt",
&tx_drive_strength))
tx_drive_strength = 600;
@@ -541,7 +544,7 @@ static int qca807x_phy_package_probe_once(struct phy_device *phydev)
}
priv->package_mode = PHY_INTERFACE_MODE_NA;
- if (!of_property_read_string(shared->np, "qcom,package-mode",
+ if (!of_property_read_string(np, "qcom,package-mode",
&package_mode_name)) {
if (!strcasecmp(package_mode_name,
phy_modes(PHY_INTERFACE_MODE_PSGMII)))
@@ -558,8 +561,7 @@ static int qca807x_phy_package_probe_once(struct phy_device *phydev)
static int qca807x_phy_package_config_init_once(struct phy_device *phydev)
{
- struct phy_package_shared *shared = phydev->shared;
- struct qca807x_shared_priv *priv = shared->priv;
+ struct qca807x_shared_priv *priv = phy_package_get_priv(phydev);
int val, ret;
/* Make sure PHY follow PHY package mode if enforced */
@@ -644,13 +646,12 @@ exit:
static int qca807x_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
{
struct phy_device *phydev = upstream;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
+ const struct sfp_module_caps *caps;
phy_interface_t iface;
int ret;
- DECLARE_PHY_INTERFACE_MASK(interfaces);
- sfp_parse_support(phydev->sfp_bus, id, support, interfaces);
- iface = sfp_select_interface(phydev->sfp_bus, support);
+ caps = sfp_get_module_caps(phydev->sfp_bus);
+ iface = sfp_select_interface(phydev->sfp_bus, caps->link_modes);
dev_info(&phydev->mdio.dev, "%s SFP module inserted\n", phy_modes(iface));
@@ -708,7 +709,6 @@ static int qca807x_probe(struct phy_device *phydev)
struct device_node *node = phydev->mdio.dev.of_node;
struct qca807x_shared_priv *shared_priv;
struct device *dev = &phydev->mdio.dev;
- struct phy_package_shared *shared;
struct qca807x_priv *priv;
int ret;
@@ -722,8 +722,7 @@ static int qca807x_probe(struct phy_device *phydev)
return ret;
}
- shared = phydev->shared;
- shared_priv = shared->priv;
+ shared_priv = phy_package_get_priv(phydev);
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -769,12 +768,16 @@ static int qca807x_config_init(struct phy_device *phydev)
return ret;
}
+ ret = qcom_phy_counter_config(phydev);
+ if (ret)
+ return ret;
+
control_dac = phy_read_mmd(phydev, MDIO_MMD_AN,
QCA807X_MMD7_1000BASE_T_POWER_SAVE_PER_CABLE_LENGTH);
control_dac &= ~QCA807X_CONTROL_DAC_MASK;
if (!priv->dac_full_amplitude)
control_dac |= QCA807X_CONTROL_DAC_DSP_AMPLITUDE;
- if (!priv->dac_full_amplitude)
+ if (!priv->dac_full_bias_current)
control_dac |= QCA807X_CONTROL_DAC_DSP_BIAS_CURRENT;
if (!priv->dac_disable_bias_current_tweak)
control_dac |= QCA807X_CONTROL_DAC_BIAS_CURRENT_TWEAK;
@@ -783,6 +786,22 @@ static int qca807x_config_init(struct phy_device *phydev)
control_dac);
}
+static int qca807x_update_stats(struct phy_device *phydev)
+{
+ struct qca807x_priv *priv = phydev->priv;
+
+ return qcom_phy_update_stats(phydev, &priv->hw_stats);
+}
+
+static void qca807x_get_phy_stats(struct phy_device *phydev,
+ struct ethtool_eth_phy_stats *eth_stats,
+ struct ethtool_phy_stats *stats)
+{
+ struct qca807x_priv *priv = phydev->priv;
+
+ qcom_phy_get_stats(stats, priv->hw_stats);
+}
+
static struct phy_driver qca807x_drivers[] = {
{
PHY_ID_MATCH_EXACT(PHY_ID_QCA8072),
@@ -801,6 +820,10 @@ static struct phy_driver qca807x_drivers[] = {
.suspend = genphy_suspend,
.cable_test_start = qca807x_cable_test_start,
.cable_test_get_status = qca808x_cable_test_get_status,
+ .update_stats = qca807x_update_stats,
+ .get_phy_stats = qca807x_get_phy_stats,
+ .set_wol = at8031_set_wol,
+ .get_wol = at803x_get_wol,
},
{
PHY_ID_MATCH_EXACT(PHY_ID_QCA8075),
@@ -824,11 +847,15 @@ static struct phy_driver qca807x_drivers[] = {
.led_hw_is_supported = qca807x_led_hw_is_supported,
.led_hw_control_set = qca807x_led_hw_control_set,
.led_hw_control_get = qca807x_led_hw_control_get,
+ .update_stats = qca807x_update_stats,
+ .get_phy_stats = qca807x_get_phy_stats,
+ .set_wol = at8031_set_wol,
+ .get_wol = at803x_get_wol,
},
};
module_phy_driver(qca807x_drivers);
-static struct mdio_device_id __maybe_unused qca807x_tbl[] = {
+static const struct mdio_device_id __maybe_unused qca807x_tbl[] = {
{ PHY_ID_MATCH_EXACT(PHY_ID_QCA8072) },
{ PHY_ID_MATCH_EXACT(PHY_ID_QCA8075) },
{ }
diff --git a/drivers/net/phy/qcom/qca808x.c b/drivers/net/phy/qcom/qca808x.c
index 5048304ccc9e..8eb51b1a006c 100644
--- a/drivers/net/phy/qcom/qca808x.c
+++ b/drivers/net/phy/qcom/qca808x.c
@@ -93,6 +93,7 @@ MODULE_LICENSE("GPL");
struct qca808x_priv {
int led_polarity_mode;
+ struct qcom_phy_hw_stats hw_stats;
};
static int qca808x_phy_fast_retrain_config(struct phy_device *phydev)
@@ -243,6 +244,10 @@ static int qca808x_config_init(struct phy_device *phydev)
qca808x_fill_possible_interfaces(phydev);
+ ret = qcom_phy_counter_config(phydev);
+ if (ret)
+ return ret;
+
/* Configure adc threshold as 100mv for the link 10M */
return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_ADC_THRESHOLD,
QCA808X_ADC_THRESHOLD_MASK,
@@ -622,6 +627,22 @@ static int qca808x_led_polarity_set(struct phy_device *phydev, int index,
active_low ? 0 : QCA808X_LED_ACTIVE_HIGH);
}
+static int qca808x_update_stats(struct phy_device *phydev)
+{
+ struct qca808x_priv *priv = phydev->priv;
+
+ return qcom_phy_update_stats(phydev, &priv->hw_stats);
+}
+
+static void qca808x_get_phy_stats(struct phy_device *phydev,
+ struct ethtool_eth_phy_stats *eth_stats,
+ struct ethtool_phy_stats *stats)
+{
+ struct qca808x_priv *priv = phydev->priv;
+
+ qcom_phy_get_stats(stats, priv->hw_stats);
+}
+
static struct phy_driver qca808x_driver[] = {
{
/* Qualcomm QCA8081 */
@@ -633,7 +654,7 @@ static struct phy_driver qca808x_driver[] = {
.handle_interrupt = at803x_handle_interrupt,
.get_tunable = at803x_get_tunable,
.set_tunable = at803x_set_tunable,
- .set_wol = at803x_set_wol,
+ .set_wol = at8031_set_wol,
.get_wol = at803x_get_wol,
.get_features = qca808x_get_features,
.config_aneg = qca808x_config_aneg,
@@ -651,11 +672,13 @@ static struct phy_driver qca808x_driver[] = {
.led_hw_control_set = qca808x_led_hw_control_set,
.led_hw_control_get = qca808x_led_hw_control_get,
.led_polarity_set = qca808x_led_polarity_set,
+ .update_stats = qca808x_update_stats,
+ .get_phy_stats = qca808x_get_phy_stats,
}, };
module_phy_driver(qca808x_driver);
-static struct mdio_device_id __maybe_unused qca808x_tbl[] = {
+static const struct mdio_device_id __maybe_unused qca808x_tbl[] = {
{ PHY_ID_MATCH_EXACT(QCA8081_PHY_ID) },
{ }
};
diff --git a/drivers/net/phy/qcom/qca83xx.c b/drivers/net/phy/qcom/qca83xx.c
index 7a5039920b9f..bc70ed8efd86 100644
--- a/drivers/net/phy/qcom/qca83xx.c
+++ b/drivers/net/phy/qcom/qca83xx.c
@@ -259,7 +259,7 @@ static struct phy_driver qca83xx_driver[] = {
module_phy_driver(qca83xx_driver);
-static struct mdio_device_id __maybe_unused qca83xx_tbl[] = {
+static const struct mdio_device_id __maybe_unused qca83xx_tbl[] = {
{ PHY_ID_MATCH_EXACT(QCA8337_PHY_ID) },
{ PHY_ID_MATCH_EXACT(QCA8327_A_PHY_ID) },
{ PHY_ID_MATCH_EXACT(QCA8327_B_PHY_ID) },
diff --git a/drivers/net/phy/qcom/qcom-phy-lib.c b/drivers/net/phy/qcom/qcom-phy-lib.c
index d28815ef56bb..965c2bb99a9b 100644
--- a/drivers/net/phy/qcom/qcom-phy-lib.c
+++ b/drivers/net/phy/qcom/qcom-phy-lib.c
@@ -115,6 +115,31 @@ int at803x_set_wol(struct phy_device *phydev,
}
EXPORT_SYMBOL_GPL(at803x_set_wol);
+int at8031_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ int ret;
+
+ /* First setup MAC address and enable WOL interrupt */
+ ret = at803x_set_wol(phydev, wol);
+ if (ret)
+ return ret;
+
+ if (wol->wolopts & WAKE_MAGIC)
+ /* Enable WOL function for 1588 */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ AT803X_PHY_MMD3_WOL_CTRL,
+ 0, AT803X_WOL_EN);
+ else
+ /* Disable WoL function for 1588 */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ AT803X_PHY_MMD3_WOL_CTRL,
+ AT803X_WOL_EN, 0);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(at8031_set_wol);
+
void at803x_get_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
@@ -674,3 +699,78 @@ int qca808x_led_reg_blink_set(struct phy_device *phydev, u16 reg,
return 0;
}
EXPORT_SYMBOL_GPL(qca808x_led_reg_blink_set);
+
+/* Enable CRC checking for both received and transmitted frames to ensure
+ * accurate counter recording. The hardware provides 32-bit counters;
+ * configure them to clear on read so that a 64-bit software counter can
+ * be maintained.
+ */
+int qcom_phy_counter_config(struct phy_device *phydev)
+{
+ return phy_set_bits_mmd(phydev, MDIO_MMD_AN, QCA808X_MMD7_CNT_CTRL,
+ QCA808X_MMD7_CNT_CTRL_CRC_CHECK_EN |
+ QCA808X_MMD7_CNT_CTRL_READ_CLEAR_EN);
+}
+EXPORT_SYMBOL_GPL(qcom_phy_counter_config);
+
+int qcom_phy_update_stats(struct phy_device *phydev,
+ struct qcom_phy_hw_stats *hw_stats)
+{
+ int ret;
+ u32 cnt;
+
+ /* PHY 32-bit counter for RX packets. */
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, QCA808X_MMD7_CNT_RX_PKT_15_0);
+ if (ret < 0)
+ return ret;
+
+ cnt = ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, QCA808X_MMD7_CNT_RX_PKT_31_16);
+ if (ret < 0)
+ return ret;
+
+ cnt |= ret << 16;
+ hw_stats->rx_pkts += cnt;
+
+ /* PHY 16-bit counter for RX CRC error packets. */
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, QCA808X_MMD7_CNT_RX_ERR_PKT);
+ if (ret < 0)
+ return ret;
+
+ hw_stats->rx_err_pkts += ret;
+
+ /* PHY 32-bit counter for TX packets. */
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, QCA808X_MMD7_CNT_TX_PKT_15_0);
+ if (ret < 0)
+ return ret;
+
+ cnt = ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, QCA808X_MMD7_CNT_TX_PKT_31_16);
+ if (ret < 0)
+ return ret;
+
+ cnt |= ret << 16;
+ hw_stats->tx_pkts += cnt;
+
+ /* PHY 16-bit counter for TX CRC error packets. */
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, QCA808X_MMD7_CNT_TX_ERR_PKT);
+ if (ret < 0)
+ return ret;
+
+ hw_stats->tx_err_pkts += ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_phy_update_stats);
+
+void qcom_phy_get_stats(struct ethtool_phy_stats *stats,
+ struct qcom_phy_hw_stats hw_stats)
+{
+ stats->tx_packets = hw_stats.tx_pkts;
+ stats->tx_errors = hw_stats.tx_err_pkts;
+ stats->rx_packets = hw_stats.rx_pkts;
+ stats->rx_errors = hw_stats.rx_err_pkts;
+}
+EXPORT_SYMBOL_GPL(qcom_phy_get_stats);
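A minimal, standalone sketch (not part of the patch) of the accumulation pattern qcom_phy_update_stats() relies on: because the hardware counters are configured to clear on read, each readout is a delta, and summing the deltas into a 64-bit software counter needs no wrap handling. The simulated read_clear_hw_counter() helper is a hypothetical stand-in for the two phy_read_mmd() calls.

#include <stdint.h>
#include <stdio.h>

/* Simulated read-clear hardware counter (stand-in for the MMD7 RX counter). */
static uint32_t hw_rx_cnt;

static uint32_t read_clear_hw_counter(void)
{
	uint32_t v = hw_rx_cnt;

	hw_rx_cnt = 0;		/* clear-on-read, as configured by CNT_CTRL */
	return v;
}

int main(void)
{
	uint64_t sw_rx_pkts = 0;

	/* Two polling intervals: each read returns only the delta since the
	 * previous read, so the 64-bit software counter keeps growing as
	 * long as polling is more frequent than a 32-bit wrap.
	 */
	hw_rx_cnt = 70000;
	sw_rx_pkts += read_clear_hw_counter();
	hw_rx_cnt = 12345;
	sw_rx_pkts += read_clear_hw_counter();

	printf("%llu\n", (unsigned long long)sw_rx_pkts);	/* 82345 */
	return 0;
}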
diff --git a/drivers/net/phy/qcom/qcom.h b/drivers/net/phy/qcom/qcom.h
index 4bb541728846..5071e7149a11 100644
--- a/drivers/net/phy/qcom/qcom.h
+++ b/drivers/net/phy/qcom/qcom.h
@@ -172,6 +172,9 @@
#define AT803X_LOC_MAC_ADDR_16_31_OFFSET 0x804B
#define AT803X_LOC_MAC_ADDR_32_47_OFFSET 0x804A
+#define AT803X_PHY_MMD3_WOL_CTRL 0x8012
+#define AT803X_WOL_EN BIT(5)
+
#define AT803X_DEBUG_ADDR 0x1D
#define AT803X_DEBUG_DATA 0x1E
@@ -192,6 +195,17 @@
#define AT803X_MIN_DOWNSHIFT 2
#define AT803X_MAX_DOWNSHIFT 9
+#define QCA808X_MMD7_CNT_CTRL 0x8029
+#define QCA808X_MMD7_CNT_CTRL_READ_CLEAR_EN BIT(1)
+#define QCA808X_MMD7_CNT_CTRL_CRC_CHECK_EN BIT(0)
+
+#define QCA808X_MMD7_CNT_RX_PKT_31_16 0x802a
+#define QCA808X_MMD7_CNT_RX_PKT_15_0 0x802b
+#define QCA808X_MMD7_CNT_RX_ERR_PKT 0x802c
+#define QCA808X_MMD7_CNT_TX_PKT_31_16 0x802d
+#define QCA808X_MMD7_CNT_TX_PKT_15_0 0x802e
+#define QCA808X_MMD7_CNT_TX_ERR_PKT 0x802f
+
enum stat_access_type {
PHY,
MMD
@@ -209,12 +223,21 @@ struct at803x_ss_mask {
u8 speed_shift;
};
+struct qcom_phy_hw_stats {
+ u64 rx_pkts;
+ u64 rx_err_pkts;
+ u64 tx_pkts;
+ u64 tx_err_pkts;
+};
+
int at803x_debug_reg_read(struct phy_device *phydev, u16 reg);
int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg,
u16 clear, u16 set);
int at803x_debug_reg_write(struct phy_device *phydev, u16 reg, u16 data);
int at803x_set_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol);
+int at8031_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol);
void at803x_get_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol);
int at803x_ack_interrupt(struct phy_device *phydev);
@@ -241,3 +264,8 @@ int qca808x_led_reg_brightness_set(struct phy_device *phydev,
int qca808x_led_reg_blink_set(struct phy_device *phydev, u16 reg,
unsigned long *delay_on,
unsigned long *delay_off);
+int qcom_phy_counter_config(struct phy_device *phydev);
+int qcom_phy_update_stats(struct phy_device *phydev,
+ struct qcom_phy_hw_stats *hw_stats);
+void qcom_phy_get_stats(struct ethtool_phy_stats *stats,
+ struct qcom_phy_hw_stats hw_stats);
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
index 30d15f7c9b03..7b70ba6cab66 100644
--- a/drivers/net/phy/qsemi.c
+++ b/drivers/net/phy/qsemi.c
@@ -155,7 +155,7 @@ static struct phy_driver qs6612_driver[] = { {
module_phy_driver(qs6612_driver);
-static struct mdio_device_id __maybe_unused qs6612_tbl[] = {
+static const struct mdio_device_id __maybe_unused qs6612_tbl[] = {
{ 0x00181440, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/qt2025.rs b/drivers/net/phy/qt2025.rs
index 1ab065798175..aaaead6512a0 100644
--- a/drivers/net/phy/qt2025.rs
+++ b/drivers/net/phy/qt2025.rs
@@ -12,6 +12,7 @@
use kernel::c_str;
use kernel::error::code;
use kernel::firmware::Firmware;
+use kernel::io::poll::read_poll_timeout;
use kernel::net::phy::{
self,
reg::{Mmd, C45},
@@ -19,6 +20,7 @@ use kernel::net::phy::{
};
use kernel::prelude::*;
use kernel::sizes::{SZ_16K, SZ_8K};
+use kernel::time::Delta;
kernel::module_phy_driver! {
drivers: [PhyQT2025],
@@ -26,7 +28,7 @@ kernel::module_phy_driver! {
phy::DeviceId::new_with_driver::<PhyQT2025>(),
],
name: "qt2025_phy",
- author: "FUJITA Tomonori <fujita.tomonori@gmail.com>",
+ authors: ["FUJITA Tomonori <fujita.tomonori@gmail.com>"],
description: "AMCC QT2025 PHY driver",
license: "GPL",
firmware: ["qt2025-2.0.3.3.fw"],
@@ -41,7 +43,7 @@ impl Driver for PhyQT2025 {
fn probe(dev: &mut phy::Device) -> Result<()> {
// Check the hardware revision code.
- // Only 0x3b works with this driver and firmware.
+ // Only 0xb3 works with this driver and firmware.
let hw_rev = dev.read(C45::new(Mmd::PMAPMD, 0xd001))?;
if (hw_rev >> 8) != 0xb3 {
return Err(code::ENODEV);
@@ -93,7 +95,13 @@ impl Driver for PhyQT2025 {
// The micro-controller will start running from SRAM.
dev.write(C45::new(Mmd::PCS, 0xe854), 0x0040)?;
- // TODO: sleep here until the hw becomes ready.
+ read_poll_timeout(
+ || dev.read(C45::new(Mmd::PCS, 0xd7fd)),
+ |val| *val != 0x00 && *val != 0x10,
+ Delta::from_millis(50),
+ Delta::from_secs(3),
+ )?;
+
Ok(())
}
diff --git a/drivers/net/phy/realtek/Kconfig b/drivers/net/phy/realtek/Kconfig
new file mode 100644
index 000000000000..b05c2a1e9024
--- /dev/null
+++ b/drivers/net/phy/realtek/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config REALTEK_PHY
+ tristate "Realtek PHYs"
+ help
+	  Currently supports RTL821x/RTL822x and Fast Ethernet PHYs
+
+if REALTEK_PHY
+
+config REALTEK_PHY_HWMON
+ bool "HWMON support for Realtek PHYs"
+ depends on HWMON && !(REALTEK_PHY=y && HWMON=m)
+ help
+ Optional hwmon support for the temperature sensor
+
+endif # REALTEK_PHY
diff --git a/drivers/net/phy/realtek/Makefile b/drivers/net/phy/realtek/Makefile
new file mode 100644
index 000000000000..dd21cf87f2f1
--- /dev/null
+++ b/drivers/net/phy/realtek/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+realtek-y += realtek_main.o
+realtek-$(CONFIG_REALTEK_PHY_HWMON) += realtek_hwmon.o
+obj-$(CONFIG_REALTEK_PHY) += realtek.o
diff --git a/drivers/net/phy/realtek/realtek.h b/drivers/net/phy/realtek/realtek.h
new file mode 100644
index 000000000000..a39b44fa18a0
--- /dev/null
+++ b/drivers/net/phy/realtek/realtek.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef REALTEK_H
+#define REALTEK_H
+
+#include <linux/phy.h>
+
+int rtl822x_hwmon_init(struct phy_device *phydev);
+
+#endif /* REALTEK_H */
diff --git a/drivers/net/phy/realtek/realtek_hwmon.c b/drivers/net/phy/realtek/realtek_hwmon.c
new file mode 100644
index 000000000000..ac96e2d1ebe8
--- /dev/null
+++ b/drivers/net/phy/realtek/realtek_hwmon.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * HWMON support for Realtek PHYs
+ *
+ * Author: Heiner Kallweit <hkallweit1@gmail.com>
+ */
+
+#include <linux/hwmon.h>
+#include <linux/phy.h>
+
+#include "realtek.h"
+
+#define RTL822X_VND2_TSALRM 0xa662
+#define RTL822X_VND2_TSRR 0xbd84
+#define RTL822X_VND2_TSSR 0xb54c
+
+static int rtl822x_hwmon_get_temp(int raw)
+{
+ if (raw >= 512)
+ raw -= 1024;
+
+ return 1000 * raw / 2;
+}
+
+static int rtl822x_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ int raw;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ raw = phy_read_mmd(phydev, MDIO_MMD_VEND2, RTL822X_VND2_TSRR) & 0x3ff;
+ *val = rtl822x_hwmon_get_temp(raw);
+ break;
+ case hwmon_temp_max:
+ /* Chip reduces speed to 1G if threshold is exceeded */
+ raw = phy_read_mmd(phydev, MDIO_MMD_VEND2, RTL822X_VND2_TSSR) >> 6;
+ *val = rtl822x_hwmon_get_temp(raw);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_ops rtl822x_hwmon_ops = {
+ .visible = 0444,
+ .read = rtl822x_hwmon_read,
+};
+
+static const struct hwmon_channel_info * const rtl822x_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX),
+ NULL
+};
+
+static const struct hwmon_chip_info rtl822x_hwmon_chip_info = {
+ .ops = &rtl822x_hwmon_ops,
+ .info = rtl822x_hwmon_info,
+};
+
+int rtl822x_hwmon_init(struct phy_device *phydev)
+{
+ struct device *hwdev, *dev = &phydev->mdio.dev;
+
+ /* Ensure over-temp alarm is reset. */
+ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, RTL822X_VND2_TSALRM, 3);
+
+ hwdev = devm_hwmon_device_register_with_info(dev, NULL, phydev,
+ &rtl822x_hwmon_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hwdev);
+}
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek/realtek_main.c
index f65d7f1f348e..67ecf3d4af2b 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek/realtek_main.c
@@ -8,30 +8,79 @@
* Copyright (c) 2004 Freescale Semiconductor, Inc.
*/
#include <linux/bitops.h>
+#include <linux/ethtool_netlink.h>
#include <linux/of.h>
#include <linux/phy.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/clk.h>
+#include <linux/string_choices.h>
-#define RTL821x_PHYSR 0x11
-#define RTL821x_PHYSR_DUPLEX BIT(13)
-#define RTL821x_PHYSR_SPEED GENMASK(15, 14)
+#include "realtek.h"
+
+#define RTL8201F_IER 0x13
+
+#define RTL8201F_ISR 0x1e
+#define RTL8201F_ISR_ANERR BIT(15)
+#define RTL8201F_ISR_DUPLEX BIT(13)
+#define RTL8201F_ISR_LINK BIT(11)
+#define RTL8201F_ISR_MASK (RTL8201F_ISR_ANERR | \
+ RTL8201F_ISR_DUPLEX | \
+ RTL8201F_ISR_LINK)
#define RTL821x_INER 0x12
#define RTL8211B_INER_INIT 0x6400
#define RTL8211E_INER_LINK_STATUS BIT(10)
+#define RTL8211F_INER_PME BIT(7)
#define RTL8211F_INER_LINK_STATUS BIT(4)
#define RTL821x_INSR 0x13
#define RTL821x_EXT_PAGE_SELECT 0x1e
+
#define RTL821x_PAGE_SELECT 0x1f
+#define RTL821x_SET_EXT_PAGE 0x07
+
+/* RTL8211E extension page 44/0x2c */
+#define RTL8211E_LEDCR_EXT_PAGE 0x2c
+#define RTL8211E_LEDCR1 0x1a
+#define RTL8211E_LEDCR1_ACT_TXRX BIT(4)
+#define RTL8211E_LEDCR1_MASK BIT(4)
+#define RTL8211E_LEDCR1_SHIFT 1
+
+#define RTL8211E_LEDCR2 0x1c
+#define RTL8211E_LEDCR2_LINK_1000 BIT(2)
+#define RTL8211E_LEDCR2_LINK_100 BIT(1)
+#define RTL8211E_LEDCR2_LINK_10 BIT(0)
+#define RTL8211E_LEDCR2_MASK GENMASK(2, 0)
+#define RTL8211E_LEDCR2_SHIFT 4
+
+/* RTL8211E extension page 164/0xa4 */
+#define RTL8211E_RGMII_EXT_PAGE 0xa4
+#define RTL8211E_RGMII_DELAY 0x1c
+#define RTL8211E_CTRL_DELAY BIT(13)
+#define RTL8211E_TX_DELAY BIT(12)
+#define RTL8211E_RX_DELAY BIT(11)
+#define RTL8211E_DELAY_MASK GENMASK(13, 11)
+/* RTL8211F PHY configuration */
+#define RTL8211F_PHYCR_PAGE 0xa43
#define RTL8211F_PHYCR1 0x18
+#define RTL8211F_ALDPS_PLL_OFF BIT(1)
+#define RTL8211F_ALDPS_ENABLE BIT(2)
+#define RTL8211F_ALDPS_XTAL_OFF BIT(12)
+
#define RTL8211F_PHYCR2 0x19
+#define RTL8211F_CLKOUT_EN BIT(0)
+#define RTL8211F_PHYCR2_PHY_EEE_ENABLE BIT(5)
+
+#define RTL8211F_INSR_PAGE 0xa43
#define RTL8211F_INSR 0x1d
+/* RTL8211F LED configuration */
+#define RTL8211F_LEDCR_PAGE 0xd04
#define RTL8211F_LEDCR 0x10
#define RTL8211F_LEDCR_MODE BIT(15)
#define RTL8211F_LEDCR_ACT_TXRX BIT(4)
@@ -41,27 +90,36 @@
#define RTL8211F_LEDCR_MASK GENMASK(4, 0)
#define RTL8211F_LEDCR_SHIFT 5
-#define RTL8211F_TX_DELAY BIT(8)
-#define RTL8211F_RX_DELAY BIT(3)
+/* RTL8211F(D)(I)-VD-CG CLKOUT configuration is specified via magic values
+ * to undocumented register pages. The names here do not reflect the datasheet.
+ * Unlike other PHY models, CLKOUT configuration does not go through PHYCR2.
+ */
+#define RTL8211FVD_CLKOUT_PAGE 0xd05
+#define RTL8211FVD_CLKOUT_REG 0x11
+#define RTL8211FVD_CLKOUT_EN BIT(8)
-#define RTL8211F_ALDPS_PLL_OFF BIT(1)
-#define RTL8211F_ALDPS_ENABLE BIT(2)
-#define RTL8211F_ALDPS_XTAL_OFF BIT(12)
+/* RTL8211F RGMII configuration */
+#define RTL8211F_RGMII_PAGE 0xd08
-#define RTL8211E_CTRL_DELAY BIT(13)
-#define RTL8211E_TX_DELAY BIT(12)
-#define RTL8211E_RX_DELAY BIT(11)
+#define RTL8211F_TXCR 0x11
+#define RTL8211F_TX_DELAY BIT(8)
-#define RTL8211F_CLKOUT_EN BIT(0)
+#define RTL8211F_RXCR 0x15
+#define RTL8211F_RX_DELAY BIT(3)
-#define RTL8201F_ISR 0x1e
-#define RTL8201F_ISR_ANERR BIT(15)
-#define RTL8201F_ISR_DUPLEX BIT(13)
-#define RTL8201F_ISR_LINK BIT(11)
-#define RTL8201F_ISR_MASK (RTL8201F_ISR_ANERR | \
- RTL8201F_ISR_DUPLEX | \
- RTL8201F_ISR_LINK)
-#define RTL8201F_IER 0x13
+/* RTL8211F WOL settings */
+#define RTL8211F_WOL_PAGE 0xd8a
+#define RTL8211F_WOL_SETTINGS_EVENTS 16
+#define RTL8211F_WOL_EVENT_MAGIC BIT(12)
+#define RTL8211F_WOL_RST_RMSQ 17
+#define RTL8211F_WOL_RG_RSTB BIT(15)
+#define RTL8211F_WOL_RMSQ 0x1fff
+
+/* RTL8211F unique physical and multicast address (WOL) */
+#define RTL8211F_PHYSICAL_ADDR_PAGE 0xd8c
+#define RTL8211F_PHYSICAL_ADDR_WORD0 16
+#define RTL8211F_PHYSICAL_ADDR_WORD1 17
+#define RTL8211F_PHYSICAL_ADDR_WORD2 18
#define RTL822X_VND1_SERDES_OPTION 0x697a
#define RTL822X_VND1_SERDES_OPTION_MODE_MASK GENMASK(5, 0)
@@ -76,9 +134,33 @@
/* RTL822X_VND2_XXXXX registers are only accessible when phydev->is_c45
* is set, they cannot be accessed by C45-over-C22.
*/
-#define RTL822X_VND2_GBCR 0xa412
+#define RTL822X_VND2_C22_REG(reg) (0xa400 + 2 * (reg))
+
+#define RTL8221B_VND2_INER 0xa4d2
+#define RTL8221B_VND2_INER_LINK_STATUS BIT(4)
+
+#define RTL8221B_VND2_INSR 0xa4d4
-#define RTL822X_VND2_GANLPAR 0xa414
+#define RTL8224_MII_RTCT 0x11
+#define RTL8224_MII_RTCT_ENABLE BIT(0)
+#define RTL8224_MII_RTCT_PAIR_A BIT(4)
+#define RTL8224_MII_RTCT_PAIR_B BIT(5)
+#define RTL8224_MII_RTCT_PAIR_C BIT(6)
+#define RTL8224_MII_RTCT_PAIR_D BIT(7)
+#define RTL8224_MII_RTCT_DONE BIT(15)
+
+#define RTL8224_MII_SRAM_ADDR 0x1b
+#define RTL8224_MII_SRAM_DATA 0x1c
+
+#define RTL8224_SRAM_RTCT_FAULT(pair) (0x8026 + (pair) * 4)
+#define RTL8224_SRAM_RTCT_FAULT_BUSY BIT(0)
+#define RTL8224_SRAM_RTCT_FAULT_OPEN BIT(3)
+#define RTL8224_SRAM_RTCT_FAULT_SAME_SHORT BIT(4)
+#define RTL8224_SRAM_RTCT_FAULT_OK BIT(5)
+#define RTL8224_SRAM_RTCT_FAULT_DONE BIT(6)
+#define RTL8224_SRAM_RTCT_FAULT_CROSS_SHORT BIT(7)
+
+#define RTL8224_SRAM_RTCT_LEN(pair) (0x8028 + (pair) * 4)
#define RTL8366RB_POWER_SAVE 0x15
#define RTL8366RB_POWER_SAVE_ON BIT(12)
@@ -93,24 +175,37 @@
#define RTL_VND2_PHYSR_MASTER BIT(11)
#define RTL_VND2_PHYSR_SPEED_MASK (RTL_VND2_PHYSR_SPEEDL | RTL_VND2_PHYSR_SPEEDH)
+#define RTL_MDIO_PCS_EEE_ABLE 0xa5c4
+#define RTL_MDIO_AN_EEE_ADV 0xa5d0
+#define RTL_MDIO_AN_EEE_LPABLE 0xa5d2
+#define RTL_MDIO_AN_10GBT_CTRL 0xa5d4
+#define RTL_MDIO_AN_10GBT_STAT 0xa5d6
+#define RTL_MDIO_PMA_SPEED 0xa616
+#define RTL_MDIO_AN_EEE_LPABLE2 0xa6d0
+#define RTL_MDIO_AN_EEE_ADV2 0xa6d4
+#define RTL_MDIO_PCS_EEE_ABLE2 0xa6ec
+
#define RTL_GENERIC_PHYID 0x001cc800
#define RTL_8211FVD_PHYID 0x001cc878
#define RTL_8221B 0x001cc840
#define RTL_8221B_VB_CG 0x001cc849
-#define RTL_8221B_VN_CG 0x001cc84a
+#define RTL_8221B_VM_CG 0x001cc84a
#define RTL_8251B 0x001cc862
+#define RTL_8261C 0x001cc890
-#define RTL8211F_LED_COUNT 3
+/* RTL8211E and RTL8211F support up to three LEDs */
+#define RTL8211x_LED_COUNT 3
MODULE_DESCRIPTION("Realtek PHY driver");
MODULE_AUTHOR("Johnson Leung");
MODULE_LICENSE("GPL");
struct rtl821x_priv {
- u16 phycr1;
- u16 phycr2;
- bool has_phycr2;
+ bool enable_aldps;
+ bool disable_clk_out;
struct clk *clk;
+ /* rtl8211f */
+ u16 iner;
};
static int rtl821x_read_page(struct phy_device *phydev)
@@ -123,12 +218,40 @@ static int rtl821x_write_page(struct phy_device *phydev, int page)
return __phy_write(phydev, RTL821x_PAGE_SELECT, page);
}
+static int rtl821x_read_ext_page(struct phy_device *phydev, u16 ext_page,
+ u32 regnum)
+{
+ int oldpage, ret = 0;
+
+ oldpage = phy_select_page(phydev, RTL821x_SET_EXT_PAGE);
+ if (oldpage >= 0) {
+ ret = __phy_write(phydev, RTL821x_EXT_PAGE_SELECT, ext_page);
+ if (ret == 0)
+ ret = __phy_read(phydev, regnum);
+ }
+
+ return phy_restore_page(phydev, oldpage, ret);
+}
+
+static int rtl821x_modify_ext_page(struct phy_device *phydev, u16 ext_page,
+ u32 regnum, u16 mask, u16 set)
+{
+ int oldpage, ret = 0;
+
+ oldpage = phy_select_page(phydev, RTL821x_SET_EXT_PAGE);
+ if (oldpage >= 0) {
+ ret = __phy_write(phydev, RTL821x_EXT_PAGE_SELECT, ext_page);
+ if (ret == 0)
+ ret = __phy_modify(phydev, regnum, mask, set);
+ }
+
+ return phy_restore_page(phydev, oldpage, ret);
+}
+
static int rtl821x_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
struct rtl821x_priv *priv;
- u32 phy_id = phydev->drv->phy_id;
- int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -139,28 +262,42 @@ static int rtl821x_probe(struct phy_device *phydev)
return dev_err_probe(dev, PTR_ERR(priv->clk),
"failed to get phy clock\n");
- ret = phy_read_paged(phydev, 0xa43, RTL8211F_PHYCR1);
+ priv->enable_aldps = of_property_read_bool(dev->of_node,
+ "realtek,aldps-enable");
+ priv->disable_clk_out = of_property_read_bool(dev->of_node,
+ "realtek,clkout-disable");
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
+static int rtl8211f_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ int ret;
+
+ ret = rtl821x_probe(phydev);
if (ret < 0)
return ret;
- priv->phycr1 = ret & (RTL8211F_ALDPS_PLL_OFF | RTL8211F_ALDPS_ENABLE | RTL8211F_ALDPS_XTAL_OFF);
- if (of_property_read_bool(dev->of_node, "realtek,aldps-enable"))
- priv->phycr1 |= RTL8211F_ALDPS_PLL_OFF | RTL8211F_ALDPS_ENABLE | RTL8211F_ALDPS_XTAL_OFF;
-
- priv->has_phycr2 = !(phy_id == RTL_8211FVD_PHYID);
- if (priv->has_phycr2) {
- ret = phy_read_paged(phydev, 0xa43, RTL8211F_PHYCR2);
- if (ret < 0)
- return ret;
+ /* Disable all PME events */
+ ret = phy_write_paged(phydev, RTL8211F_WOL_PAGE,
+ RTL8211F_WOL_SETTINGS_EVENTS, 0);
+ if (ret < 0)
+ return ret;
- priv->phycr2 = ret & RTL8211F_CLKOUT_EN;
- if (of_property_read_bool(dev->of_node, "realtek,clkout-disable"))
- priv->phycr2 &= ~RTL8211F_CLKOUT_EN;
+ /* Mark this PHY as wakeup capable and register the interrupt as a
+ * wakeup IRQ if the PHY is marked as a wakeup source in firmware,
+ * and the interrupt is valid.
+ */
+ if (device_property_read_bool(dev, "wakeup-source") &&
+ phy_interrupt_is_valid(phydev)) {
+ device_set_wakeup_capable(dev, true);
+ devm_pm_set_wake_irq(dev, phydev->irq);
}
- phydev->priv = priv;
-
- return 0;
+ return ret;
}
static int rtl8201_ack_interrupt(struct phy_device *phydev)
@@ -185,7 +322,7 @@ static int rtl8211f_ack_interrupt(struct phy_device *phydev)
{
int err;
- err = phy_read_paged(phydev, 0xa43, RTL8211F_INSR);
+ err = phy_read_paged(phydev, RTL8211F_INSR_PAGE, RTL8211F_INSR);
return (err < 0) ? err : 0;
}
@@ -260,6 +397,7 @@ static int rtl8211e_config_intr(struct phy_device *phydev)
static int rtl8211f_config_intr(struct phy_device *phydev)
{
+ struct rtl821x_priv *priv = phydev->priv;
u16 val;
int err;
@@ -270,8 +408,10 @@ static int rtl8211f_config_intr(struct phy_device *phydev)
val = RTL8211F_INER_LINK_STATUS;
err = phy_write_paged(phydev, 0xa42, RTL821x_INER, val);
+ if (err == 0)
+ priv->iner = val;
} else {
- val = 0;
+ priv->iner = val = 0;
err = phy_write_paged(phydev, 0xa42, RTL821x_INER, val);
if (err)
return err;
@@ -328,18 +468,85 @@ static irqreturn_t rtl8211f_handle_interrupt(struct phy_device *phydev)
{
int irq_status;
- irq_status = phy_read_paged(phydev, 0xa43, RTL8211F_INSR);
+ irq_status = phy_read_paged(phydev, RTL8211F_INSR_PAGE, RTL8211F_INSR);
if (irq_status < 0) {
phy_error(phydev);
return IRQ_NONE;
}
- if (!(irq_status & RTL8211F_INER_LINK_STATUS))
- return IRQ_NONE;
+ if (irq_status & RTL8211F_INER_LINK_STATUS) {
+ phy_trigger_machine(phydev);
+ return IRQ_HANDLED;
+ }
- phy_trigger_machine(phydev);
+ if (irq_status & RTL8211F_INER_PME) {
+ pm_wakeup_event(&phydev->mdio.dev, 0);
+ return IRQ_HANDLED;
+ }
- return IRQ_HANDLED;
+ return IRQ_NONE;
+}
+
+static void rtl8211f_get_wol(struct phy_device *dev, struct ethtool_wolinfo *wol)
+{
+ int wol_events;
+
+	/* If the PHY is not capable of waking the system, then WoL cannot
+ * be supported.
+ */
+ if (!device_can_wakeup(&dev->mdio.dev)) {
+ wol->supported = 0;
+ return;
+ }
+
+ wol->supported = WAKE_MAGIC;
+
+ wol_events = phy_read_paged(dev, RTL8211F_WOL_PAGE, RTL8211F_WOL_SETTINGS_EVENTS);
+ if (wol_events < 0)
+ return;
+
+ if (wol_events & RTL8211F_WOL_EVENT_MAGIC)
+ wol->wolopts = WAKE_MAGIC;
+}
+
+static int rtl8211f_set_wol(struct phy_device *dev, struct ethtool_wolinfo *wol)
+{
+ const u8 *mac_addr = dev->attached_dev->dev_addr;
+ int oldpage;
+
+ if (!device_can_wakeup(&dev->mdio.dev))
+ return -EOPNOTSUPP;
+
+ oldpage = phy_save_page(dev);
+ if (oldpage < 0)
+ goto err;
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ /* Store the device address for the magic packet */
+ rtl821x_write_page(dev, RTL8211F_PHYSICAL_ADDR_PAGE);
+ __phy_write(dev, RTL8211F_PHYSICAL_ADDR_WORD0, mac_addr[1] << 8 | (mac_addr[0]));
+ __phy_write(dev, RTL8211F_PHYSICAL_ADDR_WORD1, mac_addr[3] << 8 | (mac_addr[2]));
+ __phy_write(dev, RTL8211F_PHYSICAL_ADDR_WORD2, mac_addr[5] << 8 | (mac_addr[4]));
+
+ /* Enable magic packet matching */
+ rtl821x_write_page(dev, RTL8211F_WOL_PAGE);
+ __phy_write(dev, RTL8211F_WOL_SETTINGS_EVENTS, RTL8211F_WOL_EVENT_MAGIC);
+ /* Set the maximum packet size, and assert WoL reset */
+ __phy_write(dev, RTL8211F_WOL_RST_RMSQ, RTL8211F_WOL_RMSQ);
+ } else {
+ /* Disable magic packet matching */
+ rtl821x_write_page(dev, RTL8211F_WOL_PAGE);
+ __phy_write(dev, RTL8211F_WOL_SETTINGS_EVENTS, 0);
+
+ /* Place WoL in reset */
+ __phy_clear_bits(dev, RTL8211F_WOL_RST_RMSQ,
+ RTL8211F_WOL_RG_RSTB);
+ }
+
+ device_set_wakeup_enable(&dev->mdio.dev, !!(wol->wolopts & WAKE_MAGIC));
+
+err:
+ return phy_restore_page(dev, oldpage, 0);
}
static int rtl8211_config_aneg(struct phy_device *phydev)
@@ -371,22 +578,11 @@ static int rtl8211c_config_init(struct phy_device *phydev)
CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER);
}
-static int rtl8211f_config_init(struct phy_device *phydev)
+static int rtl8211f_config_rgmii_delay(struct phy_device *phydev)
{
- struct rtl821x_priv *priv = phydev->priv;
- struct device *dev = &phydev->mdio.dev;
u16 val_txdly, val_rxdly;
int ret;
- ret = phy_modify_paged_changed(phydev, 0xa43, RTL8211F_PHYCR1,
- RTL8211F_ALDPS_PLL_OFF | RTL8211F_ALDPS_ENABLE | RTL8211F_ALDPS_XTAL_OFF,
- priv->phycr1);
- if (ret < 0) {
- dev_err(dev, "aldps mode configuration failed: %pe\n",
- ERR_PTR(ret));
- return ret;
- }
-
switch (phydev->interface) {
case PHY_INTERFACE_MODE_RGMII:
val_txdly = 0;
@@ -412,49 +608,122 @@ static int rtl8211f_config_init(struct phy_device *phydev)
return 0;
}
- ret = phy_modify_paged_changed(phydev, 0xd08, 0x11, RTL8211F_TX_DELAY,
+ ret = phy_modify_paged_changed(phydev, RTL8211F_RGMII_PAGE,
+ RTL8211F_TXCR, RTL8211F_TX_DELAY,
val_txdly);
if (ret < 0) {
- dev_err(dev, "Failed to update the TX delay register\n");
+ phydev_err(phydev, "Failed to update the TX delay register: %pe\n",
+ ERR_PTR(ret));
return ret;
} else if (ret) {
- dev_dbg(dev,
- "%s 2ns TX delay (and changing the value from pin-strapping RXD1 or the bootloader)\n",
- val_txdly ? "Enabling" : "Disabling");
+ phydev_dbg(phydev,
+ "%s 2ns TX delay (and changing the value from pin-strapping RXD1 or the bootloader)\n",
+ str_enable_disable(val_txdly));
} else {
- dev_dbg(dev,
- "2ns TX delay was already %s (by pin-strapping RXD1 or bootloader configuration)\n",
- val_txdly ? "enabled" : "disabled");
+ phydev_dbg(phydev,
+ "2ns TX delay was already %s (by pin-strapping RXD1 or bootloader configuration)\n",
+ str_enabled_disabled(val_txdly));
}
- ret = phy_modify_paged_changed(phydev, 0xd08, 0x15, RTL8211F_RX_DELAY,
+ ret = phy_modify_paged_changed(phydev, RTL8211F_RGMII_PAGE,
+ RTL8211F_RXCR, RTL8211F_RX_DELAY,
val_rxdly);
if (ret < 0) {
- dev_err(dev, "Failed to update the RX delay register\n");
+ phydev_err(phydev, "Failed to update the RX delay register: %pe\n",
+ ERR_PTR(ret));
return ret;
} else if (ret) {
- dev_dbg(dev,
- "%s 2ns RX delay (and changing the value from pin-strapping RXD0 or the bootloader)\n",
- val_rxdly ? "Enabling" : "Disabling");
+ phydev_dbg(phydev,
+ "%s 2ns RX delay (and changing the value from pin-strapping RXD0 or the bootloader)\n",
+ str_enable_disable(val_rxdly));
} else {
- dev_dbg(dev,
- "2ns RX delay was already %s (by pin-strapping RXD0 or bootloader configuration)\n",
- val_rxdly ? "enabled" : "disabled");
+ phydev_dbg(phydev,
+ "2ns RX delay was already %s (by pin-strapping RXD0 or bootloader configuration)\n",
+ str_enabled_disabled(val_rxdly));
}
- if (priv->has_phycr2) {
- ret = phy_modify_paged(phydev, 0xa43, RTL8211F_PHYCR2,
- RTL8211F_CLKOUT_EN, priv->phycr2);
- if (ret < 0) {
- dev_err(dev, "clkout configuration failed: %pe\n",
- ERR_PTR(ret));
- return ret;
- }
+ return 0;
+}
+
+static int rtl8211f_config_clk_out(struct phy_device *phydev)
+{
+ struct rtl821x_priv *priv = phydev->priv;
+ int ret;
+
+ /* The value is preserved if the device tree property is absent */
+ if (!priv->disable_clk_out)
+ return 0;
+
+ if (phydev->drv->phy_id == RTL_8211FVD_PHYID)
+ ret = phy_modify_paged(phydev, RTL8211FVD_CLKOUT_PAGE,
+ RTL8211FVD_CLKOUT_REG,
+ RTL8211FVD_CLKOUT_EN, 0);
+ else
+ ret = phy_modify_paged(phydev, RTL8211F_PHYCR_PAGE,
+ RTL8211F_PHYCR2, RTL8211F_CLKOUT_EN, 0);
+ if (ret)
+ return ret;
+
+ return genphy_soft_reset(phydev);
+}
+
+/* Advanced Link Down Power Saving (ALDPS) mode changes crystal/clock behaviour,
+ * which causes the RXC clock signal to stop for tens to hundreds of
+ * milliseconds.
+ *
+ * Some MACs need the RXC clock to support their internal RX logic, so ALDPS is
+ * only enabled based on an opt-in device tree property.
+ */
+static int rtl8211f_config_aldps(struct phy_device *phydev)
+{
+ struct rtl821x_priv *priv = phydev->priv;
+ u16 mask = RTL8211F_ALDPS_PLL_OFF |
+ RTL8211F_ALDPS_ENABLE |
+ RTL8211F_ALDPS_XTAL_OFF;
- return genphy_soft_reset(phydev);
+ /* The value is preserved if the device tree property is absent */
+ if (!priv->enable_aldps)
+ return 0;
+
+ return phy_modify_paged(phydev, RTL8211F_PHYCR_PAGE, RTL8211F_PHYCR1,
+ mask, mask);
+}
+
+static int rtl8211f_config_phy_eee(struct phy_device *phydev)
+{
+ /* RTL8211FVD has no PHYCR2 register */
+ if (phydev->drv->phy_id == RTL_8211FVD_PHYID)
+ return 0;
+
+ /* Disable PHY-mode EEE so LPI is passed to the MAC */
+ return phy_modify_paged(phydev, RTL8211F_PHYCR_PAGE, RTL8211F_PHYCR2,
+ RTL8211F_PHYCR2_PHY_EEE_ENABLE, 0);
+}
+
+static int rtl8211f_config_init(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ int ret;
+
+ ret = rtl8211f_config_aldps(phydev);
+ if (ret) {
+ dev_err(dev, "aldps mode configuration failed: %pe\n",
+ ERR_PTR(ret));
+ return ret;
}
- return 0;
+ ret = rtl8211f_config_rgmii_delay(phydev);
+ if (ret)
+ return ret;
+
+ ret = rtl8211f_config_clk_out(phydev);
+ if (ret) {
+ dev_err(dev, "clkout configuration failed: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ return rtl8211f_config_phy_eee(phydev);
}
static int rtl821x_suspend(struct phy_device *phydev)
@@ -474,6 +743,52 @@ static int rtl821x_suspend(struct phy_device *phydev)
return ret;
}
+static int rtl8211f_suspend(struct phy_device *phydev)
+{
+ u16 wol_rst;
+ int ret;
+
+ ret = rtl821x_suspend(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* If a PME event is enabled, then configure the interrupt for
+ * PME events only, disabling link interrupt. We avoid switching
+ * to PMEB mode as we don't have a status bit for that.
+ */
+ if (device_may_wakeup(&phydev->mdio.dev)) {
+ ret = phy_write_paged(phydev, 0xa42, RTL821x_INER,
+ RTL8211F_INER_PME);
+ if (ret < 0)
+ goto err;
+
+ /* Read the INSR to clear any pending interrupt */
+ phy_read_paged(phydev, RTL8211F_INSR_PAGE, RTL8211F_INSR);
+
+ /* Reset the WoL to ensure that an event is picked up.
+ * Unless we do this, even if we receive another packet,
+ * we may not have a PME interrupt raised.
+ */
+ ret = phy_read_paged(phydev, RTL8211F_WOL_PAGE,
+ RTL8211F_WOL_RST_RMSQ);
+ if (ret < 0)
+ goto err;
+
+ wol_rst = ret & ~RTL8211F_WOL_RG_RSTB;
+ ret = phy_write_paged(phydev, RTL8211F_WOL_PAGE,
+ RTL8211F_WOL_RST_RMSQ, wol_rst);
+ if (ret < 0)
+ goto err;
+
+ wol_rst |= RTL8211F_WOL_RG_RSTB;
+ ret = phy_write_paged(phydev, RTL8211F_WOL_PAGE,
+ RTL8211F_WOL_RST_RMSQ, wol_rst);
+ }
+
+err:
+ return ret;
+}
+
static int rtl821x_resume(struct phy_device *phydev)
{
struct rtl821x_priv *priv = phydev->priv;
@@ -491,10 +806,29 @@ static int rtl821x_resume(struct phy_device *phydev)
return 0;
}
-static int rtl8211f_led_hw_is_supported(struct phy_device *phydev, u8 index,
+static int rtl8211f_resume(struct phy_device *phydev)
+{
+ struct rtl821x_priv *priv = phydev->priv;
+ int ret;
+
+ ret = rtl821x_resume(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* If the device was programmed for a PME event, restore the interrupt
+ * enable so phylib can receive link state interrupts.
+ */
+ if (device_may_wakeup(&phydev->mdio.dev))
+ ret = phy_write_paged(phydev, 0xa42, RTL821x_INER, priv->iner);
+
+ return ret;
+}
+
+static int rtl8211x_led_hw_is_supported(struct phy_device *phydev, u8 index,
unsigned long rules)
{
- const unsigned long mask = BIT(TRIGGER_NETDEV_LINK_10) |
+ const unsigned long mask = BIT(TRIGGER_NETDEV_LINK) |
+ BIT(TRIGGER_NETDEV_LINK_10) |
BIT(TRIGGER_NETDEV_LINK_100) |
BIT(TRIGGER_NETDEV_LINK_1000) |
BIT(TRIGGER_NETDEV_RX) |
@@ -510,9 +844,11 @@ static int rtl8211f_led_hw_is_supported(struct phy_device *phydev, u8 index,
* rates and Active indication always at all three 10+100+1000
* link rates.
* This code currently uses mode B only.
+ *
+	 * The RTL8211E PHY LED has a single mode, which works like RTL8211F mode B.
*/
- if (index >= RTL8211F_LED_COUNT)
+ if (index >= RTL8211x_LED_COUNT)
return -EINVAL;
/* Filter out any other unsupported triggers. */
@@ -531,7 +867,7 @@ static int rtl8211f_led_hw_control_get(struct phy_device *phydev, u8 index,
{
int val;
- if (index >= RTL8211F_LED_COUNT)
+ if (index >= RTL8211x_LED_COUNT)
return -EINVAL;
val = phy_read_paged(phydev, 0xd04, RTL8211F_LEDCR);
@@ -542,17 +878,23 @@ static int rtl8211f_led_hw_control_get(struct phy_device *phydev, u8 index,
val &= RTL8211F_LEDCR_MASK;
if (val & RTL8211F_LEDCR_LINK_10)
- set_bit(TRIGGER_NETDEV_LINK_10, rules);
+ __set_bit(TRIGGER_NETDEV_LINK_10, rules);
if (val & RTL8211F_LEDCR_LINK_100)
- set_bit(TRIGGER_NETDEV_LINK_100, rules);
+ __set_bit(TRIGGER_NETDEV_LINK_100, rules);
if (val & RTL8211F_LEDCR_LINK_1000)
- set_bit(TRIGGER_NETDEV_LINK_1000, rules);
+ __set_bit(TRIGGER_NETDEV_LINK_1000, rules);
+
+ if ((val & RTL8211F_LEDCR_LINK_10) &&
+ (val & RTL8211F_LEDCR_LINK_100) &&
+ (val & RTL8211F_LEDCR_LINK_1000)) {
+ __set_bit(TRIGGER_NETDEV_LINK, rules);
+ }
if (val & RTL8211F_LEDCR_ACT_TXRX) {
- set_bit(TRIGGER_NETDEV_RX, rules);
- set_bit(TRIGGER_NETDEV_TX, rules);
+ __set_bit(TRIGGER_NETDEV_RX, rules);
+ __set_bit(TRIGGER_NETDEV_TX, rules);
}
return 0;
@@ -564,17 +906,23 @@ static int rtl8211f_led_hw_control_set(struct phy_device *phydev, u8 index,
const u16 mask = RTL8211F_LEDCR_MASK << (RTL8211F_LEDCR_SHIFT * index);
u16 reg = 0;
- if (index >= RTL8211F_LED_COUNT)
+ if (index >= RTL8211x_LED_COUNT)
return -EINVAL;
- if (test_bit(TRIGGER_NETDEV_LINK_10, &rules))
+ if (test_bit(TRIGGER_NETDEV_LINK, &rules) ||
+ test_bit(TRIGGER_NETDEV_LINK_10, &rules)) {
reg |= RTL8211F_LEDCR_LINK_10;
+ }
- if (test_bit(TRIGGER_NETDEV_LINK_100, &rules))
+ if (test_bit(TRIGGER_NETDEV_LINK, &rules) ||
+ test_bit(TRIGGER_NETDEV_LINK_100, &rules)) {
reg |= RTL8211F_LEDCR_LINK_100;
+ }
- if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules))
+ if (test_bit(TRIGGER_NETDEV_LINK, &rules) ||
+ test_bit(TRIGGER_NETDEV_LINK_1000, &rules)) {
reg |= RTL8211F_LEDCR_LINK_1000;
+ }
if (test_bit(TRIGGER_NETDEV_RX, &rules) ||
test_bit(TRIGGER_NETDEV_TX, &rules)) {
@@ -587,9 +935,98 @@ static int rtl8211f_led_hw_control_set(struct phy_device *phydev, u8 index,
return phy_modify_paged(phydev, 0xd04, RTL8211F_LEDCR, mask, reg);
}
+static int rtl8211e_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ int ret;
+ u16 cr1, cr2;
+
+ if (index >= RTL8211x_LED_COUNT)
+ return -EINVAL;
+
+ ret = rtl821x_read_ext_page(phydev, RTL8211E_LEDCR_EXT_PAGE,
+ RTL8211E_LEDCR1);
+ if (ret < 0)
+ return ret;
+
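+ /* LEDCR1 holds the per-LED activity setting; shift this LED's field down
+ * before testing it.
+ */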
+ cr1 = ret >> RTL8211E_LEDCR1_SHIFT * index;
+ if (cr1 & RTL8211E_LEDCR1_ACT_TXRX) {
+ __set_bit(TRIGGER_NETDEV_RX, rules);
+ __set_bit(TRIGGER_NETDEV_TX, rules);
+ }
+
+ ret = rtl821x_read_ext_page(phydev, RTL8211E_LEDCR_EXT_PAGE,
+ RTL8211E_LEDCR2);
+ if (ret < 0)
+ return ret;
+
+ cr2 = ret >> RTL8211E_LEDCR2_SHIFT * index;
+ if (cr2 & RTL8211E_LEDCR2_LINK_10)
+ __set_bit(TRIGGER_NETDEV_LINK_10, rules);
+
+ if (cr2 & RTL8211E_LEDCR2_LINK_100)
+ __set_bit(TRIGGER_NETDEV_LINK_100, rules);
+
+ if (cr2 & RTL8211E_LEDCR2_LINK_1000)
+ __set_bit(TRIGGER_NETDEV_LINK_1000, rules);
+
+ if ((cr2 & RTL8211E_LEDCR2_LINK_10) &&
+ (cr2 & RTL8211E_LEDCR2_LINK_100) &&
+ (cr2 & RTL8211E_LEDCR2_LINK_1000)) {
+ __set_bit(TRIGGER_NETDEV_LINK, rules);
+ }
+
+ return ret;
+}
+
+static int rtl8211e_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ const u16 cr1mask =
+ RTL8211E_LEDCR1_MASK << (RTL8211E_LEDCR1_SHIFT * index);
+ const u16 cr2mask =
+ RTL8211E_LEDCR2_MASK << (RTL8211E_LEDCR2_SHIFT * index);
+ u16 cr1 = 0, cr2 = 0;
+ int ret;
+
+ if (index >= RTL8211x_LED_COUNT)
+ return -EINVAL;
+
+ if (test_bit(TRIGGER_NETDEV_RX, &rules) ||
+ test_bit(TRIGGER_NETDEV_TX, &rules)) {
+ cr1 |= RTL8211E_LEDCR1_ACT_TXRX;
+ }
+
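+ /* Move the per-LED field into position before applying the masked update. */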
+ cr1 <<= RTL8211E_LEDCR1_SHIFT * index;
+ ret = rtl821x_modify_ext_page(phydev, RTL8211E_LEDCR_EXT_PAGE,
+ RTL8211E_LEDCR1, cr1mask, cr1);
+ if (ret < 0)
+ return ret;
+
+ if (test_bit(TRIGGER_NETDEV_LINK, &rules) ||
+ test_bit(TRIGGER_NETDEV_LINK_10, &rules)) {
+ cr2 |= RTL8211E_LEDCR2_LINK_10;
+ }
+
+ if (test_bit(TRIGGER_NETDEV_LINK, &rules) ||
+ test_bit(TRIGGER_NETDEV_LINK_100, &rules)) {
+ cr2 |= RTL8211E_LEDCR2_LINK_100;
+ }
+
+ if (test_bit(TRIGGER_NETDEV_LINK, &rules) ||
+ test_bit(TRIGGER_NETDEV_LINK_1000, &rules)) {
+ cr2 |= RTL8211E_LEDCR2_LINK_1000;
+ }
+
+ cr2 <<= RTL8211E_LEDCR2_SHIFT * index;
+ ret = rtl821x_modify_ext_page(phydev, RTL8211E_LEDCR_EXT_PAGE,
+ RTL8211E_LEDCR2, cr2mask, cr2);
+
+ return ret;
+}
+
static int rtl8211e_config_init(struct phy_device *phydev)
{
- int ret = 0, oldpage;
u16 val;
/* enable TX/RX delay for rgmii-* modes, and disable them for rgmii. */
@@ -619,20 +1056,9 @@ static int rtl8211e_config_init(struct phy_device *phydev)
* 12 = RX Delay, 11 = TX Delay
* 10:0 = Test && debug settings reserved by realtek
*/
- oldpage = phy_select_page(phydev, 0x7);
- if (oldpage < 0)
- goto err_restore_page;
-
- ret = __phy_write(phydev, RTL821x_EXT_PAGE_SELECT, 0xa4);
- if (ret)
- goto err_restore_page;
-
- ret = __phy_modify(phydev, 0x1c, RTL8211E_CTRL_DELAY
- | RTL8211E_TX_DELAY | RTL8211E_RX_DELAY,
- val);
-
-err_restore_page:
- return phy_restore_page(phydev, oldpage, ret);
+ return rtl821x_modify_ext_page(phydev, RTL8211E_RGMII_EXT_PAGE,
+ RTL8211E_RGMII_DELAY,
+ RTL8211E_DELAY_MASK, val);
}
static int rtl8211b_suspend(struct phy_device *phydev)
@@ -732,25 +1158,31 @@ static int rtlgen_read_status(struct phy_device *phydev)
return 0;
}
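+/* Helpers for the Realtek vendor-2 MMD. They use the unlocked bus accessors
+ * because the read_mmd/write_mmd hooks are invoked with the MDIO bus lock
+ * already held.
+ */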
+static int rtlgen_read_vend2(struct phy_device *phydev, int regnum)
+{
+ return __mdiobus_c45_read(phydev->mdio.bus, phydev->mdio.addr,
+ MDIO_MMD_VEND2, regnum);
+}
+
+static int rtlgen_write_vend2(struct phy_device *phydev, int regnum, u16 val)
+{
+ return __mdiobus_c45_write(phydev->mdio.bus, phydev->mdio.addr,
+ MDIO_MMD_VEND2, regnum, val);
+}
+
static int rtlgen_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
{
int ret;
- if (devnum == MDIO_MMD_PCS && regnum == MDIO_PCS_EEE_ABLE) {
- rtl821x_write_page(phydev, 0xa5c);
- ret = __phy_read(phydev, 0x12);
- rtl821x_write_page(phydev, 0);
- } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV) {
- rtl821x_write_page(phydev, 0xa5d);
- ret = __phy_read(phydev, 0x10);
- rtl821x_write_page(phydev, 0);
- } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_LPABLE) {
- rtl821x_write_page(phydev, 0xa5d);
- ret = __phy_read(phydev, 0x11);
- rtl821x_write_page(phydev, 0);
- } else {
+ if (devnum == MDIO_MMD_VEND2)
+ ret = rtlgen_read_vend2(phydev, regnum);
+ else if (devnum == MDIO_MMD_PCS && regnum == MDIO_PCS_EEE_ABLE)
+ ret = rtlgen_read_vend2(phydev, RTL_MDIO_PCS_EEE_ABLE);
+ else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV)
+ ret = rtlgen_read_vend2(phydev, RTL_MDIO_AN_EEE_ADV);
+ else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_LPABLE)
+ ret = rtlgen_read_vend2(phydev, RTL_MDIO_AN_EEE_LPABLE);
+ else
ret = -EOPNOTSUPP;
- }
return ret;
}
@@ -760,13 +1192,12 @@ static int rtlgen_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
{
int ret;
- if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV) {
- rtl821x_write_page(phydev, 0xa5d);
- ret = __phy_write(phydev, 0x10, val);
- rtl821x_write_page(phydev, 0);
- } else {
+ if (devnum == MDIO_MMD_VEND2)
+ ret = rtlgen_write_vend2(phydev, regnum, val);
+ else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV)
+ ret = rtlgen_write_vend2(phydev, RTL_MDIO_AN_EEE_ADV, val);
+ else
ret = -EOPNOTSUPP;
- }
return ret;
}
@@ -778,19 +1209,12 @@ static int rtl822x_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
if (ret != -EOPNOTSUPP)
return ret;
- if (devnum == MDIO_MMD_PCS && regnum == MDIO_PCS_EEE_ABLE2) {
- rtl821x_write_page(phydev, 0xa6e);
- ret = __phy_read(phydev, 0x16);
- rtl821x_write_page(phydev, 0);
- } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV2) {
- rtl821x_write_page(phydev, 0xa6d);
- ret = __phy_read(phydev, 0x12);
- rtl821x_write_page(phydev, 0);
- } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_LPABLE2) {
- rtl821x_write_page(phydev, 0xa6d);
- ret = __phy_read(phydev, 0x10);
- rtl821x_write_page(phydev, 0);
- }
+ if (devnum == MDIO_MMD_PCS && regnum == MDIO_PCS_EEE_ABLE2)
+ ret = rtlgen_read_vend2(phydev, RTL_MDIO_PCS_EEE_ABLE2);
+ else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV2)
+ ret = rtlgen_read_vend2(phydev, RTL_MDIO_AN_EEE_ADV2);
+ else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_LPABLE2)
+ ret = rtlgen_read_vend2(phydev, RTL_MDIO_AN_EEE_LPABLE2);
return ret;
}
@@ -803,16 +1227,22 @@ static int rtl822x_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
if (ret != -EOPNOTSUPP)
return ret;
- if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV2) {
- rtl821x_write_page(phydev, 0xa6d);
- ret = __phy_write(phydev, 0x12, val);
- rtl821x_write_page(phydev, 0);
- }
+ if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV2)
+ ret = rtlgen_write_vend2(phydev, RTL_MDIO_AN_EEE_ADV2, val);
return ret;
}
-static int rtl822xb_config_init(struct phy_device *phydev)
+static int rtl822x_probe(struct phy_device *phydev)
+{
+ if (IS_ENABLED(CONFIG_REALTEK_PHY_HWMON) &&
+ phydev->phy_id != RTL_GENERIC_PHYID)
+ return rtl822x_hwmon_init(phydev);
+
+ return 0;
+}
+
+static int rtl822x_set_serdes_option_mode(struct phy_device *phydev, bool gen1)
{
bool has_2500, has_sgmii;
u16 mode;
@@ -847,15 +1277,18 @@ static int rtl822xb_config_init(struct phy_device *phydev)
/* the following sequence with magic numbers sets up the SerDes
* option mode
*/
- ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x75f3, 0);
- if (ret < 0)
- return ret;
+
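+ /* On the first-generation parts only the SerDes option mode itself is
+ * written; the extra vendor register writes below are skipped.
+ */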
+ if (!gen1) {
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x75f3, 0);
+ if (ret < 0)
+ return ret;
+ }
ret = phy_modify_mmd_changed(phydev, MDIO_MMD_VEND1,
RTL822X_VND1_SERDES_OPTION,
RTL822X_VND1_SERDES_OPTION_MODE_MASK,
mode);
- if (ret < 0)
+ if (gen1 || ret < 0)
return ret;
ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x6a04, 0x0503);
@@ -869,6 +1302,16 @@ static int rtl822xb_config_init(struct phy_device *phydev)
return phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x6f11, 0x8020);
}
+static int rtl822x_config_init(struct phy_device *phydev)
+{
+ return rtl822x_set_serdes_option_mode(phydev, true);
+}
+
+static int rtl822xb_config_init(struct phy_device *phydev)
+{
+ return rtl822x_set_serdes_option_mode(phydev, false);
+}
+
static int rtl822xb_get_rate_matching(struct phy_device *phydev,
phy_interface_t iface)
{
@@ -894,7 +1337,7 @@ static int rtl822x_get_features(struct phy_device *phydev)
{
int val;
- val = phy_read_paged(phydev, 0xa61, 0x13);
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND2, RTL_MDIO_PMA_SPEED);
if (val < 0)
return val;
@@ -915,10 +1358,10 @@ static int rtl822x_config_aneg(struct phy_device *phydev)
if (phydev->autoneg == AUTONEG_ENABLE) {
u16 adv = linkmode_adv_to_mii_10gbt_adv_t(phydev->advertising);
- ret = phy_modify_paged_changed(phydev, 0xa5d, 0x12,
- MDIO_AN_10GBT_CTRL_ADV2_5G |
- MDIO_AN_10GBT_CTRL_ADV5G,
- adv);
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_VEND2,
+ RTL_MDIO_AN_10GBT_CTRL,
+ MDIO_AN_10GBT_CTRL_ADV2_5G |
+ MDIO_AN_10GBT_CTRL_ADV5G, adv);
if (ret < 0)
return ret;
}
@@ -952,17 +1395,17 @@ static int rtl822x_read_status(struct phy_device *phydev)
{
int lpadv, ret;
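+ /* Start with the 2.5G/5G link-partner bits cleared; they are only filled
+ * in below once autonegotiation has completed.
+ */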
+ mii_10gbt_stat_mod_linkmode_lpa_t(phydev->lp_advertising, 0);
+
ret = rtlgen_read_status(phydev);
if (ret < 0)
return ret;
if (phydev->autoneg == AUTONEG_DISABLE ||
- !phydev->autoneg_complete) {
- mii_10gbt_stat_mod_linkmode_lpa_t(phydev->lp_advertising, 0);
+ !phydev->autoneg_complete)
return 0;
- }
- lpadv = phy_read_paged(phydev, 0xa5d, 0x13);
+ lpadv = phy_read_mmd(phydev, MDIO_MMD_VEND2, RTL_MDIO_AN_10GBT_STAT);
if (lpadv < 0)
return lpadv;
@@ -1009,7 +1452,8 @@ static int rtl822x_c45_config_aneg(struct phy_device *phydev)
val = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising);
/* Vendor register as C45 has no standardized support for 1000BaseT */
- ret = phy_modify_mmd_changed(phydev, MDIO_MMD_VEND2, RTL822X_VND2_GBCR,
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_VEND2,
+ RTL822X_VND2_C22_REG(MII_CTRL1000),
ADVERTISE_1000FULL, val);
if (ret < 0)
return ret;
@@ -1023,26 +1467,25 @@ static int rtl822x_c45_read_status(struct phy_device *phydev)
{
int ret, val;
- ret = genphy_c45_read_status(phydev);
- if (ret < 0)
- return ret;
-
- if (phydev->autoneg == AUTONEG_DISABLE ||
- !genphy_c45_aneg_done(phydev))
- mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, 0);
-
/* Vendor register as C45 has no standardized support for 1000BaseT */
- if (phydev->autoneg == AUTONEG_ENABLE) {
+ if (phydev->autoneg == AUTONEG_ENABLE && genphy_c45_aneg_done(phydev)) {
val = phy_read_mmd(phydev, MDIO_MMD_VEND2,
- RTL822X_VND2_GANLPAR);
+ RTL822X_VND2_C22_REG(MII_STAT1000));
if (val < 0)
return val;
-
- mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, val);
+ } else {
+ val = 0;
}
+ mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, val);
- if (!phydev->link)
+ ret = genphy_c45_read_status(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (!phydev->link) {
+ phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN;
return 0;
+ }
/* Read actual speed from vendor register. */
val = phy_read_mmd(phydev, MDIO_MMD_VEND2, RTL_VND2_PHYSR);
@@ -1054,6 +1497,21 @@ static int rtl822x_c45_read_status(struct phy_device *phydev)
return 0;
}
+static int rtl822x_c45_soft_reset(struct phy_device *phydev)
+{
+ int ret, val;
+
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_CTRL1_RESET, MDIO_CTRL1_RESET);
+ if (ret < 0)
+ return ret;
+
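+ /* Poll every 5 ms, for up to 100 ms, until the reset bit self-clears. */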
+ return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_PMAPMD,
+ MDIO_CTRL1, val,
+ !(val & MDIO_CTRL1_RESET),
+ 5000, 100000, true);
+}
+
static int rtl822xb_c45_read_status(struct phy_device *phydev)
{
int ret;
@@ -1067,6 +1525,168 @@ static int rtl822xb_c45_read_status(struct phy_device *phydev)
return 0;
}
+static int rtl8224_cable_test_start(struct phy_device *phydev)
+{
+ u32 val;
+ int ret;
+
+ /* disable auto-negotiation and force 1000/Full */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2,
+ RTL822X_VND2_C22_REG(MII_BMCR),
+ BMCR_ANENABLE | BMCR_SPEED100 | BMCR_SPEED10,
+ BMCR_SPEED1000 | BMCR_FULLDPLX);
+ if (ret)
+ return ret;
+
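+ /* Give the link time to settle in the forced 1000/Full state before
+ * triggering the measurement (the 500 ms value looks empirically chosen).
+ */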
+ mdelay(500);
+
+ /* trigger cable test */
+ val = RTL8224_MII_RTCT_ENABLE;
+ val |= RTL8224_MII_RTCT_PAIR_A;
+ val |= RTL8224_MII_RTCT_PAIR_B;
+ val |= RTL8224_MII_RTCT_PAIR_C;
+ val |= RTL8224_MII_RTCT_PAIR_D;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2,
+ RTL822X_VND2_C22_REG(RTL8224_MII_RTCT),
+ RTL8224_MII_RTCT_DONE, val);
+}
+
+static int rtl8224_sram_read(struct phy_device *phydev, u32 reg)
+{
+ int ret;
+
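+ /* Indirect SRAM access: latch the address, then read the data register. */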
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2,
+ RTL822X_VND2_C22_REG(RTL8224_MII_SRAM_ADDR),
+ reg);
+ if (ret)
+ return ret;
+
+ return phy_read_mmd(phydev, MDIO_MMD_VEND2,
+ RTL822X_VND2_C22_REG(RTL8224_MII_SRAM_DATA));
+}
+
+static int rtl8224_pair_len_get(struct phy_device *phydev, u32 pair)
+{
+ int cable_len;
+ u32 reg_len;
+ int ret;
+ u32 cm;
+
+ reg_len = RTL8224_SRAM_RTCT_LEN(pair);
+
+ ret = rtl8224_sram_read(phydev, reg_len);
+ if (ret < 0)
+ return ret;
+
+ cable_len = ret & 0xff00;
+
+ ret = rtl8224_sram_read(phydev, reg_len + 1);
+ if (ret < 0)
+ return ret;
+
+ cable_len |= (ret & 0xff00) >> 8;
+
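+ /* Convert the raw length counter to centimetres; the 620 offset and the
+ * 100/78 scale factor appear to be vendor-specific calibration constants.
+ */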
+ cable_len -= 620;
+ cable_len = max(cable_len, 0);
+
+ cm = cable_len * 100 / 78;
+
+ return cm;
+}
+
+static int rtl8224_cable_test_result_trans(u32 result)
+{
+ if (!(result & RTL8224_SRAM_RTCT_FAULT_DONE))
+ return -EBUSY;
+
+ if (result & RTL8224_SRAM_RTCT_FAULT_OK)
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+
+ if (result & RTL8224_SRAM_RTCT_FAULT_OPEN)
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+
+ if (result & RTL8224_SRAM_RTCT_FAULT_SAME_SHORT)
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+
+ if (result & RTL8224_SRAM_RTCT_FAULT_BUSY)
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+
+ if (result & RTL8224_SRAM_RTCT_FAULT_CROSS_SHORT)
+ return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT;
+
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+}
+
+static int rtl8224_cable_test_report_pair(struct phy_device *phydev, unsigned int pair)
+{
+ int fault_rslt;
+ int ret;
+
+ ret = rtl8224_sram_read(phydev, RTL8224_SRAM_RTCT_FAULT(pair));
+ if (ret < 0)
+ return ret;
+
+ fault_rslt = rtl8224_cable_test_result_trans(ret);
+ if (fault_rslt < 0)
+ return fault_rslt;
+
+ ret = ethnl_cable_test_result(phydev, pair, fault_rslt);
+ if (ret < 0)
+ return ret;
+
+ switch (fault_rslt) {
+ case ETHTOOL_A_CABLE_RESULT_CODE_OPEN:
+ case ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT:
+ case ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT:
+ ret = rtl8224_pair_len_get(phydev, pair);
+ if (ret < 0)
+ return ret;
+
+ return ethnl_cable_test_fault_length(phydev, pair, ret);
+ default:
+ return 0;
+ }
+}
+
+static int rtl8224_cable_test_report(struct phy_device *phydev, bool *finished)
+{
+ unsigned int pair;
+ int ret;
+
+ for (pair = ETHTOOL_A_CABLE_PAIR_A; pair <= ETHTOOL_A_CABLE_PAIR_D; pair++) {
+ ret = rtl8224_cable_test_report_pair(phydev, pair);
+ if (ret == -EBUSY) {
+ *finished = false;
+ return 0;
+ }
+
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rtl8224_cable_test_get_status(struct phy_device *phydev, bool *finished)
+{
+ int ret;
+
+ *finished = false;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2,
+ RTL822X_VND2_C22_REG(RTL8224_MII_RTCT));
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & RTL8224_MII_RTCT_DONE))
+ return 0;
+
+ *finished = true;
+
+ return rtl8224_cable_test_report(phydev, finished);
+}
+
static bool rtlgen_supports_2_5gbps(struct phy_device *phydev)
{
int val;
@@ -1095,13 +1715,15 @@ static bool rtlgen_supports_mmd(struct phy_device *phydev)
return val > 0;
}
-static int rtlgen_match_phy_device(struct phy_device *phydev)
+static int rtlgen_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return phydev->phy_id == RTL_GENERIC_PHYID &&
!rtlgen_supports_2_5gbps(phydev);
}
-static int rtl8226_match_phy_device(struct phy_device *phydev)
+static int rtl8226_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return phydev->phy_id == RTL_GENERIC_PHYID &&
rtlgen_supports_2_5gbps(phydev) &&
@@ -1117,32 +1739,38 @@ static int rtlgen_is_c45_match(struct phy_device *phydev, unsigned int id,
return !is_c45 && (id == phydev->phy_id);
}
-static int rtl8221b_match_phy_device(struct phy_device *phydev)
+static int rtl8221b_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return phydev->phy_id == RTL_8221B && rtlgen_supports_mmd(phydev);
}
-static int rtl8221b_vb_cg_c22_match_phy_device(struct phy_device *phydev)
+static int rtl8221b_vb_cg_c22_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return rtlgen_is_c45_match(phydev, RTL_8221B_VB_CG, false);
}
-static int rtl8221b_vb_cg_c45_match_phy_device(struct phy_device *phydev)
+static int rtl8221b_vb_cg_c45_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return rtlgen_is_c45_match(phydev, RTL_8221B_VB_CG, true);
}
-static int rtl8221b_vn_cg_c22_match_phy_device(struct phy_device *phydev)
+static int rtl8221b_vm_cg_c22_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
- return rtlgen_is_c45_match(phydev, RTL_8221B_VN_CG, false);
+ return rtlgen_is_c45_match(phydev, RTL_8221B_VM_CG, false);
}
-static int rtl8221b_vn_cg_c45_match_phy_device(struct phy_device *phydev)
+static int rtl8221b_vm_cg_c45_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
- return rtlgen_is_c45_match(phydev, RTL_8221B_VN_CG, true);
+ return rtlgen_is_c45_match(phydev, RTL_8221B_VM_CG, true);
}
-static int rtl_internal_nbaset_match_phy_device(struct phy_device *phydev)
+static int rtl_internal_nbaset_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
if (phydev->is_c45)
return false;
@@ -1151,6 +1779,7 @@ static int rtl_internal_nbaset_match_phy_device(struct phy_device *phydev)
case RTL_GENERIC_PHYID:
case RTL_8221B:
case RTL_8251B:
+ case RTL_8261C:
case 0x001cc841:
break;
default:
@@ -1160,7 +1789,8 @@ static int rtl_internal_nbaset_match_phy_device(struct phy_device *phydev)
return rtlgen_supports_2_5gbps(phydev) && !rtlgen_supports_mmd(phydev);
}
-static int rtl8251b_c45_match_phy_device(struct phy_device *phydev)
+static int rtl8251b_c45_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return rtlgen_is_c45_match(phydev, RTL_8251B, true);
}
@@ -1300,6 +1930,53 @@ static irqreturn_t rtl9000a_handle_interrupt(struct phy_device *phydev)
return IRQ_HANDLED;
}
+static int rtl8221b_ack_interrupt(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_read_mmd(phydev, MDIO_MMD_VEND2, RTL8221B_VND2_INSR);
+
+ return (err < 0) ? err : 0;
+}
+
+static int rtl8221b_config_intr(struct phy_device *phydev)
+{
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
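+ /* Clear any latched interrupt before enabling link-status interrupts. */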
+ err = rtl8221b_ack_interrupt(phydev);
+ if (err)
+ return err;
+
+ err = phy_write_mmd(phydev, MDIO_MMD_VEND2, RTL8221B_VND2_INER,
+ RTL8221B_VND2_INER_LINK_STATUS);
+ } else {
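+ /* Disable interrupt generation first, then clear anything still latched. */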
+ err = phy_write_mmd(phydev, MDIO_MMD_VEND2,
+ RTL8221B_VND2_INER, 0);
+ if (err)
+ return err;
+
+ err = rtl8221b_ack_interrupt(phydev);
+ }
+
+ return err;
+}
+
+static irqreturn_t rtl8221b_handle_interrupt(struct phy_device *phydev)
+{
+ int err;
+
+ err = rtl8221b_ack_interrupt(phydev);
+ if (err) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
static struct phy_driver realtek_drvs[] = {
{
PHY_ID_MATCH_EXACT(0x00008201),
@@ -1370,20 +2047,25 @@ static struct phy_driver realtek_drvs[] = {
.resume = genphy_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
+ .led_hw_is_supported = rtl8211x_led_hw_is_supported,
+ .led_hw_control_get = rtl8211e_led_hw_control_get,
+ .led_hw_control_set = rtl8211e_led_hw_control_set,
}, {
PHY_ID_MATCH_EXACT(0x001cc916),
.name = "RTL8211F Gigabit Ethernet",
- .probe = rtl821x_probe,
+ .probe = rtl8211f_probe,
.config_init = &rtl8211f_config_init,
.read_status = rtlgen_read_status,
.config_intr = &rtl8211f_config_intr,
.handle_interrupt = rtl8211f_handle_interrupt,
- .suspend = rtl821x_suspend,
- .resume = rtl821x_resume,
+ .set_wol = rtl8211f_set_wol,
+ .get_wol = rtl8211f_get_wol,
+ .suspend = rtl8211f_suspend,
+ .resume = rtl8211f_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
.flags = PHY_ALWAYS_CALL_SUSPEND,
- .led_hw_is_supported = rtl8211f_led_hw_is_supported,
+ .led_hw_is_supported = rtl8211x_led_hw_is_supported,
.led_hw_control_get = rtl8211f_led_hw_control_get,
.led_hw_control_set = rtl8211f_led_hw_control_set,
}, {
@@ -1434,13 +2116,13 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001cc838),
.name = "RTL8226-CG 2.5Gbps PHY",
- .get_features = rtl822x_get_features,
- .config_aneg = rtl822x_config_aneg,
- .read_status = rtl822x_read_status,
- .suspend = genphy_suspend,
- .resume = rtlgen_resume,
- .read_page = rtl821x_read_page,
- .write_page = rtl821x_write_page,
+ .soft_reset = rtl822x_c45_soft_reset,
+ .get_features = rtl822x_c45_get_features,
+ .config_aneg = rtl822x_c45_config_aneg,
+ .config_init = rtl822x_config_init,
+ .read_status = rtl822xb_c45_read_status,
+ .suspend = genphy_c45_pma_suspend,
+ .resume = rtlgen_c45_resume,
}, {
PHY_ID_MATCH_EXACT(0x001cc848),
.name = "RTL8226B-CG_RTL8221B-CG 2.5Gbps PHY",
@@ -1456,6 +2138,7 @@ static struct phy_driver realtek_drvs[] = {
}, {
.match_phy_device = rtl8221b_vb_cg_c22_match_phy_device,
.name = "RTL8221B-VB-CG 2.5Gbps PHY (C22)",
+ .probe = rtl822x_probe,
.get_features = rtl822x_get_features,
.config_aneg = rtl822x_config_aneg,
.config_init = rtl822xb_config_init,
@@ -1468,6 +2151,9 @@ static struct phy_driver realtek_drvs[] = {
}, {
.match_phy_device = rtl8221b_vb_cg_c45_match_phy_device,
.name = "RTL8221B-VB-CG 2.5Gbps PHY (C45)",
+ .config_intr = rtl8221b_config_intr,
+ .handle_interrupt = rtl8221b_handle_interrupt,
+ .probe = rtl822x_probe,
.config_init = rtl822xb_config_init,
.get_rate_matching = rtl822xb_get_rate_matching,
.get_features = rtl822x_c45_get_features,
@@ -1476,8 +2162,9 @@ static struct phy_driver realtek_drvs[] = {
.suspend = genphy_c45_pma_suspend,
.resume = rtlgen_c45_resume,
}, {
- .match_phy_device = rtl8221b_vn_cg_c22_match_phy_device,
+ .match_phy_device = rtl8221b_vm_cg_c22_match_phy_device,
.name = "RTL8221B-VM-CG 2.5Gbps PHY (C22)",
+ .probe = rtl822x_probe,
.get_features = rtl822x_get_features,
.config_aneg = rtl822x_config_aneg,
.config_init = rtl822xb_config_init,
@@ -1488,8 +2175,11 @@ static struct phy_driver realtek_drvs[] = {
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
- .match_phy_device = rtl8221b_vn_cg_c45_match_phy_device,
- .name = "RTL8221B-VN-CG 2.5Gbps PHY (C45)",
+ .match_phy_device = rtl8221b_vm_cg_c45_match_phy_device,
+ .name = "RTL8221B-VM-CG 2.5Gbps PHY (C45)",
+ .config_intr = rtl8221b_config_intr,
+ .handle_interrupt = rtl8221b_handle_interrupt,
+ .probe = rtl822x_probe,
.config_init = rtl822xb_config_init,
.get_rate_matching = rtl822xb_get_rate_matching,
.get_features = rtl822x_c45_get_features,
@@ -1500,6 +2190,7 @@ static struct phy_driver realtek_drvs[] = {
}, {
.match_phy_device = rtl8251b_c45_match_phy_device,
.name = "RTL8251B 5Gbps PHY",
+ .probe = rtl822x_probe,
.get_features = rtl822x_get_features,
.config_aneg = rtl822x_config_aneg,
.read_status = rtl822x_read_status,
@@ -1511,6 +2202,7 @@ static struct phy_driver realtek_drvs[] = {
.match_phy_device = rtl_internal_nbaset_match_phy_device,
.name = "Realtek Internal NBASE-T PHY",
.flags = PHY_IS_INTERNAL,
+ .probe = rtl822x_probe,
.get_features = rtl822x_get_features,
.config_aneg = rtl822x_config_aneg,
.read_status = rtl822x_read_status,
@@ -1523,11 +2215,14 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001ccad0),
.name = "RTL8224 2.5Gbps PHY",
+ .flags = PHY_POLL_CABLE_TEST,
.get_features = rtl822x_c45_get_features,
.config_aneg = rtl822x_c45_config_aneg,
.read_status = rtl822x_c45_read_status,
.suspend = genphy_c45_pma_suspend,
.resume = rtlgen_c45_resume,
+ .cable_test_start = rtl8224_cable_test_start,
+ .cable_test_get_status = rtl8224_cable_test_get_status,
}, {
PHY_ID_MATCH_EXACT(0x001cc961),
.name = "RTL8366RB Gigabit Ethernet",
diff --git a/drivers/net/phy/rockchip.c b/drivers/net/phy/rockchip.c
index bb13e75183ee..b338f385e15a 100644
--- a/drivers/net/phy/rockchip.c
+++ b/drivers/net/phy/rockchip.c
@@ -188,7 +188,7 @@ static struct phy_driver rockchip_phy_driver[] = {
module_phy_driver(rockchip_phy_driver);
-static struct mdio_device_id __maybe_unused rockchip_phy_tbl[] = {
+static const struct mdio_device_id __maybe_unused rockchip_phy_tbl[] = {
{ INTERNAL_EPHY_ID, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index f13c00b5b449..b945d75966d5 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -22,7 +22,6 @@ struct sfp_bus {
const struct sfp_socket_ops *socket_ops;
struct device *sfp_dev;
struct sfp *sfp;
- const struct sfp_quirk *sfp_quirk;
const struct sfp_upstream_ops *upstream_ops;
void *upstream;
@@ -30,24 +29,18 @@ struct sfp_bus {
bool registered;
bool started;
+
+ struct sfp_module_caps caps;
};
-/**
- * sfp_parse_port() - Parse the EEPROM base ID, setting the port type
- * @bus: a pointer to the &struct sfp_bus structure for the sfp module
- * @id: a pointer to the module's &struct sfp_eeprom_id
- * @support: optional pointer to an array of unsigned long for the
- * ethtool support mask
- *
- * Parse the EEPROM identification given in @id, and return one of
- * %PORT_TP, %PORT_FIBRE or %PORT_OTHER. If @support is non-%NULL,
- * also set the ethtool %ETHTOOL_LINK_MODE_xxx_BIT corresponding with
- * the connector type.
- *
- * If the port type is not known, returns %PORT_OTHER.
- */
-int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
- unsigned long *support)
+const struct sfp_module_caps *sfp_get_module_caps(struct sfp_bus *bus)
+{
+ return &bus->caps;
+}
+EXPORT_SYMBOL_GPL(sfp_get_module_caps);
+
+static void sfp_module_parse_port(struct sfp_bus *bus,
+ const struct sfp_eeprom_id *id)
{
int port;
@@ -91,34 +84,26 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
break;
}
- if (support) {
- switch (port) {
- case PORT_FIBRE:
- phylink_set(support, FIBRE);
- break;
+ switch (port) {
+ case PORT_FIBRE:
+ phylink_set(bus->caps.link_modes, FIBRE);
+ break;
- case PORT_TP:
- phylink_set(support, TP);
- break;
- }
+ case PORT_TP:
+ phylink_set(bus->caps.link_modes, TP);
+ break;
}
- return port;
+ bus->caps.port = port;
}
-EXPORT_SYMBOL_GPL(sfp_parse_port);
-/**
- * sfp_may_have_phy() - indicate whether the module may have a PHY
- * @bus: a pointer to the &struct sfp_bus structure for the sfp module
- * @id: a pointer to the module's &struct sfp_eeprom_id
- *
- * Parse the EEPROM identification given in @id, and return whether
- * this module may have a PHY.
- */
-bool sfp_may_have_phy(struct sfp_bus *bus, const struct sfp_eeprom_id *id)
+static void sfp_module_parse_may_have_phy(struct sfp_bus *bus,
+ const struct sfp_eeprom_id *id)
{
- if (id->base.e1000_base_t)
- return true;
+ if (id->base.e1000_base_t) {
+ bus->caps.may_have_phy = true;
+ return;
+ }
if (id->base.phys_id != SFF8024_ID_DWDM_SFP) {
switch (id->base.extended_cc) {
@@ -126,30 +111,20 @@ bool sfp_may_have_phy(struct sfp_bus *bus, const struct sfp_eeprom_id *id)
case SFF8024_ECC_10GBASE_T_SR:
case SFF8024_ECC_5GBASE_T:
case SFF8024_ECC_2_5GBASE_T:
- return true;
+ bus->caps.may_have_phy = true;
+ return;
}
}
- return false;
+ bus->caps.may_have_phy = false;
}
-EXPORT_SYMBOL_GPL(sfp_may_have_phy);
-/**
- * sfp_parse_support() - Parse the eeprom id for supported link modes
- * @bus: a pointer to the &struct sfp_bus structure for the sfp module
- * @id: a pointer to the module's &struct sfp_eeprom_id
- * @support: pointer to an array of unsigned long for the ethtool support mask
- * @interfaces: pointer to an array of unsigned long for phy interface modes
- * mask
- *
- * Parse the EEPROM identification information and derive the supported
- * ethtool link modes for the module.
- */
-void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
- unsigned long *support, unsigned long *interfaces)
+static void sfp_module_parse_support(struct sfp_bus *bus,
+ const struct sfp_eeprom_id *id)
{
+ unsigned long *interfaces = bus->caps.interfaces;
+ unsigned long *modes = bus->caps.link_modes;
unsigned int br_min, br_nom, br_max;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = { 0, };
/* Decode the bitrate information to MBd */
br_min = br_nom = br_max = 0;
@@ -338,13 +313,21 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
phylink_set(modes, Autoneg);
phylink_set(modes, Pause);
phylink_set(modes, Asym_Pause);
+}
- if (bus->sfp_quirk && bus->sfp_quirk->modes)
- bus->sfp_quirk->modes(id, modes, interfaces);
+static void sfp_init_module(struct sfp_bus *bus,
+ const struct sfp_eeprom_id *id,
+ const struct sfp_quirk *quirk)
+{
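+ /* Rebuild the cached module capabilities from the new module's EEPROM. */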
+ memset(&bus->caps, 0, sizeof(bus->caps));
+
+ sfp_module_parse_support(bus, id);
+ sfp_module_parse_port(bus, id);
+ sfp_module_parse_may_have_phy(bus, id);
- linkmode_or(support, support, modes);
+ if (quirk && quirk->support)
+ quirk->support(id, &bus->caps);
}
-EXPORT_SYMBOL_GPL(sfp_parse_support);
/**
* sfp_select_interface() - Select appropriate phy_interface_t mode
@@ -794,7 +777,7 @@ int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus);
int ret = 0;
- bus->sfp_quirk = quirk;
+ sfp_init_module(bus, id, quirk);
if (ops && ops->module_insert)
ret = ops->module_insert(bus->upstream, id);
@@ -809,8 +792,6 @@ void sfp_module_remove(struct sfp_bus *bus)
if (ops && ops->module_remove)
ops->module_remove(bus->upstream);
-
- bus->sfp_quirk = NULL;
}
EXPORT_SYMBOL_GPL(sfp_module_remove);
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 7dbcbf0a4ee2..0401fa6b24d2 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -17,7 +17,6 @@
#include <linux/workqueue.h>
#include "sfp.h"
-#include "swphy.h"
enum {
GPIO_MODDEF0,
@@ -221,6 +220,8 @@ static const enum gpiod_flags gpio_flags[] = {
*/
#define SFP_EEPROM_BLOCK_SIZE 16
+#define SFP_POLL_INTERVAL msecs_to_jiffies(100)
+
struct sff_data {
unsigned int gpios;
bool (*module_supported)(const struct sfp_eeprom_id *id);
@@ -234,6 +235,7 @@ struct sfp {
enum mdio_i2c_proto mdio_protocol;
struct phy_device *mod_phy;
const struct sff_data *type;
+ size_t i2c_max_block_size;
size_t i2c_block_size;
u32 max_power_mW;
@@ -298,6 +300,11 @@ struct sfp {
#endif
};
+static void sfp_schedule_poll(struct sfp *sfp)
+{
+ mod_delayed_work(system_percpu_wq, &sfp->poll, SFP_POLL_INTERVAL);
+}
+
static bool sff_module_supported(const struct sfp_eeprom_id *id)
{
return id->base.phys_id == SFF8024_ID_SFF_8472 &&
@@ -360,6 +367,11 @@ static void sfp_fixup_ignore_tx_fault(struct sfp *sfp)
sfp->state_ignore_mask |= SFP_F_TX_FAULT;
}
+static void sfp_fixup_ignore_hw(struct sfp *sfp, unsigned int mask)
+{
+ sfp->state_hw_mask &= ~mask;
+}
+
static void sfp_fixup_nokia(struct sfp *sfp)
{
sfp_fixup_long_startup(sfp);
@@ -385,7 +397,7 @@ static void sfp_fixup_rollball(struct sfp *sfp)
sfp->phy_t_retry = msecs_to_jiffies(1000);
}
-static void sfp_fixup_fs_2_5gt(struct sfp *sfp)
+static void sfp_fixup_rollball_wait4s(struct sfp *sfp)
{
sfp_fixup_rollball(sfp);
@@ -399,7 +411,7 @@ static void sfp_fixup_fs_2_5gt(struct sfp *sfp)
static void sfp_fixup_fs_10gt(struct sfp *sfp)
{
sfp_fixup_10gbaset_30m(sfp);
- sfp_fixup_fs_2_5gt(sfp);
+ sfp_fixup_rollball_wait4s(sfp);
}
static void sfp_fixup_halny_gsfp(struct sfp *sfp)
@@ -408,7 +420,19 @@ static void sfp_fixup_halny_gsfp(struct sfp *sfp)
* these are possibly used for other purposes on this
* module, e.g. a serial port.
*/
- sfp->state_hw_mask &= ~(SFP_F_TX_FAULT | SFP_F_LOS);
+ sfp_fixup_ignore_hw(sfp, SFP_F_TX_FAULT | SFP_F_LOS);
+}
+
+static void sfp_fixup_potron(struct sfp *sfp)
+{
+ /*
+ * The TX_FAULT and LOS pins on this device are used for serial
+ * communication, so ignore them. Additionally, provide extra
+ * time for this device to fully start up.
+ */
+
+ sfp_fixup_long_startup(sfp);
+ sfp_fixup_ignore_hw(sfp, SFP_F_TX_FAULT | SFP_F_LOS);
}
static void sfp_fixup_rollball_cc(struct sfp *sfp)
@@ -422,45 +446,44 @@ static void sfp_fixup_rollball_cc(struct sfp *sfp)
}
static void sfp_quirk_2500basex(const struct sfp_eeprom_id *id,
- unsigned long *modes,
- unsigned long *interfaces)
+ struct sfp_module_caps *caps)
{
- linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, modes);
- __set_bit(PHY_INTERFACE_MODE_2500BASEX, interfaces);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
+ caps->link_modes);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, caps->interfaces);
}
static void sfp_quirk_disable_autoneg(const struct sfp_eeprom_id *id,
- unsigned long *modes,
- unsigned long *interfaces)
+ struct sfp_module_caps *caps)
{
- linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, modes);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, caps->link_modes);
}
static void sfp_quirk_oem_2_5g(const struct sfp_eeprom_id *id,
- unsigned long *modes,
- unsigned long *interfaces)
+ struct sfp_module_caps *caps)
{
/* Copper 2.5G SFP */
- linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, modes);
- __set_bit(PHY_INTERFACE_MODE_2500BASEX, interfaces);
- sfp_quirk_disable_autoneg(id, modes, interfaces);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ caps->link_modes);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, caps->interfaces);
+ sfp_quirk_disable_autoneg(id, caps);
}
static void sfp_quirk_ubnt_uf_instant(const struct sfp_eeprom_id *id,
- unsigned long *modes,
- unsigned long *interfaces)
+ struct sfp_module_caps *caps)
{
/* The Ubiquiti U-Fiber Instant module claims to support all transceiver
* types, including 10G Ethernet, which is not true. So clear all claimed
* modes and set only the one mode the module actually supports: 1000baseX_Full.
*/
- linkmode_zero(modes);
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, modes);
+ linkmode_zero(caps->link_modes);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ caps->link_modes);
}
-#define SFP_QUIRK(_v, _p, _m, _f) \
- { .vendor = _v, .part = _p, .modes = _m, .fixup = _f, }
-#define SFP_QUIRK_M(_v, _p, _m) SFP_QUIRK(_v, _p, _m, NULL)
+#define SFP_QUIRK(_v, _p, _s, _f) \
+ { .vendor = _v, .part = _p, .support = _s, .fixup = _f, }
+#define SFP_QUIRK_S(_v, _p, _s) SFP_QUIRK(_v, _p, _s, NULL)
#define SFP_QUIRK_F(_v, _p, _f) SFP_QUIRK(_v, _p, NULL, _f)
static const struct sfp_quirk sfp_quirks[] = {
@@ -474,14 +497,18 @@ static const struct sfp_quirk sfp_quirks[] = {
SFP_QUIRK("ALCATELLUCENT", "3FE46541AA", sfp_quirk_2500basex,
sfp_fixup_nokia),
+ // FLYPRO SFP-10GT-CS-30M uses Rollball protocol to talk to the PHY.
+ SFP_QUIRK_F("FLYPRO", "SFP-10GT-CS-30M", sfp_fixup_rollball),
+
// Fiberstore SFP-10G-T doesn't identify as copper, uses the Rollball
// protocol to talk to the PHY and needs a 4 sec wait before probing the
// PHY.
SFP_QUIRK_F("FS", "SFP-10G-T", sfp_fixup_fs_10gt),
- // Fiberstore SFP-2.5G-T uses Rollball protocol to talk to the PHY and
- // needs 4 sec wait before probing the PHY.
- SFP_QUIRK_F("FS", "SFP-2.5G-T", sfp_fixup_fs_2_5gt),
+ // Fiberstore SFP-2.5G-T and SFP-10GM-T use the Rollball protocol to talk
+ // to the PHY and need a 4 sec wait before probing the PHY.
+ SFP_QUIRK_F("FS", "SFP-2.5G-T", sfp_fixup_rollball_wait4s),
+ SFP_QUIRK_F("FS", "SFP-10GM-T", sfp_fixup_rollball_wait4s),
// Fiberstore GPON-ONU-34-20BI can operate at 2500base-X, but reports 1.2GBd
// NRZ in their EEPROM
@@ -492,7 +519,7 @@ static const struct sfp_quirk sfp_quirks[] = {
// HG MXPD-483II-F 2.5G supports 2500Base-X, but incorrectly reports
// 2600MBd in their EEPROM
- SFP_QUIRK_M("HG GENUINE", "MXPD-483II", sfp_quirk_2500basex),
+ SFP_QUIRK_S("HG GENUINE", "MXPD-483II", sfp_quirk_2500basex),
// Huawei MA5671A can operate at 2500base-X, but reports 1.2GBd NRZ in
// their EEPROM
@@ -501,20 +528,24 @@ static const struct sfp_quirk sfp_quirks[] = {
// Lantech 8330-262D-E can operate at 2500base-X, but incorrectly reports
// 2500MBd NRZ in their EEPROM
- SFP_QUIRK_M("Lantech", "8330-262D-E", sfp_quirk_2500basex),
+ SFP_QUIRK_S("Lantech", "8330-262D-E", sfp_quirk_2500basex),
- SFP_QUIRK_M("UBNT", "UF-INSTANT", sfp_quirk_ubnt_uf_instant),
+ SFP_QUIRK_S("UBNT", "UF-INSTANT", sfp_quirk_ubnt_uf_instant),
// Walsun HXSX-ATR[CI]-1 don't identify as copper, and use the
// Rollball protocol to talk to the PHY.
SFP_QUIRK_F("Walsun", "HXSX-ATRC-1", sfp_fixup_fs_10gt),
SFP_QUIRK_F("Walsun", "HXSX-ATRI-1", sfp_fixup_fs_10gt),
+ SFP_QUIRK_F("YV", "SFP+ONU-XGSPON", sfp_fixup_potron),
+
// OEM SFP-GE-T is a 1000Base-T module with broken TX_FAULT indicator
SFP_QUIRK_F("OEM", "SFP-GE-T", sfp_fixup_ignore_tx_fault),
SFP_QUIRK_F("OEM", "SFP-10G-T", sfp_fixup_rollball_cc),
- SFP_QUIRK_M("OEM", "SFP-2.5G-T", sfp_quirk_oem_2_5g),
+ SFP_QUIRK_S("OEM", "SFP-2.5G-T", sfp_quirk_oem_2_5g),
+ SFP_QUIRK_S("OEM", "SFP-2.5G-BX10-D", sfp_quirk_2500basex),
+ SFP_QUIRK_S("OEM", "SFP-2.5G-BX10-U", sfp_quirk_2500basex),
SFP_QUIRK_F("OEM", "RTSFP-10", sfp_fixup_rollball_cc),
SFP_QUIRK_F("OEM", "RTSFP-10G", sfp_fixup_rollball_cc),
SFP_QUIRK_F("Turris", "RTSFP-2.5G", sfp_fixup_rollball),
@@ -562,8 +593,6 @@ static const struct sfp_quirk *sfp_lookup_quirk(const struct sfp_eeprom_id *id)
return NULL;
}
-static unsigned long poll_jiffies;
-
static unsigned int sfp_gpio_get_state(struct sfp *sfp)
{
unsigned int i, state, v;
@@ -688,14 +717,71 @@ static int sfp_i2c_write(struct sfp *sfp, bool a2, u8 dev_addr, void *buf,
return ret == ARRAY_SIZE(msgs) ? len : 0;
}
-static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c)
+static int sfp_smbus_byte_read(struct sfp *sfp, bool a2, u8 dev_addr,
+ void *buf, size_t len)
{
- if (!i2c_check_functionality(i2c, I2C_FUNC_I2C))
- return -EINVAL;
+ union i2c_smbus_data smbus_data;
+ u8 bus_addr = a2 ? 0x51 : 0x50;
+ u8 *data = buf;
+ int ret;
+
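+ /* The adapter can only do SMBus byte-data transfers, so read the
+ * requested range one byte at a time.
+ */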
+ while (len) {
+ ret = i2c_smbus_xfer(sfp->i2c, bus_addr, 0,
+ I2C_SMBUS_READ, dev_addr,
+ I2C_SMBUS_BYTE_DATA, &smbus_data);
+ if (ret < 0)
+ return ret;
+
+ *data = smbus_data.byte;
+ len--;
+ data++;
+ dev_addr++;
+ }
+
+ return data - (u8 *)buf;
+}
+
+static int sfp_smbus_byte_write(struct sfp *sfp, bool a2, u8 dev_addr,
+ void *buf, size_t len)
+{
+ union i2c_smbus_data smbus_data;
+ u8 bus_addr = a2 ? 0x51 : 0x50;
+ u8 *data = buf;
+ int ret;
+
+ while (len) {
+ smbus_data.byte = *data;
+ ret = i2c_smbus_xfer(sfp->i2c, bus_addr, 0,
+ I2C_SMBUS_WRITE, dev_addr,
+ I2C_SMBUS_BYTE_DATA, &smbus_data);
+ if (ret)
+ return ret;
+
+ len--;
+ data++;
+ dev_addr++;
+ }
+
+ return 0;
+}
+
+static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c)
+{
sfp->i2c = i2c;
- sfp->read = sfp_i2c_read;
- sfp->write = sfp_i2c_write;
+
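+ /* Prefer raw I2C transfers; fall back to single-byte SMBus accesses when
+ * the adapter cannot issue full I2C transactions.
+ */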
+ if (i2c_check_functionality(i2c, I2C_FUNC_I2C)) {
+ sfp->read = sfp_i2c_read;
+ sfp->write = sfp_i2c_write;
+ sfp->i2c_max_block_size = SFP_EEPROM_BLOCK_SIZE;
+ } else if (i2c_check_functionality(i2c, I2C_FUNC_SMBUS_BYTE_DATA)) {
+ sfp->read = sfp_smbus_byte_read;
+ sfp->write = sfp_smbus_byte_write;
+ sfp->i2c_max_block_size = 1;
+ } else {
+ sfp->i2c = NULL;
+ return -EINVAL;
+ }
return 0;
}
@@ -829,7 +915,7 @@ static void sfp_soft_start_poll(struct sfp *sfp)
if (sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT) &&
!sfp->need_poll)
- mod_delayed_work(system_wq, &sfp->poll, poll_jiffies);
+ sfp_schedule_poll(sfp);
mutex_unlock(&sfp->st_mutex);
}
@@ -1591,7 +1677,7 @@ static void sfp_hwmon_probe(struct work_struct *work)
*/
if (sfp->i2c_block_size < 2) {
dev_info(sfp->dev,
- "skipping hwmon device registration due to broken EEPROM\n");
+ "skipping hwmon device registration\n");
dev_info(sfp->dev,
"diagnostic EEPROM area cannot be read atomically to guarantee data coherency\n");
return;
@@ -1600,7 +1686,7 @@ static void sfp_hwmon_probe(struct work_struct *work)
err = sfp_read(sfp, true, 0, &sfp->diag, sizeof(sfp->diag));
if (err < 0) {
if (sfp->hwmon_tries--) {
- mod_delayed_work(system_wq, &sfp->hwmon_probe,
+ mod_delayed_work(system_percpu_wq, &sfp->hwmon_probe,
T_PROBE_RETRY_SLOW);
} else {
dev_warn(sfp->dev, "hwmon probe failed: %pe\n",
@@ -1627,7 +1713,7 @@ static void sfp_hwmon_probe(struct work_struct *work)
static int sfp_hwmon_insert(struct sfp *sfp)
{
if (sfp->have_a2 && sfp->id.ext.diagmon & SFP_DIAGMON_DDM) {
- mod_delayed_work(system_wq, &sfp->hwmon_probe, 1);
+ mod_delayed_work(system_percpu_wq, &sfp->hwmon_probe, 1);
sfp->hwmon_tries = R_PROBE_RETRY_SLOW;
}
@@ -2198,7 +2284,7 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
u8 check;
int ret;
- sfp->i2c_block_size = SFP_EEPROM_BLOCK_SIZE;
+ sfp->i2c_block_size = sfp->i2c_max_block_size;
ret = sfp_read(sfp, false, 0, &id.base, sizeof(id.base));
if (ret < 0) {
@@ -2481,7 +2567,7 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event)
/* Force a poll to re-read the hardware signal state after
* sfp_sm_mod_probe() changed state_hw_mask.
*/
- mod_delayed_work(system_wq, &sfp->poll, 1);
+ mod_delayed_work(system_percpu_wq, &sfp->poll, 1);
err = sfp_hwmon_insert(sfp);
if (err)
@@ -2926,7 +3012,7 @@ static void sfp_poll(struct work_struct *work)
// it's unimportant if we race while reading this.
if (sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT) ||
sfp->need_poll)
- mod_delayed_work(system_wq, &sfp->poll, poll_jiffies);
+ sfp_schedule_poll(sfp);
}
static struct sfp *sfp_alloc(struct device *dev)
@@ -2938,7 +3024,6 @@ static struct sfp *sfp_alloc(struct device *dev)
return ERR_PTR(-ENOMEM);
sfp->dev = dev;
- sfp->i2c_block_size = SFP_EEPROM_BLOCK_SIZE;
mutex_init(&sfp->sm_mutex);
mutex_init(&sfp->st_mutex);
@@ -3097,7 +3182,7 @@ static int sfp_probe(struct platform_device *pdev)
}
if (sfp->need_poll)
- mod_delayed_work(system_wq, &sfp->poll, poll_jiffies);
+ sfp_schedule_poll(sfp);
/* We could have an issue in cases no Tx disable pin is available or
* wired as modules using a laser as their light source will continue to
@@ -3112,6 +3197,15 @@ static int sfp_probe(struct platform_device *pdev)
if (!sfp->sfp_bus)
return -ENOMEM;
+ if (sfp->i2c_max_block_size < 2)
+ dev_warn(sfp->dev,
+ "Please note:\n"
+ "This SFP cage is accessed via an SMBus only capable of single byte\n"
+ "transactions. Some features are disabled, others may be unreliable or\n"
+ "sporadically fail. Use with caution. There is nothing that the kernel\n"
+ "or community can do to fix it; the kernel will try its best. Please\n"
+ "verify any problems on hardware that supports multi-byte I2C transactions.\n");
+
sfp_debugfs_init(sfp);
return 0;
@@ -3155,19 +3249,7 @@ static struct platform_driver sfp_driver = {
},
};
-static int sfp_init(void)
-{
- poll_jiffies = msecs_to_jiffies(100);
-
- return platform_driver_register(&sfp_driver);
-}
-module_init(sfp_init);
-
-static void sfp_exit(void)
-{
- platform_driver_unregister(&sfp_driver);
-}
-module_exit(sfp_exit);
+module_platform_driver(sfp_driver);
MODULE_ALIAS("platform:sfp");
MODULE_AUTHOR("Russell King");
diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h
index 1fd097dccb9f..879dff7afe6a 100644
--- a/drivers/net/phy/sfp.h
+++ b/drivers/net/phy/sfp.h
@@ -9,8 +9,8 @@ struct sfp;
struct sfp_quirk {
const char *vendor;
const char *part;
- void (*modes)(const struct sfp_eeprom_id *id, unsigned long *modes,
- unsigned long *interfaces);
+ void (*support)(const struct sfp_eeprom_id *id,
+ struct sfp_module_caps *caps);
void (*fixup)(struct sfp *sfp);
};
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index e1853599d9ba..48487149c225 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -155,10 +155,29 @@ static int smsc_phy_reset(struct phy_device *phydev)
static int lan87xx_config_aneg(struct phy_device *phydev)
{
- int rc;
+ u8 mdix_ctrl;
int val;
+ int rc;
- switch (phydev->mdix_ctrl) {
+ /* When auto-negotiation is disabled (forced mode), the PHY's
+ * Auto-MDIX will continue toggling the TX/RX pairs.
+ *
+ * To establish a stable link, we must select a fixed MDI mode.
+ * If the user has not specified a fixed MDI mode (i.e., mdix_ctrl is
+ * 'auto'), we default to ETH_TP_MDI. This choice of ETH_TP_MDI mode
+ * mirrors the behavior the hardware would exhibit if the AUTOMDIX_EN
+ * strap were configured for a fixed MDI connection.
+ */
+ if (phydev->autoneg == AUTONEG_DISABLE) {
+ if (phydev->mdix_ctrl == ETH_TP_MDI_AUTO)
+ mdix_ctrl = ETH_TP_MDI;
+ else
+ mdix_ctrl = phydev->mdix_ctrl;
+ } else {
+ mdix_ctrl = phydev->mdix_ctrl;
+ }
+
+ switch (mdix_ctrl) {
case ETH_TP_MDI:
val = SPECIAL_CTRL_STS_OVRRD_AMDIX_;
break;
@@ -167,7 +186,8 @@ static int lan87xx_config_aneg(struct phy_device *phydev)
SPECIAL_CTRL_STS_AMDIX_STATE_;
break;
case ETH_TP_MDI_AUTO:
- val = SPECIAL_CTRL_STS_AMDIX_ENABLE_;
+ val = SPECIAL_CTRL_STS_OVRRD_AMDIX_ |
+ SPECIAL_CTRL_STS_AMDIX_ENABLE_;
break;
default:
return genphy_config_aneg(phydev);
@@ -183,7 +203,7 @@ static int lan87xx_config_aneg(struct phy_device *phydev)
rc |= val;
phy_write(phydev, SPECIAL_CTRL_STS, rc);
- phydev->mdix = phydev->mdix_ctrl;
+ phydev->mdix = mdix_ctrl;
return genphy_config_aneg(phydev);
}
@@ -261,6 +281,33 @@ int lan87xx_read_status(struct phy_device *phydev)
}
EXPORT_SYMBOL_GPL(lan87xx_read_status);
+static int lan87xx_phy_config_init(struct phy_device *phydev)
+{
+ int rc;
+
+ /* The LAN87xx PHY's initial MDI-X mode is determined by the AUTOMDIX_EN
+ * hardware strap, but the driver cannot read the strap's status. This
+ * creates an unpredictable initial state.
+ *
+ * To ensure consistent and reliable behavior across all boards,
+ * override the strap configuration on initialization and force the PHY
+ * into a known state with Auto-MDIX enabled, which is the expected
+ * default for modern hardware.
+ */
+ rc = phy_modify(phydev, SPECIAL_CTRL_STS,
+ SPECIAL_CTRL_STS_OVRRD_AMDIX_ |
+ SPECIAL_CTRL_STS_AMDIX_ENABLE_ |
+ SPECIAL_CTRL_STS_AMDIX_STATE_,
+ SPECIAL_CTRL_STS_OVRRD_AMDIX_ |
+ SPECIAL_CTRL_STS_AMDIX_ENABLE_);
+ if (rc < 0)
+ return rc;
+
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+
+ return smsc_phy_config_init(phydev);
+}
+
static int lan874x_phy_config_init(struct phy_device *phydev)
{
u16 val;
@@ -695,7 +742,7 @@ static struct phy_driver smsc_phy_driver[] = {
/* basic functions */
.read_status = lan87xx_read_status,
- .config_init = smsc_phy_config_init,
+ .config_init = lan87xx_phy_config_init,
.soft_reset = smsc_phy_reset,
.config_aneg = lan87xx_config_aneg,
@@ -738,6 +785,7 @@ static struct phy_driver smsc_phy_driver[] = {
/* PHY_BASIC_FEATURES */
+ .flags = PHY_RST_AFTER_CLK_EN,
.probe = smsc_phy_probe,
/* basic functions */
@@ -838,7 +886,7 @@ MODULE_DESCRIPTION("SMSC PHY driver");
MODULE_AUTHOR("Herbert Valerio Riedel");
MODULE_LICENSE("GPL");
-static struct mdio_device_id __maybe_unused smsc_tbl[] = {
+static const struct mdio_device_id __maybe_unused smsc_tbl[] = {
{ 0x0007c0a0, 0xfffffff0 },
{ 0x0007c0b0, 0xfffffff0 },
{ 0x0007c0c0, 0xfffffff0 },
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
deleted file mode 100644
index 7196e927c2cd..000000000000
--- a/drivers/net/phy/spi_ks8995.c
+++ /dev/null
@@ -1,506 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * SPI driver for Micrel/Kendin KS8995M and KSZ8864RMN ethernet switches
- *
- * Copyright (C) 2008 Gabor Juhos <juhosg at openwrt.org>
- *
- * This file was based on: drivers/spi/at25.c
- * Copyright (C) 2006 David Brownell
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/gpio/consumer.h>
-#include <linux/of.h>
-
-#include <linux/spi/spi.h>
-
-#define DRV_VERSION "0.1.1"
-#define DRV_DESC "Micrel KS8995 Ethernet switch SPI driver"
-
-/* ------------------------------------------------------------------------ */
-
-#define KS8995_REG_ID0 0x00 /* Chip ID0 */
-#define KS8995_REG_ID1 0x01 /* Chip ID1 */
-
-#define KS8995_REG_GC0 0x02 /* Global Control 0 */
-#define KS8995_REG_GC1 0x03 /* Global Control 1 */
-#define KS8995_REG_GC2 0x04 /* Global Control 2 */
-#define KS8995_REG_GC3 0x05 /* Global Control 3 */
-#define KS8995_REG_GC4 0x06 /* Global Control 4 */
-#define KS8995_REG_GC5 0x07 /* Global Control 5 */
-#define KS8995_REG_GC6 0x08 /* Global Control 6 */
-#define KS8995_REG_GC7 0x09 /* Global Control 7 */
-#define KS8995_REG_GC8 0x0a /* Global Control 8 */
-#define KS8995_REG_GC9 0x0b /* Global Control 9 */
-
-#define KS8995_REG_PC(p, r) ((0x10 * p) + r) /* Port Control */
-#define KS8995_REG_PS(p, r) ((0x10 * p) + r + 0xe) /* Port Status */
-
-#define KS8995_REG_TPC0 0x60 /* TOS Priority Control 0 */
-#define KS8995_REG_TPC1 0x61 /* TOS Priority Control 1 */
-#define KS8995_REG_TPC2 0x62 /* TOS Priority Control 2 */
-#define KS8995_REG_TPC3 0x63 /* TOS Priority Control 3 */
-#define KS8995_REG_TPC4 0x64 /* TOS Priority Control 4 */
-#define KS8995_REG_TPC5 0x65 /* TOS Priority Control 5 */
-#define KS8995_REG_TPC6 0x66 /* TOS Priority Control 6 */
-#define KS8995_REG_TPC7 0x67 /* TOS Priority Control 7 */
-
-#define KS8995_REG_MAC0 0x68 /* MAC address 0 */
-#define KS8995_REG_MAC1 0x69 /* MAC address 1 */
-#define KS8995_REG_MAC2 0x6a /* MAC address 2 */
-#define KS8995_REG_MAC3 0x6b /* MAC address 3 */
-#define KS8995_REG_MAC4 0x6c /* MAC address 4 */
-#define KS8995_REG_MAC5 0x6d /* MAC address 5 */
-
-#define KS8995_REG_IAC0 0x6e /* Indirect Access Control 0 */
-#define KS8995_REG_IAC1 0x6f /* Indirect Access Control 0 */
-#define KS8995_REG_IAD7 0x70 /* Indirect Access Data 7 */
-#define KS8995_REG_IAD6 0x71 /* Indirect Access Data 6 */
-#define KS8995_REG_IAD5 0x72 /* Indirect Access Data 5 */
-#define KS8995_REG_IAD4 0x73 /* Indirect Access Data 4 */
-#define KS8995_REG_IAD3 0x74 /* Indirect Access Data 3 */
-#define KS8995_REG_IAD2 0x75 /* Indirect Access Data 2 */
-#define KS8995_REG_IAD1 0x76 /* Indirect Access Data 1 */
-#define KS8995_REG_IAD0 0x77 /* Indirect Access Data 0 */
-
-#define KSZ8864_REG_ID1 0xfe /* Chip ID in bit 7 */
-
-#define KS8995_REGS_SIZE 0x80
-#define KSZ8864_REGS_SIZE 0x100
-#define KSZ8795_REGS_SIZE 0x100
-
-#define ID1_CHIPID_M 0xf
-#define ID1_CHIPID_S 4
-#define ID1_REVISION_M 0x7
-#define ID1_REVISION_S 1
-#define ID1_START_SW 1 /* start the switch */
-
-#define FAMILY_KS8995 0x95
-#define FAMILY_KSZ8795 0x87
-#define CHIPID_M 0
-#define KS8995_CHIP_ID 0x00
-#define KSZ8864_CHIP_ID 0x01
-#define KSZ8795_CHIP_ID 0x09
-
-#define KS8995_CMD_WRITE 0x02U
-#define KS8995_CMD_READ 0x03U
-
-#define KS8995_RESET_DELAY 10 /* usec */
-
-enum ks8995_chip_variant {
- ks8995,
- ksz8864,
- ksz8795,
- max_variant
-};
-
-struct ks8995_chip_params {
- char *name;
- int family_id;
- int chip_id;
- int regs_size;
- int addr_width;
- int addr_shift;
-};
-
-static const struct ks8995_chip_params ks8995_chip[] = {
- [ks8995] = {
- .name = "KS8995MA",
- .family_id = FAMILY_KS8995,
- .chip_id = KS8995_CHIP_ID,
- .regs_size = KS8995_REGS_SIZE,
- .addr_width = 8,
- .addr_shift = 0,
- },
- [ksz8864] = {
- .name = "KSZ8864RMN",
- .family_id = FAMILY_KS8995,
- .chip_id = KSZ8864_CHIP_ID,
- .regs_size = KSZ8864_REGS_SIZE,
- .addr_width = 8,
- .addr_shift = 0,
- },
- [ksz8795] = {
- .name = "KSZ8795CLX",
- .family_id = FAMILY_KSZ8795,
- .chip_id = KSZ8795_CHIP_ID,
- .regs_size = KSZ8795_REGS_SIZE,
- .addr_width = 12,
- .addr_shift = 1,
- },
-};
-
-struct ks8995_switch {
- struct spi_device *spi;
- struct mutex lock;
- struct gpio_desc *reset_gpio;
- struct bin_attribute regs_attr;
- const struct ks8995_chip_params *chip;
- int revision_id;
-};
-
-static const struct spi_device_id ks8995_id[] = {
- {"ks8995", ks8995},
- {"ksz8864", ksz8864},
- {"ksz8795", ksz8795},
- { }
-};
-MODULE_DEVICE_TABLE(spi, ks8995_id);
-
-static const struct of_device_id ks8895_spi_of_match[] = {
- { .compatible = "micrel,ks8995" },
- { .compatible = "micrel,ksz8864" },
- { .compatible = "micrel,ksz8795" },
- { },
-};
-MODULE_DEVICE_TABLE(of, ks8895_spi_of_match);
-
-static inline u8 get_chip_id(u8 val)
-{
- return (val >> ID1_CHIPID_S) & ID1_CHIPID_M;
-}
-
-static inline u8 get_chip_rev(u8 val)
-{
- return (val >> ID1_REVISION_S) & ID1_REVISION_M;
-}
-
-/* create_spi_cmd - create a chip specific SPI command header
- * @ks: pointer to switch instance
- * @cmd: SPI command for switch
- * @address: register address for command
- *
- * Different chip families use different bit pattern to address the switches
- * registers:
- *
- * KS8995: 8bit command + 8bit address
- * KSZ8795: 3bit command + 12bit address + 1bit TR (?)
- */
-static inline __be16 create_spi_cmd(struct ks8995_switch *ks, int cmd,
- unsigned address)
-{
- u16 result = cmd;
-
- /* make room for address (incl. address shift) */
- result <<= ks->chip->addr_width + ks->chip->addr_shift;
- /* add address */
- result |= address << ks->chip->addr_shift;
- /* SPI protocol needs big endian */
- return cpu_to_be16(result);
-}
-/* ------------------------------------------------------------------------ */
-static int ks8995_read(struct ks8995_switch *ks, char *buf,
- unsigned offset, size_t count)
-{
- __be16 cmd;
- struct spi_transfer t[2];
- struct spi_message m;
- int err;
-
- cmd = create_spi_cmd(ks, KS8995_CMD_READ, offset);
- spi_message_init(&m);
-
- memset(&t, 0, sizeof(t));
-
- t[0].tx_buf = &cmd;
- t[0].len = sizeof(cmd);
- spi_message_add_tail(&t[0], &m);
-
- t[1].rx_buf = buf;
- t[1].len = count;
- spi_message_add_tail(&t[1], &m);
-
- mutex_lock(&ks->lock);
- err = spi_sync(ks->spi, &m);
- mutex_unlock(&ks->lock);
-
- return err ? err : count;
-}
-
-static int ks8995_write(struct ks8995_switch *ks, char *buf,
- unsigned offset, size_t count)
-{
- __be16 cmd;
- struct spi_transfer t[2];
- struct spi_message m;
- int err;
-
- cmd = create_spi_cmd(ks, KS8995_CMD_WRITE, offset);
- spi_message_init(&m);
-
- memset(&t, 0, sizeof(t));
-
- t[0].tx_buf = &cmd;
- t[0].len = sizeof(cmd);
- spi_message_add_tail(&t[0], &m);
-
- t[1].tx_buf = buf;
- t[1].len = count;
- spi_message_add_tail(&t[1], &m);
-
- mutex_lock(&ks->lock);
- err = spi_sync(ks->spi, &m);
- mutex_unlock(&ks->lock);
-
- return err ? err : count;
-}
-
-static inline int ks8995_read_reg(struct ks8995_switch *ks, u8 addr, u8 *buf)
-{
- return ks8995_read(ks, buf, addr, 1) != 1;
-}
-
-static inline int ks8995_write_reg(struct ks8995_switch *ks, u8 addr, u8 val)
-{
- char buf = val;
-
- return ks8995_write(ks, &buf, addr, 1) != 1;
-}
-
-/* ------------------------------------------------------------------------ */
-
-static int ks8995_stop(struct ks8995_switch *ks)
-{
- return ks8995_write_reg(ks, KS8995_REG_ID1, 0);
-}
-
-static int ks8995_start(struct ks8995_switch *ks)
-{
- return ks8995_write_reg(ks, KS8995_REG_ID1, 1);
-}
-
-static int ks8995_reset(struct ks8995_switch *ks)
-{
- int err;
-
- err = ks8995_stop(ks);
- if (err)
- return err;
-
- udelay(KS8995_RESET_DELAY);
-
- return ks8995_start(ks);
-}
-
-static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
-{
- struct device *dev;
- struct ks8995_switch *ks8995;
-
- dev = kobj_to_dev(kobj);
- ks8995 = dev_get_drvdata(dev);
-
- return ks8995_read(ks8995, buf, off, count);
-}
-
-static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
-{
- struct device *dev;
- struct ks8995_switch *ks8995;
-
- dev = kobj_to_dev(kobj);
- ks8995 = dev_get_drvdata(dev);
-
- return ks8995_write(ks8995, buf, off, count);
-}
-
-/* ks8995_get_revision - get chip revision
- * @ks: pointer to switch instance
- *
- * Verify chip family and id and get chip revision.
- */
-static int ks8995_get_revision(struct ks8995_switch *ks)
-{
- int err;
- u8 id0, id1, ksz8864_id;
-
- /* read family id */
- err = ks8995_read_reg(ks, KS8995_REG_ID0, &id0);
- if (err) {
- err = -EIO;
- goto err_out;
- }
-
- /* verify family id */
- if (id0 != ks->chip->family_id) {
- dev_err(&ks->spi->dev, "chip family id mismatch: expected 0x%02x but 0x%02x read\n",
- ks->chip->family_id, id0);
- err = -ENODEV;
- goto err_out;
- }
-
- switch (ks->chip->family_id) {
- case FAMILY_KS8995:
- /* try reading chip id at CHIP ID1 */
- err = ks8995_read_reg(ks, KS8995_REG_ID1, &id1);
- if (err) {
- err = -EIO;
- goto err_out;
- }
-
- /* verify chip id */
- if ((get_chip_id(id1) == CHIPID_M) &&
- (get_chip_id(id1) == ks->chip->chip_id)) {
- /* KS8995MA */
- ks->revision_id = get_chip_rev(id1);
- } else if (get_chip_id(id1) != CHIPID_M) {
- /* KSZ8864RMN */
- err = ks8995_read_reg(ks, KS8995_REG_ID1, &ksz8864_id);
- if (err) {
- err = -EIO;
- goto err_out;
- }
-
- if ((ksz8864_id & 0x80) &&
- (ks->chip->chip_id == KSZ8864_CHIP_ID)) {
- ks->revision_id = get_chip_rev(id1);
- }
-
- } else {
- dev_err(&ks->spi->dev, "unsupported chip id for KS8995 family: 0x%02x\n",
- id1);
- err = -ENODEV;
- }
- break;
- case FAMILY_KSZ8795:
- /* try reading chip id at CHIP ID1 */
- err = ks8995_read_reg(ks, KS8995_REG_ID1, &id1);
- if (err) {
- err = -EIO;
- goto err_out;
- }
-
- if (get_chip_id(id1) == ks->chip->chip_id) {
- ks->revision_id = get_chip_rev(id1);
- } else {
- dev_err(&ks->spi->dev, "unsupported chip id for KSZ8795 family: 0x%02x\n",
- id1);
- err = -ENODEV;
- }
- break;
- default:
- dev_err(&ks->spi->dev, "unsupported family id: 0x%02x\n", id0);
- err = -ENODEV;
- break;
- }
-err_out:
- return err;
-}
-
-static const struct bin_attribute ks8995_registers_attr = {
- .attr = {
- .name = "registers",
- .mode = 0600,
- },
- .size = KS8995_REGS_SIZE,
- .read = ks8995_registers_read,
- .write = ks8995_registers_write,
-};
-
-/* ------------------------------------------------------------------------ */
-static int ks8995_probe(struct spi_device *spi)
-{
- struct ks8995_switch *ks;
- int err;
- int variant = spi_get_device_id(spi)->driver_data;
-
- if (variant >= max_variant) {
- dev_err(&spi->dev, "bad chip variant %d\n", variant);
- return -ENODEV;
- }
-
- ks = devm_kzalloc(&spi->dev, sizeof(*ks), GFP_KERNEL);
- if (!ks)
- return -ENOMEM;
-
- mutex_init(&ks->lock);
- ks->spi = spi;
- ks->chip = &ks8995_chip[variant];
-
- ks->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset",
- GPIOD_OUT_HIGH);
- err = PTR_ERR_OR_ZERO(ks->reset_gpio);
- if (err) {
- dev_err(&spi->dev,
- "failed to get reset gpio: %d\n", err);
- return err;
- }
-
- err = gpiod_set_consumer_name(ks->reset_gpio, "switch-reset");
- if (err)
- return err;
-
- /* de-assert switch reset */
- /* FIXME: this likely requires a delay */
- gpiod_set_value_cansleep(ks->reset_gpio, 0);
-
- spi_set_drvdata(spi, ks);
-
- spi->mode = SPI_MODE_0;
- spi->bits_per_word = 8;
- err = spi_setup(spi);
- if (err) {
- dev_err(&spi->dev, "spi_setup failed, err=%d\n", err);
- return err;
- }
-
- err = ks8995_get_revision(ks);
- if (err)
- return err;
-
- memcpy(&ks->regs_attr, &ks8995_registers_attr, sizeof(ks->regs_attr));
- ks->regs_attr.size = ks->chip->regs_size;
-
- err = ks8995_reset(ks);
- if (err)
- return err;
-
- sysfs_attr_init(&ks->regs_attr.attr);
- err = sysfs_create_bin_file(&spi->dev.kobj, &ks->regs_attr);
- if (err) {
- dev_err(&spi->dev, "unable to create sysfs file, err=%d\n",
- err);
- return err;
- }
-
- dev_info(&spi->dev, "%s device found, Chip ID:%x, Revision:%x\n",
- ks->chip->name, ks->chip->chip_id, ks->revision_id);
-
- return 0;
-}
-
-static void ks8995_remove(struct spi_device *spi)
-{
- struct ks8995_switch *ks = spi_get_drvdata(spi);
-
- sysfs_remove_bin_file(&spi->dev.kobj, &ks->regs_attr);
-
- /* assert reset */
- gpiod_set_value_cansleep(ks->reset_gpio, 1);
-}
-
-/* ------------------------------------------------------------------------ */
-static struct spi_driver ks8995_driver = {
- .driver = {
- .name = "spi-ks8995",
- .of_match_table = ks8895_spi_of_match,
- },
- .probe = ks8995_probe,
- .remove = ks8995_remove,
- .id_table = ks8995_id,
-};
-
-module_spi_driver(ks8995_driver);
-
-MODULE_DESCRIPTION(DRV_DESC);
-MODULE_VERSION(DRV_VERSION);
-MODULE_AUTHOR("Gabor Juhos <juhosg at openwrt.org>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index 309e4c3496c4..d4835d4c50e0 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -124,7 +124,7 @@ static struct phy_driver ste10xp_pdriver[] = {
module_phy_driver(ste10xp_pdriver);
-static struct mdio_device_id __maybe_unused ste10Xp_tbl[] = {
+static const struct mdio_device_id __maybe_unused ste10Xp_tbl[] = {
{ STE101P_PHY_ID, 0xfffffff0 },
{ STE100P_PHY_ID, 0xffffffff },
{ }
diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c
index 8057ea8dbc21..46c5ff7d7b56 100644
--- a/drivers/net/phy/teranetics.c
+++ b/drivers/net/phy/teranetics.c
@@ -67,7 +67,8 @@ static int teranetics_read_status(struct phy_device *phydev)
return 0;
}
-static int teranetics_match_phy_device(struct phy_device *phydev)
+static int teranetics_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return phydev->c45_ids.device_ids[3] == PHY_ID_TN2020;
}
@@ -87,7 +88,7 @@ static struct phy_driver teranetics_driver[] = {
module_phy_driver(teranetics_driver);
-static struct mdio_device_id __maybe_unused teranetics_tbl[] = {
+static const struct mdio_device_id __maybe_unused teranetics_tbl[] = {
{ PHY_ID_TN2020, 0xffffffff },
{ }
};
diff --git a/drivers/net/phy/uPD60620.c b/drivers/net/phy/uPD60620.c
index 38834347a427..900cb756c366 100644
--- a/drivers/net/phy/uPD60620.c
+++ b/drivers/net/phy/uPD60620.c
@@ -90,7 +90,7 @@ static struct phy_driver upd60620_driver[1] = { {
module_phy_driver(upd60620_driver);
-static struct mdio_device_id __maybe_unused upd60620_tbl[] = {
+static const struct mdio_device_id __maybe_unused upd60620_tbl[] = {
{ UPD60620_PHY_ID, 0xfffffffe },
{ }
};
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 2377179de017..b1b7bbba284e 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -674,7 +674,7 @@ static struct phy_driver vsc82xx_driver[] = {
module_phy_driver(vsc82xx_driver);
-static struct mdio_device_id __maybe_unused vitesse_tbl[] = {
+static const struct mdio_device_id __maybe_unused vitesse_tbl[] = {
{ PHY_ID_VSC8234, 0x000ffff0 },
{ PHY_ID_VSC8244, 0x000fffc0 },
{ PHY_ID_VSC8572, 0x000ffff0 },
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
index 7c51daecf18e..2024d8ef36d9 100644
--- a/drivers/net/phy/xilinx_gmii2rgmii.c
+++ b/drivers/net/phy/xilinx_gmii2rgmii.c
@@ -64,15 +64,16 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev)
return 0;
}
-static int xgmiitorgmii_set_loopback(struct phy_device *phydev, bool enable)
+static int xgmiitorgmii_set_loopback(struct phy_device *phydev, bool enable,
+ int speed)
{
struct gmii2rgmii *priv = mdiodev_get_drvdata(&phydev->mdio);
int err;
if (priv->phy_drv->set_loopback)
- err = priv->phy_drv->set_loopback(phydev, enable);
+ err = priv->phy_drv->set_loopback(phydev, enable, speed);
else
- err = genphy_loopback(phydev, enable);
+ err = genphy_loopback(phydev, enable, speed);
if (err < 0)
return err;
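For context (illustrative, not part of the patch): set_loopback now also carries the speed to use while looped back, as the xgmiitorgmii wrapper above shows. A driver with no speed-specific setup can simply forward both arguments to the generic helper; the callback name below is invented for the example:

#include <linux/phy.h>

static int example_phy_set_loopback(struct phy_device *phydev, bool enable,
                                    int speed)
{
        /* no device-specific work needed here; let the generic helper
         * configure (or tear down) loopback at the requested speed
         */
        return genphy_loopback(phydev, enable, speed);
}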
diff --git a/drivers/net/ppp/Kconfig b/drivers/net/ppp/Kconfig
index 8c9ed1889d1a..a1806b4b84be 100644
--- a/drivers/net/ppp/Kconfig
+++ b/drivers/net/ppp/Kconfig
@@ -85,9 +85,8 @@ config PPP_FILTER
config PPP_MPPE
tristate "PPP MPPE compression (encryption)"
depends on PPP
- select CRYPTO
- select CRYPTO_SHA1
select CRYPTO_LIB_ARC4
+ select CRYPTO_LIB_SHA1
help
Support for the MPPE Encryption protocol, as employed by the
Microsoft Point-to-Point Tunneling Protocol.
diff --git a/drivers/net/ppp/bsd_comp.c b/drivers/net/ppp/bsd_comp.c
index 55954594e157..f385b759d5cf 100644
--- a/drivers/net/ppp/bsd_comp.c
+++ b/drivers/net/ppp/bsd_comp.c
@@ -406,7 +406,7 @@ static void *bsd_alloc (unsigned char *options, int opt_len, int decomp)
* Allocate space for the dictionary. This may be more than one page in
* length.
*/
- db->dict = vmalloc(array_size(hsize, sizeof(struct bsd_dict)));
+ db->dict = vmalloc_array(hsize, sizeof(struct bsd_dict));
if (!db->dict)
{
bsd_free (db);
@@ -425,7 +425,7 @@ static void *bsd_alloc (unsigned char *options, int opt_len, int decomp)
*/
else
{
- db->lens = vmalloc(array_size(sizeof(db->lens[0]), (maxmaxcode + 1)));
+ db->lens = vmalloc_array(maxmaxcode + 1, sizeof(db->lens[0]));
if (!db->lens)
{
bsd_free (db);
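Illustrative note on the conversion above: vmalloc_array() performs the same overflow-checked sizing as vmalloc(array_size(...)) but states the intent directly; if n * size would overflow size_t, the allocation fails instead of silently wrapping to a too-small buffer. A minimal sketch (helper name invented for the example):

#include <linux/vmalloc.h>

static unsigned short *alloc_code_lengths(unsigned int maxmaxcode)
{
        /* returns NULL on allocation failure *or* on size overflow */
        return vmalloc_array(maxmaxcode + 1, sizeof(unsigned short));
}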
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 4583e15ad03a..f9f0f16c41d1 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -33,6 +33,7 @@
#include <linux/ppp_channel.h>
#include <linux/ppp-comp.h>
#include <linux/skbuff.h>
+#include <linux/rculist.h>
#include <linux/rtnetlink.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
@@ -45,6 +46,7 @@
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/unaligned.h>
+#include <net/netdev_lock.h>
#include <net/slhc_vj.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
@@ -72,6 +74,17 @@
#define PPP_PROTO_LEN 2
#define PPP_LCP_HDRLEN 4
+/* The filter instructions generated by libpcap are constructed
+ * assuming a four-byte PPP header on each packet, where the last
+ * 2 bytes are the protocol field defined in the RFC and the first
+ * byte of the first 2 bytes indicates the direction.
+ * The second byte is currently unused, but we still need to initialize
+ * it to prevent crafted BPF programs from reading them which would
+ * cause reading of uninitialized data.
+ */
+#define PPP_FILTER_OUTBOUND_TAG 0x0100
+#define PPP_FILTER_INBOUND_TAG 0x0000
+
/*
* An instance of /dev/ppp can be associated with either a ppp
* interface unit or a ppp channel. In both cases, file->private_data
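Illustrative sketch, not part of the patch: the direction tags defined above are pushed in front of the protocol field only for the duration of the filter run, so the BPF program sees the four-byte pseudo header libpcap compiled it against, and are popped again before the frame moves on. The helper name is invented; the calls mirror the hunks further down:

static bool ppp_frame_passes_filter(struct ppp *ppp, struct sk_buff *skb,
                                    u16 direction_tag)
{
        bool pass = true;

        /* prepend the two tag bytes the filter program expects */
        *(__be16 *)skb_push(skb, 2) = htons(direction_tag);
        if (ppp->pass_filter && bpf_prog_run(ppp->pass_filter, skb) == 0)
                pass = false;
        /* remove the pseudo header again before the frame continues */
        skb_pull(skb, 2);

        return pass;
}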
@@ -95,16 +108,9 @@ struct ppp_file {
#define PF_TO_PPP(pf) PF_TO_X(pf, struct ppp)
#define PF_TO_CHANNEL(pf) PF_TO_X(pf, struct channel)
-/*
- * Data structure to hold primary network stats for which
- * we want to use 64 bit storage. Other network stats
- * are stored in dev->stats of the ppp strucute.
- */
-struct ppp_link_stats {
- u64 rx_packets;
- u64 tx_packets;
- u64 rx_bytes;
- u64 tx_bytes;
+struct ppp_xmit_recursion {
+ struct task_struct *owner;
+ local_lock_t bh_lock;
};
/*
@@ -120,7 +126,7 @@ struct ppp {
int n_channels; /* how many channels are attached 54 */
spinlock_t rlock; /* lock for receive side 58 */
spinlock_t wlock; /* lock for transmit side 5c */
- int __percpu *xmit_recursion; /* xmit recursion detect */
+ struct ppp_xmit_recursion __percpu *xmit_recursion; /* xmit recursion detect */
int mru; /* max receive unit 60 */
unsigned int flags; /* control bits 64 */
unsigned int xstate; /* transmit state bits 68 */
@@ -150,7 +156,6 @@ struct ppp {
struct bpf_prog *active_filter; /* filter for pkts to reset idle */
#endif /* CONFIG_PPP_FILTER */
struct net *ppp_net; /* the net we belong to */
- struct ppp_link_stats stats64; /* 64 bit network stats */
};
/*
@@ -174,11 +179,11 @@ struct channel {
struct ppp_channel *chan; /* public channel data structure */
struct rw_semaphore chan_sem; /* protects `chan' during chan ioctl */
spinlock_t downl; /* protects `chan', file.xq dequeue */
- struct ppp *ppp; /* ppp unit we're connected to */
+ struct ppp __rcu *ppp; /* ppp unit we're connected to */
struct net *chan_net; /* the net channel belongs to */
netns_tracker ns_tracker;
struct list_head clist; /* link in list of channels per unit */
- rwlock_t upl; /* protects `ppp' and 'bridge' */
+ spinlock_t upl; /* protects `ppp' and 'bridge' */
struct channel __rcu *bridge; /* "bridged" ppp channel */
#ifdef CONFIG_PPP_MULTILINK
u8 avail; /* flag used in multilink stuff */
@@ -640,34 +645,34 @@ static struct bpf_prog *compat_ppp_get_filter(struct sock_fprog32 __user *p)
*/
static int ppp_bridge_channels(struct channel *pch, struct channel *pchb)
{
- write_lock_bh(&pch->upl);
- if (pch->ppp ||
+ spin_lock(&pch->upl);
+ if (rcu_dereference_protected(pch->ppp, lockdep_is_held(&pch->upl)) ||
rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl))) {
- write_unlock_bh(&pch->upl);
+ spin_unlock(&pch->upl);
return -EALREADY;
}
refcount_inc(&pchb->file.refcnt);
rcu_assign_pointer(pch->bridge, pchb);
- write_unlock_bh(&pch->upl);
+ spin_unlock(&pch->upl);
- write_lock_bh(&pchb->upl);
- if (pchb->ppp ||
+ spin_lock(&pchb->upl);
+ if (rcu_dereference_protected(pchb->ppp, lockdep_is_held(&pchb->upl)) ||
rcu_dereference_protected(pchb->bridge, lockdep_is_held(&pchb->upl))) {
- write_unlock_bh(&pchb->upl);
+ spin_unlock(&pchb->upl);
goto err_unset;
}
refcount_inc(&pch->file.refcnt);
rcu_assign_pointer(pchb->bridge, pch);
- write_unlock_bh(&pchb->upl);
+ spin_unlock(&pchb->upl);
return 0;
err_unset:
- write_lock_bh(&pch->upl);
+ spin_lock(&pch->upl);
/* Re-read pch->bridge with upl held in case it was modified concurrently */
pchb = rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl));
RCU_INIT_POINTER(pch->bridge, NULL);
- write_unlock_bh(&pch->upl);
+ spin_unlock(&pch->upl);
synchronize_rcu();
if (pchb)
@@ -681,25 +686,25 @@ static int ppp_unbridge_channels(struct channel *pch)
{
struct channel *pchb, *pchbb;
- write_lock_bh(&pch->upl);
+ spin_lock(&pch->upl);
pchb = rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl));
if (!pchb) {
- write_unlock_bh(&pch->upl);
+ spin_unlock(&pch->upl);
return -EINVAL;
}
RCU_INIT_POINTER(pch->bridge, NULL);
- write_unlock_bh(&pch->upl);
+ spin_unlock(&pch->upl);
/* Only modify pchb if phcb->bridge points back to pch.
* If not, it implies that there has been a race unbridging (and possibly
* even rebridging) pchb. We should leave pchb alone to avoid either a
* refcount underflow, or breaking another established bridge instance.
*/
- write_lock_bh(&pchb->upl);
+ spin_lock(&pchb->upl);
pchbb = rcu_dereference_protected(pchb->bridge, lockdep_is_held(&pchb->upl));
if (pchbb == pch)
RCU_INIT_POINTER(pchb->bridge, NULL);
- write_unlock_bh(&pchb->upl);
+ spin_unlock(&pchb->upl);
synchronize_rcu();
@@ -1119,6 +1124,8 @@ static const struct file_operations ppp_device_fops = {
.llseek = noop_llseek,
};
+static void ppp_nl_dellink(struct net_device *dev, struct list_head *head);
+
static __net_init int ppp_init_net(struct net *net)
{
struct ppp_net *pn = net_generic(net, ppp_net_id);
@@ -1134,28 +1141,20 @@ static __net_init int ppp_init_net(struct net *net)
return 0;
}
-static __net_exit void ppp_exit_net(struct net *net)
+static __net_exit void ppp_exit_rtnl_net(struct net *net,
+ struct list_head *dev_to_kill)
{
struct ppp_net *pn = net_generic(net, ppp_net_id);
- struct net_device *dev;
- struct net_device *aux;
struct ppp *ppp;
- LIST_HEAD(list);
int id;
- rtnl_lock();
- for_each_netdev_safe(net, dev, aux) {
- if (dev->netdev_ops == &ppp_netdev_ops)
- unregister_netdevice_queue(dev, &list);
- }
-
idr_for_each_entry(&pn->units_idr, ppp, id)
- /* Skip devices already unregistered by previous loop */
- if (!net_eq(dev_net(ppp->dev), net))
- unregister_netdevice_queue(ppp->dev, &list);
+ ppp_nl_dellink(ppp->dev, dev_to_kill);
+}
- unregister_netdevice_many(&list);
- rtnl_unlock();
+static __net_exit void ppp_exit_net(struct net *net)
+{
+ struct ppp_net *pn = net_generic(net, ppp_net_id);
mutex_destroy(&pn->all_ppp_mutex);
idr_destroy(&pn->units_idr);
@@ -1165,6 +1164,7 @@ static __net_exit void ppp_exit_net(struct net *net)
static struct pernet_operations ppp_net_ops = {
.init = ppp_init_net,
+ .exit_rtnl = ppp_exit_rtnl_net,
.exit = ppp_exit_net,
.id = &ppp_net_id,
.size = sizeof(struct ppp_net),
@@ -1255,13 +1255,18 @@ static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
spin_lock_init(&ppp->rlock);
spin_lock_init(&ppp->wlock);
- ppp->xmit_recursion = alloc_percpu(int);
+ ppp->xmit_recursion = alloc_percpu(struct ppp_xmit_recursion);
if (!ppp->xmit_recursion) {
err = -ENOMEM;
goto err1;
}
- for_each_possible_cpu(cpu)
- (*per_cpu_ptr(ppp->xmit_recursion, cpu)) = 0;
+ for_each_possible_cpu(cpu) {
+ struct ppp_xmit_recursion *xmit_recursion;
+
+ xmit_recursion = per_cpu_ptr(ppp->xmit_recursion, cpu);
+ xmit_recursion->owner = NULL;
+ local_lock_init(&xmit_recursion->bh_lock);
+ }
#ifdef CONFIG_PPP_MULTILINK
ppp->minseq = -1;
@@ -1303,10 +1308,13 @@ static int ppp_nl_validate(struct nlattr *tb[], struct nlattr *data[],
return 0;
}
-static int ppp_nl_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int ppp_nl_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct ppp_config conf = {
.unit = -1,
.ifname_is_set = true,
@@ -1343,7 +1351,7 @@ static int ppp_nl_newlink(struct net *src_net, struct net_device *dev,
if (!tb[IFLA_IFNAME] || !nla_len(tb[IFLA_IFNAME]) || !*(char *)nla_data(tb[IFLA_IFNAME]))
conf.ifname_is_set = false;
- err = ppp_dev_configure(src_net, dev, &conf);
+ err = ppp_dev_configure(link_net, dev, &conf);
out_unlock:
mutex_unlock(&ppp_mutex);
@@ -1529,23 +1537,12 @@ ppp_net_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
static void
ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
{
- struct ppp *ppp = netdev_priv(dev);
-
- ppp_recv_lock(ppp);
- stats64->rx_packets = ppp->stats64.rx_packets;
- stats64->rx_bytes = ppp->stats64.rx_bytes;
- ppp_recv_unlock(ppp);
-
- ppp_xmit_lock(ppp);
- stats64->tx_packets = ppp->stats64.tx_packets;
- stats64->tx_bytes = ppp->stats64.tx_bytes;
- ppp_xmit_unlock(ppp);
-
stats64->rx_errors = dev->stats.rx_errors;
stats64->tx_errors = dev->stats.tx_errors;
stats64->rx_dropped = dev->stats.rx_dropped;
stats64->tx_dropped = dev->stats.tx_dropped;
stats64->rx_length_errors = dev->stats.rx_length_errors;
+ dev_fetch_sw_netstats(stats64, dev->tstats);
}
static int ppp_dev_init(struct net_device *dev)
@@ -1602,11 +1599,14 @@ static int ppp_fill_forward_path(struct net_device_path_ctx *ctx,
if (ppp->flags & SC_MULTILINK)
return -EOPNOTSUPP;
- if (list_empty(&ppp->channels))
+ pch = list_first_or_null_rcu(&ppp->channels, struct channel, clist);
+ if (!pch)
+ return -ENODEV;
+
+ chan = READ_ONCE(pch->chan);
+ if (!chan)
return -ENODEV;
- pch = list_first_entry(&ppp->channels, struct channel, clist);
- chan = pch->chan;
if (!chan->ops->fill_forward_path)
return -EOPNOTSUPP;
@@ -1640,6 +1640,7 @@ static void ppp_setup(struct net_device *dev)
dev->type = ARPHRD_PPP;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
dev->priv_destructor = ppp_dev_priv_destructor;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
netif_keep_dst(dev);
}
@@ -1673,15 +1674,20 @@ static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
{
+ struct ppp_xmit_recursion *xmit_recursion;
+
local_bh_disable();
- if (unlikely(*this_cpu_ptr(ppp->xmit_recursion)))
+ xmit_recursion = this_cpu_ptr(ppp->xmit_recursion);
+ if (xmit_recursion->owner == current)
goto err;
+ local_lock_nested_bh(&ppp->xmit_recursion->bh_lock);
+ xmit_recursion->owner = current;
- (*this_cpu_ptr(ppp->xmit_recursion))++;
__ppp_xmit_process(ppp, skb);
- (*this_cpu_ptr(ppp->xmit_recursion))--;
+ xmit_recursion->owner = NULL;
+ local_unlock_nested_bh(&ppp->xmit_recursion->bh_lock);
local_bh_enable();
return;
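Illustrative restatement (not part of the patch) of the recursion guard the hunk above converts to: instead of a bare per-CPU counter, the owning task is recorded under a per-CPU local lock, which also behaves correctly when softirqs are preemptible (PREEMPT_RT) and a task can be preempted between the check and the transmit. Each per-CPU instance is set up with local_lock_init() at allocation time, as the earlier hunk shows; names below are invented, and callers are assumed to run with BHs disabled:

#include <linux/local_lock.h>
#include <linux/percpu.h>
#include <linux/sched.h>

struct xmit_recursion_guard {
        struct task_struct *owner;      /* task currently transmitting on this CPU */
        local_lock_t bh_lock;           /* serialises BH users on PREEMPT_RT */
};

static bool enter_xmit(struct xmit_recursion_guard __percpu *guard)
{
        struct xmit_recursion_guard *g = this_cpu_ptr(guard);

        if (g->owner == current)        /* same task re-entered: would recurse */
                return false;

        local_lock_nested_bh(&guard->bh_lock);
        g->owner = current;
        return true;
}

static void leave_xmit(struct xmit_recursion_guard __percpu *guard)
{
        this_cpu_ptr(guard)->owner = NULL;
        local_unlock_nested_bh(&guard->bh_lock);
}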
@@ -1738,7 +1744,6 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
*/
if (net_ratelimit())
netdev_err(ppp->dev, "ppp: compressor dropped pkt\n");
- kfree_skb(skb);
consume_skb(new_skb);
new_skb = NULL;
}
@@ -1762,10 +1767,10 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
if (proto < 0x8000) {
#ifdef CONFIG_PPP_FILTER
- /* check if we should pass this packet */
- /* the filter instructions are constructed assuming
- a four-byte PPP header on each packet */
- *(u8 *)skb_push(skb, 2) = 1;
+ /* check if the packet passes the pass and active filters.
+ * See comment for PPP_FILTER_OUTBOUND_TAG above.
+ */
+ *(__be16 *)skb_push(skb, 2) = htons(PPP_FILTER_OUTBOUND_TAG);
if (ppp->pass_filter &&
bpf_prog_run(ppp->pass_filter, skb) == 0) {
if (ppp->debug & 1)
@@ -1786,8 +1791,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
#endif /* CONFIG_PPP_FILTER */
}
- ++ppp->stats64.tx_packets;
- ppp->stats64.tx_bytes += skb->len - PPP_PROTO_LEN;
+ dev_sw_netstats_tx_add(ppp->dev, 1, skb->len - PPP_PROTO_LEN);
switch (proto) {
case PPP_IP:
@@ -1840,9 +1844,10 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
"down - pkt dropped.\n");
goto drop;
}
- skb = pad_compress_skb(ppp, skb);
- if (!skb)
+ new_skb = pad_compress_skb(ppp, skb);
+ if (!new_skb)
goto drop;
+ skb = new_skb;
}
/*
@@ -2153,10 +2158,9 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
#endif /* CONFIG_PPP_MULTILINK */
/* Try to send data out on a channel */
-static void __ppp_channel_push(struct channel *pch)
+static void __ppp_channel_push(struct channel *pch, struct ppp *ppp)
{
struct sk_buff *skb;
- struct ppp *ppp;
spin_lock(&pch->downl);
if (pch->chan) {
@@ -2175,7 +2179,6 @@ static void __ppp_channel_push(struct channel *pch)
spin_unlock(&pch->downl);
/* see if there is anything from the attached unit to be sent */
if (skb_queue_empty(&pch->file.xq)) {
- ppp = pch->ppp;
if (ppp)
__ppp_xmit_process(ppp, NULL);
}
@@ -2183,15 +2186,22 @@ static void __ppp_channel_push(struct channel *pch)
static void ppp_channel_push(struct channel *pch)
{
- read_lock_bh(&pch->upl);
- if (pch->ppp) {
- (*this_cpu_ptr(pch->ppp->xmit_recursion))++;
- __ppp_channel_push(pch);
- (*this_cpu_ptr(pch->ppp->xmit_recursion))--;
+ struct ppp_xmit_recursion *xmit_recursion;
+ struct ppp *ppp;
+
+ rcu_read_lock_bh();
+ ppp = rcu_dereference_bh(pch->ppp);
+ if (ppp) {
+ xmit_recursion = this_cpu_ptr(ppp->xmit_recursion);
+ local_lock_nested_bh(&ppp->xmit_recursion->bh_lock);
+ xmit_recursion->owner = current;
+ __ppp_channel_push(pch, ppp);
+ xmit_recursion->owner = NULL;
+ local_unlock_nested_bh(&ppp->xmit_recursion->bh_lock);
} else {
- __ppp_channel_push(pch);
+ __ppp_channel_push(pch, NULL);
}
- read_unlock_bh(&pch->upl);
+ rcu_read_unlock_bh();
}
/*
@@ -2293,6 +2303,7 @@ void
ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
{
struct channel *pch = chan->ppp;
+ struct ppp *ppp;
int proto;
if (!pch) {
@@ -2304,18 +2315,19 @@ ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
if (ppp_channel_bridge_input(pch, skb))
return;
- read_lock_bh(&pch->upl);
+ rcu_read_lock_bh();
+ ppp = rcu_dereference_bh(pch->ppp);
if (!ppp_decompress_proto(skb)) {
kfree_skb(skb);
- if (pch->ppp) {
- ++pch->ppp->dev->stats.rx_length_errors;
- ppp_receive_error(pch->ppp);
+ if (ppp) {
+ ++ppp->dev->stats.rx_length_errors;
+ ppp_receive_error(ppp);
}
goto done;
}
proto = PPP_PROTO(skb);
- if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) {
+ if (!ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) {
/* put it on the channel queue */
skb_queue_tail(&pch->file.rq, skb);
/* drop old frames if queue too long */
@@ -2324,11 +2336,11 @@ ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
kfree_skb(skb);
wake_up_interruptible(&pch->file.rwait);
} else {
- ppp_do_recv(pch->ppp, skb, pch);
+ ppp_do_recv(ppp, skb, pch);
}
done:
- read_unlock_bh(&pch->upl);
+ rcu_read_unlock_bh();
}
/* Put a 0-length skb in the receive queue as an error indication */
@@ -2337,20 +2349,22 @@ ppp_input_error(struct ppp_channel *chan, int code)
{
struct channel *pch = chan->ppp;
struct sk_buff *skb;
+ struct ppp *ppp;
if (!pch)
return;
- read_lock_bh(&pch->upl);
- if (pch->ppp) {
+ rcu_read_lock_bh();
+ ppp = rcu_dereference_bh(pch->ppp);
+ if (ppp) {
skb = alloc_skb(0, GFP_ATOMIC);
if (skb) {
skb->len = 0; /* probably unnecessary */
skb->cb[0] = code;
- ppp_do_recv(pch->ppp, skb, pch);
+ ppp_do_recv(ppp, skb, pch);
}
}
- read_unlock_bh(&pch->upl);
+ rcu_read_unlock_bh();
}
/*
@@ -2464,8 +2478,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
break;
}
- ++ppp->stats64.rx_packets;
- ppp->stats64.rx_bytes += skb->len - 2;
+ dev_sw_netstats_rx_add(ppp->dev, skb->len - PPP_PROTO_LEN);
npi = proto_to_npindex(proto);
if (npi < 0) {
@@ -2482,14 +2495,13 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
/* network protocol frame - give it to the kernel */
#ifdef CONFIG_PPP_FILTER
- /* check if the packet passes the pass and active filters */
- /* the filter instructions are constructed assuming
- a four-byte PPP header on each packet */
if (ppp->pass_filter || ppp->active_filter) {
if (skb_unclone(skb, GFP_ATOMIC))
goto err;
-
- *(u8 *)skb_push(skb, 2) = 0;
+ /* Check if the packet passes the pass and active filters.
+ * See comment for PPP_FILTER_INBOUND_TAG above.
+ */
+ *(__be16 *)skb_push(skb, 2) = htons(PPP_FILTER_INBOUND_TAG);
if (ppp->pass_filter &&
bpf_prog_run(ppp->pass_filter, skb) == 0) {
if (ppp->debug & 1)
@@ -2900,7 +2912,6 @@ int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
pn = ppp_pernet(net);
- pch->ppp = NULL;
pch->chan = chan;
pch->chan_net = get_net_track(net, &pch->ns_tracker, GFP_KERNEL);
chan->ppp = pch;
@@ -2911,7 +2922,7 @@ int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
#endif /* CONFIG_PPP_MULTILINK */
init_rwsem(&pch->chan_sem);
spin_lock_init(&pch->downl);
- rwlock_init(&pch->upl);
+ spin_lock_init(&pch->upl);
spin_lock_bh(&pn->all_channels_lock);
pch->file.index = ++pn->last_channel_index;
@@ -2940,13 +2951,15 @@ int ppp_channel_index(struct ppp_channel *chan)
int ppp_unit_number(struct ppp_channel *chan)
{
struct channel *pch = chan->ppp;
+ struct ppp *ppp;
int unit = -1;
if (pch) {
- read_lock_bh(&pch->upl);
- if (pch->ppp)
- unit = pch->ppp->file.index;
- read_unlock_bh(&pch->upl);
+ rcu_read_lock();
+ ppp = rcu_dereference(pch->ppp);
+ if (ppp)
+ unit = ppp->file.index;
+ rcu_read_unlock();
}
return unit;
}
@@ -2958,12 +2971,14 @@ char *ppp_dev_name(struct ppp_channel *chan)
{
struct channel *pch = chan->ppp;
char *name = NULL;
+ struct ppp *ppp;
if (pch) {
- read_lock_bh(&pch->upl);
- if (pch->ppp && pch->ppp->dev)
- name = pch->ppp->dev->name;
- read_unlock_bh(&pch->upl);
+ rcu_read_lock();
+ ppp = rcu_dereference(pch->ppp);
+ if (ppp && ppp->dev)
+ name = ppp->dev->name;
+ rcu_read_unlock();
}
return name;
}
@@ -2990,7 +3005,7 @@ ppp_unregister_channel(struct ppp_channel *chan)
*/
down_write(&pch->chan_sem);
spin_lock_bh(&pch->downl);
- pch->chan = NULL;
+ WRITE_ONCE(pch->chan, NULL);
spin_unlock_bh(&pch->downl);
up_write(&pch->chan_sem);
ppp_disconnect_channel(pch);
@@ -3294,14 +3309,25 @@ static void
ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
{
struct slcompress *vj = ppp->vj;
+ int cpu;
memset(st, 0, sizeof(*st));
- st->p.ppp_ipackets = ppp->stats64.rx_packets;
+ for_each_possible_cpu(cpu) {
+ struct pcpu_sw_netstats *p = per_cpu_ptr(ppp->dev->tstats, cpu);
+ u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+
+ rx_packets = u64_stats_read(&p->rx_packets);
+ rx_bytes = u64_stats_read(&p->rx_bytes);
+ tx_packets = u64_stats_read(&p->tx_packets);
+ tx_bytes = u64_stats_read(&p->tx_bytes);
+
+ st->p.ppp_ipackets += rx_packets;
+ st->p.ppp_ibytes += rx_bytes;
+ st->p.ppp_opackets += tx_packets;
+ st->p.ppp_obytes += tx_bytes;
+ }
st->p.ppp_ierrors = ppp->dev->stats.rx_errors;
- st->p.ppp_ibytes = ppp->stats64.rx_bytes;
- st->p.ppp_opackets = ppp->stats64.tx_packets;
st->p.ppp_oerrors = ppp->dev->stats.tx_errors;
- st->p.ppp_obytes = ppp->stats64.tx_bytes;
if (!vj)
return;
st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed;
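Illustrative sketch (not part of the patch) of the stats scheme the hunks above switch to: with dev->pcpu_stat_type set to NETDEV_PCPU_STAT_TSTATS the core allocates dev->tstats, the hot paths account through dev_sw_netstats_tx_add()/dev_sw_netstats_rx_add(), and 64-bit readers sum the per-CPU counters, either by hand as ppp_get_stats() now does or via the helper below. The function name is invented:

#include <linux/netdevice.h>

static void example_get_stats64(struct net_device *dev,
                                struct rtnl_link_stats64 *stats64)
{
        /* error counters still live in dev->stats */
        stats64->rx_errors = dev->stats.rx_errors;
        stats64->tx_errors = dev->stats.tx_errors;

        /* sum the per-CPU rx/tx packet and byte counters */
        dev_fetch_sw_netstats(stats64, dev->tstats);
}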
@@ -3475,9 +3501,9 @@ ppp_connect_channel(struct channel *pch, int unit)
ppp = ppp_find_unit(pn, unit);
if (!ppp)
goto out;
- write_lock_bh(&pch->upl);
+ spin_lock(&pch->upl);
ret = -EINVAL;
- if (pch->ppp ||
+ if (rcu_dereference_protected(pch->ppp, lockdep_is_held(&pch->upl)) ||
rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl)))
goto outl;
@@ -3490,21 +3516,25 @@ ppp_connect_channel(struct channel *pch, int unit)
ret = -ENOTCONN;
goto outl;
}
+ if (pch->chan->direct_xmit)
+ ppp->dev->priv_flags |= IFF_NO_QUEUE;
+ else
+ ppp->dev->priv_flags &= ~IFF_NO_QUEUE;
spin_unlock_bh(&pch->downl);
if (pch->file.hdrlen > ppp->file.hdrlen)
ppp->file.hdrlen = pch->file.hdrlen;
hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */
if (hdrlen > ppp->dev->hard_header_len)
ppp->dev->hard_header_len = hdrlen;
- list_add_tail(&pch->clist, &ppp->channels);
+ list_add_tail_rcu(&pch->clist, &ppp->channels);
++ppp->n_channels;
- pch->ppp = ppp;
+ rcu_assign_pointer(pch->ppp, ppp);
refcount_inc(&ppp->file.refcnt);
ppp_unlock(ppp);
ret = 0;
outl:
- write_unlock_bh(&pch->upl);
+ spin_unlock(&pch->upl);
out:
mutex_unlock(&pn->all_ppp_mutex);
return ret;
@@ -3519,17 +3549,17 @@ ppp_disconnect_channel(struct channel *pch)
struct ppp *ppp;
int err = -EINVAL;
- write_lock_bh(&pch->upl);
- ppp = pch->ppp;
- pch->ppp = NULL;
- write_unlock_bh(&pch->upl);
+ spin_lock(&pch->upl);
+ ppp = rcu_replace_pointer(pch->ppp, NULL, lockdep_is_held(&pch->upl));
+ spin_unlock(&pch->upl);
if (ppp) {
/* remove it from the ppp unit's list */
ppp_lock(ppp);
- list_del(&pch->clist);
+ list_del_rcu(&pch->clist);
if (--ppp->n_channels == 0)
wake_up_interruptible(&ppp->file.rwait);
ppp_unlock(ppp);
+ synchronize_net();
if (refcount_dec_and_test(&ppp->file.refcnt))
ppp_destroy_interface(ppp);
err = 0;
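Summary sketch (illustrative, not part of the patch) of the locking shape ppp_generic.c moves to above: pch->ppp becomes an RCU pointer, data-path readers use rcu_dereference_bh() under rcu_read_lock_bh(), and attach/detach publish or clear it under the now non-BH upl spinlock before waiting out readers with synchronize_net(). Function names below are invented; the calls mirror the hunks:

/* reader side, e.g. from ppp_input() */
static void channel_account_error(struct channel *pch)
{
        struct ppp *ppp;

        rcu_read_lock_bh();
        ppp = rcu_dereference_bh(pch->ppp);     /* NULL if the channel is detached */
        if (ppp)
                ++ppp->dev->stats.rx_length_errors;
        rcu_read_unlock_bh();
}

/* writer side, e.g. ppp_disconnect_channel() */
static struct ppp *channel_detach_unit(struct channel *pch)
{
        struct ppp *ppp;

        spin_lock(&pch->upl);
        ppp = rcu_replace_pointer(pch->ppp, NULL, lockdep_is_held(&pch->upl));
        spin_unlock(&pch->upl);

        synchronize_net();      /* wait for in-flight readers before teardown */
        return ppp;
}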
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index bcc1eaedf58f..630cbf71c147 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -43,7 +43,7 @@
*/
#include <crypto/arc4.h>
-#include <crypto/hash.h>
+#include <crypto/sha1.h>
#include <linux/err.h>
#include <linux/fips.h>
#include <linux/module.h>
@@ -55,7 +55,6 @@
#include <linux/mm.h>
#include <linux/ppp_defs.h>
#include <linux/ppp-comp.h>
-#include <linux/scatterlist.h>
#include <linux/unaligned.h>
#include "ppp_mppe.h"
@@ -67,31 +66,15 @@ MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE));
MODULE_VERSION("1.0.2");
#define SHA1_PAD_SIZE 40
-
-/*
- * kernel crypto API needs its arguments to be in kmalloc'd memory, not in the module
- * static data area. That means sha_pad needs to be kmalloc'd.
- */
-
-struct sha_pad {
- unsigned char sha_pad1[SHA1_PAD_SIZE];
- unsigned char sha_pad2[SHA1_PAD_SIZE];
-};
-static struct sha_pad *sha_pad;
-
-static inline void sha_pad_init(struct sha_pad *shapad)
-{
- memset(shapad->sha_pad1, 0x00, sizeof(shapad->sha_pad1));
- memset(shapad->sha_pad2, 0xF2, sizeof(shapad->sha_pad2));
-}
+static const u8 sha_pad1[SHA1_PAD_SIZE] = { 0 };
+static const u8 sha_pad2[SHA1_PAD_SIZE] = { [0 ... SHA1_PAD_SIZE - 1] = 0xF2 };
/*
* State for an MPPE (de)compressor.
*/
struct ppp_mppe_state {
struct arc4_ctx arc4;
- struct shash_desc *sha1;
- unsigned char *sha1_digest;
+ unsigned char sha1_digest[SHA1_DIGEST_SIZE];
unsigned char master_key[MPPE_MAX_KEY_LEN];
unsigned char session_key[MPPE_MAX_KEY_LEN];
unsigned keylen; /* key length in bytes */
@@ -130,16 +113,14 @@ struct ppp_mppe_state {
*/
static void get_new_key_from_sha(struct ppp_mppe_state * state)
{
- crypto_shash_init(state->sha1);
- crypto_shash_update(state->sha1, state->master_key,
- state->keylen);
- crypto_shash_update(state->sha1, sha_pad->sha_pad1,
- sizeof(sha_pad->sha_pad1));
- crypto_shash_update(state->sha1, state->session_key,
- state->keylen);
- crypto_shash_update(state->sha1, sha_pad->sha_pad2,
- sizeof(sha_pad->sha_pad2));
- crypto_shash_final(state->sha1, state->sha1_digest);
+ struct sha1_ctx ctx;
+
+ sha1_init(&ctx);
+ sha1_update(&ctx, state->master_key, state->keylen);
+ sha1_update(&ctx, sha_pad1, sizeof(sha_pad1));
+ sha1_update(&ctx, state->session_key, state->keylen);
+ sha1_update(&ctx, sha_pad2, sizeof(sha_pad2));
+ sha1_final(&ctx, state->sha1_digest);
}
/*
@@ -171,39 +152,15 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
static void *mppe_alloc(unsigned char *options, int optlen)
{
struct ppp_mppe_state *state;
- struct crypto_shash *shash;
- unsigned int digestsize;
if (optlen != CILEN_MPPE + sizeof(state->master_key) ||
options[0] != CI_MPPE || options[1] != CILEN_MPPE ||
fips_enabled)
- goto out;
+ return NULL;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state == NULL)
- goto out;
-
-
- shash = crypto_alloc_shash("sha1", 0, 0);
- if (IS_ERR(shash))
- goto out_free;
-
- state->sha1 = kmalloc(sizeof(*state->sha1) +
- crypto_shash_descsize(shash),
- GFP_KERNEL);
- if (!state->sha1) {
- crypto_free_shash(shash);
- goto out_free;
- }
- state->sha1->tfm = shash;
-
- digestsize = crypto_shash_digestsize(shash);
- if (digestsize < MPPE_MAX_KEY_LEN)
- goto out_free;
-
- state->sha1_digest = kmalloc(digestsize, GFP_KERNEL);
- if (!state->sha1_digest)
- goto out_free;
+ return NULL;
/* Save keys. */
memcpy(state->master_key, &options[CILEN_MPPE],
@@ -217,16 +174,6 @@ static void *mppe_alloc(unsigned char *options, int optlen)
*/
return (void *)state;
-
-out_free:
- kfree(state->sha1_digest);
- if (state->sha1) {
- crypto_free_shash(state->sha1->tfm);
- kfree_sensitive(state->sha1);
- }
- kfree(state);
-out:
- return NULL;
}
/*
@@ -235,12 +182,8 @@ out:
static void mppe_free(void *arg)
{
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
- if (state) {
- kfree(state->sha1_digest);
- crypto_free_shash(state->sha1->tfm);
- kfree_sensitive(state->sha1);
- kfree_sensitive(state);
- }
+
+ kfree_sensitive(state);
}
/*
@@ -649,31 +592,17 @@ static struct compressor ppp_mppe = {
.comp_extra = MPPE_PAD,
};
-/*
- * ppp_mppe_init()
- *
- * Prior to allowing load, try to load the arc4 and sha1 crypto
- * libraries. The actual use will be allocated later, but
- * this way the module will fail to insmod if they aren't available.
- */
-
static int __init ppp_mppe_init(void)
{
int answer;
- if (fips_enabled || !crypto_has_ahash("sha1", 0, CRYPTO_ALG_ASYNC))
- return -ENODEV;
- sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL);
- if (!sha_pad)
- return -ENOMEM;
- sha_pad_init(sha_pad);
+ if (fips_enabled)
+ return -ENODEV;
answer = ppp_register_compressor(&ppp_mppe);
if (answer == 0)
printk(KERN_INFO "PPP MPPE Compression module registered\n");
- else
- kfree(sha_pad);
return answer;
}
@@ -681,7 +610,6 @@ static int __init ppp_mppe_init(void)
static void __exit ppp_mppe_cleanup(void)
{
ppp_unregister_compressor(&ppp_mppe);
- kfree(sha_pad);
}
module_init(ppp_mppe_init);
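Illustrative only, not part of the patch: the hunks above replace the crypto_shash SHA-1 transform with the SHA-1 library API, which needs no allocation and accepts static const pads. A self-contained sketch of the RFC 3079 interim-key derivation get_new_key_from_sha() now performs (helper name invented; the first keylen bytes of the digest become the new session key):

#include <crypto/sha1.h>
#include <linux/string.h>

static void mppe_derive_interim_key(const u8 *master_key, const u8 *session_key,
                                    size_t keylen, u8 *new_key)
{
        static const u8 pad1[40];                               /* all 0x00 */
        static const u8 pad2[40] = { [0 ... 39] = 0xF2 };       /* all 0xF2 */
        struct sha1_ctx ctx;
        u8 digest[SHA1_DIGEST_SIZE];

        sha1_init(&ctx);
        sha1_update(&ctx, master_key, keylen);
        sha1_update(&ctx, pad1, sizeof(pad1));
        sha1_update(&ctx, session_key, keylen);
        sha1_update(&ctx, pad2, sizeof(pad2));
        sha1_final(&ctx, digest);

        memcpy(new_key, digest, keylen);        /* keylen <= SHA1_DIGEST_SIZE */
}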
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index 644e99fc3623..9c4932198931 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -506,6 +506,11 @@ ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb)
unsigned char *data;
int islcp;
+ /* Ensure we can safely access protocol field and LCP code */
+ if (!pskb_may_pull(skb, 3)) {
+ kfree_skb(skb);
+ return NULL;
+ }
data = skb->data;
proto = get_unaligned_be16(data);
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 2ea4f4890d23..4275b393a454 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -100,8 +100,8 @@ struct pppoe_net {
* as well, moreover in case of SMP less locking
* controversy here
*/
- struct pppox_sock *hash_table[PPPOE_HASH_SIZE];
- rwlock_t hash_lock;
+ struct pppox_sock __rcu *hash_table[PPPOE_HASH_SIZE];
+ spinlock_t hash_lock;
};
/*
@@ -162,13 +162,13 @@ static struct pppox_sock *__get_item(struct pppoe_net *pn, __be16 sid,
int hash = hash_item(sid, addr);
struct pppox_sock *ret;
- ret = pn->hash_table[hash];
+ ret = rcu_dereference(pn->hash_table[hash]);
while (ret) {
if (cmp_addr(&ret->pppoe_pa, sid, addr) &&
ret->pppoe_ifindex == ifindex)
return ret;
- ret = ret->next;
+ ret = rcu_dereference(ret->next);
}
return NULL;
@@ -177,19 +177,20 @@ static struct pppox_sock *__get_item(struct pppoe_net *pn, __be16 sid,
static int __set_item(struct pppoe_net *pn, struct pppox_sock *po)
{
int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote);
- struct pppox_sock *ret;
+ struct pppox_sock *ret, *first;
- ret = pn->hash_table[hash];
+ first = rcu_dereference_protected(pn->hash_table[hash], lockdep_is_held(&pn->hash_lock));
+ ret = first;
while (ret) {
if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa) &&
ret->pppoe_ifindex == po->pppoe_ifindex)
return -EALREADY;
- ret = ret->next;
+ ret = rcu_dereference_protected(ret->next, lockdep_is_held(&pn->hash_lock));
}
- po->next = pn->hash_table[hash];
- pn->hash_table[hash] = po;
+ RCU_INIT_POINTER(po->next, first);
+ rcu_assign_pointer(pn->hash_table[hash], po);
return 0;
}
@@ -198,20 +199,24 @@ static void __delete_item(struct pppoe_net *pn, __be16 sid,
char *addr, int ifindex)
{
int hash = hash_item(sid, addr);
- struct pppox_sock *ret, **src;
+ struct pppox_sock *ret, __rcu **src;
- ret = pn->hash_table[hash];
+ ret = rcu_dereference_protected(pn->hash_table[hash], lockdep_is_held(&pn->hash_lock));
src = &pn->hash_table[hash];
while (ret) {
if (cmp_addr(&ret->pppoe_pa, sid, addr) &&
ret->pppoe_ifindex == ifindex) {
- *src = ret->next;
+ struct pppox_sock *next;
+
+ next = rcu_dereference_protected(ret->next,
+ lockdep_is_held(&pn->hash_lock));
+ rcu_assign_pointer(*src, next);
break;
}
src = &ret->next;
- ret = ret->next;
+ ret = rcu_dereference_protected(ret->next, lockdep_is_held(&pn->hash_lock));
}
}
@@ -225,17 +230,15 @@ static inline struct pppox_sock *get_item(struct pppoe_net *pn, __be16 sid,
{
struct pppox_sock *po;
- read_lock_bh(&pn->hash_lock);
po = __get_item(pn, sid, addr, ifindex);
- if (po)
- sock_hold(sk_pppox(po));
- read_unlock_bh(&pn->hash_lock);
+ if (po && !refcount_inc_not_zero(&sk_pppox(po)->sk_refcnt))
+ po = NULL;
return po;
}
-static inline struct pppox_sock *get_item_by_addr(struct net *net,
- struct sockaddr_pppox *sp)
+static inline struct pppox_sock *__get_item_by_addr(struct net *net,
+ struct sockaddr_pppox *sp)
{
struct net_device *dev;
struct pppoe_net *pn;
@@ -243,24 +246,22 @@ static inline struct pppox_sock *get_item_by_addr(struct net *net,
int ifindex;
- rcu_read_lock();
dev = dev_get_by_name_rcu(net, sp->sa_addr.pppoe.dev);
if (dev) {
ifindex = dev->ifindex;
pn = pppoe_pernet(net);
- pppox_sock = get_item(pn, sp->sa_addr.pppoe.sid,
- sp->sa_addr.pppoe.remote, ifindex);
+ pppox_sock = __get_item(pn, sp->sa_addr.pppoe.sid,
+ sp->sa_addr.pppoe.remote, ifindex);
}
- rcu_read_unlock();
return pppox_sock;
}
static inline void delete_item(struct pppoe_net *pn, __be16 sid,
char *addr, int ifindex)
{
- write_lock_bh(&pn->hash_lock);
+ spin_lock(&pn->hash_lock);
__delete_item(pn, sid, addr, ifindex);
- write_unlock_bh(&pn->hash_lock);
+ spin_unlock(&pn->hash_lock);
}
/***************************************************************************
@@ -276,14 +277,16 @@ static void pppoe_flush_dev(struct net_device *dev)
int i;
pn = pppoe_pernet(dev_net(dev));
- write_lock_bh(&pn->hash_lock);
+ spin_lock(&pn->hash_lock);
for (i = 0; i < PPPOE_HASH_SIZE; i++) {
- struct pppox_sock *po = pn->hash_table[i];
+ struct pppox_sock *po = rcu_dereference_protected(pn->hash_table[i],
+ lockdep_is_held(&pn->hash_lock));
struct sock *sk;
while (po) {
while (po && po->pppoe_dev != dev) {
- po = po->next;
+ po = rcu_dereference_protected(po->next,
+ lockdep_is_held(&pn->hash_lock));
}
if (!po)
@@ -300,7 +303,7 @@ static void pppoe_flush_dev(struct net_device *dev)
*/
sock_hold(sk);
- write_unlock_bh(&pn->hash_lock);
+ spin_unlock(&pn->hash_lock);
lock_sock(sk);
if (po->pppoe_dev == dev &&
@@ -320,11 +323,12 @@ static void pppoe_flush_dev(struct net_device *dev)
*/
BUG_ON(pppoe_pernet(dev_net(dev)) == NULL);
- write_lock_bh(&pn->hash_lock);
- po = pn->hash_table[i];
+ spin_lock(&pn->hash_lock);
+ po = rcu_dereference_protected(pn->hash_table[i],
+ lockdep_is_held(&pn->hash_lock));
}
}
- write_unlock_bh(&pn->hash_lock);
+ spin_unlock(&pn->hash_lock);
}
static int pppoe_device_event(struct notifier_block *this,
@@ -372,24 +376,19 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
* can't change.
*/
- if (skb->pkt_type == PACKET_OTHERHOST)
- goto abort_kfree;
-
if (sk->sk_state & PPPOX_BOUND) {
ppp_input(&po->chan, skb);
} else if (sk->sk_state & PPPOX_RELAY) {
- relay_po = get_item_by_addr(sock_net(sk),
- &po->pppoe_relay);
+ relay_po = __get_item_by_addr(sock_net(sk),
+ &po->pppoe_relay);
if (relay_po == NULL)
goto abort_kfree;
if ((sk_pppox(relay_po)->sk_state & PPPOX_CONNECTED) == 0)
- goto abort_put;
+ goto abort_kfree;
if (!__pppoe_xmit(sk_pppox(relay_po), skb))
- goto abort_put;
-
- sock_put(sk_pppox(relay_po));
+ goto abort_kfree;
} else {
if (sock_queue_rcv_skb(sk, skb))
goto abort_kfree;
@@ -397,9 +396,6 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
return NET_RX_SUCCESS;
-abort_put:
- sock_put(sk_pppox(relay_po));
-
abort_kfree:
kfree_skb(skb);
return NET_RX_DROP;
@@ -418,6 +414,9 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
struct pppoe_net *pn;
int len;
+ if (skb->pkt_type == PACKET_OTHERHOST)
+ goto drop;
+
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
goto out;
@@ -441,14 +440,11 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
ph = pppoe_hdr(skb);
pn = pppoe_pernet(dev_net(dev));
- /* Note that get_item does a sock_hold(), so sk_pppox(po)
- * is known to be safe.
- */
- po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
+ po = __get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
if (!po)
goto drop;
- return sk_receive_skb(sk_pppox(po), skb, 0);
+ return __sk_receive_skb(sk_pppox(po), skb, 0, 1, false);
drop:
kfree_skb(skb);
@@ -528,6 +524,11 @@ static struct proto pppoe_sk_proto __read_mostly = {
.obj_size = sizeof(struct pppox_sock),
};
+static void pppoe_destruct(struct sock *sk)
+{
+ skb_queue_purge(&sk->sk_receive_queue);
+}
+
/***********************************************************************
*
* Initialize a new struct sock.
@@ -542,11 +543,13 @@ static int pppoe_create(struct net *net, struct socket *sock, int kern)
return -ENOMEM;
sock_init_data(sock, sk);
+ sock_set_flag(sk, SOCK_RCU_FREE);
sock->state = SS_UNCONNECTED;
sock->ops = &pppoe_ops;
sk->sk_backlog_rcv = pppoe_rcv_core;
+ sk->sk_destruct = pppoe_destruct;
sk->sk_state = PPPOX_NONE;
sk->sk_type = SOCK_STREAM;
sk->sk_family = PF_PPPOX;
@@ -599,15 +602,14 @@ static int pppoe_release(struct socket *sock)
sock_orphan(sk);
sock->sk = NULL;
- skb_queue_purge(&sk->sk_receive_queue);
release_sock(sk);
sock_put(sk);
return 0;
}
-static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
- int sockaddr_len, int flags)
+static int pppoe_connect(struct socket *sock, struct sockaddr_unsized *uservaddr,
+ int sockaddr_len, int flags)
{
struct sock *sk = sock->sk;
struct sockaddr_pppox *sp = (struct sockaddr_pppox *)uservaddr;
@@ -681,9 +683,9 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
&sp->sa_addr.pppoe,
sizeof(struct pppoe_addr));
- write_lock_bh(&pn->hash_lock);
+ spin_lock(&pn->hash_lock);
error = __set_item(pn, po);
- write_unlock_bh(&pn->hash_lock);
+ spin_unlock(&pn->hash_lock);
if (error < 0)
goto err_put;
@@ -693,6 +695,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2;
po->chan.private = sk;
po->chan.ops = &pppoe_chan_ops;
+ po->chan.direct_xmit = true;
error = ppp_register_net_channel(dev_net(dev), &po->chan);
if (error) {
@@ -807,11 +810,12 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
/* Check that the socket referenced by the address
actually exists. */
- relay_po = get_item_by_addr(sock_net(sk), &po->pppoe_relay);
+ rcu_read_lock();
+ relay_po = __get_item_by_addr(sock_net(sk), &po->pppoe_relay);
+ rcu_read_unlock();
if (!relay_po)
break;
- sock_put(sk_pppox(relay_po));
sk->sk_state |= PPPOX_RELAY;
err = 0;
break;
@@ -1051,11 +1055,11 @@ static inline struct pppox_sock *pppoe_get_idx(struct pppoe_net *pn, loff_t pos)
int i;
for (i = 0; i < PPPOE_HASH_SIZE; i++) {
- po = pn->hash_table[i];
+ po = rcu_dereference(pn->hash_table[i]);
while (po) {
if (!pos--)
goto out;
- po = po->next;
+ po = rcu_dereference(po->next);
}
}
@@ -1064,19 +1068,19 @@ out:
}
static void *pppoe_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(pn->hash_lock)
+ __acquires(RCU)
{
struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq));
loff_t l = *pos;
- read_lock_bh(&pn->hash_lock);
+ rcu_read_lock();
return l ? pppoe_get_idx(pn, --l) : SEQ_START_TOKEN;
}
static void *pppoe_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq));
- struct pppox_sock *po;
+ struct pppox_sock *po, *next;
++*pos;
if (v == SEQ_START_TOKEN) {
@@ -1084,14 +1088,15 @@ static void *pppoe_seq_next(struct seq_file *seq, void *v, loff_t *pos)
goto out;
}
po = v;
- if (po->next)
- po = po->next;
+ next = rcu_dereference(po->next);
+ if (next)
+ po = next;
else {
int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote);
po = NULL;
while (++hash < PPPOE_HASH_SIZE) {
- po = pn->hash_table[hash];
+ po = rcu_dereference(pn->hash_table[hash]);
if (po)
break;
}
@@ -1102,10 +1107,9 @@ out:
}
static void pppoe_seq_stop(struct seq_file *seq, void *v)
- __releases(pn->hash_lock)
+ __releases(RCU)
{
- struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq));
- read_unlock_bh(&pn->hash_lock);
+ rcu_read_unlock();
}
static const struct seq_operations pppoe_seq_ops = {
@@ -1148,7 +1152,7 @@ static __net_init int pppoe_init_net(struct net *net)
struct pppoe_net *pn = pppoe_pernet(net);
struct proc_dir_entry *pde;
- rwlock_init(&pn->hash_lock);
+ spin_lock_init(&pn->hash_lock);
pde = proc_create_net("pppoe", 0444, net->proc_net,
&pppoe_seq_ops, sizeof(struct seq_net_private));
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 689687bd2574..b18acd810561 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -159,19 +159,17 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
int len;
unsigned char *data;
__u32 seq_recv;
-
-
struct rtable *rt;
struct net_device *tdev;
struct iphdr *iph;
int max_headroom;
if (sk_pppox(po)->sk_state & PPPOX_DEAD)
- goto tx_error;
+ goto tx_drop;
rt = pptp_route_output(po, &fl4);
if (IS_ERR(rt))
- goto tx_error;
+ goto tx_drop;
tdev = rt->dst.dev;
@@ -179,16 +177,20 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
- if (!new_skb) {
- ip_rt_put(rt);
+
+ if (!new_skb)
goto tx_error;
- }
+
if (skb->sk)
skb_set_owner_w(new_skb, skb->sk);
consume_skb(skb);
skb = new_skb;
}
+ /* Ensure we can safely access protocol field and LCP code */
+ if (!pskb_may_pull(skb, 3))
+ goto tx_error;
+
data = skb->data;
islcp = ((data[0] << 8) + data[1]) == PPP_LCP && 1 <= data[2] && data[2] <= 7;
@@ -262,6 +264,8 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
return 1;
tx_error:
+ ip_rt_put(rt);
+tx_drop:
kfree_skb(skb);
return 1;
}
@@ -378,8 +382,8 @@ drop:
return NET_RX_DROP;
}
-static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
- int sockaddr_len)
+static int pptp_bind(struct socket *sock, struct sockaddr_unsized *uservaddr,
+ int sockaddr_len)
{
struct sock *sk = sock->sk;
struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
@@ -411,8 +415,8 @@ out:
return error;
}
-static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
- int sockaddr_len, int flags)
+static int pptp_connect(struct socket *sock, struct sockaddr_unsized *uservaddr,
+ int sockaddr_len, int flags)
{
struct sock *sk = sock->sk;
struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
@@ -465,6 +469,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
po->chan.mtu -= PPTP_HEADER_OVERHEAD;
po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header);
+ po->chan.direct_xmit = true;
error = ppp_register_channel(&po->chan);
if (error) {
pr_err("PPTP: failed to register PPP channel (%d)\n", error);
diff --git a/drivers/net/pse-pd/Kconfig b/drivers/net/pse-pd/Kconfig
index 7fab916a7f46..7ef29657ee5d 100644
--- a/drivers/net/pse-pd/Kconfig
+++ b/drivers/net/pse-pd/Kconfig
@@ -32,6 +32,17 @@ config PSE_PD692X0
To compile this driver as a module, choose M here: the
module will be called pd692x0.
+config PSE_SI3474
+ tristate "Si3474 PSE controller"
+ depends on I2C
+ help
+ This module provides support for Si3474 regulator based Ethernet
+ Power Sourcing Equipment.
+ Only 4-pair PSE configurations are supported.
+
+ To compile this driver as a module, choose M here: the
+ module will be called si3474.
+
config PSE_TPS23881
tristate "TPS23881 PSE controller"
depends on I2C
diff --git a/drivers/net/pse-pd/Makefile b/drivers/net/pse-pd/Makefile
index 9d2898b36737..cc78f7ea7f5f 100644
--- a/drivers/net/pse-pd/Makefile
+++ b/drivers/net/pse-pd/Makefile
@@ -5,4 +5,5 @@ obj-$(CONFIG_PSE_CONTROLLER) += pse_core.o
obj-$(CONFIG_PSE_REGULATOR) += pse_regulator.o
obj-$(CONFIG_PSE_PD692X0) += pd692x0.o
+obj-$(CONFIG_PSE_SI3474) += si3474.o
obj-$(CONFIG_PSE_TPS23881) += tps23881.o
diff --git a/drivers/net/pse-pd/pd692x0.c b/drivers/net/pse-pd/pd692x0.c
index 0af7db80b2f8..134435e90073 100644
--- a/drivers/net/pse-pd/pd692x0.c
+++ b/drivers/net/pse-pd/pd692x0.c
@@ -12,6 +12,8 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pse-pd/pse.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
#define PD692X0_PSE_NAME "pd692x0_pse"
@@ -28,6 +30,8 @@
#define PD692X0_FW_MIN_VER 5
#define PD692X0_FW_PATCH_VER 5
+#define PD692X0_USER_BYTE 42
+
enum pd692x0_fw_state {
PD692X0_FW_UNKNOWN,
PD692X0_FW_OK,
@@ -76,11 +80,19 @@ enum {
PD692X0_MSG_GET_PORT_CLASS,
PD692X0_MSG_GET_PORT_MEAS,
PD692X0_MSG_GET_PORT_PARAM,
+ PD692X0_MSG_GET_POWER_BANK,
+ PD692X0_MSG_SET_POWER_BANK,
+ PD692X0_MSG_SET_USER_BYTE,
/* add new message above here */
PD692X0_MSG_CNT
};
+struct pd692x0_matrix {
+ u8 hw_port_a;
+ u8 hw_port_b;
+};
+
struct pd692x0_priv {
struct i2c_client *client;
struct pse_controller_dev pcdev;
@@ -94,7 +106,12 @@ struct pd692x0_priv {
bool last_cmd_key;
unsigned long last_cmd_key_time;
+ bool cfg_saved;
enum ethtool_c33_pse_admin_state admin_state[PD692X0_MAX_PIS];
+ struct regulator_dev *manager_reg[PD692X0_MAX_MANAGERS];
+ int manager_pw_budget[PD692X0_MAX_MANAGERS];
+ int nmanagers;
+ struct pd692x0_matrix *port_matrix;
};
/* Template list of communication messages. The non-null bytes defined here
@@ -170,6 +187,22 @@ static const struct pd692x0_msg pd692x0_msg_template_list[PD692X0_MSG_CNT] = {
.data = {0x4e, 0x4e, 0x4e, 0x4e,
0x4e, 0x4e, 0x4e, 0x4e},
},
+ [PD692X0_MSG_GET_POWER_BANK] = {
+ .key = PD692X0_KEY_REQ,
+ .sub = {0x07, 0x0b, 0x57},
+ .data = { 0, 0x4e, 0x4e, 0x4e,
+ 0x4e, 0x4e, 0x4e, 0x4e},
+ },
+ [PD692X0_MSG_SET_POWER_BANK] = {
+ .key = PD692X0_KEY_CMD,
+ .sub = {0x07, 0x0b, 0x57},
+ },
+ [PD692X0_MSG_SET_USER_BYTE] = {
+ .key = PD692X0_KEY_PRG,
+ .sub = {0x41, PD692X0_USER_BYTE},
+ .data = {0x4e, 0x4e, 0x4e, 0x4e,
+ 0x4e, 0x4e, 0x4e, 0x4e},
+ },
};
static u8 pd692x0_build_msg(struct pd692x0_msg *msg, u8 echo)
@@ -431,31 +464,6 @@ static int pd692x0_pi_disable(struct pse_controller_dev *pcdev, int id)
return 0;
}
-static int pd692x0_pi_is_enabled(struct pse_controller_dev *pcdev, int id)
-{
- struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
- struct pd692x0_msg msg, buf = {0};
- int ret;
-
- ret = pd692x0_fw_unavailable(priv);
- if (ret)
- return ret;
-
- msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_STATUS];
- msg.sub[2] = id;
- ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
- if (ret < 0)
- return ret;
-
- if (buf.sub[1]) {
- priv->admin_state[id] = ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
- return 1;
- } else {
- priv->admin_state[id] = ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
- return 0;
- }
-}
-
struct pd692x0_pse_ext_state_mapping {
u32 status_code;
enum ethtool_c33_pse_ext_state pse_ext_state;
@@ -517,21 +525,38 @@ pd692x0_pse_ext_state_map[] = {
{ /* sentinel */ }
};
-static void
-pd692x0_get_ext_state(struct ethtool_c33_pse_ext_state_info *c33_ext_state_info,
- u32 status_code)
+static int
+pd692x0_pi_get_ext_state(struct pse_controller_dev *pcdev, int id,
+ struct pse_ext_state_info *ext_state_info)
{
+ struct ethtool_c33_pse_ext_state_info *c33_ext_state_info;
const struct pd692x0_pse_ext_state_mapping *ext_state_map;
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_msg msg, buf = {0};
+ int ret;
+
+ ret = pd692x0_fw_unavailable(priv);
+ if (ret)
+ return ret;
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_STATUS];
+ msg.sub[2] = id;
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0)
+ return ret;
+
+ c33_ext_state_info = &ext_state_info->c33_ext_state_info;
ext_state_map = pd692x0_pse_ext_state_map;
while (ext_state_map->status_code) {
- if (ext_state_map->status_code == status_code) {
+ if (ext_state_map->status_code == buf.sub[0]) {
c33_ext_state_info->c33_pse_ext_state = ext_state_map->pse_ext_state;
c33_ext_state_info->__c33_pse_ext_substate = ext_state_map->pse_ext_substate;
- return;
+ return 0;
}
ext_state_map++;
}
+
+ return 0;
}
struct pd692x0_class_pw {
@@ -613,35 +638,66 @@ static int pd692x0_pi_set_pw_from_table(struct device *dev,
}
static int
-pd692x0_pi_get_pw_ranges(struct pse_control_status *st)
+pd692x0_pi_get_pw_limit_ranges(struct pse_controller_dev *pcdev, int id,
+ struct pse_pw_limit_ranges *pw_limit_ranges)
{
+ struct ethtool_c33_pse_pw_limit_range *c33_pw_limit_ranges;
const struct pd692x0_class_pw *pw_table;
int i;
pw_table = pd692x0_class_pw_table;
- st->c33_pw_limit_ranges = kcalloc(PD692X0_CLASS_PW_TABLE_SIZE,
- sizeof(struct ethtool_c33_pse_pw_limit_range),
- GFP_KERNEL);
- if (!st->c33_pw_limit_ranges)
+ c33_pw_limit_ranges = kcalloc(PD692X0_CLASS_PW_TABLE_SIZE,
+ sizeof(*c33_pw_limit_ranges),
+ GFP_KERNEL);
+ if (!c33_pw_limit_ranges)
return -ENOMEM;
for (i = 0; i < PD692X0_CLASS_PW_TABLE_SIZE; i++, pw_table++) {
- st->c33_pw_limit_ranges[i].min = pw_table->class_pw;
- st->c33_pw_limit_ranges[i].max = pw_table->class_pw + pw_table->max_added_class_pw;
+ c33_pw_limit_ranges[i].min = pw_table->class_pw;
+ c33_pw_limit_ranges[i].max = pw_table->class_pw +
+ pw_table->max_added_class_pw;
}
- st->c33_pw_limit_nb_ranges = i;
+ pw_limit_ranges->c33_pw_limit_ranges = c33_pw_limit_ranges;
+ return i;
+}
+
+static int
+pd692x0_pi_get_admin_state(struct pse_controller_dev *pcdev, int id,
+ struct pse_admin_state *admin_state)
+{
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_msg msg, buf = {0};
+ int ret;
+
+ ret = pd692x0_fw_unavailable(priv);
+ if (ret)
+ return ret;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_STATUS];
+ msg.sub[2] = id;
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0)
+ return ret;
+
+ if (buf.sub[1])
+ admin_state->c33_admin_state =
+ ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
+ else
+ admin_state->c33_admin_state =
+ ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
+
+ priv->admin_state[id] = admin_state->c33_admin_state;
+
return 0;
}
-static int pd692x0_ethtool_get_status(struct pse_controller_dev *pcdev,
- unsigned long id,
- struct netlink_ext_ack *extack,
- struct pse_control_status *status)
+static int
+pd692x0_pi_get_pw_status(struct pse_controller_dev *pcdev, int id,
+ struct pse_pw_status *pw_status)
{
struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
struct pd692x0_msg msg, buf = {0};
- u32 class;
int ret;
ret = pd692x0_fw_unavailable(priv);
@@ -656,52 +712,87 @@ static int pd692x0_ethtool_get_status(struct pse_controller_dev *pcdev,
/* Compare Port Status (Communication Protocol Document par. 7.1) */
if ((buf.sub[0] & 0xf0) == 0x80 || (buf.sub[0] & 0xf0) == 0x90)
- status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING;
+ pw_status->c33_pw_status =
+ ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING;
else if (buf.sub[0] == 0x1b || buf.sub[0] == 0x22)
- status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_SEARCHING;
+ pw_status->c33_pw_status =
+ ETHTOOL_C33_PSE_PW_D_STATUS_SEARCHING;
else if (buf.sub[0] == 0x12)
- status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_FAULT;
+ pw_status->c33_pw_status =
+ ETHTOOL_C33_PSE_PW_D_STATUS_FAULT;
else
- status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED;
+ pw_status->c33_pw_status =
+ ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED;
- if (buf.sub[1])
- status->c33_admin_state = ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
- else
- status->c33_admin_state = ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
+ return 0;
+}
- priv->admin_state[id] = status->c33_admin_state;
+static int
+pd692x0_pi_get_pw_class(struct pse_controller_dev *pcdev, int id)
+{
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_msg msg, buf = {0};
+ u32 class;
+ int ret;
- pd692x0_get_ext_state(&status->c33_ext_state_info, buf.sub[0]);
- status->c33_actual_pw = (buf.data[0] << 4 | buf.data[1]) * 100;
+ ret = pd692x0_fw_unavailable(priv);
+ if (ret)
+ return ret;
- msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_PARAM];
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_CLASS];
msg.sub[2] = id;
- memset(&buf, 0, sizeof(buf));
ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
if (ret < 0)
return ret;
- ret = pd692x0_pi_get_pw_from_table(buf.data[0], buf.data[1]);
- if (ret < 0)
+ class = buf.data[3] >> 4;
+ if (class <= 8)
+ return class;
+
+ return 0;
+}
+
+static int
+pd692x0_pi_get_actual_pw(struct pse_controller_dev *pcdev, int id)
+{
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_msg msg, buf = {0};
+ int ret;
+
+ ret = pd692x0_fw_unavailable(priv);
+ if (ret)
return ret;
- status->c33_avail_pw_limit = ret;
- memset(&buf, 0, sizeof(buf));
- msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_CLASS];
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_STATUS];
msg.sub[2] = id;
ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
if (ret < 0)
return ret;
- class = buf.data[3] >> 4;
- if (class <= 8)
- status->c33_pw_class = class;
+ return (buf.data[0] << 4 | buf.data[1]) * 100;
+}
- ret = pd692x0_pi_get_pw_ranges(status);
+static int
+pd692x0_pi_get_prio(struct pse_controller_dev *pcdev, int id)
+{
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_msg msg, buf = {0};
+ int ret;
+
+ ret = pd692x0_fw_unavailable(priv);
+ if (ret)
+ return ret;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_PARAM];
+ msg.sub[2] = id;
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
if (ret < 0)
return ret;
+ if (!buf.data[2] || buf.data[2] > pcdev->pis_prio_max + 1)
+ return -ERANGE;
- return 0;
+	/* PSE core priorities start at 0 */
+ return buf.data[2] - 1;
}
static struct pd692x0_msg_ver pd692x0_get_sw_version(struct pd692x0_priv *priv)
@@ -731,14 +822,10 @@ static struct pd692x0_msg_ver pd692x0_get_sw_version(struct pd692x0_priv *priv)
struct pd692x0_manager {
struct device_node *port_node[PD692X0_MAX_MANAGER_PORTS];
+ struct device_node *node;
int nports;
};
-struct pd692x0_matrix {
- u8 hw_port_a;
- u8 hw_port_b;
-};
-
static int
pd692x0_of_get_ports_manager(struct pd692x0_priv *priv,
struct pd692x0_manager *manager,
@@ -785,7 +872,7 @@ out:
static int
pd692x0_of_get_managers(struct pd692x0_priv *priv,
- struct pd692x0_manager manager[PD692X0_MAX_MANAGERS])
+ struct pd692x0_manager *manager)
{
struct device_node *managers_node, *node;
int ret, nmanagers, i, j;
@@ -822,11 +909,14 @@ pd692x0_of_get_managers(struct pd692x0_priv *priv,
if (ret)
goto out;
+ of_node_get(node);
+ manager[manager_id].node = node;
nmanagers++;
}
of_node_put(managers_node);
- return nmanagers;
+ priv->nmanagers = nmanagers;
+ return 0;
out:
for (i = 0; i < nmanagers; i++) {
@@ -834,6 +924,8 @@ out:
of_node_put(manager[i].port_node[j]);
manager[i].port_node[j] = NULL;
}
+ of_node_put(manager[i].node);
+ manager[i].node = NULL;
}
of_node_put(node);
@@ -841,6 +933,161 @@ out:
return ret;
}
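+/* The managers are modelled as regulators with no operations: they only
+ * provide a node in the regulator tree, supplied by VMAIN, so that the
+ * VAUX supplies can be tied to them and the per-manager power budget can
+ * be tracked through their supply.
+ */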
+static const struct regulator_ops dummy_ops;
+
+static struct regulator_dev *
+pd692x0_register_manager_regulator(struct device *dev, char *reg_name,
+ struct device_node *node)
+{
+ struct regulator_init_data *rinit_data;
+ struct regulator_config rconfig = {0};
+ struct regulator_desc *rdesc;
+ struct regulator_dev *rdev;
+
+ rinit_data = devm_kzalloc(dev, sizeof(*rinit_data),
+ GFP_KERNEL);
+ if (!rinit_data)
+ return ERR_PTR(-ENOMEM);
+
+ rdesc = devm_kzalloc(dev, sizeof(*rdesc), GFP_KERNEL);
+ if (!rdesc)
+ return ERR_PTR(-ENOMEM);
+
+ rdesc->name = reg_name;
+ rdesc->type = REGULATOR_VOLTAGE;
+ rdesc->ops = &dummy_ops;
+ rdesc->owner = THIS_MODULE;
+
+ rinit_data->supply_regulator = "vmain";
+
+ rconfig.dev = dev;
+ rconfig.init_data = rinit_data;
+ rconfig.of_node = node;
+
+ rdev = devm_regulator_register(dev, rdesc, &rconfig);
+ if (IS_ERR(rdev)) {
+ dev_err_probe(dev, PTR_ERR(rdev),
+ "Failed to register regulator\n");
+ return rdev;
+ }
+
+ return rdev;
+}
+
+static int
+pd692x0_register_managers_regulator(struct pd692x0_priv *priv,
+ const struct pd692x0_manager *manager)
+{
+ struct device *dev = &priv->client->dev;
+ size_t reg_name_len;
+ int i;
+
+	/* Each regulator name is the device name plus 12 characters
+	 * ("pse-" and "-manager"), up to 10 digits for the index and a
+	 * terminating NUL.
+	 */
+	reg_name_len = strlen(dev_name(dev)) + 23;
+
+ for (i = 0; i < priv->nmanagers; i++) {
+ static const char * const regulators[] = { "vaux5", "vaux3p3" };
+ struct regulator_dev *rdev;
+ char *reg_name;
+ int ret;
+
+ reg_name = devm_kzalloc(dev, reg_name_len, GFP_KERNEL);
+ if (!reg_name)
+ return -ENOMEM;
+		snprintf(reg_name, reg_name_len, "pse-%s-manager%d",
+			 dev_name(dev), i);
+ rdev = pd692x0_register_manager_regulator(dev, reg_name,
+ manager[i].node);
+ if (IS_ERR(rdev))
+ return PTR_ERR(rdev);
+
+		/* VMAIN is described as the main supply for the manager.
+ * Add other VAUX power supplies and link them to the
+ * virtual device rdev->dev.
+ */
+ ret = devm_regulator_bulk_get_enable(&rdev->dev,
+ ARRAY_SIZE(regulators),
+ regulators);
+ if (ret)
+ return dev_err_probe(&rdev->dev, ret,
+ "Failed to enable regulators\n");
+
+ priv->manager_reg[i] = rdev;
+ }
+
+ return 0;
+}
+
+static int
+pd692x0_conf_manager_power_budget(struct pd692x0_priv *priv, int id)
+{
+ struct pd692x0_msg msg, buf;
+ int ret, pw_mW;
+
+ pw_mW = priv->manager_pw_budget[id] / 1000;
+ if (!pw_mW)
+ return 0;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_POWER_BANK];
+ msg.data[0] = id;
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0)
+ return ret;
+
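+	/* Rewrite the power bank with the requested budget while keeping
+	 * the remaining fields returned by the GET_POWER_BANK reply
+	 * unchanged.
+	 */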
+ msg = pd692x0_msg_template_list[PD692X0_MSG_SET_POWER_BANK];
+ msg.data[0] = id;
+ msg.data[1] = pw_mW >> 8;
+ msg.data[2] = pw_mW & 0xff;
+ msg.data[3] = buf.sub[2];
+ msg.data[4] = buf.data[0];
+ msg.data[5] = buf.data[1];
+ msg.data[6] = buf.data[2];
+ msg.data[7] = buf.data[3];
+ return pd692x0_sendrecv_msg(priv, &msg, &buf);
+}
+
+static int
+pd692x0_req_managers_pw_budget(struct pd692x0_priv *priv)
+{
+ int i, ret;
+
+ for (i = 0; i < priv->nmanagers; i++) {
+ struct regulator *supply = priv->manager_reg[i]->supply;
+ int pw_budget;
+
+ pw_budget = regulator_get_unclaimed_power_budget(supply);
+ if (!pw_budget)
+ /* Do nothing if no power budget */
+ continue;
+
+ /* Max power budget per manager */
+ if (pw_budget > 6000000)
+ pw_budget = 6000000;
+ ret = regulator_request_power_budget(supply, pw_budget);
+ if (ret < 0)
+ return ret;
+
+ priv->manager_pw_budget[i] = pw_budget;
+ }
+
+ return 0;
+}
+
+static int
+pd692x0_configure_managers(struct pd692x0_priv *priv)
+{
+ int i, ret;
+
+ for (i = 0; i < priv->nmanagers; i++) {
+ ret = pd692x0_conf_manager_power_budget(priv, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
static int
pd692x0_set_port_matrix(const struct pse_pi_pairset *pairset,
const struct pd692x0_manager *manager,
@@ -881,10 +1128,9 @@ pd692x0_set_port_matrix(const struct pse_pi_pairset *pairset,
static int
pd692x0_set_ports_matrix(struct pd692x0_priv *priv,
- const struct pd692x0_manager *manager,
- int nmanagers,
- struct pd692x0_matrix port_matrix[PD692X0_MAX_PIS])
+ const struct pd692x0_manager *manager)
{
+ struct pd692x0_matrix *port_matrix = priv->port_matrix;
struct pse_controller_dev *pcdev = &priv->pcdev;
int i, ret;
@@ -897,7 +1143,7 @@ pd692x0_set_ports_matrix(struct pd692x0_priv *priv,
/* Update with values for every PSE PIs */
for (i = 0; i < pcdev->nr_lines; i++) {
ret = pd692x0_set_port_matrix(&pcdev->pi[i].pairset[0],
- manager, nmanagers,
+ manager, priv->nmanagers,
&port_matrix[i]);
if (ret) {
dev_err(&priv->client->dev,
@@ -906,7 +1152,7 @@ pd692x0_set_ports_matrix(struct pd692x0_priv *priv,
}
ret = pd692x0_set_port_matrix(&pcdev->pi[i].pairset[1],
- manager, nmanagers,
+ manager, priv->nmanagers,
&port_matrix[i]);
if (ret) {
dev_err(&priv->client->dev,
@@ -919,9 +1165,9 @@ pd692x0_set_ports_matrix(struct pd692x0_priv *priv,
}
static int
-pd692x0_write_ports_matrix(struct pd692x0_priv *priv,
- const struct pd692x0_matrix port_matrix[PD692X0_MAX_PIS])
+pd692x0_write_ports_matrix(struct pd692x0_priv *priv)
{
+ struct pd692x0_matrix *port_matrix = priv->port_matrix;
struct pd692x0_msg msg, buf;
int ret, i;
@@ -946,36 +1192,122 @@ pd692x0_write_ports_matrix(struct pd692x0_priv *priv,
return 0;
}
-static int pd692x0_setup_pi_matrix(struct pse_controller_dev *pcdev)
+static int pd692x0_hw_conf_init(struct pd692x0_priv *priv)
{
- struct pd692x0_manager manager[PD692X0_MAX_MANAGERS] = {0};
- struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
- struct pd692x0_matrix port_matrix[PD692X0_MAX_PIS];
- int ret, i, j, nmanagers;
+ int ret;
- /* Should we flash the port matrix */
+ /* Is PD692x0 ready to be configured? */
if (priv->fw_state != PD692X0_FW_OK &&
priv->fw_state != PD692X0_FW_COMPLETE)
return 0;
- ret = pd692x0_of_get_managers(priv, manager);
- if (ret < 0)
+ ret = pd692x0_configure_managers(priv);
+ if (ret)
return ret;
- nmanagers = ret;
- ret = pd692x0_set_ports_matrix(priv, manager, nmanagers, port_matrix);
+ ret = pd692x0_write_ports_matrix(priv);
if (ret)
- goto out;
+ return ret;
- ret = pd692x0_write_ports_matrix(priv, port_matrix);
- if (ret)
- goto out;
+ return 0;
+}
-out:
- for (i = 0; i < nmanagers; i++) {
+static void pd692x0_of_put_managers(struct pd692x0_priv *priv,
+ struct pd692x0_manager *manager)
+{
+ int i, j;
+
+ for (i = 0; i < priv->nmanagers; i++) {
for (j = 0; j < manager[i].nports; j++)
of_node_put(manager[i].port_node[j]);
+ of_node_put(manager[i].node);
}
+}
+
+static void pd692x0_managers_free_pw_budget(struct pd692x0_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < PD692X0_MAX_MANAGERS; i++) {
+ struct regulator *supply;
+
+ if (!priv->manager_reg[i] || !priv->manager_pw_budget[i])
+ continue;
+
+ supply = priv->manager_reg[i]->supply;
+ if (!supply)
+ continue;
+
+ regulator_free_power_budget(supply,
+ priv->manager_pw_budget[i]);
+ }
+}
+
+static int
+pd692x0_save_user_byte(struct pd692x0_priv *priv)
+{
+ struct pd692x0_msg msg, buf;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_SET_USER_BYTE];
+ return pd692x0_sendrecv_msg(priv, &msg, &buf);
+}
+
+static int pd692x0_setup_pi_matrix(struct pse_controller_dev *pcdev)
+{
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_matrix *port_matrix;
+ struct pd692x0_manager *manager;
+ int ret;
+
+ manager = kcalloc(PD692X0_MAX_MANAGERS, sizeof(*manager), GFP_KERNEL);
+ if (!manager)
+ return -ENOMEM;
+
+ port_matrix = devm_kcalloc(&priv->client->dev, PD692X0_MAX_PIS,
+ sizeof(*port_matrix), GFP_KERNEL);
+ if (!port_matrix) {
+ ret = -ENOMEM;
+ goto err_free_manager;
+ }
+ priv->port_matrix = port_matrix;
+
+ ret = pd692x0_of_get_managers(priv, manager);
+ if (ret < 0)
+ goto err_free_manager;
+
+ ret = pd692x0_register_managers_regulator(priv, manager);
+ if (ret)
+ goto err_of_managers;
+
+ ret = pd692x0_req_managers_pw_budget(priv);
+ if (ret)
+ goto err_of_managers;
+
+ ret = pd692x0_set_ports_matrix(priv, manager);
+ if (ret)
+ goto err_managers_req_pw;
+
+ /* Do not init the conf if it is already saved */
+ if (!priv->cfg_saved) {
+ ret = pd692x0_hw_conf_init(priv);
+ if (ret)
+ goto err_managers_req_pw;
+
+ ret = pd692x0_save_user_byte(priv);
+ if (ret)
+ goto err_managers_req_pw;
+ }
+
+ pd692x0_of_put_managers(priv, manager);
+ kfree(manager);
+ return 0;
+
+err_managers_req_pw:
+ pd692x0_managers_free_pw_budget(priv);
+err_of_managers:
+ pd692x0_of_put_managers(priv, manager);
+err_free_manager:
+ kfree(manager);
return ret;
}
@@ -999,13 +1331,12 @@ static int pd692x0_pi_get_voltage(struct pse_controller_dev *pcdev, int id)
return (buf.sub[0] << 8 | buf.sub[1]) * 100000;
}
-static int pd692x0_pi_get_current_limit(struct pse_controller_dev *pcdev,
- int id)
+static int pd692x0_pi_get_pw_limit(struct pse_controller_dev *pcdev,
+ int id)
{
struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
struct pd692x0_msg msg, buf = {0};
- int mW, uV, uA, ret;
- s64 tmp_64;
+ int ret;
msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_PARAM];
msg.sub[2] = id;
@@ -1013,63 +1344,64 @@ static int pd692x0_pi_get_current_limit(struct pse_controller_dev *pcdev,
if (ret < 0)
return ret;
- ret = pd692x0_pi_get_pw_from_table(buf.data[2], buf.data[3]);
- if (ret < 0)
+ return pd692x0_pi_get_pw_from_table(buf.data[0], buf.data[1]);
+}
+
+static int pd692x0_pi_set_pw_limit(struct pse_controller_dev *pcdev,
+ int id, int max_mW)
+{
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct device *dev = &priv->client->dev;
+ struct pd692x0_msg msg, buf = {0};
+ int ret;
+
+ ret = pd692x0_fw_unavailable(priv);
+ if (ret)
return ret;
- mW = ret;
- ret = pd692x0_pi_get_voltage(pcdev, id);
- if (ret < 0)
+ msg = pd692x0_msg_template_list[PD692X0_MSG_SET_PORT_PARAM];
+ msg.sub[2] = id;
+ ret = pd692x0_pi_set_pw_from_table(dev, &msg, max_mW);
+ if (ret)
return ret;
- uV = ret;
- tmp_64 = mW;
- tmp_64 *= 1000000000ull;
- /* uA = mW * 1000000000 / uV */
- uA = DIV_ROUND_CLOSEST_ULL(tmp_64, uV);
- return uA;
+ return pd692x0_sendrecv_msg(priv, &msg, &buf);
}
-static int pd692x0_pi_set_current_limit(struct pse_controller_dev *pcdev,
- int id, int max_uA)
+static int pd692x0_pi_set_prio(struct pse_controller_dev *pcdev, int id,
+ unsigned int prio)
{
struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
- struct device *dev = &priv->client->dev;
struct pd692x0_msg msg, buf = {0};
- int uV, ret, mW;
- s64 tmp_64;
+ int ret;
ret = pd692x0_fw_unavailable(priv);
if (ret)
return ret;
- ret = pd692x0_pi_get_voltage(pcdev, id);
- if (ret < 0)
- return ret;
- uV = ret;
-
msg = pd692x0_msg_template_list[PD692X0_MSG_SET_PORT_PARAM];
msg.sub[2] = id;
- tmp_64 = uV;
- tmp_64 *= max_uA;
- /* mW = uV * uA / 1000000000 */
- mW = DIV_ROUND_CLOSEST_ULL(tmp_64, 1000000000);
- ret = pd692x0_pi_set_pw_from_table(dev, &msg, mW);
- if (ret)
- return ret;
+ /* Controller priority from 1 to 3 */
+ msg.data[4] = prio + 1;
return pd692x0_sendrecv_msg(priv, &msg, &buf);
}
static const struct pse_controller_ops pd692x0_ops = {
.setup_pi_matrix = pd692x0_setup_pi_matrix,
- .ethtool_get_status = pd692x0_ethtool_get_status,
+ .pi_get_admin_state = pd692x0_pi_get_admin_state,
+ .pi_get_pw_status = pd692x0_pi_get_pw_status,
+ .pi_get_ext_state = pd692x0_pi_get_ext_state,
+ .pi_get_pw_class = pd692x0_pi_get_pw_class,
+ .pi_get_actual_pw = pd692x0_pi_get_actual_pw,
.pi_enable = pd692x0_pi_enable,
.pi_disable = pd692x0_pi_disable,
- .pi_is_enabled = pd692x0_pi_is_enabled,
.pi_get_voltage = pd692x0_pi_get_voltage,
- .pi_get_current_limit = pd692x0_pi_get_current_limit,
- .pi_set_current_limit = pd692x0_pi_set_current_limit,
+ .pi_get_pw_limit = pd692x0_pi_get_pw_limit,
+ .pi_set_pw_limit = pd692x0_pi_set_pw_limit,
+ .pi_get_pw_limit_ranges = pd692x0_pi_get_pw_limit_ranges,
+ .pi_get_prio = pd692x0_pi_get_prio,
+ .pi_set_prio = pd692x0_pi_set_prio,
};
#define PD692X0_FW_LINE_MAX_SZ 0xff
@@ -1378,7 +1710,7 @@ static enum fw_upload_err pd692x0_fw_poll_complete(struct fw_upload *fwl)
return FW_UPLOAD_ERR_FW_INVALID;
}
- ret = pd692x0_setup_pi_matrix(&priv->pcdev);
+ ret = pd692x0_hw_conf_init(priv);
if (ret < 0) {
dev_err(&client->dev, "Error configuring ports matrix (%pe)\n",
ERR_PTR(ret));
@@ -1423,6 +1755,7 @@ static const struct fw_upload_ops pd692x0_fw_ops = {
static int pd692x0_i2c_probe(struct i2c_client *client)
{
+ static const char * const regulators[] = { "vdd", "vdda" };
struct pd692x0_msg msg, buf = {0}, zero = {0};
struct device *dev = &client->dev;
struct pd692x0_msg_ver ver;
@@ -1430,6 +1763,12 @@ static int pd692x0_i2c_probe(struct i2c_client *client)
struct fw_upload *fwl;
int ret;
+ ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(regulators),
+ regulators);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to enable regulators\n");
+
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_err(dev, "i2c check functionality failed\n");
return -ENXIO;
@@ -1480,12 +1819,17 @@ static int pd692x0_i2c_probe(struct i2c_client *client)
}
}
+ if (buf.data[2] == PD692X0_USER_BYTE)
+ priv->cfg_saved = true;
+
priv->np = dev->of_node;
priv->pcdev.nr_lines = PD692X0_MAX_PIS;
priv->pcdev.owner = THIS_MODULE;
priv->pcdev.ops = &pd692x0_ops;
priv->pcdev.dev = dev;
priv->pcdev.types = ETHTOOL_PSE_C33;
+ priv->pcdev.supp_budget_eval_strategies = PSE_BUDGET_EVAL_STRAT_DYNAMIC;
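+	/* The controller reports PI priorities 1..3; the PSE core uses 0..2 */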
+ priv->pcdev.pis_prio_max = 2;
ret = devm_pse_controller_register(dev, &priv->pcdev);
if (ret)
return dev_err_probe(dev, ret,
@@ -1505,6 +1849,7 @@ static void pd692x0_i2c_remove(struct i2c_client *client)
{
struct pd692x0_priv *priv = i2c_get_clientdata(client);
+ pd692x0_managers_free_pw_budget(priv);
firmware_upload_unregister(priv->fwl);
}
diff --git a/drivers/net/pse-pd/pse_core.c b/drivers/net/pse-pd/pse_core.c
index 2906ce173f66..23eb3c9d0bcd 100644
--- a/drivers/net/pse-pd/pse_core.c
+++ b/drivers/net/pse-pd/pse_core.c
@@ -6,13 +6,22 @@
//
#include <linux/device.h>
+#include <linux/ethtool.h>
+#include <linux/ethtool_netlink.h>
#include <linux/of.h>
+#include <linux/phy.h>
#include <linux/pse-pd/pse.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
+#include <linux/rtnetlink.h>
+#include <net/net_trackers.h>
+
+#define PSE_PW_D_LIMIT INT_MAX
static DEFINE_MUTEX(pse_list_mutex);
static LIST_HEAD(pse_controller_list);
+static DEFINE_XARRAY_ALLOC(pse_pw_d_map);
+static DEFINE_MUTEX(pse_pw_d_mutex);
/**
* struct pse_control - a PSE control
@@ -22,6 +31,7 @@ static LIST_HEAD(pse_controller_list);
* @list: list entry for the pcdev's PSE controller list
* @id: ID of the PSE line in the PSE controller device
* @refcnt: Number of gets of this pse_control
+ * @attached_phydev: PHY device pointer attached by the PSE control
*/
struct pse_control {
struct pse_controller_dev *pcdev;
@@ -29,6 +39,22 @@ struct pse_control {
struct list_head list;
unsigned int id;
struct kref refcnt;
+ struct phy_device *attached_phydev;
+};
+
+/**
+ * struct pse_power_domain - a PSE power domain
+ * @id: ID of the power domain
+ * @supply: Power supply of the power domain
+ * @refcnt: Number of gets of this pse_power_domain
+ * @budget_eval_strategy: Current power budget evaluation strategy of the
+ * power domain
+ */
+struct pse_power_domain {
+ int id;
+ struct regulator *supply;
+ struct kref refcnt;
+ u32 budget_eval_strategy;
};
static int of_load_single_pse_pi_pairset(struct device_node *node,
@@ -207,6 +233,182 @@ out:
return ret;
}
+/**
+ * pse_control_find_by_id - Find the PSE control from its id
+ * @pcdev: a pointer to the PSE
+ * @id: index of the PSE control
+ *
+ * Return: pse_control pointer or NULL. The control returned has had a
+ * reference added and the pointer is safe until the user calls
+ * pse_control_put() to indicate they have finished with it.
+ */
+static struct pse_control *
+pse_control_find_by_id(struct pse_controller_dev *pcdev, int id)
+{
+ struct pse_control *psec;
+
+ mutex_lock(&pse_list_mutex);
+ list_for_each_entry(psec, &pcdev->pse_control_head, list) {
+ if (psec->id == id) {
+ kref_get(&psec->refcnt);
+ mutex_unlock(&pse_list_mutex);
+ return psec;
+ }
+ }
+ mutex_unlock(&pse_list_mutex);
+ return NULL;
+}
+
+/**
+ * pse_control_get_netdev - Return the netdev associated with a PSE control
+ * @psec: PSE control pointer
+ *
+ * Return: netdev pointer or NULL
+ */
+static struct net_device *pse_control_get_netdev(struct pse_control *psec)
+{
+ ASSERT_RTNL();
+
+ if (!psec || !psec->attached_phydev)
+ return NULL;
+
+ return psec->attached_phydev->attached_dev;
+}
+
+/**
+ * pse_pi_is_hw_enabled - Is PI enabled at the hardware level
+ * @pcdev: a pointer to the PSE controller device
+ * @id: Index of the PI
+ *
+ * Return: 1 if the PI is enabled at the hardware level, 0 if not, or a
+ * failure value on error
+ */
+static int pse_pi_is_hw_enabled(struct pse_controller_dev *pcdev, int id)
+{
+ struct pse_admin_state admin_state = {0};
+ int ret;
+
+ ret = pcdev->ops->pi_get_admin_state(pcdev, id, &admin_state);
+ if (ret < 0)
+ return ret;
+
+	/* The PI is enabled at the hardware level */
+ if (admin_state.podl_admin_state == ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED ||
+ admin_state.c33_admin_state == ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * pse_pi_is_admin_enable_pending - Check if a PI is in the admin enable
+ *				    pending state, which means power is not
+ *				    yet being delivered
+ * @pcdev: a pointer to the PSE controller device
+ * @id: Index of the PI
+ *
+ * Detects if a PI is enabled in software with a PD detected, but the hardware
+ * admin state hasn't been applied yet.
+ *
+ * This function is used in the power delivery and retry mechanisms to determine
+ * which PIs need to have power delivery attempted again.
+ *
+ * Return: true if the PI has admin enable flag set in software but not yet
+ * reflected in the hardware admin state, false otherwise.
+ */
+static bool
+pse_pi_is_admin_enable_pending(struct pse_controller_dev *pcdev, int id)
+{
+ int ret;
+
+ /* PI not enabled or nothing is plugged */
+ if (!pcdev->pi[id].admin_state_enabled ||
+ !pcdev->pi[id].isr_pd_detected)
+ return false;
+
+ ret = pse_pi_is_hw_enabled(pcdev, id);
+ /* PSE PI is already enabled at hardware level */
+ if (ret == 1)
+ return false;
+
+ return true;
+}
+
+static int _pse_pi_delivery_power_sw_pw_ctrl(struct pse_controller_dev *pcdev,
+ int id,
+ struct netlink_ext_ack *extack);
+
+/**
+ * pse_pw_d_retry_power_delivery - Retry power delivery for pending ports in a
+ * PSE power domain
+ * @pcdev: a pointer to the PSE controller device
+ * @pw_d: a pointer to the PSE power domain
+ *
+ * Scans all ports in the specified power domain and attempts to enable power
+ * delivery to any ports that have admin enable state set but don't yet have
+ * hardware power enabled. Used when there are changes in connection status,
+ * admin state, or priority that might allow previously unpowered ports to
+ * receive power, especially in over-budget conditions.
+ */
+static void pse_pw_d_retry_power_delivery(struct pse_controller_dev *pcdev,
+ struct pse_power_domain *pw_d)
+{
+	int prio_max = pcdev->nr_lines;
+	int i, ret = 0;
+
+	for (i = 0; i < pcdev->nr_lines; i++) {
+		struct netlink_ext_ack extack;
+
+ if (pcdev->pi[i].pw_d != pw_d)
+ continue;
+
+ if (!pse_pi_is_admin_enable_pending(pcdev, i))
+ continue;
+
+		/* Do not try to enable a PI with a lower priority (higher
+		 * value) than one that already failed to be enabled.
+		 */
+ if (pcdev->pi[i].prio > prio_max)
+ continue;
+
+ ret = _pse_pi_delivery_power_sw_pw_ctrl(pcdev, i, &extack);
+ if (ret == -ERANGE)
+ prio_max = pcdev->pi[i].prio;
+ }
+}
+
+/**
+ * pse_pw_d_is_sw_pw_control - Determine if power control is software managed
+ * @pcdev: a pointer to the PSE controller device
+ * @pw_d: a pointer to the PSE power domain
+ *
+ * This function determines whether the power control for a specific power
+ * domain is managed by software in the interrupt handler rather than directly
+ * by hardware.
+ *
+ * Software power control is active in the following cases:
+ * - When the budget evaluation strategy is set to static
+ * - When the budget evaluation strategy is disabled but the PSE controller
+ * has an interrupt handler that can report if a Powered Device is connected
+ *
+ * Return: true if the power control of the power domain is managed by software,
+ * false otherwise
+ */
+static bool pse_pw_d_is_sw_pw_control(struct pse_controller_dev *pcdev,
+ struct pse_power_domain *pw_d)
+{
+ if (!pw_d)
+ return false;
+
+ if (pw_d->budget_eval_strategy == PSE_BUDGET_EVAL_STRAT_STATIC)
+ return true;
+ if (pw_d->budget_eval_strategy == PSE_BUDGET_EVAL_STRAT_DISABLED &&
+ pcdev->ops->pi_enable && pcdev->irq)
+ return true;
+
+ return false;
+}
+
static int pse_pi_is_enabled(struct regulator_dev *rdev)
{
struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
@@ -214,22 +416,257 @@ static int pse_pi_is_enabled(struct regulator_dev *rdev)
int id, ret;
ops = pcdev->ops;
- if (!ops->pi_is_enabled)
+ if (!ops->pi_get_admin_state)
return -EOPNOTSUPP;
id = rdev_get_id(rdev);
mutex_lock(&pcdev->lock);
- ret = ops->pi_is_enabled(pcdev, id);
+ if (pse_pw_d_is_sw_pw_control(pcdev, pcdev->pi[id].pw_d)) {
+ ret = pcdev->pi[id].admin_state_enabled;
+ goto out;
+ }
+
+ ret = pse_pi_is_hw_enabled(pcdev, id);
+
+out:
mutex_unlock(&pcdev->lock);
return ret;
}
+/**
+ * pse_pi_deallocate_pw_budget - Deallocate power budget of the PI
+ * @pi: a pointer to the PSE PI
+ */
+static void pse_pi_deallocate_pw_budget(struct pse_pi *pi)
+{
+ if (!pi->pw_d || !pi->pw_allocated_mW)
+ return;
+
+ regulator_free_power_budget(pi->pw_d->supply, pi->pw_allocated_mW);
+ pi->pw_allocated_mW = 0;
+}
+
+/**
+ * _pse_pi_disable - Call disable operation. Assumes the PSE lock has been
+ * acquired.
+ * @pcdev: a pointer to the PSE
+ * @id: index of the PSE control
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int _pse_pi_disable(struct pse_controller_dev *pcdev, int id)
+{
+ const struct pse_controller_ops *ops = pcdev->ops;
+ int ret;
+
+ if (!ops->pi_disable)
+ return -EOPNOTSUPP;
+
+ ret = ops->pi_disable(pcdev, id);
+ if (ret)
+ return ret;
+
+ pse_pi_deallocate_pw_budget(&pcdev->pi[id]);
+
+ if (pse_pw_d_is_sw_pw_control(pcdev, pcdev->pi[id].pw_d))
+ pse_pw_d_retry_power_delivery(pcdev, pcdev->pi[id].pw_d);
+
+ return 0;
+}
+
+/**
+ * pse_disable_pi_pol - Disable a PI to enforce the power budget policy
+ * @pcdev: a pointer to the PSE
+ * @id: index of the PSE PI
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int pse_disable_pi_pol(struct pse_controller_dev *pcdev, int id)
+{
+ unsigned long notifs = ETHTOOL_PSE_EVENT_OVER_BUDGET;
+ struct pse_ntf ntf = {};
+ int ret;
+
+ dev_dbg(pcdev->dev, "Disabling PI %d to free power budget\n", id);
+
+ ret = _pse_pi_disable(pcdev, id);
+ if (ret)
+ notifs |= ETHTOOL_PSE_EVENT_SW_PW_CONTROL_ERROR;
+
+ ntf.notifs = notifs;
+ ntf.id = id;
+ kfifo_in_spinlocked(&pcdev->ntf_fifo, &ntf, 1, &pcdev->ntf_fifo_lock);
+ schedule_work(&pcdev->ntf_work);
+
+ return ret;
+}
+
+/**
+ * pse_disable_pi_prio - Disable all PIs of a given priority inside a PSE
+ * power domain
+ * @pcdev: a pointer to the PSE
+ * @pw_d: a pointer to the PSE power domain
+ * @prio: priority
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int pse_disable_pi_prio(struct pse_controller_dev *pcdev,
+ struct pse_power_domain *pw_d,
+ int prio)
+{
+ int i;
+
+ for (i = 0; i < pcdev->nr_lines; i++) {
+ int ret;
+
+ if (pcdev->pi[i].prio != prio ||
+ pcdev->pi[i].pw_d != pw_d ||
+ pse_pi_is_hw_enabled(pcdev, i) <= 0)
+ continue;
+
+ ret = pse_disable_pi_pol(pcdev, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * pse_pi_allocate_pw_budget_static_prio - Allocate power budget for the PI
+ * when the budget eval strategy is
+ * static
+ * @pcdev: a pointer to the PSE
+ * @id: index of the PSE control
+ * @pw_req: power requested in mW
+ * @extack: extack for error reporting
+ *
+ * Allocates power using static budget evaluation strategy, where allocation
+ * is based on PD classification. When insufficient budget is available,
+ * lower-priority ports (higher priority numbers) are turned off first.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int
+pse_pi_allocate_pw_budget_static_prio(struct pse_controller_dev *pcdev, int id,
+ int pw_req, struct netlink_ext_ack *extack)
+{
+ struct pse_pi *pi = &pcdev->pi[id];
+ int ret, _prio;
+
+ _prio = pcdev->nr_lines;
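+	/* Start from the lowest priority (highest number) and shed already
+	 * powered ports one priority level at a time until the request fits
+	 * in the budget or only ports at this PI's priority or higher
+	 * remain.
+	 */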
+ while (regulator_request_power_budget(pi->pw_d->supply, pw_req) == -ERANGE) {
+ if (_prio <= pi->prio) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PI %d: not enough power budget available",
+ id);
+ return -ERANGE;
+ }
+
+ ret = pse_disable_pi_prio(pcdev, pi->pw_d, _prio);
+ if (ret < 0)
+ return ret;
+
+ _prio--;
+ }
+
+ pi->pw_allocated_mW = pw_req;
+ return 0;
+}
+
+/**
+ * pse_pi_allocate_pw_budget - Allocate power budget for the PI
+ * @pcdev: a pointer to the PSE
+ * @id: index of the PSE control
+ * @pw_req: power requested in mW
+ * @extack: extack for error reporting
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int pse_pi_allocate_pw_budget(struct pse_controller_dev *pcdev, int id,
+ int pw_req, struct netlink_ext_ack *extack)
+{
+ struct pse_pi *pi = &pcdev->pi[id];
+
+ if (!pi->pw_d)
+ return 0;
+
+ /* PSE_BUDGET_EVAL_STRAT_STATIC */
+ if (pi->pw_d->budget_eval_strategy == PSE_BUDGET_EVAL_STRAT_STATIC)
+ return pse_pi_allocate_pw_budget_static_prio(pcdev, id, pw_req,
+ extack);
+
+ return 0;
+}
+
+/**
+ * _pse_pi_delivery_power_sw_pw_ctrl - Enable PSE PI in case of software power
+ * control. Assumes the PSE lock has been
+ * acquired.
+ * @pcdev: a pointer to the PSE
+ * @id: index of the PSE control
+ * @extack: extack for error reporting
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int _pse_pi_delivery_power_sw_pw_ctrl(struct pse_controller_dev *pcdev,
+ int id,
+ struct netlink_ext_ack *extack)
+{
+ const struct pse_controller_ops *ops = pcdev->ops;
+ struct pse_pi *pi = &pcdev->pi[id];
+ int ret, pw_req;
+
+ if (!ops->pi_get_pw_req) {
+ /* No power allocation management */
+ ret = ops->pi_enable(pcdev, id);
+ if (ret)
+ NL_SET_ERR_MSG_FMT(extack,
+ "PI %d: enable error %d",
+ id, ret);
+ return ret;
+ }
+
+ ret = ops->pi_get_pw_req(pcdev, id);
+ if (ret < 0)
+ return ret;
+
+ pw_req = ret;
+
+	/* Compare the requested power with the port power limit and use
+	 * the lower of the two.
+	 */
+ if (ops->pi_get_pw_limit) {
+ ret = ops->pi_get_pw_limit(pcdev, id);
+ if (ret < 0)
+ return ret;
+
+ if (ret < pw_req)
+ pw_req = ret;
+ }
+
+ ret = pse_pi_allocate_pw_budget(pcdev, id, pw_req, extack);
+ if (ret)
+ return ret;
+
+ ret = ops->pi_enable(pcdev, id);
+ if (ret) {
+ pse_pi_deallocate_pw_budget(pi);
+ NL_SET_ERR_MSG_FMT(extack,
+ "PI %d: enable error %d",
+ id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int pse_pi_enable(struct regulator_dev *rdev)
{
struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
const struct pse_controller_ops *ops;
- int id, ret;
+ int id, ret = 0;
ops = pcdev->ops;
if (!ops->pi_enable)
@@ -237,6 +674,23 @@ static int pse_pi_enable(struct regulator_dev *rdev)
id = rdev_get_id(rdev);
mutex_lock(&pcdev->lock);
+ if (pse_pw_d_is_sw_pw_control(pcdev, pcdev->pi[id].pw_d)) {
+		/* Manage the enabled status in software.
+		 * The real enable sequence happens once a Powered Device is
+		 * detected.
+		 */
+ if (pcdev->pi[id].isr_pd_detected) {
+ struct netlink_ext_ack extack;
+
+ ret = _pse_pi_delivery_power_sw_pw_ctrl(pcdev, id, &extack);
+ }
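+		/* -ERANGE means the power budget is currently exhausted:
+		 * keep the admin state enabled so that power delivery is
+		 * retried once budget is freed again (see
+		 * pse_pw_d_retry_power_delivery()).
+		 */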
+ if (!ret || ret == -ERANGE) {
+ pcdev->pi[id].admin_state_enabled = 1;
+ ret = 0;
+ }
+ mutex_unlock(&pcdev->lock);
+ return ret;
+ }
+
ret = ops->pi_enable(pcdev, id);
if (!ret)
pcdev->pi[id].admin_state_enabled = 1;
@@ -248,21 +702,18 @@ static int pse_pi_enable(struct regulator_dev *rdev)
static int pse_pi_disable(struct regulator_dev *rdev)
{
struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
- const struct pse_controller_ops *ops;
+ struct pse_pi *pi;
int id, ret;
- ops = pcdev->ops;
- if (!ops->pi_disable)
- return -EOPNOTSUPP;
-
id = rdev_get_id(rdev);
+ pi = &pcdev->pi[id];
mutex_lock(&pcdev->lock);
- ret = ops->pi_disable(pcdev, id);
+ ret = _pse_pi_disable(pcdev, id);
if (!ret)
- pcdev->pi[id].admin_state_enabled = 0;
- mutex_unlock(&pcdev->lock);
+ pi->admin_state_enabled = 0;
- return ret;
+ mutex_unlock(&pcdev->lock);
+ return 0;
}
static int _pse_pi_get_voltage(struct regulator_dev *rdev)
@@ -291,32 +742,24 @@ static int pse_pi_get_voltage(struct regulator_dev *rdev)
return ret;
}
-static int _pse_ethtool_get_status(struct pse_controller_dev *pcdev,
- int id,
- struct netlink_ext_ack *extack,
- struct pse_control_status *status);
-
static int pse_pi_get_current_limit(struct regulator_dev *rdev)
{
struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
const struct pse_controller_ops *ops;
- struct netlink_ext_ack extack = {};
- struct pse_control_status st = {};
- int id, uV, ret;
+ int id, uV, mW, ret;
s64 tmp_64;
ops = pcdev->ops;
id = rdev_get_id(rdev);
+ if (!ops->pi_get_pw_limit || !ops->pi_get_voltage)
+ return -EOPNOTSUPP;
+
mutex_lock(&pcdev->lock);
- if (ops->pi_get_current_limit) {
- ret = ops->pi_get_current_limit(pcdev, id);
+ ret = ops->pi_get_pw_limit(pcdev, id);
+ if (ret < 0)
goto out;
- }
+ mW = ret;
- /* If pi_get_current_limit() callback not populated get voltage
- * from pi_get_voltage() and power limit from ethtool_get_status()
- * to calculate current limit.
- */
ret = _pse_pi_get_voltage(rdev);
if (!ret) {
dev_err(pcdev->dev, "Voltage null\n");
@@ -327,16 +770,7 @@ static int pse_pi_get_current_limit(struct regulator_dev *rdev)
goto out;
uV = ret;
- ret = _pse_ethtool_get_status(pcdev, id, &extack, &st);
- if (ret)
- goto out;
-
- if (!st.c33_avail_pw_limit) {
- ret = -ENODATA;
- goto out;
- }
-
- tmp_64 = st.c33_avail_pw_limit;
+ tmp_64 = mW;
tmp_64 *= 1000000000ull;
/* uA = mW * 1000000000 / uV */
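+	/* e.g. a 30000 mW limit at 54 V (54000000 uV) gives ~555556 uA */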
ret = DIV_ROUND_CLOSEST_ULL(tmp_64, uV);
@@ -351,15 +785,33 @@ static int pse_pi_set_current_limit(struct regulator_dev *rdev, int min_uA,
{
struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
const struct pse_controller_ops *ops;
- int id, ret;
+ int id, mW, ret;
+ s64 tmp_64;
ops = pcdev->ops;
- if (!ops->pi_set_current_limit)
+ if (!ops->pi_set_pw_limit || !ops->pi_get_voltage)
return -EOPNOTSUPP;
+ if (max_uA > MAX_PI_CURRENT)
+ return -ERANGE;
+
id = rdev_get_id(rdev);
mutex_lock(&pcdev->lock);
- ret = ops->pi_set_current_limit(pcdev, id, max_uA);
+ ret = _pse_pi_get_voltage(rdev);
+ if (!ret) {
+ dev_err(pcdev->dev, "Voltage null\n");
+ ret = -ERANGE;
+ goto out;
+ }
+ if (ret < 0)
+ goto out;
+
+ tmp_64 = ret;
+ tmp_64 *= max_uA;
+ /* mW = uA * uV / 1000000000 */
+ mW = DIV_ROUND_CLOSEST_ULL(tmp_64, 1000000000);
+ ret = ops->pi_set_pw_limit(pcdev, id, mW);
+out:
mutex_unlock(&pcdev->lock);
return ret;
@@ -403,17 +855,16 @@ devm_pse_pi_regulator_register(struct pse_controller_dev *pcdev,
rinit_data->constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS;
- if (pcdev->ops->pi_set_current_limit) {
+ if (pcdev->ops->pi_set_pw_limit)
rinit_data->constraints.valid_ops_mask |=
REGULATOR_CHANGE_CURRENT;
- rinit_data->constraints.max_uA = MAX_PI_CURRENT;
- }
rinit_data->supply_regulator = "vpwr";
rconfig.dev = pcdev->dev;
rconfig.driver_data = pcdev;
rconfig.init_data = rinit_data;
+ rconfig.of_node = pcdev->pi[id].np;
rdev = devm_regulator_register(pcdev->dev, rdesc, &rconfig);
if (IS_ERR(rdev)) {
@@ -427,6 +878,158 @@ devm_pse_pi_regulator_register(struct pse_controller_dev *pcdev,
return 0;
}
+static void __pse_pw_d_release(struct kref *kref)
+{
+ struct pse_power_domain *pw_d = container_of(kref,
+ struct pse_power_domain,
+ refcnt);
+
+ regulator_put(pw_d->supply);
+ xa_erase(&pse_pw_d_map, pw_d->id);
+ mutex_unlock(&pse_pw_d_mutex);
+}
+
+/**
+ * pse_flush_pw_ds - flush all PSE power domains of a PSE
+ * @pcdev: a pointer to the initialized PSE controller device
+ */
+static void pse_flush_pw_ds(struct pse_controller_dev *pcdev)
+{
+ struct pse_power_domain *pw_d;
+ int i;
+
+ for (i = 0; i < pcdev->nr_lines; i++) {
+ if (!pcdev->pi[i].pw_d)
+ continue;
+
+ pw_d = xa_load(&pse_pw_d_map, pcdev->pi[i].pw_d->id);
+ if (!pw_d)
+ continue;
+
+ kref_put_mutex(&pw_d->refcnt, __pse_pw_d_release,
+ &pse_pw_d_mutex);
+ }
+}
+
+/**
+ * devm_pse_alloc_pw_d - allocate a new PSE power domain for a device
+ * @dev: device that is registering this PSE power domain
+ *
+ * Return: Pointer to the newly allocated PSE power domain or error pointers
+ */
+static struct pse_power_domain *devm_pse_alloc_pw_d(struct device *dev)
+{
+ struct pse_power_domain *pw_d;
+ int index, ret;
+
+ pw_d = devm_kzalloc(dev, sizeof(*pw_d), GFP_KERNEL);
+ if (!pw_d)
+ return ERR_PTR(-ENOMEM);
+
+ ret = xa_alloc(&pse_pw_d_map, &index, pw_d, XA_LIMIT(1, PSE_PW_D_LIMIT),
+ GFP_KERNEL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ kref_init(&pw_d->refcnt);
+ pw_d->id = index;
+ return pw_d;
+}
+
+/**
+ * pse_register_pw_ds - register the PSE power domains for a PSE
+ * @pcdev: a pointer to the PSE controller device
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int pse_register_pw_ds(struct pse_controller_dev *pcdev)
+{
+ int i, ret = 0;
+
+ mutex_lock(&pse_pw_d_mutex);
+ for (i = 0; i < pcdev->nr_lines; i++) {
+ struct regulator_dev *rdev = pcdev->pi[i].rdev;
+ struct pse_power_domain *pw_d;
+ struct regulator *supply;
+ bool present = false;
+ unsigned long index;
+
+ /* No regulator or regulator parent supply registered.
+ * We need a regulator parent to register a PSE power domain
+ */
+ if (!rdev || !rdev->supply)
+ continue;
+
+ xa_for_each(&pse_pw_d_map, index, pw_d) {
+ /* Power supply already registered as a PSE power
+ * domain.
+ */
+ if (regulator_is_equal(pw_d->supply, rdev->supply)) {
+ present = true;
+ pcdev->pi[i].pw_d = pw_d;
+ break;
+ }
+ }
+ if (present) {
+ kref_get(&pw_d->refcnt);
+ continue;
+ }
+
+ pw_d = devm_pse_alloc_pw_d(pcdev->dev);
+ if (IS_ERR(pw_d)) {
+ ret = PTR_ERR(pw_d);
+ goto out;
+ }
+
+ supply = regulator_get(&rdev->dev, rdev->supply_name);
+ if (IS_ERR(supply)) {
+ xa_erase(&pse_pw_d_map, pw_d->id);
+ ret = PTR_ERR(supply);
+ goto out;
+ }
+
+ pw_d->supply = supply;
+ if (pcdev->supp_budget_eval_strategies)
+ pw_d->budget_eval_strategy = pcdev->supp_budget_eval_strategies;
+ else
+ pw_d->budget_eval_strategy = PSE_BUDGET_EVAL_STRAT_DISABLED;
+ kref_init(&pw_d->refcnt);
+ pcdev->pi[i].pw_d = pw_d;
+ }
+
+out:
+ mutex_unlock(&pse_pw_d_mutex);
+ return ret;
+}
+
+/**
+ * pse_send_ntf_worker - Worker to send PSE notifications
+ * @work: work object
+ *
+ * Manage and send PSE netlink notifications using a workqueue to avoid
+ * deadlock between pcdev_lock and pse_list_mutex.
+ */
+static void pse_send_ntf_worker(struct work_struct *work)
+{
+ struct pse_controller_dev *pcdev;
+ struct pse_ntf ntf;
+
+ pcdev = container_of(work, struct pse_controller_dev, ntf_work);
+
+ while (kfifo_out(&pcdev->ntf_fifo, &ntf, 1)) {
+ struct net_device *netdev;
+ struct pse_control *psec;
+
+ psec = pse_control_find_by_id(pcdev, ntf.id);
+ rtnl_lock();
+ netdev = pse_control_get_netdev(psec);
+ if (netdev)
+ ethnl_pse_send_ntf(netdev, ntf.notifs);
+ rtnl_unlock();
+ pse_control_put(psec);
+ }
+}
+
/**
* pse_controller_register - register a PSE controller device
* @pcdev: a pointer to the initialized PSE controller device
@@ -440,10 +1043,24 @@ int pse_controller_register(struct pse_controller_dev *pcdev)
mutex_init(&pcdev->lock);
INIT_LIST_HEAD(&pcdev->pse_control_head);
+ spin_lock_init(&pcdev->ntf_fifo_lock);
+ ret = kfifo_alloc(&pcdev->ntf_fifo, pcdev->nr_lines, GFP_KERNEL);
+ if (ret) {
+ dev_err(pcdev->dev, "failed to allocate kfifo notifications\n");
+ return ret;
+ }
+ INIT_WORK(&pcdev->ntf_work, pse_send_ntf_worker);
if (!pcdev->nr_lines)
pcdev->nr_lines = 1;
+ if (!pcdev->ops->pi_get_admin_state ||
+ !pcdev->ops->pi_get_pw_status) {
+ dev_err(pcdev->dev,
+ "Mandatory status report callbacks are missing");
+ return -EINVAL;
+ }
+
ret = of_load_pse_pis(pcdev);
if (ret)
return ret;
@@ -479,6 +1096,10 @@ int pse_controller_register(struct pse_controller_dev *pcdev)
return ret;
}
+ ret = pse_register_pw_ds(pcdev);
+ if (ret)
+ return ret;
+
mutex_lock(&pse_list_mutex);
list_add(&pcdev->list, &pse_controller_list);
mutex_unlock(&pse_list_mutex);
@@ -493,7 +1114,12 @@ EXPORT_SYMBOL_GPL(pse_controller_register);
*/
void pse_controller_unregister(struct pse_controller_dev *pcdev)
{
+ pse_flush_pw_ds(pcdev);
pse_release_pis(pcdev);
+ if (pcdev->irq)
+ disable_irq(pcdev->irq);
+ cancel_work_sync(&pcdev->ntf_work);
+ kfifo_free(&pcdev->ntf_fifo);
mutex_lock(&pse_list_mutex);
list_del(&pcdev->list);
mutex_unlock(&pse_list_mutex);
@@ -540,6 +1166,191 @@ int devm_pse_controller_register(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_pse_controller_register);
+struct pse_irq {
+ struct pse_controller_dev *pcdev;
+ struct pse_irq_desc desc;
+ unsigned long *notifs;
+};
+
+/**
+ * pse_to_regulator_notifs - Convert PSE notifications to Regulator
+ * notifications
+ * @notifs: PSE notifications
+ *
+ * Return: Regulator notifications
+ */
+static unsigned long pse_to_regulator_notifs(unsigned long notifs)
+{
+ unsigned long rnotifs = 0;
+
+ if (notifs & ETHTOOL_PSE_EVENT_OVER_CURRENT)
+ rnotifs |= REGULATOR_EVENT_OVER_CURRENT;
+ if (notifs & ETHTOOL_PSE_EVENT_OVER_TEMP)
+ rnotifs |= REGULATOR_EVENT_OVER_TEMP;
+
+ return rnotifs;
+}
+
+/**
+ * pse_set_config_isr - Set PSE control config according to the PSE
+ * notifications
+ * @pcdev: a pointer to the PSE
+ * @id: index of the PSE control
+ * @notifs: PSE event notifications
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int pse_set_config_isr(struct pse_controller_dev *pcdev, int id,
+ unsigned long notifs)
+{
+ int ret = 0;
+
+ if (notifs & PSE_BUDGET_EVAL_STRAT_DYNAMIC)
+ return 0;
+
+ if ((notifs & ETHTOOL_C33_PSE_EVENT_DISCONNECTION) &&
+ ((notifs & ETHTOOL_C33_PSE_EVENT_DETECTION) ||
+ (notifs & ETHTOOL_C33_PSE_EVENT_CLASSIFICATION))) {
+ dev_dbg(pcdev->dev,
+ "PI %d: error, connection and disconnection reported simultaneously",
+ id);
+ return -EINVAL;
+ }
+
+ if (notifs & ETHTOOL_C33_PSE_EVENT_CLASSIFICATION) {
+ struct netlink_ext_ack extack;
+
+ pcdev->pi[id].isr_pd_detected = true;
+ if (pcdev->pi[id].admin_state_enabled) {
+ ret = _pse_pi_delivery_power_sw_pw_ctrl(pcdev, id,
+ &extack);
+ if (ret == -ERANGE)
+ ret = 0;
+ }
+ } else if (notifs & ETHTOOL_C33_PSE_EVENT_DISCONNECTION) {
+ if (pcdev->pi[id].admin_state_enabled &&
+ pcdev->pi[id].isr_pd_detected)
+ ret = _pse_pi_disable(pcdev, id);
+ pcdev->pi[id].isr_pd_detected = false;
+ }
+
+ return ret;
+}
+
+/**
+ * pse_isr - IRQ handler for PSE
+ * @irq: irq number
+ * @data: pointer to user interrupt structure
+ *
+ * Return: irqreturn_t - status of IRQ
+ */
+static irqreturn_t pse_isr(int irq, void *data)
+{
+ struct pse_controller_dev *pcdev;
+ unsigned long notifs_mask = 0;
+ struct pse_irq_desc *desc;
+ struct pse_irq *h = data;
+ int ret, i;
+
+ desc = &h->desc;
+ pcdev = h->pcdev;
+
+ /* Clear notifs mask */
+ memset(h->notifs, 0, pcdev->nr_lines * sizeof(*h->notifs));
+ mutex_lock(&pcdev->lock);
+ ret = desc->map_event(irq, pcdev, h->notifs, &notifs_mask);
+ if (ret || !notifs_mask) {
+ mutex_unlock(&pcdev->lock);
+ return IRQ_NONE;
+ }
+
+ for_each_set_bit(i, &notifs_mask, pcdev->nr_lines) {
+ unsigned long notifs, rnotifs;
+ struct pse_ntf ntf = {};
+
+		/* Do nothing if the PI is not described */
+ if (!pcdev->pi[i].rdev)
+ continue;
+
+ notifs = h->notifs[i];
+ if (pse_pw_d_is_sw_pw_control(pcdev, pcdev->pi[i].pw_d)) {
+ ret = pse_set_config_isr(pcdev, i, notifs);
+ if (ret)
+ notifs |= ETHTOOL_PSE_EVENT_SW_PW_CONTROL_ERROR;
+ }
+
+ dev_dbg(h->pcdev->dev,
+ "Sending PSE notification EVT 0x%lx\n", notifs);
+
+ ntf.notifs = notifs;
+ ntf.id = i;
+ kfifo_in_spinlocked(&pcdev->ntf_fifo, &ntf, 1,
+ &pcdev->ntf_fifo_lock);
+ schedule_work(&pcdev->ntf_work);
+
+ rnotifs = pse_to_regulator_notifs(notifs);
+ regulator_notifier_call_chain(pcdev->pi[i].rdev, rnotifs,
+ NULL);
+ }
+
+ mutex_unlock(&pcdev->lock);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * devm_pse_irq_helper - Register IRQ based PSE event notifier
+ * @pcdev: a pointer to the PSE
+ * @irq: the irq value to be passed to request_irq
+ * @irq_flags: the flags to be passed to request_irq
+ * @d: PSE interrupt description
+ *
+ * Return: 0 on success and errno on failure
+ */
+int devm_pse_irq_helper(struct pse_controller_dev *pcdev, int irq,
+ int irq_flags, const struct pse_irq_desc *d)
+{
+ struct device *dev = pcdev->dev;
+ size_t irq_name_len;
+ struct pse_irq *h;
+ char *irq_name;
+ int ret;
+
+ if (!d || !d->map_event || !d->name)
+ return -EINVAL;
+
+ h = devm_kzalloc(dev, sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return -ENOMEM;
+
+ h->pcdev = pcdev;
+ h->desc = *d;
+
+ /* IRQ name len is pcdev dev name + 5 char + irq desc name + 1 */
+ irq_name_len = strlen(dev_name(pcdev->dev)) + 5 + strlen(d->name) + 1;
+ irq_name = devm_kzalloc(dev, irq_name_len, GFP_KERNEL);
+ if (!irq_name)
+ return -ENOMEM;
+
+ snprintf(irq_name, irq_name_len, "pse-%s:%s", dev_name(pcdev->dev),
+ d->name);
+
+ h->notifs = devm_kcalloc(dev, pcdev->nr_lines,
+ sizeof(*h->notifs), GFP_KERNEL);
+ if (!h->notifs)
+ return -ENOMEM;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, pse_isr,
+ IRQF_ONESHOT | irq_flags,
+ irq_name, h);
+ if (ret)
+ dev_err(pcdev->dev, "Failed to request IRQ %d\n", irq);
+
+ pcdev->irq = irq;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_pse_irq_helper);
+
/* PSE control section */
static void __pse_control_release(struct kref *kref)
@@ -582,7 +1393,8 @@ void pse_control_put(struct pse_control *psec)
EXPORT_SYMBOL_GPL(pse_control_put);
static struct pse_control *
-pse_control_get_internal(struct pse_controller_dev *pcdev, unsigned int index)
+pse_control_get_internal(struct pse_controller_dev *pcdev, unsigned int index,
+ struct phy_device *phydev)
{
struct pse_control *psec;
int ret;
@@ -605,6 +1417,20 @@ pse_control_get_internal(struct pse_controller_dev *pcdev, unsigned int index)
goto free_psec;
}
+ if (!pcdev->ops->pi_get_admin_state) {
+ ret = -EOPNOTSUPP;
+ goto free_psec;
+ }
+
+ /* Initialize admin_state_enabled before the regulator_get. This
+ * aims to have the right value reported in the first is_enabled
+ * call in case of control managed by software.
+ */
+ ret = pse_pi_is_hw_enabled(pcdev, index);
+ if (ret < 0)
+ goto free_psec;
+
+ pcdev->pi[index].admin_state_enabled = ret;
psec->ps = devm_regulator_get_exclusive(pcdev->dev,
rdev_get_name(pcdev->pi[index].rdev));
if (IS_ERR(psec->ps)) {
@@ -612,21 +1438,14 @@ pse_control_get_internal(struct pse_controller_dev *pcdev, unsigned int index)
goto put_module;
}
- ret = regulator_is_enabled(psec->ps);
- if (ret < 0)
- goto regulator_put;
-
- pcdev->pi[index].admin_state_enabled = ret;
-
psec->pcdev = pcdev;
list_add(&psec->list, &pcdev->pse_control_head);
psec->id = index;
+ psec->attached_phydev = phydev;
kref_init(&psec->refcnt);
return psec;
-regulator_put:
- devm_regulator_put(psec->ps);
put_module:
module_put(pcdev->owner);
free_psec:
@@ -676,7 +1495,8 @@ static int psec_id_xlate(struct pse_controller_dev *pcdev,
return pse_spec->args[0];
}
-struct pse_control *of_pse_control_get(struct device_node *node)
+struct pse_control *of_pse_control_get(struct device_node *node,
+ struct phy_device *phydev)
{
struct pse_controller_dev *r, *pcdev;
struct of_phandle_args args;
@@ -726,7 +1546,7 @@ struct pse_control *of_pse_control_get(struct device_node *node)
}
/* pse_list_mutex also protects the pcdev's pse_control list */
- psec = pse_control_get_internal(pcdev, psec_id);
+ psec = pse_control_get_internal(pcdev, psec_id, phydev);
out:
mutex_unlock(&pse_list_mutex);
@@ -736,21 +1556,33 @@ out:
}
EXPORT_SYMBOL_GPL(of_pse_control_get);
-static int _pse_ethtool_get_status(struct pse_controller_dev *pcdev,
- int id,
- struct netlink_ext_ack *extack,
- struct pse_control_status *status)
+/**
+ * pse_get_sw_admin_state - Convert the software admin state to c33 or podl
+ * admin state value used in the standard
+ * @psec: PSE control pointer
+ * @admin_state: a pointer to the admin_state structure
+ */
+static void pse_get_sw_admin_state(struct pse_control *psec,
+ struct pse_admin_state *admin_state)
{
- const struct pse_controller_ops *ops;
-
- ops = pcdev->ops;
- if (!ops->ethtool_get_status) {
- NL_SET_ERR_MSG(extack,
- "PSE driver does not support status report");
- return -EOPNOTSUPP;
+ struct pse_pi *pi = &psec->pcdev->pi[psec->id];
+
+ if (pse_has_podl(psec)) {
+ if (pi->admin_state_enabled)
+ admin_state->podl_admin_state =
+ ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED;
+ else
+ admin_state->podl_admin_state =
+ ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED;
+ }
+ if (pse_has_c33(psec)) {
+ if (pi->admin_state_enabled)
+ admin_state->c33_admin_state =
+ ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
+ else
+ admin_state->c33_admin_state =
+ ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
}
-
- return ops->ethtool_get_status(pcdev, id, extack, status);
}
/**
@@ -763,15 +1595,111 @@ static int _pse_ethtool_get_status(struct pse_controller_dev *pcdev,
*/
int pse_ethtool_get_status(struct pse_control *psec,
struct netlink_ext_ack *extack,
- struct pse_control_status *status)
+ struct ethtool_pse_control_status *status)
{
- int err;
+ struct pse_admin_state admin_state = {0};
+ struct pse_pw_status pw_status = {0};
+ const struct pse_controller_ops *ops;
+ struct pse_controller_dev *pcdev;
+ struct pse_pi *pi;
+ int ret;
- mutex_lock(&psec->pcdev->lock);
- err = _pse_ethtool_get_status(psec->pcdev, psec->id, extack, status);
- mutex_unlock(&psec->pcdev->lock);
+ pcdev = psec->pcdev;
+ ops = pcdev->ops;
- return err;
+ pi = &pcdev->pi[psec->id];
+ mutex_lock(&pcdev->lock);
+ if (pi->pw_d) {
+ status->pw_d_id = pi->pw_d->id;
+ if (pse_pw_d_is_sw_pw_control(pcdev, pi->pw_d)) {
+ pse_get_sw_admin_state(psec, &admin_state);
+ } else {
+ ret = ops->pi_get_admin_state(pcdev, psec->id,
+ &admin_state);
+ if (ret)
+ goto out;
+ }
+ status->podl_admin_state = admin_state.podl_admin_state;
+ status->c33_admin_state = admin_state.c33_admin_state;
+
+ switch (pi->pw_d->budget_eval_strategy) {
+ case PSE_BUDGET_EVAL_STRAT_STATIC:
+ status->prio_max = pcdev->nr_lines - 1;
+ status->prio = pi->prio;
+ break;
+ case PSE_BUDGET_EVAL_STRAT_DYNAMIC:
+ status->prio_max = pcdev->pis_prio_max;
+ if (ops->pi_get_prio) {
+ ret = ops->pi_get_prio(pcdev, psec->id);
+ if (ret < 0)
+ goto out;
+
+ status->prio = ret;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ ret = ops->pi_get_pw_status(pcdev, psec->id, &pw_status);
+ if (ret)
+ goto out;
+ status->podl_pw_status = pw_status.podl_pw_status;
+ status->c33_pw_status = pw_status.c33_pw_status;
+
+ if (ops->pi_get_ext_state) {
+ struct pse_ext_state_info ext_state_info = {0};
+
+ ret = ops->pi_get_ext_state(pcdev, psec->id,
+ &ext_state_info);
+ if (ret)
+ goto out;
+
+ memcpy(&status->c33_ext_state_info,
+ &ext_state_info.c33_ext_state_info,
+ sizeof(status->c33_ext_state_info));
+ }
+
+ if (ops->pi_get_pw_class) {
+ ret = ops->pi_get_pw_class(pcdev, psec->id);
+ if (ret < 0)
+ goto out;
+
+ status->c33_pw_class = ret;
+ }
+
+ if (ops->pi_get_actual_pw) {
+ ret = ops->pi_get_actual_pw(pcdev, psec->id);
+ if (ret < 0)
+ goto out;
+
+ status->c33_actual_pw = ret;
+ }
+
+ if (ops->pi_get_pw_limit) {
+ ret = ops->pi_get_pw_limit(pcdev, psec->id);
+ if (ret < 0)
+ goto out;
+
+ status->c33_avail_pw_limit = ret;
+ }
+
+ if (ops->pi_get_pw_limit_ranges) {
+ struct pse_pw_limit_ranges pw_limit_ranges = {0};
+
+ ret = ops->pi_get_pw_limit_ranges(pcdev, psec->id,
+ &pw_limit_ranges);
+ if (ret < 0)
+ goto out;
+
+ status->c33_pw_limit_ranges =
+ pw_limit_ranges.c33_pw_limit_ranges;
+ status->c33_pw_limit_nb_ranges = ret;
+ }
+out:
+ mutex_unlock(&psec->pcdev->lock);
+ return ret;
}
EXPORT_SYMBOL_GPL(pse_ethtool_get_status);
@@ -862,6 +1790,52 @@ int pse_ethtool_set_config(struct pse_control *psec,
EXPORT_SYMBOL_GPL(pse_ethtool_set_config);
/**
+ * pse_pi_update_pw_budget - Update the allocated PSE power budget with a
+ *			     new power request in mW
+ * @pcdev: a pointer to the PSE controller device
+ * @id: index of the PSE PI
+ * @pw_req: power requested
+ * @extack: extack for reporting useful error messages
+ *
+ * Return: Previous power allocated on success and failure value on error
+ */
+static int pse_pi_update_pw_budget(struct pse_controller_dev *pcdev, int id,
+ const unsigned int pw_req,
+ struct netlink_ext_ack *extack)
+{
+ struct pse_pi *pi = &pcdev->pi[id];
+ int previous_pw_allocated;
+ int pw_diff, ret = 0;
+
+	/* We don't want the pw_allocated_mW value to change in the middle
+	 * of a power budget update
+	 */
+ mutex_lock(&pcdev->lock);
+ previous_pw_allocated = pi->pw_allocated_mW;
+ pw_diff = pw_req - previous_pw_allocated;
+ if (!pw_diff) {
+ goto out;
+ } else if (pw_diff > 0) {
+ ret = regulator_request_power_budget(pi->pw_d->supply, pw_diff);
+ if (ret) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PI %d: not enough power budget available",
+ id);
+ goto out;
+ }
+
+ } else {
+ regulator_free_power_budget(pi->pw_d->supply, -pw_diff);
+ }
+ pi->pw_allocated_mW = pw_req;
+ ret = previous_pw_allocated;
+
+out:
+ mutex_unlock(&pcdev->lock);
+ return ret;
+}
+
+/**
* pse_ethtool_set_pw_limit - set PSE control power limit
* @psec: PSE control pointer
* @extack: extack for reporting useful error messages
@@ -873,9 +1847,12 @@ int pse_ethtool_set_pw_limit(struct pse_control *psec,
struct netlink_ext_ack *extack,
const unsigned int pw_limit)
{
- int uV, uA, ret;
+ int uV, uA, ret, previous_pw_allocated = 0;
s64 tmp_64;
+ if (pw_limit > MAX_PI_PW)
+ return -ERANGE;
+
ret = regulator_get_voltage(psec->ps);
if (!ret) {
NL_SET_ERR_MSG(extack,
@@ -894,10 +1871,100 @@ int pse_ethtool_set_pw_limit(struct pse_control *psec,
/* uA = mW * 1000000000 / uV */
uA = DIV_ROUND_CLOSEST_ULL(tmp_64, uV);
- return regulator_set_current_limit(psec->ps, 0, uA);
+	/* Update the power budget only in the software power control case
+	 * and if a Powered Device is being powered.
+	 */
+ if (pse_pw_d_is_sw_pw_control(psec->pcdev,
+ psec->pcdev->pi[psec->id].pw_d) &&
+ psec->pcdev->pi[psec->id].admin_state_enabled &&
+ psec->pcdev->pi[psec->id].isr_pd_detected) {
+ ret = pse_pi_update_pw_budget(psec->pcdev, psec->id,
+ pw_limit, extack);
+ if (ret < 0)
+ return ret;
+ previous_pw_allocated = ret;
+ }
+
+ ret = regulator_set_current_limit(psec->ps, 0, uA);
+ if (ret < 0 && previous_pw_allocated) {
+ pse_pi_update_pw_budget(psec->pcdev, psec->id,
+ previous_pw_allocated, extack);
+ }
+
+ return ret;
}
EXPORT_SYMBOL_GPL(pse_ethtool_set_pw_limit);
+/**
+ * pse_ethtool_set_prio - Set PSE PI priority according to the budget
+ * evaluation strategy
+ * @psec: PSE control pointer
+ * @extack: extack for reporting useful error messages
+ * @prio: priority value
+ *
+ * Return: 0 on success and failure value on error
+ */
+int pse_ethtool_set_prio(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ unsigned int prio)
+{
+ struct pse_controller_dev *pcdev = psec->pcdev;
+ const struct pse_controller_ops *ops;
+ int ret = 0;
+
+ if (!pcdev->pi[psec->id].pw_d) {
+ NL_SET_ERR_MSG(extack, "no power domain attached");
+ return -EOPNOTSUPP;
+ }
+
+	/* We don't want the priority to change in the middle of an
+	 * enable/disable call or a priority mode change
+	 */
+ mutex_lock(&pcdev->lock);
+ switch (pcdev->pi[psec->id].pw_d->budget_eval_strategy) {
+ case PSE_BUDGET_EVAL_STRAT_STATIC:
+ if (prio >= pcdev->nr_lines) {
+ NL_SET_ERR_MSG_FMT(extack,
+					   "priority %d exceeds priority max %d",
+ prio, pcdev->nr_lines);
+ ret = -ERANGE;
+ goto out;
+ }
+
+ pcdev->pi[psec->id].prio = prio;
+ pse_pw_d_retry_power_delivery(pcdev, pcdev->pi[psec->id].pw_d);
+ break;
+
+ case PSE_BUDGET_EVAL_STRAT_DYNAMIC:
+ ops = psec->pcdev->ops;
+ if (!ops->pi_set_prio) {
+ NL_SET_ERR_MSG(extack,
+ "pse driver does not support setting port priority");
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (prio > pcdev->pis_prio_max) {
+ NL_SET_ERR_MSG_FMT(extack,
+					   "priority %d exceeds priority max %d",
+ prio, pcdev->pis_prio_max);
+ ret = -ERANGE;
+ goto out;
+ }
+
+ ret = ops->pi_set_prio(pcdev, psec->id, prio);
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+out:
+ mutex_unlock(&pcdev->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pse_ethtool_set_prio);
+
bool pse_has_podl(struct pse_control *psec)
{
return psec->pcdev->types & ETHTOOL_PSE_PODL;
diff --git a/drivers/net/pse-pd/pse_regulator.c b/drivers/net/pse-pd/pse_regulator.c
index 64ab36974fe0..6ce6773fff31 100644
--- a/drivers/net/pse-pd/pse_regulator.c
+++ b/drivers/net/pse-pd/pse_regulator.c
@@ -52,17 +52,19 @@ pse_reg_pi_disable(struct pse_controller_dev *pcdev, int id)
}
static int
-pse_reg_pi_is_enabled(struct pse_controller_dev *pcdev, int id)
+pse_reg_pi_get_admin_state(struct pse_controller_dev *pcdev, int id,
+ struct pse_admin_state *admin_state)
{
struct pse_reg_priv *priv = to_pse_reg(pcdev);
- return regulator_is_enabled(priv->ps);
+ admin_state->podl_admin_state = priv->admin_state;
+
+ return 0;
}
static int
-pse_reg_ethtool_get_status(struct pse_controller_dev *pcdev, unsigned long id,
- struct netlink_ext_ack *extack,
- struct pse_control_status *status)
+pse_reg_pi_get_pw_status(struct pse_controller_dev *pcdev, int id,
+ struct pse_pw_status *pw_status)
{
struct pse_reg_priv *priv = to_pse_reg(pcdev);
int ret;
@@ -72,20 +74,19 @@ pse_reg_ethtool_get_status(struct pse_controller_dev *pcdev, unsigned long id,
return ret;
if (!ret)
- status->podl_pw_status = ETHTOOL_PODL_PSE_PW_D_STATUS_DISABLED;
+ pw_status->podl_pw_status =
+ ETHTOOL_PODL_PSE_PW_D_STATUS_DISABLED;
else
- status->podl_pw_status =
+ pw_status->podl_pw_status =
ETHTOOL_PODL_PSE_PW_D_STATUS_DELIVERING;
- status->podl_admin_state = priv->admin_state;
-
return 0;
}
static const struct pse_controller_ops pse_reg_ops = {
- .ethtool_get_status = pse_reg_ethtool_get_status,
+ .pi_get_admin_state = pse_reg_pi_get_admin_state,
+ .pi_get_pw_status = pse_reg_pi_get_pw_status,
.pi_enable = pse_reg_pi_enable,
- .pi_is_enabled = pse_reg_pi_is_enabled,
.pi_disable = pse_reg_pi_disable,
};
diff --git a/drivers/net/pse-pd/si3474.c b/drivers/net/pse-pd/si3474.c
new file mode 100644
index 000000000000..aa07ffbce54d
--- /dev/null
+++ b/drivers/net/pse-pd/si3474.c
@@ -0,0 +1,578 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for the Skyworks Si3474 PoE PSE Controller
+ *
+ * Chip Architecture & Terminology:
+ *
+ * The Si3474 is a single-chip PoE PSE controller managing 8 physical power
+ * delivery channels. Internally, it's structured into two logical "Quads".
+ *
+ * Quad 0: Manages physical channels ('ports' in datasheet) 0, 1, 2, 3
+ * Quad 1: Manages physical channels ('ports' in datasheet) 4, 5, 6, 7
+ *
+ * Each Quad is accessed via a separate I2C address. The base address range is
+ * set by hardware pins A1-A4, and the specific address selects Quad 0 (usually
+ * the lower/even address) or Quad 1 (usually the higher/odd address).
+ * See datasheet Table 2.2 for the address mapping.
+ *
+ * While the Quads manage channel-specific operations, the Si3474 package has
+ * several resources shared across the entire chip:
+ * - Single RESETb input pin.
+ * - Single INTb output pin (signals interrupts from *either* Quad).
+ * - Single OSS input pin (Emergency Shutdown).
+ * - Global I2C Address (0x7F) used for firmware updates.
+ * - Global status monitoring (Temperature, VDD/VPWR Undervoltage Lockout).
+ *
+ * Driver Architecture:
+ *
+ * To handle the mix of per-Quad access and shared resources correctly, this
+ * driver treats the entire Si3474 package as one logical device. The driver
+ * instance associated with the primary I2C address (Quad 0) takes ownership.
+ * It discovers and manages the I2C client for the secondary address (Quad 1).
+ * This primary instance handles shared resources like IRQ management and
+ * registers a single PSE controller device representing all logical PIs.
+ * Internal functions route I2C commands to the appropriate Quad's i2c_client
+ * based on the target channel or PI.
+ *
+ * Terminology Mapping:
+ *
+ * - "PI" (Power Interface): Refers to the logical PSE port as defined by
+ * IEEE 802.3 (typically corresponds to an RJ45 connector). This is the
+ * `id` (0-7) used in the pse_controller_ops.
+ * - "Channel": Refers to one of the 8 physical power control paths within
+ * the Si3474 chip itself (hardware channels 0-7). This terminology is
+ * used internally within the driver to avoid confusion with 'ports'.
+ * - "Quad": One of the two internal 4-channel management units within the
+ * Si3474, each accessed via its own I2C address.
+ *
+ * Relationship:
+ * - A 2-Pair PoE PI uses 1 Channel.
+ * - A 4-Pair PoE PI uses 2 Channels.
+ *
+ * ASCII Schematic:
+ *
+ * +-----------------------------------------------------+
+ * | Si3474 Chip |
+ * | |
+ * | +---------------------+ +---------------------+ |
+ * | | Quad 0 | | Quad 1 | |
+ * | | Channels 0, 1, 2, 3 | | Channels 4, 5, 6, 7 | |
+ * | +----------^----------+ +-------^-------------+ |
+ * | I2C Addr 0 | | I2C Addr 1 |
+ * | +------------------------+ |
+ * | (Primary Driver Instance) (Managed by Primary) |
+ * | |
+ * | Shared Resources (affect whole chip): |
+ * | - Single INTb Output -> Handled by Primary |
+ * | - Single RESETb Input |
+ * | - Single OSS Input -> Handled by Primary |
+ * | - Global I2C Addr (0x7F) for Firmware Update |
+ * | - Global Status (Temp, VDD/VPWR UVLO) |
+ * +-----------------------------------------------------+
+ * | | | | | | | |
+ * Ch0 Ch1 Ch2 Ch3 Ch4 Ch5 Ch6 Ch7 (Physical Channels)
+ *
+ * Example Mapping (Logical PI to Physical Channel(s)):
+ * * 2-Pair Mode (8 PIs):
+ * PI 0 -> Ch 0
+ * PI 1 -> Ch 1
+ * ...
+ * PI 7 -> Ch 7
+ * * 4-Pair Mode (4 PIs):
+ * PI 0 -> Ch 0 + Ch 1 (Managed via Quad 0 Addr)
+ * PI 1 -> Ch 2 + Ch 3 (Managed via Quad 0 Addr)
+ * PI 2 -> Ch 4 + Ch 5 (Managed via Quad 1 Addr)
+ * PI 3 -> Ch 6 + Ch 7 (Managed via Quad 1 Addr)
+ * (Note: Actual mapping depends on Device Tree and PORT_REMAP config)
+ */
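
The mapping described above can be sanity-checked with a small standalone sketch (not part of the patch). It hard-codes the illustrative 4-pair example from the comment purely for demonstration; in the driver the real PI-to-channel mapping comes from the device tree via si3474_get_of_channels(), and the quad selection rule mirrors si3474_get_chan_client().

/* Illustrative only, not part of the patch: the example 4-pair
 * PI-to-channel mapping from the comment above, plus the quad
 * selection rule used by si3474_get_chan_client().
 */
#include <stdio.h>

int main(void)
{
        /* PI n -> channels 2n and 2n + 1, as in the 4-pair example */
        static const int pi_chan[4][2] = {
                { 0, 1 }, { 2, 3 }, { 4, 5 }, { 6, 7 },
        };
        int id;

        for (id = 0; id < 4; id++) {
                /* Channels 0-3 are reached through Quad 0's I2C address,
                 * channels 4-7 through Quad 1's.
                 */
                int quad = (pi_chan[id][0] < 4) ? 0 : 1;

                printf("PI %d -> Ch %d + Ch %d (Quad %d)\n",
                       id, pi_chan[id][0], pi_chan[id][1], quad);
        }
        return 0;
}
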
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pse-pd/pse.h>
+
+#define SI3474_MAX_CHANS 8
+
+#define MANUFACTURER_ID 0x08
+#define IC_ID 0x05
+#define SI3474_DEVICE_ID (MANUFACTURER_ID << 3 | IC_ID)
+
+/* Misc registers */
+#define VENDOR_IC_ID_REG 0x1B
+#define TEMPERATURE_REG 0x2C
+#define FIRMWARE_REVISION_REG 0x41
+#define CHIP_REVISION_REG 0x43
+
+/* Main status registers */
+#define POWER_STATUS_REG 0x10
+#define PORT_MODE_REG 0x12
+#define DETECT_CLASS_ENABLE_REG 0x14
+
+/* PORTn Current */
+#define PORT1_CURRENT_LSB_REG 0x30
+
+/* PORTn Current [mA], return in [nA] */
+/* 1000 * ((PORTn_CURRENT_MSB << 8) + PORTn_CURRENT_LSB) / 16384 */
+#define SI3474_NA_STEP (1000 * 1000 * 1000 / 16384)
+
+/* VPWR Voltage */
+#define VPWR_LSB_REG 0x2E
+#define VPWR_MSB_REG 0x2F
+
+/* PORTn Voltage */
+#define PORT1_VOLTAGE_LSB_REG 0x32
+
+/* VPWR Voltage [V], return in [uV] */
+/* 60 * (( VPWR_MSB << 8) + VPWR_LSB) / 16384 */
+#define SI3474_UV_STEP (1000 * 1000 * 60 / 16384)
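
As a rough standalone sketch (not part of the patch), the two step constants above can be exercised to check the datasheet formulas quoted in the comments: a raw 14-bit reading is multiplied by the step to get nA or uV, and the current is then rounded to uA exactly as si3474_pi_get_chan_current() does. The raw codes below are made up for illustration.

/* Standalone check of the Si3474 step constants; raw codes are hypothetical. */
#include <stdio.h>
#include <stdint.h>

#define SI3474_NA_STEP (1000 * 1000 * 1000 / 16384)     /* ~61035 nA per LSB */
#define SI3474_UV_STEP (1000 * 1000 * 60 / 16384)       /* ~3662 uV per LSB */

int main(void)
{
        uint16_t raw_i = 1638;   /* hypothetical current reading */
        uint16_t raw_v = 14000;  /* hypothetical voltage reading */
        uint64_t na = (uint64_t)raw_i * SI3474_NA_STEP;
        uint64_t ua = (na + 500) / 1000;        /* DIV_ROUND_CLOSEST(na, 1000) */
        uint64_t uv = (uint64_t)raw_v * SI3474_UV_STEP;

        /* 1638 * 61035 nA ~= 99.98 mA; 14000 * 3662 uV ~= 51.27 V */
        printf("current: %llu uA, voltage: %llu uV\n",
               (unsigned long long)ua, (unsigned long long)uv);
        return 0;
}
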
+
+/* Helper macros */
+#define CHAN_IDX(chan) ((chan) % 4)
+#define CHAN_BIT(chan) BIT(CHAN_IDX(chan))
+#define CHAN_UPPER_BIT(chan) BIT(CHAN_IDX(chan) + 4)
+
+#define CHAN_MASK(chan) (0x03U << (2 * CHAN_IDX(chan)))
+#define CHAN_REG(base, chan) ((base) + (CHAN_IDX(chan) * 4))
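
A quick standalone illustration of the helper macros above (not part of the patch), assuming the register layout they imply: per-quad registers repeat every four channels, PORT_MODE holds two mode bits per channel, power-status uses a lower and an upper nibble per quad, and per-channel current/voltage register pairs sit 4 bytes apart.

#include <stdio.h>

#define BIT(n)                  (1U << (n))
#define PORT1_CURRENT_LSB_REG   0x30

#define CHAN_IDX(chan)          ((chan) % 4)
#define CHAN_BIT(chan)          BIT(CHAN_IDX(chan))
#define CHAN_UPPER_BIT(chan)    BIT(CHAN_IDX(chan) + 4)
#define CHAN_MASK(chan)         (0x03U << (2 * CHAN_IDX(chan)))
#define CHAN_REG(base, chan)    ((base) + (CHAN_IDX(chan) * 4))

int main(void)
{
        unsigned int chan;

        /* Print how each physical channel maps onto bits and registers */
        for (chan = 0; chan < 8; chan++)
                printf("ch%u: idx=%u bit=0x%02x upper=0x%02x mask=0x%02x current_reg=0x%02x\n",
                       chan, CHAN_IDX(chan), CHAN_BIT(chan),
                       CHAN_UPPER_BIT(chan), CHAN_MASK(chan),
                       CHAN_REG(PORT1_CURRENT_LSB_REG, chan));
        return 0;
}
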
+
+struct si3474_pi_desc {
+ u8 chan[2];
+ bool is_4p;
+};
+
+struct si3474_priv {
+ struct i2c_client *client[2];
+ struct pse_controller_dev pcdev;
+ struct device_node *np;
+ struct si3474_pi_desc pi[SI3474_MAX_CHANS];
+};
+
+static struct si3474_priv *to_si3474_priv(struct pse_controller_dev *pcdev)
+{
+ return container_of(pcdev, struct si3474_priv, pcdev);
+}
+
+static void si3474_get_channels(struct si3474_priv *priv, int id,
+ u8 *chan0, u8 *chan1)
+{
+ *chan0 = priv->pi[id].chan[0];
+ *chan1 = priv->pi[id].chan[1];
+}
+
+static struct i2c_client *si3474_get_chan_client(struct si3474_priv *priv,
+ u8 chan)
+{
+ return (chan < 4) ? priv->client[0] : priv->client[1];
+}
+
+static int si3474_pi_get_admin_state(struct pse_controller_dev *pcdev, int id,
+ struct pse_admin_state *admin_state)
+{
+ struct si3474_priv *priv = to_si3474_priv(pcdev);
+ struct i2c_client *client;
+ bool is_enabled;
+ u8 chan0, chan1;
+ s32 ret;
+
+ si3474_get_channels(priv, id, &chan0, &chan1);
+ client = si3474_get_chan_client(priv, chan0);
+
+ ret = i2c_smbus_read_byte_data(client, PORT_MODE_REG);
+ if (ret < 0) {
+ admin_state->c33_admin_state =
+ ETHTOOL_C33_PSE_ADMIN_STATE_UNKNOWN;
+ return ret;
+ }
+
+ is_enabled = ret & (CHAN_MASK(chan0) | CHAN_MASK(chan1));
+
+ if (is_enabled)
+ admin_state->c33_admin_state =
+ ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
+ else
+ admin_state->c33_admin_state =
+ ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
+
+ return 0;
+}
+
+static int si3474_pi_get_pw_status(struct pse_controller_dev *pcdev, int id,
+ struct pse_pw_status *pw_status)
+{
+ struct si3474_priv *priv = to_si3474_priv(pcdev);
+ struct i2c_client *client;
+ bool delivering;
+ u8 chan0, chan1;
+ s32 ret;
+
+ si3474_get_channels(priv, id, &chan0, &chan1);
+ client = si3474_get_chan_client(priv, chan0);
+
+ ret = i2c_smbus_read_byte_data(client, POWER_STATUS_REG);
+ if (ret < 0) {
+ pw_status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_UNKNOWN;
+ return ret;
+ }
+
+ delivering = ret & (CHAN_UPPER_BIT(chan0) | CHAN_UPPER_BIT(chan1));
+
+ if (delivering)
+ pw_status->c33_pw_status =
+ ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING;
+ else
+ pw_status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED;
+
+ return 0;
+}
+
+static int si3474_get_of_channels(struct si3474_priv *priv)
+{
+ struct pse_pi *pi;
+ u32 chan_id;
+ u8 pi_no;
+ s32 ret;
+
+ for (pi_no = 0; pi_no < SI3474_MAX_CHANS; pi_no++) {
+ pi = &priv->pcdev.pi[pi_no];
+ bool pairset_found = false;
+ u8 pairset_no;
+
+ for (pairset_no = 0; pairset_no < 2; pairset_no++) {
+ if (!pi->pairset[pairset_no].np)
+ continue;
+
+ pairset_found = true;
+
+ ret = of_property_read_u32(pi->pairset[pairset_no].np,
+ "reg", &chan_id);
+ if (ret) {
+ dev_err(&priv->client[0]->dev,
+ "Failed to read channel reg property\n");
+ return ret;
+ }
+ if (chan_id >= SI3474_MAX_CHANS) {
+ dev_err(&priv->client[0]->dev,
+ "Incorrect channel number: %d\n", chan_id);
+ return -EINVAL;
+ }
+
+ priv->pi[pi_no].chan[pairset_no] = chan_id;
+ /* Mark as 4-pair if second pairset is present */
+ priv->pi[pi_no].is_4p = (pairset_no == 1);
+ }
+
+ if (pairset_found && !priv->pi[pi_no].is_4p) {
+ dev_err(&priv->client[0]->dev,
+ "Second pairset is missing for PI %pOF, only 4p configs are supported\n",
+ pi->np);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int si3474_setup_pi_matrix(struct pse_controller_dev *pcdev)
+{
+ struct si3474_priv *priv = to_si3474_priv(pcdev);
+ s32 ret;
+
+ ret = si3474_get_of_channels(priv);
+ if (ret < 0)
+ dev_warn(&priv->client[0]->dev,
+ "Unable to parse DT PSE power interface matrix\n");
+
+ return ret;
+}
+
+static int si3474_pi_enable(struct pse_controller_dev *pcdev, int id)
+{
+ struct si3474_priv *priv = to_si3474_priv(pcdev);
+ struct i2c_client *client;
+ u8 chan0, chan1;
+ s32 ret;
+ u8 val;
+
+ si3474_get_channels(priv, id, &chan0, &chan1);
+ client = si3474_get_chan_client(priv, chan0);
+
+ /* Release PI from shutdown */
+ ret = i2c_smbus_read_byte_data(client, PORT_MODE_REG);
+ if (ret < 0)
+ return ret;
+
+ val = (u8)ret;
+ val |= CHAN_MASK(chan0);
+ val |= CHAN_MASK(chan1);
+
+ ret = i2c_smbus_write_byte_data(client, PORT_MODE_REG, val);
+ if (ret)
+ return ret;
+
+ /* DETECT_CLASS_ENABLE must be set when using AUTO mode,
+ * otherwise PI does not power up - datasheet section 2.10.2
+ */
+ val = CHAN_BIT(chan0) | CHAN_UPPER_BIT(chan0) |
+ CHAN_BIT(chan1) | CHAN_UPPER_BIT(chan1);
+
+ ret = i2c_smbus_write_byte_data(client, DETECT_CLASS_ENABLE_REG, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int si3474_pi_disable(struct pse_controller_dev *pcdev, int id)
+{
+ struct si3474_priv *priv = to_si3474_priv(pcdev);
+ struct i2c_client *client;
+ u8 chan0, chan1;
+ s32 ret;
+ u8 val;
+
+ si3474_get_channels(priv, id, &chan0, &chan1);
+ client = si3474_get_chan_client(priv, chan0);
+
+ /* Set PI in shutdown mode */
+ ret = i2c_smbus_read_byte_data(client, PORT_MODE_REG);
+ if (ret < 0)
+ return ret;
+
+ val = (u8)ret;
+ val &= ~CHAN_MASK(chan0);
+ val &= ~CHAN_MASK(chan1);
+
+ ret = i2c_smbus_write_byte_data(client, PORT_MODE_REG, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int si3474_pi_get_chan_current(struct si3474_priv *priv, u8 chan)
+{
+ struct i2c_client *client;
+ u64 tmp_64;
+ s32 ret;
+ u8 reg;
+
+ client = si3474_get_chan_client(priv, chan);
+
+ /* Registers 0x30 to 0x3d */
+ reg = CHAN_REG(PORT1_CURRENT_LSB_REG, chan);
+
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0)
+ return ret;
+
+ tmp_64 = ret * SI3474_NA_STEP;
+
+ /* uA = nA / 1000 */
+ tmp_64 = DIV_ROUND_CLOSEST_ULL(tmp_64, 1000);
+ return (int)tmp_64;
+}
+
+static int si3474_pi_get_chan_voltage(struct si3474_priv *priv, u8 chan)
+{
+ struct i2c_client *client;
+ s32 ret;
+ u32 val;
+ u8 reg;
+
+ client = si3474_get_chan_client(priv, chan);
+
+ /* Registers 0x32 to 0x3f */
+ reg = CHAN_REG(PORT1_VOLTAGE_LSB_REG, chan);
+
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0)
+ return ret;
+
+ val = ret * SI3474_UV_STEP;
+
+ return (int)val;
+}
+
+static int si3474_pi_get_voltage(struct pse_controller_dev *pcdev, int id)
+{
+ struct si3474_priv *priv = to_si3474_priv(pcdev);
+ struct i2c_client *client;
+ u8 chan0, chan1;
+ s32 ret;
+
+ si3474_get_channels(priv, id, &chan0, &chan1);
+ client = si3474_get_chan_client(priv, chan0);
+
+ /* Check which channels are enabled */
+ ret = i2c_smbus_read_byte_data(client, POWER_STATUS_REG);
+ if (ret < 0)
+ return ret;
+
+ /* Take voltage from the first enabled channel */
+ if (ret & CHAN_BIT(chan0))
+ ret = si3474_pi_get_chan_voltage(priv, chan0);
+ else if (ret & CHAN_BIT(chan1))
+ ret = si3474_pi_get_chan_voltage(priv, chan1);
+ else
+ /* Neither channel is powered, so there should be no voltage */
+ return 0;
+
+ return ret;
+}
+
+static int si3474_pi_get_actual_pw(struct pse_controller_dev *pcdev, int id)
+{
+ struct si3474_priv *priv = to_si3474_priv(pcdev);
+ u8 chan0, chan1;
+ u32 uV, uA;
+ u64 tmp_64;
+ s32 ret;
+
+ ret = si3474_pi_get_voltage(&priv->pcdev, id);
+
+ /* Do not read currents if voltage is 0 */
+ if (ret <= 0)
+ return ret;
+ uV = ret;
+
+ si3474_get_channels(priv, id, &chan0, &chan1);
+
+ ret = si3474_pi_get_chan_current(priv, chan0);
+ if (ret < 0)
+ return ret;
+ uA = ret;
+
+ ret = si3474_pi_get_chan_current(priv, chan1);
+ if (ret < 0)
+ return ret;
+ uA += ret;
+
+ tmp_64 = uV;
+ tmp_64 *= uA;
+ /* mW = uV * uA / 1000000000 */
+ return DIV_ROUND_CLOSEST_ULL(tmp_64, 1000000000);
+}
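
The mW computation above is just uV * uA scaled down by 10^9 with rounding. A standalone sketch with made-up readings (same arithmetic, no hardware access, not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* DIV_ROUND_CLOSEST_ULL equivalent for positive values */
static uint64_t div_round_closest_u64(uint64_t x, uint64_t d)
{
        return (x + d / 2) / d;
}

int main(void)
{
        uint64_t uv = 51268000;         /* ~51.27 V, hypothetical */
        uint64_t ua = 99975 + 99975;    /* two channels of a 4-pair PI */
        uint64_t mw = div_round_closest_u64(uv * ua, 1000000000ULL);

        /* ~51.27 V * ~0.2 A ~= 10.25 W */
        printf("actual power: %llu mW\n", (unsigned long long)mw);
        return 0;
}
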
+
+static const struct pse_controller_ops si3474_ops = {
+ .setup_pi_matrix = si3474_setup_pi_matrix,
+ .pi_enable = si3474_pi_enable,
+ .pi_disable = si3474_pi_disable,
+ .pi_get_actual_pw = si3474_pi_get_actual_pw,
+ .pi_get_voltage = si3474_pi_get_voltage,
+ .pi_get_admin_state = si3474_pi_get_admin_state,
+ .pi_get_pw_status = si3474_pi_get_pw_status,
+};
+
+static void si3474_ancillary_i2c_remove(void *data)
+{
+ struct i2c_client *client = data;
+
+ i2c_unregister_device(client);
+}
+
+static int si3474_i2c_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct si3474_priv *priv;
+ u8 fw_version;
+ s32 ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(dev, "i2c check functionality failed\n");
+ return -ENXIO;
+ }
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ret = i2c_smbus_read_byte_data(client, VENDOR_IC_ID_REG);
+ if (ret < 0)
+ return ret;
+
+ if (ret != SI3474_DEVICE_ID) {
+ dev_err(dev, "Wrong device ID: 0x%x\n", ret);
+ return -ENXIO;
+ }
+
+ ret = i2c_smbus_read_byte_data(client, FIRMWARE_REVISION_REG);
+ if (ret < 0)
+ return ret;
+ fw_version = ret;
+
+ ret = i2c_smbus_read_byte_data(client, CHIP_REVISION_REG);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(dev, "Chip revision: 0x%x, firmware version: 0x%x\n",
+ ret, fw_version);
+
+ priv->client[0] = client;
+ i2c_set_clientdata(client, priv);
+
+ priv->client[1] = i2c_new_ancillary_device(priv->client[0], "secondary",
+ priv->client[0]->addr + 1);
+ if (IS_ERR(priv->client[1]))
+ return PTR_ERR(priv->client[1]);
+
+ ret = devm_add_action_or_reset(dev, si3474_ancillary_i2c_remove, priv->client[1]);
+ if (ret < 0) {
+ dev_err(&priv->client[1]->dev, "Cannot register remove callback\n");
+ return ret;
+ }
+
+ ret = i2c_smbus_read_byte_data(priv->client[1], VENDOR_IC_ID_REG);
+ if (ret < 0) {
+ dev_err(&priv->client[1]->dev, "Cannot access secondary PSE controller\n");
+ return ret;
+ }
+
+ if (ret != SI3474_DEVICE_ID) {
+ dev_err(&priv->client[1]->dev,
+ "Wrong device ID for secondary PSE controller: 0x%x\n", ret);
+ return -ENXIO;
+ }
+
+ priv->np = dev->of_node;
+ priv->pcdev.owner = THIS_MODULE;
+ priv->pcdev.ops = &si3474_ops;
+ priv->pcdev.dev = dev;
+ priv->pcdev.types = ETHTOOL_PSE_C33;
+ priv->pcdev.nr_lines = SI3474_MAX_CHANS;
+
+ ret = devm_pse_controller_register(dev, &priv->pcdev);
+ if (ret) {
+ dev_err(dev, "Failed to register PSE controller: 0x%x\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id si3474_id[] = {
+ { "si3474" },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, si3474_id);
+
+static const struct of_device_id si3474_of_match[] = {
+ {
+ .compatible = "skyworks,si3474",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, si3474_of_match);
+
+static struct i2c_driver si3474_driver = {
+ .probe = si3474_i2c_probe,
+ .id_table = si3474_id,
+ .driver = {
+ .name = "si3474",
+ .of_match_table = si3474_of_match,
+ },
+};
+module_i2c_driver(si3474_driver);
+
+MODULE_AUTHOR("Piotr Kubik <piotr.kubik@adtran.com>");
+MODULE_DESCRIPTION("Skyworks Si3474 PoE PSE Controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/pse-pd/tps23881.c b/drivers/net/pse-pd/tps23881.c
index 5c4e88be46ee..76ec1555d60d 100644
--- a/drivers/net/pse-pd/tps23881.c
+++ b/drivers/net/pse-pd/tps23881.c
@@ -16,29 +16,59 @@
#include <linux/pse-pd/pse.h>
#define TPS23881_MAX_CHANS 8
-
+#define TPS23881_MAX_IRQ_RETRIES 10
+
+#define TPS23881_REG_IT 0x0
+#define TPS23881_REG_IT_MASK 0x1
+#define TPS23881_REG_IT_DISF BIT(2)
+#define TPS23881_REG_IT_DETC BIT(3)
+#define TPS23881_REG_IT_CLASC BIT(4)
+#define TPS23881_REG_IT_IFAULT BIT(5)
+#define TPS23881_REG_IT_SUPF BIT(7)
+#define TPS23881_REG_DET_EVENT 0x5
+#define TPS23881_REG_FAULT 0x7
+#define TPS23881_REG_SUPF_EVENT 0xb
+#define TPS23881_REG_TSD BIT(7)
+#define TPS23881_REG_DISC 0xc
#define TPS23881_REG_PW_STATUS 0x10
#define TPS23881_REG_OP_MODE 0x12
+#define TPS23881_REG_DISC_EN 0x13
#define TPS23881_OP_MODE_SEMIAUTO 0xaaaa
#define TPS23881_REG_DIS_EN 0x13
#define TPS23881_REG_DET_CLA_EN 0x14
#define TPS23881_REG_GEN_MASK 0x17
+#define TPS23881_REG_CLCHE BIT(2)
+#define TPS23881_REG_DECHE BIT(3)
#define TPS23881_REG_NBITACC BIT(5)
+#define TPS23881_REG_INTEN BIT(7)
#define TPS23881_REG_PW_EN 0x19
+#define TPS23881_REG_RESET 0x1a
+#define TPS23881_REG_CLRAIN BIT(7)
+#define TPS23881_REG_2PAIR_POL1 0x1e
#define TPS23881_REG_PORT_MAP 0x26
#define TPS23881_REG_PORT_POWER 0x29
-#define TPS23881_REG_POEPLUS 0x40
+#define TPS23881_REG_4PAIR_POL1 0x2a
+#define TPS23881_REG_INPUT_V 0x2e
+#define TPS23881_REG_CHAN1_A 0x30
+#define TPS23881_REG_CHAN1_V 0x32
+#define TPS23881_REG_FOLDBACK 0x40
#define TPS23881_REG_TPON BIT(0)
#define TPS23881_REG_FWREV 0x41
#define TPS23881_REG_DEVID 0x43
-#define TPS23881_REG_DEVID_MASK 0xF0
-#define TPS23881_DEVICE_ID 0x02
+#define TPS23881_REG_CHAN1_CLASS 0x4c
#define TPS23881_REG_SRAM_CTRL 0x60
#define TPS23881_REG_SRAM_DATA 0x61
+#define TPS23881_UV_STEP 3662
+#define TPS23881_NA_STEP 89500
+#define TPS23881_MW_STEP 500
+#define TPS23881_MIN_PI_PW_LIMIT_MW 2000
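
Sketch of the TPS23881 scaling constants above (illustrative raw codes, not part of the patch): voltage and current readings are 14-bit fields scaled by TPS23881_UV_STEP and TPS23881_NA_STEP, and the per-port power-policy register counts in TPS23881_MW_STEP units.

#include <stdio.h>
#include <stdint.h>

#define TPS23881_UV_STEP        3662    /* uV per LSB */
#define TPS23881_NA_STEP        89500   /* nA per LSB */
#define TPS23881_MW_STEP        500     /* mW per power-policy LSB */

int main(void)
{
        uint16_t raw_v = 0x36b0 & 0x3fff;       /* hypothetical voltage code */
        uint16_t raw_i = 0x0400 & 0x3fff;       /* hypothetical current code */
        uint8_t pw_pol = 31;                    /* hypothetical policy code */

        uint64_t uv = (uint64_t)raw_v * TPS23881_UV_STEP;
        uint64_t ua = ((uint64_t)raw_i * TPS23881_NA_STEP + 500) / 1000;

        printf("voltage: %llu uV\n", (unsigned long long)uv);   /* ~51.3 V */
        printf("current: %llu uA\n", (unsigned long long)ua);   /* ~91.6 mA */
        printf("power limit: %d mW\n", pw_pol * TPS23881_MW_STEP); /* 15500 */
        return 0;
}
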
+
struct tps23881_port_desc {
u8 chan[2];
bool is_4p;
+ int pw_pol;
+ bool exist;
};
struct tps23881_priv {
@@ -53,6 +83,103 @@ static struct tps23881_priv *to_tps23881_priv(struct pse_controller_dev *pcdev)
return container_of(pcdev, struct tps23881_priv, pcdev);
}
+/*
+ * Helper to extract a value from a u16 register value, which is made of two
+ * u8 registers. The function calculates the bit offset based on the channel
+ * and extracts the relevant bits using a provided field mask.
+ *
+ * @param reg_val: The u16 register value (composed of two u8 registers).
+ * @param chan: The channel number (0-7).
+ * @param field_offset: The base bit offset to apply (e.g., 0 or 4).
+ * @param field_mask: The mask to apply to extract the required bits.
+ * @return: The extracted value for the specific channel.
+ */
+static u16 tps23881_calc_val(u16 reg_val, u8 chan, u8 field_offset,
+ u16 field_mask)
+{
+ if (chan >= 4)
+ reg_val >>= 8;
+
+ return (reg_val >> field_offset) & field_mask;
+}
+
+/*
+ * Helper to combine individual channel values into a u16 register value.
+ * The function sets the value for a specific channel in the appropriate
+ * position.
+ *
+ * @param reg_val: The current u16 register value.
+ * @param chan: The channel number (0-7).
+ * @param field_offset: The base bit offset to apply (e.g., 0 or 4).
+ * @param field_mask: The mask to apply for the field (e.g., 0x0F).
+ * @param field_val: The value to set for the specific channel (masked by
+ * field_mask).
+ * @return: The updated u16 register value with the channel value set.
+ */
+static u16 tps23881_set_val(u16 reg_val, u8 chan, u8 field_offset,
+ u16 field_mask, u16 field_val)
+{
+ field_val &= field_mask;
+
+ if (chan < 4) {
+ reg_val &= ~(field_mask << field_offset);
+ reg_val |= (field_val << field_offset);
+ } else {
+ reg_val &= ~(field_mask << (field_offset + 8));
+ reg_val |= (field_val << (field_offset + 8));
+ }
+
+ return reg_val;
+}
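
These two helpers implement the register convention used throughout the driver: the low byte of a 16-bit read carries channels 0-3 and the high byte channels 4-7, with a per-channel field at a given bit offset inside each byte. A standalone sketch of both directions (register values are arbitrary, not part of the patch):

#include <stdio.h>
#include <stdint.h>

static unsigned int calc_val(uint16_t reg_val, uint8_t chan, uint8_t off,
                             uint16_t mask)
{
        if (chan >= 4)
                reg_val >>= 8;
        return (reg_val >> off) & mask;
}

static unsigned int set_val(uint16_t reg_val, uint8_t chan, uint8_t off,
                            uint16_t mask, uint16_t val)
{
        val &= mask;
        if (chan < 4) {
                reg_val &= ~(mask << off);
                reg_val |= val << off;
        } else {
                reg_val &= ~(mask << (off + 8));
                reg_val |= val << (off + 8);
        }
        return reg_val;
}

int main(void)
{
        uint16_t reg = 0x5ac3;  /* arbitrary PW_STATUS-style value */

        /* chan 5 lives in the high byte: 0x5a >> 4 & 0xf = 0x5 */
        printf("chan 5, offset 4: 0x%x\n", calc_val(reg, 5, 4, 0xf));
        /* chan 1 lives in the low byte: bit 1 of 0xc3 is set */
        printf("chan 1 bit set: %d\n", !!calc_val(reg, 1, 0, 1 << 1));
        /* set a single bit for chan 6 (bit 2 of the high byte) */
        printf("set chan 6: 0x%04x\n", set_val(0, 6, 0, 1 << 2, 1 << 2));
        return 0;
}
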
+
+static int
+tps23881_pi_set_pw_pol_limit(struct tps23881_priv *priv, int id, u8 pw_pol,
+ bool is_4p)
+{
+ struct i2c_client *client = priv->client;
+ int ret, reg;
+ u16 val;
+ u8 chan;
+
+ chan = priv->port[id].chan[0];
+ if (!is_4p) {
+ reg = TPS23881_REG_2PAIR_POL1 + (chan % 4);
+ } else {
+ /* One chan is enough to configure the 4p PI power limit */
+ if ((chan % 4) < 2)
+ reg = TPS23881_REG_4PAIR_POL1;
+ else
+ reg = TPS23881_REG_4PAIR_POL1 + 1;
+ }
+
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0)
+ return ret;
+
+ val = tps23881_set_val(ret, chan, 0, 0xff, pw_pol);
+ return i2c_smbus_write_word_data(client, reg, val);
+}
+
+static int tps23881_pi_enable_manual_pol(struct tps23881_priv *priv, int id)
+{
+ struct i2c_client *client = priv->client;
+ int ret;
+ u8 chan;
+ u16 val;
+
+ ret = i2c_smbus_read_byte_data(client, TPS23881_REG_FOLDBACK);
+ if (ret < 0)
+ return ret;
+
+ /* No need to test if the chan is PoE4 as setting either bit for a
+ * 4P configured port disables the automatic configuration on both
+ * channels.
+ */
+ chan = priv->port[id].chan[0];
+ val = tps23881_set_val(ret, chan, 0, BIT(chan % 4), BIT(chan % 4));
+ return i2c_smbus_write_byte_data(client, TPS23881_REG_FOLDBACK, val);
+}
+
static int tps23881_pi_enable(struct pse_controller_dev *pcdev, int id)
{
struct tps23881_priv *priv = to_tps23881_priv(pcdev);
@@ -64,28 +191,30 @@ static int tps23881_pi_enable(struct pse_controller_dev *pcdev, int id)
if (id >= TPS23881_MAX_CHANS)
return -ERANGE;
- ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS);
- if (ret < 0)
- return ret;
-
chan = priv->port[id].chan[0];
- if (chan < 4)
- val = (u16)(ret | BIT(chan));
- else
- val = (u16)(ret | BIT(chan + 4));
+ val = tps23881_set_val(0, chan, 0, BIT(chan % 4), BIT(chan % 4));
if (priv->port[id].is_4p) {
chan = priv->port[id].chan[1];
- if (chan < 4)
- val |= BIT(chan);
- else
- val |= BIT(chan + 4);
+ val = tps23881_set_val(val, chan, 0, BIT(chan % 4),
+ BIT(chan % 4));
}
ret = i2c_smbus_write_word_data(client, TPS23881_REG_PW_EN, val);
if (ret)
return ret;
+ /* Enable DC disconnect */
+ chan = priv->port[id].chan[0];
+ ret = i2c_smbus_read_word_data(client, TPS23881_REG_DISC_EN);
+ if (ret < 0)
+ return ret;
+
+ val = tps23881_set_val(ret, chan, 0, BIT(chan % 4), BIT(chan % 4));
+ ret = i2c_smbus_write_word_data(client, TPS23881_REG_DISC_EN, val);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -100,37 +229,79 @@ static int tps23881_pi_disable(struct pse_controller_dev *pcdev, int id)
if (id >= TPS23881_MAX_CHANS)
return -ERANGE;
- ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS);
+ chan = priv->port[id].chan[0];
+ val = tps23881_set_val(0, chan, 4, BIT(chan % 4), BIT(chan % 4));
+
+ if (priv->port[id].is_4p) {
+ chan = priv->port[id].chan[1];
+ val = tps23881_set_val(val, chan, 4, BIT(chan % 4),
+ BIT(chan % 4));
+ }
+
+ ret = i2c_smbus_write_word_data(client, TPS23881_REG_PW_EN, val);
+ if (ret)
+ return ret;
+
+ /* The PWOFF command resets many registers, which need to be
+ * configured again. According to the datasheet "It may take upwards
+ * of 5ms after PWOFFn command for all register values to be updated"
+ */
+ mdelay(5);
+
+ /* Disable DC disconnect */
+ chan = priv->port[id].chan[0];
+ ret = i2c_smbus_read_word_data(client, TPS23881_REG_DISC_EN);
+ if (ret < 0)
+ return ret;
+
+ val = tps23881_set_val(ret, chan, 0, BIT(chan % 4), 0);
+ ret = i2c_smbus_write_word_data(client, TPS23881_REG_DISC_EN, val);
+ if (ret)
+ return ret;
+
+ /* Enable detection and classification */
+ ret = i2c_smbus_read_word_data(client, TPS23881_REG_DET_CLA_EN);
if (ret < 0)
return ret;
chan = priv->port[id].chan[0];
- if (chan < 4)
- val = (u16)(ret | BIT(chan + 4));
- else
- val = (u16)(ret | BIT(chan + 8));
+ val = tps23881_set_val(ret, chan, 0, BIT(chan % 4), BIT(chan % 4));
+ val = tps23881_set_val(val, chan, 4, BIT(chan % 4), BIT(chan % 4));
if (priv->port[id].is_4p) {
chan = priv->port[id].chan[1];
- if (chan < 4)
- val |= BIT(chan + 4);
- else
- val |= BIT(chan + 8);
+ val = tps23881_set_val(val, chan, 0, BIT(chan % 4),
+ BIT(chan % 4));
+ val = tps23881_set_val(val, chan, 4, BIT(chan % 4),
+ BIT(chan % 4));
}
- ret = i2c_smbus_write_word_data(client, TPS23881_REG_PW_EN, val);
+ ret = i2c_smbus_write_word_data(client, TPS23881_REG_DET_CLA_EN, val);
if (ret)
return ret;
- return 0;
+ /* No power policy */
+ if (priv->port[id].pw_pol < 0)
+ return 0;
+
+ ret = tps23881_pi_enable_manual_pol(priv, id);
+ if (ret < 0)
+ return ret;
+
+ /* Set power policy */
+ return tps23881_pi_set_pw_pol_limit(priv, id, priv->port[id].pw_pol,
+ priv->port[id].is_4p);
}
-static int tps23881_pi_is_enabled(struct pse_controller_dev *pcdev, int id)
+static int
+tps23881_pi_get_admin_state(struct pse_controller_dev *pcdev, int id,
+ struct pse_admin_state *admin_state)
{
struct tps23881_priv *priv = to_tps23881_priv(pcdev);
struct i2c_client *client = priv->client;
bool enabled;
u8 chan;
+ u16 val;
int ret;
ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS);
@@ -138,32 +309,35 @@ static int tps23881_pi_is_enabled(struct pse_controller_dev *pcdev, int id)
return ret;
chan = priv->port[id].chan[0];
- if (chan < 4)
- enabled = ret & BIT(chan);
- else
- enabled = ret & BIT(chan + 4);
+ val = tps23881_calc_val(ret, chan, 0, BIT(chan % 4));
+ enabled = !!(val);
if (priv->port[id].is_4p) {
chan = priv->port[id].chan[1];
- if (chan < 4)
- enabled &= !!(ret & BIT(chan));
- else
- enabled &= !!(ret & BIT(chan + 4));
+ val = tps23881_calc_val(ret, chan, 0, BIT(chan % 4));
+ enabled &= !!(val);
}
/* Return enabled status only if both channel are on this state */
- return enabled;
+ if (enabled)
+ admin_state->c33_admin_state =
+ ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
+ else
+ admin_state->c33_admin_state =
+ ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
+
+ return 0;
}
-static int tps23881_ethtool_get_status(struct pse_controller_dev *pcdev,
- unsigned long id,
- struct netlink_ext_ack *extack,
- struct pse_control_status *status)
+static int
+tps23881_pi_get_pw_status(struct pse_controller_dev *pcdev, int id,
+ struct pse_pw_status *pw_status)
{
struct tps23881_priv *priv = to_tps23881_priv(pcdev);
struct i2c_client *client = priv->client;
- bool enabled, delivering;
+ bool delivering;
u8 chan;
+ u16 val;
int ret;
ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS);
@@ -171,40 +345,197 @@ static int tps23881_ethtool_get_status(struct pse_controller_dev *pcdev,
return ret;
chan = priv->port[id].chan[0];
- if (chan < 4) {
- enabled = ret & BIT(chan);
- delivering = ret & BIT(chan + 4);
- } else {
- enabled = ret & BIT(chan + 4);
- delivering = ret & BIT(chan + 8);
- }
+ val = tps23881_calc_val(ret, chan, 4, BIT(chan % 4));
+ delivering = !!(val);
if (priv->port[id].is_4p) {
chan = priv->port[id].chan[1];
- if (chan < 4) {
- enabled &= !!(ret & BIT(chan));
- delivering &= !!(ret & BIT(chan + 4));
- } else {
- enabled &= !!(ret & BIT(chan + 4));
- delivering &= !!(ret & BIT(chan + 8));
- }
+ val = tps23881_calc_val(ret, chan, 4, BIT(chan % 4));
+ delivering &= !!(val);
}
/* Return delivering status only if both channel are on this state */
if (delivering)
- status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING;
- else
- status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED;
-
- /* Return enabled status only if both channel are on this state */
- if (enabled)
- status->c33_admin_state = ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
+ pw_status->c33_pw_status =
+ ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING;
else
- status->c33_admin_state = ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
+ pw_status->c33_pw_status =
+ ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED;
return 0;
}
+static int tps23881_pi_get_voltage(struct pse_controller_dev *pcdev, int id)
+{
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ struct i2c_client *client = priv->client;
+ int ret;
+ u64 uV;
+
+ ret = i2c_smbus_read_word_data(client, TPS23881_REG_INPUT_V);
+ if (ret < 0)
+ return ret;
+
+ uV = ret & 0x3fff;
+ uV *= TPS23881_UV_STEP;
+
+ return (int)uV;
+}
+
+static int
+tps23881_pi_get_chan_current(struct tps23881_priv *priv, u8 chan)
+{
+ struct i2c_client *client = priv->client;
+ int reg, ret;
+ u64 tmp_64;
+
+ /* Registers 0x30 to 0x3d */
+ reg = TPS23881_REG_CHAN1_A + (chan % 4) * 4 + (chan >= 4);
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0)
+ return ret;
+
+ tmp_64 = ret & 0x3fff;
+ tmp_64 *= TPS23881_NA_STEP;
+ /* uA = nA / 1000 */
+ tmp_64 = DIV_ROUND_CLOSEST_ULL(tmp_64, 1000);
+ return (int)tmp_64;
+}
+
+static int tps23881_pi_get_pw_class(struct pse_controller_dev *pcdev,
+ int id)
+{
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ struct i2c_client *client = priv->client;
+ int ret, reg;
+ u8 chan;
+
+ chan = priv->port[id].chan[0];
+ reg = TPS23881_REG_CHAN1_CLASS + (chan % 4);
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0)
+ return ret;
+
+ return tps23881_calc_val(ret, chan, 4, 0x0f);
+}
+
+static int
+tps23881_pi_get_actual_pw(struct pse_controller_dev *pcdev, int id)
+{
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ int ret, uV, uA;
+ u64 tmp_64;
+ u8 chan;
+
+ ret = tps23881_pi_get_voltage(&priv->pcdev, id);
+ if (ret < 0)
+ return ret;
+ uV = ret;
+
+ chan = priv->port[id].chan[0];
+ ret = tps23881_pi_get_chan_current(priv, chan);
+ if (ret < 0)
+ return ret;
+ uA = ret;
+
+ if (priv->port[id].is_4p) {
+ chan = priv->port[id].chan[1];
+ ret = tps23881_pi_get_chan_current(priv, chan);
+ if (ret < 0)
+ return ret;
+ uA += ret;
+ }
+
+ tmp_64 = uV;
+ tmp_64 *= uA;
+ /* mW = uV * uA / 1000000000 */
+ return DIV_ROUND_CLOSEST_ULL(tmp_64, 1000000000);
+}
+
+static int
+tps23881_pi_get_pw_limit_chan(struct tps23881_priv *priv, u8 chan)
+{
+ struct i2c_client *client = priv->client;
+ int ret, reg;
+ u16 val;
+
+ reg = TPS23881_REG_2PAIR_POL1 + (chan % 4);
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0)
+ return ret;
+
+ val = tps23881_calc_val(ret, chan, 0, 0xff);
+ return val * TPS23881_MW_STEP;
+}
+
+static int tps23881_pi_get_pw_limit(struct pse_controller_dev *pcdev, int id)
+{
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ int ret, mW;
+ u8 chan;
+
+ chan = priv->port[id].chan[0];
+ ret = tps23881_pi_get_pw_limit_chan(priv, chan);
+ if (ret < 0)
+ return ret;
+
+ mW = ret;
+ if (priv->port[id].is_4p) {
+ chan = priv->port[id].chan[1];
+ ret = tps23881_pi_get_pw_limit_chan(priv, chan);
+ if (ret < 0)
+ return ret;
+ mW += ret;
+ }
+
+ return mW;
+}
+
+static int tps23881_pi_set_pw_limit(struct pse_controller_dev *pcdev,
+ int id, int max_mW)
+{
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ u8 pw_pol;
+ int ret;
+
+ if (max_mW < TPS23881_MIN_PI_PW_LIMIT_MW || MAX_PI_PW < max_mW) {
+ dev_err(&priv->client->dev,
+ "power limit %d out of ranges [%d,%d]",
+ max_mW, TPS23881_MIN_PI_PW_LIMIT_MW, MAX_PI_PW);
+ return -ERANGE;
+ }
+
+ ret = tps23881_pi_enable_manual_pol(priv, id);
+ if (ret < 0)
+ return ret;
+
+ pw_pol = DIV_ROUND_CLOSEST_ULL(max_mW, TPS23881_MW_STEP);
+
+ /* Save power policy to reconfigure it after a disable call */
+ priv->port[id].pw_pol = pw_pol;
+ return tps23881_pi_set_pw_pol_limit(priv, id, pw_pol,
+ priv->port[id].is_4p);
+}
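
The requested limit is quantized to TPS23881_MW_STEP (500 mW) with round-to-nearest before being written, so the effective limit is a multiple of 500 mW and may differ slightly from the request. A quick numeric sketch (not part of the patch):

#include <stdio.h>

#define TPS23881_MW_STEP                500
#define TPS23881_MIN_PI_PW_LIMIT_MW     2000

int main(void)
{
        int requests[] = { 2000, 15400, 15700, 30000 };
        unsigned int i;

        for (i = 0; i < sizeof(requests) / sizeof(requests[0]); i++) {
                int max_mw = requests[i];
                /* DIV_ROUND_CLOSEST(max_mw, TPS23881_MW_STEP) */
                int pw_pol = (max_mw + TPS23881_MW_STEP / 2) / TPS23881_MW_STEP;

                printf("requested %5d mW -> pw_pol %3d -> effective %5d mW\n",
                       max_mw, pw_pol, pw_pol * TPS23881_MW_STEP);
        }
        return 0;
}
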
+
+static int
+tps23881_pi_get_pw_limit_ranges(struct pse_controller_dev *pcdev, int id,
+ struct pse_pw_limit_ranges *pw_limit_ranges)
+{
+ struct ethtool_c33_pse_pw_limit_range *c33_pw_limit_ranges;
+
+ c33_pw_limit_ranges = kzalloc(sizeof(*c33_pw_limit_ranges),
+ GFP_KERNEL);
+ if (!c33_pw_limit_ranges)
+ return -ENOMEM;
+
+ c33_pw_limit_ranges->min = TPS23881_MIN_PI_PW_LIMIT_MW;
+ c33_pw_limit_ranges->max = MAX_PI_PW;
+ pw_limit_ranges->c33_pw_limit_ranges = c33_pw_limit_ranges;
+
+ /* Return the number of ranges */
+ return 1;
+}
+
/* Parse managers subnode into a array of device node */
static int
tps23881_get_of_channels(struct tps23881_priv *priv,
@@ -488,7 +819,7 @@ tps23881_write_port_matrix(struct tps23881_priv *priv,
struct i2c_client *client = priv->client;
u8 pi_id, lgcl_chan, hw_chan;
u16 val = 0;
- int i, ret;
+ int i;
for (i = 0; i < port_cnt; i++) {
pi_id = port_matrix[i].pi_id;
@@ -496,8 +827,13 @@ tps23881_write_port_matrix(struct tps23881_priv *priv,
hw_chan = port_matrix[i].hw_chan[0] % 4;
/* Set software port matrix for existing ports */
- if (port_matrix[i].exist)
+ if (port_matrix[i].exist) {
priv->port[pi_id].chan[0] = lgcl_chan;
+ priv->port[pi_id].exist = true;
+ }
+
+ /* Initialize power policy internal value */
+ priv->port[pi_id].pw_pol = -1;
/* Set hardware port matrix for all ports */
val |= hw_chan << (lgcl_chan * 2);
@@ -519,11 +855,7 @@ tps23881_write_port_matrix(struct tps23881_priv *priv,
}
/* Write hardware ports matrix */
- ret = i2c_smbus_write_word_data(client, TPS23881_REG_PORT_MAP, val);
- if (ret)
- return ret;
-
- return 0;
+ return i2c_smbus_write_word_data(client, TPS23881_REG_PORT_MAP, val);
}
static int
@@ -572,11 +904,7 @@ tps23881_set_ports_conf(struct tps23881_priv *priv,
val |= BIT(port_matrix[i].lgcl_chan[1]) |
BIT(port_matrix[i].lgcl_chan[1] + 4);
}
- ret = i2c_smbus_write_word_data(client, TPS23881_REG_DET_CLA_EN, val);
- if (ret)
- return ret;
-
- return 0;
+ return i2c_smbus_write_word_data(client, TPS23881_REG_DET_CLA_EN, val);
}
static int
@@ -602,11 +930,7 @@ tps23881_set_ports_matrix(struct tps23881_priv *priv,
if (ret)
return ret;
- ret = tps23881_set_ports_conf(priv, port_matrix);
- if (ret)
- return ret;
-
- return 0;
+ return tps23881_set_ports_conf(priv, port_matrix);
}
static int tps23881_setup_pi_matrix(struct pse_controller_dev *pcdev)
@@ -630,16 +954,84 @@ static int tps23881_setup_pi_matrix(struct pse_controller_dev *pcdev)
return ret;
}
+static int tps23881_power_class_table[] = {
+ -ERANGE,
+ 4000,
+ 7000,
+ 15500,
+ 30000,
+ 15500,
+ 15500,
+ -ERANGE,
+ 45000,
+ 60000,
+ 75000,
+ 90000,
+ 15500,
+ 45000,
+ -ERANGE,
+ -ERANGE,
+};
+
+static int tps23881_pi_get_pw_req(struct pse_controller_dev *pcdev, int id)
+{
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ struct i2c_client *client = priv->client;
+ u8 reg, chan;
+ int ret;
+ u16 val;
+
+ /* For a 4-pair PI the classification needs 5 ms to complete */
+ if (priv->port[id].is_4p)
+ mdelay(5);
+
+ chan = priv->port[id].chan[0];
+ reg = TPS23881_REG_DISC + (chan % 4);
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0)
+ return ret;
+
+ val = tps23881_calc_val(ret, chan, 4, 0xf);
+ return tps23881_power_class_table[val];
+}
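
The requested-power lookup above extracts the 4-bit class code for the channel from the DISC register (upper nibble of the per-channel byte) and indexes the class table; a class-4 PD, for example, maps to 30000 mW. A standalone sketch with a made-up register value (not part of the patch):

#include <stdio.h>

/* Same table as tps23881_power_class_table; -1 stands in for -ERANGE */
static const int power_class_table[] = {
        -1, 4000, 7000, 15500, 30000, 15500, 15500, -1,
        45000, 60000, 75000, 90000, 15500, 45000, -1, -1,
};

int main(void)
{
        unsigned int reg = 0x4440;      /* hypothetical DISC register value */
        unsigned int chan = 1;          /* channels 0-3 use the low byte */
        unsigned int byte = (chan >= 4) ? (reg >> 8) & 0xff : reg & 0xff;
        unsigned int class = (byte >> 4) & 0xf;

        printf("chan %u: class %u -> %d mW requested\n",
               chan, class, power_class_table[class]);
        return 0;
}
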
+
static const struct pse_controller_ops tps23881_ops = {
.setup_pi_matrix = tps23881_setup_pi_matrix,
.pi_enable = tps23881_pi_enable,
.pi_disable = tps23881_pi_disable,
- .pi_is_enabled = tps23881_pi_is_enabled,
- .ethtool_get_status = tps23881_ethtool_get_status,
+ .pi_get_admin_state = tps23881_pi_get_admin_state,
+ .pi_get_pw_status = tps23881_pi_get_pw_status,
+ .pi_get_pw_class = tps23881_pi_get_pw_class,
+ .pi_get_actual_pw = tps23881_pi_get_actual_pw,
+ .pi_get_voltage = tps23881_pi_get_voltage,
+ .pi_get_pw_limit = tps23881_pi_get_pw_limit,
+ .pi_set_pw_limit = tps23881_pi_set_pw_limit,
+ .pi_get_pw_limit_ranges = tps23881_pi_get_pw_limit_ranges,
+ .pi_get_pw_req = tps23881_pi_get_pw_req,
};
-static const char fw_parity_name[] = "ti/tps23881/tps23881-parity-14.bin";
-static const char fw_sram_name[] = "ti/tps23881/tps23881-sram-14.bin";
+struct tps23881_info {
+ u8 dev_id; /* device ID and silicon revision */
+ const char *fw_parity_name; /* parity code firmware file name */
+ const char *fw_sram_name; /* SRAM code firmware file name */
+};
+
+enum tps23881_model {
+ TPS23881,
+ TPS23881B,
+};
+
+static const struct tps23881_info tps23881_info[] = {
+ [TPS23881] = {
+ .dev_id = 0x22,
+ .fw_parity_name = "ti/tps23881/tps23881-parity-14.bin",
+ .fw_sram_name = "ti/tps23881/tps23881-sram-14.bin",
+ },
+ [TPS23881B] = {
+ .dev_id = 0x24,
+ /* skip SRAM load, ROM provides Clause 145 hardware-level support */
+ },
+};
struct tps23881_fw_conf {
u8 reg;
@@ -711,16 +1103,17 @@ out:
return ret;
}
-static int tps23881_flash_sram_fw(struct i2c_client *client)
+static int tps23881_flash_sram_fw(struct i2c_client *client,
+ const struct tps23881_info *info)
{
int ret;
- ret = tps23881_flash_sram_fw_part(client, fw_parity_name,
+ ret = tps23881_flash_sram_fw_part(client, info->fw_parity_name,
tps23881_fw_parity_conf);
if (ret)
return ret;
- ret = tps23881_flash_sram_fw_part(client, fw_sram_name,
+ ret = tps23881_flash_sram_fw_part(client, info->fw_sram_name,
tps23881_fw_sram_conf);
if (ret)
return ret;
@@ -734,9 +1127,311 @@ static int tps23881_flash_sram_fw(struct i2c_client *client)
return 0;
}
+/* Pack the per-channel interrupt event bits into an 8-bit bitmap
+ * aligned with the chan number.
+ */
+static u8 tps23881_irq_export_chans_helper(u16 reg_val, u8 field_offset)
+{
+ u8 val;
+
+ val = (reg_val >> (4 + field_offset) & 0xf0) |
+ (reg_val >> field_offset & 0x0f);
+
+ return val;
+}
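
Sketch of how this helper folds a 16-bit event register (channels 0-3 in the low byte, channels 4-7 in the high byte, event bits at a given nibble offset) into one 8-bit channel bitmap; the register value is arbitrary and the block is not part of the patch.

#include <stdio.h>
#include <stdint.h>

static unsigned int export_chans(uint16_t reg_val, uint8_t field_offset)
{
        return (reg_val >> (4 + field_offset) & 0xf0) |
               (reg_val >> field_offset & 0x0f);
}

int main(void)
{
        uint16_t det_event = 0x2041;    /* arbitrary DET_EVENT-style value */

        /* Detection events sit at offset 0, classification at offset 4 */
        printf("detection chans:      0x%02x\n", export_chans(det_event, 0)); /* 0x01 */
        printf("classification chans: 0x%02x\n", export_chans(det_event, 4)); /* 0x24 */
        return 0;
}
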
+
+/* Convert a channel bitmap into per-port notifications */
+static void tps23881_set_notifs_helper(struct tps23881_priv *priv,
+ u8 chans,
+ unsigned long *notifs,
+ unsigned long *notifs_mask,
+ enum ethtool_pse_event event)
+{
+ u8 chan;
+ int i;
+
+ if (!chans)
+ return;
+
+ for (i = 0; i < TPS23881_MAX_CHANS; i++) {
+ if (!priv->port[i].exist)
+ continue;
+ /* No need to look at the 2nd channel in case of PoE4 as
+ * both registers are set.
+ */
+ chan = priv->port[i].chan[0];
+
+ if (BIT(chan) & chans) {
+ *notifs_mask |= BIT(i);
+ notifs[i] |= event;
+ }
+ }
+}
+
+static void tps23881_irq_event_over_temp(struct tps23881_priv *priv,
+ u16 reg_val,
+ unsigned long *notifs,
+ unsigned long *notifs_mask)
+{
+ int i;
+
+ if (reg_val & TPS23881_REG_TSD) {
+ for (i = 0; i < TPS23881_MAX_CHANS; i++) {
+ if (!priv->port[i].exist)
+ continue;
+
+ *notifs_mask |= BIT(i);
+ notifs[i] |= ETHTOOL_PSE_EVENT_OVER_TEMP;
+ }
+ }
+}
+
+static int tps23881_irq_event_over_current(struct tps23881_priv *priv,
+ u16 reg_val,
+ unsigned long *notifs,
+ unsigned long *notifs_mask)
+{
+ int i, ret;
+ u8 chans;
+
+ chans = tps23881_irq_export_chans_helper(reg_val, 0);
+ if (!chans)
+ return 0;
+
+ tps23881_set_notifs_helper(priv, chans, notifs, notifs_mask,
+ ETHTOOL_PSE_EVENT_OVER_CURRENT |
+ ETHTOOL_C33_PSE_EVENT_DISCONNECTION);
+
+ /* Over Current event resets the power limit registers so we need
+ * to configure them again.
+ */
+ for_each_set_bit(i, notifs_mask, priv->pcdev.nr_lines) {
+ if (priv->port[i].pw_pol < 0)
+ continue;
+
+ ret = tps23881_pi_enable_manual_pol(priv, i);
+ if (ret < 0)
+ return ret;
+
+ /* Set power policy */
+ ret = tps23881_pi_set_pw_pol_limit(priv, i,
+ priv->port[i].pw_pol,
+ priv->port[i].is_4p);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void tps23881_irq_event_disconnection(struct tps23881_priv *priv,
+ u16 reg_val,
+ unsigned long *notifs,
+ unsigned long *notifs_mask)
+{
+ u8 chans;
+
+ chans = tps23881_irq_export_chans_helper(reg_val, 4);
+ if (chans)
+ tps23881_set_notifs_helper(priv, chans, notifs, notifs_mask,
+ ETHTOOL_C33_PSE_EVENT_DISCONNECTION);
+}
+
+static int tps23881_irq_event_detection(struct tps23881_priv *priv,
+ u16 reg_val,
+ unsigned long *notifs,
+ unsigned long *notifs_mask)
+{
+ enum ethtool_pse_event event;
+ int reg, ret, i, val;
+ unsigned long chans;
+
+ chans = tps23881_irq_export_chans_helper(reg_val, 0);
+ for_each_set_bit(i, &chans, TPS23881_MAX_CHANS) {
+ reg = TPS23881_REG_DISC + (i % 4);
+ ret = i2c_smbus_read_word_data(priv->client, reg);
+ if (ret < 0)
+ return ret;
+
+ val = tps23881_calc_val(ret, i, 0, 0xf);
+ /* If detection valid */
+ if (val == 0x4)
+ event = ETHTOOL_C33_PSE_EVENT_DETECTION;
+ else
+ event = ETHTOOL_C33_PSE_EVENT_DISCONNECTION;
+
+ tps23881_set_notifs_helper(priv, BIT(i), notifs,
+ notifs_mask, event);
+ }
+
+ return 0;
+}
+
+static int tps23881_irq_event_classification(struct tps23881_priv *priv,
+ u16 reg_val,
+ unsigned long *notifs,
+ unsigned long *notifs_mask)
+{
+ int reg, ret, val, i;
+ unsigned long chans;
+
+ chans = tps23881_irq_export_chans_helper(reg_val, 4);
+ for_each_set_bit(i, &chans, TPS23881_MAX_CHANS) {
+ reg = TPS23881_REG_DISC + (i % 4);
+ ret = i2c_smbus_read_word_data(priv->client, reg);
+ if (ret < 0)
+ return ret;
+
+ val = tps23881_calc_val(ret, i, 4, 0xf);
+ /* Do not report classification event for unknown class */
+ if (!val || val == 0x8 || val == 0xf)
+ continue;
+
+ tps23881_set_notifs_helper(priv, BIT(i), notifs,
+ notifs_mask,
+ ETHTOOL_C33_PSE_EVENT_CLASSIFICATION);
+ }
+
+ return 0;
+}
+
+static int tps23881_irq_event_handler(struct tps23881_priv *priv, u16 reg,
+ unsigned long *notifs,
+ unsigned long *notifs_mask)
+{
+ struct i2c_client *client = priv->client;
+ int ret, val;
+
+ /* The Supply event bit is repeated twice so we only need to read
+ * the one from the first byte.
+ */
+ if (reg & TPS23881_REG_IT_SUPF) {
+ ret = i2c_smbus_read_word_data(client, TPS23881_REG_SUPF_EVENT);
+ if (ret < 0)
+ return ret;
+ tps23881_irq_event_over_temp(priv, ret, notifs, notifs_mask);
+ }
+
+ if (reg & (TPS23881_REG_IT_IFAULT | TPS23881_REG_IT_IFAULT << 8 |
+ TPS23881_REG_IT_DISF | TPS23881_REG_IT_DISF << 8)) {
+ ret = i2c_smbus_read_word_data(client, TPS23881_REG_FAULT);
+ if (ret < 0)
+ return ret;
+
+ val = ret;
+ ret = tps23881_irq_event_over_current(priv, val, notifs,
+ notifs_mask);
+ if (ret)
+ return ret;
+
+ tps23881_irq_event_disconnection(priv, val, notifs, notifs_mask);
+ }
+
+ if (reg & (TPS23881_REG_IT_DETC | TPS23881_REG_IT_DETC << 8 |
+ TPS23881_REG_IT_CLASC | TPS23881_REG_IT_CLASC << 8)) {
+ ret = i2c_smbus_read_word_data(client, TPS23881_REG_DET_EVENT);
+ if (ret < 0)
+ return ret;
+
+ val = ret;
+ ret = tps23881_irq_event_detection(priv, val, notifs,
+ notifs_mask);
+ if (ret)
+ return ret;
+
+ ret = tps23881_irq_event_classification(priv, val, notifs,
+ notifs_mask);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static int tps23881_irq_handler(int irq, struct pse_controller_dev *pcdev,
+ unsigned long *notifs,
+ unsigned long *notifs_mask)
+{
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ struct i2c_client *client = priv->client;
+ int ret, it_mask, retry;
+
+ /* Get interrupt mask */
+ ret = i2c_smbus_read_word_data(client, TPS23881_REG_IT_MASK);
+ if (ret < 0)
+ return ret;
+ it_mask = ret;
+
+ /* Read the interrupt register until the interrupt pin is released. */
+ retry = 0;
+ while (true) {
+ if (retry > TPS23881_MAX_IRQ_RETRIES) {
+ dev_err(&client->dev, "interrupt never freed");
+ return -ETIMEDOUT;
+ }
+
+ ret = i2c_smbus_read_word_data(client, TPS23881_REG_IT);
+ if (ret < 0)
+ return ret;
+
+ /* No more relevant interrupts pending */
+ if (!(ret & it_mask))
+ return 0;
+
+ ret = tps23881_irq_event_handler(priv, (u16)ret, notifs,
+ notifs_mask);
+ if (ret)
+ return ret;
+
+ retry++;
+ }
+ return 0;
+}
+
+static int tps23881_setup_irq(struct tps23881_priv *priv, int irq)
+{
+ struct i2c_client *client = priv->client;
+ struct pse_irq_desc irq_desc = {
+ .name = "tps23881-irq",
+ .map_event = tps23881_irq_handler,
+ };
+ int ret;
+ u16 val;
+
+ if (!irq) {
+ dev_err(&client->dev, "interrupt is missing");
+ return -EINVAL;
+ }
+
+ val = TPS23881_REG_IT_IFAULT | TPS23881_REG_IT_SUPF |
+ TPS23881_REG_IT_DETC | TPS23881_REG_IT_CLASC |
+ TPS23881_REG_IT_DISF;
+ val |= val << 8;
+ ret = i2c_smbus_write_word_data(client, TPS23881_REG_IT_MASK, val);
+ if (ret)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, TPS23881_REG_GEN_MASK);
+ if (ret < 0)
+ return ret;
+
+ val = TPS23881_REG_INTEN | TPS23881_REG_CLCHE | TPS23881_REG_DECHE;
+ val |= val << 8;
+ val |= (u16)ret;
+ ret = i2c_smbus_write_word_data(client, TPS23881_REG_GEN_MASK, val);
+ if (ret < 0)
+ return ret;
+
+ /* Reset interrupts registers */
+ ret = i2c_smbus_write_word_data(client, TPS23881_REG_RESET,
+ TPS23881_REG_CLRAIN);
+ if (ret < 0)
+ return ret;
+
+ return devm_pse_irq_helper(&priv->pcdev, irq, 0, &irq_desc);
+}
+
static int tps23881_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
+ const struct tps23881_info *info;
struct tps23881_priv *priv;
struct gpio_desc *reset;
int ret;
@@ -747,6 +1442,10 @@ static int tps23881_i2c_probe(struct i2c_client *client)
return -ENXIO;
}
+ info = i2c_get_match_data(client);
+ if (!info)
+ return -EINVAL;
+
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -765,7 +1464,7 @@ static int tps23881_i2c_probe(struct i2c_client *client)
* to Load TPS2388x SRAM and Parity Code over I2C" (Rev E))
* indicates we should delay that programming by at least 50ms. So
* we'll wait the entire 50ms here to ensure we're safe to go to the
- * SRAM loading proceedure.
+ * SRAM loading procedure.
*/
msleep(50);
}
@@ -774,20 +1473,27 @@ static int tps23881_i2c_probe(struct i2c_client *client)
if (ret < 0)
return ret;
- if (FIELD_GET(TPS23881_REG_DEVID_MASK, ret) != TPS23881_DEVICE_ID) {
+ if (ret != info->dev_id) {
dev_err(dev, "Wrong device ID\n");
return -ENXIO;
}
- ret = tps23881_flash_sram_fw(client);
- if (ret < 0)
- return ret;
+ if (info->fw_sram_name) {
+ ret = tps23881_flash_sram_fw(client, info);
+ if (ret < 0)
+ return ret;
+ }
ret = i2c_smbus_read_byte_data(client, TPS23881_REG_FWREV);
if (ret < 0)
return ret;
- dev_info(&client->dev, "Firmware revision 0x%x\n", ret);
+ if (ret == 0xFF) {
+ dev_err(&client->dev, "Device entered safe mode\n");
+ return -ENXIO;
+ }
+ dev_info(&client->dev, "Firmware revision 0x%x%s\n", ret,
+ ret == 0x00 ? " (ROM firmware)" : "");
/* Set configuration B, 16 bit access on a single device address */
ret = i2c_smbus_read_byte_data(client, TPS23881_REG_GEN_MASK);
@@ -808,23 +1514,36 @@ static int tps23881_i2c_probe(struct i2c_client *client)
priv->pcdev.dev = dev;
priv->pcdev.types = ETHTOOL_PSE_C33;
priv->pcdev.nr_lines = TPS23881_MAX_CHANS;
+ priv->pcdev.supp_budget_eval_strategies = PSE_BUDGET_EVAL_STRAT_STATIC;
ret = devm_pse_controller_register(dev, &priv->pcdev);
if (ret) {
return dev_err_probe(dev, ret,
"failed to register PSE controller\n");
}
+ ret = tps23881_setup_irq(priv, client->irq);
+ if (ret)
+ return ret;
+
return ret;
}
static const struct i2c_device_id tps23881_id[] = {
- { "tps23881" },
+ { "tps23881", .driver_data = (kernel_ulong_t)&tps23881_info[TPS23881] },
+ { "tps23881b", .driver_data = (kernel_ulong_t)&tps23881_info[TPS23881B] },
{ }
};
MODULE_DEVICE_TABLE(i2c, tps23881_id);
static const struct of_device_id tps23881_of_match[] = {
- { .compatible = "ti,tps23881", },
+ {
+ .compatible = "ti,tps23881",
+ .data = &tps23881_info[TPS23881]
+ },
+ {
+ .compatible = "ti,tps23881b",
+ .data = &tps23881_info[TPS23881B]
+ },
{ },
};
MODULE_DEVICE_TABLE(of, tps23881_of_match);
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
deleted file mode 100644
index c3f8020571ad..000000000000
--- a/drivers/net/sb1000.c
+++ /dev/null
@@ -1,1179 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* sb1000.c: A General Instruments SB1000 driver for linux. */
-/*
- Written 1998 by Franco Venturi.
-
- Copyright 1998 by Franco Venturi.
- Copyright 1994,1995 by Donald Becker.
- Copyright 1993 United States Government as represented by the
- Director, National Security Agency.
-
- This driver is for the General Instruments SB1000 (internal SURFboard)
-
- The author may be reached as fventuri@mediaone.net
-
-
- Changes:
-
- 981115 Steven Hirsch <shirsch@adelphia.net>
-
- Linus changed the timer interface. Should work on all recent
- development kernels.
-
- 980608 Steven Hirsch <shirsch@adelphia.net>
-
- Small changes to make it work with 2.1.x kernels. Hopefully,
- nothing major will change before official release of Linux 2.2.
-
- Merged with 2.2 - Alan Cox
-*/
-
-static char version[] = "sb1000.c:v1.1.2 6/01/98 (fventuri@mediaone.net)\n";
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
-#include <linux/if_cablemodem.h> /* for SIOGCM/SIOSCM stuff */
-#include <linux/in.h>
-#include <linux/ioport.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h> /* for udelay() */
-#include <linux/etherdevice.h>
-#include <linux/pnp.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <linux/gfp.h>
-
-#include <asm/io.h>
-#include <asm/processor.h>
-#include <linux/uaccess.h>
-
-#ifdef SB1000_DEBUG
-static int sb1000_debug = SB1000_DEBUG;
-#else
-static const int sb1000_debug = 1;
-#endif
-
-static const int SB1000_IO_EXTENT = 8;
-/* SB1000 Maximum Receive Unit */
-static const int SB1000_MRU = 1500; /* octects */
-
-#define NPIDS 4
-struct sb1000_private {
- struct sk_buff *rx_skb[NPIDS];
- short rx_dlen[NPIDS];
- unsigned int rx_frames;
- short rx_error_count;
- short rx_error_dpc_count;
- unsigned char rx_session_id[NPIDS];
- unsigned char rx_frame_id[NPIDS];
- unsigned char rx_pkt_type[NPIDS];
-};
-
-/* prototypes for Linux interface */
-extern int sb1000_probe(struct net_device *dev);
-static int sb1000_open(struct net_device *dev);
-static int sb1000_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
- void __user *data, int cmd);
-static netdev_tx_t sb1000_start_xmit(struct sk_buff *skb,
- struct net_device *dev);
-static irqreturn_t sb1000_interrupt(int irq, void *dev_id);
-static int sb1000_close(struct net_device *dev);
-
-
-/* SB1000 hardware routines to be used during open/configuration phases */
-static int card_wait_for_busy_clear(const int ioaddr[],
- const char* name);
-static int card_wait_for_ready(const int ioaddr[], const char* name,
- unsigned char in[]);
-static int card_send_command(const int ioaddr[], const char* name,
- const unsigned char out[], unsigned char in[]);
-
-/* SB1000 hardware routines to be used during frame rx interrupt */
-static int sb1000_wait_for_ready(const int ioaddr[], const char* name);
-static int sb1000_wait_for_ready_clear(const int ioaddr[],
- const char* name);
-static void sb1000_send_command(const int ioaddr[], const char* name,
- const unsigned char out[]);
-static void sb1000_read_status(const int ioaddr[], unsigned char in[]);
-static void sb1000_issue_read_command(const int ioaddr[],
- const char* name);
-
-/* SB1000 commands for open/configuration */
-static int sb1000_reset(const int ioaddr[], const char* name);
-static int sb1000_check_CRC(const int ioaddr[], const char* name);
-static inline int sb1000_start_get_set_command(const int ioaddr[],
- const char* name);
-static int sb1000_end_get_set_command(const int ioaddr[],
- const char* name);
-static int sb1000_activate(const int ioaddr[], const char* name);
-static int sb1000_get_firmware_version(const int ioaddr[],
- const char* name, unsigned char version[], int do_end);
-static int sb1000_get_frequency(const int ioaddr[], const char* name,
- int* frequency);
-static int sb1000_set_frequency(const int ioaddr[], const char* name,
- int frequency);
-static int sb1000_get_PIDs(const int ioaddr[], const char* name,
- short PID[]);
-static int sb1000_set_PIDs(const int ioaddr[], const char* name,
- const short PID[]);
-
-/* SB1000 commands for frame rx interrupt */
-static int sb1000_rx(struct net_device *dev);
-static void sb1000_error_dpc(struct net_device *dev);
-
-static const struct pnp_device_id sb1000_pnp_ids[] = {
- { "GIC1000", 0 },
- { "", 0 }
-};
-MODULE_DEVICE_TABLE(pnp, sb1000_pnp_ids);
-
-static const struct net_device_ops sb1000_netdev_ops = {
- .ndo_open = sb1000_open,
- .ndo_start_xmit = sb1000_start_xmit,
- .ndo_siocdevprivate = sb1000_siocdevprivate,
- .ndo_stop = sb1000_close,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int
-sb1000_probe_one(struct pnp_dev *pdev, const struct pnp_device_id *id)
-{
- struct net_device *dev;
- unsigned short ioaddr[2], irq;
- unsigned int serial_number;
- int error = -ENODEV;
- u8 addr[ETH_ALEN];
-
- if (pnp_device_attach(pdev) < 0)
- return -ENODEV;
- if (pnp_activate_dev(pdev) < 0)
- goto out_detach;
-
- if (!pnp_port_valid(pdev, 0) || !pnp_port_valid(pdev, 1))
- goto out_disable;
- if (!pnp_irq_valid(pdev, 0))
- goto out_disable;
-
- serial_number = pdev->card->serial;
-
- ioaddr[0] = pnp_port_start(pdev, 0);
- ioaddr[1] = pnp_port_start(pdev, 0);
-
- irq = pnp_irq(pdev, 0);
-
- if (!request_region(ioaddr[0], 16, "sb1000"))
- goto out_disable;
- if (!request_region(ioaddr[1], 16, "sb1000"))
- goto out_release_region0;
-
- dev = alloc_etherdev(sizeof(struct sb1000_private));
- if (!dev) {
- error = -ENOMEM;
- goto out_release_regions;
- }
-
-
- dev->base_addr = ioaddr[0];
- /* mem_start holds the second I/O address */
- dev->mem_start = ioaddr[1];
- dev->irq = irq;
-
- if (sb1000_debug > 0)
- printk(KERN_NOTICE "%s: sb1000 at (%#3.3lx,%#3.3lx), "
- "S/N %#8.8x, IRQ %d.\n", dev->name, dev->base_addr,
- dev->mem_start, serial_number, dev->irq);
-
- /*
- * The SB1000 is an rx-only cable modem device. The uplink is a modem
- * and we do not want to arp on it.
- */
- dev->flags = IFF_POINTOPOINT|IFF_NOARP;
-
- SET_NETDEV_DEV(dev, &pdev->dev);
-
- if (sb1000_debug > 0)
- printk(KERN_NOTICE "%s", version);
-
- dev->netdev_ops = &sb1000_netdev_ops;
-
- /* hardware address is 0:0:serial_number */
- addr[0] = 0;
- addr[1] = 0;
- addr[2] = serial_number >> 24 & 0xff;
- addr[3] = serial_number >> 16 & 0xff;
- addr[4] = serial_number >> 8 & 0xff;
- addr[5] = serial_number >> 0 & 0xff;
- eth_hw_addr_set(dev, addr);
-
- pnp_set_drvdata(pdev, dev);
-
- error = register_netdev(dev);
- if (error)
- goto out_free_netdev;
- return 0;
-
- out_free_netdev:
- free_netdev(dev);
- out_release_regions:
- release_region(ioaddr[1], 16);
- out_release_region0:
- release_region(ioaddr[0], 16);
- out_disable:
- pnp_disable_dev(pdev);
- out_detach:
- pnp_device_detach(pdev);
- return error;
-}
-
-static void
-sb1000_remove_one(struct pnp_dev *pdev)
-{
- struct net_device *dev = pnp_get_drvdata(pdev);
-
- unregister_netdev(dev);
- release_region(dev->base_addr, 16);
- release_region(dev->mem_start, 16);
- free_netdev(dev);
-}
-
-static struct pnp_driver sb1000_driver = {
- .name = "sb1000",
- .id_table = sb1000_pnp_ids,
- .probe = sb1000_probe_one,
- .remove = sb1000_remove_one,
-};
-
-
-/*
- * SB1000 hardware routines to be used during open/configuration phases
- */
-
-static const int TimeOutJiffies = (875 * HZ) / 100;
-
-/* Card Wait For Busy Clear (cannot be used during an interrupt) */
-static int
-card_wait_for_busy_clear(const int ioaddr[], const char* name)
-{
- unsigned char a;
- unsigned long timeout;
-
- a = inb(ioaddr[0] + 7);
- timeout = jiffies + TimeOutJiffies;
- while (a & 0x80 || a & 0x40) {
- /* a little sleep */
- yield();
-
- a = inb(ioaddr[0] + 7);
- if (time_after_eq(jiffies, timeout)) {
- printk(KERN_WARNING "%s: card_wait_for_busy_clear timeout\n",
- name);
- return -ETIME;
- }
- }
-
- return 0;
-}
-
-/* Card Wait For Ready (cannot be used during an interrupt) */
-static int
-card_wait_for_ready(const int ioaddr[], const char* name, unsigned char in[])
-{
- unsigned char a;
- unsigned long timeout;
-
- a = inb(ioaddr[1] + 6);
- timeout = jiffies + TimeOutJiffies;
- while (a & 0x80 || !(a & 0x40)) {
- /* a little sleep */
- yield();
-
- a = inb(ioaddr[1] + 6);
- if (time_after_eq(jiffies, timeout)) {
- printk(KERN_WARNING "%s: card_wait_for_ready timeout\n",
- name);
- return -ETIME;
- }
- }
-
- in[1] = inb(ioaddr[0] + 1);
- in[2] = inb(ioaddr[0] + 2);
- in[3] = inb(ioaddr[0] + 3);
- in[4] = inb(ioaddr[0] + 4);
- in[0] = inb(ioaddr[0] + 5);
- in[6] = inb(ioaddr[0] + 6);
- in[5] = inb(ioaddr[1] + 6);
- return 0;
-}
-
-/* Card Send Command (cannot be used during an interrupt) */
-static int
-card_send_command(const int ioaddr[], const char* name,
- const unsigned char out[], unsigned char in[])
-{
- int status;
-
- if ((status = card_wait_for_busy_clear(ioaddr, name)))
- return status;
- outb(0xa0, ioaddr[0] + 6);
- outb(out[2], ioaddr[0] + 1);
- outb(out[3], ioaddr[0] + 2);
- outb(out[4], ioaddr[0] + 3);
- outb(out[5], ioaddr[0] + 4);
- outb(out[1], ioaddr[0] + 5);
- outb(0xa0, ioaddr[0] + 6);
- outb(out[0], ioaddr[0] + 7);
- if (out[0] != 0x20 && out[0] != 0x30) {
- if ((status = card_wait_for_ready(ioaddr, name, in)))
- return status;
- inb(ioaddr[0] + 7);
- if (sb1000_debug > 3)
- printk(KERN_DEBUG "%s: card_send_command "
- "out: %02x%02x%02x%02x%02x%02x "
- "in: %02x%02x%02x%02x%02x%02x%02x\n", name,
- out[0], out[1], out[2], out[3], out[4], out[5],
- in[0], in[1], in[2], in[3], in[4], in[5], in[6]);
- } else {
- if (sb1000_debug > 3)
- printk(KERN_DEBUG "%s: card_send_command "
- "out: %02x%02x%02x%02x%02x%02x\n", name,
- out[0], out[1], out[2], out[3], out[4], out[5]);
- }
-
- if (out[1] != 0x1b) {
- if (out[0] >= 0x80 && in[0] != (out[1] | 0x80))
- return -EIO;
- }
- return 0;
-}
-
-
-/*
- * SB1000 hardware routines to be used during frame rx interrupt
- */
-static const int Sb1000TimeOutJiffies = 7 * HZ;
-
-/* Card Wait For Ready (to be used during frame rx) */
-static int
-sb1000_wait_for_ready(const int ioaddr[], const char* name)
-{
- unsigned long timeout;
-
- timeout = jiffies + Sb1000TimeOutJiffies;
- while (inb(ioaddr[1] + 6) & 0x80) {
- if (time_after_eq(jiffies, timeout)) {
- printk(KERN_WARNING "%s: sb1000_wait_for_ready timeout\n",
- name);
- return -ETIME;
- }
- }
- timeout = jiffies + Sb1000TimeOutJiffies;
- while (!(inb(ioaddr[1] + 6) & 0x40)) {
- if (time_after_eq(jiffies, timeout)) {
- printk(KERN_WARNING "%s: sb1000_wait_for_ready timeout\n",
- name);
- return -ETIME;
- }
- }
- inb(ioaddr[0] + 7);
- return 0;
-}
-
-/* Card Wait For Ready Clear (to be used during frame rx) */
-static int
-sb1000_wait_for_ready_clear(const int ioaddr[], const char* name)
-{
- unsigned long timeout;
-
- timeout = jiffies + Sb1000TimeOutJiffies;
- while (inb(ioaddr[1] + 6) & 0x80) {
- if (time_after_eq(jiffies, timeout)) {
- printk(KERN_WARNING "%s: sb1000_wait_for_ready_clear timeout\n",
- name);
- return -ETIME;
- }
- }
- timeout = jiffies + Sb1000TimeOutJiffies;
- while (inb(ioaddr[1] + 6) & 0x40) {
- if (time_after_eq(jiffies, timeout)) {
- printk(KERN_WARNING "%s: sb1000_wait_for_ready_clear timeout\n",
- name);
- return -ETIME;
- }
- }
- return 0;
-}
-
-/* Card Send Command (to be used during frame rx) */
-static void
-sb1000_send_command(const int ioaddr[], const char* name,
- const unsigned char out[])
-{
- outb(out[2], ioaddr[0] + 1);
- outb(out[3], ioaddr[0] + 2);
- outb(out[4], ioaddr[0] + 3);
- outb(out[5], ioaddr[0] + 4);
- outb(out[1], ioaddr[0] + 5);
- outb(out[0], ioaddr[0] + 7);
- if (sb1000_debug > 3)
- printk(KERN_DEBUG "%s: sb1000_send_command out: %02x%02x%02x%02x"
- "%02x%02x\n", name, out[0], out[1], out[2], out[3], out[4], out[5]);
-}
-
-/* Card Read Status (to be used during frame rx) */
-static void
-sb1000_read_status(const int ioaddr[], unsigned char in[])
-{
- in[1] = inb(ioaddr[0] + 1);
- in[2] = inb(ioaddr[0] + 2);
- in[3] = inb(ioaddr[0] + 3);
- in[4] = inb(ioaddr[0] + 4);
- in[0] = inb(ioaddr[0] + 5);
-}
-
-/* Issue Read Command (to be used during frame rx) */
-static void
-sb1000_issue_read_command(const int ioaddr[], const char* name)
-{
- static const unsigned char Command0[6] = {0x20, 0x00, 0x00, 0x01, 0x00, 0x00};
-
- sb1000_wait_for_ready_clear(ioaddr, name);
- outb(0xa0, ioaddr[0] + 6);
- sb1000_send_command(ioaddr, name, Command0);
-}
-
-
-/*
- * SB1000 commands for open/configuration
- */
-/* reset SB1000 card */
-static int
-sb1000_reset(const int ioaddr[], const char* name)
-{
- static const unsigned char Command0[6] = {0x80, 0x16, 0x00, 0x00, 0x00, 0x00};
-
- unsigned char st[7];
- int port, status;
-
- port = ioaddr[1] + 6;
- outb(0x4, port);
- inb(port);
- udelay(1000);
- outb(0x0, port);
- inb(port);
- ssleep(1);
- outb(0x4, port);
- inb(port);
- udelay(1000);
- outb(0x0, port);
- inb(port);
- udelay(0);
-
- if ((status = card_send_command(ioaddr, name, Command0, st)))
- return status;
- if (st[3] != 0xf0)
- return -EIO;
- return 0;
-}
-
-/* check SB1000 firmware CRC */
-static int
-sb1000_check_CRC(const int ioaddr[], const char* name)
-{
- static const unsigned char Command0[6] = {0x80, 0x1f, 0x00, 0x00, 0x00, 0x00};
-
- unsigned char st[7];
- int status;
-
- /* check CRC */
- if ((status = card_send_command(ioaddr, name, Command0, st)))
- return status;
- if (st[1] != st[3] || st[2] != st[4])
- return -EIO;
- return 0;
-}
-
-static inline int
-sb1000_start_get_set_command(const int ioaddr[], const char* name)
-{
- static const unsigned char Command0[6] = {0x80, 0x1b, 0x00, 0x00, 0x00, 0x00};
-
- unsigned char st[7];
-
- return card_send_command(ioaddr, name, Command0, st);
-}
-
-static int
-sb1000_end_get_set_command(const int ioaddr[], const char* name)
-{
- static const unsigned char Command0[6] = {0x80, 0x1b, 0x02, 0x00, 0x00, 0x00};
- static const unsigned char Command1[6] = {0x20, 0x00, 0x00, 0x00, 0x00, 0x00};
-
- unsigned char st[7];
- int status;
-
- if ((status = card_send_command(ioaddr, name, Command0, st)))
- return status;
- return card_send_command(ioaddr, name, Command1, st);
-}
-
-static int
-sb1000_activate(const int ioaddr[], const char* name)
-{
- static const unsigned char Command0[6] = {0x80, 0x11, 0x00, 0x00, 0x00, 0x00};
- static const unsigned char Command1[6] = {0x80, 0x16, 0x00, 0x00, 0x00, 0x00};
-
- unsigned char st[7];
- int status;
-
- ssleep(1);
- status = card_send_command(ioaddr, name, Command0, st);
- if (status)
- return status;
- status = card_send_command(ioaddr, name, Command1, st);
- if (status)
- return status;
- if (st[3] != 0xf1) {
- status = sb1000_start_get_set_command(ioaddr, name);
- if (status)
- return status;
- return -EIO;
- }
- udelay(1000);
- return sb1000_start_get_set_command(ioaddr, name);
-}
-
-/* get SB1000 firmware version */
-static int
-sb1000_get_firmware_version(const int ioaddr[], const char* name,
- unsigned char version[], int do_end)
-{
- static const unsigned char Command0[6] = {0x80, 0x23, 0x00, 0x00, 0x00, 0x00};
-
- unsigned char st[7];
- int status;
-
- if ((status = sb1000_start_get_set_command(ioaddr, name)))
- return status;
- if ((status = card_send_command(ioaddr, name, Command0, st)))
- return status;
- if (st[0] != 0xa3)
- return -EIO;
- version[0] = st[1];
- version[1] = st[2];
- if (do_end)
- return sb1000_end_get_set_command(ioaddr, name);
- else
- return 0;
-}
-
-/* get SB1000 frequency */
-static int
-sb1000_get_frequency(const int ioaddr[], const char* name, int* frequency)
-{
- static const unsigned char Command0[6] = {0x80, 0x44, 0x00, 0x00, 0x00, 0x00};
-
- unsigned char st[7];
- int status;
-
- udelay(1000);
- if ((status = sb1000_start_get_set_command(ioaddr, name)))
- return status;
- if ((status = card_send_command(ioaddr, name, Command0, st)))
- return status;
- *frequency = ((st[1] << 8 | st[2]) << 8 | st[3]) << 8 | st[4];
- return sb1000_end_get_set_command(ioaddr, name);
-}
-
-/* set SB1000 frequency */
-static int
-sb1000_set_frequency(const int ioaddr[], const char* name, int frequency)
-{
- unsigned char st[7];
- int status;
- unsigned char Command0[6] = {0x80, 0x29, 0x00, 0x00, 0x00, 0x00};
-
- const int FrequencyLowerLimit = 57000;
- const int FrequencyUpperLimit = 804000;
-
- if (frequency < FrequencyLowerLimit || frequency > FrequencyUpperLimit) {
- printk(KERN_ERR "%s: frequency chosen (%d kHz) is not in the range "
- "[%d,%d] kHz\n", name, frequency, FrequencyLowerLimit,
- FrequencyUpperLimit);
- return -EINVAL;
- }
- udelay(1000);
- if ((status = sb1000_start_get_set_command(ioaddr, name)))
- return status;
- Command0[5] = frequency & 0xff;
- frequency >>= 8;
- Command0[4] = frequency & 0xff;
- frequency >>= 8;
- Command0[3] = frequency & 0xff;
- frequency >>= 8;
- Command0[2] = frequency & 0xff;
- return card_send_command(ioaddr, name, Command0, st);
-}
-
-/* get SB1000 PIDs */
-static int
-sb1000_get_PIDs(const int ioaddr[], const char* name, short PID[])
-{
- static const unsigned char Command0[6] = {0x80, 0x40, 0x00, 0x00, 0x00, 0x00};
- static const unsigned char Command1[6] = {0x80, 0x41, 0x00, 0x00, 0x00, 0x00};
- static const unsigned char Command2[6] = {0x80, 0x42, 0x00, 0x00, 0x00, 0x00};
- static const unsigned char Command3[6] = {0x80, 0x43, 0x00, 0x00, 0x00, 0x00};
-
- unsigned char st[7];
- int status;
-
- udelay(1000);
- if ((status = sb1000_start_get_set_command(ioaddr, name)))
- return status;
-
- if ((status = card_send_command(ioaddr, name, Command0, st)))
- return status;
- PID[0] = st[1] << 8 | st[2];
-
- if ((status = card_send_command(ioaddr, name, Command1, st)))
- return status;
- PID[1] = st[1] << 8 | st[2];
-
- if ((status = card_send_command(ioaddr, name, Command2, st)))
- return status;
- PID[2] = st[1] << 8 | st[2];
-
- if ((status = card_send_command(ioaddr, name, Command3, st)))
- return status;
- PID[3] = st[1] << 8 | st[2];
-
- return sb1000_end_get_set_command(ioaddr, name);
-}
-
-/* set SB1000 PIDs */
-static int
-sb1000_set_PIDs(const int ioaddr[], const char* name, const short PID[])
-{
- static const unsigned char Command4[6] = {0x80, 0x2e, 0x00, 0x00, 0x00, 0x00};
-
- unsigned char st[7];
- short p;
- int status;
- unsigned char Command0[6] = {0x80, 0x31, 0x00, 0x00, 0x00, 0x00};
- unsigned char Command1[6] = {0x80, 0x32, 0x00, 0x00, 0x00, 0x00};
- unsigned char Command2[6] = {0x80, 0x33, 0x00, 0x00, 0x00, 0x00};
- unsigned char Command3[6] = {0x80, 0x34, 0x00, 0x00, 0x00, 0x00};
-
- udelay(1000);
- if ((status = sb1000_start_get_set_command(ioaddr, name)))
- return status;
-
- p = PID[0];
- Command0[3] = p & 0xff;
- p >>= 8;
- Command0[2] = p & 0xff;
- if ((status = card_send_command(ioaddr, name, Command0, st)))
- return status;
-
- p = PID[1];
- Command1[3] = p & 0xff;
- p >>= 8;
- Command1[2] = p & 0xff;
- if ((status = card_send_command(ioaddr, name, Command1, st)))
- return status;
-
- p = PID[2];
- Command2[3] = p & 0xff;
- p >>= 8;
- Command2[2] = p & 0xff;
- if ((status = card_send_command(ioaddr, name, Command2, st)))
- return status;
-
- p = PID[3];
- Command3[3] = p & 0xff;
- p >>= 8;
- Command3[2] = p & 0xff;
- if ((status = card_send_command(ioaddr, name, Command3, st)))
- return status;
-
- if ((status = card_send_command(ioaddr, name, Command4, st)))
- return status;
- return sb1000_end_get_set_command(ioaddr, name);
-}
-
-
-static void
-sb1000_print_status_buffer(const char* name, unsigned char st[],
- unsigned char buffer[], int size)
-{
- int i, j, k;
-
- printk(KERN_DEBUG "%s: status: %02x %02x\n", name, st[0], st[1]);
- if (buffer[24] == 0x08 && buffer[25] == 0x00 && buffer[26] == 0x45) {
- printk(KERN_DEBUG "%s: length: %d protocol: %d from: %d.%d.%d.%d:%d "
- "to %d.%d.%d.%d:%d\n", name, buffer[28] << 8 | buffer[29],
- buffer[35], buffer[38], buffer[39], buffer[40], buffer[41],
- buffer[46] << 8 | buffer[47],
- buffer[42], buffer[43], buffer[44], buffer[45],
- buffer[48] << 8 | buffer[49]);
- } else {
- for (i = 0, k = 0; i < (size + 7) / 8; i++) {
- printk(KERN_DEBUG "%s: %s", name, i ? " " : "buffer:");
- for (j = 0; j < 8 && k < size; j++, k++)
- printk(" %02x", buffer[k]);
- printk("\n");
- }
- }
-}
-
-/*
- * SB1000 commands for frame rx interrupt
- */
-/* receive a single frame and assemble datagram
- * (this is the heart of the interrupt routine)
- */
-static int
-sb1000_rx(struct net_device *dev)
-{
-
-#define FRAMESIZE 184
- unsigned char st[2], buffer[FRAMESIZE], session_id, frame_id;
- short dlen;
- int ioaddr, ns;
- unsigned int skbsize;
- struct sk_buff *skb;
- struct sb1000_private *lp = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
-
- /* SB1000 frame constants */
- const int FrameSize = FRAMESIZE;
- const int NewDatagramHeaderSkip = 8;
- const int NewDatagramHeaderSize = NewDatagramHeaderSkip + 18;
- const int NewDatagramDataSize = FrameSize - NewDatagramHeaderSize;
- const int ContDatagramHeaderSkip = 7;
- const int ContDatagramHeaderSize = ContDatagramHeaderSkip + 1;
- const int ContDatagramDataSize = FrameSize - ContDatagramHeaderSize;
- const int TrailerSize = 4;
-
- ioaddr = dev->base_addr;
-
- insw(ioaddr, (unsigned short*) st, 1);
-#ifdef XXXDEBUG
-printk("cm0: received: %02x %02x\n", st[0], st[1]);
-#endif /* XXXDEBUG */
- lp->rx_frames++;
-
- /* decide if it is a good or bad frame */
- for (ns = 0; ns < NPIDS; ns++) {
- session_id = lp->rx_session_id[ns];
- frame_id = lp->rx_frame_id[ns];
- if (st[0] == session_id) {
- if (st[1] == frame_id || (!frame_id && (st[1] & 0xf0) == 0x30)) {
- goto good_frame;
- } else if ((st[1] & 0xf0) == 0x30 && (st[0] & 0x40)) {
- goto skipped_frame;
- } else {
- goto bad_frame;
- }
- } else if (st[0] == (session_id | 0x40)) {
- if ((st[1] & 0xf0) == 0x30) {
- goto skipped_frame;
- } else {
- goto bad_frame;
- }
- }
- }
- goto bad_frame;
-
-skipped_frame:
- stats->rx_frame_errors++;
- skb = lp->rx_skb[ns];
- if (sb1000_debug > 1)
- printk(KERN_WARNING "%s: missing frame(s): got %02x %02x "
- "expecting %02x %02x\n", dev->name, st[0], st[1],
- skb ? session_id : session_id | 0x40, frame_id);
- if (skb) {
- dev_kfree_skb(skb);
- skb = NULL;
- }
-
-good_frame:
- lp->rx_frame_id[ns] = 0x30 | ((st[1] + 1) & 0x0f);
- /* new datagram */
- if (st[0] & 0x40) {
- /* get data length */
- insw(ioaddr, buffer, NewDatagramHeaderSize / 2);
-#ifdef XXXDEBUG
-printk("cm0: IP identification: %02x%02x fragment offset: %02x%02x\n", buffer[30], buffer[31], buffer[32], buffer[33]);
-#endif /* XXXDEBUG */
- if (buffer[0] != NewDatagramHeaderSkip) {
- if (sb1000_debug > 1)
- printk(KERN_WARNING "%s: new datagram header skip error: "
- "got %02x expecting %02x\n", dev->name, buffer[0],
- NewDatagramHeaderSkip);
- stats->rx_length_errors++;
- insw(ioaddr, buffer, NewDatagramDataSize / 2);
- goto bad_frame_next;
- }
- dlen = ((buffer[NewDatagramHeaderSkip + 3] & 0x0f) << 8 |
- buffer[NewDatagramHeaderSkip + 4]) - 17;
- if (dlen > SB1000_MRU) {
- if (sb1000_debug > 1)
- printk(KERN_WARNING "%s: datagram length (%d) greater "
- "than MRU (%d)\n", dev->name, dlen, SB1000_MRU);
- stats->rx_length_errors++;
- insw(ioaddr, buffer, NewDatagramDataSize / 2);
- goto bad_frame_next;
- }
- lp->rx_dlen[ns] = dlen;
- /* compute size to allocate for datagram */
- skbsize = dlen + FrameSize;
- if ((skb = alloc_skb(skbsize, GFP_ATOMIC)) == NULL) {
- if (sb1000_debug > 1)
- printk(KERN_WARNING "%s: can't allocate %d bytes long "
- "skbuff\n", dev->name, skbsize);
- stats->rx_dropped++;
- insw(ioaddr, buffer, NewDatagramDataSize / 2);
- goto dropped_frame;
- }
- skb->dev = dev;
- skb_reset_mac_header(skb);
- skb->protocol = (unsigned short) buffer[NewDatagramHeaderSkip + 16];
- insw(ioaddr, skb_put(skb, NewDatagramDataSize),
- NewDatagramDataSize / 2);
- lp->rx_skb[ns] = skb;
- } else {
- /* continuation of previous datagram */
- insw(ioaddr, buffer, ContDatagramHeaderSize / 2);
- if (buffer[0] != ContDatagramHeaderSkip) {
- if (sb1000_debug > 1)
- printk(KERN_WARNING "%s: cont datagram header skip error: "
- "got %02x expecting %02x\n", dev->name, buffer[0],
- ContDatagramHeaderSkip);
- stats->rx_length_errors++;
- insw(ioaddr, buffer, ContDatagramDataSize / 2);
- goto bad_frame_next;
- }
- skb = lp->rx_skb[ns];
- insw(ioaddr, skb_put(skb, ContDatagramDataSize),
- ContDatagramDataSize / 2);
- dlen = lp->rx_dlen[ns];
- }
- if (skb->len < dlen + TrailerSize) {
- lp->rx_session_id[ns] &= ~0x40;
- return 0;
- }
-
- /* datagram completed: send to upper level */
- skb_trim(skb, dlen);
- __netif_rx(skb);
- stats->rx_bytes+=dlen;
- stats->rx_packets++;
- lp->rx_skb[ns] = NULL;
- lp->rx_session_id[ns] |= 0x40;
- return 0;
-
-bad_frame:
- insw(ioaddr, buffer, FrameSize / 2);
- if (sb1000_debug > 1)
- printk(KERN_WARNING "%s: frame error: got %02x %02x\n",
- dev->name, st[0], st[1]);
- stats->rx_frame_errors++;
-bad_frame_next:
- if (sb1000_debug > 2)
- sb1000_print_status_buffer(dev->name, st, buffer, FrameSize);
-dropped_frame:
- stats->rx_errors++;
- if (ns < NPIDS) {
- if ((skb = lp->rx_skb[ns])) {
- dev_kfree_skb(skb);
- lp->rx_skb[ns] = NULL;
- }
- lp->rx_session_id[ns] |= 0x40;
- }
- return -1;
-}
-
-static void
-sb1000_error_dpc(struct net_device *dev)
-{
- static const unsigned char Command0[6] = {0x80, 0x26, 0x00, 0x00, 0x00, 0x00};
-
- char *name;
- unsigned char st[5];
- int ioaddr[2];
- struct sb1000_private *lp = netdev_priv(dev);
- const int ErrorDpcCounterInitialize = 200;
-
- ioaddr[0] = dev->base_addr;
- /* mem_start holds the second I/O address */
- ioaddr[1] = dev->mem_start;
- name = dev->name;
-
- sb1000_wait_for_ready_clear(ioaddr, name);
- sb1000_send_command(ioaddr, name, Command0);
- sb1000_wait_for_ready(ioaddr, name);
- sb1000_read_status(ioaddr, st);
- if (st[1] & 0x10)
- lp->rx_error_dpc_count = ErrorDpcCounterInitialize;
-}
-
-
-/*
- * Linux interface functions
- */
-static int
-sb1000_open(struct net_device *dev)
-{
- char *name;
- int ioaddr[2], status;
- struct sb1000_private *lp = netdev_priv(dev);
- const unsigned short FirmwareVersion[] = {0x01, 0x01};
-
- ioaddr[0] = dev->base_addr;
- /* mem_start holds the second I/O address */
- ioaddr[1] = dev->mem_start;
- name = dev->name;
-
- /* initialize sb1000 */
- if ((status = sb1000_reset(ioaddr, name)))
- return status;
- ssleep(1);
- if ((status = sb1000_check_CRC(ioaddr, name)))
- return status;
-
- /* initialize private data before board can catch interrupts */
- lp->rx_skb[0] = NULL;
- lp->rx_skb[1] = NULL;
- lp->rx_skb[2] = NULL;
- lp->rx_skb[3] = NULL;
- lp->rx_dlen[0] = 0;
- lp->rx_dlen[1] = 0;
- lp->rx_dlen[2] = 0;
- lp->rx_dlen[3] = 0;
- lp->rx_frames = 0;
- lp->rx_error_count = 0;
- lp->rx_error_dpc_count = 0;
- lp->rx_session_id[0] = 0x50;
- lp->rx_session_id[1] = 0x48;
- lp->rx_session_id[2] = 0x44;
- lp->rx_session_id[3] = 0x42;
- lp->rx_frame_id[0] = 0;
- lp->rx_frame_id[1] = 0;
- lp->rx_frame_id[2] = 0;
- lp->rx_frame_id[3] = 0;
- if (request_irq(dev->irq, sb1000_interrupt, 0, "sb1000", dev)) {
- return -EAGAIN;
- }
-
- if (sb1000_debug > 2)
- printk(KERN_DEBUG "%s: Opening, IRQ %d\n", name, dev->irq);
-
- /* Activate board and check firmware version */
- udelay(1000);
- if ((status = sb1000_activate(ioaddr, name)))
- return status;
- udelay(0);
- if ((status = sb1000_get_firmware_version(ioaddr, name, version, 0)))
- return status;
- if (version[0] != FirmwareVersion[0] || version[1] != FirmwareVersion[1])
- printk(KERN_WARNING "%s: found firmware version %x.%02x "
- "(should be %x.%02x)\n", name, version[0], version[1],
- FirmwareVersion[0], FirmwareVersion[1]);
-
-
- netif_start_queue(dev);
- return 0; /* Always succeed */
-}
-
-static int sb1000_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
- void __user *data, int cmd)
-{
- char* name;
- unsigned char version[2];
- short PID[4];
- int ioaddr[2], status, frequency;
- unsigned int stats[5];
- struct sb1000_private *lp = netdev_priv(dev);
-
- if (!(dev && dev->flags & IFF_UP))
- return -ENODEV;
-
- ioaddr[0] = dev->base_addr;
- /* mem_start holds the second I/O address */
- ioaddr[1] = dev->mem_start;
- name = dev->name;
-
- switch (cmd) {
- case SIOCGCMSTATS: /* get statistics */
- stats[0] = dev->stats.rx_bytes;
- stats[1] = lp->rx_frames;
- stats[2] = dev->stats.rx_packets;
- stats[3] = dev->stats.rx_errors;
- stats[4] = dev->stats.rx_dropped;
- if (copy_to_user(data, stats, sizeof(stats)))
- return -EFAULT;
- status = 0;
- break;
-
- case SIOCGCMFIRMWARE: /* get firmware version */
- if ((status = sb1000_get_firmware_version(ioaddr, name, version, 1)))
- return status;
- if (copy_to_user(data, version, sizeof(version)))
- return -EFAULT;
- break;
-
- case SIOCGCMFREQUENCY: /* get frequency */
- if ((status = sb1000_get_frequency(ioaddr, name, &frequency)))
- return status;
- if (put_user(frequency, (int __user *)data))
- return -EFAULT;
- break;
-
- case SIOCSCMFREQUENCY: /* set frequency */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- if (get_user(frequency, (int __user *)data))
- return -EFAULT;
- if ((status = sb1000_set_frequency(ioaddr, name, frequency)))
- return status;
- break;
-
- case SIOCGCMPIDS: /* get PIDs */
- if ((status = sb1000_get_PIDs(ioaddr, name, PID)))
- return status;
- if (copy_to_user(data, PID, sizeof(PID)))
- return -EFAULT;
- break;
-
- case SIOCSCMPIDS: /* set PIDs */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- if (copy_from_user(PID, data, sizeof(PID)))
- return -EFAULT;
- if ((status = sb1000_set_PIDs(ioaddr, name, PID)))
- return status;
- /* set session_id, frame_id and pkt_type too */
- lp->rx_session_id[0] = 0x50 | (PID[0] & 0x0f);
- lp->rx_session_id[1] = 0x48;
- lp->rx_session_id[2] = 0x44;
- lp->rx_session_id[3] = 0x42;
- lp->rx_frame_id[0] = 0;
- lp->rx_frame_id[1] = 0;
- lp->rx_frame_id[2] = 0;
- lp->rx_frame_id[3] = 0;
- break;
-
- default:
- status = -EINVAL;
- break;
- }
- return status;
-}
-
-/* transmit function: do nothing since SB1000 can't send anything out */
-static netdev_tx_t
-sb1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- printk(KERN_WARNING "%s: trying to transmit!!!\n", dev->name);
- /* sb1000 can't xmit datagrams */
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-/* SB1000 interrupt handler. */
-static irqreturn_t sb1000_interrupt(int irq, void *dev_id)
-{
- static const unsigned char Command0[6] = {0x80, 0x2c, 0x00, 0x00, 0x00, 0x00};
- static const unsigned char Command1[6] = {0x80, 0x2e, 0x00, 0x00, 0x00, 0x00};
-
- char *name;
- unsigned char st;
- int ioaddr[2];
- struct net_device *dev = dev_id;
- struct sb1000_private *lp = netdev_priv(dev);
-
- const int MaxRxErrorCount = 6;
-
- ioaddr[0] = dev->base_addr;
- /* mem_start holds the second I/O address */
- ioaddr[1] = dev->mem_start;
- name = dev->name;
-
- /* is it a good interrupt? */
- st = inb(ioaddr[1] + 6);
- if (!(st & 0x08 && st & 0x20)) {
- return IRQ_NONE;
- }
-
- if (sb1000_debug > 3)
- printk(KERN_DEBUG "%s: entering interrupt\n", dev->name);
-
- st = inb(ioaddr[0] + 7);
- if (sb1000_rx(dev))
- lp->rx_error_count++;
-#ifdef SB1000_DELAY
- udelay(SB1000_DELAY);
-#endif /* SB1000_DELAY */
- sb1000_issue_read_command(ioaddr, name);
- if (st & 0x01) {
- sb1000_error_dpc(dev);
- sb1000_issue_read_command(ioaddr, name);
- }
- if (lp->rx_error_dpc_count && !(--lp->rx_error_dpc_count)) {
- sb1000_wait_for_ready_clear(ioaddr, name);
- sb1000_send_command(ioaddr, name, Command0);
- sb1000_wait_for_ready(ioaddr, name);
- sb1000_issue_read_command(ioaddr, name);
- }
- if (lp->rx_error_count >= MaxRxErrorCount) {
- sb1000_wait_for_ready_clear(ioaddr, name);
- sb1000_send_command(ioaddr, name, Command1);
- sb1000_wait_for_ready(ioaddr, name);
- sb1000_issue_read_command(ioaddr, name);
- lp->rx_error_count = 0;
- }
-
- return IRQ_HANDLED;
-}
-
-static int sb1000_close(struct net_device *dev)
-{
- int i;
- int ioaddr[2];
- struct sb1000_private *lp = netdev_priv(dev);
-
- if (sb1000_debug > 2)
- printk(KERN_DEBUG "%s: Shutting down sb1000.\n", dev->name);
-
- netif_stop_queue(dev);
-
- ioaddr[0] = dev->base_addr;
- /* mem_start holds the second I/O address */
- ioaddr[1] = dev->mem_start;
-
- free_irq(dev->irq, dev);
- /* If we don't do this, we can't re-insmod it later. */
- release_region(ioaddr[1], SB1000_IO_EXTENT);
- release_region(ioaddr[0], SB1000_IO_EXTENT);
-
- /* free rx_skb's if needed */
- for (i=0; i<4; i++) {
- if (lp->rx_skb[i]) {
- dev_kfree_skb(lp->rx_skb[i]);
- }
- }
- return 0;
-}
-
-MODULE_AUTHOR("Franco Venturi <fventuri@mediaone.net>");
-MODULE_DESCRIPTION("General Instruments SB1000 driver");
-MODULE_LICENSE("GPL");
-
-module_pnp_driver(sb1000_driver);
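For reference, the command protocol visible in the removed sb1000 code above packs 32-bit values big-endian into bytes 2..5 of a 6-byte command and reads them back from bytes 1..4 of the 7-byte status, as in sb1000_set_frequency()/sb1000_get_frequency(). A minimal userspace restatement of that byte layout, with hypothetical helper names that are not part of the driver:

#include <stdint.h>
#include <stdio.h>

/* Pack a frequency in kHz the way sb1000_set_frequency() filled Command0[2..5]. */
static void pack_khz(uint8_t cmd[6], uint32_t khz)
{
	cmd[5] = khz & 0xff;
	cmd[4] = (khz >> 8) & 0xff;
	cmd[3] = (khz >> 16) & 0xff;
	cmd[2] = (khz >> 24) & 0xff;
}

/* Unpack a reply the way sb1000_get_frequency() combined st[1..4]. */
static uint32_t unpack_khz(const uint8_t st[7])
{
	return ((uint32_t)st[1] << 24) | ((uint32_t)st[2] << 16) |
	       ((uint32_t)st[3] << 8) | (uint32_t)st[4];
}

int main(void)
{
	uint8_t cmd[6] = {0x80, 0x29, 0, 0, 0, 0};
	uint8_t st[7] = {0};

	pack_khz(cmd, 591250);
	/* Pretend the card echoed the value back in st[1..4]. */
	st[1] = cmd[2]; st[2] = cmd[3]; st[3] = cmd[4]; st[4] = cmd[5];
	printf("%u kHz\n", unpack_khz(st));	/* prints 591250 kHz */
	return 0;
}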
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index fb362ee248ff..c889fb374703 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -899,8 +899,8 @@ static void slip_close(struct tty_struct *tty)
/* VSV = very important to remove timers */
#ifdef CONFIG_SLIP_SMART
- del_timer_sync(&sl->keepalive_timer);
- del_timer_sync(&sl->outfill_timer);
+ timer_delete_sync(&sl->keepalive_timer);
+ timer_delete_sync(&sl->outfill_timer);
#endif
/* Flush network side */
unregister_netdev(sl->dev);
@@ -1137,7 +1137,7 @@ static int slip_ioctl(struct tty_struct *tty, unsigned int cmd,
jiffies + sl->keepalive * HZ);
set_bit(SLF_KEEPTEST, &sl->flags);
} else
- del_timer(&sl->keepalive_timer);
+ timer_delete(&sl->keepalive_timer);
spin_unlock_bh(&sl->lock);
return 0;
@@ -1162,7 +1162,7 @@ static int slip_ioctl(struct tty_struct *tty, unsigned int cmd,
jiffies + sl->outfill * HZ);
set_bit(SLF_OUTWAIT, &sl->flags);
} else
- del_timer(&sl->outfill_timer);
+ timer_delete(&sl->outfill_timer);
spin_unlock_bh(&sl->lock);
return 0;
@@ -1217,7 +1217,7 @@ static int sl_siocdevprivate(struct net_device *dev, struct ifreq *rq,
jiffies + sl->keepalive * HZ);
set_bit(SLF_KEEPTEST, &sl->flags);
} else
- del_timer(&sl->keepalive_timer);
+ timer_delete(&sl->keepalive_timer);
break;
case SIOCGKEEPALIVE:
@@ -1235,7 +1235,7 @@ static int sl_siocdevprivate(struct net_device *dev, struct ifreq *rq,
jiffies + sl->outfill * HZ);
set_bit(SLF_OUTWAIT, &sl->flags);
} else
- del_timer(&sl->outfill_timer);
+ timer_delete(&sl->outfill_timer);
break;
case SIOCGOUTFILL:
@@ -1378,7 +1378,7 @@ module_exit(slip_exit);
static void sl_outfill(struct timer_list *t)
{
- struct slip *sl = from_timer(sl, t, outfill_timer);
+ struct slip *sl = timer_container_of(sl, t, outfill_timer);
spin_lock(&sl->lock);
@@ -1409,7 +1409,7 @@ out:
static void sl_keepalive(struct timer_list *t)
{
- struct slip *sl = from_timer(sl, t, keepalive_timer);
+ struct slip *sl = timer_container_of(sl, t, keepalive_timer);
spin_lock(&sl->lock);
@@ -1421,7 +1421,7 @@ static void sl_keepalive(struct timer_list *t)
/* keepalive still high :(, we must hangup */
if (sl->outfill)
/* outfill timer must be deleted too */
- (void)del_timer(&sl->outfill_timer);
+ (void) timer_delete(&sl->outfill_timer);
printk(KERN_DEBUG "%s: no packets received during keepalive timeout, hangup.\n", sl->dev->name);
/* this must hangup tty & close slip */
tty_hangup(sl->tty);
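The slip.c hunks above are part of the treewide timer API rename: del_timer()/del_timer_sync() become timer_delete()/timer_delete_sync(), and callbacks recover their containing structure with timer_container_of() instead of from_timer(). A minimal sketch of the new-style pattern, using hypothetical names (my_dev, my_expire) rather than anything from the patch:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_dev {
	struct timer_list watchdog;
};

static void my_expire(struct timer_list *t)
{
	struct my_dev *p = timer_container_of(p, t, watchdog);

	/* handle expiry; rearm with mod_timer(&p->watchdog, ...) if needed */
}

static void my_start(struct my_dev *p)
{
	timer_setup(&p->watchdog, my_expire, 0);
	mod_timer(&p->watchdog, jiffies + HZ);
}

static void my_stop(struct my_dev *p)
{
	/* waits for a concurrently running my_expire() before returning */
	timer_delete_sync(&p->watchdog);
}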
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
index 55aa8d0c8e1f..c10198d44576 100644
--- a/drivers/net/sungem_phy.c
+++ b/drivers/net/sungem_phy.c
@@ -1165,7 +1165,7 @@ int sungem_phy_probe(struct mii_phy *phy, int mii_id)
int i;
/* We do not reset the mii_phy structure as the driver
- * may re-probe the PHY regulary
+ * may re-probe the PHY regularly
*/
phy->mii_id = mii_id;
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 5aa41d5f7765..1197f245e873 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -26,74 +26,9 @@
#include <linux/virtio_net.h>
#include <linux/skb_array.h>
-#define TAP_IFFEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)
-
-#define TAP_VNET_LE 0x80000000
-#define TAP_VNET_BE 0x40000000
-
-#ifdef CONFIG_TUN_VNET_CROSS_LE
-static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
-{
- return q->flags & TAP_VNET_BE ? false :
- virtio_legacy_is_little_endian();
-}
-
-static long tap_get_vnet_be(struct tap_queue *q, int __user *sp)
-{
- int s = !!(q->flags & TAP_VNET_BE);
-
- if (put_user(s, sp))
- return -EFAULT;
-
- return 0;
-}
-
-static long tap_set_vnet_be(struct tap_queue *q, int __user *sp)
-{
- int s;
+#include "tun_vnet.h"
- if (get_user(s, sp))
- return -EFAULT;
-
- if (s)
- q->flags |= TAP_VNET_BE;
- else
- q->flags &= ~TAP_VNET_BE;
-
- return 0;
-}
-#else
-static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
-{
- return virtio_legacy_is_little_endian();
-}
-
-static long tap_get_vnet_be(struct tap_queue *q, int __user *argp)
-{
- return -EINVAL;
-}
-
-static long tap_set_vnet_be(struct tap_queue *q, int __user *argp)
-{
- return -EINVAL;
-}
-#endif /* CONFIG_TUN_VNET_CROSS_LE */
-
-static inline bool tap_is_little_endian(struct tap_queue *q)
-{
- return q->flags & TAP_VNET_LE ||
- tap_legacy_is_little_endian(q);
-}
-
-static inline u16 tap16_to_cpu(struct tap_queue *q, __virtio16 val)
-{
- return __virtio16_to_cpu(tap_is_little_endian(q), val);
-}
-
-static inline __virtio16 cpu_to_tap16(struct tap_queue *q, u16 val)
-{
- return __cpu_to_virtio16(tap_is_little_endian(q), val);
-}
+#define TAP_IFFEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)
static struct proto tap_proto = {
.name = "tap",
@@ -645,6 +580,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
int err;
struct virtio_net_hdr vnet_hdr = { 0 };
int vnet_hdr_len = 0;
+ int hdr_len = 0;
int copylen = 0;
int depth;
bool zerocopy = false;
@@ -654,25 +590,13 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
if (q->flags & IFF_VNET_HDR) {
vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
- err = -EINVAL;
- if (len < vnet_hdr_len)
+ hdr_len = tun_vnet_hdr_get(vnet_hdr_len, q->flags, from, &vnet_hdr);
+ if (hdr_len < 0) {
+ err = hdr_len;
goto err;
- len -= vnet_hdr_len;
+ }
- err = -EFAULT;
- if (!copy_from_iter_full(&vnet_hdr, sizeof(vnet_hdr), from))
- goto err;
- iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
- if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
- tap16_to_cpu(q, vnet_hdr.csum_start) +
- tap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
- tap16_to_cpu(q, vnet_hdr.hdr_len))
- vnet_hdr.hdr_len = cpu_to_tap16(q,
- tap16_to_cpu(q, vnet_hdr.csum_start) +
- tap16_to_cpu(q, vnet_hdr.csum_offset) + 2);
- err = -EINVAL;
- if (tap16_to_cpu(q, vnet_hdr.hdr_len) > len)
- goto err;
+ len -= vnet_hdr_len;
}
err = -EINVAL;
@@ -682,12 +606,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
if (msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
struct iov_iter i;
- copylen = vnet_hdr.hdr_len ?
- tap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
- if (copylen > good_linear)
- copylen = good_linear;
- else if (copylen < ETH_HLEN)
- copylen = ETH_HLEN;
+ copylen = clamp(hdr_len ?: GOODCOPY_LEN, ETH_HLEN, good_linear);
linear = copylen;
i = *from;
iov_iter_advance(&i, copylen);
@@ -697,11 +616,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
if (!zerocopy) {
copylen = len;
- linear = tap16_to_cpu(q, vnet_hdr.hdr_len);
- if (linear > good_linear)
- linear = good_linear;
- else if (linear < ETH_HLEN)
- linear = ETH_HLEN;
+ linear = clamp(hdr_len, ETH_HLEN, good_linear);
}
skb = tap_alloc_skb(&q->sk, TAP_RESERVE, copylen,
@@ -733,8 +648,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
skb->dev = tap->dev;
if (vnet_hdr_len) {
- err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
- tap_is_little_endian(q));
+ err = tun_vnet_hdr_to_skb(q->flags, skb, &vnet_hdr);
if (err) {
rcu_read_unlock();
drop_reason = SKB_DROP_REASON_DEV_HDR;
@@ -797,23 +711,17 @@ static ssize_t tap_put_user(struct tap_queue *q,
int total;
if (q->flags & IFF_VNET_HDR) {
- int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0;
struct virtio_net_hdr vnet_hdr;
vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
- if (iov_iter_count(iter) < vnet_hdr_len)
- return -EINVAL;
-
- if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
- tap_is_little_endian(q), true,
- vlan_hlen))
- BUG();
- if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
- sizeof(vnet_hdr))
- return -EFAULT;
+ ret = tun_vnet_hdr_from_skb(q->flags, NULL, skb, &vnet_hdr);
+ if (ret)
+ return ret;
- iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr));
+ ret = tun_vnet_hdr_put(vnet_hdr_len, iter, &vnet_hdr);
+ if (ret)
+ return ret;
}
total = vnet_hdr_len;
total += skb->len;
@@ -1015,7 +923,7 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
unsigned int __user *up = argp;
unsigned short u;
int __user *sp = argp;
- struct sockaddr sa;
+ struct sockaddr_storage ss;
int s;
int ret;
@@ -1072,42 +980,6 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
q->sk.sk_sndbuf = s;
return 0;
- case TUNGETVNETHDRSZ:
- s = q->vnet_hdr_sz;
- if (put_user(s, sp))
- return -EFAULT;
- return 0;
-
- case TUNSETVNETHDRSZ:
- if (get_user(s, sp))
- return -EFAULT;
- if (s < (int)sizeof(struct virtio_net_hdr))
- return -EINVAL;
-
- q->vnet_hdr_sz = s;
- return 0;
-
- case TUNGETVNETLE:
- s = !!(q->flags & TAP_VNET_LE);
- if (put_user(s, sp))
- return -EFAULT;
- return 0;
-
- case TUNSETVNETLE:
- if (get_user(s, sp))
- return -EFAULT;
- if (s)
- q->flags |= TAP_VNET_LE;
- else
- q->flags &= ~TAP_VNET_LE;
- return 0;
-
- case TUNGETVNETBE:
- return tap_get_vnet_be(q, sp);
-
- case TUNSETVNETBE:
- return tap_set_vnet_be(q, sp);
-
case TUNSETOFFLOAD:
/* let the user check for future flags */
if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
@@ -1128,16 +1000,17 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
return -ENOLINK;
}
ret = 0;
- dev_get_mac_address(&sa, dev_net(tap->dev), tap->dev->name);
+ netif_get_mac_address((struct sockaddr *)&ss, dev_net(tap->dev),
+ tap->dev->name);
if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
- copy_to_user(&ifr->ifr_hwaddr, &sa, sizeof(sa)))
+ copy_to_user(&ifr->ifr_hwaddr, &ss, sizeof(ifr->ifr_hwaddr)))
ret = -EFAULT;
tap_put_tap_dev(tap);
rtnl_unlock();
return ret;
case SIOCSIFHWADDR:
- if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa)))
+ if (copy_from_user(&ss, &ifr->ifr_hwaddr, sizeof(ifr->ifr_hwaddr)))
return -EFAULT;
rtnl_lock();
tap = tap_get_tap_dev(q);
@@ -1145,13 +1018,16 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
rtnl_unlock();
return -ENOLINK;
}
- ret = dev_set_mac_address_user(tap->dev, &sa, NULL);
+ if (tap->dev->addr_len > sizeof(ifr->ifr_hwaddr))
+ ret = -EINVAL;
+ else
+ ret = dev_set_mac_address_user(tap->dev, &ss, NULL);
tap_put_tap_dev(tap);
rtnl_unlock();
return ret;
default:
- return -EINVAL;
+ return tun_vnet_ioctl(&q->vnet_hdr_sz, &q->flags, cmd, sp);
}
}
@@ -1168,9 +1044,8 @@ static const struct file_operations tap_fops = {
static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
{
- struct tun_xdp_hdr *hdr = xdp->data_hard_start;
- struct virtio_net_hdr *gso = &hdr->gso;
- int buflen = hdr->buflen;
+ struct virtio_net_hdr *gso = xdp->data_hard_start;
+ int buflen = xdp->frame_sz;
int vnet_hdr_len = 0;
struct tap_dev *tap;
struct sk_buff *skb;
@@ -1198,7 +1073,7 @@ static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
skb->protocol = eth_hdr(skb)->h_proto;
if (vnet_hdr_len) {
- err = virtio_net_hdr_to_skb(skb, gso, tap_is_little_endian(q));
+ err = tun_vnet_hdr_to_skb(q->flags, skb, gso);
if (err)
goto err_kfree;
}
@@ -1329,9 +1204,9 @@ int tap_queue_resize(struct tap_dev *tap)
list_for_each_entry(q, &tap->queue_list, next)
rings[i++] = &q->ring;
- ret = ptr_ring_resize_multiple(rings, n,
- dev->tx_queue_len, GFP_KERNEL,
- __skb_array_destroy_skb);
+ ret = ptr_ring_resize_multiple_bh(rings, n,
+ dev->tx_queue_len, GFP_KERNEL,
+ __skb_array_destroy_skb);
kfree(rings);
return ret;
@@ -1407,3 +1282,4 @@ MODULE_DESCRIPTION("Common library for drivers implementing the TAP interface");
MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
MODULE_AUTHOR("Sainath Grandhi <sainath.grandhi@intel.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("NETDEV_INTERNAL");
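The tap_get_user()/tap_put_user() hunks above route virtio-net header handling through the shared tun_vnet helpers and fold the open-coded bounds on copylen and linear into clamp(hdr_len ?: GOODCOPY_LEN, ETH_HLEN, good_linear). A small userspace check (not kernel code, and assuming lo <= hi as the kernel's clamp() does) that the removed if/else ladder and a clamp-style bound agree:

#include <assert.h>
#include <stdio.h>

static int clamp_like(int val, int lo, int hi)
{
	return val < lo ? lo : (val > hi ? hi : val);
}

static int old_ladder(int val, int lo, int hi)
{
	if (val > hi)
		val = hi;
	else if (val < lo)
		val = lo;
	return val;
}

int main(void)
{
	const int eth_hlen = 14, good_linear = 65535;	/* illustrative bounds */

	for (int len = 0; len <= 131072; len++)
		assert(old_ladder(len, eth_hlen, good_linear) ==
		       clamp_like(len, eth_hlen, good_linear));
	puts("old ladder and clamp() agree");
	return 0;
}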
diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c
index a1b27b69f010..4d5c9ae8f221 100644
--- a/drivers/net/team/team_core.c
+++ b/drivers/net/team/team_core.c
@@ -23,6 +23,7 @@
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
+#include <net/netdev_lock.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <linux/if_team.h>
@@ -54,7 +55,7 @@ static int __set_port_dev_addr(struct net_device *port_dev,
memcpy(addr.__data, dev_addr, port_dev->addr_len);
addr.ss_family = port_dev->type;
- return dev_set_mac_address(port_dev, (struct sockaddr *)&addr, NULL);
+ return dev_set_mac_address(port_dev, &addr, NULL);
}
static int team_port_set_orig_dev_addr(struct team_port *port)
@@ -932,7 +933,7 @@ static bool team_port_find(const struct team *team,
* Enable/disable port by adding to enabled port hashlist and setting
* port->index (Might be racy so reader could see incorrect ifindex when
* processing a flying packet, but that is not a problem). Write guarded
- * by team->lock.
+ * by RTNL.
*/
static void team_port_enable(struct team *team,
struct team_port *port)
@@ -981,57 +982,6 @@ static void team_port_disable(struct team *team,
team_lower_state_changed(port);
}
-#define TEAM_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
- NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
- NETIF_F_HIGHDMA | NETIF_F_LRO)
-
-#define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
- NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)
-
-static void __team_compute_features(struct team *team)
-{
- struct team_port *port;
- netdev_features_t vlan_features = TEAM_VLAN_FEATURES &
- NETIF_F_ALL_FOR_ALL;
- netdev_features_t enc_features = TEAM_ENC_FEATURES;
- unsigned short max_hard_header_len = ETH_HLEN;
- unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
- IFF_XMIT_DST_RELEASE_PERM;
-
- rcu_read_lock();
- list_for_each_entry_rcu(port, &team->port_list, list) {
- vlan_features = netdev_increment_features(vlan_features,
- port->dev->vlan_features,
- TEAM_VLAN_FEATURES);
- enc_features =
- netdev_increment_features(enc_features,
- port->dev->hw_enc_features,
- TEAM_ENC_FEATURES);
-
-
- dst_release_flag &= port->dev->priv_flags;
- if (port->dev->hard_header_len > max_hard_header_len)
- max_hard_header_len = port->dev->hard_header_len;
- }
- rcu_read_unlock();
-
- team->dev->vlan_features = vlan_features;
- team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_TX;
- team->dev->hard_header_len = max_hard_header_len;
-
- team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
- if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
- team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
-}
-
-static void team_compute_features(struct team *team)
-{
- __team_compute_features(team);
- netdev_change_features(team->dev);
-}
-
static int team_port_enter(struct team *team, struct team_port *port)
{
int err = 0;
@@ -1169,6 +1119,13 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
return -EBUSY;
}
+ if (netdev_has_upper_dev(port_dev, dev)) {
+ NL_SET_ERR_MSG(extack, "Device is already a lower device of the team interface");
+ netdev_err(dev, "Device %s is already a lower device of the team interface\n",
+ portname);
+ return -EBUSY;
+ }
+
if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
vlan_uses_dev(dev)) {
NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
@@ -1177,10 +1134,6 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
return -EPERM;
}
- err = team_dev_type_check_change(dev, port_dev);
- if (err)
- return err;
-
if (port_dev->flags & IFF_UP) {
NL_SET_ERR_MSG(extack, "Device is up. Set it down before adding it as a team port");
netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
@@ -1198,10 +1151,16 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
INIT_LIST_HEAD(&port->qom_list);
port->orig.mtu = port_dev->mtu;
- err = dev_set_mtu(port_dev, dev->mtu);
- if (err) {
- netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
- goto err_set_mtu;
+ /*
+ * MTU assignment will be handled in team_dev_type_check_change
+ * if dev and port_dev are of different types
+ */
+ if (dev->type == port_dev->type) {
+ err = dev_set_mtu(port_dev, dev->mtu);
+ if (err) {
+ netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
+ goto err_set_mtu;
+ }
}
memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);
@@ -1272,10 +1231,14 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
if (err) {
if (dev->flags & IFF_PROMISC)
dev_set_promiscuity(port_dev, -1);
- goto err_set_slave_promisc;
+ goto err_set_slave_allmulti;
}
}
+ err = team_dev_type_check_change(dev, port_dev);
+ if (err)
+ goto err_set_dev_type;
+
if (dev->flags & IFF_UP) {
netif_addr_lock_bh(dev);
dev_uc_sync_multiple(port_dev, dev);
@@ -1286,7 +1249,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
port->index = -1;
list_add_tail_rcu(&port->list, &team->port_list);
team_port_enable(team, port);
- __team_compute_features(team);
+ netdev_compute_master_upper_features(team->dev, true);
__team_port_change_port_added(port, !!netif_oper_up(port_dev));
__team_options_change_check(team);
@@ -1294,6 +1257,8 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
return 0;
+err_set_dev_type:
+err_set_slave_allmulti:
err_set_slave_promisc:
__team_option_inst_del_port(team, port);
@@ -1368,7 +1333,7 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
dev_set_mtu(port_dev, port->orig.mtu);
kfree_rcu(port, rcu);
netdev_info(dev, "Port device %s removed\n", portname);
- __team_compute_features(team);
+ netdev_compute_master_upper_features(team->dev, true);
return 0;
}
@@ -1646,8 +1611,6 @@ static int team_init(struct net_device *dev)
goto err_options_register;
netif_carrier_off(dev);
- lockdep_register_key(&team->team_lock_key);
- __mutex_init(&team->lock, "team->team_lock_key", &team->team_lock_key);
netdev_lockdep_set_classes(dev);
return 0;
@@ -1668,7 +1631,8 @@ static void team_uninit(struct net_device *dev)
struct team_port *port;
struct team_port *tmp;
- mutex_lock(&team->lock);
+ ASSERT_RTNL();
+
list_for_each_entry_safe(port, tmp, &team->port_list, list)
team_port_del(team, port->dev);
@@ -1677,9 +1641,7 @@ static void team_uninit(struct net_device *dev)
team_mcast_rejoin_fini(team);
team_notify_peers_fini(team);
team_queue_override_fini(team);
- mutex_unlock(&team->lock);
netdev_change_features(dev);
- lockdep_unregister_key(&team->team_lock_key);
}
static void team_destructor(struct net_device *dev)
@@ -1764,8 +1726,9 @@ static void team_change_rx_flags(struct net_device *dev, int change)
struct team_port *port;
int inc;
- rcu_read_lock();
- list_for_each_entry_rcu(port, &team->port_list, list) {
+ ASSERT_RTNL();
+
+ list_for_each_entry(port, &team->port_list, list) {
if (change & IFF_PROMISC) {
inc = dev->flags & IFF_PROMISC ? 1 : -1;
dev_set_promiscuity(port->dev, inc);
@@ -1775,7 +1738,6 @@ static void team_change_rx_flags(struct net_device *dev, int change)
dev_set_allmulti(port->dev, inc);
}
}
- rcu_read_unlock();
}
static void team_set_rx_mode(struct net_device *dev)
@@ -1797,14 +1759,14 @@ static int team_set_mac_address(struct net_device *dev, void *p)
struct team *team = netdev_priv(dev);
struct team_port *port;
+ ASSERT_RTNL();
+
if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
dev_addr_set(dev, addr->sa_data);
- mutex_lock(&team->lock);
list_for_each_entry(port, &team->port_list, list)
if (team->ops.port_change_dev_addr)
team->ops.port_change_dev_addr(team, port);
- mutex_unlock(&team->lock);
return 0;
}
@@ -1814,11 +1776,8 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
struct team_port *port;
int err;
- /*
- * Alhough this is reader, it's guarded by team lock. It's not possible
- * to traverse list in reverse under rcu_read_lock
- */
- mutex_lock(&team->lock);
+ ASSERT_RTNL();
+
team->port_mtu_change_allowed = true;
list_for_each_entry(port, &team->port_list, list) {
err = dev_set_mtu(port->dev, new_mtu);
@@ -1829,7 +1788,6 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
}
}
team->port_mtu_change_allowed = false;
- mutex_unlock(&team->lock);
WRITE_ONCE(dev->mtu, new_mtu);
@@ -1839,7 +1797,6 @@ unwind:
list_for_each_entry_continue_reverse(port, &team->port_list, list)
dev_set_mtu(port->dev, dev->mtu);
team->port_mtu_change_allowed = false;
- mutex_unlock(&team->lock);
return err;
}
@@ -1889,24 +1846,19 @@ static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
struct team_port *port;
int err;
- /*
- * Alhough this is reader, it's guarded by team lock. It's not possible
- * to traverse list in reverse under rcu_read_lock
- */
- mutex_lock(&team->lock);
+ ASSERT_RTNL();
+
list_for_each_entry(port, &team->port_list, list) {
err = vlan_vid_add(port->dev, proto, vid);
if (err)
goto unwind;
}
- mutex_unlock(&team->lock);
return 0;
unwind:
list_for_each_entry_continue_reverse(port, &team->port_list, list)
vlan_vid_del(port->dev, proto, vid);
- mutex_unlock(&team->lock);
return err;
}
@@ -1916,10 +1868,10 @@ static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
struct team *team = netdev_priv(dev);
struct team_port *port;
- mutex_lock(&team->lock);
+ ASSERT_RTNL();
+
list_for_each_entry(port, &team->port_list, list)
vlan_vid_del(port->dev, proto, vid);
- mutex_unlock(&team->lock);
return 0;
}
@@ -1941,9 +1893,9 @@ static void team_netpoll_cleanup(struct net_device *dev)
{
struct team *team = netdev_priv(dev);
- mutex_lock(&team->lock);
+ ASSERT_RTNL();
+
__team_netpoll_cleanup(team);
- mutex_unlock(&team->lock);
}
static int team_netpoll_setup(struct net_device *dev)
@@ -1952,7 +1904,8 @@ static int team_netpoll_setup(struct net_device *dev)
struct team_port *port;
int err = 0;
- mutex_lock(&team->lock);
+ ASSERT_RTNL();
+
list_for_each_entry(port, &team->port_list, list) {
err = __team_port_enable_netpoll(port);
if (err) {
@@ -1960,7 +1913,6 @@ static int team_netpoll_setup(struct net_device *dev)
break;
}
}
- mutex_unlock(&team->lock);
return err;
}
#endif
@@ -1969,38 +1921,19 @@ static int team_add_slave(struct net_device *dev, struct net_device *port_dev,
struct netlink_ext_ack *extack)
{
struct team *team = netdev_priv(dev);
- int err;
-
- mutex_lock(&team->lock);
- err = team_port_add(team, port_dev, extack);
- mutex_unlock(&team->lock);
- if (!err)
- netdev_change_features(dev);
+ ASSERT_RTNL();
- return err;
+ return team_port_add(team, port_dev, extack);
}
static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
{
struct team *team = netdev_priv(dev);
- int err;
- mutex_lock(&team->lock);
- err = team_port_del(team, port_dev);
- mutex_unlock(&team->lock);
+ ASSERT_RTNL();
- if (err)
- return err;
-
- if (netif_is_team_master(port_dev)) {
- lockdep_unregister_key(&team->team_lock_key);
- lockdep_register_key(&team->team_lock_key);
- lockdep_set_class(&team->lock, &team->team_lock_key);
- }
- netdev_change_features(dev);
-
- return err;
+ return team_port_del(team, port_dev);
}
static netdev_features_t team_fix_features(struct net_device *dev,
@@ -2011,8 +1944,7 @@ static netdev_features_t team_fix_features(struct net_device *dev,
netdev_features_t mask;
mask = features;
- features &= ~NETIF_F_ONE_FOR_ALL;
- features |= NETIF_F_ALL_FOR_ALL;
+ features = netdev_base_features(features);
rcu_read_lock();
list_for_each_entry_rcu(port, &team->port_list, list) {
@@ -2191,11 +2123,11 @@ static void team_setup(struct net_device *dev)
dev->lltx = true;
/* Don't allow team devices to change network namespaces. */
- dev->netns_local = true;
+ dev->netns_immutable = true;
dev->features |= NETIF_F_GRO;
- dev->hw_features = TEAM_VLAN_FEATURES |
+ dev->hw_features = MASTER_UPPER_DEV_VLAN_FEATURES |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_STAG_RX |
@@ -2206,10 +2138,12 @@ static void team_setup(struct net_device *dev)
dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
}
-static int team_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int team_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct nlattr **tb = params->tb;
+
if (tb[IFLA_ADDRESS] == NULL)
eth_hw_addr_random(dev);
@@ -2289,9 +2223,10 @@ err_msg_put:
static struct team *team_nl_team_get(struct genl_info *info)
{
struct net *net = genl_info_net(info);
- int ifindex;
struct net_device *dev;
- struct team *team;
+ int ifindex;
+
+ ASSERT_RTNL();
if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
return NULL;
@@ -2303,14 +2238,11 @@ static struct team *team_nl_team_get(struct genl_info *info)
return NULL;
}
- team = netdev_priv(dev);
- mutex_lock(&team->lock);
- return team;
+ return netdev_priv(dev);
}
static void team_nl_team_put(struct team *team)
{
- mutex_unlock(&team->lock);
dev_put(team->dev);
}
@@ -2500,9 +2432,13 @@ int team_nl_options_get_doit(struct sk_buff *skb, struct genl_info *info)
int err;
LIST_HEAD(sel_opt_inst_list);
+ rtnl_lock();
+
team = team_nl_team_get(info);
- if (!team)
- return -EINVAL;
+ if (!team) {
+ err = -EINVAL;
+ goto rtnl_unlock;
+ }
list_for_each_entry(opt_inst, &team->option_inst_list, list)
list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
@@ -2512,6 +2448,9 @@ int team_nl_options_get_doit(struct sk_buff *skb, struct genl_info *info)
team_nl_team_put(team);
+rtnl_unlock:
+ rtnl_unlock();
+
return err;
}
@@ -2627,7 +2566,9 @@ int team_nl_options_set_doit(struct sk_buff *skb, struct genl_info *info)
ctx.data.u32_val = nla_get_u32(attr_data);
break;
case TEAM_OPTION_TYPE_STRING:
- if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
+ if (nla_len(attr_data) > TEAM_STRING_MAX_LEN ||
+ !memchr(nla_data(attr_data), '\0',
+ nla_len(attr_data))) {
err = -EINVAL;
goto team_put;
}
@@ -2788,15 +2729,22 @@ int team_nl_port_list_get_doit(struct sk_buff *skb,
struct team *team;
int err;
+ rtnl_lock();
+
team = team_nl_team_get(info);
- if (!team)
- return -EINVAL;
+ if (!team) {
+ err = -EINVAL;
+ goto rtnl_unlock;
+ }
err = team_nl_send_port_list_get(team, info->snd_portid, info->snd_seq,
NLM_F_ACK, team_nl_send_unicast, NULL);
team_nl_team_put(team);
+rtnl_unlock:
+ rtnl_unlock();
+
return err;
}
@@ -2944,11 +2892,9 @@ static void __team_port_change_port_removed(struct team_port *port)
static void team_port_change_check(struct team_port *port, bool linkup)
{
- struct team *team = port->team;
+ ASSERT_RTNL();
- mutex_lock(&team->lock);
__team_port_change_check(port, linkup);
- mutex_unlock(&team->lock);
}
@@ -2985,7 +2931,7 @@ static int team_device_event(struct notifier_block *unused,
case NETDEV_FEAT_CHANGE:
if (!port->team->notifier_ctx) {
port->team->notifier_ctx = true;
- team_compute_features(port->team);
+ netdev_compute_master_upper_features(port->team->dev, true);
port->team->notifier_ctx = false;
}
break;
diff --git a/drivers/net/team/team_mode_activebackup.c b/drivers/net/team/team_mode_activebackup.c
index e0f599e2a51d..1c3336c7a1b2 100644
--- a/drivers/net/team/team_mode_activebackup.c
+++ b/drivers/net/team/team_mode_activebackup.c
@@ -67,8 +67,7 @@ static void ab_active_port_get(struct team *team, struct team_gsetter_ctx *ctx)
{
struct team_port *active_port;
- active_port = rcu_dereference_protected(ab_priv(team)->active_port,
- lockdep_is_held(&team->lock));
+ active_port = rtnl_dereference(ab_priv(team)->active_port);
if (active_port)
ctx->data.u32_val = active_port->dev->ifindex;
else
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index 00f8989c29c0..b14538bde2f8 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -301,8 +301,7 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
if (lb_priv->ex->orig_fprog) {
/* Clear old filter data */
__fprog_destroy(lb_priv->ex->orig_fprog);
- orig_fp = rcu_dereference_protected(lb_priv->fp,
- lockdep_is_held(&team->lock));
+ orig_fp = rtnl_dereference(lb_priv->fp);
}
rcu_assign_pointer(lb_priv->fp, fp);
@@ -324,8 +323,7 @@ static void lb_bpf_func_free(struct team *team)
return;
__fprog_destroy(lb_priv->ex->orig_fprog);
- fp = rcu_dereference_protected(lb_priv->fp,
- lockdep_is_held(&team->lock));
+ fp = rtnl_dereference(lb_priv->fp);
bpf_prog_destroy(fp);
}
@@ -335,8 +333,7 @@ static void lb_tx_method_get(struct team *team, struct team_gsetter_ctx *ctx)
lb_select_tx_port_func_t *func;
char *name;
- func = rcu_dereference_protected(lb_priv->select_tx_port_func,
- lockdep_is_held(&team->lock));
+ func = rtnl_dereference(lb_priv->select_tx_port_func);
name = lb_select_tx_port_get_name(func);
BUG_ON(!name);
ctx->data.str_val = name;
@@ -478,7 +475,7 @@ static void lb_stats_refresh(struct work_struct *work)
team = lb_priv_ex->team;
lb_priv = get_lb_priv(team);
- if (!mutex_trylock(&team->lock)) {
+ if (!rtnl_trylock()) {
schedule_delayed_work(&lb_priv_ex->stats.refresh_dw, 0);
return;
}
@@ -515,7 +512,7 @@ static void lb_stats_refresh(struct work_struct *work)
schedule_delayed_work(&lb_priv_ex->stats.refresh_dw,
(lb_priv_ex->stats.refresh_interval * HZ) / 10);
- mutex_unlock(&team->lock);
+ rtnl_unlock();
}
static void lb_stats_refresh_interval_get(struct team *team,
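The team hunks above drop the per-team mutex in favour of RTNL: the netlink doit handlers now take rtnl_lock(), rcu_dereference_protected(..., lockdep_is_held(&team->lock)) becomes rtnl_dereference(), and the load-balance stats work switches to rtnl_trylock() so it can back off and reschedule instead of blocking. A condensed sketch of that deferred-work pattern, with hypothetical names (my_state, my_refresh) standing in for the team structures:

#include <linux/rtnetlink.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_cfg {
	int interval;
};

struct my_state {
	struct my_cfg __rcu *cfg;
	struct delayed_work refresh;
};

static void my_refresh(struct work_struct *work)
{
	struct my_state *s = container_of(to_delayed_work(work),
					  struct my_state, refresh);
	struct my_cfg *cfg;

	if (!rtnl_trylock()) {
		/* RTNL is contended: retry soon instead of blocking the workqueue. */
		schedule_delayed_work(&s->refresh, 0);
		return;
	}
	cfg = rtnl_dereference(s->cfg);	/* writer-side access, RTNL held */
	if (cfg)
		schedule_delayed_work(&s->refresh, cfg->interval * HZ);
	rtnl_unlock();
}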
diff --git a/drivers/net/team/team_nl.c b/drivers/net/team/team_nl.c
index 208424ab78f5..6db21725f9cc 100644
--- a/drivers/net/team/team_nl.c
+++ b/drivers/net/team/team_nl.c
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/team.yaml */
/* YNL-GEN kernel source */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#include <net/netlink.h>
#include <net/genetlink.h>
diff --git a/drivers/net/team/team_nl.h b/drivers/net/team/team_nl.h
index c9ec1b22ac4d..74816b193475 100644
--- a/drivers/net/team/team_nl.h
+++ b/drivers/net/team/team_nl.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/team.yaml */
/* YNL-GEN kernel header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _LINUX_TEAM_GEN_H
#define _LINUX_TEAM_GEN_H
diff --git a/drivers/net/thunderbolt/main.c b/drivers/net/thunderbolt/main.c
index 0a53ec293d04..dcaa62377808 100644
--- a/drivers/net/thunderbolt/main.c
+++ b/drivers/net/thunderbolt/main.c
@@ -396,9 +396,9 @@ static void tbnet_tear_down(struct tbnet *net, bool send_logout)
ret = tb_xdomain_disable_paths(net->xd,
net->local_transmit_path,
- net->rx_ring.ring->hop,
+ net->tx_ring.ring->hop,
net->remote_transmit_path,
- net->tx_ring.ring->hop);
+ net->rx_ring.ring->hop);
if (ret)
netdev_warn(net->dev, "failed to disable DMA paths\n");
@@ -662,9 +662,9 @@ static void tbnet_connected_work(struct work_struct *work)
goto err_free_rx_buffers;
ret = tb_xdomain_enable_paths(net->xd, net->local_transmit_path,
- net->rx_ring.ring->hop,
+ net->tx_ring.ring->hop,
net->remote_transmit_path,
- net->tx_ring.ring->hop);
+ net->rx_ring.ring->hop);
if (ret) {
netdev_err(net->dev, "failed to enable DMA paths\n");
goto err_free_tx_buffers;
@@ -924,8 +924,12 @@ static int tbnet_open(struct net_device *dev)
netif_carrier_off(dev);
- ring = tb_ring_alloc_tx(xd->tb->nhi, -1, TBNET_RING_SIZE,
- RING_FLAG_FRAME);
+ flags = RING_FLAG_FRAME;
+ /* Only enable full E2E if the other end supports it too */
+ if (tbnet_e2e && net->svc->prtcstns & TBNET_E2E)
+ flags |= RING_FLAG_E2E;
+
+ ring = tb_ring_alloc_tx(xd->tb->nhi, -1, TBNET_RING_SIZE, flags);
if (!ring) {
netdev_err(dev, "failed to allocate Tx ring\n");
return -ENOMEM;
@@ -944,11 +948,6 @@ static int tbnet_open(struct net_device *dev)
sof_mask = BIT(TBIP_PDF_FRAME_START);
eof_mask = BIT(TBIP_PDF_FRAME_END);
- flags = RING_FLAG_FRAME;
- /* Only enable full E2E if the other end supports it too */
- if (tbnet_e2e && net->svc->prtcstns & TBNET_E2E)
- flags |= RING_FLAG_E2E;
-
ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE, flags,
net->tx_ring.ring->hop, sof_mask,
eof_mask, tbnet_start_poll, net);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index d7a865ef370b..8192740357a0 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -83,6 +83,8 @@
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
+#include "tun_vnet.h"
+
static void tun_default_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd);
@@ -94,9 +96,6 @@ static void tun_default_link_ksettings(struct net_device *dev,
* overload it to mean fasync when stored there.
*/
#define TUN_FASYNC IFF_ATTACH_QUEUE
-/* High bits in flags field are unused. */
-#define TUN_VNET_LE 0x80000000
-#define TUN_VNET_BE 0x40000000
#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)
@@ -187,7 +186,8 @@ struct tun_struct {
struct net_device *dev;
netdev_features_t set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
- NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4)
+ NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4 | \
+ NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM)
int align;
int vnet_hdr_sz;
@@ -298,70 +298,6 @@ static bool tun_napi_frags_enabled(const struct tun_file *tfile)
return tfile->napi_frags_enabled;
}
-#ifdef CONFIG_TUN_VNET_CROSS_LE
-static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
-{
- return tun->flags & TUN_VNET_BE ? false :
- virtio_legacy_is_little_endian();
-}
-
-static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
-{
- int be = !!(tun->flags & TUN_VNET_BE);
-
- if (put_user(be, argp))
- return -EFAULT;
-
- return 0;
-}
-
-static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
-{
- int be;
-
- if (get_user(be, argp))
- return -EFAULT;
-
- if (be)
- tun->flags |= TUN_VNET_BE;
- else
- tun->flags &= ~TUN_VNET_BE;
-
- return 0;
-}
-#else
-static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
-{
- return virtio_legacy_is_little_endian();
-}
-
-static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
-{
- return -EINVAL;
-}
-
-static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
-{
- return -EINVAL;
-}
-#endif /* CONFIG_TUN_VNET_CROSS_LE */
-
-static inline bool tun_is_little_endian(struct tun_struct *tun)
-{
- return tun->flags & TUN_VNET_LE ||
- tun_legacy_is_little_endian(tun);
-}
-
-static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
-{
- return __virtio16_to_cpu(tun_is_little_endian(tun), val);
-}
-
-static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
-{
- return __cpu_to_virtio16(tun_is_little_endian(tun), val);
-}
-
static inline u32 tun_hashfn(u32 rxhash)
{
return rxhash & TUN_MASK_FLOW_ENTRIES;
@@ -442,7 +378,7 @@ static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
static void tun_flow_cleanup(struct timer_list *t)
{
- struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
+ struct tun_struct *tun = timer_container_of(tun, t, flow_gc_timer);
unsigned long delay = tun->ageing_time;
unsigned long next_timer = jiffies + delay;
unsigned long count = 0;
@@ -580,7 +516,7 @@ static inline bool tun_not_capable(struct tun_struct *tun)
struct net *net = dev_net(tun->dev);
return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
- (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
+ (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
!ns_capable(net->user_ns, CAP_NET_ADMIN);
}
@@ -990,6 +926,7 @@ static int tun_net_init(struct net_device *dev)
dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX;
+ dev->hw_enc_features = dev->hw_features;
dev->features = dev->hw_features;
dev->vlan_features = dev->features &
~(NETIF_F_HW_VLAN_CTAG_TX |
@@ -1065,8 +1002,8 @@ static unsigned int run_ebpf_filter(struct tun_struct *tun,
/* Net device start xmit */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
struct tun_struct *tun = netdev_priv(dev);
- enum skb_drop_reason drop_reason;
int txq = skb->queue_mapping;
struct netdev_queue *queue;
struct tun_file *tfile;
@@ -1095,10 +1032,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
}
if (tfile->socket.sk->sk_filter &&
- sk_filter(tfile->socket.sk, skb)) {
- drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
+ sk_filter_reason(tfile->socket.sk, skb, &drop_reason))
goto drop;
- }
len = run_ebpf_filter(tun, skb, len);
if (len == 0) {
@@ -1360,7 +1295,7 @@ static void tun_flow_init(struct tun_struct *tun)
static void tun_flow_uninit(struct tun_struct *tun)
{
- del_timer_sync(&tun->flow_gc_timer);
+ timer_delete_sync(&tun->flow_gc_timer);
tun_flow_flush(tun);
}
@@ -1481,7 +1416,7 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
skb->truesize += skb->data_len;
for (i = 1; i < it->nr_segs; i++) {
- const struct iovec *iov = iter_iov(it);
+ const struct iovec *iov = iter_iov(it) + i;
size_t fragsz = iov->iov_len;
struct page *page;
void *frag;
@@ -1600,7 +1535,8 @@ static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
static struct sk_buff *__tun_build_skb(struct tun_file *tfile,
struct page_frag *alloc_frag, char *buf,
- int buflen, int len, int pad)
+ int buflen, int len, int pad,
+ int metasize)
{
struct sk_buff *skb = build_skb(buf, buflen);
@@ -1609,6 +1545,8 @@ static struct sk_buff *__tun_build_skb(struct tun_file *tfile,
skb_reserve(skb, pad);
skb_put(skb, len);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb_set_owner_w(skb, tfile->socket.sk);
get_page(alloc_frag->page);
@@ -1668,6 +1606,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
char *buf;
size_t copied;
int pad = TUN_RX_PAD;
+ int metasize = 0;
int err = 0;
rcu_read_lock();
@@ -1695,7 +1634,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
if (hdr->gso_type || !xdp_prog) {
*skb_xdp = 1;
return __tun_build_skb(tfile, alloc_frag, buf, buflen, len,
- pad);
+ pad, metasize);
}
*skb_xdp = 0;
@@ -1709,7 +1648,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
u32 act;
xdp_init_buff(&xdp, buflen, &tfile->xdp_rxq);
- xdp_prepare_buff(&xdp, buf, pad, len, false);
+ xdp_prepare_buff(&xdp, buf, pad, len, true);
act = bpf_prog_run_xdp(xdp_prog, &xdp);
if (act == XDP_REDIRECT || act == XDP_TX) {
@@ -1730,12 +1669,18 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
pad = xdp.data - xdp.data_hard_start;
len = xdp.data_end - xdp.data;
+
+ /* It is known that the xdp_buff was prepared with metadata
+ * support, so the metasize will never be negative.
+ */
+ metasize = xdp.data - xdp.data_meta;
}
bpf_net_ctx_clear(bpf_net_ctx);
rcu_read_unlock();
local_bh_enable();
- return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);
+ return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad,
+ metasize);
out:
bpf_net_ctx_clear(bpf_net_ctx);
@@ -1753,15 +1698,26 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
struct sk_buff *skb;
size_t total_len = iov_iter_count(from);
size_t len = total_len, align = tun->align, linear;
- struct virtio_net_hdr gso = { 0 };
+ struct virtio_net_hdr_v1_hash_tunnel hdr;
+ struct virtio_net_hdr *gso;
int good_linear;
int copylen;
+ int hdr_len = 0;
bool zerocopy = false;
int err;
u32 rxhash = 0;
int skb_xdp = 1;
bool frags = tun_napi_frags_enabled(tfile);
enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
+ netdev_features_t features = 0;
+
+ /*
+ * Keep it easy and always zero the whole buffer, even if the
+ * tunnel-related field will be touched only when the feature
+ * is enabled and the hdr size is compatible.
+ */
+ memset(&hdr, 0, sizeof(hdr));
+ gso = (struct virtio_net_hdr *)&hdr;
if (!(tun->flags & IFF_NO_PI)) {
if (len < sizeof(pi))
@@ -1775,26 +1731,18 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (tun->flags & IFF_VNET_HDR) {
int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
- if (len < vnet_hdr_sz)
- return -EINVAL;
- len -= vnet_hdr_sz;
+ features = tun_vnet_hdr_guest_features(vnet_hdr_sz);
+ hdr_len = __tun_vnet_hdr_get(vnet_hdr_sz, tun->flags,
+ features, from, gso);
+ if (hdr_len < 0)
+ return hdr_len;
- if (!copy_from_iter_full(&gso, sizeof(gso), from))
- return -EFAULT;
-
- if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
- tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
- gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
-
- if (tun16_to_cpu(tun, gso.hdr_len) > len)
- return -EINVAL;
- iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
+ len -= vnet_hdr_sz;
}
if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
align += NET_IP_ALIGN;
- if (unlikely(len < ETH_HLEN ||
- (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
+ if (unlikely(len < ETH_HLEN || (hdr_len && hdr_len < ETH_HLEN)))
return -EINVAL;
}
@@ -1807,9 +1755,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
* enough room for skb expand head in case it is used.
* The rest of the buffer is mapped from userspace.
*/
- copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
- if (copylen > good_linear)
- copylen = good_linear;
+ copylen = min(hdr_len ? hdr_len : GOODCOPY_LEN, good_linear);
linear = copylen;
iov_iter_advance(&i, copylen);
if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
@@ -1821,7 +1767,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
* (e.g gso or jumbo packet), we will do it at after
* skb was created with generic XDP routine.
*/
- skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
+ skb = tun_build_skb(tun, tfile, from, gso, len, &skb_xdp);
err = PTR_ERR_OR_ZERO(skb);
if (err)
goto drop;
@@ -1830,10 +1776,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
} else {
if (!zerocopy) {
copylen = len;
- if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
- linear = good_linear;
- else
- linear = tun16_to_cpu(tun, gso.hdr_len);
+ linear = min(hdr_len, good_linear);
}
if (frags) {
@@ -1868,7 +1811,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
}
}
- if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
+ if (tun_vnet_hdr_tnl_to_skb(tun->flags, features, skb, &hdr)) {
atomic_long_inc(&tun->rx_frame_errors);
err = -EINVAL;
goto free_skb;
@@ -1932,6 +1875,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
local_bh_enable();
goto unlock_frags;
}
+
+ if (frags && skb != tfile->napi.skb)
+ tfile->napi.skb = skb;
}
rcu_read_unlock();
local_bh_enable();
@@ -2063,18 +2009,15 @@ static ssize_t tun_put_user_xdp(struct tun_struct *tun,
{
int vnet_hdr_sz = 0;
size_t size = xdp_frame->len;
- size_t ret;
+ ssize_t ret;
if (tun->flags & IFF_VNET_HDR) {
struct virtio_net_hdr gso = { 0 };
vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
- if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
- return -EINVAL;
- if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
- sizeof(gso)))
- return -EFAULT;
- iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
+ ret = tun_vnet_hdr_put(vnet_hdr_sz, iter, &gso);
+ if (ret)
+ return ret;
}
ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;
@@ -2097,6 +2040,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
int vlan_offset = 0;
int vlan_hlen = 0;
int vnet_hdr_sz = 0;
+ int ret;
if (skb_vlan_tag_present(skb))
vlan_hlen = VLAN_HLEN;
@@ -2121,33 +2065,23 @@ static ssize_t tun_put_user(struct tun_struct *tun,
}
if (vnet_hdr_sz) {
- struct virtio_net_hdr gso;
-
- if (iov_iter_count(iter) < vnet_hdr_sz)
- return -EINVAL;
+ struct virtio_net_hdr_v1_hash_tunnel hdr;
+ struct virtio_net_hdr *gso;
- if (virtio_net_hdr_from_skb(skb, &gso,
- tun_is_little_endian(tun), true,
- vlan_hlen)) {
- struct skb_shared_info *sinfo = skb_shinfo(skb);
-
- if (net_ratelimit()) {
- netdev_err(tun->dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
- sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
- tun16_to_cpu(tun, gso.hdr_len));
- print_hex_dump(KERN_ERR, "tun: ",
- DUMP_PREFIX_NONE,
- 16, 1, skb->head,
- min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
- }
- WARN_ON_ONCE(1);
- return -EINVAL;
- }
-
- if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
- return -EFAULT;
+ ret = tun_vnet_hdr_tnl_from_skb(tun->flags, tun->dev, skb,
+ &hdr);
+ if (ret)
+ return ret;
- iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
+ /*
+ * Drop the packet if the configured header size is too small
+ * WRT the enabled offloads.
+ */
+ gso = (struct virtio_net_hdr *)&hdr;
+ ret = __tun_vnet_hdr_put(vnet_hdr_sz, tun->dev->features,
+ iter, gso);
+ if (ret)
+ return ret;
}
if (vlan_hlen) {
@@ -2445,13 +2379,15 @@ static int tun_xdp_one(struct tun_struct *tun,
struct tun_page *tpage)
{
unsigned int datasize = xdp->data_end - xdp->data;
- struct tun_xdp_hdr *hdr = xdp->data_hard_start;
- struct virtio_net_hdr *gso = &hdr->gso;
+ struct virtio_net_hdr *gso = xdp->data_hard_start;
+ struct virtio_net_hdr_v1_hash_tunnel *tnl_hdr;
struct bpf_prog *xdp_prog;
struct sk_buff *skb = NULL;
struct sk_buff_head *queue;
+ netdev_features_t features;
u32 rxhash = 0, act;
- int buflen = hdr->buflen;
+ int buflen = xdp->frame_sz;
+ int metasize = 0;
int ret = 0;
bool skb_xdp = false;
struct page *page;
@@ -2467,7 +2403,6 @@ static int tun_xdp_one(struct tun_struct *tun,
}
xdp_init_buff(xdp, buflen, &tfile->xdp_rxq);
- xdp_set_data_meta_invalid(xdp);
act = bpf_prog_run_xdp(xdp_prog, xdp);
ret = tun_xdp_act(tun, xdp_prog, xdp, act);
@@ -2507,7 +2442,17 @@ build:
skb_reserve(skb, xdp->data - xdp->data_hard_start);
skb_put(skb, xdp->data_end - xdp->data);
- if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
+ /* The externally provided xdp_buff may have no metadata support, which
+ * is marked by xdp->data_meta being xdp->data + 1. This will lead to a
+ * metasize of -1 and is the reason why the condition checks for > 0.
+ */
+ metasize = xdp->data - xdp->data_meta;
+ if (metasize > 0)
+ skb_metadata_set(skb, metasize);
+
+ features = tun_vnet_hdr_guest_features(READ_ONCE(tun->vnet_hdr_sz));
+ tnl_hdr = (struct virtio_net_hdr_v1_hash_tunnel *)gso;
+ if (tun_vnet_hdr_tnl_to_skb(tun->flags, features, skb, tnl_hdr)) {
atomic_long_inc(&tun->rx_frame_errors);
kfree_skb(skb);
ret = -EINVAL;
@@ -2881,18 +2826,20 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (netif_running(tun->dev))
netif_tx_wake_all_queues(tun->dev);
- strcpy(ifr->ifr_name, tun->dev->name);
+ strscpy(ifr->ifr_name, tun->dev->name);
return 0;
}
static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
{
- strcpy(ifr->ifr_name, tun->dev->name);
+ strscpy(ifr->ifr_name, tun->dev->name);
ifr->ifr_flags = tun_flags(tun);
}
+#define PLAIN_GSO (NETIF_F_GSO_UDP_L4 | NETIF_F_TSO | NETIF_F_TSO6)
+
/* This is like a cut-down ethtool ops, except done via tun fd so no
* privs required. */
static int set_offload(struct tun_struct *tun, unsigned long arg)
@@ -2922,6 +2869,18 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
features |= NETIF_F_GSO_UDP_L4;
arg &= ~(TUN_F_USO4 | TUN_F_USO6);
}
+
+ /*
+ * Tunnel offload is allowed only if some plain offload is
+ * available, too.
+ */
+ if (features & PLAIN_GSO && arg & TUN_F_UDP_TUNNEL_GSO) {
+ features |= NETIF_F_GSO_UDP_TUNNEL;
+ if (arg & TUN_F_UDP_TUNNEL_GSO_CSUM)
+ features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ arg &= ~(TUN_F_UDP_TUNNEL_GSO |
+ TUN_F_UDP_TUNNEL_GSO_CSUM);
+ }
}
/* This gives the user a way to test for new features in future by
@@ -3091,8 +3050,6 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
kgid_t group;
int ifindex;
int sndbuf;
- int vnet_hdr_sz;
- int le;
int ret;
bool do_notify = false;
@@ -3269,14 +3226,20 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
case SIOCGIFHWADDR:
/* Get hw address */
- dev_get_mac_address(&ifr.ifr_hwaddr, net, tun->dev->name);
+ netif_get_mac_address(&ifr.ifr_hwaddr, net, tun->dev->name);
if (copy_to_user(argp, &ifr, ifreq_len))
ret = -EFAULT;
break;
case SIOCSIFHWADDR:
/* Set hw address */
- ret = dev_set_mac_address_user(tun->dev, &ifr.ifr_hwaddr, NULL);
+ if (tun->dev->addr_len > sizeof(ifr.ifr_hwaddr)) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = dev_set_mac_address_user(tun->dev,
+ (struct sockaddr_storage *)&ifr.ifr_hwaddr,
+ NULL);
break;
case TUNGETSNDBUF:
@@ -3299,50 +3262,6 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
tun_set_sndbuf(tun);
break;
- case TUNGETVNETHDRSZ:
- vnet_hdr_sz = tun->vnet_hdr_sz;
- if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
- ret = -EFAULT;
- break;
-
- case TUNSETVNETHDRSZ:
- if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
- ret = -EFAULT;
- break;
- }
- if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
- ret = -EINVAL;
- break;
- }
-
- tun->vnet_hdr_sz = vnet_hdr_sz;
- break;
-
- case TUNGETVNETLE:
- le = !!(tun->flags & TUN_VNET_LE);
- if (put_user(le, (int __user *)argp))
- ret = -EFAULT;
- break;
-
- case TUNSETVNETLE:
- if (get_user(le, (int __user *)argp)) {
- ret = -EFAULT;
- break;
- }
- if (le)
- tun->flags |= TUN_VNET_LE;
- else
- tun->flags &= ~TUN_VNET_LE;
- break;
-
- case TUNGETVNETBE:
- ret = tun_get_vnet_be(tun, argp);
- break;
-
- case TUNSETVNETBE:
- ret = tun_set_vnet_be(tun, argp);
- break;
-
case TUNATTACHFILTER:
/* Can be set only for TAPs */
ret = -EINVAL;
@@ -3398,7 +3317,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
break;
default:
- ret = -EINVAL;
+ ret = tun_vnet_ioctl(&tun->vnet_hdr_sz, &tun->flags, cmd, argp);
break;
}
@@ -3697,9 +3616,9 @@ static int tun_queue_resize(struct tun_struct *tun)
list_for_each_entry(tfile, &tun->disabled, next)
rings[i++] = &tfile->tx_ring;
- ret = ptr_ring_resize_multiple(rings, n,
- dev->tx_queue_len, GFP_KERNEL,
- tun_ptr_free);
+ ret = ptr_ring_resize_multiple_bh(rings, n,
+ dev->tx_queue_len, GFP_KERNEL,
+ tun_ptr_free);
kfree(rings);
return ret;
@@ -3816,3 +3735,4 @@ MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(TUN_MINOR);
MODULE_ALIAS("devname:net/tun");
+MODULE_IMPORT_NS("NETDEV_INTERNAL");
diff --git a/drivers/net/tun_vnet.h b/drivers/net/tun_vnet.h
new file mode 100644
index 000000000000..a5f93b6c4482
--- /dev/null
+++ b/drivers/net/tun_vnet.h
@@ -0,0 +1,269 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef TUN_VNET_H
+#define TUN_VNET_H
+
+/* High bits in flags field are unused. */
+#define TUN_VNET_LE 0x80000000
+#define TUN_VNET_BE 0x40000000
+
+#define TUN_VNET_TNL_SIZE sizeof(struct virtio_net_hdr_v1_hash_tunnel)
+
+static inline bool tun_vnet_legacy_is_little_endian(unsigned int flags)
+{
+ bool be = IS_ENABLED(CONFIG_TUN_VNET_CROSS_LE) &&
+ (flags & TUN_VNET_BE);
+
+ return !be && virtio_legacy_is_little_endian();
+}
+
+static inline long tun_get_vnet_be(unsigned int flags, int __user *argp)
+{
+ int be = !!(flags & TUN_VNET_BE);
+
+ if (!IS_ENABLED(CONFIG_TUN_VNET_CROSS_LE))
+ return -EINVAL;
+
+ if (put_user(be, argp))
+ return -EFAULT;
+
+ return 0;
+}
+
+static inline long tun_set_vnet_be(unsigned int *flags, int __user *argp)
+{
+ int be;
+
+ if (!IS_ENABLED(CONFIG_TUN_VNET_CROSS_LE))
+ return -EINVAL;
+
+ if (get_user(be, argp))
+ return -EFAULT;
+
+ if (be)
+ *flags |= TUN_VNET_BE;
+ else
+ *flags &= ~TUN_VNET_BE;
+
+ return 0;
+}
+
+static inline bool tun_vnet_is_little_endian(unsigned int flags)
+{
+ return flags & TUN_VNET_LE || tun_vnet_legacy_is_little_endian(flags);
+}
+
+static inline u16 tun_vnet16_to_cpu(unsigned int flags, __virtio16 val)
+{
+ return __virtio16_to_cpu(tun_vnet_is_little_endian(flags), val);
+}
+
+static inline __virtio16 cpu_to_tun_vnet16(unsigned int flags, u16 val)
+{
+ return __cpu_to_virtio16(tun_vnet_is_little_endian(flags), val);
+}
+
+static inline long tun_vnet_ioctl(int *vnet_hdr_sz, unsigned int *flags,
+ unsigned int cmd, int __user *sp)
+{
+ int s;
+
+ switch (cmd) {
+ case TUNGETVNETHDRSZ:
+ s = *vnet_hdr_sz;
+ if (put_user(s, sp))
+ return -EFAULT;
+ return 0;
+
+ case TUNSETVNETHDRSZ:
+ if (get_user(s, sp))
+ return -EFAULT;
+ if (s < (int)sizeof(struct virtio_net_hdr))
+ return -EINVAL;
+
+ *vnet_hdr_sz = s;
+ return 0;
+
+ case TUNGETVNETLE:
+ s = !!(*flags & TUN_VNET_LE);
+ if (put_user(s, sp))
+ return -EFAULT;
+ return 0;
+
+ case TUNSETVNETLE:
+ if (get_user(s, sp))
+ return -EFAULT;
+ if (s)
+ *flags |= TUN_VNET_LE;
+ else
+ *flags &= ~TUN_VNET_LE;
+ return 0;
+
+ case TUNGETVNETBE:
+ return tun_get_vnet_be(*flags, sp);
+
+ case TUNSETVNETBE:
+ return tun_set_vnet_be(flags, sp);
+
+ default:
+ return -EINVAL;
+ }
+}
+
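+/* Number of vnet header bytes the kernel actually parses: the base
+ * virtio_net_hdr, or the hash/tunnel variant when UDP tunnel GSO has
+ * been negotiated. Bytes beyond this, up to the configured vnet_hdr_sz,
+ * are skipped on get and zero-filled on put.
+ */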
+static inline unsigned int tun_vnet_parse_size(netdev_features_t features)
+{
+ if (!(features & NETIF_F_GSO_UDP_TUNNEL))
+ return sizeof(struct virtio_net_hdr);
+
+ return TUN_VNET_TNL_SIZE;
+}
+
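+/* Copy the guest-supplied vnet header from @from. On success the iterator
+ * is advanced past the configured header size @sz and the guest hdr_len
+ * (bumped, if needed, to cover the checksum area) is returned; a negative
+ * errno is returned on error.
+ */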
+static inline int __tun_vnet_hdr_get(int sz, unsigned int flags,
+ netdev_features_t features,
+ struct iov_iter *from,
+ struct virtio_net_hdr *hdr)
+{
+ unsigned int parsed_size = tun_vnet_parse_size(features);
+ u16 hdr_len;
+
+ if (iov_iter_count(from) < sz)
+ return -EINVAL;
+
+ if (!copy_from_iter_full(hdr, parsed_size, from))
+ return -EFAULT;
+
+ hdr_len = tun_vnet16_to_cpu(flags, hdr->hdr_len);
+
+ if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+ hdr_len = max(tun_vnet16_to_cpu(flags, hdr->csum_start) + tun_vnet16_to_cpu(flags, hdr->csum_offset) + 2, hdr_len);
+ hdr->hdr_len = cpu_to_tun_vnet16(flags, hdr_len);
+ }
+
+ if (hdr_len > iov_iter_count(from))
+ return -EINVAL;
+
+ iov_iter_advance(from, sz - parsed_size);
+
+ return hdr_len;
+}
+
+static inline int tun_vnet_hdr_get(int sz, unsigned int flags,
+ struct iov_iter *from,
+ struct virtio_net_hdr *hdr)
+{
+ return __tun_vnet_hdr_get(sz, flags, 0, from, hdr);
+}
+
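+/* Copy the parsed part of the vnet header to @iter and zero-fill the
+ * remainder of the configured header size @sz.
+ */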
+static inline int __tun_vnet_hdr_put(int sz, netdev_features_t features,
+ struct iov_iter *iter,
+ const struct virtio_net_hdr *hdr)
+{
+ unsigned int parsed_size = tun_vnet_parse_size(features);
+
+ if (unlikely(iov_iter_count(iter) < sz))
+ return -EINVAL;
+
+ if (unlikely(copy_to_iter(hdr, parsed_size, iter) != parsed_size))
+ return -EFAULT;
+
+ if (iov_iter_zero(sz - parsed_size, iter) != sz - parsed_size)
+ return -EFAULT;
+
+ return 0;
+}
+
+static inline int tun_vnet_hdr_put(int sz, struct iov_iter *iter,
+ const struct virtio_net_hdr *hdr)
+{
+ return __tun_vnet_hdr_put(sz, 0, iter, hdr);
+}
+
+static inline int tun_vnet_hdr_to_skb(unsigned int flags, struct sk_buff *skb,
+ const struct virtio_net_hdr *hdr)
+{
+ return virtio_net_hdr_to_skb(skb, hdr, tun_vnet_is_little_endian(flags));
+}
+
+/*
+ * Tun is not aware of the negotiated guest features; guess them from the
+ * virtio net hdr size
+ */
+static inline netdev_features_t tun_vnet_hdr_guest_features(int vnet_hdr_sz)
+{
+ if (vnet_hdr_sz >= TUN_VNET_TNL_SIZE)
+ return NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ return 0;
+}
+
+static inline int
+tun_vnet_hdr_tnl_to_skb(unsigned int flags, netdev_features_t features,
+ struct sk_buff *skb,
+ const struct virtio_net_hdr_v1_hash_tunnel *hdr)
+{
+ return virtio_net_hdr_tnl_to_skb(skb, hdr,
+ features & NETIF_F_GSO_UDP_TUNNEL,
+ features & NETIF_F_GSO_UDP_TUNNEL_CSUM,
+ tun_vnet_is_little_endian(flags));
+}
+
+static inline int tun_vnet_hdr_from_skb(unsigned int flags,
+ const struct net_device *dev,
+ const struct sk_buff *skb,
+ struct virtio_net_hdr *hdr)
+{
+ int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0;
+
+ if (virtio_net_hdr_from_skb(skb, hdr,
+ tun_vnet_is_little_endian(flags), true,
+ vlan_hlen)) {
+ struct skb_shared_info *sinfo = skb_shinfo(skb);
+
+ if (net_ratelimit()) {
+ netdev_err(dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
+ sinfo->gso_type, tun_vnet16_to_cpu(flags, hdr->gso_size),
+ tun_vnet16_to_cpu(flags, hdr->hdr_len));
+ print_hex_dump(KERN_ERR, "tun: ",
+ DUMP_PREFIX_NONE,
+ 16, 1, skb->head,
+ min(tun_vnet16_to_cpu(flags, hdr->hdr_len), 64), true);
+ }
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int
+tun_vnet_hdr_tnl_from_skb(unsigned int flags,
+ const struct net_device *dev,
+ const struct sk_buff *skb,
+ struct virtio_net_hdr_v1_hash_tunnel *tnl_hdr)
+{
+ bool has_tnl_offload = !!(dev->features & NETIF_F_GSO_UDP_TUNNEL);
+ int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0;
+
+ if (virtio_net_hdr_tnl_from_skb(skb, tnl_hdr, has_tnl_offload,
+ tun_vnet_is_little_endian(flags),
+ vlan_hlen, true)) {
+ struct virtio_net_hdr_v1 *hdr = &tnl_hdr->hash_hdr.hdr;
+ struct skb_shared_info *sinfo = skb_shinfo(skb);
+
+ if (net_ratelimit()) {
+ int hdr_len = tun_vnet16_to_cpu(flags, hdr->hdr_len);
+
+ netdev_err(dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
+ sinfo->gso_type,
+ tun_vnet16_to_cpu(flags, hdr->gso_size),
+ tun_vnet16_to_cpu(flags, hdr->hdr_len));
+ print_hex_dump(KERN_ERR, "tun: ", DUMP_PREFIX_NONE,
+ 16, 1, skb->head, min(hdr_len, 64),
+ true);
+ }
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#endif /* TUN_VNET_H */
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 3c360d4f0635..856e648d804e 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -101,9 +101,7 @@ config USB_RTL8152
select MII
select PHYLIB
select CRC32
- select CRYPTO
- select CRYPTO_HASH
- select CRYPTO_SHA256
+ select CRYPTO_LIB_SHA256
help
This option adds support for Realtek RTL8152 based USB 2.0
10/100 Ethernet adapters and RTL8153 based USB 3.0 10/100/1000
@@ -115,10 +113,10 @@ config USB_RTL8152
config USB_LAN78XX
tristate "Microchip LAN78XX Based USB Ethernet Adapters"
select MII
- select PHYLIB
+ select PHYLINK
select MICROCHIP_PHY
- select FIXED_PHY
select CRC32
+ imply NET_SELFTESTS
help
This option adds support for Microchip LAN78XX based USB 2
& USB 3 10/100/1000 Ethernet adapters.
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index ff5be2cbf17b..9201ee10a13f 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -30,11 +30,14 @@ static int aqc111_read_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
ret = usbnet_read_cmd_nopm(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, value, index, data, size);
- if (unlikely(ret < 0))
+ if (unlikely(ret < size)) {
netdev_warn(dev->net,
"Failed to read(0x%x) reg index 0x%04x: %d\n",
cmd, index, ret);
+ ret = ret < 0 ? ret : -ENODATA;
+ }
+
return ret;
}
@@ -46,11 +49,14 @@ static int aqc111_read_cmd(struct usbnet *dev, u8 cmd, u16 value,
ret = usbnet_read_cmd(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, value, index, data, size);
- if (unlikely(ret < 0))
+ if (unlikely(ret < size)) {
netdev_warn(dev->net,
"Failed to read(0x%x) reg index 0x%04x: %d\n",
cmd, index, ret);
+ ret = ret < 0 ? ret : -ENODATA;
+ }
+
return ret;
}
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
index 74162190bccc..8531b804021a 100644
--- a/drivers/net/usb/asix.h
+++ b/drivers/net/usb/asix.h
@@ -224,7 +224,6 @@ int asix_write_rx_ctl(struct usbnet *dev, u16 mode, int in_pm);
u16 asix_read_medium_status(struct usbnet *dev, int in_pm);
int asix_write_medium_mode(struct usbnet *dev, u16 mode, int in_pm);
-void asix_adjust_link(struct net_device *netdev);
int asix_write_gpio(struct usbnet *dev, u16 value, int sleep, int in_pm);
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 72ffc89b477a..7fd763917ae2 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -414,28 +414,6 @@ int asix_write_medium_mode(struct usbnet *dev, u16 mode, int in_pm)
return ret;
}
-/* set MAC link settings according to information from phylib */
-void asix_adjust_link(struct net_device *netdev)
-{
- struct phy_device *phydev = netdev->phydev;
- struct usbnet *dev = netdev_priv(netdev);
- u16 mode = 0;
-
- if (phydev->link) {
- mode = AX88772_MEDIUM_DEFAULT;
-
- if (phydev->duplex == DUPLEX_HALF)
- mode &= ~AX_MEDIUM_FD;
-
- if (phydev->speed != SPEED_100)
- mode &= ~AX_MEDIUM_PS;
- }
-
- asix_write_medium_mode(dev, mode, 0);
- phy_print_status(phydev);
- usbnet_link_change(dev, phydev->link, 0);
-}
-
int asix_write_gpio(struct usbnet *dev, u16 value, int sleep, int in_pm)
{
int ret;
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 57d6e5abc30e..232bbd79a4de 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -230,7 +230,9 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
int i;
unsigned long gpio_bits = dev->driver_info->data;
- usbnet_get_endpoints(dev,intf);
+ ret = usbnet_get_endpoints(dev, intf);
+ if (ret)
+ goto out;
/* Toggle the GPIOs in a manufacturer/model specific way */
for (i = 2; i >= 0; i--) {
@@ -625,6 +627,21 @@ static void ax88772_suspend(struct usbnet *dev)
asix_read_medium_status(dev, 1));
}
+/* Notes on PM callbacks and locking context:
+ *
+ * - asix_suspend()/asix_resume() are invoked for both runtime PM and
+ * system-wide suspend/resume. For struct usb_driver the ->resume()
+ * callback does not receive pm_message_t, so the resume type cannot
+ * be distinguished here.
+ *
+ * - The MAC driver must hold RTNL when calling phylink interfaces such as
+ * phylink_suspend()/resume(). Those calls will also perform MDIO I/O.
+ *
+ * - Taking RTNL and doing MDIO from a runtime-PM resume callback (while
+ * the USB PM lock is held) is fragile. Since autosuspend brings no
+ * measurable power saving here, we block it by holding a PM usage
+ * reference in ax88772_bind().
+ */
static int asix_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usbnet *dev = usb_get_intfdata(intf);
@@ -676,6 +693,7 @@ static int ax88772_init_mdio(struct usbnet *dev)
priv->mdio->read = &asix_mdio_bus_read;
priv->mdio->write = &asix_mdio_bus_write;
priv->mdio->name = "Asix MDIO Bus";
+ priv->mdio->phy_mask = ~(BIT(priv->phy_addr & 0x1f) | BIT(AX_EMBD_PHY_ADDR));
/* mii bus name is usb-<usb bus number>-<usb device number> */
snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
dev->udev->bus->busnum, dev->udev->devnum);
@@ -752,7 +770,6 @@ static void ax88772_mac_link_down(struct phylink_config *config,
struct usbnet *dev = netdev_priv(to_net_dev(config->dev));
asix_write_medium_mode(dev, 0, 0);
- usbnet_link_change(dev, false, false);
}
static void ax88772_mac_link_up(struct phylink_config *config,
@@ -783,7 +800,6 @@ static void ax88772_mac_link_up(struct phylink_config *config,
m |= AX_MEDIUM_RFC;
asix_write_medium_mode(dev, m, 0);
- usbnet_link_change(dev, true, false);
}
static const struct phylink_mac_ops ax88772_phylink_mac_ops = {
@@ -834,7 +850,9 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
dev->driver_priv = priv;
- usbnet_get_endpoints(dev, intf);
+ ret = usbnet_get_endpoints(dev, intf);
+ if (ret)
+ return ret;
/* Maybe the boot loader passed the MAC address via device tree */
if (!eth_platform_get_mac_address(&dev->udev->dev, buf)) {
@@ -920,6 +938,13 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
if (ret)
goto initphy_err;
+ /* Keep this interface runtime-PM active by taking a usage ref.
+ * Prevents runtime suspend while bound and avoids resume paths
+ * that could deadlock (autoresume under RTNL while USB PM lock
+ * is held, phylink/MDIO wants RTNL).
+ */
+ pm_runtime_get_noresume(&intf->dev);
+
return 0;
initphy_err:
@@ -949,6 +974,8 @@ static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
phylink_destroy(priv->phylink);
ax88772_mdio_unregister(priv);
asix_rx_fixup_common_free(dev->driver_priv);
+ /* Drop the PM usage ref taken in bind() */
+ pm_runtime_put(&intf->dev);
}
static void ax88178_unbind(struct usbnet *dev, struct usb_interface *intf)
@@ -1258,7 +1285,9 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
int ret;
u8 buf[ETH_ALEN] = {0};
- usbnet_get_endpoints(dev,intf);
+ ret = usbnet_get_endpoints(dev, intf);
+ if (ret)
+ return ret;
/* Get the MAC address */
ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0);
@@ -1350,10 +1379,9 @@ static const struct driver_info ax88772_info = {
.description = "ASIX AX88772 USB 2.0 Ethernet",
.bind = ax88772_bind,
.unbind = ax88772_unbind,
- .status = asix_status,
.reset = ax88772_reset,
.stop = ax88772_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_MULTI_PACKET,
.rx_fixup = asix_rx_fixup_common,
.tx_fixup = asix_tx_fixup,
};
@@ -1362,11 +1390,9 @@ static const struct driver_info ax88772b_info = {
.description = "ASIX AX88772B USB 2.0 Ethernet",
.bind = ax88772_bind,
.unbind = ax88772_unbind,
- .status = asix_status,
.reset = ax88772_reset,
.stop = ax88772_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
- FLAG_MULTI_PACKET,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_MULTI_PACKET,
.rx_fixup = asix_rx_fixup_common,
.tx_fixup = asix_tx_fixup,
.data = FLAG_EEPROM_MAC,
@@ -1376,11 +1402,9 @@ static const struct driver_info lxausb_t1l_info = {
.description = "Linux Automation GmbH USB 10Base-T1L",
.bind = ax88772_bind,
.unbind = ax88772_unbind,
- .status = asix_status,
.reset = ax88772_reset,
.stop = ax88772_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
- FLAG_MULTI_PACKET,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_MULTI_PACKET,
.rx_fixup = asix_rx_fixup_common,
.tx_fixup = asix_tx_fixup,
.data = FLAG_EEPROM_MAC,
@@ -1412,15 +1436,26 @@ static const struct driver_info hg20f9_info = {
.description = "HG20F9 USB 2.0 Ethernet",
.bind = ax88772_bind,
.unbind = ax88772_unbind,
- .status = asix_status,
.reset = ax88772_reset,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
- FLAG_MULTI_PACKET,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_MULTI_PACKET,
.rx_fixup = asix_rx_fixup_common,
.tx_fixup = asix_tx_fixup,
.data = FLAG_EEPROM_MAC,
};
+static const struct driver_info lyconsys_fibergecko100_info = {
+ .description = "LyconSys FiberGecko 100 USB 2.0 to SFP Adapter",
+ .bind = ax88178_bind,
+ .status = asix_status,
+ .link_reset = ax88178_link_reset,
+ .reset = ax88178_link_reset,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
+ FLAG_MULTI_PACKET,
+ .rx_fixup = asix_rx_fixup_common,
+ .tx_fixup = asix_tx_fixup,
+ .data = 0x20061201,
+};
+
static const struct usb_device_id products [] = {
{
// Linksys USB200M
@@ -1578,6 +1613,10 @@ static const struct usb_device_id products [] = {
// Linux Automation GmbH USB 10Base-T1L
USB_DEVICE(0x33f7, 0x0004),
.driver_info = (unsigned long) &lxausb_t1l_info,
+}, {
+ /* LyconSys FiberGecko 100 */
+ USB_DEVICE(0x1d2a, 0x0801),
+ .driver_info = (unsigned long) &lyconsys_fibergecko100_info,
},
{ }, // END
};
@@ -1591,6 +1630,11 @@ static struct usb_driver asix_driver = {
.resume = asix_resume,
.reset_resume = asix_resume,
.disconnect = usbnet_disconnect,
+ /* usbnet enables autosuspend by default (supports_autosuspend=1).
+ * We keep runtime-PM active for AX88772* by taking a PM usage
+ * reference in ax88772_bind() (pm_runtime_get_noresume()) and
+ * dropping it in unbind(), which effectively blocks autosuspend.
+ */
.supports_autosuspend = 1,
.disable_hub_initiated_lpm = 1,
};
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index e47bb125048d..f613e4bc68c8 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -18,8 +18,8 @@
struct ax88172a_private {
struct mii_bus *mdio;
struct phy_device *phydev;
- char phy_name[20];
- u16 phy_addr;
+ char phy_name[PHY_ID_SIZE];
+ u8 phy_addr;
u16 oldmode;
int use_embdphy;
struct asix_rx_fixup_info rx_fixup_info;
@@ -210,7 +210,11 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
ret = asix_read_phy_addr(dev, priv->use_embdphy);
if (ret < 0)
goto free;
-
+ if (ret >= PHY_MAX_ADDR) {
+ netdev_err(dev->net, "Invalid PHY address %#x\n", ret);
+ ret = -ENODEV;
+ goto free;
+ }
priv->phy_addr = ret;
ax88172a_reset_phy(dev, priv->use_embdphy);
@@ -308,7 +312,7 @@ static int ax88172a_reset(struct usbnet *dev)
rx_ctl);
/* Connect to PHY */
- snprintf(priv->phy_name, 20, PHY_ID_FMT,
+ snprintf(priv->phy_name, sizeof(priv->phy_name), PHY_ID_FMT,
priv->mdio->id, priv->phy_addr);
priv->phydev = phy_connect(dev->net, priv->phy_name,
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index ff439ef535ac..6759388692f8 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -602,7 +602,7 @@ static void catc_stats_done(struct catc *catc, struct ctrl_queue *q)
static void catc_stats_timer(struct timer_list *t)
{
- struct catc *catc = from_timer(catc, t, timer);
+ struct catc *catc = timer_container_of(catc, t, timer);
int i;
for (i = 0; i < 8; i++)
@@ -738,7 +738,7 @@ static int catc_stop(struct net_device *netdev)
netif_stop_queue(netdev);
if (!catc->is_f5u011)
- del_timer_sync(&catc->timer);
+ timer_delete_sync(&catc->timer);
usb_kill_urb(catc->rx_urb);
usb_kill_urb(catc->tx_urb);
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index a6469235d904..a032c1ded406 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -783,6 +783,13 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* Lenovo ThinkPad Hybrid USB-C with USB-A Dock (40af0135eu, based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0xa359, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* Aquantia AQtion USB to 5GbE Controller (based on AQC111U) */
{
USB_DEVICE_AND_INTERFACE_INFO(AQUANTIA_VENDOR_ID, 0xc101,
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index e13e4920ee9b..dbf01210b0e7 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -660,12 +660,12 @@ static const struct usb_device_id mbim_devs[] = {
.driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
},
- /* Telit FN990 */
+ /* Telit FN990A */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1071, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
},
- /* Telit FE990 */
+ /* Telit FE990A */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1081, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
},
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index d5c47a2a62dc..5d123df0a866 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -833,8 +833,7 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
ctx->dev = dev;
- hrtimer_init(&ctx->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ctx->tx_timer.function = &cdc_ncm_tx_timer_cb;
+ hrtimer_setup(&ctx->tx_timer, &cdc_ncm_tx_timer_cb, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
tasklet_setup(&ctx->bh, cdc_ncm_txpath_bh);
atomic_set(&ctx->stop, 0);
spin_lock_init(&ctx->mtx);
@@ -893,6 +892,10 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
}
}
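+ /* Cache whether the device advertises Ethernet packet filtering
+ * (USB_CDC_NCM_NCAP_ETH_FILTER), so that set_rx_mode only issues
+ * the CDC filter request when the device supports it.
+ */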
+ if (ctx->func_desc)
+ ctx->filtering_supported = !!(ctx->func_desc->bmNetworkCapabilities
+ & USB_CDC_NCM_NCAP_ETH_FILTER);
+
iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
/* Device-specific flags */
@@ -1899,6 +1902,14 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
}
}
+static void cdc_ncm_update_filter(struct usbnet *dev)
+{
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+
+ if (ctx->filtering_supported)
+ usbnet_cdc_update_filter(dev);
+}
+
static const struct driver_info cdc_ncm_info = {
.description = "CDC NCM (NO ZLP)",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
@@ -1909,7 +1920,7 @@ static const struct driver_info cdc_ncm_info = {
.status = cdc_ncm_status,
.rx_fixup = cdc_ncm_rx_fixup,
.tx_fixup = cdc_ncm_tx_fixup,
- .set_rx_mode = usbnet_cdc_update_filter,
+ .set_rx_mode = cdc_ncm_update_filter,
};
/* Same as cdc_ncm_info, but with FLAG_SEND_ZLP */
@@ -1923,7 +1934,7 @@ static const struct driver_info cdc_ncm_zlp_info = {
.status = cdc_ncm_status,
.rx_fixup = cdc_ncm_rx_fixup,
.tx_fixup = cdc_ncm_tx_fixup,
- .set_rx_mode = usbnet_cdc_update_filter,
+ .set_rx_mode = cdc_ncm_update_filter,
};
/* Same as cdc_ncm_info, but with FLAG_SEND_ZLP */
@@ -1965,7 +1976,7 @@ static const struct driver_info wwan_info = {
.status = cdc_ncm_status,
.rx_fixup = cdc_ncm_rx_fixup,
.tx_fixup = cdc_ncm_tx_fixup,
- .set_rx_mode = usbnet_cdc_update_filter,
+ .set_rx_mode = cdc_ncm_update_filter,
};
/* Same as wwan_info, but with FLAG_NOARP */
@@ -1979,7 +1990,7 @@ static const struct driver_info wwan_noarp_info = {
.status = cdc_ncm_status,
.rx_fixup = cdc_ncm_rx_fixup,
.tx_fixup = cdc_ncm_tx_fixup,
- .set_rx_mode = usbnet_cdc_update_filter,
+ .set_rx_mode = cdc_ncm_update_filter,
};
static const struct usb_device_id cdc_devs[] = {
@@ -2076,6 +2087,13 @@ static const struct usb_device_id cdc_devs[] = {
.driver_info = (unsigned long)&wwan_info,
},
+ /* Intel modem (label from OEM reads Fibocom L850-GL) */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x8087, 0x095a,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_info,
+ },
+
/* DisplayLink docking stations */
{ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_VENDOR,
diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c
index f69d9b902da0..a206ffa76f1b 100644
--- a/drivers/net/usb/ch9200.c
+++ b/drivers/net/usb/ch9200.c
@@ -178,6 +178,7 @@ static int ch9200_mdio_read(struct net_device *netdev, int phy_id, int loc)
{
struct usbnet *dev = netdev_priv(netdev);
unsigned char buff[2];
+ int ret;
netdev_dbg(netdev, "%s phy_id:%02x loc:%02x\n",
__func__, phy_id, loc);
@@ -185,8 +186,10 @@ static int ch9200_mdio_read(struct net_device *netdev, int phy_id, int loc)
if (phy_id != 0)
return -ENODEV;
- control_read(dev, REQUEST_READ, 0, loc * 2, buff, 0x02,
- CONTROL_TIMEOUT_MS);
+ ret = control_read(dev, REQUEST_READ, 0, loc * 2, buff, 0x02,
+ CONTROL_TIMEOUT_MS);
+ if (ret < 0)
+ return ret;
return (buff[0] | buff[1] << 8);
}
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
index 46af78caf457..0bfa37c14059 100644
--- a/drivers/net/usb/gl620a.c
+++ b/drivers/net/usb/gl620a.c
@@ -179,9 +179,7 @@ static int genelink_bind(struct usbnet *dev, struct usb_interface *intf)
{
dev->hard_mtu = GL_RCV_BUF_SIZE;
dev->net->hard_header_len += 4;
- dev->in = usb_rcvbulkpipe(dev->udev, dev->driver_info->in);
- dev->out = usb_sndbulkpipe(dev->udev, dev->driver_info->out);
- return 0;
+ return usbnet_get_endpoints(dev, intf);
}
static const struct driver_info genelink_info = {
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 46afb95ffabe..a19789b57190 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -61,7 +61,18 @@
#define IPHETH_USBINTF_PROTO 1
#define IPHETH_IP_ALIGN 2 /* padding at front of URB */
-#define IPHETH_NCM_HEADER_SIZE (12 + 96) /* NCMH + NCM0 */
+/* On iOS devices, NCM headers in RX have a fixed size regardless of DPE count:
+ * - NTH16 (NCMH): 12 bytes, as per CDC NCM 1.0 spec
+ * - NDP16 (NCM0): 96 bytes, of which
+ * - NDP16 fixed header: 8 bytes
+ * - maximum of 22 DPEs (21 datagrams + trailer), 4 bytes each
+ */
+#define IPHETH_NDP16_MAX_DPE 22
+#define IPHETH_NDP16_HEADER_SIZE (sizeof(struct usb_cdc_ncm_ndp16) + \
+ IPHETH_NDP16_MAX_DPE * \
+ sizeof(struct usb_cdc_ncm_dpe16))
+#define IPHETH_NCM_HEADER_SIZE (sizeof(struct usb_cdc_ncm_nth16) + \
+ IPHETH_NDP16_HEADER_SIZE)
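+/* With 22 DPEs the NDP16 block is 8 + 22 * 4 = 96 bytes, so the full fixed
+ * header is 12 + 96 = 108 bytes, matching the previous (12 + 96) constant.
+ */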
#define IPHETH_TX_BUF_SIZE ETH_FRAME_LEN
#define IPHETH_RX_BUF_SIZE_LEGACY (IPHETH_IP_ALIGN + ETH_FRAME_LEN)
#define IPHETH_RX_BUF_SIZE_NCM 65536
@@ -207,15 +218,23 @@ static int ipheth_rcvbulk_callback_legacy(struct urb *urb)
return ipheth_consume_skb(buf, len, dev);
}
+/* In "NCM mode", the iOS device encapsulates RX (phone->computer) traffic
+ * in NCM Transfer Blocks (similarly to CDC NCM). However, unlike reverse
+ * tethering (handled by the `cdc_ncm` driver), regular tethering is not
+ * compliant with the CDC NCM spec, as the device is missing the necessary
+ * descriptors, and TX (computer->phone) traffic is not encapsulated
+ * at all. Thus `ipheth` implements a very limited subset of the spec with
+ * the sole purpose of parsing RX URBs.
+ */
static int ipheth_rcvbulk_callback_ncm(struct urb *urb)
{
struct usb_cdc_ncm_nth16 *ncmh;
struct usb_cdc_ncm_ndp16 *ncm0;
struct usb_cdc_ncm_dpe16 *dpe;
struct ipheth_device *dev;
+ u16 dg_idx, dg_len;
int retval = -EINVAL;
char *buf;
- int len;
dev = urb->context;
@@ -226,40 +245,42 @@ static int ipheth_rcvbulk_callback_ncm(struct urb *urb)
ncmh = urb->transfer_buffer;
if (ncmh->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH16_SIGN) ||
- le16_to_cpu(ncmh->wNdpIndex) >= urb->actual_length) {
- dev->net->stats.rx_errors++;
- return retval;
- }
+ /* On iOS, NDP16 directly follows NTH16 */
+ ncmh->wNdpIndex != cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16)))
+ goto rx_error;
- ncm0 = urb->transfer_buffer + le16_to_cpu(ncmh->wNdpIndex);
- if (ncm0->dwSignature != cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN) ||
- le16_to_cpu(ncmh->wHeaderLength) + le16_to_cpu(ncm0->wLength) >=
- urb->actual_length) {
- dev->net->stats.rx_errors++;
- return retval;
- }
+ ncm0 = urb->transfer_buffer + sizeof(struct usb_cdc_ncm_nth16);
+ if (ncm0->dwSignature != cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN))
+ goto rx_error;
dpe = ncm0->dpe16;
- while (le16_to_cpu(dpe->wDatagramIndex) != 0 &&
- le16_to_cpu(dpe->wDatagramLength) != 0) {
- if (le16_to_cpu(dpe->wDatagramIndex) >= urb->actual_length ||
- le16_to_cpu(dpe->wDatagramIndex) +
- le16_to_cpu(dpe->wDatagramLength) > urb->actual_length) {
+ for (int dpe_i = 0; dpe_i < IPHETH_NDP16_MAX_DPE; ++dpe_i, ++dpe) {
+ dg_idx = le16_to_cpu(dpe->wDatagramIndex);
+ dg_len = le16_to_cpu(dpe->wDatagramLength);
+
+ /* Null DPE must be present after last datagram pointer entry
+ * (3.3.1 USB CDC NCM spec v1.0)
+ */
+ if (dg_idx == 0 && dg_len == 0)
+ return 0;
+
+ if (dg_idx < IPHETH_NCM_HEADER_SIZE ||
+ dg_idx >= urb->actual_length ||
+ dg_len > urb->actual_length - dg_idx) {
dev->net->stats.rx_length_errors++;
return retval;
}
- buf = urb->transfer_buffer + le16_to_cpu(dpe->wDatagramIndex);
- len = le16_to_cpu(dpe->wDatagramLength);
+ buf = urb->transfer_buffer + dg_idx;
- retval = ipheth_consume_skb(buf, len, dev);
+ retval = ipheth_consume_skb(buf, dg_len, dev);
if (retval != 0)
return retval;
-
- dpe++;
}
- return 0;
+rx_error:
+ dev->net->stats.rx_errors++;
+ return retval;
}
static void ipheth_rcvbulk_callback(struct urb *urb)
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 8adf77e3557e..00397a807393 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -6,6 +6,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/phylink.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/signal.h>
@@ -19,13 +20,13 @@
#include <linux/mdio.h>
#include <linux/phy.h>
#include <net/ip6_checksum.h>
+#include <net/selftests.h>
#include <net/vxlan.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/microchipphy.h>
-#include <linux/phy_fixed.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "lan78xx.h"
@@ -384,7 +385,7 @@ struct skb_data { /* skb->cb is one of these */
#define EVENT_RX_HALT 1
#define EVENT_RX_MEMORY 2
#define EVENT_STS_SPLIT 3
-#define EVENT_LINK_RESET 4
+#define EVENT_PHY_INT_ACK 4
#define EVENT_RX_PAUSED 5
#define EVENT_DEV_WAKING 6
#define EVENT_DEV_ASLEEP 7
@@ -413,7 +414,6 @@ struct lan78xx_net {
struct net_device *net;
struct usb_device *udev;
struct usb_interface *intf;
- void *driver_priv;
unsigned int tx_pend_data_len;
size_t n_tx_urbs;
@@ -439,7 +439,7 @@ struct lan78xx_net {
struct usb_anchor deferred;
struct mutex dev_mutex; /* serialise open/stop wrt suspend/resume */
- struct mutex phy_mutex; /* for phy access */
+ struct mutex mdiobus_mutex; /* for MDIO bus access */
unsigned int pipe_in, pipe_out, pipe_intr;
unsigned int bulk_in_delay;
@@ -448,33 +448,25 @@ struct lan78xx_net {
unsigned long flags;
wait_queue_head_t *wait;
- unsigned char suspend_count;
unsigned int maxpacket;
struct timer_list stat_monitor;
unsigned long data[5];
- int link_on;
- u8 mdix_ctrl;
-
u32 chipid;
u32 chiprev;
struct mii_bus *mdiobus;
phy_interface_t interface;
- int fc_autoneg;
- u8 fc_request_control;
-
int delta;
struct statstage stats;
struct irq_domain_data domain_data;
-};
-/* define external phy id */
-#define PHY_LAN8835 (0x0007C130)
-#define PHY_KSZ9031RNX (0x00221620)
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
+};
/* use ethtool to change the level for any given device */
static int msg_level = -1;
@@ -625,13 +617,13 @@ static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
*data = *buf;
} else if (net_ratelimit()) {
netdev_warn(dev->net,
- "Failed to read register index 0x%08x. ret = %d",
- index, ret);
+ "Failed to read register index 0x%08x. ret = %pe",
+ index, ERR_PTR(ret));
}
kfree(buf);
- return ret;
+ return ret < 0 ? ret : 0;
}
static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
@@ -656,13 +648,13 @@ static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
if (unlikely(ret < 0) &&
net_ratelimit()) {
netdev_warn(dev->net,
- "Failed to write register index 0x%08x. ret = %d",
- index, ret);
+ "Failed to write register index 0x%08x. ret = %pe",
+ index, ERR_PTR(ret));
}
kfree(buf);
- return ret;
+ return ret < 0 ? ret : 0;
}
static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
@@ -678,11 +670,7 @@ static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
buf &= ~mask;
buf |= (mask & data);
- ret = lan78xx_write_reg(dev, reg, buf);
- if (ret < 0)
- return ret;
-
- return 0;
+ return lan78xx_write_reg(dev, reg, buf);
}
static int lan78xx_read_stats(struct lan78xx_net *dev,
@@ -812,8 +800,156 @@ static void lan78xx_update_stats(struct lan78xx_net *dev)
usb_autopm_put_interface(dev->intf);
}
-/* Loop until the read is completed with timeout called with phy_mutex held */
-static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
+static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
+{
+ return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
+}
+
+static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
+ u32 hw_disabled)
+{
+ unsigned long timeout;
+ bool stopped = true;
+ int ret;
+ u32 buf;
+
+ /* Stop the h/w block (if not already stopped) */
+
+ ret = lan78xx_read_reg(dev, reg, &buf);
+ if (ret < 0)
+ return ret;
+
+ if (buf & hw_enabled) {
+ buf &= ~hw_enabled;
+
+ ret = lan78xx_write_reg(dev, reg, buf);
+ if (ret < 0)
+ return ret;
+
+ stopped = false;
+ timeout = jiffies + HW_DISABLE_TIMEOUT;
+ do {
+ ret = lan78xx_read_reg(dev, reg, &buf);
+ if (ret < 0)
+ return ret;
+
+ if (buf & hw_disabled)
+ stopped = true;
+ else
+ msleep(HW_DISABLE_DELAY_MS);
+ } while (!stopped && !time_after(jiffies, timeout));
+ }
+
+ return stopped ? 0 : -ETIMEDOUT;
+}
+
+static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
+{
+ return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
+}
+
+static int lan78xx_start_tx_path(struct lan78xx_net *dev)
+{
+ int ret;
+
+ netif_dbg(dev, drv, dev->net, "start tx path");
+
+ /* Start the MAC transmitter */
+
+ ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
+ if (ret < 0)
+ return ret;
+
+ /* Start the Tx FIFO */
+
+ ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
+{
+ int ret;
+
+ netif_dbg(dev, drv, dev->net, "stop tx path");
+
+ /* Stop the Tx FIFO */
+
+ ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
+ if (ret < 0)
+ return ret;
+
+ /* Stop the MAC transmitter */
+
+ ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/* The caller must ensure the Tx path is stopped before calling
+ * lan78xx_flush_tx_fifo().
+ */
+static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
+{
+ return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
+}
+
+static int lan78xx_start_rx_path(struct lan78xx_net *dev)
+{
+ int ret;
+
+ netif_dbg(dev, drv, dev->net, "start rx path");
+
+ /* Start the Rx FIFO */
+
+ ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
+ if (ret < 0)
+ return ret;
+
+ /* Start the MAC receiver */
+
+ ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
+{
+ int ret;
+
+ netif_dbg(dev, drv, dev->net, "stop rx path");
+
+ /* Stop the MAC receiver */
+
+ ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
+ if (ret < 0)
+ return ret;
+
+ /* Stop the Rx FIFO */
+
+ ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/* The caller must ensure the Rx path is stopped before calling
+ * lan78xx_flush_rx_fifo().
+ */
+static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
+{
+ return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
+}
+
+/* Loop until the read completes or times out; called with mdiobus_mutex held */
+static int lan78xx_mdiobus_wait_not_busy(struct lan78xx_net *dev)
{
unsigned long start_time = jiffies;
u32 val;
@@ -821,14 +957,14 @@ static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
do {
ret = lan78xx_read_reg(dev, MII_ACC, &val);
- if (unlikely(ret < 0))
- return -EIO;
+ if (ret < 0)
+ return ret;
if (!(val & MII_ACC_MII_BUSY_))
return 0;
} while (!time_after(jiffies, start_time + HZ));
- return -EIO;
+ return -ETIMEDOUT;
}
static inline u32 mii_access(int id, int index, int read)
@@ -854,8 +990,8 @@ static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
do {
ret = lan78xx_read_reg(dev, E2P_CMD, &val);
- if (unlikely(ret < 0))
- return -EIO;
+ if (ret < 0)
+ return ret;
if (!(val & E2P_CMD_EPC_BUSY_) ||
(val & E2P_CMD_EPC_TIMEOUT_))
@@ -865,7 +1001,7 @@ static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
netdev_warn(dev->net, "EEPROM read operation timeout");
- return -EIO;
+ return -ETIMEDOUT;
}
return 0;
@@ -879,8 +1015,8 @@ static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
do {
ret = lan78xx_read_reg(dev, E2P_CMD, &val);
- if (unlikely(ret < 0))
- return -EIO;
+ if (ret < 0)
+ return ret;
if (!(val & E2P_CMD_EPC_BUSY_))
return 0;
@@ -889,75 +1025,84 @@ static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
} while (!time_after(jiffies, start_time + HZ));
netdev_warn(dev->net, "EEPROM is busy");
- return -EIO;
+ return -ETIMEDOUT;
}
static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
u32 length, u8 *data)
{
- u32 val;
- u32 saved;
+ u32 val, saved;
int i, ret;
- int retval;
/* depends on chip, some EEPROM pins are muxed with LED function.
* disable & restore LED function to access EEPROM.
*/
ret = lan78xx_read_reg(dev, HW_CFG, &val);
+ if (ret < 0)
+ return ret;
+
saved = val;
if (dev->chipid == ID_REV_CHIP_ID_7800_) {
val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
ret = lan78xx_write_reg(dev, HW_CFG, val);
+ if (ret < 0)
+ return ret;
}
- retval = lan78xx_eeprom_confirm_not_busy(dev);
- if (retval)
- return retval;
+ ret = lan78xx_eeprom_confirm_not_busy(dev);
+ if (ret == -ETIMEDOUT)
+ goto read_raw_eeprom_done;
+ /* If USB fails, there is nothing to do */
+ if (ret < 0)
+ return ret;
for (i = 0; i < length; i++) {
val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
ret = lan78xx_write_reg(dev, E2P_CMD, val);
- if (unlikely(ret < 0)) {
- retval = -EIO;
- goto exit;
- }
+ if (ret < 0)
+ return ret;
- retval = lan78xx_wait_eeprom(dev);
- if (retval < 0)
- goto exit;
+ ret = lan78xx_wait_eeprom(dev);
+ /* Looks like a non-USB-specific error, try to recover */
+ if (ret == -ETIMEDOUT)
+ goto read_raw_eeprom_done;
+ /* If USB fails, there is nothing to do */
+ if (ret < 0)
+ return ret;
ret = lan78xx_read_reg(dev, E2P_DATA, &val);
- if (unlikely(ret < 0)) {
- retval = -EIO;
- goto exit;
- }
+ if (ret < 0)
+ return ret;
data[i] = val & 0xFF;
offset++;
}
- retval = 0;
-exit:
- if (dev->chipid == ID_REV_CHIP_ID_7800_)
- ret = lan78xx_write_reg(dev, HW_CFG, saved);
-
- return retval;
+read_raw_eeprom_done:
+ if (dev->chipid == ID_REV_CHIP_ID_7800_) {
+ int rc = lan78xx_write_reg(dev, HW_CFG, saved);
+ /* If USB fails, there is nothing to do */
+ if (rc < 0)
+ return rc;
+ }
+ return ret;
}
static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
u32 length, u8 *data)
{
- u8 sig;
int ret;
+ u8 sig;
ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
- if ((ret == 0) && (sig == EEPROM_INDICATOR))
- ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
- else
- ret = -EINVAL;
+ if (ret < 0)
+ return ret;
- return ret;
+ if (sig != EEPROM_INDICATOR)
+ return -ENODATA;
+
+ return lan78xx_read_raw_eeprom(dev, offset, length, data);
}
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
@@ -966,113 +1111,147 @@ static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
u32 val;
u32 saved;
int i, ret;
- int retval;
/* depends on chip, some EEPROM pins are muxed with LED function.
* disable & restore LED function to access EEPROM.
*/
ret = lan78xx_read_reg(dev, HW_CFG, &val);
+ if (ret < 0)
+ return ret;
+
saved = val;
if (dev->chipid == ID_REV_CHIP_ID_7800_) {
val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
ret = lan78xx_write_reg(dev, HW_CFG, val);
+ if (ret < 0)
+ return ret;
}
- retval = lan78xx_eeprom_confirm_not_busy(dev);
- if (retval)
- goto exit;
+ ret = lan78xx_eeprom_confirm_not_busy(dev);
+ /* Looks like a non-USB-specific error, try to recover */
+ if (ret == -ETIMEDOUT)
+ goto write_raw_eeprom_done;
+ /* If USB fails, there is nothing to do */
+ if (ret < 0)
+ return ret;
/* Issue write/erase enable command */
val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
ret = lan78xx_write_reg(dev, E2P_CMD, val);
- if (unlikely(ret < 0)) {
- retval = -EIO;
- goto exit;
- }
+ if (ret < 0)
+ return ret;
- retval = lan78xx_wait_eeprom(dev);
- if (retval < 0)
- goto exit;
+ ret = lan78xx_wait_eeprom(dev);
+ /* Looks like a non-USB-specific error, try to recover */
+ if (ret == -ETIMEDOUT)
+ goto write_raw_eeprom_done;
+ /* If USB fails, there is nothing to do */
+ if (ret < 0)
+ return ret;
for (i = 0; i < length; i++) {
/* Fill data register */
val = data[i];
ret = lan78xx_write_reg(dev, E2P_DATA, val);
- if (ret < 0) {
- retval = -EIO;
- goto exit;
- }
+ if (ret < 0)
+ return ret;
/* Send "write" command */
val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
ret = lan78xx_write_reg(dev, E2P_CMD, val);
- if (ret < 0) {
- retval = -EIO;
- goto exit;
- }
+ if (ret < 0)
+ return ret;
- retval = lan78xx_wait_eeprom(dev);
- if (retval < 0)
- goto exit;
+ ret = lan78xx_wait_eeprom(dev);
+ /* Looks like a non-USB-specific error, try to recover */
+ if (ret == -ETIMEDOUT)
+ goto write_raw_eeprom_done;
+ /* If USB fails, there is nothing to do */
+ if (ret < 0)
+ return ret;
offset++;
}
- retval = 0;
-exit:
- if (dev->chipid == ID_REV_CHIP_ID_7800_)
- ret = lan78xx_write_reg(dev, HW_CFG, saved);
-
- return retval;
+write_raw_eeprom_done:
+ if (dev->chipid == ID_REV_CHIP_ID_7800_) {
+ int rc = lan78xx_write_reg(dev, HW_CFG, saved);
+ /* If USB fails, there is nothing to do */
+ if (rc < 0)
+ return rc;
+ }
+ return ret;
}
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
u32 length, u8 *data)
{
- int i;
- u32 buf;
unsigned long timeout;
+ int ret, i;
+ u32 buf;
- lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ if (ret < 0)
+ return ret;
if (buf & OTP_PWR_DN_PWRDN_N_) {
/* clear it and wait to be cleared */
- lan78xx_write_reg(dev, OTP_PWR_DN, 0);
+ ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
+ if (ret < 0)
+ return ret;
timeout = jiffies + HZ;
do {
usleep_range(1, 10);
- lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ if (ret < 0)
+ return ret;
+
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"timeout on OTP_PWR_DN");
- return -EIO;
+ return -ETIMEDOUT;
}
} while (buf & OTP_PWR_DN_PWRDN_N_);
}
for (i = 0; i < length; i++) {
- lan78xx_write_reg(dev, OTP_ADDR1,
- ((offset + i) >> 8) & OTP_ADDR1_15_11);
- lan78xx_write_reg(dev, OTP_ADDR2,
- ((offset + i) & OTP_ADDR2_10_3));
+ ret = lan78xx_write_reg(dev, OTP_ADDR1,
+ ((offset + i) >> 8) & OTP_ADDR1_15_11);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, OTP_ADDR2,
+ ((offset + i) & OTP_ADDR2_10_3));
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
+ if (ret < 0)
+ return ret;
- lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
- lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
+ ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
+ if (ret < 0)
+ return ret;
timeout = jiffies + HZ;
do {
udelay(1);
- lan78xx_read_reg(dev, OTP_STATUS, &buf);
+ ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
+ if (ret < 0)
+ return ret;
+
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"timeout on OTP_STATUS");
- return -EIO;
+ return -ETIMEDOUT;
}
} while (buf & OTP_STATUS_BUSY_);
- lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
+ ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
+ if (ret < 0)
+ return ret;
data[i] = (u8)(buf & 0xFF);
}
@@ -1086,45 +1265,72 @@ static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
int i;
u32 buf;
unsigned long timeout;
+ int ret;
- lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ if (ret < 0)
+ return ret;
if (buf & OTP_PWR_DN_PWRDN_N_) {
/* clear it and wait to be cleared */
- lan78xx_write_reg(dev, OTP_PWR_DN, 0);
+ ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
+ if (ret < 0)
+ return ret;
timeout = jiffies + HZ;
do {
udelay(1);
- lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ if (ret < 0)
+ return ret;
+
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"timeout on OTP_PWR_DN completion");
- return -EIO;
+ return -ETIMEDOUT;
}
} while (buf & OTP_PWR_DN_PWRDN_N_);
}
/* set to BYTE program mode */
- lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
+ ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
+ if (ret < 0)
+ return ret;
for (i = 0; i < length; i++) {
- lan78xx_write_reg(dev, OTP_ADDR1,
- ((offset + i) >> 8) & OTP_ADDR1_15_11);
- lan78xx_write_reg(dev, OTP_ADDR2,
- ((offset + i) & OTP_ADDR2_10_3));
- lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
- lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
- lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
+ ret = lan78xx_write_reg(dev, OTP_ADDR1,
+ ((offset + i) >> 8) & OTP_ADDR1_15_11);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, OTP_ADDR2,
+ ((offset + i) & OTP_ADDR2_10_3));
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
+ if (ret < 0)
+ return ret;
timeout = jiffies + HZ;
do {
udelay(1);
- lan78xx_read_reg(dev, OTP_STATUS, &buf);
+ ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
+ if (ret < 0)
+ return ret;
+
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"Timeout on OTP_STATUS completion");
- return -EIO;
+ return -ETIMEDOUT;
}
} while (buf & OTP_STATUS_BUSY_);
}
@@ -1161,7 +1367,7 @@ static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
if (unlikely(ret < 0))
- return -EIO;
+ return ret;
if (dp_sel & DP_SEL_DPRDY_)
return 0;
@@ -1171,44 +1377,51 @@ static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
netdev_warn(dev->net, "%s timed out", __func__);
- return -EIO;
+ return -ETIMEDOUT;
}
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
u32 addr, u32 length, u32 *buf)
{
struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
- u32 dp_sel;
int i, ret;
- if (usb_autopm_get_interface(dev->intf) < 0)
- return 0;
+ ret = usb_autopm_get_interface(dev->intf);
+ if (ret < 0)
+ return ret;
mutex_lock(&pdata->dataport_mutex);
ret = lan78xx_dataport_wait_not_busy(dev);
if (ret < 0)
- goto done;
+ goto dataport_write;
- ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
-
- dp_sel &= ~DP_SEL_RSEL_MASK_;
- dp_sel |= ram_select;
- ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
+ ret = lan78xx_update_reg(dev, DP_SEL, DP_SEL_RSEL_MASK_, ram_select);
+ if (ret < 0)
+ goto dataport_write;
for (i = 0; i < length; i++) {
ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
+ if (ret < 0)
+ goto dataport_write;
ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
+ if (ret < 0)
+ goto dataport_write;
ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
+ if (ret < 0)
+ goto dataport_write;
ret = lan78xx_dataport_wait_not_busy(dev);
if (ret < 0)
- goto done;
+ goto dataport_write;
}
-done:
+dataport_write:
+ if (ret < 0)
+ netdev_warn(dev->net, "dataport write failed %pe", ERR_PTR(ret));
+
mutex_unlock(&pdata->dataport_mutex);
usb_autopm_put_interface(dev->intf);
@@ -1244,23 +1457,39 @@ static void lan78xx_deferred_multicast_write(struct work_struct *param)
struct lan78xx_priv *pdata =
container_of(param, struct lan78xx_priv, set_multicast);
struct lan78xx_net *dev = pdata->dev;
- int i;
+ int i, ret;
netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
pdata->rfe_ctl);
- lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
- DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
+ ret = lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_,
+ DP_SEL_VHF_VLAN_LEN,
+ DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
+ if (ret < 0)
+ goto multicast_write_done;
for (i = 1; i < NUM_OF_MAF; i++) {
- lan78xx_write_reg(dev, MAF_HI(i), 0);
- lan78xx_write_reg(dev, MAF_LO(i),
- pdata->pfilter_table[i][1]);
- lan78xx_write_reg(dev, MAF_HI(i),
- pdata->pfilter_table[i][0]);
+ ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
+ if (ret < 0)
+ goto multicast_write_done;
+
+ ret = lan78xx_write_reg(dev, MAF_LO(i),
+ pdata->pfilter_table[i][1]);
+ if (ret < 0)
+ goto multicast_write_done;
+
+ ret = lan78xx_write_reg(dev, MAF_HI(i),
+ pdata->pfilter_table[i][0]);
+ if (ret < 0)
+ goto multicast_write_done;
}
- lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+ ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+
+multicast_write_done:
+ if (ret < 0)
+ netdev_warn(dev->net, "multicast write failed %pe", ERR_PTR(ret));
+ return;
}
static void lan78xx_set_multicast(struct net_device *netdev)
@@ -1327,40 +1556,6 @@ static void lan78xx_set_multicast(struct net_device *netdev)
schedule_work(&pdata->set_multicast);
}
-static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
- u16 lcladv, u16 rmtadv)
-{
- u32 flow = 0, fct_flow = 0;
- u8 cap;
-
- if (dev->fc_autoneg)
- cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
- else
- cap = dev->fc_request_control;
-
- if (cap & FLOW_CTRL_TX)
- flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
-
- if (cap & FLOW_CTRL_RX)
- flow |= FLOW_CR_RX_FCEN_;
-
- if (dev->udev->speed == USB_SPEED_SUPER)
- fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
- else if (dev->udev->speed == USB_SPEED_HIGH)
- fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);
-
- netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
- (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
- (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
-
- lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
-
- /* threshold value should be set before enabling flow */
- lan78xx_write_reg(dev, FLOW, flow);
-
- return 0;
-}
-
static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev);
static int lan78xx_mac_reset(struct lan78xx_net *dev)
@@ -1369,24 +1564,24 @@ static int lan78xx_mac_reset(struct lan78xx_net *dev)
u32 val;
int ret;
- mutex_lock(&dev->phy_mutex);
+ mutex_lock(&dev->mdiobus_mutex);
/* Resetting the device while there is activity on the MDIO
* bus can result in the MAC interface locking up and not
* completing register access transactions.
*/
- ret = lan78xx_phy_wait_not_busy(dev);
+ ret = lan78xx_mdiobus_wait_not_busy(dev);
if (ret < 0)
- goto done;
+ goto exit_unlock;
ret = lan78xx_read_reg(dev, MAC_CR, &val);
if (ret < 0)
- goto done;
+ goto exit_unlock;
val |= MAC_CR_RST_;
ret = lan78xx_write_reg(dev, MAC_CR, val);
if (ret < 0)
- goto done;
+ goto exit_unlock;
/* Wait for the reset to complete before allowing any further
* MAC register accesses otherwise the MAC may lock up.
@@ -1394,114 +1589,33 @@ static int lan78xx_mac_reset(struct lan78xx_net *dev)
do {
ret = lan78xx_read_reg(dev, MAC_CR, &val);
if (ret < 0)
- goto done;
+ goto exit_unlock;
if (!(val & MAC_CR_RST_)) {
ret = 0;
- goto done;
+ goto exit_unlock;
}
} while (!time_after(jiffies, start_time + HZ));
ret = -ETIMEDOUT;
-done:
- mutex_unlock(&dev->phy_mutex);
+exit_unlock:
+ mutex_unlock(&dev->mdiobus_mutex);
return ret;
}
-static int lan78xx_link_reset(struct lan78xx_net *dev)
+/**
+ * lan78xx_phy_int_ack - Acknowledge PHY interrupt
+ * @dev: pointer to the LAN78xx device structure
+ *
+ * This function acknowledges the PHY interrupt by setting the
+ * INT_STS_PHY_INT_ bit in the interrupt status register (INT_STS).
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int lan78xx_phy_int_ack(struct lan78xx_net *dev)
{
- struct phy_device *phydev = dev->net->phydev;
- struct ethtool_link_ksettings ecmd;
- int ladv, radv, ret, link;
- u32 buf;
-
- /* clear LAN78xx interrupt status */
- ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
- if (unlikely(ret < 0))
- return ret;
-
- mutex_lock(&phydev->lock);
- phy_read_status(phydev);
- link = phydev->link;
- mutex_unlock(&phydev->lock);
-
- if (!link && dev->link_on) {
- dev->link_on = false;
-
- /* reset MAC */
- ret = lan78xx_mac_reset(dev);
- if (ret < 0)
- return ret;
-
- del_timer(&dev->stat_monitor);
- } else if (link && !dev->link_on) {
- dev->link_on = true;
-
- phy_ethtool_ksettings_get(phydev, &ecmd);
-
- if (dev->udev->speed == USB_SPEED_SUPER) {
- if (ecmd.base.speed == 1000) {
- /* disable U2 */
- ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
- if (ret < 0)
- return ret;
- buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
- ret = lan78xx_write_reg(dev, USB_CFG1, buf);
- if (ret < 0)
- return ret;
- /* enable U1 */
- ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
- if (ret < 0)
- return ret;
- buf |= USB_CFG1_DEV_U1_INIT_EN_;
- ret = lan78xx_write_reg(dev, USB_CFG1, buf);
- if (ret < 0)
- return ret;
- } else {
- /* enable U1 & U2 */
- ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
- if (ret < 0)
- return ret;
- buf |= USB_CFG1_DEV_U2_INIT_EN_;
- buf |= USB_CFG1_DEV_U1_INIT_EN_;
- ret = lan78xx_write_reg(dev, USB_CFG1, buf);
- if (ret < 0)
- return ret;
- }
- }
-
- ladv = phy_read(phydev, MII_ADVERTISE);
- if (ladv < 0)
- return ladv;
-
- radv = phy_read(phydev, MII_LPA);
- if (radv < 0)
- return radv;
-
- netif_dbg(dev, link, dev->net,
- "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
- ecmd.base.speed, ecmd.base.duplex, ladv, radv);
-
- ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
- radv);
- if (ret < 0)
- return ret;
-
- if (!timer_pending(&dev->stat_monitor)) {
- dev->delta = 1;
- mod_timer(&dev->stat_monitor,
- jiffies + STAT_UPDATE_TIMER);
- }
-
- lan78xx_rx_urb_submit_all(dev);
-
- local_bh_disable();
- napi_schedule(&dev->napi);
- local_bh_enable();
- }
-
- return 0;
+ return lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
}
/* some work can't be done in tasklets, so we use keventd
@@ -1530,7 +1644,7 @@ static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
if (intdata & INT_ENP_PHY_INT) {
netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
- lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
+ lan78xx_defer_kevent(dev, EVENT_PHY_INT_ACK);
if (dev->domain_data.phyirq > 0)
generic_handle_irq_safe(dev->domain_data.phyirq);
@@ -1595,12 +1709,16 @@ static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
{
if (stringset == ETH_SS_STATS)
memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
+ else if (stringset == ETH_SS_TEST)
+ net_selftest_get_strings(data);
}
static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
{
if (sset == ETH_SS_STATS)
return ARRAY_SIZE(lan78xx_gstrings);
+ else if (sset == ETH_SS_TEST)
+ return net_selftest_get_count();
else
return -EOPNOTSUPP;
}
@@ -1630,6 +1748,7 @@ static void lan78xx_get_wol(struct net_device *netdev,
ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
if (unlikely(ret < 0)) {
+ netdev_warn(dev->net, "failed to get WoL %pe", ERR_PTR(ret));
wol->supported = 0;
wol->wolopts = 0;
} else {
@@ -1652,19 +1771,22 @@ static int lan78xx_set_wol(struct net_device *netdev,
struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
int ret;
+ if (wol->wolopts & ~WAKE_ALL)
+ return -EINVAL;
+
ret = usb_autopm_get_interface(dev->intf);
if (ret < 0)
return ret;
- if (wol->wolopts & ~WAKE_ALL)
- return -EINVAL;
-
pdata->wol = wol->wolopts;
- device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
+ ret = device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
+ if (ret < 0)
+ goto exit_pm_put;
- phy_ethtool_set_wol(netdev->phydev, wol);
+ ret = phy_ethtool_set_wol(netdev->phydev, wol);
+exit_pm_put:
usb_autopm_put_interface(dev->intf);
return ret;
@@ -1673,66 +1795,15 @@ static int lan78xx_set_wol(struct net_device *netdev,
static int lan78xx_get_eee(struct net_device *net, struct ethtool_keee *edata)
{
struct lan78xx_net *dev = netdev_priv(net);
- struct phy_device *phydev = net->phydev;
- int ret;
- u32 buf;
-
- ret = usb_autopm_get_interface(dev->intf);
- if (ret < 0)
- return ret;
-
- ret = phy_ethtool_get_eee(phydev, edata);
- if (ret < 0)
- goto exit;
- ret = lan78xx_read_reg(dev, MAC_CR, &buf);
- if (buf & MAC_CR_EEE_EN_) {
- /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
- ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
- edata->tx_lpi_timer = buf;
- } else {
- edata->tx_lpi_timer = 0;
- }
-
- ret = 0;
-exit:
- usb_autopm_put_interface(dev->intf);
-
- return ret;
+ return phylink_ethtool_get_eee(dev->phylink, edata);
}
static int lan78xx_set_eee(struct net_device *net, struct ethtool_keee *edata)
{
struct lan78xx_net *dev = netdev_priv(net);
- int ret;
- u32 buf;
-
- ret = usb_autopm_get_interface(dev->intf);
- if (ret < 0)
- return ret;
-
- ret = phy_ethtool_set_eee(net->phydev, edata);
- if (ret < 0)
- goto out;
- buf = (u32)edata->tx_lpi_timer;
- ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
-out:
- usb_autopm_put_interface(dev->intf);
-
- return ret;
-}
-
-static u32 lan78xx_get_link(struct net_device *net)
-{
- u32 link;
-
- mutex_lock(&net->phydev->lock);
- phy_read_status(net->phydev);
- link = net->phydev->link;
- mutex_unlock(&net->phydev->lock);
-
- return link;
+ return phylink_ethtool_set_eee(dev->phylink, edata);
}
static void lan78xx_get_drvinfo(struct net_device *net,
@@ -1762,141 +1833,69 @@ static int lan78xx_get_link_ksettings(struct net_device *net,
struct ethtool_link_ksettings *cmd)
{
struct lan78xx_net *dev = netdev_priv(net);
- struct phy_device *phydev = net->phydev;
- int ret;
-
- ret = usb_autopm_get_interface(dev->intf);
- if (ret < 0)
- return ret;
-
- phy_ethtool_ksettings_get(phydev, cmd);
-
- usb_autopm_put_interface(dev->intf);
- return ret;
+ return phylink_ethtool_ksettings_get(dev->phylink, cmd);
}
static int lan78xx_set_link_ksettings(struct net_device *net,
const struct ethtool_link_ksettings *cmd)
{
struct lan78xx_net *dev = netdev_priv(net);
- struct phy_device *phydev = net->phydev;
- int ret = 0;
- int temp;
-
- ret = usb_autopm_get_interface(dev->intf);
- if (ret < 0)
- return ret;
- /* change speed & duplex */
- ret = phy_ethtool_ksettings_set(phydev, cmd);
-
- if (!cmd->base.autoneg) {
- /* force link down */
- temp = phy_read(phydev, MII_BMCR);
- phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
- mdelay(1);
- phy_write(phydev, MII_BMCR, temp);
- }
-
- usb_autopm_put_interface(dev->intf);
-
- return ret;
+ return phylink_ethtool_ksettings_set(dev->phylink, cmd);
}
static void lan78xx_get_pause(struct net_device *net,
struct ethtool_pauseparam *pause)
{
struct lan78xx_net *dev = netdev_priv(net);
- struct phy_device *phydev = net->phydev;
- struct ethtool_link_ksettings ecmd;
-
- phy_ethtool_ksettings_get(phydev, &ecmd);
- pause->autoneg = dev->fc_autoneg;
-
- if (dev->fc_request_control & FLOW_CTRL_TX)
- pause->tx_pause = 1;
-
- if (dev->fc_request_control & FLOW_CTRL_RX)
- pause->rx_pause = 1;
+ phylink_ethtool_get_pauseparam(dev->phylink, pause);
}
static int lan78xx_set_pause(struct net_device *net,
struct ethtool_pauseparam *pause)
{
struct lan78xx_net *dev = netdev_priv(net);
- struct phy_device *phydev = net->phydev;
- struct ethtool_link_ksettings ecmd;
- int ret;
-
- phy_ethtool_ksettings_get(phydev, &ecmd);
-
- if (pause->autoneg && !ecmd.base.autoneg) {
- ret = -EINVAL;
- goto exit;
- }
-
- dev->fc_request_control = 0;
- if (pause->rx_pause)
- dev->fc_request_control |= FLOW_CTRL_RX;
- if (pause->tx_pause)
- dev->fc_request_control |= FLOW_CTRL_TX;
-
- if (ecmd.base.autoneg) {
- __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
- u32 mii_adv;
-
- linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
- ecmd.link_modes.advertising);
- linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
- ecmd.link_modes.advertising);
- mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
- mii_adv_to_linkmode_adv_t(fc, mii_adv);
- linkmode_or(ecmd.link_modes.advertising, fc,
- ecmd.link_modes.advertising);
-
- phy_ethtool_ksettings_set(phydev, &ecmd);
- }
-
- dev->fc_autoneg = pause->autoneg;
-
- ret = 0;
-exit:
- return ret;
+ return phylink_ethtool_set_pauseparam(dev->phylink, pause);
}
static int lan78xx_get_regs_len(struct net_device *netdev)
{
- if (!netdev->phydev)
- return (sizeof(lan78xx_regs));
- else
- return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
+ return sizeof(lan78xx_regs);
}
static void
lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
void *buf)
{
- u32 *data = buf;
- int i, j;
struct lan78xx_net *dev = netdev_priv(netdev);
+ unsigned int data_count = 0;
+ u32 *data = buf;
+ int i, ret;
/* Read Device/MAC registers */
- for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
- lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
+ for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++) {
+ ret = lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
+ if (ret < 0) {
+ netdev_warn(dev->net,
+ "failed to read register 0x%08x\n",
+ lan78xx_regs[i]);
+ goto clean_data;
+ }
- if (!netdev->phydev)
- return;
+ data_count++;
+ }
+
+ return;
- /* Read PHY registers */
- for (j = 0; j < 32; i++, j++)
- data[i] = phy_read(netdev->phydev, j);
+clean_data:
+ memset(data, 0, data_count * sizeof(u32));
}
static const struct ethtool_ops lan78xx_ethtool_ops = {
- .get_link = lan78xx_get_link,
+ .get_link = ethtool_op_get_link,
.nway_reset = phy_ethtool_nway_reset,
.get_drvinfo = lan78xx_get_drvinfo,
.get_msglevel = lan78xx_get_msglevel,
@@ -1906,6 +1905,7 @@ static const struct ethtool_ops lan78xx_ethtool_ops = {
.set_eeprom = lan78xx_ethtool_set_eeprom,
.get_ethtool_stats = lan78xx_get_stats,
.get_sset_count = lan78xx_get_sset_count,
+ .self_test = net_selftest,
.get_strings = lan78xx_get_strings,
.get_wol = lan78xx_get_wol,
.set_wol = lan78xx_set_wol,
@@ -1920,13 +1920,19 @@ static const struct ethtool_ops lan78xx_ethtool_ops = {
.get_regs = lan78xx_get_regs,
};
-static void lan78xx_init_mac_address(struct lan78xx_net *dev)
+static int lan78xx_init_mac_address(struct lan78xx_net *dev)
{
u32 addr_lo, addr_hi;
u8 addr[6];
+ int ret;
- lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
- lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
+ ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
+ if (ret < 0)
+ return ret;
addr[0] = addr_lo & 0xFF;
addr[1] = (addr_lo >> 8) & 0xFF;
@@ -1959,14 +1965,26 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev)
(addr[2] << 16) | (addr[3] << 24);
addr_hi = addr[4] | (addr[5] << 8);
- lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
- lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+ ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+ if (ret < 0)
+ return ret;
}
- lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
- lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+ ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+ if (ret < 0)
+ return ret;
eth_hw_addr_set(dev->net, addr);
+
+ return 0;
}
/* MDIO read and write wrappers for phylib */
@@ -1980,27 +1998,31 @@ static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
if (ret < 0)
return ret;
- mutex_lock(&dev->phy_mutex);
+ mutex_lock(&dev->mdiobus_mutex);
/* confirm MII not busy */
- ret = lan78xx_phy_wait_not_busy(dev);
+ ret = lan78xx_mdiobus_wait_not_busy(dev);
if (ret < 0)
goto done;
/* set the address, index & direction (read from PHY) */
addr = mii_access(phy_id, idx, MII_READ);
ret = lan78xx_write_reg(dev, MII_ACC, addr);
+ if (ret < 0)
+ goto done;
- ret = lan78xx_phy_wait_not_busy(dev);
+ ret = lan78xx_mdiobus_wait_not_busy(dev);
if (ret < 0)
goto done;
ret = lan78xx_read_reg(dev, MII_DATA, &val);
+ if (ret < 0)
+ goto done;
ret = (int)(val & 0xFFFF);
done:
- mutex_unlock(&dev->phy_mutex);
+ mutex_unlock(&dev->mdiobus_mutex);
usb_autopm_put_interface(dev->intf);
return ret;
@@ -2017,28 +2039,32 @@ static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
if (ret < 0)
return ret;
- mutex_lock(&dev->phy_mutex);
+ mutex_lock(&dev->mdiobus_mutex);
/* confirm MII not busy */
- ret = lan78xx_phy_wait_not_busy(dev);
+ ret = lan78xx_mdiobus_wait_not_busy(dev);
if (ret < 0)
goto done;
val = (u32)regval;
ret = lan78xx_write_reg(dev, MII_DATA, val);
+ if (ret < 0)
+ goto done;
/* set the address, index & direction (write to PHY) */
addr = mii_access(phy_id, idx, MII_WRITE);
ret = lan78xx_write_reg(dev, MII_ACC, addr);
+ if (ret < 0)
+ goto done;
- ret = lan78xx_phy_wait_not_busy(dev);
+ ret = lan78xx_mdiobus_wait_not_busy(dev);
if (ret < 0)
goto done;
done:
- mutex_unlock(&dev->phy_mutex);
+ mutex_unlock(&dev->mdiobus_mutex);
usb_autopm_put_interface(dev->intf);
- return 0;
+ return ret;
}
static int lan78xx_mdio_init(struct lan78xx_net *dev)
@@ -2094,26 +2120,6 @@ static void lan78xx_remove_mdio(struct lan78xx_net *dev)
mdiobus_free(dev->mdiobus);
}
-static void lan78xx_link_status_change(struct net_device *net)
-{
- struct lan78xx_net *dev = netdev_priv(net);
- struct phy_device *phydev = net->phydev;
- u32 data;
- int ret;
-
- ret = lan78xx_read_reg(dev, MAC_CR, &data);
- if (ret < 0)
- return;
-
- if (phydev->enable_tx_lpi)
- data |= MAC_CR_EEE_EN_;
- else
- data &= ~MAC_CR_EEE_EN_;
- lan78xx_write_reg(dev, MAC_CR, data);
-
- phy_print_status(phydev);
-}
-
static int irq_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
@@ -2164,13 +2170,22 @@ static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
struct lan78xx_net *dev =
container_of(data, struct lan78xx_net, domain_data);
u32 buf;
+ int ret;
 	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
 	 * are the only two callbacks executed in non-atomic context.
*/
- lan78xx_read_reg(dev, INT_EP_CTL, &buf);
+ ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
+ if (ret < 0)
+ goto irq_bus_sync_unlock;
+
if (buf != data->irqenable)
- lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
+ ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
+
+irq_bus_sync_unlock:
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to sync IRQ enable register: %pe\n",
+ ERR_PTR(ret));
mutex_unlock(&data->irq_lock);
}
@@ -2185,24 +2200,24 @@ static struct irq_chip lan78xx_irqchip = {
static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
{
- struct device_node *of_node;
struct irq_domain *irqdomain;
unsigned int irqmap = 0;
u32 buf;
int ret = 0;
- of_node = dev->udev->dev.parent->of_node;
-
mutex_init(&dev->domain_data.irq_lock);
- lan78xx_read_reg(dev, INT_EP_CTL, &buf);
+ ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
+ if (ret < 0)
+ return ret;
+
dev->domain_data.irqenable = buf;
dev->domain_data.irqchip = &lan78xx_irqchip;
dev->domain_data.irq_handler = handle_simple_irq;
- irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
- &chip_domain_ops, &dev->domain_data);
+ irqdomain = irq_domain_create_simple(dev_fwnode(dev->udev->dev.parent), MAX_INT_EP, 0,
+ &chip_domain_ops, &dev->domain_data);
if (irqdomain) {
/* create mapping for PHY interrupt */
irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
@@ -2234,131 +2249,617 @@ static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
dev->domain_data.irqdomain = NULL;
}
-static int lan8835_fixup(struct phy_device *phydev)
+static void lan78xx_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
{
- int buf;
- struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
+ struct net_device *net = to_net_dev(config->dev);
+ struct lan78xx_net *dev = netdev_priv(net);
+ u32 mac_cr = 0;
+ int ret;
- /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
- buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
- buf &= ~0x1800;
- buf |= 0x0800;
- phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
+ /* Check if the mode is supported */
+ if (mode != MLO_AN_FIXED && mode != MLO_AN_PHY) {
+ netdev_err(net, "Unsupported negotiation mode: %u\n", mode);
+ return;
+ }
- /* RGMII MAC TXC Delay Enable */
- lan78xx_write_reg(dev, MAC_RGMII_ID,
- MAC_RGMII_ID_TXC_DELAY_EN_);
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_GMII:
+ mac_cr |= MAC_CR_GMII_EN_;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ break;
+ default:
+ netdev_warn(net, "Unsupported interface mode: %d\n",
+ state->interface);
+ return;
+ }
- /* RGMII TX DLL Tune Adjust */
- lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
+ ret = lan78xx_update_reg(dev, MAC_CR, MAC_CR_GMII_EN_, mac_cr);
+ if (ret < 0)
+ netdev_err(net, "Failed to config MAC with error %pe\n",
+ ERR_PTR(ret));
+}
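lan78xx_mac_config() and many of the converted register paths above rely on lan78xx_update_reg(dev, reg, mask, val), which is not visible in this hunk. From the call sites (clearing USB_CFG1_DEV_U2_INIT_EN_ with val = 0, or setting MAC_CR_GMII_EN_ by passing the bit in both mask and val) it behaves as a masked read-modify-write. The sketch below shows that assumed semantics only; the real helper lives elsewhere in the driver and may differ in detail.

/* Assumed behaviour of lan78xx_update_reg(): clear the bits in @mask,
 * then set whichever of those bits are present in @val.
 */
static int example_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
			      u32 val)
{
	u32 buf;
	int ret;

	ret = lan78xx_read_reg(dev, reg, &buf);
	if (ret < 0)
		return ret;

	buf &= ~mask;		/* drop the field being updated */
	buf |= (val & mask);	/* install the new field value  */

	return lan78xx_write_reg(dev, reg, buf);
}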
- dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
+static void lan78xx_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct net_device *net = to_net_dev(config->dev);
+ struct lan78xx_net *dev = netdev_priv(net);
+ int ret;
- return 1;
+ netif_stop_queue(net);
+
+ /* MAC reset will not de-assert TXEN/RXEN, we need to stop them
+ * manually before reset. TX and RX should be disabled before running
+ * link_up sequence.
+ */
+ ret = lan78xx_stop_tx_path(dev);
+ if (ret < 0)
+ goto link_down_fail;
+
+ ret = lan78xx_stop_rx_path(dev);
+ if (ret < 0)
+ goto link_down_fail;
+
+	/* MAC reset does not appear to affect the MAC configuration. It is
+	 * unclear whether it is really needed, but it was done in previous
+	 * driver versions, so leave it here.
+ */
+ ret = lan78xx_mac_reset(dev);
+ if (ret < 0)
+ goto link_down_fail;
+
+ return;
+
+link_down_fail:
+ netdev_err(dev->net, "Failed to set MAC down with error %pe\n",
+ ERR_PTR(ret));
}
-static int ksz9031rnx_fixup(struct phy_device *phydev)
+/**
+ * lan78xx_configure_usb - Configure USB link power settings
+ * @dev: pointer to the LAN78xx device structure
+ * @speed: negotiated Ethernet link speed (in Mbps)
+ *
+ * This function configures U1/U2 link power management for SuperSpeed
+ * USB devices based on the current Ethernet link speed. It uses the
+ * USB_CFG1 register to enable or disable U1 and U2 low-power states.
+ *
+ * Note: Only LAN7800 and LAN7801 support SuperSpeed (USB 3.x).
+ * LAN7850 is a High-Speed-only (USB 2.0) device and is skipped.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int lan78xx_configure_usb(struct lan78xx_net *dev, int speed)
{
- struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
+ u32 mask, val;
+ int ret;
+
+ /* Only configure USB settings for SuperSpeed devices */
+ if (dev->udev->speed != USB_SPEED_SUPER)
+ return 0;
- /* Micrel9301RNX PHY configuration */
- /* RGMII Control Signal Pad Skew */
- phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
- /* RGMII RX Data Pad Skew */
- phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
- /* RGMII RX Clock Pad Skew */
- phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
+ /* LAN7850 does not support USB 3.x */
+ if (dev->chipid == ID_REV_CHIP_ID_7850_) {
+ netdev_warn_once(dev->net, "Unexpected SuperSpeed for LAN7850 (USB 2.0 only)\n");
+ return 0;
+ }
- dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
+ switch (speed) {
+ case SPEED_1000:
+ /* Disable U2, enable U1 */
+ ret = lan78xx_update_reg(dev, USB_CFG1,
+ USB_CFG1_DEV_U2_INIT_EN_, 0);
+ if (ret < 0)
+ return ret;
- return 1;
+ return lan78xx_update_reg(dev, USB_CFG1,
+ USB_CFG1_DEV_U1_INIT_EN_,
+ USB_CFG1_DEV_U1_INIT_EN_);
+
+ case SPEED_100:
+ case SPEED_10:
+ /* Enable both U1 and U2 */
+ mask = USB_CFG1_DEV_U1_INIT_EN_ | USB_CFG1_DEV_U2_INIT_EN_;
+ val = mask;
+ return lan78xx_update_reg(dev, USB_CFG1, mask, val);
+
+ default:
+ netdev_warn(dev->net, "Unsupported link speed: %d\n", speed);
+ return -EINVAL;
+ }
+}
+
+/**
+ * lan78xx_configure_flowcontrol - Set MAC and FIFO flow control configuration
+ * @dev: pointer to the LAN78xx device structure
+ * @tx_pause: enable transmission of pause frames
+ * @rx_pause: enable reception of pause frames
+ *
+ * This function configures the LAN78xx flow control settings by writing
+ * to the FLOW and FCT_FLOW registers. The pause time is set to the
+ * maximum allowed value (65535 quanta). FIFO thresholds are selected
+ * based on USB speed.
+ *
+ * The Pause Time field is measured in units of 512-bit times (quanta):
+ * - At 1 Gbps: 1 quanta = 512 ns → max ~33.6 ms pause
+ * - At 100 Mbps: 1 quanta = 5.12 µs → max ~335 ms pause
+ * - At 10 Mbps: 1 quanta = 51.2 µs → max ~3.3 s pause
+ *
+ * Flow control thresholds (FCT_FLOW) are used to trigger pause/resume:
+ * - RXUSED is the number of bytes used in the RX FIFO
+ * - Flow is turned ON when RXUSED ≥ FLOW_ON threshold
+ * - Flow is turned OFF when RXUSED ≤ FLOW_OFF threshold
+ * - Both thresholds are encoded in units of 512 bytes (rounded up)
+ *
+ * Thresholds differ by USB speed because available USB bandwidth
+ * affects how fast packets can be drained from the RX FIFO:
+ * - USB 3.x (SuperSpeed):
+ * FLOW_ON = 9216 bytes → 18 units
+ * FLOW_OFF = 4096 bytes → 8 units
+ * - USB 2.0 (High-Speed):
+ * FLOW_ON = 8704 bytes → 17 units
+ * FLOW_OFF = 1024 bytes → 2 units
+ *
+ * Note: The FCT_FLOW register must be configured before enabling TX pause
+ * (i.e., before setting FLOW_CR_TX_FCEN_), as required by the hardware.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int lan78xx_configure_flowcontrol(struct lan78xx_net *dev,
+ bool tx_pause, bool rx_pause)
+{
+ /* Use maximum pause time: 65535 quanta (512-bit times) */
+ const u32 pause_time_quanta = 65535;
+ u32 fct_flow = 0;
+ u32 flow = 0;
+ int ret;
+
+ /* Prepare MAC flow control bits */
+ if (tx_pause)
+ flow |= FLOW_CR_TX_FCEN_ | pause_time_quanta;
+
+ if (rx_pause)
+ flow |= FLOW_CR_RX_FCEN_;
+
+ /* Select RX FIFO thresholds based on USB speed
+ *
+ * FCT_FLOW layout:
+ * bits [6:0] FLOW_ON threshold (RXUSED ≥ ON → assert pause)
+ * bits [14:8] FLOW_OFF threshold (RXUSED ≤ OFF → deassert pause)
+ * thresholds are expressed in units of 512 bytes
+ */
+ switch (dev->udev->speed) {
+ case USB_SPEED_SUPER:
+ fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
+ break;
+ case USB_SPEED_HIGH:
+ fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);
+ break;
+ default:
+ netdev_warn(dev->net, "Unsupported USB speed: %d\n",
+ dev->udev->speed);
+ return -EINVAL;
+ }
+
+ /* Step 1: Write FIFO thresholds before enabling pause frames */
+ ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
+ if (ret < 0)
+ return ret;
+
+ /* Step 2: Enable MAC pause functionality */
+ return lan78xx_write_reg(dev, FLOW, flow);
}
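The numbers in the lan78xx_configure_flowcontrol() comment can be reproduced directly. The snippet below is a self-contained check, assuming FLOW_CTRL_THRESHOLD() packs the on/off levels as 512-byte units rounded up, ON in bits [6:0] and OFF in bits [14:8], exactly as the register layout description above states; the macro definitions here mirror that description rather than the driver header.

#include <stdio.h>

#define FLOW_THRESHOLD(n)		((((n) + 511) / 512) & 0x7F)
#define FLOW_CTRL_THRESHOLD(on, off)	((FLOW_THRESHOLD(on) << 0) | \
					 (FLOW_THRESHOLD(off) << 8))

int main(void)
{
	/* SuperSpeed: 9216 bytes -> 18 units, 4096 bytes -> 8 units */
	printf("SS FCT_FLOW = 0x%04x\n", FLOW_CTRL_THRESHOLD(9216, 4096));
	/* High-Speed: 8704 bytes -> 17 units, 1024 bytes -> 2 units */
	printf("HS FCT_FLOW = 0x%04x\n", FLOW_CTRL_THRESHOLD(8704, 1024));

	/* Pause time: 65535 quanta of 512 bit-times each.
	 * At 1 Gbps one quantum is 512 ns, so the longest pause is
	 * 65535 * 512 ns ~= 33.6 ms (335 ms at 100 Mbps, 3.3 s at 10 Mbps).
	 */
	printf("max pause @1G = %.1f ms\n", 65535 * 512e-9 * 1e3);

	return 0;
}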
-static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
+static void lan78xx_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
- u32 buf;
+ struct net_device *net = to_net_dev(config->dev);
+ struct lan78xx_net *dev = netdev_priv(net);
+ u32 mac_cr = 0;
int ret;
- struct fixed_phy_status fphy_status = {
- .link = 1,
+
+ switch (speed) {
+ case SPEED_1000:
+ mac_cr |= MAC_CR_SPEED_1000_;
+ break;
+ case SPEED_100:
+ mac_cr |= MAC_CR_SPEED_100_;
+ break;
+ case SPEED_10:
+ mac_cr |= MAC_CR_SPEED_10_;
+ break;
+ default:
+ netdev_err(dev->net, "Unsupported speed %d\n", speed);
+ return;
+ }
+
+ if (duplex == DUPLEX_FULL)
+ mac_cr |= MAC_CR_FULL_DUPLEX_;
+
+ /* make sure TXEN and RXEN are disabled before reconfiguring MAC */
+ ret = lan78xx_update_reg(dev, MAC_CR, MAC_CR_SPEED_MASK_ |
+ MAC_CR_FULL_DUPLEX_ | MAC_CR_EEE_EN_, mac_cr);
+ if (ret < 0)
+ goto link_up_fail;
+
+ ret = lan78xx_configure_flowcontrol(dev, tx_pause, rx_pause);
+ if (ret < 0)
+ goto link_up_fail;
+
+ ret = lan78xx_configure_usb(dev, speed);
+ if (ret < 0)
+ goto link_up_fail;
+
+ lan78xx_rx_urb_submit_all(dev);
+
+ ret = lan78xx_flush_rx_fifo(dev);
+ if (ret < 0)
+ goto link_up_fail;
+
+ ret = lan78xx_flush_tx_fifo(dev);
+ if (ret < 0)
+ goto link_up_fail;
+
+ ret = lan78xx_start_tx_path(dev);
+ if (ret < 0)
+ goto link_up_fail;
+
+ ret = lan78xx_start_rx_path(dev);
+ if (ret < 0)
+ goto link_up_fail;
+
+ netif_start_queue(net);
+
+ return;
+
+link_up_fail:
+ netdev_err(dev->net, "Failed to set MAC up with error %pe\n",
+ ERR_PTR(ret));
+}
+
+/**
+ * lan78xx_mac_eee_enable - Enable or disable MAC-side EEE support
+ * @dev: LAN78xx device
+ * @enable: true to enable EEE, false to disable
+ *
+ * This function sets or clears the MAC_CR_EEE_EN_ bit to control Energy
+ * Efficient Ethernet (EEE) operation. According to current understanding
+ * of the LAN7800 documentation, this bit can be modified while TX and RX
+ * are enabled. No explicit requirement was found to disable data paths
+ * before changing this bit.
+ *
+ * Return: 0 on success or a negative error code
+ */
+static int lan78xx_mac_eee_enable(struct lan78xx_net *dev, bool enable)
+{
+ u32 mac_cr = 0;
+
+ if (enable)
+ mac_cr |= MAC_CR_EEE_EN_;
+
+ return lan78xx_update_reg(dev, MAC_CR, MAC_CR_EEE_EN_, mac_cr);
+}
+
+static void lan78xx_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct net_device *net = to_net_dev(config->dev);
+ struct lan78xx_net *dev = netdev_priv(net);
+
+ lan78xx_mac_eee_enable(dev, false);
+}
+
+static int lan78xx_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop)
+{
+ struct net_device *net = to_net_dev(config->dev);
+ struct lan78xx_net *dev = netdev_priv(net);
+ int ret;
+
+ /* Software should only change this field when Energy Efficient
+ * Ethernet Enable (EEEEN) is cleared. We ensure that by clearing
+ * EEEEN during probe, and phylink itself guarantees that
+ * mac_disable_tx_lpi() will have been previously called.
+ */
+ ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, timer);
+ if (ret < 0)
+ return ret;
+
+ return lan78xx_mac_eee_enable(dev, true);
+}
+
+static const struct phylink_mac_ops lan78xx_phylink_mac_ops = {
+ .mac_config = lan78xx_mac_config,
+ .mac_link_down = lan78xx_mac_link_down,
+ .mac_link_up = lan78xx_mac_link_up,
+ .mac_disable_tx_lpi = lan78xx_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = lan78xx_mac_enable_tx_lpi,
+};
+
+/**
+ * lan78xx_set_fixed_link() - Set fixed link configuration for LAN7801
+ * @dev: LAN78xx device
+ *
+ * Use fixed link configuration with 1 Gbps full duplex. This is used in special
+ * cases like EVB-KSZ9897-1, where LAN7801 acts as a USB-to-Ethernet interface
+ * to a switch without a visible PHY.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int lan78xx_set_fixed_link(struct lan78xx_net *dev)
+{
+ static const struct phylink_link_state state = {
.speed = SPEED_1000,
.duplex = DUPLEX_FULL,
};
+
+ netdev_info(dev->net,
+ "No PHY found on LAN7801 – using fixed link instead (e.g. EVB-KSZ9897-1)\n");
+
+ return phylink_set_fixed_link(dev->phylink, &state);
+}
+
+/**
+ * lan78xx_get_phy() - Probe or register PHY device and set interface mode
+ * @dev: LAN78xx device structure
+ *
+ * This function attempts to find a PHY on the MDIO bus. If no PHY is found
+ * and the chip is LAN7801, it returns NULL so the caller can fall back to a
+ * fixed link. It also sets dev->interface based on chip ID and the detected
+ * PHY type.
+ *
+ * Return: a valid PHY device pointer, or ERR_PTR() on failure.
+ */
+static struct phy_device *lan78xx_get_phy(struct lan78xx_net *dev)
+{
struct phy_device *phydev;
+ /* Attempt to locate a PHY on the MDIO bus */
phydev = phy_find_first(dev->mdiobus);
- if (!phydev) {
- netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
- phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
- if (IS_ERR(phydev)) {
- netdev_err(dev->net, "No PHY/fixed_PHY found\n");
- return NULL;
+
+ switch (dev->chipid) {
+ case ID_REV_CHIP_ID_7801_:
+ if (phydev) {
+ /* External RGMII PHY detected */
+ dev->interface = PHY_INTERFACE_MODE_RGMII_ID;
+ phydev->is_internal = false;
+
+ if (!phydev->drv)
+ netdev_warn(dev->net,
+ "PHY driver not found – assuming RGMII delays are on PCB or strapped for the PHY\n");
+
+ return phydev;
}
- netdev_dbg(dev->net, "Registered FIXED PHY\n");
+
dev->interface = PHY_INTERFACE_MODE_RGMII;
+	/* No PHY found – fall back to a fixed link (e.g. KSZ switch board) */
+ return NULL;
+
+ case ID_REV_CHIP_ID_7800_:
+ case ID_REV_CHIP_ID_7850_:
+ if (!phydev)
+ return ERR_PTR(-ENODEV);
+
+ /* These use internal GMII-connected PHY */
+ dev->interface = PHY_INTERFACE_MODE_GMII;
+ phydev->is_internal = true;
+ return phydev;
+
+ default:
+ netdev_err(dev->net, "Unknown CHIP ID: 0x%08x\n", dev->chipid);
+ return ERR_PTR(-ENODEV);
+ }
+}
+
+/**
+ * lan78xx_mac_prepare_for_phy() - Preconfigure MAC-side interface settings
+ * @dev: LAN78xx device
+ *
+ * Configure MAC-side registers according to dev->interface, which should be
+ * set by lan78xx_get_phy().
+ *
+ * - For PHY_INTERFACE_MODE_RGMII:
+ * Enable MAC-side TXC delay. This mode seems to be used in a special setup
+ * without a real PHY, likely on EVB-KSZ9897-1. In that design, LAN7801 is
+ * connected to the KSZ9897 switch, and the link timing is expected to be
+ * hardwired (e.g. via strapping or board layout). No devicetree support is
+ * assumed here.
+ *
+ * - For PHY_INTERFACE_MODE_RGMII_ID:
+ * Disable MAC-side delay and rely on the PHY driver to provide delay.
+ *
+ * - For GMII, no MAC-specific config is needed.
+ *
+ * Return: 0 on success or a negative error code.
+ */
+static int lan78xx_mac_prepare_for_phy(struct lan78xx_net *dev)
+{
+ int ret;
+
+ switch (dev->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ /* Enable MAC-side TX clock delay */
ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
MAC_RGMII_ID_TXC_DELAY_EN_);
+ if (ret < 0)
+ return ret;
+
ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
- ret = lan78xx_read_reg(dev, HW_CFG, &buf);
- buf |= HW_CFG_CLK125_EN_;
- buf |= HW_CFG_REFCLK25_EN_;
- ret = lan78xx_write_reg(dev, HW_CFG, buf);
- } else {
- if (!phydev->drv) {
- netdev_err(dev->net, "no PHY driver found\n");
- return NULL;
- }
- dev->interface = PHY_INTERFACE_MODE_RGMII;
- /* external PHY fixup for KSZ9031RNX */
- ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
- ksz9031rnx_fixup);
- if (ret < 0) {
- netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
- return NULL;
- }
- /* external PHY fixup for LAN8835 */
- ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
- lan8835_fixup);
- if (ret < 0) {
- netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
- return NULL;
- }
- /* add more external PHY fixup here if needed */
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_update_reg(dev, HW_CFG,
+ HW_CFG_CLK125_EN_ | HW_CFG_REFCLK25_EN_,
+ HW_CFG_CLK125_EN_ | HW_CFG_REFCLK25_EN_);
+ if (ret < 0)
+ return ret;
+
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ /* Disable MAC-side TXC delay, PHY provides it */
+ ret = lan78xx_write_reg(dev, MAC_RGMII_ID, 0);
+ if (ret < 0)
+ return ret;
+
+ break;
+
+ case PHY_INTERFACE_MODE_GMII:
+ /* No MAC-specific configuration required */
+ break;
+
+ default:
+ netdev_warn(dev->net, "Unsupported interface mode: %d\n",
+ dev->interface);
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * lan78xx_configure_leds_from_dt() - Configure LED enables based on DT
+ * @dev: LAN78xx device
+ * @phydev: PHY device (must be valid)
+ *
+ * Reads "microchip,led-modes" property from the PHY's DT node and enables
+ * the corresponding number of LEDs by writing to HW_CFG.
+ *
+ * This helper preserves the original logic, enabling up to 4 LEDs.
+ * If the property is not present, this function does nothing.
+ *
+ * Return: 0 on success or a negative error code.
+ */
+static int lan78xx_configure_leds_from_dt(struct lan78xx_net *dev,
+ struct phy_device *phydev)
+{
+ struct device_node *np = phydev->mdio.dev.of_node;
+ u32 reg;
+ int len, ret;
+
+ if (!np)
+ return 0;
+
+ len = of_property_count_elems_of_size(np, "microchip,led-modes",
+ sizeof(u32));
+ if (len < 0)
+ return 0;
+
+ ret = lan78xx_read_reg(dev, HW_CFG, &reg);
+ if (ret < 0)
+ return ret;
+
+ reg &= ~(HW_CFG_LED0_EN_ | HW_CFG_LED1_EN_ |
+ HW_CFG_LED2_EN_ | HW_CFG_LED3_EN_);
+
+ reg |= (len > 0) * HW_CFG_LED0_EN_ |
+ (len > 1) * HW_CFG_LED1_EN_ |
+ (len > 2) * HW_CFG_LED2_EN_ |
+ (len > 3) * HW_CFG_LED3_EN_;
+
+ return lan78xx_write_reg(dev, HW_CFG, reg);
+}
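The (len > N) * HW_CFG_LEDn_EN_ expressions above use the fact that a relational comparison evaluates to 0 or 1, so each term contributes either the enable bit or nothing. For example, with a "microchip,led-modes" property listing two LEDs (len = 2), the OR chain reduces as follows:

/* Worked expansion for len = 2:
 *   (2 > 0) * HW_CFG_LED0_EN_  ->  1 * HW_CFG_LED0_EN_  ->  LED0 enabled
 *   (2 > 1) * HW_CFG_LED1_EN_  ->  1 * HW_CFG_LED1_EN_  ->  LED1 enabled
 *   (2 > 2) * HW_CFG_LED2_EN_  ->  0                    ->  LED2 stays off
 *   (2 > 3) * HW_CFG_LED3_EN_  ->  0                    ->  LED3 stays off
 * so after the four enable bits are cleared, only LED0 and LED1 are set again.
 */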
+
+static int lan78xx_phylink_setup(struct lan78xx_net *dev)
+{
+ struct phylink_config *pc = &dev->phylink_config;
+ struct phylink *phylink;
- phydev->is_internal = false;
+ pc->dev = &dev->net->dev;
+ pc->type = PHYLINK_NETDEV;
+ pc->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10 |
+ MAC_100 | MAC_1000FD;
+ pc->mac_managed_pm = true;
+ pc->lpi_capabilities = MAC_100FD | MAC_1000FD;
+ /*
+ * Default TX LPI (Low Power Idle) request delay count is set to 50us.
+ *
+ * Source: LAN7800 Documentation, DS00001992H, Section 15.1.57, Page 204.
+ *
+ * Reasoning:
+ * According to the application note in the LAN7800 documentation, a
+ * zero delay may negatively impact the TX data path’s ability to
+ * support Gigabit operation. A value of 50us is recommended as a
+ * reasonable default when the part operates at Gigabit speeds,
+ * balancing stability and power efficiency in EEE mode. This delay can
+ * be increased based on performance testing, as EEE is designed for
+ * scenarios with mostly idle links and occasional bursts of full
+ * bandwidth transmission. The goal is to ensure reliable Gigabit
+ * performance without overly aggressive power optimization during
+ * inactive periods.
+ */
+ pc->lpi_timer_default = 50;
+ pc->eee_enabled_default = true;
+
+ if (dev->chipid == ID_REV_CHIP_ID_7801_)
+ phy_interface_set_rgmii(pc->supported_interfaces);
+ else
+ __set_bit(PHY_INTERFACE_MODE_GMII, pc->supported_interfaces);
+
+ memcpy(dev->phylink_config.lpi_interfaces,
+ dev->phylink_config.supported_interfaces,
+ sizeof(dev->phylink_config.lpi_interfaces));
+
+ phylink = phylink_create(pc, dev->net->dev.fwnode,
+ dev->interface, &lan78xx_phylink_mac_ops);
+ if (IS_ERR(phylink))
+ return PTR_ERR(phylink);
+
+ dev->phylink = phylink;
+
+ return 0;
+}
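The lpi_timer_default of 50 set above is in microseconds: the comment removed from lan78xx_get_eee() noted that EEE_TX_LPI_REQ_DLY and tx_lpi_timer share the same microsecond unit, and lan78xx_mac_enable_tx_lpi() writes the timer value straight into that register. Assuming phylink hands the configured LPI timer to the MAC op unchanged (an assumption about the phylink call, not shown in this patch), the default propagates roughly like this:

/* Illustration only: expected flow of the 50 us default once EEE is active.
 *
 *   phylink -> lan78xx_mac_enable_tx_lpi(config, 50, false)
 *                -> lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, 50);
 *                -> lan78xx_mac_eee_enable(dev, true);   // sets MAC_CR_EEE_EN_
 */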
+
+static void lan78xx_phy_uninit(struct lan78xx_net *dev)
+{
+ if (dev->phylink) {
+ phylink_disconnect_phy(dev->phylink);
+ phylink_destroy(dev->phylink);
+ dev->phylink = NULL;
}
- return phydev;
}
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
- int ret;
- u32 mii_adv;
struct phy_device *phydev;
+ int ret;
- switch (dev->chipid) {
- case ID_REV_CHIP_ID_7801_:
- phydev = lan7801_phy_init(dev);
- if (!phydev) {
- netdev_err(dev->net, "lan7801: PHY Init Failed");
- return -EIO;
- }
- break;
+ phydev = lan78xx_get_phy(dev);
+ /* phydev can be NULL if no PHY is found and the chip is LAN7801,
+ * which will use a fixed link later.
+ * If an error occurs, return the error code immediately.
+ */
+ if (IS_ERR(phydev))
+ return PTR_ERR(phydev);
- case ID_REV_CHIP_ID_7800_:
- case ID_REV_CHIP_ID_7850_:
- phydev = phy_find_first(dev->mdiobus);
- if (!phydev) {
- netdev_err(dev->net, "no PHY found\n");
- return -EIO;
- }
- phydev->is_internal = true;
- dev->interface = PHY_INTERFACE_MODE_GMII;
- break;
+ ret = lan78xx_phylink_setup(dev);
+ if (ret < 0)
+ return ret;
- default:
- netdev_err(dev->net, "Unknown CHIP ID found\n");
- return -EIO;
+ ret = lan78xx_mac_prepare_for_phy(dev);
+ if (ret < 0)
+ goto phylink_uninit;
+
+ /* If no PHY is found, set up a fixed link. It is very specific to
+ * the LAN7801 and is used in special cases like EVB-KSZ9897-1 where
+ * LAN7801 acts as a USB-to-Ethernet interface to a switch without
+ * a visible PHY.
+ */
+ if (!phydev) {
+ ret = lan78xx_set_fixed_link(dev);
+ if (ret < 0)
+ goto phylink_uninit;
+
+ /* No PHY found, so set up a fixed link and return early.
+ * No need to configure PHY IRQ or attach to phylink.
+ */
+ return 0;
}
/* if phyirq is not set, use polling mode in phylib */
@@ -2368,95 +2869,57 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
phydev->irq = PHY_POLL;
netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
- /* set to AUTOMDIX */
- phydev->mdix = ETH_TP_MDI_AUTO;
-
- ret = phy_connect_direct(dev->net, phydev,
- lan78xx_link_status_change,
- dev->interface);
+ ret = phylink_connect_phy(dev->phylink, phydev);
if (ret) {
- netdev_err(dev->net, "can't attach PHY to %s\n",
- dev->mdiobus->id);
- if (dev->chipid == ID_REV_CHIP_ID_7801_) {
- if (phy_is_pseudo_fixed_link(phydev)) {
- fixed_phy_unregister(phydev);
- } else {
- phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
- 0xfffffff0);
- phy_unregister_fixup_for_uid(PHY_LAN8835,
- 0xfffffff0);
- }
- }
- return -EIO;
+ netdev_err(dev->net, "can't attach PHY to %s, error %pe\n",
+ dev->mdiobus->id, ERR_PTR(ret));
+ goto phylink_uninit;
}
- /* MAC doesn't support 1000T Half */
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
-
- /* support both flow controls */
- dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
- linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
- phydev->advertising);
- linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
- phydev->advertising);
- mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
- mii_adv_to_linkmode_adv_t(fc, mii_adv);
- linkmode_or(phydev->advertising, fc, phydev->advertising);
-
- phy_support_eee(phydev);
-
- if (phydev->mdio.dev.of_node) {
- u32 reg;
- int len;
-
- len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
- "microchip,led-modes",
- sizeof(u32));
- if (len >= 0) {
- /* Ensure the appropriate LEDs are enabled */
- lan78xx_read_reg(dev, HW_CFG, &reg);
- reg &= ~(HW_CFG_LED0_EN_ |
- HW_CFG_LED1_EN_ |
- HW_CFG_LED2_EN_ |
- HW_CFG_LED3_EN_);
- reg |= (len > 0) * HW_CFG_LED0_EN_ |
- (len > 1) * HW_CFG_LED1_EN_ |
- (len > 2) * HW_CFG_LED2_EN_ |
- (len > 3) * HW_CFG_LED3_EN_;
- lan78xx_write_reg(dev, HW_CFG, reg);
- }
- }
+ ret = lan78xx_configure_leds_from_dt(dev, phydev);
+ if (ret < 0)
+ goto phylink_uninit;
- genphy_config_aneg(phydev);
+ return 0;
- dev->fc_autoneg = phydev->autoneg;
+phylink_uninit:
+ lan78xx_phy_uninit(dev);
- return 0;
+ return ret;
}
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
{
- u32 buf;
bool rxenabled;
+ u32 buf;
+ int ret;
- lan78xx_read_reg(dev, MAC_RX, &buf);
+ ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+ if (ret < 0)
+ return ret;
rxenabled = ((buf & MAC_RX_RXEN_) != 0);
if (rxenabled) {
buf &= ~MAC_RX_RXEN_;
- lan78xx_write_reg(dev, MAC_RX, buf);
+ ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ if (ret < 0)
+ return ret;
}
/* add 4 to size for FCS */
buf &= ~MAC_RX_MAX_SIZE_MASK_;
buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
- lan78xx_write_reg(dev, MAC_RX, buf);
+ ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ if (ret < 0)
+ return ret;
if (rxenabled) {
buf |= MAC_RX_RXEN_;
- lan78xx_write_reg(dev, MAC_RX, buf);
+ ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ if (ret < 0)
+ return ret;
}
return 0;
@@ -2522,7 +2985,10 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
return ret;
ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
- if (!ret)
+ if (ret < 0)
+		netdev_err(dev->net, "Failed to change MTU to %d from %d: %pe\n",
+ new_mtu, netdev->mtu, ERR_PTR(ret));
+ else
WRITE_ONCE(netdev->mtu, new_mtu);
usb_autopm_put_interface(dev->intf);
@@ -2535,6 +3001,7 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
struct lan78xx_net *dev = netdev_priv(netdev);
struct sockaddr *addr = p;
u32 addr_lo, addr_hi;
+ int ret;
if (netif_running(netdev))
return -EBUSY;
@@ -2551,14 +3018,20 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
addr_hi = netdev->dev_addr[4] |
netdev->dev_addr[5] << 8;
- lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
- lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+ ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+ if (ret < 0)
+ return ret;
/* Added to support MAC address changes */
- lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
- lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+ ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
+ if (ret < 0)
+ return ret;
- return 0;
+ return lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
}
/* Enable or disable Rx checksum offload engine */
@@ -2591,9 +3064,7 @@ static int lan78xx_set_features(struct net_device *netdev,
spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
- lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
-
- return 0;
+ return lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
static void lan78xx_deferred_vlan_write(struct work_struct *param)
@@ -2644,13 +3115,16 @@ static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
return 0;
}
-static void lan78xx_init_ltm(struct lan78xx_net *dev)
+static int lan78xx_init_ltm(struct lan78xx_net *dev)
{
+ u32 regs[6] = { 0 };
int ret;
u32 buf;
- u32 regs[6] = { 0 };
ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
+ if (ret < 0)
+ goto init_ltm_failed;
+
if (buf & USB_CFG1_LTM_ENABLE_) {
u8 temp[2];
/* Get values from EEPROM first */
@@ -2661,7 +3135,7 @@ static void lan78xx_init_ltm(struct lan78xx_net *dev)
24,
(u8 *)regs);
if (ret < 0)
- return;
+ return ret;
}
} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
if (temp[0] == 24) {
@@ -2670,17 +3144,40 @@ static void lan78xx_init_ltm(struct lan78xx_net *dev)
24,
(u8 *)regs);
if (ret < 0)
- return;
+ return ret;
}
}
}
- lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
- lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
- lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
- lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
- lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
- lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
+ ret = lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
+ if (ret < 0)
+ goto init_ltm_failed;
+
+ ret = lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
+ if (ret < 0)
+ goto init_ltm_failed;
+
+ ret = lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
+ if (ret < 0)
+ goto init_ltm_failed;
+
+ ret = lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
+ if (ret < 0)
+ goto init_ltm_failed;
+
+ ret = lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
+ if (ret < 0)
+ goto init_ltm_failed;
+
+ ret = lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
+ if (ret < 0)
+ goto init_ltm_failed;
+
+ return 0;
+
+init_ltm_failed:
+ netdev_err(dev->net, "Failed to init LTM with error %pe\n", ERR_PTR(ret));
+ return ret;
}
static int lan78xx_urb_config_init(struct lan78xx_net *dev)
@@ -2721,163 +3218,12 @@ static int lan78xx_urb_config_init(struct lan78xx_net *dev)
return result;
}
-static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
-{
- return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
-}
-
-static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
- u32 hw_disabled)
-{
- unsigned long timeout;
- bool stopped = true;
- int ret;
- u32 buf;
-
- /* Stop the h/w block (if not already stopped) */
-
- ret = lan78xx_read_reg(dev, reg, &buf);
- if (ret < 0)
- return ret;
-
- if (buf & hw_enabled) {
- buf &= ~hw_enabled;
-
- ret = lan78xx_write_reg(dev, reg, buf);
- if (ret < 0)
- return ret;
-
- stopped = false;
- timeout = jiffies + HW_DISABLE_TIMEOUT;
- do {
- ret = lan78xx_read_reg(dev, reg, &buf);
- if (ret < 0)
- return ret;
-
- if (buf & hw_disabled)
- stopped = true;
- else
- msleep(HW_DISABLE_DELAY_MS);
- } while (!stopped && !time_after(jiffies, timeout));
- }
-
- ret = stopped ? 0 : -ETIME;
-
- return ret;
-}
-
-static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
-{
- return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
-}
-
-static int lan78xx_start_tx_path(struct lan78xx_net *dev)
-{
- int ret;
-
- netif_dbg(dev, drv, dev->net, "start tx path");
-
- /* Start the MAC transmitter */
-
- ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
- if (ret < 0)
- return ret;
-
- /* Start the Tx FIFO */
-
- ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
-{
- int ret;
-
- netif_dbg(dev, drv, dev->net, "stop tx path");
-
- /* Stop the Tx FIFO */
-
- ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
- if (ret < 0)
- return ret;
-
- /* Stop the MAC transmitter */
-
- ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-/* The caller must ensure the Tx path is stopped before calling
- * lan78xx_flush_tx_fifo().
- */
-static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
-{
- return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
-}
-
-static int lan78xx_start_rx_path(struct lan78xx_net *dev)
-{
- int ret;
-
- netif_dbg(dev, drv, dev->net, "start rx path");
-
- /* Start the Rx FIFO */
-
- ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
- if (ret < 0)
- return ret;
-
- /* Start the MAC receiver*/
-
- ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
-{
- int ret;
-
- netif_dbg(dev, drv, dev->net, "stop rx path");
-
- /* Stop the MAC receiver */
-
- ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
- if (ret < 0)
- return ret;
-
- /* Stop the Rx FIFO */
-
- ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-/* The caller must ensure the Rx path is stopped before calling
- * lan78xx_flush_rx_fifo().
- */
-static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
-{
- return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
-}
-
static int lan78xx_reset(struct lan78xx_net *dev)
{
struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
unsigned long timeout;
int ret;
u32 buf;
- u8 sig;
ret = lan78xx_read_reg(dev, HW_CFG, &buf);
if (ret < 0)
@@ -2904,8 +3250,6 @@ static int lan78xx_reset(struct lan78xx_net *dev)
}
} while (buf & HW_CFG_LRST_);
- lan78xx_init_mac_address(dev);
-
/* save DEVID for later usage */
ret = lan78xx_read_reg(dev, ID_REV, &buf);
if (ret < 0)
@@ -2914,6 +3258,10 @@ static int lan78xx_reset(struct lan78xx_net *dev)
dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
+ ret = lan78xx_init_mac_address(dev);
+ if (ret < 0)
+ return ret;
+
/* Respond to the IN token with a NAK */
ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
if (ret < 0)
@@ -2926,7 +3274,9 @@ static int lan78xx_reset(struct lan78xx_net *dev)
return ret;
/* Init LTM */
- lan78xx_init_ltm(dev);
+ ret = lan78xx_init_ltm(dev);
+ if (ret < 0)
+ return ret;
ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap);
if (ret < 0)
@@ -3030,22 +3380,12 @@ static int lan78xx_reset(struct lan78xx_net *dev)
if (ret < 0)
return ret;
+ buf &= ~(MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_ | MAC_CR_EEE_EN_);
+
/* LAN7801 only has RGMII mode */
- if (dev->chipid == ID_REV_CHIP_ID_7801_) {
+ if (dev->chipid == ID_REV_CHIP_ID_7801_)
buf &= ~MAC_CR_GMII_EN_;
- /* Enable Auto Duplex and Auto speed */
- buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
- }
- if (dev->chipid == ID_REV_CHIP_ID_7800_ ||
- dev->chipid == ID_REV_CHIP_ID_7850_) {
- ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
- if (!ret && sig != EEPROM_INDICATOR) {
- /* Implies there is no external eeprom. Set mac speed */
- netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
- buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
- }
- }
ret = lan78xx_write_reg(dev, MAC_CR, buf);
if (ret < 0)
return ret;
@@ -3095,9 +3435,11 @@ static int lan78xx_open(struct net_device *net)
mutex_lock(&dev->dev_mutex);
- phy_start(net->phydev);
+ lan78xx_init_stats(dev);
- netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
+ napi_enable(&dev->napi);
+
+ set_bit(EVENT_DEV_OPEN, &dev->flags);
/* for Link Check */
if (dev->urb_intr) {
@@ -3109,31 +3451,8 @@ static int lan78xx_open(struct net_device *net)
}
}
- ret = lan78xx_flush_rx_fifo(dev);
- if (ret < 0)
- goto done;
- ret = lan78xx_flush_tx_fifo(dev);
- if (ret < 0)
- goto done;
-
- ret = lan78xx_start_tx_path(dev);
- if (ret < 0)
- goto done;
- ret = lan78xx_start_rx_path(dev);
- if (ret < 0)
- goto done;
+ phylink_start(dev->phylink);
- lan78xx_init_stats(dev);
-
- set_bit(EVENT_DEV_OPEN, &dev->flags);
-
- netif_start_queue(net);
-
- dev->link_on = false;
-
- napi_enable(&dev->napi);
-
- lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
mutex_unlock(&dev->dev_mutex);
@@ -3188,10 +3507,9 @@ static int lan78xx_stop(struct net_device *net)
mutex_lock(&dev->dev_mutex);
if (timer_pending(&dev->stat_monitor))
- del_timer_sync(&dev->stat_monitor);
+ timer_delete_sync(&dev->stat_monitor);
clear_bit(EVENT_DEV_OPEN, &dev->flags);
- netif_stop_queue(net);
napi_disable(&dev->napi);
lan78xx_terminate_urbs(dev);
@@ -3201,12 +3519,7 @@ static int lan78xx_stop(struct net_device *net)
net->stats.rx_packets, net->stats.tx_packets,
net->stats.rx_errors, net->stats.tx_errors);
- /* ignore errors that occur stopping the Tx and Rx data paths */
- lan78xx_stop_tx_path(dev);
- lan78xx_stop_rx_path(dev);
-
- if (net->phydev)
- phy_stop(net->phydev);
+ phylink_stop(dev->phylink);
usb_kill_urb(dev->urb_intr);
@@ -3216,7 +3529,7 @@ static int lan78xx_stop(struct net_device *net)
*/
clear_bit(EVENT_TX_HALT, &dev->flags);
clear_bit(EVENT_RX_HALT, &dev->flags);
- clear_bit(EVENT_LINK_RESET, &dev->flags);
+ clear_bit(EVENT_PHY_INT_ACK, &dev->flags);
clear_bit(EVENT_STAT_UPDATE, &dev->flags);
cancel_delayed_work_sync(&dev->wq);
@@ -4140,14 +4453,14 @@ static void lan78xx_delayedwork(struct work_struct *work)
}
}
- if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
+ if (test_bit(EVENT_PHY_INT_ACK, &dev->flags)) {
int ret = 0;
- clear_bit(EVENT_LINK_RESET, &dev->flags);
- if (lan78xx_link_reset(dev) < 0) {
- netdev_info(dev->net, "link reset failed (%d)\n",
- ret);
- }
+ clear_bit(EVENT_PHY_INT_ACK, &dev->flags);
+ ret = lan78xx_phy_int_ack(dev);
+ if (ret)
+ netdev_info(dev->net, "PHY INT ack failed (%pe)\n",
+ ERR_PTR(ret));
}
if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
@@ -4221,33 +4534,29 @@ static void lan78xx_disconnect(struct usb_interface *intf)
struct lan78xx_net *dev;
struct usb_device *udev;
struct net_device *net;
- struct phy_device *phydev;
dev = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
if (!dev)
return;
- netif_napi_del(&dev->napi);
-
udev = interface_to_usbdev(intf);
net = dev->net;
+ rtnl_lock();
+ phylink_stop(dev->phylink);
+ phylink_disconnect_phy(dev->phylink);
+ rtnl_unlock();
+
+ netif_napi_del(&dev->napi);
+
unregister_netdev(net);
timer_shutdown_sync(&dev->stat_monitor);
set_bit(EVENT_DEV_DISCONNECT, &dev->flags);
cancel_delayed_work_sync(&dev->wq);
- phydev = net->phydev;
-
- phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
- phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
-
- phy_disconnect(net->phydev);
-
- if (phy_is_pseudo_fixed_link(phydev))
- fixed_phy_unregister(phydev);
+ phylink_destroy(dev->phylink);
usb_scuttle_anchored_urbs(&dev->deferred);
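For reference, a minimal sketch of the phylink teardown ordering the disconnect path above relies on (demo_* names are illustrative; the phylink instance is assumed to have been created with phylink_create() and attached with phylink_connect_phy() at probe time):

#include <linux/phylink.h>
#include <linux/rtnetlink.h>

static void demo_phylink_teardown(struct phylink *pl)
{
	rtnl_lock();
	phylink_stop(pl);		/* stop the link state machine (needs RTNL) */
	phylink_disconnect_phy(pl);	/* detach the PHY, still under RTNL */
	rtnl_unlock();

	phylink_destroy(pl);		/* free the instance once nothing references it */
}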
@@ -4304,7 +4613,7 @@ static const struct net_device_ops lan78xx_netdev_ops = {
static void lan78xx_stat_monitor(struct timer_list *t)
{
- struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
+ struct lan78xx_net *dev = timer_container_of(dev, t, stat_monitor);
lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}
@@ -4331,7 +4640,6 @@ static int lan78xx_probe(struct usb_interface *intf,
goto out1;
}
- /* netdev_printk() needs this */
SET_NETDEV_DEV(netdev, &intf->dev);
dev = netdev_priv(netdev);
@@ -4346,7 +4654,7 @@ static int lan78xx_probe(struct usb_interface *intf,
skb_queue_head_init(&dev->rxq_done);
skb_queue_head_init(&dev->txq_pend);
skb_queue_head_init(&dev->rxq_overflow);
- mutex_init(&dev->phy_mutex);
+ mutex_init(&dev->mdiobus_mutex);
mutex_init(&dev->dev_mutex);
ret = lan78xx_urb_config_init(dev);
@@ -4414,29 +4722,30 @@ static int lan78xx_probe(struct usb_interface *intf,
period = ep_intr->desc.bInterval;
maxp = usb_maxpacket(dev->udev, dev->pipe_intr);
- buf = kmalloc(maxp, GFP_KERNEL);
- if (!buf) {
+
+ dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
+ if (!dev->urb_intr) {
ret = -ENOMEM;
goto out5;
}
- dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
- if (!dev->urb_intr) {
+ buf = kmalloc(maxp, GFP_KERNEL);
+ if (!buf) {
ret = -ENOMEM;
- goto out6;
- } else {
- usb_fill_int_urb(dev->urb_intr, dev->udev,
- dev->pipe_intr, buf, maxp,
- intr_complete, dev, period);
- dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
+ goto free_urbs;
}
+ usb_fill_int_urb(dev->urb_intr, dev->udev,
+ dev->pipe_intr, buf, maxp,
+ intr_complete, dev, period);
+ dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
+
dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out);
/* Reject broken descriptors. */
if (dev->maxpacket == 0) {
ret = -ENODEV;
- goto out6;
+ goto free_urbs;
}
/* driver requires remote-wakeup capability during autosuspend. */
@@ -4444,12 +4753,12 @@ static int lan78xx_probe(struct usb_interface *intf,
ret = lan78xx_phy_init(dev);
if (ret < 0)
- goto out7;
+ goto free_urbs;
ret = register_netdev(netdev);
if (ret != 0) {
netif_err(dev, probe, netdev, "couldn't register the device\n");
- goto out8;
+ goto phy_uninit;
}
usb_set_intfdata(intf, dev);
@@ -4464,12 +4773,10 @@ static int lan78xx_probe(struct usb_interface *intf,
return 0;
-out8:
- phy_disconnect(netdev->phydev);
-out7:
+phy_uninit:
+ lan78xx_phy_uninit(dev);
+free_urbs:
usb_free_urb(dev->urb_intr);
-out6:
- kfree(buf);
out5:
lan78xx_unbind(dev, intf);
out4:
@@ -4802,6 +5109,10 @@ static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
spin_unlock_irq(&dev->txq.lock);
}
+ rtnl_lock();
+ phylink_suspend(dev->phylink, false);
+ rtnl_unlock();
+
/* stop RX */
ret = lan78xx_stop_rx_path(dev);
if (ret < 0)
@@ -4824,7 +5135,7 @@ static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
/* reattach */
netif_device_attach(dev->net);
- del_timer(&dev->stat_monitor);
+ timer_delete(&dev->stat_monitor);
if (PMSG_IS_AUTO(message)) {
ret = lan78xx_set_auto_suspend(dev);
@@ -5029,11 +5340,15 @@ static int lan78xx_reset_resume(struct usb_interface *intf)
if (ret < 0)
return ret;
- phy_start(dev->net->phydev);
-
ret = lan78xx_resume(intf);
+ if (ret < 0)
+ return ret;
- return ret;
+ rtnl_lock();
+ phylink_resume(dev->phylink);
+ rtnl_unlock();
+
+ return 0;
}
static const struct usb_device_id products[] = {
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 0c011d8f5d4d..3a4985b582cb 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -192,6 +192,12 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (!skbn)
return 0;
+ /* Raw IP packets don't have a MAC header, but other subsystems
+ * (like xfrm) may still access MAC header offsets, so they must
+ * be initialized.
+ */
+ skb_reset_mac_header(skbn);
+
switch (skb->data[offset + qmimux_hdr_sz] & 0xf0) {
case 0x40:
skbn->protocol = htons(ETH_P_IP);
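A minimal sketch of why the reset matters for raw-IP frames, using hypothetical demo_* names: the skb carries no Ethernet header, so skb_reset_mac_header() pins the MAC header offset to the start of the data instead of leaving it uninitialized for later users such as xfrm:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

static struct sk_buff *demo_build_raw_ip_skb(struct net_device *ndev,
					     const void *data, unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(ndev, len);

	if (!skb)
		return NULL;

	skb_put_data(skb, data, len);
	skb_reset_mac_header(skb);	/* no MAC header, keep the offset valid anyway */
	skb->protocol = htons(ETH_P_IP);
	skb->dev = ndev;

	return skb;
}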
@@ -1355,16 +1361,25 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1034, 2)}, /* Telit LE910C4-WWX */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1037, 4)}, /* Telit LE910C4-WWX */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1038, 3)}, /* Telit LE910C4-WWX */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x103a, 0)}, /* Telit LE910C4-WWX */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1057, 2)}, /* Telit FN980 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */
- {QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */
- {QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990A */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1077, 2)}, /* Telit FN990A w/audio */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990A */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a0, 0)}, /* Telit FN920C04 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a4, 0)}, /* Telit FN920C04 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a9, 0)}, /* Telit FN920C04 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10b0, 0)}, /* Telit FE990B */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10c0, 0)}, /* Telit FE910C04 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10c4, 0)}, /* Telit FE910C04 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10c8, 0)}, /* Telit FE910C04 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10d0, 0)}, /* Telit FN990B */
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
@@ -1421,11 +1436,13 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x22de, 0x9051, 2)}, /* Hucom Wireless HM-211S/K */
{QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
{QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */
+ {QMI_QUIRK_SET_DTR(0x1e0e, 0x9071, 3)}, /* SIMCom 8230C ++ */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0195, 4)}, /* Quectel EG95 */
{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x030e, 4)}, /* Quectel EM05GV2 */
+ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0316, 3)}, /* Quectel RG255C */
{QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */
{QMI_QUIRK_SET_DTR(0x2cb7, 0x0112, 0)}, /* Fibocom FG132 */
{QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 468c73974046..fa5192583860 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -26,7 +26,7 @@
#include <linux/atomic.h>
#include <linux/acpi.h>
#include <linux/firmware.h>
-#include <crypto/hash.h>
+#include <crypto/sha2.h>
#include <linux/usb/r8152.h>
#include <net/gso.h>
@@ -785,6 +785,7 @@ enum rtl8152_flags {
#define DEVICE_ID_THINKPAD_USB_C_DONGLE 0x720c
#define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2 0xa387
#define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN3 0x3062
+#define DEVICE_ID_THINKPAD_HYBRID_USB_C_DOCK 0xa359
struct tally_counter {
__le64 tx_packets;
@@ -1664,14 +1665,14 @@ static int
rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u32 speed, u8 duplex,
u32 advertising);
-static int __rtl8152_set_mac_address(struct net_device *netdev, void *p,
+static int __rtl8152_set_mac_address(struct net_device *netdev,
+ struct sockaddr_storage *addr,
bool in_resume)
{
struct r8152 *tp = netdev_priv(netdev);
- struct sockaddr *addr = p;
int ret = -EADDRNOTAVAIL;
- if (!is_valid_ether_addr(addr->sa_data))
+ if (!is_valid_ether_addr(addr->__data))
goto out1;
if (!in_resume) {
@@ -1682,10 +1683,10 @@ static int __rtl8152_set_mac_address(struct net_device *netdev, void *p,
mutex_lock(&tp->control);
- eth_hw_addr_set(netdev, addr->sa_data);
+ eth_hw_addr_set(netdev, addr->__data);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
- pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES, 8, addr->sa_data);
+ pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES, 8, addr->__data);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
mutex_unlock(&tp->control);
@@ -1705,7 +1706,8 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
* host system provided MAC address.
* Examples of this are Dell TB15 and Dell WD15 docks
*/
-static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
+static int vendor_mac_passthru_addr_read(struct r8152 *tp,
+ struct sockaddr_storage *ss)
{
acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -1773,47 +1775,48 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
ret = -EINVAL;
goto amacout;
}
- memcpy(sa->sa_data, buf, 6);
+ memcpy(ss->__data, buf, 6);
tp->netdev->addr_assign_type = NET_ADDR_STOLEN;
netif_info(tp, probe, tp->netdev,
- "Using pass-thru MAC addr %pM\n", sa->sa_data);
+ "Using pass-thru MAC addr %pM\n", ss->__data);
amacout:
kfree(obj);
return ret;
}
-static int determine_ethernet_addr(struct r8152 *tp, struct sockaddr *sa)
+static int determine_ethernet_addr(struct r8152 *tp,
+ struct sockaddr_storage *ss)
{
struct net_device *dev = tp->netdev;
int ret;
- sa->sa_family = dev->type;
+ ss->ss_family = dev->type;
- ret = eth_platform_get_mac_address(&tp->udev->dev, sa->sa_data);
+ ret = eth_platform_get_mac_address(&tp->udev->dev, ss->__data);
if (ret < 0) {
if (tp->version == RTL_VER_01) {
- ret = pla_ocp_read(tp, PLA_IDR, 8, sa->sa_data);
+ ret = pla_ocp_read(tp, PLA_IDR, 8, ss->__data);
} else {
/* if device doesn't support MAC pass through this will
* be expected to be non-zero
*/
- ret = vendor_mac_passthru_addr_read(tp, sa);
+ ret = vendor_mac_passthru_addr_read(tp, ss);
if (ret < 0)
ret = pla_ocp_read(tp, PLA_BACKUP, 8,
- sa->sa_data);
+ ss->__data);
}
}
if (ret < 0) {
netif_err(tp, probe, dev, "Get ether addr fail\n");
- } else if (!is_valid_ether_addr(sa->sa_data)) {
+ } else if (!is_valid_ether_addr(ss->__data)) {
netif_err(tp, probe, dev, "Invalid ether addr %pM\n",
- sa->sa_data);
+ ss->__data);
eth_hw_addr_random(dev);
- ether_addr_copy(sa->sa_data, dev->dev_addr);
+ ether_addr_copy(ss->__data, dev->dev_addr);
netif_info(tp, probe, dev, "Random ether addr %pM\n",
- sa->sa_data);
+ ss->__data);
return 0;
}
@@ -1823,17 +1826,17 @@ static int determine_ethernet_addr(struct r8152 *tp, struct sockaddr *sa)
static int set_ethernet_addr(struct r8152 *tp, bool in_resume)
{
struct net_device *dev = tp->netdev;
- struct sockaddr sa;
+ struct sockaddr_storage ss;
int ret;
- ret = determine_ethernet_addr(tp, &sa);
+ ret = determine_ethernet_addr(tp, &ss);
if (ret < 0)
return ret;
if (tp->version == RTL_VER_01)
- eth_hw_addr_set(dev, sa.sa_data);
+ eth_hw_addr_set(dev, ss.__data);
else
- ret = __rtl8152_set_mac_address(dev, &sa, in_resume);
+ ret = __rtl8152_set_mac_address(dev, &ss, in_resume);
return ret;
}
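A minimal sketch of the sockaddr_storage-based flow this conversion targets (hypothetical demo_* helper; dev_set_mac_address() is assumed to take a struct sockaddr_storage * in this tree and to be called with RTNL held where the core requires it):

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>

static int demo_set_mac(struct net_device *ndev, const u8 mac[ETH_ALEN])
{
	struct sockaddr_storage ss = { .ss_family = ndev->type };

	memcpy(ss.__data, mac, ETH_ALEN);	/* __data replaces sockaddr::sa_data here */
	return dev_set_mac_address(ndev, &ss, NULL);
}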
@@ -4627,48 +4630,16 @@ out:
static long rtl8152_fw_verify_checksum(struct r8152 *tp,
struct fw_header *fw_hdr, size_t size)
{
- unsigned char checksum[sizeof(fw_hdr->checksum)];
- struct crypto_shash *alg;
- struct shash_desc *sdesc;
- size_t len;
- long rc;
+ u8 checksum[sizeof(fw_hdr->checksum)];
- alg = crypto_alloc_shash("sha256", 0, 0);
- if (IS_ERR(alg)) {
- rc = PTR_ERR(alg);
- goto out;
- }
-
- if (crypto_shash_digestsize(alg) != sizeof(fw_hdr->checksum)) {
- rc = -EFAULT;
- dev_err(&tp->intf->dev, "digestsize incorrect (%u)\n",
- crypto_shash_digestsize(alg));
- goto free_shash;
- }
+ BUILD_BUG_ON(sizeof(checksum) != SHA256_DIGEST_SIZE);
+ sha256(fw_hdr->version, size - sizeof(checksum), checksum);
- len = sizeof(*sdesc) + crypto_shash_descsize(alg);
- sdesc = kmalloc(len, GFP_KERNEL);
- if (!sdesc) {
- rc = -ENOMEM;
- goto free_shash;
- }
- sdesc->tfm = alg;
-
- len = size - sizeof(fw_hdr->checksum);
- rc = crypto_shash_digest(sdesc, fw_hdr->version, len, checksum);
- kfree(sdesc);
- if (rc)
- goto free_shash;
-
- if (memcmp(fw_hdr->checksum, checksum, sizeof(fw_hdr->checksum))) {
+ if (memcmp(fw_hdr->checksum, checksum, sizeof(checksum))) {
dev_err(&tp->intf->dev, "checksum fail\n");
- rc = -EFAULT;
+ return -EFAULT;
}
-
-free_shash:
- crypto_free_shash(alg);
-out:
- return rc;
+ return 0;
}
static long rtl8152_check_firmware(struct r8152 *tp, struct rtl_fw *rtl_fw)
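For reference, a minimal sketch of the one-shot lib/crypto helper the rewritten checksum path assumes (demo_* name is illustrative): sha256() writes a SHA256_DIGEST_SIZE (32-byte) digest with no tfm allocation and cannot fail.

#include <crypto/sha2.h>
#include <linux/string.h>

static bool demo_sha256_matches(const u8 *data, unsigned int len,
				const u8 expected[SHA256_DIGEST_SIZE])
{
	u8 digest[SHA256_DIGEST_SIZE];

	sha256(data, len, digest);
	return memcmp(digest, expected, SHA256_DIGEST_SIZE) == 0;
}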
@@ -8452,7 +8423,7 @@ static int rtl8152_post_reset(struct usb_interface *intf)
{
struct r8152 *tp = usb_get_intfdata(intf);
struct net_device *netdev;
- struct sockaddr sa;
+ struct sockaddr_storage ss;
if (!tp || !test_bit(PROBED_WITH_NO_ERRORS, &tp->flags))
goto exit;
@@ -8460,8 +8431,8 @@ static int rtl8152_post_reset(struct usb_interface *intf)
rtl_set_accessible(tp);
/* reset the MAC address in case of policy change */
- if (determine_ethernet_addr(tp, &sa) >= 0)
- dev_set_mac_address (tp->netdev, &sa, NULL);
+ if (determine_ethernet_addr(tp, &ss) >= 0)
+ dev_set_mac_address(tp->netdev, &ss, NULL);
netdev = tp->netdev;
if (!netif_running(netdev))
@@ -9340,6 +9311,7 @@ static const struct ethtool_ops ops = {
.set_ringparam = rtl8152_set_ringparam,
.get_pauseparam = rtl8152_get_pauseparam,
.set_pauseparam = rtl8152_set_pauseparam,
+ .get_ts_info = ethtool_op_get_ts_info,
};
static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -9787,6 +9759,7 @@ static bool rtl8152_supports_lenovo_macpassthru(struct usb_device *udev)
case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2:
case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN3:
case DEVICE_ID_THINKPAD_USB_C_DONGLE:
+ case DEVICE_ID_THINKPAD_HYBRID_USB_C_DOCK:
return 1;
}
} else if (vendor_id == VENDOR_ID_REALTEK && parent_vendor_id == VENDOR_ID_LENOVO) {
@@ -10064,6 +10037,8 @@ static const struct usb_device_id rtl8152_table[] = {
{ USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927) },
{ USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0c5e) },
{ USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101) },
+
+ /* Lenovo */
{ USB_DEVICE(VENDOR_ID_LENOVO, 0x304f) },
{ USB_DEVICE(VENDOR_ID_LENOVO, 0x3054) },
{ USB_DEVICE(VENDOR_ID_LENOVO, 0x3062) },
@@ -10074,11 +10049,15 @@ static const struct usb_device_id rtl8152_table[] = {
{ USB_DEVICE(VENDOR_ID_LENOVO, 0x720c) },
{ USB_DEVICE(VENDOR_ID_LENOVO, 0x7214) },
{ USB_DEVICE(VENDOR_ID_LENOVO, 0x721e) },
+ { USB_DEVICE(VENDOR_ID_LENOVO, 0xa359) },
{ USB_DEVICE(VENDOR_ID_LENOVO, 0xa387) },
+
{ USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041) },
{ USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff) },
{ USB_DEVICE(VENDOR_ID_TPLINK, 0x0601) },
+ { USB_DEVICE(VENDOR_ID_TPLINK, 0x0602) },
{ USB_DEVICE(VENDOR_ID_DLINK, 0xb301) },
+ { USB_DEVICE(VENDOR_ID_DELL, 0xb097) },
{ USB_DEVICE(VENDOR_ID_ASUS, 0x1976) },
{}
};
@@ -10144,7 +10123,12 @@ static int __init rtl8152_driver_init(void)
ret = usb_register_device_driver(&rtl8152_cfgselector_driver, THIS_MODULE);
if (ret)
return ret;
- return usb_register(&rtl8152_driver);
+
+ ret = usb_register(&rtl8152_driver);
+ if (ret)
+ usb_deregister_device_driver(&rtl8152_cfgselector_driver);
+
+ return ret;
}
static void __exit rtl8152_driver_exit(void)
diff --git a/drivers/net/usb/r8153_ecm.c b/drivers/net/usb/r8153_ecm.c
index 20b2df8d74ae..8d860dacdf49 100644
--- a/drivers/net/usb/r8153_ecm.c
+++ b/drivers/net/usb/r8153_ecm.c
@@ -135,6 +135,12 @@ static const struct usb_device_id products[] = {
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&r8153_info,
},
+/* Lenovo ThinkPad Hybrid USB-C with USB-A Dock (40af0135eu, based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID_LENOVO, 0xa359, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&r8153_info,
+},
{ }, /* END */
};
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 01a3b2417a54..278e6cb6f4d9 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -71,6 +71,14 @@
#define MSR_SPEED (1<<3)
#define MSR_LINK (1<<2)
+/* USB endpoints */
+enum rtl8150_usb_ep {
+ RTL8150_USB_EP_CONTROL = 0,
+ RTL8150_USB_EP_BULK_IN = 1,
+ RTL8150_USB_EP_BULK_OUT = 2,
+ RTL8150_USB_EP_INT_IN = 3,
+};
+
/* Interrupt pipe data */
#define INT_TSR 0x00
#define INT_RSR 0x01
@@ -656,7 +664,6 @@ static void rtl8150_set_multicast(struct net_device *netdev)
rtl8150_t *dev = netdev_priv(netdev);
u16 rx_creg = 0x9e;
- netif_stop_queue(netdev);
if (netdev->flags & IFF_PROMISC) {
rx_creg |= 0x0001;
dev_info(&netdev->dev, "%s: promiscuous mode\n", netdev->name);
@@ -670,7 +677,6 @@ static void rtl8150_set_multicast(struct net_device *netdev)
rx_creg &= 0x00fc;
}
async_set_registers(dev, RCR, sizeof(rx_creg), rx_creg);
- netif_wake_queue(netdev);
}
static netdev_tx_t rtl8150_start_xmit(struct sk_buff *skb,
@@ -679,9 +685,16 @@ static netdev_tx_t rtl8150_start_xmit(struct sk_buff *skb,
rtl8150_t *dev = netdev_priv(netdev);
int count, res;
+ /* pad the frame and ensure terminating USB packet, datasheet 9.2.3 */
+ count = max(skb->len, ETH_ZLEN);
+ if (count % 64 == 0)
+ count++;
+ if (skb_padto(skb, count)) {
+ netdev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
netif_stop_queue(netdev);
- count = (skb->len < 60) ? 60 : skb->len;
- count = (count & 0x3f) ? count : count + 1;
dev->tx_skb = skb;
usb_fill_bulk_urb(dev->tx_urb, dev->udev, usb_sndbulkpipe(dev->udev, 2),
skb->data, count, write_bulk_callback, dev);
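A worked example of the length rule introduced above, assuming the device's bulk endpoint uses 64-byte packets: runts are padded to ETH_ZLEN (60), and a transfer that would end exactly on a packet boundary is grown by one byte so the hardware sees a terminating short packet.

#include <linux/if_ether.h>
#include <linux/minmax.h>

static unsigned int demo_wire_len(unsigned int skb_len)
{
	unsigned int count = max_t(unsigned int, skb_len, ETH_ZLEN);

	if (count % 64 == 0)	/* full final packet: force a short packet instead */
		count++;

	return count;		/* e.g. 42 -> 60, 64 -> 65, 100 -> 100, 128 -> 129 */
}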
@@ -867,6 +880,13 @@ static int rtl8150_probe(struct usb_interface *intf,
struct usb_device *udev = interface_to_usbdev(intf);
rtl8150_t *dev;
struct net_device *netdev;
+ static const u8 bulk_ep_addr[] = {
+ RTL8150_USB_EP_BULK_IN | USB_DIR_IN,
+ RTL8150_USB_EP_BULK_OUT | USB_DIR_OUT,
+ 0};
+ static const u8 int_ep_addr[] = {
+ RTL8150_USB_EP_INT_IN | USB_DIR_IN,
+ 0};
netdev = alloc_etherdev(sizeof(rtl8150_t));
if (!netdev)
@@ -880,6 +900,13 @@ static int rtl8150_probe(struct usb_interface *intf,
return -ENOMEM;
}
+ /* Verify that all required endpoints are present */
+ if (!usb_check_bulk_endpoints(intf, bulk_ep_addr) ||
+ !usb_check_int_endpoints(intf, int_ep_addr)) {
+ dev_err(&intf->dev, "couldn't find required endpoints\n");
+ goto out;
+ }
+
tasklet_setup(&dev->tl, rx_fixup);
spin_lock_init(&dev->rx_pool_lock);
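A minimal sketch of the helper calls used by the probe check above (demo_* names are illustrative): usb_check_bulk_endpoints() and usb_check_int_endpoints() each take a zero-terminated array of endpoint addresses and return true only if every listed endpoint exists with the expected type and direction.

#include <linux/usb.h>
#include <linux/usb/ch9.h>

static const u8 demo_bulk_eps[] = { 0x01 | USB_DIR_IN, 0x02 | USB_DIR_OUT, 0 };
static const u8 demo_int_eps[]  = { 0x03 | USB_DIR_IN, 0 };

static bool demo_endpoints_ok(struct usb_interface *intf)
{
	return usb_check_bulk_endpoints(intf, demo_bulk_eps) &&
	       usb_check_int_endpoints(intf, demo_int_eps);
}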
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index 3d239b8d1a1b..36c73db44f77 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -522,7 +522,7 @@ static void sierra_net_kevent(struct work_struct *work)
" stopping sync timer",
hh.msgspecific.byte);
/* Got sync resp - stop timer & clear mask */
- del_timer_sync(&priv->sync_timer);
+ timer_delete_sync(&priv->sync_timer);
clear_bit(SIERRA_NET_TIMER_EXPIRY,
&priv->kevent_flags);
break;
@@ -573,7 +573,7 @@ static void sierra_net_defer_kevent(struct usbnet *dev, int work)
*/
static void sierra_sync_timer(struct timer_list *t)
{
- struct sierra_net_data *priv = from_timer(priv, t, sync_timer);
+ struct sierra_net_data *priv = timer_container_of(priv, t, sync_timer);
struct usbnet *dev = priv->usbnet;
dev_dbg(&dev->udev->dev, "%s", __func__);
@@ -689,6 +689,10 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
status);
return -ENODEV;
}
+ if (!dev->status) {
+ dev_err(&dev->udev->dev, "No status endpoint found");
+ return -ENODEV;
+ }
/* Initialize sierra private data */
priv = kzalloc(sizeof *priv, GFP_KERNEL);
if (!priv)
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 8e82184be5e7..de733e0488bf 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -63,6 +63,9 @@ struct smsc95xx_priv {
u32 hash_hi;
u32 hash_lo;
u32 wolopts;
+ bool pause_rx;
+ bool pause_tx;
+ bool pause_autoneg;
spinlock_t mac_cr_lock;
u8 features;
u8 suspend_flags;
@@ -537,16 +540,23 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev)
{
- u32 flow = 0, afc_cfg;
struct smsc95xx_priv *pdata = dev->driver_priv;
- bool tx_pause, rx_pause;
+ u32 flow = 0, afc_cfg;
int ret = smsc95xx_read_reg(dev, AFC_CFG, &afc_cfg);
if (ret < 0)
return ret;
if (pdata->phydev->duplex == DUPLEX_FULL) {
- phy_get_pause(pdata->phydev, &tx_pause, &rx_pause);
+ bool tx_pause, rx_pause;
+
+ if (pdata->phydev->autoneg == AUTONEG_ENABLE &&
+ pdata->pause_autoneg) {
+ phy_get_pause(pdata->phydev, &tx_pause, &rx_pause);
+ } else {
+ tx_pause = pdata->pause_tx;
+ rx_pause = pdata->pause_rx;
+ }
if (rx_pause)
flow = 0xFFFF0002;
@@ -772,6 +782,55 @@ static int smsc95xx_ethtool_get_sset_count(struct net_device *ndev, int sset)
}
}
+static void smsc95xx_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct smsc95xx_priv *pdata;
+ struct usbnet *dev;
+
+ dev = netdev_priv(ndev);
+ pdata = dev->driver_priv;
+
+ pause->autoneg = pdata->pause_autoneg;
+ pause->rx_pause = pdata->pause_rx;
+ pause->tx_pause = pdata->pause_tx;
+}
+
+static int smsc95xx_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ bool pause_autoneg_rx, pause_autoneg_tx;
+ struct smsc95xx_priv *pdata;
+ struct phy_device *phydev;
+ struct usbnet *dev;
+
+ dev = netdev_priv(ndev);
+ pdata = dev->driver_priv;
+ phydev = ndev->phydev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ pdata->pause_rx = pause->rx_pause;
+ pdata->pause_tx = pause->tx_pause;
+ pdata->pause_autoneg = pause->autoneg;
+
+ if (pause->autoneg) {
+ pause_autoneg_rx = pause->rx_pause;
+ pause_autoneg_tx = pause->tx_pause;
+ } else {
+ pause_autoneg_rx = false;
+ pause_autoneg_tx = false;
+ }
+
+ phy_set_asym_pause(ndev->phydev, pause_autoneg_rx, pause_autoneg_tx);
+ if (phydev->link && (!pause->autoneg ||
+ phydev->autoneg == AUTONEG_DISABLE))
+ smsc95xx_mac_update_fullduplex(dev);
+
+ return 0;
+}
+
static const struct ethtool_ops smsc95xx_ethtool_ops = {
.get_link = smsc95xx_get_link,
.nway_reset = phy_ethtool_nway_reset,
@@ -791,6 +850,8 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
.self_test = net_selftest,
.get_strings = smsc95xx_ethtool_get_strings,
.get_sset_count = smsc95xx_ethtool_get_sset_count,
+ .get_pauseparam = smsc95xx_get_pauseparam,
+ .set_pauseparam = smsc95xx_set_pauseparam,
};
static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -1227,6 +1288,11 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->max_mtu = ETH_DATA_LEN;
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+ pdata->pause_tx = true;
+ pdata->pause_rx = true;
+ pdata->pause_autoneg = true;
+ phy_support_asym_pause(pdata->phydev);
+
ret = phy_connect_direct(dev->net, pdata->phydev,
&smsc95xx_handle_link_change,
PHY_INTERFACE_MODE_MII);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 44179f4e807f..1d9faa70ba3b 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -142,16 +142,16 @@ int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
if (alt->desc.bAlternateSetting != 0 ||
!(dev->driver_info->flags & FLAG_NO_SETINT)) {
- tmp = usb_set_interface (dev->udev, alt->desc.bInterfaceNumber,
- alt->desc.bAlternateSetting);
+ tmp = usb_set_interface(dev->udev, alt->desc.bInterfaceNumber,
+ alt->desc.bAlternateSetting);
if (tmp < 0)
return tmp;
}
- dev->in = usb_rcvbulkpipe (dev->udev,
- in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
- dev->out = usb_sndbulkpipe (dev->udev,
- out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+ dev->in = usb_rcvbulkpipe(dev->udev,
+ in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+ dev->out = usb_sndbulkpipe(dev->udev,
+ out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
dev->status = status;
return 0;
}
@@ -163,7 +163,7 @@ int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
int tmp = -1, ret;
unsigned char buf [13];
- ret = usb_string(dev->udev, iMACAddress, buf, sizeof buf);
+ ret = usb_string(dev->udev, iMACAddress, buf, sizeof(buf));
if (ret == 12)
tmp = hex2bin(addr, buf, 6);
if (tmp < 0) {
@@ -178,7 +178,18 @@ int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
}
EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
-static void intr_complete (struct urb *urb)
+static bool usbnet_needs_usb_name_format(struct usbnet *dev, struct net_device *net)
+{
+ /* Point to point devices which don't have a real MAC address
+ * (or report a fake local one) have historically used the usb%d
+	 * naming. Preserve this.
+ */
+ return (dev->driver_info->flags & FLAG_POINTTOPOINT) != 0 &&
+ (is_zero_ether_addr(net->dev_addr) ||
+ is_local_ether_addr(net->dev_addr));
+}
+
+static void intr_complete(struct urb *urb)
{
struct usbnet *dev = urb->context;
int status = urb->status;
@@ -204,13 +215,13 @@ static void intr_complete (struct urb *urb)
break;
}
- status = usb_submit_urb (urb, GFP_ATOMIC);
+ status = usb_submit_urb(urb, GFP_ATOMIC);
if (status != 0)
netif_err(dev, timer, dev->net,
"intr resubmit --> %d\n", status);
}
-static int init_status (struct usbnet *dev, struct usb_interface *intf)
+static int init_status(struct usbnet *dev, struct usb_interface *intf)
{
char *buf = NULL;
unsigned pipe = 0;
@@ -220,24 +231,24 @@ static int init_status (struct usbnet *dev, struct usb_interface *intf)
if (!dev->driver_info->status)
return 0;
- pipe = usb_rcvintpipe (dev->udev,
- dev->status->desc.bEndpointAddress
- & USB_ENDPOINT_NUMBER_MASK);
+ pipe = usb_rcvintpipe(dev->udev,
+ dev->status->desc.bEndpointAddress
+ & USB_ENDPOINT_NUMBER_MASK);
maxp = usb_maxpacket(dev->udev, pipe);
/* avoid 1 msec chatter: min 8 msec poll rate */
period = max ((int) dev->status->desc.bInterval,
(dev->udev->speed == USB_SPEED_HIGH) ? 7 : 3);
- buf = kmalloc (maxp, GFP_KERNEL);
+ buf = kmalloc(maxp, GFP_KERNEL);
if (buf) {
- dev->interrupt = usb_alloc_urb (0, GFP_KERNEL);
+ dev->interrupt = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->interrupt) {
- kfree (buf);
+ kfree(buf);
return -ENOMEM;
} else {
usb_fill_int_urb(dev->interrupt, dev->udev, pipe,
- buf, maxp, intr_complete, dev, period);
+ buf, maxp, intr_complete, dev, period);
dev->interrupt->transfer_flags |= URB_FREE_BUFFER;
dev_dbg(&intf->dev,
"status ep%din, %d bytes period %d\n",
@@ -315,7 +326,7 @@ static void __usbnet_status_stop_force(struct usbnet *dev)
* Some link protocols batch packets, so their rx_fixup paths
* can return clones as well as just modify the original skb.
*/
-void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
+void usbnet_skb_return(struct usbnet *dev, struct sk_buff *skb)
{
struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->net->tstats);
unsigned long flags;
@@ -328,7 +339,7 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
/* only update if unset to allow minidriver rx_fixup override */
if (skb->protocol == 0)
- skb->protocol = eth_type_trans (skb, dev->net);
+ skb->protocol = eth_type_trans(skb, dev->net);
flags = u64_stats_update_begin_irqsave(&stats64->syncp);
u64_stats_inc(&stats64->rx_packets);
@@ -336,8 +347,8 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
u64_stats_update_end_irqrestore(&stats64->syncp, flags);
netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
- skb->len + sizeof (struct ethhdr), skb->protocol);
- memset (skb->cb, 0, sizeof (struct skb_data));
+ skb->len + sizeof(struct ethhdr), skb->protocol);
+ memset(skb->cb, 0, sizeof(struct skb_data));
if (skb_defer_rx_timestamp(skb))
return;
@@ -385,7 +396,7 @@ EXPORT_SYMBOL_GPL(usbnet_update_max_qlen);
*
*-------------------------------------------------------------------------*/
-int usbnet_change_mtu (struct net_device *net, int new_mtu)
+int usbnet_change_mtu(struct net_device *net, int new_mtu)
{
struct usbnet *dev = netdev_priv(net);
int ll_mtu = new_mtu + net->hard_header_len;
@@ -450,7 +461,7 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
__skb_queue_tail(&dev->done, skb);
if (dev->done.qlen == 1)
- tasklet_schedule(&dev->bh);
+ queue_work(system_bh_wq, &dev->bh_work);
spin_unlock(&dev->done.lock);
spin_unlock_irqrestore(&list->lock, flags);
return old_state;
@@ -461,7 +472,7 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
* NOTE: annoying asymmetry: if it's active, schedule_work() fails,
* but tasklet_schedule() doesn't. hope the failure is rare.
*/
-void usbnet_defer_kevent (struct usbnet *dev, int work)
+void usbnet_defer_kevent(struct usbnet *dev, int work)
{
set_bit (work, &dev->flags);
if (!usbnet_going_away(dev)) {
@@ -478,9 +489,9 @@ EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
/*-------------------------------------------------------------------------*/
-static void rx_complete (struct urb *urb);
+static void rx_complete(struct urb *urb);
-static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
+static int rx_submit(struct usbnet *dev, struct urb *urb, gfp_t flags)
{
struct sk_buff *skb;
struct skb_data *entry;
@@ -500,8 +511,8 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
if (!skb) {
netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
- usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
- usb_free_urb (urb);
+ usbnet_defer_kevent(dev, EVENT_RX_MEMORY);
+ usb_free_urb(urb);
return -ENOMEM;
}
@@ -510,26 +521,27 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
entry->dev = dev;
entry->length = 0;
- usb_fill_bulk_urb (urb, dev->udev, dev->in,
- skb->data, size, rx_complete, skb);
+ usb_fill_bulk_urb(urb, dev->udev, dev->in,
+ skb->data, size, rx_complete, skb);
- spin_lock_irqsave (&dev->rxq.lock, lockflags);
+ spin_lock_irqsave(&dev->rxq.lock, lockflags);
- if (netif_running (dev->net) &&
- netif_device_present (dev->net) &&
+ if (netif_running(dev->net) &&
+ netif_device_present(dev->net) &&
test_bit(EVENT_DEV_OPEN, &dev->flags) &&
- !test_bit (EVENT_RX_HALT, &dev->flags) &&
- !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
- switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
+ !test_bit(EVENT_RX_HALT, &dev->flags) &&
+ !test_bit(EVENT_DEV_ASLEEP, &dev->flags) &&
+ !usbnet_going_away(dev)) {
+ switch (retval = usb_submit_urb(urb, GFP_ATOMIC)) {
case -EPIPE:
- usbnet_defer_kevent (dev, EVENT_RX_HALT);
+ usbnet_defer_kevent(dev, EVENT_RX_HALT);
break;
case -ENOMEM:
- usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
+ usbnet_defer_kevent(dev, EVENT_RX_MEMORY);
break;
case -ENODEV:
netif_dbg(dev, ifdown, dev->net, "device gone\n");
- netif_device_detach (dev->net);
+ netif_device_detach(dev->net);
break;
case -EHOSTUNREACH:
retval = -ENOLINK;
@@ -537,20 +549,19 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
default:
netif_dbg(dev, rx_err, dev->net,
"rx submit, %d\n", retval);
- tasklet_schedule (&dev->bh);
+ queue_work(system_bh_wq, &dev->bh_work);
break;
case 0:
- if (!usbnet_going_away(dev))
- __usbnet_queue_skb(&dev->rxq, skb, rx_start);
+ __usbnet_queue_skb(&dev->rxq, skb, rx_start);
}
} else {
netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
retval = -ENOLINK;
}
- spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
+ spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
if (retval) {
- dev_kfree_skb_any (skb);
- usb_free_urb (urb);
+ dev_kfree_skb_any(skb);
+ usb_free_urb(urb);
}
return retval;
}
@@ -561,7 +572,7 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
static inline int rx_process(struct usbnet *dev, struct sk_buff *skb)
{
if (dev->driver_info->rx_fixup &&
- !dev->driver_info->rx_fixup (dev, skb)) {
+ !dev->driver_info->rx_fixup(dev, skb)) {
/* With RX_ASSEMBLE, rx_fixup() must update counters */
if (!(dev->driver_info->flags & FLAG_RX_ASSEMBLE))
dev->net->stats.rx_errors++;
@@ -586,7 +597,7 @@ static inline int rx_process(struct usbnet *dev, struct sk_buff *skb)
/*-------------------------------------------------------------------------*/
-static void rx_complete (struct urb *urb)
+static void rx_complete(struct urb *urb)
{
struct sk_buff *skb = (struct sk_buff *) urb->context;
struct skb_data *entry = (struct skb_data *) skb->cb;
@@ -594,7 +605,7 @@ static void rx_complete (struct urb *urb)
int urb_status = urb->status;
enum skb_state state;
- skb_put (skb, urb->actual_length);
+ skb_put(skb, urb->actual_length);
state = rx_done;
entry->urb = NULL;
@@ -610,7 +621,7 @@ static void rx_complete (struct urb *urb)
*/
case -EPIPE:
dev->net->stats.rx_errors++;
- usbnet_defer_kevent (dev, EVENT_RX_HALT);
+ usbnet_defer_kevent(dev, EVENT_RX_HALT);
fallthrough;
/* software-driven interface shutdown */
@@ -628,8 +639,8 @@ static void rx_complete (struct urb *urb)
case -ETIME:
case -EILSEQ:
dev->net->stats.rx_errors++;
- if (!timer_pending (&dev->delay)) {
- mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
+ if (!timer_pending(&dev->delay)) {
+ mod_timer(&dev->delay, jiffies + THROTTLE_JIFFIES);
netif_dbg(dev, link, dev->net,
"rx throttle %d\n", urb_status);
}
@@ -665,14 +676,14 @@ block:
state = defer_bh(dev, skb, &dev->rxq, state);
if (urb) {
- if (netif_running (dev->net) &&
- !test_bit (EVENT_RX_HALT, &dev->flags) &&
+ if (netif_running(dev->net) &&
+ !test_bit(EVENT_RX_HALT, &dev->flags) &&
state != unlink_start) {
- rx_submit (dev, urb, GFP_ATOMIC);
+ rx_submit(dev, urb, GFP_ATOMIC);
usb_mark_last_busy(dev->udev);
return;
}
- usb_free_urb (urb);
+ usb_free_urb(urb);
}
netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
@@ -691,6 +702,7 @@ void usbnet_resume_rx(struct usbnet *dev)
struct sk_buff *skb;
int num = 0;
+ local_bh_disable();
clear_bit(EVENT_RX_PAUSED, &dev->flags);
while ((skb = skb_dequeue(&dev->rxq_pause)) != NULL) {
@@ -698,7 +710,8 @@ void usbnet_resume_rx(struct usbnet *dev)
num++;
}
- tasklet_schedule(&dev->bh);
+ queue_work(system_bh_wq, &dev->bh_work);
+ local_bh_enable();
netif_dbg(dev, rx_status, dev->net,
"paused rx queue disabled, %d skbs requeued\n", num);
@@ -715,7 +728,7 @@ EXPORT_SYMBOL_GPL(usbnet_purge_paused_rxq);
// unlink pending rx/tx; completion handlers do all other cleanup
-static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
+static int unlink_urbs(struct usbnet *dev, struct sk_buff_head *q)
{
unsigned long flags;
struct sk_buff *skb;
@@ -748,7 +761,7 @@ found:
spin_unlock_irqrestore(&q->lock, flags);
// during some PM-driven resume scenarios,
// these (async) unlinks complete immediately
- retval = usb_unlink_urb (urb);
+ retval = usb_unlink_urb(urb);
if (retval != -EINPROGRESS && retval != 0)
netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
else
@@ -756,7 +769,7 @@ found:
usb_put_urb(urb);
spin_lock_irqsave(&q->lock, flags);
}
- spin_unlock_irqrestore (&q->lock, flags);
+ spin_unlock_irqrestore(&q->lock, flags);
return count;
}
@@ -767,7 +780,7 @@ void usbnet_unlink_rx_urbs(struct usbnet *dev)
{
if (netif_running(dev->net)) {
(void) unlink_urbs (dev, &dev->rxq);
- tasklet_schedule(&dev->bh);
+ queue_work(system_bh_wq, &dev->bh_work);
}
}
EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
@@ -810,14 +823,15 @@ static void usbnet_terminate_urbs(struct usbnet *dev)
remove_wait_queue(&dev->wait, &wait);
}
-int usbnet_stop (struct net_device *net)
+int usbnet_stop(struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
const struct driver_info *info = dev->driver_info;
int retval, pm, mpn;
clear_bit(EVENT_DEV_OPEN, &dev->flags);
- netif_stop_queue (net);
+ netif_stop_queue(net);
+ netdev_reset_queue(net);
netif_info(dev, ifdown, dev->net,
"stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
@@ -849,16 +863,16 @@ int usbnet_stop (struct net_device *net)
/* deferred work (timer, softirq, task) must also stop */
dev->flags = 0;
- del_timer_sync(&dev->delay);
- tasklet_kill(&dev->bh);
+ timer_delete_sync(&dev->delay);
+ cancel_work_sync(&dev->bh_work);
cancel_work_sync(&dev->kevent);
/* We have cyclic dependencies. Those calls are needed
* to break a cycle. We cannot fall into the gaps because
* we have a flag
*/
- tasklet_kill(&dev->bh);
- del_timer_sync(&dev->delay);
+ cancel_work_sync(&dev->bh_work);
+ timer_delete_sync(&dev->delay);
cancel_work_sync(&dev->kevent);
if (!pm)
@@ -879,7 +893,7 @@ EXPORT_SYMBOL_GPL(usbnet_stop);
// precondition: never called in_interrupt
-int usbnet_open (struct net_device *net)
+int usbnet_open(struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
int retval;
@@ -896,23 +910,29 @@ int usbnet_open (struct net_device *net)
}
// put into "known safe" state
- if (info->reset && (retval = info->reset (dev)) < 0) {
- netif_info(dev, ifup, dev->net,
- "open reset fail (%d) usbnet usb-%s-%s, %s\n",
- retval,
- dev->udev->bus->bus_name,
- dev->udev->devpath,
- info->description);
- goto done;
+ if (info->reset) {
+ retval = info->reset(dev);
+ if (retval < 0) {
+ netif_info(dev, ifup, dev->net,
+ "open reset fail (%d) usbnet usb-%s-%s, %s\n",
+ retval,
+ dev->udev->bus->bus_name,
+ dev->udev->devpath,
+ info->description);
+ goto done;
+ }
}
/* hard_mtu or rx_urb_size may change in reset() */
usbnet_update_max_qlen(dev);
// insist peer be connected
- if (info->check_connect && (retval = info->check_connect (dev)) < 0) {
- netif_err(dev, ifup, dev->net, "can't open; %d\n", retval);
- goto done;
+ if (info->check_connect) {
+ retval = info->check_connect(dev);
+ if (retval < 0) {
+ netif_err(dev, ifup, dev->net, "can't open; %d\n", retval);
+ goto done;
+ }
}
/* start any status interrupt transfer */
@@ -926,6 +946,7 @@ int usbnet_open (struct net_device *net)
}
set_bit(EVENT_DEV_OPEN, &dev->flags);
+ netdev_reset_queue(net);
netif_start_queue (net);
netif_info(dev, ifup, dev->net,
"open: enable queueing (rx %d, tx %d) mtu %d %s framing\n",
@@ -944,7 +965,7 @@ int usbnet_open (struct net_device *net)
clear_bit(EVENT_RX_KILL, &dev->flags);
// delay posting reads until we're fully open
- tasklet_schedule (&dev->bh);
+ queue_work(system_bh_wq, &dev->bh_work);
if (info->manage_power) {
retval = info->manage_power(dev, 1);
if (retval < 0) {
@@ -1002,6 +1023,13 @@ int usbnet_get_link_ksettings_internal(struct net_device *net,
else
cmd->base.speed = SPEED_UNKNOWN;
+ /* The standard "Universal Serial Bus Class Definitions
+ * for Communications Devices v1.2" does not specify
+ * anything about duplex status.
+	 * So set it to DUPLEX_UNKNOWN instead of the default DUPLEX_HALF.
+ */
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+
return 0;
}
EXPORT_SYMBOL_GPL(usbnet_get_link_ksettings_internal);
@@ -1028,13 +1056,13 @@ int usbnet_set_link_ksettings_mii(struct net_device *net,
}
EXPORT_SYMBOL_GPL(usbnet_set_link_ksettings_mii);
-u32 usbnet_get_link (struct net_device *net)
+u32 usbnet_get_link(struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
/* If a check_connect is defined, return its result */
if (dev->driver_info->check_connect)
- return dev->driver_info->check_connect (dev) == 0;
+ return dev->driver_info->check_connect(dev) == 0;
/* if the device has mii operations, use those */
if (dev->mii.mdio_read)
@@ -1056,18 +1084,18 @@ int usbnet_nway_reset(struct net_device *net)
}
EXPORT_SYMBOL_GPL(usbnet_nway_reset);
-void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
+void usbnet_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
{
struct usbnet *dev = netdev_priv(net);
strscpy(info->driver, dev->driver_name, sizeof(info->driver));
strscpy(info->fw_version, dev->driver_info->description,
sizeof(info->fw_version));
- usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info);
+ usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}
EXPORT_SYMBOL_GPL(usbnet_get_drvinfo);
-u32 usbnet_get_msglevel (struct net_device *net)
+u32 usbnet_get_msglevel(struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
@@ -1075,7 +1103,7 @@ u32 usbnet_get_msglevel (struct net_device *net)
}
EXPORT_SYMBOL_GPL(usbnet_get_msglevel);
-void usbnet_set_msglevel (struct net_device *net, u32 level)
+void usbnet_set_msglevel(struct net_device *net, u32 level)
{
struct usbnet *dev = netdev_priv(net);
@@ -1102,6 +1130,9 @@ static void __handle_link_change(struct usbnet *dev)
if (!test_bit(EVENT_DEV_OPEN, &dev->flags))
return;
+ if (test_and_clear_bit(EVENT_LINK_CARRIER_ON, &dev->flags))
+ netif_carrier_on(dev->net);
+
if (!netif_carrier_ok(dev->net)) {
/* kill URBs for reading packets to save bus bandwidth */
unlink_urbs(dev, &dev->rxq);
@@ -1112,7 +1143,7 @@ static void __handle_link_change(struct usbnet *dev)
*/
} else {
/* submitting URBs for reading packets */
- tasklet_schedule(&dev->bh);
+ queue_work(system_bh_wq, &dev->bh_work);
}
/* hard_mtu or rx_urb_size may change during link change */
@@ -1143,77 +1174,77 @@ static void __handle_set_rx_mode(struct usbnet *dev)
* especially now that control transfers can be queued.
*/
static void
-usbnet_deferred_kevent (struct work_struct *work)
+usbnet_deferred_kevent(struct work_struct *work)
{
struct usbnet *dev =
container_of(work, struct usbnet, kevent);
int status;
/* usb_clear_halt() needs a thread context */
- if (test_bit (EVENT_TX_HALT, &dev->flags)) {
- unlink_urbs (dev, &dev->txq);
+ if (test_bit(EVENT_TX_HALT, &dev->flags)) {
+ unlink_urbs(dev, &dev->txq);
status = usb_autopm_get_interface(dev->intf);
if (status < 0)
goto fail_pipe;
- status = usb_clear_halt (dev->udev, dev->out);
+ status = usb_clear_halt(dev->udev, dev->out);
usb_autopm_put_interface(dev->intf);
if (status < 0 &&
status != -EPIPE &&
status != -ESHUTDOWN) {
- if (netif_msg_tx_err (dev))
+ if (netif_msg_tx_err(dev))
fail_pipe:
netdev_err(dev->net, "can't clear tx halt, status %d\n",
status);
} else {
- clear_bit (EVENT_TX_HALT, &dev->flags);
+ clear_bit(EVENT_TX_HALT, &dev->flags);
if (status != -ESHUTDOWN)
- netif_wake_queue (dev->net);
+ netif_wake_queue(dev->net);
}
}
- if (test_bit (EVENT_RX_HALT, &dev->flags)) {
- unlink_urbs (dev, &dev->rxq);
+ if (test_bit(EVENT_RX_HALT, &dev->flags)) {
+ unlink_urbs(dev, &dev->rxq);
status = usb_autopm_get_interface(dev->intf);
if (status < 0)
goto fail_halt;
- status = usb_clear_halt (dev->udev, dev->in);
+ status = usb_clear_halt(dev->udev, dev->in);
usb_autopm_put_interface(dev->intf);
if (status < 0 &&
status != -EPIPE &&
status != -ESHUTDOWN) {
- if (netif_msg_rx_err (dev))
+ if (netif_msg_rx_err(dev))
fail_halt:
netdev_err(dev->net, "can't clear rx halt, status %d\n",
status);
} else {
- clear_bit (EVENT_RX_HALT, &dev->flags);
+ clear_bit(EVENT_RX_HALT, &dev->flags);
if (!usbnet_going_away(dev))
- tasklet_schedule(&dev->bh);
+ queue_work(system_bh_wq, &dev->bh_work);
}
}
- /* tasklet could resubmit itself forever if memory is tight */
- if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
+ /* work could resubmit itself forever if memory is tight */
+ if (test_bit(EVENT_RX_MEMORY, &dev->flags)) {
struct urb *urb = NULL;
int resched = 1;
- if (netif_running (dev->net))
- urb = usb_alloc_urb (0, GFP_KERNEL);
+ if (netif_running(dev->net))
+ urb = usb_alloc_urb(0, GFP_KERNEL);
else
- clear_bit (EVENT_RX_MEMORY, &dev->flags);
+ clear_bit(EVENT_RX_MEMORY, &dev->flags);
if (urb != NULL) {
- clear_bit (EVENT_RX_MEMORY, &dev->flags);
+ clear_bit(EVENT_RX_MEMORY, &dev->flags);
status = usb_autopm_get_interface(dev->intf);
if (status < 0) {
usb_free_urb(urb);
goto fail_lowmem;
}
- if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
+ if (rx_submit(dev, urb, GFP_KERNEL) == -ENOLINK)
resched = 0;
usb_autopm_put_interface(dev->intf);
fail_lowmem:
if (resched)
if (!usbnet_going_away(dev))
- tasklet_schedule(&dev->bh);
+ queue_work(system_bh_wq, &dev->bh_work);
}
}
@@ -1221,7 +1252,7 @@ fail_lowmem:
const struct driver_info *info = dev->driver_info;
int retval = 0;
- clear_bit (EVENT_LINK_RESET, &dev->flags);
+ clear_bit(EVENT_LINK_RESET, &dev->flags);
status = usb_autopm_get_interface(dev->intf);
if (status < 0)
goto skip_reset;
@@ -1241,10 +1272,10 @@ skip_reset:
__handle_link_change(dev);
}
- if (test_bit (EVENT_LINK_CHANGE, &dev->flags))
+ if (test_bit(EVENT_LINK_CHANGE, &dev->flags))
__handle_link_change(dev);
- if (test_bit (EVENT_SET_RX_MODE, &dev->flags))
+ if (test_bit(EVENT_SET_RX_MODE, &dev->flags))
__handle_set_rx_mode(dev);
@@ -1254,7 +1285,7 @@ skip_reset:
/*-------------------------------------------------------------------------*/
-static void tx_complete (struct urb *urb)
+static void tx_complete(struct urb *urb)
{
struct sk_buff *skb = (struct sk_buff *) urb->context;
struct skb_data *entry = (struct skb_data *) skb->cb;
@@ -1273,7 +1304,7 @@ static void tx_complete (struct urb *urb)
switch (urb->status) {
case -EPIPE:
- usbnet_defer_kevent (dev, EVENT_TX_HALT);
+ usbnet_defer_kevent(dev, EVENT_TX_HALT);
break;
/* software-driven interface shutdown */
@@ -1288,13 +1319,13 @@ static void tx_complete (struct urb *urb)
case -ETIME:
case -EILSEQ:
usb_mark_last_busy(dev->udev);
- if (!timer_pending (&dev->delay)) {
- mod_timer (&dev->delay,
- jiffies + THROTTLE_JIFFIES);
+ if (!timer_pending(&dev->delay)) {
+ mod_timer(&dev->delay,
+ jiffies + THROTTLE_JIFFIES);
netif_dbg(dev, link, dev->net,
"tx throttle %d\n", urb->status);
}
- netif_stop_queue (dev->net);
+ netif_stop_queue(dev->net);
break;
default:
netif_dbg(dev, tx_err, dev->net,
@@ -1309,12 +1340,12 @@ static void tx_complete (struct urb *urb)
/*-------------------------------------------------------------------------*/
-void usbnet_tx_timeout (struct net_device *net, unsigned int txqueue)
+void usbnet_tx_timeout(struct net_device *net, unsigned int txqueue)
{
struct usbnet *dev = netdev_priv(net);
- unlink_urbs (dev, &dev->txq);
- tasklet_schedule (&dev->bh);
+ unlink_urbs(dev, &dev->txq);
+ queue_work(system_bh_wq, &dev->bh_work);
/* this needs to be handled individually because the generic layer
* doesn't know what is sufficient and could not restore private
* information if a remedy of an unconditional reset were used.
@@ -1359,8 +1390,7 @@ static int build_dma_sg(const struct sk_buff *skb, struct urb *urb)
return 1;
}
-netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
- struct net_device *net)
+netdev_tx_t usbnet_start_xmit(struct sk_buff *skb, struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
unsigned int length;
@@ -1376,7 +1406,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
// some devices want funky USB-level framing, for
// win32 driver (usually) and/or hardware quirks
if (info->tx_fixup) {
- skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
+ skb = info->tx_fixup(dev, skb, GFP_ATOMIC);
if (!skb) {
/* packet collected; minidriver waiting for more */
if (info->flags & FLAG_MULTI_PACKET)
@@ -1386,7 +1416,8 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
}
}
- if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
netif_dbg(dev, tx_err, dev->net, "no urb\n");
goto drop;
}
@@ -1395,8 +1426,8 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
entry->urb = urb;
entry->dev = dev;
- usb_fill_bulk_urb (urb, dev->udev, dev->out,
- skb->data, skb->len, tx_complete, skb);
+ usb_fill_bulk_urb(urb, dev->udev, dev->out,
+ skb->data, skb->len, tx_complete, skb);
if (dev->can_dma_sg) {
if (build_dma_sg(skb, urb) < 0)
goto drop;
@@ -1466,8 +1497,8 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
case -EPIPE:
- netif_stop_queue (net);
- usbnet_defer_kevent (dev, EVENT_TX_HALT);
+ netif_stop_queue(net);
+ usbnet_defer_kevent(dev, EVENT_TX_HALT);
usb_autopm_put_interface_async(dev->intf);
break;
default:
@@ -1478,10 +1509,11 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
case 0:
netif_trans_update(net);
__usbnet_queue_skb(&dev->txq, skb, tx_start);
+ netdev_sent_queue(net, skb->len);
if (dev->txq.qlen >= TX_QLEN (dev))
netif_stop_queue (net);
}
- spin_unlock_irqrestore (&dev->txq.lock, flags);
+ spin_unlock_irqrestore(&dev->txq.lock, flags);
if (retval) {
netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
@@ -1489,7 +1521,7 @@ drop:
dev->net->stats.tx_dropped++;
not_drop:
if (skb)
- dev_kfree_skb_any (skb);
+ dev_kfree_skb_any(skb);
if (urb) {
kfree(urb->sg);
usb_free_urb(urb);
@@ -1536,11 +1568,12 @@ static inline void usb_free_skb(struct sk_buff *skb)
/*-------------------------------------------------------------------------*/
-// tasklet (work deferred from completions, in_irq) or timer
+// work (work deferred from completions, in_irq) or timer
-static void usbnet_bh (struct timer_list *t)
+static void usbnet_bh(struct timer_list *t)
{
- struct usbnet *dev = from_timer(dev, t, delay);
+ struct usbnet *dev = timer_container_of(dev, t, delay);
+ unsigned int bytes_compl = 0, pkts_compl = 0;
struct sk_buff *skb;
struct skb_data *entry;
@@ -1552,6 +1585,8 @@ static void usbnet_bh (struct timer_list *t)
usb_free_skb(skb);
continue;
case tx_done:
+ bytes_compl += skb->len;
+ pkts_compl++;
kfree(entry->urb->sg);
fallthrough;
case rx_cleanup:
@@ -1562,6 +1597,10 @@ static void usbnet_bh (struct timer_list *t)
}
}
+ spin_lock_bh(&dev->bql_spinlock);
+ netdev_completed_queue(dev->net, pkts_compl, bytes_compl);
+ spin_unlock_bh(&dev->bql_spinlock);
+
/* restart RX again after disabling due to high error rate */
clear_bit(EVENT_RX_KILL, &dev->flags);
@@ -1590,16 +1629,16 @@ static void usbnet_bh (struct timer_list *t)
"rxqlen %d --> %d\n",
temp, dev->rxq.qlen);
if (dev->rxq.qlen < RX_QLEN(dev))
- tasklet_schedule (&dev->bh);
+ queue_work(system_bh_wq, &dev->bh_work);
}
if (dev->txq.qlen < TX_QLEN (dev))
- netif_wake_queue (dev->net);
+ netif_wake_queue(dev->net);
}
}
-static void usbnet_bh_tasklet(struct tasklet_struct *t)
+static void usbnet_bh_work(struct work_struct *work)
{
- struct usbnet *dev = from_tasklet(dev, t, bh);
+ struct usbnet *dev = from_work(dev, work, bh_work);
usbnet_bh(&dev->delay);
}
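For reference, a minimal sketch of the tasklet-to-BH-workqueue pattern used throughout this conversion, assuming the system_bh_wq and from_work() helpers available in this tree (demo_* names are illustrative):

#include <linux/workqueue.h>

struct demo_dev {
	struct work_struct bh_work;
};

static void demo_bh_work(struct work_struct *work)
{
	struct demo_dev *d = from_work(d, work, bh_work);	/* replaces from_tasklet() */

	/* bottom-half processing runs here, still in BH context */
	(void)d;
}

static void demo_init(struct demo_dev *d)
{
	INIT_WORK(&d->bh_work, demo_bh_work);	/* replaces tasklet_setup() */
}

static void demo_kick(struct demo_dev *d)
{
	queue_work(system_bh_wq, &d->bh_work);	/* replaces tasklet_schedule() */
}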
@@ -1613,7 +1652,7 @@ static void usbnet_bh_tasklet(struct tasklet_struct *t)
// precondition: never called in_interrupt
-void usbnet_disconnect (struct usb_interface *intf)
+void usbnet_disconnect(struct usb_interface *intf)
{
struct usbnet *dev;
struct usb_device *xdev;
@@ -1626,7 +1665,7 @@ void usbnet_disconnect (struct usb_interface *intf)
return;
usbnet_mark_going_away(dev);
- xdev = interface_to_usbdev (intf);
+ xdev = interface_to_usbdev(intf);
netif_info(dev, probe, dev->net, "unregister '%s' usb-%s-%s, %s\n",
intf->dev.driver->name,
@@ -1634,7 +1673,9 @@ void usbnet_disconnect (struct usb_interface *intf)
dev->driver_info->description);
net = dev->net;
- unregister_netdev (net);
+ unregister_netdev(net);
+
+ cancel_work_sync(&dev->kevent);
while ((urb = usb_get_from_anchor(&dev->deferred))) {
dev_kfree_skb(urb->context);
@@ -1677,7 +1718,7 @@ static const struct device_type wwan_type = {
};
int
-usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
+usbnet_probe(struct usb_interface *udev, const struct usb_device_id *prod)
{
struct usbnet *dev;
struct net_device *net;
@@ -1703,7 +1744,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
dev_dbg (&udev->dev, "blacklisted by %s\n", name);
return -ENODEV;
}
- xdev = interface_to_usbdev (udev);
+ xdev = interface_to_usbdev(udev);
interface = udev->cur_altsetting;
status = -ENOMEM;
@@ -1731,11 +1772,12 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
skb_queue_head_init (&dev->txq);
skb_queue_head_init (&dev->done);
skb_queue_head_init(&dev->rxq_pause);
- tasklet_setup(&dev->bh, usbnet_bh_tasklet);
- INIT_WORK (&dev->kevent, usbnet_deferred_kevent);
+ spin_lock_init(&dev->bql_spinlock);
+ INIT_WORK(&dev->bh_work, usbnet_bh_work);
+ INIT_WORK(&dev->kevent, usbnet_deferred_kevent);
init_usb_anchor(&dev->deferred);
timer_setup(&dev->delay, usbnet_bh, 0);
- mutex_init (&dev->phy_mutex);
+ mutex_init(&dev->phy_mutex);
mutex_init(&dev->interrupt_mutex);
dev->interrupt_count = 0;
@@ -1748,7 +1790,6 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
dev->hard_mtu = net->mtu + net->hard_header_len;
net->min_mtu = 0;
net->max_mtu = ETH_MAX_MTU;
- net->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
net->netdev_ops = &usbnet_netdev_ops;
net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
@@ -1758,17 +1799,15 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
// allow device-specific bind/init procedures
// NOTE net->name still not usable ...
if (info->bind) {
- status = info->bind (dev, udev);
+ status = info->bind(dev, udev);
if (status < 0)
goto out1;
- // heuristic: "usb%d" for links we know are two-host,
- // else "eth%d" when there's reasonable doubt. userspace
- // can rename the link if it knows better.
+ /* heuristic: rename to "eth%d" if we are not sure this link
+ * is two-host (these links keep "usb%d")
+ */
if ((dev->driver_info->flags & FLAG_ETHER) != 0 &&
- ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 ||
- /* somebody touched it*/
- !is_zero_ether_addr(net->dev_addr)))
+ !usbnet_needs_usb_name_format(dev, net))
strscpy(net->name, "eth%d", sizeof(net->name));
/* WLAN devices should always be named "wlan%d" */
if ((dev->driver_info->flags & FLAG_WLAN) != 0)
@@ -1785,18 +1824,18 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
if (net->mtu > (dev->hard_mtu - net->hard_header_len))
net->mtu = dev->hard_mtu - net->hard_header_len;
} else if (!info->in || !info->out)
- status = usbnet_get_endpoints (dev, udev);
+ status = usbnet_get_endpoints(dev, udev);
else {
u8 ep_addrs[3] = {
info->in + USB_DIR_IN, info->out + USB_DIR_OUT, 0
};
- dev->in = usb_rcvbulkpipe (xdev, info->in);
- dev->out = usb_sndbulkpipe (xdev, info->out);
+ dev->in = usb_rcvbulkpipe(xdev, info->in);
+ dev->out = usb_sndbulkpipe(xdev, info->out);
if (!(info->flags & FLAG_NO_SETINT))
- status = usb_set_interface (xdev,
- interface->desc.bInterfaceNumber,
- interface->desc.bAlternateSetting);
+ status = usb_set_interface(xdev,
+ interface->desc.bInterfaceNumber,
+ interface->desc.bAlternateSetting);
else
status = 0;
@@ -1804,7 +1843,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
status = -EINVAL;
}
if (status >= 0 && dev->status)
- status = init_status (dev, udev);
+ status = init_status(dev, udev);
if (status < 0)
goto out3;
@@ -1838,7 +1877,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
}
}
- status = register_netdev (net);
+ status = register_netdev(net);
if (status)
goto out5;
netif_info(dev, probe, dev->net,
@@ -1849,9 +1888,9 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
net->dev_addr);
// ok, it's ready to go.
- usb_set_intfdata (udev, dev);
+ usb_set_intfdata(udev, dev);
- netif_device_attach (net);
+ netif_device_attach(net);
if (dev->driver_info->flags & FLAG_LINK_INTR)
usbnet_link_change(dev, 0, 0);
@@ -1864,7 +1903,7 @@ out4:
usb_free_urb(dev->interrupt);
out3:
if (info->unbind)
- info->unbind (dev, udev);
+ info->unbind(dev, udev);
out1:
/* subdrivers must undo all they did in bind() if they
* fail it, but we may fail later and a deferred kevent
@@ -1873,7 +1912,7 @@ out1:
*/
usbnet_mark_going_away(dev);
cancel_work_sync(&dev->kevent);
- del_timer_sync(&dev->delay);
+ timer_delete_sync(&dev->delay);
free_netdev(net);
out:
return status;
@@ -1887,7 +1926,7 @@ EXPORT_SYMBOL_GPL(usbnet_probe);
* resume only when the last interface is resumed
*/
-int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
+int usbnet_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usbnet *dev = usb_get_intfdata(intf);
@@ -1906,7 +1945,7 @@ int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
* accelerate emptying of the rx and queues, to avoid
* having everything error out.
*/
- netif_device_detach (dev->net);
+ netif_device_detach(dev->net);
usbnet_terminate_urbs(dev);
__usbnet_status_stop_force(dev);
@@ -1914,13 +1953,13 @@ int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
* reattach so runtime management can use and
* wake the device
*/
- netif_device_attach (dev->net);
+ netif_device_attach(dev->net);
}
return 0;
}
EXPORT_SYMBOL_GPL(usbnet_suspend);
-int usbnet_resume (struct usb_interface *intf)
+int usbnet_resume(struct usb_interface *intf)
{
struct usbnet *dev = usb_get_intfdata(intf);
struct sk_buff *skb;
@@ -1962,7 +2001,7 @@ int usbnet_resume (struct usb_interface *intf)
if (!(dev->txq.qlen >= TX_QLEN(dev)))
netif_tx_wake_all_queues(dev->net);
- tasklet_schedule (&dev->bh);
+ queue_work(system_bh_wq, &dev->bh_work);
}
}
@@ -2000,10 +2039,12 @@ EXPORT_SYMBOL(usbnet_manage_power);
void usbnet_link_change(struct usbnet *dev, bool link, bool need_reset)
{
	/* update link after link is reset */
- if (link && !need_reset)
- netif_carrier_on(dev->net);
- else
+ if (link && !need_reset) {
+ set_bit(EVENT_LINK_CARRIER_ON, &dev->flags);
+ } else {
+ clear_bit(EVENT_LINK_CARRIER_ON, &dev->flags);
netif_carrier_off(dev->net);
+ }
if (need_reset && link)
usbnet_defer_kevent(dev, EVENT_LINK_RESET);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 18148e068aa0..14e6f2a2fb77 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -17,6 +17,7 @@
#include <net/rtnetlink.h>
#include <net/dst.h>
+#include <net/netdev_lock.h>
#include <net/xfrm.h>
#include <net/xdp.h>
#include <linux/veth.h>
@@ -306,12 +307,10 @@ static void __veth_xdp_flush(struct veth_rq *rq)
static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
{
- if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {
- dev_kfree_skb_any(skb);
- return NET_RX_DROP;
- }
+ if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb)))
+ return NETDEV_TX_BUSY; /* signal qdisc layer */
- return NET_RX_SUCCESS;
+ return NET_RX_SUCCESS; /* same as NETDEV_TX_OK */
}
static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
@@ -345,11 +344,11 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
struct veth_rq *rq = NULL;
- int ret = NETDEV_TX_OK;
+ struct netdev_queue *txq;
struct net_device *rcv;
int length = skb->len;
bool use_napi = false;
- int rxq;
+ int ret, rxq;
rcu_read_lock();
rcv = rcu_dereference(priv->peer);
@@ -372,17 +371,43 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
}
skb_tx_timestamp(skb);
- if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) {
+
+ ret = veth_forward_skb(rcv, skb, rq, use_napi);
+ switch (ret) {
+ case NET_RX_SUCCESS: /* same as NETDEV_TX_OK */
if (!use_napi)
dev_sw_netstats_tx_add(dev, 1, length);
else
__veth_xdp_flush(rq);
- } else {
+ break;
+ case NETDEV_TX_BUSY:
+ /* If a qdisc is attached to our virtual device, returning
+ * NETDEV_TX_BUSY is allowed.
+ */
+ txq = netdev_get_tx_queue(dev, rxq);
+
+ if (qdisc_txq_has_no_queue(txq)) {
+ dev_kfree_skb_any(skb);
+ goto drop;
+ }
+ /* Restore Eth hdr pulled by dev_forward_skb/eth_type_trans */
+ __skb_push(skb, ETH_HLEN);
+ netif_tx_stop_queue(txq);
+ /* Makes sure NAPI peer consumer runs. Consumer is responsible
+ * for starting txq again, until then ndo_start_xmit (this
+ * function) will not be invoked by the netstack again.
+ */
+ __veth_xdp_flush(rq);
+ break;
+ case NET_RX_DROP: /* same as NET_XMIT_DROP */
drop:
atomic64_inc(&priv->dropped);
ret = NET_XMIT_DROP;
+ break;
+ default:
+ net_crit_ratelimited("%s(%s): Invalid return code(%d)",
+ __func__, dev->name, ret);
}
-
rcu_read_unlock();
return ret;
@@ -634,7 +659,7 @@ static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq,
break;
case XDP_TX:
orig_frame = *frame;
- xdp->rxq->mem = frame->mem;
+ xdp->rxq->mem.type = frame->mem_type;
if (unlikely(veth_xdp_tx(rq, xdp, bq) < 0)) {
trace_xdp_exception(rq->dev, xdp_prog, act);
frame = &orig_frame;
@@ -646,7 +671,7 @@ static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq,
goto xdp_xmit;
case XDP_REDIRECT:
orig_frame = *frame;
- xdp->rxq->mem = frame->mem;
+ xdp->rxq->mem.type = frame->mem_type;
if (xdp_do_redirect(rq->dev, xdp, xdp_prog)) {
frame = &orig_frame;
stats->rx_drops++;
@@ -684,8 +709,7 @@ static void veth_xdp_rcv_bulk_skb(struct veth_rq *rq, void **frames,
void *skbs[VETH_XDP_BATCH];
int i;
- if (xdp_alloc_skb_bulk(skbs, n_xdpf,
- GFP_ATOMIC | __GFP_ZERO) < 0) {
+ if (unlikely(!napi_skb_cache_get_bulk(skbs, n_xdpf))) {
for (i = 0; i < n_xdpf; i++)
xdp_return_frame(frames[i]);
stats->rx_drops += n_xdpf;
@@ -932,17 +956,28 @@ static int veth_poll(struct napi_struct *napi, int budget)
{
struct veth_rq *rq =
container_of(napi, struct veth_rq, xdp_napi);
+ struct veth_priv *priv = netdev_priv(rq->dev);
+ int queue_idx = rq->xdp_rxq.queue_index;
+ struct netdev_queue *peer_txq;
struct veth_stats stats = {};
+ struct net_device *peer_dev;
struct veth_xdp_tx_bq bq;
int done;
bq.count = 0;
+ /* NAPI functions as RCU section */
+ peer_dev = rcu_dereference_check(priv->peer, rcu_read_lock_bh_held());
+ peer_txq = peer_dev ? netdev_get_tx_queue(peer_dev, queue_idx) : NULL;
+
xdp_set_return_frame_no_direct();
done = veth_xdp_rcv(rq, budget, &bq, &stats);
if (stats.xdp_redirect > 0)
xdp_do_flush();
+ if (stats.xdp_tx > 0)
+ veth_xdp_flush(rq, &bq);
+ xdp_clear_return_frame_no_direct();
if (done < budget && napi_complete_done(napi, done)) {
/* Write rx_notify_masked before reading ptr_ring */
@@ -955,9 +990,12 @@ static int veth_poll(struct napi_struct *napi, int budget)
}
}
- if (stats.xdp_tx > 0)
- veth_xdp_flush(rq, &bq);
- xdp_clear_return_frame_no_direct();
+ /* Release backpressure per NAPI poll */
+ smp_rmb(); /* Paired with netif_tx_stop_queue set_bit */
+ if (peer_txq && netif_tx_queue_stopped(peer_txq)) {
+ txq_trans_cond_update(peer_txq);
+ netif_tx_wake_queue(peer_txq);
+ }
return done;
}
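
For reference, a minimal userspace sketch of the stop/wake backpressure protocol the veth hunks above implement: veth_xmit() returns NETDEV_TX_BUSY and stops its tx queue when the peer ring is full, and the peer's NAPI poll wakes the queue again once it has drained. The fixed-size ring, the txq_stopped flag and the function names below are illustrative stand-ins only; the real driver uses ptr_ring, netif_tx_stop_queue()/netif_tx_wake_queue() and smp barriers for the producer/consumer ordering.

#include <stdio.h>
#include <stdbool.h>

#define RING_SIZE 4

static int ring[RING_SIZE];
static int head, tail;			/* produce at head, consume at tail */
static bool txq_stopped;

static bool ring_full(void)  { return head - tail == RING_SIZE; }
static bool ring_empty(void) { return head == tail; }

static int xmit(int pkt)
{
	if (ring_full()) {
		txq_stopped = true;	/* like netif_tx_stop_queue() */
		return -1;		/* like NETDEV_TX_BUSY */
	}
	ring[head++ % RING_SIZE] = pkt;
	return 0;			/* like NETDEV_TX_OK */
}

static void napi_poll(void)
{
	while (!ring_empty())
		printf("consumed %d\n", ring[tail++ % RING_SIZE]);
	if (txq_stopped) {
		txq_stopped = false;	/* like netif_tx_wake_queue() */
		printf("peer txq woken\n");
	}
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		if (xmit(i))
			printf("pkt %d: busy, queue stopped\n", i);
	napi_poll();
	return 0;
}
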
@@ -1286,7 +1324,7 @@ static int veth_set_channels(struct net_device *dev,
if (peer)
netif_carrier_off(peer);
- /* try to allocate new resurces, as needed*/
+	/* try to allocate new resources, as needed */
err = veth_enable_range_safe(dev, old_rx_count, new_rx_count);
if (err)
goto out;
@@ -1765,10 +1803,13 @@ static int veth_init_queues(struct net_device *dev, struct nlattr *tb[])
return 0;
}
-static int veth_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int veth_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *peer_net = rtnl_newlink_peer_net(params);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
int err;
struct net_device *peer;
struct veth_priv *priv;
@@ -1776,24 +1817,15 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
unsigned char name_assign_type;
struct ifinfomsg *ifmp;
- struct net *net;
/*
* create and register peer first
*/
- if (data != NULL && data[VETH_INFO_PEER] != NULL) {
- struct nlattr *nla_peer;
+ if (data && data[VETH_INFO_PEER]) {
+ struct nlattr *nla_peer = data[VETH_INFO_PEER];
- nla_peer = data[VETH_INFO_PEER];
ifmp = nla_data(nla_peer);
- err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
- if (err < 0)
- return err;
-
- err = veth_validate(peer_tb, NULL, extack);
- if (err < 0)
- return err;
-
+ rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
tbp = peer_tb;
} else {
ifmp = NULL;
@@ -1808,16 +1840,10 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
name_assign_type = NET_NAME_ENUM;
}
- net = rtnl_link_get_net(src_net, tbp);
- if (IS_ERR(net))
- return PTR_ERR(net);
-
- peer = rtnl_create_link(net, ifname, name_assign_type,
+ peer = rtnl_create_link(peer_net, ifname, name_assign_type,
&veth_link_ops, tbp, extack);
- if (IS_ERR(peer)) {
- put_net(net);
+ if (IS_ERR(peer))
return PTR_ERR(peer);
- }
if (!ifmp || !tbp[IFLA_ADDRESS])
eth_hw_addr_random(peer);
@@ -1828,8 +1854,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
netif_inherit_tso_max(peer, dev);
err = register_netdevice(peer);
- put_net(net);
- net = NULL;
if (err < 0)
goto err_register_peer;
@@ -1952,6 +1976,7 @@ static struct rtnl_link_ops veth_link_ops = {
.newlink = veth_newlink,
.dellink = veth_dellink,
.policy = veth_policy,
+ .peer_type = VETH_INFO_PEER,
.maxtype = VETH_INFO_MAX,
.get_link_net = veth_get_link_net,
.get_num_tx_queues = veth_get_num_queues,
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 869586c17ffd..1bb3aeca66c6 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -35,6 +35,23 @@ module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
module_param(napi_tx, bool, 0644);
+#define VIRTIO_OFFLOAD_MAP_MIN 46
+#define VIRTIO_OFFLOAD_MAP_MAX 47
+#define VIRTIO_FEATURES_MAP_MIN 65
+#define VIRTIO_O2F_DELTA (VIRTIO_FEATURES_MAP_MIN - \
+ VIRTIO_OFFLOAD_MAP_MIN)
+
+static bool virtio_is_mapped_offload(unsigned int obit)
+{
+ return obit >= VIRTIO_OFFLOAD_MAP_MIN &&
+ obit <= VIRTIO_OFFLOAD_MAP_MAX;
+}
+
+static unsigned int virtio_offload_to_feature(unsigned int obit)
+{
+ return virtio_is_mapped_offload(obit) ? obit + VIRTIO_O2F_DELTA : obit;
+}
+
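
A standalone illustration of the offload-to-feature bit mapping added above: guest offload bits 46..47 are carried in the extended virtio feature range starting at bit 65, so the translation is a fixed delta of 65 - 46 = 19. The constants mirror the patch; the surrounding test harness is hypothetical.

#include <assert.h>
#include <stdio.h>

#define OFFLOAD_MAP_MIN  46
#define OFFLOAD_MAP_MAX  47
#define FEATURES_MAP_MIN 65
#define O2F_DELTA (FEATURES_MAP_MIN - OFFLOAD_MAP_MIN)

static unsigned int offload_to_feature(unsigned int obit)
{
	if (obit >= OFFLOAD_MAP_MIN && obit <= OFFLOAD_MAP_MAX)
		return obit + O2F_DELTA;	/* mapped offload bit */
	return obit;				/* everything else is 1:1 */
}

int main(void)
{
	assert(offload_to_feature(46) == 65);
	assert(offload_to_feature(47) == 66);
	assert(offload_to_feature(7) == 7);	/* unmapped bits unchanged */
	printf("delta = %d\n", O2F_DELTA);
	return 0;
}
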
/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN 128
@@ -45,9 +62,6 @@ module_param(napi_tx, bool, 0644);
#define VIRTIO_XDP_TX BIT(0)
#define VIRTIO_XDP_REDIR BIT(1)
-#define VIRTIO_XDP_FLAG BIT(0)
-#define VIRTIO_ORPHAN_FLAG BIT(1)
-
/* RX packet size EWMA. The average packet size is used to determine the packet
* buffer size when refilling RX rings. As the entire RX ring may be refilled
* at once, the weight is chosen so that the EWMA will be insensitive to short-
@@ -65,15 +79,19 @@ static const unsigned long guest_offloads[] = {
VIRTIO_NET_F_GUEST_CSUM,
VIRTIO_NET_F_GUEST_USO4,
VIRTIO_NET_F_GUEST_USO6,
- VIRTIO_NET_F_GUEST_HDRLEN
+ VIRTIO_NET_F_GUEST_HDRLEN,
+ VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_MAPPED,
+ VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_CSUM_MAPPED,
};
#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
- (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
- (1ULL << VIRTIO_NET_F_GUEST_ECN) | \
- (1ULL << VIRTIO_NET_F_GUEST_UFO) | \
- (1ULL << VIRTIO_NET_F_GUEST_USO4) | \
- (1ULL << VIRTIO_NET_F_GUEST_USO6))
+ (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
+ (1ULL << VIRTIO_NET_F_GUEST_ECN) | \
+ (1ULL << VIRTIO_NET_F_GUEST_UFO) | \
+ (1ULL << VIRTIO_NET_F_GUEST_USO4) | \
+ (1ULL << VIRTIO_NET_F_GUEST_USO6) | \
+ (1ULL << VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_MAPPED) | \
+ (1ULL << VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_CSUM_MAPPED))
struct virtnet_stat_desc {
char desc[ETH_GSTRING_LEN];
@@ -86,6 +104,7 @@ struct virtnet_sq_free_stats {
u64 bytes;
u64 napi_packets;
u64 napi_bytes;
+ u64 xsk;
};
struct virtnet_sq_stats {
@@ -298,6 +317,10 @@ struct send_queue {
/* Record whether sq is in reset state. */
bool reset;
+
+ struct xsk_buff_pool *xsk_pool;
+
+ dma_addr_t xsk_hdr_dma_addr;
};
/* Internal representation of a receive virtqueue */
@@ -356,28 +379,9 @@ struct receive_queue {
struct xdp_rxq_info xsk_rxq_info;
struct xdp_buff **xsk_buffs;
-
- /* Do dma by self */
- bool do_dma;
};
-/* This structure can contain rss message with maximum settings for indirection table and keysize
- * Note, that default structure that describes RSS configuration virtio_net_rss_config
- * contains same info but can't handle table values.
- * In any case, structure would be passed to virtio hw through sg_buf split by parts
- * because table sizes may be differ according to the device configuration.
- */
#define VIRTIO_NET_RSS_MAX_KEY_SIZE 40
-#define VIRTIO_NET_RSS_MAX_TABLE_LEN 128
-struct virtio_net_ctrl_rss {
- u32 hash_types;
- u16 indirection_table_mask;
- u16 unclassified_queue;
- u16 indirection_table[VIRTIO_NET_RSS_MAX_TABLE_LEN];
- u16 max_tx_vq;
- u8 hash_key_length;
- u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
-};
/* Control VQ buffers: protected by the rtnl lock */
struct control_buf {
@@ -421,7 +425,9 @@ struct virtnet_info {
u16 rss_indir_table_size;
u32 rss_hash_types_supported;
u32 rss_hash_types_saved;
- struct virtio_net_ctrl_rss rss;
+ struct virtio_net_rss_config_hdr *rss_hdr;
+ struct virtio_net_rss_config_trailer rss_trailer;
+ u8 rss_hash_key_data[VIRTIO_NET_RSS_MAX_KEY_SIZE];
/* Has control virtqueue */
bool has_cvq;
@@ -438,6 +444,13 @@ struct virtnet_info {
/* Work struct for delayed refilling if we run low on memory. */
struct delayed_work refill;
+ /* UDP tunnel support */
+ bool tx_tnl;
+
+ bool rx_tnl;
+
+ bool rx_tnl_csum;
+
/* Is delayed refill enabled? */
bool refill_enabled;
@@ -497,10 +510,14 @@ struct virtio_net_common_hdr {
struct virtio_net_hdr hdr;
struct virtio_net_hdr_mrg_rxbuf mrg_hdr;
struct virtio_net_hdr_v1_hash hash_v1_hdr;
+ struct virtio_net_hdr_v1_hash_tunnel tnl_hdr;
};
};
+static struct virtio_net_common_hdr xsk_hdr;
+
static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
+static void virtnet_sq_free_unused_buf_done(struct virtqueue *vq);
static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
struct net_device *dev,
unsigned int *xdp_xmit,
@@ -511,68 +528,120 @@ static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb,
struct sk_buff *curr_skb,
struct page *page, void *buf,
int len, int truesize);
+static void virtnet_xsk_completed(struct send_queue *sq, int num);
+
+enum virtnet_xmit_type {
+ VIRTNET_XMIT_TYPE_SKB,
+ VIRTNET_XMIT_TYPE_SKB_ORPHAN,
+ VIRTNET_XMIT_TYPE_XDP,
+ VIRTNET_XMIT_TYPE_XSK,
+};
-static bool is_xdp_frame(void *ptr)
+static size_t virtnet_rss_hdr_size(const struct virtnet_info *vi)
{
- return (unsigned long)ptr & VIRTIO_XDP_FLAG;
+ u16 indir_table_size = vi->has_rss ? vi->rss_indir_table_size : 1;
+
+ return struct_size(vi->rss_hdr, indirection_table, indir_table_size);
}
-static void *xdp_to_ptr(struct xdp_frame *ptr)
+static size_t virtnet_rss_trailer_size(const struct virtnet_info *vi)
{
- return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
+ return struct_size(&vi->rss_trailer, hash_key_data, vi->rss_key_size);
}
-static struct xdp_frame *ptr_to_xdp(void *ptr)
+/* We use the last two bits of the pointer to distinguish the xmit type. */
+#define VIRTNET_XMIT_TYPE_MASK (BIT(0) | BIT(1))
+
+#define VIRTIO_XSK_FLAG_OFFSET 2
+
+static enum virtnet_xmit_type virtnet_xmit_ptr_unpack(void **ptr)
{
- return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
+ unsigned long p = (unsigned long)*ptr;
+
+ *ptr = (void *)(p & ~VIRTNET_XMIT_TYPE_MASK);
+
+ return p & VIRTNET_XMIT_TYPE_MASK;
}
-static bool is_orphan_skb(void *ptr)
+static void *virtnet_xmit_ptr_pack(void *ptr, enum virtnet_xmit_type type)
{
- return (unsigned long)ptr & VIRTIO_ORPHAN_FLAG;
+ return (void *)((unsigned long)ptr | type);
}
-static void *skb_to_ptr(struct sk_buff *skb, bool orphan)
+static int virtnet_add_outbuf(struct send_queue *sq, int num, void *data,
+ enum virtnet_xmit_type type)
{
- return (void *)((unsigned long)skb | (orphan ? VIRTIO_ORPHAN_FLAG : 0));
+ return virtqueue_add_outbuf(sq->vq, sq->sg, num,
+ virtnet_xmit_ptr_pack(data, type),
+ GFP_ATOMIC);
}
-static struct sk_buff *ptr_to_skb(void *ptr)
+static u32 virtnet_ptr_to_xsk_buff_len(void *ptr)
{
- return (struct sk_buff *)((unsigned long)ptr & ~VIRTIO_ORPHAN_FLAG);
+ return ((unsigned long)ptr) >> VIRTIO_XSK_FLAG_OFFSET;
+}
+
+static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
+{
+ sg_dma_address(sg) = addr;
+ sg_dma_len(sg) = len;
}
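
The xmit-type packing above relies on the completion cookies (skb, xdp_frame, xsk length) being at least 4-byte aligned, which leaves the low two bits of the pointer free for a type tag. A minimal userspace sketch of the same trick, with illustrative names:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

enum xmit_type { XMIT_SKB, XMIT_SKB_ORPHAN, XMIT_XDP, XMIT_XSK };

#define XMIT_TYPE_MASK 0x3UL	/* low two bits, free because of alignment */

static void *ptr_pack(void *ptr, enum xmit_type type)
{
	return (void *)((uintptr_t)ptr | type);
}

static enum xmit_type ptr_unpack(void **ptr)
{
	uintptr_t p = (uintptr_t)*ptr;

	*ptr = (void *)(p & ~XMIT_TYPE_MASK);	/* strip the tag */
	return p & XMIT_TYPE_MASK;
}

int main(void)
{
	int *obj = malloc(sizeof(*obj));	/* malloc() is suitably aligned */
	void *cookie = ptr_pack(obj, XMIT_XDP);

	assert(ptr_unpack(&cookie) == XMIT_XDP);
	assert(cookie == (void *)obj);
	free(obj);
	return 0;
}
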
static void __free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
bool in_napi, struct virtnet_sq_free_stats *stats)
{
+ struct xdp_frame *frame;
+ struct sk_buff *skb;
unsigned int len;
void *ptr;
while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
- if (!is_xdp_frame(ptr)) {
- struct sk_buff *skb = ptr_to_skb(ptr);
+ switch (virtnet_xmit_ptr_unpack(&ptr)) {
+ case VIRTNET_XMIT_TYPE_SKB:
+ skb = ptr;
pr_debug("Sent skb %p\n", skb);
+ stats->napi_packets++;
+ stats->napi_bytes += skb->len;
+ napi_consume_skb(skb, in_napi);
+ break;
- if (is_orphan_skb(ptr)) {
- stats->packets++;
- stats->bytes += skb->len;
- } else {
- stats->napi_packets++;
- stats->napi_bytes += skb->len;
- }
+ case VIRTNET_XMIT_TYPE_SKB_ORPHAN:
+ skb = ptr;
+
+ stats->packets++;
+ stats->bytes += skb->len;
napi_consume_skb(skb, in_napi);
- } else {
- struct xdp_frame *frame = ptr_to_xdp(ptr);
+ break;
+
+ case VIRTNET_XMIT_TYPE_XDP:
+ frame = ptr;
stats->packets++;
stats->bytes += xdp_get_frame_len(frame);
xdp_return_frame(frame);
+ break;
+
+ case VIRTNET_XMIT_TYPE_XSK:
+ stats->bytes += virtnet_ptr_to_xsk_buff_len(ptr);
+ stats->xsk++;
+ break;
}
}
netdev_tx_completed_queue(txq, stats->napi_packets, stats->napi_bytes);
}
+static void virtnet_free_old_xmit(struct send_queue *sq,
+ struct netdev_queue *txq,
+ bool in_napi,
+ struct virtnet_sq_free_stats *stats)
+{
+ __free_old_xmit(sq, txq, in_napi, stats);
+
+ if (stats->xsk)
+ virtnet_xsk_completed(sq, stats->xsk);
+}
+
/* Converting between virtqueue no. and kernel tx/rx queue no.
* 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
*/
@@ -706,10 +775,26 @@ static bool virtqueue_napi_complete(struct napi_struct *napi,
return false;
}
+static void virtnet_tx_wake_queue(struct virtnet_info *vi,
+ struct send_queue *sq)
+{
+ unsigned int index = vq2txq(sq->vq);
+ struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
+
+ if (netif_tx_queue_stopped(txq)) {
+ u64_stats_update_begin(&sq->stats.syncp);
+ u64_stats_inc(&sq->stats.wake);
+ u64_stats_update_end(&sq->stats.syncp);
+ netif_tx_wake_queue(txq);
+ }
+}
+
static void skb_xmit_done(struct virtqueue *vq)
{
struct virtnet_info *vi = vq->vdev->priv;
- struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;
+ unsigned int index = vq2txq(vq);
+ struct send_queue *sq = &vi->sq[index];
+ struct napi_struct *napi = &sq->napi;
/* Suppress further interrupts. */
virtqueue_disable_cb(vq);
@@ -717,8 +802,7 @@ static void skb_xmit_done(struct virtqueue *vq)
if (napi->weight)
virtqueue_napi_schedule(napi, vq);
else
- /* We were probably waiting for more output buffers. */
- netif_wake_subqueue(vi->dev, vq2txq(vq));
+ virtnet_tx_wake_queue(vi, sq);
}
#define MRG_CTX_HEADER_SHIFT 22
@@ -738,6 +822,26 @@ static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}
+static int check_mergeable_len(struct net_device *dev, void *mrg_ctx,
+ unsigned int len)
+{
+ unsigned int headroom, tailroom, room, truesize;
+
+ truesize = mergeable_ctx_to_truesize(mrg_ctx);
+ headroom = mergeable_ctx_to_headroom(mrg_ctx);
+ tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
+ room = SKB_DATA_ALIGN(headroom + tailroom);
+
+ if (len > truesize - room) {
+ pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
+ dev->name, len, (unsigned long)(truesize - room));
+ DEV_STATS_INC(dev, rx_length_errors);
+ return -1;
+ }
+
+ return 0;
+}
+
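
check_mergeable_len() unpacks the per-buffer context that encodes truesize in the low MRG_CTX_HEADER_SHIFT bits and headroom above them, then rejects any received length that would overflow the posted buffer once headroom/tailroom are reserved. A self-contained sketch of that packing and check follows; the 320-byte tailroom and 64-byte alignment are illustrative stand-ins for sizeof(struct skb_shared_info) and SKB_DATA_ALIGN().

#include <assert.h>
#include <stdio.h>

#define MRG_CTX_HEADER_SHIFT 22
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

static void *ctx_pack(unsigned long truesize, unsigned long headroom)
{
	return (void *)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
}

static unsigned long ctx_to_truesize(void *ctx)
{
	return (unsigned long)ctx & ((1UL << MRG_CTX_HEADER_SHIFT) - 1);
}

static unsigned long ctx_to_headroom(void *ctx)
{
	return (unsigned long)ctx >> MRG_CTX_HEADER_SHIFT;
}

/* Mirrors the shape of check_mergeable_len(): len must fit in truesize
 * minus the reserved room.  Constants here are stand-ins, not kernel values.
 */
static int len_fits(void *ctx, unsigned int len)
{
	unsigned long truesize = ctx_to_truesize(ctx);
	unsigned long headroom = ctx_to_headroom(ctx);
	unsigned long tailroom = headroom ? 320 : 0;
	unsigned long room = ALIGN_UP(headroom + tailroom, 64);

	return len <= truesize - room;
}

int main(void)
{
	void *ctx = ctx_pack(4096, 256);

	assert(ctx_to_truesize(ctx) == 4096);
	assert(ctx_to_headroom(ctx) == 256);
	printf("1500 fits: %d, 4096 fits: %d\n",
	       len_fits(ctx, 1500), len_fits(ctx, 4096));
	return 0;
}
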
static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen,
unsigned int headroom,
unsigned int len)
@@ -821,17 +925,6 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
goto ok;
}
- /*
- * Verify that we can indeed put this data into a skb.
- * This is here to handle cases when the device erroneously
- * tries to receive more than is possible. This is usually
- * the case of a broken device.
- */
- if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
- net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
- dev_kfree_skb(skb);
- return NULL;
- }
BUG_ON(offset >= PAGE_SIZE);
while (len) {
unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
@@ -856,11 +949,14 @@ ok:
static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
{
+ struct virtnet_info *vi = rq->vq->vdev->priv;
struct page *page = virt_to_head_page(buf);
struct virtnet_rq_dma *dma;
void *head;
int offset;
+ BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs);
+
head = page_address(page);
dma = head;
@@ -870,7 +966,7 @@ static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
if (dma->need_sync && len) {
offset = buf - (head + sizeof(*dma));
- virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr,
+ virtqueue_map_sync_single_range_for_cpu(rq->vq, dma->addr,
offset, len,
DMA_FROM_DEVICE);
}
@@ -878,17 +974,20 @@ static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
if (dma->ref)
return;
- virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len,
- DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ virtqueue_unmap_single_attrs(rq->vq, dma->addr, dma->len,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
put_page(page);
}
static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
{
+ struct virtnet_info *vi = rq->vq->vdev->priv;
void *buf;
+ BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs);
+
buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
- if (buf && rq->do_dma)
+ if (buf)
virtnet_rq_unmap(rq, buf, *len);
return buf;
@@ -896,15 +995,13 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
{
+ struct virtnet_info *vi = rq->vq->vdev->priv;
struct virtnet_rq_dma *dma;
dma_addr_t addr;
u32 offset;
void *head;
- if (!rq->do_dma) {
- sg_init_one(rq->sg, buf, len);
- return;
- }
+ BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs);
head = page_address(rq->alloc_frag.page);
@@ -915,60 +1012,57 @@ static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
addr = dma->addr - sizeof(*dma) + offset;
sg_init_table(rq->sg, 1);
- rq->sg[0].dma_address = addr;
- rq->sg[0].length = len;
+ sg_fill_dma(rq->sg, addr, len);
}
static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
{
struct page_frag *alloc_frag = &rq->alloc_frag;
+ struct virtnet_info *vi = rq->vq->vdev->priv;
struct virtnet_rq_dma *dma;
void *buf, *head;
dma_addr_t addr;
- if (unlikely(!skb_page_frag_refill(size, alloc_frag, gfp)))
- return NULL;
+ BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs);
head = page_address(alloc_frag->page);
- if (rq->do_dma) {
- dma = head;
-
- /* new pages */
- if (!alloc_frag->offset) {
- if (rq->last_dma) {
- /* Now, the new page is allocated, the last dma
- * will not be used. So the dma can be unmapped
- * if the ref is 0.
- */
- virtnet_rq_unmap(rq, rq->last_dma, 0);
- rq->last_dma = NULL;
- }
+ dma = head;
- dma->len = alloc_frag->size - sizeof(*dma);
+ /* new pages */
+ if (!alloc_frag->offset) {
+ if (rq->last_dma) {
+ /* Now, the new page is allocated, the last dma
+ * will not be used. So the dma can be unmapped
+ * if the ref is 0.
+ */
+ virtnet_rq_unmap(rq, rq->last_dma, 0);
+ rq->last_dma = NULL;
+ }
- addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
- dma->len, DMA_FROM_DEVICE, 0);
- if (virtqueue_dma_mapping_error(rq->vq, addr))
- return NULL;
+ dma->len = alloc_frag->size - sizeof(*dma);
- dma->addr = addr;
- dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
+ addr = virtqueue_map_single_attrs(rq->vq, dma + 1,
+ dma->len, DMA_FROM_DEVICE, 0);
+ if (virtqueue_map_mapping_error(rq->vq, addr))
+ return NULL;
- /* Add a reference to dma to prevent the entire dma from
- * being released during error handling. This reference
- * will be freed after the pages are no longer used.
- */
- get_page(alloc_frag->page);
- dma->ref = 1;
- alloc_frag->offset = sizeof(*dma);
+ dma->addr = addr;
+ dma->need_sync = virtqueue_map_need_sync(rq->vq, addr);
- rq->last_dma = dma;
- }
+ /* Add a reference to dma to prevent the entire dma from
+ * being released during error handling. This reference
+ * will be freed after the pages are no longer used.
+ */
+ get_page(alloc_frag->page);
+ dma->ref = 1;
+ alloc_frag->offset = sizeof(*dma);
- ++dma->ref;
+ rq->last_dma = dma;
}
+ ++dma->ref;
+
buf = head + alloc_frag->offset;
get_page(alloc_frag->page);
@@ -990,7 +1084,7 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
return;
}
- if (rq->do_dma)
+ if (!vi->big_packets || vi->mergeable_rx_bufs)
virtnet_rq_unmap(rq, buf, 0);
virtnet_rq_free_buf(vi, rq, buf);
@@ -1001,7 +1095,7 @@ static void free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
{
struct virtnet_sq_free_stats stats = {0};
- __free_old_xmit(sq, txq, in_napi, &stats);
+ virtnet_free_old_xmit(sq, txq, in_napi, &stats);
/* Avoid overhead when no packets have been processed
* happens when called speculatively from start_xmit.
@@ -1025,11 +1119,10 @@ static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
return false;
}
-static void check_sq_full_and_disable(struct virtnet_info *vi,
- struct net_device *dev,
- struct send_queue *sq)
+static bool tx_may_stop(struct virtnet_info *vi,
+ struct net_device *dev,
+ struct send_queue *sq)
{
- bool use_napi = sq->napi.weight;
int qnum;
qnum = sq - vi->sq;
@@ -1044,20 +1137,39 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
* Since most packets only take 1 or 2 ring slots, stopping the queue
* early means 16 slots are typically wasted.
*/
- if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
+ if (sq->vq->num_free < MAX_SKB_FRAGS + 2) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
netif_tx_stop_queue(txq);
u64_stats_update_begin(&sq->stats.syncp);
u64_stats_inc(&sq->stats.stop);
u64_stats_update_end(&sq->stats.syncp);
+
+ return true;
+ }
+
+ return false;
+}
+
+static void check_sq_full_and_disable(struct virtnet_info *vi,
+ struct net_device *dev,
+ struct send_queue *sq)
+{
+ bool use_napi = sq->napi.weight;
+ int qnum;
+
+ qnum = sq - vi->sq;
+
+ if (tx_may_stop(vi, dev, sq)) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
+
if (use_napi) {
if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
virtqueue_napi_schedule(&sq->napi, sq->vq);
} else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
/* More just got used, free them then recheck. */
free_old_xmit(sq, txq, false);
- if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
+ if (sq->vq->num_free >= MAX_SKB_FRAGS + 2) {
netif_start_subqueue(dev, qnum);
u64_stats_update_begin(&sq->stats.syncp);
u64_stats_inc(&sq->stats.wake);
@@ -1068,21 +1180,29 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
}
}
-static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
-{
- sg->dma_address = addr;
- sg->length = len;
-}
-
+/* Note that @len is the length of received data without virtio header */
static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
- struct receive_queue *rq, void *buf, u32 len)
+ struct receive_queue *rq, void *buf,
+ u32 len, bool first_buf)
{
struct xdp_buff *xdp;
u32 bufsize;
xdp = (struct xdp_buff *)buf;
- bufsize = xsk_pool_get_rx_frame_size(rq->xsk_pool) + vi->hdr_len;
+ /* In virtnet_add_recvbuf_xsk, we use part of XDP_PACKET_HEADROOM for
+ * virtio header and ask the vhost to fill data from
+ * hard_start + XDP_PACKET_HEADROOM - vi->hdr_len
+ * The first buffer has virtio header so the remaining region for frame
+ * data is
+ * xsk_pool_get_rx_frame_size()
+	 * Buffers other than the first one do not have a virtio header, so
+	 * their maximum frame data length can be
+ * xsk_pool_get_rx_frame_size() + vi->hdr_len
+ */
+ bufsize = xsk_pool_get_rx_frame_size(rq->xsk_pool);
+ if (!first_buf)
+ bufsize += vi->hdr_len;
if (unlikely(len > bufsize)) {
pr_debug("%s: rx error: len %u exceeds truesize %u\n",
@@ -1092,7 +1212,14 @@ static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
return NULL;
}
- xsk_buff_set_size(xdp, len);
+ if (first_buf) {
+ xsk_buff_set_size(xdp, len);
+ } else {
+ xdp_prepare_buff(xdp, xdp->data_hard_start,
+ XDP_PACKET_HEADROOM - vi->hdr_len, len, 1);
+ xdp->flags = 0;
+ }
+
xsk_buff_dma_sync_for_cpu(xdp);
return xdp;
@@ -1207,7 +1334,7 @@ static int xsk_append_merge_buffer(struct virtnet_info *vi,
u64_stats_add(&stats->bytes, len);
- xdp = buf_to_xdp(vi, rq, buf, len);
+ xdp = buf_to_xdp(vi, rq, buf, len, false);
if (!xdp)
goto err;
@@ -1217,7 +1344,7 @@ static int xsk_append_merge_buffer(struct virtnet_info *vi,
goto err;
}
- memcpy(buf, xdp->data - vi->hdr_len, len);
+ memcpy(buf, xdp->data, len);
xsk_buff_free(xdp);
@@ -1256,9 +1383,14 @@ static struct sk_buff *virtnet_receive_xsk_merge(struct net_device *dev, struct
ret = XDP_PASS;
rcu_read_lock();
prog = rcu_dereference(rq->xdp_prog);
- /* TODO: support multi buffer. */
- if (prog && num_buf == 1)
- ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
+ if (prog) {
+ /* TODO: support multi buffer. */
+ if (num_buf == 1)
+ ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit,
+ stats);
+ else
+ ret = XDP_ABORTED;
+ }
rcu_read_unlock();
switch (ret) {
@@ -1305,7 +1437,7 @@ static void virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queu
u64_stats_add(&stats->bytes, len);
- xdp = buf_to_xdp(vi, rq, buf, len);
+ xdp = buf_to_xdp(vi, rq, buf, len, true);
if (!xdp)
return;
@@ -1354,7 +1486,8 @@ static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue
sg_init_table(rq->sg, 1);
sg_fill_dma(rq->sg, addr, len);
- err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, xsk_buffs[i], gfp);
+ err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1,
+ xsk_buffs[i], NULL, gfp);
if (err)
goto err;
}
@@ -1368,6 +1501,120 @@ err:
return err;
}
+static void *virtnet_xsk_to_ptr(u32 len)
+{
+ unsigned long p;
+
+ p = len << VIRTIO_XSK_FLAG_OFFSET;
+
+ return virtnet_xmit_ptr_pack((void *)p, VIRTNET_XMIT_TYPE_XSK);
+}
+
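
virtnet_xsk_to_ptr() stores the descriptor length, rather than a pointer, in the completion cookie: the length is shifted up by two bits and the XSK type tag occupies the bits below it, so virtnet_ptr_to_xsk_buff_len() can recover the length with a plain right shift. A small userspace sketch, assuming the XSK tag value 3 implied by the enum ordering above:

#include <assert.h>
#include <stdint.h>

#define XMIT_TYPE_MASK  0x3UL
#define XMIT_TYPE_XSK   0x3UL	/* assumed from the enum order in the patch */
#define XSK_FLAG_OFFSET 2

static void *xsk_len_to_ptr(uint32_t len)
{
	uintptr_t p = (uintptr_t)len << XSK_FLAG_OFFSET;

	return (void *)(p | XMIT_TYPE_XSK);
}

static uint32_t ptr_to_xsk_len(void *ptr)
{
	return (uintptr_t)ptr >> XSK_FLAG_OFFSET;	/* tag bits fall away */
}

int main(void)
{
	void *cookie = xsk_len_to_ptr(1514);

	assert(((uintptr_t)cookie & XMIT_TYPE_MASK) == XMIT_TYPE_XSK);
	assert(ptr_to_xsk_len(cookie) == 1514);
	return 0;
}
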
+static int virtnet_xsk_xmit_one(struct send_queue *sq,
+ struct xsk_buff_pool *pool,
+ struct xdp_desc *desc)
+{
+ struct virtnet_info *vi;
+ dma_addr_t addr;
+
+ vi = sq->vq->vdev->priv;
+
+ addr = xsk_buff_raw_get_dma(pool, desc->addr);
+ xsk_buff_raw_dma_sync_for_device(pool, addr, desc->len);
+
+ sg_init_table(sq->sg, 2);
+ sg_fill_dma(sq->sg, sq->xsk_hdr_dma_addr, vi->hdr_len);
+ sg_fill_dma(sq->sg + 1, addr, desc->len);
+
+ return virtqueue_add_outbuf_premapped(sq->vq, sq->sg, 2,
+ virtnet_xsk_to_ptr(desc->len),
+ GFP_ATOMIC);
+}
+
+static int virtnet_xsk_xmit_batch(struct send_queue *sq,
+ struct xsk_buff_pool *pool,
+ unsigned int budget,
+ u64 *kicks)
+{
+ struct xdp_desc *descs = pool->tx_descs;
+ bool kick = false;
+ u32 nb_pkts, i;
+ int err;
+
+ budget = min_t(u32, budget, sq->vq->num_free);
+
+ nb_pkts = xsk_tx_peek_release_desc_batch(pool, budget);
+ if (!nb_pkts)
+ return 0;
+
+ for (i = 0; i < nb_pkts; i++) {
+ err = virtnet_xsk_xmit_one(sq, pool, &descs[i]);
+ if (unlikely(err)) {
+ xsk_tx_completed(sq->xsk_pool, nb_pkts - i);
+ break;
+ }
+
+ kick = true;
+ }
+
+ if (kick && virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
+ (*kicks)++;
+
+ return i;
+}
+
+static bool virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
+ int budget)
+{
+ struct virtnet_info *vi = sq->vq->vdev->priv;
+ struct virtnet_sq_free_stats stats = {};
+ struct net_device *dev = vi->dev;
+ u64 kicks = 0;
+ int sent;
+
+	/* Avoid needlessly waking up the napi, so call __free_old_xmit() instead of
+ * free_old_xmit().
+ */
+ __free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq), true, &stats);
+
+ if (stats.xsk)
+ xsk_tx_completed(sq->xsk_pool, stats.xsk);
+
+ sent = virtnet_xsk_xmit_batch(sq, pool, budget, &kicks);
+
+ if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
+ check_sq_full_and_disable(vi, vi->dev, sq);
+
+ if (sent) {
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(vi->dev, sq - vi->sq);
+ txq_trans_cond_update(txq);
+ }
+
+ u64_stats_update_begin(&sq->stats.syncp);
+ u64_stats_add(&sq->stats.packets, stats.packets);
+ u64_stats_add(&sq->stats.bytes, stats.bytes);
+ u64_stats_add(&sq->stats.kicks, kicks);
+ u64_stats_add(&sq->stats.xdp_tx, sent);
+ u64_stats_update_end(&sq->stats.syncp);
+
+ if (xsk_uses_need_wakeup(pool))
+ xsk_set_tx_need_wakeup(pool);
+
+ return sent;
+}
+
+static void xsk_wakeup(struct send_queue *sq)
+{
+ if (napi_if_scheduled_mark_missed(&sq->napi))
+ return;
+
+ local_bh_disable();
+ virtqueue_napi_schedule(&sq->napi, sq->vq);
+ local_bh_enable();
+}
+
static int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
{
struct virtnet_info *vi = netdev_priv(dev);
@@ -1381,14 +1628,19 @@ static int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
sq = &vi->sq[qid];
- if (napi_if_scheduled_mark_missed(&sq->napi))
- return 0;
+ xsk_wakeup(sq);
+ return 0;
+}
- local_bh_disable();
- virtqueue_napi_schedule(&sq->napi, sq->vq);
- local_bh_enable();
+static void virtnet_xsk_completed(struct send_queue *sq, int num)
+{
+ xsk_tx_completed(sq->xsk_pool, num);
- return 0;
+	/* If this is called from rx poll, start_xmit or xdp xmit, we should
+	 * wake up the tx napi to consume the xsk tx queue, because the tx
+ * interrupt may not be triggered.
+ */
+ xsk_wakeup(sq);
}
static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
@@ -1431,8 +1683,7 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
skb_frag_size(frag), skb_frag_off(frag));
}
- err = virtqueue_add_outbuf(sq->vq, sq->sg, nr_frags + 1,
- xdp_to_ptr(xdpf), GFP_ATOMIC);
+ err = virtnet_add_outbuf(sq, nr_frags + 1, xdpf, VIRTNET_XMIT_TYPE_XDP);
if (unlikely(err))
return -ENOSPC; /* Caller handle free/refcnt */
@@ -1505,8 +1756,8 @@ static int virtnet_xdp_xmit(struct net_device *dev,
}
/* Free up any pending old buffers before queueing new ones. */
- __free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq),
- false, &stats);
+ virtnet_free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq),
+ false, &stats);
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
@@ -1625,7 +1876,8 @@ static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
* across multiple buffers (num_buf > 1), and we make sure buffers
* have enough headroom.
*/
-static struct page *xdp_linearize_page(struct receive_queue *rq,
+static struct page *xdp_linearize_page(struct net_device *dev,
+ struct receive_queue *rq,
int *num_buf,
struct page *p,
int offset,
@@ -1645,18 +1897,27 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
page_off += *len;
+	/* Only mergeable mode can enter this while loop. In small mode,
+	 * *num_buf == 1, so the loop is never entered.
+ */
while (--*num_buf) {
unsigned int buflen;
void *buf;
+ void *ctx;
int off;
- buf = virtnet_rq_get_buf(rq, &buflen, NULL);
+ buf = virtnet_rq_get_buf(rq, &buflen, &ctx);
if (unlikely(!buf))
goto err_buf;
p = virt_to_head_page(buf);
off = buf - page_address(p);
+ if (check_mergeable_len(dev, ctx, buflen)) {
+ put_page(p);
+ goto err_buf;
+ }
+
/* guard against a misconfigured or uncooperative backend that
* is sending packet larger than the MTU.
*/
@@ -1745,7 +2006,7 @@ static struct sk_buff *receive_small_xdp(struct net_device *dev,
headroom = vi->hdr_len + header_offset;
buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- xdp_page = xdp_linearize_page(rq, &num_buf, page,
+ xdp_page = xdp_linearize_page(dev, rq, &num_buf, page,
offset, header_offset,
&tlen);
if (!xdp_page)
@@ -1855,9 +2116,19 @@ static struct sk_buff *receive_big(struct net_device *dev,
struct virtnet_rq_stats *stats)
{
struct page *page = buf;
- struct sk_buff *skb =
- page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
+ struct sk_buff *skb;
+
+ /* Make sure that len does not exceed the size allocated in
+ * add_recvbuf_big.
+ */
+ if (unlikely(len > (vi->big_packets_num_skbfrags + 1) * PAGE_SIZE)) {
+ pr_debug("%s: rx error: len %u exceeds allocated size %lu\n",
+ dev->name, len,
+ (vi->big_packets_num_skbfrags + 1) * PAGE_SIZE);
+ goto err;
+ }
+ skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
u64_stats_add(&stats->bytes, len - vi->hdr_len);
if (unlikely(!skb))
goto err;
@@ -1933,10 +2204,9 @@ static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev,
skb_metadata_set(skb, metasize);
if (unlikely(xdp_buff_has_frags(xdp)))
- xdp_update_skb_shared_info(skb, nr_frags,
- sinfo->xdp_frags_size,
- xdp_frags_truesz,
- xdp_buff_is_frag_pfmemalloc(xdp));
+ xdp_update_skb_frags_info(skb, nr_frags, sinfo->xdp_frags_size,
+ xdp_frags_truesz,
+ xdp_buff_get_skb_flags(xdp));
return skb;
}
@@ -1954,10 +2224,9 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
struct virtnet_rq_stats *stats)
{
struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
- unsigned int headroom, tailroom, room;
- unsigned int truesize, cur_frag_size;
struct skb_shared_info *shinfo;
unsigned int xdp_frags_truesz = 0;
+ unsigned int truesize;
struct page *page;
skb_frag_t *frag;
int offset;
@@ -2000,21 +2269,14 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
page = virt_to_head_page(buf);
offset = buf - page_address(page);
- truesize = mergeable_ctx_to_truesize(ctx);
- headroom = mergeable_ctx_to_headroom(ctx);
- tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
- room = SKB_DATA_ALIGN(headroom + tailroom);
-
- cur_frag_size = truesize;
- xdp_frags_truesz += cur_frag_size;
- if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) {
+ if (check_mergeable_len(dev, ctx, len)) {
put_page(page);
- pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
- dev->name, len, (unsigned long)(truesize - room));
- DEV_STATS_INC(dev, rx_length_errors);
goto err;
}
+ truesize = mergeable_ctx_to_truesize(ctx);
+ xdp_frags_truesz += truesize;
+
frag = &shinfo->frags[shinfo->nr_frags++];
skb_frag_fill_page_desc(frag, page, offset, len);
if (page_is_pfmemalloc(page))
@@ -2080,7 +2342,7 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
*/
if (!xdp_prog->aux->xdp_has_frags) {
/* linearize data for XDP */
- xdp_page = xdp_linearize_page(rq, num_buf,
+ xdp_page = xdp_linearize_page(vi->dev, rq, num_buf,
*page, offset,
XDP_PACKET_HEADROOM,
len);
@@ -2228,18 +2490,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
struct sk_buff *head_skb, *curr_skb;
unsigned int truesize = mergeable_ctx_to_truesize(ctx);
unsigned int headroom = mergeable_ctx_to_headroom(ctx);
- unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
- unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
head_skb = NULL;
u64_stats_add(&stats->bytes, len - vi->hdr_len);
- if (unlikely(len > truesize - room)) {
- pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
- dev->name, len, (unsigned long)(truesize - room));
- DEV_STATS_INC(dev, rx_length_errors);
+ if (check_mergeable_len(dev, ctx, len))
goto err_skb;
- }
if (unlikely(vi->xdp_enabled)) {
struct bpf_prog *xdp_prog;
@@ -2274,17 +2530,10 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
u64_stats_add(&stats->bytes, len);
page = virt_to_head_page(buf);
- truesize = mergeable_ctx_to_truesize(ctx);
- headroom = mergeable_ctx_to_headroom(ctx);
- tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
- room = SKB_DATA_ALIGN(headroom + tailroom);
- if (unlikely(len > truesize - room)) {
- pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
- dev->name, len, (unsigned long)(truesize - room));
- DEV_STATS_INC(dev, rx_length_errors);
+ if (check_mergeable_len(dev, ctx, len))
goto err_skb;
- }
+ truesize = mergeable_ctx_to_truesize(ctx);
curr_skb = virtnet_skb_append_frag(head_skb, curr_skb, page,
buf, len, truesize);
if (!curr_skb)
@@ -2304,6 +2553,13 @@ err_buf:
return NULL;
}
+static inline u32
+virtio_net_hash_value(const struct virtio_net_hdr_v1_hash *hdr_hash)
+{
+ return __le16_to_cpu(hdr_hash->hash_value_lo) |
+ (__le16_to_cpu(hdr_hash->hash_value_hi) << 16);
+}
+
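
virtio_net_hash_value() recombines a 32-bit hash that the v1 header now reports as two little-endian 16-bit halves. A userspace sketch of the same recombination, using le16toh()/htole16() as stand-ins for __le16_to_cpu() and an illustrative struct:

#include <assert.h>
#include <stdint.h>
#include <endian.h>

struct hash_report {
	uint16_t hash_value_lo;		/* little-endian on the wire */
	uint16_t hash_value_hi;
};

static uint32_t hash_value(const struct hash_report *r)
{
	return (uint32_t)le16toh(r->hash_value_lo) |
	       ((uint32_t)le16toh(r->hash_value_hi) << 16);
}

int main(void)
{
	struct hash_report r = {
		.hash_value_lo = htole16(0xbeef),
		.hash_value_hi = htole16(0xdead),
	};

	assert(hash_value(&r) == 0xdeadbeefU);
	return 0;
}
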
static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
struct sk_buff *skb)
{
@@ -2330,7 +2586,7 @@ static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
default:
rss_hash_type = PKT_HASH_TYPE_NONE;
}
- skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
+ skb_set_hash(skb, virtio_net_hash_value(hdr_hash), rss_hash_type);
}
static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq,
@@ -2343,14 +2599,21 @@ static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *
if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
virtio_skb_set_hash(&hdr->hash_v1_hdr, skb);
- if (flags & VIRTIO_NET_HDR_F_DATA_VALID)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ hdr->hdr.flags = flags;
+ if (virtio_net_handle_csum_offload(skb, &hdr->hdr, vi->rx_tnl_csum)) {
+ net_warn_ratelimited("%s: bad csum: flags: %x, gso_type: %x rx_tnl_csum %d\n",
+ dev->name, hdr->hdr.flags,
+ hdr->hdr.gso_type, vi->rx_tnl_csum);
+ goto frame_err;
+ }
- if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
- virtio_is_little_endian(vi->vdev))) {
- net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
+ if (virtio_net_hdr_tnl_to_skb(skb, &hdr->tnl_hdr, vi->rx_tnl,
+ vi->rx_tnl_csum,
+ virtio_is_little_endian(vi->vdev))) {
+ net_warn_ratelimited("%s: bad gso: type: %x, size: %u, flags %x tunnel %d tnl csum %d\n",
dev->name, hdr->hdr.gso_type,
- hdr->hdr.gso_size);
+ hdr->hdr.gso_size, hdr->hdr.flags,
+ vi->rx_tnl, vi->rx_tnl_csum);
goto frame_err;
}
@@ -2383,22 +2646,28 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
return;
}
- /* 1. Save the flags early, as the XDP program might overwrite them.
+ /* About the flags below:
+ * 1. Save the flags early, as the XDP program might overwrite them.
* These flags ensure packets marked as VIRTIO_NET_HDR_F_DATA_VALID
* stay valid after XDP processing.
* 2. XDP doesn't work with partially checksummed packets (refer to
* virtnet_xdp_set()), so packets marked as
* VIRTIO_NET_HDR_F_NEEDS_CSUM get dropped during XDP processing.
*/
- flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags;
- if (vi->mergeable_rx_bufs)
+ if (vi->mergeable_rx_bufs) {
+ flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags;
skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
stats);
- else if (vi->big_packets)
+ } else if (vi->big_packets) {
+ void *p = page_address((struct page *)buf);
+
+ flags = ((struct virtio_net_common_hdr *)p)->hdr.flags;
skb = receive_big(dev, vi, rq, buf, len, stats);
- else
+ } else {
+ flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags;
skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
+ }
if (unlikely(!skb))
return;
@@ -2423,6 +2692,9 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
len = SKB_DATA_ALIGN(len) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ if (unlikely(!skb_page_frag_refill(len, &rq->alloc_frag, gfp)))
+ return -ENOMEM;
+
buf = virtnet_rq_alloc(rq, len, gfp);
if (unlikely(!buf))
return -ENOMEM;
@@ -2431,10 +2703,9 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
virtnet_rq_init_one_sg(rq, buf, vi->hdr_len + GOOD_PACKET_LEN);
- err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
+ err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx, gfp);
if (err < 0) {
- if (rq->do_dma)
- virtnet_rq_unmap(rq, buf, 0);
+ virtnet_rq_unmap(rq, buf, 0);
put_page(virt_to_head_page(buf));
}
@@ -2525,6 +2796,12 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
*/
len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
+ if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
+ return -ENOMEM;
+
+ if (!alloc_frag->offset && len + room + sizeof(struct virtnet_rq_dma) > alloc_frag->size)
+ len -= sizeof(struct virtnet_rq_dma);
+
buf = virtnet_rq_alloc(rq, len + room, gfp);
if (unlikely(!buf))
return -ENOMEM;
@@ -2546,10 +2823,9 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
virtnet_rq_init_one_sg(rq, buf, len);
ctx = mergeable_len_to_ctx(len + room, headroom);
- err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
+ err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx, gfp);
if (err < 0) {
- if (rq->do_dma)
- virtnet_rq_unmap(rq, buf, 0);
+ virtnet_rq_unmap(rq, buf, 0);
put_page(virt_to_head_page(buf));
}
@@ -2606,7 +2882,8 @@ static void skb_recv_done(struct virtqueue *rvq)
virtqueue_napi_schedule(&rq->napi, rvq);
}
-static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
+static void virtnet_napi_do_enable(struct virtqueue *vq,
+ struct napi_struct *napi)
{
napi_enable(napi);
@@ -2619,10 +2896,21 @@ static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
local_bh_enable();
}
-static void virtnet_napi_tx_enable(struct virtnet_info *vi,
- struct virtqueue *vq,
- struct napi_struct *napi)
+static void virtnet_napi_enable(struct receive_queue *rq)
{
+ struct virtnet_info *vi = rq->vq->vdev->priv;
+ int qidx = vq2rxq(rq->vq);
+
+ virtnet_napi_do_enable(rq->vq, &rq->napi);
+ netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, &rq->napi);
+}
+
+static void virtnet_napi_tx_enable(struct send_queue *sq)
+{
+ struct virtnet_info *vi = sq->vq->vdev->priv;
+ struct napi_struct *napi = &sq->napi;
+ int qidx = vq2txq(sq->vq);
+
if (!napi->weight)
return;
@@ -2634,13 +2922,30 @@ static void virtnet_napi_tx_enable(struct virtnet_info *vi,
return;
}
- return virtnet_napi_enable(vq, napi);
+ virtnet_napi_do_enable(sq->vq, napi);
+ netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_TX, napi);
}
-static void virtnet_napi_tx_disable(struct napi_struct *napi)
+static void virtnet_napi_tx_disable(struct send_queue *sq)
{
- if (napi->weight)
+ struct virtnet_info *vi = sq->vq->vdev->priv;
+ struct napi_struct *napi = &sq->napi;
+ int qidx = vq2txq(sq->vq);
+
+ if (napi->weight) {
+ netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_TX, NULL);
napi_disable(napi);
+ }
+}
+
+static void virtnet_napi_disable(struct receive_queue *rq)
+{
+ struct virtnet_info *vi = rq->vq->vdev->priv;
+ struct napi_struct *napi = &rq->napi;
+ int qidx = vq2rxq(rq->vq);
+
+ netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, NULL);
+ napi_disable(napi);
}
static void refill_work(struct work_struct *work)
@@ -2653,9 +2958,23 @@ static void refill_work(struct work_struct *work)
for (i = 0; i < vi->curr_queue_pairs; i++) {
struct receive_queue *rq = &vi->rq[i];
+ /*
+ * When queue API support is added in the future and the call
+ * below becomes napi_disable_locked, this driver will need to
+ * be refactored.
+ *
+ * One possible solution would be to:
+ * - cancel refill_work with cancel_delayed_work (note:
+ * non-sync)
+ * - cancel refill_work with cancel_delayed_work_sync in
+ * virtnet_remove after the netdev is unregistered
+ * - wrap all of the work in a lock (perhaps the netdev
+ * instance lock)
+ * - check netif_running() and return early to avoid a race
+ */
napi_disable(&rq->napi);
still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
- virtnet_napi_enable(rq->vq, &rq->napi);
+ virtnet_napi_do_enable(rq->vq, &rq->napi);
/* In theory, this can happen: if we don't get any buffers in
* we will *never* try to fill again.
@@ -2706,7 +3025,7 @@ static int virtnet_receive_packets(struct virtnet_info *vi,
}
} else {
while (packets < budget &&
- (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) {
+ (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
receive_buf(vi, rq, buf, len, NULL, xdp_xmit, stats);
packets++;
}
@@ -2776,14 +3095,8 @@ static void virtnet_poll_cleantx(struct receive_queue *rq, int budget)
free_old_xmit(sq, txq, !!budget);
} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
- if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
- if (netif_tx_queue_stopped(txq)) {
- u64_stats_update_begin(&sq->stats.syncp);
- u64_stats_inc(&sq->stats.wake);
- u64_stats_update_end(&sq->stats.syncp);
- }
- netif_tx_wake_queue(txq);
- }
+ if (sq->vq->num_free >= MAX_SKB_FRAGS + 2)
+ virtnet_tx_wake_queue(vi, sq);
__netif_tx_unlock(txq);
}
@@ -2852,8 +3165,8 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
{
- virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
- napi_disable(&vi->rq[qp_index].napi);
+ virtnet_napi_tx_disable(&vi->sq[qp_index]);
+ virtnet_napi_disable(&vi->rq[qp_index]);
xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
}
@@ -2872,9 +3185,8 @@ static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
if (err < 0)
goto err_xdp_reg_mem_model;
- netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, qp_index));
- virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
- virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
+ virtnet_napi_enable(&vi->rq[qp_index]);
+ virtnet_napi_tx_enable(&vi->sq[qp_index]);
return 0;
@@ -2956,7 +3268,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
struct virtnet_info *vi = sq->vq->vdev->priv;
unsigned int index = vq2txq(sq->vq);
struct netdev_queue *txq;
- int opaque;
+ int opaque, xsk_done = 0;
bool done;
if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
@@ -2968,15 +3280,18 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
txq = netdev_get_tx_queue(vi->dev, index);
__netif_tx_lock(txq, raw_smp_processor_id());
virtqueue_disable_cb(sq->vq);
- free_old_xmit(sq, txq, !!budget);
- if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
- if (netif_tx_queue_stopped(txq)) {
- u64_stats_update_begin(&sq->stats.syncp);
- u64_stats_inc(&sq->stats.wake);
- u64_stats_update_end(&sq->stats.syncp);
- }
- netif_tx_wake_queue(txq);
+ if (sq->xsk_pool)
+ xsk_done = virtnet_xsk_xmit(sq, sq->xsk_pool, budget);
+ else
+ free_old_xmit(sq, txq, !!budget);
+
+ if (sq->vq->num_free >= MAX_SKB_FRAGS + 2)
+ virtnet_tx_wake_queue(vi, sq);
+
+ if (xsk_done >= budget) {
+ __netif_tx_unlock(txq);
+ return budget;
}
opaque = virtqueue_enable_cb_prepare(sq->vq);
@@ -3004,32 +3319,37 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
static int xmit_skb(struct send_queue *sq, struct sk_buff *skb, bool orphan)
{
- struct virtio_net_hdr_mrg_rxbuf *hdr;
const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
struct virtnet_info *vi = sq->vq->vdev->priv;
+ struct virtio_net_hdr_v1_hash_tunnel *hdr;
int num_sg;
unsigned hdr_len = vi->hdr_len;
bool can_push;
pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
+ /* Make sure it's safe to cast between formats */
+ BUILD_BUG_ON(__alignof__(*hdr) != __alignof__(hdr->hash_hdr));
+ BUILD_BUG_ON(__alignof__(*hdr) != __alignof__(hdr->hash_hdr.hdr));
+
can_push = vi->any_header_sg &&
!((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
!skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
/* Even if we can, don't push here yet as this would skew
* csum_start offset below. */
if (can_push)
- hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
+ hdr = (struct virtio_net_hdr_v1_hash_tunnel *)(skb->data -
+ hdr_len);
else
- hdr = &skb_vnet_common_hdr(skb)->mrg_hdr;
+ hdr = &skb_vnet_common_hdr(skb)->tnl_hdr;
- if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
- virtio_is_little_endian(vi->vdev), false,
- 0))
+ if (virtio_net_hdr_tnl_from_skb(skb, hdr, vi->tx_tnl,
+ virtio_is_little_endian(vi->vdev), 0,
+ false))
return -EPROTO;
if (vi->mergeable_rx_bufs)
- hdr->num_buffers = 0;
+ hdr->hash_hdr.hdr.num_buffers = 0;
sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
if (can_push) {
@@ -3046,8 +3366,9 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb, bool orphan)
return num_sg;
num_sg++;
}
- return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg,
- skb_to_ptr(skb, orphan), GFP_ATOMIC);
+
+ return virtnet_add_outbuf(sq, num_sg, skb,
+ orphan ? VIRTNET_XMIT_TYPE_SKB_ORPHAN : VIRTNET_XMIT_TYPE_SKB);
}
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -3061,15 +3382,10 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
bool use_napi = sq->napi.weight;
bool kick;
- /* Free up any pending old buffers before queueing new ones. */
- do {
- if (use_napi)
- virtqueue_disable_cb(sq->vq);
-
+ if (!use_napi)
free_old_xmit(sq, txq, false);
-
- } while (use_napi && !xmit_more &&
- unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
+ else
+ virtqueue_disable_cb(sq->vq);
/* timestamp packet in software */
skb_tx_timestamp(skb);
@@ -3095,7 +3411,10 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
nf_reset_ct(skb);
}
- check_sq_full_and_disable(vi, dev, sq);
+ if (use_napi)
+ tx_may_stop(vi, dev, sq);
+ else
+		check_sq_full_and_disable(vi, dev, sq);
kick = use_napi ? __netdev_tx_sent_queue(txq, skb->len, xmit_more) :
!xmit_more || netif_xmit_stopped(txq);
@@ -3107,28 +3426,81 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
+ if (use_napi && kick && unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
+ virtqueue_napi_schedule(&sq->napi, sq->vq);
+
return NETDEV_TX_OK;
}
-static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
+static void __virtnet_rx_pause(struct virtnet_info *vi,
+ struct receive_queue *rq)
{
bool running = netif_running(vi->dev);
if (running) {
- napi_disable(&rq->napi);
+ virtnet_napi_disable(rq);
virtnet_cancel_dim(vi, &rq->dim);
}
}
-static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq)
+static void virtnet_rx_pause_all(struct virtnet_info *vi)
+{
+ int i;
+
+ /*
+ * Make sure refill_work does not run concurrently to
+ * avoid napi_disable race which leads to deadlock.
+ */
+ disable_delayed_refill(vi);
+ cancel_delayed_work_sync(&vi->refill);
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ __virtnet_rx_pause(vi, &vi->rq[i]);
+}
+
+static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
+{
+ /*
+ * Make sure refill_work does not run concurrently to
+ * avoid napi_disable race which leads to deadlock.
+ */
+ disable_delayed_refill(vi);
+ cancel_delayed_work_sync(&vi->refill);
+ __virtnet_rx_pause(vi, rq);
+}
+
+static void __virtnet_rx_resume(struct virtnet_info *vi,
+ struct receive_queue *rq,
+ bool refill)
{
bool running = netif_running(vi->dev);
+ bool schedule_refill = false;
- if (!try_fill_recv(vi, rq, GFP_KERNEL))
+ if (refill && !try_fill_recv(vi, rq, GFP_KERNEL))
+ schedule_refill = true;
+ if (running)
+ virtnet_napi_enable(rq);
+
+ if (schedule_refill)
schedule_delayed_work(&vi->refill, 0);
+}
- if (running)
- virtnet_napi_enable(rq->vq, &rq->napi);
+static void virtnet_rx_resume_all(struct virtnet_info *vi)
+{
+ int i;
+
+ enable_delayed_refill(vi);
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ if (i < vi->curr_queue_pairs)
+ __virtnet_rx_resume(vi, &vi->rq[i], true);
+ else
+ __virtnet_rx_resume(vi, &vi->rq[i], false);
+ }
+}
+
+static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq)
+{
+ enable_delayed_refill(vi);
+ __virtnet_rx_resume(vi, rq, true);
}
static int virtnet_rx_resize(struct virtnet_info *vi,
@@ -3140,7 +3512,7 @@ static int virtnet_rx_resize(struct virtnet_info *vi,
virtnet_rx_pause(vi, rq);
- err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf);
+ err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf, NULL);
if (err)
netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
@@ -3157,7 +3529,7 @@ static void virtnet_tx_pause(struct virtnet_info *vi, struct send_queue *sq)
qindex = sq - vi->sq;
if (running)
- virtnet_napi_tx_disable(&sq->napi);
+ virtnet_napi_tx_disable(sq);
txq = netdev_get_tx_queue(vi->dev, qindex);
@@ -3171,6 +3543,9 @@ static void virtnet_tx_pause(struct virtnet_info *vi, struct send_queue *sq)
/* Prevent the upper layer from trying to send packets. */
netif_stop_subqueue(vi->dev, qindex);
+ u64_stats_update_begin(&sq->stats.syncp);
+ u64_stats_inc(&sq->stats.stop);
+ u64_stats_update_end(&sq->stats.syncp);
__netif_tx_unlock_bh(txq);
}
@@ -3187,11 +3562,11 @@ static void virtnet_tx_resume(struct virtnet_info *vi, struct send_queue *sq)
__netif_tx_lock_bh(txq);
sq->reset = false;
- netif_tx_wake_queue(txq);
+ virtnet_tx_wake_queue(vi, sq);
__netif_tx_unlock_bh(txq);
if (running)
- virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
+ virtnet_napi_tx_enable(sq);
}
static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq,
@@ -3199,11 +3574,18 @@ static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq,
{
int qindex, err;
+ if (ring_num <= MAX_SKB_FRAGS + 2) {
+ netdev_err(vi->dev, "tx size (%d) cannot be smaller than %d\n",
+ ring_num, MAX_SKB_FRAGS + 2);
+ return -EINVAL;
+ }
+
qindex = sq - vi->sq;
virtnet_tx_pause(vi, sq);
- err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
+ err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf,
+ virtnet_sq_free_unused_buf_done);
if (err)
netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
@@ -3374,15 +3756,64 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
}
+static bool virtnet_commit_rss_command(struct virtnet_info *vi);
+
+static void virtnet_rss_update_by_qpairs(struct virtnet_info *vi, u16 queue_pairs)
+{
+ u32 indir_val = 0;
+ int i = 0;
+
+ for (; i < vi->rss_indir_table_size; ++i) {
+ indir_val = ethtool_rxfh_indir_default(i, queue_pairs);
+ vi->rss_hdr->indirection_table[i] = cpu_to_le16(indir_val);
+ }
+ vi->rss_trailer.max_tx_vq = cpu_to_le16(queue_pairs);
+}
+
static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
struct virtio_net_ctrl_mq *mq __free(kfree) = NULL;
- struct scatterlist sg;
+ struct virtio_net_rss_config_hdr *old_rss_hdr;
+ struct virtio_net_rss_config_trailer old_rss_trailer;
struct net_device *dev = vi->dev;
+ struct scatterlist sg;
if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
return 0;
+ /* First check whether we need to update RSS. Do the update only if both (1) RSS is
+ * enabled and (2) there is no user configuration.
+ *
+ * During rss command processing, device updates queue_pairs using rss.max_tx_vq. That is,
+ * the device updates queue_pairs together with rss, so we can skip the separate queue_pairs
+ * update (VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET below) and return directly.
+ */
+ if (vi->has_rss && !netif_is_rxfh_configured(dev)) {
+ old_rss_hdr = vi->rss_hdr;
+ old_rss_trailer = vi->rss_trailer;
+ vi->rss_hdr = devm_kzalloc(&dev->dev, virtnet_rss_hdr_size(vi), GFP_KERNEL);
+ if (!vi->rss_hdr) {
+ vi->rss_hdr = old_rss_hdr;
+ return -ENOMEM;
+ }
+
+ *vi->rss_hdr = *old_rss_hdr;
+ virtnet_rss_update_by_qpairs(vi, queue_pairs);
+
+ if (!virtnet_commit_rss_command(vi)) {
+ /* restore ctrl_rss if commit_rss_command failed */
+ devm_kfree(&dev->dev, vi->rss_hdr);
+ vi->rss_hdr = old_rss_hdr;
+ vi->rss_trailer = old_rss_trailer;
+
+ dev_warn(&dev->dev, "Fail to set num of queue pairs to %d, because committing RSS failed\n",
+ queue_pairs);
+ return -EINVAL;
+ }
+ devm_kfree(&dev->dev, old_rss_hdr);
+ goto succ;
+ }
+
mq = kzalloc(sizeof(*mq), GFP_KERNEL);
if (!mq)
return -ENOMEM;
@@ -3395,12 +3826,14 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
queue_pairs);
return -EINVAL;
- } else {
- vi->curr_queue_pairs = queue_pairs;
- /* virtnet_open() will refill when device is going to up. */
- if (dev->flags & IFF_UP)
- schedule_delayed_work(&vi->refill, 0);
}
+succ:
+ vi->curr_queue_pairs = queue_pairs;
+ /* virtnet_open() will refill when the device is brought up. */
+ spin_lock_bh(&vi->refill_lock);
+ if (dev->flags & IFF_UP && vi->refill_enabled)
+ schedule_delayed_work(&vi->refill, 0);
+ spin_unlock_bh(&vi->refill_lock);
return 0;
}
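Background for the RSS path in virtnet_set_queues() above: ethtool_rxfh_indir_default() (a helper in include/linux/ethtool.h) simply spreads indirection-table slots round-robin over the RX queues, so rebuilding the table for a new queue count reduces to the sketch below, and the max_tx_vq field in the RSS trailer carries the new queue count to the device as part of the same command. The example_ name is hypothetical.

/* Illustrative equivalent of the default spreading used by
 * virtnet_rss_update_by_qpairs(): slot i steers to queue i % queue_pairs,
 * which is what ethtool_rxfh_indir_default() returns.
 */
static inline u16 example_default_indir_entry(u32 index, u16 queue_pairs)
{
	return index % queue_pairs;
}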
@@ -3589,7 +4022,7 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
cpumask_var_t mask;
int stragglers;
int group_size;
- int i, j, cpu;
+ int i, start = 0, cpu;
int num_cpu;
int stride;
@@ -3603,16 +4036,18 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
stragglers = num_cpu >= vi->curr_queue_pairs ?
num_cpu % vi->curr_queue_pairs :
0;
- cpu = cpumask_first(cpu_online_mask);
for (i = 0; i < vi->curr_queue_pairs; i++) {
group_size = stride + (i < stragglers ? 1 : 0);
- for (j = 0; j < group_size; j++) {
+ for_each_online_cpu_wrap(cpu, start) {
+ if (!group_size--) {
+ start = cpu;
+ break;
+ }
cpumask_set_cpu(cpu, mask);
- cpu = cpumask_next_wrap(cpu, cpu_online_mask,
- nr_cpu_ids, false);
}
+
virtqueue_set_affinity(vi->rq[i].vq, mask);
virtqueue_set_affinity(vi->sq[i].vq, mask);
__netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
@@ -3822,24 +4257,12 @@ static int virtnet_set_ringparam(struct net_device *dev,
static bool virtnet_commit_rss_command(struct virtnet_info *vi)
{
struct net_device *dev = vi->dev;
- struct scatterlist sgs[4];
- unsigned int sg_buf_size;
+ struct scatterlist sgs[2];
/* prepare sgs */
- sg_init_table(sgs, 4);
-
- sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table);
- sg_set_buf(&sgs[0], &vi->rss, sg_buf_size);
-
- sg_buf_size = sizeof(uint16_t) * (vi->rss.indirection_table_mask + 1);
- sg_set_buf(&sgs[1], vi->rss.indirection_table, sg_buf_size);
-
- sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
- - offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
- sg_set_buf(&sgs[2], &vi->rss.max_tx_vq, sg_buf_size);
-
- sg_buf_size = vi->rss_key_size;
- sg_set_buf(&sgs[3], vi->rss.key, sg_buf_size);
+ sg_init_table(sgs, 2);
+ sg_set_buf(&sgs[0], vi->rss_hdr, virtnet_rss_hdr_size(vi));
+ sg_set_buf(&sgs[1], &vi->rss_trailer, virtnet_rss_trailer_size(vi));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
@@ -3856,28 +4279,24 @@ err:
static void virtnet_init_default_rss(struct virtnet_info *vi)
{
- u32 indir_val = 0;
- int i = 0;
-
- vi->rss.hash_types = vi->rss_hash_types_supported;
+ vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_supported);
vi->rss_hash_types_saved = vi->rss_hash_types_supported;
- vi->rss.indirection_table_mask = vi->rss_indir_table_size
- ? vi->rss_indir_table_size - 1 : 0;
- vi->rss.unclassified_queue = 0;
+ vi->rss_hdr->indirection_table_mask = vi->rss_indir_table_size
+ ? cpu_to_le16(vi->rss_indir_table_size - 1) : 0;
+ vi->rss_hdr->unclassified_queue = 0;
- for (; i < vi->rss_indir_table_size; ++i) {
- indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs);
- vi->rss.indirection_table[i] = indir_val;
- }
+ virtnet_rss_update_by_qpairs(vi, vi->curr_queue_pairs);
- vi->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0;
- vi->rss.hash_key_length = vi->rss_key_size;
+ vi->rss_trailer.hash_key_length = vi->rss_key_size;
- netdev_rss_key_fill(vi->rss.key, vi->rss_key_size);
+ netdev_rss_key_fill(vi->rss_hash_key_data, vi->rss_key_size);
}
-static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
+static int virtnet_get_hashflow(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
{
+ struct virtnet_info *vi = netdev_priv(dev);
+
info->data = 0;
switch (info->flow_type) {
case TCP_V4_FLOW:
@@ -3926,17 +4345,22 @@ static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_r
info->data = 0;
break;
}
+
+ return 0;
}
-static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info)
+static int virtnet_set_hashflow(struct net_device *dev,
+ const struct ethtool_rxfh_fields *info,
+ struct netlink_ext_ack *extack)
{
+ struct virtnet_info *vi = netdev_priv(dev);
u32 new_hashtypes = vi->rss_hash_types_saved;
bool is_disable = info->data & RXH_DISCARD;
bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3);
/* supports only 'sd', 'sdfn' and 'r' */
if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable))
- return false;
+ return -EINVAL;
switch (info->flow_type) {
case TCP_V4_FLOW:
@@ -3975,21 +4399,22 @@ static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *
break;
default:
/* unsupported flow */
- return false;
+ return -EINVAL;
}
/* if unsupported hashtype was set */
if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported))
- return false;
+ return -EINVAL;
if (new_hashtypes != vi->rss_hash_types_saved) {
vi->rss_hash_types_saved = new_hashtypes;
- vi->rss.hash_types = vi->rss_hash_types_saved;
+ vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_saved);
if (vi->dev->features & NETIF_F_RXHASH)
- return virtnet_commit_rss_command(vi);
+ if (!virtnet_commit_rss_command(vi))
+ return -EINVAL;
}
- return true;
+ return 0;
}
static void virtnet_get_drvinfo(struct net_device *dev,
@@ -5011,7 +5436,7 @@ static int virtnet_set_coalesce(struct net_device *dev,
struct netlink_ext_ack *extack)
{
struct virtnet_info *vi = netdev_priv(dev);
- int ret, queue_number, napi_weight;
+ int ret, queue_number, napi_weight, i;
bool update_napi = false;
/* Can't change NAPI weight if the link is up */
@@ -5040,6 +5465,14 @@ static int virtnet_set_coalesce(struct net_device *dev,
return ret;
if (update_napi) {
+ /* xsk xmit depends on the tx napi. So if xsk is active,
+ * prevent modifications to tx napi.
+ */
+ for (i = queue_number; i < vi->max_queue_pairs; i++) {
+ if (vi->sq[i].xsk_pool)
+ return -EBUSY;
+ }
+
for (; queue_number < vi->max_queue_pairs; queue_number++)
vi->sq[queue_number].napi.weight = napi_weight;
}
@@ -5156,11 +5589,11 @@ static int virtnet_get_rxfh(struct net_device *dev,
if (rxfh->indir) {
for (i = 0; i < vi->rss_indir_table_size; ++i)
- rxfh->indir[i] = vi->rss.indirection_table[i];
+ rxfh->indir[i] = le16_to_cpu(vi->rss_hdr->indirection_table[i]);
}
if (rxfh->key)
- memcpy(rxfh->key, vi->rss.key, vi->rss_key_size);
+ memcpy(rxfh->key, vi->rss_hash_key_data, vi->rss_key_size);
rxfh->hfunc = ETH_RSS_HASH_TOP;
@@ -5184,7 +5617,7 @@ static int virtnet_set_rxfh(struct net_device *dev,
return -EOPNOTSUPP;
for (i = 0; i < vi->rss_indir_table_size; ++i)
- vi->rss.indirection_table[i] = rxfh->indir[i];
+ vi->rss_hdr->indirection_table[i] = cpu_to_le16(rxfh->indir[i]);
update = true;
}
@@ -5196,7 +5629,7 @@ static int virtnet_set_rxfh(struct net_device *dev,
if (!vi->has_rss && !vi->has_rss_hash_report)
return -EOPNOTSUPP;
- memcpy(vi->rss.key, rxfh->key, vi->rss_key_size);
+ memcpy(vi->rss_hash_key_data, rxfh->key, vi->rss_key_size);
update = true;
}
@@ -5206,41 +5639,11 @@ static int virtnet_set_rxfh(struct net_device *dev,
return 0;
}
-static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
+static u32 virtnet_get_rx_ring_count(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
- int rc = 0;
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = vi->curr_queue_pairs;
- break;
- case ETHTOOL_GRXFH:
- virtnet_get_hashflow(vi, info);
- break;
- default:
- rc = -EOPNOTSUPP;
- }
-
- return rc;
-}
-
-static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
-{
- struct virtnet_info *vi = netdev_priv(dev);
- int rc = 0;
-
- switch (info->cmd) {
- case ETHTOOL_SRXFH:
- if (!virtnet_set_hashflow(vi, info))
- rc = -EINVAL;
-
- break;
- default:
- rc = -EOPNOTSUPP;
- }
-
- return rc;
+ return vi->curr_queue_pairs;
}
static const struct ethtool_ops virtnet_ethtool_ops = {
@@ -5266,8 +5669,9 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
.get_rxfh_indir_size = virtnet_get_rxfh_indir_size,
.get_rxfh = virtnet_get_rxfh,
.set_rxfh = virtnet_set_rxfh,
- .get_rxnfc = virtnet_get_rxnfc,
- .set_rxnfc = virtnet_set_rxnfc,
+ .get_rxfh_fields = virtnet_get_hashflow,
+ .set_rxfh_fields = virtnet_set_hashflow,
+ .get_rx_ring_count = virtnet_get_rx_ring_count,
};
static void virtnet_get_queue_stats_rx(struct net_device *dev, int i,
@@ -5353,6 +5757,10 @@ static void virtnet_get_base_stats(struct net_device *dev,
if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED)
tx->hw_drop_ratelimits = 0;
+
+ netdev_stat_queue_sum(dev,
+ dev->real_num_rx_queues, vi->max_queue_pairs, rx,
+ dev->real_num_tx_queues, vi->max_queue_pairs, tx);
}
static const struct netdev_stat_ops virtnet_stat_ops = {
@@ -5370,11 +5778,15 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
disable_rx_mode_work(vi);
flush_work(&vi->rx_mode_work);
+ if (netif_running(vi->dev)) {
+ rtnl_lock();
+ virtnet_close(vi->dev);
+ rtnl_unlock();
+ }
+
netif_tx_lock_bh(vi->dev);
netif_device_detach(vi->dev);
netif_tx_unlock_bh(vi->dev);
- if (netif_running(vi->dev))
- virtnet_close(vi->dev);
}
static int init_vqs(struct virtnet_info *vi);
@@ -5394,7 +5806,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
enable_rx_mode_work(vi);
if (netif_running(vi->dev)) {
+ rtnl_lock();
err = virtnet_open(vi->dev);
+ rtnl_unlock();
if (err)
return err;
}
@@ -5469,7 +5883,7 @@ static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queu
virtnet_rx_pause(vi, rq);
- err = virtqueue_reset(rq->vq, virtnet_rq_unmap_free_buf);
+ err = virtqueue_reset(rq->vq, virtnet_rq_unmap_free_buf, NULL);
if (err) {
netdev_err(vi->dev, "reset rx fail: rx queue index: %d err: %d\n", qindex, err);
@@ -5488,6 +5902,30 @@ unreg:
return err;
}
+static int virtnet_sq_bind_xsk_pool(struct virtnet_info *vi,
+ struct send_queue *sq,
+ struct xsk_buff_pool *pool)
+{
+ int err, qindex;
+
+ qindex = sq - vi->sq;
+
+ virtnet_tx_pause(vi, sq);
+
+ err = virtqueue_reset(sq->vq, virtnet_sq_free_unused_buf,
+ virtnet_sq_free_unused_buf_done);
+ if (err) {
+ netdev_err(vi->dev, "reset tx fail: tx queue index: %d err: %d\n", qindex, err);
+ pool = NULL;
+ }
+
+ sq->xsk_pool = pool;
+
+ virtnet_tx_resume(vi, sq);
+
+ return err;
+}
+
static int virtnet_xsk_pool_enable(struct net_device *dev,
struct xsk_buff_pool *pool,
u16 qid)
@@ -5496,6 +5934,7 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
struct receive_queue *rq;
struct device *dma_dev;
struct send_queue *sq;
+ dma_addr_t hdr_dma;
int err, size;
if (vi->hdr_len > xsk_pool_get_headroom(pool))
@@ -5533,6 +5972,13 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
if (!rq->xsk_buffs)
return -ENOMEM;
+ hdr_dma = virtqueue_map_single_attrs(sq->vq, &xsk_hdr, vi->hdr_len,
+ DMA_TO_DEVICE, 0);
+ if (virtqueue_map_mapping_error(sq->vq, hdr_dma)) {
+ err = -ENOMEM;
+ goto err_free_buffs;
+ }
+
err = xsk_pool_dma_map(pool, dma_dev, 0);
if (err)
goto err_xsk_map;
@@ -5541,11 +5987,26 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
if (err)
goto err_rq;
+ err = virtnet_sq_bind_xsk_pool(vi, sq, pool);
+ if (err)
+ goto err_sq;
+
+ /* We do not support tx offloads (such as tx csum) yet, so the tx
+ * virtnet hdr is always zero and all tx packets can share a single hdr.
+ */
+ sq->xsk_hdr_dma_addr = hdr_dma;
+
return 0;
+err_sq:
+ virtnet_rq_bind_xsk_pool(vi, rq, NULL);
err_rq:
xsk_pool_dma_unmap(pool, 0);
err_xsk_map:
+ virtqueue_unmap_single_attrs(rq->vq, hdr_dma, vi->hdr_len,
+ DMA_TO_DEVICE, 0);
+err_free_buffs:
+ kvfree(rq->xsk_buffs);
return err;
}
@@ -5554,19 +6015,24 @@ static int virtnet_xsk_pool_disable(struct net_device *dev, u16 qid)
struct virtnet_info *vi = netdev_priv(dev);
struct xsk_buff_pool *pool;
struct receive_queue *rq;
+ struct send_queue *sq;
int err;
if (qid >= vi->curr_queue_pairs)
return -EINVAL;
+ sq = &vi->sq[qid];
rq = &vi->rq[qid];
pool = rq->xsk_pool;
err = virtnet_rq_bind_xsk_pool(vi, rq, NULL);
+ err |= virtnet_sq_bind_xsk_pool(vi, sq, NULL);
xsk_pool_dma_unmap(pool, 0);
+ virtqueue_unmap_single_attrs(sq->vq, sq->xsk_hdr_dma_addr,
+ vi->hdr_len, DMA_TO_DEVICE, 0);
kvfree(rq->xsk_buffs);
return err;
@@ -5633,12 +6099,12 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
if (prog)
bpf_prog_add(prog, vi->max_queue_pairs - 1);
+ virtnet_rx_pause_all(vi);
+
/* Make sure NAPI is not using any XDP TX queues for RX. */
if (netif_running(dev)) {
- for (i = 0; i < vi->max_queue_pairs; i++) {
- napi_disable(&vi->rq[i].napi);
- virtnet_napi_tx_disable(&vi->sq[i].napi);
- }
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ virtnet_napi_tx_disable(&vi->sq[i]);
}
if (!prog) {
@@ -5670,14 +6136,12 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
vi->xdp_enabled = false;
}
+ virtnet_rx_resume_all(vi);
for (i = 0; i < vi->max_queue_pairs; i++) {
if (old_prog)
bpf_prog_put(old_prog);
- if (netif_running(dev)) {
- virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
- virtnet_napi_tx_enable(vi, vi->sq[i].vq,
- &vi->sq[i].napi);
- }
+ if (netif_running(dev))
+ virtnet_napi_tx_enable(&vi->sq[i]);
}
return 0;
@@ -5689,12 +6153,10 @@ err:
rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
}
+ virtnet_rx_resume_all(vi);
if (netif_running(dev)) {
- for (i = 0; i < vi->max_queue_pairs; i++) {
- virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
- virtnet_napi_tx_enable(vi, vi->sq[i].vq,
- &vi->sq[i].napi);
- }
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ virtnet_napi_tx_enable(&vi->sq[i]);
}
if (prog)
bpf_prog_sub(prog, vi->max_queue_pairs - 1);
@@ -5754,9 +6216,9 @@ static int virtnet_set_features(struct net_device *dev,
if ((dev->features ^ features) & NETIF_F_RXHASH) {
if (features & NETIF_F_RXHASH)
- vi->rss.hash_types = vi->rss_hash_types_saved;
+ vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_saved);
else
- vi->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
+ vi->rss_hdr->hash_types = cpu_to_le32(VIRTIO_NET_HASH_REPORT_NONE);
if (!virtnet_commit_rss_command(vi))
return -EINVAL;
@@ -5916,7 +6378,7 @@ static void free_receive_page_frags(struct virtnet_info *vi)
int i;
for (i = 0; i < vi->max_queue_pairs; i++)
if (vi->rq[i].alloc_frag.page) {
- if (vi->rq[i].do_dma && vi->rq[i].last_dma)
+ if (vi->rq[i].last_dma)
virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
put_page(vi->rq[i].alloc_frag.page);
}
@@ -5924,10 +6386,34 @@ static void free_receive_page_frags(struct virtnet_info *vi)
static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
{
- if (!is_xdp_frame(buf))
+ struct virtnet_info *vi = vq->vdev->priv;
+ struct send_queue *sq;
+ int i = vq2txq(vq);
+
+ sq = &vi->sq[i];
+
+ switch (virtnet_xmit_ptr_unpack(&buf)) {
+ case VIRTNET_XMIT_TYPE_SKB:
+ case VIRTNET_XMIT_TYPE_SKB_ORPHAN:
dev_kfree_skb(buf);
- else
- xdp_return_frame(ptr_to_xdp(buf));
+ break;
+
+ case VIRTNET_XMIT_TYPE_XDP:
+ xdp_return_frame(buf);
+ break;
+
+ case VIRTNET_XMIT_TYPE_XSK:
+ xsk_tx_completed(sq->xsk_pool, 1);
+ break;
+ }
+}
+
+static void virtnet_sq_free_unused_buf_done(struct virtqueue *vq)
+{
+ struct virtnet_info *vi = vq->vdev->priv;
+ int i = vq2txq(vq);
+
+ netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, i));
}
static void free_unused_bufs(struct virtnet_info *vi)
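The switch in virtnet_sq_free_unused_buf() above depends on the xmit pointer-packing scheme introduced earlier in this series. As a rough illustration of the idea (hypothetical names, not the driver's own helpers): a small type tag is stored in the low bits of the queued pointer, which is safe because the queued objects are at least 4-byte aligned.

/* Sketch of low-bit pointer tagging, assuming >= 4-byte object alignment. */
enum example_xmit_type {
	EXAMPLE_XMIT_SKB = 0,
	EXAMPLE_XMIT_XDP = 1,
	EXAMPLE_XMIT_XSK = 2,
};

static void *example_xmit_ptr_pack(void *ptr, enum example_xmit_type type)
{
	return (void *)((unsigned long)ptr | type);
}

static enum example_xmit_type example_xmit_ptr_unpack(void **ptr)
{
	unsigned long p = (unsigned long)*ptr;

	*ptr = (void *)(p & ~0x3UL);	/* strip the tag, return the real pointer */
	return p & 0x3;
}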
@@ -6076,8 +6562,9 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
INIT_DELAYED_WORK(&vi->refill, refill_work);
for (i = 0; i < vi->max_queue_pairs; i++) {
vi->rq[i].pages = NULL;
- netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll,
- napi_weight);
+ netif_napi_add_config(vi->dev, &vi->rq[i].napi, virtnet_poll,
+ i);
+ vi->rq[i].napi.weight = napi_weight;
netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi,
virtnet_poll_tx,
napi_tx ? napi_weight : 0);
@@ -6288,7 +6775,7 @@ static int virtnet_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash,
hash_report = VIRTIO_NET_HASH_REPORT_NONE;
*rss_type = virtnet_xdp_rss_type[hash_report];
- *hash = __le32_to_cpu(hdr_hash->hash_value);
+ *hash = virtio_net_hash_value(hdr_hash);
return 0;
}
@@ -6352,10 +6839,20 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_USO))
dev->hw_features |= NETIF_F_GSO_UDP_L4;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO)) {
+ dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ dev->hw_enc_features = dev->hw_features;
+ }
+ if (dev->hw_features & NETIF_F_GSO_UDP_TUNNEL &&
+ virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO_CSUM)) {
+ dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ dev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ }
+
dev->features |= NETIF_F_GSO_ROBUST;
if (gso)
- dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
+ dev->features |= dev->hw_features;
/* (!csum && gso) case will be fixed by register_netdev() */
}
@@ -6375,7 +6872,8 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->hw_features |= NETIF_F_GRO_HW;
dev->vlan_features = dev->features;
- dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY;
/* MTU range: 68 - 65535 */
dev->min_mtu = MIN_MTU;
@@ -6420,10 +6918,21 @@ static int virtnet_probe(struct virtio_device *vdev)
virtio_cread16(vdev, offsetof(struct virtio_net_config,
rss_max_indirection_table_length));
}
+ vi->rss_hdr = devm_kzalloc(&vdev->dev, virtnet_rss_hdr_size(vi), GFP_KERNEL);
+ if (!vi->rss_hdr) {
+ err = -ENOMEM;
+ goto free;
+ }
if (vi->has_rss || vi->has_rss_hash_report) {
vi->rss_key_size =
virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
+ if (vi->rss_key_size > VIRTIO_NET_RSS_MAX_KEY_SIZE) {
+ dev_err(&vdev->dev, "rss_max_key_size=%u exceeds the limit %u.\n",
+ vi->rss_key_size, VIRTIO_NET_RSS_MAX_KEY_SIZE);
+ err = -EINVAL;
+ goto free;
+ }
vi->rss_hash_types_supported =
virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types));
@@ -6436,7 +6945,10 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->xdp_metadata_ops = &virtnet_xdp_metadata_ops;
}
- if (vi->has_rss_hash_report)
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO) ||
+ virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO))
+ vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash_tunnel);
+ else if (vi->has_rss_hash_report)
vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash);
else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
@@ -6444,6 +6956,13 @@ static int virtnet_probe(struct virtio_device *vdev)
else
vi->hdr_len = sizeof(struct virtio_net_hdr);
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_CSUM))
+ vi->rx_tnl_csum = true;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO))
+ vi->rx_tnl = true;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO))
+ vi->tx_tnl = true;
+
if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
vi->any_header_sg = true;
@@ -6551,6 +7070,15 @@ static int virtnet_probe(struct virtio_device *vdev)
virtio_device_ready(vdev);
+ if (vi->has_rss || vi->has_rss_hash_report) {
+ if (!virtnet_commit_rss_command(vi)) {
+ dev_warn(&vdev->dev, "RSS disabled because committing failed.\n");
+ dev->hw_features &= ~NETIF_F_RXHASH;
+ vi->has_rss_hash_report = false;
+ vi->has_rss = false;
+ }
+ }
+
virtnet_set_queues(vi, vi->curr_queue_pairs);
/* a random MAC address has been assigned, notify the device.
@@ -6602,16 +7130,20 @@ static int virtnet_probe(struct virtio_device *vdev)
otherwise get link status from config. */
netif_carrier_off(dev);
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
- virtnet_config_changed_work(&vi->config_work);
+ virtio_config_changed(vi->vdev);
} else {
vi->status = VIRTIO_NET_S_LINK_UP;
virtnet_update_settings(vi);
netif_carrier_on(dev);
}
- for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
- if (virtio_has_feature(vi->vdev, guest_offloads[i]))
+ for (i = 0; i < ARRAY_SIZE(guest_offloads); i++) {
+ unsigned int fbit;
+
+ fbit = virtio_offload_to_feature(guest_offloads[i]);
+ if (virtio_has_feature(vi->vdev, fbit))
set_bit(guest_offloads[i], &vi->guest_offloads);
+ }
vi->guest_offloads_capable = vi->guest_offloads;
rtnl_unlock();
@@ -6643,11 +7175,20 @@ free:
static void remove_vq_common(struct virtnet_info *vi)
{
+ int i;
+
virtio_reset_device(vi->vdev);
/* Free unused buffers in both send and recv, if any. */
free_unused_bufs(vi);
+ /*
+ * As a rule of thumb, netdev_tx_reset_queue() should follow any
+ * skb freeing that is not paired with netdev_tx_completed_queue().
+ */
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, i));
+
free_receive_bufs(vi);
free_receive_page_frags(vi);
@@ -6732,6 +7273,10 @@ static struct virtio_device_id id_table[] = {
static unsigned int features[] = {
VIRTNET_FEATURES,
+ VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO,
+ VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_CSUM,
+ VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO,
+ VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO_CSUM,
};
static unsigned int features_legacy[] = {
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 6793fa09f9d1..0572f6a9bdb6 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -27,6 +27,10 @@
#include <linux/module.h>
#include <net/ip6_checksum.h>
+#ifdef CONFIG_X86
+#include <asm/msr.h>
+#endif
+
#include "vmxnet3_int.h"
#include "vmxnet3_xdp.h"
@@ -1568,6 +1572,30 @@ vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
return (hlen + (hdr.tcp->doff << 2));
}
+static void
+vmxnet3_lro_tunnel(struct sk_buff *skb, __be16 ip_proto)
+{
+ struct udphdr *uh = NULL;
+
+ if (ip_proto == htons(ETH_P_IP)) {
+ struct iphdr *iph = (struct iphdr *)skb->data;
+
+ if (iph->protocol == IPPROTO_UDP)
+ uh = (struct udphdr *)(iph + 1);
+ } else {
+ struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+
+ if (iph->nexthdr == IPPROTO_UDP)
+ uh = (struct udphdr *)(iph + 1);
+ }
+ if (uh) {
+ if (uh->check)
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
+ else
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+ }
+}
+
static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
struct vmxnet3_adapter *adapter, int quota)
@@ -1881,6 +1909,8 @@ sop_done:
if (segCnt != 0 && mss != 0) {
skb_shinfo(skb)->gso_type = rcd->v4 ?
SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
+ if (encap_lro)
+ vmxnet3_lro_tunnel(skb, skb->protocol);
skb_shinfo(skb)->gso_size = mss;
skb_shinfo(skb)->gso_segs = segCnt;
} else if ((segCnt != 0 || skb->len > mtu) && !encap_lro) {
@@ -2033,6 +2063,11 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
rq->comp_ring.gen = VMXNET3_INIT_GEN;
rq->comp_ring.next2proc = 0;
+
+ if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
+ page_pool_destroy(rq->page_pool);
+ rq->page_pool = NULL;
}
@@ -2073,11 +2108,6 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
}
}
- if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
- xdp_rxq_info_unreg(&rq->xdp_rxq);
- page_pool_destroy(rq->page_pool);
- rq->page_pool = NULL;
-
if (rq->data_ring.base) {
dma_free_coherent(&adapter->pdev->dev,
rq->rx_ring[0].size * rq->data_ring.desc_size,
@@ -3607,8 +3637,6 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
int err = 0;
- WRITE_ONCE(netdev->mtu, new_mtu);
-
/*
* Reset_work may be in the middle of resetting the device, wait for its
* completion.
@@ -3622,6 +3650,7 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
/* we need to re-create the rx queue based on the new mtu */
vmxnet3_rq_destroy_all(adapter);
+ WRITE_ONCE(netdev->mtu, new_mtu);
vmxnet3_adjust_rx_ring_size(adapter);
err = vmxnet3_rq_create_all(adapter);
if (err) {
@@ -3638,6 +3667,8 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
"Closing it\n", err);
goto out;
}
+ } else {
+ WRITE_ONCE(netdev->mtu, new_mtu);
}
out:
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 471f91c4204a..a14d0ad978e1 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -833,11 +833,19 @@ out:
}
static int
-vmxnet3_get_rss_hash_opts(struct vmxnet3_adapter *adapter,
- struct ethtool_rxnfc *info)
+vmxnet3_get_rss_hash_opts(struct net_device *netdev,
+ struct ethtool_rxfh_fields *info)
{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
enum Vmxnet3_RSSField rss_fields;
+ if (!VMXNET3_VERSION_GE_4(adapter))
+ return -EOPNOTSUPP;
+#ifdef VMXNET3_RSS
+ if (!adapter->rss)
+ return -EOPNOTSUPP;
+#endif
+
if (netif_running(adapter->netdev)) {
unsigned long flags;
@@ -900,10 +908,20 @@ vmxnet3_get_rss_hash_opts(struct vmxnet3_adapter *adapter,
static int
vmxnet3_set_rss_hash_opt(struct net_device *netdev,
- struct vmxnet3_adapter *adapter,
- struct ethtool_rxnfc *nfc)
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
- enum Vmxnet3_RSSField rss_fields = adapter->rss_fields;
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ enum Vmxnet3_RSSField rss_fields;
+
+ if (!VMXNET3_VERSION_GE_4(adapter))
+ return -EOPNOTSUPP;
+#ifdef VMXNET3_RSS
+ if (!adapter->rss)
+ return -EOPNOTSUPP;
+#endif
+
+ rss_fields = adapter->rss_fields;
/* RSS does not support anything other than hashing
* to queues on src and dst IPs and ports
@@ -1063,66 +1081,11 @@ vmxnet3_set_rss_hash_opt(struct net_device *netdev,
return 0;
}
-static int
-vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
- u32 *rules)
-{
- struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- int err = 0;
-
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = adapter->num_rx_queues;
- break;
- case ETHTOOL_GRXFH:
- if (!VMXNET3_VERSION_GE_4(adapter)) {
- err = -EOPNOTSUPP;
- break;
- }
-#ifdef VMXNET3_RSS
- if (!adapter->rss) {
- err = -EOPNOTSUPP;
- break;
- }
-#endif
- err = vmxnet3_get_rss_hash_opts(adapter, info);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- return err;
-}
-
-static int
-vmxnet3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
+static u32 vmxnet3_get_rx_ring_count(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- int err = 0;
- if (!VMXNET3_VERSION_GE_4(adapter)) {
- err = -EOPNOTSUPP;
- goto done;
- }
-#ifdef VMXNET3_RSS
- if (!adapter->rss) {
- err = -EOPNOTSUPP;
- goto done;
- }
-#endif
-
- switch (info->cmd) {
- case ETHTOOL_SRXFH:
- err = vmxnet3_set_rss_hash_opt(netdev, adapter, info);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
-done:
- return err;
+ return adapter->num_rx_queues;
}
#ifdef VMXNET3_RSS
@@ -1360,13 +1323,14 @@ static const struct ethtool_ops vmxnet3_ethtool_ops = {
.get_ethtool_stats = vmxnet3_get_ethtool_stats,
.get_ringparam = vmxnet3_get_ringparam,
.set_ringparam = vmxnet3_set_ringparam,
- .get_rxnfc = vmxnet3_get_rxnfc,
- .set_rxnfc = vmxnet3_set_rxnfc,
+ .get_rx_ring_count = vmxnet3_get_rx_ring_count,
#ifdef VMXNET3_RSS
.get_rxfh_indir_size = vmxnet3_get_rss_indir_size,
.get_rxfh = vmxnet3_get_rss,
.set_rxfh = vmxnet3_set_rss,
#endif
+ .get_rxfh_fields = vmxnet3_get_rss_hash_opts,
+ .set_rxfh_fields = vmxnet3_set_rss_hash_opt,
.get_link_ksettings = vmxnet3_get_link_ksettings,
.get_channels = vmxnet3_get_channels,
};
diff --git a/drivers/net/vmxnet3/vmxnet3_xdp.c b/drivers/net/vmxnet3/vmxnet3_xdp.c
index 1341374a4588..5f470499e600 100644
--- a/drivers/net/vmxnet3/vmxnet3_xdp.c
+++ b/drivers/net/vmxnet3/vmxnet3_xdp.c
@@ -28,7 +28,7 @@ vmxnet3_xdp_get_tq(struct vmxnet3_adapter *adapter)
if (likely(cpu < tq_number))
tq = &adapter->tx_queue[cpu];
else
- tq = &adapter->tx_queue[reciprocal_scale(cpu, tq_number)];
+ tq = &adapter->tx_queue[cpu % tq_number];
return tq;
}
@@ -124,6 +124,7 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
u32 buf_size;
u32 dw2;
+ spin_lock_irq(&tq->tx_lock);
dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
dw2 |= xdpf->len;
ctx.sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
@@ -134,6 +135,7 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
if (vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) == 0) {
tq->stats.tx_ring_full++;
+ spin_unlock_irq(&tq->tx_lock);
return -ENOSPC;
}
@@ -142,8 +144,10 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
xdpf->data, buf_size,
DMA_TO_DEVICE);
- if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
+ if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr)) {
+ spin_unlock_irq(&tq->tx_lock);
return -EFAULT;
+ }
tbi->map_type |= VMXNET3_MAP_SINGLE;
} else { /* XDP buffer from page pool */
page = virt_to_page(xdpf->data);
@@ -182,6 +186,7 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
dma_wmb();
gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
VMXNET3_TXD_GEN);
+ spin_unlock_irq(&tq->tx_lock);
/* No need to handle the case when tx_num_deferred doesn't reach
* threshold. Backend driver at hypervisor side will poll and reset
@@ -225,6 +230,7 @@ vmxnet3_xdp_xmit(struct net_device *dev,
{
struct vmxnet3_adapter *adapter = netdev_priv(dev);
struct vmxnet3_tx_queue *tq;
+ struct netdev_queue *nq;
int i;
if (unlikely(test_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)))
@@ -236,6 +242,9 @@ vmxnet3_xdp_xmit(struct net_device *dev,
if (tq->stopped)
return -ENETDOWN;
+ nq = netdev_get_tx_queue(adapter->netdev, tq->qid);
+
+ __netif_tx_lock(nq, smp_processor_id());
for (i = 0; i < n; i++) {
if (vmxnet3_xdp_xmit_frame(adapter, frames[i], tq, true)) {
tq->stats.xdp_xmit_err++;
@@ -243,6 +252,7 @@ vmxnet3_xdp_xmit(struct net_device *dev,
}
}
tq->stats.xdp_xmit += i;
+ __netif_tx_unlock(nq);
return i;
}
@@ -387,7 +397,7 @@ vmxnet3_process_xdp(struct vmxnet3_adapter *adapter,
xdp_init_buff(&xdp, PAGE_SIZE, &rq->xdp_rxq);
xdp_prepare_buff(&xdp, page_address(page), rq->page_pool->p.offset,
- rbi->len, false);
+ rcd->len, false);
xdp_buff_clear_frags_flag(&xdp);
xdp_prog = rcu_dereference(rq->adapter->xdp_bpf_prog);
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 67d25f4f94ef..571847a7f86d 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -26,6 +26,7 @@
#include <linux/inetdevice.h>
#include <net/arp.h>
+#include <net/flow.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
@@ -34,10 +35,10 @@
#include <net/addrconf.h>
#include <net/l3mdev.h>
#include <net/fib_rules.h>
+#include <net/netdev_lock.h>
#include <net/sch_generic.h>
#include <net/netns/generic.h>
#include <net/netfilter/nf_conntrack.h>
-#include <net/inet_dscp.h>
#define DRV_NAME "vrf"
#define DRV_VERSION "1.1"
@@ -122,16 +123,6 @@ struct net_vrf {
int ifindex;
};
-static void vrf_rx_stats(struct net_device *dev, int len)
-{
- struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
-
- u64_stats_update_begin(&dstats->syncp);
- u64_stats_inc(&dstats->rx_packets);
- u64_stats_add(&dstats->rx_bytes, len);
- u64_stats_update_end(&dstats->syncp);
-}
-
static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
vrf_dev->stats.tx_errors++;
@@ -352,15 +343,13 @@ unlock:
static bool qdisc_tx_is_default(const struct net_device *dev)
{
struct netdev_queue *txq;
- struct Qdisc *qdisc;
if (dev->num_tx_queues > 1)
return false;
txq = netdev_get_tx_queue(dev, 0);
- qdisc = rcu_access_pointer(txq->qdisc);
- return !qdisc->enqueue;
+ return qdisc_txq_has_no_queue(txq);
}
/* Local traffic destined to local address. Reinsert the packet to rx
@@ -369,7 +358,7 @@ static bool qdisc_tx_is_default(const struct net_device *dev)
static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
struct dst_entry *dst)
{
- int len = skb->len;
+ unsigned int len = skb->len;
skb_orphan(skb);
@@ -382,15 +371,10 @@ static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
skb->protocol = eth_type_trans(skb, dev);
- if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) {
- vrf_rx_stats(dev, len);
- } else {
- struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
-
- u64_stats_update_begin(&dstats->syncp);
- u64_stats_inc(&dstats->rx_drops);
- u64_stats_update_end(&dstats->syncp);
- }
+ if (likely(__netif_rx(skb) == NET_RX_SUCCESS))
+ dev_dstats_rx_add(dev, len);
+ else
+ dev_dstats_rx_dropped(dev);
return NETDEV_TX_OK;
}
@@ -521,7 +505,7 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
/* needed to match OIF rule */
fl4.flowi4_l3mdev = vrf_dev->ifindex;
fl4.flowi4_iif = LOOPBACK_IFINDEX;
- fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h));
+ fl4.flowi4_dscp = ip4h_dscp(ip4h);
fl4.flowi4_flags = FLOWI_FLAG_ANYSRC;
fl4.flowi4_proto = ip4h->protocol;
fl4.daddr = ip4h->daddr;
@@ -578,20 +562,14 @@ static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+ unsigned int len = skb->len;
+ netdev_tx_t ret;
- int len = skb->len;
- netdev_tx_t ret = is_ip_tx_frame(skb, dev);
-
- u64_stats_update_begin(&dstats->syncp);
- if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
-
- u64_stats_inc(&dstats->tx_packets);
- u64_stats_add(&dstats->tx_bytes, len);
- } else {
- u64_stats_inc(&dstats->tx_drops);
- }
- u64_stats_update_end(&dstats->syncp);
+ ret = is_ip_tx_frame(skb, dev);
+ if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN))
+ dev_dstats_tx_add(dev, len);
+ else
+ dev_dstats_tx_dropped(dev);
return ret;
}
@@ -1324,6 +1302,8 @@ static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev,
struct net *net = dev_net(vrf_dev);
struct rt6_info *rt6;
+ skb_dst_drop(skb);
+
rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex, skb,
RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE);
if (unlikely(!rt6))
@@ -1364,7 +1344,7 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
if (!is_ndisc) {
struct net_device *orig_dev = skb->dev;
- vrf_rx_stats(vrf_dev, skb->len);
+ dev_dstats_rx_add(vrf_dev, skb->len);
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
@@ -1420,7 +1400,7 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
goto out;
}
- vrf_rx_stats(vrf_dev, skb->len);
+ dev_dstats_rx_add(vrf_dev, skb->len);
if (!list_empty(&vrf_dev->ptype_all)) {
int err;
@@ -1558,14 +1538,12 @@ static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
nlmsg_end(skb, nlh);
- /* fib_nl_{new,del}rule handling looks for net from skb->sk */
- skb->sk = dev_net(dev)->rtnl;
if (add_it) {
- err = fib_nl_newrule(skb, nlh, NULL);
+ err = fib_newrule(dev_net(dev), skb, nlh, NULL, true);
if (err == -EEXIST)
err = 0;
} else {
- err = fib_nl_delrule(skb, nlh, NULL);
+ err = fib_delrule(dev_net(dev), skb, nlh, NULL, true);
if (err == -ENOENT)
err = 0;
}
@@ -1640,7 +1618,7 @@ static void vrf_setup(struct net_device *dev)
dev->lltx = true;
/* don't allow vrf devices to change network namespaces. */
- dev->netns_local = true;
+ dev->netns_immutable = true;
/* does not make sense for a VLAN to be added to a vrf device */
dev->features |= NETIF_F_VLAN_CHALLENGED;
@@ -1698,11 +1676,12 @@ static void vrf_dellink(struct net_device *dev, struct list_head *head)
unregister_netdevice_queue(dev, head);
}
-static int vrf_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int vrf_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
struct net_vrf *vrf = netdev_priv(dev);
+ struct nlattr **data = params->data;
struct netns_vrf *nn_vrf;
bool *add_fib_rules;
struct net *net;
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 841b59d1c1c2..e957aa12a8a4 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -15,6 +15,7 @@
#include <linux/igmp.h>
#include <linux/if_ether.h>
#include <linux/ethtool.h>
+#include <linux/rhashtable.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/gro.h>
@@ -25,6 +26,7 @@
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+#include <net/netdev_lock.h>
#include <net/tun_proto.h>
#include <net/vxlan.h>
#include <net/nexthop.h>
@@ -62,8 +64,12 @@ static int vxlan_sock_add(struct vxlan_dev *vxlan);
static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);
-/* salt for hash table */
-static u32 vxlan_salt __read_mostly;
+static const struct rhashtable_params vxlan_fdb_rht_params = {
+ .head_offset = offsetof(struct vxlan_fdb, rhnode),
+ .key_offset = offsetof(struct vxlan_fdb, key),
+ .key_len = sizeof(struct vxlan_fdb_key),
+ .automatic_shrinking = true,
+};
static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
{
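For readers less familiar with rhashtable: with parameters like vxlan_fdb_rht_params above (key embedded in the object at key_offset, linkage at head_offset), typical usage looks roughly like the sketch below. This is illustrative only; the struct and function names are hypothetical, while rhashtable_init/insert_fast/lookup/remove_fast/destroy are the real <linux/rhashtable.h> API, with lookups done under RCU. The later hunks use rhashtable_lookup(), rhashtable_lookup_insert_fast() and rhashtable_remove_fast() on the fdb table in exactly this style.

struct example_entry {
	struct rhash_head rhnode;	/* matches .head_offset */
	u64 key;			/* matches .key_offset / .key_len */
};

static const struct rhashtable_params example_rht_params = {
	.head_offset = offsetof(struct example_entry, rhnode),
	.key_offset = offsetof(struct example_entry, key),
	.key_len = sizeof(u64),
	.automatic_shrinking = true,
};

static int example_rht_use(struct rhashtable *tbl, struct example_entry *e)
{
	struct example_entry *found;
	int err;

	err = rhashtable_init(tbl, &example_rht_params);
	if (err)
		return err;

	err = rhashtable_insert_fast(tbl, &e->rhnode, example_rht_params);
	if (!err) {
		rcu_read_lock();
		found = rhashtable_lookup(tbl, &e->key, example_rht_params);
		rcu_read_unlock();
		WARN_ON(found != e);

		rhashtable_remove_fast(tbl, &e->rhnode, example_rht_params);
	}

	rhashtable_destroy(tbl);
	return err;
}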
@@ -185,7 +191,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
} else if (nh) {
ndm->ndm_family = nh_family;
}
- send_eth = !is_zero_ether_addr(fdb->eth_addr);
+ send_eth = !is_zero_ether_addr(fdb->key.eth_addr);
} else
ndm->ndm_family = AF_BRIDGE;
ndm->ndm_state = fdb->state;
@@ -200,7 +206,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
peernet2id(dev_net(vxlan->dev), vxlan->net)))
goto nla_put_failure;
- if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
+ if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.eth_addr))
goto nla_put_failure;
if (nh) {
if (nla_put_u32(skb, NDA_NH_ID, nh_id))
@@ -222,14 +228,14 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
goto nla_put_failure;
}
- if ((vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) && fdb->vni &&
+ if ((vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) && fdb->key.vni &&
nla_put_u32(skb, NDA_SRC_VNI,
- be32_to_cpu(fdb->vni)))
+ be32_to_cpu(fdb->key.vni)))
goto nla_put_failure;
- ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
+ ci.ndm_used = jiffies_to_clock_t(now - READ_ONCE(fdb->used));
ci.ndm_confirmed = 0;
- ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
+ ci.ndm_updated = jiffies_to_clock_t(now - READ_ONCE(fdb->updated));
ci.ndm_refcnt = 0;
if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
@@ -292,8 +298,8 @@ static void vxlan_fdb_switchdev_notifier_info(const struct vxlan_dev *vxlan,
fdb_info->remote_port = rd->remote_port;
fdb_info->remote_vni = rd->remote_vni;
fdb_info->remote_ifindex = rd->remote_ifindex;
- memcpy(fdb_info->eth_addr, fdb->eth_addr, ETH_ALEN);
- fdb_info->vni = fdb->vni;
+ memcpy(fdb_info->eth_addr, fdb->key.eth_addr, ETH_ALEN);
+ fdb_info->vni = fdb->key.vni;
fdb_info->offloaded = rd->offloaded;
fdb_info->added_by_user = fdb->flags & NTF_VXLAN_ADDED_BY_USER;
}
@@ -365,67 +371,42 @@ static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
};
struct vxlan_rdst remote = { };
- memcpy(f.eth_addr, eth_addr, ETH_ALEN);
+ memcpy(f.key.eth_addr, eth_addr, ETH_ALEN);
vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true, NULL);
}
-/* Hash Ethernet address */
-static u32 eth_hash(const unsigned char *addr)
-{
- u64 value = get_unaligned((u64 *)addr);
-
- /* only want 6 bytes */
-#ifdef __BIG_ENDIAN
- value >>= 16;
-#else
- value <<= 16;
-#endif
- return hash_64(value, FDB_HASH_BITS);
-}
-
-u32 eth_vni_hash(const unsigned char *addr, __be32 vni)
+/* Look up Ethernet address in forwarding table */
+static struct vxlan_fdb *vxlan_find_mac_rcu(struct vxlan_dev *vxlan,
+ const u8 *mac, __be32 vni)
{
- /* use 1 byte of OUI and 3 bytes of NIC */
- u32 key = get_unaligned((u32 *)(addr + 2));
-
- return jhash_2words(key, vni, vxlan_salt) & (FDB_HASH_SIZE - 1);
-}
+ struct vxlan_fdb_key key;
-u32 fdb_head_index(struct vxlan_dev *vxlan, const u8 *mac, __be32 vni)
-{
- if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)
- return eth_vni_hash(mac, vni);
+ memset(&key, 0, sizeof(key));
+ memcpy(key.eth_addr, mac, sizeof(key.eth_addr));
+ if (!(vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA))
+ key.vni = vxlan->default_dst.remote_vni;
else
- return eth_hash(mac);
-}
+ key.vni = vni;
-/* Hash chain to use given mac address */
-static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
- const u8 *mac, __be32 vni)
-{
- return &vxlan->fdb_head[fdb_head_index(vxlan, mac, vni)];
+ return rhashtable_lookup(&vxlan->fdb_hash_tbl, &key,
+ vxlan_fdb_rht_params);
}
-/* Look up Ethernet address in forwarding table */
-static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
- const u8 *mac, __be32 vni)
+static struct vxlan_fdb *vxlan_find_mac_tx(struct vxlan_dev *vxlan,
+ const u8 *mac, __be32 vni)
{
- struct hlist_head *head = vxlan_fdb_head(vxlan, mac, vni);
struct vxlan_fdb *f;
- hlist_for_each_entry_rcu(f, head, hlist) {
- if (ether_addr_equal(mac, f->eth_addr)) {
- if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) {
- if (vni == f->vni)
- return f;
- } else {
- return f;
- }
- }
+ f = vxlan_find_mac_rcu(vxlan, mac, vni);
+ if (f) {
+ unsigned long now = jiffies;
+
+ if (READ_ONCE(f->used) != now)
+ WRITE_ONCE(f->used, now);
}
- return NULL;
+ return f;
}
static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
@@ -433,9 +414,11 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
{
struct vxlan_fdb *f;
- f = __vxlan_find_mac(vxlan, mac, vni);
- if (f && f->used != jiffies)
- f->used = jiffies;
+ lockdep_assert_held_once(&vxlan->hash_lock);
+
+ rcu_read_lock();
+ f = vxlan_find_mac_rcu(vxlan, mac, vni);
+ rcu_read_unlock();
return f;
}
@@ -463,7 +446,7 @@ int vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
{
struct vxlan_dev *vxlan = netdev_priv(dev);
u8 eth_addr[ETH_ALEN + 2] = { 0 };
- struct vxlan_rdst *rdst;
+ struct vxlan_rdst *rdst = NULL;
struct vxlan_fdb *f;
int rc = 0;
@@ -475,13 +458,14 @@ int vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
rcu_read_lock();
- f = __vxlan_find_mac(vxlan, eth_addr, vni);
- if (!f) {
+ f = vxlan_find_mac_rcu(vxlan, eth_addr, vni);
+ if (f)
+ rdst = first_remote_rcu(f);
+ if (!rdst) {
rc = -ENOENT;
goto out;
}
- rdst = first_remote_rcu(f);
vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, NULL, fdb_info);
out:
@@ -512,32 +496,28 @@ int vxlan_fdb_replay(const struct net_device *dev, __be32 vni,
struct vxlan_dev *vxlan;
struct vxlan_rdst *rdst;
struct vxlan_fdb *f;
- unsigned int h;
int rc = 0;
if (!netif_is_vxlan(dev))
return -EINVAL;
vxlan = netdev_priv(dev);
- for (h = 0; h < FDB_HASH_SIZE; ++h) {
- spin_lock_bh(&vxlan->hash_lock[h]);
- hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist) {
- if (f->vni == vni) {
- list_for_each_entry(rdst, &f->remotes, list) {
- rc = vxlan_fdb_notify_one(nb, vxlan,
- f, rdst,
- extack);
- if (rc)
- goto unlock;
- }
+ spin_lock_bh(&vxlan->hash_lock);
+ hlist_for_each_entry(f, &vxlan->fdb_list, fdb_node) {
+ if (f->key.vni == vni) {
+ list_for_each_entry(rdst, &f->remotes, list) {
+ rc = vxlan_fdb_notify_one(nb, vxlan, f, rdst,
+ extack);
+ if (rc)
+ goto unlock;
}
}
- spin_unlock_bh(&vxlan->hash_lock[h]);
}
+ spin_unlock_bh(&vxlan->hash_lock);
return 0;
unlock:
- spin_unlock_bh(&vxlan->hash_lock[h]);
+ spin_unlock_bh(&vxlan->hash_lock);
return rc;
}
EXPORT_SYMBOL_GPL(vxlan_fdb_replay);
@@ -547,20 +527,19 @@ void vxlan_fdb_clear_offload(const struct net_device *dev, __be32 vni)
struct vxlan_dev *vxlan;
struct vxlan_rdst *rdst;
struct vxlan_fdb *f;
- unsigned int h;
if (!netif_is_vxlan(dev))
return;
vxlan = netdev_priv(dev);
- for (h = 0; h < FDB_HASH_SIZE; ++h) {
- spin_lock_bh(&vxlan->hash_lock[h]);
- hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist)
- if (f->vni == vni)
- list_for_each_entry(rdst, &f->remotes, list)
- rdst->offloaded = false;
- spin_unlock_bh(&vxlan->hash_lock[h]);
+ spin_lock_bh(&vxlan->hash_lock);
+ hlist_for_each_entry(f, &vxlan->fdb_list, fdb_node) {
+ if (f->key.vni == vni) {
+ list_for_each_entry(rdst, &f->remotes, list)
+ rdst->offloaded = false;
+ }
}
+ spin_unlock_bh(&vxlan->hash_lock);
}
EXPORT_SYMBOL_GPL(vxlan_fdb_clear_offload);
@@ -605,10 +584,10 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
if (rd == NULL)
return -ENOMEM;
- if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
- kfree(rd);
- return -ENOMEM;
- }
+ /* The driver can work correctly without a dst cache, so do not treat
+ * dst cache initialization errors as fatal.
+ */
+ dst_cache_init(&rd->dst_cache, GFP_ATOMIC | __GFP_NOWARN);
rd->remote_ip = *ip;
rd->remote_port = port;
@@ -622,9 +601,9 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
return 1;
}
-static bool vxlan_parse_gpe_proto(struct vxlanhdr *hdr, __be16 *protocol)
+static bool vxlan_parse_gpe_proto(const struct vxlanhdr *hdr, __be16 *protocol)
{
- struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)hdr;
+ const struct vxlanhdr_gpe *gpe = (const struct vxlanhdr_gpe *)hdr;
/* Need to have Next Protocol set for interfaces in GPE mode. */
if (!gpe->np_applied)
@@ -798,27 +777,20 @@ static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan, const u8 *mac,
f = kmalloc(sizeof(*f), GFP_ATOMIC);
if (!f)
return NULL;
+ memset(&f->key, 0, sizeof(f->key));
f->state = state;
f->flags = ndm_flags;
f->updated = f->used = jiffies;
- f->vni = src_vni;
+ f->key.vni = src_vni;
f->nh = NULL;
RCU_INIT_POINTER(f->vdev, vxlan);
INIT_LIST_HEAD(&f->nh_list);
INIT_LIST_HEAD(&f->remotes);
- memcpy(f->eth_addr, mac, ETH_ALEN);
+ memcpy(f->key.eth_addr, mac, ETH_ALEN);
return f;
}
-static void vxlan_fdb_insert(struct vxlan_dev *vxlan, const u8 *mac,
- __be32 src_vni, struct vxlan_fdb *f)
-{
- ++vxlan->addrcnt;
- hlist_add_head_rcu(&f->hlist,
- vxlan_fdb_head(vxlan, mac, src_vni));
-}
-
static int vxlan_fdb_nh_update(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
u32 nhid, struct netlink_ext_ack *extack)
{
@@ -908,10 +880,27 @@ int vxlan_fdb_create(struct vxlan_dev *vxlan,
if (rc < 0)
goto errout;
+ rc = rhashtable_lookup_insert_fast(&vxlan->fdb_hash_tbl, &f->rhnode,
+ vxlan_fdb_rht_params);
+ if (rc)
+ goto destroy_remote;
+
+ ++vxlan->addrcnt;
+ hlist_add_head_rcu(&f->fdb_node, &vxlan->fdb_list);
+
*fdb = f;
return 0;
+destroy_remote:
+ if (rcu_access_pointer(f->nh)) {
+ list_del_rcu(&f->nh_list);
+ nexthop_put(rtnl_dereference(f->nh));
+ } else {
+ list_del(&rd->list);
+ dst_cache_destroy(&rd->dst_cache);
+ kfree(rd);
+ }
errout:
kfree(f);
return rc;
@@ -948,7 +937,7 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
{
struct vxlan_rdst *rd;
- netdev_dbg(vxlan->dev, "delete %pM\n", f->eth_addr);
+ netdev_dbg(vxlan->dev, "delete %pM\n", f->key.eth_addr);
--vxlan->addrcnt;
if (do_notify) {
@@ -961,7 +950,9 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
swdev_notify, NULL);
}
- hlist_del_rcu(&f->hlist);
+ hlist_del_init_rcu(&f->fdb_node);
+ rhashtable_remove_fast(&vxlan->fdb_hash_tbl, &f->rhnode,
+ vxlan_fdb_rht_params);
list_del_rcu(&f->nh_list);
call_rcu(&f->rcu, vxlan_fdb_free);
}
@@ -1009,20 +1000,18 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan,
!(f->flags & NTF_VXLAN_ADDED_BY_USER)) {
if (f->state != state) {
f->state = state;
- f->updated = jiffies;
notify = 1;
}
if (f->flags != fdb_flags) {
f->flags = fdb_flags;
- f->updated = jiffies;
notify = 1;
}
}
if ((flags & NLM_F_REPLACE)) {
/* Only change unicasts */
- if (!(is_multicast_ether_addr(f->eth_addr) ||
- is_zero_ether_addr(f->eth_addr))) {
+ if (!(is_multicast_ether_addr(f->key.eth_addr) ||
+ is_zero_ether_addr(f->key.eth_addr))) {
if (nhid) {
rc = vxlan_fdb_nh_update(vxlan, f, nhid, extack);
if (rc < 0)
@@ -1038,8 +1027,8 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan,
}
}
if ((flags & NLM_F_APPEND) &&
- (is_multicast_ether_addr(f->eth_addr) ||
- is_zero_ether_addr(f->eth_addr))) {
+ (is_multicast_ether_addr(f->key.eth_addr) ||
+ is_zero_ether_addr(f->key.eth_addr))) {
rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
if (rc < 0)
@@ -1048,12 +1037,13 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan,
}
if (ndm_flags & NTF_USE)
- f->used = jiffies;
+ WRITE_ONCE(f->updated, jiffies);
if (notify) {
if (rd == NULL)
rd = first_remote_rtnl(f);
+ WRITE_ONCE(f->updated, jiffies);
err = vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH,
swdev_notify, extack);
if (err)
@@ -1097,7 +1087,6 @@ static int vxlan_fdb_update_create(struct vxlan_dev *vxlan,
if (rc < 0)
return rc;
- vxlan_fdb_insert(vxlan, mac, src_vni, f);
rc = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH,
swdev_notify, extack);
if (rc)
@@ -1121,7 +1110,7 @@ int vxlan_fdb_update(struct vxlan_dev *vxlan,
{
struct vxlan_fdb *f;
- f = __vxlan_find_mac(vxlan, mac, src_vni);
+ f = vxlan_find_mac(vxlan, mac, src_vni);
if (f) {
if (flags & NLM_F_EXCL) {
netdev_dbg(vxlan->dev,
@@ -1232,10 +1221,7 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
*ifindex = 0;
}
- if (tb[NDA_NH_ID])
- *nhid = nla_get_u32(tb[NDA_NH_ID]);
- else
- *nhid = 0;
+ *nhid = nla_get_u32_default(tb[NDA_NH_ID], 0);
return 0;
}
@@ -1244,7 +1230,7 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid, u16 flags,
- struct netlink_ext_ack *extack)
+ bool *notified, struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
/* struct net *net = dev_net(vxlan->dev); */
@@ -1252,7 +1238,6 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
__be16 port;
__be32 src_vni, vni;
u32 ifindex, nhid;
- u32 hash_index;
int err;
if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
@@ -1272,13 +1257,15 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
return -EAFNOSUPPORT;
- hash_index = fdb_head_index(vxlan, addr, src_vni);
- spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ spin_lock_bh(&vxlan->hash_lock);
err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags,
port, src_vni, vni, ifindex,
ndm->ndm_flags | NTF_VXLAN_ADDED_BY_USER,
nhid, true, extack);
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ spin_unlock_bh(&vxlan->hash_lock);
+
+ if (!err)
+ *notified = true;
return err;
}
@@ -1319,14 +1306,13 @@ out:
/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
- const unsigned char *addr, u16 vid,
+ const unsigned char *addr, u16 vid, bool *notified,
struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
union vxlan_addr ip;
__be32 src_vni, vni;
u32 ifindex, nhid;
- u32 hash_index;
__be16 port;
int err;
@@ -1335,11 +1321,13 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
if (err)
return err;
- hash_index = fdb_head_index(vxlan, addr, src_vni);
- spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ spin_lock_bh(&vxlan->hash_lock);
err = __vxlan_fdb_delete(vxlan, addr, ip, port, src_vni, vni, ifindex,
true);
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ spin_unlock_bh(&vxlan->hash_lock);
+
+ if (!err)
+ *notified = true;
return err;
}
@@ -1349,53 +1337,48 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev,
struct net_device *filter_dev, int *idx)
{
+ struct ndo_fdb_dump_context *ctx = (void *)cb->ctx;
struct vxlan_dev *vxlan = netdev_priv(dev);
- unsigned int h;
+ struct vxlan_fdb *f;
int err = 0;
- for (h = 0; h < FDB_HASH_SIZE; ++h) {
- struct vxlan_fdb *f;
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
- struct vxlan_rdst *rd;
-
- if (rcu_access_pointer(f->nh)) {
- if (*idx < cb->args[2])
- goto skip_nh;
- err = vxlan_fdb_info(skb, vxlan, f,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_NEWNEIGH,
- NLM_F_MULTI, NULL);
- if (err < 0) {
- rcu_read_unlock();
- goto out;
- }
-skip_nh:
- *idx += 1;
- continue;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(f, &vxlan->fdb_list, fdb_node) {
+ struct vxlan_rdst *rd;
+
+ if (rcu_access_pointer(f->nh)) {
+ if (*idx < ctx->fdb_idx)
+ goto skip_nh;
+ err = vxlan_fdb_info(skb, vxlan, f,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWNEIGH, NLM_F_MULTI, NULL);
+ if (err < 0) {
+ rcu_read_unlock();
+ goto out;
}
+skip_nh:
+ *idx += 1;
+ continue;
+ }
- list_for_each_entry_rcu(rd, &f->remotes, list) {
- if (*idx < cb->args[2])
- goto skip;
-
- err = vxlan_fdb_info(skb, vxlan, f,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_NEWNEIGH,
- NLM_F_MULTI, rd);
- if (err < 0) {
- rcu_read_unlock();
- goto out;
- }
-skip:
- *idx += 1;
+ list_for_each_entry_rcu(rd, &f->remotes, list) {
+ if (*idx < ctx->fdb_idx)
+ goto skip;
+
+ err = vxlan_fdb_info(skb, vxlan, f,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWNEIGH, NLM_F_MULTI, rd);
+ if (err < 0) {
+ rcu_read_unlock();
+ goto out;
}
+skip:
+ *idx += 1;
}
- rcu_read_unlock();
}
+ rcu_read_unlock();
out:
return err;
}
@@ -1419,7 +1402,7 @@ static int vxlan_fdb_get(struct sk_buff *skb,
rcu_read_lock();
- f = __vxlan_find_mac(vxlan, addr, vni);
+ f = vxlan_find_mac_rcu(vxlan, addr, vni);
if (!f) {
NL_SET_ERR_MSG(extack, "Fdb entry not found");
err = -ENOENT;
@@ -1455,9 +1438,17 @@ static enum skb_drop_reason vxlan_snoop(struct net_device *dev,
ifindex = src_ifindex;
#endif
- f = vxlan_find_mac(vxlan, src_mac, vni);
+ f = vxlan_find_mac_rcu(vxlan, src_mac, vni);
if (likely(f)) {
struct vxlan_rdst *rdst = first_remote_rcu(f);
+ unsigned long now = jiffies;
+
+ if (READ_ONCE(f->updated) != now)
+ WRITE_ONCE(f->updated, now);
+
+ /* Don't override an fdb that has a nexthop with a learnt entry */
+ if (rcu_access_pointer(f->nh))
+ return SKB_DROP_REASON_VXLAN_ENTRY_EXISTS;
if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip) &&
rdst->remote_ifindex == ifindex))
@@ -1467,23 +1458,16 @@ static enum skb_drop_reason vxlan_snoop(struct net_device *dev,
if (f->state & (NUD_PERMANENT | NUD_NOARP))
return SKB_DROP_REASON_VXLAN_ENTRY_EXISTS;
- /* Don't override an fdb with nexthop with a learnt entry */
- if (rcu_access_pointer(f->nh))
- return SKB_DROP_REASON_VXLAN_ENTRY_EXISTS;
-
if (net_ratelimit())
netdev_info(dev,
"%pM migrated from %pIS to %pIS\n",
src_mac, &rdst->remote_ip.sa, &src_ip->sa);
rdst->remote_ip = *src_ip;
- f->updated = jiffies;
vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true, NULL);
} else {
- u32 hash_index = fdb_head_index(vxlan, src_mac, vni);
-
/* learned new entry */
- spin_lock(&vxlan->hash_lock[hash_index]);
+ spin_lock(&vxlan->hash_lock);
/* close off race between vxlan_flush and incoming packets */
if (netif_running(dev))
@@ -1494,7 +1478,7 @@ static enum skb_drop_reason vxlan_snoop(struct net_device *dev,
vni,
vxlan->default_dst.remote_vni,
ifindex, NTF_SELF, 0, true, NULL);
- spin_unlock(&vxlan->hash_lock[hash_index]);
+ spin_unlock(&vxlan->hash_lock);
}
return SKB_NOT_DROPPED_YET;
@@ -1502,21 +1486,18 @@ static enum skb_drop_reason vxlan_snoop(struct net_device *dev,
static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
{
- struct vxlan_net *vn;
+ ASSERT_RTNL();
if (!vs)
return false;
if (!refcount_dec_and_test(&vs->refcnt))
return false;
- vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id);
- spin_lock(&vn->sock_lock);
hlist_del_rcu(&vs->hlist);
udp_tunnel_notify_del_rx_port(vs->sock,
(vs->flags & VXLAN_F_GPE) ?
UDP_TUNNEL_TYPE_VXLAN_GPE :
UDP_TUNNEL_TYPE_VXLAN);
- spin_unlock(&vn->sock_lock);
return true;
}
@@ -1551,18 +1532,17 @@ static void vxlan_sock_release(struct vxlan_dev *vxlan)
#endif
}
-static enum skb_drop_reason vxlan_remcsum(struct vxlanhdr *unparsed,
- struct sk_buff *skb,
- u32 vxflags)
+static enum skb_drop_reason vxlan_remcsum(struct sk_buff *skb, u32 vxflags)
{
+ const struct vxlanhdr *vh = vxlan_hdr(skb);
enum skb_drop_reason reason;
size_t start, offset;
- if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload)
- goto out;
+ if (!(vh->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload)
+ return SKB_NOT_DROPPED_YET;
- start = vxlan_rco_start(unparsed->vx_vni);
- offset = start + vxlan_rco_offset(unparsed->vx_vni);
+ start = vxlan_rco_start(vh->vx_vni);
+ offset = start + vxlan_rco_offset(vh->vx_vni);
reason = pskb_may_pull_reason(skb, offset + sizeof(u16));
if (reason)
@@ -1570,22 +1550,20 @@ static enum skb_drop_reason vxlan_remcsum(struct vxlanhdr *unparsed,
skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset,
!!(vxflags & VXLAN_F_REMCSUM_NOPARTIAL));
-out:
- unparsed->vx_flags &= ~VXLAN_HF_RCO;
- unparsed->vx_vni &= VXLAN_VNI_MASK;
-
return SKB_NOT_DROPPED_YET;
}
-static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
- struct sk_buff *skb, u32 vxflags,
+static void vxlan_parse_gbp_hdr(struct sk_buff *skb, u32 vxflags,
struct vxlan_metadata *md)
{
- struct vxlanhdr_gbp *gbp = (struct vxlanhdr_gbp *)unparsed;
+ const struct vxlanhdr *vh = vxlan_hdr(skb);
+ const struct vxlanhdr_gbp *gbp;
struct metadata_dst *tun_dst;
- if (!(unparsed->vx_flags & VXLAN_HF_GBP))
- goto out;
+ gbp = (const struct vxlanhdr_gbp *)vh;
+
+ if (!(vh->vx_flags & VXLAN_HF_GBP))
+ return;
md->gbp = ntohs(gbp->policy_id);
@@ -1604,8 +1582,6 @@ static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
/* In flow-based mode, GBP is carried in dst_metadata */
if (!(vxflags & VXLAN_F_COLLECT_METADATA))
skb->mark = md->gbp;
-out:
- unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS;
}
static enum skb_drop_reason vxlan_set_mac(struct vxlan_dev *vxlan,
@@ -1665,13 +1641,12 @@ static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph,
return err <= 1;
}
-/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
{
struct vxlan_vni_node *vninode = NULL;
+ const struct vxlanhdr *vh;
struct vxlan_dev *vxlan;
struct vxlan_sock *vs;
- struct vxlanhdr unparsed;
struct vxlan_metadata _md;
struct vxlan_metadata *md = &_md;
__be16 protocol = htons(ETH_P_TEB);
@@ -1686,24 +1661,21 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
if (reason)
goto drop;
- unparsed = *vxlan_hdr(skb);
+ vh = vxlan_hdr(skb);
/* VNI flag always required to be set */
- if (!(unparsed.vx_flags & VXLAN_HF_VNI)) {
+ if (!(vh->vx_flags & VXLAN_HF_VNI)) {
netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
- ntohl(vxlan_hdr(skb)->vx_flags),
- ntohl(vxlan_hdr(skb)->vx_vni));
+ ntohl(vh->vx_flags), ntohl(vh->vx_vni));
reason = SKB_DROP_REASON_VXLAN_INVALID_HDR;
/* Return non vxlan pkt */
goto drop;
}
- unparsed.vx_flags &= ~VXLAN_HF_VNI;
- unparsed.vx_vni &= ~VXLAN_VNI_MASK;
vs = rcu_dereference_sk_user_data(sk);
if (!vs)
goto drop;
- vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
+ vni = vxlan_vni(vh->vx_vni);
vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni, &vninode);
if (!vxlan) {
@@ -1711,13 +1683,27 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
goto drop;
}
- /* For backwards compatibility, only allow reserved fields to be
- * used by VXLAN extensions if explicitly requested.
- */
- if (vs->flags & VXLAN_F_GPE) {
- if (!vxlan_parse_gpe_proto(&unparsed, &protocol))
+ if (vh->vx_flags & vxlan->cfg.reserved_bits.vx_flags ||
+ vh->vx_vni & vxlan->cfg.reserved_bits.vx_vni) {
+ /* If the header uses bits besides those enabled by the
+ * netdevice configuration, treat this as a malformed packet.
+ * This behavior diverges from VXLAN RFC (RFC7348) which
+ * stipulates that bits in reserved fields are to be
+ * ignored. The approach here maintains compatibility with
+ * previous stack code, and also is more robust and provides a
+ * little more security in adding extensions to VXLAN.
+ */
+ reason = SKB_DROP_REASON_VXLAN_INVALID_HDR;
+ DEV_STATS_INC(vxlan->dev, rx_frame_errors);
+ DEV_STATS_INC(vxlan->dev, rx_errors);
+ vxlan_vnifilter_count(vxlan, vni, vninode,
+ VXLAN_VNI_STATS_RX_ERRORS, 0);
+ goto drop;
+ }
+
+ if (vxlan->cfg.flags & VXLAN_F_GPE) {
+ if (!vxlan_parse_gpe_proto(vh, &protocol))
goto drop;
- unparsed.vx_flags &= ~VXLAN_GPE_USED_BITS;
raw_proto = true;
}
@@ -1727,8 +1713,8 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
goto drop;
}
- if (vs->flags & VXLAN_F_REMCSUM_RX) {
- reason = vxlan_remcsum(&unparsed, skb, vs->flags);
+ if (vxlan->cfg.flags & VXLAN_F_REMCSUM_RX) {
+ reason = vxlan_remcsum(skb, vxlan->cfg.flags);
if (unlikely(reason))
goto drop;
}
@@ -1753,25 +1739,12 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
memset(md, 0, sizeof(*md));
}
- if (vs->flags & VXLAN_F_GBP)
- vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md);
+ if (vxlan->cfg.flags & VXLAN_F_GBP)
+ vxlan_parse_gbp_hdr(skb, vxlan->cfg.flags, md);
/* Note that GBP and GPE can never be active together. This is
* ensured in vxlan_dev_configure.
*/
- if (unparsed.vx_flags || unparsed.vx_vni) {
- /* If there are any unprocessed flags remaining treat
- * this as a malformed packet. This behavior diverges from
- * VXLAN RFC (RFC7348) which stipulates that bits in reserved
- * in reserved fields are to be ignored. The approach here
- * maintains compatibility with previous stack code, and also
- * is more robust and provides a little more security in
- * adding extensions to VXLAN.
- */
- reason = SKB_DROP_REASON_VXLAN_INVALID_HDR;
- goto drop;
- }
-
if (!raw_proto) {
reason = vxlan_set_mac(vxlan, vs, skb, vni);
if (reason)
@@ -1815,14 +1788,14 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
if (unlikely(!(vxlan->dev->flags & IFF_UP))) {
rcu_read_unlock();
- dev_core_stats_rx_dropped_inc(vxlan->dev);
+ dev_dstats_rx_dropped(vxlan->dev);
vxlan_vnifilter_count(vxlan, vni, vninode,
VXLAN_VNI_STATS_RX_DROPS, 0);
reason = SKB_DROP_REASON_DEV_READY;
goto drop;
}
- dev_sw_netstats_rx_add(vxlan->dev, skb->len);
+ dev_dstats_rx_add(vxlan->dev, skb->len);
vxlan_vnifilter_count(vxlan, vni, vninode, VXLAN_VNI_STATS_RX, skb->len);
gro_cells_receive(&vxlan->gro_cells, skb);
@@ -1837,7 +1810,6 @@ drop:
return 0;
}
-/* Callback from net/ipv{4,6}/udp.c to check that we have a VNI for errors */
static int vxlan_err_lookup(struct sock *sk, struct sk_buff *skb)
{
struct vxlan_dev *vxlan;
@@ -1877,7 +1849,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
goto out;
if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
- dev_core_stats_tx_dropped_inc(dev);
+ dev_dstats_tx_dropped(dev);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
goto out;
@@ -1906,6 +1878,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
n = neigh_lookup(&arp_tbl, &tip, dev);
if (n) {
+ struct vxlan_rdst *rdst = NULL;
struct vxlan_fdb *f;
struct sk_buff *reply;
@@ -1914,12 +1887,17 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
goto out;
}
- f = vxlan_find_mac(vxlan, n->ha, vni);
- if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
+ rcu_read_lock();
+ f = vxlan_find_mac_tx(vxlan, n->ha, vni);
+ if (f)
+ rdst = first_remote_rcu(f);
+ if (rdst && vxlan_addr_any(&rdst->remote_ip)) {
/* bridge-local neighbor */
neigh_release(n);
+ rcu_read_unlock();
goto out;
}
+ rcu_read_unlock();
reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
n->ha, sha);
@@ -1935,7 +1913,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
reply->pkt_type = PACKET_HOST;
if (netif_rx(reply) == NET_RX_DROP) {
- dev_core_stats_rx_dropped_inc(dev);
+ dev_dstats_rx_dropped(dev);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
@@ -2070,6 +2048,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);
if (n) {
+ struct vxlan_rdst *rdst = NULL;
struct vxlan_fdb *f;
struct sk_buff *reply;
@@ -2078,8 +2057,10 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
goto out;
}
- f = vxlan_find_mac(vxlan, n->ha, vni);
- if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
+ f = vxlan_find_mac_tx(vxlan, n->ha, vni);
+ if (f)
+ rdst = first_remote_rcu(f);
+ if (rdst && vxlan_addr_any(&rdst->remote_ip)) {
/* bridge-local neighbor */
neigh_release(n);
goto out;
@@ -2094,7 +2075,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
goto out;
if (netif_rx(reply) == NET_RX_DROP) {
- dev_core_stats_rx_dropped_inc(dev);
+ dev_dstats_rx_dropped(dev);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
@@ -2268,8 +2249,8 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
{
union vxlan_addr loopback;
union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
+ unsigned int len = skb->len;
struct net_device *dev;
- int len = skb->len;
skb->pkt_type = PACKET_HOST;
skb->encapsulation = 0;
@@ -2296,16 +2277,16 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
if ((dst_vxlan->cfg.flags & VXLAN_F_LEARN) && snoop)
vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni);
- dev_sw_netstats_tx_add(src_vxlan->dev, 1, len);
+ dev_dstats_tx_add(src_vxlan->dev, len);
vxlan_vnifilter_count(src_vxlan, vni, NULL, VXLAN_VNI_STATS_TX, len);
if (__netif_rx(skb) == NET_RX_SUCCESS) {
- dev_sw_netstats_rx_add(dst_vxlan->dev, len);
+ dev_dstats_rx_add(dst_vxlan->dev, len);
vxlan_vnifilter_count(dst_vxlan, vni, NULL, VXLAN_VNI_STATS_RX,
len);
} else {
drop:
- dev_core_stats_rx_dropped_inc(dev);
+ dev_dstats_rx_dropped(dev);
vxlan_vnifilter_count(dst_vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
@@ -2368,7 +2349,7 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
int addr_family;
__u8 tos, ttl;
int ifindex;
- int err;
+ int err = 0;
u32 flags = vxlan->cfg.flags;
bool use_cache;
bool udp_sum = false;
@@ -2473,11 +2454,18 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
rcu_read_lock();
if (addr_family == AF_INET) {
- struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
+ struct vxlan_sock *sock4;
+ u16 ipcb_flags = 0;
struct rtable *rt;
__be16 df = 0;
__be32 saddr;
+ sock4 = rcu_dereference(vxlan->vn4_sock);
+ if (unlikely(!sock4)) {
+ reason = SKB_DROP_REASON_DEV_READY;
+ goto tx_error;
+ }
+
if (!ifindex)
ifindex = sock4->sock->sk->sk_bound_dev_if;
@@ -2490,6 +2478,9 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
goto tx_error;
}
+ if (flags & VXLAN_F_MC_ROUTE)
+ ipcb_flags |= IPSKB_MCROUTE;
+
if (!info) {
/* Bypass encapsulation if the destination is local */
err = encap_bypass_if_local(skb, dev, vxlan, AF_INET,
@@ -2545,11 +2536,19 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
udp_tunnel_xmit_skb(rt, sock4->sock->sk, skb, saddr,
pkey->u.ipv4.dst, tos, ttl, df,
- src_port, dst_port, xnet, !udp_sum);
+ src_port, dst_port, xnet, !udp_sum,
+ ipcb_flags);
#if IS_ENABLED(CONFIG_IPV6)
} else {
- struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
+ struct vxlan_sock *sock6;
struct in6_addr saddr;
+ u16 ip6cb_flags = 0;
+
+ sock6 = rcu_dereference(vxlan->vn6_sock);
+ if (unlikely(!sock6)) {
+ reason = SKB_DROP_REASON_DEV_READY;
+ goto tx_error;
+ }
if (!ifindex)
ifindex = sock6->sock->sk->sk_bound_dev_if;
@@ -2565,6 +2564,9 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
goto tx_error;
}
+ if (flags & VXLAN_F_MC_ROUTE)
+ ip6cb_flags |= IP6SKB_MCROUTE;
+
if (!info) {
u32 rt6i_flags = dst_rt6_info(ndst)->rt6i_flags;
@@ -2609,7 +2611,8 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
udp_tunnel6_xmit_skb(ndst, sock6->sock->sk, skb, dev,
&saddr, &pkey->u.ipv6.dst, tos, ttl,
- pkey->label, src_port, dst_port, !udp_sum);
+ pkey->label, src_port, dst_port, !udp_sum,
+ ip6cb_flags);
#endif
}
vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX, pkt_len);
@@ -2618,7 +2621,7 @@ out_unlock:
return;
drop:
- dev_core_stats_tx_dropped_inc(dev);
+ dev_dstats_tx_dropped(dev);
vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0);
kfree_skb_reason(skb, reason);
return;
@@ -2646,14 +2649,10 @@ static void vxlan_xmit_nh(struct sk_buff *skb, struct net_device *dev,
memset(&nh_rdst, 0, sizeof(struct vxlan_rdst));
hash = skb_get_hash(skb);
- rcu_read_lock();
nh = rcu_dereference(f->nh);
- if (!nh) {
- rcu_read_unlock();
+ if (!nh)
goto drop;
- }
do_xmit = vxlan_fdb_nh_path_select(nh, hash, &nh_rdst);
- rcu_read_unlock();
if (likely(do_xmit))
vxlan_xmit_one(skb, dev, vni, &nh_rdst, did_rsc);
@@ -2663,7 +2662,7 @@ static void vxlan_xmit_nh(struct sk_buff *skb, struct net_device *dev,
return;
drop:
- dev_core_stats_tx_dropped_inc(dev);
+ dev_dstats_tx_dropped(dev);
vxlan_vnifilter_count(netdev_priv(dev), vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
@@ -2701,7 +2700,7 @@ static netdev_tx_t vxlan_xmit_nhid(struct sk_buff *skb, struct net_device *dev,
return NETDEV_TX_OK;
drop:
- dev_core_stats_tx_dropped_inc(dev);
+ dev_dstats_tx_dropped(dev);
vxlan_vnifilter_count(netdev_priv(dev), vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
@@ -2780,7 +2779,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
}
eth = eth_hdr(skb);
- f = vxlan_find_mac(vxlan, eth->h_dest, vni);
+ rcu_read_lock();
+ f = vxlan_find_mac_tx(vxlan, eth->h_dest, vni);
did_rsc = false;
if (f && (f->flags & NTF_ROUTER) && (vxlan->cfg.flags & VXLAN_F_RSC) &&
@@ -2788,21 +2788,21 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
ntohs(eth->h_proto) == ETH_P_IPV6)) {
did_rsc = route_shortcircuit(dev, skb);
if (did_rsc)
- f = vxlan_find_mac(vxlan, eth->h_dest, vni);
+ f = vxlan_find_mac_tx(vxlan, eth->h_dest, vni);
}
if (f == NULL) {
- f = vxlan_find_mac(vxlan, all_zeros_mac, vni);
+ f = vxlan_find_mac_tx(vxlan, all_zeros_mac, vni);
if (f == NULL) {
if ((vxlan->cfg.flags & VXLAN_F_L2MISS) &&
!is_multicast_ether_addr(eth->h_dest))
vxlan_fdb_miss(vxlan, eth->h_dest);
- dev_core_stats_tx_dropped_inc(dev);
+ dev_dstats_tx_dropped(dev);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
- kfree_skb_reason(skb, SKB_DROP_REASON_VXLAN_NO_REMOTE);
- return NETDEV_TX_OK;
+ kfree_skb_reason(skb, SKB_DROP_REASON_NO_TX_TARGET);
+ goto out;
}
}
@@ -2824,75 +2824,72 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
if (fdst)
vxlan_xmit_one(skb, dev, vni, fdst, did_rsc);
else
- kfree_skb_reason(skb, SKB_DROP_REASON_VXLAN_NO_REMOTE);
+ kfree_skb_reason(skb, SKB_DROP_REASON_NO_TX_TARGET);
}
+out:
+ rcu_read_unlock();
return NETDEV_TX_OK;
}
/* Walk the forwarding table and purge stale entries */
static void vxlan_cleanup(struct timer_list *t)
{
- struct vxlan_dev *vxlan = from_timer(vxlan, t, age_timer);
+ struct vxlan_dev *vxlan = timer_container_of(vxlan, t, age_timer);
unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
- unsigned int h;
+ struct vxlan_fdb *f;
if (!netif_running(vxlan->dev))
return;
- for (h = 0; h < FDB_HASH_SIZE; ++h) {
- struct hlist_node *p, *n;
-
- spin_lock(&vxlan->hash_lock[h]);
- hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
- struct vxlan_fdb *f
- = container_of(p, struct vxlan_fdb, hlist);
- unsigned long timeout;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(f, &vxlan->fdb_list, fdb_node) {
+ unsigned long timeout;
- if (f->state & (NUD_PERMANENT | NUD_NOARP))
- continue;
+ if (f->state & (NUD_PERMANENT | NUD_NOARP))
+ continue;
- if (f->flags & NTF_EXT_LEARNED)
- continue;
+ if (f->flags & NTF_EXT_LEARNED)
+ continue;
- timeout = f->used + vxlan->cfg.age_interval * HZ;
- if (time_before_eq(timeout, jiffies)) {
- netdev_dbg(vxlan->dev,
- "garbage collect %pM\n",
- f->eth_addr);
+ timeout = READ_ONCE(f->updated) + vxlan->cfg.age_interval * HZ;
+ if (time_before_eq(timeout, jiffies)) {
+ spin_lock(&vxlan->hash_lock);
+ if (!hlist_unhashed(&f->fdb_node)) {
+ netdev_dbg(vxlan->dev, "garbage collect %pM\n",
+ f->key.eth_addr);
f->state = NUD_STALE;
vxlan_fdb_destroy(vxlan, f, true, true);
- } else if (time_before(timeout, next_timer))
- next_timer = timeout;
+ }
+ spin_unlock(&vxlan->hash_lock);
+ } else if (time_before(timeout, next_timer)) {
+ next_timer = timeout;
}
- spin_unlock(&vxlan->hash_lock[h]);
}
+ rcu_read_unlock();
mod_timer(&vxlan->age_timer, next_timer);
}
static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
{
- struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+ ASSERT_RTNL();
- spin_lock(&vn->sock_lock);
hlist_del_init_rcu(&vxlan->hlist4.hlist);
#if IS_ENABLED(CONFIG_IPV6)
hlist_del_init_rcu(&vxlan->hlist6.hlist);
#endif
- spin_unlock(&vn->sock_lock);
}
static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan,
struct vxlan_dev_node *node)
{
- struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
__be32 vni = vxlan->default_dst.remote_vni;
+ ASSERT_RTNL();
+
node->vxlan = vxlan;
- spin_lock(&vn->sock_lock);
hlist_add_head_rcu(&node->hlist, vni_head(vs, vni));
- spin_unlock(&vn->sock_lock);
}
/* Setup stats when device is created */
@@ -2901,8 +2898,15 @@ static int vxlan_init(struct net_device *dev)
struct vxlan_dev *vxlan = netdev_priv(dev);
int err;
- if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
- vxlan_vnigroup_init(vxlan);
+ err = rhashtable_init(&vxlan->fdb_hash_tbl, &vxlan_fdb_rht_params);
+ if (err)
+ return err;
+
+ if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) {
+ err = vxlan_vnigroup_init(vxlan);
+ if (err)
+ goto err_rhashtable_destroy;
+ }
err = gro_cells_init(&vxlan->gro_cells, dev);
if (err)
@@ -2920,21 +2924,11 @@ err_gro_cells_destroy:
err_vnigroup_uninit:
if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
vxlan_vnigroup_uninit(vxlan);
+err_rhashtable_destroy:
+ rhashtable_destroy(&vxlan->fdb_hash_tbl);
return err;
}
-static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
-{
- struct vxlan_fdb *f;
- u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, vni);
-
- spin_lock_bh(&vxlan->hash_lock[hash_index]);
- f = __vxlan_find_mac(vxlan, all_zeros_mac, vni);
- if (f)
- vxlan_fdb_destroy(vxlan, f, true, true);
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
-}
-
static void vxlan_uninit(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
@@ -2946,7 +2940,7 @@ static void vxlan_uninit(struct net_device *dev)
gro_cells_destroy(&vxlan->gro_cells);
- vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni);
+ rhashtable_destroy(&vxlan->fdb_hash_tbl);
}
/* Start ageing timer and join group when device is brought up */
@@ -2987,7 +2981,8 @@ struct vxlan_fdb_flush_desc {
static bool vxlan_fdb_is_default_entry(const struct vxlan_fdb *f,
const struct vxlan_dev *vxlan)
{
- return is_zero_ether_addr(f->eth_addr) && f->vni == vxlan->cfg.vni;
+ return is_zero_ether_addr(f->key.eth_addr) &&
+ f->key.vni == vxlan->cfg.vni;
}
static bool vxlan_fdb_nhid_matches(const struct vxlan_fdb *f, u32 nhid)
@@ -3010,7 +3005,7 @@ static bool vxlan_fdb_flush_matches(const struct vxlan_fdb *f,
if (desc->ignore_default_entry && vxlan_fdb_is_default_entry(f, vxlan))
return false;
- if (desc->src_vni && f->vni != desc->src_vni)
+ if (desc->src_vni && f->key.vni != desc->src_vni)
return false;
if (desc->nhid && !vxlan_fdb_nhid_matches(f, desc->nhid))
@@ -3066,33 +3061,32 @@ static void vxlan_flush(struct vxlan_dev *vxlan,
const struct vxlan_fdb_flush_desc *desc)
{
bool match_remotes = vxlan_fdb_flush_should_match_remotes(desc);
- unsigned int h;
-
- for (h = 0; h < FDB_HASH_SIZE; ++h) {
- struct hlist_node *p, *n;
-
- spin_lock_bh(&vxlan->hash_lock[h]);
- hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
- struct vxlan_fdb *f
- = container_of(p, struct vxlan_fdb, hlist);
+ struct vxlan_fdb *f;
- if (!vxlan_fdb_flush_matches(f, vxlan, desc))
- continue;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(f, &vxlan->fdb_list, fdb_node) {
+ if (!vxlan_fdb_flush_matches(f, vxlan, desc))
+ continue;
- if (match_remotes) {
- bool destroy_fdb = false;
+ spin_lock_bh(&vxlan->hash_lock);
+ if (hlist_unhashed(&f->fdb_node))
+ goto unlock;
- vxlan_fdb_flush_match_remotes(f, vxlan, desc,
- &destroy_fdb);
+ if (match_remotes) {
+ bool destroy_fdb = false;
- if (!destroy_fdb)
- continue;
- }
+ vxlan_fdb_flush_match_remotes(f, vxlan, desc,
+ &destroy_fdb);
- vxlan_fdb_destroy(vxlan, f, true, true);
+ if (!destroy_fdb)
+ goto unlock;
}
- spin_unlock_bh(&vxlan->hash_lock[h]);
+
+ vxlan_fdb_destroy(vxlan, f, true, true);
+unlock:
+ spin_unlock_bh(&vxlan->hash_lock);
}
+ rcu_read_unlock();
}
static const struct nla_policy vxlan_del_bulk_policy[NDA_MAX + 1] = {
@@ -3180,7 +3174,7 @@ static int vxlan_stop(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb_flush_desc desc = {
- /* Default entry is deleted at vxlan_uninit. */
+ /* Default entry is deleted at vxlan_dellink. */
.ignore_default_entry = true,
.state = 0,
.state_mask = NUD_PERMANENT | NUD_NOARP,
@@ -3188,7 +3182,7 @@ static int vxlan_stop(struct net_device *dev)
vxlan_multicast_leave(vxlan);
- del_timer_sync(&vxlan->age_timer);
+ timer_delete_sync(&vxlan->age_timer);
vxlan_flush(vxlan, &desc);
vxlan_sock_release(vxlan);
@@ -3320,9 +3314,10 @@ static void vxlan_offload_rx_ports(struct net_device *dev, bool push)
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
unsigned int i;
- spin_lock(&vn->sock_lock);
+ ASSERT_RTNL();
+
for (i = 0; i < PORT_HASH_SIZE; ++i) {
- hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
+ hlist_for_each_entry(vs, &vn->sock_list[i], hlist) {
unsigned short type;
if (vs->flags & VXLAN_F_GPE)
@@ -3336,14 +3331,12 @@ static void vxlan_offload_rx_ports(struct net_device *dev, bool push)
udp_tunnel_drop_rx_port(dev, vs->sock, type);
}
}
- spin_unlock(&vn->sock_lock);
}
/* Initialize the device structure. */
static void vxlan_setup(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- unsigned int h;
eth_hw_addr_random(dev);
ether_setup(dev);
@@ -3368,17 +3361,15 @@ static void vxlan_setup(struct net_device *dev)
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = ETH_MAX_MTU;
- dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
INIT_LIST_HEAD(&vxlan->next);
+ spin_lock_init(&vxlan->hash_lock);
timer_setup(&vxlan->age_timer, vxlan_cleanup, TIMER_DEFERRABLE);
vxlan->dev = dev;
- for (h = 0; h < FDB_HASH_SIZE; ++h) {
- spin_lock_init(&vxlan->hash_lock[h]);
- INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
- }
+ INIT_HLIST_HEAD(&vxlan->fdb_list);
}
static void vxlan_ether_setup(struct net_device *dev)
@@ -3432,6 +3423,8 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
[IFLA_VXLAN_VNIFILTER] = { .type = NLA_U8 },
[IFLA_VXLAN_LOCALBYPASS] = NLA_POLICY_MAX(NLA_U8, 1),
[IFLA_VXLAN_LABEL_POLICY] = NLA_POLICY_MAX(NLA_U32, VXLAN_LABEL_MAX),
+ [IFLA_VXLAN_RESERVED_BITS] = NLA_POLICY_EXACT_LEN(sizeof(struct vxlanhdr)),
+ [IFLA_VXLAN_MC_ROUTE] = NLA_POLICY_MAX(NLA_U8, 1),
};
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -3568,12 +3561,13 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
__be16 port, u32 flags,
int ifindex)
{
- struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_sock *vs;
struct socket *sock;
unsigned int h;
struct udp_tunnel_sock_cfg tunnel_cfg;
+ ASSERT_RTNL();
+
vs = kzalloc(sizeof(*vs), GFP_KERNEL);
if (!vs)
return ERR_PTR(-ENOMEM);
@@ -3591,13 +3585,11 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
refcount_set(&vs->refcnt, 1);
vs->flags = (flags & VXLAN_F_RCV_FLAGS);
- spin_lock(&vn->sock_lock);
hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
udp_tunnel_notify_add_rx_port(sock,
(vs->flags & VXLAN_F_GPE) ?
UDP_TUNNEL_TYPE_VXLAN_GPE :
UDP_TUNNEL_TYPE_VXLAN);
- spin_unlock(&vn->sock_lock);
/* Mark socket as an encapsulation socket. */
memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
@@ -3621,26 +3613,27 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
{
- struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
bool metadata = vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA;
struct vxlan_sock *vs = NULL;
struct vxlan_dev_node *node;
int l3mdev_index = 0;
+ ASSERT_RTNL();
+
if (vxlan->cfg.remote_ifindex)
l3mdev_index = l3mdev_master_upper_ifindex_by_index(
vxlan->net, vxlan->cfg.remote_ifindex);
if (!vxlan->cfg.no_share) {
- spin_lock(&vn->sock_lock);
+ rcu_read_lock();
vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
vxlan->cfg.dst_port, vxlan->cfg.flags,
l3mdev_index);
if (vs && !refcount_inc_not_zero(&vs->refcnt)) {
- spin_unlock(&vn->sock_lock);
+ rcu_read_unlock();
return -EBUSY;
}
- spin_unlock(&vn->sock_lock);
+ rcu_read_unlock();
}
if (!vs)
vs = vxlan_socket_create(vxlan->net, ipv6,
@@ -3931,7 +3924,7 @@ static void vxlan_config_apply(struct net_device *dev,
}
static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
- struct vxlan_config *conf, bool changelink,
+ struct vxlan_config *conf,
struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
@@ -3942,7 +3935,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
if (ret)
return ret;
- vxlan_config_apply(dev, conf, lowerdev, src_net, changelink);
+ vxlan_config_apply(dev, conf, lowerdev, src_net, false);
return 0;
}
@@ -3954,84 +3947,64 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
struct net_device *remote_dev = NULL;
- struct vxlan_fdb *f = NULL;
- bool unregister = false;
struct vxlan_rdst *dst;
int err;
dst = &vxlan->default_dst;
- err = vxlan_dev_configure(net, dev, conf, false, extack);
+ err = vxlan_dev_configure(net, dev, conf, extack);
if (err)
return err;
dev->ethtool_ops = &vxlan_ethtool_ops;
- /* create an fdb entry for a valid default destination */
- if (!vxlan_addr_any(&dst->remote_ip)) {
- err = vxlan_fdb_create(vxlan, all_zeros_mac,
- &dst->remote_ip,
- NUD_REACHABLE | NUD_PERMANENT,
- vxlan->cfg.dst_port,
- dst->remote_vni,
- dst->remote_vni,
- dst->remote_ifindex,
- NTF_SELF, 0, &f, extack);
- if (err)
- return err;
- }
-
err = register_netdevice(dev);
if (err)
- goto errout;
- unregister = true;
+ return err;
if (dst->remote_ifindex) {
remote_dev = __dev_get_by_index(net, dst->remote_ifindex);
if (!remote_dev) {
err = -ENODEV;
- goto errout;
+ goto unregister;
}
err = netdev_upper_dev_link(remote_dev, dev, extack);
if (err)
- goto errout;
+ goto unregister;
+
+ dst->remote_dev = remote_dev;
}
err = rtnl_configure_link(dev, NULL, 0, NULL);
if (err < 0)
goto unlink;
- if (f) {
- vxlan_fdb_insert(vxlan, all_zeros_mac, dst->remote_vni, f);
-
- /* notify default fdb entry */
- err = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f),
- RTM_NEWNEIGH, true, extack);
- if (err) {
- vxlan_fdb_destroy(vxlan, f, false, false);
- if (remote_dev)
- netdev_upper_dev_unlink(remote_dev, dev);
- goto unregister;
- }
+ /* create an fdb entry for a valid default destination */
+ if (!vxlan_addr_any(&dst->remote_ip)) {
+ spin_lock_bh(&vxlan->hash_lock);
+ err = vxlan_fdb_update(vxlan, all_zeros_mac,
+ &dst->remote_ip,
+ NUD_REACHABLE | NUD_PERMANENT,
+ NLM_F_EXCL | NLM_F_CREATE,
+ vxlan->cfg.dst_port,
+ dst->remote_vni,
+ dst->remote_vni,
+ dst->remote_ifindex,
+ NTF_SELF, 0, true, extack);
+ spin_unlock_bh(&vxlan->hash_lock);
+ if (err)
+ goto unlink;
}
list_add(&vxlan->next, &vn->vxlan_list);
- if (remote_dev)
- dst->remote_dev = remote_dev;
+
return 0;
+
unlink:
if (remote_dev)
netdev_upper_dev_unlink(remote_dev, dev);
-errout:
- /* unregister_netdevice() destroys the default FDB entry with deletion
- * notification. But the addition notification was not sent yet, so
- * destroy the entry by hand here.
- */
- if (f)
- __vxlan_fdb_free(f);
unregister:
- if (unregister)
- unregister_netdevice(dev);
+ unregister_netdevice(dev);
return err;
}
@@ -4067,6 +4040,10 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
struct net_device *dev, struct vxlan_config *conf,
bool changelink, struct netlink_ext_ack *extack)
{
+ struct vxlanhdr used_bits = {
+ .vx_flags = VXLAN_HF_VNI,
+ .vx_vni = VXLAN_VNI_MASK,
+ };
struct vxlan_dev *vxlan = netdev_priv(dev);
int err = 0;
@@ -4083,7 +4060,7 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_ID], "Cannot change VNI");
return -EOPNOTSUPP;
}
- conf->vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID]));
+ conf->vni = vni;
}
if (data[IFLA_VXLAN_GROUP]) {
@@ -4293,6 +4270,8 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
extack);
if (err)
return err;
+ used_bits.vx_flags |= VXLAN_HF_RCO;
+ used_bits.vx_vni |= ~VXLAN_VNI_MASK;
}
if (data[IFLA_VXLAN_GBP]) {
@@ -4300,6 +4279,7 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
VXLAN_F_GBP, changelink, false, extack);
if (err)
return err;
+ used_bits.vx_flags |= VXLAN_GBP_USED_BITS;
}
if (data[IFLA_VXLAN_GPE]) {
@@ -4308,6 +4288,46 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
extack);
if (err)
return err;
+
+ used_bits.vx_flags |= VXLAN_GPE_USED_BITS;
+ }
+
+ if (data[IFLA_VXLAN_RESERVED_BITS]) {
+ struct vxlanhdr reserved_bits;
+
+ if (changelink) {
+ NL_SET_ERR_MSG_ATTR(extack,
+ data[IFLA_VXLAN_RESERVED_BITS],
+ "Cannot change reserved_bits");
+ return -EOPNOTSUPP;
+ }
+
+ nla_memcpy(&reserved_bits, data[IFLA_VXLAN_RESERVED_BITS],
+ sizeof(reserved_bits));
+ if (used_bits.vx_flags & reserved_bits.vx_flags ||
+ used_bits.vx_vni & reserved_bits.vx_vni) {
+ __be64 ub_be64, rb_be64;
+
+ memcpy(&ub_be64, &used_bits, sizeof(ub_be64));
+ memcpy(&rb_be64, &reserved_bits, sizeof(rb_be64));
+
+ NL_SET_ERR_MSG_ATTR_FMT(extack,
+ data[IFLA_VXLAN_RESERVED_BITS],
+ "Used bits %#018llx cannot overlap reserved bits %#018llx",
+ be64_to_cpu(ub_be64),
+ be64_to_cpu(rb_be64));
+ return -EINVAL;
+ }
+
+ conf->reserved_bits = reserved_bits;
+ } else {
+ /* For backwards compatibility, only allow reserved fields to be
+ * used by VXLAN extensions if explicitly requested.
+ */
+ conf->reserved_bits = (struct vxlanhdr) {
+ .vx_flags = ~used_bits.vx_flags,
+ .vx_vni = ~used_bits.vx_vni,
+ };
}
if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) {
@@ -4318,6 +4338,14 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
return err;
}
+ if (data[IFLA_VXLAN_MC_ROUTE]) {
+ err = vxlan_nl2flag(conf, data, IFLA_VXLAN_MC_ROUTE,
+ VXLAN_F_MC_ROUTE, changelink,
+ true, extack);
+ if (err)
+ return err;
+ }
+
if (tb[IFLA_MTU]) {
if (changelink) {
NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_MTU],
@@ -4348,10 +4376,13 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
return 0;
}
-static int vxlan_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int vxlan_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct vxlan_config conf;
int err;
@@ -4359,7 +4390,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
if (err)
return err;
- return __vxlan_dev_create(src_net, dev, &conf, extack);
+ return __vxlan_dev_create(link_net, dev, &conf, extack);
}
static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
@@ -4367,6 +4398,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
+ bool rem_ip_changed, change_igmp;
struct net_device *lowerdev;
struct vxlan_config conf;
struct vxlan_rdst *dst;
@@ -4390,11 +4422,14 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
if (err)
return err;
- /* handle default dst entry */
- if (!vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip)) {
- u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, conf.vni);
+ rem_ip_changed = !vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip);
+ change_igmp = vxlan->dev->flags & IFF_UP &&
+ (rem_ip_changed ||
+ dst->remote_ifindex != conf.remote_ifindex);
- spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ /* handle default dst entry */
+ if (rem_ip_changed) {
+ spin_lock_bh(&vxlan->hash_lock);
if (!vxlan_addr_any(&conf.remote_ip)) {
err = vxlan_fdb_update(vxlan, all_zeros_mac,
&conf.remote_ip,
@@ -4405,7 +4440,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
conf.remote_ifindex,
NTF_SELF, 0, true, extack);
if (err) {
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ spin_unlock_bh(&vxlan->hash_lock);
netdev_adjacent_change_abort(dst->remote_dev,
lowerdev, dev);
return err;
@@ -4419,7 +4454,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
dst->remote_vni,
dst->remote_ifindex,
true);
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ spin_unlock_bh(&vxlan->hash_lock);
/* If vni filtering device, also update fdb entries of
* all vnis that were using default remote ip
@@ -4435,6 +4470,9 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
}
}
+ if (change_igmp && vxlan_addr_multicast(&dst->remote_ip))
+ err = vxlan_multicast_leave(vxlan);
+
if (conf.age_interval != vxlan->cfg.age_interval)
mod_timer(&vxlan->age_timer, jiffies);
@@ -4442,16 +4480,18 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
if (lowerdev && lowerdev != dst->remote_dev)
dst->remote_dev = lowerdev;
vxlan_config_apply(dev, &conf, lowerdev, vxlan->net, true);
- return 0;
+
+ if (!err && change_igmp &&
+ vxlan_addr_multicast(&dst->remote_ip))
+ err = vxlan_multicast_join(vxlan);
+
+ return err;
}
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- struct vxlan_fdb_flush_desc desc = {
- /* Default entry is deleted at vxlan_uninit. */
- .ignore_default_entry = true,
- };
+ struct vxlan_fdb_flush_desc desc = {};
vxlan_flush(vxlan, &desc);
@@ -4494,6 +4534,8 @@ static size_t vxlan_get_size(const struct net_device *dev)
nla_total_size(0) + /* IFLA_VXLAN_GPE */
nla_total_size(0) + /* IFLA_VXLAN_REMCSUM_NOPARTIAL */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_VNIFILTER */
+ /* IFLA_VXLAN_RESERVED_BITS */
+ nla_total_size(sizeof(struct vxlanhdr)) +
0;
}
@@ -4596,6 +4638,11 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
!!(vxlan->cfg.flags & VXLAN_F_VNIFILTER)))
goto nla_put_failure;
+ if (nla_put(skb, IFLA_VXLAN_RESERVED_BITS,
+ sizeof(vxlan->cfg.reserved_bits),
+ &vxlan->cfg.reserved_bits))
+ goto nla_put_failure;
+
return 0;
nla_put_failure:
@@ -4707,11 +4754,8 @@ vxlan_fdb_offloaded_set(struct net_device *dev,
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *rdst;
struct vxlan_fdb *f;
- u32 hash_index;
-
- hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni);
- spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ spin_lock_bh(&vxlan->hash_lock);
f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
if (!f)
@@ -4727,7 +4771,7 @@ vxlan_fdb_offloaded_set(struct net_device *dev,
rdst->offloaded = fdb_info->offloaded;
out:
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ spin_unlock_bh(&vxlan->hash_lock);
}
static int
@@ -4736,13 +4780,11 @@ vxlan_fdb_external_learn_add(struct net_device *dev,
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct netlink_ext_ack *extack;
- u32 hash_index;
int err;
- hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni);
extack = switchdev_notifier_info_to_extack(&fdb_info->info);
- spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ spin_lock_bh(&vxlan->hash_lock);
err = vxlan_fdb_update(vxlan, fdb_info->eth_addr, &fdb_info->remote_ip,
NUD_REACHABLE,
NLM_F_CREATE | NLM_F_REPLACE,
@@ -4752,7 +4794,7 @@ vxlan_fdb_external_learn_add(struct net_device *dev,
fdb_info->remote_ifindex,
NTF_USE | NTF_SELF | NTF_EXT_LEARNED,
0, false, extack);
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ spin_unlock_bh(&vxlan->hash_lock);
return err;
}
@@ -4763,11 +4805,9 @@ vxlan_fdb_external_learn_del(struct net_device *dev,
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb *f;
- u32 hash_index;
int err = 0;
- hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni);
- spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ spin_lock_bh(&vxlan->hash_lock);
f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
if (!f)
@@ -4781,7 +4821,7 @@ vxlan_fdb_external_learn_del(struct net_device *dev,
fdb_info->remote_ifindex,
false);
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ spin_unlock_bh(&vxlan->hash_lock);
return err;
}
@@ -4830,18 +4870,15 @@ static void vxlan_fdb_nh_flush(struct nexthop *nh)
{
struct vxlan_fdb *fdb;
struct vxlan_dev *vxlan;
- u32 hash_index;
rcu_read_lock();
list_for_each_entry_rcu(fdb, &nh->fdb_list, nh_list) {
vxlan = rcu_dereference(fdb->vdev);
WARN_ON(!vxlan);
- hash_index = fdb_head_index(vxlan, fdb->eth_addr,
- vxlan->default_dst.remote_vni);
- spin_lock_bh(&vxlan->hash_lock[hash_index]);
- if (!hlist_unhashed(&fdb->hlist))
+ spin_lock_bh(&vxlan->hash_lock);
+ if (!hlist_unhashed(&fdb->fdb_node))
vxlan_fdb_destroy(vxlan, fdb, false, false);
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ spin_unlock_bh(&vxlan->hash_lock);
}
rcu_read_unlock();
}
@@ -4870,7 +4907,6 @@ static __net_init int vxlan_init_net(struct net *net)
unsigned int h;
INIT_LIST_HEAD(&vn->vxlan_list);
- spin_lock_init(&vn->sock_lock);
vn->nexthop_notifier_block.notifier_call = vxlan_nexthop_event;
for (h = 0; h < PORT_HASH_SIZE; ++h)
@@ -4889,19 +4925,15 @@ static void __net_exit vxlan_destroy_tunnels(struct vxlan_net *vn,
vxlan_dellink(vxlan->dev, dev_to_kill);
}
-static void __net_exit vxlan_exit_batch_rtnl(struct list_head *net_list,
- struct list_head *dev_to_kill)
+static void __net_exit vxlan_exit_rtnl(struct net *net,
+ struct list_head *dev_to_kill)
{
- struct net *net;
-
- ASSERT_RTNL();
- list_for_each_entry(net, net_list, exit_list) {
- struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
- __unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);
+ ASSERT_RTNL_NET(net);
- vxlan_destroy_tunnels(vn, dev_to_kill);
- }
+ __unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);
+ vxlan_destroy_tunnels(vn, dev_to_kill);
}
static void __net_exit vxlan_exit_net(struct net *net)
@@ -4915,7 +4947,7 @@ static void __net_exit vxlan_exit_net(struct net *net)
static struct pernet_operations vxlan_net_ops = {
.init = vxlan_init_net,
- .exit_batch_rtnl = vxlan_exit_batch_rtnl,
+ .exit_rtnl = vxlan_exit_rtnl,
.exit = vxlan_exit_net,
.id = &vxlan_net_id,
.size = sizeof(struct vxlan_net),
@@ -4925,8 +4957,6 @@ static int __init vxlan_init_module(void)
{
int rc;
- get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));
-
rc = register_pernet_subsys(&vxlan_net_ops);
if (rc)
goto out1;
diff --git a/drivers/net/vxlan/vxlan_mdb.c b/drivers/net/vxlan/vxlan_mdb.c
index 8735891ee128..816ab1aa0526 100644
--- a/drivers/net/vxlan/vxlan_mdb.c
+++ b/drivers/net/vxlan/vxlan_mdb.c
@@ -1712,7 +1712,7 @@ netdev_tx_t vxlan_mdb_xmit(struct vxlan_dev *vxlan,
vxlan_xmit_one(skb, vxlan->dev, src_vni,
rcu_dereference(fremote->rd), false);
else
- kfree_skb_reason(skb, SKB_DROP_REASON_VXLAN_NO_REMOTE);
+ kfree_skb_reason(skb, SKB_DROP_REASON_NO_TX_TARGET);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/vxlan/vxlan_private.h b/drivers/net/vxlan/vxlan_private.h
index 76a351a997d5..b1eec2216360 100644
--- a/drivers/net/vxlan/vxlan_private.h
+++ b/drivers/net/vxlan/vxlan_private.h
@@ -19,23 +19,28 @@ extern const struct rhashtable_params vxlan_vni_rht_params;
/* per-network namespace private data for this module */
struct vxlan_net {
struct list_head vxlan_list;
+ /* sock_list is protected by rtnl lock */
struct hlist_head sock_list[PORT_HASH_SIZE];
- spinlock_t sock_lock;
struct notifier_block nexthop_notifier_block;
};
+struct vxlan_fdb_key {
+ u8 eth_addr[ETH_ALEN];
+ __be32 vni;
+};
+
/* Forwarding table entry */
struct vxlan_fdb {
- struct hlist_node hlist; /* linked list of entries */
+ struct rhash_head rhnode;
struct rcu_head rcu;
unsigned long updated; /* jiffies */
unsigned long used;
struct list_head remotes;
- u8 eth_addr[ETH_ALEN];
+ struct vxlan_fdb_key key;
u16 state; /* see ndm_state */
- __be32 vni;
u16 flags; /* see ndm_flags and below */
struct list_head nh_list;
+ struct hlist_node fdb_node;
struct nexthop __rcu *nh;
struct vxlan_dev __rcu *vdev;
};
@@ -56,9 +61,7 @@ static inline struct hlist_head *vs_head(struct net *net, __be16 port)
return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
}
-/* First remote destination for a forwarding entry.
- * Guaranteed to be non-NULL because remotes are never deleted.
- */
+/* First remote destination for a forwarding entry. */
static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
{
if (rcu_access_pointer(fdb->nh))
@@ -185,8 +188,6 @@ int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
const unsigned char *addr, union vxlan_addr ip,
__be16 port, __be32 src_vni, __be32 vni,
u32 ifindex, bool swdev_notify);
-u32 eth_vni_hash(const unsigned char *addr, __be32 vni);
-u32 fdb_head_index(struct vxlan_dev *vxlan, const u8 *mac, __be32 vni);
int vxlan_fdb_update(struct vxlan_dev *vxlan,
const u8 *mac, union vxlan_addr *ip,
__u16 state, __u16 flags,
diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c
index d2023e7131bd..adc89e651e27 100644
--- a/drivers/net/vxlan/vxlan_vnifilter.c
+++ b/drivers/net/vxlan/vxlan_vnifilter.c
@@ -40,11 +40,11 @@ static void vxlan_vs_add_del_vninode(struct vxlan_dev *vxlan,
struct vxlan_vni_node *v,
bool del)
{
- struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
struct vxlan_dev_node *node;
struct vxlan_sock *vs;
- spin_lock(&vn->sock_lock);
+ ASSERT_RTNL();
+
if (del) {
if (!hlist_unhashed(&v->hlist4.hlist))
hlist_del_init_rcu(&v->hlist4.hlist);
@@ -52,7 +52,7 @@ static void vxlan_vs_add_del_vninode(struct vxlan_dev *vxlan,
if (!hlist_unhashed(&v->hlist6.hlist))
hlist_del_init_rcu(&v->hlist6.hlist);
#endif
- goto out;
+ return;
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -67,23 +67,21 @@ static void vxlan_vs_add_del_vninode(struct vxlan_dev *vxlan,
node = &v->hlist4;
hlist_add_head_rcu(&node->hlist, vni_head(vs, v->vni));
}
-out:
- spin_unlock(&vn->sock_lock);
}
void vxlan_vs_add_vnigrp(struct vxlan_dev *vxlan,
struct vxlan_sock *vs,
bool ipv6)
{
- struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp);
struct vxlan_vni_node *v, *tmp;
struct vxlan_dev_node *node;
+ ASSERT_RTNL();
+
if (!vg)
return;
- spin_lock(&vn->sock_lock);
list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
#if IS_ENABLED(CONFIG_IPV6)
if (ipv6)
@@ -94,26 +92,24 @@ void vxlan_vs_add_vnigrp(struct vxlan_dev *vxlan,
node->vxlan = vxlan;
hlist_add_head_rcu(&node->hlist, vni_head(vs, v->vni));
}
- spin_unlock(&vn->sock_lock);
}
void vxlan_vs_del_vnigrp(struct vxlan_dev *vxlan)
{
struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp);
- struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
struct vxlan_vni_node *v, *tmp;
+ ASSERT_RTNL();
+
if (!vg)
return;
- spin_lock(&vn->sock_lock);
list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
hlist_del_init_rcu(&v->hlist4.hlist);
#if IS_ENABLED(CONFIG_IPV6)
hlist_del_init_rcu(&v->hlist6.hlist);
#endif
}
- spin_unlock(&vn->sock_lock);
}
static void vxlan_vnifilter_stats_get(const struct vxlan_vni_node *vninode,
@@ -411,7 +407,11 @@ static int vxlan_vnifilter_dump(struct sk_buff *skb, struct netlink_callback *cb
struct tunnel_msg *tmsg;
struct net_device *dev;
- tmsg = nlmsg_data(cb->nlh);
+ tmsg = nlmsg_payload(cb->nlh, sizeof(*tmsg));
+ if (!tmsg) {
+ NL_SET_ERR_MSG(cb->extack, "Invalid msg length");
+ return -EINVAL;
+ }
if (tmsg->flags & ~TUNNEL_MSG_VALID_USER_FLAGS) {
NL_SET_ERR_MSG(cb->extack, "Invalid tunnelmsg flags in ancillary header");
@@ -478,11 +478,9 @@ static int vxlan_update_default_fdb_entry(struct vxlan_dev *vxlan, __be32 vni,
struct netlink_ext_ack *extack)
{
struct vxlan_rdst *dst = &vxlan->default_dst;
- u32 hash_index;
int err = 0;
- hash_index = fdb_head_index(vxlan, all_zeros_mac, vni);
- spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ spin_lock_bh(&vxlan->hash_lock);
if (remote_ip && !vxlan_addr_any(remote_ip)) {
err = vxlan_fdb_update(vxlan, all_zeros_mac,
remote_ip,
@@ -494,7 +492,7 @@ static int vxlan_update_default_fdb_entry(struct vxlan_dev *vxlan, __be32 vni,
dst->remote_ifindex,
NTF_SELF, 0, true, extack);
if (err) {
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ spin_unlock_bh(&vxlan->hash_lock);
return err;
}
}
@@ -507,7 +505,7 @@ static int vxlan_update_default_fdb_entry(struct vxlan_dev *vxlan, __be32 vni,
dst->remote_ifindex,
true);
}
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ spin_unlock_bh(&vxlan->hash_lock);
return err;
}
@@ -622,7 +620,8 @@ static void vxlan_vni_delete_group(struct vxlan_dev *vxlan,
* default dst remote_ip previously added for this vni
*/
if (!vxlan_addr_any(&vninode->remote_ip) ||
- !vxlan_addr_any(&dst->remote_ip))
+ !vxlan_addr_any(&dst->remote_ip)) {
+ spin_lock_bh(&vxlan->hash_lock);
__vxlan_fdb_delete(vxlan, all_zeros_mac,
(vxlan_addr_any(&vninode->remote_ip) ?
dst->remote_ip : vninode->remote_ip),
@@ -630,6 +629,8 @@ static void vxlan_vni_delete_group(struct vxlan_dev *vxlan,
vninode->vni, vninode->vni,
dst->remote_ifindex,
true);
+ spin_unlock_bh(&vxlan->hash_lock);
+ }
if (vxlan->dev->flags & IFF_UP) {
if (vxlan_addr_multicast(&vninode->remote_ip) &&
@@ -970,15 +971,10 @@ static int vxlan_vnifilter_process(struct sk_buff *skb, struct nlmsghdr *nlh,
if (!(vxlan->cfg.flags & VXLAN_F_VNIFILTER))
return -EOPNOTSUPP;
- nlmsg_for_each_attr(attr, nlh, sizeof(*tmsg), rem) {
- switch (nla_type(attr)) {
- case VXLAN_VNIFILTER_ENTRY:
- err = vxlan_process_vni_filter(vxlan, attr,
- nlh->nlmsg_type, extack);
- break;
- default:
- continue;
- }
+ nlmsg_for_each_attr_type(attr, VXLAN_VNIFILTER_ENTRY, nlh,
+ sizeof(*tmsg), rem) {
+ err = vxlan_process_vni_filter(vxlan, attr, nlh->nlmsg_type,
+ extack);
vnis++;
if (err)
break;
diff --git a/drivers/net/wan/framer/framer-core.c b/drivers/net/wan/framer/framer-core.c
index f547c22e26ac..58f5143359df 100644
--- a/drivers/net/wan/framer/framer-core.c
+++ b/drivers/net/wan/framer/framer-core.c
@@ -732,8 +732,8 @@ EXPORT_SYMBOL_GPL(devm_framer_create);
/**
* framer_provider_simple_of_xlate() - returns the framer instance from framer provider
- * @dev: the framer provider device
- * @args: of_phandle_args (not used here)
+ * @dev: the framer provider device (not used here)
+ * @args: of_phandle_args
*
* Intended to be used by framer provider for the common case where #framer-cells is
* 0. For other cases where #framer-cells is greater than '0', the framer provider
@@ -743,21 +743,14 @@ EXPORT_SYMBOL_GPL(devm_framer_create);
struct framer *framer_provider_simple_of_xlate(struct device *dev,
const struct of_phandle_args *args)
{
- struct class_dev_iter iter;
- struct framer *framer;
-
- class_dev_iter_init(&iter, &framer_class, NULL, NULL);
- while ((dev = class_dev_iter_next(&iter))) {
- framer = dev_to_framer(dev);
- if (args->np != framer->dev.of_node)
- continue;
+ struct device *target_dev;
- class_dev_iter_exit(&iter);
- return framer;
- }
+ target_dev = class_find_device_by_of_node(&framer_class, args->np);
+ if (!target_dev)
+ return ERR_PTR(-ENODEV);
- class_dev_iter_exit(&iter);
- return ERR_PTR(-ENODEV);
+ put_device(target_dev);
+ return dev_to_framer(target_dev);
}
EXPORT_SYMBOL_GPL(framer_provider_simple_of_xlate);
diff --git a/drivers/net/wan/framer/pef2256/pef2256.c b/drivers/net/wan/framer/pef2256/pef2256.c
index 1e4c8e85d598..c058cc79137d 100644
--- a/drivers/net/wan/framer/pef2256/pef2256.c
+++ b/drivers/net/wan/framer/pef2256/pef2256.c
@@ -37,6 +37,7 @@ struct pef2256 {
struct device *dev;
struct regmap *regmap;
enum pef2256_version version;
+ const char *version_txt;
struct clk *mclk;
struct clk *sclkr;
struct clk *sclkx;
@@ -114,6 +115,16 @@ enum pef2256_version pef2256_get_version(struct pef2256 *pef2256)
}
EXPORT_SYMBOL_GPL(pef2256_get_version);
+static ssize_t version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pef2256 *pef2256 = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", pef2256->version_txt);
+}
+
+static DEVICE_ATTR_RO(version);
+
enum pef2256_gcm_config_item {
PEF2256_GCM_CONFIG_1544000 = 0,
PEF2256_GCM_CONFIG_2048000,
@@ -637,7 +648,8 @@ static int pef2256_add_audio_devices(struct pef2256 *pef2256)
audio_devs[i].id = i;
}
- ret = mfd_add_devices(pef2256->dev, 0, audio_devs, count, NULL, 0, NULL);
+ ret = devm_mfd_add_devices(pef2256->dev, 0, audio_devs, count,
+ NULL, 0, NULL);
kfree(audio_devs);
return ret;
}
@@ -697,7 +709,6 @@ static int pef2256_probe(struct platform_device *pdev)
unsigned long sclkr_rate, sclkx_rate;
struct framer_provider *framer_provider;
struct pef2256 *pef2256;
- const char *version_txt;
void __iomem *iomem;
int ret;
int irq;
@@ -719,8 +730,8 @@ static int pef2256_probe(struct platform_device *pdev)
pef2256->regmap = devm_regmap_init_mmio(&pdev->dev, iomem,
&pef2256_regmap_config);
if (IS_ERR(pef2256->regmap)) {
- dev_err(&pdev->dev, "Failed to initialise Regmap (%ld)\n",
- PTR_ERR(pef2256->regmap));
+ dev_err(&pdev->dev, "Failed to initialise Regmap (%pe)\n",
+ pef2256->regmap);
return PTR_ERR(pef2256->regmap);
}
@@ -763,18 +774,18 @@ static int pef2256_probe(struct platform_device *pdev)
pef2256->version = pef2256_get_version(pef2256);
switch (pef2256->version) {
case PEF2256_VERSION_1_2:
- version_txt = "1.2";
+ pef2256->version_txt = "1.2";
break;
case PEF2256_VERSION_2_1:
- version_txt = "2.1";
+ pef2256->version_txt = "2.1";
break;
case PEF2256_VERSION_2_2:
- version_txt = "2.2";
+ pef2256->version_txt = "2.2";
break;
default:
return -ENODEV;
}
- dev_info(pef2256->dev, "Version %s detected\n", version_txt);
+ dev_info(pef2256->dev, "Version %s detected\n", pef2256->version_txt);
ret = pef2556_of_parse(pef2256, np);
if (ret)
@@ -812,8 +823,8 @@ static int pef2256_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pef2256);
- ret = mfd_add_devices(pef2256->dev, 0, pef2256_devs,
- ARRAY_SIZE(pef2256_devs), NULL, 0, NULL);
+ ret = devm_mfd_add_devices(pef2256->dev, 0, pef2256_devs,
+ ARRAY_SIZE(pef2256_devs), NULL, 0, NULL);
if (ret) {
dev_err(pef2256->dev, "add devices failed (%d)\n", ret);
return ret;
@@ -835,6 +846,8 @@ static int pef2256_probe(struct platform_device *pdev)
return ret;
}
+ device_create_file(pef2256->dev, &dev_attr_version);
+
return 0;
}
@@ -849,6 +862,8 @@ static void pef2256_remove(struct platform_device *pdev)
pef2256_write8(pef2256, PEF2256_IMR3, 0xff);
pef2256_write8(pef2256, PEF2256_IMR4, 0xff);
pef2256_write8(pef2256, PEF2256_IMR5, 0xff);
+
+ device_remove_file(pef2256->dev, &dev_attr_version);
}
static const struct of_device_id pef2256_id_table[] = {
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index cdebe65a7e2d..bfc978b15bc2 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -244,7 +244,7 @@ rx_error:
static void cisco_timer(struct timer_list *t)
{
- struct cisco_state *st = from_timer(st, t, timer);
+ struct cisco_state *st = timer_container_of(st, t, timer);
struct net_device *dev = st->dev;
spin_lock(&st->lock);
@@ -285,7 +285,7 @@ static void cisco_stop(struct net_device *dev)
struct cisco_state *st = state(hdlc);
unsigned long flags;
- del_timer_sync(&st->timer);
+ timer_delete_sync(&st->timer);
spin_lock_irqsave(&st->lock, flags);
netif_dormant_on(dev);
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 81e72bc1891f..08a0ba5ca471 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -581,7 +581,7 @@ static void fr_set_link_state(int reliable, struct net_device *dev)
static void fr_timer(struct timer_list *t)
{
- struct frad_state *st = from_timer(st, t, timer);
+ struct frad_state *st = timer_container_of(st, t, timer);
struct net_device *dev = st->dev;
hdlc_device *hdlc = dev_to_hdlc(dev);
int i, cnt = 0, reliable;
@@ -1025,7 +1025,7 @@ static void fr_stop(struct net_device *dev)
printk(KERN_DEBUG "fr_stop\n");
#endif
if (state(hdlc)->settings.lmi != LMI_NONE)
- del_timer_sync(&state(hdlc)->timer);
+ timer_delete_sync(&state(hdlc)->timer);
fr_set_link_state(0, dev);
}
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 37a3c989cba1..159295c4bd6d 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -126,14 +126,12 @@ static inline struct proto *get_proto(struct net_device *dev, u16 pid)
static inline const char *proto_name(u16 pid)
{
switch (pid) {
- case PID_LCP:
- return "LCP";
case PID_IPCP:
return "IPCP";
case PID_IPV6CP:
return "IPV6CP";
default:
- return NULL;
+ return "LCP";
}
}
@@ -358,7 +356,7 @@ static void ppp_cp_event(struct net_device *dev, u16 pid, u16 event, u8 code,
}
}
if (old_state != CLOSED && proto->state == CLOSED)
- del_timer(&proto->timer);
+ timer_delete(&proto->timer);
#if DEBUG_STATE
printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) ... %s\n", dev->name,
@@ -561,7 +559,7 @@ out:
static void ppp_timer(struct timer_list *t)
{
- struct proto *proto = from_timer(proto, t, timer);
+ struct proto *proto = timer_container_of(proto, t, timer);
struct ppp *ppp = get_ppp(proto->dev);
unsigned long flags;
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 56326f38fe8a..f357a7ac70ac 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -39,6 +39,7 @@
#include <linux/lapb.h>
#include <linux/init.h>
+#include <net/netdev_lock.h>
#include <net/x25device.h>
static const u8 bcast_addr[6] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
@@ -80,7 +81,7 @@ static struct lapbethdev *lapbeth_get_x25_dev(struct net_device *dev)
static __inline__ int dev_is_ethdev(struct net_device *dev)
{
- return dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5);
+ return dev->type == ARPHRD_ETHER && !netdev_need_ops_lock(dev);
}
/* ------------------------------------------------------------------------ */
@@ -366,6 +367,7 @@ static const struct net_device_ops lapbeth_netdev_ops = {
static void lapbeth_setup(struct net_device *dev)
{
+ netdev_lockdep_set_classes(dev);
dev->netdev_ops = &lapbeth_netdev_ops;
dev->needs_free_netdev = true;
dev->type = ARPHRD_X25;
diff --git a/drivers/net/wireguard/Makefile b/drivers/net/wireguard/Makefile
index dbe1f8514efc..00cbcc9ab69d 100644
--- a/drivers/net/wireguard/Makefile
+++ b/drivers/net/wireguard/Makefile
@@ -13,5 +13,5 @@ wireguard-y += peerlookup.o
wireguard-y += allowedips.o
wireguard-y += ratelimiter.o
wireguard-y += cookie.o
-wireguard-y += netlink.o
+wireguard-y += netlink.o generated/netlink.o
obj-$(CONFIG_WIREGUARD) := wireguard.o
diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c
index 4b8528206cc8..09f7fcd7da78 100644
--- a/drivers/net/wireguard/allowedips.c
+++ b/drivers/net/wireguard/allowedips.c
@@ -249,6 +249,52 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
return 0;
}
+static void remove_node(struct allowedips_node *node, struct mutex *lock)
+{
+ struct allowedips_node *child, **parent_bit, *parent;
+ bool free_parent;
+
+ list_del_init(&node->peer_list);
+ RCU_INIT_POINTER(node->peer, NULL);
+ if (node->bit[0] && node->bit[1])
+ return;
+ child = rcu_dereference_protected(node->bit[!rcu_access_pointer(node->bit[0])],
+ lockdep_is_held(lock));
+ if (child)
+ child->parent_bit_packed = node->parent_bit_packed;
+ parent_bit = (struct allowedips_node **)(node->parent_bit_packed & ~3UL);
+ *parent_bit = child;
+ parent = (void *)parent_bit -
+ offsetof(struct allowedips_node, bit[node->parent_bit_packed & 1]);
+ free_parent = !rcu_access_pointer(node->bit[0]) && !rcu_access_pointer(node->bit[1]) &&
+ (node->parent_bit_packed & 3) <= 1 && !rcu_access_pointer(parent->peer);
+ if (free_parent)
+ child = rcu_dereference_protected(parent->bit[!(node->parent_bit_packed & 1)],
+ lockdep_is_held(lock));
+ call_rcu(&node->rcu, node_free_rcu);
+ if (!free_parent)
+ return;
+ if (child)
+ child->parent_bit_packed = parent->parent_bit_packed;
+ *(struct allowedips_node **)(parent->parent_bit_packed & ~3UL) = child;
+ call_rcu(&parent->rcu, node_free_rcu);
+}
+
+static int remove(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
+ u8 cidr, struct wg_peer *peer, struct mutex *lock)
+{
+ struct allowedips_node *node;
+
+ if (unlikely(cidr > bits))
+ return -EINVAL;
+ if (!rcu_access_pointer(*trie) || !node_placement(*trie, key, cidr, bits, &node, lock) ||
+ peer != rcu_access_pointer(node->peer))
+ return 0;
+
+ remove_node(node, lock);
+ return 0;
+}
+
void wg_allowedips_init(struct allowedips *table)
{
table->root4 = table->root6 = NULL;
@@ -300,44 +346,38 @@ int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip,
return add(&table->root6, 128, key, cidr, peer, lock);
}
+int wg_allowedips_remove_v4(struct allowedips *table, const struct in_addr *ip,
+ u8 cidr, struct wg_peer *peer, struct mutex *lock)
+{
+ /* Aligned so it can be passed to fls */
+ u8 key[4] __aligned(__alignof(u32));
+
+ ++table->seq;
+ swap_endian(key, (const u8 *)ip, 32);
+ return remove(&table->root4, 32, key, cidr, peer, lock);
+}
+
+int wg_allowedips_remove_v6(struct allowedips *table, const struct in6_addr *ip,
+ u8 cidr, struct wg_peer *peer, struct mutex *lock)
+{
+ /* Aligned so it can be passed to fls64 */
+ u8 key[16] __aligned(__alignof(u64));
+
+ ++table->seq;
+ swap_endian(key, (const u8 *)ip, 128);
+ return remove(&table->root6, 128, key, cidr, peer, lock);
+}
+
void wg_allowedips_remove_by_peer(struct allowedips *table,
struct wg_peer *peer, struct mutex *lock)
{
- struct allowedips_node *node, *child, **parent_bit, *parent, *tmp;
- bool free_parent;
+ struct allowedips_node *node, *tmp;
if (list_empty(&peer->allowedips_list))
return;
++table->seq;
- list_for_each_entry_safe(node, tmp, &peer->allowedips_list, peer_list) {
- list_del_init(&node->peer_list);
- RCU_INIT_POINTER(node->peer, NULL);
- if (node->bit[0] && node->bit[1])
- continue;
- child = rcu_dereference_protected(node->bit[!rcu_access_pointer(node->bit[0])],
- lockdep_is_held(lock));
- if (child)
- child->parent_bit_packed = node->parent_bit_packed;
- parent_bit = (struct allowedips_node **)(node->parent_bit_packed & ~3UL);
- *parent_bit = child;
- parent = (void *)parent_bit -
- offsetof(struct allowedips_node, bit[node->parent_bit_packed & 1]);
- free_parent = !rcu_access_pointer(node->bit[0]) &&
- !rcu_access_pointer(node->bit[1]) &&
- (node->parent_bit_packed & 3) <= 1 &&
- !rcu_access_pointer(parent->peer);
- if (free_parent)
- child = rcu_dereference_protected(
- parent->bit[!(node->parent_bit_packed & 1)],
- lockdep_is_held(lock));
- call_rcu(&node->rcu, node_free_rcu);
- if (!free_parent)
- continue;
- if (child)
- child->parent_bit_packed = parent->parent_bit_packed;
- *(struct allowedips_node **)(parent->parent_bit_packed & ~3UL) = child;
- call_rcu(&parent->rcu, node_free_rcu);
- }
+ list_for_each_entry_safe(node, tmp, &peer->allowedips_list, peer_list)
+ remove_node(node, lock);
}
int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr)
diff --git a/drivers/net/wireguard/allowedips.h b/drivers/net/wireguard/allowedips.h
index 2346c797eb4d..931958cb6e10 100644
--- a/drivers/net/wireguard/allowedips.h
+++ b/drivers/net/wireguard/allowedips.h
@@ -38,6 +38,10 @@ int wg_allowedips_insert_v4(struct allowedips *table, const struct in_addr *ip,
u8 cidr, struct wg_peer *peer, struct mutex *lock);
int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip,
u8 cidr, struct wg_peer *peer, struct mutex *lock);
+int wg_allowedips_remove_v4(struct allowedips *table, const struct in_addr *ip,
+ u8 cidr, struct wg_peer *peer, struct mutex *lock);
+int wg_allowedips_remove_v6(struct allowedips *table, const struct in6_addr *ip,
+ u8 cidr, struct wg_peer *peer, struct mutex *lock);
void wg_allowedips_remove_by_peer(struct allowedips *table,
struct wg_peer *peer, struct mutex *lock);
/* The ip input pointer should be __aligned(__alignof(u64))) */
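
Editor's note: a hedged usage sketch of the remove API declared above, distilled from the semantics the selftests exercise later in this series: removal only happens on an exact (ip, cidr, peer) match, and a mismatch simply returns 0. The example_* wrapper and its arguments are illustrative, not part of the patch:

static int example_drop_allowed_ip(struct wg_device *wg, struct wg_peer *peer,
				   const struct in_addr *ip, u8 cidr)
{
	/* Same lock that guards insertion; both paths bump table->seq. */
	return wg_allowedips_remove_v4(&wg->peer_allowedips, ip, cidr,
				       peer, &wg->device_update_lock);
}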
diff --git a/drivers/net/wireguard/cookie.c b/drivers/net/wireguard/cookie.c
index f89581b5e8cb..08731b3fa32b 100644
--- a/drivers/net/wireguard/cookie.c
+++ b/drivers/net/wireguard/cookie.c
@@ -26,14 +26,14 @@ void wg_cookie_checker_init(struct cookie_checker *checker,
}
enum { COOKIE_KEY_LABEL_LEN = 8 };
-static const u8 mac1_key_label[COOKIE_KEY_LABEL_LEN] = "mac1----";
-static const u8 cookie_key_label[COOKIE_KEY_LABEL_LEN] = "cookie--";
+static const u8 mac1_key_label[COOKIE_KEY_LABEL_LEN] __nonstring = "mac1----";
+static const u8 cookie_key_label[COOKIE_KEY_LABEL_LEN] __nonstring = "cookie--";
static void precompute_key(u8 key[NOISE_SYMMETRIC_KEY_LEN],
const u8 pubkey[NOISE_PUBLIC_KEY_LEN],
const u8 label[COOKIE_KEY_LABEL_LEN])
{
- struct blake2s_state blake;
+ struct blake2s_ctx blake;
blake2s_init(&blake, NOISE_SYMMETRIC_KEY_LEN);
blake2s_update(&blake, label, COOKIE_KEY_LABEL_LEN);
@@ -77,7 +77,7 @@ static void compute_mac1(u8 mac1[COOKIE_LEN], const void *message, size_t len,
{
len = len - sizeof(struct message_macs) +
offsetof(struct message_macs, mac1);
- blake2s(mac1, message, key, COOKIE_LEN, len, NOISE_SYMMETRIC_KEY_LEN);
+ blake2s(key, NOISE_SYMMETRIC_KEY_LEN, message, len, mac1, COOKIE_LEN);
}
static void compute_mac2(u8 mac2[COOKIE_LEN], const void *message, size_t len,
@@ -85,13 +85,13 @@ static void compute_mac2(u8 mac2[COOKIE_LEN], const void *message, size_t len,
{
len = len - sizeof(struct message_macs) +
offsetof(struct message_macs, mac2);
- blake2s(mac2, message, cookie, COOKIE_LEN, len, COOKIE_LEN);
+ blake2s(cookie, COOKIE_LEN, message, len, mac2, COOKIE_LEN);
}
static void make_cookie(u8 cookie[COOKIE_LEN], struct sk_buff *skb,
struct cookie_checker *checker)
{
- struct blake2s_state state;
+ struct blake2s_ctx blake;
if (wg_birthdate_has_expired(checker->secret_birthdate,
COOKIE_SECRET_MAX_AGE)) {
@@ -103,15 +103,15 @@ static void make_cookie(u8 cookie[COOKIE_LEN], struct sk_buff *skb,
down_read(&checker->secret_lock);
- blake2s_init_key(&state, COOKIE_LEN, checker->secret, NOISE_HASH_LEN);
+ blake2s_init_key(&blake, COOKIE_LEN, checker->secret, NOISE_HASH_LEN);
if (skb->protocol == htons(ETH_P_IP))
- blake2s_update(&state, (u8 *)&ip_hdr(skb)->saddr,
+ blake2s_update(&blake, (u8 *)&ip_hdr(skb)->saddr,
sizeof(struct in_addr));
else if (skb->protocol == htons(ETH_P_IPV6))
- blake2s_update(&state, (u8 *)&ipv6_hdr(skb)->saddr,
+ blake2s_update(&blake, (u8 *)&ipv6_hdr(skb)->saddr,
sizeof(struct in6_addr));
- blake2s_update(&state, (u8 *)&udp_hdr(skb)->source, sizeof(__be16));
- blake2s_final(&state, cookie);
+ blake2s_update(&blake, (u8 *)&udp_hdr(skb)->source, sizeof(__be16));
+ blake2s_final(&blake, cookie);
up_read(&checker->secret_lock);
}
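
Editor's note: for reference, the BLAKE2s calling convention this file is converted to, as used by the hunks above: the one-shot blake2s() now takes the key first, then the input, then the output, and the incremental state type is struct blake2s_ctx. A sketch only; buffer names are illustrative and not part of the patch:

static void example_keyed_hash(u8 out[BLAKE2S_HASH_SIZE],
			       const u8 *msg, size_t msg_len,
			       const u8 *key, size_t key_len)
{
	struct blake2s_ctx ctx;

	/* One-shot form: (key, key_len, in, in_len, out, out_len). */
	blake2s(key, key_len, msg, msg_len, out, BLAKE2S_HASH_SIZE);

	/* Equivalent incremental form, recomputing the same digest
	 * with the renamed context type.
	 */
	blake2s_init_key(&ctx, BLAKE2S_HASH_SIZE, key, key_len);
	blake2s_update(&ctx, msg, msg_len);
	blake2s_final(&ctx, out);
}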
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index 45e9b908dbfb..46a71ec36af8 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -81,7 +81,7 @@ static int wg_pm_notification(struct notifier_block *nb, unsigned long action, v
list_for_each_entry(wg, &device_list, device_list) {
mutex_lock(&wg->device_update_lock);
list_for_each_entry(peer, &wg->peer_list, peer_list) {
- del_timer(&peer->timer_zero_key_material);
+ timer_delete(&peer->timer_zero_key_material);
wg_noise_handshake_clear(&peer->handshake);
wg_noise_keypairs_clear(&peer->keypairs);
}
@@ -302,18 +302,20 @@ static void wg_setup(struct net_device *dev)
/* We need to keep the dst around in case of icmp replies. */
netif_keep_dst(dev);
- memset(wg, 0, sizeof(*wg));
+ netif_set_tso_max_size(dev, GSO_MAX_SIZE);
+
wg->dev = dev;
}
-static int wg_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int wg_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
struct wg_device *wg = netdev_priv(dev);
int ret = -ENOMEM;
- rcu_assign_pointer(wg->creating_net, src_net);
+ rcu_assign_pointer(wg->creating_net, link_net);
init_rwsem(&wg->static_identity.lock);
mutex_init(&wg->socket_update_lock);
mutex_init(&wg->device_update_lock);
@@ -331,7 +333,8 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
goto err_free_peer_hashtable;
wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s",
- WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name);
+ WQ_CPU_INTENSIVE | WQ_FREEZABLE | WQ_PERCPU, 0,
+ dev->name);
if (!wg->handshake_receive_wq)
goto err_free_index_hashtable;
@@ -341,7 +344,8 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
goto err_destroy_handshake_receive;
wg->packet_crypt_wq = alloc_workqueue("wg-crypt-%s",
- WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0, dev->name);
+ WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_PERCPU, 0,
+ dev->name);
if (!wg->packet_crypt_wq)
goto err_destroy_handshake_send;
@@ -364,6 +368,7 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
if (ret < 0)
goto err_free_handshake_queue;
+ netif_threaded_enable(dev);
ret = register_netdevice(dev);
if (ret < 0)
goto err_uninit_ratelimiter;
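
Editor's note: the ->newlink() change above follows the rtnl_link_ops conversion in which the source netns and attribute arrays are bundled into struct rtnl_newlink_params. A minimal sketch under that assumption; example_newlink and the elided setup are illustrative, not part of the patch:

static int example_newlink(struct net_device *dev,
			   struct rtnl_newlink_params *params,
			   struct netlink_ext_ack *extack)
{
	/* The creating netns is obtained from the params bundle rather
	 * than being passed as a separate src_net argument.
	 */
	struct net *link_net = rtnl_newlink_link_net(params);

	/* ...driver-specific setup using link_net... */
	return register_netdevice(dev);
}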
diff --git a/drivers/net/wireguard/generated/netlink.c b/drivers/net/wireguard/generated/netlink.c
new file mode 100644
index 000000000000..3ef8c29908c2
--- /dev/null
+++ b/drivers/net/wireguard/generated/netlink.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/wireguard.yaml */
+/* YNL-GEN kernel source */
+/* YNL-ARG --function-prefix wg */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include "netlink.h"
+
+#include <uapi/linux/wireguard.h>
+#include <linux/time_types.h>
+
+/* Common nested types */
+const struct nla_policy wireguard_wgallowedip_nl_policy[WGALLOWEDIP_A_FLAGS + 1] = {
+ [WGALLOWEDIP_A_FAMILY] = { .type = NLA_U16, },
+ [WGALLOWEDIP_A_IPADDR] = NLA_POLICY_MIN_LEN(4),
+ [WGALLOWEDIP_A_CIDR_MASK] = { .type = NLA_U8, },
+ [WGALLOWEDIP_A_FLAGS] = NLA_POLICY_MASK(NLA_U32, 0x1),
+};
+
+const struct nla_policy wireguard_wgpeer_nl_policy[WGPEER_A_PROTOCOL_VERSION + 1] = {
+ [WGPEER_A_PUBLIC_KEY] = NLA_POLICY_EXACT_LEN(WG_KEY_LEN),
+ [WGPEER_A_PRESHARED_KEY] = NLA_POLICY_EXACT_LEN(WG_KEY_LEN),
+ [WGPEER_A_FLAGS] = NLA_POLICY_MASK(NLA_U32, 0x7),
+ [WGPEER_A_ENDPOINT] = NLA_POLICY_MIN_LEN(16),
+ [WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL] = { .type = NLA_U16, },
+ [WGPEER_A_LAST_HANDSHAKE_TIME] = NLA_POLICY_EXACT_LEN(16),
+ [WGPEER_A_RX_BYTES] = { .type = NLA_U64, },
+ [WGPEER_A_TX_BYTES] = { .type = NLA_U64, },
+ [WGPEER_A_ALLOWEDIPS] = NLA_POLICY_NESTED_ARRAY(wireguard_wgallowedip_nl_policy),
+ [WGPEER_A_PROTOCOL_VERSION] = { .type = NLA_U32, },
+};
+
+/* WG_CMD_GET_DEVICE - dump */
+static const struct nla_policy wireguard_get_device_nl_policy[WGDEVICE_A_IFNAME + 1] = {
+ [WGDEVICE_A_IFINDEX] = { .type = NLA_U32, },
+ [WGDEVICE_A_IFNAME] = { .type = NLA_NUL_STRING, .len = 15, },
+};
+
+/* WG_CMD_SET_DEVICE - do */
+static const struct nla_policy wireguard_set_device_nl_policy[WGDEVICE_A_PEERS + 1] = {
+ [WGDEVICE_A_IFINDEX] = { .type = NLA_U32, },
+ [WGDEVICE_A_IFNAME] = { .type = NLA_NUL_STRING, .len = 15, },
+ [WGDEVICE_A_PRIVATE_KEY] = NLA_POLICY_EXACT_LEN(WG_KEY_LEN),
+ [WGDEVICE_A_PUBLIC_KEY] = NLA_POLICY_EXACT_LEN(WG_KEY_LEN),
+ [WGDEVICE_A_FLAGS] = NLA_POLICY_MASK(NLA_U32, 0x1),
+ [WGDEVICE_A_LISTEN_PORT] = { .type = NLA_U16, },
+ [WGDEVICE_A_FWMARK] = { .type = NLA_U32, },
+ [WGDEVICE_A_PEERS] = NLA_POLICY_NESTED_ARRAY(wireguard_wgpeer_nl_policy),
+};
+
+/* Ops table for wireguard */
+const struct genl_split_ops wireguard_nl_ops[2] = {
+ {
+ .cmd = WG_CMD_GET_DEVICE,
+ .start = wg_get_device_start,
+ .dumpit = wg_get_device_dumpit,
+ .done = wg_get_device_done,
+ .policy = wireguard_get_device_nl_policy,
+ .maxattr = WGDEVICE_A_IFNAME,
+ .flags = GENL_UNS_ADMIN_PERM | GENL_CMD_CAP_DUMP,
+ },
+ {
+ .cmd = WG_CMD_SET_DEVICE,
+ .doit = wg_set_device_doit,
+ .policy = wireguard_set_device_nl_policy,
+ .maxattr = WGDEVICE_A_PEERS,
+ .flags = GENL_UNS_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+};
diff --git a/drivers/net/wireguard/generated/netlink.h b/drivers/net/wireguard/generated/netlink.h
new file mode 100644
index 000000000000..5dc977ee9e7c
--- /dev/null
+++ b/drivers/net/wireguard/generated/netlink.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/wireguard.yaml */
+/* YNL-GEN kernel header */
+/* YNL-ARG --function-prefix wg */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
+
+#ifndef _LINUX_WIREGUARD_GEN_H
+#define _LINUX_WIREGUARD_GEN_H
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include <uapi/linux/wireguard.h>
+#include <linux/time_types.h>
+
+/* Common nested types */
+extern const struct nla_policy wireguard_wgallowedip_nl_policy[WGALLOWEDIP_A_FLAGS + 1];
+extern const struct nla_policy wireguard_wgpeer_nl_policy[WGPEER_A_PROTOCOL_VERSION + 1];
+
+/* Ops table for wireguard */
+extern const struct genl_split_ops wireguard_nl_ops[2];
+
+int wg_get_device_start(struct netlink_callback *cb);
+int wg_get_device_done(struct netlink_callback *cb);
+
+int wg_get_device_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
+int wg_set_device_doit(struct sk_buff *skb, struct genl_info *info);
+
+#endif /* _LINUX_WIREGUARD_GEN_H */
diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
index f7055180ba4a..1da7e98d0d50 100644
--- a/drivers/net/wireguard/netlink.c
+++ b/drivers/net/wireguard/netlink.c
@@ -9,6 +9,7 @@
#include "socket.h"
#include "queueing.h"
#include "messages.h"
+#include "generated/netlink.h"
#include <uapi/linux/wireguard.h>
@@ -19,36 +20,6 @@
static struct genl_family genl_family;
-static const struct nla_policy device_policy[WGDEVICE_A_MAX + 1] = {
- [WGDEVICE_A_IFINDEX] = { .type = NLA_U32 },
- [WGDEVICE_A_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
- [WGDEVICE_A_PRIVATE_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN),
- [WGDEVICE_A_PUBLIC_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN),
- [WGDEVICE_A_FLAGS] = { .type = NLA_U32 },
- [WGDEVICE_A_LISTEN_PORT] = { .type = NLA_U16 },
- [WGDEVICE_A_FWMARK] = { .type = NLA_U32 },
- [WGDEVICE_A_PEERS] = { .type = NLA_NESTED }
-};
-
-static const struct nla_policy peer_policy[WGPEER_A_MAX + 1] = {
- [WGPEER_A_PUBLIC_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN),
- [WGPEER_A_PRESHARED_KEY] = NLA_POLICY_EXACT_LEN(NOISE_SYMMETRIC_KEY_LEN),
- [WGPEER_A_FLAGS] = { .type = NLA_U32 },
- [WGPEER_A_ENDPOINT] = NLA_POLICY_MIN_LEN(sizeof(struct sockaddr)),
- [WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL] = { .type = NLA_U16 },
- [WGPEER_A_LAST_HANDSHAKE_TIME] = NLA_POLICY_EXACT_LEN(sizeof(struct __kernel_timespec)),
- [WGPEER_A_RX_BYTES] = { .type = NLA_U64 },
- [WGPEER_A_TX_BYTES] = { .type = NLA_U64 },
- [WGPEER_A_ALLOWEDIPS] = { .type = NLA_NESTED },
- [WGPEER_A_PROTOCOL_VERSION] = { .type = NLA_U32 }
-};
-
-static const struct nla_policy allowedip_policy[WGALLOWEDIP_A_MAX + 1] = {
- [WGALLOWEDIP_A_FAMILY] = { .type = NLA_U16 },
- [WGALLOWEDIP_A_IPADDR] = NLA_POLICY_MIN_LEN(sizeof(struct in_addr)),
- [WGALLOWEDIP_A_CIDR_MASK] = { .type = NLA_U8 }
-};
-
static struct wg_device *lookup_interface(struct nlattr **attrs,
struct sk_buff *skb)
{
@@ -196,7 +167,7 @@ err:
return -EMSGSIZE;
}
-static int wg_get_device_start(struct netlink_callback *cb)
+int wg_get_device_start(struct netlink_callback *cb)
{
struct wg_device *wg;
@@ -207,7 +178,7 @@ static int wg_get_device_start(struct netlink_callback *cb)
return 0;
}
-static int wg_get_device_dump(struct sk_buff *skb, struct netlink_callback *cb)
+int wg_get_device_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
struct wg_peer *peer, *next_peer_cursor;
struct dump_ctx *ctx = DUMP_CTX(cb);
@@ -301,7 +272,7 @@ out:
*/
}
-static int wg_get_device_done(struct netlink_callback *cb)
+int wg_get_device_done(struct netlink_callback *cb)
{
struct dump_ctx *ctx = DUMP_CTX(cb);
@@ -329,6 +300,7 @@ static int set_port(struct wg_device *wg, u16 port)
static int set_allowedip(struct wg_peer *peer, struct nlattr **attrs)
{
int ret = -EINVAL;
+ u32 flags = 0;
u16 family;
u8 cidr;
@@ -337,19 +309,30 @@ static int set_allowedip(struct wg_peer *peer, struct nlattr **attrs)
return ret;
family = nla_get_u16(attrs[WGALLOWEDIP_A_FAMILY]);
cidr = nla_get_u8(attrs[WGALLOWEDIP_A_CIDR_MASK]);
+ if (attrs[WGALLOWEDIP_A_FLAGS])
+ flags = nla_get_u32(attrs[WGALLOWEDIP_A_FLAGS]);
if (family == AF_INET && cidr <= 32 &&
- nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in_addr))
- ret = wg_allowedips_insert_v4(
- &peer->device->peer_allowedips,
- nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr, peer,
- &peer->device->device_update_lock);
- else if (family == AF_INET6 && cidr <= 128 &&
- nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in6_addr))
- ret = wg_allowedips_insert_v6(
- &peer->device->peer_allowedips,
- nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr, peer,
- &peer->device->device_update_lock);
+ nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in_addr)) {
+ if (flags & WGALLOWEDIP_F_REMOVE_ME)
+ ret = wg_allowedips_remove_v4(&peer->device->peer_allowedips,
+ nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr,
+ peer, &peer->device->device_update_lock);
+ else
+ ret = wg_allowedips_insert_v4(&peer->device->peer_allowedips,
+ nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr,
+ peer, &peer->device->device_update_lock);
+ } else if (family == AF_INET6 && cidr <= 128 &&
+ nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in6_addr)) {
+ if (flags & WGALLOWEDIP_F_REMOVE_ME)
+ ret = wg_allowedips_remove_v6(&peer->device->peer_allowedips,
+ nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr,
+ peer, &peer->device->device_update_lock);
+ else
+ ret = wg_allowedips_insert_v6(&peer->device->peer_allowedips,
+ nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr,
+ peer, &peer->device->device_update_lock);
+ }
return ret;
}
@@ -373,9 +356,6 @@ static int set_peer(struct wg_device *wg, struct nlattr **attrs)
if (attrs[WGPEER_A_FLAGS])
flags = nla_get_u32(attrs[WGPEER_A_FLAGS]);
- ret = -EOPNOTSUPP;
- if (flags & ~__WGPEER_F_ALL)
- goto out;
ret = -EPFNOSUPPORT;
if (attrs[WGPEER_A_PROTOCOL_VERSION]) {
@@ -457,7 +437,7 @@ static int set_peer(struct wg_device *wg, struct nlattr **attrs)
nla_for_each_nested(attr, attrs[WGPEER_A_ALLOWEDIPS], rem) {
ret = nla_parse_nested(allowedip, WGALLOWEDIP_A_MAX,
- attr, allowedip_policy, NULL);
+ attr, NULL, NULL);
if (ret < 0)
goto out;
ret = set_allowedip(peer, allowedip);
@@ -490,7 +470,7 @@ out:
return ret;
}
-static int wg_set_device(struct sk_buff *skb, struct genl_info *info)
+int wg_set_device_doit(struct sk_buff *skb, struct genl_info *info)
{
struct wg_device *wg = lookup_interface(info->attrs, skb);
u32 flags = 0;
@@ -506,9 +486,6 @@ static int wg_set_device(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[WGDEVICE_A_FLAGS])
flags = nla_get_u32(info->attrs[WGDEVICE_A_FLAGS]);
- ret = -EOPNOTSUPP;
- if (flags & ~__WGDEVICE_F_ALL)
- goto out;
if (info->attrs[WGDEVICE_A_LISTEN_PORT] || info->attrs[WGDEVICE_A_FWMARK]) {
struct net *net;
@@ -586,7 +563,7 @@ skip_set_private_key:
nla_for_each_nested(attr, info->attrs[WGDEVICE_A_PEERS], rem) {
ret = nla_parse_nested(peer, WGPEER_A_MAX, attr,
- peer_policy, NULL);
+ NULL, NULL);
if (ret < 0)
goto out;
ret = set_peer(wg, peer);
@@ -607,34 +584,20 @@ out_nodev:
return ret;
}
-static const struct genl_ops genl_ops[] = {
- {
- .cmd = WG_CMD_GET_DEVICE,
- .start = wg_get_device_start,
- .dumpit = wg_get_device_dump,
- .done = wg_get_device_done,
- .flags = GENL_UNS_ADMIN_PERM
- }, {
- .cmd = WG_CMD_SET_DEVICE,
- .doit = wg_set_device,
- .flags = GENL_UNS_ADMIN_PERM
- }
-};
-
static struct genl_family genl_family __ro_after_init = {
- .ops = genl_ops,
- .n_ops = ARRAY_SIZE(genl_ops),
- .resv_start_op = WG_CMD_SET_DEVICE + 1,
+ .split_ops = wireguard_nl_ops,
+ .n_split_ops = ARRAY_SIZE(wireguard_nl_ops),
.name = WG_GENL_NAME,
.version = WG_GENL_VERSION,
- .maxattr = WGDEVICE_A_MAX,
.module = THIS_MODULE,
- .policy = device_policy,
.netnsok = true
};
int __init wg_genetlink_init(void)
{
+ BUILD_BUG_ON(WG_KEY_LEN != NOISE_PUBLIC_KEY_LEN);
+ BUILD_BUG_ON(WG_KEY_LEN != NOISE_SYMMETRIC_KEY_LEN);
+
return genl_register_family(&genl_family);
}
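
Editor's note: with the generated split-ops table, per-command policies are enforced by the genetlink core before the handlers run. In particular, the NLA_POLICY_MASK entries on the flags attributes reject unknown flag bits up front, which is why the explicit `flags & ~__WGDEVICE_F_ALL` / `~__WGPEER_F_ALL` rejections and the local policy tables could be dropped, and why set_allowedip() only needs to look at WGALLOWEDIP_F_REMOVE_ME. A small sketch of the policy idiom; the EXAMPLE_* names are illustrative, not part of the patch:

static const struct nla_policy example_policy[EXAMPLE_A_MAX + 1] = {
	/* Reject any flag bits outside the accepted mask before the
	 * handler ever sees the attribute.
	 */
	[EXAMPLE_A_FLAGS] = NLA_POLICY_MASK(NLA_U32, EXAMPLE_F_REMOVE_ME),
};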
diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c
index 202a33af5a72..1fe8468f0bef 100644
--- a/drivers/net/wireguard/noise.c
+++ b/drivers/net/wireguard/noise.c
@@ -25,18 +25,18 @@
* <- e, ee, se, psk, {}
*/
-static const u8 handshake_name[37] = "Noise_IKpsk2_25519_ChaChaPoly_BLAKE2s";
-static const u8 identifier_name[34] = "WireGuard v1 zx2c4 Jason@zx2c4.com";
+static const u8 handshake_name[37] __nonstring = "Noise_IKpsk2_25519_ChaChaPoly_BLAKE2s";
+static const u8 identifier_name[34] __nonstring = "WireGuard v1 zx2c4 Jason@zx2c4.com";
static u8 handshake_init_hash[NOISE_HASH_LEN] __ro_after_init;
static u8 handshake_init_chaining_key[NOISE_HASH_LEN] __ro_after_init;
static atomic64_t keypair_counter = ATOMIC64_INIT(0);
void __init wg_noise_init(void)
{
- struct blake2s_state blake;
+ struct blake2s_ctx blake;
- blake2s(handshake_init_chaining_key, handshake_name, NULL,
- NOISE_HASH_LEN, sizeof(handshake_name), 0);
+ blake2s(NULL, 0, handshake_name, sizeof(handshake_name),
+ handshake_init_chaining_key, NOISE_HASH_LEN);
blake2s_init(&blake, NOISE_HASH_LEN);
blake2s_update(&blake, handshake_init_chaining_key, NOISE_HASH_LEN);
blake2s_update(&blake, identifier_name, sizeof(identifier_name));
@@ -304,33 +304,33 @@ void wg_noise_set_static_identity_private_key(
static void hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen, const size_t keylen)
{
- struct blake2s_state state;
+ struct blake2s_ctx blake;
u8 x_key[BLAKE2S_BLOCK_SIZE] __aligned(__alignof__(u32)) = { 0 };
u8 i_hash[BLAKE2S_HASH_SIZE] __aligned(__alignof__(u32));
int i;
if (keylen > BLAKE2S_BLOCK_SIZE) {
- blake2s_init(&state, BLAKE2S_HASH_SIZE);
- blake2s_update(&state, key, keylen);
- blake2s_final(&state, x_key);
+ blake2s_init(&blake, BLAKE2S_HASH_SIZE);
+ blake2s_update(&blake, key, keylen);
+ blake2s_final(&blake, x_key);
} else
memcpy(x_key, key, keylen);
for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i)
x_key[i] ^= 0x36;
- blake2s_init(&state, BLAKE2S_HASH_SIZE);
- blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE);
- blake2s_update(&state, in, inlen);
- blake2s_final(&state, i_hash);
+ blake2s_init(&blake, BLAKE2S_HASH_SIZE);
+ blake2s_update(&blake, x_key, BLAKE2S_BLOCK_SIZE);
+ blake2s_update(&blake, in, inlen);
+ blake2s_final(&blake, i_hash);
for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i)
x_key[i] ^= 0x5c ^ 0x36;
- blake2s_init(&state, BLAKE2S_HASH_SIZE);
- blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE);
- blake2s_update(&state, i_hash, BLAKE2S_HASH_SIZE);
- blake2s_final(&state, i_hash);
+ blake2s_init(&blake, BLAKE2S_HASH_SIZE);
+ blake2s_update(&blake, x_key, BLAKE2S_BLOCK_SIZE);
+ blake2s_update(&blake, i_hash, BLAKE2S_HASH_SIZE);
+ blake2s_final(&blake, i_hash);
memcpy(out, i_hash, BLAKE2S_HASH_SIZE);
memzero_explicit(x_key, BLAKE2S_BLOCK_SIZE);
@@ -431,7 +431,7 @@ static bool __must_check mix_precomputed_dh(u8 chaining_key[NOISE_HASH_LEN],
static void mix_hash(u8 hash[NOISE_HASH_LEN], const u8 *src, size_t src_len)
{
- struct blake2s_state blake;
+ struct blake2s_ctx blake;
blake2s_init(&blake, NOISE_HASH_LEN);
blake2s_update(&blake, hash, NOISE_HASH_LEN);
diff --git a/drivers/net/wireguard/peer.h b/drivers/net/wireguard/peer.h
index 76e4d3128ad4..718fb42bdac7 100644
--- a/drivers/net/wireguard/peer.h
+++ b/drivers/net/wireguard/peer.h
@@ -20,7 +20,7 @@ struct wg_device;
struct endpoint {
union {
- struct sockaddr addr;
+ struct sockaddr_inet addr; /* Large enough for both address families */
struct sockaddr_in addr4;
struct sockaddr_in6 addr6;
};
diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
index 7eb76724b3ed..79b6d70de236 100644
--- a/drivers/net/wireguard/queueing.h
+++ b/drivers/net/wireguard/queueing.h
@@ -104,16 +104,11 @@ static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating)
static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
{
- unsigned int cpu = *stored_cpu, cpu_index, i;
+ unsigned int cpu = *stored_cpu;
+
+ while (unlikely(cpu >= nr_cpu_ids || !cpu_online(cpu)))
+ cpu = *stored_cpu = cpumask_nth(id % num_online_cpus(), cpu_online_mask);
- if (unlikely(cpu >= nr_cpu_ids ||
- !cpumask_test_cpu(cpu, cpu_online_mask))) {
- cpu_index = id % cpumask_weight(cpu_online_mask);
- cpu = cpumask_first(cpu_online_mask);
- for (i = 0; i < cpu_index; ++i)
- cpu = cpumask_next(cpu, cpu_online_mask);
- *stored_cpu = cpu;
- }
return cpu;
}
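
Editor's note: the open-coded walk over cpu_online_mask is replaced with cpumask_nth(), which returns the Nth set CPU (counting from zero) in a mask, or a value of nr_cpu_ids or more when the index is out of range. A minimal illustration of the idiom; example_pick_cpu is hypothetical and not part of the patch:

static unsigned int example_pick_cpu(unsigned int id)
{
	/* Spread ids across the currently online CPUs; the caller above
	 * retries whenever the result is offline or out of range.
	 */
	return cpumask_nth(id % num_online_cpus(), cpu_online_mask);
}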
diff --git a/drivers/net/wireguard/selftest/allowedips.c b/drivers/net/wireguard/selftest/allowedips.c
index 3d1f64ff2e12..41837efa70cb 100644
--- a/drivers/net/wireguard/selftest/allowedips.c
+++ b/drivers/net/wireguard/selftest/allowedips.c
@@ -383,7 +383,6 @@ static __init bool randomized_test(void)
for (i = 0; i < NUM_QUERIES; ++i) {
get_random_bytes(ip, 4);
if (lookup(t.root4, 32, ip) != horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) {
- horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip);
pr_err("allowedips random v4 self-test: FAIL\n");
goto free;
}
@@ -461,6 +460,10 @@ static __init struct wg_peer *init_peer(void)
wg_allowedips_insert_v##version(&t, ip##version(ipa, ipb, ipc, ipd), \
cidr, mem, &mutex)
+#define remove(version, mem, ipa, ipb, ipc, ipd, cidr) \
+ wg_allowedips_remove_v##version(&t, ip##version(ipa, ipb, ipc, ipd), \
+ cidr, mem, &mutex)
+
#define maybe_fail() do { \
++i; \
if (!_s) { \
@@ -586,6 +589,50 @@ bool __init wg_allowedips_selftest(void)
test_negative(4, a, 192, 0, 0, 0);
test_negative(4, a, 255, 0, 0, 0);
+ insert(4, a, 1, 0, 0, 0, 32);
+ insert(4, a, 192, 0, 0, 0, 24);
+ insert(6, a, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef, 128);
+ insert(6, a, 0x24446800, 0xf0e40800, 0xeeaebeef, 0, 98);
+ test(4, a, 1, 0, 0, 0);
+ test(4, a, 192, 0, 0, 1);
+ test(6, a, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef);
+ test(6, a, 0x24446800, 0xf0e40800, 0xeeaebeef, 0x10101010);
+ /* Must be an exact match to remove */
+ remove(4, a, 192, 0, 0, 0, 32);
+ test(4, a, 192, 0, 0, 1);
+ /* NULL peer should have no effect and return 0 */
+ test_boolean(!remove(4, NULL, 192, 0, 0, 0, 24));
+ test(4, a, 192, 0, 0, 1);
+ /* different peer should have no effect and return 0 */
+ test_boolean(!remove(4, b, 192, 0, 0, 0, 24));
+ test(4, a, 192, 0, 0, 1);
+ /* invalid CIDR should have no effect and return -EINVAL */
+ test_boolean(remove(4, b, 192, 0, 0, 0, 33) == -EINVAL);
+ test(4, a, 192, 0, 0, 1);
+ remove(4, a, 192, 0, 0, 0, 24);
+ test_negative(4, a, 192, 0, 0, 1);
+ remove(4, a, 1, 0, 0, 0, 32);
+ test_negative(4, a, 1, 0, 0, 0);
+ /* Must be an exact match to remove */
+ remove(6, a, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef, 96);
+ test(6, a, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef);
+ /* NULL peer should have no effect and return 0 */
+ test_boolean(!remove(6, NULL, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef, 128));
+ test(6, a, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef);
+ /* different peer should have no effect and return 0 */
+ test_boolean(!remove(6, b, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef, 128));
+ test(6, a, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef);
+ /* invalid CIDR should have no effect and return -EINVAL */
+ test_boolean(remove(6, a, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef, 129) == -EINVAL);
+ test(6, a, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef);
+ remove(6, a, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef, 128);
+ test_negative(6, a, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef);
+ /* Must match the peer to remove */
+ remove(6, b, 0x24446800, 0xf0e40800, 0xeeaebeef, 0, 98);
+ test(6, a, 0x24446800, 0xf0e40800, 0xeeaebeef, 0x10101010);
+ remove(6, a, 0x24446800, 0xf0e40800, 0xeeaebeef, 0, 98);
+ test_negative(6, a, 0x24446800, 0xf0e40800, 0xeeaebeef, 0x10101010);
+
wg_allowedips_free(&t, &mutex);
wg_allowedips_init(&t);
insert(4, a, 192, 168, 0, 0, 16);
diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c
index 0414d7a6ce74..253488f8c00f 100644
--- a/drivers/net/wireguard/socket.c
+++ b/drivers/net/wireguard/socket.c
@@ -84,7 +84,7 @@ static int send4(struct wg_device *wg, struct sk_buff *skb,
skb->ignore_df = 1;
udp_tunnel_xmit_skb(rt, sock, skb, fl.saddr, fl.daddr, ds,
ip4_dst_hoplimit(&rt->dst), 0, fl.fl4_sport,
- fl.fl4_dport, false, false);
+ fl.fl4_dport, false, false, 0);
goto out;
err:
@@ -151,7 +151,7 @@ static int send6(struct wg_device *wg, struct sk_buff *skb,
skb->ignore_df = 1;
udp_tunnel6_xmit_skb(dst, sock, skb, skb->dev, &fl.saddr, &fl.daddr, ds,
ip6_dst_hoplimit(dst), 0, fl.fl6_sport,
- fl.fl6_dport, false);
+ fl.fl6_dport, false, 0);
goto out;
err:
diff --git a/drivers/net/wireguard/timers.c b/drivers/net/wireguard/timers.c
index 968bdb4df0b3..4016a3065602 100644
--- a/drivers/net/wireguard/timers.c
+++ b/drivers/net/wireguard/timers.c
@@ -40,15 +40,15 @@ static inline void mod_peer_timer(struct wg_peer *peer,
static void wg_expired_retransmit_handshake(struct timer_list *timer)
{
- struct wg_peer *peer = from_timer(peer, timer,
- timer_retransmit_handshake);
+ struct wg_peer *peer = timer_container_of(peer, timer,
+ timer_retransmit_handshake);
if (peer->timer_handshake_attempts > MAX_TIMER_HANDSHAKES) {
pr_debug("%s: Handshake for peer %llu (%pISpfsc) did not complete after %d attempts, giving up\n",
peer->device->dev->name, peer->internal_id,
&peer->endpoint.addr, (int)MAX_TIMER_HANDSHAKES + 2);
- del_timer(&peer->timer_send_keepalive);
+ timer_delete(&peer->timer_send_keepalive);
/* We drop all packets without a keypair and don't try again,
* if we try unsuccessfully for too long to make a handshake.
*/
@@ -78,7 +78,8 @@ static void wg_expired_retransmit_handshake(struct timer_list *timer)
static void wg_expired_send_keepalive(struct timer_list *timer)
{
- struct wg_peer *peer = from_timer(peer, timer, timer_send_keepalive);
+ struct wg_peer *peer = timer_container_of(peer, timer,
+ timer_send_keepalive);
wg_packet_send_keepalive(peer);
if (peer->timer_need_another_keepalive) {
@@ -90,7 +91,8 @@ static void wg_expired_send_keepalive(struct timer_list *timer)
static void wg_expired_new_handshake(struct timer_list *timer)
{
- struct wg_peer *peer = from_timer(peer, timer, timer_new_handshake);
+ struct wg_peer *peer = timer_container_of(peer, timer,
+ timer_new_handshake);
pr_debug("%s: Retrying handshake with peer %llu (%pISpfsc) because we stopped hearing back after %d seconds\n",
peer->device->dev->name, peer->internal_id,
@@ -104,7 +106,8 @@ static void wg_expired_new_handshake(struct timer_list *timer)
static void wg_expired_zero_key_material(struct timer_list *timer)
{
- struct wg_peer *peer = from_timer(peer, timer, timer_zero_key_material);
+ struct wg_peer *peer = timer_container_of(peer, timer,
+ timer_zero_key_material);
rcu_read_lock_bh();
if (!READ_ONCE(peer->is_dead)) {
@@ -134,8 +137,8 @@ static void wg_queued_expired_zero_key_material(struct work_struct *work)
static void wg_expired_send_persistent_keepalive(struct timer_list *timer)
{
- struct wg_peer *peer = from_timer(peer, timer,
- timer_persistent_keepalive);
+ struct wg_peer *peer = timer_container_of(peer, timer,
+ timer_persistent_keepalive);
if (likely(peer->persistent_keepalive_interval))
wg_packet_send_keepalive(peer);
@@ -167,7 +170,7 @@ void wg_timers_data_received(struct wg_peer *peer)
*/
void wg_timers_any_authenticated_packet_sent(struct wg_peer *peer)
{
- del_timer(&peer->timer_send_keepalive);
+ timer_delete(&peer->timer_send_keepalive);
}
/* Should be called after any type of authenticated packet is received, whether
@@ -175,7 +178,7 @@ void wg_timers_any_authenticated_packet_sent(struct wg_peer *peer)
*/
void wg_timers_any_authenticated_packet_received(struct wg_peer *peer)
{
- del_timer(&peer->timer_new_handshake);
+ timer_delete(&peer->timer_new_handshake);
}
/* Should be called after a handshake initiation message is sent. */
@@ -191,7 +194,7 @@ void wg_timers_handshake_initiated(struct wg_peer *peer)
*/
void wg_timers_handshake_complete(struct wg_peer *peer)
{
- del_timer(&peer->timer_retransmit_handshake);
+ timer_delete(&peer->timer_retransmit_handshake);
peer->timer_handshake_attempts = 0;
peer->sent_lastminute_handshake = false;
ktime_get_real_ts64(&peer->walltime_last_handshake);
diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c
index a2d87c3ad196..e94a6b180314 100644
--- a/drivers/net/wireless/admtek/adm8211.c
+++ b/drivers/net/wireless/admtek/adm8211.c
@@ -1293,7 +1293,7 @@ static void adm8211_set_bssid(struct ieee80211_hw *dev, const u8 *bssid)
ADM8211_CSR_WRITE(ABDA1, reg);
}
-static int adm8211_config(struct ieee80211_hw *dev, u32 changed)
+static int adm8211_config(struct ieee80211_hw *dev, int radio_idx, u32 changed)
{
struct adm8211_priv *priv = dev->priv;
struct ieee80211_conf *conf = &dev->conf;
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 156f3650c006..1230e6278f23 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -733,7 +733,7 @@ static void ar5523_data_tx_pkt_put(struct ar5523 *ar)
{
atomic_dec(&ar->tx_nr_total);
if (!atomic_dec_return(&ar->tx_nr_pending)) {
- del_timer(&ar->tx_wd_timer);
+ timer_delete(&ar->tx_wd_timer);
wake_up(&ar->tx_flush_waitq);
}
@@ -902,7 +902,7 @@ static void ar5523_tx_work(struct work_struct *work)
static void ar5523_tx_wd_timer(struct timer_list *t)
{
- struct ar5523 *ar = from_timer(ar, t, tx_wd_timer);
+ struct ar5523 *ar = timer_container_of(ar, t, tx_wd_timer);
ar5523_dbg(ar, "TX watchdog timer triggered\n");
ieee80211_queue_work(ar->hw, &ar->tx_wd_work);
@@ -1076,14 +1076,15 @@ static void ar5523_stop(struct ieee80211_hw *hw, bool suspend)
ar5523_cmd_write(ar, WDCMSG_TARGET_STOP, NULL, 0, 0);
- del_timer_sync(&ar->tx_wd_timer);
+ timer_delete_sync(&ar->tx_wd_timer);
cancel_work_sync(&ar->tx_wd_work);
cancel_work_sync(&ar->rx_refill_work);
ar5523_cancel_rx_bufs(ar);
mutex_unlock(&ar->mutex);
}
-static int ar5523_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+static int ar5523_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
+ u32 value)
{
struct ar5523 *ar = hw->priv;
int ret;
@@ -1137,7 +1138,7 @@ static void ar5523_remove_interface(struct ieee80211_hw *hw,
ar->vif = NULL;
}
-static int ar5523_hwconfig(struct ieee80211_hw *hw, u32 changed)
+static int ar5523_hwconfig(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct ar5523 *ar = hw->priv;
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index f0441b3d7dcb..eb8b35b6224d 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -497,7 +497,7 @@ static int ath10k_ahb_resource_init(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "irq: %d\n", ar_ahb->irq);
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "mem: 0x%pK mem_len: %lu gcc mem: 0x%pK tcsr_mem: 0x%pK\n",
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "mem: 0x%p mem_len: %lu gcc mem: 0x%p tcsr_mem: 0x%p\n",
ar_ahb->mem, ar_ahb->mem_len,
ar_ahb->gcc_mem, ar_ahb->tcsr_mem);
return 0;
@@ -837,12 +837,12 @@ static void ath10k_ahb_remove(struct platform_device *pdev)
}
static struct platform_driver ath10k_ahb_driver = {
- .driver = {
- .name = "ath10k_ahb",
+ .driver = {
+ .name = "ath10k_ahb",
.of_match_table = ath10k_ahb_of_match,
},
- .probe = ath10k_ahb_probe,
- .remove_new = ath10k_ahb_remove,
+ .probe = ath10k_ahb_probe,
+ .remove = ath10k_ahb_remove,
};
int ath10k_ahb_init(void)
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index 9a4f8e815412..52118867ecde 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -3,8 +3,10 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2014,2016-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
+#include <linux/export.h>
#include "bmi.h"
#include "hif.h"
#include "debug.h"
@@ -349,7 +351,7 @@ static int ath10k_bmi_lz_data_large(struct ath10k *ar, const void *buffer, u32 l
int ret;
size_t buf_len;
- ath10k_dbg(ar, ATH10K_DBG_BMI, "large bmi lz data buffer 0x%pK length %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "large bmi lz data buffer 0x%p length %d\n",
buffer, length);
if (ar->bmi.done_sent) {
@@ -395,7 +397,7 @@ int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
u32 txlen;
int ret;
- ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%pK length %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
buffer, length);
if (ar->bmi.done_sent) {
@@ -461,7 +463,7 @@ int ath10k_bmi_fast_download(struct ath10k *ar,
int ret;
ath10k_dbg(ar, ATH10K_DBG_BMI,
- "bmi fast download address 0x%x buffer 0x%pK length %d\n",
+ "bmi fast download address 0x%x buffer 0x%p length %d\n",
address, buffer, length);
ret = ath10k_bmi_lz_stream_start(ar, address);
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index afae4a8027f8..7bbda46cfd93 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -4,8 +4,10 @@
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018 The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
+#include <linux/export.h>
#include "hif.h"
#include "ce.h"
#include "debug.h"
@@ -80,7 +82,7 @@ static inline u32 shadow_sr_wr_ind_addr(struct ath10k *ar,
static inline unsigned int
ath10k_set_ring_byte(unsigned int offset,
- struct ath10k_hw_ce_regs_addr_map *addr_map)
+ const struct ath10k_hw_ce_regs_addr_map *addr_map)
{
return ((offset << addr_map->lsb) & addr_map->mask);
}
@@ -203,7 +205,7 @@ static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
+ const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
ctrl_regs->addr);
@@ -217,7 +219,7 @@ static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
+ const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
ctrl_regs->addr);
@@ -231,7 +233,7 @@ static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
+ const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
ctrl_regs->addr);
@@ -313,7 +315,7 @@ static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
+ const struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);
ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
@@ -325,7 +327,7 @@ static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
+ const struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);
ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
@@ -337,7 +339,7 @@ static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
+ const struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);
ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
@@ -349,7 +351,7 @@ static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
+ const struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);
ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
@@ -360,7 +362,7 @@ static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
u32 ce_ctrl_addr)
{
- struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
+ const struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->host_ie_addr);
@@ -372,7 +374,7 @@ static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
u32 ce_ctrl_addr)
{
- struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
+ const struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->host_ie_addr);
@@ -384,7 +386,7 @@ static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
u32 ce_ctrl_addr)
{
- struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
+ const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->host_ie_addr);
@@ -396,7 +398,7 @@ static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
u32 ce_ctrl_addr)
{
- struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;
+ const struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;
u32 misc_ie_addr = ath10k_ce_read32(ar,
ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr);
@@ -410,7 +412,7 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int mask)
{
- struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
+ const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
ath10k_ce_write32(ar, ce_ctrl_addr + wm_regs->addr, mask);
}
@@ -1230,7 +1232,7 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
struct ath10k_ce *ce = ath10k_ce_priv(ar);
struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
- struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
+ const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
u32 ctrl_addr = ce_state->ctrl_addr;
/*
@@ -1388,7 +1390,7 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
ath10k_dbg(ar, ATH10K_DBG_BOOT,
- "boot init ce src ring id %d entries %d base_addr %pK\n",
+ "boot init ce src ring id %d entries %d base_addr %p\n",
ce_id, nentries, src_ring->base_addr_owner_space);
return 0;
@@ -1426,7 +1428,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
ath10k_dbg(ar, ATH10K_DBG_BOOT,
- "boot ce dest ring id %d entries %d base_addr %pK\n",
+ "boot ce dest ring id %d entries %d base_addr %p\n",
ce_id, nentries, dest_ring->base_addr_owner_space);
return 0;
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index b3294287bce1..7c2939cbde5f 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -3,9 +3,10 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/of.h>
@@ -1163,9 +1164,12 @@ int ath10k_core_check_dt(struct ath10k *ar)
if (!node)
return -ENOENT;
- of_property_read_string(node, "qcom,ath10k-calibration-variant",
+ of_property_read_string(node, "qcom,calibration-variant",
&variant);
if (!variant)
+ of_property_read_string(node, "qcom,ath10k-calibration-variant",
+ &variant);
+ if (!variant)
return -ENODATA;
if (strscpy(ar->id.bdf_ext, variant, sizeof(ar->id.bdf_ext)) < 0)
@@ -1182,7 +1186,7 @@ static int ath10k_download_fw(struct ath10k *ar)
u32 address, data_len;
const void *data;
int ret;
- struct pm_qos_request latency_qos;
+ struct pm_qos_request latency_qos = {};
address = ar->hw_params.patch_load_addr;
@@ -1197,7 +1201,7 @@ static int ath10k_download_fw(struct ath10k *ar)
}
ath10k_dbg(ar, ATH10K_DBG_BOOT,
- "boot uploading firmware image %pK len %d\n",
+ "boot uploading firmware image %p len %d\n",
data, data_len);
/* Check if device supports to download firmware via
@@ -1216,7 +1220,6 @@ static int ath10k_download_fw(struct ath10k *ar)
ret);
}
- memset(&latency_qos, 0, sizeof(latency_qos));
cpu_latency_qos_add_request(&latency_qos, 0);
ret = ath10k_bmi_fast_download(ar, address, data, data_len);
@@ -1560,7 +1563,7 @@ static int ath10k_core_create_board_name(struct ath10k *ar, char *name,
bool with_chip_id)
{
/* strlen(',variant=') + strlen(ar->id.bdf_ext) */
- char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH] = { 0 };
+ char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH] = {};
if (with_variant && ar->id.bdf_ext[0] != '\0')
scnprintf(variant, sizeof(variant), ",variant=%s",
@@ -1823,7 +1826,7 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
if (!ar->running_fw->fw_file.otp_data ||
!ar->running_fw->fw_file.otp_len) {
- ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %pK otp_len %zd)!\n",
+ ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
ar->running_fw->fw_file.otp_data,
ar->running_fw->fw_file.otp_len);
return 0;
@@ -2259,7 +2262,9 @@ static int ath10k_core_pre_cal_download(struct ath10k *ar)
"boot did not find a pre calibration file, try DT next: %d\n",
ret);
- ret = ath10k_download_cal_dt(ar, "qcom,ath10k-pre-calibration-data");
+ ret = ath10k_download_cal_dt(ar, "qcom,pre-calibration-data");
+ if (ret == -ENOENT)
+ ret = ath10k_download_cal_dt(ar, "qcom,ath10k-pre-calibration-data");
if (ret) {
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"unable to load pre cal data from DT: %d\n", ret);
@@ -2337,7 +2342,9 @@ static int ath10k_download_cal_data(struct ath10k *ar)
"boot did not find a calibration file, try DT next: %d\n",
ret);
- ret = ath10k_download_cal_dt(ar, "qcom,ath10k-calibration-data");
+ ret = ath10k_download_cal_dt(ar, "qcom,calibration-data");
+ if (ret == -ENOENT)
+ ret = ath10k_download_cal_dt(ar, "qcom,ath10k-calibration-data");
if (ret == 0) {
ar->cal_mode = ATH10K_CAL_MODE_DT;
goto done;
@@ -2484,15 +2491,51 @@ static int ath10k_init_hw_params(struct ath10k *ar)
return 0;
}
-void ath10k_core_start_recovery(struct ath10k *ar)
+static void ath10k_core_recovery_check_work(struct work_struct *work)
{
- if (test_and_set_bit(ATH10K_FLAG_RESTARTING, &ar->dev_flags)) {
- ath10k_warn(ar, "already restarting\n");
+ struct ath10k *ar = container_of(work, struct ath10k, recovery_check_work);
+ long time_left;
+
+ /* Recovery can fail repeatedly, and once it does the following attempts
+ * tend to fail as well, so cap the number of consecutive failures to
+ * avoid recovering forever.
+ */
+ if (atomic_read(&ar->fail_cont_count) >= ATH10K_RECOVERY_MAX_FAIL_COUNT) {
+ ath10k_err(ar, "consecutive fail %d times, will shutdown driver!",
+ atomic_read(&ar->fail_cont_count));
+ ar->state = ATH10K_STATE_WEDGED;
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "total recovery count: %d", ++ar->recovery_count);
+
+ if (atomic_read(&ar->pending_recovery)) {
+ /* A new recovery request can arrive before the previous one has
+ * completed; letting it run would tear down the recovery still in
+ * flight, so wait for the previous one to finish first.
+ */
+ time_left = wait_for_completion_timeout(&ar->driver_recovery,
+ ATH10K_RECOVERY_TIMEOUT_HZ);
+ if (time_left) {
+ ath10k_warn(ar, "previous recovery succeeded, skip this!\n");
+ return;
+ }
+
+ /* The previous recovery did not complete in time; count it as a consecutive failure. */
+ atomic_inc(&ar->fail_cont_count);
+
+ /* Avoid having multiple recoveries at the same time. */
return;
}
+ atomic_inc(&ar->pending_recovery);
queue_work(ar->workqueue, &ar->restart_work);
}
+
+void ath10k_core_start_recovery(struct ath10k *ar)
+{
+ /* Use workqueue_aux to avoid blocking recovery tracking */
+ queue_work(ar->workqueue_aux, &ar->recovery_check_work);
+}
EXPORT_SYMBOL(ath10k_core_start_recovery);
void ath10k_core_napi_enable(struct ath10k *ar)
@@ -2525,6 +2568,8 @@ static void ath10k_core_restart(struct work_struct *work)
struct ath10k *ar = container_of(work, struct ath10k, restart_work);
int ret;
+ reinit_completion(&ar->driver_recovery);
+
set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
/* Place a barrier to make sure the compiler doesn't reorder
@@ -2589,8 +2634,6 @@ static void ath10k_core_restart(struct work_struct *work)
if (ret)
ath10k_warn(ar, "failed to send firmware crash dump via devcoredump: %d",
ret);
-
- complete(&ar->driver_recovery);
}
static void ath10k_core_set_coverage_class_work(struct work_struct *work)
@@ -2599,7 +2642,7 @@ static void ath10k_core_set_coverage_class_work(struct work_struct *work)
set_coverage_class_work);
if (ar->hw_params.hw_ops->set_coverage_class)
- ar->hw_params.hw_ops->set_coverage_class(ar, -1);
+ ar->hw_params.hw_ops->set_coverage_class(ar, -1, -1);
}
static int ath10k_core_init_firmware_features(struct ath10k *ar)
@@ -3309,7 +3352,7 @@ EXPORT_SYMBOL(ath10k_core_stop);
*/
static int ath10k_core_probe_fw(struct ath10k *ar)
{
- struct bmi_target_info target_info;
+ struct bmi_target_info target_info = {};
int ret = 0;
ret = ath10k_hif_power_up(ar, ATH10K_FIRMWARE_MODE_NORMAL);
@@ -3320,7 +3363,6 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
switch (ar->hif.bus) {
case ATH10K_BUS_SDIO:
- memset(&target_info, 0, sizeof(target_info));
ret = ath10k_bmi_get_target_info_sdio(ar, &target_info);
if (ret) {
ath10k_err(ar, "could not get target info (%d)\n", ret);
@@ -3332,7 +3374,6 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
case ATH10K_BUS_PCI:
case ATH10K_BUS_AHB:
case ATH10K_BUS_USB:
- memset(&target_info, 0, sizeof(target_info));
ret = ath10k_bmi_get_target_info(ar, &target_info);
if (ret) {
ath10k_err(ar, "could not get target info (%d)\n", ret);
@@ -3342,7 +3383,6 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
ar->hw->wiphy->hw_version = target_info.version;
break;
case ATH10K_BUS_SNOC:
- memset(&target_info, 0, sizeof(target_info));
ret = ath10k_hif_get_target_info(ar, &target_info);
if (ret) {
ath10k_err(ar, "could not get target info (%d)\n", ret);
@@ -3687,6 +3727,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
INIT_WORK(&ar->register_work, ath10k_core_register_work);
INIT_WORK(&ar->restart_work, ath10k_core_restart);
+ INIT_WORK(&ar->recovery_check_work, ath10k_core_recovery_check_work);
INIT_WORK(&ar->set_coverage_class_work,
ath10k_core_set_coverage_class_work);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 446dca74f06a..73a9db302245 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -3,7 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef _CORE_H_
@@ -87,6 +87,8 @@
IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER)
#define ATH10K_ITER_RESUME_FLAGS (IEEE80211_IFACE_ITER_RESUME_ALL |\
IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER)
+#define ATH10K_RECOVERY_TIMEOUT_HZ (5 * HZ)
+#define ATH10K_RECOVERY_MAX_FAIL_COUNT 4
struct ath10k;
@@ -779,7 +781,7 @@ enum ath10k_fw_features {
/* Firmware supports bypassing PLL setting on init. */
ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT = 9,
- /* Raw mode support. If supported, FW supports receiving and trasmitting
+ /* Raw mode support. If supported, FW supports receiving and transmitting
* frames in raw mode.
*/
ATH10K_FW_FEATURE_RAW_MODE_SUPPORT = 10,
@@ -865,9 +867,6 @@ enum ath10k_dev_flags {
/* Per Station statistics service */
ATH10K_FLAG_PEER_STATS,
- /* Indicates that ath10k device is during recovery process and not complete */
- ATH10K_FLAG_RESTARTING,
-
/* protected by conf_mutex */
ATH10K_FLAG_NAPI_ENABLED,
};
@@ -1208,9 +1207,15 @@ struct ath10k {
struct work_struct register_work;
struct work_struct restart_work;
+ struct work_struct recovery_check_work;
struct work_struct bundle_tx_work;
struct work_struct tx_complete_work;
+ atomic_t pending_recovery;
+ unsigned int recovery_count;
+ /* count of consecutive recovery failures */
+ atomic_t fail_cont_count;
+
/* cycle count is reported twice for each visited channel during scan.
* access protected by data_lock
*/
@@ -1254,9 +1259,13 @@ struct ath10k {
struct {
/* protected by conf_mutex */
struct ath10k_fw_components utf_mode_fw;
+ u8 ftm_msgref;
/* protected by data_lock */
bool utf_monitor;
+ u32 data_pos;
+ u32 expected_seq;
+ u8 *eventdata;
} testmode;
struct {
diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c
index bb3a276b7ed5..50d0c4213ecf 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.c
+++ b/drivers/net/wireless/ath/ath10k/coredump.c
@@ -3,11 +3,13 @@
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include "coredump.h"
#include <linux/devcoredump.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/utsname.h>
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 35bfe7232e95..b7520220465a 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -4,10 +4,12 @@
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/module.h>
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/crc32.h>
#include <linux/firmware.h>
@@ -545,7 +547,7 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
- char buf[32] = {0};
+ char buf[32] = {};
ssize_t rc;
int ret;
@@ -981,7 +983,7 @@ static ssize_t ath10k_write_htt_max_amsdu_ampdu(struct file *file,
{
struct ath10k *ar = file->private_data;
int res;
- char buf[64] = {0};
+ char buf[64] = {};
unsigned int amsdu, ampdu;
res = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
@@ -1037,7 +1039,7 @@ static ssize_t ath10k_write_fw_dbglog(struct file *file,
{
struct ath10k *ar = file->private_data;
int ret;
- char buf[96] = {0};
+ char buf[96] = {};
unsigned int log_level;
u64 mask;
@@ -1751,7 +1753,7 @@ void ath10k_debug_stop(struct ath10k *ar)
/* Must not use _sync to avoid deadlock, we do that in
* ath10k_debug_destroy(). The check for htt_stats_mask is to avoid
- * warning from del_timer().
+ * warning from timer_delete().
*/
if (ar->debug.htt_stats_mask != 0)
cancel_delayed_work(&ar->debug.htt_stats_dwork);
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
index 0f6de862c3a9..b9fb192e0b48 100644
--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -3,6 +3,7 @@
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include "core.h"
@@ -244,7 +245,7 @@ static ssize_t ath10k_dbg_sta_write_addba(struct file *file,
struct ath10k *ar = arsta->arvif->ar;
u32 tid, buf_size;
int ret;
- char buf[64] = {0};
+ char buf[64] = {};
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
user_buf, count);
@@ -295,7 +296,7 @@ static ssize_t ath10k_dbg_sta_write_addba_resp(struct file *file,
struct ath10k *ar = arsta->arvif->ar;
u32 tid, status;
int ret;
- char buf[64] = {0};
+ char buf[64] = {};
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
user_buf, count);
@@ -345,7 +346,7 @@ static ssize_t ath10k_dbg_sta_write_delba(struct file *file,
struct ath10k *ar = arsta->arvif->ar;
u32 tid, initiator, reason;
int ret;
- char buf[64] = {0};
+ char buf[64] = {};
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
user_buf, count);
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index a6e21ce90bad..ce9b248c12dc 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -3,8 +3,11 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
+#include <linux/export.h>
+
#include "core.h"
#include "hif.h"
#include "debug.h"
@@ -34,7 +37,7 @@ static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
skb_cb = ATH10K_SKB_CB(skb);
memset(skb_cb, 0, sizeof(*skb_cb));
- ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %pK\n", __func__, skb);
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
return skb;
}
@@ -54,7 +57,7 @@ void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
struct ath10k *ar = ep->htc->ar;
struct ath10k_htc_hdr *hdr;
- ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %pK\n", __func__,
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
ep->eid, skb);
/* A corner case where the copy completion is reaching to host but still
@@ -515,7 +518,7 @@ void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
/* zero length packet with trailer data, just drop these */
goto out;
- ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
eid, skb);
ep->ep_ops.ep_rx_complete(ar, skb);
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 7d28ae5453cf..d7e429041065 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -4,8 +4,11 @@
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
+#include <linux/export.h>
+
#include "core.h"
#include "htc.h"
#include "htt.h"
@@ -257,7 +260,8 @@ static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
{
- struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer);
+ struct ath10k_htt *htt = timer_container_of(htt, t,
+ rx_ring.refill_retry_timer);
ath10k_htt_rx_msdu_buff_replenish(htt);
}
@@ -287,7 +291,7 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
if (htt->ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
return;
- del_timer_sync(&htt->rx_ring.refill_retry_timer);
+ timer_delete_sync(&htt->rx_ring.refill_retry_timer);
skb_queue_purge(&htt->rx_msdus_q);
skb_queue_purge(&htt->rx_in_ord_compl_q);
@@ -1373,7 +1377,7 @@ static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
}
ath10k_dbg(ar, ATH10K_DBG_DATA,
- "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
skb,
skb->len,
ieee80211_get_SA(hdr),
@@ -1880,7 +1884,7 @@ static bool ath10k_htt_rx_h_frag_pn_check(struct ath10k *ar,
enum htt_rx_mpdu_encrypt_type enctype)
{
struct ath10k_peer *peer;
- union htt_rx_pn_t *last_pn, new_pn = {0};
+ union htt_rx_pn_t *last_pn, new_pn = {};
struct ieee80211_hdr *hdr;
u8 tid, frag_number;
u32 seq;
@@ -2398,7 +2402,7 @@ static bool ath10k_htt_rx_pn_check_replay_hl(struct ath10k *ar,
bool last_pn_valid, pn_invalid = false;
enum htt_txrx_sec_cast_type sec_index;
enum htt_security_types sec_type;
- union htt_rx_pn_t new_pn = {0};
+ union htt_rx_pn_t new_pn = {};
struct htt_hl_rx_desc *rx_desc;
union htt_rx_pn_t *last_pn;
u32 rx_desc_info, tid;
@@ -2461,7 +2465,7 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
struct fw_rx_desc_hl *fw_desc;
enum htt_txrx_sec_cast_type sec_index;
enum htt_security_types sec_type;
- union htt_rx_pn_t new_pn = {0};
+ union htt_rx_pn_t new_pn = {};
struct htt_hl_rx_desc *rx_desc;
struct ieee80211_hdr *hdr;
struct ieee80211_rx_status *rx_status;
@@ -2763,7 +2767,7 @@ static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
struct htt_rx_indication_hl *rx_hl;
enum htt_security_types sec_type;
u32 tid, frag, seq, rx_desc_info;
- union htt_rx_pn_t new_pn = {0};
+ union htt_rx_pn_t new_pn = {};
struct htt_hl_rx_desc *rx_desc;
u16 peer_id, sc, hdr_space;
union htt_rx_pn_t *last_pn;
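
Note: the timer changes above (and the matching ones in pci.c, sdio.c and snoc.c further down) all apply the same two substitutions: from_timer() becomes timer_container_of() in the callback, and del_timer_sync() becomes timer_delete_sync() in the teardown path. Below is a minimal kernel-style sketch of the resulting pattern; the struct and function names are made up for illustration, only the two API calls are taken from the patch.

#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_ctx {
        struct timer_list retry_timer;
        unsigned int retries;
};

/* Callback: recover the containing object from the timer_list pointer. */
static void demo_retry_timer(struct timer_list *t)
{
        struct demo_ctx *ctx = timer_container_of(ctx, t, retry_timer);

        ctx->retries++;
}

static void demo_init(struct demo_ctx *ctx)
{
        timer_setup(&ctx->retry_timer, demo_retry_timer, 0);
        mod_timer(&ctx->retry_timer, jiffies + HZ);
}

static void demo_teardown(struct demo_ctx *ctx)
{
        /* Waits for a concurrently running callback before returning. */
        timer_delete_sync(&ctx->retry_timer);
}
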
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 9725feecefd6..d6f1d85ba871 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -3,8 +3,10 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
+#include <linux/export.h>
#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
@@ -508,7 +510,7 @@ static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
struct ath10k *ar = ctx;
struct ath10k_htt *htt = &ar->htt;
- struct htt_tx_done tx_done = {0};
+ struct htt_tx_done tx_done = {};
ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %u\n", msdu_id);
@@ -558,7 +560,7 @@ void ath10k_htt_op_ep_tx_credits(struct ath10k *ar)
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
- struct htt_tx_done tx_done = {0};
+ struct htt_tx_done tx_done = {};
struct htt_cmd_hdr *htt_hdr;
struct htt_data_tx_desc *desc_hdr = NULL;
u16 flags1 = 0;
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index 8fafe096adff..59b6cebfdd8f 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -212,40 +212,40 @@ const struct ath10k_hw_regs wcn3990_regs = {
.pcie_intr_fw_mask = 0x00100000,
};
-static struct ath10k_hw_ce_regs_addr_map wcn3990_src_ring = {
+static const struct ath10k_hw_ce_regs_addr_map wcn3990_src_ring = {
.msb = 0x00000010,
.lsb = 0x00000010,
.mask = GENMASK(17, 17),
};
-static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_ring = {
+static const struct ath10k_hw_ce_regs_addr_map wcn3990_dst_ring = {
.msb = 0x00000012,
.lsb = 0x00000012,
.mask = GENMASK(18, 18),
};
-static struct ath10k_hw_ce_regs_addr_map wcn3990_dmax = {
+static const struct ath10k_hw_ce_regs_addr_map wcn3990_dmax = {
.msb = 0x00000000,
.lsb = 0x00000000,
.mask = GENMASK(15, 0),
};
-static struct ath10k_hw_ce_ctrl1 wcn3990_ctrl1 = {
+static const struct ath10k_hw_ce_ctrl1 wcn3990_ctrl1 = {
.addr = 0x00000018,
.src_ring = &wcn3990_src_ring,
.dst_ring = &wcn3990_dst_ring,
.dmax = &wcn3990_dmax,
};
-static struct ath10k_hw_ce_regs_addr_map wcn3990_host_ie_cc = {
+static const struct ath10k_hw_ce_regs_addr_map wcn3990_host_ie_cc = {
.mask = GENMASK(0, 0),
};
-static struct ath10k_hw_ce_host_ie wcn3990_host_ie = {
+static const struct ath10k_hw_ce_host_ie wcn3990_host_ie = {
.copy_complete = &wcn3990_host_ie_cc,
};
-static struct ath10k_hw_ce_host_wm_regs wcn3990_wm_reg = {
+static const struct ath10k_hw_ce_host_wm_regs wcn3990_wm_reg = {
.dstr_lmask = 0x00000010,
.dstr_hmask = 0x00000008,
.srcr_lmask = 0x00000004,
@@ -255,7 +255,7 @@ static struct ath10k_hw_ce_host_wm_regs wcn3990_wm_reg = {
.addr = 0x00000030,
};
-static struct ath10k_hw_ce_misc_regs wcn3990_misc_reg = {
+static const struct ath10k_hw_ce_misc_regs wcn3990_misc_reg = {
.axi_err = 0x00000100,
.dstr_add_err = 0x00000200,
.srcr_len_err = 0x00000100,
@@ -266,19 +266,19 @@ static struct ath10k_hw_ce_misc_regs wcn3990_misc_reg = {
.addr = 0x00000038,
};
-static struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_low = {
+static const struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_low = {
.msb = 0x00000000,
.lsb = 0x00000010,
.mask = GENMASK(31, 16),
};
-static struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_high = {
+static const struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_high = {
.msb = 0x0000000f,
.lsb = 0x00000000,
.mask = GENMASK(15, 0),
};
-static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_src_ring = {
+static const struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_src_ring = {
.addr = 0x0000004c,
.low_rst = 0x00000000,
.high_rst = 0x00000000,
@@ -286,18 +286,18 @@ static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_src_ring = {
.wm_high = &wcn3990_src_wm_high,
};
-static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_low = {
+static const struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_low = {
.lsb = 0x00000010,
.mask = GENMASK(31, 16),
};
-static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_high = {
+static const struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_high = {
.msb = 0x0000000f,
.lsb = 0x00000000,
.mask = GENMASK(15, 0),
};
-static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = {
+static const struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = {
.addr = 0x00000050,
.low_rst = 0x00000000,
.high_rst = 0x00000000,
@@ -305,7 +305,7 @@ static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = {
.wm_high = &wcn3990_dst_wm_high,
};
-static struct ath10k_hw_ce_ctrl1_upd wcn3990_ctrl1_upd = {
+static const struct ath10k_hw_ce_ctrl1_upd wcn3990_ctrl1_upd = {
.shift = 19,
.mask = 0x00080000,
.enable = 0x00000000,
@@ -344,25 +344,25 @@ const struct ath10k_hw_values wcn3990_values = {
.ce_desc_meta_data_lsb = 4,
};
-static struct ath10k_hw_ce_regs_addr_map qcax_src_ring = {
+static const struct ath10k_hw_ce_regs_addr_map qcax_src_ring = {
.msb = 0x00000010,
.lsb = 0x00000010,
.mask = GENMASK(16, 16),
};
-static struct ath10k_hw_ce_regs_addr_map qcax_dst_ring = {
+static const struct ath10k_hw_ce_regs_addr_map qcax_dst_ring = {
.msb = 0x00000011,
.lsb = 0x00000011,
.mask = GENMASK(17, 17),
};
-static struct ath10k_hw_ce_regs_addr_map qcax_dmax = {
+static const struct ath10k_hw_ce_regs_addr_map qcax_dmax = {
.msb = 0x0000000f,
.lsb = 0x00000000,
.mask = GENMASK(15, 0),
};
-static struct ath10k_hw_ce_ctrl1 qcax_ctrl1 = {
+static const struct ath10k_hw_ce_ctrl1 qcax_ctrl1 = {
.addr = 0x00000010,
.hw_mask = 0x0007ffff,
.sw_mask = 0x0007ffff,
@@ -375,31 +375,31 @@ static struct ath10k_hw_ce_ctrl1 qcax_ctrl1 = {
.dmax = &qcax_dmax,
};
-static struct ath10k_hw_ce_regs_addr_map qcax_cmd_halt_status = {
+static const struct ath10k_hw_ce_regs_addr_map qcax_cmd_halt_status = {
.msb = 0x00000003,
.lsb = 0x00000003,
.mask = GENMASK(3, 3),
};
-static struct ath10k_hw_ce_cmd_halt qcax_cmd_halt = {
+static const struct ath10k_hw_ce_cmd_halt qcax_cmd_halt = {
.msb = 0x00000000,
.mask = GENMASK(0, 0),
.status_reset = 0x00000000,
.status = &qcax_cmd_halt_status,
};
-static struct ath10k_hw_ce_regs_addr_map qcax_host_ie_cc = {
+static const struct ath10k_hw_ce_regs_addr_map qcax_host_ie_cc = {
.msb = 0x00000000,
.lsb = 0x00000000,
.mask = GENMASK(0, 0),
};
-static struct ath10k_hw_ce_host_ie qcax_host_ie = {
+static const struct ath10k_hw_ce_host_ie qcax_host_ie = {
.copy_complete_reset = 0x00000000,
.copy_complete = &qcax_host_ie_cc,
};
-static struct ath10k_hw_ce_host_wm_regs qcax_wm_reg = {
+static const struct ath10k_hw_ce_host_wm_regs qcax_wm_reg = {
.dstr_lmask = 0x00000010,
.dstr_hmask = 0x00000008,
.srcr_lmask = 0x00000004,
@@ -409,7 +409,7 @@ static struct ath10k_hw_ce_host_wm_regs qcax_wm_reg = {
.addr = 0x00000030,
};
-static struct ath10k_hw_ce_misc_regs qcax_misc_reg = {
+static const struct ath10k_hw_ce_misc_regs qcax_misc_reg = {
.axi_err = 0x00000400,
.dstr_add_err = 0x00000200,
.srcr_len_err = 0x00000100,
@@ -420,19 +420,19 @@ static struct ath10k_hw_ce_misc_regs qcax_misc_reg = {
.addr = 0x00000038,
};
-static struct ath10k_hw_ce_regs_addr_map qcax_src_wm_low = {
+static const struct ath10k_hw_ce_regs_addr_map qcax_src_wm_low = {
.msb = 0x0000001f,
.lsb = 0x00000010,
.mask = GENMASK(31, 16),
};
-static struct ath10k_hw_ce_regs_addr_map qcax_src_wm_high = {
+static const struct ath10k_hw_ce_regs_addr_map qcax_src_wm_high = {
.msb = 0x0000000f,
.lsb = 0x00000000,
.mask = GENMASK(15, 0),
};
-static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_src_ring = {
+static const struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_src_ring = {
.addr = 0x0000004c,
.low_rst = 0x00000000,
.high_rst = 0x00000000,
@@ -440,18 +440,18 @@ static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_src_ring = {
.wm_high = &qcax_src_wm_high,
};
-static struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_low = {
+static const struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_low = {
.lsb = 0x00000010,
.mask = GENMASK(31, 16),
};
-static struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_high = {
+static const struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_high = {
.msb = 0x0000000f,
.lsb = 0x00000000,
.mask = GENMASK(15, 0),
};
-static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_dst_ring = {
+static const struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_dst_ring = {
.addr = 0x00000050,
.low_rst = 0x00000000,
.high_rst = 0x00000000,
@@ -590,6 +590,7 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
* function monitors and modifies the corresponding MAC registers.
*/
static void ath10k_hw_qca988x_set_coverage_class(struct ath10k *ar,
+ int radio_idx,
s16 value)
{
u32 slottime_reg;
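
Note: the hw.c hunks above make the copy-engine register maps const, and that only works together with the hw.h hunks just below, which const-qualify the pointer members that reference them; assigning the address of a const object to a plain pointer member discards the qualifier and triggers a compiler warning. A tiny stand-alone illustration with invented names:

struct reg_map {
        unsigned int msb, lsb, mask;
};

static const struct reg_map demo_src_ring = {
        .msb  = 0x10,
        .lsb  = 0x10,
        .mask = 0x00020000,
};

struct ce_ctrl {
        unsigned int addr;
        const struct reg_map *src_ring; /* const is required here */
};

static const struct ce_ctrl demo_ctrl = {
        .addr     = 0x18,
        .src_ring = &demo_src_ring,     /* would warn if src_ring were non-const */
};
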
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 442091c6dfd2..da71dce9babf 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -289,19 +289,22 @@ struct ath10k_hw_ce_ctrl1 {
u32 sw_wr_mask;
u32 reset_mask;
u32 reset;
- struct ath10k_hw_ce_regs_addr_map *src_ring;
- struct ath10k_hw_ce_regs_addr_map *dst_ring;
- struct ath10k_hw_ce_regs_addr_map *dmax; };
+ const struct ath10k_hw_ce_regs_addr_map *src_ring;
+ const struct ath10k_hw_ce_regs_addr_map *dst_ring;
+ const struct ath10k_hw_ce_regs_addr_map *dmax;
+};
struct ath10k_hw_ce_cmd_halt {
u32 status_reset;
u32 msb;
u32 mask;
- struct ath10k_hw_ce_regs_addr_map *status; };
+ const struct ath10k_hw_ce_regs_addr_map *status;
+};
struct ath10k_hw_ce_host_ie {
u32 copy_complete_reset;
- struct ath10k_hw_ce_regs_addr_map *copy_complete; };
+ const struct ath10k_hw_ce_regs_addr_map *copy_complete;
+};
struct ath10k_hw_ce_host_wm_regs {
u32 dstr_lmask;
@@ -328,8 +331,9 @@ struct ath10k_hw_ce_dst_src_wm_regs {
u32 addr;
u32 low_rst;
u32 high_rst;
- struct ath10k_hw_ce_regs_addr_map *wm_low;
- struct ath10k_hw_ce_regs_addr_map *wm_high; };
+ const struct ath10k_hw_ce_regs_addr_map *wm_low;
+ const struct ath10k_hw_ce_regs_addr_map *wm_high;
+};
struct ath10k_hw_ce_ctrl1_upd {
u32 shift;
@@ -355,14 +359,14 @@ struct ath10k_hw_ce_regs {
u32 ce_rri_low;
u32 ce_rri_high;
u32 host_ie_addr;
- struct ath10k_hw_ce_host_wm_regs *wm_regs;
- struct ath10k_hw_ce_misc_regs *misc_regs;
- struct ath10k_hw_ce_ctrl1 *ctrl1_regs;
- struct ath10k_hw_ce_cmd_halt *cmd_halt;
- struct ath10k_hw_ce_host_ie *host_ie;
- struct ath10k_hw_ce_dst_src_wm_regs *wm_srcr;
- struct ath10k_hw_ce_dst_src_wm_regs *wm_dstr;
- struct ath10k_hw_ce_ctrl1_upd *upd;
+ const struct ath10k_hw_ce_host_wm_regs *wm_regs;
+ const struct ath10k_hw_ce_misc_regs *misc_regs;
+ const struct ath10k_hw_ce_ctrl1 *ctrl1_regs;
+ const struct ath10k_hw_ce_cmd_halt *cmd_halt;
+ const struct ath10k_hw_ce_host_ie *host_ie;
+ const struct ath10k_hw_ce_dst_src_wm_regs *wm_srcr;
+ const struct ath10k_hw_ce_dst_src_wm_regs *wm_dstr;
+ const struct ath10k_hw_ce_ctrl1_upd *upd;
};
struct ath10k_hw_values {
@@ -469,8 +473,8 @@ enum ath10k_hw_cc_wraparound_type {
*/
ATH10K_HW_CC_WRAP_SHIFTED_ALL = 1,
- /* Each hw counter wrapsaround independently. When the
- * counter overflows the repestive counter is right shifted
+ /* Each hw counter wraps around independently. When the
+ * counter overflows the respective counter is right shifted
* by 1, i.e reset to 0x7fffffff, and other counters will be
* running unaffected. In this type of wraparound, it should
* be possible to report accurate Rx busy time unlike the
@@ -642,7 +646,7 @@ struct htt_rx_ring_rx_desc_offsets;
/* Defines needed for Rx descriptor abstraction */
struct ath10k_hw_ops {
- void (*set_coverage_class)(struct ath10k *ar, s16 value);
+ void (*set_coverage_class)(struct ath10k *ar, int radio_idx, s16 value);
int (*enable_pll_clk)(struct ath10k *ar);
int (*tx_data_rssi_pad_bytes)(struct htt_resp *htt);
int (*is_rssi_enable)(struct htt_resp *resp);
@@ -833,7 +837,7 @@ ath10k_is_rssi_enable(struct ath10k_hw_params *hw,
#define TARGET_10_4_NUM_TDLS_BUFFER_STA 1
#define TARGET_10_4_NUM_TDLS_SLEEP_STA 1
-/* Maximum number of Copy Engine's supported */
+/* Maximum number of Copy Engines supported */
#define CE_COUNT_MAX 12
/* Number of Copy Engines supported */
@@ -1130,7 +1134,7 @@ ath10k_is_rssi_enable(struct ath10k_hw_params *hw,
#define RTC_STATE_V_GET(x) (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB)
/* Register definitions for first generation ath10k cards. These cards include
- * a mac thich has a register allocation similar to ath9k and at least some
+ * a mac which has a register allocation similar to ath9k and at least some
* registers including the ones relevant for modifying the coverage class are
* identical to the ath9k definitions.
* These registers are usually managed by the ath10k firmware. However by
diff --git a/drivers/net/wireless/ath/ath10k/leds.c b/drivers/net/wireless/ath/ath10k/leds.c
index 9b1d04eb4265..3a6c8111e7c6 100644
--- a/drivers/net/wireless/ath/ath10k/leds.c
+++ b/drivers/net/wireless/ath/ath10k/leds.c
@@ -27,7 +27,7 @@ static int ath10k_leds_set_brightness_blocking(struct led_classdev *led_cdev,
goto out;
ar->leds.gpio_state_pin = (brightness != LED_OFF) ^ led->active_low;
- ath10k_wmi_gpio_output(ar, led->gpio, ar->leds.gpio_state_pin);
+ ath10k_wmi_gpio_output(ar, ar->hw_params.led_pin, ar->leds.gpio_state_pin);
out:
mutex_unlock(&ar->conf_mutex);
@@ -64,7 +64,6 @@ int ath10k_leds_register(struct ath10k *ar)
snprintf(ar->leds.label, sizeof(ar->leds.label), "ath10k-%s",
wiphy_name(ar->hw->wiphy));
ar->leds.wifi_led.active_low = 1;
- ar->leds.wifi_led.gpio = ar->hw_params.led_pin;
ar->leds.wifi_led.name = ar->leds.label;
ar->leds.wifi_led.default_state = LEDS_GPIO_DEFSTATE_KEEP;
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 41ab83c3d3f7..da6f7957a0ae 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -3,17 +3,19 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include "mac.h"
+#include <linux/export.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/bitfield.h>
+#include <linux/random.h>
#include "hif.h"
#include "core.h"
@@ -288,8 +290,15 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
if (cmd == DISABLE_KEY) {
- arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_NONE];
- arg.key_data = NULL;
+ if (flags & WMI_KEY_GROUP) {
+ /* Not all hardware handles group-key deletion operation
+ * correctly. Replace the key with a junk value to invalidate it.
+ */
+ get_random_bytes(key->key, key->keylen);
+ } else {
+ arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_NONE];
+ arg.key_data = NULL;
+ }
}
return ath10k_wmi_vdev_install_key(arvif->ar, &arg);
@@ -875,7 +884,7 @@ static void ath10k_peer_map_cleanup(struct ath10k *ar, struct ath10k_peer *peer)
*/
for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
if (ar->peer_map[i] == peer) {
- ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n",
+ ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %p idx %d)\n",
peer->addr, peer, i);
ar->peer_map[i] = NULL;
}
@@ -1022,6 +1031,26 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
return ar->last_wmi_vdev_start_status;
}
+static inline int ath10k_vdev_delete_sync(struct ath10k *ar)
+{
+ unsigned long time_left;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(WMI_SERVICE_SYNC_DELETE_CMDS, ar->wmi.svc_map))
+ return 0;
+
+ if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
+ return -ESHUTDOWN;
+
+ time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
+ ATH10K_VDEV_DELETE_TIMEOUT_HZ);
+ if (time_left == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
{
struct cfg80211_chan_def *chandef = NULL;
@@ -3363,7 +3392,7 @@ static int ath10k_update_channel_list(struct ath10k *ar)
struct ieee80211_supported_band **bands;
enum nl80211_band band;
struct ieee80211_channel *channel;
- struct wmi_scan_chan_list_arg arg = {0};
+ struct wmi_scan_chan_list_arg arg = {};
struct wmi_channel_arg *ch;
bool passive;
int len;
@@ -4063,7 +4092,7 @@ static int ath10k_mac_tx(struct ath10k *ar,
if (!noque_offchan && info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
if (!ath10k_mac_tx_frm_has_freq(ar)) {
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac queued offchannel skb %pK len %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac queued offchannel skb %p len %d\n",
skb, skb->len);
skb_queue_tail(&ar->offchan_tx_queue, skb);
@@ -4126,7 +4155,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
mutex_lock(&ar->conf_mutex);
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK len %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %p len %d\n",
skb, skb->len);
hdr = (struct ieee80211_hdr *)skb->data;
@@ -4181,7 +4210,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
time_left =
wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
if (time_left == 0)
- ath10k_warn(ar, "timed out waiting for offchannel skb %pK, len: %d\n",
+ ath10k_warn(ar, "timed out waiting for offchannel skb %p, len: %d\n",
skb, skb->len);
if (!peer && tmp_peer_created) {
@@ -4799,7 +4828,8 @@ void ath10k_halt(struct ath10k *ar)
spin_unlock_bh(&ar->data_lock);
}
-static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+static int ath10k_get_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 *tx_ant, u32 *rx_ant)
{
struct ath10k *ar = hw->priv;
@@ -4862,7 +4892,7 @@ static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar)
static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
{
- struct ieee80211_sta_vht_cap vht_cap = {0};
+ struct ieee80211_sta_vht_cap vht_cap = {};
struct ath10k_hw_params *hw = &ar->hw_params;
u16 mcs_map;
u32 val;
@@ -4920,7 +4950,7 @@ static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
{
int i;
- struct ieee80211_sta_ht_cap ht_cap = {0};
+ struct ieee80211_sta_ht_cap ht_cap = {};
if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED))
return ht_cap;
@@ -5046,7 +5076,8 @@ static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
return 0;
}
-static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+static int ath10k_set_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 tx_ant, u32 rx_ant)
{
struct ath10k *ar = hw->priv;
int ret;
@@ -5151,7 +5182,7 @@ static int ath10k_start(struct ieee80211_hw *hw)
struct ath10k *ar = hw->priv;
u32 param;
int ret = 0;
- struct wmi_bb_timing_cfg_arg bb_timing = {0};
+ struct wmi_bb_timing_cfg_arg bb_timing = {};
/*
* This makes sense only when restarting hw. It is harmless to call
@@ -5396,6 +5427,7 @@ static void ath10k_stop(struct ieee80211_hw *hw, bool suspend)
cancel_work_sync(&ar->set_coverage_class_work);
cancel_delayed_work_sync(&ar->scan.timeout);
cancel_work_sync(&ar->restart_work);
+ cancel_work_sync(&ar->recovery_check_work);
}
static int ath10k_config_ps(struct ath10k *ar)
@@ -5416,7 +5448,7 @@ static int ath10k_config_ps(struct ath10k *ar)
return ret;
}
-static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
+static int ath10k_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct ath10k *ar = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
@@ -5900,7 +5932,6 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct ath10k_peer *peer;
- unsigned long time_left;
int ret;
int i;
@@ -5940,13 +5971,10 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n",
arvif->vdev_id, ret);
- if (test_bit(WMI_SERVICE_SYNC_DELETE_CMDS, ar->wmi.svc_map)) {
- time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
- ATH10K_VDEV_DELETE_TIMEOUT_HZ);
- if (time_left == 0) {
- ath10k_warn(ar, "Timeout in receiving vdev delete response\n");
- goto out;
- }
+ ret = ath10k_vdev_delete_sync(ar);
+ if (ret) {
+ ath10k_warn(ar, "Error in receiving vdev delete response: %d\n", ret);
+ goto out;
}
/* Some firmware revisions don't notify host about self-peer removal
@@ -6319,7 +6347,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
mutex_unlock(&ar->conf_mutex);
}
-static void ath10k_mac_op_set_coverage_class(struct ieee80211_hw *hw, s16 value)
+static void ath10k_mac_op_set_coverage_class(struct ieee80211_hw *hw, int radio_idx,
+ s16 value)
{
struct ath10k *ar = hw->priv;
@@ -6330,7 +6359,7 @@ static void ath10k_mac_op_set_coverage_class(struct ieee80211_hw *hw, s16 value)
WARN_ON_ONCE(1);
return;
}
- ar->hw_params.hw_ops->set_coverage_class(ar, value);
+ ar->hw_params.hw_ops->set_coverage_class(ar, -1, value);
}
struct ath10k_mac_tdls_iter_data {
@@ -6369,7 +6398,7 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct cfg80211_scan_request *req = &hw_req->req;
- struct wmi_start_scan_arg arg;
+ struct wmi_start_scan_arg *arg = NULL;
int ret = 0;
int i;
u32 scan_timeout;
@@ -6402,56 +6431,61 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
if (ret)
goto exit;
- memset(&arg, 0, sizeof(arg));
- ath10k_wmi_start_scan_init(ar, &arg);
- arg.vdev_id = arvif->vdev_id;
- arg.scan_id = ATH10K_SCAN_ID;
+ arg = kzalloc(sizeof(*arg), GFP_KERNEL);
+ if (!arg) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ ath10k_wmi_start_scan_init(ar, arg);
+ arg->vdev_id = arvif->vdev_id;
+ arg->scan_id = ATH10K_SCAN_ID;
if (req->ie_len) {
- arg.ie_len = req->ie_len;
- memcpy(arg.ie, req->ie, arg.ie_len);
+ arg->ie_len = req->ie_len;
+ memcpy(arg->ie, req->ie, arg->ie_len);
}
if (req->n_ssids) {
- arg.n_ssids = req->n_ssids;
- for (i = 0; i < arg.n_ssids; i++) {
- arg.ssids[i].len = req->ssids[i].ssid_len;
- arg.ssids[i].ssid = req->ssids[i].ssid;
+ arg->n_ssids = req->n_ssids;
+ for (i = 0; i < arg->n_ssids; i++) {
+ arg->ssids[i].len = req->ssids[i].ssid_len;
+ arg->ssids[i].ssid = req->ssids[i].ssid;
}
} else {
- arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
+ arg->scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
}
if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
- arg.scan_ctrl_flags |= WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ;
- ether_addr_copy(arg.mac_addr.addr, req->mac_addr);
- ether_addr_copy(arg.mac_mask.addr, req->mac_addr_mask);
+ arg->scan_ctrl_flags |= WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ;
+ ether_addr_copy(arg->mac_addr.addr, req->mac_addr);
+ ether_addr_copy(arg->mac_mask.addr, req->mac_addr_mask);
}
if (req->n_channels) {
- arg.n_channels = req->n_channels;
- for (i = 0; i < arg.n_channels; i++)
- arg.channels[i] = req->channels[i]->center_freq;
+ arg->n_channels = req->n_channels;
+ for (i = 0; i < arg->n_channels; i++)
+ arg->channels[i] = req->channels[i]->center_freq;
}
/* if duration is set, default dwell times will be overwritten */
if (req->duration) {
- arg.dwell_time_active = req->duration;
- arg.dwell_time_passive = req->duration;
- arg.burst_duration_ms = req->duration;
+ arg->dwell_time_active = req->duration;
+ arg->dwell_time_passive = req->duration;
+ arg->burst_duration_ms = req->duration;
- scan_timeout = min_t(u32, arg.max_rest_time *
- (arg.n_channels - 1) + (req->duration +
+ scan_timeout = min_t(u32, arg->max_rest_time *
+ (arg->n_channels - 1) + (req->duration +
ATH10K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD) *
- arg.n_channels, arg.max_scan_time);
+ arg->n_channels, arg->max_scan_time);
} else {
- scan_timeout = arg.max_scan_time;
+ scan_timeout = arg->max_scan_time;
}
/* Add a 200ms margin to account for event/command processing */
scan_timeout += 200;
- ret = ath10k_start_scan(ar, &arg);
+ ret = ath10k_start_scan(ar, arg);
if (ret) {
ath10k_warn(ar, "failed to start hw scan: %d\n", ret);
spin_lock_bh(&ar->data_lock);
@@ -6463,6 +6497,8 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
msecs_to_jiffies(scan_timeout));
exit:
+ kfree(arg);
+
mutex_unlock(&ar->conf_mutex);
return ret;
}
@@ -7597,7 +7633,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
* Existing station deletion.
*/
ath10k_dbg(ar, ATH10K_DBG_STA,
- "mac vdev %d peer delete %pM sta %pK (sta gone)\n",
+ "mac vdev %d peer delete %pM sta %p (sta gone)\n",
arvif->vdev_id, sta->addr, sta);
if (sta->tdls) {
@@ -7624,7 +7660,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
continue;
if (peer->sta == sta) {
- ath10k_warn(ar, "found sta peer %pM (ptr %pK id %d) entry on vdev %i after it was supposedly removed\n",
+ ath10k_warn(ar, "found sta peer %pM (ptr %p id %d) entry on vdev %i after it was supposedly removed\n",
sta->addr, peer, i, arvif->vdev_id);
peer->sta = NULL;
@@ -7899,7 +7935,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = (void *)vif->drv_priv;
- struct wmi_start_scan_arg arg;
+ struct wmi_start_scan_arg *arg = NULL;
int ret = 0;
u32 scan_time_msec;
@@ -7936,20 +7972,25 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2;
- memset(&arg, 0, sizeof(arg));
- ath10k_wmi_start_scan_init(ar, &arg);
- arg.vdev_id = arvif->vdev_id;
- arg.scan_id = ATH10K_SCAN_ID;
- arg.n_channels = 1;
- arg.channels[0] = chan->center_freq;
- arg.dwell_time_active = scan_time_msec;
- arg.dwell_time_passive = scan_time_msec;
- arg.max_scan_time = scan_time_msec;
- arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
- arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
- arg.burst_duration_ms = duration;
-
- ret = ath10k_start_scan(ar, &arg);
+ arg = kzalloc(sizeof(*arg), GFP_KERNEL);
+ if (!arg) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ ath10k_wmi_start_scan_init(ar, arg);
+ arg->vdev_id = arvif->vdev_id;
+ arg->scan_id = ATH10K_SCAN_ID;
+ arg->n_channels = 1;
+ arg->channels[0] = chan->center_freq;
+ arg->dwell_time_active = scan_time_msec;
+ arg->dwell_time_passive = scan_time_msec;
+ arg->max_scan_time = scan_time_msec;
+ arg->scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
+ arg->scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
+ arg->burst_duration_ms = duration;
+
+ ret = ath10k_start_scan(ar, arg);
if (ret) {
ath10k_warn(ar, "failed to start roc scan: %d\n", ret);
spin_lock_bh(&ar->data_lock);
@@ -7975,6 +8016,8 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
ret = 0;
exit:
+ kfree(arg);
+
mutex_unlock(&ar->conf_mutex);
return ret;
}
@@ -8004,7 +8047,8 @@ static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw,
* in ath10k, but device-specific in mac80211.
*/
-static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
+ u32 value)
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif;
@@ -8027,7 +8071,8 @@ static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
return ret;
}
-static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
+static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw,
+ int radio_idx, u32 value)
{
/* Even though there's a WMI enum for fragmentation threshold no known
* firmware actually implements it. Moreover it is not possible to rely
@@ -8125,7 +8170,12 @@ static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
ath10k_info(ar, "device successfully recovered\n");
ar->state = ATH10K_STATE_ON;
ieee80211_wake_queues(ar->hw);
- clear_bit(ATH10K_FLAG_RESTARTING, &ar->dev_flags);
+
+ /* Clear recovery state. */
+ complete(&ar->driver_recovery);
+ atomic_set(&ar->fail_cont_count, 0);
+ atomic_set(&ar->pending_recovery, 0);
+
if (ar->hw_params.hw_restart_disconnect) {
list_for_each_entry(arvif, &ar->arvifs, list) {
if (arvif->is_up && arvif->vdev_type == WMI_VDEV_TYPE_STA)
@@ -8797,7 +8847,7 @@ ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx add freq %u width %d ptr %pK\n",
+ "mac chanctx add freq %u width %d ptr %p\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);
mutex_lock(&ar->conf_mutex);
@@ -8821,7 +8871,7 @@ ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx remove freq %u width %d ptr %pK\n",
+ "mac chanctx remove freq %u width %d ptr %p\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);
mutex_lock(&ar->conf_mutex);
@@ -8886,7 +8936,7 @@ ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx change freq %u width %d ptr %pK changed %x\n",
+ "mac chanctx change freq %u width %d ptr %p changed %x\n",
ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
/* This shouldn't really happen because channel switching should use
@@ -8945,7 +8995,7 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx assign ptr %pK vdev_id %i\n",
+ "mac chanctx assign ptr %p vdev_id %i\n",
ctx, arvif->vdev_id);
if (WARN_ON(arvif->is_started)) {
@@ -9025,7 +9075,7 @@ ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx unassign ptr %pK vdev_id %i\n",
+ "mac chanctx unassign ptr %p vdev_id %i\n",
ctx, arvif->vdev_id);
WARN_ON(!arvif->is_started);
@@ -9122,7 +9172,7 @@ static const struct ath10k_index_vht_data_rate_type supported_vht_mcs_rate_nss1[
{6, {2633, 2925}, {1215, 1350}, {585, 650} },
{7, {2925, 3250}, {1350, 1500}, {650, 722} },
{8, {3510, 3900}, {1620, 1800}, {780, 867} },
- {9, {3900, 4333}, {1800, 2000}, {780, 867} }
+ {9, {3900, 4333}, {1800, 2000}, {865, 960} }
};
/*MCS parameters with Nss = 2 */
@@ -9137,7 +9187,7 @@ static const struct ath10k_index_vht_data_rate_type supported_vht_mcs_rate_nss2[
{6, {5265, 5850}, {2430, 2700}, {1170, 1300} },
{7, {5850, 6500}, {2700, 3000}, {1300, 1444} },
{8, {7020, 7800}, {3240, 3600}, {1560, 1733} },
- {9, {7800, 8667}, {3600, 4000}, {1560, 1733} }
+ {9, {7800, 8667}, {3600, 4000}, {1730, 1920} }
};
static void ath10k_mac_get_rate_flags_ht(struct ath10k *ar, u32 rate, u8 nss, u8 mcs,
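
Note: both ath10k_hw_scan() and ath10k_remain_on_channel() above switch struct wmi_start_scan_arg from a stack variable to kzalloc()/kfree(), keeping a single exit label so the buffer is freed on every path. The shape of that change is sketched below; demo_dev, big_arg, fill_arg and issue_cmd are placeholders, not driver symbols.

#include <linux/types.h>
#include <linux/slab.h>

struct demo_dev;

struct big_arg {
        u32 vdev_id;
        u32 channels[128];      /* stand-in for a structure too big for the stack */
};

static void fill_arg(struct demo_dev *dev, struct big_arg *arg) { }
static int issue_cmd(struct demo_dev *dev, struct big_arg *arg) { return 0; }

static int demo_start_scan(struct demo_dev *dev)
{
        struct big_arg *arg;
        int ret;

        /* Too large for the stack; allocate and zero it instead. */
        arg = kzalloc(sizeof(*arg), GFP_KERNEL);
        if (!arg)
                return -ENOMEM;

        fill_arg(dev, arg);

        ret = issue_cmd(dev, arg);
        if (ret)
                goto exit;

        /* further work that still needs arg would go here */
exit:
        kfree(arg);     /* freed on both the error and success paths */
        return ret;
}
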
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index c52a16f8078f..97b49bf4ad80 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -3,6 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/pci.h>
@@ -63,7 +64,7 @@ static const struct pci_device_id ath10k_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */
{ PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
{ PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */
- {0}
+ {}
};
static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
@@ -591,7 +592,7 @@ skip:
static void ath10k_pci_ps_timer(struct timer_list *t)
{
- struct ath10k_pci *ar_pci = from_timer(ar_pci, t, ps_timer);
+ struct ath10k_pci *ar_pci = timer_container_of(ar_pci, t, ps_timer);
struct ath10k *ar = ar_pci->ar;
unsigned long flags;
@@ -619,7 +620,7 @@ static void ath10k_pci_sleep_sync(struct ath10k *ar)
return;
}
- del_timer_sync(&ar_pci->ps_timer);
+ timer_delete_sync(&ar_pci->ps_timer);
spin_lock_irqsave(&ar_pci->ps_lock, flags);
WARN_ON(ar_pci->ps_wake_refcount > 0);
@@ -844,7 +845,8 @@ void ath10k_pci_rx_post(struct ath10k *ar)
void ath10k_pci_rx_replenish_retry(struct timer_list *t)
{
- struct ath10k_pci *ar_pci = from_timer(ar_pci, t, rx_post_retry);
+ struct ath10k_pci *ar_pci = timer_container_of(ar_pci, t,
+ rx_post_retry);
struct ath10k *ar = ar_pci->ar;
ath10k_pci_rx_post(ar);
@@ -1817,7 +1819,7 @@ static void ath10k_pci_rx_retry_sync(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- del_timer_sync(&ar_pci->rx_post_retry);
+ timer_delete_sync(&ar_pci->rx_post_retry);
}
int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
@@ -3411,7 +3413,7 @@ static int ath10k_pci_claim(struct ath10k *ar)
goto err_region;
}
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%pK\n", ar_pci->mem);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
return 0;
err_region:
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
index f1f33af0170a..8275345631a0 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.c
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -986,7 +986,7 @@ static int ath10k_qmi_new_server(struct qmi_handle *qmi_hdl,
ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw qmi service found\n");
- ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)&qmi->sq,
+ ret = kernel_connect(qmi_hdl->sock, (struct sockaddr_unsized *)&qmi->sq,
sizeof(qmi->sq), 0);
if (ret) {
ath10k_err(ar, "failed to connect to a remote QMI service port\n");
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index 08a6f36a6be9..c06d50db40b8 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -3,7 +3,7 @@
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc.
* Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -1447,7 +1447,8 @@ release:
static void ath10k_sdio_sleep_timer_handler(struct timer_list *t)
{
- struct ath10k_sdio *ar_sdio = from_timer(ar_sdio, t, sleep_timer);
+ struct ath10k_sdio *ar_sdio = timer_container_of(ar_sdio, t,
+ sleep_timer);
ar_sdio->mbox_state = SDIO_MBOX_REQUEST_TO_SLEEP_STATE;
queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
@@ -1621,7 +1622,7 @@ static void ath10k_sdio_hif_power_down(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power off\n");
- del_timer_sync(&ar_sdio->sleep_timer);
+ timer_delete_sync(&ar_sdio->sleep_timer);
ath10k_sdio_set_mbox_sleep(ar, true);
/* Disable the card */
@@ -1844,7 +1845,7 @@ static int ath10k_sdio_get_htt_tx_complete(struct ath10k *ar)
ret = ath10k_sdio_diag_read32(ar, addr, &val);
if (ret) {
ath10k_warn(ar,
- "unable to read hi_acs_flags for htt tx comple : %d\n", ret);
+ "unable to read hi_acs_flags for htt tx complete: %d\n", ret);
return ret;
}
@@ -2648,9 +2649,9 @@ static void ath10k_sdio_remove(struct sdio_func *func)
netif_napi_del(&ar->napi);
- ath10k_core_destroy(ar);
-
destroy_workqueue(ar_sdio->workqueue);
+
+ ath10k_core_destroy(ar);
}
static const struct sdio_device_id ath10k_sdio_devices[] = {
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index 0fe47d51013c..b3f6424c17d3 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -13,7 +13,7 @@
#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc/qcom_rproc.h>
-#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/iommu.h>
#include "ce.h"
@@ -644,7 +644,8 @@ static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
static void ath10k_snoc_rx_replenish_retry(struct timer_list *t)
{
- struct ath10k_snoc *ar_snoc = from_timer(ar_snoc, t, rx_post_retry);
+ struct ath10k_snoc *ar_snoc = timer_container_of(ar_snoc, t,
+ rx_post_retry);
struct ath10k *ar = ar_snoc->ar;
ath10k_snoc_rx_post(ar);
@@ -911,7 +912,7 @@ static void ath10k_snoc_buffer_cleanup(struct ath10k *ar)
struct ath10k_snoc_pipe *pipe_info;
int pipe_num;
- del_timer_sync(&ar_snoc->rx_post_retry);
+ timer_delete_sync(&ar_snoc->rx_post_retry);
for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
pipe_info = &ar_snoc->pipe_info[pipe_num];
ath10k_snoc_rx_pipe_cleanup(pipe_info);
@@ -935,9 +936,11 @@ static int ath10k_snoc_hif_start(struct ath10k *ar)
bitmap_clear(ar_snoc->pending_ce_irqs, 0, CE_COUNT_MAX);
- dev_set_threaded(ar->napi_dev, true);
+ netif_threaded_enable(ar->napi_dev);
ath10k_core_napi_enable(ar);
- ath10k_snoc_irq_enable(ar);
+ /* IRQs are left enabled when we restart due to a firmware crash */
+ if (!test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
+ ath10k_snoc_irq_enable(ar);
ath10k_snoc_rx_post(ar);
clear_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags);
@@ -1556,19 +1559,11 @@ static void ath10k_modem_deinit(struct ath10k *ar)
static int ath10k_setup_msa_resources(struct ath10k *ar, u32 msa_size)
{
struct device *dev = ar->dev;
- struct device_node *node;
struct resource r;
int ret;
- node = of_parse_phandle(dev->of_node, "memory-region", 0);
- if (node) {
- ret = of_address_to_resource(node, 0, &r);
- of_node_put(node);
- if (ret) {
- dev_err(dev, "failed to resolve msa fixed region\n");
- return ret;
- }
-
+ ret = of_reserved_mem_region_to_resource(dev->of_node, 0, &r);
+ if (!ret) {
ar->msa.paddr = r.start;
ar->msa.mem_size = resource_size(&r);
ar->msa.vaddr = devm_memremap(dev, ar->msa.paddr,
@@ -1885,11 +1880,11 @@ static void ath10k_snoc_shutdown(struct platform_device *pdev)
}
static struct platform_driver ath10k_snoc_driver = {
- .probe = ath10k_snoc_probe,
- .remove_new = ath10k_snoc_remove,
+ .probe = ath10k_snoc_probe,
+ .remove = ath10k_snoc_remove,
.shutdown = ath10k_snoc_shutdown,
.driver = {
- .name = "ath10k_snoc",
+ .name = "ath10k_snoc",
.of_match_table = ath10k_snoc_dt_match,
},
};
diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c
index 7a9b9bbcdbfc..d3bd385694d6 100644
--- a/drivers/net/wireless/ath/ath10k/testmode.c
+++ b/drivers/net/wireless/ath/ath10k/testmode.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include "testmode.h"
@@ -10,12 +11,17 @@
#include "debug.h"
#include "wmi.h"
+#include "wmi-tlv.h"
#include "hif.h"
#include "hw.h"
#include "core.h"
#include "testmode_i.h"
+#define ATH10K_FTM_SEG_NONE ((u32)-1)
+#define ATH10K_FTM_SEGHDR_CURRENT_SEQ GENMASK(3, 0)
+#define ATH10K_FTM_SEGHDR_TOTAL_SEGMENTS GENMASK(7, 4)
+
static const struct nla_policy ath10k_tm_policy[ATH10K_TM_ATTR_MAX + 1] = {
[ATH10K_TM_ATTR_CMD] = { .type = NLA_U32 },
[ATH10K_TM_ATTR_DATA] = { .type = NLA_BINARY,
@@ -25,41 +31,19 @@ static const struct nla_policy ath10k_tm_policy[ATH10K_TM_ATTR_MAX + 1] = {
[ATH10K_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 },
};
-/* Returns true if callee consumes the skb and the skb should be discarded.
- * Returns false if skb is not used. Does not sleep.
- */
-bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
+static void ath10k_tm_event_unsegmented(struct ath10k *ar, u32 cmd_id,
+ struct sk_buff *skb)
{
struct sk_buff *nl_skb;
- bool consumed;
int ret;
- ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
- "testmode event wmi cmd_id %d skb %pK skb->len %d\n",
- cmd_id, skb, skb->len);
-
- ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
-
- spin_lock_bh(&ar->data_lock);
-
- if (!ar->testmode.utf_monitor) {
- consumed = false;
- goto out;
- }
-
- /* Only testmode.c should be handling events from utf firmware,
- * otherwise all sort of problems will arise as mac80211 operations
- * are not initialised.
- */
- consumed = true;
-
nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy,
2 * sizeof(u32) + skb->len,
GFP_ATOMIC);
if (!nl_skb) {
ath10k_warn(ar,
"failed to allocate skb for testmode wmi event\n");
- goto out;
+ return;
}
ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_CMD, ATH10K_TM_CMD_WMI);
@@ -68,7 +52,7 @@ bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
"failed to put testmode wmi event cmd attribute: %d\n",
ret);
kfree_skb(nl_skb);
- goto out;
+ return;
}
ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_WMI_CMDID, cmd_id);
@@ -77,7 +61,7 @@ bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
"failed to put testmode wmi event cmd_id: %d\n",
ret);
kfree_skb(nl_skb);
- goto out;
+ return;
}
ret = nla_put(nl_skb, ATH10K_TM_ATTR_DATA, skb->len, skb->data);
@@ -86,10 +70,122 @@ bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
"failed to copy skb to testmode wmi event: %d\n",
ret);
kfree_skb(nl_skb);
- goto out;
+ return;
}
cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
+}
+
+static void ath10k_tm_event_segmented(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
+{
+ struct wmi_ftm_cmd *ftm = (struct wmi_ftm_cmd *)skb->data;
+ u8 total_segments, current_seq;
+ struct sk_buff *nl_skb;
+ u8 const *buf_pos;
+ u16 datalen;
+ u32 data_pos;
+ int ret;
+
+ if (skb->len < sizeof(*ftm)) {
+ ath10k_warn(ar, "Invalid ftm event length: %d\n", skb->len);
+ return;
+ }
+
+ current_seq = FIELD_GET(ATH10K_FTM_SEGHDR_CURRENT_SEQ,
+ __le32_to_cpu(ftm->seg_hdr.segmentinfo));
+ total_segments = FIELD_GET(ATH10K_FTM_SEGHDR_TOTAL_SEGMENTS,
+ __le32_to_cpu(ftm->seg_hdr.segmentinfo));
+ datalen = skb->len - sizeof(*ftm);
+ buf_pos = ftm->data;
+
+ if (current_seq == 0) {
+ ar->testmode.expected_seq = 0;
+ ar->testmode.data_pos = 0;
+ }
+
+ data_pos = ar->testmode.data_pos;
+
+ if ((data_pos + datalen) > ATH_FTM_EVENT_MAX_BUF_LENGTH) {
+ ath10k_warn(ar, "Invalid ftm event length at %u: %u\n",
+ data_pos, datalen);
+ ret = -EINVAL;
+ return;
+ }
+
+ memcpy(&ar->testmode.eventdata[data_pos], buf_pos, datalen);
+ data_pos += datalen;
+
+ if (++ar->testmode.expected_seq != total_segments) {
+ ar->testmode.data_pos = data_pos;
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "partial data received %u/%u\n",
+ current_seq + 1, total_segments);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "total data length %u\n", data_pos);
+
+ nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy,
+ 2 * sizeof(u32) + data_pos,
+ GFP_ATOMIC);
+ if (!nl_skb) {
+ ath10k_warn(ar, "failed to allocate skb for testmode wmi event\n");
+ return;
+ }
+
+ ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_CMD, ATH10K_TM_CMD_TLV);
+ if (ret) {
+ ath10k_warn(ar, "failed to put testmode wmi event attribute: %d\n", ret);
+ kfree_skb(nl_skb);
+ return;
+ }
+
+ ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_WMI_CMDID, cmd_id);
+ if (ret) {
+ ath10k_warn(ar, "failed to put testmode wmi event cmd_id: %d\n", ret);
+ kfree_skb(nl_skb);
+ return;
+ }
+
+ ret = nla_put(nl_skb, ATH10K_TM_ATTR_DATA, data_pos, &ar->testmode.eventdata[0]);
+ if (ret) {
+ ath10k_warn(ar, "failed to copy skb to testmode wmi event: %d\n", ret);
+ kfree_skb(nl_skb);
+ return;
+ }
+
+ cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
+}
+
+/* Returns true if callee consumes the skb and the skb should be discarded.
+ * Returns false if skb is not used. Does not sleep.
+ */
+bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
+{
+ bool consumed;
+
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
+ "testmode event wmi cmd_id %d skb %p skb->len %d\n",
+ cmd_id, skb, skb->len);
+
+ ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (!ar->testmode.utf_monitor) {
+ consumed = false;
+ goto out;
+ }
+
+ /* Only testmode.c should be handling events from utf firmware,
+ * otherwise all sort of problems will arise as mac80211 operations
+ * are not initialised.
+ */
+ consumed = true;
+
+ if (ar->testmode.expected_seq != ATH10K_FTM_SEG_NONE)
+ ath10k_tm_event_segmented(ar, cmd_id, skb);
+ else
+ ath10k_tm_event_unsegmented(ar, cmd_id, skb);
out:
spin_unlock_bh(&ar->data_lock);
@@ -281,12 +377,18 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
goto err_release_utf_mode_fw;
}
+ ar->testmode.eventdata = kzalloc(ATH_FTM_EVENT_MAX_BUF_LENGTH, GFP_KERNEL);
+ if (!ar->testmode.eventdata) {
+ ret = -ENOMEM;
+ goto err_power_down;
+ }
+
ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_UTF,
&ar->testmode.utf_mode_fw);
if (ret) {
ath10k_err(ar, "failed to start core (testmode): %d\n", ret);
ar->state = ATH10K_STATE_OFF;
- goto err_power_down;
+ goto err_release_eventdata;
}
ar->state = ATH10K_STATE_UTF;
@@ -302,6 +404,10 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
return 0;
+err_release_eventdata:
+ kfree(ar->testmode.eventdata);
+ ar->testmode.eventdata = NULL;
+
err_power_down:
ath10k_hif_power_down(ar);
@@ -341,6 +447,9 @@ static void __ath10k_tm_cmd_utf_stop(struct ath10k *ar)
release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware);
ar->testmode.utf_mode_fw.fw_file.firmware = NULL;
+ kfree(ar->testmode.eventdata);
+ ar->testmode.eventdata = NULL;
+
ar->state = ATH10K_STATE_OFF;
}
@@ -397,7 +506,7 @@ static int ath10k_tm_cmd_wmi(struct ath10k *ar, struct nlattr *tb[])
cmd_id = nla_get_u32(tb[ATH10K_TM_ATTR_WMI_CMDID]);
ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
- "testmode cmd wmi cmd_id %d buf %pK buf_len %d\n",
+ "testmode cmd wmi cmd_id %d buf %p buf_len %d\n",
cmd_id, buf, buf_len);
ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", buf, buf_len);
@@ -424,6 +533,85 @@ out:
return ret;
}
+static int ath10k_tm_cmd_tlv(struct ath10k *ar, struct nlattr *tb[])
+{
+ u16 total_bytes, num_segments;
+ u32 cmd_id, buf_len;
+ u8 segnumber = 0;
+ u8 *bufpos;
+ void *buf;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_UTF) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ buf = nla_data(tb[ATH10K_TM_ATTR_DATA]);
+ buf_len = nla_len(tb[ATH10K_TM_ATTR_DATA]);
+ cmd_id = WMI_PDEV_UTF_CMDID;
+
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
+ "cmd wmi ftm cmd_id %d buffer length %d\n",
+ cmd_id, buf_len);
+ ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", buf, buf_len);
+
+ bufpos = buf;
+ total_bytes = buf_len;
+ num_segments = total_bytes / MAX_WMI_UTF_LEN;
+ ar->testmode.expected_seq = 0;
+
+ if (buf_len - (num_segments * MAX_WMI_UTF_LEN))
+ num_segments++;
+
+ while (buf_len) {
+ u16 chunk_len = min_t(u16, buf_len, MAX_WMI_UTF_LEN);
+ struct wmi_ftm_cmd *ftm_cmd;
+ struct sk_buff *skb;
+ u32 hdr_info;
+ u8 seginfo;
+
+ skb = ath10k_wmi_alloc_skb(ar, (chunk_len +
+ sizeof(struct wmi_ftm_cmd)));
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ftm_cmd = (struct wmi_ftm_cmd *)skb->data;
+ hdr_info = FIELD_PREP(WMI_TLV_TAG, WMI_TLV_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, (chunk_len +
+ sizeof(struct wmi_ftm_seg_hdr)));
+ ftm_cmd->tlv_header = __cpu_to_le32(hdr_info);
+ ftm_cmd->seg_hdr.len = __cpu_to_le32(total_bytes);
+ ftm_cmd->seg_hdr.msgref = __cpu_to_le32(ar->testmode.ftm_msgref);
+ seginfo = FIELD_PREP(ATH10K_FTM_SEGHDR_TOTAL_SEGMENTS, num_segments) |
+ FIELD_PREP(ATH10K_FTM_SEGHDR_CURRENT_SEQ, segnumber);
+ ftm_cmd->seg_hdr.segmentinfo = __cpu_to_le32(seginfo);
+ segnumber++;
+
+ memcpy(&ftm_cmd->data, bufpos, chunk_len);
+
+ ret = ath10k_wmi_cmd_send(ar, skb, cmd_id);
+ if (ret) {
+ ath10k_warn(ar, "failed to send wmi ftm command: %d\n", ret);
+ goto out;
+ }
+
+ buf_len -= chunk_len;
+ bufpos += chunk_len;
+ }
+
+ ar->testmode.ftm_msgref++;
+ ret = 0;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
int ath10k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len)
{
@@ -439,9 +627,14 @@ int ath10k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (!tb[ATH10K_TM_ATTR_CMD])
return -EINVAL;
+ ar->testmode.expected_seq = ATH10K_FTM_SEG_NONE;
+
switch (nla_get_u32(tb[ATH10K_TM_ATTR_CMD])) {
case ATH10K_TM_CMD_GET_VERSION:
- return ath10k_tm_cmd_get_version(ar, tb);
+ if (!tb[ATH10K_TM_ATTR_DATA])
+ return ath10k_tm_cmd_get_version(ar, tb);
+ else /* ATH10K_TM_CMD_TLV */
+ return ath10k_tm_cmd_tlv(ar, tb);
case ATH10K_TM_CMD_UTF_START:
return ath10k_tm_cmd_utf_start(ar, tb);
case ATH10K_TM_CMD_UTF_STOP:
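
Note: the TLV/FTM transmit path above chops the user buffer into MAX_WMI_UTF_LEN (252-byte) chunks and packs the running segment number and the total segment count into the low byte of segmentinfo (bits 3:0 and 7:4 respectively). The arithmetic is easy to check in isolation; this is a plain userspace sketch that reuses the mask values and chunk size from the patch, everything else is illustrative.

#include <stdint.h>
#include <stdio.h>

#define MAX_WMI_UTF_LEN                 252
#define SEGHDR_CURRENT_SEQ(s)           ((uint32_t)(s) & 0x0f)          /* GENMASK(3, 0) */
#define SEGHDR_TOTAL_SEGMENTS(n)        (((uint32_t)(n) & 0x0f) << 4)   /* GENMASK(7, 4) */

int main(void)
{
        unsigned int buf_len = 1000;    /* example payload size */
        unsigned int num_segments = buf_len / MAX_WMI_UTF_LEN;

        if (buf_len % MAX_WMI_UTF_LEN)
                num_segments++;         /* 1000 bytes -> 4 segments */

        for (unsigned int seq = 0; seq < num_segments; seq++) {
                uint32_t seginfo = SEGHDR_TOTAL_SEGMENTS(num_segments) |
                                   SEGHDR_CURRENT_SEQ(seq);
                unsigned int chunk = (buf_len > MAX_WMI_UTF_LEN) ? MAX_WMI_UTF_LEN : buf_len;

                printf("segment %u/%u: %u bytes, segmentinfo 0x%02x\n",
                       seq + 1, num_segments, chunk, (unsigned int)seginfo);
                buf_len -= chunk;
        }
        return 0;
}
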
diff --git a/drivers/net/wireless/ath/ath10k/testmode_i.h b/drivers/net/wireless/ath/ath10k/testmode_i.h
index ee1cb27c1d60..1603f5276682 100644
--- a/drivers/net/wireless/ath/ath10k/testmode_i.h
+++ b/drivers/net/wireless/ath/ath10k/testmode_i.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
/* "API" level of the ath10k testmode interface. Bump it after every
@@ -14,6 +15,7 @@
#define ATH10K_TESTMODE_VERSION_MINOR 0
#define ATH10K_TM_DATA_MAX_LEN 5000
+#define ATH_FTM_EVENT_MAX_BUF_LENGTH 2048
enum ath10k_tm_attr {
__ATH10K_TM_ATTR_INVALID = 0,
@@ -57,4 +59,17 @@ enum ath10k_tm_cmd {
* ATH10K_TM_ATTR_DATA.
*/
ATH10K_TM_CMD_WMI = 3,
+
+ /* The command used to transmit a test command to the firmware
+ * and the event to receive test events from the firmware. The data
+ * received only contain the TLV payload, need to add the tlv header
+ * and send the cmd to firmware with command id WMI_PDEV_UTF_CMDID.
+ * The data payload size could be large and the driver needs to
+ * send segmented data to firmware.
+ *
+ * This legacy testmode command shares the same value as the get-version
+ * command. To distinguish between them, we check whether the data attribute
+ * is present.
+ */
+ ATH10K_TM_CMD_TLV = ATH10K_TM_CMD_GET_VERSION,
};
diff --git a/drivers/net/wireless/ath/ath10k/trace.c b/drivers/net/wireless/ath/ath10k/trace.c
index c7d4c97e6079..421ec47c59bd 100644
--- a/drivers/net/wireless/ath/ath10k/trace.c
+++ b/drivers/net/wireless/ath/ath10k/trace.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012 Qualcomm Atheros, Inc.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
+#include <linux/export.h>
#include <linux/module.h>
#define CREATE_TRACE_POINTS
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index da3bc35e41aa..493bfb410aff 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -35,7 +35,7 @@ static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
complete(&ar->offchan_tx_completed);
ar->offchan_tx_skb = NULL; /* just for sanity */
- ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %pK\n", skb);
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);
out:
spin_unlock_bh(&ar->data_lock);
}
diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c
index 3b51b7f52130..1732a4f98418 100644
--- a/drivers/net/wireless/ath/ath10k/usb.c
+++ b/drivers/net/wireless/ath/ath10k/usb.c
@@ -131,7 +131,7 @@ static void ath10k_usb_recv_complete(struct urb *urb)
int status = 0;
ath10k_dbg(ar, ATH10K_DBG_USB_BULK,
- "usb recv pipe %d stat %d len %d urb 0x%pK\n",
+ "usb recv pipe %d stat %d len %d urb 0x%p\n",
pipe->logical_pipe_num, urb->status, urb->actual_length,
urb);
@@ -230,7 +230,7 @@ static void ath10k_usb_post_recv_transfers(struct ath10k *ar,
ath10k_usb_recv_complete, urb_context);
ath10k_dbg(ar, ATH10K_DBG_USB_BULK,
- "usb bulk recv submit %d 0x%x ep 0x%2.2x len %d buf 0x%pK\n",
+ "usb bulk recv submit %d 0x%x ep 0x%2.2x len %d buf 0x%p\n",
recv_pipe->logical_pipe_num,
recv_pipe->usb_pipe_handle, recv_pipe->ep_address,
ATH10K_USB_RX_BUFFER_SIZE, urb_context->skb);
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 5e061f7525a6..b4aad6604d6d 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -4,6 +4,7 @@
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/skbuff.h>
@@ -1937,10 +1938,16 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
if (cmd_id == WMI_CMD_UNSUPPORTED) {
ath10k_warn(ar, "wmi command %d is not supported by firmware\n",
cmd_id);
+ dev_kfree_skb_any(skb);
return ret;
}
wait_event_timeout(ar->wmi.tx_credits_wq, ({
+ if (ar->state == ATH10K_STATE_WEDGED) {
+ ret = -ESHUTDOWN;
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "drop wmi command %d, hardware is wedged\n", cmd_id);
+ }
/* try to send pending beacons first. they take priority */
ath10k_wmi_tx_beacons_nowait(ar);
@@ -2029,7 +2036,7 @@ ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
memcpy(cmd->buf, msdu->data, msdu->len);
- ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %pK len %d ftype %02x stype %02x\n",
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
msdu, skb->len, fc & IEEE80211_FCTL_FTYPE,
fc & IEEE80211_FCTL_STYPE);
trace_ath10k_tx_hdr(ar, skb->data, skb->len);
@@ -2637,7 +2644,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
status->boottime_ns = ktime_get_boottime_ns();
ath10k_dbg(ar, ATH10K_DBG_MGMT,
- "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
+ "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
skb, skb->len,
fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
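
With the wedged-state check above, ath10k_wmi_cmd_send() can now fail with -ESHUTDOWN while a recovery is pending. A caller can treat that as a non-retryable, non-alarming condition; a hedged sketch with a hypothetical caller:

/* Illustration only: ath10k_wmi_example_send() is not a real function. */
static int ath10k_wmi_example_send(struct ath10k *ar, struct sk_buff *skb,
				   u32 cmd_id)
{
	int ret = ath10k_wmi_cmd_send(ar, skb, cmd_id);

	if (ret == -ESHUTDOWN)
		return ret;	/* recovery will reconfigure, do not retry */

	if (ret)
		ath10k_warn(ar, "failed to send wmi cmd %u: %d\n", cmd_id, ret);

	return ret;
}
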
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 0faefc0a9a40..7f50a1de6b97 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -3,7 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef _WMI_H_
@@ -7418,6 +7418,23 @@ struct wmi_pdev_bb_timing_cfg_cmd {
__le32 bb_xpa_timing;
} __packed;
+struct wmi_ftm_seg_hdr {
+ __le32 len;
+ __le32 msgref;
+ __le32 segmentinfo;
+ __le32 pdev_id;
+} __packed;
+
+struct wmi_ftm_cmd {
+ __le32 tlv_header;
+ struct wmi_ftm_seg_hdr seg_hdr;
+ u8 data[];
+} __packed;
+
+#define WMI_TLV_LEN GENMASK(15, 0)
+#define WMI_TLV_TAG GENMASK(31, 16)
+#define MAX_WMI_UTF_LEN 252
+
struct ath10k;
struct ath10k_vif;
struct ath10k_fw_stats_pdev;
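
The structures and masks above define how a large FTM payload is carried: each chunk gets a wmi_ftm_seg_hdr plus a TLV header and at most MAX_WMI_UTF_LEN bytes of data. A minimal sketch of such segmentation, assuming an illustrative segmentinfo layout (total segments in the upper nibble, sequence number in the lower nibble) and an illustrative choice of TLV tag and pdev id:

/* Sketch only: split a payload into MAX_WMI_UTF_LEN chunks and send
 * each one as a WMI_PDEV_UTF_CMDID command.
 */
static int ath10k_tm_send_segmented_sketch(struct ath10k *ar, const u8 *data,
					   u32 total_len, u32 msgref)
{
	u16 num_segs = DIV_ROUND_UP(total_len, MAX_WMI_UTF_LEN);
	u32 pos = 0;
	u16 seg;

	for (seg = 0; seg < num_segs; seg++) {
		u16 chunk = min_t(u32, total_len - pos, MAX_WMI_UTF_LEN);
		struct wmi_ftm_cmd *cmd;
		struct sk_buff *skb;
		int ret;

		skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + chunk);
		if (!skb)
			return -ENOMEM;

		cmd = (struct wmi_ftm_cmd *)skb->data;
		cmd->tlv_header =
			cpu_to_le32(FIELD_PREP(WMI_TLV_TAG, WMI_TLV_TAG_ARRAY_BYTE) |
				    FIELD_PREP(WMI_TLV_LEN, chunk));
		cmd->seg_hdr.len = cpu_to_le32(total_len);
		cmd->seg_hdr.msgref = cpu_to_le32(msgref);
		cmd->seg_hdr.segmentinfo =
			cpu_to_le32(FIELD_PREP(GENMASK(7, 4), num_segs) |
				    FIELD_PREP(GENMASK(3, 0), seg));
		cmd->seg_hdr.pdev_id = cpu_to_le32(1);	/* illustrative */
		memcpy(cmd->data, data + pos, chunk);

		ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_utf_cmdid);
		if (ret)
			return ret;

		pos += chunk;
	}

	return 0;
}
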
diff --git a/drivers/net/wireless/ath/ath11k/Kconfig b/drivers/net/wireless/ath/ath11k/Kconfig
index 2e935d381b6b..659ef134ef16 100644
--- a/drivers/net/wireless/ath/ath11k/Kconfig
+++ b/drivers/net/wireless/ath/ath11k/Kconfig
@@ -24,7 +24,7 @@ config ATH11K_PCI
select MHI_BUS
select QRTR
select QRTR_MHI
- select PCI_PWRCTL_PWRSEQ if HAVE_PWRCTL
+ select PCI_PWRCTRL_PWRSEQ if HAVE_PWRCTRL
help
This module adds support for PCIE bus
diff --git a/drivers/net/wireless/ath/ath11k/Makefile b/drivers/net/wireless/ath/ath11k/Makefile
index 43d2d8ddcdc0..d9092414b362 100644
--- a/drivers/net/wireless/ath/ath11k/Makefile
+++ b/drivers/net/wireless/ath/ath11k/Makefile
@@ -27,6 +27,7 @@ ath11k-$(CONFIG_ATH11K_TRACING) += trace.o
ath11k-$(CONFIG_THERMAL) += thermal.o
ath11k-$(CONFIG_ATH11K_SPECTRAL) += spectral.o
ath11k-$(CONFIG_PM) += wow.o
+ath11k-$(CONFIG_DEV_COREDUMP) += coredump.o
obj-$(CONFIG_ATH11K_AHB) += ath11k_ahb.o
ath11k_ahb-y += ahb.o
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index 97b12f51ef28..8dfe9b40c126 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -9,8 +9,8 @@
#include <linux/property.h>
#include <linux/of_device.h>
#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
#include <linux/dma-mapping.h>
-#include <linux/of_address.h>
#include <linux/iommu.h>
#include "ahb.h"
#include "debug.h"
@@ -397,7 +397,7 @@ static void ath11k_ahb_stop(struct ath11k_base *ab)
ath11k_ahb_ce_irqs_disable(ab);
ath11k_ahb_sync_ce_irqs(ab);
ath11k_ahb_kill_tasklets(ab);
- del_timer_sync(&ab->rx_replenish_retry);
+ timer_delete_sync(&ab->rx_replenish_retry);
ath11k_ce_cleanup_pipes(ab);
}
@@ -413,7 +413,7 @@ static int ath11k_ahb_power_up(struct ath11k_base *ab)
return ret;
}
-static void ath11k_ahb_power_down(struct ath11k_base *ab)
+static void ath11k_ahb_power_down(struct ath11k_base *ab, bool is_suspend)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
@@ -919,16 +919,10 @@ static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
struct device *dev = ab->dev;
- struct device_node *node;
struct resource r;
int ret;
- node = of_parse_phandle(dev->of_node, "memory-region", 0);
- if (!node)
- return -ENOENT;
-
- ret = of_address_to_resource(node, 0, &r);
- of_node_put(node);
+ ret = of_reserved_mem_region_to_resource(dev->of_node, 0, &r);
if (ret) {
dev_err(dev, "failed to resolve msa fixed region\n");
return ret;
@@ -937,12 +931,7 @@ static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab)
ab_ahb->fw.msa_paddr = r.start;
ab_ahb->fw.msa_size = resource_size(&r);
- node = of_parse_phandle(dev->of_node, "memory-region", 1);
- if (!node)
- return -ENOENT;
-
- ret = of_address_to_resource(node, 0, &r);
- of_node_put(node);
+ ret = of_reserved_mem_region_to_resource(dev->of_node, 1, &r);
if (ret) {
dev_err(dev, "failed to resolve ce fixed region\n");
return ret;
@@ -988,7 +977,7 @@ static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
struct device *host_dev = ab->dev;
- struct platform_device_info info = {0};
+ struct platform_device_info info = {};
struct iommu_domain *iommu_dom;
struct platform_device *pdev;
struct device_node *node;
@@ -1000,18 +989,18 @@ static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
if (!ab->hw_params.fixed_fw_mem)
return 0;
- ret = ath11k_ahb_setup_msa_resources(ab);
- if (ret) {
- ath11k_err(ab, "failed to setup msa resources\n");
- return ret;
- }
-
node = of_get_child_by_name(host_dev->of_node, "wifi-firmware");
if (!node) {
ab_ahb->fw.use_tz = true;
return 0;
}
+ ret = ath11k_ahb_setup_msa_resources(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to setup msa resources\n");
+ return ret;
+ }
+
info.fwnode = &node->fwnode;
info.parent = host_dev;
info.name = node->name;
@@ -1280,7 +1269,7 @@ static void ath11k_ahb_remove(struct platform_device *pdev)
struct ath11k_base *ab = platform_get_drvdata(pdev);
if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
- ath11k_ahb_power_down(ab);
+ ath11k_ahb_power_down(ab, false);
ath11k_debugfs_soc_destroy(ab);
ath11k_qmi_deinit_service(ab);
goto qmi_fail;
@@ -1290,6 +1279,7 @@ static void ath11k_ahb_remove(struct platform_device *pdev)
ath11k_core_deinit(ab);
qmi_fail:
+ ath11k_fw_destroy(ab);
ath11k_ahb_free_resources(ab);
}
@@ -1309,16 +1299,17 @@ static void ath11k_ahb_shutdown(struct platform_device *pdev)
ath11k_core_deinit(ab);
free_resources:
+ ath11k_fw_destroy(ab);
ath11k_ahb_free_resources(ab);
}
static struct platform_driver ath11k_ahb_driver = {
- .driver = {
- .name = "ath11k",
+ .driver = {
+ .name = "ath11k",
.of_match_table = ath11k_ahb_of_match,
},
- .probe = ath11k_ahb_probe,
- .remove_new = ath11k_ahb_remove,
+ .probe = ath11k_ahb_probe,
+ .remove = ath11k_ahb_remove,
.shutdown = ath11k_ahb_shutdown,
};
diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c
index e66e86bdec20..a7a163621b21 100644
--- a/drivers/net/wireless/ath/ath11k/ce.c
+++ b/drivers/net/wireless/ath/ath11k/ce.c
@@ -2,8 +2,10 @@
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
+#include <linux/export.h>
#include "dp_rx.h"
#include "debug.h"
#include "hif.h"
@@ -352,7 +354,8 @@ static int ath11k_ce_rx_post_pipe(struct ath11k_ce_pipe *pipe)
ret = ath11k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
if (ret) {
- ath11k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
+ ath11k_dbg(ab, ATH11K_DBG_CE, "failed to enqueue rx buf: %d\n",
+ ret);
dma_unmap_single(ab->dev, paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
@@ -394,10 +397,6 @@ static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe,
}
*nbytes = ath11k_hal_ce_dst_status_get_length(desc);
- if (*nbytes == 0) {
- ret = -EIO;
- goto err;
- }
*skb = pipe->dest_ring->skb[sw_index];
pipe->dest_ring->skb[sw_index] = NULL;
@@ -430,8 +429,8 @@ static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe)
dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
max_nbytes, DMA_FROM_DEVICE);
- if (unlikely(max_nbytes < nbytes)) {
- ath11k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
+ if (unlikely(max_nbytes < nbytes || nbytes == 0)) {
+ ath11k_warn(ab, "unexpected rx length (nbytes %d, max %d)",
nbytes, max_nbytes);
dev_kfree_skb_any(skb);
continue;
@@ -556,7 +555,7 @@ static int ath11k_ce_init_ring(struct ath11k_base *ab,
struct ath11k_ce_ring *ce_ring,
int ce_id, enum hal_ring_type type)
{
- struct hal_srng_params params = { 0 };
+ struct hal_srng_params params = {};
int ret;
params.ring_base_paddr = ce_ring->base_addr_ce_space;
@@ -908,7 +907,7 @@ EXPORT_SYMBOL(ath11k_ce_rx_post_buf);
void ath11k_ce_rx_replenish_retry(struct timer_list *t)
{
- struct ath11k_base *ab = from_timer(ab, t, rx_replenish_retry);
+ struct ath11k_base *ab = timer_container_of(ab, t, rx_replenish_retry);
ath11k_ce_rx_post_buf(ab);
}
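
The del_timer_sync()/from_timer() to timer_delete_sync()/timer_container_of() conversions above are pure API renames; behaviour is unchanged. The updated idiom, with hypothetical names, looks like:

/* Illustration only: demo_ctx/demo_retry/demo_stop are not real symbols. */
struct demo_ctx {
	struct timer_list retry_timer;
};

static void demo_retry(struct timer_list *t)
{
	struct demo_ctx *ctx = timer_container_of(ctx, t, retry_timer);

	/* ... re-post buffers, then optionally re-arm ... */
	mod_timer(&ctx->retry_timer, jiffies + msecs_to_jiffies(100));
}

static void demo_stop(struct demo_ctx *ctx)
{
	timer_delete_sync(&ctx->retry_timer);	/* was del_timer_sync() */
}
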
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index ccf4ad35fdc3..812686173ac8 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -1,9 +1,11 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/remoteproc.h>
@@ -123,6 +125,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_dual_stations = false,
+ .pdev_suspend = false,
},
{
.hw_rev = ATH11K_HW_IPQ6018_HW10,
@@ -207,6 +210,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
.support_dual_stations = false,
+ .pdev_suspend = false,
},
{
.name = "qca6390 hw2.0",
@@ -296,6 +300,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
.support_dual_stations = true,
+ .pdev_suspend = false,
},
{
.name = "qcn9074 hw1.0",
@@ -379,6 +384,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
.support_dual_stations = false,
+ .pdev_suspend = false,
},
{
.name = "wcn6855 hw2.0",
@@ -468,6 +474,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
.support_dual_stations = true,
+ .pdev_suspend = false,
},
{
.name = "wcn6855 hw2.1",
@@ -555,6 +562,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
.support_dual_stations = true,
+ .pdev_suspend = false,
},
{
.name = "wcn6750 hw1.0",
@@ -616,7 +624,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = false,
.supports_rssi_stats = true,
- .fw_wmi_diag_event = false,
+ .fw_wmi_diag_event = true,
.current_cc_support = true,
.dbr_debug_support = false,
.global_reset = false,
@@ -637,6 +645,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = true,
.support_fw_mac_sequence = true,
.support_dual_stations = false,
+ .pdev_suspend = true,
},
{
.hw_rev = ATH11K_HW_IPQ5018_HW10,
@@ -719,6 +728,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
.support_dual_stations = false,
+ .pdev_suspend = false,
},
{
.name = "qca2066 hw2.1",
@@ -809,14 +819,183 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.support_fw_mac_sequence = true,
.support_dual_stations = true,
},
-};
+ {
+ .name = "qca6698aq hw2.1",
+ .hw_rev = ATH11K_HW_QCA6698AQ_HW21,
+ .fw = {
+ .dir = "QCA6698AQ/hw2.1",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = 3,
+ .bdf_addr = 0x4B0C0000,
+ .hw_ops = &wcn6855_ops,
+ .ring_mask = &ath11k_hw_ring_mask_qca6390,
+ .internal_sleep_clock = true,
+ .regs = &wcn6855_regs,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
+ .host_ce_config = ath11k_host_ce_config_qca6390,
+ .ce_count = 9,
+ .target_ce_config = ath11k_target_ce_config_wlan_qca6390,
+ .target_ce_count = 9,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
+ .svc_to_ce_map_len = 14,
+ .single_pdev_only = true,
+ .rxdma1_enable = false,
+ .num_rxdma_per_pdev = 2,
+ .rx_mac_buf_ring = true,
+ .vdev_start_delay = true,
+ .htt_peer_map_v2 = false,
-static inline struct ath11k_pdev *ath11k_core_get_single_pdev(struct ath11k_base *ab)
-{
- WARN_ON(!ab->hw_params.single_pdev_only);
+ .spectral = {
+ .fft_sz = 0,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 0,
+ .max_fft_bins = 0,
+ .fragment_160mhz = false,
+ },
- return &ab->pdevs[0];
-}
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
+ .supports_monitor = false,
+ .supports_shadow_regs = true,
+ .idle_ps = true,
+ .supports_sta_ps = true,
+ .coldboot_cal_mm = false,
+ .coldboot_cal_ftm = false,
+ .cbcal_restart_fw = false,
+ .fw_mem_mode = 0,
+ .num_vdevs = 2 + 1,
+ .num_peers = 512,
+ .supports_suspend = true,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
+ .supports_regdb = true,
+ .fix_l1ss = false,
+ .credit_flow = true,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
+ .hal_params = &ath11k_hw_hal_params_qca6390,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = false,
+ .supports_rssi_stats = true,
+ .fw_wmi_diag_event = true,
+ .current_cc_support = true,
+ .dbr_debug_support = false,
+ .global_reset = true,
+ .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855,
+ .m3_fw_support = true,
+ .fixed_bdf_addr = false,
+ .fixed_mem_region = false,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .fixed_fw_mem = false,
+ .support_off_channel_tx = true,
+ .supports_multi_bssid = true,
+
+ .sram_dump = {
+ .start = 0x01400000,
+ .end = 0x0177ffff,
+ },
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
+ .support_fw_mac_sequence = true,
+ .support_dual_stations = true,
+ .pdev_suspend = false,
+ },
+};
+
+static const struct dmi_system_id ath11k_pm_quirk_table[] = {
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* X13 G4 AMD #1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21J3"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* X13 G4 AMD #2 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21J4"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* T14 G4 AMD #1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K3"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* T14 G4 AMD #2 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K4"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* P14s G4 AMD #1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K5"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* P14s G4 AMD #2 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K6"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* T16 G2 AMD #1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K7"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* T16 G2 AMD #2 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K8"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* P16s G2 AMD #1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K9"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* P16s G2 AMD #2 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21KA"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* T14s G4 AMD #1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21F8"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* T14s G4 AMD #2 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21F9"),
+ },
+ },
+ {}
+};
void ath11k_fw_stats_pdevs_free(struct list_head *head)
{
@@ -855,6 +1034,7 @@ void ath11k_fw_stats_init(struct ath11k *ar)
INIT_LIST_HEAD(&ar->fw_stats.bcn);
init_completion(&ar->fw_stats_complete);
+ init_completion(&ar->fw_stats_done);
}
void ath11k_fw_stats_free(struct ath11k_fw_stats *stats)
@@ -876,23 +1056,33 @@ bool ath11k_core_coldboot_cal_support(struct ath11k_base *ab)
return ab->hw_params.coldboot_cal_mm;
}
-int ath11k_core_suspend(struct ath11k_base *ab)
+/* Check whether we need to continue with the suspend/resume operation.
+ * Return:
+ * a negative value: an error occurred, do not continue.
+ * 0: no error, but do not continue.
+ * a positive value: no error, continue.
+ */
+static int ath11k_core_continue_suspend_resume(struct ath11k_base *ab)
{
- int ret;
- struct ath11k_pdev *pdev;
struct ath11k *ar;
if (!ab->hw_params.supports_suspend)
return -EOPNOTSUPP;
/* so far single_pdev_only chips have supports_suspend as true
- * and only the first pdev is valid.
+ * so pass 0 as a dummy pdev_id here.
*/
- pdev = ath11k_core_get_single_pdev(ab);
- ar = pdev->ar;
+ ar = ab->pdevs[0].ar;
if (!ar || ar->state != ATH11K_STATE_OFF)
return 0;
+ return 1;
+}
+
+static int ath11k_core_suspend_wow(struct ath11k_base *ab)
+{
+ int ret;
+
ret = ath11k_dp_rx_pktlog_stop(ab, true);
if (ret) {
ath11k_warn(ab, "failed to stop dp rx (and timer) pktlog during suspend: %d\n",
@@ -900,7 +1090,10 @@ int ath11k_core_suspend(struct ath11k_base *ab)
return ret;
}
- ret = ath11k_mac_wait_tx_complete(ar);
+ /* So far only single_pdev_only devices can reach here,
+ * so it is valid to handle the first, and the only, pdev.
+ */
+ ret = ath11k_mac_wait_tx_complete(ab->pdevs[0].ar);
if (ret) {
ath11k_warn(ab, "failed to wait tx complete: %d\n", ret);
return ret;
@@ -933,24 +1126,146 @@ int ath11k_core_suspend(struct ath11k_base *ab)
return 0;
}
+
+static int ath11k_core_suspend_default(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_dp_rx_pktlog_stop(ab, true);
+ if (ret) {
+ ath11k_warn(ab, "failed to stop dp rx (and timer) pktlog during suspend: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* So far only single_pdev_only devices can reach here,
+ * so it is valid to handle the first, and the only, pdev.
+ */
+ ret = ath11k_mac_wait_tx_complete(ab->pdevs[0].ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to wait tx complete: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_dp_rx_pktlog_stop(ab, false);
+ if (ret) {
+ ath11k_warn(ab, "failed to stop dp rx pktlog during suspend: %d\n",
+ ret);
+ return ret;
+ }
+
+ ath11k_ce_stop_shadow_timers(ab);
+ ath11k_dp_stop_shadow_timers(ab);
+
+ /* The PM framework skips the suspend_late/resume_early callbacks
+ * if another device reports an error from its suspend callback.
+ * However ath11k_core_resume() would still be called, because we
+ * return success here and the kernel therefore puts us on the
+ * dpm_suspended_list. Since we would not go through a power down/up
+ * cycle in that case, complete(&ab->restart_completed) would never
+ * be called from ath11k_core_restart(), and ath11k_core_resume()
+ * would time out. So complete it here to avoid that. This is also
+ * safe when no error happens and suspend_late/resume_early do run,
+ * because the completion is reinitialized in
+ * ath11k_core_resume_early().
+ */
+ complete(&ab->restart_completed);
+
+ return 0;
+}
+
+int ath11k_core_suspend(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_core_continue_suspend_resume(ab);
+ if (ret <= 0)
+ return ret;
+
+ if (ab->actual_pm_policy == ATH11K_PM_WOW)
+ return ath11k_core_suspend_wow(ab);
+
+ return ath11k_core_suspend_default(ab);
+}
EXPORT_SYMBOL(ath11k_core_suspend);
-int ath11k_core_resume(struct ath11k_base *ab)
+int ath11k_core_suspend_late(struct ath11k_base *ab)
{
int ret;
- struct ath11k_pdev *pdev;
+
+ ret = ath11k_core_continue_suspend_resume(ab);
+ if (ret <= 0)
+ return ret;
+
+ if (ab->actual_pm_policy == ATH11K_PM_WOW)
+ return 0;
+
+ ath11k_hif_irq_disable(ab);
+ ath11k_hif_ce_irq_disable(ab);
+
+ ath11k_hif_power_down(ab, true);
+
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_core_suspend_late);
+
+int ath11k_core_resume_early(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_core_continue_suspend_resume(ab);
+ if (ret <= 0)
+ return ret;
+
+ if (ab->actual_pm_policy == ATH11K_PM_WOW)
+ return 0;
+
+ reinit_completion(&ab->restart_completed);
+ ret = ath11k_hif_power_up(ab);
+ if (ret)
+ ath11k_warn(ab, "failed to power up hif during resume: %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(ath11k_core_resume_early);
+
+static int ath11k_core_resume_default(struct ath11k_base *ab)
+{
struct ath11k *ar;
+ long time_left;
+ int ret;
- if (!ab->hw_params.supports_suspend)
- return -EOPNOTSUPP;
+ time_left = wait_for_completion_timeout(&ab->restart_completed,
+ ATH11K_RESET_TIMEOUT_HZ);
+ if (time_left == 0) {
+ ath11k_warn(ab, "timeout while waiting for restart complete");
+ return -ETIMEDOUT;
+ }
- /* so far signle_pdev_only chips have supports_suspend as true
- * and only the first pdev is valid.
+ /* So far only single_pdev_only devices can reach here,
+ * so it is valid to handle the first, and the only, pdev.
*/
- pdev = ath11k_core_get_single_pdev(ab);
- ar = pdev->ar;
- if (!ar || ar->state != ATH11K_STATE_OFF)
- return 0;
+ ar = ab->pdevs[0].ar;
+ if (ab->hw_params.current_cc_support &&
+ ar->alpha2[0] != 0 && ar->alpha2[1] != 0) {
+ ret = ath11k_reg_set_cc(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to set country code during resume: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ ret = ath11k_dp_rx_pktlog_start(ab);
+ if (ret)
+ ath11k_warn(ab, "failed to start rx pktlog during resume: %d\n",
+ ret);
+
+ return ret;
+}
+
+static int ath11k_core_resume_wow(struct ath11k_base *ab)
+{
+ int ret;
ret = ath11k_hif_resume(ab);
if (ret) {
@@ -976,6 +1291,20 @@ int ath11k_core_resume(struct ath11k_base *ab)
return 0;
}
+
+int ath11k_core_resume(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_core_continue_suspend_resume(ab);
+ if (ret <= 0)
+ return ret;
+
+ if (ab->actual_pm_policy == ATH11K_PM_WOW)
+ return ath11k_core_resume_wow(ab);
+
+ return ath11k_core_resume_default(ab);
+}
EXPORT_SYMBOL(ath11k_core_resume);
static void ath11k_core_check_cc_code_bdfext(const struct dmi_header *hdr, void *data)
@@ -1079,9 +1408,12 @@ int ath11k_core_check_dt(struct ath11k_base *ab)
if (!node)
return -ENOENT;
- of_property_read_string(node, "qcom,ath11k-calibration-variant",
+ of_property_read_string(node, "qcom,calibration-variant",
&variant);
if (!variant)
+ of_property_read_string(node, "qcom,ath11k-calibration-variant",
+ &variant);
+ if (!variant)
return -ENODATA;
if (strscpy(ab->qmi.target.bdf_ext, variant, max_len) < 0)
@@ -1103,7 +1435,7 @@ static int __ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
enum ath11k_bdf_name_type name_type)
{
/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
- char variant[9 + ATH11K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
+ char variant[9 + ATH11K_QMI_BDF_EXT_STR_LENGTH] = {};
if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
scnprintf(variant, sizeof(variant), ",variant=%s",
@@ -1669,11 +2001,47 @@ err_pdev_debug:
return ret;
}
+static void ath11k_core_pdev_suspend_target(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ unsigned long time_left;
+ int ret;
+ int i;
+
+ if (!ab->hw_params.pdev_suspend)
+ return;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+
+ reinit_completion(&ab->htc_suspend);
+
+ ret = ath11k_wmi_pdev_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
+ pdev->pdev_id);
+ if (ret) {
+ ath11k_warn(ab, "could not suspend target :%d\n", ret);
+ /* pointless to try other pdevs */
+ return;
+ }
+
+ time_left = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ);
+
+ if (!time_left) {
+ ath11k_warn(ab, "suspend timed out - target pause event never came\n");
+ /* pointless to try other pdevs */
+ return;
+ }
+ }
+}
+
static void ath11k_core_pdev_destroy(struct ath11k_base *ab)
{
ath11k_spectral_deinit(ab);
ath11k_thermal_unregister(ab);
ath11k_mac_unregister(ab);
+ ath11k_core_pdev_suspend_target(ab);
ath11k_hif_irq_disable(ab);
ath11k_dp_pdev_free(ab);
ath11k_debugfs_pdev_destroy(ab);
@@ -1811,6 +2179,20 @@ int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
{
int ret;
+ switch (ath11k_crypto_mode) {
+ case ATH11K_CRYPT_MODE_SW:
+ set_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags);
+ set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
+ break;
+ case ATH11K_CRYPT_MODE_HW:
+ clear_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags);
+ clear_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
+ break;
+ default:
+ ath11k_info(ab, "invalid crypto_mode: %d\n", ath11k_crypto_mode);
+ return -EINVAL;
+ }
+
ret = ath11k_core_start_firmware(ab, ab->fw_mode);
if (ret) {
ath11k_err(ab, "failed to start firmware: %d\n", ret);
@@ -1829,20 +2211,6 @@ int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
goto err_firmware_stop;
}
- switch (ath11k_crypto_mode) {
- case ATH11K_CRYPT_MODE_SW:
- set_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags);
- set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
- break;
- case ATH11K_CRYPT_MODE_HW:
- clear_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags);
- clear_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
- break;
- default:
- ath11k_info(ab, "invalid crypto_mode: %d\n", ath11k_crypto_mode);
- return -EINVAL;
- }
-
if (ath11k_frame_mode == ATH11K_HW_TXRX_RAW)
set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
@@ -1889,14 +2257,10 @@ static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab)
mutex_unlock(&ab->core_lock);
ath11k_dp_free(ab);
- ath11k_hal_srng_deinit(ab);
+ ath11k_hal_srng_clear(ab);
ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS(ab))) - 1;
- ret = ath11k_hal_srng_init(ab);
- if (ret)
- return ret;
-
clear_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags);
ret = ath11k_core_qmi_firmware_ready(ab);
@@ -1915,6 +2279,7 @@ err_hal_srng_deinit:
void ath11k_core_halt(struct ath11k *ar)
{
struct ath11k_base *ab = ar->ab;
+ struct list_head *pos, *n;
lockdep_assert_held(&ar->conf_mutex);
@@ -1924,12 +2289,18 @@ void ath11k_core_halt(struct ath11k *ar)
ath11k_mac_scan_finish(ar);
ath11k_mac_peer_cleanup_all(ar);
cancel_delayed_work_sync(&ar->scan.timeout);
+ cancel_work_sync(&ar->channel_update_work);
cancel_work_sync(&ar->regd_update_work);
cancel_work_sync(&ab->update_11d_work);
rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
synchronize_rcu();
- INIT_LIST_HEAD(&ar->arvifs);
+
+ spin_lock_bh(&ar->data_lock);
+ list_for_each_safe(pos, n, &ar->arvifs)
+ list_del_init(pos);
+ spin_unlock_bh(&ar->data_lock);
+
idr_init(&ar->txmgmt_idr);
}
@@ -2069,6 +2440,8 @@ static void ath11k_core_restart(struct work_struct *work)
if (!ab->is_reset)
ath11k_core_post_reconfigure_recovery(ab);
+
+ complete(&ab->restart_completed);
}
static void ath11k_core_reset(struct work_struct *work)
@@ -2125,6 +2498,7 @@ static void ath11k_core_reset(struct work_struct *work)
reinit_completion(&ab->recovery_start);
atomic_set(&ab->recovery_start_count, 0);
+ ath11k_coredump_collect(ab);
ath11k_core_pre_reconfigure_recovery(ab);
reinit_completion(&ab->reconfigure_complete);
@@ -2138,7 +2512,7 @@ static void ath11k_core_reset(struct work_struct *work)
ath11k_hif_irq_disable(ab);
ath11k_hif_ce_irq_disable(ab);
- ath11k_hif_power_down(ab);
+ ath11k_hif_power_down(ab, false);
ath11k_hif_power_up(ab);
ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset started\n");
@@ -2188,17 +2562,74 @@ int ath11k_core_pre_init(struct ath11k_base *ab)
}
EXPORT_SYMBOL(ath11k_core_pre_init);
+static int ath11k_core_pm_notify(struct notifier_block *nb,
+ unsigned long action, void *nouse)
+{
+ struct ath11k_base *ab = container_of(nb, struct ath11k_base,
+ pm_nb);
+
+ switch (action) {
+ case PM_SUSPEND_PREPARE:
+ ab->actual_pm_policy = ab->pm_policy;
+ break;
+ case PM_HIBERNATION_PREPARE:
+ ab->actual_pm_policy = ATH11K_PM_DEFAULT;
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static int ath11k_core_pm_notifier_register(struct ath11k_base *ab)
+{
+ ab->pm_nb.notifier_call = ath11k_core_pm_notify;
+ return register_pm_notifier(&ab->pm_nb);
+}
+
+void ath11k_core_pm_notifier_unregister(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = unregister_pm_notifier(&ab->pm_nb);
+ if (ret)
+ /* just warn here, there is nothing we can do if this fails */
+ ath11k_warn(ab, "failed to unregister PM notifier %d\n", ret);
+}
+EXPORT_SYMBOL(ath11k_core_pm_notifier_unregister);
+
int ath11k_core_init(struct ath11k_base *ab)
{
+ const struct dmi_system_id *dmi_id;
int ret;
+ dmi_id = dmi_first_match(ath11k_pm_quirk_table);
+ if (dmi_id)
+ ab->pm_policy = (kernel_ulong_t)dmi_id->driver_data;
+ else
+ ab->pm_policy = ATH11K_PM_DEFAULT;
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "pm policy %u\n", ab->pm_policy);
+
+ ret = ath11k_core_pm_notifier_register(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to register PM notifier: %d\n", ret);
+ return ret;
+ }
+
ret = ath11k_core_soc_create(ab);
if (ret) {
ath11k_err(ab, "failed to create soc core: %d\n", ret);
- return ret;
+ goto err_unregister_pm_notifier;
}
return 0;
+
+err_unregister_pm_notifier:
+ ath11k_core_pm_notifier_unregister(ab);
+
+ return ret;
}
EXPORT_SYMBOL(ath11k_core_init);
@@ -2211,10 +2642,10 @@ void ath11k_core_deinit(struct ath11k_base *ab)
mutex_unlock(&ab->core_lock);
- ath11k_hif_power_down(ab);
+ ath11k_hif_power_down(ab, false);
ath11k_mac_destroy(ab);
ath11k_core_soc_destroy(ab);
- ath11k_fw_destroy(ab);
+ ath11k_core_pm_notifier_unregister(ab);
}
EXPORT_SYMBOL(ath11k_core_deinit);
@@ -2261,9 +2692,11 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
INIT_WORK(&ab->restart_work, ath11k_core_restart);
INIT_WORK(&ab->update_11d_work, ath11k_update_11d);
INIT_WORK(&ab->reset_work, ath11k_core_reset);
+ INIT_WORK(&ab->dump_work, ath11k_coredump_upload);
timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0);
init_completion(&ab->htc_suspend);
init_completion(&ab->wow.wakeup_completed);
+ init_completion(&ab->restart_completed);
ab->dev = dev;
ab->hif.bus = bus;
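
The suspend path is now split into a WoWLAN flow and a default full power-down flow, selected per board by the DMI quirk table and forced to the default flow for hibernation through the PM notifier. The bus layer is expected to call the new late/early hooks as well; a hedged sketch of how dev_pm_ops might be wired (the ath11k_pci_pm_* wrapper names are illustrative, not necessarily the exact upstream ones):

/* Sketch only: wiring the new core suspend/resume hooks into dev_pm_ops. */
static int __maybe_unused ath11k_pci_pm_suspend(struct device *dev)
{
	struct ath11k_base *ab = dev_get_drvdata(dev);

	return ath11k_core_suspend(ab);
}

static int __maybe_unused ath11k_pci_pm_suspend_late(struct device *dev)
{
	struct ath11k_base *ab = dev_get_drvdata(dev);

	/* powers the device fully down unless the WoW policy is active */
	return ath11k_core_suspend_late(ab);
}

static int __maybe_unused ath11k_pci_pm_resume_early(struct device *dev)
{
	struct ath11k_base *ab = dev_get_drvdata(dev);

	/* powers the device back up; resume then waits for restart_completed */
	return ath11k_core_resume_early(ab);
}

static int __maybe_unused ath11k_pci_pm_resume(struct device *dev)
{
	struct ath11k_base *ab = dev_get_drvdata(dev);

	return ath11k_core_resume(ab);
}

static const struct dev_pm_ops ath11k_pci_pm_ops = {
	.suspend = ath11k_pci_pm_suspend,
	.resume = ath11k_pci_pm_resume,
	.suspend_late = ath11k_pci_pm_suspend_late,
	.resume_early = ath11k_pci_pm_resume_early,
};
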
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 09c37e19a168..e8780b05ce11 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_CORE_H
@@ -16,6 +16,8 @@
#include <linux/rhashtable.h>
#include <linux/average.h>
#include <linux/firmware.h>
+#include <linux/suspend.h>
+#include <linux/of.h>
#include "qmi.h"
#include "htc.h"
@@ -32,6 +34,7 @@
#include "spectral.h"
#include "wow.h"
#include "fw.h"
+#include "coredump.h"
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -148,6 +151,7 @@ enum ath11k_hw_rev {
ATH11K_HW_WCN6750_HW10,
ATH11K_HW_IPQ5018_HW10,
ATH11K_HW_QCA2066_HW21,
+ ATH11K_HW_QCA6698AQ_HW21,
};
enum ath11k_firmware_mode {
@@ -340,7 +344,6 @@ struct ath11k_chan_power_info {
* @ap_power_type: type of power (SP/LPI/VLP)
* @num_pwr_levels: number of power levels
* @reg_max: Array of maximum TX power (dBm) per PSD value
- * @ap_constraint_power: AP constraint power (dBm)
* @tpe: TPE values processed from TPE IE
* @chan_power_info: power info to send to firmware
*/
@@ -350,7 +353,6 @@ struct ath11k_reg_tpc_power_info {
enum wmi_reg_6ghz_ap_type ap_power_type;
u8 num_pwr_levels;
u8 reg_max[ATH11K_NUM_PWR_LEVELS];
- u8 ap_constraint_power;
s8 tpe[ATH11K_NUM_PWR_LEVELS];
struct ath11k_chan_power_info chan_power_info[ATH11K_NUM_PWR_LEVELS];
};
@@ -370,8 +372,8 @@ struct ath11k_vif {
struct ath11k *ar;
struct ieee80211_vif *vif;
- u16 tx_seq_no;
struct wmi_wmm_params_all_arg wmm_params;
+ struct wmi_wmm_params_all_arg muedca_params;
struct list_head list;
union {
struct {
@@ -409,6 +411,8 @@ struct ath11k_vif {
bool do_not_send_tmpl;
struct ath11k_arp_ns_offload arp_ns_offload;
struct ath11k_rekey_data rekey_data;
+ u32 num_stations;
+ bool reinstall_group_keys;
struct ath11k_reg_tpc_power_info reg_tpc_info;
@@ -599,6 +603,8 @@ struct ath11k_fw_stats {
struct list_head pdevs;
struct list_head vdevs;
struct list_head bcn;
+ u32 num_vdev_recvd;
+ u32 num_bcn_recvd;
};
struct ath11k_dbg_htt_stats {
@@ -687,7 +693,7 @@ struct ath11k {
struct mutex conf_mutex;
/* protects the radio specific data like debug stats, ppdu_stats_info stats,
* vdev_stop_status info, scan data, ath11k_sta info, ath11k_vif info,
- * channel context data, survey info, test mode data.
+ * channel context data, survey info, test mode data, channel_update_queue.
*/
spinlock_t data_lock;
@@ -745,6 +751,9 @@ struct ath11k {
struct completion bss_survey_done;
struct work_struct regd_update_work;
+ struct work_struct channel_update_work;
+ /* protected with data_lock */
+ struct list_head channel_update_queue;
struct work_struct wmi_mgmt_tx_work;
struct sk_buff_head wmi_mgmt_tx_queue;
@@ -780,7 +789,7 @@ struct ath11k {
u8 alpha2[REG_ALPHA2_LEN + 1];
struct ath11k_fw_stats fw_stats;
struct completion fw_stats_complete;
- bool fw_stats_done;
+ struct completion fw_stats_done;
/* protected by conf_mutex */
bool ps_state_enable;
@@ -889,6 +898,11 @@ struct ath11k_msi_config {
u16 hw_rev;
};
+enum ath11k_pm_policy {
+ ATH11K_PM_DEFAULT,
+ ATH11K_PM_WOW,
+};
+
/* Master structure to hold the hw data which may be used in core module */
struct ath11k_base {
enum ath11k_hw_rev hw_rev;
@@ -902,6 +916,10 @@ struct ath11k_base {
/* HW channel counters frequency value in hertz common to all MACs */
u32 cc_freq_hz;
+ struct ath11k_dump_file_data *dump_data;
+ size_t ath11k_coredump_len;
+ struct work_struct dump_work;
+
struct ath11k_htc htc;
struct ath11k_dp dp;
@@ -1043,6 +1061,8 @@ struct ath11k_base {
DECLARE_BITMAP(fw_features, ATH11K_FW_FEATURE_COUNT);
} fw;
+ struct completion restart_completed;
+
#ifdef CONFIG_NL80211_TESTMODE
struct {
u32 data_pos;
@@ -1051,6 +1071,10 @@ struct ath11k_base {
} testmode;
#endif
+ enum ath11k_pm_policy pm_policy;
+ enum ath11k_pm_policy actual_pm_policy;
+ struct notifier_block pm_nb;
+
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
};
@@ -1242,8 +1266,10 @@ void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd);
int ath11k_core_check_dt(struct ath11k_base *ath11k);
int ath11k_core_check_smbios(struct ath11k_base *ab);
void ath11k_core_halt(struct ath11k *ar);
+int ath11k_core_resume_early(struct ath11k_base *ab);
int ath11k_core_resume(struct ath11k_base *ab);
int ath11k_core_suspend(struct ath11k_base *ab);
+int ath11k_core_suspend_late(struct ath11k_base *ab);
void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab);
bool ath11k_core_coldboot_cal_support(struct ath11k_base *ab);
@@ -1299,8 +1325,16 @@ static inline void ath11k_core_create_firmware_path(struct ath11k_base *ab,
const char *filename,
void *buf, size_t buf_len)
{
- snprintf(buf, buf_len, "%s/%s/%s", ATH11K_FW_DIR,
- ab->hw_params.fw.dir, filename);
+ const char *fw_name = NULL;
+
+ of_property_read_string(ab->dev->of_node, "firmware-name", &fw_name);
+
+ if (fw_name && strncmp(filename, "board", 5))
+ snprintf(buf, buf_len, "%s/%s/%s/%s", ATH11K_FW_DIR,
+ ab->hw_params.fw.dir, fw_name, filename);
+ else
+ snprintf(buf, buf_len, "%s/%s/%s", ATH11K_FW_DIR,
+ ab->hw_params.fw.dir, filename);
}
static inline const char *ath11k_bus_str(enum ath11k_bus bus)
@@ -1315,4 +1349,6 @@ static inline const char *ath11k_bus_str(enum ath11k_bus bus)
return "unknown";
}
+void ath11k_core_pm_notifier_unregister(struct ath11k_base *ab);
+
#endif /* _CORE_H_ */
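
With the optional firmware-name DT property, non-board firmware files gain an extra directory level while board files keep the flat layout. For illustration only, assuming firmware-name = "foo" on a part whose fw.dir is "WCN6855/hw2.0":

char path[100];

ath11k_core_create_firmware_path(ab, "amss.bin", path, sizeof(path));
/* -> "ath11k/WCN6855/hw2.0/foo/amss.bin" */

ath11k_core_create_firmware_path(ab, "board-2.bin", path, sizeof(path));
/* -> "ath11k/WCN6855/hw2.0/board-2.bin" (board* files skip the variant dir) */
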
diff --git a/drivers/net/wireless/ath/ath11k/coredump.c b/drivers/net/wireless/ath/ath11k/coredump.c
new file mode 100644
index 000000000000..1949d57b007a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/coredump.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+#include <linux/devcoredump.h>
+#include <linux/export.h>
+#include "hif.h"
+#include "coredump.h"
+#include "debug.h"
+
+enum
+ath11k_fw_crash_dump_type ath11k_coredump_get_dump_type(int type)
+{
+ enum ath11k_fw_crash_dump_type dump_type;
+
+ switch (type) {
+ case HOST_DDR_REGION_TYPE:
+ dump_type = FW_CRASH_DUMP_REMOTE_MEM_DATA;
+ break;
+ case M3_DUMP_REGION_TYPE:
+ dump_type = FW_CRASH_DUMP_M3_DUMP;
+ break;
+ case PAGEABLE_MEM_REGION_TYPE:
+ dump_type = FW_CRASH_DUMP_PAGEABLE_DATA;
+ break;
+ case BDF_MEM_REGION_TYPE:
+ case CALDB_MEM_REGION_TYPE:
+ dump_type = FW_CRASH_DUMP_NONE;
+ break;
+ default:
+ dump_type = FW_CRASH_DUMP_TYPE_MAX;
+ break;
+ }
+
+ return dump_type;
+}
+EXPORT_SYMBOL(ath11k_coredump_get_dump_type);
+
+void ath11k_coredump_upload(struct work_struct *work)
+{
+ struct ath11k_base *ab = container_of(work, struct ath11k_base, dump_work);
+
+ ath11k_info(ab, "Uploading coredump\n");
+ /* dev_coredumpv() takes ownership of the buffer */
+ dev_coredumpv(ab->dev, ab->dump_data, ab->ath11k_coredump_len, GFP_KERNEL);
+ ab->dump_data = NULL;
+}
+
+void ath11k_coredump_collect(struct ath11k_base *ab)
+{
+ ath11k_hif_coredump_download(ab);
+}
diff --git a/drivers/net/wireless/ath/ath11k/coredump.h b/drivers/net/wireless/ath/ath11k/coredump.h
new file mode 100644
index 000000000000..3960d9385261
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/coredump.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef _ATH11K_COREDUMP_H_
+#define _ATH11K_COREDUMP_H_
+
+#define ATH11K_FW_CRASH_DUMP_V2 2
+
+enum ath11k_fw_crash_dump_type {
+ FW_CRASH_DUMP_PAGING_DATA,
+ FW_CRASH_DUMP_RDDM_DATA,
+ FW_CRASH_DUMP_REMOTE_MEM_DATA,
+ FW_CRASH_DUMP_PAGEABLE_DATA,
+ FW_CRASH_DUMP_M3_DUMP,
+ FW_CRASH_DUMP_NONE,
+
+ /* keep last */
+ FW_CRASH_DUMP_TYPE_MAX,
+};
+
+#define COREDUMP_TLV_HDR_SIZE 8
+
+struct ath11k_tlv_dump_data {
+ /* see ath11k_fw_crash_dump_type above */
+ __le32 type;
+
+ /* in bytes */
+ __le32 tlv_len;
+
+ /* pad to 32-bit boundaries as needed */
+ u8 tlv_data[];
+} __packed;
+
+struct ath11k_dump_file_data {
+ /* "ATH11K-FW-DUMP" */
+ char df_magic[16];
+ /* total dump len in bytes */
+ __le32 len;
+ /* file dump version */
+ __le32 version;
+ /* pci device id */
+ __le32 chip_id;
+ /* qrtr instance id */
+ __le32 qrtr_id;
+ /* pci domain id */
+ __le32 bus_id;
+ guid_t guid;
+ /* time-of-day stamp */
+ __le64 tv_sec;
+ /* time-of-day stamp, nano-seconds */
+ __le64 tv_nsec;
+ /* room for growth w/out changing binary format */
+ u8 unused[128];
+ u8 data[];
+} __packed;
+
+#ifdef CONFIG_DEV_COREDUMP
+enum ath11k_fw_crash_dump_type ath11k_coredump_get_dump_type(int type);
+void ath11k_coredump_upload(struct work_struct *work);
+void ath11k_coredump_collect(struct ath11k_base *ab);
+#else
+static inline enum
+ath11k_fw_crash_dump_type ath11k_coredump_get_dump_type(int type)
+{
+ return FW_CRASH_DUMP_TYPE_MAX;
+}
+
+static inline void ath11k_coredump_upload(struct work_struct *work)
+{
+}
+
+static inline void ath11k_coredump_collect(struct ath11k_base *ab)
+{
+}
+#endif
+
+#endif
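
The dump file format above is a fixed ath11k_dump_file_data header followed by TLV records, handed to dev_coredumpv() from dump_work. A simplified sketch of how a collector might assemble a single-region dump (region iteration, qrtr/chip ids and buffer sizing are glossed over, and ab->workqueue is assumed):

/* Sketch only: build one TLV for a firmware memory region and queue the
 * upload; dev_coredumpv() later takes ownership of the vmalloc'd buffer.
 */
static void ath11k_coredump_build_sketch(struct ath11k_base *ab, const void *seg,
					 u32 seg_len,
					 enum ath11k_fw_crash_dump_type type)
{
	struct ath11k_dump_file_data *file_data;
	struct ath11k_tlv_dump_data *tlv;
	size_t total = sizeof(*file_data) + COREDUMP_TLV_HDR_SIZE + seg_len;

	file_data = vzalloc(total);
	if (!file_data)
		return;

	strscpy(file_data->df_magic, "ATH11K-FW-DUMP", sizeof(file_data->df_magic));
	file_data->len = cpu_to_le32(total);
	file_data->version = cpu_to_le32(ATH11K_FW_CRASH_DUMP_V2);
	guid_gen(&file_data->guid);

	tlv = (struct ath11k_tlv_dump_data *)file_data->data;
	tlv->type = cpu_to_le32(type);
	tlv->tlv_len = cpu_to_le32(seg_len);
	memcpy(tlv->tlv_data, seg, seg_len);

	ab->dump_data = file_data;
	ab->ath11k_coredump_len = total;
	queue_work(ab->workqueue, &ab->dump_work);
}
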
diff --git a/drivers/net/wireless/ath/ath11k/dbring.c b/drivers/net/wireless/ath/ath11k/dbring.c
index fbb6e8d8a476..520d8b8662a2 100644
--- a/drivers/net/wireless/ath/ath11k/dbring.c
+++ b/drivers/net/wireless/ath/ath11k/dbring.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include "core.h"
@@ -153,7 +154,7 @@ int ath11k_dbring_wmi_cfg_setup(struct ath11k *ar,
struct ath11k_dbring *ring,
enum wmi_direct_buffer_module id)
{
- struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd param = {0};
+ struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd param = {};
int ret;
if (id >= WMI_DIRECT_BUF_MAX)
diff --git a/drivers/net/wireless/ath/ath11k/debug.c b/drivers/net/wireless/ath/ath11k/debug.c
index 2b8544355fc1..37d23a559ba3 100644
--- a/drivers/net/wireless/ath/ath11k/debug.c
+++ b/drivers/net/wireless/ath/ath11k/debug.c
@@ -2,8 +2,10 @@
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
+#include <linux/export.h>
#include <linux/vmalloc.h>
#include "core.h"
#include "debug.h"
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
index 57281a135dd7..977f945b6e66 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -1,9 +1,11 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
+#include <linux/export.h>
#include <linux/vmalloc.h>
#include "debugfs.h"
@@ -93,57 +95,14 @@ void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
spin_unlock_bh(&dbr_data->lock);
}
-static void ath11k_debugfs_fw_stats_reset(struct ath11k *ar)
-{
- spin_lock_bh(&ar->data_lock);
- ar->fw_stats_done = false;
- ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs);
- ath11k_fw_stats_vdevs_free(&ar->fw_stats.vdevs);
- spin_unlock_bh(&ar->data_lock);
-}
-
void ath11k_debugfs_fw_stats_process(struct ath11k *ar, struct ath11k_fw_stats *stats)
{
struct ath11k_base *ab = ar->ab;
- struct ath11k_pdev *pdev;
- bool is_end;
- static unsigned int num_vdev, num_bcn;
- size_t total_vdevs_started = 0;
- int i;
-
- /* WMI_REQUEST_PDEV_STAT request has been already processed */
-
- if (stats->stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) {
- ar->fw_stats_done = true;
- return;
- }
-
- if (stats->stats_id == WMI_REQUEST_VDEV_STAT) {
- if (list_empty(&stats->vdevs)) {
- ath11k_warn(ab, "empty vdev stats");
- return;
- }
- /* FW sends all the active VDEV stats irrespective of PDEV,
- * hence limit until the count of all VDEVs started
- */
- for (i = 0; i < ab->num_radios; i++) {
- pdev = rcu_dereference(ab->pdevs_active[i]);
- if (pdev && pdev->ar)
- total_vdevs_started += ar->num_started_vdevs;
- }
-
- is_end = ((++num_vdev) == total_vdevs_started);
-
- list_splice_tail_init(&stats->vdevs,
- &ar->fw_stats.vdevs);
-
- if (is_end) {
- ar->fw_stats_done = true;
- num_vdev = 0;
- }
- return;
- }
+ bool is_end = true;
+ /* WMI_REQUEST_PDEV_STAT, WMI_REQUEST_RSSI_PER_CHAIN_STAT and
+ * WMI_REQUEST_VDEV_STAT requests have already been processed.
+ */
if (stats->stats_id == WMI_REQUEST_BCN_STAT) {
if (list_empty(&stats->bcn)) {
ath11k_warn(ab, "empty bcn stats");
@@ -152,97 +111,18 @@ void ath11k_debugfs_fw_stats_process(struct ath11k *ar, struct ath11k_fw_stats *
/* Mark end until we reached the count of all started VDEVs
* within the PDEV
*/
- is_end = ((++num_bcn) == ar->num_started_vdevs);
+ if (ar->num_started_vdevs)
+ is_end = ((++ar->fw_stats.num_bcn_recvd) ==
+ ar->num_started_vdevs);
list_splice_tail_init(&stats->bcn,
&ar->fw_stats.bcn);
- if (is_end) {
- ar->fw_stats_done = true;
- num_bcn = 0;
- }
+ if (is_end)
+ complete(&ar->fw_stats_done);
}
}
-static int ath11k_debugfs_fw_stats_request(struct ath11k *ar,
- struct stats_request_params *req_param)
-{
- struct ath11k_base *ab = ar->ab;
- unsigned long timeout, time_left;
- int ret;
-
- lockdep_assert_held(&ar->conf_mutex);
-
- /* FW stats can get split when exceeding the stats data buffer limit.
- * In that case, since there is no end marking for the back-to-back
- * received 'update stats' event, we keep a 3 seconds timeout in case,
- * fw_stats_done is not marked yet
- */
- timeout = jiffies + msecs_to_jiffies(3 * 1000);
-
- ath11k_debugfs_fw_stats_reset(ar);
-
- reinit_completion(&ar->fw_stats_complete);
-
- ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
-
- if (ret) {
- ath11k_warn(ab, "could not request fw stats (%d)\n",
- ret);
- return ret;
- }
-
- time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ);
-
- if (!time_left)
- return -ETIMEDOUT;
-
- for (;;) {
- if (time_after(jiffies, timeout))
- break;
-
- spin_lock_bh(&ar->data_lock);
- if (ar->fw_stats_done) {
- spin_unlock_bh(&ar->data_lock);
- break;
- }
- spin_unlock_bh(&ar->data_lock);
- }
- return 0;
-}
-
-int ath11k_debugfs_get_fw_stats(struct ath11k *ar, u32 pdev_id,
- u32 vdev_id, u32 stats_id)
-{
- struct ath11k_base *ab = ar->ab;
- struct stats_request_params req_param;
- int ret;
-
- mutex_lock(&ar->conf_mutex);
-
- if (ar->state != ATH11K_STATE_ON) {
- ret = -ENETDOWN;
- goto err_unlock;
- }
-
- req_param.pdev_id = pdev_id;
- req_param.vdev_id = vdev_id;
- req_param.stats_id = stats_id;
-
- ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
- if (ret)
- ath11k_warn(ab, "failed to request fw stats: %d\n", ret);
-
- ath11k_dbg(ab, ATH11K_DBG_WMI,
- "debug get fw stat pdev id %d vdev id %d stats id 0x%x\n",
- pdev_id, vdev_id, stats_id);
-
-err_unlock:
- mutex_unlock(&ar->conf_mutex);
-
- return ret;
-}
-
static int ath11k_open_pdev_stats(struct inode *inode, struct file *file)
{
struct ath11k *ar = inode->i_private;
@@ -268,7 +148,7 @@ static int ath11k_open_pdev_stats(struct inode *inode, struct file *file)
req_param.vdev_id = 0;
req_param.stats_id = WMI_REQUEST_PDEV_STAT;
- ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
+ ret = ath11k_mac_fw_stats_request(ar, &req_param);
if (ret) {
ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
goto err_free;
@@ -339,7 +219,7 @@ static int ath11k_open_vdev_stats(struct inode *inode, struct file *file)
req_param.vdev_id = 0;
req_param.stats_id = WMI_REQUEST_VDEV_STAT;
- ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
+ ret = ath11k_mac_fw_stats_request(ar, &req_param);
if (ret) {
ath11k_warn(ar->ab, "failed to request fw vdev stats: %d\n", ret);
goto err_free;
@@ -415,7 +295,7 @@ static int ath11k_open_bcn_stats(struct inode *inode, struct file *file)
continue;
req_param.vdev_id = arvif->vdev_id;
- ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
+ ret = ath11k_mac_fw_stats_request(ar, &req_param);
if (ret) {
ath11k_warn(ar->ab, "failed to request fw bcn stats: %d\n", ret);
goto err_free;
@@ -495,7 +375,7 @@ static ssize_t ath11k_write_simulate_fw_crash(struct file *file,
struct ath11k_base *ab = file->private_data;
struct ath11k_pdev *pdev;
struct ath11k *ar = ab->pdevs[0].ar;
- char buf[32] = {0};
+ char buf[32] = {};
ssize_t rc;
int i, ret, radioup = 0;
@@ -593,7 +473,7 @@ static ssize_t ath11k_read_enable_extd_tx_stats(struct file *file,
size_t count, loff_t *ppos)
{
- char buf[32] = {0};
+ char buf[32] = {};
struct ath11k *ar = file->private_data;
int len = 0;
@@ -617,7 +497,7 @@ static ssize_t ath11k_write_extd_rx_stats(struct file *file,
{
struct ath11k *ar = file->private_data;
struct ath11k_base *ab = ar->ab;
- struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ struct htt_rx_ring_tlv_filter tlv_filter = {};
u32 enable, rx_filter = 0, ring_id;
int i;
int ret;
@@ -857,7 +737,7 @@ static ssize_t ath11k_write_fw_dbglog(struct file *file,
size_t count, loff_t *ppos)
{
struct ath11k *ar = file->private_data;
- char buf[128] = {0};
+ char buf[128] = {};
struct ath11k_fw_dbglog dbglog;
unsigned int param, mod_id_index, is_end;
u64 value;
@@ -1070,9 +950,9 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file,
{
struct ath11k *ar = file->private_data;
struct ath11k_base *ab = ar->ab;
- struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ struct htt_rx_ring_tlv_filter tlv_filter = {};
u32 rx_filter = 0, ring_id, filter, mode;
- u8 buf[128] = {0};
+ u8 buf[128] = {};
int i, ret, rx_buf_sz = 0;
ssize_t rc;
@@ -1201,7 +1081,7 @@ static ssize_t ath11k_read_pktlog_filter(struct file *file,
size_t count, loff_t *ppos)
{
- char buf[32] = {0};
+ char buf[32] = {};
struct ath11k *ar = file->private_data;
int len = 0;
@@ -1355,7 +1235,7 @@ static ssize_t ath11k_debugfs_write_enable_dbr_dbg(struct file *file,
size_t count, loff_t *ppos)
{
struct ath11k *ar = file->private_data;
- char buf[32] = {0};
+ char buf[32] = {};
u32 dbr_id, enable;
int ret;
@@ -1593,7 +1473,7 @@ int ath11k_debugfs_register(struct ath11k *ar)
{
struct ath11k_base *ab = ar->ab;
char pdev_name[10];
- char buf[100] = {0};
+ char buf[100] = {};
snprintf(pdev_name, sizeof(pdev_name), "%s%u", "mac", ar->pdev_idx);
@@ -1676,10 +1556,10 @@ static ssize_t ath11k_write_twt_add_dialog(struct file *file,
size_t count, loff_t *ppos)
{
struct ath11k_vif *arvif = file->private_data;
- struct wmi_twt_add_dialog_params params = { 0 };
- struct wmi_twt_enable_params twt_params = {0};
+ struct wmi_twt_add_dialog_params params = {};
+ struct wmi_twt_enable_params twt_params = {};
struct ath11k *ar = arvif->ar;
- u8 buf[128] = {0};
+ u8 buf[128] = {};
int ret;
if (ar->twt_enabled == 0) {
@@ -1752,10 +1632,10 @@ static ssize_t ath11k_write_twt_del_dialog(struct file *file,
size_t count, loff_t *ppos)
{
struct ath11k_vif *arvif = file->private_data;
- struct wmi_twt_del_dialog_params params = { 0 };
- struct wmi_twt_enable_params twt_params = {0};
+ struct wmi_twt_del_dialog_params params = {};
+ struct wmi_twt_enable_params twt_params = {};
struct ath11k *ar = arvif->ar;
- u8 buf[64] = {0};
+ u8 buf[64] = {};
int ret;
if (ar->twt_enabled == 0) {
@@ -1799,8 +1679,8 @@ static ssize_t ath11k_write_twt_pause_dialog(struct file *file,
size_t count, loff_t *ppos)
{
struct ath11k_vif *arvif = file->private_data;
- struct wmi_twt_pause_dialog_params params = { 0 };
- u8 buf[64] = {0};
+ struct wmi_twt_pause_dialog_params params = {};
+ u8 buf[64] = {};
int ret;
if (arvif->ar->twt_enabled == 0) {
@@ -1838,8 +1718,8 @@ static ssize_t ath11k_write_twt_resume_dialog(struct file *file,
size_t count, loff_t *ppos)
{
struct ath11k_vif *arvif = file->private_data;
- struct wmi_twt_resume_dialog_params params = { 0 };
- u8 buf[64] = {0};
+ struct wmi_twt_resume_dialog_params params = {};
+ u8 buf[64] = {};
int ret;
if (arvif->ar->twt_enabled == 0) {
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.h b/drivers/net/wireless/ath/ath11k/debugfs.h
index a39e458637b0..ed7fec177588 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.h
+++ b/drivers/net/wireless/ath/ath11k/debugfs.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH11K_DEBUGFS_H_
@@ -273,8 +273,6 @@ void ath11k_debugfs_unregister(struct ath11k *ar);
void ath11k_debugfs_fw_stats_process(struct ath11k *ar, struct ath11k_fw_stats *stats);
void ath11k_debugfs_fw_stats_init(struct ath11k *ar);
-int ath11k_debugfs_get_fw_stats(struct ath11k *ar, u32 pdev_id,
- u32 vdev_id, u32 stats_id);
static inline bool ath11k_debugfs_is_pktlog_lite_mode_enabled(struct ath11k *ar)
{
@@ -381,12 +379,6 @@ static inline int ath11k_debugfs_rx_filter(struct ath11k *ar)
return 0;
}
-static inline int ath11k_debugfs_get_fw_stats(struct ath11k *ar,
- u32 pdev_id, u32 vdev_id, u32 stats_id)
-{
- return 0;
-}
-
static inline void
ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
enum wmi_direct_buffer_module id,
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
index 870e86a31bf8..11d28c42227e 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/vmalloc.h>
@@ -375,7 +376,7 @@ static inline void htt_print_hw_stats_intr_misc_tlv(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char hw_intr_name[HTT_STATS_MAX_HW_INTR_NAME_LEN + 1] = {0};
+ char hw_intr_name[HTT_STATS_MAX_HW_INTR_NAME_LEN + 1] = {};
len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_INTR_MISC_TLV:\n");
memcpy(hw_intr_name, &(htt_stats_buf->hw_intr_name[0]),
@@ -402,7 +403,7 @@ htt_print_hw_stats_wd_timeout_tlv(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char hw_module_name[HTT_STATS_MAX_HW_MODULE_NAME_LEN + 1] = {0};
+ char hw_module_name[HTT_STATS_MAX_HW_MODULE_NAME_LEN + 1] = {};
len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_WD_TIMEOUT_TLV:\n");
memcpy(hw_module_name, &(htt_stats_buf->hw_module_name[0]),
@@ -514,7 +515,7 @@ static inline void htt_print_tx_tid_stats_tlv(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char tid_name[MAX_HTT_TID_NAME + 1] = {0};
+ char tid_name[MAX_HTT_TID_NAME + 1] = {};
len += scnprintf(buf + len, buf_len - len, "HTT_TX_TID_STATS_TLV:\n");
memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
@@ -567,7 +568,7 @@ static inline void htt_print_tx_tid_stats_v1_tlv(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char tid_name[MAX_HTT_TID_NAME + 1] = {0};
+ char tid_name[MAX_HTT_TID_NAME + 1] = {};
len += scnprintf(buf + len, buf_len - len, "HTT_TX_TID_STATS_V1_TLV:\n");
memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
@@ -624,7 +625,7 @@ static inline void htt_print_rx_tid_stats_tlv(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char tid_name[MAX_HTT_TID_NAME + 1] = {0};
+ char tid_name[MAX_HTT_TID_NAME + 1] = {};
len += scnprintf(buf + len, buf_len - len, "HTT_RX_TID_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %lu\n",
@@ -4712,7 +4713,7 @@ int ath11k_debugfs_htt_stats_req(struct ath11k *ar)
u8 type = stats_req->type;
u64 cookie = 0;
int ret, pdev_id = ar->pdev->pdev_id;
- struct htt_ext_stats_cfg_params cfg_params = { 0 };
+ struct htt_ext_stats_cfg_params cfg_params = {};
init_completion(&stats_req->cmpln);
@@ -4852,7 +4853,7 @@ static ssize_t ath11k_write_htt_stats_reset(struct file *file,
{
struct ath11k *ar = file->private_data;
u8 type;
- struct htt_ext_stats_cfg_params cfg_params = { 0 };
+ struct htt_ext_stats_cfg_params cfg_params = {};
int ret;
ret = kstrtou8_from_user(user_buf, count, 0, &type);
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.c b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
index f56a24b6c8da..d89d0f28d890 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/vmalloc.h>
@@ -456,7 +457,7 @@ static ssize_t ath11k_dbg_sta_read_peer_pktlog(struct file *file,
struct ieee80211_sta *sta = file->private_data;
struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
struct ath11k *ar = arsta->arvif->ar;
- char buf[32] = {0};
+ char buf[32] = {};
int len;
mutex_lock(&ar->conf_mutex);
@@ -485,7 +486,7 @@ static ssize_t ath11k_dbg_sta_write_delba(struct file *file,
struct ath11k *ar = arsta->arvif->ar;
u32 tid, initiator, reason;
int ret;
- char buf[64] = {0};
+ char buf[64] = {};
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
user_buf, count);
@@ -536,7 +537,7 @@ static ssize_t ath11k_dbg_sta_write_addba_resp(struct file *file,
struct ath11k *ar = arsta->arvif->ar;
u32 tid, status;
int ret;
- char buf[64] = {0};
+ char buf[64] = {};
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
user_buf, count);
@@ -586,7 +587,7 @@ static ssize_t ath11k_dbg_sta_write_addba(struct file *file,
struct ath11k *ar = arsta->arvif->ar;
u32 tid, buf_size;
int ret;
- char buf[64] = {0};
+ char buf[64] = {};
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
user_buf, count);
@@ -700,7 +701,7 @@ ath11k_write_htt_peer_stats_reset(struct file *file,
struct ieee80211_sta *sta = file->private_data;
struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
struct ath11k *ar = arsta->arvif->ar;
- struct htt_ext_stats_cfg_params cfg_params = { 0 };
+ struct htt_ext_stats_cfg_params cfg_params = {};
int ret;
u8 type;
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
index fbf666d0ecf1..56b1a657e0b0 100644
--- a/drivers/net/wireless/ath/ath11k/dp.c
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -1,10 +1,12 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <crypto/hash.h>
+#include <linux/export.h>
#include "core.h"
#include "dp_tx.h"
#include "hal_tx.h"
@@ -104,14 +106,12 @@ void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
if (!ring->vaddr_unaligned)
return;
- if (ring->cached) {
- dma_unmap_single(ab->dev, ring->paddr_unaligned, ring->size,
- DMA_FROM_DEVICE);
- kfree(ring->vaddr_unaligned);
- } else {
+ if (ring->cached)
+ dma_free_noncoherent(ab->dev, ring->size, ring->vaddr_unaligned,
+ ring->paddr_unaligned, DMA_FROM_DEVICE);
+ else
dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
ring->paddr_unaligned);
- }
ring->vaddr_unaligned = NULL;
}
@@ -225,7 +225,7 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
enum hal_ring_type type, int ring_num,
int mac_id, int num_entries)
{
- struct hal_srng_params params = { 0 };
+ struct hal_srng_params params = {};
int entry_sz = ath11k_hal_srng_get_entrysize(ab, type);
int max_entries = ath11k_hal_srng_get_max_entries(ab, type);
int ret;
@@ -249,25 +249,14 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
default:
cached = false;
}
-
- if (cached) {
- ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
- if (!ring->vaddr_unaligned)
- return -ENOMEM;
-
- ring->paddr_unaligned = dma_map_single(ab->dev,
- ring->vaddr_unaligned,
- ring->size,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(ab->dev, ring->paddr_unaligned)) {
- kfree(ring->vaddr_unaligned);
- ring->vaddr_unaligned = NULL;
- return -ENOMEM;
- }
- }
}
- if (!cached)
+ if (cached)
+ ring->vaddr_unaligned = dma_alloc_noncoherent(ab->dev, ring->size,
+ &ring->paddr_unaligned,
+ DMA_FROM_DEVICE,
+ GFP_KERNEL);
+ else
ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
&ring->paddr_unaligned,
GFP_KERNEL);
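
The change above replaces the kzalloc()/dma_map_single() pair for cached rings with the noncoherent DMA allocator, so allocation and mapping happen in one step and are freed symmetrically. A minimal sketch of that pattern, using a hypothetical demo_ring structure rather than the driver's real types:

#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

struct demo_ring {
	void *vaddr;
	dma_addr_t paddr;
	u32 size;
	bool cached;
};

static int demo_ring_alloc(struct device *dev, struct demo_ring *ring)
{
	if (ring->cached)
		/* CPU-cached backing memory; callers must use dma_sync_*()
		 * around device accesses.
		 */
		ring->vaddr = dma_alloc_noncoherent(dev, ring->size, &ring->paddr,
						    DMA_FROM_DEVICE, GFP_KERNEL);
	else
		ring->vaddr = dma_alloc_coherent(dev, ring->size, &ring->paddr,
						 GFP_KERNEL);

	return ring->vaddr ? 0 : -ENOMEM;
}

static void demo_ring_free(struct device *dev, struct demo_ring *ring)
{
	if (!ring->vaddr)
		return;

	if (ring->cached)
		dma_free_noncoherent(dev, ring->size, ring->vaddr, ring->paddr,
				     DMA_FROM_DEVICE);
	else
		dma_free_coherent(dev, ring->size, ring->vaddr, ring->paddr);

	ring->vaddr = NULL;
}
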
@@ -888,7 +877,7 @@ void ath11k_dp_pdev_free(struct ath11k_base *ab)
struct ath11k *ar;
int i;
- del_timer_sync(&ab->mon_reap_timer);
+ timer_delete_sync(&ab->mon_reap_timer);
for (i = 0; i < ab->num_radios; i++) {
ar = ab->pdevs[i].ar;
@@ -1130,8 +1119,9 @@ fail_link_desc_cleanup:
static void ath11k_dp_shadow_timer_handler(struct timer_list *t)
{
- struct ath11k_hp_update_timer *update_timer = from_timer(update_timer,
- t, timer);
+ struct ath11k_hp_update_timer *update_timer = timer_container_of(update_timer,
+ t,
+ timer);
struct ath11k_base *ab = update_timer->ab;
struct hal_srng *srng = &ab->hal.srng_list[update_timer->ring_id];
@@ -1183,7 +1173,7 @@ void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab,
if (!update_timer->init)
return;
- del_timer_sync(&update_timer->timer);
+ timer_delete_sync(&update_timer->timer);
}
void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
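
The timer hunks above are part of the kernel-wide rename from from_timer()/del_timer_sync() to timer_container_of()/timer_delete_sync(). A small sketch of the idiom, assuming a hypothetical demo_dev structure:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_dev {
	struct timer_list reap_timer;
	unsigned long reap_count;
};

static void demo_reap_timer_fn(struct timer_list *t)
{
	/* Recover the containing structure from the timer_list pointer. */
	struct demo_dev *dd = timer_container_of(dd, t, reap_timer);

	dd->reap_count++;
	mod_timer(&dd->reap_timer, jiffies + msecs_to_jiffies(100));
}

static void demo_dev_init(struct demo_dev *dd)
{
	timer_setup(&dd->reap_timer, demo_reap_timer_fn, 0);
	mod_timer(&dd->reap_timer, jiffies + msecs_to_jiffies(100));
}

static void demo_dev_stop(struct demo_dev *dd)
{
	/* Wait for a running callback to finish before returning. */
	timer_delete_sync(&dd->reap_timer);
}
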
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
index 65d2bc0687c8..7a55afd33be8 100644
--- a/drivers/net/wireless/ath/ath11k/dp.h
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023, 2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_DP_H
@@ -20,7 +20,6 @@ struct ath11k_ext_irq_grp;
struct dp_rx_tid {
u8 tid;
- u32 *vaddr;
dma_addr_t paddr;
u32 size;
u32 ba_win_sz;
@@ -37,6 +36,9 @@ struct dp_rx_tid {
/* Timer info related to fragments */
struct timer_list frag_timer;
struct ath11k_base *ab;
+ u32 *vaddr_unaligned;
+ dma_addr_t paddr_unaligned;
+ u32 unaligned_size;
};
#define DP_REO_DESC_FREE_THRESHOLD 64
@@ -165,7 +167,6 @@ struct ath11k_mon_data {
struct ath11k_pdev_mon_stats rx_mon_stats;
/* lock for monitor data */
spinlock_t mon_lock;
- struct sk_buff_head rx_status_q;
};
struct ath11k_pdev_dp {
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 40088e62572e..b9e976ddcbbf 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/ieee80211.h>
@@ -308,7 +308,7 @@ static u8 *ath11k_dp_rxdesc_mpdu_start_addr2(struct ath11k_base *ab,
static void ath11k_dp_service_mon_ring(struct timer_list *t)
{
- struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
+ struct ath11k_base *ab = timer_container_of(ab, t, mon_reap_timer);
int i;
for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++)
@@ -675,11 +675,11 @@ void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
list_del(&cmd->list);
rx_tid = &cmd->data;
- if (rx_tid->vaddr) {
- dma_unmap_single(ab->dev, rx_tid->paddr,
- rx_tid->size, DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ if (rx_tid->vaddr_unaligned) {
+ dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
+ rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
}
kfree(cmd);
}
@@ -689,11 +689,11 @@ void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
list_del(&cmd_cache->list);
dp->reo_cmd_cache_flush_count--;
rx_tid = &cmd_cache->data;
- if (rx_tid->vaddr) {
- dma_unmap_single(ab->dev, rx_tid->paddr,
- rx_tid->size, DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ if (rx_tid->vaddr_unaligned) {
+ dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
+ rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
}
kfree(cmd_cache);
}
@@ -708,18 +708,18 @@ static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
if (status != HAL_REO_CMD_SUCCESS)
ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
rx_tid->tid, status);
- if (rx_tid->vaddr) {
- dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
- DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ if (rx_tid->vaddr_unaligned) {
+ dma_free_noncoherent(dp->ab->dev, rx_tid->unaligned_size,
+ rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
}
}
static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
struct dp_rx_tid *rx_tid)
{
- struct ath11k_hal_reo_cmd cmd = {0};
+ struct ath11k_hal_reo_cmd cmd = {};
unsigned long tot_desc_sz, desc_sz;
int ret;
@@ -749,10 +749,10 @@ static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
if (ret) {
ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
rx_tid->tid, ret);
- dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
- DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
+ rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
}
}
@@ -802,16 +802,16 @@ static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
return;
free_desc:
- dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
- DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
+ rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
}
void ath11k_peer_rx_tid_delete(struct ath11k *ar,
struct ath11k_peer *peer, u8 tid)
{
- struct ath11k_hal_reo_cmd cmd = {0};
+ struct ath11k_hal_reo_cmd cmd = {};
struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
int ret;
@@ -831,14 +831,16 @@ void ath11k_peer_rx_tid_delete(struct ath11k *ar,
if (ret != -ESHUTDOWN)
ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
tid, ret);
- dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
- DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ dma_free_noncoherent(ar->ab->dev, rx_tid->unaligned_size,
+ rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
}
rx_tid->paddr = 0;
+ rx_tid->paddr_unaligned = 0;
rx_tid->size = 0;
+ rx_tid->unaligned_size = 0;
}
static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
@@ -904,7 +906,7 @@ void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer)
rx_tid = &peer->rx_tid[i];
spin_unlock_bh(&ar->ab->base_lock);
- del_timer_sync(&rx_tid->frag_timer);
+ timer_delete_sync(&rx_tid->frag_timer);
spin_lock_bh(&ar->ab->base_lock);
ath11k_dp_rx_frags_cleanup(rx_tid, true);
@@ -925,7 +927,7 @@ void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
ath11k_dp_rx_frags_cleanup(rx_tid, true);
spin_unlock_bh(&ar->ab->base_lock);
- del_timer_sync(&rx_tid->frag_timer);
+ timer_delete_sync(&rx_tid->frag_timer);
spin_lock_bh(&ar->ab->base_lock);
}
}
@@ -936,7 +938,7 @@ static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
u32 ba_win_sz, u16 ssn,
bool update_ssn)
{
- struct ath11k_hal_reo_cmd cmd = {0};
+ struct ath11k_hal_reo_cmd cmd = {};
int ret;
cmd.addr_lo = lower_32_bits(rx_tid->paddr);
@@ -982,10 +984,9 @@ static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
if (!rx_tid->active)
goto unlock_exit;
- dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
- DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
rx_tid->active = false;
@@ -1000,9 +1001,8 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
struct ath11k_base *ab = ar->ab;
struct ath11k_peer *peer;
struct dp_rx_tid *rx_tid;
- u32 hw_desc_sz;
- u32 *addr_aligned;
- void *vaddr;
+ u32 hw_desc_sz, *vaddr;
+ void *vaddr_unaligned;
dma_addr_t paddr;
int ret;
@@ -1050,37 +1050,34 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
else
hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
- vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
- if (!vaddr) {
+ rx_tid->unaligned_size = hw_desc_sz + HAL_LINK_DESC_ALIGN - 1;
+ vaddr_unaligned = dma_alloc_noncoherent(ab->dev, rx_tid->unaligned_size, &paddr,
+ DMA_BIDIRECTIONAL, GFP_ATOMIC);
+ if (!vaddr_unaligned) {
spin_unlock_bh(&ab->base_lock);
return -ENOMEM;
}
- addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
-
- ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
- ssn, pn_type);
-
- paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
- DMA_BIDIRECTIONAL);
-
- ret = dma_mapping_error(ab->dev, paddr);
- if (ret) {
- spin_unlock_bh(&ab->base_lock);
- ath11k_warn(ab, "failed to setup dma map for peer %pM rx tid %d: %d\n",
- peer_mac, tid, ret);
- goto err_mem_free;
- }
-
- rx_tid->vaddr = vaddr;
- rx_tid->paddr = paddr;
+ rx_tid->vaddr_unaligned = vaddr_unaligned;
+ vaddr = PTR_ALIGN(vaddr_unaligned, HAL_LINK_DESC_ALIGN);
+ rx_tid->paddr_unaligned = paddr;
+ rx_tid->paddr = rx_tid->paddr_unaligned + ((unsigned long)vaddr -
+ (unsigned long)rx_tid->vaddr_unaligned);
+ ath11k_hal_reo_qdesc_setup(vaddr, tid, ba_win_sz, ssn, pn_type);
rx_tid->size = hw_desc_sz;
rx_tid->active = true;
+	/* After dma_alloc_noncoherent(), vaddr is modified by the REO qdesc setup
+	 * above. Since these CPU writes are not yet visible to the device, the
+	 * driver needs to call dma_sync_single_for_device() explicitly.
+	 */
+ dma_sync_single_for_device(ab->dev, rx_tid->paddr,
+ rx_tid->size,
+ DMA_TO_DEVICE);
spin_unlock_bh(&ab->base_lock);
- ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
- paddr, tid, 1, ba_win_sz);
+ ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, rx_tid->paddr,
+ tid, 1, ba_win_sz);
if (ret) {
ath11k_warn(ar->ab, "failed to setup rx reorder queue for peer %pM tid %d: %d\n",
peer_mac, tid, ret);
@@ -1088,12 +1085,6 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
}
return ret;
-
-err_mem_free:
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
-
- return ret;
}
int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
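
The reworked RX TID setup above keeps only the unaligned noncoherent allocation and derives the aligned CPU and DMA addresses from it, then syncs the CPU-side writes to the device. Roughly, with made-up names and a stand-in alignment constant:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/align.h>
#include <linux/string.h>

#define DEMO_ALIGN	128	/* stand-in for HAL_LINK_DESC_ALIGN */

/* Allocate 'size' bytes aligned to DEMO_ALIGN out of a noncoherent buffer
 * and make the CPU-side initialisation visible to the device.
 */
static void *demo_alloc_aligned_desc(struct device *dev, size_t size,
				     dma_addr_t *aligned_paddr,
				     void **unaligned_vaddr,
				     dma_addr_t *unaligned_paddr,
				     size_t *unaligned_size)
{
	void *vaddr, *aligned;

	*unaligned_size = size + DEMO_ALIGN - 1;
	vaddr = dma_alloc_noncoherent(dev, *unaligned_size, unaligned_paddr,
				      DMA_BIDIRECTIONAL, GFP_ATOMIC);
	if (!vaddr)
		return NULL;

	aligned = PTR_ALIGN(vaddr, DEMO_ALIGN);
	*unaligned_vaddr = vaddr;
	/* The aligned DMA address is the unaligned one plus the CPU offset. */
	*aligned_paddr = *unaligned_paddr + (aligned - vaddr);

	memset(aligned, 0, size);	/* descriptor setup would go here */

	/* Flush the CPU writes so the device sees the initialised contents. */
	dma_sync_single_for_device(dev, *aligned_paddr, size, DMA_TO_DEVICE);

	return aligned;
}
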
@@ -1166,7 +1157,7 @@ int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
{
struct ath11k *ar = arvif->ar;
struct ath11k_base *ab = ar->ab;
- struct ath11k_hal_reo_cmd cmd = {0};
+ struct ath11k_hal_reo_cmd cmd = {};
struct ath11k_peer *peer;
struct dp_rx_tid *rx_tid;
u8 tid;
@@ -2600,7 +2591,7 @@ static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
{
struct sk_buff *msdu;
struct ath11k *ar;
- struct ieee80211_rx_status rx_status = {0};
+ struct ieee80211_rx_status rx_status = {};
int ret;
if (skb_queue_empty(msdu_list))
@@ -2635,7 +2626,7 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
{
struct ath11k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring;
- int num_buffs_reaped[MAX_RADIOS] = {0};
+ int num_buffs_reaped[MAX_RADIOS] = {};
struct sk_buff_head msdu_list[MAX_RADIOS];
struct ath11k_skb_rxcb *rxcb;
int total_msdu_reaped = 0;
@@ -2831,8 +2822,6 @@ static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
rx_stats->dcm_count += ppdu_info->dcm;
rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu;
- arsta->rssi_comb = ppdu_info->rssi_comb;
-
BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
ARRAY_SIZE(ppdu_info->rssi_chain_pri20));
@@ -3178,7 +3167,8 @@ move_next:
static void ath11k_dp_rx_frag_timer(struct timer_list *timer)
{
- struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
+ struct dp_rx_tid *rx_tid = timer_container_of(rx_tid, timer,
+ frag_timer);
spin_lock_bh(&rx_tid->ab->base_lock);
if (rx_tid->last_frag_no &&
@@ -3234,7 +3224,7 @@ static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
size_t data_len, u8 *mic)
{
SHASH_DESC_ON_STACK(desc, tfm);
- u8 mic_hdr[16] = {0};
+ u8 mic_hdr[16] = {};
u8 tid = 0;
int ret;
@@ -3721,7 +3711,7 @@ static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
}
spin_unlock_bh(&ab->base_lock);
- del_timer_sync(&rx_tid->frag_timer);
+ timer_delete_sync(&rx_tid->frag_timer);
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find_by_id(ab, peer_id);
@@ -3828,7 +3818,7 @@ int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
struct dp_link_desc_bank *link_desc_banks;
enum hal_rx_buf_return_buf_manager rbm;
int tot_n_bufs_reaped, quota, ret, i;
- int n_bufs_reaped[MAX_RADIOS] = {0};
+ int n_bufs_reaped[MAX_RADIOS] = {};
struct dp_rxdma_ring *rx_ring;
struct dp_srng *reo_except;
u32 desc_bank, num_msdus;
@@ -3872,6 +3862,7 @@ int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
&rbm);
if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
+ rbm != HAL_RX_BUF_RBM_SW1_BM &&
rbm != HAL_RX_BUF_RBM_SW3_BM) {
ab->soc_stats.invalid_rbm++;
ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
@@ -4108,7 +4099,7 @@ static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
struct sk_buff_head *msdu_list)
{
struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
- struct ieee80211_rx_status rxs = {0};
+ struct ieee80211_rx_status rxs = {};
bool drop = true;
switch (rxcb->err_rel_src) {
@@ -4144,7 +4135,7 @@ int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
struct ath11k_skb_rxcb *rxcb;
u32 *rx_desc;
int buf_id, mac_id;
- int num_buffs_reaped[MAX_RADIOS] = {0};
+ int num_buffs_reaped[MAX_RADIOS] = {};
int total_num_buffs_reaped = 0;
int ret, i;
@@ -4624,7 +4615,6 @@ static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar,
msdu_details[i].buf_addr_info.info0) == 0) {
msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
msdu_desc_info->info0 |= last;
- ;
break;
}
msdu_desc_info = &msdu_details[i].rx_msdu_info;
@@ -4690,11 +4680,12 @@ static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
}
}
-static u32
-ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
- void *ring_entry, struct sk_buff **head_msdu,
- struct sk_buff **tail_msdu, u32 *npackets,
- u32 *ppdu_id)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+u32 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
+ void *ring_entry, struct sk_buff **head_msdu,
+ struct sk_buff **tail_msdu, u32 *npackets,
+ u32 *ppdu_id)
{
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
@@ -4781,7 +4772,7 @@ ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
if (!msdu) {
ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
"msdu_pop: invalid buf_id %d\n", buf_id);
- break;
+ goto next_msdu;
}
rxcb = ATH11K_SKB_RXCB(msdu);
if (!rxcb->unmapped) {
@@ -5146,7 +5137,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
const struct ath11k_hw_hal_params *hal_params;
void *ring_entry;
- void *mon_dst_srng;
+ struct hal_srng *mon_dst_srng;
u32 ppdu_id;
u32 rx_bufs_used;
u32 ring_id;
@@ -5163,6 +5154,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
spin_lock_bh(&pmon->mon_lock);
+ spin_lock_bh(&mon_dst_srng->lock);
ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
ppdu_id = pmon->mon_ppdu_info.ppdu_id;
@@ -5221,6 +5213,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
mon_dst_srng);
}
ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
+ spin_unlock_bh(&mon_dst_srng->lock);
spin_unlock_bh(&pmon->mon_lock);
@@ -5408,7 +5401,7 @@ ath11k_dp_rx_full_mon_mpdu_pop(struct ath11k *ar,
"full mon msdu_pop: invalid buf_id %d\n",
buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
- break;
+ goto next_msdu;
}
idr_remove(&rx_ring->bufs_idr, buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
@@ -5610,7 +5603,7 @@ static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id,
struct hal_sw_mon_ring_entries *sw_mon_entries;
struct ath11k_pdev_mon_stats *rx_mon_stats;
struct sk_buff *head_msdu, *tail_msdu;
- void *mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
+ struct hal_srng *mon_dst_srng;
void *ring_entry;
u32 rx_bufs_used = 0, mpdu_rx_bufs_used;
int quota = 0, ret;
@@ -5626,6 +5619,9 @@ static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id,
goto reap_status_ring;
}
+ mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
+ spin_lock_bh(&mon_dst_srng->lock);
+
ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
head_msdu = NULL;
@@ -5669,6 +5665,7 @@ next_entry:
}
ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
+ spin_unlock_bh(&mon_dst_srng->lock);
spin_unlock_bh(&pmon->mon_lock);
if (rx_bufs_used) {
@@ -5705,8 +5702,6 @@ static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
- skb_queue_head_init(&pmon->rx_status_q);
-
pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
memset(&pmon->rx_mon_stats, 0,
@@ -5786,7 +5781,7 @@ int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer)
int ret;
if (stop_timer)
- del_timer_sync(&ab->mon_reap_timer);
+ timer_delete_sync(&ab->mon_reap_timer);
/* reap all the monitor related rings */
ret = ath11k_dp_purge_mon_ring(ab);
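
The monitor-destination hunks above take srng->lock around each begin/end access pair, matching the lockdep_assert_held() checks in hal.c. The general shape of that discipline, with placeholder helpers in place of the driver's SRNG API:

#include <linux/spinlock.h>

/* Hypothetical ring context; stands in for struct hal_srng and its lock. */
struct demo_srng {
	spinlock_t lock;
};

static void demo_ring_begin(struct demo_srng *srng)
{
	lockdep_assert_held(&srng->lock);	/* caller must hold the lock */
	/* snapshot head/tail pointers here */
}

static void demo_ring_end(struct demo_srng *srng)
{
	lockdep_assert_held(&srng->lock);
	/* publish updated head/tail pointers here */
}

static void demo_ring_reap(struct demo_srng *srng)
{
	spin_lock_bh(&srng->lock);
	demo_ring_begin(srng);
	/* ... pop and process ring entries ... */
	demo_ring_end(srng);
	spin_unlock_bh(&srng->lock);
}
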
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
index 8522c67baabf..562aba66582f 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include "core.h"
@@ -84,7 +85,7 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
{
struct ath11k_base *ab = ar->ab;
struct ath11k_dp *dp = &ab->dp;
- struct hal_tx_info ti = {0};
+ struct hal_tx_info ti = {};
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
struct hal_srng *tcl_ring;
@@ -316,7 +317,7 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
struct dp_tx_ring *tx_ring,
struct ath11k_dp_htt_wbm_tx_status *ts)
{
- struct ieee80211_tx_status status = { 0 };
+ struct ieee80211_tx_status status = {};
struct sk_buff *msdu;
struct ieee80211_tx_info *info;
struct ath11k_skb_cb *skb_cb;
@@ -391,7 +392,7 @@ ath11k_dp_tx_process_htt_tx_complete(struct ath11k_base *ab,
u32 msdu_id, struct dp_tx_ring *tx_ring)
{
struct htt_tx_wbm_completion *status_desc;
- struct ath11k_dp_htt_wbm_tx_status ts = {0};
+ struct ath11k_dp_htt_wbm_tx_status ts = {};
enum hal_wbm_htt_tx_comp_status wbm_status;
status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET;
@@ -551,8 +552,8 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
struct sk_buff *msdu,
struct hal_tx_status *ts)
{
- struct ieee80211_tx_status status = { 0 };
- struct ieee80211_rate_status status_rate = { 0 };
+ struct ieee80211_tx_status status = {};
+ struct ieee80211_rate_status status_rate = {};
struct ath11k_base *ab = ar->ab;
struct ieee80211_tx_info *info;
struct ath11k_skb_cb *skb_cb;
@@ -690,7 +691,7 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
struct sk_buff *msdu;
- struct hal_tx_status ts = { 0 };
+ struct hal_tx_status ts = {};
struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
u32 *desc;
u32 msdu_id;
@@ -1187,7 +1188,7 @@ int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
{
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_base *ab = ar->ab;
- struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ struct htt_rx_ring_tlv_filter tlv_filter = {};
int ret = 0, ring_id = 0, i;
if (ab->hw_params.full_monitor_mode) {
diff --git a/drivers/net/wireless/ath/ath11k/fw.c b/drivers/net/wireless/ath/ath11k/fw.c
index 4e36292a79db..07d775a7b528 100644
--- a/drivers/net/wireless/ath/ath11k/fw.c
+++ b/drivers/net/wireless/ath/ath11k/fw.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
- * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
+#include <linux/export.h>
#include "core.h"
#include "debug.h"
@@ -166,3 +168,4 @@ void ath11k_fw_destroy(struct ath11k_base *ab)
{
release_firmware(ab->fw.fw);
}
+EXPORT_SYMBOL(ath11k_fw_destroy);
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
index f02599bd1c36..0c797b8d0a27 100644
--- a/drivers/net/wireless/ath/ath11k/hal.c
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -2,8 +2,10 @@
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/dma-mapping.h>
+#include <linux/export.h>
#include "hal_tx.h"
#include "debug.h"
#include "hal_desc.h"
@@ -823,13 +825,23 @@ u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng)
void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng)
{
+ u32 hp;
+
lockdep_assert_held(&srng->lock);
if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
srng->u.src_ring.cached_tp =
*(volatile u32 *)srng->u.src_ring.tp_addr;
} else {
- srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
+ hp = READ_ONCE(*srng->u.dst_ring.hp_addr);
+
+ if (hp != srng->u.dst_ring.cached_hp) {
+ srng->u.dst_ring.cached_hp = hp;
+ /* Make sure descriptor is read after the head
+ * pointer.
+ */
+ dma_rmb();
+ }
/* Try to prefetch the next descriptor in the ring */
if (srng->flags & HAL_SRNG_FLAGS_CACHED)
@@ -844,7 +856,6 @@ void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng)
{
lockdep_assert_held(&srng->lock);
- /* TODO: See if we need a write memory barrier here */
if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) {
/* For LMAC rings, ring pointer updates are done through FW and
* hence written to a shared memory location that is read by FW
@@ -852,21 +863,37 @@ void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng)
if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
srng->u.src_ring.last_tp =
*(volatile u32 *)srng->u.src_ring.tp_addr;
- *srng->u.src_ring.hp_addr = srng->u.src_ring.hp;
+ /* Make sure descriptor is written before updating the
+ * head pointer.
+ */
+ dma_wmb();
+ WRITE_ONCE(*srng->u.src_ring.hp_addr, srng->u.src_ring.hp);
} else {
srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
- *srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp;
+ /* Make sure descriptor is read before updating the
+ * tail pointer.
+ */
+ dma_mb();
+ WRITE_ONCE(*srng->u.dst_ring.tp_addr, srng->u.dst_ring.tp);
}
} else {
if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
srng->u.src_ring.last_tp =
*(volatile u32 *)srng->u.src_ring.tp_addr;
+			/* Assume the implementation uses an MMIO write accessor
+			 * which has the required wmb() so that the descriptor
+			 * is written before updating the head pointer.
+			 */
ath11k_hif_write32(ab,
(unsigned long)srng->u.src_ring.hp_addr -
(unsigned long)ab->mem,
srng->u.src_ring.hp);
} else {
srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
+ /* Make sure descriptor is read before updating the
+ * tail pointer.
+ */
+ mb();
ath11k_hif_write32(ab,
(unsigned long)srng->u.dst_ring.tp_addr -
(unsigned long)ab->mem,
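
The barrier changes above follow the usual shared-ring discipline: order the descriptor write before publishing the producer index, and order descriptor reads after reading the index. A condensed sketch of both sides, assuming the indices live in DMA-visible memory and using invented names:

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

/* Producer: write the descriptor, then publish the new head pointer. */
static void demo_ring_produce(u32 *desc, u32 val, u32 *hp_addr, u32 new_hp)
{
	*desc = val;
	dma_wmb();			/* descriptor before index update */
	WRITE_ONCE(*hp_addr, new_hp);
}

/* Consumer: read the head pointer, then the descriptors it covers. */
static u32 demo_ring_consume(u32 *desc, u32 *hp_addr, u32 *cached_hp)
{
	u32 hp = READ_ONCE(*hp_addr);

	if (hp != *cached_hp) {
		*cached_hp = hp;
		dma_rmb();		/* index read before descriptor reads */
	}

	return *desc;
}
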
@@ -1346,14 +1373,35 @@ EXPORT_SYMBOL(ath11k_hal_srng_init);
void ath11k_hal_srng_deinit(struct ath11k_base *ab)
{
struct ath11k_hal *hal = &ab->hal;
+ int i;
+
+ for (i = 0; i < HAL_SRNG_RING_ID_MAX; i++)
+ ab->hal.srng_list[i].initialized = 0;
ath11k_hal_unregister_srng_key(ab);
ath11k_hal_free_cont_rdp(ab);
ath11k_hal_free_cont_wrp(ab);
kfree(hal->srng_config);
+ hal->srng_config = NULL;
}
EXPORT_SYMBOL(ath11k_hal_srng_deinit);
+void ath11k_hal_srng_clear(struct ath11k_base *ab)
+{
+ /* No need to memset rdp and wrp memory since each individual
+ * segment would get cleared in ath11k_hal_srng_src_hw_init()
+ * and ath11k_hal_srng_dst_hw_init().
+ */
+ memset(ab->hal.srng_list, 0,
+ sizeof(ab->hal.srng_list));
+ memset(ab->hal.shadow_reg_addr, 0,
+ sizeof(ab->hal.shadow_reg_addr));
+ ab->hal.avail_blk_resource = 0;
+ ab->hal.current_blk_index = 0;
+ ab->hal.num_shadow_reg_configured = 0;
+}
+EXPORT_SYMBOL(ath11k_hal_srng_clear);
+
void ath11k_hal_dump_srng_stats(struct ath11k_base *ab)
{
struct hal_srng *srng;
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
index dc8bbe073017..82603a389bb9 100644
--- a/drivers/net/wireless/ath/ath11k/hal.h
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef ATH11K_HAL_H
@@ -43,14 +43,14 @@ struct ath11k_base;
#define HAL_SEQ_WCSS_UMAC_OFFSET 0x00a00000
#define HAL_SEQ_WCSS_UMAC_REO_REG 0x00a38000
#define HAL_SEQ_WCSS_UMAC_TCL_REG 0x00a44000
-#define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(x) \
- (ab->hw_params.regs->hal_seq_wcss_umac_ce0_src_reg)
-#define HAL_SEQ_WCSS_UMAC_CE0_DST_REG(x) \
- (ab->hw_params.regs->hal_seq_wcss_umac_ce0_dst_reg)
-#define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(x) \
- (ab->hw_params.regs->hal_seq_wcss_umac_ce1_src_reg)
-#define HAL_SEQ_WCSS_UMAC_CE1_DST_REG(x) \
- (ab->hw_params.regs->hal_seq_wcss_umac_ce1_dst_reg)
+#define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) \
+ ((ab)->hw_params.regs->hal_seq_wcss_umac_ce0_src_reg)
+#define HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) \
+ ((ab)->hw_params.regs->hal_seq_wcss_umac_ce0_dst_reg)
+#define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) \
+ ((ab)->hw_params.regs->hal_seq_wcss_umac_ce1_src_reg)
+#define HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) \
+ ((ab)->hw_params.regs->hal_seq_wcss_umac_ce1_dst_reg)
#define HAL_SEQ_WCSS_UMAC_WBM_REG 0x00a34000
#define HAL_CE_WFSS_CE_REG_BASE 0x01b80000
@@ -209,10 +209,10 @@ struct ath11k_base;
#define HAL_REO_STATUS_HP(ab) ab->hw_params.regs->hal_reo_status_hp
/* WBM Idle R0 address */
-#define HAL_WBM_IDLE_LINK_RING_BASE_LSB(x) \
- (ab->hw_params.regs->hal_wbm_idle_link_ring_base_lsb)
-#define HAL_WBM_IDLE_LINK_RING_MISC_ADDR(x) \
- (ab->hw_params.regs->hal_wbm_idle_link_ring_misc)
+#define HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab) \
+ ((ab)->hw_params.regs->hal_wbm_idle_link_ring_base_lsb)
+#define HAL_WBM_IDLE_LINK_RING_MISC_ADDR(ab) \
+ ((ab)->hw_params.regs->hal_wbm_idle_link_ring_misc)
#define HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR 0x00000048
#define HAL_WBM_R0_IDLE_LIST_SIZE_ADDR 0x0000004c
#define HAL_WBM_SCATTERED_RING_BASE_LSB 0x00000058
@@ -227,17 +227,17 @@ struct ath11k_base;
#define HAL_WBM_IDLE_LINK_RING_HP 0x000030b0
/* SW2WBM R0 release address */
-#define HAL_WBM_RELEASE_RING_BASE_LSB(x) \
- (ab->hw_params.regs->hal_wbm_release_ring_base_lsb)
+#define HAL_WBM_RELEASE_RING_BASE_LSB(ab) \
+ ((ab)->hw_params.regs->hal_wbm_release_ring_base_lsb)
/* SW2WBM R2 release address */
#define HAL_WBM_RELEASE_RING_HP 0x00003018
/* WBM2SW R0 release address */
-#define HAL_WBM0_RELEASE_RING_BASE_LSB(x) \
- (ab->hw_params.regs->hal_wbm0_release_ring_base_lsb)
-#define HAL_WBM1_RELEASE_RING_BASE_LSB(x) \
- (ab->hw_params.regs->hal_wbm1_release_ring_base_lsb)
+#define HAL_WBM0_RELEASE_RING_BASE_LSB(ab) \
+ ((ab)->hw_params.regs->hal_wbm0_release_ring_base_lsb)
+#define HAL_WBM1_RELEASE_RING_BASE_LSB(ab) \
+ ((ab)->hw_params.regs->hal_wbm1_release_ring_base_lsb)
/* WBM2SW R2 release address */
#define HAL_WBM0_RELEASE_RING_HP 0x000030c0
@@ -700,7 +700,7 @@ enum hal_rx_buf_return_buf_manager {
#define HAL_REO_CMD_FLG_UNBLK_RESOURCE BIT(7)
#define HAL_REO_CMD_FLG_UNBLK_CACHE BIT(8)
-/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* feilds */
+/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* fields */
#define HAL_REO_CMD_UPD0_RX_QUEUE_NUM BIT(8)
#define HAL_REO_CMD_UPD0_VLD BIT(9)
#define HAL_REO_CMD_UPD0_ALDC BIT(10)
@@ -725,7 +725,7 @@ enum hal_rx_buf_return_buf_manager {
#define HAL_REO_CMD_UPD0_PN_VALID BIT(29)
#define HAL_REO_CMD_UPD0_PN BIT(30)
-/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO1_* feilds */
+/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO1_* fields */
#define HAL_REO_CMD_UPD1_VLD BIT(16)
#define HAL_REO_CMD_UPD1_ALDC GENMASK(18, 17)
#define HAL_REO_CMD_UPD1_DIS_DUP_DETECTION BIT(19)
@@ -741,7 +741,7 @@ enum hal_rx_buf_return_buf_manager {
#define HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE BIT(30)
#define HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG BIT(31)
-/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO2_* feilds */
+/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO2_* fields */
#define HAL_REO_CMD_UPD2_SVLD BIT(10)
#define HAL_REO_CMD_UPD2_SSN GENMASK(22, 11)
#define HAL_REO_CMD_UPD2_SEQ_2K_ERR BIT(23)
@@ -965,6 +965,7 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
struct hal_srng_params *params);
int ath11k_hal_srng_init(struct ath11k_base *ath11k);
void ath11k_hal_srng_deinit(struct ath11k_base *ath11k);
+void ath11k_hal_srng_clear(struct ath11k_base *ab);
void ath11k_hal_dump_srng_stats(struct ath11k_base *ab);
void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab,
u32 **cfg, u32 *len);
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c
index 8f7dd43dc1bd..753bd93f0212 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.c
@@ -372,7 +372,8 @@ int ath11k_hal_wbm_desc_parse_err(struct ath11k_base *ab, void *desc,
ret_buf_mgr = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
wbm_desc->buf_addr_info.info1);
- if (ret_buf_mgr != HAL_RX_BUF_RBM_SW3_BM) {
+ if (ret_buf_mgr != HAL_RX_BUF_RBM_SW1_BM &&
+ ret_buf_mgr != HAL_RX_BUF_RBM_SW3_BM) {
ab->soc_stats.invalid_rbm++;
return -EINVAL;
}
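
The WBM error-descriptor check above now accepts SW1_BM in addition to SW3_BM. The return-buffer-manager field itself is extracted with FIELD_GET(); a tiny illustration with invented mask and enum names (the real layout lives in the HAL descriptor definitions):

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>

#define DEMO_BUF_ADDR_INFO1_RET_BUF_MGR	GENMASK(10, 8)	/* hypothetical layout */

enum demo_rbm {
	DEMO_RBM_WBM_IDLE = 0,
	DEMO_RBM_SW1 = 3,
	DEMO_RBM_SW3 = 5,
};

/* Return true when the descriptor was released by one of the SW return
 * buffer managers the host is allowed to reuse buffers from.
 */
static bool demo_rbm_is_valid(u32 info1)
{
	u32 rbm = FIELD_GET(DEMO_BUF_ADDR_INFO1_RET_BUF_MGR, info1);

	return rbm == DEMO_RBM_SW1 || rbm == DEMO_RBM_SW3;
}
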
diff --git a/drivers/net/wireless/ath/ath11k/hif.h b/drivers/net/wireless/ath/ath11k/hif.h
index 674ff772b181..cd9c4b838246 100644
--- a/drivers/net/wireless/ath/ath11k/hif.h
+++ b/drivers/net/wireless/ath/ath11k/hif.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _HIF_H_
@@ -18,7 +18,7 @@ struct ath11k_hif_ops {
int (*start)(struct ath11k_base *ab);
void (*stop)(struct ath11k_base *ab);
int (*power_up)(struct ath11k_base *ab);
- void (*power_down)(struct ath11k_base *ab);
+ void (*power_down)(struct ath11k_base *ab, bool is_suspend);
int (*suspend)(struct ath11k_base *ab);
int (*resume)(struct ath11k_base *ab);
int (*map_service_to_pipe)(struct ath11k_base *ab, u16 service_id,
@@ -31,6 +31,7 @@ struct ath11k_hif_ops {
void (*ce_irq_enable)(struct ath11k_base *ab);
void (*ce_irq_disable)(struct ath11k_base *ab);
void (*get_ce_msi_idx)(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx);
+ void (*coredump_download)(struct ath11k_base *ab);
};
static inline void ath11k_hif_ce_irq_enable(struct ath11k_base *ab)
@@ -67,12 +68,18 @@ static inline void ath11k_hif_irq_disable(struct ath11k_base *ab)
static inline int ath11k_hif_power_up(struct ath11k_base *ab)
{
+ if (!ab->hif.ops->power_up)
+ return -EOPNOTSUPP;
+
return ab->hif.ops->power_up(ab);
}
-static inline void ath11k_hif_power_down(struct ath11k_base *ab)
+static inline void ath11k_hif_power_down(struct ath11k_base *ab, bool is_suspend)
{
- ab->hif.ops->power_down(ab);
+ if (!ab->hif.ops->power_down)
+ return;
+
+ ab->hif.ops->power_down(ab, is_suspend);
}
static inline int ath11k_hif_suspend(struct ath11k_base *ab)
@@ -146,4 +153,10 @@ static inline void ath11k_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id,
*msi_data_idx = ce_id;
}
+static inline void ath11k_hif_coredump_download(struct ath11k_base *ab)
+{
+ if (ab->hif.ops->coredump_download)
+ ab->hif.ops->coredump_download(ab);
+}
+
#endif /* _HIF_H_ */
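
The hif wrappers above now tolerate a missing callback instead of dereferencing a NULL function pointer, and coredump_download is added as an optional hook. The same defensive pattern with invented ops names:

#include <linux/types.h>
#include <linux/errno.h>

struct demo_ops {
	int (*power_up)(void *ctx);
	void (*power_down)(void *ctx, bool is_suspend);
	void (*coredump_download)(void *ctx);	/* optional */
};

struct demo_bus {
	const struct demo_ops *ops;
	void *ctx;
};

static inline int demo_power_up(struct demo_bus *bus)
{
	if (!bus->ops->power_up)
		return -EOPNOTSUPP;

	return bus->ops->power_up(bus->ctx);
}

static inline void demo_coredump_download(struct demo_bus *bus)
{
	/* Purely optional hook: silently skip if the bus does not provide it. */
	if (bus->ops->coredump_download)
		bus->ops->coredump_download(bus->ctx);
}
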
diff --git a/drivers/net/wireless/ath/ath11k/htc.c b/drivers/net/wireless/ath/ath11k/htc.c
index 23054ab29a5e..4571d01cc33d 100644
--- a/drivers/net/wireless/ath/ath11k/htc.c
+++ b/drivers/net/wireless/ath/ath11k/htc.c
@@ -497,7 +497,7 @@ static u8 ath11k_htc_get_credit_allocation(struct ath11k_htc *htc,
static int ath11k_htc_setup_target_buffer_assignments(struct ath11k_htc *htc)
{
struct ath11k_htc_svc_tx_credits *serv_entry;
- u32 svc_id[] = {
+ static const u32 svc_id[] = {
ATH11K_HTC_SVC_ID_WMI_CONTROL,
ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1,
ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2,
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index 300322535766..52d9f4c13b13 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -227,6 +227,7 @@ struct ath11k_hw_params {
bool smp2p_wow_exit;
bool support_fw_mac_sequence;
bool support_dual_stations;
+ bool pdev_suspend;
};
struct ath11k_hw_ops {
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index e6acbff06749..3276fe443502 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <net/mac80211.h>
@@ -1037,7 +1037,7 @@ static int ath11k_mac_monitor_vdev_create(struct ath11k *ar)
struct ath11k_pdev *pdev = ar->pdev;
struct vdev_create_params param = {};
int bit, ret;
- u8 tmp_addr[6] = {0};
+ u8 tmp_addr[6] = {};
u16 nss;
lockdep_assert_held(&ar->conf_mutex);
@@ -1283,7 +1283,7 @@ static int ath11k_mac_config_ps(struct ath11k *ar)
return ret;
}
-static int ath11k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
+static int ath11k_mac_op_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct ath11k *ar = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
@@ -1529,17 +1529,29 @@ static int ath11k_mac_set_vif_params(struct ath11k_vif *arvif,
return ret;
}
-static int ath11k_mac_setup_bcn_tmpl_ema(struct ath11k_vif *arvif)
+static struct ath11k_vif *ath11k_mac_get_tx_arvif(struct ath11k_vif *arvif)
+{
+ struct ieee80211_bss_conf *link_conf, *tx_bss_conf;
+
+ lockdep_assert_wiphy(arvif->ar->hw->wiphy);
+
+ link_conf = &arvif->vif->bss_conf;
+ tx_bss_conf = wiphy_dereference(arvif->ar->hw->wiphy, link_conf->tx_bss_conf);
+ if (tx_bss_conf)
+ return ath11k_vif_to_arvif(tx_bss_conf->vif);
+
+ return NULL;
+}
+
+static int ath11k_mac_setup_bcn_tmpl_ema(struct ath11k_vif *arvif,
+ struct ath11k_vif *tx_arvif)
{
- struct ath11k_vif *tx_arvif;
struct ieee80211_ema_beacons *beacons;
int ret = 0;
bool nontx_vif_params_set = false;
u32 params = 0;
u8 i = 0;
- tx_arvif = ath11k_vif_to_arvif(arvif->vif->mbssid_tx_vif);
-
beacons = ieee80211_beacon_get_template_ema_list(tx_arvif->ar->hw,
tx_arvif->vif, 0);
if (!beacons || !beacons->cnt) {
@@ -1585,25 +1597,22 @@ static int ath11k_mac_setup_bcn_tmpl_ema(struct ath11k_vif *arvif)
return ret;
}
-static int ath11k_mac_setup_bcn_tmpl_mbssid(struct ath11k_vif *arvif)
+static int ath11k_mac_setup_bcn_tmpl_mbssid(struct ath11k_vif *arvif,
+ struct ath11k_vif *tx_arvif)
{
struct ath11k *ar = arvif->ar;
struct ath11k_base *ab = ar->ab;
- struct ath11k_vif *tx_arvif = arvif;
struct ieee80211_hw *hw = ar->hw;
struct ieee80211_vif *vif = arvif->vif;
struct ieee80211_mutable_offsets offs = {};
struct sk_buff *bcn;
int ret;
- if (vif->mbssid_tx_vif) {
- tx_arvif = ath11k_vif_to_arvif(vif->mbssid_tx_vif);
- if (tx_arvif != arvif) {
- ar = tx_arvif->ar;
- ab = ar->ab;
- hw = ar->hw;
- vif = tx_arvif->vif;
- }
+ if (tx_arvif != arvif) {
+ ar = tx_arvif->ar;
+ ab = ar->ab;
+ hw = ar->hw;
+ vif = tx_arvif->vif;
}
bcn = ieee80211_beacon_get_template(hw, vif, &offs, 0);
@@ -1632,6 +1641,7 @@ static int ath11k_mac_setup_bcn_tmpl_mbssid(struct ath11k_vif *arvif)
static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
{
struct ieee80211_vif *vif = arvif->vif;
+ struct ath11k_vif *tx_arvif;
if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
return 0;
@@ -1639,14 +1649,18 @@ static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
/* Target does not expect beacon templates for the already up
* non-transmitting interfaces, and results in a crash if sent.
*/
- if (vif->mbssid_tx_vif &&
- arvif != ath11k_vif_to_arvif(vif->mbssid_tx_vif) && arvif->is_up)
- return 0;
+ tx_arvif = ath11k_mac_get_tx_arvif(arvif);
+ if (tx_arvif) {
+ if (arvif != tx_arvif && arvif->is_up)
+ return 0;
- if (vif->bss_conf.ema_ap && vif->mbssid_tx_vif)
- return ath11k_mac_setup_bcn_tmpl_ema(arvif);
+ if (vif->bss_conf.ema_ap)
+ return ath11k_mac_setup_bcn_tmpl_ema(arvif, tx_arvif);
+ } else {
+ tx_arvif = arvif;
+ }
- return ath11k_mac_setup_bcn_tmpl_mbssid(arvif);
+ return ath11k_mac_setup_bcn_tmpl_mbssid(arvif, tx_arvif);
}
void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif)
@@ -1674,7 +1688,7 @@ static void ath11k_control_beaconing(struct ath11k_vif *arvif,
struct ieee80211_bss_conf *info)
{
struct ath11k *ar = arvif->ar;
- struct ath11k_vif *tx_arvif = NULL;
+ struct ath11k_vif *tx_arvif;
int ret = 0;
lockdep_assert_held(&arvif->ar->conf_mutex);
@@ -1697,15 +1711,11 @@ static void ath11k_control_beaconing(struct ath11k_vif *arvif,
return;
}
- arvif->tx_seq_no = 0x1000;
-
arvif->aid = 0;
ether_addr_copy(arvif->bssid, info->bssid);
- if (arvif->vif->mbssid_tx_vif)
- tx_arvif = ath11k_vif_to_arvif(arvif->vif->mbssid_tx_vif);
-
+ tx_arvif = ath11k_mac_get_tx_arvif(arvif);
ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
arvif->bssid,
tx_arvif ? tx_arvif->bssid : NULL,
@@ -2225,12 +2235,12 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
arg->rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
arg->rx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
+ arg->rx_mcs_set = ath11k_peer_assoc_h_vht_limit(arg->rx_mcs_set, vht_mcs_mask);
arg->tx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
- arg->tx_mcs_set = ath11k_peer_assoc_h_vht_limit(
- __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
+ arg->tx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
/* In IPQ8074 platform, VHT mcs rate 10 and 11 is enabled by default.
- * VHT mcs rate 10 and 11 is not suppoerted in 11ac standard.
+ * VHT mcs rate 10 and 11 is not supported in 11ac standard.
* so explicitly disable the VHT MCS rate 10 and 11 in 11ac mode.
*/
arg->tx_mcs_set &= ~IEEE80211_VHT_MCS_SUPPORT_0_11_MASK;
@@ -2512,10 +2522,10 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
he_tx_mcs = v;
}
v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
+ v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask);
arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_160);
- v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask);
arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
arg->peer_he_mcs_count++;
@@ -2525,10 +2535,10 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
default:
v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
+ v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask);
arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;
v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80);
- v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask);
arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;
arg->peer_he_mcs_count++;
@@ -3016,7 +3026,7 @@ static bool ath11k_mac_vif_recalc_sta_he_txbf(struct ath11k *ar,
struct ieee80211_sta_he_cap *he_cap)
{
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
- struct ieee80211_he_cap_elem he_cap_elem = {0};
+ struct ieee80211_he_cap_elem he_cap_elem = {};
struct ieee80211_sta_he_cap *cap_band = NULL;
struct cfg80211_chan_def def;
u32 param = WMI_VDEV_PARAM_SET_HEMU_MODE;
@@ -3753,7 +3763,7 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
ath11k_recalculate_mgmt_rate(ar, vif, &def);
if (changed & BSS_CHANGED_TWT) {
- struct wmi_twt_enable_params twt_params = {0};
+ struct wmi_twt_enable_params twt_params = {};
if (info->twt_requester || info->twt_responder) {
ath11k_wmi_fill_default_twt_params(&twt_params);
@@ -4018,6 +4028,150 @@ static int ath11k_start_scan(struct ath11k *ar,
return 0;
}
+static void ath11k_mac_fw_stats_reset(struct ath11k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs);
+ ath11k_fw_stats_vdevs_free(&ar->fw_stats.vdevs);
+ ar->fw_stats.num_vdev_recvd = 0;
+ ar->fw_stats.num_bcn_recvd = 0;
+ spin_unlock_bh(&ar->data_lock);
+}
+
+int ath11k_mac_fw_stats_request(struct ath11k *ar,
+ struct stats_request_params *req_param)
+{
+ struct ath11k_base *ab = ar->ab;
+ unsigned long time_left;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath11k_mac_fw_stats_reset(ar);
+
+ reinit_completion(&ar->fw_stats_complete);
+ reinit_completion(&ar->fw_stats_done);
+
+ ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
+
+ if (ret) {
+ ath11k_warn(ab, "could not request fw stats (%d)\n",
+ ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+	/* FW stats can get split when exceeding the stats data buffer limit.
+	 * In that case, since there is no end marker for the back-to-back
+	 * received 'update stats' events, we keep a 3 second timeout in case
+	 * fw_stats_done has not been marked yet.
+	 */
+ time_left = wait_for_completion_timeout(&ar->fw_stats_done, 3 * HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int ath11k_mac_get_fw_stats(struct ath11k *ar, u32 pdev_id,
+ u32 vdev_id, u32 stats_id)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct stats_request_params req_param;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON)
+ return -ENETDOWN;
+
+ req_param.pdev_id = pdev_id;
+ req_param.vdev_id = vdev_id;
+ req_param.stats_id = stats_id;
+
+ ret = ath11k_mac_fw_stats_request(ar, &req_param);
+ if (ret)
+ ath11k_warn(ab, "failed to request fw stats: %d\n", ret);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "debug get fw stat pdev id %d vdev id %d stats id 0x%x\n",
+ pdev_id, vdev_id, stats_id);
+
+ return ret;
+}
+
+static int ath11k_mac_handle_get_txpower(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ int *dbm)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_fw_stats_pdev *pdev;
+ int ret;
+
+	/* Final Tx power is the minimum of Target Power, CTL power, Regulatory
+	 * Power and PSD EIRP Power. We only know the Regulatory power from the
+	 * regulatory rules obtained. FW knows all of these powers and sets the
+	 * minimum of them. Hence, we request the FW pdev stats, in which FW
+	 * reports the minimum of all vdevs' channel Tx power.
+ */
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* Firmware doesn't provide Tx power during CAC hence no need to fetch
+ * the stats.
+ */
+ if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags))
+ return -EAGAIN;
+
+ ret = ath11k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0,
+ WMI_REQUEST_PDEV_STAT);
+ if (ret) {
+ ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
+ goto err_fallback;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ pdev = list_first_entry_or_null(&ar->fw_stats.pdevs,
+ struct ath11k_fw_stats_pdev, list);
+ if (!pdev) {
+ spin_unlock_bh(&ar->data_lock);
+ goto err_fallback;
+ }
+
+ /* tx power is set as 2 units per dBm in FW. */
+ *dbm = pdev->chan_tx_power / 2;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower from firmware %d, reported %d dBm\n",
+ pdev->chan_tx_power, *dbm);
+ return 0;
+
+err_fallback:
+	/* We didn't get txpower from FW, so fall back to vif->bss_conf.txpower */
+ *dbm = vif->bss_conf.txpower;
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower from firmware NaN, reported %d dBm\n",
+ *dbm);
+ return 0;
+}
+
+static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id,
+ int *dbm)
+{
+ struct ath11k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+ ret = ath11k_mac_handle_get_txpower(ar, vif, dbm);
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_scan_request *hw_req)
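
The firmware-stats request added above waits on two completions: one for the first 'update stats' event (1 s) and a longer one for the end of a possibly split event series (3 s). A reduced sketch of that two-stage wait, with hypothetical names:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct demo_stats_ctx {
	struct completion first_event;	/* first stats event received */
	struct completion done;		/* full (possibly split) series received */
};

static int demo_wait_for_stats(struct demo_stats_ctx *ctx)
{
	unsigned long left;

	reinit_completion(&ctx->first_event);
	reinit_completion(&ctx->done);

	/* ...send the stats request command to firmware here... */

	left = wait_for_completion_timeout(&ctx->first_event, 1 * HZ);
	if (!left)
		return -ETIMEDOUT;

	/* Split events carry no end marker, so allow extra time for 'done'. */
	left = wait_for_completion_timeout(&ctx->done, 3 * HZ);
	if (!left)
		return -ETIMEDOUT;

	return 0;
}
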
@@ -4307,6 +4461,40 @@ static int ath11k_clear_peer_keys(struct ath11k_vif *arvif,
return first_errno;
}
+static int ath11k_set_group_keys(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_base *ab = ar->ab;
+ const u8 *addr = arvif->bssid;
+ int i, ret, first_errno = 0;
+ struct ath11k_peer *peer;
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find(ab, arvif->vdev_id, addr);
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!peer)
+ return -ENOENT;
+
+ for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+ struct ieee80211_key_conf *key = peer->keys[i];
+
+ if (!key || (key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ continue;
+
+ ret = ath11k_install_key(arvif, key, SET_KEY, addr,
+ WMI_KEY_GROUP);
+ if (ret < 0 && first_errno == 0)
+ first_errno = ret;
+
+ if (ret < 0)
+ ath11k_warn(ab, "failed to set group key of idx %d for vdev %d: %d\n",
+ i, arvif->vdev_id, ret);
+ }
+
+ return first_errno;
+}
+
static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
@@ -4316,6 +4504,7 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ath11k_peer *peer;
struct ath11k_sta *arsta;
+ bool is_ap_with_no_sta;
const u8 *peer_addr;
int ret = 0;
u32 flags = 0;
@@ -4372,20 +4561,61 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
}
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
- flags |= WMI_KEY_PAIRWISE;
+ flags = WMI_KEY_PAIRWISE;
else
- flags |= WMI_KEY_GROUP;
+ flags = WMI_KEY_GROUP;
- ret = ath11k_install_key(arvif, key, cmd, peer_addr, flags);
- if (ret) {
- ath11k_warn(ab, "ath11k_install_key failed (%d)\n", ret);
- goto exit;
- }
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "%s for peer %pM on vdev %d flags 0x%X, type = %d, num_sta %d\n",
+ cmd == SET_KEY ? "SET_KEY" : "DEL_KEY", peer_addr, arvif->vdev_id,
+ flags, arvif->vdev_type, arvif->num_stations);
+
+ /* Allow group key clearing only in AP mode when no stations are
+ * associated. There is a known race condition in firmware where
+ * group addressed packets may be dropped if the key is cleared
+ * and immediately set again during rekey.
+ *
+ * During GTK rekey, mac80211 issues a clear key (if the old key
+ * exists) followed by an install key operation for same key
+ * index. This causes ath11k to send two WMI commands in quick
+ * succession: one to clear the old key and another to install the
+ * new key in the same slot.
+ *
+	 * Under certain conditions, especially under high load or in
+	 * time-sensitive scenarios, firmware may process these commands
+ * asynchronously in a way that firmware assumes the key is
+ * cleared whereas hardware has a valid key. This inconsistency
+ * between hardware and firmware leads to group addressed packet
+ * drops after rekey.
+ * Only setting the same key again can restore a valid key in
+ * firmware and allow packets to be transmitted.
+ *
+	 * There is a use case where an AP can transition from secure mode
+	 * to open mode without a vdev restart by just deleting all
+	 * associated peers and clearing keys; hence allow clearing the key
+	 * for that case alone. Mark arvif->reinstall_group_keys in such cases
+ * and reinstall the same key when the first peer is added,
+ * allowing firmware to recover from the race if it had occurred.
+ */
- ret = ath11k_dp_peer_rx_pn_replay_config(arvif, peer_addr, cmd, key);
- if (ret) {
- ath11k_warn(ab, "failed to offload PN replay detection %d\n", ret);
- goto exit;
+ is_ap_with_no_sta = (vif->type == NL80211_IFTYPE_AP &&
+ !arvif->num_stations);
+ if (flags == WMI_KEY_PAIRWISE || cmd == SET_KEY || is_ap_with_no_sta) {
+ ret = ath11k_install_key(arvif, key, cmd, peer_addr, flags);
+ if (ret) {
+ ath11k_warn(ab, "ath11k_install_key failed (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = ath11k_dp_peer_rx_pn_replay_config(arvif, peer_addr, cmd, key);
+ if (ret) {
+ ath11k_warn(ab, "failed to offload PN replay detection %d\n",
+ ret);
+ goto exit;
+ }
+
+ if (flags == WMI_KEY_GROUP && cmd == SET_KEY && is_ap_with_no_sta)
+ arvif->reinstall_group_keys = true;
}
spin_lock_bh(&ab->base_lock);
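
The key-install path above only forwards a group-key delete to firmware when the AP has no associated stations, to avoid the rekey race described in the comment. The decision itself reduces to something like the helper below (hypothetical names, not the driver's actual code):

#include <linux/types.h>

enum demo_key_cmd { DEMO_SET_KEY, DEMO_DISABLE_KEY };

/* Forward the key operation to firmware only when it is safe: pairwise keys
 * and key installs always go through; a group-key delete is only sent when
 * the AP currently has no associated stations.
 */
static bool demo_should_send_key_cmd(bool pairwise, enum demo_key_cmd cmd,
				     bool is_ap, int num_stations)
{
	bool ap_with_no_sta = is_ap && num_stations == 0;

	return pairwise || cmd == DEMO_SET_KEY || ap_with_no_sta;
}
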
@@ -4984,6 +5214,7 @@ static int ath11k_mac_inc_num_stations(struct ath11k_vif *arvif,
return -ENOBUFS;
ar->num_stations++;
+ arvif->num_stations++;
return 0;
}
@@ -4999,6 +5230,7 @@ static void ath11k_mac_dec_num_stations(struct ath11k_vif *arvif,
return;
ar->num_stations--;
+ arvif->num_stations--;
}
static u32 ath11k_mac_ieee80211_sta_bw_to_wmi(struct ath11k *ar,
@@ -5206,6 +5438,45 @@ exit:
return ret;
}
+static int ath11k_mac_op_conf_tx_mu_edca(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k *ar = hw->priv;
+ struct wmi_wmm_params_arg *p;
+ int ret;
+
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ p = &arvif->muedca_params.ac_vo;
+ break;
+ case IEEE80211_AC_VI:
+ p = &arvif->muedca_params.ac_vi;
+ break;
+ case IEEE80211_AC_BE:
+ p = &arvif->muedca_params.ac_be;
+ break;
+ case IEEE80211_AC_BK:
+ p = &arvif->muedca_params.ac_bk;
+ break;
+ default:
+ ath11k_warn(ar->ab, "error ac: %d", ac);
+ return -EINVAL;
+ }
+
+ p->cwmin = u8_get_bits(params->mu_edca_param_rec.ecw_min_max, GENMASK(3, 0));
+ p->cwmax = u8_get_bits(params->mu_edca_param_rec.ecw_min_max, GENMASK(7, 4));
+ p->aifs = u8_get_bits(params->mu_edca_param_rec.aifsn, GENMASK(3, 0));
+ p->txop = params->mu_edca_param_rec.mu_edca_timer;
+
+ ret = ath11k_wmi_send_wmm_update_cmd_tlv(ar, arvif->vdev_id,
+ &arvif->muedca_params,
+ WMI_WMM_PARAM_TYPE_11AX_MU_EDCA);
+ return ret;
+}
+
static int ath11k_mac_op_conf_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
unsigned int link_id, u16 ac,
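
The new MU-EDCA handler above unpacks the ECWmin/ECWmax and AIFSN fields from single-byte records with u8_get_bits(). A small worked example with an invented demo_wmm structure; the nibble layout shown follows the code above:

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>

struct demo_wmm {
	u8 cwmin, cwmax, aifs, txop;
};

static void demo_fill_mu_edca(struct demo_wmm *p, u8 ecw_min_max, u8 aifsn,
			      u8 mu_edca_timer)
{
	/* Low nibble is ECWmin, high nibble is ECWmax. */
	p->cwmin = u8_get_bits(ecw_min_max, GENMASK(3, 0));
	p->cwmax = u8_get_bits(ecw_min_max, GENMASK(7, 4));
	/* AIFSN lives in the low nibble of its byte. */
	p->aifs = u8_get_bits(aifsn, GENMASK(3, 0));
	p->txop = mu_edca_timer;
}

/* With ecw_min_max = 0x95: cwmin = 0x5, cwmax = 0x9. */
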
@@ -5244,12 +5515,22 @@ static int ath11k_mac_op_conf_tx(struct ieee80211_hw *hw,
p->txop = params->txop;
ret = ath11k_wmi_send_wmm_update_cmd_tlv(ar, arvif->vdev_id,
- &arvif->wmm_params);
+ &arvif->wmm_params,
+ WMI_WMM_PARAM_TYPE_LEGACY);
if (ret) {
ath11k_warn(ar->ab, "failed to set wmm params: %d\n", ret);
goto exit;
}
+ if (params->mu_edca) {
+ ret = ath11k_mac_op_conf_tx_mu_edca(hw, vif, link_id, ac,
+ params);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set mu_edca params: %d\n", ret);
+ goto exit;
+ }
+ }
+
ret = ath11k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
if (ret)
@@ -5264,7 +5545,7 @@ static struct ieee80211_sta_ht_cap
ath11k_create_ht_cap(struct ath11k *ar, u32 ar_ht_cap, u32 rate_cap_rx_chainmask)
{
int i;
- struct ieee80211_sta_ht_cap ht_cap = {0};
+ struct ieee80211_sta_ht_cap ht_cap = {};
u32 ar_vht_cap = ar->pdev->cap.vht_cap;
if (!(ar_ht_cap & WMI_HT_CAP_ENABLED))
@@ -5338,8 +5619,6 @@ static int ath11k_mac_set_txbf_conf(struct ath11k_vif *arvif)
if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) {
nsts = vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
- if (nsts > (ar->num_rx_chains - 1))
- nsts = ar->num_rx_chains - 1;
value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
}
@@ -5423,9 +5702,6 @@ static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap)
/* Enable Beamformee STS Field only if SU BF is enabled */
if (subfee) {
- if (nsts > (ar->num_rx_chains - 1))
- nsts = ar->num_rx_chains - 1;
-
nsts <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
*vht_cap |= nsts;
@@ -5436,7 +5712,7 @@ static struct ieee80211_sta_vht_cap
ath11k_create_vht_cap(struct ath11k *ar, u32 rate_cap_tx_chainmask,
u32 rate_cap_rx_chainmask)
{
- struct ieee80211_sta_vht_cap vht_cap = {0};
+ struct ieee80211_sta_vht_cap vht_cap = {};
u16 txmcs_map, rxmcs_map;
int i;
@@ -5975,6 +6251,159 @@ static void ath11k_mgmt_over_wmi_tx_purge(struct ath11k *ar)
ath11k_mgmt_over_wmi_tx_drop(ar, skb);
}
+static int ath11k_mac_mgmt_action_frame_fill_elem_data(struct ath11k_vif *arvif,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ u8 category, *buf, iv_len, action_code, dialog_token;
+ int cur_tx_power, max_tx_power;
+ struct ath11k *ar = arvif->ar;
+ struct cfg80211_chan_def def;
+ struct ath11k_skb_cb *skb_cb;
+ struct ieee80211_mgmt *mgmt;
+ unsigned int remaining_len;
+ bool has_protected;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* make sure category field is present */
+ if (skb->len < IEEE80211_MIN_ACTION_SIZE)
+ return -EINVAL;
+
+ remaining_len = skb->len - IEEE80211_MIN_ACTION_SIZE;
+ has_protected = ieee80211_has_protected(hdr->frame_control);
+
+ /* With SW crypto and a protected header (PMF), the packet is already
+ * encrypted at this point, so we cannot insert data into it.
+ */
+ if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags) &&
+ has_protected)
+ return 0;
+
+ mgmt = (struct ieee80211_mgmt *)hdr;
+ buf = (u8 *)&mgmt->u.action;
+
+ /* An FCTL_PROTECTED frame may have extra space added for the cipher
+ * header (IV); skip over that many bytes if it is present.
+ */
+ if (has_protected) {
+ skb_cb = ATH11K_SKB_CB(skb);
+
+ switch (skb_cb->cipher) {
+ /* Cipher suites that have %IEEE80211_KEY_FLAG_GENERATE_IV_MGMT set in
+ * the key need to be handled here. See ath11k_install_key()
+ */
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ iv_len = IEEE80211_CCMP_HDR_LEN;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ iv_len = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (remaining_len < iv_len)
+ return -EINVAL;
+
+ buf += iv_len;
+ remaining_len -= iv_len;
+ }
+
+ category = *buf++;
+ /* The category code is already accounted for in %IEEE80211_MIN_ACTION_SIZE,
+ * hence there is no need to adjust remaining_len
+ */
+
+ switch (category) {
+ case WLAN_CATEGORY_RADIO_MEASUREMENT:
+ /* need action code and dialog token */
+ if (remaining_len < 2)
+ return -EINVAL;
+
+ /* Packet Format:
+ * Action Code | Dialog Token | Variable Len (based on Action Code)
+ */
+ action_code = *buf++;
+ dialog_token = *buf++;
+ remaining_len -= 2;
+
+ if (ath11k_mac_vif_chan(arvif->vif, &def))
+ return -ENOENT;
+
+ cur_tx_power = arvif->vif->bss_conf.txpower;
+ max_tx_power = min(def.chan->max_reg_power, (int)ar->max_tx_power / 2);
+ ath11k_mac_handle_get_txpower(ar, arvif->vif, &cur_tx_power);
+
+ switch (action_code) {
+ case WLAN_RM_ACTION_LINK_MEASUREMENT_REQUEST:
+ /* need variable fields to be present in len */
+ if (remaining_len < 2)
+ return -EINVAL;
+
+ /* Variable length format as defined in IEEE 802.11-2024,
+ * Figure 9-1187-Link Measurement Request frame Action field
+ * format.
+ * Transmit Power | Max Tx Power
+ * We fill both of these.
+ */
+ *buf++ = cur_tx_power;
+ *buf = max_tx_power;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "RRM: Link Measurement Req dialog_token %u cur_tx_power %d max_tx_power %d\n",
+ dialog_token, cur_tx_power, max_tx_power);
+ break;
+ case WLAN_RM_ACTION_LINK_MEASUREMENT_REPORT:
+ /* need variable fields to be present in len */
+ if (remaining_len < 3)
+ return -EINVAL;
+
+ /* Variable length format as defined in IEEE 802.11-2024,
+ * Figure 9-1188-Link Measurement Report frame Action field format
+ * TPC Report | Variable Fields
+ *
+ * TPC Report Format:
+ * Element ID | Len | Tx Power | Link Margin
+ *
+ * We fill Tx power in the TPC Report (2nd index)
+ */
+ buf[2] = cur_tx_power;
+
+ /* TODO: At present, Link margin data is not present so can't
+ * really fill it now. Once it is available, it can be added
+ * here
+ */
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "RRM: Link Measurement Report dialog_token %u cur_tx_power %d\n",
+ dialog_token, cur_tx_power);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ /* nothing to fill */
+ return 0;
+ }
+
+ return 0;
+}
+
+static int ath11k_mac_mgmt_frame_fill_elem_data(struct ath11k_vif *arvif,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+ if (!ieee80211_is_action(hdr->frame_control))
+ return 0;
+
+ return ath11k_mac_mgmt_action_frame_fill_elem_data(arvif, skb);
+}
+
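+ /* Illustrative sketch, not a driver structure: the byte layout that
+  * ath11k_mac_mgmt_action_frame_fill_elem_data() above walks for a Link
+  * Measurement Request action field (Category | Action | Dialog Token |
+  * TX Power | Max TX Power). Names here are assumptions for the example only.
+  *
+  *   #include <stdint.h>
+  *
+  *   struct lm_request_action_example {
+  *           uint8_t category;      // WLAN_CATEGORY_RADIO_MEASUREMENT
+  *           uint8_t action_code;   // WLAN_RM_ACTION_LINK_MEASUREMENT_REQUEST
+  *           uint8_t dialog_token;
+  *           int8_t  tx_power;      // driver fills the current vif tx power
+  *           int8_t  max_tx_power;  // driver fills min(regulatory power, hw max / 2)
+  *   } __attribute__((packed));
+  */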
static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work)
{
struct ath11k *ar = container_of(work, struct ath11k, wmi_mgmt_tx_work);
@@ -5994,6 +6423,19 @@ static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work)
arvif = ath11k_vif_to_arvif(skb_cb->vif);
mutex_lock(&ar->conf_mutex);
if (ar->allocated_vdev_map & (1LL << arvif->vdev_id)) {
+ /* Fill in the data which is required to be filled by the driver
+ * For example: Max Tx power in Link Measurement Request/Report
+ */
+ ret = ath11k_mac_mgmt_frame_fill_elem_data(arvif, skb);
+ if (ret) {
+ /* If the data could not be filled for any reason, still
+ * transmit the packet rather than dropping it.
+ */
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Failed to fill the required data for the mgmt packet err %d\n",
+ ret);
+ }
+
ret = ath11k_mac_mgmt_tx_wmi(ar, arvif, skb);
if (ret) {
ath11k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n",
@@ -6105,7 +6547,7 @@ void ath11k_mac_drain_tx(struct ath11k *ar)
static int ath11k_mac_config_mon_status_default(struct ath11k *ar, bool enable)
{
- struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ struct htt_rx_ring_tlv_filter tlv_filter = {};
struct ath11k_base *ab = ar->ab;
int i, ret = 0;
u32 ring_id;
@@ -6290,6 +6732,7 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ath11k *ar = hw->priv;
struct htt_ppdu_stats_info *ppdu_stats, *tmp;
+ struct scan_chan_list_params *params;
int ret;
ath11k_mac_drain_tx(ar);
@@ -6305,6 +6748,7 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw, bool suspend)
mutex_unlock(&ar->conf_mutex);
cancel_delayed_work_sync(&ar->scan.timeout);
+ cancel_work_sync(&ar->channel_update_work);
cancel_work_sync(&ar->regd_update_work);
cancel_work_sync(&ar->ab->update_11d_work);
@@ -6314,10 +6758,19 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw, bool suspend)
}
spin_lock_bh(&ar->data_lock);
+
list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) {
list_del(&ppdu_stats->list);
kfree(ppdu_stats);
}
+
+ while ((params = list_first_entry_or_null(&ar->channel_update_queue,
+ struct scan_chan_list_params,
+ list))) {
+ list_del(&params->list);
+ kfree(params);
+ }
+
spin_unlock_bh(&ar->data_lock);
rcu_assign_pointer(ar->ab->pdevs_active[ar->pdev_idx], NULL);
@@ -6332,23 +6785,20 @@ static int ath11k_mac_setup_vdev_params_mbssid(struct ath11k_vif *arvif,
{
struct ath11k *ar = arvif->ar;
struct ath11k_vif *tx_arvif;
- struct ieee80211_vif *tx_vif;
*tx_vdev_id = 0;
- tx_vif = arvif->vif->mbssid_tx_vif;
- if (!tx_vif) {
+ tx_arvif = ath11k_mac_get_tx_arvif(arvif);
+ if (!tx_arvif) {
*flags = WMI_HOST_VDEV_FLAGS_NON_MBSSID_AP;
return 0;
}
- tx_arvif = ath11k_vif_to_arvif(tx_vif);
-
if (arvif->vif->bss_conf.nontransmitted) {
- if (ar->hw->wiphy != ieee80211_vif_to_wdev(tx_vif)->wiphy)
+ if (ar->hw->wiphy != tx_arvif->ar->hw->wiphy)
return -EINVAL;
*flags = WMI_HOST_VDEV_FLAGS_NON_TRANSMIT_AP;
- *tx_vdev_id = ath11k_vif_to_arvif(tx_vif)->vdev_id;
+ *tx_vdev_id = tx_arvif->vdev_id;
} else if (tx_arvif == arvif) {
*flags = WMI_HOST_VDEV_FLAGS_TRANSMIT_AP;
} else {
@@ -6616,7 +7066,7 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
struct ath11k *ar = hw->priv;
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
- struct vdev_create_params vdev_param = {0};
+ struct vdev_create_params vdev_param = {};
struct peer_create_params peer_param;
u32 param_id, param_value;
u16 nss;
@@ -6952,7 +7402,7 @@ err_vdev_del:
/* Recalc txpower for remaining vdev */
ath11k_mac_txpower_recalc(ar);
- /* TODO: recal traffic pause state based on the available vdevs */
+ /* TODO: recalc traffic pause state based on the available vdevs */
mutex_unlock(&ar->conf_mutex);
}
@@ -6982,7 +7432,8 @@ static void ath11k_mac_op_configure_filter(struct ieee80211_hw *hw,
mutex_unlock(&ar->conf_mutex);
}
-static int ath11k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+static int ath11k_mac_op_get_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 *tx_ant, u32 *rx_ant)
{
struct ath11k *ar = hw->priv;
@@ -6996,7 +7447,8 @@ static int ath11k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *
return 0;
}
-static int ath11k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+static int ath11k_mac_op_set_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 tx_ant, u32 rx_ant)
{
struct ath11k *ar = hw->priv;
int ret;
@@ -7308,8 +7760,7 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
int n_vifs)
{
struct ath11k_base *ab = ar->ab;
- struct ath11k_vif *arvif, *tx_arvif = NULL;
- struct ieee80211_vif *mbssid_tx_vif;
+ struct ath11k_vif *arvif, *tx_arvif;
int ret;
int i;
bool monitor_vif = false;
@@ -7363,10 +7814,7 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
ath11k_warn(ab, "failed to update bcn tmpl during csa: %d\n",
ret);
- mbssid_tx_vif = arvif->vif->mbssid_tx_vif;
- if (mbssid_tx_vif)
- tx_arvif = ath11k_vif_to_arvif(mbssid_tx_vif);
-
+ tx_arvif = ath11k_mac_get_tx_arvif(arvif);
ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
arvif->bssid,
tx_arvif ? tx_arvif->bssid : NULL,
@@ -8124,7 +8572,8 @@ ath11k_set_vdev_param_to_all_vifs(struct ath11k *ar, int param, u32 value)
/* mac80211 stores device specific RTS/Fragmentation threshold value,
* this is set interface specific to firmware from ath11k driver
*/
-static int ath11k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+static int ath11k_mac_op_set_rts_threshold(struct ieee80211_hw *hw,
+ int radio_idx, u32 value)
{
struct ath11k *ar = hw->priv;
int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
@@ -8132,7 +8581,8 @@ static int ath11k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
return ath11k_set_vdev_param_to_all_vifs(ar, param_id, value);
}
-static int ath11k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
+static int ath11k_mac_op_set_frag_threshold(struct ieee80211_hw *hw,
+ int radio_idx, u32 value)
{
/* Even though there's a WMI vdev param for fragmentation threshold no
* known firmware actually implements it. Moreover it is not possible to
@@ -8682,9 +9132,9 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
return ret;
}
- ieee80211_iterate_stations_atomic(ar->hw,
- ath11k_mac_disable_peer_fixed_rate,
- arvif);
+ ieee80211_iterate_stations_mtx(ar->hw,
+ ath11k_mac_disable_peer_fixed_rate,
+ arvif);
} else if (ath11k_mac_bitrate_mask_get_single_nss(ar, arvif, band, mask,
&single_nss)) {
rate = WMI_FIXED_RATE_NONE;
@@ -8751,9 +9201,9 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
}
mutex_lock(&ar->conf_mutex);
- ieee80211_iterate_stations_atomic(ar->hw,
- ath11k_mac_disable_peer_fixed_rate,
- arvif);
+ ieee80211_iterate_stations_mtx(ar->hw,
+ ath11k_mac_disable_peer_fixed_rate,
+ arvif);
arvif->bitrate_mask = *mask;
ieee80211_iterate_stations_atomic(ar->hw,
@@ -8973,11 +9423,12 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
ath11k_mac_put_chain_rssi(sinfo, arsta, "ppdu", false);
+ mutex_lock(&ar->conf_mutex);
if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL)) &&
arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA &&
ar->ab->hw_params.supports_rssi_stats &&
- !ath11k_debugfs_get_fw_stats(ar, ar->pdev->pdev_id, 0,
- WMI_REQUEST_RSSI_PER_CHAIN_STAT)) {
+ !ath11k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0,
+ WMI_REQUEST_RSSI_PER_CHAIN_STAT)) {
ath11k_mac_put_chain_rssi(sinfo, arsta, "fw stats", true);
}
@@ -8985,9 +9436,10 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
if (!signal &&
arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA &&
ar->ab->hw_params.supports_rssi_stats &&
- !(ath11k_debugfs_get_fw_stats(ar, ar->pdev->pdev_id, 0,
- WMI_REQUEST_VDEV_STAT)))
+ !(ath11k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0,
+ WMI_REQUEST_VDEV_STAT)))
signal = arsta->rssi_beacon;
+ mutex_unlock(&ar->conf_mutex);
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"sta statistics db2dbm %u rssi comb %d rssi beacon %d\n",
@@ -9322,103 +9774,6 @@ exit:
return ret;
}
-static int ath11k_fw_stats_request(struct ath11k *ar,
- struct stats_request_params *req_param)
-{
- struct ath11k_base *ab = ar->ab;
- unsigned long time_left;
- int ret;
-
- lockdep_assert_held(&ar->conf_mutex);
-
- spin_lock_bh(&ar->data_lock);
- ar->fw_stats_done = false;
- ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs);
- spin_unlock_bh(&ar->data_lock);
-
- reinit_completion(&ar->fw_stats_complete);
-
- ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
- if (ret) {
- ath11k_warn(ab, "could not request fw stats (%d)\n",
- ret);
- return ret;
- }
-
- time_left = wait_for_completion_timeout(&ar->fw_stats_complete,
- 1 * HZ);
-
- if (!time_left)
- return -ETIMEDOUT;
-
- return 0;
-}
-
-static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- int *dbm)
-{
- struct ath11k *ar = hw->priv;
- struct ath11k_base *ab = ar->ab;
- struct stats_request_params req_param = {0};
- struct ath11k_fw_stats_pdev *pdev;
- int ret;
-
- /* Final Tx power is minimum of Target Power, CTL power, Regulatory
- * Power, PSD EIRP Power. We just know the Regulatory power from the
- * regulatory rules obtained. FW knows all these power and sets the min
- * of these. Hence, we request the FW pdev stats in which FW reports
- * the minimum of all vdev's channel Tx power.
- */
- mutex_lock(&ar->conf_mutex);
-
- if (ar->state != ATH11K_STATE_ON)
- goto err_fallback;
-
- /* Firmware doesn't provide Tx power during CAC hence no need to fetch
- * the stats.
- */
- if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
- mutex_unlock(&ar->conf_mutex);
- return -EAGAIN;
- }
-
- req_param.pdev_id = ar->pdev->pdev_id;
- req_param.stats_id = WMI_REQUEST_PDEV_STAT;
-
- ret = ath11k_fw_stats_request(ar, &req_param);
- if (ret) {
- ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
- goto err_fallback;
- }
-
- spin_lock_bh(&ar->data_lock);
- pdev = list_first_entry_or_null(&ar->fw_stats.pdevs,
- struct ath11k_fw_stats_pdev, list);
- if (!pdev) {
- spin_unlock_bh(&ar->data_lock);
- goto err_fallback;
- }
-
- /* tx power is set as 2 units per dBm in FW. */
- *dbm = pdev->chan_tx_power / 2;
-
- spin_unlock_bh(&ar->data_lock);
- mutex_unlock(&ar->conf_mutex);
-
- ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower from firmware %d, reported %d dBm\n",
- pdev->chan_tx_power, *dbm);
- return 0;
-
-err_fallback:
- mutex_unlock(&ar->conf_mutex);
- /* We didn't get txpower from FW. Hence, relying on vif->bss_conf.txpower */
- *dbm = vif->bss_conf.txpower;
- ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower from firmware NaN, reported %d dBm\n",
- *dbm);
- return 0;
-}
-
static int ath11k_mac_station_add(struct ath11k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@@ -9438,6 +9793,21 @@ static int ath11k_mac_station_add(struct ath11k *ar,
goto exit;
}
+ /* The driver allows the DEL KEY followed by SET KEY sequence for
+ * group keys only while no clients are associated. If the firmware
+ * hit the race during that window, reinstalling the same key when
+ * the first station connects allows it to recover from the race.
+ */
+ if (arvif->num_stations == 1 && arvif->reinstall_group_keys) {
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "set group keys on 1st station add for vdev %d\n",
+ arvif->vdev_id);
+ ret = ath11k_set_group_keys(arvif);
+ if (ret)
+ goto dec_num_station;
+ arvif->reinstall_group_keys = false;
+ }
+
arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL);
if (!arsta->rx_stats) {
ret = -ENOMEM;
@@ -9913,12 +10283,17 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
struct ath11k_base *ab = ar->ab;
struct ieee80211_iface_combination *combinations;
struct ieee80211_iface_limit *limits;
- int n_limits;
+ int n_limits, n_combos;
bool p2p;
p2p = ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_P2P_DEVICE);
- combinations = kzalloc(sizeof(*combinations), GFP_KERNEL);
+ if (ab->hw_params.support_dual_stations)
+ n_combos = 2;
+ else
+ n_combos = 1;
+
+ combinations = kcalloc(n_combos, sizeof(*combinations), GFP_KERNEL);
if (!combinations)
return -ENOMEM;
@@ -9933,7 +10308,9 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
return -ENOMEM;
}
+ limits[0].max = 1;
limits[0].types |= BIT(NL80211_IFTYPE_STATION);
+ limits[1].max = 16;
limits[1].types |= BIT(NL80211_IFTYPE_AP);
if (IS_ENABLED(CONFIG_MAC80211_MESH) &&
ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
@@ -9943,25 +10320,24 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
combinations[0].n_limits = n_limits;
combinations[0].beacon_int_infra_match = true;
combinations[0].beacon_int_min_gcd = 100;
+ combinations[0].max_interfaces = 16;
+ combinations[0].num_different_channels = 1;
+ combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_80P80) |
+ BIT(NL80211_CHAN_WIDTH_160);
if (ab->hw_params.support_dual_stations) {
limits[0].max = 2;
- limits[1].max = 1;
- combinations[0].max_interfaces = ab->hw_params.num_vdevs;
- combinations[0].num_different_channels = 2;
- } else {
- limits[0].max = 1;
- limits[1].max = 16;
-
- combinations[0].max_interfaces = 16;
- combinations[0].num_different_channels = 1;
- combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
- BIT(NL80211_CHAN_WIDTH_20) |
- BIT(NL80211_CHAN_WIDTH_40) |
- BIT(NL80211_CHAN_WIDTH_80) |
- BIT(NL80211_CHAN_WIDTH_80P80) |
- BIT(NL80211_CHAN_WIDTH_160);
+ combinations[1].limits = limits;
+ combinations[1].n_limits = n_limits;
+ combinations[1].beacon_int_infra_match = true;
+ combinations[1].beacon_int_min_gcd = 100;
+ combinations[1].max_interfaces = ab->hw_params.num_vdevs;
+ combinations[1].num_different_channels = 2;
}
if (p2p) {
@@ -9972,7 +10348,7 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
}
ar->hw->wiphy->iface_combinations = combinations;
- ar->hw->wiphy->n_iface_combinations = 1;
+ ar->hw->wiphy->n_iface_combinations = n_combos;
return 0;
}
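/* Summary of the combinations built above: combinations[0] covers the
 * single-channel case (up to 16 interfaces, radar detect widths set). On
 * hardware with support_dual_stations, limits[0].max is raised to 2 and a
 * second combination is added that allows operation on two different
 * channels, with max_interfaces taken from hw_params.num_vdevs and no
 * radar_detect_widths.
 */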
@@ -10020,6 +10396,7 @@ static const struct wiphy_iftype_ext_capab ath11k_iftypes_ext_capa[] = {
static void __ath11k_mac_unregister(struct ath11k *ar)
{
+ cancel_work_sync(&ar->channel_update_work);
cancel_work_sync(&ar->regd_update_work);
ieee80211_unregister_hw(ar->hw);
@@ -10166,6 +10543,8 @@ static int __ath11k_mac_register(struct ath11k *ar)
ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
NL80211_FEATURE_AP_SCAN;
+ ar->hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
+
ar->max_num_stations = TARGET_NUM_STATIONS(ab);
ar->max_num_peers = TARGET_NUM_PEERS_PDEV(ab);
@@ -10312,7 +10691,7 @@ int ath11k_mac_register(struct ath11k_base *ab)
struct ath11k_pdev *pdev;
int i;
int ret;
- u8 mac_addr[ETH_ALEN] = {0};
+ u8 mac_addr[ETH_ALEN] = {};
if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
return 0;
@@ -10419,6 +10798,8 @@ int ath11k_mac_allocate(struct ath11k_base *ab)
init_completion(&ar->thermal.wmi_sync);
INIT_DELAYED_WORK(&ar->scan.timeout, ath11k_scan_timeout_work);
+ INIT_WORK(&ar->channel_update_work, ath11k_regd_update_chan_list_work);
+ INIT_LIST_HEAD(&ar->channel_update_queue);
INIT_WORK(&ar->regd_update_work, ath11k_regd_update_work);
INIT_WORK(&ar->wmi_mgmt_tx_work, ath11k_mgmt_over_wmi_tx_work);
diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
index f5800fbecff8..5e61eea1bb03 100644
--- a/drivers/net/wireless/ath/ath11k/mac.h
+++ b/drivers/net/wireless/ath/ath11k/mac.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023, 2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_MAC_H
@@ -179,4 +179,6 @@ int ath11k_mac_vif_set_keepalive(struct ath11k_vif *arvif,
void ath11k_mac_fill_reg_tpc_info(struct ath11k *ar,
struct ieee80211_vif *vif,
struct ieee80211_chanctx_conf *ctx);
+int ath11k_mac_fw_stats_request(struct ath11k *ar,
+ struct stats_request_params *req_param);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
index 6974a551883f..acd76e9392d3 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.c
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/msi.h>
@@ -398,6 +398,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
case ATH11K_HW_WCN6855_HW20:
case ATH11K_HW_WCN6855_HW21:
case ATH11K_HW_QCA2066_HW21:
+ case ATH11K_HW_QCA6698AQ_HW21:
ath11k_mhi_config = &ath11k_mhi_config_qca6390;
break;
default:
@@ -453,9 +454,17 @@ int ath11k_mhi_start(struct ath11k_pci *ab_pci)
return 0;
}
-void ath11k_mhi_stop(struct ath11k_pci *ab_pci)
+void ath11k_mhi_stop(struct ath11k_pci *ab_pci, bool is_suspend)
{
- mhi_power_down(ab_pci->mhi_ctrl, true);
+ /* During suspend we need the mhi_power_down_keep_dev()
+ * workaround, otherwise ath11k_core_resume() will time out
+ * during resume.
+ */
+ if (is_suspend)
+ mhi_power_down_keep_dev(ab_pci->mhi_ctrl, true);
+ else
+ mhi_power_down(ab_pci->mhi_ctrl, true);
+
mhi_unprepare_after_power_down(ab_pci->mhi_ctrl);
}
@@ -490,3 +499,8 @@ int ath11k_mhi_resume(struct ath11k_pci *ab_pci)
return 0;
}
+
+void ath11k_mhi_coredump(struct mhi_controller *mhi_ctrl, bool in_panic)
+{
+ mhi_download_rddm_image(mhi_ctrl, in_panic);
+}
diff --git a/drivers/net/wireless/ath/ath11k/mhi.h b/drivers/net/wireless/ath/ath11k/mhi.h
index a682aad52fc5..5c5c2b03c81f 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.h
+++ b/drivers/net/wireless/ath/ath11k/mhi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH11K_MHI_H
#define _ATH11K_MHI_H
@@ -18,7 +18,7 @@
#define MHICTRL_RESET_MASK 0x2
int ath11k_mhi_start(struct ath11k_pci *ar_pci);
-void ath11k_mhi_stop(struct ath11k_pci *ar_pci);
+void ath11k_mhi_stop(struct ath11k_pci *ar_pci, bool is_suspend);
int ath11k_mhi_register(struct ath11k_pci *ar_pci);
void ath11k_mhi_unregister(struct ath11k_pci *ar_pci);
void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab);
@@ -26,5 +26,6 @@ void ath11k_mhi_clear_vector(struct ath11k_base *ab);
int ath11k_mhi_suspend(struct ath11k_pci *ar_pci);
int ath11k_mhi_resume(struct ath11k_pci *ar_pci);
+void ath11k_mhi_coredump(struct mhi_controller *mhi_ctrl, bool in_panic);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index be9d2c69cc41..7114eca8810d 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -1,13 +1,15 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/of.h>
+#include <linux/time.h>
+#include <linux/vmalloc.h>
#include "pci.h"
#include "core.h"
@@ -35,7 +37,7 @@ static const struct pci_device_id ath11k_pci_id_table[] = {
{ PCI_VDEVICE(QCOM, QCA6390_DEVICE_ID) },
{ PCI_VDEVICE(QCOM, WCN6855_DEVICE_ID) },
{ PCI_VDEVICE(QCOM, QCN9074_DEVICE_ID) },
- {0}
+ {}
};
MODULE_DEVICE_TABLE(pci, ath11k_pci_id_table);
@@ -175,6 +177,19 @@ static inline void ath11k_pci_select_static_window(struct ath11k_pci *ab_pci)
ab_pci->ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
}
+static void ath11k_pci_restore_window(struct ath11k_base *ab)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+
+ spin_lock_bh(&ab_pci->window_lock);
+
+ iowrite32(ATH11K_PCI_WINDOW_ENABLE_BIT | ab_pci->register_window,
+ ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
+ ioread32(ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
+
+ spin_unlock_bh(&ab_pci->window_lock);
+}
+
static void ath11k_pci_soc_global_reset(struct ath11k_base *ab)
{
u32 val, delay;
@@ -199,6 +214,11 @@ static void ath11k_pci_soc_global_reset(struct ath11k_base *ab)
val = ath11k_pcic_read32(ab, PCIE_SOC_GLOBAL_RESET);
if (val == 0xffffffff)
ath11k_warn(ab, "link down error during global reset\n");
+
+ /* Restore the window register, whose contents are cleared by a
+ * hardware global reset, so that it matches the host's cached value.
+ */
+ ath11k_pci_restore_window(ab);
}
static void ath11k_pci_clear_dbg_registers(struct ath11k_base *ab)
@@ -610,6 +630,187 @@ static void ath11k_pci_aspm_restore(struct ath11k_pci *ab_pci)
PCI_EXP_LNKCTL_ASPMC);
}
+#ifdef CONFIG_DEV_COREDUMP
+static int ath11k_pci_coredump_calculate_size(struct ath11k_base *ab, u32 *dump_seg_sz)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+ struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;
+ struct image_info *rddm_img, *fw_img;
+ struct ath11k_tlv_dump_data *dump_tlv;
+ enum ath11k_fw_crash_dump_type mem_type;
+ u32 len = 0, rddm_tlv_sz = 0, paging_tlv_sz = 0;
+ struct ath11k_dump_file_data *file_data;
+ int i;
+
+ rddm_img = mhi_ctrl->rddm_image;
+ if (!rddm_img) {
+ ath11k_err(ab, "No RDDM dump found\n");
+ return 0;
+ }
+
+ fw_img = mhi_ctrl->fbc_image;
+
+ for (i = 0; i < fw_img->entries ; i++) {
+ if (!fw_img->mhi_buf[i].buf)
+ continue;
+
+ paging_tlv_sz += fw_img->mhi_buf[i].len;
+ }
+ dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA] = paging_tlv_sz;
+
+ for (i = 0; i < rddm_img->entries; i++) {
+ if (!rddm_img->mhi_buf[i].buf)
+ continue;
+
+ rddm_tlv_sz += rddm_img->mhi_buf[i].len;
+ }
+ dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA] = rddm_tlv_sz;
+
+ for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ mem_type = ath11k_coredump_get_dump_type(ab->qmi.target_mem[i].type);
+
+ if (mem_type == FW_CRASH_DUMP_NONE)
+ continue;
+
+ if (mem_type == FW_CRASH_DUMP_TYPE_MAX) {
+ ath11k_dbg(ab, ATH11K_DBG_PCI,
+ "target mem region type %d not supported",
+ ab->qmi.target_mem[i].type);
+ continue;
+ }
+
+ if (!ab->qmi.target_mem[i].anyaddr)
+ continue;
+
+ dump_seg_sz[mem_type] += ab->qmi.target_mem[i].size;
+ }
+
+ for (i = 0; i < FW_CRASH_DUMP_TYPE_MAX; i++) {
+ if (!dump_seg_sz[i])
+ continue;
+
+ len += sizeof(*dump_tlv) + dump_seg_sz[i];
+ }
+
+ if (len)
+ len += sizeof(*file_data);
+
+ return len;
+}
+
+static void ath11k_pci_coredump_download(struct ath11k_base *ab)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+ struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;
+ struct image_info *rddm_img, *fw_img;
+ struct timespec64 timestamp;
+ int i, len, mem_idx;
+ enum ath11k_fw_crash_dump_type mem_type;
+ struct ath11k_dump_file_data *file_data;
+ struct ath11k_tlv_dump_data *dump_tlv;
+ size_t hdr_len = sizeof(*file_data);
+ void *buf;
+ u32 dump_seg_sz[FW_CRASH_DUMP_TYPE_MAX] = {};
+
+ ath11k_mhi_coredump(mhi_ctrl, false);
+
+ len = ath11k_pci_coredump_calculate_size(ab, dump_seg_sz);
+ if (!len) {
+ ath11k_warn(ab, "No crash dump data found for devcoredump");
+ return;
+ }
+
+ rddm_img = mhi_ctrl->rddm_image;
+ fw_img = mhi_ctrl->fbc_image;
+
+ /* dev_coredumpv() requires vmalloc data */
+ buf = vzalloc(len);
+ if (!buf)
+ return;
+
+ ab->dump_data = buf;
+ ab->ath11k_coredump_len = len;
+ file_data = ab->dump_data;
+ strscpy(file_data->df_magic, "ATH11K-FW-DUMP", sizeof(file_data->df_magic));
+ file_data->len = cpu_to_le32(len);
+ file_data->version = cpu_to_le32(ATH11K_FW_CRASH_DUMP_V2);
+ file_data->chip_id = cpu_to_le32(ab_pci->dev_id);
+ file_data->qrtr_id = cpu_to_le32(ab_pci->ab->qmi.service_ins_id);
+ file_data->bus_id = cpu_to_le32(pci_domain_nr(ab_pci->pdev->bus));
+ guid_gen(&file_data->guid);
+ ktime_get_real_ts64(&timestamp);
+ file_data->tv_sec = cpu_to_le64(timestamp.tv_sec);
+ file_data->tv_nsec = cpu_to_le64(timestamp.tv_nsec);
+ buf += hdr_len;
+ dump_tlv = buf;
+ dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_PAGING_DATA);
+ dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA]);
+ buf += COREDUMP_TLV_HDR_SIZE;
+
+ /* append all segments together as they are all part of a single contiguous
+ * block of memory
+ */
+ for (i = 0; i < fw_img->entries ; i++) {
+ if (!fw_img->mhi_buf[i].buf)
+ continue;
+
+ memcpy_fromio(buf, (void const __iomem *)fw_img->mhi_buf[i].buf,
+ fw_img->mhi_buf[i].len);
+ buf += fw_img->mhi_buf[i].len;
+ }
+
+ dump_tlv = buf;
+ dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_RDDM_DATA);
+ dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA]);
+ buf += COREDUMP_TLV_HDR_SIZE;
+
+ /* append all segments together as they are all part of a single contiguous
+ * block of memory
+ */
+ for (i = 0; i < rddm_img->entries; i++) {
+ if (!rddm_img->mhi_buf[i].buf)
+ continue;
+
+ memcpy_fromio(buf, (void const __iomem *)rddm_img->mhi_buf[i].buf,
+ rddm_img->mhi_buf[i].len);
+ buf += rddm_img->mhi_buf[i].len;
+ }
+
+ mem_idx = FW_CRASH_DUMP_REMOTE_MEM_DATA;
+ for (; mem_idx < FW_CRASH_DUMP_TYPE_MAX; mem_idx++) {
+ if (mem_idx == FW_CRASH_DUMP_NONE)
+ continue;
+
+ for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ mem_type = ath11k_coredump_get_dump_type
+ (ab->qmi.target_mem[i].type);
+
+ if (mem_type != mem_idx)
+ continue;
+
+ if (!ab->qmi.target_mem[i].anyaddr) {
+ ath11k_dbg(ab, ATH11K_DBG_PCI,
+ "Skipping mem region type %d",
+ ab->qmi.target_mem[i].type);
+ continue;
+ }
+
+ dump_tlv = buf;
+ dump_tlv->type = cpu_to_le32(mem_idx);
+ dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[mem_idx]);
+ buf += COREDUMP_TLV_HDR_SIZE;
+
+ memcpy_fromio(buf, ab->qmi.target_mem[i].iaddr,
+ ab->qmi.target_mem[i].size);
+
+ buf += ab->qmi.target_mem[i].size;
+ }
+ }
+
+ queue_work(ab->workqueue, &ab->dump_work);
+}
+#endif
+
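+ /* Rough layout of the devcoredump buffer assembled by
+  * ath11k_pci_coredump_download() above: an ath11k_dump_file_data header,
+  * followed by TLV-framed sections (ath11k_tlv_dump_data + payload):
+  *
+  *   [ file header ]
+  *   [ FW_CRASH_DUMP_PAGING_DATA | len | fbc_image segments ]
+  *   [ FW_CRASH_DUMP_RDDM_DATA   | len | rddm_image segments ]
+  *   [ QMI target memory regions, keyed by their crash dump type ] ...
+  */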
static int ath11k_pci_power_up(struct ath11k_base *ab)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
@@ -638,7 +839,7 @@ static int ath11k_pci_power_up(struct ath11k_base *ab)
return 0;
}
-static void ath11k_pci_power_down(struct ath11k_base *ab)
+static void ath11k_pci_power_down(struct ath11k_base *ab, bool is_suspend)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
@@ -649,7 +850,7 @@ static void ath11k_pci_power_down(struct ath11k_base *ab)
ath11k_pci_msi_disable(ab_pci);
- ath11k_mhi_stop(ab_pci);
+ ath11k_mhi_stop(ab_pci, is_suspend);
clear_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);
ath11k_pci_sw_reset(ab_pci->ab, false);
}
@@ -713,6 +914,9 @@ static const struct ath11k_hif_ops ath11k_pci_hif_ops = {
.ce_irq_enable = ath11k_pci_hif_ce_irq_enable,
.ce_irq_disable = ath11k_pci_hif_ce_irq_disable,
.get_ce_msi_idx = ath11k_pcic_get_ce_msi_idx,
+#ifdef CONFIG_DEV_COREDUMP
+ .coredump_download = ath11k_pci_coredump_download,
+#endif
};
static void ath11k_pci_read_hw_version(struct ath11k_base *ab, u32 *major, u32 *minor)
@@ -735,7 +939,7 @@ static int ath11k_pci_set_irq_affinity_hint(struct ath11k_pci *ab_pci,
if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab_pci->ab->dev_flags))
return 0;
- return irq_set_affinity_hint(ab_pci->pdev->irq, m);
+ return irq_set_affinity_and_hint(ab_pci->pdev->irq, m);
}
static int ath11k_pci_probe(struct pci_dev *pdev,
@@ -743,7 +947,7 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
{
struct ath11k_base *ab;
struct ath11k_pci *ab_pci;
- u32 soc_hw_version_major, soc_hw_version_minor, addr;
+ u32 soc_hw_version_major, soc_hw_version_minor;
int ret;
u32 sub_version;
@@ -769,8 +973,7 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
* from DT. If memory is reserved from DT for FW, ath11k driver need not
* allocate memory.
*/
- ret = of_property_read_u32(ab->dev->of_node, "memory-region", &addr);
- if (!ret)
+ if (of_property_present(ab->dev->of_node, "memory-region"))
set_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags);
ret = ath11k_pci_claim(ab_pci, pdev);
@@ -846,6 +1049,9 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
case 0x1019D0E1:
ab->hw_rev = ATH11K_HW_QCA2066_HW21;
break;
+ case 0x001e60e1:
+ ab->hw_rev = ATH11K_HW_QCA6698AQ_HW21;
+ break;
default:
ab->hw_rev = ATH11K_HW_WCN6855_HW21;
}
@@ -936,6 +1142,8 @@ unsupported_wcn6855_soc:
return 0;
err_free_irq:
+ /* __free_irq() expects the caller to have cleared the affinity hint */
+ ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
ath11k_pcic_free_irq(ab);
err_ce_free:
@@ -970,17 +1178,21 @@ static void ath11k_pci_remove(struct pci_dev *pdev)
ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
- ath11k_pci_power_down(ab);
+ ath11k_pci_power_down(ab, false);
ath11k_debugfs_soc_destroy(ab);
ath11k_qmi_deinit_service(ab);
+ ath11k_core_pm_notifier_unregister(ab);
goto qmi_fail;
}
set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
+ cancel_work_sync(&ab->reset_work);
+ cancel_work_sync(&ab->dump_work);
ath11k_core_deinit(ab);
qmi_fail:
+ ath11k_fw_destroy(ab);
ath11k_mhi_unregister(ab_pci);
ath11k_pcic_free_irq(ab);
@@ -998,7 +1210,7 @@ static void ath11k_pci_shutdown(struct pci_dev *pdev)
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
- ath11k_pci_power_down(ab);
+ ath11k_pci_power_down(ab, false);
}
static __maybe_unused int ath11k_pci_pm_suspend(struct device *dev)
@@ -1035,9 +1247,39 @@ static __maybe_unused int ath11k_pci_pm_resume(struct device *dev)
return ret;
}
-static SIMPLE_DEV_PM_OPS(ath11k_pci_pm_ops,
- ath11k_pci_pm_suspend,
- ath11k_pci_pm_resume);
+static __maybe_unused int ath11k_pci_pm_suspend_late(struct device *dev)
+{
+ struct ath11k_base *ab = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ath11k_core_suspend_late(ab);
+ if (ret)
+ ath11k_warn(ab, "failed to late suspend core: %d\n", ret);
+
+ /* As in ath11k_pci_pm_suspend(), return success even if an error
+ * occurred, so that system suspend/hibernation can still proceed.
+ */
+ return 0;
+}
+
+static __maybe_unused int ath11k_pci_pm_resume_early(struct device *dev)
+{
+ struct ath11k_base *ab = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ath11k_core_resume_early(ab);
+ if (ret)
+ ath11k_warn(ab, "failed to early resume core: %d\n", ret);
+
+ return ret;
+}
+
+static const struct dev_pm_ops __maybe_unused ath11k_pci_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ath11k_pci_pm_suspend,
+ ath11k_pci_pm_resume)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(ath11k_pci_pm_suspend_late,
+ ath11k_pci_pm_resume_early)
+};
static struct pci_driver ath11k_pci_driver = {
.name = "ath11k_pci",
diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h
index c33c7865145c..1e3005a4b64c 100644
--- a/drivers/net/wireless/ath/ath11k/pci.h
+++ b/drivers/net/wireless/ath/ath11k/pci.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022,2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef _ATH11K_PCI_H
#define _ATH11K_PCI_H
@@ -35,18 +35,18 @@
#define PCIE_SMLH_REQ_RST_LINK_DOWN 0x2
#define PCIE_INT_CLEAR_ALL 0xffffffff
-#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG(x) \
- (ab->hw_params.regs->pcie_qserdes_sysclk_en_sel)
+#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG(ab) \
+ ((ab)->hw_params.regs->pcie_qserdes_sysclk_en_sel)
#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL 0x10
#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK 0xffffffff
-#define PCIE_PCS_OSC_DTCT_CONFIG1_REG(x) \
- (ab->hw_params.regs->pcie_pcs_osc_dtct_config_base)
+#define PCIE_PCS_OSC_DTCT_CONFIG1_REG(ab) \
+ ((ab)->hw_params.regs->pcie_pcs_osc_dtct_config_base)
#define PCIE_PCS_OSC_DTCT_CONFIG1_VAL 0x02
-#define PCIE_PCS_OSC_DTCT_CONFIG2_REG(x) \
- (ab->hw_params.regs->pcie_pcs_osc_dtct_config_base + 0x4)
+#define PCIE_PCS_OSC_DTCT_CONFIG2_REG(ab) \
+ ((ab)->hw_params.regs->pcie_pcs_osc_dtct_config_base + 0x4)
#define PCIE_PCS_OSC_DTCT_CONFIG2_VAL 0x52
-#define PCIE_PCS_OSC_DTCT_CONFIG4_REG(x) \
- (ab->hw_params.regs->pcie_pcs_osc_dtct_config_base + 0xc)
+#define PCIE_PCS_OSC_DTCT_CONFIG4_REG(ab) \
+ ((ab)->hw_params.regs->pcie_pcs_osc_dtct_config_base + 0xc)
#define PCIE_PCS_OSC_DTCT_CONFIG4_VAL 0xff
#define PCIE_PCS_OSC_DTCT_CONFIG_MSK 0x000000ff
diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c
index debe7c5919ef..fc6e7da05c60 100644
--- a/drivers/net/wireless/ath/ath11k/pcic.c
+++ b/drivers/net/wireless/ath/ath11k/pcic.c
@@ -1,9 +1,11 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
+#include <linux/export.h>
#include "core.h"
#include "pcic.h"
#include "debug.h"
@@ -126,6 +128,17 @@ static const struct ath11k_msi_config ath11k_msi_config[] = {
},
.hw_rev = ATH11K_HW_QCA2066_HW21,
},
+ {
+ .total_vectors = 32,
+ .total_users = 4,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 10, .base_vector = 3 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
+ { .name = "DP", .num_vectors = 18, .base_vector = 14 },
+ },
+ .hw_rev = ATH11K_HW_QCA6698AQ_HW21,
+ },
};
int ath11k_pcic_init_msi_config(struct ath11k_base *ab)
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index f477afd325de..ff6a97e328b8 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -1,17 +1,19 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/elf.h>
+#include <linux/export.h>
#include "qmi.h"
#include "core.h"
#include "debug.h"
#include "hif.h"
#include <linux/of.h>
-#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/ioport.h>
#include <linux/firmware.h>
#include <linux/of_irq.h>
@@ -1704,7 +1706,9 @@ static const struct qmi_elem_info qmi_wlfw_fw_init_done_ind_msg_v01_ei[] = {
},
};
-static int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
{
struct qmi_wlanfw_host_cap_req_msg_v01 req;
struct qmi_wlanfw_host_cap_resp_msg_v01 resp;
@@ -1955,13 +1959,15 @@ static void ath11k_qmi_free_target_mem_chunk(struct ath11k_base *ab)
int i;
for (i = 0; i < ab->qmi.mem_seg_count; i++) {
- if ((ab->hw_params.fixed_mem_region ||
- test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) &&
- ab->qmi.target_mem[i].iaddr)
- iounmap(ab->qmi.target_mem[i].iaddr);
+ if (!ab->qmi.target_mem[i].anyaddr)
+ continue;
- if (!ab->qmi.target_mem[i].vaddr)
+ if (ab->hw_params.fixed_mem_region ||
+ test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) {
+ iounmap(ab->qmi.target_mem[i].iaddr);
+ ab->qmi.target_mem[i].iaddr = NULL;
continue;
+ }
dma_free_coherent(ab->dev,
ab->qmi.target_mem[i].prev_size,
@@ -1989,6 +1995,15 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
chunk->prev_size == chunk->size)
continue;
+ if (ab->qmi.mem_seg_count <= ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT) {
+ ath11k_dbg(ab, ATH11K_DBG_QMI,
+ "size/type mismatch (current %d %u) (prev %d %u), try later with small size\n",
+ chunk->size, chunk->type,
+ chunk->prev_size, chunk->prev_type);
+ ab->qmi.target_mem_delayed = true;
+ return 0;
+ }
+
/* cannot reuse the existing chunk */
dma_free_coherent(ab->dev, chunk->prev_size,
chunk->vaddr, chunk->paddr);
@@ -2025,23 +2040,14 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
{
struct device *dev = ab->dev;
- struct device_node *hremote_node = NULL;
- struct resource res;
+ struct resource res = {};
u32 host_ddr_sz;
int i, idx, ret;
for (i = 0, idx = 0; i < ab->qmi.mem_seg_count; i++) {
switch (ab->qmi.target_mem[i].type) {
case HOST_DDR_REGION_TYPE:
- hremote_node = of_parse_phandle(dev->of_node, "memory-region", 0);
- if (!hremote_node) {
- ath11k_dbg(ab, ATH11K_DBG_QMI,
- "fail to get hremote_node\n");
- return -ENODEV;
- }
-
- ret = of_address_to_resource(hremote_node, 0, &res);
- of_node_put(hremote_node);
+ ret = of_reserved_mem_region_to_resource(dev->of_node, 0, &res);
if (ret) {
ath11k_dbg(ab, ATH11K_DBG_QMI,
"fail to get reg from hremote\n");
@@ -2068,7 +2074,7 @@ static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
break;
case BDF_MEM_REGION_TYPE:
ab->qmi.target_mem[idx].paddr = ab->hw_params.bdf_addr;
- ab->qmi.target_mem[idx].vaddr = NULL;
+ ab->qmi.target_mem[idx].iaddr = NULL;
ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
idx++;
@@ -2080,7 +2086,7 @@ static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
}
if (ath11k_core_coldboot_cal_support(ab)) {
- if (hremote_node) {
+ if (resource_size(&res)) {
ab->qmi.target_mem[idx].paddr =
res.start + host_ddr_sz;
ab->qmi.target_mem[idx].iaddr =
@@ -2091,10 +2097,11 @@ static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
} else {
ab->qmi.target_mem[idx].paddr =
ATH11K_QMI_CALDB_ADDRESS;
+ ab->qmi.target_mem[idx].iaddr = NULL;
}
} else {
ab->qmi.target_mem[idx].paddr = 0;
- ab->qmi.target_mem[idx].vaddr = NULL;
+ ab->qmi.target_mem[idx].iaddr = NULL;
}
ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
@@ -2180,6 +2187,9 @@ static int ath11k_qmi_request_device_info(struct ath11k_base *ab)
ab->mem = bar_addr_va;
ab->mem_len = resp.bar_size;
+ if (!ab->hw_params.ce_remap)
+ ab->mem_ce = ab->mem;
+
return 0;
out:
return ret;
@@ -2538,7 +2548,7 @@ static int ath11k_qmi_m3_load(struct ath11k_base *ab)
GFP_KERNEL);
if (!m3_mem->vaddr) {
ath11k_err(ab, "failed to allocate memory for M3 with size %zu\n",
- fw->size);
+ m3_len);
ret = -ENOMEM;
goto out;
}
@@ -2567,7 +2577,9 @@ static void ath11k_qmi_m3_free(struct ath11k_base *ab)
m3_mem->size = 0;
}
-static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
struct qmi_wlanfw_m3_info_req_msg_v01 req;
@@ -2877,7 +2889,7 @@ int ath11k_qmi_fwreset_from_cold_boot(struct ath11k_base *ab)
}
/* reset the firmware */
- ath11k_hif_power_down(ab);
+ ath11k_hif_power_down(ab, false);
ath11k_hif_power_up(ab);
ath11k_dbg(ab, ATH11K_DBG_QMI, "exit wait for cold boot done\n");
return 0;
@@ -3165,7 +3177,7 @@ static int ath11k_qmi_ops_new_server(struct qmi_handle *qmi_hdl,
sq->sq_node = service->node;
sq->sq_port = service->port;
- ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)sq,
+ ret = kernel_connect(qmi_hdl->sock, (struct sockaddr_unsized *)sq,
sizeof(*sq), 0);
if (ret) {
ath11k_warn(ab, "failed to connect to qmi remote service: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h
index 7e06d100af57..7968ab122b65 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.h
+++ b/drivers/net/wireless/ath/ath11k/qmi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_QMI_H
@@ -102,8 +102,11 @@ struct target_mem_chunk {
u32 prev_size;
u32 prev_type;
dma_addr_t paddr;
- u32 *vaddr;
- void __iomem *iaddr;
+ union {
+ u32 *vaddr;
+ void __iomem *iaddr;
+ void *anyaddr;
+ };
};
struct target_info {
@@ -154,6 +157,7 @@ struct ath11k_qmi {
#define BDF_MEM_REGION_TYPE 0x2
#define M3_DUMP_REGION_TYPE 0x3
#define CALDB_MEM_REGION_TYPE 0x4
+#define PAGEABLE_MEM_REGION_TYPE 0x9
struct qmi_wlanfw_host_cap_req_msg_v01 {
u8 num_clients_valid;
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
index b0f289784dd3..d62a2014315a 100644
--- a/drivers/net/wireless/ath/ath11k/reg.c
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/rtnetlink.h>
@@ -55,6 +55,19 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
ath11k_dbg(ar->ab, ATH11K_DBG_REG,
"Regulatory Notification received for %s\n", wiphy_name(wiphy));
+ if (request->initiator == NL80211_REGDOM_SET_BY_DRIVER) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "driver initiated regd update\n");
+ if (ar->state != ATH11K_STATE_ON)
+ return;
+
+ ret = ath11k_reg_update_chan_list(ar, true);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to update channel list: %d\n", ret);
+
+ return;
+ }
+
/* Currently supporting only General User Hints. Cell base user
* hints to be handled later.
* Hints from other sources like Core, Beacons are not expected for
@@ -111,32 +124,7 @@ int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait)
struct channel_param *ch;
enum nl80211_band band;
int num_channels = 0;
- int i, ret, left;
-
- if (wait && ar->state_11d != ATH11K_11D_IDLE) {
- left = wait_for_completion_timeout(&ar->completed_11d_scan,
- ATH11K_SCAN_TIMEOUT_HZ);
- if (!left) {
- ath11k_dbg(ar->ab, ATH11K_DBG_REG,
- "failed to receive 11d scan complete: timed out\n");
- ar->state_11d = ATH11K_11D_IDLE;
- }
- ath11k_dbg(ar->ab, ATH11K_DBG_REG,
- "11d scan wait left time %d\n", left);
- }
-
- if (wait &&
- (ar->scan.state == ATH11K_SCAN_STARTING ||
- ar->scan.state == ATH11K_SCAN_RUNNING)) {
- left = wait_for_completion_timeout(&ar->scan.completed,
- ATH11K_SCAN_TIMEOUT_HZ);
- if (!left)
- ath11k_dbg(ar->ab, ATH11K_DBG_REG,
- "failed to receive hw scan complete: timed out\n");
-
- ath11k_dbg(ar->ab, ATH11K_DBG_REG,
- "hw scan wait left time %d\n", left);
- }
+ int i, ret = 0;
if (ar->state == ATH11K_STATE_RESTARTING)
return 0;
@@ -218,6 +206,16 @@ int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait)
}
}
+ if (wait) {
+ spin_lock_bh(&ar->data_lock);
+ list_add_tail(&params->list, &ar->channel_update_queue);
+ spin_unlock_bh(&ar->data_lock);
+
+ queue_work(ar->ab->workqueue, &ar->channel_update_work);
+
+ return 0;
+ }
+
ret = ath11k_wmi_send_scan_chan_list_cmd(ar, params);
kfree(params);
@@ -293,12 +291,6 @@ int ath11k_regd_update(struct ath11k *ar)
if (ret)
goto err;
- if (ar->state == ATH11K_STATE_ON) {
- ret = ath11k_reg_update_chan_list(ar, true);
- if (ret)
- goto err;
- }
-
return 0;
err:
ath11k_warn(ab, "failed to perform regd update : %d\n", ret);
@@ -804,6 +796,54 @@ ret:
return new_regd;
}
+void ath11k_regd_update_chan_list_work(struct work_struct *work)
+{
+ struct ath11k *ar = container_of(work, struct ath11k,
+ channel_update_work);
+ struct scan_chan_list_params *params;
+ struct list_head local_update_list;
+ int left;
+
+ INIT_LIST_HEAD(&local_update_list);
+
+ spin_lock_bh(&ar->data_lock);
+ list_splice_tail_init(&ar->channel_update_queue, &local_update_list);
+ spin_unlock_bh(&ar->data_lock);
+
+ while ((params = list_first_entry_or_null(&local_update_list,
+ struct scan_chan_list_params,
+ list))) {
+ if (ar->state_11d != ATH11K_11D_IDLE) {
+ left = wait_for_completion_timeout(&ar->completed_11d_scan,
+ ATH11K_SCAN_TIMEOUT_HZ);
+ if (!left) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "failed to receive 11d scan complete: timed out\n");
+ ar->state_11d = ATH11K_11D_IDLE;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "reg 11d scan wait left time %d\n", left);
+ }
+
+ if ((ar->scan.state == ATH11K_SCAN_STARTING ||
+ ar->scan.state == ATH11K_SCAN_RUNNING)) {
+ left = wait_for_completion_timeout(&ar->scan.completed,
+ ATH11K_SCAN_TIMEOUT_HZ);
+ if (!left)
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "failed to receive hw scan complete: timed out\n");
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "reg hw scan wait left time %d\n", left);
+ }
+
+ ath11k_wmi_send_scan_chan_list_cmd(ar, params);
+ list_del(&params->list);
+ kfree(params);
+ }
+}
+
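+ /* Note on the flow above: the 11d-scan and hw-scan completion waits that
+  * ath11k_reg_update_chan_list() used to perform inline now happen in this
+  * worker. Callers passing wait=true only queue the scan_chan_list_params
+  * under data_lock and schedule channel_update_work; the worker splices the
+  * whole queue onto a local list so the lock is not held while waiting or
+  * while sending the WMI command.
+  */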
static bool ath11k_reg_is_world_alpha(char *alpha)
{
if (alpha[0] == '0' && alpha[1] == '0')
@@ -977,6 +1017,7 @@ void ath11k_regd_update_work(struct work_struct *work)
void ath11k_reg_init(struct ath11k *ar)
{
ar->hw->wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED;
+ ar->hw->wiphy->flags |= WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER;
ar->hw->wiphy->reg_notifier = ath11k_reg_notifier;
}
diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h
index 263ea9061948..72b483594015 100644
--- a/drivers/net/wireless/ath/ath11k/reg.h
+++ b/drivers/net/wireless/ath/ath11k/reg.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_REG_H
@@ -33,6 +33,7 @@ void ath11k_reg_init(struct ath11k *ar);
void ath11k_reg_reset_info(struct cur_regulatory_info *reg_info);
void ath11k_reg_free(struct ath11k_base *ab);
void ath11k_regd_update_work(struct work_struct *work);
+void ath11k_regd_update_chan_list_work(struct work_struct *work);
struct ieee80211_regdomain *
ath11k_reg_build_regd(struct ath11k_base *ab,
struct cur_regulatory_info *reg_info, bool intersect,
diff --git a/drivers/net/wireless/ath/ath11k/spectral.c b/drivers/net/wireless/ath/ath11k/spectral.c
index 79e091134515..b6b0516819a6 100644
--- a/drivers/net/wireless/ath/ath11k/spectral.c
+++ b/drivers/net/wireless/ath/ath11k/spectral.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/relay.h>
@@ -205,7 +206,7 @@ static int ath11k_spectral_scan_trigger(struct ath11k *ar)
static int ath11k_spectral_scan_config(struct ath11k *ar,
enum ath11k_spectral_mode mode)
{
- struct ath11k_wmi_vdev_spectral_conf_param param = { 0 };
+ struct ath11k_wmi_vdev_spectral_conf_param param = {};
struct ath11k_vif *arvif;
int ret, count;
diff --git a/drivers/net/wireless/ath/ath11k/testmode.c b/drivers/net/wireless/ath/ath11k/testmode.c
index 302d66092b97..a9751ea2a0b7 100644
--- a/drivers/net/wireless/ath/ath11k/testmode.c
+++ b/drivers/net/wireless/ath/ath11k/testmode.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "testmode.h"
@@ -10,18 +10,18 @@
#include "wmi.h"
#include "hw.h"
#include "core.h"
-#include "testmode_i.h"
+#include "../testmode_i.h"
#define ATH11K_FTM_SEGHDR_CURRENT_SEQ GENMASK(3, 0)
#define ATH11K_FTM_SEGHDR_TOTAL_SEGMENTS GENMASK(7, 4)
-static const struct nla_policy ath11k_tm_policy[ATH11K_TM_ATTR_MAX + 1] = {
- [ATH11K_TM_ATTR_CMD] = { .type = NLA_U32 },
- [ATH11K_TM_ATTR_DATA] = { .type = NLA_BINARY,
- .len = ATH11K_TM_DATA_MAX_LEN },
- [ATH11K_TM_ATTR_WMI_CMDID] = { .type = NLA_U32 },
- [ATH11K_TM_ATTR_VERSION_MAJOR] = { .type = NLA_U32 },
- [ATH11K_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 },
+static const struct nla_policy ath11k_tm_policy[ATH_TM_ATTR_MAX + 1] = {
+ [ATH_TM_ATTR_CMD] = { .type = NLA_U32 },
+ [ATH_TM_ATTR_DATA] = { .type = NLA_BINARY,
+ .len = ATH_TM_DATA_MAX_LEN },
+ [ATH_TM_ATTR_WMI_CMDID] = { .type = NLA_U32 },
+ [ATH_TM_ATTR_VERSION_MAJOR] = { .type = NLA_U32 },
+ [ATH_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 },
};
static struct ath11k *ath11k_tm_get_ar(struct ath11k_base *ab)
@@ -73,9 +73,9 @@ static void ath11k_tm_wmi_event_unsegmented(struct ath11k_base *ab, u32 cmd_id,
goto out;
}
- if (nla_put_u32(nl_skb, ATH11K_TM_ATTR_CMD, ATH11K_TM_CMD_WMI) ||
- nla_put_u32(nl_skb, ATH11K_TM_ATTR_WMI_CMDID, cmd_id) ||
- nla_put(nl_skb, ATH11K_TM_ATTR_DATA, skb->len, skb->data)) {
+ if (nla_put_u32(nl_skb, ATH_TM_ATTR_CMD, ATH_TM_CMD_WMI) ||
+ nla_put_u32(nl_skb, ATH_TM_ATTR_WMI_CMDID, cmd_id) ||
+ nla_put(nl_skb, ATH_TM_ATTR_DATA, skb->len, skb->data)) {
ath11k_warn(ab, "failed to populate testmode unsegmented event\n");
kfree_skb(nl_skb);
goto out;
@@ -107,7 +107,7 @@ static int ath11k_tm_process_event(struct ath11k_base *ab, u32 cmd_id,
u32 pdev_id;
ath11k_dbg(ab, ATH11K_DBG_TESTMODE,
- "event wmi cmd_id %d ftm event msg %pK datalen %d\n",
+ "event wmi cmd_id %d ftm event msg %p datalen %d\n",
cmd_id, ftm_msg, length);
ath11k_dbg_dump(ab, ATH11K_DBG_TESTMODE, NULL, "", ftm_msg, length);
pdev_id = DP_HW2SW_MACID(ftm_msg->seg_hdr.pdev_id);
@@ -140,7 +140,7 @@ static int ath11k_tm_process_event(struct ath11k_base *ab, u32 cmd_id,
data_pos = ab->testmode.data_pos;
- if ((data_pos + datalen) > ATH11K_FTM_EVENT_MAX_BUF_LENGTH) {
+ if ((data_pos + datalen) > ATH_FTM_EVENT_MAX_BUF_LENGTH) {
ath11k_warn(ab, "Invalid ftm event length at %d: %d\n",
data_pos, datalen);
ret = -EINVAL;
@@ -172,10 +172,10 @@ static int ath11k_tm_process_event(struct ath11k_base *ab, u32 cmd_id,
goto out;
}
- if (nla_put_u32(nl_skb, ATH11K_TM_ATTR_CMD,
- ATH11K_TM_CMD_WMI_FTM) ||
- nla_put_u32(nl_skb, ATH11K_TM_ATTR_WMI_CMDID, cmd_id) ||
- nla_put(nl_skb, ATH11K_TM_ATTR_DATA, data_pos,
+ if (nla_put_u32(nl_skb, ATH_TM_ATTR_CMD,
+ ATH_TM_CMD_WMI_FTM) ||
+ nla_put_u32(nl_skb, ATH_TM_ATTR_WMI_CMDID, cmd_id) ||
+ nla_put(nl_skb, ATH_TM_ATTR_DATA, data_pos,
&ab->testmode.eventdata[0])) {
ath11k_warn(ab, "failed to populate segmented testmode event");
kfree_skb(nl_skb);
@@ -235,23 +235,23 @@ static int ath11k_tm_cmd_get_version(struct ath11k *ar, struct nlattr *tb[])
ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
"cmd get version_major %d version_minor %d\n",
- ATH11K_TESTMODE_VERSION_MAJOR,
- ATH11K_TESTMODE_VERSION_MINOR);
+ ATH_TESTMODE_VERSION_MAJOR,
+ ATH_TESTMODE_VERSION_MINOR);
skb = cfg80211_testmode_alloc_reply_skb(ar->hw->wiphy,
nla_total_size(sizeof(u32)));
if (!skb)
return -ENOMEM;
- ret = nla_put_u32(skb, ATH11K_TM_ATTR_VERSION_MAJOR,
- ATH11K_TESTMODE_VERSION_MAJOR);
+ ret = nla_put_u32(skb, ATH_TM_ATTR_VERSION_MAJOR,
+ ATH_TESTMODE_VERSION_MAJOR);
if (ret) {
kfree_skb(skb);
return ret;
}
- ret = nla_put_u32(skb, ATH11K_TM_ATTR_VERSION_MINOR,
- ATH11K_TESTMODE_VERSION_MINOR);
+ ret = nla_put_u32(skb, ATH_TM_ATTR_VERSION_MINOR,
+ ATH_TESTMODE_VERSION_MINOR);
if (ret) {
kfree_skb(skb);
return ret;
@@ -277,7 +277,7 @@ static int ath11k_tm_cmd_testmode_start(struct ath11k *ar, struct nlattr *tb[])
goto err;
}
- ar->ab->testmode.eventdata = kzalloc(ATH11K_FTM_EVENT_MAX_BUF_LENGTH,
+ ar->ab->testmode.eventdata = kzalloc(ATH_FTM_EVENT_MAX_BUF_LENGTH,
GFP_KERNEL);
if (!ar->ab->testmode.eventdata) {
ret = -ENOMEM;
@@ -310,25 +310,25 @@ static int ath11k_tm_cmd_wmi(struct ath11k *ar, struct nlattr *tb[],
mutex_lock(&ar->conf_mutex);
- if (!tb[ATH11K_TM_ATTR_DATA]) {
+ if (!tb[ATH_TM_ATTR_DATA]) {
ret = -EINVAL;
goto out;
}
- if (!tb[ATH11K_TM_ATTR_WMI_CMDID]) {
+ if (!tb[ATH_TM_ATTR_WMI_CMDID]) {
ret = -EINVAL;
goto out;
}
- buf = nla_data(tb[ATH11K_TM_ATTR_DATA]);
- buf_len = nla_len(tb[ATH11K_TM_ATTR_DATA]);
+ buf = nla_data(tb[ATH_TM_ATTR_DATA]);
+ buf_len = nla_len(tb[ATH_TM_ATTR_DATA]);
if (!buf_len) {
ath11k_warn(ar->ab, "No data present in testmode wmi command\n");
ret = -EINVAL;
goto out;
}
- cmd_id = nla_get_u32(tb[ATH11K_TM_ATTR_WMI_CMDID]);
+ cmd_id = nla_get_u32(tb[ATH_TM_ATTR_WMI_CMDID]);
/* Make sure that the buffer length is long enough to
* hold TLV and pdev/vdev id.
@@ -409,13 +409,13 @@ static int ath11k_tm_cmd_wmi_ftm(struct ath11k *ar, struct nlattr *tb[])
goto out;
}
- if (!tb[ATH11K_TM_ATTR_DATA]) {
+ if (!tb[ATH_TM_ATTR_DATA]) {
ret = -EINVAL;
goto out;
}
- buf = nla_data(tb[ATH11K_TM_ATTR_DATA]);
- buf_len = nla_len(tb[ATH11K_TM_ATTR_DATA]);
+ buf = nla_data(tb[ATH_TM_ATTR_DATA]);
+ buf_len = nla_len(tb[ATH_TM_ATTR_DATA]);
cmd_id = WMI_PDEV_UTF_CMDID;
ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
@@ -476,25 +476,25 @@ int ath11k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len)
{
struct ath11k *ar = hw->priv;
- struct nlattr *tb[ATH11K_TM_ATTR_MAX + 1];
+ struct nlattr *tb[ATH_TM_ATTR_MAX + 1];
int ret;
- ret = nla_parse(tb, ATH11K_TM_ATTR_MAX, data, len, ath11k_tm_policy,
+ ret = nla_parse(tb, ATH_TM_ATTR_MAX, data, len, ath11k_tm_policy,
NULL);
if (ret)
return ret;
- if (!tb[ATH11K_TM_ATTR_CMD])
+ if (!tb[ATH_TM_ATTR_CMD])
return -EINVAL;
- switch (nla_get_u32(tb[ATH11K_TM_ATTR_CMD])) {
- case ATH11K_TM_CMD_GET_VERSION:
+ switch (nla_get_u32(tb[ATH_TM_ATTR_CMD])) {
+ case ATH_TM_CMD_GET_VERSION:
return ath11k_tm_cmd_get_version(ar, tb);
- case ATH11K_TM_CMD_WMI:
+ case ATH_TM_CMD_WMI:
return ath11k_tm_cmd_wmi(ar, tb, vif);
- case ATH11K_TM_CMD_TESTMODE_START:
+ case ATH_TM_CMD_TESTMODE_START:
return ath11k_tm_cmd_testmode_start(ar, tb);
- case ATH11K_TM_CMD_WMI_FTM:
+ case ATH_TM_CMD_WMI_FTM:
return ath11k_tm_cmd_wmi_ftm(ar, tb);
default:
return -EOPNOTSUPP;
diff --git a/drivers/net/wireless/ath/ath11k/trace.c b/drivers/net/wireless/ath/ath11k/trace.c
index 6620650d7845..44ff8e9eff5d 100644
--- a/drivers/net/wireless/ath/ath11k/trace.c
+++ b/drivers/net/wireless/ath/ath11k/trace.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
+#include <linux/export.h>
#include <linux/module.h>
#define CREATE_TRACE_POINTS
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 87abfa547529..110035dae8a6 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/skbuff.h>
#include <linux/ctype.h>
@@ -2061,10 +2061,13 @@ int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar,
cmd->peer_bw_rxnss_override |= param->peer_bw_rxnss_override;
if (param->vht_capable) {
- mcs->rx_max_rate = param->rx_max_rate;
- mcs->rx_mcs_set = param->rx_mcs_set;
- mcs->tx_max_rate = param->tx_max_rate;
- mcs->tx_mcs_set = param->tx_mcs_set;
+ /* firmware interprets mcs->tx_mcs_set field as peer's
+ * RX capability
+ */
+ mcs->tx_max_rate = param->rx_max_rate;
+ mcs->tx_mcs_set = param->rx_mcs_set;
+ mcs->rx_max_rate = param->tx_max_rate;
+ mcs->rx_mcs_set = param->tx_mcs_set;
}
/* HE Rates */
@@ -2088,8 +2091,11 @@ int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar,
FIELD_PREP(WMI_TLV_LEN,
sizeof(*he_mcs) - TLV_HDR_SIZE);
- he_mcs->rx_mcs_set = param->peer_he_tx_mcs_set[i];
- he_mcs->tx_mcs_set = param->peer_he_rx_mcs_set[i];
+ /* firmware interprets mcs->rx_mcs_set field as peer's
+ * RX capability
+ */
+ he_mcs->rx_mcs_set = param->peer_he_rx_mcs_set[i];
+ he_mcs->tx_mcs_set = param->peer_he_tx_mcs_set[i];
ptr += sizeof(*he_mcs);
}
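Both hunks above apply the same convention: the WMI rate-set fields are named from the firmware's point of view, so the host crosses over the peer's transmit and receive MCS maps when building the TLV. A minimal sketch of that mapping, written against the field annotations this series adds to struct wmi_vht_rate_set (the helper itself is hypothetical, not part of the patch):

static void fill_fw_view_vht_mcs(struct wmi_vht_rate_set *mcs,
				 u32 peer_can_rx_max_rate, u32 peer_can_rx_mcs_set,
				 u32 peer_can_tx_max_rate, u32 peer_can_tx_mcs_set)
{
	/* tx_* in the TLV describes the MCS at which the peer can receive */
	mcs->tx_max_rate = peer_can_rx_max_rate;
	mcs->tx_mcs_set = peer_can_rx_mcs_set;

	/* rx_* in the TLV describes the MCS at which the peer can transmit */
	mcs->rx_max_rate = peer_can_tx_max_rate;
	mcs->rx_mcs_set = peer_can_tx_mcs_set;
}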
@@ -2662,7 +2668,8 @@ int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar,
}
int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
- struct wmi_wmm_params_all_arg *param)
+ struct wmi_wmm_params_all_arg *param,
+ enum wmi_wmm_params_type wmm_param_type)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_vdev_set_wmm_params_cmd *cmd;
@@ -2681,7 +2688,7 @@ int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
- cmd->wmm_param_type = 0;
+ cmd->wmm_param_type = wmm_param_type;
for (ac = 0; ac < WME_NUM_AC; ac++) {
switch (ac) {
@@ -2714,8 +2721,8 @@ int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
wmm_param->no_ack = wmi_wmm_arg->no_ack;
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
- "wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
- ac, wmm_param->aifs, wmm_param->cwmin,
+ "wmm set type %d ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
+ wmm_param_type, ac, wmm_param->aifs, wmm_param->cwmin,
wmm_param->cwmax, wmm_param->txoplimit,
wmm_param->acm, wmm_param->no_ack);
}
@@ -5960,6 +5967,9 @@ static int wmi_process_mgmt_tx_comp(struct ath11k *ar,
dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
info = IEEE80211_SKB_CB(msdu);
+ memset(&info->status, 0, sizeof(info->status));
+ info->status.rates[0].idx = -1;
+
if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) &&
!tx_compl_param->status) {
info->flags |= IEEE80211_TX_STAT_ACK;
@@ -7541,7 +7551,7 @@ static void ath11k_vdev_stopped_event(struct ath11k_base *ab, struct sk_buff *sk
static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb)
{
- struct mgmt_rx_event_params rx_ev = {0};
+ struct mgmt_rx_event_params rx_ev = {};
struct ath11k *ar;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *hdr;
@@ -7656,7 +7666,7 @@ exit:
static void ath11k_mgmt_tx_compl_event(struct ath11k_base *ab, struct sk_buff *skb)
{
- struct wmi_mgmt_tx_compl_event tx_compl_param = {0};
+ struct wmi_mgmt_tx_compl_event tx_compl_param = {};
struct ath11k *ar;
if (ath11k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) {
@@ -7711,7 +7721,7 @@ static struct ath11k *ath11k_get_ar_on_scan_state(struct ath11k_base *ab,
static void ath11k_scan_event(struct ath11k_base *ab, struct sk_buff *skb)
{
struct ath11k *ar;
- struct wmi_scan_event scan_ev = {0};
+ struct wmi_scan_event scan_ev = {};
if (ath11k_pull_scan_ev(ab, skb, &scan_ev) != 0) {
ath11k_warn(ab, "failed to extract scan event");
@@ -7883,7 +7893,7 @@ static void ath11k_roam_event(struct ath11k_base *ab, struct sk_buff *skb)
static void ath11k_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb)
{
- struct wmi_chan_info_event ch_info_ev = {0};
+ struct wmi_chan_info_event ch_info_ev = {};
struct ath11k *ar;
struct survey_info *survey;
int idx;
@@ -8030,7 +8040,7 @@ exit:
static void ath11k_vdev_install_key_compl_event(struct ath11k_base *ab,
struct sk_buff *skb)
{
- struct wmi_vdev_install_key_complete_arg install_key_compl = {0};
+ struct wmi_vdev_install_key_complete_arg install_key_compl = {};
struct ath11k *ar;
if (ath11k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) {
@@ -8128,7 +8138,7 @@ static void ath11k_service_available_event(struct ath11k_base *ab, struct sk_buf
static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff *skb)
{
- struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0};
+ struct wmi_peer_assoc_conf_arg peer_assoc_conf = {};
struct ath11k *ar;
if (ath11k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) {
@@ -8157,6 +8167,11 @@ static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff
static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *skb)
{
struct ath11k_fw_stats stats = {};
+ size_t total_vdevs_started = 0;
+ struct ath11k_pdev *pdev;
+ bool is_end = true;
+ int i;
+
struct ath11k *ar;
int ret;
@@ -8183,25 +8198,57 @@ static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *sk
spin_lock_bh(&ar->data_lock);
- /* WMI_REQUEST_PDEV_STAT can be requested via .get_txpower mac ops or via
+ /* WMI_REQUEST_PDEV_STAT, WMI_REQUEST_VDEV_STAT and
+ * WMI_REQUEST_RSSI_PER_CHAIN_STAT can be requested via mac ops or via
* debugfs fw stats. Therefore, processing it separately.
*/
if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs);
- ar->fw_stats_done = true;
+ complete(&ar->fw_stats_done);
+ goto complete;
+ }
+
+ if (stats.stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) {
+ complete(&ar->fw_stats_done);
+ goto complete;
+ }
+
+ if (stats.stats_id == WMI_REQUEST_VDEV_STAT) {
+ if (list_empty(&stats.vdevs)) {
+ ath11k_warn(ab, "empty vdev stats");
+ goto complete;
+ }
+ /* FW sends all the active VDEV stats irrespective of PDEV,
+ * hence limit until the count of all VDEVs started
+ */
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar)
+ total_vdevs_started += ar->num_started_vdevs;
+ }
+
+ if (total_vdevs_started)
+ is_end = ((++ar->fw_stats.num_vdev_recvd) ==
+ total_vdevs_started);
+
+ list_splice_tail_init(&stats.vdevs,
+ &ar->fw_stats.vdevs);
+
+ if (is_end)
+ complete(&ar->fw_stats_done);
+
goto complete;
}
- /* WMI_REQUEST_VDEV_STAT, WMI_REQUEST_BCN_STAT and WMI_REQUEST_RSSI_PER_CHAIN_STAT
- * are currently requested only via debugfs fw stats. Hence, processing these
- * in debugfs context
+ /* WMI_REQUEST_BCN_STAT is currently requested only via debugfs fw stats.
+ * Hence, processing it in debugfs context
*/
ath11k_debugfs_fw_stats_process(ar, &stats);
complete:
complete(&ar->fw_stats_complete);
- rcu_read_unlock();
spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
/* Since the stats's pdev, vdev and beacon list are spliced and reinitialised
* at this point, no need to free the individual list.
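The WMI_REQUEST_VDEV_STAT branch added above also changes how the waiter is woken: fw_stats_done is now a completion rather than a flag, and it only fires once one stats event per started vdev has been received across all active pdevs. A compact sketch of that end-of-sequence test (hypothetical helper, not from the patch; kernel types assumed):

static bool fw_stats_vdev_request_done(u32 num_vdev_recvd,
				       size_t total_vdevs_started)
{
	/* with no vdevs started a single event completes the request,
	 * otherwise wait for one event per started vdev
	 */
	if (!total_vdevs_started)
		return true;

	return num_vdev_recvd == total_vdevs_started;
}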
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index 8982b909c821..0f0de24a3840 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef ATH11K_WMI_H
@@ -3463,20 +3463,6 @@ struct scan_cancel_param {
u32 pdev_id;
};
-struct wmi_bcn_send_from_host_cmd {
- u32 tlv_header;
- u32 vdev_id;
- u32 data_len;
- union {
- u32 frag_ptr;
- u32 frag_ptr_lo;
- };
- u32 frame_ctrl;
- u32 dtim_flag;
- u32 bcn_antenna;
- u32 frag_ptr_hi;
-};
-
#define WMI_CHAN_INFO_MODE GENMASK(5, 0)
#define WMI_CHAN_INFO_HT40_PLUS BIT(6)
#define WMI_CHAN_INFO_PASSIVE BIT(7)
@@ -3817,6 +3803,7 @@ struct wmi_stop_scan_cmd {
};
struct scan_chan_list_params {
+ struct list_head list;
u32 pdev_id;
u16 nallchans;
struct channel_param ch_param[];
@@ -4132,8 +4119,10 @@ struct wmi_rate_set {
struct wmi_vht_rate_set {
u32 tlv_header;
u32 rx_max_rate;
+ /* MCS at which the peer can transmit */
u32 rx_mcs_set;
u32 tx_max_rate;
+ /* MCS at which the peer can receive */
u32 tx_mcs_set;
u32 tx_max_mcs_nss;
} __packed;
@@ -6346,6 +6335,11 @@ enum wmi_sta_keepalive_method {
#define WMI_STA_KEEPALIVE_INTERVAL_DEFAULT 30
#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0
+enum wmi_wmm_params_type {
+ WMI_WMM_PARAM_TYPE_LEGACY = 0,
+ WMI_WMM_PARAM_TYPE_11AX_MU_EDCA = 1,
+};
+
const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab,
struct sk_buff *skb, gfp_t gfp);
int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
@@ -6402,7 +6396,8 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar,
struct scan_cancel_param *param);
int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
- struct wmi_wmm_params_all_arg *param);
+ struct wmi_wmm_params_all_arg *param,
+ enum wmi_wmm_params_type wmm_param_type);
int ath11k_wmi_pdev_suspend(struct ath11k *ar, u32 suspend_opt,
u32 pdev_id);
int ath11k_wmi_pdev_resume(struct ath11k *ar, u32 pdev_id);
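With the wmm_param_type argument added to the prototype above, a caller can program two independent EDCA parameter sets on the same vdev. A hedged usage sketch; the helper and its argument names are hypothetical, only the exported function and the new enum come from this patch:

static int push_wmm_params(struct ath11k *ar, u32 vdev_id,
			   struct wmi_wmm_params_all_arg *legacy_arg,
			   struct wmi_wmm_params_all_arg *mu_edca_arg)
{
	int ret;

	/* classic per-AC WMM/EDCA parameters */
	ret = ath11k_wmi_send_wmm_update_cmd_tlv(ar, vdev_id, legacy_arg,
						 WMI_WMM_PARAM_TYPE_LEGACY);
	if (ret)
		return ret;

	/* 802.11ax MU EDCA parameters, now selectable via the new type */
	return ath11k_wmi_send_wmm_update_cmd_tlv(ar, vdev_id, mu_edca_arg,
						  WMI_WMM_PARAM_TYPE_11AX_MU_EDCA);
}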
diff --git a/drivers/net/wireless/ath/ath11k/wow.c b/drivers/net/wireless/ath/ath11k/wow.c
index 99d8ba45a75b..b6f08755129f 100644
--- a/drivers/net/wireless/ath/ath11k/wow.c
+++ b/drivers/net/wireless/ath/ath11k/wow.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/delay.h>
@@ -148,13 +148,16 @@ static int ath11k_wow_cleanup(struct ath11k *ar)
* 802.11: |4B|dest mac(6B)| 6B |src mac(6B)| 8B |type(2B)| body... |
* +--+------------+----+-----------+---------------+-----------+
*/
-static void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
- const struct cfg80211_pkt_pattern *old)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
+ const struct cfg80211_pkt_pattern *old)
{
u8 hdr_8023_pattern[ETH_HLEN] = {};
u8 hdr_8023_bit_mask[ETH_HLEN] = {};
u8 hdr_80211_pattern[WOW_HDR_LEN] = {};
u8 hdr_80211_bit_mask[WOW_HDR_LEN] = {};
+ u8 bytemask[WOW_MAX_PATTERN_SIZE] = {};
int total_len = old->pkt_offset + old->pattern_len;
int hdr_80211_end_offset;
@@ -172,11 +175,17 @@ static void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
struct rfc1042_hdr *new_rfc_mask =
(struct rfc1042_hdr *)(hdr_80211_bit_mask + hdr_len);
int rfc_len = sizeof(*new_rfc_pattern);
+ int i;
+
+ /* convert bitmask to bytemask */
+ for (i = 0; i < old->pattern_len; i++)
+ if (old->mask[i / 8] & BIT(i % 8))
+ bytemask[i] = 0xff;
memcpy(hdr_8023_pattern + old->pkt_offset,
old->pattern, ETH_HLEN - old->pkt_offset);
memcpy(hdr_8023_bit_mask + old->pkt_offset,
- old->mask, ETH_HLEN - old->pkt_offset);
+ bytemask, ETH_HLEN - old->pkt_offset);
/* Copy destination address */
memcpy(new_hdr_pattern->addr1, old_hdr_pattern->h_dest, ETH_ALEN);
@@ -232,7 +241,7 @@ static void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
(void *)old->pattern + ETH_HLEN - old->pkt_offset,
total_len - ETH_HLEN);
memcpy((u8 *)new->mask + new->pattern_len,
- (void *)old->mask + ETH_HLEN - old->pkt_offset,
+ bytemask + ETH_HLEN - old->pkt_offset,
total_len - ETH_HLEN);
new->pattern_len += total_len - ETH_HLEN;
@@ -393,35 +402,31 @@ static int ath11k_vif_wow_set_wakeups(struct ath11k_vif *arvif,
}
for (i = 0; i < wowlan->n_patterns; i++) {
- u8 bitmask[WOW_MAX_PATTERN_SIZE] = {};
u8 ath_pattern[WOW_MAX_PATTERN_SIZE] = {};
u8 ath_bitmask[WOW_MAX_PATTERN_SIZE] = {};
struct cfg80211_pkt_pattern new_pattern = {};
- struct cfg80211_pkt_pattern old_pattern = patterns[i];
- int j;
new_pattern.pattern = ath_pattern;
new_pattern.mask = ath_bitmask;
if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
continue;
- /* convert bytemask to bitmask */
- for (j = 0; j < patterns[i].pattern_len; j++)
- if (patterns[i].mask[j / 8] & BIT(j % 8))
- bitmask[j] = 0xff;
- old_pattern.mask = bitmask;
if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode ==
ATH11K_HW_TXRX_NATIVE_WIFI) {
if (patterns[i].pkt_offset < ETH_HLEN) {
- u8 pattern_ext[WOW_MAX_PATTERN_SIZE] = {};
-
- memcpy(pattern_ext, old_pattern.pattern,
- old_pattern.pattern_len);
- old_pattern.pattern = pattern_ext;
ath11k_wow_convert_8023_to_80211(&new_pattern,
- &old_pattern);
+ &patterns[i]);
} else {
- new_pattern = old_pattern;
+ int j;
+
+ new_pattern = patterns[i];
+ new_pattern.mask = ath_bitmask;
+
+ /* convert bitmask to bytemask */
+ for (j = 0; j < patterns[i].pattern_len; j++)
+ if (patterns[i].mask[j / 8] & BIT(j % 8))
+ ath_bitmask[j] = 0xff;
+
new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN;
}
}
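Both conversion loops in this file express the same idea: cfg80211 hands the driver a packed bitmask with one bit per pattern byte, while the firmware expects a full mask byte per pattern byte. A minimal sketch of the expansion (hypothetical helper, kernel BIT() and u8 assumed):

static void wow_bitmask_to_bytemask(const u8 *bitmask, u8 *bytemask,
				    int pattern_len)
{
	int i;

	/* bit i of the cfg80211 mask covers pattern byte i; expand it to
	 * 0xff (compare this byte) or 0x00 (ignore this byte)
	 */
	for (i = 0; i < pattern_len; i++)
		bytemask[i] = (bitmask[i / 8] & BIT(i % 8)) ? 0xff : 0x00;
}

For example, a 5-byte pattern with mask 0x13 (binary 10011) matches bytes 0, 1 and 4 and treats bytes 2 and 3 as wildcards.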
diff --git a/drivers/net/wireless/ath/ath12k/Kconfig b/drivers/net/wireless/ath/ath12k/Kconfig
index f64e7c322216..1ea1af1b8f6c 100644
--- a/drivers/net/wireless/ath/ath12k/Kconfig
+++ b/drivers/net/wireless/ath/ath12k/Kconfig
@@ -7,7 +7,7 @@ config ATH12K
select MHI_BUS
select QRTR
select QRTR_MHI
- select PCI_PWRCTL_PWRSEQ if HAVE_PWRCTL
+ select PCI_PWRCTRL_PWRSEQ if HAVE_PWRCTRL
help
Enable support for Qualcomm Technologies Wi-Fi 7 (IEEE
802.11be) family of chipsets, for example WCN7850 and
@@ -15,6 +15,14 @@ config ATH12K
If you choose to build a module, it'll be called ath12k.
+config ATH12K_AHB
+ bool "QTI ath12k AHB support"
+ depends on ATH12K && REMOTEPROC
+ select QCOM_MDT_LOADER
+ select QCOM_SCM
+ help
+ Enable support for Ath12k AHB bus chipsets, for example IPQ5332.
+
config ATH12K_DEBUG
bool "ath12k debugging"
depends on ATH12K
@@ -42,3 +50,13 @@ config ATH12K_TRACING
If unsure, say Y to make it easier to debug problems. But if
you want optimal performance choose N.
+
+config ATH12K_COREDUMP
+ bool "ath12k coredump"
+ depends on ATH12K
+ select WANT_DEV_COREDUMP
+ help
+ Enable ath12k coredump collection
+
+ If unsure, say Y to make it easier to debug problems. But if
+ dump collection is not required, choose N.
diff --git a/drivers/net/wireless/ath/ath12k/Makefile b/drivers/net/wireless/ath/ath12k/Makefile
index 5a1ed20d730e..d95ee525a6cd 100644
--- a/drivers/net/wireless/ath/ath12k/Makefile
+++ b/drivers/net/wireless/ath/ath12k/Makefile
@@ -23,10 +23,13 @@ ath12k-y += core.o \
fw.o \
p2p.o
-ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o debugfs_htt_stats.o
+ath12k-$(CONFIG_ATH12K_AHB) += ahb.o
+ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o debugfs_htt_stats.o debugfs_sta.o
ath12k-$(CONFIG_ACPI) += acpi.o
ath12k-$(CONFIG_ATH12K_TRACING) += trace.o
ath12k-$(CONFIG_PM) += wow.o
+ath12k-$(CONFIG_ATH12K_COREDUMP) += coredump.o
+ath12k-$(CONFIG_NL80211_TESTMODE) += testmode.o
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath12k/acpi.c b/drivers/net/wireless/ath/ath12k/acpi.c
index 0555d35aab47..d81367ce6929 100644
--- a/drivers/net/wireless/ath/ath12k/acpi.c
+++ b/drivers/net/wireless/ath/ath12k/acpi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@@ -12,7 +12,7 @@ static int ath12k_acpi_dsm_get_data(struct ath12k_base *ab, int func)
{
union acpi_object *obj;
acpi_handle root_handle;
- int ret;
+ int ret, i;
root_handle = ACPI_HANDLE(ab->dev);
if (!root_handle) {
@@ -29,9 +29,48 @@ static int ath12k_acpi_dsm_get_data(struct ath12k_base *ab, int func)
}
if (obj->type == ACPI_TYPE_INTEGER) {
- ab->acpi.func_bit = obj->integer.value;
+ switch (func) {
+ case ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS:
+ ab->acpi.func_bit = obj->integer.value;
+ break;
+ case ATH12K_ACPI_DSM_FUNC_DISABLE_FLAG:
+ ab->acpi.bit_flag = obj->integer.value;
+ break;
+ }
+ } else if (obj->type == ACPI_TYPE_STRING) {
+ switch (func) {
+ case ATH12K_ACPI_DSM_FUNC_BDF_EXT:
+ if (obj->string.length <= ATH12K_ACPI_BDF_ANCHOR_STRING_LEN ||
+ obj->string.length > ATH12K_ACPI_BDF_MAX_LEN ||
+ memcmp(obj->string.pointer, ATH12K_ACPI_BDF_ANCHOR_STRING,
+ ATH12K_ACPI_BDF_ANCHOR_STRING_LEN)) {
+ ath12k_warn(ab, "invalid ACPI DSM BDF size: %d\n",
+ obj->string.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(ab->acpi.bdf_string, obj->string.pointer,
+ obj->buffer.length);
+
+ break;
+ }
} else if (obj->type == ACPI_TYPE_BUFFER) {
switch (func) {
+ case ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS:
+ if (obj->buffer.length < ATH12K_ACPI_DSM_FUNC_MIN_BITMAP_SIZE ||
+ obj->buffer.length > ATH12K_ACPI_DSM_FUNC_MAX_BITMAP_SIZE) {
+ ath12k_warn(ab, "invalid ACPI DSM func size: %d\n",
+ obj->buffer.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ab->acpi.func_bit = 0;
+ for (i = 0; i < obj->buffer.length; i++)
+ ab->acpi.func_bit += obj->buffer.pointer[i] << (i * 8);
+
+ break;
case ATH12K_ACPI_DSM_FUNC_TAS_CFG:
if (obj->buffer.length != ATH12K_ACPI_DSM_TAS_CFG_SIZE) {
ath12k_warn(ab, "invalid ACPI DSM TAS config size: %d\n",
@@ -247,24 +286,118 @@ static int ath12k_acpi_set_tas_params(struct ath12k_base *ab)
return 0;
}
+bool ath12k_acpi_get_disable_rfkill(struct ath12k_base *ab)
+{
+ return ab->acpi.acpi_disable_rfkill;
+}
+
+bool ath12k_acpi_get_disable_11be(struct ath12k_base *ab)
+{
+ return ab->acpi.acpi_disable_11be;
+}
+
+void ath12k_acpi_set_dsm_func(struct ath12k_base *ab)
+{
+ int ret;
+ u8 *buf;
+
+ if (!ab->hw_params->acpi_guid)
+ /* not supported with this hardware */
+ return;
+
+ if (ab->acpi.acpi_tas_enable) {
+ ret = ath12k_acpi_set_tas_params(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to send ACPI TAS parameters: %d\n", ret);
+ return;
+ }
+ }
+
+ if (ab->acpi.acpi_bios_sar_enable) {
+ ret = ath12k_acpi_set_bios_sar_params(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to send ACPI BIOS SAR: %d\n", ret);
+ return;
+ }
+ }
+
+ if (ab->acpi.acpi_cca_enable) {
+ buf = ab->acpi.cca_data + ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET;
+ ret = ath12k_wmi_set_bios_cmd(ab,
+ WMI_BIOS_PARAM_CCA_THRESHOLD_TYPE,
+ buf,
+ ATH12K_ACPI_CCA_THR_OFFSET_LEN);
+ if (ret) {
+ ath12k_warn(ab, "failed to set ACPI DSM CCA threshold: %d\n",
+ ret);
+ return;
+ }
+ }
+
+ if (ab->acpi.acpi_band_edge_enable) {
+ ret = ath12k_wmi_set_bios_cmd(ab,
+ WMI_BIOS_PARAM_TYPE_BANDEDGE,
+ ab->acpi.band_edge_power,
+ sizeof(ab->acpi.band_edge_power));
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to set ACPI DSM band edge channel power: %d\n",
+ ret);
+ return;
+ }
+ }
+}
+
int ath12k_acpi_start(struct ath12k_base *ab)
{
acpi_status status;
- u8 *buf;
int ret;
+ ab->acpi.acpi_tas_enable = false;
+ ab->acpi.acpi_disable_11be = false;
+ ab->acpi.acpi_disable_rfkill = false;
+ ab->acpi.acpi_bios_sar_enable = false;
+ ab->acpi.acpi_cca_enable = false;
+ ab->acpi.acpi_band_edge_enable = false;
+ ab->acpi.acpi_enable_bdf = false;
+ ab->acpi.bdf_string[0] = '\0';
+
if (!ab->hw_params->acpi_guid)
/* not supported with this hardware */
return 0;
- ab->acpi.acpi_tas_enable = false;
-
ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS);
if (ret) {
ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to get ACPI DSM data: %d\n", ret);
return ret;
}
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_DISABLE_FLAG)) {
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_DISABLE_FLAG);
+ if (ret) {
+ ath12k_warn(ab, "failed to get ACPI DISABLE FLAG: %d\n", ret);
+ return ret;
+ }
+
+ if (ATH12K_ACPI_CHEK_BIT_VALID(ab->acpi,
+ ATH12K_ACPI_DSM_DISABLE_11BE_BIT))
+ ab->acpi.acpi_disable_11be = true;
+
+ if (!ATH12K_ACPI_CHEK_BIT_VALID(ab->acpi,
+ ATH12K_ACPI_DSM_DISABLE_RFKILL_BIT))
+ ab->acpi.acpi_disable_rfkill = true;
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_BDF_EXT)) {
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_BDF_EXT);
+ if (ret || ab->acpi.bdf_string[0] == '\0') {
+ ath12k_warn(ab, "failed to get ACPI BDF EXT: %d\n", ret);
+ return ret;
+ }
+
+ ab->acpi.acpi_enable_bdf = true;
+ }
+
if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_TAS_CFG)) {
ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_TAS_CFG);
if (ret) {
@@ -308,20 +441,6 @@ int ath12k_acpi_start(struct ath12k_base *ab)
ab->acpi.acpi_bios_sar_enable = true;
}
- if (ab->acpi.acpi_tas_enable) {
- ret = ath12k_acpi_set_tas_params(ab);
- if (ret) {
- ath12k_warn(ab, "failed to send ACPI parameters: %d\n", ret);
- return ret;
- }
- }
-
- if (ab->acpi.acpi_bios_sar_enable) {
- ret = ath12k_acpi_set_bios_sar_params(ab);
- if (ret)
- return ret;
- }
-
if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_CCA)) {
ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_INDEX_CCA);
if (ret) {
@@ -332,18 +451,8 @@ int ath12k_acpi_start(struct ath12k_base *ab)
if (ab->acpi.cca_data[0] == ATH12K_ACPI_CCA_THR_VERSION &&
ab->acpi.cca_data[ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET] ==
- ATH12K_ACPI_CCA_THR_ENABLE_FLAG) {
- buf = ab->acpi.cca_data + ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET;
- ret = ath12k_wmi_set_bios_cmd(ab,
- WMI_BIOS_PARAM_CCA_THRESHOLD_TYPE,
- buf,
- ATH12K_ACPI_CCA_THR_OFFSET_LEN);
- if (ret) {
- ath12k_warn(ab, "failed to set ACPI DSM CCA threshold: %d\n",
- ret);
- return ret;
- }
- }
+ ATH12K_ACPI_CCA_THR_ENABLE_FLAG)
+ ab->acpi.acpi_cca_enable = true;
}
if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi,
@@ -356,18 +465,8 @@ int ath12k_acpi_start(struct ath12k_base *ab)
}
if (ab->acpi.band_edge_power[0] == ATH12K_ACPI_BAND_EDGE_VERSION &&
- ab->acpi.band_edge_power[1] == ATH12K_ACPI_BAND_EDGE_ENABLE_FLAG) {
- ret = ath12k_wmi_set_bios_cmd(ab,
- WMI_BIOS_PARAM_TYPE_BANDEDGE,
- ab->acpi.band_edge_power,
- sizeof(ab->acpi.band_edge_power));
- if (ret) {
- ath12k_warn(ab,
- "failed to set ACPI DSM band edge channel power: %d\n",
- ret);
- return ret;
- }
- }
+ ab->acpi.band_edge_power[1] == ATH12K_ACPI_BAND_EDGE_ENABLE_FLAG)
+ ab->acpi.acpi_band_edge_enable = true;
}
status = acpi_install_notify_handler(ACPI_HANDLE(ab->dev),
@@ -383,6 +482,21 @@ int ath12k_acpi_start(struct ath12k_base *ab)
return 0;
}
+int ath12k_acpi_check_bdf_variant_name(struct ath12k_base *ab)
+{
+ size_t max_len = sizeof(ab->qmi.target.bdf_ext);
+
+ if (!ab->acpi.acpi_enable_bdf)
+ return -ENODATA;
+
+ if (strscpy(ab->qmi.target.bdf_ext, ab->acpi.bdf_string + 4, max_len) < 0)
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "acpi bdf variant longer than the buffer (variant: %s)\n",
+ ab->acpi.bdf_string);
+
+ return 0;
+}
+
void ath12k_acpi_stop(struct ath12k_base *ab)
{
if (!ab->acpi.started)
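One detail worth noting in the ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS buffer case added above: the 1 to 4 byte ACPI buffer is folded into a single little-endian bitmap, which the ATH12K_ACPI_FUNC_BIT_VALID() checks later test. A standalone sketch of the same fold (hypothetical helper):

static u32 acpi_func_buf_to_bitmap(const u8 *buf, u32 len)
{
	u32 bitmap = 0;
	u32 i;

	/* byte 0 is the least significant byte; a buffer of { 0x6c, 0x04 }
	 * yields 0x046c, i.e. bits 2, 3, 5, 6 and 10 set
	 */
	for (i = 0; i < len; i++)
		bitmap |= (u32)buf[i] << (i * 8);

	return bitmap;
}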
diff --git a/drivers/net/wireless/ath/ath12k/acpi.h b/drivers/net/wireless/ath/ath12k/acpi.h
index 39e003d86a48..3a26fea6af1a 100644
--- a/drivers/net/wireless/ath/ath12k/acpi.h
+++ b/drivers/net/wireless/ath/ath12k/acpi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_ACPI_H
#define ATH12K_ACPI_H
@@ -9,6 +9,8 @@
#include <linux/acpi.h>
#define ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS 0
+#define ATH12K_ACPI_DSM_FUNC_DISABLE_FLAG 2
+#define ATH12K_ACPI_DSM_FUNC_BDF_EXT 3
#define ATH12K_ACPI_DSM_FUNC_BIOS_SAR 4
#define ATH12K_ACPI_DSM_FUNC_GEO_OFFSET 5
#define ATH12K_ACPI_DSM_FUNC_INDEX_CCA 6
@@ -16,6 +18,8 @@
#define ATH12K_ACPI_DSM_FUNC_TAS_DATA 9
#define ATH12K_ACPI_DSM_FUNC_INDEX_BAND_EDGE 10
+#define ATH12K_ACPI_FUNC_BIT_DISABLE_FLAG BIT(1)
+#define ATH12K_ACPI_FUNC_BIT_BDF_EXT BIT(2)
#define ATH12K_ACPI_FUNC_BIT_BIOS_SAR BIT(3)
#define ATH12K_ACPI_FUNC_BIT_GEO_OFFSET BIT(4)
#define ATH12K_ACPI_FUNC_BIT_CCA BIT(5)
@@ -25,6 +29,7 @@
#define ATH12K_ACPI_NOTIFY_EVENT 0x86
#define ATH12K_ACPI_FUNC_BIT_VALID(_acdata, _func) (((_acdata).func_bit) & (_func))
+#define ATH12K_ACPI_CHEK_BIT_VALID(_acdata, _func) (((_acdata).bit_flag) & (_func))
#define ATH12K_ACPI_TAS_DATA_VERSION 0x1
#define ATH12K_ACPI_TAS_DATA_ENABLE 0x1
@@ -48,6 +53,16 @@
#define ATH12K_ACPI_DSM_BAND_EDGE_DATA_SIZE 100
#define ATH12K_ACPI_DSM_TAS_CFG_SIZE 108
+#define ATH12K_ACPI_DSM_FUNC_MIN_BITMAP_SIZE 1
+#define ATH12K_ACPI_DSM_FUNC_MAX_BITMAP_SIZE 4
+
+#define ATH12K_ACPI_DSM_DISABLE_11BE_BIT BIT(0)
+#define ATH12K_ACPI_DSM_DISABLE_RFKILL_BIT BIT(2)
+
+#define ATH12K_ACPI_BDF_ANCHOR_STRING_LEN 3
+#define ATH12K_ACPI_BDF_ANCHOR_STRING "BDF"
+#define ATH12K_ACPI_BDF_MAX_LEN 100
+
#define ATH12K_ACPI_DSM_GEO_OFFSET_DATA_SIZE (ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET + \
ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN)
#define ATH12K_ACPI_DSM_BIOS_SAR_DATA_SIZE (ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET + \
@@ -59,6 +74,10 @@
int ath12k_acpi_start(struct ath12k_base *ab);
void ath12k_acpi_stop(struct ath12k_base *ab);
+bool ath12k_acpi_get_disable_rfkill(struct ath12k_base *ab);
+bool ath12k_acpi_get_disable_11be(struct ath12k_base *ab);
+void ath12k_acpi_set_dsm_func(struct ath12k_base *ab);
+int ath12k_acpi_check_bdf_variant_name(struct ath12k_base *ab);
#else
@@ -71,6 +90,25 @@ static inline void ath12k_acpi_stop(struct ath12k_base *ab)
{
}
+static inline bool ath12k_acpi_get_disable_rfkill(struct ath12k_base *ab)
+{
+ return false;
+}
+
+static inline bool ath12k_acpi_get_disable_11be(struct ath12k_base *ab)
+{
+ return false;
+}
+
+static inline void ath12k_acpi_set_dsm_func(struct ath12k_base *ab)
+{
+}
+
+static inline int ath12k_acpi_check_bdf_variant_name(struct ath12k_base *ab)
+{
+ return 0;
+}
+
#endif /* CONFIG_ACPI */
#endif /* ATH12K_ACPI_H */
diff --git a/drivers/net/wireless/ath/ath12k/ahb.c b/drivers/net/wireless/ath/ath12k/ahb.c
new file mode 100644
index 000000000000..b30527c402f6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/ahb.c
@@ -0,0 +1,1156 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/soc/qcom/mdt_loader.h>
+#include <linux/soc/qcom/smem_state.h>
+#include "ahb.h"
+#include "debug.h"
+#include "hif.h"
+
+static const struct of_device_id ath12k_ahb_of_match[] = {
+ { .compatible = "qcom,ipq5332-wifi",
+ .data = (void *)ATH12K_HW_IPQ5332_HW10,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, ath12k_ahb_of_match);
+
+#define ATH12K_IRQ_CE0_OFFSET 4
+#define ATH12K_MAX_UPDS 1
+#define ATH12K_UPD_IRQ_WRD_LEN 18
+static const char ath12k_userpd_irq[][9] = {"spawn",
+ "ready",
+ "stop-ack"};
+
+static const char *irq_name[ATH12K_IRQ_NUM_MAX] = {
+ "misc-pulse1",
+ "misc-latch",
+ "sw-exception",
+ "watchdog",
+ "ce0",
+ "ce1",
+ "ce2",
+ "ce3",
+ "ce4",
+ "ce5",
+ "ce6",
+ "ce7",
+ "ce8",
+ "ce9",
+ "ce10",
+ "ce11",
+ "host2wbm-desc-feed",
+ "host2reo-re-injection",
+ "host2reo-command",
+ "host2rxdma-monitor-ring3",
+ "host2rxdma-monitor-ring2",
+ "host2rxdma-monitor-ring1",
+ "reo2ost-exception",
+ "wbm2host-rx-release",
+ "reo2host-status",
+ "reo2host-destination-ring4",
+ "reo2host-destination-ring3",
+ "reo2host-destination-ring2",
+ "reo2host-destination-ring1",
+ "rxdma2host-monitor-destination-mac3",
+ "rxdma2host-monitor-destination-mac2",
+ "rxdma2host-monitor-destination-mac1",
+ "ppdu-end-interrupts-mac3",
+ "ppdu-end-interrupts-mac2",
+ "ppdu-end-interrupts-mac1",
+ "rxdma2host-monitor-status-ring-mac3",
+ "rxdma2host-monitor-status-ring-mac2",
+ "rxdma2host-monitor-status-ring-mac1",
+ "host2rxdma-host-buf-ring-mac3",
+ "host2rxdma-host-buf-ring-mac2",
+ "host2rxdma-host-buf-ring-mac1",
+ "rxdma2host-destination-ring-mac3",
+ "rxdma2host-destination-ring-mac2",
+ "rxdma2host-destination-ring-mac1",
+ "host2tcl-input-ring4",
+ "host2tcl-input-ring3",
+ "host2tcl-input-ring2",
+ "host2tcl-input-ring1",
+ "wbm2host-tx-completions-ring4",
+ "wbm2host-tx-completions-ring3",
+ "wbm2host-tx-completions-ring2",
+ "wbm2host-tx-completions-ring1",
+ "tcl2host-status-ring",
+};
+
+enum ext_irq_num {
+ host2wbm_desc_feed = 16,
+ host2reo_re_injection,
+ host2reo_command,
+ host2rxdma_monitor_ring3,
+ host2rxdma_monitor_ring2,
+ host2rxdma_monitor_ring1,
+ reo2host_exception,
+ wbm2host_rx_release,
+ reo2host_status,
+ reo2host_destination_ring4,
+ reo2host_destination_ring3,
+ reo2host_destination_ring2,
+ reo2host_destination_ring1,
+ rxdma2host_monitor_destination_mac3,
+ rxdma2host_monitor_destination_mac2,
+ rxdma2host_monitor_destination_mac1,
+ ppdu_end_interrupts_mac3,
+ ppdu_end_interrupts_mac2,
+ ppdu_end_interrupts_mac1,
+ rxdma2host_monitor_status_ring_mac3,
+ rxdma2host_monitor_status_ring_mac2,
+ rxdma2host_monitor_status_ring_mac1,
+ host2rxdma_host_buf_ring_mac3,
+ host2rxdma_host_buf_ring_mac2,
+ host2rxdma_host_buf_ring_mac1,
+ rxdma2host_destination_ring_mac3,
+ rxdma2host_destination_ring_mac2,
+ rxdma2host_destination_ring_mac1,
+ host2tcl_input_ring4,
+ host2tcl_input_ring3,
+ host2tcl_input_ring2,
+ host2tcl_input_ring1,
+ wbm2host_tx_completions_ring4,
+ wbm2host_tx_completions_ring3,
+ wbm2host_tx_completions_ring2,
+ wbm2host_tx_completions_ring1,
+ tcl2host_status_ring,
+};
+
+static u32 ath12k_ahb_read32(struct ath12k_base *ab, u32 offset)
+{
+ if (ab->ce_remap && offset < HAL_SEQ_WCSS_CMEM_OFFSET)
+ return ioread32(ab->mem_ce + offset);
+ return ioread32(ab->mem + offset);
+}
+
+static void ath12k_ahb_write32(struct ath12k_base *ab, u32 offset,
+ u32 value)
+{
+ if (ab->ce_remap && offset < HAL_SEQ_WCSS_CMEM_OFFSET)
+ iowrite32(value, ab->mem_ce + offset);
+ else
+ iowrite32(value, ab->mem + offset);
+}
+
+static void ath12k_ahb_cancel_workqueue(struct ath12k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ cancel_work_sync(&ce_pipe->intr_wq);
+ }
+}
+
+static void ath12k_ahb_ext_grp_disable(struct ath12k_ext_irq_grp *irq_grp)
+{
+ int i;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+static void __ath12k_ahb_ext_irq_disable(struct ath12k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ ath12k_ahb_ext_grp_disable(irq_grp);
+ if (irq_grp->napi_enabled) {
+ napi_synchronize(&irq_grp->napi);
+ napi_disable(&irq_grp->napi);
+ irq_grp->napi_enabled = false;
+ }
+ }
+}
+
+static void ath12k_ahb_ext_grp_enable(struct ath12k_ext_irq_grp *irq_grp)
+{
+ int i;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+static void ath12k_ahb_setbit32(struct ath12k_base *ab, u8 bit, u32 offset)
+{
+ u32 val;
+
+ val = ath12k_ahb_read32(ab, offset);
+ ath12k_ahb_write32(ab, offset, val | BIT(bit));
+}
+
+static void ath12k_ahb_clearbit32(struct ath12k_base *ab, u8 bit, u32 offset)
+{
+ u32 val;
+
+ val = ath12k_ahb_read32(ab, offset);
+ ath12k_ahb_write32(ab, offset, val & ~BIT(bit));
+}
+
+static void ath12k_ahb_ce_irq_enable(struct ath12k_base *ab, u16 ce_id)
+{
+ const struct ce_attr *ce_attr;
+ const struct ce_ie_addr *ce_ie_addr = ab->hw_params->ce_ie_addr;
+ u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;
+
+ ie1_reg_addr = ce_ie_addr->ie1_reg_addr;
+ ie2_reg_addr = ce_ie_addr->ie2_reg_addr;
+ ie3_reg_addr = ce_ie_addr->ie3_reg_addr;
+
+ ce_attr = &ab->hw_params->host_ce_config[ce_id];
+ if (ce_attr->src_nentries)
+ ath12k_ahb_setbit32(ab, ce_id, ie1_reg_addr);
+
+ if (ce_attr->dest_nentries) {
+ ath12k_ahb_setbit32(ab, ce_id, ie2_reg_addr);
+ ath12k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
+ ie3_reg_addr);
+ }
+}
+
+static void ath12k_ahb_ce_irq_disable(struct ath12k_base *ab, u16 ce_id)
+{
+ const struct ce_attr *ce_attr;
+ const struct ce_ie_addr *ce_ie_addr = ab->hw_params->ce_ie_addr;
+ u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;
+
+ ie1_reg_addr = ce_ie_addr->ie1_reg_addr;
+ ie2_reg_addr = ce_ie_addr->ie2_reg_addr;
+ ie3_reg_addr = ce_ie_addr->ie3_reg_addr;
+
+ ce_attr = &ab->hw_params->host_ce_config[ce_id];
+ if (ce_attr->src_nentries)
+ ath12k_ahb_clearbit32(ab, ce_id, ie1_reg_addr);
+
+ if (ce_attr->dest_nentries) {
+ ath12k_ahb_clearbit32(ab, ce_id, ie2_reg_addr);
+ ath12k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
+ ie3_reg_addr);
+ }
+}
+
+static void ath12k_ahb_sync_ce_irqs(struct ath12k_base *ab)
+{
+ int i;
+ int irq_idx;
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ irq_idx = ATH12K_IRQ_CE0_OFFSET + i;
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+}
+
+static void ath12k_ahb_sync_ext_irqs(struct ath12k_base *ab)
+{
+ int i, j;
+ int irq_idx;
+
+ for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ irq_idx = irq_grp->irqs[j];
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+ }
+}
+
+static void ath12k_ahb_ce_irqs_enable(struct ath12k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath12k_ahb_ce_irq_enable(ab, i);
+ }
+}
+
+static void ath12k_ahb_ce_irqs_disable(struct ath12k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath12k_ahb_ce_irq_disable(ab, i);
+ }
+}
+
+static int ath12k_ahb_start(struct ath12k_base *ab)
+{
+ ath12k_ahb_ce_irqs_enable(ab);
+ ath12k_ce_rx_post_buf(ab);
+
+ return 0;
+}
+
+static void ath12k_ahb_ext_irq_enable(struct ath12k_base *ab)
+{
+ struct ath12k_ext_irq_grp *irq_grp;
+ int i;
+
+ for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ irq_grp = &ab->ext_irq_grp[i];
+ if (!irq_grp->napi_enabled) {
+ napi_enable(&irq_grp->napi);
+ irq_grp->napi_enabled = true;
+ }
+ ath12k_ahb_ext_grp_enable(irq_grp);
+ }
+}
+
+static void ath12k_ahb_ext_irq_disable(struct ath12k_base *ab)
+{
+ __ath12k_ahb_ext_irq_disable(ab);
+ ath12k_ahb_sync_ext_irqs(ab);
+}
+
+static void ath12k_ahb_stop(struct ath12k_base *ab)
+{
+ if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+ ath12k_ahb_ce_irqs_disable(ab);
+ ath12k_ahb_sync_ce_irqs(ab);
+ ath12k_ahb_cancel_workqueue(ab);
+ timer_delete_sync(&ab->rx_replenish_retry);
+ ath12k_ce_cleanup_pipes(ab);
+}
+
+static int ath12k_ahb_power_up(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+ char fw_name[ATH12K_USERPD_FW_NAME_LEN];
+ char fw2_name[ATH12K_USERPD_FW_NAME_LEN];
+ struct device *dev = ab->dev;
+ const struct firmware *fw, *fw2;
+ struct reserved_mem *rmem = NULL;
+ unsigned long time_left;
+ phys_addr_t mem_phys;
+ void *mem_region;
+ size_t mem_size;
+ u32 pasid;
+ int ret;
+
+ rmem = ath12k_core_get_reserved_mem(ab, 0);
+ if (!rmem)
+ return -ENODEV;
+
+ mem_phys = rmem->base;
+ mem_size = rmem->size;
+ mem_region = devm_memremap(dev, mem_phys, mem_size, MEMREMAP_WC);
+ if (IS_ERR(mem_region)) {
+ ath12k_err(ab, "unable to map memory region: %pa+%pa\n",
+ &rmem->base, &rmem->size);
+ return PTR_ERR(mem_region);
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "%s/%s/%s%d%s", ATH12K_FW_DIR,
+ ab->hw_params->fw.dir, ATH12K_AHB_FW_PREFIX, ab_ahb->userpd_id,
+ ATH12K_AHB_FW_SUFFIX);
+
+ ret = request_firmware(&fw, fw_name, dev);
+ if (ret < 0) {
+ ath12k_err(ab, "request_firmware failed\n");
+ return ret;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_AHB, "Booting fw image %s, size %zd\n", fw_name,
+ fw->size);
+
+ if (!fw->size) {
+ ath12k_err(ab, "Invalid firmware size\n");
+ ret = -EINVAL;
+ goto err_fw;
+ }
+
+ pasid = (u32_encode_bits(ab_ahb->userpd_id, ATH12K_USERPD_ID_MASK)) |
+ ATH12K_AHB_UPD_SWID;
+
+ /* Load FW image to a reserved memory location */
+ ret = qcom_mdt_load(dev, fw, fw_name, pasid, mem_region, mem_phys, mem_size,
+ &mem_phys);
+ if (ret) {
+ ath12k_err(ab, "Failed to load MDT segments: %d\n", ret);
+ goto err_fw;
+ }
+
+ snprintf(fw2_name, sizeof(fw2_name), "%s/%s/%s", ATH12K_FW_DIR,
+ ab->hw_params->fw.dir, ATH12K_AHB_FW2);
+
+ ret = request_firmware(&fw2, fw2_name, dev);
+ if (ret < 0) {
+ ath12k_err(ab, "request_firmware failed\n");
+ goto err_fw;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_AHB, "Booting fw image %s, size %zd\n", fw2_name,
+ fw2->size);
+
+ if (!fw2->size) {
+ ath12k_err(ab, "Invalid firmware size\n");
+ ret = -EINVAL;
+ goto err_fw2;
+ }
+
+ ret = qcom_mdt_load_no_init(dev, fw2, fw2_name, mem_region, mem_phys,
+ mem_size, &mem_phys);
+ if (ret) {
+ ath12k_err(ab, "Failed to load MDT segments: %d\n", ret);
+ goto err_fw2;
+ }
+
+ /* Authenticate FW image using peripheral ID */
+ ret = qcom_scm_pas_auth_and_reset(pasid);
+ if (ret) {
+ ath12k_err(ab, "failed to boot the remote processor %d\n", ret);
+ goto err_fw2;
+ }
+
+ /* Instruct Q6 to spawn userPD thread */
+ ret = qcom_smem_state_update_bits(ab_ahb->spawn_state, BIT(ab_ahb->spawn_bit),
+ BIT(ab_ahb->spawn_bit));
+ if (ret) {
+ ath12k_err(ab, "Failed to update spawn state %d\n", ret);
+ goto err_fw2;
+ }
+
+ time_left = wait_for_completion_timeout(&ab_ahb->userpd_spawned,
+ ATH12K_USERPD_SPAWN_TIMEOUT);
+ if (!time_left) {
+ ath12k_err(ab, "UserPD spawn wait timed out\n");
+ ret = -ETIMEDOUT;
+ goto err_fw2;
+ }
+
+ time_left = wait_for_completion_timeout(&ab_ahb->userpd_ready,
+ ATH12K_USERPD_READY_TIMEOUT);
+ if (!time_left) {
+ ath12k_err(ab, "UserPD ready wait timed out\n");
+ ret = -ETIMEDOUT;
+ goto err_fw2;
+ }
+
+ qcom_smem_state_update_bits(ab_ahb->spawn_state, BIT(ab_ahb->spawn_bit), 0);
+
+ ath12k_dbg(ab, ATH12K_DBG_AHB, "UserPD%d is now UP\n", ab_ahb->userpd_id);
+
+err_fw2:
+ release_firmware(fw2);
+err_fw:
+ release_firmware(fw);
+ return ret;
+}
+
+static void ath12k_ahb_power_down(struct ath12k_base *ab, bool is_suspend)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+ unsigned long time_left;
+ u32 pasid;
+ int ret;
+
+ qcom_smem_state_update_bits(ab_ahb->stop_state, BIT(ab_ahb->stop_bit),
+ BIT(ab_ahb->stop_bit));
+
+ time_left = wait_for_completion_timeout(&ab_ahb->userpd_stopped,
+ ATH12K_USERPD_STOP_TIMEOUT);
+ if (!time_left) {
+ ath12k_err(ab, "UserPD stop wait timed out\n");
+ return;
+ }
+
+ qcom_smem_state_update_bits(ab_ahb->stop_state, BIT(ab_ahb->stop_bit), 0);
+
+ pasid = (u32_encode_bits(ab_ahb->userpd_id, ATH12K_USERPD_ID_MASK)) |
+ ATH12K_AHB_UPD_SWID;
+ /* Release the firmware */
+ ret = qcom_scm_pas_shutdown(pasid);
+ if (ret)
+ ath12k_err(ab, "scm pas shutdown failed for userPD%d: %d\n",
+ ab_ahb->userpd_id, ret);
+}
+
+static void ath12k_ahb_init_qmi_ce_config(struct ath12k_base *ab)
+{
+ struct ath12k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
+
+ cfg->tgt_ce_len = ab->hw_params->target_ce_count;
+ cfg->tgt_ce = ab->hw_params->target_ce_config;
+ cfg->svc_to_ce_map_len = ab->hw_params->svc_to_ce_map_len;
+ cfg->svc_to_ce_map = ab->hw_params->svc_to_ce_map;
+ ab->qmi.service_ins_id = ab->hw_params->qmi_service_ins_id;
+}
+
+static void ath12k_ahb_ce_workqueue(struct work_struct *work)
+{
+ struct ath12k_ce_pipe *ce_pipe = from_work(ce_pipe, work, intr_wq);
+
+ ath12k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
+
+ ath12k_ahb_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
+}
+
+static irqreturn_t ath12k_ahb_ce_interrupt_handler(int irq, void *arg)
+{
+ struct ath12k_ce_pipe *ce_pipe = arg;
+
+ /* last interrupt received for this CE */
+ ce_pipe->timestamp = jiffies;
+
+ ath12k_ahb_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
+
+ queue_work(system_bh_wq, &ce_pipe->intr_wq);
+
+ return IRQ_HANDLED;
+}
+
+static int ath12k_ahb_ext_grp_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct ath12k_ext_irq_grp *irq_grp = container_of(napi,
+ struct ath12k_ext_irq_grp,
+ napi);
+ struct ath12k_base *ab = irq_grp->ab;
+ int work_done;
+
+ work_done = ath12k_dp_service_srng(ab, irq_grp, budget);
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ ath12k_ahb_ext_grp_enable(irq_grp);
+ }
+
+ if (work_done > budget)
+ work_done = budget;
+
+ return work_done;
+}
+
+static irqreturn_t ath12k_ahb_ext_interrupt_handler(int irq, void *arg)
+{
+ struct ath12k_ext_irq_grp *irq_grp = arg;
+
+ /* last interrupt received for this group */
+ irq_grp->timestamp = jiffies;
+
+ ath12k_ahb_ext_grp_disable(irq_grp);
+
+ napi_schedule(&irq_grp->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int ath12k_ahb_config_ext_irq(struct ath12k_base *ab)
+{
+ const struct ath12k_hw_ring_mask *ring_mask;
+ struct ath12k_ext_irq_grp *irq_grp;
+ const struct hal_ops *hal_ops;
+ int i, j, irq, irq_idx, ret;
+ u32 num_irq;
+
+ ring_mask = ab->hw_params->ring_mask;
+ hal_ops = ab->hw_params->hal_ops;
+ for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ irq_grp = &ab->ext_irq_grp[i];
+ num_irq = 0;
+
+ irq_grp->ab = ab;
+ irq_grp->grp_id = i;
+
+ irq_grp->napi_ndev = alloc_netdev_dummy(0);
+ if (!irq_grp->napi_ndev)
+ return -ENOMEM;
+
+ netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
+ ath12k_ahb_ext_grp_napi_poll);
+
+ for (j = 0; j < ATH12K_EXT_IRQ_NUM_MAX; j++) {
+ /* For TX ring, ensure that the ring mask and the
+ * tcl_to_wbm_rbm_map point to the same ring number.
+ */
+ if (ring_mask->tx[i] &
+ BIT(hal_ops->tcl_to_wbm_rbm_map[j].wbm_ring_num)) {
+ irq_grp->irqs[num_irq++] =
+ wbm2host_tx_completions_ring1 - j;
+ }
+
+ if (ring_mask->rx[i] & BIT(j)) {
+ irq_grp->irqs[num_irq++] =
+ reo2host_destination_ring1 - j;
+ }
+
+ if (ring_mask->rx_err[i] & BIT(j))
+ irq_grp->irqs[num_irq++] = reo2host_exception;
+
+ if (ring_mask->rx_wbm_rel[i] & BIT(j))
+ irq_grp->irqs[num_irq++] = wbm2host_rx_release;
+
+ if (ring_mask->reo_status[i] & BIT(j))
+ irq_grp->irqs[num_irq++] = reo2host_status;
+
+ if (ring_mask->rx_mon_dest[i] & BIT(j))
+ irq_grp->irqs[num_irq++] =
+ rxdma2host_monitor_destination_mac1;
+ }
+
+ irq_grp->num_irq = num_irq;
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ irq_idx = irq_grp->irqs[j];
+
+ irq = platform_get_irq_byname(ab->pdev,
+ irq_name[irq_idx]);
+ ab->irq_num[irq_idx] = irq;
+ irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY);
+ ret = devm_request_irq(ab->dev, irq,
+ ath12k_ahb_ext_interrupt_handler,
+ IRQF_TRIGGER_RISING,
+ irq_name[irq_idx], irq_grp);
+ if (ret)
+ ath12k_warn(ab, "failed request_irq for %d\n", irq);
+ }
+ }
+
+ return 0;
+}
+
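The ring-mask walk in ath12k_ahb_config_ext_irq() above leans on enum ext_irq_num declaring the per-ring interrupts in descending ring order (ring4 first, ring1 last), which is why ring index j is subtracted from the *_ring1 entry. A tiny sketch of that arithmetic for the REO destination rings (hypothetical helper):

static enum ext_irq_num reo_dst_ring_to_irq(int j)
{
	/* j = 0 -> reo2host_destination_ring1, j = 1 -> ring2,
	 * j = 2 -> ring3, j = 3 -> ring4
	 */
	return reo2host_destination_ring1 - j;
}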
+static int ath12k_ahb_config_irq(struct ath12k_base *ab)
+{
+ int irq, irq_idx, i;
+ int ret;
+
+ /* Configure CE irqs */
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ irq_idx = ATH12K_IRQ_CE0_OFFSET + i;
+
+ INIT_WORK(&ce_pipe->intr_wq, ath12k_ahb_ce_workqueue);
+ irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]);
+ ret = devm_request_irq(ab->dev, irq, ath12k_ahb_ce_interrupt_handler,
+ IRQF_TRIGGER_RISING, irq_name[irq_idx],
+ ce_pipe);
+ if (ret)
+ return ret;
+
+ ab->irq_num[irq_idx] = irq;
+ }
+
+ /* Configure external interrupts */
+ ret = ath12k_ahb_config_ext_irq(ab);
+
+ return ret;
+}
+
+static int ath12k_ahb_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ const struct service_to_pipe *entry;
+ bool ul_set = false, dl_set = false;
+ u32 pipedir;
+ int i;
+
+ for (i = 0; i < ab->hw_params->svc_to_ce_map_len; i++) {
+ entry = &ab->hw_params->svc_to_ce_map[i];
+
+ if (__le32_to_cpu(entry->service_id) != service_id)
+ continue;
+
+ pipedir = __le32_to_cpu(entry->pipedir);
+ if (pipedir == PIPEDIR_IN || pipedir == PIPEDIR_INOUT) {
+ WARN_ON(dl_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ }
+
+ if (pipedir == PIPEDIR_OUT || pipedir == PIPEDIR_INOUT) {
+ WARN_ON(ul_set);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ ul_set = true;
+ }
+ }
+
+ if (WARN_ON(!ul_set || !dl_set))
+ return -ENOENT;
+
+ return 0;
+}
+
+static const struct ath12k_hif_ops ath12k_ahb_hif_ops_ipq5332 = {
+ .start = ath12k_ahb_start,
+ .stop = ath12k_ahb_stop,
+ .read32 = ath12k_ahb_read32,
+ .write32 = ath12k_ahb_write32,
+ .irq_enable = ath12k_ahb_ext_irq_enable,
+ .irq_disable = ath12k_ahb_ext_irq_disable,
+ .map_service_to_pipe = ath12k_ahb_map_service_to_pipe,
+ .power_up = ath12k_ahb_power_up,
+ .power_down = ath12k_ahb_power_down,
+};
+
+static irqreturn_t ath12k_userpd_irq_handler(int irq, void *data)
+{
+ struct ath12k_base *ab = data;
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+
+ if (irq == ab_ahb->userpd_irq_num[ATH12K_USERPD_SPAWN_IRQ]) {
+ complete(&ab_ahb->userpd_spawned);
+ } else if (irq == ab_ahb->userpd_irq_num[ATH12K_USERPD_READY_IRQ]) {
+ complete(&ab_ahb->userpd_ready);
+ } else if (irq == ab_ahb->userpd_irq_num[ATH12K_USERPD_STOP_ACK_IRQ]) {
+ complete(&ab_ahb->userpd_stopped);
+ } else {
+ ath12k_err(ab, "Invalid userpd interrupt\n");
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int ath12k_ahb_config_rproc_irq(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+ int i, ret;
+ char *upd_irq_name;
+
+ for (i = 0; i < ATH12K_USERPD_MAX_IRQ; i++) {
+ ab_ahb->userpd_irq_num[i] = platform_get_irq_byname(ab->pdev,
+ ath12k_userpd_irq[i]);
+ if (ab_ahb->userpd_irq_num[i] < 0)
+ return ab_ahb->userpd_irq_num[i];
+
+ upd_irq_name = devm_kzalloc(&ab->pdev->dev, ATH12K_UPD_IRQ_WRD_LEN,
+ GFP_KERNEL);
+ if (!upd_irq_name)
+ return -ENOMEM;
+
+ scnprintf(upd_irq_name, ATH12K_UPD_IRQ_WRD_LEN, "UserPD%u-%s",
+ ab_ahb->userpd_id, ath12k_userpd_irq[i]);
+ ret = devm_request_threaded_irq(&ab->pdev->dev, ab_ahb->userpd_irq_num[i],
+ NULL, ath12k_userpd_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ upd_irq_name, ab);
+ if (ret)
+ return dev_err_probe(&ab->pdev->dev, ret,
+ "Request %s irq failed: %d\n",
+ ath12k_userpd_irq[i], ret);
+ }
+
+ ab_ahb->spawn_state = devm_qcom_smem_state_get(&ab->pdev->dev, "spawn",
+ &ab_ahb->spawn_bit);
+ if (IS_ERR(ab_ahb->spawn_state))
+ return dev_err_probe(&ab->pdev->dev, PTR_ERR(ab_ahb->spawn_state),
+ "Failed to acquire spawn state\n");
+
+ ab_ahb->stop_state = devm_qcom_smem_state_get(&ab->pdev->dev, "stop",
+ &ab_ahb->stop_bit);
+ if (IS_ERR(ab_ahb->stop_state))
+ return dev_err_probe(&ab->pdev->dev, PTR_ERR(ab_ahb->stop_state),
+ "Failed to acquire stop state\n");
+
+ init_completion(&ab_ahb->userpd_spawned);
+ init_completion(&ab_ahb->userpd_ready);
+ init_completion(&ab_ahb->userpd_stopped);
+ return 0;
+}
+
+static int ath12k_ahb_root_pd_state_notifier(struct notifier_block *nb,
+ const unsigned long event, void *data)
+{
+ struct ath12k_ahb *ab_ahb = container_of(nb, struct ath12k_ahb, root_pd_nb);
+ struct ath12k_base *ab = ab_ahb->ab;
+
+ if (event == ATH12K_RPROC_AFTER_POWERUP) {
+ ath12k_dbg(ab, ATH12K_DBG_AHB, "Root PD is UP\n");
+ complete(&ab_ahb->rootpd_ready);
+ }
+
+ return 0;
+}
+
+static int ath12k_ahb_register_rproc_notifier(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+
+ ab_ahb->root_pd_nb.notifier_call = ath12k_ahb_root_pd_state_notifier;
+ init_completion(&ab_ahb->rootpd_ready);
+
+ ab_ahb->root_pd_notifier = qcom_register_ssr_notifier(ab_ahb->tgt_rproc->name,
+ &ab_ahb->root_pd_nb);
+ if (IS_ERR(ab_ahb->root_pd_notifier))
+ return PTR_ERR(ab_ahb->root_pd_notifier);
+
+ return 0;
+}
+
+static void ath12k_ahb_unregister_rproc_notifier(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+
+ if (!ab_ahb->root_pd_notifier) {
+ ath12k_err(ab, "Rproc notifier not registered\n");
+ return;
+ }
+
+ qcom_unregister_ssr_notifier(ab_ahb->root_pd_notifier,
+ &ab_ahb->root_pd_nb);
+ ab_ahb->root_pd_notifier = NULL;
+}
+
+static int ath12k_ahb_get_rproc(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+ struct device *dev = ab->dev;
+ struct device_node *np;
+ struct rproc *prproc;
+
+ np = of_parse_phandle(dev->of_node, "qcom,rproc", 0);
+ if (!np) {
+ ath12k_err(ab, "failed to get q6_rproc handle\n");
+ return -ENOENT;
+ }
+
+ prproc = rproc_get_by_phandle(np->phandle);
+ of_node_put(np);
+ if (!prproc)
+ return dev_err_probe(&ab->pdev->dev, -EPROBE_DEFER,
+ "failed to get rproc\n");
+
+ ab_ahb->tgt_rproc = prproc;
+
+ return 0;
+}
+
+static int ath12k_ahb_boot_root_pd(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+ unsigned long time_left;
+ int ret;
+
+ ret = rproc_boot(ab_ahb->tgt_rproc);
+ if (ret < 0) {
+ ath12k_err(ab, "RootPD boot failed\n");
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ab_ahb->rootpd_ready,
+ ATH12K_ROOTPD_READY_TIMEOUT);
+ if (!time_left) {
+ ath12k_err(ab, "RootPD ready wait timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int ath12k_ahb_configure_rproc(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+ int ret;
+
+ ret = ath12k_ahb_get_rproc(ab);
+ if (ret < 0)
+ return ret;
+
+ ret = ath12k_ahb_register_rproc_notifier(ab);
+ if (ret < 0) {
+ ret = dev_err_probe(&ab->pdev->dev, ret,
+ "failed to register rproc notifier\n");
+ goto err_put_rproc;
+ }
+
+ if (ab_ahb->tgt_rproc->state != RPROC_RUNNING) {
+ ret = ath12k_ahb_boot_root_pd(ab);
+ if (ret < 0) {
+ ath12k_err(ab, "failed to boot the remote processor Q6\n");
+ goto err_unreg_notifier;
+ }
+ }
+
+ return ath12k_ahb_config_rproc_irq(ab);
+
+err_unreg_notifier:
+ ath12k_ahb_unregister_rproc_notifier(ab);
+
+err_put_rproc:
+ rproc_put(ab_ahb->tgt_rproc);
+ return ret;
+}
+
+static void ath12k_ahb_deconfigure_rproc(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+
+ ath12k_ahb_unregister_rproc_notifier(ab);
+ rproc_put(ab_ahb->tgt_rproc);
+}
+
+static int ath12k_ahb_resource_init(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+ struct platform_device *pdev = ab->pdev;
+ struct resource *mem_res;
+ int ret;
+
+ ab->mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
+ if (IS_ERR(ab->mem)) {
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(ab->mem), "ioremap error\n");
+ goto out;
+ }
+
+ ab->mem_len = resource_size(mem_res);
+
+ if (ab->hw_params->ce_remap) {
+ const struct ce_remap *ce_remap = ab->hw_params->ce_remap;
+ /* CE register space is moved out of WCSS and the space is not
+ * contiguous, hence remapping the CE registers to a new space
+ * for accessing them.
+ */
+ ab->mem_ce = ioremap(ce_remap->base, ce_remap->size);
+ if (!ab->mem_ce) {
+ dev_err(&pdev->dev, "ce ioremap error\n");
+ ret = -ENOMEM;
+ goto err_mem_unmap;
+ }
+ ab->ce_remap = true;
+ ab->ce_remap_base_addr = HAL_IPQ5332_CE_WFSS_REG_BASE;
+ }
+
+ ab_ahb->xo_clk = devm_clk_get(ab->dev, "xo");
+ if (IS_ERR(ab_ahb->xo_clk)) {
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(ab_ahb->xo_clk),
+ "failed to get xo clock\n");
+ goto err_mem_ce_unmap;
+ }
+
+ ret = clk_prepare_enable(ab_ahb->xo_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable gcc_xo_clk: %d\n", ret);
+ goto err_clock_deinit;
+ }
+
+ return 0;
+
+err_clock_deinit:
+ devm_clk_put(ab->dev, ab_ahb->xo_clk);
+
+err_mem_ce_unmap:
+ ab_ahb->xo_clk = NULL;
+ if (ab->hw_params->ce_remap)
+ iounmap(ab->mem_ce);
+
+err_mem_unmap:
+ ab->mem_ce = NULL;
+ devm_iounmap(ab->dev, ab->mem);
+
+out:
+ ab->mem = NULL;
+ return ret;
+}
+
+static void ath12k_ahb_resource_deinit(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+
+ if (ab->mem)
+ devm_iounmap(ab->dev, ab->mem);
+
+ if (ab->mem_ce)
+ iounmap(ab->mem_ce);
+
+ ab->mem = NULL;
+ ab->mem_ce = NULL;
+
+ clk_disable_unprepare(ab_ahb->xo_clk);
+ devm_clk_put(ab->dev, ab_ahb->xo_clk);
+ ab_ahb->xo_clk = NULL;
+}
+
+static int ath12k_ahb_probe(struct platform_device *pdev)
+{
+ struct ath12k_base *ab;
+ const struct ath12k_hif_ops *hif_ops;
+ struct ath12k_ahb *ab_ahb;
+ enum ath12k_hw_rev hw_rev;
+ u32 addr, userpd_id;
+ int ret;
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to set 32-bit coherent dma\n");
+ return ret;
+ }
+
+ ab = ath12k_core_alloc(&pdev->dev, sizeof(struct ath12k_ahb),
+ ATH12K_BUS_AHB);
+ if (!ab)
+ return -ENOMEM;
+
+ hw_rev = (enum ath12k_hw_rev)(kernel_ulong_t)of_device_get_match_data(&pdev->dev);
+ switch (hw_rev) {
+ case ATH12K_HW_IPQ5332_HW10:
+ hif_ops = &ath12k_ahb_hif_ops_ipq5332;
+ userpd_id = ATH12K_IPQ5332_USERPD_ID;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto err_core_free;
+ }
+
+ ab->hif.ops = hif_ops;
+ ab->pdev = pdev;
+ ab->hw_rev = hw_rev;
+ ab->target_mem_mode = ATH12K_QMI_MEMORY_MODE_DEFAULT;
+ platform_set_drvdata(pdev, ab);
+ ab_ahb = ath12k_ab_to_ahb(ab);
+ ab_ahb->ab = ab;
+ ab_ahb->userpd_id = userpd_id;
+
+ /* Set fixed_mem_region to true for platforms that support fixed memory
+ * reservation from DT. If memory is reserved from DT for FW, the ath12k
+ * driver need not allocate memory.
+ */
+ if (!of_property_read_u32(ab->dev->of_node, "memory-region", &addr))
+ set_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags);
+
+ ret = ath12k_core_pre_init(ab);
+ if (ret)
+ goto err_core_free;
+
+ ret = ath12k_ahb_resource_init(ab);
+ if (ret)
+ goto err_core_free;
+
+ ret = ath12k_hal_srng_init(ab);
+ if (ret)
+ goto err_resource_deinit;
+
+ ret = ath12k_ce_alloc_pipes(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to allocate ce pipes: %d\n", ret);
+ goto err_hal_srng_deinit;
+ }
+
+ ath12k_ahb_init_qmi_ce_config(ab);
+
+ ret = ath12k_ahb_configure_rproc(ab);
+ if (ret)
+ goto err_ce_free;
+
+ ret = ath12k_ahb_config_irq(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to configure irq: %d\n", ret);
+ goto err_rproc_deconfigure;
+ }
+
+ ret = ath12k_core_init(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to init core: %d\n", ret);
+ goto err_rproc_deconfigure;
+ }
+
+ return 0;
+
+err_rproc_deconfigure:
+ ath12k_ahb_deconfigure_rproc(ab);
+
+err_ce_free:
+ ath12k_ce_free_pipes(ab);
+
+err_hal_srng_deinit:
+ ath12k_hal_srng_deinit(ab);
+
+err_resource_deinit:
+ ath12k_ahb_resource_deinit(ab);
+
+err_core_free:
+ ath12k_core_free(ab);
+ platform_set_drvdata(pdev, NULL);
+
+ return ret;
+}
+
+static void ath12k_ahb_remove_prepare(struct ath12k_base *ab)
+{
+ unsigned long left;
+
+ if (test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags)) {
+ left = wait_for_completion_timeout(&ab->driver_recovery,
+ ATH12K_AHB_RECOVERY_TIMEOUT);
+ if (!left)
+ ath12k_warn(ab, "failed to receive recovery response completion\n");
+ }
+
+ set_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags);
+ cancel_work_sync(&ab->restart_work);
+ cancel_work_sync(&ab->qmi.event_work);
+}
+
+static void ath12k_ahb_free_resources(struct ath12k_base *ab)
+{
+ struct platform_device *pdev = ab->pdev;
+
+ ath12k_hal_srng_deinit(ab);
+ ath12k_ce_free_pipes(ab);
+ ath12k_ahb_resource_deinit(ab);
+ ath12k_ahb_deconfigure_rproc(ab);
+ ath12k_core_free(ab);
+ platform_set_drvdata(pdev, NULL);
+}
+
+static void ath12k_ahb_remove(struct platform_device *pdev)
+{
+ struct ath12k_base *ab = platform_get_drvdata(pdev);
+
+ if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) {
+ ath12k_ahb_power_down(ab, false);
+ goto qmi_fail;
+ }
+
+ ath12k_ahb_remove_prepare(ab);
+ ath12k_core_hw_group_cleanup(ab->ag);
+qmi_fail:
+ ath12k_core_deinit(ab);
+ ath12k_ahb_free_resources(ab);
+}
+
+static struct platform_driver ath12k_ahb_driver = {
+ .driver = {
+ .name = "ath12k_ahb",
+ .of_match_table = ath12k_ahb_of_match,
+ },
+ .probe = ath12k_ahb_probe,
+ .remove = ath12k_ahb_remove,
+};
+
+int ath12k_ahb_init(void)
+{
+ return platform_driver_register(&ath12k_ahb_driver);
+}
+
+void ath12k_ahb_exit(void)
+{
+ platform_driver_unregister(&ath12k_ahb_driver);
+}
diff --git a/drivers/net/wireless/ath/ath12k/ahb.h b/drivers/net/wireless/ath/ath12k/ahb.h
new file mode 100644
index 000000000000..d56244b20a6a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/ahb.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2025, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef ATH12K_AHB_H
+#define ATH12K_AHB_H
+
+#include <linux/clk.h>
+#include <linux/remoteproc/qcom_rproc.h>
+#include "core.h"
+
+#define ATH12K_AHB_RECOVERY_TIMEOUT (3 * HZ)
+
+#define ATH12K_AHB_SMP2P_SMEM_MSG GENMASK(15, 0)
+#define ATH12K_AHB_SMP2P_SMEM_SEQ_NO GENMASK(31, 16)
+#define ATH12K_AHB_SMP2P_SMEM_VALUE_MASK 0xFFFFFFFF
+#define ATH12K_PCI_CE_WAKE_IRQ 2
+#define ATH12K_PCI_IRQ_CE0_OFFSET 3
+#define ATH12K_ROOTPD_READY_TIMEOUT (5 * HZ)
+#define ATH12K_RPROC_AFTER_POWERUP QCOM_SSR_AFTER_POWERUP
+#define ATH12K_AHB_FW_PREFIX "q6_fw"
+#define ATH12K_AHB_FW_SUFFIX ".mdt"
+#define ATH12K_AHB_FW2 "iu_fw.mdt"
+#define ATH12K_AHB_UPD_SWID 0x12
+#define ATH12K_USERPD_SPAWN_TIMEOUT (5 * HZ)
+#define ATH12K_USERPD_READY_TIMEOUT (10 * HZ)
+#define ATH12K_USERPD_STOP_TIMEOUT (5 * HZ)
+#define ATH12K_USERPD_ID_MASK GENMASK(9, 8)
+#define ATH12K_USERPD_FW_NAME_LEN 35
+
+enum ath12k_ahb_smp2p_msg_id {
+ ATH12K_AHB_POWER_SAVE_ENTER = 1,
+ ATH12K_AHB_POWER_SAVE_EXIT,
+};
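For illustration (hypothetical value; the pack/unpack helpers are outside this hunk): under the masks above, a raw SMP2P word of 0x00050001 decodes to msg = 1 (ATH12K_AHB_POWER_SAVE_ENTER) in bits 15:0 and seq_no = 5 in bits 31:16.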
+
+enum ath12k_ahb_userpd_irq {
+ ATH12K_USERPD_SPAWN_IRQ,
+ ATH12K_USERPD_READY_IRQ,
+ ATH12K_USERPD_STOP_ACK_IRQ,
+ ATH12K_USERPD_MAX_IRQ,
+};
+
+struct ath12k_base;
+
+struct ath12k_ahb {
+ struct ath12k_base *ab;
+ struct rproc *tgt_rproc;
+ struct clk *xo_clk;
+ struct completion rootpd_ready;
+ struct notifier_block root_pd_nb;
+ void *root_pd_notifier;
+ struct qcom_smem_state *spawn_state;
+ struct qcom_smem_state *stop_state;
+ struct completion userpd_spawned;
+ struct completion userpd_ready;
+ struct completion userpd_stopped;
+ u32 userpd_id;
+ u32 spawn_bit;
+ u32 stop_bit;
+ int userpd_irq_num[ATH12K_USERPD_MAX_IRQ];
+};
+
+static inline struct ath12k_ahb *ath12k_ab_to_ahb(struct ath12k_base *ab)
+{
+ return (struct ath12k_ahb *)ab->drv_priv;
+}
+
+#ifdef CONFIG_ATH12K_AHB
+int ath12k_ahb_init(void);
+void ath12k_ahb_exit(void);
+#else
+static inline int ath12k_ahb_init(void)
+{
+ return 0;
+}
+
+static inline void ath12k_ahb_exit(void) {}
+#endif
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/ce.c b/drivers/net/wireless/ath/ath12k/ce.c
index be0d669d31fc..9a63608838ac 100644
--- a/drivers/net/wireless/ath/ath12k/ce.c
+++ b/drivers/net/wireless/ath/ath12k/ce.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "dp_rx.h"
@@ -219,6 +219,96 @@ const struct ce_attr ath12k_host_ce_config_wcn7850[] = {
};
+const struct ce_attr ath12k_host_ce_config_ipq5332[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 16,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+ /* CE1: target->host HTT + HTC control */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath12k_htc_rx_completion_handler,
+ },
+ /* CE2: target->host WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 128,
+ .recv_cb = ath12k_htc_rx_completion_handler,
+ },
+ /* CE3: host->target WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+ /* CE4: host->target HTT */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 2048,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ },
+ /* CE5: target -> host PKTLOG */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath12k_dp_htt_htc_t2h_msg_handler,
+ },
+ /* CE6: Target autonomous HIF_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+ /* CE7: CV Prefetch */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+	/* CE8: Target HIF memcpy (Generic HIF memcpy) */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+ /* CE9: WMI logging/CFR/Spectral/Radar */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 128,
+ },
+ /* CE10: Unused */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+ /* CE11: Unused */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+};
+
static int ath12k_ce_rx_buf_enqueue_pipe(struct ath12k_ce_pipe *pipe,
struct sk_buff *skb, dma_addr_t paddr)
{
@@ -302,7 +392,8 @@ static int ath12k_ce_rx_post_pipe(struct ath12k_ce_pipe *pipe)
ret = ath12k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
if (ret) {
- ath12k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
+ ath12k_dbg(ab, ATH12K_DBG_CE, "failed to enqueue rx buf: %d\n",
+ ret);
dma_unmap_single(ab->dev, paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
@@ -344,10 +435,6 @@ static int ath12k_ce_completed_recv_next(struct ath12k_ce_pipe *pipe,
}
*nbytes = ath12k_hal_ce_dst_status_get_length(desc);
- if (*nbytes == 0) {
- ret = -EIO;
- goto err;
- }
*skb = pipe->dest_ring->skb[sw_index];
pipe->dest_ring->skb[sw_index] = NULL;
@@ -380,8 +467,8 @@ static void ath12k_ce_recv_process_cb(struct ath12k_ce_pipe *pipe)
dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
max_nbytes, DMA_FROM_DEVICE);
- if (unlikely(max_nbytes < nbytes)) {
- ath12k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
+ if (unlikely(max_nbytes < nbytes || nbytes == 0)) {
+ ath12k_warn(ab, "unexpected rx length (nbytes %d, max %d)",
nbytes, max_nbytes);
dev_kfree_skb_any(skb);
continue;
@@ -392,7 +479,7 @@ static void ath12k_ce_recv_process_cb(struct ath12k_ce_pipe *pipe)
}
while ((skb = __skb_dequeue(&list))) {
- ath12k_dbg(ab, ATH12K_DBG_AHB, "rx ce pipe %d len %d\n",
+ ath12k_dbg(ab, ATH12K_DBG_CE, "rx ce pipe %d len %d\n",
pipe->pipe_num, skb->len);
pipe->recv_cb(ab, skb);
}
@@ -492,7 +579,7 @@ static int ath12k_ce_init_ring(struct ath12k_base *ab,
struct ath12k_ce_ring *ce_ring,
int ce_id, enum hal_ring_type type)
{
- struct hal_srng_params params = { 0 };
+ struct hal_srng_params params = {};
int ret;
params.ring_base_paddr = ce_ring->base_addr_ce_space;
@@ -779,7 +866,7 @@ void ath12k_ce_rx_post_buf(struct ath12k_base *ab)
void ath12k_ce_rx_replenish_retry(struct timer_list *t)
{
- struct ath12k_base *ab = from_timer(ab, t, rx_replenish_retry);
+ struct ath12k_base *ab = timer_container_of(ab, t, rx_replenish_retry);
ath12k_ce_rx_post_buf(ab);
}
diff --git a/drivers/net/wireless/ath/ath12k/ce.h b/drivers/net/wireless/ath/ath12k/ce.h
index 857bc5f9e946..57f75899ee03 100644
--- a/drivers/net/wireless/ath/ath12k/ce.h
+++ b/drivers/net/wireless/ath/ath12k/ce.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_CE_H
@@ -39,8 +39,8 @@
#define PIPEDIR_INOUT_H2H 4 /* bidirectional, host to host */
/* CE address/mask */
-#define CE_HOST_IE_ADDRESS 0x00A1803C
-#define CE_HOST_IE_2_ADDRESS 0x00A18040
+#define CE_HOST_IE_ADDRESS 0x75804C
+#define CE_HOST_IE_2_ADDRESS 0x758050
#define CE_HOST_IE_3_ADDRESS CE_HOST_IE_ADDRESS
#define CE_HOST_IE_3_SHIFT 0xC
@@ -76,6 +76,17 @@ struct ce_pipe_config {
__le32 reserved;
};
+struct ce_ie_addr {
+ u32 ie1_reg_addr;
+ u32 ie2_reg_addr;
+ u32 ie3_reg_addr;
+};
+
+struct ce_remap {
+ u32 base;
+ u32 size;
+};
+
struct ce_attr {
/* CE_ATTR_* values */
unsigned int flags;
@@ -148,7 +159,7 @@ struct ath12k_ce_pipe {
void (*send_cb)(struct ath12k_ce_pipe *pipe);
void (*recv_cb)(struct ath12k_base *ab, struct sk_buff *skb);
- struct tasklet_struct intr_tq;
+ struct work_struct intr_wq;
struct ath12k_ce_ring *src_ring;
struct ath12k_ce_ring *dest_ring;
struct ath12k_ce_ring *status_ring;
@@ -164,6 +175,7 @@ struct ath12k_ce {
extern const struct ce_attr ath12k_host_ce_config_qcn9274[];
extern const struct ce_attr ath12k_host_ce_config_wcn7850[];
+extern const struct ce_attr ath12k_host_ce_config_ipq5332[];
void ath12k_ce_cleanup_pipes(struct ath12k_base *ab);
void ath12k_ce_rx_replenish_retry(struct timer_list *t);
diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c
index 51252e8bc1ae..cc352eef1939 100644
--- a/drivers/net/wireless/ath/ath12k/core.c
+++ b/drivers/net/wireless/ath/ath12k/core.c
@@ -1,27 +1,71 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/remoteproc.h>
#include <linux/firmware.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
+#include "ahb.h"
#include "core.h"
#include "dp_tx.h"
#include "dp_rx.h"
#include "debug.h"
-#include "hif.h"
-#include "fw.h"
#include "debugfs.h"
+#include "fw.h"
+#include "hif.h"
+#include "pci.h"
#include "wow.h"
+static int ahb_err, pci_err;
unsigned int ath12k_debug_mask;
module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
+bool ath12k_ftm_mode;
+module_param_named(ftm_mode, ath12k_ftm_mode, bool, 0444);
+MODULE_PARM_DESC(ftm_mode, "Boots up in factory test mode");
+
+/* protected with ath12k_hw_group_mutex */
+static struct list_head ath12k_hw_group_list = LIST_HEAD_INIT(ath12k_hw_group_list);
+
+static DEFINE_MUTEX(ath12k_hw_group_mutex);
+
+static const struct
+ath12k_mem_profile_based_param ath12k_mem_profile_based_param[] = {
+[ATH12K_QMI_MEMORY_MODE_DEFAULT] = {
+ .num_vdevs = 17,
+ .max_client_single = 512,
+ .max_client_dbs = 128,
+ .max_client_dbs_sbs = 128,
+ .dp_params = {
+ .tx_comp_ring_size = 32768,
+ .rxdma_monitor_buf_ring_size = 4096,
+ .rxdma_monitor_dst_ring_size = 8092,
+ .num_pool_tx_desc = 32768,
+ .rx_desc_count = 12288,
+ },
+ },
+[ATH12K_QMI_MEMORY_MODE_LOW_512_M] = {
+ .num_vdevs = 9,
+ .max_client_single = 128,
+ .max_client_dbs = 64,
+ .max_client_dbs_sbs = 64,
+ .dp_params = {
+ .tx_comp_ring_size = 16384,
+ .rxdma_monitor_buf_ring_size = 256,
+ .rxdma_monitor_dst_ring_size = 512,
+ .num_pool_tx_desc = 16384,
+ .rx_desc_count = 6144,
+ },
+ },
+};
+
static int ath12k_core_rfkill_config(struct ath12k_base *ab)
{
struct ath12k *ar;
@@ -30,6 +74,9 @@ static int ath12k_core_rfkill_config(struct ath12k_base *ab)
if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL))
return 0;
+ if (ath12k_acpi_get_disable_rfkill(ab))
+ return 0;
+
for (i = 0; i < ab->num_radios; i++) {
ar = ab->pdevs[i].ar;
@@ -79,11 +126,17 @@ int ath12k_core_suspend(struct ath12k_base *ab)
ar = ab->pdevs[i].ar;
if (!ar)
continue;
+
+ wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
+
ret = ath12k_mac_wait_tx_complete(ar);
if (ret) {
+ wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
ath12k_warn(ab, "failed to wait tx complete: %d\n", ret);
return ret;
}
+
+ wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
}
/* PM framework skips suspend_late/resume_early callbacks
@@ -161,10 +214,10 @@ EXPORT_SYMBOL(ath12k_core_resume);
static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
size_t name_len, bool with_variant,
- bool bus_type_mode)
+ bool bus_type_mode, bool with_default)
{
/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
- char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
+ char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = {};
if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
scnprintf(variant, sizeof(variant), ",variant=%s",
@@ -192,7 +245,9 @@ static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
"bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
ath12k_bus_str(ab->hif.bus),
ab->qmi.target.chip_id,
- ab->qmi.target.board_id, variant);
+ with_default ?
+ ATH12K_BOARD_ID_DEFAULT : ab->qmi.target.board_id,
+ variant);
break;
}
@@ -204,19 +259,19 @@ static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
size_t name_len)
{
- return __ath12k_core_create_board_name(ab, name, name_len, true, false);
+ return __ath12k_core_create_board_name(ab, name, name_len, true, false, false);
}
static int ath12k_core_create_fallback_board_name(struct ath12k_base *ab, char *name,
size_t name_len)
{
- return __ath12k_core_create_board_name(ab, name, name_len, false, false);
+ return __ath12k_core_create_board_name(ab, name, name_len, false, false, true);
}
static int ath12k_core_create_bus_type_board_name(struct ath12k_base *ab, char *name,
size_t name_len)
{
- return __ath12k_core_create_board_name(ab, name, name_len, false, true);
+ return __ath12k_core_create_board_name(ab, name, name_len, false, true, true);
}
const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
@@ -567,45 +622,100 @@ exit:
u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab)
{
if (ab->num_radios == 2)
- return TARGET_NUM_STATIONS_DBS;
- else if (ab->num_radios == 3)
- return TARGET_NUM_PEERS_PDEV_DBS_SBS;
- return TARGET_NUM_STATIONS_SINGLE;
+ return TARGET_NUM_STATIONS(ab, DBS);
+ if (ab->num_radios == 3)
+ return TARGET_NUM_STATIONS(ab, DBS_SBS);
+ return TARGET_NUM_STATIONS(ab, SINGLE);
}
u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab)
{
- if (ab->num_radios == 2)
- return TARGET_NUM_PEERS_PDEV_DBS;
- else if (ab->num_radios == 3)
- return TARGET_NUM_PEERS_PDEV_DBS_SBS;
- return TARGET_NUM_PEERS_PDEV_SINGLE;
+ return ath12k_core_get_max_station_per_radio(ab) + TARGET_NUM_VDEVS(ab);
}
-u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab)
+struct reserved_mem *ath12k_core_get_reserved_mem(struct ath12k_base *ab,
+ int index)
{
- if (ab->num_radios == 2)
- return TARGET_NUM_TIDS(DBS);
- else if (ab->num_radios == 3)
- return TARGET_NUM_TIDS(DBS_SBS);
- return TARGET_NUM_TIDS(SINGLE);
+ struct device *dev = ab->dev;
+ struct reserved_mem *rmem;
+ struct device_node *node;
+
+ node = of_parse_phandle(dev->of_node, "memory-region", index);
+ if (!node) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "failed to parse memory-region for index %d\n", index);
+ return NULL;
+ }
+
+ rmem = of_reserved_mem_lookup(node);
+ of_node_put(node);
+ if (!rmem) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "unable to get memory-region for index %d\n", index);
+ return NULL;
+ }
+
+ return rmem;
+}
+
+static inline
+void ath12k_core_to_group_ref_get(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag = ab->ag;
+
+ lockdep_assert_held(&ag->mutex);
+
+ if (ab->hw_group_ref) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "core already attached to group %d\n",
+ ag->id);
+ return;
+ }
+
+ ab->hw_group_ref = true;
+ ag->num_started++;
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "core attached to group %d, num_started %d\n",
+ ag->id, ag->num_started);
+}
+
+static inline
+void ath12k_core_to_group_ref_put(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag = ab->ag;
+
+ lockdep_assert_held(&ag->mutex);
+
+ if (!ab->hw_group_ref) {
+		ath12k_dbg(ab, ATH12K_DBG_BOOT, "core already detached from group %d\n",
+ ag->id);
+ return;
+ }
+
+ ab->hw_group_ref = false;
+ ag->num_started--;
+
+	ath12k_dbg(ab, ATH12K_DBG_BOOT, "core detached from group %d, num_started %d\n",
+ ag->id, ag->num_started);
}
static void ath12k_core_stop(struct ath12k_base *ab)
{
+ ath12k_core_to_group_ref_put(ab);
+
if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
ath12k_qmi_firmware_stop(ab);
ath12k_acpi_stop(ab);
+ ath12k_dp_rx_pdev_reo_cleanup(ab);
ath12k_hif_stop(ab);
ath12k_wmi_detach(ab);
- ath12k_dp_rx_pdev_reo_cleanup(ab);
+ ath12k_dp_free(ab);
/* De-Init of components as needed */
}
-static void ath12k_core_check_bdfext(const struct dmi_header *hdr, void *data)
+static void ath12k_core_check_cc_code_bdfext(const struct dmi_header *hdr, void *data)
{
struct ath12k_base *ab = data;
const char *magic = ATH12K_SMBIOS_BDF_EXT_MAGIC;
@@ -627,6 +737,28 @@ static void ath12k_core_check_bdfext(const struct dmi_header *hdr, void *data)
return;
}
+ spin_lock_bh(&ab->base_lock);
+
+ switch (smbios->country_code_flag) {
+ case ATH12K_SMBIOS_CC_ISO:
+ ab->new_alpha2[0] = u16_get_bits(smbios->cc_code >> 8, 0xff);
+ ab->new_alpha2[1] = u16_get_bits(smbios->cc_code, 0xff);
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot smbios cc_code %c%c\n",
+ ab->new_alpha2[0], ab->new_alpha2[1]);
+ break;
+ case ATH12K_SMBIOS_CC_WW:
+ ab->new_alpha2[0] = '0';
+ ab->new_alpha2[1] = '0';
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot smbios worldwide regdomain\n");
+ break;
+ default:
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot ignore smbios country code setting %d\n",
+ smbios->country_code_flag);
+ break;
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+
if (!smbios->bdf_enabled) {
ath12k_dbg(ab, ATH12K_DBG_BOOT, "bdf variant name not found.\n");
return;
@@ -666,7 +798,7 @@ static void ath12k_core_check_bdfext(const struct dmi_header *hdr, void *data)
int ath12k_core_check_smbios(struct ath12k_base *ab)
{
ab->qmi.target.bdf_ext[0] = '\0';
- dmi_walk(ath12k_core_check_bdfext, ab);
+ dmi_walk(ath12k_core_check_cc_code_bdfext, ab);
if (ab->qmi.target.bdf_ext[0] == '\0')
return -ENODATA;
@@ -678,6 +810,11 @@ static int ath12k_core_soc_create(struct ath12k_base *ab)
{
int ret;
+ if (ath12k_ftm_mode) {
+ ab->fw_mode = ATH12K_FIRMWARE_MODE_FTM;
+ ath12k_info(ab, "Booting in ftm mode\n");
+ }
+
ret = ath12k_qmi_init_service(ab);
if (ret) {
ath12k_err(ab, "failed to initialize qmi :%d\n", ret);
@@ -692,6 +829,8 @@ static int ath12k_core_soc_create(struct ath12k_base *ab)
goto err_qmi_deinit;
}
+ ath12k_debugfs_pdev_create(ab);
+
return 0;
err_qmi_deinit:
@@ -702,7 +841,7 @@ err_qmi_deinit:
static void ath12k_core_soc_destroy(struct ath12k_base *ab)
{
- ath12k_dp_free(ab);
+ ath12k_hif_power_down(ab, false);
ath12k_reg_free(ab);
ath12k_debugfs_soc_destroy(ab);
ath12k_qmi_deinit_service(ab);
@@ -712,38 +851,26 @@ static int ath12k_core_pdev_create(struct ath12k_base *ab)
{
int ret;
- ret = ath12k_mac_register(ab);
- if (ret) {
- ath12k_err(ab, "failed register the radio with mac80211: %d\n", ret);
- return ret;
- }
-
ret = ath12k_dp_pdev_alloc(ab);
if (ret) {
ath12k_err(ab, "failed to attach DP pdev: %d\n", ret);
- goto err_mac_unregister;
+ return ret;
}
return 0;
-
-err_mac_unregister:
- ath12k_mac_unregister(ab);
-
- return ret;
}
static void ath12k_core_pdev_destroy(struct ath12k_base *ab)
{
- ath12k_mac_unregister(ab);
- ath12k_hif_irq_disable(ab);
ath12k_dp_pdev_free(ab);
}
-static int ath12k_core_start(struct ath12k_base *ab,
- enum ath12k_firmware_mode mode)
+static int ath12k_core_start(struct ath12k_base *ab)
{
int ret;
+ lockdep_assert_held(&ab->core_lock);
+
ret = ath12k_wmi_attach(ab);
if (ret) {
ath12k_err(ab, "failed to attach wmi: %d\n", ret);
@@ -793,19 +920,12 @@ static int ath12k_core_start(struct ath12k_base *ab,
goto err_hif_stop;
}
- ret = ath12k_mac_allocate(ab);
- if (ret) {
- ath12k_err(ab, "failed to create new hw device with mac80211 :%d\n",
- ret);
- goto err_hif_stop;
- }
-
ath12k_dp_cc_config(ab);
ret = ath12k_dp_rx_pdev_reo_setup(ab);
if (ret) {
ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
- goto err_mac_destroy;
+ goto err_hif_stop;
}
ath12k_dp_hal_rx_desc_init(ab);
@@ -839,17 +959,15 @@ static int ath12k_core_start(struct ath12k_base *ab,
goto err_reo_cleanup;
}
- ret = ath12k_acpi_start(ab);
- if (ret)
- /* ACPI is optional so continue in case of an error */
- ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi failed: %d\n", ret);
+ ath12k_acpi_set_dsm_func(ab);
+
+ /* Indicate the core start in the appropriate group */
+ ath12k_core_to_group_ref_get(ab);
return 0;
err_reo_cleanup:
ath12k_dp_rx_pdev_reo_cleanup(ab);
-err_mac_destroy:
- ath12k_mac_destroy(ab);
err_hif_stop:
ath12k_hif_stop(ab);
err_wmi_detach:
@@ -857,6 +975,204 @@ err_wmi_detach:
return ret;
}
+static void ath12k_core_device_cleanup(struct ath12k_base *ab)
+{
+ mutex_lock(&ab->core_lock);
+
+ ath12k_hif_irq_disable(ab);
+ ath12k_core_pdev_destroy(ab);
+
+ mutex_unlock(&ab->core_lock);
+}
+
+static void ath12k_core_hw_group_stop(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i;
+
+ lockdep_assert_held(&ag->mutex);
+
+ clear_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);
+
+ ath12k_mac_unregister(ag);
+
+ for (i = ag->num_devices - 1; i >= 0; i--) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ clear_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
+
+ ath12k_core_device_cleanup(ab);
+ }
+
+ ath12k_mac_destroy(ag);
+}
+
+u8 ath12k_get_num_partner_link(struct ath12k *ar)
+{
+ struct ath12k_base *partner_ab, *ab = ar->ab;
+ struct ath12k_hw_group *ag = ab->ag;
+ struct ath12k_pdev *pdev;
+ u8 num_link = 0;
+ int i, j;
+
+ lockdep_assert_held(&ag->mutex);
+
+ for (i = 0; i < ag->num_devices; i++) {
+ partner_ab = ag->ab[i];
+
+ for (j = 0; j < partner_ab->num_radios; j++) {
+ pdev = &partner_ab->pdevs[j];
+
+ /* Avoid the self link */
+ if (ar == pdev->ar)
+ continue;
+
+ num_link++;
+ }
+ }
+
+ return num_link;
+}
+
+static int __ath12k_mac_mlo_ready(struct ath12k *ar)
+{
+ u8 num_link = ath12k_get_num_partner_link(ar);
+ int ret;
+
+ if (num_link == 0)
+ return 0;
+
+ ret = ath12k_wmi_mlo_ready(ar);
+ if (ret) {
+ ath12k_err(ar->ab, "MLO ready failed for pdev %d: %d\n",
+ ar->pdev_idx, ret);
+ return ret;
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mlo ready done for pdev %d\n",
+ ar->pdev_idx);
+
+ return 0;
+}
+
+int ath12k_mac_mlo_ready(struct ath12k_hw_group *ag)
+{
+ struct ath12k_hw *ah;
+ struct ath12k *ar;
+ int ret;
+ int i, j;
+
+ for (i = 0; i < ag->num_hw; i++) {
+ ah = ag->ah[i];
+ if (!ah)
+ continue;
+
+ for_each_ar(ah, ar, j) {
+ ar = &ah->radio[j];
+ ret = __ath12k_mac_mlo_ready(ar);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_core_mlo_setup(struct ath12k_hw_group *ag)
+{
+ int ret, i;
+
+ if (!ag->mlo_capable)
+ return 0;
+
+ ret = ath12k_mac_mlo_setup(ag);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ag->num_devices; i++)
+ ath12k_dp_partner_cc_init(ag->ab[i]);
+
+ ret = ath12k_mac_mlo_ready(ag);
+ if (ret)
+ goto err_mlo_teardown;
+
+ return 0;
+
+err_mlo_teardown:
+ ath12k_mac_mlo_teardown(ag);
+
+ return ret;
+}
+
+static int ath12k_core_hw_group_start(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int ret, i;
+
+ lockdep_assert_held(&ag->mutex);
+
+ if (test_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags))
+ goto core_pdev_create;
+
+ ret = ath12k_mac_allocate(ag);
+ if (WARN_ON(ret))
+ return ret;
+
+ ret = ath12k_core_mlo_setup(ag);
+ if (WARN_ON(ret))
+ goto err_mac_destroy;
+
+ ret = ath12k_mac_register(ag);
+ if (WARN_ON(ret))
+ goto err_mlo_teardown;
+
+ set_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);
+
+core_pdev_create:
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ mutex_lock(&ab->core_lock);
+
+ set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
+
+ ret = ath12k_core_pdev_create(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to create pdev core %d\n", ret);
+ mutex_unlock(&ab->core_lock);
+ goto err;
+ }
+
+ ath12k_hif_irq_enable(ab);
+
+ ret = ath12k_core_rfkill_config(ab);
+ if (ret && ret != -EOPNOTSUPP) {
+ mutex_unlock(&ab->core_lock);
+ goto err;
+ }
+
+ mutex_unlock(&ab->core_lock);
+ }
+
+ return 0;
+
+err:
+ ath12k_core_hw_group_stop(ag);
+ return ret;
+
+err_mlo_teardown:
+ ath12k_mac_mlo_teardown(ag);
+
+err_mac_destroy:
+ ath12k_mac_destroy(ag);
+
+ return ret;
+}
+
static int ath12k_core_start_firmware(struct ath12k_base *ab,
enum ath12k_firmware_mode mode)
{
@@ -874,11 +1190,93 @@ static int ath12k_core_start_firmware(struct ath12k_base *ab,
return ret;
}
+static inline
+bool ath12k_core_hw_group_start_ready(struct ath12k_hw_group *ag)
+{
+ lockdep_assert_held(&ag->mutex);
+
+ return (ag->num_started == ag->num_devices);
+}
+
+static void ath12k_fw_stats_pdevs_free(struct list_head *head)
+{
+ struct ath12k_fw_stats_pdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+void ath12k_fw_stats_bcn_free(struct list_head *head)
+{
+ struct ath12k_fw_stats_bcn *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath12k_fw_stats_vdevs_free(struct list_head *head)
+{
+ struct ath12k_fw_stats_vdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+void ath12k_fw_stats_init(struct ath12k *ar)
+{
+ INIT_LIST_HEAD(&ar->fw_stats.vdevs);
+ INIT_LIST_HEAD(&ar->fw_stats.pdevs);
+ INIT_LIST_HEAD(&ar->fw_stats.bcn);
+ init_completion(&ar->fw_stats_complete);
+ init_completion(&ar->fw_stats_done);
+}
+
+void ath12k_fw_stats_free(struct ath12k_fw_stats *stats)
+{
+ ath12k_fw_stats_pdevs_free(&stats->pdevs);
+ ath12k_fw_stats_vdevs_free(&stats->vdevs);
+ ath12k_fw_stats_bcn_free(&stats->bcn);
+}
+
+void ath12k_fw_stats_reset(struct ath12k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ ath12k_fw_stats_free(&ar->fw_stats);
+ ar->fw_stats.num_vdev_recvd = 0;
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath12k_core_trigger_partner(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag = ab->ag;
+ struct ath12k_base *partner_ab;
+ bool found = false;
+ int i;
+
+ for (i = 0; i < ag->num_devices; i++) {
+ partner_ab = ag->ab[i];
+ if (!partner_ab)
+ continue;
+
+ if (found)
+ ath12k_qmi_trigger_host_cap(partner_ab);
+
+ found = (partner_ab == ab);
+ }
+}
+
int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
{
- int ret;
+ struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
+ int ret, i;
- ret = ath12k_core_start_firmware(ab, ATH12K_FIRMWARE_MODE_NORMAL);
+ ret = ath12k_core_start_firmware(ab, ab->fw_mode);
if (ret) {
ath12k_err(ab, "failed to start firmware: %d\n", ret);
return ret;
@@ -896,47 +1294,60 @@ int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
goto err_firmware_stop;
}
+ mutex_lock(&ag->mutex);
mutex_lock(&ab->core_lock);
- ret = ath12k_core_start(ab, ATH12K_FIRMWARE_MODE_NORMAL);
+
+ ret = ath12k_core_start(ab);
if (ret) {
ath12k_err(ab, "failed to start core: %d\n", ret);
goto err_dp_free;
}
- ret = ath12k_core_pdev_create(ab);
- if (ret) {
- ath12k_err(ab, "failed to create pdev core: %d\n", ret);
- goto err_core_stop;
- }
- ath12k_hif_irq_enable(ab);
+ mutex_unlock(&ab->core_lock);
- ret = ath12k_core_rfkill_config(ab);
- if (ret && ret != -EOPNOTSUPP) {
- ath12k_err(ab, "failed to config rfkill: %d\n", ret);
- goto err_core_pdev_destroy;
+ if (ath12k_core_hw_group_start_ready(ag)) {
+ ret = ath12k_core_hw_group_start(ag);
+ if (ret) {
+ ath12k_warn(ab, "unable to start hw group\n");
+ goto err_core_stop;
+ }
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "group %d started\n", ag->id);
+ } else {
+ ath12k_core_trigger_partner(ab);
}
- mutex_unlock(&ab->core_lock);
+ mutex_unlock(&ag->mutex);
return 0;
-err_core_pdev_destroy:
- ath12k_core_pdev_destroy(ab);
err_core_stop:
- ath12k_core_stop(ab);
- ath12k_mac_destroy(ab);
+ for (i = ag->num_devices - 1; i >= 0; i--) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ mutex_lock(&ab->core_lock);
+ ath12k_core_stop(ab);
+ mutex_unlock(&ab->core_lock);
+ }
+ mutex_unlock(&ag->mutex);
+ goto exit;
+
err_dp_free:
ath12k_dp_free(ab);
mutex_unlock(&ab->core_lock);
+ mutex_unlock(&ag->mutex);
+
err_firmware_stop:
ath12k_qmi_firmware_stop(ab);
+exit:
return ret;
}
static int ath12k_core_reconfigure_on_crash(struct ath12k_base *ab)
{
- int ret;
+ int ret, total_vdev;
mutex_lock(&ab->core_lock);
ath12k_dp_pdev_free(ab);
@@ -947,8 +1358,8 @@ static int ath12k_core_reconfigure_on_crash(struct ath12k_base *ab)
ath12k_dp_free(ab);
ath12k_hal_srng_deinit(ab);
-
- ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+ total_vdev = ab->num_radios * TARGET_NUM_VDEVS(ab);
+ ab->free_vdev_map = (1LL << total_vdev) - 1;
ret = ath12k_hal_srng_init(ab);
if (ret)
@@ -972,6 +1383,7 @@ err_hal_srng_deinit:
static void ath12k_rfkill_work(struct work_struct *work)
{
struct ath12k_base *ab = container_of(work, struct ath12k_base, rfkill_work);
+ struct ath12k_hw_group *ag = ab->ag;
struct ath12k *ar;
struct ath12k_hw *ah;
struct ieee80211_hw *hw;
@@ -982,8 +1394,8 @@ static void ath12k_rfkill_work(struct work_struct *work)
rfkill_radio_on = ab->rfkill_radio_on;
spin_unlock_bh(&ab->base_lock);
- for (i = 0; i < ab->num_hw; i++) {
- ah = ab->ah[i];
+ for (i = 0; i < ag->num_hw; i++) {
+ ah = ath12k_ag_to_ah(ag, i);
if (!ah)
continue;
@@ -1002,9 +1414,10 @@ static void ath12k_rfkill_work(struct work_struct *work)
void ath12k_core_halt(struct ath12k *ar)
{
+ struct list_head *pos, *n;
struct ath12k_base *ab = ar->ab;
- lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
ar->num_created_vdevs = 0;
ar->allocated_vdev_map = 0;
@@ -1013,16 +1426,24 @@ void ath12k_core_halt(struct ath12k *ar)
ath12k_mac_peer_cleanup_all(ar);
cancel_delayed_work_sync(&ar->scan.timeout);
cancel_work_sync(&ar->regd_update_work);
+ cancel_work_sync(&ar->regd_channel_update_work);
cancel_work_sync(&ab->rfkill_work);
+ cancel_work_sync(&ab->update_11d_work);
rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
synchronize_rcu();
- INIT_LIST_HEAD(&ar->arvifs);
+
+ spin_lock_bh(&ar->data_lock);
+ list_for_each_safe(pos, n, &ar->arvifs)
+ list_del_init(pos);
+ spin_unlock_bh(&ar->data_lock);
+
idr_init(&ar->txmgmt_idr);
}
static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
{
+ struct ath12k_hw_group *ag = ab->ag;
struct ath12k *ar;
struct ath12k_hw *ah;
int i, j;
@@ -1034,19 +1455,34 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
if (ab->is_reset)
set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
- for (i = 0; i < ab->num_hw; i++) {
- ah = ab->ah[i];
- if (!ah || ah->state == ATH12K_HW_STATE_OFF)
+ for (i = 0; i < ag->num_hw; i++) {
+ ah = ath12k_ag_to_ah(ag, i);
+ if (!ah || ah->state == ATH12K_HW_STATE_OFF ||
+ ah->state == ATH12K_HW_STATE_TM)
continue;
+ wiphy_lock(ah->hw->wiphy);
+
+		/* If queue 0 is stopped, it is safe to assume that all
+		 * other queues were already stopped by the driver via
+		 * ieee80211_stop_queues() below, so there is no need to
+		 * stop them again; hence continue.
+		 */
+ if (ieee80211_queue_stopped(ah->hw, 0)) {
+ wiphy_unlock(ah->hw->wiphy);
+ continue;
+ }
+
ieee80211_stop_queues(ah->hw);
for (j = 0; j < ah->num_radio; j++) {
ar = &ah->radio[j];
ath12k_mac_drain_tx(ar);
+ ar->state_11d = ATH12K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
complete(&ar->scan.started);
- complete(&ar->scan.completed);
+ complete_all(&ar->scan.completed);
complete(&ar->scan.on_channel);
complete(&ar->peer_assoc_done);
complete(&ar->peer_delete_done);
@@ -1054,30 +1490,70 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
complete(&ar->vdev_setup_done);
complete(&ar->vdev_delete_done);
complete(&ar->bss_survey_done);
+ complete_all(&ar->regd_update_completed);
wake_up(&ar->dp.tx_empty_waitq);
idr_for_each(&ar->txmgmt_idr,
ath12k_mac_tx_mgmt_pending_free, ar);
idr_destroy(&ar->txmgmt_idr);
wake_up(&ar->txmgmt_empty_waitq);
+
+ ar->monitor_vdev_id = -1;
+ ar->monitor_vdev_created = false;
+ ar->monitor_started = false;
}
+
+ wiphy_unlock(ah->hw->wiphy);
}
wake_up(&ab->wmi_ab.tx_credits_wq);
wake_up(&ab->peer_mapping_wq);
}
+static void ath12k_update_11d(struct work_struct *work)
+{
+ struct ath12k_base *ab = container_of(work, struct ath12k_base, update_11d_work);
+ struct ath12k *ar;
+ struct ath12k_pdev *pdev;
+ struct wmi_set_current_country_arg arg = {};
+ int ret, i;
+
+ spin_lock_bh(&ab->base_lock);
+ memcpy(&arg.alpha2, &ab->new_alpha2, 2);
+ spin_unlock_bh(&ab->base_lock);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "update 11d new cc %c%c\n",
+ arg.alpha2[0], arg.alpha2[1]);
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+
+ memcpy(&ar->alpha2, &arg.alpha2, 2);
+
+ reinit_completion(&ar->regd_update_completed);
+
+ ret = ath12k_wmi_send_set_current_country_cmd(ar, &arg);
+ if (ret)
+ ath12k_warn(ar->ab,
+ "pdev id %d failed set current country code: %d\n",
+ i, ret);
+ }
+}
+
static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
{
+ struct ath12k_hw_group *ag = ab->ag;
struct ath12k_hw *ah;
struct ath12k *ar;
int i, j;
- for (i = 0; i < ab->num_hw; i++) {
- ah = ab->ah[i];
+ for (i = 0; i < ag->num_hw; i++) {
+ ah = ath12k_ag_to_ah(ag, i);
if (!ah || ah->state == ATH12K_HW_STATE_OFF)
continue;
+ wiphy_lock(ah->hw->wiphy);
mutex_lock(&ah->hw_mutex);
switch (ah->state) {
@@ -1086,10 +1562,7 @@ static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
for (j = 0; j < ah->num_radio; j++) {
ar = &ah->radio[j];
-
- mutex_lock(&ar->conf_mutex);
ath12k_core_halt(ar);
- mutex_unlock(&ar->conf_mutex);
}
break;
@@ -1107,9 +1580,13 @@ static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
ath12k_warn(ab,
"device is wedged, will not restart hw %d\n", i);
break;
+ case ATH12K_HW_STATE_TM:
+ ath12k_warn(ab, "fw mode reset done radio %d\n", i);
+ break;
}
mutex_unlock(&ah->hw_mutex);
+ wiphy_unlock(ah->hw->wiphy);
}
complete(&ab->driver_recovery);
@@ -1118,6 +1595,7 @@ static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
static void ath12k_core_restart(struct work_struct *work)
{
struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work);
+ struct ath12k_hw_group *ag = ab->ag;
struct ath12k_hw *ah;
int ret, i;
@@ -1128,22 +1606,41 @@ static void ath12k_core_restart(struct work_struct *work)
}
if (ab->is_reset) {
- for (i = 0; i < ab->num_hw; i++) {
- ah = ab->ah[i];
+ if (!test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
+ atomic_dec(&ab->reset_count);
+ complete(&ab->reset_complete);
+ ab->is_reset = false;
+ atomic_set(&ab->fail_cont_count, 0);
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n");
+ }
+
+ mutex_lock(&ag->mutex);
+
+ if (!ath12k_core_hw_group_start_ready(ag)) {
+ mutex_unlock(&ag->mutex);
+ goto exit_restart;
+ }
+
+ for (i = 0; i < ag->num_hw; i++) {
+ ah = ath12k_ag_to_ah(ag, i);
ieee80211_restart_hw(ah->hw);
}
+
+ mutex_unlock(&ag->mutex);
}
+exit_restart:
complete(&ab->restart_completed);
}
static void ath12k_core_reset(struct work_struct *work)
{
struct ath12k_base *ab = container_of(work, struct ath12k_base, reset_work);
- int reset_count, fail_cont_count;
+ struct ath12k_hw_group *ag = ab->ag;
+ int reset_count, fail_cont_count, i;
long time_left;
- if (!(test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))) {
+ if (!(test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags))) {
ath12k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
return;
}
@@ -1188,6 +1685,7 @@ static void ath12k_core_reset(struct work_struct *work)
ab->is_reset = true;
atomic_set(&ab->recovery_count, 0);
+ ath12k_coredump_collect(ab);
ath12k_core_pre_reconfigure_recovery(ab);
ath12k_core_post_reconfigure_recovery(ab);
@@ -1198,13 +1696,53 @@ static void ath12k_core_reset(struct work_struct *work)
ath12k_hif_ce_irq_disable(ab);
ath12k_hif_power_down(ab, false);
- ath12k_hif_power_up(ab);
- ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n");
+ /* prepare for power up */
+ ab->qmi.num_radios = U8_MAX;
+
+ mutex_lock(&ag->mutex);
+ ath12k_core_to_group_ref_put(ab);
+
+ if (ag->num_started > 0) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "waiting for %d partner device(s) to reset\n",
+ ag->num_started);
+ mutex_unlock(&ag->mutex);
+ return;
+ }
+
+ /* Prepare MLO global memory region for power up */
+ ath12k_qmi_reset_mlo_mem(ag);
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ ath12k_hif_power_up(ab);
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n");
+ }
+
+ mutex_unlock(&ag->mutex);
+}
+
+enum ath12k_qmi_mem_mode ath12k_core_get_memory_mode(struct ath12k_base *ab)
+{
+ unsigned long total_ram;
+ struct sysinfo si;
+
+ si_meminfo(&si);
+ total_ram = si.totalram * si.mem_unit;
+
+ if (total_ram < SZ_512M)
+ return ATH12K_QMI_MEMORY_MODE_LOW_512_M;
+
+ return ATH12K_QMI_MEMORY_MODE_DEFAULT;
}
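As a worked example of the threshold above (hypothetical figures): a board whose si.totalram * si.mem_unit comes to about 256 MiB falls below SZ_512M and selects ATH12K_QMI_MEMORY_MODE_LOW_512_M, while a board with 1 GiB or more selects ATH12K_QMI_MEMORY_MODE_DEFAULT.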
int ath12k_core_pre_init(struct ath12k_base *ab)
{
+ const struct ath12k_mem_profile_based_param *param;
int ret;
ret = ath12k_hw_init(ab);
@@ -1213,6 +1751,8 @@ int ath12k_core_pre_init(struct ath12k_base *ab)
return ret;
}
+ param = &ath12k_mem_profile_based_param[ab->target_mem_mode];
+ ab->profile_param = param;
ath12k_fw_map(ab);
return 0;
@@ -1241,38 +1781,445 @@ static void ath12k_core_panic_notifier_unregister(struct ath12k_base *ab)
&ab->panic_nb);
}
-int ath12k_core_init(struct ath12k_base *ab)
+static inline
+bool ath12k_core_hw_group_create_ready(struct ath12k_hw_group *ag)
{
- int ret;
+ lockdep_assert_held(&ag->mutex);
- ret = ath12k_core_soc_create(ab);
- if (ret) {
- ath12k_err(ab, "failed to create soc core: %d\n", ret);
- return ret;
+ return (ag->num_probed == ag->num_devices);
+}
+
+static struct ath12k_hw_group *ath12k_core_hw_group_alloc(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag;
+ int count = 0;
+
+ lockdep_assert_held(&ath12k_hw_group_mutex);
+
+ list_for_each_entry(ag, &ath12k_hw_group_list, list)
+ count++;
+
+ ag = kzalloc(sizeof(*ag), GFP_KERNEL);
+ if (!ag)
+ return NULL;
+
+ ag->id = count;
+ list_add(&ag->list, &ath12k_hw_group_list);
+ mutex_init(&ag->mutex);
+ ag->mlo_capable = false;
+
+ return ag;
+}
+
+static void ath12k_core_hw_group_free(struct ath12k_hw_group *ag)
+{
+ mutex_lock(&ath12k_hw_group_mutex);
+
+ list_del(&ag->list);
+ kfree(ag);
+
+ mutex_unlock(&ath12k_hw_group_mutex);
+}
+
+static struct ath12k_hw_group *ath12k_core_hw_group_find_by_dt(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag;
+ int i;
+
+ if (!ab->dev->of_node)
+ return NULL;
+
+ list_for_each_entry(ag, &ath12k_hw_group_list, list)
+ for (i = 0; i < ag->num_devices; i++)
+ if (ag->wsi_node[i] == ab->dev->of_node)
+ return ag;
+
+ return NULL;
+}
+
+static int ath12k_core_get_wsi_info(struct ath12k_hw_group *ag,
+ struct ath12k_base *ab)
+{
+ struct device_node *wsi_dev = ab->dev->of_node, *next_wsi_dev;
+ struct device_node *tx_endpoint, *next_rx_endpoint;
+ int device_count = 0;
+
+ next_wsi_dev = wsi_dev;
+
+ if (!next_wsi_dev)
+ return -ENODEV;
+
+ do {
+ ag->wsi_node[device_count] = next_wsi_dev;
+
+ tx_endpoint = of_graph_get_endpoint_by_regs(next_wsi_dev, 0, -1);
+ if (!tx_endpoint) {
+ of_node_put(next_wsi_dev);
+ return -ENODEV;
+ }
+
+ next_rx_endpoint = of_graph_get_remote_endpoint(tx_endpoint);
+ if (!next_rx_endpoint) {
+ of_node_put(next_wsi_dev);
+ of_node_put(tx_endpoint);
+ return -ENODEV;
+ }
+
+ of_node_put(tx_endpoint);
+ of_node_put(next_wsi_dev);
+
+ next_wsi_dev = of_graph_get_port_parent(next_rx_endpoint);
+ if (!next_wsi_dev) {
+ of_node_put(next_rx_endpoint);
+ return -ENODEV;
+ }
+
+ of_node_put(next_rx_endpoint);
+
+ device_count++;
+ if (device_count > ATH12K_MAX_DEVICES) {
+ ath12k_warn(ab, "device count in DT %d is more than limit %d\n",
+ device_count, ATH12K_MAX_DEVICES);
+ of_node_put(next_wsi_dev);
+ return -EINVAL;
+ }
+ } while (wsi_dev != next_wsi_dev);
+
+ of_node_put(next_wsi_dev);
+ ag->num_devices = device_count;
+
+ return 0;
+}
+
+static int ath12k_core_get_wsi_index(struct ath12k_hw_group *ag,
+ struct ath12k_base *ab)
+{
+ int i, wsi_controller_index = -1, node_index = -1;
+ bool control;
+
+ for (i = 0; i < ag->num_devices; i++) {
+ control = of_property_read_bool(ag->wsi_node[i], "qcom,wsi-controller");
+ if (control)
+ wsi_controller_index = i;
+
+ if (ag->wsi_node[i] == ab->dev->of_node)
+ node_index = i;
}
+ if (wsi_controller_index == -1) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi controller is not defined in dt");
+ return -EINVAL;
+ }
+
+ if (node_index == -1) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "unable to get WSI node index");
+ return -EINVAL;
+ }
+
+ ab->wsi_info.index = (ag->num_devices + node_index - wsi_controller_index) %
+ ag->num_devices;
+
+ return 0;
+}
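As a quick check of the index arithmetic above (hypothetical three-device ring): if the node at DT position 1 carries qcom,wsi-controller, the controller resolves to (3 + 1 - 1) % 3 = 0, the node at DT position 2 to (3 + 2 - 1) % 3 = 1, and the node at DT position 0 to (3 + 0 - 1) % 3 = 2, i.e. WSI indices count around the ring starting from the controller.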
+
+static struct ath12k_hw_group *ath12k_core_hw_group_assign(struct ath12k_base *ab)
+{
+ struct ath12k_wsi_info *wsi = &ab->wsi_info;
+ struct ath12k_hw_group *ag;
+
+ lockdep_assert_held(&ath12k_hw_group_mutex);
+
+ if (ath12k_ftm_mode)
+ goto invalid_group;
+
+	/* Devices are grouped based on the device tree. Platforms without any
+	 * valid group information end up with each device in its own invalid
+	 * group.
+	 *
+	 * Group id ATH12K_INVALID_GROUP_ID is used for a single-device group
+	 * that has no DT entry or a wrong DT entry, so there can be many
+	 * groups with the same id, i.e. ATH12K_INVALID_GROUP_ID. The default
+	 * group id of ATH12K_INVALID_GROUP_ID combined with the number of
+	 * devices in ath12k_hw_group therefore determines whether the group
+	 * is a multi-device or a single-device group.
+	 */
+
+ ag = ath12k_core_hw_group_find_by_dt(ab);
+ if (!ag) {
+ ag = ath12k_core_hw_group_alloc(ab);
+ if (!ag) {
+ ath12k_warn(ab, "unable to create new hw group\n");
+ return NULL;
+ }
+
+ if (ath12k_core_get_wsi_info(ag, ab) ||
+ ath12k_core_get_wsi_index(ag, ab)) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "unable to get wsi info from dt, grouping single device");
+ ag->id = ATH12K_INVALID_GROUP_ID;
+ ag->num_devices = 1;
+ memset(ag->wsi_node, 0, sizeof(ag->wsi_node));
+ wsi->index = 0;
+ }
+
+ goto exit;
+ } else if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "group id %d in unregister state\n",
+ ag->id);
+ goto invalid_group;
+ } else {
+ if (ath12k_core_get_wsi_index(ag, ab))
+ goto invalid_group;
+ goto exit;
+ }
+
+invalid_group:
+ ag = ath12k_core_hw_group_alloc(ab);
+ if (!ag) {
+ ath12k_warn(ab, "unable to create new hw group\n");
+ return NULL;
+ }
+
+ ag->id = ATH12K_INVALID_GROUP_ID;
+ ag->num_devices = 1;
+ wsi->index = 0;
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "single device added to hardware group\n");
+
+exit:
+ if (ag->num_probed >= ag->num_devices) {
+ ath12k_warn(ab, "unable to add new device to group, max limit reached\n");
+ goto invalid_group;
+ }
+
+ ab->device_id = ag->num_probed++;
+ ag->ab[ab->device_id] = ab;
+ ab->ag = ag;
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi group-id %d num-devices %d index %d",
+ ag->id, ag->num_devices, wsi->index);
+
+ return ag;
+}
+
+void ath12k_core_hw_group_unassign(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
+ u8 device_id = ab->device_id;
+ int num_probed;
+
+ if (!ag)
+ return;
+
+ mutex_lock(&ag->mutex);
+
+ if (WARN_ON(device_id >= ag->num_devices)) {
+ mutex_unlock(&ag->mutex);
+ return;
+ }
+
+ if (WARN_ON(ag->ab[device_id] != ab)) {
+ mutex_unlock(&ag->mutex);
+ return;
+ }
+
+ ag->ab[device_id] = NULL;
+ ab->ag = NULL;
+ ab->device_id = ATH12K_INVALID_DEVICE_ID;
+
+ if (ag->num_probed)
+ ag->num_probed--;
+
+ num_probed = ag->num_probed;
+
+ mutex_unlock(&ag->mutex);
+
+ if (!num_probed)
+ ath12k_core_hw_group_free(ag);
+}
+
+static void ath12k_core_hw_group_destroy(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i;
+
+ if (WARN_ON(!ag))
+ return;
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ ath12k_core_soc_destroy(ab);
+ }
+}
+
+void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i;
+
+ if (!ag)
+ return;
+
+ mutex_lock(&ag->mutex);
+
+ if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
+ mutex_unlock(&ag->mutex);
+ return;
+ }
+
+ set_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags);
+
+ ath12k_core_hw_group_stop(ag);
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ mutex_lock(&ab->core_lock);
+ ath12k_core_stop(ab);
+ mutex_unlock(&ab->core_lock);
+ }
+
+ mutex_unlock(&ag->mutex);
+}
+
+static int ath12k_core_hw_group_create(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i, ret;
+
+ lockdep_assert_held(&ag->mutex);
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ mutex_lock(&ab->core_lock);
+
+ ret = ath12k_core_soc_create(ab);
+ if (ret) {
+ mutex_unlock(&ab->core_lock);
+ ath12k_err(ab, "failed to create soc %d core: %d\n", i, ret);
+ goto destroy;
+ }
+
+ mutex_unlock(&ab->core_lock);
+ }
+
+ return 0;
+
+destroy:
+ for (i--; i >= 0; i--) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ mutex_lock(&ab->core_lock);
+ ath12k_core_soc_destroy(ab);
+ mutex_unlock(&ab->core_lock);
+ }
+
+ return ret;
+}
+
+void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i;
+
+ if (ath12k_ftm_mode)
+ return;
+
+ lockdep_assert_held(&ag->mutex);
+
+ if (ag->num_devices == 1) {
+ ab = ag->ab[0];
+ /* QCN9274 firmware uses firmware IE for MLO advertisement */
+ if (ab->fw.fw_features_valid) {
+ ag->mlo_capable =
+ ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MLO);
+ return;
+ }
+
+ /* while WCN7850 firmware uses QMI single_chip_mlo_support bit */
+ ag->mlo_capable = ab->single_chip_mlo_support;
+ return;
+ }
+
+ ag->mlo_capable = true;
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ /* even if 1 device's firmware feature indicates MLO
+ * unsupported, make MLO unsupported for the whole group
+ */
+ if (!ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MLO)) {
+ ag->mlo_capable = false;
+ return;
+ }
+ }
+}
+
+int ath12k_core_init(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag;
+ int ret;
+
ret = ath12k_core_panic_notifier_register(ab);
if (ret)
ath12k_warn(ab, "failed to register panic handler: %d\n", ret);
+ mutex_lock(&ath12k_hw_group_mutex);
+
+ ag = ath12k_core_hw_group_assign(ab);
+ if (!ag) {
+ mutex_unlock(&ath12k_hw_group_mutex);
+ ath12k_warn(ab, "unable to get hw group\n");
+ ret = -ENODEV;
+ goto err_unregister_notifier;
+ }
+
+ mutex_unlock(&ath12k_hw_group_mutex);
+
+ mutex_lock(&ag->mutex);
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "num devices %d num probed %d\n",
+ ag->num_devices, ag->num_probed);
+
+ if (ath12k_core_hw_group_create_ready(ag)) {
+ ret = ath12k_core_hw_group_create(ag);
+ if (ret) {
+ mutex_unlock(&ag->mutex);
+ ath12k_warn(ab, "unable to create hw group\n");
+ goto err_unassign_hw_group;
+ }
+ }
+
+ mutex_unlock(&ag->mutex);
+
return 0;
+
+err_unassign_hw_group:
+ ath12k_core_hw_group_unassign(ab);
+err_unregister_notifier:
+ ath12k_core_panic_notifier_unregister(ab);
+
+ return ret;
}
void ath12k_core_deinit(struct ath12k_base *ab)
{
+ ath12k_core_hw_group_destroy(ab->ag);
+ ath12k_core_hw_group_unassign(ab);
ath12k_core_panic_notifier_unregister(ab);
-
- mutex_lock(&ab->core_lock);
-
- ath12k_core_pdev_destroy(ab);
- ath12k_core_stop(ab);
-
- mutex_unlock(&ab->core_lock);
-
- ath12k_hif_power_down(ab, false);
- ath12k_mac_destroy(ab);
- ath12k_core_soc_destroy(ab);
- ath12k_fw_unmap(ab);
}
void ath12k_core_free(struct ath12k_base *ab)
@@ -1312,6 +2259,8 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
INIT_WORK(&ab->restart_work, ath12k_core_restart);
INIT_WORK(&ab->reset_work, ath12k_core_reset);
INIT_WORK(&ab->rfkill_work, ath12k_rfkill_work);
+ INIT_WORK(&ab->dump_work, ath12k_coredump_upload);
+ INIT_WORK(&ab->update_11d_work, ath12k_update_11d);
timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0);
init_completion(&ab->htc_suspend);
@@ -1321,7 +2270,7 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
ab->dev = dev;
ab->hif.bus = bus;
ab->qmi.num_radios = U8_MAX;
- ab->mlo_capable_flags = ATH12K_INTRA_DEVICE_MLO_SUPPORT;
+ ab->single_chip_mlo_support = false;
/* Device index used to identify the devices in a group.
*
@@ -1342,5 +2291,31 @@ err_sc_free:
return NULL;
}
-MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11be wireless LAN cards.");
+static int ath12k_init(void)
+{
+ ahb_err = ath12k_ahb_init();
+ if (ahb_err)
+ pr_warn("Failed to initialize ath12k AHB device: %d\n", ahb_err);
+
+ pci_err = ath12k_pci_init();
+ if (pci_err)
+ pr_warn("Failed to initialize ath12k PCI device: %d\n", pci_err);
+
+ /* If both failed, return one of the failures (arbitrary) */
+ return ahb_err && pci_err ? ahb_err : 0;
+}
+
+static void ath12k_exit(void)
+{
+ if (!pci_err)
+ ath12k_pci_exit();
+
+ if (!ahb_err)
+ ath12k_ahb_exit();
+}
+
+module_init(ath12k_init);
+module_exit(ath12k_exit);
+
+MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11be WLAN devices");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index 7f2e9a9b4097..3c1e0069be1e 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef ATH12K_CORE_H
@@ -14,7 +14,10 @@
#include <linux/dmi.h>
#include <linux/ctype.h>
#include <linux/firmware.h>
+#include <linux/of_reserved_mem.h>
#include <linux/panic_notifier.h>
+#include <linux/average.h>
+#include <linux/of.h>
#include "qmi.h"
#include "htc.h"
#include "wmi.h"
@@ -30,6 +33,7 @@
#include "acpi.h"
#include "wow.h"
#include "debugfs_htt_stats.h"
+#include "coredump.h"
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -51,8 +55,6 @@
#define ATH12K_INVALID_HW_MAC_ID 0xFF
#define ATH12K_CONNECTION_LOSS_HZ (3 * HZ)
-#define ATH12K_RX_RATE_TABLE_NUM 320
-#define ATH12K_RX_RATE_TABLE_11AX_NUM 576
#define ATH12K_MON_TIMER_INTERVAL 10
#define ATH12K_RESET_TIMEOUT_HZ (20 * HZ)
@@ -62,6 +64,17 @@
#define ATH12K_RECONFIGURE_TIMEOUT_HZ (10 * HZ)
#define ATH12K_RECOVER_START_TIMEOUT_HZ (20 * HZ)
+#define ATH12K_MAX_DEVICES 3
+#define ATH12K_GROUP_MAX_RADIO (ATH12K_MAX_DEVICES * MAX_RADIOS)
+#define ATH12K_INVALID_GROUP_ID 0xFF
+#define ATH12K_INVALID_DEVICE_ID 0xFF
+
+#define ATH12K_MAX_MLO_PEERS 256
+#define ATH12K_MLO_PEER_ID_INVALID 0xFFFF
+
+#define ATH12K_INVALID_RSSI_FULL -1
+#define ATH12K_INVALID_RSSI_EMPTY -128
+
enum ath12k_bdf_search {
ATH12K_BDF_SEARCH_DEFAULT,
ATH12K_BDF_SEARCH_BUS_AND_BOARD,
@@ -78,6 +91,7 @@ enum wme_ac {
#define ATH12K_HT_MCS_MAX 7
#define ATH12K_VHT_MCS_MAX 9
#define ATH12K_HE_MCS_MAX 11
+#define ATH12K_EHT_MCS_MAX 15
enum ath12k_crypt_mode {
/* Only use hardware crypto engine */
@@ -94,9 +108,18 @@ static inline enum wme_ac ath12k_tid_to_ac(u32 tid)
WME_AC_VO);
}
+static inline u64 ath12k_le32hilo_to_u64(__le32 hi, __le32 lo)
+{
+ u64 hi64 = le32_to_cpu(hi);
+ u64 lo64 = le32_to_cpu(lo);
+
+ return (hi64 << 32) | lo64;
+}
+
enum ath12k_skb_flags {
ATH12K_SKB_HW_80211_ENCAP = BIT(0),
ATH12K_SKB_CIPHER_SET = BIT(1),
+ ATH12K_SKB_MLO_STA = BIT(2),
};
struct ath12k_skb_cb {
@@ -106,6 +129,7 @@ struct ath12k_skb_cb {
dma_addr_t paddr_ext_desc;
u32 cipher;
u8 flags;
+ u8 link_id;
};
struct ath12k_skb_rxcb {
@@ -118,17 +142,19 @@ struct ath12k_skb_rxcb {
struct hal_rx_desc *rx_desc;
u8 err_rel_src;
u8 err_code;
- u8 mac_id;
+ u8 hw_link_id;
u8 unmapped;
u8 is_frag;
u8 tid;
u16 peer_id;
+ bool is_end_of_ppdu;
};
enum ath12k_hw_rev {
ATH12K_HW_QCN9274_HW10,
ATH12K_HW_QCN9274_HW20,
- ATH12K_HW_WCN7850_HW20
+ ATH12K_HW_WCN7850_HW20,
+ ATH12K_HW_IPQ5332_HW10,
};
enum ath12k_firmware_mode {
@@ -141,6 +167,7 @@ enum ath12k_firmware_mode {
#define ATH12K_IRQ_NUM_MAX 57
#define ATH12K_EXT_IRQ_NUM_MAX 16
+#define ATH12K_MAX_TCL_RING_NUM 3
struct ath12k_ext_irq_grp {
struct ath12k_base *ab;
@@ -148,13 +175,39 @@ struct ath12k_ext_irq_grp {
u32 num_irq;
u32 grp_id;
u64 timestamp;
+ bool napi_enabled;
struct napi_struct napi;
struct net_device *napi_ndev;
};
+enum ath12k_smbios_cc_type {
+ /* disable country code setting from SMBIOS */
+ ATH12K_SMBIOS_CC_DISABLE = 0,
+
+ /* set country code by ANSI country name, based on ISO3166-1 alpha2 */
+ ATH12K_SMBIOS_CC_ISO = 1,
+
+ /* worldwide regdomain */
+ ATH12K_SMBIOS_CC_WW = 2,
+};
+
struct ath12k_smbios_bdf {
struct dmi_header hdr;
- u32 padding;
+ u8 features_disabled;
+
+ /* enum ath12k_smbios_cc_type */
+ u8 country_code_flag;
+
+	/* To set a specific country, set country_code_flag to
+	 * ATH12K_SMBIOS_CC_ISO first. For the United States the country
+	 * code value is 0x5553 ("US", 'U' = 0x55, 'S' = 0x53); for
+	 * Indonesia it is 0x4944 ("ID", 'I' = 0x49, 'D' = 0x44). If the
+	 * country code flag is ATH12K_SMBIOS_CC_WW, the worldwide
+	 * regulatory setting is used.
+	 */
+ u16 cc_code;
+
u8 bdf_enabled;
u8 bdf_ext[];
} __packed;
@@ -199,8 +252,19 @@ enum ath12k_scan_state {
ATH12K_SCAN_ABORTING,
};
+enum ath12k_11d_state {
+ ATH12K_11D_IDLE,
+ ATH12K_11D_PREPARING,
+ ATH12K_11D_RUNNING,
+};
+
+enum ath12k_hw_group_flags {
+ ATH12K_GROUP_FLAG_REGISTERED,
+ ATH12K_GROUP_FLAG_UNREGISTER,
+};
+
enum ath12k_dev_flags {
- ATH12K_CAC_RUNNING,
+ ATH12K_FLAG_CAC_RUNNING,
ATH12K_FLAG_CRASH_FLUSH,
ATH12K_FLAG_RAW_MODE,
ATH12K_FLAG_HW_CRYPTO_DISABLED,
@@ -211,6 +275,9 @@ enum ath12k_dev_flags {
ATH12K_FLAG_HTC_SUSPEND_COMPLETE,
ATH12K_FLAG_CE_IRQ_ENABLED,
ATH12K_FLAG_EXT_IRQ_ENABLED,
+ ATH12K_FLAG_QMI_FW_READY_COMPLETE,
+ ATH12K_FLAG_FTM_SEGMENTED,
+ ATH12K_FLAG_FIXED_MEM_REGION,
};
struct ath12k_tx_conf {
@@ -220,8 +287,9 @@ struct ath12k_tx_conf {
};
struct ath12k_key_conf {
- bool changed;
enum set_key_cmd cmd;
+ struct list_head list;
+ struct ieee80211_sta *sta;
struct ieee80211_key_conf *key;
};
@@ -238,10 +306,8 @@ struct ath12k_rekey_data {
bool enable_offload;
};
-struct ath12k_vif {
+struct ath12k_link_vif {
u32 vdev_id;
- enum wmi_vdev_type vdev_type;
- enum wmi_vdev_subtype vdev_subtype;
u32 beacon_interval;
u32 dtim_period;
u16 ast_hash;
@@ -251,13 +317,54 @@ struct ath12k_vif {
u8 search_type;
struct ath12k *ar;
- struct ieee80211_vif *vif;
int bank_id;
u8 vdev_id_check_en;
+ bool beacon_prot;
struct wmi_wmm_params_all_arg wmm_params;
struct list_head list;
+
+ bool is_created;
+ bool is_started;
+ bool is_up;
+ u8 bssid[ETH_ALEN];
+ struct cfg80211_bitrate_mask bitrate_mask;
+ struct delayed_work connection_loss_work;
+ int num_legacy_stations;
+ int rtscts_prot_mode;
+ int txpower;
+ bool rsnie_present;
+ bool wpaie_present;
+ u8 vdev_stats_id;
+ u32 punct_bitmap;
+ u8 link_id;
+ struct ath12k_vif *ahvif;
+ struct ath12k_rekey_data rekey_data;
+ struct ath12k_link_stats link_stats;
+ spinlock_t link_stats_lock; /* Protects updates to link_stats */
+
+ u8 current_cntdown_counter;
+
+ /* only used in station mode */
+ bool is_sta_assoc_link;
+
+ struct ath12k_reg_tpc_power_info reg_tpc_info;
+
+ bool group_key_valid;
+ struct wmi_vdev_install_key_arg group_key;
+ bool pairwise_key_done;
+ u16 num_stations;
+ bool is_csa_in_progress;
+ struct wiphy_work bcn_tx_work;
+};
+
+struct ath12k_vif {
+ enum wmi_vdev_type vdev_type;
+ enum wmi_vdev_subtype vdev_subtype;
+ struct ieee80211_vif *vif;
+ struct ath12k_hw *ah;
+
union {
struct {
u32 uapsd;
@@ -275,26 +382,17 @@ struct ath12k_vif {
} ap;
} u;
- bool is_created;
- bool is_started;
- bool is_up;
u32 aid;
- u8 bssid[ETH_ALEN];
- struct cfg80211_bitrate_mask bitrate_mask;
- struct delayed_work connection_loss_work;
- int num_legacy_stations;
- int rtscts_prot_mode;
- int txpower;
- bool rsnie_present;
- bool wpaie_present;
u32 key_cipher;
u8 tx_encap_type;
- u8 vdev_stats_id;
- u32 punct_bitmap;
bool ps;
- struct ath12k_vif_cache *cache;
- struct ath12k_rekey_data rekey_data;
+ atomic_t mcbc_gsn;
+ struct ath12k_link_vif deflink;
+ struct ath12k_link_vif __rcu *link[ATH12K_NUM_MAX_LINKS];
+ struct ath12k_vif_cache *cache[IEEE80211_MLD_MAX_NUM_LINKS];
+ /* indicates bitmap of link vif created in FW */
+ u32 links_map;
/* Must be last - ends in a flexible-array member.
*
* FIXME: Driver should not copy struct ieee80211_chanctx_conf,
@@ -306,7 +404,7 @@ struct ath12k_vif {
struct ath12k_vif_iter {
u32 vdev_id;
struct ath12k *ar;
- struct ath12k_vif *arvif;
+ struct ath12k_link_vif *arvif;
};
#define HAL_AST_IDX_INVALID 0xFFFF
@@ -314,20 +412,22 @@ struct ath12k_vif_iter {
#define HAL_RX_MAX_MCS_HT 31
#define HAL_RX_MAX_MCS_VHT 9
#define HAL_RX_MAX_MCS_HE 11
+#define HAL_RX_MAX_MCS_BE 15
#define HAL_RX_MAX_NSS 8
#define HAL_RX_MAX_NUM_LEGACY_RATES 12
-#define ATH12K_RX_RATE_TABLE_11AX_NUM 576
-#define ATH12K_RX_RATE_TABLE_NUM 320
+
+#define ATH12K_SCAN_TIMEOUT_HZ (20 * HZ)
struct ath12k_rx_peer_rate_stats {
u64 ht_mcs_count[HAL_RX_MAX_MCS_HT + 1];
u64 vht_mcs_count[HAL_RX_MAX_MCS_VHT + 1];
u64 he_mcs_count[HAL_RX_MAX_MCS_HE + 1];
+ u64 be_mcs_count[HAL_RX_MAX_MCS_BE + 1];
u64 nss_count[HAL_RX_MAX_NSS];
u64 bw_count[HAL_RX_BW_MAX];
u64 gi_count[HAL_RX_GI_MAX];
u64 legacy_count[HAL_RX_MAX_NUM_LEGACY_RATES];
- u64 rx_rate[ATH12K_RX_RATE_TABLE_11AX_NUM];
+ u64 rx_rate[HAL_RX_BW_MAX][HAL_RX_GI_MAX][HAL_RX_MAX_NSS][HAL_RX_MAX_MCS_HT + 1];
};
struct ath12k_rx_peer_stats {
@@ -341,10 +441,6 @@ struct ath12k_rx_peer_stats {
u64 non_ampdu_msdu_count;
u64 stbc_count;
u64 beamformed_count;
- u64 mcs_count[HAL_RX_MAX_MCS + 1];
- u64 nss_count[HAL_RX_MAX_NSS];
- u64 bw_count[HAL_RX_BW_MAX];
- u64 gi_count[HAL_RX_GI_MAX];
u64 coding_count[HAL_RX_SU_MU_CODING_MAX];
u64 tid_count[IEEE80211_NUM_TIDS + 1];
u64 pream_cnt[HAL_RX_PREAMBLE_MAX];
@@ -441,32 +537,88 @@ struct ath12k_wbm_tx_stats {
u64 wbm_tx_comp_stats[HAL_WBM_REL_HTT_TX_COMP_STATUS_MAX];
};
-struct ath12k_sta {
- struct ath12k_vif *arvif;
+DECLARE_EWMA(avg_rssi, 10, 8)
+
+struct ath12k_link_sta {
+ struct ath12k_link_vif *arvif;
+ struct ath12k_sta *ahsta;
+
+ /* link address similar to ieee80211_link_sta */
+ u8 addr[ETH_ALEN];
/* the following are protected by ar->data_lock */
u32 changed; /* IEEE80211_RC_* */
u32 bw;
u32 nss;
u32 smps;
- enum hal_pn_type pn_type;
- struct work_struct update_wk;
+ struct wiphy_work update_wk;
struct rate_info txrate;
struct rate_info last_txrate;
u64 rx_duration;
u64 tx_duration;
u8 rssi_comb;
+ struct ewma_avg_rssi avg_rssi;
+ u8 link_id;
struct ath12k_rx_peer_stats *rx_stats;
struct ath12k_wbm_tx_stats *wbm_tx_stats;
u32 bw_prev;
+ u32 peer_nss;
+ s8 rssi_beacon;
+ s8 chain_signal[IEEE80211_MAX_CHAINS];
+
+ /* For now the assoc link will be considered primary */
+ bool is_assoc_link;
+
+ /* for firmware use only */
+ u8 link_idx;
+ u32 tx_retry_failed;
+ u32 tx_retry_count;
};
-#define ATH12K_MIN_5G_FREQ 4150
-#define ATH12K_MIN_6G_FREQ 5925
-#define ATH12K_MAX_6G_FREQ 7115
+struct ath12k_reoq_buf {
+ void *vaddr;
+ dma_addr_t paddr_aligned;
+ u32 size;
+};
+
+struct ath12k_sta {
+ struct ath12k_vif *ahvif;
+ enum hal_pn_type pn_type;
+ struct ath12k_link_sta deflink;
+ struct ath12k_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+ /* indicates bitmap of link sta created in FW */
+ u16 links_map;
+ u8 assoc_link_id;
+ u16 ml_peer_id;
+ u8 num_peer;
+
+ enum ieee80211_sta_state state;
+
+ struct ath12k_reoq_buf reoq_bufs[IEEE80211_NUM_TIDS + 1];
+};
+
+#define ATH12K_HALF_20MHZ_BW 10
+#define ATH12K_2GHZ_MIN_CENTER 2412
+#define ATH12K_2GHZ_MAX_CENTER 2484
+#define ATH12K_5GHZ_MIN_CENTER 4900
+#define ATH12K_5GHZ_MAX_CENTER 5920
+#define ATH12K_6GHZ_MIN_CENTER 5935
+#define ATH12K_6GHZ_MAX_CENTER 7115
+#define ATH12K_MIN_2GHZ_FREQ (ATH12K_2GHZ_MIN_CENTER - ATH12K_HALF_20MHZ_BW - 1)
+#define ATH12K_MAX_2GHZ_FREQ (ATH12K_2GHZ_MAX_CENTER + ATH12K_HALF_20MHZ_BW + 1)
+#define ATH12K_MIN_5GHZ_FREQ (ATH12K_5GHZ_MIN_CENTER - ATH12K_HALF_20MHZ_BW)
+#define ATH12K_MAX_5GHZ_FREQ (ATH12K_5GHZ_MAX_CENTER + ATH12K_HALF_20MHZ_BW)
+#define ATH12K_MIN_6GHZ_FREQ (ATH12K_6GHZ_MIN_CENTER - ATH12K_HALF_20MHZ_BW)
+#define ATH12K_MAX_6GHZ_FREQ (ATH12K_6GHZ_MAX_CENTER + ATH12K_HALF_20MHZ_BW)
#define ATH12K_NUM_CHANS 101
-#define ATH12K_MAX_5G_CHAN 173
+#define ATH12K_MAX_5GHZ_CHAN 173
+
+static inline bool ath12k_is_2ghz_channel_freq(u32 freq)
+{
+ return freq >= ATH12K_MIN_2GHZ_FREQ &&
+ freq <= ATH12K_MAX_2GHZ_FREQ;
+}
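The same pattern would extend to the other bands; the sketches below simply mirror the 2 GHz helper using the 5 GHz and 6 GHz bounds defined above (hypothetical helpers, not introduced by this patch):

/* hypothetical mirrors of ath12k_is_2ghz_channel_freq() */
static inline bool ath12k_is_5ghz_channel_freq(u32 freq)
{
	return freq >= ATH12K_MIN_5GHZ_FREQ &&
	       freq <= ATH12K_MAX_5GHZ_FREQ;
}

static inline bool ath12k_is_6ghz_channel_freq(u32 freq)
{
	return freq >= ATH12K_MIN_6GHZ_FREQ &&
	       freq <= ATH12K_MAX_6GHZ_FREQ;
}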
enum ath12k_hw_state {
ATH12K_HW_STATE_OFF,
@@ -474,18 +626,26 @@ enum ath12k_hw_state {
ATH12K_HW_STATE_RESTARTING,
ATH12K_HW_STATE_RESTARTED,
ATH12K_HW_STATE_WEDGED,
+ ATH12K_HW_STATE_TM,
/* Add other states as required */
};
/* Antenna noise floor */
#define ATH12K_DEFAULT_NOISE_FLOOR -95
+struct ath12k_ftm_event_obj {
+ u32 data_pos;
+ u32 expected_seq;
+ u8 *eventdata;
+};
+
struct ath12k_fw_stats {
u32 pdev_id;
u32 stats_id;
struct list_head pdevs;
struct list_head vdevs;
struct list_head bcn;
+ u32 num_vdev_recvd;
};
struct ath12k_dbg_htt_stats {
@@ -499,6 +659,12 @@ struct ath12k_debug {
struct dentry *debugfs_pdev;
struct dentry *debugfs_pdev_symlink;
struct ath12k_dbg_htt_stats htt_stats;
+ enum wmi_halphy_ctrl_path_stats_id tpc_stats_type;
+ bool tpc_request;
+ struct completion tpc_complete;
+ struct wmi_tpc_stats_arg *tpc_stats;
+ u32 rx_filter;
+ bool extd_rx_stats;
};
struct ath12k_per_peer_tx_stats {
@@ -518,6 +684,15 @@ struct ath12k_per_peer_tx_stats {
bool is_ampdu;
};
+struct ath12k_pdev_rssi_offsets {
+ s32 temp_offset;
+ s8 min_nf_dbm;
+	/* Cache the sum here to avoid recomputing it in the hot path:
+	 * noise_floor = min_nf_dbm + temp_offset
+	 */
+ s32 noise_floor;
+};
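A minimal sketch of keeping the cached sum consistent whenever either component changes; the setter name is hypothetical, the real updates happen in the driver's pdev/WMI handlers:

/* hypothetical helper: update both offsets and refresh the cached sum */
static inline void ath12k_pdev_set_rssi_offsets(struct ath12k_pdev_rssi_offsets *info,
						s32 temp_offset, s8 min_nf_dbm)
{
	info->temp_offset = temp_offset;
	info->min_nf_dbm = min_nf_dbm;
	/* keep the cached sum in sync so readers can use it directly */
	info->noise_floor = min_nf_dbm + temp_offset;
}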
+
#define ATH12K_FLUSH_TIMEOUT (5 * HZ)
#define ATH12K_VDEV_DELETE_TIMEOUT_HZ (5 * HZ)
@@ -539,9 +714,10 @@ struct ath12k {
struct delayed_work timeout;
enum ath12k_scan_state state;
bool is_roc;
- int vdev_id;
int roc_freq;
bool roc_notify;
+ struct wiphy_work vdev_clean_wk;
+ struct ath12k_link_vif *arvif;
} scan;
struct {
@@ -559,16 +735,13 @@ struct ath12k {
u32 txpower_scale;
u32 power_scale;
u32 chan_tx_pwr;
+ u32 rts_threshold;
u32 num_stations;
u32 max_num_stations;
- bool monitor_present;
- /* To synchronize concurrent synchronous mac80211 callback operations,
- * concurrent debugfs configuration and concurrent FW statistics events.
- */
- struct mutex conf_mutex;
+
/* protects the radio specific data like debug stats, ppdu_stats_info stats,
- * vdev_stop_status info, scan data, ath12k_sta info, ath12k_vif info,
- * channel context data, survey info, test mode data.
+ * vdev_stop_status info, scan data, ath12k_sta info, ath12k_link_vif info,
+ * channel context data, survey info, test mode data, regd_channel_update_queue.
*/
spinlock_t data_lock;
@@ -627,8 +800,10 @@ struct ath12k {
struct completion bss_survey_done;
struct work_struct regd_update_work;
+ struct work_struct regd_channel_update_work;
+ struct list_head regd_channel_update_queue;
- struct work_struct wmi_mgmt_tx_work;
+ struct wiphy_work wmi_mgmt_tx_work;
struct sk_buff_head wmi_mgmt_tx_queue;
struct ath12k_wow wow;
@@ -646,19 +821,39 @@ struct ath12k {
#endif
bool dfs_block_radar_events;
- bool monitor_conf_enabled;
bool monitor_vdev_created;
bool monitor_started;
int monitor_vdev_id;
- u32 freq_low;
- u32 freq_high;
+ struct wiphy_radio_freq_range freq_range;
bool nlo_enabled;
+
+ /* Protected by wiphy::mtx lock. */
+ u32 vdev_id_11d_scan;
+ struct completion completed_11d_scan;
+ enum ath12k_11d_state state_11d;
+ u8 alpha2[REG_ALPHA2_LEN];
+ bool regdom_set_by_user;
+ struct completion regd_update_completed;
+
+ struct completion fw_stats_complete;
+ struct completion fw_stats_done;
+
+ struct completion mlo_setup_done;
+ u32 mlo_setup_status;
+ u8 ftm_msgref;
+ struct ath12k_fw_stats fw_stats;
+ unsigned long last_tx_power_update;
+
+ s8 max_allowed_tx_power;
+ struct ath12k_pdev_rssi_offsets rssi_info;
};
struct ath12k_hw {
struct ieee80211_hw *hw;
+ struct device *dev;
+
/* Protect the write operation of the hardware state ath12k_hw::state
* between hardware start<=>reconfigure<=>stop transitions.
*/
@@ -666,8 +861,14 @@ struct ath12k_hw {
enum ath12k_hw_state state;
bool regd_updated;
bool use_6ghz_regd;
+
u8 num_radio;
+ DECLARE_BITMAP(free_ml_peer_id_map, ATH12K_MAX_MLO_PEERS);
+
+ /* protected by wiphy_lock() */
+ struct list_head ml_peers;
+
/* Keep last */
struct ath12k radio[] __aligned(sizeof(void *));
};
@@ -702,6 +903,10 @@ struct ath12k_pdev_cap {
u32 tx_chain_mask_shift;
u32 rx_chain_mask_shift;
struct ath12k_band_cap band[NUM_NL80211_BANDS];
+ u32 eml_cap;
+ u32 mld_cap;
+ bool nss_ratio_enabled;
+ u8 nss_ratio_info;
};
struct mlo_timestamp {
@@ -736,7 +941,7 @@ struct ath12k_board_data {
size_t len;
};
-struct ath12k_soc_dp_tx_err_stats {
+struct ath12k_device_dp_tx_err_stats {
/* TCL Ring Descriptor unavailable */
u32 desc_na[DP_TCL_NUM_RING_MAX];
/* Other failures during dp_tx due to mem allocation failure
@@ -745,28 +950,89 @@ struct ath12k_soc_dp_tx_err_stats {
atomic_t misc_fail;
};
-struct ath12k_soc_dp_stats {
+struct ath12k_device_dp_stats {
u32 err_ring_pkts;
u32 invalid_rbm;
u32 rxdma_error[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX];
u32 reo_error[HAL_REO_DEST_RING_ERROR_CODE_MAX];
u32 hal_reo_error[DP_REO_DST_RING_MAX];
- struct ath12k_soc_dp_tx_err_stats tx_err;
-};
-
-/**
- * enum ath12k_link_capable_flags - link capable flags
- *
- * Single/Multi link capability information
- *
- * @ATH12K_INTRA_DEVICE_MLO_SUPPORT: SLO/MLO form between the radio, where all
- * the links (radios) present within a device.
- * @ATH12K_INTER_DEVICE_MLO_SUPPORT: SLO/MLO form between the radio, where all
- * the links (radios) present across the devices.
+ struct ath12k_device_dp_tx_err_stats tx_err;
+ u32 reo_rx[DP_REO_DST_RING_MAX][ATH12K_MAX_DEVICES];
+ u32 rx_wbm_rel_source[HAL_WBM_REL_SRC_MODULE_MAX][ATH12K_MAX_DEVICES];
+ u32 tqm_rel_reason[MAX_TQM_RELEASE_REASON];
+ u32 fw_tx_status[MAX_FW_TX_STATUS];
+ u32 tx_wbm_rel_source[HAL_WBM_REL_SRC_MODULE_MAX];
+ u32 tx_enqueued[DP_TCL_NUM_RING_MAX];
+ u32 tx_completed[DP_TCL_NUM_RING_MAX];
+ u32 reo_excep_msdu_buf_type;
+};
+
+struct ath12k_reg_freq {
+ u32 start_freq;
+ u32 end_freq;
+};
+
+struct ath12k_mlo_memory {
+ struct target_mem_chunk chunk[ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
+ int mlo_mem_size;
+ bool init_done;
+};
+
+struct ath12k_hw_link {
+ u8 device_id;
+ u8 pdev_idx;
+};
+
+/* Holds info on the group of devices that are registered as a single
+ * wiphy, protected with struct ath12k_hw_group::mutex.
*/
-enum ath12k_link_capable_flags {
- ATH12K_INTRA_DEVICE_MLO_SUPPORT = BIT(0),
- ATH12K_INTER_DEVICE_MLO_SUPPORT = BIT(1),
+struct ath12k_hw_group {
+ struct list_head list;
+ u8 id;
+ u8 num_devices;
+ u8 num_probed;
+ u8 num_started;
+ unsigned long flags;
+ struct ath12k_base *ab[ATH12K_MAX_DEVICES];
+
+ /* protects access to this struct */
+ struct mutex mutex;
+
+	/* Holds information about wiphy (hw) registration.
+	 *
+	 * In the Multi/Single Link Operation case, all pdevs are registered
+	 * as a single wiphy. In other (legacy/non-MLO) cases, each pdev is
+	 * registered as a separate wiphy.
+	 */
+ struct ath12k_hw *ah[ATH12K_GROUP_MAX_RADIO];
+ u8 num_hw;
+ bool mlo_capable;
+ struct device_node *wsi_node[ATH12K_MAX_DEVICES];
+ struct ath12k_mlo_memory mlo_mem;
+ struct ath12k_hw_link hw_links[ATH12K_GROUP_MAX_RADIO];
+ bool hw_link_id_init_done;
+};
+
+/* Holds WSI info specific to each device, excluding WSI group info */
+struct ath12k_wsi_info {
+ u32 index;
+ u32 hw_link_id_base;
+};
+
+struct ath12k_dp_profile_params {
+ u32 tx_comp_ring_size;
+ u32 rxdma_monitor_buf_ring_size;
+ u32 rxdma_monitor_dst_ring_size;
+ u32 num_pool_tx_desc;
+ u32 rx_desc_count;
+};
+
+struct ath12k_mem_profile_based_param {
+ u32 num_vdevs;
+ u32 max_client_single;
+ u32 max_client_dbs;
+ u32 max_client_dbs_sbs;
+ struct ath12k_dp_profile_params dp_params;
};
/* Master structure to hold the hw data which may be used in core module */
@@ -782,6 +1048,10 @@ struct ath12k_base {
/* HW channel counters frequency value in hertz common to all MACs */
u32 cc_freq_hz;
+ struct ath12k_dump_file_data *dump_data;
+ size_t ath12k_coredump_len;
+ struct work_struct dump_work;
+
struct ath12k_htc htc;
struct ath12k_dp dp;
@@ -789,6 +1059,10 @@ struct ath12k_base {
void __iomem *mem;
unsigned long mem_len;
+ void __iomem *mem_ce;
+ u32 ce_remap_base_addr;
+ bool ce_remap;
+
struct {
enum ath12k_bus bus;
const struct ath12k_hif_ops *ops;
@@ -828,15 +1102,6 @@ struct ath12k_base {
struct ath12k_pdev __rcu *pdevs_active[MAX_RADIOS];
- /* Holds information of wiphy (hw) registration.
- *
- * In Multi/Single Link Operation case, all pdevs are registered as
- * a single wiphy. In other (legacy/Non-MLO) cases, each pdev is
- * registered as separate wiphys.
- */
- struct ath12k_hw *ah[MAX_RADIOS];
- u8 num_hw;
-
struct ath12k_wmi_hal_reg_capabilities_ext_arg hal_reg_cap[MAX_RADIOS];
unsigned long long free_vdev_map;
unsigned long long free_vdev_stats_id_map;
@@ -866,9 +1131,11 @@ struct ath12k_base {
*/
struct ieee80211_regdomain *new_regd[MAX_RADIOS];
+ struct ath12k_reg_info *reg_info[MAX_RADIOS];
+
/* Current DFS Regulatory */
enum ath12k_dfs_region dfs_region;
- struct ath12k_soc_dp_stats soc_stats;
+ struct ath12k_device_dp_stats device_stats;
#ifdef CONFIG_ATH12K_DEBUGFS
struct dentry *debugfs_soc;
#endif
@@ -886,6 +1153,8 @@ struct ath12k_base {
/* continuous recovery fail count */
atomic_t fail_cont_count;
unsigned long reset_fail_timeout;
+ struct work_struct update_11d_work;
+ u8 new_alpha2[2];
struct {
/* protected by data_lock */
u32 fw_crash_counter;
@@ -895,8 +1164,6 @@ struct ath12k_base {
struct ath12k_dbring_cap *db_caps;
u32 num_db_cap;
- struct timer_list mon_reap_timer;
-
struct completion htc_suspend;
u64 fw_soc_drop_count;
@@ -926,17 +1193,11 @@ struct ath12k_base {
size_t m3_len;
DECLARE_BITMAP(fw_features, ATH12K_FW_FEATURE_COUNT);
+ bool fw_features_valid;
} fw;
const struct hal_rx_ops *hal_rx_ops;
- /* mlo_capable_flags denotes the single/multi link operation
- * capabilities of the Device.
- *
- * See enum ath12k_link_capable_flags
- */
- u8 mlo_capable_flags;
-
struct completion restart_completed;
#ifdef CONFIG_ACPI
@@ -946,6 +1207,13 @@ struct ath12k_base {
u32 func_bit;
bool acpi_tas_enable;
bool acpi_bios_sar_enable;
+ bool acpi_disable_11be;
+ bool acpi_disable_rfkill;
+ bool acpi_cca_enable;
+ bool acpi_band_edge_enable;
+ bool acpi_enable_bdf;
+ u32 bit_flag;
+ char bdf_string[ATH12K_ACPI_BDF_MAX_LEN];
u8 tas_cfg[ATH12K_ACPI_DSM_TAS_CFG_SIZE];
u8 tas_sar_power_table[ATH12K_ACPI_DSM_TAS_DATA_SIZE];
u8 bios_sar_data[ATH12K_ACPI_DSM_BIOS_SAR_DATA_SIZE];
@@ -958,6 +1226,21 @@ struct ath12k_base {
struct notifier_block panic_nb;
+ struct ath12k_hw_group *ag;
+ struct ath12k_wsi_info wsi_info;
+ enum ath12k_firmware_mode fw_mode;
+ struct ath12k_ftm_event_obj ftm_event_obj;
+ bool hw_group_ref;
+
+	/* Denotes whether MLO is possible within the device */
+ bool single_chip_mlo_support;
+
+ struct ath12k_reg_freq reg_freq_2ghz;
+ struct ath12k_reg_freq reg_freq_5ghz;
+ struct ath12k_reg_freq reg_freq_6ghz;
+ const struct ath12k_mem_profile_based_param *profile_param;
+ enum ath12k_qmi_mem_mode target_mem_mode;
+
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
};
@@ -967,7 +1250,95 @@ struct ath12k_pdev_map {
u8 pdev_idx;
};
+struct ath12k_fw_stats_vdev {
+ struct list_head list;
+
+ u32 vdev_id;
+ u32 beacon_snr;
+ u32 data_snr;
+ u32 num_tx_frames[WLAN_MAX_AC];
+ u32 num_rx_frames;
+ u32 num_tx_frames_retries[WLAN_MAX_AC];
+ u32 num_tx_frames_failures[WLAN_MAX_AC];
+ u32 num_rts_fail;
+ u32 num_rts_success;
+ u32 num_rx_err;
+ u32 num_rx_discard;
+ u32 num_tx_not_acked;
+ u32 tx_rate_history[MAX_TX_RATE_VALUES];
+ u32 beacon_rssi_history[MAX_TX_RATE_VALUES];
+};
+
+struct ath12k_fw_stats_bcn {
+ struct list_head list;
+
+ u32 vdev_id;
+ u32 tx_bcn_succ_cnt;
+ u32 tx_bcn_outage_cnt;
+};
+
+struct ath12k_fw_stats_pdev {
+ struct list_head list;
+
+ /* PDEV stats */
+ s32 ch_noise_floor;
+ u32 tx_frame_count;
+ u32 rx_frame_count;
+ u32 rx_clear_count;
+ u32 cycle_count;
+ u32 phy_err_count;
+ u32 chan_tx_power;
+ u32 ack_rx_bad;
+ u32 rts_bad;
+ u32 rts_good;
+ u32 fcs_bad;
+ u32 no_beacons;
+ u32 mib_int_count;
+
+ /* PDEV TX stats */
+ s32 comp_queued;
+ s32 comp_delivered;
+ s32 msdu_enqued;
+ s32 mpdu_enqued;
+ s32 wmm_drop;
+ s32 local_enqued;
+ s32 local_freed;
+ s32 hw_queued;
+ s32 hw_reaped;
+ s32 underrun;
+ s32 tx_abort;
+ s32 mpdus_requed;
+ u32 tx_ko;
+ u32 data_rc;
+ u32 self_triggers;
+ u32 sw_retry_failure;
+ u32 illgl_rate_phy_err;
+ u32 pdev_cont_xretry;
+ u32 pdev_tx_timeout;
+ u32 pdev_resets;
+ u32 stateless_tid_alloc_failure;
+ u32 phy_underrun;
+ u32 txop_ovf;
+
+ /* PDEV RX stats */
+ s32 mid_ppdu_route_change;
+ s32 status_rcvd;
+ s32 r0_frags;
+ s32 r1_frags;
+ s32 r2_frags;
+ s32 r3_frags;
+ s32 htt_msdus;
+ s32 htt_mpdus;
+ s32 loc_msdus;
+ s32 loc_mpdus;
+ s32 oversize_amsdu;
+ s32 phy_errs;
+ s32 phy_err_drop;
+ s32 mpdu_errs;
+};
+
int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab);
+void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag);
int ath12k_core_pre_init(struct ath12k_base *ab);
int ath12k_core_init(struct ath12k_base *ath12k);
void ath12k_core_deinit(struct ath12k_base *ath12k);
@@ -988,12 +1359,22 @@ int ath12k_core_resume_early(struct ath12k_base *ab);
int ath12k_core_resume(struct ath12k_base *ab);
int ath12k_core_suspend(struct ath12k_base *ab);
int ath12k_core_suspend_late(struct ath12k_base *ab);
+void ath12k_core_hw_group_unassign(struct ath12k_base *ab);
+u8 ath12k_get_num_partner_link(struct ath12k *ar);
const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
const char *filename);
u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab);
u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab);
-u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab);
+
+void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag);
+void ath12k_fw_stats_init(struct ath12k *ar);
+void ath12k_fw_stats_bcn_free(struct list_head *head);
+void ath12k_fw_stats_free(struct ath12k_fw_stats *stats);
+void ath12k_fw_stats_reset(struct ath12k *ar);
+struct reserved_mem *ath12k_core_get_reserved_mem(struct ath12k_base *ab,
+ int index);
+enum ath12k_qmi_mem_mode ath12k_core_get_memory_mode(struct ath12k_base *ab);
static inline const char *ath12k_scan_state_str(enum ath12k_scan_state state)
{
@@ -1024,16 +1405,26 @@ static inline struct ath12k_skb_rxcb *ATH12K_SKB_RXCB(struct sk_buff *skb)
return (struct ath12k_skb_rxcb *)skb->cb;
}
-static inline struct ath12k_vif *ath12k_vif_to_arvif(struct ieee80211_vif *vif)
+static inline struct ath12k_vif *ath12k_vif_to_ahvif(struct ieee80211_vif *vif)
{
return (struct ath12k_vif *)vif->drv_priv;
}
-static inline struct ath12k_sta *ath12k_sta_to_arsta(struct ieee80211_sta *sta)
+static inline struct ath12k_sta *ath12k_sta_to_ahsta(struct ieee80211_sta *sta)
{
return (struct ath12k_sta *)sta->drv_priv;
}
+static inline struct ieee80211_sta *ath12k_ahsta_to_sta(struct ath12k_sta *ahsta)
+{
+ return container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
+}
+
+static inline struct ieee80211_vif *ath12k_ahvif_to_vif(struct ath12k_vif *ahvif)
+{
+ return container_of((void *)ahvif, struct ieee80211_vif, drv_priv);
+}
+
static inline struct ath12k *ath12k_ab_to_ar(struct ath12k_base *ab,
int mac_id)
{
@@ -1044,8 +1435,16 @@ static inline void ath12k_core_create_firmware_path(struct ath12k_base *ab,
const char *filename,
void *buf, size_t buf_len)
{
- snprintf(buf, buf_len, "%s/%s/%s", ATH12K_FW_DIR,
- ab->hw_params->fw.dir, filename);
+ const char *fw_name = NULL;
+
+ of_property_read_string(ab->dev->of_node, "firmware-name", &fw_name);
+
+ if (fw_name && strncmp(filename, "board", 5))
+ snprintf(buf, buf_len, "%s/%s/%s/%s", ATH12K_FW_DIR,
+ ab->hw_params->fw.dir, fw_name, filename);
+ else
+ snprintf(buf, buf_len, "%s/%s/%s", ATH12K_FW_DIR,
+ ab->hw_params->fw.dir, filename);
}
static inline const char *ath12k_bus_str(enum ath12k_bus bus)
@@ -1053,6 +1452,8 @@ static inline const char *ath12k_bus_str(enum ath12k_bus bus)
switch (bus) {
case ATH12K_BUS_PCI:
return "pci";
+ case ATH12K_BUS_AHB:
+ return "ahb";
}
return "unknown";
@@ -1085,4 +1486,34 @@ static inline struct ieee80211_hw *ath12k_ar_to_hw(struct ath12k *ar)
#define for_each_ar(ah, ar, index) \
for ((index) = 0; ((index) < (ah)->num_radio && \
((ar) = &(ah)->radio[(index)])); (index)++)
+
+static inline struct ath12k_hw *ath12k_ag_to_ah(struct ath12k_hw_group *ag, int idx)
+{
+ return ag->ah[idx];
+}
+
+static inline void ath12k_ag_set_ah(struct ath12k_hw_group *ag, int idx,
+ struct ath12k_hw *ah)
+{
+ ag->ah[idx] = ah;
+}
+
+static inline struct ath12k_hw_group *ath12k_ab_to_ag(struct ath12k_base *ab)
+{
+ return ab->ag;
+}
+
+static inline struct ath12k_base *ath12k_ag_to_ab(struct ath12k_hw_group *ag,
+ u8 device_id)
+{
+ return ag->ab[device_id];
+}
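Usage-wise, these accessors pair with struct ath12k_hw_group::num_devices when walking a group; a short illustrative sketch (not part of the patch):

/* hypothetical helper showing how the group accessors compose */
static inline void ath12k_ag_for_each_ab_example(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	u8 i;

	for (i = 0; i < ag->num_devices; i++) {
		ab = ath12k_ag_to_ab(ag, i);
		if (!ab)
			continue;
		/* operate on the device, e.g. inspect ab->wsi_info */
	}
}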
+
+static inline s32 ath12k_pdev_get_noise_floor(struct ath12k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ return ar->rssi_info.noise_floor;
+}
+
#endif /* _CORE_H_ */
diff --git a/drivers/net/wireless/ath/ath12k/coredump.c b/drivers/net/wireless/ath/ath12k/coredump.c
new file mode 100644
index 000000000000..ce1beeb54836
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/coredump.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/devcoredump.h>
+#include "hif.h"
+#include "coredump.h"
+#include "debug.h"
+
+enum ath12k_fw_crash_dump_type
+ath12k_coredump_get_dump_type(enum ath12k_qmi_target_mem type)
+{
+ enum ath12k_fw_crash_dump_type dump_type;
+
+ switch (type) {
+ case HOST_DDR_REGION_TYPE:
+ dump_type = FW_CRASH_DUMP_REMOTE_MEM_DATA;
+ break;
+ case M3_DUMP_REGION_TYPE:
+ dump_type = FW_CRASH_DUMP_M3_DUMP;
+ break;
+ case PAGEABLE_MEM_REGION_TYPE:
+ dump_type = FW_CRASH_DUMP_PAGEABLE_DATA;
+ break;
+ case BDF_MEM_REGION_TYPE:
+ case CALDB_MEM_REGION_TYPE:
+ dump_type = FW_CRASH_DUMP_NONE;
+ break;
+ case MLO_GLOBAL_MEM_REGION_TYPE:
+ dump_type = FW_CRASH_DUMP_MLO_GLOBAL_DATA;
+ break;
+ default:
+ dump_type = FW_CRASH_DUMP_TYPE_MAX;
+ break;
+ }
+
+ return dump_type;
+}
+
+void ath12k_coredump_upload(struct work_struct *work)
+{
+ struct ath12k_base *ab = container_of(work, struct ath12k_base, dump_work);
+
+ ath12k_info(ab, "Uploading coredump\n");
+ /* dev_coredumpv() takes ownership of the buffer */
+ dev_coredumpv(ab->dev, ab->dump_data, ab->ath12k_coredump_len, GFP_KERNEL);
+ ab->dump_data = NULL;
+}
+
+void ath12k_coredump_collect(struct ath12k_base *ab)
+{
+ ath12k_hif_coredump_download(ab);
+}
diff --git a/drivers/net/wireless/ath/ath12k/coredump.h b/drivers/net/wireless/ath/ath12k/coredump.h
new file mode 100644
index 000000000000..13f46a605113
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/coredump.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef _ATH12K_COREDUMP_H_
+#define _ATH12K_COREDUMP_H_
+
+#define ATH12K_FW_CRASH_DUMP_V2 2
+
+enum ath12k_fw_crash_dump_type {
+ FW_CRASH_DUMP_PAGING_DATA,
+ FW_CRASH_DUMP_RDDM_DATA,
+ FW_CRASH_DUMP_REMOTE_MEM_DATA,
+ FW_CRASH_DUMP_PAGEABLE_DATA,
+ FW_CRASH_DUMP_M3_DUMP,
+ FW_CRASH_DUMP_NONE,
+ FW_CRASH_DUMP_MLO_GLOBAL_DATA,
+
+ /* keep last */
+ FW_CRASH_DUMP_TYPE_MAX,
+};
+
+#define COREDUMP_TLV_HDR_SIZE 8
+
+struct ath12k_tlv_dump_data {
+	/* see ath12k_fw_crash_dump_type above */
+ __le32 type;
+
+ /* in bytes */
+ __le32 tlv_len;
+
+ /* pad to 32-bit boundaries as needed */
+ u8 tlv_data[];
+} __packed;
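A hedged sketch of how one such TLV would be laid out in a dump buffer; the helper and its buffer handling are assumptions for illustration, not the driver's actual dump builder:

/* illustrative only: append one TLV (header + payload) at buf, return bytes used */
static inline size_t ath12k_coredump_tlv_example(u8 *buf, size_t avail,
						 enum ath12k_fw_crash_dump_type type,
						 const void *data, u32 len)
{
	struct ath12k_tlv_dump_data *tlv = (struct ath12k_tlv_dump_data *)buf;

	if (avail < COREDUMP_TLV_HDR_SIZE + len)
		return 0;

	tlv->type = cpu_to_le32(type);
	tlv->tlv_len = cpu_to_le32(len);
	memcpy(tlv->tlv_data, data, len);

	return COREDUMP_TLV_HDR_SIZE + len;
}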
+
+struct ath12k_dump_file_data {
+ /* "ATH12K-FW-DUMP" */
+ char df_magic[16];
+ /* total dump len in bytes */
+ __le32 len;
+ /* file dump version */
+ __le32 version;
+ /* pci device id */
+ __le32 chip_id;
+ /* qrtr instance id */
+ __le32 qrtr_id;
+ /* pci domain id */
+ __le32 bus_id;
+ guid_t guid;
+ /* time-of-day stamp */
+ __le64 tv_sec;
+ /* time-of-day stamp, nano-seconds */
+ __le64 tv_nsec;
+ /* room for growth w/out changing binary format */
+ u8 unused[128];
+ u8 data[];
+} __packed;
+
+#ifdef CONFIG_ATH12K_COREDUMP
+enum ath12k_fw_crash_dump_type ath12k_coredump_get_dump_type
+ (enum ath12k_qmi_target_mem type);
+void ath12k_coredump_upload(struct work_struct *work);
+void ath12k_coredump_collect(struct ath12k_base *ab);
+#else
+static inline enum ath12k_fw_crash_dump_type ath12k_coredump_get_dump_type
+ (enum ath12k_qmi_target_mem type)
+{
+ return FW_CRASH_DUMP_TYPE_MAX;
+}
+
+static inline void ath12k_coredump_upload(struct work_struct *work)
+{
+}
+
+static inline void ath12k_coredump_collect(struct ath12k_base *ab)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/dbring.c b/drivers/net/wireless/ath/ath12k/dbring.c
index 788160c84c68..6604dacea2ae 100644
--- a/drivers/net/wireless/ath/ath12k/dbring.c
+++ b/drivers/net/wireless/ath/ath12k/dbring.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include "core.h"
@@ -117,7 +118,7 @@ int ath12k_dbring_wmi_cfg_setup(struct ath12k *ar,
struct ath12k_dbring *ring,
enum wmi_direct_buffer_module id)
{
- struct ath12k_wmi_pdev_dma_ring_cfg_arg arg = {0};
+ struct ath12k_wmi_pdev_dma_ring_cfg_arg arg = {};
int ret;
if (id >= WMI_DIRECT_BUF_MAX)
diff --git a/drivers/net/wireless/ath/ath12k/debug.c b/drivers/net/wireless/ath/ath12k/debug.c
index fe5a732ba9ec..5ce100cd9a9d 100644
--- a/drivers/net/wireless/ath/ath12k/debug.c
+++ b/drivers/net/wireless/ath/ath12k/debug.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/vmalloc.h>
@@ -36,7 +36,7 @@ void ath12k_err(struct ath12k_base *ab, const char *fmt, ...)
va_end(args);
}
-void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...)
+void __ath12k_warn(struct device *dev, const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
@@ -45,7 +45,7 @@ void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...)
va_start(args, fmt);
vaf.va = &args;
- dev_warn_ratelimited(ab->dev, "%pV", &vaf);
+ dev_warn_ratelimited(dev, "%pV", &vaf);
/* TODO: Trace the log */
va_end(args);
}
@@ -63,8 +63,10 @@ void __ath12k_dbg(struct ath12k_base *ab, enum ath12k_debug_mask mask,
vaf.fmt = fmt;
vaf.va = &args;
- if (ath12k_debug_mask & mask)
+ if (likely(ab))
dev_printk(KERN_DEBUG, ab->dev, "%pV", &vaf);
+ else
+ printk(KERN_DEBUG "ath12k: %pV", &vaf);
/* TODO: trace log */
diff --git a/drivers/net/wireless/ath/ath12k/debug.h b/drivers/net/wireless/ath/ath12k/debug.h
index f7005917362c..bf254e43a68d 100644
--- a/drivers/net/wireless/ath/ath12k/debug.h
+++ b/drivers/net/wireless/ath/ath12k/debug.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH12K_DEBUG_H_
@@ -26,14 +26,19 @@ enum ath12k_debug_mask {
ATH12K_DBG_DP_TX = 0x00002000,
ATH12K_DBG_DP_RX = 0x00004000,
ATH12K_DBG_WOW = 0x00008000,
+ ATH12K_DBG_CE = 0x00010000,
ATH12K_DBG_ANY = 0xffffffff,
};
__printf(2, 3) void ath12k_info(struct ath12k_base *ab, const char *fmt, ...);
__printf(2, 3) void ath12k_err(struct ath12k_base *ab, const char *fmt, ...);
-__printf(2, 3) void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...);
+__printf(2, 3) void __ath12k_warn(struct device *dev, const char *fmt, ...);
+
+#define ath12k_warn(ab, fmt, ...) __ath12k_warn((ab)->dev, fmt, ##__VA_ARGS__)
+#define ath12k_hw_warn(ah, fmt, ...) __ath12k_warn((ah)->dev, fmt, ##__VA_ARGS__)
extern unsigned int ath12k_debug_mask;
+extern bool ath12k_ftm_mode;
#ifdef CONFIG_ATH12K_DEBUG
__printf(3, 4) void __ath12k_dbg(struct ath12k_base *ab,
@@ -58,11 +63,14 @@ static inline void ath12k_dbg_dump(struct ath12k_base *ab,
}
#endif /* CONFIG_ATH12K_DEBUG */
-#define ath12k_dbg(ar, dbg_mask, fmt, ...) \
+#define ath12k_dbg(ab, dbg_mask, fmt, ...) \
do { \
typeof(dbg_mask) mask = (dbg_mask); \
if (ath12k_debug_mask & mask) \
- __ath12k_dbg(ar, mask, fmt, ##__VA_ARGS__); \
+ __ath12k_dbg(ab, mask, fmt, ##__VA_ARGS__); \
} while (0)
+#define ath12k_generic_dbg(dbg_mask, fmt, ...) \
+ ath12k_dbg(NULL, dbg_mask, fmt, ##__VA_ARGS__)
+
#endif /* _ATH12K_DEBUG_H_ */
diff --git a/drivers/net/wireless/ath/ath12k/debugfs.c b/drivers/net/wireless/ath/ath12k/debugfs.c
index 2a977c36af00..d6a86f075d73 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs.c
+++ b/drivers/net/wireless/ath/ath12k/debugfs.c
@@ -1,10 +1,12 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include "core.h"
+#include "dp_tx.h"
+#include "debug.h"
#include "debugfs.h"
#include "debugfs_htt_stats.h"
@@ -15,14 +17,14 @@ static ssize_t ath12k_write_simulate_radar(struct file *file,
struct ath12k *ar = file->private_data;
int ret;
- mutex_lock(&ar->conf_mutex);
+ wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
ret = ath12k_wmi_simulate_radar(ar);
if (ret)
goto exit;
ret = count;
exit:
- mutex_unlock(&ar->conf_mutex);
+ wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
return ret;
}
@@ -31,10 +33,1194 @@ static const struct file_operations fops_simulate_radar = {
.open = simple_open
};
+static ssize_t ath12k_read_simulate_fw_crash(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char buf[] =
+ "To simulate firmware crash write one of the keywords to this file:\n"
+ "`assert` - send WMI_FORCE_FW_HANG_CMDID to firmware to cause assert.\n";
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+static ssize_t
+ath12k_write_simulate_fw_crash(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath12k_base *ab = file->private_data;
+ struct ath12k_pdev *pdev;
+ struct ath12k *ar = NULL;
+ char buf[32] = {};
+ int i, ret;
+ ssize_t rc;
+
+ /* filter partial writes and invalid commands */
+ if (*ppos != 0 || count >= sizeof(buf) || count == 0)
+ return -EINVAL;
+
+ rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+ if (rc < 0)
+ return rc;
+
+ /* drop the possible '\n' from the end */
+ if (buf[*ppos - 1] == '\n')
+ buf[*ppos - 1] = '\0';
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (ar)
+ break;
+ }
+
+ if (!ar)
+ return -ENETDOWN;
+
+ if (!strcmp(buf, "assert")) {
+ ath12k_info(ab, "simulating firmware assert crash\n");
+ ret = ath12k_wmi_force_fw_hang_cmd(ar,
+ ATH12K_WMI_FW_HANG_ASSERT_TYPE,
+ ATH12K_WMI_FW_HANG_DELAY);
+ } else {
+ return -EINVAL;
+ }
+
+ if (ret) {
+ ath12k_warn(ab, "failed to simulate firmware crash: %d\n", ret);
+ return ret;
+ }
+
+ return count;
+}
+
+static const struct file_operations fops_simulate_fw_crash = {
+ .read = ath12k_read_simulate_fw_crash,
+ .write = ath12k_write_simulate_fw_crash,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath12k_write_tpc_stats_type(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath12k *ar = file->private_data;
+ u8 type;
+ int ret;
+
+ ret = kstrtou8_from_user(user_buf, count, 0, &type);
+ if (ret)
+ return ret;
+
+ if (type >= WMI_HALPHY_PDEV_TX_STATS_MAX)
+ return -EINVAL;
+
+ spin_lock_bh(&ar->data_lock);
+ ar->debug.tpc_stats_type = type;
+ spin_unlock_bh(&ar->data_lock);
+
+ return count;
+}
+
+static int ath12k_debug_tpc_stats_request(struct ath12k *ar)
+{
+ enum wmi_halphy_ctrl_path_stats_id tpc_stats_sub_id;
+ struct ath12k_base *ab = ar->ab;
+ int ret;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ reinit_completion(&ar->debug.tpc_complete);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->debug.tpc_request = true;
+ tpc_stats_sub_id = ar->debug.tpc_stats_type;
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = ath12k_wmi_send_tpc_stats_request(ar, tpc_stats_sub_id);
+ if (ret) {
+ ath12k_warn(ab, "failed to request pdev tpc stats: %d\n", ret);
+ spin_lock_bh(&ar->data_lock);
+ ar->debug.tpc_request = false;
+ spin_unlock_bh(&ar->data_lock);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath12k_get_tpc_ctl_mode_idx(struct wmi_tpc_stats_arg *tpc_stats,
+ enum wmi_tpc_pream_bw pream_bw, int *mode_idx)
+{
+ u32 chan_freq = le32_to_cpu(tpc_stats->tpc_config.chan_freq);
+ u8 band;
+
+ band = ((chan_freq > ATH12K_MIN_6GHZ_FREQ) ? NL80211_BAND_6GHZ :
+ ((chan_freq > ATH12K_MIN_5GHZ_FREQ) ? NL80211_BAND_5GHZ :
+ NL80211_BAND_2GHZ));
+
+ if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ) {
+ switch (pream_bw) {
+ case WMI_TPC_PREAM_HT20:
+ case WMI_TPC_PREAM_VHT20:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HT_VHT20_5GHZ_6GHZ;
+ break;
+ case WMI_TPC_PREAM_HE20:
+ case WMI_TPC_PREAM_EHT20:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT20_5GHZ_6GHZ;
+ break;
+ case WMI_TPC_PREAM_HT40:
+ case WMI_TPC_PREAM_VHT40:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HT_VHT40_5GHZ_6GHZ;
+ break;
+ case WMI_TPC_PREAM_HE40:
+ case WMI_TPC_PREAM_EHT40:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT40_5GHZ_6GHZ;
+ break;
+ case WMI_TPC_PREAM_VHT80:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_VHT80_5GHZ_6GHZ;
+ break;
+ case WMI_TPC_PREAM_EHT60:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT80_SU_PUNC20;
+ break;
+ case WMI_TPC_PREAM_HE80:
+ case WMI_TPC_PREAM_EHT80:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT80_5GHZ_6GHZ;
+ break;
+ case WMI_TPC_PREAM_VHT160:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_VHT160_5GHZ_6GHZ;
+ break;
+ case WMI_TPC_PREAM_EHT120:
+ case WMI_TPC_PREAM_EHT140:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT160_SU_PUNC20;
+ break;
+ case WMI_TPC_PREAM_HE160:
+ case WMI_TPC_PREAM_EHT160:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT160_5GHZ_6GHZ;
+ break;
+ case WMI_TPC_PREAM_EHT200:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC120;
+ break;
+ case WMI_TPC_PREAM_EHT240:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC80;
+ break;
+ case WMI_TPC_PREAM_EHT280:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC40;
+ break;
+ case WMI_TPC_PREAM_EHT320:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT320_5GHZ_6GHZ;
+ break;
+ default:
+			/* for 5 GHz and 6 GHz, the default case is OFDM */
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_LEGACY_5GHZ_6GHZ;
+ break;
+ }
+ } else {
+ switch (pream_bw) {
+ case WMI_TPC_PREAM_OFDM:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_LEGACY_2GHZ;
+ break;
+ case WMI_TPC_PREAM_HT20:
+ case WMI_TPC_PREAM_VHT20:
+ case WMI_TPC_PREAM_HE20:
+ case WMI_TPC_PREAM_EHT20:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HT20_2GHZ;
+ break;
+ case WMI_TPC_PREAM_HT40:
+ case WMI_TPC_PREAM_VHT40:
+ case WMI_TPC_PREAM_HE40:
+ case WMI_TPC_PREAM_EHT40:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HT40_2GHZ;
+ break;
+ default:
+			/* for 2 GHz, the default case is CCK */
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_CCK_2GHZ;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static s16 ath12k_tpc_get_rate(struct ath12k *ar,
+ struct wmi_tpc_stats_arg *tpc_stats,
+ u32 rate_idx, u32 num_chains, u32 rate_code,
+ enum wmi_tpc_pream_bw pream_bw,
+ enum wmi_halphy_ctrl_path_stats_id type,
+ u32 eht_rate_idx)
+{
+ u32 tot_nss, tot_modes, txbf_on_off, index_offset1, index_offset2, index_offset3;
+ u8 chain_idx, stm_idx, num_streams;
+ bool is_mu, txbf_enabled = 0;
+ s8 rates_ctl_min, tpc_ctl;
+ s16 rates, tpc, reg_pwr;
+ u16 rate1, rate2;
+ int mode, ret;
+
+ num_streams = 1 + ATH12K_HW_NSS(rate_code);
+ chain_idx = num_chains - 1;
+ stm_idx = num_streams - 1;
+ mode = -1;
+
+ ret = ath12k_get_tpc_ctl_mode_idx(tpc_stats, pream_bw, &mode);
+ if (ret) {
+ ath12k_warn(ar->ab, "Invalid mode index received\n");
+ tpc = TPC_INVAL;
+ goto out;
+ }
+
+ if (num_chains < num_streams) {
+ tpc = TPC_INVAL;
+ goto out;
+ }
+
+ if (le32_to_cpu(tpc_stats->tpc_config.num_tx_chain) <= 1) {
+ tpc = TPC_INVAL;
+ goto out;
+ }
+
+ if (type == WMI_HALPHY_PDEV_TX_SUTXBF_STATS ||
+ type == WMI_HALPHY_PDEV_TX_MUTXBF_STATS)
+ txbf_enabled = 1;
+
+ if (type == WMI_HALPHY_PDEV_TX_MU_STATS ||
+ type == WMI_HALPHY_PDEV_TX_MUTXBF_STATS) {
+ is_mu = true;
+ } else {
+ is_mu = false;
+ }
+
+	/* Take the minimum across the CTL array, the rates array and the
+	 * regulatory power table; tpc is the minimum of all three.
+	 */
+ if (pream_bw >= WMI_TPC_PREAM_EHT20 && pream_bw <= WMI_TPC_PREAM_EHT320) {
+ rate2 = tpc_stats->rates_array2.rate_array[eht_rate_idx];
+ if (is_mu)
+ rates = u32_get_bits(rate2, ATH12K_TPC_RATE_ARRAY_MU);
+ else
+ rates = u32_get_bits(rate2, ATH12K_TPC_RATE_ARRAY_SU);
+ } else {
+ rate1 = tpc_stats->rates_array1.rate_array[rate_idx];
+ if (is_mu)
+ rates = u32_get_bits(rate1, ATH12K_TPC_RATE_ARRAY_MU);
+ else
+ rates = u32_get_bits(rate1, ATH12K_TPC_RATE_ARRAY_SU);
+ }
+
+ if (tpc_stats->tlvs_rcvd & WMI_TPC_CTL_PWR_ARRAY) {
+ tot_nss = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.d1);
+ tot_modes = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.d2);
+ txbf_on_off = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.d3);
+ index_offset1 = txbf_on_off * tot_modes * tot_nss;
+ index_offset2 = tot_modes * tot_nss;
+ index_offset3 = tot_nss;
+
+ tpc_ctl = *(tpc_stats->ctl_array.ctl_pwr_table +
+ chain_idx * index_offset1 + txbf_enabled * index_offset2
+ + mode * index_offset3 + stm_idx);
+ } else {
+ tpc_ctl = TPC_MAX;
+ ath12k_warn(ar->ab,
+ "ctl array for tpc stats not received from fw\n");
+ }
+
+ rates_ctl_min = min_t(s16, rates, tpc_ctl);
+
+ reg_pwr = tpc_stats->max_reg_allowed_power.reg_pwr_array[chain_idx];
+
+ if (reg_pwr < 0)
+ reg_pwr = TPC_INVAL;
+
+ tpc = min_t(s16, rates_ctl_min, reg_pwr);
+
+	/* MODULATION_LIMIT is the maximum power limit; tpc must not exceed
+	 * the modulation limit even if the minimum of the three arrays above
+	 * is greater than it.
+	 */
+ tpc = min_t(s16, tpc, MODULATION_LIMIT);
+
+out:
+ return tpc;
+}
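As a worked illustration of the min-of-three logic (values are made up and, like the arrays, expressed in 0.25 dBm steps): if the rates array allows 60, the CTL table allows 72 and the regulatory table allows 64, the resulting tpc is 60, and it would still be capped at MODULATION_LIMIT if that were lower.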
+
+static u16 ath12k_get_ratecode(u16 pream_idx, u16 nss, u16 mcs_rate)
+{
+ u16 mode_type = ~0;
+
+	/* The assignments below are for printing purposes only */
+ switch (pream_idx) {
+ case WMI_TPC_PREAM_CCK:
+ mode_type = WMI_RATE_PREAMBLE_CCK;
+ break;
+ case WMI_TPC_PREAM_OFDM:
+ mode_type = WMI_RATE_PREAMBLE_OFDM;
+ break;
+ case WMI_TPC_PREAM_HT20:
+ case WMI_TPC_PREAM_HT40:
+ mode_type = WMI_RATE_PREAMBLE_HT;
+ break;
+ case WMI_TPC_PREAM_VHT20:
+ case WMI_TPC_PREAM_VHT40:
+ case WMI_TPC_PREAM_VHT80:
+ case WMI_TPC_PREAM_VHT160:
+ mode_type = WMI_RATE_PREAMBLE_VHT;
+ break;
+ case WMI_TPC_PREAM_HE20:
+ case WMI_TPC_PREAM_HE40:
+ case WMI_TPC_PREAM_HE80:
+ case WMI_TPC_PREAM_HE160:
+ mode_type = WMI_RATE_PREAMBLE_HE;
+ break;
+ case WMI_TPC_PREAM_EHT20:
+ case WMI_TPC_PREAM_EHT40:
+ case WMI_TPC_PREAM_EHT60:
+ case WMI_TPC_PREAM_EHT80:
+ case WMI_TPC_PREAM_EHT120:
+ case WMI_TPC_PREAM_EHT140:
+ case WMI_TPC_PREAM_EHT160:
+ case WMI_TPC_PREAM_EHT200:
+ case WMI_TPC_PREAM_EHT240:
+ case WMI_TPC_PREAM_EHT280:
+ case WMI_TPC_PREAM_EHT320:
+ mode_type = WMI_RATE_PREAMBLE_EHT;
+ if (mcs_rate == 0 || mcs_rate == 1)
+ mcs_rate += 14;
+ else
+ mcs_rate -= 2;
+ break;
+ default:
+ return mode_type;
+ }
+ return ((mode_type << 8) | ((nss & 0x7) << 5) | (mcs_rate & 0x1F));
+}
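For example, an HT rate at NSS 2 (nss index 1) and MCS 7 packs as (WMI_RATE_PREAMBLE_HT << 8) | (1 << 5) | 7; for EHT the MCS is first remapped (0 and 1 become 14 and 15, everything else is shifted down by two) before being packed the same way.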
+
+static bool ath12k_he_supports_extra_mcs(struct ath12k *ar, int freq)
+{
+ struct ath12k_pdev_cap *cap = &ar->pdev->cap;
+ struct ath12k_band_cap *cap_band;
+ bool extra_mcs_supported;
+
+ if (freq <= ATH12K_2GHZ_MAX_FREQUENCY)
+ cap_band = &cap->band[NL80211_BAND_2GHZ];
+ else if (freq <= ATH12K_5GHZ_MAX_FREQUENCY)
+ cap_band = &cap->band[NL80211_BAND_5GHZ];
+ else
+ cap_band = &cap->band[NL80211_BAND_6GHZ];
+
+ extra_mcs_supported = u32_get_bits(cap_band->he_cap_info[1],
+ HE_EXTRA_MCS_SUPPORT);
+ return extra_mcs_supported;
+}
+
+static int ath12k_tpc_fill_pream(struct ath12k *ar, char *buf, int buf_len, int len,
+ enum wmi_tpc_pream_bw pream_bw, u32 max_rix,
+ int max_nss, int max_rates, int pream_type,
+ enum wmi_halphy_ctrl_path_stats_id tpc_type,
+ int rate_idx, int eht_rate_idx)
+{
+ struct wmi_tpc_stats_arg *tpc_stats = ar->debug.tpc_stats;
+ int nss, rates, chains;
+ u8 active_tx_chains;
+ u16 rate_code;
+ s16 tpc;
+
+ static const char *const pream_str[] = {
+ [WMI_TPC_PREAM_CCK] = "CCK",
+ [WMI_TPC_PREAM_OFDM] = "OFDM",
+ [WMI_TPC_PREAM_HT20] = "HT20",
+ [WMI_TPC_PREAM_HT40] = "HT40",
+ [WMI_TPC_PREAM_VHT20] = "VHT20",
+ [WMI_TPC_PREAM_VHT40] = "VHT40",
+ [WMI_TPC_PREAM_VHT80] = "VHT80",
+ [WMI_TPC_PREAM_VHT160] = "VHT160",
+ [WMI_TPC_PREAM_HE20] = "HE20",
+ [WMI_TPC_PREAM_HE40] = "HE40",
+ [WMI_TPC_PREAM_HE80] = "HE80",
+ [WMI_TPC_PREAM_HE160] = "HE160",
+ [WMI_TPC_PREAM_EHT20] = "EHT20",
+ [WMI_TPC_PREAM_EHT40] = "EHT40",
+ [WMI_TPC_PREAM_EHT60] = "EHT60",
+ [WMI_TPC_PREAM_EHT80] = "EHT80",
+ [WMI_TPC_PREAM_EHT120] = "EHT120",
+ [WMI_TPC_PREAM_EHT140] = "EHT140",
+ [WMI_TPC_PREAM_EHT160] = "EHT160",
+ [WMI_TPC_PREAM_EHT200] = "EHT200",
+ [WMI_TPC_PREAM_EHT240] = "EHT240",
+ [WMI_TPC_PREAM_EHT280] = "EHT280",
+ [WMI_TPC_PREAM_EHT320] = "EHT320"};
+
+ active_tx_chains = ar->num_tx_chains;
+
+ for (nss = 0; nss < max_nss; nss++) {
+ for (rates = 0; rates < max_rates; rates++, rate_idx++, max_rix++) {
+			/* FW sends extra MCS (10 & 11) for VHT and HE rates which are
+			 * not used, hence skip them here
+			 */
+ if (pream_type == WMI_RATE_PREAMBLE_VHT &&
+ rates > ATH12K_VHT_MCS_MAX)
+ continue;
+
+ if (pream_type == WMI_RATE_PREAMBLE_HE &&
+ rates > ATH12K_HE_MCS_MAX)
+ continue;
+
+ if (pream_type == WMI_RATE_PREAMBLE_EHT &&
+ rates > ATH12K_EHT_MCS_MAX)
+ continue;
+
+ rate_code = ath12k_get_ratecode(pream_bw, nss, rates);
+ len += scnprintf(buf + len, buf_len - len,
+ "%d\t %s\t 0x%03x\t", max_rix,
+ pream_str[pream_bw], rate_code);
+
+ for (chains = 0; chains < active_tx_chains; chains++) {
+ if (nss > chains) {
+ len += scnprintf(buf + len,
+ buf_len - len,
+ "\t%s", "NA");
+ } else {
+ tpc = ath12k_tpc_get_rate(ar, tpc_stats,
+ rate_idx, chains + 1,
+ rate_code, pream_bw,
+ tpc_type,
+ eht_rate_idx);
+
+ if (tpc == TPC_INVAL) {
+ len += scnprintf(buf + len,
+ buf_len - len, "\tNA");
+ } else {
+ len += scnprintf(buf + len,
+ buf_len - len, "\t%d",
+ tpc);
+ }
+ }
+ }
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ if (pream_type == WMI_RATE_PREAMBLE_EHT)
+				/* fetch the next EHT rate power from rates_array2 */
+ ++eht_rate_idx;
+ }
+ }
+
+ return len;
+}
+
+static int ath12k_tpc_stats_print(struct ath12k *ar,
+ struct wmi_tpc_stats_arg *tpc_stats,
+ char *buf, size_t len,
+ enum wmi_halphy_ctrl_path_stats_id type)
+{
+ u32 eht_idx = 0, pream_idx = 0, rate_pream_idx = 0, total_rates = 0, max_rix = 0;
+ u32 chan_freq, num_tx_chain, caps, i, j = 1;
+ size_t buf_len = ATH12K_TPC_STATS_BUF_SIZE;
+ u8 nss, active_tx_chains;
+ bool he_ext_mcs;
+ static const char *const type_str[WMI_HALPHY_PDEV_TX_STATS_MAX] = {
+ [WMI_HALPHY_PDEV_TX_SU_STATS] = "SU",
+ [WMI_HALPHY_PDEV_TX_SUTXBF_STATS] = "SU WITH TXBF",
+ [WMI_HALPHY_PDEV_TX_MU_STATS] = "MU",
+ [WMI_HALPHY_PDEV_TX_MUTXBF_STATS] = "MU WITH TXBF"};
+
+ u8 max_rates[WMI_TPC_PREAM_MAX] = {
+ [WMI_TPC_PREAM_CCK] = ATH12K_CCK_RATES,
+ [WMI_TPC_PREAM_OFDM] = ATH12K_OFDM_RATES,
+ [WMI_TPC_PREAM_HT20] = ATH12K_HT_RATES,
+ [WMI_TPC_PREAM_HT40] = ATH12K_HT_RATES,
+ [WMI_TPC_PREAM_VHT20] = ATH12K_VHT_RATES,
+ [WMI_TPC_PREAM_VHT40] = ATH12K_VHT_RATES,
+ [WMI_TPC_PREAM_VHT80] = ATH12K_VHT_RATES,
+ [WMI_TPC_PREAM_VHT160] = ATH12K_VHT_RATES,
+ [WMI_TPC_PREAM_HE20] = ATH12K_HE_RATES,
+ [WMI_TPC_PREAM_HE40] = ATH12K_HE_RATES,
+ [WMI_TPC_PREAM_HE80] = ATH12K_HE_RATES,
+ [WMI_TPC_PREAM_HE160] = ATH12K_HE_RATES,
+ [WMI_TPC_PREAM_EHT20] = ATH12K_EHT_RATES,
+ [WMI_TPC_PREAM_EHT40] = ATH12K_EHT_RATES,
+ [WMI_TPC_PREAM_EHT60] = ATH12K_EHT_RATES,
+ [WMI_TPC_PREAM_EHT80] = ATH12K_EHT_RATES,
+ [WMI_TPC_PREAM_EHT120] = ATH12K_EHT_RATES,
+ [WMI_TPC_PREAM_EHT140] = ATH12K_EHT_RATES,
+ [WMI_TPC_PREAM_EHT160] = ATH12K_EHT_RATES,
+ [WMI_TPC_PREAM_EHT200] = ATH12K_EHT_RATES,
+ [WMI_TPC_PREAM_EHT240] = ATH12K_EHT_RATES,
+ [WMI_TPC_PREAM_EHT280] = ATH12K_EHT_RATES,
+ [WMI_TPC_PREAM_EHT320] = ATH12K_EHT_RATES};
+ static const u8 max_nss[WMI_TPC_PREAM_MAX] = {
+ [WMI_TPC_PREAM_CCK] = ATH12K_NSS_1,
+ [WMI_TPC_PREAM_OFDM] = ATH12K_NSS_1,
+ [WMI_TPC_PREAM_HT20] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_HT40] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_VHT20] = ATH12K_NSS_8,
+ [WMI_TPC_PREAM_VHT40] = ATH12K_NSS_8,
+ [WMI_TPC_PREAM_VHT80] = ATH12K_NSS_8,
+ [WMI_TPC_PREAM_VHT160] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_HE20] = ATH12K_NSS_8,
+ [WMI_TPC_PREAM_HE40] = ATH12K_NSS_8,
+ [WMI_TPC_PREAM_HE80] = ATH12K_NSS_8,
+ [WMI_TPC_PREAM_HE160] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT20] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT40] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT60] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT80] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT120] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT140] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT160] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT200] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT240] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT280] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT320] = ATH12K_NSS_4};
+
+ u16 rate_idx[WMI_TPC_PREAM_MAX] = {}, eht_rate_idx[WMI_TPC_PREAM_MAX] = {};
+ static const u8 pream_type[WMI_TPC_PREAM_MAX] = {
+ [WMI_TPC_PREAM_CCK] = WMI_RATE_PREAMBLE_CCK,
+ [WMI_TPC_PREAM_OFDM] = WMI_RATE_PREAMBLE_OFDM,
+ [WMI_TPC_PREAM_HT20] = WMI_RATE_PREAMBLE_HT,
+ [WMI_TPC_PREAM_HT40] = WMI_RATE_PREAMBLE_HT,
+ [WMI_TPC_PREAM_VHT20] = WMI_RATE_PREAMBLE_VHT,
+ [WMI_TPC_PREAM_VHT40] = WMI_RATE_PREAMBLE_VHT,
+ [WMI_TPC_PREAM_VHT80] = WMI_RATE_PREAMBLE_VHT,
+ [WMI_TPC_PREAM_VHT160] = WMI_RATE_PREAMBLE_VHT,
+ [WMI_TPC_PREAM_HE20] = WMI_RATE_PREAMBLE_HE,
+ [WMI_TPC_PREAM_HE40] = WMI_RATE_PREAMBLE_HE,
+ [WMI_TPC_PREAM_HE80] = WMI_RATE_PREAMBLE_HE,
+ [WMI_TPC_PREAM_HE160] = WMI_RATE_PREAMBLE_HE,
+ [WMI_TPC_PREAM_EHT20] = WMI_RATE_PREAMBLE_EHT,
+ [WMI_TPC_PREAM_EHT40] = WMI_RATE_PREAMBLE_EHT,
+ [WMI_TPC_PREAM_EHT60] = WMI_RATE_PREAMBLE_EHT,
+ [WMI_TPC_PREAM_EHT80] = WMI_RATE_PREAMBLE_EHT,
+ [WMI_TPC_PREAM_EHT120] = WMI_RATE_PREAMBLE_EHT,
+ [WMI_TPC_PREAM_EHT140] = WMI_RATE_PREAMBLE_EHT,
+ [WMI_TPC_PREAM_EHT160] = WMI_RATE_PREAMBLE_EHT,
+ [WMI_TPC_PREAM_EHT200] = WMI_RATE_PREAMBLE_EHT,
+ [WMI_TPC_PREAM_EHT240] = WMI_RATE_PREAMBLE_EHT,
+ [WMI_TPC_PREAM_EHT280] = WMI_RATE_PREAMBLE_EHT,
+ [WMI_TPC_PREAM_EHT320] = WMI_RATE_PREAMBLE_EHT};
+
+ chan_freq = le32_to_cpu(tpc_stats->tpc_config.chan_freq);
+ num_tx_chain = le32_to_cpu(tpc_stats->tpc_config.num_tx_chain);
+ caps = le32_to_cpu(tpc_stats->tpc_config.caps);
+
+ active_tx_chains = ar->num_tx_chains;
+ he_ext_mcs = ath12k_he_supports_extra_mcs(ar, chan_freq);
+
+	/* MCS 12 & 13 are sent by FW for certain HWs in the rate array;
+	 * skip them as they are not supported
+	 */
+ if (he_ext_mcs) {
+ for (i = WMI_TPC_PREAM_HE20; i <= WMI_TPC_PREAM_HE160; ++i)
+ max_rates[i] = ATH12K_HE_RATES;
+ }
+
+ if (type == WMI_HALPHY_PDEV_TX_MU_STATS ||
+ type == WMI_HALPHY_PDEV_TX_MUTXBF_STATS) {
+ pream_idx = WMI_TPC_PREAM_VHT20;
+
+ for (i = WMI_TPC_PREAM_CCK; i <= WMI_TPC_PREAM_HT40; ++i)
+ max_rix += max_nss[i] * max_rates[i];
+ }
+ /* Enumerate all the rate indices */
+ for (i = rate_pream_idx + 1; i < WMI_TPC_PREAM_MAX; i++) {
+ nss = (max_nss[i - 1] < num_tx_chain ?
+ max_nss[i - 1] : num_tx_chain);
+
+ rate_idx[i] = rate_idx[i - 1] + max_rates[i - 1] * nss;
+
+ if (pream_type[i] == WMI_RATE_PREAMBLE_EHT) {
+ eht_rate_idx[j] = eht_rate_idx[j - 1] + max_rates[i] * nss;
+ ++j;
+ }
+ }
+
+ for (i = 0; i < WMI_TPC_PREAM_MAX; i++) {
+ nss = (max_nss[i] < num_tx_chain ?
+ max_nss[i] : num_tx_chain);
+ total_rates += max_rates[i] * nss;
+ }
+
+ len += scnprintf(buf + len, buf_len - len,
+ "No.of rates-%d\n", total_rates);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "**************** %s ****************\n",
+ type_str[type]);
+ len += scnprintf(buf + len, buf_len - len,
+ "\t\t\t\tTPC values for Active chains\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "Rate idx Preamble Rate code");
+
+ for (i = 1; i <= active_tx_chains; ++i) {
+ len += scnprintf(buf + len, buf_len - len,
+ "\t%d-Chain", i);
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ for (i = pream_idx; i < WMI_TPC_PREAM_MAX; i++) {
+ if (chan_freq <= 2483) {
+ if (i == WMI_TPC_PREAM_VHT80 ||
+ i == WMI_TPC_PREAM_VHT160 ||
+ i == WMI_TPC_PREAM_HE80 ||
+ i == WMI_TPC_PREAM_HE160 ||
+ (i >= WMI_TPC_PREAM_EHT60 &&
+ i <= WMI_TPC_PREAM_EHT320)) {
+ max_rix += max_nss[i] * max_rates[i];
+ continue;
+ }
+ } else {
+ if (i == WMI_TPC_PREAM_CCK) {
+ max_rix += max_rates[i];
+ continue;
+ }
+ }
+
+ nss = (max_nss[i] < ar->num_tx_chains ? max_nss[i] : ar->num_tx_chains);
+
+ if (!(caps &
+ (1 << ATH12K_TPC_STATS_SUPPORT_BE_PUNC))) {
+ if (i == WMI_TPC_PREAM_EHT60 || i == WMI_TPC_PREAM_EHT120 ||
+ i == WMI_TPC_PREAM_EHT140 || i == WMI_TPC_PREAM_EHT200 ||
+ i == WMI_TPC_PREAM_EHT240 || i == WMI_TPC_PREAM_EHT280) {
+ max_rix += max_nss[i] * max_rates[i];
+ continue;
+ }
+ }
+
+ len = ath12k_tpc_fill_pream(ar, buf, buf_len, len, i, max_rix, nss,
+ max_rates[i], pream_type[i],
+ type, rate_idx[i], eht_rate_idx[eht_idx]);
+
+ if (pream_type[i] == WMI_RATE_PREAMBLE_EHT)
+			/* fetch the next EHT rate index from rates_array2 */
+ ++eht_idx;
+
+ max_rix += max_nss[i] * max_rates[i];
+ }
+ return len;
+}
+
+static void ath12k_tpc_stats_fill(struct ath12k *ar,
+ struct wmi_tpc_stats_arg *tpc_stats,
+ char *buf)
+{
+ size_t buf_len = ATH12K_TPC_STATS_BUF_SIZE;
+ struct wmi_tpc_config_params *tpc;
+ size_t len = 0;
+
+ if (!tpc_stats) {
+ ath12k_warn(ar->ab, "failed to find tpc stats\n");
+ return;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ tpc = &tpc_stats->tpc_config;
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "*************** TPC config **************\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "* powers are in 0.25 dBm steps\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "reg domain-%d\t\tchan freq-%d\n",
+ tpc->reg_domain, tpc->chan_freq);
+ len += scnprintf(buf + len, buf_len - len,
+ "power limit-%d\t\tmax reg-domain Power-%d\n",
+ le32_to_cpu(tpc->twice_max_reg_power) / 2, tpc->power_limit);
+ len += scnprintf(buf + len, buf_len - len,
+ "No.of tx chain-%d\t",
+ ar->num_tx_chains);
+
+ ath12k_tpc_stats_print(ar, tpc_stats, buf, len,
+ ar->debug.tpc_stats_type);
+
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static int ath12k_open_tpc_stats(struct inode *inode, struct file *file)
+{
+ struct ath12k *ar = inode->i_private;
+ struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+ int ret;
+
+ guard(wiphy)(ath12k_ar_to_hw(ar)->wiphy);
+
+ if (ah->state != ATH12K_HW_STATE_ON) {
+ ath12k_warn(ar->ab, "Interface not up\n");
+ return -ENETDOWN;
+ }
+
+ void *buf __free(kfree) = kzalloc(ATH12K_TPC_STATS_BUF_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = ath12k_debug_tpc_stats_request(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to request tpc stats: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (!wait_for_completion_timeout(&ar->debug.tpc_complete, TPC_STATS_WAIT_TIME)) {
+ spin_lock_bh(&ar->data_lock);
+ ath12k_wmi_free_tpc_stats_mem(ar);
+ ar->debug.tpc_request = false;
+ spin_unlock_bh(&ar->data_lock);
+ return -ETIMEDOUT;
+ }
+
+ ath12k_tpc_stats_fill(ar, ar->debug.tpc_stats, buf);
+ file->private_data = no_free_ptr(buf);
+
+ spin_lock_bh(&ar->data_lock);
+ ath12k_wmi_free_tpc_stats_mem(ar);
+ spin_unlock_bh(&ar->data_lock);
+
+ return 0;
+}
+
+static ssize_t ath12k_read_tpc_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static int ath12k_release_tpc_stats(struct inode *inode,
+ struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
+static const struct file_operations fops_tpc_stats = {
+ .open = ath12k_open_tpc_stats,
+ .release = ath12k_release_tpc_stats,
+ .read = ath12k_read_tpc_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static const struct file_operations fops_tpc_stats_type = {
+ .write = ath12k_write_tpc_stats_type,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath12k_write_extd_rx_stats(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath12k *ar = file->private_data;
+ struct htt_rx_ring_tlv_filter tlv_filter = {};
+ u32 ring_id, rx_filter = 0;
+ bool enable;
+ int ret, i;
+
+ if (kstrtobool_from_user(ubuf, count, &enable))
+ return -EINVAL;
+
+ wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
+
+ if (!ar->ab->hw_params->rxdma1_enable) {
+ ret = count;
+ goto exit;
+ }
+
+ if (ar->ah->state != ATH12K_HW_STATE_ON) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (enable == ar->debug.extd_rx_stats) {
+ ret = count;
+ goto exit;
+ }
+
+ if (enable) {
+ rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_START;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO;
+
+ tlv_filter.rx_filter = rx_filter;
+ tlv_filter.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0;
+ tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1;
+ tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2;
+ tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 |
+ HTT_RX_FP_DATA_FILTER_FLASG3;
+ } else {
+ tlv_filter = ath12k_mac_mon_status_filter_default;
+ }
+
+ ar->debug.rx_filter = tlv_filter.rx_filter;
+
+ for (i = 0; i < ar->ab->hw_params->num_rxdma_per_pdev; i++) {
+ ring_id = ar->dp.rxdma_mon_dst_ring[i].ring_id;
+ ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id + i,
+ HAL_RXDMA_MONITOR_DST,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set rx filter for monitor status ring\n");
+ goto exit;
+ }
+ }
+
+ ar->debug.extd_rx_stats = !!enable;
+ ret = count;
+exit:
+ wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
+ return ret;
+}
+
+static ssize_t ath12k_read_extd_rx_stats(struct file *file,
+ char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath12k *ar = file->private_data;
+ char buf[32];
+ int len = 0;
+
+ wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
+ len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+ ar->debug.extd_rx_stats);
+ wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_extd_rx_stats = {
+ .read = ath12k_read_extd_rx_stats,
+ .write = ath12k_write_extd_rx_stats,
+ .open = simple_open,
+};
+
+static int ath12k_open_link_stats(struct inode *inode, struct file *file)
+{
+ struct ath12k_vif *ahvif = inode->i_private;
+ size_t len = 0, buf_len = (PAGE_SIZE * 2);
+ struct ath12k_link_stats linkstat;
+ struct ath12k_link_vif *arvif;
+ unsigned long links_map;
+ struct wiphy *wiphy;
+ int link_id, i;
+ char *buf;
+
+ if (!ahvif)
+ return -EINVAL;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ wiphy = ahvif->ah->hw->wiphy;
+ wiphy_lock(wiphy);
+
+ links_map = ahvif->links_map;
+ for_each_set_bit(link_id, &links_map,
+ IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = rcu_dereference_protected(ahvif->link[link_id],
+ lockdep_is_held(&wiphy->mtx));
+
+ spin_lock_bh(&arvif->link_stats_lock);
+ linkstat = arvif->link_stats;
+ spin_unlock_bh(&arvif->link_stats_lock);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "link[%d] Tx Unicast Frames Enqueued = %d\n",
+ link_id, linkstat.tx_enqueued);
+ len += scnprintf(buf + len, buf_len - len,
+ "link[%d] Tx Broadcast Frames Enqueued = %d\n",
+ link_id, linkstat.tx_bcast_mcast);
+ len += scnprintf(buf + len, buf_len - len,
+ "link[%d] Tx Frames Completed = %d\n",
+ link_id, linkstat.tx_completed);
+ len += scnprintf(buf + len, buf_len - len,
+ "link[%d] Tx Frames Dropped = %d\n",
+ link_id, linkstat.tx_dropped);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "link[%d] Tx Frame descriptor Encap Type = ",
+ link_id);
+
+ len += scnprintf(buf + len, buf_len - len,
+ " raw:%d",
+ linkstat.tx_encap_type[0]);
+
+ len += scnprintf(buf + len, buf_len - len,
+ " native_wifi:%d",
+ linkstat.tx_encap_type[1]);
+
+ len += scnprintf(buf + len, buf_len - len,
+ " ethernet:%d",
+ linkstat.tx_encap_type[2]);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "\nlink[%d] Tx Frame descriptor Encrypt Type = ",
+ link_id);
+
+ for (i = 0; i < HAL_ENCRYPT_TYPE_MAX; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ " %d:%d", i,
+ linkstat.tx_encrypt_type[i]);
+ }
+ len += scnprintf(buf + len, buf_len - len,
+ "\nlink[%d] Tx Frame descriptor Type = buffer:%d extension:%d\n",
+ link_id, linkstat.tx_desc_type[0],
+ linkstat.tx_desc_type[1]);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "------------------------------------------------------\n");
+ }
+
+ wiphy_unlock(wiphy);
+
+ file->private_data = buf;
+
+ return 0;
+}
+
+static int ath12k_release_link_stats(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
+static ssize_t ath12k_read_link_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations ath12k_fops_link_stats = {
+ .open = ath12k_open_link_stats,
+ .release = ath12k_release_link_stats,
+ .read = ath12k_read_link_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath12k_debugfs_op_vif_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+
+ debugfs_create_file("link_stats", 0400, vif->debugfs_dir, ahvif,
+ &ath12k_fops_link_stats);
+}
+
+static ssize_t ath12k_debugfs_dump_device_dp_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath12k_base *ab = file->private_data;
+ struct ath12k_device_dp_stats *device_stats = &ab->device_stats;
+ int len = 0, i, j, ret;
+ struct ath12k *ar;
+ const int size = 4096;
+ static const char *rxdma_err[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX] = {
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR] = "Overflow",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR] = "MPDU len",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_FCS_ERR] = "FCS",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR] = "Decrypt",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR] = "TKIP MIC",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_UNECRYPTED_ERR] = "Unencrypt",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_MSDU_LEN_ERR] = "MSDU len",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_MSDU_LIMIT_ERR] = "MSDU limit",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_WIFI_PARSE_ERR] = "WiFi parse",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_PARSE_ERR] = "AMSDU parse",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_SA_TIMEOUT_ERR] = "SA timeout",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_DA_TIMEOUT_ERR] = "DA timeout",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_FLOW_TIMEOUT_ERR] = "Flow timeout",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR] = "Flush req",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_FRAG_ERR] = "AMSDU frag",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_MULTICAST_ECHO_ERR] = "Multicast echo",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_MISMATCH_ERR] = "AMSDU mismatch",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_UNAUTH_WDS_ERR] = "Unauth WDS",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_GRPCAST_AMSDU_WDS_ERR] = "AMSDU or WDS"};
+
+ static const char *reo_err[HAL_REO_DEST_RING_ERROR_CODE_MAX] = {
+ [HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO] = "Desc addr zero",
+ [HAL_REO_DEST_RING_ERROR_CODE_DESC_INVALID] = "Desc inval",
+ [HAL_REO_DEST_RING_ERROR_CODE_AMPDU_IN_NON_BA] = "AMPDU in non BA",
+ [HAL_REO_DEST_RING_ERROR_CODE_NON_BA_DUPLICATE] = "Non BA dup",
+ [HAL_REO_DEST_RING_ERROR_CODE_BA_DUPLICATE] = "BA dup",
+ [HAL_REO_DEST_RING_ERROR_CODE_FRAME_2K_JUMP] = "Frame 2k jump",
+ [HAL_REO_DEST_RING_ERROR_CODE_BAR_2K_JUMP] = "BAR 2k jump",
+ [HAL_REO_DEST_RING_ERROR_CODE_FRAME_OOR] = "Frame OOR",
+ [HAL_REO_DEST_RING_ERROR_CODE_BAR_OOR] = "BAR OOR",
+ [HAL_REO_DEST_RING_ERROR_CODE_NO_BA_SESSION] = "No BA session",
+ [HAL_REO_DEST_RING_ERROR_CODE_FRAME_SN_EQUALS_SSN] = "Frame SN equal SSN",
+ [HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED] = "PN check fail",
+ [HAL_REO_DEST_RING_ERROR_CODE_2K_ERR_FLAG_SET] = "2k err",
+ [HAL_REO_DEST_RING_ERROR_CODE_PN_ERR_FLAG_SET] = "PN err",
+ [HAL_REO_DEST_RING_ERROR_CODE_DESC_BLOCKED] = "Desc blocked"};
+
+ static const char *wbm_rel_src[HAL_WBM_REL_SRC_MODULE_MAX] = {
+ [HAL_WBM_REL_SRC_MODULE_TQM] = "TQM",
+ [HAL_WBM_REL_SRC_MODULE_RXDMA] = "Rxdma",
+ [HAL_WBM_REL_SRC_MODULE_REO] = "Reo",
+ [HAL_WBM_REL_SRC_MODULE_FW] = "FW",
+ [HAL_WBM_REL_SRC_MODULE_SW] = "SW"};
+
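+	/* buf is freed automatically on return via the __free(kfree) attribute */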
+ char *buf __free(kfree) = kzalloc(size, GFP_KERNEL);
+
+ if (!buf)
+ return -ENOMEM;
+
+ len += scnprintf(buf + len, size - len, "DEVICE RX STATS:\n\n");
+ len += scnprintf(buf + len, size - len, "err ring pkts: %u\n",
+ device_stats->err_ring_pkts);
+ len += scnprintf(buf + len, size - len, "Invalid RBM: %u\n\n",
+ device_stats->invalid_rbm);
+ len += scnprintf(buf + len, size - len, "RXDMA errors:\n");
+
+ for (i = 0; i < HAL_REO_ENTR_RING_RXDMA_ECODE_MAX; i++)
+ len += scnprintf(buf + len, size - len, "%s: %u\n",
+ rxdma_err[i], device_stats->rxdma_error[i]);
+
+ len += scnprintf(buf + len, size - len, "\nREO errors:\n");
+
+ for (i = 0; i < HAL_REO_DEST_RING_ERROR_CODE_MAX; i++)
+ len += scnprintf(buf + len, size - len, "%s: %u\n",
+ reo_err[i], device_stats->reo_error[i]);
+
+ len += scnprintf(buf + len, size - len, "\nHAL REO errors:\n");
+
+ for (i = 0; i < DP_REO_DST_RING_MAX; i++)
+ len += scnprintf(buf + len, size - len,
+ "ring%d: %u\n", i,
+ device_stats->hal_reo_error[i]);
+
+ len += scnprintf(buf + len, size - len, "\nDEVICE TX STATS:\n");
+ len += scnprintf(buf + len, size - len, "\nTCL Ring Full Failures:\n");
+
+ for (i = 0; i < DP_TCL_NUM_RING_MAX; i++)
+ len += scnprintf(buf + len, size - len, "ring%d: %u\n",
+ i, device_stats->tx_err.desc_na[i]);
+
+ len += scnprintf(buf + len, size - len,
+ "\nMisc Transmit Failures: %d\n",
+ atomic_read(&device_stats->tx_err.misc_fail));
+
+ len += scnprintf(buf + len, size - len, "\ntx_wbm_rel_source:");
+
+ for (i = 0; i < HAL_WBM_REL_SRC_MODULE_MAX; i++)
+ len += scnprintf(buf + len, size - len, " %d:%u",
+ i, device_stats->tx_wbm_rel_source[i]);
+
+ len += scnprintf(buf + len, size - len, "\n");
+
+ len += scnprintf(buf + len, size - len, "\ntqm_rel_reason:");
+
+ for (i = 0; i < MAX_TQM_RELEASE_REASON; i++)
+ len += scnprintf(buf + len, size - len, " %d:%u",
+ i, device_stats->tqm_rel_reason[i]);
+
+ len += scnprintf(buf + len, size - len, "\n");
+
+ len += scnprintf(buf + len, size - len, "\nfw_tx_status:");
+
+ for (i = 0; i < MAX_FW_TX_STATUS; i++)
+ len += scnprintf(buf + len, size - len, " %d:%u",
+ i, device_stats->fw_tx_status[i]);
+
+ len += scnprintf(buf + len, size - len, "\n");
+
+ len += scnprintf(buf + len, size - len, "\ntx_enqueued:");
+
+ for (i = 0; i < DP_TCL_NUM_RING_MAX; i++)
+ len += scnprintf(buf + len, size - len, " %d:%u", i,
+ device_stats->tx_enqueued[i]);
+
+ len += scnprintf(buf + len, size - len, "\n");
+
+ len += scnprintf(buf + len, size - len, "\ntx_completed:");
+
+ for (i = 0; i < DP_TCL_NUM_RING_MAX; i++)
+ len += scnprintf(buf + len, size - len, " %d:%u",
+ i, device_stats->tx_completed[i]);
+
+ len += scnprintf(buf + len, size - len, "\n");
+
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, DP_SW2HW_MACID(i));
+ if (ar) {
+ len += scnprintf(buf + len, size - len,
+ "\nradio%d tx_pending: %u\n", i,
+ atomic_read(&ar->dp.num_tx_pending));
+ }
+ }
+
+ len += scnprintf(buf + len, size - len, "\nREO Rx Received:\n");
+
+ for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
+ len += scnprintf(buf + len, size - len, "Ring%d:", i + 1);
+
+ for (j = 0; j < ATH12K_MAX_DEVICES; j++) {
+ len += scnprintf(buf + len, size - len,
+ "\t%d:%u", j,
+ device_stats->reo_rx[i][j]);
+ }
+
+ len += scnprintf(buf + len, size - len, "\n");
+ }
+
+ len += scnprintf(buf + len, size - len, "\nREO excep MSDU buf type:%u\n",
+ device_stats->reo_excep_msdu_buf_type);
+
+ len += scnprintf(buf + len, size - len, "\nRx WBM REL SRC Errors:\n");
+
+ for (i = 0; i < HAL_WBM_REL_SRC_MODULE_MAX; i++) {
+ len += scnprintf(buf + len, size - len, "%s:", wbm_rel_src[i]);
+
+ for (j = 0; j < ATH12K_MAX_DEVICES; j++) {
+ len += scnprintf(buf + len,
+ size - len,
+ "\t%d:%u", j,
+ device_stats->rx_wbm_rel_source[i][j]);
+ }
+
+ len += scnprintf(buf + len, size - len, "\n");
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ return ret;
+}
+
+static const struct file_operations fops_device_dp_stats = {
+ .read = ath12k_debugfs_dump_device_dp_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath12k_debugfs_pdev_create(struct ath12k_base *ab)
+{
+ debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab,
+ &fops_simulate_fw_crash);
+
+ debugfs_create_file("device_dp_stats", 0400, ab->debugfs_soc, ab,
+ &fops_device_dp_stats);
+}
+
void ath12k_debugfs_soc_create(struct ath12k_base *ab)
{
bool dput_needed;
- char soc_name[64] = { 0 };
+ char soc_name[64] = {};
struct dentry *debugfs_ath12k;
debugfs_ath12k = debugfs_lookup("ath12k", NULL);
@@ -68,12 +1254,223 @@ void ath12k_debugfs_soc_destroy(struct ath12k_base *ab)
*/
}
+static int ath12k_open_vdev_stats(struct inode *inode, struct file *file)
+{
+ struct ath12k *ar = inode->i_private;
+ struct ath12k_fw_stats_req_params param;
+ struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+ int ret;
+
+ guard(wiphy)(ath12k_ar_to_hw(ar)->wiphy);
+
+ if (!ah)
+ return -ENETDOWN;
+
+ if (ah->state != ATH12K_HW_STATE_ON)
+ return -ENETDOWN;
+
+ void *buf __free(kfree) = kzalloc(ATH12K_FW_STATS_BUF_SIZE, GFP_ATOMIC);
+ if (!buf)
+ return -ENOMEM;
+
+ param.pdev_id = ath12k_mac_get_target_pdev_id(ar);
+	/* VDEV stats are always sent for all active VDEVs by the FW */
+ param.vdev_id = 0;
+ param.stats_id = WMI_REQUEST_VDEV_STAT;
+
+ ret = ath12k_mac_get_fw_stats(ar, &param);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to request fw vdev stats: %d\n", ret);
+ return ret;
+ }
+
+ ath12k_wmi_fw_stats_dump(ar, &ar->fw_stats, param.stats_id,
+ buf);
+ ath12k_fw_stats_reset(ar);
+
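+	/* no_free_ptr() hands ownership to the file; freed in the release op */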
+ file->private_data = no_free_ptr(buf);
+
+ return 0;
+}
+
+static int ath12k_release_vdev_stats(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath12k_read_vdev_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_vdev_stats = {
+ .open = ath12k_open_vdev_stats,
+ .release = ath12k_release_vdev_stats,
+ .read = ath12k_read_vdev_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath12k_open_bcn_stats(struct inode *inode, struct file *file)
+{
+ struct ath12k *ar = inode->i_private;
+ struct ath12k_link_vif *arvif;
+ struct ath12k_fw_stats_req_params param;
+ struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+ int ret;
+
+ guard(wiphy)(ath12k_ar_to_hw(ar)->wiphy);
+
+ if (ah && ah->state != ATH12K_HW_STATE_ON)
+ return -ENETDOWN;
+
+ void *buf __free(kfree) = kzalloc(ATH12K_FW_STATS_BUF_SIZE, GFP_ATOMIC);
+ if (!buf)
+ return -ENOMEM;
+
+ param.pdev_id = ath12k_mac_get_target_pdev_id(ar);
+ param.stats_id = WMI_REQUEST_BCN_STAT;
+
+	/* loop over all active VDEVs to request bcn stats */
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (!arvif->is_up)
+ continue;
+
+ param.vdev_id = arvif->vdev_id;
+ ret = ath12k_mac_get_fw_stats(ar, &param);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to request fw bcn stats: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ath12k_wmi_fw_stats_dump(ar, &ar->fw_stats, param.stats_id,
+ buf);
+ ath12k_fw_stats_reset(ar);
+
+ file->private_data = no_free_ptr(buf);
+
+ return 0;
+}
+
+static int ath12k_release_bcn_stats(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath12k_read_bcn_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_bcn_stats = {
+ .open = ath12k_open_bcn_stats,
+ .release = ath12k_release_bcn_stats,
+ .read = ath12k_read_bcn_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath12k_open_pdev_stats(struct inode *inode, struct file *file)
+{
+ struct ath12k *ar = inode->i_private;
+ struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_fw_stats_req_params param;
+ int ret;
+
+ guard(wiphy)(ath12k_ar_to_hw(ar)->wiphy);
+
+ if (ah && ah->state != ATH12K_HW_STATE_ON)
+ return -ENETDOWN;
+
+ void *buf __free(kfree) = kzalloc(ATH12K_FW_STATS_BUF_SIZE, GFP_ATOMIC);
+ if (!buf)
+ return -ENOMEM;
+
+ param.pdev_id = ath12k_mac_get_target_pdev_id(ar);
+ param.vdev_id = 0;
+ param.stats_id = WMI_REQUEST_PDEV_STAT;
+
+ ret = ath12k_mac_get_fw_stats(ar, &param);
+ if (ret) {
+ ath12k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
+ return ret;
+ }
+
+ ath12k_wmi_fw_stats_dump(ar, &ar->fw_stats, param.stats_id,
+ buf);
+ ath12k_fw_stats_reset(ar);
+
+ file->private_data = no_free_ptr(buf);
+
+ return 0;
+}
+
+static int ath12k_release_pdev_stats(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath12k_read_pdev_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_pdev_stats = {
+ .open = ath12k_open_pdev_stats,
+ .release = ath12k_release_pdev_stats,
+ .read = ath12k_read_pdev_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static
+void ath12k_debugfs_fw_stats_register(struct ath12k *ar)
+{
+ struct dentry *fwstats_dir = debugfs_create_dir("fw_stats",
+ ar->debug.debugfs_pdev);
+
+	/* All fw stats debugfs files are created under the per-pdev
+	 * "fw_stats" directory.
+	 */
+ debugfs_create_file("vdev_stats", 0600, fwstats_dir, ar,
+ &fops_vdev_stats);
+ debugfs_create_file("beacon_stats", 0600, fwstats_dir, ar,
+ &fops_bcn_stats);
+ debugfs_create_file("pdev_stats", 0600, fwstats_dir, ar,
+ &fops_pdev_stats);
+
+ ath12k_fw_stats_init(ar);
+}
+
void ath12k_debugfs_register(struct ath12k *ar)
{
struct ath12k_base *ab = ar->ab;
struct ieee80211_hw *hw = ar->ah->hw;
char pdev_name[5];
- char buf[100] = {0};
+ char buf[100] = {};
scnprintf(pdev_name, sizeof(pdev_name), "%s%d", "mac", ar->pdev_idx);
@@ -91,7 +1488,18 @@ void ath12k_debugfs_register(struct ath12k *ar)
&fops_simulate_radar);
}
+ debugfs_create_file("tpc_stats", 0400, ar->debug.debugfs_pdev, ar,
+ &fops_tpc_stats);
+ debugfs_create_file("tpc_stats_type", 0200, ar->debug.debugfs_pdev,
+ ar, &fops_tpc_stats_type);
+ init_completion(&ar->debug.tpc_complete);
+
ath12k_debugfs_htt_stats_register(ar);
+ ath12k_debugfs_fw_stats_register(ar);
+
+ debugfs_create_file("ext_rx_stats", 0644,
+ ar->debug.debugfs_pdev, ar,
+ &fops_extd_rx_stats);
}
void ath12k_debugfs_unregister(struct ath12k *ar)
diff --git a/drivers/net/wireless/ath/ath12k/debugfs.h b/drivers/net/wireless/ath/ath12k/debugfs.h
index 8d64ba03aa9a..21641a8a0346 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs.h
+++ b/drivers/net/wireless/ath/ath12k/debugfs.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH12K_DEBUGFS_H_
@@ -12,6 +12,101 @@ void ath12k_debugfs_soc_create(struct ath12k_base *ab);
void ath12k_debugfs_soc_destroy(struct ath12k_base *ab);
void ath12k_debugfs_register(struct ath12k *ar);
void ath12k_debugfs_unregister(struct ath12k *ar);
+void ath12k_debugfs_op_vif_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+void ath12k_debugfs_pdev_create(struct ath12k_base *ab);
+
+static inline bool ath12k_debugfs_is_extd_rx_stats_enabled(struct ath12k *ar)
+{
+ return ar->debug.extd_rx_stats;
+}
+
+static inline int ath12k_debugfs_rx_filter(struct ath12k *ar)
+{
+ return ar->debug.rx_filter;
+}
+
+#define ATH12K_CCK_RATES 4
+#define ATH12K_OFDM_RATES 8
+#define ATH12K_HT_RATES 8
+#define ATH12K_VHT_RATES 12
+#define ATH12K_HE_RATES 12
+#define ATH12K_HE_RATES_WITH_EXTRA_MCS 14
+#define ATH12K_EHT_RATES 16
+#define HE_EXTRA_MCS_SUPPORT GENMASK(31, 16)
+#define ATH12K_NSS_1 1
+#define ATH12K_NSS_4 4
+#define ATH12K_NSS_8 8
+#define ATH12K_HW_NSS(_rcode) (((_rcode) >> 5) & 0x7)
+#define TPC_STATS_WAIT_TIME (1 * HZ)
+#define MAX_TPC_PREAM_STR_LEN 7
+#define TPC_INVAL -128
+#define TPC_MAX 127
+#define TPC_STATS_TOT_ROW 700
+#define TPC_STATS_TOT_COLUMN 100
+#define MODULATION_LIMIT 126
+
+#define ATH12K_TPC_STATS_BUF_SIZE (TPC_STATS_TOT_ROW * TPC_STATS_TOT_COLUMN)
+
+enum wmi_tpc_pream_bw {
+ WMI_TPC_PREAM_CCK,
+ WMI_TPC_PREAM_OFDM,
+ WMI_TPC_PREAM_HT20,
+ WMI_TPC_PREAM_HT40,
+ WMI_TPC_PREAM_VHT20,
+ WMI_TPC_PREAM_VHT40,
+ WMI_TPC_PREAM_VHT80,
+ WMI_TPC_PREAM_VHT160,
+ WMI_TPC_PREAM_HE20,
+ WMI_TPC_PREAM_HE40,
+ WMI_TPC_PREAM_HE80,
+ WMI_TPC_PREAM_HE160,
+ WMI_TPC_PREAM_EHT20,
+ WMI_TPC_PREAM_EHT40,
+ WMI_TPC_PREAM_EHT60,
+ WMI_TPC_PREAM_EHT80,
+ WMI_TPC_PREAM_EHT120,
+ WMI_TPC_PREAM_EHT140,
+ WMI_TPC_PREAM_EHT160,
+ WMI_TPC_PREAM_EHT200,
+ WMI_TPC_PREAM_EHT240,
+ WMI_TPC_PREAM_EHT280,
+ WMI_TPC_PREAM_EHT320,
+ WMI_TPC_PREAM_MAX
+};
+
+enum ath12k_debug_tpc_stats_ctl_mode {
+ ATH12K_TPC_STATS_CTL_MODE_LEGACY_5GHZ_6GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_HT_VHT20_5GHZ_6GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_HE_EHT20_5GHZ_6GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_HT_VHT40_5GHZ_6GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_HE_EHT40_5GHZ_6GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_VHT80_5GHZ_6GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_HE_EHT80_5GHZ_6GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_VHT160_5GHZ_6GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_HE_EHT160_5GHZ_6GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_HE_EHT320_5GHZ_6GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_CCK_2GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_LEGACY_2GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_HT20_2GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_HT40_2GHZ,
+
+ ATH12K_TPC_STATS_CTL_MODE_EHT80_SU_PUNC20 = 23,
+ ATH12K_TPC_STATS_CTL_MODE_EHT160_SU_PUNC20,
+ ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC40,
+ ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC80,
+ ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC120
+};
+
+enum ath12k_debug_tpc_stats_support_modes {
+ ATH12K_TPC_STATS_SUPPORT_160 = 0,
+ ATH12K_TPC_STATS_SUPPORT_320,
+ ATH12K_TPC_STATS_SUPPORT_AX,
+ ATH12K_TPC_STATS_SUPPORT_AX_EXTRA_MCS,
+ ATH12K_TPC_STATS_SUPPORT_BE,
+ ATH12K_TPC_STATS_SUPPORT_BE_PUNC,
+};
#else
static inline void ath12k_debugfs_soc_create(struct ath12k_base *ab)
{
@@ -29,6 +124,24 @@ static inline void ath12k_debugfs_unregister(struct ath12k *ar)
{
}
+static inline bool ath12k_debugfs_is_extd_rx_stats_enabled(struct ath12k *ar)
+{
+ return false;
+}
+
+static inline int ath12k_debugfs_rx_filter(struct ath12k *ar)
+{
+ return 0;
+}
+
+static inline void ath12k_debugfs_op_vif_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+}
+
+static inline void ath12k_debugfs_pdev_create(struct ath12k_base *ab)
+{
+}
#endif /* CONFIG_ATH12K_DEBUGFS */
#endif /* _ATH12K_DEBUGFS_H_ */
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
index f1b7e74aefe4..48b010a1b756 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
+++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/vmalloc.h>
@@ -12,8 +12,8 @@
#include "dp_rx.h"
static u32
-print_array_to_buf(u8 *buf, u32 offset, const char *header,
- const __le32 *array, u32 array_len, const char *footer)
+print_array_to_buf_index(u8 *buf, u32 offset, const char *header, u32 stats_index,
+ const __le32 *array, u32 array_len, const char *footer)
{
int index = 0;
u8 i;
@@ -26,7 +26,7 @@ print_array_to_buf(u8 *buf, u32 offset, const char *header,
for (i = 0; i < array_len; i++) {
index += scnprintf(buf + offset + index,
(ATH12K_HTT_STATS_BUF_SIZE - offset) - index,
- " %u:%u,", i, le32_to_cpu(array[i]));
+ " %u:%u,", stats_index++, le32_to_cpu(array[i]));
}
/* To overwrite the last trailing comma */
index--;
@@ -40,6 +40,115 @@ print_array_to_buf(u8 *buf, u32 offset, const char *header,
return index;
}
+static u32
+print_array_to_buf(u8 *buf, u32 offset, const char *header,
+ const __le32 *array, u32 array_len, const char *footer)
+{
+ return print_array_to_buf_index(buf, offset, header, 0, array, array_len,
+ footer);
+}
+
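+/* Variant of print_array_to_buf_index() for arrays of signed 8-bit values */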
+static u32
+print_array_to_buf_s8(u8 *buf, u32 offset, const char *header, u32 stats_index,
+ const s8 *array, u32 array_len, const char *footer)
+{
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ int index = 0;
+ u8 i;
+
+ if (header)
+ index += scnprintf(buf + offset, buf_len - offset, "%s = ", header);
+
+ for (i = 0; i < array_len; i++) {
+ index += scnprintf(buf + offset + index, (buf_len - offset) - index,
+ " %u:%d,", stats_index++, array[i]);
+ }
+
+ index--;
+ if ((offset + index) < buf_len)
+ buf[offset + index] = '\0';
+
+ if (footer) {
+ index += scnprintf(buf + offset + index, (buf_len - offset) - index,
+ "%s", footer);
+ }
+
+ return index;
+}
+
+static const char *ath12k_htt_ax_tx_rx_ru_size_to_str(u8 ru_size)
+{
+ switch (ru_size) {
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_26:
+ return "26";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_52:
+ return "52";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_106:
+ return "106";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_242:
+ return "242";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_484:
+ return "484";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996:
+ return "996";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996x2:
+ return "996x2";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *ath12k_htt_be_tx_rx_ru_size_to_str(u8 ru_size)
+{
+ switch (ru_size) {
+ case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_26:
+ return "26";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_52:
+ return "52";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_52_26:
+ return "52+26";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_106:
+ return "106";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_106_26:
+ return "106+26";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_242:
+ return "242";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_484:
+ return "484";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_484_242:
+ return "484+242";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996:
+ return "996";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996_484:
+ return "996+484";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996_484_242:
+ return "996+484+242";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x2:
+ return "996x2";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x2_484:
+ return "996x2+484";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x3:
+ return "996x3";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x3_484:
+ return "996x3+484";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x4:
+ return "996x4";
+ default:
+ return "unknown";
+ }
+}
+
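+/* Pick the RU size string table based on RU type: single-RU only (11ax) or
+ * single-and-multi RU (11be).
+ */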
+static const char*
+ath12k_tx_ru_size_to_str(enum ath12k_htt_stats_ru_type ru_type, u8 ru_size)
+{
+ if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY)
+ return ath12k_htt_ax_tx_rx_ru_size_to_str(ru_size);
+ else if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU)
+ return ath12k_htt_be_tx_rx_ru_size_to_str(ru_size);
+ else
+ return "unknown";
+}
+
static void
htt_print_tx_pdev_stats_cmn_tlv(const void *tag_buf, u16 tag_len,
struct debug_htt_stats_req *stats_req)
@@ -1447,6 +1556,3987 @@ ath12k_htt_print_tx_de_compl_stats_tlv(const void *tag_buf, u16 tag_len,
stats_req->buf_len = len;
}
+static void
+ath12k_htt_print_tx_selfgen_cmn_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_selfgen_cmn_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_CMN_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "su_bar = %u\n",
+ le32_to_cpu(htt_stats_buf->su_bar));
+ len += scnprintf(buf + len, buf_len - len, "rts = %u\n",
+ le32_to_cpu(htt_stats_buf->rts));
+ len += scnprintf(buf + len, buf_len - len, "cts2self = %u\n",
+ le32_to_cpu(htt_stats_buf->cts2self));
+ len += scnprintf(buf + len, buf_len - len, "qos_null = %u\n",
+ le32_to_cpu(htt_stats_buf->qos_null));
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_1 = %u\n",
+ le32_to_cpu(htt_stats_buf->delayed_bar_1));
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_2 = %u\n",
+ le32_to_cpu(htt_stats_buf->delayed_bar_2));
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_3 = %u\n",
+ le32_to_cpu(htt_stats_buf->delayed_bar_3));
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_4 = %u\n",
+ le32_to_cpu(htt_stats_buf->delayed_bar_4));
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_5 = %u\n",
+ le32_to_cpu(htt_stats_buf->delayed_bar_5));
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_6 = %u\n",
+ le32_to_cpu(htt_stats_buf->delayed_bar_6));
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_7 = %u\n\n",
+ le32_to_cpu(htt_stats_buf->delayed_bar_7));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_selfgen_ac_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_selfgen_ac_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ac_su_ndpa_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->ac_su_ndpa));
+ len += scnprintf(buf + len, buf_len - len, "ac_su_ndp_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->ac_su_ndp));
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndpa_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->ac_mu_mimo_ndpa));
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndp_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->ac_mu_mimo_ndp));
+ len += print_array_to_buf_index(buf, len, "ac_mu_mimo_brpollX_tried = ", 1,
+ htt_stats_buf->ac_mu_mimo_brpoll,
+ ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS - 1,
+ "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_selfgen_ax_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_selfgen_ax_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_su_ndpa_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_su_ndpa));
+ len += scnprintf(buf + len, buf_len - len, "ax_su_ndp_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_su_ndp));
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndpa_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_mu_mimo_ndpa));
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndp_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_mu_mimo_ndp));
+ len += print_array_to_buf_index(buf, len, "ax_mu_mimo_brpollX_tried = ", 1,
+ htt_stats_buf->ax_mu_mimo_brpoll,
+ ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS - 1, "\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_basic_trigger));
+ len += scnprintf(buf + len, buf_len - len, "ax_ulmumimo_total_trigger = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_ulmumimo_trigger));
+ len += scnprintf(buf + len, buf_len - len, "ax_bsr_trigger = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_bsr_trigger));
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trigger = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_mu_bar_trigger));
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_rts_trigger = %u\n\n",
+ le32_to_cpu(htt_stats_buf->ax_mu_rts_trigger));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_selfgen_be_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_selfgen_be_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_BE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "be_su_ndpa_queued = %u\n",
+ le32_to_cpu(htt_stats_buf->be_su_ndpa_queued));
+ len += scnprintf(buf + len, buf_len - len, "be_su_ndpa_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->be_su_ndpa));
+ len += scnprintf(buf + len, buf_len - len, "be_su_ndp_queued = %u\n",
+ le32_to_cpu(htt_stats_buf->be_su_ndp_queued));
+ len += scnprintf(buf + len, buf_len - len, "be_su_ndp_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->be_su_ndp));
+ len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndpa_queued = %u\n",
+ le32_to_cpu(htt_stats_buf->be_mu_mimo_ndpa_queued));
+ len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndpa_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->be_mu_mimo_ndpa));
+ len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndp_queued = %u\n",
+ le32_to_cpu(htt_stats_buf->be_mu_mimo_ndp_queued));
+ len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndp_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->be_mu_mimo_ndp));
+ len += print_array_to_buf_index(buf, len, "be_mu_mimo_brpollX_queued = ", 1,
+ htt_stats_buf->be_mu_mimo_brpoll_queued,
+ ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS - 1,
+ "\n");
+ len += print_array_to_buf_index(buf, len, "be_mu_mimo_brpollX_tried = ", 1,
+ htt_stats_buf->be_mu_mimo_brpoll,
+ ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS - 1,
+ "\n");
+ len += print_array_to_buf(buf, len, "be_ul_mumimo_trigger = ",
+ htt_stats_buf->be_ul_mumimo_trigger,
+ ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS, "\n");
+ len += scnprintf(buf + len, buf_len - len, "be_basic_trigger = %u\n",
+ le32_to_cpu(htt_stats_buf->be_basic_trigger));
+ len += scnprintf(buf + len, buf_len - len, "be_ulmumimo_total_trigger = %u\n",
+ le32_to_cpu(htt_stats_buf->be_ulmumimo_trigger));
+ len += scnprintf(buf + len, buf_len - len, "be_bsr_trigger = %u\n",
+ le32_to_cpu(htt_stats_buf->be_bsr_trigger));
+ len += scnprintf(buf + len, buf_len - len, "be_mu_bar_trigger = %u\n",
+ le32_to_cpu(htt_stats_buf->be_mu_bar_trigger));
+ len += scnprintf(buf + len, buf_len - len, "be_mu_rts_trigger = %u\n\n",
+ le32_to_cpu(htt_stats_buf->be_mu_rts_trigger));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_selfgen_ac_err_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_selfgen_ac_err_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_ERR_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ac_su_ndp_err = %u\n",
+ le32_to_cpu(htt_stats_buf->ac_su_ndp_err));
+ len += scnprintf(buf + len, buf_len - len, "ac_su_ndpa_err = %u\n",
+ le32_to_cpu(htt_stats_buf->ac_su_ndpa_err));
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndpa_err = %u\n",
+ le32_to_cpu(htt_stats_buf->ac_mu_mimo_ndpa_err));
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndp_err = %u\n",
+ le32_to_cpu(htt_stats_buf->ac_mu_mimo_ndp_err));
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp1_err = %u\n",
+ le32_to_cpu(htt_stats_buf->ac_mu_mimo_brp1_err));
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp2_err = %u\n",
+ le32_to_cpu(htt_stats_buf->ac_mu_mimo_brp2_err));
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp3_err = %u\n\n",
+ le32_to_cpu(htt_stats_buf->ac_mu_mimo_brp3_err));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_selfgen_ax_err_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_selfgen_ax_err_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_ERR_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_su_ndp_err = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_su_ndp_err));
+ len += scnprintf(buf + len, buf_len - len, "ax_su_ndpa_err = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_su_ndpa_err));
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndpa_err = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_mu_mimo_ndpa_err));
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndp_err = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_mu_mimo_ndp_err));
+ len += print_array_to_buf_index(buf, len, "ax_mu_mimo_brpX_err", 1,
+ htt_stats_buf->ax_mu_mimo_brp_err,
+ ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS - 1,
+ "\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger_err = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_basic_trigger_err));
+ len += scnprintf(buf + len, buf_len - len, "ax_ulmumimo_total_trigger_err = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_ulmumimo_trigger_err));
+ len += scnprintf(buf + len, buf_len - len, "ax_bsr_trigger_err = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_bsr_trigger_err));
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trigger_err = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_mu_bar_trigger_err));
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_rts_trigger_err = %u\n\n",
+ le32_to_cpu(htt_stats_buf->ax_mu_rts_trigger_err));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_selfgen_be_err_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_selfgen_be_err_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_BE_ERR_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "be_su_ndp_err = %u\n",
+ le32_to_cpu(htt_stats_buf->be_su_ndp_err));
+ len += scnprintf(buf + len, buf_len - len, "be_su_ndp_flushed = %u\n",
+ le32_to_cpu(htt_stats_buf->be_su_ndp_flushed));
+ len += scnprintf(buf + len, buf_len - len, "be_su_ndpa_err = %u\n",
+ le32_to_cpu(htt_stats_buf->be_su_ndpa_err));
+ len += scnprintf(buf + len, buf_len - len, "be_su_ndpa_flushed = %u\n",
+ le32_to_cpu(htt_stats_buf->be_su_ndpa_flushed));
+ len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndpa_err = %u\n",
+ le32_to_cpu(htt_stats_buf->be_mu_mimo_ndpa_err));
+ len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndpa_flushed = %u\n",
+ le32_to_cpu(htt_stats_buf->be_mu_mimo_ndpa_flushed));
+ len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndp_err = %u\n",
+ le32_to_cpu(htt_stats_buf->be_mu_mimo_ndp_err));
+ len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndp_flushed = %u\n",
+ le32_to_cpu(htt_stats_buf->be_mu_mimo_ndp_flushed));
+ len += print_array_to_buf_index(buf, len, "be_mu_mimo_brpX_err", 1,
+ htt_stats_buf->be_mu_mimo_brp_err,
+ ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS - 1,
+ "\n");
+ len += print_array_to_buf_index(buf, len, "be_mu_mimo_brpollX_flushed", 1,
+ htt_stats_buf->be_mu_mimo_brpoll_flushed,
+ ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS - 1,
+ "\n");
+ len += print_array_to_buf(buf, len, "be_mu_mimo_num_cbf_rcvd_on_brp_err",
+ htt_stats_buf->be_mu_mimo_brp_err_num_cbf_rxd,
+ ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS, "\n");
+ len += print_array_to_buf(buf, len, "be_ul_mumimo_trigger_err",
+ htt_stats_buf->be_ul_mumimo_trigger_err,
+ ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS, "\n");
+ len += scnprintf(buf + len, buf_len - len, "be_basic_trigger_err = %u\n",
+ le32_to_cpu(htt_stats_buf->be_basic_trigger_err));
+ len += scnprintf(buf + len, buf_len - len, "be_ulmumimo_total_trig_err = %u\n",
+ le32_to_cpu(htt_stats_buf->be_ulmumimo_trigger_err));
+ len += scnprintf(buf + len, buf_len - len, "be_bsr_trigger_err = %u\n",
+ le32_to_cpu(htt_stats_buf->be_bsr_trigger_err));
+ len += scnprintf(buf + len, buf_len - len, "be_mu_bar_trigger_err = %u\n",
+ le32_to_cpu(htt_stats_buf->be_mu_bar_trigger_err));
+ len += scnprintf(buf + len, buf_len - len, "be_mu_rts_trigger_err = %u\n\n",
+ le32_to_cpu(htt_stats_buf->be_mu_rts_trigger_err));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_selfgen_ac_sched_status_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats)
+{
+ const struct ath12k_htt_tx_selfgen_ac_sched_status_stats_tlv *htt_stats_buf =
+ tag_buf;
+ u8 *buf = stats->buf;
+ u32 len = stats->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_SELFGEN_AC_SCHED_STATUS_STATS_TLV:\n");
+ len += print_array_to_buf(buf, len, "ac_su_ndpa_sch_status",
+ htt_stats_buf->ac_su_ndpa_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "ac_su_ndp_sch_status",
+ htt_stats_buf->ac_su_ndp_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "ac_mu_mimo_ndpa_sch_status",
+ htt_stats_buf->ac_mu_mimo_ndpa_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "ac_mu_mimo_ndp_sch_status",
+ htt_stats_buf->ac_mu_mimo_ndp_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "ac_mu_mimo_brp_sch_status",
+ htt_stats_buf->ac_mu_mimo_brp_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "ac_su_ndp_sch_flag_err",
+ htt_stats_buf->ac_su_ndp_sch_flag_err,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n");
+ len += print_array_to_buf(buf, len, "ac_mu_mimo_ndp_sch_flag_err",
+ htt_stats_buf->ac_mu_mimo_ndp_sch_flag_err,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n");
+ len += print_array_to_buf(buf, len, "ac_mu_mimo_brp_sch_flag_err",
+ htt_stats_buf->ac_mu_mimo_brp_sch_flag_err,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n\n");
+
+ stats->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_selfgen_ax_sched_status_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats)
+{
+ const struct ath12k_htt_tx_selfgen_ax_sched_status_stats_tlv *htt_stats_buf =
+ tag_buf;
+ u8 *buf = stats->buf;
+ u32 len = stats->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_SELFGEN_AX_SCHED_STATUS_STATS_TLV:\n");
+ len += print_array_to_buf(buf, len, "ax_su_ndpa_sch_status",
+ htt_stats_buf->ax_su_ndpa_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "ax_su_ndp_sch_status",
+ htt_stats_buf->ax_su_ndp_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "ax_mu_mimo_ndpa_sch_status",
+ htt_stats_buf->ax_mu_mimo_ndpa_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "ax_mu_mimo_ndp_sch_status",
+ htt_stats_buf->ax_mu_mimo_ndp_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "ax_mu_brp_sch_status",
+ htt_stats_buf->ax_mu_brp_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "ax_mu_bar_sch_status",
+ htt_stats_buf->ax_mu_bar_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "ax_basic_trig_sch_status",
+ htt_stats_buf->ax_basic_trig_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "ax_su_ndp_sch_flag_err",
+ htt_stats_buf->ax_su_ndp_sch_flag_err,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n");
+ len += print_array_to_buf(buf, len, "ax_mu_mimo_ndp_sch_flag_err",
+ htt_stats_buf->ax_mu_mimo_ndp_sch_flag_err,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n");
+ len += print_array_to_buf(buf, len, "ax_mu_brp_sch_flag_err",
+ htt_stats_buf->ax_mu_brp_sch_flag_err,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n");
+ len += print_array_to_buf(buf, len, "ax_mu_bar_sch_flag_err",
+ htt_stats_buf->ax_mu_bar_sch_flag_err,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n");
+ len += print_array_to_buf(buf, len, "ax_basic_trig_sch_flag_err",
+ htt_stats_buf->ax_basic_trig_sch_flag_err,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n");
+ len += print_array_to_buf(buf, len, "ax_ulmumimo_trig_sch_status",
+ htt_stats_buf->ax_ulmumimo_trig_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "ax_ulmumimo_trig_sch_flag_err",
+ htt_stats_buf->ax_ulmumimo_trig_sch_flag_err,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n\n");
+
+ stats->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_selfgen_be_sched_status_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats)
+{
+ const struct ath12k_htt_tx_selfgen_be_sched_status_stats_tlv *htt_stats_buf =
+ tag_buf;
+ u8 *buf = stats->buf;
+ u32 len = stats->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_SELFGEN_BE_SCHED_STATUS_STATS_TLV:\n");
+ len += print_array_to_buf(buf, len, "be_su_ndpa_sch_status",
+ htt_stats_buf->be_su_ndpa_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "be_su_ndp_sch_status",
+ htt_stats_buf->be_su_ndp_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "be_mu_mimo_ndpa_sch_status",
+ htt_stats_buf->be_mu_mimo_ndpa_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "be_mu_mimo_ndp_sch_status",
+ htt_stats_buf->be_mu_mimo_ndp_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "be_mu_brp_sch_status",
+ htt_stats_buf->be_mu_brp_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "be_mu_bar_sch_status",
+ htt_stats_buf->be_mu_bar_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "be_basic_trig_sch_status",
+ htt_stats_buf->be_basic_trig_sch_status,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "be_su_ndp_sch_flag_err",
+ htt_stats_buf->be_su_ndp_sch_flag_err,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n");
+ len += print_array_to_buf(buf, len, "be_mu_mimo_ndp_sch_flag_err",
+ htt_stats_buf->be_mu_mimo_ndp_sch_flag_err,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n");
+ len += print_array_to_buf(buf, len, "be_mu_brp_sch_flag_err",
+ htt_stats_buf->be_mu_brp_sch_flag_err,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n");
+ len += print_array_to_buf(buf, len, "be_mu_bar_sch_flag_err",
+ htt_stats_buf->be_mu_bar_sch_flag_err,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n");
+ len += print_array_to_buf(buf, len, "be_basic_trig_sch_flag_err",
+ htt_stats_buf->be_basic_trig_sch_flag_err,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n");
+	len += print_array_to_buf(buf, len, "be_ulmumimo_trig_sch_status",
+				  htt_stats_buf->be_ulmumimo_trig_sch_status,
+				  ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+ len += print_array_to_buf(buf, len, "be_ulmumimo_trig_sch_flag_err",
+ htt_stats_buf->be_ulmumimo_trig_sch_flag_err,
+ ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n\n");
+
+ stats->buf_len = len;
+}
+
+static void
+ath12k_htt_print_stats_string_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_stats_string_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 i;
+ u16 index = 0;
+ u32 datum;
+ char data[ATH12K_HTT_MAX_STRING_LEN] = {};
+
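+	/* tag_len is in bytes; the string payload is packed four chars per
+	 * 32-bit word.
+	 */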
+ tag_len = tag_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_STATS_STRING_TLV:\n");
+ for (i = 0; i < tag_len; i++) {
+ datum = __le32_to_cpu(htt_stats_buf->data[i]);
+ index += scnprintf(&data[index], ATH12K_HTT_MAX_STRING_LEN - index,
+ "%.*s", 4, (char *)&datum);
+ if (index >= ATH12K_HTT_MAX_STRING_LEN)
+ break;
+ }
+ len += scnprintf(buf + len, buf_len - len, "data = %s\n\n", data);
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_sring_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_sring_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+ u32 avail_words;
+ u32 head_tail_ptr;
+ u32 sring_stat;
+ u32 tail_ptr;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__ring_id__arena__ep);
+ avail_words = __le32_to_cpu(htt_stats_buf->num_avail_words__num_valid_words);
+ head_tail_ptr = __le32_to_cpu(htt_stats_buf->head_ptr__tail_ptr);
+ sring_stat = __le32_to_cpu(htt_stats_buf->consumer_empty__producer_full);
+ tail_ptr = __le32_to_cpu(htt_stats_buf->prefetch_count__internal_tail_ptr);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SRING_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_SRING_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "ring_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_SRING_STATS_RING_ID));
+ len += scnprintf(buf + len, buf_len - len, "arena = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_SRING_STATS_ARENA));
+ len += scnprintf(buf + len, buf_len - len, "ep = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_SRING_STATS_EP));
+ len += scnprintf(buf + len, buf_len - len, "base_addr_lsb = 0x%x\n",
+ le32_to_cpu(htt_stats_buf->base_addr_lsb));
+ len += scnprintf(buf + len, buf_len - len, "base_addr_msb = 0x%x\n",
+ le32_to_cpu(htt_stats_buf->base_addr_msb));
+ len += scnprintf(buf + len, buf_len - len, "ring_size = %u\n",
+ le32_to_cpu(htt_stats_buf->ring_size));
+ len += scnprintf(buf + len, buf_len - len, "elem_size = %u\n",
+ le32_to_cpu(htt_stats_buf->elem_size));
+ len += scnprintf(buf + len, buf_len - len, "num_avail_words = %u\n",
+ u32_get_bits(avail_words,
+ ATH12K_HTT_SRING_STATS_NUM_AVAIL_WORDS));
+ len += scnprintf(buf + len, buf_len - len, "num_valid_words = %u\n",
+ u32_get_bits(avail_words,
+ ATH12K_HTT_SRING_STATS_NUM_VALID_WORDS));
+ len += scnprintf(buf + len, buf_len - len, "head_ptr = %u\n",
+ u32_get_bits(head_tail_ptr, ATH12K_HTT_SRING_STATS_HEAD_PTR));
+ len += scnprintf(buf + len, buf_len - len, "tail_ptr = %u\n",
+ u32_get_bits(head_tail_ptr, ATH12K_HTT_SRING_STATS_TAIL_PTR));
+ len += scnprintf(buf + len, buf_len - len, "consumer_empty = %u\n",
+ u32_get_bits(sring_stat,
+ ATH12K_HTT_SRING_STATS_CONSUMER_EMPTY));
+ len += scnprintf(buf + len, buf_len - len, "producer_full = %u\n",
+ u32_get_bits(head_tail_ptr,
+ ATH12K_HTT_SRING_STATS_PRODUCER_FULL));
+ len += scnprintf(buf + len, buf_len - len, "prefetch_count = %u\n",
+ u32_get_bits(tail_ptr, ATH12K_HTT_SRING_STATS_PREFETCH_COUNT));
+ len += scnprintf(buf + len, buf_len - len, "internal_tail_ptr = %u\n\n",
+ u32_get_bits(tail_ptr,
+ ATH12K_HTT_SRING_STATS_INTERNAL_TAIL_PTR));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_sfm_cmn_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_sfm_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "buf_total = %u\n",
+ le32_to_cpu(htt_stats_buf->buf_total));
+ len += scnprintf(buf + len, buf_len - len, "mem_empty = %u\n",
+ le32_to_cpu(htt_stats_buf->mem_empty));
+ len += scnprintf(buf + len, buf_len - len, "deallocate_bufs = %u\n",
+ le32_to_cpu(htt_stats_buf->deallocate_bufs));
+ len += scnprintf(buf + len, buf_len - len, "num_records = %u\n\n",
+ le32_to_cpu(htt_stats_buf->num_records));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_sfm_client_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_sfm_client_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CLIENT_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "client_id = %u\n",
+ le32_to_cpu(htt_stats_buf->client_id));
+ len += scnprintf(buf + len, buf_len - len, "buf_min = %u\n",
+ le32_to_cpu(htt_stats_buf->buf_min));
+ len += scnprintf(buf + len, buf_len - len, "buf_max = %u\n",
+ le32_to_cpu(htt_stats_buf->buf_max));
+ len += scnprintf(buf + len, buf_len - len, "buf_busy = %u\n",
+ le32_to_cpu(htt_stats_buf->buf_busy));
+ len += scnprintf(buf + len, buf_len - len, "buf_alloc = %u\n",
+ le32_to_cpu(htt_stats_buf->buf_alloc));
+ len += scnprintf(buf + len, buf_len - len, "buf_avail = %u\n",
+ le32_to_cpu(htt_stats_buf->buf_avail));
+ len += scnprintf(buf + len, buf_len - len, "num_users = %u\n\n",
+ le32_to_cpu(htt_stats_buf->num_users));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_sfm_client_user_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_sfm_client_user_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = tag_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CLIENT_USER_TLV:\n");
+ len += print_array_to_buf(buf, len, "dwords_used_by_user_n",
+ htt_stats_buf->dwords_used_by_user_n,
+ num_elems, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_pdev_mu_mimo_sch_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_mu_mimo_sch_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 i;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_MU_MIMO_SCH_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_posted = %u\n",
+ le32_to_cpu(htt_stats_buf->mu_mimo_sch_posted));
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_failed = %u\n",
+ le32_to_cpu(htt_stats_buf->mu_mimo_sch_failed));
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n",
+ le32_to_cpu(htt_stats_buf->mu_mimo_ppdu_posted));
+ len += scnprintf(buf + len, buf_len - len,
+ "\nac_mu_mimo_sch_posted_per_group_index %u (SU) = %u\n", 0,
+ le32_to_cpu(htt_stats_buf->ac_mu_mimo_per_grp_sz[0]));
+ for (i = 1; i < ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_sch_posted_per_group_index %u ", i);
+ len += scnprintf(buf + len, buf_len - len,
+ "(TOTAL STREAMS = %u) = %u\n", i + 1,
+ le32_to_cpu(htt_stats_buf->ac_mu_mimo_per_grp_sz[i]));
+ }
+
+ for (i = 0; i < ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_sch_posted_per_group_index %u ",
+ i + ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS);
+ len += scnprintf(buf + len, buf_len - len,
+ "(TOTAL STREAMS = %u) = %u\n",
+ i + ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS + 1,
+ le32_to_cpu(htt_stats_buf->ac_mu_mimo_grp_sz_ext[i]));
+ }
+
+ len += scnprintf(buf + len, buf_len - len,
+ "\nax_mu_mimo_sch_posted_per_group_index %u (SU) = %u\n", 0,
+ le32_to_cpu(htt_stats_buf->ax_mu_mimo_per_grp_sz[0]));
+ for (i = 1; i < ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_sch_posted_per_group_index %u ", i);
+ len += scnprintf(buf + len, buf_len - len,
+ "(TOTAL STREAMS = %u) = %u\n", i + 1,
+ le32_to_cpu(htt_stats_buf->ax_mu_mimo_per_grp_sz[i]));
+ }
+
+ len += scnprintf(buf + len, buf_len - len,
+ "\nbe_mu_mimo_sch_posted_per_group_index %u (SU) = %u\n", 0,
+ le32_to_cpu(htt_stats_buf->be_mu_mimo_per_grp_sz[0]));
+ for (i = 1; i < ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "be_mu_mimo_sch_posted_per_group_index %u ", i);
+ len += scnprintf(buf + len, buf_len - len,
+ "(TOTAL STREAMS = %u) = %u\n", i + 1,
+ le32_to_cpu(htt_stats_buf->be_mu_mimo_per_grp_sz[i]));
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n11ac MU_MIMO SCH STATS:\n");
+ for (i = 0; i < ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_sch_nusers_");
+ len += scnprintf(buf + len, buf_len - len, "%u = %u\n", i,
+ le32_to_cpu(htt_stats_buf->ac_mu_mimo_sch_nusers[i]));
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n11ax MU_MIMO SCH STATS:\n");
+ for (i = 0; i < ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_sch_nusers_");
+ len += scnprintf(buf + len, buf_len - len, "%u = %u\n", i,
+ le32_to_cpu(htt_stats_buf->ax_mu_mimo_sch_nusers[i]));
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n11be MU_MIMO SCH STATS:\n");
+ for (i = 0; i < ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_sch_nusers_");
+ len += scnprintf(buf + len, buf_len - len, "%u = %u\n", i,
+ le32_to_cpu(htt_stats_buf->be_mu_mimo_sch_nusers[i]));
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n11ax OFDMA SCH STATS:\n");
+ for (i = 0; i < ATH12K_HTT_TX_NUM_OFDMA_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_sch_nusers_%u = %u\n", i,
+ le32_to_cpu(htt_stats_buf->ax_ofdma_sch_nusers[i]));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_ofdma_basic_sch_nusers_%u = %u\n", i,
+ le32_to_cpu(htt_stats_buf->ax_ul_ofdma_nusers[i]));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_ofdma_bsr_sch_nusers_%u = %u\n", i,
+ le32_to_cpu(htt_stats_buf->ax_ul_ofdma_bsr_nusers[i]));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_ofdma_bar_sch_nusers_%u = %u\n", i,
+ le32_to_cpu(htt_stats_buf->ax_ul_ofdma_bar_nusers[i]));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_ofdma_brp_sch_nusers_%u = %u\n\n", i,
+ le32_to_cpu(htt_stats_buf->ax_ul_ofdma_brp_nusers[i]));
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "11ax UL MUMIMO SCH STATS:\n");
+ for (i = 0; i < ATH12K_HTT_TX_NUM_UL_MUMIMO_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_mumimo_basic_sch_nusers_%u = %u\n", i,
+ le32_to_cpu(htt_stats_buf->ax_ul_mumimo_nusers[i]));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_mumimo_brp_sch_nusers_%u = %u\n\n", i,
+ le32_to_cpu(htt_stats_buf->ax_ul_mumimo_brp_nusers[i]));
+ }
+
+ stats_req->buf_len = len;
+}
+
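+/* Print DL/UL MU-MIMO grouping stats from the TLV: group throughput
+ * histograms (300 mbps per bin), per-group-size eligibility counters and
+ * per-group invalid reason codes.
+ */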
+static void
+ath12k_htt_print_tx_pdev_mumimo_grp_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_mumimo_grp_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ int j;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_MUMIMO_GRP_STATS:\n");
+ len += print_array_to_buf(buf, len,
+ "dl_mumimo_grp_tputs_observed (per bin = 300 mbps)",
+ htt_stats_buf->dl_mumimo_grp_tputs,
+ ATH12K_HTT_STATS_MUMIMO_TPUT_NUM_BINS, "\n");
+ len += print_array_to_buf(buf, len, "dl_mumimo_grp eligible",
+ htt_stats_buf->dl_mumimo_grp_eligible,
+ ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ, "\n");
+ len += print_array_to_buf(buf, len, "dl_mumimo_grp_ineligible",
+ htt_stats_buf->dl_mumimo_grp_ineligible,
+ ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ, "\n");
+ len += scnprintf(buf + len, buf_len - len, "dl_mumimo_grp_invalid:\n");
+ for (j = 0; j < ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ; j++) {
+ len += scnprintf(buf + len, buf_len - len, "grp_id = %u", j);
+ len += print_array_to_buf(buf, len, "",
+ htt_stats_buf->dl_mumimo_grp_invalid,
+ ATH12K_HTT_STATS_MAX_INVALID_REASON_CODE,
+ "\n");
+ }
+
+ len += print_array_to_buf(buf, len, "ul_mumimo_grp_best_grp_size",
+ htt_stats_buf->ul_mumimo_grp_best_grp_size,
+ ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ, "\n");
+ len += print_array_to_buf(buf, len, "ul_mumimo_grp_best_num_usrs = ",
+ htt_stats_buf->ul_mumimo_grp_best_usrs,
+ ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS, "\n");
+ len += print_array_to_buf(buf, len,
+ "ul_mumimo_grp_tputs_observed (per bin = 300 mbps)",
+ htt_stats_buf->ul_mumimo_grp_tputs,
+ ATH12K_HTT_STATS_MUMIMO_TPUT_NUM_BINS, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
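+/* Each instance of this TLV carries the MPDU counters of a single user;
+ * tx_sched_mode selects the AC MU-MIMO, AX MU-MIMO or AX MU-OFDMA section
+ * and user_index zero begins a new section header.
+ */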
+static void
+ath12k_htt_print_tx_pdev_mu_mimo_mpdu_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_mpdu_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 user_index;
+ u32 tx_sched_mode;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ user_index = __le32_to_cpu(htt_stats_buf->user_index);
+ tx_sched_mode = __le32_to_cpu(htt_stats_buf->tx_sched_mode);
+
+ if (tx_sched_mode == ATH12K_HTT_STATS_TX_SCHED_MODE_MU_MIMO_AC) {
+ if (!user_index)
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_MU_MIMO_AC_MPDU_STATS:\n");
+
+ if (user_index < ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_queued_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->mpdus_queued_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_tried_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->mpdus_tried_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_failed_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->mpdus_failed_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_requeued_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->mpdus_requeued_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_err_no_ba_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->err_no_ba_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdu_underrun_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->mpdu_underrun_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_ampdu_underrun_usr_%u = %u\n\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->ampdu_underrun_usr));
+ }
+ }
+
+ if (tx_sched_mode == ATH12K_HTT_STATS_TX_SCHED_MODE_MU_MIMO_AX) {
+ if (!user_index)
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_MU_MIMO_AX_MPDU_STATS:\n");
+
+ if (user_index < ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_queued_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->mpdus_queued_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_tried_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->mpdus_tried_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_failed_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->mpdus_failed_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_requeued_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->mpdus_requeued_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_err_no_ba_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->err_no_ba_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdu_underrun_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->mpdu_underrun_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_ampdu_underrun_usr_%u = %u\n\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->ampdu_underrun_usr));
+ }
+ }
+
+ if (tx_sched_mode == ATH12K_HTT_STATS_TX_SCHED_MODE_MU_OFDMA_AX) {
+ if (!user_index)
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_AX_MU_OFDMA_MPDU_STATS:\n");
+
+ if (user_index < ATH12K_HTT_TX_NUM_OFDMA_USER_STATS) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_queued_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->mpdus_queued_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_tried_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->mpdus_tried_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_failed_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->mpdus_failed_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_requeued_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->mpdus_requeued_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_err_no_ba_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->err_no_ba_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdu_underrun_usr_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->mpdu_underrun_usr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_ampdu_underrun_usr_%u = %u\n\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->ampdu_underrun_usr));
+ }
+ }
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_pdev_cca_stats_hist_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_pdev_cca_stats_hist_v1_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_CCA_STATS_HIST_TLV :\n");
+ len += scnprintf(buf + len, buf_len - len, "chan_num = %u\n",
+ le32_to_cpu(htt_stats_buf->chan_num));
+ len += scnprintf(buf + len, buf_len - len, "num_records = %u\n",
+ le32_to_cpu(htt_stats_buf->num_records));
+ len += scnprintf(buf + len, buf_len - len, "valid_cca_counters_bitmap = 0x%x\n",
+ le32_to_cpu(htt_stats_buf->valid_cca_counters_bitmap));
+ len += scnprintf(buf + len, buf_len - len, "collection_interval = %u\n\n",
+ le32_to_cpu(htt_stats_buf->collection_interval));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_pdev_stats_cca_counters_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_pdev_stats_cca_counters_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_PDEV_STATS_CCA_COUNTERS_TLV:(in usec)\n");
+ len += scnprintf(buf + len, buf_len - len, "tx_frame_usec = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_frame_usec));
+ len += scnprintf(buf + len, buf_len - len, "rx_frame_usec = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_frame_usec));
+ len += scnprintf(buf + len, buf_len - len, "rx_clear_usec = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_clear_usec));
+ len += scnprintf(buf + len, buf_len - len, "my_rx_frame_usec = %u\n",
+ le32_to_cpu(htt_stats_buf->my_rx_frame_usec));
+ len += scnprintf(buf + len, buf_len - len, "usec_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->usec_cnt));
+ len += scnprintf(buf + len, buf_len - len, "med_rx_idle_usec = %u\n",
+ le32_to_cpu(htt_stats_buf->med_rx_idle_usec));
+ len += scnprintf(buf + len, buf_len - len, "med_tx_idle_global_usec = %u\n",
+ le32_to_cpu(htt_stats_buf->med_tx_idle_global_usec));
+ len += scnprintf(buf + len, buf_len - len, "cca_obss_usec = %u\n\n",
+ le32_to_cpu(htt_stats_buf->cca_obss_usec));
+
+ stats_req->buf_len = len;
+}
+
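+/* tx_sounding_mode selects which section is printed: per-bandwidth CBF
+ * counters plus per-user sounding counts for 11ac/11ax/11be, or the common
+ * CV upload/query counters. Per-user sounding counts are stored as
+ * consecutive 20/40/80/160 MHz entries in sounding[], with 320 MHz kept
+ * separately in sounding_320[] for 11be.
+ */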
+static void
+ath12k_htt_print_tx_sounding_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_sounding_stats_tlv *htt_stats_buf = tag_buf;
+ const __le32 *cbf_20, *cbf_40, *cbf_80, *cbf_160, *cbf_320;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 tx_sounding_mode;
+ u8 i, u;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ cbf_20 = htt_stats_buf->cbf_20;
+ cbf_40 = htt_stats_buf->cbf_40;
+ cbf_80 = htt_stats_buf->cbf_80;
+ cbf_160 = htt_stats_buf->cbf_160;
+ cbf_320 = htt_stats_buf->cbf_320;
+ tx_sounding_mode = le32_to_cpu(htt_stats_buf->tx_sounding_mode);
+
+ if (tx_sounding_mode == ATH12K_HTT_TX_AC_SOUNDING_MODE) {
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_AC_SOUNDING_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_cbf_20 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_20[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_cbf_40 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_40[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_cbf_80 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_80[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_cbf_160 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_160[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+
+ for (u = 0, i = 0; u < ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS; u++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "Sounding User_%u = 20MHz: %u, ", u,
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ len += scnprintf(buf + len, buf_len - len, "40MHz: %u, ",
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ len += scnprintf(buf + len, buf_len - len, "80MHz: %u, ",
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ len += scnprintf(buf + len, buf_len - len, "160MHz: %u\n",
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ }
+ } else if (tx_sounding_mode == ATH12K_HTT_TX_AX_SOUNDING_MODE) {
+ len += scnprintf(buf + len, buf_len - len,
+ "\nHTT_TX_AX_SOUNDING_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_cbf_20 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_20[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_cbf_40 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_40[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_cbf_80 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_80[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_cbf_160 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_160[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+
+ for (u = 0, i = 0; u < ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS; u++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "Sounding User_%u = 20MHz: %u, ", u,
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ len += scnprintf(buf + len, buf_len - len, "40MHz: %u, ",
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ len += scnprintf(buf + len, buf_len - len, "80MHz: %u, ",
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ len += scnprintf(buf + len, buf_len - len, "160MHz: %u\n",
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ }
+ } else if (tx_sounding_mode == ATH12K_HTT_TX_BE_SOUNDING_MODE) {
+ len += scnprintf(buf + len, buf_len - len,
+ "\nHTT_TX_BE_SOUNDING_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "be_cbf_20 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_20[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len,
+ "be_cbf_40 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_40[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len,
+ "be_cbf_80 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_80[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len,
+ "be_cbf_160 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_160[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len,
+ "be_cbf_320 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_320[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_320[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_320[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_320[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_320[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ for (u = 0, i = 0; u < ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS; u++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "Sounding User_%u = 20MHz: %u, ", u,
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ len += scnprintf(buf + len, buf_len - len, "40MHz: %u, ",
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ len += scnprintf(buf + len, buf_len - len, "80MHz: %u, ",
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ len += scnprintf(buf + len, buf_len - len,
+ "160MHz: %u, 320MHz: %u\n",
+ le32_to_cpu(htt_stats_buf->sounding[i++]),
+ le32_to_cpu(htt_stats_buf->sounding_320[u]));
+ }
+ } else if (tx_sounding_mode == ATH12K_HTT_TX_CMN_SOUNDING_MODE) {
+ len += scnprintf(buf + len, buf_len - len,
+ "\nCV UPLOAD HANDLER STATS:\n");
+ len += scnprintf(buf + len, buf_len - len, "cv_nc_mismatch_err = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_nc_mismatch_err));
+ len += scnprintf(buf + len, buf_len - len, "cv_fcs_err = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_fcs_err));
+ len += scnprintf(buf + len, buf_len - len, "cv_frag_idx_mismatch = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_frag_idx_mismatch));
+ len += scnprintf(buf + len, buf_len - len, "cv_invalid_peer_id = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_invalid_peer_id));
+ len += scnprintf(buf + len, buf_len - len, "cv_no_txbf_setup = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_no_txbf_setup));
+ len += scnprintf(buf + len, buf_len - len, "cv_expiry_in_update = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_expiry_in_update));
+ len += scnprintf(buf + len, buf_len - len, "cv_pkt_bw_exceed = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_pkt_bw_exceed));
+ len += scnprintf(buf + len, buf_len - len, "cv_dma_not_done_err = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_dma_not_done_err));
+ len += scnprintf(buf + len, buf_len - len, "cv_update_failed = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_update_failed));
+ len += scnprintf(buf + len, buf_len - len, "cv_dma_timeout_error = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_dma_timeout_error));
+ len += scnprintf(buf + len, buf_len - len, "cv_buf_ibf_uploads = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_buf_ibf_uploads));
+ len += scnprintf(buf + len, buf_len - len, "cv_buf_ebf_uploads = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_buf_ebf_uploads));
+ len += scnprintf(buf + len, buf_len - len, "cv_buf_received = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_buf_received));
+ len += scnprintf(buf + len, buf_len - len, "cv_buf_fed_back = %u\n\n",
+ le32_to_cpu(htt_stats_buf->cv_buf_fed_back));
+
+ len += scnprintf(buf + len, buf_len - len, "CV QUERY STATS:\n");
+ len += scnprintf(buf + len, buf_len - len, "cv_total_query = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_total_query));
+ len += scnprintf(buf + len, buf_len - len,
+ "cv_total_pattern_query = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_total_pattern_query));
+ len += scnprintf(buf + len, buf_len - len, "cv_total_bw_query = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_total_bw_query));
+ len += scnprintf(buf + len, buf_len - len, "cv_invalid_bw_coding = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_invalid_bw_coding));
+ len += scnprintf(buf + len, buf_len - len, "cv_forced_sounding = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_forced_sounding));
+ len += scnprintf(buf + len, buf_len - len,
+ "cv_standalone_sounding = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_standalone_sounding));
+ len += scnprintf(buf + len, buf_len - len, "cv_nc_mismatch = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_nc_mismatch));
+ len += scnprintf(buf + len, buf_len - len, "cv_fb_type_mismatch = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_fb_type_mismatch));
+ len += scnprintf(buf + len, buf_len - len, "cv_ofdma_bw_mismatch = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_ofdma_bw_mismatch));
+ len += scnprintf(buf + len, buf_len - len, "cv_bw_mismatch = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_bw_mismatch));
+ len += scnprintf(buf + len, buf_len - len, "cv_pattern_mismatch = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_pattern_mismatch));
+ len += scnprintf(buf + len, buf_len - len, "cv_preamble_mismatch = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_preamble_mismatch));
+ len += scnprintf(buf + len, buf_len - len, "cv_nr_mismatch = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_nr_mismatch));
+ len += scnprintf(buf + len, buf_len - len,
+ "cv_in_use_cnt_exceeded = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_in_use_cnt_exceeded));
+ len += scnprintf(buf + len, buf_len - len, "cv_ntbr_sounding = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_ntbr_sounding));
+ len += scnprintf(buf + len, buf_len - len,
+ "cv_found_upload_in_progress = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_found_upload_in_progress));
+ len += scnprintf(buf + len, buf_len - len,
+ "cv_expired_during_query = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_expired_during_query));
+ len += scnprintf(buf + len, buf_len - len, "cv_found = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_found));
+ len += scnprintf(buf + len, buf_len - len, "cv_not_found = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_not_found));
+ len += scnprintf(buf + len, buf_len - len, "cv_total_query_ibf = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_total_query_ibf));
+ len += scnprintf(buf + len, buf_len - len, "cv_found_ibf = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_found_ibf));
+ len += scnprintf(buf + len, buf_len - len, "cv_not_found_ibf = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_not_found_ibf));
+ len += scnprintf(buf + len, buf_len - len,
+ "cv_expired_during_query_ibf = %u\n\n",
+ le32_to_cpu(htt_stats_buf->cv_expired_during_query_ibf));
+ }
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_pdev_obss_pd_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_pdev_obss_pd_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 i;
+ static const char *access_cat_names[ATH12K_HTT_NUM_AC_WMM] = {"best effort",
+ "background",
+ "video", "voice"};
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_OBSS_PD_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "num_spatial_reuse_tx = %u\n",
+ le32_to_cpu(htt_stats_buf->num_sr_tx_transmissions));
+ len += scnprintf(buf + len, buf_len - len,
+ "num_spatial_reuse_opportunities = %u\n",
+ le32_to_cpu(htt_stats_buf->num_spatial_reuse_opportunities));
+ len += scnprintf(buf + len, buf_len - len, "num_non_srg_opportunities = %u\n",
+ le32_to_cpu(htt_stats_buf->num_non_srg_opportunities));
+ len += scnprintf(buf + len, buf_len - len, "num_non_srg_ppdu_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->num_non_srg_ppdu_tried));
+ len += scnprintf(buf + len, buf_len - len, "num_non_srg_ppdu_success = %u\n",
+ le32_to_cpu(htt_stats_buf->num_non_srg_ppdu_success));
+ len += scnprintf(buf + len, buf_len - len, "num_srg_opportunities = %u\n",
+ le32_to_cpu(htt_stats_buf->num_srg_opportunities));
+ len += scnprintf(buf + len, buf_len - len, "num_srg_ppdu_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->num_srg_ppdu_tried));
+ len += scnprintf(buf + len, buf_len - len, "num_srg_ppdu_success = %u\n",
+ le32_to_cpu(htt_stats_buf->num_srg_ppdu_success));
+ len += scnprintf(buf + len, buf_len - len, "num_psr_opportunities = %u\n",
+ le32_to_cpu(htt_stats_buf->num_psr_opportunities));
+ len += scnprintf(buf + len, buf_len - len, "num_psr_ppdu_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->num_psr_ppdu_tried));
+ len += scnprintf(buf + len, buf_len - len, "num_psr_ppdu_success = %u\n",
+ le32_to_cpu(htt_stats_buf->num_psr_ppdu_success));
+ len += scnprintf(buf + len, buf_len - len, "min_duration_check_flush_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->num_obss_min_dur_check_flush_cnt));
+ len += scnprintf(buf + len, buf_len - len, "sr_ppdu_abort_flush_cnt = %u\n\n",
+ le32_to_cpu(htt_stats_buf->num_sr_ppdu_abort_flush_cnt));
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_OBSS_PD_PER_AC_STATS:\n");
+ for (i = 0; i < ATH12K_HTT_NUM_AC_WMM; i++) {
+ len += scnprintf(buf + len, buf_len - len, "Access Category %u (%s)\n",
+ i, access_cat_names[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "num_non_srg_ppdu_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->num_non_srg_tried_per_ac[i]));
+ len += scnprintf(buf + len, buf_len - len,
+ "num_non_srg_ppdu_success = %u\n",
+ le32_to_cpu(htt_stats_buf->num_non_srg_success_ac[i]));
+ len += scnprintf(buf + len, buf_len - len, "num_srg_ppdu_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->num_srg_tried_per_ac[i]));
+ len += scnprintf(buf + len, buf_len - len,
+ "num_srg_ppdu_success = %u\n\n",
+ le32_to_cpu(htt_stats_buf->num_srg_success_per_ac[i]));
+ }
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_latency_prof_ctx_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_latency_prof_ctx_tlv *htt_stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_STATS_LATENCY_CTX_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "duration = %u\n",
+ le32_to_cpu(htt_stats_buf->duration));
+ len += scnprintf(buf + len, buf_len - len, "tx_msdu_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_msdu_cnt));
+ len += scnprintf(buf + len, buf_len - len, "tx_mpdu_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_mpdu_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_msdu_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_msdu_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_mpdu_cnt = %u\n\n",
+ le32_to_cpu(htt_stats_buf->rx_mpdu_cnt));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_latency_prof_cnt(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_latency_prof_cnt_tlv *htt_stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_STATS_LATENCY_CNT_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "prof_enable_cnt = %u\n\n",
+ le32_to_cpu(htt_stats_buf->prof_enable_cnt));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_latency_prof_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_latency_prof_stats_tlv *htt_stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ if (le32_to_cpu(htt_stats_buf->print_header) == 1) {
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_STATS_LATENCY_PROF_TLV:\n");
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "Latency name = %s\n",
+ htt_stats_buf->latency_prof_name);
+ len += scnprintf(buf + len, buf_len - len, "count = %u\n",
+ le32_to_cpu(htt_stats_buf->cnt));
+ len += scnprintf(buf + len, buf_len - len, "minimum = %u\n",
+ le32_to_cpu(htt_stats_buf->min));
+ len += scnprintf(buf + len, buf_len - len, "maximum = %u\n",
+ le32_to_cpu(htt_stats_buf->max));
+ len += scnprintf(buf + len, buf_len - len, "last = %u\n",
+ le32_to_cpu(htt_stats_buf->last));
+ len += scnprintf(buf + len, buf_len - len, "total = %u\n",
+ le32_to_cpu(htt_stats_buf->tot));
+ len += scnprintf(buf + len, buf_len - len, "average = %u\n",
+ le32_to_cpu(htt_stats_buf->avg));
+ len += scnprintf(buf + len, buf_len - len, "histogram interval = %u\n",
+ le32_to_cpu(htt_stats_buf->hist_intvl));
+ len += print_array_to_buf(buf, len, "histogram", htt_stats_buf->hist,
+ ATH12K_HTT_LATENCY_PROFILE_NUM_MAX_HIST, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
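+/* UL OFDMA trigger RX stats: per-MCS/GI/NSS/BW histograms, RU size
+ * distributions for data and non-data PPDUs, and per-STA RSSI tracking.
+ */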
+static void
+ath12k_htt_print_ul_ofdma_trigger_stats(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_rx_pdev_ul_trigger_stats_tlv *htt_stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 mac_id;
+ u8 j;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_PDEV_UL_TRIGGER_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_ul_ofdma = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_11ax_ul_ofdma));
+ len += print_array_to_buf(buf, len, "ul_ofdma_rx_mcs",
+ htt_stats_buf->ul_ofdma_rx_mcs,
+ ATH12K_HTT_RX_NUM_MCS_CNTRS, "\n");
+ for (j = 0; j < ATH12K_HTT_RX_NUM_GI_CNTRS; j++) {
+ len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_gi[%u]", j);
+ len += print_array_to_buf(buf, len, "",
+ htt_stats_buf->ul_ofdma_rx_gi[j],
+ ATH12K_HTT_RX_NUM_MCS_CNTRS, "\n");
+ }
+
+ len += print_array_to_buf_index(buf, len, "ul_ofdma_rx_nss", 1,
+ htt_stats_buf->ul_ofdma_rx_nss,
+ ATH12K_HTT_RX_NUM_SPATIAL_STREAMS, "\n");
+ len += print_array_to_buf(buf, len, "ul_ofdma_rx_bw",
+ htt_stats_buf->ul_ofdma_rx_bw,
+ ATH12K_HTT_RX_NUM_BW_CNTRS, "\n");
+
+ for (j = 0; j < ATH12K_HTT_RX_NUM_REDUCED_CHAN_TYPES; j++) {
+ len += scnprintf(buf + len, buf_len - len, j == 0 ?
+ "half_ul_ofdma_rx_bw" :
+ "quarter_ul_ofdma_rx_bw");
+ len += print_array_to_buf(buf, len, "", htt_stats_buf->red_bw[j],
+ ATH12K_HTT_RX_NUM_BW_CNTRS, "\n");
+ }
+ len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_stbc = %u\n",
+ le32_to_cpu(htt_stats_buf->ul_ofdma_rx_stbc));
+ len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_ldpc = %u\n",
+ le32_to_cpu(htt_stats_buf->ul_ofdma_rx_ldpc));
+
+ len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_data_ru_size_ppdu = ");
+ for (j = 0; j < ATH12K_HTT_RX_NUM_RU_SIZE_CNTRS; j++)
+ len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+ ath12k_htt_ax_tx_rx_ru_size_to_str(j),
+ le32_to_cpu(htt_stats_buf->data_ru_size_ppdu[j]));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_ulofdma_non_data_ru_size_ppdu = ");
+ for (j = 0; j < ATH12K_HTT_RX_NUM_RU_SIZE_CNTRS; j++)
+ len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+ ath12k_htt_ax_tx_rx_ru_size_to_str(j),
+ le32_to_cpu(htt_stats_buf->non_data_ru_size_ppdu[j]));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += print_array_to_buf(buf, len, "rx_rssi_track_sta_aid",
+ htt_stats_buf->uplink_sta_aid,
+ ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK, "\n");
+ len += print_array_to_buf(buf, len, "rx_sta_target_rssi",
+ htt_stats_buf->uplink_sta_target_rssi,
+ ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK, "\n");
+ len += print_array_to_buf(buf, len, "rx_sta_fd_rssi",
+ htt_stats_buf->uplink_sta_fd_rssi,
+ ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK, "\n");
+ len += print_array_to_buf(buf, len, "rx_sta_power_headroom",
+ htt_stats_buf->uplink_sta_power_headroom,
+ ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK, "\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "ul_ofdma_basic_trigger_rx_qos_null_only = %u\n\n",
+ le32_to_cpu(htt_stats_buf->ul_ofdma_bsc_trig_rx_qos_null_only));
+
+ stats_req->buf_len = len;
+}
+
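+/* Per-user UL OFDMA counters; the TLV header is printed only for the
+ * first user (user_index zero).
+ */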
+static void
+ath12k_htt_print_ul_ofdma_user_stats(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_rx_pdev_ul_ofdma_user_stats_tlv *htt_stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 user_index;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ user_index = __le32_to_cpu(htt_stats_buf->user_index);
+
+ if (!user_index)
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_PDEV_UL_OFDMA_USER_STAS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_non_data_ppdu_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->rx_ulofdma_non_data_ppdu));
+ len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_data_ppdu_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->rx_ulofdma_data_ppdu));
+ len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_mpdu_ok_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->rx_ulofdma_mpdu_ok));
+ len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_mpdu_fail_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->rx_ulofdma_mpdu_fail));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_ulofdma_non_data_nusers_%u = %u\n", user_index,
+ le32_to_cpu(htt_stats_buf->rx_ulofdma_non_data_nusers));
+ len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_data_nusers_%u = %u\n\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->rx_ulofdma_data_nusers));
+
+ stats_req->buf_len = len;
+}
+
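+/* UL MU-MIMO trigger RX stats. MCS and GI histograms are assembled as
+ * "index:value" lists in str_buf and the trailing comma is stripped
+ * before printing.
+ */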
+static void
+ath12k_htt_print_ul_mumimo_trig_stats(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_rx_ul_mumimo_trig_stats_tlv *htt_stats_buf = tag_buf;
+ char str_buf[ATH12K_HTT_MAX_STRING_LEN] = {};
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 mac_id;
+ u16 index;
+ u8 i, j;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_PDEV_UL_MUMIMO_TRIG_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_ul_mumimo = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_11ax_ul_mumimo));
+ index = 0;
+ memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN);
+ for (i = 0; i < ATH12K_HTT_RX_NUM_MCS_CNTRS; i++)
+ index += scnprintf(&str_buf[index], ATH12K_HTT_MAX_STRING_LEN - index,
+ " %u:%u,", i,
+ le32_to_cpu(htt_stats_buf->ul_mumimo_rx_mcs[i]));
+
+ for (i = 0; i < ATH12K_HTT_RX_NUM_EXTRA_MCS_CNTRS; i++)
+ index += scnprintf(&str_buf[index], ATH12K_HTT_MAX_STRING_LEN - index,
+ " %u:%u,", i + ATH12K_HTT_RX_NUM_MCS_CNTRS,
+ le32_to_cpu(htt_stats_buf->ul_mumimo_rx_mcs_ext[i]));
+ str_buf[--index] = '\0';
+ len += scnprintf(buf + len, buf_len - len, "ul_mumimo_rx_mcs = %s\n", str_buf);
+
+ for (j = 0; j < ATH12K_HTT_RX_NUM_GI_CNTRS; j++) {
+ index = 0;
+ memset(&str_buf[index], 0x0, ATH12K_HTT_MAX_STRING_LEN);
+ for (i = 0; i < ATH12K_HTT_RX_NUM_MCS_CNTRS; i++)
+ index += scnprintf(&str_buf[index],
+ ATH12K_HTT_MAX_STRING_LEN - index,
+ " %u:%u,", i,
+ le32_to_cpu(htt_stats_buf->ul_rx_gi[j][i]));
+
+ for (i = 0; i < ATH12K_HTT_RX_NUM_EXTRA_MCS_CNTRS; i++)
+ index += scnprintf(&str_buf[index],
+ ATH12K_HTT_MAX_STRING_LEN - index,
+ " %u:%u,", i + ATH12K_HTT_RX_NUM_MCS_CNTRS,
+ le32_to_cpu(htt_stats_buf->ul_gi_ext[j][i]));
+ str_buf[--index] = '\0';
+ len += scnprintf(buf + len, buf_len - len,
+ "ul_mumimo_rx_gi_%u = %s\n", j, str_buf);
+ }
+
+ index = 0;
+ memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN);
+ len += print_array_to_buf_index(buf, len, "ul_mumimo_rx_nss", 1,
+ htt_stats_buf->ul_mumimo_rx_nss,
+ ATH12K_HTT_RX_NUM_SPATIAL_STREAMS, "\n");
+
+ len += print_array_to_buf(buf, len, "ul_mumimo_rx_bw",
+ htt_stats_buf->ul_mumimo_rx_bw,
+ ATH12K_HTT_RX_NUM_BW_CNTRS, "\n");
+ for (i = 0; i < ATH12K_HTT_RX_NUM_REDUCED_CHAN_TYPES; i++) {
+ index = 0;
+ memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN);
+ for (j = 0; j < ATH12K_HTT_RX_NUM_BW_CNTRS; j++)
+ index += scnprintf(&str_buf[index],
+ ATH12K_HTT_MAX_STRING_LEN - index,
+ " %u:%u,", j,
+ le32_to_cpu(htt_stats_buf->red_bw[i][j]));
+ str_buf[--index] = '\0';
+ len += scnprintf(buf + len, buf_len - len, "%s = %s\n",
+ i == 0 ? "half_ul_mumimo_rx_bw" :
+ "quarter_ul_mumimo_rx_bw", str_buf);
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "ul_mumimo_rx_stbc = %u\n",
+ le32_to_cpu(htt_stats_buf->ul_mumimo_rx_stbc));
+ len += scnprintf(buf + len, buf_len - len, "ul_mumimo_rx_ldpc = %u\n",
+ le32_to_cpu(htt_stats_buf->ul_mumimo_rx_ldpc));
+
+ for (j = 0; j < ATH12K_HTT_RX_NUM_SPATIAL_STREAMS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_ul_mumimo_rssi_in_dbm: chain%u ", j);
+ len += print_array_to_buf_s8(buf, len, "", 0,
+ htt_stats_buf->ul_rssi[j],
+ ATH12K_HTT_RX_NUM_BW_CNTRS, "\n");
+ }
+
+ for (j = 0; j < ATH12K_HTT_TX_UL_MUMIMO_USER_STATS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_ul_mumimo_target_rssi: user_%u ", j);
+ len += print_array_to_buf_s8(buf, len, "", 0,
+ htt_stats_buf->tgt_rssi[j],
+ ATH12K_HTT_RX_NUM_BW_CNTRS, "\n");
+ }
+
+ for (j = 0; j < ATH12K_HTT_TX_UL_MUMIMO_USER_STATS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_ul_mumimo_fd_rssi: user_%u ", j);
+ len += print_array_to_buf_s8(buf, len, "", 0,
+ htt_stats_buf->fd[j],
+ ATH12K_HTT_RX_NUM_SPATIAL_STREAMS, "\n");
+ }
+
+ for (j = 0; j < ATH12K_HTT_TX_UL_MUMIMO_USER_STATS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_ulmumimo_pilot_evm_db_mean: user_%u ", j);
+ len += print_array_to_buf_s8(buf, len, "", 0,
+ htt_stats_buf->db[j],
+ ATH12K_HTT_RX_NUM_SPATIAL_STREAMS, "\n");
+ }
+
+ len += scnprintf(buf + len, buf_len - len,
+ "ul_mumimo_basic_trigger_rx_qos_null_only = %u\n\n",
+ le32_to_cpu(htt_stats_buf->mumimo_bsc_trig_rx_qos_null_only));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_rx_fse_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_rx_fse_stats_tlv *htt_stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_STATS_RX_FSE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "=== Software RX FSE STATS ===\n");
+ len += scnprintf(buf + len, buf_len - len, "Enable count = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_enable_cnt));
+ len += scnprintf(buf + len, buf_len - len, "Disable count = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_disable_cnt));
+ len += scnprintf(buf + len, buf_len - len, "Cache invalidate entry count = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_cache_invalidate_entry_cnt));
+ len += scnprintf(buf + len, buf_len - len, "Full cache invalidate count = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_full_cache_invalidate_cnt));
+
+ len += scnprintf(buf + len, buf_len - len, "\n=== Hardware RX FSE STATS ===\n");
+ len += scnprintf(buf + len, buf_len - len, "Cache hits count = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_num_cache_hits_cnt));
+ len += scnprintf(buf + len, buf_len - len, "Cache no. of searches = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_num_searches_cnt));
+ len += scnprintf(buf + len, buf_len - len, "Cache occupancy peak count:\n");
+ len += scnprintf(buf + len, buf_len - len, "[0] = %u [1-16] = %u [17-32] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[0]),
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[1]),
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[2]));
+ len += scnprintf(buf + len, buf_len - len, "[33-48] = %u [49-64] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[3]),
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[4]));
+ len += scnprintf(buf + len, buf_len - len, "[65-80] = %u [81-96] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[5]),
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[6]));
+ len += scnprintf(buf + len, buf_len - len, "[97-112] = %u [113-127] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[7]),
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[8]));
+ len += scnprintf(buf + len, buf_len - len, "[128] = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[9]));
+ len += scnprintf(buf + len, buf_len - len, "Cache occupancy current count:\n");
+ len += scnprintf(buf + len, buf_len - len, "[0] = %u [1-16] = %u [17-32] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[0]),
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[1]),
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[2]));
+ len += scnprintf(buf + len, buf_len - len, "[33-48] = %u [49-64] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[3]),
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[4]));
+ len += scnprintf(buf + len, buf_len - len, "[65-80] = %u [81-96] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[5]),
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[6]));
+ len += scnprintf(buf + len, buf_len - len, "[97-112] = %u [113-127] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[7]),
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[8]));
+ len += scnprintf(buf + len, buf_len - len, "[128] = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[9]));
+ len += scnprintf(buf + len, buf_len - len, "Cache search square count:\n");
+ len += scnprintf(buf + len, buf_len - len, "[0] = %u [1-50] = %u [51-100] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[0]),
+ le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[1]),
+ le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[2]));
+ len += scnprintf(buf + len, buf_len - len, "[101-200] = %u [201-255] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[3]),
+ le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[4]));
+ len += scnprintf(buf + len, buf_len - len, "[256] = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[5]));
+ len += scnprintf(buf + len, buf_len - len, "Cache search peak pending count:\n");
+ len += scnprintf(buf + len, buf_len - len, "[0] = %u [1-2] = %u [3-4] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_search_stat_peak_cnt[0]),
+ le32_to_cpu(htt_stats_buf->fse_search_stat_peak_cnt[1]),
+ le32_to_cpu(htt_stats_buf->fse_search_stat_peak_cnt[2]));
+ len += scnprintf(buf + len, buf_len - len, "[Greater/Equal to 5] = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_search_stat_peak_cnt[3]));
+ len += scnprintf(buf + len, buf_len - len, "Cache search tot pending count:\n");
+ len += scnprintf(buf + len, buf_len - len, "[0] = %u [1-2] = %u [3-4] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_search_stat_pending_cnt[0]),
+ le32_to_cpu(htt_stats_buf->fse_search_stat_pending_cnt[1]),
+ le32_to_cpu(htt_stats_buf->fse_search_stat_pending_cnt[2]));
+ len += scnprintf(buf + len, buf_len - len, "[Greater/Equal to 5] = %u\n\n",
+ le32_to_cpu(htt_stats_buf->fse_search_stat_pending_cnt[3]));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_pdev_tx_rate_txbf_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_pdev_txrate_txbf_stats_tlv *htt_stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u8 i;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_STATS_PDEV_TX_RATE_TXBF_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len, "Legacy OFDM Rates: 6 Mbps: %u, ",
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[0]));
+ len += scnprintf(buf + len, buf_len - len, "9 Mbps: %u, 12 Mbps: %u, ",
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[1]),
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[2]));
+ len += scnprintf(buf + len, buf_len - len, "18 Mbps: %u\n",
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[3]));
+ len += scnprintf(buf + len, buf_len - len, "24 Mbps: %u, 36 Mbps: %u, ",
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[4]),
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[5]));
+ len += scnprintf(buf + len, buf_len - len, "48 Mbps: %u, 54 Mbps: %u\n",
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[6]),
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[7]));
+
+ len += print_array_to_buf(buf, len, "tx_ol_mcs", htt_stats_buf->tx_su_ol_mcs,
+ ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "tx_ibf_mcs", htt_stats_buf->tx_su_ibf_mcs,
+ ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "tx_txbf_mcs", htt_stats_buf->tx_su_txbf_mcs,
+ ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS, "\n");
+ len += print_array_to_buf_index(buf, len, "tx_ol_nss", 1,
+ htt_stats_buf->tx_su_ol_nss,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS,
+ "\n");
+ len += print_array_to_buf_index(buf, len, "tx_ibf_nss", 1,
+ htt_stats_buf->tx_su_ibf_nss,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS,
+ "\n");
+ len += print_array_to_buf_index(buf, len, "tx_txbf_nss", 1,
+ htt_stats_buf->tx_su_txbf_nss,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS,
+ "\n");
+ len += print_array_to_buf(buf, len, "tx_ol_bw", htt_stats_buf->tx_su_ol_bw,
+ ATH12K_HTT_TXBF_NUM_BW_CNTRS, "\n");
+ for (i = 0; i < ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES; i++)
+ len += print_array_to_buf(buf, len, i ? "quarter_tx_ol_bw" :
+ "half_tx_ol_bw",
+ htt_stats_buf->ol[i],
+ ATH12K_HTT_TXBF_NUM_BW_CNTRS,
+ "\n");
+
+ len += print_array_to_buf(buf, len, "tx_ibf_bw", htt_stats_buf->tx_su_ibf_bw,
+ ATH12K_HTT_TXBF_NUM_BW_CNTRS, "\n");
+ for (i = 0; i < ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES; i++)
+ len += print_array_to_buf(buf, len, i ? "quarter_tx_ibf_bw" :
+ "half_tx_ibf_bw",
+ htt_stats_buf->ibf[i],
+ ATH12K_HTT_TXBF_NUM_BW_CNTRS,
+ "\n");
+
+ len += print_array_to_buf(buf, len, "tx_txbf_bw", htt_stats_buf->tx_su_txbf_bw,
+ ATH12K_HTT_TXBF_NUM_BW_CNTRS, "\n");
+ for (i = 0; i < ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES; i++)
+ len += print_array_to_buf(buf, len, i ? "quarter_tx_txbf_bw" :
+ "half_tx_txbf_bw",
+ htt_stats_buf->txbf[i],
+ ATH12K_HTT_TXBF_NUM_BW_CNTRS,
+ "\n");
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_STATS_PDEV_TXBF_FLAG_RETURN_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len, "TXBF_reason_code_stats: 0:%u, 1:%u,",
+ le32_to_cpu(htt_stats_buf->txbf_flag_set_mu_mode),
+ le32_to_cpu(htt_stats_buf->txbf_flag_set_final_status));
+ len += scnprintf(buf + len, buf_len - len, " 2:%u, 3:%u, 4:%u, 5:%u, ",
+ le32_to_cpu(htt_stats_buf->txbf_flag_not_set_verified_txbf_mode),
+ le32_to_cpu(htt_stats_buf->txbf_flag_not_set_disable_p2p_access),
+ le32_to_cpu(htt_stats_buf->txbf_flag_not_set_max_nss_in_he160),
+ le32_to_cpu(htt_stats_buf->txbf_flag_not_set_disable_uldlofdma));
+ len += scnprintf(buf + len, buf_len - len, "6:%u, 7:%u\n\n",
+ le32_to_cpu(htt_stats_buf->txbf_flag_not_set_mcs_threshold_val),
+ le32_to_cpu(htt_stats_buf->txbf_flag_not_set_final_status));
+
+ stats_req->buf_len = len;
+}
+
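+/* 11ax OFDMA TxBF NDPA counters, printed as "index:count" pairs starting
+ * from 1; the trailing comma of each list is dropped by stepping len back
+ * one byte and re-terminating the string.
+ */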
+static void
+ath12k_htt_print_txbf_ofdma_ax_ndpa_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_txbf_ofdma_ax_ndpa_stats_tlv *stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 num_elements;
+ u8 i;
+
+ if (tag_len < sizeof(*stats_buf))
+ return;
+
+ num_elements = le32_to_cpu(stats_buf->num_elems_ax_ndpa_arr);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_AX_NDPA_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_ofdma_ndpa_queued =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_ndpa[i].ax_ofdma_ndpa_queued));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndpa_tried =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_ndpa[i].ax_ofdma_ndpa_tried));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndpa_flushed =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_ndpa[i].ax_ofdma_ndpa_flush));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndpa_err =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_ndpa[i].ax_ofdma_ndpa_err));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_txbf_ofdma_ax_ndp_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_txbf_ofdma_ax_ndp_stats_tlv *stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 num_elements;
+ u8 i;
+
+ if (tag_len < sizeof(*stats_buf))
+ return;
+
+ num_elements = le32_to_cpu(stats_buf->num_elems_ax_ndp_arr);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_AX_NDP_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_ofdma_ndp_queued =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_ndp[i].ax_ofdma_ndp_queued));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndp_tried =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_ndp[i].ax_ofdma_ndp_tried));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndp_flushed =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_ndp[i].ax_ofdma_ndp_flush));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndp_err =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_ndp[i].ax_ofdma_ndp_err));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_txbf_ofdma_ax_brp_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_txbf_ofdma_ax_brp_stats_tlv *stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 num_elements;
+ u8 i;
+
+ if (tag_len < sizeof(*stats_buf))
+ return;
+
+ num_elements = le32_to_cpu(stats_buf->num_elems_ax_brp_arr);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_AX_BRP_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_ofdma_brpoll_queued =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_brp_queued));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_brpoll_tied =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_brp_tried));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_brpoll_flushed =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_brp_flushed));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_brp_err =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_brp_err));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_brp_err_num_cbf_rcvd =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_num_cbf_rcvd));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_txbf_ofdma_ax_steer_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_txbf_ofdma_ax_steer_stats_tlv *stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 num_elements;
+ u8 i;
+
+ if (tag_len < sizeof(*stats_buf))
+ return;
+
+ num_elements = le32_to_cpu(stats_buf->num_elems_ax_steer_arr);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TXBF_OFDMA_AX_STEER_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_ofdma_num_ppdu_steer =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_steer[i].num_ppdu_steer));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_num_usrs_prefetch =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_steer[i].num_usr_prefetch));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_num_usrs_sound =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_steer[i].num_usr_sound));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_num_usrs_force_sound =");
+ for (i = 0; i < num_elements; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+ le32_to_cpu(stats_buf->ax_steer[i].num_usr_force_sound));
+ len--;
+ *(buf + len) = '\0';
+
+ len += scnprintf(buf + len, buf_len - len, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_txbf_ofdma_ax_steer_mpdu_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_txbf_ofdma_ax_steer_mpdu_stats_tlv *stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TXBF_OFDMA_AX_STEER_MPDU_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "rbo_steer_mpdus_tried = %u\n",
+ le32_to_cpu(stats_buf->ax_ofdma_rbo_steer_mpdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "rbo_steer_mpdus_failed = %u\n",
+ le32_to_cpu(stats_buf->ax_ofdma_rbo_steer_mpdus_failed));
+ len += scnprintf(buf + len, buf_len - len, "sifs_steer_mpdus_tried = %u\n",
+ le32_to_cpu(stats_buf->ax_ofdma_sifs_steer_mpdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "sifs_steer_mpdus_failed = %u\n\n",
+ le32_to_cpu(stats_buf->ax_ofdma_sifs_steer_mpdus_failed));
+
+ stats_req->buf_len = len;
+}
+
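+/* Format one DL pager history entry (page number, page count and a 64-bit
+ * timestamp rebuilt from the msb/lsb words) into str_buf.
+ */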
+static void ath12k_htt_print_dlpager_entry(const struct ath12k_htt_pgs_info *pg_info,
+ int idx, char *str_buf)
+{
+ u64 page_timestamp;
+ u16 index = 0;
+
+ page_timestamp = ath12k_le32hilo_to_u64(pg_info->ts_msb, pg_info->ts_lsb);
+
+ index += snprintf(&str_buf[index], ATH12K_HTT_MAX_STRING_LEN - index,
+ "Index - %u ; Page Number - %u ; ",
+ idx, le32_to_cpu(pg_info->page_num));
+ index += snprintf(&str_buf[index], ATH12K_HTT_MAX_STRING_LEN - index,
+ "Num of pages - %u ; Timestamp - %lluus\n",
+ le32_to_cpu(pg_info->num_pgs), page_timestamp);
+}
+
+static void
+ath12k_htt_print_dlpager_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_dl_pager_stats_tlv *stat_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 dword_lock, dword_unlock;
+ int i;
+ u8 *buf = stats_req->buf;
+ u8 pg_locked;
+ u8 pg_unlock;
+ char str_buf[ATH12K_HTT_MAX_STRING_LEN] = {};
+
+ if (tag_len < sizeof(*stat_buf))
+ return;
+
+ dword_lock = le32_get_bits(stat_buf->info2,
+ ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO2);
+ dword_unlock = le32_get_bits(stat_buf->info2,
+ ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO2);
+
+ pg_locked = ATH12K_HTT_STATS_PAGE_LOCKED;
+ pg_unlock = ATH12K_HTT_STATS_PAGE_UNLOCKED;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_DLPAGER_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ASYNC locked pages = %u\n",
+ le32_get_bits(stat_buf->info0,
+ ATH12K_HTT_DLPAGER_ASYNC_LOCK_PG_CNT_INFO0));
+ len += scnprintf(buf + len, buf_len - len, "SYNC locked pages = %u\n",
+ le32_get_bits(stat_buf->info0,
+ ATH12K_HTT_DLPAGER_SYNC_LOCK_PG_CNT_INFO0));
+ len += scnprintf(buf + len, buf_len - len, "Total locked pages = %u\n",
+ le32_get_bits(stat_buf->info1,
+ ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO1));
+ len += scnprintf(buf + len, buf_len - len, "Total free pages = %u\n",
+ le32_get_bits(stat_buf->info1,
+ ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO1));
+
+ len += scnprintf(buf + len, buf_len - len, "\nLOCKED PAGES HISTORY\n");
+ len += scnprintf(buf + len, buf_len - len, "last_locked_page_idx = %u\n",
+ dword_lock ? dword_lock - 1 : (ATH12K_PAGER_MAX - 1));
+
+ for (i = 0; i < ATH12K_PAGER_MAX; i++) {
+ memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN);
+ ath12k_htt_print_dlpager_entry(&stat_buf->pgs_info[pg_locked][i],
+ i, str_buf);
+ len += scnprintf(buf + len, buf_len - len, "%s", str_buf);
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\nUNLOCKED PAGES HISTORY\n");
+ len += scnprintf(buf + len, buf_len - len, "last_unlocked_page_idx = %u\n",
+ dword_unlock ? dword_unlock - 1 : ATH12K_PAGER_MAX - 1);
+
+ for (i = 0; i < ATH12K_PAGER_MAX; i++) {
+ memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN);
+ ath12k_htt_print_dlpager_entry(&stat_buf->pgs_info[pg_unlock][i],
+ i, str_buf);
+ len += scnprintf(buf + len, buf_len - len, "%s", str_buf);
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ stats_req->buf_len = len;
+}
+
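+/* Dump HTT_PHY_STATS_TLV: per-chain BDF and runtime noise floor, false
+ * radar and radar-confirmation counters, and the current ANI level.
+ */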
+static void
+ath12k_htt_print_phy_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_phy_stats_tlv *htt_stats_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 *buf = stats_req->buf, i;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_STATS_TLV:\n");
+ for (i = 0; i < ATH12K_HTT_STATS_MAX_CHAINS; i++)
+ len += scnprintf(buf + len, buf_len - len, "bdf_nf_chain[%d] = %d\n",
+ i, a_sle32_to_cpu(htt_stats_buf->nf_chain[i]));
+ for (i = 0; i < ATH12K_HTT_STATS_MAX_CHAINS; i++)
+ len += scnprintf(buf + len, buf_len - len, "runtime_nf_chain[%d] = %d\n",
+ i, a_sle32_to_cpu(htt_stats_buf->runtime_nf_chain[i]));
+ len += scnprintf(buf + len, buf_len - len, "false_radar_cnt = %u / %u (mins)\n",
+ le32_to_cpu(htt_stats_buf->false_radar_cnt),
+ le32_to_cpu(htt_stats_buf->fw_run_time));
+ len += scnprintf(buf + len, buf_len - len, "radar_cs_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->radar_cs_cnt));
+ len += scnprintf(buf + len, buf_len - len, "ani_level = %d\n\n",
+ a_sle32_to_cpu(htt_stats_buf->ani_level));
+
+ stats_req->buf_len = len;
+}
+
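+/* Dump HTT_PHY_COUNTERS_TLV: PHY/MAC abort, timing error and gain adjust
+ * counters plus the rx packet, CRC-pass, per-block and OTA error arrays.
+ */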
+static void
+ath12k_htt_print_phy_counters_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_phy_counters_tlv *htt_stats_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_COUNTERS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "rx_ofdma_timing_err_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_ofdma_timing_err_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_cck_fail_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_cck_fail_cnt));
+ len += scnprintf(buf + len, buf_len - len, "mactx_abort_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->mactx_abort_cnt));
+ len += scnprintf(buf + len, buf_len - len, "macrx_abort_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->macrx_abort_cnt));
+ len += scnprintf(buf + len, buf_len - len, "phytx_abort_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->phytx_abort_cnt));
+ len += scnprintf(buf + len, buf_len - len, "phyrx_abort_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->phyrx_abort_cnt));
+ len += scnprintf(buf + len, buf_len - len, "phyrx_defer_abort_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->phyrx_defer_abort_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_lstf_event_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_gain_adj_lstf_event_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_non_legacy_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_gain_adj_non_legacy_cnt));
+ len += print_array_to_buf(buf, len, "rx_pkt_cnt", htt_stats_buf->rx_pkt_cnt,
+ ATH12K_HTT_MAX_RX_PKT_CNT, "\n");
+ len += print_array_to_buf(buf, len, "rx_pkt_crc_pass_cnt",
+ htt_stats_buf->rx_pkt_crc_pass_cnt,
+ ATH12K_HTT_MAX_RX_PKT_CRC_PASS_CNT, "\n");
+ len += print_array_to_buf(buf, len, "per_blk_err_cnt",
+ htt_stats_buf->per_blk_err_cnt,
+ ATH12K_HTT_MAX_PER_BLK_ERR_CNT, "\n");
+ len += print_array_to_buf(buf, len, "rx_ota_err_cnt",
+ htt_stats_buf->rx_ota_err_cnt,
+ ATH12K_HTT_MAX_RX_OTA_ERR_CNT, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
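+/* Dump HTT_PHY_RESET_STATS_TLV: channel configuration, reset cause and
+ * calibration state/masks captured at the most recent PHY reset.
+ */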
+static void
+ath12k_htt_print_phy_reset_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_phy_reset_stats_tlv *htt_stats_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_RESET_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ le32_to_cpu(htt_stats_buf->pdev_id));
+ len += scnprintf(buf + len, buf_len - len, "chan_mhz = %u\n",
+ le32_to_cpu(htt_stats_buf->chan_mhz));
+ len += scnprintf(buf + len, buf_len - len, "chan_band_center_freq1 = %u\n",
+ le32_to_cpu(htt_stats_buf->chan_band_center_freq1));
+ len += scnprintf(buf + len, buf_len - len, "chan_band_center_freq2 = %u\n",
+ le32_to_cpu(htt_stats_buf->chan_band_center_freq2));
+ len += scnprintf(buf + len, buf_len - len, "chan_phy_mode = %u\n",
+ le32_to_cpu(htt_stats_buf->chan_phy_mode));
+ len += scnprintf(buf + len, buf_len - len, "chan_flags = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->chan_flags));
+ len += scnprintf(buf + len, buf_len - len, "chan_num = %u\n",
+ le32_to_cpu(htt_stats_buf->chan_num));
+ len += scnprintf(buf + len, buf_len - len, "reset_cause = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->reset_cause));
+ len += scnprintf(buf + len, buf_len - len, "prev_reset_cause = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->prev_reset_cause));
+ len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_src = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phy_warm_reset_src));
+ len += scnprintf(buf + len, buf_len - len, "rx_gain_tbl_mode = %d\n",
+ le32_to_cpu(htt_stats_buf->rx_gain_tbl_mode));
+ len += scnprintf(buf + len, buf_len - len, "xbar_val = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->xbar_val));
+ len += scnprintf(buf + len, buf_len - len, "force_calibration = %u\n",
+ le32_to_cpu(htt_stats_buf->force_calibration));
+ len += scnprintf(buf + len, buf_len - len, "phyrf_mode = %u\n",
+ le32_to_cpu(htt_stats_buf->phyrf_mode));
+ len += scnprintf(buf + len, buf_len - len, "phy_homechan = %u\n",
+ le32_to_cpu(htt_stats_buf->phy_homechan));
+ len += scnprintf(buf + len, buf_len - len, "phy_tx_ch_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phy_tx_ch_mask));
+ len += scnprintf(buf + len, buf_len - len, "phy_rx_ch_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phy_rx_ch_mask));
+ len += scnprintf(buf + len, buf_len - len, "phybb_ini_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phybb_ini_mask));
+ len += scnprintf(buf + len, buf_len - len, "phyrf_ini_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phyrf_ini_mask));
+ len += scnprintf(buf + len, buf_len - len, "phy_dfs_en_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phy_dfs_en_mask));
+ len += scnprintf(buf + len, buf_len - len, "phy_sscan_en_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phy_sscan_en_mask));
+ len += scnprintf(buf + len, buf_len - len, "phy_synth_sel_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phy_synth_sel_mask));
+ len += scnprintf(buf + len, buf_len - len, "phy_adfs_freq = %u\n",
+ le32_to_cpu(htt_stats_buf->phy_adfs_freq));
+ len += scnprintf(buf + len, buf_len - len, "cck_fir_settings = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->cck_fir_settings));
+ len += scnprintf(buf + len, buf_len - len, "phy_dyn_pri_chan = %u\n",
+ le32_to_cpu(htt_stats_buf->phy_dyn_pri_chan));
+ len += scnprintf(buf + len, buf_len - len, "cca_thresh = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->cca_thresh));
+ len += scnprintf(buf + len, buf_len - len, "dyn_cca_status = %u\n",
+ le32_to_cpu(htt_stats_buf->dyn_cca_status));
+ len += scnprintf(buf + len, buf_len - len, "rxdesense_thresh_hw = 0x%x\n",
+ le32_to_cpu(htt_stats_buf->rxdesense_thresh_hw));
+ len += scnprintf(buf + len, buf_len - len, "rxdesense_thresh_sw = 0x%x\n\n",
+ le32_to_cpu(htt_stats_buf->rxdesense_thresh_sw));
+
+ stats_req->buf_len = len;
+}
+
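+/* Dump HTT_PHY_RESET_COUNTERS_TLV: counts of PHY reset related events
+ * (CF active-low pass/fail, forced calibration, RF mode switches, ...).
+ */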
+static void
+ath12k_htt_print_phy_reset_counters_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_phy_reset_counters_tlv *htt_stats_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_RESET_COUNTERS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ le32_to_cpu(htt_stats_buf->pdev_id));
+ len += scnprintf(buf + len, buf_len - len, "cf_active_low_fail_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->cf_active_low_fail_cnt));
+ len += scnprintf(buf + len, buf_len - len, "cf_active_low_pass_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->cf_active_low_pass_cnt));
+ len += scnprintf(buf + len, buf_len - len, "phy_off_through_vreg_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->phy_off_through_vreg_cnt));
+ len += scnprintf(buf + len, buf_len - len, "force_calibration_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->force_calibration_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rf_mode_switch_phy_off_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rf_mode_switch_phy_off_cnt));
+ len += scnprintf(buf + len, buf_len - len, "temperature_recal_cnt = %u\n\n",
+ le32_to_cpu(htt_stats_buf->temperature_recal_cnt));
+
+ stats_req->buf_len = len;
+}
+
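+/* Dump HTT_PHY_TPC_STATS_TLV: transmit power control limits, regulatory
+ * power values and the per-chain / per-sub-band power tables.
+ */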
+static void
+ath12k_htt_print_phy_tpc_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_phy_tpc_stats_tlv *htt_stats_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_TPC_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ le32_to_cpu(htt_stats_buf->pdev_id));
+ len += scnprintf(buf + len, buf_len - len, "tx_power_scale = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_power_scale));
+ len += scnprintf(buf + len, buf_len - len, "tx_power_scale_db = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_power_scale_db));
+ len += scnprintf(buf + len, buf_len - len, "min_negative_tx_power = %d\n",
+ le32_to_cpu(htt_stats_buf->min_negative_tx_power));
+ len += scnprintf(buf + len, buf_len - len, "reg_ctl_domain = %u\n",
+ le32_to_cpu(htt_stats_buf->reg_ctl_domain));
+ len += scnprintf(buf + len, buf_len - len, "twice_max_rd_power = %u\n",
+ le32_to_cpu(htt_stats_buf->twice_max_rd_power));
+ len += scnprintf(buf + len, buf_len - len, "max_tx_power = %u\n",
+ le32_to_cpu(htt_stats_buf->max_tx_power));
+ len += scnprintf(buf + len, buf_len - len, "home_max_tx_power = %u\n",
+ le32_to_cpu(htt_stats_buf->home_max_tx_power));
+ len += scnprintf(buf + len, buf_len - len, "psd_power = %d\n",
+ le32_to_cpu(htt_stats_buf->psd_power));
+ len += scnprintf(buf + len, buf_len - len, "eirp_power = %u\n",
+ le32_to_cpu(htt_stats_buf->eirp_power));
+ len += scnprintf(buf + len, buf_len - len, "power_type_6ghz = %u\n",
+ le32_to_cpu(htt_stats_buf->power_type_6ghz));
+ len += print_array_to_buf(buf, len, "max_reg_allowed_power",
+ htt_stats_buf->max_reg_allowed_power,
+ ATH12K_HTT_STATS_MAX_CHAINS, "\n");
+ len += print_array_to_buf(buf, len, "max_reg_allowed_power_6ghz",
+ htt_stats_buf->max_reg_allowed_power_6ghz,
+ ATH12K_HTT_STATS_MAX_CHAINS, "\n");
+ len += print_array_to_buf(buf, len, "sub_band_cfreq",
+ htt_stats_buf->sub_band_cfreq,
+ ATH12K_HTT_MAX_CH_PWR_INFO_SIZE, "\n");
+ len += print_array_to_buf(buf, len, "sub_band_txpower",
+ htt_stats_buf->sub_band_txpower,
+ ATH12K_HTT_MAX_CH_PWR_INFO_SIZE, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
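+/* Dump HTT_SOC_COMMON_STATS_TLV: the 64-bit invalid-peer MSDU drop count
+ * assembled from its hi/lo words.
+ */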
+static void
+ath12k_htt_print_soc_txrx_stats_common_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_t2h_soc_txrx_stats_common_tlv *htt_stats_buf = tag_buf;
+ u64 drop_count;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ drop_count = ath12k_le32hilo_to_u64(htt_stats_buf->inv_peers_msdu_drop_count_hi,
+ htt_stats_buf->inv_peers_msdu_drop_count_lo);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SOC_COMMON_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "soc_drop_count = %llu\n\n",
+ drop_count);
+
+ stats_req->buf_len = len;
+}
+
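+/* Dump per-rate PER statistics keyed by rate-control mode (SU, DL/UL
+ * MU-MIMO, DL/UL OFDMA): PPDU/MPDU tried and failed counters per BW, NSS
+ * and MCS, per RU size for OFDMA modes, and probe info for DL MU-MIMO.
+ */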
+static void
+ath12k_htt_print_tx_per_rate_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_per_rate_stats_tlv *stats_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 ru_size_cnt = 0;
+ u32 rc_mode, ru_type;
+ u8 *buf = stats_req->buf, i;
+ const char *mode_prefix;
+
+ if (tag_len < sizeof(*stats_buf))
+ return;
+
+ rc_mode = le32_to_cpu(stats_buf->rc_mode);
+ ru_type = le32_to_cpu(stats_buf->ru_type);
+
+ switch (rc_mode) {
+ case ATH12K_HTT_STATS_RC_MODE_DLSU:
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PER_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_SU:\n");
+ mode_prefix = "su";
+ break;
+ case ATH12K_HTT_STATS_RC_MODE_DLMUMIMO:
+ len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_DL_MUMIMO:\n");
+ mode_prefix = "mu";
+ break;
+ case ATH12K_HTT_STATS_RC_MODE_DLOFDMA:
+ len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_DL_OFDMA:\n");
+ mode_prefix = "ofdma";
+ if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY)
+ ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS;
+ else if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU)
+ ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS;
+ break;
+ case ATH12K_HTT_STATS_RC_MODE_ULMUMIMO:
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PER_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_UL_MUMIMO:\n");
+ mode_prefix = "ulmu";
+ break;
+ case ATH12K_HTT_STATS_RC_MODE_ULOFDMA:
+ len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_UL_OFDMA:\n");
+ mode_prefix = "ulofdma";
+ if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY)
+ ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS;
+ else if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU)
+ ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS;
+ break;
+ default:
+ return;
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\nPER per BW:\n");
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+ len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ",
+ mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_bw[i].ppdus_tried));
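+ /* print the per_bw320 (320 MHz) bucket as the next index after per_bw[] */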
+ len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i,
+ le32_to_cpu(stats_buf->per_bw320.ppdus_tried));
+
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+ len += scnprintf(buf + len, buf_len - len, "non_data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_ack_failed_%s = ",
+ mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_bw[i].ppdus_ack_failed));
+ len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i,
+ le32_to_cpu(stats_buf->per_bw320.ppdus_ack_failed));
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ", mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_bw[i].mpdus_tried));
+ len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i,
+ le32_to_cpu(stats_buf->per_bw320.mpdus_tried));
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ", mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u", i,
+ le32_to_cpu(stats_buf->per_bw[i].mpdus_failed));
+ len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i,
+ le32_to_cpu(stats_buf->per_bw320.mpdus_failed));
+
+ len += scnprintf(buf + len, buf_len - len, "\nPER per NSS:\n");
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+ len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ",
+ mode_prefix);
+ for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1,
+ le32_to_cpu(stats_buf->per_nss[i].ppdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+ len += scnprintf(buf + len, buf_len - len, "non_data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_ack_failed_%s = ",
+ mode_prefix);
+ for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1,
+ le32_to_cpu(stats_buf->per_nss[i].ppdus_ack_failed));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ", mode_prefix);
+ for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1,
+ le32_to_cpu(stats_buf->per_nss[i].mpdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ", mode_prefix);
+ for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1,
+ le32_to_cpu(stats_buf->per_nss[i].mpdus_failed));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "\nPER per MCS:\n");
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+ len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ",
+ mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_mcs[i].ppdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+ len += scnprintf(buf + len, buf_len - len, "non_data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_ack_failed_%s = ",
+ mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_mcs[i].ppdus_ack_failed));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ", mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_mcs[i].mpdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ", mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_mcs[i].mpdus_failed));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ if ((rc_mode == ATH12K_HTT_STATS_RC_MODE_DLOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA) &&
+ ru_type != ATH12K_HTT_STATS_RU_TYPE_INVALID) {
+ len += scnprintf(buf + len, buf_len - len, "\nPER per RU:\n");
+
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA)
+ len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ",
+ mode_prefix);
+ for (i = 0; i < ru_size_cnt; i++)
+ len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+ ath12k_tx_ru_size_to_str(ru_type, i),
+ le32_to_cpu(stats_buf->ru[i].ppdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA)
+ len += scnprintf(buf + len, buf_len - len,
+ "non_data_ppdus_%s = ", mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len,
+ "ppdus_ack_failed_%s = ", mode_prefix);
+ for (i = 0; i < ru_size_cnt; i++)
+ len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+ ath12k_tx_ru_size_to_str(ru_type, i),
+ le32_to_cpu(stats_buf->ru[i].ppdus_ack_failed));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ",
+ mode_prefix);
+ for (i = 0; i < ru_size_cnt; i++)
+ len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+ ath12k_tx_ru_size_to_str(ru_type, i),
+ le32_to_cpu(stats_buf->ru[i].mpdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ",
+ mode_prefix);
+ for (i = 0; i < ru_size_cnt; i++)
+ len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+ ath12k_tx_ru_size_to_str(ru_type, i),
+ le32_to_cpu(stats_buf->ru[i].mpdus_failed));
+ len += scnprintf(buf + len, buf_len - len, "\n\n");
+ }
+
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_DLMUMIMO) {
+ len += scnprintf(buf + len, buf_len - len, "\nlast_probed_bw = %u\n",
+ le32_to_cpu(stats_buf->last_probed_bw));
+ len += scnprintf(buf + len, buf_len - len, "last_probed_nss = %u\n",
+ le32_to_cpu(stats_buf->last_probed_nss));
+ len += scnprintf(buf + len, buf_len - len, "last_probed_mcs = %u\n",
+ le32_to_cpu(stats_buf->last_probed_mcs));
+ len += print_array_to_buf(buf, len, "MU Probe count per RC MODE",
+ stats_buf->probe_cnt,
+ ATH12K_HTT_RC_MODE_2D_COUNT, "\n\n");
+ }
+
+ stats_req->buf_len = len;
+}
+
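+/* Dump HTT_AST_ENTRY_TLV: one AST (address search table) entry with its
+ * MAC address, sw_peer_id and the per-entry flag bits.
+ */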
+static void
+ath12k_htt_print_ast_entry_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_ast_entry_tlv *htt_stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 mac_addr_l32;
+ u32 mac_addr_h16;
+ u32 ast_info;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_addr_l32 = le32_to_cpu(htt_stats_buf->mac_addr.mac_addr_l32);
+ mac_addr_h16 = le32_to_cpu(htt_stats_buf->mac_addr.mac_addr_h16);
+ ast_info = le32_to_cpu(htt_stats_buf->info);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_AST_ENTRY_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ast_index = %u\n",
+ le32_to_cpu(htt_stats_buf->ast_index));
+ len += scnprintf(buf + len, buf_len - len,
+ "mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ u32_get_bits(mac_addr_l32, ATH12K_HTT_MAC_ADDR_L32_0),
+ u32_get_bits(mac_addr_l32, ATH12K_HTT_MAC_ADDR_L32_1),
+ u32_get_bits(mac_addr_l32, ATH12K_HTT_MAC_ADDR_L32_2),
+ u32_get_bits(mac_addr_l32, ATH12K_HTT_MAC_ADDR_L32_3),
+ u32_get_bits(mac_addr_h16, ATH12K_HTT_MAC_ADDR_H16_0),
+ u32_get_bits(mac_addr_h16, ATH12K_HTT_MAC_ADDR_H16_1));
+
+ len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %u\n",
+ le32_to_cpu(htt_stats_buf->sw_peer_id));
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ u32_get_bits(ast_info, ATH12K_HTT_AST_PDEV_ID_INFO));
+ len += scnprintf(buf + len, buf_len - len, "vdev_id = %u\n",
+ u32_get_bits(ast_info, ATH12K_HTT_AST_VDEV_ID_INFO));
+ len += scnprintf(buf + len, buf_len - len, "next_hop = %u\n",
+ u32_get_bits(ast_info, ATH12K_HTT_AST_NEXT_HOP_INFO));
+ len += scnprintf(buf + len, buf_len - len, "mcast = %u\n",
+ u32_get_bits(ast_info, ATH12K_HTT_AST_MCAST_INFO));
+ len += scnprintf(buf + len, buf_len - len, "monitor_direct = %u\n",
+ u32_get_bits(ast_info, ATH12K_HTT_AST_MONITOR_DIRECT_INFO));
+ len += scnprintf(buf + len, buf_len - len, "mesh_sta = %u\n",
+ u32_get_bits(ast_info, ATH12K_HTT_AST_MESH_STA_INFO));
+ len += scnprintf(buf + len, buf_len - len, "mec = %u\n",
+ u32_get_bits(ast_info, ATH12K_HTT_AST_MEC_INFO));
+ len += scnprintf(buf + len, buf_len - len, "intra_bss = %u\n\n",
+ u32_get_bits(ast_info, ATH12K_HTT_AST_INTRA_BSS_INFO));
+
+ stats_req->buf_len = len;
+}
+
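+/* Helpers mapping the puncture stats direction, PPDU type and preamble
+ * enums to the short strings used in the stat labels below.
+ */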
+static const char *
+ath12k_htt_get_punct_dir_type_str(enum ath12k_htt_stats_direction direction)
+{
+ switch (direction) {
+ case ATH12K_HTT_STATS_DIRECTION_TX:
+ return "tx";
+ case ATH12K_HTT_STATS_DIRECTION_RX:
+ return "rx";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *
+ath12k_htt_get_punct_ppdu_type_str(enum ath12k_htt_stats_ppdu_type ppdu_type)
+{
+ switch (ppdu_type) {
+ case ATH12K_HTT_STATS_PPDU_TYPE_MODE_SU:
+ return "su";
+ case ATH12K_HTT_STATS_PPDU_TYPE_DL_MU_MIMO:
+ return "dl_mu_mimo";
+ case ATH12K_HTT_STATS_PPDU_TYPE_UL_MU_MIMO:
+ return "ul_mu_mimo";
+ case ATH12K_HTT_STATS_PPDU_TYPE_DL_MU_OFDMA:
+ return "dl_mu_ofdma";
+ case ATH12K_HTT_STATS_PPDU_TYPE_UL_MU_OFDMA:
+ return "ul_mu_ofdma";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *
+ath12k_htt_get_punct_pream_type_str(enum ath12k_htt_stats_param_type pream_type)
+{
+ switch (pream_type) {
+ case ATH12K_HTT_STATS_PREAM_OFDM:
+ return "ofdm";
+ case ATH12K_HTT_STATS_PREAM_CCK:
+ return "cck";
+ case ATH12K_HTT_STATS_PREAM_HT:
+ return "ht";
+ case ATH12K_HTT_STATS_PREAM_VHT:
+ return "ac";
+ case ATH12K_HTT_STATS_PREAM_HE:
+ return "ax";
+ case ATH12K_HTT_STATS_PREAM_EHT:
+ return "be";
+ default:
+ return "unknown";
+ }
+}
+
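+/* Dump HTT_PDEV_PUNCTURE_STATS_TLV: last used puncture pattern mask and a
+ * per-subband usage histogram for one direction/preamble/PPDU type.
+ */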
+static void
+ath12k_htt_print_puncture_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_pdev_puncture_stats_tlv *stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ const char *direction;
+ const char *ppdu_type;
+ const char *preamble;
+ u32 mac_id__word;
+ u32 subband_limit;
+ u8 i;
+
+ if (tag_len < sizeof(*stats_buf))
+ return;
+
+ mac_id__word = le32_to_cpu(stats_buf->mac_id__word);
+ subband_limit = min(le32_to_cpu(stats_buf->subband_cnt),
+ ATH12K_HTT_PUNCT_STATS_MAX_SUBBAND_CNT);
+
+ direction = ath12k_htt_get_punct_dir_type_str(le32_to_cpu(stats_buf->direction));
+ ppdu_type = ath12k_htt_get_punct_ppdu_type_str(le32_to_cpu(stats_buf->ppdu_type));
+ preamble = ath12k_htt_get_punct_pream_type_str(le32_to_cpu(stats_buf->preamble));
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_PUNCTURE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id__word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len,
+ "%s_%s_%s_last_used_pattern_mask = 0x%08x\n",
+ direction, preamble, ppdu_type,
+ le32_to_cpu(stats_buf->last_used_pattern_mask));
+
+ for (i = 0; i < subband_limit; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "%s_%s_%s_num_subbands_used_cnt_%02d = %u\n",
+ direction, preamble, ppdu_type, i + 1,
+ le32_to_cpu(stats_buf->num_subbands_used_cnt[i]));
+ }
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ stats_req->buf_len = len;
+}
+
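+/* Dump HTT_DMAC_RESET_STATS_TLV: DMAC reset/engage/disengage counts and
+ * their 64-bit millisecond timestamps rebuilt from hi/lo words.
+ */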
+static void
+ath12k_htt_print_dmac_reset_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_dmac_reset_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u64 time;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_DMAC_RESET_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "reset_count = %u\n",
+ le32_to_cpu(htt_stats_buf->reset_count));
+ time = ath12k_le32hilo_to_u64(htt_stats_buf->reset_time_hi_ms,
+ htt_stats_buf->reset_time_lo_ms);
+ len += scnprintf(buf + len, buf_len - len, "reset_time_ms = %llu\n", time);
+ time = ath12k_le32hilo_to_u64(htt_stats_buf->disengage_time_hi_ms,
+ htt_stats_buf->disengage_time_lo_ms);
+ len += scnprintf(buf + len, buf_len - len, "disengage_time_ms = %llu\n", time);
+
+ time = ath12k_le32hilo_to_u64(htt_stats_buf->engage_time_hi_ms,
+ htt_stats_buf->engage_time_lo_ms);
+ len += scnprintf(buf + len, buf_len - len, "engage_time_ms = %llu\n", time);
+
+ len += scnprintf(buf + len, buf_len - len, "disengage_count = %u\n",
+ le32_to_cpu(htt_stats_buf->disengage_count));
+ len += scnprintf(buf + len, buf_len - len, "engage_count = %u\n",
+ le32_to_cpu(htt_stats_buf->engage_count));
+ len += scnprintf(buf + len, buf_len - len, "drain_dest_ring_mask = 0x%x\n\n",
+ le32_to_cpu(htt_stats_buf->drain_dest_ring_mask));
+
+ stats_req->buf_len = len;
+}
+
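+/* Dump HTT_PDEV_SCHED_ALGO_TLV: per-AC counters describing why the
+ * scheduler enabled, disabled or monitored DL OFDMA (rate based, channel
+ * access latency based, pipelining limits, etc.).
+ */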
+static void
+ath12k_htt_print_pdev_sched_algo_ofdma_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_pdev_sched_algo_ofdma_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_SCHED_ALGO_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += print_array_to_buf(buf, len, "rate_based_dlofdma_enabled_count",
+ htt_stats_buf->rate_based_dlofdma_enabled_cnt,
+ ATH12K_HTT_NUM_AC_WMM, "\n");
+ len += print_array_to_buf(buf, len, "rate_based_dlofdma_disabled_count",
+ htt_stats_buf->rate_based_dlofdma_disabled_cnt,
+ ATH12K_HTT_NUM_AC_WMM, "\n");
+ len += print_array_to_buf(buf, len, "rate_based_dlofdma_probing_count",
+ htt_stats_buf->rate_based_dlofdma_disabled_cnt,
+ ATH12K_HTT_NUM_AC_WMM, "\n");
+ len += print_array_to_buf(buf, len, "rate_based_dlofdma_monitoring_count",
+ htt_stats_buf->rate_based_dlofdma_monitor_cnt,
+ ATH12K_HTT_NUM_AC_WMM, "\n");
+ len += print_array_to_buf(buf, len, "chan_acc_lat_based_dlofdma_enabled_count",
+ htt_stats_buf->chan_acc_lat_based_dlofdma_enabled_cnt,
+ ATH12K_HTT_NUM_AC_WMM, "\n");
+ len += print_array_to_buf(buf, len, "chan_acc_lat_based_dlofdma_disabled_count",
+ htt_stats_buf->chan_acc_lat_based_dlofdma_disabled_cnt,
+ ATH12K_HTT_NUM_AC_WMM, "\n");
+ len += print_array_to_buf(buf, len, "chan_acc_lat_based_dlofdma_monitoring_count",
+ htt_stats_buf->chan_acc_lat_based_dlofdma_monitor_cnt,
+ ATH12K_HTT_NUM_AC_WMM, "\n");
+ len += print_array_to_buf(buf, len, "downgrade_to_dl_su_ru_alloc_fail",
+ htt_stats_buf->downgrade_to_dl_su_ru_alloc_fail,
+ ATH12K_HTT_NUM_AC_WMM, "\n");
+ len += print_array_to_buf(buf, len, "candidate_list_single_user_disable_ofdma",
+ htt_stats_buf->candidate_list_single_user_disable_ofdma,
+ ATH12K_HTT_NUM_AC_WMM, "\n");
+ len += print_array_to_buf(buf, len, "dl_cand_list_dropped_high_ul_qos_weight",
+ htt_stats_buf->dl_cand_list_dropped_high_ul_qos_weight,
+ ATH12K_HTT_NUM_AC_WMM, "\n");
+ len += print_array_to_buf(buf, len, "ax_dlofdma_disabled_due_to_pipelining",
+ htt_stats_buf->ax_dlofdma_disabled_due_to_pipelining,
+ ATH12K_HTT_NUM_AC_WMM, "\n");
+ len += print_array_to_buf(buf, len, "dlofdma_disabled_su_only_eligible",
+ htt_stats_buf->dlofdma_disabled_su_only_eligible,
+ ATH12K_HTT_NUM_AC_WMM, "\n");
+ len += print_array_to_buf(buf, len, "dlofdma_disabled_consec_no_mpdus_tried",
+ htt_stats_buf->dlofdma_disabled_consec_no_mpdus_tried,
+ ATH12K_HTT_NUM_AC_WMM, "\n");
+ len += print_array_to_buf(buf, len, "dlofdma_disabled_consec_no_mpdus_success",
+ htt_stats_buf->dlofdma_disabled_consec_no_mpdus_success,
+ ATH12K_HTT_NUM_AC_WMM, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
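+/* Dump HTT_TX_PDEV_RATE_STATS_BE_OFDMA_TLV: 11be OFDMA TX counters for
+ * LDPC, MCS, EHT-SIG MCS, RU size, NSS, BW and the per-GI MCS tables.
+ */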
+static void
+ath12k_htt_print_tx_pdev_rate_stats_be_ofdma_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_rate_stats_be_ofdma_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+ u8 i;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_RATE_STATS_BE_OFDMA_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "be_ofdma_tx_ldpc = %u\n",
+ le32_to_cpu(htt_stats_buf->be_ofdma_tx_ldpc));
+ len += print_array_to_buf(buf, len, "be_ofdma_tx_mcs",
+ htt_stats_buf->be_ofdma_tx_mcs,
+ ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS, "\n");
+ len += print_array_to_buf(buf, len, "be_ofdma_eht_sig_mcs",
+ htt_stats_buf->be_ofdma_eht_sig_mcs,
+ ATH12K_HTT_TX_PDEV_NUM_EHT_SIG_MCS_CNTRS, "\n");
+ len += scnprintf(buf + len, buf_len - len, "be_ofdma_tx_ru_size = ");
+ for (i = 0; i < ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+ ath12k_htt_be_tx_rx_ru_size_to_str(i),
+ le32_to_cpu(htt_stats_buf->be_ofdma_tx_ru_size[i]));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += print_array_to_buf_index(buf, len, "be_ofdma_tx_nss = ", 1,
+ htt_stats_buf->be_ofdma_tx_nss,
+ ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS,
+ "\n");
+ len += print_array_to_buf(buf, len, "be_ofdma_tx_bw",
+ htt_stats_buf->be_ofdma_tx_bw,
+ ATH12K_HTT_TX_PDEV_NUM_BE_BW_CNTRS, "\n");
+ for (i = 0; i < ATH12K_HTT_TX_PDEV_NUM_GI_CNTRS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "be_ofdma_tx_gi[%u]", i);
+ len += print_array_to_buf(buf, len, "", htt_stats_buf->gi[i],
+ ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS, "\n");
+ }
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ stats_req->buf_len = len;
+}
+
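+/* Dump HTT_MBSSID_CTRL_FRAME_STATS_TLV: trigger and MU-RTS control frame
+ * counts split into within-BSS and across-BSS cases.
+ */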
+static void
+ath12k_htt_print_pdev_mbssid_ctrl_frame_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_pdev_mbssid_ctrl_frame_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_MBSSID_CTRL_FRAME_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "basic_trigger_across_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->basic_trigger_across_bss));
+ len += scnprintf(buf + len, buf_len - len, "basic_trigger_within_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->basic_trigger_within_bss));
+ len += scnprintf(buf + len, buf_len - len, "bsr_trigger_across_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->bsr_trigger_across_bss));
+ len += scnprintf(buf + len, buf_len - len, "bsr_trigger_within_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->bsr_trigger_within_bss));
+ len += scnprintf(buf + len, buf_len - len, "mu_rts_across_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->mu_rts_across_bss));
+ len += scnprintf(buf + len, buf_len - len, "mu_rts_within_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->mu_rts_within_bss));
+ len += scnprintf(buf + len, buf_len - len, "ul_mumimo_trigger_across_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->ul_mumimo_trigger_across_bss));
+ len += scnprintf(buf + len, buf_len - len,
+ "ul_mumimo_trigger_within_bss = %u\n\n",
+ le32_to_cpu(htt_stats_buf->ul_mumimo_trigger_within_bss));
+
+ stats_req->buf_len = len;
+}
+
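+/* Dump HTT_TX_PDEV_RATE_STATS_TLV: per-pdev transmit rate statistics. */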
+static inline void
+ath12k_htt_print_tx_pdev_rate_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_rate_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 i, j;